diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index e0859b6e812b7220e8122c5eb4ee0e5d0dd81de6..23b1abe2a5205cb339759fe8df5ee955ae605b89 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -133,6 +133,7 @@ source "drivers/net/ethernet/motorcomm/Kconfig"
 source "drivers/net/ethernet/mscc/Kconfig"
 source "drivers/net/ethernet/microsoft/Kconfig"
 source "drivers/net/ethernet/moxa/Kconfig"
+source "drivers/net/ethernet/mucse/Kconfig"
 source "drivers/net/ethernet/myricom/Kconfig"
 
 config FEALNX
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 7c29f819a28f9ceb7be655f3967eb3d79f12cae1..40939012a08854639652d6d2f4c18cd6fc2141eb 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -66,6 +66,7 @@ obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
 obj-$(CONFIG_NET_VENDOR_MOTORCOMM) += motorcomm/
 obj-$(CONFIG_NET_VENDOR_MICROSEMI) += mscc/
 obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
+obj-$(CONFIG_NET_VENDOR_MUCSE) += mucse/
 obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
 obj-$(CONFIG_FEALNX) += fealnx.o
 obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
diff --git a/drivers/net/ethernet/mucse/Kconfig b/drivers/net/ethernet/mucse/Kconfig
new file mode 100755
index 0000000000000000000000000000000000000000..c359c5629dae3e6be4b2c9b5d3dfc7e3d52005d0
--- /dev/null
+++ b/drivers/net/ethernet/mucse/Kconfig
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Mucse network device configuration
+#
+
+config NET_VENDOR_MUCSE
+	bool "Mucse devices"
+	default y
+	help
+	  If you have a network (Ethernet) card belonging to this class, say Y.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Mucse cards. If you say Y, you will be asked for
+	  your specific card in the following questions.
+
+
+if NET_VENDOR_MUCSE
+config MXGBE
+	tristate "Mucse(R) 1/10/25/40GbE PCI Express adapters support"
+	depends on PCI
+	imply PTP_1588_CLOCK
+	help
+	  This driver supports the Mucse(R) 1/10/25/40GbE PCI Express family
+	  of adapters.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called rnp.
+
+config MXGBE_FIX_VF_BUG
+	bool "Fix VF bug support (PF)"
+	default y
+	depends on MXGBE
+	help
+	  Say Y here to enable the VF bug workaround in the PF driver.
+
+	  If unsure, say Y.
+
+config MXGBE_FIX_MAC_PADDIN
+	bool "Disable MAC padding (PF)"
+	default y
+	depends on MXGBE
+	help
+	  Say Y here to disable MAC padding in the PF driver.
+
+	  If unsure, say Y.
+
+config MXGBE_MSIX_COUNT
+	int "Number of MSI-X vectors"
+	default "64"
+	depends on MXGBE
+	help
+	  Valid range for MXGBE is [2,64].
+
+config MGBE
+	tristate "Mucse(R) 1GbE PCI Express adapters support"
+	depends on PCI
+	imply PTP_1588_CLOCK
+	help
+	  This driver supports the Mucse(R) 1GbE PCI Express family of
+	  adapters.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called rnpgbe.
+
+config MGBE_MSIX_COUNT
+	int "Number of MSI-X vectors"
+	default "26"
+	depends on MGBE
+	help
+	  Valid range for MGBE is [2,26].
+
+
+#config MXGBEVF
+#	tristate "Mucse(R) 1/10/25/40G PCI Express Virtual Function adapters support"
+#	depends on PCI
+#	help
+#	  This driver supports Mucse(R) 1/10/25/40GbE PCI Express family of
+#	  adapters.
+#
+#	  To compile this driver as a module, choose M here. The module
+#	  will be called rnpvf.
+# +#config MXGBEVF_FIX_VF_BUG +# bool "Fix VF Bug Support(vf)" +# default y +# depends on MXGBEVF +# help +# Say Y here if you want to fix vf bug in the driver. +# +# If unsure, say N. +# +#config MXGBEVF_FIX_MAC_PADDIN +# bool "Close Mac Padding Function(vf)" +# default y +# depends on MXGBEVF +# help +# Say Y here if you want to fix close mac padding in the driver. +# +# If unsure, say N. + +#config MXGBEM +# tristate "Mucse(R) 1/10GbE PCI Express 2/4 ports adapters support" +# depends on PCI +# imply PTP_1588_CLOCK +# help +# This driver supports Mucse(R) 1/10GbE 2/4 ports PCI Express family of +# adapters. +# +# To compile this driver as a module, choose M here. The module +# will be called rnpm. + +endif # NET_VENDOR_MUCSE + diff --git a/drivers/net/ethernet/mucse/Makefile b/drivers/net/ethernet/mucse/Makefile new file mode 100755 index 0000000000000000000000000000000000000000..e4d73a7bcf1e091a7b66c67c456d9f11fc50b168 --- /dev/null +++ b/drivers/net/ethernet/mucse/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Mucse network device drivers. +# + +obj-$(CONFIG_MXGBE) += rnp/ +obj-$(CONFIG_MGBE) += rnpgbe/ +#obj-$(CONFIG_MXGBEVF) += rnpvf/ +#obj-$(CONFIG_MXGBEM) += rnpm/ + diff --git a/drivers/net/ethernet/mucse/rnp/Makefile b/drivers/net/ethernet/mucse/rnp/Makefile new file mode 100755 index 0000000000000000000000000000000000000000..8c67c4e007c090657e5ffbee8b374b4bb3c4ab8f --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/Makefile @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 2022 - 2024 Mucse Corporation. + +obj-$(CONFIG_MXGBE) += rnp.o +rnp-objs := \ + rnp_main.o \ + rnp_common.o \ + rnp_debugfs.o \ + rnp_ethtool.o \ + rnp_lib.o \ + rnp_mbx.o \ + rnp_pcs.o \ + rnp_n10.o \ + rnp_mbx_fw.o\ + rnp_sriov.o \ + rnp_param.o \ + rnp_compat.o \ + rnp_sysfs.o \ + rnp_dcb.o \ + rnp_ptp.o \ + rnp_mpe.o diff --git a/drivers/net/ethernet/mucse/rnp/common.mk b/drivers/net/ethernet/mucse/rnp/common.mk new file mode 100755 index 0000000000000000000000000000000000000000..590f9096474e50471ee81bf9445fe56a88421526 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/common.mk @@ -0,0 +1,459 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 2022 - 2024 Mucse Corporation. 
+ +# +# common Makefile rules useful for out-of-tree Linux driver builds +# +# Usage: include common.mk +# +# After including, you probably want to add a minimum_kver_check call +# +# Required Variables: +# DRIVER +# -- Set to the lowercase driver name + +##################### +# Helpful functions # +##################### + +readlink = $(shell readlink -f ${1}) + +# helper functions for converting kernel version to version codes +get_kver = $(or $(word ${2},$(subst ., ,${1})),0) +get_kvercode = $(shell [ "${1}" -ge 0 -a "${1}" -le 255 2>/dev/null ] && \ + [ "${2}" -ge 0 -a "${2}" -le 255 2>/dev/null ] && \ + [ "${3}" -ge 0 -a "${3}" -le 255 2>/dev/null ] && \ + printf %d $$(( ( ${1} << 16 ) + ( ${2} << 8 ) + ( ${3} ) )) ) + +################ +# depmod Macro # +################ + +cmd_depmod = /sbin/depmod $(if ${SYSTEM_MAP_FILE},-e -F ${SYSTEM_MAP_FILE}) \ + $(if $(strip ${INSTALL_MOD_PATH}),-b ${INSTALL_MOD_PATH}) \ + -a ${KVER} + +################ +# dracut Macro # +################ + +cmd_initrd := $(shell \ + if which dracut > /dev/null 2>&1 ; then \ + echo "dracut --force"; \ + elif which update-initramfs > /dev/null 2>&1 ; then \ + echo "update-initramfs -u"; \ + fi ) + +##################### +# Environment tests # +##################### + +DRIVER_UPPERCASE := $(shell echo ${DRIVER} | tr "[:lower:]" "[:upper:]") + +ifeq (,${BUILD_KERNEL}) +BUILD_KERNEL=$(shell uname -r) +endif + +# Kernel Search Path +# All the places we look for kernel source +KSP := /lib/modules/${BUILD_KERNEL}/source \ + /lib/modules/${BUILD_KERNEL}/build \ + /usr/src/linux-${BUILD_KERNEL} \ + /usr/src/linux-$(${BUILD_KERNEL} | sed 's/-.*//') \ + /usr/src/kernel-headers-${BUILD_KERNEL} \ + /usr/src/kernel-source-${BUILD_KERNEL} \ + /usr/src/linux-$(${BUILD_KERNEL} | sed 's/\([0-9]*\.[0-9]*\)\..*/\1/') \ + /usr/src/linux \ + /usr/src/kernels/${BUILD_KERNEL} \ + /usr/src/kernels + +# prune the list down to only values that exist and have an include/linux +# sub-directory. We can't use include/config because some older kernels don't +# have this. +test_dir = $(shell [ -e ${dir}/include/linux ] && echo ${dir}) +KSP := $(foreach dir, ${KSP}, ${test_dir}) + +# we will use this first valid entry in the search path +ifeq (,${KSRC}) + KSRC := $(firstword ${KSP}) +endif + +ifeq (,${KSRC}) + $(warning *** Kernel header files not in any of the expected locations.) + $(warning *** Install the appropriate kernel development package, e.g.) 
$(error kernel-devel, for building kernel modules and try again)
+else
+ifeq (/lib/modules/${BUILD_KERNEL}/source, ${KSRC})
+  KOBJ := /lib/modules/${BUILD_KERNEL}/build
+else
+  KOBJ := ${KSRC}
+endif
+endif
+
+# Version file Search Path
+VSP := ${KOBJ}/include/generated/utsrelease.h \
+       ${KOBJ}/include/linux/utsrelease.h \
+       ${KOBJ}/include/linux/version.h \
+       ${KOBJ}/include/generated/uapi/linux/version.h \
+       /boot/vmlinuz.version.h
+
+# Config file Search Path
+CSP := ${KOBJ}/include/generated/autoconf.h \
+       ${KOBJ}/include/linux/autoconf.h \
+       /boot/vmlinuz.autoconf.h
+
+# System.map Search Path (for depmod)
+MSP := ${KSRC}/System.map \
+       /boot/System.map-${BUILD_KERNEL}
+
+# prune the lists down to only files that exist
+test_file = $(shell [ -f ${1} ] && echo ${1})
+VSP := $(foreach file, ${VSP}, $(call test_file,${file}))
+CSP := $(foreach file, ${CSP}, $(call test_file,${file}))
+MSP := $(foreach file, ${MSP}, $(call test_file,${file}))
+
+
+# and use the first valid entry in the Search Paths
+ifeq (,${VERSION_FILE})
+  VERSION_FILE := $(firstword ${VSP})
+endif
+
+ifeq (,${CONFIG_FILE})
+  CONFIG_FILE := $(firstword ${CSP})
+endif
+
+ifeq (,${SYSTEM_MAP_FILE})
+  SYSTEM_MAP_FILE := $(firstword ${MSP})
+endif
+
+ifeq (,$(wildcard ${VERSION_FILE}))
+  $(error Linux kernel source not configured - missing version header file)
+endif
+
+ifeq (,$(wildcard ${CONFIG_FILE}))
+  $(error Linux kernel source not configured - missing autoconf.h)
+endif
+
+ifeq (,$(wildcard ${SYSTEM_MAP_FILE}))
+  $(warning Missing System.map file - depmod will not check for missing symbols during module installation)
+endif
+
+ifneq ($(words $(subst :, ,$(CURDIR))), 1)
+  $(error Sources directory '$(CURDIR)' cannot contain spaces or colons. Rename directory or move sources to another path)
+endif
+
+########################
+# Extract config value #
+########################
+
+get_config_value = $(shell ${CC} -E -dM ${CONFIG_FILE} 2> /dev/null |\
+                   grep -m 1 ${1} | awk '{ print $$3 }')
+
+########################
+# Check module signing #
+########################
+
+CONFIG_MODULE_SIG_ALL := $(call get_config_value,CONFIG_MODULE_SIG_ALL)
+CONFIG_MODULE_SIG_FORCE := $(call get_config_value,CONFIG_MODULE_SIG_FORCE)
+CONFIG_MODULE_SIG_KEY := $(call get_config_value,CONFIG_MODULE_SIG_KEY)
+
+SIG_KEY_SP := ${KOBJ}/${CONFIG_MODULE_SIG_KEY} \
+              ${KOBJ}/certs/signing_key.pem
+
+SIG_KEY_FILE := $(firstword $(foreach file, ${SIG_KEY_SP}, $(call test_file,${file})))
+
+# print a warning if the kernel configuration attempts to sign modules but
+# the signing key can't be found.
+ifneq (${SIG_KEY_FILE},)
+warn_signed_modules := : ;
+else
+warn_signed_modules :=
+ifeq (${CONFIG_MODULE_SIG_ALL},1)
+warn_signed_modules += \
+    echo "*** The target kernel has CONFIG_MODULE_SIG_ALL enabled, but" ; \
+    echo "*** the signing key cannot be found. Module signing has been" ; \
+    echo "*** disabled for this build." ;
+endif # CONFIG_MODULE_SIG_ALL=y
+ifeq (${CONFIG_MODULE_SIG_FORCE},1)
+warn_signed_modules += \
+    echo "warning: The target kernel has CONFIG_MODULE_SIG_FORCE enabled," ; \
+    echo "warning: but the signing key cannot be found. The module must" ; \
+    echo "warning: be signed manually using 'scripts/sign-file'." ;
+endif # CONFIG_MODULE_SIG_FORCE
+DISABLE_MODULE_SIGNING := Yes
+endif
+
+#######################
+# Linux Version Setup #
+#######################
+
+# The following command line parameter is intended for development of KCOMPAT
+# against upstream kernels such as net-next which have broken or non-updated
+# version codes in their Makefile.
They are intended for debugging and +# development purpose only so that we can easily test new KCOMPAT early. If you +# don't know what this means, you do not need to set this flag. There is no +# arcane magic here. + +# Convert LINUX_VERSION into LINUX_VERSION_CODE +ifneq (${LINUX_VERSION},) + LINUX_VERSION_CODE=$(call get_kvercode,$(call get_kver,${LINUX_VERSION},1),$(call get_kver,${LINUX_VERSION},2),$(call get_kver,${LINUX_VERSION},3)) +endif + +# Honor LINUX_VERSION_CODE +ifneq (${LINUX_VERSION_CODE},) + $(warning Forcing target kernel to build with LINUX_VERSION_CODE of ${LINUX_VERSION_CODE}$(if ${LINUX_VERSION}, from LINUX_VERSION=${LINUX_VERSION}). Do this at your own risk.) + KVER_CODE := ${LINUX_VERSION_CODE} + EXTRA_CFLAGS += -DLINUX_VERSION_CODE=${LINUX_VERSION_CODE} +endif + +# Determine SLE_KERNEL_REVISION for SuSE SLE >= 11 (needed by kcompat) +# This assumes SuSE will continue setting CONFIG_LOCALVERSION to the string +# appended to the stable kernel version on which their kernel is based with +# additional versioning information (up to 3 numbers), a possible abbreviated +# git SHA1 commit id and a kernel type, e.g. CONFIG_LOCALVERSION=-1.2.3-default +# or CONFIG_LOCALVERSION=-999.gdeadbee-default +# +# SLE_LOCALVERSION_CODE is also exported to support legacy kcompat.h +# definitions. +ifeq (1,$(call get_config_value,CONFIG_SUSE_KERNEL)) + +ifneq (10,$(call get_config_value,CONFIG_SLE_VERSION)) + + CONFIG_LOCALVERSION := $(call get_config_value,CONFIG_LOCALVERSION) + LOCALVERSION := $(shell echo ${CONFIG_LOCALVERSION} | \ + cut -d'-' -f2 | sed 's/\.g[[:xdigit:]]\{7\}//') + LOCALVER_A := $(shell echo ${LOCALVERSION} | cut -d'.' -f1) +ifeq ($(shell test ${LOCALVER_A} -gt 65535; echo $$?),0) + LOCAL_VER_MAJOR := $(shell echo ${LOCALVER_A:0:3}) + LOCAL_VER_MINOR := $(shell echo ${LOCALVER_A:3:3}) + LOCALVER_B := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f2) + LOCALVER_C := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f3) + LOCALVER_D := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f4) + SLE_LOCALVERSION_CODE := $(shell expr ${LOCALVER_B} \* 65536 + \ + 0${LOCALVER_C} \* 256 + 0${LOCALVER_D}) + EXTRA_CFLAGS += -DSLE_LOCALVERSION_CODE=${SLE_LOCALVERSION_CODE} + EXTRA_CFLAGS += -DSLE_KERNEL_REVISION=${LOCALVER_B} +else + LOCALVER_B := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f2) + LOCALVER_C := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f3) + SLE_LOCALVERSION_CODE := $(shell expr ${LOCALVER_A} \* 65536 + \ + 0${LOCALVER_B} \* 256 + 0${LOCALVER_C}) + EXTRA_CFLAGS += -DSLE_LOCALVERSION_CODE=${SLE_LOCALVERSION_CODE} + EXTRA_CFLAGS += -DSLE_KERNEL_REVISION=${LOCALVER_A} +endif +endif +endif + + +EXTRA_CFLAGS += ${CFLAGS_EXTRA} + +# get the kernel version - we use this to find the correct install path +KVER := $(shell ${CC} ${EXTRA_CFLAGS} -E -dM ${VERSION_FILE} | grep UTS_RELEASE | \ + awk '{ print $$3 }' | sed 's/\"//g') + +# assume source symlink is the same as build, otherwise adjust KOBJ +ifneq (,$(wildcard /lib/modules/${KVER}/build)) + ifneq (${KSRC},$(call readlink,/lib/modules/${KVER}/build)) + KOBJ=/lib/modules/${KVER}/build + endif +endif + +ifeq (${KVER_CODE},) + KVER_CODE := $(shell ${CC} ${EXTRA_CFLAGS} -E -dM ${VSP} 2> /dev/null |\ + grep -m 1 LINUX_VERSION_CODE | awk '{ print $$3 }' | sed 's/\"//g') +endif + +# minimum_kver_check +# +# helper function to provide uniform output for different drivers to abort the +# build based on kernel version check. Usage: "$(call minimum_kver_check,2,6,XX)". 
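+#
+# For example (illustrative), a driver Makefile that must not be built
+# against anything older than kernel 3.10 would add, right after its
+# "include common.mk" line:
+#
+#   $(call minimum_kver_check,3,10,0)
+#
+# which aborts the build with an error message on older kernels.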
+define _minimum_kver_check +ifeq (0,$(shell [ ${KVER_CODE} -lt $(call get_kvercode,${1},${2},${3}) ]; echo "$$?")) + $$(warning *** Aborting the build.) + $$(error This driver is not supported on kernel versions older than ${1}.${2}.${3}) +endif +endef +minimum_kver_check = $(eval $(call _minimum_kver_check,${1},${2},${3})) + +############################# +# kcompat definitions setup # +############################# + +# In most cases, kcompat flags can be checked within the driver source files +# using simple CPP checks. However, it may be necessary to check for a flag +# value within the Makefile for some specific edge cases. For example, if an +# entire feature ought to be excluded on some kernels due to missing +# functionality. +# +# To support this, kcompat_defs.h is compiled and converted into a word list +# that can be checked to determine whether a given kcompat feature flag will +# be defined for this kernel. +# +# KCOMPAT_DEFINITIONS holds the set of all macros which are defined. Note +# this does include a large number of standard/builtin definitions. +# +# Use is_kcompat_defined as a $(call) function to check whether a given flag +# is defined or undefined. For example: +# +# ifeq ($(call is_kcompat_defined,HAVE_FEATURE_FLAG),1) +# +# ifneq ($(call is_kcompat_defined,HAVE_FEATURE_FLAG),1) +# +# The is_kcompat_defined function returns 1 if the macro name is defined, +# and the empty string otherwise. +# +# There is no mechanism to extract the value of the kcompat definition. +# Supporting this would be non-trivial as Make does not have a map variable +# type. +# +# Note that only the new layout is supported. Legacy definitions in +# kcompat.h are not supported. If you need to check one of these, please +# refactor it into the new layout. + +ifneq ($(wildcard ./kcompat_defs.h),) +# call script that populates defines automatically +# +# since is_kcompat_defined() is a macro, it's "computed" before any target +# recipe, kcompat_generated_defs.h is needed prior to that, so needs to be +# generated also via $(shell) call, which makes error handling ugly +$(if $(shell KSRC=${KSRC} OUT=kcompat_generated_defs.h CONFFILE=${CONFIG_FILE} \ + bash kcompat-generator.sh && echo ok), , $(error kcompat-generator.sh failed)) + +#KCOMPAT_DEFINITIONS := $(shell ${CC} ${EXTRA_CFLAGS} -E -dM \ + -I${KOBJ}/include \ + -I${KOBJ}/include/generated/uapi \ + kcompat_defs.h | awk '{ print $$2 }') + +is_kcompat_defined = $(if $(filter ${1},${KCOMPAT_DEFINITIONS}),1,) +else +KCOMPAT_DEFINITIONS := +is_kcompat_defined = +endif + +################ +# Manual Pages # +################ + +MANSECTION = 7 + +ifeq (,${MANDIR}) + # find the best place to install the man page + MANPATH := $(shell (manpath 2>/dev/null || echo $MANPATH) | sed 's/:/ /g') + ifneq (,${MANPATH}) + # test based on inclusion in MANPATH + test_dir = $(findstring ${dir}, ${MANPATH}) + else + # no MANPATH, test based on directory existence + test_dir = $(shell [ -e ${dir} ] && echo ${dir}) + endif + # our preferred install path + # should /usr/local/man be in here ? 
+ MANDIR := /usr/share/man /usr/man + MANDIR := $(foreach dir, ${MANDIR}, ${test_dir}) + MANDIR := $(firstword ${MANDIR}) +endif +ifeq (,${MANDIR}) + # fallback to /usr/man + MANDIR := /usr/man +endif + +#################### +# CCFLAGS variable # +#################### + +# set correct CCFLAGS variable for kernels older than 2.6.24 +ifeq (0,$(shell [ ${KVER_CODE} -lt $(call get_kvercode,2,6,24) ]; echo $$?)) +CCFLAGS_VAR := EXTRA_CFLAGS +else +CCFLAGS_VAR := ccflags-y +endif + +################# +# KBUILD_OUTPUT # +################# + +# Only set KBUILD_OUTPUT if the real paths of KOBJ and KSRC differ +ifneq ($(call readlink,${KSRC}),$(call readlink,${KOBJ})) +export KBUILD_OUTPUT ?= ${KOBJ} +endif + +############################ +# Module Install Directory # +############################ + +# Default to using updates/drivers/net/ethernet/mucse/ path, since depmod since +# v3.1 defaults to checking updates folder first, and only checking kernels/ +# and extra afterwards. We use updates instead of kernel/* due to desire to +# prevent over-writing built-in modules files. +export INSTALL_MOD_DIR ?= updates/drivers/net/ethernet/mucse/${DRIVER} + +################# +# Auxiliary Bus # +################# + +# If the check_aux_bus script exists, then this driver depends on the +# auxiliary module. Run the script to determine if we need to include +# auxiliary files with this build. +ifneq ($(call test_file,../scripts/check_aux_bus),) +NEED_AUX_BUS := $(shell ../scripts/check_aux_bus --ksrc="${KSRC}" --build-kernel="${BUILD_KERNEL}" >/dev/null 2>&1; echo $$?) +endif # check_aux_bus exists + +# The out-of-tree auxiliary module we ship should be moved into this +# directory as part of installation. +export INSTALL_AUX_DIR ?= updates/drivers/net/ethernet/mucse/auxiliary + +# If we're installing auxiliary bus out-of-tree, the following steps are +# necessary to ensure the relevant files get put in place. +ifeq (${NEED_AUX_BUS},2) +define auxiliary_post_install + install -D -m 644 Module.symvers ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_AUX_DIR}/Module.symvers + mv -f ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_MOD_DIR}/auxiliary.ko \ + ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_AUX_DIR}/auxiliary.ko + install -D -m 644 linux/auxiliary_bus.h ${INSTALL_MOD_PATH}/${KSRC}/include/linux/auxiliary_bus.h +endef +else +auxiliary_post_install = +endif + +ifeq (${NEED_AUX_BUS},2) +define auxiliary_post_uninstall + rm -f ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_AUX_DIR}/Module.symvers + rm -f ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_AUX_DIR}/auxiliary.ko + rm -f ${INSTALL_MOD_PATH}/${KSRC}/include/linux/auxiliary_bus.h +endef +else +auxiliary_post_uninstall = +endif + +###################### +# Kernel Build Macro # +###################### + +# kernel build function +# ${1} is the kernel build target +# ${2} may contain any extra rules to pass directly to the sub-make process +# +# This function is expected to be executed by +# @+$(call kernelbuild,,) +# from within a Makefile recipe. 
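+#
+# For example (illustrative), an out-of-tree Makefile built on top of
+# common.mk could drive the build and clean steps with:
+#
+#   default:
+#   	@+$(call kernelbuild,modules)
+#
+#   clean:
+#   	@+$(call kernelbuild,clean)
+#
+# where "modules" and "clean" are the standard kbuild targets passed through
+# to the kernel's top-level Makefile.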
+# +# The following variables are expected to be defined for its use: +# GCC_I_SYS -- if set it will enable use of gcc-i-sys.sh wrapper to use -isystem +# CCFLAGS_VAR -- the CCFLAGS variable to set extra CFLAGS +# EXTRA_CFLAGS -- a set of extra CFLAGS to pass into the ccflags-y variable +# KSRC -- the location of the kernel source tree to build against +# DRIVER_UPPERCASE -- the uppercase name of the kernel module, set from DRIVER +# W -- if set, enables the W= kernel warnings options +# C -- if set, enables the C= kernel sparse build options +# +kernelbuild = $(call warn_signed_modules) \ + ${MAKE} $(if ${GCC_I_SYS},CC="${GCC_I_SYS}") \ + ${CCFLAGS_VAR}="${EXTRA_CFLAGS}" \ + -C "${KSRC}" \ + CONFIG_${DRIVER_UPPERCASE}=m \ + $(if ${DISABLE_MODULE_SIGNING},CONFIG_MODULE_SIG=n) \ + $(if ${DISABLE_MODULE_SIGNING},CONFIG_MODULE_SIG_ALL=) \ + M="${CURDIR}" \ + $(if ${W},W="${W}") \ + $(if ${C},C="${C}") \ + $(if ${NEED_AUX_BUS},NEED_AUX_BUS="${NEED_AUX_BUS}") \ + ${2} ${1} diff --git a/drivers/net/ethernet/mucse/rnp/kcompat-generator.sh b/drivers/net/ethernet/mucse/rnp/kcompat-generator.sh new file mode 100755 index 0000000000000000000000000000000000000000..75d69ff7ee5472d6604bdfc8a3737833449a8948 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/kcompat-generator.sh @@ -0,0 +1,303 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0-only +# Copyright(c) 2022 - 2024 Mucse Corporation + +set -Eeuo pipefail + +# This file generates HAVE_ and NEED_ defines for current kernel +# (or KSRC if provided). +# +# It does so by 'gen' function calls (see body of 'gen-devlink' for examples). +# 'gen' could look for various kinds of declarations in provided kernel headers, +# eg look for an enum in one of files specified and check if given enumeration +# (single value) is present. See 'Documentation' or comment above the 'gen' fun +# in the kcompat-lib.sh. + +# Why using bash/awk instead of an old/legacy approach? +# +# The aim is to replicate all the defines provided by human developers +# in the past. Additional bonus is the fact, that we no longer need to care +# about backports done by OS vendors (RHEL, SLES, ORACLE, UBUNTU, more to come). +# We will even work (compile) with only part of backports provided. +# +# To enable smooth transition, especially in time of late fixes, "old" method +# of providing flags should still work as usual. + +# End of intro. +# Find info about coding style/rules at the end of file. +# Most of the implementation is in kcompat-lib.sh, here are actual 'gen' calls. 
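+#
+# Worked example (illustrative): a call such as
+#   gen HAVE_NDO_ETH_IOCTL if fun ndo_eth_ioctl in include/linux/netdevice.h
+# searches the target kernel's netdevice.h for an ndo_eth_ioctl declaration
+# and emits "#define HAVE_NDO_ETH_IOCTL" into the generated header only when
+# it is found; the "absent" form inverts that, emitting the define only when
+# the name is missing, which is how the NEED_* flags are produced.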
+ +export LC_ALL=C +ORIG_CWD="$(pwd)" +trap 'rc=$?; echo >&2 "$(realpath "$ORIG_CWD/${BASH_SOURCE[0]}"):$LINENO: failed with rc: $rc"' ERR + +# shellcheck source=kcompat-lib.sh +source "$ORIG_CWD"/kcompat-lib.sh + +# DO NOT break gen calls below (via \), to make our compat code more grep-able, +# keep them also grouped, first by feature (like DEVLINK), then by .h filename +# finally, keep them sorted within a group (sort by flag name) + +# handy line of DOC copy-pasted form kcompat-lib.sh: +# gen DEFINE if (KIND [METHOD of]) NAME [(matches|lacks) PATTERN|absent] in + +function gen-sysfs() { + + dh='include/linux/sysfs.h' + gen NEED_SYSFS_CREATE_GROUPS if fun sysfs_create_groups absent in "$dh" + gen NEED_SYSFS_REMOVE_GROUPS if fun sysfs_remove_groups absent in "$dh" +} + + +function gen-device() { + dh='include/linux/device.h' + dph='include/linux/dev_printk.h' + gen NEED_BUS_FIND_DEVICE_CONST_DATA if fun bus_find_device lacks 'const void \\*data' in "$dh" + gen NEED_DEV_LEVEL_ONCE if macro dev_level_once absent in "$dh" "$dph" + gen NEED_DEVM_KASPRINTF if fun devm_kasprintf absent in "$dh" + gen NEED_DEVM_KFREE if fun devm_kfree absent in "$dh" + gen NEED_DEVM_KVASPRINTF if fun devm_kvasprintf absent in "$dh" + gen NEED_DEVM_KZALLOC if fun devm_kzalloc absent in "$dh" +} + +function gen-devlink() { + dh='include/net/devlink.h' + gen HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY if fun devlink_flash_update_begin_notify in "$dh" + gen HAVE_DEVLINK_FLASH_UPDATE_PARAMS if struct devlink_flash_update_params in "$dh" + gen HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW if struct devlink_flash_update_params matches 'struct firmware \\*fw' in "$dh" + gen HAVE_DEVLINK_HEALTH if enum devlink_health_reporter_state in "$dh" + gen HAVE_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER if fun devlink_health_reporter_create lacks auto_recover in "$dh" + gen HAVE_DEVLINK_HEALTH_OPS_EXTACK if method dump of devlink_health_reporter_ops matches ext_ack in "$dh" + gen HAVE_DEVLINK_INFO_DRIVER_NAME_PUT if fun devlink_info_driver_name_put in "$dh" + gen HAVE_DEVLINK_PARAMS if method validate of devlink_param matches ext_ack in "$dh" + gen HAVE_DEVLINK_PARAMS_PUBLISH if fun devlink_params_publish in "$dh" + gen HAVE_DEVLINK_PORT_NEW if method port_new of devlink_ops in "$dh" + gen HAVE_DEVLINK_PORT_OPS if struct devlink_port_ops in "$dh" + gen HAVE_DEVLINK_PORT_SPLIT if method port_split of devlink_ops in "$dh" + gen HAVE_DEVLINK_PORT_SPLIT_EXTACK if method port_split of devlink_ops matches netlink_ext_ack in "$dh" + gen HAVE_DEVLINK_PORT_SPLIT_PORT_STRUCT if method port_split of devlink_ops matches devlink_port in "$dh" + gen HAVE_DEVLINK_PORT_TYPE_ETH_HAS_NETDEV if fun devlink_port_type_eth_set matches 'struct net_device' in "$dh" + gen HAVE_DEVLINK_RATE_NODE_CREATE if fun devl_rate_node_create in "$dh" + # keep devlink_region_ops body in variable, to not look 4 times for + # exactly the same thing in big file + # please consider it as an example of "how to speed up if needed" + REGION_OPS="$(find-struct-decl devlink_region_ops "$dh")" + gen HAVE_DEVLINK_REGIONS if struct devlink_region_ops in - <<< "$REGION_OPS" + gen HAVE_DEVLINK_REGION_OPS_SNAPSHOT if fun snapshot in - <<< "$REGION_OPS" + gen HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS if fun snapshot matches devlink_region_ops in - <<< "$REGION_OPS" + gen HAVE_DEVLINK_REGISTER_SETS_DEV if fun devlink_register matches 'struct device' in "$dh" + gen HAVE_DEVLINK_RELOAD_ENABLE_DISABLE if fun devlink_reload_enable in "$dh" + gen HAVE_DEVLINK_SET_FEATURES if fun devlink_set_features in "$dh" + 
gen HAVE_DEVL_PORT_REGISTER if fun devl_port_register in "$dh" + + gen HAVE_DEVLINK_PORT_FLAVOUR_PCI_SF if enum devlink_port_flavour matches DEVLINK_PORT_FLAVOUR_PCI_SF in include/uapi/linux/devlink.h + gen HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT if enum devlink_reload_action matches DEVLINK_RELOAD_ACTION_FW_ACTIVATE in include/uapi/linux/devlink.h + + gen NEED_DEVLINK_RESOURCES_UNREGISTER_NO_RESOURCE if fun devlink_resources_unregister matches 'struct devlink_resource \\*' in "$dh" + gen NEED_DEVLINK_TO_DEV if fun devlink_to_dev absent in "$dh" + gen NEED_DEVLINK_UNLOCKED_RESOURCE if fun devl_resource_size_get absent in "$dh" +} + +function gen-ethtool() { + eth='include/linux/ethtool.h' + ueth='include/uapi/linux/ethtool.h' + gen HAVE_ETHTOOL_COALESCE_EXTACK if method get_coalesce of ethtool_ops matches 'struct kernel_ethtool_coalesce \\*' in "$eth" + gen HAVE_ETHTOOL_EXTENDED_RINGPARAMS if method get_ringparam of ethtool_ops matches 'struct kernel_ethtool_ringparam \\*' in "$eth" + gen HAVE_ETHTOOL_RXFH_PARAM if struct ethtool_rxfh_param in "$eth" + gen NEED_ETHTOOL_SPRINTF if fun ethtool_sprintf absent in "$eth" + gen HAVE_ETHTOOL_FLOW_RSS if macro FLOW_RSS in "$ueth" +} + +function gen-filter() { + fh='include/linux/filter.h' + gen HAVE_XDP_DO_FLUSH if fun xdp_do_flush_map in "$fh" + gen NEED_NO_NETDEV_PROG_XDP_WARN_ACTION if fun bpf_warn_invalid_xdp_action lacks 'struct net_device \\*' in "$fh" +} + +function gen-flow-dissector() { + gen HAVE_FLOW_DISSECTOR_KEY_PPPOE if enum flow_dissector_key_id matches FLOW_DISSECTOR_KEY_PPPOE in include/net/flow_dissector.h include/net/flow_keys.h + # following HAVE ... CVLAN flag is mistakenly named after an enum key, + # but guards code around function call that was introduced later + gen HAVE_FLOW_DISSECTOR_KEY_CVLAN if fun flow_rule_match_cvlan in include/net/flow_offload.h +} + +function gen-gnss() { + cdh='include/linux/cdev.h' + clh='include/linux/device/class.h' + dh='include/linux/device.h' + gh='include/linux/gnss.h' + th='include/uapi/linux/types.h' + fh='include/linux/fs.h' + + gen HAVE_CDEV_DEVICE if fun cdev_device_add in "$cdh" + gen HAVE_DEV_UEVENT_CONST if method dev_uevent of class matches 'const struct device' in "$clh" + gen HAVE_POLL_T if typedef __poll_t in "$th" + gen HAVE_STREAM_OPEN if fun stream_open in "$fh" + # There can be either macro class_create or a function + gen NEED_CLASS_CREATE_WITH_MODULE_PARAM if fun class_create matches 'owner' in "$clh" "$dh" + gen NEED_CLASS_CREATE_WITH_MODULE_PARAM if macro class_create in "$clh" "$dh" + + if ! 
grep -qE CONFIG_SUSE_KERNEL.+1 "$CONFFILE"; then + gen HAVE_GNSS_MODULE if struct gnss_device in "$gh" + fi +} + +function gen-netdevice() { + ndh='include/linux/netdevice.h' + gen HAVE_NDO_ETH_IOCTL if fun ndo_eth_ioctl in "$ndh" + gen HAVE_NDO_FDB_ADD_VID if method ndo_fdb_del of net_device_ops matches 'u16 vid' in "$ndh" + gen HAVE_NDO_FDB_DEL_EXTACK if method ndo_fdb_del of net_device_ops matches ext_ack in "$ndh" + gen HAVE_NDO_GET_DEVLINK_PORT if method ndo_get_devlink_port of net_device_ops in "$ndh" + gen HAVE_NDO_UDP_TUNNEL_CALLBACK if method ndo_udp_tunnel_add of net_device_ops in "$ndh" + gen HAVE_NETIF_SET_TSO_MAX if fun netif_set_tso_max_size in "$ndh" + gen HAVE_SET_NETDEV_DEVLINK_PORT if macro SET_NETDEV_DEVLINK_PORT in "$ndh" + gen NEED_NETIF_NAPI_ADD_NO_WEIGHT if fun netif_napi_add matches 'int weight' in "$ndh" + gen NEED_NET_PREFETCH if fun net_prefetch absent in "$ndh" +} + +function gen-pci() { + pcih='include/linux/pci.h' + gen HAVE_PCI_MSIX_ALLOC_IRQ_AT if fun pci_msix_alloc_irq_at in "$pcih" + gen HAVE_PCI_MSIX_CAN_ALLOC_DYN if fun pci_msix_can_alloc_dyn in "$pcih" + gen HAVE_PCI_MSIX_FREE_IRQ if fun pci_msix_free_irq in "$pcih" + gen HAVE_PER_VF_MSIX_SYSFS if method sriov_set_msix_vec_count of pci_driver in "$pcih" + gen HAVE_STRUCT_PCI_DEV_PTM_ENABLED if struct pci_dev matches ptm_enabled in "$pcih" + gen NEED_PCIE_PTM_ENABLED if fun pcie_ptm_enabled absent in "$pcih" + gen NEED_PCI_ENABLE_PTM if fun pci_enable_ptm absent in "$pcih" +} + +function gen-other() { + ush='include/linux/u64_stats_sync.h' + #gen NEED_PCI_AER_CLEAR_NONFATAL_STATUS if fun pci_aer_clear_nonfatal_status absent in include/linux/aer.h + #gen NEED_BITMAP_COPY_CLEAR_TAIL if fun bitmap_copy_clear_tail absent in include/linux/bitmap.h + #gen NEED_BITMAP_FROM_ARR32 if fun bitmap_from_arr32 absent in include/linux/bitmap.h + #gen NEED_BITMAP_TO_ARR32 if fun bitmap_to_arr32 absent in include/linux/bitmap.h + #gen HAVE_COMPLETION_RAW_SPINLOCK if struct completion matches 'struct swait_queue_head' in include/linux/completion.h + #gen NEED_DEBUGFS_LOOKUP if fun debugfs_lookup absent in include/linux/debugfs.h + #gen NEED_DEBUGFS_LOOKUP_AND_REMOVE if fun debugfs_lookup_and_remove absent in include/linux/debugfs.h + gen NEED_ETH_HW_ADDR_SET if fun eth_hw_addr_set absent in include/linux/etherdevice.h + #gen HAVE_HWMON_DEVICE_REGISTER_WITH_INFO if fun hwmon_device_register_with_info in include/linux/hwmon.h + #gen NEED_HWMON_CHANNEL_INFO if macro HWMON_CHANNEL_INFO absent in include/linux/hwmon.h + #gen HAVE_IOMMU_DEV_FEAT_AUX if enum iommu_dev_features matches IOMMU_DEV_FEAT_AUX in include/linux/iommu.h + #gen NEED_DEFINE_STATIC_KEY_FALSE if macro DEFINE_STATIC_KEY_FALSE absent in include/linux/jump_label.h + #gen NEED_STATIC_BRANCH_LIKELY if macro static_branch_likely absent in include/linux/jump_label.h + #gen HAVE_STRUCT_STATIC_KEY_FALSE if struct static_key_false in include/linux/jump_label.h include/linux/jump_label_type.h + #gen NEED_DECLARE_STATIC_KEY_FALSE if macro DECLARE_STATIC_KEY_FALSE absent in include/linux/jump_label.h include/linux/jump_label_type.h + #gen NEED_LOWER_16_BITS if macro lower_16_bits absent in include/linux/kernel.h + #gen NEED_UPPER_16_BITS if macro upper_16_bits absent in include/linux/kernel.h + gen NEED_MUL_U64_U64_DIV_U64 if fun mul_u64_u64_div_u64 absent in include/linux/math64.h + #gen HAVE_MDEV_GET_DRVDATA if fun mdev_get_drvdata in include/linux/mdev.h + #gen HAVE_MDEV_REGISTER_PARENT if fun mdev_register_parent in include/linux/mdev.h + #gen 
NEED_DEV_PM_DOMAIN_ATTACH if fun dev_pm_domain_attach absent in include/linux/pm_domain.h include/linux/pm.h + #gen NEED_DEV_PM_DOMAIN_DETACH if fun dev_pm_domain_detach absent in include/linux/pm_domain.h include/linux/pm.h + #gen NEED_PTP_CLASSIFY_RAW if fun ptp_classify_raw absent in include/linux/ptp_classify.h + #gen NEED_PTP_PARSE_HEADER if fun ptp_parse_header absent in include/linux/ptp_classify.h + gen HAVE_PTP_CLOCK_INFO_ADJFINE if method adjfine of ptp_clock_info in include/linux/ptp_clock_kernel.h + gen NEED_DIFF_BY_SCALED_PPM if fun diff_by_scaled_ppm absent in include/linux/ptp_clock_kernel.h + #gen NEED_PTP_SYSTEM_TIMESTAMP if fun ptp_read_system_prets absent in include/linux/ptp_clock_kernel.h + #gen NEED_DEV_PAGE_IS_REUSABLE if fun dev_page_is_reusable absent in include/linux/skbuff.h + #gen NEED_SYSFS_EMIT if fun sysfs_emit absent in include/linux/sysfs.h + #gen HAVE_TRACE_ENABLED_SUPPORT if implementation of macro __DECLARE_TRACE matches 'trace_##name##_enabled' in include/linux/tracepoint.h + #gen HAVE_U64_STATS_FETCH_BEGIN_IRQ if fun u64_stats_fetch_begin_irq in "$ush" + #gen HAVE_U64_STATS_FETCH_RETRY_IRQ if fun u64_stats_fetch_retry_irq in "$ush" + #gen NEED_U64_STATS_READ if fun u64_stats_read absent in "$ush" + #gen NEED_U64_STATS_SET if fun u64_stats_set absent in "$ush" + #gen HAVE_LMV1_SUPPORT if macro VFIO_REGION_TYPE_MIGRATION in include/uapi/linux/vfio.h +} + +# all the generations, extracted from main() to keep normal code and various +# prep separated +function gen-all() { + if grep -qE CONFIG_NET_DEVLINK.+1 "$CONFFILE"; then + gen-devlink + fi + gen-netdevice + # code above is covered by unit_tests/test_gold.sh + if [ -n "${JUST_UNIT_TESTING-}" ]; then + return + fi + gen-device + gen-ethtool + gen-filter + gen-flow-dissector + gen-gnss + gen-pci + gen-other + gen-sysfs +} + +function main() { + # check if caller (like our makefile) wants to redirect output to file + if [ -n "${OUT-}" ]; then + + # in case OUT exists, we don't want to overwrite it, instead + # write to a temporary copy. + if [ -s "${OUT}" ]; then + TMP_OUT="$(mktemp "${OUT}.XXX")" + trap "rm -f '${TMP_OUT}'" EXIT + + REAL_OUT="${OUT}" + OUT="${TMP_OUT}" + fi + + exec > "$OUT" + # all stdout goes to OUT since now + echo "/* Autogenerated for KSRC=${KSRC-} via $(basename "$0") */" + fi + if [ -d "${KSRC-}" ]; then + cd "${KSRC}" + fi + + # check if KSRC was ok/if we are in proper place to look for headers + if [ -z "$(filter-out-bad-files include/linux/kernel.h)" ]; then + echo >&2 "seems that there are no kernel includes placed in KSRC=${KSRC} + pwd=$(pwd); ls -l:" + ls -l >&2 + exit 8 + fi + + # we need just CONFIG_NET_DEVLINK so far, but it's in .config, required + if [ ! -f "${CONFFILE-}" ]; then + echo >&2 ".config should be passed as env CONFFILE + (and it's not set or not a file)" + exit 9 + fi + + gen-all + + if [ -n "${OUT-}" ]; then + cd "$ORIG_CWD" + + # Compare and see if anything changed. This avoids updating + # mtime of the file. + if [ -n "${REAL_OUT-}" ]; then + if cmp --silent "${REAL_OUT}" "${TMP_OUT}"; then + # exit now, skipping print of the output since + # there were no changes. 
the trap should + # cleanup TMP_OUT + exit 0 + fi + + mv -f "${TMP_OUT}" "${REAL_OUT}" + OUT="${REAL_OUT}" + fi + + # dump output, will be visible in CI + if [ -n "${JUST_UNIT_TESTING-}${QUIET_COMPAT-}" ]; then + return + fi + cat -n "$OUT" >&2 + fi +} + +main + +# Coding style: +# - rely on `set -e` handling as much as possible, so: +# - do not use <(bash process substitution) - it breaks error handling; +# - do not put substantial logic in `if`-like statement - it disables error +# handling inside of the conditional (`if big-fun call; then` is substantial) +# - make shellcheck happy - https://www.shellcheck.net +# +# That enables us to move processing out of `if` or `... && ...` statements, +# what finally means that bash error handling (`set -e`) would break on errors. diff --git a/drivers/net/ethernet/mucse/rnp/kcompat-lib.sh b/drivers/net/ethernet/mucse/rnp/kcompat-lib.sh new file mode 100755 index 0000000000000000000000000000000000000000..6d064a5c7536332c9a3a00f9eeaa05c4cfe0a3b1 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/kcompat-lib.sh @@ -0,0 +1,278 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0-only +# Copyright(c) 2022 - 2024 Mucse Corporation + +# to be sourced + +# General shell helpers + +# exit with non-zero exit code; if there is only one param: +# exit with msg $1 and exit code from last command (or 99 if = 0) +# otherwise, exit with $1 and use remaining arguments as msg +function die() { + rc=$? + if [ $# -gt 1 ]; then + rc="$1" + shift + fi + [ "$rc" -ne 0 ] || rc=99 + echo >&2 "$@" + exit $rc +} + +# filter out paths that are not files +# input $@, output via echo; +# note: pass `-` for stdin +# note: outputs nothing if all input files are "bad" (eg. not existing), but it +# is left for caller to decide if this is an erorr condition; +# note: whitespaces are considered "bad" as part of filename, it's an error. +function filter-out-bad-files() { + if [[ $# = 1 && "$1" = '-' ]]; then + echo - + return 0 + fi + if [ $# = 0 ]; then + die 10 "no files passed, use '-' when reading from pipe (|)" + fi + local any=0 diagmsgs=/dev/stderr re=$'[\t \n]' + [ -n "${QUIET_COMPAT-}" ] && diagmsgs=/dev/null + for x in "$@"; do + if [ -e "$x" ]; then + if [[ "$x" =~ $re ]]; then + die 11 "err: filename contains whitespaces: $x." + fi + echo "$x" + any=1 + else + echo >&"$diagmsgs" filtering "$x" out + fi + done + if [ $any = 0 ]; then + echo >&"$diagmsgs" 'all files (for given query) filtered out' + fi +} + +# Basics of regexp explained, as a reference for mostly-C programmers: +# (bash) "regexp-$VAR-regexp" - bash' VARs are placed into "QUOTED" strings +# /\);?$/ - match end of function declaration, $ is end of string +# ^[ \t]* - (heuristic), anything but comment, eg to exclude function docs +# /STH/, /END/ - (awk), print all lines sice STH matched, up to END, inclusive + +# "Whitespace only" +WB='[ \t\n]' + +# Helpers below print the thing that is looked for, for further grep'ping/etc. +# That simplifies process of excluding comments or spares us state machine impl. +# +# We take advantage of current/common linux codebase formatting here. +# +# Functions in this section require input file/s passed as args +# (usually one, but more could be supplied in case of renames in kernel), +# '-' could be used as an (only) file argument to read from stdin/pipe. 
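+#
+# Example (illustrative): from within ${KSRC}, a call like
+#   find-fun-decl netif_napi_add include/linux/netdevice.h
+# prints the netif_napi_add() declaration up to its closing ");", which the
+# gen() wrapper below then greps for patterns such as 'int weight'.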
+ +# wrapper over find-something-decl() functions below, to avoid repetition +# pass $what as $1, $end as $2, and $files to look in as rest of args +function find-decl() { + test $# -ge 3 # ensure that there are at least 3 params + local what end files + what="$1" + end="$2" + shift 2 + files="$(filter-out-bad-files "$@")" || die + if [ -z "$files" ]; then + return 0 + fi + # shellcheck disable=SC2086 + awk " + /^$WB*\*/ {next} + $what, $end + " $files +} + +# yield $1 function declaration (signature), don't pass return type in $1 +# looks only in files specified ($2, $3...) +function find-fun-decl() { + test $# -ge 2 + local what end + what="/$WB*([(]\*)?$1$WB*($|[()])/" + end='/\);?$/' + shift + find-decl "$what" "$end" "$@" +} + +# yield $1 enum declaration (type/body) +function find-enum-decl() { + test $# -ge 2 + local what end + what="/^$WB*enum$WB+$1"' \{$/' + end='/\};$/' + shift + find-decl "$what" "$end" "$@" +} + +# yield $1 struct declaration (type/body) +function find-struct-decl() { + test $# -ge 2 + local what end + what="/^$WB*struct$WB+$1"' \{$/' + end='/^\};$/' # that's (^) different from enum-decl + shift + find-decl "$what" "$end" "$@" +} + +# yield first line of $1 macro definition +function find-macro-decl() { + test $# -ge 2 + local what end + # only unindented defines, only whole-word match + what="/^#define$WB+$1"'([ \t\(]|$)/' + end=1 # only first line; use find-macro-implementation-decl for full body + shift + find-decl "$what" "$end" "$@" +} + +# yield full macro implementation +function find-macro-implementation-decl() { + test $# -ge 2 + local what end + # only unindented defines, only whole-word match + what="/^#define$WB+$1"'([ \t\(]|$)/' + # full implementation, until a line not ending in a backslash. + # Does not handle macros with comments embedded within the definition. + end='/[^\\]$/' + shift + find-decl "$what" "$end" "$@" +} + +# yield first line of $1 typedef definition (simple typedefs only) +# this probably won't handle typedef struct { \n int foo;\n}; +function find-typedef-decl() { + test $# -ge 2 + local what end + what="/^typedef .* $1"';$/' + end=1 + shift + find-decl "$what" "$end" "$@" +} + +# gen() - DSL-like function to wrap around all the other +# +# syntax: +# gen DEFINE if (KIND [METHOD of]) NAME [(matches|lacks) PATTERN|absent] in + +# where: +# DEFINE is HAVE_ or NEED_ #define to print; +# `if` is there to just read it easier and made syntax easier to check; +# +# NAME is the name for what we are looking for; +# +# KIND specifies what kind of declaration/definition we are looking for, +# could be: fun, enum, struct, method, macro, typedef, +# 'implementation of macro' +# for KIND=method, we are looking for function ptr named METHOD in struct +# named NAME (two optional args are then necessary (METHOD & of)); +# +# for KIND='implementation of macro' we are looking for the full +# implementation of the macro, not just its first line. This is usually +# combined with "matches" or "lacks". +# +# next [optional] args could be used: +# matches PATTERN - use to grep for the PATTERN within definition +# (eg, for ext_ack param) +# lacks - use to add #define only if there is no match of the PATTERN, +# *but* the NAME is *found* +# absent - the NAME that we grep for must be not found +# (ie: function not exisiting) +# +# without this optional params, behavior is the same as with +# `matches .` - use to grep just for existence of NAME; +# +# `in` is there to ease syntax, similar to `if` before. 
+#
+# <list-of-files> is just a space-separated list of files to look in,
+# single (-) for stdin.
+#
+# PATTERN is awk pattern, will be wrapped by two slashes (/)
+function gen() {
+	test $# -ge 6 || die 20 "too few arguments, $# given, at least 6 needed"
+	local define if_kw kind name in_kw # mandatory
+	local of_kw method_name operator pattern # optional
+	local src_line="${BASH_SOURCE[0]}:${BASH_LINENO[0]}"
+	define="$1"
+	if_kw="$2"
+	kind="$3"
+	local orig_args_cnt=$#
+	shift 3
+	[ "$if_kw" != if ] && die 21 "$src_line: 'if' keyword expected, '$if_kw' given"
+	case "$kind" in
+	fun|enum|struct|macro|typedef)
+		name="$1"
+		shift
+		;;
+	method)
+		test $# -ge 5 || die 22 "$src_line: too few arguments, $orig_args_cnt given, at least 8 needed"
+		method_name="$1"
+		of_kw="$2"
+		name="$3"
+		shift 3
+		[ "$of_kw" != of ] && die 23 "$src_line: 'of' keyword expected, '$of_kw' given"
+		;;
+	implementation)
+		test $# -ge 5 || die 28 "$src_line: too few arguments, $orig_args_cnt given, at least 8 needed"
+		of_kw="$1"
+		kind="$2"
+		name="$3"
+		shift 3
+		[ "$of_kw" != of ] && die 29 "$src_line: 'of' keyword expected, '$of_kw' given"
+		[ "$kind" != macro ] && die 30 "$src_line: implementation only supports 'macro', '$kind' given"
+		kind=macro-implementation
+		;;
+	*) die 24 "$src_line: unknown KIND ($kind) to look for" ;;
+	esac
+	operator="$1"
+	case "$operator" in
+	absent)
+		pattern='.'
+		in_kw="$2"
+		shift 2
+		;;
+	matches|lacks)
+		pattern="$2"
+		in_kw="$3"
+		shift 3
+		;;
+	in)
+		operator=matches
+		pattern='.'
+		in_kw=in
+		shift
+		;;
+	*) die 25 "$src_line: unknown OPERATOR ($operator) to look for" ;;
+	esac
+	[ "$in_kw" != in ] && die 26 "$src_line: 'in' keyword expected, '$in_kw' given"
+	test $# -ge 1 || die 27 "$src_line: too few arguments, at least one filename expected"
+
+	local first_decl=
+	if [ "$kind" = method ]; then
+		first_decl="$(find-struct-decl "$name" "$@")" || exit 28
+		# prepare params for next lookup phase
+		set -- - # overwrite $@ to be single dash (-)
+		name="$method_name"
+		kind=fun
+	elif [[ $# = 1 && "$1" = '-' ]]; then
+		# avoid losing stdin provided to gen() due to redirection (<<<)
+		first_decl="$(cat -)"
+	fi
+
+	# lookup the NAME
+	local body
+	body="$(find-$kind-decl "$name" "$@" <<< "$first_decl")" || exit 29
+	awk -v define="$define" -v pattern="$pattern" -v "$operator"=1 '
+	/./ { not_empty = 1 }
+	$0 ~ pattern { found = 1 }
+	END {
+		if (lacks && !found && not_empty || matches && found || absent && !found)
+			print "#define", define
+	}
+	' <<< "$body"
+}
diff --git a/drivers/net/ethernet/mucse/rnp/kcompat_defs.h b/drivers/net/ethernet/mucse/rnp/kcompat_defs.h
new file mode 100755
index 0000000000000000000000000000000000000000..4a004aff43779fd6544ad6df6e9c68a9012c0be2
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnp/kcompat_defs.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _KCOMPAT_DEFS_H_
+#define _KCOMPAT_DEFS_H_
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#else
+#ifndef KERNEL_VERSION
+#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#endif
+#endif /* LINUX_VERSION_CODE */
+
+#ifndef UTS_RELEASE
+#include <generated/utsrelease.h>
+#endif
+
+/*
+ * Include the definitions file for HAVE/NEED flags for the standard
+ * upstream kernels.
+ *
+ * Then, based on the distribution we detect, load the distribution
+ * specific definitions file that customizes the definitions for the
+ * target distribution.
+ */ +#include "kcompat_std_defs.h" + +#ifdef CONFIG_SUSE_KERNEL +#include "kcompat_sles_defs.h" +#elif UBUNTU_VERSION_CODE +#include "kcompat_ubuntu_defs.h" +#elif RHEL_RELEASE_CODE +#include "kcompat_rhel_defs.h" +#else + +#if defined(KYLIN_OS) || defined(CONFIG_KYLINOS_SERVER) || \ + defined(CONFIG_KYLINOS_DESKTOP) +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 4, 130)) +// keylin 4.4.131 +#define NONEED_NAPI_CONSUME_SKB +#define NONEED_CSUM_REPLACE_BY_DIFF +#define NONEED_PCI_REQUEST_IO_REGIONS +#define NONEED_ETH_TYPE_VLAN +#define NONEED_UUID_SIZE +//#define FEITENG_4_4_131 +#endif + +#if defined(KYLIN_RELEASE_CODE) +#if (KYLIN_RELEASE_CODE <= KYLIN_RELEASE_VERSION(10, 2)) +#define NEED_SKB_FRAG_OFF +#define NEED_SKB_FRAG_OFF_ADD +#else +#undef NEED_NETDEV_TX_SENT_QUEUE +#undef NEED_SKB_FRAG_OFF +#undef NEED_SKB_FRAG_OFF_ADD +#endif +#endif + +#endif + +#endif + +#include "kcompat_generated_defs.h" + +#endif /* _KCOMPAT_DEFS_H_ */ diff --git a/drivers/net/ethernet/mucse/rnp/kcompat_gcc.h b/drivers/net/ethernet/mucse/rnp/kcompat_gcc.h new file mode 100755 index 0000000000000000000000000000000000000000..0b40646875dcb22a0cede336e761a74c74c3ad5c --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/kcompat_gcc.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef _KCOMPAT_GCC_H_ +#define _KCOMPAT_GCC_H_ + +#ifdef __has_attribute +#if __has_attribute(__fallthrough__) +#define fallthrough __attribute__((__fallthrough__)) +#else +#define fallthrough \ + do { \ + } while (0) /* fallthrough */ +#endif /* __has_attribute(fallthrough) */ +#else +#define fallthrough \ + do { \ + } while (0) /* fallthrough */ +#endif /* __has_attribute */ + +#endif /* _KCOMPAT_GCC_H_ */ diff --git a/drivers/net/ethernet/mucse/rnp/kcompat_generated_defs.h b/drivers/net/ethernet/mucse/rnp/kcompat_generated_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..f3071607c9bdcf212195799521dd7f42b2064011 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/kcompat_generated_defs.h @@ -0,0 +1,41 @@ +/* Autogenerated for KSRC=/lib/modules/5.15.0-25-generic/build via kcompat-generator.sh */ +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW +#define HAVE_DEVLINK_HEALTH +#define HAVE_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER +#define HAVE_DEVLINK_HEALTH_OPS_EXTACK +#define HAVE_DEVLINK_INFO_DRIVER_NAME_PUT +#define HAVE_DEVLINK_PARAMS +#define HAVE_DEVLINK_PARAMS_PUBLISH +#define HAVE_DEVLINK_PORT_NEW +#define HAVE_DEVLINK_PORT_SPLIT +#define HAVE_DEVLINK_PORT_SPLIT_EXTACK +#define HAVE_DEVLINK_PORT_TYPE_ETH_HAS_NETDEV +#define HAVE_DEVLINK_REGIONS +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +#define HAVE_DEVLINK_RELOAD_ENABLE_DISABLE +#define HAVE_DEVLINK_PORT_FLAVOUR_PCI_SF +#define HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT +#define NEED_DEVLINK_RESOURCES_UNREGISTER_NO_RESOURCE +#define NEED_DEVLINK_TO_DEV +#define NEED_DEVLINK_UNLOCKED_RESOURCE +#define HAVE_NDO_ETH_IOCTL +#define HAVE_NDO_FDB_ADD_VID +#define HAVE_NDO_GET_DEVLINK_PORT +#define NEED_NETIF_NAPI_ADD_NO_WEIGHT +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_FLOW_RSS +#define HAVE_XDP_DO_FLUSH +#define NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#define HAVE_FLOW_DISSECTOR_KEY_CVLAN +#define HAVE_CDEV_DEVICE +#define HAVE_POLL_T +#define HAVE_STREAM_OPEN +#define NEED_CLASS_CREATE_WITH_MODULE_PARAM +#define NEED_CLASS_CREATE_WITH_MODULE_PARAM +#define HAVE_GNSS_MODULE +#define HAVE_PER_VF_MSIX_SYSFS 
+#define HAVE_STRUCT_PCI_DEV_PTM_ENABLED +#define HAVE_PTP_CLOCK_INFO_ADJFINE +#define NEED_DIFF_BY_SCALED_PPM diff --git a/drivers/net/ethernet/mucse/rnp/kcompat_impl.h b/drivers/net/ethernet/mucse/rnp/kcompat_impl.h new file mode 100755 index 0000000000000000000000000000000000000000..00efec68a6d5e001e9e27e4516ec45d1b8e451f9 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/kcompat_impl.h @@ -0,0 +1,981 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef _KCOMPAT_IMPL_H_ +#define _KCOMPAT_IMPL_H_ + +/* This file contains implementations of backports from various kernels. It + * must rely only on NEED_ and HAVE_ checks. It must not make + * any checks to determine the kernel version when deciding whether to + * include an implementation. + * + * All new implementations must go in this file, and legacy implementations + * should be migrated to the new format over time. + */ + +/* + * generic network stack functions + */ + +/* NEED_NETDEV_TXQ_BQL_PREFETCH + * + * functions + * netdev_txq_bql_complete_prefetchw() + * netdev_txq_bql_enqueue_prefetchw() + * + * were added in kernel 4.20 upstream commit + * 535114539bb2 ("net: add netdev_txq_bql_{enqueue, complete}_prefetchw() + * helpers") + */ +#ifdef NEED_NETDEV_TXQ_BQL_PREFETCH +/** + * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write + * @dev_queue: pointer to transmit queue + * + * BQL enabled drivers might use this helper in their ndo_start_xmit(), + * to give appropriate hint to the CPU. + */ +static inline void +netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue) +{ +#ifdef CONFIG_BQL + prefetchw(&dev_queue->dql.num_queued); +#endif +} + +/** + * netdev_txq_bql_complete_prefetchw - prefetch bql data for write + * @dev_queue: pointer to transmit queue + * + * BQL enabled drivers might use this helper in their TX completion path, + * to give appropriate hint to the CPU. + */ +static inline void +netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue) +{ +#ifdef CONFIG_BQL + prefetchw(&dev_queue->dql.limit); +#endif +} +#endif /* NEED_NETDEV_TXQ_BQL_PREFETCH */ + +/* NEED_NETDEV_TX_SENT_QUEUE + * + * __netdev_tx_sent_queue was added in kernel 4.20 upstream commit + * 3e59020abf0f ("net: bql: add __netdev_tx_sent_queue()") + */ +#ifdef NEED_NETDEV_TX_SENT_QUEUE +/* Variant of netdev_tx_sent_queue() for drivers that are aware + * that they should not test BQL status themselves. + * We do want to change __QUEUE_STATE_STACK_XOFF only for the last + * skb of a batch. + * Returns true if the doorbell must be used to kick the NIC. + */ +static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue, + unsigned int bytes, bool xmit_more) +{ + if (xmit_more) { +#ifdef CONFIG_BQL + dql_queued(&dev_queue->dql, bytes); +#endif + return netif_tx_queue_stopped(dev_queue); + } + netdev_tx_sent_queue(dev_queue, bytes); + return true; +} +#endif /* NEED_NETDEV_TX_SENT_QUEUE */ + +/* NEED_NET_PREFETCH + * + * net_prefetch was introduced by commit f468f21b7af0 ("net: Take common + * prefetch code structure into a function") + * + * This function is trivial to re-implement in full. 
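+ *
+ * Typical (illustrative) use is to warm the start of a received frame
+ * before parsing its headers, e.g. from an Rx cleanup loop:
+ *
+ *	net_prefetch(page_address(rx_buffer->page) + rx_buffer->page_offset);
+ *
+ * where rx_buffer stands for whatever per-descriptor bookkeeping the driver
+ * keeps; the helper prefetches a second cache line when L1_CACHE_BYTES is
+ * smaller than 128 so that at least 128 bytes of the frame are covered.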
+ */
+#ifdef NEED_NET_PREFETCH
+static inline void net_prefetch(void *p)
+{
+	prefetch(p);
+#if L1_CACHE_BYTES < 128
+	prefetch((u8 *)p + L1_CACHE_BYTES);
+#endif
+}
+#endif /* NEED_NET_PREFETCH */
+
+/* NEED_SKB_FRAG_OFF and NEED_SKB_FRAG_OFF_ADD
+ *
+ * skb_frag_off and skb_frag_off_add were added in upstream commit
+ * 7240b60c98d6 ("linux: Add skb_frag_t page_offset accessors")
+ *
+ * Implementing the wrappers directly for older kernels which still have the
+ * old implementation of skb_frag_t is trivial.
+ *
+ * LTS 4.19 backported the define for skb_frag_off in 4.19.201.
+ * d94d95ae0dd0 ("gro: ensure frag0 meets IP header alignment")
+ * Need to exclude defining skb_frag_off for 4.19.X where X > 200
+ */
+#ifdef NEED_SKB_FRAG_OFF
+static inline unsigned int skb_frag_off(const skb_frag_t *frag)
+{
+	return frag->page_offset;
+}
+#endif /* NEED_SKB_FRAG_OFF */
+#ifdef NEED_SKB_FRAG_OFF_ADD
+static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
+{
+	frag->page_offset += delta;
+}
+#endif /* NEED_SKB_FRAG_OFF_ADD */
+
+/*
+ * NETIF_F_HW_L2FW_DOFFLOAD related functions
+ *
+ * Support for NETIF_F_HW_L2FW_DOFFLOAD was first introduced upstream by
+ * commit a6cc0cfa72e0 ("net: Add layer 2 hardware acceleration operations for
+ * macvlan devices")
+ */
+#ifdef NETIF_F_HW_L2FW_DOFFLOAD
+
+#include <linux/if_macvlan.h>
+
+/* NEED_MACVLAN_ACCEL_PRIV
+ *
+ * macvlan_accel_priv is an accessor function that replaced direct access to
+ * the macvlan->fwd_priv variable. It was introduced in commit 7d775f63470c
+ * ("macvlan: Rename fwd_priv to accel_priv and add accessor function")
+ *
+ * Implement the new wrapper name by simply accessing the older
+ * macvlan->fwd_priv name.
+ */
+#ifdef NEED_MACVLAN_ACCEL_PRIV
+static inline void *macvlan_accel_priv(struct net_device *dev)
+{
+	struct macvlan_dev *macvlan = netdev_priv(dev);
+
+	return macvlan->fwd_priv;
+}
+#endif /* NEED_MACVLAN_ACCEL_PRIV */
+
+/* NEED_MACVLAN_RELEASE_L2FW_OFFLOAD
+ *
+ * macvlan_release_l2fw_offload was introduced upstream by commit 53cd4d8e4dfb
+ * ("macvlan: Provide function for interfaces to release HW offload")
+ *
+ * Implementing this is straightforward, but we must be careful to use
+ * fwd_priv instead of accel_priv. Note that both the change to accel_priv and
+ * introduction of this function happened in the same release.
+ */
+#ifdef NEED_MACVLAN_RELEASE_L2FW_OFFLOAD
+static inline int macvlan_release_l2fw_offload(struct net_device *dev)
+{
+	struct macvlan_dev *macvlan = netdev_priv(dev);
+
+	macvlan->fwd_priv = NULL;
+	return dev_uc_add(macvlan->lowerdev, dev->dev_addr);
+}
+#endif /* NEED_MACVLAN_RELEASE_L2FW_OFFLOAD */
+
+/* NEED_MACVLAN_SUPPORTS_DEST_FILTER
+ *
+ * macvlan_supports_dest_filter was introduced upstream by commit 6cb1937d4eff
+ * ("macvlan: Add function to test for destination filtering support")
+ *
+ * The implementation doesn't rely on anything new and is trivial to backport
+ * for kernels that have NETIF_F_HW_L2FW_DOFFLOAD support.
+ */
+#ifdef NEED_MACVLAN_SUPPORTS_DEST_FILTER
+static inline bool macvlan_supports_dest_filter(struct net_device *dev)
+{
+	struct macvlan_dev *macvlan = netdev_priv(dev);
+
+	return macvlan->mode == MACVLAN_MODE_PRIVATE ||
+	       macvlan->mode == MACVLAN_MODE_VEPA ||
+	       macvlan->mode == MACVLAN_MODE_BRIDGE;
+}
+#endif /* NEED_MACVLAN_SUPPORTS_DEST_FILTER */
+
+#endif /* NETIF_F_HW_L2FW_DOFFLOAD */
+
+/*
+ * tc functions
+ */
+
+/* NEED_FLOW_INDR_BLOCK_CB_REGISTER
+ *
+ * __flow_indr_block_cb_register and __flow_indr_block_cb_unregister were
+ * added in upstream commit 4e481908c51b ("flow_offload: move tc indirect
+ * block to flow offload")
+ *
+ * This was a simple rename so we can just translate from the old
+ * naming scheme with a macro.
+ */
+#ifdef NEED_FLOW_INDR_BLOCK_CB_REGISTER
+#define __flow_indr_block_cb_register __tc_indr_block_cb_register
+#define __flow_indr_block_cb_unregister __tc_indr_block_cb_unregister
+#endif
+
+/*
+ * devlink support
+ */
+#if IS_ENABLED(CONFIG_NET_DEVLINK)
+
+#include <net/devlink.h>
+
+#ifdef HAVE_DEVLINK_REGIONS
+/* NEED_DEVLINK_REGION_CREATE_OPS
+ *
+ * The ops parameter to devlink_region_create was added by commit e8937681797c
+ * ("devlink: prepare to support region operations")
+ *
+ * For older kernels, define _kc_devlink_region_create that takes an ops
+ * parameter, and calls the old implementation function by extracting the name
+ * from the structure.
+ */
+#ifdef NEED_DEVLINK_REGION_CREATE_OPS
+struct devlink_region_ops {
+	const char *name;
+	void (*destructor)(const void *data);
+};
+
+static inline struct devlink_region *
+_kc_devlink_region_create(struct devlink *devlink,
+			  const struct devlink_region_ops *ops,
+			  u32 region_max_snapshots, u64 region_size)
+{
+	return devlink_region_create(devlink, ops->name, region_max_snapshots,
+				     region_size);
+}
+
+#define devlink_region_create _kc_devlink_region_create
+#endif /* NEED_DEVLINK_REGION_CREATE_OPS */
+#endif /* HAVE_DEVLINK_REGIONS */
+
+/* NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY
+ *
+ * devlink_flash_update_status_notify, _begin_notify, and _end_notify were
+ * added by upstream commit 191ed2024de9 ("devlink: allow driver to update
+ * progress of flash update")
+ *
+ * For older kernels that lack the netlink messages, convert the functions
+ * into no-ops.
+ */
+#ifdef NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY
+static inline void
+devlink_flash_update_begin_notify(struct devlink __always_unused *devlink)
+{
+}
+
+static inline void
+devlink_flash_update_end_notify(struct devlink __always_unused *devlink)
+{
+}
+
+static inline void
+devlink_flash_update_status_notify(struct devlink __always_unused *devlink,
+				   const char __always_unused *status_msg,
+				   const char __always_unused *component,
+				   unsigned long __always_unused done,
+				   unsigned long __always_unused total)
+{
+}
+#endif /* NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY */
+
+#ifndef HAVE_DEVLINK_FLASH_UPDATE_PARAMS
+struct devlink_flash_update_params {
+	const char *file_name;
+	const char *component;
+	u32 overwrite_mask;
+};
+
+#ifndef DEVLINK_FLASH_OVERWRITE_SETTINGS
+#define DEVLINK_FLASH_OVERWRITE_SETTINGS BIT(0)
+#endif
+
+#ifndef DEVLINK_FLASH_OVERWRITE_IDENTIFIERS
+#define DEVLINK_FLASH_OVERWRITE_IDENTIFIERS BIT(1)
+#endif
+#endif /* !HAVE_DEVLINK_FLASH_UPDATE_PARAMS */
+
+/* NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY
+ *
+ * devlink_flash_update_timeout_notify was added by upstream commit
+ * f92970c694b3 ("devlink: add timeout information to status_notify").
+ * + * For older kernels, just convert timeout notifications into regular status + * notification messages without timeout information. + */ +#ifdef NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY +static inline void devlink_flash_update_timeout_notify( + struct devlink *devlink, const char *status_msg, const char *component, + unsigned long __always_unused timeout) +{ + devlink_flash_update_status_notify(devlink, status_msg, component, 0, + 0); +} +#endif /* NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY */ + +/* + * NEED_DEVLINK_PORT_ATTRS_SET_STRUCT + * + * HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR + * HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID + * + * devlink_port_attrs_set was introduced by commit b9ffcbaf56d3 ("devlink: + * introduce devlink_port_attrs_set") + * + * It's function signature has changed multiple times over several kernel + * releases: + * + * commit 5ec1380a21bb ("devlink: extend attrs_set for setting port + * flavours") added the ability to set port flavour. (Note that there is no + * official kernel release with devlink_port_attrs_set without the flavour + * argument, as they were introduced in the same series.) + * + * commit bec5267cded2 ("net: devlink: extend port attrs for switch ID") added + * the ability to set the switch ID (HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID) + * + * Finally commit 71ad8d55f8e5 ("devlink: Replace devlink_port_attrs_set + * parameters with a struct") refactored to pass devlink_port_attrs struct + * instead of individual parameters. (!NEED_DEVLINK_PORT_ATTRS_SET_STRUCT) + * + * We want core drivers to just use the latest form that takes + * a devlink_port_attrs structure. Note that this structure did exist as part + * of but was never used directly by driver code prior to the + * function parameter change. For this reason, the implementation always + * relies on _kc_devlink_port_attrs instead of what was defined in the kernel. 
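+ *
+ * With that, core driver code can always write (sketch only, the field
+ * values are just examples):
+ *
+ *        struct devlink_port_attrs attrs = {};
+ *
+ *        attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ *        attrs.phys.port_number = 0;
+ *        devlink_port_attrs_set(devlink_port, &attrs);
+ *
+ * and the wrapper below maps the call onto whichever signature the running
+ * kernel provides.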
+ */ +#ifdef NEED_DEVLINK_PORT_ATTRS_SET_STRUCT + +#ifndef HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR +enum devlink_port_flavour { + DEVLINK_PORT_FLAVOUR_PHYSICAL, + DEVLINK_PORT_FLAVOUR_CPU, + DEVLINK_PORT_FLAVOUR_DSA, + DEVLINK_PORT_FLAVOUR_PCI_PF, + DEVLINK_PORT_FLAVOUR_PCI_VF, +}; +#endif + +struct _kc_devlink_port_phys_attrs { + u32 port_number; + u32 split_subport_number; +}; + +struct _kc_devlink_port_pci_pf_attrs { + u16 pf; +}; + +struct _kc_devlink_port_pci_vf_attrs { + u16 pf; + u16 vf; +}; + +struct _kc_devlink_port_attrs { + u8 split : 1, splittable : 1; + u32 lanes; + enum devlink_port_flavour flavour; + struct netdev_phys_item_id switch_id; + union { + struct _kc_devlink_port_phys_attrs phys; + struct _kc_devlink_port_pci_pf_attrs pci_pf; + struct _kc_devlink_port_pci_vf_attrs pci_vf; + }; +}; + +#define devlink_port_attrs _kc_devlink_port_attrs + +static inline void +_kc_devlink_port_attrs_set(struct devlink_port *devlink_port, + struct _kc_devlink_port_attrs *attrs) +{ +#if defined(HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID) + devlink_port_attrs_set(devlink_port, attrs->flavour, + attrs->phys.port_number, attrs->split, + attrs->phys.split_subport_number, + attrs->switch_id.id, attrs->switch_id.id_len); +#elif defined(HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR) + devlink_port_attrs_set(devlink_port, attrs->flavour, + attrs->phys.port_number, attrs->split, + attrs->phys.split_subport_number); +#else + if (attrs->split) + devlink_port_split_set(devlink_port, attrs->phys.port_number); +#endif +} + +#define devlink_port_attrs_set _kc_devlink_port_attrs_set + +#endif /* NEED_DEVLINK_PORT_ATTRS_SET_STRUCT */ + +/* + * NEED_DEVLINK_ALLOC_SETS_DEV + * + * Since commit 919d13a7e455 ("devlink: Set device as early as possible"), the + * devlink device pointer is set by devlink_alloc instead of by + * devlink_register. + * + * devlink_alloc now includes the device pointer in its signature, while + * devlink_register no longer includes it. + * + * This implementation provides a replacement for devlink_alloc which will + * take and then silently discard the extra dev pointer. + * + * To use devlink_register, drivers must check + * HAVE_DEVLINK_REGISTER_SETS_DEV. Note that we can't easily provide + * a backport of the change to devlink_register directly. Although the dev + * pointer is accessible from the devlink pointer through the driver private + * section, it is device driver specific and is not easily accessible in + * compat code. + */ +#ifdef NEED_DEVLINK_ALLOC_SETS_DEV + +//static inline struct devlink * +//_kc_devlink_alloc(const struct devlink_ops *ops, size_t priv_size, +// struct device * __always_unused dev) +//{ +// return devlink_alloc(ops, priv_size); +//} +// +//#define devlink_alloc _kc_devlink_alloc +#endif /* NEED_DEVLINK_ALLOC_SETS_DEV */ + +#endif /* CONFIG_NET_DEVLINK */ + +#ifdef NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE +/* ida_alloc(), ida_alloc_min(), ida_alloc_max(), ida_alloc_range(), and + * ida_free() were added in commit 5ade60dda43c ("ida: add new API"). + * + * Also, using "0" as the "end" argument (3rd argument) to ida_simple_get() is + * considered the max value, which is why it's used in ida_alloc() and + * ida_alloc_min(). 
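+ *
+ * Callers keep the upstream usage pattern, e.g. (sketch only, my_ida is
+ * hypothetical):
+ *
+ *        id = ida_alloc(&my_ida, GFP_KERNEL);
+ *        if (id < 0)
+ *                return id;
+ *        ...
+ *        ida_free(&my_ida, id);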
+ */ +static inline int ida_alloc(struct ida *ida, gfp_t gfp) +{ + return ida_simple_get(ida, 0, 0, gfp); +} + +static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp) +{ + return ida_simple_get(ida, min, 0, gfp); +} + +static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp) +{ + return ida_simple_get(ida, 0, max, gfp); +} + +static inline int ida_alloc_range(struct ida *ida, unsigned int min, + unsigned int max, gfp_t gfp) +{ + return ida_simple_get(ida, min, max, gfp); +} + +static inline void ida_free(struct ida *ida, unsigned int id) +{ + ida_simple_remove(ida, id); +} +#endif /* NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE */ + +/* + * dev_printk implementations + */ + +/* NEED_DEV_PRINTK_ONCE + * + * The dev_*_once family of printk functions was introduced by commit + * e135303bd5be ("device: Add dev__once variants") + * + * The implementation is very straight forward so we will just implement them + * as-is here. + */ +#ifdef NEED_DEV_PRINTK_ONCE +#ifdef CONFIG_PRINTK +#define dev_level_once(dev_level, dev, fmt, ...) \ + do { \ + static bool __print_once __read_mostly; \ + \ + if (!__print_once) { \ + __print_once = true; \ + dev_level(dev, fmt, ##__VA_ARGS__); \ + } \ + } while (0) +#else +#define dev_level_once(dev_level, dev, fmt, ...) \ + do { \ + if (0) \ + dev_level(dev, fmt, ##__VA_ARGS__); \ + } while (0) +#endif + +#define dev_emerg_once(dev, fmt, ...) \ + dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__) +#define dev_alert_once(dev, fmt, ...) \ + dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__) +#define dev_crit_once(dev, fmt, ...) \ + dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__) +#define dev_err_once(dev, fmt, ...) \ + dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__) +#define dev_warn_once(dev, fmt, ...) \ + dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__) +#define dev_notice_once(dev, fmt, ...) \ + dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__) +#define dev_info_once(dev, fmt, ...) \ + dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__) +#define dev_dbg_once(dev, fmt, ...) \ + dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__) +#endif /* NEED_DEV_PRINTK_ONCE */ + +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO + +/* NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 + * + * tc_cls_can_offload_and_chain0 was added by upstream commit + * 878db9f0f26d ("pkt_cls: add new tc cls helper to check offload flag and + * chain index"). + * + * This patch backports this function for older kernels by calling + * tc_can_offload() directly. + */ +#ifdef NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 +#include +static inline bool +tc_cls_can_offload_and_chain0(const struct net_device *dev, + struct tc_cls_common_offload *common) +{ + if (!tc_can_offload(dev)) + return false; + if (common->chain_index) + return false; + + return true; +} +#endif /* NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 */ +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ + +/* NEED_TC_SETUP_QDISC_MQPRIO + * + * TC_SETUP_QDISC_MQPRIO was added by upstream commit + * 575ed7d39e2f ("net_sch: mqprio: Change TC_SETUP_MQPRIO to + * TC_SETUP_QDISC_MQPRIO"). + * + * For older kernels which are using TC_SETUP_MQPRIO + */ +#ifdef NEED_TC_SETUP_QDISC_MQPRIO +#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO +#endif /* NEED_TC_SETUP_QDISC_MQPRIO */ + +/* + * ART/TSC functions + */ +#ifdef HAVE_PTP_CROSSTIMESTAMP +/* NEED_CONVERT_ART_NS_TO_TSC + * + * convert_art_ns_to_tsc was added by upstream commit fc804f65d462 ("x86/tsc: + * Convert ART in nanoseconds to TSC"). 
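+ *
+ * It is typically called from the get_time_fn() callback that a PTP
+ * ->getcrosststamp() handler passes to get_device_system_crosststamp(),
+ * roughly (sketch only, the variable names are hypothetical):
+ *
+ *        *system = convert_art_ns_to_tsc(art_ns_from_device);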
+ * + * This function is similar to convert_art_to_tsc, but expects the input in + * terms of nanoseconds, rather than ART cycles. We implement this by + * accessing the tsc_khz value and performing the proper calculation. In order + * to access the correct clock object on returning, we use the function + * convert_art_to_tsc, because the art_related_clocksource is inaccessible. + */ +#ifdef NEED_CONVERT_ART_NS_TO_TSC +#ifdef CONFIG_X86 +#include + +static inline struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns) +{ + struct system_counterval_t system; + u64 tmp, res, rem; + + rem = do_div(art_ns, USEC_PER_SEC); + + res = art_ns * tsc_khz; + tmp = rem * tsc_khz; + + do_div(tmp, USEC_PER_SEC); + res += tmp; + + system = convert_art_to_tsc(art_ns); + system.cycles = res; + + return system; +} +#else /* CONFIG_X86 */ +static inline struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns) +{ + WARN_ONCE(1, "%s is only supported on X86", __func__); + return (struct system_counterval_t){}; +} +#endif /* !CONFIG_X86 */ +#endif /* NEED_CONVERT_ART_NS_TO_TSC */ +#endif /* HAVE_PTP_CROSSTIMESTAMP */ + +/* + * PTP functions and definitions + */ +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +#include +#include + +/* PTP_* ioctl flags + * + * PTP_PEROUT_ONE_SHOT and PTP_PEROUT_DUTY_CYCLE were added by commit + * f65b71aa25a6 ("ptp: add ability to configure duty cycle for periodic + * output") + * + * PTP_PEROUT_PHASE was added in commit b6bd41363a1c ("ptp: introduce + * a phase offset in the periodic output request") + * + * PTP_STRICT_FLAGS was added in commit 6138e687c7b6 ("ptp: Introduce strict + * checking of external time stamp options.") + * + * These flags control behavior for the periodic output PTP ioctl. For older + * kernels, we define the flags as 0. This allows bitmask checks on flags to + * work as expected, since these feature flags will become no-ops on kernels + * that lack support. + * + * Drivers can check if the relevant feature is actually supported by using an + * '#if' on the flag instead of an '#ifdef' + */ +#ifndef PTP_PEROUT_PHASE +#define PTP_PEROUT_PHASE 0 +#endif + +#ifndef PTP_PEROUT_DUTY_CYCLE +#define PTP_PEROUT_DUTY_CYCLE 0 +#endif + +#ifndef PTP_STRICT_FLAGS +#define PTP_STRICT_FLAGS 0 +#endif + +#ifndef PTP_PEROUT_PHASE +/* PTP_PEROUT_PHASE + * + * The PTP_PEROUT_PHASE flag was added in commit b6bd41363a1c ("ptp: introduce + * a phase offset in the periodic output request") as a way for userspace to + * request a phase-offset periodic output that starts on some arbitrary + * multiple of the clock period. + * + * For older kernels, define this flag to 0 so that checks for if it is + * enabled will always fail. Drivers should use '#if PTP_PEROUT_PHASE' to + * determine if the kernel has phase support, and use the flag as normal for + * checking supported flags or if the flag is enabled for a given request. + */ +#define PTP_PEROUT_PHASE 0 +#endif + +#endif /* CONFIG_PTP_1588_CLOCK */ + +#ifdef NEED_BUS_FIND_DEVICE_CONST_DATA +/* NEED_BUS_FIND_DEVICE_CONST_DATA + * + * bus_find_device() was updated in upstream commit 418e3ea157ef + * ("bus_find_device: Unify the match callback with class_find_device") + * to take a const void *data parameter and also have the match() function + * passed in take a const void *data parameter. + * + * all of the kcompat below makes it so the caller can always just call + * bus_find_device() according to the upstream kernel without having to worry + * about const vs. non-const arguments. 
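+ *
+ * i.e. callers keep the upstream-style prototype, roughly (sketch only, the
+ * match function and data names are hypothetical):
+ *
+ *        static int match_fn(struct device *dev, const void *data);
+ *        ...
+ *        dev = bus_find_device(&pci_bus_type, NULL, name, match_fn);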
+ */ +struct _kc_bus_find_device_custom_data { + const void *real_data; + int (*real_match)(struct device *dev, const void *data); +}; + +static inline int _kc_bus_find_device_wrapped_match(struct device *dev, + void *data) +{ + struct _kc_bus_find_device_custom_data *custom_data = data; + + return custom_data->real_match(dev, custom_data->real_data); +} + +static inline struct device * +_kc_bus_find_device(struct bus_type *type, struct device *start, + const void *data, + int (*match)(struct device *dev, const void *data)) +{ + struct _kc_bus_find_device_custom_data custom_data = {}; + + custom_data.real_data = data; + custom_data.real_match = match; + + return bus_find_device(type, start, &custom_data, + _kc_bus_find_device_wrapped_match); +} + +/* force callers of bus_find_device() to call _kc_bus_find_device() on kernels + * where NEED_BUS_FIND_DEVICE_CONST_DATA is defined + */ +#define bus_find_device(type, start, data, match) \ + _kc_bus_find_device(type, start, data, match) +#endif /* NEED_BUS_FIND_DEVICE_CONST_DATA */ + +#ifdef NEED_CPU_LATENCY_QOS_RENAME +/* NEED_CPU_LATENCY_QOS_RENAME + * + * The PM_QOS_CPU_DMA_LATENCY definition was removed in 67b06ba01857 ("PM: + * QoS: Drop PM_QOS_CPU_DMA_LATENCY and rename related functions"). The + * related functions were renamed to use "cpu_latency_qos_" prefix. + * + * Use wrapper functions to map the new API onto the API available in older + * kernels. + */ +#include +static inline void cpu_latency_qos_add_request(struct pm_qos_request *req, + s32 value) +{ + pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY, value); +} + +static inline void cpu_latency_qos_update_request(struct pm_qos_request *req, + s32 new_value) +{ + pm_qos_update_request(req, new_value); +} + +static inline void cpu_latency_qos_remove_request(struct pm_qos_request *req) +{ + pm_qos_remove_request(req); +} +#endif /* NEED_CPU_LATENCY_QOS_RENAME */ + +#ifdef NEED_DECLARE_STATIC_KEY_FALSE +/* NEED_DECLARE_STATIC_KEY_FALSE + * + * DECLARE_STATIC_KEY_FALSE was added by upstream commit + * 525e0ac4d2b2 ("locking/static_keys: Provide DECLARE and + * well as DEFINE macros") + * + * The definition is now necessary to handle + * the xdpdrv work with more than 64 cpus + */ +#define DECLARE_STATIC_KEY_FALSE(name) extern struct static_key_false name +#endif /* NEED_DECLARE_STATIC_KEY_FALSE */ + +#ifdef NEED_DEFINE_STATIC_KEY_FALSE +/* NEED_DEFINE_STATIC_KEY_FALSE + * + * DEFINE_STATIC_KEY_FALSE was added by upstream commit + * 11276d5306b8 ("locking/static_keys: Add a new + * static_key interface") + * + * The definition is now necessary to handle + * the xdpdrv work with more than 64 cpus + */ +#define DECLARE_STATIC_KEY_FALSE(name) extern struct static_key name + +#define DEFINE_STATIC_KEY_FALSE(name) \ + struct static_key name = STATIC_KEY_INIT_FALSE +#endif /* NEED_DEFINE_STATIC_KEY_FALSE */ + +#ifdef NEED_STATIC_BRANCH +/* NEED_STATIC_BRANCH + * + * static_branch_likely, static_branch_unlikely, + * static_branch_inc, static_branch_dec was added by upstream commit + * 11276d5306b8 ("locking/static_keys: Add a new + * static_key interface") + * + * The definition is now necessary to handle + * the xdpdrv work with more than 64 cpus + */ +#define static_branch_likely(x) likely(static_key_enabled(x)) +#define static_branch_unlikely(x) unlikely(static_key_enabled(x)) + +#define static_branch_inc(x) static_key_slow_inc(x) +#define static_branch_dec(x) static_key_slow_dec(x) + +#endif /* NEED_STATIC_BRANCH */ + +#ifdef NEED_NETDEV_XDP_STRUCT +#define netdev_bpf netdev_xdp +#endif /* 
NEED_NETDEV_XDP_STRUCT */ + +#ifdef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#ifdef HAVE_XDP_SUPPORT +#include +#endif /* HAVE_XDP_SUPPORT */ +#endif /* HAVE_NETDEV_PROG_XDP_WARN_ACTION */ + +/* NEED_ETH_HW_ADDR_SET + * + * eth_hw_addr_set was added by upstream commit + * 48eab831ae8b ("net: create netdev->dev_addr assignment helpers") + * + * Using eth_hw_addr_set became required in 5.17, when the dev_addr field in + * the netdev struct was constified. See 48eab831ae8b ("net: create + * netdev->dev_addr assignment helpers") + */ +#ifdef NEED_ETH_HW_ADDR_SET +static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr) +{ + ether_addr_copy(dev->dev_addr, addr); +} +#endif /* NEED_ETH_HW_ADDR_SET */ + +/* + * NEED_NETIF_NAPI_ADD_NO_WEIGHT + * + * Upstream commit b48b89f9c189 ("net: drop the weight argument from + * netif_napi_add") removes weight argument from function call. + * + * Our drivers always used default weight, which is 64. + * + * Define NEED_NETIF_NAPI_ADD_NO_WEIGHT on kernels 3.10+ to use old + * implementation. Undef for 6.1+ where new function was introduced. + * RedHat 9.2 required using no weight parameter option. + */ +#ifdef NEED_NETIF_NAPI_ADD_NO_WEIGHT +static inline void _kc_netif_napi_add(struct net_device *dev, + struct napi_struct *napi, + int (*poll)(struct napi_struct *, int)) +{ + return netif_napi_add(dev, napi, poll); +} + +/* RHEL7 complains about redefines. Undef first, then define compat wrapper */ +#ifdef netif_napi_add +#undef netif_napi_add +#endif +#define netif_napi_add _kc_netif_napi_add +#endif /* NEED_NETIF_NAPI_ADD_NO_WEIGHT */ + + +#ifdef NEED_JIFFIES_64_TIME_IS_MACROS +/* NEED_JIFFIES_64_TIME_IS_MACROS + * + * The jiffies64 time_is_* macros were introduced upstream by 3740dcdf8a77 + * ("jiffies: add time comparison functions for 64 bit jiffies") in Linux 4.9. + * + * Support for 64-bit jiffies has been available since the initial import of + * Linux into git in 2005, so its safe to just implement the macros as-is + * here. + */ +#define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a) +#define time_is_after_jiffies64(a) time_before64(get_jiffies_64(), a) +#define time_is_before_eq_jiffies64(a) time_after_eq64(get_jiffies_64(), a) +#define time_is_after_eq_jiffies64(a) time_before_eq64(get_jiffies_64(), a) +#endif /* NEED_JIFFIES_64_TIME_IS_MACROS */ + +#ifdef NEED_INDIRECT_CALL_WRAPPER_MACROS +/* NEED_INDIRECT_CALL_WRAPPER_MACROS + * + * The INDIRECT_CALL_* macros were introduced upstream as upstream commit + * 283c16a2dfd3 ("indirect call wrappers: helpers to speed-up indirect calls + * of builtin") which landed in Linux 5.0 + * + * These are easy to implement directly. + */ +#ifdef CONFIG_RETPOLINE +#define INDIRECT_CALL_1(f, f1, ...) \ + ({ likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); }) +#define INDIRECT_CALL_2(f, f2, f1, ...) \ + ({ \ + likely(f == f2) ? f2(__VA_ARGS__) : \ + INDIRECT_CALL_1(f, f1, __VA_ARGS__); \ + }) + +#define INDIRECT_CALLABLE_DECLARE(f) f +#define INDIRECT_CALLABLE_SCOPE +#else /* !CONFIG_RETPOLINE */ +#define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALL_2(f, f2, f1, ...) 
f(__VA_ARGS__) +#define INDIRECT_CALLABLE_DECLARE(f) +#define INDIRECT_CALLABLE_SCOPE static +#endif /* CONFIG_RETPOLINE */ +#endif /* NEED_INDIRECT_CALL_WRAPPER_MACROS */ + +#ifdef NEED_INDIRECT_CALL_3_AND_4 +/* NEED_INDIRECT_CALL_3_AND_4 + * Support for the 3 and 4 call variants was added in upstream commit + * e678e9ddea96 ("indirect_call_wrapper: extend indirect wrapper to support up + * to 4 calls") + * + * These are easy to implement directly. + */ + +#ifdef CONFIG_RETPOLINE +#define INDIRECT_CALL_3(f, f3, f2, f1, ...) \ + ({ \ + likely(f == f3) ? f3(__VA_ARGS__) : \ + INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__); \ + }) +#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) \ + ({ \ + likely(f == f4) ? f4(__VA_ARGS__) : \ + INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__); \ + }) +#else /* !CONFIG_RETPOLINE */ +#define INDIRECT_CALL_3(f, f3, f2, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) f(__VA_ARGS__) +#endif /* CONFIG_RETPOLINE */ +#endif /* NEED_INDIRECT_CALL_3_AND_4 */ + +#ifdef NEED_EXPORT_INDIRECT_CALLABLE +/* NEED_EXPORT_INDIRECT_CALLABLE + * + * Support for EXPORT_INDIRECT_CALLABLE was added in upstream commit + * 0053859496ba ("net: add EXPORT_INDIRECT_CALLABLE wrapper") + * + * These are easy to implement directly. + */ +#ifdef CONFIG_RETPOLINE +#define EXPORT_INDIRECT_CALLABLE(f) EXPORT_SYMBOL(f) +#else +#define EXPORT_INDIRECT_CALLABLE(f) +#endif /* CONFIG_RETPOLINE */ +#endif /* NEED_EXPORT_INDIRECT_CALLABLE */ + +/* NEED_MUL_U64_U64_DIV_U64 + * + * mul_u64_u64_div_u64 was introduced in Linux 5.9 as part of commit + * 3dc167ba5729 ("sched/cputime: Improve cputime_adjust()") + */ +#ifdef NEED_MUL_U64_U64_DIV_U64 +u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div); +#ifndef div64_u64_rem +extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder); +#endif +#endif /* NEED_MUL_U64_U64_DIV_U64 */ + + +/* NEED_DIFF_BY_SCALED_PPM + * + * diff_by_scaled_ppm and adjust_by_scaled_ppm were introduced in + * kernel 6.1 by upstream commit 1060707e3809 ("ptp: introduce helpers + * to adjust by scaled parts per million"). + */ +#if 0//def NEED_DIFF_BY_SCALED_PPM +static inline bool +diff_by_scaled_ppm(u64 base, long scaled_ppm, u64 *diff) +{ + bool negative = false; + + if (scaled_ppm < 0) { + negative = true; + scaled_ppm = -scaled_ppm; + } + + *diff = mul_u64_u64_div_u64(base, (u64)scaled_ppm, + 1000000ULL << 16); + + return negative; +} + +static inline u64 +adjust_by_scaled_ppm(u64 base, long scaled_ppm) +{ + u64 diff; + + if (diff_by_scaled_ppm(base, scaled_ppm, &diff)) + return base - diff; + + return base + diff; +} +#endif /* NEED_DIFF_BY_SCALED_PPM */ + +#endif /* _KCOMPAT_IMPL_H_ */ diff --git a/drivers/net/ethernet/mucse/rnp/kcompat_overflow.h b/drivers/net/ethernet/mucse/rnp/kcompat_overflow.h new file mode 100755 index 0000000000000000000000000000000000000000..ec863bdc08b654a4ae001059fff35218418eda93 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/kcompat_overflow.h @@ -0,0 +1,326 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef __LINUX_OVERFLOW_H +#define __LINUX_OVERFLOW_H + +#include + +/* + * In the fallback code below, we need to compute the minimum and + * maximum values representable in a given type. These macros may also + * be useful elsewhere, so we provide them outside the + * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block. + * + * It would seem more obvious to do something like + * + * #define type_min(T) (T)(is_signed_type(T) ? 
(T)1 << (8*sizeof(T)-1) : 0) + * #define type_max(T) (T)(is_signed_type(T) ? + * ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0) + * + * Unfortunately, the middle expressions, strictly speaking, have + * undefined behaviour, and at least some versions of gcc warn about + * the type_max expression (but not if -fsanitize=undefined is in + * effect; in that case, the warning is deferred to runtime...). + * + * The slightly excessive casting in type_min is to make sure the + * macros also produce sensible values for the exotic type _Bool. [The + * overflow checkers only almost work for _Bool, but that's + * a-feature-not-a-bug, since people shouldn't be doing arithmetic on + * _Bools. Besides, the gcc builtins don't allow _Bool* as third + * argument.] + * + * Idea stolen from + * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html - + * credit to Christian Biere. + */ +/* The is_signed_type macro is redefined in a few places in various kernel + * headers. If this header is included at the same time as one of those, we + * will generate compilation warnings. Since we can't fix every old kernel, + * rename is_signed_type for this file to _kc_is_signed_type. This prevents + * the macro name collision, and should be safe since our drivers do not + * directly call the macro. + */ +#define _kc_is_signed_type(type) (((type)(-1)) < (type)1) +#define __type_half_max(type) \ + ((type)1 << (8 * sizeof(type) - 1 - _kc_is_signed_type(type))) +#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) +#define type_min(T) ((T)((T)-type_max(T) - (T)1)) + +#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW +/* + * For simplicity and code hygiene, the fallback code below insists on + * a, b and *d having the same type (similar to the min() and max() + * macros), whereas gcc's type-generic overflow checkers accept + * different types. Hence we don't just make check_add_overflow an + * alias for __builtin_add_overflow, but add type checks similar to + * below. + */ +#define check_add_overflow(a, b, d) \ + ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void)(&__a == &__b); \ + (void)(&__a == __d); \ + __builtin_add_overflow(__a, __b, __d); \ + }) + +#define check_sub_overflow(a, b, d) \ + ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void)(&__a == &__b); \ + (void)(&__a == __d); \ + __builtin_sub_overflow(__a, __b, __d); \ + }) + +#define check_mul_overflow(a, b, d) \ + ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void)(&__a == &__b); \ + (void)(&__a == __d); \ + __builtin_mul_overflow(__a, __b, __d); \ + }) + +#else + +/* Checking for unsigned overflow is relatively easy without causing UB. */ +#define __unsigned_add_overflow(a, b, d) \ + ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void)(&__a == &__b); \ + (void)(&__a == __d); \ + *__d = __a + __b; \ + *__d < __a; \ + }) +#define __unsigned_sub_overflow(a, b, d) \ + ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void)(&__a == &__b); \ + (void)(&__a == __d); \ + *__d = __a - __b; \ + __a < __b; \ + }) +/* + * If one of a or b is a compile-time constant, this avoids a division. + */ +#define __unsigned_mul_overflow(a, b, d) \ + ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void)(&__a == &__b); \ + (void)(&__a == __d); \ + *__d = __a * __b; \ + __builtin_constant_p(__b) ? 
\ + __b > 0 && __a > type_max(typeof(__a)) / __b : \ + __a > 0 && __b > type_max(typeof(__b)) / __a; \ + }) + +/* + * For signed types, detecting overflow is much harder, especially if + * we want to avoid UB. But the interface of these macros is such that + * we must provide a result in *d, and in fact we must produce the + * result promised by gcc's builtins, which is simply the possibly + * wrapped-around value. Fortunately, we can just formally do the + * operations in the widest relevant unsigned type (u64) and then + * truncate the result - gcc is smart enough to generate the same code + * with and without the (u64) casts. + */ + +/* + * Adding two signed integers can overflow only if they have the same + * sign, and overflow has happened iff the result has the opposite + * sign. + */ +#define __signed_add_overflow(a, b, d) \ + ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void)(&__a == &__b); \ + (void)(&__a == __d); \ + *__d = (u64)__a + (u64)__b; \ + (((~(__a ^ __b)) & (*__d ^ __a)) & type_min(typeof(__a))) != \ + 0; \ + }) + +/* + * Subtraction is similar, except that overflow can now happen only + * when the signs are opposite. In this case, overflow has happened if + * the result has the opposite sign of a. + */ +#define __signed_sub_overflow(a, b, d) \ + ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void)(&__a == &__b); \ + (void)(&__a == __d); \ + *__d = (u64)__a - (u64)__b; \ + ((((__a ^ __b)) & (*__d ^ __a)) & type_min(typeof(__a))) != 0; \ + }) + +/* + * Signed multiplication is rather hard. gcc always follows C99, so + * division is truncated towards 0. This means that we can write the + * overflow check like this: + * + * (a > 0 && (b > MAX/a || b < MIN/a)) || + * (a < -1 && (b > MIN/a || b < MAX/a) || + * (a == -1 && b == MIN) + * + * The redundant casts of -1 are to silence an annoying -Wtype-limits + * (included in -Wextra) warning: When the type is u8 or u16, the + * __b_c_e in check_mul_overflow obviously selects + * __unsigned_mul_overflow, but unfortunately gcc still parses this + * code and warns about the limited range of __b. + */ + +#define __signed_mul_overflow(a, b, d) \ + ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + typeof(a) __tmax = type_max(typeof(a)); \ + typeof(a) __tmin = type_min(typeof(a)); \ + (void)(&__a == &__b); \ + (void)(&__a == __d); \ + *__d = (u64)__a * (u64)__b; \ + (__b > 0 && (__a > __tmax / __b || __a < __tmin / __b)) || \ + (__b < (typeof(__b))-1 && \ + (__a > __tmin / __b || __a < __tmax / __b)) || \ + (__b == (typeof(__b))-1 && __a == __tmin); \ + }) + +#define check_add_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_add_overflow(a, b, d), \ + __unsigned_add_overflow(a, b, d)) + +#define check_sub_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_sub_overflow(a, b, d), \ + __unsigned_sub_overflow(a, b, d)) + +#define check_mul_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_mul_overflow(a, b, d), \ + __unsigned_mul_overflow(a, b, d)) + +#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */ + +/** check_shl_overflow() - Calculate a left-shifted value and check + * overflow + * + * @a: Value to be shifted + * @s: How many bits left to shift + * @d: Pointer to where to store the result + * + * Computes *@d = (@a << @s) + * + * Returns true if '*d' cannot hold the result or when 'a << s' doesn't + * make sense. 
Example conditions: + * - 'a << s' causes bits to be lost when stored in *d. + * - 's' is garbage (e.g. negative) or so large that the result of + * 'a << s' is guaranteed to be 0. + * - 'a' is negative. + * - 'a << s' sets the sign bit, if any, in '*d'. + * + * '*d' will hold the results of the attempted shift, but is not + * considered "safe for use" if false is returned. + */ +#define check_shl_overflow(a, s, d) \ + ({ \ + typeof(a) _a = a; \ + typeof(s) _s = s; \ + typeof(d) _d = d; \ + u64 _a_full = _a; \ + unsigned int _to_shift = _s >= 0 && _s < 8 * sizeof(*d) ? _s : \ + 0; \ + *_d = (_a_full << _to_shift); \ + (_to_shift != _s || *_d < 0 || _a < 0 || \ + (*_d >> _to_shift) != _a); \ + }) + +/** + * array_size() - Calculate size of 2-dimensional array. + * + * @a: dimension one + * @b: dimension two + * + * Calculates size of 2-dimensional array: @a * @b. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +static inline __must_check size_t array_size(size_t a, size_t b) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * array3_size() - Calculate size of 3-dimensional array. + * + * @a: dimension one + * @b: dimension two + * @c: dimension three + * + * Calculates size of 3-dimensional array: @a * @b * @c. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + if (check_mul_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(n, size, &bytes)) + return SIZE_MAX; + if (check_add_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * struct_size() - Calculate size of structure with trailing array. + * @p: Pointer to the structure. + * @member: Name of the array member. + * @n: Number of elements in the array. + * + * Calculates size of memory needed for structure @p followed by an + * array of @n @member elements. + * + * Return: number of bytes needed or SIZE_MAX on overflow. + */ +#define struct_size(p, member, n) \ + __ab_c_size(n, sizeof(*(p)->member) + __must_be_array((p)->member), \ + sizeof(*(p))) + +#endif /* __LINUX_OVERFLOW_H */ diff --git a/drivers/net/ethernet/mucse/rnp/kcompat_rhel_defs.h b/drivers/net/ethernet/mucse/rnp/kcompat_rhel_defs.h new file mode 100755 index 0000000000000000000000000000000000000000..523f4d4bcb4426b41f1ea6d7e5edf588b31a4ff6 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/kcompat_rhel_defs.h @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef _KCOMPAT_RHEL_DEFS_H_ +#define _KCOMPAT_RHEL_DEFS_H_ + +/* This is the RedHat Enterprise Linux distribution specific definitions file. + * It defines what features need backports for a given version of the RHEL + * kernel. + * + * It checks the RHEL_RELEASE_CODE and RHEL_RELEASE_VERSION macros to decide + * what support the target kernel has. + * + * It assumes that kcompat_std_defs.h has already been processed, and will + * #define or #undef any flags that have changed based on backports done by + * RHEL. 
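+ *
+ * Every block below follows the same pattern: compare RHEL_RELEASE_CODE
+ * against RHEL_RELEASE_VERSION(major, minor) and adjust flags in the #else
+ * branch, for example:
+ *
+ *        #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 2))
+ *        #else
+ *        #undef NEED_SKB_FRAG_OFF
+ *        #endif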
+ */ + +#if !RHEL_RELEASE_CODE +#error "RHEL_RELEASE_CODE is 0 or undefined" +#endif + +#ifndef RHEL_RELEASE_VERSION +#error "RHEL_RELEASE_VERSION is undefined" +#endif + +#if (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(6, 5)) +#define NEED_DIV64_U64_REM +#else +#endif + +#if (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(6, 10)) +#define NO_BIT_ATTRS +#define NO_SKB_DUMP +#define NO_REAL_QUEUE_NUM +#define COMPAT_PTP_NO_PINS +#define NO_SKB_VLAN_PROTO +#else /* > 6.8 */ +#endif /* 6.8 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 1)) +#define COMPAT_PTP_NO_PINS +#else /* >= 7.1 */ +#define HAVE_NDO_FEATURES_CHECK +#endif /* 7.1 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 3)) +#else /* >= 7.3 */ +#undef NEED_DEV_PRINTK_ONCE +#endif /* 7.3 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 5)) +#else /* >= 7.5 */ +#define HAVE_TCF_EXTS_TO_LIST +#endif /* 7.5 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 6)) +#else /* >= 7.6 */ +#undef NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 +/* CentOS-7-aarch64-Everything-1810.iso */ +#ifndef CONFIG_ARM64 +#undef NEED_TC_SETUP_QDISC_MQPRIO +#endif /* CONFIG_ARM64 */ +#endif /* 7.6 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 7)) +#define NO_TX_MAXRATE +#else /* >= 7.7 */ +// if anolios need this +#ifdef ANOLIS_OS +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif +#define HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR +#define HAVE_ETHTOOL_NEW_100G_BITS +#undef NEED_NETDEV_TX_SENT_QUEUE +#endif /* 7.7 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 0)) +#else /* >= 8.0 */ +#undef HAVE_TCF_EXTS_TO_LIST +#undef HAVE_ETHTOOL_NEW_100G_BITS +#define HAVE_NDO_OFFLOAD_STATS +#undef HAVE_RHEL7_EXTENDED_OFFLOAD_STATS +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +/* 7.7 undefs it due to a backport in 7.7+, but 8.0 needs it still */ +#define NEED_NETDEV_TX_SENT_QUEUE +#define HAVE_DEVLINK_REGIONS +#define HAVE_DEVLINK_PARAMS +#endif /* 8.0 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 1)) +#define NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE +#else /* >= 8.1 */ +#define HAVE_ETHTOOL_NEW_100G_BITS +#undef NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE +#define HAVE_DEVLINK_PARAMS_PUBLISH +#undef NEED_NETDEV_TX_SENT_QUEUE +#undef NEED_INDIRECT_CALL_WRAPPER_MACROS +#define HAVE_INDIRECT_CALL_WRAPPER_HEADER +#endif /* 8.1 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 2)) +#else /* >= 8.2 */ +#undef NEED_BUS_FIND_DEVICE_CONST_DATA +#undef NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY +#undef NEED_SKB_FRAG_OFF +#undef NEED_SKB_FRAG_OFF_ADD +#undef NEED_FLOW_INDR_BLOCK_CB_REGISTER +#define HAVE_FLOW_INDR_BLOCK_LOCK +#define HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID +#define HAVE_DEVLINK_HEALTH +#define HAVE_NETDEV_SB_DEV +#endif /* 8.2 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 3)) +#else /* >= 8.3 */ +#undef NEED_CPU_LATENCY_QOS_RENAME 
+#define HAVE_DEVLINK_HEALTH_OPS_EXTACK +#define HAVE_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT +#undef NEED_DEVLINK_REGION_CREATE_OPS +#endif /* 8.3 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 4)) +#else /* >= 8.4 */ +#undef NEED_DEVLINK_PORT_ATTRS_SET_STRUCT +#undef NEED_NET_PREFETCH +#undef NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY +#undef HAVE_XDP_QUERY_PROG +#define HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT +#endif /* 8.4 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 5)) +#else /* >= 8.5 */ +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +#undef HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY +#undef HAVE_NAPI_BUSY_LOOP +#undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#endif /* 8.5 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 6)) +#else /* >= 8.6 */ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9, 0)) +#ifdef ANOLIS_OS +#define HAVE_NETIF_NAPI_ADD_WEIGHT +#endif +#define HAVE_ETHTOOL_COALESCE_EXTACK +#endif /* < 9.0 */ +#undef NEED_ETH_HW_ADDR_SET +#endif /* 8.6 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 7)) +#else /* >= 8.7 */ +#undef NEED_DEVLINK_ALLOC_SETS_DEV +#undef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#undef HAVE_DEVLINK_PARAMS_PUBLISH +#undef HAVE_DEVLINK_RELOAD_ENABLE_DISABLE +#undef HAVE_DEVLINK_REGISTER_SETS_DEV +#define HAVE_DEVLINK_NOTIFY_REGISTER +#define HAVE_DEVLINK_SET_FEATURES +#endif /* 8.7 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 8)) +#else /* >= 8.8 */ +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#undef HAVE_NETIF_NAPI_ADD_WEIGHT +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 10)) +#else /* >= 8.10 */ +#define HAVE_NETIF_NAPI_ADD_WEIGHT +#endif + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9, 0)) +#else /* >= 9.0 */ +#undef HAVE_NETIF_NAPI_ADD_WEIGHT +#undef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#undef HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_XDP_BUFF_RXQ +#undef NEED_DEVLINK_ALLOC_SETS_DEV +#undef HAVE_DEVLINK_PARAMS_PUBLISH +#undef HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT +#undef HAVE_DEVLINK_REGISTER_SETS_DEV +#define HAVE_NDO_ETH_IOCTL +#endif /* 9.0 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9, 1)) +//#undef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +//#undef HAVE_ETHTOOL_COALESCE_EXTACK +#else /* >= 9.1 */ +#undef HAVE_PASID_SUPPORT +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define HAVE_ETHTOOL_COALESCE_EXTACK +#endif /* 9.1 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9, 2)) +#else /* >= 9.2 */ +#define HAVE_XDP_BUFF_RXQ +#define HAVE_NETIF_NAPI_ADD_WEIGHT +//#undef HAVE_ETHTOOL_COALESCE_EXTACK +#endif /* 9.2 */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9, 3)) +#else /* >= 9.3 */ +#define NO_PCIE_ERROR_REPORTING +#define HAVE_ETHTOOL_COALESCE_EXTACK +#endif /* 9.3 */ + +#endif /* _KCOMPAT_RHEL_DEFS_H_ */ diff --git 
a/drivers/net/ethernet/mucse/rnp/kcompat_sles_defs.h b/drivers/net/ethernet/mucse/rnp/kcompat_sles_defs.h new file mode 100755 index 0000000000000000000000000000000000000000..e447c22b7b34083f3f2f6ebd1cdeff6583b04ec9 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/kcompat_sles_defs.h @@ -0,0 +1,185 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef _KCOMPAT_SLES_DEFS_H_ +#define _KCOMPAT_SLES_DEFS_H_ + +/* This is the SUSE Linux Enterprise distribution specific definitions file + * It defines what features need backports for a given version of the SUSE + * Linux Enterprise kernel. + * + * It checks a combination of the LINUX_VERSION code and the + * SLE_LOCALVERSION_CODE to determine what support the kernel has. + * + * It assumes that kcompat_std_defs.h has already been processed, and will + * #define or #undef any flags that have changed based on backports done by + * SUSE. + */ + +#ifndef LINUX_VERSION_CODE +#error "LINUX_VERSION_CODE is undefined" +#endif + +#ifndef KERNEL_VERSION +#error "KERNEL_VERSION is undefined" +#endif + +#if !SLE_KERNEL_REVISION +#error "SLE_KERNEL_REVISION is 0 or undefined" +#endif + +#if SLE_KERNEL_REVISION > 65535 +#error "SLE_KERNEL_REVISION is unexpectedly large" +#endif + +/* SLE kernel versions are a combination of the LINUX_VERSION_CODE along + * with an extra digit that indicates the SUSE specific revision of that + * kernel. This value is found in the CONFIG_LOCALVERSION of the SUSE + * kernel, which is extracted by common.mk and placed into + * SLE_KERNEL_REVISION_CODE. + * + * We combine the value of SLE_KERNEL_REVISION along with the LINUX_VERSION + * code to generate the useful value that determines what specific kernel + * we're dealing with. + * + * Just in case the SLE_KERNEL_REVISION ever goes above 255, we reserve 16 + * bits instead of 8 for this value. + */ +#define SLE_KERNEL_CODE ((LINUX_VERSION_CODE << 16) + SLE_KERNEL_REVISION) +#define SLE_KERNEL_VERSION(a, b, c, d) ((KERNEL_VERSION(a, b, c) << 16) + (d)) + +/* Unlike RHEL, SUSE kernels are not always tied to a single service pack. + * For example, 4.12.14 was used as the base for SLE 15 SP1, SLE 12 SP4, + * and SLE 12 SP5. + * + * You can find the patches that SUSE applied to the kernel tree at + * https://github.com/SUSE/kernel-source. + * + * You can find the correct kernel version for a check by using steps similar + * to the following + * + * 1) download the kernel-source repo + * 2) checkout the relevant branch, i.e SLE15-SP3 + * 3) find the relevant backport you're interested in the patches.suse + * directory + * 4) git log to locate the commit that introduced the backport + * 5) git describe --contains to find the relevant tag that includes that + * commit, i.e. rpm-5.3.18-37 + * 6) those digits represent the SLE kernel that introduced that backport. + * + * Try to keep the checks in SLE_KERNEL_CODE order and condense where + * possible. + */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE > SLE_KERNEL_VERSION(4, 12, 14, 23) && \ + SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4, 12, 14, 94)) +/* + * 4.12.14 is used as the base for SLE 12 SP4, SLE 12 SP5, SLE 15, and SLE 15 + * SP1. Unfortunately the revision codes do not line up cleanly. SLE 15 + * launched with 4.12.14-23. It appears that SLE 12 SP4 and SLE 15 SP1 both + * diverged from this point, with SLE 12 SP4 kernels starting around + * 4.12.14-94. 
A few backports for SLE 15 SP1 landed in some alpha and beta + * kernels tagged between 4.12.14-25 up to 4.12.14-32. These changes did not + * make it into SLE 12 SP4. This was cleaned up with SLE 12 SP5 by an apparent + * merge in 4.12.14-111. The official launch of SLE 15 SP1 ended up with + * version 4.12.14-195. + * + * Because of this inconsistency and because all of these kernels appear to be + * alpha or beta kernel releases for SLE 15 SP1, we do not rely on version + * checks between this range. Issue a warning to indicate that we do not + * support these. + */ +#warning \ + "SLE kernel versions between 4.12.14-23 and 4.12.14-94 are not supported" +#endif + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4, 12, 14, 100)) +#else /* >= 4.12.14-100 */ +#undef HAVE_TCF_EXTS_TO_LIST +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +#endif /* 4.12.14-100 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4, 12, 14, 111)) +#define NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE +#else /* >= 4.12.14-111 */ +#define HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR +#undef NEED_MACVLAN_ACCEL_PRIV +#undef NEED_MACVLAN_RELEASE_L2FW_OFFLOAD +#undef NEED_MACVLAN_SUPPORTS_DEST_FILTER +#undef NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE +#endif /* 4.12.14-111 */ + +/*****************************************************************************/ +/* SLES 12-SP5 base kernel version */ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4, 12, 14, 120)) +#else /* >= 4.12.14-120 */ +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_TCF_MIRRED_DEV +#define HAVE_TCF_BLOCK +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#undef NEED_TC_SETUP_QDISC_MQPRIO +#undef NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 +#undef NEED_NETDEV_TX_SENT_QUEUE +#endif /* 4.12.14-120 */ + +/*****************************************************************************/ +/* SLES 15-SP1 base */ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4, 12, 14, 195)) +#else /* >= 4.12.14-195 */ +#undef NEED_NETDEV_TX_SENT_QUEUE +#endif /* 4.12.14-195 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5, 3, 8, 2)) +#else /* >= 5.3.8-2 */ +#undef NEED_BUS_FIND_DEVICE_CONST_DATA +#undef NEED_FLOW_INDR_BLOCK_CB_REGISTER +#undef NEED_SKB_FRAG_OFF +#undef NEED_SKB_FRAG_OFF_ADD +#define HAVE_FLOW_INDR_BLOCK_LOCK +#endif /* 5.3.8-2 */ + +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5, 3, 16, 2)) +#else /* >= 5.3.16-2 */ +#define HAVE_DEVLINK_HEALTH_OPS_EXTACK +#endif /* 5.3.16-2 */ + +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5, 3, 18, 26)) +#else /* >= 5.3.18-26 */ +#undef NEED_CPU_LATENCY_QOS_RENAME +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#endif + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5, 3, 18, 34)) +#else /* >= 5.3.18-34 */ +#undef NEED_DEVLINK_REGION_CREATE_OPS +#undef NEED_DEVLINK_PORT_ATTRS_SET_STRUCT +#define HAVE_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER +#endif /* 5.3.18-34 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5, 3, 18, 37)) +#else /* >= 5.3.18-37 */ +#undef NEED_NET_PREFETCH +#endif /* 5.3.18-37 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < 
SLE_KERNEL_VERSION(5, 3, 18, 38)) +#else /* >= 5.3.18-38 */ +#undef NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY +#endif /* 5.3.18-38 */ + +/*****************************************************************************/ +#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5, 14, 21, 9)) +#else /* >= 5.14.21-150400.9 */ +#undef NEED_DEVLINK_ALLOC_SETS_DEV +#define HAVE_ETHTOOL_COALESCE_EXTACK +#endif /* 5.14.21-150400.9 */ + +#endif /* _KCOMPAT_SLES_DEFS_H_ */ diff --git a/drivers/net/ethernet/mucse/rnp/kcompat_std_defs.h b/drivers/net/ethernet/mucse/rnp/kcompat_std_defs.h new file mode 100755 index 0000000000000000000000000000000000000000..eddcfe188f6f89040181eaaa9c033453f3871289 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/kcompat_std_defs.h @@ -0,0 +1,299 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef _KCOMPAT_STD_DEFS_H_ +#define _KCOMPAT_STD_DEFS_H_ + +/* This file contains the definitions for what kernel features need + * backports for a given kernel. It targets only the standard stable kernel + * releases. It must check only LINUX_VERSION_CODE and assume the kernel is + * a standard release, and not a custom distribution. + * + * It must define HAVE_ and NEED_ for features. It must not + * implement any backports, instead leaving the implementation to the + * kcompat_impl.h header. + * + * If a feature can be easily implemented as a replacement macro or fully + * backported, use a NEED_ to indicate that the feature needs + * a backport. (If NEED_ is undefined, then no backport for that + * feature is needed). + * + * If a feature cannot be easily implemented in kcompat directly, but + * requires drivers to make specific changes such as stripping out an entire + * feature or modifying a function pointer prototype, use a HAVE_. 
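+ *
+ * For example, kernels older than 5.10 get
+ *
+ *        #define NEED_NET_PREFETCH
+ *
+ * below, which makes kcompat_impl.h supply a net_prefetch() fallback,
+ * whereas a HAVE_ flag such as HAVE_NDO_ETH_IOCTL only tells driver code
+ * that the kernel interface exists and cannot be emulated here.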
+ */ + +#ifndef LINUX_VERSION_CODE +#error "LINUX_VERSION_CODE is undefined" +#endif + +#ifndef KERNEL_VERSION +#error "KERNEL_VERSION is undefined" +#endif + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) +#define NEED_DEV_PM_DOMAIN_ATTACH_DETACH +#else /* >= 3,18,0 */ +#endif /* 3,18,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) +#define NO_TX_MAXRATE +#define NEED_DEV_PRINTK_ONCE +#else /* >= 3,19,0 */ +#endif /* 3,19,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)) +#define NEED_DEFINE_STATIC_KEY_FALSE +#define NEED_STATIC_BRANCH +#else /* >= 4,3,0 */ +#define NEED_DECLARE_STATIC_KEY_FALSE +#endif /* 4,3,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) +#else /* >= 4,8,0 */ +#define HAVE_TCF_EXTS_TO_LIST +#define HAVE_PCI_ALLOC_IRQ +#endif /* 4,8,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) +#define NEED_JIFFIES_64_TIME_IS_MACROS +#else /* >= 4,9,0 */ +#define HAVE_KTHREAD_DELAYED_API +#define HAVE_NDO_OFFLOAD_STATS +#undef NEED_DECLARE_STATIC_KEY_FALSE +#endif /* 4,9,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) +#else /* >= 4,12,0 */ +#define HAVE_NAPI_BUSY_LOOP +#endif /* 4,12,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)) +#define NEED_TC_SETUP_QDISC_MQPRIO +#define NEED_NETDEV_XDP_STRUCT +#else /* >= 4,15,0 */ +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_NDO_BPF +#endif /* 4,15,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) +#define NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 +#else /* >= 4,16,0 */ +#define HAVE_XDP_BUFF_RXQ +#define HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#endif /* 4,16,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)) +#define NEED_CONVERT_ART_NS_TO_TSC +#else /* >= 4,17,0 */ +#endif /* 4,17,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)) +#define NEED_MACVLAN_ACCEL_PRIV +#define NEED_MACVLAN_RELEASE_L2FW_OFFLOAD +#define NEED_MACVLAN_SUPPORTS_DEST_FILTER +#else /* >= 4,18,0 */ +#define HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR +#endif /* 4,18,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) +#define NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE +#else /* >= 4,19,0 */ +#undef HAVE_TCF_EXTS_TO_LIST +#define HAVE_TCF_EXTS_FOR_EACH_ACTION +#define HAVE_DEVLINK_REGIONS +#define HAVE_TC_ETF_QOPT_OFFLOAD +#define HAVE_DEVLINK_PARAMS +#endif /* 4,19,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0)) +#define NEED_NETDEV_TX_SENT_QUEUE +#else /* >= 4.20.0 */ +#endif /* 4.20.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < 
KERNEL_VERSION(5, 0, 0)) +#define NEED_INDIRECT_CALL_WRAPPER_MACROS +#else /* >= 5.0.0 */ +#define HAVE_INDIRECT_CALL_WRAPPER_HEADER +#endif /* 5.0.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)) +#else /* >= 5.1.0 */ +#define HAVE_ETHTOOL_200G_BITS +#define HAVE_ETHTOOL_NEW_100G_BITS +#define HAVE_DEVLINK_PARAMS_PUBLISH +#define HAVE_DEVLINK_HEALTH +#endif /* 5.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) +#else /* >= 5.2.0 */ +#define HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID +#endif /* 5.2.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0)) +#define NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY +#define NEED_BUS_FIND_DEVICE_CONST_DATA +#else /* >= 5.3.0 */ +#endif /* 5.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) +#define NEED_SKB_FRAG_OFF_ADD +#define NEED_SKB_FRAG_OFF +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 14, 241) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)) +#undef NEED_SKB_FRAG_OFF +#endif /* > 4.14.241 && < 4.15.0 */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 19, 200) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0)) +#undef NEED_SKB_FRAG_OFF +#endif /* > 4.19.200 && < 4.20.0 */ + +#define NEED_FLOW_INDR_BLOCK_CB_REGISTER +#else /* >= 5.4.0 */ +#define HAVE_FLOW_INDR_BLOCK_LOCK +#define HAVE_XSK_UNALIGNED_CHUNK_PLACEMENT +#endif /* 5.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0)) +#else /* >= 5.5.0 */ +#define HAVE_DEVLINK_HEALTH_OPS_EXTACK +#endif /* 5.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0)) +#define NEED_DEVLINK_REGION_CREATE_OPS +#define NEED_CPU_LATENCY_QOS_RENAME +#else /* >= 5.7.0 */ +#define HAVE_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT +#endif /* 5.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)) +#else /* >= 5.8.0 */ +#undef HAVE_XSK_UNALIGNED_CHUNK_PLACEMENT +#endif /* 5.8.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)) +#define NEED_DEVLINK_PORT_ATTRS_SET_STRUCT +#define HAVE_XDP_QUERY_PROG +#define NEED_INDIRECT_CALL_3_AND_4 +#else /* >= 5.9.0 */ +#define HAVE_TASKLET_SETUP +#endif /* 5.9.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) +#define NEED_NET_PREFETCH +#define NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY +#else /* >= 5.10.0 */ +#define HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#define HAVE_UDP_TUNNEL_NIC_SHARED +#endif /* 5.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) +#define HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY +#else /* >= 5.11.0 */ +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW +#define HAVE_XSK_BATCHED_DESCRIPTOR_INTERFACES +#define HAVE_PASID_SUPPORT +#undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS 
+#endif /* 5.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 12, 0)) +#define NEED_EXPORT_INDIRECT_CALLABLE +#else /* >= 5.12.0 */ +#endif /* 5.12.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0)) +/* HAVE_KOBJ_IN_MDEV_PARENT_OPS_CREATE + * + * create api changed as part of the commit c2ef2f50ad0c( vfio/mdev: Remove + * kobj from mdev_parent_ops->create()) + * + * if flag is defined use the old API else new API + */ +#define HAVE_KOBJ_IN_MDEV_PARENT_OPS_CREATE +#define HAVE_DEV_IN_MDEV_API +#else /* >= 5.13.0 */ +#endif /* 5.13.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)) +#else /* >= 5.14.0 */ +#define HAVE_TTY_WRITE_ROOM_UINT +#endif /* 5.14.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0)) +#define NEED_DEVLINK_ALLOC_SETS_DEV +#define HAVE_DEVLINK_REGISTER_SETS_DEV +//#define NEED_ETH_HW_ADDR_SET +#else /* >= 5.15.0 */ +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_NDO_ETH_IOCTL +#define HAVE_DEVICE_IN_MDEV_PARENT_OPS +#undef HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT +#undef HAVE_DEVLINK_PARAMS_PUBLISH +#endif /* 5.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)) +#else /* >= 5.16.0 */ +#undef HAVE_PASID_SUPPORT +#define HAVE_DEVLINK_SET_FEATURES +#define HAVE_DEVLINK_NOTIFY_REGISTER +#endif /* 5.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0)) +#define NEED_NO_NETDEV_PROG_XDP_WARN_ACTION +#else /* >=5.17.0*/ +#define HAVE_XDP_DO_FLUSH +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif /* 5.17.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 18, 0)) +#define HAVE_XSK_TX_PEEK_RELEASE_DESC_BATCH_3_PARAMS +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) +#undef HAVE_XSK_TX_PEEK_RELEASE_DESC_BATCH_3_PARAMS +#endif /* 5.11.0 */ +#else /* >=5.18.0*/ +#endif /* 5.18.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)) +#else /* >=5.19.0*/ +#define HAVE_NETIF_NAPI_ADD_WEIGHT +#endif /* 5.19.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)) +#else /* >=6.2.0*/ +//#define COMPAT_PTP_NO_ADJFREQ +#endif /* 6.2.0 */ + +#endif /* _KCOMPAT_STD_DEFS_H_ */ diff --git a/drivers/net/ethernet/mucse/rnp/kcompat_ubuntu_defs.h b/drivers/net/ethernet/mucse/rnp/kcompat_ubuntu_defs.h new file mode 100755 index 0000000000000000000000000000000000000000..2f706d60b903a2ba0aa8aa1643cd548e63b0aa6a --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/kcompat_ubuntu_defs.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef _KCOMPAT_UBUNTU_DEFS_H_ +#define _KCOMPAT_UBUNTU_DEFS_H_ + +/* This file contains the definitions for the Ubuntu specific distribution + * of the Linux kernel. + * + * It checks the UBUNTU_VERSION_CODE to decide which features are available + * in the target kernel.It assumes that kcompat_std_defs.h has already been + * processed, and will #define or #undef the relevant flags based on what + * features were backported by Ubuntu. 
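+ *
+ * Checks in this file compare against a packed version code in the same way
+ * as the other distribution headers, for example:
+ *
+ *        #if (UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4, 15, 0, 159))
+ *        #undef NEED_SKB_FRAG_OFF
+ *        #endif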
+ */ + +#if !UTS_UBUNTU_RELEASE_ABI +#error "UTS_UBUNTU_RELEASE_ABI is 0 or undefined" +#endif + +#if !UBUNTU_VERSION_CODE +#error "UBUNTU_VERSION_CODE is 0 or undefined" +#endif + +#ifndef UBUNTU_VERSION +#error "UBUNTU_VERSION is undefined" +#endif + +#if (UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4, 15, 0, 159) && \ + UBUNTU_VERSION_CODE < UBUNTU_VERSION(4, 15, 0, 999)) +#undef NEED_SKB_FRAG_OFF +#endif + +/*****************************************************************************/ +#if (UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4, 15, 0, 159) && \ + UBUNTU_VERSION_CODE < UBUNTU_VERSION(4, 15, 0, 999)) +#undef NEED_SKB_FRAG_OFF +#endif + +#endif /* _KCOMPAT_UBUNTU_DEFS_H_ */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp.h b/drivers/net/ethernet/mucse/rnp/rnp.h new file mode 100755 index 0000000000000000000000000000000000000000..23980baee09a57521482b8a92b1cb462991c900a --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp.h @@ -0,0 +1,1242 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef _RNP_H_ +#define _RNP_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +//#include +#include +#include +#include + +#include "rnp_type.h" +#include "rnp_common.h" +#include "rnp_compat.h" +#include "rnp_dcb.h" +#ifdef CONFIG_RNP_DCA +#include +#endif + +/* common prefix used by pr_<> macros */ +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define RNP_ALLOC_PAGE_ORDER 0 +#define RNP_PAGE_BUFFER_NUMS(ring) \ + ((1 << RNP_ALLOC_PAGE_ORDER) * PAGE_SIZE / \ + ALIGN((rnp_rx_offset(ring) + rnp_rx_bufsz(ring) + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \ + RNP_RX_HWTS_OFFSET), \ + 1024)) + +#define RNP_DEFAULT_TX_WORK (128) +#define RNP_MIN_TX_WORK (32) +#define RNP_MAX_TX_WORK (512) +#define RNP_MIN_RX_WORK (32) +#define RNP_MAX_RX_WORK (512) +#define RNP_WORK_ALIGN (2) +#define RNP_MIN_TX_FRAME (1) +#define RNP_MAX_TX_FRAME (256) +#define RNP_MIN_TX_USEC (30) +#define RNP_MAX_TX_USEC (10000) + +#define RNP_MIN_RX_FRAME (1) +#define RNP_MAX_RX_FRAME (256) +#define RNP_MIN_RX_USEC (10) +#define RNP_MAX_RX_USEC (10000) + +#define RNP_MAX_TXD (4096) +#define RNP_MIN_TXD (64) + +#define ACTION_TO_MPE (130) +#define MPE_PORT (10) +#define AUTO_ALL_MODES 0 +/* TX/RX descriptor defines */ +#ifdef FEITENG +#define RNP_DEFAULT_TXD 4096 +#else +#define RNP_DEFAULT_TXD 512 +#endif + +#define RNP_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define RNP_REQ_RX_DESCRIPTOR_MULTIPLE 8 + +#ifdef FEITENG +#define RNP_DEFAULT_RXD 4096 +#else +#define RNP_DEFAULT_RXD 512 +#endif +#define RNP_MAX_RXD 4096 +#define RNP_MIN_RXD 64 + +/* flow control */ +#define RNP_MIN_FCRTL 0x40 +#define RNP_MAX_FCRTL 0x7FF80 +#define RNP_MIN_FCRTH 0x600 +#define RNP_MAX_FCRTH 0x7FFF0 +#define RNP_DEFAULT_FCPAUSE 0xFFFF +#define RNP10_DEFAULT_HIGH_WATER 0x320 +#define RNP10_DEFAULT_LOW_WATER 0x270 +#define RNP500_DEFAULT_HIGH_WATER 400 +#define RNP500_DEFAULT_LOW_WATER 256 +#define RNP_MIN_FCPAUSE 0 +#define RNP_MAX_FCPAUSE 0xFFFF + +/* Supported Rx Buffer Sizes */ +#define RNP_RXBUFFER_256 256 /* Used for skb receive header */ +#define RNP_RXBUFFER_1536 1536 +#define RNP_RXBUFFER_2K 2048 +#define RNP_RXBUFFER_3K 3072 +#define RNP_RXBUFFER_4K 4096 +#define RNP_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ +#define RNP_RXBUFFER_MAX (RNP_RXBUFFER_2K) +#ifdef CONFIG_RNP_DISABLE_PACKET_SPLIT +#define RNP_RXBUFFER_7K 7168 +#define RNP_RXBUFFER_8K 8192 +#define RNP_RXBUFFER_15K 15360 +#endif /* CONFIG_RNP_DISABLE_PACKET_SPLIT */ 
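+/* Note: the 7K/8K/15K receive buffer sizes above are only compiled in when
+ * packet split is disabled (CONFIG_RNP_DISABLE_PACKET_SPLIT). MAX_Q_VECTORS
+ * below sizes the q_vector[] array in struct rnp_adapter; the number of
+ * vectors actually in use at runtime is tracked separately by num_q_vectors
+ * and max_q_vectors in that structure.
+ */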
+ +#define MAX_Q_VECTORS 128 + +#define RNP_RING_COUNTS_PEER_PF 8 +#ifdef NETIF_F_GSO_PARTIAL +#define RNP_GSO_PARTIAL_FEATURES \ + (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) +#endif /* NETIF_F_GSO_PARTIAL */ + +/* + * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we + * reserve 64 more, and skb_shared_info adds an additional 320 bytes more, + * this adds up to 448 bytes of extra data. + * + * Since netdev_alloc_skb now allocates a page fragment we can use a value + * of 256 and the resultant skb will have a truesize of 960 or less. + */ +#define RNP_RX_HDR_SIZE RNP_RXBUFFER_256 + +#define RNP_ITR_ADAPTIVE_MIN_INC 2 +#define RNP_ITR_ADAPTIVE_MIN_USECS 5 +#define RNP_ITR_ADAPTIVE_MAX_USECS 800 +#define RNP_ITR_ADAPTIVE_LATENCY 0x400 +#define RNP_ITR_ADAPTIVE_BULK 0x00 +#define RNP_ITR_ADAPTIVE_MASK_USECS \ + (RNP_ITR_ADAPTIVE_LATENCY - RNP_ITR_ADAPTIVE_MIN_INC) + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#ifdef OPTM_WITH_LPAGE +#define RNP_RX_BUFFER_WRITE (PAGE_SIZE / 2048) /* Must be power of 2 */ +#else +#define RNP_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#endif +enum rnp_tx_flags { + /* cmd_type flags */ + RNP_TX_FLAGS_HW_VLAN = 0x01, + RNP_TX_FLAGS_TSO = 0x02, + RNP_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + RNP_TX_FLAGS_CC = 0x08, + RNP_TX_FLAGS_IPV4 = 0x10, + RNP_TX_FLAGS_CSUM = 0x20, + + /* software defined flags */ + RNP_TX_FLAGS_SW_VLAN = 0x40, + RNP_TX_FLAGS_FCOE = 0x80, +}; +#ifndef RNP_MAX_VF_CNT +#define RNP_MAX_VF_CNT 64 +#endif + +#define RNP_RX_RATE_HIGH 450000 +#define RNP_RX_COAL_TIME_HIGH 128 +#define RNP_RX_SIZE_THRESH 1024 +#define RNP_RX_RATE_THRESH (1000000 / RNP_RX_COAL_TIME_HIGH) +#define RNP_SAMPLE_INTERVAL 0 +#define RNP_AVG_PKT_SMALL 256 + +#define RNP_MAX_VF_MC_ENTRIES 30 +#define RNP_MAX_VF_FUNCTIONS RNP_MAX_VF_CNT +#define RNP_MAX_VFTA_ENTRIES 128 +#define MAX_EMULATION_MAC_ADDRS 16 +#define RNP_MAX_PF_MACVLANS_N10 15 +//#define RNP_MAX_PF_MACVLANS 15 +#define PF_RING_CNT_WHEN_IOV_ENABLED 2 +#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset) + +enum vf_link_state { + rnp_link_state_on, + rnp_link_state_auto, + rnp_link_state_off, + +}; + +struct vf_data_storage { + unsigned char vf_mac_addresses[ETH_ALEN]; + unsigned char vf_mac_fake_address[ETH_ALEN]; + int vf_mac_fake_set; + u16 vf_mc_hashes[RNP_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + u16 default_vf_vlan_id; + u16 vlans_enabled; + bool clear_to_send; + bool pf_set_mac; + u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ + u16 vf_vlan; // vf just can set 1 vlan + u16 pf_qos; + u16 tx_rate; +#ifdef HAVE_NDO_SET_VF_LINK_STATE + int link_state; +#endif + u16 vlan_count; + u8 spoofchk_enabled; + u8 trusted; + bool promisc_mode; + unsigned long status; + unsigned int vf_api; +}; + +enum vf_state_t { + __VF_MBX_USED, +}; + +struct vf_macvlans { + struct list_head l; + int vf; + int rar_entry; + bool free; + bool is_macvlan; + u8 vf_macvlan[ETH_ALEN]; +}; + +/* now tx max 4k for one desc */ +// feiteng use 12k can get better netperf performance +#define RNP_MAX_TXD_PWR 12 +#define RNP_MAX_DATA_PER_TXD (1 << RNP_MAX_TXD_PWR) +//#define RNP_MAX_DATA_PER_TXD (12 * 1024) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), RNP_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffers + */ +struct rnp_tx_buffer { + struct rnp_tx_desc *next_to_watch; + unsigned long time_stamp; + struct sk_buff *skb; + unsigned int bytecount; + unsigned short gso_segs; + bool gso_need_padding; + + __be16 protocol; + __be16 priv_tags; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + union { + u32 mss_len_vf_num; + struct { + __le16 mss_len; + u8 vf_num; + u8 l4_hdr_len; + }; + }; + union { + u32 inner_vlan_tunnel_len; + struct { + u8 tunnel_hdr_len; + u8 inner_vlan_l; + u8 inner_vlan_h; + u8 resv; + }; + }; + bool ctx_flag; +}; + +struct rnp_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; +#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; +#endif +}; + +struct rnp_queue_stats { + u64 packets; + u64 bytes; +}; + +struct rnp_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; + u64 clean_desc; + u64 poll_count; + u64 irq_more_count; + u64 send_bytes; + u64 send_bytes_to_hw; + u64 todo_update; + u64 send_done_bytes; + u64 vlan_add; + u64 tx_next_to_clean; + u64 tx_irq_miss; + u64 tx_equal_count; + u64 tx_clean_times; + u64 tx_clean_count; +}; + +struct rnp_rx_queue_stats { + u64 driver_drop_packets; + u64 rsc_count; + u64 rsc_flush; + u64 non_eop_descs; + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; + u64 alloc_rx_page; + u64 csum_err; + u64 csum_good; + u64 poll_again_count; + u64 vlan_remove; + u64 rx_next_to_clean; + u64 rx_irq_miss; + u64 rx_equal_count; + u64 rx_clean_times; + u64 rx_clean_count; +}; + +enum rnp_ring_state_t { +#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT + __RNP_RX_3K_BUFFER, + __RNP_RX_BUILD_SKB_ENABLED, +#endif + __RNP_TX_FDIR_INIT_DONE, + __RNP_TX_XPS_INIT_DONE, + __RNP_TX_DETECT_HANG, + __RNP_HANG_CHECK_ARMED, + __RNP_RX_CSUM_UDP_ZERO_ERR, + __RNP_RX_FCOE, +}; + +#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT +#define ring_uses_build_skb(ring) \ + test_bit(__RNP_RX_BUILD_SKB_ENABLED, &(ring)->state) +#endif + +#define check_for_tx_hang(ring) test_bit(__RNP_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__RNP_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__RNP_TX_DETECT_HANG, &(ring)->state) +struct rnp_ring { + struct rnp_ring *next; /* pointer to next ring in q_vector */ + struct rnp_q_vector *q_vector; /* backpointer to host q_vector */ + struct net_device *netdev; /* netdev ring belongs to */ + struct device *dev; /* device for DMA mapping */ + void *desc; /* descriptor ring memory */ + union { + struct rnp_tx_buffer 
*tx_buffer_info; + struct rnp_rx_buffer *rx_buffer_info; + }; + unsigned long last_rx_timestamp; + unsigned long state; + u8 __iomem *ring_addr; + u8 __iomem *tail; + u8 __iomem *dma_int_stat; + u8 __iomem *dma_int_mask; + u8 __iomem *dma_int_clr; + dma_addr_t dma; /* phys. address of descriptor ring */ + unsigned int size; /* length in bytes */ + u32 ring_flags; +#define RNP_RING_FLAG_DELAY_SETUP_RX_LEN ((u32)(1 << 0)) +#define RNP_RING_FLAG_CHANGE_RX_LEN ((u32)(1 << 1)) +#define RNP_RING_FLAG_DO_RESET_RX_LEN ((u32)(1 << 2)) +#define RNP_RING_SKIP_TX_START ((u32)(1 << 3)) +#define RNP_RING_NO_TUNNEL_SUPPORT ((u32)(1 << 4)) +#define RNP_RING_SIZE_CHANGE_FIX ((u32)(1 << 5)) +#define RNP_RING_SCATER_SETUP ((u32)(1 << 6)) +#define RNP_RING_STAGS_SUPPORT ((u32)(1 << 7)) +#define RNP_RING_DOUBLE_VLAN_SUPPORT ((u32)(1 << 8)) +#define RNP_RING_VEB_MULTI_FIX ((u32)(1 << 9)) +#define RNP_RING_IRQ_MISS_FIX ((u32)(1 << 10)) +#define RNP_RING_OUTER_VLAN_FIX ((u32)(1 << 11)) +#define RNP_RING_CHKSM_FIX ((u32)(1 << 12)) +#define RNP_RING_LOWER_ITR ((u32)(1 << 13)) + u8 pfvfnum; + + u16 count; /* amount of descriptors */ + u16 temp_count; + u16 reset_count; + + u8 queue_index; /* queue_index needed for multiqueue queue management */ + u8 rnp_queue_idx; /* the real ring,used by dma */ + u16 next_to_use; + u16 next_to_clean; + + u16 device_id; +#ifdef OPTM_WITH_LPAGE + u16 rx_page_buf_nums; + u32 rx_per_buf_mem; + struct sk_buff *skb; +#endif + union { +#ifdef CONFIG_RNP_DISABLE_PACKET_SPLIT + u16 rx_buf_len; +#else + u16 next_to_alloc; +#endif + struct { + u8 atr_sample_rate; + u8 atr_count; + }; + }; + + u8 dcb_tc; + struct rnp_queue_stats stats; +#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync syncp; +#endif + union { + struct rnp_tx_queue_stats tx_stats; + struct rnp_rx_queue_stats rx_stats; + }; +} ____cacheline_internodealigned_in_smp; + +#define RING2ADAPT(ring) netdev_priv((ring)->netdev) + +enum rnp_ring_f_enum { + RING_F_NONE = 0, + RING_F_VMDQ, /* SR-IOV uses the same ring feature */ + RING_F_RSS, + RING_F_FDIR, + + RING_F_ARRAY_SIZE /* must be last in enum set */ +}; + +#define RNP_MAX_RSS_INDICES 128 +#define RNP_MAX_RSS_INDICES_UV3P 8 +#define RNP_MAX_VMDQ_INDICES 64 +#define RNP_MAX_FDIR_INDICES 63 /* based on q_vector limit */ +#define RNP_MAX_FCOE_INDICES 8 +#define MAX_RX_QUEUES (128) +#define MAX_TX_QUEUES (128) +struct rnp_ring_feature { + u16 limit; /* upper limit on feature indices */ + u16 indices; /* current value of indices */ + u16 mask; /* Mask used for feature to ring mapping */ + u16 offset; /* offset to start of feature */ +} ____cacheline_internodealigned_in_smp; + +#define RNP_n10_VMDQ_8Q_MASK 0x78 +#define RNP_n10_VMDQ_4Q_MASK 0x7C +#define RNP_n10_VMDQ_2Q_MASK 0x7E + +/* + * FCoE requires that all Rx buffers be over 2200 bytes in length. Since + * this is twice the size of a half page we need to double the page order + * for FCoE enabled Rx queues. 
+ */ +static inline unsigned int rnp_rx_bufsz(struct rnp_ring *ring) +{ + return (RNP_RXBUFFER_1536 - NET_IP_ALIGN); +} + +static inline unsigned int rnp_rx_pg_order(struct rnp_ring *ring) +{ + /* fixed 1 page */ + /* we don't support 3k buffer */ + return 0; +} +#define rnp_rx_pg_size(_ring) (PAGE_SIZE << rnp_rx_pg_order(_ring)) + +struct rnp_ring_container { + struct rnp_ring *ring; /* pointer to linked list of rings */ + unsigned long next_update; /* jiffies value of last update */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + unsigned int total_packets_old; + u16 work_limit; /* total work allowed per interrupt */ + u16 count; /* total number of rings in vector */ + u16 itr; /* current ITR/MSIX vector setting for ring */ + u16 add_itr; +}; + +/* iterator for handling rings in ring container */ +#define rnp_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + +#define MAX_RX_PACKET_BUFFERS ((adapter->flags & RNP_FLAG_DCB_ENABLED) ? 8 : 1) +#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS + +/* MAX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. + */ + +#define SUPPORT_IRQ_AFFINITY_CHANGE +struct rnp_q_vector { + int old_rx_count; + int new_rx_count; + int new_tx_count; + int large_times; + int small_times; + int too_small_times; + int middle_time; + int large_times_tx; + int small_times_tx; + int too_small_times_tx; + int middle_time_tx; + struct rnp_adapter *adapter; +#ifdef CONFIG_RNP_DCA + int cpu; /* CPU for DCA */ +#endif + int v_idx; + /* index of q_vector within array, also used for + * finding the bit in EICR and friends that + * represents the vector for this ring */ + u16 itr_rx; + u16 itr_tx; + struct rnp_ring_container rx, tx; + + struct napi_struct napi; +#ifdef HAVE_IRQ_AFFINITY_HINT + cpumask_t affinity_mask; +#endif /* HAVE_IRQ_AFFINITY_HINT */ +#ifdef HAVE_IRQ_AFFINITY_NOTIFY +#ifdef SUPPORT_IRQ_AFFINITY_CHANGE + struct irq_affinity_notify affinity_notify; +#endif /* SUPPORT_IRQ_AFFINITY_CHANGE */ +#endif /* HAVE_IRQ_AFFINITY_NOTIFY */ + int numa_node; + struct rcu_head rcu; /* to avoid race with update stats on free */ + + u32 vector_flags; +#define RNP_QVECTOR_FLAG_IRQ_MISS_CHECK ((u32)(1 << 0)) +#define RNP_QVECTOR_FLAG_ITR_FEATURE ((u32)(1 << 1)) +#define RNP_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS ((u32)(1 << 2)) + int irq_check_usecs; + struct hrtimer irq_miss_check_timer; + + char name[IFNAMSIZ + 9]; + + /* for dynamic allocation of rings associated with this q_vector */ + struct rnp_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + +static inline __le16 rnp_test_ext_cmd(union rnp_rx_desc *rx_desc, + const u16 stat_err_bits) +{ + return rx_desc->wb.rev1 & cpu_to_le16(stat_err_bits); +} + +#ifdef RNP_HWMON + +#define RNP_HWMON_TYPE_LOC 0 +#define RNP_HWMON_TYPE_TEMP 1 +#define RNP_HWMON_TYPE_CAUTION 2 +#define RNP_HWMON_TYPE_MAX 3 +#define RNP_HWMON_TYPE_NAME 4 + +struct hwmon_attr { + struct device_attribute dev_attr; + struct rnp_hw *hw; + struct rnp_thermal_diode_data *sensor; + char name[12]; +}; + +struct hwmon_buff { +#ifdef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS + struct attribute_group group; + const struct attribute_group *groups[2]; + struct attribute *attrs[RNP_MAX_SENSORS * 4 + 1]; + struct hwmon_attr hwmon_list[RNP_MAX_SENSORS * 4]; +#else + struct device *device; + struct hwmon_attr *hwmon_list; +#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */ + unsigned int n_hwmon; +}; +#endif /* 
RNPM_HWMON */ + +/* + rnp_test_staterr - tests bits in Rx descriptor status and error fields +*/ +static inline __le16 rnp_test_staterr(union rnp_rx_desc *rx_desc, + const u16 stat_err_bits) +{ + return rx_desc->wb.cmd & cpu_to_le16(stat_err_bits); +} + +static inline __le16 rnp_get_stat(union rnp_rx_desc *rx_desc, + const u16 stat_mask) +{ + return rx_desc->wb.cmd & cpu_to_le16(stat_mask); +} + +static inline u16 rnp_desc_unused(struct rnp_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; +} + +static inline u16 rnp_desc_unused_rx(struct rnp_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; +} + +#define RNP_RX_DESC(R, i) (&(((union rnp_rx_desc *)((R)->desc))[i])) +#define RNP_TX_DESC(R, i) (&(((struct rnp_tx_desc *)((R)->desc))[i])) +#define RNP_TX_CTXTDESC(R, i) (&(((struct rnp_tx_ctx_desc *)((R)->desc))[i])) + +#define RNP_MAX_JUMBO_FRAME_SIZE 9590 /* Maximum Supported Size 9.5KB */ +#define RNP_MIN_MTU 68 +#define RNP500_MAX_JUMBO_FRAME_SIZE 9722 /* Maximum Supported Size 9728 */ + +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR) + +/* default to trying for four seconds */ +#define RNP_TRY_LINK_TIMEOUT (4 * HZ) + +#define RNP_MAX_USER_PRIO (8) +#define RNP_MAX_TCS_NUM (4) +struct rnp_pfc_cfg { + u8 pfc_max; /* hardware can enabled max pfc channel */ + u8 hw_pfc_map; /* enable the prio channel bit */ + u8 pfc_num; /* at present enabled the pfc-channel num */ + u8 pfc_en; /* enabled the pfc feature or not */ +}; + +struct rnp_dcb_num_tcs { + u8 pg_tcs; + u8 pfc_tcs; +}; + +struct rnp_dcb_cfg { + u8 tc_num; + u16 delay; /* pause time */ + u8 dcb_en; /* enabled the dcb feature or not */ + u8 dcbx_mode; + struct rnp_pfc_cfg pfc_cfg; + struct rnp_dcb_num_tcs num_tcs; + /* statistic info */ + u64 requests[RNP_MAX_TCS_NUM]; + u64 indications[RNP_MAX_TCS_NUM]; + enum rnp_fc_mode last_lfc_mode; +}; + +struct rnp_pps_cfg { + bool available; + struct timespec64 start; + struct timespec64 period; +}; + +enum rss_func_mode_enum { + rss_func_top, + rss_func_xor, + rss_func_order, +}; + +enum outer_vlan_type_enum { + outer_vlan_type_88a8, +#ifdef ETH_P_QINQ1 + outer_vlan_type_9100, +#endif +#ifdef ETH_P_QINQ2 + outer_vlan_type_9200, +#endif + outer_vlan_type_max, +}; + +enum irq_mode_enum { + irq_mode_legency, + irq_mode_msi, + irq_mode_msix, +}; + +/* board specific private data structure */ +struct rnp_adapter { +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) +#ifdef HAVE_VLAN_RX_REGISTER + struct vlan_group *vlgrp; /* must be first, see rnp_receive_skb */ +#else + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + unsigned long active_vlans_stags[BITS_TO_LONGS(VLAN_N_VID)]; +#endif + +#endif /* NETIF_F_HW_VLAN_TX || NETIF_F_HW_VLAN_CTAG_TX */ + /* OS defined structs */ + u16 vf_vlan; + u16 vlan_count; + int miss_time; + struct net_device *netdev; + struct pci_dev *pdev; + bool quit_poll_thread; + struct task_struct *rx_poll_thread; + unsigned long state; +#ifdef HAVE_TX_MQ +#ifndef HAVE_NETDEV_SELECT_QUEUE + unsigned int indices; +#endif +#endif + spinlock_t link_stat_lock; + + /* this var is used for auto itr modify */ + /* hw not Supported well */ + unsigned long last_moder_packets[MAX_RX_QUEUES]; + unsigned long last_moder_tx_packets; + unsigned long last_moder_bytes[MAX_RX_QUEUES]; + unsigned long last_moder_jiffies; + int last_moder_time[MAX_RX_QUEUES]; + /* only rx itr is 
Supported */ + int usecendcount; + u16 rx_usecs; + u16 rx_usecs_usr_set; + u16 rx_frames; + u16 usecstocount; + u16 tx_frames; + u16 tx_usecs; + u16 tx_usecs_usr_set; + u32 pkt_rate_low; + u16 rx_usecs_low; + u32 pkt_rate_high; + u16 rx_usecs_high; + u32 sample_interval; + u32 adaptive_rx_coal; + u32 adaptive_tx_coal; + u32 auto_rx_coal; + int napi_budge; + union { + int phy_addr; + struct { + u8 mod_abs : 1; + u8 fault : 1; + u8 tx_dis : 1; + u8 los : 1; + } sfp; + }; + + struct { + u32 main; + u32 pre; + u32 post; + u32 tx_boost; + } si; + + int speed; + + u8 an : 1; + u8 fec : 1; + u8 link_traing : 1; + u8 duplex : 1; + u8 rpu_inited : 1; + + /* Some features need tri-state capability, + * thus the additional *_CAPABLE flags. + */ + u32 vf_num_for_pf; + u32 flags; +#define RNP_FLAG_MSI_CAPABLE ((u32)(1 << 0)) +#define RNP_FLAG_MSI_ENABLED ((u32)(1 << 1)) +#define RNP_FLAG_MSIX_CAPABLE ((u32)(1 << 2)) +#define RNP_FLAG_MSIX_ENABLED ((u32)(1 << 3)) +#define RNP_FLAG_RX_1BUF_CAPABLE ((u32)(1 << 4)) +#define RNP_FLAG_RX_PS_CAPABLE ((u32)(1 << 5)) +#define RNP_FLAG_RX_PS_ENABLED ((u32)(1 << 6)) +#define RNP_FLAG_IN_NETPOLL ((u32)(1 << 7)) +#define RNP_FLAG_DCA_ENABLED ((u32)(1 << 8)) +#define RNP_FLAG_DCA_CAPABLE ((u32)(1 << 9)) +#define RNP_FLAG_IMIR_ENABLED ((u32)(1 << 10)) +#define RNP_FLAG_MQ_CAPABLE ((u32)(1 << 11)) +#define RNP_FLAG_DCB_ENABLED ((u32)(1 << 12)) +#define RNP_FLAG_VMDQ_CAPABLE ((u32)(1 << 13)) +#define RNP_FLAG_VMDQ_ENABLED ((u32)(1 << 14)) +#define RNP_FLAG_FAN_FAIL_CAPABLE ((u32)(1 << 15)) +#define RNP_FLAG_NEED_LINK_UPDATE ((u32)(1 << 16)) +#define RNP_FLAG_NEED_LINK_CONFIG ((u32)(1 << 17)) +#define RNP_FLAG_FDIR_HASH_CAPABLE ((u32)(1 << 18)) +#define RNP_FLAG_FDIR_PERFECT_CAPABLE ((u32)(1 << 19)) +#define RNP_FLAG_FCOE_CAPABLE ((u32)(1 << 20)) +#define RNP_FLAG_FCOE_ENABLED ((u32)(1 << 21)) +#define RNP_FLAG_SRIOV_CAPABLE ((u32)(1 << 22)) +#define RNP_FLAG_SRIOV_ENABLED ((u32)(1 << 23)) +#define RNP_FLAG_VXLAN_OFFLOAD_CAPABLE ((u32)(1 << 24)) +#define RNP_FLAG_VXLAN_OFFLOAD_ENABLE ((u32)(1 << 25)) +#define RNP_FLAG_SWITCH_LOOPBACK_EN ((u32)(1 << 26)) +#define RNP_FLAG_SRIOV_INIT_DONE ((u32)(1 << 27)) +#define RNP_FLAG_IN_IRQ ((u32)(1 << 28)) +#define RNP_FLAG_VF_INIT_DONE ((u32)(1 << 29)) +#define RNP_FLAG_LEGACY_CAPABLE ((u32)(1 << 30)) +#define RNP_FLAG_LEGACY_ENABLED ((u32)(1 << 31)) + u32 flags2; +#define RNP_FLAG2_RSC_CAPABLE ((u32)(1 << 0)) +#define RNP_FLAG2_RSC_ENABLED ((u32)(1 << 1)) +#define RNP_FLAG2_TEMP_SENSOR_CAPABLE ((u32)(1 << 2)) +#define RNP_FLAG2_TEMP_SENSOR_EVENT ((u32)(1 << 3)) +#define RNP_FLAG2_SEARCH_FOR_SFP ((u32)(1 << 4)) +#define RNP_FLAG2_SFP_NEEDS_RESET ((u32)(1 << 5)) +#define RNP_FLAG2_RESET_REQUESTED ((u32)(1 << 6)) +#define RNP_FLAG2_FDIR_REQUIRES_REINIT ((u32)(1 << 7)) +#define RNP_FLAG2_RSS_FIELD_IPV4_UDP ((u32)(1 << 8)) +#define RNP_FLAG2_RSS_FIELD_IPV6_UDP ((u32)(1 << 9)) +#define RNP_FLAG2_PTP_ENABLED ((u32)(1 << 10)) +#define RNP_FLAG2_PTP_PPS_ENABLED ((u32)(1 << 11)) +#define RNP_FLAG2_BRIDGE_MODE_VEB ((u32)(1 << 12)) +#define RNP_FLAG2_VLAN_STAGS_ENABLED ((u32)(1 << 13)) +#define RNP_FLAG2_UDP_TUN_REREG_NEEDED ((u32)(1 << 14)) +#define RNP_FLAG2_RESET_PF ((u32)(1 << 15)) +#define RNP_FLAG2_CHKSM_FIX ((u32)(1 << 16)) + + u32 priv_flags; +#define RNP_PRIV_FLAG_MAC_LOOPBACK BIT(0) +#define RNP_PRIV_FLAG_SWITCH_LOOPBACK BIT(1) +#define RNP_PRIV_FLAG_VEB_ENABLE BIT(2) +#define RNP_PRIV_FLAG_FT_PADDING BIT(3) +#define RNP_PRIV_FLAG_PADDING_DEBUG BIT(4) +#define RNP_PRIV_FLAG_PTP_DEBUG BIT(5) +#define RNP_PRIV_FLAG_SIMUATE_DOWN 
BIT(6) +#define RNP_PRIV_FLAG_VXLAN_INNER_MATCH BIT(7) +#define RNP_PRIV_FLAG_ULTRA_SHORT BIT(8) +#define RNP_PRIV_FLAG_DOUBLE_VLAN BIT(9) +#define RNP_PRIV_FLAG_TCP_SYNC BIT(10) +#define RNP_PRIV_FLAG_PAUSE_OWN BIT(11) +#define RNP_PRIV_FLAG_JUMBO BIT(12) +#define RNP_PRIV_FLAG_TX_PADDING BIT(13) +#define RNP_PRIV_FLAG_RX_ALL BIT(14) +#define RNP_PRIV_FLAG_REC_HDR_LEN_ERR BIT(15) +#define RNP_PRIV_FLAG_RX_FCS BIT(16) +#define RNP_PRIV_FLAG_DOUBLE_VLAN_RECEIVE BIT(17) +#define RNP_PRIV_FLGA_TEST_TX_HANG BIT(18) +#define RNP_PRIV_FLAG_RX_SKIP_EN BIT(19) +#define RNP_PRIV_FLAG_TCP_SYNC_PRIO BIT(20) +#define RNP_PRIV_FLAG_REMAP_PRIO BIT(21) +#define RNP_PRIV_FLAG_8023_PRIO BIT(22) +#define RNP_PRIV_FLAG_SRIOV_VLAN_MODE BIT(23) +#define RNP_PRIV_FLAG_REMAP_MODE BIT(24) +#define RNP_PRIV_FLAG_LLDP_EN_STAT BIT(25) +#define RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE BIT(26) +#define RNP_PRIV_FLAG_LINK_DOWN_BEFORE BIT(27) +#define RNP_PRIV_FLAG_OLD_VF_QUEUE BIT(28) + +#define PRIV_DATA_EN BIT(7) + int rss_func_mode; + int outer_vlan_type; + int tcp_sync_queue; + int priv_skip_count; + u64 rx_drop_status; + int drop_time; + /* Tx fast path data */ + unsigned int num_tx_queues; + unsigned int max_ring_pair_counts; + u16 tx_work_limit; +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) + __be16 vxlan_port; +#endif /* HAVE_UDP_ENC_RX_OFFLAD || HAVE_VXLAN_RX_OFFLOAD */ +#ifdef HAVE_UDP_ENC_RX_OFFLOAD + __be16 geneve_port; +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ + /* Rx fast path data */ + int num_rx_queues; + u16 rx_itr_setting; + u32 eth_queue_idx; + u32 max_rate[MAX_TX_QUEUES]; + /* TX */ + struct rnp_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; + int tx_ring_item_count; + u64 restart_queue; + u64 lsc_int; + u32 tx_timeout_count; + /* RX */ + struct rnp_ring *rx_ring[MAX_RX_QUEUES]; + int rx_ring_item_count; + u64 hw_csum_rx_error; + u64 hw_csum_rx_good; + u64 hw_rx_no_dma_resources; + u64 rsc_total_count; + u64 rsc_total_flush; + u64 non_eop_descs; + u32 alloc_rx_page_failed; + u32 alloc_rx_buff_failed; + int num_other_vectors; + int irq_mode; + struct rnp_q_vector *q_vector[MAX_Q_VECTORS]; + /* used for IEEE 1588 ptp clock start */ + u8 __iomem *ptp_addr; + int gmac4; + const struct rnp_hwtimestamp *hwts_ops; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_ops; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + u32 ptp_config_value; + spinlock_t ptp_lock; /* Used to protect the SYSTIME registers. 
*/ + u64 clk_ptp_rate; /* uint is HZ 1MHz=1 000 000Hz */ + u32 sub_second_inc; + u32 systime_flags; + struct timespec64 ptp_prev_hw_time; + unsigned int default_addend; + bool ptp_tx_en; + bool ptp_rx_en; + struct work_struct tx_hwtstamp_work; + unsigned long tx_hwtstamp_start; + unsigned long tx_hwtstamp_skipped; + unsigned long tx_timeout_factor; + u64 tx_hwtstamp_timeouts; + /*used for IEEE 1588 ptp clock end */ + /* DCB parameters */ + struct rnp_dcb_cfg dcb_cfg; + u8 prio_tc_map[RNP_MAX_USER_PRIO * 2]; + u8 num_tc; + int num_q_vectors; /* current number of q_vectors for device */ + int max_q_vectors; /* true count of q_vectors for device */ + struct rnp_ring_feature ring_feature[RING_F_ARRAY_SIZE]; + struct msix_entry *msix_entries; + u32 test_icr; + struct rnp_ring test_tx_ring; + struct rnp_ring test_rx_ring; + /* structs defined in rnp_hw.h */ + struct rnp_hw hw; + u16 msg_enable; + struct rnp_hw_stats hw_stats; + u64 tx_busy; + u32 link_speed; + bool link_up; + bool duplex_status; + u32 link_speed_old; + bool link_up_old; + bool duplex_old; + unsigned long link_check_timeout; + struct timer_list service_timer; + struct work_struct service_task; + /* fdir relative */ + struct hlist_head fdir_filter_list; + unsigned long fdir_overflow; /* number of times ATR was backed off */ + union rnp_atr_input fdir_mask; + int fdir_mode; + int fdir_filter_count; + int layer2_count; + int tuple_5_count; + u32 fdir_pballoc; + u32 atr_sample_rate; + spinlock_t fdir_perfect_lock; + u8 __iomem *io_addr_bar0; + u8 __iomem *io_addr; + u32 wol; + u16 bd_number; + u16 q_vector_off; + u16 eeprom_verh; + u16 eeprom_verl; + u16 eeprom_cap; + u16 stags_vid; + u32 sysfs_tx_ring_num; + u32 sysfs_rx_ring_num; + u32 sysfs_tx_desc_num; + u32 sysfs_rx_desc_num; + u32 interrupt_event; + u32 led_reg; + /* maintain */ + char *maintain_buf; + int maintain_buf_len; + void *maintain_dma_buf; + dma_addr_t maintain_dma_phy; + int maintain_dma_size; + int maintain_in_bytes; + /* SR-IOV */ + DECLARE_BITMAP(active_vfs, RNP_MAX_VF_FUNCTIONS); + unsigned int num_vfs; + struct vf_data_storage *vfinfo; + int vf_rate_link_speed; + struct vf_macvlans vf_mvs; + struct vf_macvlans *mv_list; + u32 timer_event_accumulator; + u32 vferr_refcount; + struct kobject *info_kobj; +#ifdef RNP_SYSFS +#ifdef RNP_HWMON +#ifdef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS + struct hwmon_buff *rnp_hwmon_buff; +#else + struct hwmon_buff rnp_hwmon_buff; +#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */ +#endif /* RNP_HWMON */ +#endif /* RNPM_SYSFS */ +#ifdef CONFIG_DEBUG_FS + struct dentry *rnp_dbg_adapter; +#endif /*CONFIG_DEBUG_FS*/ + u8 default_up; + u8 port; /* nr_pf_port: 0 or 1 */ + u8 portid_of_card; /* port num in card*/ +#define RNP_MAX_RETA_ENTRIES 512 + u8 rss_indir_tbl[RNP_MAX_RETA_ENTRIES]; +#define RNP_MAX_TC_ENTRIES 8 + u8 rss_tc_tbl[RNP_MAX_TC_ENTRIES]; + int rss_indir_tbl_num; + int rss_tc_tbl_num; + u32 rss_tbl_setup_flag; +#define RNP_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ + u8 rss_key[RNP_RSS_KEY_SIZE]; + u32 rss_key_setup_flag; + u32 sysfs_is_phy_ext_reg; + u32 sysfs_phy_reg; + u32 sysfs_bar4_reg_val; + u32 sysfs_bar4_reg_addr; + u32 sysfs_pcs_lane_num; + int sysfs_input_arg_cnt; + bool dma2_in_1pf; + char name[60]; + void *csl_dma_buf; + dma_addr_t csl_dma_phy; + int csl_dma_size; +}; + +struct device_list_own { + unsigned short vendor; + unsigned short device; +}; + +struct rnp_fdir_filter { + struct hlist_node fdir_node; + union rnp_atr_input filter; + u16 sw_idx; + u16 hw_idx; + u32 vf_num; + u64 action; +}; + 
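+/* The __RNP_* values below are intended as bit numbers for the adapter->state
+ * bitmap (tested and set with test_bit()/set_bit()), in the same way the
+ * per-ring __RNP_TX_DETECT_HANG bit is used with ring->state by the
+ * check_for_tx_hang()/set_check_for_tx_hang() helpers above.
+ */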
+enum rnp_state_t { + __RNP_TESTING, + __RNP_RESETTING, + __RNP_DOWN, + __RNP_SERVICE_SCHED, + __RNP_IN_SFP_INIT, + __RNP_READ_I2C, + __RNP_PTP_TX_IN_PROGRESS, + __RNP_USE_VFINFI, + __RNP_IN_IRQ, + __RNP_REMOVE, + __RNP_SERVICE_CHECK, +}; + +struct rnp_cb { + union { /* Union defining head/tail partner */ + struct sk_buff *head; + struct sk_buff *tail; + }; + dma_addr_t dma; + u16 append_cnt; + bool page_released; +}; +#define RNP_CB(skb) ((struct rnp_cb *)(skb)->cb) + +enum rnp_boards { + board_n10_709_1pf_2x10G, + board_vu440s, + board_n10, + board_n400, +}; + +#if IS_ENABLED(CONFIG_DCB) +extern const struct dcbnl_rtnl_ops dcbnl_ops; +#endif + +extern char rnp_driver_name[]; +extern const char rnp_driver_version[]; + +extern void rnp_up(struct rnp_adapter *adapter); +extern void rnp_down(struct rnp_adapter *adapter); +extern void rnp_reinit_locked(struct rnp_adapter *adapter); +extern void rnp_reset(struct rnp_adapter *adapter); +extern void rnp_set_ethtool_ops(struct net_device *netdev); +extern int rnp_setup_rx_resources(struct rnp_ring *, struct rnp_adapter *); +extern int rnp_setup_tx_resources(struct rnp_ring *, struct rnp_adapter *); +extern void rnp_free_rx_resources(struct rnp_ring *); +extern void rnp_free_tx_resources(struct rnp_ring *); +extern void rnp_configure_rx_ring(struct rnp_adapter *, struct rnp_ring *); +extern void rnp_configure_tx_ring(struct rnp_adapter *, struct rnp_ring *); +extern void rnp_disable_rx_queue(struct rnp_adapter *adapter, + struct rnp_ring *); +extern void rnp_update_stats(struct rnp_adapter *adapter); +extern int rnp_init_interrupt_scheme(struct rnp_adapter *adapter); +extern int rnp_wol_supported(struct rnp_adapter *adapter, u16 device_id, + u16 subdevice_id); +extern void rnp_clear_interrupt_scheme(struct rnp_adapter *adapter); +extern netdev_tx_t rnp_xmit_frame_ring(struct sk_buff *, struct rnp_adapter *, + struct rnp_ring *, bool); +extern int rnp_poll(struct napi_struct *napi, int budget); +extern int ethtool_ioctl(struct ifreq *ifr); +extern void rnp_release_hw_control(struct rnp_adapter *adapter); +extern void rnp_get_hw_control(struct rnp_adapter *adapter); +extern s32 rnp_fdir_write_perfect_filter(int fdir_mode, struct rnp_hw *hw, + union rnp_atr_input *filter, u16 hw_id, + u8 queue, bool prio_flag); +extern void rnp_set_rx_mode(struct net_device *netdev); +#ifdef CONFIG_RNP_DCB +extern void rnp_set_rx_drop_en(struct rnp_adapter *adapter); +#endif +extern int rnp_setup_tx_maxrate(struct rnp_ring *tx_ring, u64 max_rate, + int sample_interval); +extern int rnp_setup_tc(struct net_device *dev, u8 tc); +void rnp_check_options(struct rnp_adapter *adapter); +extern int rnp_open(struct net_device *netdev); +extern int rnp_close(struct net_device *netdev); +void rnp_tx_ctxtdesc(struct rnp_ring *tx_ring, u32 mss_len_vf_num, + u32 inner_vlan_tunnel_len, int ignore_vlan, bool crc_pad); +void rnp_maybe_tx_ctxtdesc(struct rnp_ring *tx_ring, + struct rnp_tx_buffer *first, u32 type_tucmd); +extern void rnp_store_reta(struct rnp_adapter *adapter); +extern void rnp_store_key(struct rnp_adapter *adapter); +extern int rnp_init_rss_key(struct rnp_adapter *adapter); +extern int rnp_init_rss_table(struct rnp_adapter *adapter); +extern void rnp_setup_dma_rx(struct rnp_adapter *adapter, int count_in_dw); +extern s32 rnp_fdir_erase_perfect_filter(int fdir_mode, struct rnp_hw *hw, + union rnp_atr_input *input, u16 hw_id); +extern u32 rnp_rss_indir_tbl_entries(struct rnp_adapter *adapter); +extern u32 rnp_tx_desc_unused_sw(struct rnp_ring *tx_ring); +extern u32 
rnp_tx_desc_unused_hw(struct rnp_hw *hw, struct rnp_ring *tx_ring); +extern s32 rnp_disable_rxr_maxrate(struct net_device *netdev, u8 queue_index); +extern s32 rnp_enable_rxr_maxrate(struct net_device *netdev, u8 queue_index, + u32 maxrate); +extern u32 rnp_rx_desc_used_hw(struct rnp_hw *hw, struct rnp_ring *rx_ring); +extern void rnp_do_reset(struct net_device *netdev); +#ifdef CONFIG_RNP_HWMON +extern void rnp_sysfs_exit(struct rnp_adapter *adapter); +extern int rnp_sysfs_init(struct rnp_adapter *adapter); +#endif /* CONFIG_RNP_HWMON */ +#ifdef CONFIG_DEBUG_FS +extern void rnp_dbg_adapter_init(struct rnp_adapter *adapter); +extern void rnp_dbg_adapter_exit(struct rnp_adapter *adapter); +extern void rnp_dbg_init(void); +extern void rnp_dbg_exit(void); +#else +static inline void rnp_dbg_adapter_init(struct rnp_adapter *adapter) +{ +} +static inline void rnp_dbg_adapter_exit(struct rnp_adapter *adapter) +{ +} +static inline void rnp_dbg_init(void) +{ +} +static inline void rnp_dbg_exit(void) +{ +} +#endif /* CONFIG_DEBUG_FS */ +static inline struct netdev_queue *txring_txq(const struct rnp_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} + +void rnp_service_event_schedule(struct rnp_adapter *adapter); +extern void rnp_ptp_init(struct rnp_adapter *adapter); +extern void rnp_ptp_stop(struct rnp_adapter *adapter); +extern void rnp_ptp_overflow_check(struct rnp_adapter *adapter); +extern void rnp_ptp_rx_hang(struct rnp_adapter *adapter); +extern void __rnp_ptp_rx_hwtstamp(struct rnp_q_vector *q_vector, + struct sk_buff *skb); +static inline void rnp_ptp_rx_hwtstamp(struct rnp_ring *rx_ring, + union rnp_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (unlikely(!rnp_test_staterr(rx_desc, RNP_RXD_STAT_PTP))) + return; + /* + * Update the last_rx_timestamp timer in order to enable watchdog check + * for error case of latched timestamp on a dropped packet. 
+ */ + rx_ring->last_rx_timestamp = jiffies; +} + +static inline int ignore_veb_vlan(struct rnp_adapter *adapter, + union rnp_rx_desc *rx_desc) +{ + if (unlikely((adapter->flags & RNP_FLAG_SRIOV_ENABLED) && + (cpu_to_le16(rx_desc->wb.rev1) & VEB_VF_IGNORE_VLAN))) { + return 1; + } + return 0; +} + +static inline int ignore_veb_pkg_err(struct rnp_adapter *adapter, + union rnp_rx_desc *rx_desc) +{ + if (unlikely((adapter->flags & RNP_FLAG_SRIOV_ENABLED) && + (cpu_to_le16(rx_desc->wb.rev1) & VEB_VF_PKG))) { + return 1; + } + return 0; +} + +int rnp_update_ethtool_fdir_entry(struct rnp_adapter *adapter, + struct rnp_fdir_filter *input, u16 sw_idx); + +static inline int rnp_is_pf1(struct rnp_hw *hw) +{ + return !!(hw->pfvfnum & BIT(PF_BIT)); +} + +static inline int rnp_is_pf0(struct rnp_hw *hw) +{ + return !rnp_is_pf1(hw); +} + +static inline int rnp_get_fuc(struct pci_dev *pdev) +{ + return pdev->devfn; +} + +extern void rnp_service_task(struct work_struct *work); +extern void rnp_sysfs_exit(struct rnp_adapter *adapter); +extern int rnp_sysfs_init(struct rnp_adapter *adapter); + +#ifdef CONFIG_PCI_IOV +void rnp_sriov_reinit(struct rnp_adapter *adapter); +#endif + +#define SET_BIT(n, var) (var = (var | (1 << n))) +#define CLR_BIT(n, var) (var = (var & (~(1 << n)))) +#define CHK_BIT(n, var) (var & (1 << n)) + +#ifdef HAVE_STRUCT_DMA_ATTRS +#define RNP_RX_DMA_ATTR NULL +#else +#define RNP_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +#endif + +static inline bool rnp_removed(void __iomem *addr) +{ + return unlikely(!addr); +} +#define RNP_REMOVED(a) rnp_removed(a) +int rnp_fw_msg_handler(struct rnp_adapter *adapter); + +int rnp500_fw_update(struct rnp_hw *hw, int partition, const u8 *fw_bin, + int bytes); + +int rnp_fw_update(struct rnp_hw *hw, int partition, const u8 *fw_bin, + int bytes); +#define RNPM_FW_VERSION_NEW_ETHTOOL 0x00050010 +static inline bool rnp_fw_is_old_ethtool(struct rnp_hw *hw) +{ + return hw->fw_version >= RNPM_FW_VERSION_NEW_ETHTOOL ? false : true; +} + +#endif /* _RNP_H_ */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_common.c b/drivers/net/ethernet/mucse/rnp/rnp_common.c new file mode 100755 index 0000000000000000000000000000000000000000..6365f38fc6f86857c929a47731ec9063e69e80d2 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_common.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include +#include +#include +#include +#include + +#include "rnp.h" +#include "rnp_common.h" +#include "rnp_phy.h" +#include "rnp_mbx.h" + +unsigned int rnp_loglevel; +module_param(rnp_loglevel, uint, S_IRUSR | S_IWUSR); + + diff --git a/drivers/net/ethernet/mucse/rnp/rnp_common.h b/drivers/net/ethernet/mucse/rnp/rnp_common.h new file mode 100755 index 0000000000000000000000000000000000000000..513aa32a364992ed924c3a8c941abd12f5a59ffd --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_common.h @@ -0,0 +1,385 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef _RNP_COMMON_H_ +#define _RNP_COMMON_H_ + +#include +#include +#include "rnp_type.h" +#include "rnp.h" +#include "rnp_regs.h" +#include "rnp_compat.h" + +struct rnp_adapter; + +#define TRACE() printk(KERN_DEBUG "==[ %s %d ] ==\n", __func__, __LINE__) + +#ifdef CONFIG_RNP_RX_DEBUG +#define rx_debug_printk printk +#define rx_buf_dump buf_dump +#define rx_dbg(fmt, args...) \ + printk(KERN_DEBUG "[ %s:%d ] " fmt, __func__, __LINE__, ##args) +#else +#define rx_debug_printk(fmt, args...) 
+#define rx_buf_dump(a, b, c) +#define rx_dbg(fmt, args...) +#endif //CONFIG_RNP_RX_DEBUG + +#ifdef CONFIG_RNP_TX_DEBUG +#define desc_hex_dump(msg, buf, len) \ + print_hex_dump(KERN_WARNING, msg, DUMP_PREFIX_OFFSET, 16, 1, (buf), \ + (len), false) +#define rnp_skb_dump _rnp_skb_dump + +#define tx_dbg(fmt, args...) \ + printk(KERN_DEBUG "[ %s:%d ] " fmt, __func__, __LINE__, ##args) +#else +#define desc_hex_dump(msg, buf, len) +#define rnp_skb_dump(skb, full_pkt) +#define tx_dbg(fmt, args...) +#endif //CONFIG_RNP_TX_DEBUG + +#ifdef DEBUG +#define dbg(fmt, args...) \ + printk(KERN_DEBUG "[ %s:%d ] " fmt, __func__, __LINE__, ##args) +#else +#define dbg(fmt, args...) +#endif + +#ifdef CONFIG_RNP_VF_DEBUG +#define vf_dbg(fmt, args...) \ + printk(KERN_DEBUG "[ %s:%d ] " fmt, __func__, __LINE__, ##args) +#else +#define vf_dbg(fmt, args...) +#endif + +int rnp_acquire_msix_vectors(struct rnp_adapter *adapter, int vectors); + +//================= registers read/write helper ===== +#define p_rnp_wr_reg(reg, val) \ + do { \ + printk(KERN_DEBUG " wr-reg: %p <== 0x%08x \t#%-4d %s\n", \ + (reg), (val), __LINE__, __FILE__); \ + iowrite32((val), (void *)(reg)); \ + } while (0) + +static inline unsigned int prnp_rd_reg(void *reg) +{ + unsigned int v = ioread32((void *)(reg)); + + printk(KERN_DEBUG " %p => 0x%08x\n", reg, v); + return v; +} + +#ifdef IO_PRINT +static inline unsigned int rnp_rd_reg(void *reg) +{ + unsigned int v = ioread32((void *)(reg)); + + dbg(" rd-reg: %p <== 0x%08x\n", reg, v); + return v; +} +#define rnp_wr_reg(reg, val) \ + do { \ + dbg(" wr-reg: %p <== 0x%08x \t#%-4d %s\n", (reg), (val), \ + __LINE__, __FILE__); \ + iowrite32((val), (void *)(reg)); \ + } while (0) +#else +#define rnp_rd_reg(reg) readl((void *)(reg)) +#define rnp_wr_reg(reg, val) writel((val), (void *)(reg)) +#endif + +#define rd32(hw, off) rnp_rd_reg((hw)->hw_addr + (off)) +#define wr32(hw, off, val) rnp_wr_reg((hw)->hw_addr + (off), (val)) + +#define nic_rd32(nic, off) rnp_rd_reg((nic)->nic_base_addr + (off)) +#define nic_wr32(nic, off, val) rnp_wr_reg((nic)->nic_base_addr + (off), (val)) + +#define dma_rd32(dma, off) rnp_rd_reg((dma)->dma_base_addr + (off)) +#define dma_wr32(dma, off, val) rnp_wr_reg((dma)->dma_base_addr + (off), (val)) + +#define dma_ring_rd32(dma, off) rnp_rd_reg((dma)->dma_ring_addr + (off)) +#define dma_ring_wr32(dma, off, val) \ + rnp_wr_reg((dma)->dma_ring_addr + (off), (val)) + +#define eth_rd32(eth, off) rnp_rd_reg((eth)->eth_base_addr + (off)) +#define eth_wr32(eth, off, val) rnp_wr_reg((eth)->eth_base_addr + (off), (val)) + +#define mac_rd32(mac, off) rnp_rd_reg((mac)->mac_addr + (off)) +#define mac_wr32(mac, off, val) rnp_wr_reg((mac)->mac_addr + (off), (val)) +#ifdef debug_ring +static inline unsigned int rnp_rd_reg_1(int ring, u32 off, void *reg) +{ + unsigned int v = ioread32((void *)(reg)); + + printk(KERN_DEBUG "%d rd-reg: %x <== 0x%08x\n", ring, off, v); + return v; +} + +#define ring_rd32(ring, off) \ + rnp_rd_reg_1(ring->rnp_queue_idx, off, (ring)->ring_addr + (off)) +#define ring_wr32(ring, off, val) rnp_wr_reg((ring)->ring_addr + (off), (val)) +#else +#define ring_rd32(ring, off) rnp_rd_reg((ring)->ring_addr + (off)) +#define ring_wr32(ring, off, val) rnp_wr_reg((ring)->ring_addr + (off), (val)) +#endif + +#define pwr32(hw, off, val) p_rnp_wr_reg((hw)->hw_addr + (off), (val)) + +#define rnp_mbx_rd(hw, off) rnp_rd_reg((hw)->ring_msix_base + (off)) +#define rnp_mbx_wr(hw, off, val) rnp_wr_reg((hw)->ring_msix_base + (off), val) + +static inline void hw_queue_strip_rx_vlan(struct 
rnp_hw *hw, u8 ring_num, + bool enable) +{ + u32 reg = RNP_ETH_VLAN_VME_REG(ring_num / 32); + u32 offset = ring_num % 32; + u32 data = rd32(hw, reg); + + if (enable == true) + data |= (1 << offset); + else + data &= ~(1 << offset); + wr32(hw, reg, data); +} + +#define rnp_set_reg_bit(hw, reg_def, bit) \ + do { \ + u32 reg = reg_def; \ + u32 value = rd32(hw, reg); \ + dbg("before set %x %x\n", reg, value); \ + value |= (0x01 << bit); \ + dbg("after set %x %x\n", reg, value); \ + wr32(hw, reg, value); \ + } while (0) + +#define rnp_clr_reg_bit(hw, reg_def, bit) \ + do { \ + u32 reg = reg_def; \ + u32 value = rd32(hw, reg); \ + dbg("before clr %x %x\n", reg, value); \ + value &= (~(0x01 << bit)); \ + dbg("after clr %x %x\n", reg, value); \ + wr32(hw, reg, value); \ + } while (0) + +#define rnp_vlan_filter_on(hw) \ + rnp_set_reg_bit(hw, RNP_ETH_VLAN_FILTER_ENABLE, 30) +#define rnp_vlan_filter_off(hw) \ + rnp_clr_reg_bit(hw, RNP_ETH_VLAN_FILTER_ENABLE, 30) + +#define DPRINTK(nlevel, klevel, fmt, args...) \ + ((NETIF_MSG_##nlevel & adapter->msg_enable) ? \ + (void)(netdev_printk(KERN_##klevel, adapter->netdev, fmt, \ + ##args)) : \ + NULL) + +//==== log helper === +#ifdef HW_DEBUG +#define hw_dbg(hw, fmt, args...) printk(KERN_DEBUG "hw-dbg : " fmt, ##args) +#define eth_dbg(eth, fmt, args...) printk(KERN_DEBUG "hw-dbg : " fmt, ##args) +#else +#define hw_dbg(hw, fmt, args...) +#define eth_dbg(hw, fmt, args...) +#endif + +//#define RNP_DEBUG_OPEN +#ifdef RNP_DEBUG_OPEN +#define rnp_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args) +#else +#define rnp_dbg(fmt, args...) +#endif +#define rnp_info(fmt, args...) printk(KERN_DEBUG "rnp-info: " fmt, ##args) +#define rnp_warn(fmt, args...) printk(KERN_DEBUG "rnp-warn: " fmt, ##args) +#define rnp_err(fmt, args...) printk(KERN_ERR "rnp-err : " fmt, ##args) + +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ##arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ##arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ##arg) +#define e_crit(msglvl, format, arg...) \ + netif_crit(adapter, msglvl, adapter->netdev, format, ##arg) + +#define e_dev_info(format, arg...) dev_info(&adapter->pdev->dev, format, ##arg) +#define e_dev_warn(format, arg...) dev_warn(&adapter->pdev->dev, format, ##arg) +#define e_dev_err(format, arg...) 
dev_err(&adapter->pdev->dev, format, ##arg) + +#ifdef CONFIG_RNP_TX_DEBUG +static inline void buf_dump_line(const char *msg, int line, void *buf, int len) +{ + int i, offset = 0; + int msg_len = 1024; + u8 msg_buf[1024]; + u8 *ptr = (u8 *)buf; + + offset += snprintf(msg_buf + offset, msg_len, + "=== %s #%d line:%d buf:%p==\n000: ", msg, len, line, + buf); + + for (i = 0; i < len; ++i) { + if ((i != 0) && (i % 16) == 0 && (offset >= (1024 - 10 * 16))) { + printk(KERN_DEBUG "%s\n", msg_buf); + offset = 0; + } + + if ((i != 0) && (i % 16) == 0) { + offset += snprintf(msg_buf + offset, msg_len, + "\n%03x: ", i); + } + offset += snprintf(msg_buf + offset, msg_len, "%02x ", ptr[i]); + } + + offset += snprintf(msg_buf + offset, msg_len, "\n"); + printk(KERN_DEBUG "%s\n", msg_buf); +} +#else +#define buf_dump_line(msg, line, buf, len) +#endif + +static inline __le64 build_ctob(u32 vlan_cmd, u32 mac_ip_len, u32 size) +{ + return cpu_to_le64(((u64)vlan_cmd << 32) | ((u64)mac_ip_len << 16) | + ((u64)size)); +} + +static inline void buf_dump(const char *msg, void *buf, int len) +{ + int i, offset = 0; + int msg_len = 1024; + u8 msg_buf[1024]; + u8 *ptr = (u8 *)buf; + + offset += snprintf(msg_buf + offset, msg_len, + "=== %s #%d ==\n000: ", msg, len); + + for (i = 0; i < len; ++i) { + if ((i != 0) && (i % 16) == 0 && (offset >= (1024 - 10 * 16))) { + printk(KERN_DEBUG "%s\n", msg_buf); + offset = 0; + } + + if ((i != 0) && (i % 16) == 0) { + offset += snprintf(msg_buf + offset, msg_len, + "\n%03x: ", i); + } + offset += snprintf(msg_buf + offset, msg_len, "%02x ", ptr[i]); + } + + offset += snprintf(msg_buf + offset, msg_len, "\n=== done ==\n"); + printk(KERN_DEBUG "%s\n", msg_buf); +} + +#ifndef NO_SKB_DUMP +static inline void _rnp_skb_dump(const struct sk_buff *skb, bool full_pkt) +{ + static atomic_t can_dump_full = ATOMIC_INIT(5); +#ifdef DEBUG + struct skb_shared_info *sh = skb_shinfo(skb); +#endif + struct net_device *dev = skb->dev; + //struct sock *sk = skb->sk; + struct sk_buff *list_skb; + bool has_mac, has_trans; + int headroom, tailroom; + int i, len, seg_len; + const char *level = KERN_WARNING; + + if (full_pkt) + full_pkt = atomic_dec_if_positive(&can_dump_full) >= 0; + + if (full_pkt) + len = skb->len; + else + len = min_t(int, skb->len, MAX_HEADER + 128); + + headroom = skb_headroom(skb); + tailroom = skb_tailroom(skb); + + has_mac = skb_mac_header_was_set(skb); + has_trans = skb_transport_header_was_set(skb); + + dbg("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n" + "mac=(%d,%d) net=(%d,%d) trans=%d\n" + "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n" + "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n" + "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n", + level, skb->len, headroom, skb_headlen(skb), tailroom, + has_mac ? skb->mac_header : -1, + has_mac ? (skb->network_header - skb->mac_header) : -1, + skb->network_header, has_trans ? skb_network_header_len(skb) : -1, + has_trans ? 
skb->transport_header : -1, sh->tx_flags, sh->nr_frags, + sh->gso_size, sh->gso_type, sh->gso_segs, skb->csum, skb->ip_summed, + skb->csum_complete_sw, skb->csum_valid, skb->csum_level, skb->hash, + skb->sw_hash, skb->l4_hash, ntohs(skb->protocol), skb->pkt_type, + skb->skb_iif); + + if (dev) { + dbg("%sdev name=%s feat=0x%pNF\n", level, dev->name, + &dev->features); + } + + seg_len = min_t(int, skb_headlen(skb), len); + if (seg_len) + print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET, 16, + 1, skb->data, seg_len, false); + len -= seg_len; + + for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + u32 p_len; + struct page *p; + u8 *vaddr; + + p = skb_frag_address(frag); + p_len = skb_frag_size(frag); + seg_len = min_t(int, p_len, len); + vaddr = kmap_atomic(p); + print_hex_dump(level, "skb frag: ", DUMP_PREFIX_OFFSET, 16, + 1, vaddr, seg_len, false); + kunmap_atomic(vaddr); + len -= seg_len; + if (!len) + break; + } + + if (full_pkt && skb_has_frag_list(skb)) { + dbg("skb fraglist:\n"); + skb_walk_frags(skb, list_skb) _rnp_skb_dump(list_skb, true); + } +} +#endif + +enum RNP_LOG_EVT { + LOG_MBX_IN, + LOG_MBX_OUT, + LOG_MBX_MSG_IN, + LOG_MBX_MSG_OUT, + LOG_LINK_EVENT, + LOG_ADPT_STAT, + LOG_MBX_ABLI, + LOG_MBX_LINK_STAT, + LOG_MBX_IFUP_DOWN, + LOG_MBX_LOCK, + LOG_ETHTOOL, + LOG_PHY, + +}; + +#define MII_BUSY 0x00000001 +#define MII_WRITE 0x00000002 +#define MII_DATA_MASK GENMASK(15, 0) + +extern unsigned int rnp_loglevel; + +#define rnp_logd(evt, fmt, args...) \ + do { \ + if (BIT(evt) & rnp_loglevel) { \ + printk(KERN_DEBUG fmt, ##args); \ + } \ + } while (0) + +#endif /* _RNP_COMMON_H_ */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_compat.c b/drivers/net/ethernet/mucse/rnp/rnp_compat.c new file mode 100755 index 0000000000000000000000000000000000000000..abf66885cd0263d8575d1771701948b2132b639c --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_compat.c @@ -0,0 +1,3095 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include "rnp_compat.h" + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 8)) || defined __VMKLNX__ +/* From lib/vsprintf.c */ +#include + +static int skip_atoi(const char **s) +{ + int i = 0; + + while (isdigit(**s)) + i = i * 10 + *((*s)++) - '0'; + return i; +} + +#define _kc_ZEROPAD 1 /* pad with zero */ +#define _kc_SIGN 2 /* unsigned/signed long */ +#define _kc_PLUS 4 /* show plus */ +#define _kc_SPACE 8 /* space if plus */ +#define _kc_LEFT 16 /* left justified */ +#define _kc_SPECIAL 32 /* 0x */ +#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ + +static char *number(char *buf, char *end, long long num, int base, int size, + int precision, int type) +{ + char c, sign, tmp[66]; + const char *digits; + const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; + const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; + int i; + + digits = (type & _kc_LARGE) ? large_digits : small_digits; + if (type & _kc_LEFT) + type &= ~_kc_ZEROPAD; + if (base < 2 || base > 36) + return 0; + c = (type & _kc_ZEROPAD) ? 
'0' : ' '; + sign = 0; + if (type & _kc_SIGN) { + if (num < 0) { + sign = '-'; + num = -num; + size--; + } else if (type & _kc_PLUS) { + sign = '+'; + size--; + } else if (type & _kc_SPACE) { + sign = ' '; + size--; + } + } + if (type & _kc_SPECIAL) { + if (base == 16) + size -= 2; + else if (base == 8) + size--; + } + i = 0; + if (num == 0) + tmp[i++] = '0'; + else + while (num != 0) + tmp[i++] = digits[do_div(num, base)]; + if (i > precision) + precision = i; + size -= precision; + if (!(type & (_kc_ZEROPAD + _kc_LEFT))) { + while (size-- > 0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + } + if (sign) { + if (buf <= end) + *buf = sign; + ++buf; + } + if (type & _kc_SPECIAL) { + if (base == 8) { + if (buf <= end) + *buf = '0'; + ++buf; + } else if (base == 16) { + if (buf <= end) + *buf = '0'; + ++buf; + if (buf <= end) + *buf = digits[33]; + ++buf; + } + } + if (!(type & _kc_LEFT)) { + while (size-- > 0) { + if (buf <= end) + *buf = c; + ++buf; + } + } + while (i < precision--) { + if (buf <= end) + *buf = '0'; + ++buf; + } + while (i-- > 0) { + if (buf <= end) + *buf = tmp[i]; + ++buf; + } + while (size-- > 0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + return buf; +} + +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args) +{ + int len; + unsigned long long num; + int i, base; + char *str, *end, c; + const char *s; + + int flags; /* flags to number() */ + + int field_width; /* width of output field */ + int precision; /* min. # of digits for integers; max + number of chars for from string */ + int qualifier; /* 'h', 'l', or 'L' for integer fields */ + /* 'z' support added 23/7/1999 S.H. */ + /* 'z' changed to 'Z' --davidm 1/25/99 */ + + str = buf; + end = buf + size - 1; + + if (end < buf - 1) { + end = ((void *)-1); + size = end - buf + 1; + } + + for (; *fmt; ++fmt) { + if (*fmt != '%') { + if (str <= end) + *str = *fmt; + ++str; + continue; + } + + /* process flags */ + flags = 0; +repeat: + ++fmt; /* this also skips first '%' */ + switch (*fmt) { + case '-': + flags |= _kc_LEFT; + goto repeat; + case '+': + flags |= _kc_PLUS; + goto repeat; + case ' ': + flags |= _kc_SPACE; + goto repeat; + case '#': + flags |= _kc_SPECIAL; + goto repeat; + case '0': + flags |= _kc_ZEROPAD; + goto repeat; + } + + /* get field width */ + field_width = -1; + if (isdigit(*fmt)) + field_width = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + field_width = va_arg(args, int); + if (field_width < 0) { + field_width = -field_width; + flags |= _kc_LEFT; + } + } + + /* get the precision */ + precision = -1; + if (*fmt == '.') { + ++fmt; + if (isdigit(*fmt)) + precision = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + precision = va_arg(args, int); + } + if (precision < 0) + precision = 0; + } + + /* get the conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt == 'Z') { + qualifier = *fmt; + ++fmt; + } + + /* default base */ + base = 10; + + switch (*fmt) { + case 'c': + if (!(flags & _kc_LEFT)) { + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + } + c = (unsigned char)va_arg(args, int); + if (str <= end) + *str = c; + ++str; + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 's': + s = va_arg(args, char *); + if (!s) + s = ""; + + len = strnlen(s, precision); + + if (!(flags & _kc_LEFT)) { + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + } + for (i = 0; i < len; 
++i) { + if (str <= end) + *str = *s; + ++str; + ++s; + } + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 'p': + if ('M' == *(fmt + 1)) { + str = get_mac(str, end, + va_arg(args, unsigned char *)); + fmt++; + } else { + if (field_width == -1) { + field_width = 2 * sizeof(void *); + flags |= _kc_ZEROPAD; + } + str = number(str, end, + (unsigned long)va_arg(args, + void *), + 16, field_width, precision, flags); + } + continue; + + case 'n': + /* FIXME: + * What does C99 say about the overflow case here? */ + if (qualifier == 'l') { + long *ip = va_arg(args, long *); + *ip = (str - buf); + } else if (qualifier == 'Z') { + size_t *ip = va_arg(args, size_t *); + *ip = (str - buf); + } else { + int *ip = va_arg(args, int *); + *ip = (str - buf); + } + continue; + + case '%': + if (str <= end) + *str = '%'; + ++str; + continue; + + /* integer number formats - set up the flags and "break" */ + case 'o': + base = 8; + break; + + case 'X': + flags |= _kc_LARGE; + case 'x': + base = 16; + break; + + case 'd': + case 'i': + flags |= _kc_SIGN; + case 'u': + break; + + default: + if (str <= end) + *str = '%'; + ++str; + if (*fmt) { + if (str <= end) + *str = *fmt; + ++str; + } else { + --fmt; + } + continue; + } + if (qualifier == 'L') + num = va_arg(args, long long); + else if (qualifier == 'l') { + num = va_arg(args, unsigned long); + if (flags & _kc_SIGN) + num = (signed long)num; + } else if (qualifier == 'Z') { + num = va_arg(args, size_t); + } else if (qualifier == 'h') { + num = (unsigned short)va_arg(args, int); + if (flags & _kc_SIGN) + num = (signed short)num; + } else { + num = va_arg(args, unsigned int); + if (flags & _kc_SIGN) + num = (signed int)num; + } + str = number(str, end, num, base, field_width, precision, + flags); + } + if (str <= end) + *str = '\0'; + else if (size > 0) + /* don't write out a null byte if the buf size is zero */ + *end = '\0'; + /* the trailing null byte doesn't count towards the total + * ++str; + */ + return str - buf; +} + +int _kc_snprintf(char *buf, size_t size, const char *fmt, ...) 
+{ + va_list args; + int i; + + va_start(args, fmt); + i = _kc_vsnprintf(buf, size, fmt, args); + va_end(args); + return i; +} +#endif /* < 2.4.8 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 13)) + +/**************************************/ +/* PCI DMA MAPPING */ + +#if defined(CONFIG_HIGHMEM) + +#ifndef PCI_DRAM_OFFSET +#define PCI_DRAM_OFFSET 0 +#endif + +u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, + unsigned long offset, size_t size, int direction) +{ + return (((u64)(page - mem_map) << PAGE_SHIFT) + offset + + PCI_DRAM_OFFSET); +} + +#else /* CONFIG_HIGHMEM */ + +u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, + unsigned long offset, size_t size, int direction) +{ + return pci_map_single(dev, (void *)page_address(page) + offset, size, + direction); +} + +#endif /* CONFIG_HIGHMEM */ + +void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, + int direction) +{ + return pci_unmap_single(dev, dma_addr, size, direction); +} + +#endif /* 2.4.13 => 2.4.3 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3)) + +/**************************************/ +/* PCI DRIVER API */ + +int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask) +{ + if (!pci_dma_supported(dev, mask)) + return -EIO; + dev->dma_mask = mask; + return 0; +} + +int _kc_pci_request_regions(struct pci_dev *dev, char *res_name) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) { + if (!request_region(pci_resource_start(dev, i), + pci_resource_len(dev, i), + res_name)) { + pci_release_regions(dev); + return -EBUSY; + } + } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) { + if (!request_mem_region(pci_resource_start(dev, i), + pci_resource_len(dev, i), + res_name)) { + pci_release_regions(dev); + return -EBUSY; + } + } + } + return 0; +} + +void _kc_pci_release_regions(struct pci_dev *dev) +{ + int i; + + for (i = 0; i < 6; i++) { + if (pci_resource_len(dev, i) == 0) + continue; + + if (pci_resource_flags(dev, i) & IORESOURCE_IO) + release_region(pci_resource_start(dev, i), + pci_resource_len(dev, i)); + + else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) + release_mem_region(pci_resource_start(dev, i), + pci_resource_len(dev, i)); + } +} + +/**************************************/ +/* NETWORK DRIVER API */ + +struct net_device *_kc_alloc_etherdev(int sizeof_priv) +{ + struct net_device *dev; + int alloc_size; + + alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31; + dev = kzalloc(alloc_size, GFP_KERNEL); + if (!dev) + return NULL; + + if (sizeof_priv) + dev->priv = (void *)(((unsigned long)(dev + 1) + 31) & ~31); + dev->name[0] = '\0'; + ether_setup(dev); + + return dev; +} + +int _kc_is_valid_ether_addr(u8 *addr) +{ + const char zaddr[6] = { + 0, + }; + + return !(addr[0] & 1) && memcmp(addr, zaddr, 6); +} + +#endif /* 2.4.3 => 2.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6)) + +int _kc_pci_set_power_state(struct pci_dev *dev, int state) +{ + return 0; +} + +int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable) +{ + return 0; +} + +#endif /* 2.4.6 => 2.4.3 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) 
+void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, + int off, int size) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + frag->page = page; + frag->page_offset = off; + frag->size = size; + skb_shinfo(skb)->nr_frags = i + 1; +} + +/* + * Original Copyright: + * find_next_bit.c: fallback find next bit implementation + * + * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG - 1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG - 1)) { + tmp = *(p++); + if (tmp) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + ffs(tmp); +} + +size_t _kc_strlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = strlen(src); + + if (size) { + size_t len = (ret >= size) ? size - 1 : ret; + memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; +} + +#ifndef do_div +#if BITS_PER_LONG == 32 +uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base) +{ + uint64_t rem = *n; + uint64_t b = base; + uint64_t res, d = 1; + uint32_t high = rem >> 32; + + /* Reduce the thing a bit first */ + res = 0; + if (high >= base) { + high /= base; + res = (uint64_t)high << 32; + rem -= (uint64_t)(high * base) << 32; + } + + while ((int64_t)b > 0 && b < rem) { + b = b + b; + d = d + d; + } + + do { + if (rem >= b) { + rem -= b; + res += d; + } + b >>= 1; + d >>= 1; + } while (d); + + *n = res; + return rem; +} +#endif /* BITS_PER_LONG == 32 */ +#endif /* do_div */ +#endif /* 2.6.0 => 2.4.6 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 4)) +int _kc_scnprintf(char *buf, size_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i = vsnprintf(buf, size, fmt, args); + va_end(args); + return (i >= size) ? 
(size - 1) : i; +} +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10)) +DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = { 1 }; +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 13)) +char *_kc_kstrdup(const char *s, unsigned int gfp) +{ + size_t len; + char *buf; + + if (!s) + return NULL; + + len = strlen(s) + 1; + buf = kmalloc(len, gfp); + if (buf) + memcpy(buf, s, len); + return buf; +} +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)) +void *_kc_kzalloc(size_t size, int flags) +{ + void *ret = kmalloc(size, flags); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif /* <= 2.6.13 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)) +int _kc_skb_pad(struct sk_buff *skb, int pad) +{ + int ntail; + + /* If the skbuff is non linear tailroom is always zero.. */ + if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { + memset(skb->data + skb->len, 0, pad); + return 0; + } + + ntail = skb->data_len + pad - (skb->end - skb->tail); + if (likely(skb_cloned(skb) || ntail > 0)) { + if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC)) + goto free_skb; + } + +#ifdef MAX_SKB_FRAGS + if (skb_is_nonlinear(skb) && !__pskb_pull_tail(skb, skb->data_len)) + goto free_skb; + +#endif + memset(skb->data + skb->len, 0, pad); + return 0; + +free_skb: + kfree_skb(skb); + return -ENOMEM; +} + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 4))) +int _kc_pci_save_state(struct pci_dev *pdev) +{ + struct adapter_struct *adapter = pci_get_drvdata(pdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset, pcie_link_status; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + /* no ->dev for 2.4 kernels */ + WARN_ON(pdev->dev.driver_data == NULL); +#endif + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset) { + if (!pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = PCIE_CONFIG_SPACE_LEN; + } + pci_config_space_ich8lan(); +#ifdef HAVE_PCI_ERS + if (adapter->config_space == NULL) +#else + WARN_ON(adapter->config_space != NULL); +#endif + adapter->config_space = kmalloc(size, GFP_KERNEL); + if (!adapter->config_space) { + printk(KERN_ERR "Out of memory in pci_save_state\n"); + return -ENOMEM; + } + for (i = 0; i < (size / 4); i++) + pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]); + return 0; +} + +void _kc_pci_restore_state(struct pci_dev *pdev) +{ + struct adapter_struct *adapter = pci_get_drvdata(pdev); + int size = PCI_CONFIG_SPACE_LEN, i; + u16 pcie_cap_offset; + u16 pcie_link_status; + + if (adapter->config_space != NULL) { + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pcie_cap_offset && + !pci_read_config_word(pdev, + pcie_cap_offset + PCIE_LINK_STATUS, + &pcie_link_status)) + size = PCIE_CONFIG_SPACE_LEN; + + pci_config_space_ich8lan(); + for (i = 0; i < (size / 4); i++) + pci_write_config_dword(pdev, i * 4, + adapter->config_space[i]); +#ifndef HAVE_PCI_ERS + kfree(adapter->config_space); + adapter->config_space = NULL; +#endif + } +} +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +void _kc_free_netdev(struct net_device *netdev) 
+{ + struct adapter_struct *adapter = netdev_priv(netdev); + + kfree(adapter->config_space); +#ifdef CONFIG_SYSFS + if (netdev->reg_state == NETREG_UNINITIALIZED) { + kfree((char *)netdev - netdev->padded); + } else { + BUG_ON(netdev->reg_state != NETREG_UNREGISTERED); + netdev->reg_state = NETREG_RELEASED; + class_device_put(&netdev->class_dev); + } +#else + kfree((char *)netdev - netdev->padded); +#endif +} +#endif + +void *_kc_kmemdup(const void *src, size_t len, unsigned gfp) +{ + void *p; + + p = kzalloc(len, gfp); + if (p) + memcpy(p, src, len); + return p; +} +#endif /* <= 2.6.19 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21)) +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev) +{ + return ((struct adapter_struct *)netdev_priv(netdev))->pdev; +} +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22)) +/* hexdump code taken from lib/hexdump.c */ +static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize, + int groupsize, unsigned char *linebuf, + size_t linebuflen, bool ascii) +{ + const u8 *ptr = buf; + u8 ch; + int j, lx = 0; + int ascii_column; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + if (!len) + goto nil; + if (len > rowsize) /* limit to one line at a time */ + len = rowsize; + if ((len % groupsize) != 0) /* no mixed size output */ + groupsize = 1; + + switch (groupsize) { + case 8: { + const u64 *ptr8 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%16.16llx", j ? " " : "", + (unsigned long long)*(ptr8 + j)); + ascii_column = 17 * ngroups + 2; + break; + } + + case 4: { + const u32 *ptr4 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%8.8x", j ? " " : "", *(ptr4 + j)); + ascii_column = 9 * ngroups + 2; + break; + } + + case 2: { + const u16 *ptr2 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, + "%s%4.4x", j ? " " : "", *(ptr2 + j)); + ascii_column = 5 * ngroups + 2; + break; + } + + default: + for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { + ch = ptr[j]; + linebuf[lx++] = hex_asc(ch >> 4); + linebuf[lx++] = hex_asc(ch & 0x0f); + linebuf[lx++] = ' '; + } + if (j) + lx--; + + ascii_column = 3 * rowsize + 2; + break; + } + if (!ascii) + goto nil; + + while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) + linebuf[lx++] = ' '; + for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) + linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? 
ptr[j] : + '.'; +nil: + linebuf[lx++] = '\0'; +} + +void _kc_print_hex_dump(const char *level, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii) +{ + const u8 *ptr = buf; + int i, linelen, remaining = len; + unsigned char linebuf[200]; + + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + for (i = 0; i < len; i += rowsize) { + linelen = min(remaining, rowsize); + remaining -= rowsize; + _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, + linebuf, sizeof(linebuf), ascii); + + switch (prefix_type) { + case DUMP_PREFIX_ADDRESS: + printk("%s%s%*p: %s\n", level, prefix_str, + (int)(2 * sizeof(void *)), ptr + i, linebuf); + break; + case DUMP_PREFIX_OFFSET: + printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); + break; + default: + printk("%s%s%s\n", level, prefix_str, linebuf); + break; + } + } +} + +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)) +int ixgbe_dcb_netlink_register(void) +{ + return 0; +} + +int ixgbe_dcb_netlink_unregister(void) +{ + return 0; +} + +int ixgbe_copy_dcb_cfg(struct ixgbe_adapter __always_unused *adapter, + int __always_unused tc_max) +{ + return 0; +} +#endif /* < 2.6.23 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) +#ifdef NAPI +struct net_device *napi_to_poll_dev(const struct napi_struct *napi) +{ + struct adapter_q_vector *q_vector = + container_of(napi, struct adapter_q_vector, napi); + return &q_vector->poll_dev; +} + +int __kc_adapter_clean(struct net_device *netdev, int *budget) +{ + int work_done; + int work_to_do = min(*budget, netdev->quota); + /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */ + struct napi_struct *napi = netdev->priv; + work_done = napi->poll(napi, work_to_do); + *budget -= work_done; + netdev->quota -= work_done; + return (work_done >= work_to_do) ? 
1 : 0; +} +#endif /* NAPI */ +#endif /* <= 2.6.24 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)) +void _kc_pci_disable_link_state(struct pci_dev *pdev, int state) +{ + struct pci_dev *parent = pdev->bus->self; + u16 link_state; + int pos; + + if (!parent) + return; + + pos = pci_find_capability(parent, PCI_CAP_ID_EXP); + if (pos) { + pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state); + link_state &= ~state; + pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state); + } +} +#endif /* < 2.6.26 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)) +#ifdef HAVE_TX_MQ +void _kc_netif_tx_stop_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_stop_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_stop_subqueue(netdev, i); +} +void _kc_netif_tx_wake_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_wake_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_wake_subqueue(netdev, i); +} +void _kc_netif_tx_start_all_queues(struct net_device *netdev) +{ + struct adapter_struct *adapter = netdev_priv(netdev); + int i; + + netif_start_queue(netdev); + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_start_subqueue(netdev, i); +} +#endif /* HAVE_TX_MQ */ + +void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...) +{ + va_list args; + + printk(KERN_WARNING "------------[ cut here ]------------\n"); + printk(KERN_WARNING "WARNING: at %s:%d \n", file, line); + va_start(args, fmt); + vprintk(fmt, args); + va_end(args); + + dump_stack(); +} +#endif /* __VMKLNX__ */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)) + +int _kc_pci_prepare_to_sleep(struct pci_dev *dev) +{ + pci_power_t target_state; + int error; + + target_state = pci_choose_state(dev, PMSG_SUSPEND); + + pci_enable_wake(dev, target_state, true); + + error = pci_set_power_state(dev, target_state); + + if (error) + pci_enable_wake(dev, target_state, false); + + return error; +} + +int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable) +{ + int err; + + err = pci_enable_wake(dev, PCI_D3cold, enable); + if (err) + goto out; + + err = pci_enable_wake(dev, PCI_D3hot, enable); + +out: + return err; +} +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)) +static void __kc_pci_set_master(struct pci_dev *pdev, bool enable) +{ + u16 old_cmd, cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &old_cmd); + if (enable) + cmd = old_cmd | PCI_COMMAND_MASTER; + else + cmd = old_cmd & ~PCI_COMMAND_MASTER; + if (cmd != old_cmd) { + dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n", + enable ? 
"enabling" : "disabling"); + pci_write_config_word(pdev, PCI_COMMAND, cmd); + } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 7)) + pdev->is_busmaster = enable; +#endif +} + +void _kc_pci_clear_master(struct pci_dev *dev) +{ + __kc_pci_set_master(dev, false); +} +#endif /* < 2.6.29 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34)) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 0)) +int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev) +{ + int num_vf = 0; +#ifdef CONFIG_PCI_IOV + struct pci_dev *vfdev; + + /* loop through all ethernet devices starting at PF dev */ + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL); + while (vfdev) { + if (vfdev->is_virtfn && vfdev->physfn == dev) + num_vf++; + + vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev); + } + +#endif + return num_vf; +} +#endif /* RHEL_RELEASE_CODE */ +#endif /* < 2.6.34 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)) +#ifdef HAVE_TX_MQ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 0))) +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +int _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) +{ + unsigned int real_num = dev->real_num_tx_queues; + struct Qdisc *qdisc; + int i; + + if (txq < 1 || txq > dev->num_tx_queues) + return -EINVAL; + + else if (txq > real_num) + dev->real_num_tx_queues = txq; + else if (txq < real_num) { + dev->real_num_tx_queues = txq; + for (i = txq; i < dev->num_tx_queues; i++) { + qdisc = netdev_get_tx_queue(dev, i)->qdisc; + if (qdisc) { + spin_lock_bh(qdisc_lock(qdisc)); + qdisc_reset(qdisc); + spin_unlock_bh(qdisc_lock(qdisc)); + } + } + } + + return 0; +} +#endif /* CONFIG_NETDEVICES_MULTIQUEUE */ +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ +#endif /* HAVE_TX_MQ */ + +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count) +{ + loff_t pos = *ppos; + size_t res; + + if (pos < 0) + return -EINVAL; + if (pos >= available || !count) + return 0; + if (count > available - pos) + count = available - pos; + res = copy_from_user(to + pos, from, count); + if (res == count) + return -EFAULT; + count -= res; + *ppos = pos + count; + return count; +} + +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) +static const u32 _kc_flags_dup_features = + (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH); + +u32 _kc_ethtool_op_get_flags(struct net_device *dev) +{ + return dev->features & _kc_flags_dup_features; +} + +int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) +{ + if (data & ~supported) + return -EINVAL; + + dev->features = ((dev->features & ~_kc_flags_dup_features) | + (data & _kc_flags_dup_features)); + return 0; +} +#endif /* < 2.6.36 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 0))) +#ifdef HAVE_NETDEV_SELECT_QUEUE +#include +#include + +u16 ___kc_skb_tx_hash(struct net_device *dev, const struct sk_buff *skb, + u16 num_tx_queues) +{ + u32 hash; + u16 qoffset = 0; + u16 qcount = num_tx_queues; + + if (skb_rx_queue_recorded(skb)) { + hash = skb_get_rx_queue(skb); + while (unlikely(hash >= num_tx_queues)) + hash -= num_tx_queues; + return hash; + } + + if (netdev_get_num_tc(dev)) { + struct adapter_struct *kc_adapter = netdev_priv(dev); + + if 
(skb->priority == TC_PRIO_CONTROL) { + qoffset = kc_adapter->dcb_tc - 1; + } else { + qoffset = skb->vlan_tci; + qoffset &= RNP_TX_FLAGS_VLAN_PRIO_MASK; + qoffset >>= 13; + } + + qcount = kc_adapter->ring_feature[RING_F_RSS].indices; + qoffset *= qcount; + } + + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else +#ifdef NETIF_F_RXHASH + hash = (__force u16)skb->protocol ^ skb->rxhash; +#else + hash = skb->protocol; +#endif + + hash = jhash_1word(hash, _kc_hashrnd); + + return (u16)(((u64)hash * qcount) >> 32) + qoffset; +} +#endif /* HAVE_NETDEV_SELECT_QUEUE */ + +u8 _kc_netdev_get_num_tc(struct net_device *dev) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + if (kc_adapter->flags & RNP_FLAG_DCB_ENABLED) + return kc_adapter->dcb_tc; + else + return 0; +} + +int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + + if (num_tc > RNP_DCB_MAX_TRAFFIC_CLASS) + return -EINVAL; + + kc_adapter->dcb_tc = num_tc; + + return 0; +} + +u8 _kc_netdev_get_prio_tc_map(struct net_device __maybe_unused *dev, + u8 __maybe_unused up) +{ + struct adapter_struct *kc_adapter = netdev_priv(dev); + + return ixgbe_dcb_get_tc_from_up(&kc_adapter->dcb_cfg, 0, up); +} + +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ +#endif /* < 2.6.39 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) +void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, + int size, unsigned int truesize) +{ + skb_fill_page_desc(skb, i, page, off, size); + skb->len += size; + skb->data_len += size; + skb->truesize += truesize; +} + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0)) +int _kc_simple_open(struct inode *inode, struct file *file) +{ + if (inode->i_private) + file->private_data = inode->i_private; + + return 0; +} +#endif /* SLE_VERSION < 11,3,0 */ + +#endif /* < 3.4.0 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) +static inline int __kc_pcie_cap_version(struct pci_dev *dev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return 0; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); + return reg16 & PCI_EXP_FLAGS_VERS; +} + +static inline bool +__kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev) +{ + return true; +} + +static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_ENDPOINT || type == PCI_EXP_TYPE_LEG_END; +} + +static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + int pos; + u16 pcie_flags_reg; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return false; + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + (type == PCI_EXP_TYPE_DOWNSTREAM && + pcie_flags_reg & PCI_EXP_FLAGS_SLOT); +} + +static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return __kc_pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || type == PCI_EXP_TYPE_RC_EC; +} + +static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos) +{ + if (!pci_is_pcie(dev)) + return false; + + 
switch (pos) { + case PCI_EXP_FLAGS_TYPE: + return true; + case PCI_EXP_DEVCAP: + case PCI_EXP_DEVCTL: + case PCI_EXP_DEVSTA: + return __kc_pcie_cap_has_devctl(dev); + case PCI_EXP_LNKCAP: + case PCI_EXP_LNKCTL: + case PCI_EXP_LNKSTA: + return __kc_pcie_cap_has_lnkctl(dev); + case PCI_EXP_SLTCAP: + case PCI_EXP_SLTCTL: + case PCI_EXP_SLTSTA: + return __kc_pcie_cap_has_sltctl(dev); + case PCI_EXP_RTCTL: + case PCI_EXP_RTCAP: + case PCI_EXP_RTSTA: + return __kc_pcie_cap_has_rtctl(dev); + case PCI_EXP_DEVCAP2: + case PCI_EXP_DEVCTL2: + case PCI_EXP_LNKCAP2: + case PCI_EXP_LNKCTL2: + case PCI_EXP_LNKSTA2: + return __kc_pcie_cap_version(dev) > 1; + default: + return false; + } +} + +/* + * Note that these accessor functions are only for the "PCI Express + * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the + * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) + */ +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) +{ + int ret; + + *val = 0; + if (pos & 1) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_word() fails, it may + * have been written as 0xFFFF if hardware error happens + * during pci_read_config_word(). + */ + if (ret) + *val = 0; + return ret; + } + + /* + * For Functions that do not implement the Slot Capabilities, + * Slot Status, and Slot Control registers, these spaces must + * be hardwired to 0b, with the exception of the Presence Detect + * State bit in the Slot Status register of Downstream Ports, + * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) + */ + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + +int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) +{ + int ret; + + *val = 0; + if (pos & 3) + return -EINVAL; + + if (__kc_pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_dword() fails, it may + * have been written as 0xFFFFFFFF if hardware error happens + * during pci_read_config_dword(). + */ + if (ret) + *val = 0; + return ret; + } + + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} + +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) +{ + if (pos & 1) + return -EINVAL; + + if (!__kc_pcie_capability_reg_implemented(dev, pos)) + return 0; + + return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); +} + +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set) +{ + int ret; + u16 val; + + ret = __kc_pcie_capability_read_word(dev, pos, &val); + if (!ret) { + val &= ~clear; + val |= set; + ret = __kc_pcie_capability_write_word(dev, pos, val); + } + + return ret; +} + +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, u16 clear) +{ + return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0); +} +#endif /* < 3.7.0 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) +#ifdef CONFIG_XPS +#if NR_CPUS < 64 +#define _KC_MAX_XPS_CPUS NR_CPUS +#else +#define _KC_MAX_XPS_CPUS 64 +#endif + +/* + * netdev_queue sysfs structures and functions. 
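+ *
+ * __kc_netif_set_xps_queue() below has no kernel API it can call on
+ * these kernels, so it looks up the queue's "xps_cpus" sysfs attribute
+ * and writes the CPU mask through its ->store() hook, i.e. the same
+ * path userspace takes with
+ * "echo <mask> > /sys/class/net/<dev>/queues/tx-<n>/xps_cpus".
+ * A hypothetical caller pinning Tx queue i to one CPU could do:
+ *
+ *     __kc_netif_set_xps_queue(adapter->netdev, cpumask_of(cpu), i);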
+ */ +struct _kc_netdev_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_queue *queue, + struct _kc_netdev_queue_attribute *attr, char *buf); + ssize_t (*store)(struct netdev_queue *queue, + struct _kc_netdev_queue_attribute *attr, + const char *buf, size_t len); +}; + +#define to_kc_netdev_queue_attr(_attr) \ + container_of(_attr, struct _kc_netdev_queue_attribute, attr) + +int __kc_netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, + u16 index) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, index); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38)) + /* Redhat requires some odd extended netdev structures */ + struct netdev_tx_queue_extended *txq_ext = + netdev_extended(dev)->_tx_ext + index; + struct kobj_type *ktype = txq_ext->kobj.ktype; +#else + struct kobj_type *ktype = txq->kobj.ktype; +#endif + struct _kc_netdev_queue_attribute *xps_attr; + struct attribute *attr = NULL; + int i, len, err; +#define _KC_XPS_BUFLEN (DIV_ROUND_UP(_KC_MAX_XPS_CPUS, 32) * 9) + char buf[_KC_XPS_BUFLEN]; + + if (!ktype) + return -ENOMEM; + + /* attempt to locate the XPS attribute in the Tx queue */ + for (i = 0; (attr = ktype->default_attrs[i]); i++) { + if (!strcmp("xps_cpus", attr->name)) + break; + } + + /* if we did not find it return an error */ + if (!attr) + return -EINVAL; + + /* copy the mask into a string */ + len = bitmap_scnprintf(buf, _KC_XPS_BUFLEN, cpumask_bits(mask), + _KC_MAX_XPS_CPUS); + if (!len) + return -ENOMEM; + + xps_attr = to_kc_netdev_queue_attr(attr); + + /* Store the XPS value using the SYSFS store call */ + err = xps_attr->store(txq, xps_attr, buf, len); + + /* we only had an error on err < 0 */ + return (err < 0) ? err : 0; +} +#endif /* CONFIG_XPS */ +#ifdef HAVE_NETDEV_SELECT_QUEUE +static inline int kc_get_xps_queue(struct net_device *dev, struct sk_buff *skb) +{ +#ifdef CONFIG_XPS + struct xps_dev_maps *dev_maps; + struct xps_map *map; + int queue_index = -1; + + rcu_read_lock(); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38)) + /* Redhat requires some odd extended netdev structures */ + dev_maps = rcu_dereference(netdev_extended(dev)->xps_maps); +#else + dev_maps = rcu_dereference(dev->xps_maps); +#endif + if (dev_maps) { + map = rcu_dereference( + dev_maps->cpu_map[raw_smp_processor_id()]); + if (map) { + if (map->len == 1) + queue_index = map->queues[0]; + else { + u32 hash; + if (skb->sk && skb->sk->sk_hash) + hash = skb->sk->sk_hash; + else + hash = (__force u16)skb->protocol ^ + skb->rxhash; + hash = jhash_1word(hash, _kc_hashrnd); + queue_index = + map->queues[((u64)hash * map->len) >> + 32]; + } + if (unlikely(queue_index >= dev->real_num_tx_queues)) + queue_index = -1; + } + } + rcu_read_unlock(); + + return queue_index; +#else + struct adapter_struct *kc_adapter = netdev_priv(dev); + int queue_index = -1; + + if (kc_adapter->flags & RNP_FLAG_FDIR_HASH_CAPABLE) { + queue_index = skb_rx_queue_recorded(skb) ? 
+ skb_get_rx_queue(skb) : + smp_processor_id(); + while (unlikely(queue_index >= dev->real_num_tx_queues)) + queue_index -= dev->real_num_tx_queues; + return queue_index; + } + + return -1; +#endif +} + +u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + int queue_index = sk_tx_queue_get(sk); + int new_index; + + if (queue_index >= 0 && queue_index < dev->real_num_tx_queues) { +#ifdef CONFIG_XPS + if (!skb->ooo_okay) +#endif + return queue_index; + } + + new_index = kc_get_xps_queue(dev, skb); + if (new_index < 0) + new_index = skb_tx_hash(dev, skb); + + if (queue_index != new_index && sk) { + struct dst_entry *dst = rcu_dereference(sk->sk_dst_cache); + + if (dst && skb_dst(skb) == dst) + sk_tx_queue_set(sk, new_index); + } + + return new_index; +} + +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#endif /* 3.9.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) +#ifdef HAVE_FDB_OPS +#ifdef USE_CONST_DEV_UC_CHAR +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, + u16 flags) +#else +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 flags) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. + */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_add_excl(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(dev, addr); + + /* Only return duplicate errors if NLM_F_EXCL is set */ + if (err == -EEXIST && !(flags & NLM_F_EXCL)) + err = 0; + + return err; +} + +#ifdef USE_CONST_DEV_UC_CHAR +#ifdef HAVE_FDB_DEL_NLATTR +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr) +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr) +#endif +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr) +#endif +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. 
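+ *
+ * As with the add path above, this mirrors the upstream default: only
+ * static (NUD_PERMANENT) entries are accepted and they are dropped from
+ * the device's unicast/multicast filters below. A hypothetical
+ * net_device_ops wiring for kernels in this range would be:
+ *
+ *     .ndo_fdb_add = __kc_ndo_dflt_fdb_add,
+ *     .ndo_fdb_del = __kc_ndo_dflt_fdb_del,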
+ */ + if (!(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_del(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_del(dev, addr); + + return err; +} + +#endif /* HAVE_FDB_OPS */ +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev) +{ + unsigned int vfs_assigned = 0; +#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED + int pos; + struct pci_dev *vfdev; + unsigned short dev_id; + + /* only search if we are a PF */ + if (!dev->is_physfn) + return 0; + + /* find SR-IOV capability */ + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return 0; + + /* + * determine the device ID for the VFs, the vendor ID will be the + * same as the PF so there is no need to check for that one + */ + pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id); + + /* loop through all the VFs to see if we own any that are assigned */ + vfdev = pci_get_device(dev->vendor, dev_id, NULL); + while (vfdev) { + /* + * It is considered assigned if it is a virtual function with + * our dev as the physical function and the assigned bit is set + */ + if (vfdev->is_virtfn && (vfdev->physfn == dev) && + (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) + vfs_assigned++; + + vfdev = pci_get_device(dev->vendor, dev_id, vfdev); + } + +#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */ + return vfs_assigned; +} + +#endif /* CONFIG_PCI_IOV */ +#endif /* 3.10.0 */ + +static const unsigned char __maybe_unused pcie_link_speed[] = { + PCI_SPEED_UNKNOWN, /* 0 */ + PCIE_SPEED_2_5GT, /* 1 */ + PCIE_SPEED_5_0GT, /* 2 */ + PCIE_SPEED_8_0GT, /* 3 */ + PCIE_SPEED_16_0GT, /* 4 */ + PCI_SPEED_UNKNOWN, /* 5 */ + PCI_SPEED_UNKNOWN, /* 6 */ + PCI_SPEED_UNKNOWN, /* 7 */ + PCI_SPEED_UNKNOWN, /* 8 */ + PCI_SPEED_UNKNOWN, /* 9 */ + PCI_SPEED_UNKNOWN, /* A */ + PCI_SPEED_UNKNOWN, /* B */ + PCI_SPEED_UNKNOWN, /* C */ + PCI_SPEED_UNKNOWN, /* D */ + PCI_SPEED_UNKNOWN, /* E */ + PCI_SPEED_UNKNOWN /* F */ +}; + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) +int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + *speed = PCI_SPEED_UNKNOWN; + *width = PCIE_LNK_WIDTH_UNKNOWN; + + while (dev) { + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + int ret = + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + + if (ret) + return ret; + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + if (next_speed < *speed) + *speed = next_speed; + + if (next_width < *width) + *width = next_width; + + dev = dev->bus->self; + } + + return 0; +} + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 7)) +int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev) +{ + int i; + u16 status; + + /* Wait for Transaction Pending bit clean */ + for (i = 0; i < 4; i++) { + if (i) + msleep((1 << (i - 1)) * 100); + + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); + if (!(status & PCI_EXP_DEVSTA_TRPND)) + return 1; + } + + return 0; +} +#endif /* crs_timeout) { + printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not " + "responding\n", + pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn)); + return false; + } + } + + return true; +} + +bool _kc_pci_device_is_present(struct pci_dev *pdev) +{ + u32 
v; + + return _kc_pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); +} +#endif /* nexthdr; + bool found; + +#define __KC_IP6_FH_F_FRAG BIT(0) +#define __KC_IP6_FH_F_AUTH BIT(1) +#define __KC_IP6_FH_F_SKIP_RH BIT(2) + + if (fragoff) + *fragoff = 0; + + if (*offset) { + struct ipv6hdr _ip6, *ip6; + + ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6); + if (!ip6 || (ip6->version != 6)) { + printk(KERN_ERR "IPv6 header not found\n"); + return -EBADMSG; + } + start = *offset + sizeof(struct ipv6hdr); + nexthdr = ip6->nexthdr; + } + + do { + struct ipv6_opt_hdr _hdr, *hp; + unsigned int hdrlen; + found = (nexthdr == target); + + if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { + if (target < 0 || found) + break; + return -ENOENT; + } + + hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); + if (!hp) + return -EBADMSG; + + if (nexthdr == NEXTHDR_ROUTING) { + struct ipv6_rt_hdr _rh, *rh; + + rh = skb_header_pointer(skb, start, sizeof(_rh), &_rh); + if (!rh) + return -EBADMSG; + + if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) && + rh->segments_left == 0) + found = false; + } + + if (nexthdr == NEXTHDR_FRAGMENT) { + unsigned short _frag_off; + __be16 *fp; + + if (flags) /* Indicate that this is a fragment */ + *flags |= __KC_IP6_FH_F_FRAG; + fp = skb_header_pointer( + skb, + start + offsetof(struct frag_hdr, frag_off), + sizeof(_frag_off), &_frag_off); + if (!fp) + return -EBADMSG; + + _frag_off = ntohs(*fp) & ~0x7; + if (_frag_off) { + if (target < 0 && + ((!ipv6_ext_hdr(hp->nexthdr)) || + hp->nexthdr == NEXTHDR_NONE)) { + if (fragoff) + *fragoff = _frag_off; + return hp->nexthdr; + } + return -ENOENT; + } + hdrlen = 8; + } else if (nexthdr == NEXTHDR_AUTH) { + if (flags && (*flags & __KC_IP6_FH_F_AUTH) && + (target < 0)) + break; + hdrlen = (hp->hdrlen + 2) << 2; + } else + hdrlen = ipv6_optlen(hp); + + if (!found) { + nexthdr = hp->nexthdr; + start += hdrlen; + } + } while (!found); + + *offset = start; + return nexthdr; +} + +int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msix(dev, entries, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +#endif /* 3.14.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)) +char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) +{ + size_t size; + char *buf; + + if (!s) + return NULL; + + size = strlen(s) + 1; + buf = devm_kzalloc(dev, size, gfp); + if (buf) + memcpy(buf, s, size); + return buf; +} + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + /* Set of random keys generated using kernel random number generator */ + static const u8 seed[NETDEV_RSS_KEY_LEN] = { + 0xE6, 0xFA, 0x35, 0x62, 0x95, 0x12, 0x3E, 0xA3, 0xFB, + 0x46, 0xC1, 0x5F, 0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, + 0x50, 0x95, 0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, + 0xBC, 0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41, + 0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A, 0xD7, + 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20 + }; + + BUG_ON(len > NETDEV_RSS_KEY_LEN); + memcpy(buffer, seed, len); +} +#endif /* 3.15.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)) +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev( + struct netdev_hw_addr_list *list, struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int 
(*unsync)(struct net_device *, const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + int err; + + /* first go through and flush out any stale entries */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) + if (!ha->synced || ha->refcount != 1) +#else + if (!ha->sync_cnt || ha->refcount != 1) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } + + /* go through and sync new entries to the list */ + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) + if (ha->synced) +#else + if (ha->sync_cnt) +#endif + continue; + + err = sync(dev, ha->addr); + if (err) + return err; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) + ha->synced = true; +#else + ha->sync_cnt++; +#endif + ha->refcount++; + } + + return 0; +} + +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, + const unsigned char *)) +{ + struct netdev_hw_addr *ha, *tmp; + + list_for_each_entry_safe(ha, tmp, &list->list, list) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) + if (!ha->synced) +#else + if (!ha->sync_cnt) +#endif + continue; + + if (unsync && unsync(dev, ha->addr)) + continue; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) + ha->synced = false; +#else + ha->sync_cnt--; +#endif + if (--ha->refcount) + continue; + + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + } +} + +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev( + struct dev_addr_list **list, int *count, struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)) +{ + struct dev_addr_list *da, **next = list; + int err; + + /* first go through and flush out any stale entries */ + while ((da = *next) != NULL) { + if (da->da_synced && da->da_users == 1) { + if (!unsync || !unsync(dev, da->da_addr)) { + *next = da->next; + kfree(da); + (*count)--; + continue; + } + } + next = &da->next; + } + + /* go through and sync new entries to the list */ + for (da = *list; da != NULL; da = da->next) { + if (da->da_synced) + continue; + + err = sync(dev, da->da_addr); + if (err) + return err; + + da->da_synced++; + da->da_users++; + } + + return 0; +} + +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, + const unsigned char *)) +{ + struct dev_addr_list *da; + + while ((da = *list) != NULL) { + if (da->da_synced) { + if (!unsync || !unsync(dev, da->da_addr)) { + da->da_synced--; + if (--da->da_users == 0) { + *list = da->next; + kfree(da); + (*count)--; + continue; + } + } + } + list = &da->next; + } +} +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp) +{ + void *p; + + p = devm_kzalloc(dev, len, gfp); + if (p) + memcpy(p, src, len); + + return p; +} +#endif /* 3.16.0 */ + +/******************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 5))) +#endif /* <3.17.0 && RHEL_RELEASE_CODE < RHEL7.5 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < 
KERNEL_VERSION(3, 18, 0)) +#ifndef NO_PTP_SUPPORT +static void __kc_sock_efree(struct sk_buff *skb) +{ + sock_put(skb->sk); +} + +struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + struct sk_buff *clone; + + if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) + return NULL; + + clone = skb_clone(skb, GFP_ATOMIC); + if (!clone) { + sock_put(sk); + return NULL; + } + + clone->sk = sk; + clone->destructor = __kc_sock_efree; + + return clone; +} + +void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps) +{ + struct sock_exterr_skb *serr; + struct sock *sk = skb->sk; + int err; + + sock_hold(sk); + + *skb_hwtstamps(skb) = *hwtstamps; + + serr = SKB_EXT_ERR(skb); + memset(serr, 0, sizeof(*serr)); + serr->ee.ee_errno = ENOMSG; + serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; + + err = sock_queue_err_skb(sk, skb); + if (err) + kfree_skb(skb); + + sock_put(sk); +} +#endif + +/* include headers needed for get_headlen function */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#include +#endif +#ifdef HAVE_SCTP +#include +#endif + +u32 __kc_eth_get_headlen(const struct net_device __always_unused *dev, + unsigned char *data, unsigned int max_len) +{ + union { + unsigned char *network; + /* l2 headers */ + struct ethhdr *eth; + struct vlan_hdr *vlan; + /* l3 headers */ + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + __be16 proto; + u8 nexthdr = 0; /* default to not TCP */ + u8 hlen; + + /* this should never happen, but better safe than sorry */ + if (max_len < ETH_HLEN) + return max_len; + + /* initialize network frame pointer */ + hdr.network = data; + + /* set first protocol and move network header forward */ + proto = hdr.eth->h_proto; + hdr.network += ETH_HLEN; + +again: + switch (proto) { + /* handle any vlan tag if present */ + case __constant_htons(ETH_P_8021AD): + case __constant_htons(ETH_P_8021Q): + if ((hdr.network - data) > (max_len - VLAN_HLEN)) + return max_len; + + proto = hdr.vlan->h_vlan_encapsulated_proto; + hdr.network += VLAN_HLEN; + goto again; + /* handle L3 protocols */ + case __constant_htons(ETH_P_IP): + if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) + return max_len; + + /* access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct iphdr)) + return hdr.network - data; + + /* record next protocol if header is present */ + if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) + nexthdr = hdr.ipv4->protocol; + + hdr.network += hlen; + break; +#ifdef NETIF_F_TSO6 + case __constant_htons(ETH_P_IPV6): + if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) + return max_len; + + /* record next protocol */ + nexthdr = hdr.ipv6->nexthdr; + hdr.network += sizeof(struct ipv6hdr); + break; +#endif /* NETIF_F_TSO6 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) + case __constant_htons(ETH_P_FCOE): + hdr.network += FCOE_HEADER_LEN; + break; +#endif + default: + return hdr.network - data; + } + + /* finally sort out L4 */ + switch (nexthdr) { + case IPPROTO_TCP: + if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) + return max_len; + + /* access doff as a u8 to avoid unaligned access on ia64 */ + hdr.network += max_t(u8, sizeof(struct tcphdr), + (hdr.network[12] & 0xF0) >> 2); + + break; + case IPPROTO_UDP: + case IPPROTO_UDPLITE: + hdr.network += sizeof(struct udphdr); + break; +#ifdef HAVE_SCTP + case IPPROTO_SCTP: + hdr.network += 
sizeof(struct sctphdr); + break; +#endif + } + + /* + * If everything has gone correctly hdr.network should be the + * data section of the packet and will be the end of the header. + * If not then it probably represents the end of the last recognized + * header. + */ + return min_t(unsigned int, hdr.network - data, max_len); +} + +#endif /* < 3.18.0 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) +#ifdef HAVE_NET_GET_RANDOM_ONCE +static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN]; + +void __kc_netdev_rss_key_fill(void *buffer, size_t len) +{ + BUG_ON(len > sizeof(__kc_netdev_rss_key)); + net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key)); + memcpy(buffer, __kc_netdev_rss_key, len); +} +#endif + +int _kc_bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, int nmaskbits) +{ + ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf - 2; + int n = 0; + + if (len > 1) { + n = list ? bitmap_scnlistprintf(buf, len, maskp, nmaskbits) : + bitmap_scnprintf(buf, len, maskp, nmaskbits); + buf[n++] = '\n'; + buf[n] = '\0'; + } + return n; +} +#endif + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)) +#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)) && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) && \ + (SLE_VERSION_CODE > SLE_VERSION(12, 1, 0))) +unsigned int _kc_cpumask_local_spread(unsigned int i, int node) +{ + int cpu; + + /* Wrap: we always want a cpu. */ + i %= num_online_cpus(); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)) + /* Kernels prior to 2.6.28 do not have for_each_cpu or + * cpumask_of_node, so just use for_each_online_cpu() + */ + for_each_online_cpu(cpu) + if (i-- == 0) + return cpu; + + return 0; +#else + if (node == -1) { + for_each_cpu(cpu, cpu_online_mask) + if (i-- == 0) + return cpu; + } else { + /* NUMA first. */ + for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask) + if (i-- == 0) + return cpu; + + for_each_cpu(cpu, cpu_online_mask) { + /* Skip NUMA nodes, done above. */ + if (cpumask_test_cpu(cpu, cpumask_of_node(node))) + continue; + + if (i-- == 0) + return cpu; + } + } +#endif /* KERNEL_VERSION >= 2.6.28 */ + BUG(); +} +#endif +#endif + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12, 2, 0))) +/** + * _kc_skb_flow_dissect_flow_keys - parse SKB to fill _kc_flow_keys + * @skb: SKB used to fille _kc_flow_keys + * @flow: _kc_flow_keys to set with SKB fields + * @flags: currently unused flags + * + * The purpose of using kcompat for this function is so the caller doesn't have + * to care about which kernel version they are on, which prevents a larger than + * normal #ifdef mess created by using a HAVE_* flag for this case. This is also + * done for 4.2 kernels to simplify calling skb_flow_dissect_flow_keys() + * because in 4.2 kernels skb_flow_dissect_flow_keys() exists, but only has 2 + * arguments. Recent kernels have skb_flow_dissect_flow_keys() that has 3 + * arguments. 
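+ *
+ * A minimal usage sketch (program_filter() and its arguments are purely
+ * illustrative, not part of this driver):
+ *
+ *     struct _kc_flow_keys keys;
+ *
+ *     if (_kc_skb_flow_dissect_flow_keys(skb, &keys, 0) &&
+ *         keys.basic.n_proto == htons(ETH_P_IP) &&
+ *         keys.basic.ip_proto == IPPROTO_TCP)
+ *             program_filter(keys.addrs.v4addrs.src, keys.addrs.v4addrs.dst,
+ *                            keys.ports.src, keys.ports.dst);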
+ * + * The caller needs to understand that this function was only implemented as a + * bare-minimum replacement for recent versions of skb_flow_dissect_flow_keys() + * and this function is in no way similar to skb_flow_dissect_flow_keys(). An + * example use can be found in the ice driver, specifically ice_arfs.c. + * + * This function is treated as a whitelist of supported fields the SKB can + * parse. If new functionality is added make sure to keep this format (i.e. only + * check for fields that are explicity wanted). + * + * Current whitelist: + * + * TCPv4, TCPv6, UDPv4, UDPv6 + * + * If any unexpected protocol or other field is found this function memsets the + * flow passed in back to 0 and returns false. Otherwise the flow is populated + * and returns true. + */ +bool _kc_skb_flow_dissect_flow_keys(const struct sk_buff *skb, + struct _kc_flow_keys *flow, + unsigned int __always_unused flags) +{ + memset(flow, 0, sizeof(*flow)); + + flow->basic.n_proto = skb->protocol; + switch (flow->basic.n_proto) { + case htons(ETH_P_IP): + flow->basic.ip_proto = ip_hdr(skb)->protocol; + flow->addrs.v4addrs.src = ip_hdr(skb)->saddr; + flow->addrs.v4addrs.dst = ip_hdr(skb)->daddr; + break; + case htons(ETH_P_IPV6): + flow->basic.ip_proto = ipv6_hdr(skb)->nexthdr; + memcpy(&flow->addrs.v6addrs.src, &ipv6_hdr(skb)->saddr, + sizeof(struct in6_addr)); + memcpy(&flow->addrs.v6addrs.dst, &ipv6_hdr(skb)->daddr, + sizeof(struct in6_addr)); + break; + default: + netdev_dbg( + skb->dev, + "%s: Unsupported/unimplemented layer 3 protocol %04x\n", + __func__, htons(flow->basic.n_proto)); + goto unsupported; + } + + switch (flow->basic.ip_proto) { + case IPPROTO_TCP: { + struct tcphdr *tcph; + + tcph = tcp_hdr(skb); + flow->ports.src = tcph->source; + flow->ports.dst = tcph->dest; + break; + } + case IPPROTO_UDP: { + struct udphdr *udph; + + udph = udp_hdr(skb); + flow->ports.src = udph->source; + flow->ports.dst = udph->dest; + break; + } + default: + netdev_dbg( + skb->dev, + "%s: Unsupported/unimplemented layer 4 protocol %02x\n", + __func__, flow->basic.ip_proto); + return false; + } + + return true; + +unsupported: + memset(flow, 0, sizeof(*flow)); + return false; +} +#endif /* ! >= RHEL7.4 && ! >= SLES12.2 */ +#endif /* 4.3.0 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3))) +#ifdef CONFIG_SPARC +#include +#include +#endif +int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, + u8 *mac_addr __maybe_unused) +{ +#if (((LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)) && defined(CONFIG_OF) && \ + !defined(HAVE_STRUCT_DEVICE_OF_NODE) || \ + !defined(CONFIG_OF)) && \ + !defined(CONFIG_SPARC)) + return -ENODEV; +#else + const unsigned char *addr; + struct device_node *dp; + + if (dev_is_pci(dev)) + dp = pci_device_to_OF_node(to_pci_dev(dev)); + else +#if defined(HAVE_STRUCT_DEVICE_OF_NODE) && defined(CONFIG_OF) + dp = dev->of_node; +#else + dp = NULL; +#endif + + addr = NULL; + if (dp) + addr = of_get_mac_address(dp); +#ifdef CONFIG_SPARC + /* Kernel hasn't implemented arch_get_platform_mac_address, but we + * should handle the SPARC case here since it was supported + * originally. This is replaced by arch_get_platform_mac_address() + * upstream. 
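+ *
+ * On success the address is copied into the caller's buffer and 0 is
+ * returned, -ENODEV otherwise. A hypothetical probe-time fallback
+ * (hw->mac.perm_addr is an illustrative name) could be:
+ *
+ *     if (_kc_eth_platform_get_mac_address(&pdev->dev, hw->mac.perm_addr))
+ *             eth_hw_addr_random(netdev);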
+ */ + if (!addr) + addr = idprom->id_ethaddr; +#endif + if (!addr) + return -ENODEV; + + ether_addr_copy(mac_addr, addr); + return 0; +#endif +} +#endif /* !(RHEL_RELEASE >= 7.3) */ +#endif /* < 4.5.0 */ + +/*****************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12, 3, 0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7, 5)))) +const char *_kc_phy_speed_to_str(int speed) +{ + switch (speed) { + case SPEED_10: + return "10Mbps"; + case SPEED_100: + return "100Mbps"; + case SPEED_1000: + return "1Gbps"; + case SPEED_2500: + return "2.5Gbps"; + case SPEED_5000: + return "5Gbps"; + case SPEED_10000: + return "10Gbps"; + case SPEED_14000: + return "14Gbps"; + case SPEED_20000: + return "20Gbps"; + case SPEED_25000: + return "25Gbps"; + case SPEED_40000: + return "40Gbps"; + case SPEED_50000: + return "50Gbps"; + case SPEED_56000: + return "56Gbps"; +#ifdef SPEED_100000 + case SPEED_100000: + return "100Gbps"; +#endif + case SPEED_UNKNOWN: + return "Unknown"; + default: + return "Unsupported (update phy-core.c)"; + } +} +#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)) +void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, + struct ethtool_link_ksettings *src) +{ + unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); + unsigned int idx = 0; + + for (; idx < size; idx++) { + dst->link_modes.supported[idx] &= + src->link_modes.supported[idx]; + dst->link_modes.advertising[idx] &= + src->link_modes.advertising[idx]; + } +} +#endif /* 4.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 0)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12, 5, 0) && \ + SLE_VERSION_CODE < SLE_VERSION(15, 0, 0) || \ + SLE_VERSION_CODE >= SLE_VERSION(15, 1, 0)) +#if BITS_PER_LONG == 64 +/** + * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap + * @bitmap: array of unsigned longs, the destination bitmap + * @buf: array of u32 (in host byte order), the source bitmap + * @nbits: number of bits in @bitmap + */ +void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, + unsigned int nbits) +{ + unsigned int i, halfwords; + + halfwords = DIV_ROUND_UP(nbits, 32); + for (i = 0; i < halfwords; i++) { + bitmap[i / 2] = (unsigned long)buf[i]; + if (++i < halfwords) + bitmap[i / 2] |= ((unsigned long)buf[i]) << 32; + } + + /* Clear tail bits in last word beyond nbits. */ + if (nbits % BITS_PER_LONG) + bitmap[(halfwords - 1) / 2] &= BITMAP_LAST_WORD_MASK(nbits); +} +#endif /* BITS_PER_LONG == 64 */ +#endif /* !(RHEL >= 8.0) && !(SLES >= 12.5 && SLES < 15.0 || SLES >= 15.1) */ +#endif /* 4.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)) +/* PCIe link information */ +#define PCIE_SPEED2STR(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? "16 GT/s" : \ + (speed) == PCIE_SPEED_8_0GT ? "8 GT/s" : \ + (speed) == PCIE_SPEED_5_0GT ? "5 GT/s" : \ + (speed) == PCIE_SPEED_2_5GT ? "2.5 GT/s" : \ + "Unknown speed") + +/* PCIe speed to Mb/s reduced by encoding overhead */ +#define PCIE_SPEED2MBS_ENC(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? 
16000 * 128 / 130 : \ + (speed) == PCIE_SPEED_8_0GT ? 8000 * 128 / 130 : \ + (speed) == PCIE_SPEED_5_0GT ? 5000 * 8 / 10 : \ + (speed) == PCIE_SPEED_2_5GT ? 2500 * 8 / 10 : \ + 0) + +static u32 _kc_pcie_bandwidth_available(struct pci_dev *dev, + struct pci_dev **limiting_dev, + enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + u32 bw, next_bw; + + if (speed) + *speed = PCI_SPEED_UNKNOWN; + if (width) + *width = PCIE_LNK_WIDTH_UNKNOWN; + + bw = 0; + + while (dev) { + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed); + + /* Check if current device limits the total bandwidth */ + if (!bw || next_bw <= bw) { + bw = next_bw; + + if (limiting_dev) + *limiting_dev = dev; + if (speed) + *speed = next_speed; + if (width) + *width = next_width; + } + + dev = pci_upstream_bridge(dev); + } + + return bw; +} + +static enum pci_bus_speed _kc_pcie_get_speed_cap(struct pci_dev *dev) +{ + u32 lnkcap2, lnkcap; + + /* + * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link + * Speeds Vector in Link Capabilities 2 when supported, falling + * back to Max Link Speed in Link Capabilities otherwise. + */ + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); + if (lnkcap2) { /* PCIe r3.0-compliant */ + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + return PCI_SPEED_UNKNOWN; + } + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) { + if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + } + + return PCI_SPEED_UNKNOWN; +} + +static enum pcie_link_width _kc_pcie_get_width_cap(struct pci_dev *dev) +{ + u32 lnkcap; + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) + return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4; + + return PCIE_LNK_WIDTH_UNKNOWN; +} + +static u32 _kc_pcie_bandwidth_capable(struct pci_dev *dev, + enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + *speed = _kc_pcie_get_speed_cap(dev); + *width = _kc_pcie_get_width_cap(dev); + + if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) + return 0; + + return *width * PCIE_SPEED2MBS_ENC(*speed); +} + +void _kc_pcie_print_link_status(struct pci_dev *dev) +{ + enum pcie_link_width width, width_cap; + enum pci_bus_speed speed, speed_cap; + struct pci_dev *limiting_dev = NULL; + u32 bw_avail, bw_cap; + + bw_cap = _kc_pcie_bandwidth_capable(dev, &speed_cap, &width_cap); + bw_avail = _kc_pcie_bandwidth_available(dev, &limiting_dev, &speed, + &width); + + if (bw_avail >= bw_cap) + pci_info( + dev, + "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n", + bw_cap / 1000, bw_cap % 1000, PCIE_SPEED2STR(speed_cap), + width_cap); + else + pci_info( + dev, + "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", + bw_avail / 1000, bw_avail % 1000, 
PCIE_SPEED2STR(speed), + width, + limiting_dev ? pci_name(limiting_dev) : "", + bw_cap / 1000, bw_cap % 1000, PCIE_SPEED2STR(speed_cap), + width_cap); +} +#endif /* 4.17.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)) || \ + (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 1))) +#ifdef HAVE_TC_SETUP_CLSFLOWER +#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \ + const struct flow_match *__m = &(__rule)->match; \ + struct flow_dissector *__d = (__m)->dissector; \ + \ + (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \ + (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask); + +void flow_rule_match_basic(const struct flow_rule *rule, + struct flow_match_basic *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out); +} + +void flow_rule_match_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out); +} + +void flow_rule_match_eth_addrs(const struct flow_rule *rule, + struct flow_match_eth_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out); +} + +#ifdef HAVE_TC_FLOWER_ENC +void flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out); +} + +void flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out); +} + +void flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out); +} + +void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out); +} + +void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out); +} +#endif + +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +void flow_rule_match_vlan(const struct flow_rule *rule, + struct flow_match_vlan *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out); +} +#endif + +void flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out); +} + +void flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out); +} + +void flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out); +} +#endif /* HAVE_TC_SETUP_CLSFLOWER */ +#endif /* 5.1.0 || (RHEL && RHEL < 8.1) */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0)) +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 2)))) +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head __always_unused *driver_list, + tc_setup_cb_t *cb, void *cb_ident, + void *cb_priv, bool ingress_only) +{ + if (ingress_only && + f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + /* Note: Upstream has driver_block_list, but older kernels do not */ + 
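+	/* Dispatch on the block command: TC_BLOCK_BIND registers the
+	 * classifier callback via tcf_block_cb_register() (passing extack
+	 * only when the kernel's API takes it), TC_BLOCK_UNBIND removes it
+	 * again, and any other command is reported as unsupported.
+	 */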
switch (f->command) { + case TC_BLOCK_BIND: +#ifdef HAVE_TCF_BLOCK_CB_REGISTER_EXTACK + return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv, + f->extack); +#else + return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv); +#endif + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, cb, cb_ident); + return 0; + default: + return -EOPNOTSUPP; + } +} +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#endif /* !RHEL >= 8.2 */ +#endif /* 5.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0)) +u64 _kc_pci_get_dsn(struct pci_dev *dev) +{ + u32 dword; + u64 dsn; + int pos; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN); + if (!pos) + return 0; + + /* + * The Device Serial Number is two dwords offset 4 bytes from the + * capability position. The specification says that the first dword is + * the lower half, and the second dword is the upper half. + */ + pos += 4; + pci_read_config_dword(dev, pos, &dword); + dsn = (u64)dword; + pci_read_config_dword(dev, pos + 4, &dword); + dsn |= ((u64)dword) << 32; + + return dsn; +} +#endif /* 5.7.0 */ + +#ifdef NEED_MUL_U64_U64_DIV_U64 +#ifdef NEED_DIV64_U64_REM +static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder) +{ + *remainder = dividend % divisor; + return dividend / divisor; +} +#endif +u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c) +{ + u64 res = 0, div, rem; + int shift; + + /* can a * b overflow ? */ + if (ilog2(a) + ilog2(b) > 62) { + /* + * (b * a) / c is equal to + * + * (b / c) * a + + * (b % c) * a / c + * + * if nothing overflows. Can the 1st multiplication + * overflow? Yes, but we do not care: this can only + * happen if the end result can't fit in u64 anyway. + * + * So the code below does + * + * res = (b / c) * a; + * b = b % c; + */ + div = div64_u64_rem(b, c, &rem); + res = div * a; + b = rem; + + shift = ilog2(a) + ilog2(b) - 62; + if (shift > 0) { + /* drop precision */ + b >>= shift; + c >>= shift; + if (!c) + return res; + } + } + + return res + div64_u64(a * b, c); +} +#endif /* NEED_MUL_U64_U64_DIV_U64 */ + +#ifdef NEED_SYSFS_CREATE_GROUPS +int sysfs_create_groups(struct kobject *kobj, + const struct attribute_group **groups) +{ + int error = 0; + int i; + + if (!groups) + return 0; + + for (i = 0; groups[i]; i++) { + error = sysfs_create_group(kobj, groups[i]); + if (error) { + while (--i >= 0) + sysfs_remove_group(kobj, groups[i]); + break; + } + } + return error; +} + + +#endif + + +#ifdef NEED_SYSFS_REMOVE_GROUPS +void sysfs_remove_groups(struct kobject *kobj, + const struct attribute_group **groups) +{ + int i; + + if (!groups) + return; + for (i = 0; groups[i]; i++) + sysfs_remove_group(kobj, groups[i]); +} + +#endif diff --git a/drivers/net/ethernet/mucse/rnp/rnp_compat.h b/drivers/net/ethernet/mucse/rnp/rnp_compat.h new file mode 100755 index 0000000000000000000000000000000000000000..4bc2a9dea69a878e8fc34a3efe9e51d6369a04ef --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_compat.h @@ -0,0 +1,7649 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#ifndef _KCOMPAT_H_ +#define _KCOMPAT_H_ + +#include "kcompat_gcc.h" +#ifndef LINUX_VERSION_CODE +#include +#else +#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef GCC_VERSION +#define GCC_VERSION \ + (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) +#endif /* GCC_VERSION */ + +#ifndef IEEE_8021QAZ_APP_SEL_DSCP +#define IEEE_8021QAZ_APP_SEL_DSCP 5 +#endif + +/* Backport macros for controlling GCC diagnostics */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)) + +/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */ +#if GCC_VERSION >= 40600 +#define __diag_str1(s) #s +#define __diag_str(s) __diag_str1(s) +#define __diag(s) _Pragma(__diag_str(GCC diagnostic s)) +#else +#define __diag(s) +#endif /* GCC_VERSION >= 4.6 */ +#define __diag_push() __diag(push) +#define __diag_pop() __diag(pop) +#endif /* LINUX_VERSION < 4.18.0 */ + +#ifndef NSEC_PER_MSEC +#define NSEC_PER_MSEC 1000000L +#endif +#include +/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */ +#ifndef UTS_RELEASE +/* utsrelease.h changed locations in 2.6.33 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) +#include +#else +#include +#endif +#endif + +/* NAPI enable/disable flags here */ +#define NAPI + +#define adapter_struct rnp_adapter +#define adapter_q_vector rnp_q_vector + +/* and finally set defines so that the code sees the changes */ +#ifdef NAPI +#else +#endif /* NAPI */ + +/* Dynamic LTR and deeper C-State support disable/enable */ +//#define DISABLE_PACKET_SPLIT +/* packet split disable/enable */ +#ifdef DISABLE_PACKET_SPLIT +#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT +#define CONFIG_RNP_DISABLE_PACKET_SPLIT +#endif +#endif /* DISABLE_PACKET_SPLIT */ + +/* MSI compatibility code for all kernels and drivers */ +#ifdef DISABLE_PCI_MSI +#undef CONFIG_PCI_MSI +#endif +#ifndef CONFIG_PCI_MSI +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 8)) +struct msix_entry { + u16 vector; /* kernel uses to write allocated vector */ + u16 entry; /* driver uses to specify entry, OS writes */ +}; +#endif +#undef pci_enable_msi +#define pci_enable_msi(a) -ENOTSUPP +#undef pci_disable_msi +#define pci_disable_msi(a) \ + do { \ + } while (0) +#undef pci_enable_msix +#define pci_enable_msix(a, b, c) -ENOTSUPP +#undef pci_disable_msix +#define pci_disable_msix(a) \ + do { \ + } while (0) +#define msi_remove_pci_irq_vectors(a) \ + do { \ + } while (0) +#endif /* CONFIG_PCI_MSI */ +#ifdef DISABLE_PM +#undef CONFIG_PM +#endif + +#ifdef DISABLE_NET_POLL_CONTROLLER +#undef CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef PMSG_SUSPEND +#define PMSG_SUSPEND 3 +#endif + +/* generic boolean compatibility */ +#undef TRUE +#undef FALSE +#define TRUE true +#define FALSE false +#ifdef GCC_VERSION +#if (GCC_VERSION < 3000) +#define _Bool char +#endif +#else +#define _Bool char +#endif + +#ifndef BIT +#define BIT(nr) (1UL << (nr)) +#endif + +#undef __always_unused +#define __always_unused __attribute__((__unused__)) + +#undef __maybe_unused +#define __maybe_unused __attribute__((__unused__)) + +/* kernels less than 2.4.14 don't have this */ +#ifndef ETH_P_8021Q +#define ETH_P_8021Q 0x8100 +#endif + +#ifndef module_param +#define module_param(v, t, p) MODULE_PARM(v, "i"); +#endif + +#ifndef DMA_64BIT_MASK +#define 
DMA_64BIT_MASK 0xffffffffffffffffULL +#endif + +#ifndef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0x00000000ffffffffULL +#endif + +#ifndef PCI_CAP_ID_EXP +#define PCI_CAP_ID_EXP 0x10 +#endif + +#ifndef uninitialized_var +#define uninitialized_var(x) (x = x) +#endif + +#ifndef PCIE_LINK_STATE_L0S +#define PCIE_LINK_STATE_L0S 1 +#endif +#ifndef PCIE_LINK_STATE_L1 +#define PCIE_LINK_STATE_L1 2 +#endif + +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(net, pdev) +#endif + +#if !defined(HAVE_FREE_NETDEV) && (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)) +#define free_netdev(x) kfree(x) +#endif + +#ifdef HAVE_POLL_CONTROLLER +#define CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef SKB_DATAREF_SHIFT +/* + *if we do not have the infrastructure to detect if skb_header is cloned + *just return false in all cases + */ +#define skb_header_cloned(x) 0 +#endif + +#ifndef NETIF_F_GSO +#define gso_size tso_size +#define gso_segs tso_segs +#endif + +#ifndef NETIF_F_GRO +#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \ + vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan) +#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb) +#endif + +#ifndef NETIF_F_SCTP_CSUM +#define NETIF_F_SCTP_CSUM 0 +#endif + +#ifndef NETIF_F_LRO +#define NETIF_F_LRO BIT(15) +#endif + +#ifndef NETIF_F_NTUPLE +#define NETIF_F_NTUPLE BIT(27) +#endif + +#ifndef NETIF_F_ALL_FCOE +#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | NETIF_F_FSO) +#endif + +#ifndef IPPROTO_SCTP +#define IPPROTO_SCTP 132 +#endif + +#ifndef IPPROTO_UDPLITE +#define IPPROTO_UDPLITE 136 +#endif + +#ifndef CHECKSUM_PARTIAL +#define CHECKSUM_PARTIAL CHECKSUM_HW +#define CHECKSUM_COMPLETE CHECKSUM_HW +#endif + +#ifndef __read_mostly +#define __read_mostly +#endif + +#ifndef MII_RESV1 +#define MII_RESV1 0x17 /* Reserved... 
*/ +#endif + +#ifndef unlikely +#define unlikely(_x) _x +#define likely(_x) _x +#endif + +#ifndef WARN_ON +#define WARN_ON(x) ({ 0; }) +#endif + +#ifndef PCI_DEVICE +#define PCI_DEVICE(vend, dev) \ + .vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \ + .subdevice = PCI_ANY_ID +#endif + +#ifndef node_online +#define node_online(node) ((node) == 0) +#endif + +#ifndef _LINUX_RANDOM_H +#include +#endif + +#ifndef BITS_PER_TYPE +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) +#endif + +#ifndef BITS_TO_LONGS +#define BITS_TO_LONGS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG) +#endif + +#ifndef DECLARE_BITMAP +#define DECLARE_BITMAP(name, bits) long name[BITS_TO_LONGS(bits)] +#endif + +#ifndef VLAN_HLEN +#define VLAN_HLEN 4 +#endif + +#ifndef VLAN_ETH_HLEN +#define VLAN_ETH_HLEN 18 +#endif + +#ifndef VLAN_ETH_FRAME_LEN +#define VLAN_ETH_FRAME_LEN 1518 +#endif + +#ifndef DCA_GET_TAG_TWO_ARGS +#define dca3_get_tag(a, b) dca_get_tag(b) +#endif + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#if defined(__i386__) || defined(__x86_64__) +#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#endif +#endif + +/* taken from 2.6.24 definition in linux/kernel.h */ +#ifndef IS_ALIGNED +#define IS_ALIGNED(x, a) (((x) % ((typeof(x))(a))) == 0) +#endif + +#ifdef IS_ENABLED +#undef IS_ENABLED +#undef __ARG_PLACEHOLDER_1 +#undef config_enabled +#undef _config_enabled +#undef __config_enabled +#undef ___config_enabled +#endif + +#define __ARG_PLACEHOLDER_1 0, +#define config_enabled(cfg) _config_enabled(cfg) +#ifdef __CHECKER__ +/* cppcheck-suppress preprocessorErrorDirective */ +#endif /* __CHECKER__ */ +#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) +#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) +#define ___config_enabled(__ignored, val, ...) 
val + +#define IS_ENABLED(option) \ + (config_enabled(option) || config_enabled(option##_MODULE)) + +#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX) +struct _kc_vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_ethhdr _kc_vlan_ethhdr +struct _kc_vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_hdr _kc_vlan_hdr +#define vlan_tx_tag_present(_skb) 0 +#define vlan_tx_tag_get(_skb) 0 +#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */ + +#ifndef VLAN_PRIO_SHIFT +#define VLAN_PRIO_SHIFT 13 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_2_5GB +#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_5_0GB +#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_8_0GB +#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X1 +#define PCI_EXP_LNKSTA_NLW_X1 0x0010 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X2 +#define PCI_EXP_LNKSTA_NLW_X2 0x0020 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X4 +#define PCI_EXP_LNKSTA_NLW_X4 0x0040 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X8 +#define PCI_EXP_LNKSTA_NLW_X8 0x0080 +#endif + +#ifndef __GFP_COLD +#define __GFP_COLD 0 +#endif + +#ifndef __GFP_COMP +#define __GFP_COMP 0 +#endif + +#ifndef IP_OFFSET +#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */ +#endif + +/*****************************************************************************/ +/* + * Installations with ethtool version without eeprom, adapter id, or statistics + * support + */ + +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif + +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x1d +#undef ethtool_drvinfo +#define ethtool_drvinfo k_ethtool_drvinfo +struct k_ethtool_drvinfo { + u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char reserved1[32]; + char reserved2[16]; + u32 n_stats; + u32 testinfo_len; + u32 eedump_len; + u32 regdump_len; +}; + +struct ethtool_stats { + u32 cmd; + u32 n_stats; + u64 data[0]; +}; +#endif /* ETHTOOL_GSTATS */ + +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x1c +#endif /* ETHTOOL_PHYS_ID */ + +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x1b +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS, +}; +struct ethtool_gstrings { + u32 cmd; /* ETHTOOL_GSTRINGS */ + u32 string_set; /* string set id e.c. 
ETH_SS_TEST, etc*/ + u32 len; /* number of strings in the string set */ + u8 data[0]; +}; +#endif /* ETHTOOL_GSTRINGS */ + +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x1a +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = BIT(0), + ETH_TEST_FL_FAILED = BIT(1), +}; +struct ethtool_test { + u32 cmd; + u32 flags; + u32 reserved; + u32 len; + u64 data[0]; +}; +#endif /* ETHTOOL_TEST */ + +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0xb +#undef ETHTOOL_GREGS +struct ethtool_eeprom { + u32 cmd; + u32 magic; + u32 offset; + u32 len; + u8 data[0]; +}; + +struct ethtool_value { + u32 cmd; + u32 data; +}; +#endif /* ETHTOOL_GEEPROM */ + +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0xa +#endif /* ETHTOOL_GLINK */ + +#ifndef ETHTOOL_GWOL +#define ETHTOOL_GWOL 0x5 +#define ETHTOOL_SWOL 0x6 +#define SOPASS_MAX 6 +struct ethtool_wolinfo { + u32 cmd; + u32 supported; + u32 wolopts; + u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ +}; +#endif /* ETHTOOL_GWOL */ + +#ifndef ETHTOOL_GREGS +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ +#define ethtool_regs _kc_ethtool_regs +/* for passing big chunks of data */ +struct _kc_ethtool_regs { + u32 cmd; + u32 version; /* driver-specific, indicates different chips/revs */ + u32 len; /* bytes */ + u8 data[0]; +}; +#endif /* ETHTOOL_GREGS */ + +#ifndef ETHTOOL_GMSGLVL +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#endif +#ifndef ETHTOOL_SMSGLVL +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */ +#endif +#ifndef ETHTOOL_NWAY_RST +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ +#endif +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0x0000000a /* Get link status */ +#endif +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#endif +#ifndef ETHTOOL_SEEPROM +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ +#endif +#ifndef ETHTOOL_GCOALESCE +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ +/* for configuring coalescing parameters of chip */ +#define ethtool_coalesce _kc_ethtool_coalesce +struct _kc_ethtool_coalesce { + u32 cmd; /* ETHTOOL_{G,S}COALESCE */ + + /* How many usecs to delay an RX interrupt after + * a packet arrives. If 0, only rx_max_coalesced_frames + * is used. + */ + u32 rx_coalesce_usecs; + + /* How many packets to delay an RX interrupt after + * a packet arrives. If 0, only rx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause RX interrupts to never be + * generated. + */ + u32 rx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. + */ + u32 rx_coalesce_usecs_irq; + u32 rx_max_coalesced_frames_irq; + + /* How many usecs to delay a TX interrupt after + * a packet is sent. If 0, only tx_max_coalesced_frames + * is used. + */ + u32 tx_coalesce_usecs; + + /* How many packets to delay a TX interrupt after + * a packet is sent. If 0, only tx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause TX interrupts to never be + * generated. + */ + u32 tx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. 
+ */ + u32 tx_coalesce_usecs_irq; + u32 tx_max_coalesced_frames_irq; + + /* How many usecs to delay in-memory statistics + * block updates. Some drivers do not have an in-memory + * statistic block, and in such cases this value is ignored. + * This value must not be zero. + */ + u32 stats_block_coalesce_usecs; + + /* Adaptive RX/TX coalescing is an algorithm implemented by + * some drivers to improve latency under low packet rates and + * improve throughput under high packet rates. Some drivers + * only implement one of RX or TX adaptive coalescing. Anything + * not implemented by the driver causes these values to be + * silently ignored. + */ + u32 use_adaptive_rx_coalesce; + u32 use_adaptive_tx_coalesce; + + /* When the packet rate (measured in packets per second) + * is below pkt_rate_low, the {rx,tx}_*_low parameters are + * used. + */ + u32 pkt_rate_low; + u32 rx_coalesce_usecs_low; + u32 rx_max_coalesced_frames_low; + u32 tx_coalesce_usecs_low; + u32 tx_max_coalesced_frames_low; + + /* When the packet rate is below pkt_rate_high but above + * pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used. + */ + + /* When the packet rate is (measured in packets per second) + * is above pkt_rate_high, the {rx,tx}_*_high parameters are + * used. + */ + u32 pkt_rate_high; + u32 rx_coalesce_usecs_high; + u32 rx_max_coalesced_frames_high; + u32 tx_coalesce_usecs_high; + u32 tx_max_coalesced_frames_high; + + /* How often to do adaptive coalescing packet rate sampling, + * measured in seconds. Must not be zero. + */ + u32 rate_sample_interval; +}; +#endif /* ETHTOOL_GCOALESCE */ + +#ifndef ETHTOOL_SCOALESCE +#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ +#endif +#ifndef ETHTOOL_GRINGPARAM +#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ +/* for configuring RX/TX ring parameters */ +#define ethtool_ringparam _kc_ethtool_ringparam +struct _kc_ethtool_ringparam { + u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ + + /* Read only attributes. These indicate the maximum number + * of pending RX/TX ring entries the driver will allow the + * user to set. + */ + u32 rx_max_pending; + u32 rx_mini_max_pending; + u32 rx_jumbo_max_pending; + u32 tx_max_pending; + + /* Values changeable by the user. The valid values are + * in the range 1 to the "*_max_pending" counterpart above. + */ + u32 rx_pending; + u32 rx_mini_pending; + u32 rx_jumbo_pending; + u32 tx_pending; +}; +#endif /* ETHTOOL_GRINGPARAM */ + +#ifndef ETHTOOL_SRINGPARAM +#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */ +#endif +#ifndef ETHTOOL_GPAUSEPARAM +#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ +/* for configuring link flow control parameters */ +#define ethtool_pauseparam _kc_ethtool_pauseparam +struct _kc_ethtool_pauseparam { + u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ + + /* If the link is being auto-negotiated (via ethtool_cmd.autoneg + * being true) the user may set 'autoneg' here non-zero to have the + * pause parameters be auto-negotiated too. In such a case, the + * {rx,tx}_pause values below determine what capabilities are + * advertised. + * + * If 'autoneg' is zero or the link is not being auto-negotiated, + * then {rx,tx}_pause force the driver to use/not-use pause + * flow control. + */ + u32 autoneg; + u32 rx_pause; + u32 tx_pause; +}; +#endif /* ETHTOOL_GPAUSEPARAM */ + +#ifndef ETHTOOL_SPAUSEPARAM +#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. 
*/ +#endif +#ifndef ETHTOOL_GRXCSUM +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_SRXCSUM +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GTXCSUM +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STXCSUM +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GSG +#define ETHTOOL_GSG 0x00000018 +/* Get scatter-gather enable + * (ethtool_value) + */ +#endif +#ifndef ETHTOOL_SSG +#define ETHTOOL_SSG 0x00000019 +/* Set scatter-gather enable + * (ethtool_value). + */ +#endif +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */ +#endif +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ +#endif +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ +#endif +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ +#endif +#ifndef ETHTOOL_GTSO +#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STSO +#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ +#endif + +#ifndef ETHTOOL_BUSINFO_LEN +#define ETHTOOL_BUSINFO_LEN 32 +#endif + +#ifndef WAKE_FILTER +#define WAKE_FILTER BIT(7) +#endif + +#ifndef SPEED_2500 +#define SPEED_2500 2500 +#endif +#ifndef SPEED_5000 +#define SPEED_5000 5000 +#endif +#ifndef SPEED_14000 +#define SPEED_14000 14000 +#endif +#ifndef SPEED_25000 +#define SPEED_25000 25000 +#endif +#ifndef SPEED_50000 +#define SPEED_50000 50000 +#endif +#ifndef SPEED_56000 +#define SPEED_56000 56000 +#endif +#ifndef SPEED_100000 +#define SPEED_100000 100000 +#endif +#ifndef SPEED_200000 +#define SPEED_200000 200000 +#endif + +#ifndef RHEL_RELEASE_VERSION +#define RHEL_RELEASE_VERSION(a, b) (((a) << 8) + (b)) +#endif +#ifndef AX_RELEASE_VERSION +#define AX_RELEASE_VERSION(a, b) (((a) << 8) + (b)) +#endif + +#ifndef AX_RELEASE_CODE +#define AX_RELEASE_CODE 0 +#endif + +#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3, 0)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5, 0) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3, 1)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5, 1) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3, 2)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5, 3) +#endif + +#ifndef RHEL_RELEASE_CODE +/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */ +#define RHEL_RELEASE_CODE 0 +#endif + +/* RHEL 7 didn't backport the parameter change in + * create_singlethread_workqueue. + * If/when RH corrects this we will want to tighten up the version check. + */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 0)) +#undef create_singlethread_workqueue +#define create_singlethread_workqueue(name) \ + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) +#endif + +/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find + * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new + * enough versions of Ubuntu. Otherwise you can simply see it in the output of + * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in + * the linux-source package, but in the linux-headers package. It begins to + * appear in later releases of 14.04 and 14.10. 
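+ * On kernels that do not define UTS_UBUNTU_RELEASE_ABI at all, the block
+ * below simply treats the build as non-Ubuntu and leaves both
+ * UTS_UBUNTU_RELEASE_ABI and UBUNTU_VERSION_CODE set to 0.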
+ * + * Ex: + * + * $uname -r + * 3.13.0-45-generic + * ABI is 45 + * + * + * $uname -r + * 3.16.0-23-generic + * ABI is 23 + */ +#ifndef UTS_UBUNTU_RELEASE_ABI +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#else +/* Ubuntu does not provide actual release version macro, so we use the kernel + * version plus the ABI to generate a unique version code specific to Ubuntu. + * In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in order to + * ignore differences in sublevel which are not important since we have the + * ABI value. Otherwise, it becomes impossible to correlate ABI to version for + * ordering checks. + * + * This also lets us store an ABI value up to 65535, since it can take the + * space that would use the lower byte of the Linux version code. + */ +#define UBUNTU_VERSION_CODE \ + (((~0xFF & LINUX_VERSION_CODE) << 8) + UTS_UBUNTU_RELEASE_ABI) + +#if UTS_UBUNTU_RELEASE_ABI > 65535 +#error UTS_UBUNTU_RELEASE_ABI is larger than 65535... +#endif /* UTS_UBUNTU_RELEASE_ABI > 65535 */ + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0)) +/* Our version code scheme does not make sense for non 3.x or newer kernels, + * and we have no support in kcompat for this scenario. Thus, treat this as a + * non-Ubuntu kernel. Possibly might be better to error here. + */ +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#endif /* <= 3.0.0 */ +#endif /* !UTS_UBUNTU_RELEASE_ABI */ + +/* We ignore the 3rd digit since we want to give precedence to the additional + * ABI value provided by Ubuntu. + */ +#define UBUNTU_VERSION(a, b, c, d) (((a) << 24) + ((b) << 16) + (d)) + +/* SLE_VERSION is used to generate a 3-digit encoding that can order SLE + * kernels based on their major release, service pack, and a possible + * maintenance release. + */ +#define SLE_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) + +/* The SLE_LOCALVERSION_CODE comes from a 3-digit code added as part of the + * Linux kernel version. It is extracted by the driver Makefile. This macro is + * used to generate codes for making comparisons below. + */ +#define SLE_LOCALVERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) + +#ifdef CONFIG_SUSE_KERNEL +/* Starting since at least SLE 12sp4 and SLE 15, the SUSE kernels have + * provided CONFIG_SUSE_VERSION, CONFIG_SUSE_PATCHLEVEL and + * CONFIG_SUSE_AUXRELEASE. Use these to generate SLE_VERSION if available. + * Only fall back to the manual table otherwise. We expect all future versions + * of SLE kernels to include these values, so the table will remain only for + * the older releases. + */ +#ifdef CONFIG_SUSE_VERSION +#ifndef CONFIG_SUSE_PATCHLEVEL +#error "CONFIG_SUSE_VERSION exists but CONFIG_SUSE_PATCHLEVEL is missing" +#endif +#ifndef CONFIG_SUSE_AUXRELEASE +#error "CONFIG_SUSE_VERSION exists but CONFIG_SUSE_AUXRELEASE is missing" +#endif +#define SLE_VERSION_CODE \ + SLE_VERSION(CONFIG_SUSE_VERSION, CONFIG_SUSE_PATCHLEVEL, \ + CONFIG_SUSE_AUXRELEASE) +#else +/* If we do not have the CONFIG_SUSE_VERSION configuration values, fall back + * to the following table for older releases. 
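+ * Each branch below keys on LINUX_VERSION_CODE (plus SLE_LOCALVERSION_CODE
+ * where update kernels overlap) to choose the matching SLE_VERSION_CODE.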
+ */ +#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 27)) +/* SLES11 GA is 2.6.27 based */ +#define SLE_VERSION_CODE SLE_VERSION(11, 0, 0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 32)) +/* SLES11 SP1 is 2.6.32 based */ +#define SLE_VERSION_CODE SLE_VERSION(11, 1, 0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3, 0, 13)) +/* SLES11 SP2 GA is 3.0.13-0.27 */ +#define SLE_VERSION_CODE SLE_VERSION(11, 2, 0) +#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3, 0, 76))) +/* SLES11 SP3 GA is 3.0.76-0.11 */ +#define SLE_VERSION_CODE SLE_VERSION(11, 3, 0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3, 0, 101)) +#if (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(0, 8, 0)) +/* some SLES11sp2 update kernels up to 3.0.101-0.7.x */ +#define SLE_VERSION_CODE SLE_VERSION(11, 2, 0) +#elif (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(63, 0, 0)) +/* most SLES11sp3 update kernels */ +#define SLE_VERSION_CODE SLE_VERSION(11, 3, 0) +#else +/* SLES11 SP4 GA (3.0.101-63) and update kernels 3.0.101-63+ */ +#define SLE_VERSION_CODE SLE_VERSION(11, 4, 0) +#endif +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3, 12, 28)) +/** + * SLES12 GA is 3.12.28-4 + * kernel updates 3.12.xx-<33 through 52>[.yy] + */ +#define SLE_VERSION_CODE SLE_VERSION(12, 0, 0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3, 12, 49)) +/** + * SLES12 SP1 GA is 3.12.49-11 + * updates 3.12.xx-60.yy where xx={51..} + */ +#define SLE_VERSION_CODE SLE_VERSION(12, 1, 0) +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 21) && \ + (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 4, 59))) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 74) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(92, 0, 0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(93, 0, 0))) +/** SLES12 SP2 GA is 4.4.21-69. + * SLES12 SP2 updates before SLES12 SP3 are: 4.4.{21,38,49,59} + * SLES12 SP2 updates after SLES12 SP3 are: 4.4.{74,90,103,114,120} + * but they all use a SLE_LOCALVERSION_CODE matching 92.nn.y + */ +#define SLE_VERSION_CODE SLE_VERSION(12, 2, 0) +#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(4, 4, 73) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4, 4, 82) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4, 4, 92)) || \ + (LINUX_VERSION_CODE == KERNEL_VERSION(4, 4, 103) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(6, 33, 0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(6, 38, 0))) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 114) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(94, 0, 0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(95, 0, 0))) +/* SLES12 SP3 GM is 4.4.73-5 and update kernels are 4.4.82-6.3. + * SLES12 SP3 updates not conflicting with SP2 are: 4.4.{82,92} + * SLES12 SP3 updates conflicting with SP2 are: + * - 4.4.103-6.33.1, 4.4.103-6.38.1 + * - 4.4.{114,120}-94.nn.y + */ +#define SLE_VERSION_CODE SLE_VERSION(12, 3, 0) +#else +#error "This looks like a SUSE kernel, but it has an unrecognized local version code." +#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */ +#endif /* !CONFIG_SUSE_VERSION */ +#endif /* CONFIG_SUSE_KERNEL */ +#ifndef SLE_VERSION_CODE +#define SLE_VERSION_CODE 0 +#endif /* SLE_VERSION_CODE */ +#ifndef SLE_LOCALVERSION_CODE +#define SLE_LOCALVERSION_CODE 0 +#endif /* SLE_LOCALVERSION_CODE */ + +/* Include definitions from the new kcompat layout */ +#include "kcompat_defs.h" + +/* + * ADQ depends on __TC_MQPRIO_MODE_MAX and related kernel code + * added around 4.15. Some distributions (e.g. 
Oracle Linux 7.7) + * have done a partial back-port of that to their kernels based + * on older mainline kernels that did not include all the necessary + * kernel enablement to support ADQ. + * Undefine __TC_MQPRIO_MODE_MAX for all OSV distributions with + * kernels based on mainline kernels older than 4.15 except for + * RHEL, SLES and Ubuntu which are known to have good back-ports. + */ +#if (!RHEL_RELEASE_CODE && !SLE_VERSION_CODE && !UBUNTU_VERSION_CODE) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)) +#undef __TC_MQPRIO_MODE_MAX +#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(4,15,0) */ +#endif /* if (NOT RHEL && NOT SLES && NOT UBUNTU) */ + +#ifdef __KLOCWORK__ +* / +#ifdef ARRAY_SIZE +#undef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#define memcpy(dest, src, len) memcpy_s(dest, len, src, len) +#define memset(dest, ch, len) memset_s(dest, len, ch, len) + + static inline int _kc_test_and_clear_bit(int nr, + volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags = 0; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old & ~mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} +#define test_and_clear_bit(nr, addr) _kc_test_and_clear_bit(nr, addr) + +static inline int _kc_test_and_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags = 0; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old | mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} +#define test_and_set_bit(nr, addr) _kc_test_and_set_bit(nr, addr) + +#ifdef CONFIG_DYNAMIC_DEBUG +#undef dev_dbg +#define dev_dbg(dev, format, arg...) dev_printk(KERN_DEBUG, dev, format, ##arg) +#undef pr_debug +#define pr_debug(format, arg...) printk(KERN_DEBUG format, ##arg) +#endif /* CONFIG_DYNAMIC_DEBUG */ + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (n = NULL, \ + pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ + pos; pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), \ + member)) + +#ifdef uninitialized_var +#undef uninitialized_var +#define uninitialized_var(x) (x = *(&(x))) +#endif + +#ifdef WRITE_ONCE +#undef WRITE_ONCE +#define WRITE_ONCE(x, val) ((x) = (val)) +#endif /* WRITE_ONCE */ + +#ifdef wait_event_interruptible_timeout +#undef wait_event_interruptible_timeout +#define wait_event_interruptible_timeout(wq_head, condition, timeout) \ + ({ \ + long ret; \ + if ((condition)) \ + ret = timeout; \ + else \ + ret = 0; \ + ret; \ + }) +#endif /* wait_event_interruptible_timeout */ + +#ifdef max_t +#undef max_t +#define max_t(type, x, y) \ + ({ \ + type __x = (x); \ + type __y = (y); \ + __x > __y ? __x : __y; \ + }) +#endif /* max_t */ + +#ifdef min_t +#undef min_t +#define min_t(type, x, y) \ + ({ \ + type __x = (x); \ + type __y = (y); \ + __x < __y ? __x : __y; \ + }) +#endif /* min_t */ +#endif /* __KLOCWORK__ */ + +/* Older versions of GCC will trigger -Wformat-nonliteral warnings for const + * char * strings. Unfortunately, the implementation of do_trace_printk does + * this, in order to add a storage attribute to the memory. This was fixed in + * GCC 5.1, but we still use older distributions built with GCC 4.x. 
+ * + * The string pointer is only passed as a const char * to the __trace_bprintk + * function. Since that function has the __printf attribute, it will trigger + * the warnings. We can't remove the attribute, so instead we'll use the + * __diag macro to disable -Wformat-nonliteral around the call to + * __trace_bprintk. + */ +#if GCC_VERSION < 50100 +#define __trace_bprintk(ip, fmt, args...) \ + ({ \ + int err; \ + __diag_push(); \ + __diag(ignored "-Wformat-nonliteral"); \ + err = __trace_bprintk(ip, fmt, ##args); \ + __diag_pop(); \ + err; \ + }) +#endif /* GCC_VERSION < 5.1.0 */ + +/* Newer kernels removed */ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) && \ + (!(RHEL_RELEASE_CODE && \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3)) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15, 3, 0))))) +#define HAVE_PCI_ASPM_H +#endif + +/*****************************************************************************/ +/* 2.4.3 => 2.4.0 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3)) + +/**************************************/ +/* PCI DRIVER API */ + +#ifndef pci_set_dma_mask +#define pci_set_dma_mask _kc_pci_set_dma_mask +int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask); +#endif + +#ifndef pci_request_regions +#define pci_request_regions _kc_pci_request_regions +int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name); +#endif + +#ifndef pci_release_regions +#define pci_release_regions _kc_pci_release_regions +void _kc_pci_release_regions(struct pci_dev *pdev); +#endif + +/**************************************/ +/* NETWORK DRIVER API */ + +#ifndef alloc_etherdev +#define alloc_etherdev _kc_alloc_etherdev +struct net_device *_kc_alloc_etherdev(int sizeof_priv); +#endif + +#ifndef is_valid_ether_addr +#define is_valid_ether_addr _kc_is_valid_ether_addr +int _kc_is_valid_ether_addr(u8 *addr); +#endif + +/**************************************/ +/* MISCELLANEOUS */ + +#ifndef INIT_TQUEUE +#define INIT_TQUEUE(_tq, _routine, _data) \ + do { \ + INIT_LIST_HEAD(&(_tq)->list); \ + (_tq)->sync = 0; \ + (_tq)->routine = _routine; \ + (_tq)->data = _data; \ + } while (0) +#endif + +#endif /* 2.4.3 => 2.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 5)) +/* Generic MII registers. */ +#define MII_BMCR 0x00 /* Basic mode control register */ +#define MII_BMSR 0x01 /* Basic mode status register */ +#define MII_PHYSID1 0x02 /* PHYS ID 1 */ +#define MII_PHYSID2 0x03 /* PHYS ID 2 */ +#define MII_ADVERTISE 0x04 /* Advertisement control reg */ +#define MII_LPA 0x05 /* Link partner ability reg */ +#define MII_EXPANSION 0x06 /* Expansion register */ +/* Basic mode control register. */ +#define BMCR_FULLDPLX 0x0100 /* Full duplex */ +#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */ +/* Basic mode status register. */ +#define BMSR_ERCAP 0x0001 /* Ext-reg capability */ +#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */ +#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */ +#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */ +#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */ +#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */ +/* Advertisement control register. 
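+ * The ADVERTISE_* bits below belong to the MII_ADVERTISE (0x04) register
+ * defined above.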
*/ +#define ADVERTISE_CSMA 0x0001 /* Only selector supported */ +#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */ +#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ +#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ +#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */ +#define ADVERTISE_ALL \ + (ADVERTISE_10HALF | ADVERTISE_10FULL | ADVERTISE_100HALF | \ + ADVERTISE_100FULL) +/* Expansion register for auto-negotiation. */ +#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */ +#endif + +/*****************************************************************************/ +/* 2.4.6 => 2.4.3 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6)) + +#ifndef pci_set_power_state +#define pci_set_power_state _kc_pci_set_power_state +int _kc_pci_set_power_state(struct pci_dev *dev, int state); +#endif + +#ifndef pci_enable_wake +#define pci_enable_wake _kc_pci_enable_wake +int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable); +#endif + +#ifndef pci_disable_device +#define pci_disable_device _kc_pci_disable_device +void _kc_pci_disable_device(struct pci_dev *pdev); +#endif + +/* PCI PM entry point syntax changed, so don't support suspend/resume */ +#undef CONFIG_PM + +#endif /* 2.4.6 => 2.4.3 */ + +#ifndef HAVE_PCI_SET_MWI +#define pci_set_mwi(X) \ + pci_write_config_word(X, PCI_COMMAND, \ + adapter->hw.bus.pci_cmd_word | \ + PCI_COMMAND_INVALIDATE); +#define pci_clear_mwi(X) \ + pci_write_config_word(X, PCI_COMMAND, \ + adapter->hw.bus.pci_cmd_word & \ + ~PCI_COMMAND_INVALIDATE); +#endif + +/*****************************************************************************/ +/* 2.4.10 => 2.4.9 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 10)) + +/**************************************/ +/* MODULE API */ + +#ifndef MODULE_LICENSE +#define MODULE_LICENSE(X) +#endif + +/**************************************/ +/* OTHER */ + +#undef min +#define min(x, y) \ + ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void)(&_x == &_y); \ + _x < _y ? _x : _y; \ + }) + +#undef max +#define max(x, y) \ + ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void)(&_x == &_y); \ + _x > _y ? _x : _y; \ + }) + +#define min_t(type, x, y) \ + ({ \ + type _x = (x); \ + type _y = (y); \ + _x < _y ? _x : _y; \ + }) + +#define max_t(type, x, y) \ + ({ \ + type _x = (x); \ + type _y = (y); \ + _x > _y ? _x : _y; \ + }) + +#ifndef list_for_each_safe +#define list_for_each_safe(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) +#endif + +#ifndef ____cacheline_aligned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_aligned_in_smp ____cacheline_aligned +#else +#define ____cacheline_aligned_in_smp +#endif /* CONFIG_SMP */ +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 8)) +int _kc_snprintf(char *buf, size_t size, const char *fmt, ...); +#define snprintf(buf, size, fmt, args...) 
_kc_snprintf(buf, size, fmt, ##args) +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args) +#else /* 2.4.8 => 2.4.9 */ +int snprintf(char *buf, size_t size, const char *fmt, ...); +int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#endif +#endif /* 2.4.10 -> 2.4.6 */ + +/*****************************************************************************/ +/* 2.4.12 => 2.4.10 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 12)) +#ifndef HAVE_NETIF_MSG +#define HAVE_NETIF_MSG 1 +enum { + NETIF_MSG_DRV = 0x0001, + NETIF_MSG_PROBE = 0x0002, + NETIF_MSG_LINK = 0x0004, + NETIF_MSG_TIMER = 0x0008, + NETIF_MSG_IFDOWN = 0x0010, + NETIF_MSG_IFUP = 0x0020, + NETIF_MSG_RX_ERR = 0x0040, + NETIF_MSG_TX_ERR = 0x0080, + NETIF_MSG_TX_QUEUED = 0x0100, + NETIF_MSG_INTR = 0x0200, + NETIF_MSG_TX_DONE = 0x0400, + NETIF_MSG_RX_STATUS = 0x0800, + NETIF_MSG_PKTDATA = 0x1000, + NETIF_MSG_HW = 0x2000, + NETIF_MSG_WOL = 0x4000, +}; + +#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) +#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) +#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) +#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) +#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) +#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) +#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) +#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) +#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) +#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) +#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) +#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) +#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) +#endif /* !HAVE_NETIF_MSG */ +#endif /* 2.4.12 => 2.4.10 */ + +/*****************************************************************************/ +/* 2.4.13 => 2.4.12 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 13)) + +/**************************************/ +/* PCI DMA MAPPING */ + +#ifndef virt_to_page +#define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT)) +#endif + +#ifndef pci_map_page +#define pci_map_page _kc_pci_map_page +u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, + unsigned long offset, size_t size, int direction); +#endif + +#ifndef pci_unmap_page +#define pci_unmap_page _kc_pci_unmap_page +void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, + int direction); +#endif + +/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */ + +#undef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0xffffffff +#undef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffff + +/**************************************/ +/* OTHER */ + +#ifndef cpu_relax +#define cpu_relax() rep_nop() +#endif + +struct vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + unsigned short h_vlan_proto; + unsigned short h_vlan_TCI; + unsigned short h_vlan_encapsulated_proto; +}; +#endif /* 2.4.13 => 2.4.12 */ + +/*****************************************************************************/ +/* 2.4.17 => 2.4.12 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 17)) + +#ifndef __devexit_p +#define __devexit_p(x) (&(x)) +#endif + +#endif /* 2.4.17 => 2.4.13 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < 
KERNEL_VERSION(2, 4, 18)) +#define NETIF_MSG_HW 0x2000 +#define NETIF_MSG_WOL 0x4000 + +#ifndef netif_msg_hw +#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) +#endif +#ifndef netif_msg_wol +#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) +#endif +#endif /* 2.4.18 */ + +/*****************************************************************************/ + +/*****************************************************************************/ +/* 2.4.20 => 2.4.19 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 20)) + +/* we won't support NAPI on less than 2.4.20 */ +#ifdef NAPI +#undef NAPI +#endif + +#endif /* 2.4.20 => 2.4.19 */ + +/*****************************************************************************/ +/* 2.4.22 => 2.4.17 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 22)) +#define pci_name(x) ((x)->slot_name) +#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map) + +#ifndef SUPPORTED_10000baseT_Full +#define SUPPORTED_10000baseT_Full BIT(12) +#endif +#ifndef ADVERTISED_10000baseT_Full +#define ADVERTISED_10000baseT_Full BIT(12) +#endif +#endif + +/*****************************************************************************/ +/* 2.4.22 => 2.4.17 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 22)) +#endif + +/*****************************************************************************/ +/*****************************************************************************/ +/* 2.4.23 => 2.4.22 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 23)) +/*****************************************************************************/ +#ifdef NAPI +#ifndef netif_poll_disable +#define netif_poll_disable(x) _kc_netif_poll_disable(x) +static inline void _kc_netif_poll_disable(struct net_device *netdev) +{ + while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) { + /* No hurry */ + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(1); + } +} +#endif +#ifndef netif_poll_enable +#define netif_poll_enable(x) _kc_netif_poll_enable(x) +static inline void _kc_netif_poll_enable(struct net_device *netdev) +{ + clear_bit(__LINK_STATE_RX_SCHED, &netdev->state); +} +#endif +#endif /* NAPI */ +#ifndef netif_tx_disable +#define netif_tx_disable(x) _kc_netif_tx_disable(x) +static inline void _kc_netif_tx_disable(struct net_device *dev) +{ + spin_lock_bh(&dev->xmit_lock); + netif_stop_queue(dev); + spin_unlock_bh(&dev->xmit_lock); +} +#endif +#else /* 2.4.23 => 2.4.22 */ +#define HAVE_SCTP +#endif /* 2.4.23 => 2.4.22 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 25) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 4))) +#define ETHTOOL_OPS_COMPAT +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 27)) +#define __user +#endif /* < 2.4.27 */ + +/*****************************************************************************/ +/* 2.5.71 => 2.4.x */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 71)) +#define sk_protocol protocol +#define pci_get_device pci_find_device +#endif /* 2.5.70 => 2.4.x */ + +/*****************************************************************************/ +/* < 2.4.27 or 2.6.0 <= 2.6.5 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 27) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 5))) + +#ifndef netif_msg_init +#define 
netif_msg_init _kc_netif_msg_init +static inline u32 _kc_netif_msg_init(int debug_value, + int default_msg_enable_bits) +{ + /* use default */ + if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) + return default_msg_enable_bits; + if (debug_value == 0) /* no output */ + return 0; + /* set low N bits */ + return (1 << debug_value) - 1; +} +#endif + +#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */ +/*****************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 27)) || \ + ((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 3)))) +#define netdev_priv(x) x->priv +#endif + +/*****************************************************************************/ +/* <= 2.5.0 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)) +#include +#undef pci_register_driver +#define pci_register_driver pci_module_init + +/* + * Most of the dma compat code is copied/modifed from the 2.4.37 + * /include/linux/libata-compat.h header file + */ +/* These definitions mirror those in pci.h, so they can be used + * interchangeably with their PCI_ counterparts */ +enum dma_data_direction { + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; + +struct device { + struct pci_dev pdev; +}; + +static inline struct pci_dev *to_pci_dev(struct device *dev) +{ + return (struct pci_dev *)dev; +} +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return (struct device *)pdev; +} +#define pdev_printk(lvl, pdev, fmt, args...) \ + printk("%s %s: " fmt, lvl, pci_name(pdev), ##args) +#define dev_err(dev, fmt, args...) \ + pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ##args) +#define dev_info(dev, fmt, args...) \ + pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ##args) +#define dev_warn(dev, fmt, args...) \ + pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ##args) +#define dev_notice(dev, fmt, args...) \ + pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ##args) +#define dev_dbg(dev, fmt, args...) \ + pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ##args) + +/* NOTE: dangerous! 
we ignore the 'gfp' argument */ +#define dma_alloc_coherent(dev, sz, dma, gfp) \ + pci_alloc_consistent(to_pci_dev(dev), (sz), (dma)) +#define dma_free_coherent(dev, sz, addr, dma_addr) \ + pci_free_consistent(to_pci_dev(dev), (sz), (addr), (dma_addr)) + +#define dma_map_page(dev, a, b, c, d) \ + pci_map_page(to_pci_dev(dev), (a), (b), (c), (d)) +#define dma_unmap_page(dev, a, b, c) \ + pci_unmap_page(to_pci_dev(dev), (a), (b), (c)) + +#define dma_map_single(dev, a, b, c) \ + pci_map_single(to_pci_dev(dev), (a), (b), (c)) +#define dma_unmap_single(dev, a, b, c) \ + pci_unmap_single(to_pci_dev(dev), (a), (b), (c)) + +#define dma_map_sg(dev, sg, nents, dir) \ + pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir)) +#define dma_unmap_sg(dev, sg, nents, dir) \ + pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir)) + +#define dma_sync_single(dev, a, b, c) \ + pci_dma_sync_single(to_pci_dev(dev), (a), (b), (c)) + +/* for range just sync everything, that's all the pci API can do */ +#define dma_sync_single_range(dev, addr, off, sz, dir) \ + pci_dma_sync_single(to_pci_dev(dev), (addr), (off) + (sz), (dir)) + +#define dma_set_mask(dev, mask) pci_set_dma_mask(to_pci_dev(dev), (mask)) + +/* hlist_* code - double linked lists */ +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next, **pprev; +}; + +static inline void __hlist_del(struct hlist_node *n) +{ + struct hlist_node *next = n->next; + struct hlist_node **pprev = n->pprev; + *pprev = next; + if (next) + next->pprev = pprev; +} + +static inline void hlist_del(struct hlist_node *n) +{ + __hlist_del(n); + n->next = NULL; + n->pprev = NULL; +} + +static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) +{ + struct hlist_node *first = h->first; + + n->next = first; + if (first) + first->pprev = &n->next; + h->first = n; + n->pprev = &h->first; +} + +static inline int hlist_empty(const struct hlist_head *h) +{ + return !h->first; +} +#define HLIST_HEAD_INIT \ + { \ + .first = NULL \ + } +#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } +#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) +static inline void INIT_HLIST_NODE(struct hlist_node *h) +{ + h->next = NULL; + h->pprev = NULL; +} + +#ifndef might_sleep +#define might_sleep() +#endif +#else +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return &pdev->dev; +} +#endif /* <= 2.5.0 */ + +/*****************************************************************************/ +/* 2.5.28 => 2.4.23 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 28)) + +#include +#define work_struct tq_struct +#undef INIT_WORK +#define INIT_WORK(a, b) INIT_TQUEUE(a, (void (*)(void *))b, a) +#undef container_of +#define container_of list_entry +#define schedule_work schedule_task +#define flush_scheduled_work flush_scheduled_tasks +#define cancel_work_sync(x) flush_scheduled_work() + +#endif /* 2.5.28 => 2.4.17 */ + +/*****************************************************************************/ +/* 2.6.0 => 2.5.28 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +#ifndef read_barrier_depends +#define read_barrier_depends() rmb() +#endif + +#ifndef rcu_head +struct __kc_callback_head { + struct __kc_callback_head *next; + void (*func)(struct callback_head *head); +}; +#define rcu_head __kc_callback_head +#endif + +#undef get_cpu +#define get_cpu() smp_processor_id() +#undef put_cpu +#define put_cpu() \ + do { \ + } while (0) +#define MODULE_INFO(version, _version) + +#define dma_set_coherent_mask(dev, mask) 1 + 
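+/* The rest of this pre-2.6.0 block supplies local fallbacks for helpers the
+ * driver expects elsewhere: dev_put(), skb_fill_page_desc(), ALIGN(),
+ * page_count(), find_first_bit()/find_next_bit(), netdev_name(), strlcpy()
+ * and do_div().
+ */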
+#undef dev_put +#define dev_put(dev) __dev_put(dev) + +#ifndef skb_fill_page_desc +#define skb_fill_page_desc _kc_skb_fill_page_desc +void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, + int off, int size); +#endif + +#undef ALIGN +#define ALIGN(x, a) (((x) + (a)-1) & ~((a)-1)) + +#ifndef page_count +#define page_count(p) atomic_read(&(p)->count) +#endif + +#ifdef MAX_NUMNODES +#undef MAX_NUMNODES +#endif +#define MAX_NUMNODES 1 + +/* find_first_bit and find_next bit are not defined for most + * 2.4 kernels (except for the redhat 2.4.21 kernels + */ +#include +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) +#undef find_next_bit +#define find_next_bit _kc_find_next_bit +unsigned long _kc_find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset); +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) + +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (strchr(dev->name, '%')) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#ifndef strlcpy +#define strlcpy _kc_strlcpy +size_t _kc_strlcpy(char *dest, const char *src, size_t size); +#endif /* strlcpy */ + +#ifndef do_div +#if BITS_PER_LONG == 64 +#define do_div(n, base) \ + ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + __rem = ((uint64_t)(n)) % __base; \ + (n) = ((uint64_t)(n)) / __base; \ + __rem; \ + }) +#elif BITS_PER_LONG == 32 +uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor); +#define do_div(n, base) \ + ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + if (likely(((n) >> 32) == 0)) { \ + __rem = (uint32_t)(n) % __base; \ + (n) = (uint32_t)(n) / __base; \ + } else \ + __rem = _kc__div64_32(&(n), __base); \ + __rem; \ + }) +#else /* BITS_PER_LONG == ?? 
*/ +#error do_div() does not yet support the C64 +#endif /* BITS_PER_LONG */ +#endif /* do_div */ + +#ifndef NSEC_PER_SEC +#define NSEC_PER_SEC 1000000000L +#endif + +#undef HAVE_I2C_SUPPORT +#else /* 2.6.0 */ + +#endif /* 2.6.0 => 2.5.28 */ +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 3)) +#define dma_pool pci_pool +#define dma_pool_destroy pci_pool_destroy +#define dma_pool_alloc pci_pool_alloc +#define dma_pool_free pci_pool_free + +#define dma_pool_create(name, dev, size, align, allocation) \ + pci_pool_create((name), to_pci_dev(dev), (size), (align), (allocation)) +#endif /* < 2.6.3 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 4)) +#define MODULE_VERSION(_version) MODULE_INFO(version, _version) +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +/* 2.6.5 => 2.6.0 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 5)) +#define dma_sync_single_for_cpu dma_sync_single +#define dma_sync_single_for_device dma_sync_single +#define dma_sync_single_range_for_cpu dma_sync_single_range +#define dma_sync_single_range_for_device dma_sync_single_range +#ifndef pci_dma_mapping_error +#define pci_dma_mapping_error _kc_pci_dma_mapping_error +static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr) +{ + return dma_addr == 0; +} +#endif +#endif /* 2.6.5 => 2.6.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 4)) +int _kc_scnprintf(char *buf, size_t size, const char *fmt, ...); +#define scnprintf(buf, size, fmt, args...) 
_kc_scnprintf(buf, size, fmt, ##args) +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 6)) +/* taken from 2.6 include/linux/bitmap.h */ +#undef bitmap_zero +#define bitmap_zero _kc_bitmap_zero +static inline void _kc_bitmap_zero(unsigned long *dst, int nbits) +{ + if (nbits <= BITS_PER_LONG) + *dst = 0UL; + else { + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memset(dst, 0, len); + } +} +#define page_to_nid(x) 0 + +#endif /* < 2.6.6 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 7)) +#undef if_mii +#define if_mii _kc_if_mii +static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq) +{ + return (struct mii_ioctl_data *)&rq->ifr_ifru; +} + +#ifndef __force +#define __force +#endif +#endif /* < 2.6.7 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 8)) +#ifndef PCI_EXP_DEVCTL +#define PCI_EXP_DEVCTL 8 +#endif +#ifndef PCI_EXP_DEVCTL_CERE +#define PCI_EXP_DEVCTL_CERE 0x0001 +#endif +#define PCI_EXP_FLAGS 2 /* Capabilities register */ +#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */ +#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ +#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ +#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */ +#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ +#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ +#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ +#define PCI_EXP_DEVCAP 4 /* Device capabilities */ +#define PCI_EXP_DEVSTA 10 /* Device Status */ +#define msleep(x) \ + do { \ + set_current_state(TASK_UNINTERRUPTIBLE); \ + schedule_timeout((x * HZ) / 1000 + 2); \ + } while (0) + +#endif /* < 2.6.8 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 9)) +#include +#define __iomem + +#ifndef kcalloc +#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags) +void *_kc_kzalloc(size_t size, int flags); +#endif +#define MSEC_PER_SEC 1000L +static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j) +{ +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (MSEC_PER_SEC / HZ) * j; +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return (j + (HZ / MSEC_PER_SEC) - 1) / (HZ / MSEC_PER_SEC); +#else + return (j * MSEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return m * (HZ / MSEC_PER_SEC); +#else + return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; +#endif +} + +#define msleep_interruptible _kc_msleep_interruptible +static inline unsigned long _kc_msleep_interruptible(unsigned int msecs) +{ + unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1; + + while (timeout && !signal_pending(current)) { + __set_current_state(TASK_INTERRUPTIBLE); + timeout = schedule_timeout(timeout); + } + return _kc_jiffies_to_msecs(timeout); +} + +/* Basic mode control register. 
*/ +#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ + +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 +#endif +#ifndef __be16 +#define __be16 u16 +#endif +#ifndef __be32 +#define __be32 u32 +#endif +#ifndef __be64 +#define __be64 u64 +#endif + +static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) +{ + return (struct vlan_ethhdr *)skb->mac.raw; +} + +/* Wake-On-Lan options. */ +#define WAKE_PHY BIT(0) +#define WAKE_UCAST BIT(1) +#define WAKE_MCAST BIT(2) +#define WAKE_BCAST BIT(3) +#define WAKE_ARP BIT(4) +#define WAKE_MAGIC BIT(5) +#define WAKE_MAGICSECURE BIT(6) /* only meaningful if WAKE_MAGIC */ + +#define skb_header_pointer _kc_skb_header_pointer +static inline void *_kc_skb_header_pointer(const struct sk_buff *skb, + int offset, int len, void *buffer) +{ + int hlen = skb_headlen(skb); + + if (hlen - offset >= len) + return skb->data + offset; + +#ifdef MAX_SKB_FRAGS + if (skb_copy_bits(skb, offset, buffer, len) < 0) + return NULL; + + return buffer; +#else + return NULL; +#endif + +#ifndef NETDEV_TX_OK +#define NETDEV_TX_OK 0 +#endif +#ifndef NETDEV_TX_BUSY +#define NETDEV_TX_BUSY 1 +#endif +#ifndef NETDEV_TX_LOCKED +#define NETDEV_TX_LOCKED -1 +#endif +} + +#ifndef __bitwise +#define __bitwise +#endif +#endif /* < 2.6.9 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10)) +#ifdef module_param_array_named +#undef module_param_array_named +#define module_param_array_named(name, array, type, nump, perm) \ + static struct kparam_array __param_arr_##name = { \ + ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \ + sizeof(array[0]), array \ + }; \ + module_param_call(name, param_array_set, param_array_get, \ + &__param_arr_##name, perm) +#endif /* module_param_array_named */ +/* + * num_online is broken for all < 2.6.10 kernels. This is needed to support + * Node module parameter of rnp. 
+ */ +#undef num_online_nodes +#define num_online_nodes(n) 1 +extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES); +#undef node_online_map +#define node_online_map _kcompat_node_online_map +#define pci_get_class pci_find_class +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)) +#define PCI_D0 0 +#define PCI_D1 1 +#define PCI_D2 2 +#define PCI_D3hot 3 +#define PCI_D3cold 4 +typedef int pci_power_t; +#define pci_choose_state(pdev, state) state +#define PMSG_SUSPEND 3 +#define PCI_EXP_LNKCTL 16 + +#undef NETIF_F_LLTX + +#ifndef ARCH_HAS_PREFETCH +#define prefetch(X) +#endif + +#ifndef NET_IP_ALIGN +#define NET_IP_ALIGN 2 +#endif + +#define KC_USEC_PER_SEC 1000000L +#define usecs_to_jiffies _kc_usecs_to_jiffies +static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j) +{ +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (KC_USEC_PER_SEC / HZ) * j; +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return (j + (HZ / KC_USEC_PER_SEC) - 1) / (HZ / KC_USEC_PER_SEC); +#else + return (j * KC_USEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ); +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return m * (HZ / KC_USEC_PER_SEC); +#else + return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC; +#endif +} + +#define PCI_EXP_LNKCAP 12 /* Link Capabilities */ +#define PCI_EXP_LNKSTA 18 /* Link Status */ +#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ +#define PCI_EXP_SLTCTL 24 /* Slot Control */ +#define PCI_EXP_SLTSTA 26 /* Slot Status */ +#define PCI_EXP_RTCTL 28 /* Root Control */ +#define PCI_EXP_RTCAP 30 /* Root Capabilities */ +#define PCI_EXP_RTSTA 32 /* Root Status */ +#endif /* < 2.6.11 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12)) +#include +#define USE_REBOOT_NOTIFIER + +/* Generic MII registers. */ +#define MII_CTRL1000 0x09 /* 1000BASE-T control */ +#define MII_STAT1000 0x0a /* 1000BASE-T status */ +/* Advertisement control register. */ +#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ +#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */ +/* Link partner ability register. 
*/ +#define LPA_PAUSE_CAP 0x0400 /* Can pause */ +#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */ +/* 1000BASE-T Control register */ +#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ +#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ +/* 1000BASE-T Status register */ +#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ +#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ + +#ifndef is_zero_ether_addr +#define is_zero_ether_addr _kc_is_zero_ether_addr +static inline int _kc_is_zero_ether_addr(const u8 *addr) +{ + return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); +} +#endif /* is_zero_ether_addr */ +#ifndef is_multicast_ether_addr +#define is_multicast_ether_addr _kc_is_multicast_ether_addr +static inline int _kc_is_multicast_ether_addr(const u8 *addr) +{ + return addr[0] & 0x01; +} +#endif /* is_multicast_ether_addr */ +#endif /* < 2.6.12 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 13)) +#ifndef kstrdup +#define kstrdup _kc_kstrdup +char *_kc_kstrdup(const char *s, unsigned int gfp); +#endif +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)) +#define pm_message_t u32 +#ifndef kzalloc +#define kzalloc _kc_kzalloc +void *_kc_kzalloc(size_t size, int flags); +#endif + +/* Generic MII registers. */ +#define MII_ESTATUS 0x0f /* Extended Status */ +/* Basic mode status register. */ +#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ +/* Extended status register. */ +#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ +#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ + +#define SUPPORTED_Pause BIT(13) +#define SUPPORTED_Asym_Pause BIT(14) +#define ADVERTISED_Pause BIT(13) +#define ADVERTISED_Asym_Pause BIT(14) + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4, 3)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5, 0)))) +#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 9)) && !defined(gfp_t)) +#define gfp_t unsigned +#else +typedef unsigned gfp_t; +#endif +#endif /* !RHEL4.3->RHEL5.0 */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 9)) +#ifdef CONFIG_X86_64 +#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \ + dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir)) +#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \ + dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir)) +#endif +#endif +#endif /* < 2.6.14 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 15)) +#ifndef kfree_rcu +/* this is placed here due to a lack of rcu_barrier in previous kernels */ +#define kfree_rcu(_ptr, _offset) kfree(_ptr) +#endif /* kfree_rcu */ +#ifndef vmalloc_node +#define vmalloc_node(a, b) vmalloc(a) +#endif /* vmalloc_node*/ + +#define setup_timer(_timer, _function, _data) \ + do { \ + (_timer)->function = _function; \ + (_timer)->data = _data; \ + init_timer(_timer); \ + } while (0) +#ifndef device_can_wakeup +#define device_can_wakeup(dev) (1) +#endif +#ifndef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) \ + do { \ + } while (0) +#endif +#ifndef device_init_wakeup +#define device_init_wakeup(dev, val) \ + do { \ + } while (0) +#endif +static inline unsigned _kc_compare_ether_addr(const u8 
*addr1, const u8 *addr2) +{ + const u16 *a = (const u16 *)addr1; + const u16 *b = (const u16 *)addr2; + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; +} +#undef compare_ether_addr +#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2) +#endif /* < 2.6.15 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16)) +#undef DEFINE_MUTEX +#define DEFINE_MUTEX(x) DECLARE_MUTEX(x) +#define mutex_lock(x) down_interruptible(x) +#define mutex_unlock(x) up(x) + +#ifndef ____cacheline_internodealigned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp +#else +#define ____cacheline_internodealigned_in_smp +#endif /* CONFIG_SMP */ +#endif /* ____cacheline_internodealigned_in_smp */ +#undef HAVE_PCI_ERS +#else /* 2.6.16 and above */ +#undef HAVE_PCI_ERS +#define HAVE_PCI_ERS +#if (SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10, 4, 0)) +#ifdef device_can_wakeup +#undef device_can_wakeup +#endif /* device_can_wakeup */ +#define device_can_wakeup(dev) 1 +#endif /* SLE_VERSION(10,4,0) */ +#endif /* < 2.6.16 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)) +#ifndef dev_notice +#define dev_notice(dev, fmt, args...) dev_printk(KERN_NOTICE, dev, fmt, ##args) +#endif + +#ifndef first_online_node +#define first_online_node 0 +#endif +#ifndef NET_SKB_PAD +#define NET_SKB_PAD 16 +#endif +#endif /* < 2.6.17 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)) + +#ifndef IRQ_HANDLED +#define irqreturn_t void +#define IRQ_HANDLED +#define IRQ_NONE +#endif + +#ifndef IRQF_PROBE_SHARED +#ifdef SA_PROBEIRQ +#define IRQF_PROBE_SHARED SA_PROBEIRQ +#else +#define IRQF_PROBE_SHARED 0 +#endif +#endif + +#ifndef IRQF_SHARED +#define IRQF_SHARED SA_SHIRQ +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#ifndef skb_is_gso +#ifdef NETIF_F_TSO +#define skb_is_gso _kc_skb_is_gso +static inline int _kc_skb_is_gso(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_size; +} +#else +#define skb_is_gso(a) 0 +#endif +#endif + +#ifndef resource_size_t +#define resource_size_t unsigned long +#endif + +#ifdef skb_pad +#undef skb_pad +#endif +#define skb_pad(x, y) _kc_skb_pad(x, y) +int _kc_skb_pad(struct sk_buff *skb, int pad); +#ifdef skb_padto +#undef skb_padto +#endif +#define skb_padto(x, y) _kc_skb_padto(x, y) +static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + if (likely(size >= len)) + return 0; + return _kc_skb_pad(skb, len - size); +} + +#ifndef DECLARE_PCI_UNMAP_ADDR +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) u32 LEN_NAME +#define pci_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#define pci_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) +#endif /* DECLARE_PCI_UNMAP_ADDR */ +#endif /* < 2.6.18 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)) +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0x00, + PCIE_LNK_X1 = 0x01, + PCIE_LNK_X2 = 0x02, + PCIE_LNK_X4 = 0x04, + PCIE_LNK_X8 = 
0x08, + PCIE_LNK_X12 = 0x0C, + PCIE_LNK_X16 = 0x10, + PCIE_LNK_X32 = 0x20, + PCIE_LNK_WIDTH_UNKNOWN = 0xFF, +}; + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 0))) +#define i_private u.generic_ip +#endif /* >= RHEL 5.0 */ + +#ifndef DIV_ROUND_UP +#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d)) +#endif +#ifndef __ALIGN_MASK +#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 0)) +#if (!((RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4, 4) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5, 0)) || \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5, 0)))))) +typedef irqreturn_t (*irq_handler_t)(int, void *, struct pt_regs *); +#endif +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 0)) +#undef CONFIG_INET_LRO +#undef CONFIG_INET_LRO_MODULE +#undef CONFIG_FCOE +#undef CONFIG_FCOE_MODULE +#endif +typedef irqreturn_t (*new_handler_t)(int, void *); +static inline irqreturn_t _kc_request_irq(unsigned int irq, + new_handler_t handler, + unsigned long flags, + const char *devname, void *dev_id) +#else /* 2.4.x */ +typedef void (*irq_handler_t)(int, void *, struct pt_regs *); +typedef void (*new_handler_t)(int, void *); +static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, + unsigned long flags, const char *devname, + void *dev_id) +#endif /* >= 2.5.x */ +{ + irq_handler_t new_handler = (irq_handler_t)handler; + return request_irq(irq, new_handler, flags, devname, dev_id); +} + +#undef request_irq +#define request_irq(irq, handler, flags, devname, dev_id) \ + _kc_request_irq((irq), (handler), (flags), (devname), (dev_id)) + +#define irq_handler_t new_handler_t + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)) +#ifndef skb_checksum_help +static inline int __kc_skb_checksum_help(struct sk_buff *skb) +{ + return skb_checksum_help(skb, 0); +} +#define skb_checksum_help(skb) __kc_skb_checksum_help((skb)) +#endif +#endif /* < 2.6.19 && >= 2.6.11 */ + +/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 4))) +#define PCIE_CONFIG_SPACE_LEN 256 +#define PCI_CONFIG_SPACE_LEN 64 +#define PCIE_LINK_STATUS 0x12 +#define pci_config_space_ich8lan() \ + do { \ + } while (0) +#undef pci_save_state +int _kc_pci_save_state(struct pci_dev *); +#define pci_save_state(pdev) _kc_pci_save_state(pdev) +#undef pci_restore_state +void _kc_pci_restore_state(struct pci_dev *); +#define pci_restore_state(pdev) _kc_pci_restore_state(pdev) +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +#undef free_netdev +void _kc_free_netdev(struct net_device *); +#define free_netdev(netdev) _kc_free_netdev(netdev) +#endif +static inline int +pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev) +{ + return 0; +} +#define pci_disable_pcie_error_reporting(dev) \ + do { \ + } while (0) +#define pci_cleanup_aer_uncorrect_error_status(dev) \ + do { \ + } while (0) + +void *_kc_kmemdup(const void *src, size_t len, unsigned gfp); +#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp) +#ifndef bool +#define bool _Bool +#define true 1 +#define false 0 +#endif +#else /* 2.6.19 */ +#include +#include + +#define NEW_SKB_CSUM_HELP +#endif /* < 2.6.19 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 28)) +#undef INIT_WORK +#define 
INIT_WORK(_work, _func) \ + do { \ + INIT_LIST_HEAD(&(_work)->entry); \ + (_work)->pending = 0; \ + (_work)->func = (void (*)(void *))_func; \ + (_work)->data = _work; \ + init_timer(&(_work)->timer); \ + } while (0) +#endif + +#ifndef PCI_VDEVICE +#define PCI_VDEVICE(ven, dev) \ + PCI_VENDOR_ID_##ven, (dev), PCI_ANY_ID, PCI_ANY_ID, 0, 0 +#endif + +#ifndef PCI_VENDOR_ID_INTEL +#define PCI_VENDOR_ID_INTEL 0x8086 +#endif + +#ifndef round_jiffies +#define round_jiffies(x) x +#endif + +#define csum_offset csum + +#define HAVE_EARLY_VMALLOC_NODE +#define dev_to_node(dev) -1 +#undef set_dev_node +/* remove compiler warning with b=b, for unused variable */ +#define set_dev_node(a, b) \ + do { \ + (b) = (b); \ + } while (0) + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4, 7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5, 0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 6)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10, 2, 0))) +typedef __u16 __bitwise __sum16; +typedef __u32 __bitwise __wsum; +#endif + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4, 7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5, 0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 4)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10, 2, 0))) +static inline __wsum csum_unfold(__sum16 n) +{ + return (__force __wsum)n; +} +#endif + +#else /* < 2.6.20 */ +#define HAVE_DEVICE_NUMA_NODE +#endif /* < 2.6.20 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21)) +#define to_net_dev(class) container_of(class, struct net_device, class_dev) +#define NETDEV_CLASS_DEV +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5, 5))) +#define vlan_group_get_device(vg, id) (vg->vlan_devices[id]) +#define vlan_group_set_device(vg, id, dev) \ + do { \ + if (vg) \ + vg->vlan_devices[id] = dev; \ + } while (0) +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */ +#define pci_channel_offline(pdev) \ + (pdev->error_state && pdev->error_state != pci_channel_io_normal) +#define pci_request_selected_regions(pdev, bars, name) \ + pci_request_regions(pdev, name) +#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev); + +#ifndef __aligned +#define __aligned(x) __attribute__((aligned(x))) +#endif + +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev); +#define netdev_to_dev(netdev) pci_dev_to_dev(_kc_netdev_to_pdev(netdev)) +#define devm_kzalloc(dev, size, flags) kzalloc(size, flags) +#define devm_kfree(dev, p) kfree(p) +#else /* 2.6.21 */ +static inline struct device *netdev_to_dev(struct net_device *netdev) +{ + return &netdev->dev; +} + +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22)) +#define tcp_hdr(skb) (skb->h.th) +#define tcp_hdrlen(skb) (skb->h.th->doff << 2) +#define skb_transport_offset(skb) (skb->h.raw - skb->data) +#define skb_transport_header(skb) (skb->h.raw) +#define ipv6_hdr(skb) (skb->nh.ipv6h) +#define ip_hdr(skb) (skb->nh.iph) +#define skb_network_offset(skb) (skb->nh.raw - skb->data) +#define skb_network_header(skb) (skb->nh.raw) +#define skb_tail_pointer(skb) skb->tail +#define skb_reset_tail_pointer(skb) \ + do { \ + skb->tail = skb->data; \ + } while (0) +#define skb_set_tail_pointer(skb, offset) \ + do { \ + skb->tail = skb->data + offset; \ + } while (0) 
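The 2.6.22 block above maps the newer sk_buff header accessors (ip_hdr(), tcp_hdr(), tcp_hdrlen(), skb_transport_offset(), ...) onto the old skb->nh/skb->h union fields, so datapath code can be written once against the current helpers. A minimal sketch of that usage, assuming an IPv4 frame; example_l4_hdr_end() is a hypothetical helper, not part of the patch:

/* Illustrative only: offset of the end of the TCP header, computed with
 * the accessor macros above.  On pre-2.6.22 kernels ip_hdr(),
 * skb_transport_offset() and tcp_hdrlen() expand to the skb->nh.iph,
 * skb->h.raw and skb->h.th fields; on newer kernels the real helpers
 * are used unchanged.
 */
static inline unsigned int example_l4_hdr_end(const struct sk_buff *skb)
{
	if (ip_hdr(skb)->protocol != IPPROTO_TCP)
		return 0;

	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}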
+#define skb_copy_to_linear_data(skb, from, len) memcpy(skb->data, from, len) +#define skb_copy_to_linear_data_offset(skb, offset, from, len) \ + memcpy(skb->data + offset, from, len) +#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw) +#define pci_register_driver pci_module_init +#define skb_mac_header(skb) skb->mac.raw + +#ifdef NETIF_F_MULTI_QUEUE +#ifndef alloc_etherdev_mq +#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a) +#endif +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef ETH_FCS_LEN +#define ETH_FCS_LEN 4 +#endif +#define cancel_work_sync(x) flush_scheduled_work() +#ifndef udp_hdr +#define udp_hdr _udp_hdr +static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) +{ + return (struct udphdr *)skb_transport_header(skb); +} +#endif + +#ifdef cpu_to_be16 +#undef cpu_to_be16 +#endif +#define cpu_to_be16(x) __constant_htons(x) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5, 1))) +enum { DUMP_PREFIX_NONE, DUMP_PREFIX_ADDRESS, DUMP_PREFIX_OFFSET }; +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */ +#ifndef hex_asc +#define hex_asc(x) "0123456789abcdef"[x] +#endif +#include +void _kc_print_hex_dump(const char *level, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii); +#define print_hex_dump(lvl, s, t, r, g, b, l, a) \ + _kc_print_hex_dump(lvl, s, t, r, g, b, l, a) +#ifndef ADVERTISED_2500baseX_Full +#define ADVERTISED_2500baseX_Full BIT(15) +#endif +#ifndef SUPPORTED_2500baseX_Full +#define SUPPORTED_2500baseX_Full BIT(15) +#endif + +#ifndef ETH_P_PAUSE +#define ETH_P_PAUSE 0x8808 +#endif + +static inline int compound_order(struct page *page) +{ + return 0; +} + +#define __must_be_array(a) 0 + +#ifndef SKB_WITH_OVERHEAD +#define SKB_WITH_OVERHEAD(X) \ + ((X)-SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#endif +#else /* 2.6.22 */ +#define ETH_TYPE_TRANS_SETS_DEV +#define HAVE_NETDEV_STATS_IN_NETDEV +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22)) +#undef SET_MODULE_OWNER +#define SET_MODULE_OWNER(dev) \ + do { \ + } while (0) +#endif /* > 2.6.22 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)) +#define netif_subqueue_stopped(_a, _b) 0 +#ifndef PTR_ALIGN +#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) +#endif + +#ifndef CONFIG_PM_SLEEP +#define CONFIG_PM_SLEEP CONFIG_PM +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13)) +#define HAVE_ETHTOOL_GET_PERM_ADDR +#endif /* 2.6.14 through 2.6.22 */ + +static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom) +{ + int delta = 0; + + if (headroom > (skb->data - skb->head)) + delta = headroom - (skb->data - skb->head); + + if (delta || skb_header_cloned(skb)) + return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, + GFP_ATOMIC); + return 0; +} +#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h)) +#endif /* < 2.6.23 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) +#ifndef ETH_FLAG_LRO +#define ETH_FLAG_LRO NETIF_F_LRO +#endif + +#ifndef ACCESS_ONCE +#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) +#endif + +/* if GRO is supported then the napi struct must already exist */ +#ifndef NETIF_F_GRO +/* NAPI API changes in 2.6.24 break everything */ +struct napi_struct { + /* used 
to look up the real NAPI polling routine */ + int (*poll)(struct napi_struct *, int); + struct net_device *dev; + int weight; +}; +#endif + +#ifdef NAPI +int __kc_adapter_clean(struct net_device *, int *); +/* The following definitions are multi-queue aware, and thus we have a driver + * define list which determines which drivers support multiple queues, and + * thus need these stronger defines. If a driver does not support multi-queue + * functionality, you don't need to add it to this list. + */ +struct net_device *napi_to_poll_dev(const struct napi_struct *napi); + +static inline void +__kc_mq_netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight) +{ + struct net_device *poll_dev = napi_to_poll_dev(napi); + poll_dev->poll = __kc_adapter_clean; + poll_dev->priv = napi; + poll_dev->weight = weight; + set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); + set_bit(__LINK_STATE_START, &poll_dev->state); + dev_hold(poll_dev); + napi->poll = poll; + napi->weight = weight; + napi->dev = dev; +} +#define netif_napi_add __kc_mq_netif_napi_add + +static inline void __kc_mq_netif_napi_del(struct napi_struct *napi) +{ + struct net_device *poll_dev = napi_to_poll_dev(napi); + WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); + dev_put(poll_dev); + memset(poll_dev, 0, sizeof(struct net_device)); +} + +#define netif_napi_del __kc_mq_netif_napi_del + +static inline bool __kc_mq_napi_schedule_prep(struct napi_struct *napi) +{ + return netif_running(napi->dev) && + netif_rx_schedule_prep(napi_to_poll_dev(napi)); +} +#define napi_schedule_prep __kc_mq_napi_schedule_prep + +static inline void __kc_mq_napi_schedule(struct napi_struct *napi) +{ + if (napi_schedule_prep(napi)) + __netif_rx_schedule(napi_to_poll_dev(napi)); +} +#define napi_schedule __kc_mq_napi_schedule + +#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi)) +#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi)) +#ifdef CONFIG_SMP +static inline void napi_synchronize(const struct napi_struct *n) +{ + struct net_device *dev = napi_to_poll_dev(n); + + while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) { + /* No hurry. */ + msleep(1); + } +} +#else +#define napi_synchronize(n) barrier() +#endif /* CONFIG_SMP */ +#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi)) +static inline void _kc_napi_complete(struct napi_struct *napi) +{ +#ifdef NETIF_F_GRO + napi_gro_flush(napi); +#endif + netif_rx_complete(napi_to_poll_dev(napi)); +} +#define napi_complete _kc_napi_complete +#else /* NAPI */ + +/* The following definitions are only used if we don't support NAPI at all. */ + +static inline __kc_netif_napi_add(struct net_device *dev, + struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), + int weight) +{ + dev->poll = poll; + dev->weight = weight; + napi->poll = poll; + napi->weight = weight; + napi->dev = dev; +} +#define netif_napi_del(_a) \ + do { \ + } while (0) +#endif /* NAPI */ + +#undef dev_get_by_name +#define dev_get_by_name(_a, _b) dev_get_by_name(_b) +#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b) +#ifndef DMA_BIT_MASK +#define DMA_BIT_MASK(n) (((n) == 64) ? 
DMA_64BIT_MASK : ((1ULL << (n)) - 1)) +#endif + +#ifdef NETIF_F_TSO6 +#define skb_is_gso_v6 _kc_skb_is_gso_v6 +static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; +} +#endif /* NETIF_F_TSO6 */ + +#ifndef KERN_CONT +#define KERN_CONT "" +#endif +#ifndef pr_err +#define pr_err(fmt, arg...) printk(KERN_ERR fmt, ##arg) +#endif + +#ifndef rounddown_pow_of_two +#define rounddown_pow_of_two(n) \ + __builtin_constant_p(n) ? ((n == 1) ? 0 : (1UL << ilog2(n))) : \ + (1UL << (fls_long(n) - 1)) +#endif + +#else /* < 2.6.24 */ +#define HAVE_ETHTOOL_GET_SSET_COUNT +#define HAVE_NETDEV_NAPI_LIST +#endif /* < 2.6.24 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 24)) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) +#define INCLUDE_PM_QOS_PARAMS_H +#include +#else /* >= 3.2.0 */ +#include +#endif /* else >= 3.2.0 */ +#endif /* > 2.6.24 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)) +#define PM_QOS_CPU_DMA_LATENCY 1 + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)) +#include +#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY +#define pm_qos_add_requirement(pm_qos_class, name, value) \ + set_acceptable_latency(name, value) +#define pm_qos_remove_requirement(pm_qos_class, name) \ + remove_acceptable_latency(name) +#define pm_qos_update_requirement(pm_qos_class, name, value) \ + modify_acceptable_latency(name, value) +#else +#define PM_QOS_DEFAULT_VALUE -1 +#define pm_qos_add_requirement(pm_qos_class, name, value) +#define pm_qos_remove_requirement(pm_qos_class, name) +#define pm_qos_update_requirement(pm_qos_class, name, value) \ + { \ + if (value != PM_QOS_DEFAULT_VALUE) { \ + printk(KERN_WARNING \ + "%s: unable to set PM QoS requirement\n", \ + pci_name(adapter->pdev)); \ + } \ + } + +#endif /* > 2.6.18 */ + +#define pci_enable_device_mem(pdev) pci_enable_device(pdev) + +#ifndef DEFINE_PCI_DEVICE_TABLE +#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] +#endif /* DEFINE_PCI_DEVICE_TABLE */ + +#ifndef strict_strtol +#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r) +static inline int _kc_strict_strtol(const char *buf, unsigned int base, + long *res) +{ + /* adapted from strict_strtoul() in 2.6.25 */ + char *tail; + long val; + size_t len; + + *res = 0; + len = strlen(buf); + if (!len) + return -EINVAL; + val = simple_strtol(buf, &tail, base); + if (tail == buf) + return -EINVAL; + if ((*tail == '\0') || + ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) { + *res = val; + return 0; + } + + return -EINVAL; +} +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#ifndef RNP_PROCFS +#define RNP_PROCFS +#endif /* RNP_PROCFS */ +#endif /* >= 2.6.0 */ + +#else /* < 2.6.25 */ + +#if IS_ENABLED(CONFIG_SYSFS) +#ifndef RNP_SYSFS +#define RNP_SYSFS +#endif /* RNP_SYSFS */ +#endif /* CONFIG_SYSFS */ +#if IS_ENABLED(CONFIG_HWMON) +#ifndef RNP_HWMON +#define RNP_HWMON +#endif /* RNP_HWMON */ +#endif /* CONFIG_HWMON */ + +#endif /* < 2.6.25 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)) +#ifndef clamp_t +#define clamp_t(type, val, min, max) \ + ({ \ + type __val = (val); \ + type __min = (min); \ + type __max = (max); \ + __val = __val < __min ? __min : __val; \ + __val > __max ? 
__max : __val; \ + }) +#endif /* clamp_t */ +#undef kzalloc_node +#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags) + +void _kc_pci_disable_link_state(struct pci_dev *dev, int state); +#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s) +#else /* < 2.6.26 */ +#define NETDEV_CAN_SET_GSO_MAX_SIZE +#ifdef HAVE_PCI_ASPM_H +#include +#endif +#define HAVE_NETDEV_VLAN_FEATURES +#ifndef PCI_EXP_LNKCAP_ASPMS +#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ +#endif /* PCI_EXP_LNKCAP_ASPMS */ +#endif /* < 2.6.26 */ +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)) +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)speed; + /* ep->speed_hi = (__u16)(speed >> 16); */ +} +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set + +static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep) +{ + /* no speed_hi before 2.6.27, and probably no need for it yet */ + return (__u32)ep->speed; +} +#define ethtool_cmd_speed _kc_ethtool_cmd_speed + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 15)) +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)) && defined(CONFIG_PM)) +#define ANCIENT_PM 1 +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)) && \ + defined(CONFIG_PM_SLEEP)) +#define NEWER_PM 1 +#endif +#if defined(ANCIENT_PM) || defined(NEWER_PM) +#undef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) \ + do { \ + u16 pmc = 0; \ + int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \ + if (pm) { \ + pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \ + &pmc); \ + } \ + (dev)->power.can_wakeup = !!(pmc >> 11); \ + (dev)->power.should_wakeup = (val && (pmc >> 11)); \ + } while (0) +#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */ +#endif /* 2.6.15 through 2.6.27 */ +#ifndef netif_napi_del +#define netif_napi_del(_a) \ + do { \ + } while (0) +#ifdef NAPI +#ifdef CONFIG_NETPOLL +#undef netif_napi_del +#define netif_napi_del(_a) list_del(&(_a)->dev_list); +#endif +#endif +#endif /* netif_napi_del */ +#ifdef dma_mapping_error +#undef dma_mapping_error +#endif +#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr) + +#ifdef CONFIG_NETDEVICES_MULTIQUEUE +#define HAVE_TX_MQ +#endif + +#ifndef DMA_ATTR_WEAK_ORDERING +#define DMA_ATTR_WEAK_ORDERING 0 +#endif + +#ifdef HAVE_TX_MQ +void _kc_netif_tx_stop_all_queues(struct net_device *); +void _kc_netif_tx_wake_all_queues(struct net_device *); +void _kc_netif_tx_start_all_queues(struct net_device *); +#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a) +#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a) +#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a) +#undef netif_stop_subqueue +#define netif_stop_subqueue(_ndev, _qi) \ + do { \ + if (netif_is_multiqueue((_ndev))) \ + netif_stop_subqueue((_ndev), (_qi)); \ + else \ + netif_stop_queue((_ndev)); \ + } while (0) +#undef netif_start_subqueue +#define netif_start_subqueue(_ndev, _qi) \ + do { \ + if (netif_is_multiqueue((_ndev))) \ + netif_start_subqueue((_ndev), (_qi)); \ + else \ + netif_start_queue((_ndev)); \ + } while (0) +#else /* HAVE_TX_MQ */ +#define netif_tx_stop_all_queues(a) netif_stop_queue(a) +#define netif_tx_wake_all_queues(a) netif_wake_queue(a) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 12)) +#define 
netif_tx_start_all_queues(a) netif_start_queue(a) +#else +#define netif_tx_start_all_queues(a) \ + do { \ + } while (0) +#endif +#define netif_stop_subqueue(_ndev, _qi) netif_stop_queue((_ndev)) +#define netif_start_subqueue(_ndev, _qi) netif_start_queue((_ndev)) +#endif /* HAVE_TX_MQ */ +#ifndef NETIF_F_MULTI_QUEUE +#define NETIF_F_MULTI_QUEUE 0 +#define netif_is_multiqueue(a) 0 +#define netif_wake_subqueue(a, b) +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef __WARN_printf +void __kc_warn_slowpath(const char *file, const int line, const char *fmt, ...) + __attribute__((format(printf, 3, 4))); +#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg) +#endif /* __WARN_printf */ + +#ifndef WARN +#define WARN(condition, format...) \ + ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf(format); \ + unlikely(__ret_warn_on); \ + }) +#endif /* WARN */ +#undef HAVE_RNP_DEBUG_FS +#else /* < 2.6.27 */ +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)(speed & 0xFFFF); + ep->speed_hi = (__u16)(speed >> 16); +} +#define HAVE_TX_MQ +#define HAVE_NETDEV_SELECT_QUEUE +#ifdef CONFIG_DEBUG_FS +#define HAVE_RNP_DEBUG_FS +#endif /* CONFIG_DEBUG_FS */ +#endif /* < 2.6.27 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)) +#define pci_ioremap_bar(pdev, bar) \ + ioremap(pci_resource_start(pdev, bar), pci_resource_len(pdev, bar)) +#define pci_wake_from_d3 _kc_pci_wake_from_d3 +#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep +int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable); +int _kc_pci_prepare_to_sleep(struct pci_dev *dev); +#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC) +#ifndef __skb_queue_head_init +static inline void __kc_skb_queue_head_init(struct sk_buff_head *list) +{ + list->prev = list->next = (struct sk_buff *)list; + list->qlen = 0; +} +#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q) +#endif + +#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ +#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ + +#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */ +#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */ + +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)) +#ifndef swap +#define swap(a, b) \ + do { \ + typeof(a) __tmp = (a); \ + (a) = (b); \ + (b) = __tmp; \ + } while (0) +#endif +#define pci_request_selected_regions_exclusive(pdev, bars, name) \ + pci_request_selected_regions(pdev, bars, name) +#ifndef CONFIG_NR_CPUS +#define CONFIG_NR_CPUS 1 +#endif /* CONFIG_NR_CPUS */ +#ifndef pcie_aspm_enabled +#define pcie_aspm_enabled() (1) +#endif /* pcie_aspm_enabled */ + +#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ + +#ifndef PCI_EXP_LNKSTA_CLS +#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ +#endif +#ifndef PCI_EXP_LNKSTA_NLW +#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ +#endif + +#ifndef pci_clear_master +void _kc_pci_clear_main(struct pci_dev *dev); +#define pci_clear_master(dev) _kc_pci_clear_main(dev) +#endif + +#ifndef PCI_EXP_LNKCTL_ASPMC +#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ +#endif + +#ifndef PCI_EXP_LNKCAP_MLW +#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */ +#endif + +#else /* < 
2.6.29 */ +#ifndef HAVE_NET_DEVICE_OPS +#define HAVE_NET_DEVICE_OPS +#endif +#ifdef CONFIG_DCB +#define HAVE_PFC_MODE_ENABLE +#endif /* CONFIG_DCB */ +#endif /* < 2.6.29 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)) +#define NO_PTP_SUPPORT +#define skb_rx_queue_recorded(a) false +#define skb_get_rx_queue(a) 0 +#define skb_record_rx_queue(a, b) \ + do { \ + } while (0) +#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues) +#undef CONFIG_FCOE +#undef CONFIG_FCOE_MODULE +#ifndef CONFIG_PCI_IOV +#undef pci_enable_sriov +#define pci_enable_sriov(a, b) -ENOTSUPP +#undef pci_disable_sriov +#define pci_disable_sriov(a) \ + do { \ + } while (0) +#endif /* CONFIG_PCI_IOV */ +#ifndef pr_cont +#define pr_cont(fmt, ...) printk(KERN_CONT fmt, ##__VA_ARGS__) +#endif /* pr_cont */ +static inline void _kc_synchronize_irq(unsigned int a) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 28)) + synchronize_irq(); +#else /* < 2.5.28 */ + synchronize_irq(a); +#endif /* < 2.5.28 */ +} +#undef synchronize_irq +#define synchronize_irq(a) _kc_synchronize_irq(a) + +#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ + +#ifdef nr_cpus_node +#undef nr_cpus_node +#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node)) +#endif + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 5)) +#define HAVE_PCI_DEV_IS_VIRTFN_BIT +#endif /* RHEL >= 5.5 */ + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 5))) +static inline bool pci_is_root_bus(struct pci_bus *pbus) +{ + return !(pbus->parent); +} +#endif + +#else /* < 2.6.30 */ +#define HAVE_ASPM_QUIRKS +#define HAVE_PCI_DEV_IS_VIRTFN_BIT +#endif /* < 2.6.30 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) +#define ETH_P_1588 0x88F7 +#define ETH_P_FIP 0x8914 +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc_count) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(uclist, dev) \ + for (uclist = dev->uc_list; uclist; uclist = uclist->next) +#endif +#ifndef PORT_OTHER +#define PORT_OTHER 0xff +#endif +#ifndef MDIO_PHY_ID_PRTAD +#define MDIO_PHY_ID_PRTAD 0x03e0 +#endif +#ifndef MDIO_PHY_ID_DEVAD +#define MDIO_PHY_ID_DEVAD 0x001f +#endif +#ifndef skb_dst +#define skb_dst(s) ((s)->dst) +#endif + +#ifndef SUPPORTED_1000baseKX_Full +#define SUPPORTED_1000baseKX_Full BIT(17) +#endif +#ifndef SUPPORTED_10000baseKX4_Full +#define SUPPORTED_10000baseKX4_Full BIT(18) +#endif +#ifndef SUPPORTED_10000baseKR_Full +#define SUPPORTED_10000baseKR_Full BIT(19) +#endif + +#ifndef ADVERTISED_1000baseKX_Full +#define ADVERTISED_1000baseKX_Full BIT(17) +#endif +#ifndef ADVERTISED_10000baseKX4_Full +#define ADVERTISED_10000baseKX4_Full BIT(18) +#endif +#ifndef ADVERTISED_10000baseKR_Full +#define ADVERTISED_10000baseKR_Full BIT(19) +#endif + +static inline unsigned long dev_trans_start(struct net_device *dev) +{ + return dev->trans_start; +} +#else /* < 2.6.31 */ +#ifndef HAVE_NETDEV_STORAGE_ADDRESS +#define HAVE_NETDEV_STORAGE_ADDRESS +#endif +#ifndef HAVE_NETDEV_HW_ADDR +#define HAVE_NETDEV_HW_ADDR +#endif +#ifndef HAVE_TRANS_START_IN_QUEUE +#define HAVE_TRANS_START_IN_QUEUE +#endif +#ifndef HAVE_INCLUDE_LINUX_MDIO_H +#define HAVE_INCLUDE_LINUX_MDIO_H +#endif +#include +#endif /* < 2.6.31 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < 
KERNEL_VERSION(2, 6, 32)) +#undef netdev_tx_t +#define netdev_tx_t int +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef NETIF_F_FCOE_MTU +#define NETIF_F_FCOE_MTU BIT(26) +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +static inline int _kc_pm_runtime_get_sync(void) +{ + return 1; +} +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync() +#else /* 2.6.0 => 2.6.32 */ +static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev) +{ + return 1; +} +#ifndef pm_runtime_get_sync +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev) +#endif +#endif /* 2.6.0 => 2.6.32 */ +#ifndef pm_runtime_put +#define pm_runtime_put(dev) \ + do { \ + } while (0) +#endif +#ifndef pm_runtime_put_sync +#define pm_runtime_put_sync(dev) \ + do { \ + } while (0) +#endif +#ifndef pm_runtime_resume +#define pm_runtime_resume(dev) \ + do { \ + } while (0) +#endif +#ifndef pm_schedule_suspend +#define pm_schedule_suspend(dev, t) \ + do { \ + } while (0) +#endif +#ifndef pm_runtime_set_suspended +#define pm_runtime_set_suspended(dev) \ + do { \ + } while (0) +#endif +#ifndef pm_runtime_disable +#define pm_runtime_disable(dev) \ + do { \ + } while (0) +#endif +#ifndef pm_runtime_put_noidle +#define pm_runtime_put_noidle(dev) \ + do { \ + } while (0) +#endif +#ifndef pm_runtime_set_active +#define pm_runtime_set_active(dev) \ + do { \ + } while (0) +#endif +#ifndef pm_runtime_enable +#define pm_runtime_enable(dev) \ + do { \ + } while (0) +#endif +#ifndef pm_runtime_get_noresume +#define pm_runtime_get_noresume(dev) \ + do { \ + } while (0) +#endif +#else /* < 2.6.32 */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 2)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0))) +#define HAVE_RHEL6_NET_DEVICE_EXTENDED +#endif /* RHEL >= 6.2 && RHEL < 7.0 */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 6)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0))) +#define HAVE_RHEL6_NET_DEVICE_OPS_EXT +#define HAVE_NDO_SET_FEATURES +#endif /* RHEL >= 6.6 && RHEL < 7.0 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE +#define HAVE_NETDEV_OPS_FCOE_ENABLE +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_OPS_GETAPP +#define HAVE_DCBNL_OPS_GETAPP +#endif +#endif /* CONFIG_DCB */ +#include +/* IOV bad DMA target work arounds require at least this kernel rev support */ +#define HAVE_PCIE_TYPE +#endif /* < 2.6.32 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) +#ifndef pci_pcie_cap +#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP) +#endif +#ifndef IPV4_FLOW +#define IPV4_FLOW 0x10 +#endif /* IPV4_FLOW */ +#ifndef IPV6_FLOW +#define IPV6_FLOW 0x11 +#endif /* IPV6_FLOW */ +/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */ +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 0)) || \ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 1, 0))) +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* RHEL6 or SLES11 SP1 */ +#ifndef __percpu +#define __percpu +#endif /* __percpu */ + +#ifndef PORT_DA +#define PORT_DA PORT_OTHER +#endif /* PORT_DA */ +#ifndef PORT_NONE +#define PORT_NONE PORT_OTHER 
+#endif + +#if ((RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 3)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)))) +#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE) +#undef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME +#undef DEFINE_DMA_UNMAP_LEN +#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME +#undef dma_unmap_addr +#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#undef dma_unmap_addr_set +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#undef dma_unmap_len +#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#undef dma_unmap_len_set +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) +#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */ +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 8)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 0))) || \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 1)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)))))) +static inline bool pci_is_pcie(struct pci_dev *dev) +{ + return !!pci_pcie_cap(dev); +} +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 2)))) +#define sk_tx_queue_get(_sk) (-1) +#define sk_tx_queue_set(_sk, _tx_queue) \ + do { \ + } while (0) +#endif /* !(RHEL >= 6.2) */ + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0))) +#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_ETHTOOL_SET_PHYS_ID +#define HAVE_ETHTOOL_GET_TS_INFO +#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 5)) +#define HAVE_ETHTOOL_GSRSSH +#define HAVE_RHEL6_SRIOV_CONFIGURE +#define HAVE_RXFH_NONCONST +#endif /* RHEL > 6.5 */ +#endif /* RHEL >= 6.4 && RHEL < 7.0 */ + +#else /* < 2.6.33 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* < 2.6.33 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34)) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 0)) +#ifndef pci_num_vf +#define pci_num_vf(pdev) _kc_pci_num_vf(pdev) +int _kc_pci_num_vf(struct pci_dev *dev); +#endif +#endif /* RHEL_RELEASE_CODE */ + +#ifndef dev_is_pci +#define dev_is_pci(d) ((d)->bus == &pci_bus_type) +#endif + +#ifndef ETH_FLAG_NTUPLE +#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE +#endif + +#ifndef netdev_mc_count +#define netdev_mc_count(dev) ((dev)->mc_count) +#endif +#ifndef netdev_mc_empty +#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) +#endif +#ifndef netdev_for_each_mc_addr +#define netdev_for_each_mc_addr(mclist, dev) \ + for (mclist = dev->mc_list; mclist; mclist = mclist->next) +#endif +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc.count) +#endif +#ifndef netdev_uc_empty +#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(ha, dev) \ + list_for_each_entry(ha, &dev->uc.list, list) +#endif +#ifndef dma_set_coherent_mask +#define dma_set_coherent_mask(dev, mask) \ + pci_set_consistent_dma_mask(to_pci_dev(dev), (mask)) +#endif +#ifndef pci_dev_run_wake +#define pci_dev_run_wake(pdev) (0) +#endif + +#ifndef dma_mmap_coherent +#define 
NO_CSL_DEBUG +#endif + +/* netdev logging taken from include/linux/netdevice.h */ +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (dev->reg_state != NETREG_REGISTERED) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#undef netdev_printk +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +#define netdev_printk(level, netdev, format, args...) \ + do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + printk(level "%s: " format, pci_name(pdev), ##args); \ + } while (0) +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21)) +#define netdev_printk(level, netdev, format, args...) \ + do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + struct device *dev = pci_dev_to_dev(pdev); \ + dev_printk(level, dev, "%s: " format, netdev_name(netdev), \ + ##args); \ + } while (0) +#else /* 2.6.21 => 2.6.34 */ +#define netdev_printk(level, netdev, format, args...) \ + dev_printk(level, (netdev)->dev.parent, "%s: " format, \ + netdev_name(netdev), ##args) +#endif /* <2.6.0 <2.6.21 <2.6.34 */ +#undef netdev_emerg +#define netdev_emerg(dev, format, args...) \ + netdev_printk(KERN_EMERG, dev, format, ##args) +#undef netdev_alert +#define netdev_alert(dev, format, args...) \ + netdev_printk(KERN_ALERT, dev, format, ##args) +#undef netdev_crit +#define netdev_crit(dev, format, args...) \ + netdev_printk(KERN_CRIT, dev, format, ##args) +#undef netdev_err +#define netdev_err(dev, format, args...) \ + netdev_printk(KERN_ERR, dev, format, ##args) +#undef netdev_warn +#define netdev_warn(dev, format, args...) \ + netdev_printk(KERN_WARNING, dev, format, ##args) +#undef netdev_notice +#define netdev_notice(dev, format, args...) \ + netdev_printk(KERN_NOTICE, dev, format, ##args) +#undef netdev_info +#define netdev_info(dev, format, args...) \ + netdev_printk(KERN_INFO, dev, format, ##args) +#undef netdev_dbg +#if defined(DEBUG) +#define netdev_dbg(__dev, format, args...) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args) +#elif defined(CONFIG_DYNAMIC_DEBUG) +#define netdev_dbg(__dev, format, args...) \ + do { \ + dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ + netdev_name(__dev), ##args); \ + } while (0) +#else /* DEBUG */ +#define netdev_dbg(__dev, format, args...) \ + ({ \ + if (0) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args); \ + 0; \ + }) +#endif /* DEBUG */ + +#undef netif_printk +#define netif_printk(priv, type, level, dev, fmt, args...) \ + do { \ + if (netif_msg_##type(priv)) \ + netdev_printk(level, (dev), fmt, ##args); \ + } while (0) + +#undef netif_emerg +#define netif_emerg(priv, type, dev, fmt, args...) \ + netif_level(emerg, priv, type, dev, fmt, ##args) +#undef netif_alert +#define netif_alert(priv, type, dev, fmt, args...) \ + netif_level(alert, priv, type, dev, fmt, ##args) +#undef netif_crit +#define netif_crit(priv, type, dev, fmt, args...) \ + netif_level(crit, priv, type, dev, fmt, ##args) +#undef netif_err +#define netif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, fmt, ##args) +#undef netif_warn +#define netif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, fmt, ##args) +#undef netif_notice +#define netif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, fmt, ##args) +#undef netif_info +#define netif_info(priv, type, dev, fmt, args...) 
\ + netif_level(info, priv, type, dev, fmt, ##args) +#undef netif_dbg +#define netif_dbg(priv, type, dev, fmt, args...) \ + netif_level(dbg, priv, type, dev, fmt, ##args) + +#ifdef SET_SYSTEM_SLEEP_PM_OPS +#define HAVE_SYSTEM_SLEEP_PM_OPS +#endif + +#ifndef for_each_set_bit +#define for_each_set_bit(bit, addr, size) \ + for ((bit) = find_first_bit((addr), (size)); (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit */ + +#ifndef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN +#define dma_unmap_addr pci_unmap_addr +#define dma_unmap_addr_set pci_unmap_addr_set +#define dma_unmap_len pci_unmap_len +#define dma_unmap_len_set pci_unmap_len_set +#endif /* DEFINE_DMA_UNMAP_ADDR */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 3)) +#ifdef CONFIG_DEBUG_LOCK_ALLOC +#define sysfs_attr_init(attr) \ + do { \ + static struct lock_class_key __key; \ + (attr)->key = &__key; \ + } while (0) +#else +#define sysfs_attr_init(attr) \ + do { \ + } while (0) +#endif /* CONFIG_DEBUG_LOCK_ALLOC */ +#endif /* RHEL_RELEASE_CODE */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +static inline bool _kc_pm_runtime_suspended(void) +{ + return false; +} +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended() +#else /* 2.6.0 => 2.6.34 */ +static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev) +{ + return false; +} +#ifndef pm_runtime_suspended +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev) +#endif +#endif /* 2.6.0 => 2.6.34 */ + +#ifndef pci_bus_speed +/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */ +enum _kc_pci_bus_speed { + _KC_PCIE_SPEED_2_5GT = 0x14, + _KC_PCIE_SPEED_5_0GT = 0x15, + _KC_PCIE_SPEED_8_0GT = 0x16, + _KC_PCI_SPEED_UNKNOWN = 0xff, +}; +#define pci_bus_speed _kc_pci_bus_speed +#define PCIE_SPEED_2_5GT _KC_PCIE_SPEED_2_5GT +#define PCIE_SPEED_5_0GT _KC_PCIE_SPEED_5_0GT +#define PCIE_SPEED_8_0GT _KC_PCIE_SPEED_8_0GT +#define PCI_SPEED_UNKNOWN _KC_PCI_SPEED_UNKNOWN +#endif /* pci_bus_speed */ + +#else /* < 2.6.34 */ +#define HAVE_SYSTEM_SLEEP_PM_OPS +#ifndef HAVE_SET_RX_MODE +#define HAVE_SET_RX_MODE +#endif + +#endif /* < 2.6.34 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)) +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count); +#define simple_write_to_buffer _kc_simple_write_to_buffer + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4))) +static inline struct pci_dev *pci_physfn(struct pci_dev *dev) +{ +#ifdef HAVE_PCI_DEV_IS_VIRTFN_BIT +#ifdef CONFIG_PCI_IOV + if (dev->is_virtfn) + dev = dev->physfn; +#endif /* CONFIG_PCI_IOV */ +#endif /* HAVE_PCI_DEV_IS_VIRTFN_BIT */ + return dev; +} +#endif /* ! 
RHEL >= 6.4 */ + +#ifndef PCI_EXP_LNKSTA_NLW_SHIFT +#define PCI_EXP_LNKSTA_NLW_SHIFT 4 +#endif + +#ifndef numa_node_id +#define numa_node_id() 0 +#endif +#ifndef numa_mem_id +#define numa_mem_id numa_node_id +#endif +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 0))) +#ifdef HAVE_TX_MQ +#include +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int); +#else /* CONFIG_NETDEVICES_MULTI_QUEUE */ +static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + dev->egress_subqueue_count = txq; + return 0; +} +#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */ +#else /* HAVE_TX_MQ */ +static inline int +_kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev, + unsigned int __always_unused txq) +{ + return 0; +} +#endif /* HAVE_TX_MQ */ +#define netif_set_real_num_tx_queues(dev, txq) \ + _kc_netif_set_real_num_tx_queues(dev, txq) +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ +#ifndef ETH_FLAG_RXHASH +#define ETH_FLAG_RXHASH (1 << 28) +#endif /* ETH_FLAG_RXHASH */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 0)) +#define HAVE_IRQ_AFFINITY_HINT +#endif +struct device_node; +#else /* < 2.6.35 */ +#define HAVE_STRUCT_DEVICE_OF_NODE +#define HAVE_PM_QOS_REQUEST_LIST +#define HAVE_IRQ_AFFINITY_HINT +#include +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) +int _kc_ethtool_op_set_flags(struct net_device *, u32, u32); +#define ethtool_op_set_flags _kc_ethtool_op_set_flags +u32 _kc_ethtool_op_get_flags(struct net_device *); +#define ethtool_op_get_flags _kc_ethtool_op_get_flags + +enum { + WQ_UNBOUND = 0, + WQ_RESCUER = 0, +}; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#ifdef NET_IP_ALIGN +#undef NET_IP_ALIGN +#endif +#define NET_IP_ALIGN 0 +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + +#ifdef NET_SKB_PAD +#undef NET_SKB_PAD +#endif + +#if (L1_CACHE_BYTES > 32) +#define NET_SKB_PAD L1_CACHE_BYTES +#else +#define NET_SKB_PAD 32 +#endif + +static inline struct sk_buff * +_kc_netdev_alloc_skb_ip_align(struct net_device *dev, unsigned int length) +{ + struct sk_buff *skb; + + skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC); + if (skb) { +#if (NET_IP_ALIGN + NET_SKB_PAD) + skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); +#endif + skb->dev = dev; + } + return skb; +} + +#ifdef netdev_alloc_skb_ip_align +#undef netdev_alloc_skb_ip_align +#endif +#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l) + +#undef netif_level +#define netif_level(level, priv, type, dev, fmt, args...) 
\ + do { \ + if (netif_msg_##type(priv)) \ + netdev_##level(dev, fmt, ##args); \ + } while (0) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 3))) +#undef usleep_range +#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) +#endif + +#define u64_stats_update_begin(a) \ + do { \ + } while (0) +#define u64_stats_update_end(a) \ + do { \ + } while (0) +#define u64_stats_fetch_begin(a) \ + do { \ + } while (0) +#define u64_stats_fetch_retry_bh(a, b) (0) +#define u64_stats_fetch_begin_bh(a) (0) + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 1)) +#define HAVE_8021P_SUPPORT +#endif + +/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11, 2, 0))) +static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb) +{ + return; +} +#endif + +#else /* < 2.6.36 */ + +#define msleep(x) \ + do { \ + if (x > 20) \ + msleep(x); \ + else \ + usleep_range(1000 * x, 2000 * x); \ + } while (0) + +#define HAVE_PM_QOS_REQUEST_ACTIVE +#define HAVE_8021P_SUPPORT +#define HAVE_NDO_GET_STATS64 +#endif /* < 2.6.36 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)) +#define HAVE_NON_CONST_PCI_DRIVER_NAME +#ifndef netif_set_real_num_tx_queues +static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + netif_set_real_num_tx_queues(dev, txq); + return 0; +} +#define netif_set_real_num_tx_queues(dev, txq) \ + _kc_netif_set_real_num_tx_queues(dev, txq) +#endif +#ifndef netif_set_real_num_rx_queues +static inline int +__kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev, + unsigned int __always_unused rxq) +{ + return 0; +} +#define netif_set_real_num_rx_queues(dev, rxq) \ + __kc_netif_set_real_num_rx_queues((dev), (rxq)) +#endif +#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR +#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) +#endif +#ifndef VLAN_N_VID +#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN +#endif /* VLAN_N_VID */ +#ifndef ETH_FLAG_TXVLAN +#define ETH_FLAG_TXVLAN BIT(7) +#endif /* ETH_FLAG_TXVLAN */ +#ifndef ETH_FLAG_RXVLAN +#define ETH_FLAG_RXVLAN BIT(8) +#endif /* ETH_FLAG_RXVLAN */ + +#define WQ_MEM_RECLAIM WQ_RESCUER + +static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb) +{ + WARN_ON(skb->ip_summed != CHECKSUM_NONE); +} +#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb) + +static inline void *_kc_vzalloc_node(unsigned long size, int node) +{ + void *addr = vmalloc_node(size, node); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node) + +static inline void *_kc_vzalloc(unsigned long size) +{ + void *addr = vmalloc(size); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc(_size) _kc_vzalloc(_size) + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 7)) || \ + (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6, 0))) +static inline __be16 vlan_get_protocol(const struct sk_buff *skb) +{ + if (vlan_tx_tag_present(skb) || + skb->protocol != cpu_to_be16(ETH_P_8021Q)) + return skb->protocol; + + if (skb_headlen(skb) < sizeof(struct vlan_ethhdr)) + return 0; + + return ((struct vlan_ethhdr *)skb->data)->h_vlan_encapsulated_proto; +} +#endif /* !RHEL5.7+ || RHEL6.0 */ + +#ifdef HAVE_HW_TIME_STAMP +#define SKBTX_HW_TSTAMP BIT(0) +#define SKBTX_IN_PROGRESS BIT(2) +#define SKB_SHARED_TX_IS_UNION 
+#endif + +#ifndef device_wakeup_enable +#define device_wakeup_enable(dev) device_set_wakeup_enable(dev, true) +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 18)) +#ifndef HAVE_VLAN_RX_REGISTER +#define HAVE_VLAN_RX_REGISTER +#endif +#endif /* > 2.4.18 */ +#endif /* < 2.6.37 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38)) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22)) +#define skb_checksum_start_offset(skb) skb_transport_offset(skb) +#else /* 2.6.22 -> 2.6.37 */ +static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb) +{ + return skb->csum_start - skb_headroom(skb); +} +#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb) +#endif /* 2.6.22 -> 2.6.37 */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_MAX_TCS +#define IEEE_8021QAZ_MAX_TCS 8 +#endif +#ifndef DCB_CAP_DCBX_HOST +#define DCB_CAP_DCBX_HOST 0x01 +#endif +#ifndef DCB_CAP_DCBX_LLD_MANAGED +#define DCB_CAP_DCBX_LLD_MANAGED 0x02 +#endif +#ifndef DCB_CAP_DCBX_VER_CEE +#define DCB_CAP_DCBX_VER_CEE 0x04 +#endif +#ifndef DCB_CAP_DCBX_VER_IEEE +#define DCB_CAP_DCBX_VER_IEEE 0x08 +#endif +#ifndef DCB_CAP_DCBX_STATIC +#define DCB_CAP_DCBX_STATIC 0x10 +#endif +#endif /* CONFIG_DCB */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 2)) +#define CONFIG_XPS +#endif /* RHEL_RELEASE_VERSION(6,2) */ +#endif /* < 2.6.38 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)) +#ifndef TC_BITMASK +#define TC_BITMASK 15 +#endif +#ifndef NETIF_F_RXCSUM +#define NETIF_F_RXCSUM BIT(29) +#endif +#ifndef skb_queue_reverse_walk_safe +#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ + for (skb = (queue)->prev, tmp = skb->prev; \ + skb != (struct sk_buff *)(queue); skb = tmp, tmp = skb->prev) +#endif +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef FCOE_MTU +#define FCOE_MTU 2158 +#endif +#endif +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 +#endif +#endif +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4))) +#define kstrtoul(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtouint(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtou32(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 0))) +u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16); +#define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q)) +u8 _kc_netdev_get_num_tc(struct net_device *dev); +#define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev) +int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc); +#define netdev_set_num_tc(dev, tc) _kc_netdev_set_num_tc((dev), (tc)) +#define netdev_reset_tc(dev) _kc_netdev_set_num_tc((dev), 0) +#define netdev_set_tc_queue(dev, tc, cnt, off) \ + do { \ + } while (0) +u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up); +#define netdev_get_prio_tc_map(dev, up) _kc_netdev_get_prio_tc_map(dev, up) +#define netdev_set_prio_tc_map(dev, up, tc) \ + do { \ + } while (0) +#else /* RHEL6.1 or greater */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif /* HAVE_MQPRIO */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#ifndef IEEE_8021QAZ_TSA_STRICT +#define IEEE_8021QAZ_TSA_STRICT 0 
+#endif +#ifndef IEEE_8021QAZ_TSA_ETS +#define IEEE_8021QAZ_TSA_ETS 2 +#endif +#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 +#endif +#endif +#endif /* CONFIG_DCB */ +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ + +#ifndef udp_csum +#define udp_csum __kc_udp_csum +static inline __wsum __kc_udp_csum(struct sk_buff *skb) +{ + __wsum csum = csum_partial(skb_transport_header(skb), + sizeof(struct udphdr), skb->csum); + + for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) { + csum = csum_add(csum, skb->csum); + } + return csum; +} +#endif /* udp_csum */ +#else /* < 2.6.39 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif +#ifndef HAVE_SETUP_TC +#define HAVE_SETUP_TC +#endif +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#endif +#endif /* CONFIG_DCB */ +#ifndef HAVE_NDO_SET_FEATURES +#define HAVE_NDO_SET_FEATURES +#endif +#define HAVE_IRQ_AFFINITY_NOTIFY +#endif /* < 2.6.39 */ + +/*****************************************************************************/ +/* use < 2.6.40 because of a Fedora 15 kernel update where they + * updated the kernel version to 2.6.40.x and they back-ported 3.0 features + * like set_phys_id for ethtool. + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 40)) +#ifdef ETHTOOL_GRXRINGS +#ifndef FLOW_EXT +#define FLOW_EXT 0x80000000 +union _kc_ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + __u8 hdata[60]; +}; +struct _kc_ethtool_flow_ext { + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; +struct _kc_ethtool_rx_flow_spec { + __u32 flow_type; + union _kc_ethtool_flow_union h_u; + struct _kc_ethtool_flow_ext h_ext; + union _kc_ethtool_flow_union m_u; + struct _kc_ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; +#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec +#endif /* FLOW_EXT */ +#endif + +#define pci_disable_link_state_locked pci_disable_link_state + +#ifndef PCI_LTR_VALUE_MASK +#define PCI_LTR_VALUE_MASK 0x000003ff +#endif +#ifndef PCI_LTR_SCALE_MASK +#define PCI_LTR_SCALE_MASK 0x00001c00 +#endif +#ifndef PCI_LTR_SCALE_SHIFT +#define PCI_LTR_SCALE_SHIFT 10 +#endif + +#else /* < 2.6.40 */ +#define HAVE_ETHTOOL_SET_PHYS_ID +#endif /* < 2.6.40 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)) +#define USE_LEGACY_PM_SUPPORT +#ifndef kfree_rcu +#define kfree_rcu(_ptr, _rcu_head) kfree(_ptr) +#endif /* kfree_rcu */ +#ifndef kstrtol_from_user +#define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r) +static inline int _kc_kstrtol_from_user(const char __user *s, size_t count, + unsigned int base, long *res) +{ + /* sign, base 2 representation, newline, terminator */ + char buf[1 + sizeof(long) * 8 + 1 + 1]; + + count = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, s, count)) + return -EFAULT; + buf[count] = '\0'; + return strict_strtol(buf, base, res); +} +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 0) || \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5, 7))) +/* 20000base_blah_full Supported and Advertised Registers */ +#define SUPPORTED_20000baseMLD2_Full BIT(21) +#define SUPPORTED_20000baseKR2_Full BIT(22) +#define ADVERTISED_20000baseMLD2_Full BIT(21) 
+#define ADVERTISED_20000baseKR2_Full BIT(22) +#endif /* RHEL_RELEASE_CODE */ +#endif /* < 3.0.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)) +#ifndef __netdev_alloc_skb_ip_align +#define __netdev_alloc_skb_ip_align(d, l, _g) netdev_alloc_skb_ip_align(d, l) +#endif /* __netdev_alloc_skb_ip_align */ +#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app) +#define dcb_ieee_delapp(dev, app) 0 +#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority) + +/* 1000BASE-T Control register */ +#define CTL1000_AS_MASTER 0x0800 +#define CTL1000_ENABLE_MASTER 0x1000 + +/* kernels less than 3.0.0 don't have this */ +#ifndef ETH_P_8021AD +#define ETH_P_8021AD 0x88A8 +#endif + +/* Stub definition for !CONFIG_OF is introduced later */ +#ifdef CONFIG_OF +static inline struct device_node * +pci_device_to_OF_node(struct pci_dev __maybe_unused *pdev) +{ +#ifdef HAVE_STRUCT_DEVICE_OF_NODE + return pdev ? pdev->dev.of_node : NULL; +#else + return NULL; +#endif /* !HAVE_STRUCT_DEVICE_OF_NODE */ +} +#endif /* CONFIG_OF */ +#else /* < 3.1.0 */ +#ifndef HAVE_DCBNL_IEEE_DELAPP +#define HAVE_DCBNL_IEEE_DELAPP +#endif +#endif /* < 3.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) +#ifndef dma_zalloc_coherent +#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f) +static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = dma_alloc_coherent(dev, size, dma_handle, flag); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif +#ifdef ETHTOOL_GRXRINGS +#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS +#endif /* ETHTOOL_GRXRINGS */ + +#ifndef skb_frag_size +#define skb_frag_size(frag) _kc_skb_frag_size(frag) +static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag) +{ + return frag->size; +} +#endif /* skb_frag_size */ + +#ifndef skb_frag_size_sub +#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta) +static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta) +{ + frag->size -= delta; +} +#endif /* skb_frag_size_sub */ + +#ifndef skb_frag_page +#define skb_frag_page(frag) _kc_skb_frag_page(frag) +static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag) +{ + return frag->page; +} +#endif /* skb_frag_page */ + +#ifndef skb_frag_address +#define skb_frag_address(frag) _kc_skb_frag_address(frag) +static inline void *_kc_skb_frag_address(const skb_frag_t *frag) +{ + return page_address(skb_frag_page(frag)) + frag->page_offset; +} +#endif /* skb_frag_address */ + +#ifndef skb_frag_dma_map +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#include +#endif +#define skb_frag_dma_map(dev, frag, offset, size, dir) \ + _kc_skb_frag_dma_map(dev, frag, offset, size, dir) +static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev, + const skb_frag_t *frag, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + return dma_map_page(dev, skb_frag_page(frag), + frag->page_offset + offset, size, dir); +} +#endif /* skb_frag_dma_map */ + +#ifndef __skb_frag_unref +#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag) +static inline void __kc_skb_frag_unref(skb_frag_t *frag) +{ + put_page(skb_frag_page(frag)); +} +#endif /* __skb_frag_unref */ + +#ifndef SPEED_UNKNOWN +#define SPEED_UNKNOWN -1 +#endif +#ifndef DUPLEX_UNKNOWN +#define DUPLEX_UNKNOWN 0xff +#endif +#if 
((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 3)) || \ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0))) +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#endif +#endif +#else /* < 3.2.0 */ +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_VF_SPOOFCHK_CONFIGURE +#endif +#ifndef HAVE_SKB_L4_RXHASH +#define HAVE_SKB_L4_RXHASH +#endif +#define HAVE_IOMMU_PRESENT +#define HAVE_PM_QOS_REQUEST_LIST_NEW +#endif /* < 3.2.0 */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6, 2)) +#undef rnp_get_netdev_tc_txq +#define rnp_get_netdev_tc_txq(dev, tc) \ + (&netdev_extended(dev)->qos_data.tc_to_txq[tc]) +#endif +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)) +/* NOTE: the order of parameters to _kc_alloc_workqueue() is different than + * alloc_workqueue() to avoid compiler warning from -Wvarargs + */ +static inline struct workqueue_struct *__attribute__((format(printf, 3, 4))) +_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active, + const char *fmt, ...) +{ + struct workqueue_struct *wq; + va_list args, temp; + unsigned int len; + char *p; + + va_start(args, fmt); + va_copy(temp, args); + len = vsnprintf(NULL, 0, fmt, temp); + va_end(temp); + + p = kmalloc(len + 1, GFP_KERNEL); + if (!p) { + va_end(args); + return NULL; + } + + vsnprintf(p, len + 1, fmt, args); + va_end(args); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) + wq = create_workqueue(p); +#else + wq = alloc_workqueue(p, flags, max_active); +#endif + kfree(p); + + return wq; +} +#ifdef alloc_workqueue +#undef alloc_workqueue +#endif +#define alloc_workqueue(fmt, flags, max_active, args...) \ + _kc_alloc_workqueue(flags, max_active, fmt, ##args) + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 5)) +typedef u32 netdev_features_t; +#endif +#undef PCI_EXP_TYPE_RC_EC +#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ +#ifndef CONFIG_BQL +#define netdev_tx_completed_queue(_q, _p, _b) \ + do { \ + } while (0) +#define netdev_completed_queue(_n, _p, _b) \ + do { \ + } while (0) +#define netdev_tx_sent_queue(_q, _b) \ + do { \ + } while (0) +#define netdev_sent_queue(_n, _b) \ + do { \ + } while (0) +#define netdev_tx_reset_queue(_q) \ + do { \ + } while (0) +#define netdev_reset_queue(_n) \ + do { \ + } while (0) +#endif +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0)) +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#endif /* SLE_VERSION(11,3,0) */ +#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q) +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 4, 0)) +static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start, + u8 *nexthdrp, + __be16 __always_unused *frag_offp) +{ + return ipv6_skip_exthdr(skb, start, nexthdrp); +} +#undef ipv6_skip_exthdr +#define ipv6_skip_exthdr(a, b, c, d) __kc_ipv6_skip_exthdr((a), (b), (c), (d)) +#endif /* !SLES11sp4 or greater */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0))) +static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) +{ + return index % n_rx_rings; +} +#endif + +#else /* ! 
< 3.3.0 */ +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef ETHTOOL_SRXNTUPLE +#undef ETHTOOL_SRXNTUPLE +#endif +#endif /* < 3.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) +#ifndef NETIF_F_RXFCS +#define NETIF_F_RXFCS 0 +#endif /* NETIF_F_RXFCS */ +#ifndef NETIF_F_RXALL +#define NETIF_F_RXALL 0 +#endif /* NETIF_F_RXALL */ + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0)) +#define NUMTCS_RETURNS_U8 + +int _kc_simple_open(struct inode *inode, struct file *file); +#define simple_open _kc_simple_open +#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ + +#ifndef skb_add_rx_frag +#define skb_add_rx_frag _kc_skb_add_rx_frag +void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, + int size, unsigned int truesize); +#endif +#ifdef NET_ADDR_RANDOM +#define eth_hw_addr_random(N) \ + do { \ + eth_random_addr(N->dev_addr); \ + N->addr_assign_type |= NET_ADDR_RANDOM; \ + } while (0) +#else /* NET_ADDR_RANDOM */ +#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr) +#endif /* NET_ADDR_RANDOM */ + +#ifndef for_each_set_bit_from +#define for_each_set_bit_from(bit, addr, size) \ + for ((bit) = find_next_bit((addr), (size), (bit)); (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit_from */ + +#else /* < 3.4.0 */ +#include +#endif /* >= 3.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)) +#ifndef NO_PTP_SUPPORT +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +#define HAVE_PTP_1588_CLOCK +#endif /* CONFIG_PTP_1588_CLOCK */ +#endif /* !NO_PTP_SUPPORT */ +#endif /* >= 3.0.0 || RHEL_RELEASE > 6.4 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)) + +#ifndef SIZE_MAX +#define SIZE_MAX (~(size_t)0) +#endif + +#ifndef BITS_PER_LONG_LONG +#define BITS_PER_LONG_LONG 64 +#endif + +#ifndef ether_addr_equal +static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2) +{ + return !compare_ether_addr(addr1, addr2); +} +#define ether_addr_equal(_addr1, _addr2) \ + __kc_ether_addr_equal((_addr1), (_addr2)) +#endif + +/* Definitions for !CONFIG_OF_NET are introduced in 3.10 */ +#ifdef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif +#else +#include +#define HAVE_FDB_OPS +#define HAVE_ETHTOOL_GET_TS_INFO +#endif /* < 3.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) +#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */ + +#ifndef MDIO_EEE_100TX +#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ +#endif +#ifndef MDIO_EEE_1000T +#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ +#endif +#ifndef MDIO_EEE_10GT +#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ +#endif +#ifndef MDIO_EEE_1000KX +#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ +#endif +#ifndef MDIO_EEE_10GKX4 +#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ +#endif +#ifndef MDIO_EEE_10GKR +#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ +#endif + +#ifndef __GFP_MEMALLOC +#define __GFP_MEMALLOC 
0 +#endif + +#ifndef eth_broadcast_addr +#define eth_broadcast_addr _kc_eth_broadcast_addr +static inline void _kc_eth_broadcast_addr(u8 *addr) +{ + memset(addr, 0xff, ETH_ALEN); +} +#endif + +#ifndef eth_random_addr +#define eth_random_addr _kc_eth_random_addr +static inline void _kc_eth_random_addr(u8 *addr) +{ + get_random_bytes(addr, ETH_ALEN); + addr[0] &= 0xfe; /* clear multicast */ + addr[0] |= 0x02; /* set local assignment */ +} +#endif /* eth_random_addr */ + +#ifndef DMA_ATTR_SKIP_CPU_SYNC +#define DMA_ATTR_SKIP_CPU_SYNC 0 +#endif +#else /* < 3.6.0 */ +#define HAVE_STRUCT_PAGE_PFMEMALLOC +#endif /* < 3.6.0 */ + +/******************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) +#include +#ifndef ADVERTISED_40000baseKR4_Full +/* these defines were all added in one commit, so should be safe + * to trigger activiation on one define + */ +#define SUPPORTED_40000baseKR4_Full BIT(23) +#define SUPPORTED_40000baseCR4_Full BIT(24) +#define SUPPORTED_40000baseSR4_Full BIT(25) +#define SUPPORTED_40000baseLR4_Full BIT(26) +#define ADVERTISED_40000baseKR4_Full BIT(23) +#define ADVERTISED_40000baseCR4_Full BIT(24) +#define ADVERTISED_40000baseSR4_Full BIT(25) +#define ADVERTISED_40000baseLR4_Full BIT(26) +#endif + +#ifndef mmd_eee_cap_to_ethtool_sup_t +/** + * mmd_eee_cap_to_ethtool_sup_t + * @eee_cap: value of the MMD EEE Capability register + * + * A small helper function that translates MMD EEE Capability (3.20) bits + * to ethtool supported settings. + */ +static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap) +{ + u32 supported = 0; + + if (eee_cap & MDIO_EEE_100TX) + supported |= SUPPORTED_100baseT_Full; + if (eee_cap & MDIO_EEE_1000T) + supported |= SUPPORTED_1000baseT_Full; + if (eee_cap & MDIO_EEE_10GT) + supported |= SUPPORTED_10000baseT_Full; + if (eee_cap & MDIO_EEE_1000KX) + supported |= SUPPORTED_1000baseKX_Full; + if (eee_cap & MDIO_EEE_10GKX4) + supported |= SUPPORTED_10000baseKX4_Full; + if (eee_cap & MDIO_EEE_10GKR) + supported |= SUPPORTED_10000baseKR_Full; + + return supported; +} +#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \ + __kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap) +#endif /* mmd_eee_cap_to_ethtool_sup_t */ + +#ifndef mmd_eee_adv_to_ethtool_adv_t +/** + * mmd_eee_adv_to_ethtool_adv_t + * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers + * + * A small helper function that translates the MMD EEE Advertisement (7.60) + * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement + * settings. 
+ */ +static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv) +{ + u32 adv = 0; + + if (eee_adv & MDIO_EEE_100TX) + adv |= ADVERTISED_100baseT_Full; + if (eee_adv & MDIO_EEE_1000T) + adv |= ADVERTISED_1000baseT_Full; + if (eee_adv & MDIO_EEE_10GT) + adv |= ADVERTISED_10000baseT_Full; + if (eee_adv & MDIO_EEE_1000KX) + adv |= ADVERTISED_1000baseKX_Full; + if (eee_adv & MDIO_EEE_10GKX4) + adv |= ADVERTISED_10000baseKX4_Full; + if (eee_adv & MDIO_EEE_10GKR) + adv |= ADVERTISED_10000baseKR_Full; + + return adv; +} + +#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \ + __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv) +#endif /* mmd_eee_adv_to_ethtool_adv_t */ + +#ifndef ethtool_adv_to_mmd_eee_adv_t +/** + * ethtool_adv_to_mmd_eee_adv_t + * @adv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement settings + * to EEE advertisements for the MMD EEE Advertisement (7.60) and + * MMD EEE Link Partner Ability (7.61) registers. + */ +static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv) +{ + u16 reg = 0; + + if (adv & ADVERTISED_100baseT_Full) + reg |= MDIO_EEE_100TX; + if (adv & ADVERTISED_1000baseT_Full) + reg |= MDIO_EEE_1000T; + if (adv & ADVERTISED_10000baseT_Full) + reg |= MDIO_EEE_10GT; + if (adv & ADVERTISED_1000baseKX_Full) + reg |= MDIO_EEE_1000KX; + if (adv & ADVERTISED_10000baseKX4_Full) + reg |= MDIO_EEE_10GKX4; + if (adv & ADVERTISED_10000baseKR_Full) + reg |= MDIO_EEE_10GKR; + + return reg; +} +#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv) +#endif /* ethtool_adv_to_mmd_eee_adv_t */ + +#ifndef pci_pcie_type +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) +static inline u8 pci_pcie_type(struct pci_dev *pdev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); + BUG_ON(!pos); + pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); + return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; +} +#else /* < 2.6.24 */ +#define pci_pcie_type(x) (x)->pcie_type +#endif /* < 2.6.24 */ +#endif /* pci_pcie_type */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4))) && \ + (!(SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0))) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) +#define ptp_clock_register(caps, args...) 
ptp_clock_register(caps) +#endif + +#ifndef pcie_capability_read_word +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); +#define pcie_capability_read_word(d, p, v) \ + __kc_pcie_capability_read_word(d, p, v) +#endif /* pcie_capability_read_word */ + +#ifndef pcie_capability_read_dword +int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); +#define pcie_capability_read_dword(d, p, v) \ + __kc_pcie_capability_read_dword(d, p, v) +#endif + +#ifndef pcie_capability_write_word +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); +#define pcie_capability_write_word(d, p, v) \ + __kc_pcie_capability_write_word(d, p, v) +#endif /* pcie_capability_write_word */ + +#ifndef pcie_capability_clear_and_set_word +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set); +#define pcie_capability_clear_and_set_word(d, p, c, s) \ + __kc_pcie_capability_clear_and_set_word(d, p, c, s) +#endif /* pcie_capability_clear_and_set_word */ + +#ifndef pcie_capability_clear_word +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, u16 clear); +#define pcie_capability_clear_word(d, p, c) \ + __kc_pcie_capability_clear_word(d, p, c) +#endif /* pcie_capability_clear_word */ + +#ifndef PCI_EXP_LNKSTA2 +#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0)) +#define USE_CONST_DEV_UC_CHAR +#define HAVE_NDO_FDB_ADD_NLATTR +#endif + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 8)) +#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi) +#endif /* !RHEL6.8+ */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 6)) +#include +#else + +#define DEFINE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] = { [0 ...((1 << (bits)) - 1)] = \ + HLIST_HEAD_INIT } + +#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] __read_mostly = { \ + [0 ...((1 << (bits)) - 1)] = HLIST_HEAD_INIT \ + } + +#define DECLARE_HASHTABLE(name, bits) struct hlist_head name[1 << (bits)] + +#define HASH_SIZE(name) (ARRAY_SIZE(name)) +#define HASH_BITS(name) ilog2(HASH_SIZE(name)) + +/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ +#define hash_min(val, bits) \ + (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits)) + +static inline void __hash_init(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + INIT_HLIST_HEAD(&ht[i]); +} + +#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) + +#define hash_add(hashtable, node, key) \ + hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) + +static inline bool hash_hashed(struct hlist_node *node) +{ + return !hlist_unhashed(node); +} + +static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + if (!hlist_empty(&ht[i])) + return false; + + return true; +} + +#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) + +static inline void hash_del(struct hlist_node *node) +{ + hlist_del_init(node); +} +#endif /* RHEL >= 6.6 */ + +/* We don't have @flags support prior to 3.7, so we'll simply ignore the flags + * parameter on these older kernels. 
+ */ +#define __setup_timer(_timer, _fn, _data, _flags) \ + setup_timer((_timer), (_fn), (_data)) + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 7))) && \ + (!(SLE_VERSION_CODE >= SLE_VERSION(12, 0, 0))) + +#ifndef mod_delayed_work +/** + * __mod_delayed_work - modify delay or queue delayed work + * @wq: workqueue to use + * @dwork: delayed work to queue + * @delay: number of jiffies to wait before queueing + * + * Return: %true if @dwork was pending and was rescheduled; + * %false if it wasn't pending + * + * Note: the dwork parameter was declared as a void* + * to avoid comptibility problems with early 2.6 kernels + * where struct delayed_work is not declared. Unlike the original + * implementation flags are not preserved and it shouldn't be + * used in the interrupt context. + */ +static inline bool __mod_delayed_work(struct workqueue_struct *wq, void *dwork, + unsigned long delay) +{ + bool ret = cancel_delayed_work(dwork); + queue_delayed_work(wq, dwork, delay); + return ret; +} +#define mod_delayed_work(wq, dwork, delay) __mod_delayed_work(wq, dwork, delay) +#endif /* mod_delayed_work */ + +#endif /* !(RHEL >= 6.7) && !(SLE >= 12.0) */ +#else /* >= 3.7.0 */ +#include +#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS +#define USE_CONST_DEV_UC_CHAR +#define HAVE_NDO_FDB_ADD_NLATTR +#endif /* >= 3.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) +#if (!(RHEL_RELEASE_CODE && \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 5)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 4, 0))) +#ifndef pci_sriov_set_totalvfs +static inline int +__kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev, + u16 __always_unused numvfs) +{ + return 0; +} +#define pci_sriov_set_totalvfs(a, b) __kc_pci_sriov_set_totalvfs((a), (b)) +#endif +#endif /* !(RHEL_RELEASE_CODE >= 6.5 && SLE_VERSION_CODE >= 11.4) */ +#ifndef PCI_EXP_LNKCTL_ASPM_L0S +#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */ +#endif +#ifndef PCI_EXP_LNKCTL_ASPM_L1 +#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */ +#endif +#define HAVE_CONFIG_HOTPLUG +/* Reserved Ethernet Addresses per IEEE 802.1Q */ +static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { 0x01, 0x80, + 0xc2, 0x00, + 0x00, 0x00 }; + +#ifndef is_link_local_ether_addr +static inline bool __kc_is_link_local_ether_addr(const u8 *addr) +{ + __be16 *a = (__be16 *)addr; + static const __be16 *b = (const __be16 *)eth_reserved_addr_base; + static const __be16 m = cpu_to_be16(0xfff0); + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; +} +#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr) +#endif /* is_link_local_ether_addr */ + +#ifndef FLOW_MAC_EXT +#define FLOW_MAC_EXT 0x40000000 +#endif /* FLOW_MAC_EXT */ + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 4, 0)) +#define HAVE_SRIOV_CONFIGURE +#endif + +#ifndef PCI_EXP_LNKCAP_SLS_2_5GB +#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */ +#endif + +#ifndef PCI_EXP_LNKCAP_SLS_5_0GB +#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */ +#endif + +#undef PCI_EXP_LNKCAP2_SLS_2_5GB +#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */ + +#undef PCI_EXP_LNKCAP2_SLS_5_0GB +#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */ + +#undef PCI_EXP_LNKCAP2_SLS_8_0GB +#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */ + +#else /* >= 3.8.0 */ 
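+/* Note: the __devinit/__devexit family of section annotations appears to
+ * have been removed from mainline around v3.8, so for newer kernels the
+ * markers below are simply defined away to keep shared driver code
+ * compiling unchanged on both old and new kernels.
+ */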
+#ifndef __devinit +#define __devinit +#endif + +#ifndef __devinitdata +#define __devinitdata +#endif + +#ifndef __devinitconst +#define __devinitconst +#endif + +#ifndef __devexit +#define __devexit +#endif + +#ifndef __devexit_p +#define __devexit_p +#endif + +#ifndef HAVE_ENCAP_CSUM_OFFLOAD +#define HAVE_ENCAP_CSUM_OFFLOAD +#endif + +#ifndef HAVE_GRE_ENCAP_OFFLOAD +#define HAVE_GRE_ENCAP_OFFLOAD +#endif + +#ifndef HAVE_SRIOV_CONFIGURE +#define HAVE_SRIOV_CONFIGURE +#endif + +#define HAVE_BRIDGE_ATTRIBS +#ifndef BRIDGE_MODE_VEB +#define BRIDGE_MODE_VEB 0 /* Default loopback mode */ +#endif /* BRIDGE_MODE_VEB */ +#ifndef BRIDGE_MODE_VEPA +#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */ +#endif /* BRIDGE_MODE_VEPA */ +#endif /* >= 3.8.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) + +#undef BUILD_BUG_ON +#ifdef __CHECKER__ +#define BUILD_BUG_ON(condition) (0) +#else /* __CHECKER__ */ +#ifndef __compiletime_warning +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_warning(message) __attribute__((warning(message))) +#else /* __GNUC__ */ +#define __compiletime_warning(message) +#endif /* __GNUC__ */ +#endif /* __compiletime_warning */ +#ifndef __compiletime_error +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_error(message) __attribute__((error(message))) +#define __compiletime_error_fallback(condition) \ + do { \ + } while (0) +#else /* __GNUC__ */ +#define __compiletime_error(message) +#define __compiletime_error_fallback(condition) \ + do { \ + ((void)sizeof(char[1 - 2 * condition])); \ + } while (0) +#endif /* __GNUC__ */ +#else /* __compiletime_error */ +#define __compiletime_error_fallback(condition) \ + do { \ + } while (0) +#endif /* __compiletime_error */ +#define __compiletime_assert(condition, msg, prefix, suffix) \ + do { \ + bool __cond = !(condition); \ + extern void prefix##suffix(void) __compiletime_error(msg); \ + if (__cond) \ + prefix##suffix(); \ + __compiletime_error_fallback(__cond); \ + } while (0) + +#define _compiletime_assert(condition, msg, prefix, suffix) \ + __compiletime_assert(condition, msg, prefix, suffix) +#define compiletime_assert(condition, msg) \ + _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) +#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) +#ifndef __OPTIMIZE__ +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)])) +#else /* __OPTIMIZE__ */ +#define BUILD_BUG_ON(condition) \ + BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) +#endif /* __OPTIMIZE__ */ +#endif /* __CHECKER__ */ + +#undef hlist_entry +#define hlist_entry(ptr, type, member) container_of(ptr, type, member) + +#undef hlist_entry_safe +#define hlist_entry_safe(ptr, type, member) \ + ({ \ + typeof(ptr) ____ptr = (ptr); \ + ____ptr ? 
hlist_entry(____ptr, type, member) : NULL; \ + }) + +#undef hlist_for_each_entry +#define hlist_for_each_entry(pos, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ + pos; pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), \ + member)) + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \ + pos && ({ \ + n = pos->member.next; \ + 1; \ + }); \ + pos = hlist_entry_safe(n, typeof(*pos), member)) + +#undef hlist_for_each_entry_continue +#define hlist_for_each_entry_continue(pos, member) \ + for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), \ + member); \ + pos; pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), \ + member)) + +#undef hlist_for_each_entry_from +#define hlist_for_each_entry_from(pos, member) \ + for (; pos; pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), \ + member)) + +#undef hash_for_each +#define hash_for_each(name, bkt, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name); \ + (bkt)++) \ + hlist_for_each_entry(obj, &name[bkt], member) + +#undef hash_for_each_safe +#define hash_for_each_safe(name, bkt, tmp, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name); \ + (bkt)++) \ + hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) + +#undef hash_for_each_possible +#define hash_for_each_possible(name, obj, member, key) \ + hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) + +#undef hash_for_each_possible_safe +#define hash_for_each_possible_safe(name, obj, tmp, member, key) \ + hlist_for_each_entry_safe( \ + obj, tmp, &name[hash_min(key, HASH_BITS(name))], member) + +#ifdef CONFIG_XPS +int __kc_netif_set_xps_queue(struct net_device *, const struct cpumask *, u16); +#define netif_set_xps_queue(_dev, _mask, _idx) \ + __kc_netif_set_xps_queue((_dev), (_mask), (_idx)) +#else /* CONFIG_XPS */ +#define netif_set_xps_queue(_dev, _mask, _idx) \ + do { \ + } while (0) +#endif /* CONFIG_XPS */ + +#ifdef HAVE_NETDEV_SELECT_QUEUE +#define _kc_hashrnd 0xd631614b /* not so random hash salt */ +u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); +#define __netdev_pick_tx __kc_netdev_pick_tx +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#else +#define HAVE_BRIDGE_FILTER +#define HAVE_FDB_DEL_NLATTR +#endif /* < 3.9.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) +#ifndef NAPI_POLL_WEIGHT +#define NAPI_POLL_WEIGHT 64 +#endif +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev *dev); +#else +static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev) +{ + return 0; +} +#endif +#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev) + +#ifndef list_first_entry_or_null +#define list_first_entry_or_null(ptr, type, member) \ + (!list_empty(ptr) ? 
list_first_entry(ptr, type, member) : NULL) +#endif + +#ifndef VLAN_TX_COOKIE_MAGIC +static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb, + u16 vlan_tci) +{ +#ifdef VLAN_TAG_PRESENT + vlan_tci |= VLAN_TAG_PRESENT; +#endif + skb->vlan_tci = vlan_tci; + return skb; +} +#define __vlan_hwaccel_push_inside(skb) __vlan_put_tag(skb, skb->vlan_tci) + +#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \ + __kc__vlan_hwaccel_put_tag(skb, vlan_tci) +#else + +#endif + +#ifdef HAVE_FDB_OPS +#if defined(HAVE_NDO_FDB_ADD_NLATTR) +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, + u16 flags); +#elif defined(USE_CONST_DEV_UC_CHAR) +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr, u16 flags); +#else +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 flags); +#endif /* HAVE_NDO_FDB_ADD_NLATTR */ +#if defined(HAVE_FDB_DEL_NLATTR) +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr); +#elif defined(USE_CONST_DEV_UC_CHAR) +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr); +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr); +#endif /* HAVE_FDB_DEL_NLATTR */ +#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add +#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del +#endif /* HAVE_FDB_OPS */ + +#ifndef PCI_DEVID +#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) +#endif + +/* The definitions for these functions when CONFIG_OF_NET is defined are + * pulled in from . For kernels older than 3.5 we already have + * backports for when CONFIG_OF_NET is true. These are separated and + * duplicated in order to cover all cases so that all kernels get either the + * real definitions (when CONFIG_OF_NET is defined) or the stub definitions + * (when CONFIG_OF_NET is not defined, or the kernel is too old to have real + * definitions). 
+ */ +#ifndef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif + +#else /* >= 3.10.0 */ +#define HAVE_ENCAP_TSO_OFFLOAD +#define USE_DEFAULT_FDB_DEL_DUMP +#define HAVE_SKB_INNER_NETWORK_HEADER + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 0))) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 0)) +#define HAVE_RHEL7_PCI_DRIVER_RH +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2)) +#define HAVE_RHEL7_PCI_RESET_NOTIFY +#endif /* RHEL >= 7.2 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3)) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 5)) +#define HAVE_GENEVE_RX_OFFLOAD +#endif /* RHEL < 7.5 */ +#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC +/* CentOS-7-aarch64-Everything-1810.iso not define this */ +#ifndef CONFIG_ARM64 +#define HAVE_RHEL7_NET_DEVICE_OPS_EXT +#endif /* CONFIG_ARM64 */ +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) +#define HAVE_UDP_ENC_TUNNEL +#endif /* !HAVE_UDP_ENC_TUNNEL && CONFIG_GENEVE */ +#endif /* RHEL >= 7.3 */ + +/* new hooks added to net_device_ops_extended in RHEL7.4 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) +/* CentOS-7-aarch64-Everything-1810.iso not define this */ +#ifndef CONFIG_ARM64 +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_TX_MAXRATE +#endif /* CONFIG_ARM64 */ +#define HAVE_UDP_ENC_RX_OFFLOAD +#endif /* RHEL >= 7.4 */ +#else /* RHEL >= 8.0 */ +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#define NO_NETDEV_BPF_PROG_ATTACHED +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#endif /* RHEL >= 8.0 */ +#endif /* RHEL >= 7.0 */ +#endif /* >= 3.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)) +#define netdev_notifier_info_to_dev(ptr) ptr +#ifndef time_in_range64 +#define time_in_range64(a, b, c) \ + (time_after_eq64(a, b) && time_before_eq64(a, c)) +#endif /* time_in_range64 */ +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 10)) || \ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 4, 0))) +#define HAVE_NDO_SET_VF_LINK_STATE +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif +#else /* >= 3.11.0 */ +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_SKB_INNER_PROTOCOL +#define HAVE_MPLS_FEATURES +#endif /* >= 3.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) +int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width); +#ifndef pcie_get_minimum_link +#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w) +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 7)) +int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev); +#define pci_wait_for_pending_transaction _kc_pci_wait_for_pending_transaction +#endif /* = 3.12.0 */ +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 0, 0)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) +#define HAVE_VXLAN_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* < 4.8.0 */ 
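+/* The VXLAN receive-offload flags above are deliberately limited to kernels
+ * before 4.8, where the VXLAN-specific ndo hooks were presumably superseded
+ * by the generic UDP tunnel offload interface. The defines below cover
+ * features present in the >= 3.12 base kernels this branch targets.
+ */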
+#define HAVE_NDO_GET_PHYS_PORT_ID +#define HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK +#endif /* >= 3.12.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) +#define dma_set_mask_and_coherent(_p, _m) __kc_dma_set_mask_and_coherent(_p, _m) +int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask); +#ifndef u64_stats_init +#define u64_stats_init(a) \ + do { \ + } while (0) +#endif +#undef BIT_ULL +#define BIT_ULL(n) (1ULL << (n)) + +#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 0, 0)) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 0))) +static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) +{ + dev = pci_physfn(dev); + if (pci_is_root_bus(dev->bus)) + return NULL; + + return dev->bus->self; +} +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 1, 0)) +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT +#endif +#ifndef list_next_entry +#define list_next_entry(pos, member) \ + list_entry((pos)->member.next, typeof(*(pos)), member) +#endif +#ifndef list_prev_entry +#define list_prev_entry(pos, member) \ + list_entry((pos)->member.prev, typeof(*(pos)), member) +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 20)) +#define devm_kcalloc(dev, cnt, size, flags) \ + devm_kzalloc(dev, (cnt) * (size), flags) +#endif /* > 2.6.20 */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2))) +#define list_last_entry(ptr, type, member) list_entry((ptr)->prev, type, member) +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)) +bool _kc_pci_device_is_present(struct pci_dev *pdev); +#define pci_device_is_present _kc_pci_device_is_present +#endif /* = 3.13.0 */ +#define HAVE_VXLAN_CHECKS +#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3, 13, 0, 24)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#else +#define HAVE_NDO_SELECT_QUEUE_ACCEL +#endif +#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS +#endif + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) + +#ifndef U16_MAX +#define U16_MAX ((u16)~0U) +#endif + +#ifndef U32_MAX +#define U32_MAX ((u32)~0U) +#endif + +#ifndef U64_MAX +#define U64_MAX ((u64)~0ULL) +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2))) +#define dev_consume_skb_any(x) dev_kfree_skb_any(x) +#define dev_consume_skb_irq(x) dev_kfree_skb_irq(x) +#endif + +#if (!(RHEL_RELEASE_CODE && \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 0)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 0, 0))) + +/* it isn't expected that this would be a #define unless we made it so */ +#ifndef skb_set_hash + +#define PKT_HASH_TYPE_NONE 0 +#define PKT_HASH_TYPE_L2 1 +#define PKT_HASH_TYPE_L3 2 +#define PKT_HASH_TYPE_L4 3 + +enum _kc_pkt_hash_types { + _KC_PKT_HASH_TYPE_NONE = PKT_HASH_TYPE_NONE, + _KC_PKT_HASH_TYPE_L2 = PKT_HASH_TYPE_L2, + _KC_PKT_HASH_TYPE_L3 = PKT_HASH_TYPE_L3, + _KC_PKT_HASH_TYPE_L4 = PKT_HASH_TYPE_L4, +}; +#define pkt_hash_types _kc_pkt_hash_types + +#define skb_set_hash __kc_skb_set_hash +static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb, + u32 __maybe_unused hash, + int __maybe_unused type) +{ +#ifdef HAVE_SKB_L4_RXHASH + skb->l4_rxhash = (type == PKT_HASH_TYPE_L4); +#endif +#ifdef NETIF_F_RXHASH + skb->rxhash = hash; +#endif +} +#endif /* !skb_set_hash */ + +#else /* RHEL_RELEASE_CODE >= 7.0 || 
SLE_VERSION_CODE >= 12.0 */ + +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7, 0)) || \ + (SLE_VERSION_CODE && SLE_VERSION_CODE <= SLE_VERSION(12, 1, 0))) +/* GPLv2 code taken from 5.10-rc2 kernel source include/linux/pci.h, Copyright + * original authors. + */ +//static inline int pci_enable_msix_exact(struct pci_dev *dev, +// struct msix_entry *entries, int nvec) +//{ +// int rc = pci_enable_msix_range(dev, entries, nvec, nvec); +// if (rc < 0) +// return rc; +// return 0; +//} +#endif /* <=EL7.0 || <=SLES 12.1 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5))) +#ifndef HAVE_VXLAN_RX_OFFLOAD +#define HAVE_VXLAN_RX_OFFLOAD +#endif /* HAVE_VXLAN_RX_OFFLOAD */ +#endif + +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif + +#ifndef HAVE_VXLAN_CHECKS +#define HAVE_VXLAN_CHECKS +#endif /* HAVE_VXLAN_CHECKS */ +#endif /* !(RHEL_RELEASE_CODE >= 7.0 && SLE_VERSION_CODE >= 12.0) */ + +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3)) || \ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 0, 0))) +#define HAVE_NDO_DFWD_OPS +#endif + +#ifndef pci_enable_msix_range +int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec); +#define pci_enable_msix_range __kc_pci_enable_msix_range +#endif + +#ifndef ether_addr_copy +#define ether_addr_copy __kc_ether_addr_copy +static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + *(u32 *)dst = *(const u32 *)src; + *(u16 *)(dst + 4) = *(const u16 *)(src + 4); +#else + u16 *a = (u16 *)dst; + const u16 *b = (const u16 *)src; + + a[0] = b[0]; + a[1] = b[1]; + a[2] = b[2]; +#endif +} +#endif /* ether_addr_copy */ +int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, + int target, unsigned short *fragoff, int *flags); +#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e)) + +#ifndef OPTIMIZER_HIDE_VAR +#ifdef __GNUC__ +#define OPTIMIZER_HIDE_VAR(var) __asm__("" : "=r"(var) : "0"(var)) +#else +#include +#define OPTIMIZER_HIDE_VAR(var) barrier() +#endif +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 0)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10, 4, 0))) +static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) +{ +#ifdef NETIF_F_RXHASH + return skb->rxhash; +#else + return 0; +#endif /* NETIF_F_RXHASH */ +} +#endif /* !RHEL > 5.9 && !SLES >= 10.4 */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 5)) +#define request_firmware_direct request_firmware +#endif /* !RHEL || RHEL < 7.5 */ + +#else /* >= 3.14.0 */ + +/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */ +#ifndef HAVE_NDO_DFWD_OPS +#define HAVE_NDO_DFWD_OPS +#endif +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif /* 3.14.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)) +#define HAVE_SKBUFF_RXHASH +#endif /* >= 2.6.35 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 1)) && \ + !(UBUNTU_VERSION_CODE && \ + UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3, 13, 0, 30))) +#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh +#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh +#endif + +char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp); +#define 
devm_kstrdup(dev, s, gfp) _kc_devm_kstrdup(dev, s, gfp) + +#else /* >= 3.15.0 */ +#define HAVE_NET_GET_RANDOM_ONCE +#define HAVE_PTP_1588_CLOCK_PINS +#define HAVE_NETDEV_PORT +#endif /* 3.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)) +#ifndef smp_mb__before_atomic +#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() smp_mb() +#endif +#ifndef __dev_uc_sync +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev( + struct netdev_hw_addr_list *list, struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)); +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, + const unsigned char *)); +#endif +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev( + struct dev_addr_list **list, int *count, struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)); +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, + const unsigned char *)); +#endif +#endif /* HAVE_SET_RX_MODE */ + +static inline int __kc_dev_uc_sync( + struct net_device __maybe_unused *dev, + int __maybe_unused (*sync)(struct net_device *, const unsigned char *), + int __maybe_unused (*unsync)(struct net_device *, + const unsigned char *)) +{ +#ifdef NETDEV_HW_ADDR_T_UNICAST + return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync); +#elif defined(HAVE_SET_RX_MODE) + return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count, dev, sync, + unsync); +#else + return 0; +#endif +} +#define __dev_uc_sync __kc_dev_uc_sync + +static inline void +__kc_dev_uc_unsync(struct net_device __maybe_unused *dev, + int __maybe_unused (*unsync)(struct net_device *, + const unsigned char *)) +{ +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST + __kc_hw_addr_unsync_dev(&dev->uc, dev, unsync); +#else /* NETDEV_HW_ADDR_T_MULTICAST */ + __kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync); +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#endif /* HAVE_SET_RX_MODE */ +} +#define __dev_uc_unsync __kc_dev_uc_unsync + +static inline int __kc_dev_mc_sync( + struct net_device __maybe_unused *dev, + int __maybe_unused (*sync)(struct net_device *, const unsigned char *), + int __maybe_unused (*unsync)(struct net_device *, + const unsigned char *)) +{ +#ifdef NETDEV_HW_ADDR_T_MULTICAST + return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync); +#elif defined(HAVE_SET_RX_MODE) + return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count, dev, sync, + unsync); +#else + return 0; +#endif +} +#define __dev_mc_sync __kc_dev_mc_sync + +static inline void +__kc_dev_mc_unsync(struct net_device __maybe_unused *dev, + int __maybe_unused (*unsync)(struct net_device *, + const unsigned char *)) +{ +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_MULTICAST + __kc_hw_addr_unsync_dev(&dev->mc, dev, unsync); +#else /* NETDEV_HW_ADDR_T_MULTICAST */ + __kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync); +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +} +#define __dev_mc_unsync __kc_dev_mc_unsync +#endif /* __dev_uc_sync */ + +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 1)) +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#endif + 
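+/* The __dev_uc_sync()/__dev_mc_sync() wrappers above emulate the address
+ * list sync helpers that mainline appears to have gained in 3.16, mapping
+ * them onto the older netdev_hw_addr_list (or legacy dev_addr_list) helpers
+ * so a single rx-mode path can be used on all supported kernels.
+ */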
+#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM +/* if someone backports this, hopefully they backport as a #define. + * declare it as zero on older kernels so that if it get's or'd in + * it won't effect anything, therefore preventing core driver changes + */ +#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0 +#define SKB_GSO_UDP_TUNNEL_CSUM 0 +#endif +void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp); +#define devm_kmemdup __kc_devm_kmemdup + +#else +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 4, 0)))) +#define HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY +#endif /* >= 3.16.0 && < 4.13.0 && !(SLES >= 12sp4) */ +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#endif /* 3.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) +#if (!RHEL_RELEASE_CODE) +#define __vlan_hwaccel_push_inside(skb) \ + __vlan_put_tag(skb, skb->vlan_proto, skb->vlan_tci) + +#endif +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)) && \ + !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2)) +#ifndef timespec64 +#define timespec64 timespec +static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) +{ + return ts; +} +static inline struct timespec +timespec64_to_timespec(const struct timespec64 ts64) +{ + return ts64; +} +#define timespec64_equal timespec_equal +#define timespec64_compare timespec_compare +#define set_normalized_timespec64 set_normalized_timespec +#define timespec64_add_safe timespec_add_safe +#define timespec64_add timespec_add +#define timespec64_sub timespec_sub +#define timespec64_valid timespec_valid +#define timespec64_valid_strict timespec_valid_strict +#define timespec64_to_ns timespec_to_ns +#define ns_to_timespec64 ns_to_timespec +#define ktime_to_timespec64 ktime_to_timespec +#define ktime_get_ts64 ktime_get_ts +#define ktime_get_real_ts64 ktime_get_real_ts +#define timespec64_add_ns timespec_add_ns +#endif /* timespec64 */ +#endif /* !(RHEL6.8= RHEL_RELEASE_VERSION(6, 8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)) +static inline void ktime_get_real_ts64(struct timespec64 *ts) +{ + *ts = ktime_to_timespec64(ktime_get_real()); +} + +static inline void ktime_get_ts64(struct timespec64 *ts) +{ + *ts = ktime_to_timespec64(ktime_get()); +} +#endif + +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) +#define hlist_add_behind(_a, _b) hlist_add_after(_b, _a) +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 5)) +#endif /* RHEL_RELEASE_CODE < RHEL7.5 */ + +#if RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 3) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 3) +static inline u64 ktime_get_ns(void) +{ + return ktime_to_ns(ktime_get()); +} + +static inline u64 ktime_get_real_ns(void) +{ + return ktime_to_ns(ktime_get_real()); +} + +static inline u64 ktime_get_boot_ns(void) +{ + return ktime_to_ns(ktime_get_boottime()); +} +#endif /* RHEL < 7.3 */ + +#else +#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT +#include +#define HAVE_RHASHTABLE +#endif /* 3.17.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) +#ifndef NO_PTP_SUPPORT +#include +struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb); +void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps); +#define skb_clone_sk 
__kc_skb_clone_sk +#define skb_complete_tx_timestamp __kc_skb_complete_tx_timestamp +#endif +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 2)))) +u32 __kc_eth_get_headlen(const struct net_device *dev, unsigned char *data, + unsigned int max_len); +#else +unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len); +#endif /* !RHEL >= 8.2 */ + +#define eth_get_headlen __kc_eth_get_headlen +#ifndef ETH_P_XDSA +#define ETH_P_XDSA 0x00F8 +#endif +/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 1)) +#define HAVE_SKBUFF_CSUM_LEVEL +#endif /* >= RH 7.1 */ + +/* RHEL 7.3 backported xmit_more */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3)) +#define HAVE_SKB_XMIT_MORE +#endif /* >= RH 7.3 */ + +#undef GENMASK +#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) +#undef GENMASK_ULL +#define GENMASK_ULL(h, l) \ + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + +#else /* 3.18.0 */ +#define HAVE_SKBUFF_CSUM_LEVEL +#define HAVE_SKB_XMIT_MORE +#define HAVE_SKB_INNER_PROTOCOL_TYPE +#endif /* 3.18.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 4)) +#else +#define HAVE_NDO_FEATURES_CHECK +#endif /* 3.18.4 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 5)) +#else +#define HAVE_PCI_DEV_FLAGS_NO_BUS_RESET +#endif /* 3.18.5 */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7, 1)) +#define gettime64 gettime +#define settime64 settime +#define COMPAT_PTP_NO_PINS 1 +#define NO_PUSH_INSIDE +#undef HAVE_NDO_FEATURES_CHECK /* 7.1 */ +#define __vlan_hwaccel_push_inside(skb) \ + __vlan_put_tag(skb, skb->vlan_proto, skb->vlan_tci) +#endif + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 13)) +#ifndef WRITE_ONCE +#define WRITE_ONCE(x, val) ({ ACCESS_ONCE(x) = (val); }) +#endif +#endif /* 3.18.13 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) +/* netdev_phys_port_id renamed to netdev_phys_item_id */ +#define netdev_phys_item_id netdev_phys_port_id + +static inline void _kc_napi_complete_done(struct napi_struct *napi, + int __always_unused work_done) +{ + napi_complete(napi); +} +/* don't use our backport if the distro kernels already have it */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12, 3, 0))) || \ + (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 5))) +#define napi_complete_done _kc_napi_complete_done +#endif + +int _kc_bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, int nmaskbits); +#define bitmap_print_to_pagebuf _kc_bitmap_print_to_pagebuf + +#ifndef NETDEV_RSS_KEY_LEN +#define NETDEV_RSS_KEY_LEN (13 * 4) +#endif +#if (!(RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 7) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2))))) +#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len) +#endif /* RHEL_RELEASE_CODE */ +void __kc_netdev_rss_key_fill(void *buffer, size_t len); +#define SPEED_20000 20000 +#define SPEED_40000 40000 +#ifndef dma_rmb +#define dma_rmb() rmb() +#endif +#ifndef dev_alloc_pages +#ifndef NUMA_NO_NODE +#define NUMA_NO_NODE -1 
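+/* NUMA_NO_NODE mirrors the kernel's own value (-1) so the dev_alloc_pages()
+ * fallback below can pass it to alloc_pages_node() on pre-3.19 kernels.
+ */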
+#endif +#define dev_alloc_pages(_order) \ + alloc_pages_node(NUMA_NO_NODE, \ + (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | \ + __GFP_MEMALLOC), \ + (_order)) +#endif +#ifndef dev_alloc_page +#define dev_alloc_page() dev_alloc_pages(0) +#endif +#if !defined(eth_skb_pad) && !defined(skb_put_padto) +/** + * __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error. + */ +static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + + if (unlikely(size < len)) { + len -= size; + if (skb_pad(skb, len)) + return -ENOMEM; + __skb_put(skb, len); + } + return 0; +} +#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len) + +static inline int __kc_eth_skb_pad(struct sk_buff *skb) +{ + return __kc_skb_put_padto(skb, ETH_ZLEN); +} +#define eth_skb_pad(skb) __kc_eth_skb_pad(skb) +#endif /* eth_skb_pad && skb_put_padto */ + +#ifndef SKB_ALLOC_NAPI +/* RHEL 7.2 backported napi_alloc_skb and friends */ +static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, + unsigned int length) +{ + return netdev_alloc_skb_ip_align(napi->dev, length); +} +#define napi_alloc_skb(napi, len) __kc_napi_alloc_skb(napi, len) +#define __napi_alloc_skb(napi, len, mask) __kc_napi_alloc_skb(napi, len) +#endif /* SKB_ALLOC_NAPI */ +#define HAVE_CONFIG_PM_RUNTIME +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0))) +#define HAVE_RXFH_HASHFUNC +#endif /* 6.7 < RHEL < 7.0 */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 1)) +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* RHEL > 7.1 */ +#ifndef napi_schedule_irqoff +#define napi_schedule_irqoff napi_schedule +#endif +#ifndef READ_ONCE +#define READ_ONCE(_x) ACCESS_ONCE(_x) +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) +#define HAVE_NDO_FDB_ADD_VID +#endif +#ifndef ETH_MODULE_SFF_8636 +#define ETH_MODULE_SFF_8636 0x3 +#endif +#ifndef ETH_MODULE_SFF_8636_LEN +#define ETH_MODULE_SFF_8636_LEN 256 +#endif +#ifndef ETH_MODULE_SFF_8436 +#define ETH_MODULE_SFF_8436 0x4 +#endif +#ifndef ETH_MODULE_SFF_8436_LEN +#define ETH_MODULE_SFF_8436_LEN 256 +#endif +#ifndef writel_relaxed +#define writel_relaxed writel +#endif +#else /* 3.19.0 */ +#define HAVE_NDO_FDB_ADD_VID +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* 3.19.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(6, 8, 0)) +#define HAVE_ETHTOOL_RXFH_PARAM +#endif + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 20, 0)) +/* vlan_tx_xx functions got renamed to skb_vlan */ +#ifndef skb_vlan_tag_get +#define skb_vlan_tag_get vlan_tx_tag_get +#endif +#ifndef skb_vlan_tag_present +#define skb_vlan_tag_present vlan_tx_tag_present +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 1)) +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif +#else +#define 
HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif /* 3.20.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)) +/* Definition for CONFIG_OF was introduced earlier */ +#if !defined(CONFIG_OF) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) +static inline struct device_node * +pci_device_to_OF_node(const struct pci_dev __always_unused *pdev) +{ + return NULL; +} +#else /* !CONFIG_OF && RHEL < 7.3 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* !CONFIG_OF && RHEL < 7.3 */ +#else /* < 4.0 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* < 4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)) +#ifndef NO_PTP_SUPPORT +#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#include +#else +#include +#endif +static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta) +{ + tc->nsec += delta; +} + +static inline struct net_device * +of_find_net_device_by_node(struct device_node __always_unused *np) +{ + return NULL; +} + +#define timecounter_adjtime __kc_timecounter_adjtime +#endif +#if ((RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 2, 0)))) +#define HAVE_NDO_SET_VF_RSS_QUERY_EN +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) +#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +#define HAVE_RHEL7_EXTENDED_NDO_SET_TX_MAXRATE +#define HAVE_NDO_SET_TX_MAXRATE +#endif +#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)) && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) && \ + (SLE_VERSION_CODE > SLE_VERSION(12, 1, 0))) +unsigned int _kc_cpumask_local_spread(unsigned int i, int node); +#define cpumask_local_spread _kc_cpumask_local_spread +#endif +#ifdef HAVE_RHASHTABLE +#define rhashtable_loopup_fast(ht, key, params) \ + do { \ + (void)params; \ + rhashtable_lookup((ht), (key)); \ + } while (0) + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) +#define rhashtable_insert_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_insert((ht), (obj), GFP_KERNEL); \ + } while (0) + +#define rhashtable_remove_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_remove((ht), (obj), GFP_KERNEL); \ + } while (0) + +#else /* >= 3,19,0 */ +#define rhashtable_insert_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_insert((ht), (obj)); \ + } while (0) + +#define rhashtable_remove_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_remove((ht), (obj)); \ + } while (0) + +#endif /* 3,19,0 */ +#endif /* HAVE_RHASHTABLE */ +#else /* >= 4,1,0 */ +#define HAVE_NDO_GET_PHYS_PORT_NAME +#define HAVE_PTP_CLOCK_INFO_GETTIME64 +#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +#define HAVE_PASSTHRU_FEATURES_CHECK +#define HAVE_NDO_SET_VF_RSS_QUERY_EN +#define HAVE_NDO_SET_TX_MAXRATE +#endif /* 4,1,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 9)) +#if (!(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) && \ + !((SLE_VERSION_CODE == SLE_VERSION(11, 3, 0)) && \ + (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(0, 47, 71))) && \ + !((SLE_VERSION_CODE == SLE_VERSION(11, 4, 0)) && \ + (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(65, 0, 0))) && \ + !(SLE_VERSION_CODE >= 
SLE_VERSION(12, 1, 0))) +static inline bool page_is_pfmemalloc(struct page __maybe_unused *page) +{ +#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC + return page->pfmemalloc; +#else + return false; +#endif +} +#endif /* !RHEL7.2+ && !SLES11sp3(3.0.101-0.47.71+ update) && !SLES11sp4(3.0.101-65+ update) & !SLES12sp1+ */ +#else +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#endif /* 4.1.9 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12, 1, 0))) +#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFULL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000ULL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32 +static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie) +{ + return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie; +}; + +static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) +{ + return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >> + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; +}; +#endif /* ! RHEL >= 7.2 && ! SLES >= 12.1 */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) +#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 27)) +#if (!((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)) || \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2))) +static inline bool pci_ari_enabled(struct pci_bus *bus) +{ + return bus->self && bus->self->ari_enabled; +} +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2)) +#define HAVE_VF_STATS +#endif /* (RHEL7.2+) */ +#endif /* !(RHEL6.8+ || RHEL7.2+) */ +#else +static inline bool pci_ari_enabled(struct pci_bus *bus) +{ + return false; +} +#endif /* 2.6.27 */ +#else +#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT +#define HAVE_VF_STATS +#endif /* 4.2.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12, 2, 0))) +/** + * _kc_flow_dissector_key_ipv4_addrs: + * @src: source ip address + * @dst: destination ip address + */ +struct _kc_flow_dissector_key_ipv4_addrs { + __be32 src; + __be32 dst; +}; + +/** + * _kc_flow_dissector_key_ipv6_addrs: + * @src: source ip address + * @dst: destination ip address + */ +struct _kc_flow_dissector_key_ipv6_addrs { + struct in6_addr src; + struct in6_addr dst; +}; + +/** + * _kc_flow_dissector_key_addrs: + * @v4addrs: IPv4 addresses + * @v6addrs: IPv6 addresses + */ +struct _kc_flow_dissector_key_addrs { + union { + struct _kc_flow_dissector_key_ipv4_addrs v4addrs; + struct _kc_flow_dissector_key_ipv6_addrs v6addrs; + }; +}; + +/** + * _kc_flow_dissector_key_tp_ports: + * @ports: port numbers of Transport header + * src: source port number + * dst: destination port number + */ +struct _kc_flow_dissector_key_ports { + union { + __be32 ports; + struct { + __be16 src; + __be16 dst; + }; + }; +}; + +/** + * _kc_flow_dissector_key_basic: + * @n_proto: Network header protocol (eg. IPv4/IPv6) + * @ip_proto: Transport header protocol (eg. 
TCP/UDP) + * @padding: padding for alignment + */ +struct _kc_flow_dissector_key_basic { + __be16 n_proto; + u8 ip_proto; + u8 padding; +}; + +struct _kc_flow_keys { + struct _kc_flow_dissector_key_basic basic; + struct _kc_flow_dissector_key_ports ports; + struct _kc_flow_dissector_key_addrs addrs; +}; + +/* These are all the include files for kernels inside this #ifdef block that + * have any reference to the in kernel definition of struct flow_keys. The + * reason for putting them here is to make 100% sure that these files do not get + * included after re-defining flow_keys to _kc_flow_keys. This is done to + * prevent any possible ABI issues that this structure re-definition could case. + */ +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)) || \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 7) || \ + SLE_VERSION_CODE >= SLE_VERSION(11, 4, 0)) +#include +#endif /* (>= 3.3.0 && < 4.2.0) || >= RHEL 6.7 || >= SLE 11.4 */ +#if (LINUX_VERSION_CODE == KERNEL_VERSION(4, 2, 0)) +#include +#endif /* 4.2.0 */ +#include +#include +#include +#include + +#define flow_keys _kc_flow_keys +bool _kc_skb_flow_dissect_flow_keys(const struct sk_buff *skb, + struct flow_keys *flow, + unsigned int __always_unused flags); +#define skb_flow_dissect_flow_keys _kc_skb_flow_dissect_flow_keys +#endif /* ! >= RHEL 7.4 && ! >= SLES 12.2 */ + +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(12, 2, 0))) +#include +#endif /* >= RHEL7.3 || >= SLE12sp2 */ +#else /* >= 4.3.0 */ +#include +#endif /* 4.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3)) +#define HAVE_NDO_SET_VF_TRUST +#endif /* (RHEL_RELEASE >= 7.3) */ +#ifndef CONFIG_64BIT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) +#include /* 32-bit readq/writeq */ +#else /* 3.3.0 => 4.3.x */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)) +#include +#endif /* 2.6.26 => 3.3.0 */ +#ifndef readq +static inline __u64 readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl(p); + high = readl(p + 1); + + return low + ((u64)high << 32); +} +#define readq readq +#endif + +#ifndef writeq +static inline void writeq(__u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, (u8 *)addr + 4); +} +#define writeq writeq +#endif +#endif /* < 3.3.0 */ +#endif /* !CONFIG_64BIT */ +#else /* < 4.4.0 */ +#define HAVE_NDO_SET_VF_TRUST + +#ifndef CONFIG_64BIT +#include /* 32-bit readq/writeq */ +#endif /* !CONFIG_64BIT */ +#endif /* 4.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) +/* protect against a likely backport */ +#ifndef NETIF_F_CSUM_MASK +#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM +#endif /* NETIF_F_CSUM_MASK */ +#ifndef NETIF_F_SCTP_CRC +#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM +#endif /* NETIF_F_SCTP_CRC */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3))) +#define eth_platform_get_mac_address _kc_eth_platform_get_mac_address +int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, + u8 *mac_addr __maybe_unused); +#endif /* !(RHEL_RELEASE >= 7.3) */ +#else /* 4.5.0 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) +#define HAVE_GENEVE_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && 
IS_ENABLED(CONFIG_GENEVE) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* < 4.8.0 */ +#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD +#define HAVE_NETDEV_UPPER_INFO +#endif /* 4.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 3)) +static inline unsigned char *skb_checksum_start(const struct sk_buff *skb) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)) + return skb->head + skb->csum_start; +#else /* < 2.6.22 */ + return skb_transport_header(skb); +#endif +} +#endif + +#if !(UBUNTU_VERSION_CODE && \ + UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4, 4, 0, 21)) && \ + !(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0))) +#ifndef NONEED_NAPI_CONSUME_SKB +static inline void napi_consume_skb(struct sk_buff *skb, + int __always_unused budget) +{ + dev_consume_skb_any(skb); +} +#endif +#endif /* UBUNTU 4,4,0,21, RHEL 7.2, SLES12 SP3 */ +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0))) && \ + !(RHEL_RELEASE_CODE && \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) +#ifndef NONEED_CSUM_REPLACE_BY_DIFF +static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) +{ + *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); +} +#endif +#endif +#if !(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12, 3, 0))) +static inline void page_ref_inc(struct page *page) +{ + get_page(page); +} +#else +#define HAVE_PAGE_COUNT_BULK_UPDATE +#endif +#ifndef IPV4_USER_FLOW +#define IPV4_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */ +#endif + +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) +#define HAVE_TC_SETUP_CLSFLOWER +#define HAVE_TC_FLOWER_ENC +#endif + +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 7)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(12, 2, 0))) +#define HAVE_TC_SETUP_CLSU32 +#endif + +#if (SLE_VERSION_CODE >= SLE_VERSION(12, 2, 0)) +#define HAVE_TC_SETUP_CLSFLOWER +#endif + +#ifndef kstrtobool +#define kstrtobool _kc_kstrtobool +int _kc_kstrtobool(const char *s, bool *res); +#endif + +#else /* >= 4.6.0 */ +#define HAVE_PAGE_COUNT_BULK_UPDATE +#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC +#define HAVE_PTP_CROSSTIMESTAMP +#define HAVE_TC_SETUP_CLSFLOWER +#define HAVE_TC_SETUP_CLSU32 +#endif /* 4.6.0 */ + +#if defined(KYLIN_OS) || defined(CONFIG_KYLINOS_SERVER) +//#if ( LINUX_VERSION_CODE > KERNEL_VERSION(4,19,0) ) +#if defined(KYLIN_RELEASE_CODE) +#if (KYLIN_RELEASE_CODE <= KYLIN_RELEASE_VERSION(10, 2)) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 64)) +#undef HAVE_TC_SETUP_CLSFLOWER +#endif +#if (KYLIN_RELEASE_CODE == KYLIN_RELEASE_VERSION(10, 3)) +// close this in sp3 +#undef HAVE_SKB_XMIT_MORE +#endif +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 90)) +#undef HAVE_TC_SETUP_CLSFLOWER +#endif + +#if defined(KYLIN_RELEASE_CODE) +#if (KYLIN_RELEASE_CODE == KYLIN_RELEASE_VERSION(10, 4)) +#undef NEED_DEVLINK_REGION_CREATE_OPS +#undef NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY +#undef NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY +#undef NEED_ETH_HW_ADDR_SET +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#endif +#endif + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 4, 131)) +#define KYLIN_V4_ETHTOOL_FIX_BOND +#endif +#endif + +/* uos os detect here */ +#if defined(UOS_OS) || 
defined(UTS_RELEASE) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)) +#undef HAVE_SKB_XMIT_MORE +#undef HAVE_TC_SETUP_CLSFLOWER +#define NO_ETH_GET_HEADLEN +#undef NEED_NETDEV_TX_SENT_QUEUE +#define NO_NEED_PTP_SYSTEM_TIMESTAMP +#endif +#endif + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) +#if ((SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0)) || \ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4))) +#define HAVE_NETIF_TRANS_UPDATE +#endif /* SLES12sp3+ || RHEL7.4+ */ +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0))) +#define HAVE_ETHTOOL_25G_BITS +#define HAVE_ETHTOOL_50G_BITS +#define HAVE_ETHTOOL_100G_BITS +#endif /* RHEL7.3+ || SLES12sp3+ */ +#ifdef ETHTOOL_GLINKSETTINGS /* kernel ethtool.h */ +#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 3)) +#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +#endif +#endif +#else /* 4.7.0 */ +#define HAVE_NETIF_TRANS_UPDATE +#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +#define HAVE_ETHTOOL_25G_BITS +#define HAVE_ETHTOOL_50G_BITS +#define HAVE_ETHTOOL_100G_BITS +#define HAVE_TCF_MIRRED_REDIRECT +#endif /* 4.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) +enum udp_parsable_tunnel_type { + UDP_TUNNEL_TYPE_VXLAN, + UDP_TUNNEL_TYPE_GENEVE, +}; +struct udp_tunnel_info { + unsigned short type; + sa_family_t sa_family; + __be16 port; +}; +#endif + +#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE < UBUNTU_VERSION(4, 8, 0, 0)) +#define tc_no_actions(_exts) true +#define tc_for_each_action(_a, _exts) while (0) +#endif +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0))) && \ + !(RHEL_RELEASE_CODE && \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) +#ifndef NONEED_PCI_REQUEST_IO_REGIONS +static inline int +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +pci_request_io_regions(struct pci_dev *pdev, char *name) +#else +pci_request_io_regions(struct pci_dev *pdev, const char *name) +#endif +{ + return pci_request_selected_regions( + pdev, pci_select_bars(pdev, IORESOURCE_IO), name); +} + +static inline void pci_release_io_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions( + pdev, pci_select_bars(pdev, IORESOURCE_IO)); +} + +static inline int +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +pci_request_mem_regions(struct pci_dev *pdev, char *name) +#else +pci_request_mem_regions(struct pci_dev *pdev, const char *name) +#endif +{ + return pci_request_selected_regions( + pdev, pci_select_bars(pdev, IORESOURCE_MEM), name); +} + +static inline void pci_release_mem_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions( + pdev, pci_select_bars(pdev, IORESOURCE_MEM)); +} +#endif +#endif /* !SLE_VERSION(12,3,0) */ +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0))) +#define HAVE_ETHTOOL_NEW_50G_BITS +#endif /* RHEL7.4+ || SLES12sp3+ */ +#else +#define HAVE_UDP_ENC_RX_OFFLOAD +#define HAVE_ETHTOOL_NEW_50G_BITS +#endif /* 4.8.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) +#ifdef HAVE_TC_SETUP_CLSFLOWER +#if (!(RHEL_RELEASE_CODE) && !(SLE_VERSION_CODE) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12, 3, 0)))) +#define 
HAVE_TC_FLOWER_VLAN_IN_TAGS +#endif /* !RHEL_RELEASE_CODE && !SLE_VERSION_CODE || = RHEL_RELEASE_VERSION(7, 4)) +#define HAVE_ETHTOOL_NEW_1G_BITS +#define HAVE_ETHTOOL_NEW_10G_BITS +#endif /* RHEL7.4+ */ +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 4)) +static inline void bitmap_from_u64(unsigned long *dst, u64 mask) +{ + dst[0] = mask & ULONG_MAX; + + if (sizeof(mask) > sizeof(unsigned long)) + dst[1] = mask >> 32; +} +#endif /* = RHEL_RELEASE_VERSION(7, 4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0)) && \ + !(UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4, 13, 0, 16))) +#ifndef NONEED_ETH_TYPE_VLAN +static inline bool eth_type_vlan(__be16 ethertype) +{ + switch (ethertype) { + case htons(ETH_P_8021Q): +#ifdef ETH_P_8021AD + case htons(ETH_P_8021AD): +#endif + return true; + default: + return false; + } +} +#endif +#endif /* Linux < 4.9 || RHEL < 7.4 || SLES < 12.3 || Ubuntu < 4.3.0-16 */ +#else /* >=4.9 */ +#define HAVE_FLOW_DISSECTOR_KEY_VLAN_PRIO +#define HAVE_ETHTOOL_NEW_1G_BITS +#define HAVE_ETHTOOL_NEW_10G_BITS +#endif /* KERNEL_VERSION(4.9.0) */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) +/* SLES 12.3 and RHEL 7.5 backported this interface */ +#if (!SLE_VERSION_CODE && !RHEL_RELEASE_CODE) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12, 3, 0))) || \ + (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 5))) +static inline bool _kc_napi_complete_done2(struct napi_struct *napi, + int __always_unused work_done) +{ + /* it was really hard to get napi_complete_done to be safe to call + * recursively without running into our own kcompat, so just use + * napi_complete + */ + napi_complete(napi); + + /* true means that the stack is telling the driver to go-ahead and + * re-enable interrupts + */ + return true; +} + +#ifdef napi_complete_done +#undef napi_complete_done +#endif +#define napi_complete_done _kc_napi_complete_done2 +#endif /* sles and rhel exclusion for < 4.10 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) +#define HAVE_DEV_WALK_API +#define HAVE_ETHTOOL_NEW_2500MB_BITS +#define HAVE_ETHTOOL_5G_BITS +#endif /* RHEL7.4+ */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE == SLE_VERSION(12, 3, 0))) +#define HAVE_STRUCT_DMA_ATTRS +#endif /* (SLES == 12.3.0) */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0))) +#define HAVE_NETDEVICE_MIN_MAX_MTU +#endif /* (SLES >= 12.3.0) */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5))) +#define HAVE_STRUCT_DMA_ATTRS +#define HAVE_RHEL7_EXTENDED_MIN_MAX_MTU +#define HAVE_NETDEVICE_MIN_MAX_MTU +#endif +#if (!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0))) && \ + !(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5)))) +#ifndef dma_map_page_attrs +#define dma_map_page_attrs __kc_dma_map_page_attrs +static inline dma_addr_t +__kc_dma_map_page_attrs(struct device *dev, struct page *page, size_t offset, + size_t size, enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + return dma_map_page(dev, page, offset, size, dir); +} +#endif + +#ifndef dma_unmap_page_attrs +#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs +static inline void +__kc_dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + dma_unmap_page(dev, addr, size, dir); +} +#endif + +static inline void __page_frag_cache_drain(struct page *page, + 
unsigned int count) +{ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + if (!page_ref_sub_and_test(page, count)) + return; + + init_page_count(page); +#else + BUG_ON(count > 1); + if (!count) + return; +#endif + __free_pages(page, compound_order(page)); +} +#endif /* !SLE_VERSION(12,3,0) && !RHEL_VERSION(7,5) */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12, 3, 0))) || \ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5))) +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#endif + +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(15, 0, 0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7, 4)))) +#define page_frag_free __free_page_frag +#endif +#ifndef ETH_MIN_MTU +#define ETH_MIN_MTU 68 +#endif /* ETH_MIN_MTU */ + +/* If kernel is older than 4.10 but distro is RHEL >= 7.5 || SLES > 12SP4, + * it does have support for NAPI_STATE + */ +#if ((RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 4, 0)))) +#define HAVE_NAPI_STATE_IN_BUSY_POLL +#endif /* RHEL >= 7.5 || SLES >=12.4 */ +#else /* >= 4.10 */ +#define HAVE_TC_FLOWER_ENC +#define HAVE_NETDEVICE_MIN_MAX_MTU +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#define HAVE_NETDEV_TC_RESETS_XPS +#define HAVE_XPS_QOS_SUPPORT +#define HAVE_DEV_WALK_API +#define HAVE_ETHTOOL_NEW_2500MB_BITS +#define HAVE_ETHTOOL_5G_BITS +/* kernel 4.10 onwards, as part of busy_poll rewrite, new state were added + * which is part of NAPI:state. If NAPI:state=NAPI_STATE_IN_BUSY_POLL, + * it means napi_poll is invoked in busy_poll context + */ +#define HAVE_NAPI_STATE_IN_BUSY_POLL +#define HAVE_TCF_MIRRED_EGRESS_REDIRECT +#define HAVE_PTP_CLOCK_INFO_ADJFINE +#endif /* 4.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) +#ifdef CONFIG_NET_RX_BUSY_POLL +#define HAVE_NDO_BUSY_POLL +#endif /* CONFIG_NET_RX_BUSY_POLL */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5)))) +#define HAVE_VOID_NDO_GET_STATS64 +#endif /* (SLES >= 12.3.0) && (RHEL >= 7.5) */ + +static inline void _kc_dev_kfree_skb_irq(struct sk_buff *skb) +{ + if (!skb) + return; + dev_kfree_skb_irq(skb); +} + +#undef dev_kfree_skb_irq +#define dev_kfree_skb_irq _kc_dev_kfree_skb_irq + +static inline void _kc_dev_consume_skb_irq(struct sk_buff *skb) +{ + if (!skb) + return; + dev_consume_skb_irq(skb); +} + +#undef dev_consume_skb_irq +#define dev_consume_skb_irq _kc_dev_consume_skb_irq + +static inline void _kc_dev_kfree_skb_any(struct sk_buff *skb) +{ + if (!skb) + return; + dev_kfree_skb_any(skb); +} + +#undef dev_kfree_skb_any +#define dev_kfree_skb_any _kc_dev_kfree_skb_any + +static inline void _kc_dev_consume_skb_any(struct sk_buff *skb) +{ + if (!skb) + return; + dev_consume_skb_any(skb); +} + +#undef dev_consume_skb_any +#define dev_consume_skb_any _kc_dev_consume_skb_any + +#else /* > 4.11 */ +#define HAVE_VOID_NDO_GET_STATS64 +#define HAVE_VM_OPS_FAULT_NO_VMA +#endif /* 4.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 7) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 0)) +/* The RHEL 7.7+ NL_SET_ERR_MSG_MOD triggers unused parameter warnings */ +#undef NL_SET_ERR_MSG_MOD +#endif +/* If kernel is older than 4.12 but distro is RHEL >= 
7.5 || SLES > 12SP4, + * it does have support for MIN_NAPI_ID + */ +#if ((RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 4, 0)))) +#define HAVE_MIN_NAPI_ID +#endif /* RHEL >= 7.5 || SLES >= 12.4 */ +#ifndef NL_SET_ERR_MSG_MOD +#define NL_SET_ERR_MSG_MOD(extack, msg) \ + do { \ + uninitialized_var(extack); \ + pr_err(KBUILD_MODNAME ": " msg); \ + } while (0) +#endif /* !NL_SET_ERR_MSG_MOD */ +#else /* >= 4.12 */ +#define HAVE_MIN_NAPI_ID +#endif /* 4.12 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)) +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12, 3, 0))) || \ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5))) +#define HAVE_TCF_EXTS_HAS_ACTION +#endif +#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 4, 0))) +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#endif /* SLES >= 12sp4 */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12, 4, 0))) +#ifndef NONEED_UUID_SIZE +#define UUID_SIZE 16 +typedef struct { + __u8 b[UUID_SIZE]; +} uuid_t; +#define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ + ((uuid_t){ { ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, \ + ((a) >> 8) & 0xff, (a) & 0xff, ((b) >> 8) & 0xff, \ + (b) & 0xff, ((c) >> 8) & 0xff, (c) & 0xff, (d0), (d1), \ + (d2), (d3), (d4), (d5), (d6), (d7) } }) + +static inline bool uuid_equal(const uuid_t *u1, const uuid_t *u2) +{ + return memcmp(u1, u2, sizeof(uuid_t)) == 0; +} +#endif +#else +#define HAVE_METADATA_PORT_INFO +#endif /* !(RHEL >= 7.5) && !(SLES >= 12.4) */ +#else /* > 4.13 */ +#define HAVE_METADATA_PORT_INFO +#define HAVE_HWTSTAMP_FILTER_NTP_ALL +#define HAVE_NDO_SETUP_TC_CHAIN_INDEX +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#define HAVE_PTP_CLOCK_DO_AUX_WORK +#endif /* 4.13.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef ethtool_link_ksettings_del_link_mode +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode) \ + __clear_bit(ETHTOOL_LINK_MODE_##mode##_BIT, (ptr)->link_modes.name) +#endif +#endif /* ETHTOOL_GLINKSETTINGS */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 4, 0))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC +#endif + +#define TIMER_DATA_TYPE unsigned long +#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) + +#define timer_setup(timer, callback, flags) \ + __setup_timer((timer), (TIMER_FUNC_TYPE)(callback), \ + (TIMER_DATA_TYPE)(timer), (flags)) + +#define from_timer(var, callback_timer, timer_fieldname) \ + container_of(callback_timer, typeof(*var), timer_fieldname) + +#ifndef xdp_do_flush_map +#define xdp_do_flush_map() \ + do { \ + } while (0) +#endif +struct _kc_xdp_buff { + void *data; + void *data_end; + void *data_hard_start; +}; +#define xdp_buff _kc_xdp_buff +struct _kc_bpf_prog {}; +#define bpf_prog _kc_bpf_prog +#ifndef DIV_ROUND_DOWN_ULL +#define DIV_ROUND_DOWN_ULL(ll, d) \ + ({ \ + unsigned long long _tmp = (ll); \ + do_div(_tmp, d); \ + _tmp; \ + }) +#endif /* DIV_ROUND_DOWN_ULL */ +#else /* > 4.14 */ 
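+/* Kernels >= 4.14 are assumed to provide native XDP, timer_setup() and the
+ * updated ndo_setup_tc() interface, so only the feature flags are defined.
+ */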
+#define HAVE_XDP_SUPPORT +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_TCF_EXTS_HAS_ACTION +#endif /* 4.14.0 */ + +/*****************************************************************************/ +#if !defined(ETHTOOL_GLINKSETTINGS) || defined(KYLIN_V4_ETHTOOL_FIX_BOND) + +#define __ETHTOOL_LINK_MODE_MASK_NBITS 32 +#define ETHTOOL_LINK_MASK_SIZE BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS) + +/** + * struct ethtool_link_ksettings + * @link_modes: supported and advertising, single item arrays + * @link_modes.supported: bitmask of supported link speeds + * @link_modes.advertising: bitmask of currently advertised speeds + * @base: base link details + * @base.speed: current link speed + * @base.port: current port type + * @base.duplex: current duplex mode + * @base.autoneg: current autonegotiation settings + * + * This struct and the following macros provide a way to support the old + * ethtool get/set_settings API on older kernels, but in the style of the new + * GLINKSETTINGS API. In this way, the same code can be used to support both + * APIs as seemlessly as possible. + * + * It should be noted the old API only has support up to the first 32 bits. + */ +struct ethtool_link_ksettings { + struct { + u32 speed; + u8 port; + u8 duplex; + u8 autoneg; + } base; + struct { + unsigned long supported[ETHTOOL_LINK_MASK_SIZE]; + unsigned long advertising[ETHTOOL_LINK_MASK_SIZE]; + } link_modes; +}; + +#define ETHTOOL_LINK_NAME_advertising(mode) ADVERTISED_##mode +#define ETHTOOL_LINK_NAME_supported(mode) SUPPORTED_##mode +#define ETHTOOL_LINK_NAME(name) ETHTOOL_LINK_NAME_##name +#define ETHTOOL_LINK_CONVERT(name, mode) ETHTOOL_LINK_NAME(name)(mode) + +/** + * ethtool_link_ksettings_zero_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + */ +#define ethtool_link_ksettings_zero_link_mode(ptr, name) \ + (*((ptr)->link_modes.name) = 0x0) + +/** + * ethtool_link_ksettings_add_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to add + */ +#define ethtool_link_ksettings_add_link_mode(ptr, name, mode) \ + (*((ptr)->link_modes.name) |= \ + (typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode)) + +/** + * ethtool_link_ksettings_del_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to delete + */ +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode) \ + (*((ptr)->link_modes.name) &= \ + ~(typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode)) + +/** + * ethtool_link_ksettings_test_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to add + */ +#define ethtool_link_ksettings_test_link_mode(ptr, name, mode) \ + (!!(*((ptr)->link_modes.name) & ETHTOOL_LINK_CONVERT(name, mode))) + +/** + * _kc_ethtool_ksettings_to_cmd - Convert ethtool_link_ksettings to ethtool_cmd + * @ks: ethtool_link_ksettings struct + * @cmd: ethtool_cmd struct + * + * Convert an ethtool_link_ksettings structure into the older ethtool_cmd + * structure. We provide this in kcompat.h so that drivers can easily + * implement the older .{get|set}_settings as wrappers around the new api. + * Hence, we keep it prefixed with _kc_ to make it clear this isn't actually + * a real function in the kernel. 
+ */ +static inline void +_kc_ethtool_ksettings_to_cmd(struct ethtool_link_ksettings *ks, + struct ethtool_cmd *cmd) +{ + cmd->supported = (u32)ks->link_modes.supported[0]; + cmd->advertising = (u32)ks->link_modes.advertising[0]; + ethtool_cmd_speed_set(cmd, ks->base.speed); + cmd->duplex = ks->base.duplex; + cmd->autoneg = ks->base.autoneg; + cmd->port = ks->base.port; +} + +#endif /* !ETHTOOL_GLINKSETTINGS */ + +/*****************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12, 3, 0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7, 5)))) +#define phy_speed_to_str _kc_phy_speed_to_str +const char *_kc_phy_speed_to_str(int speed); +#else /* (LINUX >= 4.14.0) || (SLES > 12.3.0) || (RHEL > 7.5) */ +#include +#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)) +#if ((RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 6))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15, 1, 0)))) +/* CentOS-7-aarch64-Everything-1810.iso not define this */ +#ifndef CONFIG_ARM64 +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_TCF_BLOCK +#endif /* CONFIG_ARM64 */ +#else /* RHEL >= 7.6 || SLES >= 15.1 */ +#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */ +void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, + struct ethtool_link_ksettings *src); +#define ethtool_intersect_link_masks _kc_ethtool_intersect_link_masks +#else /* >= 4.15 */ +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_TCF_BLOCK +#endif /* 4.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 7)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12, 4, 0) && \ + SLE_VERSION_CODE < SLE_VERSION(15, 0, 0)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(15, 1, 0))) +/* The return value of the strscpy() and strlcpy() functions is different. + * This could be potentially hazard for the future. + * To avoid this the void result is forced. + * So it is not possible use this function with the return value. + * Return value is required in kernel 4.3 through 4.15 + */ +#define strscpy(...) (void)(strlcpy(__VA_ARGS__)) +#endif /* !RHEL >= 7.7 && !SLES12sp4+ && !SLES15sp1+ */ + +#define pci_printk(level, pdev, fmt, arg...) \ + dev_printk(level, &(pdev)->dev, fmt, ##arg) +#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg) +#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg) +#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg) +#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg) +#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg) +#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg) +#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg) +#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg) + +#ifndef array_index_nospec +static inline unsigned long _kc_array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + /* + * Always calculate and emit the mask even if the compiler + * thinks the mask is not needed. 
The compiler does not take + * into account the value of @index under speculation. + */ + OPTIMIZER_HIDE_VAR(index); + return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1); +} + +#define array_index_nospec(index, size) \ + ({ \ + typeof(index) _i = (index); \ + typeof(size) _s = (size); \ + unsigned long _mask = _kc_array_index_mask_nospec(_i, _s); \ + \ + BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ + BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ + \ + (typeof(_i))(_i & _mask); \ + }) +#endif /* array_index_nospec */ +#ifndef sizeof_field +#define sizeof_field(TYPE, MEMBER) (sizeof((((TYPE *)0)->MEMBER))) +#endif /* sizeof_field */ +/* add a check for the Oracle UEK 4.14.35 kernel as + * it backported a version of this bitmap function + */ +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 0)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12, 5, 0) && \ + SLE_VERSION_CODE < SLE_VERSION(15, 0, 0) || \ + SLE_VERSION_CODE >= SLE_VERSION(15, 1, 0)) && \ + !(LINUX_VERSION_CODE == KERNEL_VERSION(4, 14, 35)) +/* + * Copy bitmap and clear tail bits in last word. + */ +static inline void bitmap_copy_clear_tail(unsigned long *dst, + const unsigned long *src, + unsigned int nbits) +{ + bitmap_copy(dst, src, nbits); + if (nbits % BITS_PER_LONG) + dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits); +} + +/* + * On 32-bit systems bitmaps are represented as u32 arrays internally, and + * therefore conversion is not needed when copying data from/to arrays of u32. + */ +#if BITS_PER_LONG == 64 +void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, + unsigned int nbits); +#else +#define bitmap_from_arr32(bitmap, buf, nbits) \ + bitmap_copy_clear_tail((unsigned long *)(bitmap), \ + (const unsigned long *)(buf), (nbits)) +#endif /* BITS_PER_LONG == 64 */ +#endif /* !(RHEL >= 8.0) && !(SLES >= 12.5 && SLES < 15.0 || SLES >= 15.1) */ +#else /* >= 4.16 */ +#include +#define HAVE_TC_FLOWER_OFFLOAD_COMMON_EXTACK +#define HAVE_TCF_MIRRED_DEV +#define HAVE_VF_STATS_DROPPED +#endif /* 4.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)) +#include +#include +#define PCIE_SPEED_16_0GT 0x17 +#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */ +#define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */ +#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */ +void _kc_pcie_print_link_status(struct pci_dev *dev); +#define pcie_print_link_status _kc_pcie_print_link_status +#else /* >= 4.17.0 */ +#define HAVE_XDP_BUFF_IN_XDP_H +#endif /* 4.17.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)) +#include "kcompat_overflow.h" + +#if (SLE_VERSION_CODE < SLE_VERSION(15, 1, 0)) +#define firmware_request_nowarn request_firmware_direct +#endif /* SLES < 15.1 */ + +#else +#include +#include +#define HAVE_XDP_FRAME_STRUCT +#define HAVE_XDP_SOCK +#define HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS +#define NO_NDO_XDP_FLUSH +#endif /* 4.18.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) +#define bitmap_alloc(nbits, flags) \ + kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long), flags) +#define bitmap_zalloc(nbits, flags) bitmap_alloc(nbits, ((flags) | __GFP_ZERO)) +#define bitmap_free(bitmap) kfree(bitmap) +#ifdef ETHTOOL_GLINKSETTINGS +#define ethtool_ks_clear(ptr, name) \ + 
ethtool_link_ksettings_zero_link_mode(ptr, name) +#define ethtool_ks_add_mode(ptr, name, mode) \ + ethtool_link_ksettings_add_link_mode(ptr, name, mode) +#define ethtool_ks_del_mode(ptr, name, mode) \ + ethtool_link_ksettings_del_link_mode(ptr, name, mode) +#define ethtool_ks_test(ptr, name, mode) \ + ethtool_link_ksettings_test_link_mode(ptr, name, mode) +#endif /* ETHTOOL_GLINKSETTINGS */ +#define HAVE_NETPOLL_CONTROLLER +#define REQUIRE_PCI_CLEANUP_AER_ERROR_STATUS +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15, 1, 0))) +#define HAVE_TCF_MIRRED_DEV +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#endif + +static inline void __kc_metadata_dst_free(void *md_dst) +{ + kfree(md_dst); +} + +#define metadata_dst_free(md_dst) __kc_metadata_dst_free(md_dst) +#else /* >= 4.19.0 */ +#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK +#define NO_NETDEV_BPF_PROG_ATTACHED +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_NETDEV_SB_DEV +#define HAVE_TCF_VLAN_TPID +#define HAVE_RHASHTABLE_TYPES +#endif /* 4.19.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0)) +#define HAVE_XDP_UMEM_PROPS +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 0))) +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#endif /* RHEL >= 8.0 */ +#if ((SLE_VERSION_CODE >= SLE_VERSION(12, 5, 0) && \ + SLE_VERSION_CODE < SLE_VERSION(15, 0, 0)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(15, 1, 0))) +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#endif /* SLE == 12sp5 || SLE >= 15sp1 */ +#else /* >= 4.20.0 */ +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#define HAVE_AF_XDP_ZC_SUPPORT +#define HAVE_VXLAN_TYPE +#define HAVE_ETF_SUPPORT /* Earliest TxTime First */ +#endif /* 4.20.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8, 0))) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) +#define NETLINK_MAX_COOKIE_LEN 20 +struct netlink_ext_ack { + const char *_msg; + const struct nlattr *bad_attr; + u8 cookie[NETLINK_MAX_COOKIE_LEN]; + u8 cookie_len; +}; + +#endif /* < 4.12 */ +/* +static inline int _kc_dev_open(struct net_device *netdev, + struct netlink_ext_ack __always_unused *extack) +{ + return dev_open(netdev); +} + +#define dev_open _kc_dev_open +*/ +static inline int +_kc_dev_change_flags(struct net_device *netdev, unsigned int flags, + struct netlink_ext_ack __always_unused *extack) +{ + return dev_change_flags(netdev, flags); +} + +#define dev_change_flags _kc_dev_change_flags +#endif /* !(RHEL_RELEASE_CODE && RHEL > RHEL(8,0)) */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 7) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 1))) +#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +#define HAVE_PTP_CLOCK_INFO_GETTIMEX64 +#else /* RHEL >= 7.7 && RHEL < 8.0 || RHEL >= 8.1 */ +#ifndef NO_NEED_PTP_SYSTEM_TIMESTAMP +#if !(defined(CONFIG_KYLINOS_SERVER) && \ + (LINUX_VERSION_CODE > KERNEL_VERSION(4, 19, 0))) +#if !(defined(EULER_OS)) +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; +}; + +static inline void +ptp_read_system_prets(struct ptp_system_timestamp __always_unused *sts) +{ + ; +} + +static inline void +ptp_read_system_postts(struct ptp_system_timestamp __always_unused *sts) +{ + ; +} +#endif +#endif +#endif +#endif 
/* !(RHEL >= 7.7 && RHEL != 8.0) */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 1))) +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#endif /* RHEL 8.1 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 2)) +#define HAVE_TC_INDIR_BLOCK +#endif /* RHEL 8.2 */ +#else /* >= 5.0.0 */ +#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +#define HAVE_PTP_CLOCK_INFO_GETTIMEX64 +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_DMA_ALLOC_COHERENT_ZEROES_MEM +#define HAVE_GENEVE_TYPE +#define HAVE_TC_INDIR_BLOCK +#endif /* 5.0.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 1))) +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_DEVLINK_INFO_GET +#define HAVE_DEVLINK_FLASH_UPDATE +#else /* RHEL < 8.1 */ +#if defined(HAVE_TC_SETUP_CLSFLOWER) && \ + !(defined(CONFIG_KYLINOS_SERVER) && \ + (LINUX_VERSION_CODE > KERNEL_VERSION(4, 19, 0))) +#include + +struct flow_match { + struct flow_dissector *dissector; + void *mask; + void *key; +}; + +struct flow_match_basic { + struct flow_dissector_key_basic *key, *mask; +}; + +struct flow_match_control { + struct flow_dissector_key_control *key, *mask; +}; + +struct flow_match_eth_addrs { + struct flow_dissector_key_eth_addrs *key, *mask; +}; + +#ifdef HAVE_TC_FLOWER_ENC +struct flow_match_enc_keyid { + struct flow_dissector_key_keyid *key, *mask; +}; +#endif + +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +struct flow_match_vlan { + struct flow_dissector_key_vlan *key, *mask; +}; +#endif + +struct flow_match_ipv4_addrs { + struct flow_dissector_key_ipv4_addrs *key, *mask; +}; + +struct flow_match_ipv6_addrs { + struct flow_dissector_key_ipv6_addrs *key, *mask; +}; + +struct flow_match_ports { + struct flow_dissector_key_ports *key, *mask; +}; + +struct flow_rule { + struct flow_match match; +#if 0 + /* In 5.1+ kernels, action is a member of struct flow_rule but is + * not compatible with how we kcompat tc_cls_flower_offload_flow_rule + * below. By not declaring it here, any driver that attempts to use + * action as an element of struct flow_rule will fail to compile + * instead of silently trying to access memory that shouldn't be. 
+ */ + struct flow_action action; +#endif +}; + +void flow_rule_match_basic(const struct flow_rule *rule, + struct flow_match_basic *out); +void flow_rule_match_control(const struct flow_rule *rule, + struct flow_match_control *out); +void flow_rule_match_eth_addrs(const struct flow_rule *rule, + struct flow_match_eth_addrs *out); +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +void flow_rule_match_vlan(const struct flow_rule *rule, + struct flow_match_vlan *out); +#endif +void flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out); +void flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out); +void flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out); +#ifdef HAVE_TC_FLOWER_ENC +void flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out); +void flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out); +void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out); +void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out); +void flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out); +#endif + +static inline struct flow_rule * +tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd) +{ + return (struct flow_rule *)&tc_flow_cmd->dissector; +} + +static inline bool flow_rule_match_key(const struct flow_rule *rule, + enum flow_dissector_key_id key) +{ + return dissector_uses_key(rule->match.dissector, key); +} +#endif /* HAVE_TC_SETUP_CLSFLOWER */ + +#endif /* RHEL < 8.1 */ + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 1))) +#if defined(CONFIG_KYLINOS_SERVER) && \ + (LINUX_VERSION_CODE > KERNEL_VERSION(4, 19, 0)) +#else +#define devlink_params_publish(devlink) \ + do { \ + } while (0) +#define devlink_params_unpublish(devlink) \ + do { \ + } while (0) +#endif +#endif + +#else /* >= 5.1.0 */ +#define HAVE_NDO_FDB_ADD_EXTACK +#define NO_XDP_QUERY_XSK_UMEM +#define HAVE_AF_XDP_NETDEV_UMEM +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_TC_FLOWER_ENC_IP +#define HAVE_DEVLINK_INFO_GET +#define HAVE_DEVLINK_FLASH_UPDATE +#define HAVE_DEVLINK_PORT_PARAMS +#endif /* 5.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) +#if (defined HAVE_SKB_XMIT_MORE) && \ + (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 2)))) +#define netdev_xmit_more() (skb->xmit_more) +#else +#define netdev_xmit_more() (0) +#endif + +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 2)))) +#ifndef NO_ETH_GET_HEADLEN +#if !defined(eth_get_headlen) && \ + !(defined(CONFIG_KYLINOS_SERVER) && \ + (LINUX_VERSION_CODE > KERNEL_VERSION(4, 19, 0))) +static inline u32 +__kc_eth_get_headlen(const struct net_device __always_unused *dev, void *data, + unsigned int len) +{ + return eth_get_headlen(data, len); +} + +#define eth_get_headlen(dev, data, len) __kc_eth_get_headlen(dev, data, len) +#endif /* !eth_get_headlen */ +#endif +#endif /* !RHEL >= 8.2 */ + +#ifndef mmiowb +#ifdef CONFIG_IA64 +#define mmiowb() asm volatile("mf.a" ::: "memory") +#else +#define mmiowb() +#endif +#endif /* mmiowb */ + +#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8, 1)) +#define HAVE_NDO_GET_DEVLINK_PORT +#endif /* RHEL > 8.1 */ + +#else /* >= 5.2.0 */ +#define 
HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED +#define SPIN_UNLOCK_IMPLIES_MMIOWB +#define HAVE_NDO_GET_DEVLINK_PORT +#endif /* 5.2.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 2))) +#define flow_block_offload tc_block_offload +#define flow_block_command tc_block_command +#define flow_cls_offload tc_cls_flower_offload +#define flow_block_binder_type tcf_block_binder_type +#define flow_cls_common_offload tc_cls_common_offload +#define flow_cls_offload_flow_rule tc_cls_flower_offload_flow_rule +#define FLOW_CLS_REPLACE TC_CLSFLOWER_REPLACE +#define FLOW_CLS_DESTROY TC_CLSFLOWER_DESTROY +#define FLOW_CLS_STATS TC_CLSFLOWER_STATS +#define FLOW_CLS_TMPLT_CREATE TC_CLSFLOWER_TMPLT_CREATE +#define FLOW_CLS_TMPLT_DESTROY TC_CLSFLOWER_TMPLT_DESTROY +#define FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS \ + TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS +#define FLOW_BLOCK_BIND TC_BLOCK_BIND +#define FLOW_BLOCK_UNBIND TC_BLOCK_UNBIND + +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#include + +int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head *driver_list, + tc_setup_cb_t *cb, void *cb_ident, + void *cb_priv, bool ingress_only); + +#define flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \ + ingress_only) \ + _kc_flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \ + ingress_only) +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#else /* RHEL >= 8.2 */ +#define HAVE_FLOW_BLOCK_API +#define HAVE_DEVLINK_PORT_ATTR_PCI_VF +#endif /* RHEL >= 8.2 */ + +#ifndef ETH_P_LLDP +#define ETH_P_LLDP 0x88CC +#endif /* !ETH_P_LLDP */ + +#else /* >= 5.3.0 */ +#define XSK_UMEM_RETURNS_XDP_DESC +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)) +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15, 3, 0)) +#define HAVE_XSK_UMEM_HAS_ADDRS +#endif /* SLE < 15.3 */ +#endif /* < 5.8.0*/ +#define HAVE_FLOW_BLOCK_API +#define HAVE_DEVLINK_PORT_ATTR_PCI_VF +#if IS_ENABLED(CONFIG_DIMLIB) +#define HAVE_CONFIG_DIMLIB +#endif +#endif /* 5.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) +#if (SLE_VERSION_CODE >= SLE_VERSION(15, 2, 0)) +#define HAVE_NDO_XSK_WAKEUP +#endif /* SLES15sp2 */ +#else /* >= 5.4.0 */ +#define HAVE_NDO_XSK_WAKEUP +#endif /* 5.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0)) +static inline unsigned long _kc_bitmap_get_value8(const unsigned long *map, + unsigned long start) +{ + const size_t index = BIT_WORD(start); + const unsigned long offset = start % BITS_PER_LONG; + + return (map[index] >> offset) & 0xFF; +} +#define bitmap_get_value8 _kc_bitmap_get_value8 + +static inline void _kc_bitmap_set_value8(unsigned long *map, + unsigned long value, + unsigned long start) +{ + const size_t index = BIT_WORD(start); + const unsigned long offset = start % BITS_PER_LONG; + + map[index] &= ~(0xFFUL << offset); + map[index] |= value << offset; +} +#define bitmap_set_value8 _kc_bitmap_set_value8 + +#endif /* 5.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#define xsk_umem_release_addr xsk_umem_discard_addr +#define xsk_umem_release_addr_rq xsk_umem_discard_addr_rq +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#if 
(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3)) || \ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15, 3, 0))) +#define HAVE_TX_TIMEOUT_TXQUEUE +#endif +#else /* >= 5.6.0 */ +#define HAVE_TX_TIMEOUT_TXQUEUE +#endif /* 5.6.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0)) +u64 _kc_pci_get_dsn(struct pci_dev *dev); +#define pci_get_dsn(dev) _kc_pci_get_dsn(dev) +/* add a check for the Oracle UEK 5.4.17 kernel which + * backported the rename of the aer functions + */ +#if !(SLE_VERSION_CODE > SLE_VERSION(15, 2, 0)) && \ + !((LINUX_VERSION_CODE == KERNEL_VERSION(5, 3, 18)) && \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(14, 0, 0))) && \ + !(LINUX_VERSION_CODE == KERNEL_VERSION(5, 4, 17)) && \ + !(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3))) +#define pci_aer_clear_nonfatal_status pci_cleanup_aer_uncorrect_error_status +#endif + +#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID +#define DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID "fw.bundle_id" +#endif +#else /* >= 5.7.0 */ +#define HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT +#endif /* 5.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)) +#if !(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 4))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15, 3, 0)) +/* (RHEL < 8.4) || (SLE < 15.3) */ +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#elif (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 4))) +/* RHEL >= 8.4 */ +#define HAVE_XDP_BUFF_FRAME_SZ +#endif +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE != RHEL_RELEASE_VERSION(8, 10))) +#ifndef flex_array_size +#define flex_array_size(p, member, count) \ + array_size(count, sizeof(*(p)->member) + __must_be_array((p)->member)) +#endif +#endif +#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15, 3, 0))) +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#ifndef xsk_umem_get_rx_frame_size +static inline u32 _xsk_umem_get_rx_frame_size(struct xdp_umem *umem) +{ + return umem->chunk_size_nohr - XDP_PACKET_HEADROOM; +} + +#define xsk_umem_get_rx_frame_size _xsk_umem_get_rx_frame_size +#endif /* xsk_umem_get_rx_frame_size */ +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#else /* SLE >= 15.3 */ +#define HAVE_XDP_BUFF_FRAME_SZ +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#endif /* SLE >= 15.3 */ +#else /* >= 5.8.0 */ +#define HAVE_XDP_SOCK_DRV +#define HAVE_TC_FLOW_INDIR_DEV +#define HAVE_TC_FLOW_INDIR_BLOCK_CLEANUP +#define HAVE_XDP_BUFF_FRAME_SZ +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#endif /* 5.8.0 */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3))) +#define HAVE_TC_FLOW_INDIR_DEV +#endif +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15, 3, 0))) +#define HAVE_TC_FLOW_INDIR_DEV +#endif /* SLE_VERSION_CODE && SLE_VERSION_CODE >= SLES15SP3 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 4))) +#define HAVE_TC_FLOW_INDIR_BLOCK_CLEANUP +#endif /* (RHEL >= 8.4) */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)) +#else /* >= 5.9.0 */ +#define HAVE_FLOW_INDIR_BLOCK_QDISC +#define HAVE_UDP_TUNNEL_NIC_INFO +#endif /* 5.9.0 */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8, 3))) 
+#define HAVE_FLOW_INDIR_BLOCK_QDISC +#endif +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15, 3, 0))) +#define HAVE_FLOW_INDIR_BLOCK_QDISC +#endif /* SLE_VERSION_CODE && SLE_VERSION_CODE >= SLES15SP3 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) +#if (!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15, 3, 0)))) +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xsk_get_pool_from_qid xdp_get_umem_from_qid +#define xsk_pool_get_rx_frame_size xsk_umem_get_rx_frame_size +#define xsk_pool_set_rxq_info xsk_buff_set_rxq_info +#define xsk_pool_dma_unmap xsk_buff_dma_unmap +#define xsk_pool_dma_map xsk_buff_dma_map +#define xsk_tx_peek_desc xsk_umem_consume_tx +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup + +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL +#include +static inline void _kc_xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, + void __always_unused *pool) +{ + xsk_buff_dma_sync_for_cpu(xdp); +} + +#define xsk_buff_dma_sync_for_cpu(xdp, pool) \ + _kc_xsk_buff_dma_sync_for_cpu(xdp, pool) +#endif /* HAVE_MEM_TYPE_XSK_BUFF_POOL */ + +#else /* SLE >= 15.3 */ +#define HAVE_NETDEV_BPF_XSK_POOL +#endif /* SLE >= 15.3 */ +#else /* >= 5.10.0 */ +#define HAVE_NETDEV_BPF_XSK_POOL +#endif /* <5.10.0 */ + +#if defined(EULER_OS) || defined(OPENEULER_VERSION_CODE) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif + +#if defined(OPENEULER_VERSION_CODE) +#if (OPENEULER_VERSION_CODE > OPENEULER_VERSION(2203, 2)) +#ifdef NEED_ETH_HW_ADDR_SET +#undef NEED_ETH_HW_ADDR_SET +#endif +#endif +#endif + +#endif +/*****************************************************************************/ +#ifdef HAVE_XDP_RXQ_INFO_REG_3_PARAMS +#ifdef HAVE_XDP_BUFF_IN_XDP_H +#include +#else +#include +#endif /* HAVE_XDP_BUFF_IN_XDP_H */ +static inline int _kc_xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, + struct net_device *dev, u32 queue_index, + unsigned int __always_unused napi_id) +{ + return xdp_rxq_info_reg(xdp_rxq, dev, queue_index); +} + +#define xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id) \ + _kc_xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id) +#endif /* HAVE_XDP_RXQ_INFO_REG_3_PARAMS */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) +#ifdef HAVE_NAPI_BUSY_LOOP +#ifdef CONFIG_NET_RX_BUSY_POLL +#include +static inline void _kc_napi_busy_loop(unsigned int napi_id, + bool (*loop_end)(void *, unsigned long), + void *loop_end_arg, + bool __always_unused prefer_busy_poll, + u16 __always_unused budget) +{ + napi_busy_loop(napi_id, loop_end, loop_end_arg); +} + +#define napi_busy_loop(napi_id, loop_end, loop_end_arg, prefer_busy_poll, \ + budget) \ + _kc_napi_busy_loop(napi_id, loop_end, loop_end_arg, prefer_busy_poll, \ + budget) +#endif /* CONFIG_NET_RX_BUSY_POLL */ +#endif /* HAVE_NAPI_BUSY_LOOP */ +#endif /* <5.11.0 */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0)) +#define NO_PCIE_ERROR_REPORTING +#endif + +/* + * Load the implementations file which actually defines kcompat backports. 
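+ * (The convention, as with the NEED_SYSFS_CREATE_GROUPS and
+ * NEED_SYSFS_REMOVE_GROUPS fallbacks declared at the bottom of this
+ * header: a kcompat_*defs.h header sets a NEED_<symbol> flag whenever the
+ * running kernel lacks <symbol>, and kcompat_impl.h provides the
+ * backported replacement under that flag.)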
+ * Legacy backports still exist in this file, but all new backports must be + * implemented using kcompat_*defs.h and kcompat_impl.h + */ +#include "kcompat_impl.h" +#ifdef NEED_SYSFS_CREATE_GROUPS +int sysfs_create_groups(struct kobject *kobj, + const struct attribute_group **groups); +#endif +#ifdef NEED_SYSFS_REMOVE_GROUPS +void sysfs_remove_groups(struct kobject *kobj, + const struct attribute_group **groups); +#endif +#endif /* _KCOMPAT_H_ */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_dcb.c b/drivers/net/ethernet/mucse/rnp/rnp_dcb.c new file mode 100755 index 0000000000000000000000000000000000000000..b0a2871ad701d7800d04b53d4dc9e60a46840a5e --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_dcb.c @@ -0,0 +1,365 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include + +#ifdef CONFIG_DCB +#include "rnp.h" +#include "rnp_dcb.h" +#include "rnp_sriov.h" +#include "rnp_common.h" + +static void rnp_config_prio_map(struct rnp_adapter *adapter, u8 pfc_map) +{ + int i, j; + u32 prio_map = 0; + u8 port = adapter->port; + u8 *prio_tc = adapter->prio_tc_map; + void __iomem *ioaddr = adapter->hw.hw_addr; + u8 num_tc = adapter->num_tc; + + for (i = 0; i < num_tc; i++) { + if (i > RNP_MAX_TCS_NUM) + break; + for (j = 0; j < RNP_MAX_USER_PRIO; j++) { + dbg("prio_tc[%d]==%d tc_num[%d] pfc_map 0x%.2x\n", j, + prio_tc[j], i, pfc_map); + if ((prio_tc[j] == i) && (pfc_map & BIT(j))) { + dbg("match rule tc_num %d prio_%d\n", i, j); + prio_map |= (i << (2 * j)); + dbg("match prio_tc change to 0x%.2x\n", + prio_map); + } + } + } + /* config untage pkt fifo */ + /* we just have four tc fifo and one fifo is must belong to untage-pkt + * so untage need map to the remain tc fifio + */ + prio_map |= i << RNP_FC_UNCTAGS_MAP_OFFSET; + prio_map |= (1 << 30) | (1 << 31); + rnp_wr_reg(ioaddr + RNP_FC_PORT_PRIO_MAP(port), prio_map); + dbg("tc_prio_map[%d] 0x%.2x\n", i, prio_map); + + /* enable port prio_map config */ + rnp_wr_reg(ioaddr + RNP_FC_EN_CONF_AVAILABLE, 1); +} + +static int rnp_dcb_hw_pfc_config(struct rnp_adapter *adapter, u8 pfc_map) +{ + struct rnp_dcb_cfg *dcb = &adapter->dcb_cfg; + void __iomem *ioaddr = adapter->hw.hw_addr; + u8 i = 0, j = 0; + u32 reg = 0; + u8 num_tc = adapter->num_tc; + + if (!(adapter->flags & RNP_FLAG_DCB_ENABLED) || + adapter->num_rx_queues <= 1) { + dev_warn(&adapter->pdev->dev, "%s DCB_FLAG%d", + "don't support pfc when rx quene less" + "than 1 or disable dcb feature \n", + adapter->flags & RNP_FLAG_DCB_ENABLED); + return 0; + } + /* 1.Enable Receive Priority Flow Control */ + reg = RNP_RX_RFE | RNP_PFCE; + rnp_wr_reg(ioaddr + RNP_MAC_RX_FLOW_CTRL, reg); + /* 2.Configure which port will in pfc mode*/ + reg = rnp_rd_reg(ioaddr + RNP_FC_PORT_ENABLE); + /* 3.For Now just support two port Version So just enabled + * PF port 0 to enable flow control + */ + reg |= 1 << adapter->port; + rnp_wr_reg(ioaddr + RNP_FC_PORT_ENABLE, reg); + + for (i = 0; i < num_tc; i++) { + int enabled = 0; + + for (j = 0; j < RNP_MAX_USER_PRIO; j++) { + if ((adapter->prio_tc_map[j] == i) && + (pfc_map & BIT(j))) { + enabled = 1; + dcb->pfc_cfg.hw_pfc_map |= BIT(j); + dcb->pfc_cfg.pfc_num++; + break; + } + } + if (enabled) { + /* 4.Enable Transmit Priority Flow Control */ + reg = RNP_TX_TFE | + (RNP_PAUSE_28_SLOT_TIME + << RNP_FC_TX_PLTH_OFFSET) | + (RNP_DEFAULT_PAUSE_TIME << RNP_FC_TX_PT_OFFSET); + + rnp_wr_reg(ioaddr + RNP_MAC_Q0_TX_FLOW_CTRL(j), reg); + } + } + /* the below configure can just use default config */ + /* 5.config for pri_map */ + 
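+ /*
+  * Worked example of the encoding done by rnp_config_prio_map() above
+  * (values are illustrative, not taken from real hardware state): with
+  * num_tc = 2, prio_tc_map = {0, 0, 0, 0, 1, 1, 1, 1} and pfc_map = 0x30,
+  * user priorities 4 and 5 map to TC 1, so the loop accumulates
+  * (1 << 8) | (1 << 10) = 0x500 in prio_map; the untagged-traffic TC
+  * index (2 here) is then placed at RNP_FC_UNCTAGS_MAP_OFFSET and bits
+  * 30/31 are set before the value is written to RNP_FC_PORT_PRIO_MAP().
+  */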
rnp_config_prio_map(adapter, pfc_map); + /* 6.Configure PFC Rx high/low thresholds per TC */ + + /* 7.Configure Rx full/empty thresholds per tc*/ + + /* 8.Configure pause time (3 TCs per register) */ + /* 9.Configure flow control pause low threshold value */ + + return 0; +} + +__maybe_unused static int rnp_dcb_hw_fc_enable(struct rnp_adapter *adapter) +{ + void __iomem *ioaddr = adapter->hw.hw_addr; + + /* 1. Enabled Transmit Flow Control */ + rnp_wr_reg(ioaddr + RNP_MAC_Q0_TX_FLOW_CTRL(0), RNP_TX_TFE); + /* 2. Enabled Recvive Flow Control */ + rnp_wr_reg(ioaddr + RNP_MAC_RX_FLOW_CTRL, RNP_RX_RFE); + /* 3. Configure Fc Pause Time And Pause Low Threshold + * just use default value? + */ + return 0; +} + +static int rnp_dcbnl_getpfc(struct net_device *dev, struct ieee_pfc *pfc) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_dcb_cfg *dcb = &adapter->dcb_cfg; + u8 i = 0, j = 0; + + memset(pfc, 0, sizeof(*pfc)); + pfc->pfc_cap = dcb->pfc_cfg.pfc_max; + /* Pfc setting is based on TC */ + for (i = 0; i < adapter->num_tc; i++) { + for (j = 0; j < RNP_MAX_USER_PRIO; j++) { + if ((adapter->prio_tc_map[j] == i) && + (dcb->pfc_cfg.hw_pfc_map & BIT(i))) + pfc->pfc_en |= BIT(j); + } + } + /* do we need to get the pfc statistic*/ + /* 1. get the tc channel send and recv pfc pkts*/ + /* + *for (i = 0; i < TSRN10_MAX_TC_NUM; i++) { + * pfc->requests[i] = dcb->requests[i]; + * pfc->indications[i] = dcb->indications[i]; + } + */ + + return 0; +} + +/* rnp Support IEEE 802.3 flow-control and + * Priority base flow control (PFC) + */ +static u8 rnp_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap) +{ + struct rnp_adapter *priv = netdev_priv(net_dev); + + switch (capid) { + case DCB_CAP_ATTR_PFC: + *cap = true; + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_DCBX: + *cap = priv->dcb_cfg.dcbx_mode; + break; + default: + *cap = false; + break; + } + + return 0; +} + +static u8 rnp_dcbnl_getstate(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + return !!(adapter->flags & RNP_FLAG_DCB_ENABLED); +} + +static u8 rnp_dcbnl_setstate(struct net_device *netdev, u8 state) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + int err = 0; + + /* verify there is something to do, if not then exit */ + if (!state == !(adapter->flags & RNP_FLAG_DCB_ENABLED)) + goto out; + + err = rnp_setup_tc(netdev, + state ? adapter->dcb_cfg.num_tcs.pfc_tcs : 0); +out: + return !!err; +} + +static u8 rnp_dcbnl_getdcbx(struct net_device *net_dev) +{ + struct rnp_adapter *adapter = netdev_priv(net_dev); + + return adapter->dcb_cfg.dcbx_mode; +} + +static u8 rnp_dcbnl_setdcbx(struct net_device *net_dev, u8 mode) +{ + struct rnp_adapter *adapter = netdev_priv(net_dev); + + adapter->dcb_cfg.dcbx_mode = mode; + + return 0; + return (mode != (adapter->dcb_cfg.dcbx_mode)) ? 
1 : 0; +} + +#ifdef NUMTCS_RETURNS_U8 +static u8 rnp_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +#else +static int rnp_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +#endif +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + u8 rval = 0; + + if (adapter->flags & RNP_FLAG_DCB_ENABLED) { + switch (tcid) { + //case DCB_NUMTCS_ATTR_PG: + // *num = adapter->dcb_cfg.num_tcs.pg_tcs; + // break; + case DCB_NUMTCS_ATTR_PFC: + if (adapter->dcb_cfg.num_tcs.pfc_tcs > + RNP_MAX_TCS_NUM) { + rval = -EINVAL; + break; + } + *num = adapter->dcb_cfg.num_tcs.pfc_tcs; + break; + default: + rval = -EINVAL; + break; + } + } else { + rval = -EINVAL; + } + + return rval; +} + +#ifdef NUMTCS_RETURNS_U8 +static u8 rnp_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) +#else +static int rnp_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) +#endif +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + u8 rval = 0; + + if (adapter->flags & RNP_FLAG_DCB_ENABLED) { + switch (tcid) { + //case DCB_NUMTCS_ATTR_PG: + // adapter->dcb_cfg.num_tcs.pg_tcs = num; + // break; + case DCB_NUMTCS_ATTR_PFC: + adapter->dcb_cfg.num_tcs.pfc_tcs = num; + break; + default: + rval = -EINVAL; + break; + } + } else { + rval = -EINVAL; + } + + return rval; +} + +static int rnp_dcb_parse_config(struct rnp_dcb_cfg *dcb, struct ieee_pfc *pfc) +{ + u8 j = 0, pfc_en_num = 0, pfc_map = 0; + + for (j = 0; j < RNP_MAX_USER_PRIO; j++) { + if ((pfc->pfc_en & BIT(j))) { + pfc_map |= BIT(j); + pfc_en_num++; + } + } + dcb->pfc_cfg.pfc_num = pfc_en_num; + dcb->pfc_cfg.hw_pfc_map = pfc_map; + dbg("pfc_map 0x%.2x pfc->pfc_en 0x%.2x\n", pfc_map, pfc->pfc_en); + /* tc resource rebuild */ + /* we need to decide tx_ring bind to tc 4 fifo-mac*/ + return pfc_map; +} + +static int rnp_dcbnl_setpfc(struct net_device *dev, struct ieee_pfc *pfc) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_dcb_cfg *dcb = &adapter->dcb_cfg; + u8 pfc_map = 0; + + dbg("%s:%d pfc enabled %d\n", __func__, __LINE__, pfc->pfc_en); + if (pfc->pfc_en) { + /*set PFC Priority mask */ + pfc_map = rnp_dcb_parse_config(dcb, pfc); + rnp_dcb_hw_pfc_config(adapter, pfc_map); + } else { + /* set PAUSE mode */ + // fc is controlled by ethtool + //rnp_dcb_hw_fc_enable(adapter); + } + + return 0; +} + +static u8 rnp_dcbnl_getpfcstate(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_pfc_cfg *pfc_cfg = &adapter->dcb_cfg.pfc_cfg; + + return pfc_cfg->pfc_en; +} + +static void rnp_dcbnl_setpfcstate(struct net_device *netdev, u8 state) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + adapter->dcb_cfg.pfc_cfg.pfc_en = state; +} + +const struct dcbnl_rtnl_ops rnp_dcbnl_ops = { + /*DCB PFC*/ + /*IEEE*/ + .ieee_getpfc = rnp_dcbnl_getpfc, + .ieee_setpfc = rnp_dcbnl_setpfc, + .getcap = rnp_dcbnl_getcap, + .setdcbx = rnp_dcbnl_setdcbx, + .getdcbx = rnp_dcbnl_getdcbx, + .getnumtcs = rnp_dcbnl_getnumtcs, + .setnumtcs = rnp_dcbnl_setnumtcs, + + /*CEE*/ + .getstate = rnp_dcbnl_getstate, + .setstate = rnp_dcbnl_setstate, + + .getpfcstate = rnp_dcbnl_getpfcstate, + .setpfcstate = rnp_dcbnl_setpfcstate, +}; + +int rnp_dcb_init(struct net_device *dev, struct rnp_adapter *adapter) +{ + struct rnp_dcb_cfg *dcb = &adapter->dcb_cfg; + struct rnp_hw *hw = &adapter->hw; + + if (hw->hw_type != rnp_hw_n10) + return 0; + + dcb->dcb_en = false; + dcb->pfc_cfg.pfc_max = RNP_MAX_TCS_NUM; + dcb->num_tcs.pfc_tcs = RNP_MAX_TCS_NUM; + dcb->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; + 
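+ /*
+  * DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE advertises that DCBX is run
+  * by a host LLDP agent (e.g. lldpad) through the dcbnl ops registered
+  * below, using the IEEE 802.1Qaz flavour; only the PFC-related
+  * callbacks are wired up in this file.
+  */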
dev->dcbnl_ops = &rnp_dcbnl_ops; + + return 0; +} +#endif diff --git a/drivers/net/ethernet/mucse/rnp/rnp_dcb.h b/drivers/net/ethernet/mucse/rnp/rnp_dcb.h new file mode 100755 index 0000000000000000000000000000000000000000..23941b81eef5f75831ee5b27008985530ad04356 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_dcb.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef __RNP_DCB_H__ +#define __RNP_DCB_H__ +#include "rnp.h" + +enum rnp_pause_low_thrsh { + RNP_PAUSE_4_SLOT_TIME = 0, + RNP_PAUSE_28_SLOT_TIME, + RNP_PAUSE_36_SLOT_TIME, + RNP_PAUSE_144_SLOT_TIME, + RNP_PAUSE_256_SLOT_TIME, +}; +/*Rx Flow Ctrl */ +#define RNP_RX_RFE BIT(0) /* Receive Flow Control Enable */ +#define RNP_UP BIT(1) /* Unicast Pause Packet Detect */ +#define RNP_PFCE BIT(8) /* Priority Based Flow Control Enable. */ + +/*Tx Flow Ctrl */ +#define RNP_TX_FCB BIT(0) /* Tx Flow Control Busy. */ +#define RNP_TX_TFE BIT(1) /* Transmit Flow Control Enable.*/ +#define RNP_TX_PLT GENMASK(6, 4) /* Pause Low Threshold. */ +#define RNP_DZPQ BIT(7) /*Disable Zero-Quanta Pause.*/ +#define RNP_PT GENMASK(31, 16) /* Pause Time. */ + +#define RNP_DEFAULT_PAUSE_TIME (0x100) /* */ +#define RNP_FC_TX_PLTH_OFFSET (4) /* Pause Low Threshold */ +#define RNP_FC_TX_PT_OFFSET (16) /* Pause Time */ + +#define RNP_DCB_MAX_TCS_NUM (4) +#define RNP_DCB_MAX_PFC_NUM (4) + +struct rnp_adapter; +int rnp_dcb_init(struct net_device *dev, struct rnp_adapter *adapter); +#endif diff --git a/drivers/net/ethernet/mucse/rnp/rnp_debugfs.c b/drivers/net/ethernet/mucse/rnp/rnp_debugfs.c new file mode 100755 index 0000000000000000000000000000000000000000..111020dacb248da75b7a4a735bad55d23000ca3f --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_debugfs.c @@ -0,0 +1,540 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#include +#include + +#include "rnp.h" +#include "rnp_type.h" + +#ifdef HAVE_RNP_DEBUG_FS +static struct dentry *rnp_dbg_root; +static char rnp_dbg_reg_ops_buf[256] = ""; + +#ifndef NO_CSL_DEBUG +#ifndef bus_to_virt +#define bus_to_virt phys_to_virt +#endif + + +static int rnp_dbg_csl_open(struct inode *inode, struct file *filp) +{ + void *dma_buf = NULL; + dma_addr_t dma_phy; + int err, bytes = 4096; + struct rnp_adapter *adapter; + const char *name; + struct rnp_hw *hw; + + if (inode->i_private) { + filp->private_data = inode->i_private; + } else { + return -EIO; + } + + adapter = filp->private_data; + + if (adapter == NULL) { + return -EIO; + } + + if (adapter->csl_dma_buf != NULL) { + return 0; + } + hw = &adapter->hw; + name = adapter->name; + + dma_buf = + dma_alloc_coherent(&hw->pdev->dev, bytes, &dma_phy, GFP_ATOMIC); + if (!dma_buf) { + e_dev_err("%s: no dma buf", name); + return -ENOMEM; + } + memset(dma_buf, 0, bytes); + + adapter->csl_dma_buf = dma_buf; + adapter->csl_dma_phy = dma_phy; + adapter->csl_dma_size = bytes; + + err = rnp_mbx_ddr_csl_enable(hw, 1, dma_phy, bytes); + if (err) { + dma_free_coherent(&hw->pdev->dev, bytes, dma_buf, dma_phy); + adapter->csl_dma_buf = NULL; + return -EIO; + } + + return 0; +} + +static int rnp_dbg_csl_release(struct inode *inode, struct file *filp) +{ + struct rnp_adapter *adapter = filp->private_data; + struct rnp_hw *hw = &adapter->hw; + + if (adapter->csl_dma_buf) { + rnp_mbx_ddr_csl_enable(hw, 0, 0, 0); + dma_free_coherent(&hw->pdev->dev, adapter->csl_dma_size, + adapter->csl_dma_buf, adapter->csl_dma_phy); + adapter->csl_dma_buf = NULL; + } + + return 0; +} + +static int rnp_dbg_csl_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long length; + struct rnp_adapter *adapter = filp->private_data; + void *dma_buf = adapter->csl_dma_buf; + dma_addr_t dma_phy = adapter->csl_dma_phy; + int dma_bytes = adapter->csl_dma_size; + int ret = 0; + + length = (unsigned long)(vma->vm_end - vma->vm_start); + + if (length > dma_bytes) { + return -EIO; + } + if (vma->vm_pgoff == 0) { + ret = dma_mmap_coherent(&adapter->pdev->dev, vma, dma_buf, dma_phy, length); + } else { + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + ret = remap_pfn_range( + vma, vma->vm_start, + PFN_DOWN(virt_to_phys(bus_to_virt(dma_phy))) + + vma->vm_pgoff, + length, vma->vm_page_prot); + } + + if (ret < 0) { + printk(KERN_ERR "%s: remap failed (%d)\n", __func__, ret); + return ret; + } + + return 0; +} + +static const struct file_operations rnp_dbg_csl_fops = { + .owner = THIS_MODULE, + .open = rnp_dbg_csl_open, + .release = rnp_dbg_csl_release, + .mmap = rnp_dbg_csl_mmap, +}; +#endif +static ssize_t rnp_dbg_eth_info_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct rnp_adapter *adapter = filp->private_data; + char *buf = NULL; + int len; + + if (adapter == NULL) { + return -EIO; + } + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "bd:%d port%d %s %s\n", adapter->bd_number, + 0, adapter->netdev->name, pci_name(adapter->pdev)); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +static const struct file_operations rnp_dbg_eth_info_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = rnp_dbg_eth_info_read, +}; + +static ssize_t rnp_dbg_mbx_cookies_info_read(struct file *filp, char __user *buffer, 
+ size_t count, loff_t *ppos) +{ + struct rnp_adapter *adapter = filp->private_data; + char *buf = NULL; + int len,i; + struct mbx_req_cookie_pool* cookie_pool = &(adapter->hw.mbx.cookie_pool); + struct mbx_req_cookie*cookie; + int free_cnt=0, wait_timout_cnt=0, alloced_cnt=0; + + if (adapter == NULL) { + return -EIO; + } + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + for(i=0;icookies[i]); + if(cookie->stat == COOKIE_FREE){ + free_cnt++; + }else if(cookie->stat == COOKIE_FREE_WAIT_TIMEOUT){ + wait_timout_cnt++; + }else if(cookie->stat == COOKIE_ALLOCED){ + alloced_cnt++; + } + } + + buf = kasprintf(GFP_KERNEL, "pool items:cur:%d total: %d. free:%d wait_free:%d alloced:%d \n", cookie_pool->next_idx, MAX_COOKIES_ITEMS, + free_cnt, wait_timout_cnt, alloced_cnt); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +static const struct file_operations rnp_dbg_mbx_cookies_info_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = rnp_dbg_mbx_cookies_info_read, +}; + +/** + * rnp_dbg_reg_ops_read - read for reg_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t rnp_dbg_reg_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct rnp_adapter *adapter = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: %s\n", adapter->name, + rnp_dbg_reg_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +/** + * rnp_dbg_reg_ops_write - write into reg_ops datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t rnp_dbg_reg_ops_write(struct file *filp, + const char __user *buffer, size_t count, + loff_t *ppos) +{ + struct rnp_adapter *adapter = filp->private_data; + struct rnp_hw *hw = &adapter->hw; + int len; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(rnp_dbg_reg_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(rnp_dbg_reg_ops_buf, + sizeof(rnp_dbg_reg_ops_buf) - 1, ppos, + buffer, count); + if (len < 0) + return len; + + rnp_dbg_reg_ops_buf[len] = '\0'; + + if (strncmp(rnp_dbg_reg_ops_buf, "write", 5) == 0) { + u32 reg, value; + int cnt; + + cnt = sscanf(&rnp_dbg_reg_ops_buf[5], "%x %x", ®, &value); + if (cnt == 2) { + if (reg >= 0x30000000) { + rnp_mbx_reg_write(hw, reg, value); + e_dev_info("write: 0x%08x = 0x%08x\n", reg, + value); + } else { + rnp_wr_reg(hw->hw_addr + reg, value); + value = rnp_rd_reg(hw->hw_addr + reg); + e_dev_info("write: 0x%08x = 0x%08x\n", reg, + value); + } + } else { + e_dev_info("write \n"); + } + } else if (strncmp(rnp_dbg_reg_ops_buf, "read", 4) == 0) { + u32 reg, value; + int cnt; + + cnt = sscanf(&rnp_dbg_reg_ops_buf[4], "%x", ®); + if (cnt == 1) { + if (reg >= 0x30000000) { + value = rnp_mbx_fw_reg_read(hw, reg); + } else { + value = rnp_rd_reg(hw->hw_addr + reg); + } + snprintf(rnp_dbg_reg_ops_buf, + sizeof(rnp_dbg_reg_ops_buf), "0x%08x: 0x%08x", + reg, value); + e_dev_info("read 0x%08x = 0x%08x\n", 
reg, value); + } else { + e_dev_info("read \n"); + } + } else { + e_dev_info("Unknown command %s\n", rnp_dbg_reg_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" read \n"); + e_dev_info(" write \n"); + } + return count; +} + +static const struct file_operations rnp_dbg_reg_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = rnp_dbg_reg_ops_read, + .write = rnp_dbg_reg_ops_write, +}; + +static char rnp_dbg_netdev_ops_buf[256] = ""; + +/** + * rnp_dbg_netdev_ops_read - read for netdev_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t rnp_dbg_netdev_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct rnp_adapter *adapter = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: %s\n", adapter->name, + rnp_dbg_netdev_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +/** + * rnp_dbg_netdev_ops_write - write into netdev_ops datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t rnp_dbg_netdev_ops_write(struct file *filp, + const char __user *buffer, size_t count, + loff_t *ppos) +{ + struct rnp_adapter *adapter = filp->private_data; + int len; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(rnp_dbg_netdev_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(rnp_dbg_netdev_ops_buf, + sizeof(rnp_dbg_netdev_ops_buf) - 1, ppos, + buffer, count); + if (len < 0) + return len; + + rnp_dbg_netdev_ops_buf[len] = '\0'; + + if (strncmp(rnp_dbg_netdev_ops_buf, "stat", 4) == 0) { + rnp_info("adapter->stat=0x%lx\n", adapter->state); + rnp_info("adapter->tx_timeout_count=%d\n", + adapter->tx_timeout_count); + } else if (strncmp(rnp_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { +#ifdef HAVE_NET_DEVICE_OPS +#ifdef HAVE_TX_TIMEOUT_TXQUEUE + adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev, + UINT_MAX); +#else + adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev); +#endif +#else + adapter->netdev->tx_timeout(adapter->netdev); +#endif + e_dev_info("tx_timeout called\n"); + } else { + e_dev_info("Unknown command: %s\n", rnp_dbg_netdev_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" tx_timeout\n"); + } + return count; +} + +static const struct file_operations rnp_dbg_netdev_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = rnp_dbg_netdev_ops_read, + .write = rnp_dbg_netdev_ops_write, +}; + +static ssize_t rnp_dbg_netdev_temp_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct rnp_adapter *adapter = filp->private_data; + struct rnp_hw *hw = &adapter->hw; + char *buf; + int len; + int temp = 0, voltage = 0; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + temp = rnp_mbx_get_temp(hw, &voltage); + + buf = kasprintf(GFP_KERNEL, "%s: temp: %d oC voltage:%d mV\n", + adapter->name, temp, voltage); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return 
len; +} +static const struct file_operations rnp_dbg_netdev_temp = { + .owner = THIS_MODULE, + .open = simple_open, + .read = rnp_dbg_netdev_temp_read, +}; + +/** + * rnp_dbg_adapter_init - setup the debugfs directory for the adapter + * @adapter: the adapter that is starting up + **/ +void rnp_dbg_adapter_init(struct rnp_adapter *adapter) +{ + const char *name = adapter->name; + struct dentry *pfile; + + adapter->rnp_dbg_adapter = debugfs_create_dir(name, rnp_dbg_root); + if (adapter->rnp_dbg_adapter) { + pfile = debugfs_create_file("reg_ops", 0600, + adapter->rnp_dbg_adapter, adapter, + &rnp_dbg_reg_ops_fops); + if (!pfile) + e_dev_err("debugfs reg_ops for %s failed\n", name); + pfile = debugfs_create_file("netdev_ops", 0600, + adapter->rnp_dbg_adapter, adapter, + &rnp_dbg_netdev_ops_fops); + if (!pfile) + e_dev_err("debugfs netdev_ops for %s failed\n", name); + + pfile = debugfs_create_file("temp", 0600, + adapter->rnp_dbg_adapter, adapter, + &rnp_dbg_netdev_temp); + if (!pfile) + e_dev_err("debugfs temp for %s failed\n", name); +#ifndef NO_CSL_DEBUG + if (rnp_is_pf1(&adapter->hw) == 0) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)) + pfile = debugfs_create_file_unsafe("csl", 0755, +#else + pfile = debugfs_create_file("csl", 0755, +#endif + adapter->rnp_dbg_adapter, + adapter, &rnp_dbg_csl_fops); + if (!pfile) + e_dev_err("debugfs csl failed\n"); + } +#endif + pfile = debugfs_create_file("info", 0600, + adapter->rnp_dbg_adapter, adapter, + &rnp_dbg_eth_info_fops); + if (!pfile) + e_dev_err("debugfs info failed\n"); + pfile = debugfs_create_file("mbx_cookies_info", 0600, + adapter->rnp_dbg_adapter, adapter, + &rnp_dbg_mbx_cookies_info_fops); + if (!pfile) + e_dev_err("debugfs reg_ops for mbx_cookies_info failed\n"); + } else { + e_dev_err("debugfs entry for %s failed\n", name); + } +} + +/** + * rnp_dbg_adapter_exit - clear out the adapter's debugfs entries + * @pf: the pf that is stopping + **/ +void rnp_dbg_adapter_exit(struct rnp_adapter *adapter) +{ + debugfs_remove_recursive(adapter->rnp_dbg_adapter); + adapter->rnp_dbg_adapter = NULL; +} + +/** + * rnp_dbg_init - start up debugfs for the driver + **/ +void rnp_dbg_init(void) +{ + rnp_dbg_root = debugfs_create_dir(rnp_driver_name, NULL); + if (rnp_dbg_root == NULL) + pr_err("init of debugfs failed\n"); +} + +/** + * rnp_dbg_exit - clean out the driver's debugfs entries + **/ +void rnp_dbg_exit(void) +{ + debugfs_remove_recursive(rnp_dbg_root); +} +#endif /* HAVE_RNP_DEBUG_FS */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_ethtool.c b/drivers/net/ethernet/mucse/rnp/rnp_ethtool.c new file mode 100755 index 0000000000000000000000000000000000000000..fb77d231269ba0698a9e77d8dbfa8d38ed12053f --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_ethtool.c @@ -0,0 +1,2137 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "rnp.h" +#include "rnp_phy.h" +#include "rnp_sriov.h" +#include "rnp_mbx_fw.h" +#include "rnp_ethtool.h" + +int rnp_wol_exclusion(struct rnp_adapter *adapter, struct ethtool_wolinfo *wol) +{ + struct rnp_hw *hw = &adapter->hw; + int retval = 0; + + /* WOL not supported for all devices */ + if (!rnp_wol_supported(adapter, hw->device_id, + hw->subsystem_device_id)) { + retval = 1; + wol->supported = 0; + } + + return retval; +} + +void rnp_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + wol->wolopts = 0; + + /* we now can't wol */ + if (rnp_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + /* Only support magic */ + if (RNP_WOL_GET_SUPPORTED(adapter)) + wol->supported = hw->wol_supported; + if (RNP_WOL_GET_STATUS(adapter)) + wol->wolopts |= hw->wol_supported; +} + +int rnp_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (!!wol->wolopts) { + if ((wol->wolopts & (~hw->wol_supported)) || + !RNP_WOL_GET_SUPPORTED(adapter)) + return -EOPNOTSUPP; + } + + RNP_WOL_SET_SUPPORTED(adapter); + if (wol->wolopts & WAKE_MAGIC) { + RNP_WOL_SET_SUPPORTED(adapter); + RNP_WOL_SET_STATUS(adapter); + } else { + RNP_WOL_CLEAR_STATUS(adapter); + } + + rnp_mbx_wol_set(hw, RNP_WOL_GET_STATUS(adapter)); + device_set_wakeup_enable(&adapter->pdev->dev, !!wol->wolopts); + + return 0; +} + +/* ethtool register test data */ +struct rnp_reg_test { + u16 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. 
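+ *
+ * For example (register address and values below are illustrative, not
+ * real rnp registers), an entry such as
+ *   { 0x0100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }
+ * is expanded by rnp_reg_test() into pattern writes/reads at 0x0100,
+ * 0x0140, 0x0180 and 0x01C0 (array stride 0x40), whereas a TABLE32_TEST
+ * entry steps through its table in units of 4 bytes.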
+ */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* default n10 register test */ +static struct rnp_reg_test reg_test_n10[] = { + //{RNP_DMA_CONFIG, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF}, + /* + * { RNP_FCRTL_n10(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + * { RNP_FCRTH_n10(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + * { RNP_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + * { RNP_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, + * { RNP_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + * { RNP_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + * { RNP_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + * { RNP_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + * { RNP_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, + * { RNP_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + * { RNP_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + * { RNP_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + * { RNP_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + * { RNP_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, + * { RNP_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, + * { RNP_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + * { RNP_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF }, + * { RNP_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + */ + { .reg = 0 }, +}; + +/* write and read check */ +static bool reg_pattern_test(struct rnp_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 pat, val, before; + static const u32 test_pattern[] = { 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, + 0xFFFFFFFF }; + + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = readl(adapter->hw.hw_addr + reg); + printk("before reg %x is %x\n", reg, before); + writel((test_pattern[pat] & write), + (adapter->hw.hw_addr + reg)); + val = readl(adapter->hw.hw_addr + reg); + if (val != (test_pattern[pat] & write & mask)) { + e_err(drv, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + reg, val, (test_pattern[pat] & write & mask)); + *data = reg; + writel(before, adapter->hw.hw_addr + reg); + return 1; + } + writel(before, adapter->hw.hw_addr + reg); + } + return 0; +} + +static bool reg_set_and_check(struct rnp_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 val, before; + + before = readl(adapter->hw.hw_addr + reg); + writel((write & mask), (adapter->hw.hw_addr + reg)); + val = readl(adapter->hw.hw_addr + reg); + if ((write & mask) != (val & mask)) { + e_err(drv, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; + writel(before, (adapter->hw.hw_addr + reg)); + return 1; + } + writel(before, (adapter->hw.hw_addr + reg)); + return 0; +} + +static bool rnp_reg_test(struct rnp_adapter *adapter, u64 *data) +{ + struct rnp_reg_test *test; + struct rnp_hw *hw = &adapter->hw; + u32 i; + + if (RNP_REMOVED(hw->hw_addr)) { + e_err(drv, "Adapter removed - register test blocked\n"); + *data = 1; + return true; + } + + test = reg_test_n10; + /* + * Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. 
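+ * The { .reg = 0 } sentinel at the end of reg_test_n10[] is what
+ * terminates the while (test->reg) walk below.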
+ */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + bool b = false; + + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, test->write); + break; + case WRITE_NO_TEST: + wr32(hw, test->reg + (i * 0x40), test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + (test->reg + 4) + (i * 8), + test->mask, test->write); + break; + } + if (b) + return true; + } + test++; + } + + *data = 0; + return false; +} + +static int rnp_link_test(struct rnp_adapter *adapter, u64 *data) +{ + struct rnp_hw *hw = &adapter->hw; + bool link_up; + u32 link_speed = 0; + bool duplex; + *data = 0; + + hw->ops.check_link(hw, &link_speed, &link_up, &duplex, true); + if (!link_up) + *data = 1; + return *data; +} + +void rnp_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, + u64 *data) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + bool if_running = netif_running(netdev); + + set_bit(__RNP_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + int i; + + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[i].clear_to_send) { + netdev_warn( + netdev, "%s", + "offline diagnostic is not supported when VFs " + "are present\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(__RNP_TESTING, + &adapter->state); + goto skip_ol_tests; + } + } + } + + /* Offline tests */ + e_info(hw, "offline testing starting\n"); + + /* bringing adapter down disables SFP+ optics */ + if (hw->ops.enable_tx_laser) + hw->ops.enable_tx_laser(hw); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (rnp_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e_info(hw, "register testing starting\n"); + if (rnp_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + data[1] = 0; + data[2] = 0; + /* If SRIOV or VMDq is enabled then skip MAC + * loopback diagnostic. 
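+ * (Loopback would require reconfiguring the MAC, which could disturb
+ * traffic on active VF/VMDq pools, so it is only run in non-VT mode.)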
+ */ + if (adapter->flags & + (RNP_FLAG_SRIOV_ENABLED | RNP_FLAG_VMDQ_ENABLED)) { + e_info(hw, "Skip MAC loopback diagnostic in VT mode\n"); + data[3] = 0; + goto skip_loopback; + } + + data[3] = 0; + /* loopback test is not added now */ + /* + * rnp_reset(adapter); + * e_info(hw, "loopback testing starting\n"); + * todo Loopback test + * if (rnp_loopback_test(adapter, &data[3])) + * eth_test->flags |= ETH_TEST_FL_FAILED; + */ +skip_loopback: + /* clear testing bit and return adapter to previous state */ + clear_bit(__RNP_TESTING, &adapter->state); + } else { + e_info(hw, "online testing starting\n"); + + /* if adapter is down, SFP+ optics will be disabled */ + if (!if_running && hw->ops.enable_tx_laser) + hw->ops.enable_tx_laser(hw); + + /* Online tests */ + if (rnp_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Offline tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__RNP_TESTING, &adapter->state); + } + + /* if adapter was down, ensure SFP+ optics are disabled again */ + if (!if_running && hw->ops.disable_tx_laser) + hw->ops.disable_tx_laser(hw); +skip_ol_tests: + msleep_interruptible(4 * 1000); +} + +#ifdef ETHTOOL_GFECPARAM +int rnp_get_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fecparam) +{ + int err; + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + err = rnp_mbx_get_lane_stat(hw); + if (err) + return err; + + if (adapter->fec) { + fecparam->active_fec = ETHTOOL_FEC_BASER; + } else { + fecparam->active_fec = ETHTOOL_FEC_NONE; + } + fecparam->fec = ETHTOOL_FEC_BASER; + + return 0; +} + +int rnp_set_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fecparam) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (fecparam->fec & ETHTOOL_FEC_OFF) { + return rnp_set_lane_fun(hw, LANE_FUN_FEC, 0, 0, 0, 0); + } else if (fecparam->fec & ETHTOOL_FEC_BASER) { + return rnp_set_lane_fun(hw, LANE_FUN_FEC, 1, 0, 0, 0); + } + + return -EINVAL; +} +#endif +u32 rnp_get_msglevel(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +void rnp_set_msglevel(struct net_device *netdev, u32 data) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +int rnp_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + rnp_mbx_led_set(hw, 1); + return 2; + + case ETHTOOL_ID_ON: + rnp_mbx_led_set(hw, 2); + break; + + case ETHTOOL_ID_OFF: + rnp_mbx_led_set(hw, 3); + break; + + case ETHTOOL_ID_INACTIVE: + rnp_mbx_led_set(hw, 0); + break; + } + return 0; +} + +int rnp_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + + /* For we just set it as pf0 */ + if (!(adapter->flags2 & RNP_FLAG2_PTP_ENABLED)) + return ethtool_op_get_ts_info(dev, info); + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + dbg("phc_index is %d\n", info->phc_index); + info->so_timestamping = + SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 
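+ /*
+  * Sketch of how the capabilities advertised here are consumed from user
+  * space (assumes a socket fd and an interface named "eth0"; illustrative
+  * only, not part of the driver):
+  *
+  *   struct hwtstamp_config cfg = {
+  *           .tx_type   = HWTSTAMP_TX_ON,
+  *           .rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT,
+  *   };
+  *   struct ifreq ifr = {};
+  *
+  *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+  *   ifr.ifr_data = (void *)&cfg;
+  *   ioctl(fd, SIOCSHWTSTAMP, &ifr);
+  *
+  * The requested tx_type/rx_filter must be one of the combinations
+  * reported by this function.
+  */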
+ + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | +#ifdef PTP_802_AS1 + /* 802.AS1 */ + BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | +#endif + BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_ALL); + + return 0; +} + +static unsigned int rnp_max_channels(struct rnp_adapter *adapter) +{ + unsigned int max_combined; + struct rnp_hw *hw = &adapter->hw; + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + /* SR-IOV currently only allows 2 queue on the PF */ + max_combined = hw->sriov_ring_limit; + } else if (adapter->flags & RNP_FLAG_DCB_ENABLED) { + /* dcb on max support 32 */ + max_combined = 32; + } else { + /* support up to 16 queues with RSS */ + max_combined = adapter->max_ring_pair_counts; + /* should not large than q_vectors ? */ + } +#ifdef RNP_MAX_RINGS + if (max_combined > RNP_MAX_RINGS) + max_combined = RNP_MAX_RINGS; +#endif + + return max_combined; +} + +void rnp_get_channels(struct net_device *dev, struct ethtool_channels *ch) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + + /* report maximum channels */ + ch->max_combined = rnp_max_channels(adapter); + + /* report info for other vector */ + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + + /* record RSS queues */ + ch->combined_count = adapter->ring_feature[RING_F_RSS].indices; + + /* nothing else to report if RSS is disabled */ + if (ch->combined_count == 1) + return; + + /* we do not support ATR queueing if SR-IOV is enabled */ + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) + return; + + /* same thing goes for being DCB enabled */ + if (netdev_get_num_tc(dev) > 1) + return; +} + +int rnp_set_channels(struct net_device *dev, struct ethtool_channels *ch) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + unsigned int count = ch->combined_count; + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) + return -EINVAL; + + /* verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* verify other_count has not changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + dbg("call set channels %d %d %d \n", count, ch->rx_count, ch->tx_count); + dbg("max channels %d\n", rnp_max_channels(adapter)); + /* verify the number of channels does not exceed hardware limits */ + if (count > rnp_max_channels(adapter)) + return -EINVAL; + + /* update feature limits from largest to smallest supported values */ + adapter->ring_feature[RING_F_FDIR].limit = count; + + if (count > adapter->max_ring_pair_counts) + count = adapter->max_ring_pair_counts; + adapter->ring_feature[RING_F_RSS].limit = count; + + /* use setup TC to update any traffic class queue mapping */ + return rnp_setup_tc(dev, netdev_get_num_tc(dev)); +} + +int rnp_get_module_info(struct net_device *dev, struct ethtool_modinfo *modinfo) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_hw *hw = &adapter->hw; + u8 module_id, diag_supported; + int rc; + + rnp_mbx_get_lane_stat(hw); + + if (hw->is_sgmii) + return -EIO; + + rc = rnp_mbx_sfp_module_eeprom_info(hw, 0xA0, SFF_MODULE_ID_OFFSET, 1, + &module_id); + if (rc || module_id == 0xff) { + return -EIO; + } + rc = rnp_mbx_sfp_module_eeprom_info(hw, 0xA0, SFF_DIAG_SUPPORT_OFFSET, + 1, &diag_supported); + if (!rc) { + switch 
(module_id) { + case SFF_MODULE_ID_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + if (!diag_supported) + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + case SFF_MODULE_ID_QSFP: + case SFF_MODULE_ID_QSFP_PLUS: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + case SFF_MODULE_ID_QSFP28: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + break; + default: + printk("%s: module_id:0x%x diag_supported:0x%x\n", + __func__, module_id, diag_supported); + rc = -EOPNOTSUPP; + break; + } + } + + return rc; +} + +int rnp_get_module_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_hw *hw = &adapter->hw; + u16 start = eeprom->offset, length = eeprom->len; + int rc = 0; + + rnp_mbx_get_lane_stat(hw); + + if (hw->is_sgmii) + return -EIO; + + memset(data, 0, eeprom->len); + + /* Read A0 portion of the EEPROM */ + if (start < ETH_MODULE_SFF_8436_LEN) { + if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN) + length = ETH_MODULE_SFF_8436_LEN - start; + rc = rnp_mbx_sfp_module_eeprom_info(hw, 0xA0, start, length, + data); + if (rc) + return rc; + start += length; + data += length; + length = eeprom->len - length; + } + + /* Read A2 portion of the EEPROM */ + if (length) { + start -= ETH_MODULE_SFF_8436_LEN; + rc = rnp_mbx_sfp_module_eeprom_info(hw, 0xA2, start, length, + data); + } + + return rc; +} +#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +void rnp_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +#else +void rnp_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +#endif /* HAVE_ETHTOOL_EXTENDED_RINGPARAMS */ +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + /* all ring share the same status*/ + + ring->rx_max_pending = RNP_MAX_RXD; + ring->tx_max_pending = RNP_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adapter->rx_ring_item_count; + ring->tx_pending = adapter->tx_ring_item_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +int rnp_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +#else +int rnp_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +#endif /* HAVE_ETHTOOL_EXTENDED_RINGPARAMS */ +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_ring *temp_ring; + int i, err = 0; + u32 new_rx_count, new_tx_count; + + /* sriov mode can't change ring param */ + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + return -EINVAL; + } + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if ((ring->tx_pending < RNP_MIN_TXD) || + (ring->tx_pending > RNP_MAX_TXD) || + (ring->rx_pending < RNP_MIN_RXD) || + (ring->rx_pending > RNP_MAX_RXD)) { + netdev_info( + netdev, + "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", + ring->tx_pending, ring->rx_pending, RNP_MIN_TXD, + RNP_MAX_TXD); + return -EINVAL; + } + + new_tx_count = clamp_t(u32, ring->tx_pending, RNP_MIN_TXD, RNP_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, RNP_REQ_TX_DESCRIPTOR_MULTIPLE); + new_rx_count = clamp_t(u32, ring->rx_pending, 
RNP_MIN_RXD, RNP_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, RNP_REQ_RX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_item_count) && + (new_rx_count == adapter->rx_ring_item_count)) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__RNP_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_item_count = new_tx_count; + adapter->rx_ring_item_count = new_rx_count; + goto clear_reset; + } + + /* allocate temporary buffer to store rings in */ + i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); + temp_ring = vmalloc(i * sizeof(struct rnp_ring)); + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + memset(temp_ring, 0x00, i * sizeof(struct rnp_ring)); + + if (new_rx_count != adapter->rx_ring_item_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + adapter->rx_ring[i]->reset_count = new_rx_count; + if (!(adapter->rx_ring[i]->ring_flags & + RNP_RING_SIZE_CHANGE_FIX)) + adapter->rx_ring[i]->ring_flags |= + RNP_RING_FLAG_CHANGE_RX_LEN; + } + } + rnp_down(adapter); + /* + * Setup new Tx resources and free the old Tx resources in that order. + * We can then assign the new resources to the rings via a memcpy. + * The advantage to this approach is that we are guaranteed to still + * have resources even in the case of an allocation failure. + */ + if (new_tx_count != adapter->tx_ring_item_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct rnp_ring)); + + temp_ring[i].count = new_tx_count; + err = rnp_setup_tx_resources(&temp_ring[i], adapter); + if (err) { + while (i) { + i--; + rnp_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + rnp_free_tx_resources(adapter->tx_ring[i]); + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct rnp_ring)); + } + + adapter->tx_ring_item_count = new_tx_count; + } + + /* Repeat the process for the Rx rings if needed */ + if (new_rx_count != adapter->rx_ring_item_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct rnp_ring)); + /* setup ring count */ + if (!(adapter->rx_ring[i]->ring_flags & + RNP_RING_FLAG_DELAY_SETUP_RX_LEN)) { + temp_ring[i].count = new_rx_count; + } else { + /* setup temp count */ + temp_ring[i].count = temp_ring[i].temp_count; + adapter->rx_ring[i]->reset_count = new_rx_count; + new_rx_count = temp_ring[i].temp_count; + } + err = rnp_setup_rx_resources(&temp_ring[i], adapter); + if (err) { + while (i) { + i--; + rnp_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + rnp_free_rx_resources(adapter->rx_ring[i]); + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct rnp_ring)); + } + adapter->rx_ring_item_count = new_rx_count; + } + +err_setup: + rnp_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__RNP_RESETTING, &adapter->state); + return err; +} + +int rnp_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + rnp_mbx_get_dump(&adapter->hw, 0, NULL, 0); + + dump->flag = adapter->hw.dump.flag; + dump->len = adapter->hw.dump.len; + dump->version = adapter->hw.dump.version; + + return 
0; +} + +int rnp_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, + void *buffer) +{ + int err; + struct rnp_adapter *adapter = netdev_priv(netdev); + + err = rnp_mbx_get_dump(&adapter->hw, dump->flag, buffer, dump->len); + if (err) + return err; + + dump->flag = adapter->hw.dump.flag; + dump->len = adapter->hw.dump.len; + dump->version = adapter->hw.dump.version; + + return 0; +} + +int rnp_set_dump(struct net_device *netdev, struct ethtool_dump *dump) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + rnp_mbx_set_dump(&adapter->hw, dump->flag); + + return 0; +} + +int rnp_get_coalesce(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else + struct ethtool_coalesce *coal) +#endif + +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + coal->use_adaptive_tx_coalesce = adapter->adaptive_tx_coal; + coal->tx_coalesce_usecs = adapter->tx_usecs_usr_set; + coal->tx_coalesce_usecs_irq = 0; + coal->tx_max_coalesced_frames = adapter->tx_frames; + coal->tx_max_coalesced_frames_irq = adapter->tx_work_limit; + + coal->use_adaptive_rx_coalesce = adapter->adaptive_rx_coal; + coal->rx_coalesce_usecs_irq = 0; + coal->rx_coalesce_usecs = adapter->rx_usecs_usr_set; + coal->rx_max_coalesced_frames = adapter->rx_frames; + coal->rx_max_coalesced_frames_irq = adapter->napi_budge; + + /* this is not support */ + coal->pkt_rate_low = 0; + coal->pkt_rate_high = 0; + coal->rx_coalesce_usecs_low = 0; + coal->rx_max_coalesced_frames_low = 0; + coal->tx_coalesce_usecs_low = 0; + coal->tx_max_coalesced_frames_low = 0; + coal->rx_coalesce_usecs_high = 0; + coal->rx_max_coalesced_frames_high = 0; + coal->tx_coalesce_usecs_high = 0; + coal->tx_max_coalesced_frames_high = 0; + coal->rate_sample_interval = 0; + + return 0; +} + +int rnp_set_coalesce(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else + struct ethtool_coalesce *ec) +#endif + +{ + int reset = 0; + struct rnp_adapter *adapter = netdev_priv(netdev); + u32 value; + /* we don't support close tx and rx coalesce */ + if (!(ec->use_adaptive_tx_coalesce) || !(ec->use_adaptive_rx_coalesce)) + return -EINVAL; + + /* check coalesce frame irq */ + if ((ec->tx_max_coalesced_frames_irq < RNP_MIN_TX_WORK) || + (ec->tx_max_coalesced_frames_irq > RNP_MAX_TX_WORK)) + return -EINVAL; + + value = clamp_t(u32, ec->tx_max_coalesced_frames_irq, RNP_MIN_TX_WORK, + RNP_MAX_TX_WORK); + value = ALIGN(value, RNP_WORK_ALIGN); + + if (adapter->tx_work_limit != value) { + reset = 1; + adapter->tx_work_limit = value; + } + + if ((ec->tx_max_coalesced_frames < RNP_MIN_TX_FRAME) || + (ec->tx_max_coalesced_frames > RNP_MAX_TX_FRAME)) + return -EINVAL; + + value = clamp_t(u32, ec->tx_max_coalesced_frames, RNP_MIN_TX_FRAME, + RNP_MAX_TX_FRAME); + if (adapter->tx_frames != value) { + reset = 1; + adapter->tx_frames = value; + } + + /* check vlaue */ + if ((ec->tx_coalesce_usecs < RNP_MIN_TX_USEC) || + (ec->tx_coalesce_usecs > RNP_MAX_TX_USEC)) + return -EINVAL; + + value = clamp_t(u32, ec->tx_coalesce_usecs, RNP_MIN_TX_USEC, + RNP_MAX_TX_USEC); + if (adapter->tx_usecs != value) { + reset = 1; + adapter->tx_usecs = value; + adapter->tx_usecs_usr_set = value; + } + + if ((ec->rx_max_coalesced_frames_irq < RNP_MIN_RX_WORK) || + (ec->rx_max_coalesced_frames_irq > RNP_MAX_RX_WORK)) + return -EINVAL; + + value = 
clamp_t(u32, ec->rx_max_coalesced_frames_irq, RNP_MIN_RX_WORK, + RNP_MAX_RX_WORK); + value = ALIGN(value, RNP_WORK_ALIGN); + + if (adapter->napi_budge != value) { + reset = 1; + adapter->napi_budge = value; + } + + if ((ec->rx_max_coalesced_frames < RNP_MIN_RX_FRAME) || + (ec->rx_max_coalesced_frames > RNP_MAX_RX_FRAME)) + return -EINVAL; + + value = clamp_t(u32, ec->rx_max_coalesced_frames, RNP_MIN_RX_FRAME, + RNP_MAX_RX_FRAME); + if (adapter->rx_frames != value) { + reset = 1; + adapter->rx_frames = value; + } + + /* check vlaue */ + if ((ec->rx_coalesce_usecs < RNP_MIN_RX_USEC) || + (ec->rx_coalesce_usecs > RNP_MAX_RX_USEC)) + return -EINVAL; + + value = clamp_t(u32, ec->rx_coalesce_usecs, RNP_MIN_RX_USEC, + RNP_MAX_RX_USEC); + + if (adapter->rx_usecs != value) { + reset = 1; + adapter->rx_usecs = value; + adapter->rx_usecs_usr_set = value; + } + /* other setup is not supported */ + if ((ec->pkt_rate_low) || (ec->pkt_rate_high) || + (ec->rx_coalesce_usecs_low) || (ec->rx_max_coalesced_frames_low) || + (ec->tx_coalesce_usecs_low) || (ec->tx_max_coalesced_frames_low) || + (ec->rx_coalesce_usecs_high) || + (ec->rx_max_coalesced_frames_high) || + (ec->tx_coalesce_usecs_high) || + (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval) || + (ec->tx_coalesce_usecs_irq) || (ec->rx_coalesce_usecs_irq)) + return -EINVAL; + + if (reset) + return rnp_setup_tc(netdev, netdev_get_num_tc(netdev)); + + return 0; +} + +#ifndef HAVE_NDO_SET_FEATURES +u32 rnp_get_rx_csum(struct net_device *netdev) +{ + return !!(netdev->features & NETIF_F_RXCSUM); +} + +int rnp_set_rx_csum(struct net_device *netdev, u32 data) +{ + if (data) + netdev->features |= NETIF_F_RXCSUM; + else + netdev->features &= ~NETIF_F_RXCSUM; + + return 0; +} + +int rnp_set_tx_csum(struct net_device *netdev, u32 data) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); +#ifdef NETIF_F_IPV6_CSUM + u32 feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; +#else + u32 feature_list = NETIF_F_IP_CSUM; +#endif + + switch (adapter->hw.hw_type) { + case rnp_hw_n10: + case rnp_hw_n400: +#ifdef HAVE_ENCAP_TSO_OFFLOAD + if (data) + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; + else + netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL; + feature_list |= NETIF_F_GSO_UDP_TUNNEL; +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + feature_list |= NETIF_F_SCTP_CSUM; + break; + default: + break; + } + + if (data) + netdev->features |= feature_list; + else + netdev->features &= ~feature_list; + + return 0; +} + +#ifdef NETIF_F_TSO +int rnp_set_tso(struct net_device *netdev, u32 data) +{ +#ifdef NETIF_F_TSO6 + u32 feature_list = NETIF_F_TSO | NETIF_F_TSO6; +#else + u32 feature_list = NETIF_F_TSO; +#endif + + if (data) + netdev->features |= feature_list; + else + netdev->features &= ~feature_list; + +#ifndef HAVE_NETDEV_VLAN_FEATURES + if (!data) { + struct rnp_adapter *adapter = netdev_priv(netdev); + struct net_device *v_netdev; + int i; + + /* disable TSO on all VLANs if they're present */ + if (!adapter->vlgrp) + goto tso_out; + + for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { + v_netdev = vlan_group_get_device(adapter->vlgrp, i); + if (!v_netdev) + continue; + + v_netdev->features &= ~feature_list; + vlan_group_set_device(adapter->vlgrp, i, v_netdev); + } + } + +tso_out: + +#endif /* HAVE_NETDEV_VLAN_FEATURES */ + return 0; +} +#endif +#endif + +#ifdef ETHTOOL_GRXRINGS + +static int rnp_get_rss_hash_opts(struct rnp_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on rnp */ + switch 
(cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + fallthrough; + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + fallthrough; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + fallthrough; + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + fallthrough; + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + return 0; +} + +static int rnp_get_ethtool_fdir_entry(struct rnp_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct hlist_node *node2; + struct rnp_fdir_filter *rule = NULL; + + /* report total rule count */ + cmd->data = adapter->fdir_pballoc; + + hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list, + fdir_node) + if (fsp->location <= rule->sw_idx) + break; + + if (!rule || fsp->location != rule->sw_idx) + return -EINVAL; + /* set flow type field */ + switch (rule->filter.formatted.flow_type) { + case RNP_ATR_FLOW_TYPE_TCPV4: + fsp->flow_type = TCP_V4_FLOW; + break; + case RNP_ATR_FLOW_TYPE_UDPV4: + fsp->flow_type = UDP_V4_FLOW; + break; + case RNP_ATR_FLOW_TYPE_SCTPV4: + fsp->flow_type = SCTP_V4_FLOW; + break; + case RNP_ATR_FLOW_TYPE_IPV4: + fsp->flow_type = IP_USER_FLOW; + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + if (adapter->fdir_mode == fdir_mode_tuple5) { + fsp->h_u.usr_ip4_spec.proto = + rule->filter.formatted.inner_mac[0]; + fsp->m_u.usr_ip4_spec.proto = 0xff; + } else { + fsp->h_u.usr_ip4_spec.proto = + rule->filter.formatted.inner_mac[0] & + rule->filter.formatted.inner_mac_mask[0]; + fsp->m_u.usr_ip4_spec.proto = + rule->filter.formatted.inner_mac_mask[0]; + } + break; + case RNP_ATR_FLOW_TYPE_ETHER: + fsp->flow_type = ETHER_FLOW; + /* support proto and mask only in this mode */ + fsp->h_u.ether_spec.h_proto = rule->filter.layer2_formate.proto; + fsp->m_u.ether_spec.h_proto = 0xffff; + break; + default: + return -EINVAL; + } + if (rule->filter.formatted.flow_type != RNP_ATR_FLOW_TYPE_ETHER) { + /* not support mask in tuple 5 mode */ + if (adapter->fdir_mode == fdir_mode_tuple5) { + fsp->h_u.tcp_ip4_spec.psrc = + rule->filter.formatted.src_port; + fsp->h_u.tcp_ip4_spec.pdst = + rule->filter.formatted.dst_port; + fsp->h_u.tcp_ip4_spec.ip4src = + rule->filter.formatted.src_ip[0]; + fsp->h_u.tcp_ip4_spec.ip4dst = + rule->filter.formatted.dst_ip[0]; + fsp->m_u.tcp_ip4_spec.psrc = 0xffff; + fsp->m_u.tcp_ip4_spec.pdst = 0xffff; + fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff; + fsp->m_u.tcp_ip4_spec.ip4dst = 0xffffffff; + } else { + fsp->h_u.tcp_ip4_spec.psrc = + rule->filter.formatted.src_port & + rule->filter.formatted.src_port_mask; + fsp->m_u.tcp_ip4_spec.psrc = + rule->filter.formatted.src_port_mask; + fsp->h_u.tcp_ip4_spec.pdst = + rule->filter.formatted.dst_port & + rule->filter.formatted.dst_port_mask; + fsp->m_u.tcp_ip4_spec.pdst = + rule->filter.formatted.dst_port_mask; + + fsp->h_u.tcp_ip4_spec.ip4src = + rule->filter.formatted.src_ip[0] & + rule->filter.formatted.src_ip_mask[0]; + fsp->m_u.tcp_ip4_spec.ip4src = + rule->filter.formatted.src_ip_mask[0]; + + fsp->h_u.tcp_ip4_spec.ip4dst = + rule->filter.formatted.dst_ip[0] & + 
rule->filter.formatted.dst_ip_mask[0]; + fsp->m_u.tcp_ip4_spec.ip4dst = + rule->filter.formatted.dst_ip_mask[0]; + } + } + + /* record action */ + if (rule->action == RNP_FDIR_DROP_QUEUE) + fsp->ring_cookie = RX_CLS_FLOW_DISC; + else { + int add = 0; + + if (rule->action & 0x1) + add = 1; + + if (rule->vf_num != 0) { + fsp->ring_cookie = ((u64)rule->vf_num << 32) | (add); + } else { + fsp->ring_cookie = rule->action; + } + } + + return 0; +} + +static int rnp_get_ethtool_fdir_all(struct rnp_adapter *adapter, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct hlist_node *node2; + struct rnp_fdir_filter *rule; + int cnt = 0; + + /* report total rule count */ + cmd->data = adapter->fdir_pballoc; + + hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list, + fdir_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + +int rnp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, +#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS + void *rule_locs) +#else + u32 *rule_locs) +#endif +{ + struct rnp_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + struct rnp_hw *hw = &adapter->hw; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + /* we fix 2 when srio on */ + cmd->data = hw->sriov_ring_limit; + } else { + cmd->data = adapter->num_rx_queues; + } + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->fdir_filter_count; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = rnp_get_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = rnp_get_ethtool_fdir_all(adapter, cmd, (u32 *)rule_locs); + break; + case ETHTOOL_GRXFH: + ret = rnp_get_rss_hash_opts(adapter, cmd); + break; + default: + break; + } + + return ret; +} +#define UDP_RSS_FLAGS \ + (RNP_FLAG2_RSS_FIELD_IPV4_UDP | RNP_FLAG2_RSS_FIELD_IPV6_UDP) +static int rnp_set_rss_hash_opt(struct rnp_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + /* + * RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & + ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V4_FLOW: + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + return 0; +} + +static int rnp_flowspec_to_flow_type(struct rnp_adapter *adapter, + struct ethtool_rx_flow_spec *fsp, + uint8_t *flow_type, + struct rnp_fdir_filter *input) +{ + int i; + int ret = 1; + /* not support flow_ext */ + if (fsp->flow_type & FLOW_EXT) + return 0; + + switch (fsp->flow_type & ~FLOW_EXT) { + /* todo ipv6 is not considered*/ + case TCP_V4_FLOW: + *flow_type = RNP_ATR_FLOW_TYPE_TCPV4; + break; + case UDP_V4_FLOW: + *flow_type = RNP_ATR_FLOW_TYPE_UDPV4; + break; + case SCTP_V4_FLOW: + *flow_type = RNP_ATR_FLOW_TYPE_SCTPV4; + break; + case ETHER_FLOW: + /* layer 2 flow */ + *flow_type = RNP_ATR_FLOW_TYPE_ETHER; + input->filter.layer2_formate.proto = + 
fsp->h_u.ether_spec.h_proto; + break; + case IP_USER_FLOW: + switch (fsp->h_u.usr_ip4_spec.proto) { + case IPPROTO_TCP: + *flow_type = RNP_ATR_FLOW_TYPE_TCPV4; + break; + case IPPROTO_UDP: + *flow_type = RNP_ATR_FLOW_TYPE_UDPV4; + break; + case IPPROTO_SCTP: + *flow_type = RNP_ATR_FLOW_TYPE_SCTPV4; + break; + case 0: + /* if only ip4 no src no dst*/ + if (!(fsp->h_u.tcp_ip4_spec.ip4src) && + (!(fsp->h_u.tcp_ip4_spec.ip4dst))) { + /* if have no l4 proto, use layer2 */ + *flow_type = RNP_ATR_FLOW_TYPE_ETHER; + input->filter.layer2_formate.proto = + htons(0x0800); + } else { + /* may only src or dst input */ + *flow_type = RNP_ATR_FLOW_TYPE_IPV4; + } + break; + default: + /* other unknown l4 proto ip */ + *flow_type = RNP_ATR_FLOW_TYPE_IPV4; + } + break; + default: + return 0; + } + /* layer2 flow */ + if (*flow_type == RNP_ATR_FLOW_TYPE_ETHER) { + if (adapter->layer2_count < 0) { + e_err(drv, "layer2 count full\n"); + ret = 0; + } + /* should check dst mac filter */ + /* should check src dst all zeros */ + for (i = 0; i < ETH_ALEN; i++) { + if (fsp->h_u.ether_spec.h_source[i] != 0) + ret = 0; + + if (fsp->h_u.ether_spec.h_dest[i] != 0) + ret = 0; + + if (fsp->m_u.ether_spec.h_source[i] != 0) + ret = 0; + + if (fsp->m_u.ether_spec.h_dest[i] != 0) + ret = 0; + } + } else if (*flow_type == RNP_ATR_FLOW_TYPE_IPV4) { + if (adapter->fdir_mode == fdir_mode_tuple5) { + if (adapter->tuple_5_count < 0) { + e_err(drv, "tuple 5 count full\n"); + ret = 0; + } + if ((fsp->h_u.usr_ip4_spec.ip4src != 0) && + (fsp->m_u.usr_ip4_spec.ip4src != 0xffffffff)) { + e_err(drv, "ip src mask error\n"); + ret = 0; + } + if ((fsp->h_u.usr_ip4_spec.ip4dst != 0) && + (fsp->m_u.usr_ip4_spec.ip4dst != 0xffffffff)) { + e_err(drv, "ip dst mask error\n"); + ret = 0; + } + if ((fsp->h_u.usr_ip4_spec.proto != 0) && + (fsp->m_u.usr_ip4_spec.proto != 0xff)) { + e_err(drv, "ip l4 proto mask error\n"); + ret = 0; + } + } else { + if (adapter->tuple_5_count < 0) { + e_err(drv, "tcam count full\n"); + ret = 0; + } + /* tcam mode can support mask */ + } + /* not support l4_4_bytes */ + if ((fsp->h_u.usr_ip4_spec.l4_4_bytes != 0)) { + e_err(drv, "ip l4_4_bytes error\n"); + ret = 0; + } + } else { + if (adapter->fdir_mode == fdir_mode_tuple5) { + /* should check mask all ff */ + if (adapter->tuple_5_count < 0) { + e_err(drv, "tuple 5 count full\n"); + ret = 0; + } + if ((fsp->h_u.tcp_ip4_spec.ip4src != 0) && + (fsp->m_u.tcp_ip4_spec.ip4src != 0xffffffff)) { + e_err(drv, "src mask error\n"); + ret = 0; + } + if ((fsp->h_u.tcp_ip4_spec.ip4dst != 0) && + (fsp->m_u.tcp_ip4_spec.ip4dst != 0xffffffff)) { + e_err(drv, "dst mask error\n"); + ret = 0; + } + if ((fsp->h_u.tcp_ip4_spec.psrc != 0) && + (fsp->m_u.tcp_ip4_spec.psrc != 0xffff)) { + e_err(drv, "src port mask error\n"); + ret = 0; + } + if ((fsp->h_u.tcp_ip4_spec.pdst != 0) && + (fsp->m_u.tcp_ip4_spec.pdst != 0xffff)) { + e_err(drv, "src port mask error\n"); + ret = 0; + } + } else { + if (adapter->tuple_5_count < 0) { + e_err(drv, "tcam count full\n"); + ret = 0; + } + } + /* l4 tos is not supported */ + if (fsp->h_u.tcp_ip4_spec.tos != 0) { + e_err(drv, "tos error\n"); + ret = 0; + } + } + + return ret; +} + +int rnp_update_ethtool_fdir_entry(struct rnp_adapter *adapter, + struct rnp_fdir_filter *input, u16 sw_idx) +{ + struct rnp_hw *hw = &adapter->hw; + struct hlist_node *node2; + struct rnp_fdir_filter *rule, *parent; + bool deleted = false; + u16 hw_idx_layer2 = 0; + u16 hw_idx_tuple5 = 0; + + s32 err; + + parent = NULL; + rule = NULL; + + hlist_for_each_entry_safe(rule, node2, 
&adapter->fdir_filter_list, + fdir_node) { + /* hash found, or no matching entry */ + if (rule->sw_idx >= sw_idx) + break; + + parent = rule; + } + + /* if there is an old rule occupying our place remove it */ + if (rule && (rule->sw_idx == sw_idx)) { + /* only clear hw enable bits */ + /* hardware filters are only configured when interface is up, + * and we should not issue filter commands while the interface + * is down + */ + if (netif_running(adapter->netdev) && (!input)) { + err = rnp_fdir_erase_perfect_filter(adapter->fdir_mode, + hw, &rule->filter, + rule->hw_idx); + if (err) + return -EINVAL; + } + + adapter->fdir_filter_count--; + if (rule->filter.formatted.flow_type == + RNP_ATR_FLOW_TYPE_ETHER) { + /* used to determine hw reg offset */ + adapter->layer2_count++; + } else { + adapter->tuple_5_count++; + } + + hlist_del(&rule->fdir_node); + kfree(rule); + deleted = true; + } + + /* If we weren't given an input, then this was a request to delete a + * filter. We should return -EINVAL if the filter wasn't found, but + * return 0 if the rule was successfully deleted. + */ + if (!input) + return deleted ? 0 : -EINVAL; + + /* initialize node and set software index */ + INIT_HLIST_NODE(&input->fdir_node); + + /* add filter to the list */ + if (parent) + hlist_add_behind(&input->fdir_node, &parent->fdir_node); + else + hlist_add_head(&input->fdir_node, &adapter->fdir_filter_list); + + /* we must setup all */ + /* should first earase all tcam and l2 rule */ + + if (adapter->fdir_mode != fdir_mode_tcam) { + hw->ops.clr_all_layer2_remapping(hw); + /* earase all layer2 */ + } else { + hw->ops.clr_all_tuple5_remapping(hw); + /* earase all tcam */ + } + + /* setup hw */ + hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list, + fdir_node) { + if (netif_running(adapter->netdev)) { + /* hw_idx */ + if (rule->filter.formatted.flow_type == + RNP_ATR_FLOW_TYPE_ETHER) { + rule->hw_idx = hw_idx_layer2++; + } else { + rule->hw_idx = hw_idx_tuple5++; + } + + if ((!rule->vf_num) && + (rule->action != ACTION_TO_MPE)) { + int idx = rule->action; + + err = rnp_fdir_write_perfect_filter( + adapter->fdir_mode, hw, &rule->filter, + rule->hw_idx, + (rule->action == RNP_FDIR_DROP_QUEUE) ? + RNP_FDIR_DROP_QUEUE : + adapter->rx_ring[idx] + ->rnp_queue_idx, + (adapter->priv_flags & + RNP_PRIV_FLAG_REMAP_PRIO) ? + true : + false); + } else { + /* ACTION_TO_MPE use this */ + err = rnp_fdir_write_perfect_filter( + adapter->fdir_mode, hw, &rule->filter, + rule->hw_idx, + (rule->action == RNP_FDIR_DROP_QUEUE) ? + RNP_FDIR_DROP_QUEUE : + rule->action, + (adapter->priv_flags & + RNP_PRIV_FLAG_REMAP_PRIO) ? 
+ true : + false); + } + if (err) + return -EINVAL; + } + } + + /* update counts */ + adapter->fdir_filter_count++; + if (input->filter.formatted.flow_type == RNP_ATR_FLOW_TYPE_ETHER) { + /* used to determine hw reg offset */ + adapter->layer2_count--; + } else { + adapter->tuple_5_count--; + } + return 0; +} + +/* used to dbg flo_spec info */ +static void print_fsp(struct ethtool_rx_flow_spec *fsp) +{ + int i; + + switch (fsp->flow_type & ~FLOW_EXT) { + case ETHER_FLOW: + for (i = 0; i < ETH_ALEN; i++) + dbg("src 0x%02x\n", fsp->h_u.ether_spec.h_source[i]); + for (i = 0; i < ETH_ALEN; i++) + dbg("dst 0x%02x\n", fsp->h_u.ether_spec.h_dest[i]); + for (i = 0; i < ETH_ALEN; i++) + dbg("src mask 0x%02x\n", + fsp->m_u.ether_spec.h_source[i]); + for (i = 0; i < ETH_ALEN; i++) + dbg("dst mask 0x%02x\n", fsp->m_u.ether_spec.h_dest[i]); + + dbg("proto type is %x\n", fsp->h_u.ether_spec.h_proto); + + break; + + default: + dbg("flow type is %x\n", fsp->flow_type); + + dbg("ip4 src ip is %x\n", fsp->h_u.tcp_ip4_spec.ip4src); + dbg("ip4 src ip mask is %x\n", fsp->m_u.tcp_ip4_spec.ip4src); + + dbg("ip4 dst ip is %x\n", fsp->h_u.tcp_ip4_spec.ip4dst); + dbg("ip4 dst ip mask is %x\n", fsp->m_u.tcp_ip4_spec.ip4dst); + + dbg("ip4 src port is %x\n", fsp->h_u.tcp_ip4_spec.psrc); + dbg("ip4 src port mask is %x\n", fsp->m_u.tcp_ip4_spec.psrc); + + dbg("ip4 dst port is %x\n", fsp->h_u.tcp_ip4_spec.pdst); + dbg("ip4 dst port mask is %x\n", fsp->m_u.tcp_ip4_spec.pdst); + + dbg("l4 proto type is %x\n", fsp->h_u.usr_ip4_spec.proto); + break; + } +} + +static int rnp_add_ethtool_fdir_entry(struct rnp_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct rnp_fdir_filter *input; + struct rnp_hw *hw = &adapter->hw; + /* we don't support mask */ + int err; + int vf_fix = 0; + + u32 ring_cookie_high = fsp->ring_cookie >> 32; + + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + vf_fix = 1; + + if (!(adapter->flags & RNP_FLAG_FDIR_PERFECT_CAPABLE)) + return -EOPNOTSUPP; + + /* + * Don't allow programming if the action is a queue greater than + * the number of online Rx queues. 
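 * With SR-IOV enabled the upper 32 bits of the ring cookie select the
 * target VF and the lower 32 bits the queue inside that VF; both parts
 * are range-checked below.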
+ */ + /* is sriov is on, allow vf and queue */ + /* vf should smaller than num_vfs */ + print_fsp(fsp); + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && + (((ring_cookie_high & 0xff) > adapter->num_vfs) || + ((fsp->ring_cookie & (u64)0xffffffff) >= + hw->sriov_ring_limit))) + /* return error if not mpe */ + if (fsp->ring_cookie != ACTION_TO_MPE) + return -EINVAL; + + } else { + if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && + (fsp->ring_cookie >= adapter->num_rx_queues)) { + /* ACTION_TO_MPE to mpe special */ + if (fsp->ring_cookie != ACTION_TO_MPE) + return -EINVAL; + } + } + + /* Don't allow indexes to exist outside of available space */ + if (fsp->location >= (adapter->fdir_pballoc)) { + e_err(drv, "Location out of range\n"); + return -EINVAL; + } + + input = kzalloc(sizeof(*input), GFP_ATOMIC); + if (!input) + return -ENOMEM; + + /* set SW index */ + input->sw_idx = fsp->location; + + /* record flow type */ + if (!rnp_flowspec_to_flow_type( + adapter, fsp, &input->filter.formatted.flow_type, input)) { + e_err(drv, "Unrecognized flow type\n"); + goto err_out; + } + + if (input->filter.formatted.flow_type == RNP_ATR_FLOW_TYPE_ETHER) { + /* used to determine hw reg offset */ + } else if (input->filter.formatted.flow_type == + RNP_ATR_FLOW_TYPE_IPV4) { + /* Copy input into formatted structures */ + input->filter.formatted.src_ip[0] = + fsp->h_u.usr_ip4_spec.ip4src; + input->filter.formatted.src_ip_mask[0] = + fsp->m_u.usr_ip4_spec.ip4src; + input->filter.formatted.dst_ip[0] = + fsp->h_u.usr_ip4_spec.ip4dst; + input->filter.formatted.dst_ip_mask[0] = + fsp->m_u.usr_ip4_spec.ip4dst; + input->filter.formatted.src_port = 0; + input->filter.formatted.src_port_mask = 0xffff; + input->filter.formatted.dst_port = 0; + input->filter.formatted.dst_port_mask = 0xffff; + input->filter.formatted.inner_mac[0] = + fsp->h_u.usr_ip4_spec.proto; + input->filter.formatted.inner_mac_mask[0] = + fsp->m_u.usr_ip4_spec.proto; + } else { + /* tcp or udp or sctp*/ + /* Copy input into formatted structures */ + input->filter.formatted.src_ip[0] = + fsp->h_u.tcp_ip4_spec.ip4src; + input->filter.formatted.src_ip_mask[0] = + fsp->m_u.usr_ip4_spec.ip4src; + input->filter.formatted.dst_ip[0] = + fsp->h_u.tcp_ip4_spec.ip4dst; + input->filter.formatted.dst_ip_mask[0] = + fsp->m_u.usr_ip4_spec.ip4dst; + input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; + input->filter.formatted.src_port_mask = + fsp->m_u.tcp_ip4_spec.psrc; + input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; + input->filter.formatted.dst_port_mask = + fsp->m_u.tcp_ip4_spec.pdst; + } + + /* determine if we need to drop or route the packet */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) + input->action = RNP_FDIR_DROP_QUEUE; + else { + input->vf_num = (fsp->ring_cookie >> 32) & 0xff; + if (input->vf_num) { + /* in vf mode input->action is the real queue nums */ + if (adapter->priv_flags & RNP_PRIV_FLAG_REMAP_MODE) { + input->action = (fsp->ring_cookie & 0xffffffff); + } else { + input->action = + 2 * (((fsp->ring_cookie >> 32) & 0xff) + + vf_fix - 1) + + (fsp->ring_cookie & 0xffffffff); + } + } else + input->action = fsp->ring_cookie; + } + + spin_lock(&adapter->fdir_perfect_lock); + err = rnp_update_ethtool_fdir_entry(adapter, input, input->sw_idx); + spin_unlock(&adapter->fdir_perfect_lock); + + return err; +err_out: + kfree(input); + return -EINVAL; +} + +static int rnp_del_ethtool_fdir_entry(struct rnp_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec 
*fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int err; + + spin_lock(&adapter->fdir_perfect_lock); + err = rnp_update_ethtool_fdir_entry(adapter, NULL, fsp->location); + spin_unlock(&adapter->fdir_perfect_lock); + + return err; +} + +int rnp_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = rnp_add_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = rnp_del_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_SRXFH: + ret = rnp_set_rss_hash_opt(adapter, cmd); + break; + default: + break; + } + + return ret; +} +#endif + +#ifdef ETHTOOL_SRXNTUPLE +/* + * We need to keep this around for kernels 2.6.33-2.6.39 in order to avoid + * a null pointer dereference as it was assumend if the NETIF_F_NTUPLE flag + * was defined that this function was present. + */ +int rnp_set_rx_ntuple(struct net_device __always_unused *dev, + struct ethtool_rx_ntuple __always_unused *cmd) +{ + return -EOPNOTSUPP; +} +#endif + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + +u32 rnp_rss_indir_size(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + return rnp_rss_indir_tbl_entries(adapter); +} + +u32 rnp_get_rxfh_key_size(struct net_device *netdev) +{ + return RNP_RSS_KEY_SIZE; +} + +void rnp_get_reta(struct rnp_adapter *adapter, u32 *indir) +{ + int i, reta_size = rnp_rss_indir_tbl_entries(adapter); + u16 rss_m = adapter->ring_feature[RING_F_RSS].mask; + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) + rss_m = adapter->ring_feature[RING_F_RSS].indices - 1; + + for (i = 0; i < reta_size; i++) + indir[i] = adapter->rss_indir_tbl[i] & rss_m; +} + +#ifdef HAVE_ETHTOOL_RXFH_PARAM +int rnp_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh) +#else +#ifdef HAVE_RXFH_HASHFUNC +int rnp_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +#else +int rnp_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#endif +#endif +{ + struct rnp_adapter *adapter = netdev_priv(netdev); +#ifdef HAVE_ETHTOOL_RXFH_PARAM + u32 *indir = rxfh->indir; + u8 *key = rxfh->key; + u8 *hfunc = &rxfh->hfunc; +#endif + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc) { + switch (adapter->rss_func_mode) { + case rss_func_top: + *hfunc = ETH_RSS_HASH_TOP; + break; + case rss_func_xor: + *hfunc = ETH_RSS_HASH_XOR; + break; + case rss_func_order: + *hfunc = ETH_RSS_HASH_TOP; + break; + } + } +#endif + + if (indir) + rnp_get_reta(adapter, indir); + + if (key) + memcpy(key, adapter->rss_key, rnp_get_rxfh_key_size(netdev)); + + return 0; +} + +enum { + PART_FW, + PART_CFG, + PART_MACSN, + PART_PCSPHY, + PART_PXE, +}; + +#define UCFG_OFF 0x41000 +#define UCFG_SZ (4096) +#define PXE_OFF 0x4a000 +#define PXE_SZ (512 * 1024) + +static int rnp_flash_firmware(struct rnp_adapter *adapter, int region, + const u8 *data, int bytes) +{ + struct rnp_hw *hw = &adapter->hw; + + switch (region) { + case PART_FW: { + if (*((u32 *)(data + 28)) != 0xA51BBEAF) { + return -EINVAL; + } + if (bytes > PXE_OFF) { + int err; + int wbytes_seg1 = bytes - PXE_OFF; + if (wbytes_seg1 > PXE_SZ) { + wbytes_seg1 = PXE_SZ; + } + + err = rnp_fw_update(hw, PART_FW, data, UCFG_OFF); + if (err) { + return err; + } + /* skip ucfg flush only pxe */ + err = rnp_fw_update(hw, PART_PXE, data + PXE_OFF, + wbytes_seg1); + if (err) { + return err; + } + return 0; + } + break; + } + case PART_CFG: { + if (*((u32 *)(data)) != 0x00010cf9) { + return 
-EINVAL; + } + break; + } + case PART_MACSN: { + break; + } + case PART_PCSPHY: { + if (*((u16 *)(data)) != 0x081d) { + return -EINVAL; + } + break; + } + case PART_PXE: { + if ((*((u16 *)(data)) != 0xaa55) && + (*((u16 *)(data)) != 0x5a4d)) { + return -EINVAL; + } + break; + } + default: { + return -EINVAL; + } + } + return rnp_fw_update(hw, region, data, bytes); +} + +static int rnp_flash_firmware_from_file(struct net_device *dev, + struct rnp_adapter *adapter, int region, + const char *filename) +{ + const struct firmware *fw; + int rc; + + rc = request_firmware(&fw, filename, &dev->dev); + if (rc != 0) { + netdev_err(dev, "Error %d requesting firmware file: %s\n", rc, + filename); + return rc; + } + + rc = rnp_flash_firmware(adapter, region, fw->data, fw->size); + release_firmware(fw); + return rc; +} + +int rnp_flash_device(struct net_device *dev, struct ethtool_flash *flash) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + + if (IS_VF(adapter->hw.pfvfnum)) { + netdev_err(dev, + "flashdev not supported from a virtual function\n"); + return -EINVAL; + } + + return rnp_flash_firmware_from_file(dev, adapter, flash->region, + flash->data); +} +static int rnp_rss_indir_tbl_max(struct rnp_adapter *adapter) +{ + if (adapter->hw.rss_type == rnp_rss_uv3p) + return 8; + else if (adapter->hw.rss_type == rnp_rss_uv440) + return 128; + else if (adapter->hw.rss_type == rnp_rss_n10) + return 128; + else + return 128; +} + +#ifdef HAVE_ETHTOOL_RXFH_PARAM +int rnp_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh, struct netlink_ext_ack *extack) +#else +#ifdef HAVE_RXFH_HASHFUNC +int rnp_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, + const u8 hfunc) +#else +#ifdef HAVE_RXFH_NONCONST +int rnp_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#else +int rnp_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key) +#endif /* HAVE_RXFH_NONCONST */ +#endif /* HAVE_RXFH_HASHFUNC */ +#endif +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + int i; + u32 reta_entries = rnp_rss_indir_tbl_entries(adapter); +#ifdef HAVE_ETHTOOL_RXFH_PARAM + const u32 *indir = rxfh->indir; + const u8 *key = rxfh->key; + const u8 hfunc = rxfh->hfunc; +#endif + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc != ETH_RSS_HASH_NO_CHANGE && + hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; +#endif + if ((indir) && (adapter->flags & RNP_FLAG_SRIOV_ENABLED)) { + return -EINVAL; + } + + /* Fill out the redirection table */ + if (indir) { + int max_queues = min_t(int, adapter->num_rx_queues, + rnp_rss_indir_tbl_max(adapter)); + + /* Allow max 2 queues w/ SR-IOV. */ + if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED) && + (max_queues > 2)) + max_queues = 2; + + /* Verify user input. 
*/ + for (i = 0; i < reta_entries; i++) + if (indir[i] >= max_queues) + return -EINVAL; + + /* store rss tbl */ + for (i = 0; i < reta_entries; i++) + adapter->rss_indir_tbl[i] = indir[i]; + + rnp_store_reta(adapter); + } + + /* Fill out the rss hash key */ + if (key) { + memcpy(adapter->rss_key, key, rnp_get_rxfh_key_size(netdev)); + rnp_store_key(adapter); + } + + return 0; +} + +#endif + +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +static const struct ethtool_ops_ext rnp_ethtool_ops_ext = { + .size = sizeof(struct ethtool_ops_ext), + .get_ts_info = rnp_get_ts_info, + .set_phys_id = rnp_set_phys_id, + .get_channels = rnp_get_channels, + .set_channels = rnp_set_channels, +#ifdef ETHTOOL_GMODULEINFO + .get_module_info = rnp_get_module_info, + .get_module_eeprom = rnp_get_module_eeprom, +#endif +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_indir_size = rnp_rss_indir_size, + .get_rxfh_key_size = rnp_get_rxfh_key_size, + .get_rxfh = rnp_get_rxfh, + .set_rxfh = rnp_set_rxfh, +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ +}; +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ + +void rnp_set_ethtool_ops(struct net_device *netdev) +{ +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT + set_ethtool_ops_ext(netdev, &rnp_ethtool_ops_ext); +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +} diff --git a/drivers/net/ethernet/mucse/rnp/rnp_ethtool.h b/drivers/net/ethernet/mucse/rnp/rnp_ethtool.h new file mode 100755 index 0000000000000000000000000000000000000000..bb5ec1597bf965d5d8439f2faf743cf35d515bc0 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_ethtool.h @@ -0,0 +1,214 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef _RNP_ETHTOOL_H_ +#define _RNP_ETHTOOL_H_ + +enum { NETDEV_STATS, RNP_STATS }; + +struct rnp_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +/* rnp allocates num_tx_queues and num_rx_queues symmetrically so + * we set the num_rx_queues to evaluate to num_tx_queues. This is + * used because we do not have a good way to get the max number of + * rx queues with CONFIG_RPS disabled. 
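 * On kernels without real_num_rx_queues (the NO_REAL_QUEUE_NUM case
 * below) both macros fall back to netdev->num_tx_queues.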
+ */ +#ifdef HAVE_TX_MQ +#ifdef HAVE_NETDEV_SELECT_QUEUE +#ifdef NO_REAL_QUEUE_NUM +#define RNP_NUM_RX_QUEUES netdev->num_tx_queues +#define RNP_NUM_TX_QUEUES netdev->num_tx_queues +#else +#define RNP_NUM_RX_QUEUES netdev->real_num_rx_queues +#define RNP_NUM_TX_QUEUES netdev->real_num_tx_queues + +#endif +#else +#define RNP_NUM_RX_QUEUES adapter->indices +#define RNP_NUM_TX_QUEUES adapter->indices +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#else /* HAVE_TX_MQ */ +#define RNP_NUM_TX_QUEUES 1 +#define RNP_NUM_RX_QUEUES \ + (((struct rnp_adapter *)netdev_priv(netdev))->num_rx_queues) +#endif /* HAVE_TX_MQ */ + +#define RNP_NETDEV_STAT(_net_stat) \ + { \ + .stat_string = #_net_stat, \ + .sizeof_stat = \ + sizeof_field(struct net_device_stats, _net_stat), \ + .stat_offset = offsetof(struct net_device_stats, _net_stat) \ + } + +#define RNP_HW_STAT(_name, _stat) \ + { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(struct rnp_adapter, _stat), \ + .stat_offset = offsetof(struct rnp_adapter, _stat) \ + } + +struct rnp_tx_queue_ring_stat { + u64 hw_head; + u64 hw_tail; + u64 sw_to_clean; + u64 sw_to_next_to_use; +}; + +struct rnp_rx_queue_ring_stat { + u64 hw_head; + u64 hw_tail; + u64 sw_to_use; + u64 sw_to_clean; +}; + +#define RNP_QUEUE_STATS_LEN \ + (RNP_NUM_TX_QUEUES * \ + (sizeof(struct rnp_tx_queue_stats) / sizeof(u64) + \ + sizeof(struct rnp_queue_stats) / sizeof(u64) + \ + sizeof(struct rnp_tx_queue_ring_stat) / sizeof(u64)) + \ + RNP_NUM_RX_QUEUES * \ + (sizeof(struct rnp_rx_queue_stats) / sizeof(u64) + \ + sizeof(struct rnp_queue_stats) / sizeof(u64) + \ + sizeof(struct rnp_rx_queue_ring_stat) / sizeof(u64))) + +#define RNP_STATS_LEN \ + (RNP_GLOBAL_STATS_LEN + RNP_HWSTRINGS_STATS_LEN + RNP_QUEUE_STATS_LEN) + +int rnp_wol_exclusion(struct rnp_adapter *adapter, struct ethtool_wolinfo *wol); +void rnp_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol); + +int rnp_wol_exclusion(struct rnp_adapter *adapter, struct ethtool_wolinfo *wol); +int rnp_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol); +void rnp_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, + u64 *data); +void rnp_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause); +int rnp_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause); +#ifdef ETHTOOL_GFECPARAM +int rnp_get_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fecparam); + +int rnp_set_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fecparam); +#endif + +u32 rnp_get_msglevel(struct net_device *netdev); +void rnp_set_msglevel(struct net_device *netdev, u32 data); +int rnp_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state); + +int rnp_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info); +void rnp_get_channels(struct net_device *dev, struct ethtool_channels *ch); +int rnp_set_channels(struct net_device *dev, struct ethtool_channels *ch); +int rnp_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo); +int rnp_get_module_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data); + +#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +void rnp_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack); +#else +void rnp_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring); +#endif /* HAVE_ETHTOOL_EXTENDED_RINGPARAMS */ + +#ifdef 
HAVE_ETHTOOL_EXTENDED_RINGPARAMS +int rnp_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack); +#else +int rnp_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring); +#endif /* HAVE_ETHTOOL_EXTENDED_RINGPARAMS */ + +int rnp_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump); +int rnp_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, + void *buffer); +int rnp_set_dump(struct net_device *netdev, struct ethtool_dump *dump); +int rnp_get_coalesce(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack); +#else + struct ethtool_coalesce *coal); +#endif +int rnp_set_coalesce(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack); +#else + struct ethtool_coalesce *ec); +#endif + +#ifndef HAVE_NDO_SET_FEATURES +u32 rnp_get_rx_csum(struct net_device *netdev); +int rnp_set_rx_csum(struct net_device *netdev, u32 data); +int rnp_set_tx_csum(struct net_device *netdev, u32 data); +#ifdef NETIF_F_TSO +int rnp_set_tso(struct net_device *netdev, u32 data); +#endif +#endif + +#ifdef ETHTOOL_GRXRINGS +int rnp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, +#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS + void *rule_locs); +#else + u32 *rule_locs); +#endif +int rnp_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd); +#endif + +#ifdef ETHTOOL_SRXNTUPLE +int rnp_set_rx_ntuple(struct net_device __always_unused *dev, + struct ethtool_rx_ntuple __always_unused *cmd); +#endif + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) +u32 rnp_rss_indir_size(struct net_device *netdev); +u32 rnp_get_rxfh_key_size(struct net_device *netdev); +void rnp_get_reta(struct rnp_adapter *adapter, u32 *indir); +#ifdef HAVE_ETHTOOL_RXFH_PARAM +int rnp_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *param); +#else +#ifdef HAVE_RXFH_HASHFUNC +int rnp_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc); +#else +int rnp_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key); +#endif +#endif +int rnp_flash_device(struct net_device *dev, struct ethtool_flash *flash); +#ifdef HAVE_ETHTOOL_RXFH_PARAM +int rnp_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *param, struct netlink_ext_ack *extack); +#else +#ifdef HAVE_RXFH_HASHFUNC +int rnp_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, + const u8 hfunc); +#else +#ifdef HAVE_RXFH_NONCONST +int rnp_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key); +#else +int rnp_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key); +#endif /* HAVE_RXFH_NONCONST */ +#endif /* HAVE_RXFH_HASHFUNC */ +#endif +#endif + +#define RNP_WOL_GET_SUPPORTED(adapter) (!!(adapter->wol & GENMASK(3, 0))) +#define RNP_WOL_GET_STATUS(adapter) (!!(adapter->wol & GENMASK(7, 4))) +#define RNP_WOL_SET_SUPPORTED(adapter) (adapter->wol |= BIT(0)) +#define RNP_WOL_SET_STATUS(adapter) (adapter->wol |= BIT(4)) +#define RNP_WOL_CLEAR_STATUS(adapter) (adapter->wol &= ~BIT(4)) + +#endif diff --git a/drivers/net/ethernet/mucse/rnp/rnp_lib.c b/drivers/net/ethernet/mucse/rnp/rnp_lib.c new file mode 100755 index 0000000000000000000000000000000000000000..358e4a738fd85a569a7decaee477ff5a70d5c703 --- /dev/null +++ 
b/drivers/net/ethernet/mucse/rnp/rnp_lib.c @@ -0,0 +1,1394 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include "rnp.h" +#include "rnp_sriov.h" +#include "rnp_common.h" + +//#define CPU_OFFSET_TEST +#if IS_ENABLED(CONFIG_DCB) +/** + * rnp_cache_ring_dcb_sriov - Descriptor ring to register mapping for SRIOV + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for SR-IOV to the assigned rings. It + * will also try to cache the proper offsets if RSS/FCoE are enabled along + * with VMDq. + * + **/ +static bool rnp_cache_ring_dcb_sriov(struct rnp_adapter *adapter) +{ + u8 tcs = netdev_get_num_tc(adapter->netdev); + /* verify we have DCB queueing enabled before proceeding */ + if (tcs <= 1) + return false; + + /* verify we have VMDq enabled before proceeding */ + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return false; + + return true; +} +#endif + +/** + * rnp_cache_ring_dcb - Descriptor ring to register mapping for DCB + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for DCB to the assigned rings. + * + **/ +static bool rnp_cache_ring_dcb(struct rnp_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + unsigned int tx_idx, rx_idx; + int tc, offset, rss_i, i, step; + u8 num_tcs = netdev_get_num_tc(dev); + struct rnp_ring *ring; + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + + /* verify we have DCB queueing enabled before proceeding */ + if (num_tcs <= 1) + return false; + + rss_i = adapter->ring_feature[RING_F_RSS].indices; + + step = 4; + for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) { + /* + * we from tc start + * tc0 0 4 8 c + * tc1 1 5 9 d + * tc2 2 6 a e + * tc3 3 7 b f + */ + tx_idx = tc; + rx_idx = tc; + for (i = 0; i < rss_i; i++, tx_idx += step, rx_idx += step) { + ring = adapter->tx_ring[offset + i]; + + ring->ring_addr = + dma->dma_ring_addr + RING_OFFSET(tx_idx); + ring->rnp_queue_idx = tx_idx; + ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT; + ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK; + ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR; + + ring = adapter->rx_ring[offset + i]; + ring->ring_addr = + dma->dma_ring_addr + RING_OFFSET(rx_idx); + ring->rnp_queue_idx = rx_idx; + ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT; + ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK; + ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR; + } + } + + return true; +} + +/** + * rnp_cache_ring_sriov - Descriptor ring to register mapping for sriov + * @adapter: board private structure to initialize + * + * SR-IOV doesn't use any descriptor rings but changes the default if + * no other mapping is used. + * + */ +static bool rnp_cache_ring_sriov(struct rnp_adapter *adapter) +{ + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & RNP_FLAG_VMDQ_ENABLED)) + return false; + + return true; +} + +/** + * rnp_cache_ring_rss - Descriptor ring to register mapping for RSS + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for RSS to the assigned rings. 
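 * On n400 hardware only every fourth hardware ring is used, so the
 * mapping below advances in steps of four (ring 0, 4, 8, ...).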
+ * + **/ +static bool rnp_cache_ring_rss(struct rnp_adapter *adapter) +{ + int i; + /* setup here */ + int ring_step = 1; + struct rnp_ring *ring; + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + + /* n400 use 0 4 8 c */ + if (hw->hw_type == rnp_hw_n400) + ring_step = 4; + + /* some ring alloc rules can be added here */ + for (i = 0; i < adapter->num_rx_queues; i++) { + ring = adapter->tx_ring[i]; + ring->rnp_queue_idx = i * ring_step; + ring->ring_addr = + dma->dma_ring_addr + RING_OFFSET(ring->rnp_queue_idx); + + ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT; + ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK; + ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR; + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + ring = adapter->rx_ring[i]; + ring->rnp_queue_idx = i * ring_step; + ring->ring_addr = + dma->dma_ring_addr + RING_OFFSET(ring->rnp_queue_idx); + ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT; + ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK; + ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR; + } + + return true; +} + +/** + * rnp_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + * + * Note, the order the various feature calls is important. It must start + * with the "most" features enabled at the same time, then trickle down to + * the least amount of features turned on at once. + **/ +static void rnp_cache_ring_register(struct rnp_adapter *adapter) +{ + /* start with default case */ + +#if IS_ENABLED(CONFIG_DCB) + if (rnp_cache_ring_dcb_sriov(adapter)) + return; + +#endif + if (rnp_cache_ring_dcb(adapter)) + return; + + /* sriov ring alloc is added before, this maybe no use */ + if (rnp_cache_ring_sriov(adapter)) + return; + + rnp_cache_ring_rss(adapter); +} + +#define RNP_RSS_128Q_MASK 0x7F +#define RNP_RSS_64Q_MASK 0x3F +#define RNP_RSS_16Q_MASK 0xF +#define RNP_RSS_32Q_MASK 0x1F +#define RNP_RSS_8Q_MASK 0x7 +#define RNP_RSS_4Q_MASK 0x3 +#define RNP_RSS_2Q_MASK 0x1 +#define RNP_RSS_DISABLED_MASK 0x0 + +#if IS_ENABLED(CONFIG_DCB) + +/** + * rnp_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB + * @adapter: board private structure to initialize + * + * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues + * and VM pools where appropriate. Also assign queues based on DCB + * priorities and map accordingly.. 
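 * RSS is disabled in this combination since DCB, VMDq and RSS are not
 * supported simultaneously, and ATR is turned off as well.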
+ * + **/ +static bool rnp_set_dcb_sriov_queues(struct rnp_adapter *adapter) +{ + int i; + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; + u16 vmdq_m = 0; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + /* verify we have DCB queueing enabled before proceeding */ + if (tcs <= 1) + return false; + + /* verify we have VMDq enabled before proceeding */ + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return false; + + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + + /* 16 pools w/ 8 TC per pool */ + if (tcs > 4) { + vmdq_i = min_t(u16, vmdq_i, 16); + vmdq_m = RNP_n10_VMDQ_8Q_MASK; + /* 32 pools w/ 4 TC per pool */ + } else { + vmdq_i = min_t(u16, vmdq_i, 32); + vmdq_m = RNP_n10_VMDQ_4Q_MASK; + } + + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* + * We do not support DCB, VMDq, and RSS all simultaneously + * so we will disable RSS since it is the lowest priority + */ + adapter->ring_feature[RING_F_RSS].indices = 2; + adapter->ring_feature[RING_F_RSS].mask = RNP_RSS_DISABLED_MASK; + + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= ~RNP_FLAG_FDIR_HASH_CAPABLE; + + adapter->num_tx_queues = vmdq_i * tcs; + adapter->num_rx_queues = vmdq_i * tcs; + + /* configure TC to queue mapping */ + for (i = 0; i < tcs; i++) + netdev_set_tc_queue(adapter->netdev, i, 1, i); + + return true; +} +#endif + +static bool rnp_set_dcb_queues(struct rnp_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + struct rnp_ring_feature *f; + int rss_i, rss_m, i; + int tcs; + + /* Map queue offset and counts onto allocated tx queues */ + tcs = netdev_get_num_tc(dev); + + /* verify we have DCB queueing enabled before proceeding */ + if (tcs <= 1) + return false; + + /* determine the upper limit for our current DCB mode */ + rss_i = dev->num_tx_queues / tcs; + + /* we only support 4 tc , rss_i max is 32 */ + + /* 4 TC w/ 32 queues per TC */ + rss_i = min_t(u16, rss_i, 32); + rss_m = RNP_RSS_32Q_MASK; + + /* set RSS mask and indices */ + /* f->limit is relative with cpu_vector */ + f = &adapter->ring_feature[RING_F_RSS]; + /* use f->limit to change rss */ + rss_i = min_t(int, rss_i, f->limit); + f->indices = rss_i; + f->mask = rss_m; + + /* disable ATR as it is not supported when multiple TCs are enabled */ + adapter->flags &= ~RNP_FLAG_FDIR_HASH_CAPABLE; + + /* setup queue tc num */ + for (i = 0; i < tcs; i++) + netdev_set_tc_queue(dev, i, rss_i, rss_i * i); + + /* set the true queues */ + adapter->num_tx_queues = rss_i * tcs; + adapter->num_rx_queues = rss_i * tcs; + + return true; +} + +/** + * rnp_set_sriov_queues - Allocate queues for SR-IOV devices + * @adapter: board private structure to initialize + * + * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues + * and VM pools where appropriate. If RSS is available, then also try and + * enable RSS and map accordingly. 
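 * In this mode the PF itself is limited to hw->sriov_ring_limit ring
 * pairs.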
+ * + **/ +static bool rnp_set_sriov_queues(struct rnp_adapter *adapter) +{ + u16 vmdq_m = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; + u16 rss_m = RNP_RSS_DISABLED_MASK; + struct rnp_hw *hw = &adapter->hw; + + /* only proceed if SR-IOV is enabled */ + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return false; + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = + adapter->max_ring_pair_counts - 1; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* limit RSS based on user input and save for later use */ + adapter->ring_feature[RING_F_RSS].indices = rss_i; + adapter->ring_feature[RING_F_RSS].mask = rss_m; + + adapter->num_rx_queues = hw->sriov_ring_limit; + adapter->num_tx_queues = hw->sriov_ring_limit; + + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= ~RNP_FLAG_FDIR_HASH_CAPABLE; + + return true; +} + +u32 rnp_rss_indir_tbl_entries(struct rnp_adapter *adapter) +{ + if (adapter->hw.rss_type == rnp_rss_uv3p) + return 8; + else if (adapter->hw.rss_type == rnp_rss_uv440) + return 128; + else if (adapter->hw.rss_type == rnp_rss_n10) + return 128; + else + return 128; +} + +/** + * rnp_set_rss_queues - Allocate queues for RSS + * @adapter: board private structure to initialize + * + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. + * + **/ +static bool rnp_set_rss_queues(struct rnp_adapter *adapter) +{ + struct rnp_ring_feature *f; + u16 rss_i; + + f = &adapter->ring_feature[RING_F_RSS]; + /* use thid to change ring num */ + rss_i = f->limit; + /* set limit -> indices */ + f->indices = rss_i; + + /* should init rss mask */ + switch (adapter->hw.rss_type) { + case rnp_rss_uv3p: + f->mask = RNP_RSS_8Q_MASK; + break; + case rnp_rss_uv440: + f->mask = RNP_RSS_64Q_MASK; + break; + case rnp_rss_n10: + /* maybe not good */ + f->mask = RNP_RSS_128Q_MASK; + break; + /* maybe not good */ + default: + f->mask = 0; + + break; + } + + adapter->num_tx_queues = + min_t(int, rss_i, adapter->max_ring_pair_counts); + adapter->num_rx_queues = adapter->num_tx_queues; + + rnp_dbg("[%s] limit:%d indices:%d queues:%d\n", adapter->name, f->limit, + f->indices, adapter->num_tx_queues); + + return true; +} + +/** + * rnp_set_num_queues - Allocate queues for device, feature dependent + * @adapter: board private structure to initialize + * + * This is the top level queue allocation routine. The order here is very + * important, starting with the "most" number of features turned on at once, + * and ending with the smallest set of features. This way large combinations + * can be allocated if they're turned on, and smaller combinations are the + * fallthrough conditions. 
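 * The order tried here is DCB with SR-IOV, then DCB, then SR-IOV, and
 * finally plain RSS.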
+ * + **/ +static void rnp_set_num_queues(struct rnp_adapter *adapter) +{ + /* Start with base case */ + adapter->num_tx_queues = 1; + adapter->num_rx_queues = 1; + +#if IS_ENABLED(CONFIG_DCB) + if (rnp_set_dcb_sriov_queues(adapter)) + return; + +#endif + if (rnp_set_dcb_queues(adapter)) + return; + + if (rnp_set_sriov_queues(adapter)) + return; + /* at last we support rss */ + rnp_set_rss_queues(adapter); +} + +int rnp_acquire_msix_vectors(struct rnp_adapter *adapter, int vectors) +{ + int err; + +#ifdef DISABLE_RX_IRQ + vectors -= adapter->num_other_vectors; + adapter->num_q_vectors = min(vectors, adapter->max_q_vectors); + return 0; +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) + err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, + vectors, vectors); +#else + err = pci_enable_msix(adapter->pdev, adapter->msix_entries, vectors); +#endif + if (err < 0) { + rnp_err("pci_enable_msix failed: req:%d err:%d\n", vectors, + err); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + return -EINVAL; + } + /* + * Adjust for only the vectors we'll use, which is minimum + * of max_msix_q_vectors + NON_Q_VECTORS, or the number of + * vectors we were allocated. + */ + vectors -= adapter->num_other_vectors; + adapter->num_q_vectors = min(vectors, adapter->max_q_vectors); + /* in dcb we use max 32 q-vectors */ + /* each vectors for max 4 tcs */ + if (adapter->flags & RNP_FLAG_DCB_ENABLED) + adapter->num_q_vectors = min(32, adapter->num_q_vectors); + + return 0; +} + +static void rnp_add_ring(struct rnp_ring *ring, struct rnp_ring_container *head) +{ + ring->next = head->ring; + head->ring = ring; + head->count++; +} + +static inline void rnp_irq_enable_queues(struct rnp_q_vector *q_vector) +{ + struct rnp_ring *ring; + + rnp_for_each_ring(ring, q_vector->rx) { +#ifdef CONFIG_RNP_DISABLE_TX_IRQ + rnp_wr_reg(ring->dma_int_mask, ~(RX_INT_MASK)); +#else + rnp_wr_reg(ring->dma_int_mask, ~(RX_INT_MASK | TX_INT_MASK)); +#endif + } +} + +static inline void rnp_irq_disable_queues(struct rnp_q_vector *q_vector) +{ + struct rnp_ring *ring; + + rnp_for_each_ring(ring, q_vector->tx) { + rnp_wr_reg(ring->dma_int_mask, (RX_INT_MASK | TX_INT_MASK)); + } +} + +static enum hrtimer_restart irq_miss_check(struct hrtimer *hrtimer) +{ + struct rnp_q_vector *q_vector; + struct rnp_ring *ring; + struct rnp_tx_desc *eop_desc; + struct rnp_adapter *adapter; + + int tx_next_to_clean; + int tx_next_to_use; + + struct rnp_tx_buffer *tx_buffer; + union rnp_rx_desc *rx_desc; + + q_vector = container_of(hrtimer, struct rnp_q_vector, + irq_miss_check_timer); + adapter = q_vector->adapter; + if (test_bit(__RNP_DOWN, &adapter->state) || + test_bit(__RNP_RESETTING, &adapter->state)) + goto do_self_napi; + rnp_irq_disable_queues(q_vector); + /* check tx irq miss */ + rnp_for_each_ring(ring, q_vector->tx) { + tx_next_to_clean = ring->next_to_clean; + tx_next_to_use = ring->next_to_use; + /* have work to do */ + if (tx_next_to_use == tx_next_to_clean) + continue; + /* have tx done */ + tx_buffer = &ring->tx_buffer_info[tx_next_to_clean]; + eop_desc = tx_buffer->next_to_watch; + /* next_to_watch maybe null in some condition */ + if (eop_desc) { + if ((eop_desc->vlan_cmd & + cpu_to_le32(RNP_TXD_STAT_DD))) { + if (q_vector->new_rx_count != + q_vector->old_rx_count) { + ring_wr32(ring, + RNP_DMA_REG_RX_INT_DELAY_PKTCNT, + q_vector->new_rx_count); + q_vector->old_rx_count = + q_vector->new_rx_count; + } + napi_schedule_irqoff(&q_vector->napi); + goto do_self_napi; + } + } + } + + /* check rx irq */ + 
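/*
 * Peek at the next descriptor to be cleaned: a set DD bit with a
 * non-zero length means a completion was missed, so the rx interrupt
 * coalesce count is refreshed and NAPI is scheduled by hand; a DD bit
 * with a zero length is treated as a hardware fault and a reset is
 * requested instead.
 */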
rnp_for_each_ring(ring, q_vector->rx) { + rx_desc = RNP_RX_DESC(ring, ring->next_to_clean); + if (rnp_test_staterr(rx_desc, RNP_RXD_STAT_DD)) { + int size; + + size = le16_to_cpu(rx_desc->wb.len); + + if (size) { + if (q_vector->new_rx_count != + q_vector->old_rx_count) { + ring_wr32( + ring, + RNP_DMA_REG_RX_INT_DELAY_PKTCNT, + q_vector->new_rx_count); + q_vector->old_rx_count = + q_vector->new_rx_count; + } + napi_schedule_irqoff(&q_vector->napi); + } else { + /* in sriov mode set reset pf flags */ + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) + adapter->flags2 |= RNP_FLAG2_RESET_PF; + else + adapter->flags2 |= + RNP_FLAG2_RESET_REQUESTED; + } + goto do_self_napi; + } + } + /* open irq again */ + rnp_irq_enable_queues(q_vector); +do_self_napi: + return HRTIMER_NORESTART; +} + +/** + * rnp_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + **/ +static int rnp_alloc_q_vector(struct rnp_adapter *adapter, int eth_queue_idx, + int v_idx, int r_idx, int r_count, int step) +{ + struct rnp_q_vector *q_vector; + struct rnp_ring *ring; + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + int node = NUMA_NO_NODE; + int cpu = -1; + int ring_count, size; + int txr_count, rxr_count, idx; + int rxr_idx = r_idx, txr_idx = r_idx; + int cpu_offset = 0; +#ifdef CPU_OFFSET_TEST + struct device *dev = &adapter->pdev->dev; + int i; + int orig_node = dev_to_node(dev); +#endif + + DPRINTK(PROBE, INFO, + "eth_queue_idx:%d v_idx:%d(off:%d) ring:%d ring_cnt:%d, " + "step:%d\n", + eth_queue_idx, v_idx, adapter->q_vector_off, r_idx, r_count, + step); + + txr_count = rxr_count = r_count; + + ring_count = txr_count + rxr_count; + size = sizeof(struct rnp_q_vector) + + (sizeof(struct rnp_ring) * ring_count); + +#ifdef CPU_OFFSET_TEST + for (i = 0; i < num_online_cpus(); i++) { + cpu = i; + node = cpu_to_node(cpu); + if (node == orig_node) { + cpu_offset = cpu; + break; + } + } + + if (cpu_offset + v_idx - adapter->q_vector_off > num_online_cpus()) { + cpu_offset = cpu_offset - num_online_cpus(); + rnp_dbg("start from zero cpu %d\n", num_online_cpus()); + } +#endif + /* should minis adapter->q_vector_off */ + if (cpu_online(cpu_offset + v_idx - adapter->q_vector_off)) { + /* cpu 1 - 7 */ + cpu = cpu_offset + v_idx - adapter->q_vector_off; + node = cpu_to_node(cpu); + } + + /* allocate q_vector and rings */ + q_vector = kzalloc_node(size, GFP_KERNEL, node); + if (!q_vector) + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* setup affinity mask and node */ +#ifdef HAVE_IRQ_AFFINITY_HINT + if (cpu != -1) + cpumask_set_cpu(cpu, &q_vector->affinity_mask); + +#endif + q_vector->numa_node = node; + +#ifdef CONFIG_RNP_DCA + /* initialize CPU for DCA */ + q_vector->cpu = -1; + +#endif + + /* initialize nap */ +#ifdef HAVE_NETIF_NAPI_ADD_WEIGHT + netif_napi_add_weight(adapter->netdev, &q_vector->napi, rnp_poll, + adapter->napi_budge); +#else + netif_napi_add(adapter->netdev, &q_vector->napi, rnp_poll); +#endif + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx - adapter->q_vector_off] = q_vector; + 
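/*
 * Note: adapter->q_vector[] holds only queue vectors, hence the
 * v_idx - q_vector_off slot used above (q_vector_off appears to be
 * the index of the first queue interrupt vector).
 */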
q_vector->adapter = adapter; + q_vector->v_idx = v_idx; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + for (idx = 0; idx < txr_count; idx++) { + /* assign generic ring traits */ + ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + rnp_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_item_count; + if (adapter->flags & RNP_FLAG_DCB_ENABLED) { + int rss_i; + + rss_i = adapter->ring_feature[RING_F_RSS].indices; + /* in dcb mode should assign rss */ + ring->queue_index = eth_queue_idx + idx * rss_i; + } else { + ring->queue_index = eth_queue_idx + idx; + } + /* rnp_queue_idx can be changed after */ + /* it is used to location hw reg */ + ring->rnp_queue_idx = txr_idx; + ring->ring_addr = dma->dma_ring_addr + RING_OFFSET(txr_idx); + ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT; + ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK; + ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR; + ring->device_id = adapter->pdev->device; + ring->pfvfnum = hw->pfvfnum; + /* n10 should skip tx start control */ + if (hw->hw_type == rnp_hw_n10) + ring->ring_flags |= RNP_RING_SKIP_TX_START; + + if (hw->hw_type == rnp_hw_n400) + ring->ring_flags |= RNP_RING_SKIP_TX_START; + + /* assign ring to adapter */ + adapter->tx_ring[ring->queue_index] = ring; + + /* update count and index */ + txr_idx += step; + + rnp_dbg("\t\t%s:vector[%d] <--RNP TxRing:%d, eth_queue:%d\n", + adapter->name, v_idx, ring->rnp_queue_idx, + ring->queue_index); + + /* push pointer to next ring */ + ring++; + } + + for (idx = 0; idx < rxr_count; idx++) { + /* assign generic ring traits */ + ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + rnp_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_item_count; + /* rnp_queue_idx can be changed after */ + /* it is used to location hw reg */ + if (adapter->flags & RNP_FLAG_DCB_ENABLED) { + int rss_i; + + rss_i = adapter->ring_feature[RING_F_RSS].indices; + /* in dcb mode should assign rss */ + ring->queue_index = eth_queue_idx + idx * rss_i; + } else { + ring->queue_index = eth_queue_idx + idx; + } + ring->rnp_queue_idx = rxr_idx; + ring->ring_addr = dma->dma_ring_addr + RING_OFFSET(rxr_idx); + ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT; + ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK; + ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR; + ring->device_id = adapter->pdev->device; + ring->pfvfnum = hw->pfvfnum; + if (hw->hw_type == rnp_hw_n10) { + } else if (hw->hw_type == rnp_hw_n400) { + } + + /* assign ring to adapter */ + adapter->rx_ring[ring->queue_index] = ring; + rnp_dbg("\t\t%s:vector[%d] <--RNP RxRing:%d, eth_queue:%d\n", + adapter->name, v_idx, ring->rnp_queue_idx, + ring->queue_index); + + /* update count and index */ + rxr_idx += step; + + /* push pointer to next ring */ + ring++; + } + if ((hw->hw_type == rnp_hw_n10) || (hw->hw_type == rnp_hw_n400)) { + q_vector->vector_flags |= RNP_QVECTOR_FLAG_IRQ_MISS_CHECK; + q_vector->vector_flags |= RNP_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS; + /* initialize timer */ + q_vector->irq_check_usecs = 1000; + 
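+		/*
+		 * irq_miss_check() above runs from this pinned, relative
+		 * hrtimer: it masks the queue interrupts, looks for completed
+		 * but unserviced Tx/Rx descriptors and reschedules NAPI when
+		 * any are found, otherwise it re-enables the interrupts.
+		 */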
hrtimer_init(&q_vector->irq_miss_check_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED); + q_vector->irq_miss_check_timer.function = irq_miss_check; + q_vector->new_rx_count = adapter->rx_frames; + q_vector->old_rx_count = adapter->rx_frames; + } + + return 0; +} + +/** + * rnp_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void rnp_free_q_vector(struct rnp_adapter *adapter, int v_idx) +{ + struct rnp_q_vector *q_vector = adapter->q_vector[v_idx]; + struct rnp_ring *ring; + + rnp_dbg("v_idx:%d\n", v_idx); + + rnp_for_each_ring(ring, q_vector->tx) + adapter->tx_ring[ring->queue_index] = NULL; + + rnp_for_each_ring(ring, q_vector->rx) + adapter->rx_ring[ring->queue_index] = NULL; + + adapter->q_vector[v_idx] = NULL; + netif_napi_del(&q_vector->napi); + + /* must stop timer */ + if (q_vector->vector_flags & RNP_QVECTOR_FLAG_IRQ_MISS_CHECK) + hrtimer_cancel(&q_vector->irq_miss_check_timer); + + /* + * rnp_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. + */ + kfree_rcu(q_vector, rcu); +} + +/** + * rnp_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int rnp_alloc_q_vectors(struct rnp_adapter *adapter) +{ + int v_idx = adapter->q_vector_off; + int ring_idx = 0; + int r_remaing = + min_t(int, adapter->num_tx_queues, adapter->num_rx_queues); + int ring_step = 1; + int err, ring_cnt, v_remaing = adapter->num_q_vectors; + int q_vector_nums = 0; + struct rnp_hw *hw = &adapter->hw; + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + ring_idx = 0; + /* only 2 rings when sriov enabled */ + /* from back */ + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + ring_idx = 0; + r_remaing = hw->sriov_ring_limit; + } else { + ring_idx = adapter->max_ring_pair_counts - + ring_step * hw->sriov_ring_limit; + r_remaing = hw->sriov_ring_limit; + } + } + + adapter->eth_queue_idx = 0; + BUG_ON(adapter->num_q_vectors == 0); + + if (adapter->flags & RNP_FLAG_DCB_ENABLED) { + rnp_dbg("in dcb mode r_remaing %d, num_q_vectors %d\n", + r_remaing, v_remaing); + } + + rnp_dbg("r_remaing:%d, ring_step:%d num_q_vectors:%d\n", r_remaing, + ring_step, v_remaing); + + /* can support muti rings in one q_vector */ + for (; r_remaing > 0 && v_remaing > 0; v_remaing--) { + ring_cnt = DIV_ROUND_UP(r_remaing, v_remaing); + if (adapter->flags & RNP_FLAG_DCB_ENABLED) + BUG_ON(ring_cnt != adapter->num_tc); + + err = rnp_alloc_q_vector(adapter, adapter->eth_queue_idx, v_idx, + ring_idx, ring_cnt, ring_step); + if (err) + goto err_out; + ring_idx += ring_step * ring_cnt; + r_remaing -= ring_cnt; + v_idx++; + q_vector_nums++; + /* dcb mode only add 1 */ + if (adapter->flags & RNP_FLAG_DCB_ENABLED) + adapter->eth_queue_idx += 1; + else + adapter->eth_queue_idx += ring_cnt; + } + /* should fix the real used q_vectors_nums */ + adapter->num_q_vectors = q_vector_nums; + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + rnp_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * rnp_free_q_vectors - Free memory 
allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void rnp_free_q_vectors(struct rnp_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_rx_queues = 0; + adapter->num_tx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + rnp_free_q_vector(adapter, v_idx); +} + +static void rnp_reset_interrupt_capability(struct rnp_adapter *adapter) +{ + if (adapter->flags & RNP_FLAG_MSIX_ENABLED) + pci_disable_msix(adapter->pdev); + else if (adapter->flags & RNP_FLAG_MSI_CAPABLE) + pci_disable_msi(adapter->pdev); + + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + adapter->q_vector_off = 0; + + /* frist clean msix flags */ + adapter->flags &= (~RNP_FLAG_MSIX_ENABLED); + adapter->flags &= (~RNP_FLAG_MSI_ENABLED); +} + +/** + * rnp_set_interrupt_capability - set MSI-X or MSI if supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +static int rnp_set_interrupt_capability(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + int vector, v_budget, err = 0; + int irq_mode_back = adapter->irq_mode; + + v_budget = min_t(int, adapter->num_tx_queues, adapter->num_rx_queues); + /* in one ring mode should reset v_budget */ +#ifdef RNP_MAX_RINGS + v_budget = min_t(int, v_budget, RNP_MAX_RINGS); +#else + v_budget = min_t(int, v_budget, num_online_cpus()); +#endif + v_budget += adapter->num_other_vectors; + + v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors); + + if (adapter->irq_mode == irq_mode_msix) { + adapter->msix_entries = kcalloc( + v_budget, sizeof(struct msix_entry), GFP_KERNEL); + + if (!adapter->msix_entries) { + rnp_err("alloc msix_entries failed!\n"); + return -EINVAL; + } + dbg("[%s] adapter:%p msix_entry:%p\n", __func__, adapter, + adapter->msix_entries); + + for (vector = 0; vector < v_budget; vector++) + adapter->msix_entries[vector].entry = vector; + + err = rnp_acquire_msix_vectors(adapter, v_budget); + if (!err) { + if (adapter->num_other_vectors) + adapter->q_vector_off = 1; + rnp_dbg("adapter%d alloc vectors: cnt:%d [%d~%d] num_q_vectors:%d\n", + adapter->bd_number, v_budget, + adapter->q_vector_off, + adapter->q_vector_off + v_budget - 1, + adapter->num_q_vectors); + adapter->flags |= RNP_FLAG_MSIX_ENABLED; + + goto out; + } + /* if has msi capability try it */ + if (adapter->flags & RNP_FLAG_MSI_CAPABLE) + adapter->irq_mode = irq_mode_msi; + kfree(adapter->msix_entries); + rnp_dbg("acquire msix failed, try to use msi\n"); + } else { + rnp_dbg("adapter%d not in msix mode\n", adapter->bd_number); + } + /* if has msi capability or set irq_mode */ + if (adapter->irq_mode == irq_mode_msi) { + err = pci_enable_msi(adapter->pdev); + if (err) { + rnp_dbg("Failed to allocate MSI interrupt, falling back to legacy. 
Error"); + } else { + /* msi mode use only 1 irq */ + adapter->flags |= RNP_FLAG_MSI_ENABLED; + } + } + /* write back origin irq_mode */ + adapter->irq_mode = irq_mode_back; + /* legacy and msi only 1 vectors */ + adapter->num_q_vectors = 1; +out: + return err; +} + +static void rnp_print_ring_info(struct rnp_adapter *adapter) +{ + int i; + struct rnp_ring *ring; + struct rnp_q_vector *q_vector; + + rnp_dbg("tx_queue count %d\n", adapter->num_tx_queues); + rnp_dbg("queue-mapping :\n"); + for (i = 0; i < adapter->num_tx_queues; i++) { + ring = adapter->tx_ring[i]; + rnp_dbg(" queue %d , physical ring %d\n", i, + ring->rnp_queue_idx); + } + rnp_dbg("rx_queue count %d\n", adapter->num_rx_queues); + rnp_dbg("queue-mapping :\n"); + for (i = 0; i < adapter->num_rx_queues; i++) { + ring = adapter->rx_ring[i]; + rnp_dbg(" queue %d , physical ring %d\n", i, + ring->rnp_queue_idx); + } + rnp_dbg("q_vector count %d\n", adapter->num_q_vectors); + rnp_dbg("vector-queue mapping:\n"); + for (i = 0; i < adapter->num_q_vectors; i++) { + q_vector = adapter->q_vector[i]; + rnp_dbg("vector %d\n", i); + rnp_for_each_ring(ring, q_vector->tx) + rnp_dbg(" tx physical ring %d\n", ring->rnp_queue_idx); + + rnp_for_each_ring(ring, q_vector->rx) + rnp_dbg(" rx physical ring %d\n", ring->rnp_queue_idx); + } +} + +/** + * rnp_init_interrupt_scheme - Determine proper interrupt scheme + * @adapter: board private structure to initialize + * + * We determine which interrupt scheme to use based on... + * - Hardware queue count (num_*_queues) + * - defined by miscellaneous hardware support/features (RSS, etc.) + **/ +int rnp_init_interrupt_scheme(struct rnp_adapter *adapter) +{ + int err; + + /* Number of supported queues */ + rnp_set_num_queues(adapter); + + /* Set interrupt mode */ + err = rnp_set_interrupt_capability(adapter); + if (err) { + e_dev_err("Unable to get interrupt\n"); + goto err_set_interrupt; + } + + err = rnp_alloc_q_vectors(adapter); + if (err) { + e_dev_err("Unable to allocate memory for queue vectors\n"); + goto err_alloc_q_vectors; + } + rnp_cache_ring_register(adapter); + + DPRINTK(PROBE, INFO, + "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n\n", + (adapter->num_rx_queues > 1) ? 
"Enabled" : "Disabled", + adapter->num_rx_queues, adapter->num_tx_queues); + rnp_print_ring_info(adapter); + + set_bit(__RNP_DOWN, &adapter->state); + + return 0; + +err_alloc_q_vectors: + rnp_reset_interrupt_capability(adapter); +err_set_interrupt:; + return err; +} + +/** + * rnp_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +void rnp_clear_interrupt_scheme(struct rnp_adapter *adapter) +{ + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + + rnp_free_q_vectors(adapter); + rnp_reset_interrupt_capability(adapter); +} + +/** + * rnp_tx_ctxtdesc - Send a control desc to hw + * @tx_ring: target ring of this control desc + * @mss_seg_len: mss length + * @l4_hdr_len: l4 length + * @tunnel_hdr_len: tunnel_hdr_len + * @inner_vlan_tag: inner_vlan_tag + * @type_tucmd: cmd + * + **/ +void rnp_tx_ctxtdesc(struct rnp_ring *tx_ring, u32 mss_len_vf_num, + u32 inner_vlan_tunnel_len, int ignore_vlan, bool crc_pad) +{ + struct rnp_tx_ctx_desc *context_desc; + u16 i = tx_ring->next_to_use; + struct rnp_adapter *adapter = RING2ADAPT(tx_ring); + u32 type_tucmd = 0; + + context_desc = RNP_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= RNP_TXD_CTX_CTRL_DESC; + + if (adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING) { + if (!crc_pad) + type_tucmd |= RNP_TXD_MTI_CRC_PAD_CTRL; + /* close mac padding */ + } + + if (tx_ring->ring_flags & RNP_RING_OUTER_VLAN_FIX) { +#define VLAN_MASK (0x0000ffff) +#define VLAN_INSERT (0x00800000) + if (inner_vlan_tunnel_len & VLAN_MASK) + type_tucmd |= VLAN_INSERT; + + } else { + if (inner_vlan_tunnel_len & 0x00ffff00) { + /* if a inner vlan */ + type_tucmd |= RNP_TXD_CMD_INNER_VLAN; + } + } + + context_desc->mss_len_vf_num = cpu_to_le32(mss_len_vf_num); + context_desc->inner_vlan_tunnel_len = + cpu_to_le32(inner_vlan_tunnel_len); + context_desc->resv_cmd = cpu_to_le32(type_tucmd); + context_desc->resv = 0; + if (tx_ring->q_vector->adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + if (ignore_vlan) + context_desc->inner_vlan_tunnel_len |= + VF_VEB_IGNORE_VLAN; + } + buf_dump_line("ctx ", __LINE__, context_desc, sizeof(*context_desc)); +} + +void rnp_maybe_tx_ctxtdesc(struct rnp_ring *tx_ring, + struct rnp_tx_buffer *first, u32 ignore_vlan) +{ + /* sriov mode pf use the last vf */ + if (first->ctx_flag) { + rnp_tx_ctxtdesc(tx_ring, first->mss_len_vf_num, + first->inner_vlan_tunnel_len, ignore_vlan, + first->gso_need_padding); + } +} + +void rnp_store_reta(struct rnp_adapter *adapter) +{ + u32 i, reta_entries = rnp_rss_indir_tbl_entries(adapter); + struct rnp_hw *hw = &adapter->hw; + u32 reta = 0; + /* relative with rss table */ + struct rnp_ring *rx_ring; + + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + reta = adapter->rss_indir_tbl[i]; + } else { + rx_ring = adapter->rx_ring[adapter->rss_indir_tbl[i]]; + reta = rx_ring->rnp_queue_idx; + } + hw->rss_indir_tbl[i] = reta; + } + hw->ops.set_rss_table(hw); +} + +void rnp_store_key(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + + hw->ops.set_rss_key(hw, sriov_flag); +} + +int rnp_init_rss_key(struct rnp_adapter *adapter) +{ + struct rnp_hw 
*hw = &adapter->hw; + bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + + /* only init rss key once */ + /* no change rss key if user input one */ + if (!adapter->rss_key_setup_flag) { + netdev_rss_key_fill(adapter->rss_key, RNP_RSS_KEY_SIZE); + adapter->rss_key_setup_flag = 1; + } + hw->ops.set_rss_key(hw, sriov_flag); + + return 0; +} + +int rnp_init_rss_table(struct rnp_adapter *adapter) +{ + int rx_nums = adapter->num_rx_queues; + int i, j; + struct rnp_hw *hw = &adapter->hw; + struct rnp_ring *rx_ring; + u32 reta = 0; + u32 reta_entries = rnp_rss_indir_tbl_entries(adapter); + + if (adapter->priv_flags & RNP_PRIV_FLAG_OLD_VF_QUEUE) { + if (rx_nums > 2) + rx_nums = 2; + } + + if (adapter->flags & RNP_FLAG_DCB_ENABLED) { + rx_nums = rx_nums / adapter->num_tc; + for (i = 0, j = 0; i < 8; i++) { + adapter->rss_tc_tbl[i] = j; + hw->rss_tc_tbl[i] = j; + j = (j + 1) % adapter->num_tc; + } + } else { + for (i = 0, j = 0; i < 8; i++) { + hw->rss_tc_tbl[i] = 0; + adapter->rss_tc_tbl[i] = 0; + } + } + + /* adapter->num_q_vectors is not correct */ + for (i = 0, j = 0; i < reta_entries; i++) { + /* init with default value */ + if (!adapter->rss_tbl_setup_flag) + adapter->rss_indir_tbl[i] = j; + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + /* in sriov mode reta in [0, rx_nums] */ + reta = j; + } else { + /* in no sriov, reta is real ring number */ + rx_ring = adapter->rx_ring[adapter->rss_indir_tbl[i]]; + reta = rx_ring->rnp_queue_idx; + } + /* store rss_indir_tbl */ + hw->rss_indir_tbl[i] = reta; + + j = (j + 1) % rx_nums; + } + /* tbl only init once */ + adapter->rss_tbl_setup_flag = 1; + + hw->ops.set_rss_table(hw); + return 0; +} + +void rnp_setup_dma_rx(struct rnp_adapter *adapter, int count_in_dw) +{ + struct rnp_hw *hw = &adapter->hw; + u32 data; + + data = rd32(hw, RNP_DMA_CONFIG); + data &= (0x00000ffff); + data |= (count_in_dw << 16); + wr32(hw, RNP_DMA_CONFIG, data); +} + +/* setup to the hw */ +s32 rnp_fdir_write_perfect_filter(int fdir_mode, struct rnp_hw *hw, + union rnp_atr_input *filter, u16 hw_id, + u8 queue, bool prio_flag) +{ + if (filter->formatted.flow_type == RNP_ATR_FLOW_TYPE_ETHER) + hw->ops.set_layer2_remapping(hw, filter, hw_id, queue, + prio_flag); + else + hw->ops.set_tuple5_remapping(hw, filter, hw_id, queue, + prio_flag); + + return 0; +} + +s32 rnp_fdir_erase_perfect_filter(int fdir_mode, struct rnp_hw *hw, + union rnp_atr_input *input, u16 pri_id) +{ + /* just disable filter */ + if (input->formatted.flow_type == RNP_ATR_FLOW_TYPE_ETHER) { + hw->ops.clr_layer2_remapping(hw, pri_id); + dbg("disable layer2 %d\n", pri_id); + } else { + hw->ops.clr_tuple5_remapping(hw, pri_id); + dbg("disable tuple5 %d\n", pri_id); + } + + return 0; +} + +u32 rnp_tx_desc_unused_sw(struct rnp_ring *tx_ring) +{ + u16 ntu = tx_ring->next_to_use; + u16 ntc = tx_ring->next_to_clean; + u16 count = tx_ring->count; + + return ((ntu >= ntc) ? (count - ntu + ntc) : (ntc - ntu)); +} + +u32 rnp_rx_desc_used_hw(struct rnp_hw *hw, struct rnp_ring *rx_ring) +{ + u32 head = ring_rd32(rx_ring, RNP_DMA_REG_RX_DESC_BUF_HEAD); + u32 tail = ring_rd32(rx_ring, RNP_DMA_REG_RX_DESC_BUF_TAIL); + u16 count = rx_ring->count; + + return ((tail >= head) ? (count - tail + head) : (head - tail)); +} + +u32 rnp_tx_desc_unused_hw(struct rnp_hw *hw, struct rnp_ring *tx_ring) +{ + u32 head = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_HEAD); + u32 tail = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_TAIL); + u16 count = tx_ring->count; + + return ((tail >= head) ? 
(count - tail + head) : (head - tail)); +} + +s32 rnp_disable_rxr_maxrate(struct net_device *netdev, u8 queue_index) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct rnp_ring *rx_ring = adapter->rx_ring[queue_index]; + u32 reg_idx = rx_ring->rnp_queue_idx; + + /* disable which dma ring in maxrate limit mode */ + wr32(hw, RNP_SELECT_RING_EN(reg_idx), 0); + /* Clear Tx Ring maxrate */ + wr32(hw, RNP_RX_RING_MAXRATE(reg_idx), 0); + + return 0; +} + +s32 rnp_enable_rxr_maxrate(struct net_device *netdev, u8 queue_index, + u32 maxrate) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct rnp_ring *rx_ring = adapter->rx_ring[queue_index]; + u32 reg_idx = rx_ring->rnp_queue_idx; + u32 real_rate = maxrate / 16; + + if (!real_rate) + return -EINVAL; + + wr32(hw, RNP_RING_FC_ENABLE, true); + /* disable which dma ring in maxrate limit mode */ + wr32(hw, RNP_SELECT_RING_EN(reg_idx), true); + /* Clear Tx Ring maxrate */ + wr32(hw, RNP_RX_RING_MAXRATE(reg_idx), real_rate); + + return 0; +} diff --git a/drivers/net/ethernet/mucse/rnp/rnp_main.c b/drivers/net/ethernet/mucse/rnp/rnp_main.c new file mode 100755 index 0000000000000000000000000000000000000000..4ba19bc127bd04495a35d5e94fa01017d9c09888 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_main.c @@ -0,0 +1,9448 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef NETIF_F_HW_TC +#include +#include +#include +#endif + +#include "rnp_tc_u32_parse.h" +#include "rnp_common.h" +#include "rnp.h" +#include "rnp_dcb.h" +#include "rnp_sriov.h" +#include "rnp_ptp.h" +#include "rnp_ethtool.h" +#include "rnp_mpe.h" + +#ifdef HAVE_XDP_SOCK_DRV +#include +#endif + +#ifdef HAVE_UDP_ENC_RX_OFFLOAD +#include +#include +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ +#ifdef HAVE_VXLAN_RX_OFFLOAD +#include +#endif /* HAVE_VXLAN_RX_OFFLOAD */ + +#ifdef CONFIG_ARM64 +#define NO_BQL_TEST +#endif + +#define USE_NUMA_MEMORY +#define SUPPORT_IRQ_AFFINITY_CHANGE +#define NO_VU440 + +char rnp_driver_name[] = "rnp"; +static const char rnp_driver_string[] = + "mucse 1/10/25/40 Gigabit PCI Express Network Driver"; +#define DRV_VERSION "1.0.1-rc16" +#include "version.h" + +const char rnp_driver_version[] = DRV_VERSION; +static const char rnp_copyright[] = + "Copyright (c) 2020-2024 mucse Corporation."; + +#ifndef NO_VU440 +extern struct rnp_info rnp_vu440_info; +#endif +extern struct rnp_info rnp_n10_info; +extern struct rnp_info rnp_n400_info; + +static struct rnp_info *rnp_info_tbl[] = { +#ifndef NO_VU440 + [board_vu440] = &rnp_vu440_info, +#endif + [board_n10] = &rnp_n10_info, + [board_n400] = &rnp_n400_info, +}; + +static int register_mbx_irq(struct rnp_adapter *adapter); +static void remove_mbx_irq(struct rnp_adapter *adapter); + +#ifdef CONFIG_RNP_DISABLE_PACKET_SPLIT +static bool rnp_alloc_mapped_skb(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *bi); +#else +static void rnp_pull_tail(struct sk_buff *skb); +#ifdef OPTM_WITH_LPAGE +static bool rnp_alloc_mapped_page(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *bi, + union rnp_rx_desc *rx_desc, u16 bufsz, + u64 fun_id); + +static void rnp_put_rx_buffer(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer); +#else +static bool 
rnp_alloc_mapped_page(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *bi); +static void rnp_put_rx_buffer(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + struct sk_buff *skb); +#endif + +#endif + +static struct pci_device_id rnp_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N10), + .driver_data = board_n10 }, /* n10 40G 10G */ + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N10_X1), + .driver_data = board_n10 }, /* n10 40G 10G */ + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N10_TP), + .driver_data = board_n10 }, /* n10 10G TP */ + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N400), + .driver_data = board_n400 }, /* n400 4port 1G */ + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N400C), + .driver_data = board_n400 }, /* n400 4port 1G */ + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N400_X1), + .driver_data = board_n10 }, /* n400 1port 10G/1G */ + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N400C_X1), + .driver_data = board_n10 }, /* n400 1port 10G/1G */ + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N10C), + .driver_data = board_n10 }, /* n10c 40G 10G */ + /* required last entry */ + { + 0, + }, +}; + +MODULE_DEVICE_TABLE(pci, rnp_pci_tbl); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0000); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +static unsigned int fix_eth_name; +module_param(fix_eth_name, uint, 0000); +MODULE_PARM_DESC(fix_eth_name, "set eth adapter name to rnpXX"); + +static int module_enable_ptp = 1; +module_param(module_enable_ptp, uint, 0000); +MODULE_PARM_DESC(module_enable_ptp, "enable ptp feature, disabled default"); + +unsigned int mpe_src_port; +module_param(mpe_src_port, uint, 0000); +MODULE_PARM_DESC(mpe_src_port, "mpe src port"); + +unsigned int mpe_pkt_version; +module_param(mpe_pkt_version, uint, 0000); +MODULE_PARM_DESC(mpe_pkt_version, "ipv4 or ipv6 src port"); + +MODULE_AUTHOR("Mucse Corporation, "); +MODULE_DESCRIPTION("Mucse(R) 1/10/25/40 Gigabit PCI Express Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static struct workqueue_struct *rnp_wq; +static int enable_hi_dma; +extern void rnp_service_timer(struct timer_list *t); + +void rnp_service_event_schedule(struct rnp_adapter *adapter) +{ + if (!test_bit(__RNP_DOWN, &adapter->state) && + !test_and_set_bit(__RNP_SERVICE_SCHED, &adapter->state)) + queue_work(rnp_wq, &adapter->service_task); +} + +static void rnp_service_event_complete(struct rnp_adapter *adapter) +{ + BUG_ON(!test_bit(__RNP_SERVICE_SCHED, &adapter->state)); + + /* flush memory to make sure state is correct before next watchdog */ + smp_mb__before_atomic(); + clear_bit(__RNP_SERVICE_SCHED, &adapter->state); +} + + +/** + * rnp_set_ring_vector - set the ring_vector registers, mapping interrupt + * causes to vectors + * + * @adapter: pointer to adapter struct + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + * + */ +static void rnp_set_ring_vector(struct rnp_adapter *adapter, u8 rnp_queue, + u8 rnp_msix_vector) +{ + struct rnp_hw *hw = &adapter->hw; + u32 data = 0; + + data = hw->pfvfnum << 24; + data |= (rnp_msix_vector << 8); + data |= (rnp_msix_vector << 0); + + DPRINTK(IFUP, INFO, + "Set Ring-Vector queue:%d (reg:0x%x) <-- Rx-MSIX:%d, Tx-MSIX:%d\n", + rnp_queue, RING_VECTOR(rnp_queue), rnp_msix_vector, + rnp_msix_vector); + + rnp_wr_reg(hw->ring_msix_base + RING_VECTOR(rnp_queue), data); +} 
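For reference, a small stand-alone sketch of the ring-to-vector word written by rnp_set_ring_vector() above; the field layout is inferred from the shifts in that function, and the example value is illustrative only, not part of the patch:

#include <stdint.h>
#include <stdio.h>

/*
 * Mirror of the encoding in rnp_set_ring_vector(): the PF/VF number goes
 * into the top byte and the MSI-X vector is written into both low byte
 * fields, so Rx and Tx events of the queue share one vector (see the
 * Rx-MSIX/Tx-MSIX debug print in the driver).
 */
static uint32_t ring_vector_word(uint32_t pfvfnum, uint32_t msix_vector)
{
	return (pfvfnum << 24) | (msix_vector << 8) | msix_vector;
}

int main(void)
{
	/* PF/VF number 0, queue mapped to MSI-X vector 3 -> 0x00000303 */
	printf("0x%08x\n", (unsigned int)ring_vector_word(0, 3));
	return 0;
}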
+ +static void rnp_unmap_and_free_tx_resource(struct rnp_ring *ring, + struct rnp_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* tx_buffer must be completely set up in the transmit path */ +} + +static u64 rnp_get_tx_completed(struct rnp_ring *ring) +{ + return ring->stats.packets; +} + +static u64 rnp_get_tx_pending(struct rnp_ring *ring) +{ + u32 head = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_HEAD); + u32 tail = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_TAIL); + + if (head != tail) + return (head < tail) ? tail - head : + (tail + ring->count - head); + + return 0; +} + +static inline bool rnp_check_tx_hang(struct rnp_ring *tx_ring) +{ + u32 tx_done = rnp_get_tx_completed(tx_ring); + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; + u32 tx_pending = rnp_get_tx_pending(tx_ring); + bool ret = false; + + clear_check_for_tx_hang(tx_ring); + + /* + * Check for a hung queue, but be thorough. This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. The + * ARMED bit is set to indicate a potential hang. The + * bit is cleared if a pause frame is received to remove + * false hang detection due to PFC or 802.3x frames. By + * requiring this to fail twice we avoid races with + * pfc clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. + */ + if ((tx_done_old == tx_done) && tx_pending) { + /* make sure it is true for two checks in a row */ + ret = test_and_set_bit(__RNP_HANG_CHECK_ARMED, &tx_ring->state); + } else { + /* update completed stats and continue */ + tx_ring->tx_stats.tx_done_old = tx_done; + /* reset the countdown */ + clear_bit(__RNP_HANG_CHECK_ARMED, &tx_ring->state); + } + return ret; +} + +/** + * rnp_tx_timeout_reset - initiate reset due to Tx timeout + * @adapter: driver private struct + **/ +static void rnp_tx_timeout_reset(struct rnp_adapter *adapter) +{ + /* Do the reset outside of interrupt context */ + if (!test_bit(__RNP_DOWN, &adapter->state)) { + adapter->flags2 |= RNP_FLAG2_RESET_REQUESTED; + e_warn(drv, "initiating reset due to tx timeout\n"); + rnp_service_event_schedule(adapter); + } +} + +static void rnp_check_restart_tx(struct rnp_q_vector *q_vector, + struct rnp_ring *tx_ring) +{ + struct rnp_adapter *adapter = q_vector->adapter; +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (likely(netif_carrier_ok(tx_ring->netdev) && + (rnp_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+ */ + smp_mb(); +#ifdef HAVE_TX_MQ + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !test_bit(__RNP_DOWN, &adapter->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } +#else + if (__netif_queue_stopped(tx_ring->netdev) && + !test_bit(__RNP_DOWN, &adapter->state)) { + netif_wake_queue(tx_ring->netdev); + ++tx_ring->tx_stats.restart_queue; + } + +#endif + } +} + +/** + * rnp_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: structure containing interrupt and ring information + * @tx_ring: tx ring to clean + **/ +static bool rnp_clean_tx_irq(struct rnp_q_vector *q_vector, + struct rnp_ring *tx_ring, int napi_budget) +{ + struct rnp_adapter *adapter = q_vector->adapter; + struct rnp_tx_buffer *tx_buffer; + struct rnp_tx_desc *tx_desc; + u64 total_bytes = 0, total_packets = 0; + int budget = q_vector->tx.work_limit; + int i = tx_ring->next_to_clean; + + if (test_bit(__RNP_DOWN, &adapter->state)) + return true; + tx_ring->tx_stats.poll_count++; + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = RNP_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + struct rnp_tx_desc *eop_desc = tx_buffer->next_to_watch; + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + rmb(); + + /* if eop DD is not set pending work has not been completed */ + if (!(eop_desc->vlan_cmd & cpu_to_le32(RNP_TXD_STAT_DD))) + break; + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + napi_consume_skb(tx_buffer->skb, napi_budget); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = RNP_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = RNP_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + tx_ring->tx_stats.tx_clean_count += total_packets; + tx_ring->tx_stats.tx_clean_times++; + if (tx_ring->tx_stats.tx_clean_times > 10) { + tx_ring->tx_stats.tx_clean_times = 0; + tx_ring->tx_stats.tx_clean_count = 0; + } + + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + tx_ring->tx_stats.send_done_bytes += total_bytes; +#ifdef NO_BQL_TEST +#else + 
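+	/*
+	 * Byte queue limits: report the packets/bytes completed in this poll
+	 * back to the stack.  Compiled out when NO_BQL_TEST is defined
+	 * (set for CONFIG_ARM64 earlier in this file).
+	 */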
netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, + total_bytes); +#endif + + if (!(q_vector->vector_flags & RNP_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS)) { +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (likely(netif_carrier_ok(tx_ring->netdev) && + (rnp_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !test_bit(__RNP_DOWN, &adapter->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } + } + } + + /* now we start tx queue later */ + return !!budget; +} + +static inline void rnp_rx_hash(struct rnp_ring *ring, + union rnp_rx_desc *rx_desc, struct sk_buff *skb) +{ + int rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; +#define RNP_RSS_TYPE_MASK 0xc0 + rss_type = rx_desc->wb.cmd & RNP_RSS_TYPE_MASK; + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.rss_hash), + rss_type ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); +} + +/** + * rnp_rx_checksum - indicate in skb if hw indicated a good cksum + * @ring: structure containing ring specific data + * @rx_desc: current Rx descriptor being processed + * @skb: skb currently being received and modified + **/ +static inline void rnp_rx_checksum(struct rnp_ring *ring, + union rnp_rx_desc *rx_desc, + struct sk_buff *skb) +{ + bool encap_pkt = false; + + skb_checksum_none_assert(skb); + /* Rx csum disabled */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + if (!(ring->ring_flags & RNP_RING_NO_TUNNEL_SUPPORT)) { + if (rnp_get_stat(rx_desc, RNP_RXD_STAT_TUNNEL_MASK) == + RNP_RXD_STAT_TUNNEL_VXLAN) { + encap_pkt = true; +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) + skb->encapsulation = 1; +#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ + skb->ip_summed = CHECKSUM_NONE; + } + } + /* if outer L3/L4 error */ + /* must in promisc mode or rx-all mode */ + if (rnp_test_staterr(rx_desc, RNP_RXD_STAT_ERR_MASK)) { + return; + } + ring->rx_stats.csum_good++; + /* at least it is a ip packet which has ip checksum */ + + /* It must be a TCP or UDP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + if (encap_pkt) { +#ifdef HAVE_SKBUFF_CSUM_LEVEL + /* If we checked the outer header let the stack know */ + skb->csum_level = 1; +#endif /* HAVE_SKBUFF_CSUM_LEVEL */ + } +} + +static inline void rnp_update_rx_tail(struct rnp_ring *rx_ring, u32 val) +{ + rx_ring->next_to_use = val; +#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; +#endif + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + wmb(); + rnp_wr_reg(rx_ring->tail, val); +} + +#if (PAGE_SIZE < 8192) +#define RNP_MAX_2K_FRAME_BUILD_SKB (RNP_RXBUFFER_1536 - NET_IP_ALIGN) +#define RNP_2K_TOO_SMALL_WITH_PADDING \ + ((NET_SKB_PAD + RNP_RXBUFFER_1536) > SKB_WITH_OVERHEAD(RNP_RXBUFFER_2K)) + +static inline int rnp_compute_pad(int rx_buf_len) +{ + int page_size, pad_size; + + page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); + pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len; + + return pad_size; +} + +static inline int rnp_skb_pad(void) +{ + int rx_buf_len; + + /* If a 2K buffer cannot handle a standard Ethernet frame then + * optimize padding for a 3K buffer instead of a 1.5K buffer. + * + * For a 3K buffer we need to add enough padding to allow for + * tailroom due to NET_IP_ALIGN possibly shifting us out of + * cache-line alignment. + */ + if (RNP_2K_TOO_SMALL_WITH_PADDING) + rx_buf_len = RNP_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN); + else + rx_buf_len = RNP_RXBUFFER_1536; + + /* if needed make room for NET_IP_ALIGN */ + rx_buf_len -= NET_IP_ALIGN; + return rnp_compute_pad(rx_buf_len); +} + +#define RNP_SKB_PAD rnp_skb_pad() +#else /* PAGE_SIZE < 8192 */ +#define RNP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#endif + +/** + * rnp_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. + **/ +static void rnp_process_skb_fields(struct rnp_ring *rx_ring, + union rnp_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_hw *hw = &adapter->hw; + + rnp_rx_hash(rx_ring, rx_desc, skb); + rnp_rx_checksum(rx_ring, rx_desc, skb); + + // ncsi card should check other vf vlan + if (((hw->ncsi_en) || +#ifdef NETIF_F_HW_VLAN_CTAG_RX + (dev->features & NETIF_F_HW_VLAN_CTAG_RX) +#ifdef NETIF_F_HW_VLAN_STAG_RX + || (dev->features & NETIF_F_HW_VLAN_STAG_RX)) && +#else + ) && +#endif +#else /* NETIF_F_HW_VLAN_CTAG_RX */ + (dev->features & NETIF_F_HW_VLAN_RX) && +#endif + rnp_test_staterr(rx_desc, RNP_RXD_STAT_VLAN_VALID) && + !ignore_veb_vlan(rx_ring->q_vector->adapter, rx_desc)) { + + if (rx_ring->ring_flags & RNP_RING_DOUBLE_VLAN_SUPPORT) { + /* check outer vlan first */ + if (rnp_test_ext_cmd(rx_desc, REV_OUTER_VLAN)) { + u16 vid_inner = le16_to_cpu(rx_desc->wb.vlan); + u16 vid_outer; + u16 vlan_tci = htons(ETH_P_8021Q); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + vid_inner); + /* check outer vlan type */ + if (rx_ring->ring_flags & + RNP_RING_STAGS_SUPPORT) { + if (rnp_test_staterr( + rx_desc, + RNP_RXD_STAT_STAG)) { + switch (rx_ring->q_vector + ->adapter + ->outer_vlan_type) { + case outer_vlan_type_88a8: + vlan_tci = htons( + ETH_P_8021AD); + break; +#ifdef ETH_P_QINQ1 + case outer_vlan_type_9100: + vlan_tci = htons( + ETH_P_QINQ1); + break; +#endif +#ifdef ETH_P_QINQ2 + case outer_vlan_type_9200: + vlan_tci = htons( + ETH_P_QINQ2); + break; +#endif + default: + vlan_tci = htons( + ETH_P_8021AD); + break; + } + } else { + vlan_tci = htons(ETH_P_8021Q); + } + } else { + vlan_tci = htons(ETH_P_8021Q); + } + vid_outer = le16_to_cpu(rx_desc->wb.mark); + /* if in stags mode should ignore only stags */ + if (adapter->flags2 & + RNP_FLAG2_VLAN_STAGS_ENABLED) { + /* push outer in if not 
equal stags or cvlan */ + if ((vid_outer != adapter->stags_vid) || + (vlan_tci == htons(ETH_P_8021Q))) { + /* push outer inner */ + skb = __vlan_hwaccel_push_inside( + skb); + __vlan_hwaccel_put_tag( + skb, vlan_tci, + vid_outer); + } + } else { + /* push outer */ + skb = __vlan_hwaccel_push_inside(skb); + __vlan_hwaccel_put_tag(skb, vlan_tci, + vid_outer); + } + } else { + /* only inner vlan */ + u16 vid = le16_to_cpu(rx_desc->wb.vlan); + if (rx_ring->ring_flags & RNP_RING_STAGS_SUPPORT) { + if (rnp_test_staterr(rx_desc, + RNP_RXD_STAT_STAG)) { + if ((adapter->flags2 & + RNP_FLAG2_VLAN_STAGS_ENABLED) && + (vid == + adapter->stags_vid)) { + } else + __vlan_hwaccel_put_tag( + skb, + htons(ETH_P_8021AD), + vid); + + } else { + __vlan_hwaccel_put_tag(skb, + htons(ETH_P_8021Q), + vid); + } + } else { + __vlan_hwaccel_put_tag( + skb, htons(ETH_P_8021Q), vid); + } + } + } else { + u16 vid = le16_to_cpu(rx_desc->wb.vlan); + if (rx_ring->ring_flags & RNP_RING_STAGS_SUPPORT) { + if (rnp_test_staterr(rx_desc, + RNP_RXD_STAT_STAG)) { + __vlan_hwaccel_put_tag( + skb, htons(ETH_P_8021AD), vid); + } else { + __vlan_hwaccel_put_tag( + skb, htons(ETH_P_8021Q), vid); + } + } else { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + vid); + } + } + rx_ring->rx_stats.vlan_remove++; + } + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, dev); +} + +static void rnp_rx_skb(struct rnp_q_vector *q_vector, struct sk_buff *skb) +{ + struct rnp_adapter *adapter = q_vector->adapter; + + if (!(adapter->flags & RNP_FLAG_IN_NETPOLL)) + napi_gro_receive(&q_vector->napi, skb); + else + netif_rx(skb); +} + +/* drop this packets if error */ +static bool rnp_check_csum_error(struct rnp_ring *rx_ring, + union rnp_rx_desc *rx_desc, unsigned int size, + unsigned int *driver_drop_packets) +{ + bool err = false; + bool drop_err = true; + + struct net_device *netdev = rx_ring->netdev; + + if (netdev->features & NETIF_F_RXCSUM) { + if (unlikely( + rnp_test_staterr(rx_desc, RNP_RXD_STAT_ERR_MASK))) { + rx_debug_printk("rx error: VEB:%s mark:0x%x cmd:0x%x\n", + (rx_ring->q_vector->adapter->flags & + RNP_FLAG_SRIOV_ENABLED) ? + "On" : + "Off", + rx_desc->wb.mark, rx_desc->wb.cmd); + /* push this packet to stack if in promisc mode */ + rx_ring->rx_stats.csum_err++; + + if ((netdev->flags & IFF_PROMISC) || + (netdev->features & NETIF_F_RXALL)) { + drop_err = false; + + } + if (rx_ring->ring_flags & RNP_RING_CHKSM_FIX) { + err = true; + goto skip_fix; + } + if (unlikely(rnp_test_staterr( + rx_desc, + RNP_RXD_STAT_L4_MASK) && + (!(rx_desc->wb.rev1 & + RNP_RX_L3_TYPE_MASK)))) { + rx_ring->rx_stats.csum_err--; + goto skip_fix; + } + /* we ignore sctp csum erro small than 60 */ + if (unlikely(rnp_test_staterr(rx_desc, + RNP_RXD_STAT_SCTP_MASK))) { + if ((size > 60) && + (rx_desc->wb.rev1 & + RNP_RX_L3_TYPE_MASK)) { + if (drop_err) + err = true; + } else { + /* sctp less than 60 hw report err by mistake */ + rx_ring->rx_stats.csum_err--; + } + } else { + if (drop_err) + err = true; + } + } + } +skip_fix: + if (err) { + u32 ntc = rx_ring->next_to_clean + 1; +#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT + struct rnp_rx_buffer *rx_buffer; +#if (PAGE_SIZE < 8192) + unsigned int truesize = rnp_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = + ring_uses_build_skb(rx_ring) ? 
+ SKB_DATA_ALIGN(RNP_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + + /* if eop add drop_packets */ + if (likely(rnp_test_staterr(rx_desc, RNP_RXD_STAT_EOP))) + *driver_drop_packets = *driver_drop_packets + 1; + + /* we are reusing so sync this buffer for CPU use */ + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, + rx_buffer->page_offset, size, + DMA_FROM_DEVICE); + + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +#ifdef OPTM_WITH_LPAGE + rnp_put_rx_buffer(rx_ring, rx_buffer); +#else + rnp_put_rx_buffer(rx_ring, rx_buffer, NULL); +#endif +#endif + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + } + return err; +} + +/** + * rnp_rx_ring_reinit - just reinit rx_ring with new count in ->reset_count + * @rx_ring: rx descriptor ring to transact packets on + */ +static int rnp_rx_ring_reinit(struct rnp_adapter *adapter, struct rnp_ring *rx_ring) +{ + struct rnp_ring *temp_ring; + int err = 0; + + if (rx_ring->count == rx_ring->reset_count) + return 0; + /* stop rx queue */ + + temp_ring = vzalloc(array_size(1, sizeof(struct rnp_ring))); + if (!temp_ring) + goto err_setup; + + rnp_disable_rx_queue(adapter, rx_ring); + /* reinit for this ring */ + memcpy(temp_ring, rx_ring, sizeof(struct rnp_ring)); + /* setup new count */ + temp_ring->count = rx_ring->reset_count; + err = rnp_setup_rx_resources(temp_ring, adapter); + if (err) { + rnp_free_rx_resources(temp_ring); + vfree(temp_ring); + goto err_setup; + } + rnp_free_rx_resources(rx_ring); + memcpy(rx_ring, temp_ring, sizeof(struct rnp_ring)); + rnp_configure_rx_ring(adapter, rx_ring); + /* start rx */ + vfree(temp_ring); + ring_wr32(rx_ring, RNP_DMA_RX_START, 1); + return 0; +err_setup: + return -1; +} + +#ifndef OPTM_WITH_LPAGE +/** + * rnp_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +static bool rnp_alloc_rx_buffers(struct rnp_ring *rx_ring, u16 cleaned_count) +{ + union rnp_rx_desc *rx_desc; + struct rnp_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + u64 fun_id = ((u64)(rx_ring->pfvfnum) << (32 + 24)); + bool err = false; +#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT + u16 bufsz; +#endif + /* nothing to do */ + if (!cleaned_count) + return err; + + rx_desc = RNP_RX_DESC(rx_ring, i); + + BUG_ON(rx_desc == NULL); + + bi = &rx_ring->rx_buffer_info[i]; + + BUG_ON(bi == NULL); + + i -= rx_ring->count; +#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT + bufsz = rnp_rx_bufsz(rx_ring); +#endif + + do { +#ifdef CONFIG_RNP_DISABLE_PACKET_SPLIT + if (!rnp_alloc_mapped_skb(rx_ring, bi)) { + err = true; + break; + } +#else + if (!rnp_alloc_mapped_page(rx_ring, bi)) { + err = true; + break; + } + + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, bufsz, + DMA_FROM_DEVICE); +#endif + + /* + * Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
+ */ +#ifdef CONFIG_RNP_DISABLE_PACKET_SPLIT + rx_desc->pkt_addr = cpu_to_le64(bi->dma + fun_id); +#else + rx_desc->pkt_addr = + cpu_to_le64(bi->dma + bi->page_offset + fun_id); +#endif + /* clean dd */ + rx_desc->resv_cmd = 0; + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = RNP_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the hdr_addr for the next_to_use descriptor */ + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) + rnp_update_rx_tail(rx_ring, i); + + return err; +} + +#endif +#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT +static inline unsigned int rnp_rx_offset(struct rnp_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? RNP_SKB_PAD : 0; +} + +#ifdef OPTM_WITH_LPAGE +/** + * rnp_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +static bool rnp_alloc_rx_buffers(struct rnp_ring *rx_ring, u16 cleaned_count) +{ + union rnp_rx_desc *rx_desc; + struct rnp_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + u64 fun_id = ((u64)(rx_ring->pfvfnum) << (32 + 24)); + u16 bufsz; + bool err = false; + /* nothing to do */ + if (!cleaned_count) + return err; + + rx_desc = RNP_RX_DESC(rx_ring, i); + + BUG_ON(rx_desc == NULL); + + bi = &rx_ring->rx_buffer_info[i]; + + BUG_ON(bi == NULL); + + i -= rx_ring->count; + bufsz = rnp_rx_bufsz(rx_ring); + + do { + int count = 1; + struct page *page; + + if (!rnp_alloc_mapped_page(rx_ring, bi, rx_desc, bufsz, fun_id)) { + err = true; + break; + } + page = bi->page; + + rx_desc->resv_cmd = 0; + + rx_desc++; + i++; + bi++; + + if (unlikely(!i)) { + rx_desc = RNP_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + rx_desc->resv_cmd = 0; + + cleaned_count--; + + while (count < rx_ring->rx_page_buf_nums && cleaned_count) { + dma_addr_t dma; +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif + + bi->page_offset = rx_ring->rx_per_buf_mem * count + + rnp_rx_offset(rx_ring); + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, + bi->page_offset, bufsz, + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + + RNP_RX_DMA_ATTR); +#endif + + if (dma_mapping_error(rx_ring->dev, dma)) { + printk("map second error\n"); + rx_ring->rx_stats.alloc_rx_page_failed++; + break; + } + + bi->dma = dma; + bi->page = page; + page_ref_add(page, USHRT_MAX); + bi->pagecnt_bias = USHRT_MAX; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + 0, bufsz, + DMA_FROM_DEVICE); + + /* + * Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
+ */ + rx_desc->pkt_addr = cpu_to_le64(bi->dma + fun_id); + /* clean dd */ + rx_desc->resv_cmd = 0; + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = RNP_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + count++; + /* clear the hdr_addr for the next_to_use descriptor */ + cleaned_count--; + } + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) + rnp_update_rx_tail(rx_ring, i); + return err; +} +#endif /* OPTM_WITH_LPAGE */ +/** + * rnp_get_headlen - determine size of header for RSC/LRO/GRO/FCOE + * @data: pointer to the start of the headers + * @max_len: total length of section to find headers in + * + * This function is meant to determine the length of headers that will + * be recognized by hardware for LRO, GRO, and RSC offloads. The main + * motivation of doing this is to only perform one pull for IPv4 TCP + * packets so that we can do basic things like calculating the gso_size + * based on the average data per packet. + **/ +static unsigned int rnp_get_headlen(unsigned char *data, unsigned int max_len) +{ + union { + unsigned char *network; + /* l2 headers */ + struct ethhdr *eth; + struct vlan_hdr *vlan; + /* l3 headers */ + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + __be16 protocol; + u8 nexthdr = 0; /* default to not TCP */ + u8 hlen; + + /* this should never happen, but better safe than sorry */ + if (max_len < ETH_HLEN) + return max_len; + + /* initialize network frame pointer */ + hdr.network = data; + + /* set first protocol and move network header forward */ + protocol = hdr.eth->h_proto; + hdr.network += ETH_HLEN; + + /* handle any vlan tag if present */ + if (protocol == htons(ETH_P_8021Q)) { + if ((hdr.network - data) > (max_len - VLAN_HLEN)) + return max_len; + + protocol = hdr.vlan->h_vlan_encapsulated_proto; + hdr.network += VLAN_HLEN; + } + + /* handle L3 protocols */ + if (protocol == htons(ETH_P_IP)) { + if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) + return max_len; + + /* access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct iphdr)) + return hdr.network - data; + + /* record next protocol if header is present */ + if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) + nexthdr = hdr.ipv4->protocol; + } else if (protocol == htons(ETH_P_IPV6)) { + if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) + return max_len; + + /* record next protocol */ + nexthdr = hdr.ipv6->nexthdr; + hlen = sizeof(struct ipv6hdr); + } else { + return hdr.network - data; + } + + /* relocate pointer to start of L4 header */ + hdr.network += hlen; + + /* finally sort out TCP/UDP */ + if (nexthdr == IPPROTO_TCP) { + if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) + return max_len; + + /* access doff as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[12] & 0xF0) >> 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct tcphdr)) + return hdr.network - data; + + hdr.network += hlen; + } else if (nexthdr == IPPROTO_UDP) { + if ((hdr.network - data) > (max_len - sizeof(struct udphdr))) + return max_len; + + hdr.network += sizeof(struct udphdr); + } + + /* + * If everything has gone correctly hdr.network should be the + * data section of the packet and will be the end of the header. + * If not then it probably represents the end of the last recognized + * header. 
+ */ + if ((hdr.network - data) < max_len) + return hdr.network - data; + else + return max_len; +} + +#ifdef OPTM_WITH_LPAGE +/** + * rnp_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool rnp_is_non_eop(struct rnp_ring *rx_ring, union rnp_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(RNP_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ + if (likely(rnp_test_staterr(rx_desc, RNP_RXD_STAT_EOP))) + return false; + /* place skb in next buffer to be received */ + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + + return true; +} + +static bool rnp_alloc_mapped_page(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *bi, + union rnp_rx_desc *rx_desc, u16 bufsz, + u64 fun_id) +{ + struct page *page = bi->page; + dma_addr_t dma; +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + page = dev_alloc_pages(RNP_ALLOC_PAGE_ORDER); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + bi->page_offset = rnp_rx_offset(rx_ring); + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, bi->page_offset, bufsz, + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + RNP_RX_DMA_ATTR); +#endif + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, RNP_ALLOC_PAGE_ORDER); + printk("map failed\n"); + + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + bi->dma = dma; + bi->page = page; + bi->page_offset = rnp_rx_offset(rx_ring); + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + rx_ring->rx_stats.alloc_rx_page++; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0, bufsz, + DMA_FROM_DEVICE); + + /* + * Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
+ */ + rx_desc->pkt_addr = cpu_to_le64(bi->dma + fun_id); + + return true; +} + +#else +static bool rnp_alloc_mapped_page(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + page = dev_alloc_pages(rnp_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, rnp_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + RNP_RX_DMA_ATTR); +#endif + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, rnp_rx_pg_order(rx_ring)); + + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + bi->dma = dma; + bi->page = page; + bi->page_offset = rnp_rx_offset(rx_ring); +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; +#else + bi->pagecnt_bias = 1; +#endif + rx_ring->rx_stats.alloc_rx_page++; + + return true; +} + +/** + * rnp_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool rnp_is_non_eop(struct rnp_ring *rx_ring, union rnp_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 ntc = rx_ring->next_to_clean + 1; +#ifdef CONFIG_RNP_DISABLE_PACKET_SPLIT + struct sk_buff *next_skb; +#endif + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(RNP_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ + if (likely(rnp_test_staterr(rx_desc, RNP_RXD_STAT_EOP))) + return false; +#ifdef CONFIG_RNP_RNP_DISABLE_PACKET_SPLIT + + printk("error spilt detect in disable split mode\n"); +#else + /* place skb in next buffer to be received */ + rx_ring->rx_buffer_info[ntc].skb = skb; +#endif + rx_ring->rx_stats.non_eop_descs++; + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + + return true; +} + +#endif +/** + * rnp_pull_tail - rnp specific version of skb_pull_tail + * @skb: pointer to current skb being adjusted + * + * This function is an rnp specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. 
+ */ +static void rnp_pull_tail(struct sk_buff *skb) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* + * it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* + * we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = rnp_get_headlen(va, RNP_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + skb_frag_off_add(frag, pull_len); + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +static bool rnp_check_src_mac(struct sk_buff *skb, struct net_device *netdev) +{ + char *data = (char *)skb->data; + bool ret = false; + struct netdev_hw_addr *ha; + + if (is_multicast_ether_addr(data)) { + if (0 == memcmp(data + netdev->addr_len, netdev->dev_addr, + netdev->addr_len)) { + dev_kfree_skb_any(skb); + ret = true; + } + /* if src mac equal own mac */ + netdev_for_each_uc_addr(ha, netdev) { + if (0 == memcmp(data + netdev->addr_len, ha->addr, + netdev->addr_len)) { + dev_kfree_skb_any(skb); + ret = true; + } + } + } + return ret; +} + +/** + * rnp_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Check if the skb is valid. In the XDP case it will be an error pointer. + * Return true in this case to abort processing and advance to next + * descriptor. + * + * Check for corrupted packet headers caused by senders on the local L2 + * embedded NIC switch not setting up their Tx Descriptors right. These + * should be very rare. + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. 
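+ *
+ * As a rough example of the padding path: a 42-byte ARP request
+ * (14-byte Ethernet header plus 28-byte ARP payload) is padded up to
+ * the 60-byte ETH_ZLEN minimum by eth_skb_pad(); if that padding fails
+ * the skb has already been freed, so returning true tells the caller
+ * not to touch it again. The SR-IOV source-MAC check only runs when
+ * the ring does not advertise RNP_RING_VEB_MULTI_FIX, presumably
+ * because the embedded switch can loop our own multicasts back to us.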
+ **/ +static bool rnp_cleanup_headers(struct rnp_ring __maybe_unused *rx_ring, + union rnp_rx_desc *rx_desc, struct sk_buff *skb) +{ + struct net_device *netdev = rx_ring->netdev; + struct rnp_adapter *adapter = netdev_priv(netdev); +#ifdef OPTM_WITH_LPAGE +#else + /* XDP packets use error pointer so abort at this point */ + if (IS_ERR(skb)) + return true; +#endif + + /* place header in linear portion of buffer */ + if (!skb_headlen(skb)) + rnp_pull_tail(skb); + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED) && + (!(rx_ring->ring_flags & RNP_RING_VEB_MULTI_FIX))) + return rnp_check_src_mac(skb, rx_ring->netdev); + else + return false; +} + +/** + * rnp_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void rnp_reuse_rx_page(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *old_buff) +{ + struct rnp_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* + * Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls and unnecessary copy of skb. + */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static inline bool rnp_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); +} + +static bool rnp_can_reuse_rx_page(struct rnp_rx_buffer *rx_buffer, int size) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + +#ifdef OPTM_WITH_LPAGE + return false; +#endif + /* avoid re-using remote pages */ + if (unlikely(rnp_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) +#else + if (unlikely((page_count(page) - pagecnt_bias) > 1)) +#endif + return false; +#else + + /* + * The last offset is a bit aggressive in that we assume the + * worst case of FCoE being enabled and using a 3K buffer. + * However this should have minimal impact as the 1K extra is + * still less than one buffer in size. + */ +#define RNP_LAST_OFFSET (SKB_WITH_OVERHEAD(PAGE_SIZE)) + if (rx_buffer->page_offset > (RNP_LAST_OFFSET - size)) + return false; +#endif + +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. + */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } +#else + /* + * Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. 
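+	 * Instead we re-arm the bias one reference at a time: once the
+	 * stack has consumed the reference we were biasing against
+	 * (pagecnt_bias reaches 0), take a single extra page reference so
+	 * the driver always owns at least one. The bulk-update variant
+	 * above does the same thing in batches of USHRT_MAX - 1 so the
+	 * atomic refcount is not touched on every reuse.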
+ */ + if (likely(!pagecnt_bias)) { + page_ref_inc(page); + rx_buffer->pagecnt_bias = 1; + } +#endif + + return true; +} + +/** + * rnp_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @skb: sk_buff to place the data into + * @size: size of data + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. + **/ +static void rnp_add_rx_frag(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + struct sk_buff *skb, unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = rnp_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(RNP_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); + +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +#ifdef OPTM_WITH_LPAGE +static struct rnp_rx_buffer *rnp_get_rx_buffer(struct rnp_ring *rx_ring, + union rnp_rx_desc *rx_desc, + const unsigned int size) +{ + struct rnp_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + prefetchw(rx_buffer->page); + + rx_buf_dump("rx buf", + page_address(rx_buffer->page) + rx_buffer->page_offset, + rx_desc->wb.len); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, 0, size, + DMA_FROM_DEVICE); + /* skip_sync: */ + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} +#else + +static struct rnp_rx_buffer *rnp_get_rx_buffer(struct rnp_ring *rx_ring, + union rnp_rx_desc *rx_desc, + struct sk_buff **skb, + const unsigned int size) +{ + struct rnp_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + prefetchw(rx_buffer->page); + *skb = rx_buffer->skb; + + rx_buf_dump("rx buf", + page_address(rx_buffer->page) + rx_buffer->page_offset, + rx_desc->wb.len); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, + rx_buffer->page_offset, size, + DMA_FROM_DEVICE); + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} +#endif + +#ifdef OPTM_WITH_LPAGE +static void rnp_put_rx_buffer(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer) +{ + struct rnp_q_vector *q_vector = rx_ring->q_vector; + struct rnp_adapter *adapter = q_vector->adapter; + struct rnp_hw *hw = &adapter->hw; +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); + +#endif + if (rnp_can_reuse_rx_page(rx_buffer, hw->dma_split_size)) { + /* hand second half of page back to the ring */ + rnp_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + rnp_rx_bufsz(rx_ring), DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + RNP_RX_DMA_ATTR); +#endif + __page_frag_cache_drain(rx_buffer->page, + 
rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; +} + +#else +static void rnp_put_rx_buffer(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + struct sk_buff *skb) +{ + struct rnp_q_vector *q_vector = rx_ring->q_vector; + struct rnp_adapter *adapter = q_vector->adapter; + struct rnp_hw *hw = &adapter->hw; +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); + +#endif + if (rnp_can_reuse_rx_page(rx_buffer, hw->dma_split_size)) { + /* hand second half of page back to the ring */ + rnp_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + rnp_rx_pg_size(rx_ring), DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + RNP_RX_DMA_ATTR); +#endif + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; + rx_buffer->skb = NULL; +} +#endif + +#ifdef OPTM_WITH_LPAGE +static struct sk_buff *rnp_construct_skb(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + union rnp_rx_desc *rx_desc, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; + unsigned int truesize = SKB_DATA_ALIGN(size); + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, RNP_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + prefetchw(skb->data); + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > RNP_RX_HDR_SIZE) + headlen = rnp_get_headlen(va, RNP_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); + + /* update all of the pointers */ + size -= headlen; + + if (size) { + + skb_add_rx_frag(skb, 0, rx_buffer->page, + (va + headlen) - page_address(rx_buffer->page), + size, truesize); + rx_buffer->page_offset += truesize; + } else { + + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC +static struct sk_buff *rnp_build_skb(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + union rnp_rx_desc *rx_desc, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(size + RNP_SKB_PAD); + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* build an skb around the page buffer */ + skb = build_skb(va - RNP_SKB_PAD, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, RNP_SKB_PAD); + __skb_put(skb, size); + /* record DMA address if this is the start of a + * chain of buffers + */ + + return skb; +} + +#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */ + +#else + +static struct sk_buff *rnp_construct_skb(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + union rnp_rx_desc *rx_desc) +{ + unsigned int size = xdp->data_end - xdp->data; +#if 
(PAGE_SIZE < 8192) + unsigned int truesize = rnp_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = + SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start); +#endif + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(xdp->data); +#if L1_CACHE_BYTES < 128 + prefetch(xdp->data + L1_CACHE_BYTES); +#endif + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, RNP_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + prefetchw(skb->data); + + if (size > RNP_RX_HDR_SIZE) { + + skb_add_rx_frag(skb, 0, rx_buffer->page, + xdp->data - page_address(rx_buffer->page), size, + truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + memcpy(__skb_put(skb, size), xdp->data, + ALIGN(size, sizeof(long))); + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC +static struct sk_buff *rnp_build_skb(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + union rnp_rx_desc *rx_desc) +{ +#ifdef HAVE_XDP_BUFF_DATA_META + unsigned int metasize = xdp->data - xdp->data_meta; + void *va = xdp->data_meta; +#else + void *va = xdp->data; +#endif +#if (PAGE_SIZE < 8192) + unsigned int truesize = rnp_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start); +#endif + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* build an skb around the page buffer */ + skb = build_skb(xdp->data_hard_start, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, xdp->data - xdp->data_hard_start); + __skb_put(skb, xdp->data_end - xdp->data); +#ifdef HAVE_XDP_BUFF_DATA_META + if (metasize) + skb_metadata_set(skb, metasize); +#endif + /* update buffer offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + + return skb; +} + +#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */ +#endif + +#define RNP_XDP_PASS 0 +#define RNP_XDP_CONSUMED 1 +#define RNP_XDP_TX 2 + +#ifndef OPTM_WITH_LPAGE +static void rnp_rx_buffer_flip(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = rnp_rx_pg_size(rx_ring) / 2; + + rx_buffer->page_offset ^= truesize; +#else + unsigned int truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(RNP_SKB_PAD + size) : + SKB_DATA_ALIGN(size); + + rx_buffer->page_offset += truesize; +#endif +} +#endif + +#ifdef OPTM_WITH_LPAGE +/** + * rnp_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the system. + * + * Returns amount of work completed. 
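+ *
+ * Two implementation notes on the loop below: buffers are handed back
+ * to the hardware in batches of RNP_RX_BUFFER_WRITE descriptors so the
+ * tail register is not written once per packet, and if any of those
+ * refill allocations fail the function reports the full budget back to
+ * NAPI so polling continues and the refill is retried on the next pass
+ * instead of leaving the ring empty.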
+ **/ + +static int rnp_clean_rx_irq(struct rnp_q_vector *q_vector, + struct rnp_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + unsigned int err_packets = 0; + unsigned int driver_drop_packets = 0; + struct sk_buff *skb = rx_ring->skb; + struct rnp_adapter *adapter = q_vector->adapter; + u16 cleaned_count = rnp_desc_unused_rx(rx_ring); + bool fail_alloc = false; + + while (likely(total_rx_packets < budget)) { + union rnp_rx_desc *rx_desc; + struct rnp_rx_buffer *rx_buffer; + unsigned int size; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= RNP_RX_BUFFER_WRITE) { + fail_alloc = rnp_alloc_rx_buffers(rx_ring, cleaned_count) || fail_alloc; + cleaned_count = 0; + } + rx_desc = RNP_RX_DESC(rx_ring, rx_ring->next_to_clean); + + rx_buf_dump("rx-desc:", rx_desc, sizeof(*rx_desc)); + rx_debug_printk(" dd set: %s\n", + (rx_desc->wb.cmd & RNP_RXD_STAT_DD) ? "Yes" : + "No"); + + if (!rnp_test_staterr(rx_desc, RNP_RXD_STAT_DD)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + rx_debug_printk( + "queue:%d rx-desc:%d has-data len:%d next_to_clean %d\n", + rx_ring->rnp_queue_idx, rx_ring->next_to_clean, + rx_desc->wb.len, rx_ring->next_to_clean); + + /* handle padding */ + if ((adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) && + (!(adapter->priv_flags & RNP_PRIV_FLAG_PADDING_DEBUG))) { + if (likely(rnp_test_staterr(rx_desc, + RNP_RXD_STAT_EOP))) { + size = le16_to_cpu(rx_desc->wb.len) - + le16_to_cpu(rx_desc->wb.padding_len); + } else { + size = le16_to_cpu(rx_desc->wb.len); + } + } else { + /* size should not zero */ + size = le16_to_cpu(rx_desc->wb.len); + } + + if (!size) + break; + + /* + * should check csum err + * maybe one packet use multiple descs + * no problems hw set all csum_err in multiple descs + * maybe BUG if the last sctp desc less than 60 + */ + if (rnp_check_csum_error(rx_ring, rx_desc, size, + &driver_drop_packets)) { + cleaned_count++; + err_packets++; + if (err_packets + total_rx_packets > budget) + break; + continue; + } + + rx_buffer = rnp_get_rx_buffer(rx_ring, rx_desc, size); + + if (skb) { + rnp_add_rx_frag(rx_ring, rx_buffer, skb, size); +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC + } else if (ring_uses_build_skb(rx_ring)) { + skb = rnp_build_skb(rx_ring, rx_buffer, rx_desc, size); +#endif + } else { + skb = rnp_construct_skb(rx_ring, rx_buffer, rx_desc, + size); + } + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + rx_buffer->pagecnt_bias++; + break; + } + if (module_enable_ptp && adapter->ptp_rx_en && + adapter->flags2 & RNP_FLAG2_PTP_ENABLED) + rnp_ptp_get_rx_hwstamp(adapter, rx_desc, skb); + + rnp_put_rx_buffer(rx_ring, rx_buffer); + cleaned_count++; + + /* place incomplete frames back on ring for completion */ + if (rnp_is_non_eop(rx_ring, rx_desc)) + continue; + + /* verify the packet layout is correct */ + if (rnp_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + rnp_process_skb_fields(rx_ring, rx_desc, skb); + + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + rnp_rx_skb(q_vector, skb); + skb = NULL; + + /* update budget 
accounting */
+		total_rx_packets++;
+	}
+
+	rx_ring->skb = skb;
+
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.packets += total_rx_packets;
+	rx_ring->stats.bytes += total_rx_bytes;
+	rx_ring->rx_stats.driver_drop_packets += driver_drop_packets;
+	rx_ring->rx_stats.rx_clean_count += total_rx_packets;
+	rx_ring->rx_stats.rx_clean_times++;
+	if (rx_ring->rx_stats.rx_clean_times > 10) {
+		rx_ring->rx_stats.rx_clean_times = 0;
+		rx_ring->rx_stats.rx_clean_count = 0;
+	}
+	u64_stats_update_end(&rx_ring->syncp);
+	q_vector->rx.total_packets += total_rx_packets;
+	q_vector->rx.total_bytes += total_rx_bytes;
+
+	if (total_rx_packets >= budget)
+		rx_ring->rx_stats.poll_again_count++;
+
+	/* if we failed to alloc memory, we should keep napi polling */
+	return (fail_alloc ? budget : total_rx_packets);
+}
+
+#else
+/**
+ * rnp_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @q_vector: structure containing interrupt and ring information
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing. The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed.
+ **/
+static int rnp_clean_rx_irq(struct rnp_q_vector *q_vector,
+			    struct rnp_ring *rx_ring, int budget)
+{
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	unsigned int err_packets = 0;
+	unsigned int driver_drop_packets = 0;
+	struct rnp_adapter *adapter = q_vector->adapter;
+	u16 cleaned_count = rnp_desc_unused_rx(rx_ring);
+	bool xdp_xmit = false;
+	struct xdp_buff xdp;
+	bool fail_alloc = false;
+
+	xdp.data = NULL;
+	xdp.data_end = NULL;
+
+	while (likely(total_rx_packets < budget)) {
+		union rnp_rx_desc *rx_desc;
+		struct rnp_rx_buffer *rx_buffer;
+		struct sk_buff *skb;
+		unsigned int size;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= RNP_RX_BUFFER_WRITE) {
+			fail_alloc = rnp_alloc_rx_buffers(rx_ring, cleaned_count) || fail_alloc;
+			cleaned_count = 0;
+		}
+		rx_desc = RNP_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+		rx_buf_dump("rx-desc:", rx_desc, sizeof(*rx_desc));
+		rx_debug_printk(" dd set: %s\n",
+				(rx_desc->wb.cmd & RNP_RXD_STAT_DD) ?
"Yes" : + "No"); + + if (!rnp_test_staterr(rx_desc, RNP_RXD_STAT_DD)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + rx_debug_printk( + "queue:%d rx-desc:%d has-data len:%d next_to_clean %d\n", + rx_ring->rnp_queue_idx, rx_ring->next_to_clean, + rx_desc->wb.len, rx_ring->next_to_clean); + + /* handle padding */ + if ((adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) && + (!(adapter->priv_flags & RNP_PRIV_FLAG_PADDING_DEBUG))) { + if (likely(rnp_test_staterr(rx_desc, + RNP_RXD_STAT_EOP))) { + size = le16_to_cpu(rx_desc->wb.len) - + le16_to_cpu(rx_desc->wb.padding_len); + } else { + size = le16_to_cpu(rx_desc->wb.len); + } + } else { + /* size should not zero */ + size = le16_to_cpu(rx_desc->wb.len); + } + + if (!size) + break; + + /* + * should check csum err + * maybe one packet use multiple descs + * no problems hw set all csum_err in multiple descs + * maybe BUG if the last sctp desc less than 60 + */ + if (rnp_check_csum_error(rx_ring, rx_desc, size, + &driver_drop_packets)) { + cleaned_count++; + err_packets++; + if (err_packets + total_rx_packets > budget) + break; + continue; + } + + rx_buffer = rnp_get_rx_buffer(rx_ring, rx_desc, &skb, size); + + if (!skb) { + xdp.data = page_address(rx_buffer->page) + + rx_buffer->page_offset; +#ifdef HAVE_XDP_BUFF_DATA_META + xdp.data_meta = xdp.data; +#endif + xdp.data_hard_start = xdp.data - rnp_rx_offset(rx_ring); + xdp.data_end = xdp.data + size; + } + + if (IS_ERR(skb)) { + if (PTR_ERR(skb) == -RNP_XDP_TX) { + xdp_xmit = true; + rnp_rx_buffer_flip(rx_ring, rx_buffer, size); + } else { + rx_buffer->pagecnt_bias++; + } + total_rx_packets++; + total_rx_bytes += size; + } else if (skb) { + rnp_add_rx_frag(rx_ring, rx_buffer, skb, size); +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC + } else if (ring_uses_build_skb(rx_ring)) { + skb = rnp_build_skb(rx_ring, rx_buffer, &xdp, rx_desc); +#endif + } else { + skb = rnp_construct_skb(rx_ring, rx_buffer, &xdp, + rx_desc); + } + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + rx_buffer->pagecnt_bias++; + break; + } + if (module_enable_ptp && adapter->ptp_rx_en && + adapter->flags2 & RNP_FLAG2_PTP_ENABLED) + rnp_ptp_get_rx_hwstamp(adapter, rx_desc, skb); + + rnp_put_rx_buffer(rx_ring, rx_buffer, skb); + cleaned_count++; + + /* place incomplete frames back on ring for completion */ + if (rnp_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + /* verify the packet layout is correct */ + if (rnp_cleanup_headers(rx_ring, rx_desc, skb)) { + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + rnp_process_skb_fields(rx_ring, rx_desc, skb); + + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + + rnp_rx_skb(q_vector, skb); + + /* update budget accounting */ + total_rx_packets++; + } + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + rx_ring->rx_stats.driver_drop_packets += driver_drop_packets; + rx_ring->rx_stats.rx_clean_count += total_rx_packets; + rx_ring->rx_stats.rx_clean_times++; + if (rx_ring->rx_stats.rx_clean_times > 10) { + rx_ring->rx_stats.rx_clean_times = 0; + rx_ring->rx_stats.rx_clean_count = 0; + } + 
u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + if (total_rx_packets >= budget) + rx_ring->rx_stats.poll_again_count++; + + /* it we failed alloc mem, we should kepp napi polling */ + return (fail_alloc ? budget : total_rx_packets); +} +#endif + +#else /* CONFIG_RNP_DISABLE_PACKET_SPLIT */ + +/** + * rnp_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool rnp_is_non_eop(struct rnp_ring *rx_ring, union rnp_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(RNP_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ + if (likely(rnp_test_staterr(rx_desc, RNP_RXD_STAT_EOP))) + return false; +#ifdef CONFIG_RNP_RNP_DISABLE_PACKET_SPLIT + printk("error spilt detect in disable split mode\n"); +#else + /* place skb in next buffer to be received */ + rx_ring->rx_buffer_info[ntc].skb = skb; +#endif + rx_ring->rx_stats.non_eop_descs++; + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + + return true; +} + +/** + * rnp_merge_active_tail - merge active tail into lro skb + * @tail: pointer to active tail in frag_list + * + * This function merges the length and data of an active tail into the + * skb containing the frag_list. It resets the tail's pointer to the head, + * but it leaves the heads pointer to tail intact. + **/ +static inline struct sk_buff *rnp_merge_active_tail(struct sk_buff *tail) +{ + struct sk_buff *head = RNP_CB(tail)->head; + + if (!head) + return tail; + + head->len += tail->len; + head->data_len += tail->len; + head->truesize += tail->truesize; + + RNP_CB(tail)->head = NULL; + + return head; +} + +/** + * rnp_add_active_tail - adds an active tail into the skb frag_list + * @head: pointer to the start of the skb + * @tail: pointer to active tail to add to frag_list + * + * This function adds an active tail to the end of the frag list. This tail + * will still be receiving data so we cannot yet ad it's stats to the main + * skb. That is done via rnp_merge_active_tail. 
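+ *
+ * As a rough illustration (assuming, say, a 2K rx_buf_len in the
+ * no-packet-split build): a 9000-byte jumbo frame would arrive as a
+ * head skb plus four tails; each new tail is appended here, the
+ * previously active tail is folded into the head via
+ * rnp_merge_active_tail(), and RNP_CB(head)->tail always tracks the
+ * skb that is still being filled.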
+ **/ +static inline void rnp_add_active_tail(struct sk_buff *head, + struct sk_buff *tail) +{ + struct sk_buff *old_tail = RNP_CB(head)->tail; + + if (old_tail) { + rnp_merge_active_tail(old_tail); + old_tail->next = tail; + } else { + skb_shinfo(head)->frag_list = tail; + } + + RNP_CB(tail)->head = head; + RNP_CB(head)->tail = tail; +} + +/** + * rnp_close_active_frag_list - cleanup pointers on a frag_list skb + * @head: pointer to head of an active frag list + * + * This function will clear the frag_tail_tracker pointer on an active + * frag_list and returns true if the pointer was actually set + **/ +static inline bool rnp_close_active_frag_list(struct sk_buff *head) +{ + struct sk_buff *tail = RNP_CB(head)->tail; + + if (!tail) + return false; + + rnp_merge_active_tail(tail); + + RNP_CB(head)->tail = NULL; + + return true; +} + +static bool rnp_alloc_mapped_skb(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *bi) +{ + struct sk_buff *skb = bi->skb; + dma_addr_t dma = bi->dma; + + if (unlikely(dma)) + return true; + + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_buf_len); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + bi->skb = skb; + } + dma = dma_map_single(rx_ring->dev, skb->data, rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + dev_kfree_skb_any(skb); + bi->skb = NULL; + + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + bi->dma = dma; + return true; +} + +/** + * rnp_clean_rx_irq - Clean completed descriptors from Rx ring - legacy + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a legacy approach to Rx interrupt + * handling. This version will perform better on systems with a low cost + * dma mapping API. + * + * Returns amount of work completed. 
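+ *
+ * Unlike the page-split paths above, each descriptor here is backed by
+ * a private skb mapped with dma_map_single(); completion simply unmaps
+ * the buffer and hands the skb up, so there is no page-recycling state
+ * to maintain. That keeps the code simple but means essentially every
+ * packet pays for a fresh skb allocation and DMA map, which is why
+ * this path is only preferable where DMA mapping is cheap.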
+ **/ +static int rnp_clean_rx_irq(struct rnp_q_vector *q_vector, + struct rnp_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + struct rnp_adapter *adapter = q_vector->adapter; + unsigned int driver_drop_packets = 0; + unsigned int err_packets = 0; + u16 len = 0; + u16 cleaned_count = rnp_desc_unused_rx(rx_ring); + bool fail_alloc = false; + + while (likely(total_rx_packets < budget)) { + struct rnp_rx_buffer *rx_buffer; + union rnp_rx_desc *rx_desc; + struct sk_buff *skb; + u16 ntc; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= RNP_RX_BUFFER_WRITE) { + fail_alloc = rnp_alloc_rx_buffers(rx_ring, cleaned_count) || fail_alloc; + cleaned_count = 0; + } + + ntc = rx_ring->next_to_clean; + rx_desc = RNP_RX_DESC(rx_ring, ntc); + rx_buffer = &rx_ring->rx_buffer_info[ntc]; + + if (!rnp_test_staterr(rx_desc, RNP_RXD_STAT_DD)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + skb = rx_buffer->skb; + + prefetch(skb->data); + + /* handle padding */ + if ((adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) && + (!(adapter->priv_flags & RNP_PRIV_FLAG_PADDING_DEBUG))) { + if (likely(rnp_test_staterr(rx_desc, + RNP_RXD_STAT_EOP))) { + len = le16_to_cpu(rx_desc->wb.len) - + le16_to_cpu(rx_desc->wb.padding_len); + } else { + len = le16_to_cpu(rx_desc->wb.len); + } + } else { + /* size should not zero */ + len = le16_to_cpu(rx_desc->wb.len); + } + + if (rnp_check_csum_error(rx_ring, rx_desc, len, + &driver_drop_packets)) { + dev_kfree_skb_any(skb); + cleaned_count++; + err_packets++; + if (err_packets + total_rx_packets > budget) + break; + continue; + } + + /* pull the header of the skb in */ + __skb_put(skb, len); + + /* + * Delay unmapping of the first packet. It carries the + * header information, HW may still access the header after + * the writeback. Only unmap it when EOP is reached + */ + dma_unmap_single(rx_ring->dev, rx_buffer->dma, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + + /* clear skb reference in buffer info structure */ + rx_buffer->skb = NULL; + rx_buffer->dma = 0; + + cleaned_count++; + + if (rnp_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + rnp_process_skb_fields(rx_ring, rx_desc, skb); + + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + + rnp_rx_skb(q_vector, skb); + + /* update budget accounting */ + total_rx_packets++; + } + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + rx_ring->rx_stats.driver_drop_packets += driver_drop_packets; + rx_ring->rx_stats.rx_clean_count += total_rx_packets; + rx_ring->rx_stats.rx_clean_times++; + if (rx_ring->rx_stats.rx_clean_times > 10) { + rx_ring->rx_stats.rx_clean_times = 0; + rx_ring->rx_stats.rx_clean_count = 0; + } + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + if (total_rx_packets >= budget) + rx_ring->rx_stats.poll_again_count++; + + /* it we failed alloc mem, we should kepp napi polling */ + return (fail_alloc ? 
budget : total_rx_packets); +} + +#endif /* CONFIG_RNP_DISABLE_PACKET_SPLIT */ + +/** + * rnp_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + * + * rnp_configure_msix sets up the hardware to properly generate MSI-X + * interrupts. + **/ +static void rnp_configure_msix(struct rnp_adapter *adapter) +{ + struct rnp_q_vector *q_vector; + int i; + + /* + * configure ring-msix Registers table + */ + for (i = 0; i < adapter->num_q_vectors; i++) { + struct rnp_ring *ring; + + q_vector = adapter->q_vector[i]; + rnp_for_each_ring(ring, q_vector->rx) { + rnp_set_ring_vector(adapter, ring->rnp_queue_idx, + q_vector->v_idx); + } + } +} + +/** + * rnp_update_itr - update the dynamic ITR value based on statistics + * @q_vector: structure containing interrupt and ring information + * @ring_container: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + **/ +static void rnp_update_itr(struct rnp_q_vector *q_vector, + struct rnp_ring_container *ring_container, int type) +{ + unsigned int itr = RNP_ITR_ADAPTIVE_MIN_USECS | + RNP_ITR_ADAPTIVE_LATENCY; + unsigned int avg_wire_size, packets, bytes; + unsigned int packets_old; + unsigned long next_update = jiffies; + u32 old_itr; + u16 add_itr, add = 0; + /* 0 is tx ;1 is rx */ + if (type) + old_itr = q_vector->itr_rx; + else + old_itr = q_vector->itr_tx; + + /* If we don't have any rings just leave ourselves set for maximum + * possible latency so we take ourselves out of the equation. + */ + if (!ring_container->ring) + return; + + packets_old = ring_container->total_packets_old; + packets = ring_container->total_packets; + bytes = ring_container->total_bytes; + add_itr = ring_container->add_itr; + /* If Rx and there are 1 to 23 packets and bytes are less than + * 12112 assume insufficient data to use bulk rate limiting + * approach. Instead we will focus on simply trying to target + * receiving 8 times as much data in the next interrupt. + */ + if (!packets) + return; + + if (packets && packets < 24 && bytes < 12112) { + itr = RNP_ITR_ADAPTIVE_LATENCY; + + avg_wire_size = (bytes + packets * 24); + avg_wire_size = + clamp_t(unsigned int, avg_wire_size, 128, 12800); + + goto adjust_for_speed; + } + + /* Less than 48 packets we can assume that our current interrupt delay + * is only slightly too low. As such we should increase it by a small + * fixed amount. 
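+	 * Illustrative numbers, assuming RNP_ITR_ADAPTIVE_MIN_INC is a
+	 * couple of microseconds: with a stored itr of 40us (old_itr is
+	 * kept left-shifted by 2, see rnp_set_itr()) and the previous
+	 * bump having produced more packets, the candidate becomes
+	 * (old_itr >> 2) + RNP_ITR_ADAPTIVE_MIN_INC, clamped to
+	 * RNP_ITR_ADAPTIVE_MAX_USECS; if the bump did not help, the same
+	 * amount is subtracted instead and clamped to the MIN value.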
+ */ + if (packets < 48) { + if (add_itr) { + if (packets_old < packets) { + itr = (old_itr >> 2) + RNP_ITR_ADAPTIVE_MIN_INC; + if (itr > RNP_ITR_ADAPTIVE_MAX_USECS) + itr = RNP_ITR_ADAPTIVE_MAX_USECS; + add = 1; + + if (packets < 8) + itr += RNP_ITR_ADAPTIVE_LATENCY; + else + itr += ring_container->itr & + RNP_ITR_ADAPTIVE_LATENCY; + + } else { + /* we add itr before ,but not get more packets */ + itr = (old_itr >> 2) - RNP_ITR_ADAPTIVE_MIN_INC; + if (itr < RNP_ITR_ADAPTIVE_MIN_USECS) + itr = RNP_ITR_ADAPTIVE_MIN_USECS; + } + + } else { + /* we not add before, add itr */ + add = 1; + itr = (old_itr >> 2) + RNP_ITR_ADAPTIVE_MIN_INC; + if (itr > RNP_ITR_ADAPTIVE_MAX_USECS) + itr = RNP_ITR_ADAPTIVE_MAX_USECS; + + /* If sample size is 0 - 7 we should probably switch + * to latency mode instead of trying to control + * things as though we are in bulk. + * + * Otherwise if the number of packets is less than 48 + * we should maintain whatever mode we are currently + * in. The range between 8 and 48 is the cross-over + * point between latency and bulk traffic. + */ + if (packets < 8) + itr += RNP_ITR_ADAPTIVE_LATENCY; + else + itr += ring_container->itr & + RNP_ITR_ADAPTIVE_LATENCY; + } + goto clear_counts; + } + + /* Between 48 and 96 is our "goldilocks" zone where we are working + * out "just right". Just report that our current ITR is good for us. + */ + if (packets < 96) { + itr = old_itr >> 2; + goto clear_counts; + } + /* If packet count is 96 or greater we are likely looking at a slight + * overrun of the delay we want. Try halving our delay to see if that + * will cut the number of packets in half per interrupt. + */ + if (packets < 256) { + itr = old_itr >> 3; + if (itr < RNP_ITR_ADAPTIVE_MIN_USECS) + itr = RNP_ITR_ADAPTIVE_MIN_USECS; + goto clear_counts; + } + + /* The paths below assume we are dealing with a bulk ITR since number + * of packets is 256 or greater. We are just going to have to compute + * a value and try to bring the count under control, though for smaller + * packet sizes there isn't much we can do as NAPI polling will likely + * be kicking in sooner rather than later. + */ + itr = RNP_ITR_ADAPTIVE_BULK; + + /* If packet counts are 256 or greater we can assume we have a gross + * overestimation of what the rate should be. Instead of trying to fine + * tune it just use the formula below to try and dial in an exact value + * give the current packet size of the frame. + */ + avg_wire_size = bytes / packets; + + /* The following is a crude approximation of: + * wmem_default / (size + overhead) = desired_pkts_per_int + * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate + * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value + * + * Assuming wmem_default is 212992 and overhead is 640 bytes per + * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the + * formula down to + * + * (170 * (size + 24)) / (size + 640) = ITR + * + * We first do some math on the packet size and then finally bitshift + * by 8 after rounding up. We also have to account for PCIe link speed + * difference as ITR scales based on this. 
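+	 *
+	 * Worked example (assuming RNP_ITR_ADAPTIVE_MIN_INC == 2, which
+	 * is an assumption, not something defined in this file): an
+	 * average frame of 200 bytes lands in the 61..316 bucket below,
+	 * giving 200 * 40 + 2720 = 10720; at 10G that is scaled by
+	 * DIV_ROUND_UP(10720, 2 * 256) * 2 = 42us, i.e. roughly 24K
+	 * interrupts/sec, sitting between the 50K and 16K endpoints of
+	 * that bucket.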
+ */
+	if (avg_wire_size <= 60) {
+		/* Start at 50k ints/sec */
+		avg_wire_size = 5120;
+	} else if (avg_wire_size <= 316) {
+		/* 50K ints/sec to 16K ints/sec */
+		avg_wire_size *= 40;
+		avg_wire_size += 2720;
+	} else if (avg_wire_size <= 1084) {
+		/* 16K ints/sec to 9.2K ints/sec */
+		avg_wire_size *= 15;
+		avg_wire_size += 11452;
+	} else if (avg_wire_size <= 1980) {
+		/* 9.2K ints/sec to 8K ints/sec */
+		avg_wire_size *= 5;
+		avg_wire_size += 22420;
+	} else {
+		/* plateau at a limit of 8K ints/sec */
+		avg_wire_size = 32256;
+	}
+
+adjust_for_speed:
+	/* Resultant value is 256 times larger than it needs to be. This
+	 * gives us room to adjust the value as needed to either increase
+	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
+	 *
+	 * Use addition as we have already recorded the new latency flag
+	 * for the ITR value.
+	 */
+	switch (q_vector->adapter->link_speed) {
+	case RNP_LINK_SPEED_10GB_FULL:
+	case RNP_LINK_SPEED_100_FULL:
+	default:
+		itr += DIV_ROUND_UP(avg_wire_size,
+				    RNP_ITR_ADAPTIVE_MIN_INC * 256) *
+		       RNP_ITR_ADAPTIVE_MIN_INC;
+		break;
+	case RNP_LINK_SPEED_1GB_FULL:
+	case RNP_LINK_SPEED_10_FULL:
+		itr += DIV_ROUND_UP(avg_wire_size,
+				    RNP_ITR_ADAPTIVE_MIN_INC * 64) *
+		       RNP_ITR_ADAPTIVE_MIN_INC;
+		break;
+	}
+
+	/* In the case of a latency specific workload only allow us to
+	 * reduce the ITR by at most 2us. By doing this we should dial
+	 * in so that our number of interrupts is no more than 2x the number
+	 * of packets for the least busy workload. So for example in the case
+	 * of a TCP workload the ack packets being received would set
+	 * the interrupt rate as they are a latency specific workload.
+	 */
+	if ((itr & RNP_ITR_ADAPTIVE_LATENCY) && itr < ring_container->itr)
+		itr = ring_container->itr - RNP_ITR_ADAPTIVE_MIN_INC;
+
+clear_counts:
+	/* write back value */
+	ring_container->itr = itr;
+
+	/* next update should occur within next jiffy */
+	ring_container->next_update = next_update + 1;
+
+	ring_container->total_bytes = 0;
+	ring_container->total_packets_old = packets;
+	ring_container->add_itr = add;
+	ring_container->total_packets = 0;
+}
+
+/**
+ * rnp_write_eitr_rx - write EITR register in hardware specific way
+ * @q_vector: structure containing interrupt and ring information
+ *
+ * This function is made to be called by ethtool and by the driver
+ * when it needs to update EITR registers at runtime. Hardware
+ * specific quirks/differences are taken care of here.
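+ *
+ * The stored per-vector value is kept left-shifted by 2 (see
+ * rnp_set_itr() below), so the helper shifts it back to microseconds
+ * and multiplies by hw->usecstocount (presumably the DMA timer ticks
+ * per microsecond) before writing RNP_DMA_REG_RX_INT_DELAY_TIMER on
+ * every rx ring owned by the vector. As a purely hypothetical example,
+ * with usecstocount == 500 a 40us ITR would be programmed as 20000
+ * ticks.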
+ */ +static void rnp_write_eitr_rx(struct rnp_q_vector *q_vector) +{ + struct rnp_adapter *adapter = q_vector->adapter; + struct rnp_hw *hw = &adapter->hw; + u32 itr_reg = q_vector->itr_rx >> 2; + struct rnp_ring *ring; + + itr_reg = itr_reg * hw->usecstocount; + + rnp_for_each_ring(ring, q_vector->rx) { + ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER, itr_reg); + } +} + +static void rnp_set_itr(struct rnp_q_vector *q_vector) +{ + u32 new_itr_rx; + + rnp_update_itr(q_vector, &q_vector->rx, 1); + new_itr_rx = q_vector->rx.itr; + new_itr_rx &= RNP_ITR_ADAPTIVE_MASK_USECS; + new_itr_rx <<= 2; + if (new_itr_rx != q_vector->itr_rx) { + /* save the algorithm value here */ + q_vector->itr_rx = new_itr_rx; + rnp_write_eitr_rx(q_vector); + } +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +static inline void rnp_irq_enable_queues(struct rnp_adapter *adapter, + struct rnp_q_vector *q_vector) +{ + struct rnp_ring *ring; + + rnp_for_each_ring(ring, q_vector->rx) { +#ifdef CONFIG_RNP_DISABLE_TX_IRQ + rnp_wr_reg(ring->dma_int_mask, ~(RX_INT_MASK)); +#else + rnp_wr_reg(ring->dma_int_mask, ~(RX_INT_MASK | TX_INT_MASK)); +#endif + } +} + +static inline void rnp_irq_disable_queues(struct rnp_q_vector *q_vector) +{ + struct rnp_ring *ring; + + rnp_for_each_ring(ring, q_vector->tx) { + if (q_vector->new_rx_count != q_vector->old_rx_count) { + ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_PKTCNT, + q_vector->new_rx_count); + q_vector->old_rx_count = q_vector->new_rx_count; + } + rnp_wr_reg(ring->dma_int_mask, (RX_INT_MASK | TX_INT_MASK)); + } +} + +/** + * rnp_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static inline void rnp_irq_enable(struct rnp_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_q_vectors; i++) + rnp_irq_enable_queues(adapter, adapter->q_vector[i]); +} + +static irqreturn_t rnp_msix_other(int irq, void *data) +{ + struct rnp_adapter *adapter = data; + set_bit(__RNP_IN_IRQ, &adapter->state); + + rnp_msg_task(adapter); + + clear_bit(__RNP_IN_IRQ, &adapter->state); + + return IRQ_HANDLED; +} + +static void rnp_htimer_start(struct rnp_q_vector *q_vector) +{ + unsigned long ns = q_vector->irq_check_usecs * NSEC_PER_USEC / 2; + + hrtimer_start_range_ns(&q_vector->irq_miss_check_timer, ns_to_ktime(ns), + ns, HRTIMER_MODE_REL); +} + +static void rnp_htimer_stop(struct rnp_q_vector *q_vector) +{ + hrtimer_cancel(&q_vector->irq_miss_check_timer); +} + +static irqreturn_t rnp_msix_clean_rings(int irq, void *data) +{ + struct rnp_q_vector *q_vector = data; + + if (q_vector->vector_flags & RNP_QVECTOR_FLAG_IRQ_MISS_CHECK) + rnp_htimer_stop(q_vector); + + /* disabled interrupts (on this vector) for us */ + rnp_irq_disable_queues(q_vector); + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; +} + +static void update_rx_count(int cleaned, struct rnp_q_vector *q_vector) +{ + struct rnp_adapter *adapter = q_vector->adapter; + u32 link_speed = adapter->link_speed; + struct rnp_ring *ring; + + if (link_speed == RNP_LINK_SPEED_10GB_FULL) { + if ((cleaned) && (cleaned != q_vector->new_rx_count)) { + if (cleaned < 5) { + q_vector->small_times = 0; + q_vector->large_times = 0; + q_vector->too_small_times++; + if (q_vector->too_small_times >= 2) { + q_vector->new_rx_count = 1; + } + } else if (cleaned < 30) { + q_vector->too_small_times = 0; + q_vector->middle_time++; + /* count is 5 -30 */ + if (cleaned < 
q_vector->new_rx_count) { + /* change small */ + q_vector->small_times = 0; + q_vector->new_rx_count -= + (1 << (q_vector->large_times++)); + if (q_vector->new_rx_count < 0) + q_vector->new_rx_count = 1; + + } else { + q_vector->large_times = 0; + + if (cleaned > 30) { + if (q_vector->new_rx_count == + (cleaned - 4)) { + } else { + q_vector->new_rx_count += + (1 + << (q_vector->small_times++)); + } + /* should no more than q_vector */ + if (q_vector->new_rx_count >= cleaned) { + q_vector->new_rx_count = + cleaned - 4; + q_vector->small_times = 0; + } + + } else { + if (q_vector->new_rx_count == + (cleaned - 1)) { + } else { + q_vector->new_rx_count += + (1 + << (q_vector->small_times++)); + } + /* should no more than q_vector */ + if (q_vector->new_rx_count >= cleaned) { + q_vector->new_rx_count = + cleaned - 1; + q_vector->small_times = 0; + } + } + } + } else { + q_vector->too_small_times = 0; + q_vector->new_rx_count = + max_t(int, 64, adapter->rx_frames); + q_vector->small_times = 0; + q_vector->large_times = 0; + } + } + } else { + rnp_for_each_ring(ring, q_vector->rx) { + if (ring->ring_flags & RNP_RING_LOWER_ITR) { + q_vector->new_rx_count = 1; + } else { + q_vector->new_rx_count = 32; + } + } + + + } +} + +/** + * rnp_poll - NAPI Rx polling callback + * @napi: structure for representing this polling device + * @budget: how many packets driver is allowed to clean + * + * This function is used for legacy and MSI, NAPI mode + **/ +int rnp_poll(struct napi_struct *napi, int budget) +{ + struct rnp_q_vector *q_vector = + container_of(napi, struct rnp_q_vector, napi); + struct rnp_adapter *adapter = q_vector->adapter; + struct rnp_ring *ring; + int per_ring_budget, work_done = 0; + bool clean_complete = true; + int cleaned_total = 0; + +#ifdef CONFIG_RNP_DCA + if (adapter->flags & RNP_FLAG_DCA_ENABLED) + rnp_update_dca(q_vector); +#endif + + rnp_for_each_ring(ring, q_vector->tx) { + if (!rnp_clean_tx_irq(q_vector, ring, budget)) + clean_complete = false; + } + + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling + */ + if (q_vector->rx.count > 1) + per_ring_budget = max(budget / q_vector->rx.count, 1); + else + per_ring_budget = budget; + + rnp_for_each_ring(ring, q_vector->rx) { + int cleaned = 0; + /* this ring is waitting to reset rx_len*/ + /* avoid to deal this ring until reset done */ + if (likely(!(ring->ring_flags & RNP_RING_FLAG_DO_RESET_RX_LEN))) + cleaned = rnp_clean_rx_irq(q_vector, ring, + per_ring_budget); + /* check delay rx setup */ + if (unlikely(ring->ring_flags & + RNP_RING_FLAG_DELAY_SETUP_RX_LEN)) { + int head; + + rnp_disable_rx_queue(adapter, ring); + head = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_HEAD); + if (head < RNP_MIN_RXD) { + /* it is time to delay set */ + /* stop rx */ + rnp_disable_rx_queue(adapter, ring); + ring->ring_flags &= + (~RNP_RING_FLAG_DELAY_SETUP_RX_LEN); + ring->ring_flags |= + RNP_RING_FLAG_DO_RESET_RX_LEN; + } else { + + ring_wr32(ring, RNP_DMA_RX_START, 1); + } + } + work_done += cleaned; + cleaned_total += cleaned; + if (cleaned >= per_ring_budget) + clean_complete = false; + } + +#ifndef HAVE_NETDEV_NAPI_LIST + if (!netif_running(adapter->netdev)) + clean_complete = true; +#endif + + /* force close irq */ + if (test_bit(__RNP_DOWN, &adapter->state)) { + clean_complete = true; + } + /* all work done, exit the polling mode */ + if (!(q_vector->vector_flags & RNP_QVECTOR_FLAG_ITR_FEATURE)) + update_rx_count(cleaned_total, q_vector); + + if (!clean_complete) { +#ifdef 
HAVE_IRQ_AFFINITY_NOTIFY +#ifdef SUPPORT_IRQ_AFFINITY_CHANGE + int cpu_id = smp_processor_id(); + + /* It is possible that the interrupt affinity has changed but, + * if the cpu is pegged at 100%, polling will never exit while + * traffic continues and the interrupt will be stuck on this + * cpu. We check to make sure affinity is correct before we + * continue to poll, otherwise we must stop polling so the + * interrupt can move to the correct cpu. + */ + if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { + /* Tell napi that we are done polling */ + napi_complete_done(napi, work_done); + if (!test_bit(__RNP_DOWN, &adapter->state)) { + rnp_irq_enable_queues(adapter, q_vector); + /* we need this to ensure irq start before tx start */ + if (q_vector->vector_flags & + RNP_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS) { + smp_mb(); + rnp_for_each_ring(ring, q_vector->tx) { + rnp_check_restart_tx(q_vector, + ring); + if (q_vector->new_rx_count != + q_vector->old_rx_count) { + ring_wr32( + ring, + RNP_DMA_REG_RX_INT_DELAY_PKTCNT, + q_vector->new_rx_count); + q_vector->old_rx_count = + q_vector->new_rx_count; + } + } + } + if (!test_bit(__RNP_DOWN, &adapter->state)) { + if (q_vector->vector_flags & + RNP_QVECTOR_FLAG_IRQ_MISS_CHECK) + rnp_htimer_start(q_vector); + /* Return budget-1 so that polling stops */ + return budget - 1; + } + } + return min(work_done, budget - 1); + } +#endif /* SUPPORT_IRQ_AFFINITY_CHANGE */ +#endif /* HAVE_IRQ_AFFINITY_NOTIFY */ + // irq affinity update here + if (q_vector->vector_flags & + RNP_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS) { + rnp_for_each_ring(ring, q_vector->tx) { + rnp_check_restart_tx(q_vector, ring); + /* update rx count if need */ + if (q_vector->new_rx_count != + q_vector->old_rx_count) { + ring_wr32(ring, + RNP_DMA_REG_RX_INT_DELAY_PKTCNT, + q_vector->new_rx_count); + q_vector->old_rx_count = + q_vector->new_rx_count; + } + } + } + return budget; + } + + if (likely(napi_complete_done(napi, work_done))) { + /* try to do itr handle */ + if (q_vector->vector_flags & RNP_QVECTOR_FLAG_ITR_FEATURE) + rnp_set_itr(q_vector); + + if (!test_bit(__RNP_DOWN, &adapter->state)) { + rnp_irq_enable_queues(adapter, q_vector); + smp_mb(); + /* we need this to ensure irq start before tx start */ + if (q_vector->vector_flags & + RNP_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS) { + rnp_for_each_ring(ring, q_vector->tx) { + rnp_check_restart_tx(q_vector, ring); + if (q_vector->new_rx_count != + q_vector->old_rx_count) { + ring_wr32( + ring, + RNP_DMA_REG_RX_INT_DELAY_PKTCNT, + q_vector->new_rx_count); + q_vector->old_rx_count = + q_vector->new_rx_count; + } + } + } + if (q_vector->vector_flags & RNP_QVECTOR_FLAG_IRQ_MISS_CHECK) + rnp_htimer_start(q_vector); + + } + } + + return min(work_done, budget - 1); +} + +#ifdef HAVE_IRQ_AFFINITY_NOTIFY +#ifdef SUPPORT_IRQ_AFFINITY_CHANGE +/** + * rnp_irq_affinity_notify - Callback for affinity changes + * @notify: context as to what irq was changed + * @mask: the new affinity mask + * + * This is a callback function used by the irq_set_affinity_notifier function + * so that we may register to receive changes to the irq affinity masks. 
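+ *
+ * The callback below only snapshots the new mask into
+ * q_vector->affinity_mask; rnp_poll() then checks that mask against
+ * smp_processor_id() and completes NAPI early if the vector has been
+ * migrated, so polling resumes on the CPU the interrupt now targets.
+ * The notifier itself is registered per vector in
+ * rnp_request_msix_irqs() via irq_set_affinity_notifier().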
+ **/ +static void rnp_irq_affinity_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct rnp_q_vector *q_vector = + container_of(notify, struct rnp_q_vector, affinity_notify); + + cpumask_copy(&q_vector->affinity_mask, mask); +} + +/** + * rnp_irq_affinity_release - Callback for affinity notifier release + * @ref: internal core kernel usage + * + * This is a callback function used by the irq_set_affinity_notifier function + * to inform the current notification subscriber that they will no longer + * receive notifications. + **/ +static void rnp_irq_affinity_release(struct kref *ref) +{ +} +#endif +#endif /* HAVE_IRQ_AFFINITY_NOTIFY */ + +static irqreturn_t rnp_intr(int irq, void *data) +{ + struct rnp_adapter *adapter = data; + struct rnp_q_vector *q_vector = adapter->q_vector[0]; + if (q_vector->vector_flags & RNP_QVECTOR_FLAG_IRQ_MISS_CHECK) + rnp_htimer_stop(q_vector); + + /* disabled interrupts (on this vector) for us */ + rnp_irq_disable_queues(q_vector); + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule_irqoff(&q_vector->napi); + + /* handle other */ + rnp_msg_task(adapter); + + return IRQ_HANDLED; +} + +/** + * rnp_request_msix_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * + * rnp_request_msix_irqs allocates MSI-X vectors and requests + * interrupts from the kernel. + **/ +static int rnp_request_msix_irqs(struct rnp_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + int i = 0; + DPRINTK(IFUP, INFO, "[%s] num_q_vectors:%d\n", __func__, + adapter->num_q_vectors); + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct rnp_q_vector *q_vector = adapter->q_vector[i]; + struct msix_entry *entry = + &adapter->msix_entries[i + adapter->q_vector_off]; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d-%d", netdev->name, "TxRx", i, + q_vector->v_idx); + } else { + WARN(!(q_vector->tx.ring && q_vector->rx.ring), + "%s vector%d tx rx is null, v_idx:%d\n", + netdev->name, i, q_vector->v_idx); + /* skip this unused q_vector */ + continue; + } + err = request_irq(entry->vector, &rnp_msix_clean_rings, 0, + q_vector->name, q_vector); + if (err) { + e_err(probe, + "%s:request_irq failed for MSIX interrupt:%d " + "Error: %d\n", + netdev->name, entry->vector, err); + goto free_queue_irqs; + } +#ifdef HAVE_IRQ_AFFINITY_NOTIFY + /* register for affinity change notifications */ +#ifdef SUPPORT_IRQ_AFFINITY_CHANGE + q_vector->affinity_notify.notify = rnp_irq_affinity_notify; + q_vector->affinity_notify.release = rnp_irq_affinity_release; + irq_set_affinity_notifier(entry->vector, + &q_vector->affinity_notify); +#endif /* SUPPORT_IRQ_AFFINITY_CHANGE */ +#endif /* HAVE_IRQ_AFFINITY_NOTIFY */ +#ifdef HAVE_IRQ_AFFINITY_HINT + DPRINTK(IFUP, INFO, "[%s] set %s affinity_mask\n", __func__, + q_vector->name); + + irq_set_affinity_hint(entry->vector, &q_vector->affinity_mask); +#endif + } + + return 0; + +free_queue_irqs: + while (i) { + i--; + irq_set_affinity_hint( + adapter->msix_entries[i + adapter->q_vector_off].vector, + NULL); + free_irq( + adapter->msix_entries[i + adapter->q_vector_off].vector, + adapter->q_vector[i]); +#ifdef HAVE_IRQ_AFFINITY_NOTIFY +#ifdef SUPPORT_IRQ_AFFINITY_CHANGE + irq_set_affinity_notifier( + adapter->msix_entries[i + adapter->q_vector_off].vector, + NULL); +#endif +#endif +#ifdef HAVE_IRQ_AFFINITY_HINT + irq_set_affinity_hint( + adapter->msix_entries[i + adapter->q_vector_off].vector, + NULL); +#endif + } + 
return err; +} + +static int rnp_free_msix_irqs(struct rnp_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct rnp_q_vector *q_vector = adapter->q_vector[i]; + struct msix_entry *entry = + &adapter->msix_entries[i + adapter->q_vector_off]; + + /* free only the irqs that were actually requested */ + if (!q_vector->rx.ring && !q_vector->tx.ring) + continue; +#ifdef HAVE_IRQ_AFFINITY_NOTIFY + /* clear the affinity notifier in the IRQ descriptor */ + irq_set_affinity_notifier(entry->vector, NULL); +#endif +#ifdef HAVE_IRQ_AFFINITY_HINT + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(entry->vector, NULL); +#endif + DPRINTK(IFDOWN, INFO, "free irq %s\n", q_vector->name); + free_irq(entry->vector, q_vector); + } + + return 0; +} + +#ifdef DISABLE_RX_IRQ +int rx_poll_thread_handler(void *data) +{ + int i; + struct rnp_adapter *adapter = data; + + dbg("%s %s running...\n", __func__, adapter->name); + + do { + for (i = 0; i < adapter->num_q_vectors; i++) { + rnp_msix_clean_rings(0, adapter->q_vector[i]); + } + + msleep(30); + } while (!kthread_should_stop() && adapter->quit_poll_thread != true); + + dbg("%s %s stopped\n", __func__, adapter->name); + + return 0; +} +#endif + +/** + * rnp_request_irq - initialize interrupts + * @adapter: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static int rnp_request_irq(struct rnp_adapter *adapter) +{ + int err; + // struct rnp_hw *hw = &adapter->hw; + +#ifdef DISABLE_RX_IRQ + adapter->rx_poll_thread = + kthread_run(rx_poll_thread_handler, adapter, adapter->name); + if (!adapter->rx_poll_thread) { + rnp_err("kthread_run failed!\n"); + return -EIO; + } + return 0; +#endif + if (adapter->flags & RNP_FLAG_MSIX_ENABLED) { + pr_info("msix mode is used\n"); + err = rnp_request_msix_irqs(adapter); + + } else if (adapter->flags & RNP_FLAG_MSI_ENABLED) { + /* in this case one for all */ + pr_info("msi mode is used\n"); + err = request_irq(adapter->pdev->irq, rnp_intr, 0, + adapter->netdev->name, adapter); + adapter->hw.mbx.other_irq_enabled = true; + } else { + pr_info("legacy mode is used\n"); + err = request_irq(adapter->pdev->irq, rnp_intr, IRQF_SHARED, + adapter->netdev->name, adapter); + adapter->hw.mbx.other_irq_enabled = true; + } + + if (err) + e_err(probe, "request_irq failed, Error %d\n", err); + + return err; +} + +static void rnp_free_irq(struct rnp_adapter *adapter) +{ +#ifdef DISABLE_RX_IRQ + return; +#endif + if (adapter->flags & RNP_FLAG_MSIX_ENABLED) { + rnp_free_msix_irqs(adapter); + } else if (adapter->flags & RNP_FLAG_MSI_ENABLED) { + /* in this case one for all */ + free_irq(adapter->pdev->irq, adapter); + adapter->hw.mbx.other_irq_enabled = false; + } else { + free_irq(adapter->pdev->irq, adapter); + adapter->hw.mbx.other_irq_enabled = false; + } + +} + +/** + * rnp_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static inline void rnp_irq_disable(struct rnp_adapter *adapter) +{ + int i, j; + + for (i = 0; i < adapter->num_q_vectors; i++) { + rnp_irq_disable_queues(adapter->q_vector[i]); + j = i + adapter->q_vector_off; + if (adapter->flags & RNP_FLAG_MSIX_ENABLED) + synchronize_irq(adapter->msix_entries[j].vector); + else + synchronize_irq(adapter->pdev->irq); + } +} + +int rnp_setup_tx_maxrate(struct rnp_ring *tx_ring, u64 max_rate, + int samples_1sec) +{ + /* set hardware samping internal 1S */ + ring_wr32(tx_ring, 
RNP_DMA_REG_TX_FLOW_CTRL_TM, samples_1sec); + ring_wr32(tx_ring, RNP_DMA_REG_TX_FLOW_CTRL_TH, max_rate); + + return 0; +} + +/** + * rnp_tx_maxrate_own - callback to set the maximum per-queue bitrate + * @netdev: network interface device structure + * @queue_index: Tx queue to set + * @maxrate: desired maximum transmit bitrate Mbps + **/ +static int rnp_tx_maxrate_own(struct rnp_adapter *adapter, int queue_index) +{ + struct rnp_ring *tx_ring = adapter->tx_ring[queue_index]; + u64 real_rate = 0; + u32 maxrate = adapter->max_rate[queue_index]; + + if (!maxrate) + return rnp_setup_tx_maxrate(tx_ring, 0, + adapter->hw.usecstocount * 1000000); + /* we need turn it to bytes/s */ + real_rate = ((u64)maxrate * 1024 * 1024) / 8; + rnp_setup_tx_maxrate(tx_ring, real_rate, + adapter->hw.usecstocount * 1000000); + + return 0; +} + +/** + * rnp_configure_tx_ring - Configure 8259x Tx ring after Reset + * @adapter: board private structure + * @ring: structure containing ring specific data + * + * Configure the Tx descriptor ring after a reset. + **/ +void rnp_configure_tx_ring(struct rnp_adapter *adapter, struct rnp_ring *ring) +{ + struct rnp_hw *hw = &adapter->hw; + + /* disable queue to avoid issues while updating state */ + + if (!(ring->ring_flags & RNP_RING_SKIP_TX_START)) + ring_wr32(ring, RNP_DMA_TX_START, 0); + + ring_wr32(ring, RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_LO, (u32)ring->dma); + ring_wr32(ring, RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_HI, + (u32)(((u64)ring->dma) >> 32) | (hw->pfvfnum << 24)); + ring_wr32(ring, RNP_DMA_REG_TX_DESC_BUF_LEN, ring->count); + + ring->next_to_clean = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_HEAD); + ring->next_to_use = ring->next_to_clean; + ring->tail = ring->ring_addr + RNP_DMA_REG_TX_DESC_BUF_TAIL; + rnp_wr_reg(ring->tail, ring->next_to_use); + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + ring_wr32(ring, RNP_DMA_REG_TX_DESC_FETCH_CTRL, + (8 << 0) /* max_water_flow */ + | (8 << 16) + /* max-num_descs_peer_read */ + ); + + } else { + ring_wr32(ring, RNP_DMA_REG_TX_DESC_FETCH_CTRL, + (64 << 0) /* max_water_flow */ + | (TSRN10_TX_DEFAULT_BURST << 16) + /* max-num_descs_peer_read */ + ); + } + ring_wr32(ring, RNP_DMA_REG_TX_INT_DELAY_TIMER, + adapter->tx_usecs * hw->usecstocount); + ring_wr32(ring, RNP_DMA_REG_TX_INT_DELAY_PKTCNT, adapter->tx_frames); + + rnp_tx_maxrate_own(adapter, ring->queue_index); + if (adapter->flags & RNP_FLAG_FDIR_HASH_CAPABLE) { + ring->atr_sample_rate = adapter->atr_sample_rate; + ring->atr_count = 0; + set_bit(__RNP_TX_FDIR_INIT_DONE, &ring->state); + } else { + ring->atr_sample_rate = 0; + } + /* initialize XPS */ + if (!test_and_set_bit(__RNP_TX_XPS_INIT_DONE, &ring->state)) { + struct rnp_q_vector *q_vector = ring->q_vector; + + if (q_vector) + netif_set_xps_queue(adapter->netdev, + &q_vector->affinity_mask, + ring->queue_index); + } + + clear_bit(__RNP_HANG_CHECK_ARMED, &ring->state); + + if (!(ring->ring_flags & RNP_RING_SKIP_TX_START)) { + /* should wait tx_ready before open tx start */ + int timeout = 0; + u32 status = 0; + + do { + status = ring_rd32(ring, RNP_DMA_TX_READY); + usleep_range(100, 200); + timeout++; + rnp_dbg("wait %d tx ready to 1\n", ring->rnp_queue_idx); + } while ((status != 1) && (timeout < 100)); + + if (timeout >= 100) + printk("wait tx ready timeout\n"); + ring_wr32(ring, RNP_DMA_TX_START, 1); + } +} + +/** + * rnp_configure_tx - Configure Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. 
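+ * The DMA AXI transmit engine is enabled first and the individual Tx
+ * rings are programmed afterwards; the hardware expects this ordering
+ * (see the dma_axi_en.tx_en comment below).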
+ **/ +static void rnp_configure_tx(struct rnp_adapter *adapter) +{ + u32 i, dma_axi_ctl; + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + + /* dma_axi_en.tx_en must be before Tx queues are enabled */ + dma_axi_ctl = dma_rd32(dma, RNP_DMA_AXI_EN); + dma_axi_ctl |= TX_AXI_RW_EN; + dma_wr32(dma, RNP_DMA_AXI_EN, dma_axi_ctl); + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < (adapter->num_tx_queues); i++) + rnp_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +void rnp_disable_rx_queue(struct rnp_adapter *adapter, struct rnp_ring *ring) +{ + ring_wr32(ring, RNP_DMA_RX_START, 0); +} + +void rnp_configure_rx_ring(struct rnp_adapter *adapter, struct rnp_ring *ring) +{ + struct rnp_hw *hw = &adapter->hw; + u64 desc_phy = ring->dma; + u16 q_idx = ring->queue_index; + + /* disable queue to avoid issues while updating state */ + rnp_disable_rx_queue(adapter, ring); + + /* set descripts registers*/ + ring_wr32(ring, RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_LO, (u32)desc_phy); + ring_wr32(ring, RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_HI, + ((u32)(desc_phy >> 32)) | (hw->pfvfnum << 24)); + ring_wr32(ring, RNP_DMA_REG_RX_DESC_BUF_LEN, ring->count); + + ring->tail = ring->ring_addr + RNP_DMA_REG_RX_DESC_BUF_TAIL; + ring->next_to_clean = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_HEAD); + ring->next_to_use = ring->next_to_clean; + + if (ring->ring_flags & RNP_RING_SCATER_SETUP) { +#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT + ring_wr32(ring, PCI_DMA_REG_RX_SCATTER_LENGTH, 96); +#else + /* we setup scatter along with hw_max */ + ring_wr32(ring, PCI_DMA_REG_RX_SCATTER_LENGTH, + ((hw->max_length_current + 15) >> 4)); +#endif + } + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + ring_wr32(ring, RNP_DMA_REG_RX_DESC_FETCH_CTRL, + 0 | (TSRN10_RX_DEFAULT_LINE << 0) /* rx-desc-flow */ + | (TSRN10_RX_DEFAULT_BURST << 16) + /* max-read-desc-cnt */ + ); + + } else { + ring_wr32(ring, RNP_DMA_REG_RX_DESC_FETCH_CTRL, + 0 | (TSRN10_RX_DEFAULT_LINE << 0) /* rx-desc-flow */ + | (TSRN10_RX_DEFAULT_BURST << 16) + /* max-read-desc-cnt */ + ); + } + /* setup rx drop */ + if (adapter->rx_drop_status & BIT(q_idx)) { + ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH, + adapter->drop_time); + } else { + if (hw->ncsi_en) { + ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH, + adapter->drop_time); + } else { + ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH, 0); + } + } + + if (ring->ring_flags & RNP_RING_IRQ_MISS_FIX) + ring_wr32(ring, RNP_DMA_INT_TRIG, TX_INT_MASK | RX_INT_MASK); + + ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER, + adapter->rx_usecs * hw->usecstocount); + ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_PKTCNT, adapter->rx_frames); + rnp_alloc_rx_buffers(ring, rnp_desc_unused_rx(ring)); +} + +static void rnp_configure_virtualization(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + u32 ring, vfnum; + u64 real_rate = 0; + int i, vf_ring, j; + + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) { + hw->ops.set_sriov_status(hw, false); + return; + } + + /* Enable only the PF's pool for Tx/Rx */ + + if (adapter->flags2 & RNP_FLAG2_BRIDGE_MODE_VEB) { + dma_wr32(dma, RNP_DMA_CONFIG, + dma_rd32(dma, RNP_DMA_CONFIG) & (~DMA_VEB_BYPASS)); + adapter->flags2 |= RNP_FLAG2_BRIDGE_MODE_VEB; + } + ring = adapter->tx_ring[0]->rnp_queue_idx; + hw->ops.set_sriov_status(hw, true); + + /* store vfnum */ + vfnum = hw->max_vfs - 1; + hw->veb_ring = ring; + hw->vfnum = vfnum; + /* use last-vf's table entry. 
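+	 * The PF is addressed through the last VF index; vf_num_for_pf is
+	 * that index with bit 7 (0x80) set.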
*/ + adapter->vf_num_for_pf = 0x80 | vfnum; + + /* setup vf tx rate setup here */ + for (i = 0; i < adapter->num_vfs; i++) { + real_rate = (adapter->vfinfo[i].tx_rate * 1024 * 128) / + hw->sriov_ring_limit; + for (j = 0; j < hw->sriov_ring_limit; j++) { + vf_ring = rnp_get_vf_ringnum(hw, i, j); + rnp_setup_ring_maxrate(adapter, vf_ring, real_rate); + } + } +} + +static void rnp_set_rx_buffer_len(struct rnp_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN * 3; + struct rnp_ring *rx_ring; + int i; + + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN); + + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_ring = adapter->rx_ring[i]; +#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT + clear_bit(__RNP_RX_3K_BUFFER, &rx_ring->state); + clear_bit(__RNP_RX_BUILD_SKB_ENABLED, &rx_ring->state); +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC + set_bit(__RNP_RX_BUILD_SKB_ENABLED, &rx_ring->state); + +#else /* !HAVE_SWIOTLB_SKIP_CPU_SYNC */ + +#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */ + +#ifdef OPTM_WITH_LPAGE + rx_ring->rx_page_buf_nums = RNP_PAGE_BUFFER_NUMS(rx_ring); + rx_ring->rx_per_buf_mem = + ALIGN((rnp_rx_offset(rx_ring) + rnp_rx_bufsz(rx_ring) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + RNP_RX_HWTS_OFFSET), + 1024); +#endif + +#else + rx_ring->rx_buf_len = max_frame; +#endif /* CONFIG_RNP_DISABLE_PACKET_SPLIT */ + } +} + +/** + * rnp_configure_rx - Configure 8259x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void rnp_configure_rx(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + int i; + u32 rxctrl = 0, dma_axi_ctl; + + /* disable receives while setting up the descriptors */ + /* set_rx_buffer_len must be called before ring initialization */ + rnp_set_rx_buffer_len(adapter); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + rnp_configure_rx_ring(adapter, adapter->rx_ring[i]); + + if (adapter->num_rx_queues > 0) { + wr32(hw, RNP_ETH_DEFAULT_RX_RING, + adapter->rx_ring[0]->rnp_queue_idx); + } + + /* enable all receives */ + rxctrl |= 0; + + dma_axi_ctl = dma_rd32(dma, RNP_DMA_AXI_EN); + dma_axi_ctl |= RX_AXI_RW_EN; + dma_wr32(dma, RNP_DMA_AXI_EN, dma_axi_ctl); +} + +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef NETIF_F_HW_VLAN_CTAG_TX +static int rnp_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +#else /* !NETIF_F_HW_VLAN_CTAG_TX */ +static int rnp_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +#endif /* NETIF_F_HW_VLAN_CTAG_TX */ +#else /* !HAVE_INT_NDO_VLAN_RX_ADD_VID */ +static void rnp_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + bool veb_setup = true; + bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + + if (sriov_flag) { + if (hw->feature_flags & RNP_VEB_VLAN_MASK_EN) { + if (hw->ops.set_veb_vlan_mask) { + if (hw->ops.set_veb_vlan_mask( + hw, vid, hw->vfnum, true) != 0) { + dev_err(&adapter->pdev->dev, + "out of vlan entries in sriov mode \n"); +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return -EACCES; +#else + return; +#endif + } + } + } else { + /* in sriov 
mode */ + if ((vid) && (adapter->vf_vlan) && + (vid != adapter->vf_vlan)) { + dev_err(&adapter->pdev->dev, + "only 1 vlan in sriov mode \n"); +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return -EACCES; +#else + return; +#endif + } + + /* update this */ + if (vid) { + adapter->vf_vlan = vid; + if (hw->ops.set_vf_vlan_mode) { + if (hw->feature_flags & + RNP_NET_FEATURE_VF_FIXED) + hw->ops.set_vf_vlan_mode( + hw, vid, 0, true); + else + hw->ops.set_vf_vlan_mode( + hw, vid, hw->vfnum, + true); + } + } + } + } + +#ifndef HAVE_VLAN_RX_REGISTER + if (vid) { + if (proto == htons(ETH_P_8021Q)) { + adapter->vlan_count++; + } + } + + if (vid < VLAN_N_VID) { + if (proto != htons(ETH_P_8021Q)) { + set_bit(vid, adapter->active_vlans_stags); + veb_setup = false; + } else { + set_bit(vid, adapter->active_vlans); + } + } +#endif + + if (hw->ops.set_vlan_filter) { + hw->ops.set_vlan_filter(hw, vid, true, + (sriov_flag && veb_setup)); + } + +#ifndef HAVE_NETDEV_VLAN_FEATURES + /* + * Copy feature flags from netdev to the vlan netdev for this vid. + * This allows things like TSO to bubble down to our vlan device. + * Some vlans, such as VLAN 0 for DCB will not have a v_netdev so + * we will not have a netdev that needs updating. + */ + if (adapter->vlgrp) { + struct vlan_group *vlgrp = adapter->vlgrp; + struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid); + + if (v_netdev) { + v_netdev->features |= netdev->features; + vlan_group_set_device(vlgrp, vid, v_netdev); + } + } +#endif /* HAVE_NETDEV_VLAN_FEATURES */ +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; +#endif +} + +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef NETIF_F_HW_VLAN_CTAG_RX +static int rnp_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +#else /* !NETIF_F_HW_VLAN_CTAG_RX */ +static int rnp_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +#endif /* NETIF_F_HW_VLAN_CTAG_RX */ +#else +static void rnp_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +#endif +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct rnp_eth_info *eth = &hw->eth; + int i; + bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + bool veb_setup = true; + + if (!vid) +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; +#else + return; +#endif + +#ifdef HAVE_VLAN_RX_REGISTER + if (!test_bit(__RNP_DOWN, &adapter->state)) + rnp_irq_disable(adapter); + + vlan_group_set_device(adapter->vlgrp, vid, NULL); + + if (!test_bit(__RNP_DOWN, &adapter->state)) + rnp_irq_enable(adapter); + +#endif /* HAVE_VLAN_RX_REGISTER */ + + if (sriov_flag) { + if (vid) { + int true_remove = 1; + /* clean this */ + adapter->vf_vlan = 0; + for (i = 0; i < adapter->num_vfs; i++) { + if (vid == adapter->vfinfo[i].vf_vlan) { + true_remove = 0; + } + if (vid == adapter->vfinfo[i].pf_vlan) { + true_remove = 0; + } + /* setup pf_vlan */ + } + /* if no vf use this vid */ + if (true_remove) { + /* if remove stags */ +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef NETIF_F_HW_VLAN_CTAG_RX + if (proto != htons(ETH_P_8021Q)) { + veb_setup = false; +#ifndef HAVE_VLAN_RX_REGISTER + if (!test_bit(vid, + adapter->active_vlans)) + true_remove = 1; +#endif /* HAVE_VLAN_RX_REGISTER */ + } else { + /* if remove ctags */ +#ifdef NETIF_F_HW_VLAN_STAG_RX +#ifndef HAVE_VLAN_RX_REGISTER + if (!test_bit(vid, + adapter->active_vlans_stags)) + true_remove = 1; +#endif /* HAVE_VLAN_RX_REGISTER */ +#endif + } +#endif /* NETIF_F_HW_VLAN_CTAG_RX */ +#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ + if ((adapter->flags2 & 
RNP_FLAG2_VLAN_STAGS_ENABLED) && + (vid == adapter->stags_vid)) + true_remove = 0; + // if no other tags use this vid + if (true_remove) { + // should also check stags + hw->ops.set_vlan_filter(hw, vid, false, + veb_setup); + } + } + /* always clean veb */ + hw->ops.set_vlan_filter(hw, vid, true, false); + + if (hw->ops.set_vf_vlan_mode) { + if (hw->feature_flags & + RNP_NET_FEATURE_VF_FIXED) + hw->ops.set_vf_vlan_mode(hw, vid, 0, + false); + else + hw->ops.set_vf_vlan_mode( + hw, vid, hw->vfnum, false); + } + + /* remove veb */ + if (hw->feature_flags & RNP_VEB_VLAN_MASK_EN) { + if (hw->ops.set_veb_vlan_mask) { + hw->ops.set_veb_vlan_mask( + hw, vid, hw->vfnum, false); + } + } + } + } else { + int true_remove = 0; +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef NETIF_F_HW_VLAN_CTAG_RX + if (proto != htons(ETH_P_8021Q)) { + veb_setup = false; +#ifndef HAVE_VLAN_RX_REGISTER + if (!test_bit(vid, adapter->active_vlans)) + true_remove = 1; +#endif /* HAVE_VLAN_RX_REGISTER */ + + } else { + /* if remove ctags */ +#ifdef NETIF_F_HW_VLAN_STAG_RX +#ifndef HAVE_VLAN_RX_REGISTER + if (!test_bit(vid, adapter->active_vlans_stags)) + true_remove = 1; +#endif /* HAVE_VLAN_RX_REGISTER */ +#endif + } +#endif /* NETIF_F_HW_VLAN_CTAG_RX */ +#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ + if (true_remove) { + if ((adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) && + (vid == adapter->stags_vid)) + goto SKIP_REMOVE; + // should also check stags + hw->ops.set_vlan_filter(hw, vid, false, false); + } + } +SKIP_REMOVE: + /* need set ncsi vfta again */ + if (hw->ncsi_en) + eth->ops.ncsi_set_vfta(eth); + +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef NETIF_F_HW_VLAN_CTAG_RX +#ifndef HAVE_VLAN_RX_REGISTER + if (vid) { + if (proto == htons(ETH_P_8021Q)) { + /* should check proto todo */ + adapter->vlan_count--; + } + } + if (proto == htons(ETH_P_8021Q)) + clear_bit(vid, adapter->active_vlans); + /* clear stags */ +#ifdef NETIF_F_HW_VLAN_STAG_RX + if (proto != htons(ETH_P_8021Q)) + clear_bit(vid, adapter->active_vlans_stags); +#endif /* NETIF_F_HW_VLAN_STAG_RX */ +#endif /* HAVE_VLAN_RX_REGISTER */ +#endif /* NETIF_F_HW_VLAN_CTAG_RX */ +#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; +#endif +} + +#endif + +/** + * rnp_vlan_strip_disable - helper to disable hw vlan stripping + * @adapter: driver data + */ +static void rnp_vlan_strip_disable(struct rnp_adapter *adapter) +{ + int i; + struct rnp_ring *tx_ring; + struct rnp_hw *hw = &adapter->hw; + + for (i = 0; i < adapter->num_rx_queues; i++) { + tx_ring = adapter->rx_ring[i]; + hw->ops.set_vlan_strip(hw, tx_ring->rnp_queue_idx, false); + } +} + +/** + * rnp_vlan_strip_enable - helper to enable hw vlan stripping + * @adapter: driver data + */ +static void rnp_vlan_strip_enable(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct rnp_ring *tx_ring; + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) { + tx_ring = adapter->rx_ring[i]; + + hw->ops.set_vlan_strip(hw, tx_ring->rnp_queue_idx, true); + } +} + +static void rnp_remove_vlan(struct rnp_adapter *adapter) +{ + adapter->vlan_count = 0; +} + +static void rnp_restore_vlan(struct rnp_adapter *adapter) +{ + u16 vid; + struct rnp_hw *hw = &adapter->hw; + struct rnp_eth_info *eth = &hw->eth; + int i; + + /* in stags open, set stags_vid to vlan filter */ + if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) + eth->ops.set_vfta(eth, adapter->stags_vid, true); + +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef NETIF_F_HW_VLAN_CTAG_TX + 
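+	/* VLAN 0 is re-added first, then the user-configured VLANs are
+	 * restored from active_vlans below
+	 */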
rnp_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); +#else /* !NETIF_F_HW_VLAN_CTAG_TX */ + rnp_vlan_rx_add_vid(adapter->netdev, 0); +#endif /* NETIF_F_HW_VLAN_CTAG_TX */ +#else /* !HAVE_INT_NDO_VLAN_RX_ADD_VID */ + rnp_vlan_rx_add_vid(adapter->netdev, 0); +#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ + +#ifndef HAVE_VLAN_RX_REGISTER + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) { +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef NETIF_F_HW_VLAN_CTAG_TX + rnp_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +#else /* !NETIF_F_HW_VLAN_CTAG_TX */ + rnp_vlan_rx_add_vid(adapter->netdev, vid); +#endif /* NETIF_F_HW_VLAN_CTAG_TX */ +#else /* !HAVE_INT_NDO_VLAN_RX_ADD_VID */ + rnp_vlan_rx_add_vid(adapter->netdev, vid); +#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ + } +#endif /* HAVE_VLAN_RX_REGISTER */ + /* config vlan mode for mac */ + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + for (i = 0; i < adapter->num_vfs; i++) { + vid = adapter->vfinfo[i].vf_vlan; + if (vid) { +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef NETIF_F_HW_VLAN_CTAG_TX + rnp_vlan_rx_add_vid(adapter->netdev, + htons(ETH_P_8021Q), vid); +#else /* !NETIF_F_HW_VLAN_CTAG_TX */ + rnp_vlan_rx_add_vid(adapter->netdev, vid); +#endif /* NETIF_F_HW_VLAN_CTAG_TX */ +#else /* !HAVE_INT_NDO_VLAN_RX_ADD_VID */ + rnp_vlan_rx_add_vid(adapter->netdev, vid); +#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ + } + vid = adapter->vfinfo[i].pf_vlan; + if (vid) { +#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef NETIF_F_HW_VLAN_CTAG_TX + rnp_vlan_rx_add_vid(adapter->netdev, + htons(ETH_P_8021Q), vid); +#else /* !NETIF_F_HW_VLAN_CTAG_TX */ + rnp_vlan_rx_add_vid(adapter->netdev, vid); +#endif /* NETIF_F_HW_VLAN_CTAG_TX */ +#else /* !HAVE_INT_NDO_VLAN_RX_ADD_VID */ + rnp_vlan_rx_add_vid(adapter->netdev, vid); +#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ + } + } + } +} + +/** + * rnp_set_rx_mode - Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_method entry point is called whenever the unicast/multicast + * address list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast and + * promiscuous mode. 
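+ * In SR-IOV mode it also restores the per-VF MAC filters, and it
+ * re-applies hardware VLAN stripping according to the current netdev
+ * features.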
+ **/ +void rnp_set_rx_mode(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + netdev_features_t features; + bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + + hw->ops.set_rx_mode(hw, netdev, sriov_flag); + + if (sriov_flag) { + if (!test_and_set_bit(__RNP_USE_VFINFI, &adapter->state)) { + rnp_restore_vf_macvlans(adapter); + rnp_restore_vf_macs(adapter); + clear_bit(__RNP_USE_VFINFI, &adapter->state); + } + } + + features = netdev->features; + +#ifdef NETIF_F_HW_VLAN_CTAG_RX + if (features & NETIF_F_HW_VLAN_CTAG_RX) + rnp_vlan_strip_enable(adapter); + else + rnp_vlan_strip_disable(adapter); +#else + if (features & NETIF_F_HW_VLAN_RX) + rnp_vlan_strip_enable(adapter); + else + rnp_vlan_strip_disable(adapter); + +#endif + /* stags */ +#ifdef NETIF_F_HW_VLAN_STAG_RX + /* only do this if hw support stags */ + if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) { + if (features & NETIF_F_HW_VLAN_STAG_RX) + rnp_vlan_strip_enable(adapter); + else + rnp_vlan_strip_disable(adapter); + } +#endif +} + +static void rnp_napi_enable_all(struct rnp_adapter *adapter) +{ + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) + napi_enable(&adapter->q_vector[q_idx]->napi); +} + +static void rnp_napi_disable_all(struct rnp_adapter *adapter) +{ + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) + napi_disable(&adapter->q_vector[q_idx]->napi); +} + +static void rnp_fdir_filter_restore(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct hlist_node *node2; + struct rnp_fdir_filter *filter; + + spin_lock(&adapter->fdir_perfect_lock); + + /* enable tcam if set tcam mode */ + if (adapter->fdir_mode == fdir_mode_tcam) { + wr32(hw, RNP_ETH_TCAM_EN, 1); + wr32(hw, RNP_TOP_ETH_TCAM_CONFIG_ENABLE, 1); + wr32(hw, RNP_TCAM_CACHE_ENABLE, 0); + } + + /* setup ntuple */ + hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, + fdir_node) { + if ((!filter->vf_num) && + (filter->action != ACTION_TO_MPE)) { + rnp_fdir_write_perfect_filter( + adapter->fdir_mode, hw, &filter->filter, filter->hw_idx, + (filter->action == RNP_FDIR_DROP_QUEUE) ? + RNP_FDIR_DROP_QUEUE : + adapter->rx_ring[filter->action] + ->rnp_queue_idx, + (adapter->priv_flags & RNP_PRIV_FLAG_REMAP_PRIO) ? + true : + false); + } else { + rnp_fdir_write_perfect_filter( + adapter->fdir_mode, hw, &filter->filter, + filter->hw_idx, + (filter->action == RNP_FDIR_DROP_QUEUE) ? + RNP_FDIR_DROP_QUEUE : + filter->action, + (adapter->priv_flags & + RNP_PRIV_FLAG_REMAP_PRIO) ? 
+ true : + false); + } + } + + spin_unlock(&adapter->fdir_perfect_lock); +} + +static void rnp_configure_pause(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + + hw->ops.set_pause_mode(hw); +} + +static void rnp_vlan_stags_flag(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + + /* stags is added */ + if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) + hw->ops.set_txvlan_mode(hw, false); + else + hw->ops.set_txvlan_mode(hw, true); +} + +static void rnp_configure(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); +#if (PAGE_SIZE < 8192) +#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT + struct rnp_ring *rx_ring = adapter->rx_ring[0]; +#endif +#endif + /* + * We must restore virtualization before VLANs or else + * the VLVF registers will not be populated + */ + rnp_configure_virtualization(adapter); + + /* Unicast, Multicast and Promiscuous mode set */ + rnp_set_rx_mode(adapter->netdev); + /* reconfigure hw */ + hw->ops.set_mac(hw, hw->mac.addr, sriov_flag); + + /* in sriov mode vlan is not reset */ + rnp_restore_vlan(adapter); + + /* we first update rx_offset */ +#if (PAGE_SIZE < 8192) +#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT + /* setup before calculate dma_split_size */ + rnp_set_rx_buffer_len(adapter); + hw->dma_split_size = rnp_rx_pg_size(rx_ring) / 2 - + rnp_rx_offset(rx_ring) - + sizeof(struct skb_shared_info); +#else + hw->dma_split_size = 1536; +#endif +#else + /* if mtu more than this */ + hw->dma_split_size = SKB_WITH_OVERHEAD(PAGE_SIZE) - RNP_SKB_PAD; + + if (hw->max_length_current >= 1536) + hw->dma_split_size = min_t(int, hw->dma_split_size, hw->max_length_current); + /* up to 16-asign */ + hw->dma_split_size = (hw->dma_split_size + 15) & (~0xf); +#endif + hw->ops.update_hw_info(hw); + + /* init setup pause */ + rnp_configure_pause(adapter); + rnp_vlan_stags_flag(adapter); + rnp_init_rss_key(adapter); + rnp_init_rss_table(adapter); + + if (adapter->flags & RNP_FLAG_FDIR_HASH_CAPABLE) { + + } else if (adapter->flags & RNP_FLAG_FDIR_PERFECT_CAPABLE) { + + rnp_fdir_filter_restore(adapter); + + } + + /* setup vxlan match mode */ + if (adapter->priv_flags & RNP_PRIV_FLAG_VXLAN_INNER_MATCH) + hw->ops.set_vxlan_mode(hw, true); + else + hw->ops.set_vxlan_mode(hw, false); + rnp_configure_tx(adapter); + rnp_configure_rx(adapter); +} + +static inline bool rnp_is_sfp(struct rnp_hw *hw) +{ + return true; +} + +/** + * rnp_sfp_link_config - set up SFP+ link + * @adapter: pointer to private adapter struct + **/ +static void rnp_sfp_link_config(struct rnp_adapter *adapter) +{ + /* + * We are assuming the worst case scenario here, and that + * is that an SFP was inserted/removed after the reset + * but before SFP detection was enabled. 
As such the best + * solution is to just start searching as soon as we start + */ + adapter->flags2 |= RNP_FLAG2_SFP_NEEDS_RESET; +} + +static void rnp_up_complete(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + int i; + + rnp_configure_msix(adapter); + + /* enable the optics for n10 SFP+ fiber */ + if (hw->ops.enable_tx_laser) + hw->ops.enable_tx_laser(hw); + + smp_mb__before_atomic(); + clear_bit(__RNP_DOWN, &adapter->state); + rnp_napi_enable_all(adapter); + + if (rnp_is_sfp(hw)) { + rnp_sfp_link_config(adapter); + } + /*clear any pending interrupts*/ + rnp_irq_enable(adapter); + + /* enable transmits */ + netif_tx_start_all_queues(adapter->netdev); + + /* enable rx transmit */ + for (i = 0; i < adapter->num_rx_queues; i++) + /* setup rx scater */ + ring_wr32(adapter->rx_ring[i], RNP_DMA_RX_START, 1); + + /* bring the link up in the watchdog, this could race with our first + * link up interrupt but shouldn't be a problems + */ + adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + mod_timer(&adapter->service_timer, jiffies); + + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + hw->link = 0; + if(hw->saved_force_link_speed != RNP_LINK_SPEED_UNKNOWN ) + rnp_mbx_force_speed(hw, hw->saved_force_link_speed); + hw->ops.set_mbx_link_event(hw, 1); + hw->ops.set_mbx_ifup(hw, 1); +} + +void rnp_reinit_locked(struct rnp_adapter *adapter) +{ + WARN_ON(in_interrupt()); + /* put off any impending NetWatchDogTimeout */ + while (test_and_set_bit(__RNP_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + rnp_down(adapter); + /* + * If SR-IOV enabled then wait a bit before bringing the adapter + * back up to give the VFs time to respond to the reset. The + * two second wait is based upon the watchdog timer cycle in + * the VF driver. + */ + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) + msleep(2000); + rnp_up(adapter); + + clear_bit(__RNP_RESETTING, &adapter->state); +} + +void rnp_up(struct rnp_adapter *adapter) +{ + /* hardware has been reset, we need to reload some things */ + rnp_configure(adapter); + rnp_up_complete(adapter); +} + +void rnp_reset(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + int err; + bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + + rnp_logd(LOG_ADPT_STAT, "%s\n", __func__); + + /* lock SFP init bit to prevent race conditions with the watchdog */ + while (test_and_set_bit(__RNP_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + + /* clear all SFP and link config related flags while holding SFP_INIT */ + adapter->flags2 &= + ~(RNP_FLAG2_SEARCH_FOR_SFP | RNP_FLAG2_SFP_NEEDS_RESET); + adapter->flags &= ~RNP_FLAG_NEED_LINK_CONFIG; + + err = hw->ops.init_hw(hw); + + if (err) { + e_dev_err("init_hw: Hardware Error: err:%d. line:%d\n", err, + __LINE__); + } + + clear_bit(__RNP_IN_SFP_INIT, &adapter->state); + + /* reprogram the RAR[0] in case user changed it. 
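+	 * (init_hw() above may have cleared it, so the current MAC address
+	 * is written back to the hardware)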
*/ + hw->ops.set_mac(hw, hw->mac.addr, sriov_flag); + + if (module_enable_ptp) { + if (adapter->flags2 & RNP_FLAG2_PTP_ENABLED && + (adapter->ptp_rx_en || adapter->ptp_tx_en)) + rnp_ptp_reset(adapter); + } +} + +#ifdef OPTM_WITH_LPAGE +/** + * rnp_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void rnp_clean_rx_ring(struct rnp_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + struct rnp_rx_buffer *rx_buffer; +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif + + if (!rx_ring->rx_buffer_info) + return; + + if (rx_ring->skb) + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + rx_buffer = &rx_ring->rx_buffer_info[i]; + + /* Free all the Rx ring sk_buffs */ + while (i != rx_ring->next_to_alloc) { + if (!rx_buffer->page) + goto next_buffer; + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, + rx_buffer->page_offset, + rnp_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + rnp_rx_pg_size(rx_ring), DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + RNP_RX_DMA_ATTR); +#endif + + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + /* now this page is not used */ + rx_buffer->page = NULL; +next_buffer: + i++; + rx_buffer++; + if (i == rx_ring->count) { + i = 0; + rx_buffer = rx_ring->rx_buffer_info; + } + } + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +#else +/** + * rnp_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void rnp_clean_rx_ring(struct rnp_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + struct rnp_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif + + /* Free all the Rx ring sk_buffs */ +#ifdef CONFIG_RNP_DISABLE_PACKET_SPLIT + while (i != rx_ring->next_to_use) { +#else + while (i != rx_ring->next_to_alloc) { +#endif + if (rx_buffer->skb) { + struct sk_buff *skb = rx_buffer->skb; + + dev_kfree_skb(skb); + rx_buffer->skb = NULL; + } + +#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
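+		 * The buffer is unmapped with DMA_ATTR_SKIP_CPU_SYNC, so the
+		 * sync for the CPU is done explicitly here before the page is
+		 * released.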
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, + rx_buffer->page_offset, + rnp_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + rnp_rx_pg_size(rx_ring), DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + RNP_RX_DMA_ATTR); +#endif + + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + /* now this page is not used */ + rx_buffer->page = NULL; +#else /* CONFIG_RNP_DISABLE_PACKET_SPLIT */ + if (rx_buffer->dma) { + dma_unmap_single(rx_ring->dev, rx_buffer->dma, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + rx_buffer->dma = 0; + } +#endif /* CONFIG_RNP_DISABLE_PACKET_SPLIT */ + i++; + rx_buffer++; + if (i == rx_ring->count) { + i = 0; + rx_buffer = rx_ring->rx_buffer_info; + } + } + +#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +#endif +} +#endif + +/** + * rnp_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void rnp_clean_tx_ring(struct rnp_ring *tx_ring) +{ + unsigned long size; + u16 i = tx_ring->next_to_clean; + struct rnp_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + + BUG_ON(tx_ring == NULL); + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_buffer_info) + return; + + while (i != tx_ring->next_to_use) { + struct rnp_tx_desc *eop_desc, *tx_desc; + + dev_kfree_skb_any(tx_buffer->skb); + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + + eop_desc = tx_buffer->next_to_watch; + tx_desc = RNP_TX_DESC(tx_ring, i); + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = RNP_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + netdev_tx_reset_queue(txring_txq(tx_ring)); + size = sizeof(struct rnp_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * rnp_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void rnp_clean_all_rx_rings(struct rnp_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + rnp_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * rnp_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void rnp_clean_all_tx_rings(struct rnp_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + rnp_clean_tx_ring(adapter->tx_ring[i]); +} + +static void rnp_fdir_filter_exit(struct rnp_adapter *adapter) +{ + struct hlist_node *node2; + struct rnp_fdir_filter *filter; + struct rnp_hw *hw = &adapter->hw; + + spin_lock(&adapter->fdir_perfect_lock); + + hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, + fdir_node) { + /* call earase to hw */ + 
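+		/* each rule is erased from the hardware tables first, then
+		 * the list node is unlinked and freed
+		 */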
rnp_fdir_erase_perfect_filter(adapter->fdir_mode, hw, + &filter->filter, filter->hw_idx); + + hlist_del(&filter->fdir_node); + kfree(filter); + } + adapter->fdir_filter_count = 0; + adapter->layer2_count = hw->layer2_count; + adapter->tuple_5_count = hw->tuple5_count; + + spin_unlock(&adapter->fdir_perfect_lock); +} + +static int rnp_xmit_nop_frame_ring(struct rnp_adapter *adapter, + struct rnp_ring *tx_ring) +{ + u16 i = tx_ring->next_to_use; + struct rnp_tx_desc *tx_desc; + + tx_desc = RNP_TX_DESC(tx_ring, i); + + /* set length to 0 */ + tx_desc->blen_mac_ip_len = 0; + tx_desc->vlan_cmd = cpu_to_le32(RNP_TXD_CMD_EOP | RNP_TXD_CMD_RS); + /* + * Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + /* update tail */ + rnp_wr_reg(tx_ring->tail, 0); + return 0; +} + +static void print_status(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct rnp_eth_info *eth = &hw->eth; + int i; + struct rnp_dma_info *dma = &hw->dma; + + printk("eth 0x120 %x\n", eth_rd32(eth, 0x120)); + printk("eth 0x124 %x\n", eth_rd32(eth, 0x124)); + + for (i = 0x300; i < 0x318; i = i + 4) { + printk("eth 0x%x %x\n", i, eth_rd32(eth, i)); + } + + printk("eth 0x%x %x\n", 0x98, eth_rd32(eth, 0x98)); + printk("eth 0x%x %x\n", 0x220, eth_rd32(eth, 0x220)); + + for (i = 0x138; i < 0x158; i = i + 4) { + printk("dma 0x%x %x\n", i, dma_rd32(dma, i)); + } + i = 0x170; + printk("dma 0x%x %x\n", i, dma_rd32(dma, i)); + i = 0x174; + printk("dma 0x%x %x\n", i, dma_rd32(dma, i)); + for (i = 0x214; i < 0x220; i = i + 4) { + printk("dma 0x%x %x\n", i, dma_rd32(dma, i)); + } + for (i = 0x234; i < 0x270; i = i + 4) { + printk("dma 0x%x %x\n", i, dma_rd32(dma, i)); + } +} + +void rnp_down(struct rnp_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct rnp_hw *hw = &adapter->hw; + int i; + int free_tx_ealay = 0; + int err = 0; + bool is_pci_dead = pci_channel_offline(adapter->pdev); + bool is_pci_online = !is_pci_dead; + /* signal that we are down to the interrupt handler */ + set_bit(__RNP_DOWN, &adapter->state); + if ((!hw->ncsi_en) && (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED))) + hw->ops.set_mac_rx(hw, false); + + if (hw->ncsi_en) { + /* if we false down, we should set mac loopback */ + hw->ops.set_mac_rx(hw, false); + } + + hw->ops.set_mbx_link_event(hw, 0); + hw->ops.set_mbx_ifup(hw, 0); + + if (hw->ops.clean_link) + hw->ops.clean_link(hw); + + /* if carrier on before */ + if (netif_carrier_ok(netdev)) + e_info(drv, "NIC Link is Down\n"); + + rnp_remove_vlan(adapter); + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + usleep_range(5000, 10000); + /* if we have tx desc to clean */ + for (i = 0; i < adapter->num_tx_queues && is_pci_online; i++) { + struct rnp_ring *tx_ring = adapter->tx_ring[i]; + + if (!(tx_ring->ring_flags & RNP_RING_SKIP_TX_START)) { + int head, tail; + int timeout = 0; + + free_tx_ealay = 1; + + head = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_HEAD); + tail = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_TAIL); + + while (head != tail) { + usleep_range(30000, 50000); + head = ring_rd32(tx_ring, + RNP_DMA_REG_TX_DESC_BUF_HEAD); + tail = ring_rd32(tx_ring, + RNP_DMA_REG_TX_DESC_BUF_TAIL); + timeout++; + if ((timeout >= 100) && (timeout < 101)) { + e_info(drv, + "wait tx done timeout %x %x\n", 
+ head, tail); + /* set this to hold hardware status */ + adapter->priv_flags |= + RNP_PRIV_FLGA_TEST_TX_HANG; + print_status(adapter); + err = 1; + } + if (timeout >= 200) { + e_info(drv, + "200 wait tx done timeout %x %x\n", + head, tail); + print_status(adapter); + break; + } + } + } + } + + { + int time = 0; + + while (test_bit(__RNP_SERVICE_CHECK, &adapter->state)) { + usleep_range(100, 200); + time++; + if (time > 100) + break; + } + } + + if (free_tx_ealay) + rnp_clean_all_tx_rings(adapter); + + usleep_range(2000, 5000); + + rnp_irq_disable(adapter); + + usleep_range(5000, 10000); + + netif_tx_disable(netdev); + + /* disable all enabled rx queues */ + for (i = 0; i < adapter->num_rx_queues && is_pci_online; i++) { + rnp_disable_rx_queue(adapter, adapter->rx_ring[i]); + /* only handle when srio enable and change rx length setup */ + if ((((adapter->flags & RNP_FLAG_SRIOV_ENABLED) || + hw->ncsi_en)) && + (adapter->rx_ring[i]->ring_flags & + RNP_RING_FLAG_CHANGE_RX_LEN)) { + int head; + struct rnp_ring *ring = adapter->rx_ring[i]; + + head = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_HEAD); + adapter->rx_ring[i]->ring_flags &= + (~RNP_RING_FLAG_CHANGE_RX_LEN); + /* we should delay setup rx length to + * wait rx head to 0 + */ + if (head >= adapter->rx_ring[i]->reset_count) { + adapter->rx_ring[i]->ring_flags |= + RNP_RING_FLAG_DELAY_SETUP_RX_LEN; + /* set sw count to head + 1*/ + adapter->rx_ring[i]->temp_count = head + 1; + } + } + /* only down without rx_len change no need handle */ + } + /* call carrier off first to avoid false dev_watchdog timeouts */ + + rnp_napi_disable_all(adapter); + + adapter->flags2 &= + ~(RNP_FLAG2_FDIR_REQUIRES_REINIT | RNP_FLAG2_RESET_REQUESTED); + adapter->flags &= ~RNP_FLAG_NEED_LINK_UPDATE; + + if (adapter->num_vfs) { + /* ping all the active vfs to let them know we are going down */ + rnp_ping_all_vfs(adapter); + /* Disable all VFTE/VFRE TX/RX */ + rnp_disable_tx_rx(adapter); + } + + if(is_pci_online){ + u32 status = 0; + int timeout = 0; + + do { + status = rd32(hw, RNP_DMA_AXI_READY); + usleep_range(100, 200); + timeout++; + } while ((status != 0xffff) && (timeout < 100)); + + if (timeout > 100) + printk("wait axi ready timeout\n"); + } + + + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < adapter->num_tx_queues && is_pci_online; i++) { + struct rnp_ring *tx_ring = adapter->tx_ring[i]; + int count = tx_ring->count; + int head; + int timeout = 0; + + /* 1. stop queue */ + if (!err) { + if (!(tx_ring->ring_flags & RNP_RING_SKIP_TX_START)) { + ring_wr32(tx_ring, RNP_DMA_TX_START, 0); + } + } + /* 2. 
try to set tx head to 0 in sriov mode + * since we don't reset + */ + if ((((adapter->flags & RNP_FLAG_SRIOV_ENABLED) || + hw->ncsi_en)) && + (!(tx_ring->ring_flags & RNP_RING_SIZE_CHANGE_FIX))) { + /* only do this if hw not support tx head to zero auto */ + /* n10 should wait tx_ready */ + u32 status = 0; + + timeout = 0; + do { + status = ring_rd32(tx_ring, RNP_DMA_TX_READY); + usleep_range(100, 200); + timeout++; + rnp_dbg("wait %d tx ready to 1\n", + tx_ring->rnp_queue_idx); + } while ((status != 1) && (timeout < 100)); + + if (timeout >= 100) + printk("wait tx ready timeout\n"); + + head = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_HEAD); + if (head != 0) { + u16 next_to_use = tx_ring->next_to_use; + + if (head != (count - 1)) { + /* 3 set len head + 1 */ + ring_wr32(tx_ring, + RNP_DMA_REG_TX_DESC_BUF_LEN, + head + 1); + } + /* set to use head */ + tx_ring->next_to_use = head; + /* 4 send a len zero packet */ + rnp_xmit_nop_frame_ring(adapter, tx_ring); + if (!(tx_ring->ring_flags & + RNP_RING_SKIP_TX_START)) + ring_wr32(tx_ring, RNP_DMA_TX_START, 1); + /* 5 wait head to zero */ + while ((head != 0) && (timeout < 1000)) { + head = ring_rd32( + tx_ring, + RNP_DMA_REG_TX_DESC_BUF_HEAD); + usleep_range(10000, 20000); + timeout++; + } + if (timeout >= 1000) { + printk("[%s] Wait Tx-ring %d head to zero time out\n", + netdev->name, + tx_ring->rnp_queue_idx); + } + /* 6 stop queue again*/ + if (!(tx_ring->ring_flags & + RNP_RING_SKIP_TX_START)) + ring_wr32(tx_ring, RNP_DMA_TX_START, 0); + /* 7 write back next_to_use maybe hw hang */ + tx_ring->next_to_use = next_to_use; + } + } + } + if (!err) { + if (!pci_channel_offline(adapter->pdev)) { + if (hw->ncsi_en == 0 && + !(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) { + rnp_reset(adapter); + } + } + } + /* power down the optics for n10 SFP+ fiber */ + if (hw->ops.disable_tx_laser) + hw->ops.disable_tx_laser(hw); + + if (!free_tx_ealay) + rnp_clean_all_tx_rings(adapter); + + rnp_clean_all_rx_rings(adapter); + +#ifdef CONFIG_RNP_DCA + /* since we reset the hardware DCA settings were cleared */ + rnp_setup_dca(adapter); +#endif + + if (hw->ncsi_en) + hw->ops.set_mac_rx(hw, true); +} + +/** + * rnp_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +#ifdef HAVE_TX_TIMEOUT_TXQUEUE +static void rnp_tx_timeout(struct net_device *netdev, unsigned int txqueue) +#else +static void rnp_tx_timeout(struct net_device *netdev) +#endif +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + /* Do the reset outside of interrupt context */ + int i; + bool real_tx_hang = false; + +#define TX_TIMEO_LIMIT 16000 + for (i = 0; i < adapter->num_tx_queues; i++) { + struct rnp_ring *tx_ring = adapter->tx_ring[i]; + + if (check_for_tx_hang(tx_ring) && rnp_check_tx_hang(tx_ring)) + real_tx_hang = true; + } + + if (real_tx_hang) { + printk("hw real hang!!!!"); + /* Do the reset outside of interrupt context */ +#ifndef TEST_TX_HANG + rnp_tx_timeout_reset(adapter); + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) + adapter->flags2 |= RNP_FLAG2_RESET_PF; +#endif + } else { + printk("Fake Tx hang detected with timeout of %d " + "seconds\n", + netdev->watchdog_timeo / HZ); + + /* fake Tx hang - increase the kernel timeout */ + if (netdev->watchdog_timeo < TX_TIMEO_LIMIT) + netdev->watchdog_timeo *= 2; + } +} + +/** + * rnp_sw_init - Initialize general software structures (struct rnp_adapter) + * @adapter: board private structure to initialize + * + * rnp_sw_init initializes the Adapter private data structure. 
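+ * Queue and MSI-X limits are derived from the number of online CPUs
+ * and the vector count reported by the hardware.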
+ * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static int rnp_sw_init(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + unsigned int rss = 0, fdir; + int rss_limit = num_online_cpus(); +#ifdef RNP_MAX_RINGS + rss_limit = RNP_MAX_RINGS; +#endif + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* if this hw can setup msix count */ + rss = min_t(int, adapter->max_ring_pair_counts, rss_limit); + rss = min_t(int, rss, + hw->mac.max_msix_vectors - adapter->num_other_vectors); + adapter->ring_feature[RING_F_RSS].limit = + min_t(int, rss, adapter->max_ring_pair_counts); + + adapter->flags |= RNP_FLAG_VXLAN_OFFLOAD_CAPABLE; + adapter->flags |= RNP_FLAG_VXLAN_OFFLOAD_ENABLE; + + adapter->max_q_vectors = hw->max_msix_vectors - 1; + adapter->atr_sample_rate = 20; + + fdir = min_t(int, adapter->max_q_vectors, rss_limit); + adapter->ring_feature[RING_F_FDIR].limit = fdir; + + if (hw->feature_flags & RNP_NET_FEATURE_RX_NTUPLE_FILTER) { + spin_lock_init(&adapter->fdir_perfect_lock); + adapter->fdir_filter_count = 0; + adapter->fdir_mode = hw->fdir_mode; + /* fdir_pballoc not from zero, so add 2 */ + adapter->fdir_pballoc = 2 + hw->layer2_count + hw->tuple5_count; + adapter->layer2_count = hw->layer2_count; + adapter->tuple_5_count = hw->tuple5_count; + } +#ifdef CONFIG_RNP_DCA + /* we can't support dca */ + adapter->flags |= RNP_FLAG_DCA_CAPABLE; +#endif + + /* itr sw setup here */ + adapter->sample_interval = 10; + adapter->adaptive_rx_coal = 1; + adapter->adaptive_tx_coal = 1; + adapter->auto_rx_coal = 0; + adapter->napi_budge = 64; + /* set default work limits */ + adapter->tx_work_limit = RNP_DEFAULT_TX_WORK; + adapter->rx_usecs = RNP_PKT_TIMEOUT; + adapter->rx_usecs_usr_set = RNP_PKT_TIMEOUT; + adapter->rx_frames = RNP_RX_PKT_POLL_BUDGET; + adapter->tx_usecs = RNP_PKT_TIMEOUT_TX; + adapter->tx_usecs_usr_set = RNP_PKT_TIMEOUT_TX; + adapter->tx_frames = RNP_TX_PKT_POLL_BUDGET; + + /* set default ring sizes */ + adapter->tx_ring_item_count = RNP_DEFAULT_TXD; + adapter->rx_ring_item_count = RNP_DEFAULT_RXD; + + set_bit(__RNP_DOWN, &adapter->state); + + return 0; +} + +/** + * rnp_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int rnp_setup_tx_resources(struct rnp_ring *tx_ring, + struct rnp_adapter *adapter) +{ + struct device *dev = tx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = NUMA_NO_NODE; + int size; + + size = sizeof(struct rnp_tx_buffer) * tx_ring->count; + +#ifdef USE_NUMA_MEMORY + if (tx_ring->q_vector) + numa_node = tx_ring->q_vector->numa_node; + tx_ring->tx_buffer_info = vzalloc_node(size, numa_node); + if (!tx_ring->tx_buffer_info) + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; +#else + tx_ring->tx_buffer_info = kzalloc(size, GFP_KERNEL); +#endif + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct rnp_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + +#ifdef USE_NUMA_MEMORY + set_dev_node(dev, numa_node); +#endif + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, + GFP_KERNEL); +#ifdef USE_NUMA_MEMORY + set_dev_node(dev, orig_node); +#endif + if (!tx_ring->desc) + tx_ring->desc = 
dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + DPRINTK(IFUP, INFO, + "TxRing:%d, vector:%d ItemCounts:%d " + "desc:%p(0x%llx) node:%d\n", + tx_ring->rnp_queue_idx, tx_ring->q_vector->v_idx, + tx_ring->count, tx_ring->desc, (u64)tx_ring->dma, numa_node); + return 0; + +err: + +#ifdef USE_NUMA_MEMORY + vfree(tx_ring->tx_buffer_info); +#else + kfree(tx_ring->tx_buffer_info); +#endif + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * rnp_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int rnp_setup_all_tx_resources(struct rnp_adapter *adapter) +{ + int i, err = 0; + + tx_dbg("adapter->num_tx_queues:%d, adapter->tx_ring[0]:%p\n", + adapter->num_tx_queues, adapter->tx_ring[0]); + + for (i = 0; i < (adapter->num_tx_queues); i++) { + BUG_ON(adapter->tx_ring[i] == NULL); + err = rnp_setup_tx_resources(adapter->tx_ring[i], adapter); + if (!err) + continue; + + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + goto err_setup_tx; + } + + return 0; +err_setup_tx: + /* rewind the index freeing the rings as we go */ + while (i--) + rnp_free_tx_resources(adapter->tx_ring[i]); + return err; +} + +/** + * rnp_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int rnp_setup_rx_resources(struct rnp_ring *rx_ring, + struct rnp_adapter *adapter) +{ + struct device *dev = rx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = NUMA_NO_NODE; + int size; + + BUG_ON(rx_ring == NULL); + + size = sizeof(struct rnp_rx_buffer) * rx_ring->count; + +#ifdef USE_NUMA_MEMORY + if (rx_ring->q_vector) + numa_node = rx_ring->q_vector->numa_node; + + rx_ring->rx_buffer_info = vzalloc_node(size, numa_node); + if (!rx_ring->rx_buffer_info) + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; +#else + rx_ring->rx_buffer_info = kzalloc(size, GFP_KERNEL); +#endif + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union rnp_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + +#ifdef USE_NUMA_MEMORY + set_dev_node(dev, numa_node); +#endif + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, + GFP_KERNEL); +#ifdef USE_NUMA_MEMORY + set_dev_node(dev, orig_node); +#endif + if (!rx_ring->desc) + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) + goto err; + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + DPRINTK(IFUP, INFO, + "RxRing:%d, vector:%d ItemCounts:%d " + "desc:%p(0x%llx) node:%d\n", + rx_ring->rnp_queue_idx, rx_ring->q_vector->v_idx, + rx_ring->count, rx_ring->desc, (u64)rx_ring->dma, numa_node); + + return 0; +err: + +#ifdef USE_NUMA_MEMORY + vfree(rx_ring->rx_buffer_info); +#else + kfree(rx_ring->rx_buffer_info); +#endif + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; 
+} + +/** + * rnp_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int rnp_setup_all_rx_resources(struct rnp_adapter *adapter) +{ + int i, err = 0; + u32 head; + + for (i = 0; i < adapter->num_rx_queues; i++) { + BUG_ON(adapter->rx_ring[i] == NULL); + + /* should check count and head */ + /* in sriov condition may head large than count */ + head = ring_rd32(adapter->rx_ring[i], + RNP_DMA_REG_RX_DESC_BUF_HEAD); + if (unlikely(head >= adapter->rx_ring[i]->count)) { + dbg("[%s] Ring %d head large than count", + adapter->netdev->name, + adapter->rx_ring[i]->rnp_queue_idx); + adapter->rx_ring[i]->ring_flags |= + RNP_RING_FLAG_DELAY_SETUP_RX_LEN; + adapter->rx_ring[i]->reset_count = + adapter->rx_ring[i]->count; + adapter->rx_ring[i]->count = head + 1; + } + err = rnp_setup_rx_resources(adapter->rx_ring[i], adapter); + if (!err) + continue; + + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + goto err_setup_rx; + } + + return 0; +err_setup_rx: + /* rewind the index freeing the rings as we go */ + while (i--) + rnp_free_rx_resources(adapter->rx_ring[i]); + return err; +} + +/** + * rnp_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void rnp_free_tx_resources(struct rnp_ring *tx_ring) +{ + BUG_ON(tx_ring == NULL); + + rnp_clean_tx_ring(tx_ring); +#ifdef USE_NUMA_MEMORY + vfree(tx_ring->tx_buffer_info); +#else + kfree(tx_ring->tx_buffer_info); +#endif + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * rnp_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void rnp_free_all_tx_resources(struct rnp_adapter *adapter) +{ + int i; + + for (i = 0; i < (adapter->num_tx_queues); i++) + rnp_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * rnp_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void rnp_free_rx_resources(struct rnp_ring *rx_ring) +{ + BUG_ON(rx_ring == NULL); + + rnp_clean_rx_ring(rx_ring); + +#ifdef USE_NUMA_MEMORY + vfree(rx_ring->rx_buffer_info); +#else + kfree(rx_ring->rx_buffer_info); +#endif + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * rnp_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void rnp_free_all_rx_resources(struct rnp_adapter *adapter) +{ + int i; + + for (i = 0; i < (adapter->num_rx_queues); i++) + if (adapter->rx_ring[i]->desc) + rnp_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * rnp_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int rnp_change_mtu(struct 
net_device *netdev, int new_mtu) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN * 2; + + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < hw->min_length) || (max_frame > hw->max_length)) + return -EINVAL; + + e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + + if (netdev->mtu == new_mtu) + return 0; + + /* must set new MTU before calling down or up */ + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + rnp_reinit_locked(adapter); + + rnp_msg_post_status(adapter, PF_SET_MTU); + + return 0; +} + +/** + * rnp_tx_maxrate - callback to set the maximum per-queue bitrate + * @netdev: network interface device structure + * @queue_index: Tx queue to set + * @maxrate: desired maximum transmit bitrate Mbps + **/ +__maybe_unused static int rnp_tx_maxrate(struct net_device *netdev, + int queue_index, u32 maxrate) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_ring *tx_ring = adapter->tx_ring[queue_index]; + u64 real_rate = 0; + + adapter->max_rate[queue_index] = maxrate; + rnp_dbg("%s: queue:%d maxrate:%d\n", __func__, queue_index, maxrate); + if (!maxrate) + return rnp_setup_tx_maxrate(tx_ring, 0, + adapter->hw.usecstocount * 1000000); + /* we need turn it to bytes/s */ + real_rate = ((u64)maxrate * 1024 * 128) * 90 / 100; + rnp_setup_tx_maxrate(tx_ring, real_rate, + adapter->hw.usecstocount * 1000000); + + return 0; +} + +/** + * rnp_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +int rnp_open(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int err; + + DPRINTK(IFUP, INFO, "ifup\n"); + + /* disallow open during test */ + if (test_bit(__RNP_TESTING, &adapter->state)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = rnp_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = rnp_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + rnp_configure(adapter); + + err = rnp_request_irq(adapter); + if (err) + goto err_req_irq; + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + if (err) + goto err_set_queues; + + if (module_enable_ptp) + rnp_ptp_register(adapter); + + rnp_up_complete(adapter); + + return 0; + +err_set_queues: + rnp_free_irq(adapter); +err_req_irq: + rnp_free_all_rx_resources(adapter); +err_setup_rx: + rnp_free_all_tx_resources(adapter); +err_setup_tx: + hw->ops.set_mbx_ifup(hw, 0); + rnp_reset(adapter); + + return err; +} + +/** + * rnp_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. 
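+ * When SR-IOV is enabled, the VFs are also notified of the link-down
+ * state before the routine returns.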
The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +int rnp_close(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + DPRINTK(IFDOWN, INFO, "ifdown\n"); + +#ifdef DISABLE_RX_IRQ + adapter->quit_poll_thread = true; +#endif + if (module_enable_ptp) + rnp_ptp_unregister(adapter); + + rnp_down(adapter); + rnp_free_irq(adapter); + rnp_free_all_tx_resources(adapter); + rnp_free_all_rx_resources(adapter); + + /* if in sriov mode send link down to all vfs */ + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + adapter->link_up = 0; + adapter->link_up_old = 0; + rnp_msg_post_status(adapter, PF_SET_LINK_STATUS); + /* wait all vf get this status */ + usleep_range(5000, 10000); + } + + return 0; +} + +#ifdef CONFIG_PM +static int rnp_resume(struct pci_dev *pdev) +{ + struct rnp_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + u32 err; + struct rnp_hw *hw = &adapter->hw; + + printk("call rnp_resume\n"); + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* + * pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. + */ + pci_save_state(pdev); + + err = pcim_enable_device(pdev); + if (err) { + e_dev_err("Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_wake_from_d3(pdev, false); + + switch (hw->hw_type) { + case rnp_hw_n10: + case rnp_hw_n400: + case rnp_hw_n20: + case rnp_hw_uv440: + wait_mbx_init_done(hw); +#ifdef FIX_VF_BUG + rnp_wr_reg(adapter->io_addr_bar0 + + (0x7982fc & (pci_resource_len(pdev, 0) - 1)), + 0); +#endif + break; + default: + + break; + } + + rtnl_lock(); + + err = rnp_init_interrupt_scheme(adapter); + if (!err) + err = register_mbx_irq(adapter); + + if (hw->ops.driver_status) + hw->ops.driver_status(hw, false, rnp_driver_suspuse); + + rnp_reset(adapter); + + if (!err && netif_running(netdev)) + err = rnp_open(netdev); + + rtnl_unlock(); + + if (err) + return err; + + netif_device_attach(netdev); + + return 0; +} +#endif /* CONFIG_PM */ + +static int __rnp_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct rnp_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + struct rnp_hw *hw = &adapter->hw; + u32 wufc = adapter->wol; +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + rtnl_lock(); + if (netif_running(netdev)) { + rnp_down(adapter); + rnp_free_irq(adapter); + rnp_free_all_tx_resources(adapter); + rnp_free_all_rx_resources(adapter); + /* should consider sriov mode ? 
*/ + } + rtnl_unlock(); + + if (hw->ops.driver_status) + hw->ops.driver_status(hw, true, rnp_driver_suspuse); + + remove_mbx_irq(adapter); + rnp_clear_interrupt_scheme(adapter); + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; + +#endif + if (wufc) { + rnp_set_rx_mode(netdev); + + /* enable the optics for n10 SFP+ fiber as we can WoL */ + if (hw->ops.enable_tx_laser) + hw->ops.enable_tx_laser(hw); + + /* turn on all-multi mode if wake on multicast is enabled */ + } + + if (hw->ops.setup_wol) + hw->ops.setup_wol(hw, adapter->wol); + + pci_wake_from_d3(pdev, !!wufc); + *enable_wake = !!wufc; + + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int rnp_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + bool wake; + + printk("call rnp_suspend\n"); + + retval = __rnp_shutdown(pdev, &wake); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} +#endif /* CONFIG_PM */ + +__maybe_unused static void rnp_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __rnp_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +/** + * rnp_update_stats - Update the board statistics counters. + * @adapter: board private structure + **/ +void rnp_update_stats(struct rnp_adapter *adapter) +{ + struct net_device_stats *net_stats = &adapter->netdev->stats; + struct rnp_hw *hw = &adapter->hw; + struct rnp_hw_stats *hw_stats = &adapter->hw_stats; + int i; + struct rnp_ring *ring; + u64 hw_csum_rx_error = 0; + u64 hw_csum_rx_good = 0; + + net_stats->tx_packets = 0; + net_stats->tx_bytes = 0; + net_stats->rx_packets = 0; + net_stats->rx_bytes = 0; + net_stats->rx_dropped = 0; + net_stats->rx_errors = 0; + hw_stats->vlan_strip_cnt = 0; + hw_stats->vlan_add_cnt = 0; + + if (test_bit(__RNP_DOWN, &adapter->state) || + test_bit(__RNP_RESETTING, &adapter->state)) + return; + + for (i = 0; i < adapter->num_q_vectors; i++) { + rnp_for_each_ring(ring, adapter->q_vector[i]->rx) { + hw_csum_rx_error += ring->rx_stats.csum_err; + hw_csum_rx_good += ring->rx_stats.csum_good; + hw_stats->vlan_strip_cnt += ring->rx_stats.vlan_remove; + net_stats->rx_packets += ring->stats.packets; + net_stats->rx_bytes += ring->stats.bytes; + } + rnp_for_each_ring(ring, adapter->q_vector[i]->tx) { + hw_stats->vlan_add_cnt += ring->tx_stats.vlan_add; + net_stats->tx_packets += ring->stats.packets; + net_stats->tx_bytes += ring->stats.bytes; + } + } + net_stats->rx_errors += hw_csum_rx_error; + + hw->ops.update_hw_status(hw, hw_stats, net_stats); + + adapter->hw_csum_rx_error = hw_csum_rx_error; + adapter->hw_csum_rx_good = hw_csum_rx_good; +} + +/** + * rnp_check_hang_subtask - check for hung queues and dropped interrupts + * @adapter: pointer to the device adapter structure + * + * This function serves two purposes. First it strobes the interrupt lines + * in order to make certain interrupts are occurring. Secondly it sets the + * bits needed to check for TX hangs. As a result we should immediately + * determine if a hang has occurred. 
+ */ +static void rnp_check_hang_subtask(struct rnp_adapter *adapter) +{ + int i; + struct rnp_ring *tx_ring; + u64 tx_next_to_clean_old; + u64 tx_next_to_clean; + u64 tx_next_to_use; + struct rnp_ring *rx_ring; + u64 rx_next_to_clean_old; + u64 rx_next_to_clean; + union rnp_rx_desc *rx_desc; + + /* If we're down or resetting, just bail */ + if (test_bit(__RNP_DOWN, &adapter->state) || + test_bit(__RNP_RESETTING, &adapter->state)) + return; + + set_bit(__RNP_SERVICE_CHECK, &adapter->state); + + /* Force detection of hung controller */ + if (netif_carrier_ok(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + set_check_for_tx_hang(adapter->tx_ring[i]); + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + tx_ring = adapter->tx_ring[i]; + /* get the last next_to_clean */ + tx_next_to_clean_old = tx_ring->tx_stats.tx_next_to_clean; + tx_next_to_clean = tx_ring->next_to_clean; + tx_next_to_use = tx_ring->next_to_use; + + /* if we have tx desc to clean */ + if (tx_next_to_use != tx_next_to_clean) { + if (tx_next_to_clean == tx_next_to_clean_old) { + tx_ring->tx_stats.tx_equal_count++; + if (tx_ring->tx_stats.tx_equal_count > 2) { + /* maybe not so good */ + struct rnp_q_vector *q_vector = + tx_ring->q_vector; + + /* stats */ + if (q_vector->rx.ring || + q_vector->tx.ring) + napi_schedule_irqoff( + &q_vector->napi); + + tx_ring->tx_stats.tx_irq_miss++; + tx_ring->tx_stats.tx_equal_count = 0; + } + } else { + tx_ring->tx_stats.tx_equal_count = 0; + } + /* update */ + /* record this next_to_clean */ + tx_ring->tx_stats.tx_next_to_clean = tx_next_to_clean; + } else { + /* clean record to -1 */ + tx_ring->tx_stats.tx_next_to_clean = -1; + } + } + + /* check if we lost rx irq */ + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_ring = adapter->rx_ring[i]; + /* get the last next_to_clean */ + rx_next_to_clean_old = rx_ring->rx_stats.rx_next_to_clean; + /* get the now clean */ + rx_next_to_clean = rx_ring->next_to_clean; + + if (rx_next_to_clean == rx_next_to_clean_old) { + rx_ring->rx_stats.rx_equal_count++; + + if ((rx_ring->rx_stats.rx_equal_count > 2) && + (rx_ring->rx_stats.rx_equal_count < 5)) { + rx_desc = RNP_RX_DESC(rx_ring, + rx_ring->next_to_clean); + if (rnp_test_staterr(rx_desc, + RNP_RXD_STAT_DD)) { + int size; + struct rnp_q_vector *q_vector = + rx_ring->q_vector; + + size = le16_to_cpu(rx_desc->wb.len); + if (size) { + rx_ring->rx_stats.rx_irq_miss++; + if (q_vector->rx.ring || + q_vector->tx.ring) + napi_schedule_irqoff( + &q_vector->napi); + } else { + printk("set RNP_FLAG2_RESET_REQUESTED since size is 0\n"); + adapter->flags2 |= + RNP_FLAG2_RESET_REQUESTED; + } + } + } + if (rx_ring->rx_stats.rx_equal_count > 1000) + rx_ring->rx_stats.rx_equal_count = 0; + } else { + rx_ring->rx_stats.rx_equal_count = 0; + } + rx_ring->rx_stats.rx_next_to_clean = rx_next_to_clean; + } + + clear_bit(__RNP_SERVICE_CHECK, &adapter->state); +} + +static void update_ring_delay(struct rnp_adapter *adapter) +{ + int i; + struct rnp_ring *ring; + struct rnp_hw *hw = &adapter->hw; + + for (i = 0; i < adapter->num_rx_queues; i++) { + ring = adapter->rx_ring[i]; + ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER, + adapter->rx_usecs * hw->usecstocount); + ring = adapter->tx_ring[i]; + ring_wr32(ring, RNP_DMA_REG_TX_INT_DELAY_TIMER, + adapter->tx_usecs * hw->usecstocount); + } +} + +/** + * rnp_watchdog_update_link - update the link status + * @adapter: pointer to the device adapter structure + * @link_speed: pointer to a u32 to store the link_speed + **/ +static void 
rnp_watchdog_update_link(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool link_up = adapter->link_up; + bool duplex = adapter->duplex_old; + bool flow_rx = true, flow_tx = true; + + if (!(adapter->flags & RNP_FLAG_NEED_LINK_UPDATE)) + return; + + if (hw->ops.check_link) { + hw->ops.check_link(hw, &link_speed, &link_up, &duplex, false); + } else { + /* always assume link is up, if no check link function */ + link_speed = RNP_LINK_SPEED_10GB_FULL; + link_up = true; + } + + if (link_up || time_after(jiffies, (adapter->link_check_timeout + + RNP_TRY_LINK_TIMEOUT))) { + adapter->flags &= ~RNP_FLAG_NEED_LINK_UPDATE; + } + adapter->link_up = link_up; + adapter->link_speed = link_speed; + adapter->duplex_old = duplex; + + if (hw->ops.get_pause_mode) + hw->ops.get_pause_mode(hw); + switch (hw->fc.current_mode) { + case rnp_fc_none: + flow_rx = false; + flow_tx = false; + break; + case rnp_fc_tx_pause: + flow_rx = false; + flow_tx = true; + + break; + case rnp_fc_rx_pause: + flow_rx = true; + flow_tx = false; + break; + + case rnp_fc_full: + flow_rx = true; + flow_tx = true; + break; + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + } + /* if we detect changed link setup new */ + if (adapter->link_up) { + if (hw->ops.set_mac_speed) + hw->ops.set_mac_speed(hw, true, link_speed, duplex); + /* we should also update pause mode */ + if (hw->ops.set_pause_mode) + hw->ops.set_pause_mode(hw); + + e_info(drv, "NIC Link is Up %s, %s Duplex, Flow Control: %s\n", + (link_speed == RNP_LINK_SPEED_40GB_FULL ? + "40 Gbps" : + (link_speed == RNP_LINK_SPEED_25GB_FULL ? + "25 Gbps" : + (link_speed == RNP_LINK_SPEED_10GB_FULL ? + "10 Gbps" : + (link_speed == RNP_LINK_SPEED_1GB_FULL ? + "1 Gbps" : + (link_speed == RNP_LINK_SPEED_100_FULL ? + "100 Mbps" : + (link_speed == RNP_LINK_SPEED_10_FULL ? + "10 Mbps" : + "unknown speed")))))), + ((duplex) ? "Full" : "Half"), + ((flow_rx && flow_tx) ? + "RX/TX" : + (flow_rx ? "RX" : (flow_tx ? 
"TX" : "None")))); + /* we should update rx irq delay and tx irq delay */ + if (link_speed == RNP_LINK_SPEED_10GB_FULL) { + adapter->rx_usecs = adapter->rx_usecs_usr_set; + adapter->tx_usecs = adapter->tx_usecs_usr_set; + } else { + adapter->rx_usecs = adapter->rx_usecs_usr_set * 6; + adapter->tx_usecs = adapter->tx_usecs_usr_set * 2; + } + update_ring_delay(adapter); + } else { + if (hw->ops.set_mac_speed) + hw->ops.set_mac_speed(hw, false, 0, false); + } +} + +/** + * rnp_watchdog_link_is_up - update netif_carrier status and + * print link up message + * @adapter: pointer to the device adapter structure + **/ +static void rnp_watchdog_link_is_up(struct rnp_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct rnp_hw *hw = &adapter->hw; + + /* only continue if link was previously down */ + if (netif_carrier_ok(netdev)) + return; + + adapter->flags2 &= ~RNP_FLAG2_SEARCH_FOR_SFP; + switch (hw->mac.type) { + default: + break; + } + + netif_carrier_on(netdev); + + netif_tx_wake_all_queues(netdev); + + hw->ops.set_mac_rx(hw, true); +} + +/** + * rnp_watchdog_link_is_down - update netif_carrier status and + * print link down message + * @adapter: pointer to the adapter structure + **/ +static void rnp_watchdog_link_is_down(struct rnp_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct rnp_hw *hw = &adapter->hw; + + adapter->link_up = false; + adapter->link_speed = 0; + + /* only continue if link was up previously */ + if (!netif_carrier_ok(netdev)) + return; + + /* poll for SFP+ cable when link is down */ + if (rnp_is_sfp(hw)) + adapter->flags2 |= RNP_FLAG2_SEARCH_FOR_SFP; + + e_info(drv, "NIC Link is Down\n"); + + netif_carrier_off(netdev); + + netif_tx_stop_all_queues(netdev); + + hw->ops.set_mac_rx(hw, false); +} + +static void rnp_update_link_to_vf(struct rnp_adapter *adapter) +{ + /* maybe confict with vf */ + if (!(adapter->flags & RNP_FLAG_VF_INIT_DONE)) + return; + + if ((adapter->link_up_old != adapter->link_up) || + (adapter->link_speed_old != adapter->link_speed)) { + /* if change send mbx to all vf */ + if (!test_bit(__RNP_IN_IRQ, &adapter->state)) { + if (0 == + rnp_msg_post_status(adapter, PF_SET_LINK_STATUS)) { + /* maybe delay if we are in other irq? 
*/ + adapter->link_up_old = adapter->link_up; + adapter->link_speed_old = adapter->link_speed; + } + } + } +} +/** + * rnp_watchdog_subtask - check and bring link up + * @adapter: pointer to the device adapter structure + **/ +static void rnp_watchdog_subtask(struct rnp_adapter *adapter) +{ + /* if interface is down do nothing */ + /* should do link status if in sriov */ + if (test_bit(__RNP_DOWN, &adapter->state) || + test_bit(__RNP_RESETTING, &adapter->state)) + return; + + rnp_watchdog_update_link(adapter); + + if (adapter->link_up) + rnp_watchdog_link_is_up(adapter); + else + rnp_watchdog_link_is_down(adapter); + + rnp_update_link_to_vf(adapter); + + rnp_update_stats(adapter); +} + +/** + * rnp_service_timer - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +void rnp_service_timer(struct timer_list *t) +{ + struct rnp_adapter *adapter = from_timer(adapter, t, service_timer); + unsigned long next_event_offset; + bool ready = true; + + /* poll faster when waiting for link */ + if (adapter->flags & RNP_FLAG_NEED_LINK_UPDATE) + next_event_offset = HZ / 10; + else + next_event_offset = HZ; + /* Reset the timer */ + if (!test_bit(__RNP_REMOVE, &adapter->state)) + mod_timer(&adapter->service_timer, next_event_offset + jiffies); + + if (ready) + rnp_service_event_schedule(adapter); +} + +static void rnp_reset_pf_subtask(struct rnp_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u32 err; + + if (!(adapter->flags2 & RNP_FLAG2_RESET_PF)) + return; + + rtnl_lock(); + netif_device_detach(netdev); + if (netif_running(netdev)) { + rnp_down(adapter); + rnp_free_irq(adapter); + rnp_free_all_tx_resources(adapter); + rnp_free_all_rx_resources(adapter); + } + rtnl_unlock(); + + adapter->link_up = 0; + adapter->link_up_old = 0; + rnp_msg_post_status(adapter, PF_SET_LINK_STATUS); + /* wait all vf get this status */ + usleep_range(500, 1000); + + rnp_reset(adapter); + remove_mbx_irq(adapter); + rnp_clear_interrupt_scheme(adapter); + + rtnl_lock(); + err = rnp_init_interrupt_scheme(adapter); + + register_mbx_irq(adapter); + + if (!err && netif_running(netdev)) + err = rnp_open(netdev); + + rtnl_unlock(); + rnp_msg_post_status(adapter, PF_SET_RESET); + netif_device_attach(netdev); + adapter->flags2 &= (~RNP_FLAG2_RESET_PF); +} + +static void rnp_reset_subtask(struct rnp_adapter *adapter) +{ + if (!(adapter->flags2 & RNP_FLAG2_RESET_REQUESTED)) + return; + + adapter->flags2 &= ~RNP_FLAG2_RESET_REQUESTED; + + /* If we're already down or resetting, just bail */ + if (test_bit(__RNP_DOWN, &adapter->state) || + test_bit(__RNP_RESETTING, &adapter->state)) + return; + + netdev_err(adapter->netdev, "Reset adapter\n"); + adapter->tx_timeout_count++; + rtnl_lock(); + rnp_reinit_locked(adapter); + rtnl_unlock(); +} + +static void rnp_rx_len_reset_subtask(struct rnp_adapter *adapter) +{ + int i; + struct rnp_ring *rx_ring; + + for (i = 0; i < adapter->num_tx_queues; i++) { + rx_ring = adapter->rx_ring[i]; + if (unlikely(rx_ring->ring_flags & + RNP_RING_FLAG_DO_RESET_RX_LEN)) { + dbg("[%s] Rx-ring %d count reset\n", + adapter->netdev->name, rx_ring->rnp_queue_idx); + if (!rnp_rx_ring_reinit(adapter, rx_ring)) { + rx_ring->ring_flags &= + (~RNP_RING_FLAG_DO_RESET_RX_LEN); + } + } + } +} + +static void rnp_auto_itr_moderation(struct rnp_adapter *adapter) +{ + int i; + struct rnp_ring *rx_ring; + u64 period = (u64)(jiffies - adapter->last_moder_jiffies); + + if (!adapter->adaptive_rx_coal || + period < adapter->sample_interval * HZ) { + return; + } + + 
adapter->last_moder_jiffies = jiffies; + + /* it is time to check moderation */ + for (i = 0; i < adapter->num_rx_queues; i++) { + u64 x, y, rate; + u64 rx_packets, packets, rx_pkt_diff; + + rx_ring = adapter->rx_ring[i]; + rx_packets = READ_ONCE(rx_ring->stats.packets); + rx_pkt_diff = rx_packets - + adapter->last_moder_packets[rx_ring->queue_index]; + packets = rx_pkt_diff; + + x = packets * HZ; + y = do_div(x, period); + rate = x; + + + if (rate == 0) { + + } else if (rate < 20000) { + + rx_ring->ring_flags |= RNP_RING_LOWER_ITR; + } else { + + rx_ring->ring_flags &= (~RNP_RING_LOWER_ITR); + } + + /* write back new count */ + adapter->last_moder_packets[rx_ring->queue_index] = rx_packets; + } +} + +/** + * rnp_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +void rnp_service_task(struct work_struct *work) +{ + struct rnp_adapter *adapter = + container_of(work, struct rnp_adapter, service_task); + +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) +#ifndef HAVE_UDP_TUNNEL_NIC_INFO + if (adapter->flags2 & RNP_FLAG2_UDP_TUN_REREG_NEEDED) { + rtnl_lock(); + adapter->flags2 &= ~RNP_FLAG2_UDP_TUN_REREG_NEEDED; +#ifdef HAVE_UDP_ENC_RX_OFFLOAD + udp_tunnel_get_rx_info(adapter->netdev); +#else + vxlan_get_rx_port(adapter->netdev); +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ + rtnl_unlock(); + } +#endif /* HAVE_UDP_TUNNEL_NIC_INFO */ +#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ + + rnp_reset_subtask(adapter); + rnp_reset_pf_subtask(adapter); + rnp_watchdog_subtask(adapter); + rnp_rx_len_reset_subtask(adapter); + rnp_auto_itr_moderation(adapter); +#ifndef TEST_TX_HANG + rnp_check_hang_subtask(adapter); +#endif + rnp_service_event_complete(adapter); +} + +static int rnp_tso(struct rnp_ring *tx_ring, struct rnp_tx_buffer *first, + u32 *mac_ip_len, u8 *hdr_len, u32 *tx_flags) +{ + struct sk_buff *skb = first->skb; + struct net_device *netdev = tx_ring->netdev; + struct rnp_adapter *adapter = netdev_priv(netdev); + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; + int err; + u8 *inner_mac; + u16 gso_segs, gso_size; + u16 gso_need_pad; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + inner_mac = skb->data; + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->tot_len = 0; + ip.v4->check = 0x0000; + } else { + ip.v6->payload_len = 0; + } + +#ifdef HAVE_ENCAP_TSO_OFFLOAD + if (skb_shinfo(skb)->gso_type & + (SKB_GSO_GRE | +#ifdef NETIF_F_GSO_PARTIAL + SKB_GSO_GRE_CSUM | +#endif + SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { +#ifndef NETIF_F_GSO_PARTIAL + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { +#else + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { +#endif + } + /* we should alayws do this */ + inner_mac = skb_inner_mac_header(skb); + + first->tunnel_hdr_len = (inner_mac - skb->data); + + if (skb_shinfo(skb)->gso_type & + (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { + *tx_flags |= RNP_TXD_TUNNEL_VXLAN; + l4.udp->check = 0; + tx_dbg("set outer l4.udp to 0\n"); + } else { + *tx_flags |= 
RNP_TXD_TUNNEL_NVGRE; + } + + /* reset pointers to inner headers */ + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + } + +#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + + if (ip.v4->version == 4) { + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->tot_len = 0; + ip.v4->check = 0x0000; + + } else { + ip.v6->payload_len = 0; + /* set ipv6 type */ + *tx_flags |= RNP_TXD_FLAG_IPv6; + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + paylen = skb->len - l4_offset; + + if (skb->csum_offset == offsetof(struct tcphdr, check)) { + *tx_flags |= RNP_TXD_L4_TYPE_TCP; + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(paylen)); + l4.tcp->psh = 0; + } else { + *tx_flags |= RNP_TXD_L4_TYPE_UDP; + /* compute length of segmentation header */ + *hdr_len = sizeof(*l4.udp) + l4_offset; + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + } + + *mac_ip_len = (l4.hdr - ip.hdr) | ((ip.hdr - inner_mac) << 9); + + /* compute header lengths */ + /* pull values out of skb_shinfo */ + gso_size = skb_shinfo(skb)->gso_size; + gso_segs = skb_shinfo(skb)->gso_segs; + +#ifndef HAVE_NDO_FEATURES_CHECK + /* too small a TSO segment size causes problems */ + if (gso_size < 64) { + gso_size = 64; + gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, 64); + } +#endif + /* if we close padding check gso confition */ + if (adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING) { + gso_need_pad = (first->skb->len - *hdr_len) % gso_size; + if (gso_need_pad) { + if ((gso_need_pad + *hdr_len) <= 60) { + gso_need_pad = 60 - (gso_need_pad + *hdr_len); + first->gso_need_padding = !!gso_need_pad; + } + } + } + + /* update gso size and bytecount with header size */ + /* to fix tx status */ + first->gso_segs = gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + if (skb->csum_offset == offsetof(struct tcphdr, check)) { + first->mss_len_vf_num |= + (gso_size | ((l4.tcp->doff * 4) << 24)); + } else { + first->mss_len_vf_num |= (gso_size | ((8) << 24)); + } + + *tx_flags |= RNP_TXD_FLAG_TSO | RNP_TXD_IP_CSUM | RNP_TXD_L4_CSUM; + + first->ctx_flag = true; + return 1; +} + +static int rnp_tx_csum(struct rnp_ring *tx_ring, struct rnp_tx_buffer *first, + u32 *mac_ip_len, u32 *tx_flags) +{ + struct sk_buff *skb = first->skb; + u8 l4_proto = 0; + u8 ip_len = 0; + u8 mac_len = 0; + u8 *inner_mac = skb->data; + u8 *exthdr; + __be16 frag_off; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + inner_mac = skb->data; + +#ifdef HAVE_ENCAP_CSUM_OFFLOAD + /* outer protocol */ + if (skb->encapsulation) { + /* define outer network header type */ + if (ip.v4->version == 4) { + l4_proto = ip.v4->protocol; + } else { + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + } + + /* define outer transport */ + switch (l4_proto) { + case IPPROTO_UDP: + l4.udp->check = 0; + *tx_flags |= RNP_TXD_TUNNEL_VXLAN; + break; +#ifdef HAVE_GRE_ENCAP_OFFLOAD + case IPPROTO_GRE: + *tx_flags |= RNP_TXD_TUNNEL_NVGRE; + /* There was a long-standing issue in GRE where GSO + * was 
not setting the outer transport header unless + * a GRE checksum was requested. This was fixed in + * the 4.6 version of the kernel. In the 4.7 kernel + * support for GRE over IPv6 was added to GSO. So we + * can assume this workaround for all IPv4 headers + * without impacting later versions of the GRE. + */ + if (ip.v4->version == 4) + l4.hdr = ip.hdr + (ip.v4->ihl * 4); + break; +#endif + default: + skb_checksum_help(skb); + return -1; + } + + /* switch IP header pointer from outer to inner header */ + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + + inner_mac = skb_inner_mac_header(skb); + first->tunnel_hdr_len = inner_mac - skb->data; + first->ctx_flag = true; + tx_dbg("tunnel length is %d\n", first->tunnel_hdr_len); + } +#endif /* HAVE_ENCAP_CSUM_OFFLOAD */ + + mac_len = (ip.hdr - inner_mac); // mac length + *mac_ip_len = (ip.hdr - inner_mac) << 9; + tx_dbg("inner checksum needed %d", skb_checksum_start_offset(skb)); + tx_dbg("skb->encapsulation %d\n", skb->encapsulation); + ip_len = (l4.hdr - ip.hdr); + if (ip.v4->version == 4) { + l4_proto = ip.v4->protocol; + } else { + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, + &frag_off); + *tx_flags |= RNP_TXD_FLAG_IPv6; + } + /* Enable L4 checksum offloads */ + switch (l4_proto) { + case IPPROTO_TCP: + *tx_flags |= RNP_TXD_L4_TYPE_TCP | RNP_TXD_L4_CSUM; + break; + case IPPROTO_SCTP: + tx_dbg("sctp checksum packet\n"); + *tx_flags |= RNP_TXD_L4_TYPE_SCTP | RNP_TXD_L4_CSUM; + break; + case IPPROTO_UDP: + *tx_flags |= RNP_TXD_L4_TYPE_UDP | RNP_TXD_L4_CSUM; + break; + default: + skb_checksum_help(skb); + return 0; + } + + /* should consider stags mode */ + if ((tx_ring->ring_flags & RNP_RING_NO_TUNNEL_SUPPORT) && + (first->ctx_flag)) { + /* if not support tunnel */ + *tx_flags &= (~RNP_TXD_TUNNEL_MASK); + if (!(first->priv_tags)) { + first->ctx_flag = false; + mac_len += first->tunnel_hdr_len; + first->tunnel_hdr_len = 0; + } + } + *mac_ip_len = (mac_len << 9) | ip_len; + + return 0; +} + +static int __rnp_maybe_stop_tx(struct rnp_ring *tx_ring, u16 size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(rnp_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! 
- use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +static inline int rnp_maybe_stop_tx(struct rnp_ring *tx_ring, u16 size) +{ + if (likely(rnp_desc_unused(tx_ring) >= size)) + return 0; + return __rnp_maybe_stop_tx(tx_ring, size); +} + +static int rnp_tx_map(struct rnp_ring *tx_ring, struct rnp_tx_buffer *first, + u32 mac_ip_len, u32 tx_flags) +{ + struct sk_buff *skb = first->skb; + struct rnp_tx_buffer *tx_buffer; + struct rnp_tx_desc *tx_desc; + skb_frag_t *frag; + dma_addr_t dma; + unsigned int data_len, size; + u16 i = tx_ring->next_to_use; + u64 fun_id = ((u64)(tx_ring->pfvfnum) << (56)); + + tx_desc = RNP_TX_DESC(tx_ring, i); + size = skb_headlen(skb); + data_len = skb->data_len; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + /* 1st desc */ + tx_desc->pkt_addr = cpu_to_le64(dma | fun_id); + + while (unlikely(size > RNP_MAX_DATA_PER_TXD)) { + tx_desc->vlan_cmd_bsz = build_ctob( + tx_flags, mac_ip_len, RNP_MAX_DATA_PER_TXD); + /* ==== desc== */ + buf_dump_line("tx0 ", __LINE__, tx_desc, + sizeof(*tx_desc)); + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = RNP_TX_DESC(tx_ring, 0); + i = 0; + } + dma += RNP_MAX_DATA_PER_TXD; + size -= RNP_MAX_DATA_PER_TXD; + + tx_desc->pkt_addr = cpu_to_le64(dma | fun_id); + } + + buf_dump_line("tx1 ", __LINE__, tx_desc, sizeof(*tx_desc)); + if (likely(!data_len)) + break; + tx_desc->vlan_cmd_bsz = build_ctob(tx_flags, mac_ip_len, size); + buf_dump_line("tx2 ", __LINE__, tx_desc, sizeof(*tx_desc)); + + /* ==== frag== */ + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = RNP_TX_DESC(tx_ring, 0); + i = 0; + } + + size = skb_frag_size(frag); + data_len -= size; + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + tx_desc->vlan_cmd_bsz = build_ctob( + tx_flags | RNP_TXD_CMD_EOP | RNP_TXD_CMD_RS, mac_ip_len, size); + buf_dump_line("tx3 ", __LINE__, tx_desc, sizeof(*tx_desc)); + + /* set the timestamp */ + first->time_stamp = jiffies; + + tx_ring->tx_stats.send_bytes += first->bytecount; +#ifdef NO_BQL_TEST +#else + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); +#endif + + /* + * Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. 
+ */ + /* timestamp the skb as late as possible, just prior to notifying + * the MAC that it should transmit this packet + */ + wmb(); + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + buf_dump_line("tx4 ", __LINE__, tx_desc, sizeof(*tx_desc)); + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + /* need this */ + rnp_maybe_stop_tx(tx_ring, DESC_NEEDED); + + skb_tx_timestamp(skb); +#ifdef SIMULATE_TX + napi_consume_skb(first->skb, 64); + dma_unmap_single(tx_ring->dev, dma_unmap_addr(first, dma), + dma_unmap_len(first, len), DMA_TO_DEVICE); + + tx_ring->stats.bytes += skb->len; + tx_ring->stats.packets += 1; + first->skb = NULL; +#else + +#ifdef HAVE_SKB_XMIT_MORE + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { + tx_ring->tx_stats.send_bytes_to_hw += first->bytecount; + tx_ring->tx_stats.send_bytes_to_hw += + tx_ring->tx_stats.todo_update; + tx_ring->tx_stats.todo_update = 0; + rnp_wr_reg(tx_ring->tail, i); +#ifndef SPIN_UNLOCK_IMPLIES_MMIOWB + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); +#endif + } else { + tx_ring->tx_stats.todo_update += first->bytecount; + } +#else + /* notify HW of packet */ + rnp_wr_reg(tx_ring->tail, i); + +#ifndef SPIN_UNLOCK_IMPLIES_MMIOWB + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); +#endif +#endif /* HAVE_SKB_XMIT_MORE */ + +#endif + return 0; +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_buffer_info map */ + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[i]; + rnp_unmap_and_free_tx_resource(tx_ring, tx_buffer); + if (tx_buffer == first) + break; + if (i == 0) + i += tx_ring->count; + i--; + } + dev_kfree_skb_any(first->skb); + first->skb = NULL; + tx_ring->next_to_use = i; + + return -1; +} + +static void rnp_force_src_mac(struct sk_buff *skb, struct net_device *netdev) +{ + u8 *data = skb->data; + bool ret = false; + struct netdev_hw_addr *ha; + /* force all src mac to myself */ + if (is_multicast_ether_addr(data)) { + if (0 == memcmp(data + netdev->addr_len, netdev->dev_addr, + netdev->addr_len)) { + ret = true; + goto DONE; + } + netdev_for_each_uc_addr(ha, netdev) { + if (0 == memcmp(data + netdev->addr_len, ha->addr, + netdev->addr_len)) { + ret = true; + goto DONE; + } + } + /* if not src mac, force to src mac */ + if (!ret) + memcpy(data + netdev->addr_len, netdev->dev_addr, + netdev->addr_len); + } +DONE: + return; +} + +netdev_tx_t rnp_xmit_frame_ring(struct sk_buff *skb, + struct rnp_adapter *adapter, + struct rnp_ring *tx_ring, bool tx_padding) +{ + struct rnp_tx_buffer *first; + int tso; + u32 tx_flags = 0; + unsigned short f; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = skb->protocol; + u8 hdr_len = 0; + int ignore_vlan = 0; + /* default len should not 0 (hw request) */ + u32 mac_ip_len = 20; + + tx_dbg("=== begin ====\n"); + tx_dbg("rnp skb:%p, skb->len:%d headlen:%d, data_len:%d\n", skb, + skb->len, skb_headlen(skb), skb->data_len); + tx_dbg("next_to_clean %d, next_to_use %d\n", tx_ring->next_to_clean, + tx_ring->next_to_use); + /* + * need: 1 descriptor per page * PAGE_SIZE/RNP_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/RNP_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < 
skb_shinfo(skb)->nr_frags; f++) { + skb_frag_t *frag_temp = &skb_shinfo(skb)->frags[f]; + + count += TXD_USE_COUNT(skb_frag_size(frag_temp)); + tx_dbg(" rnp #%d frag: size:%d\n", f, skb_frag_size(frag_temp)); + } + + if (rnp_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED) && + (!(tx_ring->ring_flags & RNP_RING_VEB_MULTI_FIX))) + rnp_force_src_mac(skb, tx_ring->netdev); + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + first->priv_tags = 0; + + first->mss_len_vf_num = 0; + first->inner_vlan_tunnel_len = 0; + + first->ctx_flag = (adapter->flags & RNP_FLAG_SRIOV_ENABLED) ? true : + false; + + /* if we have a HW VLAN tag being added default to the HW one */ + /* RNP_TXD_VLAN_VALID is used for veb */ + /* setup padding flag */ + + if (adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING) { + first->ctx_flag = true; + /* should consider sctp */ + first->gso_need_padding = tx_padding; + } + + /* RNP_FLAG2_VLAN_STAGS_ENABLED and + * tx-stags-offload not support together + */ + if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) { + /* always add a stags for any packets out */ + if (tx_ring->ring_flags & RNP_RING_OUTER_VLAN_FIX) { + /* set outer_vlan to ctx */ + first->inner_vlan_tunnel_len |= (adapter->stags_vid); + first->priv_tags = 1; + first->ctx_flag = true; + + if (skb_vlan_tag_present(skb)) { + tx_flags |= RNP_TXD_VLAN_VALID | + RNP_TXD_VLAN_CTRL_INSERT_VLAN; + tx_flags |= skb_vlan_tag_get(skb); + /* else if it is a SW VLAN check the next + * protocol and store the tag + */ + } else if (protocol == htons(ETH_P_8021Q)) { + struct vlan_hdr *vhdr, _vhdr; + + vhdr = skb_header_pointer( + skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) + goto out_drop; + + protocol = vhdr->h_vlan_encapsulated_proto; + tx_flags |= ntohs(vhdr->h_vlan_TCI); + tx_flags |= RNP_TXD_VLAN_VALID; + } + + } else { + /* sriov mode not support this */ + tx_flags |= adapter->stags_vid; + tx_flags |= RNP_TXD_VLAN_CTRL_INSERT_VLAN; + if (skb_vlan_tag_present(skb)) { + tx_flags |= RNP_TXD_VLAN_VALID; + first->inner_vlan_tunnel_len |= + (skb_vlan_tag_get(skb) << 8); + first->ctx_flag = true; + /* else if it is a SW VLAN check the next + * protocol and store the tag + */ + } else if (protocol == htons(ETH_P_8021Q)) { + struct vlan_hdr *vhdr, _vhdr; + + vhdr = skb_header_pointer( + skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) + goto out_drop; + + protocol = vhdr->h_vlan_encapsulated_proto; + tx_flags |= RNP_TXD_VLAN_VALID; + } + } + } else { + /* normal mode*/ + if (skb_vlan_tag_present(skb)) { +#ifndef NO_SKB_VLAN_PROTO + if (skb->vlan_proto != htons(ETH_P_8021Q)) { + /* veb only use ctags */ + tx_flags |= skb_vlan_tag_get(skb); + tx_flags |= RNP_TXD_SVLAN_TYPE | + RNP_TXD_VLAN_CTRL_INSERT_VLAN; + } else { +#endif + tx_flags |= skb_vlan_tag_get(skb); + tx_flags |= RNP_TXD_VLAN_VALID | + RNP_TXD_VLAN_CTRL_INSERT_VLAN; +#ifndef NO_SKB_VLAN_PROTO + } +#endif + tx_ring->tx_stats.vlan_add++; + /* else if it is a SW VLAN check the next + * protocol and store the tag + */ + /* veb only use ctags */ + } else if ((protocol == htons(ETH_P_8021Q))) { + struct vlan_hdr *vhdr, _vhdr; + + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), + &_vhdr); + if (!vhdr) + goto out_drop; + + protocol = vhdr->h_vlan_encapsulated_proto; + tx_flags |= ntohs(vhdr->h_vlan_TCI); + tx_flags |= 
RNP_TXD_VLAN_VALID; + ignore_vlan = 1; + } + } + protocol = vlan_get_protocol(skb); +#ifdef SKB_SHARED_TX_IS_UNION + if (unlikely(skb_tx(skb)->hardware) && + adapter->flags2 & RNP_FLAG2_PTP_ENABLED && adapter->ptp_tx_en) { + if (!test_and_set_bit_lock(__RNP_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_tx(skb)->in_progress = 1; + +#else + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + adapter->flags2 & RNP_FLAG2_PTP_ENABLED && adapter->ptp_tx_en) { + if (!test_and_set_bit_lock(__RNP_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; +#endif + tx_flags |= RNP_TXD_FLAG_PTP; + adapter->ptp_tx_skb = skb_get(skb); + adapter->tx_hwtstamp_start = jiffies; + schedule_work(&adapter->tx_hwtstamp_work); + } else { + printk("ptp_tx_skb miss\n"); + } + } + /* record initial flags and protocol */ + tso = rnp_tso(tx_ring, first, &mac_ip_len, &hdr_len, &tx_flags); + if (tso < 0) + goto out_drop; + else if (!tso) + rnp_tx_csum(tx_ring, first, &mac_ip_len, &tx_flags); + /* check sriov mode */ + /* in this mode pf send msg should with vf_num */ + if (unlikely(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) { + first->ctx_flag = true; + first->mss_len_vf_num |= (adapter->vf_num_for_pf << 16); + } + + /* add control desc */ + rnp_maybe_tx_ctxtdesc(tx_ring, first, ignore_vlan); + /* add the ATR filter if ATR is on */ + if (rnp_tx_map(tx_ring, first, mac_ip_len, tx_flags)) { + goto cleanup_tx_tstamp; + } +#ifndef HAVE_TRANS_START_IN_QUEUE + tx_ring->netdev->trans_start = jiffies; +#endif + tx_dbg("=== end ====\n\n\n\n"); + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; +cleanup_tx_tstamp: + if (unlikely(tx_flags & RNP_TXD_FLAG_PTP)) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + cancel_work_sync(&adapter->tx_hwtstamp_work); + clear_bit_unlock(__RNP_PTP_TX_IN_PROGRESS, &adapter->state); + } + + return NETDEV_TX_OK; +} + +static bool check_sctp_no_padding(struct sk_buff *skb) +{ + bool no_padding = false; + u8 l4_proto = 0; + u8 *exthdr; + __be16 frag_off; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + if (ip.v4->version == 4) { + l4_proto = ip.v4->protocol; + } else { + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, + &frag_off); + } + /* sctp set no_padding to true */ + switch (l4_proto) { + case IPPROTO_SCTP: + no_padding = true; + break; + default: + + break; + } + return no_padding; +} + +static netdev_tx_t rnp_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_ring *tx_ring; + bool tx_padding = false; + + if (!netif_carrier_ok(netdev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* + * The minimum packet size for olinfo paylen is 17 so pad the skb + * in order to meet this minimum size requirement. 
+ */ + if (adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING) { + if (skb->len < 60) { + if (!check_sctp_no_padding(skb)) { + if (skb_put_padto(skb, 60)) + return NETDEV_TX_OK; + } else { + /* if sctp smaller than 60, never padding */ + tx_padding = true; + } + } + } else { + if (skb_put_padto(skb, 33)) + return NETDEV_TX_OK; + } + tx_ring = adapter->tx_ring[skb->queue_mapping]; + + return rnp_xmit_frame_ring(skb, adapter, tx_ring, tx_padding); +} + +/** + * rnp_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int rnp_set_mac(struct net_device *netdev, void *p) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + + dbg("[%s] call set mac\n", netdev->name); + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + hw->ops.set_mac(hw, hw->mac.addr, sriov_flag); + + /* reset veb table */ + rnp_configure_virtualization(adapter); + return 0; +} + +static int rnp_mdio_read(struct net_device *netdev, int prtad, int devad, + u32 addr, u32 *phy_value) +{ + int rc = -EIO; + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + u16 value; + + rc = hw->ops.phy_read_reg(hw, addr, 0, &value); + *phy_value = value; + + return rc; +} + +static int rnp_mdio_write(struct net_device *netdev, int prtad, int devad, + u16 addr, u16 value) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + return hw->ops.phy_write_reg(hw, addr, 0, value); +} + +static int rnp_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr->ifr_data; + int prtad, devad, ret; + u32 phy_value; + + prtad = (mii->phy_id & MDIO_PHY_ID_PRTAD) >> 5; + devad = (mii->phy_id & MDIO_PHY_ID_DEVAD); + + if (cmd == SIOCGMIIREG) { + ret = rnp_mdio_read(netdev, prtad, devad, mii->reg_num, + &phy_value); + if (ret < 0) + return ret; + mii->val_out = phy_value; + return 0; + } else { + return rnp_mdio_write(netdev, prtad, devad, mii->reg_num, + mii->val_in); + } +} + +static int rnp_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + /* ptp 1588 used this */ + switch (cmd) { +#ifdef HAVE_PTP_1588_CLOCK +#ifdef SIOCGHWTSTAMP + case SIOCGHWTSTAMP: + if (module_enable_ptp) + return rnp_ptp_get_ts_config(adapter, req); + break; +#endif + case SIOCSHWTSTAMP: + if (module_enable_ptp) + return rnp_ptp_set_ts_config(adapter, req); + break; +#endif + case SIOCGMIIPHY: + return 0; + break; + case SIOCGMIIREG: + /* n400 use this */ + /* fall through */ + case SIOCSMIIREG: + return rnp_mii_ioctl(netdev, req, cmd); + break; + } + return -EINVAL; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER + +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
+ */ +static void rnp_netpoll(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + int i; + + /* if interface is down do nothing */ + if (test_bit(__RNP_DOWN, &adapter->state)) + return; + + adapter->flags |= RNP_FLAG_IN_NETPOLL; + for (i = 0; i < adapter->num_q_vectors; i++) + rnp_msix_clean_rings(0, adapter->q_vector[i]); + adapter->flags &= ~RNP_FLAG_IN_NETPOLL; +} + +#endif + +#ifdef HAVE_NDO_GET_STATS64 +#ifdef HAVE_VOID_NDO_GET_STATS64 +static void rnp_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#else +static struct rtnl_link_stats64 * +rnp_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +#endif +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + int i; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct rnp_ring *ring = READ_ONCE(adapter->rx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct rnp_ring *ring = READ_ONCE(adapter->tx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + rcu_read_unlock(); + /* following stats updated by rnp_watchdog_task() */ + stats->multicast = netdev->stats.multicast; + stats->rx_errors = netdev->stats.rx_errors; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_missed_errors = netdev->stats.rx_missed_errors; + +#ifndef HAVE_VOID_NDO_GET_STATS64 + return stats; +#endif +} +#else +/** + * rnp_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. 
+ **/
+static struct net_device_stats *rnp_get_stats(struct net_device *netdev)
+{
+ struct rnp_adapter *adapter = netdev_priv(netdev);
+
+ /* update the stats data */
+ rnp_update_stats(adapter);
+
+#ifdef HAVE_NETDEV_STATS_IN_NETDEV
+ /* only return the current stats */
+ return &netdev->stats;
+#else
+ /* only return the current stats */
+ return &adapter->net_stats;
+#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
+}
+
+#endif
+
+/**
+ * rnp_setup_tc - configure net_device for multiple traffic classes
+ *
+ * @dev: net device to configure
+ * @tc: number of traffic classes to enable
+ */
+int rnp_setup_tc(struct net_device *dev, u8 tc)
+{
+ struct rnp_adapter *adapter = netdev_priv(dev);
+ struct rnp_hw *hw = &adapter->hw;
+ int ret = 0;
+
+ /* Hardware supports up to 8 traffic classes */
+ if ((tc > RNP_MAX_TCS_NUM) || (tc == 1))
+ return -EINVAL;
+ /* we cannot support TC together with SR-IOV mode */
+ if ((tc) && (adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+ return -EINVAL;
+
+ /* only the n10 supports TC setup */
+ if ((tc) && (hw->hw_type != rnp_hw_n10))
+ return -EINVAL;
+ /* if we are already in force mode there is no need to force it
+ * again; otherwise take force control of the MAC now
+ */
+ if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) {
+ hw->ops.set_mac_rx(hw, false);
+ if (hw->ops.driver_status)
+ hw->ops.driver_status(hw, true,
+ rnp_driver_force_control_mac);
+ }
+
+ /* Hardware has to reinitialize queues and interrupts to
+ * match packet buffer alignment. Unfortunately, the
+ * hardware is not flexible enough to do this dynamically.
+ */
+ while (test_and_set_bit(__RNP_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (netif_running(dev))
+ rnp_close(dev);
+
+ rnp_fdir_filter_exit(adapter);
+ adapter->priv_flags &= (~RNP_PRIV_FLAG_TCP_SYNC);
+ remove_mbx_irq(adapter);
+ rnp_clear_interrupt_scheme(adapter);
+ adapter->num_tc = tc;
+
+ if (tc) {
+ netdev_set_num_tc(dev, tc);
+ adapter->flags |= RNP_FLAG_DCB_ENABLED;
+ } else {
+ netdev_reset_tc(dev);
+ adapter->flags &= ~RNP_FLAG_DCB_ENABLED;
+ }
+
+ rnp_init_interrupt_scheme(adapter);
+
+ register_mbx_irq(adapter);
+ /* the RSS table must be reset */
+ adapter->rss_tbl_setup_flag = 0;
+
+ if (netif_running(dev))
+ ret = rnp_open(dev);
+
+ /* if we took force control above, release it now */
+ if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) {
+ hw->ops.set_mac_rx(hw, false);
+ if (hw->ops.driver_status)
+ hw->ops.driver_status(hw, false,
+ rnp_driver_force_control_mac);
+ }
+
+ clear_bit(__RNP_RESETTING, &adapter->state);
+ return ret;
+}
+
+#ifdef CONFIG_PCI_IOV
+void rnp_sriov_reinit(struct rnp_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ rtnl_lock();
+ rnp_setup_tc(netdev, netdev_get_num_tc(netdev));
+ rtnl_unlock();
+ usleep_range(10000, 20000);
+}
+#endif
+
+#ifdef NETIF_F_HW_TC
+#ifdef HAVE_TC_SETUP_CLSU32
+static int rnp_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
+{
+ /* 1. 
check weather filter rule is ingress root */ + struct rnp_adapter *adapter = netdev_priv(dev); + u32 loc = cls->knode.handle & 0xfffff; + u32 uhtid = TC_U32_USERHTID(cls->knode.handle); + int ret; + + if ((uhtid != 0x800)) + return -EINVAL; + + spin_lock(&adapter->fdir_perfect_lock); + ret = rnp_update_ethtool_fdir_entry(adapter, NULL, loc); + spin_unlock(&adapter->fdir_perfect_lock); + + return ret; +} +#ifdef CONFIG_NET_CLS_ACT + +static int rnp_action_parse(struct tcf_exts *exts, u64 *action, u8 *queue) +{ + const struct tc_action *a; +#if defined(HAVE_TCF_EXTS_TO_LIST) + LIST_HEAD(actions); +#elif defined(HAVE_TCF_EXTS_FOR_EACH_ACTION) + int j; +#endif + +#ifdef HAVE_TCF_EXTS_HAS_ACTION + if (!tcf_exts_has_actions(exts)) + +#else + if (tc_no_actions(exts)) +#endif + return -EINVAL; +#if defined(HAVE_TCF_EXTS_TO_LIST) + tcf_exts_to_list(exts, &actions); + list_for_each_entry(a, &actions, list) { +#elif defined(HAVE_TCF_EXTS_FOR_EACH_ACTION) + tcf_exts_for_each_action(j, a, exts) { +#else + tc_for_each_action(a, exts) + { +#endif + /* Drop action */ + if (is_tcf_gact_shot(a)) { + *action = RNP_FDIR_DROP_QUEUE; + *queue = RNP_FDIR_DROP_QUEUE; + return 0; + } +#ifdef HAVE_TCF_MIRRED_REDIRECT + /* Redirect to a VF or a offloaded macvlan */ +#ifdef HAVE_TCF_MIRRED_EGRESS_REDIRECT + if (is_tcf_mirred_egress_redirect(a)) { +#else + + if (is_tcf_mirred_redirect(a)) { +#endif + +#ifdef HAVE_TCF_MIRRED_DEV + struct net_device *dev = tcf_mirred_dev(a); + + if (!dev) + return -EINVAL; +#else + tcf_mirred_ifindex(a); +#endif /* HAVE_TCF_MIRRED_DEV */ + } +#endif /* HAVE_TCF_MIRRED_REDIRECT */ + + return -EINVAL; + } + + return 0; +} +#else +static int rnp_action_parse(struct tcf_exts *exts, u64 *action, u8 *queue) +{ + return -EINVAL; +} + +#endif + +static int rnp_clsu32_build_input(struct tc_cls_u32_offload *cls, + struct rnp_fdir_filter *input, + const struct rnp_match_parser *parsers) +{ + int i = 0, j = 0, err = -1; + __be32 val, mask, off; + bool found; + + for (i = 0; i < cls->knode.sel->nkeys; i++) { + off = cls->knode.sel->keys[i].off; + val = cls->knode.sel->keys[i].val; + mask = cls->knode.sel->keys[i].mask; + dbg("cls-key[%d] off %d val %d mask %d\n ", i, off, val, mask); + found = false; + for (j = 0; parsers[j].val; j++) { + /* according the off select parser */ + if (off == parsers[j].off) { + found = true; + err = parsers[j].val(input, val, mask); + if (err) + return err; + + break; + } + } + /* if the rule can't parse that we don't support the rule */ + if (!found) + return -EINVAL; + } + + return 0; +} + +static int rnp_config_knode(struct net_device *dev, __be16 protocol, + struct tc_cls_u32_offload *cls) +{ + /*1. check ethernet hw-feature U32 can offload */ + /*2. check U32 protocol We just support IPV4 offloading For now*/ + /*3. check if this cls is a cls of root u32 or cls of class u32*/ + /*4. check if this cls has been added. + * the filter extry create but the match val and mask don't fill + * so we can use it. 
+ * find an existing entry whose match value and mask were added before,
+ * so we do not need to add it again
+ */
+ u32 uhtid, link_uhtid;
+ int ret;
+ struct rnp_adapter *adapter = netdev_priv(dev);
+ u8 queue;
+ struct rnp_fdir_filter *input;
+ u32 loc = cls->knode.handle & 0xfffff;
+
+ if (protocol != htons(ETH_P_IP))
+ return -EOPNOTSUPP;
+
+ uhtid = TC_U32_USERHTID(cls->knode.handle);
+ link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
+
+ netdev_info(dev, "uhtid %d link_uhtid %d protocol 0x%2x\n", uhtid,
+ link_uhtid, ntohs(protocol));
+ /* For now only the root ingress handle is supported.
+ * TODO: more features
+ */
+ if (uhtid != 0x800)
+ return -EINVAL;
+
+ /* be careful: this memory must be freed on every error path */
+ input = kzalloc(sizeof(*input), GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+ ret = rnp_clsu32_build_input(cls, input, rnp_ipv4_parser);
+ if (ret) {
+ netdev_warn(dev, "this rule is not supported\n");
+ goto out;
+ }
+ ret = rnp_action_parse(cls->knode.exts, &input->action, &queue);
+ if (ret)
+ goto out;
+
+ dbg("tc filter rule sw_location %d\n", loc);
+
+ /* maybe bug here */
+ input->hw_idx = adapter->tuple_5_count++;
+ input->sw_idx = loc;
+ spin_lock(&adapter->fdir_perfect_lock);
+ rnp_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
+ spin_unlock(&adapter->fdir_perfect_lock);
+
+ return 0;
+out:
+ kfree(input);
+ return -EOPNOTSUPP;
+}
+
+#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
+static int rnp_setup_tc_cls_u32(struct net_device *dev,
+ struct tc_cls_u32_offload *cls_u32)
+{
+ __be16 proto = cls_u32->common.protocol;
+#else
+int rnp_setup_tc_cls_u32(struct net_device *dev, __be16 proto,
+ struct tc_cls_u32_offload *cls_u32)
+{
+#endif
+ dbg("cls_u32->command is %d\n", cls_u32->command);
+ switch (cls_u32->command) {
+ case TC_CLSU32_NEW_KNODE:
+ case TC_CLSU32_REPLACE_KNODE:
+ return rnp_config_knode(dev, proto, cls_u32);
+ case TC_CLSU32_DELETE_KNODE:
+ return rnp_delete_knode(dev, cls_u32);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+#endif /* HAVE_TC_SETUP_CLSU32 */
+
+#ifdef HAVE_TCF_BLOCK
+static int rnp_setup_tc_block_ingress_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct net_device *dev = cb_priv;
+ struct rnp_adapter *adapter = netdev_priv(dev);
+
+ if (test_bit(__RNP_DOWN, &adapter->state)) {
+ netdev_err(
+ adapter->netdev,
+ "Failed to setup tc on port %d. Link Down? 
0x%.2lx\n", + adapter->port, adapter->state); + return -EINVAL; + } + if (!tc_cls_can_offload_and_chain0(dev, type_data)) + return -EOPNOTSUPP; + + switch (type) { +#ifdef HAVE_TC_SETUP_CLSU32 + case TC_SETUP_CLSU32: + return rnp_setup_tc_cls_u32(dev, type_data); +#endif /* HAVE_TC_SETUP_CLSU32 */ + default: + return -EOPNOTSUPP; + } +} + +static LIST_HEAD(rnp_block_cb_list); + +#endif /* HAVE_TCF_BLOCK */ + +#ifdef TC_MQPRIO_HW_OFFLOAD_MAX + + +static int rnp_setup_mqprio(struct net_device *dev, + struct tc_mqprio_qopt *mqprio) +{ + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + return rnp_setup_tc(dev, mqprio->num_tc); +} +#endif /* TC_MQPRIO_HW_OFFLOAD_MAX */ + +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +static int __rnp_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) +#elif defined(HAVE_NDO_SETUP_TC_CHAIN_INDEX) +static int __rnp_setup_tc(struct net_device *netdev, u32 handle, + u32 chain_index, __be16 proto, + struct tc_to_netdev *tc) +#else +static int __rnp_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) +#endif /* HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV */ +{ +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#ifdef HAVE_TCF_BLOCK + struct rnp_adapter *adapter = netdev_priv(netdev); +#endif /* HAVE_TCF_BLOCK */ +#endif /* HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV */ +#ifndef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + unsigned int type = tc->type; +#ifdef HAVE_NDO_SETUP_TC_CHAIN_INDEX + if (chain_index) { + dbg("chain_index %d\n", chain_index); + return -EOPNOTSUPP; + } +#endif /* HAVE_NDO_SETUP_TC_CHAIN_INDEX */ + netdev_info(netdev, " TC_H_MAJ %x H_MAJ %x\n", TC_H_MAJ(handle), + TC_H_MAJ(TC_H_INGRESS)); +#ifdef HAVE_TC_SETUP_CLSU32 + if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) && + type == TC_SETUP_CLSU32) { + netdev_info(netdev, "setup_tc type is %d\n", type); + return rnp_setup_tc_cls_u32(netdev, proto, tc->cls_u32); + } +#endif /* HAVE_TC_SETUP_CLSU32 */ +#endif /* !HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV */ + switch (type) { +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#ifdef HAVE_TCF_BLOCK + case TC_SETUP_BLOCK: { + struct flow_block_offload *f = + (struct flow_block_offload *)type_data; + if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return flow_block_cb_setup_simple( + type_data, &rnp_block_cb_list, + rnp_setup_tc_block_ingress_cb, adapter, adapter, + true); + else + return -EOPNOTSUPP; + } +#else +#ifdef HAVE_TC_SETUP_CLSU32 + case TC_SETUP_CLSU32: + return rnp_setup_tc_cls_u32(netdev, type_data); +#endif /* HAVE_TC_SETUP_CLSU32 */ +#endif /* HAVE_TCF_BLOCK */ +#endif /* HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV */ + case TC_SETUP_QDISC_MQPRIO: +#if defined(HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV) + return rnp_setup_mqprio(netdev, type_data); +#endif + default: + return -EOPNOTSUPP; + } + + return 0; +} +#endif /* NETIF_F_HW_TC */ +void rnp_do_reset(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + rnp_reinit_locked(adapter); + else + rnp_reset(adapter); +} + +#ifdef HAVE_NDO_SET_FEATURES +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +static u32 rnp_fix_features(struct net_device *netdev, u32 features) +#else +static netdev_features_t rnp_fix_features(struct net_device *netdev, + netdev_features_t features) +#endif +{ + struct rnp_adapter *adapter = netdev_priv(netdev); +#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) || \ + defined(NETIF_F_HW_VLAN_STAG_FILTER) || \ + defined(NETIF_F_HW_VLAN_CTAG_RX) || \ + defined(NETIF_F_HW_VLAN_STAG_RX) || \ + 
defined(NETIF_F_HW_VLAN_STAG_TX) || defined(NETIF_F_HW_VLAN_CTAG_Ta) + struct rnp_hw *hw = &adapter->hw; +#endif + + /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + /* close rx csum when rx fcs on */ + if (!(adapter->flags2 & RNP_FLAG2_CHKSM_FIX)) { + if (features & NETIF_F_RXFCS) + features &= (~NETIF_F_RXCSUM); + } + /* Turn off LRO if not RSC capable */ + if (!(adapter->flags2 & RNP_FLAG2_RSC_CAPABLE)) + features &= ~NETIF_F_LRO; +#ifdef NETIF_F_HW_VLAN_CTAG_FILTER + if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER)) { +#ifdef NETIF_F_HW_VLAN_STAG_FILTER + if (hw->feature_flags & RNP_NET_FEATURE_STAG_FILTER) + features &= ~NETIF_F_HW_VLAN_STAG_FILTER; +#endif + } + +#endif + +#ifdef NETIF_F_HW_VLAN_STAG_FILTER + if (hw->feature_flags & RNP_NET_FEATURE_STAG_FILTER) { + if (!(features & NETIF_F_HW_VLAN_STAG_FILTER)) { +#ifdef NETIF_F_HW_VLAN_CTAG_FILTER + features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; +#endif + } + } +#endif + +#ifdef NETIF_F_HW_VLAN_CTAG_RX + if (!(features & NETIF_F_HW_VLAN_CTAG_RX)) { +#ifdef NETIF_F_HW_VLAN_STAG_RX + if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) + features &= ~NETIF_F_HW_VLAN_STAG_RX; +#endif + } +#endif + +#ifdef NETIF_F_HW_VLAN_STAG_RX + if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) { + if (!(features & NETIF_F_HW_VLAN_STAG_RX)) { +#ifdef NETIF_F_HW_VLAN_CTAG_RX + features &= ~NETIF_F_HW_VLAN_CTAG_RX; +#endif + } + } +#endif + +#ifdef NETIF_F_HW_VLAN_CTAG_TX + if (!(features & NETIF_F_HW_VLAN_CTAG_TX)) { +#ifdef NETIF_F_HW_VLAN_STAG_RX + if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) + features &= ~NETIF_F_HW_VLAN_STAG_TX; +#endif + } +#endif + +#ifdef NETIF_F_HW_VLAN_STAG_TX + if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) { + if (!(features & NETIF_F_HW_VLAN_STAG_TX)) { +#ifdef NETIF_F_HW_VLAN_CTAG_TX + features &= ~NETIF_F_HW_VLAN_CTAG_TX; +#endif + } + } +#endif + + return features; +} + +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +static int rnp_set_features(struct net_device *netdev, u32 features) +#else +static int rnp_set_features(struct net_device *netdev, + netdev_features_t features) +#endif +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = netdev->features ^ features; + bool need_reset = false; + struct rnp_hw *hw = &adapter->hw; + + netdev->features = features; + + /* if changed ntuple should close all */ + if (changed & NETIF_F_NTUPLE) { + if (!(features & NETIF_F_NTUPLE)) { + rnp_fdir_filter_exit(adapter); + } + } + + switch (features & NETIF_F_NTUPLE) { + case NETIF_F_NTUPLE: + /* turn off ATR, enable perfect filters and reset */ + if (!(adapter->flags & RNP_FLAG_FDIR_PERFECT_CAPABLE)) + need_reset = true; + + adapter->flags &= ~RNP_FLAG_FDIR_HASH_CAPABLE; + adapter->flags |= RNP_FLAG_FDIR_PERFECT_CAPABLE; + break; + default: + /* turn off perfect filters, enable ATR and reset */ + if (adapter->flags & RNP_FLAG_FDIR_PERFECT_CAPABLE) + need_reset = true; + + adapter->flags &= ~RNP_FLAG_FDIR_PERFECT_CAPABLE; + + /* We cannot enable ATR if SR-IOV is enabled */ + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) + break; + + /* We cannot enable ATR if we have 2 or more traffic classes */ + if (netdev_get_num_tc(netdev) > 1) + break; + + /* A sample rate of 0 indicates ATR disabled */ + if (!adapter->atr_sample_rate) + break; + + adapter->flags |= RNP_FLAG_FDIR_HASH_CAPABLE; + break; + } + +#ifdef NETIF_F_HW_VLAN_CTAG_FILTER + /* vlan filter changed */ + if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { + if 
(features & (NETIF_F_HW_VLAN_CTAG_FILTER)) { + // rnp_vlan_filter_on(hw); + hw->ops.set_vlan_filter_en(hw, true); + } else { + // rnp_vlan_filter_off(hw); + hw->ops.set_vlan_filter_en(hw, false); + } + rnp_msg_post_status(adapter, PF_VLAN_FILTER_STATUS); + } +#endif /* NETIF_F_HW_VLAN_CTAG_FILTER */ + + /* rss hash changed */ + if (changed & (NETIF_F_RXHASH)) { + bool iov_en = (adapter->flags & RNP_FLAG_SRIOV_ENABLED) ? true : + false; + + if (netdev->features & (NETIF_F_RXHASH)) + hw->ops.set_rx_hash(hw, true, iov_en); + else + hw->ops.set_rx_hash(hw, false, iov_en); + } + + /* rx fcs changed */ + /* in this mode rx l4/sctp checksum will get error */ + if (changed & NETIF_F_RXFCS) { + + if (features & NETIF_F_RXFCS) { + adapter->priv_flags |= RNP_PRIV_FLAG_RX_FCS; + hw->ops.set_fcs_mode(hw, true); + /* if in rx fcs mode ,hw rxcsum may error, + * close rxcusm + */ + } else { + adapter->priv_flags &= (~RNP_PRIV_FLAG_RX_FCS); + hw->ops.set_fcs_mode(hw, false); + } + rnp_msg_post_status(adapter, PF_FCS_STATUS); + } + + if (changed & NETIF_F_RXALL) + need_reset = true; + + if (features & NETIF_F_RXALL) + adapter->priv_flags |= RNP_PRIV_FLAG_RX_ALL; + else + adapter->priv_flags &= (~RNP_PRIV_FLAG_RX_ALL); + +#ifdef NETIF_F_HW_VLAN_CTAG_RX + if (features & NETIF_F_HW_VLAN_CTAG_RX) + rnp_vlan_strip_enable(adapter); + else + rnp_vlan_strip_disable(adapter); +#endif + + if (need_reset) + rnp_do_reset(netdev); + + return 0; +} +#endif /* HAVE_NDO_SET_FEATURES */ + +#ifdef HAVE_BRIDGE_ATTRIBS +#ifdef HAVE_NDO_BRIDGE_SETLINK_EXTACK +static int rnp_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, + __always_unused u16 flags, + struct netlink_ext_ack __always_unused *ext) +#elif defined(HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS) +static int rnp_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, + __always_unused u16 flags) +#else +static int rnp_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh) +#endif /* HAVE_NDO_BRIDGE_SETLINK_EXTACK */ + +{ + struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_hw *hw = &adapter->hw; + struct nlattr *attr, *br_spec; + int rem; + + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return -EOPNOTSUPP; + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + mode = nla_get_u16(attr); + if (mode == BRIDGE_MODE_VEPA) { + adapter->flags2 &= ~RNP_FLAG2_BRIDGE_MODE_VEB; + wr32(hw, RNP_DMA_CONFIG, + rd32(hw, RNP_DMA_CONFIG) | DMA_VEB_BYPASS); + } else if (mode == BRIDGE_MODE_VEB) { + adapter->flags2 |= RNP_FLAG2_BRIDGE_MODE_VEB; + wr32(hw, RNP_DMA_CONFIG, + rd32(hw, RNP_DMA_CONFIG) & (~DMA_VEB_BYPASS)); + + } else + return -EINVAL; + + e_info(drv, "enabling bridge mode: %s\n", + mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); + } + + return 0; +} + +#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +static int rnp_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __maybe_unused filter_mask, int nlflags) +#elif defined(HAVE_BRIDGE_FILTER) +static int rnp_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __always_unused filter_mask) +#else +static int rnp_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev) +#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */ +{ + struct rnp_adapter *adapter = netdev_priv(dev); + u16 mode; + + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return 0; + + if (adapter->flags2 & RNP_FLAG2_BRIDGE_MODE_VEB) + mode = BRIDGE_MODE_VEB; + else + mode = BRIDGE_MODE_VEPA; + +#ifdef HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags, + filter_mask, NULL); +#elif defined(HAVE_NDO_BRIDGE_GETLINK_NLFLAGS) + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags); +#elif defined(HAVE_NDO_FDB_ADD_VID) || \ + defined NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0); +#else + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode); +#endif /* HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT */ +} + +#endif /* HAVE_BRIDGE_ATTRIBS */ + +#ifdef HAVE_NDO_FEATURES_CHECK +#define RNP_MAX_TUNNEL_HDR_LEN 80 +#ifdef NETIF_F_GSO_PARTIAL +#define RNP_MAX_MAC_HDR_LEN 127 +#define RNP_MAX_NETWORK_HDR_LEN 511 + +static netdev_features_t rnp_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > RNP_MAX_MAC_HDR_LEN)) + return features & + ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_TSO | NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > RNP_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC | + NETIF_F_TSO | NETIF_F_TSO6); + + /* We can only support IPV4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. 
+ */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} +#else +static netdev_features_t rnp_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + if (!skb->encapsulation) + return features; + + if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) > + RNP_MAX_TUNNEL_HDR_LEN)) + return features & ~NETIF_F_CSUM_MASK; + + return features; +} + +#endif /* NETIF_F_GSO_PARTIAL */ +#endif /* HAVE_NDO_FEATURES_CHECK */ + +#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) +static void rnp_clear_udp_tunnel_port(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + + if (!(adapter->flags & (RNP_FLAG_VXLAN_OFFLOAD_CAPABLE))) + return; + + adapter->vxlan_port = 0; + hw->ops.set_vxlan_port(hw, adapter->vxlan_port); +} +#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ + +#ifdef HAVE_UDP_ENC_RX_OFFLOAD +/** + * rnp_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports + * @dev: The port's netdev + * @ti: Tunnel endpoint information + **/ +__maybe_unused static void rnp_add_udp_tunnel_port(struct net_device *dev, + struct udp_tunnel_info *ti) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_hw *hw = &adapter->hw; + __be16 port = ti->port; + + if (ti->sa_family != AF_INET) + return; + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (!(adapter->flags & RNP_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + + if (adapter->vxlan_port == port) + return; + + if (adapter->vxlan_port) { + netdev_info(dev, + "VXLAN port %d set, not adding port %d\n", + ntohs(adapter->vxlan_port), ntohs(port)); + return; + } + + adapter->vxlan_port = port; + break; + default: + return; + } + hw->ops.set_vxlan_port(hw, ntohs(adapter->vxlan_port)); +} + +/** + * rnp_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports + * @dev: The port's netdev + * @ti: Tunnel endpoint information + **/ +__maybe_unused static void rnp_del_udp_tunnel_port(struct net_device *dev, + struct udp_tunnel_info *ti) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + if (ti->sa_family != AF_INET) + return; + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (!(adapter->flags & RNP_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + + if (adapter->vxlan_port != ti->port) { + netdev_info(dev, "VXLAN port %d not found\n", + ntohs(ti->port)); + return; + } + + break; + default: + return; + } + + rnp_clear_udp_tunnel_port(adapter); + adapter->flags2 |= RNP_FLAG2_UDP_TUN_REREG_NEEDED; +} +#elif defined(HAVE_VXLAN_RX_OFFLOAD) +/** + * rnp_add_vxlan_port - Get notifications about VXLAN ports that come up + * @dev: The port's netdev + * @sa_family: Socket Family that VXLAN is notifiying us about + * @port: New UDP port number that VXLAN started listening to + */ +static void rnp_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_hw *hw = &adapter->hw; + + if (sa_family != AF_INET) + return; + + if (!(adapter->flags & RNP_FLAG_VXLAN_OFFLOAD_ENABLE)) + return; + + if (adapter->vxlan_port == port) + return; + + if (adapter->vxlan_port) { + netdev_info(dev, + "Hit Max num of VXLAN ports, not adding port %d\n", + ntohs(port)); + return; + } + adapter->vxlan_port = port; + hw->ops.set_vxlan_port(hw, adapter->vxlan_port); +} + +/** + * rnp_del_vxlan_port - Get notifications about VXLAN ports that go away + * 
@dev: The port's netdev + * @sa_family: Socket Family that VXLAN is notifying us about + * @port: UDP port number that VXLAN stopped listening to + */ +static void rnp_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + + if (!(adapter->flags & RNP_FLAG_VXLAN_OFFLOAD_ENABLE)) + return; + + if (sa_family != AF_INET) + return; + + if (adapter->vxlan_port != port) { + netdev_info(dev, "Port %d was not found, not deleting\n", + ntohs(port)); + return; + } + + rnp_clear_udp_tunnel_port(adapter); + adapter->flags2 |= RNP_FLAG2_UDP_TUN_REREG_NEEDED; +} +#endif /* HAVE_VXLAN_RX_OFFLOAD */ + +#ifdef HAVE_NET_DEVICE_OPS +const struct net_device_ops rnp10_netdev_ops = { + .ndo_open = rnp_open, + .ndo_stop = rnp_close, + .ndo_start_xmit = rnp_xmit_frame, + .ndo_set_rx_mode = rnp_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, +#ifdef HAVE_NDO_ETH_IOCTL + .ndo_eth_ioctl = rnp_ioctl, +#else + .ndo_do_ioctl = rnp_ioctl, +#endif +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + /* RHEL7 requires this to be defined to enable extended ops. + * RHEL7 uses the function get_ndo_ext to retrieve offsets for + * extended fields from with the net_device_ops struct and + * ndo_size is checked to determine whether or not + * the offset is valid. + */ + .ndo_size = sizeof(const struct net_device_ops), +#endif +#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU + .extended.ndo_change_mtu = rnp_change_mtu, +#else + .ndo_change_mtu = rnp_change_mtu, +#endif +#ifdef HAVE_NDO_GET_STATS64 + .ndo_get_stats64 = rnp_get_stats64, +#else + .ndo_get_stats = rnp_get_stats, +#endif + .ndo_tx_timeout = rnp_tx_timeout, +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_TX_MAXRATE + .extended.ndo_set_tx_maxrate = rnp_tx_maxrate, +#else +#ifndef NO_TX_MAXRATE + .ndo_set_tx_maxrate = rnp_tx_maxrate, +#endif +#endif + .ndo_set_mac_address = rnp_set_mac, +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + .ndo_vlan_rx_add_vid = rnp_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = rnp_vlan_rx_kill_vid, +#endif + +#ifdef IFLA_VF_MAX + .ndo_set_vf_mac = rnp_ndo_set_vf_mac, +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN + .extended.ndo_set_vf_vlan = rnp_ndo_set_vf_vlan, +#else + .ndo_set_vf_vlan = rnp_ndo_set_vf_vlan, +#endif +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + .ndo_set_vf_rate = rnp_ndo_set_vf_bw, +#else + .ndo_set_vf_tx_rate = rnp_ndo_set_vf_bw, +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +#if defined(HAVE_VF_SPOOFCHK_CONFIGURE) && IS_ENABLED(CONFIG_PCI_IOV) + .ndo_set_vf_spoofchk = rnp_ndo_set_vf_spoofchk, +#endif +#ifdef HAVE_NDO_SET_VF_LINK_STATE + .ndo_set_vf_link_state = rnp_ndo_set_vf_link_state, +#endif +#ifdef HAVE_NDO_SET_VF_TRUST +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + .extended.ndo_set_vf_trust = rnp_ndo_set_vf_trust, +#else + .ndo_set_vf_trust = rnp_ndo_set_vf_trust, +#endif /* HAVE_RHEL7_NET_DEVICE_OPS_EXT */ +#endif /* HAVE_NDO_SET_VF_TRUST */ + .ndo_get_vf_config = rnp_ndo_get_vf_config, +#endif /* IFLA_VF_MAX */ +#ifdef HAVE_SETUP_TC +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC + .extended.ndo_setup_tc_rh = __rnp_setup_tc, +#else +#ifdef NETIF_F_HW_TC + .ndo_setup_tc = __rnp_setup_tc, +#else + .ndo_setup_tc = rnp_setup_tc, +#endif /* NETIF_F_HW_TC */ +#endif /* HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC */ +#endif /* HAVE_SETUP_TC */ +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rnp_netpoll, +#endif +#ifdef HAVE_BRIDGE_ATTRIBS + .ndo_bridge_setlink = rnp_ndo_bridge_setlink, + .ndo_bridge_getlink = rnp_ndo_bridge_getlink, +#endif +#ifdef 
HAVE_UDP_ENC_RX_OFFLOAD +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL + .extended.ndo_udp_tunnel_add = rnp_add_udp_tunnel_port, + .extended.ndo_udp_tunnel_del = rnp_del_udp_tunnel_port, +#else +#ifndef HAVE_UDP_TUNNEL_NIC_INFO + .ndo_udp_tunnel_add = rnp_add_udp_tunnel_port, + .ndo_udp_tunnel_del = rnp_del_udp_tunnel_port, +#endif +#endif +#elif defined(HAVE_VXLAN_RX_OFFLOAD) + .ndo_add_vxlan_port = rnp_add_vxlan_port, + .ndo_del_vxlan_port = rnp_del_vxlan_port, +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ +#ifdef HAVE_NDO_FEATURES_CHECK + .ndo_features_check = rnp_features_check, +#endif /* HAVE_NDO_FEATURES_CHECK */ + +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +}; + +/* RHEL6 keeps these operations in a separate structure */ +static const struct net_device_ops_ext rnp_netdev_ops_ext = { + .size = sizeof(struct net_device_ops_ext), +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ +#ifdef HAVE_NDO_SET_FEATURES + .ndo_set_features = rnp_set_features, + .ndo_fix_features = rnp_fix_features, +#endif /* HAVE_NDO_SET_FEATURES */ +}; +#endif /* HAVE_NET_DEVICE_OPS */ + +static void rnp_assign_netdev_ops(struct net_device *dev) +{ + /* different hw can assign difference fun */ +#ifdef HAVE_NET_DEVICE_OPS + dev->netdev_ops = &rnp10_netdev_ops; +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + set_netdev_ops_ext(dev, &rnp_netdev_ops_ext); +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ +#else /* HAVE_NET_DEVICE_OPS */ + dev->open = &rnp_open; + dev->stop = &rnp_close; + dev->hard_start_xmit = &rnp_xmit_frame; +#ifdef HAVE_SET_RX_MODE + dev->set_rx_mode = &rnp_set_rx_mode; +#endif + dev->set_multicast_list = &rnp_set_rx_mode; + dev->set_mac_address = &rnp_set_mac; + dev->change_mtu = &rnp_change_mtu; + dev->do_ioctl = &rnp_ioctl; +#ifdef HAVE_TX_TIMEOUT + dev->tx_timeout = &rnp_tx_timeout; +#endif +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + // dev->vlan_rx_register = &rnp_vlan_mode; //todo + dev->vlan_rx_add_vid = &rnp_vlan_rx_add_vid; + dev->vlan_rx_kill_vid = &rnp_vlan_rx_kill_vid; +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = &rnp_netpoll; +#endif +#ifdef HAVE_NETDEV_SELECT_QUEUE + dev->select_queue = &__netdev_pick_tx; +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#endif /* HAVE_NET_DEVICE_OPS */ + + rnp_set_ethtool_ops(dev); + dev->watchdog_timeo = 5 * HZ; +} + +/** + * rnp_wol_supported - Check whether device supports WoL + * @hw: hw specific details + * @device_id: the device ID + * @subdev_id: the subsystem device ID + * + * This function is used by probe and ethtool to determine + * which devices have WoL support + * + **/ +int rnp_wol_supported(struct rnp_adapter *adapter, u16 device_id, + u16 subdevice_id) +{ + int is_wol_supported = 0; + + struct rnp_hw *hw = &adapter->hw; + + if (hw->wol_supported) + is_wol_supported = 1; + return is_wol_supported; +} + +static inline unsigned long rnp_tso_features(struct rnp_hw *hw) +{ + unsigned long features = 0; + +#ifdef NETIF_F_TSO + if (hw->feature_flags & RNP_NET_FEATURE_TSO) + features |= NETIF_F_TSO; +#endif /* NETIF_F_TSO */ +#ifdef NETIF_F_TSO6 + if (hw->feature_flags & RNP_NET_FEATURE_TSO) + features |= NETIF_F_TSO6; +#endif /* NETIF_F_TSO6 */ +#ifdef NETIF_F_GSO_PARTIAL + features |= NETIF_F_GSO_PARTIAL; + if (hw->feature_flags & RNP_NET_FEATURE_TX_UDP_TUNNEL) + features |= RNP_GSO_PARTIAL_FEATURES; +#endif + + return features; +} + +static void remove_mbx_irq(struct rnp_adapter *adapter) +{ + /* mbx */ + if (adapter->num_other_vectors) { + /* only msix use indepented intr */ + if (adapter->flags & RNP_FLAG_MSIX_ENABLED) { + 
adapter->hw.mbx.ops.configure( + &adapter->hw, adapter->msix_entries[0].entry, + false); + free_irq(adapter->msix_entries[0].vector, adapter); + + adapter->hw.mbx.other_irq_enabled = false; + } + } +} + +static int register_mbx_irq(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int err = 0; + + /* for mbx:vector0 */ + if (adapter->num_other_vectors) { + /* only do this in msix mode */ + if (adapter->flags & RNP_FLAG_MSIX_ENABLED) { + err = request_irq(adapter->msix_entries[0].vector, + rnp_msix_other, 0, netdev->name, + adapter); + if (err) { + e_err(probe, + "request_irq for msix_other failed: %d\n", + err); + goto err_mbx; + } + hw->mbx.ops.configure( + hw, adapter->msix_entries[0].entry, true); + adapter->hw.mbx.other_irq_enabled = true; + } + } + +err_mbx: + return err; +} + +static int rnp_rm_adpater(struct rnp_adapter *adapter) +{ + struct net_device *netdev; + struct rnp_hw *hw = &adapter->hw; + + netdev = adapter->netdev; + pr_info("= remove adapter:%s =\n", netdev->name); + + rnp_dbg_adapter_exit(adapter); + + netif_carrier_off(netdev); + + set_bit(__RNP_DOWN, &adapter->state); + set_bit(__RNP_REMOVE, &adapter->state); + if (module_enable_ptp) { + while (test_bit(__RNP_PTP_TX_IN_PROGRESS, &adapter->state)) { + usleep_range(10000, 20000); + } + cancel_work_sync(&adapter->tx_hwtstamp_work); + } + cancel_work_sync(&adapter->service_task); + + del_timer_sync(&adapter->service_timer); +#ifdef CONFIG_RNP_DCA + if (adapter->flags & RNP_FLAG_DCA_ENABLED) { + adapter->flags &= ~RNP_FLAG_DCA_ENABLED; + dca_remove_requester(&pdev->dev); + wr32(&adapter->hw + RNP_DCA_CTRL, 1); + } +#endif + rnp_sysfs_exit(adapter); + rnp_fdir_filter_exit(adapter); + adapter->priv_flags &= (~RNP_PRIV_FLAG_TCP_SYNC); + + if (adapter->rpu_inited) { + rnp_rpu_mpe_stop(adapter); + adapter->rpu_inited = 0; + } + + if (netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(netdev); + + adapter->netdev = NULL; + + if (hw->ops.driver_status) + hw->ops.driver_status(hw, false, rnp_driver_insmod); + + remove_mbx_irq(adapter); + + rnp_clear_interrupt_scheme(adapter); + + if (hw->ncsi_en) { + rnp_mbx_probe_stat_set(hw, MBX_REMOVE); + } + + if (adapter->io_addr) + iounmap(adapter->io_addr); + + if (adapter->io_addr_bar0) + iounmap(adapter->io_addr_bar0); + + free_netdev(netdev); + + pr_info("remove complete\n"); + + return 0; +} + +static void rnp_fix_dma_tx_status(struct rnp_adapter *adapter) +{ + int i; + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + + if ((hw->hw_type == rnp_hw_n10) || (hw->hw_type == rnp_hw_n400)) { + for (i = 0; i < dma->max_tx_queues; i++) + dma_ring_wr32(dma, RING_OFFSET(i) + RNP_DMA_TX_START, + 1); + } +} + +static u8 rnp10_pfnum(u8 __iomem *hw_addr_bar0, struct pci_dev *pdev) +{ + /* n10 read this from bar0 */ + u16 vf_num = -1; + u32 pfvfnum_reg; +#define PF_NUM_REG_N10 (0x75f000) + pfvfnum_reg = (PF_NUM_REG_N10 & (pci_resource_len(pdev, 0) - 1)); + vf_num = readl(hw_addr_bar0 + pfvfnum_reg); +#define VF_NUM_MASK_TEMP (0x400) +#define VF_NUM_OFF (4) + return ((vf_num & VF_NUM_MASK_TEMP) >> VF_NUM_OFF); +} + +static int rnp_can_rpu_start(struct rnp_adapter *adapter) +{ + if (adapter->hw.rpu_addr == NULL) + return 0; + if ((adapter->pdev->device & 0xff00) == 0x1c00) { + return 1; + } + if (adapter->hw.rpu_availble) { + return 1; + } + return 0; +} + +static int rnp_add_adpater(struct pci_dev *pdev, struct rnp_info *ii, + struct rnp_adapter **padapter) +{ + int i, err = 0; + struct rnp_adapter 
*adapter = NULL; + struct net_device *netdev; + struct rnp_hw *hw; + u8 __iomem *hw_addr = NULL; + u8 __iomem *hw_addr_bar0 = NULL; + + u32 dma_version = 0; + u32 nic_version = 0; + u32 queues = ii->total_queue_pair_cnts; + static int bd_number; +#ifndef NETIF_F_GSO_PARTIAL +#ifdef HAVE_NDO_SET_FEATURES +#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT + netdev_features_t hw_features; +#else + u32 hw_features; +#endif +#endif +#endif /* NETIF_F_GSO_PARTIAL */ + + pr_info("==== add adapter queues:%d ====", queues); +#ifdef HAVE_TX_MQ + netdev = alloc_etherdev_mq(sizeof(struct rnp_adapter), queues); +#else + queues = 1; + netdev = alloc_etherdev(sizeof(struct rnp_adapter)); +#endif + if (!netdev) + return -ENOMEM; + + if (!fix_eth_name) + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter = netdev_priv(netdev); + + memset((char *)adapter, 0x00, sizeof(struct rnp_adapter)); + adapter->netdev = netdev; + adapter->pdev = pdev; +#ifdef HAVE_TX_MQ +#ifndef HAVE_NETDEV_SELECT_QUEUE + adapter->indices = queues; +#endif + +#endif + adapter->max_ring_pair_counts = queues; + if (padapter) + *padapter = adapter; + + adapter->bd_number = bd_number++; + adapter->port = 0; + snprintf(adapter->name, sizeof(netdev->name), "%s%d%d", rnp_driver_name, + 1, adapter->bd_number); + pci_set_drvdata(pdev, adapter); + + hw = &adapter->hw; + hw->back = adapter; + /* first setup hw type */ + hw->rss_type = ii->rss_type; + hw->hw_type = ii->hw_type; + switch (hw->hw_type) { + case rnp_hw_n10: + case rnp_hw_n20: + case rnp_hw_n400: + case rnp_hw_uv440: + hw_addr_bar0 = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!hw_addr_bar0) { + dev_err(&pdev->dev, "pcim_iomap bar%d failed!\n", 0); + return -EIO; + } +#ifdef FIX_VF_BUG + rnp_wr_reg(hw_addr_bar0 + + (0x7982fc & (pci_resource_len(pdev, 0) - 1)), + 0); +#endif + + /* n10 use bar4 */ +#define RNP_NIC_BAR_N10 4 + hw_addr = ioremap(pci_resource_start(pdev, RNP_NIC_BAR_N10), + pci_resource_len(pdev, RNP_NIC_BAR_N10)); + if (!hw_addr) { + dev_err(&pdev->dev, "pcim_iomap bar%d failed!\n", + RNP_NIC_BAR_N10); + return -EIO; + } + pr_info("[bar%d]:%p %llx len=%d MB\n", RNP_NIC_BAR_N10, hw_addr, + (unsigned long long)pci_resource_start(pdev, + RNP_NIC_BAR_N10), + (int)pci_resource_len(pdev, RNP_NIC_BAR_N10) / 1024 / + 1024); + /* get dma version */ + dma_version = rnp_rd_reg(hw_addr); + + if (rnp10_pfnum(hw_addr_bar0, pdev)) + hw->pfvfnum = PF_NUM(1); + else + hw->pfvfnum = PF_NUM(0); + +#ifdef FIX_VF_BUG + if (hw->pfvfnum) + hw->hw_addr = hw_addr + 0x100000; + else + hw->hw_addr = hw_addr; +#else + hw->hw_addr = hw_addr; +#endif + /* setup msix base */ +#ifdef FIX_VF_BUG + if (hw->pfvfnum) + hw->ring_msix_base = hw->hw_addr + 0xa4000 + 0x200; + else + hw->ring_msix_base = hw->hw_addr + 0xa4000; +#else + hw->ring_msix_base = hw->hw_addr + 0xa4000; +#endif + nic_version = rd32(hw, RNP_TOP_NIC_VERSION); + adapter->irq_mode = irq_mode_msix; + adapter->flags |= RNP_FLAG_MSIX_CAPABLE; + + break; + default: +#ifdef FIX_VF_BUG + hw_addr_bar0 = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); +#endif + hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + goto err_free_net; + break; + } + + /* setup FT_PADDING */ + { +#ifdef FT_PADDING + u32 data; + + data = rnp_rd_reg(hw->hw_addr + RNP_DMA_CONFIG); + SET_BIT(8, data); + rnp_wr_reg(hw->hw_addr + RNP_DMA_CONFIG, data); + adapter->priv_flags |= RNP_PRIV_FLAG_FT_PADDING; +#endif + } + + /* assign to adapter */ + adapter->io_addr = hw_addr; + adapter->io_addr_bar0 = hw_addr_bar0; + if 
(pci_resource_len(pdev, 0) == (8 * 1024 * 1024)) { + hw->rpu_addr = hw_addr_bar0; + } + + hw->pdev = pdev; + hw->dma_version = dma_version; + adapter->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV +#ifdef MSG_PROBE_ENABLE + | NETIF_MSG_PROBE +#endif +#ifdef MSG_IFUP_ENABLE + | NETIF_MSG_IFUP +#endif +#ifdef MSG_IFDOWN_ENABLE + | NETIF_MSG_IFDOWN +#endif + ); + + /* we have other irq */ + adapter->num_other_vectors = 1; + /* get software info */ + ii->get_invariants(hw); + + spin_lock_init(&adapter->link_stat_lock); + + if (adapter->num_other_vectors) { + /* Mailbox */ + rnp_init_mbx_params_pf(hw); + memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops)); + if (dma_version >= 0x20210111) { +#ifndef NO_CM3_MBX + rnp_mbx_link_event_enable(hw, 0); + if ((hw->hw_type == rnp_hw_n10) || + (hw->hw_type == rnp_hw_n400)) + rnp_mbx_force_speed(hw, 0); + if (rnp_mbx_get_capability(hw, ii)) { + dev_err(&pdev->dev, + "rnp_mbx_get_capability failed!\n"); + err = -EIO; + goto err_free_net; + } + + /* should check eco */ +#ifdef VF_PROMISC_SUPPORT + if (!hw->eco) { + dev_err(&pdev->dev, + "only v2 chips support vf promisc!\n"); + err = -EIO; + goto err_free_net; + + } +#endif + /* if not eco, close ptp */ + if (!hw->eco) { + module_enable_ptp = 0; + } + + adapter->portid_of_card = hw->port_id[0]; +#else + rnp_dbg("no mbx to get capability\n"); +#endif + if (hw->eco) { + hw->eth.num_rar_entries -= 1; + hw->mac.num_rar_entries -= 1; + hw->num_rar_entries -= 1; + } + + adapter->portid_of_card = hw->pfvfnum ? 1 : 0; + adapter->wol = hw->wol; + } + } + if (hw->ncsi_en) { + hw->eth.num_rar_entries -= hw->ncsi_rar_entries; + hw->mac.num_rar_entries -= hw->ncsi_rar_entries; + hw->num_rar_entries -= hw->ncsi_rar_entries; + } + + if (hw->force_status) + adapter->priv_flags |= RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE; + else + adapter->priv_flags &= (~RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE); + hw->default_rx_queue = 0; + pr_info("%s %s: dma version:0x%x, nic version:0x%x, pfvfnum:0x%x\n", + adapter->name, pci_name(pdev), hw->dma_version, nic_version, + hw->pfvfnum); + + /* Setup hw api */ + hw->mac.type = ii->mac; + /* EEPROM */ + if (ii->eeprom_ops) + memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); + + hw->phy.sfp_type = rnp_sfp_type_unknown; + + hw->ops.setup_ethtool(netdev); + rnp_assign_netdev_ops(netdev); + rnp_check_options(adapter); + /* setup the private structure */ + /* this private is used only once + */ + err = rnp_sw_init(adapter); + if (err) + goto err_sw_init; + + err = hw->ops.reset_hw(hw); + hw->phy.reset_if_overtemp = false; + if (err) { + e_dev_err("HW Init failed: %d\n", err); + goto err_sw_init; + } + if (hw->ops.driver_status) + hw->ops.driver_status(hw, true, rnp_driver_insmod); + if (hw->ops.driver_status) { + hw->ops.driver_status(hw, !!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE), + rnp_driver_force_control_mac); + } + +#if defined(CONFIG_PCI_IOV) + if (adapter->num_other_vectors) { + rnp_enable_sriov(adapter); + pci_sriov_set_totalvfs(pdev, hw->max_vfs - 1); + } +#endif + +#ifdef HAVE_NETDEVICE_MIN_MAX_MTU + /* MTU range: 68 - 9710 */ +#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU + netdev->extended->min_mtu = hw->min_length; + netdev->extended->max_mtu = + hw->max_length - (ETH_HLEN + 2 * ETH_FCS_LEN); +#else + netdev->min_mtu = hw->min_length; + netdev->max_mtu = hw->max_length - (ETH_HLEN + 2 * ETH_FCS_LEN); +#endif +#endif + +#ifdef NETIF_F_GSO_PARTIAL + + if (hw->feature_flags & RNP_NET_FEATURE_SG) + netdev->features |= NETIF_F_SG; + if (hw->feature_flags & 
RNP_NET_FEATURE_TSO) + netdev->features |= NETIF_F_TSO | NETIF_F_TSO6; + if (hw->feature_flags & RNP_NET_FEATURE_RX_HASH) + netdev->features |= NETIF_F_RXHASH; + if (hw->feature_flags & RNP_NET_FEATURE_RX_CHECKSUM) + netdev->features |= NETIF_F_RXCSUM; + if (hw->feature_flags & RNP_NET_FEATURE_TX_CHECKSUM) + netdev->features |= NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC; + + if (hw->feature_flags & RNP_NET_FEATURE_USO) { +#ifdef NETIF_F_GSO_UDP_L4 + netdev->features |= NETIF_F_GSO_UDP_L4; +#endif + } + + netdev->features |= NETIF_F_HIGHDMA; + + if (hw->feature_flags & RNP_NET_FEATURE_TX_UDP_TUNNEL) { + netdev->gso_partial_features = RNP_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | + RNP_GSO_PARTIAL_FEATURES; + } + + netdev->hw_features |= netdev->features; + + if (hw->ncsi_en) { + hw->feature_flags &= ~RNP_NET_FEATURE_VLAN_OFFLOAD; + } + if (hw->feature_flags & RNP_NET_FEATURE_VLAN_FILTER) + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (hw->feature_flags & RNP_NET_FEATURE_STAG_FILTER) + netdev->hw_features |= NETIF_F_HW_VLAN_STAG_FILTER; + if (hw->feature_flags & RNP_NET_FEATURE_VLAN_OFFLOAD) { + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + } + if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) { + netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_TX; + } + netdev->hw_features |= NETIF_F_RXALL; + if (hw->feature_flags & RNP_NET_FEATURE_RX_NTUPLE_FILTER) + netdev->hw_features |= NETIF_F_NTUPLE; + if (hw->feature_flags & RNP_NET_FEATURE_RX_FCS) + netdev->hw_features |= NETIF_F_RXFCS; +#ifdef NETIF_F_HW_TC + if (hw->feature_flags & RNP_NET_FEATURE_HW_TC) + netdev->hw_features |= NETIF_F_HW_TC; +#endif + + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->hw_enc_features |= netdev->vlan_features; + netdev->mpls_features |= NETIF_F_HW_CSUM; + + if (hw->feature_flags & RNP_NET_FEATURE_VLAN_FILTER) + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (hw->feature_flags & RNP_NET_FEATURE_STAG_FILTER) + netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER; + if (hw->feature_flags & RNP_NET_FEATURE_VLAN_OFFLOAD) { + netdev->features |= NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + } + if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) { + netdev->features |= NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_TX; + } + + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->priv_flags |= IFF_SUPP_NOFCS; + + if (adapter->flags2 & RNP_FLAG2_RSC_CAPABLE) + netdev->hw_features |= NETIF_F_LRO; + +#else /* NETIF_F_GSO_PARTIAL */ + + if (hw->feature_flags & RNP_NET_FEATURE_SG) + netdev->features |= NETIF_F_SG; + if (hw->feature_flags & RNP_NET_FEATURE_TX_CHECKSUM) + netdev->features |= NETIF_F_IP_CSUM; + + netdev->features |= NETIF_F_HIGHDMA; + + if (hw->feature_flags & RNP_NET_FEATURE_TX_UDP_TUNNEL) { + netdev->features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + } + +#ifdef NETIF_F_IPV6_CSUM + if (hw->feature_flags & RNP_NET_FEATURE_TX_CHECKSUM) + netdev->features |= NETIF_F_IPV6_CSUM; +#endif + + if (hw->feature_flags & RNP_NET_FEATURE_TSO) + netdev->features |= NETIF_F_TSO | NETIF_F_TSO6; +#ifdef NETIF_F_GSO_UDP_L4 + if (hw->feature_flags & RNP_NET_FEATURE_USO) + netdev->features |= NETIF_F_GSO_UDP_L4; +#endif + +#ifdef NETIF_F_HW_VLAN_CTAG_TX + + if (hw->feature_flags & RNP_NET_FEATURE_VLAN_FILTER) + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (hw->feature_flags & RNP_NET_FEATURE_STAG_FILTER) + netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER; + if (hw->feature_flags 
& RNP_NET_FEATURE_VLAN_OFFLOAD) { + netdev->features |= NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + } + if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) { + netdev->features |= NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_TX; + } +#endif + netdev->features |= rnp_tso_features(hw); + +#ifdef NETIF_F_RXHASH + if (hw->feature_flags & RNP_NET_FEATURE_RX_HASH) + netdev->features |= NETIF_F_RXHASH; +#endif /* NETIF_F_RXHASH */ + + if (hw->feature_flags & RNP_NET_FEATURE_RX_CHECKSUM) + netdev->features |= NETIF_F_RXCSUM; + +#ifdef HAVE_NDO_SET_FEATURES + /* copy netdev features into list of user selectable features */ +#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT + hw_features = netdev->hw_features; +#else + hw_features = get_netdev_hw_features(netdev); +#endif + hw_features |= netdev->features; + + /* give us the option of enabling RSC/LRO later */ + if (adapter->flags2 & RNP_FLAG2_RSC_CAPABLE) + hw_features |= NETIF_F_LRO; +#else +#ifdef NETIF_F_GRO + /* this is only needed on kernels prior to 2.6.39 */ + netdev->features |= NETIF_F_GRO; +#endif /* NETIF_F_GRO */ +#endif /* HAVE_NDO_SET_FEATURES */ + +#ifdef HAVE_NDO_SET_FEATURES + + if (hw->feature_flags & RNP_NET_FEATURE_TX_CHECKSUM) + hw_features |= NETIF_F_SCTP_CSUM; + if (hw->feature_flags & RNP_NET_FEATURE_RX_NTUPLE_FILTER) + hw_features |= NETIF_F_NTUPLE; +#ifdef NETIF_F_HW_TC + if (hw->feature_flags & RNP_NET_FEATURE_HW_TC) + hw_features |= NETIF_F_HW_TC; + hw_features |= NETIF_F_RXALL; + + if (hw->feature_flags & RNP_NET_FEATURE_RX_FCS) + hw_features |= NETIF_F_RXFCS; +#endif +#endif + +#ifdef HAVE_NDO_SET_FEATURES +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + set_netdev_hw_features(netdev, hw_features); +#else + netdev->hw_features = hw_features; +#endif +#endif + +#ifdef HAVE_NETDEV_VLAN_FEATURES + + if (hw->feature_flags & RNP_NET_FEATURE_SG) + netdev->vlan_features |= NETIF_F_SG; + if (hw->feature_flags & RNP_NET_FEATURE_TX_CHECKSUM) + netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + if (hw->feature_flags & RNP_NET_FEATURE_TSO) + netdev->vlan_features |= NETIF_F_TSO | NETIF_F_TSO6; +#ifdef NETIF_F_GSO_UDP_L4 + if (hw->feature_flags & RNP_NET_FEATURE_USO) + netdev->vlan_features |= NETIF_F_GSO_UDP_L4; +#endif +#endif /* HAVE_NETDEV_VLAN_FEATURES */ + +#ifdef HAVE_ENCAP_CSUM_OFFLOAD + if (hw->feature_flags & RNP_NET_FEATURE_SG) + netdev->hw_enc_features |= NETIF_F_SG; +#endif /* HAVE_ENCAP_CSUM_OFFLOAD */ + +#ifdef HAVE_VXLAN_RX_OFFLOAD + if (hw->feature_flags & RNP_NET_FEATURE_TX_CHECKSUM) { + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + } + +#endif /* HAVE_VXLAN_RX_OFFLOAD */ + +#endif /* NETIF_F_GSO_PARTIAL */ + +#ifdef IFF_UNICAST_FLT + netdev->priv_flags |= IFF_UNICAST_FLT; +#endif +#ifdef IFF_SUPP_NOFCS + netdev->priv_flags |= IFF_SUPP_NOFCS; +#endif + +#if IS_ENABLED(CONFIG_DCB) + rnp_dcb_init(netdev, adapter); +#endif + + if (adapter->flags2 & RNP_FLAG2_RSC_ENABLED) + netdev->features |= NETIF_F_LRO; + + eth_hw_addr_set(netdev, hw->mac.perm_addr); +#ifdef ETHTOOL_GPERMADDR + memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); +#endif + pr_info("dev mac:%pM \n", netdev->dev_addr); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + e_dev_err("invalid MAC address\n"); + err = -EIO; + goto err_sw_init; + } + ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); + + timer_setup(&adapter->service_timer, rnp_service_timer, 0); + + if (module_enable_ptp) { + /* setup ptp_addr according to mac type */ + switch (adapter->hw.mac.mac_type) { + case mac_dwc_xlg: + adapter->ptp_addr = 
adapter->hw.mac.mac_addr + 0xd00; + adapter->gmac4 = 1; + break; + case mac_dwc_g: + adapter->ptp_addr = adapter->hw.mac.mac_addr + 0x700; + adapter->gmac4 = 0; + break; + } + adapter->flags2 |= RNP_FLAG2_PTP_ENABLED; + if (adapter->flags2 & RNP_FLAG2_PTP_ENABLED) { + adapter->tx_timeout_factor = 10; + INIT_WORK(&adapter->tx_hwtstamp_work, + rnp_tx_hwtstamp_work); + } + } + + INIT_WORK(&adapter->service_task, rnp_service_task); + clear_bit(__RNP_SERVICE_SCHED, &adapter->state); + + if (fix_eth_name) + strncpy(netdev->name, adapter->name, sizeof(netdev->name) - 1); + else { + strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); + } + + err = rnp_init_interrupt_scheme(adapter); + if (err) + goto err_sw_init; + + err = register_mbx_irq(adapter); + if (err) + goto err_register; + +#if defined(CONFIG_PCI_IOV) + rnp_enable_sriov_true(adapter); +#endif + + /* WOL not supported for all devices */ + { + struct ethtool_wolinfo wol; + + if (rnp_wol_exclusion(adapter, &wol) || + !device_can_wakeup(&adapter->pdev->dev)) + adapter->wol = 0; + } + /* reset the hardware with the new settings */ + err = hw->ops.start_hw(hw); + rnp_fix_dma_tx_status(adapter); + + if (!fix_eth_name) + strscpy(netdev->name, "eth%d", sizeof(netdev->name)); + err = register_netdev(netdev); + if (err) { + e_dev_err("register_netdev failed!\n"); + goto err_register; + } + + /* power down the optics for n10 SFP+ fiber */ + if (hw->ops.disable_tx_laser) + hw->ops.disable_tx_laser(hw); + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + +#ifdef CONFIG_RNP_DCA + if (dca_add_requester(&pdev->dev) == 0) { + adapter->flags |= RNP_FLAG_DCA_ENABLED; + rnp_setup_dca(adapter); + } +#endif + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n", + adapter->num_vfs); + for (i = 0; i < adapter->num_vfs; i++) + rnp_vf_configuration(pdev, (i | 0x10000000)); + } + + if (rnp_mbx_lldp_status_get(hw) == 1) { + adapter->priv_flags |= RNP_PRIV_FLAG_LLDP_EN_STAT; + } + + if (rnp_sysfs_init(adapter)) + e_err(probe, "failed to allocate sysfs resources\n"); + + rnp_dbg_adapter_init(adapter); + /* only pf0 download mpe */ + if (rnp_is_pf0(&adapter->hw) && rnp_can_rpu_start(adapter)) { + rnp_rpu_mpe_start(adapter); + } + + if (hw->ncsi_en) { + hw->ops.set_mac_rx(hw, true); + rnp_mbx_probe_stat_set(hw, MBX_PROBE); + } + + return 0; +err_register: + remove_mbx_irq(adapter); + rnp_clear_interrupt_scheme(adapter); +err_sw_init: + rnp_disable_sriov(adapter); + adapter->flags2 &= ~RNP_FLAG2_SEARCH_FOR_SFP; +err_free_net: + free_netdev(netdev); + return err; +} + +/** + * rnp_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in rnp_pci_tbl + * + * Returns 0 on success, negative on failure + * + * rnp_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +#ifdef HAVE_CONFIG_HOTPLUG +static int __devinit rnp_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +#else +static int rnp_probe(struct pci_dev *pdev, const struct pci_device_id *id) +#endif +{ + struct rnp_adapter *adapter; + struct rnp_info *ii = rnp_info_tbl[id->driver_data]; + int err; + + /* Catch broken hardware that put the wrong VF device ID in + * the PCIe SR-IOV capability. 
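+ *
+ * Everything below assumes physical-function register layout and
+ * mailbox roles, so a function flagged as a VF is rejected with
+ * -EINVAL instead of being probed.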
+ */ + if (pdev->is_virtfn) { + WARN(1, "%s (%hx:%hx) should not be a VF!\n", pci_name(pdev), + pdev->vendor, pdev->device); + return -EINVAL; + } +#ifdef HAVE_PCI_DEV_FLAGS_NO_BUS_RESET + /* not support bus reset*/ + pdev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; +#endif + err = pci_enable_device_mem(pdev); + if (err) + return err; + + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(56)) && + !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(56))) { + enable_hi_dma = 1; + } else { + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + } + enable_hi_dma = 0; + } + + err = pci_request_mem_regions(pdev, rnp_driver_name); + if (err) { + dev_err(&pdev->dev, + "pci_request_selected_regions failed 0x%x\n", err); + goto err_pci_reg; + } +#ifndef NO_PCIE_ERROR_REPORTING + pci_enable_pcie_error_reporting(pdev); +#endif + pci_set_master(pdev); + pci_save_state(pdev); + + err = rnp_add_adpater(pdev, ii, &adapter); + if (err) + goto err_regions; + + return 0; +err_regions: + pci_release_mem_regions(pdev); +err_dma: +err_pci_reg: + return err; +} + +/** + * rnp_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * rnp_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void rnp_remove(struct pci_dev *pdev) +{ + struct rnp_adapter *adapter = pci_get_drvdata(pdev); + + if(pci_channel_offline(pdev)){ + printk("%s:%s card pluged out ,pci-err-stat:%d\n", __func__, pci_name(pdev), pdev->error_state); + } +#ifdef CONFIG_PCI_IOV + /* + * Only disable SR-IOV on unload if the user specified the now + * deprecated max_vfs module parameter. 
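+ *
+ * In this driver the call below runs unconditionally on remove, so any
+ * VFs that are still instantiated are torn down before the netdev is
+ * unregistered and freed.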
+ */ + rnp_disable_sriov(adapter); +#endif + rnp_rm_adpater(adapter); + + pci_release_mem_regions(pdev); +#ifndef NO_PCIE_ERROR_REPORTING + pci_disable_pcie_error_reporting(pdev); +#endif + pci_disable_device(pdev); +} + +static struct pci_driver rnp_driver = { + .name = rnp_driver_name, + .id_table = rnp_pci_tbl, + .probe = rnp_probe, + .remove = rnp_remove, +#ifdef CONFIG_PM + .suspend = rnp_suspend, + .resume = rnp_resume, +#endif + .shutdown = rnp_shutdown, +#if defined(HAVE_SRIOV_CONFIGURE) + .sriov_configure = rnp_pci_sriov_configure, +#endif /* HAVE_SRIOV_CONFIGURE */ + // .err_handler = &rnp_err_handler, +}; + +static int __init rnp_init_module(void) +{ + int ret; + + pr_info("%s - version %s\n", rnp_driver_string, rnp_driver_version); + pr_info("%s \n", rnp_copyright); + rnp_wq = create_singlethread_workqueue(rnp_driver_name); + + if (!rnp_wq) { + pr_err("%s: Failed to create workqueue\n", rnp_driver_name); + return -ENOMEM; + } + + rnp_dbg_init(); + + ret = pci_register_driver(&rnp_driver); + if (ret) { + destroy_workqueue(rnp_wq); + rnp_dbg_exit(); + return ret; + } + + return 0; +} +module_init(rnp_init_module); + +static void __exit rnp_exit_module(void) +{ + pci_unregister_driver(&rnp_driver); + + destroy_workqueue(rnp_wq); + + rnp_dbg_exit(); + + rcu_barrier(); /* Wait for completion of call_rcu()'s */ +} + +module_exit(rnp_exit_module); diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mbx.c b/drivers/net/ethernet/mucse/rnp/rnp_mbx.c new file mode 100755 index 0000000000000000000000000000000000000000..2082093e9cd2545b20c57a8073d5665cd593acd7 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_mbx.c @@ -0,0 +1,674 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include +#include +#include +#include "rnp.h" +#include "rnp_type.h" +#include "rnp_common.h" +#include "rnp_mbx.h" +#include "rnp_mbx_fw.h" + +#define VF2PF_MBOX_VEC(mbx, vf) (mbx->vf2pf_mbox_vec_base + 4 * (vf)) +#define CPU2PF_MBOX_VEC(mbx) (mbx->cpu2pf_mbox_vec) +/* == PF <--> VF mailbox ==== */ +#define SHARE_MEM_BYTES 64 +#define PF_VF_SHM(mbx, vf) \ + (mbx->pf_vf_shm_base + \ + mbx->mbx_mem_size * vf) +/* for PF1 rtl will remap 6000 to 0xb000 */ +#define PF2VF_COUNTER(mbx, vf) (PF_VF_SHM(mbx, vf) + 0) +#define VF2PF_COUNTER(mbx, vf) (PF_VF_SHM(mbx, vf) + 4) +#define PF_VF_SHM_DATA(mbx, vf) (PF_VF_SHM(mbx, vf) + 8) +#define PF2VF_MBOX_CTRL(mbx, vf) (mbx->pf2vf_mbox_ctrl_base + 4 * vf) +#define PF_VF_MBOX_MASK_LO(mbx) (mbx->pf_vf_mbox_mask_lo) +#define PF_VF_MBOX_MASK_HI(mbx) (mbx->pf_vf_mbox_mask_hi) + +/* === CPU <--> PF === */ +#define CPU_PF_SHM(mbx) (mbx->cpu_pf_shm_base) +#define CPU2PF_COUNTER(mbx) (CPU_PF_SHM(mbx) + 0) +#define PF2CPU_COUNTER(mbx) (CPU_PF_SHM(mbx) + 4) +#define CPU_PF_SHM_DATA(mbx) (CPU_PF_SHM(mbx) + 8) +#define PF2CPU_MBOX_CTRL(mbx) (mbx->pf2cpu_mbox_ctrl) +#define CPU_PF_MBOX_MASK(mbx) (mbx->cpu_pf_mbox_mask) +#define MBOX_CTRL_REQ (1 << 0) /* WO */ +#define MBOX_CTRL_PF_HOLD_SHM (1 << 3) /* VF:RO, PF:WR */ +#define MBOX_IRQ_EN 0 +#define MBOX_IRQ_DISABLE 1 +#define mbx_prd32(hw, reg) prnp_rd_reg((hw)->hw_addr + (reg)) +#define mbx_rd32(hw, reg) rnp_rd_reg((hw)->hw_addr + (reg)) +#define mbx_pwr32(hw, reg, val) p_rnp_wr_reg((hw)->hw_addr + (reg), (val)) +#define mbx_wr32(hw, reg, val) rnp_wr_reg((hw)->hw_addr + (reg), (val)) + +/** + * rnp_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox/vfnum to read + * + * returns SUCCESS 
if it successfully read message from buffer + **/ +s32 rnp_read_mbx(struct rnp_hw *hw, u32 *msg, u16 size, enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + s32 ret_val = RNP_ERR_MBX; + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + if (mbx->ops.read) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * rnp_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +s32 rnp_write_mbx(struct rnp_hw *hw, u32 *msg, u16 size, enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + s32 ret_val = 0; + + if (size > mbx->size) + ret_val = RNP_ERR_MBX; + else if (mbx->ops.write) + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + return ret_val; +} + +static inline u16 rnp_mbx_get_req(struct rnp_hw *hw, int reg) +{ + mb(); + return ioread32(hw->hw_addr + reg) & 0xffff; +} + +static inline u16 rnp_mbx_get_ack(struct rnp_hw *hw, int reg) +{ + mb(); + return (mbx_rd32(hw, reg) >> 16); +} + +static inline void rnp_mbx_inc_pf_req(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + u16 req; + int reg; + struct rnp_mbx_info *mbx = &hw->mbx; + u32 v; + + reg = (mbx_id == MBX_CM3CPU) ? PF2CPU_COUNTER(mbx) : + PF2VF_COUNTER(mbx, mbx_id); + v = mbx_rd32(hw, reg); + + req = (v & 0xffff); + req++; + v &= ~(0x0000ffff); + v |= req; + mb(); + mbx_wr32(hw, reg, v); + + /* update stats */ + hw->mbx.stats.msgs_tx++; +} + +static inline void rnp_mbx_inc_pf_ack(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + u16 ack; + struct rnp_mbx_info *mbx = &hw->mbx; + int reg = (mbx_id == MBX_CM3CPU) ? PF2CPU_COUNTER(mbx) : + PF2VF_COUNTER(mbx, mbx_id); + u32 v = mbx_rd32(hw, reg); + + ack = (v >> 16) & 0xffff; + ack++; + v &= ~(0xffff0000); + v |= (ack << 16); + mb(); + mbx_wr32(hw, reg, v); + + /* update stats */ + hw->mbx.stats.msgs_rx++; +} + +/** + * rnp_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 rnp_check_for_msg(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + s32 ret_val = RNP_ERR_MBX; + + if (mbx->ops.check_for_msg) + ret_val = mbx->ops.check_for_msg(hw, mbx_id); + + return ret_val; +} + +/** + * rnp_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 rnp_check_for_ack(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + s32 ret_val = RNP_ERR_MBX; + + if (mbx->ops.check_for_ack) + ret_val = mbx->ops.check_for_ack(hw, mbx_id); + + return ret_val; +} + +/** + * rnp_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +static s32 rnp_poll_for_msg(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->usec_delay); + } + +out: + return countdown ? 
0 : -ETIME; +} + +/** + * rnp_poll_for_ack - Wait for message acknowledgment + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgment + **/ +static s32 rnp_poll_for_ack(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) { + printk("mbx poll for ack ack timeout\n"); + break; + } + udelay(mbx->usec_delay); + } + +out: + return countdown ? 0 : RNP_ERR_MBX; +} + +/** + * rnp_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. + **/ +static s32 rnp_read_posted_mbx(struct rnp_hw *hw, u32 *msg, u16 size, + enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + s32 ret_val = RNP_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + ret_val = rnp_poll_for_msg(hw, mbx_id); + + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); +out: + return ret_val; +} + +/** + * rnp_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +static s32 rnp_write_posted_mbx(struct rnp_hw *hw, u32 *msg, u16 size, + enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + s32 ret_val = RNP_ERR_MBX; + + if(pci_channel_offline(hw->pdev)){ + return -EIO; + } + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg and hold buffer lock */ + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = rnp_poll_for_ack(hw, mbx_id); + +out: + return ret_val; +} + +/** + * rnp_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 rnp_check_for_msg_pf(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + s32 ret_val = RNP_ERR_MBX; + u16 hw_req_count = 0; + struct rnp_mbx_info *mbx = &hw->mbx; + + if(pci_channel_offline(hw->pdev)){ + return -EIO; + } + + if (mbx_id == MBX_CM3CPU) { + hw_req_count = rnp_mbx_get_req(hw, CPU2PF_COUNTER(mbx)); + if (mbx->mbx_feature & MBX_FEATURE_NO_ZERO) { + if ((hw_req_count != 0) && + (hw_req_count != hw->mbx.cpu_req)) { + ret_val = 0; + hw->mbx.stats.reqs++; + } + + } else { + if (hw_req_count != hw->mbx.cpu_req) { + ret_val = 0; + hw->mbx.stats.reqs++; + } + } + } else { + if (rnp_mbx_get_req(hw, VF2PF_COUNTER(mbx, mbx_id)) != + hw->mbx.vf_req[mbx_id]) { + ret_val = 0; + hw->mbx.stats.reqs++; + } + } + + return ret_val; +} + +/** + * rnp_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 rnp_check_for_ack_pf(struct 
rnp_hw *hw, enum MBX_ID mbx_id) +{ + s32 ret_val = RNP_ERR_MBX; + struct rnp_mbx_info *mbx = &hw->mbx; + + if(pci_channel_offline(hw->pdev)){ + return -EIO; + } + + if (mbx_id == MBX_CM3CPU) { + if (rnp_mbx_get_ack(hw, CPU2PF_COUNTER(mbx)) != + hw->mbx.cpu_ack) { + ret_val = 0; + hw->mbx.stats.acks++; + } + } else { + if (rnp_mbx_get_ack(hw, VF2PF_COUNTER(mbx, mbx_id)) != + hw->mbx.vf_ack[mbx_id]) { + ret_val = 0; + hw->mbx.stats.acks++; + } + } + + return ret_val; +} + +/** + * rnp_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @mbx_id: the VF index or CPU + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 rnp_obtain_mbx_lock_pf(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + int try_cnt = 5000; + struct rnp_mbx_info *mbx = &hw->mbx; + u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL(mbx) : + PF2VF_MBOX_CTRL(mbx, mbx_id); + + while (try_cnt-- > 0) { + /* Take ownership of the buffer */ + mbx_wr32(hw, CTRL_REG, MBOX_CTRL_PF_HOLD_SHM); + wmb(); + /* reserve mailbox for cm3 use */ + if (mbx_rd32(hw, CTRL_REG) & MBOX_CTRL_PF_HOLD_SHM) + return 0; + udelay(100); + } + + rnp_err("%s: failed to get:%d lock \n", __func__, mbx_id); + return EPERM; +} + +/** + * rnp_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 rnp_write_mbx_pf(struct rnp_hw *hw, u32 *msg, u16 size, + enum MBX_ID mbx_id) +{ + s32 ret_val = 0; + u16 i; + struct rnp_mbx_info *mbx = &hw->mbx; + u32 DATA_REG = (mbx_id == MBX_CM3CPU) ? CPU_PF_SHM_DATA(mbx) : + PF_VF_SHM_DATA(mbx, mbx_id); + u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL(mbx) : + PF2VF_MBOX_CTRL(mbx, mbx_id); + + if(pci_channel_offline(hw->pdev)){ + return -EIO; + } + + if (size > RNP_VFMAILBOX_SIZE) { + printk("%s: size:%d should <%d\n", __func__, size, + RNP_VFMAILBOX_SIZE); + return -EINVAL; + } + + /* lock the mailbox to prevent pf/vf/cpu race condition */ + ret_val = rnp_obtain_mbx_lock_pf(hw, mbx_id); + if (ret_val) { + printk("%s: get mbx:%d wlock failed. ret:%d. req:0x%08x-0x%08x\n", + __func__, mbx_id, ret_val, msg[0], msg[1]); + goto out_no_write; + } + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) { + mbx_wr32(hw, DATA_REG + i * 4, msg[i]); + rnp_logd(LOG_MBX_OUT, " w-mbx:0x%x <= 0x%x\n", + DATA_REG + i * 4, msg[i]); + } + + /* flush msg and acks as we are overwriting the message buffer */ + if (mbx_id == MBX_CM3CPU) { + hw->mbx.cpu_ack = rnp_mbx_get_ack(hw, CPU2PF_COUNTER(mbx)); + } else { + hw->mbx.vf_ack[mbx_id] = + rnp_mbx_get_ack(hw, VF2PF_COUNTER(mbx, mbx_id)); + } + rnp_mbx_inc_pf_req(hw, mbx_id); + + /* Interrupt VF/CM3 to tell it a message + * has been sent and release buffer + */ + if (mbx->mbx_feature & MBX_FEATURE_WRITE_DELAY) + udelay(300); + mbx_wr32(hw, CTRL_REG, MBOX_CTRL_REQ); + +out_no_write: + /* sometimes happen */ + + return ret_val; +} + +/** + * rnp_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF/CPU request so no polling for message is needed. 
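+ *
+ * Flow of the implementation below: take the shared-memory lock via
+ * MBOX_CTRL_PF_HOLD_SHM, copy @size 32-bit words out of the shared
+ * buffer, refresh the cached request counter, bump the PF ack counter
+ * so the sender sees the receipt, then clear the control register to
+ * release the buffer. Callers normally reach this through
+ * rnp_read_posted_mbx(), which polls for a pending message first.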
+ **/ +static s32 rnp_read_mbx_pf(struct rnp_hw *hw, u32 *msg, u16 size, + enum MBX_ID mbx_id) +{ + s32 ret_val = -EIO; + u32 i; + struct rnp_mbx_info *mbx = &hw->mbx; + u32 BUF_REG = (mbx_id == MBX_CM3CPU) ? CPU_PF_SHM_DATA(mbx) : + PF_VF_SHM_DATA(mbx, mbx_id); + u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL(mbx) : + PF2VF_MBOX_CTRL(mbx, mbx_id); + if(pci_channel_offline(hw->pdev)){ + return -EIO; + } + + if (size > RNP_VFMAILBOX_SIZE) { + printk("%s: size:%d should <%d\n", __func__, size, + RNP_VFMAILBOX_SIZE); + return -EINVAL; + } + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = rnp_obtain_mbx_lock_pf(hw, mbx_id); + if (ret_val) + goto out_no_read; + + mb(); + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) { + msg[i] = mbx_rd32(hw, BUF_REG + 4 * i); + rnp_logd(LOG_MBX_IN, " r-mbx:0x%x => 0x%x\n", BUF_REG + 4 * i, + msg[i]); + } + mbx_wr32(hw, BUF_REG, 0); + + /* update req. used by rnpvf_check_for_msg_vf */ + if (mbx_id == MBX_CM3CPU) { + hw->mbx.cpu_req = rnp_mbx_get_req(hw, CPU2PF_COUNTER(mbx)); + } else { + hw->mbx.vf_req[mbx_id] = + rnp_mbx_get_req(hw, VF2PF_COUNTER(mbx, mbx_id)); + } + /* this ack maybe too earier? */ + /* Acknowledge receipt and release mailbox, then we're done */ + rnp_mbx_inc_pf_ack(hw, mbx_id); + + /* free ownership of the buffer */ + mbx_wr32(hw, CTRL_REG, 0); + +out_no_read: + + return ret_val; +} + +static void rnp_mbx_reset(struct rnp_hw *hw) +{ + int idx, v; + struct rnp_mbx_info *mbx = &hw->mbx; + + for (idx = 0; idx < hw->max_vfs; idx++) { + v = mbx_rd32(hw, VF2PF_COUNTER(mbx, idx)); + hw->mbx.vf_req[idx] = v & 0xffff; + hw->mbx.vf_ack[idx] = (v >> 16) & 0xffff; + mbx_wr32(hw, PF2VF_MBOX_CTRL(mbx, idx), 0); + } + + v = mbx_rd32(hw, CPU2PF_COUNTER(mbx)); + hw->mbx.cpu_req = v & 0xffff; + hw->mbx.cpu_ack = (v >> 16) & 0xffff; + + printk("now mbx.cpu_req %d mbx.cpu_ack %d\n", hw->mbx.cpu_req, + hw->mbx.cpu_ack); + /* release pf->cm3 buffer lock */ + mbx_wr32(hw, PF2CPU_MBOX_CTRL(mbx), 0); + + if (PF_VF_MBOX_MASK_LO(mbx)) + wr32(hw, PF_VF_MBOX_MASK_LO(mbx), + 0); /* allow vf to vectors */ + if (PF_VF_MBOX_MASK_HI(mbx)) + wr32(hw, PF_VF_MBOX_MASK_HI(mbx), 0); + + /* allow CM3CPU to PF MBX IRQ */ + wr32(hw, CPU_PF_MBOX_MASK(mbx), 0); +} + +static int rnp_mbx_configure_pf(struct rnp_hw *hw, int nr_vec, bool enable) +{ + int idx = 0; + u32 v; + struct rnp_mbx_info *mbx = &hw->mbx; + + if(pci_channel_offline(hw->pdev)){ + return -EIO; + } + + if (enable) { + for (idx = 0; idx < hw->max_vfs; idx++) { + v = mbx_rd32(hw, VF2PF_COUNTER(mbx, idx)); + hw->mbx.vf_req[idx] = v & 0xffff; + hw->mbx.vf_ack[idx] = (v >> 16) & 0xffff; + + mbx_wr32(hw, PF2VF_MBOX_CTRL(mbx, idx), 0); + } + /* reset pf->cm3 status */ + v = mbx_rd32(hw, CPU2PF_COUNTER(mbx)); + hw->mbx.cpu_req = v & 0xffff; + hw->mbx.cpu_ack = (v >> 16) & 0xffff; + /* release pf->cm3 buffer lock */ + mbx_wr32(hw, PF2CPU_MBOX_CTRL(mbx), 0); + /* allow VF to PF MBX IRQ */ + for (idx = 0; idx < hw->max_vfs; idx++) { + mbx_wr32(hw, VF2PF_MBOX_VEC(mbx, idx), + nr_vec); + /* vf to pf req interrupt */ + } + + if (PF_VF_MBOX_MASK_LO(mbx)) + wr32(hw, PF_VF_MBOX_MASK_LO(mbx), + 0); + /* allow vf to vectors */ + + if (PF_VF_MBOX_MASK_HI(mbx)) + wr32(hw, PF_VF_MBOX_MASK_HI(mbx), 0); + + /* bind cm3cpu mbx to irq */ + wr32(hw, CPU2PF_MBOX_VEC(mbx), + nr_vec); + /* cm3 and VF63 share #63 irq */ + /* allow CM3CPU to PF MBX IRQ */ + wr32(hw, CPU_PF_MBOX_MASK(mbx), 0); + + rnp_dbg("[%s] mbx-vector:%d\n", __func__, nr_vec); + + } else { + if 
(PF_VF_MBOX_MASK_LO(mbx)) + wr32(hw, PF_VF_MBOX_MASK_LO(mbx), + 0xffffffff); + if (PF_VF_MBOX_MASK_HI(mbx)) + wr32(hw, PF_VF_MBOX_MASK_HI(mbx), + 0xffffffff); + + /* disable CM3CPU to PF MBX IRQ */ + wr32(hw, CPU_PF_MBOX_MASK(mbx), 0xffffffff); + + /* reset vf->pf status/ctrl */ + for (idx = 0; idx < hw->max_vfs; idx++) + mbx_wr32(hw, PF2VF_MBOX_CTRL(mbx, idx), 0); + /* reset pf->cm3 ctrl */ + mbx_wr32(hw, PF2CPU_MBOX_CTRL(mbx), 0); + /* used to sync link status */ + wr32(hw, RNP_DMA_DUMY, 0); + } + return 0; +} + +unsigned int rnp_mbx_change_timeout(struct rnp_hw *hw, int timeout_ms) +{ + unsigned int old_timeout = hw->mbx.timeout; + + hw->mbx.timeout = timeout_ms * 1000 / hw->mbx.usec_delay; + + return old_timeout; +} + +/** + * rnp_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +s32 rnp_init_mbx_params_pf(struct rnp_hw *hw) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + + mbx->usec_delay = 100; + /* wait 5s */ + mbx->timeout = (4 * 1000 * 1000) / mbx->usec_delay; + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + mbx->size = RNP_VFMAILBOX_SIZE; + mutex_init(&mbx->lock); + rnp_mbx_reset(hw); + + return 0; +} + +struct rnp_mbx_operations mbx_ops_generic = { + .init_params = rnp_init_mbx_params_pf, + .read = rnp_read_mbx_pf, + .write = rnp_write_mbx_pf, + .read_posted = rnp_read_posted_mbx, + .write_posted = rnp_write_posted_mbx, + .check_for_msg = rnp_check_for_msg_pf, + .check_for_ack = rnp_check_for_ack_pf, + .configure = rnp_mbx_configure_pf, +}; diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mbx.h b/drivers/net/ethernet/mucse/rnp/rnp_mbx.h new file mode 100755 index 0000000000000000000000000000000000000000..eadb100fbb41a1837e869cf4eb4c8ef3fa686935 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_mbx.h @@ -0,0 +1,242 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#ifndef _RNP_MBX_H_ +#define _RNP_MBX_H_ + +#include "rnp_type.h" + +#define RNP_VFMAILBOX_SIZE 14 /* 14 32-bit words - 56 bytes */ +#define RNP_ERR_MBX -100 +#define RNP_VT_MSGTYPE_ACK 0x80000000 +/* messages below OR'd with this are the ACK */ +#define RNP_VT_MSGTYPE_NACK 0x40000000 +/* messages below OR'd with this are the NACK */ +#define RNP_VT_MSGTYPE_CTS 0x20000000 +/* indicates that the VF is still clear to send requests */ +#define RNP_VT_MSGINFO_SHIFT 14 +/* bits 20:14 are used for extra info for certain messages */ +#define RNP_VT_MSGINFO_MASK (0x7F << RNP_VT_MSGINFO_SHIFT) +/* VLAN pool filtering masks */ +#define RNP_VLVF_VIEN 0x80000000 /* filter is valid */ +#define RNP_VLVF_ENTRIES 64 +#define RNP_VLVF_VLANID_MASK 0x00000FFF +/* + * mailbox msg_data layout: + * bits [27:21] - VF number (RNP_VF_MASK) + * bits [13:0] - mailbox command (RNP_MAIL_CMD_MASK) + */ +#define RNP_VNUM_OFFSET (21) +#define RNP_VF_MASK (0x7f << 21) +#define RNP_MAIL_CMD_MASK 0x3fff +/* mailbox API, legacy requests */ +#define RNP_VF_RESET 0x01 /* VF requests reset */ +#define RNP_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define RNP_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define RNP_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ + +/* mailbox API, version 1.0 VF requests */ +#define RNP_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define RNP_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ +#define RNP_VF_GET_MACADDR 0x07 /* get vf macaddr */ +#define RNP_VF_API_NEGOTIATE 0x08 /* negotiate API version */ + +/* mailbox API, version 1.1 VF requests */ +#define RNP_VF_GET_QUEUES 0x09 /* get queue configuration */ +#define RNP_VF_SET_VLAN_STRIP 0x0a /* VF requests PF to set VLAN STRIP */ +#define RNP_VF_REG_RD 0x0b /* vf read reg */ +#define RNP_VF_GET_MTU 0x0c /* vf get pf ethtool setup */ +#define RNP_VF_SET_MTU 0x0d /* vf set mtu */ +#define RNP_VF_GET_FW 0x0e /* vf get firmware version */ +#define RNP_VF_GET_LINK 0x10 /* get link status */ +#define RNP_VF_RESET_PF 0x11 +#define RNP_VF_GET_DMA_FRAG 0x12 +#define RNP_VF_SET_PROMISCE 0x16 +#define RNP_PF_SET_FCS 0x10 /* PF set fcs status */ +#define RNP_PF_SET_PAUSE 0x11 /* PF set pause status */ +#define RNP_PF_SET_FT_PADDING 0x12 /* PF set ft padding status */ +#define RNP_PF_SET_VLAN_FILTER 0x13 /* PF set vlan filter status */ +#define RNP_PF_SET_VLAN 0x14 /* PF set vlan status */ +#define RNP_PF_SET_LINK 0x15 /* PF set link status */ +#define RNP_PF_SET_MTU 0x16 /* PF set mtu */ +#define RNP_PF_SET_RESET 0x17 /* PF set reset */ +#define RNP_PF_SET_MAC_SPOOF 0x18 /* PF set mac spoof status */ +#define RNP_PF_LINK_UP (1 << 31) +#define RNP_PF_REMOVE 0x0f +/* GET_QUEUES return data indices within the mailbox */ +#define RNP_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define RNP_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define RNP_VF_TRANS_VLAN 3 /* Indication of port vlan */ +#define RNP_VF_DEF_QUEUE 4 /* Default queue offset */ +#define RNP_VF_QUEUE_START 5 /* First queue offset */ +#define RNP_VF_QUEUE_DEPTH 6 /* ring depth */ + +#define VF_ALLOC_FEATURE BIT(0) +/* length of permanent address message returned from PF */ +#define RNP_VF_PERMADDR_MSG_LEN 11 +/* word in permanent address message with the current multicast type */ +#define RNP_VF_MC_TYPE_WORD 3 +#define RNP_VF_DMA_VERSION_WORD 4 +#define RNP_VF_VLAN_WORD 5 +#define RNP_VF_PHY_TYPE_WORD 6 +#define RNP_VF_FW_VERSION_WORD 7 +#define RNP_VF_LINK_STATUS_WORD 8 +#define RNP_VF_AXI_MHZ 9 +#define PF_FEATRURE_VLAN_FILTER BIT(0) +#define PF_NCSI_EN BIT(1)
+#define VF_MAC_SPOOF_EN BIT(2) +#define RNP_VF_FEATURE 10 + +#define RNP_PF_CONTROL_PRING_MSG 0x0100 /* PF control message */ + +#define RNP_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define RNP_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +enum MBX_ID { + MBX_VF0 = 0, + MBX_VF1, + MBX_VF2, + MBX_VF3, + MBX_VF4, + MBX_VF5, + MBX_VF6, + MBX_VF7, + MBX_VF8, + MBX_VF9, + MBX_VF10, + MBX_VF11, + MBX_VF12, + MBX_VF13, + MBX_VF14, + MBX_VF15, + MBX_VF16, + MBX_VF17, + MBX_VF18, + MBX_VF19, + MBX_VF20, + MBX_VF21, + MBX_VF22, + MBX_VF23, + MBX_VF24, + MBX_VF25, + MBX_VF26, + MBX_VF27, + MBX_VF28, + MBX_VF29, + MBX_VF30, + MBX_VF31, + MBX_VF32, + MBX_VF33, + MBX_VF34, + MBX_VF35, + MBX_VF36, + MBX_VF37, + MBX_VF38, + MBX_VF39, + MBX_VF40, + MBX_VF41, + MBX_VF42, + MBX_VF43, + MBX_VF44, + MBX_VF45, + MBX_VF46, + MBX_VF47, + MBX_VF48, + MBX_VF49, + MBX_VF50, + MBX_VF51, + MBX_VF52, + MBX_VF53, + MBX_VF54, + MBX_VF55, + MBX_VF56, + MBX_VF57, + MBX_VF58, + MBX_VF59, + MBX_VF60, + MBX_VF61, + MBX_VF62, + //... + MBX_VF63, + MBX_CM3CPU, + MBX_FW = MBX_CM3CPU, + MBX_VFCNT +}; + +enum PF_STATUS { + PF_FCS_STATUS, + PF_PAUSE_STATUS, + PF_FT_PADDING_STATUS, + PF_VLAN_FILTER_STATUS, + PF_SET_VLAN_STATUS, + PF_SET_LINK_STATUS, + PF_SET_MTU, + PF_SET_RESET, + PF_SET_MAC_SPOOF, +}; + +s32 rnp_read_mbx(struct rnp_hw *, u32 *, u16, enum MBX_ID); +s32 rnp_write_mbx(struct rnp_hw *, u32 *, u16, enum MBX_ID); +s32 rnp_check_for_msg(struct rnp_hw *, enum MBX_ID); +s32 rnp_check_for_ack(struct rnp_hw *, enum MBX_ID); +s32 rnp_check_for_rst(struct rnp_hw *, enum MBX_ID); +s32 rnp_init_mbx_params_pf(struct rnp_hw *); +extern struct rnp_mbx_operations mbx_ops_generic; +#define MBX_IFDOWN (0) +#define MBX_IFUP (1) +#define MBX_PROBE (2) +#define MBX_REMOVE (3) +void rnp_mbx_probe_stat_set(struct rnp_hw *hw, int stat); +int rnp_fw_get_macaddr(struct rnp_hw *hw, int pfvfnum, u8 *mac_addr, int lane); +int rnp_mbx_fw_reset_phy(struct rnp_hw *hw); +unsigned int rnp_mbx_change_timeout(struct rnp_hw *hw, int timeout_ms); +struct rnp_info; +int rnp_mbx_get_capability(struct rnp_hw *hw, struct rnp_info *info); +int rnp_mbx_link_event_enable(struct rnp_hw *hw, int enable); +int rnp_mbx_get_link_stat(struct rnp_hw *hw); +int rnp_mbx_ifup_down(struct rnp_hw *hw, int up); +int rnp_mbx_led_set(struct rnp_hw *hw, int value); +int rnp_mbx_get_dump(struct rnp_hw *hw, int flags, u8 *data_out, int buflen); +int rnp_mbx_set_dump(struct rnp_hw *hw, int flag); +int rnp_mbx_sfp_write(struct rnp_hw *hw, int sfp_addr, int reg, short v); +int rnp_mbx_sfp_module_eeprom_info(struct rnp_hw *hw, int sfp_addr, int reg, + int data_len, u8 *buf); +int rnp_mbx_get_temp(struct rnp_hw *hw, int *voltage); +int rnp_mbx_phy_link_set(struct rnp_hw *hw, int adv, int autoneg, int speed, + int duplex, int tp_mdix_ctrl); +int rnp_mbx_phy_pause_set(struct rnp_hw *hw, int pause_mode); +int rnp_mbx_phy_write(struct rnp_hw *hw, u32 reg, u32 val); +int rnp_mbx_phy_read(struct rnp_hw *hw, u32 reg, u32 *val); + +int rnp_maintain_req(struct rnp_hw *hw, int cmd, int arg0, int req_data_bytes, + int reply_bytes, dma_addr_t dma_phy_addr); +int rnp_mbx_get_lane_stat(struct rnp_hw *hw); +int rnp_mbx_wol_set(struct rnp_hw *hw, u32 mode); +int rnp_mbx_ifsuspuse(struct rnp_hw *hw, int status); +int rnp_mbx_ifinsmod(struct rnp_hw *hw, int status); +int rnp_mbx_ifforce_control_mac(struct rnp_hw *hw, int status); +int wait_mbx_init_done(struct rnp_hw *hw); +int rnp_set_lane_fun(struct rnp_hw *hw, int fun, int value0, int value1, + int value2, int 
value3); +void rnp_link_stat_mark(struct rnp_hw *hw, int up); +int rnp_mbx_reg_writev(struct rnp_hw *hw, int fw_reg, int value[4], int bytes); +int rnp_mbx_reg_write(struct rnp_hw *hw, int fw_reg, int value); +int rnp_mbx_fw_reg_read(struct rnp_hw *hw, int fw_reg); +int rnp_mbx_force_speed(struct rnp_hw *hw, int speed); + +#define cm3_reg_write32(hw, cm3_rpu_reg, v) \ + rnp_mbx_reg_write((hw), (cm3_rpu_reg), (v)) + +#define cm3_reg_read32(hw, cm3_rpu_reg) rnp_mbx_fw_reg_read((hw), (cm3_rpu_reg)) + +int rnp_mbx_lldp_status_get(struct rnp_hw *hw); +int rnp_mbx_lldp_port_enable(struct rnp_hw *hw, bool enable); +int rnp_mbx_ddr_csl_enable(struct rnp_hw *hw, int enable, dma_addr_t dma_phy, + int bytes); +#endif /* _RNP_MBX_H_ */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.c b/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.c new file mode 100755 index 0000000000000000000000000000000000000000..f49fec3a2338e5aa10bda54ecaf206150299df96 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.c @@ -0,0 +1,1518 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include +#include +#include +#include + +#include "rnp.h" +#include "rnp_mbx.h" +#include "rnp_mbx_fw.h" + +#define RNP_FW_MAILBOX_SIZE RNP_VFMAILBOX_SIZE + +static bool is_cookie_valid(struct rnp_hw *hw, void *cookie) +{ + unsigned char *begin = + (unsigned char *)(&hw->mbx.cookie_pool.cookies[0]); + unsigned char *end = + (unsigned char *)(&hw->mbx.cookie_pool + .cookies[MAX_COOKIES_ITEMS]); + if (((unsigned char *)cookie) >= begin && + ((unsigned char *)cookie) < end) { + return true; + } + return false; +} + +static struct mbx_req_cookie *mbx_cookie_zalloc(struct rnp_hw *hw, int priv_len) +{ + struct mbx_req_cookie *cookie = NULL; + int loop_cnt = MAX_COOKIES_ITEMS, i; + bool find = false; + + u64 now_jiffies = get_jiffies_64(); + + if (mutex_lock_interruptible(&hw->mbx.lock)) { + rnp_err("[%s] get mbx lock failed,priv_len:%d\n", __func__, + priv_len); + return NULL; + } + i = hw->mbx.cookie_pool.next_idx; + while (loop_cnt--) { + cookie = &(hw->mbx.cookie_pool.cookies[i]); + if (cookie->stat == COOKIE_FREE || + /* force free cookie if cookie not freed after 120 seconds */ + time_after64(now_jiffies, + cookie->alloced_jiffies + (2 * 60) * HZ)) { + find = true; + cookie->alloced_jiffies = get_jiffies_64(); + cookie->stat = COOKIE_ALLOCED; + hw->mbx.cookie_pool.next_idx = + (i + 1) % MAX_COOKIES_ITEMS; + break; + } + i = (i + 1) % MAX_COOKIES_ITEMS; + } + mutex_unlock(&hw->mbx.lock); + + if (!find) { + rnp_err("[%s] no free cookies availble\n", __func__); + return NULL; + } + + cookie->timeout_jiffes = 30 * HZ; + cookie->priv_len = priv_len; + + return cookie; +} + +static void mbx_free_cookie(struct mbx_req_cookie *cookie, bool force_free) +{ + if (!cookie) + return; + + if (force_free) { + cookie->stat = COOKIE_FREE; + } else { + cookie->stat = COOKIE_FREE_WAIT_TIMEOUT; + } +} + +static int rnp_mbx_write_posted_locked(struct rnp_hw *hw, + struct mbx_fw_cmd_req *req) +{ + int err = 0; + int retry = 3; + + if(pci_channel_offline(hw->pdev)){ + return -EIO; + } + + if (mutex_lock_interruptible(&hw->mbx.lock)) { + rnp_err("[%s] get mbx lock failed opcode:0x%x\n", __func__, + req->opcode); + return -EAGAIN; + } + + rnp_logd(LOG_MBX_LOCK, "%s %d lock:%p hw:%p opcode:0x%x\n", __func__, + hw->pfvfnum, &hw->mbx.lock, hw, req->opcode); + +try_again: + retry--; + if (retry < 0) { + mutex_unlock(&hw->mbx.lock); + rnp_err("%s: write_posted failed! 
err:0x%x opcode:0x%x\n", + __func__, err, req->opcode); + return -EIO; + } + + err = hw->mbx.ops.write_posted( + hw, (u32 *)req, (req->datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + if (err) { + goto try_again; + } + mutex_unlock(&hw->mbx.lock); + + return err; +} + +static void rnp_link_stat_mark_reset(struct rnp_hw *hw) +{ + wr32(hw, RNP_DMA_DUMY, 0xa5a40000); +} + +static void rnp_link_stat_mark_disable(struct rnp_hw *hw) +{ + wr32(hw, RNP_DMA_DUMY, 0); +} + +static int rnp_mbx_fw_post_req(struct rnp_hw *hw, struct mbx_fw_cmd_req *req, + struct mbx_req_cookie *cookie) +{ + int err = 0; + struct rnp_adapter *adpt = hw->back; + + if (pci_channel_offline(hw->pdev)) + return -EIO; + + cookie->errcode = 0; + cookie->done = 0; + init_waitqueue_head(&cookie->wait); + + if (mutex_lock_interruptible(&hw->mbx.lock)) { + rnp_err("[%s] wait mbx lock timeout pfvf:0x%x opcode:0x%x\n", + __func__, hw->pfvfnum, req->opcode); + return -EAGAIN; + } + + rnp_logd(LOG_MBX_LOCK, "%s %d lock:%p hw:%p opcode:0x%x\n", __func__, + hw->pfvfnum, &hw->mbx.lock, hw, req->opcode); + + err = rnp_write_mbx(hw, (u32 *)req, + (req->datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + if (err) { + rnp_err("rnp_write_mbx failed! err:%d opcode:0x%x\n", err, + req->opcode); + mutex_unlock(&hw->mbx.lock); + return err; + } + + if (cookie->timeout_jiffes != 0) { + int retry_cnt = 4; +retry: + err = wait_event_interruptible_timeout(cookie->wait, + cookie->done == 1, + cookie->timeout_jiffes); + + if (err == -ERESTARTSYS && retry_cnt) { + retry_cnt--; + goto retry; + } + if (err == 0) { + rnp_err("[%s] %s failed! pfvfnum:0x%x hw:%p timeout err:%d opcode:%x\n", + adpt->name, __func__, hw->pfvfnum, hw, err, + req->opcode); + err = -ETIME; + } else if (err > 0) { + err = 0; + } + } else { + wait_event_interruptible(cookie->wait, cookie->done == 1); + } + + mutex_unlock(&hw->mbx.lock); + + if (cookie->errcode) { + err = cookie->errcode; + } + + return err; +} + +static int rnp_fw_send_cmd_wait(struct rnp_hw *hw, struct mbx_fw_cmd_req *req, + struct mbx_fw_cmd_reply *reply) +{ + int err; + int retry_cnt = 3; + + if (!hw || !req || !reply || !hw->mbx.ops.read_posted) { + printk("error: hw:%p req:%p reply:%p\n", hw, req, reply); + return -EINVAL; + } + + if (pci_channel_offline(hw->pdev)) + return -EIO; + + if (mutex_lock_interruptible(&hw->mbx.lock)) { + rnp_err("[%s] get mbx lock failed opcode:0x%x\n", __func__, + req->opcode); + return -EAGAIN; + } + + rnp_logd(LOG_MBX_LOCK, "%s %d lock:%p hw:%p opcode:0x%x\n", __func__, + hw->pfvfnum, &hw->mbx.lock, hw, req->opcode); + err = hw->mbx.ops.write_posted( + hw, (u32 *)req, (req->datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + if (err) { + rnp_err("%s: write_posted failed! err:0x%x opcode:0x%x\n", + __func__, err, req->opcode); + mutex_unlock(&hw->mbx.lock); + return err; + } + +retry: + retry_cnt--; + if (retry_cnt < 0) { + rnp_err("retry timeout opcode:0x%x\n", req->opcode); + mutex_unlock(&hw->mbx.lock); + return -EIO; + } + err = hw->mbx.ops.read_posted(hw, (u32 *)reply, sizeof(*reply) / 4, + MBX_FW); + if (err) { + rnp_err("%s: read_posted failed!
err:0x%x opcode:0x%x\n", + __func__, err, req->opcode); + mutex_unlock(&hw->mbx.lock); + return err; + } + if (reply->opcode != req->opcode) + goto retry; + + mutex_unlock(&hw->mbx.lock); + + if (reply->error_code) { + rnp_err("%s: reply err:0x%x req:0x%x\n", __func__, + reply->error_code, req->opcode); + return -reply->error_code; + } + return 0; +} + +int wait_mbx_init_done(struct rnp_hw *hw) +{ + int count = 10000; + u32 v = rd32(hw, RNP_TOP_NIC_DUMMY); + + while (count) { + v = rd32(hw, RNP_TOP_NIC_DUMMY); + if (((v & 0xFF000000) == 0xa5000000) && (v & 0x80)) + break; + + usleep_range(500, 1000); + printk("waiting fw up\n"); + count--; + } + printk("fw init ok %x\n", v); + + return 0; +} + +int rnp_mbx_get_lane_stat(struct rnp_hw *hw) +{ + int err = 0; + struct mbx_fw_cmd_req req; + struct rnp_adapter *adpt = hw->back; + struct lane_stat_data *st; + struct mbx_req_cookie *cookie = NULL; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + + if (hw->mbx.other_irq_enabled) { + cookie = mbx_cookie_zalloc(hw, sizeof(struct lane_stat_data)); + if (!cookie) { + rnp_err("%s: no memory\n", __func__); + return -ENOMEM; + } + st = (struct lane_stat_data *)cookie->priv; + + build_get_lane_status_req(&req, hw->nr_lane, cookie); + + err = rnp_mbx_fw_post_req(hw, &req, cookie); + if (err) { + rnp_err("%s: error:%d\n", __func__, err); + goto quit; + } + } else { + memset(&reply, 0, sizeof(reply)); + + build_get_lane_status_req(&req, hw->nr_lane, &req); + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + if (err) { + rnp_err("%s: 1 error:%d\n", __func__, err); + goto quit; + } + st = (struct lane_stat_data *)&(reply.data); + } + + hw->phy_type = st->phy_type; + hw->speed = adpt->speed = st->speed; + if ((st->is_sgmii) || (hw->phy_type == PHY_TYPE_10G_TP)) { + adpt->phy_addr = st->phy_addr; + } else { + adpt->sfp.fault = st->sfp.fault; + adpt->sfp.los = st->sfp.los; + adpt->sfp.mod_abs = st->sfp.mod_abs; + adpt->sfp.tx_dis = st->sfp.tx_dis; + } + adpt->si.main = st->si_main; + adpt->si.pre = st->si_pre; + adpt->si.post = st->si_post; + adpt->si.tx_boost = st->si_tx_boost; + adpt->link_traing = st->link_traing; + adpt->fec = st->fec; + hw->is_sgmii = st->is_sgmii; + hw->pci_gen = st->pci_gen; + hw->pci_lanes = st->pci_lanes; + adpt->speed = st->speed; + adpt->hw.link = st->linkup; + hw->is_backplane = st->is_backplane; + hw->supported_link = st->supported_link; + hw->advertised_link = st->advertised_link; + hw->tp_mdx = st->tp_mdx; + + if ((hw->hw_type == rnp_hw_n10) || (hw->hw_type == rnp_hw_n400)) { + if (hw->fw_version >= 0x00050000) { + hw->sfp_connector = st->sfp_connector; + hw->duplex = st->duplex; + adpt->an = st->autoneg; + } else { + hw->sfp_connector = 0xff; + hw->duplex = 1; + adpt->an = st->an; + } + if (hw->fw_version <= 0x00050000) { + hw->supported_link |= RNP_LINK_SPEED_10GB_FULL | + RNP_LINK_SPEED_1GB_FULL; + } + } + + rnp_logd( + LOG_MBX_LINK_STAT, + "%s:pma_type:0x%x phy_type:0x%x,linkup:%d duplex:%d auton:%d " + "fec:%d an:%d lt:%d is_sgmii:%d supported_link:0x%x, backplane:%d " + "speed:%d sfp_connector:0x%x\n", + adpt->name, st->pma_type, st->phy_type, st->linkup, st->duplex, + st->autoneg, st->fec, st->an, st->link_traing, st->is_sgmii, + hw->supported_link, hw->is_backplane, st->speed, + st->sfp_connector); +quit: + if (cookie) + mbx_free_cookie(cookie, err ? 
false : true); + + return err; +} + +int rnp_mbx_get_link_stat(struct rnp_hw *hw) +{ + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_get_link_status_req(&req, hw->nr_lane, &req); + return rnp_fw_send_cmd_wait(hw, &req, &reply); +} + +int rnp_mbx_fw_reset_phy(struct rnp_hw *hw) +{ + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + int ret; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + if (hw->mbx.other_irq_enabled) { + struct mbx_req_cookie *cookie = mbx_cookie_zalloc(hw, 0); + + if (!cookie) { + return -ENOMEM; + } + + build_reset_phy_req(&req, cookie); + + ret = rnp_mbx_fw_post_req(hw, &req, cookie); + mbx_free_cookie(cookie, ret ? false : true); + return ret; + } else { + build_reset_phy_req(&req, &req); + return rnp_fw_send_cmd_wait(hw, &req, &reply); + } +} + +int rnp_maintain_req(struct rnp_hw *hw, int cmd, int arg0, int req_data_bytes, + int reply_bytes, dma_addr_t dma_phy_addr) +{ + int err; + struct mbx_req_cookie *cookie = NULL; + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + u64 address = dma_phy_addr; + + cookie = mbx_cookie_zalloc(hw, 0); + if (!cookie) { + return -ENOMEM; + } + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + cookie->timeout_jiffes = 60 * HZ; + + build_maintain_req(&req, cookie, cmd, arg0, req_data_bytes, reply_bytes, + address & 0xffffffff, (address >> 32) & 0xffffffff); + + if (hw->mbx.other_irq_enabled) { + cookie->timeout_jiffes = 400 * HZ; + err = rnp_mbx_fw_post_req(hw, &req, cookie); + } else { + int old_mbx_timeout = hw->mbx.timeout; + hw->mbx.timeout = (400 * 1000 * 1000) / hw->mbx.usec_delay; + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + hw->mbx.timeout = old_mbx_timeout; + } + + if (cookie) + mbx_free_cookie(cookie, err ? false : true); + + return (err) ? -EIO : 0; +} + +int rnp_fw_get_macaddr(struct rnp_hw *hw, int pfvfnum, u8 *mac_addr, + int nr_lane) +{ + int err; + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + rnp_dbg("%s: pfvfnum:0x%x nr_lane:%d\n", __func__, pfvfnum, nr_lane); + + if (!mac_addr) { + rnp_err("%s: mac_addr is null\n", __func__); + return -EINVAL; + } + + if (hw->mbx.other_irq_enabled) { + struct mbx_req_cookie *cookie = + mbx_cookie_zalloc(hw, sizeof(reply.mac_addr)); + struct mac_addr *mac; + + if (!cookie) { + return -ENOMEM; + } + + mac = (struct mac_addr *)cookie->priv; + + build_get_macaddress_req(&req, 1 << nr_lane, pfvfnum, cookie); + + err = rnp_mbx_fw_post_req(hw, &req, cookie); + if (err) { + mbx_free_cookie(cookie, false); + return err; + } + hw->pcode = mac->pcode; + + if ((1 << nr_lane) & mac->lanes) { + memcpy(mac_addr, mac->addrs[nr_lane].mac, 6); + } + + mbx_free_cookie(cookie, true); + return 0; + } else { + build_get_macaddress_req(&req, 1 << nr_lane, pfvfnum, &req); + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + if (err) { + rnp_err("%s: failed.
err:%d\n", __func__, err); + return err; + } + + hw->pcode = reply.mac_addr.pcode; + if ((1 << nr_lane) & reply.mac_addr.lanes) { + memcpy(mac_addr, reply.mac_addr.addrs[nr_lane].mac, 6); + return 0; + } + } + + return -ENODATA; +} + +static int rnp_mbx_sfp_read(struct rnp_hw *hw, int sfp_i2c_addr, int reg, + int cnt, u8 *out_buf) +{ + struct mbx_fw_cmd_req req; + int err = -EIO; + int nr_lane = hw->nr_lane; + + if ((cnt > MBX_SFP_READ_MAX_CNT) || !out_buf) { + rnp_err("%s: cnt:%d should <= %d out_buf:%p\n", __func__, cnt, + MBX_SFP_READ_MAX_CNT, out_buf); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + + if (hw->mbx.other_irq_enabled) { + struct mbx_req_cookie *cookie = mbx_cookie_zalloc(hw, cnt); + if (!cookie) { + return -ENOMEM; + } + build_mbx_sfp_read(&req, nr_lane, sfp_i2c_addr, reg, cnt, + cookie); + + err = rnp_mbx_fw_post_req(hw, &req, cookie); + if (err) { + mbx_free_cookie(cookie, false); + return err; + } else { + memcpy(out_buf, cookie->priv, cnt); + err = 0; + mbx_free_cookie(cookie, true); + } + } else { + struct mbx_fw_cmd_reply reply; + + memset(&reply, 0, sizeof(reply)); + build_mbx_sfp_read(&req, nr_lane, sfp_i2c_addr, reg, cnt, + &reply); + + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + if (err == 0) { + memcpy(out_buf, reply.sfp_read.value, cnt); + } + } + + return err; +} + +int rnp_mbx_sfp_module_eeprom_info(struct rnp_hw *hw, int sfp_addr, int reg, + int data_len, u8 *buf) +{ + int left = data_len; + int cnt, err; + + do { + cnt = (left > MBX_SFP_READ_MAX_CNT) ? MBX_SFP_READ_MAX_CNT : + left; + err = rnp_mbx_sfp_read(hw, sfp_addr, reg, cnt, buf); + if (err) { + rnp_err("%s: error:%d\n", __func__, err); + return err; + } + reg += cnt; + buf += cnt; + left -= cnt; + } while (left > 0); + + return 0; +} + +int rnp_mbx_sfp_write(struct rnp_hw *hw, int sfp_addr, int reg, short v) +{ + struct mbx_fw_cmd_req req; + int err; + int nr_lane = hw->nr_lane; + + memset(&req, 0, sizeof(req)); + + build_mbx_sfp_write(&req, nr_lane, sfp_addr, reg, v); + err = rnp_mbx_write_posted_locked(hw, &req); + + return err; +} + +int rnp_mbx_fw_reg_read(struct rnp_hw *hw, int fw_reg) +{ + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + int err, ret = 0xffffffff; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + if (hw->fw_version < 0x00050200) { + return -EOPNOTSUPP; + } + + if (hw->mbx.other_irq_enabled) { + struct mbx_req_cookie *cookie = + mbx_cookie_zalloc(hw, sizeof(reply.r_reg)); + + build_readreg_req(&req, fw_reg, cookie); + err = rnp_mbx_fw_post_req(hw, &req, cookie); + if (err) { + mbx_free_cookie(cookie, false); + return ret; + } + ret = ((int *)(cookie->priv))[0]; + mbx_free_cookie(cookie, true); + } else { + build_readreg_req(&req, fw_reg, &reply); + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + if (err) { + rnp_err("%s: failed. 
err:%d\n", __func__, err); + return err; + } else { + ret = reply.r_reg.value[0]; + } + } + return ret; +} + +int rnp_mbx_reg_write(struct rnp_hw *hw, int fw_reg, int value) +{ + struct mbx_fw_cmd_req req; + int err; + memset(&req, 0, sizeof(req)); + + if (hw->fw_version < 0x00050200) { + return -EOPNOTSUPP; + } + + build_writereg_req(&req, NULL, fw_reg, 4, &value); + + err = rnp_mbx_write_posted_locked(hw, &req); + return err; +} + +int rnp_mbx_reg_writev(struct rnp_hw *hw, int fw_reg, int value[4], int bytes) +{ + struct mbx_fw_cmd_req req; + int err; + memset(&req, 0, sizeof(req)); + + build_writereg_req(&req, NULL, fw_reg, bytes, value); + + err = rnp_mbx_write_posted_locked(hw, &req); + return err; +} + +int rnp_mbx_wol_set(struct rnp_hw *hw, u32 mode) +{ + struct mbx_fw_cmd_req req; + int err; + int nr_lane = hw->nr_lane; + + memset(&req, 0, sizeof(req)); + + build_mbx_wol_set(&req, nr_lane, mode); + + err = rnp_mbx_write_posted_locked(hw, &req); + return err; +} + +int rnp_mbx_set_dump(struct rnp_hw *hw, int flag) +{ + int err; + struct mbx_fw_cmd_req req; + + memset(&req, 0, sizeof(req)); + build_set_dump(&req, hw->nr_lane, flag); + + err = rnp_mbx_write_posted_locked(hw, &req); + + return err; +} + +int rnp_mbx_force_speed(struct rnp_hw *hw, int speed) +{ + int cmd = 0x01150000; + + if (hw->force_10g_1g_speed_ablity == 0) + return -EINVAL; + + if (speed == RNP_LINK_SPEED_10GB_FULL) { + cmd = 0x01150002; + hw->force_speed_stat = FORCE_SPEED_STAT_10G; + hw->saved_force_link_speed = speed; + } else if (speed == RNP_LINK_SPEED_1GB_FULL) { + cmd = 0x01150001; + hw->force_speed_stat = FORCE_SPEED_STAT_1G; + hw->saved_force_link_speed = speed; + } else { + hw->saved_force_link_speed = RNP_LINK_SPEED_UNKNOWN; + cmd = 0x01150000; + hw->force_speed_stat = FORCE_SPEED_STAT_DISABLED; + } + return rnp_mbx_set_dump(hw, cmd); +} + +int rnp_mbx_get_dump(struct rnp_hw *hw, int flags, u8 *data_out, int bytes) +{ + int err; + struct mbx_req_cookie *cookie = NULL; + + struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; + struct get_dump_reply *get_dump; + + void *dma_buf = NULL; + dma_addr_t dma_phy = 0; + u64 address; + + cookie = mbx_cookie_zalloc(hw, sizeof(*get_dump)); + if (!cookie) { + return -ENOMEM; + } + get_dump = (struct get_dump_reply *)cookie->priv; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + if (bytes > sizeof(get_dump->data)) { + dma_buf = dma_alloc_coherent(&hw->pdev->dev, bytes, &dma_phy, + GFP_ATOMIC); + if (!dma_buf) { + dev_err(&hw->pdev->dev, "%s: no memory:%d!", __func__, + bytes); + err = -ENOMEM; + goto quit; + } + } + address = dma_phy; + build_get_dump_req(&req, cookie, hw->nr_lane, address & 0xffffffff, + (address >> 32) & 0xffffffff, bytes); + + if (hw->mbx.other_irq_enabled) { + err = rnp_mbx_fw_post_req(hw, &req, cookie); + } else { + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + get_dump = &reply.get_dump; + } + +quit: + if (err == 0) { + hw->dump.version = get_dump->version; + hw->dump.flag = get_dump->flags; + hw->dump.len = get_dump->bytes; + } + if (err == 0 && data_out) { + if (dma_buf) { + memcpy(data_out, dma_buf, bytes); + } else { + memcpy(data_out, get_dump->data, bytes); + } + } + if (dma_buf) + dma_free_coherent(&hw->pdev->dev, bytes, dma_buf, dma_phy); + + if (cookie) + mbx_free_cookie(cookie, err ? false : true); + return err ? 
-err : 0; +} + +int rnp_fw_update(struct rnp_hw *hw, int partition, const u8 *fw_bin, int bytes) +{ + int err; + struct mbx_req_cookie *cookie = NULL; + + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + void *dma_buf = NULL; + dma_addr_t dma_phy; + + cookie = mbx_cookie_zalloc(hw, 0); + if (!cookie) { + dev_err(&hw->pdev->dev, "%s: no memory:%d!", __func__, 0); + return -ENOMEM; + } + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + dma_buf = + dma_alloc_coherent(&hw->pdev->dev, bytes, &dma_phy, GFP_ATOMIC); + if (!dma_buf) { + dev_err(&hw->pdev->dev, "%s: no memory:%d!", __func__, bytes); + err = -ENOMEM; + goto quit; + } + + memcpy(dma_buf, fw_bin, bytes); + + build_fw_update_req(&req, cookie, partition, dma_phy & 0xffffffff, + (dma_phy >> 32) & 0xffffffff, bytes); + if (hw->mbx.other_irq_enabled) { + cookie->timeout_jiffes = 400 * HZ; + err = rnp_mbx_fw_post_req(hw, &req, cookie); + } else { + int old_mbx_timeout = hw->mbx.timeout; + hw->mbx.timeout = (400 * 1000 * 1000) / hw->mbx.usec_delay; + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + hw->mbx.timeout = old_mbx_timeout; + } + +quit: + if (dma_buf) + dma_free_coherent(&hw->pdev->dev, bytes, dma_buf, dma_phy); + if (cookie) + mbx_free_cookie(cookie, err ? false : true); + printk("%s: %s (errcode:%d)\n", __func__, err ? " failed" : " success", + err); + return (err) ? -EIO : 0; +} + +int rnp_mbx_link_event_enable(struct rnp_hw *hw, int enable) +{ + struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; + int err; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + if (enable) { + int v = rd32(hw, RNP_DMA_DUMY); + v &= 0x0000ffff; + v |= 0xa5a40000; + wr32(hw, RNP_DMA_DUMY, v); + } else { + wr32(hw, RNP_DMA_DUMY, 0); + } + + build_link_set_event_mask(&req, BIT(EVT_LINK_UP), + (enable & 1) << EVT_LINK_UP, &req); + err = rnp_mbx_write_posted_locked(hw, &req); + + return err; +} + +int rnp_fw_get_capability(struct rnp_hw *hw, struct phy_abilities *abil) +{ + int err; + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_phy_abalities_req(&req, &req); + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + + if (err == 0) + memcpy(abil, &reply.phy_abilities, sizeof(*abil)); + + return err; +} + +static int to_mac_type(struct phy_abilities *ability) +{ + int lanes = hweight_long(ability->lane_mask); + if ((ability->phy_type == PHY_TYPE_40G_BASE_KR4) || + (ability->phy_type == PHY_TYPE_40G_BASE_LR4) || + (ability->phy_type == PHY_TYPE_40G_BASE_CR4) || + (ability->phy_type == PHY_TYPE_40G_BASE_SR4)) { + if (lanes == 1) { + return rnp_mac_n10g_x8_40G; + } else { + return rnp_mac_n10g_x8_10G; + } + } else if ((ability->phy_type == PHY_TYPE_10G_BASE_KR) || + (ability->phy_type == PHY_TYPE_10G_BASE_LR) || + (ability->phy_type == PHY_TYPE_10G_BASE_ER) || + (ability->phy_type == PHY_TYPE_10G_BASE_SR)) { + if (lanes == 1) { + return rnp_mac_n10g_x2_10G; + } else if (lanes == 2) { + return rnp_mac_n10g_x4_10G; + } else { + return rnp_mac_n10g_x8_10G; + } + } else if (ability->phy_type == PHY_TYPE_1G_BASE_KX) { + return rnp_mac_n10l_x8_1G; + } else if (ability->phy_type == PHY_TYPE_SGMII) { + return rnp_mac_n10l_x8_1G; + } + return rnp_mac_unknown; +} + +int rnp_set_lane_fun(struct rnp_hw *hw, int fun, int value0, int value1, + int value2, int value3) +{ + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + 
build_set_lane_fun(&req, hw->nr_lane, fun, value0, value1, value2, + value3); + + return rnp_mbx_write_posted_locked(hw, &req); +} + +int rnp_mbx_ifinsmod(struct rnp_hw *hw, int status) +{ + int err; + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_ifinsmod(&req, hw->nr_lane, status); + + if (mutex_lock_interruptible(&hw->mbx.lock)) + return -EAGAIN; + err = hw->mbx.ops.write_posted( + hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + + mutex_unlock(&hw->mbx.lock); + + rnp_logd(LOG_MBX_IFUP_DOWN, "%s: lane:%d status:%d\n", __func__, + hw->nr_lane, status); + return err; +} + +int rnp_mbx_ifsuspuse(struct rnp_hw *hw, int status) +{ + int err; + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_ifsuspuse(&req, hw->nr_lane, status); + + if (mutex_lock_interruptible(&hw->mbx.lock)) + return -EAGAIN; + err = hw->mbx.ops.write_posted( + hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + + mutex_unlock(&hw->mbx.lock); + + rnp_logd(LOG_MBX_IFUP_DOWN, "%s: lane:%d status:%d\n", __func__, + hw->nr_lane, status); + + return err; +} + +int rnp_mbx_ifforce_control_mac(struct rnp_hw *hw, int status) +{ + int err; + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_ifforce(&req, hw->nr_lane, status); + + if (mutex_lock_interruptible(&hw->mbx.lock)) + return -EAGAIN; + + err = hw->mbx.ops.write_posted( + hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + + mutex_unlock(&hw->mbx.lock); + + rnp_logd(LOG_MBX_IFUP_DOWN, "%s: lane:%d status:%d\n", __func__, + hw->nr_lane, status); + + return err; +} + +int rnp_mbx_ifup_down(struct rnp_hw *hw, int up) +{ + int err; + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_ifup_down(&req, hw->nr_lane, up); + + if (mutex_lock_interruptible(&hw->mbx.lock)) + return -EAGAIN; + err = hw->mbx.ops.write_posted( + hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + + mutex_unlock(&hw->mbx.lock); + + rnp_logd(LOG_MBX_IFUP_DOWN, "%s: lane:%d up:%d\n", __func__, + hw->nr_lane, up); + + /* force firmware report link-status */ + if (up) + rnp_link_stat_mark_reset(hw); + + return err; +} + +int rnp_mbx_led_set(struct rnp_hw *hw, int value) +{ + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_led_set(&req, hw->nr_lane, value, &reply); + + return rnp_mbx_write_posted_locked(hw, &req); +} + +int rnp_mbx_get_capability(struct rnp_hw *hw, struct rnp_info *info) +{ + int err; + struct phy_abilities ablity; + int try_cnt = 3; + + memset(&ablity, 0, sizeof(ablity)); + rnp_link_stat_mark_disable(hw); + + while (try_cnt--) { + err = rnp_fw_get_capability(hw, &ablity); + if (err == 0 && info) { + hw->lane_mask = ablity.lane_mask & 0xf; + info->mac = to_mac_type(&ablity); + info->adapter_cnt = hweight_long(hw->lane_mask); + hw->mode = ablity.nic_mode; + hw->pfvfnum = ablity.pfnum; + hw->speed = ablity.speed; + hw->nr_lane = 0; // PF1 + hw->fw_version = ablity.fw_version; + hw->mac_type = info->mac; + hw->phy_type = ablity.phy_type; + hw->axi_mhz = ablity.axi_mhz; + hw->port_ids = ablity.port_ids; + hw->bd_uid = ablity.bd_uid; + hw->phy_id = ablity.phy_id; + hw->wol = ablity.wol_status; + 
hw->eco = ablity.e.v2; + hw->force_link_supported = + ablity.e.force_link_supported; + + if (ablity.e.force_link_supported && + (ablity.e.force_down_en & 0x1)) { + hw->force_status = 1; + } + + if ((hw->fw_version >= 0x00050201) && + (ablity.speed == SPEED_10000)) { + hw->force_speed_stat = + FORCE_SPEED_STAT_DISABLED; + hw->force_10g_1g_speed_ablity = 1; + } + if (ablity.ext_ablity != 0xffffffff && ablity.e.valid) { + hw->ncsi_en = (ablity.e.ncsi_en == 1); + hw->ncsi_rar_entries = 1; + hw->rpu_en = ablity.e.rpu_en; + if (hw->rpu_en) { + ablity.e.rpu_availble = 1; + } + hw->rpu_availble = ablity.e.rpu_availble; + hw->fw_lldp_ablity = ablity.e.fw_lldp_ablity; + } else { + hw->ncsi_rar_entries = 0; + } + + if (hw->force_link_supported == 0) { + hw->force_status = hw->ncsi_en ? 0 : 1; + } + + pr_info("%s: nic-mode:%d mac:%d adpt_cnt:%d lane_mask:0x%x, phy_type: " + "0x%x, " + "pfvfnum:0x%x, fw-version:0x%08x\n, axi:%d Mhz," + "port_id:%d bd_uid:0x%08x 0x%x ex-ablity:0x%x fs:%d speed:%d " + "ncsi_en:%u %d wol=0x%x rpu:%d-%d v2:%d force-status:%d,%d\n", + __func__, hw->mode, info->mac, + info->adapter_cnt, hw->lane_mask, hw->phy_type, + hw->pfvfnum, ablity.fw_version, ablity.axi_mhz, + ablity.port_id[0], hw->bd_uid, ablity.phy_id, + ablity.ext_ablity, + hw->force_10g_1g_speed_ablity, ablity.speed, + hw->ncsi_en, hw->ncsi_rar_entries, hw->wol, + hw->rpu_en, hw->rpu_availble, hw->eco, + hw->force_status, hw->force_link_supported); + if (hw->phy_type == PHY_TYPE_10G_TP) { + hw->supported_link = RNP_LINK_SPEED_10GB_FULL | + RNP_LINK_SPEED_1GB_FULL | + RNP_LINK_SPEED_1GB_HALF; + hw->phy.autoneg_advertised = hw->supported_link; + hw->autoneg = 1; + } + if (info->adapter_cnt != 0) + return 0; + } + } + + dev_err(&hw->pdev->dev, "%s: error!\n", __func__); + return -EIO; +} + +int rnp_mbx_get_temp(struct rnp_hw *hw, int *voltage) +{ + int err; + struct mbx_req_cookie *cookie = NULL; + struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; + struct get_temp *temp; + int temp_v = 0; + + cookie = mbx_cookie_zalloc(hw, sizeof(*temp)); + if (!cookie) { + return -ENOMEM; + } + temp = (struct get_temp *)cookie->priv; + + memset(&req, 0, sizeof(req)); + + build_get_temp(&req, cookie); + + if (hw->mbx.other_irq_enabled) { + err = rnp_mbx_fw_post_req(hw, &req, cookie); + } else { + memset(&reply, 0, sizeof(reply)); + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + temp = &reply.get_temp; + } + + if (voltage) + *voltage = temp->volatage; + temp_v = temp->temp; + + if (cookie) { + mbx_free_cookie(cookie, err ? 
false : true); + } + return temp_v; +} + +enum speed_enum { + speed_10, + speed_100, + speed_1000, + speed_10000, + speed_25000, + speed_40000, + +}; + +void rnp_link_stat_mark(struct rnp_hw *hw, int up) +{ + u32 v; + + v = rd32(hw, RNP_DMA_DUMY); + if ((hw->hw_type == rnp_hw_n10) || (hw->hw_type == rnp_hw_n400)) { + v &= ~(0xffff0000); + v |= 0xa5a40000; + if (up) { + v |= BIT(0); + } else { + v &= ~BIT(0); + } + } + wr32(hw, RNP_DMA_DUMY, v); +} + +void rnp_mbx_probe_stat_set(struct rnp_hw *hw, int stat) +{ +#define RNP10_DMA_DUMMY_PROBE_STAT_BIT (4) + u32 v; + + if( pci_channel_offline(hw->pdev)){ + return; + } + + v = rd32(hw, RNP_DMA_DUMY); + if ((hw->hw_type == rnp_hw_n10) || (hw->hw_type == rnp_hw_n400)) { + v &= ~(0xffff0000); + v |= 0xa5a40000; + + if (stat == MBX_PROBE) { + v |= BIT(RNP10_DMA_DUMMY_PROBE_STAT_BIT); + } else if (stat == MBX_REMOVE) { + v = 0xFFA5A6A7; + } else { + v &= ~BIT(RNP10_DMA_DUMMY_PROBE_STAT_BIT); + } + } + wr32(hw, RNP_DMA_DUMY, v); +} + +static inline int rnp_mbx_fw_req_handler(struct rnp_adapter *adapter, + struct mbx_fw_cmd_req *req) +{ + struct rnp_hw *hw = &adapter->hw; + + switch (req->opcode) { + case LINK_STATUS_EVENT: + rnp_logd( + LOG_LINK_EVENT, + "[LINK_STATUS_EVENT:0x%x] %s:link changed: changed_lane:0x%x, " + "status:0x%x, speed:%d, duplex:%d\n", + req->opcode, adapter->name, + req->link_stat.changed_lanes, + req->link_stat.lane_status, req->link_stat.st[0].speed, + req->link_stat.st[0].duplex); + + if (req->link_stat.lane_status) { + adapter->hw.link = 1; + } else { + adapter->hw.link = 0; + } + if (req->link_stat.st[0].lldp_status) + adapter->priv_flags |= RNP_PRIV_FLAG_LLDP_EN_STAT; + else + adapter->priv_flags &= (~RNP_PRIV_FLAG_LLDP_EN_STAT); + + if (req->link_stat.port_st_magic == SPEED_VALID_MAGIC) { + hw->speed = req->link_stat.st[0].speed; + hw->duplex = req->link_stat.st[0].duplex; + + switch (hw->speed) { + case 10: + adapter->speed = RNP_LINK_SPEED_10_FULL; + break; + case 100: + adapter->speed = RNP_LINK_SPEED_100_FULL; + break; + case 1000: + adapter->speed = RNP_LINK_SPEED_1GB_FULL; + break; + case 10000: + adapter->speed = RNP_LINK_SPEED_10GB_FULL; + break; + case 25000: + adapter->speed = RNP_LINK_SPEED_25GB_FULL; + break; + case 40000: + adapter->speed = RNP_LINK_SPEED_40GB_FULL; + break; + } + } + if (req->link_stat.lane_status) { + rnp_link_stat_mark(hw, 1); + } else { + rnp_link_stat_mark(hw, 0); + } + + adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE; + break; + } + rnp_service_event_schedule(adapter); + + return 0; +} + +static inline int rnp_mbx_fw_reply_handler(struct rnp_adapter *adapter, + struct mbx_fw_cmd_reply *reply) +{ + struct mbx_req_cookie *cookie; + + cookie = reply->cookie; + if (!cookie || is_cookie_valid(&adapter->hw, cookie) == false || + cookie->stat != COOKIE_ALLOCED) { + return -EIO; + } + + if (cookie->priv_len > 0) { + memcpy(cookie->priv, reply->data, cookie->priv_len); + } + + cookie->done = 1; + + if (reply->flags & FLAGS_ERR) { + cookie->errcode = reply->error_code; + } else { + cookie->errcode = 0; + } + + if (cookie->stat == COOKIE_ALLOCED) { + wake_up_interruptible(&cookie->wait); + } + /* not really free cookie, mark as free-able */ + mbx_free_cookie(cookie, false); + + return 0; +} + +static inline int rnp_rcv_msg_from_fw(struct rnp_adapter *adapter) +{ + u32 msgbuf[RNP_FW_MAILBOX_SIZE]; + struct rnp_hw *hw = &adapter->hw; + s32 retval; + + retval = rnp_read_mbx(hw, msgbuf, RNP_FW_MAILBOX_SIZE, MBX_FW); + if (retval) { + printk("Error receiving message from FW:%d\n", retval); + return 
retval; + } + + rnp_logd(LOG_MBX_MSG_IN, + "msg from fw: msg[0]=0x%08x_0x%08x_0x%08x_0x%08x\n", msgbuf[0], + msgbuf[1], msgbuf[2], msgbuf[3]); + + /* this is a message we already processed, do nothing */ + if (((unsigned short *)msgbuf)[0] & FLAGS_DD) { + return rnp_mbx_fw_reply_handler( + adapter, (struct mbx_fw_cmd_reply *)msgbuf); + } else { + return rnp_mbx_fw_req_handler(adapter, + (struct mbx_fw_cmd_req *)msgbuf); + } +} + +static void rnp_rcv_ack_from_fw(struct rnp_adapter *adapter) +{ + /* do-nothing */ +} + +int rnp_fw_msg_handler(struct rnp_adapter *adapter) +{ + /* == check fw-req */ + if (!rnp_check_for_msg(&adapter->hw, MBX_FW)) + rnp_rcv_msg_from_fw(adapter); + + /* process any acks */ + if (!rnp_check_for_ack(&adapter->hw, MBX_FW)) + rnp_rcv_ack_from_fw(adapter); + + return 0; +} + +int rnp_mbx_phy_write(struct rnp_hw *hw, u32 reg, u32 val) +{ + struct mbx_fw_cmd_req req; + char nr_lane = hw->nr_lane; + memset(&req, 0, sizeof(req)); + + build_set_phy_reg(&req, NULL, PHY_EXTERNAL_PHY_MDIO, nr_lane, reg, val, + 0); + + return rnp_mbx_write_posted_locked(hw, &req); +} + +int rnp_mbx_phy_read(struct rnp_hw *hw, u32 reg, u32 *val) +{ + struct mbx_fw_cmd_req req; + int err = -EIO; + char nr_lane = hw->nr_lane; + int times = 0; +retry: + memset(&req, 0, sizeof(req)); + + if (hw->mbx.other_irq_enabled) { + struct mbx_req_cookie *cookie = mbx_cookie_zalloc(hw, 4); + if (!cookie) { + return -ENOMEM; + } + build_get_phy_reg(&req, cookie, PHY_EXTERNAL_PHY_MDIO, nr_lane, + reg); + + err = rnp_mbx_fw_post_req(hw, &req, cookie); + if (err) { + mbx_free_cookie(cookie, false); + return err; + } else { + memcpy(val, cookie->priv, 4); + err = 0; + } + mbx_free_cookie(cookie, true); + } else { + struct mbx_fw_cmd_reply reply; + memset(&reply, 0, sizeof(reply)); + build_get_phy_reg(&req, &reply, PHY_EXTERNAL_PHY_MDIO, nr_lane, + reg); + + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + if (err == 0) { + *val = reply.r_reg.value[0]; + } + } + if ((*(val) == 0xffff) && (times <= 5)) { + printk("%x warning mbx_phy_read 0xffff, addr %x\n", times, reg); + times++; + goto retry; + } + return err; +} + +int rnp_mbx_phy_link_set(struct rnp_hw *hw, int adv, int autoneg, int speed, + int duplex, int mdix_ctrl) +{ + int err; + struct mbx_fw_cmd_req req; + + memset(&req, 0, sizeof(req)); + + printk("%s:lane:%d adv:0x%x\n", __func__, hw->nr_lane, adv); + printk("%s:autoneg %x, speed %x, duplex %x\n", __func__, autoneg, speed, + duplex); + + build_phy_link_set(&req, adv, hw->nr_lane, autoneg, speed, duplex, + mdix_ctrl); + + if (mutex_lock_interruptible(&hw->mbx.lock)) + return -EAGAIN; + err = hw->mbx.ops.write_posted( + hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + + mutex_unlock(&hw->mbx.lock); + return err; +} + +int rnp_mbx_phy_pause_set(struct rnp_hw *hw, int pause_mode) +{ + int err; + struct mbx_fw_cmd_req req; + + memset(&req, 0, sizeof(req)); + + printk("%s:lane:%d pause:0x%x\n", __func__, hw->nr_lane, pause_mode); + + build_phy_pause_set(&req, pause_mode, hw->nr_lane); + + if (mutex_lock_interruptible(&hw->mbx.lock)) + return -EAGAIN; + err = hw->mbx.ops.write_posted( + hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + + mutex_unlock(&hw->mbx.lock); + return err; +} + +int rnp_mbx_lldp_port_enable(struct rnp_hw *hw, bool enable) +{ + struct mbx_fw_cmd_req req; + int err; + int nr_lane = hw->nr_lane; + + if (!hw->fw_lldp_ablity) { + rnp_warn("lldp set not supported\n"); + return -EOPNOTSUPP; + } + + memset(&req, 0, sizeof(req)); + + build_lldp_ctrl_set(&req, 
nr_lane, enable); + + err = rnp_mbx_write_posted_locked(hw, &req); + return err; +} + +int rnp_mbx_lldp_status_get(struct rnp_hw *hw) +{ + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + int err, ret = 0; + + if (!hw->fw_lldp_ablity) { + rnp_warn("fw lldp not supported\n"); + return -EOPNOTSUPP; + } + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + if (hw->mbx.other_irq_enabled) { + struct mbx_req_cookie *cookie = + mbx_cookie_zalloc(hw, sizeof(reply.lldp)); + + if (!cookie) { + return -ENOMEM; + } + build_lldp_ctrl_get(&req, hw->nr_lane, cookie); + + err = rnp_mbx_fw_post_req(hw, &req, cookie); + if (err) { + mbx_free_cookie(cookie, false); + return ret; + } + ret = ((int *)(cookie->priv))[0]; + mbx_free_cookie(cookie, true); + } else { + build_lldp_ctrl_get(&req, hw->nr_lane, &reply); + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + if (err) { + rnp_err("%s: 1 error:%d\n", __func__, err); + return -EIO; + } + ret = reply.lldp.enable_stat; + } + return ret; +} + +int rnp_mbx_ddr_csl_enable(struct rnp_hw *hw, int enable, dma_addr_t dma_phy, + int bytes) +{ + struct mbx_fw_cmd_req req; + memset(&req, 0, sizeof(req)); + + build_ddr_csl(&req, NULL, enable, dma_phy, bytes); + + if (hw->mbx.other_irq_enabled) { + return rnp_mbx_write_posted_locked(hw, &req); + } else { + struct mbx_fw_cmd_reply reply; + memset(&reply, 0, sizeof(reply)); + return rnp_fw_send_cmd_wait(hw, &req, &reply); + } +} diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.h b/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.h new file mode 100755 index 0000000000000000000000000000000000000000..6e075f12c9642b3c8ff7d90f3baaec30451eaf10 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.h @@ -0,0 +1,1135 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#ifndef MBX_FW_CMD_H +#define MBX_FW_CMD_H + +#include +#include +#include + +#ifndef _PACKED_ALIGN4 +#define _PACKED_ALIGN4 __attribute__((packed, aligned(4))) +#endif + +enum GENERIC_CMD { + /* generat */ + GET_VERSION = 0x0001, + READ_REG = 0xFF03, + WRITE_REG = 0xFF04, + MODIFY_REG = 0xFF07, + /* virtualization */ + IFUP_DOWN = 0x0800, + SEND_TO_PF = 0x0801, + SEND_TO_VF = 0x0802, + DRIVER_INSMOD = 0x0803, + SYSTEM_SUSPUSE = 0x0804, + FORCE_LINK_ON_CLOSE = 0x0805, + /* link configuration admin commands */ + GET_PHY_ABALITY = 0x0601, + GET_MAC_ADDRESS = 0x0602, + RESET_PHY = 0x0603, + LED_SET = 0x0604, + GET_LINK_STATUS = 0x0607, + LINK_STATUS_EVENT = 0x0608, + SET_LANE_FUN = 0x0609, + GET_LANE_STATUS = 0x0610, + SFP_SPEED_CHANGED_EVENT = 0x0611, + SET_EVENT_MASK = 0x0613, + SET_LOOPBACK_MODE = 0x0618, + SET_PHY_REG = 0x0628, + GET_PHY_REG = 0x0629, + PHY_LINK_SET = 0x0630, + GET_PHY_STATISTICS = 0x0631, + PHY_PAUSE_SET = 0x0632, + /*sfp-module*/ + SFP_MODULE_READ = 0x0900, + SFP_MODULE_WRITE = 0x0901, + /* fw update */ + FW_UPDATE = 0x0700, + FW_MAINTAIN = 0x0701, + WOL_EN = 0x0910, + GET_DUMP = 0x0a00, + SET_DUMP = 0x0a10, + GET_TEMP = 0x0a11, + SET_WOL = 0x0a12, + LLDP_TX_CTL = 0x0a13, + SET_DDR_CSL = 0xFF11, +}; + +enum link_event_mask { + EVT_LINK_UP = 1, + EVT_NO_MEDIA = 2, + EVT_LINK_FAULT = 3, + EVT_PHY_TEMP_ALARM = 4, + EVT_EXCESSIVE_ERRORS = 5, + EVT_SIGNAL_DETECT = 6, + EVT_AUTO_NEGOTIATION_DONE = 7, + EVT_MODULE_QUALIFICATION_FAILED = 8, + EVT_PORT_TX_SUSPEND = 9, +}; + +enum pma_type { + PHY_TYPE_NONE = 0, + PHY_TYPE_1G_BASE_KX, + PHY_TYPE_SGMII, + PHY_TYPE_10G_BASE_KR, + PHY_TYPE_25G_BASE_KR, + PHY_TYPE_40G_BASE_KR4, + PHY_TYPE_10G_BASE_SR, + PHY_TYPE_40G_BASE_SR4, + PHY_TYPE_40G_BASE_CR4, + PHY_TYPE_40G_BASE_LR4, + PHY_TYPE_10G_BASE_LR, + PHY_TYPE_10G_BASE_ER, + PHY_TYPE_10G_TP +}; + +#define PHY_C45 (BIT(30)) +#define PHY_MMD(i) (i << 16) +#define PHY_MMD_PMAPMD PHY_MMD(1) +#define PHY_MMD_AN PHY_MMD(7) +#define PHY_MMD_VEND2 PHY_MMD(31) +#define PHY_826x_MDIX (PHY_C45 | PHY_MMD_VEND2 | 0xa430) +#define PHY_826x_SPEED (PHY_C45 | PHY_MMD_PMAPMD | 0) +#define PHY_826x_DUPLEX (PHY_C45 | PHY_MMD_VEND2 | 0xa44) +#define PHY_826x_AN (PHY_C45 | PHY_MMD_AN | 0) +#define PHY_826x_ADV (PHY_C45 | PHY_MMD_AN | 16) +#define PHY_826x_GBASE_ADV (PHY_C45 | PHY_MMD_AN | 0x20) +#define PHY_826x_GBASE_ADV_2 (PHY_C45 | PHY_MMD_VEND2 | 0xa412) +struct phy_abilities { + unsigned char link_stat; + unsigned char lane_mask; + int speed; + short phy_type; + short nic_mode; + short pfnum; + unsigned int fw_version; + unsigned int axi_mhz; + union { + unsigned char port_id[4]; + unsigned int port_ids; + }; + unsigned int bd_uid; + int phy_id; + int wol_status; + + union { + unsigned int ext_ablity; + struct { + unsigned int valid : 1; /* 0 */ + unsigned int wol_en : 1; /* 1 */ + unsigned int pci_preset_runtime_en : 1; /* 2 */ + unsigned int smbus_en : 1; /* 3 */ + unsigned int ncsi_en : 1; /* 4 */ + unsigned int rpu_en : 1; /* 5 */ + unsigned int v2 : 1; /* 6 */ + unsigned int pxe_en : 1; /* 7 */ + unsigned int mctp_en : 1; /* 8 */ + unsigned int yt8614 : 1; /* 9 */ + unsigned int pci_ext_reset : 1; /* 10 */ + unsigned int rpu_availble : 1; /* 11 */ + unsigned int fw_lldp_ablity : 1; /* 12 */ + unsigned int lldp_enabled : 1; /* 13 */ + unsigned int only_1g : 1; /* 14 */ + unsigned int force_down_en : 4; /* 15-18 */ + unsigned int force_link_supported : 1; /* 19 */ + unsigned int ports_is_sgmii_valid : 1; /* [20] */ + unsigned int lane0_is_sgmii : 1; /* [21] */ + unsigned int lane1_is_sgmii : 
1; /* [22] */ + unsigned int lane2_is_sgmii : 1; /* [23] */ + unsigned int lane3_is_sgmii : 1; /* [24] */ + } e; + }; + +} _PACKED_ALIGN4; + +enum LOOPBACK_LEVEL { + LOOPBACK_DISABLE = 0, + LOOPBACK_MAC = 1, + LOOPBACK_PCS = 5, + LOOPBACK_EXTERNAL = 6, +}; +enum LOOPBACK_TYPE { + /* Tx->Rx */ + LOOPBACK_TYPE_LOCAL = 0x0, +}; + +enum LOOPBACK_FORCE_SPEED { + LOOPBACK_FORCE_SPEED_NONE = 0x0, + LOOPBACK_FORCE_SPEED_1GBS = 0x1, + LOOPBACK_FORCE_SPEED_10GBS = 0x2, + LOOPBACK_FORCE_SPEED_40_25GBS = 0x3, +}; + +enum PHY_INTERFACE { + PHY_INTERNAL_PHY = 0, + PHY_EXTERNAL_PHY_MDIO = 1, +}; + +/* Table 3-54. Get link status response (opcode: 0x0607) */ +struct link_stat_data { + char phy_type; + unsigned char speed; +#define LNK_STAT_SPEED_UNKNOWN 0 +#define LNK_STAT_SPEED_10 1 +#define LNK_STAT_SPEED_100 2 +#define LNK_STAT_SPEED_1000 3 +#define LNK_STAT_SPEED_10000 4 +#define LNK_STAT_SPEED_25000 5 +#define LNK_STAT_SPEED_40000 6 + /* 2 */ + char link_stat : 1; +#define LINK_UP 1 +#define LINK_DOWN 0 + char link_fault : 4; +#define LINK_LINK_FAULT BIT(0) +#define LINK_TX_FAULT BIT(1) +#define LINK_RX_FAULT BIT(2) +#define LINK_REMOTE_FAULT BIT(3) + char extern_link_stat : 1; + char media_available : 1; + char rev1 : 1; + /* 3:ignore */ + char an_completed : 1; + char lp_an_ablity : 1; + char parallel_detection_fault : 1; + char fec_enabled : 1; + char low_power_state : 1; + char link_pause_status : 2; + char qualified_odule : 1; + /* 4 */ + char phy_temp_alarm : 1; + char excessive_link_errors : 1; + char port_tx_suspended : 2; + char force_40G_enabled : 1; + char external_25G_phy_err_code : 3; +#define EXTERNAL_25G_PHY_NOT_PRESENT 1 +#define EXTERNAL_25G_PHY_NVM_CRC_ERR 2 +#define EXTERNAL_25G_PHY_MDIO_ACCESS_FAILED 6 +#define EXTERNAL_25G_PHY_INIT_SUCCED 7 + /* 5 */ + char loopback_enabled_status : 4; +#define LOOPBACK_DISABLE 0x0 +#define LOOPBACK_MAC 0x1 +#define LOOPBACK_SERDES 0x2 +#define LOOPBACK_PHY_INTERNAL 0x3 +#define LOOPBACK_PHY_EXTERNAL 0x4 + char loopback_type_status : 1; +#define LOCAL_LOOPBACK 0 /* tx->rx */ +#define FAR_END_LOOPBACK 0 /* rx->Tx */ + char rev3 : 1; + char external_dev_power_ability : 2; + /* 6-7 */ + short max_frame_sz; + /* 8 */ + char _25gb_kr_fec_enabled : 1; + char _25gb_rs_fec_enabled : 1; + char crc_enabled : 1; + char rev4 : 5; + /* 9 */ + int link_type; /* same as Phy type */ + char link_type_ext; +} _PACKED_ALIGN4; + +struct port_stat { + u8 phyid; + u8 duplex : 1; + u8 autoneg : 1; + u8 fec : 1; + u8 rev : 1; + u8 link_traing : 1; + u8 is_sgmii : 1; + u8 lldp_status : 1; + u32 speed; +} __attribute__((packed)); + +struct lane_stat_data { + u8 nr_lane; + u8 pci_gen : 4; + u8 pci_lanes : 4; + u8 pma_type; + u8 phy_type; + u16 linkup : 1; + u16 duplex : 1; + u16 autoneg : 1; + u16 fec : 1; + u16 an : 1; + u16 link_traing : 1; + u16 media_available : 1; + u16 is_sgmii : 1; // + u16 link_fault : 4; +#define LINK_LINK_FAULT BIT(0) +#define LINK_TX_FAULT BIT(1) +#define LINK_RX_FAULT BIT(2) +#define LINK_REMOTE_FAULT BIT(3) + u16 is_backplane : 1; + u16 tp_mdx : 2; + union { + u8 phy_addr; + struct { + u8 mod_abs : 1; + u8 fault : 1; + u8 tx_dis : 1; + u8 los : 1; + } sfp; + }; + u8 sfp_connector; + u32 speed; + u32 si_main; + u32 si_pre; + u32 si_post; + u32 si_tx_boost; + u32 supported_link; + u32 phy_id; + u32 advertised_link; +} __attribute__((packed)); + +struct yt_phy_statistics { + u32 pkg_ib_valid; /* rx crc good and length 64-1518 */ + u32 pkg_ib_os_good; /* rx crc good and length >1518 */ + u32 pkg_ib_us_good; /* rx crc good and length <64 */ + u16 
pkg_ib_err; /* rx crc wrong and length 64-1518 */ + u16 pkg_ib_os_bad; /* rx crc wrong and length >1518 */ + u16 pkg_ib_frag; /* rx crc wrong and length <64 */ + u16 pkg_ib_nosfd; /* rx sfd missed */ + u32 pkg_ob_valid; /* tx crc good and length 64-1518 */ + u32 pkg_ob_os_good; /* tx crc good and length >1518 */ + u32 pkg_ob_us_good; /* tx crc good and length <64 */ + u16 pkg_ob_err; /* tx crc wrong and length 64-1518 */ + u16 pkg_ob_os_bad; /* tx crc wrong and length >1518 */ + u16 pkg_ob_frag; /* tx crc wrong and length <64 */ + u16 pkg_ob_nosfd; /* tx sfd missed */ +} __attribute__((packed)); + +struct phy_statistics { + union { + struct yt_phy_statistics yt; + }; +} __attribute__((packed)); +/* == flags == */ +#define FLAGS_DD BIT(0) /* driver clear 0, FW must set 1 */ +#define FLAGS_CMP BIT(1) /* driver clear 0, FW mucst set */ +#define FLAGS_ERR BIT(2) +/* driver clear 0, FW must set only if it reporting an error */ +#define FLAGS_LB BIT(9) +#define FLAGS_RD BIT(10) /* set if additional buffer has command parameters */ +#define FLAGS_BUF BIT(12) /* set 1 on indirect command */ +#define FLAGS_SI BIT(13) /* not irq when command complete */ +#define FLAGS_EI BIT(14) /* interrupt on error */ +#define FLAGS_FE BIT(15) /* flush erro */ + +#ifndef SHM_DATA_MAX_BYTES +#define SHM_DATA_MAX_BYTES (64 - 2 * 4) +#endif + +#define MBX_REQ_HDR_LEN 24 +#define MBX_REPLYHDR_LEN 16 +#define MBX_REQ_MAX_DATA_LEN (SHM_DATA_MAX_BYTES - MBX_REQ_HDR_LEN) +#define MBX_REPLY_MAX_DATA_LEN (SHM_DATA_MAX_BYTES - MBX_REPLYHDR_LEN) + +// TODO req is little endian. bigendian should be conserened + +struct mbx_fw_cmd_req { + unsigned short flags; /* 0-1 */ + unsigned short opcode; /* 2-3 enum LINK_ADM_CMD */ + unsigned short datalen; /* 4-5 */ + unsigned short ret_value; /* 6-7 */ + union { + struct { + unsigned int cookie_lo; /* 8-11 */ + unsigned int cookie_hi; /* 12-15 */ + }; + void *cookie; + }; + unsigned int reply_lo; /* 16-19 5dw */ + unsigned int reply_hi; /* 20-23 */ + /*=== data === 7dw [24-64] */ + union { + char data[0]; + + struct { + unsigned int addr; + unsigned int bytes; + } r_reg; + + struct { + unsigned int addr; + unsigned int bytes; + unsigned int data[4]; + } w_reg; + + struct { + unsigned int lanes; + } ptp; + + struct { + int lane; + int up; + } ifup; + + struct { + int nr_lane; +#define LLDP_TX_ALL_LANES 0xFF + int op; +#define LLDP_TX_SET 0x0 +#define LLDP_TX_GET 0x1 + int enable; + } lldp_tx; + + struct { + int lane; + int status; + } ifinsmod; + + struct { + int lane; + int status; + } ifsuspuse; + + struct { + int nr_lane; + int status; + } ifforce; + + struct { + int nr_lane; + } get_lane_st; + + struct { + int nr_lane; + int func; +#define LANE_FUN_AN 0 +#define LANE_FUN_LINK_TRAING 1 +#define LANE_FUN_FEC 2 +#define LANE_FUN_SI 3 +#define LANE_FUN_SFP_TX_DISABLE 4 +#define LANE_FUN_PCI_LANE 5 +#define LANE_FUN_PRBS 6 +#define LANE_FUN_SPEED_CHANGE 7 + + int value0; + int value1; + int value2; + int value3; + } set_lane_fun; + + struct { + int flag; + int nr_lane; + } set_dump; + + struct { + int lane; + int enable; + } wol; + + struct { + unsigned int bytes; + unsigned int nr_lane; + unsigned int bin_phy_lo; + unsigned int bin_phy_hi; + } get_dump; + + struct { + unsigned int nr_lane; + int value; +#define LED_IDENTIFY_INACTIVE 0 +#define LED_IDENTIFY_ACTIVE 1 +#define LED_IDENTIFY_ON 2 +#define LED_IDENTIFY_OFF 3 + } led_set; + + struct { + unsigned int addr; + unsigned int data; + unsigned int mask; + } modify_reg; + + struct { + unsigned int adv_speed_mask; + unsigned int 
autoneg; + unsigned int speed; + unsigned int duplex; + int nr_lane; + unsigned int tp_mdix_ctrl; + } phy_link_set; + + struct { + unsigned int pause_mode; + int nr_lane; + } phy_pause_set; + + struct { + unsigned int nr_lane; + unsigned int sfp_adr; + unsigned int reg; + unsigned int cnt; + } sfp_read; + + struct { + unsigned int nr_lane; + unsigned int sfp_adr; + unsigned int reg; + unsigned int val; + } sfp_write; + + struct { + unsigned int nr_lane; /* 0-3 */ + } get_linkstat; + struct { + unsigned short changed_lanes; + unsigned short lane_status; + unsigned int port_st_magic; +#define SPEED_VALID_MAGIC 0xa4a6a8a9 + struct port_stat st[4]; + } link_stat; + + struct { + unsigned short enable_stat; + unsigned short event_mask; + } stat_event_mask; + + struct { /* set loopback */ + unsigned char loopback_level; + unsigned char loopback_type; + unsigned char loopback_force_speed; + + char loopback_force_speed_enable : 1; + } loopback; + + struct { + int cmd; + int arg0; + int req_bytes; + int reply_bytes; + int ddr_lo; + int ddr_hi; + } maintain; + + struct { /* set phy register */ + char phy_interface; + union { + char page_num; + char external_phy_addr; + }; + int phy_reg_addr; + int phy_w_data; + int reg_addr; + int w_data; + /* 1 = ignore page_num, use last QSFP */ + char recall_qsfp_page : 1; + /* page value */ + /* 0 = use page_num for QSFP */ + char nr_lane; + } set_phy_reg; + + struct { + int enable; + int ddr_phy_hi; + int ddr_phy_lo; + int bytes; + } ddr_csl; + + struct { + } get_phy_ablity; + + struct { + int lane_mask; + int pfvf_num; + } get_mac_addr; + + struct { + char phy_interface; + union { + char page_num; + char external_phy_addr; + }; + int phy_reg_addr; + char nr_lane; + } get_phy_reg; + + struct { + unsigned int nr_lane; + } phy_statistics; + + struct { + char paration; + unsigned int bytes; + unsigned int bin_phy_lo; + unsigned int bin_phy_hi; + } fw_update; + }; +} _PACKED_ALIGN4; + +/* firmware -> driver */ +struct mbx_fw_cmd_reply { + unsigned short flags; + /* fw must set: DD, CMP, Error(if error), copy value */ + /* from command: LB,RD,VFC,BUF,SI,EI,FE */ + unsigned short opcode; /* 2-3: copy from req */ + unsigned short error_code; /* 4-5: 0 if no error */ + unsigned short datalen; + /* 6-7: */ + union { + struct { + unsigned int cookie_lo; /* 8-11: */ + unsigned int cookie_hi; /* 12-15: */ + }; + void *cookie; + }; + /* ===== data ==== [16-64] */ + union { + char data[0]; + + struct version { + unsigned int major; + unsigned int sub; + unsigned int modify; + } version; + + struct { + unsigned int value[4]; + } r_reg; + + struct { + unsigned int new_value; + } modify_reg; + + struct get_temp { + int temp; + int volatage; + } get_temp; + + struct lldp_stat { + int enable_stat; + } lldp; + + struct { +#define MBX_SFP_READ_MAX_CNT 32 + char value[MBX_SFP_READ_MAX_CNT]; + } sfp_read; + + struct mac_addr { + int lanes; + struct _addr { + /* for macaddr:01:02:03:04:05:06 + * mac-hi=0x01020304 mac-lo=0x05060000 + */ + unsigned char mac[8]; + } addrs[4]; + u32 pcode; + } mac_addr; + + struct get_dump_reply { + int flags; + int version; + int bytes; + int data[4]; + } get_dump; + + struct lane_stat_data lanestat; + struct link_stat_data linkstat; + struct phy_abilities phy_abilities; + struct phy_statistics phy_statistics; + }; +} _PACKED_ALIGN4; + +static inline void build_lldp_ctrl_set(struct mbx_fw_cmd_req *req, int nr_lane, + int enable) +{ + req->flags = 0; + req->opcode = LLDP_TX_CTL; + req->datalen = sizeof(req->lldp_tx); + req->cookie = NULL; + req->reply_lo = 
0; + req->reply_hi = 0; + req->lldp_tx.op = LLDP_TX_SET; + req->lldp_tx.nr_lane = nr_lane; + req->lldp_tx.enable = enable; +} + +static inline void build_lldp_ctrl_get(struct mbx_fw_cmd_req *req, int nr_lane, + void *cookie) +{ + req->flags = 0; + req->opcode = LLDP_TX_CTL; + req->datalen = sizeof(req->lldp_tx); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->lldp_tx.op = LLDP_TX_GET; + req->lldp_tx.nr_lane = nr_lane; +} + +static inline void build_maintain_req(struct mbx_fw_cmd_req *req, void *cookie, + int cmd, int arg0, int req_bytes, + int reply_bytes, u32 dma_phy_lo, + u32 dma_phy_hi) +{ + req->flags = 0; + req->opcode = FW_MAINTAIN; + req->datalen = sizeof(req->maintain); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->maintain.cmd = cmd; + req->maintain.arg0 = arg0; + req->maintain.req_bytes = req_bytes; + req->maintain.reply_bytes = reply_bytes; + req->maintain.ddr_lo = dma_phy_lo; + req->maintain.ddr_hi = dma_phy_hi; +} + +static inline void build_fw_update_req(struct mbx_fw_cmd_req *req, void *cookie, + int partition, u32 fw_bin_phy_lo, + u32 fw_bin_phy_hi, int fw_bytes) +{ + req->flags = 0; + req->opcode = FW_UPDATE; + req->datalen = sizeof(req->fw_update); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->fw_update.paration = partition; + req->fw_update.bytes = fw_bytes; + req->fw_update.bin_phy_lo = fw_bin_phy_lo; + req->fw_update.bin_phy_hi = fw_bin_phy_hi; +} + +static inline void build_reset_phy_req(struct mbx_fw_cmd_req *req, void *cookie) +{ + req->flags = 0; + req->opcode = RESET_PHY; + req->datalen = 0; + req->reply_lo = 0; + req->reply_hi = 0; + req->cookie = cookie; +} + +static inline void build_phy_abalities_req(struct mbx_fw_cmd_req *req, + void *cookie) +{ + req->flags = 0; + req->opcode = GET_PHY_ABALITY; + req->datalen = 0; + req->reply_lo = 0; + req->reply_hi = 0; + req->cookie = cookie; +} + +static inline void build_get_macaddress_req(struct mbx_fw_cmd_req *req, + int lane_mask, int pfvfnum, + void *cookie) +{ + req->flags = 0; + req->opcode = GET_MAC_ADDRESS; + req->datalen = sizeof(req->get_mac_addr); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->get_mac_addr.lane_mask = lane_mask; + req->get_mac_addr.pfvf_num = pfvfnum; +} + +static inline void build_version_req(struct mbx_fw_cmd_req *req, void *cookie) +{ + req->flags = 0; + req->opcode = GET_VERSION; + req->reply_lo = 0; + req->reply_hi = 0; + req->datalen = 0; + req->cookie = cookie; +} + +/* 7.10.11.8 Read egister admin command */ +static inline void build_readreg_req(struct mbx_fw_cmd_req *req, int reg_addr, + void *cookie) +{ + req->flags = 0; + req->opcode = READ_REG; + req->datalen = sizeof(req->r_reg); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->r_reg.addr = reg_addr & ~(3); + req->r_reg.bytes = 4; +} + +static inline void mbx_fw_req_set_reply(struct mbx_fw_cmd_req *req, + dma_addr_t reply) +{ + u64 address = reply; + + req->reply_hi = (address >> 32); + req->reply_lo = (address) & 0xffffffff; +} + +/* 7.10.11.9 Write egister admin command */ +static inline void build_writereg_req(struct mbx_fw_cmd_req *req, void *cookie, + int reg_addr, int bytes, int value[4]) +{ + int i; + + req->flags = 0; + req->opcode = WRITE_REG; + req->datalen = sizeof(req->w_reg); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->w_reg.addr = reg_addr & ~3; + req->w_reg.bytes = bytes; + for (i = 0; i < bytes / 4; i++) + req->w_reg.data[i] = value[i]; +} + +/* 7.10.11.10 
modify egister admin command */ +static inline void build_modifyreg_req(struct mbx_fw_cmd_req *req, void *cookie, + int reg_addr, int value, + unsigned int mask) +{ + req->flags = 0; + req->opcode = MODIFY_REG; + req->datalen = sizeof(req->modify_reg); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->modify_reg.addr = reg_addr; + req->modify_reg.data = value; + req->modify_reg.mask = mask; +} + +static inline void build_get_lane_status_req(struct mbx_fw_cmd_req *req, + int nr_lane, void *cookie) +{ + req->flags = 0; + req->opcode = GET_LANE_STATUS; + req->datalen = sizeof(req->get_lane_st); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->get_lane_st.nr_lane = nr_lane; +} + +static inline void build_get_link_status_req(struct mbx_fw_cmd_req *req, + int nr_lane, void *cookie) +{ + req->flags = 0; + req->opcode = GET_LINK_STATUS; + req->datalen = sizeof(req->get_linkstat); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->get_linkstat.nr_lane = nr_lane; +} + +static inline void build_get_temp(struct mbx_fw_cmd_req *req, void *cookie) +{ + req->flags = 0; + req->opcode = GET_TEMP; + req->datalen = 0; + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; +} +static inline void build_get_dump_req(struct mbx_fw_cmd_req *req, void *cookie, + int nr_lane, u32 fw_bin_phy_lo, + u32 fw_bin_phy_hi, int bytes) +{ + req->flags = 0; + req->opcode = GET_DUMP; + req->datalen = sizeof(req->get_dump); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->get_dump.bytes = bytes; + req->get_dump.nr_lane = nr_lane; + req->get_dump.bin_phy_lo = fw_bin_phy_lo; + req->get_dump.bin_phy_hi = fw_bin_phy_hi; +} + +static inline void build_set_dump(struct mbx_fw_cmd_req *req, int nr_lane, + int flag) +{ + req->flags = 0; + req->opcode = SET_DUMP; + req->datalen = sizeof(req->set_dump); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->set_dump.flag = flag; + req->set_dump.nr_lane = nr_lane; +} + +static inline void build_led_set(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, int value, void *cookie) +{ + req->flags = 0; + req->opcode = LED_SET; + req->datalen = sizeof(req->led_set); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->led_set.nr_lane = nr_lane; + req->led_set.value = value; +} + +static inline void build_set_lane_fun(struct mbx_fw_cmd_req *req, int nr_lane, + int fun, int value0, int value1, + int value2, int value3) +{ + req->flags = 0; + req->opcode = SET_LANE_FUN; + req->datalen = sizeof(req->set_lane_fun); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->set_lane_fun.func = fun; + req->set_lane_fun.nr_lane = nr_lane; + req->set_lane_fun.value0 = value0; + req->set_lane_fun.value1 = value1; + req->set_lane_fun.value2 = value2; + req->set_lane_fun.value3 = value3; +} + +static inline void build_set_phy_reg(struct mbx_fw_cmd_req *req, void *cookie, + enum PHY_INTERFACE phy_inf, char nr_lane, + int reg, int w_data, int recall_qsfp_page) +{ + req->flags = 0; + req->opcode = SET_PHY_REG; + req->datalen = sizeof(req->set_phy_reg); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + + req->set_phy_reg.phy_interface = phy_inf; + req->set_phy_reg.nr_lane = nr_lane; + req->set_phy_reg.phy_reg_addr = reg; + req->set_phy_reg.phy_w_data = w_data; + + if (recall_qsfp_page) + req->set_phy_reg.recall_qsfp_page = 1; + else + req->set_phy_reg.recall_qsfp_page = 0; +} + +static inline void build_get_phy_reg(struct mbx_fw_cmd_req *req, 
void *cookie, + enum PHY_INTERFACE phy_inf, char nr_lane, + int reg) +{ + req->flags = 0; + req->opcode = GET_PHY_REG; + req->datalen = sizeof(req->get_phy_reg); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + + req->get_phy_reg.phy_interface = phy_inf; + + req->get_phy_reg.nr_lane = nr_lane; + req->get_phy_reg.phy_reg_addr = reg; +} + +static inline void build_phy_pause_set(struct mbx_fw_cmd_req *req, + int pause_mode, int nr_lane) +{ + req->flags = 0; + req->opcode = PHY_PAUSE_SET; + req->datalen = sizeof(req->phy_pause_set); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->phy_pause_set.nr_lane = nr_lane; + req->phy_pause_set.pause_mode = pause_mode; +} + +static inline void build_phy_link_set(struct mbx_fw_cmd_req *req, + unsigned int adv, int nr_lane, + unsigned int autoneg, unsigned int speed, + unsigned int duplex, + unsigned int tp_mdix_ctrl) +{ + req->flags = 0; + req->opcode = PHY_LINK_SET; + req->datalen = sizeof(req->phy_link_set); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->phy_link_set.nr_lane = nr_lane; + req->phy_link_set.adv_speed_mask = adv; + req->phy_link_set.autoneg = autoneg; + req->phy_link_set.speed = speed; + req->phy_link_set.duplex = duplex; + req->phy_link_set.tp_mdix_ctrl = tp_mdix_ctrl; +} + +static inline void build_ifup_down(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, int up) +{ + req->flags = 0; + req->opcode = IFUP_DOWN; + req->datalen = sizeof(req->ifup); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->ifup.lane = nr_lane; + req->ifup.up = up; +} + +static inline void build_ifinsmod(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, int status) +{ + req->flags = 0; + req->opcode = DRIVER_INSMOD; + req->datalen = sizeof(req->ifinsmod); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->ifinsmod.lane = nr_lane; + req->ifinsmod.status = status; +} + +static inline void build_ifsuspuse(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, int status) +{ + req->flags = 0; + req->opcode = SYSTEM_SUSPUSE; + req->datalen = sizeof(req->ifsuspuse); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->ifinsmod.lane = nr_lane; + req->ifinsmod.status = status; +} + +static inline void build_ifforce(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, int status) +{ + req->flags = 0; + req->opcode = FORCE_LINK_ON_CLOSE; + req->datalen = sizeof(req->ifforce); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->ifforce.nr_lane = nr_lane; + req->ifforce.status = status; +} + +static inline void build_mbx_sfp_read(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, int sfp_addr, + int reg, int cnt, void *cookie) +{ + req->flags = 0; + req->opcode = SFP_MODULE_READ; + req->datalen = sizeof(req->sfp_read); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->sfp_read.nr_lane = nr_lane; + req->sfp_read.sfp_adr = sfp_addr; + req->sfp_read.reg = reg; + ; + req->sfp_read.cnt = cnt; +} + +static inline void build_mbx_sfp_write(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, int sfp_addr, + int reg, int v) +{ + req->flags = 0; + req->opcode = SFP_MODULE_WRITE; + req->datalen = sizeof(req->sfp_write); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->sfp_write.nr_lane = nr_lane; + req->sfp_write.sfp_adr = sfp_addr; + req->sfp_write.reg = reg; + req->sfp_write.val = v; +} + +static inline void build_mbx_wol_set(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, u32 mode) +{ + 
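+	/* note: the wol payload shares the request-data union; datalen below is taken from sizeof(req->sfp_write), which is larger than and covers the wol struct actually filled in here */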
req->flags = 0; + req->opcode = SET_WOL; + req->datalen = sizeof(req->sfp_write); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->wol.lane = nr_lane; + req->wol.enable = mode; +} + +/* enum link_event_mask or */ +static inline void build_link_set_event_mask(struct mbx_fw_cmd_req *req, + unsigned short event_mask, + unsigned short enable, + void *cookie) +{ + req->flags = 0; + req->opcode = SET_EVENT_MASK; + req->datalen = sizeof(req->stat_event_mask); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->stat_event_mask.event_mask = event_mask; + req->stat_event_mask.enable_stat = enable; +} + +static inline void +build_link_set_loopback_req(struct mbx_fw_cmd_req *req, void *cookie, + enum LOOPBACK_LEVEL level, + enum LOOPBACK_FORCE_SPEED force_speed) +{ + req->flags = 0; + req->opcode = SET_LOOPBACK_MODE; + req->datalen = sizeof(req->loopback); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + + req->loopback.loopback_level = level; + req->loopback.loopback_type = LOOPBACK_TYPE_LOCAL; + if (force_speed != LOOPBACK_FORCE_SPEED_NONE) { + req->loopback.loopback_force_speed = force_speed; + req->loopback.loopback_force_speed_enable = 1; + } +} + +static inline void build_ddr_csl(struct mbx_fw_cmd_req *req, void *cookie, + bool enable, dma_addr_t dma_phy, int bytes) +{ + req->flags = 0; + req->opcode = SET_DDR_CSL; + req->datalen = sizeof(req->ddr_csl); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + + req->ddr_csl.enable = enable; + + if (enable) { + req->ddr_csl.bytes = bytes; + req->ddr_csl.ddr_phy_hi = (dma_phy >> 32); + req->ddr_csl.ddr_phy_lo = dma_phy & 0xffffffff; + } else { + req->ddr_csl.bytes = 0; + } +} + +/* =========== errcode======= */ +enum MBX_ERR { + MBX_OK = 0, + MBX_ERR_NO_PERM, + MBX_ERR_INVAL_OPCODE, + MBX_ERR_INVALID_PARAM, + MBX_ERR_INVALID_ADDR, + MBX_ERR_INVALID_LEN, + MBX_ERR_NODEV, + MBX_ERR_IO, +}; + +int rnp_fw_get_capability(struct rnp_hw *hw, struct phy_abilities *abil); +#endif diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mpe.c b/drivers/net/ethernet/mucse/rnp/rnp_mpe.c new file mode 100755 index 0000000000000000000000000000000000000000..7b8f5c4efee6dd8a1c78384a85daf64755fd75c7 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_mpe.c @@ -0,0 +1,220 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#include +#include +#include + +#include "rnp_common.h" +#include "rnp_mbx.h" +#include "rnp_mpe.h" +#define MPE_FW_BIN "n10c/n10-mpe.bin" +#define MPE_FW_DATA "n10c/n10-mpe-data.bin" +#define MPE_RPU_BIN "n10c/n10-rpu.bin" + +extern unsigned int mpe_src_port; +extern unsigned int mpe_pkt_version; +#define CFG_RPU_OFFSET 0x100000 +/* 4010_0000 broadcast addr */ +#define START_MPE_REG 0x00198700 +/* 4019_8700 start all mpe */ + +/* RV_CORE_STATUS: 4000_6000 */ +#define RV_CORE0_WORING_REG 0x6000 +#define RPU_ID 0x6060 +/* read-only rpu id */ + +/* RPU_REG */ +#define RV_BROADCAST_START_REG (0x106000) +#define RPU_DMA_START_REG (0x110000) +#define RPU_ENDIAN_REG (0x110010) +#define N10_START_REG (0x106000) + +/* MPE0_ICCM: 4020_0000H */ +#define CFG_MPE_ICCM(nr) (0x200000 + (nr) * 0x80000) +#define CFG_MPE_DCCM(nr) (0x220000 + (nr) * 0x80000) + +#define RPU_CM3_BASE 0x40000000 +#define RPU_SDRAM_BASE (0x60000000) +#define SDRAM_DEFAULT_VAL (0x88481c00) + +#define iowrite32_arrary(rpubase, offset, array, size) \ + do { \ + int i; \ + for (i = 0; i < size; i++) { \ + rnp_wr_reg(((char *)(rpubase)) + (offset) + i * 4, \ + (array)[i]); \ + } \ + } while (0) + +static void rnp_reset_mpe_and_rpu(struct rnp_hw *hw) +{ +#define SYSCTL_CRG_CTRL12 0x30007030 +#define RPU_RESET_BIT 9 + + /* reset rpu/mpe/pub */ + cm3_reg_write32(hw, SYSCTL_CRG_CTRL12, BIT(RPU_RESET_BIT + 16) | 0); + smp_mb(); + mdelay(150); + cm3_reg_write32(hw, SYSCTL_CRG_CTRL12, + BIT(RPU_RESET_BIT + 16) | BIT(RPU_RESET_BIT)); + smp_mb(); + mdelay(100); +} + +static void rnp_start_rpu(char *rpu_base, int do_start) +{ + int mpe_start_v = 0xff, rpu_start_v = 0x1; + + if (do_start == 0) { + mpe_start_v = 0; + rpu_start_v = 0; + } + rnp_wr_reg(rpu_base + START_MPE_REG, mpe_start_v); + + /* start all rpu-rv-core */ + rnp_wr_reg(rpu_base + RV_BROADCAST_START_REG, rpu_start_v); + /* start rpu */ + rnp_wr_reg(rpu_base + RPU_DMA_START_REG, rpu_start_v); + + smp_mb(); +} + +/* + @rpu_base: mapped(0x4000_0000) + @mpe_bin : required + @mpe_data: optional + @rpu_bin : optional +*/ +static int +rnp_download_and_start_rpu(struct rnp_hw *hw, char *rpu_base, + const unsigned int *mpe_bin, const int mpe_bin_sz, + const unsigned int *mpe_data, const int mpe_data_sz, + const unsigned int *rpu_bin, const int rpu_sz) +{ + int nr = 0; + + rnp_info("MPE: rpu:%d mpe:%d mpe-data:%d. 
Downloading...\n", rpu_sz, + mpe_bin_sz, mpe_data_sz); + + rnp_reset_mpe_and_rpu(hw); + + /* download rpu firmeware */ + if (rpu_sz) { + iowrite32_arrary(rpu_base, CFG_RPU_OFFSET + 0x4000, rpu_bin, + rpu_sz / 4); + } + + /* download firmware to 4 mpe-core: mpe0,mpe1,mpe2,mpe3 */ + for (nr = 0; nr < 4; nr++) { + iowrite32_arrary(rpu_base, CFG_MPE_ICCM(nr), mpe_bin, + mpe_bin_sz / 4); + if (mpe_data_sz) + iowrite32_arrary(rpu_base, CFG_MPE_DCCM(nr), mpe_data, + mpe_data_sz / 4); + } + smp_mb(); + + /* Enable MPE */ + if (mpe_src_port != 0) { + printk("%s %d\n", __func__, __LINE__); + rnp_wr_reg(rpu_base + 0x100000, mpe_pkt_version); + rnp_wr_reg(rpu_base + 0x100004, mpe_src_port); + } + + /* start mpe */ + rnp_wr_reg(rpu_base + RPU_ENDIAN_REG, 0xf); + smp_mb(); + rnp_start_rpu(rpu_base, 1); + + return 0; +} + +/* + *load fw bin from: /lib/firmware/ directory + */ +static const struct firmware *rnp_load_fw(struct device *dev, + const char *fw_name) +{ + const struct firmware *fw; + int rc; + + rc = request_firmware(&fw, fw_name, dev); + if (rc != 0) { + // dev_warn( dev, "Faild to requesting firmware file: %s, %d\n", + // fw_name, rc); + return NULL; + } + + return fw; +} + +int rnp_rpu_mpe_start(struct rnp_adapter *adapter) +{ + const struct firmware *mpe_bin = NULL, *mpe_data = NULL, + *rpu_bin = NULL; + struct rnp_hw *hw = &adapter->hw; + int rpu_version, err = 0; + // u32 val = 0; + + rpu_version = cm3_reg_read32(hw, RPU_CM3_BASE + RPU_ID); + dev_info(&adapter->pdev->dev, "rpu_version:0x%x\n", rpu_version); + + if (rpu_version != 0x20201125) { + dev_info(&adapter->pdev->dev, "rpu not enabled!\n"); + return -1; + } + + dev_info(&adapter->pdev->dev, "rpu_addr=%p\n", hw->rpu_addr); + if (hw->rpu_addr == NULL) { + return -EINVAL; + } + + mpe_bin = rnp_load_fw(&adapter->pdev->dev, MPE_FW_BIN); + if (!mpe_bin) { + dev_warn(&adapter->pdev->dev, "can't load mpe fw:%s\n", + MPE_FW_BIN); + goto quit; + } + mpe_data = rnp_load_fw(&adapter->pdev->dev, MPE_FW_DATA); + if (!mpe_data) { + dev_warn(&adapter->pdev->dev, "no %s, ignored\n", MPE_FW_DATA); + } + rpu_bin = rnp_load_fw(&adapter->pdev->dev, MPE_RPU_BIN); + if (!rpu_bin) { + dev_warn(&adapter->pdev->dev, "no %s, ignored\n", MPE_RPU_BIN); + } + + err = rnp_download_and_start_rpu( + hw, hw->rpu_addr, (unsigned int *)mpe_bin->data, mpe_bin->size, + mpe_data ? (unsigned int *)mpe_data->data : NULL, + mpe_data ? mpe_data->size : 0, + rpu_bin ? (unsigned int *)rpu_bin->data : NULL, + rpu_bin ? rpu_bin->size : 0); + if (err != 0) { + dev_warn(&adapter->pdev->dev, "can't start mpe and rpu\n"); + goto quit; + } + + adapter->rpu_inited = 1; + +quit: + if (rpu_bin) { + release_firmware(rpu_bin); + } + if (mpe_data) + release_firmware(mpe_data); + if (mpe_bin) + release_firmware(mpe_bin); + return 0; +} + +void rnp_rpu_mpe_stop(struct rnp_adapter *adapter) +{ + if (adapter->rpu_inited && pci_channel_offline(adapter->pdev) == false) { + rnp_start_rpu(adapter->hw.rpu_addr, 0); + rnp_reset_mpe_and_rpu(&adapter->hw); + } + + adapter->rpu_inited = 0; +} diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mpe.h b/drivers/net/ethernet/mucse/rnp/rnp_mpe.h new file mode 100755 index 0000000000000000000000000000000000000000..d36fcb2a1b01323297f15f4ab493b95207be4fba --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_mpe.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#ifndef RNP_MPE_H +#define RNP_MPE_H + +#include "rnp.h" + +int rnp_rpu_mpe_start(struct rnp_adapter *adapter); +void rnp_rpu_mpe_stop(struct rnp_adapter *adapter); + +#endif //RNP_MPE_H diff --git a/drivers/net/ethernet/mucse/rnp/rnp_n10.c b/drivers/net/ethernet/mucse/rnp/rnp_n10.c new file mode 100755 index 0000000000000000000000000000000000000000..478ffc5a7132a4e2fffede2bc0ac42dae526e848 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_n10.c @@ -0,0 +1,5376 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include +#include +#include + +#include "rnp.h" +#include "rnp_phy.h" +#include "rnp_mbx.h" +#include "rnp_mbx_fw.h" +#include "rnp_pcs.h" +#include "rnp_ethtool.h" +#include "rnp_sriov.h" + +#define RNP_N400_MAX_VF 8 +#define RNP_N400_RSS_TBL_NUM 128 +#define RNP_N400_RSS_TC_TBL_NUM 8 +#define RNP_N400_MAX_TX_QUEUES 8 +#define RNP_N400_MAX_RX_QUEUES 8 +#define RNP_N400_RAR_NCSI_RAR_ENTRIES 0 + +#define RNP_N10_MAX_VF 64 +#define RNP_N10_RSS_TBL_NUM 128 +#define RNP_N10_RSS_TC_TBL_NUM 8 +#define RNP_N10_MAX_TX_QUEUES 128 +#define RNP_N10_MAX_RX_QUEUES 128 +#define RNP_N10_RAR_NCSI_RAR_ENTRIES 0 + +#if defined(NIC_VF_FXIED) || defined(VF_PROMISC_SUPPORT) +/* we use the last dmac to support vf promisc */ +#define RNP_N10_RAR_ENTRIES (127 - RNP_N10_RAR_NCSI_RAR_ENTRIES) +#else +#define RNP_N10_RAR_ENTRIES (128 - RNP_N10_RAR_NCSI_RAR_ENTRIES) +#endif + + +#define RNP_N10_MC_TBL_SIZE 128 +#define RNP_N10_VFT_TBL_SIZE 128 +#define RNP_N10_RX_PB_SIZE 512 +#ifndef RNP_N10_MSIX_VECTORS +#define RNP_N10_MSIX_VECTORS 64 +#endif +#define RNP_N400_MSIX_VECTORS 17 + +#define RNP10_MAX_LAYER2_FILTERS 16 +#define RNP10_MAX_TCAM_FILTERS 4096 +#define RNP10_MAX_TUPLE5_FILTERS 128 + + +/* setup queue speed limit to max_rate */ +static void rnp_dma_set_tx_maxrate_n10(struct rnp_dma_info *dma, u16 queue, + u32 max_rate) +{ +} + +/* setup mac with vf_num to veb table */ +static void rnp_dma_set_veb_mac_n10(struct rnp_dma_info *dma, u8 *mac, + u32 vfnum, u32 ring) +{ + u32 maclow, machi, ring_vfnum; + int port; + + maclow = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5]; + machi = (mac[0] << 8) | mac[1]; + ring_vfnum = ring | ((0x80 | vfnum) << 8); + for (port = 0; port < 4; port++) { + dma_wr32(dma, RNP10_DMA_PORT_VBE_MAC_LO_TBL(port, vfnum), + maclow); + dma_wr32(dma, RNP10_DMA_PORT_VBE_MAC_HI_TBL(port, vfnum), + machi); + dma_wr32(dma, RNP10_DMA_PORT_VEB_VF_RING_TBL(port, vfnum), + ring_vfnum); + } +} + +/* setup vlan with vf_num to veb table */ +static void rnp_dma_set_veb_vlan_n10(struct rnp_dma_info *dma, u16 vlan, + u32 vfnum) +{ + int port; + + /* each vf can support only one vlan */ + for (port = 0; port < 4; port++) + dma_wr32(dma, RNP10_DMA_PORT_VEB_VID_TBL(port, vfnum), vlan); +} + +static void rnp_dma_clr_veb_all_n10(struct rnp_dma_info *dma) +{ + int port, i; + + for (port = 0; port < 4; port++) { + for (i = 0; i < VEB_TBL_CNTS; i++) { + dma_wr32(dma, RNP_DMA_PORT_VBE_MAC_LO_TBL(port, i), 0); + dma_wr32(dma, RNP_DMA_PORT_VBE_MAC_HI_TBL(port, i), 0); + dma_wr32(dma, RNP_DMA_PORT_VEB_VID_TBL(port, i), 0); + dma_wr32(dma, RNP_DMA_PORT_VEB_VF_RING_TBL(port, i), 0); + } + } +} + +static struct rnp_dma_operations dma_ops_n10 = { + .set_tx_maxrate = &rnp_dma_set_tx_maxrate_n10, + .set_veb_mac = &rnp_dma_set_veb_mac_n10, + .set_veb_vlan = &rnp_dma_set_veb_vlan_n10, + .clr_veb_all = &rnp_dma_clr_veb_all_n10, + +}; + +/** + * rnp_eth_set_rar_n10 - Set Rx address register + * @eth: pointer to eth structure + * @index: Receive address 
register to write + * @addr: Address to put into receive address register + * @enable_addr: set flag that address is active + * + * Puts an ethernet address into a receive address register. + **/ +static s32 rnp_eth_set_rar_n10(struct rnp_eth_info *eth, u32 index, u8 *addr, + bool enable_addr) +{ + u32 mcstctrl; + u32 rar_low, rar_high = 0; + u32 rar_entries = eth->num_rar_entries; + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + + /* Make sure we are using a valid rar index range */ + if (index >= (rar_entries + hw->ncsi_rar_entries)) { + rnp_err("RAR index %d is out of range.\n", index); + return RNP_ERR_INVALID_ARGUMENT; + } + + eth_dbg(eth, " RAR[%d] <= %pM\n", index, addr); + + /* + * HW expects these in big endian so we reverse the byte + * order from network order (big endian) to little endian + */ + rar_low = ((u32)addr[5] | ((u32)addr[4] << 8) | ((u32)addr[3] << 16) | + ((u32)addr[2] << 24)); + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + rar_high = eth_rd32(eth, RNP10_ETH_RAR_RH(index)); + rar_high &= ~(0x0000FFFF | RNP10_RAH_AV); + rar_high |= ((u32)addr[1] | ((u32)addr[0] << 8)); + + if (enable_addr) + rar_high |= RNP10_RAH_AV; + + eth_wr32(eth, RNP10_ETH_RAR_RL(index), rar_low); + eth_wr32(eth, RNP10_ETH_RAR_RH(index), rar_high); + + /* + * Enable the unicast filter table. We do not add unicast entries + * here, but the dest-mac filter is OR'ed with this table, so all + * packets would be passed up if the table stayed disabled. + */ + mcstctrl = eth_rd32(eth, RNP10_ETH_DMAC_MCSTCTRL); + mcstctrl |= RNP10_MCSTCTRL_UNICASE_TBL_EN; + eth_wr32(eth, RNP10_ETH_DMAC_MCSTCTRL, mcstctrl); + + return 0; +} + +/** + * rnp_eth_clear_rar_n10 - Remove Rx address register + * @eth: pointer to eth structure + * @index: Receive address register to clear + * + * Clears an ethernet address from a receive address register. + **/ +static s32 rnp_eth_clear_rar_n10(struct rnp_eth_info *eth, u32 index) +{ + u32 rar_high; + u32 rar_entries = eth->num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + eth_dbg(eth, "RAR index %d is out of range.\n", index); + return RNP_ERR_INVALID_ARGUMENT; + } + + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. 
+ */ + rar_high = eth_rd32(eth, RNP10_ETH_RAR_RH(index)); + rar_high &= ~(0x0000FFFF | RNP10_RAH_AV); + + eth_wr32(eth, RNP10_ETH_RAR_RL(index), 0); + eth_wr32(eth, RNP10_ETH_RAR_RH(index), rar_high); + /* clear VMDq pool/queue selection for this RAR */ + eth->ops.clear_vmdq(eth, index, RNP_CLEAR_VMDQ_ALL); + + return 0; +} + +/** + * rnp_eth_set_vmdq_n10 - Associate a VMDq pool index with a rx address + * @eth: pointer to eth struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq pool index + * only mac->vf + **/ +static s32 rnp_eth_set_vmdq_n10(struct rnp_eth_info *eth, u32 rar, u32 vmdq) +{ + u32 rar_entries = eth->num_rar_entries; + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + eth_dbg(eth, "RAR index %d is out of range.\n", rar); + return RNP_ERR_INVALID_ARGUMENT; + } + /* n400 should use like this + * ---------- + * vf0 | vf1 | vf2 + * n400 4 | 8 | 12 + * n10 2 | 4 | 6 + * n10(1)0 | 2 | 4 + * not good here + */ + if (hw->hw_type == rnp_hw_n400) + eth_wr32(eth, RNP10_VM_DMAC_MPSAR_RING(rar), vmdq * 2); + else + eth_wr32(eth, RNP10_VM_DMAC_MPSAR_RING(rar), vmdq); + + return 0; +} + +/** + * rnp_eth_clear_vmdq_n10 - Disassociate a VMDq pool index from a rx address + * @eth: pointer to eth struct + * @rar: receive address register index to disassociate + * @vmdq: VMDq pool index to remove from the rar + **/ +static s32 rnp_eth_clear_vmdq_n10(struct rnp_eth_info *eth, u32 rar, u32 vmdq) +{ + u32 rar_entries = eth->num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + eth_dbg(eth, "RAR index %d is out of range.\n", rar); + return RNP_ERR_INVALID_ARGUMENT; + } + + eth_wr32(eth, RNP10_VM_DMAC_MPSAR_RING(rar), 0); + + return 0; +} + +static s32 rnp10_mta_vector(struct rnp_eth_info *eth, u8 *mc_addr) +{ + u32 vector = 0; + + switch (eth->mc_filter_type) { + case 0: /* use bits [36:47] of the address */ + vector = ((mc_addr[4] << 8) | (((u16)mc_addr[5]))); + break; + case 1: /* use bits [35:46] of the address */ + vector = ((mc_addr[4] << 7) | (((u16)mc_addr[5]) >> 1)); + break; + case 2: /* use bits [34:45] of the address */ + vector = ((mc_addr[4] << 6) | (((u16)mc_addr[5]) >> 2)); + break; + case 3: /* use bits [32:43] of the address */ + vector = ((mc_addr[4] << 5) | (((u16)mc_addr[5]) >> 3)); + break; + default: /* Invalid mc_filter_type */ + hw_dbg(hw, "MC filter type param set incorrectly\n"); + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +static void rnp10_set_mta(struct rnp_hw *hw, u8 *mc_addr) +{ + u32 vector; + u32 vector_bit; + u32 vector_reg; + struct rnp_eth_info *eth = &hw->eth; + + hw->addr_ctrl.mta_in_use++; + + vector = rnp10_mta_vector(eth, mc_addr); + + /* + * The MTA is a register array of 128 32-bit registers. It is treated + * like an array of 4096 bits. We want to set bit + * BitArray[vector_value]. So we figure out what register the bit is + * in, read it, OR in the new bit, then write back the new value. The + * register is determined by the upper 7 bits of the vector value and + * the bit within that register are determined by the lower 5 bits of + * the value. 
+ */ + vector_reg = (vector >> 5) & 0x7F; + vector_bit = vector & 0x1F; + hw_dbg(hw, "\t\t%pM: MTA-BIT:%4d, MTA_REG[%d][%d] <= 1\n", mc_addr, + vector, vector_reg, vector_bit); + eth->mta_shadow[vector_reg] |= (1 << vector_bit); +} + +static void rnp10_set_vf_mta(struct rnp_hw *hw, u16 vector) +{ + u32 vector_bit; + u32 vector_reg; + struct rnp_eth_info *eth = &hw->eth; + + hw->addr_ctrl.mta_in_use++; + + vector_reg = (vector >> 5) & 0x7F; + vector_bit = vector & 0x1F; + hw_dbg(hw, "\t\t vf M: MTA-BIT:%4d, MTA_REG[%d][%d] <= 1\n", vector, + vector_reg, vector_bit); + eth->mta_shadow[vector_reg] |= (1 << vector_bit); +} + +static u8 *rnp_addr_list_itr(struct rnp_hw __maybe_unused *hw, u8 **mc_addr_ptr) +{ +#ifdef NETDEV_HW_ADDR_T_MULTICAST + struct netdev_hw_addr *mc_ptr; +#else + struct dev_mc_list *mc_ptr; +#endif + u8 *addr = *mc_addr_ptr; + +#ifdef NETDEV_HW_ADDR_T_MULTICAST + mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]); + if (mc_ptr->list.next) { + struct netdev_hw_addr *ha; + + ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list); + *mc_addr_ptr = ha->addr; + } +#else + mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]); + if (mc_ptr->next) + *mc_addr_ptr = mc_ptr->next->dmi_addr; +#endif + else + *mc_addr_ptr = NULL; + + return addr; +} + +/** + * rnp_update_mc_addr_list_n10 - Updates MAC list of multicast addresses + * @hw: pointer to hardware structure + * @netdev: pointer to net device structure + * + * The given list replaces any existing list. Clears the MC addrs from receive + * address registers and the multicast table. Uses unused receive address + * registers for the first multicast addresses, and hashes the rest into the + * multicast table. + **/ +static s32 rnp_eth_update_mc_addr_list_n10(struct rnp_eth_info *eth, + struct net_device *netdev, + bool sriov_on) +{ + struct rnp_hw *hw = (struct rnp_hw *)eth->back; +#ifdef NETDEV_HW_ADDR_T_MULTICAST + struct netdev_hw_addr *ha; +#endif + u32 i; + u32 v; + int addr_count = 0; + u8 *addr_list = NULL; + + /* + * Set the new number of MC addresses that we are being requested to + * use. 
+ */ + hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); + hw->addr_ctrl.mta_in_use = 0; + + /* Clear mta_shadow */ + eth_dbg(eth, " Clearing MTA(multicast table)\n"); + + memset(&eth->mta_shadow, 0, sizeof(eth->mta_shadow)); + + /* Update mta shadow */ + eth_dbg(eth, " Updating MTA..\n"); + + addr_count = netdev_mc_count(netdev); + +#ifdef NETDEV_HW_ADDR_T_MULTICAST + ha = list_first_entry(&netdev->mc.list, struct netdev_hw_addr, list); + addr_list = ha->addr; +#else + addr_list = netdev->mc_list->dmi_addr; +#endif + for (i = 0; i < addr_count; i++) { + eth_dbg(eth, " Adding the multicast addresses:\n"); + rnp10_set_mta(hw, rnp_addr_list_itr(hw, &addr_list)); + } + + if (hw->ncsi_en) { + eth->ops.ncsi_set_mc_mta(eth); + } + + if (sriov_on) { + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + if (!test_and_set_bit(__RNP_USE_VFINFI, &adapter->state)) { + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo) { + struct vf_data_storage *vfinfo = + &adapter->vfinfo[i]; + int j; + + for (j = 0; + j < vfinfo->num_vf_mc_hashes; j++) + rnp10_set_vf_mta( + hw, + vfinfo->vf_mc_hashes[j]); + } + } + clear_bit(__RNP_USE_VFINFI, &adapter->state); + } + } + + /* Enable mta */ + for (i = 0; i < hw->eth.mcft_size; i++) { + if (hw->addr_ctrl.mta_in_use) + eth_wr32(eth, RNP10_ETH_MULTICAST_HASH_TABLE(i), + eth->mta_shadow[i]); + } + + if (hw->addr_ctrl.mta_in_use > 0) { + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + if (!(adapter->flags & RNP_FLAG_SWITCH_LOOPBACK_EN)) { + v = eth_rd32(eth, RNP10_ETH_DMAC_MCSTCTRL); + eth_wr32(eth, RNP10_ETH_DMAC_MCSTCTRL, + v | RNP10_MCSTCTRL_MULTICASE_TBL_EN | + eth->mc_filter_type); + } + } + + eth_dbg(eth, " update MTA Done. mta_in_use:%d\n", + hw->addr_ctrl.mta_in_use); + return hw->addr_ctrl.mta_in_use; +} + +/* clean all mc addr */ +static void rnp_eth_clr_mc_addr_n10(struct rnp_eth_info *eth) +{ + int i; + + for (i = 0; i < eth->mcft_size; i++) + eth_wr32(eth, RNP10_ETH_MULTICAST_HASH_TABLE(i), 0); +} + +/** + * rnp_eth_update_rss_key_n10 - Update the RSS hash key + * @eth: pointer to eth structure + * @sriov_flag: sriov status + * + * update rss key to eth regs + **/ +static void rnp_eth_update_rss_key_n10(struct rnp_eth_info *eth, bool sriov_flag) +{ + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + int i; + u8 *key_temp; + int key_len = RNP_RSS_KEY_SIZE; + u8 *key = hw->rss_key; + u32 *value; + + u32 iov_en = (sriov_flag) ? 
RNP10_IOV_ENABLED : 0; + + key_temp = kmalloc(key_len, GFP_KERNEL); + if (!key_temp) + return; + /* reorder the key */ + for (i = 0; i < key_len; i++) + *(key_temp + key_len - i - 1) = *(key + i); + + value = (u32 *)key_temp; + for (i = 0; i < key_len; i = i + 4) + eth_wr32(eth, RNP10_ETH_RSS_KEY + i, *(value + i / 4)); + + kfree(key_temp); + + /* open rss now */ + eth_wr32(eth, RNP10_ETH_RSS_CONTROL, + RNP10_ETH_ENABLE_RSS_ONLY | iov_en); +} + +/** + * rnp_eth_update_rss_table_n10 - Update the RSS redirection table + * @eth: pointer to eth structure + * + * update rss table to eth regs + **/ +static void rnp_eth_update_rss_table_n10(struct rnp_eth_info *eth) +{ + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + u32 reta_entries = hw->rss_indir_tbl_num; + u32 tc_entries = hw->rss_tc_tbl_num; + int i; + + for (i = 0; i < tc_entries; i++) + eth_wr32(eth, RNP10_ETH_TC_IPH_OFFSET_TABLE(i), + hw->rss_tc_tbl[i]); + + for (i = 0; i < reta_entries; i++) + eth_wr32(eth, RNP10_ETH_RSS_INDIR_TBL(i), hw->rss_indir_tbl[i]); +} + +/** + * rnp_eth_set_vfta_n10 - Set VLAN filter table + * @eth: pointer to eth structure + * @vlan: VLAN id to write to VLAN filter + * @vlan_on: boolean flag to turn on/off VLAN in VFTA + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +static s32 rnp_eth_set_vfta_n10(struct rnp_eth_info *eth, u32 vlan, bool vlan_on) +{ + s32 regindex; + u32 bitindex; + u32 vfta; + u32 targetbit; + bool vfta_changed = false; + + /* todo: in vf mode the vlvf register can be set according to vind */ + if (vlan > 4095) + return RNP_ERR_PARAM; + + regindex = (vlan >> 5) & 0x7F; + bitindex = vlan & 0x1F; + targetbit = (1 << bitindex); + vfta = eth_rd32(eth, RNP10_VFTA(regindex)); + + if (vlan_on) { + if (!(vfta & targetbit)) { + vfta |= targetbit; + vfta_changed = true; + } + } else { + if ((vfta & targetbit)) { + vfta &= ~targetbit; + vfta_changed = true; + } + } + + if (vfta_changed) + eth_wr32(eth, RNP10_VFTA(regindex), vfta); + return 0; +} + +static void rnp_eth_clr_vfta_n10(struct rnp_eth_info *eth) +{ + u32 offset; + + for (offset = 0; offset < eth->vft_size; offset++) + eth_wr32(eth, RNP10_VFTA(offset), 0); +} + +/** + * rnp_eth_set_vlan_filter_n10 - Enable/disable the VLAN filter + * @eth: pointer to eth structure + * @status: on/off + * Turn on/off VLAN filter table. 
+ **/ +static void rnp_eth_set_vlan_filter_n10(struct rnp_eth_info *eth, bool status) +{ +#define ETH_VLAN_FILTER_BIT (30) + u32 value = eth_rd32(eth, RNP10_ETH_VLAN_FILTER_ENABLE); + + /* clear bit first */ + value &= (~(0x01 << ETH_VLAN_FILTER_BIT)); + if (status) + value |= (0x01 << ETH_VLAN_FILTER_BIT); + eth_wr32(eth, RNP10_ETH_VLAN_FILTER_ENABLE, value); +} + +static u16 rnp_layer2_pritologic_n10(u16 hw_id) +{ + return hw_id; +} + +static void rnp_eth_set_layer2_n10(struct rnp_eth_info *eth, + union rnp_atr_input *input, u16 pri_id, u8 queue, + bool prio_flag) +{ + u16 hw_id; + + hw_id = rnp_layer2_pritologic_n10(pri_id); + /* enable layer2 */ + eth_wr32(eth, RNP10_ETH_LAYER2_ETQF(hw_id), + (0x1 << 31) | (ntohs(input->layer2_formate.proto))); + + /* setup action */ + if (queue == RNP_FDIR_DROP_QUEUE) { + eth_wr32(eth, RNP10_ETH_LAYER2_ETQS(hw_id), (0x1 << 31)); + } else { + if (queue == ACTION_TO_MPE) + eth_wr32(eth, RNP10_ETH_LAYER2_ETQS(hw_id), + (0x1 << 29) | (MPE_PORT << 16)); + else + /* setup ring_number */ + eth_wr32(eth, RNP10_ETH_LAYER2_ETQS(hw_id), + (0x1 << 30) | (queue << 20)); + } +} + +static void rnp_eth_clr_layer2_n10(struct rnp_eth_info *eth, u16 pri_id) +{ + u16 hw_id; + + hw_id = rnp_layer2_pritologic_n10(pri_id); + eth_wr32(eth, RNP10_ETH_LAYER2_ETQF(hw_id), 0); +} + +static void rnp_eth_clr_all_layer2_n10(struct rnp_eth_info *eth) +{ + int i; +#define RNP10_MAX_LAYER2_FILTERS 16 + for (i = 0; i < RNP10_MAX_LAYER2_FILTERS; i++) + eth_wr32(eth, RNP10_ETH_LAYER2_ETQF(i), 0); +} + +static u16 rnp_tuple5_pritologic_n10(u16 hw_id) +{ + return hw_id; +} + +static u16 rnp_tuple5_pritologic_tcam_n10(u16 pri_id) +{ + int i; + int hw_id = 0; + int step = 32; + for (i = 0; i < pri_id; i++) { + hw_id += step; + if (hw_id > RNP10_MAX_TCAM_FILTERS) + hw_id = hw_id - RNP10_MAX_TCAM_FILTERS + 1; + } + + return hw_id; +} + +static void rnp_eth_set_tuple5_n10(struct rnp_eth_info *eth, + union rnp_atr_input *input, u16 pri_id, u8 queue, + bool prio_flag) +{ + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + +#define RNP10_SRC_IP_MASK BIT(0) +#define RNP10_DST_IP_MASK BIT(1) +#define RNP10_SRC_PORT_MASK BIT(2) +#define RNP10_DST_PORT_MASK BIT(3) +#define RNP10_L4_PROTO_MASK BIT(4) + + if (hw->fdir_mode != fdir_mode_tcam) { + u32 port = 0; + u8 mask_temp = 0; + u8 l4_proto_type = 0; + u16 hw_id; + + hw_id = rnp_tuple5_pritologic_n10(pri_id); + dbg("try to eable tuple 5 %x\n", hw_id); + if (input->formatted.src_ip[0] != 0) { + eth_wr32(eth, RNP10_ETH_TUPLE5_SAQF(hw_id), + htonl(input->formatted.src_ip[0])); + } else { + mask_temp |= RNP10_SRC_IP_MASK; + } + if (input->formatted.dst_ip[0] != 0) { + eth_wr32(eth, RNP10_ETH_TUPLE5_DAQF(hw_id), + htonl(input->formatted.dst_ip[0])); + } else + mask_temp |= RNP10_DST_IP_MASK; + if (input->formatted.src_port != 0) + port |= (htons(input->formatted.src_port)); + else + mask_temp |= RNP10_SRC_PORT_MASK; + if (input->formatted.dst_port != 0) + port |= (htons(input->formatted.dst_port) << 16); + else + mask_temp |= RNP10_DST_PORT_MASK; + + if (port != 0) + eth_wr32(eth, RNP10_ETH_TUPLE5_SDPQF(hw_id), port); + + switch (input->formatted.flow_type) { + case RNP_ATR_FLOW_TYPE_TCPV4: + l4_proto_type = IPPROTO_TCP; + break; + case RNP_ATR_FLOW_TYPE_UDPV4: + l4_proto_type = IPPROTO_UDP; + break; + case RNP_ATR_FLOW_TYPE_SCTPV4: + l4_proto_type = IPPROTO_SCTP; + break; + case RNP_ATR_FLOW_TYPE_IPV4: + l4_proto_type = input->formatted.inner_mac[0]; + break; + default: + l4_proto_type = 0; + } + + if (l4_proto_type == 0) + mask_temp |= 
RNP10_L4_PROTO_MASK; + + /* setup ftqf*/ + /* always set 0x3 */ + eth_wr32(eth, RNP10_ETH_TUPLE5_FTQF(hw_id), + (1 << 31) | (mask_temp << 25) | (l4_proto_type << 16) | + 0x3); + + /* setup action */ + if (queue == RNP_FDIR_DROP_QUEUE) { + eth_wr32(eth, RNP10_ETH_TUPLE5_POLICY(hw_id), + (0x1 << 31)); + } else { + if (queue == ACTION_TO_MPE) + eth_wr32(eth, RNP10_ETH_TUPLE5_POLICY(hw_id), + (0x1 << 29) | (MPE_PORT << 16)); + else + eth_wr32(eth, RNP10_ETH_TUPLE5_POLICY(hw_id), + ((0x1 << 30) | (queue << 20))); + } + + } else { + u32 port = 0; + u32 port_mask = 0; + u8 l4_proto_type = 0; + u8 l4_proto_mask = 0xff; + u32 action = 0; + u32 mark = 0; + u16 hw_id; + + hw_id = rnp_tuple5_pritologic_tcam_n10(pri_id); + eth_wr32(eth, RNP10_TCAM_MODE, 2); + dbg("try to eable tcam %d\n", hw_id); + if (input->formatted.src_ip[0] != 0) { + eth_wr32(eth, RNP10_TCAM_SAQF(hw_id), + htonl(input->formatted.src_ip[0])); + eth_wr32(eth, RNP10_TCAM_SAQF_MASK(hw_id), + htonl(input->formatted.src_ip_mask[0])); + + dbg("tcam src ip 0%x ---> 0x%x\n", + htonl(input->formatted.src_ip[0]), + RNP10_TCAM_SAQF(hw_id)); + dbg("tcam src ip mask 0%x ---> 0x%x\n", + htonl(input->formatted.src_ip_mask[0]), + RNP10_TCAM_SAQF_MASK(hw_id)); + } else { + eth_wr32(eth, RNP10_TCAM_SAQF(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_SAQF_MASK(hw_id), 0); + dbg("tcam src ip 0%x ---> 0x%x\n", 0, + RNP10_TCAM_SAQF(hw_id)); + dbg("tcam src ip mask 0%x ---> 0x%x\n", 0, + RNP10_TCAM_SAQF_MASK(hw_id)); + } + if (input->formatted.dst_ip[0] != 0) { + eth_wr32(eth, RNP10_TCAM_DAQF(hw_id), + htonl(input->formatted.dst_ip[0])); + eth_wr32(eth, RNP10_TCAM_DAQF_MASK(hw_id), + htonl(input->formatted.dst_ip_mask[0])); + dbg("tcam dst ip 0%x ---> 0x%x\n", + htonl(input->formatted.dst_ip[0]), + RNP10_TCAM_DAQF(hw_id)); + dbg("tcam dst ip mask 0%x ---> 0x%x\n", + htonl(input->formatted.dst_ip_mask[0]), + RNP10_TCAM_DAQF_MASK(hw_id)); + } else { + eth_wr32(eth, RNP10_TCAM_DAQF(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_DAQF_MASK(hw_id), 0); + dbg("tcam dst ip 0%x ---> 0x%x\n", 0, + RNP10_TCAM_DAQF(hw_id)); + dbg("tcam dst ip mask 0%x ---> 0x%x\n", 0, + RNP10_TCAM_DAQF_MASK(hw_id)); + } + if (input->formatted.src_port != 0) { + port |= (htons(input->formatted.src_port) << 16); + port_mask |= + (htons(input->formatted.src_port_mask) << 16); + + } + if (input->formatted.dst_port != 0) { + port |= (htons(input->formatted.dst_port)); + port_mask |= (htons(input->formatted.dst_port_mask)); + } + + /* setup src & dst port */ + if (port != 0) { + eth_wr32(eth, RNP10_TCAM_SDPQF(hw_id), port); + eth_wr32(eth, RNP10_TCAM_SDPQF_MASK(hw_id), port_mask); + + dbg("tcam port 0%x ---> 0x%x\n", port, + RNP10_TCAM_SDPQF(hw_id)); + dbg("tcam port mask 0%x ---> 0x%x\n", port_mask, + RNP10_TCAM_SDPQF_MASK(hw_id)); + } else { + eth_wr32(eth, RNP10_TCAM_SDPQF(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_SDPQF_MASK(hw_id), 0); + dbg("tcam port 0%x ---> 0x%x\n", port, + RNP10_TCAM_SDPQF(hw_id)); + dbg("tcam port mask 0%x ---> 0x%x\n", port_mask, + RNP10_TCAM_SDPQF_MASK(hw_id)); + } + + switch (input->formatted.flow_type) { + case RNP_ATR_FLOW_TYPE_TCPV4: + l4_proto_type = IPPROTO_TCP; + break; + case RNP_ATR_FLOW_TYPE_UDPV4: + l4_proto_type = IPPROTO_UDP; + break; + case RNP_ATR_FLOW_TYPE_SCTPV4: + l4_proto_type = IPPROTO_SCTP; + break; + case RNP_ATR_FLOW_TYPE_IPV4: + l4_proto_type = input->formatted.inner_mac[0]; + l4_proto_mask = input->formatted.inner_mac_mask[0]; + break; + default: + l4_proto_type = 0; + l4_proto_mask = 0; + } + + if (l4_proto_type != 0) { + action |= l4_proto_type; + mark 
|= l4_proto_mask; + } else { + } + + /* setup action */ + if (queue == RNP_FDIR_DROP_QUEUE) { + eth_wr32(eth, RNP10_TCAM_APQF(hw_id), + (0x1 << 31) | action); + eth_wr32(eth, RNP10_TCAM_APQF_MASK(hw_id), mark); + dbg("tcam action 0%x ---> 0x%x\n", (0x1 << 31) | action, + RNP10_TCAM_APQF(hw_id)); + dbg("tcam action mask 0%x ---> 0x%x\n", mark, + RNP10_TCAM_APQF_MASK(hw_id)); + } else { + if (queue == ACTION_TO_MPE) { + eth_wr32(eth, RNP10_TCAM_APQF(hw_id), + (0x1 << 29) | (MPE_PORT << 24) | + action); + } else { + eth_wr32(eth, RNP10_TCAM_APQF(hw_id), + ((0x1 << 30) | (queue << 16) | + action)); + } + eth_wr32(eth, RNP10_TCAM_APQF_MASK(hw_id), mark); + + dbg("tcam action 0%x ---> 0x%x\n", + (0x1 << 30) | (queue << 16) | action, + RNP10_TCAM_APQF(hw_id)); + dbg("tcam action mask 0%x ---> 0x%x\n", mark, + RNP10_TCAM_APQF_MASK(hw_id)); + } + eth_wr32(eth, RNP10_TCAM_MODE, 1); + } +} + +static void rnp_eth_clr_tuple5_n10(struct rnp_eth_info *eth, u16 pri_id) +{ + u16 hw_id; + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + + if (hw->fdir_mode != fdir_mode_tcam) { + hw_id = rnp_tuple5_pritologic_n10(pri_id); + eth_wr32(eth, RNP10_ETH_TUPLE5_FTQF(hw_id), 0); + } else { + hw_id = rnp_tuple5_pritologic_tcam_n10(pri_id); + dbg("disable tcam tuple5 %d\n", hw_id); + /* earase tcam */ + eth_wr32(eth, RNP10_TCAM_MODE, 2); + eth_wr32(eth, RNP10_TCAM_SAQF(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_SAQF_MASK(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_DAQF(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_DAQF_MASK(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_SDPQF(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_SDPQF_MASK(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_APQF(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_APQF_MASK(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_MODE, 1); + } +} + +static void rnp_eth_clr_all_tuple5_n10(struct rnp_eth_info *eth) +{ + int i; + + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + + if (hw->fdir_mode != fdir_mode_tcam) { + for (i = 0; i < RNP10_MAX_TUPLE5_FILTERS; i++) + eth_wr32(eth, RNP10_ETH_TUPLE5_FTQF(i), 0); + eth_wr32(eth, RNP10_ETH_TCAM_EN, 0); + } else { + /* todo earase tcm */ + eth_wr32(eth, RNP10_ETH_TCAM_EN, 1); + eth_wr32(eth, RNP10_TOP_ETH_TCAM_CONFIG_ENABLE, 1); + eth_wr32(eth, RNP10_TCAM_MODE, 2); + /* dont't open tcam cache */ + eth_wr32(eth, RNP10_TCAM_CACHE_ENABLE, 0); + + for (i = 0; i < RNP10_MAX_TCAM_FILTERS; i++) { + eth_wr32(eth, RNP10_TCAM_SDPQF(i), 0); + eth_wr32(eth, RNP10_TCAM_DAQF(i), 0); + eth_wr32(eth, RNP10_TCAM_SAQF(i), 0); + eth_wr32(eth, RNP10_TCAM_APQF(i), 0); + + eth_wr32(eth, RNP10_TCAM_SDPQF_MASK(i), 0); + eth_wr32(eth, RNP10_TCAM_DAQF_MASK(i), 0); + eth_wr32(eth, RNP10_TCAM_SAQF_MASK(i), 0); + eth_wr32(eth, RNP10_TCAM_APQF_MASK(i), 0); + } + eth_wr32(eth, RNP10_TCAM_MODE, 1); + } +} + +static void rnp_eth_set_tcp_sync_n10(struct rnp_eth_info *eth, + int queue, bool flag, + bool prio) +{ + if (flag) + eth_wr32(eth, RNP10_ETH_SYNQF, (0x1 << 30) | (queue << 20)); + else + eth_wr32(eth, RNP10_ETH_SYNQF, 0); +} + +static void rnp_eth_set_min_max_packets_n10(struct rnp_eth_info *eth, int min, + int max) +{ + eth_wr32(eth, RNP10_ETH_DEFAULT_RX_MIN_LEN, min); + eth_wr32(eth, RNP10_ETH_DEFAULT_RX_MAX_LEN, max); +} + +static void rnp_eth_set_vlan_strip_n10(struct rnp_eth_info *eth, u16 queue, + bool enable) +{ + u32 reg = RNP10_ETH_VLAN_VME_REG(queue / 32); + u32 offset = queue % 32; + u32 data = eth_rd32(eth, reg); + + if (enable == true) + data |= (1 << offset); + else + data &= ~(1 << offset); + + eth_wr32(eth, reg, data); +} + +static void rnp_eth_set_vxlan_port_n10(struct 
rnp_eth_info *eth, u32 port) +{ + eth_wr32(eth, RNP10_ETH_VXLAN_PORT, port); +} + +static void rnp_eth_set_vxlan_mode_n10(struct rnp_eth_info *eth, bool inner) +{ + if (inner) + eth_wr32(eth, RNP10_ETH_WRAP_FIELD_TYPE, 1); + else + eth_wr32(eth, RNP10_ETH_WRAP_FIELD_TYPE, 0); +} + +static void rnp_eth_set_rx_hash_n10(struct rnp_eth_info *eth, bool status, + bool sriov_flag) +{ + u32 iov_en = (sriov_flag) ? RNP10_IOV_ENABLED : 0; + + if (status) { + eth_wr32(eth, RNP10_ETH_RSS_CONTROL, + RNP10_ETH_ENABLE_RSS_ONLY | iov_en); + } else { + eth_wr32(eth, RNP10_ETH_RSS_CONTROL, + RNP10_ETH_DISABLE_RSS | iov_en); + } +} + +static s32 rnp_eth_set_fc_mode_n10(struct rnp_eth_info *eth) +{ + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + s32 ret_val = 0; + int i; + + for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & rnp_fc_tx_pause) && + hw->fc.high_water[i]) { + if (!hw->fc.low_water[i] || + hw->fc.low_water[i] >= hw->fc.high_water[i]) { + hw_dbg(hw, + "Invalid water mark configuration\n"); + ret_val = RNP_ERR_INVALID_LINK_SETTINGS; + goto out; + } + } + } + + for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & rnp_fc_tx_pause)) { + if (hw->fc.high_water[i]) { + eth_wr32(eth, RNP10_ETH_HIGH_WATER(i), + hw->fc.high_water[i]); + } + if (hw->fc.low_water[i]) { + eth_wr32(eth, RNP10_ETH_LOW_WATER(i), + hw->fc.low_water[i]); + } + } + } +out: + return ret_val; +} + +static void rnp_eth_set_vf_vlan_mode_n10(struct rnp_eth_info *eth, u16 vlan, + int vf, bool enable) +{ + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + u32 value = vlan; + if (enable) + value |= BIT(31); + + eth_wr32(eth, RNP10_VLVF(vf), value); + + /* todo: should consider multiple queues */ + if (hw->hw_type == rnp_hw_n400) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + eth_wr32(eth, RNP10_VLVF_TABLE(vf), (vf + 1) * 2); + else + eth_wr32(eth, RNP10_VLVF_TABLE(vf), vf * 2); + + } else { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + eth_wr32(eth, RNP10_VLVF_TABLE(vf), vf + 1); + else + eth_wr32(eth, RNP10_VLVF_TABLE(vf), vf); + } +} + +static int __get_ncsi_shm_info(struct rnp_hw *hw, + struct ncsi_shm_info *ncsi_shm) +{ + int i; + int *ptr = (int *)ncsi_shm; + int rbytes = round_up(sizeof(*ncsi_shm), 4); + + memset(ncsi_shm, 0, sizeof(*ncsi_shm)); + for (i = 0; i < (rbytes / 4); i++) + ptr[i] = rd32(hw, hw->ncsi_vf_cpu_shm_pf_base + 4 * i); + + return (ncsi_shm->valid & RNP_NCSI_SHM_VALID_MASK) == + RNP_NCSI_SHM_VALID; +} + +static void rnp_ncsi_set_uc_addr_n10(struct rnp_eth_info *eth) +{ + struct ncsi_shm_info ncsi_shm; + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + u8 mac[ETH_ALEN]; + + if (!hw->ncsi_en) + return; + if (__get_ncsi_shm_info(hw, &ncsi_shm)) { + if (ncsi_shm.valid & RNP_MC_VALID) { + mac[0] = ncsi_shm.uc.uc_addr_lo & 0xff; + mac[1] = (ncsi_shm.uc.uc_addr_lo >> 8) & 0xff; + mac[2] = (ncsi_shm.uc.uc_addr_lo >> 16) & 0xff; + mac[3] = (ncsi_shm.uc.uc_addr_lo >> 24) & 0xff; + mac[4] = ncsi_shm.uc.uc_addr_hi & 0xff; + mac[5] = (ncsi_shm.uc.uc_addr_hi >> 8) & 0xff; + if (is_valid_ether_addr(mac)) + eth->ops.set_rar(eth, hw->num_rar_entries, mac, + true); + } + } +} + +static void rnp_ncsi_set_mc_mta_n10(struct rnp_eth_info *eth) +{ + struct ncsi_shm_info ncsi_shm; + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + u8 i; + u8 mac[ETH_ALEN]; + + if (!hw->ncsi_en) + return; + if (__get_ncsi_shm_info(hw, &ncsi_shm)) { + if (ncsi_shm.valid & RNP_MC_VALID) { + for (i = 0; i < RNP_NCSI_MC_COUNT; i++) { + mac[0] = ncsi_shm.mc[i].mc_addr_lo & 0xff; + mac[1] = 
(ncsi_shm.mc[i].mc_addr_lo >> 8) & + 0xff; + mac[2] = (ncsi_shm.mc[i].mc_addr_lo >> 16) & + 0xff; + mac[3] = (ncsi_shm.mc[i].mc_addr_lo >> 24) & + 0xff; + mac[4] = ncsi_shm.mc[i].mc_addr_hi & 0xff; + mac[5] = (ncsi_shm.mc[i].mc_addr_hi >> 8) & + 0xff; + if (is_multicast_ether_addr(mac) && + !is_zero_ether_addr(mac)) { + rnp10_set_mta(hw, mac); + } + } + } + } +} + +static void rnp_ncsi_set_vfta_n10(struct rnp_eth_info *eth) +{ + struct ncsi_shm_info ncsi_shm; + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + + if (!hw->ncsi_en) + return; + if (__get_ncsi_shm_info(hw, &ncsi_shm)) { + if (ncsi_shm.valid & RNP_VLAN_VALID) + hw->ops.set_vlan_filter(hw, ncsi_shm.ncsi_vlan, true, + false); + } +} + +static struct rnp_eth_operations eth_ops_n10 = { + .set_rar = &rnp_eth_set_rar_n10, + .clear_rar = &rnp_eth_clear_rar_n10, + .set_vmdq = &rnp_eth_set_vmdq_n10, + .clear_vmdq = &rnp_eth_clear_vmdq_n10, + .update_mc_addr_list = &rnp_eth_update_mc_addr_list_n10, + .clr_mc_addr = &rnp_eth_clr_mc_addr_n10, + /* store rss info to eth */ + .set_rss_key = &rnp_eth_update_rss_key_n10, + .set_rss_table = &rnp_eth_update_rss_table_n10, + .set_vfta = &rnp_eth_set_vfta_n10, + .clr_vfta = &rnp_eth_clr_vfta_n10, + .set_vlan_filter = &rnp_eth_set_vlan_filter_n10, + /* ncsi */ + .ncsi_set_vfta = &rnp_ncsi_set_vfta_n10, + .ncsi_set_uc_addr = &rnp_ncsi_set_uc_addr_n10, + .ncsi_set_mc_mta = &rnp_ncsi_set_mc_mta_n10, + .set_layer2_remapping = &rnp_eth_set_layer2_n10, + .clr_layer2_remapping = &rnp_eth_clr_layer2_n10, + .clr_all_layer2_remapping = &rnp_eth_clr_all_layer2_n10, + .set_tuple5_remapping = &rnp_eth_set_tuple5_n10, + .clr_tuple5_remapping = &rnp_eth_clr_tuple5_n10, + .clr_all_tuple5_remapping = &rnp_eth_clr_all_tuple5_n10, + .set_tcp_sync_remapping = &rnp_eth_set_tcp_sync_n10, + .set_min_max_packet = &rnp_eth_set_min_max_packets_n10, + .set_vlan_strip = &rnp_eth_set_vlan_strip_n10, + .set_vxlan_port = &rnp_eth_set_vxlan_port_n10, + .set_vxlan_mode = &rnp_eth_set_vxlan_mode_n10, + .set_rx_hash = &rnp_eth_set_rx_hash_n10, + .set_fc_mode = &rnp_eth_set_fc_mode_n10, + .set_vf_vlan_mode = &rnp_eth_set_vf_vlan_mode_n10, +}; + +/** + * rnp_init_hw_n10 - Generic hardware initialization + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting the hardware, filling the bus info + * structure and media type, clears all on chip counters, initializes receive + * address registers, multicast table, VLAN filter table, calls routine to set + * up link and flow control settings, and leaves transmit and receive units + * disabled and uninitialized + **/ +static s32 rnp_init_hw_ops_n10(struct rnp_hw *hw) +{ + s32 status = 0; + + /* Reset the hardware */ + status = hw->ops.reset_hw(hw); + + /* Start the HW */ + if (status == 0) + status = hw->ops.start_hw(hw); + + return status; +} + +static s32 rnp_get_permtion_mac_addr_n10(struct rnp_hw *hw, u8 *mac_addr) +{ +#ifdef NO_MBX_VERSION + u32 v; + v = rd32(hw, RNP10_TOP_MAC_OUI); + mac_addr[0] = (u8)(v >> 16); + mac_addr[1] = (u8)(v >> 8); + mac_addr[2] = (u8)(v >> 0); + v = rd32(hw, RNP10_TOP_MAC_SN); + mac_addr[3] = (u8)(v >> 16); + mac_addr[4] = (u8)(v >> 8); + mac_addr[5] = (u8)(v >> 0); +#else + if (rnp_fw_get_macaddr(hw, hw->pfvfnum, mac_addr, hw->nr_lane)) { + dbg("generate ramdom macaddress...\n"); + eth_random_addr(mac_addr); + } +#endif + + hw->mac.mac_flags |= RNP_FLAGS_INIT_MAC_ADDRESS; + dbg("%s mac:%pM\n", __func__, mac_addr); + + return 0; +} + +static s32 rnp_reset_hw_ops_n10(struct rnp_hw *hw) +{ + int i; + struct rnp_dma_info *dma = 
&hw->dma; + struct rnp_eth_info *eth = &hw->eth; + + /* Call adapter stop to disable tx/rx and clear interrupts */ + dma_wr32(dma, RNP_DMA_AXI_EN, 0); + + + /* if not ncsi or hw not support 'control nic_reset', driver control it */ + if (hw->ncsi_en && hw->fw_version >= 0x00060000) { + /* fw will do nic-reset. to reduct ncsi bmc ping pkg lose */ + } else { +#define N10_NIC_RESET 0 + wr32(hw, RNP10_TOP_NIC_REST_N, N10_NIC_RESET); + /* + * we need this + */ + wmb(); + wr32(hw, RNP10_TOP_NIC_REST_N, ~N10_NIC_RESET); + } + + rnp_mbx_fw_reset_phy(hw); + /* should set all tx-start to 1 */ + for (i = 0; i < RNP_N10_MAX_TX_QUEUES; i++) + dma_ring_wr32(dma, RING_OFFSET(i) + RNP_DMA_TX_START, 1); + + wr32(hw, RNP10_TOP_ETH_BUG_40G_PATCH, 1); + /* set 2046 --> 0x18070 */ + eth_wr32(eth, RNP10_ETH_RX_PROGFULL_THRESH_PORT, DROP_ALL_THRESH); + + /* tcam not reset */ + eth->ops.clr_all_tuple5_remapping(eth); + + /* Store the permanent mac address */ + if (!(hw->mac.mac_flags & RNP_FLAGS_INIT_MAC_ADDRESS)) { + rnp_get_permtion_mac_addr_n10(hw, hw->mac.perm_addr); + memcpy(hw->mac.addr, hw->mac.perm_addr, ETH_ALEN); + } + + hw->ops.init_rx_addrs(hw); + + /* open vxlan default */ +#define VXLAN_HW_ENABLE (1) + eth_wr32(eth, RNP10_ETH_TUNNEL_MOD, VXLAN_HW_ENABLE); + for (i = 0; i < dma->max_tx_queues; i++) + rnp_wr_reg(hw->ring_msix_base + RING_VECTOR(i), 0); + + if (hw->phy_type == PHY_TYPE_SGMII) { + u16 pause_bits = 0; + u32 value; + + if (hw->fc.requested_mode == PAUSE_AUTO) { + pause_bits |= ASYM_PAUSE | SYM_PAUSE; + } else { + if ((hw->fc.requested_mode & PAUSE_TX) && + (!(hw->fc.requested_mode & PAUSE_RX))) { + pause_bits |= ASYM_PAUSE; + + } else if ((!(hw->fc.requested_mode & PAUSE_TX)) && + (!(hw->fc.requested_mode & PAUSE_RX))) { + } else + pause_bits |= ASYM_PAUSE | SYM_PAUSE; + } + rnp_mbx_phy_read(hw, 4, &value); + value &= ~0xC00; + value |= pause_bits; + rnp_mbx_phy_write(hw, 4, value); + } + + return 0; +} + +static s32 rnp_start_hw_ops_n10(struct rnp_hw *hw) +{ + s32 ret_val = 0; + struct rnp_eth_info *eth = &hw->eth; + struct rnp_dma_info *dma = &hw->dma; + + eth_wr32(eth, RNP10_ETH_ERR_MASK_VECTOR, + INNER_L4_BIT | PKT_LEN_ERR | HDR_LEN_ERR); + eth_wr32(eth, RNP10_ETH_BYPASS, 0); + eth_wr32(eth, RNP10_ETH_DEFAULT_RX_RING, 0); + + /* DMA common Registers */ + dma_wr32(dma, RNP_DMA_CONFIG, DMA_VEB_BYPASS); + + /* enable-dma-axi */ + dma_wr32(dma, RNP_DMA_AXI_EN, (RX_AXI_RW_EN | TX_AXI_RW_EN)); + + return ret_val; +} + +/* set n10 min/max packet according to new_mtu + * we support mtu + 14 + 4 * 3 as max packet len*/ +static void rnp_set_mtu_hw_ops_n10(struct rnp_hw *hw, int new_mtu) +{ + struct rnp_eth_info *eth = &hw->eth; + + int min = 60; + int max = new_mtu + ETH_HLEN + ETH_FCS_LEN * 3; + + hw->min_length_current = min; + hw->max_length_current = max; + + eth->ops.set_min_max_packet(eth, min, max); +} + +/* setup n10 vlan filter status */ +static void rnp_set_vlan_filter_en_hw_ops_n10(struct rnp_hw *hw, bool status) +{ + struct rnp_eth_info *eth = &hw->eth; + eth->ops.set_vlan_filter(eth, status); +} + +/* set vlan to n10 vlan filter table & veb */ +/* pf setup call */ +static void rnp_set_vlan_filter_hw_ops_n10(struct rnp_hw *hw, u16 vid, + bool enable, bool sriov_flag) +{ + struct rnp_eth_info *eth = &hw->eth; + struct rnp_dma_info *dma = &hw->dma; + u32 vfnum = hw->max_vfs - 1; + + /* setup n10 eth vlan table */ + eth->ops.set_vfta(eth, vid, enable); + + /* setup veb */ + /* only ctags setup veb if in sriov and not stags */ + if (vid && sriov_flag) { + if (enable) { + 
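+			/* in sriov mode the PF reserves the last VEB entry (vfnum = max_vfs - 1) for its own ctag */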
dma->ops.set_veb_vlan(dma, vid, vfnum); + } else { + dma->ops.set_veb_vlan(dma, 0, vfnum); + } + } +} + +static void rnp_set_vf_vlan_filter_hw_ops_n10(struct rnp_hw *hw, u16 vid, + int vf, bool enable, + bool veb_only) +{ + struct rnp_dma_info *dma = &hw->dma; + + if (!veb_only) { + /* call set vfta without veb setup */ + hw->ops.set_vlan_filter(hw, vid, enable, false); + + } else { + if (enable) { + dma->ops.set_veb_vlan(dma, vid, vf); + } else { + dma->ops.set_veb_vlan(dma, 0, vf); + } + } +} + +static void rnp_clr_vlan_veb_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_dma_info *dma = &hw->dma; + u32 vfnum = hw->vfnum; + + dma->ops.set_veb_vlan(dma, 0, vfnum); +} + +/* setup n10 vlan strip status */ +static void rnp_set_vlan_strip_hw_ops_n10(struct rnp_hw *hw, u16 queue, + bool strip) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_vlan_strip(eth, queue, strip); +} + +/* update new n10 mac */ +static void rnp_set_mac_hw_ops_n10(struct rnp_hw *hw, u8 *mac, bool sriov_flag) +{ + struct rnp_eth_info *eth = &hw->eth; + struct rnp_dma_info *dma = &hw->dma; + struct rnp_mac_info *mac_info = &hw->mac; + /* use this queue index to setup veb */ + /* now pf use queu 0 /1 + * vfnum is the last vfnum */ + int queue = hw->veb_ring; + int vfnum = hw->vfnum; + + eth->ops.set_rar(eth, 0, mac, true); + if (sriov_flag) { + eth->ops.set_vmdq(eth, 0, queue / hw->sriov_ring_limit); + dma->ops.set_veb_mac(dma, mac, vfnum, queue); + } + + mac_info->ops.set_mac(mac_info, mac, 0); +} + +/** + * rnp_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list to the RAR table. + * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +static int rnp_write_uc_addr_list_n10(struct rnp_hw *hw, + struct net_device *netdev, + bool sriov_flag) +{ + unsigned int rar_entries = hw->num_rar_entries - 1; + u32 vfnum = hw->vfnum; + struct rnp_eth_info *eth = &hw->eth; + int count = 0; + + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + vfnum = 0; + /* In SR-IOV mode significantly less RAR entries are available */ + if (sriov_flag) + rar_entries = hw->max_pf_macvlans - 1; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > rar_entries) + return -ENOMEM; + + if (!netdev_uc_empty(netdev)) { + struct netdev_hw_addr *ha; + + hw_dbg(hw, "%s: rar_entries:%d, uc_count:%d\n", __func__, + hw->num_rar_entries, netdev_uc_count(netdev)); + + /* return error if we do not support writing to RAR table */ + if (!eth->ops.set_rar) + return -ENOMEM; + + netdev_for_each_uc_addr(ha, netdev) { + if (!rar_entries) + break; + /* VMDQ_P(0) is num_vfs pf use the last + * vf in sriov mode + */ + /* that's ok */ + eth->ops.set_rar(eth, rar_entries, ha->addr, + RNP10_RAH_AV); + if (sriov_flag) + eth->ops.set_vmdq(eth, rar_entries, vfnum); + + rar_entries--; + + count++; + } + } + /* write the addresses in reverse order to avoid write combining */ + + hw_dbg(hw, "%s: Clearing RAR[1 - %d]\n", __func__, rar_entries); + for (; rar_entries > 0; rar_entries--) + eth->ops.clear_rar(eth, rar_entries); + + if (hw->ncsi_en) + eth->ops.ncsi_set_uc_addr(eth); + + return count; +} + +__maybe_unused static void check_vf_promisc(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + int i; + + hw->vf_promisc_mode = 0; + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[i].promisc_mode) { + hw->vf_promisc_mode = 1; + 
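			/* remember the first VF found in promiscuous mode */ +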
hw->vf_promisc_num = i; + break; + } + } +} + +static void rnp_set_rx_mode_hw_ops_n10(struct rnp_hw *hw, + struct net_device *netdev, + bool sriov_flag) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + u32 fctrl; +#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) || defined(NETIF_F_HW_VLAN_CTAG_RX) + netdev_features_t features = netdev->features; +#endif + int count; + struct rnp_eth_info *eth = &hw->eth; + + hw_dbg(hw, "%s\n", __func__); + + /* broadcast always bypass */ + fctrl = eth_rd32(eth, RNP10_ETH_DMAC_FCTRL) | RNP10_FCTRL_BPE; + /* clear the bits we are changing the status of */ + fctrl &= ~(RNP10_FCTRL_UPE | RNP10_FCTRL_MPE); + /* promisc mode */ +#ifdef VF_PROMISC_SUPPORT + check_vf_promisc(adapter); + if ((netdev->flags & IFF_PROMISC) || (!hw->vf_promisc_mode)) { +#else + if (netdev->flags & IFF_PROMISC) { +#endif + hw->addr_ctrl.user_set_promisc = true; + fctrl |= (RNP10_FCTRL_UPE | RNP10_FCTRL_MPE); + /* disable hardware filter vlans in promisc mode */ +#ifdef NETIF_F_HW_VLAN_CTAG_FILTER + features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; +#endif +#ifdef NETIF_F_HW_VLAN_CTAG_RX + features &= ~NETIF_F_HW_VLAN_CTAG_RX; +#endif + } else { + if (netdev->flags & IFF_ALLMULTI) { + fctrl |= RNP10_FCTRL_MPE; + } else { + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + /* we always update vf multicast info */ + count = eth->ops.update_mc_addr_list(eth, netdev, true); + if (count < 0) { + fctrl |= RNP10_FCTRL_MPE; + } else if (count) { + + } + } + hw->addr_ctrl.user_set_promisc = false; + } + + /* + * Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + if (rnp_write_uc_addr_list_n10(hw, netdev, sriov_flag) < 0) { + fctrl |= RNP10_FCTRL_UPE; + } + + eth_wr32(eth, RNP10_ETH_DMAC_FCTRL, fctrl); +#ifdef NETIF_F_HW_VLAN_CTAG_FILTER + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + eth->ops.set_vlan_filter(eth, true); + else + eth->ops.set_vlan_filter(eth, false); +#endif + + if ((hw->addr_ctrl.user_set_promisc == true) || + (adapter->priv_flags & RNP_PRIV_FLAG_REC_HDR_LEN_ERR)) { + /* set pkt_len_err and hdr_len_err default to 1 */ + eth_wr32(eth, RNP10_ETH_ERR_MASK_VECTOR, + INNER_L4_BIT | PKT_LEN_ERR | HDR_LEN_ERR); + } else { + eth_wr32(eth, RNP10_ETH_ERR_MASK_VECTOR, INNER_L4_BIT); + } + + hw->ops.set_mtu(hw, netdev->mtu); +} + +/* setup an rar with vfnum */ +static void rnp_set_rar_with_vf_hw_ops_n10(struct rnp_hw *hw, u8 *mac, int idx, + u32 vfnum, bool enable) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_rar(eth, idx, mac, enable); + eth->ops.set_vmdq(eth, idx, vfnum); +} + +static void rnp_clr_rar_hw_ops_n10(struct rnp_hw *hw, int idx) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.clear_rar(eth, idx); +} + +static void rnp_clr_rar_all_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_eth_info *eth = &hw->eth; + unsigned int rar_entries = hw->num_rar_entries - 1; + int i; + + for (i = 0; i < rar_entries; i++) + eth->ops.clear_rar(eth, rar_entries); +} + +static void rnp_set_fcs_mode_hw_ops_n10(struct rnp_hw *hw, bool status) +{ + struct rnp_mac_info *mac = &hw->mac; + + mac->ops.set_mac_fcs(mac, status); +} + +static void rnp_set_vxlan_port_hw_ops_n10(struct rnp_hw *hw, u32 port) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_vxlan_port(eth, port); +} + +static void rnp_set_vxlan_mode_hw_ops_n10(struct rnp_hw *hw, bool inner) +{ + struct 
rnp_eth_info *eth = &hw->eth; + + eth->ops.set_vxlan_mode(eth, inner); +} + +static void rnp_set_mac_rx_hw_ops_n10(struct rnp_hw *hw, bool status) +{ + struct rnp_mac_info *mac = &hw->mac; + struct rnp_eth_info *eth = &hw->eth; + + if(pci_channel_offline(hw->pdev)){ + return; + } + if (status) + eth_wr32(eth, RNP10_ETH_RX_PROGFULL_THRESH_PORT, + RECEIVE_ALL_THRESH); + else + eth_wr32(eth, RNP10_ETH_RX_PROGFULL_THRESH_PORT, + DROP_ALL_THRESH); + + mac->ops.set_mac_rx(mac, status); +} + +static void rnp_set_sriov_status_hw_ops_n10(struct rnp_hw *hw, bool status) +{ + struct rnp_dma_info *dma = &hw->dma; + struct rnp_eth_info *eth = &hw->eth; + u32 v; + + if (status) { + dma_wr32(dma, RNP_DMA_CONFIG, + dma_rd32(dma, RNP_DMA_CONFIG) & (~DMA_VEB_BYPASS)); + v = eth_rd32(eth, RNP10_MRQC_IOV_EN); + v |= RNP10_IOV_ENABLED; + eth_wr32(eth, RNP10_MRQC_IOV_EN, v); + } else { + v = eth_rd32(eth, RNP10_MRQC_IOV_EN); + v &= ~(RNP10_IOV_ENABLED); + eth_wr32(eth, RNP10_MRQC_IOV_EN, v); + + dma->ops.clr_veb_all(dma); + // clean veb ? + } + +#if defined(NIC_VF_FXIED) || defined(VF_PROMISC_SUPPORT) + /* we setup default to pf */ + eth_wr32(eth, RNP10_VM_DMAC_MPSAR_RING(127), hw->default_vf_num); + /* if pf or vf in promisc mode set promisc to that vf*/ + if (hw->vf_promisc_mode) { + int fix_vf_num; + + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + fix_vf_num = (hw->vf_promisc_num + 1) * hw->sriov_ring_limit / 2; + else + fix_vf_num = (hw->vf_promisc_num) * hw->sriov_ring_limit / 2; + + eth_wr32(eth, RNP10_VM_DMAC_MPSAR_RING(127), fix_vf_num); + } +#endif +} + +static void rnp_set_sriov_vf_mc_hw_ops_n10(struct rnp_hw *hw, u16 mc_addr) +{ + struct rnp_eth_info *eth = &hw->eth; + u32 vector_bit; + u32 vector_reg; + u32 mta_reg; + + vector_reg = (mc_addr >> 5) & 0x7F; + vector_bit = mc_addr & 0x1F; + mta_reg = eth_rd32(eth, RNP10_ETH_MULTICAST_HASH_TABLE(vector_reg)); + mta_reg |= (1 << vector_bit); + eth_wr32(eth, RNP10_ETH_MULTICAST_HASH_TABLE(vector_reg), mta_reg); +} + +static void rnp_update_sriov_info_hw_ops_n10(struct rnp_hw *hw) +{ +} + +static void rnp_set_pause_mode_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_mac_info *mac = &hw->mac; + struct rnp_eth_info *eth = &hw->eth; + + mac->ops.set_fc_mode(mac); + eth->ops.set_fc_mode(eth); +} + +static void rnp_get_pause_mode_hw_ops_n10(struct rnp_hw *hw) +{ + u32 value_r5; + + if (hw->phy_type != PHY_TYPE_SGMII) { + if ((hw->fc.requested_mode & PAUSE_TX) && + (hw->fc.requested_mode & PAUSE_RX)) { + hw->fc.current_mode = rnp_fc_full; + } else if (hw->fc.requested_mode & PAUSE_TX) { + hw->fc.current_mode = rnp_fc_tx_pause; + } else if (hw->fc.requested_mode & PAUSE_RX) { + hw->fc.current_mode = rnp_fc_rx_pause; + } else { + hw->fc.current_mode = rnp_fc_none; + } + return; + } + + /* we get pause mode from phy reg */ + rnp_mbx_phy_read(hw, 5, &value_r5); + if (!hw->link) { + /* if link is not up ,fc is null */ + hw->fc.current_mode = rnp_fc_none; + } else { + if (hw->fc.requested_mode == PAUSE_AUTO) { + if (value_r5 & SYM_PAUSE) + hw->fc.current_mode = rnp_fc_full; + else if (value_r5 & ASYM_PAUSE) + hw->fc.current_mode = rnp_fc_rx_pause; + else + hw->fc.current_mode = rnp_fc_none; + + } else if ((hw->fc.requested_mode & PAUSE_TX) && + (hw->fc.requested_mode & PAUSE_RX)) { + if (value_r5 & SYM_PAUSE) + hw->fc.current_mode = rnp_fc_full; + else if (value_r5 & ASYM_PAUSE) + hw->fc.current_mode = rnp_fc_rx_pause; + else + hw->fc.current_mode = rnp_fc_none; + + } else if (hw->fc.requested_mode & PAUSE_TX) { + if (value_r5 & SYM_PAUSE) + hw->fc.current_mode = 
rnp_fc_tx_pause; + else if (value_r5 & ASYM_PAUSE) + hw->fc.current_mode = rnp_fc_none; + else + hw->fc.current_mode = rnp_fc_none; + + } else if (hw->fc.requested_mode & PAUSE_RX) { + if (value_r5 & SYM_PAUSE) + hw->fc.current_mode = rnp_fc_rx_pause; + else if (value_r5 & ASYM_PAUSE) + hw->fc.current_mode = rnp_fc_rx_pause; + else + hw->fc.current_mode = rnp_fc_none; + + } else { + hw->fc.current_mode = rnp_fc_none; + } + } +} + +static void rnp_update_hw_info_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_dma_info *dma = &hw->dma; + struct rnp_eth_info *eth = &hw->eth; + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + u32 data; + /* 1 enable eth filter */ + eth_wr32(eth, RNP10_HOST_FILTER_EN, 1); + /* 2 open redir en */ + eth_wr32(eth, RNP10_REDIR_EN, 1); + + /* 3 open sctp checksum and other checksum */ + if (hw->feature_flags & RNP_NET_FEATURE_TX_CHECKSUM) + eth_wr32(eth, RNP10_ETH_SCTP_CHECKSUM_EN, 1); + + /* 4 mark muticaset as broadcast */ + dma_wr32(dma, RNP_VEB_MAC_MASK_LO, 0xffffffff); + dma_wr32(dma, RNP_VEB_MAC_MASK_HI, 0xfeff); + /* 5 setup dma split */ + + data = dma_rd32(dma, RNP_DMA_CONFIG); + data &= (0x00000ffff); +#ifdef FT_PADDING +#define PADDING_BIT 8 + if (adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) + SET_BIT(PADDING_BIT, data); +#endif +#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT + /* in this mode we fixed dm split */ + /* if PAGE_SIZE */ +#define RX_MAX_DWORD (96) + data |= (((hw->dma_split_size) >> 4) << 16); +#else + /* we update dma spilt with max_length_current */ + data |= (((hw->max_length_current) >> 4) << 16); +#endif + dma_wr32(dma, RNP_DMA_CONFIG, data); + /* 6 open vxlan inner match? */ + + /* 7 setuptcp sync remmapping */ + /* n10 not support prio */ + if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC) { + hw->ops.set_tcp_sync_remapping(hw, adapter->tcp_sync_queue, + true, false); + } else { + hw->ops.set_tcp_sync_remapping(hw, adapter->tcp_sync_queue, + false, false); + } +} + +static void rnp_update_hw_rx_drop_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + int i; + struct rnp_ring *ring; + + for (i = 0; i < adapter->num_rx_queues; i++) { + ring = adapter->rx_ring[i]; + if (adapter->rx_drop_status & BIT(i)) { + ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH, + adapter->drop_time); + } else { + ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH, 0); + } + } +} + +static void rnp_set_rx_hash_hw_ops_n10(struct rnp_hw *hw, bool status, + bool sriov_flag) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_rx_hash(eth, status, sriov_flag); +} + +/* setup mac to rar 0 + * clean vmdq + * clean mc addr */ +static s32 rnp_init_rx_addrs_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_eth_info *eth = &hw->eth; + + u32 i; + u32 rar_entries = eth->num_rar_entries; + u32 v; + + hw_dbg(hw, "init_rx_addrs:rar_entries:%d, mac.addr:%pM\n", rar_entries, + hw->mac.addr); + /* + * If the current mac address is valid, assume it is a software override + * to the permanent address. + * Otherwise, use the permanent address from the eeprom. + */ + if (!is_valid_ether_addr(hw->mac.addr)) { + /* Get the MAC address from the RAR0 for later reference */ + memcpy(hw->mac.addr, hw->mac.perm_addr, ETH_ALEN); + hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr); + } else { + /* Setup the receive address. 
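The software MAC overrides RAR[0]; its VMDq pool mapping is cleared just below.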
*/ + hw_dbg(hw, "Overriding MAC Address in RAR[0]\n"); + hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); + eth->ops.set_rar(eth, 0, hw->mac.addr, true); + + /* clear VMDq pool/queue selection for RAR 0 */ + eth->ops.clear_vmdq(eth, 0, RNP_CLEAR_VMDQ_ALL); + } + hw->addr_ctrl.overflow_promisc = 0; + hw->addr_ctrl.rar_used_count = 1; + + /* Zero out the other receive addresses. */ + hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1); + for (i = 1; i < rar_entries; i++) { + eth->ops.clear_rar(eth, i); + } + if (hw->ncsi_en) + eth->ops.ncsi_set_uc_addr(eth); + + /* Clear the MTA */ + hw->addr_ctrl.mta_in_use = 0; + v = eth_rd32(eth, RNP10_ETH_DMAC_MCSTCTRL); + v &= (~0x3); + v |= eth->mc_filter_type; + eth_wr32(eth, RNP10_ETH_DMAC_MCSTCTRL, v); + + hw_dbg(hw, " Clearing MTA\n"); + eth->ops.clr_mc_addr(eth); + if (hw->ncsi_en) { + eth->ops.ncsi_set_mc_mta(eth); + eth->ops.ncsi_set_vfta(eth); + } + + return 0; +} + +static void rnp_clr_vfta_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.clr_vfta(eth); +} + +static void rnp_set_txvlan_mode_hw_ops_n10(struct rnp_hw *hw, bool cvlan) +{ + struct rnp_mac_info *mac = &hw->mac; + if (cvlan) { + mac_wr32(mac, RNP10_MAC_TX_VLAN_TAG, 0x4000000); + mac_wr32(mac, RNP10_MAC_TX_VLAN_MODE, 0x100000); + mac_wr32(mac, RNP10_MAC_INNER_VLAN_INCL, 0x100000); + } else { + mac_wr32(mac, RNP10_MAC_TX_VLAN_TAG, 0xc600000); + mac_wr32(mac, RNP10_MAC_TX_VLAN_MODE, 0x180000); + mac_wr32(mac, RNP10_MAC_INNER_VLAN_INCL, 0x100000); + } +} + +static void rnp_set_rss_key_hw_ops_n10(struct rnp_hw *hw, bool sriov_flag) +{ + struct rnp_eth_info *eth = &hw->eth; + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + int key_len = RNP_RSS_KEY_SIZE; + + memcpy(hw->rss_key, adapter->rss_key, key_len); + + eth->ops.set_rss_key(eth, sriov_flag); +} + +static void rnp_set_rss_table_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_rss_table(eth); +} + +static void rnp_set_mbx_link_event_hw_ops_n10(struct rnp_hw *hw, int enable) +{ + rnp_mbx_link_event_enable(hw, enable); +} + +static void rnp_set_mbx_ifup_hw_ops_n10(struct rnp_hw *hw, int enable) +{ + rnp_mbx_ifup_down(hw, enable); + + if (hw->phy_type == PHY_TYPE_10G_TP) { + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + /* first call reset an */ + if (enable) { + hw->ops.setup_link(hw, hw->phy.autoneg_advertised, + hw->autoneg, adapter->speed, + hw->duplex); + } + } +} + +/** + * rnp_check_mac_link_n10 - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +static s32 rnp_check_mac_link_hw_ops_n10(struct rnp_hw *hw, rnp_link_speed *speed, + bool *link_up, bool *duplex, + bool link_up_wait_to_complete) +{ +#ifdef NO_MBX_VERSION + + struct rnp_pcs_info *pcs = &hw->pcs; + u32 status; + /* if vu440 , we assume link always */ + if (hw->rss_type == rnp_rss_uv440) { + *link_up = true; + *speed = RNP_LINK_SPEED_40GB_FULL; + goto skip_get_link; + } + + status = pcs->ops.read(hw, 0, RNP_PCS_LINK_STATUS); + + if (status & RNP_PCS_LINKUP) + *link_up = true; + else + *link_up = false; + status = pcs->ops.read(hw, 0, RNP_PCS_LINK_SPEED); + + if (status & RNP_PCS_1G_OR_10G) { + switch (status & RNP_PCS_SPPEED_MASK) { + case RNP_PCS_SPPEED_10G: + *speed = RNP_LINK_SPEED_10GB_FULL; + + break; + case 
RNP_PCS_SPPEED_40G: + *speed = RNP_LINK_SPEED_40GB_FULL; + + break; + } + } +skip_get_link: +#else + + if (hw->speed == 10) { + *speed = RNP_LINK_SPEED_10_FULL; + } else if (hw->speed == 100) { + *speed = RNP_LINK_SPEED_100_FULL; + } else if (hw->speed == 1000) { + *speed = RNP_LINK_SPEED_1GB_FULL; + } else if (hw->speed == 10000) { + *speed = RNP_LINK_SPEED_10GB_FULL; + } else if (hw->speed == 25000) { + *speed = RNP_LINK_SPEED_25GB_FULL; + } else if (hw->speed == 40000) { + *speed = RNP_LINK_SPEED_40GB_FULL; + } else { + *speed = RNP_LINK_SPEED_UNKNOWN; + } + + *link_up = hw->link; + *duplex = 1; +#endif + + return 0; +} + +static s32 rnp_setup_mac_link_hw_ops_n10(struct rnp_hw *hw, u32 adv, u32 autoneg, + u32 speed, u32 duplex) +{ + struct rnp_adapter *adpt = hw->back; + u32 value = 0; + u32 value_r4 = 0; + u32 value_r9 = 0; + + rnp_logd(LOG_PHY, + "%s setup phy: phy_addr=%d speed=%d duplex=%d autoneg=%d " + "is_backplane=%d is_sgmii=%d\n", + __func__, adpt->phy_addr, speed, duplex, autoneg, + hw->is_backplane, hw->is_sgmii); + + if (hw->is_backplane) { + /* Backplane type, support AN, unsupport set speed */ + return rnp_set_lane_fun(hw, LANE_FUN_AN, autoneg, 0, 0, 0); + } + + /* TODO: Not support fiber */ + if ((!hw->is_sgmii) && (hw->phy_type != PHY_TYPE_10G_TP)) { + if (hw->force_10g_1g_speed_ablity) { + return rnp_mbx_force_speed(hw, speed); + } else { + return 0; + } + } + + if (hw->phy_type == PHY_TYPE_10G_TP) { + rnp_mbx_phy_read(hw, PHY_826x_MDIX, &value); + + value &= ~(BIT(8) | BIT(9)); + /* Options: 0: Auto (default) 1: MDI mode 2: MDI-X mode */ + switch (hw->phy.mdix) { + case 1: + value |= BIT(8)|BIT(9); + break; + case 2: + value |= BIT(9); + break; + case 0: + default: + break; + } + rnp_mbx_phy_write(hw, PHY_826x_MDIX, value); + + if (!autoneg) { + rnp_mbx_phy_read(hw, PHY_826x_SPEED, &value); + value &= (~(BIT(13) | BIT(6) | BIT(5) | BIT(4) | + BIT(3) | BIT(2))); + + switch (speed) { + case RNP_LINK_SPEED_10GB_FULL: + value |= BIT(13) | BIT(6); + break; + case RNP_LINK_SPEED_1GB_FULL: + case RNP_LINK_SPEED_1GB_HALF: + value |= BIT(6); + ; + break; + case RNP_LINK_SPEED_100_FULL: + case RNP_LINK_SPEED_100_HALF: + value |= BIT(13); + break; + case RNP_LINK_SPEED_10_FULL: + case RNP_LINK_SPEED_10_HALF: + value = 0; + break; + default: + hw_dbg(hw, "unknown speed = 0x%x.\n", speed); + break; + } + rnp_mbx_phy_write(hw, PHY_826x_SPEED, value); + rnp_mbx_phy_read(hw, PHY_826x_DUPLEX, &value); + value &= (~BIT(8)); + if (duplex) + value |= BIT(8); + rnp_mbx_phy_write(hw, PHY_826x_DUPLEX, value); + rnp_mbx_phy_read(hw, PHY_826x_AN, &value); + value &= (~BIT(12)); + rnp_mbx_phy_write(hw, PHY_826x_AN, value); + } else { + rnp_mbx_phy_read(hw, PHY_826x_ADV, &value); + + value &= (~(BIT(5) | BIT(6) | BIT(7) | BIT(8) | + BIT(10) | BIT(11))); + + if (adv & RNP_LINK_SPEED_100_FULL) { + hw->phy.autoneg_advertised |= + RNP_LINK_SPEED_100_FULL; + value |= BIT(8); + } + if (adv & RNP_LINK_SPEED_100_HALF) { + hw->phy.autoneg_advertised |= + RNP_LINK_SPEED_100_FULL; + value |= BIT(7); + } + + value |= BIT(10) | BIT(11); + /* BIT10 fc BIT11 asyfc */ + rnp_mbx_phy_write(hw, PHY_826x_ADV, value); + + rnp_mbx_phy_read(hw, PHY_826x_GBASE_ADV, &value); + value &= (~(BIT(7) | BIT(8) | BIT(12))); + + /* bit 7 2.5G bit 8 5G */ + if (adv & RNP_LINK_SPEED_10GB_FULL) { + hw->phy.autoneg_advertised |= + RNP_LINK_SPEED_10GB_FULL; + value |= BIT(12); + } + rnp_mbx_phy_write(hw, PHY_826x_GBASE_ADV, value); + rnp_mbx_phy_read(hw, PHY_826x_GBASE_ADV_2, &value); + value &= 0x00ff; + if (adv & 
RNP_LINK_SPEED_1GB_FULL) { + hw->phy.autoneg_advertised |= + RNP_LINK_SPEED_1GB_FULL; + value |= BIT(9); + } + if (adv & RNP_LINK_SPEED_1GB_HALF) { + hw->phy.autoneg_advertised |= + RNP_LINK_SPEED_1GB_HALF; + value |= BIT(8); + } + rnp_mbx_phy_write(hw, PHY_826x_GBASE_ADV_2, value); + rnp_mbx_phy_read(hw, PHY_826x_AN, &value); + value |= BIT(12) | BIT(9); + rnp_mbx_phy_write(hw, PHY_826x_AN, value); + } + + return 0; + } + + /* Set MDI/MDIX mode */ + rnp_mbx_phy_read(hw, RNP_YT8531_PHY_SPEC_CTRL, &value); + value &= ~RNP_YT8531_PHY_SPEC_CTRL_MDIX_CFG_MASK; + /* Options: 0: Auto (default) 1: MDI mode 2: MDI-X mode */ + switch (hw->phy.mdix) { + case 1: + break; + case 2: + value |= RNP_YT8531_PHY_SPEC_CTRL_FORCE_MDIX; + break; + case 0: + default: + value |= RNP_YT8531_PHY_SPEC_CTRL_AUTO_MDI_MDIX; + break; + } + rnp_mbx_phy_write(hw, RNP_YT8531_PHY_SPEC_CTRL, value); + + /* + * Clear autoneg_advertised and set new values based on input link + * speed. + */ + hw->phy.autoneg_advertised = speed; + + if (!autoneg) { + switch (speed) { + case RNP_LINK_SPEED_1GB_FULL: + case RNP_LINK_SPEED_1GB_HALF: + value = RNP_MDI_PHY_SPEED_SELECT1; + speed = RNP_LINK_SPEED_1GB_FULL; + goto out; + break; + case RNP_LINK_SPEED_100_FULL: + case RNP_LINK_SPEED_100_HALF: + value = RNP_MDI_PHY_SPEED_SELECT0; + break; + case RNP_LINK_SPEED_10_FULL: + case RNP_LINK_SPEED_10_HALF: + value = 0; + break; + default: + value = RNP_MDI_PHY_SPEED_SELECT0 | + RNP_MDI_PHY_SPEED_SELECT1; + hw_dbg(hw, "unknown speed = 0x%x.\n", speed); + break; + } + /* duplex full */ + if (duplex) + value |= RNP_MDI_PHY_DUPLEX; + value |= 0x8000; + rnp_mbx_phy_write(hw, 0x0, value); + goto skip_an; + } + + /* start_an */ + value_r4 = 0x1E0; + value_r9 = 0x300; + /* disable 100/10base-T Self-negotiation ability */ + rnp_mbx_phy_read(hw, 0x4, &value); + value &= ~value_r4; + rnp_mbx_phy_write(hw, 0x4, value); + + /* disable 1000base-T Self-negotiation ability */ + rnp_mbx_phy_read(hw, 0x9, &value); + value &= ~value_r9; + rnp_mbx_phy_write(hw, 0x9, value); + + value_r4 = 0x0; + value_r9 = 0x0; + + if (adv & RNP_LINK_SPEED_1GB_FULL) { + hw->phy.autoneg_advertised |= RNP_LINK_SPEED_1GB_FULL; + value_r9 |= 0x200; + } + if (adv & RNP_LINK_SPEED_100_FULL) { + hw->phy.autoneg_advertised |= RNP_LINK_SPEED_100_FULL; + value_r4 |= 0x100; + } + if (adv & RNP_LINK_SPEED_10_FULL) { + hw->phy.autoneg_advertised |= RNP_LINK_SPEED_10_FULL; + value_r4 |= 0x40; + } + + if (adv & RNP_LINK_SPEED_1GB_HALF) { + hw->phy.autoneg_advertised |= RNP_LINK_SPEED_1GB_HALF; + value_r9 |= 0x100; + } + if (adv & RNP_LINK_SPEED_100_HALF) { + hw->phy.autoneg_advertised |= RNP_LINK_SPEED_100_HALF; + value_r4 |= 0x80; + } + if (adv & RNP_LINK_SPEED_10_HALF) { + hw->phy.autoneg_advertised |= RNP_LINK_SPEED_10_HALF; + value_r4 |= 0x20; + } + + /* enable 1000base-T Self-negotiation ability */ + rnp_mbx_phy_read(hw, 0x9, &value); + value |= value_r9; + rnp_mbx_phy_write(hw, 0x9, value); + + /* enable 100/10base-T Self-negotiation ability */ + rnp_mbx_phy_read(hw, 0x4, &value); + value |= value_r4; + rnp_mbx_phy_write(hw, 0x4, value); + + /* software reset to make the above configuration take effect*/ + rnp_mbx_phy_read(hw, 0x0, &value); + value |= 0x9200; + rnp_mbx_phy_write(hw, 0x0, value); +skip_an: + /* power on in UTP mode */ + rnp_mbx_phy_read(hw, 0x0, &value); + value &= ~0x800; + rnp_mbx_phy_write(hw, 0x0, value); + +out: + return 0; +} + +static void rnp_clean_link_hw_ops_n10(struct rnp_hw *hw) +{ + hw->link = 0; +} + +static void rnp_set_layer2_hw_ops_n10(struct rnp_hw *hw, 
+ union rnp_atr_input *input, u16 pri_id, + u8 queue, bool prio_flag) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_layer2_remapping(eth, input, pri_id, queue, prio_flag); +} + +static void rnp_clr_layer2_hw_ops_n10(struct rnp_hw *hw, u16 pri_id) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.clr_layer2_remapping(eth, pri_id); +} + +static void rnp_clr_all_layer2_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.clr_all_layer2_remapping(eth); +} + +static void rnp_clr_all_tuple5_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.clr_all_tuple5_remapping(eth); +} + +static void rnp_set_tcp_sync_hw_ops_n10(struct rnp_hw *hw, int queue, bool flag, + bool prio) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_tcp_sync_remapping(eth, queue, flag, prio); +} + +static void rnp_update_msix_count_hw_ops_n10(struct rnp_hw *hw, int msix_count) +{ + int msix_count_new; + struct rnp_mac_info *mac = &hw->mac; + + msix_count_new = clamp_t(int, msix_count, 2, RNP_N10_MSIX_VECTORS); + + mac->max_msix_vectors = msix_count_new; + hw->max_msix_vectors = msix_count_new; +} + +static void rnp_set_tuple5_hw_ops_n10(struct rnp_hw *hw, + union rnp_atr_input *input, u16 pri_id, + u8 queue, bool prio_flag) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_tuple5_remapping(eth, input, pri_id, queue, prio_flag); +} + +static void rnp_clr_tuple5_hw_ops_n10(struct rnp_hw *hw, u16 pri_id) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.clr_tuple5_remapping(eth, pri_id); +} + +static void rnp_update_hw_status_hw_ops_n10(struct rnp_hw *hw, + struct rnp_hw_stats *hw_stats, + struct net_device_stats *net_stats) +{ + struct rnp_dma_info *dma = &hw->dma; + struct rnp_eth_info *eth = &hw->eth; + struct rnp_mac_info *mac = &hw->mac; + int port; + + hw_stats->dma_to_dma = + dma_rd32(dma, RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_0) + + dma_rd32(dma, RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_1) + + dma_rd32(dma, RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_2) + + dma_rd32(dma, RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_3); + + hw_stats->dma_to_switch = dma_rd32(dma, RNP_DMA_STATS_DMA_TO_SWITCH); + hw_stats->mac_to_dma = dma_rd32(dma, RNP_DMA_STATS_MAC_TO_DMA); + + net_stats->rx_crc_errors = 0; + hw_stats->dbg_rx_err_cnt = 0; + + for (port = 0; port < 4; port++) { + /* we use Hardware stats? 
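CRC and decode error counts come from the per-port RXTRANS counters, summed over all four ports.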
*/ + net_stats->rx_crc_errors += + eth_rd32(eth, RNP10_RXTRANS_CRC_ERR_PKTS(port)); + hw_stats->dbg_rx_err_cnt += + eth_rd32(eth, RNP10_RXTRANS_WDT_ERR_PKTS(port)) + + eth_rd32(eth, RNP10_RXTRANS_CODE_ERR_PKTS(port)) + + eth_rd32(eth, RNP10_RXTRANS_CRC_ERR_PKTS(port)) + + eth_rd32(eth, RNP10_RXTRANS_SLEN_ERR_PKTS(port)) + + eth_rd32(eth, RNP10_RXTRANS_GLEN_ERR_PKTS(port)) + + eth_rd32(eth, RNP10_RXTRANS_IPH_ERR_PKTS(port)) + + eth_rd32(eth, RNP10_RXTRANS_LEN_ERR_PKTS(port)); + } + hw_stats->invalid_dropped_packets = + eth_rd32(eth, RNP10_ETH_INVALID_DROP_PKTS); + hw_stats->rx_capabity_lost = + eth_rd32(eth, RNP10_RXTRANS_DROP(0)) + + eth_rd32(eth, RNP10_RXTRANS_CUT_ERR_PKTS(0)); + hw_stats->filter_dropped_packets = + eth_rd32(eth, RNP10_ETH_FILTER_DROP_PKTS); + hw_stats->host_l2_match_drop = + eth_rd32(eth, RNP10_ETH_HOST_L2_DROP_PKTS); + hw_stats->redir_input_match_drop = + eth_rd32(eth, RNP10_ETH_REDIR_INPUT_MATCH_DROP_PKTS); + hw_stats->redir_etype_match_drop = + eth_rd32(eth, RNP10_ETH_ETYPE_DROP_PKTS); + hw_stats->redir_tcp_syn_match_drop = + eth_rd32(eth, RNP10_ETH_TCP_SYN_DROP_PKTS); + hw_stats->redir_tuple5_match_drop = + eth_rd32(eth, RNP10_ETH_REDIR_TUPLE5_DROP_PKTS); + hw_stats->redir_tcam_match_drop = + eth_rd32(eth, RNP10_ETH_REDIR_TCAM_DROP_PKTS); + hw_stats->bmc_dropped_packets = + eth_rd32(eth, RNP10_ETH_DECAP_BMC_DROP_NUM); + hw_stats->switch_dropped_packets = + eth_rd32(eth, RNP10_ETH_DECAP_SWITCH_DROP_NUM); + hw_stats->mac_rx_broadcast = + mac_rd32(mac, RNP10_MAC_STATS_BROADCAST_LOW); + hw_stats->mac_rx_broadcast += + ((u64)mac_rd32(mac, RNP10_MAC_STATS_BROADCAST_HIGH) << 32); + hw_stats->mac_rx_multicast = + mac_rd32(mac, RNP10_MAC_STATS_MULTICAST_LOW); + hw_stats->mac_rx_multicast += + ((u64)mac_rd32(mac, RNP10_MAC_STATS_MULTICAST_HIGH) << 32); + hw_stats->mac_rx_pause_count = + mac_rd32(mac, RNP10_MAC_STATS_RX_PAUSE_COUNT_LOW); + hw_stats->mac_rx_pause_count += + ((u64)mac_rd32(mac, RNP10_MAC_STATS_RX_PAUSE_COUNT_HIGH) << 32); + hw_stats->mac_tx_pause_count = + mac_rd32(mac, RNP10_MAC_STATS_TX_PAUSE_COUNT_LOW); + hw_stats->mac_tx_pause_count += + ((u64)mac_rd32(mac, RNP10_MAC_STATS_TX_PAUSE_COUNT_HIGH) << 32); +} + +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT + +enum n10_priv_bits { + n10_mac_loopback = 0, + n10_switch_loopback = 1, + n10_veb_enable = 4, + n10_padding_enable = 8, + n10_padding_debug_enable = 0x10, +}; + +static const char rnp10_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define RNP10_MAC_LOOPBACK BIT(0) +#define RNP10_SWITCH_LOOPBACK BIT(1) +#define RNP10_VEB_ENABLE BIT(2) +#define RNP10_FT_PADDING BIT(3) +#define RNP10_PADDING_DEBUG BIT(4) +#define RNP10_PTP_FEATURE BIT(5) +#define RNP10_SIMULATE_DOWN BIT(6) +#define RNP10_VXLAN_INNER_MATCH BIT(7) +#define RNP10_STAG_ENABLE BIT(8) +#define RNP10_REC_HDR_LEN_ERR BIT(9) +#define RNP10_SRIOV_VLAN_MODE BIT(10) +#define RNP10_REMAP_MODE BIT(11) +#define RNP10_LLDP_EN_STAT BIT(12) +#define RNP10_FORCE_CLOSE BIT(13) + "mac_loopback", + "switch_loopback", + "veb_enable", + "pcie_patch", + "padding_debug", + "ptp_performance_debug", + "simulate_link_down", + "vxlan_inner_match", + "stag_enable", + "mask_len_err", + "sriov_vlan_mode", + "remap_mode1", + "lldp_en", + "link_down_on_close", +}; + +#define RNP10_PRIV_FLAGS_STR_LEN ARRAY_SIZE(rnp10_priv_flags_strings) +#endif + +const struct rnp_stats rnp10_gstrings_net_stats[] = { + RNP_NETDEV_STAT(rx_packets), + RNP_NETDEV_STAT(tx_packets), + RNP_NETDEV_STAT(rx_bytes), + RNP_NETDEV_STAT(tx_bytes), + RNP_NETDEV_STAT(rx_errors), + RNP_NETDEV_STAT(tx_errors), + 
RNP_NETDEV_STAT(rx_dropped), + RNP_NETDEV_STAT(tx_dropped), + RNP_NETDEV_STAT(multicast), + RNP_NETDEV_STAT(collisions), + RNP_NETDEV_STAT(rx_over_errors), + RNP_NETDEV_STAT(rx_crc_errors), + RNP_NETDEV_STAT(rx_frame_errors), + RNP_NETDEV_STAT(rx_fifo_errors), + RNP_NETDEV_STAT(rx_missed_errors), + RNP_NETDEV_STAT(tx_aborted_errors), + RNP_NETDEV_STAT(tx_carrier_errors), + RNP_NETDEV_STAT(tx_fifo_errors), + RNP_NETDEV_STAT(tx_heartbeat_errors), +}; + +#define RNP10_GLOBAL_STATS_LEN ARRAY_SIZE(rnp10_gstrings_net_stats) + +static struct rnp_stats rnp10_hwstrings_stats[] = { + RNP_HW_STAT("dma_to_mac", hw_stats.dma_to_dma), + RNP_HW_STAT("dma_to_switch", hw_stats.dma_to_switch), + RNP_HW_STAT("eth_to_dma", hw_stats.mac_to_dma), + RNP_HW_STAT("vlan_add_cnt", hw_stats.vlan_add_cnt), + RNP_HW_STAT("vlan_strip_cnt", hw_stats.vlan_strip_cnt), + RNP_HW_STAT("invalid_dropped_packets", + hw_stats.invalid_dropped_packets), + RNP_HW_STAT("rx_capabity_drop", hw_stats.rx_capabity_lost), + RNP_HW_STAT("filter_dropped_packets", hw_stats.filter_dropped_packets), + RNP_HW_STAT("host_l2_match_drop", hw_stats.host_l2_match_drop), + RNP_HW_STAT("redir_input_match_drop", hw_stats.redir_input_match_drop), + RNP_HW_STAT("redir_etype_match_drop", hw_stats.redir_etype_match_drop), + RNP_HW_STAT("redir_tcp_syn_match_drop", + hw_stats.redir_tcp_syn_match_drop), + RNP_HW_STAT("redir_tuple5_match_drop", + hw_stats.redir_tuple5_match_drop), + RNP_HW_STAT("redir_tcam_match_drop", hw_stats.redir_tcam_match_drop), + RNP_HW_STAT("bmc_dropped_packets", hw_stats.bmc_dropped_packets), + RNP_HW_STAT("switch_dropped_packets", hw_stats.switch_dropped_packets), + RNP_HW_STAT("rx_csum_offload_errors", hw_csum_rx_error), + RNP_HW_STAT("rx_csum_offload_good", hw_csum_rx_good), + RNP_HW_STAT("rx_broadcast_count", hw_stats.mac_rx_broadcast), + RNP_HW_STAT("rx_multicast_count", hw_stats.mac_rx_multicast), + RNP_HW_STAT("mac_rx_pause_count", hw_stats.mac_rx_pause_count), + RNP_HW_STAT("mac_tx_pause_count", hw_stats.mac_tx_pause_count), +}; + +#define RNP10_HWSTRINGS_STATS_LEN ARRAY_SIZE(rnp10_hwstrings_stats) + +#define RNP10_STATS_LEN \ + (RNP10_GLOBAL_STATS_LEN + RNP10_HWSTRINGS_STATS_LEN + \ + RNP_QUEUE_STATS_LEN) + +#ifndef CLOST_SELF_TEST +#ifdef ETHTOOL_TEST +static const char rnp10_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; + +#define RNP10_TEST_LEN (sizeof(rnp10_gstrings_test) / ETH_GSTRING_LEN) +#else +#define RNP10_TEST_LEN 0 +#endif +#else +#define RNP10_TEST_LEN 0 +#endif + +static int rnp10_get_regs_len(struct net_device *netdev) +{ +#define RNP10_REGS_LEN 1 + return RNP10_REGS_LEN * sizeof(u32); +} + +#define ADVERTISED_MASK_10G \ + (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full | \ + SUPPORTED_10000baseKR_Full) + +#define SUPPORTED_MASK_40G \ + (SUPPORTED_40000baseKR4_Full | SUPPORTED_40000baseCR4_Full | \ + SUPPORTED_40000baseSR4_Full | SUPPORTED_40000baseLR4_Full) + +#define ADVERTISED_MASK_40G \ + (SUPPORTED_40000baseKR4_Full | SUPPORTED_40000baseCR4_Full | \ + SUPPORTED_40000baseSR4_Full | SUPPORTED_40000baseLR4_Full) + +#ifdef HAVE_ETHTOOL_NEW_10G_BITS +#define SUPPORTED_10000baseT 0 +#else +#define SUPPORTED_10000baseT SUPPORTED_10000baseT_Full +#endif + +static int rnp_set_autoneg_adv_from_hw(struct rnp_hw *hw, + struct ethtool_link_ksettings *ks) +{ + u32 value_r0 = 0, value_r4 = 0, value_r9 = 0; + u32 value_r20, value_r412; + + /* Read autoneg state from phy */ + if 
(hw->phy_type == PHY_TYPE_SGMII) { + rnp_mbx_phy_read(hw, 0x0, &value_r0); + /* Not support AN, return directly */ + if (!(value_r0 & BIT(12))) + return 0; + + rnp_mbx_phy_read(hw, 0x4, &value_r4); + rnp_mbx_phy_read(hw, 0x9, &value_r9); + if (value_r4 & 0x100) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100baseT_Full); + if (value_r4 & 0x80) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100baseT_Half); + if (value_r4 & 0x40) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10baseT_Full); + if (value_r4 & 0x20) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10baseT_Half); + if (value_r9 & 0x200) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + if (value_r9 & 0x100) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Half); + } + + if (hw->phy_type == PHY_TYPE_10G_TP) { + rnp_mbx_phy_read(hw, (PHY_C45 | PHY_MMD(7) | 0x0), &value_r0); + + if (!(value_r0 & BIT(12))) + return 0; + + rnp_mbx_phy_read(hw, (PHY_C45 | PHY_MMD(7) | 0x20), &value_r20); + + if (value_r20 & BIT(12)) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + + rnp_mbx_phy_read(hw, (PHY_C45 | PHY_MMD_VEND2 | 0xa412), + &value_r412); + + if (value_r412 & BIT(8)) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + if (value_r412 & BIT(9)) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + } + + return 0; +} + +/** + * rnp_phy_type_to_ethtool - convert the phy_types to ethtool link modes + * @adapter: adapter struct with hw->phy_type + * @ks: ethtool link ksettings struct to fill out + * + **/ +static void rnp_phy_type_to_ethtool(struct rnp_adapter *adapter, + struct ethtool_link_ksettings *ks) +{ + struct rnp_hw *hw = &adapter->hw; + u32 supported_link = hw->supported_link; + u8 phy_type = hw->phy_type; + + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + + if (phy_type == PHY_TYPE_NONE) { + if (supported_link & RNP_LINK_SPEED_10GB_FULL) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); +#ifdef HAVE_ETHTOOL_NEW_10G_BITS + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseLR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseER_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseER_Full); +#endif /* HAVE_ETHTOOL_NEW_10G_BITS */ + } + + if (((supported_link & RNP_LINK_SPEED_10GB_FULL) || + (supported_link & RNP_LINK_SPEED_1GB_FULL))) { +#ifdef HAVE_ETHTOOL_NEW_10G_BITS + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseX_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseX_Full); +#else + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); +#endif /* HAVE_ETHTOOL_NEW_10G_BITS */ + } + } + if (phy_type == PHY_TYPE_SGMII) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, 
supported, + 100baseT_Half); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10baseT_Half); + + rnp_set_autoneg_adv_from_hw(hw, ks); + } + + if (phy_type == PHY_TYPE_10G_TP) { + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + rnp_set_autoneg_adv_from_hw(hw, ks); + } + + if (rnp_fw_is_old_ethtool(hw) && + (supported_link & RNP_LINK_SPEED_40GB_FULL)) { + supported_link |= RNP_SFP_MODE_40G_CR4 | RNP_SFP_MODE_40G_SR4 | + PHY_TYPE_40G_BASE_LR4; + } + + if (supported_link & RNP_SFP_MODE_40G_CR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseCR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseCR4_Full); + } + if (supported_link & RNP_SFP_MODE_40G_SR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseSR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseSR4_Full); + } + if (supported_link & RNP_SFP_MODE_40G_LR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseLR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseLR4_Full); + } + + /* add 25G support here */ +#ifdef HAVE_ETHTOOL_25G_BITS + if (supported_link & RNP_SFP_25G_SR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseSR_Full); + } + if (supported_link & RNP_SFP_25G_KR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + } + if (supported_link & RNP_SFP_25G_CR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseCR_Full); + } +#endif + + if (hw->is_backplane) { + if (phy_type == PHY_TYPE_40G_BASE_KR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseKR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseKR4_Full); + } + if (phy_type == PHY_TYPE_10G_BASE_KR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseKR_Full); + if (supported_link & RNP_LINK_SPEED_10GB_FULL) + ethtool_link_ksettings_add_link_mode( + ks, advertising, 10000baseKR_Full); + } + } + if (supported_link & RNP_SFP_MODE_1G_LX || + supported_link & RNP_SFP_MODE_1G_SX) { +#ifdef HAVE_ETHTOOL_NEW_10G_BITS + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseX_Full); + if (supported_link & RNP_LINK_SPEED_1GB_FULL) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseX_Full); + } +#else + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + if (supported_link & RNP_LINK_SPEED_1GB_FULL) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + } +#endif /* HAVE_ETHTOOL_NEW_10G_BITS */ + } + + if (phy_type == PHY_TYPE_1G_BASE_KX) { + if (hw->is_backplane) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseKX_Full); + if (supported_link & RNP_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode( + ks, advertising, 1000baseKX_Full); + } + + if ((supported_link & RNP_SFP_MODE_1G_T) || + (supported_link & RNP_LINK_SPEED_1GB_FULL)) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + if (supported_link & RNP_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode( + ks, advertising, 1000baseT_Full); + } + } +#ifdef 
HAVE_ETHTOOL_NEW_10G_BITS + /* need to add new 10G PHY types */ + if (phy_type == PHY_TYPE_10G_BASE_SR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + if (supported_link & RNP_LINK_SPEED_10GB_FULL) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseSR_Full); + } + if (phy_type == PHY_TYPE_10G_BASE_ER) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseER_Full); + if (supported_link & RNP_LINK_SPEED_10GB_FULL) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseER_Full); + } + if (phy_type == PHY_TYPE_10G_BASE_LR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + if (supported_link & RNP_LINK_SPEED_10GB_FULL) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseLR_Full); + } +#else + /* need to keep backward compatibility with older kernels */ + if (phy_type == PHY_TYPE_10G_BASE_SR || + phy_type == PHY_TYPE_10G_BASE_ER || + phy_type == PHY_TYPE_10G_BASE_LR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + if (supported_link & RNP_LINK_SPEED_10GB_FULL) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + } +#endif /* HAVE_ETHTOOL_NEW_10G_BITS */ + + if (hw->force_speed_stat == FORCE_SPEED_STAT_10G) { +#ifdef HAVE_ETHTOOL_NEW_10G_BITS + ethtool_link_ksettings_del_link_mode(ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_del_link_mode(ks, advertising, + 1000baseT_Full); + + ethtool_link_ksettings_del_link_mode(ks, supported, + 1000baseX_Full); + ethtool_link_ksettings_del_link_mode(ks, advertising, + 1000baseX_Full); + + if (phy_type == PHY_TYPE_1G_BASE_KX) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseLR_Full); + } +#else + ethtool_link_ksettings_del_link_mode(ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_del_link_mode(ks, advertising, + 10000baseT_Full); + + ethtool_link_ksettings_del_link_mode(ks, supported, + 10000baseT_Full); + ethtool_link_ksettings_del_link_mode(ks, advertising, + 10000baseT_Full); +#endif + } +} +/** + * rnp_get_settings_link_up - Get Link settings for when link is up + * @hw: hw structure + * @ks: ethtool ksettings to fill in + * @netdev: network interface device structure + **/ +static void rnp_get_settings_link_up(struct rnp_hw *hw, + struct ethtool_link_ksettings *ks, + struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct ethtool_link_ksettings cap_ksettings; + + /* Initialize supported and advertised settings based on phy settings */ + switch (hw->phy_type) { + case PHY_TYPE_40G_BASE_CR4: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseCR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseCR4_Full); + break; + + case PHY_TYPE_40G_BASE_SR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseSR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseSR4_Full); + break; + case PHY_TYPE_40G_BASE_LR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseLR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseLR4_Full); + break; + case PHY_TYPE_10G_BASE_SR: 
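+	/* the 10G fibre variants report the same set of link modes */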
+ case PHY_TYPE_10G_BASE_LR: + case PHY_TYPE_10G_BASE_ER: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); +#ifdef HAVE_ETHTOOL_NEW_10G_BITS + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseLR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseER_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseER_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseX_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseX_Full); +#endif /* HAVE_ETHTOOL_NEW_10G_BITS */ + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + if (hw->speed == SPEED_10000) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + break; + case PHY_TYPE_1G_BASE_KX: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + if (!!hw->is_backplane) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseKX_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseKX_Full); + } +#ifdef HAVE_ETHTOOL_NEW_10G_BITS + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseX_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseX_Full); +#endif + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + break; + + case PHY_TYPE_SGMII: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Half); + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Half); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10baseT_Half); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Half); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100baseT_Half); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10baseT_Half); + break; + + case PHY_TYPE_40G_BASE_KR4: + case PHY_TYPE_10G_BASE_KR: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseKR4_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseKX_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseKX4_Full); +#ifdef HAVE_ETHTOOL_25G_BITS + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); +#endif + 
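		/* advertise the same backplane/25G modes that were just marked supported */ +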
ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseKR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKX4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseKX_Full); +#ifdef HAVE_ETHTOOL_25G_BITS + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseCR_Full); +#endif + break; + case PHY_TYPE_10G_TP: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + break; + + default: + /* if we got here and link is up something bad is afoot + */ + netdev_info(netdev, + "WARNING: Link is up but PHY type 0x%x is not " + "recognized, or incorrect cable is in use\n", + hw->phy_type); + } + + /* Now that we've worked out everything that could be supported by the + * current PHY type, get what is supported by the NVM and intersect + * them to get what is truly supported + */ + memset(&cap_ksettings, 0, sizeof(struct ethtool_link_ksettings)); + rnp_phy_type_to_ethtool(adapter, &cap_ksettings); + ethtool_intersect_link_masks(ks, &cap_ksettings); + + /* Set speed and duplex */ + ks->base.speed = adapter->speed; + ks->base.duplex = hw->duplex; +} + +/** + * rnp_get_settings_link_down - Get the Link settings when link is down + * @hw: hw structure + * @ks: ethtool ksettings to fill in + * @netdev: network interface device structure + * + * Reports link settings that can be determined when link is down + **/ +static void rnp_get_settings_link_down(struct rnp_hw *hw, + struct ethtool_link_ksettings *ks, + struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + /* link is down and the driver needs to fall back on + * supported phy types to figure out what info to display + */ + rnp_phy_type_to_ethtool(adapter, ks); + + /* With no link speed and duplex are unknown */ + ks->base.speed = SPEED_UNKNOWN; + ks->base.duplex = DUPLEX_UNKNOWN; + +#ifdef ETHTOOL_GLINKSETTINGS + if ((hw->phy_type == PHY_TYPE_SGMII) || + (hw->phy_type == PHY_TYPE_10G_TP)) { + ks->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; +#ifdef ETH_TP_MDI_AUTO + ks->base.eth_tp_mdix_ctrl = hw->tp_mdix_ctrl; +#endif + } +#endif +} + +/** + * rnp_set_autoneg_state_from_hw - Set the autoneg state from hardware + * @hw: hw structure + * @ks: ethtool ksettings to fill in + * + * Set the autoneg state from hardware, like PHY + **/ +static int rnp_set_autoneg_state_from_hw(struct rnp_hw *hw, + struct ethtool_link_ksettings *ks) +{ + int ret; + struct rnp_adapter *adapter = hw->back; + + ks->base.autoneg = (adapter->an ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + /* Read autoneg state from phy */ + if (hw->phy_type == PHY_TYPE_SGMII) { + u32 value_r0 = 0; + ret = rnp_mbx_phy_read(hw, 0x0, &value_r0); + if (ret) + return -1; + + ks->base.autoneg = (value_r0 & BIT(12)) ? AUTONEG_ENABLE : + AUTONEG_DISABLE; + } + if (hw->phy_type == PHY_TYPE_10G_TP) { + u32 value_r0 = 0; + + rnp_mbx_phy_read(hw, PHY_826x_AN, &value_r0); + + ks->base.autoneg = (value_r0 & BIT(12)) ? 
AUTONEG_ENABLE : + AUTONEG_DISABLE; + if (value_r0) + adapter->an = 1; + } + + return 0; +} + +static int rnp_get_phy_mdix_from_hw(struct rnp_hw *hw) +{ + int ret; + int rmmd_reg = 0; + u32 value_r17 = 0; + + if (hw->phy_type == PHY_TYPE_SGMII) { + ret = rnp_mbx_phy_read(hw, 0x11, &value_r17); + if (ret) + return -1; + hw->phy.is_mdix = !!(value_r17 & 0x0040); + } + if (hw->phy_type == PHY_TYPE_10G_TP) { + rmmd_reg = (1 << 30) | (0x1f << 16) | (0xa430 & 0xffff); + ret = rnp_mbx_phy_read(hw, rmmd_reg, &value_r17); + if (ret) + return -1; + hw->phy.is_mdix = !!(value_r17 & 0x0200); + } + + return 0; +} + +__maybe_unused static bool fiber_unsupport(u32 supported_link, u8 phy_type) +{ + if ((phy_type == PHY_TYPE_10G_BASE_KR) || + (phy_type == PHY_TYPE_10G_BASE_SR) || + (phy_type == PHY_TYPE_10G_BASE_LR) || + (phy_type == PHY_TYPE_10G_BASE_ER)) { + if (!(supported_link & RNP_LINK_SPEED_10GB_FULL)) + return true; + } + + if ((phy_type == PHY_TYPE_40G_BASE_KR4) || + (phy_type == PHY_TYPE_40G_BASE_SR4) || + (phy_type == PHY_TYPE_40G_BASE_CR4) || + (phy_type == PHY_TYPE_40G_BASE_LR4)) { + if (!(supported_link & + (RNP_LINK_SPEED_40GB_FULL | RNP_LINK_SPEED_25GB_FULL))) + return true; + } + + if (phy_type == PHY_TYPE_1G_BASE_KX) { + if (!(supported_link & RNP_LINK_SPEED_1GB_FULL)) + return true; + } + + return false; +} + +static int rnp10_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ks) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + bool link_up; + int err; + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + + /* update hw from firmware */ + err = rnp_mbx_get_lane_stat(hw); + if (err /*|| fiber_unsupport(hw->supported_link, hw->phy_type)*/) { + /* + when force 1G speed and plugin in 10G-AOC, should not return + -1 + */ + return -1; + } + + /* update hw->phy.media_type by hw->phy_type */ + switch (hw->phy_type) { + case PHY_TYPE_NONE: + hw->phy.media_type = rnp_media_type_unknown; + break; + case PHY_TYPE_1G_BASE_KX: + if (hw->is_backplane) { + hw->phy.media_type = rnp_media_type_backplane; + } else if (hw->is_sgmii) { + hw->phy.media_type = rnp_media_type_copper; + } else { + if ((hw->supported_link & RNP_LINK_SPEED_1GB_FULL) || + (hw->supported_link & RNP_SFP_MODE_1G_LX)) { + hw->phy.media_type = rnp_media_type_fiber; + } else { + hw->phy.media_type = rnp_media_type_unknown; + } + } + break; + case PHY_TYPE_SGMII: + case PHY_TYPE_10G_TP: + hw->phy.media_type = rnp_media_type_copper; +#ifdef ETHTOOL_GLINKSETTINGS + ks->base.phy_address = adapter->phy_addr; +#endif + break; + case PHY_TYPE_10G_BASE_KR: + case PHY_TYPE_25G_BASE_KR: + case PHY_TYPE_40G_BASE_KR4: + hw->phy.media_type = rnp_media_type_backplane; + break; + case PHY_TYPE_10G_BASE_SR: + case PHY_TYPE_40G_BASE_SR4: + case PHY_TYPE_40G_BASE_CR4: + case PHY_TYPE_40G_BASE_LR4: + case PHY_TYPE_10G_BASE_LR: + case PHY_TYPE_10G_BASE_ER: + hw->phy.media_type = rnp_media_type_fiber; + break; + default: + hw->phy.media_type = rnp_media_type_unknown; + break; + } + + if (hw->supported_link & RNP_SFP_CONNECTOR_DAC) { + hw->phy.media_type = rnp_media_type_da; + } + + if ((hw->supported_link & RNP_SFP_TO_SGMII) || + (hw->supported_link & RNP_SFP_MODE_1G_T)) { + hw->phy.media_type = rnp_media_type_copper; + } + + /* Check Whether there is media on port */ + if (hw->phy.media_type == rnp_media_type_fiber) { + /* If adapter->sfp.mod_abs is 0, there is no media on port. 
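In that case report the PHY as absent so the port is exposed as PORT_OTHER.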
*/ + if (!adapter->sfp.mod_abs) { + hw->phy.media_type = rnp_media_type_unknown; + hw->phy_type = PHY_TYPE_NONE; + } + } + + /* Now set the settings that don't rely on link being up/down */ + /* Set autoneg settings */ + rnp_set_autoneg_state_from_hw(hw, ks); + + link_up = hw->link; + if (link_up) + rnp_get_settings_link_up(hw, ks, netdev); + else + rnp_get_settings_link_down(hw, ks, netdev); + + /* Set media type settings */ + switch (hw->phy.media_type) { + case rnp_media_type_backplane: + ethtool_link_ksettings_add_link_mode(ks, supported, Backplane); + ethtool_link_ksettings_add_link_mode(ks, advertising, + Backplane); + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ks->base.port = PORT_NONE; + break; + case rnp_media_type_copper: + ethtool_link_ksettings_add_link_mode(ks, supported, TP); + ethtool_link_ksettings_add_link_mode(ks, advertising, TP); + if (PHY_TYPE_SGMII == hw->phy_type) + ethtool_link_ksettings_add_link_mode(ks, supported, + Autoneg); + if (AUTONEG_ENABLE == ks->base.autoneg) + ethtool_link_ksettings_add_link_mode(ks, advertising, + Autoneg); + else + ethtool_link_ksettings_del_link_mode(ks, advertising, + Autoneg); + ks->base.port = PORT_TP; + break; + case rnp_media_type_da: + case rnp_media_type_cx4: + ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE); + ks->base.port = PORT_DA; + break; + case rnp_media_type_fiber: + ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE); + ks->base.port = PORT_FIBRE; + break; + case rnp_media_type_unknown: + default: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ks->base.port = PORT_OTHER; + break; + } + + if (hw->force_speed_stat != FORCE_SPEED_STAT_DISABLED) { + ethtool_link_ksettings_del_link_mode(ks, advertising, Autoneg); + } + + /* Set flow control settings */ + ethtool_link_ksettings_add_link_mode(ks, supported, Pause); + ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause); + + /* should get pause from hw if 10G-TP */ + switch (hw->fc.requested_mode) { + case rnp_fc_full: + ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); + break; + case rnp_fc_tx_pause: + ethtool_link_ksettings_add_link_mode(ks, advertising, + Asym_Pause); + break; + case rnp_fc_rx_pause: + ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); + ethtool_link_ksettings_add_link_mode(ks, advertising, + Asym_Pause); + break; + default: + ethtool_link_ksettings_del_link_mode(ks, advertising, Pause); + ethtool_link_ksettings_del_link_mode(ks, advertising, + Asym_Pause); + break; + } + +#ifdef ETHTOOL_GLINKSETTINGS +#ifdef ETH_TP_MDI_X + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if ((hw->phy_type == PHY_TYPE_SGMII) || + (hw->phy_type == PHY_TYPE_10G_TP)) { + if (rnp_get_phy_mdix_from_hw(hw)) { + ks->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + } else { + ks->base.eth_tp_mdix = hw->phy.is_mdix ? 
ETH_TP_MDI_X : + ETH_TP_MDI; + } + } else { + ks->base.eth_tp_mdix = hw->tp_mdx; + } + +#ifdef ETH_TP_MDI_AUTO + if (hw->phy.mdix == AUTO_ALL_MODES) + ks->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + ks->base.eth_tp_mdix_ctrl = hw->phy.mdix; + +#endif +#endif /* ETH_TP_MDI_X */ +#endif +#ifdef ETHTOOL_GLINKSETTINGS + rnp_logd(LOG_ETHTOOL, + "%s %s set link: speed=%d port=%d duplex=%d autoneg=%d " + "phy_address=%d, media_type=%d hw->phy_type:%d\n", + __func__, netdev->name, ks->base.speed, ks->base.port, + ks->base.duplex, ks->base.autoneg, ks->base.phy_address, + hw->phy.media_type, hw->phy_type); +#endif + return 0; +} + +#if defined(ETHTOOL_GLINKSETTINGS) && !defined(KYLIN_V4_ETHTOOL_FIX_BOND) +static int rnp10_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ks) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct ethtool_link_ksettings safe_ks; + struct ethtool_link_ksettings copy_ks; + bool autoneg_changed = false, duplex_changed = false; + int timeout = 50; + int err = 0; + u8 autoneg; + u32 advertising_link_speed, speed = 0; + + /* copy the ksettings to copy_ks to avoid modifying the origin */ + memcpy(©_ks, ks, sizeof(struct ethtool_link_ksettings)); + + /* save autoneg out of ksettings */ + + autoneg = copy_ks.base.autoneg; + rnp_logd(LOG_ETHTOOL, + "%s %s set link: speed=%d port=%d duplex=%d autoneg=%d " + "phy_address=%d\n", + __func__, netdev->name, copy_ks.base.speed, copy_ks.base.port, + copy_ks.base.duplex, copy_ks.base.autoneg, + copy_ks.base.phy_address); + + /* get our own copy of the bits to check against */ + memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings)); + safe_ks.base.cmd = copy_ks.base.cmd; + safe_ks.base.link_mode_masks_nwords = + copy_ks.base.link_mode_masks_nwords; + + if (rnp10_get_link_ksettings(netdev, &safe_ks)) { + /* return err */ + return 0; + } + /* Get link modes supported by hardware and check against modes + * requested by user. Return an error if unsupported mode was set. + */ + /* if autoneg is off, this is not error ? 
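Advertised modes outside the supported mask are rejected below regardless.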
*/ + if (!bitmap_subset(copy_ks.link_modes.advertising, + safe_ks.link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS)) { + return -EINVAL; + } + /* set autoneg back to what it currently is */ + copy_ks.base.autoneg = safe_ks.base.autoneg; + + memset(&advertising_link_speed, 0, sizeof(u32)); + + /* Check autoneg */ + if (autoneg == AUTONEG_ENABLE) { + /* If autoneg was not already enabled */ + if (!(adapter->an)) { + /* If autoneg is not supported, return error */ + if (!ethtool_link_ksettings_test_link_mode( + &safe_ks, supported, Autoneg)) { + netdev_info( + netdev, + "Autoneg not supported on this phy\n"); + err = -EINVAL; + goto done; + } + /* Autoneg is allowed to change */ + autoneg_changed = true; + } + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 10baseT_Full)) + advertising_link_speed |= RNP_LINK_SPEED_10_FULL; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 100baseT_Full)) + advertising_link_speed |= RNP_LINK_SPEED_100_FULL; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseT_Full) || +#ifdef HAVE_ETHTOOL_NEW_10G_BITS + ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseX_Full) || +#endif + ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseKX_Full)) + advertising_link_speed |= RNP_LINK_SPEED_1GB_FULL; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 10baseT_Half)) + advertising_link_speed |= RNP_LINK_SPEED_10_HALF; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 100baseT_Half)) + advertising_link_speed |= RNP_LINK_SPEED_100_HALF; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseT_Half)) + advertising_link_speed |= RNP_LINK_SPEED_1GB_HALF; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseT_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseKX4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseKR_Full) || +#ifdef HAVE_ETHTOOL_NEW_10G_BITS + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseCR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseSR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseLR_Full)) +#else + 0) +#endif /* HAVE_ETHTOOL_NEW_10G_BITS */ + advertising_link_speed |= RNP_LINK_SPEED_10GB_FULL; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseKR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseCR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseSR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseLR4_Full)) + advertising_link_speed |= RNP_LINK_SPEED_40GB_FULL; + + if (advertising_link_speed) { + hw->phy.autoneg_advertised = advertising_link_speed; + } else { + if ((hw->force_speed_stat == + FORCE_SPEED_STAT_DISABLED)) { + netdev_info(netdev, + "advertising_link_speed is 0\n"); + err = -EINVAL; + goto done; + } + } + + hw->advertised_link = advertising_link_speed; + if (hw->is_sgmii && hw->autoneg == false) + autoneg_changed = true; + hw->autoneg = true; + } else { + /* If autoneg is currently enabled */ + if (adapter->an) { + /* If autoneg is supported 10GBASE_T is the only PHY + * that can disable it, so otherwise return error + */ + if (ethtool_link_ksettings_test_link_mode( + &safe_ks, supported, Autoneg) && + hw->phy.media_type != rnp_media_type_copper) { + netdev_info( + netdev, + "Autoneg cannot be disabled on this phy\n"); + err = -EINVAL; + goto done; + } + /* Autoneg is 
allowed to change */ + autoneg_changed = true; + } + /* if 10G -TP, not support close an */ + if (hw->phy_type == PHY_TYPE_10G_TP) { + netdev_info(netdev, + "Autoneg cannot be disabled on this phy\n"); + err = -EINVAL; + goto done; + } + + /* Only allow one speed at a time when autoneg is AUTONEG_DISABLE. */ + switch (ks->base.speed) { + case SPEED_10: + speed = RNP_LINK_SPEED_10_FULL; + break; + case SPEED_100: + speed = RNP_LINK_SPEED_100_FULL; + break; + case SPEED_1000: + speed = RNP_LINK_SPEED_1GB_FULL; + break; + case SPEED_10000: + speed = RNP_LINK_SPEED_10GB_FULL; + break; + default: + netdev_info(netdev, "unsupported speed\n"); + err = -EINVAL; + goto done; + } + + hw->autoneg = false; + } + + hw->phy.autoneg_advertised = RNP_LINK_SPEED_UNKNOWN; + /* If speed didn't get set, set it to what it currently is. + * This is needed because if advertise is 0 (as it is when autoneg + * is disabled) then speed won't get set. + */ + + if (hw->is_sgmii) { + hw->duplex = ks->base.duplex; + duplex_changed = true; + } + + if (hw->phy_type == PHY_TYPE_10G_TP) { + hw->duplex = ks->base.duplex; + duplex_changed = true; + } + /* this sets the link speed and restarts auto-neg */ + while (test_and_set_bit(__RNP_IN_SFP_INIT, &adapter->state)) { + timeout--; + if (!timeout) + return -EBUSY; + usleep_range(1000, 2000); + } +#ifdef ETHTOOL_GLINKSETTINGS +#ifdef ETH_TP_MDI_AUTO + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (copy_ks.base.eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (copy_ks.base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = copy_ks.base.eth_tp_mdix_ctrl; + } + +#endif /* ETH_TP_MDI_AUTO */ +#endif + + hw->mac.autotry_restart = true; + /* set speed */ + err = hw->ops.setup_link(hw, advertising_link_speed, hw->autoneg, speed, + hw->duplex); + if (err) + e_info(probe, "setup link failed with code %d\n", err); + + clear_bit(__RNP_IN_SFP_INIT, &adapter->state); +done: + return err; +} +#else /* ETHTOOL_GLINKSETTINGS */ + +/** + * rnp10_get_settings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @ecmd: ethtool command + * + * Reports speed/duplex settings based on media_type. Since we've backported + * the new API constructs to use in the old API, this ends up just being a + * wrapper to rnpm_get_link_ksettings. + **/ +static int rnp10_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct ethtool_link_ksettings ks; + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (rnp10_get_link_ksettings(netdev, &ks)) { + /* return err */ + return 0; + } + _kc_ethtool_ksettings_to_cmd(&ks, ecmd); + /* setup sgmii */ + if ((hw->is_sgmii) || (hw->phy_type == PHY_TYPE_10G_TP)) { +#ifdef ETH_TP_MDI_X + if (rnp_get_phy_mdix_from_hw(hw)) { + ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + } else { + ecmd->eth_tp_mdix = hw->phy.is_mdix ? 
ETH_TP_MDI_X : + ETH_TP_MDI; + } +#ifdef ETH_TP_MDI_AUTO + if (hw->phy.mdix == AUTO_ALL_MODES) + ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + ecmd->eth_tp_mdix_ctrl = hw->phy.mdix; + +#endif +#endif /* ETH_TP_MDI_X */ + } else { +#ifdef ETH_TP_MDI_X + ecmd->eth_tp_mdix = hw->tp_mdx; +#ifdef ETH_TP_MDI_AUTO + if (hw->phy.mdix == AUTO_ALL_MODES) + ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + ecmd->eth_tp_mdix_ctrl = hw->phy.mdix; + + +#endif +#endif + } + + return 0; +} + +int rnp10_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct ethtool_cmd safe_ecmd; + bool change = false; + int timeout = 50; + int err = 0; + u8 autoneg; + u32 advertise, speed = 0; + u32 old_ethtool_advertising = 0; + u32 old_link_speed, advertising_link_speed = 0; + + /* get our own copy of the bits to check against */ + memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd)); + rnp10_get_settings(netdev, &safe_ecmd); + +#ifdef ETHTOOL_GLINKSETTINGS +#ifdef ETH_TP_MDI_AUTO + /* + * MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. + */ + if (ecmd->eth_tp_mdix_ctrl) { + if (hw->phy.media_type != rnp_media_type_copper) + return -EOPNOTSUPP; + + if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (ecmd->autoneg != AUTONEG_ENABLE)) { + netdev_info( + netdev, + "forcing MDI/MDI-X state is not" + "supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + +#endif /* ETH_TP_MDI_AUTO */ +#endif + + /* save autoneg and speed out of ecmd */ + autoneg = ecmd->autoneg; + advertise = ecmd->advertising; + + /* set autoneg and speed back to what they currently are */ + ecmd->autoneg = safe_ecmd.autoneg; + ecmd->advertising = safe_ecmd.advertising; + + /* Due to a bug in ethtool versions < 3.6 this check is necessary */ + old_ethtool_advertising = ecmd->supported & + (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | + ADVERTISED_2500baseX_Full | ADVERTISED_10000baseT_Full); + old_ethtool_advertising |= + (old_ethtool_advertising | ADVERTISED_20000baseMLD2_Full | + ADVERTISED_20000baseKR2_Full); + + if (advertise == old_ethtool_advertising) + netdev_info( + netdev, + "If you are not setting advertising to %x then you may " + "have an old version of ethtool. 
Please update.\n", + advertise); + ecmd->cmd = safe_ecmd.cmd; + /* If ecmd and safe_ecmd are not the same now, then they are + * trying to set something that we do not support + */ + + /* Check autoneg */ + if (autoneg == AUTONEG_ENABLE) { + /* If autoneg was not already enabled */ + if (!(adapter->an)) { + /* If autoneg is not supported, return error */ + if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) { + netdev_info( + netdev, + "Autoneg not supported on this phy\n"); + err = -EINVAL; + goto done; + } + /* Autoneg is allowed to change */ + change = true; + } + + if (advertise & ADVERTISED_10baseT_Full) + advertising_link_speed |= RNP_LINK_SPEED_10_FULL; + if (advertise & ADVERTISED_100baseT_Full) + advertising_link_speed |= RNP_LINK_SPEED_100_FULL; + if (advertise & ADVERTISED_1000baseT_Full || + advertise & ADVERTISED_1000baseKX_Full) + advertising_link_speed |= RNP_LINK_SPEED_1GB_FULL; + if (advertise & ADVERTISED_10000baseT_Full || + advertise & ADVERTISED_10000baseKX4_Full || + advertise & ADVERTISED_10000baseKR_Full) + advertising_link_speed |= RNP_LINK_SPEED_10GB_FULL; + + hw->phy.autoneg_advertised = advertising_link_speed; + if (hw->is_sgmii && hw->autoneg == false) + change = true; + if ((hw->phy_type == PHY_TYPE_10G_TP) && hw->autoneg == false) + change = true; + hw->autoneg = true; + } else { + /* If autoneg is currently enabled */ + if (adapter->an) { + /* If autoneg is supported 10GBASE_T is the only phy + * that can disable it, so otherwise return error + */ + if (safe_ecmd.supported & SUPPORTED_Autoneg) { + netdev_info( + netdev, + "Autoneg cannot be disabled on this phy\n"); + err = -EINVAL; + goto done; + } + /* Autoneg is allowed to change */ + change = true; + } + + if (hw->phy_type == PHY_TYPE_10G_TP) { + netdev_info(netdev, + "Autoneg cannot be disabled on this phy\n"); + err = -EINVAL; + goto done; + } + + if (ecmd->duplex == DUPLEX_HALF) { + netdev_info(netdev, "unsupported duplex\n"); + return -EINVAL; + } + /* Only allow one speed at a time when autoneg is AUTONEG_DISABLE. */ + speed = ethtool_cmd_speed(ecmd); + switch (speed) { + case SPEED_10: + advertising_link_speed = RNP_LINK_SPEED_10_FULL; + break; + case SPEED_100: + advertising_link_speed = RNP_LINK_SPEED_100_FULL; + break; + case SPEED_1000: + advertising_link_speed = RNP_LINK_SPEED_1GB_FULL; + break; + case SPEED_10000: + advertising_link_speed = RNP_LINK_SPEED_10GB_FULL; + break; + default: + netdev_info(netdev, "unsupported speed\n"); + return -EINVAL; + } + + hw->phy.autoneg_advertised = RNP_LINK_SPEED_UNKNOWN; + hw->autoneg = false; + } + + if (advertise & ~safe_ecmd.supported) { + err = -EINVAL; + goto done; + } + + /* If speed didn't get set, set it to what it currently is. + * This is needed because if advertise is 0 (as it is when autoneg + * is disabled) then speed won't get set. + */ + old_link_speed = hw->phy.autoneg_advertised; + if (!advertising_link_speed) + advertising_link_speed = old_link_speed; + + /* If the unsupported speed is set, return -EOPNOTSUPP error. 
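The requested mask is a subset of hw->supported_link exactly when OR-ing the two leaves the supported mask unchanged.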
*/ + if ((advertising_link_speed | hw->supported_link) != + hw->supported_link) { + return -EOPNOTSUPP; + } + +#ifdef ETHTOOL_GLINKSETTINGS +#ifdef ETH_TP_MDI_AUTO + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (ecmd->eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = ecmd->eth_tp_mdix_ctrl; + change = true; + } + +#endif /* ETH_TP_MDI_AUTO */ +#endif + if (change || (hw->phy.autoneg_advertised != advertising_link_speed)) { + /* this sets the link speed and restarts auto-neg */ + while (test_and_set_bit(__RNP_IN_SFP_INIT, &adapter->state)) { + timeout--; + if (!timeout) + return -EBUSY; + usleep_range(1000, 2000); + } + + hw->mac.autotry_restart = true; + /* set speed */ + err = hw->ops.setup_link(hw, advertising_link_speed, true, + speed, hw->duplex); + if (err) { + e_info(probe, "setup link failed with code %d\n", err); + hw->ops.setup_link(hw, old_link_speed, true, speed, + hw->duplex); + } + clear_bit(__RNP_IN_SFP_INIT, &adapter->state); + + } else { + netdev_info( + netdev, + "Nothing changed, exiting without setting anything.\n"); + } + +done: + return err; +} +#endif + +static void rnp10_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + strncpy(drvinfo->driver, rnp_driver_name, sizeof(drvinfo->driver)); + snprintf(drvinfo->version, sizeof(drvinfo->version), "%s", + rnp_driver_version); + + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%d.%d", ((unsigned char *)&(hw->fw_version))[3], + ((unsigned char *)&(hw->fw_version))[2], + ((unsigned char *)&(hw->fw_version))[1], + ((unsigned char *)&(hw->fw_version))[0]); + + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->n_stats = RNP10_STATS_LEN; + drvinfo->testinfo_len = RNP10_TEST_LEN; + drvinfo->regdump_len = rnp10_get_regs_len(netdev); +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT + drvinfo->n_priv_flags = RNP10_PRIV_FLAGS_STR_LEN; +#endif +} + +static void rnp10_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + u32 *regs_buff = p; + int i; + + memset(p, 0, RNP10_REGS_LEN * sizeof(u32)); + + for (i = 0; i < RNP10_REGS_LEN; i++) + regs_buff[i] = rd32(hw, i * 4); +} + +static int rnp_nway_reset(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + netdev_info(netdev, "NIC Link is Down\n"); + rnp_down(adapter); + msleep(10); + rnp_up(adapter); + return 0; +} + +/** + * rnpm_device_supports_autoneg_fc - Check if phy supports autoneg flow + * control + * @hw: pointer to hardware structure + * + * There are several phys that do not support autoneg flow control. This + * function check the device id to see if the associated phy supports + * autoneg flow control. 
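+ * Return: true if the attached PHY can autonegotiate flow control, false otherwise.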
+ **/ +static bool rnp_device_supports_autoneg_fc(struct rnp_hw *hw) +{ + bool supported = false; + + switch (hw->phy.media_type) { + case rnp_media_type_fiber: + break; + case rnp_media_type_backplane: + break; + case rnp_media_type_copper: + /* only some copper devices support flow control autoneg */ + supported = true; + break; + default: + break; + } + + return supported; +} + +static void rnp10_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + /* we don't support autoneg */ + if (rnp_device_supports_autoneg_fc(hw) && !hw->fc.disable_fc_autoneg) + pause->autoneg = 1; + else + pause->autoneg = 0; + if (hw->fc.current_mode == rnp_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == rnp_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == rnp_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int rnp10_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct rnp_fc_info fc = hw->fc; + + /* we not support change in dcb mode */ + if (adapter->flags & RNP_FLAG_DCB_ENABLED) + return -EINVAL; + + /* we not support autoneg mode */ + if ((pause->autoneg == AUTONEG_ENABLE) && + !rnp_device_supports_autoneg_fc(hw)) + return -EINVAL; + + fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); + fc.requested_mode &= (~(PAUSE_TX | PAUSE_RX)); + if (pause->autoneg) { + fc.requested_mode |= PAUSE_AUTO; + } else { + if (pause->tx_pause) + fc.requested_mode |= PAUSE_TX; + if (pause->rx_pause) + fc.requested_mode |= PAUSE_RX; + } + + if (hw->phy_type == PHY_TYPE_SGMII) { + u16 pause_bits = 0; + u32 value; + u32 value_r0; + + if (hw->fc.requested_mode == PAUSE_AUTO) { + pause_bits |= ASYM_PAUSE | SYM_PAUSE; + } else { + if ((hw->fc.requested_mode & PAUSE_TX) && + (!(hw->fc.requested_mode & PAUSE_RX))) { + pause_bits |= ASYM_PAUSE; + + } else if ((!(hw->fc.requested_mode & PAUSE_TX)) && + (!(hw->fc.requested_mode & PAUSE_RX))) { + } else + pause_bits |= ASYM_PAUSE | SYM_PAUSE; + } + rnp_mbx_phy_read(hw, 4, &value); + value &= ~0xC00; + value |= pause_bits; + rnp_mbx_phy_write(hw, 4, value); + + if (hw->autoneg) { + rnp_mbx_phy_read(hw, 0, &value_r0); + value_r0 |= BIT(9); + rnp_mbx_phy_write(hw, 0, value_r0); + } + } + + /* if the thing changed then we'll update and use new autoneg */ + if (memcmp(&fc, &hw->fc, sizeof(struct rnp_fc_info))) { + /* to tell all vf new pause status */ + hw->fc = fc; + rnp_msg_post_status(adapter, PF_PAUSE_STATUS); + if (netif_running(netdev)) + rnp_reinit_locked(adapter); + else + rnp_reset(adapter); + } + + return 0; +} + +static void rnp10_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + char *p = (char *)data; + int i; + struct rnp_ring *ring; + u32 dma_ch; + + switch (stringset) { + /* maybe we don't support test? 
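Self-test strings are reported only when CLOST_SELF_TEST is left undefined.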
*/ +#ifndef CLOST_SELF_TEST + case ETH_SS_TEST: + for (i = 0; i < RNP10_TEST_LEN; i++) { + memcpy(data, rnp10_gstrings_test[i], ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; +#endif + case ETH_SS_STATS: + for (i = 0; i < RNP10_GLOBAL_STATS_LEN; i++) { + memcpy(p, rnp10_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < RNP10_HWSTRINGS_STATS_LEN; i++) { + memcpy(p, rnp10_hwstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < RNP_NUM_TX_QUEUES; i++) { + /* ==== tx ======== */ + ring = adapter->tx_ring[i]; + dma_ch = ring->rnp_queue_idx; + sprintf(p, "---\n queue%u_tx_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_restart", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_busy", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_done_old", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_clean_desc", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_poll_count", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_irq_more", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_hw_head", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_hw_tail", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_sw_next_to_clean", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_sw_next_to_use", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_send_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_send_bytes_to_hw", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_todo_update", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_send_done_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_added_vlan_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_next_to_clean", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_irq_miss", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_equal_count", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_clean_times", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_clean_count", i); + p += ETH_GSTRING_LEN; + + /* ==== rx ======== */ + ring = adapter->rx_ring[i]; + dma_ch = ring->rnp_queue_idx; + sprintf(p, "queue%u_rx_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_driver_drop_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_rsc", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_rsc_flush", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_non_eop_descs", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_alloc_page_failed", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_alloc_buff_failed", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_alloc_page", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_csum_offload_errs", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_csum_offload_good", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_poll_again_count", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_rm_vlan_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_hw_head", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_hw_tail", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_sw_next_to_use", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_sw_next_to_clean", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_next_to_clean", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_irq_miss", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_equal_count", i); + p += ETH_GSTRING_LEN; + 
sprintf(p, "queue%u_rx_clean_times", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_clean_count", i); + p += ETH_GSTRING_LEN; + } + + break; +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT + case ETH_SS_PRIV_FLAGS: + memcpy(data, rnp10_priv_flags_strings, + RNP10_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + } +} + +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT +static int rnp10_get_stats_count(struct net_device *netdev) +{ + return RNP10_STATS_LEN; +} + +#else + +static int rnp10_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + /* now we don't support test */ +#ifndef CLOST_SELF_TEST + case ETH_SS_TEST: + return RNP10_TEST_LEN; +#endif + case ETH_SS_STATS: + return RNP10_STATS_LEN; + case ETH_SS_PRIV_FLAGS: + return RNP10_PRIV_FLAGS_STR_LEN; + default: + return -EOPNOTSUPP; + } +} + +static u32 rnp10_get_priv_flags(struct net_device *netdev) +{ + struct rnp_adapter *adapter = (struct rnp_adapter *)netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->priv_flags & RNP_PRIV_FLAG_MAC_LOOPBACK) + priv_flags |= RNP10_MAC_LOOPBACK; + if (adapter->priv_flags & RNP_PRIV_FLAG_SWITCH_LOOPBACK) + priv_flags |= RNP10_SWITCH_LOOPBACK; + if (adapter->priv_flags & RNP_PRIV_FLAG_VEB_ENABLE) + priv_flags |= RNP10_VEB_ENABLE; + if (adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) + priv_flags |= RNP10_FT_PADDING; + if (adapter->priv_flags & RNP_PRIV_FLAG_PADDING_DEBUG) + priv_flags |= RNP10_PADDING_DEBUG; + if (adapter->priv_flags & RNP_PRIV_FLAG_PTP_DEBUG) + priv_flags |= RNP10_PTP_FEATURE; + if (adapter->priv_flags & RNP_PRIV_FLAG_SIMUATE_DOWN) + priv_flags |= RNP10_SIMULATE_DOWN; + if (adapter->priv_flags & RNP_PRIV_FLAG_VXLAN_INNER_MATCH) + priv_flags |= RNP10_VXLAN_INNER_MATCH; + if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) + priv_flags |= RNP10_STAG_ENABLE; + if (adapter->priv_flags & RNP_PRIV_FLAG_REC_HDR_LEN_ERR) + priv_flags |= RNP10_REC_HDR_LEN_ERR; + if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) + priv_flags |= RNP10_SRIOV_VLAN_MODE; + if (adapter->priv_flags & RNP_PRIV_FLAG_REMAP_MODE) + priv_flags |= RNP10_REMAP_MODE; + if (adapter->priv_flags & RNP_PRIV_FLAG_LLDP_EN_STAT) + priv_flags |= RNP10_LLDP_EN_STAT; + if (adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE) + priv_flags |= RNP10_FORCE_CLOSE; + + return priv_flags; +} + +static int rnp10_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct rnp_adapter *adapter = (struct rnp_adapter *)netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + struct rnp_eth_info *eth = &hw->eth; + u32 data_old; + u32 data_new; + + data_old = dma_rd32(dma, RNP_DMA_CONFIG); + data_new = data_old; + + if (priv_flags & RNP10_MAC_LOOPBACK) { + SET_BIT(n10_mac_loopback, data_new); + adapter->priv_flags |= RNP_PRIV_FLAG_MAC_LOOPBACK; + } else if (adapter->priv_flags & RNP_PRIV_FLAG_MAC_LOOPBACK) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_MAC_LOOPBACK); + CLR_BIT(n10_mac_loopback, data_new); + } + + if (priv_flags & RNP10_LLDP_EN_STAT) { + if (rnp_mbx_lldp_port_enable(hw, true) == 0) { + adapter->priv_flags |= RNP_PRIV_FLAG_LLDP_EN_STAT; + } else { + rnp_err("%s: set lldp enable faild!\n", + adapter->netdev->name); + adapter->priv_flags &= (~RNP_PRIV_FLAG_LLDP_EN_STAT); + } + } else if (adapter->priv_flags & RNP_PRIV_FLAG_LLDP_EN_STAT) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_LLDP_EN_STAT); + rnp_mbx_lldp_port_enable(hw, false); + } + + if (priv_flags & RNP10_SWITCH_LOOPBACK) { + SET_BIT(n10_switch_loopback, data_new); + 
adapter->priv_flags |= RNP_PRIV_FLAG_SWITCH_LOOPBACK; + } else if (adapter->priv_flags & RNP_PRIV_FLAG_SWITCH_LOOPBACK) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_SWITCH_LOOPBACK); + CLR_BIT(n10_switch_loopback, data_new); + } + + if (priv_flags & RNP10_VEB_ENABLE) { + SET_BIT(n10_veb_enable, data_new); + adapter->priv_flags |= RNP_PRIV_FLAG_VEB_ENABLE; + } else if (adapter->priv_flags & RNP_PRIV_FLAG_VEB_ENABLE) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_VEB_ENABLE); + CLR_BIT(n10_veb_enable, data_new); + } + + if (priv_flags & RNP10_FT_PADDING) { + SET_BIT(n10_padding_enable, data_new); + adapter->priv_flags |= RNP_PRIV_FLAG_FT_PADDING; + } else if (adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_FT_PADDING); + CLR_BIT(n10_padding_enable, data_new); + } + + if (priv_flags & RNP10_PADDING_DEBUG) + adapter->priv_flags |= RNP_PRIV_FLAG_PADDING_DEBUG; + else if (adapter->priv_flags & RNP_PRIV_FLAG_PADDING_DEBUG) + adapter->priv_flags &= (~RNP_PRIV_FLAG_PADDING_DEBUG); + + if (priv_flags & RNP10_PTP_FEATURE) { + adapter->priv_flags |= RNP_PRIV_FLAG_PTP_DEBUG; + adapter->flags2 |= ~RNP_FLAG2_PTP_ENABLED; + } else if (adapter->priv_flags & RNP_PRIV_FLAG_PTP_DEBUG) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_PTP_DEBUG); + adapter->flags2 &= (~RNP_FLAG2_PTP_ENABLED); + } + + if (priv_flags & RNP10_SIMULATE_DOWN) { + adapter->priv_flags |= RNP_PRIV_FLAG_SIMUATE_DOWN; + /* set check link again */ + adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE; + } else if (adapter->priv_flags & RNP_PRIV_FLAG_SIMUATE_DOWN) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_SIMUATE_DOWN); + /* set check link again */ + adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE; + } + + if (priv_flags & RNP10_VXLAN_INNER_MATCH) { + adapter->priv_flags |= RNP_PRIV_FLAG_VXLAN_INNER_MATCH; + hw->ops.set_vxlan_mode(hw, true); + } else if (adapter->priv_flags & RNP_PRIV_FLAG_VXLAN_INNER_MATCH) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_VXLAN_INNER_MATCH); + hw->ops.set_vxlan_mode(hw, false); + } + + if (priv_flags & RNP10_STAG_ENABLE) + adapter->flags2 |= RNP_FLAG2_VLAN_STAGS_ENABLED; + else + adapter->flags2 &= (~RNP_FLAG2_VLAN_STAGS_ENABLED); + + if (priv_flags & RNP10_REC_HDR_LEN_ERR) { + adapter->priv_flags |= RNP_PRIV_FLAG_REC_HDR_LEN_ERR; + eth_wr32(eth, RNP10_ETH_ERR_MASK_VECTOR, + INNER_L4_BIT | PKT_LEN_ERR | HDR_LEN_ERR); + + } else if (adapter->priv_flags & RNP_PRIV_FLAG_REC_HDR_LEN_ERR) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_REC_HDR_LEN_ERR); + eth_wr32(eth, RNP10_ETH_ERR_MASK_VECTOR, INNER_L4_BIT); + } + + if (priv_flags & RNP10_REMAP_MODE) + adapter->priv_flags |= RNP_PRIV_FLAG_REMAP_MODE; + else + adapter->priv_flags &= (~RNP_PRIV_FLAG_REMAP_MODE); + + if (priv_flags & RNP10_SRIOV_VLAN_MODE) { + int i; + + adapter->priv_flags |= RNP_PRIV_FLAG_SRIOV_VLAN_MODE; + if (!(adapter->flags & RNP_FLAG_SRIOV_INIT_DONE)) + goto skip_setup_vf_vlan; + /* should setup vlvf table */ + for (i = 0; i < adapter->num_vfs; i++) { + if (hw->ops.set_vf_vlan_mode) { + if (adapter->vfinfo[i].vf_vlan) + hw->ops.set_vf_vlan_mode( + hw, adapter->vfinfo[i].vf_vlan, + i, true); + + if (adapter->vfinfo[i].pf_vlan) + hw->ops.set_vf_vlan_mode( + hw, adapter->vfinfo[i].pf_vlan, + i, true); + } + } + + } else if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) { + int i; + adapter->priv_flags &= (~RNP_PRIV_FLAG_SRIOV_VLAN_MODE); + /* should clean vlvf table */ + for (i = 0; i < hw->max_vfs; i++) { + if (hw->ops.set_vf_vlan_mode) + hw->ops.set_vf_vlan_mode(hw, 0, i, false); + } + } + + if (hw->force_link_supported) { 
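+ /* notify firmware whether the driver takes forced control of the MAC link on close */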
+ if (priv_flags & RNP10_FORCE_CLOSE) { + if (!(adapter->priv_flags & + RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) { + adapter->priv_flags |= + RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE; + if (hw->ops.driver_status) { + hw->ops.driver_status( + hw, true, + rnp_driver_force_control_mac); + } + } + } else { + if (adapter->priv_flags & + RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE) { + adapter->priv_flags &= + (~RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE); + if (hw->ops.driver_status) { + hw->ops.driver_status( + hw, false, + rnp_driver_force_control_mac); + } + } + } + } else { + if (priv_flags & RNP10_FORCE_CLOSE) + rnp_err("%s: firmware not support set `link_down_on_close` private flag\n", + adapter->netdev->name); + } + +skip_setup_vf_vlan: + + dbg("data new is %x\n", data_new); + if (data_old != data_new) + dma_wr32(dma, RNP_DMA_CONFIG, data_new); + /* if ft_padding changed */ + if (CHK_BIT(n10_padding_enable, data_old) != + CHK_BIT(n10_padding_enable, data_new)) { + rnp_msg_post_status(adapter, PF_FT_PADDING_STATUS); + } + + return 0; +} + +#endif + +static void rnp10_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct net_device_stats *net_stats = &netdev->stats; + struct rnp_ring *ring; + int i, j; + char *p = NULL; + + rnp_update_stats(adapter); + + for (i = 0; i < RNP10_GLOBAL_STATS_LEN; i++) { + p = (char *)net_stats + rnp10_gstrings_net_stats[i].stat_offset; + data[i] = (rnp10_gstrings_net_stats[i].sizeof_stat == + sizeof(u64)) ? + *(u64 *)p : + *(u32 *)p; + } + for (j = 0; j < RNP10_HWSTRINGS_STATS_LEN; j++, i++) { + p = (char *)adapter + rnp10_hwstrings_stats[j].stat_offset; + data[i] = + (rnp10_hwstrings_stats[j].sizeof_stat == sizeof(u64)) ? + *(u64 *)p : + *(u32 *)p; + } + + BUG_ON(RNP_NUM_TX_QUEUES != RNP_NUM_RX_QUEUES); + + for (j = 0; j < RNP_NUM_TX_QUEUES; j++) { + int idx; + /* tx-ring */ + ring = adapter->tx_ring[j]; + if (!ring) { + /* tx */ + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + /* rx */ + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + continue; + } + idx = ring->rnp_queue_idx; + + data[i++] = ring->stats.packets; + data[i++] = ring->stats.bytes; + data[i++] = ring->tx_stats.restart_queue; + data[i++] = ring->tx_stats.tx_busy; + data[i++] = ring->tx_stats.tx_done_old; + data[i++] = ring->tx_stats.clean_desc; + data[i++] = ring->tx_stats.poll_count; + data[i++] = ring->tx_stats.irq_more_count; + + /* rnp_tx_queue_ring_stat */ + data[i++] = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_HEAD); + data[i++] = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_TAIL); + data[i++] = ring->next_to_clean; + data[i++] = ring->next_to_use; + data[i++] = ring->tx_stats.send_bytes; + data[i++] = ring->tx_stats.send_bytes_to_hw; + data[i++] = ring->tx_stats.todo_update; + data[i++] = ring->tx_stats.send_done_bytes; + data[i++] = ring->tx_stats.vlan_add; + if 
(ring->tx_stats.tx_next_to_clean == -1) + data[i++] = ring->count; + else + data[i++] = ring->tx_stats.tx_next_to_clean; + data[i++] = ring->tx_stats.tx_irq_miss; + data[i++] = ring->tx_stats.tx_equal_count; + data[i++] = ring->tx_stats.tx_clean_times; + data[i++] = ring->tx_stats.tx_clean_count; + + /* rx-ring */ + ring = adapter->rx_ring[j]; + if (!ring) { + /* rx */ + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + continue; + } + idx = ring->rnp_queue_idx; + data[i++] = ring->stats.packets; + data[i++] = ring->stats.bytes; + + data[i++] = ring->rx_stats.driver_drop_packets; + data[i++] = ring->rx_stats.rsc_count; + data[i++] = ring->rx_stats.rsc_flush; + data[i++] = ring->rx_stats.non_eop_descs; + data[i++] = ring->rx_stats.alloc_rx_page_failed; + data[i++] = ring->rx_stats.alloc_rx_buff_failed; + data[i++] = ring->rx_stats.alloc_rx_page; + data[i++] = ring->rx_stats.csum_err; + data[i++] = ring->rx_stats.csum_good; + data[i++] = ring->rx_stats.poll_again_count; + data[i++] = ring->rx_stats.vlan_remove; + + /* rnp_rx_queue_ring_stat */ + data[i++] = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_HEAD); + data[i++] = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_TAIL); + data[i++] = ring->next_to_use; + data[i++] = ring->next_to_clean; + if (ring->rx_stats.rx_next_to_clean == -1) + data[i++] = ring->count; + else + data[i++] = ring->rx_stats.rx_next_to_clean; + data[i++] = ring->rx_stats.rx_irq_miss; + data[i++] = ring->rx_stats.rx_equal_count; + data[i++] = ring->rx_stats.rx_clean_times; + data[i++] = ring->rx_stats.rx_clean_count; + } +} + +/* n10 ethtool_ops ops here */ +static const struct ethtool_ops rnp10_ethtool_ops = { + +#if defined(ETHTOOL_GLINKSETTINGS) && !defined(KYLIN_V4_ETHTOOL_FIX_BOND) + .get_link_ksettings = rnp10_get_link_ksettings, + .set_link_ksettings = rnp10_set_link_ksettings, +#else + .get_settings = rnp10_get_settings, + .set_settings = rnp10_set_settings, +#endif + .get_drvinfo = rnp10_get_drvinfo, + .get_regs_len = rnp10_get_regs_len, + .get_regs = rnp10_get_regs, + .get_wol = rnp_get_wol, + .set_wol = rnp_set_wol, + .nway_reset = rnp_nway_reset, + .get_link = ethtool_op_get_link, + .get_ringparam = rnp_get_ringparam, + .set_ringparam = rnp_set_ringparam, + .get_pauseparam = rnp10_get_pauseparam, + .set_pauseparam = rnp10_set_pauseparam, + .get_msglevel = rnp_get_msglevel, + .set_msglevel = rnp_set_msglevel, +#ifdef ETHTOOL_GFECPARAM + .get_fecparam = rnp_get_fecparam, + .set_fecparam = rnp_set_fecparam, +#endif +#ifndef CLOST_SELF_TEST + .self_test = rnp_diag_test, +#endif + .get_strings = rnp10_get_strings, +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#ifdef HAVE_ETHTOOL_SET_PHYS_ID + .set_phys_id = rnp_set_phys_id, +#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +#ifndef HAVE_ETHTOOL_GET_SSET_COUNT + .get_stats_count = rnp10_get_stats_count, +#else /* HAVE_ETHTOOL_GET_SSET_COUNT */ + .get_sset_count = rnp10_get_sset_count, + .get_priv_flags = rnp10_get_priv_flags, + .set_priv_flags = rnp10_set_priv_flags, +#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + .get_ethtool_stats = rnp10_get_ethtool_stats, +#ifdef HAVE_ETHTOOL_GET_PERM_ADDR + .get_perm_addr = ethtool_op_get_perm_addr, +#endif /* HAVE_ETHTOOL_GET_PERM_ADDR */ 
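+ /* interrupt coalescing controls exposed via ethtool -c/-C */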
+ .get_coalesce = rnp_get_coalesce, + .set_coalesce = rnp_set_coalesce, +#ifdef HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES_IRQ | + ETHTOOL_COALESCE_MAX_FRAMES, +#endif +#ifndef HAVE_NDO_SET_FEATURES + .get_rx_csum = rnp_get_rx_csum, + .set_rx_csum = rnp_set_rx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = rnp_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, +#ifdef NETIF_F_TSO + .get_tso = ethtool_op_get_tso, + .set_tso = rnp_set_tso, +#endif /* NETIF_F_TSO */ +#ifdef ETHTOOL_GFLAGS + .get_flags = ethtool_op_get_flags, +#endif +#endif /* HAVE_NDO_SET_FEATURES */ +#ifdef ETHTOOL_GRXRINGS + .get_rxnfc = rnp_get_rxnfc, + .set_rxnfc = rnp_set_rxnfc, +#endif + +#ifdef ETHTOOL_SRXNTUPLE + .set_rx_ntuple = rnp_set_rx_ntuple, +#endif +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#ifdef ETHTOOL_SCHANNELS + .get_channels = rnp_get_channels, + .set_channels = rnp_set_channels, +#endif +#ifdef ETHTOOL_GMODULEINFO + .get_module_info = rnp_get_module_info, + .get_module_eeprom = rnp_get_module_eeprom, +#endif +#ifdef HAVE_ETHTOOL_GET_TS_INFO + .get_ts_info = rnp_get_ts_info, +#endif +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_indir_size = rnp_rss_indir_size, + .get_rxfh_key_size = rnp_get_rxfh_key_size, + .get_rxfh = rnp_get_rxfh, + .set_rxfh = rnp_set_rxfh, +#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ + + .get_dump_flag = rnp_get_dump_flag, + .get_dump_data = rnp_get_dump_data, + .set_dump = rnp_set_dump, +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ + +#if defined(HAVE_DDP_PROFILE_UPLOAD_SUPPORT) || defined(CONFIG_LINX_SERIAL) + .flash_device = rnp_flash_device, +#endif /* HAVE_DDP_PROFILE_UPLOAD_SUPPORT */ +}; + +static void rnp_set_ethtool_hw_ops_n10(struct net_device *netdev) +{ +#ifndef ETHTOOL_OPS_COMPAT + netdev->ethtool_ops = &rnp10_ethtool_ops; +#else + SET_ETHTOOL_OPS(netdev, &rnp10_ethtool_ops); +#endif +} + +/** + * rnp_get_thermal_sensor_data_hw_ops_n10 - Gathers thermal sensor data + * @hw: pointer to hardware structure + * Returns the thermal sensor data structure + **/ +static s32 rnp_get_thermal_sensor_data_hw_ops_n10(struct rnp_hw *hw) +{ + int voltage = 0; + struct rnp_thermal_sensor_data *data = &hw->thermal_sensor_data; + + voltage = voltage; + data->sensor[0].temp = rnp_mbx_get_temp(hw, &voltage); + + return 0; +} + +/** + * rnp_init_thermal_sensor_thresh_hw_ops_n10 - Inits thermal sensor thresholds + * @hw: pointer to hardware structure + * Inits the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +static s32 rnp_init_thermal_sensor_thresh_hw_ops_n10(struct rnp_hw *hw) +{ + u8 i; + struct rnp_thermal_sensor_data *data = &hw->thermal_sensor_data; + + for (i = 0; i < RNP_MAX_SENSORS; i++) { + data->sensor[i].location = i + 1; + data->sensor[i].caution_thresh = 90; + data->sensor[i].max_op_thresh = 100; + } + + return 0; +} + +static s32 rnp_phy_read_reg_hw_ops_n10(struct rnp_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + s32 status = 0; + u32 data = 0; + + status = rnp_mbx_phy_read(hw, reg_addr, &data); + *phy_data = data & 0xffff; + + return status; +} + +static s32 rnp_phy_write_reg_hw_ops_n10(struct rnp_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + s32 status = 0; + + status = rnp_mbx_phy_write(hw, reg_addr, (u32)phy_data); + + return status; +} + +static void rnp_set_vf_vlan_mode_hw_ops_n10(struct rnp_hw *hw, u16 vlan, 
int vf, + bool enable) +{ + struct rnp_eth_info *eth = &hw->eth; + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + + if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) + eth->ops.set_vf_vlan_mode(eth, vlan, vf, enable); +} + +static void rnp_driver_status_hw_ops_n10(struct rnp_hw *hw, bool enable, int mode) +{ +#ifndef NO_CM3_MBX + switch (mode) { + case rnp_driver_insmod: + rnp_mbx_ifinsmod(hw, enable); + break; + case rnp_driver_suspuse: + rnp_mbx_ifsuspuse(hw, enable); + break; + case rnp_driver_force_control_mac: + rnp_mbx_ifforce_control_mac(hw, enable); + + break; + } +#endif +} + +static struct rnp_hw_operations hw_ops_n10 = { + .init_hw = &rnp_init_hw_ops_n10, + .reset_hw = &rnp_reset_hw_ops_n10, + .start_hw = &rnp_start_hw_ops_n10, + .set_mtu = &rnp_set_mtu_hw_ops_n10, + .set_vlan_filter_en = &rnp_set_vlan_filter_en_hw_ops_n10, + .set_vlan_filter = &rnp_set_vlan_filter_hw_ops_n10, + .set_vf_vlan_filter = &rnp_set_vf_vlan_filter_hw_ops_n10, + .set_vlan_strip = &rnp_set_vlan_strip_hw_ops_n10, + .set_mac = &rnp_set_mac_hw_ops_n10, + .set_rx_mode = &rnp_set_rx_mode_hw_ops_n10, + .set_rar_with_vf = &rnp_set_rar_with_vf_hw_ops_n10, + .clr_rar = &rnp_clr_rar_hw_ops_n10, + .clr_rar_all = &rnp_clr_rar_all_hw_ops_n10, + .clr_vlan_veb = &rnp_clr_vlan_veb_hw_ops_n10, + .set_txvlan_mode = &rnp_set_txvlan_mode_hw_ops_n10, + .set_fcs_mode = &rnp_set_fcs_mode_hw_ops_n10, + .set_vxlan_port = &rnp_set_vxlan_port_hw_ops_n10, + .set_vxlan_mode = &rnp_set_vxlan_mode_hw_ops_n10, + .set_mac_rx = &rnp_set_mac_rx_hw_ops_n10, + .set_rx_hash = &rnp_set_rx_hash_hw_ops_n10, + .set_pause_mode = &rnp_set_pause_mode_hw_ops_n10, + .get_pause_mode = &rnp_get_pause_mode_hw_ops_n10, + .update_hw_info = &rnp_update_hw_info_hw_ops_n10, + .update_rx_drop = &rnp_update_hw_rx_drop_hw_ops_n10, + .update_sriov_info = &rnp_update_sriov_info_hw_ops_n10, + .set_sriov_status = &rnp_set_sriov_status_hw_ops_n10, + .set_sriov_vf_mc = &rnp_set_sriov_vf_mc_hw_ops_n10, + .init_rx_addrs = &rnp_init_rx_addrs_hw_ops_n10, + .clr_vfta = &rnp_clr_vfta_hw_ops_n10, + .set_rss_key = &rnp_set_rss_key_hw_ops_n10, + .set_rss_table = &rnp_set_rss_table_hw_ops_n10, + .update_hw_status = &rnp_update_hw_status_hw_ops_n10, + .set_mbx_link_event = &rnp_set_mbx_link_event_hw_ops_n10, + .set_mbx_ifup = &rnp_set_mbx_ifup_hw_ops_n10, + .check_link = &rnp_check_mac_link_hw_ops_n10, + .setup_link = &rnp_setup_mac_link_hw_ops_n10, + .clean_link = &rnp_clean_link_hw_ops_n10, + .set_layer2_remapping = &rnp_set_layer2_hw_ops_n10, + .clr_layer2_remapping = &rnp_clr_layer2_hw_ops_n10, + .clr_all_layer2_remapping = &rnp_clr_all_layer2_hw_ops_n10, + .set_tuple5_remapping = &rnp_set_tuple5_hw_ops_n10, + .clr_tuple5_remapping = &rnp_clr_tuple5_hw_ops_n10, + .clr_all_tuple5_remapping = &rnp_clr_all_tuple5_hw_ops_n10, + .set_tcp_sync_remapping = &rnp_set_tcp_sync_hw_ops_n10, + .update_msix_count = &rnp_update_msix_count_hw_ops_n10, + .get_thermal_sensor_data = &rnp_get_thermal_sensor_data_hw_ops_n10, + .init_thermal_sensor_thresh = + &rnp_init_thermal_sensor_thresh_hw_ops_n10, + .setup_ethtool = &rnp_set_ethtool_hw_ops_n10, + .phy_read_reg = &rnp_phy_read_reg_hw_ops_n10, + .phy_write_reg = &rnp_phy_write_reg_hw_ops_n10, + .set_vf_vlan_mode = &rnp_set_vf_vlan_mode_hw_ops_n10, + .driver_status = &rnp_driver_status_hw_ops_n10, +}; + +static void rnp_mac_set_rx_n10(struct rnp_mac_info *mac, bool status) +{ + struct rnp_hw *hw = (struct rnp_hw *)mac->back; + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + + u32 value = 0; + u32 count = 
0; + + if(pci_channel_offline(hw->pdev)){ + return; + } + + if (status) { + do { + value = mac_rd32(mac, RNP10_MAC_RX_CFG); +#ifdef IPC_OFF + /* force off ipc */ + value &= (~BIT(9)); +#endif + mac_wr32(mac, RNP10_MAC_RX_CFG, + value | 0x01); + usleep_range(100, 200); + value = mac_rd32(mac, RNP10_MAC_RX_CFG); + count++; + if (count > 1000) { + printk("setup rx on timeout\n"); + break; + } + } while (!(value & 0x01)); + + if (adapter->flags & RNP_FLAG_SWITCH_LOOPBACK_EN) { + mac_wr32(mac, RNP10_MAC_PKT_FLT, BIT(31) | BIT(0)); + eth_wr32(&hw->eth, RNP10_ETH_DMAC_MCSTCTRL, 0x0); + } else { + do { + value = mac_rd32(mac, RNP10_MAC_RX_CFG); +#ifdef IPC_OFF + /* force off ipc */ + value &= (~BIT(9)); +#endif + mac_wr32(mac, RNP10_MAC_RX_CFG, + value & (~0x400)); + usleep_range(100, 200); + value = mac_rd32(mac, RNP_MAC_RX_CFG); + count++; + if (count > 1000) { + printk("setup rx off timeout\n"); + break; + } + } while (value & 0x400); + if (hw->ncsi_en) + mac_wr32(mac, RNP10_MAC_PKT_FLT, 0x80000001); + else + mac_wr32(mac, RNP10_MAC_PKT_FLT, 0x00000001); + } + } else { + do { + value = mac_rd32(mac, RNP10_MAC_RX_CFG); +#ifdef IPC_OFF + /* force off ipc */ + value &= (~BIT(9)); +#endif + mac_wr32(mac, RNP10_MAC_RX_CFG, + value | 0x400); + usleep_range(100, 200); + value = mac_rd32(mac, RNP10_MAC_RX_CFG); + count++; + if (count > 1000) { + printk("setup rx on timeout\n"); + break; + } + } while (!(value & 0x400)); + mac_wr32(mac, RNP10_MAC_PKT_FLT, 0x0); + } +} + +static void rnp_mac_fcs_n10(struct rnp_mac_info *mac, bool status) +{ + u32 value; + +#define FCS_MASK (0x6) + value = mac_rd32(mac, RNP10_MAC_RX_CFG); + if (status) { + value &= (~FCS_MASK); + + } else { + value |= FCS_MASK; + } + + mac_wr32(mac, RNP10_MAC_RX_CFG, value); +} + +/** + * rnp_fc_mode_n10 - Enable flow control + * @hw: pointer to hardware structure + * + * Enable flow control according to the current settings. + **/ +static s32 rnp_mac_fc_mode_n10(struct rnp_mac_info *mac) +{ + struct rnp_hw *hw = (struct rnp_hw *)mac->back; + s32 ret_val = 0; + u32 reg; + u32 rxctl_reg, txctl_reg[RNP_MAX_TRAFFIC_CLASS]; + int i; + + /* + * Validate the water mark configuration for packet buffer 0. Zero + * water marks indicate that the packet buffer was not configured + * and the watermarks for packet buffer 0 should always be configured. + */ + if (!hw->fc.pause_time) { + ret_val = RNP_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* Disable any previous flow control settings */ + rxctl_reg = mac_rd32(mac, RNP10_MAC_RX_FLOW_CTRL); + rxctl_reg &= (~RNP10_RX_FLOW_ENABLE_MASK); + + for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) { + txctl_reg[i] = mac_rd32(mac, RNP10_MAC_Q0_TX_FLOW_CTRL(i)); + txctl_reg[i] &= (~RNP10_TX_FLOW_ENABLE_MASK); + } + /* + * The possible values of fc.current_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.current_mode) { + case rnp_fc_none: + /* + * Flow control is disabled by software override or autoneg. + * The code below will actually disable it in the HW. + */ + break; + case rnp_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. 
Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + rxctl_reg |= (RNP10_RX_FLOW_ENABLE_MASK); + break; + case rnp_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) + txctl_reg[i] |= (RNP10_TX_FLOW_ENABLE_MASK); + break; + case rnp_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + rxctl_reg |= (RNP10_RX_FLOW_ENABLE_MASK); + for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) + txctl_reg[i] |= (RNP10_TX_FLOW_ENABLE_MASK); + break; + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + ret_val = RNP_ERR_CONFIG; + goto out; + } + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time; + for (i = 0; i < (RNP_MAX_TRAFFIC_CLASS); i++) + txctl_reg[i] |= (reg << 16); + + /* Set 802.3x based flow control settings. */ + mac_wr32(mac, RNP10_MAC_RX_FLOW_CTRL, rxctl_reg); + for (i = 0; i < (RNP_MAX_TRAFFIC_CLASS); i++) + mac_wr32(mac, RNP10_MAC_Q0_TX_FLOW_CTRL(i), txctl_reg[i]); +out: + return ret_val; +} + +static void rnp_mac_set_mac_n10(struct rnp_mac_info *mac, u8 *addr, int index) +{ + u32 rar_low, rar_high = 0; + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | ((u32)addr[2] << 16) | + ((u32)addr[3] << 24)); + + rar_high = RNP_RAH_AV | ((u32)addr[4] | (u32)addr[5] << 8); + + mac_wr32(mac, RNP10_MAC_UNICAST_HIGH(index), rar_high); + mac_wr32(mac, RNP10_MAC_UNICAST_LOW(index), rar_low); +} + +static struct rnp_mac_operations mac_ops_n10 = { + .set_mac_rx = &rnp_mac_set_rx_n10, + .set_mac_fcs = &rnp_mac_fcs_n10, + .set_fc_mode = &rnp_mac_fc_mode_n10, + .set_mac = &rnp_mac_set_mac_n10, +}; + +static s32 rnp_get_invariants_n10(struct rnp_hw *hw) +{ + struct rnp_mac_info *mac = &hw->mac; + struct rnp_dma_info *dma = &hw->dma; + struct rnp_eth_info *eth = &hw->eth; + struct rnp_mbx_info *mbx = &hw->mbx; + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + int i; + + /* setup dma info */ + dma->dma_base_addr = hw->hw_addr; + dma->dma_ring_addr = hw->hw_addr + RNP10_RING_BASE; + dma->max_tx_queues = RNP_N10_MAX_TX_QUEUES; + dma->max_rx_queues = RNP_N10_MAX_RX_QUEUES; + dma->back = hw; + memcpy(&hw->dma.ops, &dma_ops_n10, sizeof(hw->dma.ops)); + + /* setup eth info */ + memcpy(&hw->eth.ops, ð_ops_n10, sizeof(hw->eth.ops)); + + eth->eth_base_addr = hw->hw_addr + RNP10_ETH_BASE; + printk(" eth_base is %p\n", eth->eth_base_addr); + eth->back = hw; + eth->mc_filter_type = 0; + eth->mcft_size = RNP_N10_MC_TBL_SIZE; + eth->vft_size = RNP_N10_VFT_TBL_SIZE; + if (hw->eco) + eth->num_rar_entries = RNP_N10_RAR_ENTRIES - 1; + else + eth->num_rar_entries = RNP_N10_RAR_ENTRIES; + + eth->max_rx_queues = RNP_N10_MAX_RX_QUEUES; + eth->max_tx_queues = RNP_N10_MAX_TX_QUEUES; + + /* setup mac info */ + memcpy(&hw->mac.ops, &mac_ops_n10, sizeof(hw->mac.ops)); + mac->mac_addr = hw->hw_addr + RNP10_MAC_BASE; + mac->back = hw; + mac->mac_type = mac_dwc_xlg; + /* move this to eth todo */ + mac->mc_filter_type = 0; + mac->mcft_size = RNP_N10_MC_TBL_SIZE; + mac->vft_size = RNP_N10_VFT_TBL_SIZE; + if (hw->eco) + mac->num_rar_entries = RNP_N10_RAR_ENTRIES - 1; + else + mac->num_rar_entries = RNP_N10_RAR_ENTRIES; + mac->max_rx_queues = RNP_N10_MAX_RX_QUEUES; + mac->max_tx_queues = RNP_N10_MAX_TX_QUEUES; + mac->max_msix_vectors = RNP_N10_MSIX_VECTORS; + if (!hw->axi_mhz) + 
hw->usecstocount = 500; + else + hw->usecstocount = hw->axi_mhz; + + /* set up hw feature */ + hw->feature_flags |= + RNP_NET_FEATURE_SG | RNP_NET_FEATURE_TX_CHECKSUM | + RNP_NET_FEATURE_RX_CHECKSUM | RNP_NET_FEATURE_TSO | + RNP_NET_FEATURE_TX_UDP_TUNNEL | RNP_NET_FEATURE_VLAN_FILTER | + RNP_NET_FEATURE_VLAN_OFFLOAD | + RNP_NET_FEATURE_RX_NTUPLE_FILTER | RNP_NET_FEATURE_TCAM | + RNP_NET_FEATURE_RX_HASH | RNP_NET_FEATURE_RX_FCS; + /* maybe supported future*/ + /* setup some fdir resource */ + hw->min_length = RNP_MIN_MTU; + hw->max_length = RNP_MAX_JUMBO_FRAME_SIZE; + hw->max_msix_vectors = RNP_N10_MSIX_VECTORS; + if (hw->eco) + hw->num_rar_entries = RNP_N10_RAR_ENTRIES - 1; + else + hw->num_rar_entries = RNP_N10_RAR_ENTRIES; + hw->fdir_mode = fdir_mode_tuple5; + hw->max_vfs = RNP_N10_MAX_VF; + hw->max_vfs_noari = 3; + hw->sriov_ring_limit = 2; + /* some user only want 1 queue for each vf */ + hw->max_pf_macvlans = RNP_MAX_PF_MACVLANS_N10; + hw->wol_supported = WAKE_MAGIC; + /* ncsi */ + hw->ncsi_vf_cpu_shm_pf_base = RNP_VF_CPU_SHM_BASE_NR62; + hw->ncsi_mc_count = RNP_NCSI_MC_COUNT; + hw->ncsi_vlan_count = RNP_NCSI_VLAN_COUNT; + /* we suppose 1536 */ + hw->dma_split_size = 1536; + if (hw->fdir_mode == fdir_mode_tcam) { + hw->layer2_count = RNP10_MAX_LAYER2_FILTERS - 1; + hw->tuple5_count = RNP10_MAX_TCAM_FILTERS - 1; + } else { + hw->layer2_count = RNP10_MAX_LAYER2_FILTERS - 1; + hw->tuple5_count = RNP10_MAX_TUPLE5_FILTERS - 1; + } + + hw->default_rx_queue = 0; + hw->rss_indir_tbl_num = RNP_N10_RSS_TBL_NUM; + hw->rss_tc_tbl_num = RNP_N10_RSS_TC_TBL_NUM; + /* vf use the last vfnum */ + hw->vfnum = RNP_N10_MAX_VF - 1; + hw->feature_flags |= RNP_NET_FEATURE_VF_FIXED; + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + hw->veb_ring = 0; + else + hw->veb_ring = RNP_N10_MAX_RX_QUEUES; + + memcpy(&hw->ops, &hw_ops_n10, sizeof(hw->ops)); + /* PHY */ + /* setup pcs */ + memcpy(&hw->pcs.ops, &pcs_ops_generic, sizeof(hw->pcs.ops)); + mbx->mbx_feature |= MBX_FEATURE_WRITE_DELAY; + mbx->vf2pf_mbox_vec_base = 0xa5100; + mbx->cpu2pf_mbox_vec = 0xa5300; + mbx->pf_vf_shm_base = 0xa6000; + mbx->mbx_mem_size = 64; + mbx->pf2vf_mbox_ctrl_base = 0xa7100; + mbx->pf_vf_mbox_mask_lo = 0xa7200; + mbx->pf_vf_mbox_mask_hi = 0xa7300; + mbx->cpu_pf_shm_base = 0xaa000; + mbx->pf2cpu_mbox_ctrl = 0xaa100; + mbx->cpu_pf_mbox_mask = 0xaa300; + adapter->drop_time = 10000; + hw->fc.requested_mode = PAUSE_TX | PAUSE_RX; + hw->fc.pause_time = RNP_DEFAULT_FCPAUSE; + for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) { + hw->fc.high_water[i] = RNP10_DEFAULT_HIGH_WATER; + hw->fc.low_water[i] = RNP10_DEFAULT_LOW_WATER; + } +#ifdef FIX_MAC_PADDIN + adapter->priv_flags |= RNP_PRIV_FLAG_TX_PADDING; + +#endif + return 0; +} + +struct rnp_info rnp_n10_info = { + .one_pf_with_two_dma = false, + .total_queue_pair_cnts = RNP_N10_MAX_TX_QUEUES, + .adapter_cnt = 1, + .rss_type = rnp_rss_n10, + .hw_type = rnp_hw_n10, + .get_invariants = &rnp_get_invariants_n10, + .mac_ops = &mac_ops_n10, + .eeprom_ops = NULL, + .mbx_ops = &mbx_ops_generic, + .pcs_ops = &pcs_ops_generic, +}; + +static s32 rnp_get_invariants_n400(struct rnp_hw *hw) +{ + struct rnp_mac_info *mac = &hw->mac; + struct rnp_dma_info *dma = &hw->dma; + struct rnp_eth_info *eth = &hw->eth; + struct rnp_mbx_info *mbx = &hw->mbx; + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + int i; + /* setup dma info */ + dma->dma_base_addr = hw->hw_addr; + dma->dma_ring_addr = hw->hw_addr + RNP10_RING_BASE; + dma->max_tx_queues = RNP_N400_MAX_TX_QUEUES; + dma->max_rx_queues = 
RNP_N400_MAX_RX_QUEUES; + dma->back = hw; + memcpy(&hw->dma.ops, &dma_ops_n10, sizeof(hw->dma.ops)); + + /* setup eth info */ + memcpy(&hw->eth.ops, ð_ops_n10, sizeof(hw->eth.ops)); + eth->eth_base_addr = hw->hw_addr + RNP10_ETH_BASE; + eth->back = hw; + eth->mc_filter_type = 0; + eth->mcft_size = RNP_N10_MC_TBL_SIZE; + eth->vft_size = RNP_N10_VFT_TBL_SIZE; + if (hw->eco) + eth->num_rar_entries = RNP_N10_RAR_ENTRIES - 1; + else + eth->num_rar_entries = RNP_N10_RAR_ENTRIES; + eth->max_rx_queues = RNP_N400_MAX_RX_QUEUES; + eth->max_tx_queues = RNP_N400_MAX_TX_QUEUES; + + /* setup mac info */ + memcpy(&hw->mac.ops, &mac_ops_n10, sizeof(hw->mac.ops)); + mac->mac_addr = hw->hw_addr + RNP10_MAC_BASE; + mac->back = hw; + mac->mac_type = mac_dwc_xlg; + /* move this to eth todo */ + mac->mc_filter_type = 0; + mac->mcft_size = RNP_N10_MC_TBL_SIZE; + mac->vft_size = RNP_N10_VFT_TBL_SIZE; + if (hw->eco) + mac->num_rar_entries = RNP_N10_RAR_ENTRIES - 1; + else + mac->num_rar_entries = RNP_N10_RAR_ENTRIES; + mac->max_rx_queues = RNP_N400_MAX_RX_QUEUES; + mac->max_tx_queues = RNP_N400_MAX_TX_QUEUES; + mac->max_msix_vectors = RNP_N400_MSIX_VECTORS; + if (!hw->axi_mhz) + hw->usecstocount = 125; + else + hw->usecstocount = hw->axi_mhz; + /* set up hw feature */ + hw->feature_flags |= + RNP_NET_FEATURE_SG | RNP_NET_FEATURE_TX_CHECKSUM | + RNP_NET_FEATURE_RX_CHECKSUM | RNP_NET_FEATURE_TSO | + RNP_NET_FEATURE_TX_UDP_TUNNEL | RNP_NET_FEATURE_VLAN_FILTER | + RNP_NET_FEATURE_VLAN_OFFLOAD | + RNP_NET_FEATURE_RX_NTUPLE_FILTER | RNP_NET_FEATURE_TCAM | + RNP_NET_FEATURE_RX_HASH | RNP_NET_FEATURE_RX_FCS; + /* setup some fdir resource */ + hw->min_length = RNP_MIN_MTU; + hw->max_length = RNP_MAX_JUMBO_FRAME_SIZE; + hw->max_msix_vectors = RNP_N400_MSIX_VECTORS; + if (hw->eco) + hw->num_rar_entries = RNP_N10_RAR_ENTRIES - 1; + else + hw->num_rar_entries = RNP_N10_RAR_ENTRIES; + hw->fdir_mode = fdir_mode_tuple5; + hw->max_vfs = RNP_N400_MAX_VF; + hw->max_vfs_noari = 3; + /* n400 only use 1 ring for each vf */ + hw->sriov_ring_limit = 1; + hw->max_pf_macvlans = RNP_MAX_PF_MACVLANS_N10; + hw->wol_supported = WAKE_MAGIC; + /* ncsi */ + hw->ncsi_vf_cpu_shm_pf_base = RNP_VF_CPU_SHM_BASE_NR62; + hw->ncsi_mc_count = RNP_NCSI_MC_COUNT; + hw->ncsi_vlan_count = RNP_NCSI_VLAN_COUNT; + + if (hw->fdir_mode == fdir_mode_tcam) { + hw->layer2_count = RNP10_MAX_LAYER2_FILTERS - 1; + hw->tuple5_count = RNP10_MAX_TCAM_FILTERS - 1; + } else { + hw->layer2_count = RNP10_MAX_LAYER2_FILTERS - 1; + hw->tuple5_count = RNP10_MAX_TUPLE5_FILTERS - 1; + } + + hw->default_rx_queue = 0; + hw->rss_indir_tbl_num = RNP_N10_RSS_TBL_NUM; + hw->rss_tc_tbl_num = RNP_N10_RSS_TC_TBL_NUM; + /* vf use the last vfnum */ + hw->vfnum = RNP_N400_MAX_VF - 1; + + /* n400 should fix_vf_bug */ + hw->feature_flags |= RNP_NET_FEATURE_VF_FIXED; + + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + hw->veb_ring = 0; + hw->default_vf_num = 0; + } else { + hw->veb_ring = RNP_N400_MAX_RX_QUEUES; + hw->default_vf_num = RNP_N10_MAX_VF - 1; + } + + memcpy(&hw->ops, &hw_ops_n10, sizeof(hw->ops)); + /* setup pcs */ + memcpy(&hw->pcs.ops, &pcs_ops_generic, sizeof(hw->pcs.ops)); + mbx->mbx_feature |= MBX_FEATURE_WRITE_DELAY; + mbx->vf2pf_mbox_vec_base = 0xa5100; + mbx->cpu2pf_mbox_vec = 0xa5300; + mbx->pf_vf_shm_base = 0xa6000; + mbx->mbx_mem_size = 64; + mbx->pf2vf_mbox_ctrl_base = 0xa7100; + mbx->pf_vf_mbox_mask_lo = 0xa7200; + mbx->pf_vf_mbox_mask_hi = 0xa7300; + mbx->cpu_pf_shm_base = 0xaa000; + mbx->pf2cpu_mbox_ctrl = 0xaa100; + mbx->cpu_pf_mbox_mask = 0xaa300; + + 
adapter->drop_time = 10000; + /* initialization default pause flow */ + hw->fc.requested_mode |= PAUSE_AUTO; + hw->fc.pause_time = RNP_DEFAULT_FCPAUSE; + for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) { + hw->fc.high_water[i] = RNP10_DEFAULT_HIGH_WATER; + hw->fc.low_water[i] = RNP10_DEFAULT_LOW_WATER; + } + + hw->autoneg = 1; + +#ifdef ETH_TP_MDI_AUTO + hw->tp_mdix_ctrl = ETH_TP_MDI_AUTO; +#endif + + return 0; +} + +struct rnp_info rnp_n400_info = { + .one_pf_with_two_dma = false, + .total_queue_pair_cnts = RNP_N400_MAX_TX_QUEUES, + .adapter_cnt = 1, + .rss_type = rnp_rss_n10, + .hw_type = rnp_hw_n400, + .get_invariants = &rnp_get_invariants_n400, + .mac_ops = &mac_ops_n10, + .eeprom_ops = NULL, + .mbx_ops = &mbx_ops_generic, + .pcs_ops = &pcs_ops_generic, +}; diff --git a/drivers/net/ethernet/mucse/rnp/rnp_param.c b/drivers/net/ethernet/mucse/rnp/rnp_param.c new file mode 100755 index 0000000000000000000000000000000000000000..0fffa6dc15f2bdbea70bc34849af0e0435d79355 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_param.c @@ -0,0 +1,388 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include +#include + +#include "rnp.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define RNP_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define STRINGIFY(foo) #foo /* magic for getting defines into strings */ +#define XSTRINGIFY(bar) STRINGIFY(bar) + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define RNP_PARAM_INIT \ + { \ + [0 ... RNP_MAX_NIC] = OPTION_UNSET \ + } +#ifndef module_param_array +/* Module Parameters are always initialized to -1, so that the driver + * can tell the difference between no user specified value or the + * user asking for the default value. + * The true default values are loaded in when rnp_check_options is called. + * + * This is a GCC extension to ANSI C. + * See the item "Labelled Elements in Initializers" in the section + * "Extensions to the C Language Family" of the GCC documentation. 
+ */ + +#define RNP_PARAM(X, desc) \ + static const int __devinitdata X[RNP_MAX_NIC + 1] = RNP_PARAM_INIT; \ + MODULE_PARM(X, "1-" __MODULE_STRING(RNP_MAX_NIC) "i"); \ + MODULE_PARM_DESC(X, desc); +#else +#define RNP_PARAM(X, desc) \ + static int __devinitdata X[RNP_MAX_NIC + 1] = RNP_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); +#endif +/* IntMode (Interrupt Mode) + * + * Valid Range: 0-2 + * - 0 - Legacy Interrupt + * - 1 - MSI Interrupt + * - 2 - MSI-X Interrupt(s) + * + * Default Value: 2 + */ +RNP_PARAM(IntMode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " + "default 2"); +#define RNP_INT_LEGACY 0 +#define RNP_INT_MSI 1 +#define RNP_INT_MSIX 2 + +#ifdef CONFIG_PCI_IOV +/* max_vfs - SR-IOV (Single Root I/O Virtualization) + * + * Valid Range: 0-63 for n10 + * Valid Range: 0-7 for n400/n10 + * - 0 - Disables SR-IOV + * - 1-x - enables SR-IOV and sets the number of VFs enabled + * + * Default Value: 0 + */ + +RNP_PARAM(max_vfs, "Number of Virtual Functions: 0 = disable (default), " + "1-" XSTRINGIFY(MAX_SRIOV_VFS) " = enable " + "this many VFs"); + +/* SRIOV_Mode (SRIOV Mode) + * + * Valid Range: 0-1 + * - 0 - MAC_MODE + * - 1 - VLAN_MODE + * + * Default Value: 0 + */ +RNP_PARAM(SRIOV_Mode, "Change SRIOV Mode (0=MAC_MODE, 1=VLAN_MODE), " + "default 0"); +#define RNP_SRIOV_MAC_MODE 0 +#define RNP_SRIOV_VLAN_MODE 1 +#endif + +/* pf_msix_counts_set - Limit the maximum number of MSI-X vectors + * + * Valid Range: 2-63 for n10 + * Valid Range: 2-7 for n400/n10 + * + * Default Value: 0 (no limit) + */ +RNP_PARAM(pf_msix_counts_set, "Maximum MSI-X vector count (default: no limit)"); +#define RNP_INT_MIN 2 + +struct rnp_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + const char *msg; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct rnp_opt_list { + int i; + char *str; + } *p; + } l; + } arg; +}; + +#ifdef HAVE_CONFIG_HOTPLUG +static int __devinit rnp_validate_option(struct net_device *netdev, + unsigned int *value, + struct rnp_option *opt) +#else +static int rnp_validate_option(struct net_device *netdev, unsigned int *value, + struct rnp_option *opt) +#endif +{ + if (*value == OPTION_UNSET) { + netdev_info(netdev, "Invalid %s specified (%d), %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + netdev_info(netdev, "%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + netdev_info(netdev, "%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if ((*value >= opt->arg.r.min && *value <= opt->arg.r.max) || + *value == opt->def) { + if (opt->msg) + netdev_info(netdev, "%s set to %d, %s\n", + opt->name, *value, opt->msg); + else + netdev_info(netdev, "%s set to %d\n", opt->name, + *value); + return 0; + } + break; + case list_option: { + int i; + + for (i = 0; i < opt->arg.l.nr; i++) { + const struct rnp_opt_list *ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + netdev_info(netdev, "%s\n", ent->str); + return 0; + } + } + } break; + default: + BUG(); + } + + netdev_info(netdev, "Invalid %s specified (%d), %s\n", opt->name, + *value, opt->err); + *value = opt->def; + return -1; +} + +#define LIST_LEN(l) (sizeof(l) / sizeof(l[0])) +#define PSTR_LEN 10 + +/** + * rnp_check_options - 
Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. + **/ +#ifdef HAVE_CONFIG_HOTPLUG +void __devinit rnp_check_options(struct rnp_adapter *adapter) +#else +void rnp_check_options(struct rnp_adapter *adapter) +#endif +{ + int bd = adapter->bd_number; + u32 *aflags = &adapter->flags; + + if (bd >= RNP_MAX_NIC) { + netdev_notice(adapter->netdev, + "Warning: no configuration for board #%d\n", bd); + netdev_notice(adapter->netdev, + "Using defaults for all values\n"); +#ifndef module_param_array + bd = RNP_MAX_NIC; +#endif + } + + /* try to setup new irq mode */ + { /* Interrupt Mode */ + unsigned int int_mode; + static struct rnp_option opt = { + .type = range_option, + .name = "Interrupt Mode", + .err = "using default of " __MODULE_STRING( + RNP_INT_MSIX), + .def = RNP_INT_MSIX, + .arg = { .r = { .min = RNP_INT_LEGACY, + .max = RNP_INT_MSIX } } + }; + +#ifdef module_param_array + if (num_IntMode > bd) { +#endif + int_mode = IntMode[bd]; + if (int_mode == OPTION_UNSET) + int_mode = RNP_INT_MSIX; + rnp_validate_option(adapter->netdev, &int_mode, &opt); + switch (int_mode) { + case RNP_INT_MSIX: + if (!(*aflags & RNP_FLAG_MSIX_CAPABLE)) { + netdev_info(adapter->netdev, + "Ignoring MSI-X setting; " + "support unavailable\n"); + } else + adapter->irq_mode = irq_mode_msix; + break; + case RNP_INT_MSI: + if (!(*aflags & RNP_FLAG_MSI_CAPABLE)) { + netdev_info(adapter->netdev, + "Ignoring MSI setting; " + "support unavailable\n"); + } else + adapter->irq_mode = irq_mode_msi; + + break; + case RNP_INT_LEGACY: + if (!(*aflags & RNP_FLAG_LEGACY_CAPABLE)) { + netdev_info(adapter->netdev, + "Ignoring MSI setting; " + "support unavailable\n"); + } else + adapter->irq_mode = irq_mode_legency; + + break; + } +#ifdef module_param_array + } else { + /* default settings */ + /* msix -> msi -> Legacy */ + if (*aflags & RNP_FLAG_MSIX_CAPABLE) + adapter->irq_mode = irq_mode_msix; + else if (*aflags & RNP_FLAG_MSI_CAPABLE) + adapter->irq_mode = irq_mode_msi; + else + adapter->irq_mode = irq_mode_legency; + } +#endif + } + +#ifdef CONFIG_PCI_IOV + { /* Single Root I/O Virtualization (SR-IOV) */ + struct rnp_hw *hw = &adapter->hw; + static struct rnp_option opt = { + .type = range_option, + .name = "I/O Virtualization (IOV)", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = OPTION_DISABLED } } + }; + + opt.arg.r.max = hw->max_vfs; +#ifdef module_param_array + if (num_max_vfs > bd) { +#endif + unsigned int vfs = max_vfs[bd]; + if (rnp_validate_option(adapter->netdev, &vfs, &opt)) { + vfs = 0; + DPRINTK(PROBE, INFO, + "max_vfs out of range " + "Disabling SR-IOV.\n"); + } + + adapter->num_vfs = vfs; + + if (vfs) + *aflags |= RNP_FLAG_SRIOV_ENABLED; + else + *aflags &= ~RNP_FLAG_SRIOV_ENABLED; +#ifdef module_param_array + } else { + if (opt.def == OPTION_DISABLED) { + adapter->num_vfs = 0; + *aflags &= ~RNP_FLAG_SRIOV_ENABLED; + } else { + adapter->num_vfs = opt.def; + *aflags |= RNP_FLAG_SRIOV_ENABLED; + } + } +#endif + } + + { /* Sriov Mode */ + unsigned int sriov_mode; + static struct rnp_option opt = { + .type = range_option, + .name = "SRIOV Mode", + .err = "using default of " __MODULE_STRING( + RNP_SRIOV_MAC_MODE), + .def = RNP_SRIOV_MAC_MODE, + .arg = { .r = { .min = 
RNP_SRIOV_MAC_MODE, + .max = RNP_SRIOV_VLAN_MODE } } + }; + +#ifdef module_param_array + if (num_SRIOV_Mode > bd) { +#endif + sriov_mode = SRIOV_Mode[bd]; + if (sriov_mode == OPTION_UNSET) + sriov_mode = RNP_SRIOV_MAC_MODE; + rnp_validate_option(adapter->netdev, &sriov_mode, &opt); + + if (sriov_mode == RNP_SRIOV_VLAN_MODE) + adapter->priv_flags |= + RNP_PRIV_FLAG_SRIOV_VLAN_MODE; + +#ifdef module_param_array + } else { + /* default settings */ + adapter->priv_flags &= (~RNP_PRIV_FLAG_SRIOV_VLAN_MODE); + } +#endif + } +#endif /* CONFIG_PCI_IOV */ + + { /* max msix count setup */ + int pf_msix_counts; + struct rnp_hw *hw = &adapter->hw; + static struct rnp_option opt = { + .type = range_option, + .name = "Limit Msix Count", + .err = "using default of Un-limit", + .def = OPTION_DISABLED, + .arg = { .r = { .min = RNP_INT_MIN, + .max = RNP_INT_MIN } } + }; + + opt.arg.r.max = hw->max_msix_vectors; +#ifdef module_param_array + if (num_pf_msix_counts_set > bd) { +#endif + pf_msix_counts = pf_msix_counts_set[bd]; + if (pf_msix_counts == OPTION_DISABLED) + pf_msix_counts = 0; + rnp_validate_option(adapter->netdev, &pf_msix_counts, + &opt); + + if (pf_msix_counts) { + if (hw->ops.update_msix_count) + hw->ops.update_msix_count( + hw, pf_msix_counts); + } + } + } +} diff --git a/drivers/net/ethernet/mucse/rnp/rnp_pcs.c b/drivers/net/ethernet/mucse/rnp/rnp_pcs.c new file mode 100755 index 0000000000000000000000000000000000000000..e84879c43722de97b844944950db7a6425c7fb26 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_pcs.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include "rnp_pcs.h" +#include "rnp_regs.h" +#include "rnp_common.h" + +static u32 rnp_read_pcs(struct rnp_hw *hw, int num, u32 addr) +{ + u32 reg_hi, reg_lo; + u32 value; + + reg_hi = addr >> 8; + reg_lo = (addr & 0xff) << 2; + wr32(hw, RNP_PCS_BASE(num) + (0xff << 2), reg_hi); + value = rd32(hw, RNP_PCS_BASE(num) + reg_lo); + return value; +} + +static void rnp_write_pcs(struct rnp_hw *hw, int num, u32 addr, u32 value) +{ + u32 reg_hi, reg_lo; + + reg_hi = addr >> 8; + reg_lo = (addr & 0xff) << 2; + wr32(hw, RNP_PCS_BASE(num) + (0xff << 2), reg_hi); + wr32(hw, RNP_PCS_BASE(num) + reg_lo, value); +} + +struct rnp_pcs_operations pcs_ops_generic = { + .read = rnp_read_pcs, + .write = rnp_write_pcs, +}; diff --git a/drivers/net/ethernet/mucse/rnp/rnp_pcs.h b/drivers/net/ethernet/mucse/rnp/rnp_pcs.h new file mode 100755 index 0000000000000000000000000000000000000000..d79d947cc31dc74b9729beffbe50ff5d277f6ca6 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_pcs.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef _RNP_PCS_H_ +#define _RNP_PCS_H_ + +extern struct rnp_pcs_operations pcs_ops_generic; + +#endif diff --git a/drivers/net/ethernet/mucse/rnp/rnp_phy.h b/drivers/net/ethernet/mucse/rnp/rnp_phy.h new file mode 100644 index 0000000000000000000000000000000000000000..b25dce89e0643e9d47f47e5d862376b899521735 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_phy.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#ifndef _RNP_PHY_H_ +#define _RNP_PHY_H_ + +#include "rnp_type.h" +#define RNP_I2C_EEPROM_DEV_ADDR 0xA0 +#define RNP_I2C_EEPROM_DEV_ADDR2 0xA2 + +#define RNP_YT8531_PHY_SPEC_CTRL 0x10 +#define RNP_YT8531_PHY_SPEC_CTRL_FORCE_MDIX 0x0020 +#define RNP_YT8531_PHY_SPEC_CTRL_AUTO_MDI_MDIX 0x0060 +#define RNP_YT8531_PHY_SPEC_CTRL_MDIX_CFG_MASK 0x0060 + +/* EEPROM byte offsets */ +#define SFF_MODULE_ID_OFFSET 0x00 +#define SFF_DIAG_SUPPORT_OFFSET 0x5c +#define SFF_MODULE_ID_SFP 0x3 +#define SFF_MODULE_ID_QSFP 0xc +#define SFF_MODULE_ID_QSFP_PLUS 0xd +#define SFF_MODULE_ID_QSFP28 0x11 + +/* Bitmasks */ +#define RNP_SFF_DA_PASSIVE_CABLE 0x4 +#define RNP_SFF_DA_ACTIVE_CABLE 0x8 +#define RNP_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 +#define RNP_SFF_1GBASESX_CAPABLE 0x1 +#define RNP_SFF_1GBASELX_CAPABLE 0x2 +#define RNP_SFF_1GBASET_CAPABLE 0x8 +#define RNP_SFF_10GBASESR_CAPABLE 0x10 +#define RNP_SFF_10GBASELR_CAPABLE 0x20 +#define RNP_SFF_ADDRESSING_MODE 0x4 +#define RNP_I2C_EEPROM_READ_MASK 0x100 +#define RNP_I2C_EEPROM_STATUS_MASK 0x3 +#define RNP_I2C_EEPROM_STATUS_NO_OPERATION 0x0 +#define RNP_I2C_EEPROM_STATUS_PASS 0x1 +#define RNP_I2C_EEPROM_STATUS_FAIL 0x2 +#define RNP_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 + +/* Flow control defines */ +#define RNP_TAF_SYM_PAUSE 0x400 +#define RNP_TAF_ASM_PAUSE 0x800 + +/* Bit-shift macros */ +#define RNP_SFF_VENDOR_OUI_BYTE0_SHIFT 24 +#define RNP_SFF_VENDOR_OUI_BYTE1_SHIFT 16 +#define RNP_SFF_VENDOR_OUI_BYTE2_SHIFT 8 + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define RNP_SFF_VENDOR_OUI_TYCO 0x00407600 +#define RNP_SFF_VENDOR_OUI_FTL 0x00906500 +#define RNP_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define RNP_SFF_VENDOR_OUI_INTEL 0x001B2100 + +/* I2C SDA and SCL timing parameters for standard mode */ +#define RNP_I2C_T_HD_STA 4 +#define RNP_I2C_T_LOW 5 +#define RNP_I2C_T_HIGH 4 +#define RNP_I2C_T_SU_STA 5 +#define RNP_I2C_T_HD_DATA 5 +#define RNP_I2C_T_SU_DATA 1 +#define RNP_I2C_T_RISE 1 +#define RNP_I2C_T_FALL 1 +#define RNP_I2C_T_SU_STO 4 +#define RNP_I2C_T_BUF 5 + +#define RNP_TN_LASI_STATUS_REG 0x9005 +#define RNP_TN_LASI_STATUS_TEMP_ALARM 0x0008 + +/* SFP+ SFF-8472 Compliance code */ +#define RNP_SFF_SFF_8472_UNSUP 0x00 + +s32 rnp_init_phy_ops_generic(struct rnp_hw *hw); +s32 rnp_identify_phy_generic(struct rnp_hw *hw); +s32 rnp_reset_phy_generic(struct rnp_hw *hw); +s32 rnp_read_phy_reg_generic(struct rnp_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data); +s32 rnp_write_phy_reg_generic(struct rnp_hw *hw, u32 reg_addr, u32 device_type, + u16 phy_data); +s32 rnp_setup_phy_link_generic(struct rnp_hw *hw); +s32 rnp_setup_phy_link_speed_generic(struct rnp_hw *hw, rnp_link_speed speed, + bool autoneg_wait_to_complete); +s32 rnp_get_copper_link_capabilities_generic(struct rnp_hw *hw, + rnp_link_speed *speed, + bool *autoneg); +/* PHY specific */ +s32 rnp_check_phy_link_tnx(struct rnp_hw *hw, rnp_link_speed *speed, + bool *link_up); +s32 rnp_setup_phy_link_tnx(struct rnp_hw *hw); +s32 rnp_get_phy_firmware_version_tnx(struct rnp_hw *hw, u16 *firmware_version); +s32 rnp_get_phy_firmware_version_generic(struct rnp_hw *hw, + u16 *firmware_version); +s32 rnp_reset_phy_nl(struct rnp_hw *hw); +s32 rnp_identify_sfp_module_generic(struct rnp_hw *hw); +s32 rnp_get_sfp_init_sequence_offsets(struct rnp_hw *hw, u16 *list_offset, + u16 *data_offset); +s32 rnp_tn_check_overtemp(struct rnp_hw *hw); +s32 rnp_read_i2c_byte_generic(struct rnp_hw *hw, u8 byte_offset, u8 dev_addr, + u8 *data); +s32 rnp_write_i2c_byte_generic(struct rnp_hw *hw, u8 byte_offset, u8 
dev_addr, + u8 data); +s32 rnp_read_i2c_eeprom_generic(struct rnp_hw *hw, u8 byte_offset, + u8 *eeprom_data); +s32 rnp_read_i2c_sff8472_generic(struct rnp_hw *hw, u8 byte_offset, + u8 *sff8472_data); +s32 rnp_write_i2c_eeprom_generic(struct rnp_hw *hw, u8 byte_offset, + u8 eeprom_data); +#endif /* _RNP_PHY_H_ */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_ptp.c b/drivers/net/ethernet/mucse/rnp/rnp_ptp.c new file mode 100755 index 0000000000000000000000000000000000000000..fe97eaf701e2fc99c873b9f5bce4c49c99429680 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_ptp.c @@ -0,0 +1,830 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include +#include +#include +#include +#include + +#include "rnp.h" +#include "rnp_regs.h" +#include "rnp_ptp.h" + +//#define DEBUG_PTP_TX_TIMESTAMP + +/* PTP and HW Timer ops */ +static void config_hw_tstamping(void __iomem *ioaddr, u32 data) +{ + writel(data, ioaddr + PTP_TCR); +} + +static void config_sub_second_increment(void __iomem *ioaddr, u32 ptp_clock, + int gmac4, u32 *ssinc) +{ + u32 value = readl(ioaddr + PTP_TCR); + unsigned long data; + u32 reg_value; + + /* For GMAC3.x, 4.x versions, in "fine adjustement mode" set sub-second + * increment to twice the number of nanoseconds of a clock cycle. + * The calculation of the default_addend value by the caller will set it + * to mid-range = 2^31 when the remainder of this division is zero, + * which will make the accumulator overflow once every 2 ptp_clock + * cycles, adding twice the number of nanoseconds of a clock cycle : + * 2000000000ULL / ptp_clock. + */ + if (value & RNP_PTP_TCR_TSCFUPDT) + data = (2000000000ULL / ptp_clock); + else + data = (1000000000ULL / ptp_clock); + + /* 0.465ns accuracy */ + if (!(value & RNP_PTP_TCR_TSCTRLSSR)) + data = (data * 1000) / 465; + + data &= RNP_PTP_SSIR_SSINC_MASK; + + reg_value = data; + if (gmac4) + reg_value <<= RNP_PTP_SSIR_SSINC_SHIFT; + + writel(reg_value, ioaddr + PTP_SSIR); + + if (ssinc) + *ssinc = data; +} + +static int config_addend(void __iomem *ioaddr, u32 addend) +{ + u32 value; + int limit; + + writel(addend, ioaddr + PTP_TAR); + /* issue command to update the addend value */ + value = readl(ioaddr + PTP_TCR); + value |= RNP_PTP_TCR_TSADDREG; + writel(value, ioaddr + PTP_TCR); + + /* wait for present addend update to complete */ + limit = 10; + while (limit--) { + if (!(readl(ioaddr + PTP_TCR) & RNP_PTP_TCR_TSADDREG)) + break; + mdelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec) +{ + int limit; + u32 value; + + writel(sec, ioaddr + PTP_STSUR); + writel(nsec, ioaddr + PTP_STNSUR); + /* issue command to initialize the system time value */ + value = readl(ioaddr + PTP_TCR); + value |= RNP_PTP_TCR_TSINIT; + writel(value, ioaddr + PTP_TCR); + + /* wait for present system time initialize to complete */ + limit = 10; + while (limit--) { + if (!(readl(ioaddr + PTP_TCR) & RNP_PTP_TCR_TSINIT)) + break; + mdelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static void get_systime(void __iomem *ioaddr, u64 *systime) +{ + u64 ns; + + /* Get the TSSS value */ + ns = readl(ioaddr + PTP_STNSR); + /* Get the TSS and convert sec time value to nanosecond */ + ns += readl(ioaddr + PTP_STSR) * 1000000000ULL; + + if (systime) + *systime = ns; +} + +static void config_mac_interrupt_enable(void __iomem *ioaddr, bool on) +{ + rnp_wr_reg(ioaddr + RNP_MAC_INTERRUPT_ENABLE, on); +} + +static int adjust_systime(void __iomem 
*ioaddr, u32 sec, u32 nsec, int add_sub, + int gmac4) +{ + u32 value; + int limit; + + if (add_sub) { + /* If the new sec value needs to be subtracted from + * the system time, then the MAC_STSUR reg should be + * programmed with (2^32 - new_sec_value) + */ + if (gmac4) + sec = -sec; + + value = readl(ioaddr + PTP_TCR); + if (value & RNP_PTP_TCR_TSCTRLSSR) + nsec = (RNP_PTP_DIGITAL_ROLLOVER_MODE - nsec); + else + nsec = (RNP_PTP_BINARY_ROLLOVER_MODE - nsec); + } + + writel(sec, ioaddr + PTP_STSUR); + value = (add_sub << RNP_PTP_STNSUR_ADDSUB_SHIFT) | nsec; + writel(value, ioaddr + PTP_STNSUR); + + /* issue command to initialize the system time value */ + value = readl(ioaddr + PTP_TCR); + value |= RNP_PTP_TCR_TSUPDT; + writel(value, ioaddr + PTP_TCR); + + /* wait for present system time adjust/update to complete */ + limit = 10; + while (limit--) { + if (!(readl(ioaddr + PTP_TCR) & RNP_PTP_TCR_TSUPDT)) + break; + mdelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static const struct rnp_hwtimestamp mac_ptp = { + .config_hw_tstamping = config_hw_tstamping, + .config_mac_irq_enable = config_mac_interrupt_enable, + .init_systime = init_systime, + .config_sub_second_increment = config_sub_second_increment, + .config_addend = config_addend, + .adjust_systime = adjust_systime, + .get_systime = get_systime, +}; + +#ifdef HAVE_PTP_CLOCK_INFO_ADJFINE +static int rnp_ptp_adjfreq(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct rnp_adapter *pf = + container_of(ptp, struct rnp_adapter, ptp_clock_ops); + unsigned long flags; + u32 addend; + + if (pf == NULL) { + printk(KERN_DEBUG "adapter container is null\n"); + return 0; + } + + addend = adjust_by_scaled_ppm(pf->default_addend, scaled_ppm); + + spin_lock_irqsave(&pf->ptp_lock, flags); + pf->hwts_ops->config_addend(pf->ptp_addr, addend); + spin_unlock_irqrestore(&pf->ptp_lock, flags); + + return 0; +} +#else +static int rnp_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct rnp_adapter *pf = + container_of(ptp, struct rnp_adapter, ptp_clock_ops); + unsigned long flags; + u32 diff, addend; + int neg_adj = 0; + u64 adj; + + if (pf == NULL) { + printk(KERN_DEBUG "adapter container is null\n"); + return 0; + } + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + + addend = pf->default_addend; + adj = addend; + adj *= ppb; + + diff = div_u64(adj, 1000000000ULL); + addend = neg_adj ? 
(addend - diff) : (addend + diff); + + + spin_lock_irqsave(&pf->ptp_lock, flags); + pf->hwts_ops->config_addend(pf->ptp_addr, addend); + spin_unlock_irqrestore(&pf->ptp_lock, flags); + + return 0; +} + +#endif +static int rnp_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct rnp_adapter *pf = + container_of(ptp, struct rnp_adapter, ptp_clock_ops); + unsigned long flags; + u32 sec, nsec; + u32 quotient, reminder; + int neg_adj = 0; + + if (delta < 0) { + neg_adj = 1; + delta = -delta; + } + + if (delta == 0) + return 0; + + quotient = div_u64_rem(delta, 1000000000ULL, &reminder); + sec = quotient; + nsec = reminder; + + spin_lock_irqsave(&pf->ptp_lock, flags); + pf->hwts_ops->adjust_systime(pf->ptp_addr, sec, nsec, neg_adj, + pf->gmac4); + spin_unlock_irqrestore(&pf->ptp_lock, flags); + + return 0; +} + +static int rnp_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct rnp_adapter *pf = + container_of(ptp, struct rnp_adapter, ptp_clock_ops); + unsigned long flags; + u64 ns = 0; + + spin_lock_irqsave(&pf->ptp_lock, flags); + + pf->hwts_ops->get_systime(pf->ptp_addr, &ns); + + spin_unlock_irqrestore(&pf->ptp_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int rnp_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct rnp_adapter *pf = + container_of(ptp, struct rnp_adapter, ptp_clock_ops); + unsigned long flags; + + spin_lock_irqsave(&pf->ptp_lock, flags); + pf->hwts_ops->init_systime(pf->ptp_addr, ts->tv_sec, ts->tv_nsec); + spin_unlock_irqrestore(&pf->ptp_lock, flags); + + return 0; +} + +#ifndef HAVE_PTP_CLOCK_INFO_GETTIME64 +static int rnp_ptp_gettime32(struct ptp_clock_info *ptp, struct timespec *ts) +{ + struct timespec64 ts64; + int err; + + err = rnp_ptp_gettime(ptp, &ts64); + if (err) + return err; + + *ts = timespec64_to_timespec(ts64); + + return 0; +} + +static int rnp_ptp_settime32(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct timespec64 ts64; + + ts64 = timespec_to_timespec64(*ts); + return rnp_ptp_settime(ptp, &ts64); +} +#endif + +static int rnp_ptp_feature_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + /*TODO add support for enable the option 1588 feature PPS Auxiliary */ + return -EOPNOTSUPP; +} + +int rnp_ptp_get_ts_config(struct rnp_adapter *pf, struct ifreq *ifr) +{ + struct hwtstamp_config *config = &pf->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? 
-EFAULT : + 0; +} + +static int rnp_ptp_setup_ptp(struct rnp_adapter *pf, u32 value) +{ + u32 sec_inc = 0; + u64 temp = 0; + struct timespec64 now; + + /* For now just use an external clock (the kernel system clock) */ + /* 1. Mask the Timestamp Trigger interrupt */ + /* 2. enable time stamping */ + /* 2.1 clear all bits of the time ctrl reg */ + + pf->hwts_ops->config_hw_tstamping(pf->ptp_addr, value); + /* 3. Program the PTP clock frequency */ + /* program the Sub Second Increment reg; + * we use the kernel system clock + */ + pf->hwts_ops->config_sub_second_increment( + pf->ptp_addr, pf->clk_ptp_rate, pf->gmac4, &sec_inc); + /* 4. If the fine correction approach is used, + * program the MAC_Timestamp_Addend register + */ + if (sec_inc == 0) { + printk(KERN_DEBUG "%s:%d sec_inc is zero, this is a bug\n", + __func__, __LINE__); + return -EFAULT; + } + temp = div_u64(1000000000ULL, sec_inc); + /* Store sub second increment and flags for later use */ + pf->sub_second_inc = sec_inc; + pf->systime_flags = value; + /* calculate the default addend value: + * addend = 2^32 * (1e9ns / sec_inc) / clk_ptp_rate, + * e.g. 2^31 for the default 50MHz ptp clock in fine update + * mode (sec_inc = 40ns) + */ + temp = (u64)(temp << 32); + + if (pf->clk_ptp_rate == 0) { + pf->clk_ptp_rate = 1000; + printk(KERN_DEBUG "%s:%d clk_ptp_rate is zero\n", __func__, + __LINE__); + } + + pf->default_addend = div_u64(temp, pf->clk_ptp_rate); + + pf->hwts_ops->config_addend(pf->ptp_addr, pf->default_addend); + /* 5. Poll/wait for the TCR addend update to complete */ + /* 6. enable the Fine Update method */ + /* 7. program the second and nanosecond registers */ + /* TODO: if we need to enable one-step timestamping */ + + /* initialize system time */ + ktime_get_real_ts64(&now); + + /* lower 32 bits of tv_sec are safe until y2106 */ + pf->hwts_ops->init_systime(pf->ptp_addr, (u32)now.tv_sec, now.tv_nsec); + + return 0; +} + +int rnp_ptp_set_ts_config(struct rnp_adapter *pf, struct ifreq *ifr) +{ + struct hwtstamp_config config; + u32 ptp_v2 = 0; + u32 tstamp_all = 0; + u32 ptp_over_ipv4_udp = 0; + u32 ptp_over_ipv6_udp = 0; + u32 ptp_over_ethernet = 0; + u32 snap_type_sel = 0; + u32 ts_master_en = 0; + u32 ts_event_en = 0; + u32 value = 0; + s32 ret = -1; + + if (!(pf->flags2 & RNP_FLAG2_PTP_ENABLED)) { + pci_alert(pf->pdev, "No support for HW time stamping\n"); + pf->ptp_tx_en = 0; + pf->ptp_rx_en = 0; + + return -EOPNOTSUPP; + } + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + netdev_info(pf->netdev, + "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", + __func__, config.flags, config.tx_type, config.rx_filter); + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + if (config.tx_type != HWTSTAMP_TX_OFF && + config.tx_type != HWTSTAMP_TX_ON) + return -ERANGE; + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + /* time stamp no incoming packet at all */ + config.rx_filter = HWTSTAMP_FILTER_NONE; + break; + + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + /* PTP v1, UDP, any kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + /* 'mac' hardware can support Sync, Pdelay_Req and + * Pdelay_resp by setting bit14 and bits17/16 to 01 + * This leaves Delay_Req timestamps out. 
+ * Enable all events *and* general purpose message + * timestamping + */ + snap_type_sel = RNP_PTP_TCR_SNAPTYPSEL_1; + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + /* PTP v1, UDP, Sync packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; + /* take time stamp for SYNC messages only */ + ts_event_en = RNP_PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + /* PTP v1, UDP, Delay_req packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; + /* take time stamp for Delay_Req messages only */ + ts_master_en = RNP_PTP_TCR_TSMSTRENA; + ts_event_en = RNP_PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + /* PTP v2, UDP, any kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + ptp_v2 = RNP_PTP_TCR_TSVER2ENA; + + /* take time stamp for all event messages */ + snap_type_sel = RNP_PTP_TCR_SNAPTYPSEL_1; + + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + /* PTP v2, UDP, Sync packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; + ptp_v2 = RNP_PTP_TCR_TSVER2ENA; + /* take time stamp for SYNC messages only */ + ts_event_en = RNP_PTP_TCR_TSEVNTENA; + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + /* PTP v2, UDP, Delay_req packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; + ptp_v2 = RNP_PTP_TCR_TSVER2ENA; + /* take time stamp for Delay_Req messages only */ + ts_master_en = RNP_PTP_TCR_TSMSTRENA; + ts_event_en = RNP_PTP_TCR_TSEVNTENA; + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_EVENT: + /* PTP v2/802.AS1 any layer, any kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + ptp_v2 = RNP_PTP_TCR_TSVER2ENA; + snap_type_sel = RNP_PTP_TCR_SNAPTYPSEL_1; + // ts_event_en = RNP_PTP_TCR_TSEVNTENA; + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + ptp_over_ethernet = RNP_PTP_TCR_TSIPENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_SYNC: + /* PTP v2/802.AS1, any layer, Sync packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; + ptp_v2 = RNP_PTP_TCR_TSVER2ENA; + /* take time stamp for SYNC messages only */ + ts_event_en = RNP_PTP_TCR_TSEVNTENA; + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + ptp_over_ethernet = RNP_PTP_TCR_TSIPENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + /* PTP v2/802.AS1, any layer, Delay_req packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; + ptp_v2 = RNP_PTP_TCR_TSVER2ENA; + /* take time stamp for Delay_Req messages only */ + ts_master_en = RNP_PTP_TCR_TSMSTRENA; + ts_event_en = RNP_PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + ptp_over_ethernet = RNP_PTP_TCR_TSIPENA; + break; + +#ifdef HWTSTAMP_FILTER_NTP_ALL + case HWTSTAMP_FILTER_NTP_ALL: +#endif + case HWTSTAMP_FILTER_ALL: + /* time stamp any incoming packet */ + config.rx_filter = HWTSTAMP_FILTER_ALL; + tstamp_all = RNP_PTP_TCR_TSENALL; + break; + + default: + return -ERANGE; + 
} + + pf->ptp_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); + pf->ptp_tx_en = config.tx_type == HWTSTAMP_TX_ON; + + netdev_info( + pf->netdev, + "ptp config rx filter 0x%.2x tx_type 0x%.2x rx_en[%d] tx_en[%d]\n", + config.rx_filter, config.tx_type, pf->ptp_rx_en, pf->ptp_tx_en); + if (!pf->ptp_rx_en && !pf->ptp_tx_en) + /*rx and tx is not use hardware ts so clear the ptp register */ + pf->hwts_ops->config_hw_tstamping(pf->ptp_addr, 0); + else { + value = (RNP_PTP_TCR_TSENA | RNP_PTP_TCR_TSCFUPDT | + RNP_PTP_TCR_TSCTRLSSR | tstamp_all | ptp_v2 | + ptp_over_ethernet | ptp_over_ipv6_udp | + ptp_over_ipv4_udp | ts_master_en | snap_type_sel); + + ret = rnp_ptp_setup_ptp(pf, value); + if (ret < 0) + return ret; + } + pf->ptp_config_value = value; + memcpy(&pf->tstamp_config, &config, sizeof(config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : + 0; +} + +/* structure describing a PTP hardware clock */ +static struct ptp_clock_info rnp_ptp_clock_ops = { + .owner = THIS_MODULE, + .name = "rnp ptp", + .max_adj = 50000000, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + /* will be overwritten in stmmac_ptp_register */ +#ifndef COMPAT_PTP_NO_PINS + .n_pins = 0, + /* should be 0 if not set */ +#endif +#ifdef HAVE_PTP_CLOCK_INFO_ADJFINE + .adjfine = rnp_ptp_adjfreq, +#else + .adjfreq = rnp_ptp_adjfreq, +#endif + .adjtime = rnp_ptp_adjtime, + +#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 + .gettime64 = rnp_ptp_gettime, + .settime64 = rnp_ptp_settime, +#else /* HAVE_PTP_CLOCK_INFO_GETTIME64 */ + .gettime = rnp_ptp_gettime32, + .settime = rnp_ptp_settime32, + +#endif /* HAVE_PTP_CLOCK_INFO_GETTIME64 */ + .enable = rnp_ptp_feature_enable, +}; + +int rnp_ptp_register(struct rnp_adapter *pf) +{ + pf->hwts_ops = &mac_ptp; + + pf->ptp_tx_en = 0; + pf->ptp_rx_en = 0; + + spin_lock_init(&pf->ptp_lock); + pf->flags2 |= RNP_FLAG2_PTP_ENABLED; + pf->ptp_clock_ops = rnp_ptp_clock_ops; + + /* default mac clock rate is 50Mhz */ + pf->clk_ptp_rate = 50000000; + if (pf->pdev == NULL) + printk(KERN_DEBUG "pdev dev is null\n"); + + pf->ptp_clock = ptp_clock_register(&pf->ptp_clock_ops, &pf->pdev->dev); + if (pf->ptp_clock == NULL) + pci_err(pf->pdev, "ptp clock register failed\n"); + + if (IS_ERR(pf->ptp_clock)) { + pci_err(pf->pdev, "ptp_clock_register failed\n"); + pf->ptp_clock = NULL; + } else { + pci_info(pf->pdev, "registered PTP clock\n"); + } + + return 0; +} + +void rnp_ptp_unregister(struct rnp_adapter *pf) +{ + /*1. 
stop the ptp module*/ + if (pf->ptp_clock) { + ptp_clock_unregister(pf->ptp_clock); + pf->ptp_clock = NULL; + pr_debug("Removed PTP HW clock successfully on %s\n", + "rnp_ptp"); + } +} + +#if defined(DEBUG_PTP_HARD_SOFTWAY_RX) || defined(DEBUG_PTP_HARD_SOFTWAY_TX) +static u64 rnp_get_software_ts(void) +{ + struct timespec64 ts; + + ktime_get_real_ts64(&ts); + return (ts.tv_nsec + ts.tv_sec * 1000000000ULL); +} +#endif + +#if defined(DEBUG_PTP_TX_TIMESTAMP) || defined(DEBUG_PTP_RX_TIMESTAMP) +#define TIME_ZONE_CHINA (8) +char *asctime(const struct tm *timeptr) +{ + static const char wday_name[][4] = { "Sun", "Mon", "Tue", "Wed", + "Thu", "Fri", "Sat" }; + static const char mon_name[][4] = { "Jan", "Feb", "Mar", "Apr", + "May", "Jun", "Jul", "Aug", + "Sep", "Oct", "Nov", "Dec" }; + static char result[26]; + + sprintf(result, "%.3s %.3s%3d %.2d:%.2d:%.2d %ld\n", + wday_name[timeptr->tm_wday], mon_name[timeptr->tm_mon], + timeptr->tm_mday, timeptr->tm_hour + TIME_ZONE_CHINA, + timeptr->tm_min, timeptr->tm_sec, 1900 + timeptr->tm_year); + return result; +} + +static void rnp_print_human_timestamp(uint64_t ns, uint8_t *direct) +{ + struct timespec64 ts; + struct tm tms; + ktime_t ktm = ns_to_ktime(ns); + + ts = ktime_to_timespec64(ktm); + time64_to_tm(ts.tv_sec, ts.tv_nsec / 1000000000ULL, &tms); + printk(KERN_DEBUG "[%s] %s ------\n", direct, asctime(&tms)); +} +#endif + +void rnp_tx_hwtstamp_work(struct work_struct *work) +{ + struct rnp_adapter *adapter = + container_of(work, struct rnp_adapter, tx_hwtstamp_work); + void __iomem *ioaddr = adapter->hw.hw_addr; + + /* 1. read port belone timestatmp status reg */ + /* 2. status enabled read nsec and sec reg*/ + /* 3. */ + u64 nanosec = 0, sec = 0; + + if (!adapter->ptp_tx_skb) { + clear_bit_unlock(__RNP_PTP_TX_IN_PROGRESS, &adapter->state); + return; + } + + if (rnp_rd_reg(ioaddr + RNP_ETH_PTP_TX_TSVALUE_STATUS(0)) & 0x01) { + struct sk_buff *skb = adapter->ptp_tx_skb; + struct skb_shared_hwtstamps shhwtstamps; + u64 txstmp = 0; + /* read and add nsec, sec turn to nsec*/ + + nanosec = rnp_rd_reg(ioaddr + RNP_ETH_PTP_TX_LTIMES(0)); + sec = rnp_rd_reg(ioaddr + RNP_ETH_PTP_TX_HTIMES(0)); + /* when we read the timestamp finish need to notice the hardware + * that the timestamp need to update via set tx_hwts_clear-reg + * from high to low + */ + rnp_wr_reg(ioaddr + RNP_ETH_PTP_TX_CLEAR(0), + PTP_GET_TX_HWTS_FINISH); + rnp_wr_reg(ioaddr + RNP_ETH_PTP_TX_CLEAR(0), + PTP_GET_TX_HWTS_UPDATE); + + txstmp = nanosec & PTP_HWTX_TIME_VALUE_MASK; + txstmp += (sec & PTP_HWTX_TIME_VALUE_MASK) * 1000000000ULL; + + /* Clear the global tx_hwtstamp_skb pointer and force writes + * prior to notifying the stack of a Tx timestamp. 
+ */ + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ns_to_ktime(txstmp); + adapter->ptp_tx_skb = NULL; +#ifdef DEBUG_PTP_TX_TIMESTAMP + rnp_print_human_timestamp(txstmp, "TX"); +#endif + /* force write prior to skb_tstamp_tx + * because the xmit will re used the point to store ptp skb + */ + wmb(); + + skb_tstamp_tx(skb, &shhwtstamps); + dev_consume_skb_any(skb); + clear_bit_unlock(__RNP_PTP_TX_IN_PROGRESS, &adapter->state); + } else if (time_after(jiffies, + adapter->tx_hwtstamp_start + + adapter->tx_timeout_factor * HZ)) { + /* this function will mark the skb drop*/ + if (adapter->ptp_tx_skb) + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + adapter->tx_hwtstamp_timeouts++; + clear_bit_unlock(__RNP_PTP_TX_IN_PROGRESS, &adapter->state); + netdev_warn(adapter->netdev, "clearing Tx timestamp hang\n"); + } else { + /* reschedule to check later */ +#ifdef DEBUG_PTP_HARD_SOFTWAY_TX + struct skb_shared_hwtstamps shhwtstamp; + u64 ns = 0; + + ns = rnp_get_software_ts(); + shhwtstamp.hwtstamp = ns_to_ktime(ns); + if (adapter->ptp_tx_skb) { + skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamp); + dev_consume_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + } +#else + schedule_work(&adapter->tx_hwtstamp_work); +#endif + } +} + +void rnp_ptp_get_rx_hwstamp(struct rnp_adapter *adapter, + union rnp_rx_desc *desc, struct sk_buff *skb) +{ + u64 ns = 0; + u64 tsvalueh = 0, tsvaluel = 0; + struct skb_shared_hwtstamps *hwtstamps = NULL; + + if (!skb || !adapter->ptp_rx_en) { + netdev_dbg(adapter->netdev, + "hwstamp skb is null or " + "rx_en iszero %u\n", + adapter->ptp_rx_en); + return; + } + +#ifdef DEBUG_PTP_HARD_SOFTWAY_RX + ns = rnp_get_software_ts(); +#else + if (likely(!((desc->wb.cmd) & RNP_RXD_STAT_PTP))) + return; + hwtstamps = skb_hwtstamps(skb); + /* because of rx hwstamp store before the mac head + * skb->head and skb->data is point to same location when call alloc_skb + * so we must move 16 bytes the skb->data to the mac head location + * but for the head point if we need move the skb->head need to be diss + */ + /* low8bytes is null high8bytes is timestamp + * high32bit is seconds low32bits is nanoseconds + */ + skb_copy_from_linear_data_offset(skb, RNP_RX_TIME_RESERVE, &tsvalueh, + RNP_RX_SEC_SIZE); + skb_copy_from_linear_data_offset(skb, + RNP_RX_TIME_RESERVE + RNP_RX_SEC_SIZE, + &tsvaluel, RNP_RX_NANOSEC_SIZE); + skb_pull(skb, RNP_RX_HWTS_OFFSET); + tsvalueh = ntohl(tsvalueh); + tsvaluel = ntohl(tsvaluel); + + ns = tsvaluel & RNP_RX_NSEC_MASK; + ns += ((tsvalueh & RNP_RX_SEC_MASK) * 1000000000ULL); + + netdev_dbg(adapter->netdev, + "ptp get hardware ts-sec %llu ts-nanosec %llu\n", tsvalueh, + tsvaluel); +#endif + hwtstamps->hwtstamp = ns_to_ktime(ns); +#ifdef DEBUG_PTP_RX_TIMESTAMP + rnp_print_human_timestamp(ns, "RX"); +#endif +} + +void rnp_ptp_reset(struct rnp_adapter *adapter) +{ + rnp_ptp_setup_ptp(adapter, adapter->ptp_config_value); +} diff --git a/drivers/net/ethernet/mucse/rnp/rnp_ptp.h b/drivers/net/ethernet/mucse/rnp/rnp_ptp.h new file mode 100755 index 0000000000000000000000000000000000000000..a62e8128f0a49c4c047eb752dc03275d1596f89d --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_ptp.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#ifndef __RNP_PTP_H__ +#define __RNP_PTP_H__ + +struct rnp_hwtimestamp { + void (*config_hw_tstamping)(void __iomem *ioaddr, u32 data); + void (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock, + int gmac4, u32 *ssinc); + void (*config_mac_irq_enable)(void __iomem *ioaddr, bool on); + int (*init_systime)(void __iomem *ioaddr, u32 sec, u32 nsec); + int (*config_addend)(void __iomem *ioaddr, u32 addend); + int (*adjust_systime)(void __iomem *ioaddr, u32 sec, u32 nsec, + int add_sub, int gmac4); + void (*get_systime)(void __iomem *ioaddr, u64 *systime); +}; +/* IEEE 1588 PTP register offsets */ +#define PTP_TCR 0x00 /* Timestamp Control Reg */ +#define PTP_SSIR 0x04 /* Sub-Second Increment Reg */ +#define PTP_STSR 0x08 /* System Time – Seconds Reg */ +#define PTP_STNSR 0x0c /* System Time – Nanoseconds Reg */ +#define PTP_STSUR 0x10 /* System Time – Seconds Update Reg */ +#define PTP_STNSUR 0x14 /* System Time – Nanoseconds Update Reg */ +#define PTP_TAR 0x18 /* Timestamp Addend Reg */ + +#define RNP_PTP_STNSUR_ADDSUB_SHIFT 31 +#define RNP_PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 10e9-1 ns */ +#define RNP_PTP_BINARY_ROLLOVER_MODE 0x80000000 /* ~0.466 ns */ + +/* PTP Timestamp control register defines */ +#define RNP_PTP_TCR_TSENA BIT(0) /* Timestamp Enable */ +#define RNP_PTP_TCR_TSCFUPDT BIT(1) /* Timestamp Fine/Coarse Update */ +#define RNP_PTP_TCR_TSINIT BIT(2) /* Timestamp Initialize */ +#define RNP_PTP_TCR_TSUPDT BIT(3) /* Timestamp Update */ +#define RNP_PTP_TCR_TSTRIG BIT(4) /* Timestamp Interrupt Trigger Enable */ +#define RNP_PTP_TCR_TSADDREG BIT(5) /* Addend Reg Update */ +#define RNP_PTP_TCR_TSENALL BIT(8) /* Enable Timestamp for All Frames */ +#define RNP_PTP_TCR_TSCTRLSSR BIT(9) /* Digital or Binary Rollover Control */ +#define RNP_PTP_TCR_TSVER2ENA \ + BIT(10) /* Enable PTP packet Processing for Version 2 Format */ +#define RNP_PTP_TCR_TSIPENA \ + BIT(11) /* Enable Processing of PTP over Ethernet Frames */ +#define RNP_PTP_TCR_TSIPV6ENA \ + BIT(12) /* Enable Processing of PTP Frames Sent over IPv6-UDP */ +#define RNP_PTP_TCR_TSIPV4ENA \ + BIT(13) /* Enable Processing of PTP Frames Sent over IPv4-UDP */ +#define RNP_PTP_TCR_TSEVNTENA \ + BIT(14) /* Enable Timestamp Snapshot for Event Messages */ +#define RNP_PTP_TCR_TSMSTRENA \ + BIT(15) /* Enable Snapshot for Messages Relevant to Master */ +/* Note: 802.1AS runs over Ethernet frames, + * while normal PTP runs over UDP + */ + +/* Select PTP packets for Taking Snapshots + * On mac specifically: + * Enable SYNC, Pdelay_Req, Pdelay_Resp when TSEVNTENA is enabled. + * or + * Enable SYNC, Follow_Up, Delay_Req, Delay_Resp, Pdelay_Req, Pdelay_Resp, + * Pdelay_Resp_Follow_Up if TSEVNTENA is disabled + */ +#define RNP_PTP_TCR_SNAPTYPSEL_1 BIT(16) +#define RNP_PTP_TCR_TSENMACADDR \ + BIT(18) /* Enable MAC address for PTP Frame Filtering */ +#define RNP_PTP_TCR_ESTI \ + BIT(20) /* External System Time Input Or MAC Internal Clock */ +#define RNP_PTP_TCR_AV8021ASMEN BIT(28) /* AV802.1 AS Mode Enable */ +/* Sub-second increment defines */ +#define RNP_PTP_SSIR_SSINC_MASK (0xff) /* Sub-second increment value */ +#define RNP_PTP_SSIR_SSINC_SHIFT (16) /* Sub-second increment offset */ + +#define RNP_MAC_TXTSC BIT(15) /* TX timestamp reg fill is complete */ +#define RNP_MAC_TXTSSTSLO GENMASK(30, 0) /* nanosecond valid value */ + +#define RNP_RX_SEC_MASK GENMASK(30, 0) +#define RNP_RX_NSEC_MASK GENMASK(30, 0) +#define RNP_RX_TIME_RESERVE (8) +#define RNP_RX_SEC_SIZE (4) +#define RNP_RX_NANOSEC_SIZE (4) +#define RNP_RX_HWTS_OFFSET \ + (RNP_RX_SEC_SIZE + RNP_RX_NANOSEC_SIZE + RNP_RX_TIME_RESERVE) + +#define PTP_HWTX_TIME_VALUE_MASK GENMASK(31, 0) +#define PTP_GET_TX_HWTS_FINISH (1) +#define PTP_GET_TX_HWTS_UPDATE (0) +/* hardware ts is unavailable, so fake a ts from the software clock */ +#define DEBUG_PTP_HARD_SOFTWAY + +int rnp_ptp_get_ts_config(struct rnp_adapter *pf, struct ifreq *ifr); +int rnp_ptp_set_ts_config(struct rnp_adapter *pf, struct ifreq *ifr); +int rnp_ptp_register(struct rnp_adapter *pf); +void rnp_ptp_unregister(struct rnp_adapter *pf); + +void rnp_ptp_get_rx_hwstamp(struct rnp_adapter *pf, union rnp_rx_desc *desc, + struct sk_buff *skb); +void rnp_tx_hwtstamp_work(struct work_struct *work); +void rnp_ptp_reset(struct rnp_adapter *adapter); +#endif diff --git a/drivers/net/ethernet/mucse/rnp/rnp_regs.h b/drivers/net/ethernet/mucse/rnp/rnp_regs.h new file mode 100755 index 0000000000000000000000000000000000000000..fb70322f5dbf9510942dd5c882182715ee25a465 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_regs.h @@ -0,0 +1,958 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#ifndef RNP_REGS_H +#define RNP_REGS_H + +/* BAR4 memory */ +/* ------------------------------------------*/ +/* module | size | start | end */ +/* DMA | 64KB | 0_0000H | 0_FFFFH */ +/* ETH | 64KB | 1_0000H | 1_FFFFH */ +/* REG | 64KB | 3_0000H | 3_FFFFH */ +/* SERDES | 128KB | 4_0000H | 5_FFFFH */ +/* XLMAC | 256KB | 6_0000H | 9_FFFFH */ +/* MSIX | 64KB | A_0000H | A_FFFFH */ +/* SWITCH | 64KB | B_0000H | B_FFFFH */ +/* TCAM | 256KB | C_0000H | F_FFFFH */ +/* ------------------------------------------*/ + +/* ==================== RNP-DMA Global Registers ==================== */ +//RNP-DMA AXI Register +//DMA-ENABLE-IRQ +// n10 +#define RNP10_RING_BASE (0x8000) +// n20 +#define RNP20_RING_BASE (0x8000) + +#define RING_OFFSET(queue_idx) (0x100 * (queue_idx)) + +#define RNP_DMA_VERSION (0x0000) +#define RNP_DMA_CONFIG (0x0004) +#define RNP_DMA_AXI_READY (0x0014) +#define DMA_MAC_LOOPBACK (1 << 0) +#define DMA_SWITCH_LOOPBACK (1 << 1) +#define DMA_VEB_BYPASS (1 << 4) +#define DMA_AXI_ORDER (1 << 5) +#define DMA_RX_PADDING (1 << 8) +#define DMA_MAP_MODE(n) (n << 12) +#define DMA_RX_FRAGMENT_BYTES(n) (((n) / 16) << 16) +#define RNP_DMA_STATUS (0x0008) +#define RNP_DMA_RX_DATA_PROG_FULL_THRESH (0x00a0) +#define DMA_RING_NUM (0xff << 24) + +#define RC_CONTROL_HW (0x01) +#define RC_CONTROL_PHY_DRIVER (0x02) +#define RC_JUMP_STATUS (0x04) +#define RC_PHY_LINK_DONE (0x08) +#define RC_LINK_CHANGE (0x10) + +#define RNP_DMA_DUMY (0x000c) + +#define RNP_DMA_RX_START (0x10) +#define RNP_DMA_RX_READY (0x14) +#define RNP_DMA_TX_START (0x18) +#define RNP_DMA_TX_READY (0x1c) +#define RNP_DMA_INT_STAT (0x20) +#define RNP_DMA_INT_MASK (0x24) +#define TX_INT_MASK (1 << 1) +#define RX_INT_MASK (1 << 0) +#define RNP_DMA_INT_CLR (0x28) +#define RNP_DMA_INT_TRIG (0x2c) + +#define RNP_DMA_AXI_EN (0x0010) +#define RX_AXI_RW_EN (0x03 << 0) +#define TX_AXI_RW_EN (0x03 << 2) +#define RNP_DMA_AXI_STAT (0x0014) +#define RNP_VEB_MAC_MASK_LO (0x0020) +#define RNP_VEB_MAC_MASK_HI (0x0024) +#define RNP_VEB_VLAN_MASK (0x0028) +#define DEBUG_PROBE_NUM 16 +#define RNP_DMA_DEBUG_PROBE_LO_REG(n) (0x0100 + 0x08 * (n)) +#define RNP_DMA_DEBUG_PROBE_HI_REG(n) (0x0100 + 0x08 * (n)) +#define DEBUG_CNT_NUM 76 +#define RNP_DMA_DEBUG_CNT(n) (0x0200 + 0x04 * (n)) +#define RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_0 (RNP_DMA_DEBUG_CNT(17)) +#define RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_1 (RNP_DMA_DEBUG_CNT(18)) +#define RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_2 (RNP_DMA_DEBUG_CNT(19)) +#define RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_3 (RNP_DMA_DEBUG_CNT(20)) +#define RNP_DMA_STATS_DMA_TO_SWITCH (RNP_DMA_DEBUG_CNT(21)) +#define RNP_DMA_STATS_MAC_TO_DMA (RNP_DMA_DEBUG_CNT(22)) +#define RNP_DMA_STATS_SWITCH_TO_DMA (RNP_DMA_DEBUG_CNT(23)) +#define RNP_PCI_WR_TO_HOST (RNP_DMA_DEBUG_CNT(34)) + +//RX-Queue Registers +#define RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_HI (0x30) +#define RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_LO (0x34) +#define RNP_DMA_REG_RX_DESC_BUF_LEN (0x38) +#define RNP_DMA_REG_RX_DESC_BUF_HEAD (0x3c) +#define RNP_DMA_REG_RX_DESC_BUF_TAIL (0x40) +#define RNP_DMA_REG_RX_DESC_FETCH_CTRL (0x44) +#define RNP_DMA_REG_RX_INT_DELAY_TIMER (0x48) +#define RNP_DMA_REG_RX_INT_DELAY_PKTCNT (0x4c) +#define RNP_DMA_REG_RX_ARB_DEF_LVL (0x50) +#define PCI_DMA_REG_RX_DESC_TIMEOUT_TH (0x54) +#define PCI_DMA_REG_RX_SCATTER_LENGTH (0x58) +// TX-Queue Registers +#define RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_HI (0x60) +#define RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_LO (0x64) +#define RNP_DMA_REG_TX_DESC_BUF_LEN (0x68) +#define RNP_DMA_REG_TX_DESC_BUF_HEAD (0x6c) +#define 
RNP_DMA_REG_TX_DESC_BUF_TAIL (0x70) +#define RNP_DMA_REG_TX_DESC_FETCH_CTRL (0x74) +#define RNP_DMA_REG_TX_INT_DELAY_TIMER (0x78) +#define RNP_DMA_REG_TX_INT_DELAY_PKTCNT (0x7c) +#define RNP_DMA_REG_TX_ARB_DEF_LVL (0x80) +#define RNP_DMA_REG_TX_FLOW_CTRL_TH (0x84) +#define RNP_DMA_REG_TX_FLOW_CTRL_TM (0x88) +#define RNP_DMA_PKT_FIFO_DATA_PROG_FULL_THRESH (0x0098) + +// VEB Registers +// fixme +#define VEB_TBL_CNTS 64 +#define RNP_DMA_PORT_VBE_MAC_LO_TBL(port, vf) \ + (0x80A0 + 4 * (port) + 0x100 * (vf)) +#define RNP_DMA_PORT_VBE_MAC_HI_TBL(port, vf) \ + (0x80B0 + 4 * (port) + 0x100 * (vf)) +#define RNP_DMA_PORT_VEB_VID_TBL(port, vf) (0x80C0 + 4 * (port) + 0x100 * (vf)) +#define RNP_DMA_PORT_VEB_VF_RING_TBL(port, vf) \ + (0x80D0 + 4 * (port) + 0x100 * (vf)) +#define RNP_DMA_STATS_MAC_TO_MAC (0x1b0) +#define RNP_DMA_STATS_SWITCH_TO_SWITCH (0x1a4) + +/* ================================================================== */ +#define RNP500_NIC_BASE (0x8000) + +#define RNP500_TOP_NIC_REST_N (0x0010) +#define RNP500_TOP_MAC_OUI (0xc004 - RNP500_NIC_BASE) +#define RNP500_TOP_MAC_SN (0xc008 - RNP500_NIC_BASE) + +#define RNP500_TOP_NIC_CONFIG (0x0004) + +/* ==================== RNP-ETH Global Registers ==================== */ +#define RNP_ETH_BASE (0x10000) + +#define RNP10_ETH_BASE (0x10000) + +#define RNP10_ETH_ENABLE_RSS_ONLY (0x3f30001) +#define RNP10_RAH_AV 0x80000000 +#define RNP10_ETH_RAR_RL(n) (0xa000 + 0x04 * n) +#define RNP10_ETH_RAR_RH(n) (0xa400 + 0x04 * n) + +#define RNP10_ETH_DMAC_FCTRL (0x9110) +#define RNP10_ETH_DMAC_MCSTCTRL (0x9114) +#define RNP10_MCSTCTRL_MULTICASE_TBL_EN (1 << 2) +#define RNP10_MCSTCTRL_UNICASE_TBL_EN (1 << 3) +#define RNP10_VM_DMAC_MPSAR_RING(entry) \ + (0xb400 + (4 * (entry))) + +#define RNP10_ETH_MULTICAST_HASH_TABLE(n) (0xac00 + 0x04 * n) + +#define RNP10_ETH_LAYER2_ETQF(n) (0x9200 + 0x04 * (n)) +#define RNP10_ETH_LAYER2_ETQS(n) (0x9240 + 0x04 * (n)) + +/* ==================== RNP10-TCAM Global Registers ==================== */ +#define RNP10_TCAM_BASE (0xc0000 - RNP10_ETH_BASE) + +#define RNP10_TCAM_SDPQF(n) \ + (RNP10_TCAM_BASE + 0x00 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP10_TCAM_DAQF(n) \ + (RNP10_TCAM_BASE + 0x04 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP10_TCAM_SAQF(n) \ + (RNP10_TCAM_BASE + 0x08 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP10_TCAM_APQF(n) \ + (RNP10_TCAM_BASE + 0x0c + 0x40 * (n / 2) + 0x10 * (n % 2)) + +#define RNP10_ETH_TCAM_EN (0x8024) + +#define RNP10_TCAM_SDPQF_MASK(n) \ + (RNP10_TCAM_BASE + 0x20 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP10_TCAM_DAQF_MASK(n) \ + (RNP10_TCAM_BASE + 0x24 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP10_TCAM_SAQF_MASK(n) \ + (RNP10_TCAM_BASE + 0x28 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP10_TCAM_APQF_MASK(n) \ + (RNP10_TCAM_BASE + 0x2c + 0x40 * (n / 2) + 0x10 * (n % 2)) + +#define RNP10_TCAM_MODE (RNP10_TCAM_BASE + 0x20000) +#define RNP10_TCAM_CACHE_ENABLE (RNP10_TCAM_BASE + 0x20004) +#define RNP10_TCAM_CACHE_ADDR_CLR (RNP10_TCAM_BASE + 0x20008) +#define RNP10_TCAM_CACHE_REQ_CLR (RNP10_TCAM_BASE + 0x2000c) + +#define RNP10_TOP_ETH_TCAM_CONFIG_ENABLE (0x30000 - RNP10_ETH_BASE + 0x8050) + +#define RNP10_VEB_TBL_CNTS 64 +#define RNP10_DMA_PORT_VBE_MAC_LO_TBL(port, vf) \ + (0x80A0 + 4 * (port) + 0x100 * (vf)) +#define RNP10_DMA_PORT_VBE_MAC_HI_TBL(port, vf) \ + (0x80B0 + 4 * (port) + 0x100 * (vf)) +#define RNP10_DMA_PORT_VEB_VID_TBL(port, vf) \ + (0x80C0 + 4 * (port) + 0x100 * (vf)) +#define RNP10_DMA_PORT_VEB_VF_RING_TBL(port, vf) \ + (0x80D0 + 4 * (port) + 0x100 * (vf)) + +/* 
+ * [3:0]: + * 4'b0000:RSS disable + * 4'b0001:RSS only + * 4'b0100:DCB and RSS--8*16 + * 4'b1010:POOLS and RSS--32*4 + * [3] :virtual enable + * [16]:ipv4_hash_tcp_enable + * [17]:ipv4_hash_enable + * [20]:ipv6_hash_enable + * [21]:ipv6_hash_tcp_enable + * [22]:ipv4_hash_udp_enable + * [23]:ipv6_hash_udp_enable + * [24]:ipv4_hash_sctp_enable + * [25]:ipv6_hash_sctp_enable + */ +#define RNP10_ETH_RSS_CONTROL (0x92a0) +#define RNP10_IOV_ENABLED (1 << 3) +#define RNP10_ETH_RSS_KEY (0x92d0) +#define RNP10_ETH_TC_IPH_OFFSET_TABLE(n) (0xe800 + 0x04 * (n)) +#define RNP10_ETH_RSS_INDIR_TBL(n) (0xe000 + 0x04 * (n)) +#define RNP10_ETH_VLAN_FILTER_TABLE(n) (0xb000 + 0x04 * (n)) +#define RNP10_VFTA RNP10_ETH_VLAN_FILTER_TABLE +#define RNP10_VLVF(idx) (0xb600 + 4 * (idx)) +#define RNP10_VLVF_TABLE(idx) (0xb700 + 4 * (idx)) +#define RNP10_ETH_TUPLE5_SAQF(n) (0xc000 + 0x04 * (n)) +#define RNP10_ETH_TUPLE5_DAQF(n) (0xc400 + 0x04 * (n)) +#define RNP10_ETH_TUPLE5_SDPQF(n) (0xc800 + 0x04 * (n)) +#define RNP10_ETH_TUPLE5_FTQF(n) (0xcc00 + 0x04 * (n)) +#define RNP10_ETH_TUPLE5_POLICY(n) (0xd000 + 0x04 * (n)) +#define RNP10_ETH_VLAN_FILTER_ENABLE (0x9118) +#define RNP10_ETH_DEFAULT_RX_MIN_LEN (0x80f0) +#define RNP10_ETH_DEFAULT_RX_MAX_LEN (0x80f4) +#define RNP10_ETH_VLAN_VME_REG(n) (0x8040 + 0x04 * (n)) +#define RNP10_ETH_VXLAN_PORT (0x8010) +#define RNP10_FCTRL_BPE BIT(10) +#define RNP10_FCTRL_UPE BIT(9) +#define RNP10_FCTRL_MPE BIT(8) +#define RNP10_HOST_FILTER_EN (0x801c) +#define RNP10_REDIR_EN (0x8030) +#define RNP10_ETH_SCTP_CHECKSUM_EN (0x8038) +#define RNP10_ETH_ENABLE_RSS_ONLY (0x3f30001) +#define RNP10_ETH_DISABLE_RSS (0) +#define RNP10_COMM_REG0 0x30000 +#define RNP10_TOP_NIC_CONFIG (RNP10_COMM_REG0 + 0x0004) +#define RNP10_TOP_NIC_REST_N (RNP10_COMM_REG0 + 0x0010) +#define RNP10_TOP_ETH_BUG_40G_PATCH (RNP10_COMM_REG0 + 0x801c) +#define RNP10_TOP_MAC_OUI (RNP10_COMM_REG0 + 0xc004) +#define RNP10_TOP_MAC_SN (RNP10_COMM_REG0 + 0xc008) +#define RNP10_ETH_TUNNEL_MOD (0x8004) +#define INNER_L4_BIT BIT(6) +#define PKT_LEN_ERR (2) +#define HDR_LEN_ERR (1) +#define RNP10_ETH_ERR_MASK_VECTOR (0x8060) +#define RNP10_ETH_BYPASS (0x8000) +#define RNP10_ETH_DEFAULT_RX_RING (0x806c) +#define DROP_ALL_THRESH (2046) +#define RECEIVE_ALL_THRESH (0x270) +#define RNP10_ETH_RX_PROGFULL_THRESH_PORT (0x8070) +#define RNP10_ETH_HIGH_WATER(n) (0x80c0 + n * (0x08)) +#define RNP10_ETH_LOW_WATER(n) (0x80c4 + n * (0x08)) +#define RNP10_ETH_WRAP_FIELD_TYPE (0x805c) +#define RNP10_MRQC_IOV_EN (0x92a0) +#define RNP10_ETH_SYNQF (0x9290) +#define RNP10_ETH_SYNQF_PRIORITY (0x9294) +#define RNP10_RXTRANS_DROP(port) (0x8904 + 0x40 * (port)) +#define RNP10_RXTRANS_WDT_ERR_PKTS(port) (0x8908 + 0x40 * (port)) +#define RNP10_RXTRANS_CODE_ERR_PKTS(port) (0x890c + 0x40 * (port)) +#define RNP10_RXTRANS_CRC_ERR_PKTS(port) (0x8910 + 0x40 * (port)) +#define RNP10_RXTRANS_SLEN_ERR_PKTS(port) (0x8914 + 0x40 * (port)) +#define RNP10_RXTRANS_GLEN_ERR_PKTS(port) (0x8918 + 0x40 * (port)) +#define RNP10_RXTRANS_IPH_ERR_PKTS(port) (0x891c + 0x40 * (port)) +#define RNP10_RXTRANS_CSUM_ERR_PKTS(port) (0x8920 + 0x40 * (port)) +#define RNP10_RXTRANS_LEN_ERR_PKTS(port) (0x8924 + 0x40 * (port)) +#define RNP10_RXTRANS_CUT_ERR_PKTS(port) (0x8928 + 0x40 * (port)) +#define RNP10_ETH_DECAP_PKT_DROP_NUM(port) (0x82e8 + 0x04 * (port)) +#define RNP10_ETH_INVALID_DROP_PKTS RNP10_ETH_DECAP_PKT_DROP_NUM(0) +#define RNP10_ETH_FILTER_DROP_PKTS RNP10_ETH_DECAP_PKT_DROP_NUM(1) +#define RNP10_ETH_RX_DEBUG(n) (0x8400 + 0x04 * (n)) +#define RNP10_ETH_RX_FC_DEBUG0_NUM 
RNP10_ETH_RX_DEBUG(0) +#define RNP10_ETH_RX_FC_DEBUG1_NUM RNP10_ETH_RX_DEBUG(1) +#define RNP10_ETH_RX_DIS_DEBUG0_NUM RNP10_ETH_RX_DEBUG(2) +#define RNP10_ETH_RX_DIS_DEBUG1_NUM RNP10_ETH_RX_DEBUG(3) +#define RNP10_ETH_HOST_L2_DROP_PKTS RNP10_ETH_RX_DEBUG(4) +#define RNP10_ETH_REDIR_INPUT_MATCH_DROP_PKTS RNP10_ETH_RX_DEBUG(5) +#define RNP10_ETH_ETYPE_DROP_PKTS RNP10_ETH_RX_DEBUG(6) +#define RNP10_ETH_TCP_SYN_DROP_PKTS RNP10_ETH_RX_DEBUG(7) +#define RNP10_ETH_REDIR_TUPLE5_DROP_PKTS RNP10_ETH_RX_DEBUG(8) +#define RNP10_ETH_REDIR_TCAM_DROP_PKTS RNP10_ETH_RX_DEBUG(9) +#define RNP10_MAC_STATS_BROADCAST_LOW (0x0918) +#define RNP10_MAC_STATS_BROADCAST_HIGH (0x091c) +#define RNP10_MAC_STATS_MULTICAST_LOW (0x0920) +#define RNP10_MAC_STATS_MULTICAST_HIGH (0x0924) +#define RNP10_MAC_STATS_RX_PAUSE_COUNT_LOW (0x0988) +#define RNP10_MAC_STATS_RX_PAUSE_COUNT_HIGH (0x098C) +#define RNP10_MAC_STATS_TX_PAUSE_COUNT_LOW (0x0894) +#define RNP10_MAC_STATS_TX_PAUSE_COUNT_HIGH (0x898) +#define RNP10_ETH_DECAP_BMC_DROP_NUM (0x82f4) +#define RNP10_ETH_DECAP_SWITCH_DROP_NUM (0x82f8) +#define RNP10_VLVF(idx) (0xb600 + 4 * (idx)) +#define RNP500_VEB_TBL_CNTS 8 +#define RNP500_DMA_RBUF_FIFO (0x00b0) +#define RNP500_DMA_PORT_VBE_MAC_LO_TBL(port, vf) \ + (0x10c0 + 4 * (port) + 0x100 * (vf)) +#define RNP500_DMA_PORT_VBE_MAC_HI_TBL(port, vf) \ + (0x10c4 + 4 * (port) + 0x100 * (vf)) +#define RNP500_DMA_PORT_VEB_VID_TBL(port, vf) \ + (0x10C8 + 4 * (port) + 0x100 * (vf)) +#define RNP500_DMA_PORT_VEB_VF_RING_TBL(port, vf) \ + (0x10cc + 4 * (port) + 0x100 * (vf)) +#define RNP500_ETH_BASE (0x10000) +#define RNP500_ETH_TUPLE5_SAQF(n) (0xc000 + 0x04 * (n)) +#define RNP500_ETH_TUPLE5_DAQF(n) (0xc400 + 0x04 * (n)) +#define RNP500_ETH_TUPLE5_SDPQF(n) (0xc800 + 0x04 * (n)) +#define RNP500_ETH_TUPLE5_FTQF(n) (0xcc00 + 0x04 * (n)) +#define RNP500_ETH_TUPLE5_POLICY(n) (0xce00 + 0x04 * (n)) +#define RNP500_ETH_DEFAULT_RX_MIN_LEN (0x80f0) +#define RNP500_ETH_DEFAULT_RX_MAX_LEN (0x80f4) +#define RNP500_ETH_VLAN_VME_REG(n) (0x8040 + 0x04 * (n)) +#define RNP500_ETH_ERR_MASK_VECTOR (0x8060) +#define RNP500_ETH_RSS_MASK (0x3ff0001) +#define RNP500_ETH_ENABLE_RSS_ONLY (0x3f30001) +#define RNP500_ETH_RSS_CONTROL (0x92a0) +#define RNP500_MRQC_IOV_EN (0x92a0) +#define RNP500_IOV_ENABLED (1 << 3) +#define RNP500_ETH_DISABLE_RSS (0) +#define RNP500_ETH_SYNQF (0x9290) +#define RNP500_ETH_SYNQF_PRIORITY (0x9294) +#define RNP500_ETH_FCS_EN (0x804c) +#define RNP500_ETH_HIGH_WATER(n) (0x80c0 + n * (0x08)) +#define RNP500_ETH_LOW_WATER(n) (0x80c4 + n * (0x08)) +#define RNP500_ETH_WRAP_FIELD_TYPE (0x805c) +#define RNP500_ETH_TX_VLAN_CONTROL_ENABLE (0x0070) +#define RNP500_ETH_TX_VLAN_TYPE (0x0074) +#define RNP500_ETH_RX_MAC_LEN_REG (0x80e0) +#define RNP500_ETH_WHOLE_PKT_LEN_ERR_DROP (0x807c) +#define RNP500_RAH_AV 0x80000000 +#define RNP500_ETH_RAR_RL(n) (0xa000 + 0x04 * n) +#define RNP500_ETH_RAR_RH(n) (0xa400 + 0x04 * n) +#define RNP500_FCTRL_BPE BIT(10) +#define RNP500_FCTRL_UPE BIT(9) +#define RNP500_FCTRL_MPE BIT(8) +#define RNP500_ETH_DMAC_FCTRL (0x9110) +#define RNP500_ETH_DMAC_MCSTCTRL (0x9114) +#define RNP500_MCSTCTRL_MULTICASE_TBL_EN (1 << 4) +#define RNP500_MCSTCTRL_UNICASE_TBL_EN (1 << 3) +#define RNP500_VM_DMAC_MPSAR_RING(entry) \ + (0xb400 + (4 * (entry))) +#define RNP500_ETH_MULTICAST_HASH_TABLE(n) (0xac00 + 0x04 * n) +#define RNP500_ETH_RSS_KEY (0x92d0) +#define RNP500_ETH_TC_IPH_OFFSET_TABLE(n) (0xe800 + 0x04 * (n)) +#define RNP500_ETH_RSS_INDIR_TBL(n) (0xe000 + 0x04 * (n)) +#define RNP500_ETH_VLAN_FILTER_TABLE(n) (0xb000 + 0x04 * (n)) 
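+/* RNP500_VFTA below just aliases the RNP500 VLAN filter table, presumably + * to mirror the RNP10_VFTA naming used for the n10 registers above. + */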
+#define RNP500_VFTA RNP500_ETH_VLAN_FILTER_TABLE +#define RNP500_VLVF(idx) (0xb600 + 4 * (idx)) +#define RNP500_VLVF_TABLE(idx) (0xb700 + 4 * (idx)) +#define RNP500_ETH_VLAN_FILTER_ENABLE (0x9118) +#define RNP500_PRIORITY_1_MARK (0x8080) +#define RNP500_PRIORITY_1 (400) +#define RNP500_PRIORITY_0 (300) +#define RNP500_PRIORITY_0_MARK (0x8084) +#define RNP500_PRIORITY_EN (0x8088) +#define RNP500_PRIORITY_EN_8023 (0x808c) +#define RNP500_ETH_LAYER2_ETQF(n) (0x9200 + 0x04 * (n)) +#define RNP500_ETH_LAYER2_ETQS(n) (0x9240 + 0x04 * (n)) +#define RNP500_ETH_BYPASS (0x8000) +#define RNP500_ETH_ERR_MASK_VECTOR (0x8060) +#define RNP500_ETH_PRIV_DATA_CONTROL_REG (0x8068) +#define RNP500_ETH_DEFAULT_RX_RING (0x806c) +#define RNP500_ETH_DOUBLE_VLAN_DROP (0x8078) +#define RNP500_HOST_FILTER_EN (0x800c) +#define RNP500_BAD_PACKETS_RECEIVE_EN (0x8024) +#define RNP500_REDIR_EN (0x8030) +#define WATCHDOG_TIMER_ERROR BIT(0) +#define RUN_FRAME_ERROR BIT(1) +#define GAINT_FRAME_ERROR BIT(2) +#define LATE_COLLISION_ERROR BIT(3) +#define GMII_ERROR BIT(4) +#define DRIBBLING_BIT_ERROR BIT(5) +#define CRC_ERROR BIT(6) +#define LENGTH_ERROR BIT(8) +#define DA_FILTER_ERROR BIT(9) +#define SA_FILTER_ERROR BIT(10) + +/* ================================================================== */ +#define ETH_ERR_SCTP (1 << 4) +#define ETH_ERR_L4 (1 << 3) +#define ETH_ERR_L3 (1 << 2) +#define ETH_ERR_PKT_LEN_ERR (1 << 1) +#define ETH_ERR_HDR_LEN_ERR (1 << 0) +#define ETH_IGNORE_ALL_ERR \ + (ETH_ERR_SCTP | ETH_ERR_L4 | ETH_ERR_L3 | ETH_ERR_PKT_LEN_ERR | \ + ETH_ERR_HDR_LEN_ERR) +#define VM_DMAC_TBL_SZ 128 +#define RNP_ETH_ENABLE_RSS_ONLY (0x3f30001) +#define RNP_ETH_DISABLE_RSS (0) +#define RNP_ETH_TX_PROGFULL_THRESH_PORT(n) (RNP_ETH_BASE + 0x0060 + 0x08 * (n)) +#define RNP_ETH_TX_PROGEMPTY_THRESH_PORT(n) (RNP_ETH_BASE + 0x0064 + 0x08 * (n)) +#define RNP_ETH_EMAC_DMA_PROFULL_THRESH (RNP_ETH_BASE + 0x0080) +#define RNP_ETH_EMAC_DMA_PROEMPTY_THRESH (RNP_ETH_BASE + 0x0084) +#define RNP_ETH_EMAC_SW_PROFULL_THRESH (RNP_ETH_BASE + 0x0088) +#define RNP_ETH_EMAC_SW_PROEMPTY_THRESH (RNP_ETH_BASE + 0x008c) +#define RNP_ETH_EMAC_BMC_TX_PROFULL_THRESH (RNP_ETH_BASE + 0x0090) +#define RNP_ETH_EMAC_BMC_TX_PROEMPTY_THRESH (RNP_ETH_BASE + 0x0094) +#define RNP_ETH_CNT_PKT_EMAC_TX(n) (RNP_ETH_BASE + 0x00a0 + 0x04 * (n)) +#define RNP_ETH_CNT_PKT_PECL_TX(n) (RNP_ETH_BASE + 0x00b0 + 0x04 * (n)) +#define RNP_ETH_STATUS_TX_FLOWCTRL(n) (RNP_ETH_BASE + 0x00c0 + 0x04 * (n)) +#define RNP_ETH_VERSION_FLOWWCTRL (RNP_ETH_BASE + 0x00d0) +#define RNP_ETH_CFG_ETH_MAC (RNP_ETH_BASE + 0x00d4) +#define RNP_ETH_SCA_TX_CS(port) (RNP_ETH_BASE + 0x0100 + 0x08 * (port)) +#define RNP_ETH_SCA_TX_NS(port) (RNP_ETH_BASE + 0x0104 + 0x08 * (port)) +#define RNP_ETH_TXTRANS_CS(port) (RNP_ETH_BASE + 0x0120 + 0x08 * (port)) +#define RNP_ETH_TXTRANS_NS(port) (RNP_ETH_BASE + 0x0124 + 0x08 * (port)) +#define RNP_ETH_1TO4_INST0_IN_PKTS (RNP_ETH_BASE + 0x0200) +#define RNP_ETH_1TO4_INST1_IN_PKTS (RNP_ETH_BASE + 0x0204) +#define RNP_ETH_1TO4_INST2_IN_PKTS (RNP_ETH_BASE + 0x0208) +#define RNP_ETH_1TO4_INST3_IN_PKTS (RNP_ETH_BASE + 0x020c) +#define RNP_ETH_IN_0_TX_PKT_NUM(port) (RNP_ETH_BASE + 0x0210 + 0x10 * (port)) +#define RNP_ETH_IN_1_TX_PKT_NUM(port) (RNP_ETH_BASE + 0x0214 + 0x10 * (port)) +#define RNP_ETH_IN_2_TX_PKT_NUM(port) (RNP_ETH_BASE + 0x0218 + 0x10 * (port)) +#define RNP_ETH_IN_3_TX_PKT_NUM(port) (RNP_ETH_BASE + 0x021c + 0x10 * (port)) +#define RNP_ETH_EMAC_TX_TO_PHY_PKTS(port) (RNP_ETH_BASE + 0x0250 + 4 * (port)) +#define RNP_ETH_TXTRANS_PTP_PKT_NUM(port) 
(RNP_ETH_BASE + 0x0260 + 4 * (port)) +#define RNP_ETH_TX_DEBUG(n) (RNP_ETH_BASE + 0x0300 + 0x04 * (n)) +#define RNP_ETH_PTP_TX_STATUS(n) (RNP_ETH_BASE + 0x0400) +#define RNP_ETH_PTP_TX_HTIMES(n) (RNP_ETH_BASE + 0x0404) +#define RNP_ETH_PTP_TX_LTIMES(n) (RNP_ETH_BASE + 0x0408) +#define RNP_ETH_PTP_TX_TSVALUE_STATUS(n) (RNP_ETH_BASE + 0x040c) +#define RNP_ETH_PTP_TX_CLEAR(n) (RNP_ETH_BASE + 0x0410) +#define RNP_ETH_MAC_SPEED_PORT(n) (RNP_ETH_BASE + 0x0450 + 0x04 * (n)) +#define RNP_ETH_MAC_LOOPBACK_MODE_PORT(n) (RNP_ETH_BASE + 0x0460 + 0x04 * (n)) +#define RNP_ETH_EXCEPT_DROP_PROC (RNP_ETH_BASE + 0x0470) +#define RNP_ETH_IPP (RNP_ETH_BASE + 0x8000) +#define RNP_ETH_BYPASS (RNP_ETH_BASE + 0x8000) +#define RNP_ETH_TUNNEL_MOD (RNP_ETH_BASE + 0x8004) +#define RNP_ETH_LOOPBACK_EN (RNP_ETH_BASE + 0x8008) +#define RNP_FIFO_CTRL_MODE (RNP_ETH_BASE + 0x800c) +#define RNP_ETH_VXLAN_PORT (RNP_ETH_BASE + 0x8010) +#define RNP_ETH_NVGRE_PORT (RNP_ETH_BASE + 0x8014) +#define RNP_ETH_RDMA_PORT (RNP_ETH_BASE + 0x8018) +#define RNP_HOST_FILTER_EN (RNP_ETH_BASE + 0x801c) +#define RNP_MNG_FILTER_EN (RNP_ETH_BASE + 0x8020) +#define RNP_ETH_TCAM_EN (RNP_ETH_BASE + 0x8024) +#define RNP_CONGEST_DROP_EN (RNP_ETH_BASE + 0x8028) +#define RNP_REDIR_EN (RNP_ETH_BASE + 0x8030) +#define RNP_ETH_SCTP_CHECKSUM_EN (RNP_ETH_BASE + 0x8038) +#define RNP_ETH_ARP_FUNC_EN (RNP_ETH_BASE + 0x803c) +#define RNP_ETH_VLAN_VME_REG(n) (RNP_ETH_BASE + 0x8040 + 0x04 * (n)) +#define RNP_ETH_CVLAN_RM_EN (RNP_ETH_BASE + 0x8050) +#define RNP_ETH_VLAN_RM_TYPE (RNP_ETH_BASE + 0x8054) +#define RNP_ETH_WRAP_FIELD_TYPE (RNP_ETH_BASE + 0x805c) +#define RNP_ETH_ERR_MASK_VECTOR (RNP_ETH_BASE + 0x8060) +#define RNP_ETH_DEFAULT_RX_RING (RNP_ETH_BASE + 0x806c) +#define RNP_ETH_RX_PROGFULL_THRESH_PORT(n) (RNP_ETH_BASE + 0x8070 + 0x08 * (n)) +#define RNP_ETH_RX_PROGEMPTY_THRESH_PORT(n) (RNP_ETH_BASE + 0x8074 + 0x08 * (n)) +#define RNP_ETH_EMAC_GAT_PROGFULL_THRESH (RNP_ETH_BASE + 0x8090) +#define RNP_ETH_EMAC_GAT_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x8094) +#define RNP_ETH_EMAC_PARSE_PROGFULL_THRESH (RNP_ETH_BASE + 0x8098) +#define RNP_ETH_EMAC_PARSE_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x809c) +#define RNP_ETH_FC_PROGFULL_THRESH (RNP_ETH_BASE + 0x80a0) +#define RNP_ETH_FC_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x80a4) +#define RNP_ETH_DIS_PROGFULL_THRESH (RNP_ETH_BASE + 0x80a8) +#define RNP_ETH_DIS_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x80ac) +#define RNP_ETH_COV_PROGFULL_THRESH (RNP_ETH_BASE + 0x80b0) +#define RNP_ETH_COV_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x80b4) +#define RNP_ETH_BMC_RX_PROGFULL_THRESH (RNP_ETH_BASE + 0x80b8) +#define RNP_ETH_BMC_RX_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x80bc) +#define RNP_ETH_HIGH_WATER(n) (RNP_ETH_BASE + 0x80c0 + n * (0x08)) +#define RNP_ETH_LOW_WATER(n) (RNP_ETH_BASE + 0x80c4 + n * (0x08)) +#define RNP_ETH_DEFAULT_RX_MIN_LEN (RNP_ETH_BASE + 0x80f0) +#define RNP_ETH_DEFAULT_RX_MAX_LEN (RNP_ETH_BASE + 0x80f4) +#define RNP_ETH_PTP_EVENT_PORT (RNP_ETH_BASE + 0x80f8) +#define RNP_ETH_PTP_GENER_PORT_REG (RNP_ETH_BASE + 0x80fc) +#define RNP_ETH_RX_TRANS_CS_PORT(n) (RNP_ETH_BASE + 0x8100 + 0x08 * (n)) +#define RNP_ETH_RX_TRANS_NS_PORT(n) (RNP_ETH_BASE + 0x8104 + 0x08 * (n)) +#define RNP_ETH_GAT_RX_CS (RNP_ETH_BASE + 0x8120) +#define RNP_ETH_GAT_RX_NS (RNP_ETH_BASE + 0x8124) +#define RNP_ETH_EMAC_PIP_CS (RNP_ETH_BASE + 0x8128) +#define RNP_ETH_EMAC_PIP_NS (RNP_ETH_BASE + 0x812c) +#define RNP_ETH_EMAC_FC_CS (RNP_ETH_BASE + 0x8138) +#define RNP_ETH_EMAC_FC_NS (RNP_ETH_BASE + 0x813c) +#define RNP_ETH_EMAC_DIS_CS (RNP_ETH_BASE + 0x8140) +#define 
RNP_ETH_EMAC_DIS_NS (RNP_ETH_BASE + 0x8144) +#define RNP_ETH_HOST_L2_FILTER_CS (RNP_ETH_BASE + 0x8150) +#define RNP_ETH_HOST_L2_FILTER_NS (RNP_ETH_BASE + 0x8154) +#define RNP_ETH_EMAC_DECAP_CS (RNP_ETH_BASE + 0x8158) +#define RNP_ETH_EMAC_DECAP_NS (RNP_ETH_BASE + 0x815c) +#define RNP_ETH_PFC_CONFIG_PROT(n) (RNP_ETH_BASE + 0x8180 + n * (0x04)) +#define RNP_ETH_RX_PKT_NUM(port) (RNP_ETH_BASE + 0x8220 + 0x04 * (port)) +#define RNP_ETH_RX_DROP_PKT_NUM(port) (RNP_ETH_BASE + 0x8230 + 0x04 * (port)) +#define RNP_ETH_TOTAL_GAT_RX_PKT_NUM (RNP_ETH_BASE + 0x8240) +#define RNP_ETH_PKT_ARP_REQ_NUM (RNP_ETH_BASE + 0x8250) +#define RNP_ETH_PKT_ARP_RESPONSE_NUM (RNP_ETH_BASE + 0x8254) +#define RNP_ETH_ICMP_NUM (RNP_ETH_BASE + 0x8258) +#define RNP_ETH_PKT_UDP_NUM (RNP_ETH_BASE + 0x825c) +#define RNP_ETH_PKT_TCP_NUM (RNP_ETH_BASE + 0x8260) +#define RNP_ETH_PKT_ESP_NUM (RNP_ETH_BASE + 0x8264) +#define RNP_ETH_PKT_GRE_NUM (RNP_ETH_BASE + 0x8268) +#define RNP_ETH_PKT_SCTP_NUM (RNP_ETH_BASE + 0x826c) +#define RNP_ETH_PKT_TCPSYN_NUM (RNP_ETH_BASE + 0x8270) +#define RNP_ETH_PKT_VXLAN_NUM (RNP_ETH_BASE + 0x8274) +#define RNP_ETH_PKT_NVGRE_NUM (RNP_ETH_BASE + 0x8278) +#define RNP_ETH_PKT_FRAGMENT_NUM (RNP_ETH_BASE + 0x827c) +#define RNP_ETH_PKT_LAYER1_VLAN_NUM (RNP_ETH_BASE + 0x8280) +#define RNP_ETH_PKT_LAYER2_VLAN_NUM (RNP_ETH_BASE + 0x8284) +#define RNP_ETH_PKT_IPV4_NUM (RNP_ETH_BASE + 0x8288) +#define RNP_ETH_PKT_IPV6_NUM (RNP_ETH_BASE + 0x828c) +#define RNP_ETH_PKT_INGRESS_NUM (RNP_ETH_BASE + 0x8290) +#define RNP_ETH_PKT_EGRESS_NUM (RNP_ETH_BASE + 0x8294) +#define RNP_ETH_PKT_IP_HDR_LEN_ERR_NUM (RNP_ETH_BASE + 0x8298) +#define RNP_ETH_PKT_IP_PKT_LEN_ERR_NUM (RNP_ETH_BASE + 0x829c) +#define RNP_ETH_PKT_L3_HDR_CHK_ERR_NUM (RNP_ETH_BASE + 0x82a0) +#define RNP_ETH_PKT_L4_HDR_CHK_ERR_NUM (RNP_ETH_BASE + 0x82a4) +#define RNP_ETH_PKT_SCTP_CHK_ERR_NUM (RNP_ETH_BASE + 0x82a8) +#define RNP_ETH_PKT_VLAN_ERR_NUM (RNP_ETH_BASE + 0x82ac) +#define RNP_ETH_PKT_RDMA_NUM (RNP_ETH_BASE + 0x82b0) +#define RNP_ETH_PKT_ARP_AUTO_RESPONSE_NUM (RNP_ETH_BASE + 0x82b4) +#define RNP_ETH_PKT_ICMPV6_NUM (RNP_ETH_BASE + 0x82b8) +#define RNP_ETH_PKT_IPV6_EXTEND_NUM (RNP_ETH_BASE + 0x82bc) +#define RNP_ETH_PKT_802_3_NUM (RNP_ETH_BASE + 0x82c0) +#define RNP_ETH_PKT_EXCEPT_SHORT_NUM (RNP_ETH_BASE + 0x82c4) +#define RNP_ETH_PKT_PTP_NUM (RNP_ETH_BASE + 0x82c8) +#define RNP_ETH_DECAP_PKT_IN_NUM (RNP_ETH_BASE + 0x82d0) +#define RNP_ETH_DECAP_PKT_OUT_NUM (RNP_ETH_BASE + 0x82d4) +#define RNP_ETH_DECAP_DMAC_OUT_NUM (RNP_ETH_BASE + 0x82d8) +#define RNP_ETH_DECAP_BMC_OUT_NUM (RNP_ETH_BASE + 0x82dc) +#define RNP_ETH_DECAP_SW_OUT_NUM (RNP_ETH_BASE + 0x82e0) +#define RNP_ETH_DECAP_MIRROR_OUT_NUM (RNP_ETH_BASE + 0x82e4) +#define RNP_ETH_DECAP_PKT_DROP_NUM(port) (RNP_ETH_BASE + 0x82e8 + 0x04 * (port)) +#define RNP_ETH_INVALID_DROP_PKTS RNP_ETH_DECAP_PKT_DROP_NUM(0) +#define RNP_ETH_FILTER_DROP_PKTS RNP_ETH_DECAP_PKT_DROP_NUM(1) +#define RNP_ETH_DECAP_DMAC_DROP_NUM (RNP_ETH_BASE + 0x82f0) +#define RNP_ETH_DECAP_BMC_DROP_NUM (RNP_ETH_BASE + 0x82f4) +#define RNP_ETH_DECAP_SWITCH_DROP_NUM (RNP_ETH_BASE + 0x82f8) +#define RNP_ETH_DECAP_RM_VLAN_NUM (RNP_ETH_BASE + 0x82fc) +#define RNP_ETH_RX_FC_PKT_IN_NUM (RNP_ETH_BASE + 0x8300) +#define RNP_ETH_RX_FC_PKT_OUT_NUM (RNP_ETH_BASE + 0x8304) +#define RNP_ETH_RX_FC_PKT_DROP0_NUM (RNP_ETH_BASE + 0x8308) +#define RNP_ETH_RX_FC_PKT_DROP1_NUM (RNP_ETH_BASE + 0x830c) +#define RNP_ETH_RING_FC_STATUS0 (RNP_ETH_BASE + 0x8310) +#define RNP_ETH_RING_FC_STATUS1 (RNP_ETH_BASE + 0x8314) +#define RNP_ETH_RING_FC_STATUS2 
(RNP_ETH_BASE + 0x8318) +#define RNP_ETH_RING_FC_STATUS3 (RNP_ETH_BASE + 0x831c) +#define RNP_ETH_RX_DEBUG(n) (RNP_ETH_BASE + 0x8400 + 0x04 * (n)) +#define RNP_ETH_RX_FC_DEBUG0_NUM RNP_ETH_RX_DEBUG(0) +#define RNP_ETH_RX_FC_DEBUG1_NUM RNP_ETH_RX_DEBUG(1) +#define RNP_ETH_RX_DIS_DEBUG0_NUM RNP_ETH_RX_DEBUG(2) +#define RNP_ETH_RX_DIS_DEBUG1_NUM RNP_ETH_RX_DEBUG(3) +#define RNP_ETH_HOST_L2_DROP_PKTS RNP_ETH_RX_DEBUG(4) +#define RNP_ETH_REDIR_INPUT_MATCH_DROP_PKTS RNP_ETH_RX_DEBUG(5) +#define RNP_ETH_ETYPE_DROP_PKTS RNP_ETH_RX_DEBUG(6) +#define RNP_ETH_TCP_SYN_DROP_PKTS RNP_ETH_RX_DEBUG(7) +#define RNP_ETH_REDIR_TUPLE5_DROP_PKTS RNP_ETH_RX_DEBUG(8) +#define RNP_ETH_REDIR_TCAM_DROP_PKTS RNP_ETH_RX_DEBUG(9) +#define RNP_ETH_VMARK_TC(n) (RNP_ETH_BASE + 0x8500 + 0x04 * (n)) +#define RNP_RING_FC_ENABLE (RNP_ETH_BASE + 0x8520) +#define RNP_SELECT_RING_EN(n) (RNP_ETH_BASE + 0x8524 + (0x4 * n)) +#define RNP_TC_FC_SW_EN (RNP_ETH_BASE + 0x8534) +#define RNP_ETH_LOCAL_DIP(n) (RNP_ETH_BASE + 0x8600 + 0x04 * (n)) +#define RNP_ETH_LOCAL_DMAC_H(n) (RNP_ETH_BASE + 0x8700 + 0x04 * (n)) +#define RNP_ETH_LOCAL_DMAC_L(n) (RNP_ETH_BASE + 0x8800 + 0x04 * (n)) +/* Rx Ring Flow Control */ +#define RNP_RXTRANS_RX_PKTS(port) (RNP_ETH_BASE + 0x8900 + 0x40 * (port)) +#define RNP_RXTRANS_DROP_PKTS(port) (RNP_ETH_BASE + 0x8904 + 0x40 * (port)) +#define RNP_RXTRANS_WDT_ERR_PKTS(port) (RNP_ETH_BASE + 0x8908 + 0x40 * (port)) +#define RNP_RXTRANS_CODE_ERR_PKTS(port) (RNP_ETH_BASE + 0x890c + 0x40 * (port)) +#define RNP_RXTRANS_CRC_ERR_PKTS(port) (RNP_ETH_BASE + 0x8910 + 0x40 * (port)) +#define RNP_RXTRANS_SLEN_ERR_PKTS(port) (RNP_ETH_BASE + 0x8914 + 0x40 * (port)) +#define RNP_RXTRANS_GLEN_ERR_PKTS(port) (RNP_ETH_BASE + 0x8918 + 0x40 * (port)) +#define RNP_RXTRANS_IPH_ERR_PKTS(port) (RNP_ETH_BASE + 0x891c + 0x40 * (port)) +#define RNP_RXTRANS_CSUM_ERR_PKTS(port) (RNP_ETH_BASE + 0x8920 + 0x40 * (port)) +#define RNP_RXTRANS_LEN_ERR_PKTS(port) (RNP_ETH_BASE + 0x8924 + 0x40 * (port)) +#define RNP_RXTRANS_CUT_ERR_PKTS(port) (RNP_ETH_BASE + 0x8928 + 0x40 * (port)) +#define RNP_RXTRANS_EXCEPT_BYTES(port) (RNP_ETH_BASE + 0x892c + 0x40 * (port)) +#define RNP_RXTRANS_G1600_BYTES_PKTS(port) \ + (RNP_ETH_BASE + 0x8930 + 0x40 * (port)) +#define RNP_RX_RING_MAXRATE(n) (RNP_ETH_BASE + 0x8a00 + (0x4 * n)) +#define RNP_ETH_RX_PROGFULL_RTRN(n) (RNP_ETH_BASE + 0x8c00 + 0x04 * (n)) +#define RNP_ETH_CNT_PKT_EMAC_RX(n) (RNP_ETH_BASE + 0x8c10 + 0x04 * (n)) +#define RNP_ETH_CNT_PKT_PECL_RX(n) (RNP_ETH_BASE + 0x8c20 + 0x04 * (n)) +#define RNP_ETH_STATUS_RX_FLOWCTRL(n) (RNP_ETH_BASE + 0x8c30 + 0x04 * (n)) +#define RNP_ETH_DMAC_FCTRL (RNP_ETH_BASE + 0x9110) +#define RNP_ETH_DMAC_MCSTCTRL (RNP_ETH_BASE + 0x9114) +#define RNP_MCSTCTRL_MULTICASE_TBL_EN (1 << 2) +#define RNP_MCSTCTRL_UNICASE_TBL_EN (1 << 3) +#define RNP_MCSTCTRL_DMAC_47 0x00 +#define RNP_MCSTCTRL_DMAC_46 0x01 +#define RNP_MCSTCTRL_DMAC_45 0x02 +#define RNP_MCSTCTRL_DMAC_43 0x03 +#define RNP_ETH_VLAN_FILTER_ENABLE (RNP_ETH_BASE + 0x9118) +#define RNP_ETH_INPORT_POLICY_VAL (RNP_ETH_BASE + 0x91d0) +#define RNP_ETH_INPORT_POLICY_REG(n) (RNP_ETH_BASE + 0x91e0 + 0x04 * (n)) +#define ETH_LAYER2_NUM (16) +#define RNP_ETH_LAYER2_ETQF(n) (RNP_ETH_BASE + 0x9200 + 0x04 * (n)) +#define RNP_ETH_LAYER2_ETQS(n) (RNP_ETH_BASE + 0x9240 + 0x04 * (n)) +#define RNP_ETH_LAYER2_ETQS_DEFAULT (RNP_ETH_BASE + 0x9280) +#define RNP_ETH_ETQF_DEFAULT (RNP_ETH_BASE + 0x9284) +#define RNP_ETH_SYNQF (RNP_ETH_BASE + 0x9290) +#define RNP_ETH_SYNQF_PRIORITY (RNP_ETH_BASE + 0x9294) +/* + * [3:0]: + * 4'b0000:RSS disable + * 
4'b0001:RSS only + * 4'b0100:DCB and RSS--8*16 + * 4'b1010:POOLS and RSS--32*4 + * [3] :virtual enable + * [16]:ipv4_hash_tcp_enable + * [17]:ipv4_hash_enable + * [20]:ipv6_hash_enable + * [21]:ipv6_hash_tcp_enable + * [22]:ipv4_hash_udp_enable + * [23]:ipv6_hash_udp_enable + * [24]:ipv4_hash_sctp_enable + * [25]:ipv6_hash_sctp_enable + */ +#define RNP_ETH_RSS_CONTROL (RNP_ETH_BASE + 0x92a0) +#define RNP_MRQC_IOV_EN (RNP_ETH_BASE + 0x92a0) +#define RNP_IOV_ENABLED (1 << 3) +#define RNP_ETH_RSS_KEY (RNP_ETH_BASE + 0x92d0) +#define RNP_ETH_RAR_RL(n) (RNP_ETH_BASE + 0xa000 + 0x04 * n) +#define RNP_ETH_RAR_RH(n) (RNP_ETH_BASE + 0xa400 + 0x04 * n) +#define RNP_ETH_UTA(n) (RNP_ETH_BASE + 0xa800 + 0x04 * n) +#define RNP_ETH_MULTICAST_HASH_TABLE(n) (RNP_ETH_BASE + 0xac00 + 0x04 * n) +#define RNP_MTA(n) RNP_ETH_MULTICAST_HASH_TABLE(n) +#define RNP_ETH_VLAN_FILTER_TABLE(n) (RNP_ETH_BASE + 0xb000 + 0x04 * (n)) +#define RNP_VFTA RNP_ETH_VLAN_FILTER_TABLE +#define RNP_FCTRL_MULTICASE_BYPASS (1 << 8) +#define RNP_FCTRL_UNICASE_BYPASS (1 << 9) +#define RNP_FCTRL_BROADCAST_BYPASS (1 << 10) +#define RNP_ETH_ETYPE_TABLE(n) (RNP_ETH_BASE + 0xb300 + 0x04 * (n)) +#define RNP_VM_DMAC_MPSAR_RING(entry) \ + (RNP_ETH_BASE + 0xb400 + (4 * (entry))) +#define RNP_VLVF(idx) (RNP_ETH_BASE + 0xb600 + 4 * (idx)) +#define RNP_VLVFB(idx) (RNP_ETH_BASE + 0xb700 + 4 * (idx)) +#define RNP_VM_TUNNEL_PFVLVF_L(n) (RNP_ETH_BASE + 0xb800 + 0x04 * (n)) +#define RNP_VM_TUNNEL_PFVLVF_H(n) (RNP_ETH_BASE + 0xb900 + 0x04 * (n)) +/* 5 tuple */ +#define ETH_TUPLE5_NUM 128 +#define RNP_ETH_TUPLE5_SAQF(n) (RNP_ETH_BASE + 0xc000 + 0x04 * (n)) +#define RNP_ETH_TUPLE5_DAQF(n) (RNP_ETH_BASE + 0xc400 + 0x04 * (n)) +#define RNP_ETH_TUPLE5_SDPQF(n) (RNP_ETH_BASE + 0xc800 + 0x04 * (n)) +#define RNP_ETH_TUPLE5_FTQF(n) (RNP_ETH_BASE + 0xcc00 + 0x04 * (n)) +#define RNP_ETH_TUPLE5_POLICY(n) (RNP_ETH_BASE + 0xd000 + 0x04 * (n)) +#define RNP_ETH_RSS_INDIR_TBL(p, n) \ + (RNP_ETH_BASE + 0xe000 + 0x04 * (n) + 0x200 * (p)) +#define RNP_ETH_TC_IPH_OFFSET_TABLE(n) (RNP_ETH_BASE + 0xe800 + 0x04 * (n)) +#define RNP_ETH_TC_VLAN_OFFSET_TABLE(n) (RNP_ETH_BASE + 0xe820 + 0x04 * (n)) +#define RNP_ETH_TC_PORT_OFFSET_TABLE(n) (RNP_ETH_BASE + 0xe840 + 0x04 * (n)) +#define RNP_REDIR_RING_MASK (RNP_ETH_BASE + 0xe860) +#define RNP_ETH_RSS_MODE (0x6fe00) +#define RNP_ETH_RSS_INDIR_TBL_UV3P(n) (0x6ff00 + 0x04 * (n)) +/* ================================================================== */ + +/* ==================== RNP-REG Global Registers ==================== */ +#define RNP_COMM_REG0 0x30000 +#define RNP_TOP_NIC_VERSION (RNP_COMM_REG0 + 0x0000) +#define RNP500_TOP_NIC_VERSION (0x8000 + 0x0000) +#define RNP500_FPGA_VERSION (0x8020) +#define RNP500_FPGA_TIME (0x8024) +#define RNP500_LEGANCY_ENABLE (0xd004) +#define RNP_TOP_NIC_CONFIG (RNP_COMM_REG0 + 0x0004) +#define RNP_TOP_NIC_STAT (RNP_COMM_REG0 + 0x0008) +#define RNP_TOP_NIC_DUMMY (RNP_COMM_REG0 + 0x000c) +#define RNP_TOP_NIC_REST_N (RNP_COMM_REG0 + 0x0010) +#define NIC_RESET 0 +#define RNP_TOP_DMA_MEM_SLP (RNP_COMM_REG0 + 0x4004) +#define RNP_TOP_DMA_MEM_SD (RNP_COMM_REG0 + 0x4008) +#define RNP_TOP_ETH_TIMESTAMP_SEL (RNP_COMM_REG0 + 0x8010) +#define RNP_TOP_ETH_MAC_CLK_SEL (RNP_COMM_REG0 + 0x8014) +#define RNP_TOP_ETH_INF_ETH_STATUS (RNP_COMM_REG0 + 0x8018) +#define RNP_TOP_ETH_BUG_40G_PATCH (RNP_COMM_REG0 + 0x801c) +#define RNP_TOP_ETH_PWR_PORT_NUM (4) +#define RNP_TOP_ETH_PWR_CLAMP_CTRL_PORT(n) (RNP_COMM_REG0 + 0x8020 + 0xc * (n)) +#define RNP_TOP_ETH_PWR_ISOLATE_PORT(n) (RNP_COMM_REG0 + 0x8024 + 0xc * (n)) +#define 
RNP_TOP_ETH_PWR_DOWN_PORT(n) (RNP_COMM_REG0 + 0x8028 + 0xc * (n)) +#define RNP_TOP_ETH_TCAM_CONFIG_ENABLE (RNP_COMM_REG0 + 0x8050) +#define RNP_TOP_ETH_SLIP (RNP_COMM_REG0 + 0x8060) +#define RNP_TOP_ETH_SHUT_DOWN (RNP_COMM_REG0 + 0x8064) +#define RNP_TOP_ETH_OVS_SLIP (RNP_COMM_REG0 + 0x8068) +#define RNP_TOP_ETH_OVS_SHUT_DOWN (RNP_COMM_REG0 + 0x806c) +#define RNP_FC_PORT_ENABLE (RNP_COMM_REG0 + 0x9004) +#define RNP_FC_PORT_PRIO_MAP(n) (RNP_COMM_REG0 + 0x9008 + (0x04 * n)) +#define RNP_FC_EN_CONF_AVAILABLE (RNP_COMM_REG0 + 0x9018) +#define RNP_FC_UNCTAGS_MAP_OFFSET (16) +#define RNP_TOP_MAC_OUI (RNP_COMM_REG0 + 0xc004) +#define RNP_TOP_MAC_SN (RNP_COMM_REG0 + 0xc008) +/* ================================================================== */ + +/* ==================== RNP-SERDES Global Registers ================= */ + +#define RNP_SERDES (0x40000) +#define RNP_PCS_OFFSET (0x1000) + +#define RNP_PCS_BASE(i) (RNP_SERDES + RNP_PCS_OFFSET * i) +#define RNP_PCS_1G_OR_10G BIT(13) +#define RNP_PCS_SPPEED_MASK (0x1c) +#define RNP_PCS_SPPEED_10G (0x0) +#define RNP_PCS_SPPEED_40G (0xc) +#define RNP_PCS_LINK_SPEED (0x30000) +#define RNP_PCS_LINKUP BIT(2) +#define RNP_PCS_LINK_STATUS (0x30001) + +/* ================================================================== */ + +/* ==================== RNP-MAC Global Registers ==================== */ +//=== MAC Registers== +#define RNP10_MAC_BASE (0x60000) +#define RNP_XLMAC (0x60000) + +#define RNP10_MAC_TX_CFG (0x0000) +#define RNP10_MAC_RX_CFG (0x0004) +#define RNP_IPC_MASK_XLGMAC BIT(9) +#define RNP_RX_ALL BIT(31) +#define RNP_RX_ALL_MUL BIT(4) +#define RNP10_MAC_PKT_FLT (0x0008) +#define RNP10_MAC_LPI_CTRL (0x00d0) + +#define RNP10_MAC_Q0_TX_FLOW_CTRL(i) (0x0070 + 0x04 * (i)) +#define RNP10_MAC_RX_FLOW_CTRL (0x0090) + +#define RNP10_TX_FLOW_ENABLE_MASK (0x2) +#define RNP10_RX_FLOW_ENABLE_MASK (0x1) + +#define RNP10_MAC_TX_VLAN_TAG (0x0050) +#define RNP10_MAC_TX_VLAN_MODE (0x0060) +#define RNP10_MAC_INNER_VLAN_INCL (0x0064) + +#define RNP10_MAC_UNICAST_LOW(i) (0x304 + i * 0x08) +#define RNP10_MAC_UNICAST_HIGH(i) (0x300 + i * 0x08) + +#define RNP500_MAC_BASE (0x20000) + +#define RNP_MODE_NO_SA_INSER (0x0) +#define RNP_SARC_OFFSET (28) +#define RNP_TWOKPE_MASK BIT(27) +#define RNP_SFTERR_MASK BIT(26) +#define RNP_CST_MASK BIT(25) +#define RNP_TC_MASK BIT(24) +#define RNP_WD_MASK BIT(23) +#define RNP_JD_MASK BIT(22) +#define RNP_BE_MASK BIT(21) +#define RNP_JE_MASK BIT(20) +#define RNP_IFG_96 (0x00) +#define RNP_IFG_OFFSET (17) +#define RNP_DCRS_MASK BIT(16) +#define RNP_PS_MASK BIT(15) +#define RNP_FES_MASK BIT(14) +#define RNP_DO_MASK BIT(13) +#define RNP_LM_MASK BIT(12) +#define RNP_DM_MASK BIT(11) +#define RNP_IPC_MASK BIT(10) +#define RNP_DR_MASK BIT(9) +#define RNP_LUD_MASK BIT(8) +#define RNP_ACS_MASK BIT(7) +#define RNP_BL_MODE (0x00) +#define RNP_BL_OFFSET (5) +#define RNP_DC_MASK BIT(4) +#define RNP_TE_MASK BIT(3) +#define RNP_RE_MASK BIT(2) +#define RNP_PRELEN_MODE (0) + +#define RNP500_MAC_UNICAST_LOW(i) (0x44 + i * 0x08) +#define RNP500_MAC_UNICAST_HIGH(i) (0x40 + i * 0x08) + +#define GMAC_CONTROL 0x00000000 /* Configuration */ +#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */ +#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */ +#define GMAC_HASH_LOW 0x0000000c /* Multicast Hash Table Low */ +#define GMAC_MII_ADDR 0x00000010 /* MII Address */ +#define GMAC_MII_DATA 0x00000014 /* MII Data */ +#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */ + +#define GMAC_PMT 0x0000002c +enum power_event { + pointer_reset = 0x80000000, + 
global_unicast = 0x00000200, + wake_up_rx_frame = 0x00000040, + magic_frame = 0x00000020, + wake_up_frame_en = 0x00000004, + magic_pkt_en = 0x00000002, + power_down = 0x00000001, +}; + +#define GMAC_VTHM_MASK BIT(19) +#define GMAC_ESVL_MASK BIT(18) +#define GMAC_VTIM_MASK BIT(17) +#define GMAC_ETV_MASK BIT(16) +#define GMAC_VLAN_TAG_CTRL 0x0000001c + +#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense */ +#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMI 1:MII */ +#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */ +#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */ +#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */ +#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */ +#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */ +#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */ +#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */ +#define GMAC_CONTROL_ACS 0x00000080 /* Auto Pad/FCS Stripping */ +#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */ +#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ +#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ + +/* GMAC Frame Filter defines */ +#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ +#define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */ +#define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */ +#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */ +#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */ +#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */ +#define GMAC_FRAME_FILTER_PCF 0x00000080 /* Pass Control frames */ +#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */ +#define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */ +#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */ +#define GMAC_FRAME_FILTER_VLAN 0x00010000 /* vlan filter open */ +#define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */ +/* GMII ADDR defines */ +#define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */ +#define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */ +/* GMAC FLOW CTRL defines */ +#define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ +#define GMAC_FLOW_CTRL_PT_SHIFT 16 +#define GMAC_FLOW_CTRL_UP 0x00000008 /* Unicast pause frame enable */ +#define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ +#define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */ +#define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... 
*/
+
+#define GMAC_MANAGEMENT_RX_UNDERSIZE (0x01a4)
+#define RNP_MAC_TX_CFG (RNP_XLMAC + 0x0000)
+#define RNP_MAC_RX_CFG (RNP_XLMAC + 0x0004)
+#define RNP_MAC_PKT_FLT (RNP_XLMAC + 0x0008)
+#define RNP_MAC_LPI_CTRL (RNP_XLMAC + 0x00d0)
+
+#define RNP_MAC_TX_VLAN_TAG (RNP_XLMAC + 0x0050)
+#define RNP_MAC_TX_VLAN_MODE (RNP_XLMAC + 0x0060)
+#define RNP_MAC_INNER_VLAN_INCL (RNP_XLMAC + 0x0064)
+
+#define RNP_MAC_Q0_TX_FLOW_CTRL(i) (RNP_XLMAC + 0x0070 + 0x04 * (i))
+#define RNP_MAC_RX_FLOW_CTRL (RNP_XLMAC + 0x0090)
+
+#define RNP_MAC_HW_FEATURE (RNP_XLMAC + 0x0120)
+
+/* 1588 */
+#define RNP_MAC_TS_CTRL (RNP_XLMAC + 0x0d00)
+#define RNP_MAC_SUB_SECOND_INCREMENT (RNP_XLMAC + 0x0d04)
+#define RNP_MAC_SYS_TIME_SEC_CFG (RNP_XLMAC + 0x0d08)
+#define RNP_MAC_SYS_TIME_NANOSEC_CFG (RNP_XLMAC + 0x0d0c)
+#define RNP_MAC_SYS_TIME_SEC_UPDATE (RNP_XLMAC + 0x0d10)
+#define RNP_MAC_SYS_TIME_NANOSEC_UPDATE (RNP_XLMAC + 0x0d14)
+#define RNP_MAC_TS_ADDEND (RNP_XLMAC + 0x0d18)
+#define RNP_MAC_TS_STATS (RNP_XLMAC + 0x0d20)
+#define RNP_MAC_INTERRUPT_ENABLE (RNP_XLMAC + 0x00b4)
+
+#define RNP_MAC_STATS_BROADCAST_LOW (RNP_XLMAC + 0x0918)
+#define RNP_MAC_STATS_BROADCAST_HIGH (RNP_XLMAC + 0x091c)
+#define RNP_MAC_STATS_MULTICAST_LOW (RNP_XLMAC + 0x0920)
+#define RNP_MAC_STATS_MULTICAST_HIGH (RNP_XLMAC + 0x0924)
+
+#define RNP_TX_FLOW_ENABLE_MASK (0x2)
+#define RNP_RX_FLOW_ENABLE_MASK (0x1)
+/* ================================================================== */
+
+/* ==================== RNP-MSIX Global Registers ==================== */
+//==== Ring-MSIX Registers (MSI-X_module_design.docs) ===
+#define RING_VECTOR(n) (0x04 * (n))
+
+/* ================================================================== */
+
+/* ==================== RNP-SWITCH Global Registers ================= */
+#define RNP_SWITCH_BASE 0xB0000
+
+// 6 switch ports
+#define RNP_SWITCH_RULE_INGS(port, n) \
+ (RNP_SWITCH_BASE + 0x24 * (port) + 0x1000 + 0x04 * (n))
+#define RNP_SWITCH_RULE_INGS_RPU_NP(port) \
+ (RNP_SWITCH_BASE + 0x24 * (port) + 0x1014)
+#define RNP_SWITCH_RULE_INGS_RPU_SWITCH(port) \
+ (RNP_SWITCH_BASE + 0x24 * (port) + 0x1018)
+#define RNP_SWITCH_RULE_INGS_SEC(port) \
+ (RNP_SWITCH_BASE + 0x24 * (port) + 0x101c)
+#define RNP_SWITCH_RULE_INGS_EXFPGA(port) \
+ (RNP_SWITCH_BASE + 0x24 * (port) + 0x1020)
+
+#define RNP_SWITCH_CNT_EGRESS_PKT(port) (RNP_SWITCH_BASE + 0x10db + 0x04 * (port))
+#define RNP_SWITCH_CNT_INGRESS_PKT(port) (RNP_SWITCH_BASE + 0x10f0 + 0x04 * (port))
+#define RNP_SWITCH_RPUUP_DATA_PROG_FULL_THRESH (RNP_SWITCH_BASE + 0x1108)
+#define RNP_SWITCH_RPUDN_DATA_PROG_FULL_THRESH (RNP_SWITCH_BASE + 0x110c)
+#define RNP_SWITCH_MAC0_DATA_PROG_FULL_THRESH (RNP_SWITCH_BASE + 0x1110)
+#define RNP_SWITCH_MAC1_DATA_PROG_FULL_THRESH (RNP_SWITCH_BASE + 0x1114)
+#define RNP_SWITCH_DMA0_DATA_PROG_FULL_THRESH (RNP_SWITCH_BASE + 0x1118)
+#define RNP_SWITCH_DMA1_DATA_PROG_FULL_THRESH (RNP_SWITCH_BASE + 0x111c)
+#define RNP_SWITCH_REG1_INGRESS_STATUS(port) \
+ (RNP_SWITCH_BASE + 0x1120 + 0x08 * (port))
+#define RNP_SWITCH_REG2_INGRESS_STATUS(port) \
+ (RNP_SWITCH_BASE + 0x1124 + 0x08 * (port))
+
+#define RNP_SWITCH_REG_STATUS_ROBIN(port) \
+ (RNP_SWITCH_BASE + 0x1150 + 0x04 * (port))
+#define RNP_SWITCH_REG_EGRESS_STATUS(port) \
+ (RNP_SWITCH_BASE + 0x1168 + 0x04 * (port))
+#define RNP_SWITCH_INFO_FIFO_DMA_TX(n) (RNP_SWITCH_BASE + 0x1198 + 0x08 * (n))
+#define RNP_SWITCH_INFO_FIFO_DMA_RX(n) (RNP_SWITCH_BASE + 0x119c + 0x08 * (n))
+#define RNP_SWITCH_INFO_FIFO_MAC_TX(n) (RNP_SWITCH_BASE + 0x11a8 + 0x08 * (n))
+#define RNP_SWITCH_INFO_FIFO_MAC_RX(n)
(RNP_SWITCH_BASE + 0x11ac + 0x08 * (n)) +#define RNP_SWITCH_INFO_FIFO_RPUUP_RX(n) (RNP_SWITCH_BASE + 0x11bc + 0x08 * (n)) +#define RNP_SWITCH_INFO_FIFO_RPUDN_RX(n) (RNP_SWITCH_BASE + 0x11c0 + 0x08 * (n)) +#define RNP_SWITCH_EN_SOFT_RESET (RNP_SWITCH_BASE + 0xf000) +#define RNP_SWITCH_SOFT_RESET (RNP_SWITCH_BASE + 0xf004) +#define RNP_SWITCH_CLR_INGS_ERR (RNP_SWITCH_BASE + 0xf008) +#define RNP_SWITCH_ERR_CODE_INGS(port) \ + (RNP_SWITCH_BASE + 0xf010 + 0x04 * (port)) +#define RNP_SWITCH_MEM_SD (RNP_SWITCH_BASE + 0xf028) +#define RNP_SWITCH_MEM_SLP (RNP_SWITCH_BASE + 0xf02c) +#define RNP_SWITCH_EN_INVALID_DPORT_DROP_O (RNP_SWITCH_BASE + 0xf030) + +/* ================================================================== */ + +/* ==================== RNP-TCAM Global Registers ==================== */ +#define RNP_TCAM_BASE (0xc0000) + +#define RNP_TCAM_SDPQF(n) \ + (RNP_TCAM_BASE + 0x00 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP_TCAM_DAQF(n) \ + (RNP_TCAM_BASE + 0x04 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP_TCAM_SAQF(n) \ + (RNP_TCAM_BASE + 0x08 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP_TCAM_APQF(n) \ + (RNP_TCAM_BASE + 0x0c + 0x40 * (n / 2) + 0x10 * (n % 2)) + +#define RNP_TCAM_SDPQF_MASK(n) \ + (RNP_TCAM_BASE + 0x20 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP_TCAM_DAQF_MASK(n) \ + (RNP_TCAM_BASE + 0x24 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP_TCAM_SAQF_MASK(n) \ + (RNP_TCAM_BASE + 0x28 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP_TCAM_APQF_MASK(n) \ + (RNP_TCAM_BASE + 0x2c + 0x40 * (n / 2) + 0x10 * (n % 2)) + +#define RNP_TCAM_MODE (RNP_TCAM_BASE + 0x20000) +#define RNP_TCAM_CACHE_ENABLE (RNP_TCAM_BASE + 0x20004) +#define RNP_TCAM_CACHE_ADDR_CLR (RNP_TCAM_BASE + 0x20008) +#define RNP_TCAM_CACHE_REQ_CLR (RNP_TCAM_BASE + 0x2000c) + +/* ================================================================== */ + +/* ==================== OTHER Global Registers ==================== */ +//===== PF-VF Functions ==== +#define VF_NUM_REG 0xa3000 +// 8bit: 7:vf_actiove 6:fun0/fun1 [5:0]:vf_num +#define VF_NUM(vfnum, fun) ((1 << 7) | (((fun) & 0x1) << 6) | ((vfnum) & 0x3f)) +#define PF_BIT 6 +#define PF_NUM(fun) (((fun) & 0x1) << 6) +#define IS_VF(vfnum) (((vfnum) & (1 << 7)) ? 1 : 0) + +/* PFC Flow Control*/ +enum NIC_MODE { + MODE_NIC_MODE_2PORT_40G = 0, + MODE_NIC_MODE_2PORT_10G = 1, + MODE_NIC_MODE_4PORT_10G = 2, + MODE_NIC_MODE_8PORT_10G = 3, +}; + +/* ================================================================== */ + +#endif /* RNP_REGS_H */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_sriov.c b/drivers/net/ethernet/mucse/rnp/rnp_sriov.c new file mode 100755 index 0000000000000000000000000000000000000000..eaecc4b863caf7434493f6a2f81fdb0c8367b9d2 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_sriov.c @@ -0,0 +1,1896 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef NETIF_F_HW_VLAN_CTAG_TX +#include +#endif + +#include "rnp.h" +#include "rnp_type.h" +#include "rnp_sriov.h" + +int rnp_msg_post_status_signle(struct rnp_adapter *adapter, + enum PF_STATUS status, int vf); +#ifdef CONFIG_PCI_IOV +static int __rnp_enable_sriov(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + int num_vf_macvlans, i, num_vebvlans; + struct vf_macvlans *mv_list; + struct vf_vebvlans *vv_list = NULL; + + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + /* sriov and dcb cannot open together */ + /* reset numtc */ + adapter->flags &= (~RNP_FLAG_DCB_ENABLED); + netdev_reset_tc(adapter->netdev); + + e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs); + + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + + /* Enable VMDq flag so device will be set in VM mode */ + adapter->flags |= RNP_FLAG_VMDQ_ENABLED; + if (!adapter->ring_feature[RING_F_VMDQ].limit) + adapter->ring_feature[RING_F_VMDQ].limit = 1; + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + adapter->ring_feature[RING_F_VMDQ].offset = 0; + else + adapter->ring_feature[RING_F_VMDQ].offset = hw->max_vfs - 1; + + /* we reseve each for fake mac if we can */ + num_vf_macvlans = hw->num_rar_entries - + (hw->max_pf_macvlans + 1 + adapter->num_vfs * 2); + if (num_vf_macvlans < 0) + num_vf_macvlans = hw->num_rar_entries - + (hw->max_pf_macvlans + 1 + adapter->num_vfs); + + num_vebvlans = hw->num_vebvlan_entries; + + adapter->mv_list = mv_list = kcalloc( + num_vf_macvlans, sizeof(struct vf_macvlans), GFP_KERNEL); + if (num_vebvlans) + hw->vv_list = vv_list = kcalloc( + num_vebvlans, sizeof(struct vf_vebvlans), GFP_KERNEL); + + if (mv_list) { + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&adapter->vf_mvs.l); + for (i = 0; i < num_vf_macvlans; i++) { + mv_list->vf = -1; + mv_list->free = true; + mv_list->rar_entry = hw->mac.num_rar_entries - + (i + adapter->num_vfs * 2 + 1); + list_add(&mv_list->l, &adapter->vf_mvs.l); + mv_list++; + } + } + + if (vv_list) { + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&hw->vf_vas.l); + for (i = 0; i < num_vebvlans; i++) { + vv_list->vid = -1; + vv_list->vid = 0; + vv_list->free = true; + vv_list->veb_entry = i; + list_add(&vv_list->l, &hw->vf_vas.l); + vv_list++; + } + } + + adapter->flags2 |= RNP_FLAG2_BRIDGE_MODE_VEB; + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + + hw->ops.set_sriov_status(hw, true); + adapter->vfinfo = kcalloc(adapter->num_vfs, + sizeof(struct vf_data_storage), GFP_KERNEL); + if (adapter->vfinfo) { + /* limit trafffic classes based on VFs enabled */ + /* TODO analyze VF need support pfc or traffic classes */ + /* We do not support RSS w/ SR-IOV */ + adapter->ring_feature[RING_F_RSS].limit = hw->sriov_ring_limit; + + /* Disable RSC when in SR-IOV mode */ + adapter->flags2 &= + ~(RNP_FLAG2_RSC_CAPABLE | RNP_FLAG2_RSC_ENABLED); + + adapter->flags |= RNP_FLAG_SRIOV_ENABLED; + + /* enable spoof checking for all VFs */ + return 0; + } + + /* open flags at last to avoid null call adapter->vfinfo */ + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + return -ENOMEM; +} + +void rnp_enable_sriov_true(struct rnp_adapter *adapter) +{ + int err = 0; + + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return; + + adapter->flags |= RNP_FLAG_SRIOV_INIT_DONE; + + err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); + if (err) { + printk("Failed to enable PCI 
sriov: %d num %d\n", err, + adapter->num_vfs); + printk("We cannot handle this error\n"); + } + + adapter->flags |= RNP_FLAG_VF_INIT_DONE; +} + +/* Note this function is called when the user wants to enable SR-IOV + * VFs using the now deprecated module parameter + * never used + */ +void rnp_enable_sriov(struct rnp_adapter *adapter) +{ + int pre_existing_vfs = 0; + struct rnp_hw *hw = &adapter->hw; + + pre_existing_vfs = pci_num_vf(adapter->pdev); + if (!pre_existing_vfs && !adapter->num_vfs) + return; + + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + if (!pre_existing_vfs) + dev_warn( + &adapter->pdev->dev, + "Enabling SR-IOV VFs using the module parameter is deprecated " + "- please use the pci sysfs interface.\n"); + + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + /* If there are pre-existing VFs then we have to force + * use of that many - over ride any module parameter value. + * This may result from the user unloading the PF driver + * while VFs were assigned to guest VMs or because the VFs + * have been created via the new PCI SR-IOV sysfs interface. + */ + if (pre_existing_vfs) { + adapter->num_vfs = pre_existing_vfs; + dev_warn( + &adapter->pdev->dev, + "Virtual Functions already enabled for this device - Please " + "reload all VF drivers to avoid spoofed packet errors\n"); + } else { + int i; + /* + * The n10 supports up to 64 VFs per physical function + * but this implementation limits allocation to 126 so that + * basic networking resources are still available to the + * physical function. If the user requests greater than + * 64 VFs then it is an error - reset to default of zero. + */ + adapter->num_vfs = + min_t(unsigned int, adapter->num_vfs, hw->max_vfs - 1); + + /* should first alloc memory for sriov */ + if (__rnp_enable_sriov(adapter)) { + e_err(probe, "Failed to alloc memory for sriov\n"); + adapter->num_vfs = 0; + } + + for (i = 0; i < adapter->num_vfs; i++) + rnp_vf_configuration(adapter->pdev, (i | 0x10000000)); + + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + } +} + +static bool rnp_vfs_are_assigned(struct rnp_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct pci_dev *vfdev; + unsigned int dev_id = RNP_DEV_ID_N10_PF0_VF_N; + unsigned int vendor_id = PCI_VENDOR_ID_MUCSE; + + switch (adapter->pdev->device) { + case RNP_DEV_ID_N10_PF0: + case RNP_DEV_ID_N10_PF1: + vendor_id = 0x1dab; + if (rnp_is_pf1(&adapter->hw)) + dev_id = RNP_DEV_ID_N10_PF1_VF; + else + dev_id = RNP_DEV_ID_N10_PF0_VF; + break; + case PCI_DEVICE_ID_N10_PF0: + case PCI_DEVICE_ID_N10_PF1: + vendor_id = PCI_VENDOR_ID_MUCSE; + if (rnp_is_pf1(&adapter->hw)) + dev_id = RNP_DEV_ID_N10_PF1_VF_N; + else + dev_id = RNP_DEV_ID_N10_PF0_VF_N; + } + + /* loop through all the VFs to see if we own any that are assigned */ + vfdev = pci_get_device(vendor_id, dev_id, NULL); + while (vfdev) { + /* if we don't own it we don't care */ + if (vfdev->is_virtfn && vfdev->physfn == pdev) { + /* if it is assigned we cannot release it */ + if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) + return true; + } + + vfdev = pci_get_device(vendor_id, dev_id, vfdev); + } + + return false; +} + +#endif /* #ifdef CONFIG_PCI_IOV */ +int rnp_disable_sriov(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + int rss; + int time = 0; + + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return 0; + + adapter->num_vfs = 0; + adapter->flags &= ~RNP_FLAG_SRIOV_ENABLED; + adapter->flags &= ~RNP_FLAG_SRIOV_INIT_DONE; + adapter->flags &= 
~RNP_FLAG_VF_INIT_DONE; + adapter->priv_flags &= (~RNP_PRIV_FLAG_OLD_VF_QUEUE); + adapter->vlan_count = 0; + msleep(100); + + if(pci_channel_offline(adapter->pdev) == false){ + /* only do if not ncsi card */ + if (!hw->ncsi_en) + hw->ops.set_mac_rx(hw, false); + + hw->ops.set_sriov_status(hw, false); + } + + /* set num VFs to 0 to prevent access to vfinfo */ + while (test_and_set_bit(__RNP_USE_VFINFI, &adapter->state)) { + msleep(100); + time++; + + if (time > 100) { + printk("wait flags timeout\n"); + break; + } + } + if (time < 100) + clear_bit(__RNP_USE_VFINFI, &adapter->state); + + /* free VF control structures */ + kfree(adapter->vfinfo); + adapter->vfinfo = NULL; + + /* free macvlan list */ + if (hw->vv_list) { + kfree(hw->vv_list); + hw->vv_list = NULL; + } + + if (adapter->mv_list) { + kfree(adapter->mv_list); + adapter->mv_list = NULL; + } + + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + /* if SR-IOV is already disabled then there is nothing to do */ + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); +#ifdef CONFIG_PCI_IOV + /* + * If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (rnp_vfs_are_assigned(adapter)) { + e_dev_warn( + "Unloading driver while VFs are assigned - VFs will not be " + "deallocated\n"); + return -EPERM; + } + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); +#endif + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + + /* set default pool back to 0 */ + + /* Disable VMDq flag so device will be set in VM mode */ + if (adapter->ring_feature[RING_F_VMDQ].limit == 1) + adapter->flags &= ~RNP_FLAG_VMDQ_ENABLED; + adapter->ring_feature[RING_F_VMDQ].offset = 0; + + rss = min_t(int, adapter->max_ring_pair_counts, num_online_cpus()); + + rss = min_t(int, rss, + hw->mac.max_msix_vectors - adapter->num_other_vectors); + + adapter->ring_feature[RING_F_RSS].limit = rss; + + /* take a breather then clean up driver data */ + msleep(100); + + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + return 0; +} + +static bool check_ari_mode(struct pci_dev *dev) +{ + struct pci_bus *bus = dev->bus; + + return bus->self && bus->self->ari_enabled; +} + +static int rnp_pci_sriov_enable(struct pci_dev *dev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + struct rnp_adapter *adapter = pci_get_drvdata(dev); + struct rnp_hw *hw = &adapter->hw; + int err = 0; + int i; + int pre_existing_vfs = pci_num_vf(dev); + + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + err = rnp_disable_sriov(adapter); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + goto out; + + /* maybe bug, if add 1 vlan, then open sriov */ + if (hw->feature_flags & RNP_VEB_VLAN_MASK_EN) { + if (adapter->vlan_count > hw->max_vfs - 1) { + dev_err(&adapter->pdev->dev, + "vlans is too much, delete less than %d vlans\n", + hw->max_vfs - 1); + + err = -EOPNOTSUPP; + goto err_out; + } + + } else if (adapter->vlan_count > 1) { + dev_err(&adapter->pdev->dev, + "only 1 vlan in sriov mode, delete other vlans\n"); + dev_err(&adapter->pdev->dev, "please delete all vlans first\n"); + + err = -EOPNOTSUPP; + goto err_out; + } + + adapter->vlan_count = 0; + if (err) + goto err_out; + + /* While the SR-IOV capability structure reports total VFs to be + * 64 we limit the actual number that can be allocated to 63 so + * that some transmit/receive resources can be reserved to the + * PF. 
The PCI bus driver already checks for other values out of + * range. + */ + + if (check_ari_mode(dev)) { + int temp = hw->sriov_ring_limit; + + if (temp == 1) + temp = 2; + + + if (num_vfs > (128 / temp - 1)) { + err = -EPERM; + goto err_out; + } + } else { + if (num_vfs > hw->max_vfs_noari) { + err = -EPERM; + goto err_out; + } + } + + adapter->num_vfs = num_vfs; + err = __rnp_enable_sriov(adapter); + if (err) + goto err_out; + + for (i = 0; i < adapter->num_vfs; i++) + rnp_vf_configuration(dev, (i | 0x10000000)); + /* we should reinit pf first */ + dbg("flags:0x%x\n", adapter->flags); + if (hw->ops.clr_rar_all) + hw->ops.clr_rar_all(hw); + + rnp_sriov_reinit(adapter); + + adapter->flags |= RNP_FLAG_SRIOV_INIT_DONE; + err = pci_enable_sriov(dev, num_vfs); + if (err) { + e_dev_warn("Failed to enable PCI sriov: %d num %d\n", err, + num_vfs); + rnp_disable_sriov(adapter); + rnp_sriov_reinit(adapter); + goto err_out; + } + adapter->flags |= RNP_FLAG_VF_INIT_DONE; + +out: + return num_vfs; + +err_out: + return err; +#endif + return 0; +} + +static int rnp_pci_sriov_disable(struct pci_dev *dev) +{ + struct rnp_adapter *adapter = pci_get_drvdata(dev); + int err; + u32 current_flags = adapter->flags; + + err = rnp_disable_sriov(adapter); + + /* Only reinit if no error and state changed */ + if (!err && current_flags != adapter->flags) { + /* rnp_disable_sriov() doesn't clear VMDQ flag */ + adapter->flags &= ~RNP_FLAG_VMDQ_ENABLED; +#ifdef CONFIG_PCI_IOV + rnp_sriov_reinit(adapter); +#endif + } + + return err; +} + +static int rnp_set_vf_multicasts(struct rnp_adapter *adapter, u32 *msgbuf, + u32 vf) +{ + int entries = (msgbuf[0] & RNP_VT_MSGINFO_MASK) >> RNP_VT_MSGINFO_SHIFT; + u16 *hash_list = (u16 *)&msgbuf[1]; + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + struct rnp_hw *hw = &adapter->hw; + int i; + + /* only so many hash values supported */ + entries = min(entries, RNP_MAX_VF_MC_ENTRIES); + + /* + * salt away the number of multi cast addresses assigned + * to this VF for later use to restore when the PF multi cast + * list changes + */ + vfinfo->num_vf_mc_hashes = entries; + + /* + * VFs are limited to using the MTA hash table for their multicast + * addresses + */ + for (i = 0; i < entries; i++) + vfinfo->vf_mc_hashes[i] = hash_list[i]; + + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { + /* fixed mode */ + hw->ops.set_sriov_vf_mc(hw, vfinfo->vf_mc_hashes[i]); + } + + return 0; +} + +void rnp_restore_vf_macs(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + int vf; + u8 *mac_addr; + int rar_entry; + int fix_vf_num = 0; + + for (vf = 0; vf < adapter->num_vfs; vf++) { + mac_addr = adapter->vfinfo[vf].vf_mac_addresses; + rar_entry = hw->mac.num_rar_entries - (vf + 1); + /* setup to the hw */ + if (hw->sriov_ring_limit > 2) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + fix_vf_num = (vf + 1) * hw->sriov_ring_limit / 2; + } else { + fix_vf_num = (vf) * hw->sriov_ring_limit / 2; + } + hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, + fix_vf_num, true); + // add for fake mac + mac_addr = adapter->vfinfo[vf].vf_mac_fake_address; + rar_entry = hw->mac.num_rar_entries - (vf + 1 + adapter->num_vfs); + + if (adapter->vfinfo[vf].vf_mac_fake_set) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + fix_vf_num = (vf + 1) * hw->sriov_ring_limit / 2; + } else { + fix_vf_num = (vf) * hw->sriov_ring_limit / 2; + } + hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, + fix_vf_num, true); + } + } else { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + 
hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, vf + 1, + true); + else + hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, vf, + true); + // add for fake mac + mac_addr = adapter->vfinfo[vf].vf_mac_fake_address; + rar_entry = hw->mac.num_rar_entries - (vf + 1 + adapter->num_vfs); + + if (adapter->vfinfo[vf].vf_mac_fake_set) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, vf + 1, + true); + else + hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, vf, + true); + } + } + } +} + +void rnp_restore_vf_macvlans(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct list_head *pos; + struct vf_macvlans *entry; + int fix_vf_num = 0; + + hw_dbg(hw, "%s Staring..\n", __func__); + + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (!entry->free) { + hw_dbg(hw, " vf:%d MACVLAN: RAR[%d] <= %pM\n", + entry->vf, entry->rar_entry, entry->vf_macvlan); + + if (hw->sriov_ring_limit > 2) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + fix_vf_num = (entry->vf + 1) * hw->sriov_ring_limit / 2; + } else { + fix_vf_num = (entry->vf) * hw->sriov_ring_limit / 2; + } + hw->ops.set_rar_with_vf(hw, entry->vf_macvlan, entry->rar_entry, + fix_vf_num, true); + + + } else { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + hw->ops.set_rar_with_vf(hw, entry->vf_macvlan, + entry->rar_entry, + entry->vf + 1, true); + } else { + hw->ops.set_rar_with_vf(hw, entry->vf_macvlan, + entry->rar_entry, + entry->vf, true); + } + } + } + } + hw_dbg(hw, "%s Done\n", __func__); +} + +void rnp_restore_vf_multicasts(struct rnp_adapter *adapter) +{ + /* Restore any VF macvlans */ + rnp_restore_vf_macvlans(adapter); +} + +static int rnp_set_vf_vlan(struct rnp_adapter *adapter, int add, int vid, + u32 vf) +{ + struct rnp_hw *hw = &adapter->hw; + int true_handle = 1; + int i; + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; + + /* should check other vf */ + if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED)) { + /* if other vf use this vlan, don't true remove */ + if (!add) { + /* check equal pf_vlan */ + if (vid == adapter->vf_vlan) + true_handle = 0; + if (!test_and_set_bit(__RNP_USE_VFINFI, + &adapter->state)) { + for (i = 0; i < adapter->num_vfs; i++) { + /* check if other vf_vlan still valid */ + if ((i != vf) && + (vid == adapter->vfinfo[i].vf_vlan)) + true_handle = 0; + /* check if other pf_vlan still valid */ + if ((i != vf) && + (vid == adapter->vfinfo[i].pf_vlan)) + true_handle = 0; + } + clear_bit(__RNP_USE_VFINFI, &adapter->state); + } + } + } + if (true_handle) + hw->ops.set_vf_vlan_filter(hw, vid, vf, (bool)add, false); + + return 0; +} + +static s32 rnp_set_vf_lpe(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + return 0; +} + +static inline void rnp_vf_reset_event(struct rnp_adapter *adapter, u32 vf) +{ + struct rnp_hw *hw = &adapter->hw; + int rar_entry = hw->mac.num_rar_entries - (vf + 1); + int i; + + /* reset multicast table array for vf */ + adapter->vfinfo[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + rnp_set_rx_mode(adapter->netdev); + + /* clear this rar_entry */ + hw->ops.clr_rar(hw, rar_entry); + + /* reset VF api back to unknown */ + adapter->vfinfo[vf].vf_api = 0; + for (i = 0; i < RNP_MAX_VF_MC_ENTRIES; i++) + adapter->vfinfo[vf].vf_mc_hashes[i] = 0; + adapter->vfinfo[vf].vf_vlan = 0; + adapter->vfinfo[vf].vlan_count = 0; +} + +static int rnp_set_vf_mac(struct rnp_adapter *adapter, int vf, + 
unsigned char *mac_addr) +{ + struct rnp_hw *hw = &adapter->hw; + int fix_vf_num = 0; + /* this rar_entry may be cofict with mac vlan with pf */ + int rar_entry = hw->mac.num_rar_entries - (vf + 1); + + memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6); + + /* setup to the hw */ + if (hw->sriov_ring_limit > 2) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + fix_vf_num = (vf + 1) * hw->sriov_ring_limit / 2; + } else { + fix_vf_num = (vf) * hw->sriov_ring_limit / 2; + } + hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, + fix_vf_num, true); + + } else { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, vf + 1, true); + else + hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, vf, true); + } + + return 0; +} + +static int rnp_set_vf_macvlan(struct rnp_adapter *adapter, int vf, int index, + unsigned char *mac_addr) +{ + struct rnp_hw *hw = &adapter->hw; + struct list_head *pos; + struct vf_macvlans *entry; + int fix_vf_num = 0; + /* index = 0 , only earase */ + /* index = 1 , earase and then set */ + if (index <= 1) { + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->vf == vf) { + entry->vf = -1; + entry->free = true; + entry->is_macvlan = false; + hw->ops.clr_rar(hw, entry->rar_entry); + } + } + } + + /* + * If index was zero then we were asked to clear the uc list + * for the VF. We're done. + */ + if (!index) + return 0; + + entry = NULL; + + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->free) + break; + } + + /* + * If we traversed the entire list and didn't find a free entry + * then we're out of space on the RAR table. Also entry may + * be NULL because the original memory allocation for the list + * failed, which is not fatal but does mean we can't support + * VF requests for MACVLAN because we couldn't allocate + * memory for the list management required. 
+ */ + if (!entry || !entry->free) + return -ENOSPC; + + entry->free = false; + entry->is_macvlan = true; + entry->vf = vf; + memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); + if (hw->sriov_ring_limit > 2) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + fix_vf_num = (entry->vf + 1) * hw->sriov_ring_limit / 2; + } else { + fix_vf_num = (entry->vf) * hw->sriov_ring_limit / 2; + } + hw->ops.set_rar_with_vf(hw, entry->vf_macvlan, entry->rar_entry, + fix_vf_num, true); + + } else { + + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + hw->ops.set_rar_with_vf(hw, entry->vf_macvlan, entry->rar_entry, + entry->vf + 1, true); + } else { + hw->ops.set_rar_with_vf(hw, entry->vf_macvlan, entry->rar_entry, + entry->vf, true); + } + } + + return 0; +} + +int rnp_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) +{ + unsigned char vf_mac_addr[6]; + struct rnp_adapter *adapter = pci_get_drvdata(pdev); + unsigned int vfn = (event_mask & 0x3f); + + bool enable = ((event_mask & 0x10000000U) != 0); + + if (enable) { + eth_zero_addr(vf_mac_addr); + memcpy(vf_mac_addr, adapter->hw.mac.perm_addr, 6); + vf_mac_addr[5] = vf_mac_addr[5] + (0x80 | vfn); + vf_mac_addr[4] = vf_mac_addr[4] + (pdev->devfn); + + memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); + } + + return 0; +} + +static int rnp_vf_reset_msg(struct rnp_adapter *adapter, u32 vf) +{ + struct rnp_hw *hw = &adapter->hw; + unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; + u32 msgbuf[RNP_VF_PERMADDR_MSG_LEN]; + u8 *addr = (u8 *)(&msgbuf[1]); + + /* reset the filters for the device */ + rnp_vf_reset_event(adapter, vf); + + /* set vf mac address */ + if (!is_zero_ether_addr(vf_mac)) + rnp_set_vf_mac(adapter, vf, vf_mac); + + /* enable VF mailbox for further messages */ + adapter->vfinfo[vf].clear_to_send = true; + + /* Enable counting of spoofed packets in the SSVPC register */ + /* reply to reset with ack and vf mac address */ + msgbuf[0] = RNP_VF_RESET; + if (!is_zero_ether_addr(vf_mac)) { + msgbuf[0] |= RNP_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] |= RNP_VT_MSGTYPE_NACK; + dev_warn( + &adapter->pdev->dev, + "VF %d has no MAC address assigned, you may have to assign " + "one manually\n", + vf); + } + + /* + * Piggyback the multicast filter type so VF can compute the + * correct vectors + */ + msgbuf[RNP_VF_MC_TYPE_WORD] = 0; + /* setup link status , pause mode, ft padding mode */ + /* pause mode */ + msgbuf[RNP_VF_MC_TYPE_WORD] |= (0xff & hw->fc.current_mode) << 16; + if (adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) + msgbuf[RNP_VF_MC_TYPE_WORD] |= (0x01 << 8); + else + msgbuf[RNP_VF_MC_TYPE_WORD] |= (0x00 << 8); + /* mc_type */ + msgbuf[RNP_VF_MC_TYPE_WORD] |= rd32(hw, RNP_ETH_DMAC_MCSTCTRL) & 0x03; + msgbuf[RNP_VF_DMA_VERSION_WORD] = rd32(hw, RNP_DMA_VERSION); + msgbuf[RNP_VF_VLAN_WORD] = adapter->vfinfo[vf].pf_vlan; + /* fixme tx fetch to be added here */ + msgbuf[RNP_VF_PHY_TYPE_WORD] = (hw->mac_type << 16) | hw->phy_type; + msgbuf[RNP_VF_FW_VERSION_WORD] = (hw->fw_version); +#ifdef HAVE_NDO_SET_VF_LINK_STATE + if (adapter->vfinfo[vf].link_state == rnp_link_state_auto) { + msgbuf[RNP_VF_LINK_STATUS_WORD] = + (adapter->link_up ? 
RNP_PF_LINK_UP : 0) | + adapter->link_speed; + } else if (adapter->vfinfo[vf].link_state == rnp_link_state_on) { + msgbuf[RNP_VF_LINK_STATUS_WORD] = RNP_PF_LINK_UP | + adapter->link_speed; + } else { + msgbuf[RNP_VF_LINK_STATUS_WORD] = 0; + } +#else + msgbuf[RNP_VF_LINK_STATUS_WORD] = 0; +#endif + + msgbuf[RNP_VF_AXI_MHZ] = hw->usecstocount; + /* we start from 0 */ + msgbuf[RNP_VF_FEATURE] = 0; +#ifdef NETIF_F_HW_VLAN_CTAG_FILTER + if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + msgbuf[RNP_VF_FEATURE] |= PF_FEATRURE_VLAN_FILTER; +#endif + if (hw->ncsi_en) + msgbuf[RNP_VF_FEATURE] |= PF_NCSI_EN; +#if defined(HAVE_VF_SPOOFCHK_CONFIGURE) + if (adapter->vfinfo[vf].spoofchk_enabled) + msgbuf[RNP_VF_FEATURE] |= VF_MAC_SPOOF_EN; +#endif + + /* now vf maybe has no irq handler if it is the first reset*/ + rnp_write_mbx(hw, msgbuf, RNP_VF_PERMADDR_MSG_LEN, vf); + + return 0; +} + +static int rnp_get_vf_mac_addr(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + u8 *mac = ((u8 *)(&msgbuf[1])); + + memcpy(mac, adapter->vfinfo[vf].vf_mac_addresses, 6); + + return 0; +} + +/* vf call setup a new mac */ +static int rnp_set_vf_mac_addr(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } + + if (adapter->vfinfo[vf].pf_set_mac && + memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac, ETH_ALEN)) { + e_warn(drv, + "VF %d attempted to override administratively set MAC address\n" + "Reload the VF driver to resume operations\n", + vf); + return -1; + } + rnp_set_vf_mac(adapter, vf, new_mac); + + return 0; +} + +static int rnp_set_vf_vlan_msg(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + int add = ((msgbuf[0] & RNP_VT_MSGINFO_MASK) >> RNP_VT_MSGINFO_SHIFT); + int vid = (msgbuf[1] & RNP_VLVF_VLANID_MASK); + int err; + + if (adapter->vfinfo[vf].pf_vlan) { + e_warn(drv, + "VF %d attempted to override administratively set VLAN " + "configuration\n" + "Reload the VF driver to resume operations\n", + vf); + return -1; + } + /* only allow 1 vlan for each vf */ + if ((add) && (adapter->vfinfo[vf].vlan_count)) { + e_warn(drv, "VF %d attempted to set more than 1 vlan", vf); + e_warn(drv, " vlan now %d, try to set %d\n", + adapter->vfinfo[vf].vf_vlan, vid); + return -1; + } + + /* vlan 0 has no work todo */ + if (!vid) + return 0; + if (add) { + adapter->vfinfo[vf].vlan_count++; + adapter->vfinfo[vf].vf_vlan = vid; + } else if (adapter->vfinfo[vf].vlan_count) { + adapter->vfinfo[vf].vf_vlan = 0; + adapter->vfinfo[vf].vlan_count--; + } + + err = rnp_set_vf_vlan(adapter, add, vid, vf); + + return err; +} + +static int rnp_set_vf_vlan_strip_msg(struct rnp_adapter *adapter, u32 *msgbuf, + u32 vf) +{ + struct rnp_hw *hw = &adapter->hw; + int vlan_strip_on = !!(msgbuf[1] >> 31); + int queue_cnt = msgbuf[1] & 0xffff; + int err = 0, i; + + vf_dbg("strip_on:%d queeu_cnt:%d, %d %d\n", vlan_strip_on, queue_cnt, + msgbuf[2], msgbuf[3]); + + for (i = 0; i < queue_cnt; i++) { + if (vlan_strip_on) + hw->ops.set_vlan_strip(hw, msgbuf[2 + i], true); + else + hw->ops.set_vlan_strip(hw, msgbuf[2 + i], false); + } + + return err; +} + +static int rnp_set_vf_macvlan_msg(struct rnp_adapter *adapter, u32 *msgbuf, + u32 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + int index = (msgbuf[0] & RNP_VT_MSGINFO_MASK) >> RNP_VT_MSGINFO_SHIFT; + int err; + + if (adapter->vfinfo[vf].pf_set_mac && index > 0) { + e_warn(drv, + "VF %d requested MACVLAN filter but is 
administratively denied\n", + vf); + return -1; + } + + /* An non-zero index indicates the VF is setting a filter */ + if (index) { + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } + } + + err = rnp_set_vf_macvlan(adapter, vf, index, new_mac); + if (err == -ENOSPC) + e_warn(drv, + "VF %d has requested a MACVLAN filter but there is no space for " + "it\n", + vf); + + return err < 0; + + return 0; +} + +static int rnp_negotiate_vf_api(struct rnp_adapter *adapter, u32 *msgbuf, + u32 vf) +{ + adapter->vfinfo[vf].vf_api = 0; + + return 0; +} + +static int rnp_get_vf_reg(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + u32 reg = msgbuf[1]; + + msgbuf[1] = rd32(&adapter->hw, reg); + + return 0; +} + +static int rnp_set_vf_mtu(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct net_device *netdev = adapter->netdev; + if (msgbuf[1] > netdev->mtu) { + e_dev_warn( + "vf %d try to change %d mtu to %d (large than pf limit)\n", + vf, netdev->mtu, msgbuf[1]); + return -1; + } else + return 0; +} + + +static int rnp_set_vf_promisc(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + int i; + int ret = 0; + struct rnp_hw *hw = &adapter->hw; + + if (msgbuf[1]) { + /* check if other vf in promisc */ + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[vf].promisc_mode) { + printk("vf %d already in promisc\n", vf); + ret = -1; + break; + } + } + /* if no vf in promisc mode */ + adapter->vfinfo[vf].promisc_mode = true; + hw->ops.set_rx_mode(hw, adapter->netdev, true); + hw->ops.set_sriov_status(hw, true); + + } else { + adapter->vfinfo[vf].promisc_mode = false; + hw->ops.set_rx_mode(hw, adapter->netdev, true); + hw->ops.set_sriov_status(hw, true); + } + return ret; +} + +static int rnp_get_vf_mtu(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct net_device *netdev = adapter->netdev; + msgbuf[1] = netdev->mtu; + return 0; +} + +static int rnp_get_vf_fw(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct rnp_hw *hw = &adapter->hw; + + msgbuf[1] = hw->fw_version; + + return 0; +} + +static int rnp_get_vf_link(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ +#ifdef HAVE_NDO_SET_VF_LINK_STATE + if (adapter->vfinfo[vf].link_state == rnp_link_state_auto) { + msgbuf[1] = (adapter->link_up ? 
RNP_PF_LINK_UP : 0) | + adapter->link_speed; + } else if (adapter->vfinfo[vf].link_state == rnp_link_state_on) { + msgbuf[1] = RNP_PF_LINK_UP | adapter->link_speed; + + } else { + msgbuf[1] = 0; + } +#else + msgbuf[0] = 0; + +#endif + return 0; +} + +static int rnp_get_vf_dma_frag(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + /* we fixed 1536 bytes */ + msgbuf[1] = 1536; + return 0; +} + +static int rnp_fix_rss_table(struct rnp_adapter *adapter) +{ + int rx_nums = 2; + int i, j; + struct rnp_hw *hw = &adapter->hw; + struct rnp_ring *rx_ring; + u32 reta = 0; + u32 reta_entries = rnp_rss_indir_tbl_entries(adapter); + + if (adapter->flags & RNP_FLAG_DCB_ENABLED) { + rx_nums = rx_nums / adapter->num_tc; + for (i = 0, j = 0; i < 8; i++) { + //wr32(hw, RNP_ETH_TC_IPH_OFFSET_TABLE(i), j); + adapter->rss_tc_tbl[i] = j; + hw->rss_tc_tbl[i] = j; + j = (j + 1) % adapter->num_tc; + } + } else { + for (i = 0, j = 0; i < 8; i++) { + //wr32(hw, RNP_ETH_TC_IPH_OFFSET_TABLE(i), 0); + hw->rss_tc_tbl[i] = 0; + adapter->rss_tc_tbl[i] = 0; + } + } + + /* adapter->num_q_vectors is not correct */ + for (i = 0, j = 0; i < reta_entries; i++) { + /* init with default value */ + if (!adapter->rss_tbl_setup_flag) + adapter->rss_indir_tbl[i] = j; + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + /* in sriov mode reta in [0, rx_nums] */ + reta = j; + } else { + /* in no sriov, reta is real ring number */ + rx_ring = adapter->rx_ring[adapter->rss_indir_tbl[i]]; + reta = rx_ring->rnp_queue_idx; + } + /* store rss_indir_tbl */ + //adapter->rss_indir_tbl[i] = reta; + hw->rss_indir_tbl[i] = reta; + + j = (j + 1) % rx_nums; + } + /* tbl only init once */ + adapter->rss_tbl_setup_flag = 1; + + hw->ops.set_rss_table(hw); + return 0; +} + +static int rnp_get_vf_queues(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct rnp_hw *hw = &adapter->hw; + int queue_fixed = hw->sriov_ring_limit; + + if ((msgbuf[1] != 0xaa) || (!(msgbuf[2] & VF_ALLOC_FEATURE))) { + + if (hw->sriov_ring_limit > 2) { + dev_warn(pci_dev_to_dev(adapter->pdev), + "Use new rnpvf version to support %d vf queue\n", + hw->sriov_ring_limit); + //hw->sriov_ring_limit = 2; + queue_fixed = 2; + adapter->priv_flags |= RNP_PRIV_FLAG_OLD_VF_QUEUE; + // should set rss to 2 + } + // others is new rnpvf + } + msgbuf[RNP_VF_TX_QUEUES] = queue_fixed; + msgbuf[RNP_VF_RX_QUEUES] = queue_fixed; + msgbuf[RNP_VF_TRANS_VLAN] = adapter->vfinfo[vf].pf_vlan; + msgbuf[RNP_VF_DEF_QUEUE] = 0; + if (hw->hw_type == rnp_hw_n400) { + /* n400, we use + * vf0 use ring4 + * vf1 use ring8 + */ + msgbuf[RNP_VF_QUEUE_START] = vf * 4 + 4; + + } else if ((hw->hw_type == rnp_hw_n10) && (hw->sriov_ring_limit == 1)) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + msgbuf[RNP_VF_QUEUE_START] = vf * 2 + 2; + else + msgbuf[RNP_VF_QUEUE_START] = vf * 2; + + } else { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + msgbuf[RNP_VF_QUEUE_START] = vf * hw->sriov_ring_limit + + hw->sriov_ring_limit; + else + msgbuf[RNP_VF_QUEUE_START] = vf * hw->sriov_ring_limit; + } + msgbuf[RNP_VF_QUEUE_DEPTH] = (adapter->tx_ring_item_count << 16) | + adapter->rx_ring_item_count; + + if (adapter->priv_flags & RNP_PRIV_FLAG_OLD_VF_QUEUE) { + /* we must fix rss table to 2 queues */ + rnp_fix_rss_table(adapter); + } + + return 0; +} + +static int rnp_rcv_msg_from_vf(struct rnp_adapter *adapter, u32 vf) +{ + u32 mbx_size = RNP_VFMAILBOX_SIZE; + u32 msgbuf[RNP_VFMAILBOX_SIZE]; + struct rnp_hw *hw = &adapter->hw; + s32 retval; + + vf_dbg("msg from vf:%d\n", vf); + + retval = rnp_read_mbx(hw, 
msgbuf, mbx_size, vf); + if (retval) { + pr_err("Error receiving message from VF\n"); + return retval; + } + vf_dbg("msg[0]=0x%08x\n", msgbuf[0]); + + /* this is a message we already processed, do nothing */ + if (msgbuf[0] & (RNP_VT_MSGTYPE_ACK | RNP_VT_MSGTYPE_NACK)) + return retval; + + /* flush the ack before we write any messages back */ + /* clear vf_num */ + msgbuf[0] &= (~RNP_VF_MASK); + + /* this is a vf reset irq */ + if ((msgbuf[0] & RNP_MAIL_CMD_MASK) == RNP_VF_RESET) { + vf_dbg("vf %d up\n", vf); + return rnp_vf_reset_msg(adapter, vf); + } + + /* + * until the vf completes a virtual function reset it should not be + * allowed to start any configuration. + */ + if (!adapter->vfinfo[vf].clear_to_send) { + vf_dbg("wait vf clear to send\n"); + msgbuf[0] |= RNP_VT_MSGTYPE_NACK; + rnp_write_mbx(hw, msgbuf, 1, vf); + return retval; + } + + switch ((msgbuf[0] & RNP_MAIL_CMD_MASK)) { + case RNP_VF_SET_MAC_ADDR: + retval = rnp_set_vf_mac_addr(adapter, msgbuf, vf); + break; + case RNP_VF_SET_MULTICAST: + retval = rnp_set_vf_multicasts(adapter, msgbuf, vf); + break; + case RNP_VF_SET_VLAN: + retval = rnp_set_vf_vlan_msg(adapter, msgbuf, vf); + break; + case RNP_VF_SET_VLAN_STRIP: + retval = rnp_set_vf_vlan_strip_msg(adapter, msgbuf, vf); + break; + case RNP_VF_SET_LPE: + retval = rnp_set_vf_lpe(adapter, msgbuf, vf); + break; + case RNP_VF_GET_MACADDR: + retval = rnp_get_vf_mac_addr(adapter, msgbuf, vf); + break; + case RNP_VF_SET_MACVLAN: + retval = rnp_set_vf_macvlan_msg(adapter, msgbuf, vf); + break; + case RNP_VF_API_NEGOTIATE: + retval = rnp_negotiate_vf_api(adapter, msgbuf, vf); + break; + case RNP_VF_GET_QUEUES: + retval = rnp_get_vf_queues(adapter, msgbuf, vf); + break; + case RNP_VF_REG_RD: + retval = rnp_get_vf_reg(adapter, msgbuf, vf); + break; + case RNP_VF_GET_MTU: + retval = rnp_get_vf_mtu(adapter, msgbuf, vf); + break; + case RNP_VF_SET_MTU: + retval = rnp_set_vf_mtu(adapter, msgbuf, vf); + break; + case RNP_VF_GET_FW: + retval = rnp_get_vf_fw(adapter, msgbuf, vf); + break; + case RNP_VF_GET_LINK: + retval = rnp_get_vf_link(adapter, msgbuf, vf); + break; + case RNP_PF_REMOVE: + vf_dbg("vf %d removed\n", vf); + adapter->vfinfo[vf].clear_to_send = false; + retval = 1; + break; + case RNP_VF_RESET_PF: + adapter->flags2 |= RNP_FLAG2_RESET_PF; + retval = 1; + break; + case RNP_VF_GET_DMA_FRAG: + retval = rnp_get_vf_dma_frag(adapter, msgbuf, vf); + + break; + case RNP_VF_SET_PROMISCE: + retval = rnp_set_vf_promisc(adapter, msgbuf, vf); + break; + default: + e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); + retval = RNP_ERR_MBX; + break; + } + + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= RNP_VT_MSGTYPE_NACK; + else + msgbuf[0] |= RNP_VT_MSGTYPE_ACK; + + /* write vf_num */ + msgbuf[0] |= (vf << 21); + + msgbuf[0] |= RNP_VT_MSGTYPE_CTS; + + if ((msgbuf[0] & RNP_MAIL_CMD_MASK) != RNP_PF_REMOVE) + rnp_write_mbx(hw, msgbuf, mbx_size, vf); + + return retval; +} + +static void rnp_rcv_ack_from_vf(struct rnp_adapter *adapter, u32 vf) +{ + struct rnp_hw *hw = &adapter->hw; + u32 msg = RNP_VT_MSGTYPE_NACK; + + /* if device isn't clear to send it shouldn't be reading either */ + if (!adapter->vfinfo[vf].clear_to_send) + rnp_write_mbx(hw, &msg, 1, vf); +} + +void rnp_msg_task(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + u32 vf; + + rnp_fw_msg_handler(adapter); + + if (!(adapter->flags & RNP_FLAG_SRIOV_INIT_DONE)) + return; + for (vf = 0; vf < adapter->num_vfs; vf++) { + /* process any reset requests */ + + /* check flag */ + if 
(test_and_set_bit(__VF_MBX_USED, + &adapter->vfinfo[vf].status)) { + adapter->miss_time++; + e_info(drv, "we missed some irqs %d\n", vf); + continue; + } + + /* process any messages pending */ + if (!rnp_check_for_msg(hw, vf)) + rnp_rcv_msg_from_vf(adapter, vf); + + /* process any acks */ + if (!rnp_check_for_ack(hw, vf)) + rnp_rcv_ack_from_vf(adapter, vf); + clear_bit(__VF_MBX_USED, &adapter->vfinfo[vf].status); + } +} + +static int rnp_msg_post_status_signle_link(struct rnp_adapter *adapter, int vf, + int link_state) +{ + u32 msgbuf[RNP_VFMAILBOX_SIZE]; + struct rnp_hw *hw = &adapter->hw; + struct rnp_mbx_info *mbx = &hw->mbx; + msgbuf[0] = RNP_PF_SET_LINK | (vf << RNP_VNUM_OFFSET); + + switch (link_state) { + case rnp_link_state_on: + msgbuf[1] = RNP_PF_LINK_UP | adapter->link_speed; + break; + case rnp_link_state_off: + msgbuf[1] = 0; + break; + case rnp_link_state_auto: + if (adapter->link_up) { + msgbuf[1] = RNP_PF_LINK_UP | adapter->link_speed; + } else { + msgbuf[1] = 0; + } + break; + } + return mbx->ops.write(hw, msgbuf, 2, vf); +} + +int rnp_msg_post_status_signle(struct rnp_adapter *adapter, + enum PF_STATUS status, int vf) +{ + u32 msgbuf[RNP_VFMAILBOX_SIZE]; + struct rnp_hw *hw = &adapter->hw; + struct rnp_mbx_info *mbx = &hw->mbx; + switch (status) { + case PF_FCS_STATUS: + msgbuf[0] = RNP_PF_SET_FCS | (vf << RNP_VNUM_OFFSET); + if (adapter->netdev->features & NETIF_F_RXFCS) + msgbuf[1] = 1; + else + msgbuf[1] = 0; + break; + case PF_PAUSE_STATUS: + msgbuf[0] = RNP_PF_SET_PAUSE | (vf << RNP_VNUM_OFFSET); + msgbuf[1] = hw->fc.requested_mode; + break; + case PF_FT_PADDING_STATUS: + msgbuf[0] = RNP_PF_SET_FT_PADDING | (vf << RNP_VNUM_OFFSET); + if (adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) { + msgbuf[1] = 1; + } else { + msgbuf[1] = 0; + } + + break; + case PF_VLAN_FILTER_STATUS: + msgbuf[0] = RNP_PF_SET_VLAN_FILTER | (vf << RNP_VNUM_OFFSET); +#ifdef NETIF_F_HW_VLAN_CTAG_FILTER + if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { + msgbuf[1] = 1; + } else { + msgbuf[1] = 0; + } +#else + msgbuf[1] = 0; +#endif + + break; + case PF_SET_VLAN_STATUS: + msgbuf[0] = RNP_PF_SET_VLAN | (vf << RNP_VNUM_OFFSET); + + msgbuf[1] = adapter->vfinfo[vf].pf_vlan; + break; + case PF_SET_LINK_STATUS: +#ifdef HAVE_NDO_SET_VF_LINK_STATE + if (adapter->vfinfo[vf].link_state != rnp_link_state_auto) + return 0; +#endif + /* only update link state if in auto mode */ + msgbuf[0] = RNP_PF_SET_LINK | (vf << RNP_VNUM_OFFSET); + if (adapter->link_up) { + msgbuf[1] = RNP_PF_LINK_UP | adapter->link_speed; + } else { + msgbuf[1] = 0; + } + break; + case PF_SET_MTU: + msgbuf[0] = RNP_PF_SET_MTU | (vf << RNP_VNUM_OFFSET); + msgbuf[1] = adapter->netdev->mtu; + break; + case PF_SET_RESET: + msgbuf[0] = RNP_PF_SET_RESET | (vf << RNP_VNUM_OFFSET); + msgbuf[1] = 0; + + break; + case PF_SET_MAC_SPOOF: + msgbuf[0] = RNP_PF_SET_MAC_SPOOF | (vf << RNP_VNUM_OFFSET); + if (adapter->vfinfo[vf].spoofchk_enabled) + msgbuf[1] = 1; + else + msgbuf[1] = 0; + + break; + } + + return mbx->ops.write(hw, msgbuf, 2, vf); +} + +/* try to send mailbox to all active vf */ +int rnp_msg_post_status(struct rnp_adapter *adapter, enum PF_STATUS status) +{ + u32 vf; + int err = 0; + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + /* broadcast */ + for (vf = 0; vf < adapter->num_vfs; vf++) { + if (adapter->vfinfo[vf].clear_to_send) { + if (!test_bit(__RNP_IN_IRQ, &adapter->state)) { + if (test_and_set_bit(__VF_MBX_USED, + &adapter->vfinfo[vf].status)) { + adapter->miss_time++; + printk("send \n"); + return -1; + } + err |= 
rnp_msg_post_status_signle( + adapter, status, vf); + // clear flags + clear_bit(__VF_MBX_USED, + &adapter->vfinfo[vf].status); + } + } + } + } + return err; +} + +void rnp_disable_tx_rx(struct rnp_adapter *adapter) +{ +} + +void rnp_ping_all_vfs(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + u32 ping; + int i; + + for (i = 0; i < adapter->num_vfs; i++) { + ping = RNP_PF_CONTROL_PRING_MSG; + /* only send to active vf */ + ping |= RNP_VT_MSGTYPE_CTS; + rnp_write_mbx(hw, &ping, 1, i); + } +} + +int rnp_get_vf_ringnum(struct rnp_hw *hw, int vf, int num) +{ + int fix_vf_num; + + if (hw->sriov_ring_limit >= 2) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + fix_vf_num = (vf + 1) * hw->sriov_ring_limit + num; + } else { + fix_vf_num = (vf) * hw->sriov_ring_limit + num; + } + } else { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + fix_vf_num = (vf + 1) * 2 + num; + } else { + fix_vf_num = (vf) * 2 + num; + } + + + } + + return fix_vf_num; +} + +int rnp_setup_ring_maxrate(struct rnp_adapter *adapter, int ring, u64 max_rate) +{ + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + int samples_1sec = adapter->hw.usecstocount * 1000000; + + dma_ring_wr32(dma, RING_OFFSET(ring) + RNP_DMA_REG_TX_FLOW_CTRL_TM, + samples_1sec); + dma_ring_wr32(dma, RING_OFFSET(ring) + RNP_DMA_REG_TX_FLOW_CTRL_TH, + max_rate); + return 0; +} + +static int rnp_disable_port_vlan(struct rnp_adapter *adapter, int vf) +{ + struct rnp_hw *hw = &adapter->hw; + int err; + + err = rnp_set_vf_vlan(adapter, false, adapter->vfinfo[vf].pf_vlan, vf); + + if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) { + if (hw->ops.set_vf_vlan_mode) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + hw->ops.set_vf_vlan_mode( + hw, adapter->vfinfo[vf].pf_vlan, vf + 1, + false); + else + hw->ops.set_vf_vlan_mode( + hw, adapter->vfinfo[vf].pf_vlan, vf, + false); + } + } + adapter->vfinfo[vf].pf_vlan = 0; + adapter->vfinfo[vf].pf_qos = 0; + /* clear veb */ + hw->ops.set_vf_vlan_filter(hw, 0, vf, false, true); + + return err; +} + +static int rnp_enable_port_vlan(struct rnp_adapter *adapter, int vf, u16 vlan, + u8 qos) +{ + struct rnp_hw *hw = &adapter->hw; + int err; + + err = rnp_set_vf_vlan(adapter, true, vlan, vf); + if (err) + goto out; + + adapter->vfinfo[vf].pf_vlan = vlan; + adapter->vfinfo[vf].pf_qos = qos; + dev_info(pci_dev_to_dev(adapter->pdev), + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__RNP_DOWN, &adapter->state)) { + dev_warn( + pci_dev_to_dev(adapter->pdev), + "The VF VLAN has been set, but the PF device is not up.\n"); + dev_warn( + pci_dev_to_dev(adapter->pdev), + "Bring the PF device up before attempting to use the VF device.\n"); + } + hw->ops.set_vf_vlan_filter(hw, vlan, vf, true, true); + + /* if in sriov vlan mode should setup pfvlvf table */ + if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) { + if (hw->ops.set_vf_vlan_mode) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + hw->ops.set_vf_vlan_mode(hw, vlan, vf + 1, + true); + else + hw->ops.set_vf_vlan_mode(hw, vlan, vf, true); + } + } +out: + return err; +} + +#ifdef IFLA_VF_VLAN_INFO_MAX +int rnp_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, + __be16 vlan_proto) +#else +int rnp_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) +#endif +{ + int err = 0; + struct rnp_adapter *adapter = netdev_priv(netdev); + + /* VLAN IDs accepted range 0-4094 */ + if (vf < 0 || vf >= adapter->num_vfs || vlan > VLAN_VID_MASK - 1 || + 
qos > 7) + return -EINVAL; + +#ifdef IFLA_VF_VLAN_INFO_MAX + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; +#endif + if (vlan || qos) { + /* + * Check if there is already a port VLAN set, if so + * we have to delete the old one first before we + * can set the new one. The usage model had + * previously assumed the user would delete the + * old port VLAN before setting a new one but this + * is not necessarily the case. + */ + if (adapter->vfinfo[vf].vf_vlan) { + dev_err(&adapter->pdev->dev, + "vf set vlan before, delete it before add new\n"); + err = -EINVAL; + goto out; + } + if (adapter->vfinfo[vf].pf_vlan) + err = rnp_disable_port_vlan(adapter, vf); + if (err) + goto out; + err = rnp_enable_port_vlan(adapter, vf, vlan, qos); + + } else { + /* if only vf set vlan */ + if ((adapter->vfinfo[vf].pf_vlan == 0) && + (adapter->vfinfo[vf].vf_vlan)) { + dev_err(&adapter->pdev->dev, + "pf cannot delete vm vlan(ip link add)\n"); + err = -EINVAL; + } + /* if not set vlan before, nothing todo */ + if (adapter->vfinfo[vf].pf_vlan == 0) + return 0; + + err = rnp_disable_port_vlan(adapter, vf); + } + /* send mbx to vf */ + rnp_msg_post_status_signle(adapter, PF_SET_VLAN_STATUS, vf); +out: + return err; +} + +#if IS_ENABLED(CONFIG_PCI_IOV) +int rnp_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + if (vf < 0 || vf >= adapter->num_vfs) + return -EINVAL; + + adapter->vfinfo[vf].spoofchk_enabled = setting; + /* send mbx to vf */ + rnp_msg_post_status_signle(adapter, PF_SET_MAC_SPOOF, vf); + + return 0; +} + +#endif /* CONFIG_PCI_IOV */ + +#ifdef HAVE_NDO_SET_VF_TRUST +int rnp_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + if (vf < 0 || vf >= adapter->num_vfs) + return -EINVAL; + + /* nothing to do */ + if (adapter->vfinfo[vf].trusted == setting) + return 0; + + adapter->vfinfo[vf].trusted = setting; + + /* reset VF to reconfigure features */ + e_info(drv, "VF %u is %strusted\n", vf, setting ? 
"" : "not "); + + return 0; +} + +#endif + +#ifdef HAVE_NDO_SET_VF_LINK_STATE + +int rnp_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + if (vf < 0 || vf >= adapter->num_vfs) { + dev_err(pci_dev_to_dev(adapter->pdev), + "NDO set VF link - invalid VF identifier %d\n", vf); + ret = -EINVAL; + goto out; + } + + switch (state) { + case IFLA_VF_LINK_STATE_ENABLE: + dev_info(pci_dev_to_dev(adapter->pdev), + "NDO set VF %d link state %d \n", vf, state); + adapter->vfinfo[vf].link_state = rnp_link_state_on; + rnp_msg_post_status_signle_link(adapter, vf, rnp_link_state_on); + break; + case IFLA_VF_LINK_STATE_DISABLE: + dev_info(pci_dev_to_dev(adapter->pdev), + "NDO set VF %d link state disable\n", vf); + adapter->vfinfo[vf].link_state = rnp_link_state_off; + rnp_msg_post_status_signle_link(adapter, vf, + rnp_link_state_off); + break; + case IFLA_VF_LINK_STATE_AUTO: + dev_info(pci_dev_to_dev(adapter->pdev), + "NDO set VF %d link state auto\n", vf); + adapter->vfinfo[vf].link_state = rnp_link_state_auto; + rnp_msg_post_status_signle_link(adapter, vf, + rnp_link_state_auto); + break; + default: + dev_err(pci_dev_to_dev(adapter->pdev), + "NDO set VF %d - invalid link state %d\n", vf, state); + ret = -EINVAL; + } +out: + return ret; +} + +#endif + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +int rnp_ndo_set_vf_bw(struct net_device *netdev, int vf, + int __always_unused min_tx_rate, int max_tx_rate) +#else +int rnp_ndo_set_vf_bw(struct net_device *netdev, int vf, int max_tx_rate) +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + /* limit vf ring rate */ + int ring_max_rate; + int vf_ring; + int link_speed = 0; + u64 real_rate = 0; + int i; + + if (vf >= hw->max_vfs - 1) + return -EINVAL; + + switch (adapter->link_speed) { + case RNP_LINK_SPEED_40GB_FULL: + link_speed = 40000; + break; + case RNP_LINK_SPEED_25GB_FULL: + link_speed = 25000; + break; + case RNP_LINK_SPEED_10GB_FULL: + link_speed = 10000; + break; + case RNP_LINK_SPEED_1GB_FULL: + link_speed = 1000; + break; + case RNP_LINK_SPEED_100_FULL: + link_speed = 100; + break; + } + /* rate limit cannot be less than 10Mbs or greater than link speed */ + if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed))) + return -EINVAL; + + adapter->vfinfo[vf].tx_rate = max_tx_rate; + + ring_max_rate = max_tx_rate / hw->sriov_ring_limit; + + real_rate = (ring_max_rate * 1024 * 128) * 90 / 100; + + for (i = 0; i < hw->sriov_ring_limit; i++) { + vf_ring = rnp_get_vf_ringnum(hw, vf, i); + rnp_setup_ring_maxrate(adapter, vf_ring, real_rate); + } + //vf_ring = rnp_get_vf_ringnum(hw, vf, 1); + //rnp_setup_ring_maxrate(adapter, vf_ring, real_rate); + return 0; +} + +int rnp_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs)) + return -EINVAL; + adapter->vfinfo[vf].pf_set_mac = true; + dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); + dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" + " change effective."); + if (test_bit(__RNP_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF MAC address has been set," + " but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before" + " attempting to use the VF device.\n"); + } + rnp_set_vf_mac(adapter, vf, mac); + 
rnp_msg_post_status_signle(adapter, PF_SET_RESET, vf); + + return 0; +} + +int rnp_ndo_get_vf_config(struct net_device *netdev, int vf, + struct ifla_vf_info *ivi) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->num_vfs) + return -EINVAL; + ivi->vf = vf; + memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate; + ivi->min_tx_rate = 0; +#else + ivi->tx_rate = adapter->vfinfo[vf].tx_rate; +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ + + if (adapter->vfinfo[vf].pf_vlan) + ivi->vlan = adapter->vfinfo[vf].pf_vlan; + else + ivi->vlan = adapter->vfinfo[vf].vf_vlan; + + ivi->qos = adapter->vfinfo[vf].pf_qos; +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; +#endif +#ifdef HAVE_NDO_SET_VF_LINK_STATE + switch (adapter->vfinfo[vf].link_state) { + case rnp_link_state_on: + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + break; + case rnp_link_state_off: + ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; + break; + case rnp_link_state_auto: + ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; + break; + default: + ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; + } +#endif +#ifdef HAVE_NDO_SET_VF_TRUST + ivi->trusted = adapter->vfinfo[vf].trusted; +#endif + + return 0; +} + +int rnp_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + vf_dbg("\n\n !!!! %s:%d num_vfs:%d\n", __func__, __LINE__, num_vfs); + if (num_vfs == 0) + return rnp_pci_sriov_disable(dev); + else + return rnp_pci_sriov_enable(dev, num_vfs); +} diff --git a/drivers/net/ethernet/mucse/rnp/rnp_sriov.h b/drivers/net/ethernet/mucse/rnp/rnp_sriov.h new file mode 100755 index 0000000000000000000000000000000000000000..d0300bd700e04fbef60c8f0247b0c8d99170228d --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_sriov.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
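+ *
+ * PF-side SR-IOV support declarations: PF<->VF mailbox message handling
+ * and the ndo_set_vf_* / ndo_get_vf_config callbacks.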
*/
+
+#ifndef _RNP_SRIOV_H_
+#define _RNP_SRIOV_H_
+
+void rnp_restore_vf_multicasts(struct rnp_adapter *adapter);
+void rnp_restore_vf_macvlans(struct rnp_adapter *adapter);
+
+void rnp_restore_vf_macs(struct rnp_adapter *adapter);
+void rnp_msg_task(struct rnp_adapter *adapter);
+int rnp_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
+void rnp_disable_tx_rx(struct rnp_adapter *adapter);
+void rnp_ping_all_vfs(struct rnp_adapter *adapter);
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+int rnp_ndo_set_vf_bw(struct net_device *netdev, int vf,
+                      int __always_unused min_tx_rate, int max_tx_rate);
+#else
+int rnp_ndo_set_vf_bw(struct net_device *netdev, int vf, int max_tx_rate);
+#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
+int rnp_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
+int rnp_msg_post_status(struct rnp_adapter *adapter, enum PF_STATUS status);
+
+int rnp_setup_ring_maxrate(struct rnp_adapter *adapter, int ring, u64 max_rate);
+int rnp_get_vf_ringnum(struct rnp_hw *hw, int vf, int num);
+int rnp_ndo_get_vf_config(struct net_device *netdev, int vf,
+                          struct ifla_vf_info *ivi);
+void rnp_check_vf_rate_limit(struct rnp_adapter *adapter);
+int rnp_disable_sriov(struct rnp_adapter *adapter);
+#ifdef CONFIG_PCI_IOV
+void rnp_enable_sriov_true(struct rnp_adapter *adapter);
+void rnp_enable_sriov(struct rnp_adapter *adapter);
+#endif
+int rnp_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+#ifdef IFLA_VF_VLAN_INFO_MAX
+int rnp_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+                        __be16 vlan_proto);
+#else
+int rnp_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
+#endif
+
+int rnp_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state);
+#if IS_ENABLED(CONFIG_PCI_IOV)
+int rnp_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
+#endif
+#ifdef HAVE_NDO_SET_VF_TRUST
+int rnp_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
+#endif
+#endif /* _RNP_SRIOV_H_ */
diff --git a/drivers/net/ethernet/mucse/rnp/rnp_sysfs.c b/drivers/net/ethernet/mucse/rnp/rnp_sysfs.c
new file mode 100755
index 0000000000000000000000000000000000000000..7b98cde893369d538b54b2538f9deb64bb40438a
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnp/rnp_sysfs.c
@@ -0,0 +1,2482 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. 
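+ *
+ * Sysfs attributes for the rnp PF: TX/RX ring and descriptor dumps,
+ * debug/configuration knobs, the "maintain" firmware maintenance channel
+ * and (under RNP_HWMON) hwmon temperature sensors.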
*/ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rnp.h" +#include "rnp_common.h" +#include "rnp_type.h" + +#include "rnp_mbx.h" +#include "rnp_mbx_fw.h" +#include "rnp_compat.h" +//#define TEST_PF_RESET + +#define PHY_EXT_REG_FLAG 0x80000000 + +struct maintain_req { + int magic; +#define MAINTAIN_MAGIC 0xa6a7a8a9 + + int cmd; + int arg0; + int req_data_bytes; + int reply_bytes; + char data[0]; +} __attribute__((packed)); + +struct ucfg_mac_sn { + unsigned char macaddr[64]; + unsigned char sn[32]; + int magic; +#define MAC_SN_MAGIC 0x87654321 + char rev[52]; + unsigned char pn[32]; +} __attribute__((packed, aligned(4))); + +static int print_desc(char *buf, void *data, int len) +{ + u8 *ptr = (u8 *)data; + int ret = 0; + int i = 0; + + for (i = 0; i < len; i++) + ret += sprintf(buf + ret, "%02x ", *(ptr + i)); + + return ret; +} +#ifdef RNP_HWMON +static ssize_t rnp_hwmon_show_location(struct device __always_unused *dev, + struct device_attribute *attr, char *buf) +{ + struct hwmon_attr *rnp_attr = + container_of(attr, struct hwmon_attr, dev_attr); + + return snprintf(buf, PAGE_SIZE, "loc%u\n", rnp_attr->sensor->location); +} + +static ssize_t rnp_hwmon_show_name(struct device __always_unused *dev, + struct device_attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "rnp\n"); +} + +static ssize_t rnp_hwmon_show_temp(struct device __always_unused *dev, + struct device_attribute *attr, char *buf) +{ + struct hwmon_attr *rnp_attr = + container_of(attr, struct hwmon_attr, dev_attr); + int value; + + /* reset the temp field */ + rnp_attr->hw->ops.get_thermal_sensor_data(rnp_attr->hw); + + value = rnp_attr->sensor->temp; + /* display millidegree */ + value *= 1000; + + return snprintf(buf, PAGE_SIZE, "%d\n", value); +} + +static ssize_t rnp_hwmon_show_cautionthresh(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *rnp_attr = + container_of(attr, struct hwmon_attr, dev_attr); + int value = rnp_attr->sensor->caution_thresh; + /* display millidegree */ + value *= 1000; + + return snprintf(buf, PAGE_SIZE, "%d\n", value); +} + +static ssize_t rnp_hwmon_show_maxopthresh(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *rnp_attr = + container_of(attr, struct hwmon_attr, dev_attr); + int value = rnp_attr->sensor->max_op_thresh; + + /* display millidegree */ + value *= 1000; + + return snprintf(buf, PAGE_SIZE, "%d\n", value); +} + +/** + * rnp_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. 
+ * @adapter: pointer to the adapter structure + * @offset: offset in the eeprom sensor data table + * @type: type of sensor data to display + * + * For each file we want in hwmon's sysfs interface we need a + * device_attribute This is included in our hwmon_attr struct that contains + * the references to the data structures we need to get the data to display + */ +static int rnp_add_hwmon_attr(struct rnp_adapter *adapter, unsigned int offset, + int type) +{ + unsigned int n_attr; + struct hwmon_attr *rnp_attr; +#ifdef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS + + n_attr = adapter->rnp_hwmon_buff->n_hwmon; + rnp_attr = &adapter->rnp_hwmon_buff->hwmon_list[n_attr]; +#else + int rc; + + n_attr = adapter->rnp_hwmon_buff.n_hwmon; + rnp_attr = &adapter->rnp_hwmon_buff.hwmon_list[n_attr]; +#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */ + + switch (type) { + case RNP_HWMON_TYPE_LOC: + rnp_attr->dev_attr.show = rnp_hwmon_show_location; + snprintf(rnp_attr->name, sizeof(rnp_attr->name), "temp%u_label", + offset + 1); + break; + case RNP_HWMON_TYPE_NAME: + rnp_attr->dev_attr.show = rnp_hwmon_show_name; + snprintf(rnp_attr->name, sizeof(rnp_attr->name), "name"); + break; + case RNP_HWMON_TYPE_TEMP: + rnp_attr->dev_attr.show = rnp_hwmon_show_temp; + snprintf(rnp_attr->name, sizeof(rnp_attr->name), "temp%u_input", + offset + 1); + break; + case RNP_HWMON_TYPE_CAUTION: + rnp_attr->dev_attr.show = rnp_hwmon_show_cautionthresh; + snprintf(rnp_attr->name, sizeof(rnp_attr->name), "temp%u_max", + offset + 1); + break; + case RNP_HWMON_TYPE_MAX: + rnp_attr->dev_attr.show = rnp_hwmon_show_maxopthresh; + snprintf(rnp_attr->name, sizeof(rnp_attr->name), "temp%u_crit", + offset + 1); + break; + default: + return -EPERM; + } + + /* These always the same regardless of type */ + rnp_attr->sensor = &adapter->hw.thermal_sensor_data.sensor[offset]; + rnp_attr->hw = &adapter->hw; + rnp_attr->dev_attr.store = NULL; + rnp_attr->dev_attr.attr.mode = 0444; + rnp_attr->dev_attr.attr.name = rnp_attr->name; + +#ifdef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS + sysfs_attr_init(&rnp_attr->dev_attr.attr); + + adapter->rnp_hwmon_buff->attrs[n_attr] = &rnp_attr->dev_attr.attr; + + ++adapter->rnp_hwmon_buff->n_hwmon; + + return 0; +#else + rc = device_create_file(pci_dev_to_dev(adapter->pdev), + &rnp_attr->dev_attr); + + if (rc == 0) + ++adapter->rnp_hwmon_buff.n_hwmon; + + return rc; +#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */ +} +#endif /* RNP_HWMON */ + +#define to_net_device(n) container_of(n, struct net_device, dev) + +/* xx:xx:xx:xx:xx:xx@vf */ +static ssize_t mac_vf_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int i = 0; + int vf; + int rar_entry; + int ignored __attribute__((unused)) ; + char *buf_temp; + char *token; + char *mac; + char temp[10]; + unsigned char mac_store[ETH_ALEN]; + + buf_temp = kmalloc(count, GFP_KERNEL); + memcpy(buf_temp, buf, count); + token = strsep(&buf_temp, " "); + while (token) { + //printk("token is %s\n", token); + /* vlan max 8 prio */ + mac = strsep(&token, ":"); + sprintf(temp, "0x%s\n", mac); + + ignored = kstrtou8(temp, 0, &mac_store[i]); + //kstrtou8(mac, 0, &temp); + i++; + + if (i >= 5) + break; + } + mac = strsep(&token, "@"); + sprintf(temp, "0x%s\n", mac); + ignored=kstrtou8(temp, 0, &mac_store[i]); + // set vf + mac = strsep(&token, "@"); + ignored=kstrtou32(mac, 0, &vf); + + if (vf > 
adapter->num_vfs - 1) { + kfree(buf_temp); + return -EINVAL; + } + + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) { + kfree(buf_temp); + return -EINVAL; + } + + memcpy(adapter->vfinfo[vf].vf_mac_fake_address, mac_store, ETH_ALEN); + + adapter->vfinfo[vf].vf_mac_fake_set = 1; + /*printk("set mac 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, vf %d\n", + mac_store[0], + mac_store[1], + mac_store[2], + mac_store[3], + mac_store[4], + mac_store[5], + vf); + */ + rar_entry = hw->mac.num_rar_entries - (vf + 1 + adapter->num_vfs); + + if (adapter->vfinfo[vf].vf_mac_fake_set) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + hw->ops.set_rar_with_vf(hw, mac_store, rar_entry, + vf + 1, true); + else + hw->ops.set_rar_with_vf(hw, mac_store, rar_entry, vf, + true); + } + + kfree(buf_temp); + + return count; +} + +#ifndef NO_BIT_ATTRS +static ssize_t maintain_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t off, + size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int rbytes = count; + + if (adapter->maintain_buf == NULL) + return 0; + + if (off + count > adapter->maintain_buf_len) + rbytes = adapter->maintain_buf_len - off; + + memcpy(buf, adapter->maintain_buf + off, rbytes); + + if ((off + rbytes) >= adapter->maintain_buf_len) { + kfree(adapter->maintain_buf); + adapter->maintain_buf = NULL; + adapter->maintain_buf_len = 0; + } + + return rbytes; +} + +static ssize_t maintain_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t off, + size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct maintain_req *req; + void *dma_buf = NULL; + dma_addr_t dma_phy; + int bytes; + + if (off == 0) { + if (count < sizeof(*req)) { + return -EINVAL; + } + req = (struct maintain_req *)buf; + if (req->magic != MAINTAIN_MAGIC) { + return -EINVAL; + } + bytes = max_t(int, req->req_data_bytes, req->reply_bytes); + bytes += sizeof(*req); + + /* free no readed buf */ + if (adapter->maintain_buf) { + kfree(adapter->maintain_buf); + adapter->maintain_buf = NULL; + adapter->maintain_buf_len = 0; + } + + dma_buf = dma_alloc_coherent(&hw->pdev->dev, bytes, &dma_phy, + GFP_ATOMIC); + if (!dma_buf) { + netdev_err(netdev, "%s: no memory:%d!", __func__, + bytes); + return -ENOMEM; + } + + adapter->maintain_dma_buf = dma_buf; + adapter->maintain_dma_phy = dma_phy; + adapter->maintain_dma_size = bytes; + adapter->maintain_in_bytes = req->req_data_bytes + sizeof(*req); + + memcpy(dma_buf + off, buf, count); + + if (count < adapter->maintain_in_bytes) + return count; + } + + dma_buf = adapter->maintain_dma_buf; + dma_phy = adapter->maintain_dma_phy; + req = (struct maintain_req *)dma_buf; + + memcpy(dma_buf + off, buf, count); + + /* all data got, send req */ + if ((off + count) >= adapter->maintain_in_bytes) { + int reply_bytes = req->reply_bytes; + err = rnp_maintain_req(hw, req->cmd, req->arg0, + req->req_data_bytes, req->reply_bytes, + dma_phy); + if (err != 0) { + goto err_quit; + } + /* copy data for read */ + if (reply_bytes > 0) { + adapter->maintain_buf_len = reply_bytes; + adapter->maintain_buf = + kmalloc(adapter->maintain_buf_len, GFP_KERNEL); + if (!adapter->maintain_buf) { + netdev_err(netdev, + "No Memory for maintain buf:%d\n", + adapter->maintain_buf_len); 
+ err = -ENOMEM; + + goto err_quit; + } + memcpy(adapter->maintain_buf, dma_buf, reply_bytes); + } + + if (dma_buf) { + dma_free_coherent(&hw->pdev->dev, + adapter->maintain_dma_size, dma_buf, + dma_phy); + } + adapter->maintain_dma_buf = NULL; + } + + return count; +err_quit: + if (dma_buf) { + dma_free_coherent(&hw->pdev->dev, adapter->maintain_dma_size, + dma_buf, dma_phy); + adapter->maintain_dma_buf = NULL; + } + return err; +} + +static BIN_ATTR(maintain, (S_IWUSR | S_IRUGO), maintain_read, maintain_write, + 1 * 1024 * 1024); +#endif + +static ssize_t show_version_info(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = 0; + + ret += sprintf(buf + ret, "driver :%s-%x\n", + rnp_driver_version, hw->pcode); + ret += sprintf(buf + ret, "fw :%d.%d.%d.%d 0x%08x\n", ((char *)&(hw->fw_version))[3], + ((char *)&(hw->fw_version))[2], ((char *)&(hw->fw_version))[1], + ((char *)&(hw->fw_version))[0], hw->bd_uid); + + return ret; +} +#ifdef TEST_PF_RESET +static ssize_t show_test_info(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = 0; + int i; + struct rnp_q_vector *q_vector; + + for (i = 0; i < adapter->num_tx_queues; i++) { + q_vector = adapter->q_vector[i]; + + ret += sprintf(buf + ret, "q_vector %d itr %d\n", + q_vector->v_idx, q_vector->itr_rx >> 2); + } + + return ret; +} + +static ssize_t store_test_info(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = count; + + return ret; +} +#endif +static ssize_t show_ring_sriov_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = 0; + + ret += sprintf(buf + ret, "now sriov ring num is %d\n", hw->sriov_ring_limit); + ret += sprintf(buf + ret, "old vf is %s\n", + (adapter->priv_flags & RNP_PRIV_FLAG_OLD_VF_QUEUE) ? 
"on" : "off"); + ret += sprintf(buf + ret, "sriov off can clear it\n"); + + return ret; +} + +static ssize_t store_ring_sriov_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = count; + u32 sriov_ring_num; + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + printk("should close sriov first\n"); + return -EINVAL; + } + + if (0 != kstrtou32(buf, 0, &sriov_ring_num)) + return -EINVAL; + /* should check tx_ring_num is valid */ + if ((sriov_ring_num % 2) != 0) { + printk("only enen number is valied \n"); + return -EINVAL; + + } + + if ((sriov_ring_num != 0) && (sriov_ring_num <= 32)) { + hw->sriov_ring_limit = sriov_ring_num; + } else { + ret = -EINVAL; + } + + return ret; +} + +static ssize_t show_rx_desc_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + u32 rx_ring_num = adapter->sysfs_rx_ring_num; + u32 rx_desc_num = adapter->sysfs_rx_desc_num; + struct rnp_ring *ring = adapter->rx_ring[rx_ring_num]; + int ret = 0; + union rnp_rx_desc *desc; + + if (test_bit(__RNP_DOWN, &adapter->state)) { + ret += sprintf(buf + ret, "port not up \n"); + return ret; + } + + desc = RNP_RX_DESC(ring, rx_desc_num); + ret += sprintf(buf + ret, "rx ring %d desc %d:\n", rx_ring_num, + rx_desc_num); + ret += print_desc(buf + ret, desc, sizeof(*desc)); + ret += sprintf(buf + ret, "\n"); + + return ret; +} + +static ssize_t store_rx_desc_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = count; + u32 rx_desc_num = adapter->sysfs_rx_desc_num; + u32 rx_ring_num = adapter->sysfs_rx_ring_num; + struct rnp_ring *ring = adapter->rx_ring[rx_ring_num]; + + if (0 != kstrtou32(buf, 0, &rx_desc_num)) + return -EINVAL; + /* should check tx_ring_num is valid */ + if (rx_desc_num < ring->count) { + adapter->sysfs_rx_desc_num = rx_desc_num; + } else { + ret = -EINVAL; + } + + return ret; +} + +static ssize_t show_tcp_sync_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC) + ret += sprintf( + buf + ret, "tcp sync remap on queue %d prio %s\n", + adapter->tcp_sync_queue, + (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC_PRIO) ? 
+ "NO" : + "OFF"); + else + ret += sprintf(buf + ret, "tcp sync remap off\n"); + + return ret; +} + +static ssize_t store_tcp_sync_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = count; + u32 tcp_sync_queue; + + if (0 != kstrtou32(buf, 0, &tcp_sync_queue)) + return -EINVAL; + + if (tcp_sync_queue < adapter->num_rx_queues) { + adapter->tcp_sync_queue = tcp_sync_queue; + adapter->priv_flags |= RNP_PRIV_FLAG_TCP_SYNC; + + if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC_PRIO) + hw->ops.set_tcp_sync_remapping( + hw, adapter->tcp_sync_queue, true, true); + else + hw->ops.set_tcp_sync_remapping( + hw, adapter->tcp_sync_queue, true, false); + + } else { + adapter->priv_flags &= ~RNP_PRIV_FLAG_TCP_SYNC; + + hw->ops.set_tcp_sync_remapping(hw, adapter->tcp_sync_queue, + false, false); + } + + return ret; +} + +static ssize_t show_rx_skip_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + if (adapter->priv_flags & RNP_PRIV_FLAG_RX_SKIP_EN) + ret += sprintf(buf + ret, "rx skip bytes: %d\n", + 16 * (adapter->priv_skip_count + 1)); + else + ret += sprintf(buf + ret, "rx skip off\n"); + + return ret; +} + +static ssize_t store_rx_skip_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = count; + u32 rx_skip_count; + + if (0 != kstrtou32(buf, 0, &rx_skip_count)) + return -EINVAL; + + if ((rx_skip_count > 0) && (rx_skip_count < 17)) { + adapter->priv_skip_count = rx_skip_count - 1; + adapter->priv_flags |= RNP_PRIV_FLAG_RX_SKIP_EN; + hw->ops.set_rx_skip(hw, adapter->priv_skip_count, true); + + } else { + adapter->priv_flags &= ~RNP_PRIV_FLAG_RX_SKIP_EN; + + hw->ops.set_rx_skip(hw, adapter->priv_skip_count, false); + + return -EINVAL; + } + + return ret; +} + +static ssize_t show_rx_drop_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + ret += sprintf(buf + ret, "rx_drop_status %llx\n", + adapter->rx_drop_status); + + return ret; +} + +static ssize_t store_rx_drop_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = count; + u64 rx_drop_status; + + if (0 != kstrtou64(buf, 0, &rx_drop_status)) + return -EINVAL; + + adapter->rx_drop_status = rx_drop_status; + + hw->ops.update_rx_drop(hw); + + return ret; +} + +static ssize_t show_outer_vlan_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + if (adapter->priv_flags & RNP_PRIV_FLAG_DOUBLE_VLAN) + ret += sprintf(buf + ret, "double vlan on\n"); + else + ret += sprintf(buf + ret, "double vlan off\n"); + + switch (adapter->outer_vlan_type) { + case outer_vlan_type_88a8: + ret += sprintf(buf + ret, "outer vlan 0x88a8\n"); + + break; +#ifdef ETH_P_QINQ1 + case 
outer_vlan_type_9100: + ret += sprintf(buf + ret, "outer vlan 0x9100\n"); + + break; +#endif +#ifdef ETH_P_QINQ2 + case outer_vlan_type_9200: + ret += sprintf(buf + ret, "outer vlan 0x9200\n"); + + break; +#endif + default: + ret += sprintf(buf + ret, "outer vlan error\n"); + break; + } + return ret; +} + +static ssize_t store_outer_vlan_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = count; + u32 outer_vlan_type; + + if (0 != kstrtou32(buf, 0, &outer_vlan_type)) + return -EINVAL; + /* should check tx_ring_num is valid */ + if (outer_vlan_type < outer_vlan_type_max) { + adapter->outer_vlan_type = outer_vlan_type; + } else + ret = -EINVAL; + /* should update to hw */ + if (hw->ops.set_outer_vlan_type) + hw->ops.set_outer_vlan_type(hw, outer_vlan_type); + + return ret; +} + +static ssize_t show_tx_stags_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) + ret += sprintf(buf + ret, "tx stags on\n"); + else + ret += sprintf(buf + ret, "tx stags off\n"); + + ret += sprintf(buf + ret, "vid 0x%x\n", adapter->stags_vid); + + return ret; +} + +static ssize_t store_tx_stags_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct rnp_eth_info *eth = &hw->eth; + int ret = count; + u16 tx_stags; + + if (0 != kstrtou16(buf, 0, &tx_stags)) + return -EINVAL; + if (tx_stags < VLAN_N_VID) { + adapter->stags_vid = tx_stags; + } else + ret = -EINVAL; + /* should update vlan filter */ + eth->ops.set_vfta(eth, adapter->stags_vid, true); + + return ret; +} + +static ssize_t show_tx_desc_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + u32 tx_ring_num = adapter->sysfs_tx_ring_num; + u32 tx_desc_num = adapter->sysfs_tx_desc_num; + struct rnp_ring *ring = adapter->tx_ring[tx_ring_num]; + int ret = 0; + struct rnp_tx_desc *desc; + + if (test_bit(__RNP_DOWN, &adapter->state)) { + ret += sprintf(buf + ret, "port not up \n"); + return ret; + } + + desc = RNP_TX_DESC(ring, tx_desc_num); + ret += sprintf(buf + ret, "tx ring %d desc %d:\n", tx_ring_num, + tx_desc_num); + ret += print_desc(buf + ret, desc, sizeof(*desc)); + ret += sprintf(buf + ret, "\n"); + + return ret; +} + +static ssize_t store_tx_desc_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = count; + + u32 tx_desc_num = adapter->sysfs_tx_desc_num; + u32 tx_ring_num = adapter->sysfs_tx_ring_num; + + struct rnp_ring *ring = adapter->tx_ring[tx_ring_num]; + + if (0 != kstrtou32(buf, 0, &tx_desc_num)) + return -EINVAL; + if (tx_desc_num < ring->count) + adapter->sysfs_tx_desc_num = tx_desc_num; + else + ret = -EINVAL; + + return ret; +} + +static ssize_t show_para_info(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct 
rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct rnp_eth_info *eth = &hw->eth; + struct rnp_mac_info *mac = &hw->mac; + + ret += sprintf(buf + ret, "nsi_en:%d\n", hw->ncsi_en); + ret += sprintf( + buf + ret, + "eth: \n\tmc_filter_type:%u, mcft_size:%u, vft_size:%u, " + "num_rar_entries:%u,\n" + "\trar_highwater:%u, rx_pb_size:%u, max_tx_queues:%u, " + "max_rx_queues:%u, \n" + "\treg_off:%u, orig_autoc:%u, cached_autoc:%u, orig_autoc2:%u\n", + eth->mc_filter_type, eth->mcft_size, eth->vft_size, + eth->num_rar_entries, eth->rar_highwater, eth->rx_pb_size, + eth->max_tx_queues, eth->max_rx_queues, eth->reg_off, + eth->orig_autoc, eth->cached_autoc, eth->orig_autoc2); + + ret += sprintf( + buf + ret, + "mac:\n\t" + "mc_filter_type:%u mcft_size:%u vft_size:%u num_rar_entries:%u \n" + "\trar_highwater:%u rx_pb_size:%u max_tx_queues:%u max_rx_queues:%u \n" + "\treg_off:%u orig_autoc:%u cached_autoc:%u orig_autoc2:%u " + "orig_link_settings_stored:%u \n" + "\tautotry_restart:%u mac_flags:%u\n", + mac->mc_filter_type, mac->mcft_size, mac->vft_size, + mac->num_rar_entries, mac->rar_highwater, mac->rx_pb_size, + mac->max_tx_queues, mac->max_rx_queues, mac->reg_off, + mac->orig_autoc, mac->cached_autoc, mac->orig_autoc2, + mac->orig_link_settings_stored, mac->autotry_restart, + mac->mac_flags); + + return ret; +} + +static ssize_t show_rx_ring_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + u32 rx_ring_num = adapter->sysfs_rx_ring_num; + struct rnp_ring *ring = adapter->rx_ring[rx_ring_num]; + int ret = 0; + union rnp_rx_desc *rx_desc; + + if (test_bit(__RNP_DOWN, &adapter->state)) { + ret += sprintf(buf + ret, "port not up\n"); + + return ret; + } + + ret += sprintf(buf + ret, "queue %d info:\n", rx_ring_num); + ret += sprintf(buf + ret, "next_to_use %d\n", ring->next_to_use); + ret += sprintf(buf + ret, "next_to_clean %d\n", ring->next_to_clean); + rx_desc = RNP_RX_DESC(ring, ring->next_to_clean); + ret += sprintf(buf + ret, "next_to_clean desc: "); + ret += print_desc(buf + ret, rx_desc, sizeof(*rx_desc)); + ret += sprintf(buf + ret, "\n"); + + return ret; +} + +static ssize_t store_rx_ring_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = count; + + u32 rx_ring_num = adapter->sysfs_rx_ring_num; + + if (0 != kstrtou32(buf, 0, &rx_ring_num)) + return -EINVAL; + if (rx_ring_num < adapter->num_rx_queues) + adapter->sysfs_rx_ring_num = rx_ring_num; + else + ret = -EINVAL; + + return ret; +} + +static ssize_t show_tx_ring_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + u32 tx_ring_num = adapter->sysfs_tx_ring_num; + struct rnp_ring *ring = adapter->tx_ring[tx_ring_num]; + int ret = 0; + struct rnp_tx_buffer *tx_buffer; + struct rnp_tx_desc *eop_desc; + + if (test_bit(__RNP_DOWN, &adapter->state)) { + ret += sprintf(buf + ret, "port not up\n"); + + return ret; + } + + ret += sprintf(buf + ret, "queue %d info:\n", tx_ring_num); + ret += sprintf(buf + ret, "next_to_use %d\n", ring->next_to_use); + ret += sprintf(buf + ret, "next_to_clean %d\n", ring->next_to_clean); + + tx_buffer = &ring->tx_buffer_info[ring->next_to_clean]; + eop_desc = 
tx_buffer->next_to_watch; + /* if have watch desc */ + if (eop_desc) { + ret += sprintf(buf + ret, "next_to_watch:\n"); + ret += print_desc(buf + ret, eop_desc, sizeof(*eop_desc)); + ret += sprintf(buf + ret, "\n"); + } else { + ret += sprintf(buf + ret, "no next_to_watch data\n"); + } + + return ret; +} + +static ssize_t store_tx_ring_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = count; + + u32 tx_ring_num = adapter->sysfs_tx_ring_num; + + if (0 != kstrtou32(buf, 0, &tx_ring_num)) + return -EINVAL; + if (tx_ring_num < adapter->num_tx_queues) + adapter->sysfs_tx_ring_num = tx_ring_num; + else + ret = -EINVAL; + + return ret; +} + +//static ssize_t show_tx_counter(struct device *dev, +// struct device_attribute *attr, char *buf) +//{ +// u32 val = 0; +// int i, ret = 0; +// struct net_device *netdev = to_net_device(dev); +// struct rnp_adapter *adapter = netdev_priv(netdev); +// struct rnp_hw *hw = &adapter->hw; +// +// ret += sprintf(buf + ret, "tx counters\n"); +// for (i = 0; i < 4; i++) { +// ret += sprintf(buf + ret, "ring%d-tx:\n", i); +// +// val = rd32(hw, RNP10_RING_BASE + RING_OFFSET(i) + +// RNP_DMA_REG_TX_DESC_BUF_LEN); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", "len:", +// RNP10_RING_BASE + RING_OFFSET(i) + +// RNP_DMA_REG_TX_DESC_BUF_LEN, +// val); +// +// val = rd32(hw, RNP10_RING_BASE + RING_OFFSET(i) + +// RNP_DMA_REG_TX_DESC_BUF_HEAD); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", "head:", +// RNP10_RING_BASE + RING_OFFSET(i) + +// RNP_DMA_REG_TX_DESC_BUF_HEAD, +// val); +// +// val = rd32(hw, RNP10_RING_BASE + RING_OFFSET(i) + +// RNP_DMA_REG_TX_DESC_BUF_TAIL); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", "tail:", +// RNP10_RING_BASE + RING_OFFSET(i) + +// RNP_DMA_REG_TX_DESC_BUF_TAIL, +// val); +// } +// +// ret += sprintf(buf + ret, "to_1to4_p1:\n"); +// +// val = rd32(hw, RNP_ETH_1TO4_INST0_IN_PKTS); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "emac_in:", RNP_ETH_1TO4_INST0_IN_PKTS, val); +// +// val = rd32(hw, RNP_ETH_IN_0_TX_PKT_NUM(0)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "emac_send:", RNP_ETH_IN_0_TX_PKT_NUM(0), val); +// +// ret += sprintf(buf + ret, "to_1to4_p2:\n"); +// +// val = rd32(hw, RNP_ETH_IN_1_TX_PKT_NUM(0)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "sop_pkt:", RNP_ETH_IN_1_TX_PKT_NUM(0), val); +// +// val = rd32(hw, RNP_ETH_IN_2_TX_PKT_NUM(0)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "eop_pkt:", RNP_ETH_IN_2_TX_PKT_NUM(0), val); +// +// val = rd32(hw, RNP_ETH_IN_3_TX_PKT_NUM(0)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "send_terr:", RNP_ETH_IN_3_TX_PKT_NUM(0), val); +// +// ret += sprintf(buf + ret, "to_tx_trans(phy):\n"); +// +// val = rd32(hw, RNP_ETH_EMAC_TX_TO_PHY_PKTS(0)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "in:", RNP_ETH_EMAC_TX_TO_PHY_PKTS(0), val); +// +// val = rd32(hw, RNP_ETH_TXTRANS_PTP_PKT_NUM(0)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "out:", RNP_ETH_TXTRANS_PTP_PKT_NUM(0), val); +// +// ret += sprintf(buf + ret, "mac:\n"); +// +// val = rd32(hw, 0x60000); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "mac-tx-cfg:", 0x60000, val); +// +// val = rd32(hw, 0x1081c); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", "mac-tx:", 0x1081c, +// val); +// +// val = rd32(hw, 0x1087c); +// ret += sprintf(buf + ret, "\t 
%16s 0x%08x: %d\n", +// "underflow_err:", 0x1087c, val); +// +// val = rd32(hw, RNP_ETH_TX_DEBUG(0)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "port0_txtrans_sop:", RNP_ETH_TX_DEBUG(0), val); +// +// val = rd32(hw, RNP_ETH_TX_DEBUG(4)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "port0_txtrans_eop:", RNP_ETH_TX_DEBUG(4), val); +// +// val = rd32(hw, RNP_ETH_TX_DEBUG(13)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "tx_empty:", RNP_ETH_TX_DEBUG(13), val); +// +// val = rd32(hw, RNP_ETH_TX_DEBUG(14)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: 0x%x\n", +// "tx_prog_full:", RNP_ETH_TX_DEBUG(14), val); +// +// val = rd32(hw, RNP_ETH_TX_DEBUG(15)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: 0x%x\n", +// "tx_full:", RNP_ETH_TX_DEBUG(15), val); +// +// return ret; +//} + +//static DEVICE_ATTR(tx_counter, S_IRUGO | S_IWUSR, show_tx_counter, NULL); + +//static ssize_t show_rx_counter(struct device *dev, +// struct device_attribute *attr, char *buf) +//{ +// u32 val = 0, port = 0; +// int ret = 0; +// struct net_device *netdev = to_net_device(dev); +// struct rnp_adapter *adapter = netdev_priv(netdev); +// struct rnp_hw *hw = &adapter->hw; +// +// ret += sprintf(buf + ret, "rx counters\n"); +// for (port = 0; port < 4; port++) { +// ret += sprintf(buf + ret, "emac_rx_trans (port:%d):\n", port); +// +// val = rd32(hw, RNP_XLMAC + 0x900 + port * 0x10000); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "mac-pkts:", RNP_XLMAC + 0x900 + port * 0x10000, +// val); +// +// val = rd32(hw, RNP_RXTRANS_RX_PKTS(port)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "pkts:", RNP_RXTRANS_RX_PKTS(port), val); +// +// val = rd32(hw, RNP_RXTRANS_DROP_PKTS(port)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "drop:", RNP_RXTRANS_DROP_PKTS(port), val); +// +// val = rd32(hw, RNP_RXTRANS_WDT_ERR_PKTS(port)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "wdt_err:", RNP_RXTRANS_WDT_ERR_PKTS(port), val); +// +// val = rd32(hw, RNP_RXTRANS_CODE_ERR_PKTS(port)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "code_err:", RNP_RXTRANS_CODE_ERR_PKTS(port), +// val); +// +// val = rd32(hw, RNP_RXTRANS_CRC_ERR_PKTS(port)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "crc_err:", RNP_RXTRANS_CRC_ERR_PKTS(port), val); +// +// val = rd32(hw, RNP_RXTRANS_SLEN_ERR_PKTS(port)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "slen_err:", RNP_RXTRANS_SLEN_ERR_PKTS(port), +// val); +// +// val = rd32(hw, RNP_RXTRANS_GLEN_ERR_PKTS(port)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "glen_err:", RNP_RXTRANS_GLEN_ERR_PKTS(port), +// val); +// +// val = rd32(hw, RNP_RXTRANS_IPH_ERR_PKTS(port)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "iph_err:", RNP_RXTRANS_IPH_ERR_PKTS(port), val); +// +// val = rd32(hw, RNP_RXTRANS_CSUM_ERR_PKTS(port)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "csum_err:", RNP_RXTRANS_CSUM_ERR_PKTS(port), +// val); +// +// val = rd32(hw, RNP_RXTRANS_LEN_ERR_PKTS(port)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "len_err:", RNP_RXTRANS_LEN_ERR_PKTS(port), val); +// +// val = rd32(hw, RNP_RXTRANS_CUT_ERR_PKTS(port)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "trans_cut_err:", RNP_RXTRANS_CUT_ERR_PKTS(port), +// val); +// +// val = rd32(hw, RNP_RXTRANS_EXCEPT_BYTES(port)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "expt_byte_err:", RNP_RXTRANS_EXCEPT_BYTES(port), +// val); +// +// val = rd32(hw, 
RNP_RXTRANS_G1600_BYTES_PKTS(port)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// ">1600Byte:", RNP_RXTRANS_G1600_BYTES_PKTS(port), +// val); +// } +// +// ret += sprintf(buf + ret, "gather:\n"); +// val = rd32(hw, RNP_ETH_TOTAL_GAT_RX_PKT_NUM); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "total_in_pkts:", RNP_ETH_TOTAL_GAT_RX_PKT_NUM, val); +// +// port = 0; +// val = rd32(hw, RNP_ETH_RX_PKT_NUM(port)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "to_nxt_mdodule:", RNP_ETH_RX_PKT_NUM(port), val); +// +// for (port = 0; port < 4; port++) { +// u8 pname[16] = { 0 }; +// val = rd32(hw, RNP_ETH_RX_PKT_NUM(port)); +// sprintf(pname, "p%d-rx:", port); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", pname, +// RNP_ETH_RX_PKT_NUM(port), val); +// } +// +// for (port = 0; port < 4; port++) { +// u8 pname[16] = { 0 }; +// val = rd32(hw, RNP_ETH_RX_DROP_PKT_NUM(port)); +// sprintf(pname, "p%d-drop:", port); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", pname, +// RNP_ETH_RX_DROP_PKT_NUM(port), val); +// } +// +// ret += sprintf(buf + ret, "debug:\n"); +// val = rd32(hw, RNP_ETH_RX_DEBUG(10)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "data_eop:", RNP_ETH_RX_DEBUG(10), val); +// val = rd32(hw, RNP_ETH_RX_DEBUG(11)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "data_descs:", RNP_ETH_RX_DEBUG(11), val); +// val = rd32(hw, RNP_ETH_RX_DEBUG(12)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "data_desc_sop:", RNP_ETH_RX_DEBUG(12), val); +// val = rd32(hw, RNP_ETH_RX_DEBUG(13)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "data_desc_eop:", RNP_ETH_RX_DEBUG(13), val); +// val = rd32(hw, RNP_ETH_RX_DEBUG(14)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "4to1_gather_sop:", RNP_ETH_RX_DEBUG(14), val); +// val = rd32(hw, RNP_ETH_RX_DEBUG(15)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "4to1_gather_eop:", RNP_ETH_RX_DEBUG(15), val); +// +// ret += sprintf(buf + ret, "ip-parse:\n"); +// +// val = rd32(hw, RNP_ETH_PKT_EGRESS_NUM); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "pkg_egree:", RNP_ETH_PKT_EGRESS_NUM, val); +// +// val = rd32(hw, RNP_ETH_PKT_IP_HDR_LEN_ERR_NUM); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "L3_len_err:", RNP_ETH_PKT_IP_HDR_LEN_ERR_NUM, val); +// +// val = rd32(hw, RNP_ETH_PKT_IP_PKT_LEN_ERR_NUM); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "ip_hdr_err:", RNP_ETH_PKT_IP_PKT_LEN_ERR_NUM, val); +// +// val = rd32(hw, RNP_ETH_PKT_L3_HDR_CHK_ERR_NUM); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "l3-csum-err:", RNP_ETH_PKT_L3_HDR_CHK_ERR_NUM, val); +// +// val = rd32(hw, RNP_ETH_PKT_L4_HDR_CHK_ERR_NUM); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "l4-csum-err:", RNP_ETH_PKT_L4_HDR_CHK_ERR_NUM, val); +// +// val = rd32(hw, RNP_ETH_PKT_SCTP_CHK_ERR_NUM); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "sctp-err:", RNP_ETH_PKT_SCTP_CHK_ERR_NUM, val); +// +// val = rd32(hw, RNP_ETH_PKT_VLAN_ERR_NUM); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "vlan-err:", RNP_ETH_PKT_VLAN_ERR_NUM, val); +// +// val = rd32(hw, RNP_ETH_PKT_EXCEPT_SHORT_NUM); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "except_short_num:", RNP_ETH_PKT_EXCEPT_SHORT_NUM, val); +// +// val = rd32(hw, RNP_ETH_PKT_PTP_NUM); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "ptp:", RNP_ETH_PKT_PTP_NUM, val); +// +// ret += sprintf(buf + ret, "to-indecap:\n"); +// +// val = rd32(hw, 
RNP_ETH_DECAP_PKT_IN_NUM); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "*in engin*:", RNP_ETH_DECAP_PKT_IN_NUM, val); +// +// val = rd32(hw, RNP_ETH_DECAP_PKT_OUT_NUM); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "*out engin*:", RNP_ETH_DECAP_PKT_OUT_NUM, val); +// +// val = rd32(hw, RNP_ETH_DECAP_DMAC_OUT_NUM); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "to-dma/host:", RNP_ETH_DECAP_DMAC_OUT_NUM, val); +// +// val = rd32(hw, RNP_ETH_DECAP_BMC_OUT_NUM); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "to-bmc:", RNP_ETH_DECAP_BMC_OUT_NUM, val); +// +// val = rd32(hw, RNP_ETH_DECAP_SW_OUT_NUM); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "to-switch:", RNP_ETH_DECAP_SW_OUT_NUM, val); +// +// val = rd32(hw, RNP_ETH_DECAP_MIRROR_OUT_NUM); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "bmc+host:", RNP_ETH_DECAP_MIRROR_OUT_NUM, val); +// +// val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(0x0)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "err_drop:", RNP_ETH_DECAP_PKT_DROP_NUM(0x0), val); +// +// val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(1)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "plicy_drop:", RNP_ETH_DECAP_PKT_DROP_NUM(1), val); +// +// val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(2)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "dmac_drop:", RNP_ETH_DECAP_PKT_DROP_NUM(2), val); +// +// val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(3)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "bmc_drop:", RNP_ETH_DECAP_PKT_DROP_NUM(3), val); +// +// val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(4)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "sw_drop:", RNP_ETH_DECAP_PKT_DROP_NUM(4), val); +// +// val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(5)); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "rm_vlane_num:", RNP_ETH_DECAP_PKT_DROP_NUM(5), val); +// +// ret += sprintf(buf + ret, "\npolicy-drop-reason:\n"); +// val = rd32(hw, RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(4)); +// ret += sprintf(buf + ret, "\t %30s 0x%08x: %d\n", "host_l2_match_drop:", +// RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(4), val); +// val = rd32(hw, RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(5)); +// ret += sprintf(buf + ret, "\t %30s 0x%08x: %d\n", +// "redir_input_match_drop:", +// RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(5), val); +// val = rd32(hw, RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(6)); +// ret += sprintf(buf + ret, "\t %30s 0x%08x: %d\n", +// "redir_etypt_match_drop:", +// RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(6), val); +// val = rd32(hw, RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(7)); +// ret += sprintf(buf + ret, "\t %30s 0x%08x: %d\n", +// "redir_tcp_sync_match_drop:", +// RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(7), val); +// val = rd32(hw, RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(8)); +// ret += sprintf(buf + ret, "\t %30s 0x%08x: %d\n", +// "redir_tuple5_match_drop:", +// RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(8), val); +// val = rd32(hw, RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(9)); +// ret += sprintf(buf + ret, "\t %30s 0x%08x: %d\n", +// "recdir_tcam_match_drop:", +// RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(9), val); +// +// ret += sprintf(buf + ret, "dma-2-host:\n"); +// +// val = rd32(hw, 0x264); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", "fifo equ:", 0x264, +// val); +// +// val = rd32(hw, 0x268); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", "fifo deq:", 0x268, +// val); +// +// val = rd32(hw, 0x114); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", +// "unexpt_abtring:", 0x114, val); +// +// val = rd32(hw, 0x288); +// ret += 
sprintf(buf + ret, "\t %16s 0x%08x: %d\n", "pci2host:", 0x288, +// val); +// +// for (port = 0; port < 4; port++) { +// ret += sprintf(buf + ret, "rx-ring%d:\n", port); +// +// val = rd32(hw, RNP10_RING_BASE + RING_OFFSET(port) + +// RNP_DMA_REG_RX_DESC_BUF_HEAD); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %u\n", "head:", +// RNP10_RING_BASE + RING_OFFSET(port) + +// RNP_DMA_REG_RX_DESC_BUF_HEAD, +// val); +// +// val = rd32(hw, RNP10_RING_BASE + RING_OFFSET(port) + +// RNP_DMA_REG_RX_DESC_BUF_TAIL); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %u\n", "tail:", +// RNP10_RING_BASE + RING_OFFSET(port) + +// RNP_DMA_REG_RX_DESC_BUF_TAIL, +// val); +// +// val = rd32(hw, RNP10_RING_BASE + RING_OFFSET(port) + +// RNP_DMA_REG_RX_DESC_BUF_LEN); +// ret += sprintf(buf + ret, "\t %16s 0x%08x: %u\n", "len:", +// RNP10_RING_BASE + RING_OFFSET(port) + +// RNP_DMA_REG_RX_DESC_BUF_LEN, +// val); +// } +// +// return ret; +//} + +//static DEVICE_ATTR(rx_counter, S_IRUGO | S_IWUSR, show_rx_counter, NULL); + +static ssize_t show_active_vid(struct device *dev, + struct device_attribute *attr, char *buf) +{ +#ifndef HAVE_VLAN_RX_REGISTER + u16 vid; +#endif + u16 current_vid = 0; + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + u8 vfnum = hw->max_vfs - 1; + /* use last-vf's table entry. the last one */ + + if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED)) { + current_vid = rd32(hw, RNP_DMA_PORT_VEB_VID_TBL(adapter->port, + vfnum)); + } + +#ifndef HAVE_VLAN_RX_REGISTER + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) { + ret += sprintf(buf + ret, "%u%s ", vid, + (current_vid == vid ? "*" : "")); + } +#endif + ret += sprintf(buf + ret, "\n"); + return ret; +} + +static ssize_t store_active_vid(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + u16 vid; + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); +#ifndef HAVE_VLAN_RX_REGISTER + struct rnp_hw *hw = &adapter->hw; + u8 vfnum = hw->max_vfs - 1; + int port = 0; +#endif + + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return -EIO; + + if (0 != kstrtou16(buf, 0, &vid)) + return -EINVAL; + +#ifndef HAVE_VLAN_RX_REGISTER + if ((vid < 4096) && test_bit(vid, adapter->active_vlans)) { + if (rd32(hw, RNP_DMA_VERSION) >= 0x20201231) { + for (port = 0; port < 4; port++) + wr32(hw, RNP_DMA_PORT_VEB_VID_TBL(port, vfnum), + vid); + } else { + wr32(hw, RNP_DMA_PORT_VEB_VID_TBL(adapter->port, vfnum), + vid); + } + err = 0; + } +#endif + + return err ? 
err : count; +} + +static inline int pn_sn_dlen(char *v, int v_len) +{ + int i, len = 0; + for (i = 0; i < v_len; i++) { + if (isascii(v[i])) { + len++; + } else { + break; + } + } + return len; +} + +static int rnp_mbx_get_pn_sn(struct rnp_hw *hw, char pn[33], char sn[33]) +{ + struct maintain_req *req; + void *dma_buf = NULL; + dma_addr_t dma_phy; + struct ucfg_mac_sn *cfg; + + int err = 0, bytes = sizeof(*req) + sizeof(struct ucfg_mac_sn); + + memset(pn, 0, 33); + memset(sn, 0, 33); + + dma_buf = + dma_alloc_coherent(&hw->pdev->dev, bytes, &dma_phy, GFP_KERNEL); + if (!dma_buf) { + printk("%s: no memory:%d!", __func__, bytes); + return -ENOMEM; + } + + req = (struct maintain_req *)dma_buf; + memset(dma_buf, 0, bytes); + cfg = (struct ucfg_mac_sn *)(req + 1); + req->magic = MAINTAIN_MAGIC; + req->cmd = 0; + req->arg0 = 3; + req->req_data_bytes = 0; + req->reply_bytes = bytes - sizeof(*req); + + err = rnp_maintain_req(hw, req->cmd, req->arg0, req->req_data_bytes, + req->reply_bytes, dma_phy); + if (err != 0) { + goto err_quit; + } + if (cfg->magic == MAC_SN_MAGIC) { + int sz = pn_sn_dlen(cfg->pn, 32); + if (sz) { + memcpy(pn, cfg->pn, sz); + pn[sz] = 0; + } + sz = pn_sn_dlen(cfg->sn, 32); + if (sz) { + memcpy(sn, cfg->sn, sz); + sn[sz] = 0; + } + } + +err_quit: + if (dma_buf) + dma_free_coherent(&hw->pdev->dev, bytes, dma_buf, dma_phy); + + return 0; +} + +static ssize_t show_own_vpd(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + char pn[33] = { 0 }, sn[33] = { 0 }; + + rnp_mbx_get_pn_sn(hw, pn, sn); + + ret += sprintf( + buf + ret, "Product Name: %s\n", + "Ethernet Controller N10 Series for 10GbE or 40GbE (Dual-port)"); + ret += sprintf(buf + ret, "[PN] Part number: %s\n", pn); + ret += sprintf(buf + ret, "[SN] Serial number: %s\n", sn); + + return ret; +} +static DEVICE_ATTR(own_vpd, S_IRUGO, show_own_vpd, NULL); + +static ssize_t show_port_idx(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + + ret += sprintf(buf, "%d\n", adapter->portid_of_card); + return ret; +} +static DEVICE_ATTR(port_idx, S_IRUGO | S_IRUSR, show_port_idx, NULL); + +static ssize_t show_debug_linkstat(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + ret += sprintf(buf, "%d %d dumy:0x%x up-flag:%d carry:%d\n", + adapter->link_up, adapter->hw.link, rd32(hw, 0xc), + adapter->flags & RNP_FLAG_NEED_LINK_UPDATE, + netif_carrier_ok(netdev)); + return ret; +} + +static DEVICE_ATTR(debug_linkstat, S_IRUGO | S_IRUSR, show_debug_linkstat, + NULL); + +static ssize_t show_sfp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (rnp_mbx_get_lane_stat(hw) != 0) { + ret += sprintf(buf, " IO Error\n"); + } else { + ret += sprintf( + buf, "mod-abs:%d\ntx-fault:%d\ntx-dis:%d\nrx-los:%d\n", + adapter->sfp.mod_abs, adapter->sfp.fault, + adapter->sfp.tx_dis, adapter->sfp.los); + } + + return ret; +} +static DEVICE_ATTR(sfp, S_IRUGO | S_IRUSR, show_sfp, NULL); + +static ssize_t 
store_pci(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int gen = 3, lanes = 8; + + if (count > 30) + return -EINVAL; + + if (sscanf(buf, "gen%dx%d", &gen, &lanes) != 2) { + printk("Error: invalid input. example: gen3x8\n"); + return -EINVAL; + } + if (gen > 3 || lanes > 8) + return -EINVAL; + + err = rnp_set_lane_fun(hw, LANE_FUN_PCI_LANE, gen, lanes, 0, 0); + + return err ? err : count; +} + +static ssize_t show_pci(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (rnp_mbx_get_lane_stat(hw) != 0) { + ret += sprintf(buf, " IO Error\n"); + } else { + ret += sprintf(buf, "gen%dx%d\n", hw->pci_gen, hw->pci_lanes); + } + + return ret; +} + +static DEVICE_ATTR(pci, S_IRUGO | S_IWUSR | S_IRUSR, show_pci, store_pci); + +static ssize_t store_sfp_tx_disable(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + long enable = 0; + + if (kstrtol(buf, 10, &enable)) { + return -EINVAL; + } + + err = rnp_set_lane_fun(hw, LANE_FUN_SFP_TX_DISABLE, !!enable, 0, 0, 0); + + return err ? err : count; +} + +static ssize_t show_sfp_tx_disable(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (rnp_mbx_get_lane_stat(hw) != 0) { + ret += sprintf(buf, " IO Error\n"); + } else { + ret += sprintf(buf, "%d\n", adapter->sfp.tx_dis); + } + + return ret; +} + +static DEVICE_ATTR(sfp_tx_disable, S_IRUGO | S_IWUSR | S_IRUSR, + show_sfp_tx_disable, store_sfp_tx_disable); + +static ssize_t store_link_traing(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + long enable = 0; + + if (kstrtol(buf, 10, &enable)) { + return -EINVAL; + } + + err = rnp_set_lane_fun(hw, LANE_FUN_LINK_TRAING, !!enable, 0, 0, 0); + + return err ? err : count; +} + +static ssize_t show_link_traing(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (rnp_mbx_get_lane_stat(hw) != 0) { + ret += sprintf(buf, " IO Error\n"); + } else { + ret += sprintf(buf, "%d\n", adapter->link_traing); + } + + return ret; +} + +static DEVICE_ATTR(link_traing, S_IRUGO | S_IWUSR | S_IRUSR, show_link_traing, + store_link_traing); + +static ssize_t store_fec(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + long enable = 0; + + if (kstrtol(buf, 10, &enable)) { + return -EINVAL; + } + + err = rnp_set_lane_fun(hw, LANE_FUN_FEC, !!enable, 0, 0, 0); + + return err ? 
err : count; +} + +static ssize_t show_fec(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (rnp_mbx_get_lane_stat(hw) != 0) { + ret += sprintf(buf, " IO Error\n"); + } else { + ret += sprintf(buf, "%d\n", adapter->fec); + } + + return ret; +} + +static DEVICE_ATTR(fec, S_IRUGO | S_IWUSR | S_IRUSR, show_fec, store_fec); + +static ssize_t store_pcs(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + u32 reg_hi = 0, reg_lo = 0, pcs_base_regs = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int input_arg_cnt; + u32 pcs_phy_regs[] = { + 0x00040000, 0x00041000, 0x00042000, 0x00043000, + 0x00040000, 0x00041000, 0x00042000, 0x00043000, + }; + + if (count > 64) { + printk("Error: Input size >100: too large\n"); + return -EINVAL; + } + + input_arg_cnt = sscanf(buf, "%u %x %x", &adapter->sysfs_pcs_lane_num, + &adapter->sysfs_bar4_reg_addr, + &adapter->sysfs_bar4_reg_val); + + if (input_arg_cnt != 2 && input_arg_cnt != 3) { + printk("Error: Invalid Input: read lane x reg 0xXXX or write phy x reg " + "0xXXX val 0xXXX\n"); + return -EINVAL; + } + + if (adapter->sysfs_pcs_lane_num > 8) { + printk("Error: Invalid value. should in 0~7\n"); + return -EINVAL; + } + + switch (input_arg_cnt) { + case 2: + reg_hi = adapter->sysfs_bar4_reg_addr >> 8; + reg_lo = (adapter->sysfs_bar4_reg_addr & 0xff) << 2; + pcs_base_regs = pcs_phy_regs[adapter->sysfs_pcs_lane_num]; + wr32(hw, pcs_base_regs + (0xff << 2), reg_hi); + adapter->sysfs_bar4_reg_val = rd32(hw, pcs_base_regs + reg_lo); + break; + case 3: + reg_hi = adapter->sysfs_bar4_reg_addr >> 8; + reg_lo = (adapter->sysfs_bar4_reg_addr & 0xff) << 2; + pcs_base_regs = pcs_phy_regs[adapter->sysfs_pcs_lane_num]; + wr32(hw, pcs_base_regs + (0xff << 2), reg_hi); + wr32(hw, pcs_base_regs + reg_lo, adapter->sysfs_bar4_reg_val); + break; + default: + printk("Error: Invalid value. 
input_arg_cnt=%d\n", + input_arg_cnt); + break; + } + adapter->sysfs_input_arg_cnt = input_arg_cnt; + + return count; +} + +static ssize_t show_pcs(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + + switch (adapter->sysfs_input_arg_cnt) { + case 2: + ret += sprintf(buf, "lane%u pcs: 0x%x => 0x%x\n", + adapter->sysfs_pcs_lane_num, + adapter->sysfs_bar4_reg_addr, + adapter->sysfs_bar4_reg_val); + break; + case 3: + ret += sprintf(buf, "lane%u pcs: 0x%x <= 0x%x\n", + adapter->sysfs_pcs_lane_num, + adapter->sysfs_bar4_reg_addr, + adapter->sysfs_bar4_reg_val); + break; + default: + break; + } + + return ret; +} + +static DEVICE_ATTR(pcs_reg, S_IRUGO | S_IWUSR | S_IRUSR, show_pcs, store_pcs); + +static ssize_t phy_reg_read(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int val = 0; + int err = -EINVAL; + int phy_reg = adapter->sysfs_phy_reg; + + if (hw) { + if (adapter->sysfs_is_phy_ext_reg) { + err = rnp_mbx_phy_read(hw, phy_reg | PHY_EXT_REG_FLAG, + &val); + } else { + err = rnp_mbx_phy_read(hw, phy_reg, &val); + } + } + + if (err) { + return 0; + } else { + return sprintf(buf, "phy %s 0x%04x : 0x%04x\n", + adapter->sysfs_is_phy_ext_reg ? "ext reg" : + "reg", + phy_reg, val & 0xffff); + } +} + +static ssize_t phy_reg_write(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int i = 0, argc = 0, err = -EINVAL; + char argv[3][16]; + unsigned long val[3] = { 0 }; + int phy_reg = 0; + + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + memset(argv, 0, sizeof(argv)); + argc = sscanf(buf, "%15s %15s %15s", argv[0], argv[1], argv[2]); + + if (argc < 1) { + return -EINVAL; + } + + adapter->sysfs_is_phy_ext_reg = 0; + + if (strcmp(argv[0], "ext") == 0) { + adapter->sysfs_is_phy_ext_reg = 1; + } else { + if (kstrtoul(argv[0], 0, &val[0])) { + return -EINVAL; + } + } + + for (i = 1; i < argc; i++) { + if (kstrtoul(argv[i], 0, &val[i])) { + return -EINVAL; + } + } + + if (argc == 1) { + if (adapter->sysfs_is_phy_ext_reg) { + return -EINVAL; + } else { + /* set phy reg index */ + phy_reg = val[0]; + err = 0; + } + } + + if (argc == 2) { + if (adapter->sysfs_is_phy_ext_reg) { + /* set ext phy reg index */ + phy_reg = val[1]; + err = 0; + } else { + /* write phy reg */ + phy_reg = val[0]; + err = rnp_mbx_phy_write(hw, phy_reg, val[1]); + } + } + + if (argc == 3) { + if (adapter->sysfs_is_phy_ext_reg) { + /* write ext phy reg */ + phy_reg = val[1]; + err = rnp_mbx_phy_write(hw, phy_reg | PHY_EXT_REG_FLAG, + val[2]); + } else { + return -EINVAL; + } + } + + adapter->sysfs_phy_reg = phy_reg; + + return err ? err : count; +} + +static DEVICE_ATTR(phy_reg, 0664, phy_reg_read, phy_reg_write); + +static ssize_t store_prbs(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + long prbs = 0; + + if (kstrtol(buf, 10, &prbs)) { + return -EINVAL; + } + + err = rnp_set_lane_fun(hw, LANE_FUN_PRBS, prbs, 0, 0, 0); + + return err ? 
err : count; +} + +static DEVICE_ATTR(prbs, S_IRUGO | S_IWUSR | S_IRUSR, NULL, store_prbs); + +static ssize_t store_autoneg(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + long enable = 0; + + if (kstrtol(buf, 10, &enable)) { + return -EINVAL; + } + + err = rnp_set_lane_fun(hw, LANE_FUN_AN, !!enable, 0, 0, 0); + + return err ? err : count; +} + +static ssize_t show_autoneg(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (rnp_mbx_get_lane_stat(hw) != 0) { + ret += sprintf(buf, " IO Error\n"); + } else { + ret += sprintf(buf, "%d\n", adapter->an); + } + + return ret; +} + +static DEVICE_ATTR(autoneg, S_IRUGO | S_IWUSR | S_IRUSR, show_autoneg, + store_autoneg); + +static ssize_t store_lane_si(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int si_main = -1, si_pre = -1, si_post = -1, si_txboost = -1; + int cnt; + + if (rnp_mbx_get_lane_stat(hw) != 0) { + printk("Error: rnp_mbx_get_lane_stat failed\n"); + return -EIO; + } + if (count > 100) { + printk("Error: Input size >100: too large\n"); + return -EINVAL; + } + + if (hw->supported_link & + (RNP_LINK_SPEED_40GB_FULL | RNP_LINK_SPEED_25GB_FULL)) { + u32 lane0_main, lane0_pre, lane0_post, lane0_boost; + u32 lane1_main, lane1_pre, lane1_post, lane1_boost; + u32 lane2_main, lane2_pre, lane2_post, lane2_boost; + u32 lane3_main, lane3_pre, lane3_post, lane3_boost; + + cnt = sscanf(buf, + "%u %u %u %u,%u %u %u %u,%u %u %u %u,%u %u %u %u", + &lane0_main, &lane0_pre, &lane0_post, &lane0_boost, + &lane1_main, &lane1_pre, &lane1_post, &lane1_boost, + &lane2_main, &lane2_pre, &lane2_post, &lane2_boost, + &lane3_main, &lane3_pre, &lane3_post, + &lane3_boost); + if (cnt != 16) { + printk("Error: Invalid Input.\n" + " ,,,\n" + " laneX_si:
  \n\n"
+			       "   ie: 21 0 11 11,22 0 12 12,23 0 13 13,24 0 14 14 \n");
+
+			return -EINVAL;
+		}
+
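+		/*
+		 * Pack the four per-lane values into single registers:
+		 * main/pre/post use one byte per lane, tx_boost uses one
+		 * nibble per lane (lane0 in the lowest bits).
+		 */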
+		si_main = ((lane0_main & 0xff) << 0) |
+			  ((lane1_main & 0xff) << 8) |
+			  ((lane2_main & 0xff) << 16) |
+			  ((lane3_main & 0xff) << 24);
+		si_pre = ((lane0_pre & 0xff) << 0) | ((lane1_pre & 0xff) << 8) |
+			 ((lane2_pre & 0xff) << 16) |
+			 ((lane3_pre & 0xff) << 24);
+		si_post = ((lane0_post & 0xff) << 0) |
+			  ((lane1_post & 0xff) << 8) |
+			  ((lane2_post & 0xff) << 16) |
+			  ((lane3_post & 0xff) << 24);
+		si_txboost = ((lane0_boost & 0xf) << 0) |
+			     ((lane1_boost & 0xf) << 4) |
+			     ((lane2_boost & 0xf) << 8) |
+			     ((lane3_boost & 0xf) << 12);
+		printk("%s: main:0x%x pre:0x%x post:0x%x boost:0x%x\n",
+		       adapter->name, si_main, si_pre, si_post, si_txboost);
+	} else {
+		cnt = sscanf(buf, "%u %u %u %u", &si_main, &si_pre, &si_post,
+			     &si_txboost);
+		if (cnt != 4) {
+			printk("Error: Invalid Input: 
  \n");
+			return -EINVAL;
+		}
+		if (si_main > 63 || si_pre > 63 || si_post > 63) {
+			printk("Error: Invalid value. should in 0~63\n");
+			return -EINVAL;
+		}
+		if (si_txboost > 15) {
+			printk("Error: Invalid txboost. should be in 0~15\n");
+			return -EINVAL;
+		}
+	}
+	err = rnp_set_lane_fun(hw, LANE_FUN_SI, si_main, si_pre, si_post,
+			       si_txboost);
+
+	return err ? err : count;
+}
+
+static ssize_t show_lane_si(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	int ret = 0, i;
+	struct net_device *netdev = to_net_device(dev);
+	struct rnp_adapter *adapter = netdev_priv(netdev);
+	struct rnp_hw *hw = &adapter->hw;
+
+	if (rnp_mbx_get_lane_stat(hw) != 0) {
+		ret += sprintf(buf, " IO Error\n");
+	} else {
+		if (hw->supported_link &
+		    (RNP_LINK_SPEED_40GB_FULL | RNP_LINK_SPEED_25GB_FULL)) {
+			ret += sprintf(
+				buf + ret,
+				"main:0x%08x pre:0x%08x post:0x%08x tx_boost:0x%04x\n\n",
+				adapter->si.main, adapter->si.pre,
+				adapter->si.post, adapter->si.tx_boost);
+			for (i = 0; i < 4; i++) {
+				ret += sprintf(
+					buf + ret,
+					" lane%d main:%u pre:%u post:%u tx_boost:%u\n",
+					i, (adapter->si.main >> (i * 8)) & 0xff,
+					(adapter->si.pre >> (i * 8)) & 0xff,
+					(adapter->si.post >> (i * 8)) & 0xff,
+					(adapter->si.tx_boost >> (i * 4)) &
+						0xf);
+			}
+		} else {
+			ret += sprintf(
+				buf + ret,
+				"lane:%d main:%u pre:%u post:%u tx_boost:%u\n",
+				hw->nr_lane, adapter->si.main, adapter->si.pre,
+				adapter->si.post, adapter->si.tx_boost & 0xf);
+		}
+	}
+
+	return ret;
+}
+
+static DEVICE_ATTR(si, S_IRUGO | S_IWUSR | S_IRUSR, show_lane_si,
+		   store_lane_si);
+
+static ssize_t show_temperature(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnp_adapter *adapter = netdev_priv(netdev);
+	struct rnp_hw *hw = &adapter->hw;
+	int ret = 0, temp = 0, voltage = 0;
+
+	temp = rnp_mbx_get_temp(hw, &voltage);
+
+	ret += sprintf(buf, "temp:%d oC  volatage:%d mV\n", temp, voltage);
+	return ret;
+}
+
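+/*
+ * Walk up the PCI hierarchy from @dev and return the PCIe root port,
+ * or NULL if none is found.
+ */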
+static struct pci_dev *pcie_find_root_port_old(struct pci_dev *dev)
+{
+	while (1) {
+		if (!pci_is_pcie(dev))
+			break;
+		if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+			return dev;
+		if (!dev->bus->self)
+			break;
+		dev = dev->bus->self;
+	}
+	return NULL;
+}
+
+static ssize_t show_root_slot_info(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnp_adapter *adapter = netdev_priv(netdev);
+	int ret = 0;
+	struct pci_dev *root_pdev = pcie_find_root_port_old(adapter->pdev);
+
+	if (root_pdev) {
+		ret += sprintf(buf + ret, "%02x:%02x.%x\n",
+			       root_pdev->bus->number,
+			       PCI_SLOT(root_pdev->devfn),
+			       PCI_FUNC(root_pdev->devfn));
+	}
+	return ret;
+}
+
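+/*
+ * Enable or disable switch loopback from @sport_lane to @dport_lane:
+ * redirect the source lane's input policy to the destination lane,
+ * toggle promiscuous/allmulti in the MAC packet filter, and clear the
+ * ETH DMAC/multicast control register.
+ */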
+static int do_switch_loopback_set(struct rnp_adapter *adapter, int en,
+				  int sport_lane, int dport_lane)
+{
+	int v;
+	struct rnp_hw *hw = &adapter->hw;
+
+	printk("%s: %s %d -> %d en:%d\n", __func__,
+	       netdev_name(adapter->netdev), sport_lane, dport_lane, en);
+
+	if (en) {
+		adapter->flags |= RNP_FLAG_SWITCH_LOOPBACK_EN;
+	} else {
+		adapter->flags &= ~RNP_FLAG_SWITCH_LOOPBACK_EN;
+	}
+
+	wr32(hw, RNP_ETH_INPORT_POLICY_REG(sport_lane),
+	     BIT(29) | (dport_lane << 16));
+
+	v = rd32(hw, RNP_ETH_INPORT_POLICY_VAL);
+	if (en) {
+		v |= BIT(sport_lane);
+	} else {
+		v &= ~BIT(sport_lane);
+	}
+	wr32(hw, RNP_ETH_INPORT_POLICY_VAL, v);
+
+	v = mac_rd32(&hw->mac, RNP10_MAC_PKT_FLT);
+	if (en) {
+		v |= (RNP_RX_ALL | RNP_RX_ALL_MUL);
+	} else {
+		v &= ~(RNP_RX_ALL | RNP_RX_ALL_MUL);
+	}
+	mac_wr32(&hw->mac, RNP10_MAC_PKT_FLT, v);
+
+	eth_wr32(&hw->eth, RNP10_ETH_DMAC_MCSTCTRL, 0x0);
+
+	return 0;
+}
+
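+/*
+ * Set up (en=1) or tear down (en=0) a switch loopback pair between this
+ * adapter and the interface named in @peer_eth; both ports must be in
+ * the same PCI slot.
+ */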
+static ssize_t _switch_loopback(struct rnp_adapter *adapter,
+				const char *peer_eth, int en)
+{
+	struct net_device *peer_netdev = NULL;
+	struct rnp_adapter *peer_adapter = NULL;
+	char name[100];
+
+	strscpy(name, peer_eth, sizeof(name));
+	strim(name);
+
+	printk("%s: nr_lane:%d peer_lane:%s en:%d\n", __func__, 0, peer_eth,
+	       en);
+
+	peer_netdev = dev_get_by_name(&init_net, name);
+	if (!peer_netdev) {
+		printk("canot' find %s\n", name);
+		return -EINVAL;
+	}
+	peer_adapter = netdev_priv(peer_netdev);
+
+	if (PCI_SLOT(peer_adapter->pdev->devfn) !=
+	    PCI_SLOT(adapter->pdev->devfn)) {
+		printk("%s %s not in same slot\n", netdev_name(adapter->netdev),
+		       netdev_name(peer_adapter->netdev));
+		dev_put(peer_netdev);
+		return -EINVAL;
+	}
+
+	printk("%s: %s(%d)<->%s(%d)\n", __func__, netdev_name(adapter->netdev),
+	       0, netdev_name(peer_adapter->netdev), 0);
+
+	do_switch_loopback_set(adapter, en, 0,
+			       rnp_is_pf1(&peer_adapter->hw) ? 4 : 0);
+	do_switch_loopback_set(peer_adapter, en, 0,
+			       rnp_is_pf1(&adapter->hw) ? 4 : 0);
+
+	dev_put(peer_netdev);
+
+	return 0;
+}
+
+static ssize_t store_switch_loopback_on(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct rnp_adapter *adapter = netdev_priv(to_net_device(dev));
+
+	return _switch_loopback(adapter, buf, 1) == 0 ? count : -EINVAL;
+}
+
+static DEVICE_ATTR(switch_loopback_on, 0664, NULL, store_switch_loopback_on);
+
+static ssize_t store_switch_loopback_off(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t count)
+{
+	struct rnp_adapter *adapter = netdev_priv(to_net_device(dev));
+
+	return _switch_loopback(adapter, buf, 0) == 0 ? count : -EINVAL;
+}
+static DEVICE_ATTR(switch_loopback_off, 0664, NULL, store_switch_loopback_off);
+static DEVICE_ATTR(root_slot_info, 0644, show_root_slot_info, NULL);
+static DEVICE_ATTR(temperature, S_IRUGO | S_IRUSR, show_temperature, NULL);
+static DEVICE_ATTR(active_vid, 0644, show_active_vid, store_active_vid);
+static DEVICE_ATTR(tx_ring_info, 0644, show_tx_ring_info, store_tx_ring_info);
+static DEVICE_ATTR(rx_ring_info, 0644, show_rx_ring_info, store_rx_ring_info);
+static DEVICE_ATTR(para_info, 0644, show_para_info, NULL);
+static DEVICE_ATTR(tx_desc_info, 0644, show_tx_desc_info, store_tx_desc_info);
+static DEVICE_ATTR(rx_desc_info, 0644, show_rx_desc_info, store_rx_desc_info);
+static DEVICE_ATTR(ring_sriov_info, 0644, show_ring_sriov_info, store_ring_sriov_info);
+static DEVICE_ATTR(rx_drop_info, 0644, show_rx_drop_info, store_rx_drop_info);
+static DEVICE_ATTR(outer_vlan_info, 0644, show_outer_vlan_info,
+		   store_outer_vlan_info);
+static DEVICE_ATTR(tcp_sync_info, 0644, show_tcp_sync_info,
+		   store_tcp_sync_info);
+static DEVICE_ATTR(rx_skip_info, 0644, show_rx_skip_info, store_rx_skip_info);
+static DEVICE_ATTR(tx_stags_info, 0644, show_tx_stags_info,
+		   store_tx_stags_info);
+#ifdef TEST_PF_RESET
+static DEVICE_ATTR(test_info, 0644, show_test_info, store_test_info);
+#endif
+static DEVICE_ATTR(mac_vf_info, 0644, NULL, mac_vf_store);
+static DEVICE_ATTR(version_info, 0644, show_version_info, NULL);
+
+static struct attribute *dev_attrs[] = {
+	&dev_attr_tx_stags_info.attr,
+#ifdef TEST_PF_RESET
+	&dev_attr_test_info.attr,
+#endif
+	&dev_attr_version_info.attr,
+	&dev_attr_mac_vf_info.attr,
+	&dev_attr_root_slot_info.attr,
+	&dev_attr_active_vid.attr,
+	&dev_attr_rx_drop_info.attr,
+	&dev_attr_outer_vlan_info.attr,
+	&dev_attr_tcp_sync_info.attr,
+	&dev_attr_rx_skip_info.attr,
+	&dev_attr_tx_ring_info.attr,
+	&dev_attr_rx_ring_info.attr,
+	&dev_attr_para_info.attr,
+	&dev_attr_tx_desc_info.attr,
+	&dev_attr_rx_desc_info.attr,
+	&dev_attr_ring_sriov_info.attr,
+	//&dev_attr_tx_counter.attr,
+	//&dev_attr_rx_counter.attr,
+	&dev_attr_port_idx.attr,
+	&dev_attr_prbs.attr,
+	&dev_attr_pcs_reg.attr,
+	&dev_attr_debug_linkstat.attr,
+	&dev_attr_switch_loopback_off.attr,
+	&dev_attr_switch_loopback_on.attr,
+	NULL,
+};
+#ifndef NO_BIT_ATTRS
+static struct bin_attribute *dev_bin_attrs[] = {
+	&bin_attr_maintain,
+	NULL,
+};
+#endif
+static struct attribute_group dev_attr_grp = {
+	.attrs = dev_attrs,
+#ifndef NO_BIT_ATTRS
+	.bin_attrs = dev_bin_attrs,
+#endif
+};
+
+static struct attribute *vendor_dev_attrs[] = {
+	&dev_attr_si.attr,
+	&dev_attr_sfp.attr,
+	&dev_attr_autoneg.attr,
+	&dev_attr_own_vpd.attr,
+	&dev_attr_pci.attr,
+	&dev_attr_sfp_tx_disable.attr,
+	&dev_attr_link_traing.attr,
+	&dev_attr_fec.attr,
+	&dev_attr_temperature.attr,
+	&dev_attr_phy_reg.attr,
+	NULL,
+};
+
+static const struct attribute_group vendor_attr_grp = {
+	.name = "vendor",
+	.attrs = vendor_dev_attrs,
+};
+
+static const struct attribute_group *attr_grps[] = {
+	&dev_attr_grp,
+	&vendor_attr_grp,
+	NULL,
+};
+
+static void rnp_sysfs_del_adapter(struct rnp_adapter __maybe_unused *adapter)
+{
+#ifdef RNP_HWMON
+#ifndef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS
+	int i;
+
+	if (adapter == NULL)
+		return;
+
+	for (i = 0; i < adapter->rnp_hwmon_buff.n_hwmon; i++) {
+		device_remove_file(
+			pci_dev_to_dev(adapter->pdev),
+			&adapter->rnp_hwmon_buff.hwmon_list[i].dev_attr);
+	}
+
+	kfree(adapter->rnp_hwmon_buff.hwmon_list);
+
+	if (adapter->rnp_hwmon_buff.device)
+		hwmon_device_unregister(adapter->rnp_hwmon_buff.device);
+#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */
+#endif /* RNP_HWMON */
+}
+
+/* called from rnp_main.c */
+void rnp_sysfs_exit(struct rnp_adapter *adapter)
+{
+	rnp_sysfs_del_adapter(adapter);
+	sysfs_remove_groups(&adapter->netdev->dev.kobj, &attr_grps[0]);
+}
+
+/* called from rnp_main.c */
+int rnp_sysfs_init(struct rnp_adapter *adapter)
+{
+	int rc = 0;
+	int flag;
+#ifdef RNP_HWMON
+#ifdef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS
+	struct hwmon_buff *rnp_hwmon;
+	struct device *hwmon_dev;
+#else
+	struct hwmon_buff *rnp_hwmon = &adapter->rnp_hwmon_buff;
+	int n_attrs;
+#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */
+	unsigned int i;
+#endif /* RNP_HWMON */
+
+	flag = sysfs_create_groups(&adapter->netdev->dev.kobj, &attr_grps[0]);
+	if (flag != 0) {
+		dev_err(&adapter->netdev->dev,
+			"sysfs_create_group failed:flag:%d\n", flag);
+		return flag;
+	}
+#ifdef RNP_HWMON
+	/* If this method isn't defined we don't support thermals */
+	if (adapter->hw.ops.init_thermal_sensor_thresh == NULL) {
+		goto no_thermal;
+	}
+
+	/* Don't create thermal hwmon interface if no sensors present */
+	if (adapter->hw.ops.init_thermal_sensor_thresh(&adapter->hw))
+		goto no_thermal;
+
+#ifdef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS
+	rnp_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*rnp_hwmon),
+				 GFP_KERNEL);
+
+	if (!rnp_hwmon) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	adapter->rnp_hwmon_buff = rnp_hwmon;
+#else
+	/*
+	 * Allocate space for the maximum number of attributes:
+	 * max num sensors * values (loc, temp, max, caution)
+	 */
+	n_attrs = RNP_MAX_SENSORS * 4;
+	rnp_hwmon->hwmon_list =
+		kcalloc(n_attrs, sizeof(struct hwmon_attr), GFP_KERNEL);
+
+	if (!rnp_hwmon->hwmon_list) {
+		rc = -ENOMEM;
+		goto err;
+	}
+#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */
+
+	for (i = 0; i < RNP_MAX_SENSORS; i++) {
+		/*
+		 * Only create hwmon sysfs entries for sensors that have
+		 * meaningful data.
+		 */
+		if (adapter->hw.thermal_sensor_data.sensor[i].location == 0)
+			continue;
+
+		/* Bail if any hwmon attr struct fails to initialize */
+		rc = rnp_add_hwmon_attr(adapter, i, RNP_HWMON_TYPE_CAUTION);
+		if (rc)
+			goto err;
+		rc = rnp_add_hwmon_attr(adapter, i, RNP_HWMON_TYPE_LOC);
+		if (rc)
+			goto err;
+		rc = rnp_add_hwmon_attr(adapter, i, RNP_HWMON_TYPE_TEMP);
+		if (rc)
+			goto err;
+		rc = rnp_add_hwmon_attr(adapter, i, RNP_HWMON_TYPE_MAX);
+		if (rc)
+			goto err;
+	}
+
+#ifdef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS
+	rnp_hwmon->groups[0] = &rnp_hwmon->group;
+	rnp_hwmon->group.attrs = rnp_hwmon->attrs;
+
+	hwmon_dev = devm_hwmon_device_register_with_groups(
+		&adapter->pdev->dev, "rnp", rnp_hwmon, rnp_hwmon->groups);
+
+	if (IS_ERR(hwmon_dev)) {
+		rc = PTR_ERR(hwmon_dev);
+		goto exit;
+	}
+
+#else
+	rnp_hwmon->device =
+		hwmon_device_register(pci_dev_to_dev(adapter->pdev));
+
+	if (IS_ERR(rnp_hwmon->device)) {
+		rc = PTR_ERR(rnp_hwmon->device);
+		goto err;
+	}
+
+#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */
+no_thermal:
+#endif /* RNP_HWMON */
+	goto exit;
+
+err:
+	rnp_sysfs_exit(adapter);
+exit:
+	return rc;
+}
diff --git a/drivers/net/ethernet/mucse/rnp/rnp_tc_u32_parse.h b/drivers/net/ethernet/mucse/rnp/rnp_tc_u32_parse.h
new file mode 100755
index 0000000000000000000000000000000000000000..c40c4c0542115f73577a301e6f6a1d26e071dca5
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnp/rnp_tc_u32_parse.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef __RNP_TC_U32_PARSE_H__
+#define __RNP_TC_U32_PARSE_H__
+#include "rnp.h"
+
+struct rnp_match_parser {
+	int off; /* skb offset, counted from the mac_type field at byte 12 */
+	/* parse the value/mask into the real filter value */
+	int (*val)(struct rnp_fdir_filter *f, __be32 val, __be32 mask);
+};
+static inline void ip_print(u32 ip, bool src_true)
+{
+	printk(KERN_DEBUG "%s_ip is %d.%d.%d.%d\n", src_true ? "src" : "dst",
+	       ip & 0xff, ip >> 8 & 0xff, ip >> 16 & 0xff, ip >> 24 & 0xff);
+}
+/* Ipv4 Rule Parse */
+static inline int rnp_fill_ipv4_src_ip(struct rnp_fdir_filter *f, __be32 val,
+				       __be32 mask)
+{
+	memcpy(&f->filter.formatted.src_ip[0], &val, sizeof(u32));
+	memcpy(&f->filter.formatted.src_ip_mask[0], &mask, sizeof(u32));
+
+	f->filter.formatted.flow_type = RNP_ATR_FLOW_TYPE_IPV4;
+	f->filter.layer2_formate.proto = htons(ETH_P_IP);
+
+	ip_print(f->filter.formatted.src_ip[0], true);
+	printk(KERN_DEBUG "ip mask is 0x%.2x\n",
+	       f->filter.formatted.src_ip_mask[0]);
+	return 0;
+}
+
+static inline int rnp_fill_ipv4_dst_ip(struct rnp_fdir_filter *f, __be32 val,
+				       __be32 mask)
+{
+	memcpy(&f->filter.formatted.dst_ip[0], &val, sizeof(u32));
+	memcpy(&f->filter.formatted.dst_ip_mask[0], &mask, sizeof(u32));
+
+	f->filter.formatted.flow_type = RNP_ATR_FLOW_TYPE_IPV4;
+	f->filter.layer2_formate.proto = htons(ETH_P_IP);
+
+	ip_print(f->filter.formatted.dst_ip[0], false);
+	printk(KERN_DEBUG "ip mask is 0x%.2x\n",
+	       f->filter.formatted.dst_ip_mask[0]);
+
+	return 0;
+}
+
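+/*
+ * IPv4 tc u32 match table: offsets follow the u32 selector layout for
+ * IPv4, i.e. 12 = source address, 16 = destination address within the
+ * IP header.
+ */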
+static const struct rnp_match_parser rnp_ipv4_parser[] = {
+	{ .off = 12, .val = rnp_fill_ipv4_src_ip },
+	{ .off = 16, .val = rnp_fill_ipv4_dst_ip },
+	{ .val = NULL }
+};
+
+#endif
diff --git a/drivers/net/ethernet/mucse/rnp/rnp_type.h b/drivers/net/ethernet/mucse/rnp/rnp_type.h
new file mode 100755
index 0000000000000000000000000000000000000000..06639d4173822158b34155363afa87cace80e459
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnp/rnp_type.h
@@ -0,0 +1,1291 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNP_TYPE_H_
+#define _RNP_TYPE_H_
+
+#include 
+#include 
+#include 
+
+//#define OPTM_WITH_LPAGE
+
+#if defined(CONFIG_MXGBE_FIX_VF_BUG) && !defined(FIX_VF_BUG)
+#define FIX_VF_BUG
+#endif
+#if defined(CONFIG_MXGBE) && !defined(N10)
+#define N10
+#endif
+
+#if defined(CONFIG_MXGBE_FIX_MAC_PADDIN) && !defined(FIX_MAC_PADDIN)
+#define FIX_MAC_PADDIN
+#endif
+
+#if defined(CONFIG_MXGBE_OPTM_WITH_LPAGE) && !defined(OPTM_WITH_LPAGE)
+#define OPTM_WITH_LPAGE
+#endif
+
+#if defined(CONFIG_MXGBE_MSIX_COUNT)
+#define RNP_N10_MSIX_VECTORS CONFIG_MXGBE_MSIX_COUNT
+#endif
+
+//#define DISABLE_PACKET_SPLIT
+
+/* On Kylin OS, consider enabling OPTM_WITH_LPAGE to reduce memory cost */
+#if (PAGE_SIZE < 8192)
+#ifdef OPTM_WITH_LPAGE
+/* OPTM_WITH_LPAGE cannot be used when PAGE_SIZE is smaller than 8192 */
+#undef OPTM_WITH_LPAGE
+#endif
+#endif
+
+/* not enabled by default */
+//#define VF_PROMISC_SUPPORT
+/*
+ * OPTM_WITH_LPAGE must never be defined along with
+ * CONFIG_RNP_DISABLE_PACKET_SPLIT
+ */
+#ifdef OPTM_WITH_LPAGE
+#ifdef CONFIG_RNP_DISABLE_PACKET_SPLIT
+#error "OPTM_WITH_LPAGE exclude from CONFIG_RNP_DISABLE_PACKET_SPLIT "
+#endif
+#endif
+
+#include "rnp_regs.h"
+#include "rnp_compat.h"
+
+/* Device IDs */
+#define PCI_VENDOR_ID_MUCSE 0x8848
+#define PCI_DEVICE_ID_N10_PF0 0x1000
+#define PCI_DEVICE_ID_N10_PF1 0x1001
+
+#define RNP_DEV_ID_N10_PF0 0x7001
+#define RNP_DEV_ID_N10_PF1 0x7002
+
+#define PCI_DEVICE_ID_N10 0x1000
+#define PCI_DEVICE_ID_N10_TP 0x1004
+#define PCI_DEVICE_ID_N10_X1 0x1002
+#define PCI_DEVICE_ID_N10C 0x1C00
+#define PCI_DEVICE_ID_N400 0x1001 /* N400  2-port */
+#define PCI_DEVICE_ID_N400C 0x1C01 /* N400C 2-port */
+#define PCI_DEVICE_ID_N400_X1 0x1003 /* N400  1-port */
+#define PCI_DEVICE_ID_N400C_X1 0x1C03 /* N400C 1-port */
+/* Wake Up Control */
+#define RNP_WUC_PME_EN 0x00000002 /* PME Enable */
+#define RNP_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define RNP_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion  */
+
+/* Wake Up Filter Control */
+#define RNP_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define RNP_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define RNP_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define RNP_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define RNP_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define RNP_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define RNP_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define RNP_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define RNP_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */
+
+#define RNP_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
+#define RNP_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define RNP_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define RNP_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define RNP_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define RNP_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
+#define RNP_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
+#define RNP_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
+#define RNP_WUFC_FLX_FILTERS_6 0x003F0000 /* Mask for 6 flex filters */
+#define RNP_WUFC_FLX_FILTERS_8 0x00FF0000 /* Mask for 8 flex filters */
+#define RNP_WUFC_FW_RST_WK 0x80000000 /* Ena wake on FW reset assertion */
+/* Mask for Ext. flex filters */
+#define RNP_WUFC_EXT_FLX_FILTERS 0x00300000
+#define RNP_WUFC_ALL_FILTERS 0x000F00FF /* Mask all 4 flex filters */
+#define RNP_WUFC_ALL_FILTERS_6 0x003F00FF /* Mask all 6 flex filters */
+#define RNP_WUFC_ALL_FILTERS_8 0x00FF00FF /* Mask all 8 flex filters */
+#define RNP_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
+
+#define RNP_MAX_SENSORS 1
+struct rnp_thermal_diode_data {
+	u8 location;
+	int temp;
+	u8 caution_thresh;
+	u8 max_op_thresh;
+};
+
+struct rnp_thermal_sensor_data {
+	struct rnp_thermal_diode_data sensor[RNP_MAX_SENSORS];
+};
+
+/* Proxy Status */
+#define RNP_PROXYS_EX 0x00000004 /* Exact packet received */
+#define RNP_PROXYS_ARP_DIR 0x00000020 /* ARP w/filter match received */
+#define RNP_PROXYS_NS 0x00000200 /* IPV6 NS received */
+#define RNP_PROXYS_NS_DIR 0x00000400 /* IPV6 NS w/DA match received */
+#define RNP_PROXYS_ARP 0x00000800 /* ARP request packet received */
+#define RNP_PROXYS_MLD 0x00001000 /* IPv6 MLD packet received */
+
+/* Proxying Filter Control */
+#define RNP_PROXYFC_ENABLE 0x00000001 /* Port Proxying Enable */
+#define RNP_PROXYFC_EX 0x00000004 /* Directed Exact Proxy Enable */
+#define RNP_PROXYFC_ARP_DIR 0x00000020 /* Directed ARP Proxy Enable */
+#define RNP_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */
+#define RNP_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Enable */
+#define RNP_PROXYFC_MLD 0x00000800 /* IPv6 MLD Proxy Enable */
+#define RNP_PROXYFC_NO_TCO 0x00008000 /* Ignore TCO packets */
+
+#define RNP_WUPL_LENGTH_MASK 0xFFFF
+
+/* max 4 in n10 */
+#define RNP_MAX_TRAFFIC_CLASS 4
+#define TSRN10_TX_DEFAULT_BURST 16
+
+#ifndef TSRN10_RX_DEFAULT_BURST
+#define TSRN10_RX_DEFAULT_BURST 16
+#endif
+
+#ifndef TSRN10_RX_DEFAULT_LINE
+#define TSRN10_RX_DEFAULT_LINE 64
+#endif
+
+#ifndef RNP_PKT_TIMEOUT
+#define RNP_PKT_TIMEOUT 30
+#endif
+
+#ifndef RNP_RX_PKT_POLL_BUDGET
+#define RNP_RX_PKT_POLL_BUDGET 64
+#endif
+
+#ifndef RNP_TX_PKT_POLL_BUDGET
+#define RNP_TX_PKT_POLL_BUDGET 0x30
+#endif
+
+#ifndef RNP_PKT_TIMEOUT_TX
+#define RNP_PKT_TIMEOUT_TX 100
+#endif
+/* VF Device IDs */
+#define RNP_DEV_ID_N10_PF0_VF 0x8001
+#define RNP_DEV_ID_N10_PF1_VF 0x8002
+
+#define RNP_DEV_ID_N10_PF0_VF_N 0x1010
+#define RNP_DEV_ID_N10_PF1_VF_N 0x1011
+
+/* Transmit Descriptor - Advanced */
+struct rnp_tx_desc {
+	union {
+		__le64 pkt_addr; /* Packet buffer address */
+		struct {
+			__le32 adr_lo;
+			__le32 adr_hi;
+		};
+	};
+	union {
+		__le64 vlan_cmd_bsz;
+		struct {
+			__le32 blen_mac_ip_len;
+			__le32 vlan_cmd;
+		};
+	};
+#define RNP_TXD_FLAGS_VLAN_PRIO_MASK 0xe000
+#define RNP_TX_FLAGS_VLAN_PRIO_SHIFT 13
+#define RNP_TX_FLAGS_VLAN_CFI_SHIFT 12
+#define RNP_TXD_VLAN_VALID (0x80000000)
+#define RNP_TXD_SVLAN_TYPE (0x02000000)
+#define RNP_TXD_VLAN_CTRL_NOP (0x00 << 13)
+#define RNP_TXD_VLAN_CTRL_RM_VLAN (0x20000000)
+#define RNP_TXD_VLAN_CTRL_INSERT_VLAN (0x40000000)
+#define RNP_TXD_L4_CSUM (0x10000000) /* udp tcp sctp csum */
+#define RNP_TXD_IP_CSUM (0x8000000)
+#define RNP_TXD_TUNNEL_MASK (0x3000000)
+#define RNP_TXD_TUNNEL_VXLAN (0x1000000)
+#define RNP_TXD_TUNNEL_NVGRE (0x2000000)
+#define RNP_TXD_L4_TYPE_UDP (0xc00000)
+#define RNP_TXD_L4_TYPE_TCP (0x400000)
+#define RNP_TXD_L4_TYPE_SCTP (0x800000)
+#define RNP_TXD_FLAG_IPv4 (0)
+#define RNP_TXD_FLAG_IPv6 (0x200000)
+#define RNP_TXD_FLAG_TSO (0x100000)
+#define RNP_TXD_FLAG_PTP (0x4000000)
+#define RNP_TXD_CMD_RS (0x040000)
+#define RNP_TXD_CMD_INNER_VLAN (0x08000000)
+#define RNP_TXD_STAT_DD (0x020000)
+#define RNP_TXD_CMD_EOP (0x010000)
+#define RNP_TXD_PAD_CTRL (0x01000000)
+};
+
+struct rnp_tx_ctx_desc {
+	__le32 mss_len_vf_num;
+	__le32 inner_vlan_tunnel_len;
+#define VF_VEB_MARK (1 << 24) /* bit 56 */
+#define VF_VEB_IGNORE_VLAN (1 << 25) /* bit 57 */
+	__le32 resv;
+	__le32 resv_cmd;
+#define RNP_TXD_FLAG_TO_RPU (1 << 15)
+#define RNP_TXD_SMAC_CTRL_NOP (0x00 << 12)
+#define RNP_TXD_SMAC_CTRL_REPLACE_MACADDR0 (0x02 << 12)
+#define RNP_TXD_SMAC_CTRL_REPLACE_MACADDR1 (0x06 << 12)
+#define RNP_TXD_CTX_VLAN_CTRL_NOP (0x00 << 10)
+#define RNP_TXD_CTX_VLAN_CTRL_RM_VLAN (0x01 << 10)
+#define RNP_TXD_CTX_VLAN_CTRL_INSERT_VLAN (0x02 << 10)
+#define RNP_TXD_MTI_CRC_PAD_CTRL (0x01000000)
+#define RNP_TXD_CTX_CTRL_DESC (0x080000)
+#define RNP_TXD_CMD_RS (0x040000)
+#define RNP_TXD_STAT_DD (0x020000)
+};
+
+/* Receive Descriptor - Advanced */
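+/*
+ * The first layout is used when posting the descriptor (packet buffer
+ * address); the .wb struct is the hardware write-back format (RSS hash,
+ * lengths, VLAN and status/command bits).
+ */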
+union rnp_rx_desc {
+	struct {
+		union {
+			__le64 pkt_addr; /* Packet buffer address */
+			struct {
+				__le32 addr_lo;
+				__le32 addr_hi;
+			};
+		};
+		__le32 resv;
+		__le32 resv_cmd;
+#define RNP_RXD_FLAG_RS (0)
+	};
+
+	struct {
+		__le32 rss_hash;
+		__le16 mark;
+		__le16 rev1;
+#define RNP_RX_L3_TYPE_MASK (1 << 15) /* 1 is ipv4 */
+#define VEB_VF_PKG (1 << 0) /* bit 48 */
+#define VEB_VF_IGNORE_VLAN (1 << 1) /* bit 49 */
+#define REV_OUTER_VLAN (1 << 5)
+		__le16 len;
+		__le16 padding_len;
+		__le16 vlan;
+		__le16 cmd;
+#define RNP_RXD_STAT_VLAN_VALID (1 << 15)
+#define RNP_RXD_STAT_STAG (0x01 << 14)
+#define RNP_RXD_STAT_TUNNEL_NVGRE (0x02 << 13)
+#define RNP_RXD_STAT_TUNNEL_VXLAN (0x01 << 13)
+#define RNP_RXD_STAT_TUNNEL_MASK (0x03 << 13)
+#define RNP_RXD_STAT_ERR_MASK (0x1f << 8)
+#define RNP_RXD_STAT_SCTP_MASK (0x04 << 8)
+#define RNP_RXD_STAT_L4_MASK (0x02 << 8)
+#define RNP_RXD_STAT_L4_SCTP (0x02 << 6)
+#define RNP_RXD_STAT_L4_TCP (0x01 << 6)
+#define RNP_RXD_STAT_L4_UDP (0x03 << 6)
+#define RNP_RXD_STAT_IPV6 (1 << 5)
+#define RNP_RXD_STAT_IPV4 (0 << 5)
+#define RNP_RXD_STAT_PTP (1 << 4)
+#define RNP_RXD_STAT_DD (1 << 1)
+#define RNP_RXD_STAT_EOP (1 << 0)
+	} wb;
+} __packed;
+
+/* Host Interface Command Structures */
+struct rnp_hic_hdr {
+	u8 cmd;
+	u8 buf_len;
+	union {
+		u8 cmd_resv;
+		u8 ret_status;
+	} cmd_or_resp;
+	u8 checksum;
+};
+
+struct rnp_hic_drv_info {
+	struct rnp_hic_hdr hdr;
+	u8 port_num;
+	u8 ver_sub;
+	u8 ver_build;
+	u8 ver_min;
+	u8 ver_maj;
+	u8 pad; /* end spacing to ensure length is mult. of dword */
+	u16 pad2; /* end spacing to ensure length is mult. of dword2 */
+};
+
+/* Context descriptors */
+struct rnp_adv_tx_context_desc {
+	__le32 vlan_macip_lens;
+	__le32 seqnum_seed;
+	__le32 type_tucmd_mlhl;
+	__le32 mss_l4len_idx;
+};
+
+/* RAH */
+#define RNP_RAH_VIND_MASK 0x003C0000
+#define RNP_RAH_VIND_SHIFT 18
+#define RNP_RAH_AV 0x80000000
+#define RNP_CLEAR_VMDQ_ALL 0xFFFFFFFF
+
+/* Autonegotiation advertised speeds */
+typedef u32 rnp_autoneg_advertised;
+/* Link speed */
+typedef u32 rnp_link_speed;
+#define RNP_LINK_SPEED_UNKNOWN 0
+#define RNP_LINK_SPEED_10_FULL BIT(2)
+#define RNP_LINK_SPEED_100_FULL BIT(3)
+#define RNP_LINK_SPEED_1GB_FULL BIT(4)
+#define RNP_LINK_SPEED_10GB_FULL BIT(5)
+#define RNP_LINK_SPEED_40GB_FULL BIT(6)
+#define RNP_LINK_SPEED_25GB_FULL BIT(7)
+#define RNP_LINK_SPEED_50GB_FULL BIT(8)
+#define RNP_LINK_SPEED_100GB_FULL BIT(9)
+#define RNP_LINK_SPEED_10_HALF BIT(10)
+#define RNP_LINK_SPEED_100_HALF BIT(11)
+#define RNP_LINK_SPEED_1GB_HALF BIT(12)
+#define RNP_SFP_MODE_10G_LR BIT(13)
+#define RNP_SFP_MODE_10G_SR BIT(14)
+#define RNP_SFP_MODE_10G_LRM BIT(15)
+#define RNP_SFP_MODE_1G_T BIT(16)
+#define RNP_SFP_MODE_1G_KX BIT(17)
+#define RNP_SFP_MODE_1G_SX BIT(18)
+#define RNP_SFP_MODE_1G_LX BIT(19)
+#define RNP_SFP_MODE_40G_SR4 BIT(20)
+#define RNP_SFP_MODE_40G_CR4 BIT(21)
+#define RNP_SFP_MODE_40G_LR4 BIT(22)
+#define RNP_SFP_MODE_1G_CX BIT(23)
+#define RNP_SFP_MODE_10G_BASE_T BIT(24)
+#define RNP_SFP_MODE_FIBER_CHANNEL_SPEED BIT(25)
+#define RNP_SFP_CONNECTOR_DAC BIT(26)
+#define RNP_SFP_TO_SGMII BIT(27)
+#define RNP_SFP_25G_SR BIT(28)
+#define RNP_SFP_25G_KR BIT(29)
+#define RNP_SFP_25G_CR BIT(30)
+#define RNP_LINK_SPEED_10GB_HALF BIT(31)
+
+/* Flow Control Data Sheet defined values
+ * Calculation and defines taken from 802.1bb Annex O
+ */
+
+enum rnp_atr_flow_type {
+	RNP_ATR_FLOW_TYPE_IPV4 = 0x0,
+	RNP_ATR_FLOW_TYPE_UDPV4 = 0x1,
+	RNP_ATR_FLOW_TYPE_TCPV4 = 0x2,
+	RNP_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+	RNP_ATR_FLOW_TYPE_IPV6 = 0x4,
+	RNP_ATR_FLOW_TYPE_UDPV6 = 0x5,
+	RNP_ATR_FLOW_TYPE_TCPV6 = 0x6,
+	RNP_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+	RNP_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10,
+	RNP_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11,
+	RNP_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12,
+	RNP_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13,
+	RNP_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14,
+	RNP_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15,
+	RNP_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16,
+	RNP_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17,
+	RNP_ATR_FLOW_TYPE_ETHER = 0x18,
+	RNP_ATR_FLOW_TYPE_USERDEF = 0x19,
+};
+
+#define RNP_FDIR_DROP_QUEUE (200)
+
+enum {
+	fdir_mode_tcam = 0,
+	fdir_mode_tuple5,
+};
+/* Flow Director ATR input struct. */
+union rnp_atr_input {
+	/*
+	 * Byte layout in order, all values with MSB first:
+	 *
+	 * vm_pool      - 1 byte
+	 * flow_type    - 1 byte
+	 * vlan_id      - 2 bytes
+	 * src_ip       - 16 bytes
+	 * inner_mac    - 6 bytes
+	 * cloud_mode   - 2 bytes
+	 * tni_vni      - 4 bytes
+	 * dst_ip       - 16 bytes
+	 * src_port     - 2 bytes
+	 * dst_port     - 2 bytes
+	 * flex_bytes   - 2 bytes
+	 * bkt_hash     - 2 bytes
+	 */
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+		__be32 dst_ip[4];
+		__be32 dst_ip_mask[4];
+		__be32 src_ip[4];
+		__be32 src_ip_mask[4];
+		u8 inner_mac[6];
+		u8 inner_mac_mask[6];
+		__be16 tunnel_type;
+		__be32 tni_vni;
+		__be16 src_port;
+		__be16 src_port_mask;
+		__be16 dst_port;
+		__be16 dst_port_mask;
+		__be16 flex_bytes;
+		__be16 bkt_hash;
+	} formatted;
+	struct {
+		u8 vm_poll;
+		u8 flow_type;
+		u16 vlan_id;
+		__be16 proto;
+		__be16 resv;
+		__be32 nouse[12];
+	} layer2_formate;
+	__be32 dword_stream[14];
+};
+
+/* BitTimes (BT) conversion */
+#define RNP_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024))
+#define RNP_B2BT(BT) (BT * 8)
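+/* e.g. RNP_B2BT(1500) = 12000 bit-times, RNP_BT2KB(12000) = 2 (KB, rounded up) */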
+
+/* Calculate Delay to respond to PFC */
+#define RNP_PFC_D 672
+
+/* Calculate Cable Delay */
+#define RNP_CABLE_DC 5556 /* Delay Copper */
+#define RNP_CABLE_DO 5000 /* Delay Optical */
+
+/* Calculate Interface Delay X540 */
+#define RNP_PHY_DC 25600 /* Delay 10G BASET */
+#define RNP_MAC_DC 8192 /* Delay Copper XAUI interface */
+#define RNP_XAUI_DC (2 * 2048) /* Delay Copper Phy */
+
+#define RNP_ID_X540 (RNP_MAC_DC + RNP_XAUI_DC + RNP_PHY_DC)
+
+/* Calculate Interface Delay 82598, n10 */
+#define RNP_PHY_D 12800
+#define RNP_MAC_D 4096
+#define RNP_XAUI_D (2 * 1024)
+
+/* PHY MDI STANDARD CONFIG */
+#define RNP_MDI_PHY_ID1_OFFSET 2
+#define RNP_MDI_PHY_ID2_OFFSET 3
+#define RNP_MDI_PHY_ID_MASK 0xFFFFFC00U
+#define RNP_MDI_PHY_SPEED_SELECT1 0x0040
+#define RNP_MDI_PHY_DUPLEX 0x0100
+#define RNP_MDI_PHY_RESTART_AN 0x0200
+#define RNP_MDI_PHY_ANE 0x1000
+#define RNP_MDI_PHY_SPEED_SELECT0 0x2000
+#define RNP_MDI_PHY_RESET
+
+#define NGBE_PHY_RST_WAIT_PERIOD 50
+
+#define RNP_ID (RNP_MAC_D + RNP_XAUI_D + RNP_PHY_D)
+
+/* Calculate Delay incurred from higher layer */
+#define RNP_HD 6144
+
+/* Calculate PCI Bus delay for low thresholds */
+#define RNP_PCI_DELAY 10000
+
+/* Flow Director compressed ATR hash input struct */
+union rnp_atr_hash_dword {
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+	} formatted;
+	__be32 ip;
+	struct {
+		__be16 src;
+		__be16 dst;
+	} port;
+	__be16 flex_bytes;
+	__be32 dword;
+};
+
+enum rnp_eeprom_type {
+	rnp_eeprom_uninitialized = 0,
+	rnp_eeprom_spi,
+	rnp_flash,
+	rnp_eeprom_none /* No NVM support */
+};
+
+enum mac_type {
+	mac_dwc_xlg,
+	mac_dwc_g,
+
+};
+
+enum rnp_mac_type {
+	rnp_mac_unknown = 0,
+	rnp_mac_n10g_x8_40G,
+	rnp_mac_n10g_x2_10G,
+	rnp_mac_n10g_x4_10G,
+	rnp_mac_n10g_x8_10G,
+	rnp_mac_n10l_x8_1G,
+	rnp_num_macs
+};
+
+enum rnp_rss_type {
+	rnp_rss_uv440 = 0,
+	rnp_rss_uv3p,
+	rnp_rss_n10,
+	rnp_rss_n20,
+};
+
+enum rnp_hw_type {
+	rnp_hw_uv440 = 0,
+	rnp_hw_uv3p,
+	rnp_hw_n10,
+	rnp_hw_n20,
+	rnp_hw_n400
+};
+
+enum rnp_eth_type { rnp_eth_n10 = 0 };
+
+enum rnp_phy_type {
+	rnp_phy_unknown = 0,
+	rnp_phy_none,
+	rnp_phy_sfp,
+	rnp_phy_sfp_unsupported,
+	rnp_phy_generic,
+	rnp_phy_sfp_unknown,
+	rnp_phy_sgmii,
+};
+
+enum rnp_sfp_type {
+	rnp_sfp_type_da_cu = 0,
+	rnp_sfp_type_sr = 1,
+	rnp_sfp_type_lr = 2,
+	rnp_sfp_type_da_cu_core0 = 3,
+	rnp_sfp_type_da_cu_core1 = 4,
+	rnp_sfp_type_srlr_core0 = 5,
+	rnp_sfp_type_srlr_core1 = 6,
+	rnp_sfp_type_da_act_lmt_core0 = 7,
+	rnp_sfp_type_da_act_lmt_core1 = 8,
+	rnp_sfp_type_1g_cu_core0 = 9,
+	rnp_sfp_type_1g_cu_core1 = 10,
+	rnp_sfp_type_1g_sx_core0 = 11,
+	rnp_sfp_type_1g_sx_core1 = 12,
+	rnp_sfp_type_1g_lx_core0 = 13,
+	rnp_sfp_type_1g_lx_core1 = 14,
+	rnp_sfp_type_not_present = 0xFFFE,
+	rnp_sfp_type_unknown = 0xFFFF
+};
+
+enum rnp_media_type {
+	rnp_media_type_unknown = 0,
+	rnp_media_type_fiber,
+	rnp_media_type_copper,
+	rnp_media_type_backplane,
+	rnp_media_type_cx4,
+	rnp_media_type_da,
+	rnp_media_type_virtual
+
+};
+
+/* Flow Control Settings */
+enum rnp_fc_mode {
+	rnp_fc_none = 0,
+	rnp_fc_rx_pause,
+	rnp_fc_tx_pause,
+	rnp_fc_full,
+	rnp_fc_default
+};
+
+#define PAUSE_TX (0x1)
+#define PAUSE_RX (0x2)
+#define PAUSE_AUTO (0x10)
+
+#define ASYM_PAUSE BIT(11)
+#define SYM_PAUSE BIT(10)
+
+struct rnp_addr_filter_info {
+	u32 num_mc_addrs;
+	u32 rar_used_count;
+	u32 mta_in_use;
+	u32 overflow_promisc;
+	bool uc_set_promisc;
+	bool user_set_promisc;
+};
+
+/* Bus parameters */
+struct rnp_bus_info {
+	u16 func;
+	u16 lan_id;
+};
+
+/* Flow control parameters */
+struct rnp_fc_info {
+	u32 high_water[RNP_MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
+	u32 low_water[RNP_MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */
+	u16 pause_time; /* Flow Control Pause timer */
+	bool send_xon; /* Flow control send XON */
+	bool strict_ieee; /* Strict IEEE mode */
+	bool disable_fc_autoneg; /* Do not autonegotiate FC */
+	bool fc_was_autonegged; /* Is current_mode the result of autonegging? */
+	enum rnp_fc_mode current_mode; /* FC mode in effect */
+	u32 requested_mode; /* FC mode requested by caller */
+};
+
+/* Statistics counters collected by the MAC */
+struct rnp_hw_stats {
+	u64 dma_to_dma;
+	u64 dma_to_switch;
+	u64 mac_to_mac;
+	u64 switch_to_switch;
+	u64 mac_to_dma;
+	u64 switch_to_dma;
+	u64 vlan_add_cnt;
+	u64 vlan_strip_cnt;
+	/* === error */
+	u64 invalid_dropped_packets;
+	u64 filter_dropped_packets;
+	/* == drop == */
+	u64 rx_capabity_lost;
+	u64 host_l2_match_drop;
+	u64 redir_input_match_drop;
+	u64 redir_etype_match_drop;
+	u64 redir_tcp_syn_match_drop;
+	u64 redir_tuple5_match_drop;
+	u64 redir_tcam_match_drop;
+
+	u64 bmc_dropped_packets;
+	u64 switch_dropped_packets;
+	/* === rx */
+	u64 dma_to_host;
+	/* === dma-tx == */
+	u64 port0_tx_packets;
+	u64 port1_tx_packets;
+	u64 port2_tx_packets;
+	u64 port3_tx_packets;
+	/* === emac 1to4 tx == */
+	u64 in0_tx_pkts;
+	u64 in1_tx_pkts;
+	u64 in2_tx_pkts;
+	u64 in3_tx_pkts;
+	/* === phy tx == */
+	u64 port0_to_phy_pkts;
+	u64 port1_to_phy_pkts;
+	u64 port2_to_phy_pkts;
+	u64 port3_to_phy_pkts;
+	/* === mac rx === */
+	u64 mac_rx_broadcast;
+	u64 mac_rx_multicast;
+	u64 mac_rx_pause_count;
+	u64 mac_tx_pause_count;
+	u64 tx_broadcast;
+	u64 tx_multicast;
+
+	u64 dma_rx_drop_cnt_0;
+	u64 dma_rx_drop_cnt_1;
+	u64 dma_rx_drop_cnt_2;
+	u64 dma_rx_drop_cnt_3;
+	u64 dma_rx_drop_cnt_4;
+	u64 dma_rx_drop_cnt_5;
+	u64 dma_rx_drop_cnt_6;
+	u64 dma_rx_drop_cnt_7;
+	u64 dbg_rx_err_cnt;
+};
+
+/* forward declaration */
+struct rnp_hw;
+struct rnp_eth_info;
+struct rnp_dma_info;
+struct rnp_mac_info;
+
+/* iterator type for walking multicast address lists */
+typedef u8 *(*rnp_mc_addr_itr)(struct rnp_hw *hw, u8 **mc_addr_ptr, u32 *vmdq);
+
+/* Function pointer table */
+struct rnp_eeprom_operations {
+	s32 (*init_params)(struct rnp_hw *hw);
+	s32 (*read)(struct rnp_hw *hw, u16, u16 *);
+	s32 (*read_buffer)(struct rnp_hw *, u16, u16, u16 *);
+	s32 (*write)(struct rnp_hw *, u16, u16);
+	s32 (*write_buffer)(struct rnp_hw *, u16, u16, u16 *);
+	s32 (*validate_checksum)(struct rnp_hw *, u16 *);
+	s32 (*update_checksum)(struct rnp_hw *);
+	u16 (*calc_checksum)(struct rnp_hw *);
+};
+
+/* add nic operations */
+struct rnp_eth_operations {
+	/* RAR, Multicast, VLAN */
+	s32 (*get_mac_addr)(struct rnp_eth_info *, u8 *);
+	s32 (*set_rar)(struct rnp_eth_info *, u32, u8 *, bool);
+	s32 (*clear_rar)(struct rnp_eth_info *, u32);
+	s32 (*set_vmdq)(struct rnp_eth_info *, u32, u32);
+	s32 (*clear_vmdq)(struct rnp_eth_info *, u32, u32);
+	s32 (*update_mc_addr_list)(struct rnp_eth_info *, struct net_device *,
+				   bool);
+	void (*clr_mc_addr)(struct rnp_eth_info *);
+	int (*set_rss_hfunc)(struct rnp_eth_info *, int hfunc);
+	void (*set_rss_key)(struct rnp_eth_info *, bool);
+	void (*set_rss_table)(struct rnp_eth_info *);
+	void (*set_rx_hash)(struct rnp_eth_info *, bool, bool);
+	/* ncsi */
+	void (*ncsi_set_vfta)(struct rnp_eth_info *);
+	void (*ncsi_set_uc_addr)(struct rnp_eth_info *);
+	void (*ncsi_set_mc_mta)(struct rnp_eth_info *);
+	void (*set_layer2_remapping)(struct rnp_eth_info *,
+				     union rnp_atr_input *, u16, u8, bool);
+	void (*clr_layer2_remapping)(struct rnp_eth_info *, u16);
+	void (*clr_all_layer2_remapping)(struct rnp_eth_info *);
+	void (*set_tuple5_remapping)(struct rnp_eth_info *,
+				     union rnp_atr_input *, u16, u8, bool);
+	void (*clr_tuple5_remapping)(struct rnp_eth_info *, u16);
+	void (*clr_all_tuple5_remapping)(struct rnp_eth_info *);
+	void (*set_tcp_sync_remapping)(struct rnp_eth_info *, int, bool, bool);
+	void (*set_rx_skip)(struct rnp_eth_info *, int, bool);
+	void (*set_min_max_packet)(struct rnp_eth_info *, int, int);
+	void (*set_vlan_strip)(struct rnp_eth_info *, u16, bool);
+	s32 (*set_vfta)(struct rnp_eth_info *, u32, bool);
+	void (*clr_vfta)(struct rnp_eth_info *);
+	void (*set_vlan_filter)(struct rnp_eth_info *, bool);
+	void (*set_outer_vlan_type)(struct rnp_eth_info *, int type);
+	void (*set_double_vlan)(struct rnp_eth_info *, bool);
+	void (*set_vxlan_port)(struct rnp_eth_info *, u32);
+	void (*set_vxlan_mode)(struct rnp_eth_info *, bool);
+	s32 (*set_fc_mode)(struct rnp_eth_info *);
+	void (*set_rx)(struct rnp_eth_info *, bool);
+	void (*set_fcs)(struct rnp_eth_info *, bool);
+	void (*set_vf_vlan_mode)(struct rnp_eth_info *, u16, int, bool);
+};
+
+enum {
+	rnp_driver_insmod,
+	rnp_driver_suspuse,
+	rnp_driver_force_control_mac,
+};
+
+struct rnp_hw_operations {
+	s32 (*init_hw)(struct rnp_hw *);
+	s32 (*reset_hw)(struct rnp_hw *);
+	s32 (*start_hw)(struct rnp_hw *);
+	void (*set_mtu)(struct rnp_hw *, int);
+	void (*set_vlan_filter_en)(struct rnp_hw *, bool);
+	void (*set_vlan_filter)(struct rnp_hw *, u16, bool, bool);
+	int (*set_veb_vlan_mask)(struct rnp_hw *, u16, int, bool);
+	void (*set_vf_vlan_filter)(struct rnp_hw *, u16, int, bool, bool);
+	void (*clr_vfta)(struct rnp_hw *);
+	void (*set_vlan_strip)(struct rnp_hw *, u16, bool);
+	void (*set_mac)(struct rnp_hw *, u8 *mac, bool);
+	void (*set_rx_mode)(struct rnp_hw *, struct net_device *netdev, bool);
+	void (*set_rar_with_vf)(struct rnp_hw *hw, u8 *mac, int, u32, bool);
+	void (*clr_rar)(struct rnp_hw *hw, int idx);
+	void (*clr_rar_all)(struct rnp_hw *hw);
+	void (*clr_vlan_veb)(struct rnp_hw *);
+	void (*set_txvlan_mode)(struct rnp_hw *, bool);
+	void (*set_tx_maxrate)(struct rnp_hw *, bool);
+	void (*set_fcs_mode)(struct rnp_hw *, bool);
+	void (*set_vxlan_port)(struct rnp_hw *, u32);
+	void (*set_vxlan_mode)(struct rnp_hw *, bool);
+	void (*set_mac_speed)(struct rnp_hw *, bool, u32, bool);
+	void (*set_mac_rx)(struct rnp_hw *, bool);
+	void (*update_sriov_info)(struct rnp_hw *);
+	void (*set_sriov_status)(struct rnp_hw *, bool);
+	void (*set_sriov_vf_mc)(struct rnp_hw *, u16);
+	void (*set_pause_mode)(struct rnp_hw *);
+	void (*get_pause_mode)(struct rnp_hw *);
+	void (*update_hw_info)(struct rnp_hw *);
+	void (*set_rx_hash)(struct rnp_hw *, bool, bool);
+	int (*set_rss_hfunc)(struct rnp_hw *, u8 hfunc);
+	void (*set_rss_key)(struct rnp_hw *, bool);
+	void (*set_rss_table)(struct rnp_hw *);
+	void (*set_mbx_link_event)(struct rnp_hw *, int);
+	void (*set_mbx_ifup)(struct rnp_hw *, int);
+	s32 (*get_thermal_sensor_data)(struct rnp_hw *);
+	s32 (*init_thermal_sensor_thresh)(struct rnp_hw *hw);
+	void (*disable_tx_laser)(struct rnp_hw *);
+	void (*enable_tx_laser)(struct rnp_hw *);
+	void (*flap_tx_laser)(struct rnp_hw *);
+	s32 (*check_link)(struct rnp_hw *, rnp_link_speed *, bool *, bool *,
+			  bool);
+	s32 (*setup_link)(struct rnp_hw *, rnp_link_speed, u32, u32, u32);
+	void (*clean_link)(struct rnp_hw *);
+	s32 (*get_link_capabilities)(struct rnp_hw *, rnp_link_speed *, bool *);
+	s32 (*init_rx_addrs)(struct rnp_hw *);
+	void (*set_layer2_remapping)(struct rnp_hw *, union rnp_atr_input *,
+				     u16, u8, bool);
+	void (*clr_layer2_remapping)(struct rnp_hw *, u16);
+	void (*clr_all_layer2_remapping)(struct rnp_hw *);
+	void (*set_tuple5_remapping)(struct rnp_hw *, union rnp_atr_input *,
+				     u16, u8, bool);
+	void (*clr_tuple5_remapping)(struct rnp_hw *, u16);
+	void (*clr_all_tuple5_remapping)(struct rnp_hw *);
+	void (*set_tcp_sync_remapping)(struct rnp_hw *, int queue, bool, bool);
+	void (*set_rx_skip)(struct rnp_hw *, int count, bool);
+	void (*set_outer_vlan_type)(struct rnp_hw *, int);
+	void (*update_hw_status)(struct rnp_hw *, struct rnp_hw_stats *,
+				 struct net_device_stats *);
+	void (*update_msix_count)(struct rnp_hw *, int msix_count);
+	void (*update_rx_drop)(struct rnp_hw *);
+	void (*setup_ethtool)(struct net_device *);
+	s32 (*phy_read_reg)(struct rnp_hw *, u32, u32, u16 *);
+	s32 (*phy_write_reg)(struct rnp_hw *, u32, u32, u16);
+	void (*setup_wol)(struct rnp_hw *, u32);
+	void (*set_vf_vlan_mode)(struct rnp_hw *, u16, int, bool);
+	void (*driver_status)(struct rnp_hw *, bool, int);
+};
+
+struct rnp_mac_operations {
+	void (*set_mac_rx)(struct rnp_mac_info *mac, bool);
+	void (*set_mac_speed)(struct rnp_mac_info *, bool, u32, bool);
+	void (*set_mac_fcs)(struct rnp_mac_info *mac, bool);
+	s32 (*set_fc_mode)(struct rnp_mac_info *mac);
+	void (*check_link)(struct rnp_mac_info *, rnp_link_speed *, bool *,
+			   bool);
+	void (*set_mac)(struct rnp_mac_info *, u8 *, int);
+	int (*mdio_write)(struct rnp_mac_info *, int phyreg, int phydata);
+	int (*mdio_read)(struct rnp_mac_info *, int phyreg, int *regvalue);
+	void (*pmt)(struct rnp_mac_info *, u32);
+};
+
+struct rnp_eeprom_info {
+	struct rnp_eeprom_operations ops;
+	enum rnp_eeprom_type type;
+	u32 semaphore_delay;
+	u16 word_size;
+	u16 address_bits;
+	u16 word_page_size;
+};
+
+struct rnp_dma_operations {
+	void (*set_tx_maxrate)(struct rnp_dma_info *dma, u16, u32);
+	void (*set_veb_mac)(struct rnp_dma_info *dma, u8 *, u32, u32);
+	/* only set own vlan */
+	void (*set_veb_vlan)(struct rnp_dma_info *dma, u16, u32);
+	void (*set_veb_vlan_mask)(struct rnp_dma_info *dma, u16, u16, int);
+	void (*clr_veb_all)(struct rnp_dma_info *dma);
+};
+
+struct rnp_dma_info {
+	struct rnp_dma_operations ops;
+	u8 __iomem *dma_base_addr;
+	u8 __iomem *dma_ring_addr;
+	void *back;
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	u32 dma_version;
+};
+
+#define RNP_MAX_MTA 128
+struct rnp_eth_info {
+	struct rnp_eth_operations ops;
+	u8 __iomem *eth_base_addr;
+	enum rnp_eth_type eth_type;
+	void *back;
+
+	u32 mta_shadow[RNP_MAX_MTA];
+	s32 mc_filter_type;
+	u32 mcft_size;
+	u32 vft_size;
+	u32 num_rar_entries;
+	u32 rar_highwater;
+	u32 rx_pb_size;
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	u32 reg_off;
+	u32 orig_autoc;
+	u32 cached_autoc;
+	u32 orig_autoc2;
+};
+
+struct rnp_nic_info {
+	u8 __iomem *nic_base_addr;
+};
+
+struct mii_regs {
+	unsigned int addr; /* MII Address */
+	unsigned int data; /* MII Data */
+	unsigned int addr_shift; /* MII address shift */
+	unsigned int reg_shift; /* MII reg shift */
+	unsigned int addr_mask; /* MII address mask */
+	unsigned int reg_mask; /* MII reg mask */
+	unsigned int clk_csr_shift;
+	unsigned int clk_csr_mask;
+};
+
+#define RNP_FLAGS_DOUBLE_RESET_REQUIRED 0x01
+#define RNP_FLAGS_INIT_MAC_ADDRESS 0x02
+struct rnp_mac_info {
+	struct rnp_mac_operations ops;
+	u8 __iomem *mac_addr;
+	void *back;
+	struct mii_regs mii;
+	int phy_addr;
+	int clk_csr;
+	enum rnp_mac_type type;
+	enum mac_type mac_type;
+	u8 addr[ETH_ALEN];
+	u8 perm_addr[ETH_ALEN];
+	/* prefix for World Wide Node Name (WWNN) */
+	u16 wwnn_prefix;
+	/* prefix for World Wide Port Name (WWPN) */
+	u16 wwpn_prefix;
+	u16 max_msix_vectors;
+	u32 mta_shadow[RNP_MAX_MTA];
+	s32 mc_filter_type;
+	u32 mcft_size;
+	u32 vft_size;
+	u32 num_rar_entries;
+	u32 rar_highwater;
+	u32 rx_pb_size;
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	u32 reg_off;
+	u32 orig_autoc;
+	u32 cached_autoc;
+	u32 orig_autoc2;
+	bool orig_link_settings_stored;
+	bool autotry_restart;
+	u8 mac_flags;
+};
+
+struct rnp_phy_info {
+	struct mdio_if_info mdio;
+	enum rnp_phy_type type;
+	u32 id;
+	u32 phy_addr;
+	bool is_mdix;
+	u8 mdix;
+	enum rnp_sfp_type sfp_type;
+	bool sfp_setup_needed;
+	u32 revision;
+	enum rnp_media_type media_type;
+	bool reset_disable;
+	rnp_autoneg_advertised autoneg_advertised;
+	bool smart_speed_active;
+	bool multispeed_fiber;
+	bool reset_if_overtemp;
+};
+
+#include "rnp_mbx.h"
+
+struct rnp_pcs_operations {
+	u32 (*read)(struct rnp_hw *hw, int num, u32 addr);
+	void (*write)(struct rnp_hw *hw, int num, u32 addr, u32 value);
+};
+
+struct rnp_mbx_operations {
+	s32 (*init_params)(struct rnp_hw *hw);
+	s32 (*read)(struct rnp_hw *, u32 *, u16, enum MBX_ID);
+	s32 (*write)(struct rnp_hw *, u32 *, u16, enum MBX_ID);
+	s32 (*read_posted)(struct rnp_hw *, u32 *, u16, enum MBX_ID);
+	s32 (*write_posted)(struct rnp_hw *, u32 *, u16, enum MBX_ID);
+	s32 (*check_for_msg)(struct rnp_hw *, enum MBX_ID);
+	s32 (*check_for_ack)(struct rnp_hw *, enum MBX_ID);
+	s32 (*configure)(struct rnp_hw *hw, int nr_vec, bool enable);
+};
+
+struct rnp_mbx_stats {
+	u32 msgs_tx;
+	u32 msgs_rx;
+	u32 acks;
+	u32 reqs;
+	u32 rsts;
+};
+
+struct rnp_pcs_info {
+	struct rnp_pcs_operations ops;
+	int pcs_count;
+};
+
+struct mbx_fw_cmd_reply;
+
+typedef void (*cookie_cb)(struct mbx_fw_cmd_reply *reply, void *priv);
+
+enum cookie_stat {
+	COOKIE_FREE = 0,
+	COOKIE_FREE_WAIT_TIMEOUT,
+	COOKIE_ALLOCED,
+};
+
+struct mbx_req_cookie {
+	u64 alloced_jiffies;
+	enum cookie_stat stat;
+	cookie_cb cb;
+	int timeout_jiffes;
+	int errcode;
+	wait_queue_head_t wait;
+	int done;
+	int priv_len;
+#define MAX_PRIV_LEN 64
+	char priv[MAX_PRIV_LEN];
+};
+
+struct mbx_req_cookie_pool {
+#define MAX_COOKIES_ITEMS (20 * 400)
+	struct mbx_req_cookie cookies[MAX_COOKIES_ITEMS];
+	int next_idx;
+};
+
+struct rnp_mbx_info {
+	struct rnp_mbx_operations ops;
+	struct rnp_mbx_stats stats;
+	u32 timeout;
+	u32 usec_delay;
+	u32 v2p_mailbox;
+	u16 size;
+	u16 vf_req[64];
+	u16 vf_ack[64];
+	u16 cpu_req;
+	u16 cpu_ack;
+	struct mutex lock;
+	bool other_irq_enabled;
+	int mbx_size;
+	int mbx_mem_size;
+#define MBX_FEATURE_NO_ZERO BIT(0)
+#define MBX_FEATURE_WRITE_DELAY BIT(1)
+	u32 mbx_feature;
+	/* cm3 <-> pf mbx */
+	u32 cpu_pf_shm_base;
+	u32 pf2cpu_mbox_ctrl;
+	u32 pf2cpu_mbox_mask;
+	u32 cpu_pf_mbox_mask;
+	u32 cpu2pf_mbox_vec;
+	/* pf <--> vf mbx */
+	u32 pf_vf_shm_base;
+	u32 pf2vf_mbox_ctrl_base;
+	u32 pf_vf_mbox_mask_lo;
+	u32 pf_vf_mbox_mask_hi;
+	u32 pf2vf_mbox_vec_base;
+	u32 vf2pf_mbox_vec_base;
+	u32 cpu_vf_share_ram;
+	int share_size;
+	struct mbx_req_cookie_pool cookie_pool;
+};
+
+struct vf_vebvlans {
+	struct list_head l;
+	bool free;
+	int veb_entry;
+	u16 vid;
+	u16 mask;
+};
+
+#define RNP_MBX_VF_CPU_SHM_PF_BASE (0xA8000)
+#define RNP_NCSI_MC_COUNT (11)
+#define RNP_NCSI_VLAN_COUNT (1)
+
+#define RNP_VF_CPU_SHM_BASE_NR62 (RNP_MBX_VF_CPU_SHM_PF_BASE + 62 * 64)
+struct ncsi_shm_info {
+	u32 valid;
+#define RNP_NCSI_SHM_VALID 0xa5000000
+#define RNP_NCSI_SHM_VALID_MASK 0xff000000
+#define RNP_MC_VALID BIT(0)
+#define RNP_UC_VALID BIT(1)
+#define RNP_VLAN_VALID BIT(2)
+
+	struct {
+		u32 uc_addr_lo;
+		u32 uc_addr_hi;
+	} uc;
+
+	struct {
+		u32 mc_addr_lo;
+		u32 mc_addr_hi;
+	} mc[RNP_NCSI_MC_COUNT];
+	u32 ncsi_vlan;
+};
+
+struct rnp_hw {
+	void *back;
+	u8 __iomem *hw_addr;
+	u8 __iomem *ring_msix_base;
+	u8 __iomem *rpu_addr;
+	u8 pfvfnum;
+	struct pci_dev *pdev;
+	u16 device_id;
+	u16 vendor_id;
+	u16 subsystem_device_id;
+	u16 subsystem_vendor_id;
+	char lane_mask;
+	u16 mac_type;
+	u16 phy_type;
+	int nr_lane;
+	u8 is_backplane : 1;
+	u8 is_sgmii : 1;
+	u8 force_10g_1g_speed_ablity : 1;
+	u8 force_speed_stat : 2;
+#define FORCE_SPEED_STAT_DISABLED 0
+#define FORCE_SPEED_STAT_1G 1
+#define FORCE_SPEED_STAT_10G 2
+	u8 rpu_en : 1;
+	u8 rpu_availble : 1;
+	u8 ncsi_en;
+	u8 ncsi_rar_entries;
+	u16 ncsi_mc_count;
+	u16 ncsi_vlan_count;
+	u32 ncsi_vf_cpu_shm_pf_base;
+	u32 saved_force_link_speed;
+	u32 pcode;
+	u32 supported_link;
+	u32 advertised_link;
+	u32 autoneg;
+	u32 tp_mdx;
+	u32 tp_mdix_ctrl;
+	u32 phy_id;
+	u8 fw_lldp_ablity;
+	u8 link;
+	u8 pci_gen;
+	u8 pci_lanes;
+	u16 max_msix_vectors;
+	int speed;
+	int duplex;
+	u32 dma_version;
+	u32 wol;
+	u32 eco;
+	u32 force_status;
+	u32 force_link_supported;
+	u16 min_length;
+	u16 max_length;
+	u16 min_length_current;
+	u16 max_length_current;
+	/* rss info */
+#define HW_MAX_RETA_ENTRIES 512
+	u8 rss_indir_tbl[HW_MAX_RETA_ENTRIES];
+#define HW_MAX_TC_ENTRIES 8
+	u8 rss_tc_tbl[HW_MAX_TC_ENTRIES];
+	int rss_indir_tbl_num;
+	int rss_tc_tbl_num;
+	u32 rss_tbl_setup_flag;
+#define HW_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
+	u8 rss_key[HW_RSS_KEY_SIZE];
+	u32 rss_key_setup_flag;
+	u32 vfnum;
+	int dma_split_size;
+	int num_rar_entries;
+	int max_vfs;
+	int max_vfs_noari;
+	int sriov_ring_limit;
+	int max_pf_macvlans;
+	int num_vebvlan_entries;
+	int fdir_mode;
+	int layer2_count;
+	int tuple5_count;
+	int veb_ring;
+	int default_vf_num;
+	int vf_promisc_mode;
+	int vf_promisc_num;
+	u32 fdir_pballoc;
+	enum rnp_rss_type rss_type;
+	enum rnp_hw_type hw_type;
+	struct rnp_hw_operations ops;
+	struct rnp_nic_info nic;
+	struct rnp_dma_info dma;
+	struct rnp_eth_info eth;
+	struct rnp_mac_info mac;
+	struct rnp_addr_filter_info addr_ctrl;
+	struct rnp_fc_info fc;
+	struct rnp_phy_info phy;
+	struct rnp_eeprom_info eeprom;
+	struct rnp_bus_info bus;
+	struct rnp_mbx_info mbx;
+	struct rnp_pcs_info pcs;
+	bool adapter_stopped;
+	bool force_full_reset;
+	bool mng_fw_enabled;
+	bool wol_enabled;
+	unsigned long wol_supported;
+	int fw_version;
+	u8 sfp_connector;
+	struct vf_vebvlans vf_vas;
+	struct vf_vebvlans *vv_list;
+	u32 axi_mhz;
+	u32 bd_uid;
+	union {
+		u8 port_id[4];
+		u32 port_ids;
+	};
+	int mode;
+	int default_rx_queue;
+	u32 usecstocount;
+#define RNP_NET_FEATURE_SG ((u32)(1 << 0))
+#define RNP_NET_FEATURE_TX_CHECKSUM ((u32)(1 << 1))
+#define RNP_NET_FEATURE_RX_CHECKSUM ((u32)(1 << 2))
+#define RNP_NET_FEATURE_TSO ((u32)(1 << 3))
+#define RNP_NET_FEATURE_TX_UDP_TUNNEL ((1 << 4))
+#define RNP_NET_FEATURE_VLAN_FILTER ((1 << 5))
+#define RNP_NET_FEATURE_VLAN_OFFLOAD ((1 << 6))
+#define RNP_NET_FEATURE_RX_NTUPLE_FILTER ((1 << 7))
+#define RNP_NET_FEATURE_TCAM ((1 << 8))
+#define RNP_NET_FEATURE_RX_HASH ((1 << 9))
+#define RNP_NET_FEATURE_RX_FCS ((1 << 10))
+#define RNP_NET_FEATURE_HW_TC ((1 << 11))
+#define RNP_NET_FEATURE_USO ((1 << 12))
+#define RNP_NET_FEATURE_STAG_FILTER ((1 << 13))
+#define RNP_NET_FEATURE_STAG_OFFLOAD ((1 << 14))
+#define RNP_NET_FEATURE_VF_FIXED ((1 << 15))
+#define RNP_VEB_VLAN_MASK_EN ((1 << 16))
+
+	u32 feature_flags;
+	struct rnp_thermal_sensor_data thermal_sensor_data;
+
+	struct {
+		int version;
+		int len;
+		int flag;
+	} dump;
+};
+
+struct rnp_info {
+	enum rnp_mac_type mac;
+	enum rnp_rss_type rss_type;
+	enum rnp_hw_type hw_type;
+	s32 (*get_invariants)(struct rnp_hw *);
+	struct rnp_mac_operations *mac_ops;
+	struct rnp_eeprom_operations *eeprom_ops;
+	struct rnp_mbx_operations *mbx_ops;
+	struct rnp_pcs_operations *pcs_ops;
+	bool one_pf_with_two_dma;
+	int reg_off;
+	int adapter_cnt;
+	char lane_mask;
+	int hi_dma;
+	int total_queue_pair_cnts;
+	int dma2_in_1pf;
+	char *hw_addr;
+};
+
+/* Error Codes */
+#define RNP_ERR_EEPROM -1
+#define RNP_ERR_EEPROM_CHECKSUM -2
+#define RNP_ERR_PHY -3
+#define RNP_ERR_CONFIG -4
+#define RNP_ERR_PARAM -5
+#define RNP_ERR_MAC_TYPE -6
+#define RNP_ERR_UNKNOWN_PHY -7
+#define RNP_ERR_LINK_SETUP -8
+#define RNP_ERR_ADAPTER_STOPPED -9
+#define RNP_ERR_INVALID_MAC_ADDR -10
+#define RNP_ERR_DEVICE_NOT_SUPPORTED -11
+#define RNP_ERR_MASTER_REQUESTS_PENDING -12
+#define RNP_ERR_INVALID_LINK_SETTINGS -13
+#define RNP_ERR_AUTONEG_NOT_COMPLETE -14
+#define RNP_ERR_RESET_FAILED -15
+#define RNP_ERR_SWFW_SYNC -16
+#define RNP_ERR_PHY_ADDR_INVALID -17
+#define RNP_ERR_I2C -18
+#define RNP_ERR_SFP_NOT_SUPPORTED -19
+#define RNP_ERR_SFP_NOT_PRESENT -20
+#define RNP_ERR_SFP_NO_INIT_SEQ_PRESENT -21
+#define RNP_ERR_FDIR_REINIT_FAILED -23
+#define RNP_ERR_EEPROM_VERSION -24
+#define RNP_ERR_NO_SPACE -25
+#define RNP_ERR_OVERTEMP -26
+#define RNP_ERR_FC_NOT_NEGOTIATED -27
+#define RNP_ERR_FC_NOT_SUPPORTED -28
+#define RNP_ERR_SFP_SETUP_NOT_COMPLETE -30
+#define RNP_ERR_PBA_SECTION -31
+#define RNP_ERR_INVALID_ARGUMENT -32
+#define RNP_ERR_HOST_INTERFACE_COMMAND -33
+#define RNP_NOT_IMPLEMENTED 0x7FFFFFFF
+
+#define RNP_RAH_AV 0x80000000
+/* eth fix code */
+#define RNP_FCTRL_BPE BIT(10)
+#define RNP_FCTRL_UPE BIT(9)
+#define RNP_FCTRL_MPE BIT(8)
+
+#define RNP_MCSTCTRL_MTA BIT(2)
+#define RNP_MCSTCTRL_UTA BIT(3)
+
+#define RNP_MAX_LAYER2_FILTERS (16)
+#define RNP_MAX_TUPLE5_FILTERS (128)
+#define RNP_MAX_TCAM_FILTERS (4096)
+
+#define RNP_SRC_IP_MASK BIT(0)
+#define RNP_DST_IP_MASK BIT(1)
+#define RNP_SRC_PORT_MASK BIT(2)
+#define RNP_DST_PORT_MASK BIT(3)
+#define RNP_L4_PROTO_MASK BIT(4)
+#endif /* _RNP_TYPE_H_ */
diff --git a/drivers/net/ethernet/mucse/rnp/version.h b/drivers/net/ethernet/mucse/rnp/version.h
new file mode 100755
index 0000000000000000000000000000000000000000..3e32e68a5fd88eeab876811e59bffd1f509daef4
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnp/version.h
@@ -0,0 +1,4 @@
+#ifndef VERSION_H
+#define VERSION_H
+#define GIT_COMMIT " 9954f7f"
+#endif
diff --git a/drivers/net/ethernet/mucse/rnpgbe/Makefile b/drivers/net/ethernet/mucse/rnpgbe/Makefile
new file mode 100755
index 0000000000000000000000000000000000000000..bfd2fd9668ca4f48e33aa1d64704cac42cfb8e4f
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_MGBE) += rnpgbe.o
+rnpgbe-objs :=   \
+		rnpgbe_main.o \
+		rnpgbe_common.o \
+		rnpgbe_debugfs.o \
+		rnpgbe_ethtool.o \
+		rnpgbe_lib.o \
+		rnpgbe_mbx.o \
+		rnpgbe_chip.o \
+		rnpgbe_mbx_fw.o\
+		rnpgbe_sriov.o \
+		rnpgbe_param.o \
+		rnp_compat.o \
+		rnpgbe_sysfs.o \
+		rnpgbe_sfc.o \
+		rnpgbe_ptp.o
+
diff --git a/drivers/net/ethernet/mucse/rnpgbe/common.mk b/drivers/net/ethernet/mucse/rnpgbe/common.mk
new file mode 100755
index 0000000000000000000000000000000000000000..12b34829cdb2e5ae59a2c17dc3c34cf2f94912ef
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/common.mk
@@ -0,0 +1,446 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2022 - 2024 Mucse Corporation.
+
+#
+# common Makefile rules useful for out-of-tree Linux driver builds
+#
+# Usage: include common.mk
+#
+# After including, you probably want to add a minimum_kver_check call
+#
+# Required Variables:
+# DRIVER
+#   -- Set to the lowercase driver name
+
+#####################
+# Helpful functions #
+#####################
+
+readlink = $(shell readlink -f ${1})
+
+# helper functions for converting kernel version to version codes
+get_kver = $(or $(word ${2},$(subst ., ,${1})),0)
+get_kvercode = $(shell [ "${1}" -ge 0 -a "${1}" -le 255 2>/dev/null ] && \
+                       [ "${2}" -ge 0 -a "${2}" -le 255 2>/dev/null ] && \
+                       [ "${3}" -ge 0 -a "${3}" -le 255 2>/dev/null ] && \
+                       printf %d $$(( ( ${1} << 16 ) + ( ${2} << 8 ) + ( ${3} ) )) )
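+# Illustrative example: for a 5.10.3 kernel,
+#   $(call get_kver,5.10.3,2) expands to 10, and
+#   $(call get_kvercode,5,10,3) expands to 330243, i.e. (5 << 16) + (10 << 8) + 3.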
+
+################
+# depmod Macro #
+################
+
+cmd_depmod = /sbin/depmod $(if ${SYSTEM_MAP_FILE},-e -F ${SYSTEM_MAP_FILE}) \
+                          $(if $(strip ${INSTALL_MOD_PATH}),-b ${INSTALL_MOD_PATH}) \
+                          -a ${KVER}
+
+################
+# dracut Macro #
+################
+
+cmd_initrd := $(shell \
+                if which dracut > /dev/null 2>&1 ; then \
+                    echo "dracut --force"; \
+                elif which update-initramfs > /dev/null 2>&1 ; then \
+                    echo "update-initramfs -u"; \
+                fi )
+
+#####################
+# Environment tests #
+#####################
+
+DRIVER_UPPERCASE := $(shell echo ${DRIVER} | tr "[:lower:]" "[:upper:]")
+
+ifeq (,${BUILD_KERNEL})
+BUILD_KERNEL=$(shell uname -r)
+endif
+
+# Kernel Search Path
+# All the places we look for kernel source
+KSP :=  /lib/modules/${BUILD_KERNEL}/source \
+        /lib/modules/${BUILD_KERNEL}/build \
+        /usr/src/linux-${BUILD_KERNEL} \
+        /usr/src/linux-$(shell echo ${BUILD_KERNEL} | sed 's/-.*//') \
+        /usr/src/kernel-headers-${BUILD_KERNEL} \
+        /usr/src/kernel-source-${BUILD_KERNEL} \
+        /usr/src/linux-$(shell echo ${BUILD_KERNEL} | sed 's/\([0-9]*\.[0-9]*\)\..*/\1/') \
+        /usr/src/linux \
+        /usr/src/kernels/${BUILD_KERNEL} \
+        /usr/src/kernels
+
+# prune the list down to only values that exist and have an include/linux
+# sub-directory. We can't use include/config because some older kernels don't
+# have this.
+test_dir = $(shell [ -e ${dir}/include/linux ] && echo ${dir})
+KSP := $(foreach dir, ${KSP}, ${test_dir})
+
+# we will use this first valid entry in the search path
+ifeq (,${KSRC})
+  KSRC := $(firstword ${KSP})
+endif
+
+ifeq (,${KSRC})
+  $(warning *** Kernel header files not in any of the expected locations.)
+  $(warning *** Install the appropriate kernel development package, e.g.)
+  $(error kernel-devel, for building kernel modules and try again)
+else
+ifeq (/lib/modules/${BUILD_KERNEL}/source, ${KSRC})
+  KOBJ :=  /lib/modules/${BUILD_KERNEL}/build
+else
+  KOBJ :=  ${KSRC}
+endif
+endif
+
+# Version file Search Path
+VSP :=  ${KOBJ}/include/generated/utsrelease.h \
+        ${KOBJ}/include/linux/utsrelease.h \
+        ${KOBJ}/include/linux/version.h \
+        ${KOBJ}/include/generated/uapi/linux/version.h \
+        /boot/vmlinuz.version.h
+
+# Config file Search Path
+CSP :=  ${KOBJ}/include/generated/autoconf.h \
+        ${KOBJ}/include/linux/autoconf.h \
+        /boot/vmlinuz.autoconf.h
+
+# System.map Search Path (for depmod)
+MSP := ${KSRC}/System.map \
+       /boot/System.map-${BUILD_KERNEL}
+
+# prune the lists down to only files that exist
+test_file = $(shell [ -f ${1} ] && echo ${1})
+VSP := $(foreach file, ${VSP}, $(call test_file,${file}))
+CSP := $(foreach file, ${CSP}, $(call test_file,${file}))
+MSP := $(foreach file, ${MSP}, $(call test_file,${file}))
+
+
+# and use the first valid entry in the Search Paths
+ifeq (,${VERSION_FILE})
+  VERSION_FILE := $(firstword ${VSP})
+endif
+
+ifeq (,${CONFIG_FILE})
+  CONFIG_FILE := $(firstword ${CSP})
+endif
+
+ifeq (,${SYSTEM_MAP_FILE})
+  SYSTEM_MAP_FILE := $(firstword ${MSP})
+endif
+
+ifeq (,$(wildcard ${VERSION_FILE}))
+  $(error Linux kernel source not configured - missing version header file)
+endif
+
+ifeq (,$(wildcard ${CONFIG_FILE}))
+  $(error Linux kernel source not configured - missing autoconf.h)
+endif
+
+ifeq (,$(wildcard ${SYSTEM_MAP_FILE}))
+  $(warning Missing System.map file - depmod will not check for missing symbols during module installation)
+endif
+
+ifneq ($(words $(subst :, ,$(CURDIR))), 1)
+  $(error Sources directory '$(CURDIR)' cannot contain spaces nor colons. Rename directory or move sources to another path)
+endif
+
+########################
+# Extract config value #
+########################
+
+get_config_value = $(shell ${CC} -E -dM ${CONFIG_FILE} 2> /dev/null |\
+                           grep -m 1 ${1} | awk '{ print $$3 }')
+
+########################
+# Check module signing #
+########################
+
+CONFIG_MODULE_SIG_ALL := $(call get_config_value,CONFIG_MODULE_SIG_ALL)
+CONFIG_MODULE_SIG_FORCE := $(call get_config_value,CONFIG_MODULE_SIG_FORCE)
+CONFIG_MODULE_SIG_KEY := $(call get_config_value,CONFIG_MODULE_SIG_KEY)
+
+SIG_KEY_SP := ${KOBJ}/${CONFIG_MODULE_SIG_KEY} \
+              ${KOBJ}/certs/signing_key.pem
+
+SIG_KEY_FILE := $(firstword $(foreach file, ${SIG_KEY_SP}, $(call test_file,${file})))
+
+# print a warning if the kernel configuration attempts to sign modules but
+# the signing key can't be found.
+ifneq (${SIG_KEY_FILE},)
+warn_signed_modules := : ;
+else
+warn_signed_modules :=
+ifeq (${CONFIG_MODULE_SIG_ALL},1)
+warn_signed_modules += \
+    echo "*** The target kernel has CONFIG_MODULE_SIG_ALL enabled, but" ; \
+    echo "*** the signing key cannot be found. Module signing has been" ; \
+    echo "*** disabled for this build." ;
+endif # CONFIG_MODULE_SIG_ALL=y
+ifeq (${CONFIG_MODULE_SIG_FORCE},1)
+    echo "warning: The target kernel has CONFIG_MODULE_SIG_FORCE enabled," ; \
+    echo "warning: but the signing key cannot be found. The module must" ; \
+    echo "warning: be signed manually using 'scripts/sign-file'." ;
+endif # CONFIG_MODULE_SIG_FORCE
+DISABLE_MODULE_SIGNING := Yes
+endif
+
+#######################
+# Linux Version Setup #
+#######################
+
+# The following command line parameters are intended for development of KCOMPAT
+# against upstream kernels such as net-next, which may have broken or outdated
+# version codes in their Makefile. They are intended for debugging and
+# development purposes only, so that we can easily test new KCOMPAT early. If
+# you don't know what this means, you do not need to set these flags. There is
+# no arcane magic here.
+
+# Convert LINUX_VERSION into LINUX_VERSION_CODE
+ifneq (${LINUX_VERSION},)
+  LINUX_VERSION_CODE=$(call get_kvercode,$(call get_kver,${LINUX_VERSION},1),$(call get_kver,${LINUX_VERSION},2),$(call get_kver,${LINUX_VERSION},3))
+endif
+
+# Honor LINUX_VERSION_CODE
+ifneq (${LINUX_VERSION_CODE},)
+  $(warning Forcing target kernel to build with LINUX_VERSION_CODE of ${LINUX_VERSION_CODE}$(if ${LINUX_VERSION}, from LINUX_VERSION=${LINUX_VERSION}). Do this at your own risk.)
+  KVER_CODE := ${LINUX_VERSION_CODE}
+  EXTRA_CFLAGS += -DLINUX_VERSION_CODE=${LINUX_VERSION_CODE}
+endif
+
+# Determine SLE_KERNEL_REVISION for SuSE SLE >= 11 (needed by kcompat)
+# This assumes SuSE will continue setting CONFIG_LOCALVERSION to the string
+# appended to the stable kernel version on which their kernel is based with
+# additional versioning information (up to 3 numbers), a possible abbreviated
+# git SHA1 commit id and a kernel type, e.g. CONFIG_LOCALVERSION=-1.2.3-default
+# or CONFIG_LOCALVERSION=-999.gdeadbee-default
+#
+# SLE_LOCALVERSION_CODE is also exported to support legacy kcompat.h
+# definitions.
+ifeq (1,$(call get_config_value,CONFIG_SUSE_KERNEL))
+
+ifneq (10,$(call get_config_value,CONFIG_SLE_VERSION))
+
+  CONFIG_LOCALVERSION := $(call get_config_value,CONFIG_LOCALVERSION)
+  LOCALVERSION := $(shell echo ${CONFIG_LOCALVERSION} | \
+                    cut -d'-' -f2 | sed 's/\.g[[:xdigit:]]\{7\}//')
+  LOCALVER_A := $(shell echo ${LOCALVERSION} | cut -d'.' -f1)
+  LOCALVER_B := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f2)
+  LOCALVER_C := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f3)
+  SLE_LOCALVERSION_CODE := $(shell expr ${LOCALVER_A} \* 65536 + \
+                                        0${LOCALVER_B} \* 256 + 0${LOCALVER_C})
+  EXTRA_CFLAGS += -DSLE_LOCALVERSION_CODE=${SLE_LOCALVERSION_CODE}
+  EXTRA_CFLAGS += -DSLE_KERNEL_REVISION=${LOCALVER_A}
+endif
+endif
+
+EXTRA_CFLAGS += ${CFLAGS_EXTRA}
+
+# get the kernel version - we use this to find the correct install path
+KVER := $(shell ${CC} ${EXTRA_CFLAGS} -E -dM ${VERSION_FILE} | grep UTS_RELEASE | \
+        awk '{ print $$3 }' | sed 's/\"//g')
+
+# assume source symlink is the same as build, otherwise adjust KOBJ
+ifneq (,$(wildcard /lib/modules/${KVER}/build))
+  ifneq (${KSRC},$(call readlink,/lib/modules/${KVER}/build))
+    KOBJ=/lib/modules/${KVER}/build
+  endif
+endif
+
+ifeq (${KVER_CODE},)
+  KVER_CODE := $(shell ${CC} ${EXTRA_CFLAGS} -E -dM ${VSP} 2> /dev/null |\
+                 grep -m 1 LINUX_VERSION_CODE | awk '{ print $$3 }' | sed 's/\"//g')
+endif
+
+# minimum_kver_check
+#
+# helper function to provide uniform output for different drivers to abort the
+# build based on kernel version check. Usage: "$(call minimum_kver_check,2,6,XX)".
+define _minimum_kver_check
+ifeq (0,$(shell [ ${KVER_CODE} -lt $(call get_kvercode,${1},${2},${3}) ]; echo "$$?"))
+  $$(warning *** Aborting the build.)
+  $$(error This driver is not supported on kernel versions older than ${1}.${2}.${3})
+endif
+endef
+minimum_kver_check = $(eval $(call _minimum_kver_check,${1},${2},${3}))
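+# Illustrative example: a driver Makefile that requires at least kernel 3.10.0
+# would invoke "$(call minimum_kver_check,3,10,0)" right after including this
+# file; on older kernels the build then aborts with the error above.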
+
+#############################
+# kcompat definitions setup #
+#############################
+
+# In most cases, kcompat flags can be checked within the driver source files
+# using simple CPP checks. However, it may be necessary to check for a flag
+# value within the Makefile for some specific edge cases. For example, if an
+# entire feature ought to be excluded on some kernels due to missing
+# functionality.
+#
+# To support this, kcompat_defs.h is compiled and converted into a word list
+# that can be checked to determine whether a given kcompat feature flag will
+# be defined for this kernel.
+#
+# KCOMPAT_DEFINITIONS holds the set of all macros which are defined. Note
+# this does include a large number of standard/builtin definitions.
+#
+# Use is_kcompat_defined as a $(call) function to check whether a given flag
+# is defined or undefined. For example:
+#
+#   ifeq ($(call is_kcompat_defined,HAVE_FEATURE_FLAG),1)
+#
+#   ifneq ($(call is_kcompat_defined,HAVE_FEATURE_FLAG),1)
+#
+# The is_kcompat_defined function returns 1 if the macro name is defined,
+# and the empty string otherwise.
+#
+# There is no mechanism to extract the value of the kcompat definition.
+# Supporting this would be non-trivial as Make does not have a map variable
+# type.
+#
+# Note that only the new layout is supported. Legacy definitions in
+# kcompat.h are not supported. If you need to check one of these, please
+# refactor it into the new layout.
+
+ifneq ($(wildcard ./kcompat_defs.h),)
+# call script that populates defines automatically
+#
+# since is_kcompat_defined() is a macro, it's "computed" before any target
+# recipe, kcompat_generated_defs.h is needed prior to that, so needs to be
+# generated also via $(shell) call, which makes error handling ugly
+$(if $(shell KSRC=${KSRC} OUT=kcompat_generated_defs.h CONFFILE=${CONFIG_FILE} \
+    bash kcompat-generator.sh && echo ok), , $(error kcompat-generator.sh failed))
+
+#KCOMPAT_DEFINITIONS := $(shell ${CC} ${EXTRA_CFLAGS} -E -dM \
+#                                     -I${KOBJ}/include \
+#                                     -I${KOBJ}/include/generated/uapi \
+#                                     kcompat_defs.h | awk '{ print $$2 }')
+
+is_kcompat_defined = $(if $(filter ${1},${KCOMPAT_DEFINITIONS}),1,)
+else
+KCOMPAT_DEFINITIONS :=
+is_kcompat_defined =
+endif
+
+################
+# Manual Pages #
+################
+
+MANSECTION = 7
+
+ifeq (,${MANDIR})
+  # find the best place to install the man page
+  MANPATH := $(shell (manpath 2>/dev/null || echo $$MANPATH) | sed 's/:/ /g')
+  ifneq (,${MANPATH})
+    # test based on inclusion in MANPATH
+    test_dir = $(findstring ${dir}, ${MANPATH})
+  else
+    # no MANPATH, test based on directory existence
+    test_dir = $(shell [ -e ${dir} ] && echo ${dir})
+  endif
+  # our preferred install path
+  # should /usr/local/man be in here ?
+  MANDIR := /usr/share/man /usr/man
+  MANDIR := $(foreach dir, ${MANDIR}, ${test_dir})
+  MANDIR := $(firstword ${MANDIR})
+endif
+ifeq (,${MANDIR})
+  # fallback to /usr/man
+  MANDIR := /usr/man
+endif
+
+####################
+# CCFLAGS variable #
+####################
+
+# set correct CCFLAGS variable for kernels older than 2.6.24
+ifeq (0,$(shell [ ${KVER_CODE} -lt $(call get_kvercode,2,6,24) ]; echo $$?))
+CCFLAGS_VAR := EXTRA_CFLAGS
+else
+CCFLAGS_VAR := ccflags-y
+endif
+
+#################
+# KBUILD_OUTPUT #
+#################
+
+# Only set KBUILD_OUTPUT if the real paths of KOBJ and KSRC differ
+ifneq ($(call readlink,${KSRC}),$(call readlink,${KOBJ}))
+export KBUILD_OUTPUT ?= ${KOBJ}
+endif
+
+############################
+# Module Install Directory #
+############################
+
+# Default to the updates/drivers/net/ethernet/mucse/ path, since depmod (since
+# v3.1) defaults to checking the updates folder first, and only checks kernels/
+# and extra afterwards. We use updates instead of kernel/* because we want to
+# avoid overwriting the built-in module files.
+export INSTALL_MOD_DIR ?= updates/drivers/net/ethernet/mucse/${DRIVER}
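+# For example, assuming DRIVER=rnpgbe, modules are installed under
+# ${INSTALL_MOD_PATH}/lib/modules/${KVER}/updates/drivers/net/ethernet/mucse/rnpgbe/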
+
+#################
+# Auxiliary Bus #
+#################
+
+# If the check_aux_bus script exists, then this driver depends on the
+# auxiliary module. Run the script to determine if we need to include
+# auxiliary files with this build.
+ifneq ($(call test_file,../scripts/check_aux_bus),)
+NEED_AUX_BUS := $(shell ../scripts/check_aux_bus --ksrc="${KSRC}" --build-kernel="${BUILD_KERNEL}" >/dev/null 2>&1; echo $$?)
+endif # check_aux_bus exists
+
+# The out-of-tree auxiliary module we ship should be moved into this
+# directory as part of installation.
+export INSTALL_AUX_DIR ?= updates/drivers/net/ethernet/mucse/auxiliary
+
+# If we're installing auxiliary bus out-of-tree, the following steps are
+# necessary to ensure the relevant files get put in place.
+ifeq (${NEED_AUX_BUS},2)
+define auxiliary_post_install
+	install -D -m 644 Module.symvers ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_AUX_DIR}/Module.symvers
+	mv -f ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_MOD_DIR}/auxiliary.ko \
+	      ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_AUX_DIR}/auxiliary.ko
+	install -D -m 644 linux/auxiliary_bus.h ${INSTALL_MOD_PATH}/${KSRC}/include/linux/auxiliary_bus.h
+endef
+else
+auxiliary_post_install =
+endif
+
+ifeq (${NEED_AUX_BUS},2)
+define auxiliary_post_uninstall
+	rm -f ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_AUX_DIR}/Module.symvers
+	rm -f ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_AUX_DIR}/auxiliary.ko
+	rm -f ${INSTALL_MOD_PATH}/${KSRC}/include/linux/auxiliary_bus.h
+endef
+else
+auxiliary_post_uninstall =
+endif
+
+######################
+# Kernel Build Macro #
+######################
+
+# kernel build function
+# ${1} is the kernel build target
+# ${2} may contain any extra rules to pass directly to the sub-make process
+#
+# This function is expected to be executed by
+#   @+$(call kernelbuild,<target>,<extra rules>)
+# from within a Makefile recipe.
+#
+# The following variables are expected to be defined for its use:
+# GCC_I_SYS -- if set it will enable use of gcc-i-sys.sh wrapper to use -isystem
+# CCFLAGS_VAR -- the CCFLAGS variable to set extra CFLAGS
+# EXTRA_CFLAGS -- a set of extra CFLAGS to pass into the ccflags-y variable
+# KSRC -- the location of the kernel source tree to build against
+# DRIVER_UPPERCASE -- the uppercase name of the kernel module, set from DRIVER
+# W -- if set, enables the W= kernel warnings options
+# C -- if set, enables the C= kernel sparse build options
+#
+kernelbuild = $(call warn_signed_modules) \
+              ${MAKE} $(if ${GCC_I_SYS},CC="${GCC_I_SYS}") \
+                      ${CCFLAGS_VAR}="${EXTRA_CFLAGS}" \
+                      -C "${KSRC}" \
+                      CONFIG_${DRIVER_UPPERCASE}=m \
+                      $(if ${DISABLE_MODULE_SIGNING},CONFIG_MODULE_SIG=n) \
+                      $(if ${DISABLE_MODULE_SIGNING},CONFIG_MODULE_SIG_ALL=) \
+                      M="${CURDIR}" \
+                      $(if ${W},W="${W}") \
+                      $(if ${C},C="${C}") \
+                      $(if ${NEED_AUX_BUS},NEED_AUX_BUS="${NEED_AUX_BUS}") \
+                      ${2} ${1}
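+
+# Illustrative (hypothetical) callers from an out-of-tree driver Makefile:
+#   default:
+#       @+$(call kernelbuild,modules)
+#   clean:
+#       @+$(call kernelbuild,clean)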
diff --git a/drivers/net/ethernet/mucse/rnpgbe/kcompat-generator.sh b/drivers/net/ethernet/mucse/rnpgbe/kcompat-generator.sh
new file mode 100755
index 0000000000000000000000000000000000000000..55e65e7b577fb18729cff4809a3d3cdc4f9ded2f
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/kcompat-generator.sh
@@ -0,0 +1,299 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright(c) 2022 - 2024 Mucse Corporation
+
+set -Eeuo pipefail
+
+# This file generates HAVE_ and NEED_ defines for current kernel
+# (or KSRC if provided).
+#
+# It does so by 'gen' function calls (see body of 'gen-devlink' for examples).
+# 'gen' can look for various kinds of declarations in the provided kernel
+# headers, eg look for an enum in one of the specified files and check whether
+# a given enumeration value is present. See 'Documentation' or the comment
+# above the 'gen' function in kcompat-lib.sh.
+
+# Why use bash/awk instead of an old/legacy approach?
+#
+# The aim is to replicate all the defines previously provided by human
+# developers. An additional bonus is that we no longer need to care about
+# backports done by OS vendors (RHEL, SLES, ORACLE, UBUNTU, more to come).
+# The driver will even compile with only part of the backports provided.
+#
+# To enable a smooth transition, especially in times of late fixes, the "old"
+# method of providing flags should still work as usual.
+
+# End of intro.
+# Find info about coding style/rules at the end of file.
+# Most of the implementation is in kcompat-lib.sh, here are actual 'gen' calls.
+
+export LC_ALL=C
+ORIG_CWD="$(pwd)"
+trap 'rc=$?; echo >&2 "$(realpath "$ORIG_CWD/${BASH_SOURCE[0]}"):$LINENO: failed with rc: $rc"' ERR
+
+# shellcheck source=kcompat-lib.sh
+source "$ORIG_CWD"/kcompat-lib.sh
+
+# DO NOT break gen calls below (via \), to make our compat code more grep-able,
+# keep them also grouped, first by feature (like DEVLINK), then by .h filename
+# finally, keep them sorted within a group (sort by flag name)
+
+# handy line of DOC copy-pasted from kcompat-lib.sh:
+#   gen DEFINE if (KIND [METHOD of]) NAME [(matches|lacks) PATTERN|absent] in <FILES>
+
+function gen-device() {
+	dh='include/linux/device.h'
+	dph='include/linux/dev_printk.h'
+	gen NEED_BUS_FIND_DEVICE_CONST_DATA if fun bus_find_device lacks 'const void \\*data' in "$dh"
+	gen NEED_DEV_LEVEL_ONCE if macro dev_level_once absent in "$dh" "$dph"
+	gen NEED_DEVM_KASPRINTF if fun devm_kasprintf absent in "$dh"
+	gen NEED_DEVM_KFREE if fun devm_kfree absent in "$dh"
+	gen NEED_DEVM_KVASPRINTF if fun devm_kvasprintf absent in "$dh"
+	gen NEED_DEVM_KZALLOC if fun devm_kzalloc absent in "$dh"
+}
+
+function gen-devlink() {
+	dh='include/net/devlink.h'
+	gen HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY if fun devlink_flash_update_begin_notify in "$dh"
+	gen HAVE_DEVLINK_FLASH_UPDATE_PARAMS    if struct devlink_flash_update_params in "$dh"
+	gen HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW if struct devlink_flash_update_params matches 'struct firmware \\*fw' in "$dh"
+	gen HAVE_DEVLINK_HEALTH if enum devlink_health_reporter_state in "$dh"
+	gen HAVE_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER if fun devlink_health_reporter_create lacks auto_recover in "$dh"
+	gen HAVE_DEVLINK_HEALTH_OPS_EXTACK if method dump of devlink_health_reporter_ops matches ext_ack in "$dh"
+	gen HAVE_DEVLINK_INFO_DRIVER_NAME_PUT if fun devlink_info_driver_name_put in "$dh"
+	gen HAVE_DEVLINK_PARAMS if method validate of devlink_param matches ext_ack in "$dh"
+	gen HAVE_DEVLINK_PARAMS_PUBLISH if fun devlink_params_publish in "$dh"
+	gen HAVE_DEVLINK_PORT_NEW if method port_new of devlink_ops in "$dh"
+	gen HAVE_DEVLINK_PORT_OPS if struct devlink_port_ops in "$dh"
+	gen HAVE_DEVLINK_PORT_SPLIT if method port_split of devlink_ops in "$dh"
+	gen HAVE_DEVLINK_PORT_SPLIT_EXTACK if method port_split of devlink_ops matches netlink_ext_ack in "$dh"
+	gen HAVE_DEVLINK_PORT_SPLIT_PORT_STRUCT if method port_split of devlink_ops matches devlink_port in "$dh"
+	gen HAVE_DEVLINK_PORT_TYPE_ETH_HAS_NETDEV if fun devlink_port_type_eth_set matches 'struct net_device' in "$dh"
+	gen HAVE_DEVLINK_RATE_NODE_CREATE if fun devl_rate_node_create in "$dh"
+	# keep the devlink_region_ops body in a variable, to avoid looking 4
+	# times for exactly the same thing in a big file;
+	# please consider it an example of "how to speed up if needed"
+	REGION_OPS="$(find-struct-decl devlink_region_ops "$dh")"
+	gen HAVE_DEVLINK_REGIONS if struct devlink_region_ops in - <<< "$REGION_OPS"
+	gen HAVE_DEVLINK_REGION_OPS_SNAPSHOT if fun snapshot in - <<< "$REGION_OPS"
+	gen HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS if fun snapshot matches devlink_region_ops in - <<< "$REGION_OPS"
+	gen HAVE_DEVLINK_REGISTER_SETS_DEV if fun devlink_register matches 'struct device' in "$dh"
+	gen HAVE_DEVLINK_RELOAD_ENABLE_DISABLE if fun devlink_reload_enable in "$dh"
+	gen HAVE_DEVLINK_SET_FEATURES  if fun devlink_set_features in "$dh"
+	gen HAVE_DEVL_PORT_REGISTER if fun devl_port_register in "$dh"
+
+	gen HAVE_DEVLINK_PORT_FLAVOUR_PCI_SF if enum devlink_port_flavour matches DEVLINK_PORT_FLAVOUR_PCI_SF in include/uapi/linux/devlink.h
+	gen HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT if enum devlink_reload_action matches DEVLINK_RELOAD_ACTION_FW_ACTIVATE in include/uapi/linux/devlink.h
+
+	gen NEED_DEVLINK_RESOURCES_UNREGISTER_NO_RESOURCE if fun devlink_resources_unregister matches 'struct devlink_resource \\*' in "$dh"
+	gen NEED_DEVLINK_TO_DEV  if fun devlink_to_dev absent in "$dh"
+	gen NEED_DEVLINK_UNLOCKED_RESOURCE if fun devl_resource_size_get absent in "$dh"
+}
+
+function gen-ethtool() {
+	eth='include/linux/ethtool.h'
+	ueth='include/uapi/linux/ethtool.h'
+	gen HAVE_ETHTOOL_COALESCE_EXTACK if method get_coalesce of ethtool_ops matches 'struct kernel_ethtool_coalesce \\*' in "$eth"
+	gen HAVE_ETHTOOL_EXTENDED_RINGPARAMS if method get_ringparam of ethtool_ops matches 'struct kernel_ethtool_ringparam \\*' in "$eth"
+	gen HAVE_ETHTOOL_KEEE if struct ethtool_keee in "$eth"
+	gen HAVE_ETHTOOL_RXFH_PARAM if struct ethtool_rxfh_param in "$eth"
+	gen NEED_ETHTOOL_SPRINTF if fun ethtool_sprintf absent in "$eth"
+	gen HAVE_ETHTOOL_FLOW_RSS if macro FLOW_RSS in "$ueth"
+}
+
+function gen-filter() {
+	fh='include/linux/filter.h'
+	gen HAVE_XDP_DO_FLUSH if fun xdp_do_flush_map in "$fh"
+	gen NEED_NO_NETDEV_PROG_XDP_WARN_ACTION if fun bpf_warn_invalid_xdp_action lacks 'struct net_device \\*' in "$fh"
+}
+
+function gen-flow-dissector() {
+	gen HAVE_FLOW_DISSECTOR_KEY_PPPOE if enum flow_dissector_key_id matches FLOW_DISSECTOR_KEY_PPPOE in include/net/flow_dissector.h include/net/flow_keys.h
+	# following HAVE ... CVLAN flag is mistakenly named after an enum key,
+	# but guards code around function call that was introduced later
+	gen HAVE_FLOW_DISSECTOR_KEY_CVLAN if fun flow_rule_match_cvlan in include/net/flow_offload.h
+}
+
+function gen-gnss() {
+	cdh='include/linux/cdev.h'
+	clh='include/linux/device/class.h'
+	dh='include/linux/device.h'
+	gh='include/linux/gnss.h'
+	th='include/uapi/linux/types.h'
+	fh='include/linux/fs.h'
+
+	gen HAVE_CDEV_DEVICE if fun cdev_device_add in "$cdh"
+	gen HAVE_DEV_UEVENT_CONST if method dev_uevent of class matches 'const struct device' in "$clh"
+	gen HAVE_POLL_T if typedef __poll_t in "$th"
+	gen HAVE_STREAM_OPEN if fun stream_open in "$fh"
+	# There can be either macro class_create or a function
+	gen NEED_CLASS_CREATE_WITH_MODULE_PARAM if fun class_create matches 'owner' in "$clh" "$dh"
+	gen NEED_CLASS_CREATE_WITH_MODULE_PARAM if macro class_create in "$clh" "$dh"
+
+	if ! grep -qE CONFIG_SUSE_KERNEL.+1 "$CONFFILE"; then
+		gen HAVE_GNSS_MODULE if struct gnss_device in "$gh"
+	fi
+}
+
+function gen-netdevice() {
+	ndh='include/linux/netdevice.h'
+	gen HAVE_NDO_ETH_IOCTL if fun ndo_eth_ioctl in "$ndh"
+	gen HAVE_NDO_FDB_ADD_VID    if method ndo_fdb_del of net_device_ops matches 'u16 vid' in "$ndh"
+	gen HAVE_NDO_FDB_DEL_EXTACK if method ndo_fdb_del of net_device_ops matches ext_ack in "$ndh"
+	gen HAVE_NDO_GET_DEVLINK_PORT if method ndo_get_devlink_port of net_device_ops in "$ndh"
+	gen HAVE_NDO_UDP_TUNNEL_CALLBACK if method ndo_udp_tunnel_add of net_device_ops in "$ndh"
+	gen HAVE_NETIF_SET_TSO_MAX if fun netif_set_tso_max_size in "$ndh"
+	gen HAVE_SET_NETDEV_DEVLINK_PORT if macro SET_NETDEV_DEVLINK_PORT in "$ndh"
+	gen NEED_NETIF_NAPI_ADD_NO_WEIGHT if fun netif_napi_add matches 'int weight' in "$ndh"
+	gen NEED_NET_PREFETCH if fun net_prefetch absent in "$ndh"
+}
+
+function gen-pci() {
+	pcih='include/linux/pci.h'
+	gen HAVE_PCI_MSIX_ALLOC_IRQ_AT if fun pci_msix_alloc_irq_at in "$pcih"
+	gen HAVE_PCI_MSIX_CAN_ALLOC_DYN if fun pci_msix_can_alloc_dyn in "$pcih"
+	gen HAVE_PCI_MSIX_FREE_IRQ if fun pci_msix_free_irq in "$pcih"
+	gen HAVE_PER_VF_MSIX_SYSFS if method sriov_set_msix_vec_count of pci_driver in "$pcih"
+	gen HAVE_STRUCT_PCI_DEV_PTM_ENABLED if struct pci_dev matches ptm_enabled in "$pcih"
+	gen NEED_PCIE_PTM_ENABLED if fun pcie_ptm_enabled absent in "$pcih"
+	gen NEED_PCI_ENABLE_PTM if fun pci_enable_ptm absent in "$pcih"
+}
+
+function gen-other() {
+	ush='include/linux/u64_stats_sync.h'
+	#gen NEED_PCI_AER_CLEAR_NONFATAL_STATUS if fun pci_aer_clear_nonfatal_status absent in include/linux/aer.h
+	#gen NEED_BITMAP_COPY_CLEAR_TAIL if fun bitmap_copy_clear_tail absent in include/linux/bitmap.h
+	#gen NEED_BITMAP_FROM_ARR32 if fun bitmap_from_arr32 absent in include/linux/bitmap.h
+	#gen NEED_BITMAP_TO_ARR32 if fun bitmap_to_arr32 absent in include/linux/bitmap.h
+	#gen HAVE_COMPLETION_RAW_SPINLOCK if struct completion matches 'struct swait_queue_head' in include/linux/completion.h
+	#gen NEED_DEBUGFS_LOOKUP if fun debugfs_lookup absent in include/linux/debugfs.h
+	#gen NEED_DEBUGFS_LOOKUP_AND_REMOVE if fun debugfs_lookup_and_remove absent in include/linux/debugfs.h
+	gen NEED_ETH_HW_ADDR_SET if fun eth_hw_addr_set absent in include/linux/etherdevice.h
+	#gen HAVE_HWMON_DEVICE_REGISTER_WITH_INFO if fun hwmon_device_register_with_info in include/linux/hwmon.h
+	#gen NEED_HWMON_CHANNEL_INFO if macro HWMON_CHANNEL_INFO absent in include/linux/hwmon.h
+	#gen HAVE_IOMMU_DEV_FEAT_AUX if enum iommu_dev_features matches IOMMU_DEV_FEAT_AUX in include/linux/iommu.h
+	#gen NEED_DEFINE_STATIC_KEY_FALSE if macro DEFINE_STATIC_KEY_FALSE absent in include/linux/jump_label.h
+	#gen NEED_STATIC_BRANCH_LIKELY if macro static_branch_likely absent in include/linux/jump_label.h
+	#gen HAVE_STRUCT_STATIC_KEY_FALSE if struct static_key_false in include/linux/jump_label.h include/linux/jump_label_type.h
+	#gen NEED_DECLARE_STATIC_KEY_FALSE if macro DECLARE_STATIC_KEY_FALSE absent in include/linux/jump_label.h include/linux/jump_label_type.h
+	#gen NEED_LOWER_16_BITS if macro lower_16_bits absent in include/linux/kernel.h
+	#gen NEED_UPPER_16_BITS if macro upper_16_bits absent in include/linux/kernel.h
+	gen NEED_MUL_U64_U64_DIV_U64 if fun mul_u64_u64_div_u64 absent in include/linux/math64.h
+	#gen HAVE_MDEV_GET_DRVDATA if fun mdev_get_drvdata in include/linux/mdev.h
+	#gen HAVE_MDEV_REGISTER_PARENT if fun mdev_register_parent in include/linux/mdev.h
+	#gen NEED_DEV_PM_DOMAIN_ATTACH if fun dev_pm_domain_attach absent in include/linux/pm_domain.h include/linux/pm.h
+	#gen NEED_DEV_PM_DOMAIN_DETACH if fun dev_pm_domain_detach absent in include/linux/pm_domain.h include/linux/pm.h
+	#gen NEED_PTP_CLASSIFY_RAW if fun ptp_classify_raw absent in include/linux/ptp_classify.h
+	#gen NEED_PTP_PARSE_HEADER if fun ptp_parse_header absent in include/linux/ptp_classify.h
+	gen HAVE_PTP_CLOCK_INFO_ADJFINE if method adjfine of ptp_clock_info in include/linux/ptp_clock_kernel.h
+	gen NEED_DIFF_BY_SCALED_PPM if fun diff_by_scaled_ppm absent in include/linux/ptp_clock_kernel.h
+	#gen NEED_PTP_SYSTEM_TIMESTAMP if fun ptp_read_system_prets absent in include/linux/ptp_clock_kernel.h
+	#gen NEED_DEV_PAGE_IS_REUSABLE if fun dev_page_is_reusable absent in include/linux/skbuff.h
+	#gen NEED_SYSFS_EMIT if fun sysfs_emit absent in include/linux/sysfs.h
+	#gen HAVE_TRACE_ENABLED_SUPPORT if implementation of macro __DECLARE_TRACE matches 'trace_##name##_enabled' in include/linux/tracepoint.h
+	#gen HAVE_U64_STATS_FETCH_BEGIN_IRQ if fun u64_stats_fetch_begin_irq in "$ush"
+	#gen HAVE_U64_STATS_FETCH_RETRY_IRQ if fun u64_stats_fetch_retry_irq in "$ush"
+	#gen NEED_U64_STATS_READ if fun u64_stats_read absent in "$ush"
+	#gen NEED_U64_STATS_SET if fun u64_stats_set absent in "$ush"
+	#gen HAVE_LMV1_SUPPORT if macro VFIO_REGION_TYPE_MIGRATION in include/uapi/linux/vfio.h
+}
+
+# all the generations, extracted from main() to keep normal code and various
+# prep separated
+function gen-all() {
+	if grep -qE CONFIG_NET_DEVLINK.+1 "$CONFFILE"; then
+		gen-devlink
+	fi
+	gen-netdevice
+	# code above is covered by unit_tests/test_gold.sh
+	if [ -n "${JUST_UNIT_TESTING-}" ]; then
+		return
+	fi
+	gen-device
+	gen-ethtool
+	gen-filter
+	gen-flow-dissector
+	gen-gnss
+	gen-pci
+	gen-other
+}
+
+function main() {
+	# check if caller (like our makefile) wants to redirect output to file
+	if [ -n "${OUT-}" ]; then
+
+		# in case OUT exists, we don't want to overwrite it, instead
+		# write to a temporary copy.
+		if [ -s "${OUT}" ]; then
+			TMP_OUT="$(mktemp "${OUT}.XXX")"
+			trap "rm -f '${TMP_OUT}'" EXIT
+
+			REAL_OUT="${OUT}"
+			OUT="${TMP_OUT}"
+		fi
+
+		exec > "$OUT"
+		# all stdout goes to OUT since now
+		echo "/* Autogenerated for KSRC=${KSRC-} via $(basename "$0") */"
+	fi
+	if [ -d "${KSRC-}" ]; then
+		cd "${KSRC}"
+	fi
+
+	# check if KSRC was ok/if we are in proper place to look for headers
+	if [ -z "$(filter-out-bad-files include/linux/kernel.h)" ]; then
+		echo >&2 "seems that there are no kernel includes placed in KSRC=${KSRC}
+			pwd=$(pwd); ls -l:"
+		ls -l >&2
+		exit 8
+	fi
+
+	# so far we only need CONFIG_NET_DEVLINK, but it comes from .config,
+	# which is why CONFFILE is required
+	if [ ! -f "${CONFFILE-}" ]; then
+		echo >&2 ".config should be passed as env CONFFILE
+			(and it's not set or not a file)"
+		exit 9
+	fi
+
+	gen-all
+
+	if [ -n "${OUT-}" ]; then
+		cd "$ORIG_CWD"
+
+		# Compare and see if anything changed. This avoids updating
+		# mtime of the file.
+		if [ -n "${REAL_OUT-}" ]; then
+			if cmp --silent "${REAL_OUT}" "${TMP_OUT}"; then
+				# exit now, skipping print of the output since
+				# there were no changes. the trap should
+				# cleanup TMP_OUT
+				exit 0
+			fi
+
+			mv -f "${TMP_OUT}" "${REAL_OUT}"
+			OUT="${REAL_OUT}"
+		fi
+
+		# dump output, will be visible in CI
+		if [ -n "${JUST_UNIT_TESTING-}${QUIET_COMPAT-}" ]; then
+			return
+		fi
+		cat -n "$OUT" >&2
+	fi
+}
+
+main
+
+# Coding style:
+# - rely on `set -e` handling as much as possible, so:
+#  - do not use <(bash process substitution) - it breaks error handling;
+#  - do not put substantial logic in `if`-like statements - it disables error
+#    handling inside the conditional (`if big-fun call; then` is substantial)
+# - make shellcheck happy - https://www.shellcheck.net
+#
+# That enables us to move processing out of `if` or `... && ...` statements,
+# which in turn means that bash error handling (`set -e`) will actually abort
+# on errors.
diff --git a/drivers/net/ethernet/mucse/rnpgbe/kcompat-lib.sh b/drivers/net/ethernet/mucse/rnpgbe/kcompat-lib.sh
new file mode 100755
index 0000000000000000000000000000000000000000..6d064a5c7536332c9a3a00f9eeaa05c4cfe0a3b1
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/kcompat-lib.sh
@@ -0,0 +1,278 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright(c) 2022 - 2024 Mucse Corporation
+
+# to be sourced
+
+# General shell helpers
+
+# exit with non-zero exit code; if there is only one param:
+# exit with msg $1 and exit code from last command (or 99 if = 0)
+# otherwise, exit with $1 and use remaining arguments as msg
+function die() {
+	rc=$?
+	if [ $# -gt 1 ]; then
+		rc="$1"
+		shift
+	fi
+	[ "$rc" -ne 0 ] || rc=99
+	echo >&2 "$@"
+	exit $rc
+}
+
+# filter out paths that are not files
+# input $@, output via echo;
+# note: pass `-` for stdin
+# note: outputs nothing if all input files are "bad" (eg. not existing), but it
+#	is left for the caller to decide if this is an error condition;
+# note: whitespace within a filename is considered "bad" and is an error.
+function filter-out-bad-files() {
+	if [[ $# = 1 && "$1" = '-' ]]; then
+		echo -
+		return 0
+	fi
+	if [ $# = 0 ]; then
+		die 10 "no files passed, use '-' when reading from pipe (|)"
+	fi
+	local any=0 diagmsgs=/dev/stderr re=$'[\t \n]'
+	[ -n "${QUIET_COMPAT-}" ] && diagmsgs=/dev/null
+	for x in "$@"; do
+		if [ -e "$x" ]; then
+			if [[ "$x" =~ $re ]]; then
+				die 11 "err: filename contains whitespace: $x."
+			fi
+			echo "$x"
+			any=1
+		else
+			echo >&"$diagmsgs" filtering "$x" out
+		fi
+	done
+	if [ $any = 0 ]; then
+		echo >&"$diagmsgs" 'all files (for given query) filtered out'
+	fi
+}
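+
+# Illustrative usage (the missing path is hypothetical):
+#   files="$(filter-out-bad-files include/linux/pci.h no/such/file.h)"
+# echoes "include/linux/pci.h" and reports "filtering no/such/file.h out" on
+# stderr (silenced when QUIET_COMPAT is set).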
+
+# Basics of regexp explained, as a reference for mostly-C programmers:
+# (bash) "regexp-$VAR-regexp"  - bash's VARs are placed into "QUOTED" strings
+# /\);?$/       - match end of function declaration, $ is end of string
+# ^[ \t]*       - (heuristic), anything but a comment, eg to exclude function docs
+# /STH/, /END/  - (awk), print all lines since STH matched, up to END, inclusive
+
+# "Whitespace only"
+WB='[ \t\n]'
+
+# Helpers below print the thing that is looked for, for further grep'ping/etc.
+# That simplifies process of excluding comments or spares us state machine impl.
+#
+# We take advantage of current/common linux codebase formatting here.
+#
+# Functions in this section require input file/s passed as args
+# (usually one, but more could be supplied in case of renames in kernel),
+# '-' could be used as an (only) file argument to read from stdin/pipe.
+
+# wrapper over find-something-decl() functions below, to avoid repetition
+# pass $what as $1, $end as $2, and $files to look in as rest of args
+function find-decl() {
+	test $# -ge 3 # ensure that there are at least 3 params
+	local what end files
+	what="$1"
+	end="$2"
+	shift 2
+	files="$(filter-out-bad-files "$@")" || die
+	if [ -z "$files" ]; then
+		return 0
+	fi
+	# shellcheck disable=SC2086
+	awk "
+		/^$WB*\*/ {next}
+		$what, $end
+	" $files
+}
+
+# yield $1 function declaration (signature), don't pass return type in $1
+# looks only in files specified ($2, $3...)
+function find-fun-decl() {
+	test $# -ge 2
+	local what end
+	what="/$WB*([(]\*)?$1$WB*($|[()])/"
+	end='/\);?$/'
+	shift
+	find-decl "$what" "$end" "$@"
+}
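+
+# Example (illustrative): print the declaration of netif_napi_add(), assuming
+# the (hypothetical) $KSRC variable points at a kernel source tree:
+#   find-fun-decl netif_napi_add "$KSRC/include/linux/netdevice.h"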
+
+# yield $1 enum declaration (type/body)
+function find-enum-decl() {
+	test $# -ge 2
+	local what end
+	what="/^$WB*enum$WB+$1"' \{$/'
+	end='/\};$/'
+	shift
+	find-decl "$what" "$end" "$@"
+}
+
+# yield $1 struct declaration (type/body)
+function find-struct-decl() {
+	test $# -ge 2
+	local what end
+	what="/^$WB*struct$WB+$1"' \{$/'
+	end='/^\};$/' # that's (^) different from enum-decl
+	shift
+	find-decl "$what" "$end" "$@"
+}
+
+# yield first line of $1 macro definition
+function find-macro-decl() {
+	test $# -ge 2
+	local what end
+	# only unindented defines, only whole-word match
+	what="/^#define$WB+$1"'([ \t\(]|$)/'
+	end=1 # only first line; use find-macro-implementation-decl for full body
+	shift
+	find-decl "$what" "$end" "$@"
+}
+
+# yield full macro implementation
+function find-macro-implementation-decl() {
+	test $# -ge 2
+	local what end
+	# only unindented defines, only whole-word match
+	what="/^#define$WB+$1"'([ \t\(]|$)/'
+	# full implementation, until a line not ending in a backslash.
+	# Does not handle macros with comments embedded within the definition.
+	end='/[^\\]$/'
+	shift
+	find-decl "$what" "$end" "$@"
+}
+
+# yield first line of $1 typedef definition (simple typedefs only)
+# this probably won't handle typedef struct { \n int foo;\n};
+function find-typedef-decl() {
+	test $# -ge 2
+	local what end
+	what="/^typedef .* $1"';$/'
+	end=1
+	shift
+	find-decl "$what" "$end" "$@"
+}
+
+# gen() - DSL-like function to wrap around all the other
+#
+# syntax:
+#   gen DEFINE if (KIND [METHOD of]) NAME [(matches|lacks) PATTERN|absent] in FILES
+#
+# where:
+#   DEFINE is the HAVE_ or NEED_ #define to print;
+#   `if` is there just to make it read better and to make the syntax easier
+#     to check;
+#
+#   NAME is the name for what we are looking for;
+#
+#   KIND specifies what kind of declaration/definition we are looking for,
+#      could be: fun, enum, struct, method, macro, typedef,
+#      'implementation of macro'
+#   for KIND=method, we are looking for function ptr named METHOD in struct
+#     named NAME (two optional args are then necessary (METHOD & of));
+#
+#   for KIND='implementation of macro' we are looking for the full
+#     implementation of the macro, not just its first line. This is usually
+#     combined with "matches" or "lacks".
+#
+#   next [optional] args could be used:
+#     matches PATTERN - use to grep for the PATTERN within definition
+#       (eg, for ext_ack param)
+#     lacks - use to add #define only if there is no match of the PATTERN,
+#       *but* the NAME is *found*
+#     absent - the NAME that we grep for must not be found
+#       (ie: the function does not exist)
+#
+#     without these optional params, the behavior is the same as with
+#       `matches .` - use to grep just for existence of NAME;
+#
+#   `in` is there to ease syntax, similar to `if` before.
+#
+#   FILES is just a space-separated list of files to look in,
+#    or a single dash (-) for stdin.
+#
+# PATTERN is an awk pattern, it will be wrapped by two slashes (/)
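+#
+# Example invocations (illustrative only; the flag names and header paths
+# below are hypothetical):
+#   gen HAVE_FOO_BAR if fun foo_bar in "$KSRC/include/linux/foo.h"
+#   gen NEED_FOO_EXTACK if fun foo_set lacks extack in "$KSRC/include/linux/foo.h"
+#   gen HAVE_NDO_FOO if method ndo_foo of net_device_ops in "$KSRC/include/linux/netdevice.h"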
+function gen() {
+	test $# -ge 6 || die 20 "too few arguments, $# given, at least 6 needed"
+	local define if_kw kind name in_kw # mandatory
+	local of_kw method_name operator pattern # optional
+	local src_line="${BASH_SOURCE[0]}:${BASH_LINENO[0]}"
+	define="$1"
+	if_kw="$2"
+	kind="$3"
+	local orig_args_cnt=$#
+	shift 3
+	[ "$if_kw" != if ] && die 21 "$src_line: 'if' keyword expected, '$if_kw' given"
+	case "$kind" in
+	fun|enum|struct|macro|typedef)
+		name="$1"
+		shift
+	;;
+	method)
+		test $# -ge 5 || die 22 "$src_line: too few arguments, $orig_args_cnt given, at least 8 needed"
+		method_name="$1"
+		of_kw="$2"
+		name="$3"
+		shift 3
+		[ "$of_kw" != of ] && die 23 "$src_line: 'of' keyword expected, '$of_kw' given"
+	;;
+	implementation)
+		test $# -ge 5 || die 28 "$src_line: too few arguments, $orig_args_cnt given, at least 8 needed"
+		of_kw="$1"
+		kind="$2"
+		name="$3"
+		shift 3
+		[ "$of_kw" != of ] && die 29 "$src_line: 'of' keyword expected, '$of_kw' given"
+		[ "$kind" != macro ] && die 30 "$src_line: implementation only supports 'macro', '$kind' given"
+		kind=macro-implementation
+	;;
+	*) die 24 "$src_line: unknown KIND ($kind) to look for" ;;
+	esac
+	operator="$1"
+	case "$operator" in
+	absent)
+		pattern='.'
+		in_kw="$2"
+		shift 2
+	;;
+	matches|lacks)
+		pattern="$2"
+		in_kw="$3"
+		shift 3
+	;;
+	in)
+		operator=matches
+		pattern='.'
+		in_kw=in
+		shift
+	;;
+	*) die 25 "$src_line: unknown OPERATOR ($operator) to look for" ;;
+	esac
+	[ "$in_kw" != in ] && die 26 "$src_line: 'in' keyword expected, '$in_kw' given"
+	test $# -ge 1 || die 27 "$src_line: too few arguments, at least one filename expected"
+
+	local first_decl=
+	if [ "$kind" = method ]; then
+		first_decl="$(find-struct-decl "$name" "$@")" || exit 28
+		# prepare params for next lookup phase
+		set -- - # overwrite $@ to be single dash (-)
+		name="$method_name"
+		kind=fun
+	elif [[ $# = 1 && "$1" = '-' ]]; then
+		# avoid losing stdin provided to gen() due to redirection (<<<)
+		first_decl="$(cat -)"
+	fi
+
+	# lookup the NAME
+	local body
+	body="$(find-$kind-decl "$name" "$@" <<< "$first_decl")" || exit 29
+	awk -v define="$define" -v pattern="$pattern" -v "$operator"=1 '
+		/./ { not_empty = 1 }
+		$0 ~ pattern { found = 1 }
+		END {
+			if (lacks && !found && not_empty || matches && found || absent && !found)
+				print "#define", define
+		}
+	' <<< "$body"
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/kcompat_defs.h b/drivers/net/ethernet/mucse/rnpgbe/kcompat_defs.h
new file mode 100755
index 0000000000000000000000000000000000000000..edba24e54d0517aa69e457fc4c81d50a2f74b986
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/kcompat_defs.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _KCOMPAT_DEFS_H_
+#define _KCOMPAT_DEFS_H_
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#else
+#ifndef KERNEL_VERSION
+#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#endif
+#endif /* LINUX_VERSION_CODE */
+
+#ifndef UTS_RELEASE
+#include <generated/utsrelease.h>
+#endif
+
+/*
+ * Include the definitions file for HAVE/NEED flags for the standard upstream
+ * kernels.
+ *
+ * Then, based on the distribution we detect, load the distribution specific
+ * definitions file that customizes the definitions for the target
+ * distribution.
+ */
+#include "kcompat_std_defs.h"
+
+#ifdef CONFIG_SUSE_KERNEL
+#include "kcompat_sles_defs.h"
+#elif UBUNTU_VERSION_CODE
+#include "kcompat_ubuntu_defs.h"
+#elif RHEL_RELEASE_CODE
+#include "kcompat_rhel_defs.h"
+#else
+#if defined(KYLIN_OS) || defined(CONFIG_KYLINOS_SERVER) ||                     \
+	defined(CONFIG_KYLINOS_DESKTOP)
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 4, 130))
+// Kylin 4.4.131
+#define NONEED_NAPI_CONSUME_SKB
+#define NONEED_CSUM_REPLACE_BY_DIFF
+#define NONEED_PCI_REQUEST_IO_REGIONS
+#define NONEED_ETH_TYPE_VLAN
+#define NONEED_UUID_SIZE
+//#define FEITENG_4_4_131
+#endif
+
+#if defined(KYLIN_RELEASE_CODE)
+#if (KYLIN_RELEASE_CODE <= KYLIN_RELEASE_VERSION(10, 2))
+#define NEED_SKB_FRAG_OFF
+#define NEED_SKB_FRAG_OFF_ADD
+#else
+#undef NEED_NETDEV_TX_SENT_QUEUE
+#undef NEED_SKB_FRAG_OFF
+#undef NEED_SKB_FRAG_OFF_ADD
+#endif
+#endif
+
+#endif
+
+#endif
+
+#include "kcompat_generated_defs.h"
+
+#endif /* _KCOMPAT_DEFS_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/kcompat_gcc.h b/drivers/net/ethernet/mucse/rnpgbe/kcompat_gcc.h
new file mode 100755
index 0000000000000000000000000000000000000000..d07ea3d7be1c185698b8e06e0b8ad727acf7accf
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/kcompat_gcc.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _KCOMPAT_GCC_H_
+#define _KCOMPAT_GCC_H_
+
+#ifndef GCC_VERSION
+#define GCC_VERSION                                                            \
+	(__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+#endif /* GCC_VERSION */
+
+#ifdef __has_attribute
+#if __has_attribute(__fallthrough__)
+#define fallthrough __attribute__((__fallthrough__))
+#else
+#define fallthrough                                                            \
+	do {                                                                   \
+	} while (0) /* fallthrough */
+#endif /* __has_attribute(fallthrough) */
+#else
+#define fallthrough                                                            \
+	do {                                                                   \
+	} while (0) /* fallthrough */
+#endif /* __has_attribute */
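+
+/* Example use of the fallthrough marker defined above (illustrative only;
+ * the functions and variables are hypothetical):
+ *
+ *	switch (speed) {
+ *	case SPEED_1000:
+ *		setup_1g(hw);
+ *		fallthrough;
+ *	default:
+ *		setup_common(hw);
+ *	}
+ */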
+
+/* Backport macros for controlling GCC diagnostics */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0))
+
+/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */
+#if GCC_VERSION >= 40600
+#define __diag_str1(s) #s
+#define __diag_str(s) __diag_str1(s)
+#define __diag(s) _Pragma(__diag_str(GCC diagnostic s))
+#else
+#define __diag(s)
+#endif /* GCC_VERSION >= 4.6 */
+#define __diag_push() __diag(push)
+#define __diag_pop() __diag(pop)
+#endif /* LINUX_VERSION < 4.18.0 */
+
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
+#if GCC_VERSION < 50000
+/* Workaround for gcc bug - not accepting "(type)" before "{ ... }" as part of
+ * static struct initializers [when used with -std=gnu11 switch]
+ * https://bugzilla.redhat.com/show_bug.cgi?id=1672652
+ *
+ * The fix was backported to gcc 4.8.5-39 by Red Hat and is contained in
+ * RHEL 7.7. The workaround here is to just drop that redundant (commented out
+ * below) part and redefine the kernel macros that we use.
+ */
+
+/* Since problematic code could be triggered by print-family (incl. wrappers)
+ * invocation, we have to first include headers that contain macros that we are
+ * redefining, and only later proceed with the rest of includes.
+ */
+#include 
+#include 
+#include 
+#include 
+
+#ifdef __SPIN_LOCK_INITIALIZER
+#undef __SPIN_LOCK_UNLOCKED
+#define __SPIN_LOCK_UNLOCKED(lockname)                                         \
+	/* (spinlock_t) */ __SPIN_LOCK_INITIALIZER(lockname)
+#endif /* __SPIN_LOCK_INITIALIZER */
+
+#ifdef __RAW_SPIN_LOCK_INITIALIZER
+#undef __RAW_SPIN_LOCK_UNLOCKED
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname)                                     \
+	/* (raw_spinlock_t) */ __RAW_SPIN_LOCK_INITIALIZER(lockname)
+#endif /* __RAW_SPIN_LOCK_INITIALIZER */
+
+#ifndef CONFIG_DEBUG_SPINLOCK
+/* raw_spin_lock_init() needs __RAW_SPIN_LOCK_UNLOCKED with the typecast, so
+ * keep the original impl, but add back the typecast that was dropped from
+ * __RAW_SPIN_LOCK_UNLOCKED() above.
+ */
+#undef raw_spin_lock_init
+#define raw_spin_lock_init(lock)                                               \
+	do {                                                                   \
+		*(lock) = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED(lock);      \
+	} while (0)
+#endif /* !CONFIG_DEBUG_SPINLOCK */
+
+#undef STATIC_KEY_INIT_TRUE
+#define STATIC_KEY_INIT_TRUE                                                   \
+	{                                                                      \
+		.enabled = { 1 },                                              \
+		{                                                              \
+			.type = 1UL                                            \
+		}                                                              \
+	}
+
+#undef STATIC_KEY_INIT_FALSE
+#define STATIC_KEY_INIT_FALSE                                                  \
+	{                                                                      \
+		.enabled = { 0 }                                               \
+	}
+
+#undef STATIC_KEY_TRUE_INIT
+#define STATIC_KEY_TRUE_INIT                                                   \
+	/* (struct static_key_true) */ {                                       \
+		.key = STATIC_KEY_INIT_TRUE                                    \
+	}
+
+#undef STATIC_KEY_FALSE_INIT
+#define STATIC_KEY_FALSE_INIT                                                  \
+	/* (struct static_key_false) */ {                                      \
+		.key = STATIC_KEY_INIT_FALSE                                   \
+	}
+
+#ifdef HAVE_JUMP_LABEL
+/* dd_key_init() is used (indirectly) with arg like "(STATIC_KEY_INIT_FALSE)"
+ * from DEFINE_DYNAMIC_DEBUG_METADATA(), which, depending on config has many
+ * different definitions (including helper macros).
+ * To reduce compat code, just consume parens from the arg instead copy-pasting
+ * all definitions and slightly changing them. */
+#define _KC_SLURP_PARENS(...) __VA_ARGS__
+#undef dd_key_init
+#define dd_key_init(key, init) key = _KC_SLURP_PARENS init
+#endif /* HAVE_JUMP_LABEL */
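+
+/* For reference (illustrative): with the redefinition above,
+ *	dd_key_init(key, (STATIC_KEY_INIT_FALSE))
+ * expands to
+ *	key = STATIC_KEY_INIT_FALSE
+ * i.e. the surrounding parentheses are consumed as intended.
+ */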
+
+#undef UUID_INIT
+#define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)                     \
+	{                                                                      \
+		{                                                              \
+			((a) >> 24) & 0xff, ((a) >> 16) & 0xff,                \
+				((a) >> 8) & 0xff, (a) & 0xff,                 \
+				((b) >> 8) & 0xff, (b) & 0xff,                 \
+				((c) >> 8) & 0xff, (c) & 0xff, (d0), (d1),     \
+				(d2), (d3), (d4), (d5), (d6), (d7)             \
+		}                                                              \
+	}
+
+#endif /* GCC_VERSION < 5.0 */
+#endif /* C11 */
+
+#endif /* _KCOMPAT_GCC_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/kcompat_generated_defs.h b/drivers/net/ethernet/mucse/rnpgbe/kcompat_generated_defs.h
new file mode 100644
index 0000000000000000000000000000000000000000..f3071607c9bdcf212195799521dd7f42b2064011
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/kcompat_generated_defs.h
@@ -0,0 +1,41 @@
+/* Autogenerated for KSRC=/lib/modules/5.15.0-25-generic/build via kcompat-generator.sh */
+#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS
+#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW
+#define HAVE_DEVLINK_HEALTH
+#define HAVE_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER
+#define HAVE_DEVLINK_HEALTH_OPS_EXTACK
+#define HAVE_DEVLINK_INFO_DRIVER_NAME_PUT
+#define HAVE_DEVLINK_PARAMS
+#define HAVE_DEVLINK_PARAMS_PUBLISH
+#define HAVE_DEVLINK_PORT_NEW
+#define HAVE_DEVLINK_PORT_SPLIT
+#define HAVE_DEVLINK_PORT_SPLIT_EXTACK
+#define HAVE_DEVLINK_PORT_TYPE_ETH_HAS_NETDEV
+#define HAVE_DEVLINK_REGIONS
+#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT
+#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS
+#define HAVE_DEVLINK_RELOAD_ENABLE_DISABLE
+#define HAVE_DEVLINK_PORT_FLAVOUR_PCI_SF
+#define HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT
+#define NEED_DEVLINK_RESOURCES_UNREGISTER_NO_RESOURCE
+#define NEED_DEVLINK_TO_DEV
+#define NEED_DEVLINK_UNLOCKED_RESOURCE
+#define HAVE_NDO_ETH_IOCTL
+#define HAVE_NDO_FDB_ADD_VID
+#define HAVE_NDO_GET_DEVLINK_PORT
+#define NEED_NETIF_NAPI_ADD_NO_WEIGHT
+#define HAVE_ETHTOOL_COALESCE_EXTACK
+#define HAVE_ETHTOOL_FLOW_RSS
+#define HAVE_XDP_DO_FLUSH
+#define NEED_NO_NETDEV_PROG_XDP_WARN_ACTION
+#define HAVE_FLOW_DISSECTOR_KEY_CVLAN
+#define HAVE_CDEV_DEVICE
+#define HAVE_POLL_T
+#define HAVE_STREAM_OPEN
+#define NEED_CLASS_CREATE_WITH_MODULE_PARAM
+#define HAVE_GNSS_MODULE
+#define HAVE_PER_VF_MSIX_SYSFS
+#define HAVE_STRUCT_PCI_DEV_PTM_ENABLED
+#define HAVE_PTP_CLOCK_INFO_ADJFINE
+#define NEED_DIFF_BY_SCALED_PPM
diff --git a/drivers/net/ethernet/mucse/rnpgbe/kcompat_impl.h b/drivers/net/ethernet/mucse/rnpgbe/kcompat_impl.h
new file mode 100755
index 0000000000000000000000000000000000000000..2803674162cf4ebdca9e2e66980e50188ed80449
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/kcompat_impl.h
@@ -0,0 +1,1006 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _KCOMPAT_IMPL_H_
+#define _KCOMPAT_IMPL_H_
+
+/* This file contains implementations of backports from various kernels. It
+ * must rely only on NEED_ and HAVE_ checks. It must not make
+ * any checks to determine the kernel version when deciding whether to 
+ * include an implementation.
+ *
+ * All new implementations must go in this file, and legacy implementations
+ * should be migrated to the new format over time.
+ */
+
+/*
+ * generic network stack functions
+ */
+
+/* NEED_NETDEV_TXQ_BQL_PREFETCH
+ *
+ * functions
+ * netdev_txq_bql_complete_prefetchw()
+ * netdev_txq_bql_enqueue_prefetchw()
+ *
+ * were added in kernel 4.20 upstream commit
+ * 535114539bb2 ("net: add netdev_txq_bql_{enqueue, complete}_prefetchw()
+ * helpers")
+ */
+#ifdef NEED_NETDEV_TXQ_BQL_PREFETCH
+/**
+ *      netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
+ *      @dev_queue: pointer to transmit queue
+ *
+ * BQL enabled drivers might use this helper in their ndo_start_xmit(),
+ * to give appropriate hint to the CPU.
+ */
+static inline void
+netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
+{
+#ifdef CONFIG_BQL
+	prefetchw(&dev_queue->dql.num_queued);
+#endif
+}
+
+/**
+ *      netdev_txq_bql_complete_prefetchw - prefetch bql data for write
+ *      @dev_queue: pointer to transmit queue
+ *
+ * BQL enabled drivers might use this helper in their TX completion path,
+ * to give appropriate hint to the CPU.
+ */
+static inline void
+netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
+{
+#ifdef CONFIG_BQL
+	prefetchw(&dev_queue->dql.limit);
+#endif
+}
+#endif /* NEED_NETDEV_TXQ_BQL_PREFETCH */
+
+/* NEED_NETDEV_TX_SENT_QUEUE
+ *
+ * __netdev_tx_sent_queue was added in kernel 4.20 upstream commit
+ * 3e59020abf0f ("net: bql: add __netdev_tx_sent_queue()")
+ */
+#ifdef NEED_NETDEV_TX_SENT_QUEUE
+/* Variant of netdev_tx_sent_queue() for drivers that are aware
+ * that they should not test BQL status themselves.
+ * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
+ * skb of a batch.
+ * Returns true if the doorbell must be used to kick the NIC.
+ */
+static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
+					  unsigned int bytes, bool xmit_more)
+{
+	if (xmit_more) {
+#ifdef CONFIG_BQL
+		dql_queued(&dev_queue->dql, bytes);
+#endif
+		return netif_tx_queue_stopped(dev_queue);
+	}
+	netdev_tx_sent_queue(dev_queue, bytes);
+	return true;
+}
+#endif /* NEED_NETDEV_TX_SENT_QUEUE */
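+
+/* Typical use in a driver's xmit path (illustrative; txq, xmit_more, db_val
+ * and ring are hypothetical):
+ *
+ *	if (__netdev_tx_sent_queue(txq, skb->len, xmit_more))
+ *		writel(db_val, ring->tail);
+ */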
+
+/* NEED_NET_PREFETCH
+ *
+ * net_prefetch was introduced by commit f468f21b7af0 ("net: Take common
+ * prefetch code structure into a function")
+ *
+ * This function is trivial to re-implement in full.
+ */
+#ifdef NEED_NET_PREFETCH
+static inline void net_prefetch(void *p)
+{
+	prefetch(p);
+#if L1_CACHE_BYTES < 128
+	prefetch((u8 *)p + L1_CACHE_BYTES);
+#endif
+}
+#endif /* NEED_NET_PREFETCH */
+
+/* NEED_SKB_FRAG_OFF and NEED_SKB_FRAG_OFF_ADD
+ *
+ * skb_frag_off and skb_frag_off_add were added in upstream commit
+ * 7240b60c98d6 ("linux: Add skb_frag_t page_offset accessors")
+ *
+ * Implementing the wrappers directly for older kernels which still have the
+ * old implementation of skb_frag_t is trivial.
+ *
+ * LTS 4.19 backported the define for skb_frag_off in 4.19.201.
+ * d94d95ae0dd0 ("gro: ensure frag0 meets IP header alignment")
+ * Need to exclude defining skb_frag_off for 4.19.X where X > 200
+ */
+#ifdef NEED_SKB_FRAG_OFF
+static inline unsigned int skb_frag_off(const skb_frag_t *frag)
+{
+	return frag->page_offset;
+}
+#endif /* NEED_SKB_FRAG_OFF */
+#ifdef NEED_SKB_FRAG_OFF_ADD
+static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
+{
+	frag->page_offset += delta;
+}
+#endif /* NEED_SKB_FRAG_OFF_ADD */
+
+/*
+ * NETIF_F_HW_L2FW_DOFFLOAD related functions
+ *
+ * Support for NETIF_F_HW_L2FW_DOFFLOAD was first introduced upstream by
+ * commit a6cc0cfa72e0 ("net: Add layer 2 hardware acceleration operations for
+ * macvlan devices")
+ */
+#ifdef NETIF_F_HW_L2FW_DOFFLOAD
+
+#include <linux/if_macvlan.h>
+
+/* NEED_MACVLAN_ACCEL_PRIV
+ *
+ * macvlan_accel_priv is an accessor function that replaced direct access to
+ * the macvlan->fwd_priv variable. It was introduced in commit 7d775f63470c
+ * ("macvlan: Rename fwd_priv to accel_priv and add accessor function")
+ *
+ * Implement the new wrapper name by simply accessing the older
+ * macvlan->fwd_priv name.
+ */
+#ifdef NEED_MACVLAN_ACCEL_PRIV
+static inline void *macvlan_accel_priv(struct net_device *dev)
+{
+	struct macvlan_dev *macvlan = netdev_priv(dev);
+
+	return macvlan->fwd_priv;
+}
+#endif /* NEED_MACVLAN_ACCEL_PRIV */
+
+/* NEED_MACVLAN_RELEASE_L2FW_OFFLOAD
+ *
+ * macvlan_release_l2fw_offload was introduced upstream by commit 53cd4d8e4dfb
+ * ("macvlan: Provide function for interfaces to release HW offload")
+ *
+ * Implementing this is straightforward, but we must be careful to use
+ * fwd_priv instead of accel_priv. Note that both the change to accel_priv and
+ * introduction of this function happened in the same release.
+ */
+#ifdef NEED_MACVLAN_RELEASE_L2FW_OFFLOAD
+static inline int macvlan_release_l2fw_offload(struct net_device *dev)
+{
+	struct macvlan_dev *macvlan = netdev_priv(dev);
+
+	macvlan->fwd_priv = NULL;
+	return dev_uc_add(macvlan->lowerdev, dev->dev_addr);
+}
+#endif /* NEED_MACVLAN_RELEASE_L2FW_OFFLOAD */
+
+/* NEED_MACVLAN_SUPPORTS_DEST_FILTER
+ *
+ * macvlan_supports_dest_filter was introduced upstream by commit 6cb1937d4eff
+ * ("macvlan: Add function to test for destination filtering support")
+ *
+ * The implementation doesn't rely on anything new and is trivial to backport
+ * for kernels that have NETIF_F_HW_L2FW_DOFFLOAD support.
+ */
+#ifdef NEED_MACVLAN_SUPPORTS_DEST_FILTER
+static inline bool macvlan_supports_dest_filter(struct net_device *dev)
+{
+	struct macvlan_dev *macvlan = netdev_priv(dev);
+
+	return macvlan->mode == MACVLAN_MODE_PRIVATE ||
+	       macvlan->mode == MACVLAN_MODE_VEPA ||
+	       macvlan->mode == MACVLAN_MODE_BRIDGE;
+}
+#endif /* NEED_MACVLAN_SUPPORTS_DEST_FILTER */
+
+#endif /* NETIF_F_HW_L2FW_DOFFLOAD */
+
+/*
+ * tc functions
+ */
+
+/* NEED_FLOW_INDR_BLOCK_CB_REGISTER
+ *
+ * __flow_indr_block_cb_register and __flow_indr_block_cb_unregister were
+ * added in upstream commit 4e481908c51b ("flow_offload: move tc indirect
+ * block to flow offload")
+ *
+ * This was a simple rename so we can just translate from the old
+ * naming scheme with a macro.
+ */
+#ifdef NEED_FLOW_INDR_BLOCK_CB_REGISTER
+#define __flow_indr_block_cb_register __tc_indr_block_cb_register
+#define __flow_indr_block_cb_unregister __tc_indr_block_cb_unregister
+#endif
+
+/*
+ * devlink support
+ */
+#if IS_ENABLED(CONFIG_NET_DEVLINK)
+
+#include <net/devlink.h>
+
+#ifdef HAVE_DEVLINK_REGIONS
+/* NEED_DEVLINK_REGION_CREATE_OPS
+ *
+ * The ops parameter to devlink_region_create was added by commit e8937681797c
+ * ("devlink: prepare to support region operations")
+ *
+ * For older kernels, define _kc_devlink_region_create that takes an ops
+ * parameter, and calls the old implementation function by extracting the name
+ * from the structure.
+ */
+#ifdef NEED_DEVLINK_REGION_CREATE_OPS
+struct devlink_region_ops {
+	const char *name;
+	void (*destructor)(const void *data);
+};
+
+static inline struct devlink_region *
+_kc_devlink_region_create(struct devlink *devlink,
+			  const struct devlink_region_ops *ops,
+			  u32 region_max_snapshots, u64 region_size)
+{
+	return devlink_region_create(devlink, ops->name, region_max_snapshots,
+				     region_size);
+}
+
+#define devlink_region_create _kc_devlink_region_create
+#endif /* NEED_DEVLINK_REGION_CREATE_OPS */
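+
+/* Example (illustrative; my_region_ops, devlink, region and size are
+ * hypothetical) caller code that works with either kernel flavour:
+ *
+ *	static const struct devlink_region_ops my_region_ops = {
+ *		.name = "nvm-flash",
+ *		.destructor = vfree,
+ *	};
+ *
+ *	region = devlink_region_create(devlink, &my_region_ops, 1, size);
+ */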
+#endif /* HAVE_DEVLINK_REGIONS */
+
+/* NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY
+ *
+ * devlink_flash_update_status_notify, _begin_notify, and _end_notify were
+ * added by upstream commit 191ed2024de9 ("devlink: allow driver to update
+ * progress of flash update")
+ *
+ * For older kernels that lack the netlink messages, convert the functions
+ * into no-ops.
+ */
+#ifdef NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY
+static inline void
+devlink_flash_update_begin_notify(struct devlink __always_unused *devlink)
+{
+}
+
+static inline void
+devlink_flash_update_end_notify(struct devlink __always_unused *devlink)
+{
+}
+
+static inline void
+devlink_flash_update_status_notify(struct devlink __always_unused *devlink,
+				   const char __always_unused *status_msg,
+				   const char __always_unused *component,
+				   unsigned long __always_unused done,
+				   unsigned long __always_unused total)
+{
+}
+#endif /* NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY */
+
+#ifndef HAVE_DEVLINK_FLASH_UPDATE_PARAMS
+struct devlink_flash_update_params {
+	const char *file_name;
+	const char *component;
+	u32 overwrite_mask;
+};
+
+#ifndef DEVLINK_FLASH_OVERWRITE_SETTINGS
+#define DEVLINK_FLASH_OVERWRITE_SETTINGS BIT(0)
+#endif
+
+#ifndef DEVLINK_FLASH_OVERWRITE_IDENTIFIERS
+#define DEVLINK_FLASH_OVERWRITE_IDENTIFIERS BIT(1)
+#endif
+#endif /* !HAVE_DEVLINK_FLASH_UPDATE_PARAMS */
+
+/* NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY
+ *
+ * devlink_flash_update_timeout_notify was added by upstream commit
+ * f92970c694b3 ("devlink: add timeout information to status_notify").
+ *
+ * For older kernels, just convert timeout notifications into regular status
+ * notification messages without timeout information.
+ */
+#ifdef NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY
+static inline void devlink_flash_update_timeout_notify(
+	struct devlink *devlink, const char *status_msg, const char *component,
+	unsigned long __always_unused timeout)
+{
+	devlink_flash_update_status_notify(devlink, status_msg, component, 0,
+					   0);
+}
+#endif /* NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY */
+
+/*
+ * NEED_DEVLINK_PORT_ATTRS_SET_STRUCT
+ *
+ * HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR
+ * HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID
+ *
+ * devlink_port_attrs_set was introduced by commit b9ffcbaf56d3 ("devlink:
+ * introduce devlink_port_attrs_set")
+ *
+ * Its function signature has changed multiple times over several kernel
+ * releases:
+ *
+ * commit 5ec1380a21bb ("devlink: extend attrs_set for setting port
+ * flavours") added the ability to set port flavour. (Note that there is no
+ * official kernel release with devlink_port_attrs_set without the flavour
+ * argument, as they were introduced in the same series.)
+ *
+ * commit bec5267cded2 ("net: devlink: extend port attrs for switch ID") added
+ * the ability to set the switch ID (HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID)
+ *
+ * Finally commit 71ad8d55f8e5 ("devlink: Replace devlink_port_attrs_set
+ * parameters with a struct") refactored to pass devlink_port_attrs struct
+ * instead of individual parameters. (!NEED_DEVLINK_PORT_ATTRS_SET_STRUCT)
+ *
+ * We want core drivers to just use the latest form that takes
+ * a devlink_port_attrs structure. Note that this structure did exist as part
+ * of <net/devlink.h> but was never used directly by driver code prior to the
+ * function parameter change. For this reason, the implementation always
+ * relies on _kc_devlink_port_attrs instead of what was defined in the kernel.
+ */
+#ifdef NEED_DEVLINK_PORT_ATTRS_SET_STRUCT
+
+#ifndef HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR
+enum devlink_port_flavour {
+	DEVLINK_PORT_FLAVOUR_PHYSICAL,
+	DEVLINK_PORT_FLAVOUR_CPU,
+	DEVLINK_PORT_FLAVOUR_DSA,
+	DEVLINK_PORT_FLAVOUR_PCI_PF,
+	DEVLINK_PORT_FLAVOUR_PCI_VF,
+};
+#endif
+
+struct _kc_devlink_port_phys_attrs {
+	u32 port_number;
+	u32 split_subport_number;
+};
+
+struct _kc_devlink_port_pci_pf_attrs {
+	u16 pf;
+};
+
+struct _kc_devlink_port_pci_vf_attrs {
+	u16 pf;
+	u16 vf;
+};
+
+struct _kc_devlink_port_attrs {
+	u8 split : 1, splittable : 1;
+	u32 lanes;
+	enum devlink_port_flavour flavour;
+	struct netdev_phys_item_id switch_id;
+	union {
+		struct _kc_devlink_port_phys_attrs phys;
+		struct _kc_devlink_port_pci_pf_attrs pci_pf;
+		struct _kc_devlink_port_pci_vf_attrs pci_vf;
+	};
+};
+
+#define devlink_port_attrs _kc_devlink_port_attrs
+
+static inline void
+_kc_devlink_port_attrs_set(struct devlink_port *devlink_port,
+			   struct _kc_devlink_port_attrs *attrs)
+{
+#if defined(HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID)
+	devlink_port_attrs_set(devlink_port, attrs->flavour,
+			       attrs->phys.port_number, attrs->split,
+			       attrs->phys.split_subport_number,
+			       attrs->switch_id.id, attrs->switch_id.id_len);
+#elif defined(HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR)
+	devlink_port_attrs_set(devlink_port, attrs->flavour,
+			       attrs->phys.port_number, attrs->split,
+			       attrs->phys.split_subport_number);
+#else
+	if (attrs->split)
+		devlink_port_split_set(devlink_port, attrs->phys.port_number);
+#endif
+}
+
+#define devlink_port_attrs_set _kc_devlink_port_attrs_set
+
+#endif /* NEED_DEVLINK_PORT_ATTRS_SET_STRUCT */
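+
+/* Example (illustrative; pf_id and devlink_port are hypothetical) of the
+ * single calling convention drivers can then use on all kernels:
+ *
+ *	struct devlink_port_attrs attrs = {};
+ *
+ *	attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ *	attrs.phys.port_number = pf_id;
+ *	devlink_port_attrs_set(devlink_port, &attrs);
+ */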
+
+/*
+ * NEED_DEVLINK_ALLOC_SETS_DEV
+ *
+ * Since commit 919d13a7e455 ("devlink: Set device as early as possible"), the
+ * devlink device pointer is set by devlink_alloc instead of by
+ * devlink_register.
+ *
+ * devlink_alloc now includes the device pointer in its signature, while
+ * devlink_register no longer includes it.
+ *
+ * This implementation provides a replacement for devlink_alloc which will
+ * take and then silently discard the extra dev pointer.
+ *
+ * To use devlink_register, drivers must check
+ * HAVE_DEVLINK_REGISTER_SETS_DEV. Note that we can't easily provide
+ * a backport of the change to devlink_register directly. Although the dev
+ * pointer is accessible from the devlink pointer through the driver private
+ * section, it is device driver specific and is not easily accessible in
+ * compat code.
+ */
+#ifdef NEED_DEVLINK_ALLOC_SETS_DEV
+
+//static inline struct devlink *
+//_kc_devlink_alloc(const struct devlink_ops *ops, size_t priv_size,
+//		  struct device * __always_unused dev)
+//{
+//	return devlink_alloc(ops, priv_size);
+//}
+//
+//#define devlink_alloc _kc_devlink_alloc
+#endif /* NEED_DEVLINK_ALLOC_SETS_DEV */
+
+#endif /* CONFIG_NET_DEVLINK */
+
+#ifdef NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE
+/* ida_alloc(), ida_alloc_min(), ida_alloc_max(), ida_alloc_range(), and
+ * ida_free() were added in commit 5ade60dda43c ("ida: add new API").
+ *
+ * Also, using "0" as the "end" argument (3rd argument) to ida_simple_get() is
+ * considered the max value, which is why it's used in ida_alloc() and
+ * ida_alloc_min().
+ */
+static inline int ida_alloc(struct ida *ida, gfp_t gfp)
+{
+	return ida_simple_get(ida, 0, 0, gfp);
+}
+
+static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
+{
+	return ida_simple_get(ida, min, 0, gfp);
+}
+
+static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
+{
+	return ida_simple_get(ida, 0, max, gfp);
+}
+
+static inline int ida_alloc_range(struct ida *ida, unsigned int min,
+				  unsigned int max, gfp_t gfp)
+{
+	return ida_simple_get(ida, min, max, gfp);
+}
+
+static inline void ida_free(struct ida *ida, unsigned int id)
+{
+	ida_simple_remove(ida, id);
+}
+#endif /* NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE */
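+
+/* Example (illustrative; my_ida is a hypothetical driver-private IDA):
+ *
+ *	int id = ida_alloc(&my_ida, GFP_KERNEL);
+ *
+ *	if (id < 0)
+ *		return id;
+ *	...
+ *	ida_free(&my_ida, id);
+ */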
+
+/*
+ * dev_printk implementations
+ */
+
+/* NEED_DEV_PRINTK_ONCE
+ *
+ * The dev_*_once family of printk functions was introduced by commit
+ * e135303bd5be ("device: Add dev_<level>_once variants")
+ *
+ * The implementation is very straightforward so we will just implement them
+ * as-is here.
+ */
+#ifdef NEED_DEV_PRINTK_ONCE
+#ifdef CONFIG_PRINTK
+#define dev_level_once(dev_level, dev, fmt, ...)                               \
+	do {                                                                   \
+		static bool __print_once __read_mostly;                        \
+                                                                               \
+		if (!__print_once) {                                           \
+			__print_once = true;                                   \
+			dev_level(dev, fmt, ##__VA_ARGS__);                    \
+		}                                                              \
+	} while (0)
+#else
+#define dev_level_once(dev_level, dev, fmt, ...)                               \
+	do {                                                                   \
+		if (0)                                                         \
+			dev_level(dev, fmt, ##__VA_ARGS__);                    \
+	} while (0)
+#endif
+
+#define dev_emerg_once(dev, fmt, ...)                                          \
+	dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
+#define dev_alert_once(dev, fmt, ...)                                          \
+	dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
+#define dev_crit_once(dev, fmt, ...)                                           \
+	dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
+#define dev_err_once(dev, fmt, ...)                                            \
+	dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
+#define dev_warn_once(dev, fmt, ...)                                           \
+	dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
+#define dev_notice_once(dev, fmt, ...)                                         \
+	dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
+#define dev_info_once(dev, fmt, ...)                                           \
+	dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
+#define dev_dbg_once(dev, fmt, ...)                                            \
+	dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
+#endif /* NEED_DEV_PRINTK_ONCE */
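+
+/* Example (illustrative):
+ *
+ *	dev_warn_once(&pdev->dev, "firmware is older than the driver expects\n");
+ */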
+
+#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+
+/* NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0
+ *
+ * tc_cls_can_offload_and_chain0 was added by upstream commit
+ * 878db9f0f26d ("pkt_cls: add new tc cls helper to check offload flag and
+ * chain index").
+ *
+ * This patch backports this function for older kernels by calling
+ * tc_can_offload() directly.
+ */
+#ifdef NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0
+#include <net/pkt_cls.h>
+static inline bool
+tc_cls_can_offload_and_chain0(const struct net_device *dev,
+			      struct tc_cls_common_offload *common)
+{
+	if (!tc_can_offload(dev))
+		return false;
+	if (common->chain_index)
+		return false;
+
+	return true;
+}
+#endif /* NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 */
+#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */
+
+/* NEED_TC_SETUP_QDISC_MQPRIO
+ *
+ * TC_SETUP_QDISC_MQPRIO was added by upstream commit
+ * 575ed7d39e2f ("net_sch: mqprio: Change TC_SETUP_MQPRIO to
+ * TC_SETUP_QDISC_MQPRIO").
+ *
+ * For older kernels which still use TC_SETUP_MQPRIO, map the new name onto
+ * the old one.
+ */
+#ifdef NEED_TC_SETUP_QDISC_MQPRIO
+#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO
+#endif /* NEED_TC_SETUP_QDISC_MQPRIO */
+
+/*
+ * ART/TSC functions
+ */
+#ifdef HAVE_PTP_CROSSTIMESTAMP
+/* NEED_CONVERT_ART_NS_TO_TSC
+ *
+ * convert_art_ns_to_tsc was added by upstream commit fc804f65d462 ("x86/tsc:
+ * Convert ART in nanoseconds to TSC").
+ *
+ * This function is similar to convert_art_to_tsc, but expects the input in
+ * terms of nanoseconds, rather than ART cycles. We implement this by
+ * accessing the tsc_khz value and performing the proper calculation. In order
+ * to access the correct clock object on returning, we use the function
+ * convert_art_to_tsc, because the art_related_clocksource is inaccessible.
+ */
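+/* In other words (for reference), the computation below is
+ *
+ *	cycles = art_ns * tsc_khz / USEC_PER_SEC
+ *
+ * carried out as separate quotient and remainder terms so the intermediate
+ * multiplication does not overflow 64 bits for realistic time values.
+ */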
+#ifdef NEED_CONVERT_ART_NS_TO_TSC
+#ifdef CONFIG_X86
+#include <asm/tsc.h>
+
+static inline struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns)
+{
+	struct system_counterval_t system;
+	u64 tmp, res, rem;
+
+	rem = do_div(art_ns, USEC_PER_SEC);
+
+	res = art_ns * tsc_khz;
+	tmp = rem * tsc_khz;
+
+	do_div(tmp, USEC_PER_SEC);
+	res += tmp;
+
+	system = convert_art_to_tsc(art_ns);
+	system.cycles = res;
+
+	return system;
+}
+#else /* CONFIG_X86 */
+static inline struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns)
+{
+	WARN_ONCE(1, "%s is only supported on X86", __func__);
+	return (struct system_counterval_t){};
+}
+#endif /* !CONFIG_X86 */
+#endif /* NEED_CONVERT_ART_NS_TO_TSC */
+#endif /* HAVE_PTP_CROSSTIMESTAMP */
+
+/*
+ * PTP functions and definitions
+ */
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_clock.h>
+
+/* PTP_* ioctl flags
+ *
+ * PTP_PEROUT_ONE_SHOT and PTP_PEROUT_DUTY_CYCLE were added by commit
+ * f65b71aa25a6 ("ptp: add ability to configure duty cycle for periodic
+ * output")
+ *
+ * PTP_PEROUT_PHASE was added in commit b6bd41363a1c ("ptp: introduce
+ * a phase offset in the periodic output request")
+ *
+ * PTP_STRICT_FLAGS was added in commit 6138e687c7b6 ("ptp: Introduce strict
+ * checking of external time stamp options.")
+ *
+ * These flags control behavior for the periodic output PTP ioctl. For older
+ * kernels, we define the flags as 0. This allows bitmask checks on flags to
+ * work as expected, since these feature flags will become no-ops on kernels
+ * that lack support.
+ *
+ * Drivers can check if the relevant feature is actually supported by using an
+ * '#if' on the flag instead of an '#ifdef'
+ */
+#ifndef PTP_PEROUT_PHASE
+#define PTP_PEROUT_PHASE 0
+#endif
+
+#ifndef PTP_PEROUT_DUTY_CYCLE
+#define PTP_PEROUT_DUTY_CYCLE 0
+#endif
+
+#ifndef PTP_STRICT_FLAGS
+#define PTP_STRICT_FLAGS 0
+#endif
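+
+/* Example (illustrative; supported_flags is hypothetical): because the flags
+ * are plain 0 on older kernels, feature-dependent code can be guarded with
+ * '#if' and still compile there:
+ *
+ *	#if PTP_PEROUT_PHASE
+ *		supported_flags |= PTP_PEROUT_PHASE;
+ *	#endif
+ */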
+
+#endif /* CONFIG_PTP_1588_CLOCK */
+
+#ifdef NEED_BUS_FIND_DEVICE_CONST_DATA
+/* NEED_BUS_FIND_DEVICE_CONST_DATA
+ *
+ * bus_find_device() was updated in upstream commit 418e3ea157ef
+ * ("bus_find_device: Unify the match callback with class_find_device")
+ * to take a const void *data parameter and also have the match() function
+ * passed in take a const void *data parameter.
+ *
+ * All of the kcompat code below makes it so the caller can always just call
+ * bus_find_device() according to the upstream kernel without having to worry
+ * about const vs. non-const arguments.
+ */
+struct _kc_bus_find_device_custom_data {
+	const void *real_data;
+	int (*real_match)(struct device *dev, const void *data);
+};
+
+static inline int _kc_bus_find_device_wrapped_match(struct device *dev,
+						    void *data)
+{
+	struct _kc_bus_find_device_custom_data *custom_data = data;
+
+	return custom_data->real_match(dev, custom_data->real_data);
+}
+
+static inline struct device *
+_kc_bus_find_device(struct bus_type *type, struct device *start,
+		    const void *data,
+		    int (*match)(struct device *dev, const void *data))
+{
+	struct _kc_bus_find_device_custom_data custom_data = {};
+
+	custom_data.real_data = data;
+	custom_data.real_match = match;
+
+	return bus_find_device(type, start, &custom_data,
+			       _kc_bus_find_device_wrapped_match);
+}
+
+/* force callers of bus_find_device() to call _kc_bus_find_device() on kernels
+ * where NEED_BUS_FIND_DEVICE_CONST_DATA is defined
+ */
+#define bus_find_device(type, start, data, match)                              \
+	_kc_bus_find_device(type, start, data, match)
+#endif /* NEED_BUS_FIND_DEVICE_CONST_DATA */
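+
+/* Example (illustrative; my_match() and name are hypothetical) - the same
+ * call works whether or not the wrapper above is in effect:
+ *
+ *	static int my_match(struct device *dev, const void *data)
+ *	{
+ *		return !strcmp(dev_name(dev), data);
+ *	}
+ *
+ *	dev = bus_find_device(&pci_bus_type, NULL, name, my_match);
+ */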
+
+#ifdef NEED_CPU_LATENCY_QOS_RENAME
+/* NEED_CPU_LATENCY_QOS_RENAME
+ *
+ * The PM_QOS_CPU_DMA_LATENCY definition was removed in 67b06ba01857 ("PM:
+ * QoS: Drop PM_QOS_CPU_DMA_LATENCY and rename related functions"). The
+ * related functions were renamed to use "cpu_latency_qos_" prefix.
+ *
+ * Use wrapper functions to map the new API onto the API available in older
+ * kernels.
+ */
+#include <linux/pm_qos.h>
+static inline void cpu_latency_qos_add_request(struct pm_qos_request *req,
+					       s32 value)
+{
+	pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY, value);
+}
+
+static inline void cpu_latency_qos_update_request(struct pm_qos_request *req,
+						  s32 new_value)
+{
+	pm_qos_update_request(req, new_value);
+}
+
+static inline void cpu_latency_qos_remove_request(struct pm_qos_request *req)
+{
+	pm_qos_remove_request(req);
+}
+#endif /* NEED_CPU_LATENCY_QOS_RENAME */
+
+#ifdef NEED_DECLARE_STATIC_KEY_FALSE
+/* NEED_DECLARE_STATIC_KEY_FALSE
+ *
+ * DECLARE_STATIC_KEY_FALSE was added by upstream commit
+ * 525e0ac4d2b2 ("locking/static_keys: Provide DECLARE and
+ * well as DEFINE macros")
+ *
+ * This definition is needed to make xdpdrv work with more than 64 CPUs.
+ */
+#define DECLARE_STATIC_KEY_FALSE(name) extern struct static_key_false name
+#endif /* NEED_DECLARE_STATIC_KEY_FALSE */
+
+#ifdef NEED_DEFINE_STATIC_KEY_FALSE
+/* NEED_DEFINE_STATIC_KEY_FALSE
+ *
+ * DEFINE_STATIC_KEY_FALSE was added by upstream commit
+ * 11276d5306b8 ("locking/static_keys: Add a new
+ * static_key interface")
+ *
+ * This definition is needed to make xdpdrv work with more than 64 CPUs.
+ */
+#define DECLARE_STATIC_KEY_FALSE(name) extern struct static_key name
+
+#define DEFINE_STATIC_KEY_FALSE(name)                                          \
+	struct static_key name = STATIC_KEY_INIT_FALSE
+#endif /* NEED_DEFINE_STATIC_KEY_FALSE */
+
+#ifdef NEED_STATIC_BRANCH
+/* NEED_STATIC_BRANCH
+ *
+ * static_branch_likely, static_branch_unlikely,
+ * static_branch_inc, static_branch_dec was added by upstream commit
+ * 11276d5306b8 ("locking/static_keys: Add a new
+ * static_key interface")
+ *
+ * This definition is needed to make xdpdrv work with more than 64 CPUs.
+ */
+#define static_branch_likely(x) likely(static_key_enabled(x))
+#define static_branch_unlikely(x) unlikely(static_key_enabled(x))
+
+#define static_branch_inc(x) static_key_slow_inc(x)
+#define static_branch_dec(x) static_key_slow_dec(x)
+
+#endif /* NEED_STATIC_BRANCH */
+
+#ifdef NEED_NETDEV_XDP_STRUCT
+#define netdev_bpf netdev_xdp
+#endif /* NEED_NETDEV_XDP_STRUCT */
+
+#ifdef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION
+#ifdef HAVE_XDP_SUPPORT
+#include 
+#endif /* HAVE_XDP_SUPPORT */
+#endif /* NEED_NO_NETDEV_PROG_XDP_WARN_ACTION */
+
+/* NEED_ETH_HW_ADDR_SET
+ *
+ * eth_hw_addr_set was added by upstream commit
+ * 48eab831ae8b ("net: create netdev->dev_addr assignment helpers")
+ *
+ * Using eth_hw_addr_set became required in 5.17, when the dev_addr field in
+ * the netdev struct was constified. See 48eab831ae8b ("net: create
+ * netdev->dev_addr assignment helpers")
+ */
+#ifdef NEED_ETH_HW_ADDR_SET
+static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
+{
+	ether_addr_copy(dev->dev_addr, addr);
+}
+#endif /* NEED_ETH_HW_ADDR_SET */
+
+/*
+ * NEED_NETIF_NAPI_ADD_NO_WEIGHT
+ *
+ * Upstream commit b48b89f9c189 ("net: drop the weight argument from
+ * netif_napi_add") removes weight argument from function call.
+ *
+ * Our drivers always used default weight, which is 64.
+ *
+ * Define NEED_NETIF_NAPI_ADD_NO_WEIGHT on kernels 3.10+ to use the old
+ * 4-argument implementation. It is not defined on 6.1+, where the new
+ * no-weight signature was introduced. RHEL 9.2 also requires the no-weight
+ * variant.
+ */
+#ifdef NEED_NETIF_NAPI_ADD_NO_WEIGHT
+static inline void _kc_netif_napi_add(struct net_device *dev,
+				      struct napi_struct *napi,
+				      int (*poll)(struct napi_struct *, int))
+{
+	return netif_napi_add(dev, napi, poll, NAPI_POLL_WEIGHT);
+}
+
+/* RHEL7 complains about redefines. Undef first, then define compat wrapper */
+#ifdef netif_napi_add
+#undef netif_napi_add
+#endif
+#define netif_napi_add _kc_netif_napi_add
+#endif /* NEED_NETIF_NAPI_ADD_NO_WEIGHT */
+
+#ifdef NEED_JIFFIES_64_TIME_IS_MACROS
+/* NEED_JIFFIES_64_TIME_IS_MACROS
+ *
+ * The jiffies64 time_is_* macros were introduced upstream by 3740dcdf8a77
+ * ("jiffies: add time comparison functions for 64 bit jiffies") in Linux 4.9.
+ *
+ * Support for 64-bit jiffies has been available since the initial import of
+ * Linux into git in 2005, so its safe to just implement the macros as-is
+ * here.
+ */
+#define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a)
+#define time_is_after_jiffies64(a) time_before64(get_jiffies_64(), a)
+#define time_is_before_eq_jiffies64(a) time_after_eq64(get_jiffies_64(), a)
+#define time_is_after_eq_jiffies64(a) time_before_eq64(get_jiffies_64(), a)
+#endif /* NEED_JIFFIES_64_TIME_IS_MACROS */
+
+#ifdef NEED_INDIRECT_CALL_WRAPPER_MACROS
+/* NEED_INDIRECT_CALL_WRAPPER_MACROS
+ *
+ * The INDIRECT_CALL_* macros were introduced upstream as upstream commit
+ * 283c16a2dfd3 ("indirect call wrappers: helpers to speed-up indirect calls
+ * of builtin") which landed in Linux 5.0
+ *
+ * These are easy to implement directly.
+ */
+#ifdef CONFIG_RETPOLINE
+#define INDIRECT_CALL_1(f, f1, ...)                                            \
+	({ likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); })
+#define INDIRECT_CALL_2(f, f2, f1, ...)                                        \
+	({                                                                     \
+		likely(f == f2) ? f2(__VA_ARGS__) :                            \
+				  INDIRECT_CALL_1(f, f1, __VA_ARGS__);         \
+	})
+
+#define INDIRECT_CALLABLE_DECLARE(f) f
+#define INDIRECT_CALLABLE_SCOPE
+#else /* !CONFIG_RETPOLINE */
+#define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALLABLE_DECLARE(f)
+#define INDIRECT_CALLABLE_SCOPE static
+#endif /* CONFIG_RETPOLINE */
+#endif /* NEED_INDIRECT_CALL_WRAPPER_MACROS */
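+
+/* Example (illustrative; ring->clean and my_clean_rings are hypothetical):
+ * call the built-in handler directly when the pointer matches it, otherwise
+ * fall back to a regular indirect call:
+ *
+ *	cleaned = INDIRECT_CALL_1(ring->clean, my_clean_rings, ring, budget);
+ */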
+
+#ifdef NEED_INDIRECT_CALL_3_AND_4
+/* NEED_INDIRECT_CALL_3_AND_4
+ * Support for the 3 and 4 call variants was added in upstream commit
+ * e678e9ddea96 ("indirect_call_wrapper: extend indirect wrapper to support up
+ * to 4 calls")
+ *
+ * These are easy to implement directly.
+ */
+
+#ifdef CONFIG_RETPOLINE
+#define INDIRECT_CALL_3(f, f3, f2, f1, ...)                                    \
+	({                                                                     \
+		likely(f == f3) ? f3(__VA_ARGS__) :                            \
+				  INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__);     \
+	})
+#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...)                                \
+	({                                                                     \
+		likely(f == f4) ? f4(__VA_ARGS__) :                            \
+				  INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__); \
+	})
+#else /* !CONFIG_RETPOLINE */
+#define INDIRECT_CALL_3(f, f3, f2, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) f(__VA_ARGS__)
+#endif /* CONFIG_RETPOLINE */
+#endif /* NEED_INDIRECT_CALL_3_AND_4 */
+
+#ifdef NEED_EXPORT_INDIRECT_CALLABLE
+/* NEED_EXPORT_INDIRECT_CALLABLE
+ *
+ * Support for EXPORT_INDIRECT_CALLABLE was added in upstream commit
+ * 0053859496ba ("net: add EXPORT_INDIRECT_CALLABLE wrapper")
+ *
+ * These are easy to implement directly.
+ */
+#ifdef CONFIG_RETPOLINE
+#define EXPORT_INDIRECT_CALLABLE(f) EXPORT_SYMBOL(f)
+#else
+#define EXPORT_INDIRECT_CALLABLE(f)
+#endif /* CONFIG_RETPOLINE */
+#endif /* NEED_EXPORT_INDIRECT_CALLABLE */
+
+/* NEED_MUL_U64_U64_DIV_U64
+ *
+ * mul_u64_u64_div_u64 was introduced in Linux 5.9 as part of commit
+ * 3dc167ba5729 ("sched/cputime: Improve cputime_adjust()")
+ */
+#ifdef NEED_MUL_U64_U64_DIV_U64
+u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
+#endif /* NEED_MUL_U64_U64_DIV_U64 */
+
+/* NEED_DIFF_BY_SCALED_PPM
+ *
+ * diff_by_scaled_ppm and adjust_by_scaled_ppm were introduced in
+ * kernel 6.1 by upstream commit 1060707e3809 ("ptp: introduce helpers
+ * to adjust by scaled parts per million").
+ */
+#if 0//def NEED_DIFF_BY_SCALED_PPM
+static inline bool
+diff_by_scaled_ppm(u64 base, long scaled_ppm, u64 *diff)
+{
+	bool negative = false;
+
+	if (scaled_ppm < 0) {
+		negative = true;
+		scaled_ppm = -scaled_ppm;
+	}
+
+	*diff = mul_u64_u64_div_u64(base, (u64)scaled_ppm, 1000000ULL << 16);
+
+	return negative;
+}
+
+static inline u64
+adjust_by_scaled_ppm(u64 base, long scaled_ppm)
+{
+	u64 diff;
+
+	if (diff_by_scaled_ppm(base, scaled_ppm, &diff))
+		return base - diff;
+
+	return base + diff;
+}
+#endif /* NEED_DIFF_BY_SCALED_PPM */
+
+#ifndef HAVE_ETHTOOL_KEEE
+#ifndef __ETHTOOL_DECLARE_LINK_MODE_MASK
+#define __ETHTOOL_DECLARE_LINK_MODE_MASK(name)	\
+	DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS)
+#endif
+struct ethtool_keee {
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertised);
+	u32	supported_u32;
+	u32	advertised_u32;
+	u32	lp_advertised_u32;
+	u32	tx_lpi_timer;
+	bool	tx_lpi_enabled;
+	bool	eee_active;
+	bool	eee_enabled;
+};
+
+void eee_to_keee(struct ethtool_keee *keee,
+		 const struct ethtool_eee *eee);
+
+bool ethtool_eee_use_linkmodes(const struct ethtool_keee *eee);
+
+void keee_to_eee(struct ethtool_eee *eee,
+		 const struct ethtool_keee *keee);
+#endif /* !HAVE_ETHTOOL_KEEE */
+
+#endif /* _KCOMPAT_IMPL_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/kcompat_overflow.h b/drivers/net/ethernet/mucse/rnpgbe/kcompat_overflow.h
new file mode 100755
index 0000000000000000000000000000000000000000..7e1f41066700d933fb1c31b6c6e67e0ffd2f227d
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/kcompat_overflow.h
@@ -0,0 +1,324 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef __LINUX_OVERFLOW_H
+#define __LINUX_OVERFLOW_H
+
+#include <linux/compiler.h>
+
+/*
+ * In the fallback code below, we need to compute the minimum and
+ * maximum values representable in a given type. These macros may also
+ * be useful elsewhere, so we provide them outside the
+ * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
+ *
+ * It would seem more obvious to do something like
+ *
+ * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
+ * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
+ *
+ * Unfortunately, the middle expressions, strictly speaking, have
+ * undefined behaviour, and at least some versions of gcc warn about
+ * the type_max expression (but not if -fsanitize=undefined is in
+ * effect; in that case, the warning is deferred to runtime...).
+ *
+ * The slightly excessive casting in type_min is to make sure the
+ * macros also produce sensible values for the exotic type _Bool. [The
+ * overflow checkers only almost work for _Bool, but that's
+ * a-feature-not-a-bug, since people shouldn't be doing arithmetic on
+ * _Bools. Besides, the gcc builtins don't allow _Bool* as third
+ * argument.]
+ *
+ * Idea stolen from
+ * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
+ * credit to Christian Biere.
+ */
+/* The is_signed_type macro is redefined in a few places in various kernel
+ * headers. If this header is included at the same time as one of those, we
+ * will generate compilation warnings. Since we can't fix every old kernel,
+ * rename is_signed_type for this file to _kc_is_signed_type. This prevents
+ * the macro name collision, and should be safe since our drivers do not
+ * directly call the macro.
+ */
+#define _kc_is_signed_type(type) (((type)(-1)) < (type)1)
+#define __type_half_max(type)                                                  \
+	((type)1 << (8 * sizeof(type) - 1 - _kc_is_signed_type(type)))
+#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
+#define type_min(T) ((T)((T)-type_max(T) - (T)1))
+
+#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
+/*
+ * For simplicity and code hygiene, the fallback code below insists on
+ * a, b and *d having the same type (similar to the min() and max()
+ * macros), whereas gcc's type-generic overflow checkers accept
+ * different types. Hence we don't just make check_add_overflow an
+ * alias for __builtin_add_overflow, but add type checks similar to
+ * below.
+ */
+#define check_add_overflow(a, b, d)                                            \
+	({                                                                     \
+		typeof(a) __a = (a);                                           \
+		typeof(b) __b = (b);                                           \
+		typeof(d) __d = (d);                                           \
+		(void)(&__a == &__b);                                          \
+		(void)(&__a == __d);                                           \
+		__builtin_add_overflow(__a, __b, __d);                         \
+	})
+
+#define check_sub_overflow(a, b, d)                                            \
+	({                                                                     \
+		typeof(a) __a = (a);                                           \
+		typeof(b) __b = (b);                                           \
+		typeof(d) __d = (d);                                           \
+		(void)(&__a == &__b);                                          \
+		(void)(&__a == __d);                                           \
+		__builtin_sub_overflow(__a, __b, __d);                         \
+	})
+
+#define check_mul_overflow(a, b, d)                                            \
+	({                                                                     \
+		typeof(a) __a = (a);                                           \
+		typeof(b) __b = (b);                                           \
+		typeof(d) __d = (d);                                           \
+		(void)(&__a == &__b);                                          \
+		(void)(&__a == __d);                                           \
+		__builtin_mul_overflow(__a, __b, __d);                         \
+	})
+
+#else
+
+/* Checking for unsigned overflow is relatively easy without causing UB. */
+#define __unsigned_add_overflow(a, b, d)                                       \
+	({                                                                     \
+		typeof(a) __a = (a);                                           \
+		typeof(b) __b = (b);                                           \
+		typeof(d) __d = (d);                                           \
+		(void)(&__a == &__b);                                          \
+		(void)(&__a == __d);                                           \
+		*__d = __a + __b;                                              \
+		*__d < __a;                                                    \
+	})
+#define __unsigned_sub_overflow(a, b, d)                                       \
+	({                                                                     \
+		typeof(a) __a = (a);                                           \
+		typeof(b) __b = (b);                                           \
+		typeof(d) __d = (d);                                           \
+		(void)(&__a == &__b);                                          \
+		(void)(&__a == __d);                                           \
+		*__d = __a - __b;                                              \
+		__a < __b;                                                     \
+	})
+/*
+ * If one of a or b is a compile-time constant, this avoids a division.
+ */
+#define __unsigned_mul_overflow(a, b, d)                                       \
+	({                                                                     \
+		typeof(a) __a = (a);                                           \
+		typeof(b) __b = (b);                                           \
+		typeof(d) __d = (d);                                           \
+		(void)(&__a == &__b);                                          \
+		(void)(&__a == __d);                                           \
+		*__d = __a * __b;                                              \
+		__builtin_constant_p(__b) ?                                    \
+			__b > 0 && __a > type_max(typeof(__a)) / __b :         \
+			__a > 0 && __b > type_max(typeof(__b)) / __a;          \
+	})
+
+/*
+ * For signed types, detecting overflow is much harder, especially if
+ * we want to avoid UB. But the interface of these macros is such that
+ * we must provide a result in *d, and in fact we must produce the
+ * result promised by gcc's builtins, which is simply the possibly
+ * wrapped-around value. Fortunately, we can just formally do the
+ * operations in the widest relevant unsigned type (u64) and then
+ * truncate the result - gcc is smart enough to generate the same code
+ * with and without the (u64) casts.
+ */
+
+/*
+ * Adding two signed integers can overflow only if they have the same
+ * sign, and overflow has happened iff the result has the opposite
+ * sign.
+ */
+#define __signed_add_overflow(a, b, d)                                         \
+	({                                                                     \
+		typeof(a) __a = (a);                                           \
+		typeof(b) __b = (b);                                           \
+		typeof(d) __d = (d);                                           \
+		(void)(&__a == &__b);                                          \
+		(void)(&__a == __d);                                           \
+		*__d = (u64)__a + (u64)__b;                                    \
+		(((~(__a ^ __b)) & (*__d ^ __a)) & type_min(typeof(__a))) !=   \
+			0;                                                     \
+	})
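+
+/*
+ * For example, with s8 operands, __signed_add_overflow(100, 50, &res)
+ * stores the wrapped value -106 in res (100 + 50 = 150, truncated to s8)
+ * and evaluates to true, because the result's sign differs from the sign
+ * shared by both operands.
+ */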
+
+/*
+ * Subtraction is similar, except that overflow can now happen only
+ * when the signs are opposite. In this case, overflow has happened if
+ * the result has the opposite sign of a.
+ */
+#define __signed_sub_overflow(a, b, d)                                         \
+	({                                                                     \
+		typeof(a) __a = (a);                                           \
+		typeof(b) __b = (b);                                           \
+		typeof(d) __d = (d);                                           \
+		(void)(&__a == &__b);                                          \
+		(void)(&__a == __d);                                           \
+		*__d = (u64)__a - (u64)__b;                                    \
+		((((__a ^ __b)) & (*__d ^ __a)) & type_min(typeof(__a))) != 0; \
+	})
+
+/*
+ * Signed multiplication is rather hard. gcc always follows C99, so
+ * division is truncated towards 0. This means that we can write the
+ * overflow check like this:
+ *
+ * (a > 0 && (b > MAX/a || b < MIN/a)) ||
+ * (a < -1 && (b > MIN/a || b < MAX/a)) ||
+ * (a == -1 && b == MIN)
+ *
+ * The redundant casts of -1 are to silence an annoying -Wtype-limits
+ * (included in -Wextra) warning: When the type is u8 or u16, the
+ * __b_c_e in check_mul_overflow obviously selects
+ * __unsigned_mul_overflow, but unfortunately gcc still parses this
+ * code and warns about the limited range of __b.
+ */
+
+#define __signed_mul_overflow(a, b, d)                                         \
+	({                                                                     \
+		typeof(a) __a = (a);                                           \
+		typeof(b) __b = (b);                                           \
+		typeof(d) __d = (d);                                           \
+		typeof(a) __tmax = type_max(typeof(a));                        \
+		typeof(a) __tmin = type_min(typeof(a));                        \
+		(void)(&__a == &__b);                                          \
+		(void)(&__a == __d);                                           \
+		*__d = (u64)__a * (u64)__b;                                    \
+		(__b > 0 && (__a > __tmax / __b || __a < __tmin / __b)) ||     \
+			(__b < (typeof(__b))-1 &&                              \
+			 (__a > __tmin / __b || __a < __tmax / __b)) ||        \
+			(__b == (typeof(__b))-1 && __a == __tmin);             \
+	})
+
+#define check_add_overflow(a, b, d)                                            \
+	__builtin_choose_expr(_kc_is_signed_type(typeof(a)),                   \
+			      __signed_add_overflow(a, b, d),                  \
+			      __unsigned_add_overflow(a, b, d))
+
+#define check_sub_overflow(a, b, d)                                            \
+	__builtin_choose_expr(_kc_is_signed_type(typeof(a)),                   \
+			      __signed_sub_overflow(a, b, d),                  \
+			      __unsigned_sub_overflow(a, b, d))
+
+#define check_mul_overflow(a, b, d)                                            \
+	__builtin_choose_expr(_kc_is_signed_type(typeof(a)),                   \
+			      __signed_mul_overflow(a, b, d),                  \
+			      __unsigned_mul_overflow(a, b, d))
+
+#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
+
+/** check_shl_overflow() - Calculate a left-shifted value and check overflow
+ *
+ * @a: Value to be shifted
+ * @s: How many bits left to shift
+ * @d: Pointer to where to store the result
+ *
+ * Computes *@d = (@a << @s)
+ *
+ * Returns true if '*d' cannot hold the result or when 'a << s' doesn't
+ * make sense. Example conditions:
+ * - 'a << s' causes bits to be lost when stored in *d.
+ * - 's' is garbage (e.g. negative) or so large that the result of
+ *   'a << s' is guaranteed to be 0.
+ * - 'a' is negative.
+ * - 'a << s' sets the sign bit, if any, in '*d'.
+ *
+ * '*d' will hold the results of the attempted shift, but is not
+ * considered "safe for use" if false is returned.
+ */
+#define check_shl_overflow(a, s, d)                                            \
+	({                                                                     \
+		typeof(a) _a = a;                                              \
+		typeof(s) _s = s;                                              \
+		typeof(d) _d = d;                                              \
+		u64 _a_full = _a;                                              \
+		unsigned int _to_shift =                                       \
+			_s >= 0 && _s < 8 * sizeof(*d) ? _s : 0;               \
+		*_d = (_a_full << _to_shift);                                  \
+		(_to_shift != _s || *_d < 0 || _a < 0 ||                       \
+		 (*_d >> _to_shift) != _a);                                    \
+	})
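+
+/*
+ * A minimal usage sketch (variable names are illustrative only):
+ *
+ *	u32 reg;
+ *
+ *	if (check_shl_overflow(offset, 8, &reg))
+ *		return -EINVAL;
+ *
+ * This rejects any 'offset' that cannot be shifted left by 8 bits without
+ * losing bits in 'reg'.
+ */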
+
+/**
+ * array_size() - Calculate size of 2-dimensional array.
+ *
+ * @a: dimension one
+ * @b: dimension two
+ *
+ * Calculates size of 2-dimensional array: @a * @b.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+static inline __must_check size_t array_size(size_t a, size_t b)
+{
+	size_t bytes;
+
+	if (check_mul_overflow(a, b, &bytes))
+		return SIZE_MAX;
+
+	return bytes;
+}
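+
+/*
+ * For example, kmalloc(array_size(n, sizeof(*buf)), GFP_KERNEL) requests
+ * n * sizeof(*buf) bytes; if the multiplication overflows, the request
+ * becomes SIZE_MAX, which the allocator refuses rather than silently
+ * under-allocating.
+ */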
+
+/**
+ * array3_size() - Calculate size of 3-dimensional array.
+ *
+ * @a: dimension one
+ * @b: dimension two
+ * @c: dimension three
+ *
+ * Calculates size of 3-dimensional array: @a * @b * @c.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+static inline __must_check size_t array3_size(size_t a, size_t b, size_t c)
+{
+	size_t bytes;
+
+	if (check_mul_overflow(a, b, &bytes))
+		return SIZE_MAX;
+	if (check_mul_overflow(bytes, c, &bytes))
+		return SIZE_MAX;
+
+	return bytes;
+}
+
+static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c)
+{
+	size_t bytes;
+
+	if (check_mul_overflow(n, size, &bytes))
+		return SIZE_MAX;
+	if (check_add_overflow(bytes, c, &bytes))
+		return SIZE_MAX;
+
+	return bytes;
+}
+
+/**
+ * struct_size() - Calculate size of structure with trailing array.
+ * @p: Pointer to the structure.
+ * @member: Name of the array member.
+ * @n: Number of elements in the array.
+ *
+ * Calculates size of memory needed for structure @p followed by an
+ * array of @n @member elements.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define struct_size(p, member, n)                                              \
+	__ab_c_size(n, sizeof(*(p)->member) + __must_be_array((p)->member),    \
+		    sizeof(*(p)))
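+
+/*
+ * Example with a hypothetical structure that ends in a flexible array
+ * member:
+ *
+ *	struct entry_table {
+ *		u32 count;
+ *		u64 entries[];
+ *	};
+ *
+ *	tbl = kzalloc(struct_size(tbl, entries, count), GFP_KERNEL);
+ *
+ * struct_size() here evaluates to sizeof(*tbl) + count * sizeof(u64),
+ * or SIZE_MAX if that computation overflows.
+ */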
+
+#endif /* __LINUX_OVERFLOW_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/kcompat_rhel_defs.h b/drivers/net/ethernet/mucse/rnpgbe/kcompat_rhel_defs.h
new file mode 100755
index 0000000000000000000000000000000000000000..0dd330bb15017652dde6a8888219a872ff674bb4
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/kcompat_rhel_defs.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _KCOMPAT_RHEL_DEFS_H_
+#define _KCOMPAT_RHEL_DEFS_H_
+
+/* This is the RedHat Enterprise Linux distribution specific definitions file.
+ * It defines what features need backports for a given version of the RHEL
+ * kernel.
+ *
+ * It checks the RHEL_RELEASE_CODE and RHEL_RELEASE_VERSION macros to decide
+ * what support the target kernel has.
+ *
+ * It assumes that kcompat_std_defs.h has already been processed, and will
+ * #define or #undef any flags that have changed based on backports done by
+ * RHEL.
+ */
+
+#if !RHEL_RELEASE_CODE
+#error "RHEL_RELEASE_CODE is 0 or undefined"
+#endif
+
+#ifndef RHEL_RELEASE_VERSION
+#error "RHEL_RELEASE_VERSION is undefined"
+#endif
+
+#if (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(6, 5))
+#define NEED_DIV64_U64_REM
+#else
+#endif
+
+#if (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(6, 10))
+#define NO_BIT_ATTRS
+#define NO_SKB_DUMP
+#define NO_REAL_QUEUE_NUM
+#define COMPAT_PTP_NO_PINS
+#define NO_SKB_VLAN_PROTO
+#define NEED_ETHTOOL_CONVERT_LEGACY_U32_TO_LINK_MODE
+#else /* > 6.10 */
+#endif /* 6.10 */
+
+/*****************************************************************************/
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 1))
+#define COMPAT_PTP_NO_PINS
+#else /* >= 7.1 */
+#define HAVE_NDO_FEATURES_CHECK
+#endif /* 7.1 */
+
+/*****************************************************************************/
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 3))
+#else /* >= 7.3 */
+#undef NEED_DEV_PRINTK_ONCE
+#endif /* 7.3 */
+
+/*****************************************************************************/
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 5))
+#else /* >= 7.5 */
+#define HAVE_TCF_EXTS_TO_LIST
+#endif /* 7.5 */
+
+/*****************************************************************************/
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 6))
+#else /* >= 7.6 */
+#undef NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0
+/* CentOS-7-aarch64-Everything-1810.iso */
+#ifndef CONFIG_ARM64
+#undef NEED_TC_SETUP_QDISC_MQPRIO
+#endif /* CONFIG_ARM64 */
+#endif /* 7.6 */
+
+/*****************************************************************************/
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 7))
+#define NO_TX_MAXRATE
+#else /* >= 7.7 */
+/* Anolis OS kernels need this */
+#ifdef ANOLIS_OS
+#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS
+#endif
+#define HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR
+#define HAVE_ETHTOOL_NEW_100G_BITS
+#undef NEED_NETDEV_TX_SENT_QUEUE
+#endif /* 7.7 */
+
+/*****************************************************************************/
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 0))
+#else /* >= 8.0 */
+#undef HAVE_TCF_EXTS_TO_LIST
+#undef HAVE_ETHTOOL_NEW_100G_BITS
+#define HAVE_NDO_OFFLOAD_STATS
+#undef HAVE_RHEL7_EXTENDED_OFFLOAD_STATS
+#define HAVE_TCF_EXTS_FOR_EACH_ACTION
+/* 7.7 undefs it due to a backport in 7.7+, but 8.0 needs it still */
+#define NEED_NETDEV_TX_SENT_QUEUE
+#define HAVE_DEVLINK_REGIONS
+#define HAVE_DEVLINK_PARAMS
+#endif /* 8.0 */
+
+/*****************************************************************************/
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 1))
+#define NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE
+#else /* >= 8.1 */
+#define HAVE_ETHTOOL_NEW_100G_BITS
+#undef NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE
+#define HAVE_DEVLINK_PARAMS_PUBLISH
+#undef NEED_NETDEV_TX_SENT_QUEUE
+#undef NEED_INDIRECT_CALL_WRAPPER_MACROS
+#define HAVE_INDIRECT_CALL_WRAPPER_HEADER
+#endif /* 8.1 */
+
+/*****************************************************************************/
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 2))
+#else /* >= 8.2 */
+#undef NEED_BUS_FIND_DEVICE_CONST_DATA
+#undef NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY
+#undef NEED_SKB_FRAG_OFF
+#undef NEED_SKB_FRAG_OFF_ADD
+#undef NEED_FLOW_INDR_BLOCK_CB_REGISTER
+#define HAVE_FLOW_INDR_BLOCK_LOCK
+#define HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID
+#define HAVE_DEVLINK_HEALTH
+#define HAVE_NETDEV_SB_DEV
+#endif /* 8.2 */
+
+/*****************************************************************************/
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 3))
+#else /* >= 8.3 */
+#undef NEED_CPU_LATENCY_QOS_RENAME
+#define HAVE_DEVLINK_HEALTH_OPS_EXTACK
+#define HAVE_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER
+#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT
+#undef NEED_DEVLINK_REGION_CREATE_OPS
+#endif /* 8.3 */
+
+/*****************************************************************************/
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 4))
+#else /* >= 8.4 */
+#undef NEED_DEVLINK_PORT_ATTRS_SET_STRUCT
+#undef NEED_NET_PREFETCH
+#undef NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY
+#undef HAVE_XDP_QUERY_PROG
+#define HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT
+#endif /* 8.4 */
+
+/*****************************************************************************/
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 5))
+#else /* >= 8.5 */
+#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS
+#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW
+#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS
+#undef HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY
+#undef HAVE_NAPI_BUSY_LOOP
+#undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS
+#endif /* 8.5 */
+
+/*****************************************************************************/
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 6))
+#else /* >= 8.6 */
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9, 0))
+#ifdef ANOLIS_OS
+#define HAVE_NETIF_NAPI_ADD_WEIGHT
+#endif
+#define HAVE_ETHTOOL_COALESCE_EXTACK
+#endif /* < 9.0 */
+#undef NEED_ETH_HW_ADDR_SET
+#endif /* 8.6 */
+
+/*****************************************************************************/
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 7))
+#else /* >= 8.7 */
+#undef NEED_DEVLINK_ALLOC_SETS_DEV
+#undef NEED_NO_NETDEV_PROG_XDP_WARN_ACTION
+#undef HAVE_DEVLINK_PARAMS_PUBLISH
+#undef HAVE_DEVLINK_RELOAD_ENABLE_DISABLE
+#undef HAVE_DEVLINK_REGISTER_SETS_DEV
+#define HAVE_DEVLINK_NOTIFY_REGISTER
+#define HAVE_DEVLINK_SET_FEATURES
+#endif /* 8.7 */
+
+/*****************************************************************************/
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 8))
+#else /* >= 8.8 */
+#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS
+#undef HAVE_NETIF_NAPI_ADD_WEIGHT
+#endif /* 8.8 */
+
+/*****************************************************************************/
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 10))
+#else /* >= 8.10 */
+#define HAVE_NETIF_NAPI_ADD_WEIGHT
+#endif /* 8.10 */
+
+/*****************************************************************************/
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9, 0))
+#else /* >= 9.0 */
+#undef HAVE_NETIF_NAPI_ADD_WEIGHT
+#undef HAVE_ETHTOOL_EXTENDED_RINGPARAMS
+#undef HAVE_ETHTOOL_COALESCE_EXTACK
+#define HAVE_XDP_BUFF_RXQ
+#undef NEED_DEVLINK_ALLOC_SETS_DEV
+#undef HAVE_DEVLINK_PARAMS_PUBLISH
+#undef HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT
+#undef HAVE_DEVLINK_REGISTER_SETS_DEV
+#define HAVE_NDO_ETH_IOCTL
+#endif /* 9.0 */
+
+/*****************************************************************************/
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9, 1))
+//#undef HAVE_ETHTOOL_EXTENDED_RINGPARAMS
+//#undef HAVE_ETHTOOL_COALESCE_EXTACK
+#else /* >= 9.1 */
+#undef HAVE_PASID_SUPPORT
+#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS
+#define HAVE_ETHTOOL_COALESCE_EXTACK
+#endif /* 9.1 */
+
+/*****************************************************************************/
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9, 2))
+#else /* >= 9.2 */
+#define HAVE_XDP_BUFF_RXQ
+#define HAVE_NETIF_NAPI_ADD_WEIGHT
+//#undef HAVE_ETHTOOL_COALESCE_EXTACK
+#endif /* 9.2 */
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9, 3))
+#else /* >= 9.3 */
+#define NO_PCIE_ERROR_REPORTING
+#define HAVE_ETHTOOL_COALESCE_EXTACK
+#endif /* 9.3 */
+
+#endif /* _KCOMPAT_RHEL_DEFS_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/kcompat_sles_defs.h b/drivers/net/ethernet/mucse/rnpgbe/kcompat_sles_defs.h
new file mode 100755
index 0000000000000000000000000000000000000000..73cd1e899eff767b8ca5a2fbccbdfcc1d74d9a8e
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/kcompat_sles_defs.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _KCOMPAT_SLES_DEFS_H_
+#define _KCOMPAT_SLES_DEFS_H_
+
+/* This is the SUSE Linux Enterprise distribution specific definitions
+ * file. It defines what features need backports for a given version of
+ * the SUSE Linux Enterprise kernel.
+ *
+ * It checks a combination of the LINUX_VERSION code and the
+ * SLE_LOCALVERSION_CODE to determine what support the kernel has.
+ *
+ * It assumes that kcompat_std_defs.h has already been processed, and will
+ * #define or #undef any flags that have changed based on backports done by
+ * SUSE.
+ */
+
+#ifndef LINUX_VERSION_CODE
+#error "LINUX_VERSION_CODE is undefined"
+#endif
+
+#ifndef KERNEL_VERSION
+#error "KERNEL_VERSION is undefined"
+#endif
+
+#if !SLE_KERNEL_REVISION
+#error "SLE_KERNEL_REVISION is 0 or undefined"
+#endif
+
+#if SLE_KERNEL_REVISION > 65535
+#error "SLE_KERNEL_REVISION is unexpectedly large"
+#endif
+
+/* SLE kernel versions are a combination of the LINUX_VERSION_CODE along
+ * with an extra digit that indicates the SUSE specific revision of that
+ * kernel. This value is found in the CONFIG_LOCALVERSION of the SUSE
+ * kernel, which is extracted by common.mk and placed into
+ * SLE_KERNEL_REVISION_CODE.
+ *
+ * We combine the value of SLE_KERNEL_REVISION along with the
+ * LINUX_VERSION_CODE code to generate the useful value that determines
+ * what specific kernel we're dealing with.
+ *
+ * Just in case the SLE_KERNEL_REVISION ever goes above 255, we reserve
+ * 16 bits instead of 8 for this value.
+ */
+#define SLE_KERNEL_CODE ((LINUX_VERSION_CODE << 16) + SLE_KERNEL_REVISION)
+#define SLE_KERNEL_VERSION(a, b, c, d) ((KERNEL_VERSION(a, b, c) << 16) + (d))
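+
+/*
+ * For example, a 5.3.18 based SLE kernel with SUSE revision 37 is
+ * expressed as SLE_KERNEL_VERSION(5, 3, 18, 37), i.e.
+ * (KERNEL_VERSION(5, 3, 18) << 16) + 37, which compares directly against
+ * SLE_KERNEL_CODE for that kernel.
+ */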
+
+/* Unlike RHEL, SUSE kernels are not always tied to a single service pack.
+ * For example, 4.12.14 was used as the base for SLE 15 SP1, SLE 12 SP4,
+ * and SLE 12 SP5.
+ *
+ * You can find the patches that SUSE applied to the kernel tree at
+ * https://github.com/SUSE/kernel-source.
+ *
+ * You can find the correct kernel version for a check by using steps
+ * similar to the following (see the example commands after this comment):
+ *
+ * 1) download the kernel-source repo
+ * 2) check out the relevant branch, e.g. SLE15-SP3
+ * 3) find the relevant backport you're interested in within the
+ *    patches.suse directory
+ * 4) use git log on that patch file to locate the commit that introduced
+ *    the backport
+ * 5) use git describe --contains to find the relevant tag that includes
+ *    that commit, e.g. rpm-5.3.18-37
+ * 6) the digits in that tag represent the SLE kernel that introduced the
+ *    backport.
+ *
+ * Try to keep the checks in SLE_KERNEL_CODE order and condense where
+ * possible.
+ */
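+
+/*
+ * As a rough sketch of those steps (the patch file name and commit below
+ * are only placeholders):
+ *
+ *	git clone https://github.com/SUSE/kernel-source
+ *	git checkout SLE15-SP3
+ *	git log -- patches.suse/<backport>.patch
+ *	git describe --contains <commit>
+ */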
+
+/************************************************************************/
+#if (SLE_KERNEL_CODE > SLE_KERNEL_VERSION(4, 12, 14, 23) &&                    \
+     SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4, 12, 14, 94))
+/*
+ * 4.12.14 is used as the base for SLE 12 SP4, SLE 12 SP5, SLE 15, and SLE
+ * 15 SP1. Unfortunately the revision codes do not line up cleanly. SLE 15
+ * launched with 4.12.14-23. It appears that SLE 12 SP4 and SLE 15 SP1 both
+ * diverged from this point, with SLE 12 SP4 kernels starting around
+ * 4.12.14-94. A few backports for SLE 15 SP1 landed in some alpha and beta
+ * kernels tagged between 4.12.14-25 up to 4.12.14-32. These changes did
+ * not make it into SLE 12 SP4. This was cleaned up with SLE 12 SP5 by an
+ * apparent merge in 4.12.14-111. The official launch of SLE 15 SP1 ended
+ * up with version 4.12.14-195.
+ *
+ * Because of this inconsistency and because all of these kernels appear
+ * to be alpha or beta kernel releases for SLE 15 SP1, we do not rely on
+ * version checks between this range. Issue a warning to indicate that we
+ * do not support these.
+ */
+#warning "SLE kernel between 4.12.14-23 and 4.12.14-94 are not supported"
+#endif
+
+/************************************************************************/
+#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4, 12, 14, 100))
+#else /* >= 4.12.14-100 */
+#undef HAVE_TCF_EXTS_TO_LIST
+#define HAVE_TCF_EXTS_FOR_EACH_ACTION
+#endif /* 4.12.14-100 */
+
+/************************************************************************/
+#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4, 12, 14, 111))
+#define NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE
+#else /* >= 4.12.14-111 */
+#define HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR
+#undef NEED_MACVLAN_ACCEL_PRIV
+#undef NEED_MACVLAN_RELEASE_L2FW_OFFLOAD
+#undef NEED_MACVLAN_SUPPORTS_DEST_FILTER
+#undef NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE
+#endif /* 4.12.14-111 */
+
+/************************************************************************/
+/* SLES 12-SP5 base kernel version */
+#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4, 12, 14, 120))
+#else /* >= 4.12.14-120 */
+#define HAVE_NDO_SELECT_QUEUE_SB_DEV
+#define HAVE_TCF_MIRRED_DEV
+#define HAVE_TCF_BLOCK
+#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK
+#undef NEED_TC_SETUP_QDISC_MQPRIO
+#undef NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0
+#undef NEED_NETDEV_TX_SENT_QUEUE
+#endif /* 4.12.14-120 */
+
+/************************************************************************/
+/* SLES 15-SP1 base */
+#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(4, 12, 14, 195))
+#else /* >= 4.12.14-195 */
+#undef NEED_NETDEV_TX_SENT_QUEUE
+#endif /* 4.12.14-195 */
+
+/************************************************************************/
+#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5, 3, 8, 2))
+#else /* >= 5.3.8-2 */
+#undef NEED_BUS_FIND_DEVICE_CONST_DATA
+#undef NEED_FLOW_INDR_BLOCK_CB_REGISTER
+#undef NEED_SKB_FRAG_OFF
+#undef NEED_SKB_FRAG_OFF_ADD
+#define HAVE_FLOW_INDR_BLOCK_LOCK
+#endif /* 5.3.8-2 */
+
+#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5, 3, 16, 2))
+#else /* >= 5.3.16-2 */
+#define HAVE_DEVLINK_HEALTH_OPS_EXTACK
+#endif /* 5.3.16-2 */
+
+#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5, 3, 18, 26))
+#else /* >= 5.3.18-26 */
+#undef NEED_CPU_LATENCY_QOS_RENAME
+#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS
+#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS
+#endif /* 5.3.18-26 */
+
+/************************************************************************/
+#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5, 3, 18, 34))
+#else /* >= 5.3.18-34 */
+#undef NEED_DEVLINK_REGION_CREATE_OPS
+#undef NEED_DEVLINK_PORT_ATTRS_SET_STRUCT
+#define HAVE_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER
+#endif /* 5.3.18-34 */
+
+/************************************************************************/
+#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5, 3, 18, 37))
+#else /* >= 5.3.18-37 */
+#undef NEED_NET_PREFETCH
+#endif /* 5.3.18-37 */
+
+/************************************************************************/
+#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5, 3, 18, 38))
+#else /* >= 5.3.18-38 */
+#undef NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY
+#endif /* 5.3.18-38 */
+
+/************************************************************************/
+#if (SLE_KERNEL_CODE < SLE_KERNEL_VERSION(5, 14, 21, 9))
+#else /* >= 5.14.21-150400.9 */
+#undef NEED_DEVLINK_ALLOC_SETS_DEV
+#define HAVE_ETHTOOL_COALESCE_EXTACK
+#endif /* 5.14.21-150400.9 */
+
+#endif /* _KCOMPAT_SLES_DEFS_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/kcompat_std_defs.h b/drivers/net/ethernet/mucse/rnpgbe/kcompat_std_defs.h
new file mode 100755
index 0000000000000000000000000000000000000000..16ca915218f299e275853a47c577ff9be095cf9e
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/kcompat_std_defs.h
@@ -0,0 +1,299 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _KCOMPAT_STD_DEFS_H_
+#define _KCOMPAT_STD_DEFS_H_
+
+/* This file contains the definitions for what kernel features need
+ * backports for a given kernel. It targets only the standard stable kernel
+ * releases. It must check only LINUX_VERSION_CODE and assume the kernel is
+ * a standard release, and not a custom distribution.
+ *
+ * It must define HAVE_<FLAG> and NEED_<FLAG> for features. It must not
+ * implement any backports, instead leaving the implementation to the
+ * kcompat_impl.h header.
+ *
+ * If a feature can be easily implemented as a replacement macro or fully
+ * backported, use a NEED_<FLAG> to indicate that the feature needs
+ * a backport. (If NEED_<FLAG> is undefined, then no backport for that
+ * feature is needed).
+ *
+ * If a feature cannot be easily implemented in kcompat directly, but
+ * requires drivers to make specific changes such as stripping out an entire
+ * feature or modifying a function pointer prototype, use a HAVE_<FLAG>.
+ */
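+
+/*
+ * Driver and kcompat code then test these flags, for example (a minimal
+ * sketch; the handler name is illustrative only):
+ *
+ *	#ifdef HAVE_NDO_ETH_IOCTL
+ *		.ndo_eth_ioctl = rnpgbe_ioctl,
+ *	#else
+ *		.ndo_do_ioctl = rnpgbe_ioctl,
+ *	#endif
+ *
+ * while a defined NEED_<FLAG> tells kcompat_impl.h to supply a
+ * replacement for the missing helper.
+ */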
+
+#ifndef LINUX_VERSION_CODE
+#error "LINUX_VERSION_CODE is undefined"
+#endif
+
+#ifndef KERNEL_VERSION
+#error "KERNEL_VERSION is undefined"
+#endif
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
+#define NEED_DEV_PM_DOMAIN_ATTACH_DETACH
+#else /* >= 3,18,0 */
+#endif /* 3,18,0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
+#define NO_TX_MAXRATE
+#define NEED_DEV_PRINTK_ONCE
+#else /* >= 3,19,0 */
+#endif /* 3,19,0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0))
+#define NEED_DEFINE_STATIC_KEY_FALSE
+#define NEED_STATIC_BRANCH
+#else /* >= 4,3,0 */
+#define NEED_DECLARE_STATIC_KEY_FALSE
+#endif /* 4,3,0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
+#else /* >= 4,8,0 */
+#define HAVE_TCF_EXTS_TO_LIST
+#define HAVE_PCI_ALLOC_IRQ
+#endif /* 4,8,0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0))
+#define NEED_JIFFIES_64_TIME_IS_MACROS
+#else /* >= 4,9,0 */
+#define HAVE_KTHREAD_DELAYED_API
+#define HAVE_NDO_OFFLOAD_STATS
+#undef NEED_DECLARE_STATIC_KEY_FALSE
+#endif /* 4,9,0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+#else /* >= 4,12,0 */
+#define HAVE_NAPI_BUSY_LOOP
+#endif /* 4,12,0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0))
+#define NEED_TC_SETUP_QDISC_MQPRIO
+#define NEED_NETDEV_XDP_STRUCT
+#else /* >= 4,15,0 */
+#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+#define HAVE_NDO_BPF
+#endif /* 4,15,0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0))
+#define NEED_TC_CLS_CAN_OFFLOAD_AND_CHAIN0
+#else /* >= 4,16,0 */
+#define HAVE_XDP_BUFF_RXQ
+#define HAVE_XDP_RXQ_INFO_REG_3_PARAMS
+#endif /* 4,16,0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0))
+#define NEED_CONVERT_ART_NS_TO_TSC
+#else /* >= 4,17,0 */
+#endif /* 4,17,0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0))
+#define NEED_MACVLAN_ACCEL_PRIV
+#define NEED_MACVLAN_RELEASE_L2FW_OFFLOAD
+#define NEED_MACVLAN_SUPPORTS_DEST_FILTER
+#else /* >= 4,18,0 */
+#define HAVE_DEVLINK_PORT_ATTRS_SET_PORT_FLAVOUR
+#endif /* 4,18,0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
+#define NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE
+#else /* >= 4,19,0 */
+#undef HAVE_TCF_EXTS_TO_LIST
+#define HAVE_TCF_EXTS_FOR_EACH_ACTION
+#define HAVE_DEVLINK_REGIONS
+#define HAVE_TC_ETF_QOPT_OFFLOAD
+#define HAVE_DEVLINK_PARAMS
+#endif /* 4,19,0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0))
+#define NEED_NETDEV_TX_SENT_QUEUE
+#else /* >= 4.20.0 */
+#endif /* 4.20.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0))
+#define NEED_INDIRECT_CALL_WRAPPER_MACROS
+#else /* >= 5.0.0 */
+#define HAVE_INDIRECT_CALL_WRAPPER_HEADER
+#endif /* 5.0.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0))
+#else /* >= 5.1.0 */
+#define HAVE_ETHTOOL_200G_BITS
+#define HAVE_ETHTOOL_NEW_100G_BITS
+#define HAVE_DEVLINK_PARAMS_PUBLISH
+#define HAVE_DEVLINK_HEALTH
+#endif /* 5.1.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0))
+#else /* >= 5.2.0 */
+#define HAVE_DEVLINK_PORT_ATTRS_SET_SWITCH_ID
+#endif /* 5.2.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0))
+#define NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY
+#define NEED_BUS_FIND_DEVICE_CONST_DATA
+#else /* >= 5.3.0 */
+#endif /* 5.3.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
+#define NEED_SKB_FRAG_OFF_ADD
+#define NEED_SKB_FRAG_OFF
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 14, 241) &&                        \
+     LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0))
+#undef NEED_SKB_FRAG_OFF
+#endif /* > 4.14.241 && < 4.15.0 */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 19, 200) &&                        \
+     LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0))
+#undef NEED_SKB_FRAG_OFF
+#endif /* > 4.19.200 && < 4.20.0 */
+
+#define NEED_FLOW_INDR_BLOCK_CB_REGISTER
+#else /* >= 5.4.0 */
+#define HAVE_FLOW_INDR_BLOCK_LOCK
+#define HAVE_XSK_UNALIGNED_CHUNK_PLACEMENT
+#endif /* 5.4.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0))
+#else /* >= 5.5.0 */
+#define HAVE_DEVLINK_HEALTH_OPS_EXTACK
+#endif /* 5.5.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0))
+#define NEED_DEVLINK_REGION_CREATE_OPS
+#define NEED_CPU_LATENCY_QOS_RENAME
+#else /* >= 5.7.0 */
+#define HAVE_DEVLINK_HEALTH_DEFAULT_AUTO_RECOVER
+#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT
+#endif /* 5.7.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0))
+#else /* >= 5.8.0 */
+#undef HAVE_XSK_UNALIGNED_CHUNK_PLACEMENT
+#endif /* 5.8.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0))
+#define NEED_DEVLINK_PORT_ATTRS_SET_STRUCT
+#define HAVE_XDP_QUERY_PROG
+#define NEED_INDIRECT_CALL_3_AND_4
+#else /* >= 5.9.0 */
+#define HAVE_TASKLET_SETUP
+#endif /* 5.9.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+#define NEED_NET_PREFETCH
+#define NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY
+#else /* >= 5.10.0 */
+#define HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT
+#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS
+#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS
+#define HAVE_UDP_TUNNEL_NIC_SHARED
+#endif /* 5.10.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0))
+#define HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY
+#else /* >= 5.11.0 */
+#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW
+#define HAVE_XSK_BATCHED_DESCRIPTOR_INTERFACES
+#define HAVE_PASID_SUPPORT
+#undef HAVE_XDP_RXQ_INFO_REG_3_PARAMS
+#endif /* 5.11.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 12, 0))
+#define NEED_EXPORT_INDIRECT_CALLABLE
+#else /* >= 5.12.0 */
+#endif /* 5.12.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0))
+/* HAVE_KOBJ_IN_MDEV_PARENT_OPS_CREATE
+ *
+ * The create API changed as part of commit c2ef2f50ad0c ("vfio/mdev:
+ * Remove kobj from mdev_parent_ops->create()").
+ *
+ * If this flag is defined, use the old API; otherwise use the new API.
+ */
+#define HAVE_KOBJ_IN_MDEV_PARENT_OPS_CREATE
+#define HAVE_DEV_IN_MDEV_API
+#else /* >= 5.13.0 */
+#endif /* 5.13.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0))
+#else /* >= 5.14.0 */
+#define HAVE_TTY_WRITE_ROOM_UINT
+#endif /* 5.14.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
+#define NEED_DEVLINK_ALLOC_SETS_DEV
+#define HAVE_DEVLINK_REGISTER_SETS_DEV
+//#define NEED_ETH_HW_ADDR_SET
+#else /* >= 5.15.0 */
+#define HAVE_ETHTOOL_COALESCE_EXTACK
+#define HAVE_NDO_ETH_IOCTL
+#define HAVE_DEVICE_IN_MDEV_PARENT_OPS
+#undef HAVE_DEVLINK_RELOAD_ACTION_AND_LIMIT
+#undef HAVE_DEVLINK_PARAMS_PUBLISH
+#endif /* 5.15.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0))
+#else /* >= 5.16.0 */
+#undef HAVE_PASID_SUPPORT
+#define HAVE_DEVLINK_SET_FEATURES
+#define HAVE_DEVLINK_NOTIFY_REGISTER
+#endif /* 5.16.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0))
+#define NEED_NO_NETDEV_PROG_XDP_WARN_ACTION
+#else /* >=5.17.0*/
+#define HAVE_XDP_DO_FLUSH
+#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS
+#endif /* 5.17.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 18, 0))
+#define HAVE_XSK_TX_PEEK_RELEASE_DESC_BATCH_3_PARAMS
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0))
+#undef HAVE_XSK_TX_PEEK_RELEASE_DESC_BATCH_3_PARAMS
+#endif /* 5.11.0 */
+#else /* >=5.18.0*/
+#endif /* 5.18.0 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0))
+#else /* >=5.19.0*/
+#define HAVE_NETIF_NAPI_ADD_WEIGHT
+#endif /* 5.19.0 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
+#else /* >=6.2.0*/
+//#define COMPAT_PTP_NO_ADJFREQ
+#endif /* 6.2.0 */
+
+#endif /* _KCOMPAT_STD_DEFS_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/kcompat_ubuntu_defs.h b/drivers/net/ethernet/mucse/rnpgbe/kcompat_ubuntu_defs.h
new file mode 100755
index 0000000000000000000000000000000000000000..05011fc8c0a987b80bd729bd857fd2e500ed4b6b
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/kcompat_ubuntu_defs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _KCOMPAT_UBUNTU_DEFS_H_
+#define _KCOMPAT_UBUNTU_DEFS_H_
+
+/* This file contains the definitions for the Ubuntu specific distribution of
+ * the Linux kernel.
+ *
+ * It checks the UBUNTU_VERSION_CODE to decide which features are available in
+ * the target kernel. It assumes that kcompat_std_defs.h has already been
+ * processed, and will #define or #undef the relevant flags based on what
+ * features were backported by Ubuntu.
+ */
+
+#if !UTS_UBUNTU_RELEASE_ABI
+#error "UTS_UBUNTU_RELEASE_ABI is 0 or undefined"
+#endif
+
+#if !UBUNTU_VERSION_CODE
+#error "UBUNTU_VERSION_CODE is 0 or undefined"
+#endif
+
+#ifndef UBUNTU_VERSION
+#error "UBUNTU_VERSION is undefined"
+#endif
+
+/*****************************************************************************/
+#if (UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4, 15, 0, 159) &&                   \
+     UBUNTU_VERSION_CODE < UBUNTU_VERSION(4, 15, 0, 999))
+#undef NEED_SKB_FRAG_OFF
+#endif
+
+/*****************************************************************************/
+
+#endif /* _KCOMPAT_UBUNTU_DEFS_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/makefile.example b/drivers/net/ethernet/mucse/rnpgbe/makefile.example
new file mode 100755
index 0000000000000000000000000000000000000000..12efda547b21bd4e215a3126e436e6d9d92aadd0
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/makefile.example
@@ -0,0 +1,17 @@
+obj-$(CONFIG_MGBE) += rnpgbe.o
+rnpgbe-objs :=   \
+		rnpgbe_main.o \
+		rnpgbe_common.o \
+		rnpgbe_debugfs.o \
+		rnpgbe_ethtool.o \
+		rnpgbe_lib.o \
+		rnpgbe_mbx.o \
+		rnpgbe_chip.o \
+		rnpgbe_mbx_fw.o\
+		rnpgbe_sriov.o \
+		rnpgbe_param.o \
+		rnp_compat.o \
+		rnpgbe_sysfs.o \
+		rnpgbe_sfc.o \
+		rnp_ptp.o 
+
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnp_compat.c b/drivers/net/ethernet/mucse/rnpgbe/rnp_compat.c
new file mode 100755
index 0000000000000000000000000000000000000000..6b1177b29b2e60fd94648d0e265a8ae1ce755e40
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnp_compat.c
@@ -0,0 +1,3164 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include "rnp_compat.h"
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 8)) || defined __VMKLNX__
+/* From lib/vsprintf.c */
+#include <asm/div64.h>
+
+static int skip_atoi(const char **s)
+{
+	int i = 0;
+
+	while (isdigit(**s))
+		i = i * 10 + *((*s)++) - '0';
+	return i;
+}
+
+#define _kc_ZEROPAD 1 /* pad with zero */
+#define _kc_SIGN 2 /* unsigned/signed long */
+#define _kc_PLUS 4 /* show plus */
+#define _kc_SPACE 8 /* space if plus */
+#define _kc_LEFT 16 /* left justified */
+#define _kc_SPECIAL 32 /* 0x */
+#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
+
+static char *number(char *buf, char *end, long long num, int base, int size,
+		    int precision, int type)
+{
+	char c, sign, tmp[66];
+	const char *digits;
+	const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+	const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+	int i;
+
+	digits = (type & _kc_LARGE) ? large_digits : small_digits;
+	if (type & _kc_LEFT)
+		type &= ~_kc_ZEROPAD;
+	if (base < 2 || base > 36)
+		return NULL;
+	c = (type & _kc_ZEROPAD) ? '0' : ' ';
+	sign = 0;
+	if (type & _kc_SIGN) {
+		if (num < 0) {
+			sign = '-';
+			num = -num;
+			size--;
+		} else if (type & _kc_PLUS) {
+			sign = '+';
+			size--;
+		} else if (type & _kc_SPACE) {
+			sign = ' ';
+			size--;
+		}
+	}
+	if (type & _kc_SPECIAL) {
+		if (base == 16)
+			size -= 2;
+		else if (base == 8)
+			size--;
+	}
+	i = 0;
+	if (num == 0)
+		tmp[i++] = '0';
+	else
+		while (num != 0)
+			tmp[i++] = digits[do_div(num, base)];
+	if (i > precision)
+		precision = i;
+	size -= precision;
+	if (!(type & (_kc_ZEROPAD + _kc_LEFT))) {
+		while (size-- > 0) {
+			if (buf <= end)
+				*buf = ' ';
+			++buf;
+		}
+	}
+	if (sign) {
+		if (buf <= end)
+			*buf = sign;
+		++buf;
+	}
+	if (type & _kc_SPECIAL) {
+		if (base == 8) {
+			if (buf <= end)
+				*buf = '0';
+			++buf;
+		} else if (base == 16) {
+			if (buf <= end)
+				*buf = '0';
+			++buf;
+			if (buf <= end)
+				*buf = digits[33];
+			++buf;
+		}
+	}
+	if (!(type & _kc_LEFT)) {
+		while (size-- > 0) {
+			if (buf <= end)
+				*buf = c;
+			++buf;
+		}
+	}
+	while (i < precision--) {
+		if (buf <= end)
+			*buf = '0';
+		++buf;
+	}
+	while (i-- > 0) {
+		if (buf <= end)
+			*buf = tmp[i];
+		++buf;
+	}
+	while (size-- > 0) {
+		if (buf <= end)
+			*buf = ' ';
+		++buf;
+	}
+	return buf;
+}
+
+int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+	int len;
+	unsigned long long num;
+	int i, base;
+	char *str, *end, c;
+	const char *s;
+
+	int flags; /* flags to number() */
+
+	int field_width; /* width of output field */
+	int precision;
+	/*
+	 * min. # of digits for integers; max
+	 * number of chars for from string
+	 */
+	int qualifier; /* 'h', 'l', or 'L' for integer fields */
+	/* 'z' support added 23/7/1999 S.H.    */
+	/* 'z' changed to 'Z' --davidm 1/25/99 */
+
+	str = buf;
+	end = buf + size - 1;
+
+	if (end < buf - 1) {
+		end = ((void *)-1);
+		size = end - buf + 1;
+	}
+
+	for (; *fmt; ++fmt) {
+		if (*fmt != '%') {
+			if (str <= end)
+				*str = *fmt;
+			++str;
+			continue;
+		}
+
+		/* process flags */
+		flags = 0;
+	repeat:
+		++fmt; /* this also skips first '%' */
+		switch (*fmt) {
+		case '-':
+			flags |= _kc_LEFT;
+			goto repeat;
+		case '+':
+			flags |= _kc_PLUS;
+			goto repeat;
+		case ' ':
+			flags |= _kc_SPACE;
+			goto repeat;
+		case '#':
+			flags |= _kc_SPECIAL;
+			goto repeat;
+		case '0':
+			flags |= _kc_ZEROPAD;
+			goto repeat;
+		}
+
+		/* get field width */
+		field_width = -1;
+		if (isdigit(*fmt))
+			field_width = skip_atoi(&fmt);
+		else if (*fmt == '*') {
+			++fmt;
+			/* it's the next argument */
+			field_width = va_arg(args, int);
+			if (field_width < 0) {
+				field_width = -field_width;
+				flags |= _kc_LEFT;
+			}
+		}
+
+		/* get the precision */
+		precision = -1;
+		if (*fmt == '.') {
+			++fmt;
+			if (isdigit(*fmt))
+				precision = skip_atoi(&fmt);
+			else if (*fmt == '*') {
+				++fmt;
+				/* it's the next argument */
+				precision = va_arg(args, int);
+			}
+			if (precision < 0)
+				precision = 0;
+		}
+
+		/* get the conversion qualifier */
+		qualifier = -1;
+		if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt == 'Z') {
+			qualifier = *fmt;
+			++fmt;
+		}
+
+		/* default base */
+		base = 10;
+
+		switch (*fmt) {
+		case 'c':
+			if (!(flags & _kc_LEFT)) {
+				while (--field_width > 0) {
+					if (str <= end)
+						*str = ' ';
+					++str;
+				}
+			}
+			c = (unsigned char)va_arg(args, int);
+			if (str <= end)
+				*str = c;
+			++str;
+			while (--field_width > 0) {
+				if (str <= end)
+					*str = ' ';
+				++str;
+			}
+			continue;
+
+		case 's':
+			s = va_arg(args, char *);
+			if (!s)
+				s = "";
+
+			len = strnlen(s, precision);
+
+			if (!(flags & _kc_LEFT)) {
+				while (len < field_width--) {
+					if (str <= end)
+						*str = ' ';
+					++str;
+				}
+			}
+			for (i = 0; i < len; ++i) {
+				if (str <= end)
+					*str = *s;
+				++str;
+				++s;
+			}
+			while (len < field_width--) {
+				if (str <= end)
+					*str = ' ';
+				++str;
+			}
+			continue;
+
+		case 'p':
+			if ('M' == *(fmt + 1)) {
+				str = get_mac(str, end,
+					      va_arg(args, unsigned char *));
+				fmt++;
+			} else {
+				if (field_width == -1) {
+					field_width = 2 * sizeof(void *);
+					flags |= _kc_ZEROPAD;
+				}
+				str = number(str, end,
+					     (unsigned long)va_arg(args,
+								   void *),
+					     16, field_width, precision, flags);
+			}
+			continue;
+
+		case 'n':
+			/* FIXME: */
+			/* What does C99 say about the overflow case here? */
+			if (qualifier == 'l') {
+				long *ip = va_arg(args, long *);
+				*ip = (str - buf);
+			} else if (qualifier == 'Z') {
+				size_t *ip = va_arg(args, size_t *);
+				*ip = (str - buf);
+			} else {
+				int *ip = va_arg(args, int *);
+				*ip = (str - buf);
+			}
+			continue;
+
+		case '%':
+			if (str <= end)
+				*str = '%';
+			++str;
+			continue;
+
+			/* integer number formats - set up the flags and "break" */
+		case 'o':
+			base = 8;
+			break;
+
+		case 'X':
+			flags |= _kc_LARGE;
+		case 'x':
+			base = 16;
+			break;
+
+		case 'd':
+		case 'i':
+			flags |= _kc_SIGN;
+			/* fall through */
+		case 'u':
+			break;
+
+		default:
+			if (str <= end)
+				*str = '%';
+			++str;
+			if (*fmt) {
+				if (str <= end)
+					*str = *fmt;
+				++str;
+			} else {
+				--fmt;
+			}
+			continue;
+		}
+		if (qualifier == 'L')
+			num = va_arg(args, long long);
+		else if (qualifier == 'l') {
+			num = va_arg(args, unsigned long);
+			if (flags & _kc_SIGN)
+				num = (signed long)num;
+		} else if (qualifier == 'Z') {
+			num = va_arg(args, size_t);
+		} else if (qualifier == 'h') {
+			num = (unsigned short)va_arg(args, int);
+			if (flags & _kc_SIGN)
+				num = (signed short)num;
+		} else {
+			num = va_arg(args, unsigned int);
+			if (flags & _kc_SIGN)
+				num = (signed int)num;
+		}
+		str = number(str, end, num, base, field_width, precision,
+			     flags);
+	}
+	if (str <= end)
+		*str = '\0';
+	else if (size > 0)
+		/* don't write out a null byte if the buf size is zero */
+		*end = '\0';
+	/* the trailing null byte doesn't count towards the total
+	 * ++str;
+	 */
+	return str - buf;
+}
+
+int _kc_snprintf(char *buf, size_t size, const char *fmt, ...)
+{
+	va_list args;
+	int i;
+
+	va_start(args, fmt);
+	i = _kc_vsnprintf(buf, size, fmt, args);
+	va_end(args);
+	return i;
+}
+#endif /* < 2.4.8 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 13))
+
+/**************************************/
+/* PCI DMA MAPPING */
+
+#if defined(CONFIG_HIGHMEM)
+
+#ifndef PCI_DRAM_OFFSET
+#define PCI_DRAM_OFFSET 0
+#endif
+
+u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page,
+		     unsigned long offset, size_t size, int direction)
+{
+	return (((u64)(page - mem_map) << PAGE_SHIFT) + offset +
+		PCI_DRAM_OFFSET);
+}
+
+#else /* CONFIG_HIGHMEM */
+
+u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page,
+		     unsigned long offset, size_t size, int direction)
+{
+	return pci_map_single(dev, (void *)page_address(page) + offset, size,
+			      direction);
+}
+
+#endif /* CONFIG_HIGHMEM */
+
+void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
+			int direction)
+{
+	return pci_unmap_single(dev, dma_addr, size, direction);
+}
+
+#endif /* 2.4.13 => 2.4.3 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))
+
+/**************************************/
+/* PCI DRIVER API */
+
+int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
+{
+	if (!pci_dma_supported(dev, mask))
+		return -EIO;
+	dev->dma_mask = mask;
+	return 0;
+}
+
+int _kc_pci_request_regions(struct pci_dev *dev, char *res_name)
+{
+	int i;
+
+	for (i = 0; i < 6; i++) {
+		if (pci_resource_len(dev, i) == 0)
+			continue;
+
+		if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
+			if (!request_region(pci_resource_start(dev, i),
+					    pci_resource_len(dev, i),
+					    res_name)) {
+				pci_release_regions(dev);
+				return -EBUSY;
+			}
+		} else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
+			if (!request_mem_region(pci_resource_start(dev, i),
+						pci_resource_len(dev, i),
+						res_name)) {
+				pci_release_regions(dev);
+				return -EBUSY;
+			}
+		}
+	}
+	return 0;
+}
+
+void _kc_pci_release_regions(struct pci_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < 6; i++) {
+		if (pci_resource_len(dev, i) == 0)
+			continue;
+
+		if (pci_resource_flags(dev, i) & IORESOURCE_IO)
+			release_region(pci_resource_start(dev, i),
+				       pci_resource_len(dev, i));
+
+		else if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
+			release_mem_region(pci_resource_start(dev, i),
+					   pci_resource_len(dev, i));
+	}
+}
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+struct net_device *_kc_alloc_etherdev(int sizeof_priv)
+{
+	struct net_device *dev;
+	int alloc_size;
+
+	alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
+	dev = kzalloc(alloc_size, GFP_KERNEL);
+	if (!dev)
+		return NULL;
+
+	if (sizeof_priv)
+		dev->priv = (void *)(((unsigned long)(dev + 1) + 31) & ~31);
+	dev->name[0] = '\0';
+	ether_setup(dev);
+
+	return dev;
+}
+
+int _kc_is_valid_ether_addr(u8 *addr)
+{
+	const char zaddr[6] = {
+		0,
+	};
+
+	return !(addr[0] & 1) && memcmp(addr, zaddr, 6);
+}
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
+
+int _kc_pci_set_power_state(struct pci_dev *dev, int state)
+{
+	return 0;
+}
+
+int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
+{
+	return 0;
+}
+
+#endif /* 2.4.6 => 2.4.3 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
+			    int off, int size)
+{
+	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+	frag->page = page;
+	frag->page_offset = off;
+	frag->size = size;
+	skb_shinfo(skb)->nr_frags = i + 1;
+}
+
+/*
+ * Original Copyright:
+ * find_next_bit.c: fallback find next bit implementation
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+			    unsigned long offset)
+{
+	const unsigned long *p = addr + BITOP_WORD(offset);
+	unsigned long result = offset & ~(BITS_PER_LONG - 1);
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset %= BITS_PER_LONG;
+	if (offset) {
+		tmp = *(p++);
+		tmp &= (~0UL << offset);
+		if (size < BITS_PER_LONG)
+			goto found_first;
+		if (tmp)
+			goto found_middle;
+		size -= BITS_PER_LONG;
+		result += BITS_PER_LONG;
+	}
+	while (size & ~(BITS_PER_LONG - 1)) {
+		tmp = *(p++);
+		if (tmp)
+			goto found_middle;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+
+found_first:
+	tmp &= (~0UL >> (BITS_PER_LONG - size));
+	if (tmp == 0UL) /* Are any bits set? */
+		return result + size; /* Nope. */
+found_middle:
+	return result + ffs(tmp);
+}
+
+size_t _kc_strlcpy(char *dest, const char *src, size_t size)
+{
+	size_t ret = strlen(src);
+
+	if (size) {
+		size_t len = (ret >= size) ? size - 1 : ret;
+
+		memcpy(dest, src, len);
+		dest[len] = '\0';
+	}
+	return ret;
+}
+
+#ifndef do_div
+#if BITS_PER_LONG == 32
+uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base)
+{
+	uint64_t rem = *n;
+	uint64_t b = base;
+	uint64_t res, d = 1;
+	uint32_t high = rem >> 32;
+
+	/* Reduce the thing a bit first */
+	res = 0;
+	if (high >= base) {
+		high /= base;
+		res = (uint64_t)high << 32;
+		rem -= (uint64_t)(high * base) << 32;
+	}
+
+	while ((int64_t)b > 0 && b < rem) {
+		b = b + b;
+		d = d + d;
+	}
+
+	do {
+		if (rem >= b) {
+			rem -= b;
+			res += d;
+		}
+		b >>= 1;
+		d >>= 1;
+	} while (d);
+
+	*n = res;
+	return rem;
+}
+#endif /* BITS_PER_LONG == 32 */
+#endif /* do_div */
+#endif /* 2.6.0 => 2.4.6 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 4))
+int _kc_scnprintf(char *buf, size_t size, const char *fmt, ...)
+{
+	va_list args;
+	int i;
+
+	va_start(args, fmt);
+	i = vsnprintf(buf, size, fmt, args);
+	va_end(args);
+	return (i >= size) ? (size - 1) : i;
+}
+#endif /* < 2.6.4 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10))
+DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = { 1 };
+#endif /* < 2.6.10 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 13))
+char *_kc_kstrdup(const char *s, unsigned int gfp)
+{
+	size_t len;
+	char *buf;
+
+	if (!s)
+		return NULL;
+
+	len = strlen(s) + 1;
+	buf = kmalloc(len, gfp);
+	if (buf)
+		memcpy(buf, s, len);
+	return buf;
+}
+#endif /* < 2.6.13 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14))
+void *_kc_kzalloc(size_t size, int flags)
+{
+	void *ret = kmalloc(size, flags);
+
+	if (ret)
+		memset(ret, 0, size);
+	return ret;
+}
+#endif /* <= 2.6.13 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+int _kc_skb_pad(struct sk_buff *skb, int pad)
+{
+	int ntail;
+
+	/* If the skbuff is non linear tailroom is always zero.. */
+	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
+		memset(skb->data + skb->len, 0, pad);
+		return 0;
+	}
+
+	ntail = skb->data_len + pad - (skb->end - skb->tail);
+	if (likely(skb_cloned(skb) || ntail > 0)) {
+		if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC))
+			goto free_skb;
+	}
+
+#ifdef MAX_SKB_FRAGS
+	if (skb_is_nonlinear(skb) && !__pskb_pull_tail(skb, skb->data_len))
+		goto free_skb;
+
+#endif
+	memset(skb->data + skb->len, 0, pad);
+	return 0;
+
+free_skb:
+	kfree_skb(skb);
+	return -ENOMEM;
+}
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 4)))
+int _kc_pci_save_state(struct pci_dev *pdev)
+{
+	struct adapter_struct *adapter = pci_get_drvdata(pdev);
+	int size = PCI_CONFIG_SPACE_LEN, i;
+	u16 pcie_cap_offset, pcie_link_status;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	/* no ->dev for 2.4 kernels */
+	WARN_ON(pdev->dev.driver_data == NULL);
+#endif
+	pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	if (pcie_cap_offset) {
+		if (!pci_read_config_word(pdev,
+					  pcie_cap_offset + PCIE_LINK_STATUS,
+					  &pcie_link_status))
+			size = PCIE_CONFIG_SPACE_LEN;
+	}
+	pci_config_space_ich8lan();
+#ifdef HAVE_PCI_ERS
+	if (adapter->config_space == NULL)
+#else
+	WARN_ON(adapter->config_space != NULL);
+#endif
+		adapter->config_space = kmalloc(size, GFP_KERNEL);
+	if (!adapter->config_space) {
+		printk(KERN_ERR "Out of memory in pci_save_state\n");
+		return -ENOMEM;
+	}
+	for (i = 0; i < (size / 4); i++)
+		pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]);
+	return 0;
+}
+
+void _kc_pci_restore_state(struct pci_dev *pdev)
+{
+	struct adapter_struct *adapter = pci_get_drvdata(pdev);
+	int size = PCI_CONFIG_SPACE_LEN, i;
+	u16 pcie_cap_offset;
+	u16 pcie_link_status;
+
+	if (adapter->config_space != NULL) {
+		pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+		if (pcie_cap_offset &&
+		    !pci_read_config_word(pdev,
+					  pcie_cap_offset + PCIE_LINK_STATUS,
+					  &pcie_link_status))
+			size = PCIE_CONFIG_SPACE_LEN;
+
+		pci_config_space_ich8lan();
+		for (i = 0; i < (size / 4); i++)
+			pci_write_config_dword(pdev, i * 4,
+					       adapter->config_space[i]);
+#ifndef HAVE_PCI_ERS
+		kfree(adapter->config_space);
+		adapter->config_space = NULL;
+#endif
+	}
+}
+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
+
+#ifdef HAVE_PCI_ERS
+void _kc_free_netdev(struct net_device *netdev)
+{
+	struct adapter_struct *adapter = netdev_priv(netdev);
+
+	kfree(adapter->config_space);
+#ifdef CONFIG_SYSFS
+	if (netdev->reg_state == NETREG_UNINITIALIZED) {
+		kfree((char *)netdev - netdev->padded);
+	} else {
+		BUG_ON(netdev->reg_state != NETREG_UNREGISTERED);
+		netdev->reg_state = NETREG_RELEASED;
+		class_device_put(&netdev->class_dev);
+	}
+#else
+	kfree((char *)netdev - netdev->padded);
+#endif
+}
+#endif
+
+void *_kc_kmemdup(const void *src, size_t len, unsigned gfp)
+{
+	void *p;
+
+	p = kzalloc(len, gfp);
+	if (p)
+		memcpy(p, src, len);
+	return p;
+}
+#endif /* <= 2.6.19 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21))
+struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev)
+{
+	return ((struct adapter_struct *)netdev_priv(netdev))->pdev;
+}
+#endif /* < 2.6.21 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22))
+/* hexdump code taken from lib/hexdump.c */
+static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
+				   int groupsize, unsigned char *linebuf,
+				   size_t linebuflen, bool ascii)
+{
+	const u8 *ptr = buf;
+	u8 ch;
+	int j, lx = 0;
+	int ascii_column;
+
+	if (rowsize != 16 && rowsize != 32)
+		rowsize = 16;
+
+	if (!len)
+		goto nil;
+	if (len > rowsize) /* limit to one line at a time */
+		len = rowsize;
+	if ((len % groupsize) != 0) /* no mixed size output */
+		groupsize = 1;
+
+	switch (groupsize) {
+	case 8: {
+		const u64 *ptr8 = buf;
+		int ngroups = len / groupsize;
+
+		for (j = 0; j < ngroups; j++)
+			lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
+					"%s%16.16llx", j ? " " : "",
+					(unsigned long long)*(ptr8 + j));
+		ascii_column = 17 * ngroups + 2;
+		break;
+	}
+
+	case 4: {
+		const u32 *ptr4 = buf;
+		int ngroups = len / groupsize;
+
+		for (j = 0; j < ngroups; j++)
+			lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
+					"%s%8.8x", j ? " " : "", *(ptr4 + j));
+		ascii_column = 9 * ngroups + 2;
+		break;
+	}
+
+	case 2: {
+		const u16 *ptr2 = buf;
+		int ngroups = len / groupsize;
+
+		for (j = 0; j < ngroups; j++)
+			lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
+					"%s%4.4x", j ? " " : "", *(ptr2 + j));
+		ascii_column = 5 * ngroups + 2;
+		break;
+	}
+
+	default:
+		for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
+			ch = ptr[j];
+			linebuf[lx++] = hex_asc(ch >> 4);
+			linebuf[lx++] = hex_asc(ch & 0x0f);
+			linebuf[lx++] = ' ';
+		}
+		if (j)
+			lx--;
+
+		ascii_column = 3 * rowsize + 2;
+		break;
+	}
+	if (!ascii)
+		goto nil;
+
+	while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
+		linebuf[lx++] = ' ';
+	for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
+		linebuf[lx++] =
+			(isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j] : '.';
+nil:
+	linebuf[lx++] = '\0';
+}
+
+void _kc_print_hex_dump(const char *level, const char *prefix_str,
+			int prefix_type, int rowsize, int groupsize,
+			const void *buf, size_t len, bool ascii)
+{
+	const u8 *ptr = buf;
+	int i, linelen, remaining = len;
+	unsigned char linebuf[200];
+
+	if (rowsize != 16 && rowsize != 32)
+		rowsize = 16;
+
+	for (i = 0; i < len; i += rowsize) {
+		linelen = min(remaining, rowsize);
+		remaining -= rowsize;
+		_kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
+				       linebuf, sizeof(linebuf), ascii);
+
+		switch (prefix_type) {
+		case DUMP_PREFIX_ADDRESS:
+			printk(KERN_DEBUG "%s%s%*p: %s\n", level, prefix_str,
+			       (int)(2 * sizeof(void *)), ptr + i, linebuf);
+			break;
+		case DUMP_PREFIX_OFFSET:
+			printk(KERN_DEBUG "%s%s%.8x: %s\n", level, prefix_str,
+			       i, linebuf);
+			break;
+		default:
+			printk(KERN_DEBUG "%s%s%s\n", level, prefix_str,
+			       linebuf);
+			break;
+		}
+	}
+}
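+
+/*
+ * Illustrative use of the helper above (the "desc" buffer name is just a
+ * placeholder): dump a buffer 16 bytes per line with an offset prefix and
+ * an ASCII column, e.g.
+ *
+ *	_kc_print_hex_dump(KERN_DEBUG, "rnp: ", DUMP_PREFIX_OFFSET,
+ *			   16, 1, desc, sizeof(*desc), true);
+ */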
+
+#endif /* < 2.6.22 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23))
+int ixgbe_dcb_netlink_register(void)
+{
+	return 0;
+}
+
+int ixgbe_dcb_netlink_unregister(void)
+{
+	return 0;
+}
+
+int ixgbe_copy_dcb_cfg(struct ixgbe_adapter __always_unused *adapter,
+		       int __always_unused tc_max)
+{
+	return 0;
+}
+#endif /* < 2.6.23 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#ifdef NAPI
+struct net_device *napi_to_poll_dev(const struct napi_struct *napi)
+{
+	struct adapter_q_vector *q_vector =
+		container_of(napi, struct adapter_q_vector, napi);
+	return &q_vector->poll_dev;
+}
+
+int __kc_adapter_clean(struct net_device *netdev, int *budget)
+{
+	int work_done;
+	int work_to_do = min(*budget, netdev->quota);
+	/* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
+	struct napi_struct *napi = netdev->priv;
+
+	work_done = napi->poll(napi, work_to_do);
+	*budget -= work_done;
+	netdev->quota -= work_done;
+	return (work_done >= work_to_do) ? 1 : 0;
+}
+#endif /* NAPI */
+#endif /* < 2.6.24 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
+void _kc_pci_disable_link_state(struct pci_dev *pdev, int state)
+{
+	struct pci_dev *parent = pdev->bus->self;
+	u16 link_state;
+	int pos;
+
+	if (!parent)
+		return;
+
+	pos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+	if (pos) {
+		pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state);
+		link_state &= ~state;
+		pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state);
+	}
+}
+#endif /* < 2.6.26 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27))
+#ifdef HAVE_TX_MQ
+void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
+{
+	struct adapter_struct *adapter = netdev_priv(netdev);
+	int i;
+
+	netif_stop_queue(netdev);
+	if (netif_is_multiqueue(netdev))
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			netif_stop_subqueue(netdev, i);
+}
+void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
+{
+	struct adapter_struct *adapter = netdev_priv(netdev);
+	int i;
+
+	netif_wake_queue(netdev);
+	if (netif_is_multiqueue(netdev))
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			netif_wake_subqueue(netdev, i);
+}
+void _kc_netif_tx_start_all_queues(struct net_device *netdev)
+{
+	struct adapter_struct *adapter = netdev_priv(netdev);
+	int i;
+
+	netif_start_queue(netdev);
+	if (netif_is_multiqueue(netdev))
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			netif_start_subqueue(netdev, i);
+}
+#endif /* HAVE_TX_MQ */
+
+void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...)
+{
+	va_list args;
+
+	printk(KERN_WARNING "------------[ cut here ]------------\n");
+	printk(KERN_WARNING "WARNING: at %s:%d \n", file, line);
+	va_start(args, fmt);
+	vprintk(fmt, args);
+	va_end(args);
+
+	dump_stack();
+}
+#endif /* < 2.6.27 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28))
+
+int _kc_pci_prepare_to_sleep(struct pci_dev *dev)
+{
+	pci_power_t target_state;
+	int error;
+
+	target_state = pci_choose_state(dev, PMSG_SUSPEND);
+
+	pci_enable_wake(dev, target_state, true);
+
+	error = pci_set_power_state(dev, target_state);
+
+	if (error)
+		pci_enable_wake(dev, target_state, false);
+
+	return error;
+}
+
+int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable)
+{
+	int err;
+
+	err = pci_enable_wake(dev, PCI_D3cold, enable);
+	if (err)
+		goto out;
+
+	err = pci_enable_wake(dev, PCI_D3hot, enable);
+
+out:
+	return err;
+}
+#endif /* < 2.6.28 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29))
+static void __kc_pci_set_master(struct pci_dev *pdev, bool enable)
+{
+	u16 old_cmd, cmd;
+
+	pci_read_config_word(pdev, PCI_COMMAND, &old_cmd);
+	if (enable)
+		cmd = old_cmd | PCI_COMMAND_MASTER;
+	else
+		cmd = old_cmd & ~PCI_COMMAND_MASTER;
+	if (cmd != old_cmd) {
+		dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n",
+			enable ? "enabling" : "disabling");
+		pci_write_config_word(pdev, PCI_COMMAND, cmd);
+	}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 7))
+	pdev->is_busmaster = enable;
+#endif
+}
+
+void _kc_pci_clear_master(struct pci_dev *dev)
+{
+	__kc_pci_set_master(dev, false);
+}
+#endif /* < 2.6.29 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34))
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 0))
+int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev)
+{
+	int num_vf = 0;
+#ifdef CONFIG_PCI_IOV
+	struct pci_dev *vfdev;
+
+	/* loop through all ethernet devices starting at PF dev */
+	vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL);
+	while (vfdev) {
+		if (vfdev->is_virtfn && vfdev->physfn == dev)
+			num_vf++;
+
+		vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev);
+	}
+
+#endif
+	return num_vf;
+}
+#endif /* RHEL_RELEASE_CODE */
+#endif /* < 2.6.34 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35))
+#ifdef HAVE_TX_MQ
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 0)))
+#ifndef CONFIG_NETDEVICES_MULTIQUEUE
+int _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
+{
+	unsigned int real_num = dev->real_num_tx_queues;
+	struct Qdisc *qdisc;
+	int i;
+
+	if (txq < 1 || txq > dev->num_tx_queues)
+		return -EINVAL;
+
+	else if (txq > real_num)
+		dev->real_num_tx_queues = txq;
+	else if (txq < real_num) {
+		dev->real_num_tx_queues = txq;
+		for (i = txq; i < dev->num_tx_queues; i++) {
+			qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+			if (qdisc) {
+				spin_lock_bh(qdisc_lock(qdisc));
+				qdisc_reset(qdisc);
+				spin_unlock_bh(qdisc_lock(qdisc));
+			}
+		}
+	}
+
+	return 0;
+}
+#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
+#endif /* HAVE_TX_MQ */
+
+ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
+				   const void __user *from, size_t count)
+{
+	loff_t pos = *ppos;
+	size_t res;
+
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= available || !count)
+		return 0;
+	if (count > available - pos)
+		count = available - pos;
+	res = copy_from_user(to + pos, from, count);
+	if (res == count)
+		return -EFAULT;
+	count -= res;
+	*ppos = pos + count;
+	return count;
+}
+
+#endif /* < 2.6.35 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36))
+static const u32 _kc_flags_dup_features =
+	(ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH);
+
+u32 _kc_ethtool_op_get_flags(struct net_device *dev)
+{
+	return dev->features & _kc_flags_dup_features;
+}
+
+int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
+{
+	if (data & ~supported)
+		return -EINVAL;
+
+	dev->features = ((dev->features & ~_kc_flags_dup_features) |
+			 (data & _kc_flags_dup_features));
+	return 0;
+}
+#endif /* < 2.6.36 */
+
+/******************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39))
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 0)))
+#ifdef HAVE_NETDEV_SELECT_QUEUE
+#include 
+#include 
+
+u16 ___kc_skb_tx_hash(struct net_device *dev, const struct sk_buff *skb,
+		      u16 num_tx_queues)
+{
+	u32 hash;
+	u16 qoffset = 0;
+	u16 qcount = num_tx_queues;
+
+	if (skb_rx_queue_recorded(skb)) {
+		hash = skb_get_rx_queue(skb);
+		while (unlikely(hash >= num_tx_queues))
+			hash -= num_tx_queues;
+		return hash;
+	}
+
+	if (netdev_get_num_tc(dev)) {
+		struct adapter_struct *kc_adapter = netdev_priv(dev);
+
+		if (skb->priority == TC_PRIO_CONTROL) {
+			qoffset = kc_adapter->dcb_tc - 1;
+		} else {
+			qoffset = skb->vlan_tci;
+			qoffset &= RNP_TX_FLAGS_VLAN_PRIO_MASK;
+			qoffset >>= 13;
+		}
+
+		qcount = kc_adapter->ring_feature[RING_F_RSS].indices;
+		qoffset *= qcount;
+	}
+
+	if (skb->sk && skb->sk->sk_hash)
+		hash = skb->sk->sk_hash;
+	else
+#ifdef NETIF_F_RXHASH
+		hash = (__force u16)skb->protocol ^ skb->rxhash;
+#else
+		hash = skb->protocol;
+#endif
+
+	hash = jhash_1word(hash, _kc_hashrnd);
+
+	return (u16)(((u64)hash * qcount) >> 32) + qoffset;
+}
+#endif /* HAVE_NETDEV_SELECT_QUEUE */
+
+u8 _kc_netdev_get_num_tc(struct net_device *dev)
+{
+	struct adapter_struct *kc_adapter = netdev_priv(dev);
+
+	if (kc_adapter->flags & RNP_FLAG_DCB_ENABLED)
+		return kc_adapter->dcb_tc;
+	else
+		return 0;
+}
+
+int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc)
+{
+	struct adapter_struct *kc_adapter = netdev_priv(dev);
+
+	if (num_tc > RNP_DCB_MAX_TRAFFIC_CLASS)
+		return -EINVAL;
+
+	kc_adapter->dcb_tc = num_tc;
+
+	return 0;
+}
+
+u8 _kc_netdev_get_prio_tc_map(struct net_device __maybe_unused *dev,
+			      u8 __maybe_unused up)
+{
+	struct adapter_struct *kc_adapter = netdev_priv(dev);
+
+	return ixgbe_dcb_get_tc_from_up(&kc_adapter->dcb_cfg, 0, up);
+}
+
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
+#endif /* < 2.6.39 */
+
+/******************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
+void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
+			 int size, unsigned int truesize)
+{
+	skb_fill_page_desc(skb, i, page, off, size);
+	skb->len += size;
+	skb->data_len += size;
+	skb->truesize += truesize;
+}
+
+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0))
+int _kc_simple_open(struct inode *inode, struct file *file)
+{
+	if (inode->i_private)
+		file->private_data = inode->i_private;
+
+	return 0;
+}
+#endif /* SLE_VERSION < 11,3,0 */
+
+#endif /* < 3.4.0 */
+
+/******************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+static inline int __kc_pcie_cap_version(struct pci_dev *dev)
+{
+	int pos;
+	u16 reg16;
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	if (!pos)
+		return 0;
+	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
+	return reg16 & PCI_EXP_FLAGS_VERS;
+}
+
+static inline bool
+__kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev)
+{
+	return true;
+}
+
+static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev)
+{
+	int type = pci_pcie_type(dev);
+
+	return __kc_pcie_cap_version(dev) > 1 ||
+	       type == PCI_EXP_TYPE_ROOT_PORT ||
+	       type == PCI_EXP_TYPE_ENDPOINT || type == PCI_EXP_TYPE_LEG_END;
+}
+
+static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev)
+{
+	int type = pci_pcie_type(dev);
+	int pos;
+	u16 pcie_flags_reg;
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	if (!pos)
+		return false;
+	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg);
+
+	return __kc_pcie_cap_version(dev) > 1 ||
+	       type == PCI_EXP_TYPE_ROOT_PORT ||
+	       (type == PCI_EXP_TYPE_DOWNSTREAM &&
+		pcie_flags_reg & PCI_EXP_FLAGS_SLOT);
+}
+
+static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev)
+{
+	int type = pci_pcie_type(dev);
+
+	return __kc_pcie_cap_version(dev) > 1 ||
+	       type == PCI_EXP_TYPE_ROOT_PORT || type == PCI_EXP_TYPE_RC_EC;
+}
+
+static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
+{
+	if (!pci_is_pcie(dev))
+		return false;
+
+	switch (pos) {
+	case PCI_EXP_FLAGS_TYPE:
+		return true;
+	case PCI_EXP_DEVCAP:
+	case PCI_EXP_DEVCTL:
+	case PCI_EXP_DEVSTA:
+		return __kc_pcie_cap_has_devctl(dev);
+	case PCI_EXP_LNKCAP:
+	case PCI_EXP_LNKCTL:
+	case PCI_EXP_LNKSTA:
+		return __kc_pcie_cap_has_lnkctl(dev);
+	case PCI_EXP_SLTCAP:
+	case PCI_EXP_SLTCTL:
+	case PCI_EXP_SLTSTA:
+		return __kc_pcie_cap_has_sltctl(dev);
+	case PCI_EXP_RTCTL:
+	case PCI_EXP_RTCAP:
+	case PCI_EXP_RTSTA:
+		return __kc_pcie_cap_has_rtctl(dev);
+	case PCI_EXP_DEVCAP2:
+	case PCI_EXP_DEVCTL2:
+	case PCI_EXP_LNKCAP2:
+	case PCI_EXP_LNKCTL2:
+	case PCI_EXP_LNKSTA2:
+		return __kc_pcie_cap_version(dev) > 1;
+	default:
+		return false;
+	}
+}
+
+/*
+ * Note that these accessor functions are only for the "PCI Express
+ * Capability" (see PCIe spec r3.0, sec 7.8).  They do not apply to the
+ * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
+ */
+int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
+{
+	int ret;
+
+	*val = 0;
+	if (pos & 1)
+		return -EINVAL;
+
+	if (__kc_pcie_capability_reg_implemented(dev, pos)) {
+		ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
+		/*
+		 * Reset *val to 0 if pci_read_config_word() fails, it may
+		 * have been written as 0xFFFF if hardware error happens
+		 * during pci_read_config_word().
+		 */
+		if (ret)
+			*val = 0;
+		return ret;
+	}
+
+	/*
+	 * For Functions that do not implement the Slot Capabilities,
+	 * Slot Status, and Slot Control registers, these spaces must
+	 * be hardwired to 0b, with the exception of the Presence Detect
+	 * State bit in the Slot Status register of Downstream Ports,
+	 * which must be hardwired to 1b.  (PCIe Base Spec 3.0, sec 7.8)
+	 */
+	if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
+	    pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
+		*val = PCI_EXP_SLTSTA_PDS;
+	}
+
+	return 0;
+}
+
+int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
+{
+	int ret;
+
+	*val = 0;
+	if (pos & 3)
+		return -EINVAL;
+
+	if (__kc_pcie_capability_reg_implemented(dev, pos)) {
+		ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
+		/*
+		 * Reset *val to 0 if pci_read_config_dword() fails, it may
+		 * have been written as 0xFFFFFFFF if hardware error happens
+		 * during pci_read_config_dword().
+		 */
+		if (ret)
+			*val = 0;
+		return ret;
+	}
+
+	if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
+	    pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
+		*val = PCI_EXP_SLTSTA_PDS;
+	}
+
+	return 0;
+}
+
+int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
+{
+	if (pos & 1)
+		return -EINVAL;
+
+	if (!__kc_pcie_capability_reg_implemented(dev, pos))
+		return 0;
+
+	return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
+}
+
+int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+					    u16 clear, u16 set)
+{
+	int ret;
+	u16 val;
+
+	ret = __kc_pcie_capability_read_word(dev, pos, &val);
+	if (!ret) {
+		val &= ~clear;
+		val |= set;
+		ret = __kc_pcie_capability_write_word(dev, pos, val);
+	}
+
+	return ret;
+}
+
+int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, u16 clear)
+{
+	return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0);
+}
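+
+/*
+ * Illustrative caller of the accessors above ("pdev" is a placeholder):
+ * clear the ASPM control bits in the Link Control register the same way
+ * pcie_capability_clear_word() is used on newer kernels, e.g.
+ *
+ *	__kc_pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
+ *					PCI_EXP_LNKCTL_ASPMC);
+ */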
+#endif /* < 3.7.0 */
+
+/******************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
+#ifdef CONFIG_XPS
+#if NR_CPUS < 64
+#define _KC_MAX_XPS_CPUS NR_CPUS
+#else
+#define _KC_MAX_XPS_CPUS 64
+#endif
+
+/*
+ * netdev_queue sysfs structures and functions.
+ */
+struct _kc_netdev_queue_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct netdev_queue *queue,
+			struct _kc_netdev_queue_attribute *attr, char *buf);
+	ssize_t (*store)(struct netdev_queue *queue,
+			 struct _kc_netdev_queue_attribute *attr,
+			 const char *buf, size_t len);
+};
+
+#define to_kc_netdev_queue_attr(_attr)                                         \
+	container_of(_attr, struct _kc_netdev_queue_attribute, attr)
+
+int __kc_netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
+			     u16 index)
+{
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, index);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38))
+	/* Redhat requires some odd extended netdev structures */
+	struct netdev_tx_queue_extended *txq_ext =
+		netdev_extended(dev)->_tx_ext + index;
+	struct kobj_type *ktype = txq_ext->kobj.ktype;
+#else
+	struct kobj_type *ktype = txq->kobj.ktype;
+#endif
+	struct _kc_netdev_queue_attribute *xps_attr;
+	struct attribute *attr = NULL;
+	int i, len, err;
+#define _KC_XPS_BUFLEN (DIV_ROUND_UP(_KC_MAX_XPS_CPUS, 32) * 9)
+	char buf[_KC_XPS_BUFLEN];
+
+	if (!ktype)
+		return -ENOMEM;
+
+	/* attempt to locate the XPS attribute in the Tx queue */
+	for (i = 0; (attr = ktype->default_attrs[i]); i++) {
+		if (!strcmp("xps_cpus", attr->name))
+			break;
+	}
+
+	/* if we did not find it return an error */
+	if (!attr)
+		return -EINVAL;
+
+	/* copy the mask into a string */
+	len = bitmap_scnprintf(buf, _KC_XPS_BUFLEN, cpumask_bits(mask),
+			       _KC_MAX_XPS_CPUS);
+	if (!len)
+		return -ENOMEM;
+
+	xps_attr = to_kc_netdev_queue_attr(attr);
+
+	/* Store the XPS value using the SYSFS store call */
+	err = xps_attr->store(txq, xps_attr, buf, len);
+
+	/* we only had an error on err < 0 */
+	return (err < 0) ? err : 0;
+}
+#endif /* CONFIG_XPS */
+#ifdef HAVE_NETDEV_SELECT_QUEUE
+static inline int kc_get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+{
+#ifdef CONFIG_XPS
+	struct xps_dev_maps *dev_maps;
+	struct xps_map *map;
+	int queue_index = -1;
+
+	rcu_read_lock();
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38))
+	/* Redhat requires some odd extended netdev structures */
+	dev_maps = rcu_dereference(netdev_extended(dev)->xps_maps);
+#else
+	dev_maps = rcu_dereference(dev->xps_maps);
+#endif
+	if (dev_maps) {
+		map = rcu_dereference(
+			dev_maps->cpu_map[raw_smp_processor_id()]);
+		if (map) {
+			if (map->len == 1)
+				queue_index = map->queues[0];
+			else {
+				u32 hash;
+
+				if (skb->sk && skb->sk->sk_hash)
+					hash = skb->sk->sk_hash;
+				else
+					hash = (__force u16)skb->protocol ^
+					       skb->rxhash;
+				hash = jhash_1word(hash, _kc_hashrnd);
+				queue_index =
+					map->queues[((u64)hash * map->len) >>
+						    32];
+			}
+			if (unlikely(queue_index >= dev->real_num_tx_queues))
+				queue_index = -1;
+		}
+	}
+	rcu_read_unlock();
+
+	return queue_index;
+#else
+	struct adapter_struct *kc_adapter = netdev_priv(dev);
+	int queue_index = -1;
+
+	if (kc_adapter->flags & RNP_FLAG_FDIR_HASH_CAPABLE) {
+		queue_index = skb_rx_queue_recorded(skb) ?
+				      skb_get_rx_queue(skb) :
+				      smp_processor_id();
+		while (unlikely(queue_index >= dev->real_num_tx_queues))
+			queue_index -= dev->real_num_tx_queues;
+		return queue_index;
+	}
+
+	return -1;
+#endif
+}
+
+u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+	int queue_index = sk_tx_queue_get(sk);
+	int new_index;
+
+	if (queue_index >= 0 && queue_index < dev->real_num_tx_queues) {
+#ifdef CONFIG_XPS
+		if (!skb->ooo_okay)
+#endif
+			return queue_index;
+	}
+
+	new_index = kc_get_xps_queue(dev, skb);
+	if (new_index < 0)
+		new_index = skb_tx_hash(dev, skb);
+
+	if (queue_index != new_index && sk) {
+		struct dst_entry *dst = rcu_dereference(sk->sk_dst_cache);
+
+		if (dst && skb_dst(skb) == dst)
+			sk_tx_queue_set(sk, new_index);
+	}
+
+	return new_index;
+}
+
+#endif /* HAVE_NETDEV_SELECT_QUEUE */
+#endif /* 3.9.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+#ifdef HAVE_FDB_OPS
+#ifdef USE_CONST_DEV_UC_CHAR
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+			  struct net_device *dev, const unsigned char *addr,
+			  u16 flags)
+#else
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+			  unsigned char *addr, u16 flags)
+#endif
+{
+	int err = -EINVAL;
+
+	/* If aging addresses are supported device will need to
+	 * implement its own handler for this.
+	 */
+	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+		pr_info("%s: FDB only supports static addresses\n", dev->name);
+		return err;
+	}
+
+	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+		err = dev_uc_add_excl(dev, addr);
+	else if (is_multicast_ether_addr(addr))
+		err = dev_mc_add_excl(dev, addr);
+
+	/* Only return duplicate errors if NLM_F_EXCL is set */
+	if (err == -EEXIST && !(flags & NLM_F_EXCL))
+		err = 0;
+
+	return err;
+}
+
+#ifdef USE_CONST_DEV_UC_CHAR
+#ifdef HAVE_FDB_DEL_NLATTR
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+			  struct net_device *dev, const unsigned char *addr)
+#else
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+			  const unsigned char *addr)
+#endif
+#else
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+			  unsigned char *addr)
+#endif
+{
+	int err = -EINVAL;
+
+	/* If aging addresses are supported device will need to
+	 * implement its own handler for this.
+	 */
+	if (!(ndm->ndm_state & NUD_PERMANENT)) {
+		pr_info("%s: FDB only supports static addresses\n", dev->name);
+		return err;
+	}
+
+	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+		err = dev_uc_del(dev, addr);
+	else if (is_multicast_ether_addr(addr))
+		err = dev_mc_del(dev, addr);
+
+	return err;
+}
+
+#endif /* HAVE_FDB_OPS */
+#ifdef CONFIG_PCI_IOV
+int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev)
+{
+	unsigned int vfs_assigned = 0;
+#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
+	int pos;
+	struct pci_dev *vfdev;
+	unsigned short dev_id;
+
+	/* only search if we are a PF */
+	if (!dev->is_physfn)
+		return 0;
+
+	/* find SR-IOV capability */
+	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+	if (!pos)
+		return 0;
+
+	/*
+	 * determine the device ID for the VFs, the vendor ID will be the
+	 * same as the PF so there is no need to check for that one
+	 */
+	pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id);
+
+	/* loop through all the VFs to see if we own any that are assigned */
+	vfdev = pci_get_device(dev->vendor, dev_id, NULL);
+	while (vfdev) {
+		/*
+		 * It is considered assigned if it is a virtual function with
+		 * our dev as the physical function and the assigned bit is set
+		 */
+		if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
+		    (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
+			vfs_assigned++;
+
+		vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
+	}
+
+#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */
+	return vfs_assigned;
+}
+
+#endif /* CONFIG_PCI_IOV */
+#endif /* 3.10.0 */
+
+static const unsigned char __maybe_unused pcie_link_speed[] = {
+	PCI_SPEED_UNKNOWN, /* 0 */
+	PCIE_SPEED_2_5GT, /* 1 */
+	PCIE_SPEED_5_0GT, /* 2 */
+	PCIE_SPEED_8_0GT, /* 3 */
+	PCIE_SPEED_16_0GT, /* 4 */
+	PCI_SPEED_UNKNOWN, /* 5 */
+	PCI_SPEED_UNKNOWN, /* 6 */
+	PCI_SPEED_UNKNOWN, /* 7 */
+	PCI_SPEED_UNKNOWN, /* 8 */
+	PCI_SPEED_UNKNOWN, /* 9 */
+	PCI_SPEED_UNKNOWN, /* A */
+	PCI_SPEED_UNKNOWN, /* B */
+	PCI_SPEED_UNKNOWN, /* C */
+	PCI_SPEED_UNKNOWN, /* D */
+	PCI_SPEED_UNKNOWN, /* E */
+	PCI_SPEED_UNKNOWN /* F */
+};
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0))
+int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
+			       enum pcie_link_width *width)
+{
+	*speed = PCI_SPEED_UNKNOWN;
+	*width = PCIE_LNK_WIDTH_UNKNOWN;
+
+	while (dev) {
+		u16 lnksta;
+		enum pci_bus_speed next_speed;
+		enum pcie_link_width next_width;
+		int ret =
+			pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+
+		if (ret)
+			return ret;
+
+		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
+			     PCI_EXP_LNKSTA_NLW_SHIFT;
+
+		if (next_speed < *speed)
+			*speed = next_speed;
+
+		if (next_width < *width)
+			*width = next_width;
+
+		dev = dev->bus->self;
+	}
+
+	return 0;
+}
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 7))
+int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev)
+{
+	int i;
+	u16 status;
+
+	/* Wait for Transaction Pending bit clean */
+	for (i = 0; i < 4; i++) {
+		if (i)
+			msleep((1 << (i - 1)) * 100);
+
+		pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
+		if (!(status & PCI_EXP_DEVSTA_TRPND))
+			return 1;
+	}
+
+	return 0;
+}
+#endif /*  crs_timeout) {
+			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
+					    "responding\n",
+			       pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
+			       PCI_FUNC(devfn));
+			return false;
+		}
+	}
+
+	return true;
+}
+
+bool _kc_pci_device_is_present(struct pci_dev *pdev)
+{
+	u32 v;
+
+	return _kc_pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
+}
+#endif /* nexthdr;
+	bool found;
+
+#define __KC_IP6_FH_F_FRAG BIT(0)
+#define __KC_IP6_FH_F_AUTH BIT(1)
+#define __KC_IP6_FH_F_SKIP_RH BIT(2)
+
+	if (fragoff)
+		*fragoff = 0;
+
+	if (*offset) {
+		struct ipv6hdr _ip6, *ip6;
+
+		ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6);
+		if (!ip6 || (ip6->version != 6)) {
+			printk(KERN_ERR "IPv6 header not found\n");
+			return -EBADMSG;
+		}
+		start = *offset + sizeof(struct ipv6hdr);
+		nexthdr = ip6->nexthdr;
+	}
+
+	do {
+		struct ipv6_opt_hdr _hdr, *hp;
+		unsigned int hdrlen;
+
+		found = (nexthdr == target);
+
+		if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
+			if (target < 0 || found)
+				break;
+			return -ENOENT;
+		}
+
+		hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
+		if (!hp)
+			return -EBADMSG;
+
+		if (nexthdr == NEXTHDR_ROUTING) {
+			struct ipv6_rt_hdr _rh, *rh;
+
+			rh = skb_header_pointer(skb, start, sizeof(_rh), &_rh);
+			if (!rh)
+				return -EBADMSG;
+
+			if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) &&
+			    rh->segments_left == 0)
+				found = false;
+		}
+
+		if (nexthdr == NEXTHDR_FRAGMENT) {
+			unsigned short _frag_off;
+			__be16 *fp;
+
+			if (flags) /* Indicate that this is a fragment */
+				*flags |= __KC_IP6_FH_F_FRAG;
+			fp = skb_header_pointer(
+				skb,
+				start + offsetof(struct frag_hdr, frag_off),
+				sizeof(_frag_off), &_frag_off);
+			if (!fp)
+				return -EBADMSG;
+
+			_frag_off = ntohs(*fp) & ~0x7;
+			if (_frag_off) {
+				if (target < 0 &&
+				    ((!ipv6_ext_hdr(hp->nexthdr)) ||
+				     hp->nexthdr == NEXTHDR_NONE)) {
+					if (fragoff)
+						*fragoff = _frag_off;
+					return hp->nexthdr;
+				}
+				return -ENOENT;
+			}
+			hdrlen = 8;
+		} else if (nexthdr == NEXTHDR_AUTH) {
+			if (flags && (*flags & __KC_IP6_FH_F_AUTH) &&
+			    (target < 0))
+				break;
+			hdrlen = (hp->hdrlen + 2) << 2;
+		} else
+			hdrlen = ipv6_optlen(hp);
+
+		if (!found) {
+			nexthdr = hp->nexthdr;
+			start += hdrlen;
+		}
+	} while (!found);
+
+	*offset = start;
+	return nexthdr;
+}
+
+int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
+			       int minvec, int maxvec)
+{
+	int nvec = maxvec;
+	int rc;
+
+	if (maxvec < minvec)
+		return -ERANGE;
+
+	do {
+		rc = pci_enable_msix(dev, entries, nvec);
+		if (rc < 0) {
+			return rc;
+		} else if (rc > 0) {
+			if (rc < minvec)
+				return -ENOSPC;
+			nvec = rc;
+		}
+	} while (rc);
+
+	return nvec;
+}
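+
+/*
+ * Illustrative use of the MSI-X range helper above (the adapter fields are
+ * placeholders): request up to the preferred vector count but accept any
+ * number down to the minimum, as pci_enable_msix_range() does upstream:
+ *
+ *	nvec = __kc_pci_enable_msix_range(pdev, adapter->msix_entries,
+ *					  2, adapter->max_q_vectors);
+ *	if (nvec < 0)
+ *		return nvec;
+ */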
+#endif /* 3.14.0 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0))
+char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
+{
+	size_t size;
+	char *buf;
+
+	if (!s)
+		return NULL;
+
+	size = strlen(s) + 1;
+	buf = devm_kzalloc(dev, size, gfp);
+	if (buf)
+		memcpy(buf, s, size);
+	return buf;
+}
+
+void __kc_netdev_rss_key_fill(void *buffer, size_t len)
+{
+	/* Set of random keys generated using kernel random number generator */
+	static const u8 seed[NETDEV_RSS_KEY_LEN] = {
+		0xE6, 0xFA, 0x35, 0x62, 0x95, 0x12, 0x3E, 0xA3, 0xFB,
+		0x46, 0xC1, 0x5F, 0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49,
+		0x50, 0x95, 0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD,
+		0xBC, 0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41,
+		0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A, 0xD7,
+		0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20
+	};
+
+	BUG_ON(len > NETDEV_RSS_KEY_LEN);
+	memcpy(buffer, seed, len);
+}
+#endif /* 3.15.0 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+int __kc_hw_addr_sync_dev(
+	struct netdev_hw_addr_list *list, struct net_device *dev,
+	int (*sync)(struct net_device *, const unsigned char *),
+	int (*unsync)(struct net_device *, const unsigned char *))
+{
+	struct netdev_hw_addr *ha, *tmp;
+	int err;
+
+	/* first go through and flush out any stale entries */
+	list_for_each_entry_safe (ha, tmp, &list->list, list) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+		if (!ha->synced || ha->refcount != 1)
+#else
+		if (!ha->sync_cnt || ha->refcount != 1)
+#endif
+			continue;
+
+		if (unsync && unsync(dev, ha->addr))
+			continue;
+
+		list_del_rcu(&ha->list);
+		kfree_rcu(ha, rcu_head);
+		list->count--;
+	}
+
+	/* go through and sync new entries to the list */
+	list_for_each_entry_safe (ha, tmp, &list->list, list) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+		if (ha->synced)
+#else
+		if (ha->sync_cnt)
+#endif
+			continue;
+
+		err = sync(dev, ha->addr);
+		if (err)
+			return err;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+		ha->synced = true;
+#else
+		ha->sync_cnt++;
+#endif
+		ha->refcount++;
+	}
+
+	return 0;
+}
+
+void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
+			     struct net_device *dev,
+			     int (*unsync)(struct net_device *,
+					   const unsigned char *))
+{
+	struct netdev_hw_addr *ha, *tmp;
+
+	list_for_each_entry_safe (ha, tmp, &list->list, list) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+		if (!ha->synced)
+#else
+		if (!ha->sync_cnt)
+#endif
+			continue;
+
+		if (unsync && unsync(dev, ha->addr))
+			continue;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+		ha->synced = false;
+#else
+		ha->sync_cnt--;
+#endif
+		if (--ha->refcount)
+			continue;
+
+		list_del_rcu(&ha->list);
+		kfree_rcu(ha, rcu_head);
+		list->count--;
+	}
+}
+
+#endif /* NETDEV_HW_ADDR_T_UNICAST  */
+#ifndef NETDEV_HW_ADDR_T_MULTICAST
+int __kc_dev_addr_sync_dev(
+	struct dev_addr_list **list, int *count, struct net_device *dev,
+	int (*sync)(struct net_device *, const unsigned char *),
+	int (*unsync)(struct net_device *, const unsigned char *))
+{
+	struct dev_addr_list *da, **next = list;
+	int err;
+
+	/* first go through and flush out any stale entries */
+	while ((da = *next) != NULL) {
+		if (da->da_synced && da->da_users == 1) {
+			if (!unsync || !unsync(dev, da->da_addr)) {
+				*next = da->next;
+				kfree(da);
+				(*count)--;
+				continue;
+			}
+		}
+		next = &da->next;
+	}
+
+	/* go through and sync new entries to the list */
+	for (da = *list; da != NULL; da = da->next) {
+		if (da->da_synced)
+			continue;
+
+		err = sync(dev, da->da_addr);
+		if (err)
+			return err;
+
+		da->da_synced++;
+		da->da_users++;
+	}
+
+	return 0;
+}
+
+void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count,
+			      struct net_device *dev,
+			      int (*unsync)(struct net_device *,
+					    const unsigned char *))
+{
+	struct dev_addr_list *da;
+
+	while ((da = *list) != NULL) {
+		if (da->da_synced) {
+			if (!unsync || !unsync(dev, da->da_addr)) {
+				da->da_synced--;
+				if (--da->da_users == 0) {
+					*list = da->next;
+					kfree(da);
+					(*count)--;
+					continue;
+				}
+			}
+		}
+		list = &da->next;
+	}
+}
+#endif /* NETDEV_HW_ADDR_T_MULTICAST  */
+#endif /* HAVE_SET_RX_MODE */
+void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len,
+			gfp_t gfp)
+{
+	void *p;
+
+	p = devm_kzalloc(dev, len, gfp);
+	if (p)
+		memcpy(p, src, len);
+
+	return p;
+}
+#endif /* 3.16.0 */
+
+/******************************************************************************/
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) &&                        \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 5)))
+#endif /* <3.17.0 && RHEL_RELEASE_CODE < RHEL7.5 */
+
+/******************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
+#ifndef NO_PTP_SUPPORT
+static void __kc_sock_efree(struct sk_buff *skb)
+{
+	sock_put(skb->sk);
+}
+
+struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+	struct sk_buff *clone;
+
+	if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt))
+		return NULL;
+
+	clone = skb_clone(skb, GFP_ATOMIC);
+	if (!clone) {
+		sock_put(sk);
+		return NULL;
+	}
+
+	clone->sk = sk;
+	clone->destructor = __kc_sock_efree;
+
+	return clone;
+}
+
+void __kc_skb_complete_tx_timestamp(struct sk_buff *skb,
+				    struct skb_shared_hwtstamps *hwtstamps)
+{
+	struct sock_exterr_skb *serr;
+	struct sock *sk = skb->sk;
+	int err;
+
+	sock_hold(sk);
+
+	*skb_hwtstamps(skb) = *hwtstamps;
+
+	serr = SKB_EXT_ERR(skb);
+	memset(serr, 0, sizeof(*serr));
+	serr->ee.ee_errno = ENOMSG;
+	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+
+	err = sock_queue_err_skb(sk, skb);
+	if (err)
+		kfree_skb(skb);
+
+	sock_put(sk);
+}
+#endif
+
+/* include headers needed for get_headlen function */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#include <scsi/fc/fc_fcoe.h>
+#endif
+#ifdef HAVE_SCTP
+#include <linux/sctp.h>
+#endif
+
+u32 __kc_eth_get_headlen(const struct net_device __always_unused *dev,
+			 unsigned char *data, unsigned int max_len)
+{
+	union {
+		unsigned char *network;
+		/* l2 headers */
+		struct ethhdr *eth;
+		struct vlan_hdr *vlan;
+		/* l3 headers */
+		struct iphdr *ipv4;
+		struct ipv6hdr *ipv6;
+	} hdr;
+	__be16 proto;
+	u8 nexthdr = 0; /* default to not TCP */
+	u8 hlen;
+
+	/* this should never happen, but better safe than sorry */
+	if (max_len < ETH_HLEN)
+		return max_len;
+
+	/* initialize network frame pointer */
+	hdr.network = data;
+
+	/* set first protocol and move network header forward */
+	proto = hdr.eth->h_proto;
+	hdr.network += ETH_HLEN;
+
+again:
+	switch (proto) {
+	/* handle any vlan tag if present */
+	case __constant_htons(ETH_P_8021AD):
+	case __constant_htons(ETH_P_8021Q):
+		if ((hdr.network - data) > (max_len - VLAN_HLEN))
+			return max_len;
+
+		proto = hdr.vlan->h_vlan_encapsulated_proto;
+		hdr.network += VLAN_HLEN;
+		goto again;
+	/* handle L3 protocols */
+	case __constant_htons(ETH_P_IP):
+		if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
+			return max_len;
+
+		/* access ihl as a u8 to avoid unaligned access on ia64 */
+		hlen = (hdr.network[0] & 0x0F) << 2;
+
+		/* verify hlen meets minimum size requirements */
+		if (hlen < sizeof(struct iphdr))
+			return hdr.network - data;
+
+		/* record next protocol if header is present */
+		if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
+			nexthdr = hdr.ipv4->protocol;
+
+		hdr.network += hlen;
+		break;
+#ifdef NETIF_F_TSO6
+	case __constant_htons(ETH_P_IPV6):
+		if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
+			return max_len;
+
+		/* record next protocol */
+		nexthdr = hdr.ipv6->nexthdr;
+		hdr.network += sizeof(struct ipv6hdr);
+		break;
+#endif /* NETIF_F_TSO6 */
+#if IS_ENABLED(CONFIG_FCOE) || IS_ENABLED(CONFIG_FCOE_MODULE)
+	case __constant_htons(ETH_P_FCOE):
+		hdr.network += FCOE_HEADER_LEN;
+		break;
+#endif
+	default:
+		return hdr.network - data;
+	}
+
+	/* finally sort out L4 */
+	switch (nexthdr) {
+	case IPPROTO_TCP:
+		if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
+			return max_len;
+
+		/* access doff as a u8 to avoid unaligned access on ia64 */
+		hdr.network += max_t(u8, sizeof(struct tcphdr),
+				     (hdr.network[12] & 0xF0) >> 2);
+
+		break;
+	case IPPROTO_UDP:
+	case IPPROTO_UDPLITE:
+		hdr.network += sizeof(struct udphdr);
+		break;
+#ifdef HAVE_SCTP
+	case IPPROTO_SCTP:
+		hdr.network += sizeof(struct sctphdr);
+		break;
+#endif
+	}
+
+	/*
+	 * If everything has gone correctly hdr.network should be the
+	 * data section of the packet and will be the end of the header.
+	 * If not then it probably represents the end of the last recognized
+	 * header.
+	 */
+	return min_t(unsigned int, hdr.network - data, max_len);
+}
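+
+/*
+ * Illustrative caller of the header-length parser above (buffer and size
+ * names are placeholders): bound the amount of header data pulled into the
+ * skb linear area by what the parser recognizes, e.g.
+ *
+ *	pull_len = __kc_eth_get_headlen(netdev, page_address(page) + offset,
+ *					RNP_RX_HDR_SIZE);
+ */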
+
+#endif /* < 3.18.0 */
+
+/******************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
+#ifdef HAVE_NET_GET_RANDOM_ONCE
+static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN];
+
+void __kc_netdev_rss_key_fill(void *buffer, size_t len)
+{
+	BUG_ON(len > sizeof(__kc_netdev_rss_key));
+	net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key));
+	memcpy(buffer, __kc_netdev_rss_key, len);
+}
+#endif
+
+int _kc_bitmap_print_to_pagebuf(bool list, char *buf,
+				const unsigned long *maskp, int nmaskbits)
+{
+	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf - 2;
+	int n = 0;
+
+	if (len > 1) {
+		n = list ? bitmap_scnlistprintf(buf, len, maskp, nmaskbits) :
+			   bitmap_scnprintf(buf, len, maskp, nmaskbits);
+		buf[n++] = '\n';
+		buf[n] = '\0';
+	}
+	return n;
+}
+#endif
+
+/******************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0))
+#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 8) &&                       \
+       RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)) &&                      \
+      (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) &&                      \
+      (SLE_VERSION_CODE > SLE_VERSION(12, 1, 0)))
+unsigned int _kc_cpumask_local_spread(unsigned int i, int node)
+{
+	int cpu;
+
+	/* Wrap: we always want a cpu. */
+	i %= num_online_cpus();
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28))
+	/* Kernels prior to 2.6.28 do not have for_each_cpu or
+	 * cpumask_of_node, so just use for_each_online_cpu()
+	 */
+	for_each_online_cpu (cpu)
+		if (i-- == 0)
+			return cpu;
+
+	return 0;
+#else
+	if (node == -1) {
+		for_each_cpu (cpu, cpu_online_mask)
+			if (i-- == 0)
+				return cpu;
+	} else {
+		/* NUMA first. */
+		for_each_cpu_and (cpu, cpumask_of_node(node), cpu_online_mask)
+			if (i-- == 0)
+				return cpu;
+
+		for_each_cpu (cpu, cpu_online_mask) {
+			/* Skip NUMA nodes, done above. */
+			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
+				continue;
+
+			if (i-- == 0)
+				return cpu;
+		}
+	}
+#endif /* KERNEL_VERSION >= 2.6.28 */
+	BUG();
+}
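+
+/*
+ * Illustrative caller of the backport above (queue index and irq variables
+ * are placeholders): spread per-queue interrupt affinity over the CPUs of
+ * the device's NUMA node first, e.g.
+ *
+ *	cpu = _kc_cpumask_local_spread(q_idx, dev_to_node(&pdev->dev));
+ *	irq_set_affinity_hint(irq, get_cpu_mask(cpu));
+ */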
+#endif
+#endif
+
+/******************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0))
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) &&                     \
+     !(SLE_VERSION_CODE >= SLE_VERSION(12, 2, 0)))
+/**
+ * _kc_skb_flow_dissect_flow_keys - parse SKB to fill _kc_flow_keys
+ * @skb: SKB used to fill _kc_flow_keys
+ * @flow: _kc_flow_keys to set with SKB fields
+ * @flags: currently unused flags
+ *
+ * The purpose of using kcompat for this function is so the caller doesn't have
+ * to care about which kernel version they are on, which prevents a larger than
+ * normal #ifdef mess created by using a HAVE_* flag for this case. This is also
+ * done for 4.2 kernels to simplify calling skb_flow_dissect_flow_keys()
+ * because in 4.2 kernels skb_flow_dissect_flow_keys() exists, but only has 2
+ * arguments. Recent kernels have skb_flow_dissect_flow_keys() that has 3
+ * arguments.
+ *
+ * The caller needs to understand that this function was only implemented as a
+ * bare-minimum replacement for recent versions of skb_flow_dissect_flow_keys()
+ * and this function is in no way similar to skb_flow_dissect_flow_keys(). An
+ * example use can be found in the ice driver, specifically ice_arfs.c.
+ *
+ * This function is treated as a whitelist of supported fields the SKB can
+ * parse. If new functionality is added make sure to keep this format (i.e. only
+ * check for fields that are explicitly wanted).
+ *
+ * Current whitelist:
+ *
+ * TCPv4, TCPv6, UDPv4, UDPv6
+ *
+ * If any unexpected protocol or other field is found this function memsets the
+ * flow passed in back to 0 and returns false. Otherwise the flow is populated
+ * and returns true.
+ */
+bool _kc_skb_flow_dissect_flow_keys(const struct sk_buff *skb,
+				    struct _kc_flow_keys *flow,
+				    unsigned int __always_unused flags)
+{
+	memset(flow, 0, sizeof(*flow));
+
+	flow->basic.n_proto = skb->protocol;
+	switch (flow->basic.n_proto) {
+	case htons(ETH_P_IP):
+		flow->basic.ip_proto = ip_hdr(skb)->protocol;
+		flow->addrs.v4addrs.src = ip_hdr(skb)->saddr;
+		flow->addrs.v4addrs.dst = ip_hdr(skb)->daddr;
+		break;
+	case htons(ETH_P_IPV6):
+		flow->basic.ip_proto = ipv6_hdr(skb)->nexthdr;
+		memcpy(&flow->addrs.v6addrs.src, &ipv6_hdr(skb)->saddr,
+		       sizeof(struct in6_addr));
+		memcpy(&flow->addrs.v6addrs.dst, &ipv6_hdr(skb)->daddr,
+		       sizeof(struct in6_addr));
+		break;
+	default:
+		netdev_dbg(
+			skb->dev,
+			"%s: Unsupported/unimplemented layer 3 protocol %04x\n",
+			__func__, htons(flow->basic.n_proto));
+		goto unsupported;
+	}
+
+	switch (flow->basic.ip_proto) {
+	case IPPROTO_TCP: {
+		struct tcphdr *tcph;
+
+		tcph = tcp_hdr(skb);
+		flow->ports.src = tcph->source;
+		flow->ports.dst = tcph->dest;
+		break;
+	}
+	case IPPROTO_UDP: {
+		struct udphdr *udph;
+
+		udph = udp_hdr(skb);
+		flow->ports.src = udph->source;
+		flow->ports.dst = udph->dest;
+		break;
+	}
+	default:
+		netdev_dbg(
+			skb->dev,
+			"%s: Unsupported/unimplemented layer 4 protocol %02x\n",
+			__func__, flow->basic.ip_proto);
+		return false;
+	}
+
+	return true;
+
+unsupported:
+	memset(flow, 0, sizeof(*flow));
+	return false;
+}
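+
+/*
+ * Illustrative caller of the minimal dissector above (variable names are
+ * placeholders): only the fields populated above may be relied on, e.g.
+ *
+ *	struct _kc_flow_keys keys;
+ *
+ *	if (_kc_skb_flow_dissect_flow_keys(skb, &keys, 0))
+ *		pr_debug("flow %pI4:%u -> %pI4:%u\n",
+ *			 &keys.addrs.v4addrs.src, ntohs(keys.ports.src),
+ *			 &keys.addrs.v4addrs.dst, ntohs(keys.ports.dst));
+ */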
+#endif /* ! >= RHEL7.4 && ! >= SLES12.2 */
+#endif /* 4.3.0 */
+
+/******************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3)))
+#ifdef CONFIG_SPARC
+#include <asm/idprom.h>
+#include <asm/prom.h>
+#endif
+int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused,
+				     u8 *mac_addr __maybe_unused)
+{
+#if (((LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)) && defined(CONFIG_OF) &&  \
+	      !defined(HAVE_STRUCT_DEVICE_OF_NODE) ||                          \
+      !defined(CONFIG_OF)) &&                                                  \
+     !defined(CONFIG_SPARC))
+	return -ENODEV;
+#else
+	const unsigned char *addr;
+	struct device_node *dp;
+
+	if (dev_is_pci(dev))
+		dp = pci_device_to_OF_node(to_pci_dev(dev));
+	else
+#if defined(HAVE_STRUCT_DEVICE_OF_NODE) && defined(CONFIG_OF)
+		dp = dev->of_node;
+#else
+		dp = NULL;
+#endif
+
+	addr = NULL;
+	if (dp)
+		addr = of_get_mac_address(dp);
+#ifdef CONFIG_SPARC
+	/* Kernel hasn't implemented arch_get_platform_mac_address, but we
+	 * should handle the SPARC case here since it was supported
+	 * originally. This is replaced by arch_get_platform_mac_address()
+	 * upstream.
+	 */
+	if (!addr)
+		addr = idprom->id_ethaddr;
+#endif
+	if (!addr)
+		return -ENODEV;
+
+	ether_addr_copy(mac_addr, addr);
+	return 0;
+#endif
+}
+#endif /* !(RHEL_RELEASE >= 7.3) */
+#endif /* < 4.5.0 */
+
+/*****************************************************************************/
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) ||                        \
+     (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12, 3, 0))) ||      \
+     (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7, 5))))
+const char *_kc_phy_speed_to_str(int speed)
+{
+	switch (speed) {
+	case SPEED_10:
+		return "10Mbps";
+	case SPEED_100:
+		return "100Mbps";
+	case SPEED_1000:
+		return "1Gbps";
+	case SPEED_2500:
+		return "2.5Gbps";
+	case SPEED_5000:
+		return "5Gbps";
+	case SPEED_10000:
+		return "10Gbps";
+	case SPEED_14000:
+		return "14Gbps";
+	case SPEED_20000:
+		return "20Gbps";
+	case SPEED_25000:
+		return "25Gbps";
+	case SPEED_40000:
+		return "40Gbps";
+	case SPEED_50000:
+		return "50Gbps";
+	case SPEED_56000:
+		return "56Gbps";
+#ifdef SPEED_100000
+	case SPEED_100000:
+		return "100Gbps";
+#endif
+	case SPEED_UNKNOWN:
+		return "Unknown";
+	default:
+		return "Unsupported (update phy-core.c)";
+	}
+}
+#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */
+
+/******************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0))
+void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst,
+				      struct ethtool_link_ksettings *src)
+{
+	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
+	unsigned int idx = 0;
+
+	for (; idx < size; idx++) {
+		dst->link_modes.supported[idx] &=
+			src->link_modes.supported[idx];
+		dst->link_modes.advertising[idx] &=
+			src->link_modes.advertising[idx];
+	}
+}
+#endif /* 4.15.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0))
+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 0)) &&                      \
+	!(SLE_VERSION_CODE >= SLE_VERSION(12, 5, 0) &&                         \
+		  SLE_VERSION_CODE < SLE_VERSION(15, 0, 0) ||                  \
+	  SLE_VERSION_CODE >= SLE_VERSION(15, 1, 0))
+#if BITS_PER_LONG == 64
+/**
+ * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap
+ * @bitmap: array of unsigned longs, the destination bitmap
+ * @buf: array of u32 (in host byte order), the source bitmap
+ * @nbits: number of bits in @bitmap
+ */
+void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
+		       unsigned int nbits)
+{
+	unsigned int i, halfwords;
+
+	halfwords = DIV_ROUND_UP(nbits, 32);
+	for (i = 0; i < halfwords; i++) {
+		bitmap[i / 2] = (unsigned long)buf[i];
+		if (++i < halfwords)
+			bitmap[i / 2] |= ((unsigned long)buf[i]) << 32;
+	}
+
+	/* Clear tail bits in last word beyond nbits. */
+	if (nbits % BITS_PER_LONG)
+		bitmap[(halfwords - 1) / 2] &= BITMAP_LAST_WORD_MASK(nbits);
+}
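+
+/*
+ * Worked example for the 64-bit packing above: with nbits = 40 and
+ * buf = { 0xffffffff, 0x000000ff }, the two u32 halves are merged into one
+ * long, so bitmap[0] ends up as 0x000000ffffffffff; the tail-clearing step
+ * is then a no-op because bit 39 is the highest bit set.
+ */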
+#endif /* BITS_PER_LONG == 64 */
+#endif /* !(RHEL >= 8.0) && !(SLES >= 12.5 && SLES < 15.0 || SLES >= 15.1) */
+#endif /* 4.16.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0))
+/* PCIe link information */
+#define PCIE_SPEED2STR(speed)                                                  \
+	((speed) == PCIE_SPEED_16_0GT ? "16 GT/s" :                            \
+	 (speed) == PCIE_SPEED_8_0GT  ? "8 GT/s" :                             \
+	 (speed) == PCIE_SPEED_5_0GT  ? "5 GT/s" :                             \
+	 (speed) == PCIE_SPEED_2_5GT  ? "2.5 GT/s" :                           \
+					"Unknown speed")
+
+/* PCIe speed to Mb/s reduced by encoding overhead */
+#define PCIE_SPEED2MBS_ENC(speed)                                              \
+	((speed) == PCIE_SPEED_16_0GT ? 16000 * 128 / 130 :                    \
+	 (speed) == PCIE_SPEED_8_0GT  ? 8000 * 128 / 130 :                     \
+	 (speed) == PCIE_SPEED_5_0GT  ? 5000 * 8 / 10 :                        \
+	 (speed) == PCIE_SPEED_2_5GT  ? 2500 * 8 / 10 :                        \
+					0)
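+
+/*
+ * Example of the encoding overhead above: an 8 GT/s lane carries
+ * 8000 * 128 / 130 ~= 7876 Mb/s of payload (128b/130b encoding), so an x8
+ * Gen3 link reports roughly 63 Gb/s of available bandwidth, while a
+ * 5 GT/s lane carries 5000 * 8 / 10 = 4000 Mb/s (8b/10b encoding).
+ */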
+
+static u32 _kc_pcie_bandwidth_available(struct pci_dev *dev,
+					struct pci_dev **limiting_dev,
+					enum pci_bus_speed *speed,
+					enum pcie_link_width *width)
+{
+	u16 lnksta;
+	enum pci_bus_speed next_speed;
+	enum pcie_link_width next_width;
+	u32 bw, next_bw;
+
+	if (speed)
+		*speed = PCI_SPEED_UNKNOWN;
+	if (width)
+		*width = PCIE_LNK_WIDTH_UNKNOWN;
+
+	bw = 0;
+
+	while (dev) {
+		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+
+		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
+			     PCI_EXP_LNKSTA_NLW_SHIFT;
+
+		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
+
+		/* Check if current device limits the total bandwidth */
+		if (!bw || next_bw <= bw) {
+			bw = next_bw;
+
+			if (limiting_dev)
+				*limiting_dev = dev;
+			if (speed)
+				*speed = next_speed;
+			if (width)
+				*width = next_width;
+		}
+
+		dev = pci_upstream_bridge(dev);
+	}
+
+	return bw;
+}
+
+static enum pci_bus_speed _kc_pcie_get_speed_cap(struct pci_dev *dev)
+{
+	u32 lnkcap2, lnkcap;
+
+	/*
+	 * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link
+	 * Speeds Vector in Link Capabilities 2 when supported, falling
+	 * back to Max Link Speed in Link Capabilities otherwise.
+	 */
+	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
+	if (lnkcap2) { /* PCIe r3.0-compliant */
+		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
+			return PCIE_SPEED_16_0GT;
+		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
+			return PCIE_SPEED_8_0GT;
+		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
+			return PCIE_SPEED_5_0GT;
+		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
+			return PCIE_SPEED_2_5GT;
+		return PCI_SPEED_UNKNOWN;
+	}
+
+	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
+	if (lnkcap) {
+		if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB)
+			return PCIE_SPEED_16_0GT;
+		else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB)
+			return PCIE_SPEED_8_0GT;
+		else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
+			return PCIE_SPEED_5_0GT;
+		else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
+			return PCIE_SPEED_2_5GT;
+	}
+
+	return PCI_SPEED_UNKNOWN;
+}
+
+static enum pcie_link_width _kc_pcie_get_width_cap(struct pci_dev *dev)
+{
+	u32 lnkcap;
+
+	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
+	if (lnkcap)
+		return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
+
+	return PCIE_LNK_WIDTH_UNKNOWN;
+}
+
+static u32 _kc_pcie_bandwidth_capable(struct pci_dev *dev,
+				      enum pci_bus_speed *speed,
+				      enum pcie_link_width *width)
+{
+	*speed = _kc_pcie_get_speed_cap(dev);
+	*width = _kc_pcie_get_width_cap(dev);
+
+	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
+		return 0;
+
+	return *width * PCIE_SPEED2MBS_ENC(*speed);
+}
+
+void _kc_pcie_print_link_status(struct pci_dev *dev)
+{
+	enum pcie_link_width width, width_cap;
+	enum pci_bus_speed speed, speed_cap;
+	struct pci_dev *limiting_dev = NULL;
+	u32 bw_avail, bw_cap;
+
+	bw_cap = _kc_pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
+	bw_avail = _kc_pcie_bandwidth_available(dev, &limiting_dev, &speed,
+						&width);
+
+	if (bw_avail >= bw_cap)
+		pci_info(
+			dev,
+			"%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
+			bw_cap / 1000, bw_cap % 1000, PCIE_SPEED2STR(speed_cap),
+			width_cap);
+	else
+		pci_info(
+			dev,
+			"%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
+			bw_avail / 1000, bw_avail % 1000, PCIE_SPEED2STR(speed),
+			width,
+			limiting_dev ? pci_name(limiting_dev) : "<unknown>",
+			bw_cap / 1000, bw_cap % 1000, PCIE_SPEED2STR(speed_cap),
+			width_cap);
+}
+#endif /* 4.17.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)) ||                          \
+	(RHEL_RELEASE_CODE &&                                                  \
+	 (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 1)))
+#ifdef HAVE_TC_SETUP_CLSFLOWER
+#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)                            \
+	do {                                                                   \
+		const struct flow_match *__m = &(__rule)->match;               \
+		struct flow_dissector *__d = (__m)->dissector;                 \
+		(__out)->key =                                                 \
+			skb_flow_dissector_target(__d, __type, (__m)->key);    \
+		(__out)->mask =                                                \
+			skb_flow_dissector_target(__d, __type, (__m)->mask);   \
+	} while (0)
+
+void flow_rule_match_basic(const struct flow_rule *rule,
+			   struct flow_match_basic *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
+}
+
+void flow_rule_match_control(const struct flow_rule *rule,
+			     struct flow_match_control *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
+}
+
+void flow_rule_match_eth_addrs(const struct flow_rule *rule,
+			       struct flow_match_eth_addrs *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
+}
+
+#ifdef HAVE_TC_FLOWER_ENC
+void flow_rule_match_enc_keyid(const struct flow_rule *rule,
+			       struct flow_match_enc_keyid *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
+}
+
+void flow_rule_match_enc_ports(const struct flow_rule *rule,
+			       struct flow_match_ports *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
+}
+
+void flow_rule_match_enc_control(const struct flow_rule *rule,
+				 struct flow_match_control *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
+}
+
+void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
+				    struct flow_match_ipv4_addrs *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
+}
+
+void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
+				    struct flow_match_ipv6_addrs *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
+}
+#endif
+
+#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS
+void flow_rule_match_vlan(const struct flow_rule *rule,
+			  struct flow_match_vlan *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
+}
+#endif
+
+void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
+				struct flow_match_ipv4_addrs *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
+}
+
+void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
+				struct flow_match_ipv6_addrs *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
+}
+
+void flow_rule_match_ports(const struct flow_rule *rule,
+			   struct flow_match_ports *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
+}
+#endif /* HAVE_TC_SETUP_CLSFLOWER */
+#endif /* 5.1.0 || (RHEL && RHEL < 8.1) */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0))
+#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 2))))
+#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f,
+				   struct list_head __always_unused *driver_list,
+				   tc_setup_cb_t *cb, void *cb_ident,
+				   void *cb_priv, bool ingress_only)
+{
+	if (ingress_only &&
+	    f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+		return -EOPNOTSUPP;
+
+	/* Note: Upstream has driver_block_list, but older kernels do not */
+	switch (f->command) {
+	case TC_BLOCK_BIND:
+#ifdef HAVE_TCF_BLOCK_CB_REGISTER_EXTACK
+		return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv,
+					     f->extack);
+#else
+		return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv);
+#endif
+	case TC_BLOCK_UNBIND:
+		tcf_block_cb_unregister(f->block, cb, cb_ident);
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */
+#endif /* !RHEL >= 8.2 */
+#endif /* 5.3.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0))
+u64 _kc_pci_get_dsn(struct pci_dev *dev)
+{
+	u32 dword;
+	u64 dsn;
+	int pos;
+
+	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
+	if (!pos)
+		return 0;
+
+	/*
+	 * The Device Serial Number is two dwords offset 4 bytes from the
+	 * capability position. The specification says that the first dword is
+	 * the lower half, and the second dword is the upper half.
+	 */
+	pos += 4;
+	pci_read_config_dword(dev, pos, &dword);
+	dsn = (u64)dword;
+	pci_read_config_dword(dev, pos + 4, &dword);
+	dsn |= ((u64)dword) << 32;
+
+	return dsn;
+}
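+
+/*
+ * Example of the layout described above: if the two dwords read back as
+ * 0x12345678 (lower) and 0x9abcdef0 (upper), the returned Device Serial
+ * Number is 0x9abcdef012345678.
+ */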
+#endif /* 5.7.0 */
+
+#ifdef NEED_MUL_U64_U64_DIV_U64
+#ifdef NEED_DIV64_U64_REM
+static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
+{
+        *remainder = dividend % divisor;
+        return dividend / divisor;
+}
+#endif
+u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
+{
+        u64 res = 0, div, rem;
+        int shift;
+
+        /* can a * b overflow ? */
+        if (ilog2(a) + ilog2(b) > 62) {
+                /*
+                 * (b * a) / c is equal to
+                 *
+                 *      (b / c) * a +
+                 *      (b % c) * a / c
+                 *
+                 * if nothing overflows. Can the 1st multiplication
+                 * overflow? Yes, but we do not care: this can only
+                 * happen if the end result can't fit in u64 anyway.
+                 *
+                 * So the code below does
+                 *
+                 *      res = (b / c) * a;
+                 *      b = b % c;
+                 */
+                div = div64_u64_rem(b, c, &rem);
+                res = div * a;
+                b = rem;
+
+                shift = ilog2(a) + ilog2(b) - 62;
+                if (shift > 0) {
+                        /* drop precision */
+                        b >>= shift;
+                        c >>= shift;
+                        if (!c)
+                                return res;
+                }
+        }
+
+        return res + div64_u64(a * b, c);
+}
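+
+/*
+ * The identity used in the overflow branch can be checked with small
+ * numbers (illustrative only): for a = 6, b = 10, c = 4,
+ *
+ *	(b / c) * a     = 2 * 6     = 12
+ *	(b % c) * a / c = 2 * 6 / 4 = 3
+ *
+ * which sums to 15 = (6 * 10) / 4 truncated. When the residual product
+ * still risks overflow, b and c are shifted right together, trading a
+ * little precision for staying within 64 bits.
+ */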
+#endif /* NEED_MUL_U64_U64_DIV_U64 */
+#ifdef NEED_ETHTOOL_CONVERT_LEGACY_U32_TO_LINK_MODE
+void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst,
+                                             u32 legacy_u32)
+{
+        bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
+        dst[0] = legacy_u32;
+}
+
+/* return false if src had higher bits set. lower bits always updated. */
+bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
+                                             const unsigned long *src)
+{
+        bool retval = true;
+
+        /* TODO: following test will soon always be true */
+        if (__ETHTOOL_LINK_MODE_MASK_NBITS > 32) {
+                __ETHTOOL_DECLARE_LINK_MODE_MASK(ext);
+
+                bitmap_zero(ext, __ETHTOOL_LINK_MODE_MASK_NBITS);
+                bitmap_fill(ext, 32);
+                bitmap_complement(ext, ext, __ETHTOOL_LINK_MODE_MASK_NBITS);
+                if (bitmap_intersects(ext, src,
+                                      __ETHTOOL_LINK_MODE_MASK_NBITS)) {
+                        /* src mask goes beyond bit 31 */
+                        retval = false;
+                }
+        }
+        *legacy_u32 = src[0];
+        return retval;
+}
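+
+/*
+ * Illustrative use only (dev and mask are placeholders): converting a
+ * link-mode bitmap to the legacy u32 representation while reporting
+ * truncation, e.g.
+ *
+ *	u32 legacy;
+ *
+ *	if (!ethtool_convert_link_mode_to_legacy_u32(&legacy, mask))
+ *		netdev_warn(dev, "link modes above bit 31 were dropped\n");
+ *
+ * The lower 32 bits are always copied regardless of the return value.
+ */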
+
+#endif
+
+#ifndef HAVE_ETHTOOL_KEEE
+void eee_to_keee(struct ethtool_keee *keee,
+                 const struct ethtool_eee *eee)
+{
+        memset(keee, 0, sizeof(*keee));
+
+        keee->supported_u32 = eee->supported;
+        keee->advertised_u32 = eee->advertised;
+        keee->lp_advertised_u32 = eee->lp_advertised;
+        keee->eee_active = eee->eee_active;
+        keee->eee_enabled = eee->eee_enabled;
+        keee->tx_lpi_enabled = eee->tx_lpi_enabled;
+        keee->tx_lpi_timer = eee->tx_lpi_timer;
+
+        ethtool_convert_legacy_u32_to_link_mode(keee->supported,
+                                                eee->supported);
+        ethtool_convert_legacy_u32_to_link_mode(keee->advertised,
+                                                eee->advertised);
+        ethtool_convert_legacy_u32_to_link_mode(keee->lp_advertised,
+                                                eee->lp_advertised);
+}
+
+bool ethtool_eee_use_linkmodes(const struct ethtool_keee *eee)
+{
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)))
+        return !linkmode_empty(eee->supported);
+#else
+        return false;
+#endif /* RHEL >= 8.0 */
+}
+
+void keee_to_eee(struct ethtool_eee *eee,
+                 const struct ethtool_keee *keee)
+{
+        memset(eee, 0, sizeof(*eee));
+
+        eee->eee_active = keee->eee_active;
+        eee->eee_enabled = keee->eee_enabled;
+        eee->tx_lpi_enabled = keee->tx_lpi_enabled;
+        eee->tx_lpi_timer = keee->tx_lpi_timer;
+
+        if (ethtool_eee_use_linkmodes(keee)) {
+                bool overflow;
+
+                overflow = !ethtool_convert_link_mode_to_legacy_u32(&eee->supported,
+                                                                    keee->supported);
+                ethtool_convert_link_mode_to_legacy_u32(&eee->advertised,
+                                                        keee->advertised);
+                ethtool_convert_link_mode_to_legacy_u32(&eee->lp_advertised,
+                                                        keee->lp_advertised);
+                if (overflow)
+                        pr_warn("Ethtool ioctl interface doesn't support passing EEE linkmodes beyond bit 32\n");
+        } else {
+                eee->supported = keee->supported_u32;
+                eee->advertised = keee->advertised_u32;
+                eee->lp_advertised = keee->lp_advertised_u32;
+        }
+}
+#endif /* !HAVE_ETHTOOL_KEEE */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnp_compat.h b/drivers/net/ethernet/mucse/rnpgbe/rnp_compat.h
new file mode 100755
index 0000000000000000000000000000000000000000..6b89acf15cceb46f2ebbe52609d432fdae468810
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnp_compat.h
@@ -0,0 +1,7639 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _KCOMPAT_H_
+#define _KCOMPAT_H_
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#else
+#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#endif
+#include "kcompat_gcc.h"
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#ifndef GCC_VERSION
+#define GCC_VERSION                                                            \
+	(__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+#endif /* GCC_VERSION */
+
+#ifndef IEEE_8021QAZ_APP_SEL_DSCP
+#define IEEE_8021QAZ_APP_SEL_DSCP 5
+#endif
+
+/* Backport macros for controlling GCC diagnostics */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0))
+
+/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */
+#if GCC_VERSION >= 40600
+#define __diag_str1(s) #s
+#define __diag_str(s) __diag_str1(s)
+#define __diag(s) _Pragma(__diag_str(GCC diagnostic s))
+#else
+#define __diag(s)
+#endif /* GCC_VERSION >= 4.6 */
+#define __diag_push() __diag(push)
+#define __diag_pop() __diag(pop)
+#endif /* LINUX_VERSION < 4.18.0 */
+
+#ifndef NSEC_PER_MSEC
+#define NSEC_PER_MSEC 1000000L
+#endif
+#include 
+/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */
+#ifndef UTS_RELEASE
+/* utsrelease.h changed locations in 2.6.33 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
+#include <linux/utsrelease.h>
+#else
+#include <generated/utsrelease.h>
+#endif
+#endif
+
+/* NAPI enable/disable flags here */
+#define NAPI
+
+#define adapter_struct rnp_adapter
+#define adapter_q_vector rnp_q_vector
+
+/* and finally set defines so that the code sees the changes */
+#ifdef NAPI
+#else
+#endif /* NAPI */
+
+/* Dynamic LTR and deeper C-State support disable/enable */
+//#define DISABLE_PACKET_SPLIT
+/* packet split disable/enable */
+#ifdef DISABLE_PACKET_SPLIT
+#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT
+#define CONFIG_RNP_DISABLE_PACKET_SPLIT
+#endif
+#endif /* DISABLE_PACKET_SPLIT */
+
+/* MSI compatibility code for all kernels and drivers */
+#ifdef DISABLE_PCI_MSI
+#undef CONFIG_PCI_MSI
+#endif
+#ifndef CONFIG_PCI_MSI
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 8))
+struct msix_entry {
+	u16 vector; /* kernel uses to write allocated vector */
+	u16 entry; /* driver uses to specify entry, OS writes */
+};
+#endif
+#undef pci_enable_msi
+#define pci_enable_msi(a) -ENOTSUPP
+#undef pci_disable_msi
+#define pci_disable_msi(a)                                                     \
+	do {                                                                   \
+	} while (0)
+#undef pci_enable_msix
+#define pci_enable_msix(a, b, c) -ENOTSUPP
+#undef pci_disable_msix
+#define pci_disable_msix(a)                                                    \
+	do {                                                                   \
+	} while (0)
+#define msi_remove_pci_irq_vectors(a)                                          \
+	do {                                                                   \
+	} while (0)
+#endif /* CONFIG_PCI_MSI */
+#ifdef DISABLE_PM
+#undef CONFIG_PM
+#endif
+
+#ifdef DISABLE_NET_POLL_CONTROLLER
+#undef CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef PMSG_SUSPEND
+#define PMSG_SUSPEND 3
+#endif
+
+/* generic boolean compatibility */
+#undef TRUE
+#undef FALSE
+#define TRUE true
+#define FALSE false
+#ifdef GCC_VERSION
+#if (GCC_VERSION < 3000)
+#define _Bool char
+#endif
+#else
+#define _Bool char
+#endif
+
+#ifndef BIT
+#define BIT(nr) (1UL << (nr))
+#endif
+
+#undef __always_unused
+#define __always_unused __attribute__((__unused__))
+
+#undef __maybe_unused
+#define __maybe_unused __attribute__((__unused__))
+
+/* kernels less than 2.4.14 don't have this */
+#ifndef ETH_P_8021Q
+#define ETH_P_8021Q 0x8100
+#endif
+
+#ifndef module_param
+#define module_param(v, t, p) MODULE_PARM(v, "i");
+#endif
+
+#ifndef DMA_64BIT_MASK
+#define DMA_64BIT_MASK 0xffffffffffffffffULL
+#endif
+
+#ifndef DMA_32BIT_MASK
+#define DMA_32BIT_MASK 0x00000000ffffffffULL
+#endif
+
+#ifndef PCI_CAP_ID_EXP
+#define PCI_CAP_ID_EXP 0x10
+#endif
+
+#ifndef uninitialized_var
+#define uninitialized_var(x) (x = x)
+#endif
+
+#ifndef PCIE_LINK_STATE_L0S
+#define PCIE_LINK_STATE_L0S 1
+#endif
+#ifndef PCIE_LINK_STATE_L1
+#define PCIE_LINK_STATE_L1 2
+#endif
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev)
+#endif
+
+#if !defined(HAVE_FREE_NETDEV) && (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0))
+#define free_netdev(x) kfree(x)
+#endif
+
+#ifdef HAVE_POLL_CONTROLLER
+#define CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef SKB_DATAREF_SHIFT
+/*
+ *if we do not have the infrastructure to detect if skb_header is cloned
+ *just return false in all cases
+ */
+#define skb_header_cloned(x) 0
+#endif
+
+#ifndef NETIF_F_GSO
+#define gso_size tso_size
+#define gso_segs tso_segs
+#endif
+
+#ifndef NETIF_F_GRO
+#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb)                           \
+	vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan)
+#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb)
+#endif
+
+#ifndef NETIF_F_SCTP_CSUM
+#define NETIF_F_SCTP_CSUM 0
+#endif
+
+#ifndef NETIF_F_LRO
+#define NETIF_F_LRO BIT(15)
+#endif
+
+#ifndef NETIF_F_NTUPLE
+#define NETIF_F_NTUPLE BIT(27)
+#endif
+
+#ifndef NETIF_F_ALL_FCOE
+#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | NETIF_F_FSO)
+#endif
+
+#ifndef IPPROTO_SCTP
+#define IPPROTO_SCTP 132
+#endif
+
+#ifndef IPPROTO_UDPLITE
+#define IPPROTO_UDPLITE 136
+#endif
+
+#ifndef CHECKSUM_PARTIAL
+#define CHECKSUM_PARTIAL CHECKSUM_HW
+#define CHECKSUM_COMPLETE CHECKSUM_HW
+#endif
+
+#ifndef __read_mostly
+#define __read_mostly
+#endif
+
+#ifndef MII_RESV1
+#define MII_RESV1 0x17 /* Reserved...		*/
+#endif
+
+#ifndef unlikely
+#define unlikely(_x) _x
+#define likely(_x) _x
+#endif
+
+#ifndef WARN_ON
+#define WARN_ON(x) ({ 0; })
+#endif
+
+#ifndef PCI_DEVICE
+#define PCI_DEVICE(vend, dev)                                                  \
+	.vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID,            \
+	.subdevice = PCI_ANY_ID
+#endif
+
+#ifndef node_online
+#define node_online(node) ((node) == 0)
+#endif
+
+#ifndef _LINUX_RANDOM_H
+#include <linux/random.h>
+#endif
+
+#ifndef BITS_PER_TYPE
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#endif
+
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
+#endif
+
+#ifndef DECLARE_BITMAP
+#define DECLARE_BITMAP(name, bits) long name[BITS_TO_LONGS(bits)]
+#endif
+
+#ifndef VLAN_HLEN
+#define VLAN_HLEN 4
+#endif
+
+#ifndef VLAN_ETH_HLEN
+#define VLAN_ETH_HLEN 18
+#endif
+
+#ifndef VLAN_ETH_FRAME_LEN
+#define VLAN_ETH_FRAME_LEN 1518
+#endif
+
+#ifndef DCA_GET_TAG_TWO_ARGS
+#define dca3_get_tag(a, b) dca_get_tag(b)
+#endif
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#if defined(__i386__) || defined(__x86_64__)
+#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#endif
+#endif
+
+/* taken from 2.6.24 definition in linux/kernel.h */
+#ifndef IS_ALIGNED
+#define IS_ALIGNED(x, a) (((x) % ((typeof(x))(a))) == 0)
+#endif
+
+#ifdef IS_ENABLED
+#undef IS_ENABLED
+#undef __ARG_PLACEHOLDER_1
+#undef config_enabled
+#undef _config_enabled
+#undef __config_enabled
+#undef ___config_enabled
+#endif
+
+#define __ARG_PLACEHOLDER_1 0,
+#define config_enabled(cfg) _config_enabled(cfg)
+#ifdef __CHECKER__
+/* cppcheck-suppress preprocessorErrorDirective */
+#endif /* __CHECKER__ */
+#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
+#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
+#define ___config_enabled(__ignored, val, ...) val
+
+#define IS_ENABLED(option)                                                     \
+	(config_enabled(option) || config_enabled(option##_MODULE))
+
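+/*
+ * Worked expansion of the helpers above (illustrative): built-in Kconfig
+ * options are defined to 1, so for CONFIG_FOO=y
+ *
+ *	IS_ENABLED(CONFIG_FOO)
+ *	-> config_enabled(CONFIG_FOO) || config_enabled(CONFIG_FOO_MODULE)
+ *	-> __config_enabled(__ARG_PLACEHOLDER_1) || ...  (CONFIG_FOO -> 1)
+ *	-> ___config_enabled(0, 1, 0) || ...             (placeholder -> "0,")
+ *	-> 1
+ *
+ * For an undefined option the placeholder token never expands, so val in
+ * ___config_enabled() picks up the trailing 0 and the result is 0.
+ */
+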
+#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX)
+struct _kc_vlan_ethhdr {
+	unsigned char h_dest[ETH_ALEN];
+	unsigned char h_source[ETH_ALEN];
+	__be16 h_vlan_proto;
+	__be16 h_vlan_TCI;
+	__be16 h_vlan_encapsulated_proto;
+};
+#define vlan_ethhdr _kc_vlan_ethhdr
+struct _kc_vlan_hdr {
+	__be16 h_vlan_TCI;
+	__be16 h_vlan_encapsulated_proto;
+};
+#define vlan_hdr _kc_vlan_hdr
+#define vlan_tx_tag_present(_skb) 0
+#define vlan_tx_tag_get(_skb) 0
+#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */
+
+#ifndef VLAN_PRIO_SHIFT
+#define VLAN_PRIO_SHIFT 13
+#endif
+
+#ifndef PCI_EXP_LNKSTA_CLS_2_5GB
+#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001
+#endif
+
+#ifndef PCI_EXP_LNKSTA_CLS_5_0GB
+#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002
+#endif
+
+#ifndef PCI_EXP_LNKSTA_CLS_8_0GB
+#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003
+#endif
+
+#ifndef PCI_EXP_LNKSTA_NLW_X1
+#define PCI_EXP_LNKSTA_NLW_X1 0x0010
+#endif
+
+#ifndef PCI_EXP_LNKSTA_NLW_X2
+#define PCI_EXP_LNKSTA_NLW_X2 0x0020
+#endif
+
+#ifndef PCI_EXP_LNKSTA_NLW_X4
+#define PCI_EXP_LNKSTA_NLW_X4 0x0040
+#endif
+
+#ifndef PCI_EXP_LNKSTA_NLW_X8
+#define PCI_EXP_LNKSTA_NLW_X8 0x0080
+#endif
+
+#ifndef __GFP_COLD
+#define __GFP_COLD 0
+#endif
+
+#ifndef __GFP_COMP
+#define __GFP_COMP 0
+#endif
+
+#ifndef IP_OFFSET
+#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */
+#endif
+
+/*****************************************************************************/
+/*
+ * Installations with ethtool version without eeprom, adapter id, or statistics
+ * support
+ */
+
+#ifndef ETH_GSTRING_LEN
+#define ETH_GSTRING_LEN 32
+#endif
+
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x1d
+#undef ethtool_drvinfo
+#define ethtool_drvinfo k_ethtool_drvinfo
+struct k_ethtool_drvinfo {
+	u32 cmd;
+	char driver[32];
+	char version[32];
+	char fw_version[32];
+	char bus_info[32];
+	char reserved1[32];
+	char reserved2[16];
+	u32 n_stats;
+	u32 testinfo_len;
+	u32 eedump_len;
+	u32 regdump_len;
+};
+
+struct ethtool_stats {
+	u32 cmd;
+	u32 n_stats;
+	u64 data[0];
+};
+#endif /* ETHTOOL_GSTATS */
+
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x1c
+#endif /* ETHTOOL_PHYS_ID */
+
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x1b
+enum ethtool_stringset {
+	ETH_SS_TEST = 0,
+	ETH_SS_STATS,
+};
+struct ethtool_gstrings {
+	u32 cmd; /* ETHTOOL_GSTRINGS */
+	u32 string_set; /* string set id, e.g. ETH_SS_TEST, etc. */
+	u32 len; /* number of strings in the string set */
+	u8 data[0];
+};
+#endif /* ETHTOOL_GSTRINGS */
+
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST 0x1a
+enum ethtool_test_flags {
+	ETH_TEST_FL_OFFLINE = BIT(0),
+	ETH_TEST_FL_FAILED = BIT(1),
+};
+struct ethtool_test {
+	u32 cmd;
+	u32 flags;
+	u32 reserved;
+	u32 len;
+	u64 data[0];
+};
+#endif /* ETHTOOL_TEST */
+
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM 0xb
+#undef ETHTOOL_GREGS
+struct ethtool_eeprom {
+	u32 cmd;
+	u32 magic;
+	u32 offset;
+	u32 len;
+	u8 data[0];
+};
+
+struct ethtool_value {
+	u32 cmd;
+	u32 data;
+};
+#endif /* ETHTOOL_GEEPROM */
+
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK 0xa
+#endif /* ETHTOOL_GLINK */
+
+#ifndef ETHTOOL_GWOL
+#define ETHTOOL_GWOL 0x5
+#define ETHTOOL_SWOL 0x6
+#define SOPASS_MAX 6
+struct ethtool_wolinfo {
+	u32 cmd;
+	u32 supported;
+	u32 wolopts;
+	u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */
+};
+#endif /* ETHTOOL_GWOL */
+
+#ifndef ETHTOOL_GREGS
+#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */
+#define ethtool_regs _kc_ethtool_regs
+/* for passing big chunks of data */
+struct _kc_ethtool_regs {
+	u32 cmd;
+	u32 version; /* driver-specific, indicates different chips/revs */
+	u32 len; /* bytes */
+	u8 data[0];
+};
+#endif /* ETHTOOL_GREGS */
+
+#ifndef ETHTOOL_GMSGLVL
+#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
+#endif
+#ifndef ETHTOOL_SMSGLVL
+#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */
+#endif
+#ifndef ETHTOOL_NWAY_RST
+#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */
+#endif
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK 0x0000000a /* Get link status */
+#endif
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
+#endif
+#ifndef ETHTOOL_SEEPROM
+#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */
+#endif
+#ifndef ETHTOOL_GCOALESCE
+#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */
+/* for configuring coalescing parameters of chip */
+#define ethtool_coalesce _kc_ethtool_coalesce
+struct _kc_ethtool_coalesce {
+	u32 cmd; /* ETHTOOL_{G,S}COALESCE */
+
+	/* How many usecs to delay an RX interrupt after
+	 * a packet arrives.  If 0, only rx_max_coalesced_frames
+	 * is used.
+	 */
+	u32 rx_coalesce_usecs;
+
+	/* How many packets to delay an RX interrupt after
+	 * a packet arrives.  If 0, only rx_coalesce_usecs is
+	 * used.  It is illegal to set both usecs and max frames
+	 * to zero as this would cause RX interrupts to never be
+	 * generated.
+	 */
+	u32 rx_max_coalesced_frames;
+
+	/* Same as above two parameters, except that these values
+	 * apply while an IRQ is being serviced by the host.  Not
+	 * all cards support this feature and the values are ignored
+	 * in that case.
+	 */
+	u32 rx_coalesce_usecs_irq;
+	u32 rx_max_coalesced_frames_irq;
+
+	/* How many usecs to delay a TX interrupt after
+	 * a packet is sent.  If 0, only tx_max_coalesced_frames
+	 * is used.
+	 */
+	u32 tx_coalesce_usecs;
+
+	/* How many packets to delay a TX interrupt after
+	 * a packet is sent.  If 0, only tx_coalesce_usecs is
+	 * used.  It is illegal to set both usecs and max frames
+	 * to zero as this would cause TX interrupts to never be
+	 * generated.
+	 */
+	u32 tx_max_coalesced_frames;
+
+	/* Same as above two parameters, except that these values
+	 * apply while an IRQ is being serviced by the host.  Not
+	 * all cards support this feature and the values are ignored
+	 * in that case.
+	 */
+	u32 tx_coalesce_usecs_irq;
+	u32 tx_max_coalesced_frames_irq;
+
+	/* How many usecs to delay in-memory statistics
+	 * block updates.  Some drivers do not have an in-memory
+	 * statistic block, and in such cases this value is ignored.
+	 * This value must not be zero.
+	 */
+	u32 stats_block_coalesce_usecs;
+
+	/* Adaptive RX/TX coalescing is an algorithm implemented by
+	 * some drivers to improve latency under low packet rates and
+	 * improve throughput under high packet rates.  Some drivers
+	 * only implement one of RX or TX adaptive coalescing.  Anything
+	 * not implemented by the driver causes these values to be
+	 * silently ignored.
+	 */
+	u32 use_adaptive_rx_coalesce;
+	u32 use_adaptive_tx_coalesce;
+
+	/* When the packet rate (measured in packets per second)
+	 * is below pkt_rate_low, the {rx,tx}_*_low parameters are
+	 * used.
+	 */
+	u32 pkt_rate_low;
+	u32 rx_coalesce_usecs_low;
+	u32 rx_max_coalesced_frames_low;
+	u32 tx_coalesce_usecs_low;
+	u32 tx_max_coalesced_frames_low;
+
+	/* When the packet rate is below pkt_rate_high but above
+	 * pkt_rate_low (both measured in packets per second) the
+	 * normal {rx,tx}_* coalescing parameters are used.
+	 */
+
+	/* When the packet rate (measured in packets per second)
+	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
+	 * used.
+	 */
+	u32 pkt_rate_high;
+	u32 rx_coalesce_usecs_high;
+	u32 rx_max_coalesced_frames_high;
+	u32 tx_coalesce_usecs_high;
+	u32 tx_max_coalesced_frames_high;
+
+	/* How often to do adaptive coalescing packet rate sampling,
+	 * measured in seconds.  Must not be zero.
+	 */
+	u32 rate_sample_interval;
+};
+#endif /* ETHTOOL_GCOALESCE */
+
+#ifndef ETHTOOL_SCOALESCE
+#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */
+#endif
+#ifndef ETHTOOL_GRINGPARAM
+#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */
+/* for configuring RX/TX ring parameters */
+#define ethtool_ringparam _kc_ethtool_ringparam
+struct _kc_ethtool_ringparam {
+	u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */
+
+	/* Read only attributes.  These indicate the maximum number
+	 * of pending RX/TX ring entries the driver will allow the
+	 * user to set.
+	 */
+	u32 rx_max_pending;
+	u32 rx_mini_max_pending;
+	u32 rx_jumbo_max_pending;
+	u32 tx_max_pending;
+
+	/* Values changeable by the user.  The valid values are
+	 * in the range 1 to the "*_max_pending" counterpart above.
+	 */
+	u32 rx_pending;
+	u32 rx_mini_pending;
+	u32 rx_jumbo_pending;
+	u32 tx_pending;
+};
+#endif /* ETHTOOL_GRINGPARAM */
+
+#ifndef ETHTOOL_SRINGPARAM
+#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */
+#endif
+#ifndef ETHTOOL_GPAUSEPARAM
+#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */
+/* for configuring link flow control parameters */
+#define ethtool_pauseparam _kc_ethtool_pauseparam
+struct _kc_ethtool_pauseparam {
+	u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
+
+	/* If the link is being auto-negotiated (via ethtool_cmd.autoneg
+	 * being true) the user may set 'autoneg' here non-zero to have the
+	 * pause parameters be auto-negotiated too.  In such a case, the
+	 * {rx,tx}_pause values below determine what capabilities are
+	 * advertised.
+	 *
+	 * If 'autoneg' is zero or the link is not being auto-negotiated,
+	 * then {rx,tx}_pause force the driver to use/not-use pause
+	 * flow control.
+	 */
+	u32 autoneg;
+	u32 rx_pause;
+	u32 tx_pause;
+};
+#endif /* ETHTOOL_GPAUSEPARAM */
+
+#ifndef ETHTOOL_SPAUSEPARAM
+#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */
+#endif
+#ifndef ETHTOOL_GRXCSUM
+#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SRXCSUM
+#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GTXCSUM
+#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STXCSUM
+#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GSG
+#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SSG
+#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */
+#endif
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */
+#endif
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */
+#endif
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */
+#endif
+#ifndef ETHTOOL_GTSO
+#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STSO
+#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */
+#endif
+
+#ifndef ETHTOOL_BUSINFO_LEN
+#define ETHTOOL_BUSINFO_LEN 32
+#endif
+
+#ifndef WAKE_FILTER
+#define WAKE_FILTER BIT(7)
+#endif
+
+#ifndef SPEED_2500
+#define SPEED_2500 2500
+#endif
+#ifndef SPEED_5000
+#define SPEED_5000 5000
+#endif
+#ifndef SPEED_14000
+#define SPEED_14000 14000
+#endif
+#ifndef SPEED_25000
+#define SPEED_25000 25000
+#endif
+#ifndef SPEED_50000
+#define SPEED_50000 50000
+#endif
+#ifndef SPEED_56000
+#define SPEED_56000 56000
+#endif
+#ifndef SPEED_100000
+#define SPEED_100000 100000
+#endif
+#ifndef SPEED_200000
+#define SPEED_200000 200000
+#endif
+
+#ifndef RHEL_RELEASE_VERSION
+#define RHEL_RELEASE_VERSION(a, b) (((a) << 8) + (b))
+#endif
+#ifndef AX_RELEASE_VERSION
+#define AX_RELEASE_VERSION(a, b) (((a) << 8) + (b))
+#endif
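+
+/*
+ * Encoding example (illustrative): RHEL_RELEASE_VERSION(8, 2) evaluates to
+ * (8 << 8) + 2 = 0x0802, so checks such as
+ *
+ *	#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 2))
+ *
+ * order releases numerically as long as the minor number stays below 256.
+ */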
+
+#ifndef AX_RELEASE_CODE
+#define AX_RELEASE_CODE 0
+#endif
+
+#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3, 0))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5, 0)
+#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3, 1))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5, 1)
+#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3, 2))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5, 3)
+#endif
+
+#ifndef RHEL_RELEASE_CODE
+/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */
+#define RHEL_RELEASE_CODE 0
+#endif
+
+/* RHEL 7 didn't backport the parameter change in
+ * create_singlethread_workqueue.
+ * If/when RH corrects this we will want to tighten up the version check.
+ */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 0))
+#undef create_singlethread_workqueue
+#define create_singlethread_workqueue(name)                                    \
+	alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
+#endif
+
+/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find
+ * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new
+ * enough versions of Ubuntu. Otherwise you can simply see it in the output of
+ * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in
+ * the linux-source package, but in the linux-headers package. It begins to
+ * appear in later releases of 14.04 and 14.10.
+ *
+ * Ex:
+ * 
+ *  $uname -r
+ *  3.13.0-45-generic
+ * ABI is 45
+ *
+ * 
+ *  $uname -r
+ *  3.16.0-23-generic
+ * ABI is 23
+ */
+#ifndef UTS_UBUNTU_RELEASE_ABI
+#define UTS_UBUNTU_RELEASE_ABI 0
+#define UBUNTU_VERSION_CODE 0
+#else
+/* Ubuntu does not provide actual release version macro, so we use the kernel
+ * version plus the ABI to generate a unique version code specific to Ubuntu.
+ * In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in order to
+ * ignore differences in sublevel which are not important since we have the
+ * ABI value. Otherwise, it becomes impossible to correlate ABI to version for
+ * ordering checks.
+ *
+ * This also lets us store an ABI value of up to 65535, since it can occupy
+ * the space freed by masking off the lower byte of the Linux version code.
+ */
+#define UBUNTU_VERSION_CODE                                                    \
+	(((~0xFF & LINUX_VERSION_CODE) << 8) + UTS_UBUNTU_RELEASE_ABI)
+
+#if UTS_UBUNTU_RELEASE_ABI > 65535
+#error UTS_UBUNTU_RELEASE_ABI is larger than 65535...
+#endif /* UTS_UBUNTU_RELEASE_ABI > 65535 */
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0))
+/* Our version code scheme does not make sense for non 3.x or newer kernels,
+ * and we have no support in kcompat for this scenario. Thus, treat this as a
+ * non-Ubuntu kernel. Possibly might be better to error here.
+ */
+#define UTS_UBUNTU_RELEASE_ABI 0
+#define UBUNTU_VERSION_CODE 0
+#endif /* <= 3.0.0 */
+#endif /* !UTS_UBUNTU_RELEASE_ABI */
+
+/* We ignore the 3rd digit since we want to give precedence to the additional
+ * ABI value provided by Ubuntu.
+ */
+#define UBUNTU_VERSION(a, b, c, d) (((a) << 24) + ((b) << 16) + (d))
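+
+/*
+ * Worked example (illustrative): for the 3.13.0-45-generic kernel mentioned
+ * above, LINUX_VERSION_CODE is KERNEL_VERSION(3, 13, 0) = 0x030d00, so
+ *
+ *	UBUNTU_VERSION_CODE = ((0x030d00 & ~0xFF) << 8) + 45 = 0x030d002d
+ *
+ * and UBUNTU_VERSION(3, 13, 0, 45) yields the same value for comparisons;
+ * the third argument is deliberately ignored, as noted above.
+ */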
+
+/* SLE_VERSION is used to generate a 3-digit encoding that can order SLE
+ * kernels based on their major release, service pack, and a possible
+ * maintenance release.
+ */
+#define SLE_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+
+/* The SLE_LOCALVERSION_CODE comes from a 3-digit code added as part of the
+ * Linux kernel version. It is extracted by the driver Makefile. This macro is
+ * used to generate codes for making comparisons below.
+ */
+#define SLE_LOCALVERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
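+
+/*
+ * Example (illustrative): SLE_VERSION(12, 3, 0) = (12 << 16) + (3 << 8) + 0
+ * = 0x0c0300, so a check like
+ *
+ *	#if (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0))
+ *
+ * matches SLES12 SP3 and anything newer resolved by the table below.
+ */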
+
+#ifdef CONFIG_SUSE_KERNEL
+/* Since at least SLE 12sp4 and SLE 15, SUSE kernels have provided
+ * CONFIG_SUSE_VERSION, CONFIG_SUSE_PATCHLEVEL and CONFIG_SUSE_AUXRELEASE.
+ * Use these to generate SLE_VERSION_CODE when available, and only fall back
+ * to the manual table below otherwise. All future SLE kernels are expected
+ * to include these values, so the table remains only for older releases.
+ */
+#ifdef CONFIG_SUSE_VERSION
+#ifndef CONFIG_SUSE_PATCHLEVEL
+#error "CONFIG_SUSE_VERSION exists but CONFIG_SUSE_PATCHLEVEL is missing"
+#endif
+#ifndef CONFIG_SUSE_AUXRELEASE
+#error "CONFIG_SUSE_VERSION exists but CONFIG_SUSE_AUXRELEASE is missing"
+#endif
+#define SLE_VERSION_CODE                                                       \
+	SLE_VERSION(CONFIG_SUSE_VERSION, CONFIG_SUSE_PATCHLEVEL,               \
+		    CONFIG_SUSE_AUXRELEASE)
+#else
+/* If we do not have the CONFIG_SUSE_VERSION configuration values, fall back
+ * to the following table for older releases.
+ */
+#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 27))
+/* SLES11 GA is 2.6.27 based */
+#define SLE_VERSION_CODE SLE_VERSION(11, 0, 0)
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 32))
+/* SLES11 SP1 is 2.6.32 based */
+#define SLE_VERSION_CODE SLE_VERSION(11, 1, 0)
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3, 0, 13))
+/* SLES11 SP2 GA is 3.0.13-0.27 */
+#define SLE_VERSION_CODE SLE_VERSION(11, 2, 0)
+#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3, 0, 76)))
+/* SLES11 SP3 GA is 3.0.76-0.11 */
+#define SLE_VERSION_CODE SLE_VERSION(11, 3, 0)
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3, 0, 101))
+#if (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(0, 8, 0))
+/* some SLES11sp2 update kernels up to 3.0.101-0.7.x */
+#define SLE_VERSION_CODE SLE_VERSION(11, 2, 0)
+#elif (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(63, 0, 0))
+/* most SLES11sp3 update kernels */
+#define SLE_VERSION_CODE SLE_VERSION(11, 3, 0)
+#else
+/* SLES11 SP4 GA (3.0.101-63) and update kernels 3.0.101-63+ */
+#define SLE_VERSION_CODE SLE_VERSION(11, 4, 0)
+#endif
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3, 12, 28))
+/**
+ * SLES12 GA is 3.12.28-4
+ * kernel updates 3.12.xx-<33 through 52>[.yy]
+ */
+#define SLE_VERSION_CODE SLE_VERSION(12, 0, 0)
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3, 12, 49))
+/**
+ * SLES12 SP1 GA is 3.12.49-11
+ * updates 3.12.xx-60.yy where xx={51..}
+ */
+#define SLE_VERSION_CODE SLE_VERSION(12, 1, 0)
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 21) &&                      \
+	(LINUX_VERSION_CODE <= KERNEL_VERSION(4, 4, 59))) ||                   \
+       (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 74) &&                      \
+	LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) &&                        \
+	SLE_LOCALVERSION_CODE >= KERNEL_VERSION(92, 0, 0) &&                   \
+	SLE_LOCALVERSION_CODE < KERNEL_VERSION(93, 0, 0)))
+/** SLES12 SP2 GA is 4.4.21-69.
+ * SLES12 SP2 updates before SLES12 SP3 are: 4.4.{21,38,49,59}
+ * SLES12 SP2 updates after SLES12 SP3 are: 4.4.{74,90,103,114,120}
+ * but they all use a SLE_LOCALVERSION_CODE matching 92.nn.y
+ */
+#define SLE_VERSION_CODE SLE_VERSION(12, 2, 0)
+#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(4, 4, 73) ||                      \
+	LINUX_VERSION_CODE == KERNEL_VERSION(4, 4, 82) ||                      \
+	LINUX_VERSION_CODE == KERNEL_VERSION(4, 4, 92)) ||                     \
+       (LINUX_VERSION_CODE == KERNEL_VERSION(4, 4, 103) &&                     \
+	(SLE_LOCALVERSION_CODE == KERNEL_VERSION(6, 33, 0) ||                  \
+	 SLE_LOCALVERSION_CODE == KERNEL_VERSION(6, 38, 0))) ||                \
+       (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 114) &&                     \
+	LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) &&                        \
+	SLE_LOCALVERSION_CODE >= KERNEL_VERSION(94, 0, 0) &&                   \
+	SLE_LOCALVERSION_CODE < KERNEL_VERSION(95, 0, 0)))
+/* SLES12 SP3 GM is 4.4.73-5 and update kernels are 4.4.82-6.3.
+ * SLES12 SP3 updates not conflicting with SP2 are: 4.4.{82,92}
+ * SLES12 SP3 updates conflicting with SP2 are:
+ *   - 4.4.103-6.33.1, 4.4.103-6.38.1
+ *   - 4.4.{114,120}-94.nn.y
+ */
+#define SLE_VERSION_CODE SLE_VERSION(12, 3, 0)
+#else
+#error "This looks like a SUSE kernel, but it has an unrecognized local version code."
+#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */
+#endif /* !CONFIG_SUSE_VERSION */
+#endif /* CONFIG_SUSE_KERNEL */
+#ifndef SLE_VERSION_CODE
+#define SLE_VERSION_CODE 0
+#endif /* SLE_VERSION_CODE */
+#ifndef SLE_LOCALVERSION_CODE
+#define SLE_LOCALVERSION_CODE 0
+#endif /* SLE_LOCALVERSION_CODE */
+
+/* Include definitions from the new kcompat layout */
+#include "kcompat_defs.h"
+
+/*
+ * ADQ depends on __TC_MQPRIO_MODE_MAX and related kernel code
+ * added around 4.15. Some distributions (e.g. Oracle Linux 7.7)
+ * have done a partial back-port of that to their kernels based
+ * on older mainline kernels that did not include all the necessary
+ * kernel enablement to support ADQ.
+ * Undefine __TC_MQPRIO_MODE_MAX for all OSV distributions with
+ * kernels based on mainline kernels older than 4.15 except for
+ * RHEL, SLES and Ubuntu which are known to have good back-ports.
+ */
+#if (!RHEL_RELEASE_CODE && !SLE_VERSION_CODE && !UBUNTU_VERSION_CODE)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0))
+#undef __TC_MQPRIO_MODE_MAX
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */
+#endif /* if (NOT RHEL && NOT SLES && NOT UBUNTU) */
+
+#ifdef __KLOCWORK__
+/* The definitions below are never compiled into the binary driver; they are
+ * only used to tune Klocwork scans and work around false positives.
+ */
+#ifdef ARRAY_SIZE
+#undef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#define memcpy(dest, src, len) memcpy_s(dest, len, src, len)
+#define memset(dest, ch, len) memset_s(dest, len, ch, len)
+
+static inline int _kc_test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old;
+	unsigned long flags = 0;
+
+	_atomic_spin_lock_irqsave(p, flags);
+	old = *p;
+	*p = old & ~mask;
+	_atomic_spin_unlock_irqrestore(p, flags);
+
+	return (old & mask) != 0;
+}
+#define test_and_clear_bit(nr, addr) _kc_test_and_clear_bit(nr, addr)
+
+static inline int _kc_test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old;
+	unsigned long flags = 0;
+
+	_atomic_spin_lock_irqsave(p, flags);
+	old = *p;
+	*p = old | mask;
+	_atomic_spin_unlock_irqrestore(p, flags);
+
+	return (old & mask) != 0;
+}
+#define test_and_set_bit(nr, addr) _kc_test_and_set_bit(nr, addr)
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+#undef dev_dbg
+#define dev_dbg(dev, format, arg...) dev_printk(KERN_DEBUG, dev, format, ##arg)
+#undef pr_debug
+#define pr_debug(format, arg...) printk(KERN_DEBUG format, ##arg)
+#endif /* CONFIG_DYNAMIC_DEBUG */
+
+#undef hlist_for_each_entry_safe
+#define hlist_for_each_entry_safe(pos, n, head, member)                        \
+	for (n = NULL,                                                         \
+	    pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);     \
+	     pos; pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)),   \
+					 member))
+
+#ifdef uninitialized_var
+#undef uninitialized_var
+#define uninitialized_var(x) (x = *(&(x)))
+#endif
+
+#ifdef WRITE_ONCE
+#undef WRITE_ONCE
+#define WRITE_ONCE(x, val) ((x) = (val))
+#endif /* WRITE_ONCE */
+
+#ifdef wait_event_interruptible_timeout
+#undef wait_event_interruptible_timeout
+#define wait_event_interruptible_timeout(wq_head, condition, timeout)          \
+	({                                                                     \
+		long ret;                                                      \
+		if ((condition))                                               \
+			ret = timeout;                                         \
+		else                                                           \
+			ret = 0;                                               \
+		ret;                                                           \
+	})
+#endif /* wait_event_interruptible_timeout */
+
+#ifdef max_t
+#undef max_t
+#define max_t(type, x, y)                                                      \
+	({                                                                     \
+		type __x = (x);                                                \
+		type __y = (y);                                                \
+		__x > __y ? __x : __y;                                         \
+	})
+#endif /* max_t */
+
+#ifdef min_t
+#undef min_t
+#define min_t(type, x, y)                                                      \
+	({                                                                     \
+		type __x = (x);                                                \
+		type __y = (y);                                                \
+		__x < __y ? __x : __y;                                         \
+	})
+#endif /* min_t */
+#endif /* __KLOCWORK__ */
+
+/* Older versions of GCC will trigger -Wformat-nonliteral warnings for const
+ * char * strings. Unfortunately, the implementation of do_trace_printk does
+ * this, in order to add a storage attribute to the memory. This was fixed in
+ * GCC 5.1, but we still use older distributions built with GCC 4.x.
+ *
+ * The string pointer is only passed as a const char * to the __trace_bprintk
+ * function. Since that function has the __printf attribute, it will trigger
+ * the warnings. We can't remove the attribute, so instead we'll use the
+ * __diag macro to disable -Wformat-nonliteral around the call to
+ * __trace_bprintk.
+ */
+#if GCC_VERSION < 50100
+#define __trace_bprintk(ip, fmt, args...)                                      \
+	({                                                                     \
+		int err;                                                       \
+		__diag_push();                                                 \
+		__diag(ignored "-Wformat-nonliteral");                         \
+		err = __trace_bprintk(ip, fmt, ##args);                        \
+		__diag_pop();                                                  \
+		err;                                                           \
+	})
+#endif /* GCC_VERSION < 5.1.0 */
+
+/* Newer kernels removed <linux/pci-aspm.h> */
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) &&                         \
+     (!(RHEL_RELEASE_CODE &&                                                   \
+	RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3)) &&                    \
+      !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15, 3, 0)))))
+#define HAVE_PCI_ASPM_H
+#endif
+
+/*****************************************************************************/
+/* 2.4.3 => 2.4.0 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))
+
+/**************************************/
+/* PCI DRIVER API */
+
+#ifndef pci_set_dma_mask
+#define pci_set_dma_mask _kc_pci_set_dma_mask
+int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
+#endif
+
+#ifndef pci_request_regions
+#define pci_request_regions _kc_pci_request_regions
+int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
+#endif
+
+#ifndef pci_release_regions
+#define pci_release_regions _kc_pci_release_regions
+void _kc_pci_release_regions(struct pci_dev *pdev);
+#endif
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+#ifndef alloc_etherdev
+#define alloc_etherdev _kc_alloc_etherdev
+struct net_device *_kc_alloc_etherdev(int sizeof_priv);
+#endif
+
+#ifndef is_valid_ether_addr
+#define is_valid_ether_addr _kc_is_valid_ether_addr
+int _kc_is_valid_ether_addr(u8 *addr);
+#endif
+
+/**************************************/
+/* MISCELLANEOUS */
+
+#ifndef INIT_TQUEUE
+#define INIT_TQUEUE(_tq, _routine, _data)                                      \
+	do {                                                                   \
+		INIT_LIST_HEAD(&(_tq)->list);                                  \
+		(_tq)->sync = 0;                                               \
+		(_tq)->routine = _routine;                                     \
+		(_tq)->data = _data;                                           \
+	} while (0)
+#endif
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 5))
+/* Generic MII registers. */
+#define MII_BMCR 0x00 /* Basic mode control register */
+#define MII_BMSR 0x01 /* Basic mode status register  */
+#define MII_PHYSID1 0x02 /* PHYS ID 1                   */
+#define MII_PHYSID2 0x03 /* PHYS ID 2                   */
+#define MII_ADVERTISE 0x04 /* Advertisement control reg   */
+#define MII_LPA 0x05 /* Link partner ability reg    */
+#define MII_EXPANSION 0x06 /* Expansion register          */
+/* Basic mode control register. */
+#define BMCR_FULLDPLX 0x0100 /* Full duplex                 */
+#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation     */
+/* Basic mode status register. */
+#define BMSR_ERCAP 0x0001 /* Ext-reg capability          */
+#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
+#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex  */
+#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex  */
+#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
+#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */
+/* Advertisement control register. */
+#define ADVERTISE_CSMA 0x0001 /* Only selector supported     */
+#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex  */
+#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex  */
+#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
+#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
+#define ADVERTISE_ALL                                                          \
+	(ADVERTISE_10HALF | ADVERTISE_10FULL | ADVERTISE_100HALF |             \
+	 ADVERTISE_100FULL)
+/* Expansion register for auto-negotiation. */
+#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words    */
+#endif
+
+/*****************************************************************************/
+/* 2.4.6 => 2.4.3 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
+
+#ifndef pci_set_power_state
+#define pci_set_power_state _kc_pci_set_power_state
+int _kc_pci_set_power_state(struct pci_dev *dev, int state);
+#endif
+
+#ifndef pci_enable_wake
+#define pci_enable_wake _kc_pci_enable_wake
+int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);
+#endif
+
+#ifndef pci_disable_device
+#define pci_disable_device _kc_pci_disable_device
+void _kc_pci_disable_device(struct pci_dev *pdev);
+#endif
+
+/* PCI PM entry point syntax changed, so don't support suspend/resume */
+#undef CONFIG_PM
+
+#endif /* 2.4.6 => 2.4.3 */
+
+#ifndef HAVE_PCI_SET_MWI
+#define pci_set_mwi(X)                                                         \
+	pci_write_config_word(X, PCI_COMMAND,                                  \
+			      adapter->hw.bus.pci_cmd_word |                   \
+				      PCI_COMMAND_INVALIDATE);
+#define pci_clear_mwi(X)                                                       \
+	pci_write_config_word(X, PCI_COMMAND,                                  \
+			      adapter->hw.bus.pci_cmd_word &                   \
+				      ~PCI_COMMAND_INVALIDATE);
+#endif
+
+/*****************************************************************************/
+/* 2.4.10 => 2.4.9 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 10))
+
+/**************************************/
+/* MODULE API */
+
+#ifndef MODULE_LICENSE
+#define MODULE_LICENSE(X)
+#endif
+
+/**************************************/
+/* OTHER */
+
+#undef min
+#define min(x, y)                                                              \
+	({                                                                     \
+		const typeof(x) _x = (x);                                      \
+		const typeof(y) _y = (y);                                      \
+		(void)(&_x == &_y);                                            \
+		_x < _y ? _x : _y;                                             \
+	})
+
+#undef max
+#define max(x, y)                                                              \
+	({                                                                     \
+		const typeof(x) _x = (x);                                      \
+		const typeof(y) _y = (y);                                      \
+		(void)(&_x == &_y);                                            \
+		_x > _y ? _x : _y;                                             \
+	})
+
+#define min_t(type, x, y)                                                      \
+	({                                                                     \
+		type _x = (x);                                                 \
+		type _y = (y);                                                 \
+		_x < _y ? _x : _y;                                             \
+	})
+
+#define max_t(type, x, y)                                                      \
+	({                                                                     \
+		type _x = (x);                                                 \
+		type _y = (y);                                                 \
+		_x > _y ? _x : _y;                                             \
+	})
+
+#ifndef list_for_each_safe
+#define list_for_each_safe(pos, n, head)                                       \
+	for (pos = (head)->next, n = pos->next; pos != (head);                 \
+	     pos = n, n = pos->next)
+#endif
+
+#ifndef ____cacheline_aligned_in_smp
+#ifdef CONFIG_SMP
+#define ____cacheline_aligned_in_smp ____cacheline_aligned
+#else
+#define ____cacheline_aligned_in_smp
+#endif /* CONFIG_SMP */
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 8))
+int _kc_snprintf(char *buf, size_t size, const char *fmt, ...);
+#define snprintf(buf, size, fmt, args...) _kc_snprintf(buf, size, fmt, ##args)
+int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args)
+#else /* 2.4.8 => 2.4.9 */
+int snprintf(char *buf, size_t size, const char *fmt, ...);
+int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+#endif
+#endif /* 2.4.10 => 2.4.9 */
+
+/*****************************************************************************/
+/* 2.4.12 => 2.4.10 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 12))
+#ifndef HAVE_NETIF_MSG
+#define HAVE_NETIF_MSG 1
+enum {
+	NETIF_MSG_DRV = 0x0001,
+	NETIF_MSG_PROBE = 0x0002,
+	NETIF_MSG_LINK = 0x0004,
+	NETIF_MSG_TIMER = 0x0008,
+	NETIF_MSG_IFDOWN = 0x0010,
+	NETIF_MSG_IFUP = 0x0020,
+	NETIF_MSG_RX_ERR = 0x0040,
+	NETIF_MSG_TX_ERR = 0x0080,
+	NETIF_MSG_TX_QUEUED = 0x0100,
+	NETIF_MSG_INTR = 0x0200,
+	NETIF_MSG_TX_DONE = 0x0400,
+	NETIF_MSG_RX_STATUS = 0x0800,
+	NETIF_MSG_PKTDATA = 0x1000,
+	NETIF_MSG_HW = 0x2000,
+	NETIF_MSG_WOL = 0x4000,
+};
+
+#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
+#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
+#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
+#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
+#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
+#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
+#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
+#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
+#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
+#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
+#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
+#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
+#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
+#endif /* !HAVE_NETIF_MSG */
+#endif /* 2.4.12 => 2.4.10 */
+
+/*****************************************************************************/
+/* 2.4.13 => 2.4.12 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 13))
+
+/**************************************/
+/* PCI DMA MAPPING */
+
+#ifndef virt_to_page
+#define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))
+#endif
+
+#ifndef pci_map_page
+#define pci_map_page _kc_pci_map_page
+u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page,
+		     unsigned long offset, size_t size, int direction);
+#endif
+
+#ifndef pci_unmap_page
+#define pci_unmap_page _kc_pci_unmap_page
+void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
+			int direction);
+#endif
+
+/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */
+
+#undef DMA_32BIT_MASK
+#define DMA_32BIT_MASK 0xffffffff
+#undef DMA_64BIT_MASK
+#define DMA_64BIT_MASK 0xffffffff
+
+/**************************************/
+/* OTHER */
+
+#ifndef cpu_relax
+#define cpu_relax() rep_nop()
+#endif
+
+struct vlan_ethhdr {
+	unsigned char h_dest[ETH_ALEN];
+	unsigned char h_source[ETH_ALEN];
+	unsigned short h_vlan_proto;
+	unsigned short h_vlan_TCI;
+	unsigned short h_vlan_encapsulated_proto;
+};
+#endif /* 2.4.13 => 2.4.12 */
+
+/*****************************************************************************/
+/* 2.4.17 => 2.4.12 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 17))
+
+#ifndef __devexit_p
+#define __devexit_p(x) (&(x))
+#endif
+
+#endif /* 2.4.17 => 2.4.12 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 18))
+#define NETIF_MSG_HW 0x2000
+#define NETIF_MSG_WOL 0x4000
+
+#ifndef netif_msg_hw
+#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
+#endif
+#ifndef netif_msg_wol
+#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
+#endif
+#endif /* 2.4.18 */
+
+/*****************************************************************************/
+
+/*****************************************************************************/
+/* 2.4.20 => 2.4.19 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 20))
+
+/* we won't support NAPI on less than 2.4.20 */
+#ifdef NAPI
+#undef NAPI
+#endif
+
+#endif /* 2.4.20 => 2.4.19 */
+
+/*****************************************************************************/
+/* 2.4.22 => 2.4.17 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 22))
+#define pci_name(x) ((x)->slot_name)
+#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map)
+
+#ifndef SUPPORTED_10000baseT_Full
+#define SUPPORTED_10000baseT_Full BIT(12)
+#endif
+#ifndef ADVERTISED_10000baseT_Full
+#define ADVERTISED_10000baseT_Full BIT(12)
+#endif
+#endif
+
+/*****************************************************************************/
+/* 2.4.22 => 2.4.17 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 22))
+#endif
+
+/*****************************************************************************/
+/*****************************************************************************/
+/* 2.4.23 => 2.4.22 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 23))
+/*****************************************************************************/
+#ifdef NAPI
+#ifndef netif_poll_disable
+#define netif_poll_disable(x) _kc_netif_poll_disable(x)
+static inline void _kc_netif_poll_disable(struct net_device *netdev)
+{
+	while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
+		/* No hurry */
+		current->state = TASK_INTERRUPTIBLE;
+		schedule_timeout(1);
+	}
+}
+#endif
+#ifndef netif_poll_enable
+#define netif_poll_enable(x) _kc_netif_poll_enable(x)
+static inline void _kc_netif_poll_enable(struct net_device *netdev)
+{
+	clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
+}
+#endif
+#endif /* NAPI */
+#ifndef netif_tx_disable
+#define netif_tx_disable(x) _kc_netif_tx_disable(x)
+static inline void _kc_netif_tx_disable(struct net_device *dev)
+{
+	spin_lock_bh(&dev->xmit_lock);
+	netif_stop_queue(dev);
+	spin_unlock_bh(&dev->xmit_lock);
+}
+#endif
+#else /* 2.4.23 => 2.4.22 */
+#define HAVE_SCTP
+#endif /* 2.4.23 => 2.4.22 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 25) ||                          \
+     (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) &&                         \
+      LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 4)))
+#define ETHTOOL_OPS_COMPAT
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 27))
+#define __user
+#endif /* < 2.4.27 */
+
+/*****************************************************************************/
+/* 2.5.71 => 2.4.x */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 71))
+#define sk_protocol protocol
+#define pci_get_device pci_find_device
+#endif /* 2.5.71 => 2.4.x */
+
+/*****************************************************************************/
+/* < 2.4.27 or 2.6.0 <= 2.6.5 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 27) ||                          \
+     (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) &&                         \
+      LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 5)))
+
+#ifndef netif_msg_init
+#define netif_msg_init _kc_netif_msg_init
+static inline u32 _kc_netif_msg_init(int debug_value,
+				     int default_msg_enable_bits)
+{
+	/* use default */
+	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
+		return default_msg_enable_bits;
+	if (debug_value == 0) /* no output */
+		return 0;
+	/* set low N bits */
+	return (1 << debug_value) - 1;
+}
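+
+/* Illustrative: netif_msg_init(3, defaults) returns (1 << 3) - 1 = 0x7,
+ * which enables NETIF_MSG_DRV, NETIF_MSG_PROBE and NETIF_MSG_LINK, while a
+ * negative or out-of-range debug_value simply returns the supplied defaults.
+ */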
+#endif
+
+#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
+/*****************************************************************************/
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 27)) ||                        \
+     ((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) &&                       \
+      (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 3))))
+#define netdev_priv(x) x->priv
+#endif
+
+/*****************************************************************************/
+/* <= 2.5.0 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0))
+#include 
+#undef pci_register_driver
+#define pci_register_driver pci_module_init
+
+/*
+ * Most of the dma compat code is copied/modified from the 2.4.37
+ * /include/linux/libata-compat.h header file
+ */
+/* These definitions mirror those in pci.h, so they can be used
+ * interchangeably with their PCI_ counterparts */
+enum dma_data_direction {
+	DMA_BIDIRECTIONAL = 0,
+	DMA_TO_DEVICE = 1,
+	DMA_FROM_DEVICE = 2,
+	DMA_NONE = 3,
+};
+
+struct device {
+	struct pci_dev pdev;
+};
+
+static inline struct pci_dev *to_pci_dev(struct device *dev)
+{
+	return (struct pci_dev *)dev;
+}
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+	return (struct device *)pdev;
+}
+#define pdev_printk(lvl, pdev, fmt, args...)                                   \
+	printk("%s %s: " fmt, lvl, pci_name(pdev), ##args)
+#define dev_err(dev, fmt, args...)                                             \
+	pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ##args)
+#define dev_info(dev, fmt, args...)                                            \
+	pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ##args)
+#define dev_warn(dev, fmt, args...)                                            \
+	pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ##args)
+#define dev_notice(dev, fmt, args...)                                          \
+	pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ##args)
+#define dev_dbg(dev, fmt, args...)                                             \
+	pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ##args)
+
+/* NOTE: dangerous! we ignore the 'gfp' argument */
+#define dma_alloc_coherent(dev, sz, dma, gfp)                                  \
+	pci_alloc_consistent(to_pci_dev(dev), (sz), (dma))
+#define dma_free_coherent(dev, sz, addr, dma_addr)                             \
+	pci_free_consistent(to_pci_dev(dev), (sz), (addr), (dma_addr))
+
+#define dma_map_page(dev, a, b, c, d)                                          \
+	pci_map_page(to_pci_dev(dev), (a), (b), (c), (d))
+#define dma_unmap_page(dev, a, b, c)                                           \
+	pci_unmap_page(to_pci_dev(dev), (a), (b), (c))
+
+#define dma_map_single(dev, a, b, c)                                           \
+	pci_map_single(to_pci_dev(dev), (a), (b), (c))
+#define dma_unmap_single(dev, a, b, c)                                         \
+	pci_unmap_single(to_pci_dev(dev), (a), (b), (c))
+
+#define dma_map_sg(dev, sg, nents, dir)                                        \
+	pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir))
+#define dma_unmap_sg(dev, sg, nents, dir)                                      \
+	pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir))
+
+#define dma_sync_single(dev, a, b, c)                                          \
+	pci_dma_sync_single(to_pci_dev(dev), (a), (b), (c))
+
+/* for range just sync everything, that's all the pci API can do */
+#define dma_sync_single_range(dev, addr, off, sz, dir)                         \
+	pci_dma_sync_single(to_pci_dev(dev), (addr), (off) + (sz), (dir))
+
+#define dma_set_mask(dev, mask) pci_set_dma_mask(to_pci_dev(dev), (mask))
+
+/* hlist_* code - double linked lists */
+struct hlist_head {
+	struct hlist_node *first;
+};
+
+struct hlist_node {
+	struct hlist_node *next, **pprev;
+};
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+	struct hlist_node *next = n->next;
+	struct hlist_node **pprev = n->pprev;
+	*pprev = next;
+	if (next)
+		next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+	__hlist_del(n);
+	n->next = NULL;
+	n->pprev = NULL;
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+	struct hlist_node *first = h->first;
+
+	n->next = first;
+	if (first)
+		first->pprev = &n->next;
+	h->first = n;
+	n->pprev = &h->first;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+	return !h->first;
+}
+#define HLIST_HEAD_INIT                                                        \
+	{                                                                      \
+		.first = NULL                                                  \
+	}
+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+	h->next = NULL;
+	h->pprev = NULL;
+}
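+/*
+ * Minimal usage sketch for the hlist backport above (hypothetical names):
+ *
+ *	struct hlist_head bucket = HLIST_HEAD_INIT;
+ *	struct hlist_node entry;
+ *
+ *	INIT_HLIST_NODE(&entry);
+ *	hlist_add_head(&entry, &bucket);
+ *	hlist_del(&entry);
+ */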
+
+#ifndef might_sleep
+#define might_sleep()
+#endif
+#else
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+	return &pdev->dev;
+}
+#endif /* <= 2.5.0 */
+
+/*****************************************************************************/
+/* 2.5.28 => 2.4.23 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 28))
+
+#include <linux/tqueue.h>
+#define work_struct tq_struct
+#undef INIT_WORK
+#define INIT_WORK(a, b) INIT_TQUEUE(a, (void (*)(void *))b, a)
+#undef container_of
+#define container_of list_entry
+#define schedule_work schedule_task
+#define flush_scheduled_work flush_scheduled_tasks
+#define cancel_work_sync(x) flush_scheduled_work()
+
+#endif /* 2.5.28 => 2.4.23 */
+
+/*****************************************************************************/
+/* 2.6.0 => 2.5.28 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#ifndef read_barrier_depends
+#define read_barrier_depends() rmb()
+#endif
+
+#ifndef rcu_head
+struct __kc_callback_head {
+	struct __kc_callback_head *next;
+	void (*func)(struct callback_head *head);
+};
+#define rcu_head __kc_callback_head
+#endif
+
+#undef get_cpu
+#define get_cpu() smp_processor_id()
+#undef put_cpu
+#define put_cpu()                                                              \
+	do {                                                                   \
+	} while (0)
+#define MODULE_INFO(version, _version)
+
+#define dma_set_coherent_mask(dev, mask) 1
+
+#undef dev_put
+#define dev_put(dev) __dev_put(dev)
+
+#ifndef skb_fill_page_desc
+#define skb_fill_page_desc _kc_skb_fill_page_desc
+void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
+			    int off, int size);
+#endif
+
+#undef ALIGN
+#define ALIGN(x, a) (((x) + (a)-1) & ~((a)-1))
+
+#ifndef page_count
+#define page_count(p) atomic_read(&(p)->count)
+#endif
+
+#ifdef MAX_NUMNODES
+#undef MAX_NUMNODES
+#endif
+#define MAX_NUMNODES 1
+
+/* find_first_bit and find_next_bit are not defined for most
+ * 2.4 kernels (except for the redhat 2.4.21 kernels).
+ */
+#include <linux/bitops.h>
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+#undef find_next_bit
+#define find_next_bit _kc_find_next_bit
+unsigned long _kc_find_next_bit(const unsigned long *addr, unsigned long size,
+				unsigned long offset);
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+
+#ifndef netdev_name
+static inline const char *_kc_netdev_name(const struct net_device *dev)
+{
+	if (strchr(dev->name, '%'))
+		return "(unregistered net_device)";
+	return dev->name;
+}
+#define netdev_name(netdev) _kc_netdev_name(netdev)
+#endif /* netdev_name */
+
+#ifndef strlcpy
+#define strlcpy _kc_strlcpy
+size_t _kc_strlcpy(char *dest, const char *src, size_t size);
+#endif /* strlcpy */
+
+#ifndef do_div
+#if BITS_PER_LONG == 64
+#define do_div(n, base)                                                        \
+	({                                                                     \
+		uint32_t __base = (base);                                      \
+		uint32_t __rem;                                                \
+		__rem = ((uint64_t)(n)) % __base;                              \
+		(n) = ((uint64_t)(n)) / __base;                                \
+		__rem;                                                         \
+	})
+#elif BITS_PER_LONG == 32
+uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor);
+#define do_div(n, base)                                                        \
+	({                                                                     \
+		uint32_t __base = (base);                                      \
+		uint32_t __rem;                                                \
+		if (likely(((n) >> 32) == 0)) {                                \
+			__rem = (uint32_t)(n) % __base;                        \
+			(n) = (uint32_t)(n) / __base;                          \
+		} else                                                         \
+			__rem = _kc__div64_32(&(n), __base);                   \
+		__rem;                                                         \
+	})
+#else /* BITS_PER_LONG == ?? */
+#error do_div() does not yet support the C64
+#endif /* BITS_PER_LONG */
+#endif /* do_div */
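+/*
+ * Reminder on do_div() semantics (holds for the fallbacks above as well):
+ * the 64-bit dividend is divided in place and the macro evaluates to the
+ * 32-bit remainder.  With hypothetical values,
+ *
+ *	u64 ns = 1500000000ULL;
+ *	u32 rem = do_div(ns, 1000000000);
+ *
+ * leaves ns == 1 and rem == 500000000.
+ */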
+
+#ifndef NSEC_PER_SEC
+#define NSEC_PER_SEC 1000000000L
+#endif
+
+#undef HAVE_I2C_SUPPORT
+#else /* 2.6.0 */
+
+#endif /* 2.6.0 => 2.5.28 */
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 3))
+#define dma_pool pci_pool
+#define dma_pool_destroy pci_pool_destroy
+#define dma_pool_alloc pci_pool_alloc
+#define dma_pool_free pci_pool_free
+
+#define dma_pool_create(name, dev, size, align, allocation)                    \
+	pci_pool_create((name), to_pci_dev(dev), (size), (align), (allocation))
+#endif /* < 2.6.3 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 4))
+#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+/* 2.6.5 => 2.6.0 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 5))
+#define dma_sync_single_for_cpu dma_sync_single
+#define dma_sync_single_for_device dma_sync_single
+#define dma_sync_single_range_for_cpu dma_sync_single_range
+#define dma_sync_single_range_for_device dma_sync_single_range
+#ifndef pci_dma_mapping_error
+#define pci_dma_mapping_error _kc_pci_dma_mapping_error
+static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr)
+{
+	return dma_addr == 0;
+}
+#endif
+#endif /* 2.6.5 => 2.6.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 4))
+int _kc_scnprintf(char *buf, size_t size, const char *fmt, ...);
+#define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args)
+#endif /* < 2.6.4 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 6))
+/* taken from 2.6 include/linux/bitmap.h */
+#undef bitmap_zero
+#define bitmap_zero _kc_bitmap_zero
+static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
+{
+	if (nbits <= BITS_PER_LONG)
+		*dst = 0UL;
+	else {
+		int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+		memset(dst, 0, len);
+	}
+}
+#define page_to_nid(x) 0
+
+#endif /* < 2.6.6 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 7))
+#undef if_mii
+#define if_mii _kc_if_mii
+static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
+{
+	return (struct mii_ioctl_data *)&rq->ifr_ifru;
+}
+
+#ifndef __force
+#define __force
+#endif
+#endif /* < 2.6.7 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 8))
+#ifndef PCI_EXP_DEVCTL
+#define PCI_EXP_DEVCTL 8
+#endif
+#ifndef PCI_EXP_DEVCTL_CERE
+#define PCI_EXP_DEVCTL_CERE 0x0001
+#endif
+#define PCI_EXP_FLAGS 2 /* Capabilities register */
+#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */
+#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */
+#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
+#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */
+#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */
+#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
+#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
+#define PCI_EXP_DEVCAP 4 /* Device capabilities */
+#define PCI_EXP_DEVSTA 10 /* Device Status */
+#define msleep(x)                                                              \
+	do {                                                                   \
+		set_current_state(TASK_UNINTERRUPTIBLE);                       \
+		schedule_timeout((x * HZ) / 1000 + 2);                         \
+	} while (0)
+
+#endif /* < 2.6.8 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 9))
+#include 
+#define __iomem
+
+#ifndef kcalloc
+#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
+void *_kc_kzalloc(size_t size, int flags);
+#endif
+#define MSEC_PER_SEC 1000L
+static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
+{
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+	return (MSEC_PER_SEC / HZ) * j;
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+	return (j + (HZ / MSEC_PER_SEC) - 1) / (HZ / MSEC_PER_SEC);
+#else
+	return (j * MSEC_PER_SEC) / HZ;
+#endif
+}
+static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
+{
+	if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
+		return MAX_JIFFY_OFFSET;
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+	return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+	return m * (HZ / MSEC_PER_SEC);
+#else
+	return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
+#endif
+}
+
+#define msleep_interruptible _kc_msleep_interruptible
+static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
+{
+	unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
+
+	while (timeout && !signal_pending(current)) {
+		__set_current_state(TASK_INTERRUPTIBLE);
+		timeout = schedule_timeout(timeout);
+	}
+	return _kc_jiffies_to_msecs(timeout);
+}
+
+/* Basic mode control register. */
+#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000)         */
+
+#ifndef __le16
+#define __le16 u16
+#endif
+#ifndef __le32
+#define __le32 u32
+#endif
+#ifndef __le64
+#define __le64 u64
+#endif
+#ifndef __be16
+#define __be16 u16
+#endif
+#ifndef __be32
+#define __be32 u32
+#endif
+#ifndef __be64
+#define __be64 u64
+#endif
+
+static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
+{
+	return (struct vlan_ethhdr *)skb->mac.raw;
+}
+
+/* Wake-On-Lan options. */
+#define WAKE_PHY BIT(0)
+#define WAKE_UCAST BIT(1)
+#define WAKE_MCAST BIT(2)
+#define WAKE_BCAST BIT(3)
+#define WAKE_ARP BIT(4)
+#define WAKE_MAGIC BIT(5)
+#define WAKE_MAGICSECURE BIT(6) /* only meaningful if WAKE_MAGIC */
+
+#define skb_header_pointer _kc_skb_header_pointer
+static inline void *_kc_skb_header_pointer(const struct sk_buff *skb,
+					   int offset, int len, void *buffer)
+{
+	int hlen = skb_headlen(skb);
+
+	if (hlen - offset >= len)
+		return skb->data + offset;
+
+#ifdef MAX_SKB_FRAGS
+	if (skb_copy_bits(skb, offset, buffer, len) < 0)
+		return NULL;
+
+	return buffer;
+#else
+	return NULL;
+#endif
+}
+
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0
+#endif
+#ifndef NETDEV_TX_BUSY
+#define NETDEV_TX_BUSY 1
+#endif
+#ifndef NETDEV_TX_LOCKED
+#define NETDEV_TX_LOCKED -1
+#endif
+
+#ifndef __bitwise
+#define __bitwise
+#endif
+#endif /* < 2.6.9 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10))
+#ifdef module_param_array_named
+#undef module_param_array_named
+#define module_param_array_named(name, array, type, nump, perm)                \
+	static struct kparam_array __param_arr_##name = {                      \
+		ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,   \
+		sizeof(array[0]),  array                                       \
+	};                                                                     \
+	module_param_call(name, param_array_set, param_array_get,              \
+			  &__param_arr_##name, perm)
+#endif /* module_param_array_named */
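+/*
+ * The redefinition above keeps the usual calling convention, e.g. with a
+ * hypothetical parameter:
+ *
+ *	static int debug[8];
+ *	static unsigned int num_debug;
+ *	module_param_array_named(debug, debug, int, &num_debug, 0444);
+ */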
+/*
+ * num_online_nodes() is broken for all < 2.6.10 kernels.  This is needed to
+ * support the Node module parameter of rnp.
+ */
+#undef num_online_nodes
+#define num_online_nodes(n) 1
+extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES);
+#undef node_online_map
+#define node_online_map _kcompat_node_online_map
+#define pci_get_class pci_find_class
+#endif /* < 2.6.10 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11))
+#define PCI_D0 0
+#define PCI_D1 1
+#define PCI_D2 2
+#define PCI_D3hot 3
+#define PCI_D3cold 4
+typedef int pci_power_t;
+#define pci_choose_state(pdev, state) state
+#define PMSG_SUSPEND 3
+#define PCI_EXP_LNKCTL 16
+
+#undef NETIF_F_LLTX
+
+#ifndef ARCH_HAS_PREFETCH
+#define prefetch(X)
+#endif
+
+#ifndef NET_IP_ALIGN
+#define NET_IP_ALIGN 2
+#endif
+
+#define KC_USEC_PER_SEC 1000000L
+#define usecs_to_jiffies _kc_usecs_to_jiffies
+static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j)
+{
+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
+	return (KC_USEC_PER_SEC / HZ) * j;
+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
+	return (j + (HZ / KC_USEC_PER_SEC) - 1) / (HZ / KC_USEC_PER_SEC);
+#else
+	return (j * KC_USEC_PER_SEC) / HZ;
+#endif
+}
+static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m)
+{
+	if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET))
+		return MAX_JIFFY_OFFSET;
+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
+	return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ);
+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
+	return m * (HZ / KC_USEC_PER_SEC);
+#else
+	return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
+#endif
+}
+
+#define PCI_EXP_LNKCAP 12 /* Link Capabilities */
+#define PCI_EXP_LNKSTA 18 /* Link Status */
+#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
+#define PCI_EXP_SLTCTL 24 /* Slot Control */
+#define PCI_EXP_SLTSTA 26 /* Slot Status */
+#define PCI_EXP_RTCTL 28 /* Root Control */
+#define PCI_EXP_RTCAP 30 /* Root Capabilities */
+#define PCI_EXP_RTSTA 32 /* Root Status */
+#endif /* < 2.6.11 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12))
+#include <linux/reboot.h>
+#define USE_REBOOT_NOTIFIER
+
+/* Generic MII registers. */
+#define MII_CTRL1000 0x09 /* 1000BASE-T control          */
+#define MII_STAT1000 0x0a /* 1000BASE-T status           */
+/* Advertisement control register. */
+#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause               */
+#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause     */
+/* Link partner ability register. */
+#define LPA_PAUSE_CAP 0x0400 /* Can pause                   */
+#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymmetrically    */
+/* 1000BASE-T Control register */
+#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
+#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */
+/* 1000BASE-T Status register */
+#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */
+#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */
+
+#ifndef is_zero_ether_addr
+#define is_zero_ether_addr _kc_is_zero_ether_addr
+static inline int _kc_is_zero_ether_addr(const u8 *addr)
+{
+	return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
+}
+#endif /* is_zero_ether_addr */
+#ifndef is_multicast_ether_addr
+#define is_multicast_ether_addr _kc_is_multicast_ether_addr
+static inline int _kc_is_multicast_ether_addr(const u8 *addr)
+{
+	return addr[0] & 0x01;
+}
+#endif /* is_multicast_ether_addr */
+#endif /* < 2.6.12 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 13))
+#ifndef kstrdup
+#define kstrdup _kc_kstrdup
+char *_kc_kstrdup(const char *s, unsigned int gfp);
+#endif
+#endif /* < 2.6.13 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14))
+#define pm_message_t u32
+#ifndef kzalloc
+#define kzalloc _kc_kzalloc
+void *_kc_kzalloc(size_t size, int flags);
+#endif
+
+/* Generic MII registers. */
+#define MII_ESTATUS 0x0f /* Extended Status */
+/* Basic mode status register. */
+#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
+/* Extended status register. */
+#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
+#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
+
+#define SUPPORTED_Pause BIT(13)
+#define SUPPORTED_Asym_Pause BIT(14)
+#define ADVERTISED_Pause BIT(13)
+#define ADVERTISED_Asym_Pause BIT(14)
+
+#if (!(RHEL_RELEASE_CODE &&                                                    \
+       (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4, 3)) &&                     \
+       (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5, 0))))
+#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 9)) && !defined(gfp_t))
+#define gfp_t unsigned
+#else
+typedef unsigned gfp_t;
+#endif
+#endif /* !RHEL4.3->RHEL5.0 */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 9))
+#ifdef CONFIG_X86_64
+#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir)                 \
+	dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir))
+#define dma_sync_single_range_for_device(dev, addr, off, sz, dir)              \
+	dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir))
+#endif
+#endif
+#endif /* < 2.6.14 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 15))
+#ifndef kfree_rcu
+/* this is placed here due to a lack of rcu_barrier in previous kernels */
+#define kfree_rcu(_ptr, _offset) kfree(_ptr)
+#endif /* kfree_rcu */
+#ifndef vmalloc_node
+#define vmalloc_node(a, b) vmalloc(a)
+#endif /* vmalloc_node*/
+
+#define setup_timer(_timer, _function, _data)                                  \
+	do {                                                                   \
+		(_timer)->function = _function;                                \
+		(_timer)->data = _data;                                        \
+		init_timer(_timer);                                            \
+	} while (0)
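+/*
+ * Usage sketch for the setup_timer() fallback above (hypothetical names,
+ * old-style handler taking an unsigned long argument):
+ *
+ *	struct timer_list watchdog;
+ *
+ *	setup_timer(&watchdog, my_watchdog_fn, (unsigned long)adapter);
+ *	mod_timer(&watchdog, jiffies + 2 * HZ);
+ */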
+#ifndef device_can_wakeup
+#define device_can_wakeup(dev) (1)
+#endif
+#ifndef device_set_wakeup_enable
+#define device_set_wakeup_enable(dev, val)                                     \
+	do {                                                                   \
+	} while (0)
+#endif
+#ifndef device_init_wakeup
+#define device_init_wakeup(dev, val)                                           \
+	do {                                                                   \
+	} while (0)
+#endif
+static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2)
+{
+	const u16 *a = (const u16 *)addr1;
+	const u16 *b = (const u16 *)addr2;
+
+	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
+}
+#undef compare_ether_addr
+#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2)
+#endif /* < 2.6.15 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16))
+#undef DEFINE_MUTEX
+#define DEFINE_MUTEX(x) DECLARE_MUTEX(x)
+#define mutex_lock(x) down_interruptible(x)
+#define mutex_unlock(x) up(x)
+
+#ifndef ____cacheline_internodealigned_in_smp
+#ifdef CONFIG_SMP
+#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp
+#else
+#define ____cacheline_internodealigned_in_smp
+#endif /* CONFIG_SMP */
+#endif /* ____cacheline_internodealigned_in_smp */
+#undef HAVE_PCI_ERS
+#else /* 2.6.16 and above */
+#undef HAVE_PCI_ERS
+#define HAVE_PCI_ERS
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10, 4, 0))
+#ifdef device_can_wakeup
+#undef device_can_wakeup
+#endif /* device_can_wakeup */
+#define device_can_wakeup(dev) 1
+#endif /* SLE_VERSION(10,4,0) */
+#endif /* < 2.6.16 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17))
+#ifndef dev_notice
+#define dev_notice(dev, fmt, args...) dev_printk(KERN_NOTICE, dev, fmt, ##args)
+#endif
+
+#ifndef first_online_node
+#define first_online_node 0
+#endif
+#ifndef NET_SKB_PAD
+#define NET_SKB_PAD 16
+#endif
+#endif /* < 2.6.17 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
+
+#ifndef IRQ_HANDLED
+#define irqreturn_t void
+#define IRQ_HANDLED
+#define IRQ_NONE
+#endif
+
+#ifndef IRQF_PROBE_SHARED
+#ifdef SA_PROBEIRQ
+#define IRQF_PROBE_SHARED SA_PROBEIRQ
+#else
+#define IRQF_PROBE_SHARED 0
+#endif
+#endif
+
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#ifndef skb_is_gso
+#ifdef NETIF_F_TSO
+#define skb_is_gso _kc_skb_is_gso
+static inline int _kc_skb_is_gso(const struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->gso_size;
+}
+#else
+#define skb_is_gso(a) 0
+#endif
+#endif
+
+#ifndef resource_size_t
+#define resource_size_t unsigned long
+#endif
+
+#ifdef skb_pad
+#undef skb_pad
+#endif
+#define skb_pad(x, y) _kc_skb_pad(x, y)
+int _kc_skb_pad(struct sk_buff *skb, int pad);
+#ifdef skb_padto
+#undef skb_padto
+#endif
+#define skb_padto(x, y) _kc_skb_padto(x, y)
+static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len)
+{
+	unsigned int size = skb->len;
+	if (likely(size >= len))
+		return 0;
+	return _kc_skb_pad(skb, len - size);
+}
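+/*
+ * _kc_skb_padto() above mirrors the in-kernel helper: it evaluates to 0 when
+ * the skb is already at least len bytes long and otherwise asks _kc_skb_pad()
+ * to extend it, so the usual "if (skb_padto(skb, ETH_ZLEN)) ..." error check
+ * in transmit paths keeps working.
+ */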
+
+#ifndef DECLARE_PCI_UNMAP_ADDR
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) u32 LEN_NAME
+#define pci_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
+#define pci_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
+#endif /* DECLARE_PCI_UNMAP_ADDR */
+#endif /* < 2.6.18 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+enum pcie_link_width {
+	PCIE_LNK_WIDTH_RESRV = 0x00,
+	PCIE_LNK_X1 = 0x01,
+	PCIE_LNK_X2 = 0x02,
+	PCIE_LNK_X4 = 0x04,
+	PCIE_LNK_X8 = 0x08,
+	PCIE_LNK_X12 = 0x0C,
+	PCIE_LNK_X16 = 0x10,
+	PCIE_LNK_X32 = 0x20,
+	PCIE_LNK_WIDTH_UNKNOWN = 0xFF,
+};
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 0)))
+#define i_private u.generic_ip
+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.0) */
+
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
+#endif
+#ifndef __ALIGN_MASK
+#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 0))
+#if (!((RHEL_RELEASE_CODE &&                                                   \
+	((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4, 4) &&                    \
+	  RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5, 0)) ||                   \
+	 (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5, 0))))))
+typedef irqreturn_t (*irq_handler_t)(int, void *, struct pt_regs *);
+#endif
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 0))
+#undef CONFIG_INET_LRO
+#undef CONFIG_INET_LRO_MODULE
+#undef CONFIG_FCOE
+#undef CONFIG_FCOE_MODULE
+#endif
+typedef irqreturn_t (*new_handler_t)(int, void *);
+static inline irqreturn_t _kc_request_irq(unsigned int irq,
+					  new_handler_t handler,
+					  unsigned long flags,
+					  const char *devname, void *dev_id)
+#else /* 2.4.x */
+typedef void (*irq_handler_t)(int, void *, struct pt_regs *);
+typedef void (*new_handler_t)(int, void *);
+static inline int _kc_request_irq(unsigned int irq, new_handler_t handler,
+				  unsigned long flags, const char *devname,
+				  void *dev_id)
+#endif /* >= 2.5.x */
+{
+	irq_handler_t new_handler = (irq_handler_t)handler;
+	return request_irq(irq, new_handler, flags, devname, dev_id);
+}
+
+#undef request_irq
+#define request_irq(irq, handler, flags, devname, dev_id)                      \
+	_kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
+
+#define irq_handler_t new_handler_t
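+/*
+ * With the wrapper above, callers keep the post-2.6.19 two-argument handler
+ * prototype (hypothetical handler name):
+ *
+ *	static irqreturn_t my_isr(int irq, void *data);
+ *
+ *	err = request_irq(pdev->irq, my_isr, IRQF_SHARED, "rnp", adapter);
+ *
+ * and _kc_request_irq() casts it back to the older three-argument type that
+ * this kernel's request_irq() expects.
+ */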
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11))
+#ifndef skb_checksum_help
+static inline int __kc_skb_checksum_help(struct sk_buff *skb)
+{
+	return skb_checksum_help(skb, 0);
+}
+#define skb_checksum_help(skb) __kc_skb_checksum_help((skb))
+#endif
+#endif /* < 2.6.19 && >= 2.6.11 */
+
+/* pci_restore_state and pci_save_state handle MSI/PCIE from 2.6.19 */
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 4)))
+#define PCIE_CONFIG_SPACE_LEN 256
+#define PCI_CONFIG_SPACE_LEN 64
+#define PCIE_LINK_STATUS 0x12
+#define pci_config_space_ich8lan()                                             \
+	do {                                                                   \
+	} while (0)
+#undef pci_save_state
+int _kc_pci_save_state(struct pci_dev *);
+#define pci_save_state(pdev) _kc_pci_save_state(pdev)
+#undef pci_restore_state
+void _kc_pci_restore_state(struct pci_dev *);
+#define pci_restore_state(pdev) _kc_pci_restore_state(pdev)
+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
+
+#ifdef HAVE_PCI_ERS
+#undef free_netdev
+void _kc_free_netdev(struct net_device *);
+#define free_netdev(netdev) _kc_free_netdev(netdev)
+#endif
+static inline int
+pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev)
+{
+	return 0;
+}
+#define pci_disable_pcie_error_reporting(dev)                                  \
+	do {                                                                   \
+	} while (0)
+#define pci_cleanup_aer_uncorrect_error_status(dev)                            \
+	do {                                                                   \
+	} while (0)
+
+void *_kc_kmemdup(const void *src, size_t len, unsigned gfp);
+#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp)
+#ifndef bool
+#define bool _Bool
+#define true 1
+#define false 0
+#endif
+#else /* 2.6.19 */
+#include <linux/aer.h>
+#include <linux/pci_hotplug.h>
+
+#define NEW_SKB_CSUM_HELP
+#endif /* < 2.6.19 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 28))
+#undef INIT_WORK
+#define INIT_WORK(_work, _func)                                                \
+	do {                                                                   \
+		INIT_LIST_HEAD(&(_work)->entry);                               \
+		(_work)->pending = 0;                                          \
+		(_work)->func = (void (*)(void *))_func;                       \
+		(_work)->data = _work;                                         \
+		init_timer(&(_work)->timer);                                   \
+	} while (0)
+#endif
+
+#ifndef PCI_VDEVICE
+#define PCI_VDEVICE(ven, dev)                                                  \
+	PCI_VENDOR_ID_##ven, (dev), PCI_ANY_ID, PCI_ANY_ID, 0, 0
+#endif
+
+#ifndef PCI_VENDOR_ID_INTEL
+#define PCI_VENDOR_ID_INTEL 0x8086
+#endif
+
+#ifndef round_jiffies
+#define round_jiffies(x) x
+#endif
+
+#define csum_offset csum
+
+#define HAVE_EARLY_VMALLOC_NODE
+#define dev_to_node(dev) -1
+#undef set_dev_node
+/* remove compiler warning with b=b, for unused variable */
+#define set_dev_node(a, b)                                                     \
+	do {                                                                   \
+		(b) = (b);                                                     \
+	} while (0)
+
+#if (!(RHEL_RELEASE_CODE &&                                                    \
+       (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4, 7)) &&                  \
+	 (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5, 0))) ||                  \
+	(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 6)))) &&                 \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10, 2, 0)))
+typedef __u16 __bitwise __sum16;
+typedef __u32 __bitwise __wsum;
+#endif
+
+#if (!(RHEL_RELEASE_CODE &&                                                    \
+       (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4, 7)) &&                  \
+	 (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5, 0))) ||                  \
+	(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 4)))) &&                 \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10, 2, 0)))
+static inline __wsum csum_unfold(__sum16 n)
+{
+	return (__force __wsum)n;
+}
+#endif
+
+#else /* < 2.6.20 */
+#define HAVE_DEVICE_NUMA_NODE
+#endif /* < 2.6.20 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21))
+#define to_net_dev(class) container_of(class, struct net_device, class_dev)
+#define NETDEV_CLASS_DEV
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5, 5)))
+#define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
+#define vlan_group_set_device(vg, id, dev)                                     \
+	do {                                                                   \
+		if (vg)                                                        \
+			vg->vlan_devices[id] = dev;                            \
+	} while (0)
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */
+#define pci_channel_offline(pdev)                                              \
+	(pdev->error_state && pdev->error_state != pci_channel_io_normal)
+#define pci_request_selected_regions(pdev, bars, name)                         \
+	pci_request_regions(pdev, name)
+#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev);
+
+#ifndef __aligned
+#define __aligned(x) __attribute__((aligned(x)))
+#endif
+
+struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev);
+#define netdev_to_dev(netdev) pci_dev_to_dev(_kc_netdev_to_pdev(netdev))
+#define devm_kzalloc(dev, size, flags) kzalloc(size, flags)
+#define devm_kfree(dev, p) kfree(p)
+#else /* 2.6.21 */
+static inline struct device *netdev_to_dev(struct net_device *netdev)
+{
+	return &netdev->dev;
+}
+
+#endif /* < 2.6.21 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22))
+#define tcp_hdr(skb) (skb->h.th)
+#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
+#define skb_transport_offset(skb) (skb->h.raw - skb->data)
+#define skb_transport_header(skb) (skb->h.raw)
+#define ipv6_hdr(skb) (skb->nh.ipv6h)
+#define ip_hdr(skb) (skb->nh.iph)
+#define skb_network_offset(skb) (skb->nh.raw - skb->data)
+#define skb_network_header(skb) (skb->nh.raw)
+#define skb_tail_pointer(skb) skb->tail
+#define skb_reset_tail_pointer(skb)                                            \
+	do {                                                                   \
+		skb->tail = skb->data;                                         \
+	} while (0)
+#define skb_set_tail_pointer(skb, offset)                                      \
+	do {                                                                   \
+		skb->tail = skb->data + offset;                                \
+	} while (0)
+#define skb_copy_to_linear_data(skb, from, len) memcpy(skb->data, from, len)
+#define skb_copy_to_linear_data_offset(skb, offset, from, len)                 \
+	memcpy(skb->data + offset, from, len)
+#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
+#define pci_register_driver pci_module_init
+#define skb_mac_header(skb) skb->mac.raw
+
+#ifdef NETIF_F_MULTI_QUEUE
+#ifndef alloc_etherdev_mq
+#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
+#endif
+#endif /* NETIF_F_MULTI_QUEUE */
+
+#ifndef ETH_FCS_LEN
+#define ETH_FCS_LEN 4
+#endif
+#define cancel_work_sync(x) flush_scheduled_work()
+#ifndef udp_hdr
+#define udp_hdr _udp_hdr
+static inline struct udphdr *_udp_hdr(const struct sk_buff *skb)
+{
+	return (struct udphdr *)skb_transport_header(skb);
+}
+#endif
+
+#ifdef cpu_to_be16
+#undef cpu_to_be16
+#endif
+#define cpu_to_be16(x) __constant_htons(x)
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5, 1)))
+enum { DUMP_PREFIX_NONE, DUMP_PREFIX_ADDRESS, DUMP_PREFIX_OFFSET };
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */
+#ifndef hex_asc
+#define hex_asc(x) "0123456789abcdef"[x]
+#endif
+#include <linux/ctype.h>
+void _kc_print_hex_dump(const char *level, const char *prefix_str,
+			int prefix_type, int rowsize, int groupsize,
+			const void *buf, size_t len, bool ascii);
+#define print_hex_dump(lvl, s, t, r, g, b, l, a)                               \
+	_kc_print_hex_dump(lvl, s, t, r, g, b, l, a)
+#ifndef ADVERTISED_2500baseX_Full
+#define ADVERTISED_2500baseX_Full BIT(15)
+#endif
+#ifndef SUPPORTED_2500baseX_Full
+#define SUPPORTED_2500baseX_Full BIT(15)
+#endif
+
+#ifndef ETH_P_PAUSE
+#define ETH_P_PAUSE 0x8808
+#endif
+
+static inline int compound_order(struct page *page)
+{
+	return 0;
+}
+
+#define __must_be_array(a) 0
+
+#ifndef SKB_WITH_OVERHEAD
+#define SKB_WITH_OVERHEAD(X)                                                   \
+	((X)-SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#endif
+#else /* 2.6.22 */
+#define ETH_TYPE_TRANS_SETS_DEV
+#define HAVE_NETDEV_STATS_IN_NETDEV
+#endif /* < 2.6.22 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22))
+#undef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev)                                                  \
+	do {                                                                   \
+	} while (0)
+#endif /* > 2.6.22 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23))
+#define netif_subqueue_stopped(_a, _b) 0
+#ifndef PTR_ALIGN
+#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
+#endif
+
+#ifndef CONFIG_PM_SLEEP
+#define CONFIG_PM_SLEEP CONFIG_PM
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
+#define HAVE_ETHTOOL_GET_PERM_ADDR
+#endif /* 2.6.14 through 2.6.22 */
+
+static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom)
+{
+	int delta = 0;
+
+	if (headroom > (skb->data - skb->head))
+		delta = headroom - (skb->data - skb->head);
+
+	if (delta || skb_header_cloned(skb))
+		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
+					GFP_ATOMIC);
+	return 0;
+}
+#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h))
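+/*
+ * __kc_skb_cow_head() above follows the later skb_cow_head() semantics: it
+ * returns 0 if the skb already has at least 'headroom' bytes of headroom and
+ * an uncloned header, and otherwise reallocates the header via
+ * pskb_expand_head(), returning its (possibly negative) result.
+ */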
+#endif /* < 2.6.23 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#ifndef ETH_FLAG_LRO
+#define ETH_FLAG_LRO NETIF_F_LRO
+#endif
+
+#ifndef ACCESS_ONCE
+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#endif
+
+/* if GRO is supported then the napi struct must already exist */
+#ifndef NETIF_F_GRO
+/* NAPI API changes in 2.6.24 break everything */
+struct napi_struct {
+	/* used to look up the real NAPI polling routine */
+	int (*poll)(struct napi_struct *, int);
+	struct net_device *dev;
+	int weight;
+};
+#endif
+
+#ifdef NAPI
+int __kc_adapter_clean(struct net_device *, int *);
+/* The following definitions are multi-queue aware. A driver define list
+ * determines which drivers support multiple queues and therefore need these
+ * stronger defines. A driver that does not support multi-queue functionality
+ * does not need to be added to the list.
+ */
+struct net_device *napi_to_poll_dev(const struct napi_struct *napi);
+
+static inline void
+__kc_mq_netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+		       int (*poll)(struct napi_struct *, int), int weight)
+{
+	struct net_device *poll_dev = napi_to_poll_dev(napi);
+	poll_dev->poll = __kc_adapter_clean;
+	poll_dev->priv = napi;
+	poll_dev->weight = weight;
+	set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state);
+	set_bit(__LINK_STATE_START, &poll_dev->state);
+	dev_hold(poll_dev);
+	napi->poll = poll;
+	napi->weight = weight;
+	napi->dev = dev;
+}
+#define netif_napi_add __kc_mq_netif_napi_add
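+/*
+ * Drivers still make the usual call (hypothetical example):
+ *
+ *	netif_napi_add(netdev, &ring->napi, my_poll, 64);
+ *
+ * but on these kernels the backport above routes polling through the dummy
+ * net_device returned by napi_to_poll_dev(); its ->poll, ->priv and ->weight
+ * fields are what __kc_adapter_clean() later uses to reach the real handler.
+ */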
+
+static inline void __kc_mq_netif_napi_del(struct napi_struct *napi)
+{
+	struct net_device *poll_dev = napi_to_poll_dev(napi);
+	WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state));
+	dev_put(poll_dev);
+	memset(poll_dev, 0, sizeof(struct net_device));
+}
+
+#define netif_napi_del __kc_mq_netif_napi_del
+
+static inline bool __kc_mq_napi_schedule_prep(struct napi_struct *napi)
+{
+	return netif_running(napi->dev) &&
+	       netif_rx_schedule_prep(napi_to_poll_dev(napi));
+}
+#define napi_schedule_prep __kc_mq_napi_schedule_prep
+
+static inline void __kc_mq_napi_schedule(struct napi_struct *napi)
+{
+	if (napi_schedule_prep(napi))
+		__netif_rx_schedule(napi_to_poll_dev(napi));
+}
+#define napi_schedule __kc_mq_napi_schedule
+
+#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi))
+#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi))
+#ifdef CONFIG_SMP
+static inline void napi_synchronize(const struct napi_struct *n)
+{
+	struct net_device *dev = napi_to_poll_dev(n);
+
+	while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
+		/* No hurry. */
+		msleep(1);
+	}
+}
+#else
+#define napi_synchronize(n) barrier()
+#endif /* CONFIG_SMP */
+#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
+static inline void _kc_napi_complete(struct napi_struct *napi)
+{
+#ifdef NETIF_F_GRO
+	napi_gro_flush(napi);
+#endif
+	netif_rx_complete(napi_to_poll_dev(napi));
+}
+#define napi_complete _kc_napi_complete
+#else /* NAPI */
+
+/* The following definitions are only used if we don't support NAPI at all. */
+
+static inline void __kc_netif_napi_add(struct net_device *dev,
+				       struct napi_struct *napi,
+				       int (*poll)(struct napi_struct *, int),
+				       int weight)
+{
+	dev->poll = poll;
+	dev->weight = weight;
+	napi->poll = poll;
+	napi->weight = weight;
+	napi->dev = dev;
+}
+#define netif_napi_del(_a)                                                     \
+	do {                                                                   \
+	} while (0)
+#endif /* NAPI */
+
+#undef dev_get_by_name
+#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
+#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
+#ifndef DMA_BIT_MASK
+#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL << (n)) - 1))
+#endif
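+/*
+ * Example with the fallback above (hypothetical call site):
+ *
+ *	dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32));
+ *
+ * The n == 64 special case avoids the undefined 1ULL << 64 shift by mapping
+ * straight to DMA_64BIT_MASK.
+ */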
+
+#ifdef NETIF_F_TSO6
+#define skb_is_gso_v6 _kc_skb_is_gso_v6
+static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
+}
+#endif /* NETIF_F_TSO6 */
+
+#ifndef KERN_CONT
+#define KERN_CONT ""
+#endif
+#ifndef pr_err
+#define pr_err(fmt, arg...) printk(KERN_ERR fmt, ##arg)
+#endif
+
+#ifndef rounddown_pow_of_two
+#define rounddown_pow_of_two(n)                                                \
+	__builtin_constant_p(n) ? ((n == 1) ? 0 : (1UL << ilog2(n))) :         \
+				  (1UL << (fls_long(n) - 1))
+#endif
+
+#else /* < 2.6.24 */
+#define HAVE_ETHTOOL_GET_SSET_COUNT
+#define HAVE_NETDEV_NAPI_LIST
+#endif /* < 2.6.24 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 24))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
+#define INCLUDE_PM_QOS_PARAMS_H
+#include <linux/pm_qos_params.h>
+#else /* >= 3.2.0 */
+#include <linux/pm_qos.h>
+#endif /* else >= 3.2.0 */
+#endif /* > 2.6.24 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
+#define PM_QOS_CPU_DMA_LATENCY 1
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
+#include <linux/latency.h>
+#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY
+#define pm_qos_add_requirement(pm_qos_class, name, value)                      \
+	set_acceptable_latency(name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name)                          \
+	remove_acceptable_latency(name)
+#define pm_qos_update_requirement(pm_qos_class, name, value)                   \
+	modify_acceptable_latency(name, value)
+#else
+#define PM_QOS_DEFAULT_VALUE -1
+#define pm_qos_add_requirement(pm_qos_class, name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name)
+#define pm_qos_update_requirement(pm_qos_class, name, value)                   \
+	{                                                                      \
+		if (value != PM_QOS_DEFAULT_VALUE) {                           \
+			printk(KERN_WARNING                                    \
+			       "%s: unable to set PM QoS requirement\n",       \
+			       pci_name(adapter->pdev));                       \
+		}                                                              \
+	}
+
+#endif /* > 2.6.18 */
+
+#define pci_enable_device_mem(pdev) pci_enable_device(pdev)
+
+#ifndef DEFINE_PCI_DEVICE_TABLE
+#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[]
+#endif /* DEFINE_PCI_DEVICE_TABLE */
+
+#ifndef strict_strtol
+#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r)
+static inline int _kc_strict_strtol(const char *buf, unsigned int base,
+				    long *res)
+{
+	/* adapted from strict_strtoul() in 2.6.25 */
+	char *tail;
+	long val;
+	size_t len;
+
+	*res = 0;
+	len = strlen(buf);
+	if (!len)
+		return -EINVAL;
+	val = simple_strtol(buf, &tail, base);
+	if (tail == buf)
+		return -EINVAL;
+	if ((*tail == '\0') ||
+	    ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) {
+		*res = val;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#ifndef RNP_PROCFS
+#define RNP_PROCFS
+#endif /* RNP_PROCFS */
+#endif /* >= 2.6.0 */
+
+#else /* < 2.6.25 */
+
+#if IS_ENABLED(CONFIG_SYSFS)
+#ifndef RNP_SYSFS
+#define RNP_SYSFS
+#endif /* RNP_SYSFS */
+#endif /* CONFIG_SYSFS */
+#if IS_ENABLED(CONFIG_HWMON)
+#ifndef RNPGBE_HWMON
+#define RNPGBE_HWMON
+#endif /* RNPGBE_HWMON */
+#endif /* CONFIG_HWMON */
+
+#endif /* < 2.6.25 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
+#ifndef clamp_t
+#define clamp_t(type, val, min, max)                                           \
+	({                                                                     \
+		type __val = (val);                                            \
+		type __min = (min);                                            \
+		type __max = (max);                                            \
+		__val = __val < __min ? __min : __val;                         \
+		__val > __max ? __max : __val;                                 \
+	})
+#endif /* clamp_t */
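+/*
+ * clamp_t() above evaluates to 'val' limited to the [min, max] range in the
+ * given type, e.g. clamp_t(u16, budget, 1, 64) never yields less than 1 or
+ * more than 64.
+ */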
+#undef kzalloc_node
+#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags)
+
+void _kc_pci_disable_link_state(struct pci_dev *dev, int state);
+#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s)
+#else /* < 2.6.26 */
+#define NETDEV_CAN_SET_GSO_MAX_SIZE
+#ifdef HAVE_PCI_ASPM_H
+#include <linux/pci-aspm.h>
+#endif
+#define HAVE_NETDEV_VLAN_FEATURES
+#ifndef PCI_EXP_LNKCAP_ASPMS
+#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
+#endif /* PCI_EXP_LNKCAP_ASPMS */
+#endif /* < 2.6.26 */
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27))
+static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep,
+					     __u32 speed)
+{
+	ep->speed = (__u16)speed;
+	/* ep->speed_hi = (__u16)(speed >> 16); */
+}
+#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set
+
+static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep)
+{
+	/* no speed_hi before 2.6.27, and probably no need for it yet */
+	return (__u32)ep->speed;
+}
+#define ethtool_cmd_speed _kc_ethtool_cmd_speed
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 15))
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)) && defined(CONFIG_PM))
+#define ANCIENT_PM 1
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)) &&                     \
+       (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)) &&                      \
+       defined(CONFIG_PM_SLEEP))
+#define NEWER_PM 1
+#endif
+#if defined(ANCIENT_PM) || defined(NEWER_PM)
+#undef device_set_wakeup_enable
+#define device_set_wakeup_enable(dev, val)                                     \
+	do {                                                                   \
+		u16 pmc = 0;                                                   \
+		int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM);    \
+		if (pm) {                                                      \
+			pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC,   \
+					     &pmc);                            \
+		}                                                              \
+		(dev)->power.can_wakeup = !!(pmc >> 11);                       \
+		(dev)->power.should_wakeup = (val && (pmc >> 11));             \
+	} while (0)
+#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */
+#endif /* 2.6.15 through 2.6.27 */
+#ifndef netif_napi_del
+#define netif_napi_del(_a)                                                     \
+	do {                                                                   \
+	} while (0)
+#ifdef NAPI
+#ifdef CONFIG_NETPOLL
+#undef netif_napi_del
+#define netif_napi_del(_a) list_del(&(_a)->dev_list);
+#endif
+#endif
+#endif /* netif_napi_del */
+#ifdef dma_mapping_error
+#undef dma_mapping_error
+#endif
+#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr)
+
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+#define HAVE_TX_MQ
+#endif
+
+#ifndef DMA_ATTR_WEAK_ORDERING
+#define DMA_ATTR_WEAK_ORDERING 0
+#endif
+
+#ifdef HAVE_TX_MQ
+void _kc_netif_tx_stop_all_queues(struct net_device *);
+void _kc_netif_tx_wake_all_queues(struct net_device *);
+void _kc_netif_tx_start_all_queues(struct net_device *);
+#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a)
+#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a)
+#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a)
+#undef netif_stop_subqueue
+#define netif_stop_subqueue(_ndev, _qi)                                        \
+	do {                                                                   \
+		if (netif_is_multiqueue((_ndev)))                              \
+			netif_stop_subqueue((_ndev), (_qi));                   \
+		else                                                           \
+			netif_stop_queue((_ndev));                             \
+	} while (0)
+#undef netif_start_subqueue
+#define netif_start_subqueue(_ndev, _qi)                                       \
+	do {                                                                   \
+		if (netif_is_multiqueue((_ndev)))                              \
+			netif_start_subqueue((_ndev), (_qi));                  \
+		else                                                           \
+			netif_start_queue((_ndev));                            \
+	} while (0)
+#else /* HAVE_TX_MQ */
+#define netif_tx_stop_all_queues(a) netif_stop_queue(a)
+#define netif_tx_wake_all_queues(a) netif_wake_queue(a)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 12))
+#define netif_tx_start_all_queues(a) netif_start_queue(a)
+#else
+#define netif_tx_start_all_queues(a)                                           \
+	do {                                                                   \
+	} while (0)
+#endif
+#define netif_stop_subqueue(_ndev, _qi) netif_stop_queue((_ndev))
+#define netif_start_subqueue(_ndev, _qi) netif_start_queue((_ndev))
+#endif /* HAVE_TX_MQ */
+#ifndef NETIF_F_MULTI_QUEUE
+#define NETIF_F_MULTI_QUEUE 0
+#define netif_is_multiqueue(a) 0
+#define netif_wake_subqueue(a, b)
+#endif /* NETIF_F_MULTI_QUEUE */
+
+#ifndef __WARN_printf
+void __kc_warn_slowpath(const char *file, const int line, const char *fmt, ...)
+	__attribute__((format(printf, 3, 4)));
+#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg)
+#endif /* __WARN_printf */
+
+#ifndef WARN
+#define WARN(condition, format...)                                             \
+	({                                                                     \
+		int __ret_warn_on = !!(condition);                             \
+		if (unlikely(__ret_warn_on))                                   \
+			__WARN_printf(format);                                 \
+		unlikely(__ret_warn_on);                                       \
+	})
+#endif /* WARN */
+#undef HAVE_RNP_DEBUG_FS
+#else /* < 2.6.27 */
+#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set
+static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep,
+					     __u32 speed)
+{
+	ep->speed = (__u16)(speed & 0xFFFF);
+	ep->speed_hi = (__u16)(speed >> 16);
+}
+#define HAVE_TX_MQ
+#define HAVE_NETDEV_SELECT_QUEUE
+#ifdef CONFIG_DEBUG_FS
+#define HAVE_RNP_DEBUG_FS
+#endif /* CONFIG_DEBUG_FS */
+#endif /* < 2.6.27 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28))
+#define pci_ioremap_bar(pdev, bar)                                             \
+	ioremap(pci_resource_start(pdev, bar), pci_resource_len(pdev, bar))
+#define pci_wake_from_d3 _kc_pci_wake_from_d3
+#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep
+int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable);
+int _kc_pci_prepare_to_sleep(struct pci_dev *dev);
+#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC)
+#ifndef __skb_queue_head_init
+static inline void __kc_skb_queue_head_init(struct sk_buff_head *list)
+{
+	list->prev = list->next = (struct sk_buff *)list;
+	list->qlen = 0;
+}
+#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q)
+#endif
+
+#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
+#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
+
+#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */
+#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */
+
+#endif /* < 2.6.28 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29))
+#ifndef swap
+#define swap(a, b)                                                             \
+	do {                                                                   \
+		typeof(a) __tmp = (a);                                         \
+		(a) = (b);                                                     \
+		(b) = __tmp;                                                   \
+	} while (0)
+#endif
+#define pci_request_selected_regions_exclusive(pdev, bars, name)               \
+	pci_request_selected_regions(pdev, bars, name)
+#ifndef CONFIG_NR_CPUS
+#define CONFIG_NR_CPUS 1
+#endif /* CONFIG_NR_CPUS */
+#ifndef pcie_aspm_enabled
+#define pcie_aspm_enabled() (1)
+#endif /* pcie_aspm_enabled */
+
+#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */
+
+#ifndef PCI_EXP_LNKSTA_CLS
+#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */
+#endif
+#ifndef PCI_EXP_LNKSTA_NLW
+#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */
+#endif
+
+#ifndef pci_clear_master
+void _kc_pci_clear_main(struct pci_dev *dev);
+#define pci_clear_master(dev) _kc_pci_clear_main(dev)
+#endif
+
+#ifndef PCI_EXP_LNKCTL_ASPMC
+#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */
+#endif
+
+#ifndef PCI_EXP_LNKCAP_MLW
+#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */
+#endif
+
+#else /* < 2.6.29 */
+#ifndef HAVE_NET_DEVICE_OPS
+#define HAVE_NET_DEVICE_OPS
+#endif
+#ifdef CONFIG_DCB
+#define HAVE_PFC_MODE_ENABLE
+#endif /* CONFIG_DCB */
+#endif /* < 2.6.29 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30))
+#define NO_PTP_SUPPORT
+#define skb_rx_queue_recorded(a) false
+#define skb_get_rx_queue(a) 0
+#define skb_record_rx_queue(a, b)                                              \
+	do {                                                                   \
+	} while (0)
+#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues)
+#undef CONFIG_FCOE
+#undef CONFIG_FCOE_MODULE
+#ifndef CONFIG_PCI_IOV
+#undef pci_enable_sriov
+#define pci_enable_sriov(a, b) -ENOTSUPP
+#undef pci_disable_sriov
+#define pci_disable_sriov(a)                                                   \
+	do {                                                                   \
+	} while (0)
+#endif /* CONFIG_PCI_IOV */
+#ifndef pr_cont
+#define pr_cont(fmt, ...) printk(KERN_CONT fmt, ##__VA_ARGS__)
+#endif /* pr_cont */
+static inline void _kc_synchronize_irq(unsigned int a)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 28))
+	synchronize_irq();
+#else /* < 2.5.28 */
+	synchronize_irq(a);
+#endif /* < 2.5.28 */
+}
+#undef synchronize_irq
+#define synchronize_irq(a) _kc_synchronize_irq(a)
+
+#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
+
+#ifdef nr_cpus_node
+#undef nr_cpus_node
+#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
+#endif
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 5))
+#define HAVE_PCI_DEV_IS_VIRTFN_BIT
+#endif /* RHEL >= 5.5 */
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 5)))
+static inline bool pci_is_root_bus(struct pci_bus *pbus)
+{
+	return !(pbus->parent);
+}
+#endif
+
+#else /* < 2.6.30 */
+#define HAVE_ASPM_QUIRKS
+#define HAVE_PCI_DEV_IS_VIRTFN_BIT
+#endif /* < 2.6.30 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+#define ETH_P_1588 0x88F7
+#define ETH_P_FIP 0x8914
+#ifndef netdev_uc_count
+#define netdev_uc_count(dev) ((dev)->uc_count)
+#endif
+#ifndef netdev_for_each_uc_addr
+#define netdev_for_each_uc_addr(uclist, dev)                                   \
+	for (uclist = dev->uc_list; uclist; uclist = uclist->next)
+#endif
+#ifndef PORT_OTHER
+#define PORT_OTHER 0xff
+#endif
+#ifndef MDIO_PHY_ID_PRTAD
+#define MDIO_PHY_ID_PRTAD 0x03e0
+#endif
+#ifndef MDIO_PHY_ID_DEVAD
+#define MDIO_PHY_ID_DEVAD 0x001f
+#endif
+#ifndef skb_dst
+#define skb_dst(s) ((s)->dst)
+#endif
+
+#ifndef SUPPORTED_1000baseKX_Full
+#define SUPPORTED_1000baseKX_Full BIT(17)
+#endif
+#ifndef SUPPORTED_10000baseKX4_Full
+#define SUPPORTED_10000baseKX4_Full BIT(18)
+#endif
+#ifndef SUPPORTED_10000baseKR_Full
+#define SUPPORTED_10000baseKR_Full BIT(19)
+#endif
+
+#ifndef ADVERTISED_1000baseKX_Full
+#define ADVERTISED_1000baseKX_Full BIT(17)
+#endif
+#ifndef ADVERTISED_10000baseKX4_Full
+#define ADVERTISED_10000baseKX4_Full BIT(18)
+#endif
+#ifndef ADVERTISED_10000baseKR_Full
+#define ADVERTISED_10000baseKR_Full BIT(19)
+#endif
+
+static inline unsigned long dev_trans_start(struct net_device *dev)
+{
+	return dev->trans_start;
+}
+#else /* < 2.6.31 */
+#ifndef HAVE_NETDEV_STORAGE_ADDRESS
+#define HAVE_NETDEV_STORAGE_ADDRESS
+#endif
+#ifndef HAVE_NETDEV_HW_ADDR
+#define HAVE_NETDEV_HW_ADDR
+#endif
+#ifndef HAVE_TRANS_START_IN_QUEUE
+#define HAVE_TRANS_START_IN_QUEUE
+#endif
+#ifndef HAVE_INCLUDE_LINUX_MDIO_H
+#define HAVE_INCLUDE_LINUX_MDIO_H
+#endif
+#include <linux/mdio.h>
+#endif /* < 2.6.31 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32))
+#undef netdev_tx_t
+#define netdev_tx_t int
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef NETIF_F_FCOE_MTU
+#define NETIF_F_FCOE_MTU BIT(26)
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+static inline int _kc_pm_runtime_get_sync(void)
+{
+	return 1;
+}
+#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync()
+#else /* 2.6.0 => 2.6.32 */
+static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev)
+{
+	return 1;
+}
+#ifndef pm_runtime_get_sync
+#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev)
+#endif
+#endif /* 2.6.0 => 2.6.32 */
+#ifndef pm_runtime_put
+#define pm_runtime_put(dev)                                                    \
+	do {                                                                   \
+	} while (0)
+#endif
+#ifndef pm_runtime_put_sync
+#define pm_runtime_put_sync(dev)                                               \
+	do {                                                                   \
+	} while (0)
+#endif
+#ifndef pm_runtime_resume
+#define pm_runtime_resume(dev)                                                 \
+	do {                                                                   \
+	} while (0)
+#endif
+#ifndef pm_schedule_suspend
+#define pm_schedule_suspend(dev, t)                                            \
+	do {                                                                   \
+	} while (0)
+#endif
+#ifndef pm_runtime_set_suspended
+#define pm_runtime_set_suspended(dev)                                          \
+	do {                                                                   \
+	} while (0)
+#endif
+#ifndef pm_runtime_disable
+#define pm_runtime_disable(dev)                                                \
+	do {                                                                   \
+	} while (0)
+#endif
+#ifndef pm_runtime_put_noidle
+#define pm_runtime_put_noidle(dev)                                             \
+	do {                                                                   \
+	} while (0)
+#endif
+#ifndef pm_runtime_set_active
+#define pm_runtime_set_active(dev)                                             \
+	do {                                                                   \
+	} while (0)
+#endif
+#ifndef pm_runtime_enable
+#define pm_runtime_enable(dev)                                                 \
+	do {                                                                   \
+	} while (0)
+#endif
+#ifndef pm_runtime_get_noresume
+#define pm_runtime_get_noresume(dev)                                           \
+	do {                                                                   \
+	} while (0)
+#endif
+#else /* < 2.6.32 */
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 2)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)))
+#define HAVE_RHEL6_NET_DEVICE_EXTENDED
+#endif /* RHEL >= 6.2 && RHEL < 7.0 */
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 6)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)))
+#define HAVE_RHEL6_NET_DEVICE_OPS_EXT
+#define HAVE_NDO_SET_FEATURES
+#endif /* RHEL >= 6.6 && RHEL < 7.0 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
+#define HAVE_NETDEV_OPS_FCOE_ENABLE
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#ifdef CONFIG_DCB
+#ifndef HAVE_DCBNL_OPS_GETAPP
+#define HAVE_DCBNL_OPS_GETAPP
+#endif
+#endif /* CONFIG_DCB */
+#include <linux/pm_runtime.h>
+/* IOV bad DMA target workarounds require at least this kernel rev support */
+#define HAVE_PCIE_TYPE
+#endif /* < 2.6.32 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
+#ifndef pci_pcie_cap
+#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP)
+#endif
+#ifndef IPV4_FLOW
+#define IPV4_FLOW 0x10
+#endif /* IPV4_FLOW */
+#ifndef IPV6_FLOW
+#define IPV6_FLOW 0x11
+#endif /* IPV6_FLOW */
+/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */
+#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 0)) || \
+     (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 1, 0)))
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
+#define HAVE_NETDEV_OPS_FCOE_GETWWN
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#endif /* RHEL6 or SLES11 SP1 */
+#ifndef __percpu
+#define __percpu
+#endif /* __percpu */
+
+#ifndef PORT_DA
+#define PORT_DA PORT_OTHER
+#endif /* PORT_DA */
+#ifndef PORT_NONE
+#define PORT_NONE PORT_OTHER
+#endif
+
+#if ((RHEL_RELEASE_CODE &&                                                     \
+      (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 3)) &&                     \
+      (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0))))
+#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE)
+#undef DEFINE_DMA_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
+#undef DEFINE_DMA_UNMAP_LEN
+#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
+#undef dma_unmap_addr
+#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
+#undef dma_unmap_addr_set
+#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
+#undef dma_unmap_len
+#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
+#undef dma_unmap_len_set
+#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
+#endif /* !CONFIG_X86_32 && !CONFIG_NEED_DMA_MAP_STATE */
+#endif /* RHEL_RELEASE_CODE */
+
+#if (!(RHEL_RELEASE_CODE &&                                                    \
+       (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 8)) &&                  \
+	 (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 0))) ||                  \
+	((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 1)) &&                  \
+	 (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0))))))
+static inline bool pci_is_pcie(struct pci_dev *dev)
+{
+	return !!pci_pcie_cap(dev);
+}
+#endif /* RHEL_RELEASE_CODE */
+
+#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 2))))
+#define sk_tx_queue_get(_sk) (-1)
+#define sk_tx_queue_set(_sk, _tx_queue)                                        \
+	do {                                                                   \
+	} while (0)
+#endif /* !(RHEL >= 6.2) */
+
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)))
+#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#define HAVE_ETHTOOL_GRXFHINDIR_SIZE
+#define HAVE_ETHTOOL_SET_PHYS_ID
+#define HAVE_ETHTOOL_GET_TS_INFO
+#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 5))
+#define HAVE_ETHTOOL_GSRSSH
+#define HAVE_RHEL6_SRIOV_CONFIGURE
+#define HAVE_RXFH_NONCONST
+#endif /* RHEL > 6.5 */
+#endif /* RHEL >= 6.4 && RHEL < 7.0 */
+
+#else /* < 2.6.33 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
+#define HAVE_NETDEV_OPS_FCOE_GETWWN
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#endif /* < 2.6.33 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34))
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 0))
+#ifndef pci_num_vf
+#define pci_num_vf(pdev) _kc_pci_num_vf(pdev)
+int _kc_pci_num_vf(struct pci_dev *dev);
+#endif
+#endif /* RHEL_RELEASE_CODE */
+
+#ifndef dev_is_pci
+#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
+#endif
+
+#ifndef ETH_FLAG_NTUPLE
+#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE
+#endif
+
+#ifndef netdev_mc_count
+#define netdev_mc_count(dev) ((dev)->mc_count)
+#endif
+#ifndef netdev_mc_empty
+#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
+#endif
+#ifndef netdev_for_each_mc_addr
+#define netdev_for_each_mc_addr(mclist, dev)                                   \
+	for (mclist = dev->mc_list; mclist; mclist = mclist->next)
+#endif
+#ifndef netdev_uc_count
+#define netdev_uc_count(dev) ((dev)->uc.count)
+#endif
+#ifndef netdev_uc_empty
+#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0)
+#endif
+#ifndef netdev_for_each_uc_addr
+#define netdev_for_each_uc_addr(ha, dev)                                       \
+	list_for_each_entry (ha, &dev->uc.list, list)
+#endif
+#ifndef dma_set_coherent_mask
+#define dma_set_coherent_mask(dev, mask)                                       \
+	pci_set_consistent_dma_mask(to_pci_dev(dev), (mask))
+#endif
+#ifndef pci_dev_run_wake
+#define pci_dev_run_wake(pdev) (0)
+#endif
+
+/* netdev logging taken from include/linux/netdevice.h */
+#ifndef netdev_name
+static inline const char *_kc_netdev_name(const struct net_device *dev)
+{
+	if (dev->reg_state != NETREG_REGISTERED)
+		return "(unregistered net_device)";
+	return dev->name;
+}
+#define netdev_name(netdev) _kc_netdev_name(netdev)
+#endif /* netdev_name */
+
+#undef netdev_printk
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#define netdev_printk(level, netdev, format, args...)                          \
+	do {                                                                   \
+		struct pci_dev *pdev = _kc_netdev_to_pdev(netdev);             \
+		printk(level "%s: " format, pci_name(pdev), ##args);           \
+	} while (0)
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21))
+#define netdev_printk(level, netdev, format, args...)                          \
+	do {                                                                   \
+		struct pci_dev *pdev = _kc_netdev_to_pdev(netdev);             \
+		struct device *dev = pci_dev_to_dev(pdev);                     \
+		dev_printk(level, dev, "%s: " format, netdev_name(netdev),     \
+			   ##args);                                            \
+	} while (0)
+#else /* 2.6.21 => 2.6.34 */
+#define netdev_printk(level, netdev, format, args...)                          \
+	dev_printk(level, (netdev)->dev.parent, "%s: " format,                 \
+		   netdev_name(netdev), ##args)
+#endif /* <2.6.0 <2.6.21 <2.6.34 */
+#undef netdev_emerg
+#define netdev_emerg(dev, format, args...)                                     \
+	netdev_printk(KERN_EMERG, dev, format, ##args)
+#undef netdev_alert
+#define netdev_alert(dev, format, args...)                                     \
+	netdev_printk(KERN_ALERT, dev, format, ##args)
+#undef netdev_crit
+#define netdev_crit(dev, format, args...)                                      \
+	netdev_printk(KERN_CRIT, dev, format, ##args)
+#undef netdev_err
+#define netdev_err(dev, format, args...)                                       \
+	netdev_printk(KERN_ERR, dev, format, ##args)
+#undef netdev_warn
+#define netdev_warn(dev, format, args...)                                      \
+	netdev_printk(KERN_WARNING, dev, format, ##args)
+#undef netdev_notice
+#define netdev_notice(dev, format, args...)                                    \
+	netdev_printk(KERN_NOTICE, dev, format, ##args)
+#undef netdev_info
+#define netdev_info(dev, format, args...)                                      \
+	netdev_printk(KERN_INFO, dev, format, ##args)
+#undef netdev_dbg
+#if defined(DEBUG)
+#define netdev_dbg(__dev, format, args...)                                     \
+	netdev_printk(KERN_DEBUG, __dev, format, ##args)
+#elif defined(CONFIG_DYNAMIC_DEBUG)
+#define netdev_dbg(__dev, format, args...)                                     \
+	do {                                                                   \
+		dynamic_dev_dbg((__dev)->dev.parent, "%s: " format,            \
+				netdev_name(__dev), ##args);                   \
+	} while (0)
+#else /* DEBUG */
+#define netdev_dbg(__dev, format, args...)                                     \
+	({                                                                     \
+		if (0)                                                         \
+			netdev_printk(KERN_DEBUG, __dev, format, ##args);      \
+		0;                                                             \
+	})
+#endif /* DEBUG */
+
+#undef netif_printk
+#define netif_printk(priv, type, level, dev, fmt, args...)                     \
+	do {                                                                   \
+		if (netif_msg_##type(priv))                                    \
+			netdev_printk(level, (dev), fmt, ##args);              \
+	} while (0)
+
+#undef netif_emerg
+#define netif_emerg(priv, type, dev, fmt, args...)                             \
+	netif_level(emerg, priv, type, dev, fmt, ##args)
+#undef netif_alert
+#define netif_alert(priv, type, dev, fmt, args...)                             \
+	netif_level(alert, priv, type, dev, fmt, ##args)
+#undef netif_crit
+#define netif_crit(priv, type, dev, fmt, args...)                              \
+	netif_level(crit, priv, type, dev, fmt, ##args)
+#undef netif_err
+#define netif_err(priv, type, dev, fmt, args...)                               \
+	netif_level(err, priv, type, dev, fmt, ##args)
+#undef netif_warn
+#define netif_warn(priv, type, dev, fmt, args...)                              \
+	netif_level(warn, priv, type, dev, fmt, ##args)
+#undef netif_notice
+#define netif_notice(priv, type, dev, fmt, args...)                            \
+	netif_level(notice, priv, type, dev, fmt, ##args)
+#undef netif_info
+#define netif_info(priv, type, dev, fmt, args...)                              \
+	netif_level(info, priv, type, dev, fmt, ##args)
+#undef netif_dbg
+#define netif_dbg(priv, type, dev, fmt, args...)                               \
+	netif_level(dbg, priv, type, dev, fmt, ##args)
+
+#ifdef SET_SYSTEM_SLEEP_PM_OPS
+#define HAVE_SYSTEM_SLEEP_PM_OPS
+#endif
+
+#ifndef for_each_set_bit
+#define for_each_set_bit(bit, addr, size)                                      \
+	for ((bit) = find_first_bit((addr), (size)); (bit) < (size);           \
+	     (bit) = find_next_bit((addr), (size), (bit) + 1))
+#endif /* for_each_set_bit */
+
+#ifndef DEFINE_DMA_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN
+#define dma_unmap_addr pci_unmap_addr
+#define dma_unmap_addr_set pci_unmap_addr_set
+#define dma_unmap_len pci_unmap_len
+#define dma_unmap_len_set pci_unmap_len_set
+#endif /* DEFINE_DMA_UNMAP_ADDR */
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 3))
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define sysfs_attr_init(attr)                                                  \
+	do {                                                                   \
+		static struct lock_class_key __key;                            \
+		(attr)->key = &__key;                                          \
+	} while (0)
+#else
+#define sysfs_attr_init(attr)                                                  \
+	do {                                                                   \
+	} while (0)
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* RHEL_RELEASE_CODE */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+static inline bool _kc_pm_runtime_suspended(void)
+{
+	return false;
+}
+#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended()
+#else /* 2.6.0 => 2.6.34 */
+static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev)
+{
+	return false;
+}
+#ifndef pm_runtime_suspended
+#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev)
+#endif
+#endif /* 2.6.0 => 2.6.34 */
+
+#ifndef pci_bus_speed
+/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */
+enum _kc_pci_bus_speed {
+	_KC_PCIE_SPEED_2_5GT = 0x14,
+	_KC_PCIE_SPEED_5_0GT = 0x15,
+	_KC_PCIE_SPEED_8_0GT = 0x16,
+	_KC_PCI_SPEED_UNKNOWN = 0xff,
+};
+#define pci_bus_speed _kc_pci_bus_speed
+#define PCIE_SPEED_2_5GT _KC_PCIE_SPEED_2_5GT
+#define PCIE_SPEED_5_0GT _KC_PCIE_SPEED_5_0GT
+#define PCIE_SPEED_8_0GT _KC_PCIE_SPEED_8_0GT
+#define PCI_SPEED_UNKNOWN _KC_PCI_SPEED_UNKNOWN
+#endif /* pci_bus_speed */
+
+#else /* < 2.6.34 */
+#define HAVE_SYSTEM_SLEEP_PM_OPS
+#ifndef HAVE_SET_RX_MODE
+#define HAVE_SET_RX_MODE
+#endif
+
+#endif /* < 2.6.34 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35))
+ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
+				   const void __user *from, size_t count);
+#define simple_write_to_buffer _kc_simple_write_to_buffer
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)))
+static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
+{
+#ifdef HAVE_PCI_DEV_IS_VIRTFN_BIT
+#ifdef CONFIG_PCI_IOV
+	if (dev->is_virtfn)
+		dev = dev->physfn;
+#endif /* CONFIG_PCI_IOV */
+#endif /* HAVE_PCI_DEV_IS_VIRTFN_BIT */
+	return dev;
+}
+#endif /* ! RHEL >= 6.4 */
+
+#ifndef PCI_EXP_LNKSTA_NLW_SHIFT
+#define PCI_EXP_LNKSTA_NLW_SHIFT 4
+#endif
+
+#ifndef numa_node_id
+#define numa_node_id() 0
+#endif
+#ifndef numa_mem_id
+#define numa_mem_id numa_node_id
+#endif
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 0)))
+#ifdef HAVE_TX_MQ
+#include <net/sch_generic.h>
+#ifndef CONFIG_NETDEVICES_MULTIQUEUE
+int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
+#else /* CONFIG_NETDEVICES_MULTI_QUEUE */
+static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev,
+						   unsigned int txq)
+{
+	dev->egress_subqueue_count = txq;
+	return 0;
+}
+#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */
+#else /* HAVE_TX_MQ */
+static inline int
+_kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev,
+				 unsigned int __always_unused txq)
+{
+	return 0;
+}
+#endif /* HAVE_TX_MQ */
+#define netif_set_real_num_tx_queues(dev, txq)                                 \
+	_kc_netif_set_real_num_tx_queues(dev, txq)
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
+#ifndef ETH_FLAG_RXHASH
+#define ETH_FLAG_RXHASH (1 << 28)
+#endif /* ETH_FLAG_RXHASH */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 0))
+#define HAVE_IRQ_AFFINITY_HINT
+#endif
+struct device_node;
+#else /* < 2.6.35 */
+#define HAVE_STRUCT_DEVICE_OF_NODE
+#define HAVE_PM_QOS_REQUEST_LIST
+#define HAVE_IRQ_AFFINITY_HINT
+#include <linux/pm_qos_params.h>
+#endif /* < 2.6.35 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36))
+int _kc_ethtool_op_set_flags(struct net_device *, u32, u32);
+#define ethtool_op_set_flags _kc_ethtool_op_set_flags
+u32 _kc_ethtool_op_get_flags(struct net_device *);
+#define ethtool_op_get_flags _kc_ethtool_op_get_flags
+
+enum {
+	WQ_UNBOUND = 0,
+	WQ_RESCUER = 0,
+};
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#ifdef NET_IP_ALIGN
+#undef NET_IP_ALIGN
+#endif
+#define NET_IP_ALIGN 0
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+
+#ifdef NET_SKB_PAD
+#undef NET_SKB_PAD
+#endif
+
+#if (L1_CACHE_BYTES > 32)
+#define NET_SKB_PAD L1_CACHE_BYTES
+#else
+#define NET_SKB_PAD 32
+#endif
+
+static inline struct sk_buff *
+_kc_netdev_alloc_skb_ip_align(struct net_device *dev, unsigned int length)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC);
+	if (skb) {
+#if (NET_IP_ALIGN + NET_SKB_PAD)
+		skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
+#endif
+		skb->dev = dev;
+	}
+	return skb;
+}
+
+#ifdef netdev_alloc_skb_ip_align
+#undef netdev_alloc_skb_ip_align
+#endif
+#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l)
+
+#undef netif_level
+#define netif_level(level, priv, type, dev, fmt, args...)                      \
+	do {                                                                   \
+		if (netif_msg_##type(priv))                                    \
+			netdev_##level(dev, fmt, ##args);                      \
+	} while (0)
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 3)))
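+/* usleep_range() fallback: sleep at least the requested minimum, rounded
+ * up to whole milliseconds; the maximum is ignored.
+ */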
+#undef usleep_range
+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
+#endif
+
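+/* No u64_stats sync primitives on these kernels; the helpers become no-ops. */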
+#define u64_stats_update_begin(a)                                              \
+	do {                                                                   \
+	} while (0)
+#define u64_stats_update_end(a)                                                \
+	do {                                                                   \
+	} while (0)
+#define u64_stats_fetch_begin(a)                                               \
+	do {                                                                   \
+	} while (0)
+#define u64_stats_fetch_retry_bh(a, b) (0)
+#define u64_stats_fetch_begin_bh(a) (0)
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 1))
+#define HAVE_8021P_SUPPORT
+#endif
+
+/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)) &&                     \
+     !(SLE_VERSION_CODE >= SLE_VERSION(11, 2, 0)))
+static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb)
+{
+	return;
+}
+#endif
+
+#else /* < 2.6.36 */
+
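+/* Route short sleeps (<= 20 ms) through usleep_range() for better timer
+ * precision; the inner msleep() call is the real function, since a macro
+ * is never expanded recursively inside its own replacement.
+ */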
+#define msleep(x)                                                              \
+	do {                                                                   \
+		if (x > 20)                                                    \
+			msleep(x);                                             \
+		else                                                           \
+			usleep_range(1000 * x, 2000 * x);                      \
+	} while (0)
+
+#define HAVE_PM_QOS_REQUEST_ACTIVE
+#define HAVE_8021P_SUPPORT
+#define HAVE_NDO_GET_STATS64
+#endif /* < 2.6.36 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37))
+#define HAVE_NON_CONST_PCI_DRIVER_NAME
+#ifndef netif_set_real_num_tx_queues
+static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev,
+						   unsigned int txq)
+{
+	netif_set_real_num_tx_queues(dev, txq);
+	return 0;
+}
+#define netif_set_real_num_tx_queues(dev, txq)                                 \
+	_kc_netif_set_real_num_tx_queues(dev, txq)
+#endif
+#ifndef netif_set_real_num_rx_queues
+static inline int
+__kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev,
+				  unsigned int __always_unused rxq)
+{
+	return 0;
+}
+#define netif_set_real_num_rx_queues(dev, rxq)                                 \
+	__kc_netif_set_real_num_rx_queues((dev), (rxq))
+#endif
+#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR
+#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2)
+#endif
+#ifndef VLAN_N_VID
+#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN
+#endif /* VLAN_N_VID */
+#ifndef ETH_FLAG_TXVLAN
+#define ETH_FLAG_TXVLAN BIT(7)
+#endif /* ETH_FLAG_TXVLAN */
+#ifndef ETH_FLAG_RXVLAN
+#define ETH_FLAG_RXVLAN BIT(8)
+#endif /* ETH_FLAG_RXVLAN */
+
+#define WQ_MEM_RECLAIM WQ_RESCUER
+
+static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb)
+{
+	WARN_ON(skb->ip_summed != CHECKSUM_NONE);
+}
+#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb)
+
+static inline void *_kc_vzalloc_node(unsigned long size, int node)
+{
+	void *addr = vmalloc_node(size, node);
+	if (addr)
+		memset(addr, 0, size);
+	return addr;
+}
+#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node)
+
+static inline void *_kc_vzalloc(unsigned long size)
+{
+	void *addr = vmalloc(size);
+	if (addr)
+		memset(addr, 0, size);
+	return addr;
+}
+#define vzalloc(_size) _kc_vzalloc(_size)
+
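+/* Backported vlan_get_protocol(): return the encapsulated protocol for
+ * in-band 802.1Q tagged frames, otherwise skb->protocol.
+ */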
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 7)) ||                     \
+     (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6, 0)))
+static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
+{
+	if (vlan_tx_tag_present(skb) ||
+	    skb->protocol != cpu_to_be16(ETH_P_8021Q))
+		return skb->protocol;
+
+	if (skb_headlen(skb) < sizeof(struct vlan_ethhdr))
+		return 0;
+
+	return ((struct vlan_ethhdr *)skb->data)->h_vlan_encapsulated_proto;
+}
+#endif /* !RHEL5.7+ || RHEL6.0 */
+
+#ifdef HAVE_HW_TIME_STAMP
+#define SKBTX_HW_TSTAMP BIT(0)
+#define SKBTX_IN_PROGRESS BIT(2)
+#define SKB_SHARED_TX_IS_UNION
+#endif
+
+#ifndef device_wakeup_enable
+#define device_wakeup_enable(dev) device_set_wakeup_enable(dev, true)
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 18))
+#ifndef HAVE_VLAN_RX_REGISTER
+#define HAVE_VLAN_RX_REGISTER
+#endif
+#endif /* > 2.4.18 */
+#endif /* < 2.6.37 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22))
+#define skb_checksum_start_offset(skb) skb_transport_offset(skb)
+#else /* 2.6.22 -> 2.6.37 */
+static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb)
+{
+	return skb->csum_start - skb_headroom(skb);
+}
+#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb)
+#endif /* 2.6.22 -> 2.6.37 */
+#if IS_ENABLED(CONFIG_DCB)
+#ifndef IEEE_8021QAZ_MAX_TCS
+#define IEEE_8021QAZ_MAX_TCS 8
+#endif
+#ifndef DCB_CAP_DCBX_HOST
+#define DCB_CAP_DCBX_HOST 0x01
+#endif
+#ifndef DCB_CAP_DCBX_LLD_MANAGED
+#define DCB_CAP_DCBX_LLD_MANAGED 0x02
+#endif
+#ifndef DCB_CAP_DCBX_VER_CEE
+#define DCB_CAP_DCBX_VER_CEE 0x04
+#endif
+#ifndef DCB_CAP_DCBX_VER_IEEE
+#define DCB_CAP_DCBX_VER_IEEE 0x08
+#endif
+#ifndef DCB_CAP_DCBX_STATIC
+#define DCB_CAP_DCBX_STATIC 0x10
+#endif
+#endif /* CONFIG_DCB */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 2))
+#define CONFIG_XPS
+#endif /* RHEL_RELEASE_VERSION(6,2) */
+#endif /* < 2.6.38 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39))
+#ifndef TC_BITMASK
+#define TC_BITMASK 15
+#endif
+#ifndef NETIF_F_RXCSUM
+#define NETIF_F_RXCSUM BIT(29)
+#endif
+#ifndef skb_queue_reverse_walk_safe
+#define skb_queue_reverse_walk_safe(queue, skb, tmp)                           \
+	for (skb = (queue)->prev, tmp = skb->prev;                             \
+	     skb != (struct sk_buff *)(queue); skb = tmp, tmp = skb->prev)
+#endif
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef FCOE_MTU
+#define FCOE_MTU 2158
+#endif
+#endif
+#if IS_ENABLED(CONFIG_DCB)
+#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE
+#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1
+#endif
+#endif
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)))
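+/* Minimal kstrto* fallbacks: parse with simple_strtoul() and always
+ * return 0, so parse errors are not reported to the caller.
+ */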
+#define kstrtoul(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0)
+#define kstrtouint(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0)
+#define kstrtou32(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0)
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) */
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 0)))
+u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16);
+#define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q))
+u8 _kc_netdev_get_num_tc(struct net_device *dev);
+#define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev)
+int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc);
+#define netdev_set_num_tc(dev, tc) _kc_netdev_set_num_tc((dev), (tc))
+#define netdev_reset_tc(dev) _kc_netdev_set_num_tc((dev), 0)
+#define netdev_set_tc_queue(dev, tc, cnt, off)                                 \
+	do {                                                                   \
+	} while (0)
+u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up);
+#define netdev_get_prio_tc_map(dev, up) _kc_netdev_get_prio_tc_map(dev, up)
+#define netdev_set_prio_tc_map(dev, up, tc)                                    \
+	do {                                                                   \
+	} while (0)
+#else /* RHEL6.1 or greater */
+#ifndef HAVE_MQPRIO
+#define HAVE_MQPRIO
+#endif /* HAVE_MQPRIO */
+#if IS_ENABLED(CONFIG_DCB)
+#ifndef HAVE_DCBNL_IEEE
+#define HAVE_DCBNL_IEEE
+#ifndef IEEE_8021QAZ_TSA_STRICT
+#define IEEE_8021QAZ_TSA_STRICT 0
+#endif
+#ifndef IEEE_8021QAZ_TSA_ETS
+#define IEEE_8021QAZ_TSA_ETS 2
+#endif
+#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE
+#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1
+#endif
+#endif
+#endif /* CONFIG_DCB */
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
+
+#ifndef udp_csum
+#define udp_csum __kc_udp_csum
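+/* Sum the checksum over the UDP header (seeded with skb->csum) and add in
+ * the checksum of each skb on the frag_list.
+ */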
+static inline __wsum __kc_udp_csum(struct sk_buff *skb)
+{
+	__wsum csum = csum_partial(skb_transport_header(skb),
+				   sizeof(struct udphdr), skb->csum);
+
+	for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
+		csum = csum_add(csum, skb->csum);
+	}
+	return csum;
+}
+#endif /* udp_csum */
+#else /* < 2.6.39 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
+#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#ifndef HAVE_MQPRIO
+#define HAVE_MQPRIO
+#endif
+#ifndef HAVE_SETUP_TC
+#define HAVE_SETUP_TC
+#endif
+#ifdef CONFIG_DCB
+#ifndef HAVE_DCBNL_IEEE
+#define HAVE_DCBNL_IEEE
+#endif
+#endif /* CONFIG_DCB */
+#ifndef HAVE_NDO_SET_FEATURES
+#define HAVE_NDO_SET_FEATURES
+#endif
+#define HAVE_IRQ_AFFINITY_NOTIFY
+#endif /* < 2.6.39 */
+
+/*****************************************************************************/
+/* use < 2.6.40 because of a Fedora 15 kernel update where they
+ * updated the kernel version to 2.6.40.x and they back-ported 3.0 features
+ * like set_phys_id for ethtool.
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 40))
+#ifdef ETHTOOL_GRXRINGS
+#ifndef FLOW_EXT
+#define FLOW_EXT 0x80000000
+union _kc_ethtool_flow_union {
+	struct ethtool_tcpip4_spec tcp_ip4_spec;
+	struct ethtool_usrip4_spec usr_ip4_spec;
+	__u8 hdata[60];
+};
+struct _kc_ethtool_flow_ext {
+	__be16 vlan_etype;
+	__be16 vlan_tci;
+	__be32 data[2];
+};
+struct _kc_ethtool_rx_flow_spec {
+	__u32 flow_type;
+	union _kc_ethtool_flow_union h_u;
+	struct _kc_ethtool_flow_ext h_ext;
+	union _kc_ethtool_flow_union m_u;
+	struct _kc_ethtool_flow_ext m_ext;
+	__u64 ring_cookie;
+	__u32 location;
+};
+#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec
+#endif /* FLOW_EXT */
+#endif
+
+#define pci_disable_link_state_locked pci_disable_link_state
+
+#ifndef PCI_LTR_VALUE_MASK
+#define PCI_LTR_VALUE_MASK 0x000003ff
+#endif
+#ifndef PCI_LTR_SCALE_MASK
+#define PCI_LTR_SCALE_MASK 0x00001c00
+#endif
+#ifndef PCI_LTR_SCALE_SHIFT
+#define PCI_LTR_SCALE_SHIFT 10
+#endif
+
+#else /* < 2.6.40 */
+#define HAVE_ETHTOOL_SET_PHYS_ID
+#endif /* < 2.6.40 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
+#define USE_LEGACY_PM_SUPPORT
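+/* Without kfree_rcu(), free immediately; callers must not rely on an RCU
+ * grace period having elapsed.
+ */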
+#ifndef kfree_rcu
+#define kfree_rcu(_ptr, _rcu_head) kfree(_ptr)
+#endif /* kfree_rcu */
+#ifndef kstrtol_from_user
+#define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r)
+static inline int _kc_kstrtol_from_user(const char __user *s, size_t count,
+					unsigned int base, long *res)
+{
+	/* sign, base 2 representation, newline, terminator */
+	char buf[1 + sizeof(long) * 8 + 1 + 1];
+
+	count = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, s, count))
+		return -EFAULT;
+	buf[count] = '\0';
+	return strict_strtol(buf, base, res);
+}
+#endif
+
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 0) ||   \
+			   RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5, 7)))
+/* 20000base_blah_full Supported and Advertised Registers */
+#define SUPPORTED_20000baseMLD2_Full BIT(21)
+#define SUPPORTED_20000baseKR2_Full BIT(22)
+#define ADVERTISED_20000baseMLD2_Full BIT(21)
+#define ADVERTISED_20000baseKR2_Full BIT(22)
+#endif /* RHEL_RELEASE_CODE */
+#endif /* < 3.0.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0))
+#ifndef __netdev_alloc_skb_ip_align
+#define __netdev_alloc_skb_ip_align(d, l, _g) netdev_alloc_skb_ip_align(d, l)
+#endif /* __netdev_alloc_skb_ip_align */
+#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app)
+#define dcb_ieee_delapp(dev, app) 0
+#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority)
+
+/* 1000BASE-T Control register */
+#define CTL1000_AS_MASTER 0x0800
+#define CTL1000_ENABLE_MASTER 0x1000
+
+/* kernels less than 3.0.0 don't have this */
+#ifndef ETH_P_8021AD
+#define ETH_P_8021AD 0x88A8
+#endif
+
+/* Stub definition for !CONFIG_OF is introduced later */
+#ifdef CONFIG_OF
+static inline struct device_node *
+pci_device_to_OF_node(struct pci_dev __maybe_unused *pdev)
+{
+#ifdef HAVE_STRUCT_DEVICE_OF_NODE
+	return pdev ? pdev->dev.of_node : NULL;
+#else
+	return NULL;
+#endif /* !HAVE_STRUCT_DEVICE_OF_NODE */
+}
+#endif /* CONFIG_OF */
+#else /* < 3.1.0 */
+#ifndef HAVE_DCBNL_IEEE_DELAPP
+#define HAVE_DCBNL_IEEE_DELAPP
+#endif
+#endif /* < 3.1.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
+#ifndef dma_zalloc_coherent
+#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f)
+static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size,
+					    dma_addr_t *dma_handle, gfp_t flag)
+{
+	void *ret = dma_alloc_coherent(dev, size, dma_handle, flag);
+	if (ret)
+		memset(ret, 0, size);
+	return ret;
+}
+#endif
+#ifdef ETHTOOL_GRXRINGS
+#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
+#endif /* ETHTOOL_GRXRINGS */
+
+#ifndef skb_frag_size
+#define skb_frag_size(frag) _kc_skb_frag_size(frag)
+static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag)
+{
+	return frag->size;
+}
+#endif /* skb_frag_size */
+
+#ifndef skb_frag_size_sub
+#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta)
+static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta)
+{
+	frag->size -= delta;
+}
+#endif /* skb_frag_size_sub */
+
+#ifndef skb_frag_page
+#define skb_frag_page(frag) _kc_skb_frag_page(frag)
+static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag)
+{
+	return frag->page;
+}
+#endif /* skb_frag_page */
+
+#ifndef skb_frag_address
+#define skb_frag_address(frag) _kc_skb_frag_address(frag)
+static inline void *_kc_skb_frag_address(const skb_frag_t *frag)
+{
+	return page_address(skb_frag_page(frag)) + frag->page_offset;
+}
+#endif /* skb_frag_address */
+
+#ifndef skb_frag_dma_map
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#include <linux/dma-mapping.h>
+#endif
+#define skb_frag_dma_map(dev, frag, offset, size, dir)                         \
+	_kc_skb_frag_dma_map(dev, frag, offset, size, dir)
+static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev,
+					      const skb_frag_t *frag,
+					      size_t offset, size_t size,
+					      enum dma_data_direction dir)
+{
+	return dma_map_page(dev, skb_frag_page(frag),
+			    frag->page_offset + offset, size, dir);
+}
+#endif /* skb_frag_dma_map */
+
+#ifndef __skb_frag_unref
+#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag)
+static inline void __kc_skb_frag_unref(skb_frag_t *frag)
+{
+	put_page(skb_frag_page(frag));
+}
+#endif /* __skb_frag_unref */
+
+#ifndef SPEED_UNKNOWN
+#define SPEED_UNKNOWN -1
+#endif
+#ifndef DUPLEX_UNKNOWN
+#define DUPLEX_UNKNOWN 0xff
+#endif
+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 3)) ||                      \
+     (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0)))
+#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_PCI_DEV_FLAGS_ASSIGNED
+#endif
+#endif
+#else /* < 3.2.0 */
+#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_VF_SPOOFCHK_CONFIGURE
+#endif
+#ifndef HAVE_SKB_L4_RXHASH
+#define HAVE_SKB_L4_RXHASH
+#endif
+#define HAVE_IOMMU_PRESENT
+#define HAVE_PM_QOS_REQUEST_LIST_NEW
+#endif /* < 3.2.0 */
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6, 2))
+#undef rnp_get_netdev_tc_txq
+#define rnp_get_netdev_tc_txq(dev, tc)                                         \
+	(&netdev_extended(dev)->qos_data.tc_to_txq[tc])
+#endif
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0))
+/* NOTE: the order of parameters to _kc_alloc_workqueue() is different than
+ * alloc_workqueue() to avoid compiler warning from -Wvarargs
+ */
+static inline struct workqueue_struct *__attribute__((format(printf, 3, 4)))
+_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active,
+		    const char *fmt, ...)
+{
+	struct workqueue_struct *wq;
+	va_list args, temp;
+	unsigned int len;
+	char *p;
+
+	va_start(args, fmt);
+	va_copy(temp, args);
+	len = vsnprintf(NULL, 0, fmt, temp);
+	va_end(temp);
+
+	p = kmalloc(len + 1, GFP_KERNEL);
+	if (!p) {
+		va_end(args);
+		return NULL;
+	}
+
+	vsnprintf(p, len + 1, fmt, args);
+	va_end(args);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36))
+	wq = create_workqueue(p);
+#else
+	wq = alloc_workqueue(p, flags, max_active);
+#endif
+	kfree(p);
+
+	return wq;
+}
+#ifdef alloc_workqueue
+#undef alloc_workqueue
+#endif
+#define alloc_workqueue(fmt, flags, max_active, args...)                       \
+	_kc_alloc_workqueue(flags, max_active, fmt, ##args)
+
+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 5))
+typedef u32 netdev_features_t;
+#endif
+#undef PCI_EXP_TYPE_RC_EC
+#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
+#ifndef CONFIG_BQL
+#define netdev_tx_completed_queue(_q, _p, _b)                                  \
+	do {                                                                   \
+	} while (0)
+#define netdev_completed_queue(_n, _p, _b)                                     \
+	do {                                                                   \
+	} while (0)
+#define netdev_tx_sent_queue(_q, _b)                                           \
+	do {                                                                   \
+	} while (0)
+#define netdev_sent_queue(_n, _b)                                              \
+	do {                                                                   \
+	} while (0)
+#define netdev_tx_reset_queue(_q)                                              \
+	do {                                                                   \
+	} while (0)
+#define netdev_reset_queue(_n)                                                 \
+	do {                                                                   \
+	} while (0)
+#endif
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0))
+#define HAVE_ETHTOOL_GRXFHINDIR_SIZE
+#endif /* SLE_VERSION(11,3,0) */
+#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q)
+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 4, 0))
+static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start,
+					u8 *nexthdrp,
+					__be16 __always_unused *frag_offp)
+{
+	return ipv6_skip_exthdr(skb, start, nexthdrp);
+}
+#undef ipv6_skip_exthdr
+#define ipv6_skip_exthdr(a, b, c, d) __kc_ipv6_skip_exthdr((a), (b), (c), (d))
+#endif /* !SLES11sp4 or greater */
+
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)) &&                     \
+     !(SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0)))
+static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
+{
+	return index % n_rx_rings;
+}
+#endif
+
+#else /* ! < 3.3.0 */
+#define HAVE_ETHTOOL_GRXFHINDIR_SIZE
+#define HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef ETHTOOL_SRXNTUPLE
+#undef ETHTOOL_SRXNTUPLE
+#endif
+#endif /* < 3.3.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
+#ifndef NETIF_F_RXFCS
+#define NETIF_F_RXFCS 0
+#endif /* NETIF_F_RXFCS */
+#ifndef NETIF_F_RXALL
+#define NETIF_F_RXALL 0
+#endif /* NETIF_F_RXALL */
+
+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0))
+#define NUMTCS_RETURNS_U8
+
+int _kc_simple_open(struct inode *inode, struct file *file);
+#define simple_open _kc_simple_open
+#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */
+
+#ifndef skb_add_rx_frag
+#define skb_add_rx_frag _kc_skb_add_rx_frag
+void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
+			 int size, unsigned int truesize);
+#endif
+#ifdef NET_ADDR_RANDOM
+#define eth_hw_addr_random(N)                                                  \
+	do {                                                                   \
+		eth_random_addr(N->dev_addr);                                  \
+		N->addr_assign_type |= NET_ADDR_RANDOM;                        \
+	} while (0)
+#else /* NET_ADDR_RANDOM */
+#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr)
+#endif /* NET_ADDR_RANDOM */
+
+#ifndef for_each_set_bit_from
+#define for_each_set_bit_from(bit, addr, size)                                 \
+	for ((bit) = find_next_bit((addr), (size), (bit)); (bit) < (size);     \
+	     (bit) = find_next_bit((addr), (size), (bit) + 1))
+#endif /* for_each_set_bit_from */
+
+#else /* < 3.4.0 */
+#include <linux/etherdevice.h>
+#endif /* >= 3.4.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) ||                         \
+	(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4))
+#ifndef NO_PTP_SUPPORT
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+#define HAVE_PTP_1588_CLOCK
+#endif /* CONFIG_PTP_1588_CLOCK */
+#endif /* !NO_PTP_SUPPORT */
+#endif /* >= 3.0.0 || RHEL_RELEASE > 6.4 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0))
+
+#ifndef SIZE_MAX
+#define SIZE_MAX (~(size_t)0)
+#endif
+
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG 64
+#endif
+
+#ifndef ether_addr_equal
+static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2)
+{
+	return !compare_ether_addr(addr1, addr2);
+}
+#define ether_addr_equal(_addr1, _addr2)                                       \
+	__kc_ether_addr_equal((_addr1), (_addr2))
+#endif
+
+/* Definitions for !CONFIG_OF_NET are introduced in 3.10 */
+#ifdef CONFIG_OF_NET
+static inline int of_get_phy_mode(struct device_node __always_unused *np)
+{
+	return -ENODEV;
+}
+
+static inline const void *
+of_get_mac_address(struct device_node __always_unused *np)
+{
+	return NULL;
+}
+#endif
+#else
+#include <linux/of_net.h>
+#define HAVE_FDB_OPS
+#define HAVE_ETHTOOL_GET_TS_INFO
+#endif /* < 3.5.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */
+
+#ifndef MDIO_EEE_100TX
+#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */
+#endif
+#ifndef MDIO_EEE_1000T
+#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */
+#endif
+#ifndef MDIO_EEE_10GT
+#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */
+#endif
+#ifndef MDIO_EEE_1000KX
+#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */
+#endif
+#ifndef MDIO_EEE_10GKX4
+#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */
+#endif
+#ifndef MDIO_EEE_10GKR
+#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */
+#endif
+
+#ifndef __GFP_MEMALLOC
+#define __GFP_MEMALLOC 0
+#endif
+
+#ifndef eth_broadcast_addr
+#define eth_broadcast_addr _kc_eth_broadcast_addr
+static inline void _kc_eth_broadcast_addr(u8 *addr)
+{
+	memset(addr, 0xff, ETH_ALEN);
+}
+#endif
+
+#ifndef eth_random_addr
+#define eth_random_addr _kc_eth_random_addr
+static inline void _kc_eth_random_addr(u8 *addr)
+{
+	get_random_bytes(addr, ETH_ALEN);
+	addr[0] &= 0xfe; /* clear multicast */
+	addr[0] |= 0x02; /* set local assignment */
+}
+#endif /* eth_random_addr */
+
+#ifndef DMA_ATTR_SKIP_CPU_SYNC
+#define DMA_ATTR_SKIP_CPU_SYNC 0
+#endif
+#else /* < 3.6.0 */
+#define HAVE_STRUCT_PAGE_PFMEMALLOC
+#endif /* < 3.6.0 */
+
+/******************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+#include 
+#ifndef ADVERTISED_40000baseKR4_Full
+/* these defines were all added in one commit, so should be safe
+ * to trigger activation on one define
+ */
+#define SUPPORTED_40000baseKR4_Full BIT(23)
+#define SUPPORTED_40000baseCR4_Full BIT(24)
+#define SUPPORTED_40000baseSR4_Full BIT(25)
+#define SUPPORTED_40000baseLR4_Full BIT(26)
+#define ADVERTISED_40000baseKR4_Full BIT(23)
+#define ADVERTISED_40000baseCR4_Full BIT(24)
+#define ADVERTISED_40000baseSR4_Full BIT(25)
+#define ADVERTISED_40000baseLR4_Full BIT(26)
+#endif
+
+#ifndef mmd_eee_cap_to_ethtool_sup_t
+/**
+ * mmd_eee_cap_to_ethtool_sup_t
+ * @eee_cap: value of the MMD EEE Capability register
+ *
+ * A small helper function that translates MMD EEE Capability (3.20) bits
+ * to ethtool supported settings.
+ */
+static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
+{
+	u32 supported = 0;
+
+	if (eee_cap & MDIO_EEE_100TX)
+		supported |= SUPPORTED_100baseT_Full;
+	if (eee_cap & MDIO_EEE_1000T)
+		supported |= SUPPORTED_1000baseT_Full;
+	if (eee_cap & MDIO_EEE_10GT)
+		supported |= SUPPORTED_10000baseT_Full;
+	if (eee_cap & MDIO_EEE_1000KX)
+		supported |= SUPPORTED_1000baseKX_Full;
+	if (eee_cap & MDIO_EEE_10GKX4)
+		supported |= SUPPORTED_10000baseKX4_Full;
+	if (eee_cap & MDIO_EEE_10GKR)
+		supported |= SUPPORTED_10000baseKR_Full;
+
+	return supported;
+}
+#define mmd_eee_cap_to_ethtool_sup_t(eee_cap)                                  \
+	__kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap)
+#endif /* mmd_eee_cap_to_ethtool_sup_t */
+
+#ifndef mmd_eee_adv_to_ethtool_adv_t
+/**
+ * mmd_eee_adv_to_ethtool_adv_t
+ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
+ *
+ * A small helper function that translates the MMD EEE Advertisement (7.60)
+ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
+ * settings.
+ */
+static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv)
+{
+	u32 adv = 0;
+
+	if (eee_adv & MDIO_EEE_100TX)
+		adv |= ADVERTISED_100baseT_Full;
+	if (eee_adv & MDIO_EEE_1000T)
+		adv |= ADVERTISED_1000baseT_Full;
+	if (eee_adv & MDIO_EEE_10GT)
+		adv |= ADVERTISED_10000baseT_Full;
+	if (eee_adv & MDIO_EEE_1000KX)
+		adv |= ADVERTISED_1000baseKX_Full;
+	if (eee_adv & MDIO_EEE_10GKX4)
+		adv |= ADVERTISED_10000baseKX4_Full;
+	if (eee_adv & MDIO_EEE_10GKR)
+		adv |= ADVERTISED_10000baseKR_Full;
+
+	return adv;
+}
+
+#define mmd_eee_adv_to_ethtool_adv_t(eee_adv)                                  \
+	__kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv)
+#endif /* mmd_eee_adv_to_ethtool_adv_t */
+
+#ifndef ethtool_adv_to_mmd_eee_adv_t
+/**
+ * ethtool_adv_to_mmd_eee_adv_t
+ * @adv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement settings
+ * to EEE advertisements for the MMD EEE Advertisement (7.60) and
+ * MMD EEE Link Partner Ability (7.61) registers.
+ */
+static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv)
+{
+	u16 reg = 0;
+
+	if (adv & ADVERTISED_100baseT_Full)
+		reg |= MDIO_EEE_100TX;
+	if (adv & ADVERTISED_1000baseT_Full)
+		reg |= MDIO_EEE_1000T;
+	if (adv & ADVERTISED_10000baseT_Full)
+		reg |= MDIO_EEE_10GT;
+	if (adv & ADVERTISED_1000baseKX_Full)
+		reg |= MDIO_EEE_1000KX;
+	if (adv & ADVERTISED_10000baseKX4_Full)
+		reg |= MDIO_EEE_10GKX4;
+	if (adv & ADVERTISED_10000baseKR_Full)
+		reg |= MDIO_EEE_10GKR;
+
+	return reg;
+}
+#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv)
+#endif /* ethtool_adv_to_mmd_eee_adv_t */
+
+#ifndef pci_pcie_type
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+static inline u8 pci_pcie_type(struct pci_dev *pdev)
+{
+	int pos;
+	u16 reg16;
+
+	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	BUG_ON(!pos);
+	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
+	return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
+}
+#else /* < 2.6.24 */
+#define pci_pcie_type(x) (x)->pcie_type
+#endif /* < 2.6.24 */
+#endif /* pci_pcie_type */
+
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4))) &&                    \
+	(!(SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0))) &&                      \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+#define ptp_clock_register(caps, args...) ptp_clock_register(caps)
+#endif
+
+#ifndef pcie_capability_read_word
+int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
+#define pcie_capability_read_word(d, p, v)                                     \
+	__kc_pcie_capability_read_word(d, p, v)
+#endif /* pcie_capability_read_word */
+
+#ifndef pcie_capability_read_dword
+int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
+#define pcie_capability_read_dword(d, p, v)                                    \
+	__kc_pcie_capability_read_dword(d, p, v)
+#endif
+
+#ifndef pcie_capability_write_word
+int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
+#define pcie_capability_write_word(d, p, v)                                    \
+	__kc_pcie_capability_write_word(d, p, v)
+#endif /* pcie_capability_write_word */
+
+#ifndef pcie_capability_clear_and_set_word
+int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+					    u16 clear, u16 set);
+#define pcie_capability_clear_and_set_word(d, p, c, s)                         \
+	__kc_pcie_capability_clear_and_set_word(d, p, c, s)
+#endif /* pcie_capability_clear_and_set_word */
+
+#ifndef pcie_capability_clear_word
+int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, u16 clear);
+#define pcie_capability_clear_word(d, p, c)                                    \
+	__kc_pcie_capability_clear_word(d, p, c)
+#endif /* pcie_capability_clear_word */
+
+#ifndef PCI_EXP_LNKSTA2
+#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
+#endif
+
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0))
+#define USE_CONST_DEV_UC_CHAR
+#define HAVE_NDO_FDB_ADD_NLATTR
+#endif
+
+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 8))
+#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi)
+#endif /* !RHEL6.8+ */
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 6))
+#include <linux/hashtable.h>
+#else
+
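+/* Minimal open-coded hashtable helpers for kernels without <linux/hashtable.h>. */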
+#define DEFINE_HASHTABLE(name, bits)                                           \
+	struct hlist_head name[1 << (bits)] = { [0 ...((1 << (bits)) - 1)] =   \
+							HLIST_HEAD_INIT }
+
+#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits)                               \
+	struct hlist_head name[1 << (bits)] __read_mostly = {                  \
+		[0 ...((1 << (bits)) - 1)] = HLIST_HEAD_INIT                   \
+	}
+
+#define DECLARE_HASHTABLE(name, bits) struct hlist_head name[1 << (bits)]
+
+#define HASH_SIZE(name) (ARRAY_SIZE(name))
+#define HASH_BITS(name) ilog2(HASH_SIZE(name))
+
+/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */
+#define hash_min(val, bits)                                                    \
+	(sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
+
+static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
+{
+	unsigned int i;
+
+	for (i = 0; i < sz; i++)
+		INIT_HLIST_HEAD(&ht[i]);
+}
+
+#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
+
+#define hash_add(hashtable, node, key)                                         \
+	hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
+
+static inline bool hash_hashed(struct hlist_node *node)
+{
+	return !hlist_unhashed(node);
+}
+
+static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
+{
+	unsigned int i;
+
+	for (i = 0; i < sz; i++)
+		if (!hlist_empty(&ht[i]))
+			return false;
+
+	return true;
+}
+
+#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
+
+static inline void hash_del(struct hlist_node *node)
+{
+	hlist_del_init(node);
+}
+#endif /* RHEL >= 6.6 */
+
+/* We don't have @flags support prior to 3.7, so we'll simply ignore the flags
+ * parameter on these older kernels.
+ */
+#define __setup_timer(_timer, _fn, _data, _flags)                              \
+	setup_timer((_timer), (_fn), (_data))
+
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 7))) &&                    \
+	(!(SLE_VERSION_CODE >= SLE_VERSION(12, 0, 0)))
+
+#ifndef mod_delayed_work
+/**
+ * __mod_delayed_work - modify delay or queue delayed work
+ * @wq: workqueue to use
+ * @dwork: delayed work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * Return: %true if @dwork was pending and was rescheduled;
+ *         %false if it wasn't pending
+ *
+ * Note: the dwork parameter was declared as a void*
+ *       to avoid compatibility problems with early 2.6 kernels
+ *       where struct delayed_work is not declared. Unlike the original
+ *       implementation, flags are not preserved and it shouldn't be
+ *       used in interrupt context.
+ */
+static inline bool __mod_delayed_work(struct workqueue_struct *wq, void *dwork,
+				      unsigned long delay)
+{
+	bool ret = cancel_delayed_work(dwork);
+	queue_delayed_work(wq, dwork, delay);
+	return ret;
+}
+#define mod_delayed_work(wq, dwork, delay) __mod_delayed_work(wq, dwork, delay)
+#endif /* mod_delayed_work */
+
+#endif /* !(RHEL >= 6.7) && !(SLE >= 12.0) */
+#else /* >= 3.7.0 */
+#include <linux/hashtable.h>
+#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS
+#define USE_CONST_DEV_UC_CHAR
+#define HAVE_NDO_FDB_ADD_NLATTR
+#endif /* >= 3.7.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+#if (!(RHEL_RELEASE_CODE &&                                                    \
+       RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 5)) &&                     \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 4, 0)))
+#ifndef pci_sriov_set_totalvfs
+static inline int
+__kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev,
+			    u16 __always_unused numvfs)
+{
+	return 0;
+}
+#define pci_sriov_set_totalvfs(a, b) __kc_pci_sriov_set_totalvfs((a), (b))
+#endif
+#endif /* !(RHEL_RELEASE_CODE >= 6.5) && !(SLE_VERSION_CODE >= 11.4) */
+#ifndef PCI_EXP_LNKCTL_ASPM_L0S
+#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */
+#endif
+#ifndef PCI_EXP_LNKCTL_ASPM_L1
+#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */
+#endif
+#define HAVE_CONFIG_HOTPLUG
+/* Reserved Ethernet Addresses per IEEE 802.1Q */
+static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { 0x01, 0x80,
+								  0xc2, 0x00,
+								  0x00, 0x00 };
+
+#ifndef is_link_local_ether_addr
+static inline bool __kc_is_link_local_ether_addr(const u8 *addr)
+{
+	__be16 *a = (__be16 *)addr;
+	static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
+	static const __be16 m = cpu_to_be16(0xfff0);
+
+	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
+}
+#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr)
+#endif /* is_link_local_ether_addr */
+
+#ifndef FLOW_MAC_EXT
+#define FLOW_MAC_EXT 0x40000000
+#endif /* FLOW_MAC_EXT */
+
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 4, 0))
+#define HAVE_SRIOV_CONFIGURE
+#endif
+
+#ifndef PCI_EXP_LNKCAP_SLS_2_5GB
+#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
+#endif
+
+#ifndef PCI_EXP_LNKCAP_SLS_5_0GB
+#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
+#endif
+
+#undef PCI_EXP_LNKCAP2_SLS_2_5GB
+#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */
+
+#undef PCI_EXP_LNKCAP2_SLS_5_0GB
+#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */
+
+#undef PCI_EXP_LNKCAP2_SLS_8_0GB
+#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */
+
+#else /* >= 3.8.0 */
+#ifndef __devinit
+#define __devinit
+#endif
+
+#ifndef __devinitdata
+#define __devinitdata
+#endif
+
+#ifndef __devinitconst
+#define __devinitconst
+#endif
+
+#ifndef __devexit
+#define __devexit
+#endif
+
+#ifndef __devexit_p
+#define __devexit_p
+#endif
+
+#ifndef HAVE_ENCAP_CSUM_OFFLOAD
+#define HAVE_ENCAP_CSUM_OFFLOAD
+#endif
+
+#ifndef HAVE_GRE_ENCAP_OFFLOAD
+#define HAVE_GRE_ENCAP_OFFLOAD
+#endif
+
+#ifndef HAVE_SRIOV_CONFIGURE
+#define HAVE_SRIOV_CONFIGURE
+#endif
+
+#define HAVE_BRIDGE_ATTRIBS
+#ifndef BRIDGE_MODE_VEB
+#define BRIDGE_MODE_VEB 0 /* Default loopback mode */
+#endif /* BRIDGE_MODE_VEB */
+#ifndef BRIDGE_MODE_VEPA
+#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */
+#endif /* BRIDGE_MODE_VEPA */
+#endif /* >= 3.8.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
+
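+/* Backport of the compile-time assertion helpers (BUILD_BUG_ON_MSG and
+ * friends) for kernels that predate them.
+ */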
+#undef BUILD_BUG_ON
+#ifdef __CHECKER__
+#define BUILD_BUG_ON(condition) (0)
+#else /* __CHECKER__ */
+#ifndef __compiletime_warning
+#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400)
+#define __compiletime_warning(message) __attribute__((warning(message)))
+#else /* __GNUC__ */
+#define __compiletime_warning(message)
+#endif /* __GNUC__ */
+#endif /* __compiletime_warning */
+#ifndef __compiletime_error
+#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400)
+#define __compiletime_error(message) __attribute__((error(message)))
+#define __compiletime_error_fallback(condition)                                \
+	do {                                                                   \
+	} while (0)
+#else /* __GNUC__ */
+#define __compiletime_error(message)
+#define __compiletime_error_fallback(condition)                                \
+	do {                                                                   \
+		((void)sizeof(char[1 - 2 * condition]));                       \
+	} while (0)
+#endif /* __GNUC__ */
+#else /* __compiletime_error */
+#define __compiletime_error_fallback(condition)                                \
+	do {                                                                   \
+	} while (0)
+#endif /* __compiletime_error */
+#define __compiletime_assert(condition, msg, prefix, suffix)                   \
+	do {                                                                   \
+		bool __cond = !(condition);                                    \
+		extern void prefix##suffix(void) __compiletime_error(msg);     \
+		if (__cond)                                                    \
+			prefix##suffix();                                      \
+		__compiletime_error_fallback(__cond);                          \
+	} while (0)
+
+#define _compiletime_assert(condition, msg, prefix, suffix)                    \
+	__compiletime_assert(condition, msg, prefix, suffix)
+#define compiletime_assert(condition, msg)                                     \
+	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
+#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg)
+#ifndef __OPTIMIZE__
+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))
+#else /* __OPTIMIZE__ */
+#define BUILD_BUG_ON(condition)                                                \
+	BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition)
+#endif /* __OPTIMIZE__ */
+#endif /* __CHECKER__ */
+
+#undef hlist_entry
+#define hlist_entry(ptr, type, member) container_of(ptr, type, member)
+
+#undef hlist_entry_safe
+#define hlist_entry_safe(ptr, type, member)                                    \
+	({                                                                     \
+		typeof(ptr) ____ptr = (ptr);                                   \
+		____ptr ? hlist_entry(____ptr, type, member) : NULL;           \
+	})
+
+#undef hlist_for_each_entry
+#define hlist_for_each_entry(pos, head, member)                                \
+	for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);    \
+	     pos; pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)),   \
+					 member))
+
+#undef hlist_for_each_entry_safe
+#define hlist_for_each_entry_safe(pos, n, head, member)                        \
+	for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);      \
+	     pos && ({                                                         \
+		     n = pos->member.next;                                     \
+		     1;                                                        \
+	     });                                                               \
+	     pos = hlist_entry_safe(n, typeof(*pos), member))
+
+#undef hlist_for_each_entry_continue
+#define hlist_for_each_entry_continue(pos, member)                             \
+	for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)),        \
+				    member);                                   \
+	     pos; pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)),   \
+					 member))
+
+#undef hlist_for_each_entry_from
+#define hlist_for_each_entry_from(pos, member)                                 \
+	for (; pos; pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), \
+					   member))
+
+#undef hash_for_each
+#define hash_for_each(name, bkt, obj, member)                                  \
+	for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);    \
+	     (bkt)++)                                                          \
+		hlist_for_each_entry (obj, &name[bkt], member)
+
+#undef hash_for_each_safe
+#define hash_for_each_safe(name, bkt, tmp, obj, member)                        \
+	for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);    \
+	     (bkt)++)                                                          \
+		hlist_for_each_entry_safe (obj, tmp, &name[bkt], member)
+
+#undef hash_for_each_possible
+#define hash_for_each_possible(name, obj, member, key)                         \
+	hlist_for_each_entry (obj, &name[hash_min(key, HASH_BITS(name))],      \
+			      member)
+
+#undef hash_for_each_possible_safe
+#define hash_for_each_possible_safe(name, obj, tmp, member, key)               \
+	hlist_for_each_entry_safe (                                            \
+		obj, tmp, &name[hash_min(key, HASH_BITS(name))], member)
+
+#ifdef CONFIG_XPS
+int __kc_netif_set_xps_queue(struct net_device *, const struct cpumask *, u16);
+#define netif_set_xps_queue(_dev, _mask, _idx)                                 \
+	__kc_netif_set_xps_queue((_dev), (_mask), (_idx))
+#else /* CONFIG_XPS */
+#define netif_set_xps_queue(_dev, _mask, _idx)                                 \
+	do {                                                                   \
+	} while (0)
+#endif /* CONFIG_XPS */
+
+#ifdef HAVE_NETDEV_SELECT_QUEUE
+#define _kc_hashrnd 0xd631614b /* not so random hash salt */
+u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
+#define __netdev_pick_tx __kc_netdev_pick_tx
+#endif /* HAVE_NETDEV_SELECT_QUEUE */
+#else
+#define HAVE_BRIDGE_FILTER
+#define HAVE_FDB_DEL_NLATTR
+#endif /* < 3.9.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+#ifndef NAPI_POLL_WEIGHT
+#define NAPI_POLL_WEIGHT 64
+#endif
+#ifdef CONFIG_PCI_IOV
+int __kc_pci_vfs_assigned(struct pci_dev *dev);
+#else
+static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev)
+{
+	return 0;
+}
+#endif
+#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev)
+
+#ifndef list_first_entry_or_null
+#define list_first_entry_or_null(ptr, type, member)                            \
+	(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
+#endif
+
+#ifndef VLAN_TX_COOKIE_MAGIC
+static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb,
+							 u16 vlan_tci)
+{
+#ifdef VLAN_TAG_PRESENT
+	vlan_tci |= VLAN_TAG_PRESENT;
+#endif
+	skb->vlan_tci = vlan_tci;
+	return skb;
+}
+#define __vlan_hwaccel_push_inside(skb) __vlan_put_tag(skb, skb->vlan_tci)
+
+#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci)                      \
+	__kc__vlan_hwaccel_put_tag(skb, vlan_tci)
+#else
+
+#endif
+
+#ifdef HAVE_FDB_OPS
+#if defined(HAVE_NDO_FDB_ADD_NLATTR)
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+			  struct net_device *dev, const unsigned char *addr,
+			  u16 flags);
+#elif defined(USE_CONST_DEV_UC_CHAR)
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+			  const unsigned char *addr, u16 flags);
+#else
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+			  unsigned char *addr, u16 flags);
+#endif /* HAVE_NDO_FDB_ADD_NLATTR */
+#if defined(HAVE_FDB_DEL_NLATTR)
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+			  struct net_device *dev, const unsigned char *addr);
+#elif defined(USE_CONST_DEV_UC_CHAR)
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+			  const unsigned char *addr);
+#else
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+			  unsigned char *addr);
+#endif /* HAVE_FDB_DEL_NLATTR */
+#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add
+#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del
+#endif /* HAVE_FDB_OPS */
+
+#ifndef PCI_DEVID
+#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
+#endif
+
+/* The definitions for these functions when CONFIG_OF_NET is defined are
+ * pulled in from <linux/of_net.h>. For kernels older than 3.5 we already
+ * have backports for when CONFIG_OF_NET is true. These are separated and
+ * duplicated in order to cover all cases, so that every kernel gets either
+ * the real definitions (when CONFIG_OF_NET is defined) or the stub
+ * definitions below (when CONFIG_OF_NET is not defined, or the kernel is
+ * too old to have the real definitions).
+ */
+#ifndef CONFIG_OF_NET
+static inline int of_get_phy_mode(struct device_node __always_unused *np)
+{
+	return -ENODEV;
+}
+
+static inline const void *
+of_get_mac_address(struct device_node __always_unused *np)
+{
+	return NULL;
+}
+#endif
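+/* Illustrative sketch (not part of the patch): with the stubs above, probe
+ * code can unconditionally ask the device tree for a MAC address and fall
+ * back to a random one. "np" and "netdev" are hypothetical local names.
+ *
+ *	const void *mac = of_get_mac_address(np);
+ *
+ *	if (mac)
+ *		ether_addr_copy(netdev->dev_addr, mac);
+ *	else
+ *		eth_hw_addr_random(netdev);
+ *
+ * On kernels without CONFIG_OF_NET the stub returns NULL, so the random
+ * fallback is always taken.
+ */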
+
+#else /* >= 3.10.0 */
+#define HAVE_ENCAP_TSO_OFFLOAD
+#define USE_DEFAULT_FDB_DEL_DUMP
+#define HAVE_SKB_INNER_NETWORK_HEADER
+
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 0)))
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 0))
+#define HAVE_RHEL7_PCI_DRIVER_RH
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2))
+#define HAVE_RHEL7_PCI_RESET_NOTIFY
+#endif /* RHEL >= 7.2 */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3))
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 5))
+#define HAVE_GENEVE_RX_OFFLOAD
+#endif /* RHEL < 7.5 */
+#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC
+/* CentOS-7-aarch64-Everything-1810.iso does not define this */
+#ifndef CONFIG_ARM64
+#define HAVE_RHEL7_NET_DEVICE_OPS_EXT
+#endif /* CONFIG_ARM64 */
+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE)
+#define HAVE_UDP_ENC_TUNNEL
+#endif /* !HAVE_UDP_ENC_TUNNEL && CONFIG_GENEVE */
+#endif /* RHEL >= 7.3 */
+
+/* new hooks added to net_device_ops_extended in RHEL7.4 */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4))
+/* CentOS-7-aarch64-Everything-1810.iso does not define this */
+#ifndef CONFIG_ARM64
+#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN
+#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL
+#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_TX_MAXRATE
+#endif /* CONFIG_ARM64 */
+#define HAVE_UDP_ENC_RX_OFFLOAD
+#endif /* RHEL >= 7.4 */
+#else /* RHEL >= 8.0 */
+#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK
+#define NO_NETDEV_BPF_PROG_ATTACHED
+#define HAVE_NDO_SELECT_QUEUE_SB_DEV
+#endif /* RHEL >= 8.0 */
+#endif /* RHEL >= 7.0 */
+#endif /* >= 3.10.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0))
+#define netdev_notifier_info_to_dev(ptr) ptr
+#ifndef time_in_range64
+#define time_in_range64(a, b, c)                                               \
+	(time_after_eq64(a, b) && time_before_eq64(a, c))
+#endif /* time_in_range64 */
+#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 10)) || \
+     (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 4, 0)))
+#define HAVE_NDO_SET_VF_LINK_STATE
+#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2))
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#endif
+#else /* >= 3.11.0 */
+#define HAVE_NDO_SET_VF_LINK_STATE
+#define HAVE_SKB_INNER_PROTOCOL
+#define HAVE_MPLS_FEATURES
+#endif /* >= 3.11.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0))
+int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
+			       enum pcie_link_width *width);
+#ifndef pcie_get_minimum_link
+#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w)
+#endif
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6, 7))
+int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev);
+#define pci_wait_for_pending_transaction _kc_pci_wait_for_pending_transaction
+#endif /* RHEL < 6.7 */
+
+#else /* >= 3.12.0 */
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 0, 0))
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
+#define HAVE_VXLAN_RX_OFFLOAD
+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN)
+#define HAVE_UDP_ENC_TUNNEL
+#endif
+#endif /* < 4.8.0 */
+#define HAVE_NDO_GET_PHYS_PORT_ID
+#define HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK
+#endif /* >= 3.12.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+#define dma_set_mask_and_coherent(_p, _m) __kc_dma_set_mask_and_coherent(_p, _m)
+int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask);
+#ifndef u64_stats_init
+#define u64_stats_init(a)                                                      \
+	do {                                                                   \
+	} while (0)
+#endif
+#undef BIT_ULL
+#define BIT_ULL(n) (1ULL << (n))
+
+#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 0, 0)) &&       \
+     !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 0)))
+static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
+{
+	dev = pci_physfn(dev);
+	if (pci_is_root_bus(dev->bus))
+		return NULL;
+
+	return dev->bus->self;
+}
+#endif
+
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 1, 0))
+#undef HAVE_STRUCT_PAGE_PFMEMALLOC
+#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT
+#endif
+#ifndef list_next_entry
+#define list_next_entry(pos, member)                                           \
+	list_entry((pos)->member.next, typeof(*(pos)), member)
+#endif
+#ifndef list_prev_entry
+#define list_prev_entry(pos, member)                                           \
+	list_entry((pos)->member.prev, typeof(*(pos)), member)
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 20))
+#define devm_kcalloc(dev, cnt, size, flags)                                    \
+	devm_kzalloc(dev, (cnt) * (size), flags)
+#endif /* > 2.6.20 */
+
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2)))
+#define list_last_entry(ptr, type, member) list_entry((ptr)->prev, type, member)
+#endif
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0))
+bool _kc_pci_device_is_present(struct pci_dev *pdev);
+#define pci_device_is_present _kc_pci_device_is_present
+#endif /* RHEL < 7.0 */
+#else /* >= 3.13.0 */
+#define HAVE_VXLAN_CHECKS
+#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3, 13, 0, 24))
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#else
+#define HAVE_NDO_SELECT_QUEUE_ACCEL
+#endif
+#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS
+#endif
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+
+#ifndef U16_MAX
+#define U16_MAX ((u16)~0U)
+#endif
+
+#ifndef U32_MAX
+#define U32_MAX ((u32)~0U)
+#endif
+
+#ifndef U64_MAX
+#define U64_MAX ((u64)~0ULL)
+#endif
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2)))
+#define dev_consume_skb_any(x) dev_kfree_skb_any(x)
+#define dev_consume_skb_irq(x) dev_kfree_skb_irq(x)
+#endif
+
+#if (!(RHEL_RELEASE_CODE &&                                                    \
+       RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 0)) &&                     \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 0, 0)))
+
+/* it isn't expected that this would be a #define unless we made it so */
+#ifndef skb_set_hash
+
+#define PKT_HASH_TYPE_NONE 0
+#define PKT_HASH_TYPE_L2 1
+#define PKT_HASH_TYPE_L3 2
+#define PKT_HASH_TYPE_L4 3
+
+enum _kc_pkt_hash_types {
+	_KC_PKT_HASH_TYPE_NONE = PKT_HASH_TYPE_NONE,
+	_KC_PKT_HASH_TYPE_L2 = PKT_HASH_TYPE_L2,
+	_KC_PKT_HASH_TYPE_L3 = PKT_HASH_TYPE_L3,
+	_KC_PKT_HASH_TYPE_L4 = PKT_HASH_TYPE_L4,
+};
+#define pkt_hash_types _kc_pkt_hash_types
+
+#define skb_set_hash __kc_skb_set_hash
+static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb,
+				     u32 __maybe_unused hash,
+				     int __maybe_unused type)
+{
+#ifdef HAVE_SKB_L4_RXHASH
+	skb->l4_rxhash = (type == PKT_HASH_TYPE_L4);
+#endif
+#ifdef NETIF_F_RXHASH
+	skb->rxhash = hash;
+#endif
+}
+#endif /* !skb_set_hash */
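+/* Illustrative sketch (not part of the patch): with the shim above, an RX
+ * clean-up routine can report the hardware RSS hash the same way on old and
+ * new kernels. "rx_desc" and its rss_hash field are hypothetical names.
+ *
+ *	skb_set_hash(skb, le32_to_cpu(rx_desc->rss_hash), PKT_HASH_TYPE_L4);
+ *
+ * On pre-3.14 kernels this maps onto l4_rxhash/rxhash; on newer kernels the
+ * real skb_set_hash() is used.
+ */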
+
+#else /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */
+
+#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7, 0)) || \
+     (SLE_VERSION_CODE && SLE_VERSION_CODE <= SLE_VERSION(12, 1, 0)))
+/* GPLv2 code taken from 5.10-rc2 kernel source include/linux/pci.h, Copyright
+ * original authors.
+ */
+//static inline int pci_enable_msix_exact(struct pci_dev *dev,
+//					struct msix_entry *entries, int nvec)
+//{
+//	int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
+//	if (rc < 0)
+//		return rc;
+//	return 0;
+//}
+#endif /* <=EL7.0 || <=SLES 12.1 */
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5)))
+#ifndef HAVE_VXLAN_RX_OFFLOAD
+#define HAVE_VXLAN_RX_OFFLOAD
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
+#endif
+
+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN)
+#define HAVE_UDP_ENC_TUNNEL
+#endif
+
+#ifndef HAVE_VXLAN_CHECKS
+#define HAVE_VXLAN_CHECKS
+#endif /* HAVE_VXLAN_CHECKS */
+#endif /* !(RHEL_RELEASE_CODE >= 7.0 && SLE_VERSION_CODE >= 12.0) */
+
+#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3)) || \
+     (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 0, 0)))
+#define HAVE_NDO_DFWD_OPS
+#endif
+
+#ifndef pci_enable_msix_range
+int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
+			       int minvec, int maxvec);
+#define pci_enable_msix_range __kc_pci_enable_msix_range
+#endif
+
+#ifndef ether_addr_copy
+#define ether_addr_copy __kc_ether_addr_copy
+static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+	*(u32 *)dst = *(const u32 *)src;
+	*(u16 *)(dst + 4) = *(const u16 *)(src + 4);
+#else
+	u16 *a = (u16 *)dst;
+	const u16 *b = (const u16 *)src;
+
+	a[0] = b[0];
+	a[1] = b[1];
+	a[2] = b[2];
+#endif
+}
+#endif /* ether_addr_copy */
+int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
+		       int target, unsigned short *fragoff, int *flags);
+#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e))
+
+#ifndef OPTIMIZE_HIDE_VAR
+#ifdef __GNUC__
+#ifndef OPTIMIZER_HIDE_VAR
+#define OPTIMIZER_HIDE_VAR(var) __asm__("" : "=r"(var) : "0"(var))
+#endif
+#else
+#include 
+#define OPTIMIZE_HIDE_VAR(var) barrier()
+#endif
+#endif
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 0)) && \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10, 4, 0)))
+static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
+{
+#ifdef NETIF_F_RXHASH
+	return skb->rxhash;
+#else
+	return 0;
+#endif /* NETIF_F_RXHASH */
+}
+#endif /* !RHEL > 5.9 && !SLES >= 10.4 */
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 5))
+#define request_firmware_direct request_firmware
+#endif /* !RHEL || RHEL < 7.5 */
+
+#else /* >= 3.14.0 */
+
+/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */
+#ifndef HAVE_NDO_DFWD_OPS
+#define HAVE_NDO_DFWD_OPS
+#endif
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#endif /* 3.14.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+#define HAVE_SKBUFF_RXHASH
+#endif /* >= 2.6.35 */
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 1)) && \
+     !(UBUNTU_VERSION_CODE &&                                                  \
+       UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3, 13, 0, 30)))
+#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh
+#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh
+#endif
+
+char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp);
+#define devm_kstrdup(dev, s, gfp) _kc_devm_kstrdup(dev, s, gfp)
+
+#else /* >= 3.15.0 */
+#define HAVE_NET_GET_RANDOM_ONCE
+#define HAVE_PTP_1588_CLOCK_PINS
+#define HAVE_NETDEV_PORT
+#endif /* 3.15.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic() smp_mb()
+#endif
+#ifndef __dev_uc_sync
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+int __kc_hw_addr_sync_dev(
+	struct netdev_hw_addr_list *list, struct net_device *dev,
+	int (*sync)(struct net_device *, const unsigned char *),
+	int (*unsync)(struct net_device *, const unsigned char *));
+void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
+			     struct net_device *dev,
+			     int (*unsync)(struct net_device *,
+					   const unsigned char *));
+#endif
+#ifndef NETDEV_HW_ADDR_T_MULTICAST
+int __kc_dev_addr_sync_dev(
+	struct dev_addr_list **list, int *count, struct net_device *dev,
+	int (*sync)(struct net_device *, const unsigned char *),
+	int (*unsync)(struct net_device *, const unsigned char *));
+void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count,
+			      struct net_device *dev,
+			      int (*unsync)(struct net_device *,
+					    const unsigned char *));
+#endif
+#endif /* HAVE_SET_RX_MODE */
+
+static inline int __kc_dev_uc_sync(
+	struct net_device __maybe_unused *dev,
+	int __maybe_unused (*sync)(struct net_device *, const unsigned char *),
+	int __maybe_unused (*unsync)(struct net_device *,
+				     const unsigned char *))
+{
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+	return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
+#elif defined(HAVE_SET_RX_MODE)
+	return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count, dev, sync,
+				      unsync);
+#else
+	return 0;
+#endif
+}
+#define __dev_uc_sync __kc_dev_uc_sync
+
+static inline void
+__kc_dev_uc_unsync(struct net_device __maybe_unused *dev,
+		   int __maybe_unused (*unsync)(struct net_device *,
+						const unsigned char *))
+{
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+	__kc_hw_addr_unsync_dev(&dev->uc, dev, unsync);
+#else /* NETDEV_HW_ADDR_T_UNICAST */
+	__kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync);
+#endif /* NETDEV_HW_ADDR_T_UNICAST */
+#endif /* HAVE_SET_RX_MODE */
+}
+#define __dev_uc_unsync __kc_dev_uc_unsync
+
+static inline int __kc_dev_mc_sync(
+	struct net_device __maybe_unused *dev,
+	int __maybe_unused (*sync)(struct net_device *, const unsigned char *),
+	int __maybe_unused (*unsync)(struct net_device *,
+				     const unsigned char *))
+{
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+	return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
+#elif defined(HAVE_SET_RX_MODE)
+	return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count, dev, sync,
+				      unsync);
+#else
+	return 0;
+#endif
+}
+#define __dev_mc_sync __kc_dev_mc_sync
+
+static inline void
+__kc_dev_mc_unsync(struct net_device __maybe_unused *dev,
+		   int __maybe_unused (*unsync)(struct net_device *,
+						const unsigned char *))
+{
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+	__kc_hw_addr_unsync_dev(&dev->mc, dev, unsync);
+#else /* NETDEV_HW_ADDR_T_MULTICAST */
+	__kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync);
+#endif /* NETDEV_HW_ADDR_T_MULTICAST */
+#endif /* HAVE_SET_RX_MODE */
+}
+#define __dev_mc_unsync __kc_dev_mc_unsync
+#endif /* __dev_uc_sync */
+
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 1))
+#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+#endif
+
+#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM
+/* If someone backports this, hopefully they backport it as a #define.
+ * Declare it as zero on older kernels so that if it gets OR'd into a
+ * feature mask it won't affect anything, avoiding core driver changes.
+ */
+#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0
+#define SKB_GSO_UDP_TUNNEL_CSUM 0
+#endif
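+/* Illustrative sketch (not part of the patch): because the flags above are 0
+ * on kernels that lack them, feature setup code can stay unconditional.
+ * "netdev" is a hypothetical struct net_device pointer.
+ *
+ *	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ *
+ * On old kernels this ORs in 0 and is a no-op; on new kernels it advertises
+ * the offload.
+ */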
+void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len,
+			gfp_t gfp);
+#define devm_kmemdup __kc_devm_kmemdup
+
+#else
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)) &&                        \
+     !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 4, 0))))
+#define HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY
+#if defined(KYLIN_OS) || defined(CONFIG_KYLINOS_SERVER)
+#undef HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY
+#endif
+#endif /* >= 3.16.0 && < 4.13.0 && !(SLES >= 12sp4) */
+#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+#endif /* 3.16.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+#if (!RHEL_RELEASE_CODE)
+#define __vlan_hwaccel_push_inside(skb)                                        \
+	__vlan_put_tag(skb, skb->vlan_proto, skb->vlan_tci)
+
+#endif
+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 8) &&                       \
+      RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)) &&                       \
+	!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2))
+#ifndef timespec64
+#define timespec64 timespec
+static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
+{
+	return ts;
+}
+static inline struct timespec
+timespec64_to_timespec(const struct timespec64 ts64)
+{
+	return ts64;
+}
+#define timespec64_equal timespec_equal
+#define timespec64_compare timespec_compare
+#define set_normalized_timespec64 set_normalized_timespec
+#define timespec64_add_safe timespec_add_safe
+#define timespec64_add timespec_add
+#define timespec64_sub timespec_sub
+#define timespec64_valid timespec_valid
+#define timespec64_valid_strict timespec_valid_strict
+#define timespec64_to_ns timespec_to_ns
+#define ns_to_timespec64 ns_to_timespec
+#define ktime_to_timespec64 ktime_to_timespec
+#define ktime_get_ts64 ktime_get_ts
+#define ktime_get_real_ts64 ktime_get_real_ts
+#define timespec64_add_ns timespec_add_ns
+#endif /* timespec64 */
+#endif /* !(RHEL 6.8 <= x < 7.0) && !(RHEL >= 7.2) */
+
+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 8) &&                      \
+      RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0))
+static inline void ktime_get_real_ts64(struct timespec64 *ts)
+{
+	*ts = ktime_to_timespec64(ktime_get_real());
+}
+
+static inline void ktime_get_ts64(struct timespec64 *ts)
+{
+	*ts = ktime_to_timespec64(ktime_get());
+}
+#endif
+
+#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4))
+#define hlist_add_behind(_a, _b) hlist_add_after(_b, _a)
+#endif
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 5))
+#endif /* RHEL_RELEASE_CODE < RHEL7.5 */
+
+#if RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 3) &&     \
+	RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 3)
+static inline u64 ktime_get_ns(void)
+{
+	return ktime_to_ns(ktime_get());
+}
+
+static inline u64 ktime_get_real_ns(void)
+{
+	return ktime_to_ns(ktime_get_real());
+}
+
+static inline u64 ktime_get_boot_ns(void)
+{
+	return ktime_to_ns(ktime_get_boottime());
+}
+#endif /* RHEL < 7.3 */
+
+#else
+#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT
+#include 
+#define HAVE_RHASHTABLE
+#endif /* 3.17.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
+#ifndef NO_PTP_SUPPORT
+#include 
+struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb);
+void __kc_skb_complete_tx_timestamp(struct sk_buff *skb,
+				    struct skb_shared_hwtstamps *hwtstamps);
+#define skb_clone_sk __kc_skb_clone_sk
+#define skb_complete_tx_timestamp __kc_skb_complete_tx_timestamp
+#endif
+#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 2))))
+u32 __kc_eth_get_headlen(const struct net_device *dev, unsigned char *data,
+			 unsigned int max_len);
+#else
+unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len);
+#endif /* !RHEL >= 8.2 */
+
+#define eth_get_headlen __kc_eth_get_headlen
+#ifndef ETH_P_XDSA
+#define ETH_P_XDSA 0x00F8
+#endif
+/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 1))
+#define HAVE_SKBUFF_CSUM_LEVEL
+#endif /* >= RH 7.1 */
+
+/* RHEL 7.3 backported xmit_more */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3))
+#define HAVE_SKB_XMIT_MORE
+#endif /* >= RH 7.3 */
+
+#undef GENMASK
+#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#undef GENMASK_ULL
+#define GENMASK_ULL(h, l)                                                      \
+	(((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
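+/* Worked example (comment only): GENMASK(h, l) builds a mask with bits h..l
+ * set, e.g. GENMASK(15, 8) == 0xff00 and GENMASK_ULL(35, 32) == 0xf00000000ULL.
+ */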
+#else /*  3.18.0 */
+#define HAVE_SKBUFF_CSUM_LEVEL
+#define HAVE_SKB_XMIT_MORE
+#define HAVE_SKB_INNER_PROTOCOL_TYPE
+#endif /* 3.18.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 4))
+#else
+#define HAVE_NDO_FEATURES_CHECK
+#endif /* 3.18.4 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 5))
+#else
+#define HAVE_PCI_DEV_FLAGS_NO_BUS_RESET
+#endif /* 3.18.5 */
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7, 1))
+#define gettime64 gettime
+#define settime64 settime
+#define COMPAT_PTP_NO_PINS 1
+#define NO_PUSH_INSIDE
+#undef HAVE_NDO_FEATURES_CHECK /* 7.1 */
+#define __vlan_hwaccel_push_inside(skb)                                        \
+	__vlan_put_tag(skb, skb->vlan_proto, skb->vlan_tci)
+#endif
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 13))
+#ifndef WRITE_ONCE
+#define WRITE_ONCE(x, val) ({ ACCESS_ONCE(x) = (val); })
+#endif
+#endif /* 3.18.13 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
+/* netdev_phys_port_id renamed to netdev_phys_item_id */
+#define netdev_phys_item_id netdev_phys_port_id
+
+static inline void _kc_napi_complete_done(struct napi_struct *napi,
+					  int __always_unused work_done)
+{
+	napi_complete(napi);
+}
+/* don't use our backport if the distro kernels already have it */
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12, 3, 0))) ||        \
+	(RHEL_RELEASE_CODE &&                                                  \
+	 (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 5)))
+#define napi_complete_done _kc_napi_complete_done
+#endif
+
+int _kc_bitmap_print_to_pagebuf(bool list, char *buf,
+				const unsigned long *maskp, int nmaskbits);
+#define bitmap_print_to_pagebuf _kc_bitmap_print_to_pagebuf
+
+#ifndef NETDEV_RSS_KEY_LEN
+#define NETDEV_RSS_KEY_LEN (13 * 4)
+#endif
+#if (!(RHEL_RELEASE_CODE &&                                                    \
+       ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 7) &&                    \
+	 RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)) ||                    \
+	(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2)))))
+#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len)
+#endif /* RHEL_RELEASE_CODE */
+void __kc_netdev_rss_key_fill(void *buffer, size_t len);
+#define SPEED_20000 20000
+#define SPEED_40000 40000
+#ifndef dma_rmb
+#define dma_rmb() rmb()
+#endif
+#ifndef dev_alloc_pages
+#ifndef NUMA_NO_NODE
+#define NUMA_NO_NODE -1
+#endif
+#define dev_alloc_pages(_order)                                                \
+	alloc_pages_node(NUMA_NO_NODE,                                         \
+			 (GFP_ATOMIC | __GFP_COLD | __GFP_COMP |               \
+			  __GFP_MEMALLOC),                                     \
+			 (_order))
+#endif
+#ifndef dev_alloc_page
+#define dev_alloc_page() dev_alloc_pages(0)
+#endif
+#if !defined(eth_skb_pad) && !defined(skb_put_padto)
+/**
+ *     __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size
+ *     @skb: buffer to pad
+ *     @len: minimal length
+ *
+ *     Pads up a buffer to ensure the trailing bytes exist and are
+ *     blanked. If the buffer already contains sufficient data it
+ *     is untouched. Otherwise it is extended. Returns zero on
+ *     success. The skb is freed on error.
+ */
+static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len)
+{
+	unsigned int size = skb->len;
+
+	if (unlikely(size < len)) {
+		len -= size;
+		if (skb_pad(skb, len))
+			return -ENOMEM;
+		__skb_put(skb, len);
+	}
+	return 0;
+}
+#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len)
+
+static inline int __kc_eth_skb_pad(struct sk_buff *skb)
+{
+	return __kc_skb_put_padto(skb, ETH_ZLEN);
+}
+#define eth_skb_pad(skb) __kc_eth_skb_pad(skb)
+#endif /* eth_skb_pad && skb_put_padto */
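+/* Illustrative sketch (not part of the patch): a transmit path can rely on
+ * eth_skb_pad() to enforce the minimum Ethernet frame length (ETH_ZLEN, 60
+ * bytes) before handing the buffer to hardware that does not pad short
+ * frames itself.
+ *
+ *	if (eth_skb_pad(skb))
+ *		return NETDEV_TX_OK;
+ *
+ * On failure the skb has already been freed, so the sketch just reports the
+ * frame as handled.
+ */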
+
+#ifndef SKB_ALLOC_NAPI
+/* RHEL 7.2 backported napi_alloc_skb and friends */
+static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi,
+						  unsigned int length)
+{
+	return netdev_alloc_skb_ip_align(napi->dev, length);
+}
+#define napi_alloc_skb(napi, len) __kc_napi_alloc_skb(napi, len)
+#define __napi_alloc_skb(napi, len, mask) __kc_napi_alloc_skb(napi, len)
+#endif /* SKB_ALLOC_NAPI */
+#define HAVE_CONFIG_PM_RUNTIME
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 7)) &&  \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)))
+#define HAVE_RXFH_HASHFUNC
+#endif /* 6.7 < RHEL < 7.0 */
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 1))
+#define HAVE_RXFH_HASHFUNC
+#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS
+#endif /* RHEL > 7.1 */
+#ifndef napi_schedule_irqoff
+#define napi_schedule_irqoff napi_schedule
+#endif
+#ifndef READ_ONCE
+#define READ_ONCE(_x) ACCESS_ONCE(_x)
+#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2))
+#define HAVE_NDO_FDB_ADD_VID
+#endif
+#ifndef ETH_MODULE_SFF_8636
+#define ETH_MODULE_SFF_8636 0x3
+#endif
+#ifndef ETH_MODULE_SFF_8636_LEN
+#define ETH_MODULE_SFF_8636_LEN 256
+#endif
+#ifndef ETH_MODULE_SFF_8436
+#define ETH_MODULE_SFF_8436 0x4
+#endif
+#ifndef ETH_MODULE_SFF_8436_LEN
+#define ETH_MODULE_SFF_8436_LEN 256
+#endif
+#ifndef writel_relaxed
+#define writel_relaxed writel
+#endif
+#else /* 3.19.0 */
+#define HAVE_NDO_FDB_ADD_VID
+#define HAVE_RXFH_HASHFUNC
+#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS
+#endif /* 3.19.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 20, 0))
+/* the vlan_tx_tag_* helpers were renamed to skb_vlan_tag_* */
+#ifndef skb_vlan_tag_get
+#define skb_vlan_tag_get vlan_tx_tag_get
+#endif
+#ifndef skb_vlan_tag_present
+#define skb_vlan_tag_present vlan_tx_tag_present
+#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 1))
+#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H
+#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2))
+#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS
+#endif
+#else
+#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H
+#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS
+#endif /* 3.20.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0))
+/* Definition for CONFIG_OF was introduced earlier */
+#if !defined(CONFIG_OF) &&                                                     \
+	!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2))
+static inline struct device_node *
+pci_device_to_OF_node(const struct pci_dev __always_unused *pdev)
+{
+	return NULL;
+}
+#else /* !CONFIG_OF && RHEL < 7.3 */
+#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT
+#endif /* !CONFIG_OF && RHEL < 7.3 */
+#else /* < 4.0 */
+#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT
+#endif /* < 4.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0))
+#ifndef NO_PTP_SUPPORT
+#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H
+#include 
+#else
+#include 
+#endif
+static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta)
+{
+	tc->nsec += delta;
+}
+
+static inline struct net_device *
+of_find_net_device_by_node(struct device_node __always_unused *np)
+{
+	return NULL;
+}
+
+#define timecounter_adjtime __kc_timecounter_adjtime
+#endif
+#if ((RHEL_RELEASE_CODE &&                                                     \
+      (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2))) ||                    \
+     (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 2, 0))))
+#define HAVE_NDO_SET_VF_RSS_QUERY_EN
+#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2))
+#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
+#define HAVE_RHEL7_EXTENDED_NDO_SET_TX_MAXRATE
+#define HAVE_NDO_SET_TX_MAXRATE
+#endif
+#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 8) &&                       \
+       RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)) &&                      \
+      (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) &&                      \
+      (SLE_VERSION_CODE > SLE_VERSION(12, 1, 0)))
+unsigned int _kc_cpumask_local_spread(unsigned int i, int node);
+#define cpumask_local_spread _kc_cpumask_local_spread
+#endif
+#ifdef HAVE_RHASHTABLE
+#define rhashtable_lookup_fast(ht, key, params)                                \
+	do {                                                                   \
+		(void)params;                                                  \
+		rhashtable_lookup((ht), (key));                                \
+	} while (0)
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
+#define rhashtable_insert_fast(ht, obj, params)                                \
+	do {                                                                   \
+		(void)params;                                                  \
+		rhashtable_insert((ht), (obj), GFP_KERNEL);                    \
+	} while (0)
+
+#define rhashtable_remove_fast(ht, obj, params)                                \
+	do {                                                                   \
+		(void)params;                                                  \
+		rhashtable_remove((ht), (obj), GFP_KERNEL);                    \
+	} while (0)
+
+#else /* >= 3,19,0 */
+#define rhashtable_insert_fast(ht, obj, params)                                \
+	do {                                                                   \
+		(void)params;                                                  \
+		rhashtable_insert((ht), (obj));                                \
+	} while (0)
+
+#define rhashtable_remove_fast(ht, obj, params)                                \
+	do {                                                                   \
+		(void)params;                                                  \
+		rhashtable_remove((ht), (obj));                                \
+	} while (0)
+
+#endif /* 3,19,0 */
+#endif /* HAVE_RHASHTABLE */
+#else /* >= 4,1,0 */
+#define HAVE_NDO_GET_PHYS_PORT_NAME
+#define HAVE_PTP_CLOCK_INFO_GETTIME64
+#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
+#define HAVE_PASSTHRU_FEATURES_CHECK
+#define HAVE_NDO_SET_VF_RSS_QUERY_EN
+#define HAVE_NDO_SET_TX_MAXRATE
+#endif /* 4,1,0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 9))
+#if (!(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2)) &&                      \
+     !((SLE_VERSION_CODE == SLE_VERSION(11, 3, 0)) &&                          \
+       (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(0, 47, 71))) &&              \
+     !((SLE_VERSION_CODE == SLE_VERSION(11, 4, 0)) &&                          \
+       (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(65, 0, 0))) &&               \
+     !(SLE_VERSION_CODE >= SLE_VERSION(12, 1, 0)))
+static inline bool page_is_pfmemalloc(struct page __maybe_unused *page)
+{
+#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC
+	return page->pfmemalloc;
+#else
+	return false;
+#endif
+}
+#endif /* !RHEL7.2+ && !SLES11sp3(3.0.101-0.47.71+ update) && !SLES11sp4(3.0.101-65+ update) & !SLES12sp1+ */
+#else
+#undef HAVE_STRUCT_PAGE_PFMEMALLOC
+#endif /* 4.1.9 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0))
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2)) &&                     \
+     !(SLE_VERSION_CODE >= SLE_VERSION(12, 1, 0)))
+#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFULL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000ULL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32
+static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
+{
+	return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
+};
+
+static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
+{
+	return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
+	       ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+};
+#endif /* ! RHEL >= 7.2 && ! SLES >= 12.1 */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4))
+#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 27))
+#if (!((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 8) &&                     \
+	RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)) ||                     \
+       RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2)))
+static inline bool pci_ari_enabled(struct pci_bus *bus)
+{
+	return bus->self && bus->self->ari_enabled;
+}
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2))
+#define HAVE_VF_STATS
+#endif /* (RHEL7.2+) */
+#endif /* !(RHEL6.8+ || RHEL7.2+) */
+#else
+static inline bool pci_ari_enabled(struct pci_bus *bus)
+{
+	return false;
+}
+#endif /* 2.6.27 */
+#else
+#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
+#define HAVE_VF_STATS
+#endif /* 4.2.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0))
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) &&                     \
+     !(SLE_VERSION_CODE >= SLE_VERSION(12, 2, 0)))
+/**
+ * _kc_flow_dissector_key_ipv4_addrs:
+ * @src: source ip address
+ * @dst: destination ip address
+ */
+struct _kc_flow_dissector_key_ipv4_addrs {
+	__be32 src;
+	__be32 dst;
+};
+
+/**
+ * _kc_flow_dissector_key_ipv6_addrs:
+ * @src: source ip address
+ * @dst: destination ip address
+ */
+struct _kc_flow_dissector_key_ipv6_addrs {
+	struct in6_addr src;
+	struct in6_addr dst;
+};
+
+/**
+ * _kc_flow_dissector_key_addrs:
+ * @v4addrs: IPv4 addresses
+ * @v6addrs: IPv6 addresses
+ */
+struct _kc_flow_dissector_key_addrs {
+	union {
+		struct _kc_flow_dissector_key_ipv4_addrs v4addrs;
+		struct _kc_flow_dissector_key_ipv6_addrs v6addrs;
+	};
+};
+
+/**
+ * _kc_flow_dissector_key_ports:
+ *	@ports: port numbers of the transport header
+ *		src: source port number
+ *		dst: destination port number
+ */
+struct _kc_flow_dissector_key_ports {
+	union {
+		__be32 ports;
+		struct {
+			__be16 src;
+			__be16 dst;
+		};
+	};
+};
+
+/**
+ * _kc_flow_dissector_key_basic:
+ * @n_proto: Network header protocol (eg. IPv4/IPv6)
+ * @ip_proto: Transport header protocol (eg. TCP/UDP)
+ * @padding: padding for alignment
+ */
+struct _kc_flow_dissector_key_basic {
+	__be16 n_proto;
+	u8 ip_proto;
+	u8 padding;
+};
+
+struct _kc_flow_keys {
+	struct _kc_flow_dissector_key_basic basic;
+	struct _kc_flow_dissector_key_ports ports;
+	struct _kc_flow_dissector_key_addrs addrs;
+};
+
+/* These are all the include files, for kernels inside this #ifdef block, that
+ * reference the in-kernel definition of struct flow_keys. They are pulled in
+ * here to make sure they are not included after flow_keys has been redefined
+ * to _kc_flow_keys, preventing any ABI issues the redefinition could cause.
+ */
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0) &&                         \
+      LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)) ||                         \
+     RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 7) ||                        \
+     SLE_VERSION_CODE >= SLE_VERSION(11, 4, 0))
+#include 
+#endif /* (>= 3.3.0 && < 4.2.0) || >= RHEL 6.7  || >= SLE 11.4 */
+#if (LINUX_VERSION_CODE == KERNEL_VERSION(4, 2, 0))
+#include 
+#endif /* 4.2.0 */
+#include 
+#include 
+#include 
+#include 
+
+#define flow_keys _kc_flow_keys
+bool _kc_skb_flow_dissect_flow_keys(const struct sk_buff *skb,
+				    struct flow_keys *flow,
+				    unsigned int __always_unused flags);
+#define skb_flow_dissect_flow_keys _kc_skb_flow_dissect_flow_keys
+#endif /* ! >= RHEL 7.4 && ! >= SLES 12.2 */
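+/* Illustrative sketch (not part of the patch): with the redefinition above,
+ * flow-steering code can dissect a packet the same way on old and new
+ * kernels. "skb" is a hypothetical received buffer.
+ *
+ *	struct flow_keys keys;
+ *
+ *	if (skb_flow_dissect_flow_keys(skb, &keys, 0))
+ *		pr_debug("proto %u sport %u\n", keys.basic.ip_proto,
+ *			 ntohs(keys.ports.src));
+ *
+ * On pre-4.3 kernels without the distro backports this resolves to
+ * _kc_skb_flow_dissect_flow_keys() and struct _kc_flow_keys.
+ */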
+
+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3)) ||                      \
+     (SLE_VERSION_CODE >= SLE_VERSION(12, 2, 0)))
+#include 
+#endif /* >= RHEL7.3 || >= SLE12sp2 */
+#else /* >= 4.3.0 */
+#include 
+#endif /* 4.3.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3))
+#define HAVE_NDO_SET_VF_TRUST
+#endif /* (RHEL_RELEASE >= 7.3) */
+#ifndef CONFIG_64BIT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
+#include  /* 32-bit readq/writeq */
+#else /* 3.3.0 => 4.3.x */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26))
+#include 
+#endif /* 2.6.26 => 3.3.0 */
+#ifndef readq
+static inline __u64 readq(const volatile void __iomem *addr)
+{
+	const volatile u32 __iomem *p = addr;
+	u32 low, high;
+
+	low = readl(p);
+	high = readl(p + 1);
+
+	return low + ((u64)high << 32);
+}
+#define readq readq
+#endif
+
+#ifndef writeq
+static inline void writeq(__u64 val, volatile void __iomem *addr)
+{
+	writel(val, addr);
+	writel(val >> 32, (u8 *)addr + 4);
+}
+#define writeq writeq
+#endif
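+/* Note (comment only): the readq()/writeq() fallbacks above split the access
+ * into two 32-bit MMIO operations (low word first), so they are not atomic.
+ * They are only safe for registers that the hardware allows to be accessed
+ * as two separate dwords.
+ */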
+#endif /* < 3.3.0 */
+#endif /* !CONFIG_64BIT */
+#else /* < 4.4.0 */
+#define HAVE_NDO_SET_VF_TRUST
+
+#ifndef CONFIG_64BIT
+#include  /* 32-bit readq/writeq */
+#endif /* !CONFIG_64BIT */
+#endif /* 4.4.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
+/* protect against a likely backport */
+#ifndef NETIF_F_CSUM_MASK
+#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM
+#endif /* NETIF_F_CSUM_MASK */
+#ifndef NETIF_F_SCTP_CRC
+#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM
+#endif /* NETIF_F_SCTP_CRC */
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3)))
+#define eth_platform_get_mac_address _kc_eth_platform_get_mac_address
+int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused,
+				     u8 *mac_addr __maybe_unused);
+#endif /* !(RHEL_RELEASE >= 7.3) */
+#else /* 4.5.0 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
+#define HAVE_GENEVE_RX_OFFLOAD
+#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE)
+#define HAVE_UDP_ENC_TUNNEL
+#endif
+#endif /* < 4.8.0 */
+#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD
+#define HAVE_NETDEV_UPPER_INFO
+#endif /* 4.5.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
+#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 3))
+static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22))
+	return skb->head + skb->csum_start;
+#else /* < 2.6.22 */
+	return skb_transport_header(skb);
+#endif
+}
+#endif
+
+#if !(UBUNTU_VERSION_CODE &&                                                   \
+      UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4, 4, 0, 21)) &&                   \
+	!(RHEL_RELEASE_CODE &&                                                 \
+	  (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2))) &&                 \
+	!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0)))
+#ifndef NONEED_NAPI_CONSUME_SKB
+static inline void napi_consume_skb(struct sk_buff *skb,
+				    int __always_unused budget)
+{
+	dev_consume_skb_any(skb);
+}
+#endif
+#endif /* UBUNTU 4,4,0,21, RHEL 7.2, SLES12 SP3 */
+#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0))) &&      \
+	!(RHEL_RELEASE_CODE &&                                                 \
+	  RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4))
+#ifndef NONEED_CSUM_REPLACE_BY_DIFF
+static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
+{
+	*sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
+}
+#endif
+#endif
+#if !(RHEL_RELEASE_CODE &&                                                     \
+      (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 2))) &&                     \
+	!(SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12, 3, 0)))
+static inline void page_ref_inc(struct page *page)
+{
+	get_page(page);
+}
+#else
+#define HAVE_PAGE_COUNT_BULK_UPDATE
+#endif
+#ifndef IPV4_USER_FLOW
+#define IPV4_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */
+#endif
+
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4))
+#define HAVE_TC_SETUP_CLSFLOWER
+#define HAVE_TC_FLOWER_ENC
+#endif
+
+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 7)) ||                      \
+     (SLE_VERSION_CODE >= SLE_VERSION(12, 2, 0)))
+#define HAVE_TC_SETUP_CLSU32
+#endif
+
+#if (SLE_VERSION_CODE >= SLE_VERSION(12, 2, 0))
+#define HAVE_TC_SETUP_CLSFLOWER
+#endif
+
+#ifndef kstrtobool
+#define kstrtobool _kc_kstrtobool
+int _kc_kstrtobool(const char *s, bool *res);
+#endif
+
+#else /* >= 4.6.0 */
+#define HAVE_PAGE_COUNT_BULK_UPDATE
+#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC
+#define HAVE_PTP_CROSSTIMESTAMP
+#define HAVE_TC_SETUP_CLSFLOWER
+#define HAVE_TC_SETUP_CLSU32
+#endif /* 4.6.0 */
+
+#if defined(KYLIN_OS) || defined(CONFIG_KYLINOS_SERVER)
+//#if ( LINUX_VERSION_CODE > KERNEL_VERSION(4,19,0) )
+#if defined(KYLIN_RELEASE_CODE)
+#if (KYLIN_RELEASE_CODE <= KYLIN_RELEASE_VERSION(10, 2)) ||                    \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 64))
+#undef HAVE_TC_SETUP_CLSFLOWER
+#endif
+#if (KYLIN_RELEASE_CODE == KYLIN_RELEASE_VERSION(10, 3))
+// disable this on Kylin V10 SP3
+#undef HAVE_SKB_XMIT_MORE
+#endif
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 90))
+#undef HAVE_TC_SETUP_CLSFLOWER
+#endif
+// work around a bonding/ethtool issue on Kylin V4 kernels
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 4, 131))
+#define KYLIN_V4_ETHTOOL_FIX_BOND
+#endif
+
+#endif
+
+#if defined(KYLIN_RELEASE_CODE)
+#if (KYLIN_RELEASE_CODE == KYLIN_RELEASE_VERSION(10, 4))
+#undef NEED_DEVLINK_REGION_CREATE_OPS
+#undef NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY
+#undef NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY
+#undef NEED_ETH_HW_ADDR_SET
+#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS
+#define HAVE_NDO_BRIDGE_SETLINK_EXTACK
+#endif
+#endif
+
+/* UOS (UnionTech OS) detection */
+#if defined(UOS_OS) || defined(UTS_RELEASE)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
+#undef HAVE_TC_SETUP_CLSFLOWER
+#define NO_ETH_GET_HEADLEN
+#undef NEED_NETDEV_TX_SENT_QUEUE
+#define NO_NEED_PTP_SYSTEM_TIMESTAMP
+#endif
+#endif
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0))
+#if ((SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0)) ||                            \
+     (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)))
+#define HAVE_NETIF_TRANS_UPDATE
+#endif /* SLES12sp3+ || RHEL7.4+ */
+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 3)) ||                      \
+     (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0)))
+#define HAVE_ETHTOOL_25G_BITS
+#define HAVE_ETHTOOL_50G_BITS
+#define HAVE_ETHTOOL_100G_BITS
+#endif /* RHEL7.3+ || SLES12sp3+ */
+#ifdef ETHTOOL_GLINKSETTINGS /* kernel ethtool.h */
+#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7, 3))
+#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE
+#endif
+#endif
+#else /* 4.7.0 */
+#define HAVE_NETIF_TRANS_UPDATE
+#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE
+#define HAVE_ETHTOOL_25G_BITS
+#define HAVE_ETHTOOL_50G_BITS
+#define HAVE_ETHTOOL_100G_BITS
+#define HAVE_TCF_MIRRED_REDIRECT
+#endif /* 4.7.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
+#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4))
+enum udp_parsable_tunnel_type {
+	UDP_TUNNEL_TYPE_VXLAN,
+	UDP_TUNNEL_TYPE_GENEVE,
+};
+struct udp_tunnel_info {
+	unsigned short type;
+	sa_family_t sa_family;
+	__be16 port;
+};
+#endif
+
+#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE < UBUNTU_VERSION(4, 8, 0, 0))
+#define tc_no_actions(_exts) true
+#define tc_for_each_action(_a, _exts) while (0)
+#endif
+#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0))) &&      \
+	!(RHEL_RELEASE_CODE &&                                                 \
+	  RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4))
+#ifndef NONEED_PCI_REQUEST_IO_REGIONS
+static inline int
+#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME
+pci_request_io_regions(struct pci_dev *pdev, char *name)
+#else
+pci_request_io_regions(struct pci_dev *pdev, const char *name)
+#endif
+{
+	return pci_request_selected_regions(
+		pdev, pci_select_bars(pdev, IORESOURCE_IO), name);
+}
+
+static inline void pci_release_io_regions(struct pci_dev *pdev)
+{
+	return pci_release_selected_regions(
+		pdev, pci_select_bars(pdev, IORESOURCE_IO));
+}
+
+static inline int
+#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME
+pci_request_mem_regions(struct pci_dev *pdev, char *name)
+#else
+pci_request_mem_regions(struct pci_dev *pdev, const char *name)
+#endif
+{
+	return pci_request_selected_regions(
+		pdev, pci_select_bars(pdev, IORESOURCE_MEM), name);
+}
+
+static inline void pci_release_mem_regions(struct pci_dev *pdev)
+{
+	return pci_release_selected_regions(
+		pdev, pci_select_bars(pdev, IORESOURCE_MEM));
+}
+#endif
+#endif /* !SLE_VERSION(12,3,0) */
+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) ||                      \
+     (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0)))
+#define HAVE_ETHTOOL_NEW_50G_BITS
+#endif /* RHEL7.4+ || SLES12sp3+ */
+#else
+#define HAVE_UDP_ENC_RX_OFFLOAD
+#define HAVE_ETHTOOL_NEW_50G_BITS
+#endif /* 4.8.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0))
+#ifdef HAVE_TC_SETUP_CLSFLOWER
+#if (!(RHEL_RELEASE_CODE) && !(SLE_VERSION_CODE) ||                            \
+     (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12, 3, 0))))
+#define HAVE_TC_FLOWER_VLAN_IN_TAGS
+#endif /* !RHEL_RELEASE_CODE && !SLE_VERSION_CODE || SLES < 12.3 */
+#endif /* HAVE_TC_SETUP_CLSFLOWER */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4))
+#define HAVE_ETHTOOL_NEW_1G_BITS
+#define HAVE_ETHTOOL_NEW_10G_BITS
+#endif /* RHEL7.4+ */
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 4))
+static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
+{
+	dst[0] = mask & ULONG_MAX;
+
+	if (sizeof(mask) > sizeof(unsigned long))
+		dst[1] = mask >> 32;
+}
+#endif /* RHEL < 7.4 */
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) &&                    \
+     !(SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0)) &&                           \
+     !(UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4, 13, 0, 16)))
+#ifndef NONEED_ETH_TYPE_VLAN
+static inline bool eth_type_vlan(__be16 ethertype)
+{
+	switch (ethertype) {
+	case htons(ETH_P_8021Q):
+#ifdef ETH_P_8021AD
+	case htons(ETH_P_8021AD):
+#endif
+		return true;
+	default:
+		return false;
+	}
+}
+#endif
+#endif /* Linux < 4.9 || RHEL < 7.4 || SLES < 12.3 || Ubuntu < 4.13.0-16 */
+#else /* >=4.9 */
+#define HAVE_FLOW_DISSECTOR_KEY_VLAN_PRIO
+#define HAVE_ETHTOOL_NEW_1G_BITS
+#define HAVE_ETHTOOL_NEW_10G_BITS
+#endif /* KERNEL_VERSION(4.9.0) */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+/* SLES 12.3 and RHEL 7.5 backported this interface */
+#if (!SLE_VERSION_CODE && !RHEL_RELEASE_CODE) ||                               \
+	(SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12, 3, 0))) ||    \
+	(RHEL_RELEASE_CODE &&                                                  \
+	 (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 5)))
+static inline bool _kc_napi_complete_done2(struct napi_struct *napi,
+					   int __always_unused work_done)
+{
+	/* it was really hard to get napi_complete_done to be safe to call
+	 * recursively without running into our own kcompat, so just use
+	 * napi_complete
+	 */
+	napi_complete(napi);
+
+	/* true means that the stack is telling the driver to go-ahead and
+	 * re-enable interrupts
+	 */
+	return true;
+}
+
+#ifdef napi_complete_done
+#undef napi_complete_done
+#endif
+#define napi_complete_done _kc_napi_complete_done2
+#endif /* sles and rhel exclusion for < 4.10 */
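+/* Illustrative sketch (not part of the patch): a NAPI poll routine can call
+ * napi_complete_done() uniformly; on kernels covered by the shim above it
+ * falls back to napi_complete() and always reports "re-enable interrupts".
+ * "q_vector" and rnp_irq_enable_queues() are hypothetical names.
+ *
+ *	if (work_done < budget) {
+ *		if (napi_complete_done(napi, work_done))
+ *			rnp_irq_enable_queues(q_vector);
+ *		return work_done;
+ *	}
+ *	return budget;
+ */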
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4))
+#define HAVE_DEV_WALK_API
+#define HAVE_ETHTOOL_NEW_2500MB_BITS
+#define HAVE_ETHTOOL_5G_BITS
+#endif /* RHEL7.4+ */
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE == SLE_VERSION(12, 3, 0)))
+#define HAVE_STRUCT_DMA_ATTRS
+#endif /* (SLES == 12.3.0) */
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0)))
+#define HAVE_NETDEVICE_MIN_MAX_MTU
+#endif /* (SLES >= 12.3.0) */
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5)))
+#define HAVE_STRUCT_DMA_ATTRS
+#define HAVE_RHEL7_EXTENDED_MIN_MAX_MTU
+#define HAVE_NETDEVICE_MIN_MAX_MTU
+#endif
+#if (!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0))) &&     \
+     !(RHEL_RELEASE_CODE &&                                                    \
+       (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5))))
+#ifndef dma_map_page_attrs
+#define dma_map_page_attrs __kc_dma_map_page_attrs
+static inline dma_addr_t
+__kc_dma_map_page_attrs(struct device *dev, struct page *page, size_t offset,
+			size_t size, enum dma_data_direction dir,
+			unsigned long __always_unused attrs)
+{
+	return dma_map_page(dev, page, offset, size, dir);
+}
+#endif
+
+#ifndef dma_unmap_page_attrs
+#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs
+static inline void
+__kc_dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
+			  enum dma_data_direction dir,
+			  unsigned long __always_unused attrs)
+{
+	dma_unmap_page(dev, addr, size, dir);
+}
+#endif
+
+static inline void __page_frag_cache_drain(struct page *page,
+					   unsigned int count)
+{
+#ifdef HAVE_PAGE_COUNT_BULK_UPDATE
+	if (!page_ref_sub_and_test(page, count))
+		return;
+
+	init_page_count(page);
+#else
+	BUG_ON(count > 1);
+	if (!count)
+		return;
+#endif
+	__free_pages(page, compound_order(page));
+}
+#endif /* !SLE_VERSION(12,3,0) && !RHEL_VERSION(7,5) */
+#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12, 3, 0))) ||       \
+     (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5)))
+#define HAVE_SWIOTLB_SKIP_CPU_SYNC
+#endif
+
+#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(15, 0, 0))) ||       \
+     (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7, 4))))
+#define page_frag_free __free_page_frag
+#endif
+#ifndef ETH_MIN_MTU
+#define ETH_MIN_MTU 68
+#endif /* ETH_MIN_MTU */
+
+/* If the kernel is older than 4.10 but the distro is RHEL >= 7.5 or
+ * SLES >= 12 SP4, it does have support for NAPI_STATE_IN_BUSY_POLL.
+ */
+#if ((RHEL_RELEASE_CODE &&                                                     \
+      (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5))) ||                    \
+     (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 4, 0))))
+#define HAVE_NAPI_STATE_IN_BUSY_POLL
+#endif /* RHEL >= 7.5 || SLES >=12.4 */
+#else /* >= 4.10 */
+#define HAVE_TC_FLOWER_ENC
+#define HAVE_NETDEVICE_MIN_MAX_MTU
+#define HAVE_SWIOTLB_SKIP_CPU_SYNC
+#define HAVE_NETDEV_TC_RESETS_XPS
+#define HAVE_XPS_QOS_SUPPORT
+#define HAVE_DEV_WALK_API
+#define HAVE_ETHTOOL_NEW_2500MB_BITS
+#define HAVE_ETHTOOL_5G_BITS
+/* From kernel 4.10 onwards, as part of the busy_poll rewrite, new state
+ * bits were added to napi->state. If NAPI_STATE_IN_BUSY_POLL is set,
+ * napi_poll is being invoked in busy_poll context.
+ */
+#define HAVE_NAPI_STATE_IN_BUSY_POLL
+#define HAVE_TCF_MIRRED_EGRESS_REDIRECT
+#define HAVE_PTP_CLOCK_INFO_ADJFINE
+#endif /* 4.10.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#define HAVE_NDO_BUSY_POLL
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0))) ||      \
+     (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5))))
+#define HAVE_VOID_NDO_GET_STATS64
+#endif /* (SLES >= 12.3.0) && (RHEL >= 7.5) */
+
+static inline void _kc_dev_kfree_skb_irq(struct sk_buff *skb)
+{
+	if (!skb)
+		return;
+	dev_kfree_skb_irq(skb);
+}
+
+#undef dev_kfree_skb_irq
+#define dev_kfree_skb_irq _kc_dev_kfree_skb_irq
+
+static inline void _kc_dev_consume_skb_irq(struct sk_buff *skb)
+{
+	if (!skb)
+		return;
+	dev_consume_skb_irq(skb);
+}
+
+#undef dev_consume_skb_irq
+#define dev_consume_skb_irq _kc_dev_consume_skb_irq
+
+static inline void _kc_dev_kfree_skb_any(struct sk_buff *skb)
+{
+	if (!skb)
+		return;
+	dev_kfree_skb_any(skb);
+}
+
+#undef dev_kfree_skb_any
+#define dev_kfree_skb_any _kc_dev_kfree_skb_any
+
+static inline void _kc_dev_consume_skb_any(struct sk_buff *skb)
+{
+	if (!skb)
+		return;
+	dev_consume_skb_any(skb);
+}
+
+#undef dev_consume_skb_any
+#define dev_consume_skb_any _kc_dev_consume_skb_any
+
+#else /* > 4.11 */
+#define HAVE_VOID_NDO_GET_STATS64
+#define HAVE_VM_OPS_FAULT_NO_VMA
+#endif /* 4.11.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 7) &&                        \
+     RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 0))
+/* The RHEL 7.7+ NL_SET_ERR_MSG_MOD triggers unused parameter warnings */
+#undef NL_SET_ERR_MSG_MOD
+#endif
+/* If the kernel is older than 4.12 but the distro is RHEL >= 7.5 or
+ * SLES >= 12SP4, it does have support for MIN_NAPI_ID.
+ */
+#if ((RHEL_RELEASE_CODE &&                                                     \
+      (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5))) ||                    \
+     (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 4, 0))))
+#define HAVE_MIN_NAPI_ID
+#endif /* RHEL >= 7.5 || SLES >= 12.4 */
+#ifndef NL_SET_ERR_MSG_MOD
+#define NL_SET_ERR_MSG_MOD(extack, msg)                                        \
+	do {                                                                   \
+		uninitialized_var(extack);                                     \
+		pr_err(KBUILD_MODNAME ": " msg);                               \
+	} while (0)
+#endif /* !NL_SET_ERR_MSG_MOD */
+#else /* >= 4.12 */
+#define HAVE_MIN_NAPI_ID
+#endif /* 4.12 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
+#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12, 3, 0))) ||       \
+     (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5)))
+#define HAVE_TCF_EXTS_HAS_ACTION
+#endif
+#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 4, 0)))
+#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE
+#endif /* SLES >= 12sp4 */
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5)) &&                     \
+     !(SLE_VERSION_CODE >= SLE_VERSION(12, 4, 0)))
+#ifndef NONEED_UUID_SIZE
+#define UUID_SIZE 16
+typedef struct {
+	__u8 b[UUID_SIZE];
+} uuid_t;
+#define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)                     \
+	((uuid_t){ { ((a) >> 24) & 0xff, ((a) >> 16) & 0xff,                   \
+		     ((a) >> 8) & 0xff, (a) & 0xff, ((b) >> 8) & 0xff,         \
+		     (b) & 0xff, ((c) >> 8) & 0xff, (c) & 0xff, (d0), (d1),    \
+		     (d2), (d3), (d4), (d5), (d6), (d7) } })
+
+static inline bool uuid_equal(const uuid_t *u1, const uuid_t *u2)
+{
+	return memcmp(u1, u2, sizeof(uuid_t)) == 0;
+}
+#endif
+#else
+#define HAVE_METADATA_PORT_INFO
+#endif /* !(RHEL >= 7.5) && !(SLES >= 12.4) */
+#else /* > 4.13 */
+#define HAVE_METADATA_PORT_INFO
+#define HAVE_HWTSTAMP_FILTER_NTP_ALL
+#define HAVE_NDO_SETUP_TC_CHAIN_INDEX
+#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE
+#define HAVE_PTP_CLOCK_DO_AUX_WORK
+#endif /* 4.13.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0))
+#ifdef ETHTOOL_GLINKSETTINGS
+#ifndef ethtool_link_ksettings_del_link_mode
+#define ethtool_link_ksettings_del_link_mode(ptr, name, mode)                  \
+	__clear_bit(ETHTOOL_LINK_MODE_##mode##_BIT, (ptr)->link_modes.name)
+#endif
+#endif /* ETHTOOL_GLINKSETTINGS */
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12, 4, 0)))
+#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
+#endif
+
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5)))
+#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
+#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC
+#endif
+
+#define TIMER_DATA_TYPE unsigned long
+#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE)
+
+#define timer_setup(timer, callback, flags)                                    \
+	__setup_timer((timer), (TIMER_FUNC_TYPE)(callback),                    \
+		      (TIMER_DATA_TYPE)(timer), (flags))
+
+#define from_timer(var, callback_timer, timer_fieldname)                       \
+	container_of(callback_timer, typeof(*var), timer_fieldname)
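+
+/* Illustrative sketch (all names below are placeholders, not driver
+ * symbols): these two macros let timer callbacks be written in the
+ * >= 4.14 style and still build here, because the timer pointer itself is
+ * passed as the legacy data argument, which keeps from_timer()'s
+ * container_of() arithmetic valid:
+ *
+ *	struct hypothetical_priv {
+ *		struct timer_list watchdog_timer;
+ *	};
+ *
+ *	static void hypothetical_watchdog(struct timer_list *t)
+ *	{
+ *		struct hypothetical_priv *priv =
+ *			from_timer(priv, t, watchdog_timer);
+ *		// ... use priv ...
+ *	}
+ *
+ *	timer_setup(&priv->watchdog_timer, hypothetical_watchdog, 0);
+ */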
+
+#ifndef xdp_do_flush_map
+#define xdp_do_flush_map()                                                     \
+	do {                                                                   \
+	} while (0)
+#endif
+struct _kc_xdp_buff {
+	void *data;
+	void *data_end;
+	void *data_hard_start;
+};
+#define xdp_buff _kc_xdp_buff
+struct _kc_bpf_prog {};
+#define bpf_prog _kc_bpf_prog
+#ifndef DIV_ROUND_DOWN_ULL
+#define DIV_ROUND_DOWN_ULL(ll, d)                                              \
+	({                                                                     \
+		unsigned long long _tmp = (ll);                                \
+		do_div(_tmp, d);                                               \
+		_tmp;                                                          \
+	})
+#endif /* DIV_ROUND_DOWN_ULL */
+#else /* > 4.14 */
+#define HAVE_XDP_SUPPORT
+#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
+#define HAVE_TCF_EXTS_HAS_ACTION
+#endif /* 4.14.0 */
+
+/*****************************************************************************/
+#ifndef ETHTOOL_GLINKSETTINGS
+
+#define __ETHTOOL_LINK_MODE_MASK_NBITS 32
+#define ETHTOOL_LINK_MASK_SIZE BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS)
+
+/**
+ * struct ethtool_link_ksettings
+ * @link_modes: supported and advertising, single item arrays
+ * @link_modes.supported: bitmask of supported link speeds
+ * @link_modes.advertising: bitmask of currently advertised speeds
+ * @base: base link details
+ * @base.speed: current link speed
+ * @base.port: current port type
+ * @base.duplex: current duplex mode
+ * @base.autoneg: current autonegotiation settings
+ *
+ * This struct and the following macros provide a way to support the old
+ * ethtool get/set_settings API on older kernels, but in the style of the new
+ * GLINKSETTINGS API.  In this way, the same code can be used to support both
+ * APIs as seemlessly as possible.
+ *
+ * It should be noted the old API only has support up to the first 32 bits.
+ */
+struct ethtool_link_ksettings {
+	struct {
+		u32 speed;
+		u8 port;
+		u8 duplex;
+		u8 autoneg;
+	} base;
+	struct {
+		unsigned long supported[ETHTOOL_LINK_MASK_SIZE];
+		unsigned long advertising[ETHTOOL_LINK_MASK_SIZE];
+	} link_modes;
+};
+
+#define ETHTOOL_LINK_NAME_advertising(mode) ADVERTISED_##mode
+#define ETHTOOL_LINK_NAME_supported(mode) SUPPORTED_##mode
+#define ETHTOOL_LINK_NAME(name) ETHTOOL_LINK_NAME_##name
+#define ETHTOOL_LINK_CONVERT(name, mode) ETHTOOL_LINK_NAME(name)(mode)
+
+/**
+ * ethtool_link_ksettings_zero_link_mode
+ * @ptr: ptr to ksettings struct
+ * @name: supported or advertising
+ */
+#define ethtool_link_ksettings_zero_link_mode(ptr, name)                       \
+	(*((ptr)->link_modes.name) = 0x0)
+
+/**
+ * ethtool_link_ksettings_add_link_mode
+ * @ptr: ptr to ksettings struct
+ * @name: supported or advertising
+ * @mode: link mode to add
+ */
+#define ethtool_link_ksettings_add_link_mode(ptr, name, mode)                  \
+	(*((ptr)->link_modes.name) |=                                          \
+	 (typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode))
+
+/**
+ * ethtool_link_ksettings_del_link_mode
+ * @ptr: ptr to ksettings struct
+ * @name: supported or advertising
+ * @mode: link mode to delete
+ */
+#define ethtool_link_ksettings_del_link_mode(ptr, name, mode)                  \
+	(*((ptr)->link_modes.name) &=                                          \
+	 ~(typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode))
+
+/**
+ * ethtool_link_ksettings_test_link_mode
+ * @ptr: ptr to ksettings struct
+ * @name: supported or advertising
+ * @mode: link mode to test
+ */
+#define ethtool_link_ksettings_test_link_mode(ptr, name, mode)                 \
+	(!!(*((ptr)->link_modes.name) & ETHTOOL_LINK_CONVERT(name, mode)))
+
+/**
+ * _kc_ethtool_ksettings_to_cmd - Convert ethtool_link_ksettings to ethtool_cmd
+ * @ks: ethtool_link_ksettings struct
+ * @cmd: ethtool_cmd struct
+ *
+ * Convert an ethtool_link_ksettings structure into the older ethtool_cmd
+ * structure. We provide this in kcompat.h so that drivers can easily
+ * implement the older .{get|set}_settings as wrappers around the new api.
+ * Hence, we keep it prefixed with _kc_ to make it clear this isn't actually
+ * a real function in the kernel.
+ */
+static inline void
+_kc_ethtool_ksettings_to_cmd(struct ethtool_link_ksettings *ks,
+			     struct ethtool_cmd *cmd)
+{
+	cmd->supported = (u32)ks->link_modes.supported[0];
+	cmd->advertising = (u32)ks->link_modes.advertising[0];
+	ethtool_cmd_speed_set(cmd, ks->base.speed);
+	cmd->duplex = ks->base.duplex;
+	cmd->autoneg = ks->base.autoneg;
+	cmd->port = ks->base.port;
+}
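+
+/* Illustrative sketch (function and variable names are placeholders): a
+ * legacy .get_settings handler can be written against the ksettings-style
+ * macros above and converted at the end, so the same fill-in code can also
+ * back .get_link_ksettings on newer kernels:
+ *
+ *	static int hypothetical_get_settings(struct net_device *netdev,
+ *					     struct ethtool_cmd *cmd)
+ *	{
+ *		struct ethtool_link_ksettings ks;
+ *
+ *		ethtool_link_ksettings_zero_link_mode(&ks, supported);
+ *		ethtool_link_ksettings_add_link_mode(&ks, supported,
+ *						     1000baseT_Full);
+ *		ks.base.speed = SPEED_1000;
+ *		ks.base.duplex = DUPLEX_FULL;
+ *		ks.base.autoneg = AUTONEG_ENABLE;
+ *		ks.base.port = PORT_TP;
+ *		_kc_ethtool_ksettings_to_cmd(&ks, cmd);
+ *		return 0;
+ *	}
+ *
+ * Only the first 32 link-mode bits survive the conversion, as noted above.
+ */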
+
+#endif /* !ETHTOOL_GLINKSETTINGS */
+
+/*****************************************************************************/
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) ||                        \
+     (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12, 3, 0))) ||      \
+     (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7, 5))))
+#define phy_speed_to_str _kc_phy_speed_to_str
+const char *_kc_phy_speed_to_str(int speed);
+#else /* (LINUX >= 4.14.0) || (SLES > 12.3.0) || (RHEL > 7.5) */
+#include <linux/phy.h>
+#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0))
+#if ((RHEL_RELEASE_CODE &&                                                     \
+      (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 6))) ||                    \
+     (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15, 1, 0))))
+/* CentOS-7-aarch64-Everything-1810.iso does not define this */
+#ifndef CONFIG_ARM64
+#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+#define HAVE_TCF_BLOCK
+#endif /* CONFIG_ARM64 */
+#else /* !(RHEL >= 7.6 || SLES >= 15.1) */
+#endif /* RHEL >= 7.6 || SLES >= 15.1 */
+void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst,
+				      struct ethtool_link_ksettings *src);
+#define ethtool_intersect_link_masks _kc_ethtool_intersect_link_masks
+#else /* >= 4.15 */
+#define HAVE_XDP_BUFF_DATA_META
+#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+#define HAVE_TCF_BLOCK
+#endif /* 4.15.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0))
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 7)) &&                     \
+     !(SLE_VERSION_CODE >= SLE_VERSION(12, 4, 0) &&                            \
+       SLE_VERSION_CODE < SLE_VERSION(15, 0, 0)) &&                            \
+     !(SLE_VERSION_CODE >= SLE_VERSION(15, 1, 0)))
+/* The return values of strscpy() and strlcpy() differ, which could
+ * potentially be hazardous in the future.  To avoid this, the result is
+ * forced to void, so this wrapper cannot be used where the return value
+ * is needed (a return value is required on kernels 4.3 through 4.15).
+ */
+#define strscpy(...) (void)(strlcpy(__VA_ARGS__))
+#endif /* !RHEL >= 7.7 && !SLES12sp4+ && !SLES15sp1+ */
+
+#define pci_printk(level, pdev, fmt, arg...)                                   \
+	dev_printk(level, &(pdev)->dev, fmt, ##arg)
+#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg)
+#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg)
+#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg)
+#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg)
+#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg)
+#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg)
+#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
+#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
+
+#ifndef array_index_nospec
+static inline unsigned long _kc_array_index_mask_nospec(unsigned long index,
+							unsigned long size)
+{
+	/*
+	 * Always calculate and emit the mask even if the compiler
+	 * thinks the mask is not needed. The compiler does not take
+	 * into account the value of @index under speculation.
+	 */
+	OPTIMIZER_HIDE_VAR(index);
+	return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
+}
+
+#define array_index_nospec(index, size)                                        \
+	({                                                                     \
+		typeof(index) _i = (index);                                    \
+		typeof(size) _s = (size);                                      \
+		unsigned long _mask = _kc_array_index_mask_nospec(_i, _s);     \
+                                                                               \
+		BUILD_BUG_ON(sizeof(_i) > sizeof(long));                       \
+		BUILD_BUG_ON(sizeof(_s) > sizeof(long));                       \
+                                                                               \
+		(typeof(_i))(_i & _mask);                                      \
+	})
+#endif /* array_index_nospec */
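+
+/* Illustrative only (names are placeholders): array_index_nospec() is used
+ * after the bounds check so that a speculatively out-of-range index is
+ * clamped instead of indexing past the array:
+ *
+ *	if (queue >= hypothetical_num_queues)
+ *		return -EINVAL;
+ *	queue = array_index_nospec(queue, hypothetical_num_queues);
+ *	ring = hypothetical_rings[queue];
+ */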
+#ifndef sizeof_field
+#define sizeof_field(TYPE, MEMBER) (sizeof((((TYPE *)0)->MEMBER)))
+#endif /* sizeof_field */
+/* add a check for the Oracle UEK 4.14.35 kernel as
+ * it backported a version of this bitmap function
+ */
+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 0)) &&                      \
+	!(SLE_VERSION_CODE >= SLE_VERSION(12, 5, 0) &&                         \
+		  SLE_VERSION_CODE < SLE_VERSION(15, 0, 0) ||                  \
+	  SLE_VERSION_CODE >= SLE_VERSION(15, 1, 0)) &&                        \
+	!(LINUX_VERSION_CODE == KERNEL_VERSION(4, 14, 35))
+/*
+ * Copy bitmap and clear tail bits in last word.
+ */
+static inline void bitmap_copy_clear_tail(unsigned long *dst,
+					  const unsigned long *src,
+					  unsigned int nbits)
+{
+	bitmap_copy(dst, src, nbits);
+	if (nbits % BITS_PER_LONG)
+		dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits);
+}
+
+/*
+ * On 32-bit systems bitmaps are represented as u32 arrays internally, and
+ * therefore conversion is not needed when copying data from/to arrays of u32.
+ */
+#if BITS_PER_LONG == 64
+void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
+		       unsigned int nbits);
+#else
+#define bitmap_from_arr32(bitmap, buf, nbits)                                  \
+	bitmap_copy_clear_tail((unsigned long *)(bitmap),                      \
+			       (const unsigned long *)(buf), (nbits))
+#endif /* BITS_PER_LONG == 64 */
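+
+/* Illustrative only: bitmap_from_arr32() lets u32-based data (for example
+ * a firmware-provided table) be copied into an unsigned long bitmap
+ * without worrying about 32- vs 64-bit word layout:
+ *
+ *	u32 words[2] = { 0xffffffff, 0x1 };
+ *	DECLARE_BITMAP(caps, 33);
+ *
+ *	bitmap_from_arr32(caps, words, 33);
+ *	// bits 0-32 are now set; tail bits of the last word are cleared
+ */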
+#endif /* !(RHEL >= 8.0) && !(SLES >= 12.5 && SLES < 15.0 || SLES >= 15.1) */
+#else /* >= 4.16 */
+#include <linux/nospec.h>
+#define HAVE_TC_FLOWER_OFFLOAD_COMMON_EXTACK
+#define HAVE_TCF_MIRRED_DEV
+#define HAVE_VF_STATS_DROPPED
+#endif /* 4.16.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0))
+#include 
+#include 
+#define PCIE_SPEED_16_0GT 0x17
+#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */
+#define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */
+#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */
+void _kc_pcie_print_link_status(struct pci_dev *dev);
+#define pcie_print_link_status _kc_pcie_print_link_status
+#else /* >= 4.17.0 */
+#define HAVE_XDP_BUFF_IN_XDP_H
+#endif /* 4.17.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0))
+#include "kcompat_overflow.h"
+
+#if (SLE_VERSION_CODE < SLE_VERSION(15, 1, 0))
+#define firmware_request_nowarn request_firmware_direct
+#endif /* SLES < 15.1 */
+
+#else
+#include 
+#include 
+#define HAVE_XDP_FRAME_STRUCT
+#define HAVE_XDP_SOCK
+#define HAVE_NDO_XDP_XMIT_BULK_AND_FLAGS
+#define NO_NDO_XDP_FLUSH
+#endif /* 4.18.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
+#define bitmap_alloc(nbits, flags)                                             \
+	kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long), flags)
+#define bitmap_zalloc(nbits, flags) bitmap_alloc(nbits, ((flags) | __GFP_ZERO))
+#define bitmap_free(bitmap) kfree(bitmap)
+#ifdef ETHTOOL_GLINKSETTINGS
+#define ethtool_ks_clear(ptr, name)                                            \
+	ethtool_link_ksettings_zero_link_mode(ptr, name)
+#define ethtool_ks_add_mode(ptr, name, mode)                                   \
+	ethtool_link_ksettings_add_link_mode(ptr, name, mode)
+#define ethtool_ks_del_mode(ptr, name, mode)                                   \
+	ethtool_link_ksettings_del_link_mode(ptr, name, mode)
+#define ethtool_ks_test(ptr, name, mode)                                       \
+	ethtool_link_ksettings_test_link_mode(ptr, name, mode)
+#endif /* ETHTOOL_GLINKSETTINGS */
+#define HAVE_NETPOLL_CONTROLLER
+#define REQUIRE_PCI_CLEANUP_AER_ERROR_STATUS
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15, 1, 0)))
+#define HAVE_TCF_MIRRED_DEV
+#define HAVE_NDO_SELECT_QUEUE_SB_DEV
+#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK
+#endif
+
+static inline void __kc_metadata_dst_free(void *md_dst)
+{
+	kfree(md_dst);
+}
+
+#define metadata_dst_free(md_dst) __kc_metadata_dst_free(md_dst)
+#else /* >= 4.19.0 */
+#define HAVE_TCF_BLOCK_CB_REGISTER_EXTACK
+#define NO_NETDEV_BPF_PROG_ATTACHED
+#define HAVE_NDO_SELECT_QUEUE_SB_DEV
+#define HAVE_NETDEV_SB_DEV
+#define HAVE_TCF_VLAN_TPID
+#define HAVE_RHASHTABLE_TYPES
+#endif /* 4.19.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0))
+#define HAVE_XDP_UMEM_PROPS
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 0)))
+#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK
+#endif /* RHEL >= 8.0 */
+#if ((SLE_VERSION_CODE >= SLE_VERSION(12, 5, 0) &&                             \
+      SLE_VERSION_CODE < SLE_VERSION(15, 0, 0)) ||                             \
+     (SLE_VERSION_CODE >= SLE_VERSION(15, 1, 0)))
+#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK
+#endif /* SLE == 12sp5 || SLE >= 15sp1 */
+#else /* >= 4.20.0 */
+#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK
+#define HAVE_AF_XDP_ZC_SUPPORT
+#define HAVE_VXLAN_TYPE
+#define HAVE_ETF_SUPPORT /* Earliest TxTime First */
+#endif /* 4.20.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0))
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8, 0)))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+#define NETLINK_MAX_COOKIE_LEN 20
+struct netlink_ext_ack {
+	const char *_msg;
+	const struct nlattr *bad_attr;
+	u8 cookie[NETLINK_MAX_COOKIE_LEN];
+	u8 cookie_len;
+};
+
+#endif /* < 4.12 */
+/*
+static inline int _kc_dev_open(struct net_device *netdev,
+			       struct netlink_ext_ack __always_unused *extack)
+{
+	return dev_open(netdev);
+}
+
+#define dev_open _kc_dev_open
+*/
+static inline int
+_kc_dev_change_flags(struct net_device *netdev, unsigned int flags,
+		     struct netlink_ext_ack __always_unused *extack)
+{
+	return dev_change_flags(netdev, flags);
+}
+
+#define dev_change_flags _kc_dev_change_flags
+#endif /* !(RHEL_RELEASE_CODE && RHEL > RHEL(8,0)) */
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 7) &&  \
+			   RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 0)) ||  \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 1)))
+#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL
+#define HAVE_PTP_CLOCK_INFO_GETTIMEX64
+#else /* !((RHEL >= 7.7 && RHEL < 8.0) || RHEL >= 8.1) */
+#ifndef NO_NEED_PTP_SYSTEM_TIMESTAMP
+#if !(defined(CONFIG_KYLINOS_SERVER) &&                                        \
+      (LINUX_VERSION_CODE > KERNEL_VERSION(4, 19, 0)))
+#if !(defined(EULER_OS))
+struct ptp_system_timestamp {
+	struct timespec64 pre_ts;
+	struct timespec64 post_ts;
+};
+
+static inline void
+ptp_read_system_prets(struct ptp_system_timestamp __always_unused *sts)
+{
+	;
+}
+
+static inline void
+ptp_read_system_postts(struct ptp_system_timestamp __always_unused *sts)
+{
+	;
+}
+#endif
+#endif
+#endif
+#endif /* (RHEL >= 7.7 && RHEL < 8.0) || RHEL >= 8.1 */
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 1)))
+#define HAVE_NDO_BRIDGE_SETLINK_EXTACK
+#endif /* RHEL 8.1 */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 2))
+#define HAVE_TC_INDIR_BLOCK
+#endif /* RHEL 8.2 */
+#else /* >= 5.0.0 */
+#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL
+#define HAVE_PTP_CLOCK_INFO_GETTIMEX64
+#define HAVE_NDO_BRIDGE_SETLINK_EXTACK
+#define HAVE_DMA_ALLOC_COHERENT_ZEROES_MEM
+#define HAVE_GENEVE_TYPE
+#define HAVE_TC_INDIR_BLOCK
+#endif /* 5.0.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0))
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 1)))
+#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE
+#define HAVE_NDO_FDB_ADD_EXTACK
+#define HAVE_DEVLINK_INFO_GET
+#define HAVE_DEVLINK_FLASH_UPDATE
+#else /* RHEL < 8.1 */
+#if defined(HAVE_TC_SETUP_CLSFLOWER) &&                                        \
+	!(defined(CONFIG_KYLINOS_SERVER) &&                                    \
+	  (LINUX_VERSION_CODE > KERNEL_VERSION(4, 19, 0)))
+#include <net/flow_dissector.h>
+
+struct flow_match {
+	struct flow_dissector *dissector;
+	void *mask;
+	void *key;
+};
+
+struct flow_match_basic {
+	struct flow_dissector_key_basic *key, *mask;
+};
+
+struct flow_match_control {
+	struct flow_dissector_key_control *key, *mask;
+};
+
+struct flow_match_eth_addrs {
+	struct flow_dissector_key_eth_addrs *key, *mask;
+};
+
+#ifdef HAVE_TC_FLOWER_ENC
+struct flow_match_enc_keyid {
+	struct flow_dissector_key_keyid *key, *mask;
+};
+#endif
+
+#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS
+struct flow_match_vlan {
+	struct flow_dissector_key_vlan *key, *mask;
+};
+#endif
+
+struct flow_match_ipv4_addrs {
+	struct flow_dissector_key_ipv4_addrs *key, *mask;
+};
+
+struct flow_match_ipv6_addrs {
+	struct flow_dissector_key_ipv6_addrs *key, *mask;
+};
+
+struct flow_match_ports {
+	struct flow_dissector_key_ports *key, *mask;
+};
+
+struct flow_rule {
+	struct flow_match match;
+#if 0
+	/* In 5.1+ kernels, action is a member of struct flow_rule, but it
+	 * is not compatible with how tc_cls_flower_offload_flow_rule() is
+	 * emulated below.  By not declaring it here, any driver that
+	 * attempts to use action as an element of struct flow_rule will
+	 * fail to compile instead of silently accessing memory that it
+	 * shouldn't.
+	 */
+	struct flow_action	action;
+#endif
+};
+
+void flow_rule_match_basic(const struct flow_rule *rule,
+			   struct flow_match_basic *out);
+void flow_rule_match_control(const struct flow_rule *rule,
+			     struct flow_match_control *out);
+void flow_rule_match_eth_addrs(const struct flow_rule *rule,
+			       struct flow_match_eth_addrs *out);
+#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS
+void flow_rule_match_vlan(const struct flow_rule *rule,
+			  struct flow_match_vlan *out);
+#endif
+void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
+				struct flow_match_ipv4_addrs *out);
+void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
+				struct flow_match_ipv6_addrs *out);
+void flow_rule_match_ports(const struct flow_rule *rule,
+			   struct flow_match_ports *out);
+#ifdef HAVE_TC_FLOWER_ENC
+void flow_rule_match_enc_ports(const struct flow_rule *rule,
+			       struct flow_match_ports *out);
+void flow_rule_match_enc_control(const struct flow_rule *rule,
+				 struct flow_match_control *out);
+void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
+				    struct flow_match_ipv4_addrs *out);
+void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
+				    struct flow_match_ipv6_addrs *out);
+void flow_rule_match_enc_keyid(const struct flow_rule *rule,
+			       struct flow_match_enc_keyid *out);
+#endif
+
+static inline struct flow_rule *
+tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd)
+{
+	return (struct flow_rule *)&tc_flow_cmd->dissector;
+}
+
+static inline bool flow_rule_match_key(const struct flow_rule *rule,
+				       enum flow_dissector_key_id key)
+{
+	return dissector_uses_key(rule->match.dissector, key);
+}
+#endif /* HAVE_TC_SETUP_CLSFLOWER */
+
+#endif /* RHEL < 8.1 */
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 1)))
+#if defined(CONFIG_KYLINOS_SERVER) &&                                          \
+	(LINUX_VERSION_CODE > KERNEL_VERSION(4, 19, 0))
+#else
+#define devlink_params_publish(devlink)                                        \
+	do {                                                                   \
+	} while (0)
+#define devlink_params_unpublish(devlink)                                      \
+	do {                                                                   \
+	} while (0)
+#endif
+#endif
+
+#else /* >= 5.1.0 */
+#define HAVE_NDO_FDB_ADD_EXTACK
+#define NO_XDP_QUERY_XSK_UMEM
+#define HAVE_AF_XDP_NETDEV_UMEM
+#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE
+#define HAVE_TC_FLOWER_ENC_IP
+#define HAVE_DEVLINK_INFO_GET
+#define HAVE_DEVLINK_FLASH_UPDATE
+#define HAVE_DEVLINK_PORT_PARAMS
+#endif /* 5.1.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0))
+#if (defined HAVE_SKB_XMIT_MORE) &&                                            \
+	(!(RHEL_RELEASE_CODE &&                                                \
+	   (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 2))))
+#define netdev_xmit_more() (skb->xmit_more)
+#else
+#define netdev_xmit_more() (0)
+#endif
+
+#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 2))))
+
+#ifndef NO_ETH_GET_HEADLEN
+#if !defined(eth_get_headlen) &&                                               \
+	!(defined(CONFIG_KYLINOS_SERVER) &&                                    \
+	  (LINUX_VERSION_CODE > KERNEL_VERSION(4, 19, 0)))
+static inline u32
+__kc_eth_get_headlen(const struct net_device __always_unused *dev, void *data,
+		     unsigned int len)
+{
+	return eth_get_headlen(data, len);
+}
+
+#define eth_get_headlen(dev, data, len) __kc_eth_get_headlen(dev, data, len)
+#endif /* !eth_get_headlen */
+#endif
+#endif /* !RHEL >= 8.2 */
+
+#ifndef mmiowb
+#ifdef CONFIG_IA64
+#define mmiowb() asm volatile("mf.a" ::: "memory")
+#else
+#define mmiowb()
+#endif
+#endif /* mmiowb */
+
+#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8, 1))
+#define HAVE_NDO_GET_DEVLINK_PORT
+#endif /* RHEL > 8.1 */
+
+#else /* >= 5.2.0 */
+#define HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED
+#define SPIN_UNLOCK_IMPLIES_MMIOWB
+#define HAVE_NDO_GET_DEVLINK_PORT
+#endif /* 5.2.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0))
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 2)))
+#define flow_block_offload tc_block_offload
+#define flow_block_command tc_block_command
+#define flow_cls_offload tc_cls_flower_offload
+#define flow_block_binder_type tcf_block_binder_type
+#define flow_cls_common_offload tc_cls_common_offload
+#define flow_cls_offload_flow_rule tc_cls_flower_offload_flow_rule
+#define FLOW_CLS_REPLACE TC_CLSFLOWER_REPLACE
+#define FLOW_CLS_DESTROY TC_CLSFLOWER_DESTROY
+#define FLOW_CLS_STATS TC_CLSFLOWER_STATS
+#define FLOW_CLS_TMPLT_CREATE TC_CLSFLOWER_TMPLT_CREATE
+#define FLOW_CLS_TMPLT_DESTROY TC_CLSFLOWER_TMPLT_DESTROY
+#define FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS                                  \
+	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS
+#define FLOW_BLOCK_BIND TC_BLOCK_BIND
+#define FLOW_BLOCK_UNBIND TC_BLOCK_UNBIND
+
+#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+#include <net/pkt_cls.h>
+
+int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f,
+				   struct list_head *driver_list,
+				   tc_setup_cb_t *cb, void *cb_ident,
+				   void *cb_priv, bool ingress_only);
+
+#define flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv,      \
+				   ingress_only)                               \
+	_kc_flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv,  \
+				       ingress_only)
+#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */
+#else /* RHEL >= 8.2 */
+#define HAVE_FLOW_BLOCK_API
+#define HAVE_DEVLINK_PORT_ATTR_PCI_VF
+#endif /* RHEL >= 8.2 */
+
+#ifndef ETH_P_LLDP
+#define ETH_P_LLDP 0x88CC
+#endif /* !ETH_P_LLDP */
+
+#else /* >= 5.3.0 */
+#define XSK_UMEM_RETURNS_XDP_DESC
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0))
+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15, 3, 0))
+#define HAVE_XSK_UMEM_HAS_ADDRS
+#endif /* SLE < 15.3 */
+#endif /* < 5.8.0*/
+#define HAVE_FLOW_BLOCK_API
+#define HAVE_DEVLINK_PORT_ATTR_PCI_VF
+#if IS_ENABLED(CONFIG_DIMLIB)
+#define HAVE_CONFIG_DIMLIB
+#endif
+#endif /* 5.3.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
+#if (SLE_VERSION_CODE >= SLE_VERSION(15, 2, 0))
+#define HAVE_NDO_XSK_WAKEUP
+#endif /* SLES15sp2 */
+#else /* >= 5.4.0 */
+#define HAVE_NDO_XSK_WAKEUP
+#endif /* 5.4.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0))
+static inline unsigned long _kc_bitmap_get_value8(const unsigned long *map,
+						  unsigned long start)
+{
+	const size_t index = BIT_WORD(start);
+	const unsigned long offset = start % BITS_PER_LONG;
+
+	return (map[index] >> offset) & 0xFF;
+}
+#define bitmap_get_value8 _kc_bitmap_get_value8
+
+static inline void _kc_bitmap_set_value8(unsigned long *map,
+					 unsigned long value,
+					 unsigned long start)
+{
+	const size_t index = BIT_WORD(start);
+	const unsigned long offset = start % BITS_PER_LONG;
+
+	map[index] &= ~(0xFFUL << offset);
+	map[index] |= value << offset;
+}
+#define bitmap_set_value8 _kc_bitmap_set_value8
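+
+/* Illustrative only: these helpers read and write one byte-aligned 8-bit
+ * field inside a bitmap, e.g.
+ *
+ *	DECLARE_BITMAP(map, 64) = { 0 };
+ *	unsigned long val;
+ *
+ *	bitmap_set_value8(map, 0xA5, 8);	// byte at bit offset 8
+ *	val = bitmap_get_value8(map, 8);	// reads back 0xA5
+ *
+ * As upstream, the bit offset is expected to be a multiple of 8.
+ */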
+
+#endif /* 5.5.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0))
+#ifdef HAVE_AF_XDP_ZC_SUPPORT
+#define xsk_umem_release_addr xsk_umem_discard_addr
+#define xsk_umem_release_addr_rq xsk_umem_discard_addr_rq
+#endif /* HAVE_AF_XDP_ZC_SUPPORT */
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3)) || \
+     (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15, 3, 0)))
+#define HAVE_TX_TIMEOUT_TXQUEUE
+#endif
+#else /* >= 5.6.0 */
+#define HAVE_TX_TIMEOUT_TXQUEUE
+#endif /* 5.6.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0))
+u64 _kc_pci_get_dsn(struct pci_dev *dev);
+#define pci_get_dsn(dev) _kc_pci_get_dsn(dev)
+/* add a check for the Oracle UEK 5.4.17 kernel which
+ * backported the rename of the aer functions
+ */
+#if !(SLE_VERSION_CODE > SLE_VERSION(15, 2, 0)) &&                             \
+	!((LINUX_VERSION_CODE == KERNEL_VERSION(5, 3, 18)) &&                  \
+	  (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(14, 0, 0))) &&              \
+	!(LINUX_VERSION_CODE == KERNEL_VERSION(5, 4, 17)) &&                   \
+	!(RHEL_RELEASE_CODE &&                                                 \
+	  (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3)))
+#define pci_aer_clear_nonfatal_status pci_cleanup_aer_uncorrect_error_status
+#endif
+
+#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID
+#define DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID "fw.bundle_id"
+#endif
+#else /* >= 5.7.0 */
+#define HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT
+#endif /* 5.7.0 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0))
+#if !(RHEL_RELEASE_CODE &&                                                     \
+      (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 4))) &&                    \
+	!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15, 3, 0))
+/* (RHEL < 8.4) || (SLE < 15.3) */
+#define xdp_convert_buff_to_frame convert_to_xdp_frame
+#elif (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 4)))
+/* RHEL >= 8.4 */
+#define HAVE_XDP_BUFF_FRAME_SZ
+#endif
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE != RHEL_RELEASE_VERSION(8, 10)))
+#define flex_array_size(p, member, count)                                      \
+	array_size(count, sizeof(*(p)->member) + __must_be_array((p)->member))
+#endif
+#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(15, 3, 0)))
+#ifdef HAVE_AF_XDP_ZC_SUPPORT
+#ifndef xsk_umem_get_rx_frame_size
+static inline u32 _xsk_umem_get_rx_frame_size(struct xdp_umem *umem)
+{
+	return umem->chunk_size_nohr - XDP_PACKET_HEADROOM;
+}
+
+#define xsk_umem_get_rx_frame_size _xsk_umem_get_rx_frame_size
+#endif /* xsk_umem_get_rx_frame_size */
+#endif /* HAVE_AF_XDP_ZC_SUPPORT */
+#else /* SLE >= 15.3 */
+#define HAVE_XDP_BUFF_FRAME_SZ
+#define HAVE_MEM_TYPE_XSK_BUFF_POOL
+#endif /* SLE >= 15.3 */
+#else /* >= 5.8.0 */
+#define HAVE_XDP_SOCK_DRV
+#define HAVE_TC_FLOW_INDIR_DEV
+#define HAVE_TC_FLOW_INDIR_BLOCK_CLEANUP
+#define HAVE_XDP_BUFF_FRAME_SZ
+#define HAVE_MEM_TYPE_XSK_BUFF_POOL
+#endif /* 5.8.0 */
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 3)))
+#define HAVE_TC_FLOW_INDIR_DEV
+#endif
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15, 3, 0)))
+#define HAVE_TC_FLOW_INDIR_DEV
+#endif /* SLE_VERSION_CODE && SLE_VERSION_CODE >= SLES15SP3 */
+
+/*****************************************************************************/
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 4)))
+#define HAVE_TC_FLOW_INDIR_BLOCK_CLEANUP
+#endif /* (RHEL >= 8.4) */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0))
+#else /* >= 5.9.0 */
+#define HAVE_FLOW_INDIR_BLOCK_QDISC
+#define HAVE_UDP_TUNNEL_NIC_INFO
+#endif /* 5.9.0 */
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8, 3)))
+#define HAVE_FLOW_INDIR_BLOCK_QDISC
+#endif
+#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15, 3, 0)))
+#define HAVE_FLOW_INDIR_BLOCK_QDISC
+#endif /* SLE_VERSION_CODE && SLE_VERSION_CODE >= SLES15SP3 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
+#if (!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15, 3, 0))))
+#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM
+#define xsk_get_pool_from_qid xdp_get_umem_from_qid
+#define xsk_pool_get_rx_frame_size xsk_umem_get_rx_frame_size
+#define xsk_pool_set_rxq_info xsk_buff_set_rxq_info
+#define xsk_pool_dma_unmap xsk_buff_dma_unmap
+#define xsk_pool_dma_map xsk_buff_dma_map
+#define xsk_tx_peek_desc xsk_umem_consume_tx
+#define xsk_tx_release xsk_umem_consume_tx_done
+#define xsk_tx_completed xsk_umem_complete_tx
+#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup
+
+#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL
+#include <net/xdp_sock_drv.h>
+static inline void _kc_xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp,
+						 void __always_unused *pool)
+{
+	xsk_buff_dma_sync_for_cpu(xdp);
+}
+
+#define xsk_buff_dma_sync_for_cpu(xdp, pool)                                   \
+	_kc_xsk_buff_dma_sync_for_cpu(xdp, pool)
+#endif /* HAVE_MEM_TYPE_XSK_BUFF_POOL */
+
+#else /* SLE >= 15.3 */
+#define HAVE_NETDEV_BPF_XSK_POOL
+#endif /* SLE >= 15.3 */
+#else /* >= 5.10.0 */
+#define HAVE_NETDEV_BPF_XSK_POOL
+#endif /* <5.10.0 */
+
+#if defined(EULER_OS) || defined(OPENEULER_VERSION_CODE)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
+#define HAVE_ETHTOOL_COALESCE_EXTACK
+#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS
+#endif
+
+#if defined(OPENEULER_VERSION_CODE)
+#if (OPENEULER_VERSION_CODE > OPENEULER_VERSION(2203, 2))
+#ifdef NEED_ETH_HW_ADDR_SET
+#undef NEED_ETH_HW_ADDR_SET
+#endif
+#endif
+#endif
+
+#endif
+
+/*****************************************************************************/
+#ifdef HAVE_XDP_RXQ_INFO_REG_3_PARAMS
+#ifdef HAVE_XDP_BUFF_IN_XDP_H
+#include 
+#else
+#include 
+#endif /* HAVE_XDP_BUFF_IN_XDP_H */
+static inline int _kc_xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
+				       struct net_device *dev, u32 queue_index,
+				       unsigned int __always_unused napi_id)
+{
+	return xdp_rxq_info_reg(xdp_rxq, dev, queue_index);
+}
+
+#define xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id)                   \
+	_kc_xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id)
+#endif /* HAVE_XDP_RXQ_INFO_REG_3_PARAMS */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0))
+#ifdef HAVE_NAPI_BUSY_LOOP
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+static inline void _kc_napi_busy_loop(unsigned int napi_id,
+				      bool (*loop_end)(void *, unsigned long),
+				      void *loop_end_arg,
+				      bool __always_unused prefer_busy_poll,
+				      u16 __always_unused budget)
+{
+	napi_busy_loop(napi_id, loop_end, loop_end_arg);
+}
+
+#define napi_busy_loop(napi_id, loop_end, loop_end_arg, prefer_busy_poll,      \
+		       budget)                                                 \
+	_kc_napi_busy_loop(napi_id, loop_end, loop_end_arg, prefer_busy_poll,  \
+			   budget)
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+#endif /* HAVE_NAPI_BUSY_LOOP */
+#endif /* <5.11.0 */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0))
+#define NO_PCIE_ERROR_REPORTING
+#endif
+
+/*
+ * Load the implementations file which actually defines kcompat backports.
+ * Legacy backports still exist in this file, but all new backports must be
+ * implemented using kcompat_*defs.h and kcompat_impl.h
+ */
+#include "kcompat_impl.h"
+
+#endif /* _KCOMPAT_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
new file mode 100755
index 0000000000000000000000000000000000000000..99e5c96894c6002111a273b0300eeb77f3ca97b2
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
@@ -0,0 +1,1293 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPGBE_H_
+#define _RNPGBE_H_
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "rnpgbe_type.h"
+#include "rnpgbe_common.h"
+#include "rnp_compat.h"
+#ifdef CONFIG_RNP_DCA
+#include <linux/dca.h>
+#endif /* CONFIG_RNP_DCA */
+
+extern struct rnpgbe_info rnpgbe_n500_info;
+extern struct rnpgbe_info rnpgbe_n210_info;
+extern struct rnpgbe_info rnpgbe_n210L_info;
+/* common prefix used by pr_<> macros */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define RNP_ALLOC_PAGE_ORDER 0
+#define RNP_PAGE_BUFFER_NUMS(ring)                                             \
+	((1 << RNP_ALLOC_PAGE_ORDER) * PAGE_SIZE /                             \
+	 ALIGN((rnpgbe_rx_offset(ring) + rnpgbe_rx_bufsz(ring) +               \
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +               \
+		RNP_RX_HWTS_OFFSET),                                           \
+	       1024))
+#define RNP_DEFAULT_TX_WORK (128)
+#define RNP_MIN_TX_WORK (32)
+#define RNP_MAX_TX_WORK (512)
+#define RNP_MIN_RX_WORK (32)
+#define RNP_MAX_RX_WORK (512)
+#define RNP_WORK_ALIGN (2)
+#define RNP_MIN_TX_FRAME (1)
+#define RNP_MAX_TX_FRAME (256)
+#define RNP_MIN_TX_USEC (10)
+#define RNP_MAX_TX_USEC (10000)
+#define RNP_MIN_RX_FRAME (1)
+#define RNP_MAX_RX_FRAME (256)
+#define RNP_MIN_RX_USEC (2)
+#define RNP_MAX_RX_USEC (10000)
+#define RNP_MAX_TXD (4096)
+#define RNP_MIN_TXD (64)
+/* Default LPI timers */
+#define RNP_DEFAULT_LIT_LS 0x3E8
+#define RNP_DEFAULT_TWT_LS 0x1E
+#define RNP_START_ITR 648 /* ~6000 ints/sec */
+#define RNP_4K_ITR 980
+#define RNP_20K_ITR 196
+#define RNP_70K_ITR
+#define RNP_LOWEREST_ITR 5
+#define ACTION_TO_MPE (130)
+#define MPE_PORT (10)
+#define AUTO_ALL_MODES 0
+/* TX/RX descriptor defines */
+#ifdef FEITENG
+#define RNP_DEFAULT_TXD 4096
+#else /* FEITENG */
+#define RNP_DEFAULT_TXD 512
+#endif /* FEITENG */
+#define RNP_REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define RNP_REQ_RX_DESCRIPTOR_MULTIPLE 8
+#ifdef FEITENG
+#define RNP_DEFAULT_RXD 4096
+#else /* FEITENG */
+#define RNP_DEFAULT_RXD 512
+#endif /* FEITENG */
+#define RNP_MAX_RXD 4096
+#define RNP_MIN_RXD 64
+/* flow control */
+#define RNP_MIN_FCRTL 0x40
+#define RNP_MAX_FCRTL 0x7FF80
+#define RNP_MIN_FCRTH 0x600
+#define RNP_MAX_FCRTH 0x7FFF0
+#define RNP_DEFAULT_FCPAUSE 0xFFFF
+#define RNP10_DEFAULT_HIGH_WATER 0x320
+#define RNP10_DEFAULT_LOW_WATER 0x270
+#define RNP500_DEFAULT_HIGH_WATER 400
+#define RNP500_DEFAULT_LOW_WATER 256
+#define RNP_MIN_FCPAUSE 0
+#define RNP_MAX_FCPAUSE 0xFFFF
+/* Supported Rx Buffer Sizes */
+#define RNP_RXBUFFER_256 256 /* Used for skb receive header */
+#define RNP_RXBUFFER_1536 1536
+#define RNP_RXBUFFER_2K 2048
+#define RNP_RXBUFFER_3K 3072
+#define RNP_RXBUFFER_4K 4096
+#define RNP_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
+#define RNP_RXBUFFER_MAX (RNP_RXBUFFER_2K)
+#ifdef CONFIG_RNPGBE_DISABLE_PACKET_SPLIT
+#define RNP_RXBUFFER_7K 7168
+#define RNP_RXBUFFER_8K 8192
+#define RNP_RXBUFFER_15K 15360
+#endif /* CONFIG_RNPGBE_DISABLE_PACKET_SPLIT */
+#define MAX_Q_VECTORS 128
+#define RNP_RING_COUNTS_PEER_PF 8
+#ifdef NETIF_F_GSO_PARTIAL
+#define RNP_GSO_PARTIAL_FEATURES                                               \
+	(NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |     \
+	 NETIF_F_GSO_UDP_TUNNEL_CSUM)
+#endif /* NETIF_F_GSO_PARTIAL */
+/*
+ * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
+ * this adds up to 448 bytes of extra data.
+ *
+ * Since netdev_alloc_skb now allocates a page fragment we can use a value
+ * of 256 and the resultant skb will have a truesize of 960 or less.
+ */
+#define RNP_RX_HDR_SIZE RNP_RXBUFFER_256
+#define RNP_ITR_ADAPTIVE_MIN_INC 2
+#define RNP_ITR_ADAPTIVE_MIN_USECS 5
+#define RNP_ITR_ADAPTIVE_MAX_USECS 800
+#define RNP_ITR_ADAPTIVE_LATENCY 0x400
+#define RNP_ITR_ADAPTIVE_BULK 0x00
+#define RNP_ITR_ADAPTIVE_MASK_USECS                                            \
+	(RNP_ITR_ADAPTIVE_LATENCY - RNP_ITR_ADAPTIVE_MIN_INC)
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#ifdef OPTM_WITH_LPAGE
+#define RNP_RX_BUFFER_WRITE (PAGE_SIZE / 2048) /* Must be power of 2 */
+#else /* OPTM_WITH_LPAGE */
+#define RNP_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#endif /* OPTM_WITH_LPAGE */
+
+enum rnpgbe_tx_flags {
+	/* cmd_type flags */
+	RNP_TX_FLAGS_HW_VLAN = 0x01,
+	RNP_TX_FLAGS_TSO = 0x02,
+	RNP_TX_FLAGS_TSTAMP = 0x04,
+	/* olinfo flags */
+	RNP_TX_FLAGS_CC = 0x08,
+	RNP_TX_FLAGS_IPV4 = 0x10,
+	RNP_TX_FLAGS_CSUM = 0x20,
+	/* software defined flags */
+	RNP_TX_FLAGS_SW_VLAN = 0x40,
+	RNP_TX_FLAGS_FCOE = 0x80,
+};
+#ifndef RNP_MAX_VF_CNT
+#define RNP_MAX_VF_CNT 64
+#endif /* RNP_MAX_VF_CNT */
+#define RNP_RX_RATE_HIGH 450000
+#define RNP_RX_COAL_TIME_HIGH 128
+#define RNP_RX_SIZE_THRESH 1024
+#define RNP_RX_RATE_THRESH (1000000 / RNP_RX_COAL_TIME_HIGH)
+#define RNP_SAMPLE_INTERVAL 0
+#define RNP_AVG_PKT_SMALL 256
+#define RNP_MAX_VF_MC_ENTRIES 30
+#define RNP_MAX_VF_FUNCTIONS RNP_MAX_VF_CNT
+#define RNP_MAX_VFTA_ENTRIES 128
+#define MAX_EMULATION_MAC_ADDRS 16
+#define RNP_MAX_PF_MACVLANS_N10 15
+/* we reserve 2 for ncsi */
+#define RNP_MAX_PF_MACVLANS_N500 (15 - 2)
+#define PF_RING_CNT_WHEN_IOV_ENABLED 2
+#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
+
+enum vf_link_state {
+	rnpgbe_link_state_auto,
+	rnpgbe_link_state_on,
+	rnpgbe_link_state_off,
+};
+
+struct vf_data_storage {
+	unsigned char vf_mac_addresses[ETH_ALEN];
+	u16 vf_mc_hashes[RNP_MAX_VF_MC_ENTRIES];
+	u16 num_vf_mc_hashes;
+	u16 default_vf_vlan_id;
+	u16 vlans_enabled;
+	bool clear_to_send;
+	bool pf_set_mac;
+	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+	u16 vf_vlan; /* a vf can set just 1 vlan */
+	u16 pf_qos;
+	u16 tx_rate;
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+	int link_state;
+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
+	u16 vlan_count;
+	u8 spoofchk_enabled;
+	u8 trusted;
+	unsigned long status;
+	unsigned int vf_api;
+};
+
+enum vf_state_t {
+	__VF_MBX_USED,
+};
+
+struct vf_macvlans {
+	struct list_head l;
+	int vf;
+	int rar_entry;
+	bool free;
+	bool is_macvlan;
+	u8 vf_macvlan[ETH_ALEN];
+};
+
+/* tx max is now 4k for one desc */
+/* feiteng: using 12k gives better netperf performance */
+#define RNP_MAX_TXD_PWR 12
+#define RNP_MAX_DATA_PER_TXD (1 << RNP_MAX_TXD_PWR)
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), RNP_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffers
+ */
+struct rnpgbe_tx_buffer {
+	struct rnpgbe_tx_desc *next_to_watch;
+	unsigned long time_stamp;
+	struct sk_buff *skb;
+	unsigned int bytecount;
+	unsigned short gso_segs;
+	bool gso_need_padding;
+	__be16 protocol;
+	__be16 priv_tags;
+	DEFINE_DMA_UNMAP_ADDR(dma);
+	DEFINE_DMA_UNMAP_LEN(len);
+	union {
+		u32 mss_len_vf_num;
+		struct {
+			__le16 mss_len;
+			u8 vf_num;
+			u8 l4_hdr_len;
+		};
+	};
+	union {
+		u32 inner_vlan_tunnel_len;
+		struct {
+			u8 tunnel_hdr_len;
+			u8 inner_vlan_l;
+			u8 inner_vlan_h;
+			u8 resv;
+		};
+	};
+	bool ctx_flag;
+};
+
+struct rnpgbe_rx_buffer {
+	struct sk_buff *skb;
+	dma_addr_t dma;
+#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT
+	struct page *page;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+	__u32 page_offset;
+#else /* (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) */
+	__u16 page_offset;
+#endif /* (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) */
+	__u16 pagecnt_bias;
+#endif /* CONFIG_RNP_DISABLE_PACKET_SPLIT */
+};
+
+struct rnpgbe_queue_stats {
+	u64 packets;
+	u64 bytes;
+};
+
+struct rnpgbe_tx_queue_stats {
+	u64 restart_queue;
+	u64 tx_busy;
+	u64 tx_done_old;
+	u64 clean_desc;
+	u64 poll_count;
+	u64 irq_more_count;
+	u64 send_bytes;
+	u64 send_bytes_to_hw;
+	u64 todo_update;
+	u64 send_done_bytes;
+	u64 vlan_add;
+	u64 tx_next_to_clean;
+	u64 tx_irq_miss;
+	u64 tx_equal_count;
+	u64 tx_clean_times;
+	u64 tx_clean_count;
+};
+
+struct rnpgbe_rx_queue_stats {
+	u64 driver_drop_packets;
+	u64 rsc_count;
+	u64 rsc_flush;
+	u64 non_eop_descs;
+	u64 alloc_rx_page_failed;
+	u64 alloc_rx_buff_failed;
+	u64 alloc_rx_page;
+	u64 csum_err;
+	u64 csum_good;
+	u64 poll_again_count;
+	u64 vlan_remove;
+	u64 rx_next_to_clean;
+	u64 rx_irq_miss;
+	u64 rx_equal_count;
+	u64 rx_clean_times;
+	u64 rx_clean_count;
+};
+
+enum rnpgbe_ring_state_t {
+#ifndef CONFIG_RNPGBE_DISABLE_PACKET_SPLIT
+	__RNP_RX_3K_BUFFER,
+	__RNP_RX_BUILD_SKB_ENABLED,
+#endif /* CONFIG_RNPGBE_DISABLE_PACKET_SPLIT */
+	__RNP_TX_FDIR_INIT_DONE,
+	__RNP_TX_XPS_INIT_DONE,
+	__RNP_TX_DETECT_HANG,
+	__RNP_HANG_CHECK_ARMED,
+	__RNP_RX_CSUM_UDP_ZERO_ERR,
+	__RNP_RX_FCOE,
+};
+
+enum {
+	PART_FW,
+	PART_CFG,
+	PART_MACSN,
+	PART_PCSPHY,
+	PART_PXE,
+};
+
+#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT
+#define ring_uses_build_skb(ring)                                              \
+	test_bit(__RNP_RX_BUILD_SKB_ENABLED, &(ring)->state)
+#endif
+#define check_for_tx_hang(ring) test_bit(__RNP_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring)                                            \
+	set_bit(__RNP_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring)                                          \
+	clear_bit(__RNP_TX_DETECT_HANG, &(ring)->state)
+
+struct rnpgbe_ring {
+	struct rnpgbe_ring *next; /* pointer to next ring in q_vector */
+	struct rnpgbe_q_vector *q_vector; /* backpointer to host q_vector */
+	struct net_device *netdev; /* netdev ring belongs to */
+	struct device *dev; /* device for DMA mapping */
+	void *desc; /* descriptor ring memory */
+	union {
+		struct rnpgbe_tx_buffer *tx_buffer_info;
+		struct rnpgbe_rx_buffer *rx_buffer_info;
+	};
+	unsigned long last_rx_timestamp;
+	unsigned long state;
+	u8 __iomem *ring_addr;
+	u8 __iomem *tail;
+	u8 __iomem *dma_int_stat;
+	u8 __iomem *dma_int_mask;
+	u8 __iomem *dma_int_clr;
+	dma_addr_t dma; /* phys. address of descriptor ring */
+	unsigned int size; /* length in bytes */
+	u32 ring_flags;
+#define RNP_RING_FLAG_DELAY_SETUP_RX_LEN ((u32)(1 << 0))
+#define RNP_RING_FLAG_CHANGE_RX_LEN ((u32)(1 << 1))
+#define RNP_RING_FLAG_DO_RESET_RX_LEN ((u32)(1 << 2))
+#define RNP_RING_SKIP_TX_START ((u32)(1 << 3))
+#define RNP_RING_NO_TUNNEL_SUPPORT ((u32)(1 << 4))
+#define RNP_RING_SIZE_CHANGE_FIX ((u32)(1 << 5))
+#define RNP_RING_SCATER_SETUP ((u32)(1 << 6))
+#define RNP_RING_STAGS_SUPPORT ((u32)(1 << 7))
+#define RNP_RING_DOUBLE_VLAN_SUPPORT ((u32)(1 << 8))
+#define RNP_RING_VEB_MULTI_FIX ((u32)(1 << 9))
+#define RNP_RING_IRQ_MISS_FIX ((u32)(1 << 10))
+#define RNP_RING_OUTER_VLAN_FIX ((u32)(1 << 11))
+#define RNP_RING_CHKSM_FIX ((u32)(1 << 12))
+#define RNP_RING_LOWER_ITR ((u32)(1 << 13))
+	u8 pfvfnum;
+	u16 count; /* amount of descriptors */
+	u16 temp_count;
+	u16 reset_count;
+	u8 queue_index; /* queue_index needed for multiqueue queue management */
+	u8 rnpgbe_queue_idx; /* the real ring, used by dma */
+	u16 next_to_use; /* tail (not-dma-mapped) */
+	u16 next_to_clean; /* soft-saved-head */
+	u16 device_id;
+#ifdef OPTM_WITH_LPAGE
+	u16 rx_page_buf_nums;
+	u32 rx_per_buf_mem;
+	struct sk_buff *skb;
+#endif /* OPTM_WITH_LPAGE */
+	union {
+#ifdef CONFIG_RNP_DISABLE_PACKET_SPLIT
+		u16 rx_buf_len;
+#else /* CONFIG_RNP_DISABLE_PACKET_SPLIT */
+		u16 next_to_alloc;
+#endif /* CONFIG_RNP_DISABLE_PACKET_SPLIT */
+		struct {
+			u8 atr_sample_rate;
+			u8 atr_count;
+		};
+	};
+
+	u8 dcb_tc;
+	struct rnpgbe_queue_stats stats;
+#ifdef HAVE_NDO_GET_STATS64
+	struct u64_stats_sync syncp;
+#endif /* HAVE_NDO_GET_STATS64 */
+	union {
+		struct rnpgbe_tx_queue_stats tx_stats;
+		struct rnpgbe_rx_queue_stats rx_stats;
+	};
+} ____cacheline_internodealigned_in_smp;
+
+#define RING2ADAPT(ring) netdev_priv((ring)->netdev)
+
+enum rnpgbe_ring_f_enum {
+	RING_F_NONE = 0,
+	RING_F_VMDQ, /* SR-IOV uses the same ring feature */
+	RING_F_RSS,
+	RING_F_FDIR,
+	RING_F_ARRAY_SIZE /* must be last in enum set */
+};
+
+#define RNP_MAX_RSS_INDICES 128
+#define RNP_MAX_RSS_INDICES_UV3P 8
+#define RNP_MAX_VMDQ_INDICES 64
+#define RNP_MAX_FDIR_INDICES 63 /* based on q_vector limit */
+#define RNP_MAX_FCOE_INDICES 8
+#define MAX_RX_QUEUES (128)
+#define MAX_TX_QUEUES (128)
+struct rnpgbe_ring_feature {
+	u16 limit; /* upper limit on feature indices */
+	u16 indices; /* current value of indices */
+	u16 mask; /* Mask used for feature to ring mapping */
+	u16 offset; /* offset to start of feature */
+} ____cacheline_internodealigned_in_smp;
+
+#define RNP_n10_VMDQ_8Q_MASK 0x78
+#define RNP_n10_VMDQ_4Q_MASK 0x7C
+#define RNP_n10_VMDQ_2Q_MASK 0x7E
+
+/*
+ * FCoE requires that all Rx buffers be over 2200 bytes in length.  Since
+ * this is twice the size of a half page we need to double the page order
+ * for FCoE enabled Rx queues.
+ */
+static inline unsigned int rnpgbe_rx_bufsz(struct rnpgbe_ring *ring)
+{
+	/* one rx descriptor transfers at most half a page (2048 bytes);
+	 * scatter-gather is needed for jumbo frames
+	 */
+	return (RNP_RXBUFFER_1536 - NET_IP_ALIGN);
+}
+
+/* SG , 1 rx-desc use one page */
+static inline unsigned int rnpgbe_rx_pg_order(struct rnpgbe_ring *ring)
+{
+	/* fixed 1 page */
+	/* we don't support 3k buffer */
+	return 0;
+}
+#define rnpgbe_rx_pg_size(_ring) (PAGE_SIZE << rnpgbe_rx_pg_order(_ring))
+#define DEFAULT_ADV (RNP_LINK_SPEED_1GB_FULL | RNP_LINK_SPEED_100_FULL | \
+	RNP_LINK_SPEED_10_FULL | RNP_LINK_SPEED_10_HALF | \
+	RNP_LINK_SPEED_100_HALF)
+
+struct rnpgbe_ring_container {
+	struct rnpgbe_ring *ring; /* pointer to linked list of rings */
+	unsigned long next_update; /* jiffies value of last update */
+	unsigned int total_bytes; /* total bytes processed this int */
+	unsigned int total_packets; /* total packets processed this int */
+	unsigned int total_packets_old;
+	u16 work_limit; /* total work allowed per interrupt */
+	u16 count; /* total number of rings in vector */
+	u16 itr; /* current ITR/MSIX vector setting for ring */
+	u16 add_itr;
+	int update_count;
+};
+
+/* iterator for handling rings in ring container */
+#define rnpgbe_for_each_ring(pos, head)                                        \
+	for (pos = (head).ring; pos != NULL; pos = pos->next)
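+
+/* Illustrative only (total_packets is a placeholder local): walks every
+ * ring linked into one container of a q_vector, e.g.
+ *
+ *	struct rnpgbe_ring *ring;
+ *
+ *	rnpgbe_for_each_ring(ring, q_vector->tx)
+ *		total_packets += ring->stats.packets;
+ */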
+
+#define MAX_RX_PACKET_BUFFERS ((adapter->flags & RNP_FLAG_DCB_ENABLED) ? 8 : 1)
+#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
+
+/* MAX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+
+#define SUPPORT_IRQ_AFFINITY_CHANGE
+struct rnpgbe_q_vector {
+	int old_rx_count;
+	int new_rx_count;
+	int large_times;
+	int small_times;
+	int too_small_times;
+	int middle_time;
+	struct rnpgbe_adapter *adapter;
+#ifdef CONFIG_RNP_DCA
+	int cpu; /* CPU for DCA */
+#endif /* CONFIG_RNP_DCA */
+	int v_idx;
+	/* index of q_vector within array, also used for
+	 * finding the bit in EICR and friends that
+	 * represents the vector for this ring
+	 */
+	u16 itr_rx;
+	u16 itr_tx;
+	struct rnpgbe_ring_container rx, tx;
+	struct napi_struct napi;
+#ifdef HAVE_IRQ_AFFINITY_HINT
+	cpumask_t affinity_mask;
+#endif /* HAVE_IRQ_AFFINITY_HINT */
+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
+#ifdef SUPPORT_IRQ_AFFINITY_CHANGE
+	struct irq_affinity_notify affinity_notify;
+#endif /* SUPPORT_IRQ_AFFINITY_CHANGE */
+#endif /* HAVE_IRQ_AFFINITY_NOTIFY */
+	int numa_node;
+	struct rcu_head rcu; /* to avoid race with update stats on free */
+	u32 vector_flags;
+#define RNP_QVECTOR_FLAG_IRQ_MISS_CHECK ((u32)(1 << 0))
+#define RNP_QVECTOR_FLAG_ITR_FEATURE ((u32)(1 << 1))
+#define RNP_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS ((u32)(1 << 2))
+	int irq_check_usecs;
+	struct hrtimer irq_miss_check_timer; /* to check irq miss */
+	char name[IFNAMSIZ + 9];
+	/* for dynamic allocation of rings associated with this q_vector */
+	struct rnpgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
+};
+
+static inline __le16 rnpgbe_test_ext_cmd(union rnpgbe_rx_desc *rx_desc,
+					 const u16 stat_err_bits)
+{
+	return rx_desc->wb.rev1 & cpu_to_le16(stat_err_bits);
+}
+
+#ifdef RNPGBE_HWMON
+#define RNPGBE_HWMON_TYPE_LOC 0
+#define RNPGBE_HWMON_TYPE_TEMP 1
+#define RNPGBE_HWMON_TYPE_CAUTION 2
+#define RNPGBE_HWMON_TYPE_MAX 3
+#define RNPGBE_HWMON_TYPE_NAME 4
+
+struct hwmon_attr {
+	struct device_attribute dev_attr;
+	struct rnpgbe_hw *hw;
+	struct rnpgbe_thermal_diode_data *sensor;
+	char name[12];
+};
+
+struct hwmon_buff {
+#ifdef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS
+	struct attribute_group group;
+	const struct attribute_group *groups[2];
+	struct attribute *attrs[RNPGBE_MAX_SENSORS * 4 + 1];
+	struct hwmon_attr hwmon_list[RNPGBE_MAX_SENSORS * 4];
+#else
+	struct device *device;
+	struct hwmon_attr *hwmon_list;
+#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */
+	unsigned int n_hwmon;
+};
+#endif /* RNPGBE_HWMON */
+
+/* rnpgbe_test_staterr - tests bits in Rx descriptor status and error fields */
+static inline __le16 rnpgbe_test_staterr(union rnpgbe_rx_desc *rx_desc,
+					 const u16 stat_err_bits)
+{
+	return rx_desc->wb.cmd & cpu_to_le16(stat_err_bits);
+}
+
+static inline __le16 rnpgbe_get_stat(union rnpgbe_rx_desc *rx_desc,
+				     const u16 stat_mask)
+{
+	return rx_desc->wb.cmd & cpu_to_le16(stat_mask);
+}
+
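+/* rnpgbe_desc_unused - free descriptors in the ring; one slot is always kept
+ * empty so a full ring can be told apart from an empty one
+ */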
+static inline u16 rnpgbe_desc_unused(struct rnpgbe_ring *ring)
+{
+	u16 ntc = ring->next_to_clean;
+	u16 ntu = ring->next_to_use;
+
+	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
+
+static inline u16 rnpgbe_desc_unused_rx(struct rnpgbe_ring *ring)
+{
+	u16 ntc = ring->next_to_clean;
+	u16 ntu = ring->next_to_use;
+
+	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
+
+#define RNP_RX_DESC(R, i) (&(((union rnpgbe_rx_desc *)((R)->desc))[i]))
+#define RNP_TX_DESC(R, i) (&(((struct rnpgbe_tx_desc *)((R)->desc))[i]))
+#define RNP_TX_CTXTDESC(R, i) (&(((struct rnpgbe_tx_ctx_desc *)((R)->desc))[i]))
+
+#define RNP_MAX_JUMBO_FRAME_SIZE 9590 /* Maximum Supported Size 9.5KB */
+#define RNP_MIN_MTU 68
+#define RNP500_MAX_JUMBO_FRAME_SIZE 9722 /* Maximum Supported Size 9728 */
+
+#define OTHER_VECTOR 1
+#define NON_Q_VECTORS (OTHER_VECTOR)
+
+/* default to trying for four seconds */
+#define RNP_TRY_LINK_TIMEOUT (4 * HZ)
+
+#define RNP_MAX_USER_PRIO (8)
+#define RNP_MAX_TCS_NUM (4)
+struct rnpgbe_pfc_cfg {
+	u8 pfc_max; /* hardware can enabled max pfc channel */
+	u8 hw_pfc_map; /* enable the prio channel bit */
+	u8 pfc_num; /* at present enabled the pfc-channel num */
+	u8 pfc_en; /* enabled the pfc feature or not */
+};
+
+struct rnpgbe_dcb_num_tcs {
+	u8 pg_tcs;
+	u8 pfc_tcs;
+};
+
+struct rnpgbe_dcb_cfg {
+	u8 tc_num;
+	u16 delay; /* pause time */
+	u8 dcb_en; /* enabled the dcb feature or not */
+	u8 dcbx_mode;
+	struct rnpgbe_pfc_cfg pfc_cfg;
+	struct rnpgbe_dcb_num_tcs num_tcs;
+
+	/* statistic info */
+
+	u64 requests[RNP_MAX_TCS_NUM];
+	u64 indications[RNP_MAX_TCS_NUM];
+
+	enum rnpgbe_fc_mode last_lfc_mode;
+};
+struct rnpgbe_pps_cfg {
+	bool available;
+	struct timespec64 start;
+	struct timespec64 period;
+};
+
+enum rss_func_mode_enum {
+	rss_func_top,
+	rss_func_xor,
+	rss_func_order,
+};
+
+enum outer_vlan_type_enum {
+	outer_vlan_type_88a8,
+#ifdef ETH_P_QINQ1
+	outer_vlan_type_9100,
+#endif
+#ifdef ETH_P_QINQ2
+	outer_vlan_type_9200,
+#endif
+	outer_vlan_type_max,
+};
+
+enum irq_mode_enum {
+	irq_mode_legency,
+	irq_mode_msi,
+	irq_mode_msix,
+};
+
+/* board specific private data structure */
+struct rnpgbe_adapter {
+#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX)
+#ifdef HAVE_VLAN_RX_REGISTER
+	struct vlan_group *vlgrp; /* must be first, see rnpgbe_receive_skb */
+#else
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+	unsigned long active_vlans_stags[BITS_TO_LONGS(VLAN_N_VID)];
+#endif
+
+#endif /* NETIF_F_HW_VLAN_TX || NETIF_F_HW_VLAN_CTAG_TX */
+	/* OS defined structs */
+	u16 vf_vlan;
+	u16 vlan_count;
+	int miss_time;
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	/* just for test */
+	/* struct page *page[512]; */
+	/* int page_count; */
+
+	bool quit_poll_thread;
+	struct task_struct *rx_poll_thread;
+	unsigned long state;
+#ifdef HAVE_TX_MQ
+#ifndef HAVE_NETDEV_SELECT_QUEUE
+	unsigned int indices;
+#endif
+#endif
+	spinlock_t link_stat_lock;
+
+	/* these fields are used for automatic itr adjustment;
+	 * the hardware does not support this well
+	 */
+	unsigned long last_moder_packets[MAX_RX_QUEUES];
+	unsigned long last_moder_tx_packets;
+	unsigned long last_moder_bytes[MAX_RX_QUEUES];
+	unsigned long last_moder_jiffies;
+	int last_moder_time[MAX_RX_QUEUES];
+	/* only rx itr is supported */
+	int shut_down_temp;
+	int usecendcount;
+	u16 rx_usecs;
+	u16 rx_frames;
+	u16 usecstocount;
+	u16 tx_frames;
+	u16 tx_usecs;
+	u32 pkt_rate_low;
+	u16 rx_usecs_low;
+	u32 pkt_rate_high;
+	u16 rx_usecs_high;
+	u32 sample_interval;
+	u32 adaptive_rx_coal;
+	u32 adaptive_tx_coal;
+	u32 auto_rx_coal;
+
+	int napi_budge;
+
+	int eee_enabled; /* eee control enabled */
+	int eee_active; /* current eee status */
+	int tx_lpi_timer;
+	bool tx_path_in_lpi_mode;
+	bool en_tx_lpi_clockgating;
+	int eee_timer;
+	int local_eee;
+	int partner_eee;
+	struct mutex eee_lock;
+
+	union {
+		int phy_addr;
+		struct {
+			u8 mod_abs : 1;
+			u8 fault : 1;
+			u8 tx_dis : 1;
+			u8 los : 1;
+		} sfp;
+	};
+
+	struct {
+		u32 main;
+		u32 pre;
+		u32 post;
+		u32 tx_boost;
+	} si;
+
+	int speed;
+
+	u8 an : 1;
+	u8 fec : 1;
+	u8 link_traing : 1;
+	u8 duplex : 1;
+	u8 rpu_inited : 1;
+
+	/* Some features need tri-state capability,
+	 * thus the additional *_CAPABLE flags.
+	 */
+	u32 vf_num_for_pf;
+	u32 flags;
+	u32 gephy_test_mode;
+#define RNP_FLAG_MSI_CAPABLE ((u32)(1 << 0))
+#define RNP_FLAG_MSI_ENABLED ((u32)(1 << 1))
+#define RNP_FLAG_MSIX_CAPABLE ((u32)(1 << 2))
+#define RNP_FLAG_MSIX_ENABLED ((u32)(1 << 3))
+#define RNP_FLAG_RX_1BUF_CAPABLE ((u32)(1 << 4))
+#define RNP_FLAG_RX_PS_CAPABLE ((u32)(1 << 5))
+#define RNP_FLAG_RX_PS_ENABLED ((u32)(1 << 6))
+#define RNP_FLAG_IN_NETPOLL ((u32)(1 << 7))
+#define RNP_FLAG_DCA_ENABLED ((u32)(1 << 8))
+#define RNP_FLAG_DCA_CAPABLE ((u32)(1 << 9))
+#define RNP_FLAG_IMIR_ENABLED ((u32)(1 << 10))
+#define RNP_FLAG_MQ_CAPABLE ((u32)(1 << 11))
+#define RNP_FLAG_DCB_ENABLED ((u32)(1 << 12))
+#define RNP_FLAG_VMDQ_CAPABLE ((u32)(1 << 13))
+#define RNP_FLAG_VMDQ_ENABLED ((u32)(1 << 14))
+#define RNP_FLAG_FAN_FAIL_CAPABLE ((u32)(1 << 15))
+#define RNP_FLAG_NEED_LINK_UPDATE ((u32)(1 << 16))
+#define RNP_FLAG_NEED_LINK_CONFIG ((u32)(1 << 17))
+#define RNP_FLAG_FDIR_HASH_CAPABLE ((u32)(1 << 18))
+#define RNP_FLAG_FDIR_PERFECT_CAPABLE ((u32)(1 << 19))
+#define RNP_FLAG_FCOE_CAPABLE ((u32)(1 << 20))
+#define RNP_FLAG_FCOE_ENABLED ((u32)(1 << 21))
+#define RNP_FLAG_SRIOV_CAPABLE ((u32)(1 << 22))
+#define RNP_FLAG_SRIOV_ENABLED ((u32)(1 << 23))
+#define RNP_FLAG_VXLAN_OFFLOAD_CAPABLE ((u32)(1 << 24))
+#define RNP_FLAG_VXLAN_OFFLOAD_ENABLE ((u32)(1 << 25))
+#define RNP_FLAG_SWITCH_LOOPBACK_EN ((u32)(1 << 26))
+#define RNP_FLAG_SRIOV_INIT_DONE ((u32)(1 << 27))
+#define RNP_FLAG_IN_IRQ ((u32)(1 << 28))
+#define RNP_FLAG_VF_INIT_DONE ((u32)(1 << 29))
+#define RNP_FLAG_LEGACY_CAPABLE ((u32)(1 << 30))
+#define RNP_FLAG_LEGACY_ENABLED ((u32)(1 << 31))
+	u32 flags2;
+#define RNP_FLAG2_RSC_CAPABLE ((u32)(1 << 0))
+#define RNP_FLAG2_RSC_ENABLED ((u32)(1 << 1))
+#define RNP_FLAG2_TEMP_SENSOR_CAPABLE ((u32)(1 << 2))
+#define RNP_FLAG2_TEMP_SENSOR_EVENT ((u32)(1 << 3))
+#define RNP_FLAG2_SEARCH_FOR_SFP ((u32)(1 << 4))
+#define RNP_FLAG2_SFP_NEEDS_RESET ((u32)(1 << 5))
+#define RNP_FLAG2_RESET_REQUESTED ((u32)(1 << 6))
+#define RNP_FLAG2_FDIR_REQUIRES_REINIT ((u32)(1 << 7))
+#define RNP_FLAG2_RSS_FIELD_IPV4_UDP ((u32)(1 << 8))
+#define RNP_FLAG2_RSS_FIELD_IPV6_UDP ((u32)(1 << 9))
+#define RNP_FLAG2_PTP_ENABLED ((u32)(1 << 10))
+#define RNP_FLAG2_PTP_PPS_ENABLED ((u32)(1 << 11))
+#define RNP_FLAG2_BRIDGE_MODE_VEB ((u32)(1 << 12))
+#define RNP_FLAG2_VLAN_STAGS_ENABLED ((u32)(1 << 13))
+#define RNP_FLAG2_UDP_TUN_REREG_NEEDED ((u32)(1 << 14))
+#define RNP_FLAG2_RESET_PF ((u32)(1 << 15))
+#define RNP_FLAG2_CHKSM_FIX ((u32)(1 << 16))
+#define RNP_FLAG2_INSMOD ((u32)(1 << 17))
+#define RNP_FLAG2_NO_NET_REG ((u32)(1 << 18))
+
+	u32 priv_flags;
+#define RNP_PRIV_FLAG_MAC_LOOPBACK BIT(0)
+#define RNP_PRIV_FLAG_SWITCH_LOOPBACK BIT(1)
+#define RNP_PRIV_FLAG_VEB_ENABLE BIT(2)
+#define RNP_PRIV_FLAG_FT_PADDING BIT(3)
+#define RNP_PRIV_FLAG_PADDING_DEBUG BIT(4)
+#define RNP_PRIV_FLAG_PTP_DEBUG BIT(5)
+#define RNP_PRIV_FLAG_SIMUATE_DOWN BIT(6)
+#define RNP_PRIV_FLAG_VXLAN_INNER_MATCH BIT(7)
+#define RNP_PRIV_FLAG_ULTRA_SHORT BIT(8)
+#define RNP_PRIV_FLAG_DOUBLE_VLAN BIT(9)
+#define RNP_PRIV_FLAG_TCP_SYNC BIT(10)
+#define RNP_PRIV_FLAG_PAUSE_OWN BIT(11)
+#define RNP_PRIV_FLAG_JUMBO BIT(12)
+#define RNP_PRIV_FLAG_TX_PADDING BIT(13)
+#define RNP_PRIV_FLAG_RX_ALL BIT(14)
+#define RNP_PRIV_FLAG_REC_HDR_LEN_ERR BIT(15)
+#define RNP_PRIV_FLAG_RX_FCS BIT(16)
+#define RNP_PRIV_FLAG_DOUBLE_VLAN_RECEIVE BIT(17)
+#define RNP_PRIV_FLGA_TEST_TX_HANG BIT(18)
+#define RNP_PRIV_FLAG_RX_SKIP_EN BIT(19)
+#define RNP_PRIV_FLAG_TCP_SYNC_PRIO BIT(20)
+#define RNP_PRIV_FLAG_REMAP_PRIO BIT(21)
+#define RNP_PRIV_FLAG_8023_PRIO BIT(22)
+#define RNP_PRIV_FLAG_SRIOV_VLAN_MODE BIT(23)
+#define RNP_PRIV_FLAG_SOFT_TX_PADDING BIT(24)
+#define RNP_PRIV_FLAG_TX_COALESCE BIT(25)
+#define RNP_PRIV_FLAG_RX_COALESCE BIT(26)
+#define RNP_PRIV_FLAG_LLDP BIT(27)
+#define RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE BIT(28)
+
+#define PRIV_DATA_EN BIT(7)
+	int rss_func_mode;
+	int outer_vlan_type;
+	int tcp_sync_queue;
+	int priv_skip_count;
+
+	u64 rx_drop_status;
+	int drop_time;
+	/* Tx fast path data */
+	unsigned int num_tx_queues;
+	unsigned int max_ring_pair_counts;
+	// unsigned int txrx_queue_count;
+	u16 tx_work_limit;
+
+#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD)
+	__be16 vxlan_port;
+#endif /* HAVE_UDP_ENC_RX_OFFLAD || HAVE_VXLAN_RX_OFFLOAD */
+#ifdef HAVE_UDP_ENC_RX_OFFLOAD
+	__be16 geneve_port;
+#endif /* HAVE_UDP_ENC_RX_OFFLOAD */
+	/* Rx fast path data */
+	int num_rx_queues;
+	u16 rx_itr_setting;
+	u32 eth_queue_idx;
+	u32 max_rate[MAX_TX_QUEUES];
+	/* TX */
+	struct rnpgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
+	int tx_ring_item_count;
+
+	u64 restart_queue;
+	u64 lsc_int;
+	u32 tx_timeout_count;
+
+	/* RX */
+	struct rnpgbe_ring *rx_ring[MAX_RX_QUEUES];
+	int rx_ring_item_count;
+
+	u64 hw_csum_rx_error;
+	u64 hw_csum_rx_good;
+	u64 hw_rx_no_dma_resources;
+	u64 rsc_total_count;
+	u64 rsc_total_flush;
+	u64 non_eop_descs;
+	u32 alloc_rx_page_failed;
+	u32 alloc_rx_buff_failed;
+
+	int num_other_vectors;
+	int irq_mode;
+	struct rnpgbe_q_vector *q_vector[MAX_Q_VECTORS];
+
+	/* IEEE 1588 ptp clock fields - start */
+	u8 __iomem *ptp_addr;
+	int gmac4;
+	const struct rnpgbe_hwtimestamp *hwts_ops;
+	struct ptp_clock *ptp_clock;
+	struct ptp_clock_info ptp_clock_ops;
+	struct sk_buff *ptp_tx_skb;
+	struct hwtstamp_config tstamp_config;
+	u32 ptp_config_value;
+	spinlock_t ptp_lock; /* Used to protect the SYSTIME registers. */
+
+	u64 clk_ptp_rate; /* unit is Hz, 1 MHz = 1000000 Hz */
+	u32 sub_second_inc;
+	u32 systime_flags;
+	struct timespec64 ptp_prev_hw_time;
+	unsigned int default_addend;
+	bool ptp_tx_en;
+	bool ptp_rx_en;
+
+	struct work_struct tx_hwtstamp_work;
+	unsigned long tx_hwtstamp_start;
+	unsigned long tx_hwtstamp_skipped;
+	unsigned long tx_timeout_factor;
+	u64 tx_hwtstamp_timeouts;
+	/* IEEE 1588 ptp clock fields - end */
+
+	/* DCB parameters */
+	struct rnpgbe_dcb_cfg dcb_cfg;
+	u8 prio_tc_map[RNP_MAX_USER_PRIO * 2];
+	u8 num_tc;
+
+	int num_q_vectors; /* current number of q_vectors for device */
+	int max_q_vectors; /* true count of q_vectors for device */
+	struct rnpgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
+	struct msix_entry *msix_entries;
+
+	u32 test_icr;
+	struct rnpgbe_ring test_tx_ring;
+	struct rnpgbe_ring test_rx_ring;
+
+	/* structs defined in rnpgbe_hw.h */
+	struct rnpgbe_hw hw;
+	u16 msg_enable;
+	struct rnpgbe_hw_stats hw_stats;
+
+	u64 tx_busy;
+
+	u32 link_speed;
+	bool link_up;
+	bool duplex_status;
+	u32 link_speed_old;
+	bool link_up_old;
+	bool duplex_old;
+	unsigned long link_check_timeout;
+
+	struct timer_list service_timer;
+	struct work_struct service_task;
+	struct timer_list eee_ctrl_timer;
+
+	/* fdir relative */
+	struct hlist_head fdir_filter_list;
+	unsigned long fdir_overflow; /* number of times ATR was backed off */
+	union rnpgbe_atr_input fdir_mask;
+	int fdir_mode;
+	int fdir_filter_count;
+	/* fixme: convert to a bitmap */
+	/* unsigned long layer2_bit[BITS_TO_LONGS(RNP_MAX_LAYER2_FILTERS)]; */
+	int layer2_count;
+	/* fixme: convert to a bitmap */
+	/* unsigned long tuple5_bit[BITS_TO_LONGS(RNP_MAX_TCAM_FILTERS)]; */
+	int tuple_5_count;
+	u32 fdir_pballoc; /* total count */
+	u32 atr_sample_rate;
+	spinlock_t fdir_perfect_lock;
+
+	u8 __iomem *io_addr_bar0;
+	u8 __iomem *io_addr;
+	u32 wol;
+
+	u16 bd_number;
+	u16 q_vector_off;
+
+	u16 eeprom_verh;
+	u16 eeprom_verl;
+	u16 eeprom_cap;
+
+	u16 stags_vid;
+	/* sysfs debug info */
+	u32 sysfs_tx_ring_num;
+	u32 sysfs_rx_ring_num;
+	u32 sysfs_tx_desc_num;
+	u32 sysfs_rx_desc_num;
+
+	u32 sysfs_mii_reg;
+	u32 sysfs_mii_value;
+	u32 sysfs_mii_control;
+
+	u32 interrupt_event;
+	u32 led_reg;
+
+	/* maintain */
+	char *maintain_buf;
+	int maintain_buf_len;
+	void *maintain_dma_buf;
+	dma_addr_t maintain_dma_phy;
+	int maintain_dma_size;
+	int maintain_in_bytes;
+
+	/* SR-IOV */
+	DECLARE_BITMAP(active_vfs, RNP_MAX_VF_FUNCTIONS);
+	unsigned int num_vfs;
+	struct vf_data_storage *vfinfo;
+	int vf_rate_link_speed;
+	struct vf_macvlans vf_mvs;
+	struct vf_macvlans *mv_list;
+
+	u32 timer_event_accumulator;
+	u32 vferr_refcount;
+	struct kobject *info_kobj;
+#ifdef RNP_SYSFS
+#ifdef RNPGBE_HWMON
+#ifdef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS
+	struct hwmon_buff *rnpgbe_hwmon_buff;
+#else
+	struct hwmon_buff rnpgbe_hwmon_buff;
+#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */
+#endif /* RNPGBE_HWMON */
+#endif /* RNP_SYSFS */
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *rnpgbe_dbg_adapter;
+#endif /*CONFIG_DEBUG_FS*/
+
+	u8 default_up;
+	// u8 veb_vfnum;
+
+	u8 port; /* nr_pf_port: 0 or 1 */
+	u8 portid_of_card; /* port num in card*/
+
+#define RNP_MAX_RETA_ENTRIES 512
+	u8 rss_indir_tbl[RNP_MAX_RETA_ENTRIES];
+#define RNP_MAX_TC_ENTRIES 8
+	u8 rss_tc_tbl[RNP_MAX_TC_ENTRIES];
+	int rss_indir_tbl_num;
+	int rss_tc_tbl_num;
+	u32 rss_tbl_setup_flag;
+#define RNP_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
+	u8 rss_key[RNP_RSS_KEY_SIZE];
+	u32 rss_key_setup_flag;
+
+	// struct rnpgbe_info* info;
+	bool dma2_in_1pf;
+
+	char name[60];
+};
+
+struct rnpgbe_fdir_filter {
+	struct hlist_node fdir_node;
+	union rnpgbe_atr_input filter;
+	u16 sw_idx;
+	u16 hw_idx;
+	u32 vf_num;
+	u64 action;
+};
+
+enum rnpgbe_state_t {
+	__RNP_TESTING,
+	__RNP_RESETTING,
+	__RNP_DOWN,
+	__RNP_SERVICE_SCHED,
+	__RNP_IN_SFP_INIT,
+	__RNP_READ_I2C,
+	__RNP_PTP_TX_IN_PROGRESS,
+	__RNP_USE_VFINFI,
+	__RNP_IN_IRQ,
+	__RNP_REMOVE,
+	__RNP_SERVICE_CHECK,
+	__RNP_EEE_REMOVE,
+};
+
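+/* per-skb private data kept in skb->cb, accessed through RNP_CB() below */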
+struct rnpgbe_cb {
+	union { /* Union defining head/tail partner */
+		struct sk_buff *head;
+		struct sk_buff *tail;
+	};
+	dma_addr_t dma;
+	u16 append_cnt;
+	bool page_released;
+};
+#define RNP_CB(skb) ((struct rnpgbe_cb *)(skb)->cb)
+
+enum rnpgbe_boards {
+	board_n10_709_1pf_2x10G,
+	board_vu440s,
+	board_n10,
+	board_n400,
+	board_n20,
+	board_n500,
+	board_n210,
+	board_n210L,
+};
+
+#ifdef CONFIG_RNP_DCB
+extern const struct dcbnl_rtnl_ops dcbnl_ops;
+#endif
+
+extern char rnpgbe_driver_name[];
+extern const char rnpgbe_driver_version[];
+
+extern void rnpgbe_up(struct rnpgbe_adapter *adapter);
+extern void rnpgbe_down(struct rnpgbe_adapter *adapter);
+extern void rnpgbe_reinit_locked(struct rnpgbe_adapter *adapter);
+extern void rnpgbe_reset(struct rnpgbe_adapter *adapter);
+extern void rnpgbe_set_ethtool_ops(struct net_device *netdev);
+extern int rnpgbe_setup_rx_resources(struct rnpgbe_ring *ring,
+				     struct rnpgbe_adapter *adapter);
+extern int rnpgbe_setup_tx_resources(struct rnpgbe_ring *ring,
+				     struct rnpgbe_adapter *adapter);
+extern void rnpgbe_free_rx_resources(struct rnpgbe_ring *ring);
+extern void rnpgbe_free_tx_resources(struct rnpgbe_ring *ring);
+extern void rnpgbe_configure_rx_ring(struct rnpgbe_adapter *adapter,
+				     struct rnpgbe_ring *ring);
+extern void rnpgbe_configure_tx_ring(struct rnpgbe_adapter *adapter,
+				     struct rnpgbe_ring *ring);
+extern void rnpgbe_disable_rx_queue(struct rnpgbe_adapter *adapter,
+				    struct rnpgbe_ring *ring);
+extern void rnpgbe_update_stats(struct rnpgbe_adapter *adapter);
+extern int rnpgbe_init_interrupt_scheme(struct rnpgbe_adapter *adapter);
+extern int rnpgbe_wol_supported(struct rnpgbe_adapter *adapter, u16 device_id);
+extern void rnpgbe_clear_interrupt_scheme(struct rnpgbe_adapter *adapter);
+extern void
+rnpgbe_unmap_and_free_tx_resource(struct rnpgbe_ring *ring,
+				  struct rnpgbe_tx_buffer *tx_buffer_info);
+extern int rnpgbe_poll(struct napi_struct *napi, int budget);
+extern int ethtool_ioctl(struct ifreq *ifr);
+extern s32 rnpgbe_reinit_fdir_tables_n10(struct rnpgbe_hw *hw);
+extern s32 rnpgbe_init_fdir_signature_n10(struct rnpgbe_hw *hw, u32 fdirctrl);
+extern s32 rnpgbe_init_fdir_perfect_n10(struct rnpgbe_hw *hw, u32 fdirctrl);
+extern s32 rnpgbe_fdir_add_signature_filter_n10(
+	struct rnpgbe_hw *hw, union rnpgbe_atr_hash_dword input,
+	union rnpgbe_atr_hash_dword common, u8 queue);
+
+extern void rnpgbe_release_hw_control(struct rnpgbe_adapter *adapter);
+extern void rnpgbe_get_hw_control(struct rnpgbe_adapter *adapter);
+extern s32 rnpgbe_fdir_set_input_mask_n10(struct rnpgbe_hw *hw,
+					  union rnpgbe_atr_input *input_mask);
+extern s32 rnpgbe_fdir_write_perfect_filter_n10(struct rnpgbe_hw *hw,
+						union rnpgbe_atr_input *input,
+						u16 soft_id, u8 queue);
+extern s32 rnpgbe_fdir_write_perfect_filter(int fdir_mode, struct rnpgbe_hw *hw,
+					    union rnpgbe_atr_input *filter,
+					    u16 hw_id, u8 queue,
+					    bool prio_flag);
+extern s32 rnpgbe_fdir_erase_perfect_filter_n10(struct rnpgbe_hw *hw,
+						union rnpgbe_atr_input *input,
+						u16 soft_id);
+extern void rnpgbe_atr_compute_perfect_hash_n10(union rnpgbe_atr_input *input,
+						union rnpgbe_atr_input *mask);
+extern bool rnpgbe_verify_lesm_fw_enabled_n10(struct rnpgbe_hw *hw);
+extern void rnpgbe_set_rx_mode(struct net_device *netdev);
+#ifdef CONFIG_RNP_DCB
+extern void rnpgbe_set_rx_drop_en(struct rnpgbe_adapter *adapter);
+#endif
+extern int rnpgbe_setup_tx_maxrate(struct rnpgbe_ring *tx_ring, u64 max_rate,
+				   int sample_interval);
+extern int rnpgbe_setup_tc(struct net_device *dev, u8 tc);
+
+void rnpgbe_check_options(struct rnpgbe_adapter *adapter);
+
+void rnpgbe_maybe_tx_ctxtdesc(struct rnpgbe_ring *tx_ring,
+			      struct rnpgbe_tx_buffer *first, u32 type_tucmd);
+
+extern void rnpgbe_store_reta(struct rnpgbe_adapter *adapter);
+extern void rnpgbe_store_key(struct rnpgbe_adapter *adapter);
+extern int rnpgbe_init_rss_key(struct rnpgbe_adapter *adapter);
+extern int rnpgbe_init_rss_table(struct rnpgbe_adapter *adapter);
+extern s32 rnpgbe_fdir_erase_perfect_filter(int fdir_mode, struct rnpgbe_hw *hw,
+					    union rnpgbe_atr_input *input,
+					    u16 hw_id);
+extern u32 rnpgbe_rss_indir_tbl_entries(struct rnpgbe_adapter *adapter);
+#ifdef CONFIG_RNPGBE_HWMON
+extern void rnpgbe_sysfs_exit(struct rnpgbe_adapter *adapter);
+extern int rnpgbe_sysfs_init(struct rnpgbe_adapter *adapter);
+#endif /* CONFIG_RNPGBE_HWMON */
+#ifdef CONFIG_DEBUG_FS
+extern void rnpgbe_dbg_adapter_init(struct rnpgbe_adapter *adapter);
+extern void rnpgbe_dbg_adapter_exit(struct rnpgbe_adapter *adapter);
+extern void rnpgbe_dbg_init(void);
+extern void rnpgbe_dbg_exit(void);
+#else /* CONFIG_DEBUG_FS */
+static inline void rnpgbe_dbg_adapter_init(struct rnpgbe_adapter *adapter)
+{
+}
+static inline void rnpgbe_dbg_adapter_exit(struct rnpgbe_adapter *adapter)
+{
+}
+static inline void rnpgbe_dbg_init(void)
+{
+}
+static inline void rnpgbe_dbg_exit(void)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+static inline struct netdev_queue *txring_txq(const struct rnpgbe_ring *ring)
+{
+	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
+extern void rnpgbe_ptp_init(struct rnpgbe_adapter *adapter);
+extern void rnpgbe_ptp_stop(struct rnpgbe_adapter *adapter);
+extern void rnpgbe_ptp_overflow_check(struct rnpgbe_adapter *adapter);
+extern void rnpgbe_ptp_rx_hang(struct rnpgbe_adapter *adapter);
+extern void __rnpgbe_ptp_rx_hwtstamp(struct rnpgbe_q_vector *q_vector,
+				     struct sk_buff *skb);
+
+static inline void rnpgbe_ptp_rx_hwtstamp(struct rnpgbe_ring *rx_ring,
+					  union rnpgbe_rx_desc *rx_desc,
+					  struct sk_buff *skb)
+{
+	if (unlikely(!rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_PTP)))
+		return;
+	/*
+	 * Update the last_rx_timestamp timer in order to enable watchdog check
+	 * for error case of latched timestamp on a dropped packet.
+	 */
+	rx_ring->last_rx_timestamp = jiffies;
+}
+
+static inline int ignore_veb_vlan(struct rnpgbe_adapter *adapter,
+				  union rnpgbe_rx_desc *rx_desc)
+{
+	if (unlikely((adapter->flags & RNP_FLAG_SRIOV_ENABLED) &&
+		     (cpu_to_le16(rx_desc->wb.rev1) & VEB_VF_IGNORE_VLAN))) {
+		return 1;
+	}
+	return 0;
+}
+
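+/* returns 1 when SR-IOV is enabled and the descriptor carries the
+ * VEB_VF_PKG flag, so errors on this VEB-forwarded VF packet are ignored
+ */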
+static inline int ignore_veb_pkg_err(struct rnpgbe_adapter *adapter,
+				     union rnpgbe_rx_desc *rx_desc)
+{
+	if (unlikely((adapter->flags & RNP_FLAG_SRIOV_ENABLED) &&
+		     (cpu_to_le16(rx_desc->wb.rev1) & VEB_VF_PKG))) {
+		return 1;
+	}
+	return 0;
+}
+
+int rnpgbe_update_ethtool_fdir_entry(struct rnpgbe_adapter *adapter,
+				     struct rnpgbe_fdir_filter *input,
+				     u16 sw_idx);
+
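+/* the PF index comes from the low bit of the PCI devfn: 1 for PF1, 0 for PF0 */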
+static inline int rnpgbe_is_pf1(struct pci_dev *pdev)
+{
+	return ((pdev->devfn & 0x1) ? 1 : 0);
+}
+
+static inline int rnpgbe_get_fuc(struct pci_dev *pdev)
+{
+	return pdev->devfn;
+}
+
+extern void rnpgbe_sysfs_exit(struct rnpgbe_adapter *adapter);
+extern int rnpgbe_sysfs_init(struct rnpgbe_adapter *adapter);
+
+#ifdef CONFIG_PCI_IOV
+void rnpgbe_sriov_reinit(struct rnpgbe_adapter *adapter);
+#endif /* CONFIG_PCI_IOV */
+
+#define SET_BIT(n, var) ((var) |= (1 << (n)))
+#define CLR_BIT(n, var) ((var) &= ~(1 << (n)))
+#define CHK_BIT(n, var) ((var) & (1 << (n)))
+#ifdef HAVE_STRUCT_DMA_ATTRS
+#define RNP_RX_DMA_ATTR NULL
+#else /* HAVE_STRUCT_DMA_ATTRS */
+#define RNP_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+#endif /* HAVE_STRUCT_DMA_ATTRS */
+
+static inline bool rnpgbe_removed(void __iomem *addr)
+{
+	return unlikely(!addr);
+}
+#define RNP_REMOVED(a) rnpgbe_removed(a)
+int rnpgbe_fw_msg_handler(struct rnpgbe_adapter *adapter);
+int rnp500_fw_update(struct rnpgbe_hw *hw, int partition, const u8 *fw_bin,
+		     int bytes);
+int rnpgbe_fw_update(struct rnpgbe_hw *hw, int partition, const u8 *fw_bin,
+		     int bytes);
+#define RNPM_FW_VERSION_NEW_ETHTOOL 0x00050010
+void rnpgbe_service_event_schedule(struct rnpgbe_adapter *adapter);
+
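+/* true when the firmware version predates RNPM_FW_VERSION_NEW_ETHTOOL */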
+static inline bool rnpgbe_fw_is_old_ethtool(struct rnpgbe_hw *hw)
+{
+	return hw->fw_version < RNPM_FW_VERSION_NEW_ETHTOOL;
+}
+
+int rsp_hal_sfc_flash_erase(struct rnpgbe_hw *hw, u32 size);
+int rsp_hal_sfc_write_protect(struct rnpgbe_hw *hw, u32 value);
+
+#endif /* _RNPGBE_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
new file mode 100755
index 0000000000000000000000000000000000000000..715174ed8af334bc3a8e53d3a96e460410788fa2
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
@@ -0,0 +1,4673 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "rnpgbe.h"
+#include "rnpgbe_phy.h"
+#include "rnpgbe_mbx.h"
+#include "rnpgbe_ethtool.h"
+#include "rnpgbe_sriov.h"
+
+#define RNP_N500_PKT_LEN_ERR (2)
+#define RNP_N500_HDR_LEN_ERR (1)
+#define RNP_N500_MAX_VF 8
+#define RNP_N500_RSS_TBL_NUM 128
+#define RNP_N500_RSS_TC_TBL_NUM 8
+#define RNP_N500_MAX_TX_QUEUES 8
+#define RNP_N500_MAX_RX_QUEUES 8
+#define NCSI_RAR_NUM (2)
+#define NCSI_MC_NUM (5)
+#ifdef NIC_VF_FXIED
+/* we reserve 2 rar entries for ncsi */
+#define RNP_N500_RAR_ENTRIES (32 - NCSI_RAR_NUM)
+#define NCSI_RAR_IDX_START (32 - NCSI_RAR_NUM)
+#else
+/* we reserve 2 rar entries for ncsi */
+#define RNP_N500_RAR_ENTRIES (32 - NCSI_RAR_NUM)
+#define NCSI_RAR_IDX_START (32 - NCSI_RAR_NUM)
+#endif
+#define RNP_N500_MC_TBL_SIZE 128
+#define RNP_N500_VFT_TBL_SIZE 128
+#define RNP_N500_MSIX_VECTORS 32
+
+#define RNP500_MAX_LAYER2_FILTERS 16
+#define RNP500_MAX_TUPLE5_FILTERS 128
+
+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
+
+enum n500_priv_bits {
+	n500_mac_loopback = 0,
+	n500_padding_enable = 8,
+};
+
+static const char rnp500_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define RNP500_MAC_LOOPBACK BIT(0)
+#define RNP500_TX_SOLF_PADDING BIT(1)
+#define RNP500_PADDING_DEBUG BIT(2)
+#define RNP500_SIMULATE_DOWN BIT(3)
+#define RNP500_ULTRA_SHORT BIT(4)
+#define RNP500_DOUBLE_VLAN BIT(5)
+#define RNP500_PAUSE_OWN BIT(6)
+#define RNP500_STAGS_ENABLE BIT(7)
+#define RNP500_JUMBO_ENABLE BIT(8)
+#define RNP500_TX_PADDING BIT(9)
+#define RNP500_REC_HDR_LEN_ERR BIT(10)
+#define RNP500_DOUBLE_VLAN_RECEIVE BIT(11)
+#define RNP500_RX_SKIP_EN BIT(12)
+#define RNP500_TCP_SYNC_PRIO BIT(13)
+#define RNP500_REMAP_PRIO BIT(14)
+#define RNP500_8023_PRIO BIT(15)
+#define RNP500_SRIOV_VLAN_MODE BIT(16)
+#define RNP500_LLDP_EN BIT(17)
+#define RNP500_FORCE_CLOSE BIT(18)
+	"mac_loopback",
+	"soft_tx_padding_off",
+	"padding_debug",
+	"simulate_link_down",
+	"ultra_short_packet",
+	"double_vlan",
+	"pause_use_own_address",
+	"stags_enable",
+	"jumbo_enable",
+	"mac_tx_padding_off",
+	"mask_len_err",
+	"double_vlan_receive",
+	"rx_skip_en",
+	"tcp_sync_prio",
+	"remap_prio",
+	"8023_prio",
+	"sriov_vlan_mode",
+	"lldp_en",
+	"link_down_on_close",
+};
+
+#define RNP500_PRIV_FLAGS_STR_LEN ARRAY_SIZE(rnp500_priv_flags_strings)
+#endif
+
+/* setup queue speed limit to max_rate */
+static void rnpgbe_dma_set_tx_maxrate_n500(struct rnpgbe_dma_info *dma,
+					   u16 queue, u32 max_rate)
+{
+	/* todo */
+}
+
+/* setup mac with vf_num to veb table */
+static void rnpgbe_dma_set_veb_mac_n500(struct rnpgbe_dma_info *dma, u8 *mac,
+					u32 vfnum, u32 ring)
+{
+	/* n500 only has 1 port veb table */
+	u32 maclow, machi, ring_vfnum;
+	int port;
+
+	maclow = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];
+	machi = (mac[0] << 8) | mac[1];
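+	/* low bytes carry the ring, the next byte carries (0x80 | vfnum) */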
+	ring_vfnum = ring | ((0x80 | vfnum) << 8);
+	for (port = 0; port < 1; port++) {
+		dma_wr32(dma, RNP500_DMA_PORT_VBE_MAC_LO_TBL(port, vfnum),
+			 maclow);
+		dma_wr32(dma, RNP500_DMA_PORT_VBE_MAC_HI_TBL(port, vfnum),
+			 machi);
+		dma_wr32(dma, RNP500_DMA_PORT_VEB_VF_RING_TBL(port, vfnum),
+			 ring_vfnum);
+	}
+}
+
+/* setup vlan with vf_num to veb table */
+static void rnpgbe_dma_set_veb_vlan_n500(struct rnpgbe_dma_info *dma, u16 vlan,
+					 u32 vfnum)
+{
+	int port;
+
+	/* each vf can support only one vlan */
+	for (port = 0; port < 1; port++)
+		dma_wr32(dma, RNP500_DMA_PORT_VEB_VID_TBL(port, vfnum), vlan);
+}
+
+static void rnpgbe_dma_set_veb_vlan_mask_n500(struct rnpgbe_dma_info *dma,
+					      u16 vlan, u16 mask, int entry)
+{
+	/* bit 19:12 is mask bit 11:0 is vid */
+	dma_wr32(dma, RNP500_DMA_PORT_VEB_VID_TBL(0, entry),
+		 (mask << 12) | vlan);
+}
+
+static void rnpgbe_dma_clr_veb_all_n500(struct rnpgbe_dma_info *dma)
+{
+	int port, i;
+
+	for (port = 0; port < 1; port++) {
+		for (i = 0; i < RNP500_VEB_TBL_CNTS; i++) {
+			dma_wr32(dma, RNP500_DMA_PORT_VBE_MAC_LO_TBL(port, i),
+				 0);
+			dma_wr32(dma, RNP500_DMA_PORT_VBE_MAC_HI_TBL(port, i),
+				 0);
+			dma_wr32(dma, RNP500_DMA_PORT_VEB_VID_TBL(port, i), 0);
+			dma_wr32(dma, RNP500_DMA_PORT_VEB_VF_RING_TBL(port, i),
+				 0);
+		}
+	}
+}
+
+static struct rnpgbe_dma_operations dma_ops_n500 = {
+	.set_tx_maxrate = &rnpgbe_dma_set_tx_maxrate_n500,
+	.set_veb_mac = &rnpgbe_dma_set_veb_mac_n500,
+	.set_veb_vlan = &rnpgbe_dma_set_veb_vlan_n500,
+	.set_veb_vlan_mask = &rnpgbe_dma_set_veb_vlan_mask_n500,
+	.clr_veb_all = &rnpgbe_dma_clr_veb_all_n500,
+
+};
+
+/**
+ *  rnpgbe_eth_set_rar_n500 - Set Rx address register
+ *  @eth: pointer to eth structure
+ *  @index: Receive address register to write
+ *  @addr: Address to put into receive address register
+ *  @enable_addr: set flag that address is active
+ *
+ *  Puts an ethernet address into a receive address register.
+ **/
+static s32 rnpgbe_eth_set_rar_n500(struct rnpgbe_eth_info *eth,
+				   u32 index, u8 *addr,
+				   bool enable_addr)
+{
+	u32 mcstctrl;
+	u32 rar_low, rar_high = 0;
+	u32 rar_entries = eth->num_rar_entries;
+
+	/* Make sure we are using a valid rar index range */
+	if (index >= rar_entries) {
+		rnpgbe_err("RAR index %d is out of range.\n", index);
+		return RNP_ERR_INVALID_ARGUMENT;
+	}
+
+	eth_dbg(eth, "    RAR[%d] <= %pM.  vmdq:%d enable:0x%x\n", index, addr);
+
+	/*
+	 * HW expects these in big endian so we reverse the byte
+	 * order from network order (big endian) to little endian
+	 */
+	rar_low = ((u32)addr[5] | ((u32)addr[4] << 8) | ((u32)addr[3] << 16) |
+		   ((u32)addr[2] << 24));
+	/*
+	 * Some parts put the VMDq setting in the extra RAH bits,
+	 * so save everything except the lower 16 bits that hold part
+	 * of the address and the address valid bit.
+	 */
+	rar_high = eth_rd32(eth, RNP500_ETH_RAR_RH(index));
+	rar_high &= ~(0x0000FFFF | RNP500_RAH_AV);
+	rar_high |= ((u32)addr[1] | ((u32)addr[0] << 8));
+
+	if (enable_addr)
+		rar_high |= RNP500_RAH_AV;
+
+	eth_wr32(eth, RNP500_ETH_RAR_RL(index), rar_low);
+	eth_wr32(eth, RNP500_ETH_RAR_RH(index), rar_high);
+
+	/* open unicast filter */
+	/* we now not use unicast */
+	/* but we must open this since dest-mac filter | unicast table */
+	/* all packets up if close unicast table */
+	mcstctrl = eth_rd32(eth, RNP500_ETH_DMAC_MCSTCTRL);
+	mcstctrl |= RNP500_MCSTCTRL_UNICASE_TBL_EN;
+	eth_wr32(eth, RNP500_ETH_DMAC_MCSTCTRL, mcstctrl);
+
+	return 0;
+}
+
+/**
+ *  rnpgbe_eth_clear_rar_n500 - Remove Rx address register
+ *  @eth: pointer to eth structure
+ *  @index: Receive address register to write
+ *
+ *  Clears an ethernet address from a receive address register.
+ **/
+static s32 rnpgbe_eth_clear_rar_n500(struct rnpgbe_eth_info *eth,
+				     u32 index)
+{
+	u32 rar_high;
+	u32 rar_entries = eth->num_rar_entries;
+
+	/* Make sure we are using a valid rar index range */
+	if (index >= rar_entries) {
+		eth_dbg(eth, "RAR index %d is out of range.\n", index);
+		return RNP_ERR_INVALID_ARGUMENT;
+	}
+
+	/*
+	 * Some parts put the VMDq setting in the extra RAH bits,
+	 * so save everything except the lower 16 bits that hold part
+	 * of the address and the address valid bit.
+	 */
+	rar_high = eth_rd32(eth, RNP500_ETH_RAR_RH(index));
+	rar_high &= ~(0x0000FFFF | RNP500_RAH_AV);
+	eth_wr32(eth, RNP500_ETH_RAR_RL(index), 0);
+	eth_wr32(eth, RNP500_ETH_RAR_RH(index), rar_high);
+
+	/* clear VMDq pool/queue selection for this RAR */
+	eth->ops.clear_vmdq(eth, index, RNP_CLEAR_VMDQ_ALL);
+
+	return 0;
+}
+
+/**
+ *  rnpgbe_eth_set_vmdq_n500 - Associate a VMDq pool index with a rx address
+ *  @eth: pointer to eth struct
+ *  @rar: receive address register index to associate with a VMDq index
+ *  @vmdq: VMDq pool index
+ *  only mac->vf
+ **/
+static s32 rnpgbe_eth_set_vmdq_n500(struct rnpgbe_eth_info *eth,
+				    u32 rar, u32 vmdq)
+{
+	u32 rar_entries = eth->num_rar_entries;
+
+	/* Make sure we are using a valid rar index range */
+	if (rar >= rar_entries) {
+		eth_dbg(eth, "RAR index %d is out of range.\n", rar);
+		return RNP_ERR_INVALID_ARGUMENT;
+	}
+
+	eth_wr32(eth, RNP500_VM_DMAC_MPSAR_RING(rar), vmdq);
+
+	return 0;
+}
+
+/**
+ *  rnpgbe_eth_clear_vmdq_n500 - Disassociate a VMDq pool index from a rx address
+ *  @eth: pointer to eth struct
+ *  @rar: receive address register index to disassociate
+ *  @vmdq: VMDq pool index to remove from the rar
+ **/
+static s32 rnpgbe_eth_clear_vmdq_n500(struct rnpgbe_eth_info *eth,
+				      u32 rar, u32 vmdq)
+{
+	u32 rar_entries = eth->num_rar_entries;
+
+	/* Make sure we are using a valid rar index range */
+	if (rar >= rar_entries) {
+		eth_dbg(eth, "RAR index %d is out of range.\n", rar);
+		return RNP_ERR_INVALID_ARGUMENT;
+	}
+
+	eth_wr32(eth, RNP500_VM_DMAC_MPSAR_RING(rar), 0);
+
+	return 0;
+}
+
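+/* rnp500_mta_vector - compute the 12-bit multicast table hash for mc_addr,
+ * selecting the source bits according to eth->mc_filter_type
+ */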
+static s32 rnp500_mta_vector(struct rnpgbe_eth_info *eth, u8 *mc_addr)
+{
+	u32 vector = 0;
+
+	switch (eth->mc_filter_type) {
+	case 0: /* use bits [36:47] of the address */
+		vector = ((mc_addr[4] << 8) | (((u16)mc_addr[5])));
+		break;
+	case 1: /* use bits [35:46] of the address */
+		vector = ((mc_addr[4] << 7) | (((u16)mc_addr[5]) >> 1));
+		break;
+	case 2: /* use bits [34:45] of the address */
+		vector = ((mc_addr[4] << 6) | (((u16)mc_addr[5]) >> 2));
+		break;
+	case 3: /* use bits [32:43] of the address */
+		vector = ((mc_addr[4] << 5) | (((u16)mc_addr[5]) >> 3));
+		break;
+	case 4: /* use the first two address bytes, shifted right by 4 */
+		vector = ((mc_addr[0] << 8) | (((u16)mc_addr[1])));
+		vector = (vector >> 4);
+		break;
+	case 5: /* use the first two address bytes, shifted right by 3 */
+		vector = ((mc_addr[0] << 8) | (((u16)mc_addr[1])));
+		vector = (vector >> 3);
+		break;
+	case 6: /* use the first two address bytes, shifted right by 2 */
+		vector = ((mc_addr[0] << 8) | (((u16)mc_addr[1])));
+		vector = (vector >> 2);
+		break;
+	case 7: /* use the first two address bytes */
+		vector = ((mc_addr[0] << 8) | (((u16)mc_addr[1])));
+		break;
+	default: /* Invalid mc_filter_type */
+		eth_dbg(eth, "MC filter type param set incorrectly\n");
+		break;
+	}
+
+	/* vector can only be 12-bits or boundary will be exceeded */
+	vector &= 0xFFF;
+	return vector;
+}
+
+static void rnp500_set_mta(struct rnpgbe_hw *hw, u8 *mc_addr)
+{
+	u32 vector;
+	u32 vector_bit;
+	u32 vector_reg;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	hw->addr_ctrl.mta_in_use++;
+
+	vector = rnp500_mta_vector(eth, mc_addr);
+
+	/*
+	 * The MTA is a register array of 128 32-bit registers. It is treated
+	 * like an array of 4096 bits.  We want to set bit
+	 * BitArray[vector_value]. So we figure out what register the bit is
+	 * in, read it, OR in the new bit, then write back the new value.  The
+	 * register is determined by the upper 7 bits of the vector value and
+	 * the bit within that register are determined by the lower 5 bits of
+	 * the value.
+	 */
+	vector_reg = (vector >> 5) & 0x7F;
+	vector_bit = vector & 0x1F;
+	hw_dbg(hw, "\t\t%pM: MTA-BIT:%4d, MTA_REG[%d][%d] <= 1\n", mc_addr,
+	       vector, vector_reg, vector_bit);
+	eth->mta_shadow[vector_reg] |= (1 << vector_bit);
+}
+
+static void rnp500_set_vf_mta(struct rnpgbe_hw *hw, u16 vector)
+{
+	/* vf/pf use the same multicast table */
+	u32 vector_bit;
+	u32 vector_reg;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	hw->addr_ctrl.mta_in_use++;
+
+	vector_reg = (vector >> 5) & 0x7F;
+	vector_bit = vector & 0x1F;
+	hw_dbg(hw, "\t\t vf M: MTA-BIT:%4d, MTA_REG[%d][%d] <= 1\n", vector,
+	       vector_reg, vector_bit);
+	eth->mta_shadow[vector_reg] |= (1 << vector_bit);
+}
+
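+/* return the current entry of the netdev multicast list and advance
+ * *mc_addr_ptr to the next entry (NULL once the list is exhausted)
+ */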
+static u8 *rnpgbe_addr_list_itr(struct rnpgbe_hw __maybe_unused *hw,
+				u8 **mc_addr_ptr)
+{
+	struct netdev_hw_addr *mc_ptr;
+	u8 *addr = *mc_addr_ptr;
+
+	mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]);
+	if (mc_ptr->list.next) {
+		struct netdev_hw_addr *ha;
+
+		ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list);
+		*mc_addr_ptr = ha->addr;
+	} else {
+		*mc_addr_ptr = NULL;
+	}
+
+	return addr;
+}
+
+/**
+ *  rnpgbe_eth_update_mc_addr_list_n500 - Updates MAC list of multicast addresses
+ *  @eth: pointer to eth structure
+ *  @netdev: pointer to net device structure
+ *  @sriov_on: true when SR-IOV is enabled, so VF multicast hashes are merged in
+ *
+ *  The given list replaces any existing list. Clears the MC addrs from receive
+ *  address registers and the multicast table. Uses unused receive address
+ *  registers for the first multicast addresses, and hashes the rest into the
+ *  multicast table.
+ **/
+static s32 rnpgbe_eth_update_mc_addr_list_n500(struct rnpgbe_eth_info *eth,
+					       struct net_device *netdev,
+					       bool sriov_on)
+{
+	struct rnpgbe_hw *hw = (struct rnpgbe_hw *)eth->back;
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+	struct netdev_hw_addr *ha;
+#endif /* NETDEV_HW_ADDR_T_MULTICAST */
+	u32 i;
+	u32 v;
+	int addr_count = 0;
+	u8 *addr_list = NULL;
+	int ret;
+	u8 ncsi_mc_addr[6];
+
+	/*
+	 * Set the new number of MC addresses that we are being requested to
+	 * use.
+	 */
+	hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
+	hw->addr_ctrl.mta_in_use = 0;
+
+	/* Clear mta_shadow */
+	eth_dbg(eth, " Clearing MTA(multicast table)\n");
+	memset(&eth->mta_shadow, 0, sizeof(eth->mta_shadow));
+
+	/* Update mta shadow */
+	eth_dbg(eth, " Updating MTA..\n");
+	addr_count = netdev_mc_count(netdev);
+
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+	ha = list_first_entry(&netdev->mc.list, struct netdev_hw_addr, list);
+	addr_list = ha->addr;
+#else /* NETDEV_HW_ADDR_T_MULTICAST */
+	addr_list = netdev->mc_list->dmi_addr;
+#endif /* NETDEV_HW_ADDR_T_MULTICAST */
+	for (i = 0; i < addr_count; i++) {
+		eth_dbg(eth, " Adding the multicast addresses:\n");
+		rnp500_set_mta(hw, rnpgbe_addr_list_itr(hw, &addr_list));
+	}
+
+	if (sriov_on) {
+		struct rnpgbe_adapter *adapter =
+			(struct rnpgbe_adapter *)hw->back;
+
+		for (i = 0; i < adapter->num_vfs; i++) {
+			if (adapter->vfinfo) {
+				struct vf_data_storage *vfinfo =
+					&adapter->vfinfo[i];
+				int j;
+
+				for (j = 0; j < vfinfo->num_vf_mc_hashes; j++)
+					rnp500_set_vf_mta(
+						hw, vfinfo->vf_mc_hashes[j]);
+			}
+		}
+	}
+	/* update ncsi multicast address */
+	for (i = NCSI_RAR_NUM; i < NCSI_MC_NUM; i++) {
+		ret = hw->ops.get_ncsi_mac(hw, ncsi_mc_addr, i);
+		if (!ret)
+			rnp500_set_mta(hw, ncsi_mc_addr);
+	}
+
+	/* Enable mta */
+	for (i = 0; i < hw->eth.mcft_size; i++) {
+		if (hw->addr_ctrl.mta_in_use) {
+			eth_wr32(eth, RNP500_ETH_MUTICAST_HASH_TABLE(i),
+				 eth->mta_shadow[i]);
+		}
+	}
+
+	if (hw->addr_ctrl.mta_in_use > 0) {
+		v = eth_rd32(eth, RNP500_ETH_DMAC_MCSTCTRL);
+		eth_wr32(eth, RNP500_ETH_DMAC_MCSTCTRL,
+			 v | RNP500_MCSTCTRL_MULTICASE_TBL_EN |
+				 eth->mc_filter_type);
+	}
+
+	eth_dbg(eth, " update MTA Done. mta_in_use:%d\n",
+		hw->addr_ctrl.mta_in_use);
+	return hw->addr_ctrl.mta_in_use;
+}
+
+/* clean all mc addr */
+static void rnpgbe_eth_clr_mc_addr_n500(struct rnpgbe_eth_info *eth)
+{
+	int i;
+
+	for (i = 0; i < eth->mcft_size; i++)
+		eth_wr32(eth, RNP500_ETH_MUTICAST_HASH_TABLE(i), 0);
+}
+
+/**
+ *  rnpgbe_eth_set_rss_hfunc_n500 - Set the RSS hash function
+ *  @eth: pointer to eth structure
+ *  @hfunc: hash function type
+ *
+ *  update the rss hash function selection in the eth regs
+ **/
+static int rnpgbe_eth_set_rss_hfunc_n500(struct rnpgbe_eth_info *eth, int hfunc)
+{
+	u32 data;
+
+	data = eth_rd32(eth, RNP500_ETH_RSS_CONTROL);
+	/* clean mode only bit[14:15] */
+	data &= ~(BIT(14) | BIT(15));
+
+	if (hfunc == rss_func_top) {
+		/* do nothing */
+
+	} else if (hfunc == rss_func_xor)
+		data |= BIT(14);
+	else if (hfunc == rss_func_order)
+		data |= BIT(15);
+	else
+		return -EINVAL;
+
+	/* update to hardware */
+	eth_wr32(eth, RNP500_ETH_RSS_CONTROL, data);
+
+	return 0;
+}
+
+/**
+ *  rnpgbe_eth_update_rss_key_n500 - Update the RSS hash key
+ *  @eth: pointer to eth structure
+ *  @sriov_flag: sriov status
+ *
+ *  update rss key to eth regs
+ **/
+static void rnpgbe_eth_update_rss_key_n500(struct rnpgbe_eth_info *eth,
+					   bool sriov_flag)
+{
+	struct rnpgbe_hw *hw = (struct rnpgbe_hw *)eth->back;
+	int i;
+	u8 *key_temp;
+	int key_len = RNP_RSS_KEY_SIZE;
+	u8 *key = hw->rss_key;
+	u32 data;
+	u32 iov_en = (sriov_flag) ? RNP500_IOV_ENABLED : 0;
+	u32 *value;
+
+	data = eth_rd32(eth, RNP500_ETH_RSS_CONTROL);
+
+	key_temp = kmalloc(key_len, GFP_KERNEL);
+	if (!key_temp)
+		return;
+	/* reorder the key */
+	for (i = 0; i < key_len; i++)
+		*(key_temp + key_len - i - 1) = *(key + i);
+
+	value = (u32 *)key_temp;
+
+	for (i = 0; i < key_len; i = i + 4)
+		eth_wr32(eth, RNP500_ETH_RSS_KEY + i, *(value + i / 4));
+	kfree(key_temp);
+
+	data |= (RNP500_ETH_ENABLE_RSS_ONLY | iov_en);
+	eth_wr32(eth, RNP500_ETH_RSS_CONTROL, data);
+}
+
+/**
+ *  rnpgbe_eth_update_rss_table_n500 - Update the RSS indirection table
+ *  @eth: pointer to eth structure
+ *
+ *  update rss table to eth regs
+ **/
+static void rnpgbe_eth_update_rss_table_n500(struct rnpgbe_eth_info *eth)
+{
+	struct rnpgbe_hw *hw = (struct rnpgbe_hw *)eth->back;
+	u32 reta_entries = hw->rss_indir_tbl_num;
+	u32 tc_entries = hw->rss_tc_tbl_num;
+	int i;
+
+	for (i = 0; i < tc_entries; i++)
+		eth_wr32(eth, RNP500_ETH_TC_IPH_OFFSET_TABLE(i),
+			 hw->rss_tc_tbl[i]);
+
+	for (i = 0; i < reta_entries; i++)
+		eth_wr32(eth, RNP500_ETH_RSS_INDIR_TBL(i),
+			 hw->rss_indir_tbl[i]);
+
+	/* when the rss table is updated, the default rx ring
+	 * must be kept in sync with rss_indir_tbl[0]
+	 */
+	eth_wr32(eth, RNP500_ETH_DEFAULT_RX_RING, hw->rss_indir_tbl[0]);
+}
+
+/**
+ *  rnpgbe_eth_set_vfta_n500 - Set VLAN filter table
+ *  @eth: pointer to eth structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *  @vlan_on: boolean flag to turn on/off VLAN in the VFTA
+ *
+ *  Turn on/off specified VLAN in the VLAN filter table.
+ **/
+static s32 rnpgbe_eth_set_vfta_n500(struct rnpgbe_eth_info *eth, u32 vlan,
+				    bool vlan_on)
+{
+	s32 regindex;
+	u32 bitindex;
+	u32 vfta;
+	u32 targetbit;
+	bool vfta_changed = false;
+
+	/* todo: in vf mode the vlvf register can be set according to vind */
+	if (vlan > 4095)
+		return RNP_ERR_PARAM;
+
+	regindex = (vlan >> 5) & 0x7F;
+	bitindex = vlan & 0x1F;
+	targetbit = (1 << bitindex);
+	vfta = eth_rd32(eth, RNP500_VFTA(regindex));
+
+	if (vlan_on) {
+		if (!(vfta & targetbit)) {
+			vfta |= targetbit;
+			vfta_changed = true;
+		}
+	} else {
+		if ((vfta & targetbit)) {
+			vfta &= ~targetbit;
+			vfta_changed = true;
+		}
+	}
+
+	if (vfta_changed)
+		eth_wr32(eth, RNP500_VFTA(regindex), vfta);
+
+	return 0;
+}
+
+static void rnpgbe_eth_clr_vfta_n500(struct rnpgbe_eth_info *eth)
+{
+	u32 offset;
+
+	for (offset = 0; offset < eth->vft_size; offset++)
+		eth_wr32(eth, RNP500_VFTA(offset), 0);
+}
+
+static void rnpgbe_eth_set_doulbe_vlan_n500(struct rnpgbe_eth_info *eth,
+					    bool on)
+{
+	if (on)
+		eth_wr32(eth, RNP500_ETH_VLAN_RM_TYPE, 1);
+	else
+		eth_wr32(eth, RNP500_ETH_VLAN_RM_TYPE, 0);
+}
+
+static void rnpgbe_eth_set_outer_vlan_type_n500(struct rnpgbe_eth_info *eth,
+						int type)
+{
+	u32 data = 0x88a8;
+
+	switch (type) {
+	case outer_vlan_type_88a8:
+		data = 0x88a8;
+		break;
+#ifdef ETH_P_QINQ1
+	case outer_vlan_type_9100:
+		data = 0x9100;
+		break;
+#endif /* ETH_P_QINQ1 */
+#ifdef ETH_P_QINQ2
+	case outer_vlan_type_9200:
+		data = 0x9200;
+		break;
+#endif /* ETH_P_QINQ2 */
+	}
+	eth_wr32(eth, RNP500_ETH_WRAP_FIELD_TYPE, data);
+	eth_wr32(eth, RNP500_ETH_TX_VLAN_TYPE, data);
+}
+
+/**
+ *  rnpgbe_eth_set_vlan_filter_n500 - Set VLAN filter table
+ *  @eth: pointer to eth structure
+ *  @status: on |off
+ *  Turn on/off VLAN filter table.
+ **/
+static void rnpgbe_eth_set_vlan_filter_n500(struct rnpgbe_eth_info *eth,
+					    bool status)
+{
+#define ETH_VLAN_FILTER_BIT (30)
+	u32 value = eth_rd32(eth, RNP500_ETH_VLAN_FILTER_ENABLE);
+
+	/* clear bit first */
+	value &= (~(0x01 << ETH_VLAN_FILTER_BIT));
+	if (status)
+		value |= (0x01 << ETH_VLAN_FILTER_BIT);
+	eth_wr32(eth, RNP500_ETH_VLAN_FILTER_ENABLE, value);
+}
+
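+/* on n500 the software priority id maps 1:1 to the hardware layer2 filter index */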
+static u16 rnpgbe_layer2_pritologic_n500(u16 hw_id)
+{
+	return hw_id;
+}
+
+static void rnpgbe_eth_set_layer2_n500(struct rnpgbe_eth_info *eth,
+				       union rnpgbe_atr_input *input,
+				       u16 pri_id,
+				       u8 queue, bool prio_flag)
+{
+	u16 hw_id;
+
+	hw_id = rnpgbe_layer2_pritologic_n500(pri_id);
+	/* enable layer2 */
+	eth_wr32(eth, RNP500_ETH_LAYER2_ETQF(hw_id),
+		 (0x1 << 31) | (ntohs(input->layer2_formate.proto)));
+
+	/* setup action */
+	if (queue == RNP_FDIR_DROP_QUEUE) {
+		eth_wr32(eth, RNP500_ETH_LAYER2_ETQS(hw_id), (0x1 << 31));
+	} else {
+		/* setup ring_number */
+		if (prio_flag)
+			eth_wr32(eth, RNP500_ETH_LAYER2_ETQS(hw_id),
+				 (0x1 << 30) | (queue << 20) | (0x1 << 28));
+		else
+			eth_wr32(eth, RNP500_ETH_LAYER2_ETQS(hw_id),
+				 (0x1 << 30) | (queue << 20));
+	}
+}
+
+static void rnpgbe_eth_clr_layer2_n500(struct rnpgbe_eth_info *eth, u16 pri_id)
+{
+	u16 hw_id;
+
+	hw_id = rnpgbe_layer2_pritologic_n500(pri_id);
+	eth_wr32(eth, RNP500_ETH_LAYER2_ETQF(hw_id), 0);
+}
+
+static void rnpgbe_eth_clr_all_layer2_n500(struct rnpgbe_eth_info *eth)
+{
+	int i;
+
+	for (i = 0; i < RNP500_MAX_LAYER2_FILTERS; i++)
+		eth_wr32(eth, RNP500_ETH_LAYER2_ETQF(i), 0);
+}
+
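+/* on n500 the software priority id maps 1:1 to the hardware tuple5 filter index */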
+static u16 rnpgbe_tuple5_pritologic_n500(u16 hw_id)
+{
+	return hw_id;
+}
+
+static void rnpgbe_eth_set_tuple5_n500(struct rnpgbe_eth_info *eth,
+				       union rnpgbe_atr_input *input,
+				       u16 pri_id,
+				       u8 queue, bool prio_flag)
+{
+#define RNP500_SRC_IP_MASK BIT(0)
+#define RNP500_DST_IP_MASK BIT(1)
+#define RNP500_SRC_PORT_MASK BIT(2)
+#define RNP500_DST_PORT_MASK BIT(3)
+#define RNP500_L4_PROTO_MASK BIT(4)
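+	/* a bit set in mask_temp marks a field that was not specified;
+	 * mask_temp is written into the FTQF mask bits below
+	 */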
+	u32 port = 0;
+	u8 mask_temp = 0;
+	u8 l4_proto_type = 0;
+	u16 hw_id;
+
+	hw_id = rnpgbe_tuple5_pritologic_n500(pri_id);
+	dbg("try to eable tuple 5 %x\n", hw_id);
+	if (input->formatted.src_ip[0] != 0) {
+		eth_wr32(eth, RNP500_ETH_TUPLE5_SAQF(hw_id),
+			 htonl(input->formatted.src_ip[0]));
+	} else {
+		mask_temp |= RNP500_SRC_IP_MASK;
+	}
+	if (input->formatted.dst_ip[0] != 0) {
+		eth_wr32(eth, RNP500_ETH_TUPLE5_DAQF(hw_id),
+			 htonl(input->formatted.dst_ip[0]));
+	} else
+		mask_temp |= RNP500_DST_IP_MASK;
+	if (input->formatted.src_port != 0)
+		port |= (htons(input->formatted.src_port));
+	else
+		mask_temp |= RNP500_SRC_PORT_MASK;
+	if (input->formatted.dst_port != 0)
+		port |= (htons(input->formatted.dst_port) << 16);
+	else
+		mask_temp |= RNP500_DST_PORT_MASK;
+
+	if (port != 0)
+		eth_wr32(eth, RNP500_ETH_TUPLE5_SDPQF(hw_id), port);
+
+	switch (input->formatted.flow_type) {
+	case RNP_ATR_FLOW_TYPE_TCPV4:
+		l4_proto_type = IPPROTO_TCP;
+		break;
+	case RNP_ATR_FLOW_TYPE_UDPV4:
+		l4_proto_type = IPPROTO_UDP;
+		break;
+	case RNP_ATR_FLOW_TYPE_SCTPV4:
+		l4_proto_type = IPPROTO_SCTP;
+		break;
+	case RNP_ATR_FLOW_TYPE_IPV4:
+		l4_proto_type = input->formatted.inner_mac[0];
+		break;
+	default:
+		l4_proto_type = 0;
+	}
+
+	if (l4_proto_type == 0)
+		mask_temp |= RNP500_L4_PROTO_MASK;
+
+	/* setup ftqf*/
+	/* always set 0x3 */
+	eth_wr32(eth, RNP500_ETH_TUPLE5_FTQF(hw_id),
+		 (1 << 31) | (mask_temp << 25) | (l4_proto_type << 16) | 0x3);
+
+	/* setup action */
+	if (queue == RNP_FDIR_DROP_QUEUE) {
+		eth_wr32(eth, RNP500_ETH_TUPLE5_POLICY(hw_id), (0x1 << 31));
+	} else {
+		/* setup ring_number */
+		if (prio_flag)
+			eth_wr32(eth, RNP500_ETH_TUPLE5_POLICY(hw_id),
+				 ((0x1 << 30) | (queue << 20) | (0x1 << 28)));
+		else
+			eth_wr32(eth, RNP500_ETH_TUPLE5_POLICY(hw_id),
+				 ((0x1 << 30) | (queue << 20)));
+	}
+}
+
+static void rnpgbe_eth_clr_tuple5_n500(struct rnpgbe_eth_info *eth, u16 pri_id)
+{
+	u16 hw_id;
+
+	hw_id = rnpgbe_tuple5_pritologic_n500(pri_id);
+	eth_wr32(eth, RNP500_ETH_TUPLE5_FTQF(hw_id), 0);
+}
+
+static void rnpgbe_eth_clr_all_tuple5_n500(struct rnpgbe_eth_info *eth)
+{
+	int i;
+
+	for (i = 0; i < RNP500_MAX_TUPLE5_FILTERS; i++)
+		eth_wr32(eth, RNP500_ETH_TUPLE5_FTQF(i), 0);
+}
+
+static void rnpgbe_eth_set_tcp_sync_n500(struct rnpgbe_eth_info *eth,
+					 int queue,
+					 bool flag, bool prio)
+{
+	if (flag) {
+		eth_wr32(eth, RNP500_ETH_SYNQF, (0x1 << 30) | (queue << 20));
+		if (prio)
+			eth_wr32(eth, RNP500_ETH_SYNQF_PRIORITY,
+				 (0x1 << 31) | 0x1);
+		else
+			eth_wr32(eth, RNP500_ETH_SYNQF_PRIORITY, (0x1 << 31));
+		/* todo: add tcp-sync setup */
+
+	} else {
+		eth_wr32(eth, RNP500_ETH_SYNQF, 0);
+		eth_wr32(eth, RNP500_ETH_SYNQF_PRIORITY, 0);
+	}
+}
+
+static void rnpgbe_eth_set_rx_skip_n500(struct rnpgbe_eth_info *eth,
+					int count,
+					bool flag)
+{
+	if (flag) {
+		eth_wr32(eth, RNP500_ETH_PRIV_DATA_CONTROL_REG,
+			 PRIV_DATA_EN | count);
+	} else {
+		eth_wr32(eth, RNP500_ETH_PRIV_DATA_CONTROL_REG, 0);
+	}
+}
+
+static void rnpgbe_eth_set_min_max_packets_n500(struct rnpgbe_eth_info *eth,
+						int min, int max)
+{
+	eth_wr32(eth, RNP500_ETH_DEFAULT_RX_MIN_LEN, min);
+	eth_wr32(eth, RNP500_ETH_DEFAULT_RX_MAX_LEN, max);
+}
+
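+/* vlan stripping is enabled per queue: one bit per queue, 32 queues per VME register */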
+static void rnpgbe_eth_set_vlan_strip_n500(struct rnpgbe_eth_info *eth,
+					   u16 queue, bool enable)
+{
+	u32 reg = RNP500_ETH_VLAN_VME_REG(queue / 32);
+	u32 offset = queue % 32;
+	u32 data = eth_rd32(eth, reg);
+
+	if (enable)
+		data |= (1 << offset);
+	else
+		data &= ~(1 << offset);
+
+	eth_wr32(eth, reg, data);
+}
+
+static void rnpgbe_eth_set_vxlan_port_n500(struct rnpgbe_eth_info *eth,
+					   u32 port)
+{
+}
+
+static void rnpgbe_eth_set_vxlan_mode_n500(struct rnpgbe_eth_info *eth,
+					   bool inner)
+{
+}
+
+static void rnpgbe_eth_set_rx_hash_n500(struct rnpgbe_eth_info *eth,
+					bool status, bool sriov_flag)
+{
+	u32 iov_en = (sriov_flag) ? RNP500_IOV_ENABLED : 0;
+	u32 data;
+
+	data = eth_rd32(eth, RNP500_ETH_RSS_CONTROL);
+	data &= ~RNP500_ETH_RSS_MASK;
+
+	if (status) {
+		data |= RNP500_ETH_ENABLE_RSS_ONLY;
+		eth_wr32(eth, RNP500_ETH_RSS_CONTROL, data | iov_en);
+	} else {
+		eth_wr32(eth, RNP500_ETH_RSS_CONTROL, data | iov_en);
+	}
+}
+
+static void rnpgbe_eth_set_rx_n500(struct rnpgbe_eth_info *eth, bool status)
+{
+	if (status) {
+		eth_wr32(eth, RNP500_ETH_EXCEPT_DROP_PROC, 0);
+		eth_wr32(eth, RNP500_ETH_TX_MUX_DROP, 0);
+	} else {
+		eth_wr32(eth, RNP500_ETH_EXCEPT_DROP_PROC, 1);
+		eth_wr32(eth, RNP500_ETH_TX_MUX_DROP, 1);
+	}
+}
+
+static void rnpgbe_eth_fcs_n500(struct rnpgbe_eth_info *eth, bool status)
+{
+	if (status)
+		eth_wr32(eth, RNP500_ETH_FCS_EN, 1);
+	else
+		eth_wr32(eth, RNP500_ETH_FCS_EN, 0);
+}
+
+static void rnpgbe_eth_set_vf_vlan_mode_n500(struct rnpgbe_eth_info *eth,
+					     u16 vlan, int vf, bool enable)
+{
+	u32 value = vlan;
+
+	if (enable)
+		value |= BIT(31);
+
+	eth_wr32(eth, RNP500_VLVF(vf), value);
+	/* n500 1 vf only can setup 1 vlan */
+	eth_wr32(eth, RNP500_VLVF_TABLE(vf), vf);
+}
+
+static s32 rnpgbe_eth_set_fc_mode_n500(struct rnpgbe_eth_info *eth)
+{
+	struct rnpgbe_hw *hw = (struct rnpgbe_hw *)eth->back;
+	s32 ret_val = 0;
+	int i;
+	/* n500 has only 1 traffic class */
+	for (i = 0; i < 1; i++) {
+		if ((hw->fc.current_mode & rnpgbe_fc_tx_pause) &&
+		    hw->fc.high_water[i]) {
+			if (!hw->fc.low_water[i] ||
+			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+				hw_dbg(hw,
+				       "Invalid water mark configuration\n");
+				ret_val = RNP_ERR_INVALID_LINK_SETTINGS;
+				goto out;
+			}
+		}
+	}
+
+	for (i = 0; i < 1; i++) {
+		if ((hw->fc.current_mode & rnpgbe_fc_tx_pause)) {
+			if (hw->fc.high_water[i]) {
+				eth_wr32(eth, RNP500_ETH_HIGH_WATER(i),
+					 hw->fc.high_water[i]);
+			}
+			if (hw->fc.low_water[i]) {
+				eth_wr32(eth, RNP500_ETH_LOW_WATER(i),
+					 hw->fc.low_water[i]);
+			}
+		}
+	}
+out:
+	return ret_val;
+}
+
+static struct rnpgbe_eth_operations eth_ops_n500 = {
+	.set_rar = &rnpgbe_eth_set_rar_n500,
+	.clear_rar = &rnpgbe_eth_clear_rar_n500,
+	.set_vmdq = &rnpgbe_eth_set_vmdq_n500,
+	.clear_vmdq = &rnpgbe_eth_clear_vmdq_n500,
+	.update_mc_addr_list = &rnpgbe_eth_update_mc_addr_list_n500,
+	.clr_mc_addr = &rnpgbe_eth_clr_mc_addr_n500,
+	/* store rss info to eth */
+	.set_rss_hfunc = &rnpgbe_eth_set_rss_hfunc_n500,
+	.set_rss_key = &rnpgbe_eth_update_rss_key_n500,
+	.set_rss_table = &rnpgbe_eth_update_rss_table_n500,
+	.set_vfta = &rnpgbe_eth_set_vfta_n500,
+	.clr_vfta = &rnpgbe_eth_clr_vfta_n500,
+	.set_vlan_filter = &rnpgbe_eth_set_vlan_filter_n500,
+	.set_outer_vlan_type = &rnpgbe_eth_set_outer_vlan_type_n500,
+	.set_double_vlan = &rnpgbe_eth_set_doulbe_vlan_n500,
+	.set_layer2_remapping = &rnpgbe_eth_set_layer2_n500,
+	.clr_layer2_remapping = &rnpgbe_eth_clr_layer2_n500,
+	.clr_all_layer2_remapping = &rnpgbe_eth_clr_all_layer2_n500,
+	.set_tuple5_remapping = &rnpgbe_eth_set_tuple5_n500,
+	.clr_tuple5_remapping = &rnpgbe_eth_clr_tuple5_n500,
+	.clr_all_tuple5_remapping = &rnpgbe_eth_clr_all_tuple5_n500,
+	.set_tcp_sync_remapping = &rnpgbe_eth_set_tcp_sync_n500,
+	.set_rx_skip = &rnpgbe_eth_set_rx_skip_n500,
+	.set_min_max_packet = &rnpgbe_eth_set_min_max_packets_n500,
+	.set_vlan_strip = &rnpgbe_eth_set_vlan_strip_n500,
+	.set_vxlan_port = &rnpgbe_eth_set_vxlan_port_n500,
+	.set_vxlan_mode = &rnpgbe_eth_set_vxlan_mode_n500,
+	.set_rx_hash = &rnpgbe_eth_set_rx_hash_n500,
+	.set_fc_mode = &rnpgbe_eth_set_fc_mode_n500,
+	.set_rx = &rnpgbe_eth_set_rx_n500,
+	.set_fcs = &rnpgbe_eth_fcs_n500,
+	.set_vf_vlan_mode = &rnpgbe_eth_set_vf_vlan_mode_n500,
+};
+
+/**
+ *  rnpgbe_init_hw_n500 - Generic hardware initialization
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize the hardware by resetting the hardware, filling the bus info
+ *  structure and media type, clears all on chip counters, initializes receive
+ *  address registers, multicast table, VLAN filter table, calls routine to set
+ *  up link and flow control settings, and leaves transmit and receive units
+ *  disabled and uninitialized
+ **/
+static s32 rnpgbe_init_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	s32 status = 0;
+
+	/* Reset the hardware */
+	status = hw->ops.reset_hw(hw);
+
+	if (status == 0) {
+		/* Start the HW */
+		status = hw->ops.start_hw(hw);
+	}
+
+	return status;
+}
+
+static s32 rnpgbe_get_permtion_mac_addr_n500(struct rnpgbe_hw *hw,
+					     u8 *mac_addr)
+{
+#ifdef NO_CM3_MBX
+	u32 v;
+	struct rnpgbe_nic_info *nic = &hw->nic;
+
+	v = nic_rd32(nic, RNP500_TOP_MAC_OUI);
+	mac_addr[0] = (u8)(v >> 16);
+	mac_addr[1] = (u8)(v >> 8);
+	mac_addr[2] = (u8)(v >> 0);
+
+	v = nic_rd32(nic, RNP500_TOP_MAC_SN);
+	mac_addr[3] = (u8)(v >> 16);
+	mac_addr[4] = (u8)(v >> 8);
+	mac_addr[5] = (u8)(v >> 0);
+#else /* NO_CM3_MBX */
+	if (rnpgbe_fw_get_macaddr(hw, hw->pfvfnum, mac_addr, hw->nr_lane)) {
+		printk(KERN_DEBUG "generate ramdom macaddress...\n");
+		eth_random_addr(mac_addr);
+	} else {
+		printk(KERN_DEBUG "get mac addr %x:%x:%x:%x:%x:%x\n",
+		       mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
+		       mac_addr[4], mac_addr[5]);
+		if (!is_valid_ether_addr(mac_addr))
+			eth_random_addr(mac_addr);
+	}
+#endif /* NO_CM3_MBX */
+	hw->mac.mac_flags |= RNP_FLAGS_INIT_MAC_ADDRESS;
+	dbg("%s mac:%pM\n", __func__, mac_addr);
+
+	return 0;
+}
+
+static s32 rnpgbe_reset_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	int i;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+#ifdef NO_CM3_MBX
+	struct rnpgbe_nic_info *nic = &hw->nic;
+#endif
+
+	/* Call adapter stop to disable tx/rx and clear interrupts */
+	dma_wr32(dma, RNP_DMA_AXI_EN, 0);
+
+#ifndef NO_CM3_MBX
+	rnpgbe_mbx_fw_reset_phy(hw);
+#else
+#define N500_NIC_RESET 0
+
+	wr32(hw, RNP500_PHY_RELEASE, 0x20002);
+	wr32(hw, RNP500_TP_SFP, 0xffff0060);
+	nic_wr32(nic, RNP500_TOP_NIC_REST_N, N500_NIC_RESET);
+	/*
+	 * ensure the reset assertion is posted before it is released below
+	 */
+	wmb();
+	nic_wr32(nic, RNP500_TOP_NIC_REST_N, ~N500_NIC_RESET);
+
+#endif
+	/* tcam not reset */
+	eth->ops.clr_all_tuple5_remapping(eth);
+	/* Store the permanent mac address */
+	if (!(hw->mac.mac_flags & RNP_FLAGS_INIT_MAC_ADDRESS)) {
+		rnpgbe_get_permtion_mac_addr_n500(hw, hw->mac.perm_addr);
+		memcpy(hw->mac.addr, hw->mac.perm_addr, ETH_ALEN);
+	}
+
+	hw->ops.init_rx_addrs(hw);
+	/* n500 should do this ? */
+	eth_wr32(eth, RNP500_ETH_ERR_MASK_VECTOR,
+		 RNP_N500_PKT_LEN_ERR | RNP_N500_HDR_LEN_ERR);
+	wr32(hw, RNP_DMA_RX_DATA_PROG_FULL_THRESH, 0xa);
+
+	for (i = 0; i < 12; i++)
+		rnpgbe_wr_reg(hw->ring_msix_base + RING_VECTOR(i), 0);
+	{
+		u32 value = 0;
+
+		value |= RNP_MODE_NO_SA_INSER << RNP_SARC_OFFSET;
+		value &= (~RNP_TWOKPE_MASK);
+		value &= (~RNP_SFTERR_MASK);
+		value |= (RNP_CST_MASK);
+		value |= RNP_TC_MASK;
+		value &= (~RNP_WD_MASK);
+		value &= (~RNP_JD_MASK);
+		value &= (~RNP_BE_MASK);
+		value |= (RNP_JE_MASK);
+		value |= (RNP_IFG_96 << RNP_IFG_OFFSET);
+		value &= (~RNP_DCRS_MASK);
+		value &= (~RNP_PS_MASK);
+		value &= (~RNP_FES_MASK);
+		value &= (~RNP_DO_MASK);
+		value &= (~RNP_LM_MASK);
+		value |= RNP_DM_MASK;
+		value |= RNP_IPC_MASK; /* open rx checksum */
+		value &= (~RNP_DR_MASK);
+		value &= (~RNP_LUD_MASK);
+		value |= (RNP_BL_MODE << RNP_BL_OFFSET);
+		value &= (~RNP_DC_MASK);
+		value |= RNP_TE_MASK;
+		value |= (RNP_PRELEN_MODE);
+	}
+
+#ifndef NO_CM3_MBX
+	if (hw->ncsi_en)
+		rnpgbe_mbx_phy_pause_get(hw, &hw->fc.requested_mode);
+	else
+		rnpgbe_mbx_phy_pause_set(hw, hw->fc.requested_mode);
+
+	rnpgbe_mbx_get_lane_stat(hw);
+#else /* NO_CM3_MBX */
+	hw->fc.current_mode = PAUSE_AUTO;
+#endif
+	hw->link = 0;
+
+	return 0;
+}
+
+static s32 rnpgbe_start_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	s32 ret_val = 0;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+
+	eth_wr32(eth, RNP500_ETH_ERR_MASK_VECTOR,
+		 RNP_N500_PKT_LEN_ERR | RNP_N500_HDR_LEN_ERR);
+	eth_wr32(eth, RNP500_ETH_BYPASS, 0);
+	eth_wr32(eth, RNP500_ETH_DEFAULT_RX_RING, 0);
+	dma_wr32(dma, RNP_DMA_CONFIG, DMA_VEB_BYPASS);
+	dma_wr32(dma, RNP_DMA_AXI_EN, (RX_AXI_RW_EN | TX_AXI_RW_EN));
+
+	{
+		int value = dma_rd32(dma, RNP_DMA_DUMY);
+
+		value |= RC_CONTROL_HW;
+		dma_wr32(dma, RNP_DMA_DUMY, value);
+	}
+	return ret_val;
+}
+
+/* set n500 min/max packet length according to new_mtu;
+ * the default max frame is mtu + ETH_HLEN + 2 * ETH_FCS_LEN,
+ * raised to hw->max_length in jumbo or rx-all mode
+ */
+static void rnpgbe_set_mtu_hw_ops_n500(struct rnpgbe_hw *hw, int new_mtu)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+
+	int min;
+	int max = new_mtu + ETH_HLEN + ETH_FCS_LEN * 2;
+#define ULTRA_SHORT 33
+#define DEFAULT_SHORT 60
+	if ((adapter->priv_flags & RNP_PRIV_FLAG_ULTRA_SHORT) ||
+	    (adapter->priv_flags & RNP_PRIV_FLAG_RX_ALL))
+		min = ULTRA_SHORT;
+	else
+		min = DEFAULT_SHORT;
+
+	/* we receive jumbo frames only in jumbo enable or rx all mode */
+	if ((adapter->priv_flags & RNP_PRIV_FLAG_JUMBO) ||
+	    (adapter->priv_flags & RNP_PRIV_FLAG_RX_ALL))
+		max = hw->max_length;
+
+	hw->min_length_current = min;
+	hw->max_length_current = max;
+	eth->ops.set_min_max_packet(eth, min, max);
+}
+
+/* setup n500 vlan filter status */
+static void rnpgbe_set_vlan_filter_en_hw_ops_n500(struct rnpgbe_hw *hw,
+						  bool status)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.set_vlan_filter(eth, status);
+}
+
+/* set vlan to n500 vlan filter table & veb */
+/* pf setup call */
+static void rnpgbe_set_vlan_filter_hw_ops_n500(struct rnpgbe_hw *hw, u16 vid,
+					       bool enable, bool sriov_flag)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	u16 ncsi_vid;
+	int i;
+	int ret;
+
+	/* TODO: set up our own veb, using the last vfnum */
+	u32 vfnum = hw->max_vfs - 1;
+	/* setup n500 eth vlan table */
+	eth->ops.set_vfta(eth, vid, enable);
+
+	/* setup veb */
+	if (sriov_flag) {
+		if (hw->feature_flags & RNP_VEB_VLAN_MASK_EN) {
+			/* the veb is updated elsewhere when vlan masking is enabled */
+		} else {
+			if (enable)
+				dma->ops.set_veb_vlan(dma, vid, vfnum);
+			else
+				dma->ops.set_veb_vlan(dma, 0, vfnum);
+		}
+	}
+	/* always set up the ncsi vids */
+	for (i = 0; i < 2; i++) {
+		ret = hw->ops.get_ncsi_vlan(hw, &ncsi_vid, i);
+		if (!ret) {
+			eth->ops.set_vfta(eth, ncsi_vid, 1);
+			printk(KERN_DEBUG "update ncsi vid %d\n", ncsi_vid);
+		}
+	}
+}
+
+static int rnpgbe_set_veb_vlan_mask_hw_ops_n500(struct rnpgbe_hw *hw, u16 vid,
+						int vf, bool enable)
+{
+	struct list_head *pos;
+	struct vf_vebvlans *entry;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	bool find = false;
+	int err = 0;
+	/* 1 check whether this vid is already in the vlan mask table */
+	list_for_each(pos, &hw->vf_vas.l) {
+		entry = list_entry(pos, struct vf_vebvlans, l);
+		if (entry->vid == vid) {
+			find = true;
+			break;
+		}
+	}
+	if (find) {
+		/* this vid is used before */
+		if (enable) {
+			entry->mask |= (1 << vf);
+		} else {
+			entry->mask &= (~(1 << vf));
+			/* free this entry once no vf references it */
+			if (!entry->mask) {
+				entry->vid = -1;
+				entry->free = true;
+			}
+		}
+	} else {
+		/* 2 try to grab a free entry */
+		list_for_each(pos, &hw->vf_vas.l) {
+			entry = list_entry(pos, struct vf_vebvlans, l);
+			if (entry->free) {
+				find = true;
+				break;
+			}
+		}
+		if (find) {
+			entry->free = false;
+			entry->vid = vid;
+			entry->mask |= (1 << vf);
+		} else {
+			err = -1;
+			goto err_out;
+		}
+	}
+	/* 3 update new vlan mask to hw */
+	dma->ops.set_veb_vlan_mask(dma, entry->vid, entry->mask,
+				   entry->veb_entry);
+err_out:
+	return err;
+}
+
+static void rnpgbe_set_vf_vlan_filter_hw_ops_n500(struct rnpgbe_hw *hw, u16 vid,
+						  int vf, bool enable,
+						  bool veb_only)
+{
+	struct rnpgbe_dma_info *dma = &hw->dma;
+
+	if (!veb_only) {
+		/* call set vfta without veb setup */
+		hw->ops.set_vlan_filter(hw, vid, enable, false);
+
+	} else {
+		if (enable)
+			dma->ops.set_veb_vlan(dma, vid, vf);
+		else
+			dma->ops.set_veb_vlan(dma, 0, vf);
+	}
+}
+
+static void rnpgbe_clr_vlan_veb_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	u32 vfnum = hw->vfnum;
+
+	dma->ops.set_veb_vlan(dma, 0, vfnum);
+}
+
+/* setup n500 vlan strip status */
+static void rnpgbe_set_vlan_strip_hw_ops_n500(struct rnpgbe_hw *hw, u16 queue,
+					      bool strip)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.set_vlan_strip(eth, queue, strip);
+}
+
+/* update new n500 mac */
+static void rnpgbe_set_mac_hw_ops_n500(struct rnpgbe_hw *hw, u8 *mac,
+				       bool sriov_flag)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	struct rnpgbe_mac_info *mac_info = &hw->mac;
+	/* use this queue index to set up the veb;
+	 * the pf currently uses queue 0/1 and vfnum is the last vfnum
+	 */
+	int queue = hw->veb_ring;
+	int vfnum = hw->vfnum;
+
+	eth->ops.set_rar(eth, 0, mac, true);
+	if (sriov_flag) {
+		eth->ops.set_vmdq(eth, 0, queue / hw->sriov_ring_limit);
+		dma->ops.set_veb_mac(dma, mac, vfnum, queue);
+	}
+
+	mac_info->ops.set_mac(mac_info, mac, 0);
+}
+
+/**
+ * rnpgbe_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
+ *
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ *                0 on no addresses written
+ *                X on writing X addresses to the RAR table
+ **/
+static int rnpgbe_write_uc_addr_list_n500(struct rnpgbe_hw *hw,
+					  struct net_device *netdev,
+					  bool sriov_flag)
+{
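+	/* RAR entry 0 holds the primary MAC address, so unicast filters
+	 * fill the table from the top entry downwards
+	 */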
+	unsigned int rar_entries = hw->num_rar_entries - 1;
+	u32 vfnum = hw->vfnum;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	int count = 0;
+	int i = 0;
+	u8 ncsi_addr[6];
+	int ret;
+
+	/* In SR-IOV mode significantly fewer RAR entries are available */
+	if (sriov_flag)
+		rar_entries = hw->max_pf_macvlans - 1;
+
+	/* return -ENOMEM if there is not enough RAR space for all addresses */
+	if (netdev_uc_count(netdev) > rar_entries)
+		return -ENOMEM;
+
+	if (!netdev_uc_empty(netdev)) {
+		struct netdev_hw_addr *ha;
+
+		hw_dbg(hw, "%s: rar_entries:%d, uc_count:%d\n", __func__,
+		       hw->num_rar_entries, netdev_uc_count(netdev));
+
+		/* return error if we do not support writing to RAR table */
+		if (!eth->ops.set_rar)
+			return -ENOMEM;
+
+		netdev_for_each_uc_addr(ha, netdev) {
+			if (!rar_entries)
+				break;
+			eth->ops.set_rar(eth, rar_entries, ha->addr,
+					 RNP500_RAH_AV);
+			if (sriov_flag)
+				eth->ops.set_vmdq(eth, rar_entries, vfnum);
+
+			rar_entries--;
+
+			count++;
+		}
+	}
+	for (i = 0; i < NCSI_RAR_NUM; i++) {
+		ret = hw->ops.get_ncsi_mac(hw, ncsi_addr, i);
+		if (!ret) {
+			eth->ops.set_rar(eth, NCSI_RAR_IDX_START + i, ncsi_addr,
+					 RNP500_RAH_AV);
+		}
+	}
+
+	/* clear the remaining unused RAR entries, highest index first */
+	hw_dbg(hw, "%s: Clearing RAR[1 - %d]\n", __func__, rar_entries);
+	for (; rar_entries > 0; rar_entries--)
+		eth->ops.clear_rar(eth, rar_entries);
+
+	return count;
+}
+
+static void rnpgbe_set_rx_mode_hw_ops_n500(struct rnpgbe_hw *hw,
+					   struct net_device *netdev,
+					   bool sriov_flag)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	u32 fctrl;
+#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) || defined(NETIF_F_HW_VLAN_CTAG_RX)
+	netdev_features_t features = netdev->features;
+#endif
+	int count;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	hw_dbg(hw, "%s\n", __func__);
+
+	/* broadcast always bypass */
+	fctrl = eth_rd32(eth, RNP500_ETH_DMAC_FCTRL) | RNP500_FCTRL_BPE;
+
+	/* clear the bits we are changing the status of */
+	fctrl &= ~(RNP500_FCTRL_UPE | RNP500_FCTRL_MPE);
+	/* promisc mode */
+	if (netdev->flags & IFF_PROMISC) {
+		hw->addr_ctrl.user_set_promisc = true;
+		fctrl |= (RNP500_FCTRL_UPE | RNP500_FCTRL_MPE);
+		/* disable hardware filter vlans in promisc mode */
+#ifdef NETIF_F_HW_VLAN_CTAG_FILTER
+		features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+#endif /* NETIF_F_HW_VLAN_CTAG_FILTER */
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+		features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+#endif /* NETIF_F_HW_VLAN_CTAG_RX */
+	} else {
+		if (netdev->flags & IFF_ALLMULTI) {
+			fctrl |= RNP500_FCTRL_MPE;
+		} else {
+			/* Write addresses to the MTA, if the attempt fails
+			 * then we should just turn on promiscuous mode so
+			 * that we can at least receive multicast traffic
+			 */
+			count = eth->ops.update_mc_addr_list(eth, netdev, true);
+			if (count < 0)
+				fctrl |= RNP500_FCTRL_MPE;
+		}
+		hw->addr_ctrl.user_set_promisc = false;
+	}
+
+	/*
+	 * Write addresses to available RAR registers, if there is not
+	 * sufficient space to store all the addresses then enable
+	 * unicast promiscuous mode
+	 */
+	if (rnpgbe_write_uc_addr_list_n500(hw, netdev, sriov_flag) < 0)
+		fctrl |= RNP500_FCTRL_UPE;
+
+	eth_wr32(eth, RNP500_ETH_DMAC_FCTRL, fctrl);
+#ifdef NETIF_F_HW_VLAN_CTAG_FILTER
+	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+		eth->ops.set_vlan_filter(eth, true);
+	else
+		eth->ops.set_vlan_filter(eth, false);
+#endif /* NETIF_F_HW_VLAN_CTAG_FILTER */
+
+	if ((hw->addr_ctrl.user_set_promisc) ||
+	    (adapter->priv_flags & RNP_PRIV_FLAG_REC_HDR_LEN_ERR)) {
+		eth_wr32(eth, RNP500_ETH_ERR_MASK_VECTOR, 0);
+	} else {
+		/* set pkt_len_err and hdr_len_err default to 1 */
+		eth_wr32(eth, RNP500_ETH_ERR_MASK_VECTOR,
+			 PKT_LEN_ERR | HDR_LEN_ERR);
+	}
+
+	hw->ops.set_mtu(hw, netdev->mtu);
+}
+
+/* setup an rar with vfnum */
+static void rnpgbe_set_rar_with_vf_hw_ops_n500(struct rnpgbe_hw *hw, u8 *mac,
+					       int idx, u32 vfnum, bool enable)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.set_rar(eth, idx, mac, enable);
+	/* should we check for errors here? */
+	eth->ops.set_vmdq(eth, idx, vfnum);
+}
+
+static void rnpgbe_clr_rar_hw_ops_n500(struct rnpgbe_hw *hw, int idx)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.clear_rar(eth, idx);
+}
+
+static void rnpgbe_clr_rar_all_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	unsigned int rar_entries = hw->num_rar_entries - 1;
+	int i;
+
+	for (i = 0; i < rar_entries; i++)
+		eth->ops.clear_rar(eth, i);
+}
+
+static void rnpgbe_set_fcs_mode_hw_ops_n500(struct rnpgbe_hw *hw, bool status)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	mac->ops.set_mac_fcs(mac, status);
+	eth->ops.set_fcs(eth, status);
+}
+
+static void rnpgbe_set_vxlan_port_hw_ops_n500(struct rnpgbe_hw *hw, u32 port)
+{
+	/* n500 not support */
+}
+
+static void rnpgbe_set_vxlan_mode_hw_ops_n500(struct rnpgbe_hw *hw, bool inner)
+{
+	/* n500 not support */
+}
+
+static void rnpgbe_set_mac_speed_hw_ops_n500(struct rnpgbe_hw *hw, bool link,
+					     u32 speed, bool duplex)
+{
+	/* n500 hw control this */
+}
+
+static void rnpgbe_set_mac_rx_hw_ops_n500(struct rnpgbe_hw *hw, bool status)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_mac_info *mac = &hw->mac;
+
+	if (pci_channel_offline(hw->pdev))
+		return;
+
+	if (status) {
+		mac->ops.set_mac_rx(mac, status);
+		eth->ops.set_rx(eth, status);
+	} else {
+		eth->ops.set_rx(eth, status);
+		mac->ops.set_mac_rx(mac, status);
+	}
+}
+
+static void rnpgbe_set_sriov_status_hw_ops_n500(struct rnpgbe_hw *hw,
+						bool status)
+{
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	u32 v, fctrl;
+
+	fctrl = eth_rd32(eth, RNP500_ETH_DMAC_FCTRL);
+#define RNP500_DMAC_MASK (0x7f)
+	fctrl &= ~RNP500_DMAC_MASK;
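+	/* the low 7 bits of DMAC_FCTRL carry a ring index: the pf veb ring
+	 * when sriov is enabled, 0 otherwise
+	 */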
+
+	if (status) {
+		fctrl |= hw->veb_ring;
+		eth_wr32(eth, RNP500_ETH_DMAC_FCTRL, fctrl);
+		/* setup default ring */
+		dma_wr32(dma, RNP_DMA_CONFIG,
+			 dma_rd32(dma, RNP_DMA_CONFIG) & (~DMA_VEB_BYPASS));
+		v = eth_rd32(eth, RNP500_MRQC_IOV_EN);
+		v |= RNP500_IOV_ENABLED;
+		eth_wr32(eth, RNP500_MRQC_IOV_EN, v);
+		/* 1 setup veb vlan type */
+
+	} else {
+		eth_wr32(eth, RNP500_ETH_DMAC_FCTRL, fctrl);
+		v = eth_rd32(eth, RNP500_MRQC_IOV_EN);
+		v &= ~(RNP500_IOV_ENABLED);
+		eth_wr32(eth, RNP500_MRQC_IOV_EN, v);
+		dma->ops.clr_veb_all(dma);
+	}
+}
+
+static void rnpgbe_set_sriov_vf_mc_hw_ops_n500(struct rnpgbe_hw *hw,
+					       u16 mc_addr)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	u32 vector_bit;
+	u32 vector_reg;
+	u32 mta_reg;
+	/* pf and vf share one multicast hash table */
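+	/* bits [11:5] of the hash pick one of the 32-bit hash-table
+	 * registers, bits [4:0] pick the bit inside it
+	 */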
+
+	vector_reg = (mc_addr >> 5) & 0x7F;
+	vector_bit = mc_addr & 0x1F;
+	mta_reg = eth_rd32(eth, RNP500_ETH_MUTICAST_HASH_TABLE(vector_reg));
+	mta_reg |= (1 << vector_bit);
+	eth_wr32(eth, RNP500_ETH_MUTICAST_HASH_TABLE(vector_reg), mta_reg);
+}
+
+static void rnpgbe_update_sriov_info_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	/* update sriov info to hw */
+}
+
+static void rnpgbe_set_pause_mode_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	mac->ops.set_fc_mode(mac);
+	eth->ops.set_fc_mode(eth);
+}
+
+static void rnpgbe_get_pause_mode_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	/* n500 can get pause mode in link event */
+}
+
+static void rnpgbe_update_hw_info_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_mac_info *mac = &hw->mac;
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+	u32 data;
+	/* 1 enable eth filter */
+	eth_wr32(eth, RNP500_HOST_FILTER_EN, 1);
+	/* 2 open redir en */
+	eth_wr32(eth, RNP500_REDIR_EN, 1);
+	/* 3 open sctp checksum and other checksum */
+	if (hw->feature_flags & RNP_NET_FEATURE_TX_CHECKSUM)
+		eth_wr32(eth, RNP500_ETH_SCTP_CHECKSUM_EN, 1);
+
+	/* 4 mark multicast as broadcast */
+	dma_wr32(dma, RNP_VEB_MAC_MASK_LO, 0xffffffff);
+	dma_wr32(dma, RNP_VEB_MAC_MASK_HI, 0xfeff);
+	/* test only */
+	dma_wr32(dma, 0x00b0, 615);
+	/* 5 setup ft padding and veb vlan mode */
+	data = dma_rd32(dma, RNP_DMA_CONFIG);
+#ifdef FT_PADDING
+#define N500_PADDING_BIT 8
+	if (adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING)
+		SET_BIT(N500_PADDING_BIT, data);
+#endif
+	/* force close padding in n500 */
+	CLR_BIT(8, data);
+
+#define N500_VLAN_POLL_EN BIT(3)
+	if (hw->feature_flags & RNP_VEB_VLAN_MASK_EN)
+		data |= N500_VLAN_POLL_EN;
+
+	dma_wr32(dma, RNP_DMA_CONFIG, data);
+	/* 6 setup vlan mode */
+	if (adapter->priv_flags & RNP_PRIV_FLAG_DOUBLE_VLAN)
+		eth->ops.set_double_vlan(eth, true);
+	else
+		eth->ops.set_double_vlan(eth, false);
+
+	/* 7 setup rss-hash mode */
+	eth->ops.set_rss_hfunc(eth, adapter->rss_func_mode);
+	/* 8 setup outer-vlan type */
+	eth->ops.set_outer_vlan_type(eth, adapter->outer_vlan_type);
+	/* 9 setup tcp sync remapping */
+	if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC) {
+		if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC_PRIO)
+			hw->ops.set_tcp_sync_remapping(
+				hw, adapter->tcp_sync_queue, true, true);
+		else
+			hw->ops.set_tcp_sync_remapping(
+				hw, adapter->tcp_sync_queue, true, false);
+	} else {
+		hw->ops.set_tcp_sync_remapping(hw, adapter->tcp_sync_queue,
+					       false, false);
+	}
+	/* 10 setup pause status */
+	data = mac_rd32(mac, GMAC_FLOW_CTRL);
+	if (adapter->priv_flags & RNP_PRIV_FLAG_PAUSE_OWN)
+		data |= GMAC_FLOW_CTRL_UP;
+	else
+		data &= (~GMAC_FLOW_CTRL_UP);
+
+	mac_wr32(mac, GMAC_FLOW_CTRL, data);
+
+	/* 11 open tx double vlan according to stags */
+	eth_wr32(eth, RNP500_ETH_TX_VLAN_CONTROL_EANBLE, 1);
+
+	/* 12 test */
+	/* eth_wr32(eth, RNP500_ETH_RX_MAC_LEN_REG, 1); */
+	eth_wr32(eth, RNP500_ETH_WHOLE_PKT_LEN_ERR_DROP, 1);
+
+	/* 13 setup double vlan drop */
+	if (adapter->priv_flags & RNP_PRIV_FLAG_DOUBLE_VLAN_RECEIVE)
+		eth_wr32(eth, RNP500_ETH_DOUBLE_VLAN_DROP, 0);
+	else
+		eth_wr32(eth, RNP500_ETH_DOUBLE_VLAN_DROP, 1);
+
+	/* 14 open error mask if in rx all mode */
+	if (adapter->priv_flags & RNP_PRIV_FLAG_RX_ALL) {
+		eth_wr32(eth, RNP500_MAC_ERR_MASK,
+			 RUN_FRAME_ERROR | GAINT_FRAME_ERROR | CRC_ERROR |
+				 LENGTH_ERROR);
+		/* we open this in rx all mode */
+		eth_wr32(eth, RNP500_ETH_DOUBLE_VLAN_DROP, 0);
+#define FORWARD_ALL_CONTROL (0x2)
+		eth_wr32(eth, RNP500_BAD_PACKETS_RECEIVE_EN, 1);
+		mac_wr32(mac, GMAC_FRAME_FILTER,
+			 0x00000001 | (FORWARD_ALL_CONTROL << 6));
+	} else {
+		eth_wr32(eth, RNP500_MAC_ERR_MASK,
+			 RUN_FRAME_ERROR | GAINT_FRAME_ERROR);
+		eth_wr32(eth, RNP500_BAD_PACKETS_RECEIVE_EN, 0);
+		mac_wr32(mac, GMAC_FRAME_FILTER, 0x00000001);
+	}
+	/* 15 update flow-control watermarks according to max length */
+	{
+#define FIFO_ALL (1024)
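+		/* FIFO_ALL (1024) is the rx fifo depth in 16-byte units; the
+		 * high watermark leaves room for one max-length frame,
+		 * rounded up to 16-byte units
+		 */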
+		int water_high =
+			FIFO_ALL - ((hw->max_length_current + 15) >> 4);
+		/* n500 only uses water level entry 0 */
+		hw->fc.high_water[0] = water_high;
+		hw->fc.low_water[0] = water_high;
+
+		dma_wr32(dma, RNP500_DMA_RBUF_FIFO,
+			 ((hw->max_length_current + 15) >> 4) + 5);
+
+		eth_wr32(eth, RNP500_ETH_EMAC_PARSE_PROGFULL_THRESH,
+			 ((hw->max_length_current + 15) >> 4) + 2);
+	}
+	/* 16 setup fcs mode */
+	if (adapter->priv_flags & RNP_PRIV_FLAG_RX_FCS)
+		hw->ops.set_fcs_mode(hw, true);
+	else
+		hw->ops.set_fcs_mode(hw, false);
+
+	/* 17 setup tso fifo */
+	dma_wr32(dma, RNP_DMA_PKT_FIFO_DATA_PROG_FULL_THRESH, 36);
+	/* 18 setup priv skip */
+	if (adapter->priv_flags & RNP_PRIV_FLAG_RX_SKIP_EN)
+		data = PRIV_DATA_EN | adapter->priv_skip_count;
+	else
+		data = 0;
+	eth_wr32(eth, RNP500_ETH_PRIV_DATA_CONTROL_REG, data);
+	/* 19 setup mac count read self clear */
+	data = mac_rd32(mac, RNP500_MAC_COUNT_CONTROL);
+#define READ_CLEAR BIT(2)
+	data |= READ_CLEAR;
+	mac_wr32(mac, RNP500_MAC_COUNT_CONTROL, data);
+	/* 20 setup prio */
+	if (adapter->priv_flags &
+	    (RNP_PRIV_FLAG_8023_PRIO | RNP_PRIV_FLAG_REMAP_PRIO)) {
+		eth_wr32(eth, RNP500_PRIORITY_1_MARK, RNP500_PRIORITY_1);
+		eth_wr32(eth, RNP500_PRIORITY_0_MARK, RNP500_PRIORITY_0);
+		eth_wr32(eth, RNP500_PRIORITY_EN, 1);
+		if (adapter->priv_flags & RNP_PRIV_FLAG_8023_PRIO)
+			eth_wr32(eth, RNP500_PRIORITY_EN_8023, 1);
+		else
+			eth_wr32(eth, RNP500_PRIORITY_EN_8023, 0);
+	} else {
+		eth_wr32(eth, RNP500_PRIORITY_EN, 0);
+	}
+}
+
+static void rnpgbe_update_hw_rx_drop_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+	int i;
+	struct rnpgbe_ring *ring;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		ring = adapter->rx_ring[i];
+		if (adapter->rx_drop_status & BIT(i)) {
+			ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH,
+				  adapter->drop_time);
+		} else {
+			ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH, 0);
+		}
+	}
+}
+
+static void rnpgbe_set_rx_hash_hw_ops_n500(struct rnpgbe_hw *hw, bool status,
+					   bool sriov_flag)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.set_rx_hash(eth, status, sriov_flag);
+}
+
+/*
+ * setup mac to rar 0
+ * clean vmdq
+ * clean mc addr
+ */
+static s32 rnpgbe_init_rx_addrs_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	u32 i;
+	u32 rar_entries = eth->num_rar_entries;
+	u32 v;
+
+	hw_dbg(hw, "init_rx_addrs:rar_entries:%d, mac.addr:%pM\n", rar_entries,
+	       hw->mac.addr);
+	/*
+	 * If the current mac address is valid, assume it is a software override
+	 * to the permanent address.
+	 * Otherwise, use the permanent address from the eeprom.
+	 */
+	if (!is_valid_ether_addr(hw->mac.addr)) {
+		/* Get the MAC address from the RAR0 for later reference */
+		memcpy(hw->mac.addr, hw->mac.perm_addr, ETH_ALEN);
+		hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
+	} else {
+		/* Setup the receive address. */
+		hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
+		hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
+
+		eth->ops.set_rar(eth, 0, hw->mac.addr, true);
+		/*  clear VMDq pool/queue selection for RAR 0 */
+		eth->ops.clear_vmdq(eth, 0, RNP_CLEAR_VMDQ_ALL);
+	}
+	hw->addr_ctrl.overflow_promisc = 0;
+	hw->addr_ctrl.rar_used_count = 1;
+
+	/* Zero out the other receive addresses. */
+	hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
+	if (!hw->ncsi_en) {
+		for (i = 1; i < rar_entries; i++)
+			eth->ops.clear_rar(eth, i);
+	}
+
+	/* Clear the MTA */
+	hw->addr_ctrl.mta_in_use = 0;
+	v = eth_rd32(eth, RNP500_ETH_DMAC_MCSTCTRL);
+	v &= (~0x3);
+	v |= eth->mc_filter_type;
+	eth_wr32(eth, RNP500_ETH_DMAC_MCSTCTRL, v);
+
+	hw_dbg(hw, " Clearing MTA\n");
+	if (!hw->ncsi_en)
+		eth->ops.clr_mc_addr(eth);
+
+	return 0;
+}
+
+static void rnpgbe_clr_vfta_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.clr_vfta(eth);
+}
+
+static void rnpgbe_set_txvlan_mode_hw_ops_n500(struct rnpgbe_hw *hw, bool cvlan)
+{
+	/* n500 not support this */
+}
+
+static int rnpgbe_set_rss_hfunc_hw_ops_n500(struct rnpgbe_hw *hw, u8 hfunc)
+{
+#ifdef HAVE_RXFH_HASHFUNC
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+
+	switch (hfunc) {
+	case ETH_RSS_HASH_TOP:
+		adapter->rss_func_mode = rss_func_top;
+		break;
+
+	case ETH_RSS_HASH_XOR:
+		adapter->rss_func_mode = rss_func_xor;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	eth->ops.set_rss_hfunc(eth, adapter->rss_func_mode);
+#endif /* HAVE_RXFH_HASHFUNC */
+	return 0;
+}
+
+static void rnpgbe_set_rss_key_hw_ops_n500(struct rnpgbe_hw *hw,
+					   bool sriov_flag)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+	int key_len = RNP_RSS_KEY_SIZE;
+
+	memcpy(hw->rss_key, adapter->rss_key, key_len);
+
+	eth->ops.set_rss_key(eth, sriov_flag);
+}
+
+static void rnpgbe_set_rss_table_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.set_rss_table(eth);
+}
+
+static void rnpgbe_set_mbx_link_event_hw_ops_n500(struct rnpgbe_hw *hw,
+						  int enable)
+{
+#ifndef NO_CM3_MBX
+	rnpgbe_mbx_link_event_enable(hw, enable);
+#endif /* NO_CM3_MBX */
+}
+
+static void rnpgbe_set_mbx_ifup_hw_ops_n500(struct rnpgbe_hw *hw, int enable)
+{
+#ifdef NO_UP_DOWN
+	static int flags[4];
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+	/* only once */
+	if ((enable) && (flags[adapter->bd_number] == 0)) {
+		flags[adapter->bd_number] = 1;
+#endif /* NO_UP_DOWN */
+#ifndef NO_CM3_MBX
+		rnpgbe_mbx_ifup_down(hw, enable);
+#endif /* NO_CM3_MBX */
+
+#ifdef NO_UP_DOWN
+	}
+#endif
+}
+
+/**
+ *  rnpgbe_check_mac_link_n500 - Determine link and speed status
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @link_up: true when link is up
+ *  @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ *  Reads the links register to determine if link is up and the current speed
+ **/
+static s32 rnpgbe_check_mac_link_hw_ops_n500(struct rnpgbe_hw *hw,
+					     rnpgbe_link_speed *speed,
+					     bool *link_up,
+					     bool *duplex,
+					     bool link_up_wait_to_complete)
+{
+#ifdef NO_CM3_MBX
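+	/* without the firmware mailbox the link state cannot be queried,
+	 * so report a fixed 1G full-duplex link
+	 */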
+
+	*speed = RNP_LINK_SPEED_1GB_FULL;
+	*link_up = true;
+	*duplex = true;
+#else /* NO_CM3_MBX */
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+
+	if (hw->speed == 10)
+		*speed = RNP_LINK_SPEED_10_FULL;
+	else if (hw->speed == 100)
+		*speed = RNP_LINK_SPEED_100_FULL;
+	else if (hw->speed == 1000)
+		*speed = RNP_LINK_SPEED_1GB_FULL;
+	else if (hw->speed == 10000)
+		*speed = RNP_LINK_SPEED_10GB_FULL;
+	else if (hw->speed == 25000)
+		*speed = RNP_LINK_SPEED_25GB_FULL;
+	else if (hw->speed == 40000)
+		*speed = RNP_LINK_SPEED_40GB_FULL;
+	else
+		*speed = RNP_LINK_SPEED_UNKNOWN;
+
+	*link_up = hw->link;
+
+	if (adapter->priv_flags & RNP_PRIV_FLGA_TEST_TX_HANG)
+		*link_up = 0;
+
+	*duplex = hw->duplex;
+
+#endif /* NO_CM3_MBX */
+	return 0;
+}
+
+static s32 rnpgbe_setup_mac_link_hw_ops_n500(struct rnpgbe_hw *hw,
+					     u32 adv,
+					     u32 autoneg,
+					     u32 speed, u32 duplex)
+{
+	rnpgbe_mbx_phy_link_set(hw, adv, autoneg, speed, duplex,
+				hw->tp_mdix_ctrl);
+
+	return 0;
+}
+
+static void rnpgbe_clean_link_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	hw->link = 0;
+}
+
+static s32 rnpgbe_get_link_capabilities_hw_ops_n500(struct rnpgbe_hw *hw,
+						    rnpgbe_link_speed *speed,
+						    bool *autoneg)
+{
+	/* fixed setup; the real capabilities come from the firmware */
+	*speed = RNP_LINK_SPEED_10GB_FULL;
+	*autoneg = false;
+
+	return 0;
+}
+
+static void rnpgbe_set_layer2_hw_ops_n500(struct rnpgbe_hw *hw,
+					  union rnpgbe_atr_input *input,
+					  u16 pri_id, u8 queue, bool prio_flag)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.set_layer2_remapping(eth, input, pri_id, queue, prio_flag);
+}
+
+static void rnpgbe_clr_layer2_hw_ops_n500(struct rnpgbe_hw *hw, u16 pri_id)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.clr_layer2_remapping(eth, pri_id);
+}
+
+static void rnpgbe_clr_all_layer2_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.clr_all_layer2_remapping(eth);
+}
+
+static void rnpgbe_clr_all_tuple5_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.clr_all_tuple5_remapping(eth);
+}
+
+static void rnpgbe_set_tcp_sync_hw_ops_n500(struct rnpgbe_hw *hw, int queue,
+					    bool flag, bool prio)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.set_tcp_sync_remapping(eth, queue, flag, prio);
+}
+
+static void rnpgbe_set_rx_skip_hw_ops_n500(struct rnpgbe_hw *hw, int count,
+					   bool flag)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.set_rx_skip(eth, count, flag);
+}
+
+static void rnpgbe_set_outer_vlan_type_hw_ops_n500(struct rnpgbe_hw *hw,
+						   int type)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.set_outer_vlan_type(eth, type);
+}
+
+/**
+ * rnpgbe_get_thermal_sensor_data_hw_ops_n500 - Gathers thermal sensor data
+ * @hw: pointer to hardware structure
+ * Returns the thermal sensor data structure
+ **/
+static s32 rnpgbe_get_thermal_sensor_data_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	int voltage = 0;
+	struct rnpgbe_thermal_sensor_data *data = &hw->thermal_sensor_data;
+
+	data->sensor[0].temp = rnpgbe_mbx_get_temp(hw, &voltage);
+
+	return 0;
+}
+
+/**
+ * rnpgbe_init_thermal_sensor_thresh_hw_ops_n500 - Inits thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ * Inits the thermal sensor thresholds according to the NVM map
+ * and save off the threshold and location values into mac.thermal_sensor_data
+ **/
+static s32 rnpgbe_init_thermal_sensor_thresh_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	u8 i;
+	struct rnpgbe_thermal_sensor_data *data = &hw->thermal_sensor_data;
+
+	for (i = 0; i < RNPGBE_MAX_SENSORS; i++) {
+		data->sensor[i].location = i + 1;
+		data->sensor[i].caution_thresh = 90;
+		data->sensor[i].max_op_thresh = 100;
+	}
+
+	return 0;
+}
+
+
+static s32 rnpgbe_phy_read_reg_hw_ops_n500(struct rnpgbe_hw *hw,
+					   u32 reg_addr,
+					   u32 device_type,
+					   u16 *phy_data)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+	s32 status = 0;
+	u32 data = 0;
+
+	status = mac->ops.mdio_read(mac, reg_addr, &data);
+	*phy_data = data & 0xffff;
+
+	return status;
+}
+
+static s32 rnpgbe_phy_write_reg_hw_ops_n500(struct rnpgbe_hw *hw,
+					    u32 reg_addr,
+					    u32 device_type,
+					    u16 phy_data)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+	s32 status = 0;
+
+	status = mac->ops.mdio_write(mac, reg_addr, (u32)phy_data);
+
+	return status;
+}
+
+static void rnpgbe_setup_wol_hw_ops_n500(struct rnpgbe_hw *hw,
+					 u32 mode)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+
+	mac->ops.pmt(mac, mode, !!hw->ncsi_en);
+}
+
+static void rnpgbe_setup_eee_hw_ops_n500(struct rnpgbe_hw *hw,
+					 int ls, int tw,
+					 u32 local_eee)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+
+	mac->ops.set_eee_timer(mac, ls, tw);
+	rnpgbe_mbx_phy_eee_set(hw, tw, local_eee);
+}
+
+static void rnpgbe_set_eee_mode_hw_ops_n500(struct rnpgbe_hw *hw,
+					    bool en_tx_lpi_clockgating)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+
+	mac->ops.set_eee_mode(mac, en_tx_lpi_clockgating);
+}
+
+static void rnpgbe_reset_eee_mode_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+
+	mac->ops.reset_eee_mode(mac);
+}
+
+static void rnpgbe_set_eee_pls_hw_ops_n500(struct rnpgbe_hw *hw,
+					   int link)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+
+	mac->ops.set_eee_pls(mac, link);
+}
+
+static u32 rnpgbe_get_lpi_status_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+
+	return mac->ops.get_lpi_status(mac);
+}
+
+static int rnpgbe_get_ncsi_mac_hw_ops_n500(struct rnpgbe_hw *hw,
+					   u8 *addr, int idx)
+{
+#define NCSI_MAC_H(i) (0x48 + i * 0x8)
+#define NCSI_MAC_L(i) (0x4C + i * 0x8)
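+	/* rar_l holds mac bytes 0-3 and the low 16 bits of rar_h hold
+	 * bytes 4-5; an all-ones value means the slot is unused
+	 */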
+	struct rnpgbe_mac_info *mac = &hw->mac;
+	u32 rar_h, rar_l;
+
+	rar_h = mac_rd32(mac, NCSI_MAC_H(idx));
+	rar_l = mac_rd32(mac, NCSI_MAC_L(idx));
+
+	if (((rar_h & 0x0000ffff) != 0x0000ffff) || (rar_l != 0xffffffff)) {
+		*(addr + 3) = (rar_l & 0xff000000) >> 24;
+		*(addr + 2) = (rar_l & 0xff0000) >> 16;
+		*(addr + 1) = (rar_l & 0xff00) >> 8;
+		*(addr + 0) = (rar_l & 0xff) >> 0;
+		*(addr + 5) = (rar_h & 0xff00) >> 8;
+		*(addr + 4) = (rar_h & 0xff) >> 0;
+		return 0;
+	}
+
+	return -1;
+}
+
+static int rnpgbe_get_ncsi_vlan_hw_ops_n500(struct rnpgbe_hw *hw,
+					    u16 *vlan, int idx)
+{
+#define NCSI_VLAN(i) (0x80 + i * 0x10)
+	struct rnpgbe_mac_info *mac = &hw->mac;
+	u32 vid;
+
+	vid = mac_rd32(mac, NCSI_VLAN(idx));
+
+	if (vid & 0x80000000) {
+		*vlan = (u16)(vid & 0x0000ffff);
+		return 0;
+	}
+
+	return -1;
+}
+
+static void rnpgbe_set_lldp_hw_ops_n500(struct rnpgbe_hw *hw,
+					bool enable)
+{
+	rnpgbe_mbx_lldp_set(hw, enable);
+}
+
+static void rnpgbe_get_lldp_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+}
+
+static void rnpgbe_set_eee_timer_hw_ops_n500(struct rnpgbe_hw *hw,
+					     int ls, int tw)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+
+	mac->ops.set_eee_timer(mac, ls, tw);
+}
+
+static void rnpgbe_set_vf_vlan_mode_hw_ops_n500(struct rnpgbe_hw *hw,
+						u16 vlan, int vf,
+						bool enable)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+
+	if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE)
+		eth->ops.set_vf_vlan_mode(eth, vlan, vf, enable);
+}
+
+static void rnpgbe_driver_status_hw_ops_n500(struct rnpgbe_hw *hw,
+					     bool enable,
+					     int mode)
+{
+#ifndef NO_CM3_MBX
+	switch (mode) {
+	case rnpgbe_driver_insmod:
+		rnpgbe_mbx_ifinsmod(hw, enable);
+		break;
+	case rnpgbe_driver_suspuse:
+		rnpgbe_mbx_ifsuspuse(hw, enable);
+		break;
+	case rnpgbe_driver_force_control_phy:
+		rnpgbe_mbx_ifforce_control_mac(hw, enable);
+
+		break;
+	}
+#endif /* NO_CM3_MBX */
+}
+
+static void rnpgbe_set_tuple5_hw_ops_n500(struct rnpgbe_hw *hw,
+					  union rnpgbe_atr_input *input,
+					  u16 pri_id, u8 queue, bool prio_flag)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.set_tuple5_remapping(eth, input, pri_id, queue, prio_flag);
+}
+
+static void rnpgbe_clr_tuple5_hw_ops_n500(struct rnpgbe_hw *hw, u16 pri_id)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.clr_tuple5_remapping(eth, pri_id);
+}
+
+static void
+rnpgbe_update_hw_status_hw_ops_n500(struct rnpgbe_hw *hw,
+				    struct rnpgbe_hw_stats *hw_stats,
+				    struct net_device_stats *net_stats)
+{
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_mac_info *mac = &hw->mac;
+	int i;
+
+	net_stats->rx_errors += eth_rd32(eth, RNP500_RX_MAC_GFCS_ERR_NUM) +
+				eth_rd32(eth, RNP500_RX_MAC_LEN_ERR_NUM) +
+				eth_rd32(eth, RNP500_RX_MAC_SFCS_ERR_NUM) +
+				eth_rd32(eth, RNP500_RX_MAC_GLEN_ERR_NUM) +
+				eth_rd32(eth, RNP500_RX_MAC_SLEN_ERR_NUM);
+
+	net_stats->collisions = eth_rd32(eth, RNP500_RX_MAC_LCS_ERR_NUM);
+	net_stats->rx_over_errors = eth_rd32(eth, RNP500_RX_MAC_CUT_NUM);
+	net_stats->rx_crc_errors = eth_rd32(eth, RNP500_RX_MAC_GFCS_ERR_NUM);
+	hw_stats->invalid_droped_packets =
+		eth_rd32(eth, RNP500_RX_DROP_PKT_NUM);
+
+	hw_stats->rx_capabity_lost = eth_rd32(eth, RNP500_RXTRANS_DROP) +
+				     eth_rd32(eth, RNP500_RXTRANS_CUT_ERR_PKTS);
+	hw_stats->filter_dropped_packets =
+		eth_rd32(eth, RNP500_DECAP_PKT_DROP1_NUM);
+	hw_stats->host_l2_match_drop =
+		eth_rd32(eth, RNP500_ETH_HOST_L2_DROP_PKTS);
+	hw_stats->redir_input_match_drop =
+		eth_rd32(eth, RNP500_ETH_REDIR_INPUT_MATCH_DROP_PKTS);
+	hw_stats->redir_etype_match_drop =
+		eth_rd32(eth, RNP500_ETH_ETYPE_DROP_PKTS);
+	hw_stats->redir_tcp_syn_match_drop =
+		eth_rd32(eth, RNP500_ETH_TCP_SYN_DROP_PKTS);
+	hw_stats->redir_tuple5_match_drop =
+		eth_rd32(eth, RNP500_ETH_REDIR_TUPLE5_DROP_PKTS);
+	hw_stats->tx_multicast = eth_rd32(eth, RNP500_TX_MULTI_NUM);
+	hw_stats->tx_broadcast = eth_rd32(eth, RNP500_TX_BROADCAST_NUM);
+	hw_stats->mac_rx_broadcast = 0;
+	hw_stats->mac_rx_multicast = 0;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct rnpgbe_ring *tx_ring = adapter->tx_ring[i];
+		int idx = tx_ring->rnpgbe_queue_idx;
+
+		hw_stats->mac_rx_multicast +=
+			dma_rd32(dma, RNP500_VEB_VFMPRC(idx));
+		hw_stats->mac_rx_broadcast +=
+			dma_rd32(dma, RNP500_VEB_VFBPRC(idx));
+	}
+	hw_stats->dma_rx_drop_cnt_0 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(0));
+	hw_stats->dma_rx_drop_cnt_1 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(1));
+	hw_stats->dma_rx_drop_cnt_2 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(2));
+	hw_stats->dma_rx_drop_cnt_3 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(3));
+	hw_stats->dma_rx_drop_cnt_4 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(4));
+	hw_stats->dma_rx_drop_cnt_5 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(5));
+	hw_stats->dma_rx_drop_cnt_6 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(6));
+	hw_stats->dma_rx_drop_cnt_7 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(7));
+
+	net_stats->multicast = hw_stats->mac_rx_multicast;
+
+	hw_stats->ultra_short_cnt +=
+		mac_rd32(mac, GMAC_MANAGEMENT_RX_UNDERSIZE);
+	hw_stats->jumbo_cnt += mac_rd32(mac, RNP500_MAC_GLEN_ERR_NUM);
+	hw_stats->tx_pause += mac_rd32(mac, GMAC_MANAGEMENT_TX_PAUSE);
+	hw_stats->rx_pause += mac_rd32(mac, GMAC_MANAGEMENT_RX_PAUSE);
+}
+
+const struct rnpgbe_stats rnp500_gstrings_net_stats[] = {
+	RNP_NETDEV_STAT(rx_packets),
+	RNP_NETDEV_STAT(tx_packets),
+	RNP_NETDEV_STAT(rx_bytes),
+	RNP_NETDEV_STAT(tx_bytes),
+	RNP_NETDEV_STAT(rx_errors),
+	RNP_NETDEV_STAT(tx_errors),
+	RNP_NETDEV_STAT(rx_dropped),
+	RNP_NETDEV_STAT(tx_dropped),
+	RNP_NETDEV_STAT(multicast),
+	RNP_NETDEV_STAT(collisions),
+	RNP_NETDEV_STAT(rx_over_errors),
+	RNP_NETDEV_STAT(rx_crc_errors),
+	RNP_NETDEV_STAT(rx_frame_errors),
+	RNP_NETDEV_STAT(rx_fifo_errors),
+	RNP_NETDEV_STAT(rx_missed_errors),
+	RNP_NETDEV_STAT(tx_aborted_errors),
+	RNP_NETDEV_STAT(tx_carrier_errors),
+	RNP_NETDEV_STAT(tx_fifo_errors),
+	RNP_NETDEV_STAT(tx_heartbeat_errors),
+};
+
+#define RNP500_GLOBAL_STATS_LEN ARRAY_SIZE(rnp500_gstrings_net_stats)
+
+static struct rnpgbe_stats rnp500_hwstrings_stats[] = {
+	RNP_HW_STAT("vlan_add_cnt", hw_stats.vlan_add_cnt),
+	RNP_HW_STAT("vlan_strip_cnt", hw_stats.vlan_strip_cnt),
+	RNP_HW_STAT("invalid_droped_packets", hw_stats.invalid_droped_packets),
+	RNP_HW_STAT("rx_capabity_drop", hw_stats.rx_capabity_lost),
+	RNP_HW_STAT("filter_dropped_packets", hw_stats.filter_dropped_packets),
+	RNP_HW_STAT("host_l2_match_drop", hw_stats.host_l2_match_drop),
+	RNP_HW_STAT("redir_input_match_drop", hw_stats.redir_input_match_drop),
+	RNP_HW_STAT("redir_etype_match_drop", hw_stats.redir_etype_match_drop),
+	RNP_HW_STAT("redir_tcp_syn_match_drop",
+		    hw_stats.redir_tcp_syn_match_drop),
+	RNP_HW_STAT("redir_tuple5_match_drop",
+		    hw_stats.redir_tuple5_match_drop),
+	RNP_HW_STAT("tx_multicast", hw_stats.tx_multicast),
+	RNP_HW_STAT("tx_broadcast", hw_stats.tx_broadcast),
+	RNP_HW_STAT("rx_csum_offload_errors", hw_csum_rx_error),
+	RNP_HW_STAT("rx_csum_offload_good", hw_csum_rx_good),
+	RNP_HW_STAT("rx_broadcast_count", hw_stats.mac_rx_broadcast),
+	RNP_HW_STAT("rx_multicast_count", hw_stats.mac_rx_multicast),
+	RNP_HW_STAT("ultra_short_packets", hw_stats.ultra_short_cnt),
+	RNP_HW_STAT("jumbo_packets", hw_stats.jumbo_cnt),
+	RNP_HW_STAT("mac_rx_pause_count", hw_stats.rx_pause),
+	RNP_HW_STAT("mac_tx_pause_count", hw_stats.tx_pause),
+};
+
+#define RNP500_HWSTRINGS_STATS_LEN ARRAY_SIZE(rnp500_hwstrings_stats)
+
+#define RNP500_STATS_LEN                                                       \
+	(RNP500_GLOBAL_STATS_LEN + RNP500_HWSTRINGS_STATS_LEN +                \
+	 RNP_QUEUE_STATS_LEN)
+
+#ifndef CLOST_SELF_TEST
+#ifdef ETHTOOL_TEST
+static const char rnp500_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register test  (offline)", "Eeprom test    (offline)",
+	"Interrupt test (offline)", "Loopback test  (offline)",
+	"Link test   (on/offline)"
+};
+
+#define RNP500_TEST_LEN (sizeof(rnp500_gstrings_test) / ETH_GSTRING_LEN)
+#else
+#define RNP500_TEST_LEN 0
+#endif
+#else
+#define RNP500_TEST_LEN 0
+#endif
+
+#if defined(ETHTOOL_GLINKSETTINGS) && !defined(KYLIN_V4_ETHTOOL_FIX_BOND)
+static int rnp500_get_link_ksettings(struct net_device *netdev,
+				     struct ethtool_link_ksettings *cmd)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	rnpgbe_link_speed supported_link;
+	rnpgbe_link_speed advertised_link;
+	bool autoneg = hw->autoneg;
+
+	ethtool_link_ksettings_zero_link_mode(cmd, supported);
+	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+#ifdef NO_CM3_MBX
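+	/* no firmware mailbox: report a fixed 1G sgmii setup with all
+	 * 10/100/1000 modes supported and advertised
+	 */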
+	hw->link = 1;
+	adapter->speed = 1000;
+
+	hw->supported_link = RNP_LINK_SPEED_1GB_FULL | RNP_LINK_SPEED_100_FULL |
+			     RNP_LINK_SPEED_10_FULL | RNP_LINK_SPEED_1GB_HALF |
+			     RNP_LINK_SPEED_100_HALF | RNP_LINK_SPEED_10_HALF;
+	hw->advertised_link = RNP_LINK_SPEED_1GB_FULL |
+			      RNP_LINK_SPEED_100_FULL | RNP_LINK_SPEED_10_FULL |
+			      RNP_LINK_SPEED_1GB_HALF |
+			      RNP_LINK_SPEED_100_HALF | RNP_LINK_SPEED_10_HALF;
+	hw->is_sgmii = 1;
+	hw->tp_mdix_ctrl = ETH_TP_MDI;
+#else /* NO_CM3_MBX */
+
+#endif /* NO_CM3_MBX */
+
+	supported_link = hw->supported_link;
+	advertised_link = hw->advertised_link;
+
+	if (hw->is_sgmii) {
+		if (supported_link & RNP_LINK_SPEED_1GB_FULL)
+			ethtool_link_ksettings_add_link_mode(cmd, supported,
+							     1000baseT_Full);
+		if (supported_link & RNP_LINK_SPEED_100_FULL)
+			ethtool_link_ksettings_add_link_mode(cmd, supported,
+							     100baseT_Full);
+		if (supported_link & RNP_LINK_SPEED_10_FULL)
+			ethtool_link_ksettings_add_link_mode(cmd, supported,
+							     10baseT_Full);
+		if (supported_link & RNP_LINK_SPEED_1GB_HALF)
+			ethtool_link_ksettings_add_link_mode(cmd, supported,
+							     1000baseT_Half);
+		if (supported_link & RNP_LINK_SPEED_100_HALF)
+			ethtool_link_ksettings_add_link_mode(cmd, supported,
+							     100baseT_Half);
+		if (supported_link & RNP_LINK_SPEED_10_HALF)
+			ethtool_link_ksettings_add_link_mode(cmd, supported,
+							     10baseT_Half);
+
+		if ((autoneg) && (!hw->fake_autoneg)) {
+			if (advertised_link & RNP_LINK_SPEED_1GB_FULL)
+				ethtool_link_ksettings_add_link_mode(
+					cmd, advertising, 1000baseT_Full);
+			if (advertised_link & RNP_LINK_SPEED_100_FULL)
+				ethtool_link_ksettings_add_link_mode(
+					cmd, advertising, 100baseT_Full);
+			if (advertised_link & RNP_LINK_SPEED_10_FULL)
+				ethtool_link_ksettings_add_link_mode(
+					cmd, advertising, 10baseT_Full);
+			if (advertised_link & RNP_LINK_SPEED_1GB_HALF)
+				ethtool_link_ksettings_add_link_mode(
+					cmd, advertising, 1000baseT_Half);
+			if (advertised_link & RNP_LINK_SPEED_100_HALF)
+				ethtool_link_ksettings_add_link_mode(
+					cmd, advertising, 100baseT_Half);
+			if (advertised_link & RNP_LINK_SPEED_10_HALF)
+				ethtool_link_ksettings_add_link_mode(
+					cmd, advertising, 10baseT_Half);
+		}
+
+		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+
+		if (!hw->fake_autoneg)
+			ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+
+		cmd->base.port = PORT_TP;
+		cmd->base.phy_address = adapter->phy_addr;
+		cmd->base.duplex = adapter->duplex;
+		if (adapter->hw.link)
+			cmd->base.eth_tp_mdix = hw->tp_mdx;
+		else
+			cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
+
+		//if (hw->fake_autoneg)
+		//	cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
+#ifdef ETH_TP_MDI_AUTO
+		cmd->base.eth_tp_mdix_ctrl = hw->tp_mdix_ctrl;
+		//if (hw->fake_autoneg)
+		//	cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+#endif /* ETH_TP_MDI_AUTO */
+	} else {
+		if (supported_link & RNP_LINK_SPEED_1GB_FULL) {
+			ethtool_link_ksettings_add_link_mode(cmd, supported,
+							     1000baseKX_Full);
+		}
+
+		if (advertised_link & RNP_LINK_SPEED_1GB_FULL)
+			ethtool_link_ksettings_add_link_mode(cmd, advertising,
+							     1000baseKX_Full);
+		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+		cmd->base.port = PORT_FIBRE;
+	}
+
+	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+
+	if (autoneg) {
+		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+		cmd->base.autoneg = AUTONEG_ENABLE;
+	} else
+		cmd->base.autoneg = AUTONEG_DISABLE;
+
+	if (hw->fake_autoneg)
+		cmd->base.autoneg = AUTONEG_DISABLE;
+
+	/* set pause support */
+	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
+
+	if (hw->fc.requested_mode & PAUSE_AUTO)
+		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
+	else {
+		if ((hw->fc.requested_mode & PAUSE_TX) &&
+		    (hw->fc.requested_mode & PAUSE_RX))
+			ethtool_link_ksettings_add_link_mode(cmd, advertising,
+							     Pause);
+		else if (hw->fc.requested_mode & PAUSE_TX)
+			ethtool_link_ksettings_add_link_mode(cmd, advertising,
+							     Asym_Pause);
+		else if (hw->fc.requested_mode & PAUSE_RX)
+			ethtool_link_ksettings_add_link_mode(cmd, advertising,
+							     Asym_Pause);
+		else
+			ethtool_link_ksettings_add_link_mode(cmd, advertising,
+							     Asym_Pause);
+	}
+
+	if (adapter->hw.link) {
+		cmd->base.speed = hw->speed;
+		if (adapter->hw.duplex)
+			cmd->base.duplex = DUPLEX_FULL;
+		else
+			cmd->base.duplex = DUPLEX_HALF;
+	} else {
+		cmd->base.speed = SPEED_UNKNOWN;
+		cmd->base.duplex = DUPLEX_UNKNOWN;
+	}
+
+	return 0;
+}
+
+static int rnp500_set_link_ksettings(struct net_device *netdev,
+				     const struct ethtool_link_ksettings *cmd)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 advertised, old;
+	s32 err = 0;
+	u32 speed = 0, autoneg = 0, duplex = 0;
+
+	if ((hw->phy_type == rnpgbe_media_type_copper) ||
+	    (hw->phy.multispeed_fiber)) {
+		/*
+		 * this function does not support duplex forcing, but can
+		 * limit the advertising of the adapter to the specified speed
+		 */
+		/* only allow one speed at a time if no  */
+		/* only allow one speed at a time when autoneg is disabled */
+			if ((cmd->base.speed != SPEED_100) &&
+			    (cmd->base.speed != SPEED_10) &&
+			    (cmd->base.speed != SPEED_1000))
+				return -EINVAL;
+			autoneg = 0;
+			speed = cmd->base.speed;
+			duplex = cmd->base.duplex;
+			/* forcing 1000M still requires autoneg, so keep it on but mark it fake */
+			if (cmd->base.speed == SPEED_1000) {
+				autoneg = 1;
+				hw->fake_autoneg = 1;
+			}
+		} else {
+			hw->fake_autoneg = 0;
+			autoneg = 1;
+		}
+
+		if (cmd->base.eth_tp_mdix_ctrl) {
+			if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+			    (cmd->base.autoneg != AUTONEG_ENABLE)) {
+				dev_err(&adapter->pdev->dev,
+					"forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+				return -EINVAL;
+			}
+		}
+
+		hw->autoneg = autoneg;
+		hw->tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
+
+		old = hw->advertised_link;
+		advertised = 0;
+
+		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+							  1000baseT_Full))
+			advertised |= RNP_LINK_SPEED_1GB_FULL;
+
+		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+							  100baseT_Full))
+			advertised |= RNP_LINK_SPEED_100_FULL;
+
+		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+							  10baseT_Full))
+			advertised |= RNP_LINK_SPEED_10_FULL;
+
+		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+							  100baseT_Half))
+			advertised |= RNP_LINK_SPEED_100_HALF;
+
+		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+							  10baseT_Half))
+			advertised |= RNP_LINK_SPEED_10_HALF;
+
+		/* with autoneg on, the advertised mask must not be empty */
+		if ((!advertised) && (autoneg))
+			return -EINVAL;
+
+		/* this sets the link speed and restarts auto-neg */
+		while (test_and_set_bit(__RNP_IN_SFP_INIT, &adapter->state))
+			usleep_range(1000, 2000);
+
+		hw->mac.autotry_restart = true;
+		err = hw->ops.setup_link(hw, advertised, autoneg, speed,
+					 duplex);
+		if (err) {
+			printk(KERN_DEBUG "setup link failed with code %d\n",
+			       err);
+			hw->ops.setup_link(hw, old, autoneg, speed, duplex);
+		} else {
+			hw->advertised_link = advertised;
+		}
+		clear_bit(__RNP_IN_SFP_INIT, &adapter->state);
+	} else {
+		/* disabling autoneg is only supported on sgmii */
+		if ((!cmd->base.autoneg) && (!hw->is_sgmii))
+			return -EINVAL;
+		if (cmd->base.duplex == DUPLEX_HALF)
+			return -EINVAL;
+		if (cmd->base.speed != SPEED_1000)
+			return -EINVAL;
+	}
+
+	return err;
+}
+
+#else /* !ETHTOOL_GLINKSETTINGS */
+static int rnp500_get_settings(struct net_device *netdev,
+			       struct ethtool_cmd *ecmd)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	rnpgbe_link_speed supported_link;
+	rnpgbe_link_speed advertised_link;
+	bool autoneg = true;
+
+#ifndef NO_CM3_MBX
+
+#else
+	hw->supported_link = RNP_LINK_SPEED_1GB_FULL | RNP_LINK_SPEED_100_FULL |
+			     RNP_LINK_SPEED_10_FULL | RNP_LINK_SPEED_1GB_HALF |
+			     RNP_LINK_SPEED_100_HALF | RNP_LINK_SPEED_10_HALF;
+	hw->advertised_link = RNP_LINK_SPEED_1GB_FULL |
+			      RNP_LINK_SPEED_100_FULL | RNP_LINK_SPEED_10_FULL |
+			      RNP_LINK_SPEED_1GB_HALF |
+			      RNP_LINK_SPEED_100_HALF | RNP_LINK_SPEED_10_HALF;
+	hw->is_sgmii = 1;
+
+	hw->tp_mdix_ctrl = ETH_TP_MDI;
+	hw->link = 1;
+	adapter->speed = 1000;
+#endif
+	supported_link = hw->supported_link;
+	advertised_link = hw->advertised_link;
+
+	if (hw->is_sgmii) {
+		if (supported_link & RNP_LINK_SPEED_10GB_FULL)
+			ecmd->supported |= ADVERTISED_10000baseT_Full;
+		if (supported_link & RNP_LINK_SPEED_1GB_FULL)
+			ecmd->supported |= ADVERTISED_1000baseT_Full;
+		if (supported_link & RNP_LINK_SPEED_100_FULL)
+			ecmd->supported |= ADVERTISED_100baseT_Full;
+		if (supported_link & RNP_LINK_SPEED_10_FULL)
+			ecmd->supported |= ADVERTISED_10baseT_Full;
+		if (supported_link & RNP_LINK_SPEED_1GB_HALF)
+			ecmd->supported |= ADVERTISED_1000baseT_Half;
+		if (supported_link & RNP_LINK_SPEED_100_HALF)
+			ecmd->supported |= ADVERTISED_100baseT_Half;
+		if (supported_link & RNP_LINK_SPEED_10_HALF)
+			ecmd->supported |= ADVERTISED_10baseT_Half;
+
+		if (hw->autoneg) {
+			autoneg = true;
+			if (!hw->fake_autoneg) {
+				if (advertised_link & RNP_LINK_SPEED_10GB_FULL)
+					ecmd->advertising |= ADVERTISED_10000baseT_Full;
+				if (advertised_link & RNP_LINK_SPEED_1GB_FULL)
+					ecmd->advertising |= ADVERTISED_1000baseT_Full;
+				if (advertised_link & RNP_LINK_SPEED_100_FULL)
+					ecmd->advertising |= ADVERTISED_100baseT_Full;
+				if (advertised_link & RNP_LINK_SPEED_10_FULL)
+					ecmd->advertising |= ADVERTISED_10baseT_Full;
+				if (advertised_link & RNP_LINK_SPEED_1GB_HALF)
+					ecmd->advertising |= ADVERTISED_1000baseT_Half;
+				if (advertised_link & RNP_LINK_SPEED_100_HALF)
+					ecmd->advertising |= ADVERTISED_100baseT_Half;
+				if (advertised_link & RNP_LINK_SPEED_10_HALF)
+					ecmd->advertising |= ADVERTISED_10baseT_Half;
+			}
+		} else {
+			autoneg = false;
+		}
+
+		ecmd->supported |= SUPPORTED_TP;
+		if (!hw->fake_autoneg)
+			ecmd->advertising |= ADVERTISED_TP;
+		ecmd->port = PORT_TP;
+		ecmd->transceiver = XCVR_EXTERNAL;
+		ecmd->phy_address = hw->mac.phy_addr;
+		ecmd->duplex = adapter->duplex;
+		//if (!hw->fake_autoneg)
+		ecmd->eth_tp_mdix = hw->tp_mdx;
+		//else
+		//	ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+#ifdef ETH_TP_MDI_AUTO
+		ecmd->eth_tp_mdix_ctrl = hw->tp_mdix_ctrl;
+		//if (hw->fake_autoneg)
+		ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+#endif /* ETH_TP_MDI_AUTO */
+	} else {
+		if (supported_link & RNP_LINK_SPEED_10GB_FULL)
+			ecmd->supported |= SUPPORTED_10000baseT_Full;
+		if (supported_link & RNP_LINK_SPEED_1GB_FULL)
+			ecmd->supported |= SUPPORTED_1000baseKX_Full;
+		if (supported_link & RNP_LINK_SPEED_25GB_FULL)
+			ecmd->supported |= SUPPORTED_40000baseKR4_Full;
+		if (supported_link & RNP_LINK_SPEED_40GB_FULL)
+			ecmd->supported |= SUPPORTED_40000baseCR4_Full |
+					   SUPPORTED_40000baseSR4_Full |
+					   SUPPORTED_40000baseLR4_Full;
+
+		if (advertised_link & RNP_LINK_SPEED_10GB_FULL)
+			ecmd->advertising |= SUPPORTED_10000baseT_Full;
+		if (advertised_link & RNP_LINK_SPEED_1GB_FULL)
+			ecmd->advertising |= SUPPORTED_1000baseKX_Full;
+		if (advertised_link & RNP_LINK_SPEED_25GB_FULL)
+			ecmd->advertising |= SUPPORTED_40000baseKR4_Full;
+		if (advertised_link & RNP_LINK_SPEED_40GB_FULL)
+			ecmd->advertising |= SUPPORTED_40000baseCR4_Full |
+					     SUPPORTED_40000baseSR4_Full |
+					     SUPPORTED_40000baseLR4_Full;
+
+		ecmd->supported |= SUPPORTED_FIBRE;
+		ecmd->advertising |= ADVERTISED_FIBRE;
+		ecmd->port = PORT_FIBRE;
+		ecmd->transceiver = XCVR_INTERNAL;
+	}
+
+	ecmd->supported |= SUPPORTED_Autoneg;
+	if (autoneg) {
+		ecmd->advertising |= ADVERTISED_Autoneg;
+		ecmd->autoneg = AUTONEG_ENABLE;
+	} else
+		ecmd->autoneg = AUTONEG_DISABLE;
+
+	if (hw->fake_autoneg)
+		ecmd->autoneg = AUTONEG_DISABLE;
+	/* Indicate pause support */
+	ecmd->supported |= SUPPORTED_Pause;
+
+	if (!hw->fake_autoneg) {
+		if (hw->fc.requested_mode & PAUSE_AUTO)
+			ecmd->advertising |= ADVERTISED_Pause;
+		else {
+			if ((hw->fc.requested_mode & PAUSE_TX) &&
+					(hw->fc.requested_mode & PAUSE_RX))
+				ecmd->advertising |= ADVERTISED_Pause;
+			else if (hw->fc.requested_mode & PAUSE_TX)
+				ecmd->advertising |= ADVERTISED_Asym_Pause;
+			else if (hw->fc.requested_mode & PAUSE_RX)
+				ecmd->advertising |=
+					ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+			else
+				ecmd->advertising &=
+					~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+		}
+	}
+	if (adapter->hw.duplex)
+		ecmd->duplex = DUPLEX_FULL;
+	else
+		ecmd->duplex = DUPLEX_HALF;
+
+	if (adapter->hw.link) {
+		ethtool_cmd_speed_set(ecmd, hw->speed);
+	} else {
+		ethtool_cmd_speed_set(ecmd, -1);
+		ecmd->duplex = DUPLEX_UNKNOWN;
+	}
+
+	return 0;
+}
+
+static int rnp500_set_settings(struct net_device *netdev,
+			       struct ethtool_cmd *ecmd)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 advertised, old;
+	s32 err = 0;
+	u32 autoneg = 0, speed = 0, duplex = 0;
+
+	if ((hw->phy_type == rnpgbe_media_type_copper) ||
+	    (hw->phy.multispeed_fiber)) {
+		/*
+		 * this function does not support duplex forcing, but can
+		 * limit the advertising of the adapter to the specified speed
+		 */
+		if (ecmd->advertising & ~ecmd->supported)
+			return -EINVAL;
+
+		if (ecmd->autoneg == AUTONEG_DISABLE) {
+			autoneg = 0;
+			if ((ecmd->speed != SPEED_100) &&
+			    (ecmd->speed != SPEED_10) &&
+			    (ecmd->speed != SPEED_1000))
+				return -EINVAL;
+			speed = ecmd->speed;
+			duplex = ecmd->duplex;
+			/* forcing 1000M still requires autoneg to stay on */
+			if (ecmd->speed == SPEED_1000)
+				autoneg = 1;
+
+		} else {
+			autoneg = 1;
+		}
+		hw->autoneg = autoneg;
+
+		old = hw->phy.autoneg_advertised;
+		advertised = 0;
+		if (ecmd->advertising & ADVERTISED_10000baseT_Full)
+			advertised |= RNP_LINK_SPEED_10GB_FULL;
+
+		if (ecmd->advertising & ADVERTISED_1000baseT_Full)
+			advertised |= RNP_LINK_SPEED_1GB_FULL;
+
+		if (ecmd->advertising & ADVERTISED_100baseT_Full)
+			advertised |= RNP_LINK_SPEED_100_FULL;
+
+		if (ecmd->advertising & ADVERTISED_10baseT_Full)
+			advertised |= RNP_LINK_SPEED_10_FULL;
+
+		if (ecmd->advertising & ADVERTISED_100baseT_Half)
+			advertised |= RNP_LINK_SPEED_100_HALF;
+
+		if (ecmd->advertising & ADVERTISED_10baseT_Half)
+			advertised |= RNP_LINK_SPEED_10_HALF;
+
+		if (old == advertised)
+			return err;
+
+		if ((!advertised) && (autoneg))
+			return -EINVAL;
+
+		/* this sets the link speed and restarts auto-neg */
+		hw->mac.autotry_restart = true;
+		err = hw->ops.setup_link(hw, advertised, autoneg, speed,
+					 duplex);
+		if (err) {
+			e_info(probe, "setup link failed with code %d\n", err);
+			hw->ops.setup_link(hw, old, autoneg, speed, duplex);
+		}
+	} else {
+		u32 speed = ethtool_cmd_speed(ecmd);
+
+		/* disabling autoneg is only supported on sgmii */
+		if ((ecmd->autoneg == AUTONEG_DISABLE) && (!hw->is_sgmii))
+			return -EINVAL;
+		/* in this case we currently only support 1000M/FULL */
+		if (speed != SPEED_1000)
+			return -EINVAL;
+
+		if (ecmd->duplex == DUPLEX_HALF)
+			return -EINVAL;
+	}
+
+	return err;
+}
+
+#endif
+
+static int rnp500_get_regs_len(struct net_device *netdev)
+{
+#define RNP500_REGS_LEN 1
+	return RNP500_REGS_LEN * sizeof(u32);
+}
+
+static void rnp500_get_drvinfo(struct net_device *netdev,
+			       struct ethtool_drvinfo *drvinfo)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	strncpy(drvinfo->driver, rnpgbe_driver_name, sizeof(drvinfo->driver));
+	strncpy(drvinfo->version, rnpgbe_driver_version,
+		sizeof(drvinfo->version));
+
+#ifdef FPGA_VESION
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		 "fw:0x%x",
+		 hw->fw_version | (hw->sfc_boot ? 0x8000000 : 0));
+#else
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		 "%d.%d.%d.%d", ((unsigned char *)&(hw->fw_version))[3],
+		 ((unsigned char *)&(hw->fw_version))[2],
+		 ((unsigned char *)&(hw->fw_version))[1],
+		 ((unsigned char *)&(hw->fw_version))[0]);
+#endif
+	strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info));
+	drvinfo->n_stats = RNP500_STATS_LEN;
+	drvinfo->testinfo_len = RNP500_TEST_LEN;
+	drvinfo->regdump_len = rnp500_get_regs_len(netdev);
+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
+	drvinfo->n_priv_flags = RNP500_PRIV_FLAGS_STR_LEN;
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
+}
+
+static int rnp500_get_eeprom_len(struct net_device *netdev)
+{
+	/* not supported */
+	return 0;
+}
+
+static int rnp500_get_eeprom(struct net_device *netdev,
+			     struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 *eeprom_buff;
+	int first_u32, last_u32, eeprom_len;
+	int ret_val = 0;
+
+	if (hw->hw_type == rnpgbe_hw_n210)
+		return -EPERM;
+
+	if (eeprom->len == 0)
+		return -EINVAL;
+
+	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+	/* assign to u32 */
+	first_u32 = eeprom->offset >> 2;
+	last_u32 = (eeprom->offset + eeprom->len - 1) >> 2;
+	eeprom_len = last_u32 - first_u32 + 1;
+
+	eeprom_buff = kmalloc_array(eeprom_len, sizeof(u32), GFP_KERNEL);
+	if (!eeprom_buff)
+		return -ENOMEM;
+
+	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 0x03), eeprom->len);
+	kfree(eeprom_buff);
+
+	return ret_val;
+}
+
+static int rnp500_set_eeprom(struct net_device *netdev,
+			     struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	return 0;
+}
+
+static void rnp500_get_pauseparam(struct net_device *netdev,
+				  struct ethtool_pauseparam *pause)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	if (hw->fc.requested_mode & PAUSE_AUTO)
+		pause->autoneg = 1;
+	else
+		pause->autoneg = 0;
+
+	if (hw->fc.current_mode == rnpgbe_fc_rx_pause) {
+		pause->rx_pause = 1;
+	} else if (hw->fc.current_mode == rnpgbe_fc_tx_pause) {
+		pause->tx_pause = 1;
+	} else if (hw->fc.current_mode == rnpgbe_fc_full) {
+		pause->rx_pause = 1;
+		pause->tx_pause = 1;
+	} else {
+		pause->rx_pause = 0;
+		pause->tx_pause = 0;
+	}
+}
+
+static int rnp500_set_pauseparam(struct net_device *netdev,
+				 struct ethtool_pauseparam *pause)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_fc_info fc = hw->fc;
+
+	/* changing pause parameters is not supported in dcb mode */
+	if (adapter->flags & RNP_FLAG_DCB_ENABLED)
+		return -EINVAL;
+
+	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
+
+	fc.requested_mode = 0;
+
+	if (pause->autoneg) {
+		fc.requested_mode |= PAUSE_AUTO;
+	} else {
+		if (pause->tx_pause)
+			fc.requested_mode |= PAUSE_TX;
+		if (pause->rx_pause)
+			fc.requested_mode |= PAUSE_RX;
+	}
+#ifndef NO_CM3_MBX
+	rnpgbe_mbx_phy_pause_set(hw, fc.requested_mode);
+#else /* NO_CM3_MBX */
+	if (pause->autoneg) {
+		/* fc.requested_mode |= PAUSE_AUTO; */
+	} else {
+		if (pause->tx_pause)
+			hw->fc.current_mode = rnpgbe_fc_tx_pause;
+		if (pause->rx_pause)
+			hw->fc.current_mode = rnpgbe_fc_rx_pause;
+	}
+#endif /* NO_CM3_MBX */
+
+	hw->fc = fc;
+	return 0;
+}
+
+static void rnp500_get_regs(struct net_device *netdev,
+			    struct ethtool_regs *regs, void *p)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 *regs_buff = p;
+	int i;
+
+	memset(p, 0, RNP500_REGS_LEN * sizeof(u32));
+
+	for (i = 0; i < RNP500_REGS_LEN; i++)
+		regs_buff[i] = rd32(hw, i * 4);
+}
+
+static int rnp500_nway_reset(struct net_device *netdev)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (netif_running(netdev))
+		rnpgbe_reinit_locked(adapter);
+
+	return 0;
+}
+
+static void rnp500_get_strings(struct net_device *netdev, u32 stringset,
+			       u8 *data)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	char *p = (char *)data;
+	int i;
+	struct rnpgbe_ring *ring;
+	u32 dma_ch;
+
+	switch (stringset) {
+	/* self-test strings may be compiled out via CLOST_SELF_TEST */
+#ifndef CLOST_SELF_TEST
+	case ETH_SS_TEST:
+		for (i = 0; i < RNP500_TEST_LEN; i++) {
+			memcpy(data, rnp500_gstrings_test[i], ETH_GSTRING_LEN);
+			data += ETH_GSTRING_LEN;
+		}
+		break;
+#endif
+	case ETH_SS_STATS:
+		for (i = 0; i < RNP500_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, rnp500_gstrings_net_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		for (i = 0; i < RNP500_HWSTRINGS_STATS_LEN; i++) {
+			memcpy(p, rnp500_hwstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		for (i = 0; i < RNP_NUM_TX_QUEUES; i++) {
+			/* ====  tx ======== */
+			ring = adapter->tx_ring[i];
+			dma_ch = ring->rnpgbe_queue_idx;
+			sprintf(p, "queue%u_tx_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_bytes", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_tx_restart", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_busy", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_done_old", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_clean_desc", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_poll_count", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_irq_more", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_tx_hw_head", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_hw_tail", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_sw_next_to_clean", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_sw_next_to_use", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_send_bytes", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_send_bytes_to_hw", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_todo_update", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_send_done_bytes", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_added_vlan_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_next_to_clean", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_irq_miss", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_tx_equal_count", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_clean_times", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_clean_count", i);
+			p += ETH_GSTRING_LEN;
+
+			/* ====  rx ======== */
+			ring = adapter->rx_ring[i];
+			dma_ch = ring->rnpgbe_queue_idx;
+			sprintf(p, "queue%u_rx_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_bytes", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_driver_drop_packets", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_rx_rsc", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_rsc_flush", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_non_eop_descs", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_alloc_page_failed", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_alloc_buff_failed", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_alloc_page", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_csum_offload_errs", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_csum_offload_good", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_poll_again_count", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_rm_vlan_packets", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_rx_hw_head", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_hw_tail", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_sw_next_to_use", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_sw_next_to_clean", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_rx_next_to_clean", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_irq_miss", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_equal_count", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_rx_clean_times", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_clean_count", i);
+			p += ETH_GSTRING_LEN;
+		}
+
+		break;
+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
+	case ETH_SS_PRIV_FLAGS:
+		memcpy(data, rnp500_priv_flags_strings,
+		       RNP500_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+		break;
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
+	}
+}
+
+#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
+static int rnp500_get_stats_count(struct net_device *netdev)
+{
+	return RNP500_STATS_LEN;
+}
+
+#else /* HAVE_ETHTOOL_GET_SSET_COUNT */
+
+static int rnp500_get_sset_count(struct net_device *netdev, int sset)
+{
+	switch (sset) {
+	/* self-test entries are only counted when self-test is enabled */
+#ifndef CLOST_SELF_TEST
+	case ETH_SS_TEST:
+		return RNP500_TEST_LEN;
+#endif /* CLOST_SELF_TEST */
+	case ETH_SS_STATS:
+		return RNP500_STATS_LEN;
+	case ETH_SS_PRIV_FLAGS:
+		return RNP500_PRIV_FLAGS_STR_LEN;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static u32 rnp500_get_priv_flags(struct net_device *netdev)
+{
+	struct rnpgbe_adapter *adapter =
+		(struct rnpgbe_adapter *)netdev_priv(netdev);
+	u32 priv_flags = 0;
+
+	if (adapter->priv_flags & RNP_PRIV_FLAG_MAC_LOOPBACK)
+		priv_flags |= RNP500_MAC_LOOPBACK;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_PADDING_DEBUG)
+		priv_flags |= RNP500_PADDING_DEBUG;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_SIMUATE_DOWN)
+		priv_flags |= RNP500_SIMULATE_DOWN;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_ULTRA_SHORT)
+		priv_flags |= RNP500_ULTRA_SHORT;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_DOUBLE_VLAN)
+		priv_flags |= RNP500_DOUBLE_VLAN;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_PAUSE_OWN)
+		priv_flags |= RNP500_PAUSE_OWN;
+	if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED)
+		priv_flags |= RNP500_STAGS_ENABLE;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_JUMBO)
+		priv_flags |= RNP500_JUMBO_ENABLE;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING)
+		priv_flags |= RNP500_TX_PADDING;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_SOFT_TX_PADDING)
+		priv_flags |= RNP500_TX_SOLF_PADDING;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_REC_HDR_LEN_ERR)
+		priv_flags |= RNP500_REC_HDR_LEN_ERR;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_DOUBLE_VLAN_RECEIVE)
+		priv_flags |= RNP500_DOUBLE_VLAN_RECEIVE;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_RX_SKIP_EN)
+		priv_flags |= RNP500_RX_SKIP_EN;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC_PRIO)
+		priv_flags |= RNP500_TCP_SYNC_PRIO;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_REMAP_PRIO)
+		priv_flags |= RNP500_REMAP_PRIO;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_8023_PRIO)
+		priv_flags |= RNP500_8023_PRIO;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE)
+		priv_flags |= RNP500_SRIOV_VLAN_MODE;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_LLDP)
+		priv_flags |= RNP500_LLDP_EN;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)
+		priv_flags |= RNP500_FORCE_CLOSE;
+
+	return priv_flags;
+}
+
+static int rnp500_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+	struct rnpgbe_adapter *adapter =
+		(struct rnpgbe_adapter *)netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_mac_info *mac = &hw->mac;
+	u32 data_old;
+	u32 data_new;
+
+	data_old = dma_rd32(dma, RNP_DMA_CONFIG);
+	data_new = data_old;
+	dbg("data old is %x\n", data_old);
+
+	if (priv_flags & RNP500_MAC_LOOPBACK) {
+		SET_BIT(n500_mac_loopback, data_new);
+		adapter->priv_flags |= RNP_PRIV_FLAG_MAC_LOOPBACK;
+	} else if (adapter->priv_flags & RNP_PRIV_FLAG_MAC_LOOPBACK) {
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_MAC_LOOPBACK);
+		CLR_BIT(n500_mac_loopback, data_new);
+	}
+
+	if (priv_flags & RNP500_PADDING_DEBUG)
+		adapter->priv_flags |= RNP_PRIV_FLAG_PADDING_DEBUG;
+	else if (adapter->priv_flags & RNP_PRIV_FLAG_PADDING_DEBUG)
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_PADDING_DEBUG);
+
+	if (priv_flags & RNP500_SIMULATE_DOWN) {
+		adapter->priv_flags |= RNP_PRIV_FLAG_SIMUATE_DOWN;
+		/* set check link again */
+		adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE;
+	} else if (adapter->priv_flags & RNP_PRIV_FLAG_SIMUATE_DOWN) {
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_SIMUATE_DOWN);
+		/* set check link again */
+		adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE;
+	}
+
+	/* ultra-short mode lowers the minimum accepted rx frame length */
+	if (priv_flags & RNP500_ULTRA_SHORT) {
+		int min = 33;
+
+		adapter->priv_flags |= RNP_PRIV_FLAG_ULTRA_SHORT;
+		eth_wr32(eth, RNP500_ETH_DEFAULT_RX_MIN_LEN, min);
+
+	} else {
+		int min = 60;
+
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_ULTRA_SHORT);
+		eth_wr32(eth, RNP500_ETH_DEFAULT_RX_MIN_LEN, min);
+	}
+	if (priv_flags & RNP500_PAUSE_OWN) {
+		u32 data;
+
+		data = mac_rd32(mac, GMAC_FLOW_CTRL);
+		data |= GMAC_FLOW_CTRL_UP;
+		adapter->priv_flags |= RNP_PRIV_FLAG_PAUSE_OWN;
+		mac_wr32(mac, GMAC_FLOW_CTRL, data);
+
+	} else {
+		u32 data;
+
+		data = mac_rd32(mac, GMAC_FLOW_CTRL);
+		data &= (~GMAC_FLOW_CTRL_UP);
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_PAUSE_OWN);
+		mac_wr32(mac, GMAC_FLOW_CTRL, data);
+	}
+
+	if (priv_flags & RNP500_DOUBLE_VLAN) {
+		adapter->priv_flags |= RNP_PRIV_FLAG_DOUBLE_VLAN;
+		eth->ops.set_double_vlan(eth, true);
+	} else {
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_DOUBLE_VLAN);
+		eth->ops.set_double_vlan(eth, false);
+	}
+
+	if (priv_flags & RNP500_STAGS_ENABLE) {
+		eth_wr32(eth, RNP500_ETH_TX_VLAN_CONTROL_EANBLE, 1);
+		adapter->flags2 |= RNP_FLAG2_VLAN_STAGS_ENABLED;
+		eth->ops.set_vfta(eth, adapter->stags_vid, true);
+	} else {
+		int true_remove = 1;
+		int vid = adapter->stags_vid;
+
+		eth_wr32(eth, RNP500_ETH_TX_VLAN_CONTROL_EANBLE, 0);
+		adapter->flags2 &= (~RNP_FLAG2_VLAN_STAGS_ENABLED);
+		if (vid) {
+#ifndef HAVE_VLAN_RX_REGISTER
+			if (test_bit(vid, adapter->active_vlans))
+				true_remove = 0;
+
+#ifdef NETIF_F_HW_VLAN_STAG_RX
+			if (test_bit(vid, adapter->active_vlans_stags))
+				true_remove = 0;
+#endif /* NETIF_F_HW_VLAN_STAG_RX */
+#endif /* HAVE_VLAN_RX_REGISTER */
+			if (true_remove)
+				hw->ops.set_vlan_filter(hw, vid, false, false);
+		}
+	}
+
+	if (priv_flags & RNP500_JUMBO_ENABLE) {
+		adapter->priv_flags |= RNP_PRIV_FLAG_JUMBO;
+		hw->ops.set_mtu(hw, netdev->mtu);
+	} else {
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_JUMBO);
+		hw->ops.set_mtu(hw, netdev->mtu);
+	}
+
+	if (priv_flags & RNP500_TX_PADDING)
+		adapter->priv_flags |= RNP_PRIV_FLAG_TX_PADDING;
+	else
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_TX_PADDING);
+
+	if (priv_flags & RNP500_TX_SOLF_PADDING)
+		adapter->priv_flags |= RNP_PRIV_FLAG_SOFT_TX_PADDING;
+	else
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_SOFT_TX_PADDING);
+
+	if (priv_flags & RNP500_REC_HDR_LEN_ERR) {
+		adapter->priv_flags |= RNP_PRIV_FLAG_REC_HDR_LEN_ERR;
+		eth_wr32(eth, RNP500_ETH_ERR_MASK_VECTOR, 0);
+
+	} else if (adapter->priv_flags & RNP_PRIV_FLAG_REC_HDR_LEN_ERR) {
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_REC_HDR_LEN_ERR);
+		eth_wr32(eth, RNP500_ETH_ERR_MASK_VECTOR,
+			 PKT_LEN_ERR | HDR_LEN_ERR);
+	}
+
+	if (priv_flags & RNP500_DOUBLE_VLAN_RECEIVE) {
+		adapter->priv_flags |= RNP_PRIV_FLAG_DOUBLE_VLAN_RECEIVE;
+		if (!(adapter->priv_flags & RNP_PRIV_FLAG_RX_ALL))
+			eth_wr32(eth, RNP500_ETH_DOUBLE_VLAN_DROP, 0);
+	} else {
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_DOUBLE_VLAN_RECEIVE);
+		if (!(adapter->priv_flags & RNP_PRIV_FLAG_RX_ALL))
+			eth_wr32(eth, RNP500_ETH_DOUBLE_VLAN_DROP, 1);
+	}
+
+	if (priv_flags & RNP500_TCP_SYNC_PRIO)
+		adapter->priv_flags |= RNP_PRIV_FLAG_TCP_SYNC_PRIO;
+	else
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_TCP_SYNC_PRIO);
+
+	if (priv_flags & RNP500_SRIOV_VLAN_MODE) {
+		int i;
+
+		adapter->priv_flags |= RNP_PRIV_FLAG_SRIOV_VLAN_MODE;
+		if (!(adapter->flags & RNP_FLAG_SRIOV_INIT_DONE))
+			goto skip_setup_vf_vlan_n500;
+		/* program the VLVF table for each active VF VLAN */
+		for (i = 0; i < adapter->num_vfs; i++) {
+			if (hw->ops.set_vf_vlan_mode) {
+				if (adapter->vfinfo[i].vf_vlan)
+					hw->ops.set_vf_vlan_mode(
+						hw, adapter->vfinfo[i].vf_vlan,
+						i, true);
+
+				if (adapter->vfinfo[i].pf_vlan)
+					hw->ops.set_vf_vlan_mode(
+						hw, adapter->vfinfo[i].pf_vlan,
+						i, true);
+			}
+		}
+
+	} else if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) {
+		int i;
+
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_SRIOV_VLAN_MODE);
+		/* clear the VLVF table for every possible VF */
+		for (i = 0; i < hw->max_vfs; i++) {
+			if (hw->ops.set_vf_vlan_mode)
+				hw->ops.set_vf_vlan_mode(hw, 0, i, false);
+		}
+	}
+
+	if (priv_flags & RNP500_LLDP_EN) {
+		/* enable LLDP */
+		hw->ops.set_lldp(hw, true);
+		adapter->priv_flags |= RNP_PRIV_FLAG_LLDP;
+
+	} else if (adapter->priv_flags & RNP_PRIV_FLAG_LLDP) {
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_LLDP);
+		/* disable LLDP */
+		hw->ops.set_lldp(hw, false);
+	}
+
+	/* link-down-on-close is only honoured when the firmware supports it */
+	if (hw->force_cap) {
+		if (priv_flags & RNP500_FORCE_CLOSE) {
+			if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) {
+				adapter->priv_flags |= RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE;
+				if (hw->ops.driver_status)
+					hw->ops.driver_status(hw, true,
+						rnpgbe_driver_force_control_phy);
+			}
+		} else if (adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE) {
+			adapter->priv_flags &= (~RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE);
+			if (hw->ops.driver_status)
+				hw->ops.driver_status(hw, false,
+						rnpgbe_driver_force_control_phy);
+
+		}
+	} else if (priv_flags & RNP500_FORCE_CLOSE) {
+		rnpgbe_err("firmware does not support forcing link down on close\n");
+		return -EOPNOTSUPP;
+	}
+
+skip_setup_vf_vlan_n500:
+	if (priv_flags & RNP500_8023_PRIO) {
+		adapter->priv_flags |= RNP_PRIV_FLAG_8023_PRIO;
+		eth_wr32(eth, RNP500_PRIORITY_EN_8023, 1);
+	} else {
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_8023_PRIO);
+		eth_wr32(eth, RNP500_PRIORITY_EN_8023, 0);
+	}
+
+	if (priv_flags & RNP500_REMAP_PRIO)
+		adapter->priv_flags |= RNP_PRIV_FLAG_REMAP_PRIO;
+	else
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_REMAP_PRIO);
+
+	if (priv_flags & (RNP500_8023_PRIO | RNP500_REMAP_PRIO)) {
+		eth_wr32(eth, RNP500_PRIORITY_1_MARK, RNP500_PRIORITY_1);
+		eth_wr32(eth, RNP500_PRIORITY_0_MARK, RNP500_PRIORITY_0);
+		eth_wr32(eth, RNP500_PRIORITY_EN, 1);
+	} else {
+		eth_wr32(eth, RNP500_PRIORITY_EN, 0);
+	}
+
+	if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC) {
+		if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC_PRIO)
+			hw->ops.set_tcp_sync_remapping(
+				hw, adapter->tcp_sync_queue, true, true);
+		else
+			hw->ops.set_tcp_sync_remapping(
+				hw, adapter->tcp_sync_queue, true, false);
+	}
+
+	if (data_old != data_new)
+		dma_wr32(dma, RNP_DMA_CONFIG, data_new);
+	/* notify the firmware if the ft_padding state changed */
+	if (CHK_BIT(n500_padding_enable, data_old) !=
+	    CHK_BIT(n500_padding_enable, data_new)) {
+		rnpgbe_msg_post_status(adapter, PF_FT_PADDING_STATUS);
+	}
+
+	return 0;
+}
+
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
+
+static void rnp500_get_ethtool_stats(struct net_device *netdev,
+				     struct ethtool_stats *stats, u64 *data)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct net_device_stats *net_stats = &netdev->stats;
+	struct rnpgbe_ring *ring;
+	int i, j;
+	char *p = NULL;
+
+	rnpgbe_update_stats(adapter);
+
+	for (i = 0; i < RNP500_GLOBAL_STATS_LEN; i++) {
+		p = (char *)net_stats +
+		    rnp500_gstrings_net_stats[i].stat_offset;
+		data[i] = (rnp500_gstrings_net_stats[i].sizeof_stat ==
+			   sizeof(u64)) ?
+				  *(u64 *)p :
+				  *(u32 *)p;
+	}
+	for (j = 0; j < RNP500_HWSTRINGS_STATS_LEN; j++, i++) {
+		p = (char *)adapter + rnp500_hwstrings_stats[j].stat_offset;
+		data[i] =
+			(rnp500_hwstrings_stats[j].sizeof_stat == sizeof(u64)) ?
+				*(u64 *)p :
+				*(u32 *)p;
+	}
+
+	BUG_ON(RNP_NUM_TX_QUEUES != RNP_NUM_RX_QUEUES);
+
+	for (j = 0; j < RNP_NUM_TX_QUEUES; j++) {
+		int idx;
+		/* tx-ring */
+		ring = adapter->tx_ring[j];
+		if (!ring) {
+			/* no tx ring here: report zeros for the 22 tx and
+			 * 22 rx per-queue stats so the layout stays aligned
+			 * with rnp500_get_strings
+			 */
+			memset(&data[i], 0, 44 * sizeof(u64));
+			i += 44;
+			continue;
+		}
+		idx = ring->rnpgbe_queue_idx;
+
+		data[i++] = ring->stats.packets;
+		data[i++] = ring->stats.bytes;
+
+		data[i++] = ring->tx_stats.restart_queue;
+		data[i++] = ring->tx_stats.tx_busy;
+		data[i++] = ring->tx_stats.tx_done_old;
+		data[i++] = ring->tx_stats.clean_desc;
+		data[i++] = ring->tx_stats.poll_count;
+		data[i++] = ring->tx_stats.irq_more_count;
+
+		/* rnpgbe_tx_queue_ring_stat */
+		data[i++] = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_HEAD);
+		data[i++] = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_TAIL);
+		data[i++] = ring->next_to_clean;
+		data[i++] = ring->next_to_use;
+		data[i++] = ring->tx_stats.send_bytes;
+		data[i++] = ring->tx_stats.send_bytes_to_hw;
+		data[i++] = ring->tx_stats.todo_update;
+		data[i++] = ring->tx_stats.send_done_bytes;
+		data[i++] = ring->tx_stats.vlan_add;
+		if (ring->tx_stats.tx_next_to_clean == -1)
+			data[i++] = ring->count;
+		else
+			data[i++] = ring->tx_stats.tx_next_to_clean;
+		data[i++] = ring->tx_stats.tx_irq_miss;
+		data[i++] = ring->tx_stats.tx_equal_count;
+		data[i++] = ring->tx_stats.tx_clean_times;
+		data[i++] = ring->tx_stats.tx_clean_count;
+
+		/* rx-ring */
+		ring = adapter->rx_ring[j];
+		if (!ring) {
+			/* no rx ring here: report zeros for the 22 rx
+			 * per-queue stats
+			 */
+			memset(&data[i], 0, 22 * sizeof(u64));
+			i += 22;
+			continue;
+		}
+		idx = ring->rnpgbe_queue_idx;
+		data[i++] = ring->stats.packets;
+		data[i++] = ring->stats.bytes;
+
+		data[i++] = ring->rx_stats.driver_drop_packets;
+		data[i++] = ring->rx_stats.rsc_count;
+		data[i++] = ring->rx_stats.rsc_flush;
+		data[i++] = ring->rx_stats.non_eop_descs;
+		data[i++] = ring->rx_stats.alloc_rx_page_failed;
+		data[i++] = ring->rx_stats.alloc_rx_buff_failed;
+		data[i++] = ring->rx_stats.alloc_rx_page;
+		data[i++] = ring->rx_stats.csum_err;
+		data[i++] = ring->rx_stats.csum_good;
+		data[i++] = ring->rx_stats.poll_again_count;
+		data[i++] = ring->rx_stats.vlan_remove;
+
+		/* rnpgbe_rx_queue_ring_stat */
+		data[i++] = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_HEAD);
+		data[i++] = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_TAIL);
+		data[i++] = ring->next_to_use;
+		data[i++] = ring->next_to_clean;
+		if (ring->rx_stats.rx_next_to_clean == -1)
+			data[i++] = ring->count;
+		else
+			data[i++] = ring->rx_stats.rx_next_to_clean;
+		data[i++] = ring->rx_stats.rx_irq_miss;
+		data[i++] = ring->rx_stats.rx_equal_count;
+		data[i++] = ring->rx_stats.rx_clean_times;
+		data[i++] = ring->rx_stats.rx_clean_count;
+	}
+}
+
+static const struct ethtool_ops rnp500_ethtool_ops = {
+#if defined(ETHTOOL_GLINKSETTINGS) && !defined(KYLIN_V4_ETHTOOL_FIX_BOND)
+	.get_link_ksettings = rnp500_get_link_ksettings,
+	.set_link_ksettings = rnp500_set_link_ksettings,
+#else
+	.get_settings = rnp500_get_settings,
+	.set_settings = rnp500_set_settings,
+#endif
+	.get_drvinfo = rnp500_get_drvinfo,
+	.get_regs_len = rnp500_get_regs_len,
+	.get_regs = rnp500_get_regs,
+	.get_wol = rnpgbe_get_wol,
+	.set_wol = rnpgbe_set_wol,
+	.nway_reset = rnp500_nway_reset,
+	.get_link = ethtool_op_get_link,
+	.get_eeprom_len = rnp500_get_eeprom_len,
+	.get_eeprom = rnp500_get_eeprom,
+	.set_eeprom = rnp500_set_eeprom,
+	.get_ringparam = rnpgbe_get_ringparam,
+	.set_ringparam = rnpgbe_set_ringparam,
+	.get_pauseparam = rnp500_get_pauseparam,
+	.set_pauseparam = rnp500_set_pauseparam,
+	.get_msglevel = rnpgbe_get_msglevel,
+	.set_msglevel = rnpgbe_set_msglevel,
+#ifndef CLOST_SELF_TEST
+	.self_test = rnpgbe_diag_test,
+#endif
+	.get_strings = rnp500_get_strings,
+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#ifdef HAVE_ETHTOOL_SET_PHYS_ID
+	.set_phys_id = rnpgbe_set_phys_id,
+#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
+	.get_stats_count = rnp500_get_stats_count,
+#else /* HAVE_ETHTOOL_GET_SSET_COUNT */
+	.get_sset_count = rnp500_get_sset_count,
+	.get_priv_flags = rnp500_get_priv_flags,
+	.set_priv_flags = rnp500_set_priv_flags,
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
+	.get_ethtool_stats = rnp500_get_ethtool_stats,
+#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
+	.get_perm_addr = ethtool_op_get_perm_addr,
+#endif /* HAVE_ETHTOOL_GET_PERM_ADDR */
+	.get_coalesce = rnpgbe_get_coalesce,
+	.set_coalesce = rnpgbe_set_coalesce,
+#ifdef ETHTOOL_COALESCE_USECS
+	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+				     ETHTOOL_COALESCE_MAX_FRAMES,
+#endif /* ETHTOOL_COALESCE_USECS */
+
+#ifndef HAVE_NDO_SET_FEATURES
+	.get_rx_csum = rnpgbe_get_rx_csum,
+	.set_rx_csum = rnpgbe_set_rx_csum,
+	.get_tx_csum = ethtool_op_get_tx_csum,
+	.set_tx_csum = rnpgbe_set_tx_csum,
+	.get_sg = ethtool_op_get_sg,
+	.set_sg = ethtool_op_set_sg,
+#ifdef NETIF_F_TSO
+	.get_tso = ethtool_op_get_tso,
+	.set_tso = rnpgbe_set_tso,
+#endif /* NETIF_F_TSO */
+#ifdef ETHTOOL_GFLAGS
+	.get_flags = ethtool_op_get_flags,
+#endif
+#endif /* HAVE_NDO_SET_FEATURES */
+#ifdef ETHTOOL_GRXRINGS
+	.get_rxnfc = rnpgbe_get_rxnfc,
+	.set_rxnfc = rnpgbe_set_rxnfc,
+#endif
+
+#ifdef ETHTOOL_SRXNTUPLE
+	.set_rx_ntuple = rnpgbe_set_rx_ntuple,
+#endif
+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#ifdef ETHTOOL_GEEE
+#ifdef HAVE_ETHTOOL_KEEE
+	.get_eee = rnpgbe_get_keee,
+#else
+	.get_eee = rnpgbe_get_eee,
+#endif /* HAVE_ETHTOOL_KEEE */
+#endif /* ETHTOOL_GEEE */
+#ifdef ETHTOOL_SEEE
+#ifdef HAVE_ETHTOOL_KEEE
+	.set_eee = rnpgbe_set_keee,
+#else
+	.set_eee = rnpgbe_set_eee,
+#endif /* HAVE_ETHTOOL_KEEE */
+#endif /* ETHTOOL_SEEE */
+#ifdef ETHTOOL_SCHANNELS
+	.get_channels = rnpgbe_get_channels,
+	.set_channels = rnpgbe_set_channels,
+#endif
+#ifdef ETHTOOL_GMODULEINFO
+	.get_module_info = rnpgbe_get_module_info,
+	.get_module_eeprom = rnpgbe_get_module_eeprom,
+#endif
+#ifdef HAVE_ETHTOOL_GET_TS_INFO
+	.get_ts_info = rnpgbe_get_ts_info,
+#endif
+#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)
+	.get_rxfh_indir_size = rnpgbe_rss_indir_size,
+	.get_rxfh_key_size = rnpgbe_get_rxfh_key_size,
+	.get_rxfh = rnpgbe_get_rxfh,
+	.set_rxfh = rnpgbe_set_rxfh,
+#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */
+	.get_dump_flag = rnpgbe_get_dump_flag,
+	.get_dump_data = rnpgbe_get_dump_data,
+	.set_dump = rnpgbe_set_dump,
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+
+#if defined(HAVE_DDP_PROFILE_UPLOAD_SUPPORT) || defined(CONFIG_LINX_SERIAL)
+	.flash_device = rnpgbe_flash_device,
+#endif /* HAVE_DDP_PROFILE_UPLOAD_SUPPORT */
+};
+
+static void rnpgbe_set_ethtool_hw_ops_n500(struct net_device *netdev)
+{
+#ifndef ETHTOOL_OPS_COMPAT
+	netdev->ethtool_ops = &rnp500_ethtool_ops;
+#else
+	SET_ETHTOOL_OPS(netdev, &rnp500_ethtool_ops);
+#endif
+}
+
+static struct rnpgbe_hw_operations hw_ops_n500 = {
+	.init_hw = &rnpgbe_init_hw_ops_n500,
+	.reset_hw = &rnpgbe_reset_hw_ops_n500,
+	.start_hw = &rnpgbe_start_hw_ops_n500,
+	.set_mtu = &rnpgbe_set_mtu_hw_ops_n500,
+	.set_vlan_filter_en = &rnpgbe_set_vlan_filter_en_hw_ops_n500,
+	.set_vlan_filter = &rnpgbe_set_vlan_filter_hw_ops_n500,
+	.set_veb_vlan_mask = &rnpgbe_set_veb_vlan_mask_hw_ops_n500,
+	.set_vf_vlan_filter = &rnpgbe_set_vf_vlan_filter_hw_ops_n500,
+	.set_vlan_strip = &rnpgbe_set_vlan_strip_hw_ops_n500,
+	.set_mac = &rnpgbe_set_mac_hw_ops_n500,
+	.set_rx_mode = &rnpgbe_set_rx_mode_hw_ops_n500,
+	.set_rar_with_vf = &rnpgbe_set_rar_with_vf_hw_ops_n500,
+	.clr_rar = &rnpgbe_clr_rar_hw_ops_n500,
+	.clr_rar_all = &rnpgbe_clr_rar_all_hw_ops_n500,
+	.clr_vlan_veb = &rnpgbe_clr_vlan_veb_hw_ops_n500,
+	.set_txvlan_mode = &rnpgbe_set_txvlan_mode_hw_ops_n500,
+	.set_fcs_mode = &rnpgbe_set_fcs_mode_hw_ops_n500,
+	.set_vxlan_port = &rnpgbe_set_vxlan_port_hw_ops_n500,
+	.set_vxlan_mode = &rnpgbe_set_vxlan_mode_hw_ops_n500,
+	.set_mac_rx = &rnpgbe_set_mac_rx_hw_ops_n500,
+	.set_mac_speed = &rnpgbe_set_mac_speed_hw_ops_n500,
+	.set_rx_hash = &rnpgbe_set_rx_hash_hw_ops_n500,
+	.set_pause_mode = &rnpgbe_set_pause_mode_hw_ops_n500,
+	.get_pause_mode = &rnpgbe_get_pause_mode_hw_ops_n500,
+	.update_hw_info = &rnpgbe_update_hw_info_hw_ops_n500,
+	.update_rx_drop = &rnpgbe_update_hw_rx_drop_hw_ops_n500,
+	.update_sriov_info = &rnpgbe_update_sriov_info_hw_ops_n500,
+	.set_sriov_status = &rnpgbe_set_sriov_status_hw_ops_n500,
+	.set_sriov_vf_mc = &rnpgbe_set_sriov_vf_mc_hw_ops_n500,
+	.init_rx_addrs = &rnpgbe_init_rx_addrs_hw_ops_n500,
+	.clr_vfta = &rnpgbe_clr_vfta_hw_ops_n500,
+	.set_rss_hfunc = &rnpgbe_set_rss_hfunc_hw_ops_n500,
+	.set_rss_key = &rnpgbe_set_rss_key_hw_ops_n500,
+	.set_rss_table = &rnpgbe_set_rss_table_hw_ops_n500,
+	.update_hw_status = &rnpgbe_update_hw_status_hw_ops_n500,
+	.set_mbx_link_event = &rnpgbe_set_mbx_link_event_hw_ops_n500,
+	.set_mbx_ifup = &rnpgbe_set_mbx_ifup_hw_ops_n500,
+	.check_link = &rnpgbe_check_mac_link_hw_ops_n500,
+	.setup_link = &rnpgbe_setup_mac_link_hw_ops_n500,
+	.clean_link = &rnpgbe_clean_link_hw_ops_n500,
+	.get_link_capabilities = &rnpgbe_get_link_capabilities_hw_ops_n500,
+	.set_layer2_remapping = &rnpgbe_set_layer2_hw_ops_n500,
+	.clr_layer2_remapping = &rnpgbe_clr_layer2_hw_ops_n500,
+	.clr_all_layer2_remapping = &rnpgbe_clr_all_layer2_hw_ops_n500,
+	.set_tuple5_remapping = &rnpgbe_set_tuple5_hw_ops_n500,
+	.clr_tuple5_remapping = &rnpgbe_clr_tuple5_hw_ops_n500,
+	.clr_all_tuple5_remapping = &rnpgbe_clr_all_tuple5_hw_ops_n500,
+	.set_tcp_sync_remapping = &rnpgbe_set_tcp_sync_hw_ops_n500,
+	.set_rx_skip = &rnpgbe_set_rx_skip_hw_ops_n500,
+	.set_outer_vlan_type = &rnpgbe_set_outer_vlan_type_hw_ops_n500,
+	.setup_ethtool = &rnpgbe_set_ethtool_hw_ops_n500,
+	.get_thermal_sensor_data = &rnpgbe_get_thermal_sensor_data_hw_ops_n500,
+	.init_thermal_sensor_thresh =
+		&rnpgbe_init_thermal_sensor_thresh_hw_ops_n500,
+	.phy_read_reg = &rnpgbe_phy_read_reg_hw_ops_n500,
+	.phy_write_reg = &rnpgbe_phy_write_reg_hw_ops_n500,
+	.setup_wol = &rnpgbe_setup_wol_hw_ops_n500,
+	.set_vf_vlan_mode = &rnpgbe_set_vf_vlan_mode_hw_ops_n500,
+	.driver_status = &rnpgbe_driver_status_hw_ops_n500,
+	.setup_eee = &rnpgbe_setup_eee_hw_ops_n500,
+	.set_eee_mode = &rnpgbe_set_eee_mode_hw_ops_n500,
+	.reset_eee_mode = &rnpgbe_reset_eee_mode_hw_ops_n500,
+	.set_eee_timer = &rnpgbe_set_eee_timer_hw_ops_n500,
+	.set_eee_pls = &rnpgbe_set_eee_pls_hw_ops_n500,
+	.get_lpi_status = &rnpgbe_get_lpi_status_hw_ops_n500,
+	.get_ncsi_mac = &rnpgbe_get_ncsi_mac_hw_ops_n500,
+	.get_ncsi_vlan = &rnpgbe_get_ncsi_vlan_hw_ops_n500,
+	.set_lldp = &rnpgbe_set_lldp_hw_ops_n500,
+	.get_lldp = &rnpgbe_get_lldp_hw_ops_n500,
+};
+
+static void rnpgbe_mac_set_rx_n500(struct rnpgbe_mac_info *mac, bool status)
+{
+	u32 value = mac_rd32(mac, GMAC_CONTROL);
+
+	if (status)
+		value |= GMAC_CONTROL_TE | GMAC_CONTROL_RE;
+	else
+		value &= ~(GMAC_CONTROL_RE);
+
+	mac_wr32(mac, GMAC_CONTROL, value);
+	value = mac_rd32(mac, GMAC_FRAME_FILTER);
+	mac_wr32(mac, GMAC_FRAME_FILTER, value | 1);
+}
+
+static void rnpgbe_mac_set_speed_n500(struct rnpgbe_mac_info *mac, bool link,
+				      u32 speed, bool duplex)
+{
+#define SPEED_MASK (RNP_DM_MASK | RNP_FES_MASK | RNP_PS_MASK | RNP_LUD_MASK)
+	u32 value = mac_rd32(mac, GMAC_CONTROL);
+
+	value &= (~SPEED_MASK);
+
+	if (link)
+		value |= RNP_LUD_MASK;
+
+	if (duplex)
+		value |= RNP_DM_MASK;
+
+	switch (speed) {
+	case RNP_LINK_SPEED_100_FULL:
+		value |= RNP_PS_MASK;
+		value |= RNP_FES_MASK;
+		break;
+	case RNP_LINK_SPEED_10_FULL:
+		value |= RNP_PS_MASK;
+		break;
+	}
+
+	mac_wr32(mac, GMAC_CONTROL, value);
+}
+
+static void rnpgbe_mac_fcs_n500(struct rnpgbe_mac_info *mac, bool status)
+{
+#define RNP500_CST_MASK BIT(25)
+	u32 value = mac_rd32(mac, GMAC_CONTROL);
+
+	if (status)
+		value &= (~RNP500_CST_MASK);
+	else
+		value |= (RNP500_CST_MASK);
+	mac_wr32(mac, GMAC_CONTROL, value);
+}
+
+/**
+ *  rnpgbe_mac_fc_mode_n500 - Enable flow control
+ *  @mac: pointer to the MAC info structure
+ *
+ *  Enable flow control according to the current settings.
+ **/
+static s32 rnpgbe_mac_fc_mode_n500(struct rnpgbe_mac_info *mac)
+{
+	struct rnpgbe_hw *hw = (struct rnpgbe_hw *)mac->back;
+	s32 ret_val = 0;
+	unsigned int flow = GMAC_FLOW_CTRL_UP;
+
+	flow = mac_rd32(mac, GMAC_FLOW_CTRL);
+	flow &= GMAC_FLOW_CTRL_UP;
+
+	/*
+	 * A zero pause time means flow control was never properly
+	 * configured, so refuse to program the MAC in that case.
+	 */
+	if (!hw->fc.pause_time) {
+		ret_val = RNP_ERR_INVALID_LINK_SETTINGS;
+		goto out;
+	}
+
+	switch (hw->fc.current_mode) {
+	case rnpgbe_fc_none:
+		/*
+		 * Flow control is disabled by software override or autoneg.
+		 * The code below will actually disable it in the HW.
+		 */
+		break;
+	case rnpgbe_fc_rx_pause:
+		/*
+		 * Rx Flow control is enabled and Tx Flow control is
+		 * disabled by software override. Since there really
+		 * isn't a way to advertise that we are capable of RX
+		 * Pause ONLY, we will advertise that we support both
+		 * symmetric and asymmetric Rx PAUSE.  Later, we will
+		 * disable the adapter's ability to send PAUSE frames.
+		 */
+		flow |= GMAC_FLOW_CTRL_RFE;
+		break;
+	case rnpgbe_fc_tx_pause:
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is
+		 * disabled by software override.
+		 */
+		flow |= GMAC_FLOW_CTRL_TFE;
+		break;
+	case rnpgbe_fc_full:
+		/* Flow control (both Rx and Tx) is enabled by SW override. */
+		flow |= GMAC_FLOW_CTRL_RFE;
+		flow |= GMAC_FLOW_CTRL_TFE;
+		break;
+	default:
+		hw_dbg(hw, "Flow control param set incorrectly\n");
+		ret_val = RNP_ERR_CONFIG;
+		goto out;
+	}
+
+	flow |= (hw->fc.pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
+	mac_wr32(mac, GMAC_FLOW_CTRL, flow);
+
+out:
+	return ret_val;
+}
+
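+/* Poll the register at @addr until the bits in @mask clear or @count attempts
+ * are exhausted; returns true if the busy bits never cleared (timeout).
+ */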
+static bool poll_free_mdio(u8 __iomem *addr, u32 mask, int count)
+{
+	unsigned int value;
+	int con = 0;
+
+	do {
+		value = rnpgbe_rd_reg(addr);
+		usleep_range(10, 100);
+		con++;
+	} while ((value & mask) && (con < count));
+
+	return !!(con >= count);
+}
+
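+/* Clause 22 MDIO read through the GMAC MII address/data register pair: wait
+ * for the bus to go idle, program the PHY address, register and clock fields,
+ * then wait again and fetch the 16-bit result from the data register.
+ */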
+static int rnpgbe_mdio_read(struct rnpgbe_mac_info *mac, int phyreg)
+{
+#define MII_BUSY 0x00000001
+#define MII_WRITE 0x00000002
+#define MII_DATA_MASK GENMASK(15, 0)
+
+	unsigned int mii_address = mac->mii.addr;
+	unsigned int mii_data = mac->mii.data;
+	u32 value = MII_BUSY;
+	int data = 0;
+	int phyaddr = mac->phy_addr;
+
+	value |= (phyaddr << mac->mii.addr_shift) & mac->mii.addr_mask;
+	value |= (phyreg << mac->mii.reg_shift) & mac->mii.reg_mask;
+	value |= (mac->clk_csr << mac->mii.clk_csr_shift) &
+		 mac->mii.clk_csr_mask;
+
+	if (poll_free_mdio(mac->mac_addr + mii_address, MII_BUSY, 100))
+		return -EBUSY;
+
+	mac_wr32(mac, mii_data, data);
+	mac_wr32(mac, mii_address, value);
+
+	if (poll_free_mdio(mac->mac_addr + mii_address, MII_BUSY, 100))
+		return -EBUSY;
+	/* Read the data from the MII data register */
+	data = (int)mac_rd32(mac, mii_data) & MII_DATA_MASK;
+
+	return data;
+}
+
+static void rnpgbe_mac_check_link_n500(struct rnpgbe_mac_info *mac,
+				       rnpgbe_link_speed *speed,
+				       bool *link_up,
+				       bool link_up_wait_to_complete)
+{
+	struct rnpgbe_hw *hw = (struct rnpgbe_hw *)mac->back;
+	/* read link state and speed from the PHY over MDIO */
+	u32 data;
+#ifdef CONFIG_RNP_FPGA
+#define LINK_IS_UP (0x04)
+#define TEST_PHY (LINK_IS_UP)
+#else
+#define AUTONEGOTIATION_COMPLETE (0x20)
+#define LINK_IS_UP (0x04)
+#define TEST_PHY (AUTONEGOTIATION_COMPLETE | LINK_IS_UP)
+#endif
+
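+	/* reg 1 is the standard MII BMSR (link/autoneg status); reg 0 is the
+	 * BMCR, whose speed-select and duplex bits are decoded below
+	 */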
+	data = rnpgbe_mdio_read(mac, 1);
+	if ((data & TEST_PHY) == TEST_PHY) {
+		data = rnpgbe_mdio_read(mac, 0);
+#define DUPLEX_MODE (0x100)
+		if (data & DUPLEX_MODE) {
+			if (data & 0x40) {
+				*speed = RNP_LINK_SPEED_1GB_FULL;
+				hw->speed = SPEED_1000;
+			} else if (data & 0x2000) {
+				*speed = RNP_LINK_SPEED_100_FULL;
+				hw->speed = SPEED_100;
+			} else {
+				*speed = RNP_LINK_SPEED_10_FULL;
+				hw->speed = SPEED_10;
+			}
+		} else {
+			if (data & 0x40) {
+				*speed = RNP_LINK_SPEED_1GB_HALF;
+				hw->speed = SPEED_1000;
+			} else if (data & 0x2000) {
+				*speed = RNP_LINK_SPEED_100_HALF;
+				hw->speed = SPEED_100;
+			} else {
+				*speed = RNP_LINK_SPEED_10_HALF;
+				hw->speed = SPEED_10;
+			}
+		}
+		*link_up = true;
+		hw->link = true;
+	} else {
+		*link_up = false;
+		hw->link = false;
+		*speed = RNP_LINK_SPEED_UNKNOWN;
+	}
+}
+
+static void rnpgbe_mac_set_mac_n500(struct rnpgbe_mac_info *mac,
+				    u8 *addr, int index)
+{
+	u32 rar_low, rar_high = 0;
+
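+	/* the MAC address is split across two registers: bytes 0-3 in the low
+	 * word, bytes 4-5 plus the address-valid bit (RNP_RAH_AV) in the high
+	 * word
+	 */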
+	rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | ((u32)addr[2] << 16) |
+		   ((u32)addr[3] << 24));
+	rar_high = RNP_RAH_AV | ((u32)addr[4] | (u32)addr[5] << 8);
+	mac_wr32(mac, RNP500_MAC_UNICAST_HIGH(index), rar_high);
+	mac_wr32(mac, RNP500_MAC_UNICAST_LOW(index), rar_low);
+}
+
+static int rnpgbe_mac_mdio_read_n500(struct rnpgbe_mac_info *mac,
+				     u32 phyreg,
+				     u32 *regvalue)
+{
+	unsigned int mii_address = mac->mii.addr;
+	unsigned int mii_data = mac->mii.data;
+	u32 value = MII_BUSY;
+	int data = 0;
+	int phyaddr = mac->phy_addr;
+
+	value |= (phyaddr << mac->mii.addr_shift) & mac->mii.addr_mask;
+	value |= (phyreg << mac->mii.reg_shift) & mac->mii.reg_mask;
+	value |= (mac->clk_csr << mac->mii.clk_csr_shift) &
+		 mac->mii.clk_csr_mask;
+
+	if (poll_free_mdio(mac->mac_addr + mii_address, MII_BUSY, 100))
+		return -EBUSY;
+
+	mac_wr32(mac, mii_data, data);
+	mac_wr32(mac, mii_address, value);
+
+	if (poll_free_mdio(mac->mac_addr + mii_address, MII_BUSY, 100))
+		return -EBUSY;
+	/* Read the data from the MII data register */
+	data = (int)mac_rd32(mac, mii_data) & MII_DATA_MASK;
+
+	*regvalue = data;
+
+	return data;
+}
+
+static int rnpgbe_mac_mdio_write_n500(struct rnpgbe_mac_info *mac,
+				      int phyreg,
+				      int phydata)
+{
+	unsigned int mii_address = mac->mii.addr;
+	unsigned int mii_data = mac->mii.data;
+	u32 value = MII_BUSY;
+	int data = phydata;
+	int phyaddr = mac->phy_addr;
+
+	value |= (phyaddr << mac->mii.addr_shift) & mac->mii.addr_mask;
+	value |= (phyreg << mac->mii.reg_shift) & mac->mii.reg_mask;
+
+	value |= (mac->clk_csr << mac->mii.clk_csr_shift) &
+		 mac->mii.clk_csr_mask;
+	value |= MII_WRITE;
+
+	/* Wait until any existing MII operation is complete */
+	if (poll_free_mdio(mac->mac_addr + mii_address, MII_BUSY, 100))
+		return -EBUSY;
+	/* Set the MII address register to write */
+	mac_wr32(mac, mii_data, data);
+	mac_wr32(mac, mii_address, value);
+
+	/* Wait until any existing MII operation is complete */
+	return poll_free_mdio(mac->mac_addr + mii_address, MII_BUSY, 100);
+}
+
+static void rnpgbe_mac_pmt_n500(struct rnpgbe_mac_info *mac,
+				u32 mode, bool ncsi_en)
+{
+	unsigned int pmt = 0;
+
+	if (mode & RNP_WUFC_MAG) {
+		rnpgbe_dbg("GMAC: WOL Magic frame\n");
+		pmt |= magic_pkt_en;
+	}
+	if (mode & RNP_WUFC_EX) {
+		rnpgbe_dbg("GMAC: WOL on global unicast\n");
+		pmt |= global_unicast | wake_up_frame_en;
+	}
+	/* only power down the MAC when NC-SI is not in use */
+	if (!ncsi_en)
+		pmt |= power_down;
+
+	mac_wr32(mac, GMAC_PMT, pmt);
+}
+
+static void rnpgbe_mac_set_eee_mode_n500(struct rnpgbe_mac_info *mac,
+					 bool en_tx_lpi_clockgating)
+{
+	u32 value = 0;
+
+	/* TODO: handle en_tx_lpi_clockgating */
+
+	/*
+	 * Enable the link status receive on RGMII, SGMII or SMII
+	 * receive path and instruct the transmit to enter in LPI
+	 * state.
+	 */
+	value |= LPI_CTRL_STATUS_PLS;
+	value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+	mac_wr32(mac, GMAC_LPI_CTRL_STATUS, value);
+}
+
+static void rnpgbe_mac_reset_eee_mode_n500(struct rnpgbe_mac_info *mac)
+{
+	u32 value = 0;
+
+	value |= LPI_CTRL_STATUS_PLS;
+	value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA);
+	mac_wr32(mac, GMAC_LPI_CTRL_STATUS, value);
+}
+
+static void rnpgbe_mac_set_eee_timer_n500(struct rnpgbe_mac_info *mac,
+					  int ls, int tw)
+{
+	/* TW occupies bits 15:0 and LS bits 26:16 of the LPI timer register */
+	int value = (tw & 0xffff) | ((ls & 0x7ff) << 16);
+
+	/* Program the timers in the LPI timer control register:
+	 * LS: minimum time (ms) for which the link
+	 *  status from PHY should be ok before transmitting
+	 *  the LPI pattern.
+	 * TW: minimum time (us) for which the core waits
+	 *  after it has stopped transmitting the LPI pattern.
+	 */
+	mac_wr32(mac, GMAC_LPI_TIMER_CTRL, value);
+}
+
+static void rnpgbe_mac_set_eee_pls_n500(struct rnpgbe_mac_info *mac, int link)
+{
+	u32 value = 0;
+
+	value = mac_rd32(mac, GMAC_LPI_CTRL_STATUS);
+
+	if (link)
+		value |= LPI_CTRL_STATUS_PLS;
+	else
+		value &= ~LPI_CTRL_STATUS_PLS;
+
+	mac_wr32(mac, GMAC_LPI_CTRL_STATUS, value);
+}
+
+static u32 rnpgbe_mac_get_lpi_status_n500(struct rnpgbe_mac_info *mac)
+{
+	if (mac_rd32(mac, GMAC_INT_STATUS) & GMAC_INT_STATUS_LPIIS)
+		return mac_rd32(mac, GMAC_LPI_CTRL_STATUS);
+	else
+		return 0;
+}
+
+static struct rnpgbe_mac_operations mac_ops_n500 = {
+	.set_mac_rx = &rnpgbe_mac_set_rx_n500,
+	.set_mac_speed = &rnpgbe_mac_set_speed_n500,
+	.set_mac_fcs = &rnpgbe_mac_fcs_n500,
+	.set_fc_mode = &rnpgbe_mac_fc_mode_n500,
+	.check_link = &rnpgbe_mac_check_link_n500,
+	.set_mac = &rnpgbe_mac_set_mac_n500,
+	.mdio_write = &rnpgbe_mac_mdio_write_n500,
+	.mdio_read = &rnpgbe_mac_mdio_read_n500,
+	.pmt = &rnpgbe_mac_pmt_n500,
+	.set_eee_mode = rnpgbe_mac_set_eee_mode_n500,
+	.reset_eee_mode = rnpgbe_mac_reset_eee_mode_n500,
+	.set_eee_timer = rnpgbe_mac_set_eee_timer_n500,
+	.set_eee_pls = rnpgbe_mac_set_eee_pls_n500,
+	.get_lpi_status = rnpgbe_mac_get_lpi_status_n500,
+};
+
+static s32 rnpgbe_get_invariants_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_nic_info *nic = &hw->nic;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+	int i;
+
+	nic->nic_base_addr = hw->hw_addr + RNP500_NIC_BASE;
+	/* setup dma info */
+	dma->dma_base_addr = hw->hw_addr;
+	dma->dma_ring_addr = hw->hw_addr + RNP500_RING_BASE;
+	dma->max_tx_queues = RNP_N500_MAX_TX_QUEUES;
+	dma->max_rx_queues = RNP_N500_MAX_RX_QUEUES;
+	dma->back = hw;
+	memcpy(&hw->dma.ops, &dma_ops_n500, sizeof(hw->dma.ops));
+
+	/* setup eth info */
+	memcpy(&hw->eth.ops, &eth_ops_n500, sizeof(hw->eth.ops));
+
+	eth->eth_base_addr = hw->hw_addr + RNP500_ETH_BASE;
+	eth->back = hw;
+	eth->mc_filter_type = 0;
+	eth->mcft_size = RNP_N500_MC_TBL_SIZE;
+	eth->vft_size = RNP_N500_VFT_TBL_SIZE;
+	eth->num_rar_entries = RNP_N500_RAR_ENTRIES + NCSI_RAR_NUM;
+	eth->max_rx_queues = RNP_N500_MAX_RX_QUEUES;
+	eth->max_tx_queues = RNP_N500_MAX_TX_QUEUES;
+
+	/* setup mac info */
+	memcpy(&hw->mac.ops, &mac_ops_n500, sizeof(hw->mac.ops));
+	mac->mac_addr = hw->hw_addr + RNP500_MAC_BASE;
+	mac->back = hw;
+	mac->mac_type = mac_dwc_g;
+	/* TODO: move this into the eth info structure */
+	mac->mc_filter_type = 0;
+	mac->mcft_size = 2;
+	mac->vft_size = 1;
+	mac->num_rar_entries = RNP_N500_RAR_ENTRIES;
+	mac->max_rx_queues = RNP_N500_MAX_RX_QUEUES;
+	mac->max_tx_queues = RNP_N500_MAX_TX_QUEUES;
+	mac->max_msix_vectors = RNP_N500_MSIX_VECTORS;
+
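+	/* GMAC MII address register layout used below: PHY address in bits
+	 * 15:11, register number in bits 10:6, CSR clock select in bits 5:2
+	 */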
+	mac->mii.addr = GMAC_MII_ADDR;
+	mac->mii.data = GMAC_MII_DATA;
+	mac->mii.addr_shift = 11;
+	mac->mii.addr_mask = 0x0000F800;
+	mac->mii.reg_shift = 6;
+	mac->mii.reg_mask = 0x000007C0;
+	mac->mii.clk_csr_shift = 2;
+	mac->mii.clk_csr_mask = GENMASK(5, 2);
+	mac->clk_csr = 0x02; /* csr 25M */
+
+	mac->phy_addr = 0x11;
+
+	if (!hw->axi_mhz)
+		hw->usecstocount = 125;
+	else
+		hw->usecstocount = hw->axi_mhz;
+
+	printk(KERN_DEBUG "now hw->usecstocount is %d\n", hw->usecstocount);
+
+	hw->feature_flags |=
+		RNP_NET_FEATURE_SG | RNP_NET_FEATURE_TX_CHECKSUM |
+		RNP_NET_FEATURE_RX_CHECKSUM | RNP_NET_FEATURE_TSO |
+		RNP_NET_FEATURE_VLAN_FILTER | RNP_NET_FEATURE_VLAN_OFFLOAD |
+		RNP_NET_FEATURE_RX_NTUPLE_FILTER | RNP_NET_FEATURE_RX_HASH |
+		RNP_NET_FEATURE_USO | RNP_NET_FEATURE_RX_FCS |
+		RNP_NET_FEATURE_STAG_FILTER | RNP_NET_FEATURE_STAG_OFFLOAD;
+	/* may be supported in the future */
+	hw->feature_flags |= RNP_HW_FEATURE_EEE;
+
+	/* setup some fdir resource */
+	hw->min_length = RNP_MIN_MTU;
+	hw->max_length = RNP500_MAX_JUMBO_FRAME_SIZE;
+	hw->max_msix_vectors = RNP_N500_MSIX_VECTORS;
+	hw->num_rar_entries = RNP_N500_RAR_ENTRIES;
+	hw->fdir_mode = fdir_mode_tuple5;
+	hw->max_vfs = RNP_N500_MAX_VF;
+	hw->max_vfs_noari = 1;
+	hw->layer2_count = RNP500_MAX_LAYER2_FILTERS - 1;
+	hw->tuple5_count = RNP500_MAX_TUPLE5_FILTERS - 1;
+
+	/* the n500 supports magic-packet wake-on-LAN */
+	hw->wol_supported = WAKE_MAGIC;
+	hw->num_vebvlan_entries = 8;
+	hw->default_rx_queue = 0;
+	hw->rss_indir_tbl_num = RNP_N500_RSS_TBL_NUM;
+	hw->rss_tc_tbl_num = RNP_N500_RSS_TC_TBL_NUM;
+	/* the vf uses the last vfnum */
+	hw->vfnum = RNP_N500_MAX_VF - 1;
+
+	hw->sriov_ring_limit = 1;
+	hw->max_pf_macvlans = RNP_MAX_PF_MACVLANS_N500;
+
+	hw->veb_ring = RNP_N500_MAX_RX_QUEUES - 1;
+
+	memcpy(&hw->ops, &hw_ops_n500, sizeof(hw->ops));
+	hw->supported_link = RNP_LINK_SPEED_1GB_FULL;
+	mbx->mbx_feature |= MBX_FEATURE_NO_ZERO;
+
+	/* mbx setup */
+	mbx->vf2pf_mbox_vec_base = 0x28900;
+	mbx->cpu2pf_mbox_vec = 0x28b00;
+	mbx->pf_vf_shm_base = 0x29000;
+	mbx->mbx_mem_size = 64;
+	mbx->pf2vf_mbox_ctrl_base = 0x2a100;
+	mbx->pf_vf_mbox_mask_lo = 0x2a200;
+	mbx->pf_vf_mbox_mask_hi = 0;
+	mbx->cpu_pf_shm_base = 0x2d000;
+	mbx->pf2cpu_mbox_ctrl = 0x2e000;
+	mbx->cpu_pf_mbox_mask = 0x2e200;
+	mbx->cpu_vf_share_ram = 0x2b000;
+	mbx->share_size = 512;
+
+	adapter->priv_flags |= RNP_PRIV_FLAG_PAUSE_OWN;
+	adapter->drop_time = 100;
+
+	/* initialize the default pause flow, starting from auto */
+	hw->fc.requested_mode = PAUSE_AUTO;
+	hw->fc.pause_time = RNP_DEFAULT_FCPAUSE;
+	hw->autoneg = 1;
+
+#ifdef ETH_TP_MDI_AUTO
+	hw->tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+#endif
+	for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) {
+		hw->fc.high_water[i] = RNP500_DEFAULT_HIGH_WATER;
+		hw->fc.low_water[i] = RNP500_DEFAULT_LOW_WATER;
+	}
+	hw->eeprom.word_size = 10;
+
+	return 0;
+}
+
+static s32 rnpgbe_get_invariants_n210(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_nic_info *nic = &hw->nic;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+	int i;
+
+	nic->nic_base_addr = hw->hw_addr + RNP500_NIC_BASE;
+	/* setup dma info */
+	dma->dma_base_addr = hw->hw_addr;
+	dma->dma_ring_addr = hw->hw_addr + RNP500_RING_BASE;
+	dma->max_tx_queues = RNP_N500_MAX_TX_QUEUES;
+	dma->max_rx_queues = RNP_N500_MAX_RX_QUEUES;
+	dma->back = hw;
+	memcpy(&hw->dma.ops, &dma_ops_n500, sizeof(hw->dma.ops));
+
+	/* setup eth info */
+	memcpy(&hw->eth.ops, &eth_ops_n500, sizeof(hw->eth.ops));
+
+	eth->eth_base_addr = hw->hw_addr + RNP500_ETH_BASE;
+	eth->back = hw;
+	eth->mc_filter_type = 0;
+	eth->mcft_size = RNP_N500_MC_TBL_SIZE;
+	eth->vft_size = RNP_N500_VFT_TBL_SIZE;
+	eth->num_rar_entries = RNP_N500_RAR_ENTRIES + NCSI_RAR_NUM;
+	eth->max_rx_queues = RNP_N500_MAX_RX_QUEUES;
+	eth->max_tx_queues = RNP_N500_MAX_TX_QUEUES;
+
+	/* setup mac info */
+	memcpy(&hw->mac.ops, &mac_ops_n500, sizeof(hw->mac.ops));
+	mac->mac_addr = hw->hw_addr + RNP500_MAC_BASE;
+	mac->back = hw;
+	mac->mac_type = mac_dwc_g;
+	/* TODO: move this into the eth info structure */
+	mac->mc_filter_type = 0;
+	mac->mcft_size = 2;
+	mac->vft_size = 1;
+	mac->num_rar_entries = RNP_N500_RAR_ENTRIES;
+	mac->max_rx_queues = RNP_N500_MAX_RX_QUEUES;
+	mac->max_tx_queues = RNP_N500_MAX_TX_QUEUES;
+	mac->max_msix_vectors = RNP_N500_MSIX_VECTORS;
+
+	mac->mii.addr = GMAC_MII_ADDR;
+	mac->mii.data = GMAC_MII_DATA;
+	mac->mii.addr_shift = 11;
+	mac->mii.addr_mask = 0x0000F800;
+	mac->mii.reg_shift = 6;
+	mac->mii.reg_mask = 0x000007C0;
+	mac->mii.clk_csr_shift = 2;
+	mac->mii.clk_csr_mask = GENMASK(5, 2);
+	mac->clk_csr = 0x02; /* csr 25M */
+
+	mac->phy_addr = 0x11;
+
+	if (!hw->axi_mhz)
+		hw->usecstocount = 62;
+	else
+		hw->usecstocount = hw->axi_mhz;
+
+	hw->feature_flags |=
+		RNP_NET_FEATURE_SG | RNP_NET_FEATURE_TX_CHECKSUM |
+		RNP_NET_FEATURE_RX_CHECKSUM | RNP_NET_FEATURE_TSO |
+		RNP_NET_FEATURE_VLAN_FILTER | RNP_NET_FEATURE_VLAN_OFFLOAD |
+		RNP_NET_FEATURE_RX_NTUPLE_FILTER | RNP_NET_FEATURE_RX_HASH |
+		RNP_NET_FEATURE_USO | RNP_NET_FEATURE_RX_FCS |
+		RNP_NET_FEATURE_STAG_FILTER | RNP_NET_FEATURE_STAG_OFFLOAD;
+
+	hw->feature_flags |= RNP_HW_FEATURE_EEE;
+	/* setup some fdir resource */
+	hw->min_length = RNP_MIN_MTU;
+	hw->max_length = RNP500_MAX_JUMBO_FRAME_SIZE;
+	hw->max_msix_vectors = RNP_N500_MSIX_VECTORS;
+	hw->num_rar_entries = RNP_N500_RAR_ENTRIES;
+	hw->fdir_mode = fdir_mode_tuple5;
+	hw->max_vfs = RNP_N500_MAX_VF;
+	/* the n210 has a single PF, so up to 7 VFs can be used without ARI */
+	hw->max_vfs_noari = 7;
+	hw->layer2_count = RNP500_MAX_LAYER2_FILTERS - 1;
+	hw->tuple5_count = RNP500_MAX_TUPLE5_FILTERS - 1;
+
+	/* magic-packet wake-on-LAN is supported */
+	hw->wol_supported = WAKE_MAGIC;
+
+	hw->num_vebvlan_entries = 8;
+	hw->default_rx_queue = 0;
+	hw->rss_indir_tbl_num = RNP_N500_RSS_TBL_NUM;
+	hw->rss_tc_tbl_num = RNP_N500_RSS_TC_TBL_NUM;
+	/* the vf uses the last vfnum */
+	hw->vfnum = RNP_N500_MAX_VF - 1;
+
+	hw->sriov_ring_limit = 1;
+	hw->max_pf_macvlans = RNP_MAX_PF_MACVLANS_N500;
+	hw->veb_ring = RNP_N500_MAX_RX_QUEUES - 1;
+	memcpy(&hw->ops, &hw_ops_n500, sizeof(hw->ops));
+	hw->supported_link = RNP_LINK_SPEED_1GB_FULL;
+	mbx->mbx_feature |= MBX_FEATURE_NO_ZERO;
+
+	/* mbx setup */
+	mbx->vf2pf_mbox_vec_base = 0x29200;
+	mbx->cpu2pf_mbox_vec = 0x29400;
+	mbx->pf_vf_shm_base = 0x29900;
+	mbx->mbx_mem_size = 64;
+	mbx->pf2vf_mbox_ctrl_base = 0x2aa00;
+	mbx->pf_vf_mbox_mask_lo = 0x2ab00;
+	mbx->pf_vf_mbox_mask_hi = 0;
+	mbx->cpu_pf_shm_base = 0x2d900;
+	mbx->pf2cpu_mbox_ctrl = 0x2e900;
+	mbx->cpu_pf_mbox_mask = 0x2eb00;
+	mbx->cpu_vf_share_ram = 0x2b900;
+	mbx->share_size = 512;
+
+	adapter->priv_flags |= RNP_PRIV_FLAG_PAUSE_OWN;
+	adapter->drop_time = 100;
+
+	/* initialize the default pause flow */
+	hw->fc.requested_mode = PAUSE_AUTO;
+	hw->fc.pause_time = RNP_DEFAULT_FCPAUSE;
+	hw->autoneg = 1;
+
+	/* we start from auto mode */
+#ifdef ETH_TP_MDI_AUTO
+	hw->tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+#endif
+	for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) {
+		hw->fc.high_water[i] = RNP500_DEFAULT_HIGH_WATER;
+		hw->fc.low_water[i] = RNP500_DEFAULT_LOW_WATER;
+	}
+	hw->eeprom.word_size = 10;
+
+	return 0;
+}
+
+struct rnpgbe_info rnpgbe_n500_info = {
+	.one_pf_with_two_dma = false,
+	.total_queue_pair_cnts = RNP_N500_MAX_TX_QUEUES,
+	.adapter_cnt = 1,
+	.rss_type = rnpgbe_rss_n500,
+	.hw_type = rnpgbe_hw_n500,
+	.get_invariants = &rnpgbe_get_invariants_n500,
+	.mac_ops = &mac_ops_n500,
+	.eeprom_ops = NULL,
+	.mbx_ops = &rnpgbe_mbx_ops_generic,
+};
+
+struct rnpgbe_info rnpgbe_n210_info = {
+	.one_pf_with_two_dma = false,
+	.total_queue_pair_cnts = RNP_N500_MAX_TX_QUEUES,
+	.adapter_cnt = 1,
+	.rss_type = rnpgbe_rss_n500,
+	.hw_type = rnpgbe_hw_n210,
+	.get_invariants = &rnpgbe_get_invariants_n210,
+	.mac_ops = &mac_ops_n500,
+	.eeprom_ops = NULL,
+	.mbx_ops = &rnpgbe_mbx_ops_generic,
+};
+
+struct rnpgbe_info rnpgbe_n210L_info = {
+	.one_pf_with_two_dma = false,
+	.total_queue_pair_cnts = RNP_N500_MAX_TX_QUEUES,
+	.adapter_cnt = 1,
+	.rss_type = rnpgbe_rss_n500,
+	.hw_type = rnpgbe_hw_n210L,
+	.get_invariants = &rnpgbe_get_invariants_n210,
+	.mac_ops = &mac_ops_n500,
+	.eeprom_ops = NULL,
+	.mbx_ops = &rnpgbe_mbx_ops_generic,
+};
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_common.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_common.c
new file mode 100755
index 0000000000000000000000000000000000000000..fe4fa7518b732d11791ec67ed27419d809234634
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_common.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "rnpgbe.h"
+#include "rnpgbe_common.h"
+#include "rnpgbe_mbx.h"
+
+unsigned int rnpgbe_loglevel;
+module_param(rnpgbe_loglevel, uint, 0600);
+MODULE_PARM_DESC(rnpgbe_loglevel, "Debug log level bitmask for the rnpgbe driver");
+
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_common.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_common.h
new file mode 100755
index 0000000000000000000000000000000000000000..0cd8450a9cef81dd1f3d1b06957371a190adaab7
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_common.h
@@ -0,0 +1,385 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPGBE_COMMON_H_
+#define _RNPGBE_COMMON_H_
+
+#include 
+#include 
+#include "rnpgbe_type.h"
+#include "rnpgbe.h"
+#include "rnpgbe_regs.h"
+#include "rnp_compat.h"
+
+struct rnpgbe_adapter;
+
+#define TRACE() printk(KERN_DEBUG "==[ %s %d ] ==\n", __func__, __LINE__)
+
+#ifdef CONFIG_RNP_RX_DEBUG
+#define rx_debug_printk printk
+#define rx_buf_dump buf_dump
+#define rx_dbg(fmt, args...)                                                   \
+	printk(KERN_DEBUG "[ %s:%d ] " fmt, __func__, __LINE__, ##args)
+#else /* CONFIG_RNP_RX_DEBUG */
+#define rx_debug_printk(fmt, args...)
+#define rx_buf_dump(a, b, c)
+#define rx_dbg(fmt, args...)
+#endif /* CONFIG_RNP_RX_DEBUG */
+
+#ifdef CONFIG_RNP_TX_DEBUG
+#define desc_hex_dump(msg, buf, len)                                           \
+	print_hex_dump(KERN_WARNING, msg, DUMP_PREFIX_OFFSET, 16, 1, (buf),    \
+		       (len), false)
+#define rnpgbe_skb_dump _rnpgbe_skb_dump
+#define tx_dbg(fmt, args...)                                                   \
+	printk(KERN_DEBUG "[ %s:%d ] " fmt, __func__, __LINE__, ##args)
+#else /* CONFIG_RNP_TX_DEBUG */
+#define desc_hex_dump(msg, buf, len)
+#define rnpgbe_skb_dump(skb, full_pkt)
+#define tx_dbg(fmt, args...)
+#endif /* CONFIG_RNP_TX_DEBUG */
+
+#ifdef DEBUG
+#define dbg(fmt, args...)                                                      \
+	printk(KERN_DEBUG "[ %s:%d ] " fmt, __func__, __LINE__, ##args)
+#else /* DEBUG */
+#define dbg(fmt, args...)
+#endif /* DEBUG */
+
+#ifdef CONFIG_RNP_VF_DEBUG
+#define vf_dbg(fmt, args...)                                                   \
+	printk(KERN_DEBUG "[ %s:%d ] " fmt, __func__, __LINE__, ##args)
+#else /* CONFIG_RNP_VF_DEBUG */
+#define vf_dbg(fmt, args...)
+#endif /* CONFIG_RNP_VF_DEBUG */
+
+/* ================= registers  read/write helper ===== */
+#define p_rnpgbe_wr_reg(reg, val)                                              \
+	do {                                                                   \
+		printk(KERN_DEBUG " wr-reg: %p <== 0x%08x \t#%-4d %s\n",       \
+		       (reg), (val), __LINE__, __FILE__);                      \
+		iowrite32((val), (void *)(reg));                               \
+	} while (0)
+
+static inline unsigned int prnpgbe_rd_reg(void *reg)
+{
+	unsigned int v = ioread32((void *)(reg));
+
+	printk(KERN_DEBUG "  %p => 0x%08x\n", reg, v);
+	return v;
+}
+
+#ifdef IO_PRINT
+static inline unsigned int rnpgbe_rd_reg(void *reg)
+{
+	unsigned int v = ioread32((void *)(reg));
+
+	dbg(" rd-reg: %p <== 0x%08x\n", reg, v);
+	return v;
+}
+#define rnpgbe_wr_reg(reg, val)                                                \
+	do {                                                                   \
+		dbg(" wr-reg: %p <== 0x%08x \t#%-4d %s\n", (reg), (val),       \
+		    __LINE__, __FILE__);                                       \
+		iowrite32((val), (void *)(reg));                               \
+	} while (0)
+#else /* IO_PRINT */
+#define rnpgbe_rd_reg(reg) readl((void *)(reg))
+#define rnpgbe_wr_reg(reg, val) writel((val), (void *)(reg))
+#endif /* IO_PRINT */
+
+#define rd32(hw, off) rnpgbe_rd_reg((hw)->hw_addr + (off))
+#define wr32(hw, off, val) rnpgbe_wr_reg((hw)->hw_addr + (off), (val))
+
+#define nic_rd32(nic, off) rnpgbe_rd_reg((nic)->nic_base_addr + (off))
+#define nic_wr32(nic, off, val)                                                \
+	rnpgbe_wr_reg((nic)->nic_base_addr + (off), (val))
+
+#define dma_rd32(dma, off) rnpgbe_rd_reg((dma)->dma_base_addr + (off))
+#define dma_wr32(dma, off, val)                                                \
+	rnpgbe_wr_reg((dma)->dma_base_addr + (off), (val))
+
+#define dma_ring_rd32(dma, off) rnpgbe_rd_reg((dma)->dma_ring_addr + (off))
+#define dma_ring_wr32(dma, off, val)                                           \
+	rnpgbe_wr_reg((dma)->dma_ring_addr + (off), (val))
+
+#define eth_rd32(eth, off) rnpgbe_rd_reg((eth)->eth_base_addr + (off))
+#define eth_wr32(eth, off, val)                                                \
+	rnpgbe_wr_reg((eth)->eth_base_addr + (off), (val))
+
+#define mac_rd32(mac, off) rnpgbe_rd_reg((mac)->mac_addr + (off))
+#define mac_wr32(mac, off, val) rnpgbe_wr_reg((mac)->mac_addr + (off), (val))
+#ifdef debug_ring
+static inline unsigned int rnpgbe_rd_reg_1(int ring, u32 off, void *reg)
+{
+	unsigned int v = ioread32((void *)(reg));
+
+	printk(KERN_DEBUG "%d rd-reg: %x <== 0x%08x\n", ring, off, v);
+	return v;
+}
+
+#define ring_rd32(ring, off)                                                   \
+	rnpgbe_rd_reg_1(ring->rnpgbe_queue_idx, off, (ring)->ring_addr + (off))
+#define ring_wr32(ring, off, val)                                              \
+	rnpgbe_wr_reg((ring)->ring_addr + (off), (val))
+#else /* debug_ring */
+#define ring_rd32(ring, off) rnpgbe_rd_reg((ring)->ring_addr + (off))
+#define ring_wr32(ring, off, val)                                              \
+	rnpgbe_wr_reg((ring)->ring_addr + (off), (val))
+#endif /* debug_ring */
+
+#define pwr32(hw, off, val) p_rnpgbe_wr_reg((hw)->hw_addr + (off), (val))
+
+#define rnpgbe_mbx_rd(hw, off) rnpgbe_rd_reg((hw)->ring_msix_base + (off))
+#define rnpgbe_mbx_wr(hw, off, val)                                            \
+	rnpgbe_wr_reg((hw)->ring_msix_base + (off), val)
+
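+/* Enable or disable hardware VLAN stripping for a single ring: one bit per
+ * ring, 32 rings per RNP_ETH_VLAN_VME_REG register.
+ */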
+static inline void hw_queue_strip_rx_vlan(struct rnpgbe_hw *hw, u8 ring_num,
+					  bool enable)
+{
+	u32 reg = RNP_ETH_VLAN_VME_REG(ring_num / 32);
+	u32 offset = ring_num % 32;
+	u32 data = rd32(hw, reg);
+
+	if (enable)
+		data |= (1 << offset);
+	else
+		data &= ~(1 << offset);
+	wr32(hw, reg, data);
+}
+
+#define rnpgbe_set_reg_bit(hw, reg_def, bit)                                   \
+	do {                                                                   \
+		u32 reg = reg_def;                                             \
+		u32 value = rd32(hw, reg);                                     \
+		dbg("before set  %x %x\n", reg, value);                        \
+		value |= (0x01 << bit);                                        \
+		dbg("after set %x %x\n", reg, value);                          \
+		wr32(hw, reg, value);                                          \
+	} while (0)
+
+#define rnpgbe_clr_reg_bit(hw, reg_def, bit)                                   \
+	do {                                                                   \
+		u32 reg = reg_def;                                             \
+		u32 value = rd32(hw, reg);                                     \
+		dbg("before clr %x %x\n", reg, value);                         \
+		value &= (~(0x01 << bit));                                     \
+		dbg("after clr %x %x\n", reg, value);                          \
+		wr32(hw, reg, value);                                          \
+	} while (0)
+
+#define rnpgbe_vlan_filter_on(hw)                                              \
+	rnpgbe_set_reg_bit(hw, RNP_ETH_VLAN_FILTER_ENABLE, 30)
+#define rnpgbe_vlan_filter_off(hw)                                             \
+	rnpgbe_clr_reg_bit(hw, RNP_ETH_VLAN_FILTER_ENABLE, 30)
+
+#define DPRINTK(nlevel, klevel, fmt, args...)                                  \
+	((NETIF_MSG_##nlevel & adapter->msg_enable) ?                          \
+		 (void)(netdev_printk(KERN_##klevel, adapter->netdev, fmt,     \
+				      ##args)) :                               \
+		 NULL)
+
+/* ==== log helper === */
+#ifdef HW_DEBUG
+#define hw_dbg(hw, fmt, args...) printk(KERN_DEBUG "hw-dbg : " fmt, ##args)
+#define eth_dbg(eth, fmt, args...) printk(KERN_DEBUG "hw-dbg : " fmt, ##args)
+#else
+#define hw_dbg(hw, fmt, args...)
+#define eth_dbg(hw, fmt, args...)
+#endif
+
+#ifdef RNP_DEBUG_OPEN
+#define rnpgbe_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args)
+#else /* RNP_DEBUG_OPEN */
+#define rnpgbe_dbg(fmt, args...)
+#endif /* RNP_DEBUG_OPEN */
+#define rnpgbe_info(fmt, args...) printk(KERN_DEBUG "rnp-info: " fmt, ##args)
+#define rnpgbe_warn(fmt, args...) printk(KERN_DEBUG "rnp-warn: " fmt, ##args)
+#define rnpgbe_err(fmt, args...) printk(KERN_ERR "rnp-err : " fmt, ##args)
+
+#define e_info(msglvl, format, arg...)                                         \
+	netif_info(adapter, msglvl, adapter->netdev, format, ##arg)
+#define e_err(msglvl, format, arg...)                                          \
+	netif_err(adapter, msglvl, adapter->netdev, format, ##arg)
+#define e_warn(msglvl, format, arg...)                                         \
+	netif_warn(adapter, msglvl, adapter->netdev, format, ##arg)
+#define e_crit(msglvl, format, arg...)                                         \
+	netif_crit(adapter, msglvl, adapter->netdev, format, ##arg)
+
+#define e_dev_info(format, arg...) dev_info(&adapter->pdev->dev, format, ##arg)
+#define e_dev_warn(format, arg...) dev_warn(&adapter->pdev->dev, format, ##arg)
+#define e_dev_err(format, arg...) dev_err(&adapter->pdev->dev, format, ##arg)
+
+#ifdef CONFIG_RNP_TX_DEBUG
+static inline void buf_dump_line(const char *msg, int line, void *buf, int len)
+{
+	int i, offset = 0;
+	int msg_len = 1024;
+	char msg_buf[1024];
+	u8 *ptr = (u8 *)buf;
+
+	offset += snprintf(msg_buf + offset, msg_len - offset,
+			   "=== %s #%d line:%d buf:%p==\n000: ", msg, len, line,
+			   buf);
+
+	for (i = 0; i < len; ++i) {
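+		/* flush the partially built line buffer before it can overflow */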
+		if ((i != 0) && (i % 16) == 0 && (offset >= (1024 - 10 * 16))) {
+			printk(KERN_DEBUG "%s\n", msg_buf);
+			offset = 0;
+		}
+
+		if ((i != 0) && (i % 16) == 0) {
+			offset += snprintf(msg_buf + offset, msg_len - offset,
+					   "\n%03x: ", i);
+		}
+		offset += snprintf(msg_buf + offset, msg_len - offset,
+				   "%02x ", ptr[i]);
+	}
+
+	offset += snprintf(msg_buf + offset, msg_len - offset, "\n");
+	printk(KERN_DEBUG "%s\n", msg_buf);
+}
+#else /* CONFIG_RNP_TX_DEBUG */
+#define buf_dump_line(msg, line, buf, len)
+#endif /* CONFIG_RNP_TX_DEBUG */
+
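+/*
+ * Pack the descriptor control fields: vlan_cmd into bits 63:32,
+ * mac_ip_len into bits 31:16 and the buffer size into bits 15:0.
+ */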
+static inline __le64 build_ctob(u32 vlan_cmd, u32 mac_ip_len, u32 size)
+{
+	return cpu_to_le64(((u64)vlan_cmd << 32) | ((u64)mac_ip_len << 16) |
+			   ((u64)size));
+}
+
+static inline void buf_dump(const char *msg, void *buf, int len)
+{
+	int i, offset = 0;
+	int msg_len = 1024;
+	char msg_buf[1024];
+	u8 *ptr = (u8 *)buf;
+
+	offset += snprintf(msg_buf + offset, msg_len - offset,
+			   "=== %s #%d ==\n000: ", msg, len);
+
+	for (i = 0; i < len; ++i) {
+		if ((i != 0) && (i % 16) == 0 && (offset >= (1024 - 10 * 16))) {
+			printk(KERN_DEBUG "%s\n", msg_buf);
+			offset = 0;
+		}
+
+		if ((i != 0) && (i % 16) == 0) {
+			offset += snprintf(msg_buf + offset, msg_len - offset,
+					   "\n%03x: ", i);
+		}
+		offset += snprintf(msg_buf + offset, msg_len - offset,
+				   "%02x ", ptr[i]);
+	}
+
+	offset += snprintf(msg_buf + offset, msg_len - offset, "\n=== done ==\n");
+	printk(KERN_DEBUG "%s\n", msg_buf);
+}
+
+#ifndef NO_SKB_DUMP
+static inline void _rnpgbe_skb_dump(const struct sk_buff *skb, bool full_pkt)
+{
+	static atomic_t can_dump_full = ATOMIC_INIT(5);
+#ifdef DEBUG
+	struct skb_shared_info *sh = skb_shinfo(skb);
+#endif /* DEBUG */
+	struct net_device *dev = skb->dev;
+	struct sk_buff *list_skb;
+	bool has_mac, has_trans;
+	int headroom, tailroom;
+	int i, len, seg_len;
+	const char *level = KERN_WARNING;
+
+	if (full_pkt)
+		full_pkt = atomic_dec_if_positive(&can_dump_full) >= 0;
+
+	if (full_pkt)
+		len = skb->len;
+	else
+		len = min_t(int, skb->len, MAX_HEADER + 128);
+
+	headroom = skb_headroom(skb);
+	tailroom = skb_tailroom(skb);
+
+	has_mac = skb_mac_header_was_set(skb);
+	has_trans = skb_transport_header_was_set(skb);
+
+	dbg("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
+	    "mac=(%d,%d) net=(%d,%d) trans=%d\n"
+	    "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
+	    "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
+	    "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
+	    level, skb->len, headroom, skb_headlen(skb), tailroom,
+	    has_mac ? skb->mac_header : -1,
+	    has_mac ? (skb->network_header - skb->mac_header) : -1,
+	    skb->network_header, has_trans ? skb_network_header_len(skb) : -1,
+	    has_trans ? skb->transport_header : -1, sh->tx_flags, sh->nr_frags,
+	    sh->gso_size, sh->gso_type, sh->gso_segs, skb->csum, skb->ip_summed,
+	    skb->csum_complete_sw, skb->csum_valid, skb->csum_level, skb->hash,
+	    skb->sw_hash, skb->l4_hash, ntohs(skb->protocol), skb->pkt_type,
+	    skb->skb_iif);
+
+	if (dev)
+		dbg("%sdev name=%s feat=0x%pNF\n", level, dev->name,
+		    &dev->features);
+
+	seg_len = min_t(int, skb_headlen(skb), len);
+	if (seg_len)
+		print_hex_dump(level, "skb linear:   ", DUMP_PREFIX_OFFSET, 16,
+			       1, skb->data, seg_len, false);
+	len -= seg_len;
+
+	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		u32 p_len;
+		struct page *p;
+		u8 *vaddr;
+
+		p = skb_frag_address(frag);
+		p_len = skb_frag_size(frag);
+		seg_len = min_t(int, p_len, len);
+		vaddr = kmap_atomic(p);
+		print_hex_dump(level, "skb frag:     ", DUMP_PREFIX_OFFSET, 16,
+			       1, vaddr, seg_len, false);
+		kunmap_atomic(vaddr);
+		len -= seg_len;
+		if (!len)
+			break;
+	}
+
+	if (full_pkt && skb_has_frag_list(skb)) {
+		dbg("skb fraglist:\n");
+		skb_walk_frags(skb, list_skb) _rnpgbe_skb_dump(list_skb, true);
+	}
+}
+#endif /* NO_SKB_DUMP */
+
+enum RNP_LOG_EVT {
+	LOG_MBX_IN,
+	LOG_MBX_OUT,
+	LOG_MBX_MSG_IN,
+	LOG_MBX_MSG_OUT,
+	LOG_LINK_EVENT,
+	LOG_ADPT_STAT,
+	LOG_MBX_ABLI,
+	LOG_MBX_LINK_STAT,
+	LOG_MBX_IFUP_DOWN,
+	LOG_MBX_LOCK,
+	LOG_ETHTOOL,
+	LOG_PHY,
+};
+
+#define MII_BUSY 0x00000001
+#define MII_WRITE 0x00000002
+#define MII_DATA_MASK GENMASK(15, 0)
+
+extern unsigned int rnpgbe_loglevel;
+
+#define rnpgbe_logd(evt, fmt, args...)                                         \
+	do {                                                                   \
+		if (BIT(evt) & rnpgbe_loglevel) {                              \
+			printk(KERN_DEBUG fmt, ##args);                        \
+		}                                                              \
+	} while (0)
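+/*
+ * Example: rnpgbe_logd(LOG_MBX_IN, "mbx in\n") only prints when
+ * BIT(LOG_MBX_IN) is set in rnpgbe_loglevel.
+ */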
+
+#endif /* _RNPGBE_COMMON_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_debugfs.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_debugfs.c
new file mode 100755
index 0000000000000000000000000000000000000000..4e5eb8b8b76391b405b6cc1010b93c65c9935fb0
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_debugfs.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
+#include "rnpgbe.h"
+
+#ifdef HAVE_RNP_DEBUG_FS
+static struct dentry *rnpgbe_dbg_root;
+static char rnpgbe_dbg_reg_ops_buf[256] = "";
+
+/**
+ * rnpgbe_dbg_reg_ops_read - read for reg_ops datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t rnpgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer,
+				       size_t count, loff_t *ppos)
+{
+	struct rnpgbe_adapter *adapter = filp->private_data;
+	char *buf;
+	int len;
+
+	/* don't allow partial reads */
+	if (*ppos != 0)
+		return 0;
+
+	buf = kasprintf(GFP_KERNEL, "%s: %s\n", adapter->name,
+			rnpgbe_dbg_reg_ops_buf);
+	if (!buf)
+		return -ENOMEM;
+
+	if (count < strlen(buf)) {
+		kfree(buf);
+		return -ENOSPC;
+	}
+
+	len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+
+	kfree(buf);
+	return len;
+}
+
+/**
+ * rnpgbe_dbg_reg_ops_write - write into reg_ops datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t rnpgbe_dbg_reg_ops_write(struct file *filp,
+					const char __user *buffer, size_t count,
+					loff_t *ppos)
+{
+	struct rnpgbe_adapter *adapter = filp->private_data;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int len;
+
+	/* don't allow partial writes */
+	if (*ppos != 0)
+		return 0;
+	if (count >= sizeof(rnpgbe_dbg_reg_ops_buf))
+		return -ENOSPC;
+
+	len = simple_write_to_buffer(rnpgbe_dbg_reg_ops_buf,
+				     sizeof(rnpgbe_dbg_reg_ops_buf) - 1, ppos,
+				     buffer, count);
+	if (len < 0)
+		return len;
+
+	rnpgbe_dbg_reg_ops_buf[len] = '\0';
+
+	if (strncmp(rnpgbe_dbg_reg_ops_buf, "write", 5) == 0) {
+		u32 reg, value;
+		int cnt;
+
+		cnt = sscanf(&rnpgbe_dbg_reg_ops_buf[5], "%x %x", &reg, &value);
+		if (cnt == 2) {
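+			/* addresses >= 0x30000000 go through the firmware mailbox */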
+			if (reg >= 0x30000000) {
+				rnpgbe_mbx_reg_write(hw, reg, value);
+				e_dev_info("write: 0x%08x = 0x%08x\n", reg,
+					   value);
+			} else {
+				rnpgbe_wr_reg(hw->hw_addr + reg, value);
+				value = rnpgbe_rd_reg(hw->hw_addr + reg);
+				e_dev_info("write: 0x%08x = 0x%08x\n", reg,
+					   value);
+			}
+		} else {
+			e_dev_info("write <reg> <value>\n");
+		}
+	} else if (strncmp(rnpgbe_dbg_reg_ops_buf, "read", 4) == 0) {
+		u32 reg, value;
+		int cnt;
+
+		cnt = sscanf(&rnpgbe_dbg_reg_ops_buf[4], "%x", &reg);
+		if (cnt == 1) {
+			if (reg >= 0x30000000)
+				value = rnpgbe_mbx_fw_reg_read(hw, reg);
+			else
+				value = rnpgbe_rd_reg(hw->hw_addr + reg);
+
+			snprintf(rnpgbe_dbg_reg_ops_buf,
+				 sizeof(rnpgbe_dbg_reg_ops_buf),
+				 "0x%08x: 0x%08x", reg, value);
+			e_dev_info("read 0x%08x = 0x%08x\n", reg, value);
+		} else {
+			e_dev_info("read <reg>\n");
+		}
+	} else {
+		e_dev_info("Unknown command %s\n", rnpgbe_dbg_reg_ops_buf);
+		e_dev_info("Available commands:\n");
+		e_dev_info("   read <reg>\n");
+		e_dev_info("   write <reg> <value>\n");
+	}
+	return count;
+}
+
+static const struct file_operations rnpgbe_dbg_reg_ops_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = rnpgbe_dbg_reg_ops_read,
+	.write = rnpgbe_dbg_reg_ops_write,
+};
+
+static char rnpgbe_dbg_netdev_ops_buf[256] = "";
+
+/**
+ * rnpgbe_dbg_netdev_ops_read - read for netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t rnpgbe_dbg_netdev_ops_read(struct file *filp,
+					  char __user *buffer, size_t count,
+					  loff_t *ppos)
+{
+	struct rnpgbe_adapter *adapter = filp->private_data;
+	char *buf;
+	int len;
+
+	/* don't allow partial reads */
+	if (*ppos != 0)
+		return 0;
+
+	buf = kasprintf(GFP_KERNEL, "%s: %s\n", adapter->name,
+			rnpgbe_dbg_netdev_ops_buf);
+	if (!buf)
+		return -ENOMEM;
+
+	if (count < strlen(buf)) {
+		kfree(buf);
+		return -ENOSPC;
+	}
+
+	len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+
+	kfree(buf);
+	return len;
+}
+
+/**
+ * rnpgbe_dbg_netdev_ops_write - write into netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t rnpgbe_dbg_netdev_ops_write(struct file *filp,
+					   const char __user *buffer,
+					   size_t count, loff_t *ppos)
+{
+	struct rnpgbe_adapter *adapter = filp->private_data;
+	int len;
+
+	/* don't allow partial writes */
+	if (*ppos != 0)
+		return 0;
+	if (count >= sizeof(rnpgbe_dbg_netdev_ops_buf))
+		return -ENOSPC;
+
+	len = simple_write_to_buffer(rnpgbe_dbg_netdev_ops_buf,
+				     sizeof(rnpgbe_dbg_netdev_ops_buf) - 1,
+				     ppos, buffer, count);
+	if (len < 0)
+		return len;
+
+	rnpgbe_dbg_netdev_ops_buf[len] = '\0';
+
+	if (strncmp(rnpgbe_dbg_netdev_ops_buf, "stat", 4) == 0) {
+		rnpgbe_info("adapter->stat=0x%lx\n", adapter->state);
+		rnpgbe_info("adapter->tx_timeout_count=%d\n",
+			    adapter->tx_timeout_count);
+	} else if (strncmp(rnpgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
+#ifdef HAVE_NET_DEVICE_OPS
+#ifdef HAVE_TX_TIMEOUT_TXQUEUE
+		adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev,
+							    UINT_MAX);
+#else
+		adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);
+#endif
+#else
+		adapter->netdev->tx_timeout(adapter->netdev);
+#endif
+		e_dev_info("tx_timeout called\n");
+	} else {
+		e_dev_info("Unknown command: %s\n", rnpgbe_dbg_netdev_ops_buf);
+		e_dev_info("Available commands:\n");
+		e_dev_info("    tx_timeout\n");
+	}
+	return count;
+}
+
+static const struct file_operations rnpgbe_dbg_netdev_ops_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = rnpgbe_dbg_netdev_ops_read,
+	.write = rnpgbe_dbg_netdev_ops_write,
+};
+
+/**
+ * rnpgbe_dbg_netdev_temp_read - read temperature from hw
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t rnpgbe_dbg_netdev_temp_read(struct file *filp,
+					   char __user *buffer, size_t count,
+					   loff_t *ppos)
+{
+	struct rnpgbe_adapter *adapter = filp->private_data;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	char *buf;
+	int len;
+	int temp = 0, voltage = 0;
+
+	/* don't allow partial reads */
+	if (*ppos != 0)
+		return 0;
+
+	temp = rnpgbe_mbx_get_temp(hw, &voltage);
+
+	buf = kasprintf(GFP_KERNEL, "%s: temp: %d oC voltage:%d mV\n",
+			adapter->name, temp, voltage);
+	if (!buf)
+		return -ENOMEM;
+
+	if (count < strlen(buf)) {
+		kfree(buf);
+		return -ENOSPC;
+	}
+
+	len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+
+	kfree(buf);
+	return len;
+}
+static const struct file_operations rnpgbe_dbg_netdev_temp = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = rnpgbe_dbg_netdev_temp_read,
+};
+
+/**
+ * rnpgbe_dbg_adapter_init - setup the debugfs directory for the adapter
+ * @adapter: the adapter that is starting up
+ **/
+void rnpgbe_dbg_adapter_init(struct rnpgbe_adapter *adapter)
+{
+	const char *name = adapter->name;
+	struct dentry *pfile;
+
+	adapter->rnpgbe_dbg_adapter = debugfs_create_dir(name, rnpgbe_dbg_root);
+	if (adapter->rnpgbe_dbg_adapter) {
+		pfile = debugfs_create_file("reg_ops", 0600,
+					    adapter->rnpgbe_dbg_adapter,
+					    adapter, &rnpgbe_dbg_reg_ops_fops);
+		if (!pfile)
+			e_dev_err("debugfs reg_ops for %s failed\n", name);
+		pfile = debugfs_create_file("netdev_ops", 0600,
+					    adapter->rnpgbe_dbg_adapter,
+					    adapter,
+					    &rnpgbe_dbg_netdev_ops_fops);
+		if (!pfile)
+			e_dev_err("debugfs netdev_ops for %s failed\n", name);
+
+		pfile = debugfs_create_file("temp", 0600,
+					    adapter->rnpgbe_dbg_adapter,
+					    adapter, &rnpgbe_dbg_netdev_temp);
+		if (!pfile)
+			e_dev_err("debugfs temp for %s failed\n", name);
+	} else {
+		e_dev_err("debugfs entry for %s failed\n", name);
+	}
+}
+
+/**
+ * rnpgbe_dbg_adapter_exit - clear out the adapter's debugfs entries
+ * @adapter: the adapter that is starting up
+ **/
+void rnpgbe_dbg_adapter_exit(struct rnpgbe_adapter *adapter)
+{
+	debugfs_remove_recursive(adapter->rnpgbe_dbg_adapter);
+	adapter->rnpgbe_dbg_adapter = NULL;
+}
+
+/**
+ * rnpgbe_dbg_init - start up debugfs for the driver
+ **/
+void rnpgbe_dbg_init(void)
+{
+	rnpgbe_dbg_root = debugfs_create_dir(rnpgbe_driver_name, NULL);
+	if (rnpgbe_dbg_root == NULL)
+		pr_err("init of debugfs failed\n");
+}
+
+/**
+ * rnpgbe_dbg_exit - clean out the driver's debugfs entries
+ **/
+void rnpgbe_dbg_exit(void)
+{
+	debugfs_remove_recursive(rnpgbe_dbg_root);
+}
+#endif
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ethtool.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ethtool.c
new file mode 100755
index 0000000000000000000000000000000000000000..738bc0d712114c5baed3716d33f9fe0b178d726a
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ethtool.c
@@ -0,0 +1,2548 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "rnpgbe.h"
+#include "rnpgbe_phy.h"
+#include "rnpgbe_sriov.h"
+#include "rnpgbe_mbx_fw.h"
+#include "rnpgbe_ethtool.h"
+#ifdef ETHTOOL_GEEE
+#include 
+#endif
+
+#define CLOST_SELF_TEST
+#ifndef CLOST_SELF_TEST
+#ifdef ETHTOOL_TEST
+static const char rnpgbe_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register test  (offline)", "Eeprom test    (offline)",
+	"Interrupt test (offline)", "Loopback test  (offline)",
+	"Link test   (on/offline)"
+};
+
+#define RNP_TEST_LEN (sizeof(rnpgbe_gstrings_test) / ETH_GSTRING_LEN)
+#else
+#define RNP_TEST_LEN 0
+#endif
+#else
+#define RNP_TEST_LEN 0
+#endif
+
+int rnpgbe_wol_exclusion(struct rnpgbe_adapter *adapter,
+			 struct ethtool_wolinfo *wol)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int retval = 0;
+
+	if (!hw->wol_en) {
+		retval = 1;
+		wol->supported = 0;
+	}
+
+	/* WOL not supported for all devices */
+	if (!rnpgbe_wol_supported(adapter, hw->device_id)) {
+		retval = 1;
+		wol->supported = 0;
+	}
+
+	return retval;
+}
+
+void rnpgbe_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	// we only support magic wol
+	wol->supported = hw->wol_supported;
+	wol->wolopts = 0;
+
+	/* bail out if WoL is not currently possible */
+	if (rnpgbe_wol_exclusion(adapter, wol) ||
+	    !device_can_wakeup(&adapter->pdev->dev))
+		return;
+
+	if (adapter->wol & RNP_WUFC_EX)
+		wol->wolopts |= WAKE_UCAST;
+	if (adapter->wol & RNP_WUFC_MC)
+		wol->wolopts |= WAKE_MCAST;
+	if (adapter->wol & RNP_WUFC_BC)
+		wol->wolopts |= WAKE_BCAST;
+	if (adapter->wol & RNP_WUFC_MAG)
+		wol->wolopts |= WAKE_MAGIC;
+}
+
+int rnpgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int ret = 0;
+	u32 new_wol = 0;
+
+	if (wol->wolopts & (~hw->wol_supported))
+		return -EOPNOTSUPP;
+
+	if (wol->wolopts & WAKE_UCAST)
+		new_wol |= RNP_WUFC_EX;
+	if (wol->wolopts & WAKE_MCAST)
+		new_wol |= RNP_WUFC_MC;
+	if (wol->wolopts & WAKE_BCAST)
+		new_wol |= RNP_WUFC_BC;
+	if (wol->wolopts & WAKE_MAGIC)
+		new_wol |= RNP_WUFC_MAG;
+
+	ret = rnpgbe_mbx_wol_set(hw, new_wol);
+	if (ret != 0)
+		return -EOPNOTSUPP;
+
+	adapter->wol = new_wol;
+	// setup mbx
+	device_set_wakeup_enable(&adapter->pdev->dev, !!adapter->wol);
+
+	return 0;
+}
+
+/* ethtool register test data */
+struct rnpgbe_reg_test {
+	u16 reg;
+	u8 array_len;
+	u8 test_type;
+	u32 mask;
+	u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x40 bytes apart, or in contiguous tables.  We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST 1
+#define SET_READ_TEST 2
+#define WRITE_NO_TEST 3
+#define TABLE32_TEST 4
+#define TABLE64_TEST_LO 5
+#define TABLE64_TEST_HI 6
+
+/* default n10 register test */
+static struct rnpgbe_reg_test reg_test_n10[] = {
+	//{RNP_DMA_CONFIG, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF},
+	/*
+	 * { RNP_FCRTL_n10(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+	 * { RNP_FCRTH_n10(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+	 * { RNP_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	 * { RNP_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
+	 * { RNP_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+	 * { RNP_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	 * { RNP_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+	 * { RNP_RXDCTL(0), 4, WRITE_NO_TEST, 0, RNPGBE_RXDCTL_ENABLE },
+	 * { RNP_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	 * { RNP_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
+	 * { RNP_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+	 * { RNP_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	 * { RNP_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	 * { RNP_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	 * { RNP_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
+	 * { RNP_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
+	 * { RNP_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+	 * { RNP_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
+	 * { RNP_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	 */
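+	/* a zero .reg entry terminates the table (see the while (test->reg) loop) */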
+	{ .reg = 0 },
+};
+
+/* write and read check */
+static bool reg_pattern_test(struct rnpgbe_adapter *adapter, u64 *data, int reg,
+			     u32 mask, u32 write)
+{
+	u32 pat, val, before;
+	static const u32 test_pattern[] = { 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000,
+					    0xFFFFFFFF };
+
+	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
+		before = readl(adapter->hw.hw_addr + reg);
+		printk(KERN_DEBUG "before reg %x is %x\n", reg, before);
+		writel((test_pattern[pat] & write),
+		       (adapter->hw.hw_addr + reg));
+		val = readl(adapter->hw.hw_addr + reg);
+		if (val != (test_pattern[pat] & write & mask)) {
+			e_err(drv,
+			      "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
+			      reg, val, (test_pattern[pat] & write & mask));
+			*data = reg;
+			writel(before, adapter->hw.hw_addr + reg);
+			return 1;
+		}
+		writel(before, adapter->hw.hw_addr + reg);
+	}
+	return 0;
+}
+
+static bool reg_set_and_check(struct rnpgbe_adapter *adapter, u64 *data,
+			      int reg, u32 mask, u32 write)
+{
+	u32 val, before;
+
+	before = readl(adapter->hw.hw_addr + reg);
+	writel((write & mask), (adapter->hw.hw_addr + reg));
+	val = readl(adapter->hw.hw_addr + reg);
+	if ((write & mask) != (val & mask)) {
+		e_err(drv,
+		      "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+		      reg, (val & mask), (write & mask));
+		*data = reg;
+		writel(before, (adapter->hw.hw_addr + reg));
+		return 1;
+	}
+	writel(before, (adapter->hw.hw_addr + reg));
+	return 0;
+}
+
+static bool rnpgbe_reg_test(struct rnpgbe_adapter *adapter, u64 *data)
+{
+	struct rnpgbe_reg_test *test;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 i;
+
+	if (RNP_REMOVED(hw->hw_addr)) {
+		e_err(drv, "Adapter removed - register test blocked\n");
+		*data = 1;
+		return true;
+	}
+
+	test = reg_test_n10;
+	/*
+	 * Perform the remainder of the register test, looping through
+	 * the test table until we either fail or reach the null entry.
+	 */
+	while (test->reg) {
+		for (i = 0; i < test->array_len; i++) {
+			bool b = false;
+
+			switch (test->test_type) {
+			case PATTERN_TEST:
+				b = reg_pattern_test(adapter, data,
+						     test->reg + (i * 0x40),
+						     test->mask, test->write);
+				break;
+			case SET_READ_TEST:
+				b = reg_set_and_check(adapter, data,
+						      test->reg + (i * 0x40),
+						      test->mask, test->write);
+				break;
+			case WRITE_NO_TEST:
+				wr32(hw, test->reg + (i * 0x40), test->write);
+				break;
+			case TABLE32_TEST:
+				b = reg_pattern_test(adapter, data,
+						     test->reg + (i * 4),
+						     test->mask, test->write);
+				break;
+			case TABLE64_TEST_LO:
+				b = reg_pattern_test(adapter, data,
+						     test->reg + (i * 8),
+						     test->mask, test->write);
+				break;
+			case TABLE64_TEST_HI:
+				b = reg_pattern_test(adapter, data,
+						     (test->reg + 4) + (i * 8),
+						     test->mask, test->write);
+				break;
+			}
+			if (b)
+				return true;
+		}
+		test++;
+	}
+
+	*data = 0;
+	return false;
+}
+
+static int rnpgbe_link_test(struct rnpgbe_adapter *adapter, u64 *data)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	bool link_up;
+	u32 link_speed = 0;
+	bool duplex;
+	*data = 0;
+
+	hw->ops.check_link(hw, &link_speed, &link_up, &duplex, true);
+	if (!link_up)
+		*data = 1;
+	return *data;
+}
+
+void rnpgbe_diag_test(struct net_device *netdev, struct ethtool_test *eth_test,
+		      u64 *data)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	bool if_running = netif_running(netdev);
+
+	set_bit(__RNP_TESTING, &adapter->state);
+	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+		if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) {
+			int i;
+
+			for (i = 0; i < adapter->num_vfs; i++) {
+				if (adapter->vfinfo[i].clear_to_send) {
+					netdev_warn(
+						netdev, "%s",
+						"offline diagnostics are not supported when VFs "
+						"are present; reporting pass\n");
+					data[0] = 0;
+					data[1] = 0;
+					data[2] = 0;
+					data[3] = 0;
+					if (rnpgbe_link_test(adapter, &data[4]))
+						eth_test->flags |= ETH_TEST_FL_FAILED;
+					//eth_test->flags |= ETH_TEST_FL_FAILED;
+					clear_bit(__RNP_TESTING,
+						  &adapter->state);
+					goto skip_ol_tests;
+				}
+			}
+		}
+
+		/* Offline tests */
+		e_info(hw, "offline testing starting\n");
+
+		//if (if_running)
+		//	rnpgbe_close(netdev);
+
+		/* bringing adapter down disables SFP+ optics */
+		if (hw->ops.enable_tx_laser)
+			hw->ops.enable_tx_laser(hw);
+
+		/* Link test performed before hardware reset so autoneg doesn't
+		 * interfere with test result
+		 */
+		if (rnpgbe_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		//rnpgbe_reset(adapter);
+		e_info(hw, "register testing starting\n");
+		if (rnpgbe_reg_test(adapter, &data[0]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		data[1] = 0;
+		data[2] = 0;
+		/*
+		 * rnpgbe_reset(adapter);
+		 * e_info(hw, "eeprom testing starting\n");
+		 * if (rnpgbe_eeprom_test(adapter, &data[1]))
+		 * eth_test->flags |= ETH_TEST_FL_FAILED;
+		 * rnpgbe_reset(adapter);
+		 * e_info(hw, "interrupt testing starting\n");
+		 * if (rnpgbe_intr_test(adapter, &data[2]))
+		 * eth_test->flags |= ETH_TEST_FL_FAILED;
+		 */
+		/* If SRIOV or VMDq is enabled then skip MAC
+		 * loopback diagnostic.
+		 */
+		if (adapter->flags &
+		    (RNP_FLAG_SRIOV_ENABLED | RNP_FLAG_VMDQ_ENABLED)) {
+			e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
+			data[3] = 0;
+			goto skip_loopback;
+		}
+
+		data[3] = 0;
+		/* loopback test is not added now */
+		/*
+		 * rnpgbe_reset(adapter);
+		 * e_info(hw, "loopback testing starting\n");
+		 * todo Loopback test
+		 * if (rnpgbe_loopback_test(adapter, &data[3]))
+		 * eth_test->flags |= ETH_TEST_FL_FAILED;
+		 */
+	skip_loopback:
+		/* clear testing bit and return adapter to previous state */
+		clear_bit(__RNP_TESTING, &adapter->state);
+	} else {
+		e_info(hw, "online testing starting\n");
+
+		/* if adapter is down, SFP+ optics will be disabled */
+		if (!if_running && hw->ops.enable_tx_laser)
+			hw->ops.enable_tx_laser(hw);
+
+		/* Online tests */
+		if (rnpgbe_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		/* Offline tests aren't run; pass by default */
+		data[0] = 0;
+		data[1] = 0;
+		data[2] = 0;
+		data[3] = 0;
+
+		clear_bit(__RNP_TESTING, &adapter->state);
+	}
+
+	/* if adapter was down, ensure SFP+ optics are disabled again */
+	if (!if_running && hw->ops.disable_tx_laser)
+		hw->ops.disable_tx_laser(hw);
+skip_ol_tests:
+	msleep_interruptible(4 * 1000);
+}
+
+u32 rnpgbe_get_msglevel(struct net_device *netdev)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	return adapter->msg_enable;
+}
+
+void rnpgbe_set_msglevel(struct net_device *netdev, u32 data)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	adapter->msg_enable = data;
+}
+
+int rnpgbe_set_phys_id(struct net_device *netdev,
+		       enum ethtool_phys_id_state state)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		rnpgbe_mbx_led_set(hw, 1);
+		return 2;
+
+	case ETHTOOL_ID_ON:
+		rnpgbe_mbx_led_set(hw, 2);
+		break;
+
+	case ETHTOOL_ID_OFF:
+		rnpgbe_mbx_led_set(hw, 3);
+		break;
+
+	case ETHTOOL_ID_INACTIVE:
+		rnpgbe_mbx_led_set(hw, 0);
+		break;
+	}
+	return 0;
+}
+
+int rnpgbe_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+
+	/* For now we just set it as pf0 */
+	if (!(adapter->flags2 & RNP_FLAG2_PTP_ENABLED))
+		return ethtool_op_get_ts_info(dev, info);
+
+#ifdef HAVE_PTP_1588_CLOCK
+	if (adapter->ptp_clock)
+		info->phc_index = ptp_clock_index(adapter->ptp_clock);
+	else
+		info->phc_index = -1;
+#endif
+	dbg("phc_index is %d\n", info->phc_index);
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE |
+		SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_TX_SOFTWARE |
+		SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
+
+	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+#ifdef PTP_802_AS1
+			   /* 802.AS1 */
+			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+#endif
+			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+			   BIT(HWTSTAMP_FILTER_ALL);
+
+	return 0;
+}
+
+static unsigned int rnpgbe_max_channels(struct rnpgbe_adapter *adapter)
+{
+	unsigned int max_combined;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) {
+		/* SR-IOV currently only allows 2 queues on the PF */
+		max_combined = hw->sriov_ring_limit;
+	} else if (adapter->flags & RNP_FLAG_DCB_ENABLED) {
+		/* with DCB enabled, at most 32 queues are supported */
+		max_combined = 32;
+	} else {
+		/* support up to 16 queues with RSS */
+		max_combined = adapter->max_ring_pair_counts;
+		/* should not be larger than q_vectors? */
+	}
+#ifdef RNP_MAX_RINGS
+	if (max_combined > RNP_MAX_RINGS)
+		max_combined = RNP_MAX_RINGS;
+#endif
+
+	return max_combined;
+}
+
+#ifdef ETHTOOL_GEEE
+int rnpgbe_get_keee(struct net_device *netdev, struct ethtool_keee *edata)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	if (!(hw->feature_flags & RNP_HW_FEATURE_EEE))
+		return -EOPNOTSUPP;
+
+	if (!(hw->eee_capability))
+		return -EOPNOTSUPP;
+
+	edata->supported_u32 = 0;
+
+	if (hw->eee_capability & EEE_1000BT)
+		edata->supported_u32 |= SUPPORTED_1000baseT_Full;
+	if (hw->eee_capability & EEE_100BT)
+		edata->supported_u32 |= SUPPORTED_100baseT_Full;
+
+	if (adapter->eee_enabled)
+		edata->eee_enabled = true;
+
+	edata->lp_advertised_u32 =
+		mmd_eee_adv_to_ethtool_adv_t(adapter->partner_eee);
+	edata->advertised_u32 = mmd_eee_adv_to_ethtool_adv_t(adapter->local_eee);
+
+	// @eee_active: Result of the eee auto negotiation.
+	if ((adapter->eee_enabled) &&
+	    (adapter->local_eee & adapter->partner_eee))
+		edata->eee_active = true;
+	// @tx_lpi_enabled: Whether the interface should assert its tx lpi
+	// todo
+	edata->tx_lpi_enabled = adapter->tx_path_in_lpi_mode;
+	edata->tx_lpi_timer = adapter->tx_lpi_timer;
+
+	// if in half duplex fixme
+	if (!hw->duplex) {
+		edata->eee_enabled = false;
+		edata->eee_active = false;
+		edata->tx_lpi_enabled = false;
+		edata->advertised_u32 = 0;
+	}
+	return 0;
+}
+
+#ifndef HAVE_ETHTOOL_KEEE
+int rnpgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+	struct ethtool_keee kedata;
+	int ret;
+
+	eee_to_keee(&kedata, edata);
+	ret = rnpgbe_get_keee(netdev, &kedata);
+	keee_to_eee(edata, &kedata);
+
+	return ret;
+}
+#endif
+#endif
+
+#ifdef ETHTOOL_SEEE
+int rnpgbe_set_keee(struct net_device *netdev, struct ethtool_keee *edata)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct ethtool_keee eee_curr = {0};
+	s32 ret_val;
+
+	if (!(hw->feature_flags & RNP_HW_FEATURE_EEE))
+		return -EOPNOTSUPP;
+
+	memset(&eee_curr, 0, sizeof(eee_curr));
+
+	ret_val = rnpgbe_get_keee(netdev, &eee_curr);
+
+	if (ret_val)
+		return ret_val;
+
+	if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED) && (edata->eee_enabled)) {
+		dev_err(pci_dev_to_dev(adapter->pdev),
+			"not supported enable eee with sriov on\n");
+
+		return -EINVAL;
+	}
+
+	if (edata->eee_enabled) {
+		if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
+			dev_err(pci_dev_to_dev(adapter->pdev),
+				"Setting EEE tx-lpi is not supported\n");
+			return -EINVAL;
+		}
+
+		if (!edata->advertised_u32 ||
+		    (edata->advertised_u32 &
+		     ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) {
+			dev_err(pci_dev_to_dev(adapter->pdev),
+				"EEE Advertisement supports 100Base-Tx Full Duplex(0x08) 1000Base-T Full Duplex(0x20) or both(0x28)\n");
+			return -EINVAL;
+		}
+		adapter->local_eee = 0;
+		if (edata->advertised_u32 & ADVERTISE_100_FULL)
+			adapter->local_eee |= EEE_100BT;
+		if (edata->advertised_u32 & ADVERTISE_1000_FULL)
+			adapter->local_eee |= EEE_1000BT;
+
+	} else if (!edata->eee_enabled) {
+		// we set local eee to control eee
+		adapter->local_eee = 0;
+		//dev_err(pci_dev_to_dev(adapter->pdev),
+		//		"Setting EEE options is not supported with EEE disabled\n");
+		//return -EINVAL;
+	}
+
+	if (edata->eee_enabled)
+		adapter->eee_enabled = 1;
+	else
+		adapter->eee_enabled = 0;
+
+	adapter->tx_lpi_timer = edata->tx_lpi_timer;
+
+	// setup to the hw
+	// link will down is re-auto
+	if (hw->ops.setup_eee)
+		hw->ops.setup_eee(hw, RNP_DEFAULT_LIT_LS, adapter->tx_lpi_timer,
+				  adapter->local_eee);
+
+	return 0;
+}
+
+#ifndef HAVE_ETHTOOL_KEEE
+int rnpgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+	struct ethtool_keee kedata;
+	int ret;
+
+	eee_to_keee(&kedata, edata);
+	ret = rnpgbe_set_keee(netdev, &kedata);
+	keee_to_eee(edata, &kedata);
+
+	return ret;
+}
+#endif
+#endif
+
+void rnpgbe_get_channels(struct net_device *dev, struct ethtool_channels *ch)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+
+	/* report maximum channels */
+	ch->max_combined = rnpgbe_max_channels(adapter);
+
+	/* report info for other vector */
+	ch->max_other = NON_Q_VECTORS;
+	ch->other_count = NON_Q_VECTORS;
+
+	/* record RSS queues */
+	ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
+
+	/* nothing else to report if RSS is disabled */
+	if (ch->combined_count == 1)
+		return;
+
+	/* we do not support ATR queueing if SR-IOV is enabled */
+	if (adapter->flags & RNP_FLAG_SRIOV_ENABLED)
+		return;
+
+	/* same thing goes for being DCB enabled */
+	if (netdev_get_num_tc(dev) > 1)
+		return;
+}
+
+int rnpgbe_set_channels(struct net_device *dev, struct ethtool_channels *ch)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+	unsigned int count = ch->combined_count;
+
+	if (adapter->flags & RNP_FLAG_SRIOV_ENABLED)
+		return -EINVAL;
+
+	// close tcp-sync if reset ring size
+
+	/* verify they are not requesting separate vectors */
+	if (!count || ch->rx_count || ch->tx_count)
+		return -EINVAL;
+
+	/* verify other_count has not changed */
+	if (ch->other_count != NON_Q_VECTORS)
+		return -EINVAL;
+
+	dbg("call set channels %d %d %d\n", count, ch->rx_count, ch->tx_count);
+	dbg("max channels %d\n", rnpgbe_max_channels(adapter));
+	/* verify the number of channels does not exceed hardware limits */
+	if (count > rnpgbe_max_channels(adapter))
+		return -EINVAL;
+
+	/* update feature limits from largest to smallest supported values */
+	adapter->ring_feature[RING_F_FDIR].limit = count;
+
+	/* cap RSS limit at 16 */
+	/*
+	 * if (count > RNP_MAX_RSS_INDICES)
+	 * count = RNP_MAX_RSS_INDICES;
+	 */
+	if (count > adapter->max_ring_pair_counts)
+		count = adapter->max_ring_pair_counts;
+	adapter->ring_feature[RING_F_RSS].limit = count;
+
+	/* use setup TC to update any traffic class queue mapping */
+	return rnpgbe_setup_tc(dev, netdev_get_num_tc(dev));
+}
+
+int rnpgbe_get_module_info(struct net_device *dev,
+			   struct ethtool_modinfo *modinfo)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u8 module_id, diag_supported;
+	int rc;
+
+	rnpgbe_mbx_get_lane_stat(hw);
+
+	if (hw->is_sgmii)
+		return -EIO;
+
+	rc = rnpgbe_mbx_sfp_module_eeprom_info(hw, 0xA0, SFF_MODULE_ID_OFFSET,
+					       1, &module_id);
+	if (rc || module_id == 0xff)
+		return -EIO;
+	rc = rnpgbe_mbx_sfp_module_eeprom_info(
+		hw, 0xA0, SFF_DIAG_SUPPORT_OFFSET, 1, &diag_supported);
+	if (!rc) {
+		switch (module_id) {
+		case SFF_MODULE_ID_SFP:
+			modinfo->type = ETH_MODULE_SFF_8472;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+			if (!diag_supported)
+				modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+			break;
+		case SFF_MODULE_ID_QSFP:
+		case SFF_MODULE_ID_QSFP_PLUS:
+			modinfo->type = ETH_MODULE_SFF_8436;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+			break;
+		case SFF_MODULE_ID_QSFP28:
+			modinfo->type = ETH_MODULE_SFF_8636;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+			break;
+		default:
+			printk(KERN_DEBUG
+			       "%s: module_id:0x%x diag_supported:0x%x\n",
+			       __func__, module_id, diag_supported);
+			rc = -EOPNOTSUPP;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+int rnpgbe_get_module_eeprom(struct net_device *dev,
+			     struct ethtool_eeprom *eeprom, u8 *data)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u16 start = eeprom->offset, length = eeprom->len;
+	int rc = 0;
+
+	rnpgbe_mbx_get_lane_stat(hw);
+
+	if (hw->is_sgmii)
+		return -EIO;
+
+	memset(data, 0, eeprom->len);
+
+	/* Read A0 portion of the EEPROM */
+	if (start < ETH_MODULE_SFF_8436_LEN) {
+		if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
+			length = ETH_MODULE_SFF_8436_LEN - start;
+		rc = rnpgbe_mbx_sfp_module_eeprom_info(hw, 0xA0, start, length,
+						       data);
+		if (rc)
+			return rc;
+		start += length;
+		data += length;
+		length = eeprom->len - length;
+	}
+
+	/* Read A2 portion of the EEPROM */
+	if (length) {
+		start -= ETH_MODULE_SFF_8436_LEN;
+		rc = rnpgbe_mbx_sfp_module_eeprom_info(hw, 0xA2, start, length,
+						       data);
+	}
+
+	return rc;
+}
+#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS
+void rnpgbe_get_ringparam(struct net_device *netdev,
+			  struct ethtool_ringparam *ring,
+			  struct kernel_ethtool_ringparam __always_unused *ker,
+			  struct netlink_ext_ack __always_unused *extack)
+#else
+void rnpgbe_get_ringparam(struct net_device *netdev,
+			  struct ethtool_ringparam *ring)
+#endif /* HAVE_ETHTOOL_EXTENDED_RINGPARAMS */
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	/* all rings share the same status */
+
+	ring->rx_max_pending = RNP_MAX_RXD;
+	ring->tx_max_pending = RNP_MAX_TXD;
+	ring->rx_mini_max_pending = 0;
+	ring->rx_jumbo_max_pending = 0;
+	ring->rx_pending = adapter->rx_ring_item_count;
+	ring->tx_pending = adapter->tx_ring_item_count;
+	ring->rx_mini_pending = 0;
+	ring->rx_jumbo_pending = 0;
+}
+
+#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS
+int rnpgbe_set_ringparam(struct net_device *netdev,
+			 struct ethtool_ringparam *ring,
+			 struct kernel_ethtool_ringparam __always_unused *ker,
+			 struct netlink_ext_ack __always_unused *extack)
+#else
+int rnpgbe_set_ringparam(struct net_device *netdev,
+			 struct ethtool_ringparam *ring)
+#endif /* HAVE_ETHTOOL_EXTENDED_RINGPARAMS */
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_ring *temp_ring;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int i, err = 0;
+	u32 new_rx_count, new_tx_count;
+
+	/* sriov mode can't change ring param */
+	//if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) {
+	//	return -EINVAL;
+	//}
+
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+		return -EINVAL;
+
+	if ((ring->tx_pending < RNP_MIN_TXD) ||
+	    (ring->tx_pending > RNP_MAX_TXD) ||
+	    (ring->rx_pending < RNP_MIN_RXD) ||
+	    (ring->rx_pending > RNP_MAX_RXD)) {
+		netdev_info(
+			netdev,
+			"Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
+			ring->tx_pending, ring->rx_pending, RNP_MIN_TXD,
+			RNP_MAX_TXD);
+		return -EINVAL;
+	}
+
+	new_tx_count = clamp_t(u32, ring->tx_pending, RNP_MIN_TXD, RNP_MAX_TXD);
+	new_tx_count = ALIGN(new_tx_count, RNP_REQ_TX_DESCRIPTOR_MULTIPLE);
+
+	new_rx_count = clamp_t(u32, ring->rx_pending, RNP_MIN_RXD, RNP_MAX_RXD);
+	new_rx_count = ALIGN(new_rx_count, RNP_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+	if ((new_tx_count == adapter->tx_ring_item_count) &&
+	    (new_rx_count == adapter->rx_ring_item_count)) {
+		/* nothing to do */
+		return 0;
+	}
+
+	while (test_and_set_bit(__RNP_RESETTING, &adapter->state))
+		usleep_range(1000, 2000);
+
+	if (!netif_running(adapter->netdev)) {
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			adapter->tx_ring[i]->count = new_tx_count;
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			adapter->rx_ring[i]->count = new_rx_count;
+		adapter->tx_ring_item_count = new_tx_count;
+		adapter->rx_ring_item_count = new_rx_count;
+		goto clear_reset;
+	}
+
+	/* allocate temporary buffer to store rings in */
+	i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
+	temp_ring = vzalloc(i * sizeof(struct rnpgbe_ring));
+	if (!temp_ring) {
+		err = -ENOMEM;
+		goto clear_reset;
+	}
+
+	if (new_rx_count != adapter->rx_ring_item_count) {
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			adapter->rx_ring[i]->reset_count = new_rx_count;
+	}
+
+	/* if now we are in force mode, never need force, if not force it */
+	if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) {
+		hw->ops.set_mac_rx(hw, false);
+		if (hw->ops.driver_status)
+			hw->ops.driver_status(hw, true,
+					      rnpgbe_driver_force_control_phy);
+	}
+
+	rnpgbe_down(adapter);
+	/*
+	 * Setup new Tx resources and free the old Tx resources in that order.
+	 * We can then assign the new resources to the rings via a memcpy.
+	 * The advantage to this approach is that we are guaranteed to still
+	 * have resources even in the case of an allocation failure.
+	 */
+	if (new_tx_count != adapter->tx_ring_item_count) {
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			memcpy(&temp_ring[i], adapter->tx_ring[i],
+			       sizeof(struct rnpgbe_ring));
+
+			temp_ring[i].count = new_tx_count;
+			err = rnpgbe_setup_tx_resources(&temp_ring[i], adapter);
+			if (err) {
+				while (i) {
+					i--;
+					rnpgbe_free_tx_resources(&temp_ring[i]);
+				}
+				goto err_setup;
+			}
+		}
+
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			rnpgbe_free_tx_resources(adapter->tx_ring[i]);
+			memcpy(adapter->tx_ring[i], &temp_ring[i],
+			       sizeof(struct rnpgbe_ring));
+		}
+
+		adapter->tx_ring_item_count = new_tx_count;
+	}
+
+	/* Repeat the process for the Rx rings if needed */
+	if (new_rx_count != adapter->rx_ring_item_count) {
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			memcpy(&temp_ring[i], adapter->rx_ring[i],
+			       sizeof(struct rnpgbe_ring));
+			/* setup ring count */
+			if (!(adapter->rx_ring[i]->ring_flags &
+			      RNP_RING_FLAG_DELAY_SETUP_RX_LEN)) {
+				temp_ring[i].count = new_rx_count;
+			} else {
+				/* setup temp count */
+				temp_ring[i].count = temp_ring[i].temp_count;
+				adapter->rx_ring[i]->reset_count = new_rx_count;
+			}
+			err = rnpgbe_setup_rx_resources(&temp_ring[i], adapter);
+			if (err) {
+				while (i) {
+					i--;
+					rnpgbe_free_rx_resources(&temp_ring[i]);
+				}
+				goto err_setup;
+			}
+		}
+
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			rnpgbe_free_rx_resources(adapter->rx_ring[i]);
+			memcpy(adapter->rx_ring[i], &temp_ring[i],
+			       sizeof(struct rnpgbe_ring));
+		}
+		adapter->rx_ring_item_count = new_rx_count;
+	}
+
+err_setup:
+	rnpgbe_up(adapter);
+	vfree(temp_ring);
+	if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) {
+		if (hw->ops.driver_status)
+			hw->ops.driver_status(hw, false,
+					      rnpgbe_driver_force_control_phy);
+	}
+clear_reset:
+	clear_bit(__RNP_RESETTING, &adapter->state);
+	return err;
+}
+
+int rnpgbe_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	rnpgbe_mbx_get_dump_flags(&adapter->hw);
+
+	dump->flag = adapter->hw.dump.flag;
+	dump->len = adapter->hw.dump.len;
+	dump->version = adapter->hw.dump.version;
+
+	return 0;
+}
+
+int rnpgbe_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
+			 void *buffer)
+{
+	int err;
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	err = rnpgbe_mbx_get_dump(&adapter->hw, dump->flag, buffer, dump->len);
+	if (err)
+		return err;
+
+	dump->flag = adapter->hw.dump.flag;
+	dump->len = adapter->hw.dump.len;
+	dump->version = adapter->hw.dump.version;
+
+	return 0;
+}
+
+int rnpgbe_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
+{
+	//int err;
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	rnpgbe_mbx_set_dump(&adapter->hw, dump->flag);
+
+	return 0;
+}
+
+int rnpgbe_get_coalesce(struct net_device *netdev,
+#ifdef HAVE_ETHTOOL_COALESCE_EXTACK
+			struct ethtool_coalesce *coal,
+			struct kernel_ethtool_coalesce *kernel_coal,
+			struct netlink_ext_ack *extack)
+#else
+			struct ethtool_coalesce *coal)
+#endif
+
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (adapter->priv_flags & RNP_PRIV_FLAG_TX_COALESCE) {
+		/* adaptive tx coalescing is disabled */
+		coal->use_adaptive_tx_coalesce = 0;
+
+		coal->tx_coalesce_usecs = adapter->tx_usecs;
+		coal->tx_coalesce_usecs_irq = 0;
+		coal->tx_max_coalesced_frames = adapter->tx_frames;
+		coal->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
+
+		coal->tx_coalesce_usecs_low = 0;
+		coal->tx_max_coalesced_frames_low = 0;
+		coal->tx_coalesce_usecs_high = 0;
+		coal->tx_max_coalesced_frames_high = 0;
+	} else {
+		// todo
+		coal->use_adaptive_tx_coalesce = 1;
+		coal->tx_coalesce_usecs = adapter->tx_usecs;
+		coal->tx_coalesce_usecs_irq = 0;
+		coal->tx_max_coalesced_frames = adapter->tx_frames;
+		coal->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
+
+		coal->tx_coalesce_usecs_low = 0;
+		coal->tx_max_coalesced_frames_low = 0;
+		coal->tx_coalesce_usecs_high = 0;
+		coal->tx_max_coalesced_frames_high = 0;
+	}
+
+	if (adapter->priv_flags & RNP_PRIV_FLAG_RX_COALESCE) {
+		coal->use_adaptive_rx_coalesce = 0;
+
+		coal->rx_coalesce_usecs_irq = 0;
+		coal->rx_coalesce_usecs = adapter->rx_usecs;
+		coal->rx_max_coalesced_frames = adapter->rx_frames;
+		coal->rx_max_coalesced_frames_irq = adapter->napi_budge;
+
+		coal->rx_coalesce_usecs_low = 0;
+		coal->rx_max_coalesced_frames_low = 0;
+		coal->rx_coalesce_usecs_high = 0;
+		coal->rx_max_coalesced_frames_high = 0;
+	} else {
+		coal->use_adaptive_rx_coalesce = 1;
+		// todo
+
+		coal->rx_coalesce_usecs_irq = 0;
+		coal->rx_coalesce_usecs = adapter->rx_usecs;
+		coal->rx_max_coalesced_frames = adapter->rx_frames;
+		coal->rx_max_coalesced_frames_irq = adapter->napi_budge;
+
+		coal->rx_coalesce_usecs_low = 0;
+		coal->rx_max_coalesced_frames_low = 0;
+		coal->rx_coalesce_usecs_high = 0;
+		coal->rx_max_coalesced_frames_high = 0;
+	}
+
+	/* these settings are not supported */
+	coal->pkt_rate_low = 0;
+	coal->pkt_rate_high = 0;
+	coal->rate_sample_interval = 0;
+
+	return 0;
+}
+
+int rnpgbe_set_coalesce(struct net_device *netdev,
+#ifdef HAVE_ETHTOOL_COALESCE_EXTACK
+			struct ethtool_coalesce *ec,
+			struct kernel_ethtool_coalesce *kernel_coal,
+			struct netlink_ext_ack *extack)
+#else
+			struct ethtool_coalesce *ec)
+#endif
+
+{
+	int reset = 0;
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	u32 value;
+	/* we don't support close tx and rx coalesce */
+	//if (!(ec->use_adaptive_tx_coalesce) || !(ec->use_adaptive_rx_coalesce))
+	//		return -EINVAL;
+	if (!ec->use_adaptive_tx_coalesce)
+		adapter->priv_flags |= RNP_PRIV_FLAG_TX_COALESCE;
+	else
+		adapter->priv_flags &= ~RNP_PRIV_FLAG_TX_COALESCE;
+
+	if (!ec->use_adaptive_rx_coalesce)
+		adapter->priv_flags |= RNP_PRIV_FLAG_RX_COALESCE;
+	else
+		adapter->priv_flags &= ~RNP_PRIV_FLAG_RX_COALESCE;
+
+	/* if we disable irq, we set rx coalescing to the lowest */
+
+	//if (ec->tx_max_coalesced_frames_irq) {
+
+	if ((ec->tx_max_coalesced_frames_irq < RNP_MIN_TX_WORK) ||
+	    (ec->tx_max_coalesced_frames_irq > RNP_MAX_TX_WORK))
+		return -EINVAL;
+	/* check coalesce frame irq */
+	value = clamp_t(u32, ec->tx_max_coalesced_frames_irq, RNP_MIN_TX_WORK,
+			RNP_MAX_TX_WORK);
+	value = ALIGN(value, RNP_WORK_ALIGN);
+
+	if (adapter->tx_work_limit != value) {
+		reset = 1;
+		adapter->tx_work_limit = value;
+	}
+	//}
+
+	//if (ec->tx_max_coalesced_frames) {
+	/* check value */
+	if ((ec->tx_max_coalesced_frames < RNP_MIN_TX_FRAME) ||
+	    (ec->tx_max_coalesced_frames > RNP_MAX_TX_FRAME))
+		return -EINVAL;
+
+	value = clamp_t(u32, ec->tx_max_coalesced_frames, RNP_MIN_TX_FRAME,
+			RNP_MAX_TX_FRAME);
+	if (adapter->tx_frames != value) {
+		reset = 1;
+		adapter->tx_frames = value;
+	}
+	//}
+
+	//if (ec->tx_coalesce_usecs) {
+	if ((ec->tx_coalesce_usecs < RNP_MIN_TX_USEC) ||
+	    (ec->tx_coalesce_usecs > RNP_MAX_TX_USEC))
+		return -EINVAL;
+	/* check value */
+	value = clamp_t(u32, ec->tx_coalesce_usecs, RNP_MIN_TX_USEC,
+			RNP_MAX_TX_USEC);
+	if (adapter->tx_usecs != value) {
+		reset = 1;
+		adapter->tx_usecs = value;
+	}
+	//}
+
+	//if (ec->rx_max_coalesced_frames_irq) {
+	if ((ec->rx_max_coalesced_frames_irq < RNP_MIN_RX_WORK) ||
+	    (ec->rx_max_coalesced_frames_irq > RNP_MAX_RX_WORK))
+		return -EINVAL;
+
+	value = clamp_t(u32, ec->rx_max_coalesced_frames_irq, RNP_MIN_RX_WORK,
+			RNP_MAX_RX_WORK);
+	value = ALIGN(value, RNP_WORK_ALIGN);
+
+	if (adapter->napi_budge != value) {
+		reset = 1;
+		adapter->napi_budge = value;
+	}
+	//}
+
+	//if (ec->rx_max_coalesced_frames) {
+	if ((ec->rx_max_coalesced_frames < RNP_MIN_RX_FRAME) ||
+	    (ec->rx_max_coalesced_frames > RNP_MAX_RX_FRAME))
+		return -EINVAL;
+
+	value = clamp_t(u32, ec->rx_max_coalesced_frames, RNP_MIN_RX_FRAME,
+			RNP_MAX_RX_FRAME);
+	if (adapter->rx_frames != value) {
+		reset = 1;
+		adapter->rx_frames = value;
+	}
+	//}
+
+	//if (ec->rx_coalesce_usecs) {
+	if ((ec->rx_coalesce_usecs < RNP_MIN_RX_USEC) ||
+	    (ec->rx_coalesce_usecs > RNP_MAX_RX_USEC))
+		return -EINVAL;
+	/* check value */
+	value = clamp_t(u32, ec->rx_coalesce_usecs, RNP_MIN_RX_USEC,
+			RNP_MAX_RX_USEC);
+
+	if (adapter->rx_usecs != value) {
+		reset = 1;
+		adapter->rx_usecs = value;
+	}
+	//}
+	/* other settings are not supported */
+	if ((ec->pkt_rate_low) || (ec->pkt_rate_high) ||
+	    (ec->rx_coalesce_usecs_low) || (ec->rx_max_coalesced_frames_low) ||
+	    (ec->tx_coalesce_usecs_low) || (ec->tx_max_coalesced_frames_low) ||
+	    (ec->rx_coalesce_usecs_high) ||
+	    (ec->rx_max_coalesced_frames_high) ||
+	    (ec->tx_coalesce_usecs_high) ||
+	    (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval) ||
+	    (ec->tx_coalesce_usecs_irq) || (ec->rx_coalesce_usecs_irq))
+		return -EINVAL;
+
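+	/* re-run queue setup so changed values take effect */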
+	if (reset)
+		return rnpgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
+
+	return 0;
+}
+
+#ifndef HAVE_NDO_SET_FEATURES
+u32 rnpgbe_get_rx_csum(struct net_device *netdev)
+{
+	return !!(netdev->features & NETIF_F_RXCSUM);
+}
+
+int rnpgbe_set_rx_csum(struct net_device *netdev, u32 data)
+{
+	//struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	//bool need_reset = false;
+
+	if (data)
+		netdev->features |= NETIF_F_RXCSUM;
+	else
+		netdev->features &= ~NETIF_F_RXCSUM;
+
+	return 0;
+}
+
+int rnpgbe_set_tx_csum(struct net_device *netdev, u32 data)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+#ifdef NETIF_F_IPV6_CSUM
+	u32 feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+#else
+	u32 feature_list = NETIF_F_IP_CSUM;
+#endif
+
+	switch (adapter->hw.hw_type) {
+	case rnpgbe_hw_n500:
+	case rnpgbe_hw_n210:
+	case rnpgbe_hw_n210L:
+#ifdef HAVE_ENCAP_TSO_OFFLOAD
+		if (data)
+			netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+		else
+			netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL;
+		feature_list |= NETIF_F_GSO_UDP_TUNNEL;
+#endif /* HAVE_ENCAP_TSO_OFFLOAD */
+		feature_list |= NETIF_F_SCTP_CSUM;
+		break;
+	default:
+		break;
+	}
+
+	if (data)
+		netdev->features |= feature_list;
+	else
+		netdev->features &= ~feature_list;
+
+	return 0;
+}
+
+#ifdef NETIF_F_TSO
+int rnpgbe_set_tso(struct net_device *netdev, u32 data)
+{
+#ifdef NETIF_F_TSO6
+	u32 feature_list = NETIF_F_TSO | NETIF_F_TSO6;
+#else
+	u32 feature_list = NETIF_F_TSO;
+#endif
+
+	if (data)
+		netdev->features |= feature_list;
+	else
+		netdev->features &= ~feature_list;
+
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+	if (!data) {
+		struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+		struct net_device *v_netdev;
+		int i;
+
+		/* disable TSO on all VLANs if they're present */
+		if (!adapter->vlgrp)
+			goto tso_out;
+
+		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+			v_netdev = vlan_group_get_device(adapter->vlgrp, i);
+			if (!v_netdev)
+				continue;
+
+			v_netdev->features &= ~feature_list;
+			vlan_group_set_device(adapter->vlgrp, i, v_netdev);
+		}
+	}
+
+tso_out:
+
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+	return 0;
+}
+#endif
+#endif
+
+#ifdef ETHTOOL_GRXRINGS
+
+static int rnpgbe_get_rss_hash_opts(struct rnpgbe_adapter *adapter,
+				    struct ethtool_rxnfc *cmd)
+{
+	cmd->data = 0;
+
+	/* Report default options for RSS on rnp */
+	switch (cmd->flow_type) {
+	case TCP_V4_FLOW:
+		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		/* fall through */
+		fallthrough;
+	case UDP_V4_FLOW:
+	case SCTP_V4_FLOW:
+		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		/* fall through */
+		fallthrough;
+	case AH_ESP_V4_FLOW:
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+	case IPV4_FLOW:
+		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+		break;
+	case TCP_V6_FLOW:
+		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		/* fall through */
+		fallthrough;
+	case UDP_V6_FLOW:
+	case SCTP_V6_FLOW:
+		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		/* fall through */
+		fallthrough;
+	case AH_ESP_V6_FLOW:
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+	case IPV6_FLOW:
+		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+__maybe_unused static void dump_fsp(struct ethtool_rx_flow_spec *fsp)
+{
+	int i;
+
+	dbg(" fsp cookie is %llx\n", fsp->ring_cookie);
+	switch (fsp->flow_type & ~FLOW_EXT) {
+	case ETHER_FLOW:
+		for (i = 0; i < ETH_ALEN; i++)
+			dbg("src 0x%02x\n", fsp->h_u.ether_spec.h_source[i]);
+		for (i = 0; i < ETH_ALEN; i++)
+			dbg("dst 0x%02x\n", fsp->h_u.ether_spec.h_dest[i]);
+		for (i = 0; i < ETH_ALEN; i++)
+			dbg("src mask 0x%02x\n",
+			    fsp->m_u.ether_spec.h_source[i]);
+		for (i = 0; i < ETH_ALEN; i++)
+			dbg("dst mask 0x%02x\n", fsp->m_u.ether_spec.h_dest[i]);
+
+		dbg("proto type is %x\n", fsp->h_u.ether_spec.h_proto);
+
+		break;
+
+	default:
+		dbg("flow type is %x\n", fsp->flow_type);
+		dbg("l2 prot is %x\n", fsp->h_u.ether_spec.h_proto);
+		dbg("ip4 src ip is %x\n", fsp->h_u.tcp_ip4_spec.ip4src);
+		dbg("ip4 src ip mask is %x\n", fsp->m_u.tcp_ip4_spec.ip4src);
+
+		dbg("ip4 dst ip is %x\n", fsp->h_u.tcp_ip4_spec.ip4dst);
+		dbg("ip4 dst ip mask is %x\n", fsp->m_u.tcp_ip4_spec.ip4dst);
+
+		dbg("ip4 src port is %x\n", fsp->h_u.tcp_ip4_spec.psrc);
+		dbg("ip4 src port mask is %x\n", fsp->m_u.tcp_ip4_spec.psrc);
+
+		dbg("ip4 dst port is %x\n", fsp->h_u.tcp_ip4_spec.pdst);
+		dbg("ip4 dst port mask is %x\n", fsp->m_u.tcp_ip4_spec.pdst);
+
+		dbg("proto is %x\n", fsp->h_u.usr_ip4_spec.proto);
+		break;
+	}
+}
+
+static int rnpgbe_get_ethtool_fdir_entry(struct rnpgbe_adapter *adapter,
+					 struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_rx_flow_spec *fsp =
+		(struct ethtool_rx_flow_spec *)&cmd->fs;
+	struct hlist_node *node2;
+	struct rnpgbe_fdir_filter *rule = NULL;
+
+	/* report total rule count */
+	cmd->data = adapter->fdir_pballoc;
+
+	hlist_for_each_entry_safe (rule, node2, &adapter->fdir_filter_list,
+				   fdir_node)
+		if (fsp->location <= rule->sw_idx)
+			break;
+
+	if (!rule || fsp->location != rule->sw_idx)
+		return -EINVAL;
+
+	/* fill out the flow spec entry */
+
+	/* set flow type field */
+	switch (rule->filter.formatted.flow_type) {
+	case RNP_ATR_FLOW_TYPE_TCPV4:
+		fsp->flow_type = TCP_V4_FLOW;
+		break;
+	case RNP_ATR_FLOW_TYPE_UDPV4:
+		fsp->flow_type = UDP_V4_FLOW;
+		break;
+	case RNP_ATR_FLOW_TYPE_SCTPV4:
+		fsp->flow_type = SCTP_V4_FLOW;
+		break;
+	case RNP_ATR_FLOW_TYPE_IPV4:
+		fsp->flow_type = IP_USER_FLOW;
+		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+		if (adapter->fdir_mode == fdir_mode_tuple5) {
+			fsp->h_u.usr_ip4_spec.proto =
+				rule->filter.formatted.inner_mac[0];
+			fsp->m_u.usr_ip4_spec.proto = 0xff;
+		} else {
+			fsp->h_u.usr_ip4_spec.proto =
+				rule->filter.formatted.inner_mac[0] &
+				rule->filter.formatted.inner_mac_mask[0];
+			fsp->m_u.usr_ip4_spec.proto =
+				rule->filter.formatted.inner_mac_mask[0];
+		}
+		break;
+	case RNP_ATR_FLOW_TYPE_ETHER:
+		fsp->flow_type = ETHER_FLOW;
+		/* support proto and mask only in this mode */
+		fsp->h_u.ether_spec.h_proto = rule->filter.layer2_formate.proto;
+		fsp->m_u.ether_spec.h_proto = 0xffff;
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (rule->filter.formatted.flow_type != RNP_ATR_FLOW_TYPE_ETHER) {
+		/* not support mask in tuple 5 mode */
+		if (adapter->fdir_mode == fdir_mode_tuple5) {
+			fsp->h_u.tcp_ip4_spec.psrc =
+				rule->filter.formatted.src_port;
+			fsp->h_u.tcp_ip4_spec.pdst =
+				rule->filter.formatted.dst_port;
+			fsp->h_u.tcp_ip4_spec.ip4src =
+				rule->filter.formatted.src_ip[0];
+			fsp->h_u.tcp_ip4_spec.ip4dst =
+				rule->filter.formatted.dst_ip[0];
+			fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
+			fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
+			fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
+			fsp->m_u.tcp_ip4_spec.ip4dst = 0xffffffff;
+		} else {
+			fsp->h_u.tcp_ip4_spec.psrc =
+				rule->filter.formatted.src_port &
+				rule->filter.formatted.src_port_mask;
+			fsp->m_u.tcp_ip4_spec.psrc =
+				rule->filter.formatted.src_port_mask;
+			fsp->h_u.tcp_ip4_spec.pdst =
+				rule->filter.formatted.dst_port &
+				rule->filter.formatted.dst_port_mask;
+			fsp->m_u.tcp_ip4_spec.pdst =
+				rule->filter.formatted.dst_port_mask;
+
+			fsp->h_u.tcp_ip4_spec.ip4src =
+				rule->filter.formatted.src_ip[0] &
+				rule->filter.formatted.src_ip_mask[0];
+			fsp->m_u.tcp_ip4_spec.ip4src =
+				rule->filter.formatted.src_ip_mask[0];
+
+			fsp->h_u.tcp_ip4_spec.ip4dst =
+				rule->filter.formatted.dst_ip[0] &
+				rule->filter.formatted.dst_ip_mask[0];
+			fsp->m_u.tcp_ip4_spec.ip4dst =
+				rule->filter.formatted.dst_ip_mask[0];
+		}
+	}
+	// dump_fsp(fsp);
+
+	/* record action */
+	if (rule->action == RNP_FDIR_DROP_QUEUE)
+		fsp->ring_cookie = RX_CLS_FLOW_DISC;
+	else {
+		if (rule->vf_num != 0) {
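+			/* the vf number lives in the upper 32 bits of the
+			 * ring cookie, the queue index in the lower bits
+			 */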
+			fsp->ring_cookie =
+				((u64)rule->vf_num << 32) | (rule->action);
+		} else {
+			fsp->ring_cookie = rule->action;
+		}
+	}
+
+	return 0;
+}
+
+static int rnpgbe_get_ethtool_fdir_all(struct rnpgbe_adapter *adapter,
+				       struct ethtool_rxnfc *cmd,
+				       u32 *rule_locs)
+{
+	struct hlist_node *node2;
+	struct rnpgbe_fdir_filter *rule;
+	int cnt = 0;
+
+	/* report the size of the rule table */
+	cmd->data = adapter->fdir_pballoc;
+
+	hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list,
+				  fdir_node) {
+		if (cnt == cmd->rule_cnt)
+			return -EMSGSIZE;
+		rule_locs[cnt] = rule->sw_idx;
+		cnt++;
+	}
+
+	cmd->rule_cnt = cnt;
+
+	return 0;
+}
+
+int rnpgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
+		     void *rule_locs)
+#else
+		     u32 *rule_locs)
+#endif
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+	int ret = -EOPNOTSUPP;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) {
+			/* queue count is fixed to sriov_ring_limit with sriov on */
+			cmd->data = hw->sriov_ring_limit;
+		} else {
+			cmd->data = adapter->num_rx_queues;
+		}
+		ret = 0;
+		break;
+	case ETHTOOL_GRXCLSRLCNT:
+		cmd->rule_cnt = adapter->fdir_filter_count;
+		ret = 0;
+		break;
+	case ETHTOOL_GRXCLSRULE:
+		ret = rnpgbe_get_ethtool_fdir_entry(adapter, cmd);
+		break;
+	case ETHTOOL_GRXCLSRLALL:
+		ret = rnpgbe_get_ethtool_fdir_all(adapter, cmd,
+						  (u32 *)rule_locs);
+		break;
+	case ETHTOOL_GRXFH:
+		ret = rnpgbe_get_rss_hash_opts(adapter, cmd);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+#define UDP_RSS_FLAGS                                                          \
+	(RNP_FLAG2_RSS_FIELD_IPV4_UDP | RNP_FLAG2_RSS_FIELD_IPV6_UDP)
+static int rnpgbe_set_rss_hash_opt(struct rnpgbe_adapter *adapter,
+				   struct ethtool_rxnfc *nfc)
+{
+	/*
+	 * RSS does not support anything other than hashing
+	 * to queues on src and dst IPs and ports
+	 */
+	if (nfc->data &
+	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
+		return -EINVAL;
+
+	switch (nfc->flow_type) {
+	case TCP_V4_FLOW:
+	case TCP_V6_FLOW:
+	case UDP_V4_FLOW:
+	case UDP_V6_FLOW:
+		if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST) ||
+		    !(nfc->data & RXH_L4_B_0_1) || !(nfc->data & RXH_L4_B_2_3))
+			return -EINVAL;
+		break;
+	case AH_ESP_V4_FLOW:
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+	case SCTP_V4_FLOW:
+	case AH_ESP_V6_FLOW:
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+	case SCTP_V6_FLOW:
+		if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST) ||
+		    (nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3))
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
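+/* Map an ethtool flow spec onto an RNP_ATR_FLOW_TYPE_* value and validate
+ * the masks against the current fdir mode.
+ * Returns 1 if the spec is supported, 0 otherwise.
+ */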
+static int rnpgbe_flowspec_to_flow_type(struct rnpgbe_adapter *adapter,
+					struct ethtool_rx_flow_spec *fsp,
+					uint8_t *flow_type,
+					struct rnpgbe_fdir_filter *input)
+{
+	int i;
+	int ret = 1;
+	/* not support flow_ext */
+	if (fsp->flow_type & FLOW_EXT)
+		return 0;
+
+	switch (fsp->flow_type & ~FLOW_EXT) {
+	/* TODO: ipv6 is not considered */
+	case TCP_V4_FLOW:
+		*flow_type = RNP_ATR_FLOW_TYPE_TCPV4;
+		break;
+	case UDP_V4_FLOW:
+		*flow_type = RNP_ATR_FLOW_TYPE_UDPV4;
+		break;
+	case SCTP_V4_FLOW:
+		*flow_type = RNP_ATR_FLOW_TYPE_SCTPV4;
+		break;
+	case ETHER_FLOW:
+		/* layer 2 flow */
+		*flow_type = RNP_ATR_FLOW_TYPE_ETHER;
+		input->filter.layer2_formate.proto =
+			fsp->h_u.ether_spec.h_proto;
+		break;
+	case IP_USER_FLOW:
+		switch (fsp->h_u.usr_ip4_spec.proto) {
+		case IPPROTO_TCP:
+			*flow_type = RNP_ATR_FLOW_TYPE_TCPV4;
+			break;
+		case IPPROTO_UDP:
+			*flow_type = RNP_ATR_FLOW_TYPE_UDPV4;
+			break;
+		case IPPROTO_SCTP:
+			*flow_type = RNP_ATR_FLOW_TYPE_SCTPV4;
+			break;
+		case 0:
+			/* plain ipv4 with no src and no dst address */
+			if (!(fsp->h_u.tcp_ip4_spec.ip4src) &&
+			    (!(fsp->h_u.tcp_ip4_spec.ip4dst))) {
+				/* no l4 proto either, fall back to layer2 */
+				*flow_type = RNP_ATR_FLOW_TYPE_ETHER;
+				input->filter.layer2_formate.proto =
+					htons(ETH_P_IP);
+			} else {
+				/* only src or dst given, use an ipv4 rule */
+				*flow_type = RNP_ATR_FLOW_TYPE_IPV4;
+			}
+			break;
+		default:
+			/* other unknown l4 proto ip */
+			*flow_type = RNP_ATR_FLOW_TYPE_IPV4;
+		}
+		break;
+	default:
+		return 0;
+	}
+	/* layer2 flow */
+	if (*flow_type == RNP_ATR_FLOW_TYPE_ETHER) {
+		if (adapter->layer2_count < 0) {
+			e_err(drv, "layer2 count full\n");
+			ret = 0;
+		}
+		/* only the ethertype can be matched, so require the src/dst
+		 * mac addresses and their masks to be all zeros
+		 */
+		for (i = 0; i < ETH_ALEN; i++) {
+			if (fsp->h_u.ether_spec.h_source[i] != 0)
+				ret = 0;
+
+			if (fsp->h_u.ether_spec.h_dest[i] != 0)
+				ret = 0;
+
+			if (fsp->m_u.ether_spec.h_source[i] != 0)
+				ret = 0;
+
+			if (fsp->m_u.ether_spec.h_dest[i] != 0)
+				ret = 0;
+		}
+		/* we do not support matching on vlan ethertypes */
+		if (input->filter.layer2_formate.proto == htons(ETH_P_8021Q))
+			ret = 0;
+		if (input->filter.layer2_formate.proto == htons(0x88a8))
+			ret = 0;
+		if (input->filter.layer2_formate.proto == htons(0x9100))
+			ret = 0;
+		if (input->filter.layer2_formate.proto == htons(0x9200))
+			ret = 0;
+
+	} else if (*flow_type == RNP_ATR_FLOW_TYPE_IPV4) {
+		if (adapter->fdir_mode == fdir_mode_tuple5) {
+			if (adapter->tuple_5_count < 0) {
+				e_err(drv, "tuple 5 count full\n");
+				ret = 0;
+			}
+			if ((fsp->h_u.usr_ip4_spec.ip4src != 0) &&
+			    (fsp->m_u.usr_ip4_spec.ip4src != 0xffffffff)) {
+				e_err(drv, "ip src mask error\n");
+				ret = 0;
+			}
+			if ((fsp->h_u.usr_ip4_spec.ip4dst != 0) &&
+			    (fsp->m_u.usr_ip4_spec.ip4dst != 0xffffffff)) {
+				e_err(drv, "ip dst mask error\n");
+				ret = 0;
+			}
+			if ((fsp->h_u.usr_ip4_spec.proto != 0) &&
+			    (fsp->m_u.usr_ip4_spec.proto != 0xff)) {
+				e_err(drv, "ip l4 proto mask error\n");
+				ret = 0;
+			}
+		} else {
+			if (adapter->tuple_5_count < 0) {
+				e_err(drv, "tcam count full\n");
+				ret = 0;
+			}
+			/* tcam mode can support mask */
+		}
+		/* l4_4_bytes matching is not supported */
+		if ((fsp->h_u.usr_ip4_spec.l4_4_bytes != 0)) {
+			e_err(drv, "ip l4_4_bytes error\n");
+			ret = 0;
+		}
+	} else {
+		if (adapter->fdir_mode == fdir_mode_tuple5) {
+			/* should check mask all ff */
+			if (adapter->tuple_5_count < 0) {
+				e_err(drv, "tuple 5 count full\n");
+				ret = 0;
+			}
+			if ((fsp->h_u.tcp_ip4_spec.ip4src != 0) &&
+			    (fsp->m_u.tcp_ip4_spec.ip4src != 0xffffffff)) {
+				e_err(drv, "src mask error\n");
+				ret = 0;
+			}
+			if ((fsp->h_u.tcp_ip4_spec.ip4dst != 0) &&
+			    (fsp->m_u.tcp_ip4_spec.ip4dst != 0xffffffff)) {
+				e_err(drv, "dst mask error\n");
+				ret = 0;
+			}
+			if ((fsp->h_u.tcp_ip4_spec.psrc != 0) &&
+			    (fsp->m_u.tcp_ip4_spec.psrc != 0xffff)) {
+				e_err(drv, "src port mask error\n");
+				ret = 0;
+			}
+			if ((fsp->h_u.tcp_ip4_spec.pdst != 0) &&
+			    (fsp->m_u.tcp_ip4_spec.pdst != 0xffff)) {
+				e_err(drv, "dst port mask error\n");
+				ret = 0;
+			}
+		} else {
+			if (adapter->tuple_5_count < 0) {
+				e_err(drv, "tcam count full\n");
+				ret = 0;
+			}
+		}
+		/* tos matching is not supported */
+		if (fsp->h_u.tcp_ip4_spec.tos != 0) {
+			e_err(drv, "tos error\n");
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
+/* check if this sw_idx was set before and return its hw index */
+__maybe_unused static int
+rnpgbe_check_ethtool_fdir_entry(struct rnpgbe_adapter *adapter, u16 sw_idx,
+				u16 *hw_idx)
+{
+	struct rnpgbe_fdir_filter *rule, *parent;
+	struct hlist_node *node2;
+	int find = 0;
+
+	parent = NULL;
+	rule = NULL;
+	hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list,
+				  fdir_node) {
+		/* hash found, or no matching entry */
+		if (rule->sw_idx >= sw_idx)
+			break;
+
+		parent = rule;
+	}
+	/* if an old rule already occupies this index, return its hw index */
+	if (rule && (rule->sw_idx == sw_idx)) {
+		*hw_idx = rule->hw_idx;
+		find = 1;
+	}
+
+	return find;
+}
+
+int rnpgbe_update_ethtool_fdir_entry(struct rnpgbe_adapter *adapter,
+				     struct rnpgbe_fdir_filter *input,
+				     u16 sw_idx)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct hlist_node *node2;
+	struct rnpgbe_fdir_filter *rule, *parent;
+	bool deleted = false;
+	u16 hw_idx_layer2 = 0;
+	u16 hw_idx_tuple5 = 0;
+
+	s32 err;
+
+	parent = NULL;
+	rule = NULL;
+
+	hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list,
+				  fdir_node) {
+		/* hash found, or no matching entry */
+		if (rule->sw_idx >= sw_idx)
+			break;
+
+		parent = rule;
+	}
+
+	/* if there is an old rule occupying our place remove it */
+	if (rule && (rule->sw_idx == sw_idx)) {
+		/* only clear hw enable bits */
+		/* hardware filters are only configured when interface is up,
+		 * and we should not issue filter commands while the interface
+		 * is down
+		 */
+		if (netif_running(adapter->netdev) && (!input)) {
+			err = rnpgbe_fdir_erase_perfect_filter(
+				adapter->fdir_mode, hw, &rule->filter,
+				rule->hw_idx);
+			if (err)
+				return -EINVAL;
+		}
+
+		adapter->fdir_filter_count--;
+		if (rule->filter.formatted.flow_type == RNP_ATR_FLOW_TYPE_ETHER)
+			adapter->layer2_count++;
+		else
+			adapter->tuple_5_count++;
+
+		hlist_del(&rule->fdir_node);
+		kfree(rule);
+		deleted = true;
+	}
+
+	/* If we weren't given an input, then this was a request to delete a
+	 * filter. We should return -EINVAL if the filter wasn't found, but
+	 * return 0 if the rule was successfully deleted.
+	 */
+	if (!input)
+		return deleted ? 0 : -EINVAL;
+
+	/* initialize node and set software index */
+	INIT_HLIST_NODE(&input->fdir_node);
+
+	/* add filter to the list */
+	if (parent)
+		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
+	else
+		hlist_add_head(&input->fdir_node, &adapter->fdir_filter_list);
+
+	/* we must re-program everything: first erase all tcam and layer2
+	 * rules, then write the whole list back to hw
+	 */
+
+	if (adapter->fdir_mode != fdir_mode_tcam)
+		hw->ops.clr_all_layer2_remapping(hw);
+	else
+		hw->ops.clr_all_tuple5_remapping(hw);
+
+	/* setup hw */
+	hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list,
+				  fdir_node) {
+		if (netif_running(adapter->netdev)) {
+			/* hw_idx */
+			if (rule->filter.formatted.flow_type ==
+			    RNP_ATR_FLOW_TYPE_ETHER)
+				rule->hw_idx = hw_idx_layer2++;
+			else
+				rule->hw_idx = hw_idx_tuple5++;
+
+			if ((!rule->vf_num) &&
+			    (rule->action != ACTION_TO_MPE)) {
+				int idx = rule->action;
+
+				err = rnpgbe_fdir_write_perfect_filter(
+					adapter->fdir_mode, hw, &rule->filter,
+					rule->hw_idx,
+					(rule->action == RNP_FDIR_DROP_QUEUE) ?
+						RNP_FDIR_DROP_QUEUE :
+						adapter->rx_ring[idx]
+							->rnpgbe_queue_idx,
+					(adapter->priv_flags &
+					 RNP_PRIV_FLAG_REMAP_PRIO) ?
+						true :
+						false);
+			} else {
+				/* vf rules and ACTION_TO_MPE take this path */
+				err = rnpgbe_fdir_write_perfect_filter(
+					adapter->fdir_mode, hw, &rule->filter,
+					rule->hw_idx,
+					(rule->action == RNP_FDIR_DROP_QUEUE) ?
+						RNP_FDIR_DROP_QUEUE :
+						rule->action,
+					(adapter->priv_flags &
+					 RNP_PRIV_FLAG_REMAP_PRIO) ?
+						true :
+						false);
+			}
+			if (err)
+				return -EINVAL;
+		}
+	}
+
+	/* update counts */
+	adapter->fdir_filter_count++;
+	if (input->filter.formatted.flow_type == RNP_ATR_FLOW_TYPE_ETHER) {
+		/* used to determine hw reg offset */
+		adapter->layer2_count--;
+	} else {
+		adapter->tuple_5_count--;
+	}
+	return 0;
+}
+
+/* used to debug flow_spec info */
+static void print_fsp(struct ethtool_rx_flow_spec *fsp)
+{
+	int i;
+
+	switch (fsp->flow_type & ~FLOW_EXT) {
+	case ETHER_FLOW:
+		for (i = 0; i < ETH_ALEN; i++)
+			dbg("src 0x%02x\n", fsp->h_u.ether_spec.h_source[i]);
+		for (i = 0; i < ETH_ALEN; i++)
+			dbg("dst 0x%02x\n", fsp->h_u.ether_spec.h_dest[i]);
+		for (i = 0; i < ETH_ALEN; i++)
+			dbg("src mask 0x%02x\n",
+			    fsp->m_u.ether_spec.h_source[i]);
+		for (i = 0; i < ETH_ALEN; i++)
+			dbg("dst mask 0x%02x\n", fsp->m_u.ether_spec.h_dest[i]);
+
+		dbg("proto type is %x\n", fsp->h_u.ether_spec.h_proto);
+
+		break;
+
+	default:
+		dbg("flow type is %x\n", fsp->flow_type);
+
+		dbg("ip4 src ip is %x\n", fsp->h_u.tcp_ip4_spec.ip4src);
+		dbg("ip4 src ip mask is %x\n", fsp->m_u.tcp_ip4_spec.ip4src);
+
+		dbg("ip4 dst ip is %x\n", fsp->h_u.tcp_ip4_spec.ip4dst);
+		dbg("ip4 dst ip mask is %x\n", fsp->m_u.tcp_ip4_spec.ip4dst);
+
+		dbg("ip4 src port is %x\n", fsp->h_u.tcp_ip4_spec.psrc);
+		dbg("ip4 src port mask is %x\n", fsp->m_u.tcp_ip4_spec.psrc);
+
+		dbg("ip4 dst port is %x\n", fsp->h_u.tcp_ip4_spec.pdst);
+		dbg("ip4 dst port mask is %x\n", fsp->m_u.tcp_ip4_spec.pdst);
+
+		dbg("l4 proto type is %x\n", fsp->h_u.usr_ip4_spec.proto);
+		break;
+	}
+}
+
+static int rnpgbe_add_ethtool_fdir_entry(struct rnpgbe_adapter *adapter,
+					 struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_rx_flow_spec *fsp =
+		(struct ethtool_rx_flow_spec *)&cmd->fs;
+	struct rnpgbe_fdir_filter *input;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	// int use_old = 0;
+	/* we don't support mask */
+	// union rnpgbe_atr_input mask;
+	int err;
+
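+	/* the upper 32 bits of the ring cookie carry the target vf number */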
+	u32 ring_cookie_high = fsp->ring_cookie >> 32;
+
+	if (!(adapter->flags & RNP_FLAG_FDIR_PERFECT_CAPABLE))
+		return -EOPNOTSUPP;
+
+	/*
+	 * Don't allow programming if the action is a queue greater than
+	 * the number of online Rx queues.
+	 */
+	/* if sriov is on, allow a vf number plus a queue index;
+	 * the vf number must not exceed num_vfs
+	 */
+	// dump_fsp(fsp);
+	print_fsp(fsp);
+	if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) {
+		if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
+		    (((ring_cookie_high & 0xff) > adapter->num_vfs) ||
+		     ((fsp->ring_cookie & (u64)0xffffffff) >=
+		      hw->sriov_ring_limit)))
+			return -EINVAL;
+
+	} else {
+		if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
+		    (fsp->ring_cookie >= adapter->num_rx_queues)) {
+			/* ACTION_TO_MPE routes to the mpe as a special case */
+			if (fsp->ring_cookie != ACTION_TO_MPE)
+				return -EINVAL;
+		}
+	}
+
+	/* Don't allow indexes to exist outside of available space */
+	if (fsp->location >= (adapter->fdir_pballoc)) {
+		e_err(drv, "Location out of range\n");
+		return -EINVAL;
+	}
+
+	input = kzalloc(sizeof(*input), GFP_ATOMIC);
+	if (!input)
+		return -ENOMEM;
+
+	// memset(&mask, 0, sizeof(union rnpgbe_atr_input));
+
+	/* set SW index */
+	input->sw_idx = fsp->location;
+
+	/* record flow type */
+	if (!rnpgbe_flowspec_to_flow_type(
+		    adapter, fsp, &input->filter.formatted.flow_type, input)) {
+		e_err(drv, "Unrecognized flow type\n");
+		goto err_out;
+	}
+
+	if (input->filter.formatted.flow_type == RNP_ATR_FLOW_TYPE_ETHER) {
+		/* used to determine hw reg offset */
+		// input->hw_idx = adapter->layer2_count;
+	} else if (input->filter.formatted.flow_type ==
+		   RNP_ATR_FLOW_TYPE_IPV4) {
+		/* Copy input into formatted structures */
+		input->filter.formatted.src_ip[0] =
+			fsp->h_u.usr_ip4_spec.ip4src;
+		input->filter.formatted.src_ip_mask[0] =
+			fsp->m_u.usr_ip4_spec.ip4src;
+		input->filter.formatted.dst_ip[0] =
+			fsp->h_u.usr_ip4_spec.ip4dst;
+		input->filter.formatted.dst_ip_mask[0] =
+			fsp->m_u.usr_ip4_spec.ip4dst;
+		input->filter.formatted.src_port = 0;
+		input->filter.formatted.src_port_mask = 0xffff;
+		input->filter.formatted.dst_port = 0;
+		input->filter.formatted.dst_port_mask = 0xffff;
+		input->filter.formatted.inner_mac[0] =
+			fsp->h_u.usr_ip4_spec.proto;
+		input->filter.formatted.inner_mac_mask[0] =
+			fsp->m_u.usr_ip4_spec.proto;
+	} else { /* tcp or udp or sctp */
+		/* Copy input into formatted structures */
+		input->filter.formatted.src_ip[0] =
+			fsp->h_u.tcp_ip4_spec.ip4src;
+		input->filter.formatted.src_ip_mask[0] =
+			fsp->m_u.tcp_ip4_spec.ip4src;
+		input->filter.formatted.dst_ip[0] =
+			fsp->h_u.tcp_ip4_spec.ip4dst;
+		input->filter.formatted.dst_ip_mask[0] =
+			fsp->m_u.tcp_ip4_spec.ip4dst;
+		input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
+		input->filter.formatted.src_port_mask =
+			fsp->m_u.tcp_ip4_spec.psrc;
+		input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+		input->filter.formatted.dst_port_mask =
+			fsp->m_u.tcp_ip4_spec.pdst;
+	}
+
+	/* determine if we need to drop or route the packet */
+	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
+		input->action = RNP_FDIR_DROP_QUEUE;
+	else {
+		input->vf_num = (fsp->ring_cookie >> 32) & 0xff;
+		if (input->vf_num) {
+			/* with a vf, action is the absolute hw queue number */
+			input->action =
+				hw->sriov_ring_limit *
+					(((fsp->ring_cookie >> 32) & 0xff) -
+					 1) +
+				(fsp->ring_cookie & 0xffffffff);
+		} else
+			input->action = fsp->ring_cookie;
+	}
+
+	spin_lock(&adapter->fdir_perfect_lock);
+	err = rnpgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
+	spin_unlock(&adapter->fdir_perfect_lock);
+
+	return err;
+err_out:
+	kfree(input);
+	return -EINVAL;
+}
+
+static int rnpgbe_del_ethtool_fdir_entry(struct rnpgbe_adapter *adapter,
+					 struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_rx_flow_spec *fsp =
+		(struct ethtool_rx_flow_spec *)&cmd->fs;
+	int err;
+
+	spin_lock(&adapter->fdir_perfect_lock);
+	err = rnpgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
+	spin_unlock(&adapter->fdir_perfect_lock);
+
+	return err;
+}
+
+int rnpgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+	int ret = -EOPNOTSUPP;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_SRXCLSRLINS:
+		ret = rnpgbe_add_ethtool_fdir_entry(adapter, cmd);
+		break;
+	case ETHTOOL_SRXCLSRLDEL:
+		ret = rnpgbe_del_ethtool_fdir_entry(adapter, cmd);
+		break;
+	case ETHTOOL_SRXFH:
+		ret = rnpgbe_set_rss_hash_opt(adapter, cmd);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+#endif
+#ifdef ETHTOOL_SRXNTUPLE
+/*
+ * We need to keep this around for kernels 2.6.33 - 2.6.39 in order to avoid
+ * a null pointer dereference, as those kernels assume this function is
+ * present whenever the NETIF_F_NTUPLE flag is defined.
+ */
+int rnpgbe_set_rx_ntuple(struct net_device __always_unused *dev,
+			 struct ethtool_rx_ntuple __always_unused *cmd)
+{
+	return -EOPNOTSUPP;
+}
+
+#endif
+
+#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)
+
+u32 rnpgbe_rss_indir_size(struct net_device *netdev)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	return rnpgbe_rss_indir_tbl_entries(adapter);
+}
+
+u32 rnpgbe_get_rxfh_key_size(struct net_device *netdev)
+{
+	return RNP_RSS_KEY_SIZE;
+}
+
+void rnpgbe_get_reta(struct rnpgbe_adapter *adapter, u32 *indir)
+{
+	int i, reta_size = rnpgbe_rss_indir_tbl_entries(adapter);
+	u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;
+
+	if (adapter->flags & RNP_FLAG_SRIOV_ENABLED)
+		rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;
+
+	for (i = 0; i < reta_size; i++)
+		indir[i] = adapter->rss_indir_tbl[i] & rss_m;
+}
+
+#ifdef HAVE_RXFH_HASHFUNC
+#ifdef HAVE_ETHTOOL_RXFH_PARAM
+int rnpgbe_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
+#else /* HAVE_ETHTOOL_RXFH_PARAM */
+int rnpgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
+#endif /* HAVE_ETHTOOL_RXFH_PARAM */
+#else /* HAVE_RXFH_HASHFUNC */
+int rnpgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
+#endif /* HAVE_RXFH_HASHFUNC */
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	u8 *seed = NULL;
+
+#ifdef HAVE_RXFH_HASHFUNC
+#ifdef HAVE_ETHTOOL_RXFH_PARAM
+	if (rxfh->hfunc) {
+		switch (adapter->rss_func_mode) {
+		case rss_func_top:
+			rxfh->hfunc = ETH_RSS_HASH_TOP;
+			break;
+		case rss_func_xor:
+			rxfh->hfunc = ETH_RSS_HASH_XOR;
+			break;
+		case rss_func_order:
+			rxfh->hfunc = ETH_RSS_HASH_TOP;
+			break;
+		}
+	}
+
+#else
+	if (hfunc) {
+		switch (adapter->rss_func_mode) {
+		case rss_func_top:
+			*hfunc = ETH_RSS_HASH_TOP;
+			break;
+		case rss_func_xor:
+			*hfunc = ETH_RSS_HASH_XOR;
+			break;
+		case rss_func_order:
+			*hfunc = ETH_RSS_HASH_TOP;
+			break;
+		}
+	}
+#endif
+#endif
+
+#ifdef HAVE_ETHTOOL_RXFH_PARAM
+	if (rxfh->indir)
+		rnpgbe_get_reta(adapter, rxfh->indir);
+#else
+	if (indir)
+		rnpgbe_get_reta(adapter, indir);
+#endif
+
+#ifdef HAVE_ETHTOOL_RXFH_PARAM
+	if (rxfh->key)
+		seed = rxfh->key;
+#else
+	if (key)
+		seed = key;
+#endif
+	if (seed)
+		memcpy(seed, adapter->rss_key, rnpgbe_get_rxfh_key_size(netdev));
+
+	return 0;
+}
+
+static int check_fw_type(struct rnpgbe_hw *hw, const u8 *data)
+{
+	u32 device_id;
+	int ret = 0;
+
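+	/* the device id is stored as a 16-bit word at byte offset 60 */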
+	device_id = *((u16 *)data + 30);
+
+	/* if no device_id no check */
+	if ((device_id == 0) || (device_id == 0xffff))
+		return 0;
+
+	switch (hw->hw_type) {
+	case rnpgbe_hw_n500:
+		if (device_id != 0x8308)
+			ret = 1;
+		break;
+	case rnpgbe_hw_n210:
+		if (device_id != 0x8208)
+			ret = 1;
+		break;
+	case rnpgbe_hw_n210L:
+		if (device_id != 0x820a)
+			ret = 1;
+		break;
+	default:
+		ret = 1;
+	}
+
+	return ret;
+}
+
+static int rnpgbe_flash_firmware(struct rnpgbe_adapter *adapter, int region,
+				 const u8 *data, int bytes)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	if ((hw->hw_type == rnpgbe_hw_n500) ||
+	    (hw->hw_type == rnpgbe_hw_n210) ||
+	    (hw->hw_type == rnpgbe_hw_n210L)) {
+		switch (region) {
+		case PART_FW:
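+			/* reject images without the expected header magic or
+			 * built for a different device
+			 */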
+			if ((*((u32 *)(data)) != 0xa55aa55a) ||
+			    check_fw_type(hw, data))
+				return -EINVAL;
+			break;
+		/* we only support updating the firmware region on these parts */
+		//case PART_CFG:
+		//	if (*((u32 *)(data)) != 0x00010cf9)
+		//		return -EINVAL;
+		//	break;
+		//case PART_MACSN:
+		//	break;
+		//case PART_PCSPHY:
+		//	if (*((u16 *)(data)) != 0x081d)
+		//		return -EINVAL;
+		//	break;
+		//case PART_PXE:
+		//	if (*((u16 *)(data)) != 0xaa55)
+		//		return -EINVAL;
+		//	break;
+		default:
+			return -EINVAL;
+		}
+		return rnp500_fw_update(hw, region, data, bytes);
+
+	} else {
+		switch (region) {
+		case PART_FW:
+			if (*((u32 *)(data + 28)) != 0xA51BBEAF)
+				return -EINVAL;
+			break;
+		case PART_CFG:
+			if (*((u32 *)(data)) != 0x00010cf9)
+				return -EINVAL;
+			break;
+		case PART_MACSN:
+			break;
+		case PART_PCSPHY:
+			if (*((u16 *)(data)) != 0x081d)
+				return -EINVAL;
+			break;
+		case PART_PXE:
+			if (*((u16 *)(data)) != 0xaa55)
+				return -EINVAL;
+			break;
+		default:
+			return -EINVAL;
+		}
+		return rnpgbe_fw_update(hw, region, data, bytes);
+	}
+}
+
+static int rnpgbe_flash_firmware_from_file(struct net_device *dev,
+					   struct rnpgbe_adapter *adapter,
+					   int region, const char *filename)
+{
+	const struct firmware *fw;
+	int rc;
+
+	rc = request_firmware(&fw, filename, &dev->dev);
+	if (rc != 0) {
+		netdev_err(dev, "Error %d requesting firmware file: %s\n", rc,
+			   filename);
+		return rc;
+	}
+
+	rc = rnpgbe_flash_firmware(adapter, region, fw->data, fw->size);
+	release_firmware(fw);
+	return rc;
+}
+
+int rnpgbe_flash_device(struct net_device *dev, struct ethtool_flash *flash)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+
+	if (IS_VF(adapter->hw.pfvfnum)) {
+		netdev_err(dev,
+			   "flashdev not supported from a virtual function\n");
+		return -EINVAL;
+	}
+
+	return rnpgbe_flash_firmware_from_file(dev, adapter, flash->region,
+					       flash->data);
+}
+static int rnpgbe_rss_indir_tbl_max(struct rnpgbe_adapter *adapter)
+{
+	if (adapter->hw.rss_type == rnpgbe_rss_uv3p)
+		return 8;
+	else if (adapter->hw.rss_type == rnpgbe_rss_uv440)
+		return 128;
+	else if (adapter->hw.rss_type == rnpgbe_rss_n10)
+		return 128;
+	else if (adapter->hw.rss_type == rnpgbe_rss_n500)
+		return 128;
+	else
+		return 128;
+}
+
+#ifdef HAVE_RXFH_HASHFUNC
+#ifdef HAVE_ETHTOOL_RXFH_PARAM
+int rnpgbe_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
+		    struct netlink_ext_ack *extack)
+#else
+int rnpgbe_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
+		    const u8 hfunc)
+#endif
+#else
+#ifdef HAVE_RXFH_NONCONST
+int rnpgbe_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
+#else
+int rnpgbe_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key)
+#endif /* HAVE_RXFH_NONCONST */
+#endif /* HAVE_RXFH_HASHFUNC */
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+#ifdef HAVE_RXFH_HASHFUNC
+	struct rnpgbe_hw *hw = &adapter->hw;
+#endif
+	int i;
+	u32 reta_entries = rnpgbe_rss_indir_tbl_entries(adapter);
+
+#ifdef HAVE_RXFH_HASHFUNC
+#ifdef HAVE_ETHTOOL_RXFH_PARAM
+	if (rxfh->hfunc) {
+		if (hw->ops.set_rss_hfunc) {
+			if (hw->ops.set_rss_hfunc(hw, rxfh->hfunc))
+				return -EINVAL;
+		} else
+			return -EINVAL;
+
+	} else {
+		if (hw->ops.set_rss_hfunc)
+			hw->ops.set_rss_hfunc(hw, rxfh->hfunc);
+	}
+
+#else
+	if (hfunc) {
+		if (hw->ops.set_rss_hfunc) {
+			if (hw->ops.set_rss_hfunc(hw, hfunc))
+				return -EINVAL;
+		} else
+			return -EINVAL;
+
+	} else {
+		if (hw->ops.set_rss_hfunc)
+			hw->ops.set_rss_hfunc(hw, hfunc);
+	}
+#endif
+#endif
+
+#ifdef HAVE_ETHTOOL_RXFH_PARAM
+	if ((rxfh->indir) && (adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+		return -EINVAL;
+#else
+	if ((indir) && (adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+		return -EINVAL;
+#endif
+	/* Fill out the redirection table */
+#ifdef HAVE_ETHTOOL_RXFH_PARAM
+	if (rxfh->indir) {
+#else
+	if (indir) {
+#endif
+		int max_queues = min_t(int, adapter->num_rx_queues,
+				       rnpgbe_rss_indir_tbl_max(adapter));
+
+		/* allow at most 1 queue w/ SR-IOV */
+		if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED) &&
+		    (max_queues > 1))
+			max_queues = 1;
+
+		/* Verify user input. */
+#ifdef HAVE_ETHTOOL_RXFH_PARAM
+		for (i = 0; i < reta_entries; i++) {
+			if (rxfh->indir[i] >= max_queues)
+				return -EINVAL;
+		}
+		/* store rss tbl */
+		for (i = 0; i < reta_entries; i++)
+			adapter->rss_indir_tbl[i] = rxfh->indir[i];
+#else
+		for (i = 0; i < reta_entries; i++) {
+			if (indir[i] >= max_queues)
+				return -EINVAL;
+		}
+		/* store rss tbl */
+		for (i = 0; i < reta_entries; i++)
+			adapter->rss_indir_tbl[i] = indir[i];
+#endif
+
+		rnpgbe_store_reta(adapter);
+	}
+
+#ifdef HAVE_ETHTOOL_RXFH_PARAM
+	/* Fill out the rss hash key */
+	if (rxfh->key) {
+		memcpy(adapter->rss_key, rxfh->key, rnpgbe_get_rxfh_key_size(netdev));
+		rnpgbe_store_key(adapter);
+	}
+#else
+	/* Fill out the rss hash key */
+	if (key) {
+		memcpy(adapter->rss_key, key, rnpgbe_get_rxfh_key_size(netdev));
+		rnpgbe_store_key(adapter);
+	}
+#endif
+	return 0;
+}
+
+#endif
+
+#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+static const struct ethtool_ops_ext rnpgbe_ethtool_ops_ext = {
+	.size = sizeof(struct ethtool_ops_ext),
+	.get_ts_info = rnpgbe_get_ts_info,
+	.set_phys_id = rnpgbe_set_phys_id,
+	.get_eee = rnpgbe_get_eee,
+	.set_eee = rnpgbe_set_eee,
+	.get_channels = rnpgbe_get_channels,
+	.set_channels = rnpgbe_set_channels,
+#ifdef ETHTOOL_GMODULEINFO
+	.get_module_info = rnpgbe_get_module_info,
+	.get_module_eeprom = rnpgbe_get_module_eeprom,
+#endif
+#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)
+	.get_rxfh_indir_size = rnpgbe_rss_indir_size,
+	.get_rxfh_key_size = rnpgbe_get_rxfh_key_size,
+	.get_rxfh = rnpgbe_get_rxfh,
+	.set_rxfh = rnpgbe_set_rxfh,
+#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */
+};
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+
+void rnpgbe_set_ethtool_ops(struct net_device *netdev)
+{
+#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+	set_ethtool_ops_ext(netdev, &rnpgbe_ethtool_ops_ext);
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ethtool.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ethtool.h
new file mode 100755
index 0000000000000000000000000000000000000000..e1216d5b7a970f60eaa14ea0bf75820ba0f6e943
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ethtool.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPGBE_ETHTOOL_H_
+#define _RNPGBE_ETHTOOL_H_
+
+enum { NETDEV_STATS, RNP_STATS };
+
+struct rnpgbe_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+};
+
+/* rnp allocates num_tx_queues and num_rx_queues symmetrically so
+ * we set the num_rx_queues to evaluate to num_tx_queues. This is
+ * used because we do not have a good way to get the max number of
+ * rx queues with CONFIG_RPS disabled.
+ */
+#ifdef HAVE_TX_MQ
+#ifdef HAVE_NETDEV_SELECT_QUEUE
+#ifdef NO_REAL_QUEUE_NUM
+#define RNP_NUM_RX_QUEUES netdev->num_tx_queues
+#define RNP_NUM_TX_QUEUES netdev->num_tx_queues
+#else /* NO_REAL_QUEUE_NUM */
+#define RNP_NUM_RX_QUEUES netdev->real_num_rx_queues
+#define RNP_NUM_TX_QUEUES netdev->real_num_tx_queues
+#endif /* NO_REAL_QUEUE_NUM */
+#else /* HAVE_NETDEV_SELECT_QUEUE */
+#define RNP_NUM_RX_QUEUES adapter->indices
+#define RNP_NUM_TX_QUEUES adapter->indices
+#endif /* HAVE_NETDEV_SELECT_QUEUE */
+#else /* HAVE_TX_MQ */
+#define RNP_NUM_TX_QUEUES 1
+#define RNP_NUM_RX_QUEUES                                                      \
+	(((struct rnpgbe_adapter *)netdev_priv(netdev))->num_rx_queues)
+#endif /* HAVE_TX_MQ */
+
+#define RNP_NETDEV_STAT(_net_stat)                                             \
+	{                                                                      \
+		.stat_string = #_net_stat,                                     \
+		.sizeof_stat =                                                 \
+			sizeof_field(struct net_device_stats, _net_stat),      \
+		.stat_offset = offsetof(struct net_device_stats, _net_stat)    \
+	}
+
+#define RNP_HW_STAT(_name, _stat)                                              \
+	{                                                                      \
+		.stat_string = _name,                                          \
+		.sizeof_stat = sizeof_field(struct rnpgbe_adapter, _stat),     \
+		.stat_offset = offsetof(struct rnpgbe_adapter, _stat)          \
+	}
+
+struct rnpgbe_tx_queue_ring_stat {
+	u64 hw_head;
+	u64 hw_tail;
+	u64 sw_to_clean;
+	u64 sw_to_next_to_use;
+};
+
+struct rnpgbe_rx_queue_ring_stat {
+	u64 hw_head;
+	u64 hw_tail;
+	u64 sw_to_use;
+	u64 sw_to_clean;
+};
+
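+/* total number of per-queue u64 counters exported via ethtool -S */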
+#define RNP_QUEUE_STATS_LEN                                                    \
+	(RNP_NUM_TX_QUEUES *                                                   \
+		 (sizeof(struct rnpgbe_tx_queue_stats) / sizeof(u64) +         \
+		  sizeof(struct rnpgbe_queue_stats) / sizeof(u64) +            \
+		  sizeof(struct rnpgbe_tx_queue_ring_stat) / sizeof(u64)) +    \
+	 RNP_NUM_RX_QUEUES *                                                   \
+		 (sizeof(struct rnpgbe_rx_queue_stats) / sizeof(u64) +         \
+		  sizeof(struct rnpgbe_queue_stats) / sizeof(u64) +            \
+		  sizeof(struct rnpgbe_rx_queue_ring_stat) / sizeof(u64)))
+
+#define RNP_STATS_LEN                                                          \
+	(RNP_GLOBAL_STATS_LEN + RNP_HWSTRINGS_STATS_LEN + RNP_QUEUE_STATS_LEN)
+
+int rnpgbe_wol_exclusion(struct rnpgbe_adapter *adapter,
+			 struct ethtool_wolinfo *wol);
+void rnpgbe_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol);
+int rnpgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol);
+void rnpgbe_diag_test(struct net_device *netdev, struct ethtool_test *eth_test,
+		      u64 *data);
+u32 rnpgbe_get_msglevel(struct net_device *netdev);
+void rnpgbe_set_msglevel(struct net_device *netdev, u32 data);
+int rnpgbe_set_phys_id(struct net_device *netdev,
+		       enum ethtool_phys_id_state state);
+int rnpgbe_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info);
+void rnpgbe_get_channels(struct net_device *dev, struct ethtool_channels *ch);
+int rnpgbe_set_channels(struct net_device *dev, struct ethtool_channels *ch);
+int rnpgbe_get_module_info(struct net_device *dev,
+			   struct ethtool_modinfo *modinfo);
+int rnpgbe_get_module_eeprom(struct net_device *dev,
+			     struct ethtool_eeprom *eeprom, u8 *data);
+#ifdef ETHTOOL_GEEE
+int rnpgbe_get_keee(struct net_device *netdev, struct ethtool_keee *edata);
+#ifndef HAVE_ETHTOOL_KEEE
+int rnpgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata);
+#endif /* HAVE_ETHTOOL_KEEE */
+#endif /* ETHTOOL_GEEE */
+#ifdef ETHTOOL_SEEE
+int rnpgbe_set_keee(struct net_device *netdev, struct ethtool_keee *edata);
+#ifndef HAVE_ETHTOOL_KEEE
+int rnpgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata);
+#endif /* HAVE_ETHTOOL_KEEE */
+#endif /* ETHTOOL_SEEE */
+#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS
+void rnpgbe_get_ringparam(struct net_device *netdev,
+			  struct ethtool_ringparam *ring,
+			  struct kernel_ethtool_ringparam __always_unused *ker,
+			  struct netlink_ext_ack __always_unused *extack);
+#else /* HAVE_ETHTOOL_EXTENDED_RINGPARAMS */
+void rnpgbe_get_ringparam(struct net_device *netdev,
+			  struct ethtool_ringparam *ring);
+#endif /* HAVE_ETHTOOL_EXTENDED_RINGPARAMS */
+#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS
+int rnpgbe_set_ringparam(struct net_device *netdev,
+			 struct ethtool_ringparam *ring,
+			 struct kernel_ethtool_ringparam __always_unused *ker,
+			 struct netlink_ext_ack __always_unused *extack);
+#else /* HAVE_ETHTOOL_EXTENDED_RINGPARAMS */
+int rnpgbe_set_ringparam(struct net_device *netdev,
+			 struct ethtool_ringparam *ring);
+#endif /* HAVE_ETHTOOL_EXTENDED_RINGPARAMS */
+int rnpgbe_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump);
+int rnpgbe_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
+			 void *buffer);
+int rnpgbe_set_dump(struct net_device *netdev, struct ethtool_dump *dump);
+int rnpgbe_get_coalesce(struct net_device *netdev,
+#ifdef HAVE_ETHTOOL_COALESCE_EXTACK
+			struct ethtool_coalesce *coal,
+			struct kernel_ethtool_coalesce *kernel_coal,
+			struct netlink_ext_ack *extack);
+#else /* HAVE_ETHTOOL_COALESCE_EXTACK */
+			struct ethtool_coalesce *coal);
+#endif /* HAVE_ETHTOOL_COALESCE_EXTACK */
+int rnpgbe_set_coalesce(struct net_device *netdev,
+#ifdef HAVE_ETHTOOL_COALESCE_EXTACK
+			struct ethtool_coalesce *ec,
+			struct kernel_ethtool_coalesce *kernel_coal,
+			struct netlink_ext_ack *extack);
+#else /* HAVE_ETHTOOL_COALESCE_EXTACK */
+			struct ethtool_coalesce *ec);
+#endif /* HAVE_ETHTOOL_COALESCE_EXTACK */
+
+#ifndef HAVE_NDO_SET_FEATURES
+u32 rnpgbe_get_rx_csum(struct net_device *netdev);
+int rnpgbe_set_rx_csum(struct net_device *netdev, u32 data);
+int rnpgbe_set_tx_csum(struct net_device *netdev, u32 data);
+#ifdef NETIF_F_TSO
+int rnpgbe_set_tso(struct net_device *netdev, u32 data);
+#endif /* NETIF_F_TSO */
+#endif /* HAVE_NDO_SET_FEATURES */
+#ifdef ETHTOOL_GRXRINGS
+int rnpgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
+		     void *rule_locs);
+#else /* HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS */
+		     u32 *rule_locs);
+#endif /* HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS */
+int rnpgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
+#endif /* ETHTOOL_GRXRINGS */
+#ifdef ETHTOOL_SRXNTUPLE
+int rnpgbe_set_rx_ntuple(struct net_device __always_unused *dev,
+			 struct ethtool_rx_ntuple __always_unused *cmd);
+#endif /* ETHTOOL_SRXNTUPLE */
+#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)
+u32 rnpgbe_rss_indir_size(struct net_device *netdev);
+u32 rnpgbe_get_rxfh_key_size(struct net_device *netdev);
+void rnpgbe_get_reta(struct rnpgbe_adapter *adapter, u32 *indir);
+#ifdef HAVE_RXFH_HASHFUNC
+#ifdef HAVE_ETHTOOL_RXFH_PARAM
+int rnpgbe_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh);
+#else /* HAVE_ETHTOOL_RXFH_PARAM */
+int rnpgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
+#endif /* HAVE_ETHTOOL_RXFH_PARAM */
+#else /* HAVE_RXFH_HASHFUNC */
+int rnpgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key);
+#endif /* HAVE_RXFH_HASHFUNC */
+int rnpgbe_flash_device(struct net_device *dev, struct ethtool_flash *flash);
+#ifdef HAVE_RXFH_HASHFUNC
+#ifdef HAVE_ETHTOOL_RXFH_PARAM
+int rnpgbe_set_rxfh(struct net_device *netdev,
+		    struct ethtool_rxfh_param *rxfh,
+		    struct netlink_ext_ack *extack);
+#else /* HAVE_ETHTOOL_RXFH_PARAM */
+int rnpgbe_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
+		    const u8 hfunc);
+#endif /* HAVE_ETHTOOL_RXFH_PARAM */
+#else /* HAVE_RXFH_HASHFUNC */
+#ifdef HAVE_RXFH_NONCONST
+int rnpgbe_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key);
+#else /* HAVE_RXFH_NONCONST */
+int rnpgbe_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key);
+#endif /* HAVE_RXFH_NONCONST */
+#endif /* HAVE_RXFH_HASHFUNC */
+#endif /* defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) */
+#endif /* _RNPGBE_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
new file mode 100755
index 0000000000000000000000000000000000000000..a83171e4de31ac39773f3cb116733554487664c6
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
@@ -0,0 +1,1229 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include "rnpgbe.h"
+#include "rnpgbe_sriov.h"
+#include "rnpgbe_common.h"
+
+#ifdef CONFIG_RNP_DCB
+
+/**
+ * rnpgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
+ * @adapter: board private structure to initialize
+ *
+ * Verify that DCB queueing and VMDq/SR-IOV are both enabled before any
+ * SR-IOV specific ring caching is done.
+ *
+ **/
+static bool rnpgbe_cache_ring_dcb_sriov(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+	int i;
+	u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+	/* verify we have DCB queueing enabled before proceeding */
+	if (tcs <= 1)
+		return false;
+
+	/* verify we have VMDq enabled before proceeding */
+	if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+		return false;
+
+	return true;
+}
+#endif /* CONFIG_RNP_DCB */
+
+/**
+ * rnpgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for DCB to the assigned rings.
+ *
+ **/
+static bool rnpgbe_cache_ring_dcb(struct rnpgbe_adapter *adapter)
+{
+	struct net_device *dev = adapter->netdev;
+	unsigned int tx_idx, rx_idx;
+	int tc, offset, rss_i, i, step;
+	u8 num_tcs = netdev_get_num_tc(dev);
+	struct rnpgbe_ring *ring;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+
+	/* verify we have DCB queueing enabled before proceeding */
+	if (num_tcs <= 1)
+		return false;
+
+	rss_i = adapter->ring_feature[RING_F_RSS].indices;
+
+	step = 4;
+	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
+		/*
+		 * rings are assigned per tc with a stride of 4:
+		 * tc0 0 4 8 c
+		 * tc1 1 5 9 d
+		 * tc2 2 6 a e
+		 * tc3 3 7 b f
+		 */
+		tx_idx = tc;
+		rx_idx = tc;
+		for (i = 0; i < rss_i; i++, tx_idx += step, rx_idx += step) {
+			ring = adapter->tx_ring[offset + i];
+
+			ring->ring_addr =
+				dma->dma_ring_addr + RING_OFFSET(tx_idx);
+			ring->rnpgbe_queue_idx = tx_idx;
+			ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT;
+			ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK;
+			ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR;
+
+			ring = adapter->rx_ring[offset + i];
+			ring->ring_addr =
+				dma->dma_ring_addr + RING_OFFSET(rx_idx);
+			ring->rnpgbe_queue_idx = rx_idx;
+			ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT;
+			ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK;
+			ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR;
+		}
+	}
+
+	return true;
+}
+
+/**
+ * rnpgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
+ * @adapter: board private structure to initialize
+ *
+ * SR-IOV doesn't use any descriptor rings but changes the default if
+ * no other mapping is used.
+ *
+ */
+static bool rnpgbe_cache_ring_sriov(struct rnpgbe_adapter *adapter)
+{
+	/* only proceed if VMDq is enabled */
+	if (!(adapter->flags & RNP_FLAG_VMDQ_ENABLED))
+		return false;
+	return true;
+}
+
+/**
+ * rnpgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for RSS to the assigned rings.
+ *
+ **/
+static bool rnpgbe_cache_ring_rss(struct rnpgbe_adapter *adapter)
+{
+	int i;
+	/* map sw queues onto consecutive hw rings */
+	int ring_step = 1;
+	struct rnpgbe_ring *ring;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+
+	/* some ring alloc rules can be added here */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		ring = adapter->tx_ring[i];
+		ring->rnpgbe_queue_idx = i * ring_step;
+		ring->ring_addr = dma->dma_ring_addr +
+				  RING_OFFSET(ring->rnpgbe_queue_idx);
+
+		ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT;
+		ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK;
+		ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR;
+	}
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		ring = adapter->rx_ring[i];
+		ring->rnpgbe_queue_idx = i * ring_step;
+		ring->ring_addr = dma->dma_ring_addr +
+				  RING_OFFSET(ring->rnpgbe_queue_idx);
+		ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT;
+		ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK;
+		ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR;
+	}
+
+	return true;
+}
+
+/**
+ * rnpgbe_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
+ *
+ * Note, the order the various feature calls is important.  It must start with
+ * the "most" features enabled at the same time, then trickle down to the
+ * least amount of features turned on at once.
+ **/
+static void rnpgbe_cache_ring_register(struct rnpgbe_adapter *adapter)
+{
+	/* start with default case */
+#ifdef CONFIG_RNP_DCB
+	if (rnpgbe_cache_ring_dcb_sriov(adapter))
+		return;
+
+#endif /* CONFIG_RNP_DCB */
+	if (rnpgbe_cache_ring_dcb(adapter))
+		return;
+
+	/* sriov ring mapping was handled earlier, so this may be a no-op */
+	if (rnpgbe_cache_ring_sriov(adapter))
+		return;
+
+	rnpgbe_cache_ring_rss(adapter);
+}
+
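+/* rss indirection masks for the queue counts supported by the hw */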
+#define RNP_RSS_128Q_MASK 0x7F
+#define RNP_RSS_64Q_MASK 0x3F
+#define RNP_RSS_16Q_MASK 0xF
+#define RNP_RSS_32Q_MASK 0x1F
+#define RNP_RSS_8Q_MASK 0x7
+#define RNP_RSS_4Q_MASK 0x3
+#define RNP_RSS_2Q_MASK 0x1
+#define RNP_RSS_DISABLED_MASK 0x0
+
+#ifdef CONFIG_RNP_DCB
+/**
+ * rnpgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
+ * @adapter: board private structure to initialize
+ *
+ * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
+ * and VM pools where appropriate.  Also assign queues based on DCB
+ * priorities and map accordingly.
+ *
+ **/
+static bool rnpgbe_set_dcb_sriov_queues(struct rnpgbe_adapter *adapter)
+{
+	int i;
+	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
+	u16 vmdq_m = 0;
+	u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+	/* verify we have DCB queueing enabled before proceeding */
+	if (tcs <= 1)
+		return false;
+
+	/* verify we have VMDq enabled before proceeding */
+	if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+		return false;
+
+	/* Add starting offset to total pool count */
+	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
+
+	/* 16 pools w/ 8 TC per pool */
+	if (tcs > 4) {
+		vmdq_i = min_t(u16, vmdq_i, 16);
+		vmdq_m = RNP_n10_VMDQ_8Q_MASK;
+		/* 32 pools w/ 4 TC per pool */
+	} else {
+		vmdq_i = min_t(u16, vmdq_i, 32);
+		vmdq_m = RNP_n10_VMDQ_4Q_MASK;
+	}
+
+	/* remove the starting offset from the pool count */
+	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
+
+	/* save features for later use */
+	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
+	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
+
+	/*
+	 * We do not support DCB, VMDq, and RSS all simultaneously
+	 * so we will disable RSS since it is the lowest priority
+	 */
+	adapter->ring_feature[RING_F_RSS].indices = 2;
+	adapter->ring_feature[RING_F_RSS].mask = RNP_RSS_DISABLED_MASK;
+
+	/* disable ATR as it is not supported when VMDq is enabled */
+	adapter->flags &= ~RNP_FLAG_FDIR_HASH_CAPABLE;
+
+	adapter->num_tx_queues = vmdq_i * tcs;
+	adapter->num_rx_queues = vmdq_i * tcs;
+
+	/* configure TC to queue mapping */
+	for (i = 0; i < tcs; i++)
+		netdev_set_tc_queue(adapter->netdev, i, 1, i);
+
+	return true;
+}
+#endif /* CONFIG_RNP_DCB */
+
+static bool rnpgbe_set_dcb_queues(struct rnpgbe_adapter *adapter)
+{
+	struct net_device *dev = adapter->netdev;
+	struct rnpgbe_ring_feature *f;
+	int rss_i, rss_m, i;
+	int tcs;
+
+	/* Map queue offset and counts onto allocated tx queues */
+	tcs = netdev_get_num_tc(dev);
+
+	/* verify we have DCB queueing enabled before proceeding */
+	if (tcs <= 1)
+		return false;
+
+	/* determine the upper limit for our current DCB mode */
+	rss_i = dev->num_tx_queues / tcs;
+
+	/* we only support 4 TCs, rss_i max is 32 */
+	/* 4 TC w/ 32 queues per TC */
+	rss_i = min_t(u16, rss_i, 32);
+	rss_m = RNP_RSS_32Q_MASK;
+
+	/* set RSS mask and indices */
+	/* f->limit is tied to the number of cpu vectors */
+	f = &adapter->ring_feature[RING_F_RSS];
+	/* use f->limit to change rss */
+	rss_i = min_t(int, rss_i, f->limit);
+	f->indices = rss_i;
+	f->mask = rss_m;
+
+	/* disable ATR as it is not supported when multiple TCs are enabled */
+	adapter->flags &= ~RNP_FLAG_FDIR_HASH_CAPABLE;
+
+	/* setup queue tc num */
+	for (i = 0; i < tcs; i++)
+		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
+
+	/* set the true queues */
+	adapter->num_tx_queues = rss_i * tcs;
+	adapter->num_rx_queues = rss_i * tcs;
+
+	return true;
+}
+
+/**
+ * rnpgbe_set_sriov_queues - Allocate queues for SR-IOV devices
+ * @adapter: board private structure to initialize
+ *
+ * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
+ * and VM pools where appropriate.  If RSS is available, then also try and
+ * enable RSS and map accordingly.
+ *
+ **/
+static bool rnpgbe_set_sriov_queues(struct rnpgbe_adapter *adapter)
+{
+	u16 vmdq_m = 0;
+	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
+	u16 rss_m = RNP_RSS_DISABLED_MASK;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	/* only proceed if SR-IOV is enabled */
+	if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+		return false;
+
+	/* save features for later use */
+	adapter->ring_feature[RING_F_VMDQ].indices =
+		adapter->max_ring_pair_counts - 1;
+	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
+
+	/* limit RSS based on user input and save for later use */
+	adapter->ring_feature[RING_F_RSS].indices = rss_i;
+	adapter->ring_feature[RING_F_RSS].mask = rss_m;
+
+	adapter->num_rx_queues = hw->sriov_ring_limit;
+	adapter->num_tx_queues = hw->sriov_ring_limit;
+
+	/* disable ATR as it is not supported when VMDq is enabled */
+	adapter->flags &= ~RNP_FLAG_FDIR_HASH_CAPABLE;
+
+	return true;
+}
+
+/**
+ * rnpgbe_rss_indir_tbl_entries - return the RSS indirection table size
+ * @adapter: board private structure to initialize
+ *
+ */
+u32 rnpgbe_rss_indir_tbl_entries(struct rnpgbe_adapter *adapter)
+{
+	if (adapter->hw.rss_type == rnpgbe_rss_uv3p)
+		return 8;
+	else if (adapter->hw.rss_type == rnpgbe_rss_uv440)
+		return 128;
+	else if (adapter->hw.rss_type == rnpgbe_rss_n10)
+		return 128;
+	else
+		return 128;
+}
+
+/**
+ * rnpgbe_set_rss_queues - Allocate queues for RSS
+ * @adapter: board private structure to initialize
+ *
+ * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
+ * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
+ *
+ **/
+static bool rnpgbe_set_rss_queues(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_ring_feature *f;
+	u16 rss_i;
+
+	f = &adapter->ring_feature[RING_F_RSS];
+	/* use this to change the ring count */
+	rss_i = f->limit;
+	/* set limit -> indices */
+	f->indices = rss_i;
+
+	/* should init rss mask */
+	switch (adapter->hw.rss_type) {
+	case rnpgbe_rss_uv3p:
+		f->mask = RNP_RSS_8Q_MASK;
+		break;
+	case rnpgbe_rss_uv440:
+		f->mask = RNP_RSS_64Q_MASK;
+		break;
+	case rnpgbe_rss_n10:
+		/* maybe not good */
+		f->mask = RNP_RSS_128Q_MASK;
+		break;
+		/* maybe not good */
+	case rnpgbe_rss_n500:
+		f->mask = RNP_RSS_8Q_MASK;
+		break;
+	default:
+		f->mask = 0;
+
+		break;
+	}
+
+	/* set rss_i -> adapter->num_tx_queues */
+	/* should not more than irq */
+	adapter->num_tx_queues =
+		min_t(int, rss_i, adapter->max_ring_pair_counts);
+	adapter->num_rx_queues = adapter->num_tx_queues;
+
+	rnpgbe_dbg("[%s] limit:%d indices:%d queues:%d\n", adapter->name,
+		   f->limit, f->indices, adapter->num_tx_queues);
+
+	return true;
+}
+
+/**
+ * rnpgbe_set_num_queues - Allocate queues for device, feature dependent
+ * @adapter: board private structure to initialize
+ *
+ * This is the top level queue allocation routine.  The order here is very
+ * important, starting with the "most" number of features turned on at once,
+ * and ending with the smallest set of features.  This way large combinations
+ * can be allocated if they're turned on, and smaller combinations are the
+ * fallthrough conditions.
+ *
+ **/
+static void rnpgbe_set_num_queues(struct rnpgbe_adapter *adapter)
+{
+	/* Start with base case */
+	adapter->num_tx_queues = 1;
+	adapter->num_rx_queues = 1;
+
+#ifdef CONFIG_RNP_DCB
+	if (rnpgbe_set_dcb_sriov_queues(adapter))
+		return;
+
+#endif /* CONFIG_RNP_DCB */
+	if (rnpgbe_set_dcb_queues(adapter))
+		return;
+
+	if (rnpgbe_set_sriov_queues(adapter))
+		return;
+	/* at last we support rss */
+	rnpgbe_set_rss_queues(adapter);
+}
+
+static int rnpgbe_acquire_msix_vectors(struct rnpgbe_adapter *adapter,
+				       int vectors)
+{
+	int err;
+
+#ifdef DISABLE_RX_IRQ
+	vectors -= adapter->num_other_vectors;
+	adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
+	return 0;
+#endif /* DISABLE_RX_IRQ */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+				    vectors, vectors);
+#else
+	err = pci_enable_msix(adapter->pdev, adapter->msix_entries, vectors);
+#endif
+	if (err < 0) {
+		rnpgbe_err("pci_enable_msix failed: req:%d err:%d\n", vectors,
+			   err);
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+		return -EINVAL;
+	}
+	/*
+	 * Adjust for only the vectors we'll use, which is minimum
+	 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
+	 * vectors we were allocated.
+	 */
+	vectors -= adapter->num_other_vectors;
+	adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
+	/* in dcb we use max 32 q-vectors */
+	/* each vectors for max 4 tcs */
+	if (adapter->flags & RNP_FLAG_DCB_ENABLED)
+		adapter->num_q_vectors = min(32, adapter->num_q_vectors);
+
+	return 0;
+}
+
+static void rnpgbe_add_ring(struct rnpgbe_ring *ring,
+			    struct rnpgbe_ring_container *head)
+{
+	ring->next = head->ring;
+	head->ring = ring;
+	head->count++;
+}
+
+/**
+ * rnpgbe_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @adapter: board private structure to initialize
+ * @eth_queue_idx: queue_index idx for this q_vector
+ * @v_idx: index of vector used for this q_vector
+ * @r_idx: hw index of the first ring assigned to this q_vector
+ * @r_count: number of tx/rx ring pairs to assign
+ * @step: stride between consecutive hw ring indices
+ *
+ * We allocate one q_vector.  If allocation fails we return -ENOMEM.
+ **/
+static int rnpgbe_alloc_q_vector(struct rnpgbe_adapter *adapter,
+				 int eth_queue_idx, int v_idx, int r_idx,
+				 int r_count, int step)
+{
+	struct rnpgbe_q_vector *q_vector;
+	struct rnpgbe_ring *ring;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	int node = NUMA_NO_NODE;
+	int cpu = -1;
+	int ring_count, size;
+	int txr_count, rxr_count, idx;
+	int rxr_idx = r_idx, txr_idx = r_idx;
+	int cpu_offset = 0;
+#ifdef CPU_OFFSET_TEST
+	struct device *dev = &adapter->pdev->dev;
+	int i;
+	int orig_node = dev_to_node(dev);
+#endif /* CPU_OFFSET_TEST */
+
+	DPRINTK(PROBE, INFO,
+		"eth_queue_idx:%d v_idx:%d(off:%d) ring:%d ring_cnt:%d, "
+		"step:%d\n",
+		eth_queue_idx, v_idx, adapter->q_vector_off, r_idx, r_count,
+		step);
+
+	txr_count = rxr_count = r_count;
+
+	ring_count = txr_count + rxr_count;
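+	/* the q_vector and all of its rings share one allocation */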
+	size = sizeof(struct rnpgbe_q_vector) +
+	       (sizeof(struct rnpgbe_ring) * ring_count);
+
+#ifdef CPU_OFFSET_TEST
+	/* get the first cpu in dev numa */
+	for (i = 0; i < num_online_cpus(); i++) {
+		cpu = i;
+		node = cpu_to_node(cpu);
+		if (node == orig_node) {
+			cpu_offset = cpu;
+			break;
+		}
+	}
+
+	/* wrap around when the offset exceeds the online cpu count */
+	if (cpu_offset + v_idx - adapter->q_vector_off > num_online_cpus()) {
+		cpu_offset = cpu_offset - num_online_cpus();
+		rnpgbe_dbg("start from zero cpu %d\n", num_online_cpus());
+	}
+#endif /* CPU_OFFSET_TEST */
+	/* subtract adapter->q_vector_off so vector 0 maps to cpu_offset */
+	if (cpu_online(cpu_offset + v_idx - adapter->q_vector_off)) {
+		/* pin this q_vector to the selected online cpu */
+		cpu = cpu_offset + v_idx - adapter->q_vector_off;
+		node = cpu_to_node(cpu);
+	}
+
+	/* allocate q_vector and rings */
+	q_vector = kzalloc_node(size, GFP_KERNEL, node);
+	if (!q_vector)
+		q_vector = kzalloc(size, GFP_KERNEL);
+	if (!q_vector)
+		return -ENOMEM;
+
+#ifdef HAVE_IRQ_AFFINITY_HINT
+	/* setup affinity mask and node */
+	if (cpu != -1)
+		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
+#endif /* HAVE_IRQ_AFFINITY_HINT */
+	q_vector->numa_node = node;
+
+#ifdef CONFIG_RNP_DCA
+	/* initialize CPU for DCA */
+	q_vector->cpu = -1;
+
+#endif /* CONFIG_RNP_DCA */
+
+#ifdef HAVE_NETIF_NAPI_ADD_WEIGHT
+	netif_napi_add_weight(adapter->netdev, &q_vector->napi, rnpgbe_poll,
+			      adapter->napi_budge);
+#else /* HAVE_NETIF_NAPI_ADD_WEIGHT */
+	/* initialize NAPI */
+	netif_napi_add(adapter->netdev, &q_vector->napi, rnpgbe_poll);
+#endif /* HAVE_NETIF_NAPI_ADD_WEIGHT */
+	/* tie q_vector and adapter together */
+	adapter->q_vector[v_idx - adapter->q_vector_off] = q_vector;
+	q_vector->adapter = adapter;
+	q_vector->v_idx = v_idx;
+
+	/* initialize work limits */
+	q_vector->tx.work_limit = adapter->tx_work_limit;
+
+	/* initialize pointer to rings */
+	ring = q_vector->ring;
+
+	for (idx = 0; idx < txr_count; idx++) {
+		/* assign generic ring traits */
+		ring->dev = pci_dev_to_dev(adapter->pdev);
+		ring->netdev = adapter->netdev;
+
+		/* configure backlink on ring */
+		ring->q_vector = q_vector;
+
+		/* update q_vector Tx values */
+		rnpgbe_add_ring(ring, &q_vector->tx);
+
+		/* apply Tx specific ring traits */
+		ring->count = adapter->tx_ring_item_count;
+		if (adapter->flags & RNP_FLAG_DCB_ENABLED) {
+			int rss_i;
+
+			rss_i = adapter->ring_feature[RING_F_RSS].indices;
+			/* in dcb mode should assign rss */
+			ring->queue_index = eth_queue_idx + idx * rss_i;
+		} else {
+			ring->queue_index = eth_queue_idx + idx;
+		}
+		/* rnpgbe_queue_idx may be remapped later; it is used to
+		 * locate the hw ring registers
+		 */
+		ring->rnpgbe_queue_idx = txr_idx;
+		ring->ring_addr = dma->dma_ring_addr + RING_OFFSET(txr_idx);
+		ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT;
+		ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK;
+		ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR;
+		ring->device_id = adapter->pdev->device;
+		ring->pfvfnum = hw->pfvfnum;
+		/* not support tunnel */
+		ring->ring_flags |= RNP_RING_NO_TUNNEL_SUPPORT;
+		/* assign ring to adapter */
+		adapter->tx_ring[ring->queue_index] = ring;
+
+		/* update count and index */
+		txr_idx += step;
+
+		rnpgbe_dbg("\t\t%s:vector[%d] <--RNP TxRing:%d, eth_queue:%d\n",
+			   adapter->name, v_idx, ring->rnpgbe_queue_idx,
+			   ring->queue_index);
+
+		/* push pointer to next ring */
+		ring++;
+	}
+
+	for (idx = 0; idx < rxr_count; idx++) {
+		/* assign generic ring traits */
+		ring->dev = pci_dev_to_dev(adapter->pdev);
+		ring->netdev = adapter->netdev;
+
+		/* configure backlink on ring */
+		ring->q_vector = q_vector;
+
+		/* update q_vector Rx values */
+		rnpgbe_add_ring(ring, &q_vector->rx);
+
+		/* apply Rx specific ring traits */
+		ring->count = adapter->rx_ring_item_count;
+		/* rnpgbe_queue_idx may be remapped later; it is used to
+		 * locate the hw ring registers
+		 */
+		if (adapter->flags & RNP_FLAG_DCB_ENABLED) {
+			int rss_i;
+
+			rss_i = adapter->ring_feature[RING_F_RSS].indices;
+			/* in dcb mode should assign rss */
+			ring->queue_index = eth_queue_idx + idx * rss_i;
+		} else {
+			ring->queue_index = eth_queue_idx + idx;
+		}
+		ring->rnpgbe_queue_idx = rxr_idx;
+		ring->ring_addr = dma->dma_ring_addr + RING_OFFSET(rxr_idx);
+		ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT;
+		ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK;
+		ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR;
+		ring->device_id = adapter->pdev->device;
+		ring->pfvfnum = hw->pfvfnum;
+
+		ring->ring_flags |= RNP_RING_NO_TUNNEL_SUPPORT;
+		ring->ring_flags |= RNP_RING_STAGS_SUPPORT;
+
+		/* assign ring to adapter */
+		adapter->rx_ring[ring->queue_index] = ring;
+		rnpgbe_dbg("\t\t%s:vector[%d] <--RNP RxRing:%d, eth_queue:%d\n",
+			   adapter->name, v_idx, ring->rnpgbe_queue_idx,
+			   ring->queue_index);
+
+		/* update count and index */
+		rxr_idx += step;
+
+		/* push pointer to next ring */
+		ring++;
+	}
+
+	q_vector->vector_flags |= RNP_QVECTOR_FLAG_ITR_FEATURE;
+	q_vector->new_rx_count = adapter->rx_frames;
+	q_vector->old_rx_count = adapter->rx_frames;
+	q_vector->itr_rx = adapter->rx_usecs;
+	q_vector->rx.itr = adapter->rx_usecs;
+
+	return 0;
+}
+
+/**
+ * rnpgbe_free_q_vector - Free memory allocated for specific interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void rnpgbe_free_q_vector(struct rnpgbe_adapter *adapter, int v_idx)
+{
+	struct rnpgbe_q_vector *q_vector = adapter->q_vector[v_idx];
+	struct rnpgbe_ring *ring;
+
+	rnpgbe_dbg("v_idx:%d\n", v_idx);
+
+	rnpgbe_for_each_ring(ring, q_vector->tx)
+		adapter->tx_ring[ring->queue_index] = NULL;
+
+	rnpgbe_for_each_ring(ring, q_vector->rx)
+		adapter->rx_ring[ring->queue_index] = NULL;
+
+	adapter->q_vector[v_idx] = NULL;
+	netif_napi_del(&q_vector->napi);
+
+	if (q_vector->vector_flags & RNP_QVECTOR_FLAG_IRQ_MISS_CHECK)
+		hrtimer_cancel(&q_vector->irq_miss_check_timer);
+
+	/*
+	 * rnpgbe_get_stats64() might access the rings on this vector,
+	 * we must wait a grace period before freeing it.
+	 */
+	kfree_rcu(q_vector, rcu);
+}
+
+/**
+ * rnpgbe_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
+ **/
+static int rnpgbe_alloc_q_vectors(struct rnpgbe_adapter *adapter)
+{
+	int v_idx = adapter->q_vector_off;
+	int ring_idx = 0;
+	int r_remaing =
+		min_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
+	int ring_step = 1;
+	int err, ring_cnt, v_remaing = adapter->num_q_vectors;
+	int q_vector_nums = 0;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) {
+		ring_idx = 0;
+		/* only 2 rings when sriov enabled */
+		/* from back */
+		if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) {
+			/* in this mode the pf uses vf 0's rings */
+			ring_idx = 0;
+			r_remaing = hw->sriov_ring_limit;
+		} else {
+			ring_idx = adapter->max_ring_pair_counts -
+				   ring_step * hw->sriov_ring_limit;
+			r_remaing = hw->sriov_ring_limit;
+		}
+	}
+
+	adapter->eth_queue_idx = 0;
+	BUG_ON(adapter->num_q_vectors == 0);
+
+	if (adapter->flags & RNP_FLAG_DCB_ENABLED) {
+		rnpgbe_dbg("in dcb mode r_remaing %d, num_q_vectors %d\n",
+			   r_remaing, v_remaing);
+	}
+
+	rnpgbe_dbg("r_remaing:%d, ring_step:%d num_q_vectors:%d\n", r_remaing,
+		   ring_step, v_remaing);
+
+	/* multiple rings can be handled by one q_vector */
+	for (; r_remaing > 0 && v_remaing > 0; v_remaing--) {
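+		/* spread remaining rings evenly over remaining vectors */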
+		ring_cnt = DIV_ROUND_UP(r_remaing, v_remaing);
+		if (adapter->flags & RNP_FLAG_DCB_ENABLED)
+			BUG_ON(ring_cnt != adapter->num_tc);
+
+		err = rnpgbe_alloc_q_vector(adapter, adapter->eth_queue_idx,
+					    v_idx, ring_idx, ring_cnt,
+					    ring_step);
+		if (err)
+			goto err_out;
+		ring_idx += ring_step * ring_cnt;
+		r_remaing -= ring_cnt;
+		v_idx++;
+		q_vector_nums++;
+		/* dcb mode only adds 1 */
+		if (adapter->flags & RNP_FLAG_DCB_ENABLED)
+			adapter->eth_queue_idx += 1;
+		else
+			adapter->eth_queue_idx += ring_cnt;
+	}
+	/* record the number of q_vectors actually used */
+	adapter->num_q_vectors = q_vector_nums;
+
+	return 0;
+
+err_out:
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+	adapter->num_q_vectors = 0;
+
+	while (v_idx--)
+		rnpgbe_free_q_vector(adapter, v_idx);
+
+	return -ENOMEM;
+}
+
+/**
+ * rnpgbe_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void rnpgbe_free_q_vectors(struct rnpgbe_adapter *adapter)
+{
+	int v_idx = adapter->num_q_vectors;
+
+	adapter->num_rx_queues = 0;
+	adapter->num_tx_queues = 0;
+	adapter->num_q_vectors = 0;
+
+	while (v_idx--)
+		rnpgbe_free_q_vector(adapter, v_idx);
+}
+
+static void rnpgbe_reset_interrupt_capability(struct rnpgbe_adapter *adapter)
+{
+	if (adapter->flags & RNP_FLAG_MSIX_ENABLED)
+		pci_disable_msix(adapter->pdev);
+	else if (adapter->flags & RNP_FLAG_MSI_CAPABLE)
+		pci_disable_msi(adapter->pdev);
+
+	kfree(adapter->msix_entries);
+	adapter->msix_entries = NULL;
+	adapter->q_vector_off = 0;
+
+	/* first clear the msix/msi flags */
+	adapter->flags &= (~RNP_FLAG_MSIX_ENABLED);
+	adapter->flags &= (~RNP_FLAG_MSI_ENABLED);
+}
+
+/**
+ * rnpgbe_set_interrupt_capability - set MSI-X or MSI if supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int rnpgbe_set_interrupt_capability(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int vector, v_budget, err = 0;
+	int irq_mode_back = adapter->irq_mode;
+
+	v_budget = min_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
+
+	/* in one-ring mode, v_budget should be clamped */
+#ifdef RNP_MAX_RINGS
+	v_budget = min_t(int, v_budget, RNP_MAX_RINGS);
+#else /* RNP_MAX_RINGS */
+	v_budget = min_t(int, v_budget, num_online_cpus());
+#endif /* RNP_MAX_RINGS */
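+	/* reserve room for non-queue interrupts (e.g. the mailbox vector) */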
+	v_budget += adapter->num_other_vectors;
+
+	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
+
+	if (adapter->irq_mode == irq_mode_msix) {
+		adapter->msix_entries = kcalloc(
+			v_budget, sizeof(struct msix_entry), GFP_KERNEL);
+
+		if (!adapter->msix_entries) {
+			rnpgbe_err("alloc msix_entries failed!\n");
+			return -EINVAL;
+		}
+		dbg("[%s] adapter:%p msix_entry:%p\n", __func__, adapter,
+		    adapter->msix_entries);
+
+		for (vector = 0; vector < v_budget; vector++)
+			adapter->msix_entries[vector].entry = vector;
+
+		err = rnpgbe_acquire_msix_vectors(adapter, v_budget);
+		if (!err) {
+			if (adapter->num_other_vectors)
+				adapter->q_vector_off = 1;
+			rnpgbe_dbg(
+				"adapter%d alloc vectors: cnt:%d [%d~%d] num_q_vectors:%d\n",
+				adapter->bd_number, v_budget,
+				adapter->q_vector_off,
+				adapter->q_vector_off + v_budget - 1,
+				adapter->num_q_vectors);
+			adapter->flags |= RNP_FLAG_MSIX_ENABLED;
+
+			goto out;
+		}
+		/* if msi is supported, try it */
+		if (adapter->flags & RNP_FLAG_MSI_CAPABLE)
+			adapter->irq_mode = irq_mode_msi;
+		kfree(adapter->msix_entries);
+		pr_info("acquire msix failed, try to use msi\n");
+	} else {
+		rnpgbe_dbg("adapter%d not in msix mode\n", adapter->bd_number);
+	}
+	/* if msi is supported or irq_mode was set to msi */
+	if (adapter->irq_mode == irq_mode_msi) {
+		err = pci_enable_msi(adapter->pdev);
+		if (err) {
+			pr_info("Failed to allocate MSI interrupt, falling back to legacy\n");
+		} else {
+			/* msi mode use only 1 irq */
+			adapter->flags |= RNP_FLAG_MSI_ENABLED;
+		}
+	}
+	/* restore the original irq_mode */
+	adapter->irq_mode = irq_mode_back;
+	/* legacy and msi use only 1 vector */
+	adapter->num_q_vectors = 1;
+	err = 0;
+
+out:
+	return err;
+}
+
+static void rnpgbe_print_ring_info(struct rnpgbe_adapter *adapter)
+{
+	int i;
+	struct rnpgbe_ring *ring;
+	struct rnpgbe_q_vector *q_vector;
+
+	rnpgbe_dbg("tx_queue count %d\n", adapter->num_tx_queues);
+	rnpgbe_dbg("queue-mapping :\n");
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		ring = adapter->tx_ring[i];
+		rnpgbe_dbg(" queue %d , physical ring %d\n", i,
+			   ring->rnpgbe_queue_idx);
+	}
+	rnpgbe_dbg("rx_queue count %d\n", adapter->num_rx_queues);
+	rnpgbe_dbg("queue-mapping :\n");
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		ring = adapter->rx_ring[i];
+		rnpgbe_dbg(" queue %d , physical ring %d\n", i,
+			   ring->rnpgbe_queue_idx);
+	}
+	rnpgbe_dbg("q_vector count %d\n", adapter->num_q_vectors);
+	rnpgbe_dbg("vector-queue mapping:\n");
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		q_vector = adapter->q_vector[i];
+		rnpgbe_dbg("vector %d\n", i);
+		rnpgbe_for_each_ring(ring, q_vector->tx) {
+			rnpgbe_dbg(" tx physical ring %d\n",
+				   ring->rnpgbe_queue_idx);
+		}
+		rnpgbe_for_each_ring(ring, q_vector->rx) {
+			rnpgbe_dbg(" rx physical ring %d\n",
+				   ring->rnpgbe_queue_idx);
+		}
+	}
+}
+
+static void update_ring_count(struct rnpgbe_adapter *adapter)
+{
+	if (adapter->flags2 & RNP_FLAG2_INSMOD)
+		return;
+
+	adapter->flags2 |= RNP_FLAG2_INSMOD;
+
+	/* limit ring count if in msi or legacy mode */
+	if (!(adapter->flags & RNP_FLAG_MSIX_ENABLED)) {
+		adapter->num_tx_queues = 1;
+		adapter->num_rx_queues = 1;
+		adapter->ring_feature[RING_F_RSS].limit = 1;
+		adapter->ring_feature[RING_F_FDIR].limit = 1;
+		adapter->ring_feature[RING_F_RSS].indices = 1;
+	}
+}
+
+/**
+ * rnpgbe_init_interrupt_scheme - Determine proper interrupt scheme
+ * @adapter: board private structure to initialize
+ *
+ * We determine which interrupt scheme to use based on...
+ * - Hardware queue count (num_*_queues)
+ *   - defined by miscellaneous hardware support/features (RSS, etc.)
+ **/
+int rnpgbe_init_interrupt_scheme(struct rnpgbe_adapter *adapter)
+{
+	int err;
+
+	/* Number of supported queues */
+	rnpgbe_set_num_queues(adapter);
+
+	/* Set interrupt mode */
+	err = rnpgbe_set_interrupt_capability(adapter);
+	if (err) {
+		e_dev_err("Unable to get interrupt\n");
+		goto err_set_interrupt;
+	}
+	/* update ring counts only on first init */
+	update_ring_count(adapter);
+
+	err = rnpgbe_alloc_q_vectors(adapter);
+	if (err) {
+		e_dev_err("Unable to allocate memory for queue vectors\n");
+		goto err_alloc_q_vectors;
+	}
+	rnpgbe_cache_ring_register(adapter);
+
+	DPRINTK(PROBE, INFO,
+		"Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n\n",
+		(adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
+		adapter->num_rx_queues, adapter->num_tx_queues);
+	rnpgbe_print_ring_info(adapter);
+
+	set_bit(__RNP_DOWN, &adapter->state);
+
+	return 0;
+
+err_alloc_q_vectors:
+	rnpgbe_reset_interrupt_capability(adapter);
+err_set_interrupt:
+	return err;
+}
+
+/**
+ * rnpgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @adapter: board private structure to clear interrupt scheme on
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+void rnpgbe_clear_interrupt_scheme(struct rnpgbe_adapter *adapter)
+{
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+
+	rnpgbe_free_q_vectors(adapter);
+	rnpgbe_reset_interrupt_capability(adapter);
+}
+
+/**
+ * rnpgbe_tx_ctxtdesc - Send a control desc to hw
+ * @tx_ring: target ring of this control desc
+ * @mss_len_vf_num: mss length and vf number
+ * @inner_vlan_tunnel_len: inner vlan tag and tunnel header length
+ * @ignore_vlan: tell the veb to ignore the vlan tag (sriov mode)
+ * @crc_pad: crc/padding control flag from the tx buffer
+ *
+ **/
+static void rnpgbe_tx_ctxtdesc(struct rnpgbe_ring *tx_ring,
+			       u32 mss_len_vf_num,
+			       u32 inner_vlan_tunnel_len,
+			       int ignore_vlan,
+			       bool crc_pad)
+{
+	struct rnpgbe_tx_ctx_desc *context_desc;
+	u16 i = tx_ring->next_to_use;
+	struct rnpgbe_adapter *adapter = RING2ADAPT(tx_ring);
+	u32 type_tucmd = 0;
+
+	context_desc = RNP_TX_CTXTDESC(tx_ring, i);
+
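+	/* advance next_to_use, wrapping at the end of the ring */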
+	i++;
+	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+	/* set bits to identify this as an advanced context descriptor */
+	type_tucmd |= RNP_TXD_CTX_CTRL_DESC;
+
+	/* set mac padding control when the tx padding priv flag is set */
+	if (adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING) {
+		if (!crc_pad)
+			type_tucmd |= RNP_TXD_MTI_CRC_PAD_CTRL;
+	}
+
+#define VLAN_MASK (0x0000ffff)
+#define VLAN_INSERT (0x00800000)
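+	/* a non-zero vlan tag in the low 16 bits requests vlan insertion */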
+	if (inner_vlan_tunnel_len & VLAN_MASK)
+		type_tucmd |= VLAN_INSERT;
+
+	context_desc->mss_len_vf_num = cpu_to_le32(mss_len_vf_num);
+	context_desc->inner_vlan_tunnel_len =
+		cpu_to_le32(inner_vlan_tunnel_len);
+	context_desc->resv_cmd = cpu_to_le32(type_tucmd);
+	context_desc->resv = 0;
+	if (tx_ring->q_vector->adapter->flags & RNP_FLAG_SRIOV_ENABLED) {
+		if (ignore_vlan)
+			context_desc->inner_vlan_tunnel_len |=
+				VF_VEB_IGNORE_VLAN;
+	}
+	buf_dump_line("ctx  ", __LINE__, context_desc, sizeof(*context_desc));
+}
+
+void rnpgbe_maybe_tx_ctxtdesc(struct rnpgbe_ring *tx_ring,
+			      struct rnpgbe_tx_buffer *first, u32 ignore_vlan)
+{
+	/* in sriov mode the pf uses the last vf */
+	if (first->ctx_flag) {
+		rnpgbe_tx_ctxtdesc(tx_ring, first->mss_len_vf_num,
+				   first->inner_vlan_tunnel_len, ignore_vlan,
+				   first->gso_need_padding);
+	}
+}
+
+void rnpgbe_store_reta(struct rnpgbe_adapter *adapter)
+{
+	u32 i, reta_entries = rnpgbe_rss_indir_tbl_entries(adapter);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 reta = 0;
+	/* related to the rss table */
+	struct rnpgbe_ring *rx_ring;
+
+	/* Write redirection table to HW */
+	for (i = 0; i < reta_entries; i++) {
+		if (adapter->flags & RNP_FLAG_SRIOV_ENABLED)
+			reta = adapter->rss_indir_tbl[i];
+		else {
+			rx_ring = adapter->rx_ring[adapter->rss_indir_tbl[i]];
+			reta = rx_ring->rnpgbe_queue_idx;
+		}
+		hw->rss_indir_tbl[i] = reta;
+	}
+	hw->ops.set_rss_table(hw);
+}
+
+void rnpgbe_store_key(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED);
+
+	hw->ops.set_rss_key(hw, sriov_flag);
+}
+
+int rnpgbe_init_rss_key(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED);
+
+	/* only init the rss key once and keep any key
+	 * the user has already supplied
+	 */
+	if (!adapter->rss_key_setup_flag) {
+		netdev_rss_key_fill(adapter->rss_key, RNP_RSS_KEY_SIZE);
+		adapter->rss_key_setup_flag = 1;
+	}
+	hw->ops.set_rss_key(hw, sriov_flag);
+
+	return 0;
+}
+
+int rnpgbe_init_rss_table(struct rnpgbe_adapter *adapter)
+{
+	int rx_nums = adapter->num_rx_queues;
+	int i, j;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_ring *rx_ring;
+	u32 reta = 0;
+	u32 reta_entries = rnpgbe_rss_indir_tbl_entries(adapter);
+
+	if (adapter->flags & RNP_FLAG_DCB_ENABLED) {
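+		/* split the rx queues evenly across the traffic classes */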
+		rx_nums = rx_nums / adapter->num_tc;
+		for (i = 0, j = 0; i < 8; i++) {
+			adapter->rss_tc_tbl[i] = j;
+			hw->rss_tc_tbl[i] = j;
+			j = (j + 1) % adapter->num_tc;
+		}
+	} else {
+		for (i = 0, j = 0; i < 8; i++) {
+			hw->rss_tc_tbl[i] = 0;
+			adapter->rss_tc_tbl[i] = 0;
+		}
+	}
+
+	/* adapter->num_q_vectors is not correct */
+	for (i = 0, j = 0; i < reta_entries; i++) {
+		/* init with default value */
+		if (!adapter->rss_tbl_setup_flag)
+			adapter->rss_indir_tbl[i] = j;
+
+		if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) {
+			/* in sriov mode reta in [0, rx_nums] */
+			reta = j;
+		} else {
+			/* without sriov, reta is the real ring number */
+			rx_ring = adapter->rx_ring[adapter->rss_indir_tbl[i]];
+			reta = rx_ring->rnpgbe_queue_idx;
+		}
+		/* store rss_indir_tbl */
+		hw->rss_indir_tbl[i] = reta;
+
+		j = (j + 1) % rx_nums;
+	}
+	/* the table is only initialized once */
+	adapter->rss_tbl_setup_flag = 1;
+	hw->ops.set_rss_table(hw);
+	return 0;
+}
+
+/* write the filter to the hw */
+s32 rnpgbe_fdir_write_perfect_filter(int fdir_mode, struct rnpgbe_hw *hw,
+				     union rnpgbe_atr_input *filter, u16 hw_id,
+				     u8 queue, bool prio_flag)
+{
+	if (filter->formatted.flow_type == RNP_ATR_FLOW_TYPE_ETHER)
+		hw->ops.set_layer2_remapping(hw, filter, hw_id, queue,
+					     prio_flag);
+	else
+		hw->ops.set_tuple5_remapping(hw, filter, hw_id, queue,
+					     prio_flag);
+
+	return 0;
+}
+
+s32 rnpgbe_fdir_erase_perfect_filter(int fdir_mode, struct rnpgbe_hw *hw,
+				     union rnpgbe_atr_input *input, u16 pri_id)
+{
+	/* just disable the filter */
+	if (input->formatted.flow_type == RNP_ATR_FLOW_TYPE_ETHER) {
+		hw->ops.clr_layer2_remapping(hw, pri_id);
+		dbg("disable layer2 %d\n", pri_id);
+	} else {
+		hw->ops.clr_tuple5_remapping(hw, pri_id);
+		dbg("disable tuple5 %d\n", pri_id);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
new file mode 100755
index 0000000000000000000000000000000000000000..3bd253dea2aa5589aad01b576b299082781b8ff3
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
@@ -0,0 +1,8988 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#ifdef NETIF_F_HW_TC
+#include 
+#include 
+#include 
+#endif
+
+#include "rnpgbe_common.h"
+#include "rnpgbe.h"
+#include "rnpgbe_sriov.h"
+#include "rnpgbe_ptp.h"
+#include "rnpgbe_ethtool.h"
+
+#ifdef HAVE_XDP_SOCK_DRV
+#include 
+#endif
+
+#ifdef HAVE_UDP_ENC_RX_OFFLOAD
+#include 
+#include 
+#endif /* HAVE_UDP_ENC_RX_OFFLOAD */
+#ifdef HAVE_VXLAN_RX_OFFLOAD
+#include 
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
+
+/* for test only */
+#ifdef CONFIG_ARM64
+#define NO_BQL_TEST
+#endif
+
+/* #define NO_BQL_TEST */
+
+#define USE_NUMA_MEMORY
+#define SUPPORT_IRQ_AFFINITY_CHANGE
+
+char rnpgbe_driver_name[] = "rnpgbe";
+static const char rnpgbe_driver_string[] =
+	"Mucse 1 Gigabit PCI Express Network Driver";
+#define DRV_VERSION "0.2.4-rc3"
+static u32 driver_version = 0x00020402;
+#include "version.h"
+
+const char rnpgbe_driver_version[] = DRV_VERSION;
+static const char rnpgbe_copyright[] =
+	"Copyright (c) 2020-2024 Mucse Corporation.";
+
+static struct rnpgbe_info *rnpgbe_info_tbl[] = {
+	[board_n500] = &rnpgbe_n500_info,
+	[board_n210] = &rnpgbe_n210_info,
+	[board_n210L] = &rnpgbe_n210L_info,
+};
+
+static int register_mbx_irq(struct rnpgbe_adapter *adapter);
+static void remove_mbx_irq(struct rnpgbe_adapter *adapter);
+
+#ifdef CONFIG_RNP_DISABLE_PACKET_SPLIT
+static bool rnpgbe_alloc_mapped_skb(struct rnpgbe_ring *rx_ring,
+				    struct rnpgbe_rx_buffer *bi);
+#else /* CONFIG_RNP_DISABLE_PACKET_SPLIT */
+static void rnpgbe_pull_tail(struct sk_buff *skb);
+#ifdef OPTM_WITH_LPAGE
+static bool rnpgbe_alloc_mapped_page(struct rnpgbe_ring *rx_ring,
+				     struct rnpgbe_rx_buffer *bi,
+				     union rnpgbe_rx_desc *rx_desc, u16 bufsz,
+				     u64 fun_id);
+static void rnpgbe_put_rx_buffer(struct rnpgbe_ring *rx_ring,
+				 struct rnpgbe_rx_buffer *rx_buffer);
+#else /* OPTM_WITH_LPAGE */
+static bool rnpgbe_alloc_mapped_page(struct rnpgbe_ring *rx_ring,
+				     struct rnpgbe_rx_buffer *bi);
+static void rnpgbe_put_rx_buffer(struct rnpgbe_ring *rx_ring,
+				 struct rnpgbe_rx_buffer *rx_buffer,
+				 struct sk_buff *skb);
+#endif /* OPTM_WITH_LPAGE */
+
+#endif /* CONFIG_RNP_DISABLE_PACKET_SPLIT */
+/* itr can be modified in napi handle */
+/* now hw not support this */
+#define ITR_TEST 0
+
+static struct pci_device_id rnpgbe_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N500_QUAD_PORT),
+	  .driver_data = board_n500 }, /* n500 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N500_DUAL_PORT),
+	  .driver_data = board_n500 }, /* n500 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N210),
+	  .driver_data = board_n210 }, /* n210 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N210L),
+	  .driver_data = board_n210L }, /* n210L */
+	/* required last entry */
+	{
+		0,
+	},
+};
+MODULE_DEVICE_TABLE(pci, rnpgbe_pci_tbl);
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0000);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static unsigned int fix_eth_name;
+module_param(fix_eth_name, uint, 0000);
+MODULE_PARM_DESC(fix_eth_name, "set eth adapter name to rnpgbeXX");
+
+#ifdef HAVE_PTP_1588_CLOCK
+static unsigned int module_enable_ptp = 1;
+module_param(module_enable_ptp, uint, 0000);
+MODULE_PARM_DESC(module_enable_ptp, "enable ptp, enabled by default");
+#endif
+
+MODULE_AUTHOR("Mucse Corporation, ");
+MODULE_DESCRIPTION("Mucse(R) 1 Gigabit PCI Express Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static struct workqueue_struct *rnpgbe_wq;
+
+static int enable_hi_dma;
+
+#define RNP_LPI_T(x) (jiffies + msecs_to_jiffies(x))
+
+static void rnpgbe_service_timer(struct timer_list *t);
+static void rnpgbe_setup_eee_mode(struct rnpgbe_adapter *adapter, bool status);
+
+void rnpgbe_service_event_schedule(struct rnpgbe_adapter *adapter)
+{
+	if (!test_bit(__RNP_DOWN, &adapter->state) &&
+	    !test_and_set_bit(__RNP_SERVICE_SCHED, &adapter->state))
+		queue_work(rnpgbe_wq, &adapter->service_task);
+}
+
+static void rnpgbe_service_event_complete(struct rnpgbe_adapter *adapter)
+{
+	BUG_ON(!test_bit(__RNP_SERVICE_SCHED, &adapter->state));
+
+	/* flush memory to make sure state is correct before next watchdog */
+	smp_mb__before_atomic();
+	clear_bit(__RNP_SERVICE_SCHED, &adapter->state);
+}
+
+/**
+ * rnpgbe_set_ring_vector - set the ring_vector registers,
+ * mapping interrupt causes to vectors
+ * @adapter: pointer to adapter struct
+ * @rnpgbe_queue: queue to map the corresponding interrupt to
+ * @rnpgbe_msix_vector: the vector to map to the corresponding queue
+ *
+ */
+static void rnpgbe_set_ring_vector(struct rnpgbe_adapter *adapter,
+				   u8 rnpgbe_queue, u8 rnpgbe_msix_vector)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 data = 0;
+
+	data = hw->pfvfnum << 24;
+	data |= (rnpgbe_msix_vector << 8);
+	data |= (rnpgbe_msix_vector << 0);
+
+	DPRINTK(IFUP, INFO,
+		"Set Ring-Vector queue:%d (reg:0x%x) <-- Rx-MSIX:%d, Tx-MSIX:%d\n",
+		rnpgbe_queue, RING_VECTOR(rnpgbe_queue), rnpgbe_msix_vector,
+		rnpgbe_msix_vector);
+
+	rnpgbe_wr_reg(hw->ring_msix_base + RING_VECTOR(rnpgbe_queue), data);
+}
+
+void rnpgbe_unmap_and_free_tx_resource(struct rnpgbe_ring *ring,
+				       struct rnpgbe_tx_buffer *tx_buffer)
+{
+	if (tx_buffer->skb) {
+		dev_kfree_skb_any(tx_buffer->skb);
+		if (dma_unmap_len(tx_buffer, len))
+			dma_unmap_single(ring->dev,
+					 dma_unmap_addr(tx_buffer, dma),
+					 dma_unmap_len(tx_buffer, len),
+					 DMA_TO_DEVICE);
+	} else if (dma_unmap_len(tx_buffer, len)) {
+		dma_unmap_page(ring->dev, dma_unmap_addr(tx_buffer, dma),
+			       dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE);
+	}
+	tx_buffer->next_to_watch = NULL;
+	tx_buffer->skb = NULL;
+	dma_unmap_len_set(tx_buffer, len, 0);
+	/* tx_buffer must be completely set up in the transmit path */
+}
+
+static u64 rnpgbe_get_tx_completed(struct rnpgbe_ring *ring)
+{
+	return ring->stats.packets;
+}
+
+static u64 rnpgbe_get_tx_pending(struct rnpgbe_ring *ring)
+{
+	u32 head = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_HEAD);
+	u32 tail = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_TAIL);
+
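+	/* report pending descriptors, accounting for head/tail wrap */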
+	if (head != tail)
+		return (head < tail) ? tail - head :
+				       (tail + ring->count - head);
+
+	return 0;
+}
+
+static inline bool rnpgbe_check_tx_hang(struct rnpgbe_ring *tx_ring)
+{
+	u32 tx_done = rnpgbe_get_tx_completed(tx_ring);
+	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
+	u32 tx_pending = rnpgbe_get_tx_pending(tx_ring);
+	bool ret = false;
+
+	clear_check_for_tx_hang(tx_ring);
+
+	/*
+	 * Check for a hung queue, but be thorough. This verifies
+	 * that a transmit has been completed since the previous
+	 * check AND there is at least one packet pending. The
+	 * ARMED bit is set to indicate a potential hang. The
+	 * bit is cleared if a pause frame is received to remove
+	 * false hang detection due to PFC or 802.3x frames. By
+	 * requiring this to fail twice we avoid races with
+	 * pfc clearing the ARMED bit and conditions where we
+	 * run the check_tx_hang logic with a transmit completion
+	 * pending but without time to complete it yet.
+	 */
+	if ((tx_done_old == tx_done) && tx_pending) {
+		/* make sure it is true for two checks in a row */
+		ret = test_and_set_bit(__RNP_HANG_CHECK_ARMED, &tx_ring->state);
+	} else {
+		/* update completed stats and continue */
+		tx_ring->tx_stats.tx_done_old = tx_done;
+		/* reset the countdown */
+		clear_bit(__RNP_HANG_CHECK_ARMED, &tx_ring->state);
+	}
+	return ret;
+}
+
+/**
+ * rnpgbe_tx_timeout_reset - initiate reset due to Tx timeout
+ * @adapter: driver private struct
+ **/
+static void rnpgbe_tx_timeout_reset(struct rnpgbe_adapter *adapter)
+{
+	/* Do the reset outside of interrupt context */
+	if (!test_bit(__RNP_DOWN, &adapter->state)) {
+		adapter->flags2 |= RNP_FLAG2_RESET_REQUESTED;
+		e_warn(drv, "initiating reset due to tx timeout\n");
+		rnpgbe_service_event_schedule(adapter);
+	}
+}
+
+/**
+ * rnpgbe_enable_eee_mode - check and enter LPI mode
+ * @adapter: driver private structure
+ * Description: verify that all tx rings are idle and, if so, enter
+ * LPI mode when EEE is in use.
+ */
+static void rnpgbe_enable_eee_mode(struct rnpgbe_adapter *adapter)
+{
+	int i = 0;
+	struct rnpgbe_ring *tx_ring;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	for (i = 0; i < (adapter->num_tx_queues); i++) {
+		tx_ring = adapter->tx_ring[i];
+		if (tx_ring->next_to_use != tx_ring->next_to_clean)
+			return;
+	}
+	/* Check and enter in LPI mode */
+	if (!adapter->tx_path_in_lpi_mode) {
+		if (hw->ops.set_eee_mode)
+			hw->ops.set_eee_mode(hw,
+					     adapter->en_tx_lpi_clockgating);
+	}
+	adapter->tx_path_in_lpi_mode = true;
+}
+
+/**
+ * rnpgbe_disable_eee_mode - disable and exit LPI mode
+ * @adapter: driver private structure
+ * Description: exit and disable EEE when the LPI state is active.
+ * Called from the xmit path.
+ */
+static void rnpgbe_disable_eee_mode(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	if (hw->ops.reset_eee_mode)
+		hw->ops.reset_eee_mode(hw);
+
+	if (!test_bit(__RNP_EEE_REMOVE, &adapter->state))
+		mod_timer(&adapter->eee_ctrl_timer,
+			  RNP_LPI_T(adapter->eee_timer));
+}
+
+/**
+ * rnpgbe_clean_tx_irq - Reclaim resources after transmit completes
+ * @q_vector: structure containing interrupt and ring information
+ * @tx_ring: tx ring to clean
+ * @napi_budget: napi budget used when freeing skbs
+ **/
+static bool rnpgbe_clean_tx_irq(struct rnpgbe_q_vector *q_vector,
+				struct rnpgbe_ring *tx_ring, int napi_budget)
+{
+	struct rnpgbe_adapter *adapter = q_vector->adapter;
+	struct rnpgbe_tx_buffer *tx_buffer;
+	struct rnpgbe_tx_desc *tx_desc;
+	u64 total_bytes = 0, total_packets = 0;
+	int budget = q_vector->tx.work_limit;
+	int i = tx_ring->next_to_clean;
+
+	if (test_bit(__RNP_DOWN, &adapter->state))
+		return true;
+
+	tx_ring->tx_stats.poll_count++;
+	tx_buffer = &tx_ring->tx_buffer_info[i];
+	tx_desc = RNP_TX_DESC(tx_ring, i);
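+	/* keep i biased by -count so the wrap check below only
+	 * needs to test against zero
+	 */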
+	i -= tx_ring->count;
+
+	do {
+		struct rnpgbe_tx_desc *eop_desc = tx_buffer->next_to_watch;
+
+		/* if next_to_watch is not set then there is no work pending */
+		if (!eop_desc)
+			break;
+
+		/* prevent any other reads prior to eop_desc */
+		rmb();
+
+		/* if eop DD is not set pending work has not been completed */
+		if (!(eop_desc->vlan_cmd & cpu_to_le32(RNP_TXD_STAT_DD)))
+			break;
+		/* clear next_to_watch to prevent false hangs */
+		tx_buffer->next_to_watch = NULL;
+
+		/* update the statistics for this packet */
+		total_bytes += tx_buffer->bytecount;
+		total_packets += tx_buffer->gso_segs;
+
+		/* free the skb */
+		napi_consume_skb(tx_buffer->skb, napi_budget);
+
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE);
+
+		/* clear tx_buffer data */
+		tx_buffer->skb = NULL;
+		dma_unmap_len_set(tx_buffer, len, 0);
+
+		/* unmap remaining buffers */
+		while (tx_desc != eop_desc) {
+			tx_buffer++;
+			tx_desc++;
+			i++;
+			if (unlikely(!i)) {
+				i -= tx_ring->count;
+				tx_buffer = tx_ring->tx_buffer_info;
+				tx_desc = RNP_TX_DESC(tx_ring, 0);
+			}
+
+			/* unmap any remaining paged data */
+			if (dma_unmap_len(tx_buffer, len)) {
+				dma_unmap_page(tx_ring->dev,
+					       dma_unmap_addr(tx_buffer, dma),
+					       dma_unmap_len(tx_buffer, len),
+					       DMA_TO_DEVICE);
+				dma_unmap_len_set(tx_buffer, len, 0);
+			}
+			budget--;
+		}
+
+		/* move us one more past the eop_desc for start of next pkt */
+		tx_buffer++;
+		tx_desc++;
+		i++;
+		if (unlikely(!i)) {
+			i -= tx_ring->count;
+			tx_buffer = tx_ring->tx_buffer_info;
+			tx_desc = RNP_TX_DESC(tx_ring, 0);
+		}
+
+		/* issue prefetch for next Tx descriptor */
+		prefetch(tx_desc);
+
+		/* update budget accounting */
+		budget--;
+	} while (likely(budget > 0));
+#ifdef NO_BQL_TEST
+#else
+	netdev_tx_completed_queue(txring_txq(tx_ring), total_packets,
+				  total_bytes);
+#endif
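+	/* convert the biased index back into a real ring index */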
+	i += tx_ring->count;
+	tx_ring->next_to_clean = i;
+	u64_stats_update_begin(&tx_ring->syncp);
+	tx_ring->stats.bytes += total_bytes;
+	tx_ring->stats.packets += total_packets;
+	tx_ring->tx_stats.tx_clean_count += total_packets;
+	tx_ring->tx_stats.tx_clean_times++;
+	if (tx_ring->tx_stats.tx_clean_times > 10) {
+		tx_ring->tx_stats.tx_clean_times = 0;
+		tx_ring->tx_stats.tx_clean_count = 0;
+	}
+
+	u64_stats_update_end(&tx_ring->syncp);
+	q_vector->tx.total_bytes += total_bytes;
+	q_vector->tx.total_packets += total_packets;
+	tx_ring->tx_stats.send_done_bytes += total_bytes;
+
+	if (!(q_vector->vector_flags & RNP_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS)) {
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+		if (likely(netif_carrier_ok(tx_ring->netdev) &&
+			   (rnpgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
+			/* Make sure that anybody stopping the queue after this
+			 * sees the new next_to_clean.
+			 */
+			smp_mb();
+			if (__netif_subqueue_stopped(tx_ring->netdev,
+						     tx_ring->queue_index) &&
+			    !test_bit(__RNP_DOWN, &adapter->state)) {
+				netif_wake_subqueue(tx_ring->netdev,
+						    tx_ring->queue_index);
+				++tx_ring->tx_stats.restart_queue;
+			}
+		}
+	}
+
+	if (adapter->eee_active && total_packets) {
+		if (!adapter->tx_path_in_lpi_mode) {
+			if (!test_bit(__RNP_EEE_REMOVE, &adapter->state))
+				mod_timer(&adapter->eee_ctrl_timer,
+					  RNP_LPI_T(adapter->eee_timer));
+		}
+	}
+
+	return !!budget;
+}
+
+static inline void rnpgbe_rx_hash(struct rnpgbe_ring *ring,
+				  union rnpgbe_rx_desc *rx_desc,
+				  struct sk_buff *skb)
+{
+	int rss_type;
+
+	if (!(ring->netdev->features & NETIF_F_RXHASH))
+		return;
+#define RNP_RSS_TYPE_MASK 0xc0
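+	/* a non-zero rss type is reported as an L4 hash */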
+	rss_type = rx_desc->wb.cmd & RNP_RSS_TYPE_MASK;
+	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.rss_hash),
+		     rss_type ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
+}
+
+/**
+ * rnpgbe_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @ring: structure containing ring specific data
+ * @rx_desc: current Rx descriptor being processed
+ * @skb: skb currently being received and modified
+ **/
+static inline void rnpgbe_rx_checksum(struct rnpgbe_ring *ring,
+				      union rnpgbe_rx_desc *rx_desc,
+				      struct sk_buff *skb)
+{
+	skb_checksum_none_assert(skb);
+	/* Rx csum disabled */
+	if (!(ring->netdev->features & NETIF_F_RXCSUM))
+		return;
+
+	/* on an outer L3/L4 error (only delivered in promisc or
+	 * rx-all mode) leave the checksum unverified
+	 */
+	if (rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_ERR_MASK))
+		return;
+	ring->rx_stats.csum_good++;
+	/* at least it is a ip packet which has ip checksum */
+
+	/* It must be a TCP or UDP packet with a valid checksum */
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static inline void rnpgbe_update_rx_tail(struct rnpgbe_ring *rx_ring, u32 val)
+{
+	rx_ring->next_to_use = val;
+#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT
+	/* update next to alloc since we have filled the ring */
+	rx_ring->next_to_alloc = val;
+#endif
+	/*
+	 * Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64).
+	 */
+	wmb();
+	rnpgbe_wr_reg(rx_ring->tail, val);
+
+	/* if rx_ring in delay setup mode, don't update
+	 * next_to_use to hw larger than RNP_MIN_RXD
+	 */
+}
+
+#if (PAGE_SIZE < 8192)
+#define RNP_MAX_2K_FRAME_BUILD_SKB (RNP_RXBUFFER_1536 - NET_IP_ALIGN)
+#define RNP_2K_TOO_SMALL_WITH_PADDING                                          \
+	((NET_SKB_PAD + RNP_RXBUFFER_1536) > SKB_WITH_OVERHEAD(RNP_RXBUFFER_2K))
+
+static inline int rnpgbe_compute_pad(int rx_buf_len)
+{
+	int page_size, pad_size;
+
+	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+	return pad_size;
+}
+
+static inline int rnpgbe_skb_pad(void)
+{
+	int rx_buf_len;
+
+	/* If a 2K buffer cannot handle a standard Ethernet frame then
+	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
+	 *
+	 * For a 3K buffer we need to add enough padding to allow for
+	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
+	 * cache-line alignment.
+	 */
+	if (RNP_2K_TOO_SMALL_WITH_PADDING)
+		rx_buf_len = RNP_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN);
+	else
+		rx_buf_len = RNP_RXBUFFER_1536;
+
+	/* if needed make room for NET_IP_ALIGN */
+	rx_buf_len -= NET_IP_ALIGN;
+	return rnpgbe_compute_pad(rx_buf_len);
+}
+
+#define RNP_SKB_PAD rnpgbe_skb_pad()
+#else /* PAGE_SIZE < 8192 */
+#define RNP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#endif
+/**
+ * rnpgbe_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, timestamp, protocol, and
+ * other fields within the skb.
+ **/
+static void rnpgbe_process_skb_fields(struct rnpgbe_ring *rx_ring,
+				      union rnpgbe_rx_desc *rx_desc,
+				      struct sk_buff *skb)
+{
+	struct net_device *dev = rx_ring->netdev;
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	rnpgbe_rx_hash(rx_ring, rx_desc, skb);
+
+	rnpgbe_rx_checksum(rx_ring, rx_desc, skb);
+
+	if (hw->ncsi_en) {
+		/* ncsi with stags on */
+		if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) {
+			/* compare the outer stag against the configured one */
+			u8 header[ETH_ALEN + ETH_ALEN];
+			u8 *data = skb->data;
+			struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
+			u16 vlan_tci;
+
+			switch (adapter->outer_vlan_type) {
+			case outer_vlan_type_88a8:
+				vlan_tci = htons(ETH_P_8021AD);
+				break;
+#ifdef ETH_P_QINQ1
+			case outer_vlan_type_9100:
+				vlan_tci = htons(ETH_P_QINQ1);
+				break;
+#endif
+#ifdef ETH_P_QINQ2
+			case outer_vlan_type_9200:
+				vlan_tci = htons(ETH_P_QINQ2);
+				break;
+#endif
+			default:
+				vlan_tci = htons(ETH_P_8021AD);
+				break;
+			}
+
+			if (veth->h_vlan_proto != vlan_tci)
+				goto skip_vlan;
+
+			if (veth->h_vlan_TCI != htons(adapter->stags_vid))
+				goto skip_vlan;
+
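+			/* strip the 4-byte outer stag by moving the mac
+			 * addresses forward over it
+			 */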
+			memcpy(header, data, ETH_ALEN + ETH_ALEN);
+			memcpy(skb->data + 4, header, ETH_ALEN + ETH_ALEN);
+			skb->len -= 4;
+			skb->data += 4;
+			goto skip_vlan;
+
+		}
+	}
+
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+	if (((dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+#ifdef NETIF_F_HW_VLAN_STAG_RX
+	    || (dev->features & NETIF_F_HW_VLAN_STAG_RX)) &&
+#else
+		     ) &&
+#endif
+#else /* NETIF_F_HW_VLAN_CTAG_RX */
+	if ((dev->features & NETIF_F_HW_VLAN_RX) &&
+#endif
+	    rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_VLAN_VALID) &&
+	    !ignore_veb_vlan(rx_ring->q_vector->adapter, rx_desc)) {
+		if (rnpgbe_test_ext_cmd(rx_desc, REV_OUTER_VLAN)) {
+			u16 vid_inner = le16_to_cpu(rx_desc->wb.vlan);
+			u16 vid_outer;
+			u16 vlan_tci = htons(ETH_P_8021Q);
+			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+					       vid_inner);
+			/* check outer vlan type */
+			if (rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_STAG)) {
+				switch (rx_ring->q_vector->adapter
+						->outer_vlan_type) {
+				case outer_vlan_type_88a8:
+					vlan_tci = htons(ETH_P_8021AD);
+					break;
+#ifdef ETH_P_QINQ1
+				case outer_vlan_type_9100:
+					vlan_tci = htons(ETH_P_QINQ1);
+					break;
+#endif /* ETH_P_QINQ1 */
+#ifdef ETH_P_QINQ2
+				case outer_vlan_type_9200:
+					vlan_tci = htons(ETH_P_QINQ2);
+					break;
+#endif /* ETH_P_QINQ2 */
+				default:
+					vlan_tci = htons(ETH_P_8021AD);
+					break;
+				}
+			} else {
+				vlan_tci = htons(ETH_P_8021Q);
+			}
+			vid_outer = le16_to_cpu(rx_desc->wb.mark);
+			/* in stags mode, only the configured stag is ignored */
+			if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) {
+				/* push the outer tag if it differs from the
+				 * configured stag or is a cvlan
+				 */
+				if ((vid_outer != adapter->stags_vid) ||
+				    (vlan_tci == htons(ETH_P_8021Q))) {
+					/* push outer inner */
+					skb = __vlan_hwaccel_push_inside(skb);
+					__vlan_hwaccel_put_tag(skb, vlan_tci,
+							       vid_outer);
+					/* if not 88a8, push again to
+					 * avoid a kernel crash (todo)
+					 */
+				}
+				/* if vid_outer is stags_vid do nothing */
+			} else {
+				/* push outer */
+				skb = __vlan_hwaccel_push_inside(skb);
+				__vlan_hwaccel_put_tag(skb, vlan_tci,
+						       vid_outer);
+			}
+
+		} else {
+			/* only inner vlan */
+			u16 vid = le16_to_cpu(rx_desc->wb.vlan);
+			/* check vlan type */
+			if (rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_STAG)) {
+				if ((adapter->flags2 &
+				     RNP_FLAG2_VLAN_STAGS_ENABLED) &&
+				    (vid == adapter->stags_vid)) {
+					/* do nothing ignore this stags */
+				} else {
+					/* should consider other stags */
+					switch (rx_ring->q_vector->adapter
+							->outer_vlan_type) {
+					case outer_vlan_type_88a8:
+						__vlan_hwaccel_put_tag(
+							skb,
+							htons(ETH_P_8021AD),
+							vid);
+						break;
+#ifdef ETH_P_QINQ1
+					case outer_vlan_type_9100:
+						__vlan_hwaccel_put_tag(
+							skb, htons(ETH_P_QINQ1),
+							vid);
+						break;
+#endif /* ETH_P_QINQ1 */
+#ifdef ETH_P_QINQ2
+					case outer_vlan_type_9200:
+						__vlan_hwaccel_put_tag(
+							skb, htons(ETH_P_QINQ2),
+							vid);
+						break;
+#endif /* ETH_P_QINQ2 */
+					default:
+						__vlan_hwaccel_put_tag(
+							skb,
+							htons(ETH_P_8021AD),
+							vid);
+						break;
+					}
+				}
+			} else {
+				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+						       vid);
+			}
+		}
+		rx_ring->rx_stats.vlan_remove++;
+	}
+skip_vlan:
+	skb_record_rx_queue(skb, rx_ring->queue_index);
+
+	skb->protocol = eth_type_trans(skb, dev);
+}
+
+static void rnpgbe_rx_skb(struct rnpgbe_q_vector *q_vector, struct sk_buff *skb)
+{
+	struct rnpgbe_adapter *adapter = q_vector->adapter;
+
+	if (!(adapter->flags & RNP_FLAG_IN_NETPOLL))
+		napi_gro_receive(&q_vector->napi, skb);
+	else
+		netif_rx(skb);
+}
+
+/* drop this packet on error */
+static bool rnpgbe_check_csum_error(struct rnpgbe_ring *rx_ring,
+				    union rnpgbe_rx_desc *rx_desc,
+				    unsigned int size,
+				    unsigned int *driver_drop_packets)
+{
+	bool err = false;
+	struct net_device *netdev = rx_ring->netdev;
+
+	if (netdev->features & NETIF_F_RXCSUM) {
+		if (unlikely(rnpgbe_test_staterr(rx_desc,
+						 RNP_RXD_STAT_ERR_MASK))) {
+			rx_debug_printk("rx error: VEB:%s mark:0x%x cmd:0x%x\n",
+					(rx_ring->q_vector->adapter->flags &
+					 RNP_FLAG_SRIOV_ENABLED) ?
+						"On" :
+						"Off",
+					rx_desc->wb.mark, rx_desc->wb.cmd);
+			/* push this packet to stack if in promisc mode */
+			rx_ring->rx_stats.csum_err++;
+
+			if ((!(netdev->flags & IFF_PROMISC) &&
+			     (!(netdev->features & NETIF_F_RXALL)))) {
+				/* not promisc or rx-all: drop the packet */
+				err = true;
+			}
+		}
+	}
+	if (err) {
+		u32 ntc = rx_ring->next_to_clean + 1;
+#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT
+		struct rnpgbe_rx_buffer *rx_buffer;
+#if (PAGE_SIZE < 8192)
+		unsigned int truesize = rnpgbe_rx_pg_size(rx_ring) / 2;
+#else
+		unsigned int truesize =
+			ring_uses_build_skb(rx_ring) ?
+				SKB_DATA_ALIGN(RNP_SKB_PAD + size) :
+				SKB_DATA_ALIGN(size);
+#endif
+
+		if (likely(rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_EOP)))
+			*driver_drop_packets = *driver_drop_packets + 1;
+
+		/* we are reusing so sync this buffer for CPU use */
+		rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+		dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
+					      rx_buffer->page_offset, size,
+					      DMA_FROM_DEVICE);
+
+		/* we should clean it since we used all info in it */
+		rx_desc->wb.cmd = 0;
+
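+		/* flip to the other half of the page (small pages) or
+		 * advance past the used area (large pages) so the
+		 * buffer can be recycled
+		 */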
+#if (PAGE_SIZE < 8192)
+		rx_buffer->page_offset ^= truesize;
+#else
+		rx_buffer->page_offset += truesize;
+#endif
+#ifdef OPTM_WITH_LPAGE
+		rnpgbe_put_rx_buffer(rx_ring, rx_buffer);
+#else
+		rnpgbe_put_rx_buffer(rx_ring, rx_buffer, NULL);
+#endif
+#endif
+		/* update to the next desc */
+		ntc = (ntc < rx_ring->count) ? ntc : 0;
+		rx_ring->next_to_clean = ntc;
+	}
+	return err;
+}
+
+/**
+ * rnpgbe_rx_ring_reinit - reinit an rx_ring with the new count in ->reset_count
+ * @adapter: board private structure
+ * @rx_ring: rx descriptor ring to reinit
+ */
+static int rnpgbe_rx_ring_reinit(struct rnpgbe_adapter *adapter,
+				 struct rnpgbe_ring *rx_ring)
+{
+	struct rnpgbe_ring *temp_ring;
+	int err = 0;
+
+	if (rx_ring->count == rx_ring->reset_count)
+		return 0;
+	/* stop rx queue */
+	temp_ring = vzalloc(sizeof(struct rnpgbe_ring));
+	if (!temp_ring)
+		return -ENOMEM;
+
+	rnpgbe_disable_rx_queue(adapter, rx_ring);
+	/* reinit for this ring */
+	memcpy(temp_ring, rx_ring, sizeof(struct rnpgbe_ring));
+	/* setup new count */
+	temp_ring->count = rx_ring->reset_count;
+	err = rnpgbe_setup_rx_resources(temp_ring, adapter);
+	if (err) {
+		rnpgbe_free_rx_resources(temp_ring);
+		goto err_setup;
+	}
+	rnpgbe_free_rx_resources(rx_ring);
+	memcpy(rx_ring, temp_ring, sizeof(struct rnpgbe_ring));
+	rnpgbe_configure_rx_ring(adapter, rx_ring);
+err_setup:
+	vfree(temp_ring);
+	/* start rx */
+	ring_wr32(rx_ring, RNP_DMA_RX_START, 1);
+	return 0;
+}
+
+#ifndef OPTM_WITH_LPAGE
+/**
+ * rnpgbe_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+static bool rnpgbe_alloc_rx_buffers(struct rnpgbe_ring *rx_ring,
+				    u16 cleaned_count)
+{
+	union rnpgbe_rx_desc *rx_desc;
+	struct rnpgbe_rx_buffer *bi;
+	u16 i = rx_ring->next_to_use;
+	u64 fun_id = ((u64)(rx_ring->pfvfnum) << (32 + 24));
+	bool err = false;
+#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT
+	u16 bufsz;
+#endif
+	/* nothing to do */
+	if (!cleaned_count)
+		return err;
+
+	rx_desc = RNP_RX_DESC(rx_ring, i);
+
+	BUG_ON(rx_desc == NULL);
+
+	bi = &rx_ring->rx_buffer_info[i];
+
+	BUG_ON(bi == NULL);
+
+	i -= rx_ring->count;
+#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT
+	bufsz = rnpgbe_rx_bufsz(rx_ring);
+#endif
+
+	do {
+#ifdef CONFIG_RNP_DISABLE_PACKET_SPLIT
+		if (!rnpgbe_alloc_mapped_skb(rx_ring, bi)) {
+			err = true;
+			break;
+		}
+#else
+		if (!rnpgbe_alloc_mapped_page(rx_ring, bi)) {
+			err = true;
+			break;
+		}
+		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+						 bi->page_offset, bufsz,
+						 DMA_FROM_DEVICE);
+#endif
+
+		/*
+		 * Refresh the desc even if buffer_addrs didn't change
+		 * because each write-back erases this info.
+		 */
+#ifdef CONFIG_RNP_DISABLE_PACKET_SPLIT
+		rx_desc->pkt_addr = cpu_to_le64(bi->dma + fun_id);
+#else
+		rx_desc->pkt_addr =
+			cpu_to_le64(bi->dma + bi->page_offset + fun_id);
+
+#endif
+		/* clean dd */
+		rx_desc->resv_cmd = 0;
+
+		rx_desc++;
+		bi++;
+		i++;
+		if (unlikely(!i)) {
+			rx_desc = RNP_RX_DESC(rx_ring, 0);
+			bi = rx_ring->rx_buffer_info;
+			i -= rx_ring->count;
+		}
+
+		/* clear the hdr_addr for the next_to_use descriptor */
+		cleaned_count--;
+	} while (cleaned_count);
+
+	i += rx_ring->count;
+
+	if (rx_ring->next_to_use != i)
+		rnpgbe_update_rx_tail(rx_ring, i);
+
+	return err;
+}
+
+#endif
+
+#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT
+static inline unsigned int rnpgbe_rx_offset(struct rnpgbe_ring *rx_ring)
+{
+	return ring_uses_build_skb(rx_ring) ? RNP_SKB_PAD : 0;
+}
+
+#ifdef OPTM_WITH_LPAGE
+/**
+ * rnpgbe_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+static bool rnpgbe_alloc_rx_buffers(struct rnpgbe_ring *rx_ring,
+				    u16 cleaned_count)
+{
+	union rnpgbe_rx_desc *rx_desc;
+	struct rnpgbe_rx_buffer *bi;
+	u16 i = rx_ring->next_to_use;
+	u64 fun_id = ((u64)(rx_ring->pfvfnum) << (32 + 24));
+	u16 bufsz;
+	bool err = false;
+	/* nothing to do */
+	if (!cleaned_count)
+		return err;
+
+	rx_desc = RNP_RX_DESC(rx_ring, i);
+
+	BUG_ON(rx_desc == NULL);
+
+	bi = &rx_ring->rx_buffer_info[i];
+
+	BUG_ON(bi == NULL);
+
+	i -= rx_ring->count;
+	bufsz = rnpgbe_rx_bufsz(rx_ring);
+
+	do {
+		int count = 1;
+		struct page *page;
+
+		if (!rnpgbe_alloc_mapped_page(rx_ring, bi, rx_desc, bufsz,
+					      fun_id)) {
+			err = true;
+			break;
+		}
+		page = bi->page;
+
+		rx_desc->resv_cmd = 0;
+
+		rx_desc++;
+		i++;
+		bi++;
+
+		if (unlikely(!i)) {
+			rx_desc = RNP_RX_DESC(rx_ring, 0);
+			bi = rx_ring->rx_buffer_info;
+			i -= rx_ring->count;
+		}
+
+		rx_desc->resv_cmd = 0;
+
+		cleaned_count--;
+
+		while (count < rx_ring->rx_page_buf_nums && cleaned_count) {
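+			/* carve more rx buffers out of the same large page */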
+			dma_addr_t dma;
+
+#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC)
+			DEFINE_DMA_ATTRS(attrs);
+
+			dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+			dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
+#endif
+
+			bi->page_offset = rx_ring->rx_per_buf_mem * count +
+					  rnpgbe_rx_offset(rx_ring);
+			/* map page for use */
+			dma = dma_map_page_attrs(rx_ring->dev, page,
+						 bi->page_offset, bufsz,
+						 DMA_FROM_DEVICE,
+#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC)
+						 &attrs);
+#else
+
+						 RNP_RX_DMA_ATTR);
+#endif
+
+			if (dma_mapping_error(rx_ring->dev, dma)) {
+				netdev_dbg(rx_ring->netdev,
+					   "map second error\n");
+				rx_ring->rx_stats.alloc_rx_page_failed++;
+				break;
+			}
+
+			bi->dma = dma;
+			bi->page = page;
+
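+			/* take a large batch of page references up front so
+			 * per-buffer accounting can use the cheap bias field
+			 */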
+			page_ref_add(page, USHRT_MAX);
+			bi->pagecnt_bias = USHRT_MAX;
+
+			/* sync the buffer for use by the device */
+			dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+							 0, bufsz,
+							 DMA_FROM_DEVICE);
+
+			/*
+			 * Refresh the desc even if buffer_addrs didn't change
+			 * because each write-back erases this info.
+			 */
+			rx_desc->pkt_addr = cpu_to_le64(bi->dma + fun_id);
+			/* clean dd */
+			rx_desc->resv_cmd = 0;
+
+			rx_desc++;
+			bi++;
+			i++;
+			if (unlikely(!i)) {
+				rx_desc = RNP_RX_DESC(rx_ring, 0);
+				bi = rx_ring->rx_buffer_info;
+				i -= rx_ring->count;
+			}
+			count++;
+			/* clear the hdr_addr for the next_to_use descriptor */
+			cleaned_count--;
+		}
+	} while (cleaned_count);
+
+	i += rx_ring->count;
+
+	if (rx_ring->next_to_use != i)
+		rnpgbe_update_rx_tail(rx_ring, i);
+
+	return err;
+}
+#endif /* OPTM_WITH_LPAGE */
+/**
+ * rnpgbe_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
+ * @data: pointer to the start of the headers
+ * @max_len: total length of section to find headers in
+ *
+ * This function is meant to determine the length of headers that will
+ * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
+ * motivation of doing this is to only perform one pull for IPv4 TCP
+ * packets so that we can do basic things like calculating the gso_size
+ * based on the average data per packet.
+ **/
+static unsigned int rnpgbe_get_headlen(unsigned char *data,
+				       unsigned int max_len)
+{
+	union {
+		unsigned char *network;
+		/* l2 headers */
+		struct ethhdr *eth;
+		struct vlan_hdr *vlan;
+		/* l3 headers */
+		struct iphdr *ipv4;
+		struct ipv6hdr *ipv6;
+	} hdr;
+	__be16 protocol;
+	u8 nexthdr = 0; /* default to not TCP */
+	u8 hlen;
+
+	/* this should never happen, but better safe than sorry */
+	if (max_len < ETH_HLEN)
+		return max_len;
+
+	/* initialize network frame pointer */
+	hdr.network = data;
+
+	/* set first protocol and move network header forward */
+	protocol = hdr.eth->h_proto;
+	hdr.network += ETH_HLEN;
+
+	/* handle any vlan tag if present */
+	if (protocol == htons(ETH_P_8021Q)) {
+		if ((hdr.network - data) > (max_len - VLAN_HLEN))
+			return max_len;
+
+		protocol = hdr.vlan->h_vlan_encapsulated_proto;
+		hdr.network += VLAN_HLEN;
+	}
+
+	/* handle L3 protocols */
+	if (protocol == htons(ETH_P_IP)) {
+		if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
+			return max_len;
+
+		/* access ihl as a u8 to avoid unaligned access on ia64 */
+		hlen = (hdr.network[0] & 0x0F) << 2;
+
+		/* verify hlen meets minimum size requirements */
+		if (hlen < sizeof(struct iphdr))
+			return hdr.network - data;
+
+		/* record next protocol if header is present */
+		if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
+			nexthdr = hdr.ipv4->protocol;
+	} else if (protocol == htons(ETH_P_IPV6)) {
+		if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
+			return max_len;
+
+		/* record next protocol */
+		nexthdr = hdr.ipv6->nexthdr;
+		hlen = sizeof(struct ipv6hdr);
+	} else {
+		return hdr.network - data;
+	}
+
+	/* relocate pointer to start of L4 header */
+	hdr.network += hlen;
+
+	/* finally sort out TCP/UDP */
+	if (nexthdr == IPPROTO_TCP) {
+		if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
+			return max_len;
+
+		/* access doff as a u8 to avoid unaligned access on ia64 */
+		hlen = (hdr.network[12] & 0xF0) >> 2;
+
+		/* verify hlen meets minimum size requirements */
+		if (hlen < sizeof(struct tcphdr))
+			return hdr.network - data;
+
+		hdr.network += hlen;
+	} else if (nexthdr == IPPROTO_UDP) {
+		if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
+			return max_len;
+
+		hdr.network += sizeof(struct udphdr);
+	}
+
+	/*
+	 * If everything has gone correctly hdr.network should be the
+	 * data section of the packet and will be the end of the header.
+	 * If not then it probably represents the end of the last recognized
+	 * header.
+	 */
+	if ((hdr.network - data) < max_len)
+		return hdr.network - data;
+	else
+		return max_len;
+}
+
+#ifdef OPTM_WITH_LPAGE
+
+/**
+ * rnpgbe_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ *
+ * This function updates next to clean.  If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it returns true
+ * indicating that this is in fact a non-EOP buffer.
+ **/
+static bool rnpgbe_is_non_eop(struct rnpgbe_ring *rx_ring,
+			      union rnpgbe_rx_desc *rx_desc)
+{
+	u32 ntc = rx_ring->next_to_clean + 1;
+	/* fetch, update, and store next to clean */
+	ntc = (ntc < rx_ring->count) ? ntc : 0;
+	rx_ring->next_to_clean = ntc;
+
+	prefetch(RNP_RX_DESC(rx_ring, ntc));
+
+	/* if we are the last buffer then there is nothing else to do */
+	if (likely(rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_EOP)))
+		return false;
+	/* place skb in next buffer to be received */
+
+	/* we should clean it since we used all info in it */
+	rx_desc->wb.cmd = 0;
+
+	return true;
+}
+
+static bool rnpgbe_alloc_mapped_page(struct rnpgbe_ring *rx_ring,
+				     struct rnpgbe_rx_buffer *bi,
+				     union rnpgbe_rx_desc *rx_desc, u16 bufsz,
+				     u64 fun_id)
+{
+	struct page *page = bi->page;
+	dma_addr_t dma;
+#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC)
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+	dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
+#endif
+
+	/* since we are recycling buffers we should seldom need to alloc */
+	if (likely(page))
+		return true;
+
+	page = dev_alloc_pages(RNP_ALLOC_PAGE_ORDER);
+	if (unlikely(!page)) {
+		rx_ring->rx_stats.alloc_rx_page_failed++;
+		return false;
+	}
+
+	bi->page_offset = rnpgbe_rx_offset(rx_ring);
+
+	/* map page for use */
+	dma = dma_map_page_attrs(rx_ring->dev, page, bi->page_offset, bufsz,
+				 DMA_FROM_DEVICE,
+#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC)
+				 &attrs);
+#else
+				 RNP_RX_DMA_ATTR);
+#endif
+
+	/*
+	 * if mapping failed free memory back to system since
+	 * there isn't much point in holding memory we can't use
+	 */
+	if (dma_mapping_error(rx_ring->dev, dma)) {
+		__free_pages(page, RNP_ALLOC_PAGE_ORDER);
+		netdev_dbg(rx_ring->netdev, "map failed\n");
+
+		rx_ring->rx_stats.alloc_rx_page_failed++;
+		return false;
+	}
+	bi->dma = dma;
+	bi->page = page;
+	bi->page_offset = rnpgbe_rx_offset(rx_ring);
+	page_ref_add(page, USHRT_MAX - 1);
+	bi->pagecnt_bias = USHRT_MAX;
+	rx_ring->rx_stats.alloc_rx_page++;
+
+	/* sync the buffer for use by the device */
+	dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0, bufsz,
+					 DMA_FROM_DEVICE);
+	/*
+	 * Refresh the desc even if buffer_addrs didn't change
+	 * because each write-back erases this info.
+	 */
+	rx_desc->pkt_addr = cpu_to_le64(bi->dma + fun_id);
+
+	return true;
+}
+
+#else
+static bool rnpgbe_alloc_mapped_page(struct rnpgbe_ring *rx_ring,
+				     struct rnpgbe_rx_buffer *bi)
+{
+	struct page *page = bi->page;
+	dma_addr_t dma;
+#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC)
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+	dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
+#endif
+
+	/* since we are recycling buffers we should seldom need to alloc */
+	if (likely(page))
+		return true;
+
+	page = dev_alloc_pages(rnpgbe_rx_pg_order(rx_ring));
+	if (unlikely(!page)) {
+		rx_ring->rx_stats.alloc_rx_page_failed++;
+		return false;
+	}
+
+	/* map page for use */
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+				 rnpgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
+#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC)
+				 &attrs);
+#else
+				 RNP_RX_DMA_ATTR);
+#endif
+
+	/*
+	 * if mapping failed free memory back to system since
+	 * there isn't much point in holding memory we can't use
+	 */
+	if (dma_mapping_error(rx_ring->dev, dma)) {
+		__free_pages(page, rnpgbe_rx_pg_order(rx_ring));
+		netdev_dbg(rx_ring->netdev, "map failed\n");
+
+		rx_ring->rx_stats.alloc_rx_page_failed++;
+		return false;
+	}
+	bi->dma = dma;
+	bi->page = page;
+	bi->page_offset = rnpgbe_rx_offset(rx_ring);
+#ifdef HAVE_PAGE_COUNT_BULK_UPDATE
+	page_ref_add(page, USHRT_MAX - 1);
+	bi->pagecnt_bias = USHRT_MAX;
+#else
+	bi->pagecnt_bias = 1;
+#endif
+	rx_ring->rx_stats.alloc_rx_page++;
+
+	return true;
+}
+
+/**
+ * rnpgbe_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean.  If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool rnpgbe_is_non_eop(struct rnpgbe_ring *rx_ring,
+			      union rnpgbe_rx_desc *rx_desc,
+			      struct sk_buff *skb)
+{
+	u32 ntc = rx_ring->next_to_clean + 1;
+#ifdef CONFIG_RNP_DISABLE_PACKET_SPLIT
+	struct sk_buff *next_skb;
+#endif
+	/* fetch, update, and store next to clean */
+	ntc = (ntc < rx_ring->count) ? ntc : 0;
+	rx_ring->next_to_clean = ntc;
+
+	prefetch(RNP_RX_DESC(rx_ring, ntc));
+
+	/* if we are the last buffer then there is nothing else to do */
+	if (likely(rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_EOP)))
+		return false;
+#ifdef CONFIG_RNP_DISABLE_PACKET_SPLIT
+
+#else
+	/* place skb in next buffer to be received */
+	rx_ring->rx_buffer_info[ntc].skb = skb;
+#endif
+	rx_ring->rx_stats.non_eop_descs++;
+	/* we should clean it since we used all info in it */
+	rx_desc->wb.cmd = 0;
+
+	return true;
+}
+
+#endif
+/**
+ * rnpgbe_pull_tail - rnpgbe specific version of skb_pull_tail
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an rnpgbe specific version of __pskb_pull_tail.  The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void rnpgbe_pull_tail(struct sk_buff *skb)
+{
+	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+	unsigned char *va;
+	unsigned int pull_len;
+
+	/*
+	 * it is valid to use page_address instead of kmap since we are
+	 * working with pages allocated out of the lomem pool per
+	 * alloc_page(GFP_ATOMIC)
+	 */
+	va = skb_frag_address(frag);
+
+	/*
+	 * we need the header to contain the greater of either ETH_HLEN or
+	 * 60 bytes if the skb->len is less than 60 for skb_pad.
+	 */
+	pull_len = rnpgbe_get_headlen(va, RNP_RX_HDR_SIZE);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+	/* update all of the pointers */
+	skb_frag_size_sub(frag, pull_len);
+	skb_frag_off_add(frag, pull_len);
+	skb->data_len -= pull_len;
+	skb->tail += pull_len;
+}
+
+/**
+ * rnpgbe_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Check if the skb is valid. In the XDP case it will be an error pointer.
+ * Return true in this case to abort processing and advance to next
+ * descriptor.
+ *
+ * Check for corrupted packet headers caused by senders on the local L2
+ * embedded NIC switch not setting up their Tx Descriptors right.  These
+ * should be very rare.
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool rnpgbe_cleanup_headers(struct rnpgbe_ring __maybe_unused *rx_ring,
+				   union rnpgbe_rx_desc *rx_desc,
+				   struct sk_buff *skb)
+{
+#ifdef OPTM_WITH_LPAGE
+#else
+	/* XDP packets use error pointer so abort at this point */
+	if (IS_ERR(skb))
+		return true;
+#endif
+	/* place header in linear portion of buffer */
+	if (!skb_headlen(skb))
+		rnpgbe_pull_tail(skb);
+	/*
+	 * eth_skb_pad() pads skb->len up to 60 bytes; if it returns an
+	 * error the skb has already been freed
+	 */
+	if (eth_skb_pad(skb))
+		return true;
+
+	return false;
+}
+
+/**
+ * rnpgbe_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void rnpgbe_reuse_rx_page(struct rnpgbe_ring *rx_ring,
+				 struct rnpgbe_rx_buffer *old_buff)
+{
+	struct rnpgbe_rx_buffer *new_buff;
+	u16 nta = rx_ring->next_to_alloc;
+
+	new_buff = &rx_ring->rx_buffer_info[nta];
+
+	/* update, and store next to alloc */
+	nta++;
+	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+	/*
+	 * Transfer page from old buffer to new buffer.
+	 * Move each member individually to avoid possible store
+	 * forwarding stalls and unnecessary copy of skb.
+	 */
+	new_buff->dma = old_buff->dma;
+	new_buff->page = old_buff->page;
+	new_buff->page_offset = old_buff->page_offset;
+	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
+}
+
+static inline bool rnpgbe_page_is_reserved(struct page *page)
+{
+	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
+static bool rnpgbe_can_reuse_rx_page(struct rnpgbe_rx_buffer *rx_buffer)
+{
+	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+	struct page *page = rx_buffer->page;
+
+#ifdef OPTM_WITH_LPAGE
+	return false;
+#endif
+	/* avoid re-using remote pages */
+	if (unlikely(rnpgbe_page_is_reserved(page)))
+		return false;
+
+#if (PAGE_SIZE < 8192)
+	/* if we are only owner of page we can reuse it */
+#ifdef HAVE_PAGE_COUNT_BULK_UPDATE
+	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+#else
+	if (unlikely((page_count(page) - pagecnt_bias) > 1))
+#endif
+		return false;
+#else
+
+	/*
+	 * The last offset is a bit aggressive in that we assume the
+	 * worst case of FCoE being enabled and using a 3K buffer.
+	 * However this should have minimal impact as the 1K extra is
+	 * still less than one buffer in size.
+	 */
+#define RNP_LAST_OFFSET (SKB_WITH_OVERHEAD(PAGE_SIZE) - RNP_RXBUFFER_2K)
+	if (rx_buffer->page_offset > RNP_LAST_OFFSET)
+		return false;
+#endif
+
+#ifdef HAVE_PAGE_COUNT_BULK_UPDATE
+	/* If we have drained the page fragment pool we need to update
+	 * the pagecnt_bias and page count so that we fully restock the
+	 * number of references the driver holds.
+	 */
+	if (unlikely(pagecnt_bias == 1)) {
+		page_ref_add(page, USHRT_MAX - 1);
+		rx_buffer->pagecnt_bias = USHRT_MAX;
+	}
+#else
+	/*
+	 * Even if we own the page, we are not allowed to use atomic_set()
+	 * This would break get_page_unless_zero() users.
+	 */
+	if (likely(!pagecnt_bias)) {
+		page_ref_inc(page);
+		rx_buffer->pagecnt_bias = 1;
+	}
+#endif
+
+	return true;
+}
+
+/**
+ * rnpgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @skb: sk_buff to place the data into
+ * @size: size of data
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset so that the buffer can
+ * be reused by the adapter.
+ **/
+static void rnpgbe_add_rx_frag(struct rnpgbe_ring *rx_ring,
+			       struct rnpgbe_rx_buffer *rx_buffer,
+			       struct sk_buff *skb, unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = rnpgbe_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+					SKB_DATA_ALIGN(RNP_SKB_PAD + size) :
+					SKB_DATA_ALIGN(size);
+#endif
+
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+			rx_buffer->page_offset, size, truesize);
+
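+	/*
+	 * advance the buffer offset: small pages are split in half and
+	 * toggle between the two halves, larger pages advance linearly
+	 */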
+#if (PAGE_SIZE < 8192)
+	rx_buffer->page_offset ^= truesize;
+#else
+	rx_buffer->page_offset += truesize;
+#endif
+}
+
+#ifdef OPTM_WITH_LPAGE
+static struct rnpgbe_rx_buffer *
+rnpgbe_get_rx_buffer(struct rnpgbe_ring *rx_ring, union rnpgbe_rx_desc *rx_desc,
+		     const unsigned int size)
+{
+	struct rnpgbe_rx_buffer *rx_buffer;
+
+	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	prefetchw(rx_buffer->page);
+
+	rx_buf_dump("rx buf",
+		    page_address(rx_buffer->page) + rx_buffer->page_offset,
+		    rx_desc->wb.len);
+
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, 0, size,
+				      DMA_FROM_DEVICE);
+	/* skip_sync: */
+	rx_buffer->pagecnt_bias--;
+
+	return rx_buffer;
+}
+#else
+static struct rnpgbe_rx_buffer *
+rnpgbe_get_rx_buffer(struct rnpgbe_ring *rx_ring, union rnpgbe_rx_desc *rx_desc,
+		     struct sk_buff **skb, const unsigned int size)
+{
+	struct rnpgbe_rx_buffer *rx_buffer;
+
+	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	prefetchw(rx_buffer->page);
+	*skb = rx_buffer->skb;
+
+	rx_buf_dump("rx buf",
+		    page_address(rx_buffer->page) + rx_buffer->page_offset,
+		    rx_desc->wb.len);
+
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
+				      rx_buffer->page_offset, size,
+				      DMA_FROM_DEVICE);
+	/* skip_sync: */
+	rx_buffer->pagecnt_bias--;
+
+	return rx_buffer;
+}
+#endif
+#ifdef OPTM_WITH_LPAGE
+static void rnpgbe_put_rx_buffer(struct rnpgbe_ring *rx_ring,
+				 struct rnpgbe_rx_buffer *rx_buffer)
+{
+#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC)
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+	dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
+
+#endif
+	if (rnpgbe_can_reuse_rx_page(rx_buffer)) {
+		/* hand second half of page back to the ring */
+		rnpgbe_reuse_rx_page(rx_ring, rx_buffer);
+	} else {
+		/* we are not reusing the buffer so unmap it */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     rnpgbe_rx_bufsz(rx_ring), DMA_FROM_DEVICE,
+#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC)
+				     &attrs);
+#else
+				     RNP_RX_DMA_ATTR);
+#endif
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+	}
+
+	/* clear contents of rx_buffer */
+	rx_buffer->page = NULL;
+}
+
+#else
+static void rnpgbe_put_rx_buffer(struct rnpgbe_ring *rx_ring,
+				 struct rnpgbe_rx_buffer *rx_buffer,
+				 struct sk_buff *skb)
+{
+#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC)
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+	dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
+
+#endif
+	if (rnpgbe_can_reuse_rx_page(rx_buffer)) {
+		/* hand second half of page back to the ring */
+		rnpgbe_reuse_rx_page(rx_ring, rx_buffer);
+	} else {
+		/* we are not reusing the buffer so unmap it */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     rnpgbe_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC)
+				     &attrs);
+#else
+				     RNP_RX_DMA_ATTR);
+#endif
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+	}
+
+	/* clear contents of rx_buffer */
+	rx_buffer->page = NULL;
+	rx_buffer->skb = NULL;
+}
+#endif
+
+#ifdef OPTM_WITH_LPAGE
+static struct sk_buff *rnpgbe_construct_skb(struct rnpgbe_ring *rx_ring,
+					    struct rnpgbe_rx_buffer *rx_buffer,
+					    union rnpgbe_rx_desc *rx_desc,
+					    unsigned int size)
+{
+	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+	unsigned int truesize = SKB_DATA_ALIGN(size);
+	unsigned int headlen;
+	struct sk_buff *skb;
+
+	/* prefetch first cache line of first page */
+	prefetch(va);
+#if L1_CACHE_BYTES < 128
+	prefetch(va + L1_CACHE_BYTES);
+#endif
+	/* allocate a skb to store the frags */
+	skb = napi_alloc_skb(&rx_ring->q_vector->napi, RNP_RX_HDR_SIZE);
+	if (unlikely(!skb))
+		return NULL;
+
+	prefetchw(skb->data);
+
+	/* Determine available headroom for copy */
+	headlen = size;
+	if (headlen > RNP_RX_HDR_SIZE)
+		headlen = rnpgbe_get_headlen(va, RNP_RX_HDR_SIZE);
+	/* align pull length to size of long to optimize memcpy performance */
+	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+
+	/* update all of the pointers */
+	size -= headlen;
+
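+	/*
+	 * anything beyond the copied header stays in the page and is
+	 * attached as a frag; otherwise give the page reference back so
+	 * the buffer can be reused
+	 */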
+	if (size) {
+		skb_add_rx_frag(skb, 0, rx_buffer->page,
+				(va + headlen) - page_address(rx_buffer->page),
+				size, truesize);
+		rx_buffer->page_offset += truesize;
+	} else {
+		rx_buffer->pagecnt_bias++;
+	}
+
+	return skb;
+}
+
+#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC
+static struct sk_buff *rnpgbe_build_skb(struct rnpgbe_ring *rx_ring,
+					struct rnpgbe_rx_buffer *rx_buffer,
+					union rnpgbe_rx_desc *rx_desc,
+					unsigned int size)
+{
+	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+				SKB_DATA_ALIGN(size + RNP_SKB_PAD);
+	struct sk_buff *skb;
+
+	/* prefetch first cache line of first page */
+	prefetch(va);
+#if L1_CACHE_BYTES < 128
+	prefetch(va + L1_CACHE_BYTES);
+#endif
+
+	/* build an skb around the page buffer */
+	skb = build_skb(va - RNP_SKB_PAD, truesize);
+	if (unlikely(!skb))
+		return NULL;
+
+	/* update pointers within the skb to store the data */
+	skb_reserve(skb, RNP_SKB_PAD);
+	__skb_put(skb, size);
+	/* record DMA address if this is the start of a
+	 * chain of buffers
+	 */
+
+	return skb;
+}
+
+#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */
+
+#else
+
+static struct sk_buff *rnpgbe_construct_skb(struct rnpgbe_ring *rx_ring,
+					    struct rnpgbe_rx_buffer *rx_buffer,
+					    struct xdp_buff *xdp,
+					    union rnpgbe_rx_desc *rx_desc)
+{
+	unsigned int size = xdp->data_end - xdp->data;
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = rnpgbe_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize =
+		SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start);
+#endif
+	struct sk_buff *skb;
+
+	/* prefetch first cache line of first page */
+	prefetch(xdp->data);
+#if L1_CACHE_BYTES < 128
+	prefetch(xdp->data + L1_CACHE_BYTES);
+#endif
+	/* allocate a skb to store the frags */
+	skb = napi_alloc_skb(&rx_ring->q_vector->napi, RNP_RX_HDR_SIZE);
+	if (unlikely(!skb))
+		return NULL;
+
+	prefetchw(skb->data);
+
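+	/*
+	 * copy-break: small frames are copied into the skb linear area so
+	 * the page can be recycled immediately; larger frames keep their
+	 * data in the page and attach it as a frag
+	 */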
+	if (size > RNP_RX_HDR_SIZE) {
+		/*
+		 * if (!rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_EOP))
+		 * RNP_CB(skb)->dma = rx_buffer->dma;
+		 */
+
+		skb_add_rx_frag(skb, 0, rx_buffer->page,
+				xdp->data - page_address(rx_buffer->page), size,
+				truesize);
+#if (PAGE_SIZE < 8192)
+		rx_buffer->page_offset ^= truesize;
+#else
+		rx_buffer->page_offset += truesize;
+#endif
+	} else {
+		memcpy(__skb_put(skb, size), xdp->data,
+		       ALIGN(size, sizeof(long)));
+		rx_buffer->pagecnt_bias++;
+	}
+
+	return skb;
+}
+
+#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC
+static struct sk_buff *rnpgbe_build_skb(struct rnpgbe_ring *rx_ring,
+					struct rnpgbe_rx_buffer *rx_buffer,
+					struct xdp_buff *xdp,
+					union rnpgbe_rx_desc *rx_desc)
+{
+#ifdef HAVE_XDP_BUFF_DATA_META
+	unsigned int metasize = xdp->data - xdp->data_meta;
+	void *va = xdp->data_meta;
+#else
+	void *va = xdp->data;
+#endif
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = rnpgbe_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize =
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+		SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start);
+#endif
+	struct sk_buff *skb;
+
+	/* prefetch first cache line of first page */
+	prefetch(va);
+#if L1_CACHE_BYTES < 128
+	prefetch(va + L1_CACHE_BYTES);
+#endif
+
+	/* build an skb around the page buffer */
+	skb = build_skb(xdp->data_hard_start, truesize);
+	if (unlikely(!skb))
+		return NULL;
+
+	/* update pointers within the skb to store the data */
+	skb_reserve(skb, xdp->data - xdp->data_hard_start);
+	__skb_put(skb, xdp->data_end - xdp->data);
+#ifdef HAVE_XDP_BUFF_DATA_META
+	if (metasize)
+		skb_metadata_set(skb, metasize);
+#endif
+	/* update buffer offset */
+#if (PAGE_SIZE < 8192)
+	rx_buffer->page_offset ^= truesize;
+#else
+	rx_buffer->page_offset += truesize;
+#endif
+
+	return skb;
+}
+
+#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */
+#endif
+
+#define RNP_XDP_PASS 0
+#define RNP_XDP_CONSUMED 1
+#define RNP_XDP_TX 2
+
+#ifndef OPTM_WITH_LPAGE
+static void rnpgbe_rx_buffer_flip(struct rnpgbe_ring *rx_ring,
+				  struct rnpgbe_rx_buffer *rx_buffer,
+				  unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = rnpgbe_rx_pg_size(rx_ring) / 2;
+
+	rx_buffer->page_offset ^= truesize;
+#else
+	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+					SKB_DATA_ALIGN(RNP_SKB_PAD + size) :
+					SKB_DATA_ALIGN(size);
+
+	rx_buffer->page_offset += truesize;
+#endif
+}
+#endif
+
+#ifdef OPTM_WITH_LPAGE
+
+/**
+ * rnpgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @q_vector: structure containing interrupt and ring information
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing.  The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed.
+ **/
+
+static int rnpgbe_clean_rx_irq(struct rnpgbe_q_vector *q_vector,
+			       struct rnpgbe_ring *rx_ring, int budget)
+{
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	unsigned int err_packets = 0;
+	unsigned int driver_drop_packets = 0;
+	struct sk_buff *skb = rx_ring->skb;
+	struct rnpgbe_adapter *adapter = q_vector->adapter;
+	u16 cleaned_count = rnpgbe_desc_unused_rx(rx_ring);
+	bool fail_alloc = false;
+
+	while (likely(total_rx_packets < budget)) {
+		union rnpgbe_rx_desc *rx_desc;
+		struct rnpgbe_rx_buffer *rx_buffer;
+		unsigned int size;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= RNP_RX_BUFFER_WRITE) {
+			fail_alloc = rnpgbe_alloc_rx_buffers(rx_ring, cleaned_count) || fail_alloc;
+			cleaned_count = 0;
+		}
+		rx_desc = RNP_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+		rx_buf_dump("rx-desc:", rx_desc, sizeof(*rx_desc));
+		rx_debug_printk("  dd set: %s\n",
+				(rx_desc->wb.cmd & RNP_RXD_STAT_DD) ? "Yes" :
+								      "No");
+
+		if (!rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_DD))
+			break;
+
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we know the
+		 * descriptor has been written back
+		 */
+		dma_rmb();
+		rx_debug_printk(
+			"queue:%d  rx-desc:%d has-data len:%d next_to_clean %d\n",
+			rx_ring->rnpgbe_queue_idx, rx_ring->next_to_clean,
+			rx_desc->wb.len, rx_ring->next_to_clean);
+
+		/* strip any hw-inserted frame padding from the reported length */
+		if ((adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) &&
+		    (!(adapter->priv_flags & RNP_PRIV_FLAG_PADDING_DEBUG))) {
+			if (likely(rnpgbe_test_staterr(rx_desc,
+						       RNP_RXD_STAT_EOP))) {
+				size = le16_to_cpu(rx_desc->wb.len) -
+				       le16_to_cpu(rx_desc->wb.padding_len);
+			} else {
+				size = le16_to_cpu(rx_desc->wb.len);
+			}
+		} else {
+			/* size should not be zero */
+			size = le16_to_cpu(rx_desc->wb.len);
+		}
+
+		if (!size)
+			break;
+
+		/*
+		 * Check for checksum errors here. A packet may span
+		 * multiple descriptors; that is fine since the hw sets
+		 * csum_err in all of them. It may misbehave if the last
+		 * SCTP descriptor is shorter than 60 bytes.
+		 */
+		if (rnpgbe_check_csum_error(rx_ring, rx_desc, size,
+					    &driver_drop_packets)) {
+			cleaned_count++;
+			err_packets++;
+			if (err_packets + total_rx_packets > budget)
+				break;
+			continue;
+		}
+
+		rx_buffer = rnpgbe_get_rx_buffer(rx_ring, rx_desc, size);
+
+		if (skb) {
+			rnpgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
+#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC
+		} else if (ring_uses_build_skb(rx_ring)) {
+			skb = rnpgbe_build_skb(rx_ring, rx_buffer, rx_desc,
+					       size);
+#endif
+		} else {
+			skb = rnpgbe_construct_skb(rx_ring, rx_buffer, rx_desc,
+						   size);
+		}
+
+		/* exit if we failed to retrieve a buffer */
+		if (!skb) {
+			rx_ring->rx_stats.alloc_rx_buff_failed++;
+			rx_buffer->pagecnt_bias++;
+			break;
+		}
+#ifdef HAVE_PTP_1588_CLOCK
+		if (module_enable_ptp && adapter->ptp_rx_en &&
+		    adapter->flags2 & RNP_FLAG2_PTP_ENABLED)
+			rnpgbe_ptp_get_rx_hwstamp(adapter, rx_desc, skb);
+#endif
+		rnpgbe_put_rx_buffer(rx_ring, rx_buffer);
+		cleaned_count++;
+
+		/* place incomplete frames back on ring for completion */
+		if (rnpgbe_is_non_eop(rx_ring, rx_desc))
+			continue;
+
+		/* verify the packet layout is correct */
+		if (rnpgbe_cleanup_headers(rx_ring, rx_desc, skb)) {
+			/* we should clean it since we used all info in it */
+			rx_desc->wb.cmd = 0;
+			skb = NULL;
+			continue;
+		}
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+
+		/* populate checksum, timestamp, VLAN, and protocol */
+		rnpgbe_process_skb_fields(rx_ring, rx_desc, skb);
+
+		/* we should clean it since we used all info in it */
+		rx_desc->wb.cmd = 0;
+		rnpgbe_rx_skb(q_vector, skb);
+		skb = NULL;
+
+		/* update budget accounting */
+		total_rx_packets++;
+	}
+
+	rx_ring->skb = skb;
+
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.packets += total_rx_packets;
+	rx_ring->stats.bytes += total_rx_bytes;
+	rx_ring->rx_stats.driver_drop_packets += driver_drop_packets;
+	rx_ring->rx_stats.rx_clean_count += total_rx_packets;
+	rx_ring->rx_stats.rx_clean_times++;
+	if (rx_ring->rx_stats.rx_clean_times > 10) {
+		rx_ring->rx_stats.rx_clean_times = 0;
+		rx_ring->rx_stats.rx_clean_count = 0;
+	}
+	u64_stats_update_end(&rx_ring->syncp);
+	q_vector->rx.total_packets += total_rx_packets;
+	q_vector->rx.total_bytes += total_rx_bytes;
+
+	if (total_rx_packets >= budget)
+		rx_ring->rx_stats.poll_again_count++;
+
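+	/*
+	 * report the full budget if buffer allocation failed so that NAPI
+	 * keeps polling and the ring gets another chance to be refilled
+	 */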
+	return (fail_alloc ? budget : total_rx_packets);
+}
+
+#else
+
+/**
+ * rnpgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @q_vector: structure containing interrupt and ring information
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing.  The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed.
+ **/
+
+static int rnpgbe_clean_rx_irq(struct rnpgbe_q_vector *q_vector,
+			       struct rnpgbe_ring *rx_ring, int budget)
+{
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	unsigned int err_packets = 0;
+	unsigned int driver_drop_packets = 0;
+#ifdef HAVE_PTP_1588_CLOCK
+	struct rnpgbe_adapter *adapter = q_vector->adapter;
+#endif
+	u16 cleaned_count = rnpgbe_desc_unused_rx(rx_ring);
+	bool xdp_xmit = false;
+	struct xdp_buff xdp;
+	bool fail_alloc = false;
+
+	xdp.data = NULL;
+	xdp.data_end = NULL;
+
+	while (likely(total_rx_packets < budget)) {
+		union rnpgbe_rx_desc *rx_desc;
+		struct rnpgbe_rx_buffer *rx_buffer;
+		struct sk_buff *skb;
+		unsigned int size;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= RNP_RX_BUFFER_WRITE) {
+			fail_alloc = rnpgbe_alloc_rx_buffers(rx_ring, cleaned_count) || fail_alloc;
+			cleaned_count = 0;
+		}
+		rx_desc = RNP_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+		rx_buf_dump("rx-desc:", rx_desc, sizeof(*rx_desc));
+		rx_debug_printk("  dd set: %s\n",
+				(rx_desc->wb.cmd & RNP_RXD_STAT_DD) ? "Yes" :
+								      "No");
+
+		if (!rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_DD))
+			break;
+
+		rx_debug_printk(
+			"queue:%d  rx-desc:%d has-data len:%d next_to_clean %d\n",
+			rx_ring->rnpgbe_queue_idx, rx_ring->next_to_clean,
+			rx_desc->wb.len, rx_ring->next_to_clean);
+
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we know the
+		 * descriptor has been written back
+		 */
+		dma_rmb();
+		size = le16_to_cpu(rx_desc->wb.len);
+		if (!size)
+			break;
+
+		/*
+		 * Check for checksum errors here. A packet may span
+		 * multiple descriptors; that is fine since the hw sets
+		 * csum_err in all of them. It may misbehave if the last
+		 * SCTP descriptor is shorter than 60 bytes.
+		 */
+		if (rnpgbe_check_csum_error(rx_ring, rx_desc, size,
+					    &driver_drop_packets)) {
+			cleaned_count++;
+			err_packets++;
+			if (err_packets + total_rx_packets > budget)
+				break;
+			continue;
+		}
+
+		rx_buffer = rnpgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+
+		if (!skb) {
+			xdp.data = page_address(rx_buffer->page) +
+				   rx_buffer->page_offset;
+#ifdef HAVE_XDP_BUFF_DATA_META
+			xdp.data_meta = xdp.data;
+#endif
+			xdp.data_hard_start =
+				xdp.data - rnpgbe_rx_offset(rx_ring);
+			xdp.data_end = xdp.data + size;
+			/* an XDP hook could be called here to add XDP support */
+		}
+
+		if (IS_ERR(skb)) {
+			if (PTR_ERR(skb) == -RNP_XDP_TX) {
+				xdp_xmit = true;
+				rnpgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
+			} else {
+				rx_buffer->pagecnt_bias++;
+			}
+			total_rx_packets++;
+			total_rx_bytes += size;
+		} else if (skb) {
+			rnpgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
+#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC
+		} else if (ring_uses_build_skb(rx_ring)) {
+			skb = rnpgbe_build_skb(rx_ring, rx_buffer, &xdp,
+					       rx_desc);
+#endif
+		} else {
+			skb = rnpgbe_construct_skb(rx_ring, rx_buffer, &xdp,
+						   rx_desc);
+		}
+
+		/* exit if we failed to retrieve a buffer */
+		if (!skb) {
+			rx_ring->rx_stats.alloc_rx_buff_failed++;
+			rx_buffer->pagecnt_bias++;
+			break;
+		}
+#ifdef HAVE_PTP_1588_CLOCK
+		if (module_enable_ptp && adapter->ptp_rx_en &&
+		    adapter->flags2 & RNP_FLAG2_PTP_ENABLED)
+			rnpgbe_ptp_get_rx_hwstamp(adapter, rx_desc, skb);
+#endif
+		rnpgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
+		cleaned_count++;
+
+		/* place incomplete frames back on ring for completion */
+		if (rnpgbe_is_non_eop(rx_ring, rx_desc, skb))
+			continue;
+
+		/* verify the packet layout is correct */
+		if (rnpgbe_cleanup_headers(rx_ring, rx_desc, skb)) {
+			/* we should clean it since we used all info in it */
+			rx_desc->wb.cmd = 0;
+			continue;
+		}
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+
+		/* populate checksum, timestamp, VLAN, and protocol */
+		rnpgbe_process_skb_fields(rx_ring, rx_desc, skb);
+
+		/* we should clean it since we used all info in it */
+		rx_desc->wb.cmd = 0;
+
+		rnpgbe_rx_skb(q_vector, skb);
+
+		/* update budget accounting */
+		total_rx_packets++;
+	}
+
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.packets += total_rx_packets;
+	rx_ring->stats.bytes += total_rx_bytes;
+	rx_ring->rx_stats.driver_drop_packets += driver_drop_packets;
+	rx_ring->rx_stats.rx_clean_count += total_rx_packets;
+	rx_ring->rx_stats.rx_clean_times++;
+	if (rx_ring->rx_stats.rx_clean_times > 10) {
+		rx_ring->rx_stats.rx_clean_times = 0;
+		rx_ring->rx_stats.rx_clean_count = 0;
+	}
+	u64_stats_update_end(&rx_ring->syncp);
+	q_vector->rx.total_packets += total_rx_packets;
+	q_vector->rx.total_bytes += total_rx_bytes;
+
+	if (total_rx_packets >= budget)
+		rx_ring->rx_stats.poll_again_count++;
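+	/*
+	 * returning the full budget keeps NAPI polling so the ring can be
+	 * refilled after an allocation failure
+	 */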
+	return (fail_alloc ? budget : total_rx_packets);
+}
+#endif
+
+#else /* CONFIG_RNP_DISABLE_PACKET_SPLIT */
+
+/**
+ * rnpgbe_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean.  If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool rnpgbe_is_non_eop(struct rnpgbe_ring *rx_ring,
+			      union rnpgbe_rx_desc *rx_desc,
+			      struct sk_buff *skb)
+{
+	u32 ntc = rx_ring->next_to_clean + 1;
+	/* fetch, update, and store next to clean */
+	ntc = (ntc < rx_ring->count) ? ntc : 0;
+	rx_ring->next_to_clean = ntc;
+
+	prefetch(RNP_RX_DESC(rx_ring, ntc));
+
+	/* if we are the last buffer then there is nothing else to do */
+	if (likely(rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_EOP)))
+		return false;
+#ifdef CONFIG_RNP_RNP_DISABLE_PACKET_SPLIT
+	netdev_dbg(rx_ring->netdev, "error: split packet detected\n");
+#else
+	/* place skb in next buffer to be received */
+	rx_ring->rx_buffer_info[ntc].skb = skb;
+#endif
+	rx_ring->rx_stats.non_eop_descs++;
+	/* we should clean it since we used all info in it */
+	rx_desc->wb.cmd = 0;
+
+	return true;
+}
+
+/**
+ * rnpgbe_merge_active_tail - merge active tail into lro skb
+ * @tail: pointer to active tail in frag_list
+ *
+ * This function merges the length and data of an active tail into the
+ * skb containing the frag_list.  It resets the tail's pointer to the head,
+ * but it leaves the head's pointer to the tail intact.
+ **/
+static inline struct sk_buff *rnpgbe_merge_active_tail(struct sk_buff *tail)
+{
+	struct sk_buff *head = RNP_CB(tail)->head;
+
+	if (!head)
+		return tail;
+
+	head->len += tail->len;
+	head->data_len += tail->len;
+	head->truesize += tail->truesize;
+
+	RNP_CB(tail)->head = NULL;
+
+	return head;
+}
+
+/**
+ * rnpgbe_add_active_tail - adds an active tail into the skb frag_list
+ * @head: pointer to the start of the skb
+ * @tail: pointer to active tail to add to frag_list
+ *
+ * This function adds an active tail to the end of the frag list.  This tail
+ * will still be receiving data so we cannot yet add its stats to the main
+ * skb.  That is done via rnpgbe_merge_active_tail.
+ **/
+static inline void rnpgbe_add_active_tail(struct sk_buff *head,
+					  struct sk_buff *tail)
+{
+	struct sk_buff *old_tail = RNP_CB(head)->tail;
+
+	if (old_tail) {
+		rnpgbe_merge_active_tail(old_tail);
+		old_tail->next = tail;
+	} else {
+		skb_shinfo(head)->frag_list = tail;
+	}
+
+	RNP_CB(tail)->head = head;
+	RNP_CB(head)->tail = tail;
+}
+
+/**
+ * rnpgbe_close_active_frag_list - cleanup pointers on a frag_list skb
+ * @head: pointer to head of an active frag list
+ *
+ * This function clears the tail tracking pointer on an active frag_list
+ * and returns true if the pointer was actually set
+ **/
+static inline bool rnpgbe_close_active_frag_list(struct sk_buff *head)
+{
+	struct sk_buff *tail = RNP_CB(head)->tail;
+
+	if (!tail)
+		return false;
+
+	rnpgbe_merge_active_tail(tail);
+
+	RNP_CB(head)->tail = NULL;
+
+	return true;
+}
+
+static bool rnpgbe_alloc_mapped_skb(struct rnpgbe_ring *rx_ring,
+				    struct rnpgbe_rx_buffer *bi)
+{
+	struct sk_buff *skb = bi->skb;
+	dma_addr_t dma = bi->dma;
+
+	if (unlikely(dma))
+		return true;
+
+	if (likely(!skb)) {
+		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+						rx_ring->rx_buf_len);
+		if (unlikely(!skb)) {
+			rx_ring->rx_stats.alloc_rx_buff_failed++;
+			return false;
+		}
+
+		bi->skb = skb;
+	}
+	dma = dma_map_single(rx_ring->dev, skb->data, rx_ring->rx_buf_len,
+			     DMA_FROM_DEVICE);
+
+	/*
+	 * if mapping failed free memory back to system since
+	 * there isn't much point in holding memory we can't use
+	 */
+	if (dma_mapping_error(rx_ring->dev, dma)) {
+		dev_kfree_skb_any(skb);
+		bi->skb = NULL;
+
+		rx_ring->rx_stats.alloc_rx_buff_failed++;
+		return false;
+	}
+
+	bi->dma = dma;
+	return true;
+}
+
+/**
+ * rnpgbe_clean_rx_irq - Clean completed descriptors from Rx ring - legacy
+ * @q_vector: structure containing interrupt and ring information
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a legacy approach to Rx interrupt
+ * handling.  This version will perform better on systems with a low cost
+ * dma mapping API.
+ *
+ * Returns amount of work completed.
+ **/
+static int rnpgbe_clean_rx_irq(struct rnpgbe_q_vector *q_vector,
+			       struct rnpgbe_ring *rx_ring, int budget)
+{
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	struct rnpgbe_adapter *adapter = q_vector->adapter;
+	unsigned int driver_drop_packets = 0;
+	unsigned int err_packets = 0;
+	u16 len = 0;
+	u16 cleaned_count = rnpgbe_desc_unused_rx(rx_ring);
+
+	while (likely(total_rx_packets < budget)) {
+		struct rnpgbe_rx_buffer *rx_buffer;
+		union rnpgbe_rx_desc *rx_desc;
+		struct sk_buff *skb;
+		u16 ntc;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= RNP_RX_BUFFER_WRITE) {
+			rnpgbe_alloc_rx_buffers(rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		ntc = rx_ring->next_to_clean;
+		rx_desc = RNP_RX_DESC(rx_ring, ntc);
+		rx_buffer = &rx_ring->rx_buffer_info[ntc];
+
+		if (!rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_DD))
+			break;
+
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we know the
+		 * descriptor has been written back
+		 */
+		dma_rmb();
+
+		skb = rx_buffer->skb;
+
+		prefetch(skb->data);
+
+		/* strip any hw-inserted frame padding from the reported length */
+		if ((adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) &&
+		    (!(adapter->priv_flags & RNP_PRIV_FLAG_PADDING_DEBUG))) {
+			if (likely(rnpgbe_test_staterr(rx_desc,
+						       RNP_RXD_STAT_EOP))) {
+				len = le16_to_cpu(rx_desc->wb.len) -
+				      le16_to_cpu(rx_desc->wb.padding_len);
+			} else {
+				len = le16_to_cpu(rx_desc->wb.len);
+			}
+		} else {
+			/* len should not be zero */
+			len = le16_to_cpu(rx_desc->wb.len);
+		}
+
+		if (rnpgbe_check_csum_error(rx_ring, rx_desc, len,
+					    &driver_drop_packets)) {
+			dev_kfree_skb_any(skb);
+			cleaned_count++;
+			err_packets++;
+			if (err_packets + total_rx_packets > budget)
+				break;
+			continue;
+		}
+
+		/* pull the header of the skb in */
+		__skb_put(skb, len);
+
+		/*
+		 * Delay unmapping of the first packet. It carries the
+		 * header information, HW may still access the header after
+		 * the writeback.  Only unmap it when EOP is reached
+		 */
+		dma_unmap_single(rx_ring->dev, rx_buffer->dma,
+				 rx_ring->rx_buf_len, DMA_FROM_DEVICE);
+
+		/* clear skb reference in buffer info structure */
+		rx_buffer->skb = NULL;
+		rx_buffer->dma = 0;
+
+		cleaned_count++;
+
+		if (rnpgbe_is_non_eop(rx_ring, rx_desc, skb))
+			continue;
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+
+		/* populate checksum, timestamp, VLAN, and protocol */
+		rnpgbe_process_skb_fields(rx_ring, rx_desc, skb);
+		/* we should clean it since we used all info in it */
+		rx_desc->wb.cmd = 0;
+
+		rnpgbe_rx_skb(q_vector, skb);
+
+		/* update budget accounting */
+		total_rx_packets++;
+	}
+
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.packets += total_rx_packets;
+	rx_ring->stats.bytes += total_rx_bytes;
+	rx_ring->rx_stats.driver_drop_packets += driver_drop_packets;
+	rx_ring->rx_stats.rx_clean_count += total_rx_packets;
+	rx_ring->rx_stats.rx_clean_times++;
+	if (rx_ring->rx_stats.rx_clean_times > 10) {
+		rx_ring->rx_stats.rx_clean_times = 0;
+		rx_ring->rx_stats.rx_clean_count = 0;
+	}
+	u64_stats_update_end(&rx_ring->syncp);
+	q_vector->rx.total_packets += total_rx_packets;
+	q_vector->rx.total_bytes += total_rx_bytes;
+
+	if (total_rx_packets >= budget)
+		rx_ring->rx_stats.poll_again_count++;
+
+	return total_rx_packets;
+}
+
+#endif /* CONFIG_RNP_DISABLE_PACKET_SPLIT */
+
+/**
+ * rnpgbe_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
+ *
+ * rnpgbe_configure_msix sets up the hardware to properly generate MSI-X
+ * interrupts.
+ **/
+static void rnpgbe_configure_msix(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_q_vector *q_vector;
+	int i;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	/*
+	 * configure the ring to MSI-X vector mapping table
+	 */
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct rnpgbe_ring *ring;
+
+		q_vector = adapter->q_vector[i];
+		rnpgbe_for_each_ring(ring, q_vector->rx) {
+			rnpgbe_set_ring_vector(adapter, ring->rnpgbe_queue_idx,
+					       q_vector->v_idx);
+		}
+	}
+	/* n500 should mask the other (non-ring) interrupt vectors */
+	if ((hw->hw_type == rnpgbe_hw_n500) ||
+	    (hw->hw_type == rnpgbe_hw_n210) ||
+	    (hw->hw_type == rnpgbe_hw_n210L)) {
+		/*
+		 * 8  lpi | PMT
+		 * 9  BMC_RX_IRQ |
+		 * 10 PHY_IRQ | LPI_IRQ
+		 * 11 BMC_TX_IRQ |
+		 * may cause DMAR errors if the PF is passed through to a VM
+		 */
+#define OTHER_VECTOR_START (8)
+#define OTHER_VECTOR_STOP (11)
+#define MSIX_UNUSED (0x0f0f)
+		for (i = OTHER_VECTOR_START; i <= OTHER_VECTOR_STOP; i++) {
+			if (hw->feature_flags & RNP_HW_SOFT_MASK_OTHER_IRQ)
+				rnpgbe_wr_reg(hw->ring_msix_base +
+						      RING_VECTOR(i),
+					      MSIX_UNUSED);
+			else
+				rnpgbe_wr_reg(
+					hw->ring_msix_base + RING_VECTOR(i), 0);
+		}
+		if (hw->feature_flags & RNP_HW_FEATURE_EEE) {
+#define LPI_IRQ (8)
+			/* only open lpi irq */
+			if (hw->feature_flags & RNP_HW_SOFT_MASK_OTHER_IRQ)
+				rnpgbe_wr_reg(hw->ring_msix_base +
+						      RING_VECTOR(LPI_IRQ),
+					      0x000f);
+			else
+				rnpgbe_wr_reg(hw->ring_msix_base +
+						      RING_VECTOR(LPI_IRQ),
+					      0x0000);
+		}
+	}
+}
+
+static void rnpgbe_update_ring_itr_rx(struct rnpgbe_q_vector *q_vector)
+{
+	int new_val = q_vector->itr_rx;
+	int avg_wire_size = 0;
+	struct rnpgbe_adapter *adapter = q_vector->adapter;
+	unsigned int packets;
+	/* For non-gigabit speeds, just fix the interrupt rate at 4000
+	 * ints/sec - ITR timer value of 120 ticks.
+	 */
+	switch (adapter->link_speed) {
+	case RNP_LINK_SPEED_10_FULL:
+	case RNP_LINK_SPEED_100_FULL:
+		new_val = RNP_4K_ITR;
+		goto set_itr_val;
+	default:
+		break;
+	}
+
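+	/*
+	 * derive a new moderation value from the average frame size seen
+	 * since the last update
+	 */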
+	packets = q_vector->rx.total_packets;
+	if (packets)
+		avg_wire_size = max_t(u32, avg_wire_size,
+				      q_vector->rx.total_bytes / packets);
+
+	/* if avg_wire_size isn't set no work was done */
+	if (!avg_wire_size)
+		goto clear_counts;
+
+	/* Add 24 bytes to size to account for CRC, preamble, and gap */
+	avg_wire_size += 24;
+
+	/* Don't starve jumbo frames */
+	avg_wire_size = min(avg_wire_size, 3000);
+
+	/* Give a little boost to mid-size frames */
+	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
+		new_val = avg_wire_size / 3;
+	else
+		new_val = avg_wire_size / 2;
+
+	if (new_val < RNP_LOWEREST_ITR)
+		new_val = RNP_LOWEREST_ITR;
+
+set_itr_val:
+	if (q_vector->rx.itr != new_val) {
+		q_vector->rx.update_count++;
+		if (q_vector->rx.update_count >= 2) {
+			q_vector->rx.itr = new_val;
+			q_vector->rx.update_count = 0;
+		}
+	} else {
+		q_vector->rx.update_count = 0;
+	}
+
+clear_counts:
+	q_vector->rx.total_bytes = 0;
+	q_vector->rx.total_packets = 0;
+}
+
+static void rnpgbe_write_eitr_rx(struct rnpgbe_q_vector *q_vector)
+{
+	struct rnpgbe_adapter *adapter = q_vector->adapter;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 new_itr_rx = q_vector->rx.itr;
+	u32 old_itr_rx = q_vector->rx.itr;
+	struct rnpgbe_ring *ring;
+
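+	/* scale the ITR value to hardware clock ticks */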
+	new_itr_rx = new_itr_rx * hw->usecstocount;
+	/* if we are in auto mode write to hw */
+	if (!(adapter->priv_flags & RNP_PRIV_FLAG_RX_COALESCE)) {
+		rnpgbe_for_each_ring(ring, q_vector->rx) {
+			ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER,
+				  new_itr_rx);
+			if (ring->ring_flags & RNP_RING_LOWER_ITR) {
+				/* if we are already in this mode, skip */
+				if (q_vector->itr_rx == RNP_LOWEREST_ITR)
+					continue;
+				ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_PKTCNT,
+					  1);
+				ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER,
+					  RNP_LOWEREST_ITR);
+				q_vector->itr_rx = RNP_LOWEREST_ITR;
+			} else {
+				if (new_itr_rx == q_vector->itr_rx)
+					continue;
+				ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER,
+					  new_itr_rx);
+				ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_PKTCNT,
+					  adapter->rx_frames);
+				q_vector->itr_rx = old_itr_rx;
+			}
+		}
+	}
+}
+
+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
+
+static inline void rnpgbe_irq_enable_queues(struct rnpgbe_adapter *adapter,
+					    struct rnpgbe_q_vector *q_vector)
+{
+	struct rnpgbe_ring *ring;
+
+	rnpgbe_for_each_ring(ring, q_vector->rx) {
+#ifdef CONFIG_RNP_DISABLE_TX_IRQ
+		rnpgbe_wr_reg(ring->dma_int_mask, ~(RX_INT_MASK));
+#else
+		rnpgbe_wr_reg(ring->dma_int_mask, ~(RX_INT_MASK | TX_INT_MASK));
+		ring_wr32(ring, RNP_DMA_INT_TRIG,
+			  (0x3 << 16) | TX_INT_MASK | RX_INT_MASK);
+#endif
+	}
+}
+
+static inline void rnpgbe_irq_disable_queues(struct rnpgbe_q_vector *q_vector)
+{
+	struct rnpgbe_ring *ring;
+
+	rnpgbe_for_each_ring(ring, q_vector->tx) {
+		ring_wr32(ring, RNP_DMA_INT_TRIG,
+			  (0x3 << 16) | (~(TX_INT_MASK | RX_INT_MASK)));
+		rnpgbe_wr_reg(ring->dma_int_mask, (RX_INT_MASK | TX_INT_MASK));
+	}
+}
+/**
+ * rnpgbe_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static inline void rnpgbe_irq_enable(struct rnpgbe_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_q_vectors; i++)
+		rnpgbe_irq_enable_queues(adapter, adapter->q_vector[i]);
+}
+
+static void rnpgbe_lpi_task(struct rnpgbe_adapter *adapter)
+{
+	int status;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
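+	/*
+	 * track the EEE LPI state reported by the hw so the tx path knows
+	 * whether it is currently in low-power idle mode
+	 */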
+	if (hw->feature_flags & RNP_HW_FEATURE_EEE) {
+		status = hw->ops.get_lpi_status(hw);
+
+		if (status) {
+			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
+				adapter->tx_path_in_lpi_mode = true;
+			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
+				adapter->tx_path_in_lpi_mode = false;
+		}
+	}
+}
+
+static irqreturn_t rnpgbe_msix_other(int irq, void *data)
+{
+	struct rnpgbe_adapter *adapter = data;
+
+	set_bit(__RNP_IN_IRQ, &adapter->state);
+	rnpgbe_lpi_task(adapter);
+	rnpgbe_msg_task(adapter);
+	clear_bit(__RNP_IN_IRQ, &adapter->state);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t rnpgbe_msix_clean_rings(int irq, void *data)
+{
+	struct rnpgbe_q_vector *q_vector = data;
+
+	rnpgbe_irq_disable_queues(q_vector);
+
+	rnpgbe_write_eitr_rx(q_vector);
+	/* interrupts on this vector are now disabled */
+
+	if (q_vector->rx.ring || q_vector->tx.ring)
+		napi_schedule_irqoff(&q_vector->napi);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * rnpgbe_poll - NAPI Rx polling callback
+ * @napi: structure for representing this polling device
+ * @budget: how many packets driver is allowed to clean
+ *
+ * This function is used for legacy and MSI, NAPI mode
+ **/
+int rnpgbe_poll(struct napi_struct *napi, int budget)
+{
+	struct rnpgbe_q_vector *q_vector =
+		container_of(napi, struct rnpgbe_q_vector, napi);
+	struct rnpgbe_adapter *adapter = q_vector->adapter;
+	struct rnpgbe_ring *ring;
+	int per_ring_budget, work_done = 0;
+	bool clean_complete = true;
+	int cleaned_total = 0;
+
+#ifdef CONFIG_RNP_DCA
+	if (adapter->flags & RNP_FLAG_DCA_ENABLED)
+		rnpgbe_update_dca(q_vector);
+#endif
+
+	rnpgbe_for_each_ring(ring, q_vector->tx) {
+		clean_complete = rnpgbe_clean_tx_irq(q_vector, ring, budget);
+	}
+
+	/* attempt to distribute budget to each queue fairly, but don't allow
+	 * the budget to go below 1 because we'll exit polling
+	 */
+	if (q_vector->rx.count > 1)
+		per_ring_budget = max(budget / q_vector->rx.count, 1);
+	else
+		per_ring_budget = budget;
+
+	rnpgbe_for_each_ring(ring, q_vector->rx) {
+		int cleaned = 0;
+		/* this ring is waiting for an rx_len reset;
+		 * do not process it until the reset is done
+		 */
+		if (likely(!(ring->ring_flags & RNP_RING_FLAG_DO_RESET_RX_LEN)))
+			cleaned = rnpgbe_clean_rx_irq(q_vector, ring,
+						      per_ring_budget);
+		work_done += cleaned;
+		cleaned_total += cleaned;
+		if (cleaned >= per_ring_budget)
+			clean_complete = false;
+	}
+
+#ifndef HAVE_NETDEV_NAPI_LIST
+	if (!netif_running(adapter->netdev))
+		clean_complete = true;
+#endif
+
+	/* force close irq */
+	if (test_bit(__RNP_DOWN, &adapter->state))
+		clean_complete = true;
+
+	if (!clean_complete) {
+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
+#ifdef SUPPORT_IRQ_AFFINITY_CHANGE
+		int cpu_id = smp_processor_id();
+
+		/* It is possible that the interrupt affinity has changed but,
+		 * if the cpu is pegged at 100%, polling will never exit while
+		 * traffic continues and the interrupt will be stuck on this
+		 * cpu.  We check to make sure affinity is correct before we
+		 * continue to poll, otherwise we must stop polling so the
+		 * interrupt can move to the correct cpu.
+		 */
+		if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
+			/* Tell napi that we are done polling */
+			if (likely(napi_complete_done(napi, work_done))) {
+				if (!test_bit(__RNP_DOWN, &adapter->state))
+					rnpgbe_irq_enable_queues(adapter, q_vector);
+			}
+			return min(work_done, budget - 1);
+		}
+#endif /* SUPPORT_IRQ_AFFINITY_CHANGE */
+#endif /* HAVE_IRQ_AFFINITY_NOTIFY */
+		return budget;
+	}
+
+	if (likely(napi_complete_done(napi, work_done))) {
+		/* try to do itr handle */
+		if (!(adapter->priv_flags & RNP_PRIV_FLAG_RX_COALESCE))
+			rnpgbe_update_ring_itr_rx(q_vector);
+
+		if (!test_bit(__RNP_DOWN, &adapter->state))
+			rnpgbe_irq_enable_queues(adapter, q_vector);
+	}
+
+	return min(work_done, budget - 1);
+}
+
+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
+#ifdef SUPPORT_IRQ_AFFINITY_CHANGE
+/**
+ * rnpgbe_irq_affinity_notify - Callback for affinity changes
+ * @notify: context as to what irq was changed
+ * @mask: the new affinity mask
+ *
+ * This is a callback function used by the irq_set_affinity_notifier function
+ * so that we may register to receive changes to the irq affinity masks.
+ **/
+static void rnpgbe_irq_affinity_notify(struct irq_affinity_notify *notify,
+				       const cpumask_t *mask)
+{
+	struct rnpgbe_q_vector *q_vector =
+		container_of(notify, struct rnpgbe_q_vector, affinity_notify);
+
+	cpumask_copy(&q_vector->affinity_mask, mask);
+}
+
+/**
+ * rnpgbe_irq_affinity_release - Callback for affinity notifier release
+ * @ref: internal core kernel usage
+ *
+ * This is a callback function used by the irq_set_affinity_notifier function
+ * to inform the current notification subscriber that they will no longer
+ * receive notifications.
+ **/
+static void rnpgbe_irq_affinity_release(struct kref *ref)
+{
+}
+#endif
+#endif /* HAVE_IRQ_AFFINITY_NOTIFY */
+
+static irqreturn_t rnpgbe_intr(int irq, void *data)
+{
+	struct rnpgbe_adapter *adapter = data;
+	struct rnpgbe_q_vector *q_vector = adapter->q_vector[0];
+
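+	/* in MSI/legacy mode a single vector services both the queue
+	 * rings and the mailbox/LPI events
+	 */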
+	/* disable interrupts (on this vector) */
+	rnpgbe_irq_disable_queues(q_vector);
+
+	rnpgbe_write_eitr_rx(q_vector);
+
+	if (q_vector->rx.ring || q_vector->tx.ring)
+		napi_schedule_irqoff(&q_vector->napi);
+
+	rnpgbe_msg_task(adapter);
+	rnpgbe_lpi_task(adapter);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * rnpgbe_request_msix_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * rnpgbe_request_msix_irqs allocates MSI-X vectors and requests
+ * interrupts from the kernel.
+ **/
+static int rnpgbe_request_msix_irqs(struct rnpgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err;
+	int i = 0;
+
+	DPRINTK(IFUP, INFO, "[%s] num_q_vectors:%d\n", __func__,
+		adapter->num_q_vectors);
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct rnpgbe_q_vector *q_vector = adapter->q_vector[i];
+		struct msix_entry *entry =
+			&adapter->msix_entries[i + adapter->q_vector_off];
+
+		if (q_vector->tx.ring && q_vector->rx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+				 "%s-%s-%d-%d", netdev->name, "TxRx", i,
+				 q_vector->v_idx);
+		} else {
+			WARN(!(q_vector->tx.ring && q_vector->rx.ring),
+			     "%s vector%d tx rx is null, v_idx:%d\n",
+			     netdev->name, i, q_vector->v_idx);
+			/* skip this unused q_vector */
+			continue;
+		}
+		err = request_irq(entry->vector, &rnpgbe_msix_clean_rings, 0,
+				  q_vector->name, q_vector);
+		if (err) {
+			e_err(probe,
+			      "%s:request_irq failed for MSIX interrupt:%d "
+			      "Error: %d\n",
+			      netdev->name, entry->vector, err);
+			goto free_queue_irqs;
+		}
+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
+		/* register for affinity change notifications */
+#ifdef SUPPORT_IRQ_AFFINITY_CHANGE
+		q_vector->affinity_notify.notify = rnpgbe_irq_affinity_notify;
+		q_vector->affinity_notify.release = rnpgbe_irq_affinity_release;
+		irq_set_affinity_notifier(entry->vector,
+					  &q_vector->affinity_notify);
+#endif /* SUPPORT_IRQ_AFFINITY_CHANGE */
+#endif /* HAVE_IRQ_AFFINITY_NOTIFY */
+#ifdef HAVE_IRQ_AFFINITY_HINT
+		DPRINTK(IFUP, INFO, "[%s] set %s affinity_mask\n", __func__,
+			q_vector->name);
+
+		irq_set_affinity_hint(entry->vector, &q_vector->affinity_mask);
+#endif
+	}
+
+	return 0;
+
+free_queue_irqs:
+	while (i) {
+		i--;
+		irq_set_affinity_hint(
+			adapter->msix_entries[i + adapter->q_vector_off].vector,
+			NULL);
+		free_irq(
+			adapter->msix_entries[i + adapter->q_vector_off].vector,
+			adapter->q_vector[i]);
+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
+#ifdef SUPPORT_IRQ_AFFINITY_CHANGE
+		irq_set_affinity_notifier(
+			adapter->msix_entries[i + adapter->q_vector_off].vector,
+			NULL);
+#endif
+#endif
+#ifdef HAVE_IRQ_AFFINITY_HINT
+		irq_set_affinity_hint(
+			adapter->msix_entries[i + adapter->q_vector_off].vector,
+			NULL);
+#endif
+	}
+	return err;
+}
+
+static int rnpgbe_free_msix_irqs(struct rnpgbe_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct rnpgbe_q_vector *q_vector = adapter->q_vector[i];
+		struct msix_entry *entry =
+			&adapter->msix_entries[i + adapter->q_vector_off];
+
+		/* free only the irqs that were actually requested */
+		if (!q_vector->rx.ring && !q_vector->tx.ring)
+			continue;
+#ifdef HAVE_IRQ_AFFINITY_NOTIFY
+		/* clear the affinity notifier in the IRQ descriptor */
+		irq_set_affinity_notifier(entry->vector, NULL);
+#endif
+#ifdef HAVE_IRQ_AFFINITY_HINT
+		/* clear the affinity_mask in the IRQ descriptor */
+		irq_set_affinity_hint(entry->vector, NULL);
+#endif
+		DPRINTK(IFDOWN, INFO, "free irq %s\n", q_vector->name);
+		free_irq(entry->vector, q_vector);
+	}
+
+	return 0;
+}
+
+#ifdef DISABLE_RX_IRQ
+int rx_poll_thread_handler(void *data)
+{
+	int i;
+	struct rnpgbe_adapter *adapter = data;
+
+	dbg("%s  %s running...\n", __func__, adapter->name);
+
+	do {
+		for (i = 0; i < adapter->num_q_vectors; i++)
+			rnpgbe_msix_clean_rings(0, adapter->q_vector[i]);
+
+		msleep(30);
+	} while (!kthread_should_stop() && adapter->quit_poll_thread != true);
+
+	dbg("%s  %s stopped\n", __func__, adapter->name);
+
+	return 0;
+}
+#endif
+
+/**
+ * rnpgbe_request_irq - initialize interrupts
+ * @adapter: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int rnpgbe_request_irq(struct rnpgbe_adapter *adapter)
+{
+	int err;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+#ifdef DISABLE_RX_IRQ
+	adapter->rx_poll_thread =
+		kthread_run(rx_poll_thread_handler, adapter, adapter->name);
+	if (IS_ERR(adapter->rx_poll_thread)) {
+		rnpgbe_err("kthread_run failed!\n");
+		return -EIO;
+	}
+	return 0;
+#endif
+	if (adapter->flags & RNP_FLAG_MSIX_ENABLED) {
+		pr_info("msix mode is used\n");
+		err = rnpgbe_request_msix_irqs(adapter);
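+		/* n500/n210: disable the hw legacy interrupt mode since
+		 * MSI-X is in use
+		 */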
+		if ((hw->hw_type == rnpgbe_hw_n500) ||
+		    (hw->hw_type == rnpgbe_hw_n210) ||
+		    (hw->hw_type == rnpgbe_hw_n210L))
+			wr32(hw, RNP500_LEGANCY_ENABLE, 0);
+	} else if (adapter->flags & RNP_FLAG_MSI_ENABLED) {
+		/* in this case one for all */
+		pr_info("msi mode is used\n");
+		err = request_irq(adapter->pdev->irq, rnpgbe_intr, 0,
+				  adapter->netdev->name, adapter);
+		adapter->hw.mbx.other_irq_enabled = true;
+		if ((hw->hw_type == rnpgbe_hw_n500) ||
+		    (hw->hw_type == rnpgbe_hw_n210) ||
+		    (hw->hw_type == rnpgbe_hw_n210L))
+			wr32(hw, RNP500_LEGANCY_ENABLE, 0);
+	} else {
+		pr_info("legacy mode is used\n");
+		err = request_irq(adapter->pdev->irq, rnpgbe_intr, IRQF_SHARED,
+				  adapter->netdev->name, adapter);
+		adapter->hw.mbx.other_irq_enabled = true;
+		if ((hw->hw_type == rnpgbe_hw_n500) ||
+		    (hw->hw_type == rnpgbe_hw_n210) ||
+		    (hw->hw_type == rnpgbe_hw_n210L)) {
+			wr32(hw, RNP500_LEGANCY_ENABLE, 1);
+			wr32(hw, RNP500_LEGANCY_TIME, 0x200);
+		}
+	}
+
+	if (err)
+		e_err(probe, "request_irq failed, Error %d\n", err);
+
+	return err;
+}
+
+static void rnpgbe_free_irq(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+#ifdef DISABLE_RX_IRQ
+	return;
+#endif
+	if (adapter->flags & RNP_FLAG_MSIX_ENABLED) {
+		rnpgbe_free_msix_irqs(adapter);
+	} else if (adapter->flags & RNP_FLAG_MSI_ENABLED) {
+		/* in this case one for all */
+		free_irq(adapter->pdev->irq, adapter);
+		adapter->hw.mbx.other_irq_enabled = false;
+	} else {
+		free_irq(adapter->pdev->irq, adapter);
+		adapter->hw.mbx.other_irq_enabled = false;
+		if ((hw->hw_type == rnpgbe_hw_n500) ||
+		    (hw->hw_type == rnpgbe_hw_n210) ||
+		    (hw->hw_type == rnpgbe_hw_n210L))
+			wr32(hw, RNP500_LEGANCY_ENABLE, 0);
+	}
+}
+
+/**
+ * rnpgbe_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static inline void rnpgbe_irq_disable(struct rnpgbe_adapter *adapter)
+{
+	int i, j;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		rnpgbe_irq_disable_queues(adapter->q_vector[i]);
+		j = i + adapter->q_vector_off;
+
+		if (adapter->flags & RNP_FLAG_MSIX_ENABLED)
+			synchronize_irq(adapter->msix_entries[j].vector);
+		else
+			synchronize_irq(adapter->pdev->irq);
+	}
+}
+
+int rnpgbe_setup_tx_maxrate(struct rnpgbe_ring *tx_ring, u64 max_rate,
+			    int samples_1sec)
+{
+	/* set the hardware sampling interval to 1s */
+	ring_wr32(tx_ring, RNP_DMA_REG_TX_FLOW_CTRL_TM, samples_1sec);
+	ring_wr32(tx_ring, RNP_DMA_REG_TX_FLOW_CTRL_TH, max_rate);
+
+	return 0;
+}
+
+/**
+ * rnpgbe_tx_maxrate_own - set the maximum per-queue bitrate
+ * @adapter: board private structure
+ * @queue_index: Tx queue to set
+ **/
+static int rnpgbe_tx_maxrate_own(struct rnpgbe_adapter *adapter,
+				 int queue_index)
+{
+	struct rnpgbe_ring *tx_ring = adapter->tx_ring[queue_index];
+	u64 real_rate = 0;
+	u32 maxrate = adapter->max_rate[queue_index];
+
+	if (!maxrate)
+		return rnpgbe_setup_tx_maxrate(
+			tx_ring, 0, adapter->hw.usecstocount * 100000);
+	/* we need to turn the rate into bytes/s with an efficiency factor */
+	if (maxrate < 50)
+		real_rate = ((u64)maxrate * 1000 * 85) >> 3;
+	else
+		real_rate = ((u64)maxrate * 1000 * 94) >> 3;
+	rnpgbe_setup_tx_maxrate(tx_ring, real_rate,
+				adapter->hw.usecstocount * 100000);
+
+	return 0;
+}
+
+/**
+ * rnpgbe_configure_tx_ring - Configure a Tx descriptor ring after Reset
+ * @adapter: board private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Tx descriptor ring after a reset.
+ **/
+void rnpgbe_configure_tx_ring(struct rnpgbe_adapter *adapter,
+			      struct rnpgbe_ring *ring)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	/* disable queue to avoid issues while updating state */
+
+	ring_wr32(ring, RNP_DMA_TX_START, 0);
+
+	ring_wr32(ring, RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_LO, (u32)ring->dma);
+	ring_wr32(ring, RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_HI,
+		  (u32)(((u64)ring->dma) >> 32) | (hw->pfvfnum << 24));
+	ring_wr32(ring, RNP_DMA_REG_TX_DESC_BUF_LEN, ring->count);
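+	/* resync the software indices with the hardware head pointer so
+	 * the ring starts out empty after reset
+	 */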
+	ring->next_to_clean = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_HEAD);
+	ring->next_to_use = ring->next_to_clean;
+	ring->tail = ring->ring_addr + RNP_DMA_REG_TX_DESC_BUF_TAIL;
+	rnpgbe_wr_reg(ring->tail, ring->next_to_use);
+
+	ring_wr32(ring, RNP_DMA_REG_TX_DESC_FETCH_CTRL,
+		  (8 << 0) /* max_water_flow */
+			  | (TSRN10_TX_DEFAULT_BURST << 16)
+		  /* max num of descriptors per read */
+	);
+
+	ring_wr32(ring, RNP_DMA_REG_TX_INT_DELAY_TIMER,
+		  adapter->tx_usecs * hw->usecstocount);
+	ring_wr32(ring, RNP_DMA_REG_TX_INT_DELAY_PKTCNT, adapter->tx_frames);
+
+	rnpgbe_tx_maxrate_own(adapter, ring->queue_index);
+	/* flow control: bytes per ctrl-tm-clk; 0 means no control */
+	if (adapter->flags & RNP_FLAG_FDIR_HASH_CAPABLE) {
+		ring->atr_sample_rate = adapter->atr_sample_rate;
+		ring->atr_count = 0;
+		set_bit(__RNP_TX_FDIR_INIT_DONE, &ring->state);
+	} else {
+		ring->atr_sample_rate = 0;
+	}
+
+	clear_bit(__RNP_HANG_CHECK_ARMED, &ring->state);
+
+	{
+		/* n500 must wait for tx_ready before setting tx start */
+		int timeout = 0;
+		u32 status = 0;
+
+		do {
+			status = ring_rd32(ring, RNP_DMA_TX_READY);
+			usleep_range(100, 200);
+			timeout++;
+			rnpgbe_dbg("wait %d tx ready to 1\n",
+				   ring->rnpgbe_queue_idx);
+		} while ((status != 1) && (timeout < 100));
+
+		if (timeout >= 100)
+			rnpgbe_dbg("wait tx ready timeout\n");
+		ring_wr32(ring, RNP_DMA_TX_START, 1);
+	}
+}
+
+/**
+ * rnpgbe_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void rnpgbe_configure_tx(struct rnpgbe_adapter *adapter)
+{
+	u32 i, dma_axi_ctl;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+
+	/* dma_axi_en.tx_en must be set before Tx queues are enabled */
+	dma_axi_ctl = dma_rd32(dma, RNP_DMA_AXI_EN);
+	dma_axi_ctl |= TX_AXI_RW_EN;
+	dma_wr32(dma, RNP_DMA_AXI_EN, dma_axi_ctl);
+
+	/* Setup the HW Tx Head and Tail descriptor pointers */
+	for (i = 0; i < (adapter->num_tx_queues); i++)
+		rnpgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
+}
+
+void rnpgbe_disable_rx_queue(struct rnpgbe_adapter *adapter,
+			     struct rnpgbe_ring *ring)
+{
+	ring_wr32(ring, RNP_DMA_RX_START, 0);
+}
+
+void rnpgbe_configure_rx_ring(struct rnpgbe_adapter *adapter,
+			      struct rnpgbe_ring *ring)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u64 desc_phy = ring->dma;
+	u16 q_idx = ring->queue_index;
+
+	/* disable queue to avoid issues while updating state */
+	rnpgbe_disable_rx_queue(adapter, ring);
+
+	/* set descriptor ring registers */
+	ring_wr32(ring, RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_LO, (u32)desc_phy);
+	ring_wr32(ring, RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_HI,
+		  ((u32)(desc_phy >> 32)) | (hw->pfvfnum << 24));
+	ring_wr32(ring, RNP_DMA_REG_RX_DESC_BUF_LEN, ring->count);
+
+	ring->tail = ring->ring_addr + RNP_DMA_REG_RX_DESC_BUF_TAIL;
+	ring->next_to_clean = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_HEAD);
+	ring->next_to_use = ring->next_to_clean;
+
+#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT
+#if (PAGE_SIZE < 8192)
+	{
+		int split_size;
+
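+		/* program the rx scatter length in 16-byte units: half a
+		 * page minus the headroom and the shared_info footer
+		 */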
+		split_size = rnpgbe_rx_pg_size(ring) / 2 -
+			     rnpgbe_rx_offset(ring) -
+			     sizeof(struct skb_shared_info);
+		split_size = split_size >> 4;
+		ring_wr32(ring, PCI_DMA_REG_RX_SCATTER_LENGTH, split_size);
+	}
+#else
+	ring_wr32(ring, PCI_DMA_REG_RX_SCATTER_LENGTH, 96);
+#endif
+#else
+	ring_wr32(ring, PCI_DMA_REG_RX_SCATTER_LENGTH,
+			((hw->max_length_current + 15) >> 4));
+#endif
+
+	ring_wr32(ring, RNP_DMA_REG_RX_DESC_FETCH_CTRL,
+		  0 | (TSRN10_RX_DEFAULT_LINE << 0) /* rx-desc-flow */
+			  | (TSRN10_RX_DEFAULT_BURST << 16)
+		  /* max-read-desc-cnt */
+	);
+	/* setup rx drop */
+	if (adapter->rx_drop_status & BIT(q_idx)) {
+		ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH,
+			  adapter->drop_time);
+	} else {
+		/* NCSI cards may need this set up: drop packets when no
+		 * rx descriptor is available within ~800 ms, otherwise
+		 * the OS may crash
+		 */
+		if (hw->ncsi_en)
+			ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH, 100000);
+
+		else
+			ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH, 0);
+	}
+
+	ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER,
+		  adapter->rx_usecs * hw->usecstocount);
+	ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_PKTCNT, adapter->rx_frames);
+	rnpgbe_alloc_rx_buffers(ring, rnpgbe_desc_unused_rx(ring));
+}
+
+static void rnpgbe_configure_virtualization(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	u32 ring, vfnum;
+	int i, vf_ring;
+	u64 real_rate = 0;
+
+	if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) {
+		hw->ops.set_sriov_status(hw, false);
+		return;
+	}
+
+	/* Enable only the PF's pool for Tx/Rx */
+
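+	/* clearing DMA_VEB_BYPASS presumably routes traffic through the
+	 * internal VEB switch when the bridge mode is VEB
+	 */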
+	if (adapter->flags2 & RNP_FLAG2_BRIDGE_MODE_VEB) {
+		dma_wr32(dma, RNP_DMA_CONFIG,
+			 dma_rd32(dma, RNP_DMA_CONFIG) & (~DMA_VEB_BYPASS));
+		adapter->flags2 |= RNP_FLAG2_BRIDGE_MODE_VEB;
+	}
+	ring = adapter->tx_ring[0]->rnpgbe_queue_idx;
+	hw->ops.set_sriov_status(hw, true);
+
+	/* store vfnum */
+	vfnum = hw->max_vfs - 1;
+	hw->veb_ring = ring;
+	hw->vfnum = vfnum;
+	/* use the last VF's table entry */
+	adapter->vf_num_for_pf = 0x80 | vfnum;
+
+	/* set up the per-VF tx rate limits */
+	for (i = 0; i < adapter->num_vfs; i++) {
+		vf_ring = rnpgbe_get_vf_ringnum(hw, i, 0);
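+		/* tx_rate is in Mbit/s; multiplying by 1024 * 128 converts it
+		 * to bytes/s, taking 1 Mbit as 2^20 bits (assumption)
+		 */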
+		real_rate = (adapter->vfinfo[i].tx_rate * 1024 * 128);
+		rnpgbe_setup_ring_maxrate(adapter, vf_ring, real_rate);
+	}
+}
+
+static void rnpgbe_set_rx_buffer_len(struct rnpgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
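+	/* the extra 2 * ETH_FCS_LEN on top of header + FCS appears to leave
+	 * headroom for stacked VLAN tags (assumption)
+	 */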
+	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN * 3;
+	struct rnpgbe_ring *rx_ring;
+	int i;
+
+	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+		max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rx_ring = adapter->rx_ring[i];
+#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT
+		clear_bit(__RNP_RX_3K_BUFFER, &rx_ring->state);
+		clear_bit(__RNP_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC
+
+		set_bit(__RNP_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+
+#else /* !HAVE_SWIOTLB_SKIP_CPU_SYNC */
+
+#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */
+
+#ifdef OPTM_WITH_LPAGE
+		rx_ring->rx_page_buf_nums = RNP_PAGE_BUFFER_NUMS(rx_ring);
+		rx_ring->rx_per_buf_mem = ALIGN(
+			(rnpgbe_rx_offset(rx_ring) + rnpgbe_rx_bufsz(rx_ring) +
+			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+			 RNP_RX_HWTS_OFFSET),
+			1024);
+#endif
+
+#else
+		/* should track the MTU */
+		rx_ring->rx_buf_len = max_frame;
+#endif /* CONFIG_RNP_DISABLE_PACKET_SPLIT */
+	}
+}
+
+/**
+ * rnpgbe_configure_rx - Configure the Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void rnpgbe_configure_rx(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	int i;
+	u32 rxctrl = 0, dma_axi_ctl;
+
+	/* set_rx_buffer_len must be called before ring initialization */
+	rnpgbe_set_rx_buffer_len(adapter);
+
+	/*
+	 * Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring
+	 */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		rnpgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
+
+	if (adapter->num_rx_queues > 0) {
+		wr32(hw, RNP_ETH_DEFAULT_RX_RING,
+		     adapter->rx_ring[0]->rnpgbe_queue_idx);
+	}
+
+	/* enable all receives */
+	rxctrl |= 0;
+
+	dma_axi_ctl = dma_rd32(dma, RNP_DMA_AXI_EN);
+	dma_axi_ctl |= RX_AXI_RW_EN;
+	dma_wr32(dma, RNP_DMA_AXI_EN, dma_axi_ctl);
+}
+
+#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX)
+
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef NETIF_F_HW_VLAN_CTAG_TX
+static int rnpgbe_vlan_rx_add_vid(struct net_device *netdev,
+				  __always_unused __be16 proto, u16 vid)
+#else /* !NETIF_F_HW_VLAN_CTAG_TX */
+static int rnpgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+#endif /* NETIF_F_HW_VLAN_CTAG_TX */
+#else /* !HAVE_INT_NDO_VLAN_RX_ADD_VID */
+static void rnpgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	bool veb_setup = true;
+
+	bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED);
+
+	if (sriov_flag) {
+		if (hw->feature_flags & RNP_VEB_VLAN_MASK_EN) {
+			if (hw->ops.set_veb_vlan_mask) {
+				if (hw->ops.set_veb_vlan_mask(
+					    hw, vid, hw->vfnum, true) != 0) {
+					netdev_dbg(netdev,
+						   "out of vlan entries\n");
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+					return -EACCES;
+#else
+					return;
+#endif
+				}
+			}
+		} else {
+			/* in sriov mode */
+			if ((vid) && (adapter->vf_vlan) &&
+			    (vid != adapter->vf_vlan)) {
+				netdev_dbg(netdev,
+					   "only 1 vlan in sriov mode\n");
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+				return -EACCES;
+#else
+				return;
+#endif
+			}
+
+			/* update this */
+			if (vid) {
+				adapter->vf_vlan = vid;
+				if (hw->ops.set_vf_vlan_mode) {
+					if (hw->feature_flags &
+					    RNP_NET_FEATURE_VF_FIXED)
+						hw->ops.set_vf_vlan_mode(
+							hw, vid, 0, true);
+					else
+						hw->ops.set_vf_vlan_mode(
+							hw, vid, hw->vfnum,
+							true);
+				}
+			}
+		}
+	}
+
+#ifndef HAVE_VLAN_RX_REGISTER
+	if (vid) {
+		if (proto == htons(ETH_P_8021Q))
+			adapter->vlan_count++;
+	}
+
+	if (vid < VLAN_N_VID) {
+		if (proto != htons(ETH_P_8021Q)) {
+			set_bit(vid, adapter->active_vlans_stags);
+			veb_setup = false;
+		} else {
+			set_bit(vid, adapter->active_vlans);
+		}
+	}
+#endif
+	/* never set up the VEB for vid 0 */
+	if (vid == 0)
+		veb_setup = false;
+
+	/* only ctags set up the VEB, and only in sriov mode (never stags) */
+	if (hw->ops.set_vlan_filter) {
+		hw->ops.set_vlan_filter(hw, vid, true,
+					(sriov_flag && veb_setup));
+	}
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+	/*
+	 * Copy feature flags from netdev to the vlan netdev for this vid.
+	 * This allows things like TSO to bubble down to our vlan device.
+	 * Some vlans, such as VLAN 0 for DCB will not have a v_netdev so
+	 * we will not have a netdev that needs updating.
+	 */
+	if (adapter->vlgrp) {
+		struct vlan_group *vlgrp = adapter->vlgrp;
+		struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid);
+
+		if (v_netdev) {
+			v_netdev->features |= netdev->features;
+			vlan_group_set_device(vlgrp, vid, v_netdev);
+		}
+	}
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+	return 0;
+#endif
+}
+
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+static int rnpgbe_vlan_rx_kill_vid(struct net_device *netdev,
+				   __always_unused __be16 proto, u16 vid)
+#else /* !NETIF_F_HW_VLAN_CTAG_RX */
+static int rnpgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+#endif /* NETIF_F_HW_VLAN_CTAG_RX */
+#else
+static void rnpgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+#endif
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int i;
+	bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED);
+	bool veb_setup = true;
+
+	if (!vid)
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+		return 0;
+#else
+		return;
+#endif
+
+#ifdef HAVE_VLAN_RX_REGISTER
+	if (!test_bit(__RNP_DOWN, &adapter->state))
+		rnpgbe_irq_disable(adapter);
+
+	vlan_group_set_device(adapter->vlgrp, vid, NULL);
+
+	if (!test_bit(__RNP_DOWN, &adapter->state))
+		rnpgbe_irq_enable(adapter);
+
+#endif /* HAVE_VLAN_RX_REGISTER */
+
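+	/* the filter is only removed from hw once no VF, no other tag type
+	 * and no stag entry still uses this vid; see the true_remove checks
+	 * below
+	 */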
+	if (sriov_flag) {
+		if (vid) {
+			int true_remove = 1;
+
+			adapter->vf_vlan = 0;
+			for (i = 0; i < adapter->num_vfs; i++) {
+				if (vid == adapter->vfinfo[i].vf_vlan)
+					true_remove = 0;
+			}
+			/* if no vf use this vid */
+			if (true_remove) {
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+				if (proto != htons(ETH_P_8021Q)) {
+					veb_setup = false;
+#ifndef HAVE_VLAN_RX_REGISTER
+					if (!test_bit(vid,
+						      adapter->active_vlans))
+						true_remove = 1;
+#endif /* HAVE_VLAN_RX_REGISTER */
+				} else {
+#ifdef NETIF_F_HW_VLAN_STAG_RX
+#ifndef HAVE_VLAN_RX_REGISTER
+					if (!test_bit(
+						    vid,
+						    adapter->active_vlans_stags))
+						true_remove = 1;
+#endif /* HAVE_VLAN_RX_REGISTER */
+#endif
+				}
+#endif /* NETIF_F_HW_VLAN_CTAG_RX */
+#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */
+				if ((adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) &&
+				    (vid == adapter->stags_vid))
+					true_remove = 0;
+				/* if no other tags use this vid */
+				if (true_remove) {
+					hw->ops.set_vlan_filter(hw, vid, false,
+								veb_setup);
+				}
+			}
+			/* always clean veb */
+			hw->ops.set_vlan_filter(hw, vid, false, true);
+
+			if (hw->ops.set_vf_vlan_mode) {
+				if (hw->feature_flags &
+				    RNP_NET_FEATURE_VF_FIXED)
+					hw->ops.set_vf_vlan_mode(hw, vid, 0,
+								 false);
+				else
+					hw->ops.set_vf_vlan_mode(
+						hw, vid, hw->vfnum, false);
+			}
+
+			if (hw->feature_flags & RNP_VEB_VLAN_MASK_EN) {
+				if (hw->ops.set_veb_vlan_mask) {
+					hw->ops.set_veb_vlan_mask(
+						hw, vid, hw->vfnum, false);
+				}
+			}
+		}
+	} else {
+		int true_remove = 0;
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+		if (proto != htons(ETH_P_8021Q)) {
+			veb_setup = false;
+#ifndef HAVE_VLAN_RX_REGISTER
+			if (!test_bit(vid, adapter->active_vlans))
+				true_remove = 1;
+#endif /* HAVE_VLAN_RX_REGISTER */
+
+		} else {
+#ifdef NETIF_F_HW_VLAN_STAG_RX
+#ifndef HAVE_VLAN_RX_REGISTER
+			if (!test_bit(vid, adapter->active_vlans_stags))
+				true_remove = 1;
+#endif /* HAVE_VLAN_RX_REGISTER */
+#endif
+		}
+#endif /* NETIF_F_HW_VLAN_CTAG_RX */
+#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */
+		if (true_remove) {
+			if ((adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) &&
+			    (vid == adapter->stags_vid))
+				goto SKIP_REMOVE;
+			hw->ops.set_vlan_filter(hw, vid, false, false);
+		}
+	}
+SKIP_REMOVE:;
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+#ifndef HAVE_VLAN_RX_REGISTER
+	if (vid) {
+		if (proto == htons(ETH_P_8021Q)) {
+			/* should check proto todo */
+			adapter->vlan_count--;
+		}
+	}
+	if (proto == htons(ETH_P_8021Q))
+		clear_bit(vid, adapter->active_vlans);
+#ifdef NETIF_F_HW_VLAN_STAG_RX
+	if (proto != htons(ETH_P_8021Q))
+		clear_bit(vid, adapter->active_vlans_stags);
+#endif /* NETIF_F_HW_VLAN_STAG_RX */
+#endif /* HAVE_VLAN_RX_REGISTER */
+#endif /* NETIF_F_HW_VLAN_CTAG_RX */
+#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+	return 0;
+#endif
+}
+
+#endif
+
+/**
+ * rnpgbe_vlan_strip_disable - helper to disable hw vlan stripping
+ * @adapter: driver data
+ */
+static void rnpgbe_vlan_strip_disable(struct rnpgbe_adapter *adapter)
+{
+	int i;
+	struct rnpgbe_ring *tx_ring;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		tx_ring = adapter->rx_ring[i];
+		hw->ops.set_vlan_strip(hw, tx_ring->rnpgbe_queue_idx, false);
+	}
+}
+
+/**
+ * rnpgbe_vlan_strip_enable - helper to enable hw vlan stripping
+ * @adapter: driver data
+ */
+static void rnpgbe_vlan_strip_enable(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_ring *tx_ring;
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		tx_ring = adapter->rx_ring[i];
+
+		hw->ops.set_vlan_strip(hw, tx_ring->rnpgbe_queue_idx, true);
+	}
+}
+
+static void rnpgbe_remove_vlan(struct rnpgbe_adapter *adapter)
+{
+	adapter->vlan_count = 0;
+}
+
+static void rnpgbe_restore_vlan(struct rnpgbe_adapter *adapter)
+{
+#ifndef HAVE_VLAN_RX_REGISTER
+	u16 vid;
+#endif
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	/* in stags open, set stags_vid to vlan filter */
+	if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED)
+		eth->ops.set_vfta(eth, adapter->stags_vid, true);
+
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef NETIF_F_HW_VLAN_CTAG_TX
+	rnpgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
+#else /* !NETIF_F_HW_VLAN_CTAG_TX */
+	rnpgbe_vlan_rx_add_vid(adapter->netdev, 0);
+#endif /* NETIF_F_HW_VLAN_CTAG_TX */
+#else /* !HAVE_INT_NDO_VLAN_RX_ADD_VID */
+	rnpgbe_vlan_rx_add_vid(adapter->netdev, 0);
+#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */
+
+#ifndef HAVE_VLAN_RX_REGISTER
+	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) {
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef NETIF_F_HW_VLAN_CTAG_TX
+		rnpgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q),
+				       vid);
+#else /* !NETIF_F_HW_VLAN_CTAG_TX */
+		rnpgbe_vlan_rx_add_vid(adapter->netdev, vid);
+#endif /* NETIF_F_HW_VLAN_CTAG_TX */
+#else /* !HAVE_INT_NDO_VLAN_RX_ADD_VID */
+		rnpgbe_vlan_rx_add_vid(adapter->netdev, vid);
+#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */
+	}
+#endif /* HAVE_VLAN_RX_REGISTER */
+
+#ifndef HAVE_VLAN_RX_REGISTER
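+	/* 0x88a8 is the 802.1ad S-TAG ethertype (ETH_P_8021AD) */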
+	for_each_set_bit(vid, adapter->active_vlans_stags, VLAN_N_VID) {
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef NETIF_F_HW_VLAN_CTAG_TX
+		rnpgbe_vlan_rx_add_vid(adapter->netdev, htons(0x88a8), vid);
+#else /* !NETIF_F_HW_VLAN_CTAG_TX */
+		rnpgbe_vlan_rx_add_vid(adapter->netdev, vid);
+#endif /* NETIF_F_HW_VLAN_CTAG_TX */
+#else /* !HAVE_INT_NDO_VLAN_RX_ADD_VID */
+		rnpgbe_vlan_rx_add_vid(adapter->netdev, vid);
+#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */
+	}
+#endif /* HAVE_VLAN_RX_REGISTER */
+}
+
+/**
+ * rnpgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_method entry point is called whenever the unicast/multicast
+ * address list or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper unicast, multicast and
+ * promiscuous mode.
+ **/
+void rnpgbe_set_rx_mode(struct net_device *netdev)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	netdev_features_t features;
+	bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED);
+
+	hw->ops.set_rx_mode(hw, netdev, sriov_flag);
+
+	if (sriov_flag) {
+		if (!test_and_set_bit(__RNP_USE_VFINFI, &adapter->state)) {
+			rnpgbe_restore_vf_macvlans(adapter);
+
+			rnpgbe_restore_vf_macs(adapter);
+			clear_bit(__RNP_USE_VFINFI, &adapter->state);
+		}
+	}
+
+	features = netdev->features;
+
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		rnpgbe_vlan_strip_enable(adapter);
+	else
+		rnpgbe_vlan_strip_disable(adapter);
+#else
+	if (features & NETIF_F_HW_VLAN_RX)
+		rnpgbe_vlan_strip_enable(adapter);
+	else
+		rnpgbe_vlan_strip_disable(adapter);
+
+#endif
+#ifdef NETIF_F_HW_VLAN_STAG_RX
+	/* only do this if hw support stags */
+	if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) {
+		if (features & NETIF_F_HW_VLAN_STAG_RX)
+			rnpgbe_vlan_strip_enable(adapter);
+		else
+			rnpgbe_vlan_strip_disable(adapter);
+	}
+#endif
+}
+
+static void rnpgbe_napi_enable_all(struct rnpgbe_adapter *adapter)
+{
+	int q_idx;
+
+	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+		napi_enable(&adapter->q_vector[q_idx]->napi);
+}
+
+static void rnpgbe_napi_disable_all(struct rnpgbe_adapter *adapter)
+{
+	int q_idx;
+
+	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+		napi_disable(&adapter->q_vector[q_idx]->napi);
+}
+
+static void rnpgbe_fdir_filter_restore(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct hlist_node *node2;
+	struct rnpgbe_fdir_filter *filter;
+
+	spin_lock(&adapter->fdir_perfect_lock);
+
+	/* setup ntuple */
+	hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list,
+			fdir_node) {
+		if ((!filter->vf_num) && (filter->action != ACTION_TO_MPE)) {
+			rnpgbe_fdir_write_perfect_filter(
+					adapter->fdir_mode, hw, &filter->filter, filter->hw_idx,
+					(filter->action == RNP_FDIR_DROP_QUEUE) ?
+					RNP_FDIR_DROP_QUEUE :
+					adapter->rx_ring[filter->action]->rnpgbe_queue_idx,
+					(adapter->priv_flags & RNP_PRIV_FLAG_REMAP_PRIO) ?
+					true :
+					false);
+		} else {
+			rnpgbe_fdir_write_perfect_filter(
+					adapter->fdir_mode, hw, &filter->filter,
+					filter->hw_idx,
+					(filter->action == RNP_FDIR_DROP_QUEUE) ?
+					RNP_FDIR_DROP_QUEUE :
+					filter->action,
+					(adapter->priv_flags &
+					 RNP_PRIV_FLAG_REMAP_PRIO) ?
+					true :
+					false);
+		}
+	}
+
+	spin_unlock(&adapter->fdir_perfect_lock);
+}
+
+static void rnpgbe_configure_pause(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	hw->ops.set_pause_mode(hw);
+}
+
+static void rnpgbe_vlan_stags_flag(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	/* stags is added */
+	if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED)
+		hw->ops.set_txvlan_mode(hw, false);
+	else
+		hw->ops.set_txvlan_mode(hw, true);
+}
+
+static void rnpgbe_configure(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED);
+
+	/*
+	 * We must restore virtualization before VLANs or else
+	 * the VLVF registers will not be populated
+	 */
+	rnpgbe_configure_virtualization(adapter);
+
+	rnpgbe_set_rx_mode(adapter->netdev);
+	/* reconfigure hw */
+	hw->ops.set_mac(hw, hw->mac.addr, sriov_flag);
+
+	/* in sriov mode vlan is not reset */
+	rnpgbe_restore_vlan(adapter);
+
+	hw->ops.update_hw_info(hw);
+
+	/* init setup pause */
+	rnpgbe_configure_pause(adapter);
+
+	rnpgbe_vlan_stags_flag(adapter);
+
+	rnpgbe_init_rss_key(adapter);
+	rnpgbe_init_rss_table(adapter);
+
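+	/* hash-based (ATR-style) filters are rebuilt at runtime, so only the
+	 * perfect filters need to be restored after a reset (assumption)
+	 */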
+	if (!(adapter->flags & RNP_FLAG_FDIR_HASH_CAPABLE) &&
+	    (adapter->flags & RNP_FLAG_FDIR_PERFECT_CAPABLE))
+		rnpgbe_fdir_filter_restore(adapter);
+
+	rnpgbe_configure_tx(adapter);
+	rnpgbe_configure_rx(adapter);
+}
+
+/**
+ * rnpgbe_sfp_link_config - set up SFP+ link
+ * @adapter: pointer to private adapter struct
+ **/
+static void rnpgbe_sfp_link_config(struct rnpgbe_adapter *adapter)
+{
+	/*
+	 * We are assuming the worst case scenario here, and that
+	 * is that an SFP was inserted/removed after the reset
+	 * but before SFP detection was enabled.  As such the best
+	 * solution is to just start searching as soon as we start
+	 */
+	adapter->flags2 |= RNP_FLAG2_SFP_NEEDS_RESET;
+}
+
+static void rnpgbe_up_complete(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int i;
+
+	rnpgbe_configure_msix(adapter);
+
+	/* enable the optics for n10 SFP+ fiber */
+	if (hw->ops.enable_tx_laser)
+		hw->ops.enable_tx_laser(hw);
+
+	/* order prior state updates before clearing __RNP_DOWN */
+	smp_mb__before_atomic();
+	clear_bit(__RNP_DOWN, &adapter->state);
+	rnpgbe_napi_enable_all(adapter);
+
+	rnpgbe_sfp_link_config(adapter);
+	/* clear any pending interrupts */
+	rnpgbe_irq_enable(adapter);
+
+	/* enable transmits */
+	netif_tx_start_all_queues(adapter->netdev);
+
+	/* start all rx queues */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		ring_wr32(adapter->rx_ring[i], RNP_DMA_RX_START, 1);
+
+	/* bring the link up in the watchdog, this could race with our first
+	 * link up interrupt but shouldn't be a problem
+	 */
+	adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE;
+	adapter->link_check_timeout = jiffies;
+	mod_timer(&adapter->service_timer, jiffies);
+
+	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
+	/* maybe differ in n500 */
+	hw->link = 0;
+	hw->ops.set_mbx_link_event(hw, 1);
+	hw->ops.set_mbx_ifup(hw, 1);
+
+}
+
+void rnpgbe_reinit_locked(struct rnpgbe_adapter *adapter)
+{
+	WARN_ON(in_interrupt());
+
+	while (test_and_set_bit(__RNP_RESETTING, &adapter->state))
+		usleep_range(1000, 2000);
+	rnpgbe_down(adapter);
+	/*
+	 * If SR-IOV enabled then wait a bit before bringing the adapter
+	 * back up to give the VFs time to respond to the reset.  The
+	 * two second wait is based upon the watchdog timer cycle in
+	 * the VF driver.
+	 */
+	if (adapter->flags & RNP_FLAG_SRIOV_ENABLED)
+		msleep(2000);
+	rnpgbe_up(adapter);
+
+	clear_bit(__RNP_RESETTING, &adapter->state);
+}
+
+void rnpgbe_up(struct rnpgbe_adapter *adapter)
+{
+	/* hardware has been reset, we need to reload some things */
+	rnpgbe_configure(adapter);
+	rnpgbe_up_complete(adapter);
+}
+
+void rnpgbe_reset(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int err;
+	bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED);
+
+	rnpgbe_logd(LOG_ADPT_STAT, "%s\n", __func__);
+
+	/* lock SFP init bit to prevent race conditions with the watchdog */
+	while (test_and_set_bit(__RNP_IN_SFP_INIT, &adapter->state))
+		usleep_range(1000, 2000);
+
+	/* clear all SFP and link config related flags while holding SFP_INIT */
+	adapter->flags2 &=
+		~(RNP_FLAG2_SEARCH_FOR_SFP | RNP_FLAG2_SFP_NEEDS_RESET);
+	adapter->flags &= ~RNP_FLAG_NEED_LINK_CONFIG;
+	err = hw->ops.init_hw(hw);
+	if (err) {
+		e_dev_err("init_hw: Hardware Error: err:%d. line:%d\n", err,
+			  __LINE__);
+	}
+
+	clear_bit(__RNP_IN_SFP_INIT, &adapter->state);
+
+	/* reprogram the RAR[0] in case user changed it. */
+	hw->ops.set_mac(hw, hw->mac.addr, sriov_flag);
+
+#ifdef HAVE_PTP_1588_CLOCK
+	if (module_enable_ptp) {
+		if (adapter->flags2 & RNP_FLAG2_PTP_ENABLED &&
+		    (adapter->ptp_rx_en || adapter->ptp_tx_en))
+			rnpgbe_ptp_reset(adapter);
+	}
+#endif
+}
+
+#ifdef OPTM_WITH_LPAGE
+/**
+ * rnpgbe_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ **/
+static void rnpgbe_clean_rx_ring(struct rnpgbe_ring *rx_ring)
+{
+	u16 i = rx_ring->next_to_clean;
+	struct rnpgbe_rx_buffer *rx_buffer;
+#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC)
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+	dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
+#endif
+
+	if (!rx_ring->rx_buffer_info)
+		return;
+
+	if (rx_ring->skb)
+		dev_kfree_skb(rx_ring->skb);
+
+	rx_ring->skb = NULL;
+	rx_buffer = &rx_ring->rx_buffer_info[i];
+
+	/* Free all the Rx ring sk_buffs */
+	while (i != rx_ring->next_to_alloc) {
+		if (!rx_buffer->page)
+			goto next_buffer;
+		/* Invalidate cache lines that may have been written to by
+		 * device so that we avoid corrupting memory.
+		 */
+		dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
+					      rx_buffer->page_offset,
+					      rnpgbe_rx_bufsz(rx_ring),
+					      DMA_FROM_DEVICE);
+
+		/* free resources associated with mapping */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     rnpgbe_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC)
+				     &attrs);
+#else
+				     RNP_RX_DMA_ATTR);
+#endif
+
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+		/* now this page is not used */
+		rx_buffer->page = NULL;
+	next_buffer:
+		i++;
+		rx_buffer++;
+		if (i == rx_ring->count) {
+			i = 0;
+			rx_buffer = rx_ring->rx_buffer_info;
+		}
+	}
+
+	rx_ring->next_to_alloc = 0;
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+}
+
+#else
+/**
+ * rnpgbe_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ **/
+static void rnpgbe_clean_rx_ring(struct rnpgbe_ring *rx_ring)
+{
+	u16 i = rx_ring->next_to_clean;
+	struct rnpgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
+#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC)
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+	dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
+#endif
+
+	/* Free all the Rx ring sk_buffs */
+#ifdef CONFIG_RNP_DISABLE_PACKET_SPLIT
+	while (i != rx_ring->next_to_use) {
+#else
+	while (i != rx_ring->next_to_alloc) {
+#endif
+		if (rx_buffer->skb) {
+			struct sk_buff *skb = rx_buffer->skb;
+
+			dev_kfree_skb(skb);
+			rx_buffer->skb = NULL;
+		}
+
+#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT
+		/* Invalidate cache lines that may have been written to by
+		 * device so that we avoid corrupting memory.
+		 */
+		dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
+					      rx_buffer->page_offset,
+					      rnpgbe_rx_bufsz(rx_ring),
+					      DMA_FROM_DEVICE);
+
+		/* free resources associated with mapping */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     rnpgbe_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC)
+				     &attrs);
+#else
+				     RNP_RX_DMA_ATTR);
+#endif
+
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+		/* now this page is not used */
+		rx_buffer->page = NULL;
+#else /* CONFIG_RNP_DISABLE_PACKET_SPLIT */
+		if (rx_buffer->dma) {
+			dma_unmap_single(rx_ring->dev, rx_buffer->dma,
+					 rx_ring->rx_buf_len, DMA_FROM_DEVICE);
+			rx_buffer->dma = 0;
+		}
+#endif /* CONFIG_RNP_DISABLE_PACKET_SPLIT */
+		i++;
+		rx_buffer++;
+		if (i == rx_ring->count) {
+			i = 0;
+			rx_buffer = rx_ring->rx_buffer_info;
+		}
+	}
+
+#ifndef CONFIG_RNP_DISABLE_PACKET_SPLIT
+	rx_ring->next_to_alloc = 0;
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+#endif
+}
+#endif
+
+/**
+ * rnpgbe_clean_tx_ring - Free Tx Buffers
+ * @tx_ring: ring to be cleaned
+ **/
+static void rnpgbe_clean_tx_ring(struct rnpgbe_ring *tx_ring)
+{
+	unsigned long size;
+	u16 i = tx_ring->next_to_clean;
+	struct rnpgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
+
+	BUG_ON(tx_ring == NULL);
+
+	/* ring already cleared, nothing to do */
+	if (!tx_ring->tx_buffer_info)
+		return;
+
+	while (i != tx_ring->next_to_use) {
+		struct rnpgbe_tx_desc *eop_desc, *tx_desc;
+
+		dev_kfree_skb_any(tx_buffer->skb);
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE);
+
+		eop_desc = tx_buffer->next_to_watch;
+		tx_desc = RNP_TX_DESC(tx_ring, i);
+		/* unmap remaining buffers */
+		while (tx_desc != eop_desc) {
+			tx_buffer++;
+			tx_desc++;
+			i++;
+			if (unlikely(i == tx_ring->count)) {
+				i = 0;
+				tx_buffer = tx_ring->tx_buffer_info;
+				tx_desc = RNP_TX_DESC(tx_ring, 0);
+			}
+
+			/* unmap any remaining paged data */
+			if (dma_unmap_len(tx_buffer, len))
+				dma_unmap_page(tx_ring->dev,
+					       dma_unmap_addr(tx_buffer, dma),
+					       dma_unmap_len(tx_buffer, len),
+					       DMA_TO_DEVICE);
+		}
+		/* move us one more past the eop_desc for start of next pkt */
+		tx_buffer++;
+		i++;
+		if (unlikely(i == tx_ring->count)) {
+			i = 0;
+			tx_buffer = tx_ring->tx_buffer_info;
+		}
+	}
+
+	netdev_tx_reset_queue(txring_txq(tx_ring));
+	size = sizeof(struct rnpgbe_tx_buffer) * tx_ring->count;
+	memset(tx_ring->tx_buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+	memset(tx_ring->desc, 0, tx_ring->size);
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+}
+
+/**
+ * rnpgbe_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void rnpgbe_clean_all_rx_rings(struct rnpgbe_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		rnpgbe_clean_rx_ring(adapter->rx_ring[i]);
+}
+
+/**
+ * rnpgbe_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void rnpgbe_clean_all_tx_rings(struct rnpgbe_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		rnpgbe_clean_tx_ring(adapter->tx_ring[i]);
+}
+
+static void rnpgbe_fdir_filter_exit(struct rnpgbe_adapter *adapter)
+{
+	struct hlist_node *node2;
+	struct rnpgbe_fdir_filter *filter;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	spin_lock(&adapter->fdir_perfect_lock);
+
+	hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list,
+				   fdir_node) {
+		/* erase the filter from hw */
+		rnpgbe_fdir_erase_perfect_filter(adapter->fdir_mode, hw,
+						 &filter->filter,
+						 filter->hw_idx);
+
+		hlist_del(&filter->fdir_node);
+		kfree(filter);
+	}
+	adapter->fdir_filter_count = 0;
+
+	adapter->layer2_count = hw->layer2_count;
+	adapter->tuple_5_count = hw->tuple5_count;
+
+	spin_unlock(&adapter->fdir_perfect_lock);
+}
+
+static void print_status(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	int i;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	struct net_device *netdev = adapter->netdev;
+
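+	/* dump a selection of ETH and DMA status registers for debugging */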
+	netdev_dbg(netdev, "eth 0x120 %x\n", eth_rd32(eth, 0x120));
+	netdev_dbg(netdev, "eth 0x124 %x\n", eth_rd32(eth, 0x124));
+
+	for (i = 0x200; i < 0x220; i = i + 4)
+		netdev_dbg(netdev, "eth 0x%x %x\n", i, eth_rd32(eth, i));
+
+	for (i = 0x300; i < 0x318; i = i + 4)
+		netdev_dbg(netdev, "eth 0x%x %x\n", i, eth_rd32(eth, i));
+
+	netdev_dbg(netdev, "eth 0x%x %x\n", 0x98, eth_rd32(eth, 0x98));
+	netdev_dbg(netdev, "eth 0x%x %x\n", 0x220, eth_rd32(eth, 0x220));
+
+	for (i = 0x138; i < 0x158; i = i + 4)
+		netdev_dbg(netdev, "dma 0x%x %x\n", i, dma_rd32(dma, i));
+	i = 0x170;
+	netdev_dbg(netdev, "dma 0x%x %x\n", i, dma_rd32(dma, i));
+	i = 0x174;
+	netdev_dbg(netdev, "dma 0x%x %x\n", i, dma_rd32(dma, i));
+	for (i = 0x214; i < 0x220; i = i + 4)
+		netdev_dbg(netdev, "dma 0x%x %x\n", i, dma_rd32(dma, i));
+	for (i = 0x234; i < 0x270; i = i + 4)
+		netdev_dbg(netdev, "dma 0x%x %x\n", i, dma_rd32(dma, i));
+	i = 0x1018;
+	netdev_dbg(netdev, "dma 0x%x %x\n", i, dma_rd32(dma, i));
+	i = 0x101c;
+	netdev_dbg(netdev, "dma 0x%x %x\n", i, dma_rd32(dma, i));
+	i = 0x1084;
+	netdev_dbg(netdev, "dma 0x%x %x\n", i, dma_rd32(dma, i));
+}
+
+void rnpgbe_down(struct rnpgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int i;
+	int err = 0;
+	bool is_pci_dead = pci_channel_offline(adapter->pdev);
+	bool is_pci_online = !is_pci_dead;
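+	/* the per-ring register accesses below are skipped when the PCI
+	 * channel is offline, e.g. during AER error recovery
+	 */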
+	/* signal that we are down to the interrupt handler */
+	set_bit(__RNP_DOWN, &adapter->state);
+
+	if (!hw->ncsi_en)
+		hw->ops.set_mac_rx(hw, false);
+
+	hw->ops.set_mbx_link_event(hw, 0);
+	hw->ops.set_mbx_ifup(hw, 0);
+
+	rnpgbe_setup_eee_mode(adapter, false);
+
+	if (hw->ops.clean_link)
+		hw->ops.clean_link(hw);
+
+	if (netif_carrier_ok(netdev))
+		e_info(drv, "NIC Link is Down\n");
+
+	rnpgbe_remove_vlan(adapter);
+
+	netif_tx_stop_all_queues(netdev);
+
+	netif_carrier_off(netdev);
+
+	usleep_range(5000, 10000);
+	/* if we have tx desc to clean */
+	for (i = 0; i < adapter->num_tx_queues && is_pci_online; i++) {
+		struct rnpgbe_ring *tx_ring = adapter->tx_ring[i];
+		int head, tail;
+		int timeout = 0;
+
+		if (tx_ring->next_to_use == tx_ring->next_to_clean)
+			continue;
+
+		head = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_HEAD);
+		tail = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_TAIL);
+
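+		/* poll until the hw head catches up with the tail, i.e. all
+		 * queued tx descriptors have been consumed by the hw
+		 */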
+		while (head != tail) {
+			usleep_range(30000, 50000);
+
+			head = ring_rd32(tx_ring,
+					RNP_DMA_REG_TX_DESC_BUF_HEAD);
+			tail = ring_rd32(tx_ring,
+					RNP_DMA_REG_TX_DESC_BUF_TAIL);
+			timeout++;
+			if (timeout == 100) {
+				e_info(drv, "wait ring %d tx done timeout %x %x\n",
+				       i, head, tail);
+				adapter->priv_flags |=
+					RNP_PRIV_FLGA_TEST_TX_HANG;
+				print_status(adapter);
+				err = 1;
+			}
+			if (timeout >= 200) {
+				e_info(drv, "200 wait tx done timeout %x %x\n",
+				       head, tail);
+				print_status(adapter);
+				break;
+			}
+		}
+	}
+
+	rnpgbe_clean_all_tx_rings(adapter);
+
+	usleep_range(2000, 5000);
+
+	rnpgbe_irq_disable(adapter);
+
+	usleep_range(5000, 10000);
+
+	netif_tx_disable(netdev);
+
+	/* disable all enabled rx queues */
+	for (i = 0; i < adapter->num_rx_queues && is_pci_online; i++) {
+		rnpgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
+		/* only handled when sriov is enabled and the rx length changed */
+		if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED) &&
+		    (adapter->rx_ring[i]->ring_flags &
+		     RNP_RING_FLAG_CHANGE_RX_LEN)) {
+			int head;
+			struct rnpgbe_ring *ring = adapter->rx_ring[i];
+
+			head = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_HEAD);
+			adapter->rx_ring[i]->ring_flags &=
+				(~RNP_RING_FLAG_CHANGE_RX_LEN);
+			/* delay the rx length setup until the
+			 * rx head wraps back to 0
+			 */
+			if (head >= adapter->rx_ring[i]->reset_count) {
+				adapter->rx_ring[i]->ring_flags |=
+					RNP_RING_FLAG_DELAY_SETUP_RX_LEN;
+				/* set sw count to head + 1 */
+				adapter->rx_ring[i]->temp_count = head + 1;
+			}
+		}
+		/* a plain down with no rx_len change needs no extra handling */
+	}
+	/* call carrier off first to avoid false dev_watchdog timeouts */
+
+	rnpgbe_napi_disable_all(adapter);
+
+	adapter->flags2 &=
+		~(RNP_FLAG2_FDIR_REQUIRES_REINIT | RNP_FLAG2_RESET_REQUESTED);
+	adapter->flags &= ~RNP_FLAG_NEED_LINK_UPDATE;
+
+	/* ping all the active vfs to let them know we are going down */
+	if (adapter->num_vfs)
+		rnpgbe_ping_all_vfs(adapter);
+
+	/* disable transmits in the hardware now that interrupts are off */
+	for (i = 0; i < adapter->num_tx_queues && is_pci_online; i++) {
+		struct rnpgbe_ring *tx_ring = adapter->tx_ring[i];
+
+		ring_wr32(tx_ring, RNP_DMA_TX_START, 0);
+	}
+
+	if (!err) {
+		if (!pci_channel_offline(adapter->pdev)) {
+			if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+				rnpgbe_reset(adapter);
+			else if (!(adapter->flags & RNP_FLAG_SRIOV_INIT_DONE))
+				rnpgbe_reset(adapter);
+		}
+	}
+	/* power down the optics for n10 SFP+ fiber */
+	if (hw->ops.disable_tx_laser)
+		hw->ops.disable_tx_laser(hw);
+
+	rnpgbe_clean_all_rx_rings(adapter);
+
+#ifdef CONFIG_RNP_DCA
+	/* since we reset the hardware DCA settings were cleared */
+	rnpgbe_setup_dca(adapter);
+#endif
+}
+
+/**
+ * rnpgbe_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+#ifdef HAVE_TX_TIMEOUT_TXQUEUE
+static void rnpgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+#else
+static void rnpgbe_tx_timeout(struct net_device *netdev)
+#endif
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	/* Do the reset outside of interrupt context */
+	int i;
+	bool real_tx_hang = false;
+
+#define TX_TIMEO_LIMIT 16000
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct rnpgbe_ring *tx_ring = adapter->tx_ring[i];
+
+		if (check_for_tx_hang(tx_ring) &&
+		    rnpgbe_check_tx_hang(tx_ring)) {
+			real_tx_hang = true;
+		}
+	}
+
+	if (real_tx_hang) {
+		e_info(drv, "hw real hang!!!!");
+		/* Do the reset outside of interrupt context */
+#ifndef TEST_TX_HANG
+		rnpgbe_tx_timeout_reset(adapter);
+#endif
+	} else {
+		e_info(drv,
+		       "Fake Tx hang detected with timeout of %d "
+		       "seconds\n",
+		       netdev->watchdog_timeo / HZ);
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			struct rnpgbe_ring *temp_ring = adapter->tx_ring[i];
+			u32 head, tail;
+			struct rnpgbe_hw *hw = &adapter->hw;
+
+			head = ring_rd32(temp_ring,
+					 RNP_DMA_REG_TX_DESC_BUF_HEAD);
+			tail = ring_rd32(temp_ring,
+					 RNP_DMA_REG_TX_DESC_BUF_TAIL);
+			e_info(drv, "sw ring %d ---- %d %d\n",
+			       temp_ring->rnpgbe_queue_idx,
+			       temp_ring->next_to_use,
+			       temp_ring->next_to_clean);
+			e_info(drv, "hw ring %d ---- %d %d\n",
+			       temp_ring->rnpgbe_queue_idx, head, tail);
+			e_info(drv, "dma version %d\n",
+			       rnpgbe_rd_reg(hw->hw_addr));
+		}
+		print_status(adapter);
+		/* fake Tx hang - increase the kernel timeout */
+		if (netdev->watchdog_timeo < TX_TIMEO_LIMIT)
+			netdev->watchdog_timeo *= 2;
+	}
+}
+
+/**
+ * rnpgbe_sw_init - Initialize general software structures (struct rnpgbe_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * rnpgbe_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int rnpgbe_sw_init(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	unsigned int rss = 0, fdir;
+	int rss_limit = num_online_cpus();
+#ifdef RNP_MAX_RINGS
+	rss_limit = RNP_MAX_RINGS;
+#endif
+
+#ifdef CONFIG_RNP_DCB
+	int j;
+	struct tc_configuration *tc;
+#endif
+
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_device_id = pdev->subsystem_device;
+
+	rss = min_t(int, adapter->max_ring_pair_counts, rss_limit);
+	rss = min_t(int, rss,
+		    hw->mac.max_msix_vectors - adapter->num_other_vectors);
+	adapter->ring_feature[RING_F_RSS].limit =
+		min_t(int, rss, adapter->max_ring_pair_counts);
+
+	adapter->flags |= RNP_FLAG_VXLAN_OFFLOAD_CAPABLE;
+	adapter->flags |= RNP_FLAG_VXLAN_OFFLOAD_ENABLE;
+	adapter->max_q_vectors = hw->max_msix_vectors - 1;
+	adapter->atr_sample_rate = 20;
+
+	fdir = min_t(int, adapter->max_q_vectors, rss_limit);
+	adapter->ring_feature[RING_F_FDIR].limit = fdir;
+
+	if (hw->feature_flags & RNP_NET_FEATURE_RX_NTUPLE_FILTER) {
+		spin_lock_init(&adapter->fdir_perfect_lock);
+		adapter->fdir_filter_count = 0;
+		adapter->fdir_mode = hw->fdir_mode;
+		/* fdir_pballoc does not start from zero, so add 2 */
+		adapter->fdir_pballoc = 2 + hw->layer2_count + hw->tuple5_count;
+		adapter->layer2_count = hw->layer2_count;
+		adapter->tuple_5_count = hw->tuple5_count;
+	}
+
+	mutex_init(&adapter->eee_lock);
+	adapter->tx_lpi_timer = RNP_DEFAULT_TWT_LS;
+#ifdef CONFIG_RNP_DCA
+	/* DCA_CAPABLE is set when CONFIG_RNP_DCA is enabled */
+	adapter->flags |= RNP_FLAG_DCA_CAPABLE;
+#endif
+
+	/* itr sw setup here */
+	adapter->sample_interval = 1;
+	adapter->adaptive_rx_coal = 1;
+	adapter->adaptive_tx_coal = 1;
+	adapter->auto_rx_coal = 0;
+	adapter->napi_budge = 64;
+	/* set default work limits */
+	adapter->tx_work_limit = RNP_DEFAULT_TX_WORK;
+	adapter->rx_usecs = RNP_PKT_TIMEOUT;
+	adapter->rx_frames = RNP_RX_PKT_POLL_BUDGET;
+	adapter->priv_flags &= ~RNP_PRIV_FLAG_RX_COALESCE;
+	adapter->priv_flags &= ~RNP_PRIV_FLAG_TX_COALESCE;
+	adapter->tx_usecs = RNP_PKT_TIMEOUT_TX;
+	adapter->tx_frames = RNP_TX_PKT_POLL_BUDGET;
+
+	/* set default ring sizes */
+	adapter->tx_ring_item_count = RNP_DEFAULT_TXD;
+	adapter->rx_ring_item_count = RNP_DEFAULT_RXD;
+	set_bit(__RNP_DOWN, &adapter->state);
+
+	return 0;
+}
+
+/**
+ * rnpgbe_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @tx_ring:    tx descriptor ring (for a specific queue) to setup
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int rnpgbe_setup_tx_resources(struct rnpgbe_ring *tx_ring,
+			      struct rnpgbe_adapter *adapter)
+{
+	struct device *dev = tx_ring->dev;
+	int orig_node = dev_to_node(dev);
+	int numa_node = NUMA_NO_NODE;
+	int size;
+
+	size = sizeof(struct rnpgbe_tx_buffer) * tx_ring->count;
+
+#ifdef USE_NUMA_MEMORY
+	if (tx_ring->q_vector)
+		numa_node = tx_ring->q_vector->numa_node;
+	tx_ring->tx_buffer_info = vzalloc_node(size, numa_node);
+	if (!tx_ring->tx_buffer_info)
+		tx_ring->tx_buffer_info = vzalloc(size);
+	if (!tx_ring->tx_buffer_info)
+		goto err;
+#else
+	tx_ring->tx_buffer_info = kzalloc(size, GFP_KERNEL);
+	if (!tx_ring->tx_buffer_info)
+		goto err;
+#endif
+	/* round up to nearest 4K */
+	tx_ring->size = tx_ring->count * sizeof(struct rnpgbe_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+#ifdef USE_NUMA_MEMORY
+	set_dev_node(dev, numa_node);
+#endif
+	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
+					   GFP_KERNEL);
+#ifdef USE_NUMA_MEMORY
+	set_dev_node(dev, orig_node);
+#endif
+	if (!tx_ring->desc)
+		tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+						   &tx_ring->dma, GFP_KERNEL);
+	if (!tx_ring->desc)
+		goto err;
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+	DPRINTK(IFUP, INFO,
+		"TxRing:%d, vector:%d ItemCounts:%d "
+		"desc:%p(0x%llx) node:%d\n",
+		tx_ring->rnpgbe_queue_idx, tx_ring->q_vector->v_idx,
+		tx_ring->count, tx_ring->desc, tx_ring->dma, numa_node);
+	return 0;
+
+err:
+
+#ifdef USE_NUMA_MEMORY
+	vfree(tx_ring->tx_buffer_info);
+#else
+	kfree(tx_ring->tx_buffer_info);
+#endif
+	tx_ring->tx_buffer_info = NULL;
+	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
+	return -ENOMEM;
+}
+
+/**
+ * rnpgbe_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * callers duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int rnpgbe_setup_all_tx_resources(struct rnpgbe_adapter *adapter)
+{
+	int i, err = 0;
+
+	tx_dbg("adapter->num_tx_queues:%d, adapter->tx_ring[0]:%p\n",
+	       adapter->num_tx_queues, adapter->tx_ring[0]);
+
+	for (i = 0; i < (adapter->num_tx_queues); i++) {
+		BUG_ON(adapter->tx_ring[i] == NULL);
+		err = rnpgbe_setup_tx_resources(adapter->tx_ring[i], adapter);
+		if (!err)
+			continue;
+
+		e_err(probe, "Allocation for Tx Queue %u failed\n", i);
+		goto err_setup_tx;
+	}
+
+	return 0;
+err_setup_tx:
+	/* rewind the index freeing the rings as we go */
+	while (i--)
+		rnpgbe_free_tx_resources(adapter->tx_ring[i]);
+	return err;
+}
+
+/**
+ * rnpgbe_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @rx_ring:    rx descriptor ring (for a specific queue) to setup
+ * @adapter: board private structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int rnpgbe_setup_rx_resources(struct rnpgbe_ring *rx_ring,
+			      struct rnpgbe_adapter *adapter)
+{
+	struct device *dev = rx_ring->dev;
+	int orig_node = dev_to_node(dev);
+	int numa_node = NUMA_NO_NODE;
+	int size;
+
+	BUG_ON(rx_ring == NULL);
+
+	size = sizeof(struct rnpgbe_rx_buffer) * rx_ring->count;
+
+#ifdef USE_NUMA_MEMORY
+	if (rx_ring->q_vector)
+		numa_node = rx_ring->q_vector->numa_node;
+
+	rx_ring->rx_buffer_info = vzalloc_node(size, numa_node);
+	if (!rx_ring->rx_buffer_info)
+		rx_ring->rx_buffer_info = vzalloc(size);
+	if (!rx_ring->rx_buffer_info)
+		goto err;
+#else
+	rx_ring->rx_buffer_info = kzalloc(size, GFP_KERNEL);
+	if (!rx_ring->rx_buffer_info)
+		goto err;
+#endif
+	/* Round up to nearest 4K */
+	rx_ring->size = rx_ring->count * sizeof(union rnpgbe_rx_desc);
+	rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+#ifdef USE_NUMA_MEMORY
+	set_dev_node(dev, numa_node);
+#endif
+	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
+					   GFP_KERNEL);
+#ifdef USE_NUMA_MEMORY
+	set_dev_node(dev, orig_node);
+#endif
+	if (!rx_ring->desc)
+		rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+						   &rx_ring->dma, GFP_KERNEL);
+	if (!rx_ring->desc)
+		goto err;
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	DPRINTK(IFUP, INFO,
+		"RxRing:%d, vector:%d ItemCounts:%d "
+		"desc:%p(0x%llx) node:%d\n",
+		rx_ring->rnpgbe_queue_idx, rx_ring->q_vector->v_idx,
+		rx_ring->count, rx_ring->desc, rx_ring->dma, numa_node);
+
+	return 0;
+err:
+
+#ifdef USE_NUMA_MEMORY
+	vfree(rx_ring->rx_buffer_info);
+#else
+	kfree(rx_ring->rx_buffer_info);
+#endif
+	rx_ring->rx_buffer_info = NULL;
+	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
+	return -ENOMEM;
+}
+
+/**
+ * rnpgbe_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * callers duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int rnpgbe_setup_all_rx_resources(struct rnpgbe_adapter *adapter)
+{
+	int i, err = 0;
+	u32 head;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		BUG_ON(adapter->rx_ring[i] == NULL);
+
+		/* check head against count: in sriov the hw head may be
+		 * larger than the sw ring count
+		 */
+		head = ring_rd32(adapter->rx_ring[i],
+				 RNP_DMA_REG_RX_DESC_BUF_HEAD);
+		if (unlikely(head >= adapter->rx_ring[i]->count)) {
+			dbg("[%s] Ring %d head large than count",
+			    adapter->netdev->name,
+			    adapter->rx_ring[i]->rnpgbe_queue_idx);
+			adapter->rx_ring[i]->ring_flags |=
+				RNP_RING_FLAG_DELAY_SETUP_RX_LEN;
+			adapter->rx_ring[i]->reset_count =
+				adapter->rx_ring[i]->count;
+			adapter->rx_ring[i]->count = head + 1;
+		}
+		err = rnpgbe_setup_rx_resources(adapter->rx_ring[i], adapter);
+		if (!err)
+			continue;
+
+		e_err(probe, "Allocation for Rx Queue %u failed\n", i);
+		goto err_setup_rx;
+	}
+
+	return 0;
+err_setup_rx:
+	/* rewind the index freeing the rings as we go */
+	while (i--)
+		rnpgbe_free_rx_resources(adapter->rx_ring[i]);
+	return err;
+}
+
+/**
+ * rnpgbe_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void rnpgbe_free_tx_resources(struct rnpgbe_ring *tx_ring)
+{
+	BUG_ON(tx_ring == NULL);
+
+	rnpgbe_clean_tx_ring(tx_ring);
+#ifdef USE_NUMA_MEMORY
+	vfree(tx_ring->tx_buffer_info);
+#else
+	kfree(tx_ring->tx_buffer_info);
+#endif
+	tx_ring->tx_buffer_info = NULL;
+
+	/* if not set, then don't free */
+	if (!tx_ring->desc)
+		return;
+
+	dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
+			  tx_ring->dma);
+
+	tx_ring->desc = NULL;
+}
+
+/**
+ * rnpgbe_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void rnpgbe_free_all_tx_resources(struct rnpgbe_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < (adapter->num_tx_queues); i++)
+		rnpgbe_free_tx_resources(adapter->tx_ring[i]);
+}
+
+/**
+ * rnpgbe_free_rx_resources - Free Rx Resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void rnpgbe_free_rx_resources(struct rnpgbe_ring *rx_ring)
+{
+	BUG_ON(rx_ring == NULL);
+
+	rnpgbe_clean_rx_ring(rx_ring);
+
+#ifdef USE_NUMA_MEMORY
+	vfree(rx_ring->rx_buffer_info);
+#else
+	kfree(rx_ring->rx_buffer_info);
+#endif
+	rx_ring->rx_buffer_info = NULL;
+
+	/* if not set, then don't free */
+	if (!rx_ring->desc)
+		return;
+
+	dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
+			  rx_ring->dma);
+
+	rx_ring->desc = NULL;
+}
+
+/**
+ * rnpgbe_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void rnpgbe_free_all_rx_resources(struct rnpgbe_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < (adapter->num_rx_queues); i++) {
+		if (adapter->rx_ring[i]->desc)
+			rnpgbe_free_rx_resources(adapter->rx_ring[i]);
+	}
+}
+
+/**
+ * rnpgbe_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int rnpgbe_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
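+	/* frame size = MTU + L2 header + FCS, plus what appears to be room
+	 * for a VLAN tag (assumption for the extra ETH_FCS_LEN)
+	 */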
+	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN * 2;
+
+	/* MTU < 68 is an error and causes problems on some kernels */
+	if ((new_mtu < hw->min_length) || (max_frame > hw->max_length))
+		return -EINVAL;
+
+	e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
+
+	if (netdev->mtu == new_mtu)
+		return 0;
+
+	/* must set new MTU before calling down or up */
+	netdev->mtu = new_mtu;
+
+	rnpgbe_msg_post_status(adapter, PF_SET_MTU);
+
+	if (netif_running(netdev))
+		rnpgbe_reinit_locked(adapter);
+
+	return 0;
+}
+
+/**
+ * rnpgbe_tx_maxrate - callback to set the maximum per-queue bitrate
+ * @netdev: network interface device structure
+ * @queue_index: Tx queue to set
+ * @maxrate: desired maximum transmit bitrate Mbps
+ **/
+__maybe_unused static int rnpgbe_tx_maxrate(struct net_device *netdev,
+					    int queue_index, u32 maxrate)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_ring *tx_ring = adapter->tx_ring[queue_index];
+	u64 real_rate = 0;
+
+	adapter->max_rate[queue_index] = maxrate;
+	rnpgbe_dbg("%s: queue:%d maxrate:%d\n", __func__, queue_index, maxrate);
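+	/* a maxrate of 0 clears the per-queue limit; rate 0 is presumably
+	 * treated as "unlimited" by rnpgbe_setup_tx_maxrate()
+	 */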
+	if (!maxrate)
+		return rnpgbe_setup_tx_maxrate(
+			tx_ring, 0, adapter->hw.usecstocount * 100000);
+	/* we need to turn it into bytes/s */
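+	/* the scaling (x 1000 x 94 >> 3) appears to map Mbit/s onto the unit
+	 * used by the hw rate limiter, derated slightly below line rate
+	 * (assumption)
+	 */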
+	real_rate = ((u64)maxrate * 1000 * 94) >> 3;
+	rnpgbe_setup_tx_maxrate(tx_ring, real_rate,
+				adapter->hw.usecstocount * 100000);
+
+	return 0;
+}
+
+/**
+ * rnpgbe_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int rnpgbe_open(struct net_device *netdev)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int err;
+
+	DPRINTK(IFUP, INFO, "ifup\n");
+
+	/* disallow open during test */
+	if (test_bit(__RNP_TESTING, &adapter->state))
+		return -EBUSY;
+
+	netif_carrier_off(netdev);
+
+	/* allocate transmit descriptors */
+	err = rnpgbe_setup_all_tx_resources(adapter);
+	if (err)
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+	err = rnpgbe_setup_all_rx_resources(adapter);
+	if (err)
+		goto err_setup_rx;
+
+	rnpgbe_configure(adapter);
+
+	err = rnpgbe_request_irq(adapter);
+	if (err)
+		goto err_req_irq;
+
+	/* Notify the stack of the actual queue counts. */
+	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
+	if (err)
+		goto err_set_queues;
+
+	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
+	if (err)
+		goto err_set_queues;
+
+#ifdef HAVE_PTP_1588_CLOCK
+	if (module_enable_ptp)
+		rnpgbe_ptp_register(adapter);
+#endif
+	rnpgbe_up_complete(adapter);
+
+	return 0;
+
+err_set_queues:
+	rnpgbe_free_irq(adapter);
+err_req_irq:
+	rnpgbe_free_all_rx_resources(adapter);
+err_setup_rx:
+	rnpgbe_free_all_tx_resources(adapter);
+err_setup_tx:
+	hw->ops.set_mbx_ifup(hw, 0);
+	rnpgbe_reset(adapter);
+
+	return err;
+}
+
+/**
+ * rnpgbe_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the drivers control, but
+ * needs to be disabled.  A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int rnpgbe_close(struct net_device *netdev)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	DPRINTK(IFDOWN, INFO, "ifdown\n");
+
+#ifdef DISABLE_RX_IRQ
+	adapter->quit_poll_thread = true;
+#endif
+
+#ifdef HAVE_PTP_1588_CLOCK
+	if (module_enable_ptp)
+		rnpgbe_ptp_unregister(adapter);
+#endif
+	rnpgbe_down(adapter);
+
+	rnpgbe_free_irq(adapter);
+
+	rnpgbe_free_all_tx_resources(adapter);
+	rnpgbe_free_all_rx_resources(adapter);
+
+	/* if in sriov mode send link down to all vfs */
+	if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) {
+		adapter->link_up = 0;
+		adapter->link_up_old = 0;
+		rnpgbe_msg_post_status(adapter, PF_SET_LINK_STATUS);
+		/* wait all vf get this status */
+		usleep_range(5000, 10000);
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+#ifndef USE_LEGACY_PM_SUPPORT
+static int rnpgbe_resume(struct device *dev)
+#else
+static int rnpgbe_resume(struct pci_dev *pdev)
+#endif /* USE_LEGACY_PM_SUPPORT */
+{
+	struct rnpgbe_adapter *adapter;
+	struct net_device *netdev;
+	int err;
+	struct rnpgbe_hw *hw;
+#ifndef USE_LEGACY_PM_SUPPORT
+	struct pci_dev *pdev = to_pci_dev(dev);
+#endif
+
+	adapter = pci_get_drvdata(pdev);
+	hw = &adapter->hw;
+	netdev = adapter->netdev;
+	pr_info("call rnpgbe_resume\n");
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	/*
+	 * pci_restore_state clears dev->state_saved so call
+	 * pci_save_state to restore it.
+	 */
+	pci_save_state(pdev);
+
+	err = pcim_enable_device(pdev);
+	if (err) {
+		e_dev_err("Cannot enable PCI device from suspend\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	pci_wake_from_d3(pdev, false);
+
+	rtnl_lock();
+
+	err = rnpgbe_init_interrupt_scheme(adapter);
+	if (!err)
+		err = register_mbx_irq(adapter);
+
+	if (hw->ops.driver_status) {
+		hw->ops.driver_status(hw, false, rnpgbe_driver_suspuse);
+		hw->ops.driver_status(hw, true, rnpgbe_driver_insmod);
+	}
+
+	rnpgbe_reset(adapter);
+
+	/* set up the link with the default advertising */
+	hw->ops.setup_link(hw, DEFAULT_ADV, 1, 0, 0);
+	hw->advertised_link = DEFAULT_ADV;
+
+	if (!err) {
+		if (netif_running(netdev)) {
+			err = rnpgbe_open(netdev);
+		} else {
+			hw->ops.set_mbx_link_event(hw, 0);
+			hw->ops.set_mbx_ifup(hw, 0);
+		}
+	}
+
+	rtnl_unlock();
+
+	if (err)
+		return err;
+
+	netif_device_attach(netdev);
+
+	return 0;
+}
+
+#ifndef USE_LEGACY_PM_SUPPORT
+/**
+ * rnpgbe_freeze - quiesce the device (no IRQ's or DMA)
+ * @dev: generic device pointer for this port
+ */
+static int rnpgbe_freeze(struct device *dev)
+{
+	struct rnpgbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev));
+	struct net_device *netdev = adapter->netdev;
+
+	rtnl_lock();
+	netif_device_detach(netdev);
+
+	if (netif_running(netdev)) {
+		rnpgbe_down(adapter);
+		rnpgbe_free_irq(adapter);
+
+		rnpgbe_free_all_tx_resources(adapter);
+		rnpgbe_free_all_rx_resources(adapter);
+	}
+
+	remove_mbx_irq(adapter);
+	rnpgbe_clear_interrupt_scheme(adapter);
+	rtnl_unlock();
+
+	return 0;
+}
+
+/**
+ * rnpgbe_thaw - un-quiesce the device
+ * @dev: generic device pointer for this port
+ */
+static int rnpgbe_thaw(struct device *dev)
+{
+	struct rnpgbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev));
+	struct net_device *netdev = adapter->netdev;
+	int err;
+
+	rtnl_lock();
+	err = rnpgbe_init_interrupt_scheme(adapter);
+
+	if (netif_running(netdev))
+		err = rnpgbe_open(netdev);
+
+	rtnl_unlock();
+
+	if (err)
+		return err;
+
+	netif_device_attach(netdev);
+
+	return 0;
+}
+#endif /* USE_LEGACY_PM_SUPPORT */
+
+#endif /* CONFIG_PM */
+
+static int __rnpgbe_shutdown_suspuse(struct pci_dev *pdev, bool *enable_wake)
+{
+	struct rnpgbe_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 wufc = adapter->wol;
+#ifdef CONFIG_PM
+	int retval = 0;
+#endif
+
+	netif_device_detach(netdev);
+
+	rtnl_lock();
+	if (netif_running(netdev)) {
+		rnpgbe_down(adapter);
+		rnpgbe_free_irq(adapter);
+		rnpgbe_free_all_tx_resources(adapter);
+		rnpgbe_free_all_rx_resources(adapter);
+		/* should consider sriov mode ? */
+	}
+	rtnl_unlock();
+
+	/* if WoL or NCSI is enabled, we must notify the hw */
+	if ((hw->ncsi_en || adapter->wol) && hw->ops.driver_status)
+		hw->ops.driver_status(hw, true, rnpgbe_driver_suspuse);
+
+	remove_mbx_irq(adapter);
+	rnpgbe_clear_interrupt_scheme(adapter);
+
+#ifdef CONFIG_PM
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+
+#endif
+	if (wufc) {
+		rnpgbe_set_rx_mode(netdev);
+
+		/* enable the optics for n10 SFP+ fiber as we can WoL */
+		if (hw->ops.enable_tx_laser)
+			hw->ops.enable_tx_laser(hw);
+
+		/* turn on all-multi mode if wake on multicast is enabled */
+
+	}
+
+	if (hw->ops.setup_wol)
+		hw->ops.setup_wol(hw, adapter->wol);
+
+	pci_wake_from_d3(pdev, !!wufc);
+	*enable_wake = !!wufc;
+
+	pci_disable_device(pdev);
+
+	return 0;
+}
+
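+/* Same flow as __rnpgbe_shutdown_suspuse() above, except the firmware is
+ * told the driver is being removed (rnpgbe_driver_insmod, false) rather
+ * than suspended.
+ */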
+static int __rnpgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
+{
+	struct rnpgbe_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 wufc = adapter->wol;
+#ifdef CONFIG_PM
+	int retval = 0;
+#endif
+
+	netif_device_detach(netdev);
+
+	rtnl_lock();
+	if (netif_running(netdev)) {
+		rnpgbe_down(adapter);
+		rnpgbe_free_irq(adapter);
+		rnpgbe_free_all_tx_resources(adapter);
+		rnpgbe_free_all_rx_resources(adapter);
+		/* should consider sriov mode ? */
+	}
+	rtnl_unlock();
+
+	/* only send the mailbox message if ncsi or wol is enabled */
+	if ((hw->ncsi_en || adapter->wol) && hw->ops.driver_status)
+		hw->ops.driver_status(hw, false, rnpgbe_driver_insmod);
+
+	remove_mbx_irq(adapter);
+	rnpgbe_clear_interrupt_scheme(adapter);
+
+#ifdef CONFIG_PM
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+
+#endif
+	if (wufc) {
+		rnpgbe_set_rx_mode(netdev);
+
+		/* enable the optics for n10 SFP+ fiber as we can WoL */
+		if (hw->ops.enable_tx_laser)
+			hw->ops.enable_tx_laser(hw);
+
+		/* turn on all-multi mode if wake on multicast is enabled */
+
+	}
+
+	if (hw->ops.setup_wol)
+		hw->ops.setup_wol(hw, adapter->wol);
+
+	pci_wake_from_d3(pdev, !!wufc);
+	*enable_wake = !!wufc;
+
+	pci_disable_device(pdev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+#ifndef USE_LEGACY_PM_SUPPORT
+static int rnpgbe_suspend(struct device *dev)
+#else
+static int rnpgbe_suspend(struct pci_dev *pdev,
+			  pm_message_t __always_unused state)
+#endif /* USE_LEGACY_PM_SUPPORT */
+{
+	int retval;
+	bool wake;
+#ifndef USE_LEGACY_PM_SUPPORT
+	struct pci_dev *pdev = to_pci_dev(dev);
+#endif
+
+	pr_info("call rnpgbe_suspend\n");
+
+	retval = __rnpgbe_shutdown_suspuse(pdev, &wake);
+	if (retval)
+		return retval;
+
+	if (wake) {
+		pci_prepare_to_sleep(pdev);
+	} else {
+		pci_wake_from_d3(pdev, false);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+__maybe_unused static void rnpgbe_shutdown(struct pci_dev *pdev)
+{
+	bool wake = false;
+
+	pr_info("call rnpgbe_shutdown\n");
+
+	__rnpgbe_shutdown(pdev, &wake);
+
+	pr_info("call rnpgbe_shutdown wake %x\n", wake);
+
+	if (system_state == SYSTEM_POWER_OFF) {
+		pci_wake_from_d3(pdev, wake);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+	pr_info("call rnpgbe_shutdown done\n");
+}
+
+/**
+ * rnpgbe_update_stats - Update the board statistics counters.
+ * @adapter: board private structure
+ **/
+void rnpgbe_update_stats(struct rnpgbe_adapter *adapter)
+{
+	struct net_device_stats *net_stats = &adapter->netdev->stats;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_hw_stats *hw_stats = &adapter->hw_stats;
+
+	int i;
+	struct rnpgbe_ring *ring;
+	u64 hw_csum_rx_error = 0;
+	u64 hw_csum_rx_good = 0;
+	net_stats->tx_packets = 0;
+	net_stats->tx_bytes = 0;
+	net_stats->rx_packets = 0;
+	net_stats->rx_bytes = 0;
+	net_stats->rx_dropped = 0;
+	net_stats->rx_errors = 0;
+	hw_stats->vlan_strip_cnt = 0;
+	hw_stats->vlan_add_cnt = 0;
+
+	if (test_bit(__RNP_DOWN, &adapter->state) ||
+	    test_bit(__RNP_RESETTING, &adapter->state))
+		return;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		rnpgbe_for_each_ring(ring, adapter->q_vector[i]->rx) {
+			hw_csum_rx_error += ring->rx_stats.csum_err;
+			hw_csum_rx_good += ring->rx_stats.csum_good;
+			hw_stats->vlan_strip_cnt += ring->rx_stats.vlan_remove;
+			net_stats->rx_packets += ring->stats.packets;
+			net_stats->rx_bytes += ring->stats.bytes;
+		}
+
+		rnpgbe_for_each_ring(ring, adapter->q_vector[i]->tx) {
+			hw_stats->vlan_add_cnt += ring->tx_stats.vlan_add;
+			net_stats->tx_packets += ring->stats.packets;
+			net_stats->tx_bytes += ring->stats.bytes;
+		}
+	}
+
+	net_stats->rx_errors += hw_csum_rx_error;
+	hw->ops.update_hw_status(hw, hw_stats, net_stats);
+	adapter->hw_csum_rx_error = hw_csum_rx_error;
+	adapter->hw_csum_rx_good = hw_csum_rx_good;
+	net_stats->rx_errors = hw_csum_rx_error;
+}
+
+/**
+ * rnpgbe_watchdog_update_link - update the link status
+ * @adapter: pointer to the device adapter structure
+ **/
+static void rnpgbe_watchdog_update_link(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 link_speed = adapter->link_speed;
+	bool link_up = adapter->link_up;
+	bool duplex = adapter->duplex_old;
+	bool flow_rx = true, flow_tx = true;
+
+	if (!(adapter->flags & RNP_FLAG_NEED_LINK_UPDATE))
+		return;
+
+	if (hw->ops.check_link) {
+		hw->ops.check_link(hw, &link_speed, &link_up, &duplex, false);
+	} else {
+		/* assume the link is up if there is no check_link op */
+		link_speed = RNP_LINK_SPEED_10GB_FULL;
+		link_up = true;
+	}
+
+	if (link_up || time_after(jiffies, (adapter->link_check_timeout +
+					    RNP_TRY_LINK_TIMEOUT))) {
+		adapter->flags &= ~RNP_FLAG_NEED_LINK_UPDATE;
+	}
+	adapter->link_up = link_up;
+	adapter->link_speed = link_speed;
+	adapter->duplex_old = duplex;
+
+	if (hw->ops.get_pause_mode)
+		hw->ops.get_pause_mode(hw);
+	switch (hw->fc.current_mode) {
+	case rnpgbe_fc_none:
+		flow_rx = false;
+		flow_tx = false;
+		break;
+	case rnpgbe_fc_tx_pause:
+		flow_rx = false;
+		flow_tx = true;
+		break;
+	case rnpgbe_fc_rx_pause:
+		flow_rx = true;
+		flow_tx = false;
+		break;
+	case rnpgbe_fc_full:
+		flow_rx = true;
+		flow_tx = true;
+		break;
+	default:
+		hw_dbg(hw, "Flow control param set incorrectly\n");
+	}
+
+	if (adapter->link_up) {
+		if (hw->ops.set_mac_speed)
+			hw->ops.set_mac_speed(hw, true, link_speed, duplex);
+		if (hw->ops.set_pause_mode)
+			hw->ops.set_pause_mode(hw);
+
+		e_info(drv, "NIC Link is Up %s, %s Duplex, Flow Control: %s\n",
+		       (link_speed == RNP_LINK_SPEED_40GB_FULL ?
+				"40 Gbps" :
+				(link_speed == RNP_LINK_SPEED_25GB_FULL ?
+					 "25 Gbps" :
+					 (link_speed == RNP_LINK_SPEED_10GB_FULL ?
+						  "10 Gbps" :
+						  (link_speed == RNP_LINK_SPEED_1GB_FULL ?
+							   "1000 Mbps" :
+							   (link_speed == RNP_LINK_SPEED_100_FULL ?
+								    "100 Mbps" :
+								    (link_speed == RNP_LINK_SPEED_10_FULL ?
+									     "10 Mbps" :
+									     "unknown speed")))))),
+		       ((duplex) ? "Full" : "Half"),
+		       ((flow_rx && flow_tx) ?
+				"RX/TX" :
+				(flow_rx ? "RX" : (flow_tx ? "TX" : "None"))));
+	} else {
+		if (hw->ops.set_mac_speed)
+			hw->ops.set_mac_speed(hw, false, 0, false);
+	}
+}
+
+static void rnpgbe_update_default_up(struct rnpgbe_adapter *adapter)
+{
+#ifdef CONFIG_RNP_DCB
+	struct net_device *netdev = adapter->netdev;
+	struct dcb_app app = {
+		.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
+		.protocol = 0,
+	};
+	u8 up = 0;
+
+	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
+		up = dcb_ieee_getapp_mask(netdev, &app);
+
+	adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
+#endif
+}
+
+/**
+ * rnpgbe_eee_ctrl_timer - EEE TX SW timer.
+ * @t: pointer to the timer_list structure
+ *
+ * Description:
+ *  If there is no data transfer and we are not already in the LPI state,
+ *  the MAC transmitter can be moved to the LPI state.
+ */
+static void rnpgbe_eee_ctrl_timer(struct timer_list *t)
+{
+	struct rnpgbe_adapter *adapter = from_timer(adapter, t, eee_ctrl_timer);
+
+	rnpgbe_enable_eee_mode(adapter);
+	if (!test_bit(__RNP_EEE_REMOVE, &adapter->state))
+		mod_timer(&adapter->eee_ctrl_timer,
+			  RNP_LPI_T(adapter->eee_timer));
+}
+
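+/* rnpgbe_eee_init - arm or tear down the LPI software timer and program the
+ * hardware EEE timers according to adapter->eee_active; always returns true
+ */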
+static bool rnpgbe_eee_init(struct rnpgbe_adapter *adapter)
+{
+	int tx_lpi_timer = adapter->tx_lpi_timer;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	mutex_lock(&adapter->eee_lock);
+
+	/* Check if it needs to be deactivated */
+	if (!adapter->eee_active) {
+		set_bit(__RNP_EEE_REMOVE, &adapter->state);
+		netdev_dbg(adapter->netdev, "disable EEE\n");
+		del_timer_sync(&adapter->eee_ctrl_timer);
+		hw->ops.set_eee_timer(hw, 0, tx_lpi_timer);
+	} else {
+		clear_bit(__RNP_EEE_REMOVE, &adapter->state);
+		timer_setup(&adapter->eee_ctrl_timer, rnpgbe_eee_ctrl_timer, 0);
+		mod_timer(&adapter->eee_ctrl_timer,
+			  RNP_LPI_T(adapter->eee_timer));
+		hw->ops.set_eee_timer(hw, RNP_DEFAULT_LIT_LS, tx_lpi_timer);
+	}
+
+	mutex_unlock(&adapter->eee_lock);
+	netdev_dbg(adapter->netdev, "Energy-Efficient Ethernet initialized\n");
+	return true;
+}
+
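+/* rnpgbe_phy_init_eee - check whether EEE can be activated for the current
+ * link; returns 0 when it can, -EIO otherwise
+ */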
+static int rnpgbe_phy_init_eee(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	/* no EEE capability in hardware, or EEE disabled via ethtool */
+	if ((!hw->eee_capability) || (!adapter->eee_enabled))
+		return -EIO;
+	/* init eee only in full duplex */
+	if (!hw->duplex)
+		return -EIO;
+	/* do not init EEE at 10 Mbps */
+	if (hw->speed == 10)
+		return -EIO;
+	/* init EEE only if both local and link partner advertise it */
+	if (!(adapter->local_eee & adapter->partner_eee))
+		return -EIO;
+	if ((hw->hw_type == rnpgbe_hw_n500) ||
+	    (hw->hw_type == rnpgbe_hw_n210) ||
+	    (hw->hw_type == rnpgbe_hw_n210L)) {
+		/* n500 only supports EEE at 100/1000 Mbps full duplex */
+		if (!hw->duplex)
+			return -EIO;
+
+		if ((adapter->speed != RNP_LINK_SPEED_100_FULL) &&
+		    (adapter->speed != RNP_LINK_SPEED_1GB_FULL))
+			return -EIO;
+	}
+	/* EEE cannot be enabled in SR-IOV mode */
+	if (adapter->flags & RNP_FLAG_SRIOV_ENABLED)
+		return -EIO;
+
+	return 0;
+}
+
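+/* enable or disable EEE on link change: on link up try to re-activate EEE,
+ * on link down deactivate it and update the PHY link status (set_eee_pls)
+ */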
+static void rnpgbe_setup_eee_mode(struct rnpgbe_adapter *adapter, bool status)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	if (status) {
+		/* if EEE was active before, disable it first */
+		if (adapter->eee_active) {
+			adapter->eee_active = 0;
+			rnpgbe_eee_init(adapter);
+		}
+		/* if the link is up, try to activate EEE */
+		adapter->eee_active = rnpgbe_phy_init_eee(adapter) >= 0;
+		/* if EEE can be activated, initialize it */
+		if (adapter->eee_active) {
+			rnpgbe_eee_init(adapter);
+			if (hw->ops.set_eee_pls)
+				hw->ops.set_eee_pls(hw, true);
+		}
+	} else {
+		/* if EEE was active before, disable it */
+		if (adapter->eee_active) {
+			adapter->eee_active = 0;
+			rnpgbe_eee_init(adapter);
+		}
+		if (hw->ops.set_eee_pls)
+			hw->ops.set_eee_pls(hw, false);
+	}
+}
+
+/**
+ * rnpgbe_watchdog_link_is_up - update netif_carrier status and
+ *                             print link up message
+ * @adapter: pointer to the device adapter structure
+ **/
+static void rnpgbe_watchdog_link_is_up(struct rnpgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	/* only continue if link was previously down */
+	if (netif_carrier_ok(netdev))
+		return;
+
+	adapter->flags2 &= ~RNP_FLAG2_SEARCH_FOR_SFP;
+	switch (hw->mac.type) {
+	default:
+		break;
+	}
+
+	netif_carrier_on(netdev);
+
+	netif_tx_wake_all_queues(netdev);
+
+	/* update the default user priority for VFs */
+	rnpgbe_update_default_up(adapter);
+
+	hw->ops.set_mac_rx(hw, true);
+
+	rnpgbe_setup_eee_mode(adapter, true);
+}
+
+/**
+ * rnpgbe_watchdog_link_is_down - update netif_carrier status and
+ *                               print link down message
+ * @adapter: pointer to the adapter structure
+ **/
+static void rnpgbe_watchdog_link_is_down(struct rnpgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	adapter->link_up = false;
+	adapter->link_speed = 0;
+
+	/* only continue if link was up previously */
+	if (!netif_carrier_ok(netdev))
+		return;
+
+	/* poll for SFP+ cable when link is down */
+	adapter->flags2 |= RNP_FLAG2_SEARCH_FOR_SFP;
+
+	e_info(drv, "NIC Link is Down\n");
+
+	netif_carrier_off(netdev);
+
+	netif_tx_stop_all_queues(netdev);
+
+	hw->ops.set_mac_rx(hw, false);
+
+	rnpgbe_setup_eee_mode(adapter, false);
+}
+
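+/* push link state changes to the VFs over the mailbox; the cached values are
+ * only updated on a successful post, so a failed attempt is retried on the
+ * next watchdog run
+ */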
+static void rnpgbe_update_link_to_vf(struct rnpgbe_adapter *adapter)
+{
+	if (!(adapter->flags & RNP_FLAG_VF_INIT_DONE))
+		return;
+
+	if ((adapter->link_up_old != adapter->link_up) ||
+	    (adapter->link_speed_old != adapter->link_speed)) {
+		if (!test_bit(__RNP_IN_IRQ, &adapter->state)) {
+			if (rnpgbe_msg_post_status(adapter,
+						   PF_SET_LINK_STATUS) == 0) {
+				adapter->link_up_old = adapter->link_up;
+				adapter->link_speed_old = adapter->link_speed;
+			}
+		}
+	}
+}
+
+/**
+ * rnpgbe_watchdog_subtask - check and bring link up
+ * @adapter: pointer to the device adapter structure
+ **/
+static void rnpgbe_watchdog_subtask(struct rnpgbe_adapter *adapter)
+{
+	/* if the interface is down do nothing (link status updates for
+	 * SR-IOV may still be worth considering here)
+	 */
+	if (test_bit(__RNP_DOWN, &adapter->state) ||
+	    test_bit(__RNP_RESETTING, &adapter->state))
+		return;
+
+	rnpgbe_watchdog_update_link(adapter);
+
+	if (adapter->link_up)
+		rnpgbe_watchdog_link_is_up(adapter);
+	else
+		rnpgbe_watchdog_link_is_down(adapter);
+
+	rnpgbe_update_link_to_vf(adapter);
+
+	rnpgbe_update_stats(adapter);
+}
+
+/**
+ * rnpgbe_service_timer - Timer Call-back
+ * @t: pointer to the timer_list structure
+ **/
+void rnpgbe_service_timer(struct timer_list *t)
+{
+	struct rnpgbe_adapter *adapter = from_timer(adapter, t, service_timer);
+	unsigned long next_event_offset;
+	bool ready = true;
+
+	/* poll faster when waiting for link */
+	if (adapter->flags & RNP_FLAG_NEED_LINK_UPDATE)
+		next_event_offset = HZ / 10;
+	else
+		next_event_offset = HZ;
+	/* Reset the timer */
+	if (!test_bit(__RNP_REMOVE, &adapter->state))
+		mod_timer(&adapter->service_timer, next_event_offset + jiffies);
+
+	if (ready)
+		rnpgbe_service_event_schedule(adapter);
+}
+
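+/* handle a PF reset requested via RNP_FLAG2_RESET_PF: bring the interface
+ * down, tell the VFs the link is down, reset the hardware, rebuild the
+ * interrupt scheme and reopen the device
+ */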
+static void rnpgbe_reset_pf_subtask(struct rnpgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err;
+
+	if (!(adapter->flags2 & RNP_FLAG2_RESET_PF))
+		return;
+
+	rtnl_lock();
+	netif_device_detach(netdev);
+	if (netif_running(netdev)) {
+		rnpgbe_down(adapter);
+		rnpgbe_free_irq(adapter);
+		rnpgbe_free_all_tx_resources(adapter);
+		rnpgbe_free_all_rx_resources(adapter);
+	}
+	rtnl_unlock();
+	adapter->link_up = 0;
+	adapter->link_up_old = 0;
+	rnpgbe_msg_post_status(adapter, PF_SET_LINK_STATUS);
+	/* wait for all VFs to pick up this status */
+	usleep_range(500, 1000);
+	rnpgbe_reset(adapter);
+	remove_mbx_irq(adapter);
+	rnpgbe_clear_interrupt_scheme(adapter);
+	rtnl_lock();
+	err = rnpgbe_init_interrupt_scheme(adapter);
+	register_mbx_irq(adapter);
+
+	if (!err && netif_running(netdev))
+		err = rnpgbe_open(netdev);
+
+	rtnl_unlock();
+	rnpgbe_msg_post_status(adapter, PF_SET_RESET);
+	netif_device_attach(netdev);
+	adapter->flags2 &= (~RNP_FLAG2_RESET_PF);
+}
+
+static void rnpgbe_reset_subtask(struct rnpgbe_adapter *adapter)
+{
+	if (!(adapter->flags2 & RNP_FLAG2_RESET_REQUESTED))
+		return;
+
+	adapter->flags2 &= ~RNP_FLAG2_RESET_REQUESTED;
+
+	/* If we're already down or resetting, just bail */
+	if (test_bit(__RNP_DOWN, &adapter->state) ||
+	    test_bit(__RNP_RESETTING, &adapter->state))
+		return;
+
+	netdev_err(adapter->netdev, "Reset adapter\n");
+	adapter->tx_timeout_count++;
+	rtnl_lock();
+	rnpgbe_reinit_locked(adapter);
+	rtnl_unlock();
+}
+
+static void rnpgbe_rx_len_reset_subtask(struct rnpgbe_adapter *adapter)
+{
+	int i;
+	struct rnpgbe_ring *rx_ring;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rx_ring = adapter->rx_ring[i];
+		if (unlikely(rx_ring->ring_flags &
+			     RNP_RING_FLAG_DO_RESET_RX_LEN)) {
+			dbg("[%s] Rx-ring %d count reset\n",
+			    adapter->netdev->name, rx_ring->rnpgbe_queue_idx);
+			rnpgbe_rx_ring_reinit(adapter, rx_ring);
+			rx_ring->ring_flags &= (~RNP_RING_FLAG_DO_RESET_RX_LEN);
+		}
+	}
+}
+
+/* adaptive rx interrupt moderation: sample the per-ring packet rate once per
+ * sample_interval and adjust only the rx ITR (tx is left untouched)
+ */
+static void rnpgbe_auto_itr_moderation(struct rnpgbe_adapter *adapter)
+{
+	int i;
+	struct rnpgbe_ring *rx_ring;
+	u64 period = (u64)(jiffies - adapter->last_moder_jiffies);
+
+	if (adapter->priv_flags & RNP_PRIV_FLAG_RX_COALESCE)
+		return;
+
+	if (!adapter->adaptive_rx_coal ||
+	    period < adapter->sample_interval * HZ) {
+		return;
+	}
+
+	adapter->last_moder_jiffies = jiffies;
+
+	/* it is time to check moderation */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		u64 x, y, rate;
+		u64 rx_packets, packets, rx_pkt_diff;
+
+		rx_ring = adapter->rx_ring[i];
+		rx_packets = READ_ONCE(rx_ring->stats.packets);
+		rx_pkt_diff = rx_packets -
+			      adapter->last_moder_packets[rx_ring->queue_index];
+		packets = rx_pkt_diff;
+
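+		/* rate is the rx packet rate in packets per second over the
+		 * sampling period (do_div leaves the quotient in x)
+		 */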
+		x = packets * HZ;
+		y = do_div(x, period);
+		rate = x;
+
+		if (packets) {
+			if (rate < 20000)
+				rx_ring->ring_flags |= RNP_RING_LOWER_ITR;
+			else
+				rx_ring->ring_flags &= ~RNP_RING_LOWER_ITR;
+		}
+
+		/* write back new count */
+		adapter->last_moder_packets[rx_ring->queue_index] = rx_packets;
+	}
+}
+
+/**
+ * rnpgbe_service_task - manages and runs subtasks
+ * @work: pointer to work_struct containing our data
+ **/
+static void rnpgbe_service_task(struct work_struct *work)
+{
+	struct rnpgbe_adapter *adapter =
+		container_of(work, struct rnpgbe_adapter, service_task);
+
+#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD)
+#ifndef HAVE_UDP_TUNNEL_NIC_INFO
+	if (adapter->flags2 & RNP_FLAG2_UDP_TUN_REREG_NEEDED) {
+		rtnl_lock();
+		adapter->flags2 &= ~RNP_FLAG2_UDP_TUN_REREG_NEEDED;
+#ifdef HAVE_UDP_ENC_RX_OFFLOAD
+		udp_tunnel_get_rx_info(adapter->netdev);
+#else
+		vxlan_get_rx_port(adapter->netdev);
+#endif /* HAVE_UDP_ENC_RX_OFFLOAD */
+		rtnl_unlock();
+	}
+#endif /* HAVE_UDP_TUNNEL_NIC_INFO */
+#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */
+
+	rnpgbe_reset_subtask(adapter);
+	rnpgbe_reset_pf_subtask(adapter);
+	rnpgbe_watchdog_subtask(adapter);
+	rnpgbe_rx_len_reset_subtask(adapter);
+	rnpgbe_auto_itr_moderation(adapter);
+	rnpgbe_service_event_complete(adapter);
+}
+
+static int rnpgbe_tso(struct rnpgbe_ring *tx_ring,
+		      struct rnpgbe_tx_buffer *first, u32 *mac_ip_len,
+		      u8 *hdr_len, u32 *tx_flags)
+{
+	struct sk_buff *skb = first->skb;
+	struct net_device *netdev = tx_ring->netdev;
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		struct udphdr *udp;
+		unsigned char *hdr;
+	} l4;
+	u32 paylen, l4_offset;
+	int err;
+	u8 *inner_mac;
+	u16 gso_segs, gso_size;
+	u16 gso_need_pad;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	if (!skb_is_gso(skb))
+		return 0;
+
+	err = skb_cow_head(skb, 0);
+	if (err < 0)
+		return err;
+
+	inner_mac = skb->data;
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_transport_header(skb);
+
+	/* initialize outer IP header fields */
+	if (ip.v4->version == 4) {
+		/* IP header will have to cancel out any data that
+		 * is not a part of the outer IP header
+		 */
+		ip.v4->tot_len = 0;
+		ip.v4->check = 0x0000;
+	} else {
+		ip.v6->payload_len = 0;
+	}
+
+#ifdef HAVE_ENCAP_TSO_OFFLOAD
+	if (skb_shinfo(skb)->gso_type &
+	    (SKB_GSO_GRE |
+#ifdef NETIF_F_GSO_PARTIAL
+	     SKB_GSO_GRE_CSUM |
+#endif
+	     SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) {
+#ifndef NETIF_F_GSO_PARTIAL
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+#else
+		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
+#endif
+		}
+		/* we should always do this */
+		inner_mac = skb_inner_mac_header(skb);
+
+		first->tunnel_hdr_len = (inner_mac - skb->data);
+
+		if (skb_shinfo(skb)->gso_type &
+		    (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) {
+			*tx_flags |= RNP_TXD_TUNNEL_VXLAN;
+			l4.udp->check = 0;
+			tx_dbg("set outer l4.udp to 0\n");
+		} else {
+			*tx_flags |= RNP_TXD_TUNNEL_NVGRE;
+		}
+
+		/* reset pointers to inner headers */
+		ip.hdr = skb_inner_network_header(skb);
+		l4.hdr = skb_inner_transport_header(skb);
+	}
+
+#endif /* HAVE_ENCAP_TSO_OFFLOAD */
+
+	if (ip.v4->version == 4) {
+		/* the (possibly inner) IP header will have to cancel out any
+		 * data that is not a part of the IP header itself
+		 */
+		ip.v4->tot_len = 0;
+		ip.v4->check = 0x0000;
+
+	} else {
+		ip.v6->payload_len = 0;
+		/* set ipv6 type */
+		*tx_flags |= RNP_TXD_FLAG_IPv6;
+	}
+
+	/* determine offset of inner transport header */
+	l4_offset = l4.hdr - skb->data;
+
+	paylen = skb->len - l4_offset;
+	tx_dbg("before l4 checksum is %x\n", l4.tcp->check);
+
+	if (skb->csum_offset == offsetof(struct tcphdr, check)) {
+		tx_dbg("tcp before l4 checksum is %x\n", l4.tcp->check);
+		*tx_flags |= RNP_TXD_L4_TYPE_TCP;
+		/* compute length of segmentation header */
+		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+		csum_replace_by_diff(&l4.tcp->check,
+				     (__force __wsum)htonl(paylen));
+		l4.tcp->psh = 0;
+		tx_dbg("tcp l4 checksum is %x\n", l4.tcp->check);
+	} else {
+		tx_dbg("paylen is %x\n", paylen);
+		*tx_flags |= RNP_TXD_L4_TYPE_UDP;
+		/* compute length of segmentation header */
+		tx_dbg("udp before l4 checksum is %x\n", l4.udp->check);
+		*hdr_len = sizeof(*l4.udp) + l4_offset;
+		csum_replace_by_diff(&l4.udp->check,
+				     (__force __wsum)htonl(paylen));
+		tx_dbg("udp l4 checksum is %x\n", l4.udp->check);
+	}
+
+	tx_dbg("l4 checksum is %x\n", l4.tcp->check);
+	*mac_ip_len = (l4.hdr - ip.hdr) | ((ip.hdr - inner_mac) << 9);
+
+	/* compute header lengths */
+	/* pull values out of skb_shinfo */
+	gso_size = skb_shinfo(skb)->gso_size;
+	gso_segs = skb_shinfo(skb)->gso_segs;
+
+#ifndef HAVE_NDO_FEATURES_CHECK
+	/* too small a TSO segment size causes problems */
+	if (gso_size < 64) {
+		gso_size = 64;
+		gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, 64);
+	}
+#endif
+	/* if MAC auto-padding is disabled, check the GSO padding condition */
+	if (adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING) {
+		gso_need_pad = (first->skb->len - *hdr_len) % gso_size;
+		if (gso_need_pad) {
+			if ((gso_need_pad + *hdr_len) <= 60) {
+				gso_need_pad = 60 - (gso_need_pad + *hdr_len);
+				first->gso_need_padding = !!gso_need_pad;
+			}
+		}
+	}
+
+	/* update gso_segs and bytecount with the header size so tx stats
+	 * reflect the on-wire bytes
+	 */
+	first->gso_segs = gso_segs;
+	first->bytecount += (first->gso_segs - 1) * *hdr_len;
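+	/* mss_len_vf_num packs the MSS into the low 16 bits and the L4 header
+	 * length (TCP: doff * 4, UDP: 8 bytes) into bits 24+; the VF number is
+	 * OR-ed in at bit 16 later in rnpgbe_xmit_frame_ring()
+	 */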
+	if (skb->csum_offset == offsetof(struct tcphdr, check)) {
+		first->mss_len_vf_num |=
+			(gso_size | ((l4.tcp->doff * 4) << 24));
+	} else {
+		first->mss_len_vf_num |= (gso_size | ((8) << 24));
+	}
+
+	*tx_flags |= RNP_TXD_FLAG_TSO | RNP_TXD_IP_CSUM | RNP_TXD_L4_CSUM;
+	first->ctx_flag = true;
+
+	return 1;
+}
+
+static int rnpgbe_tx_csum(struct rnpgbe_ring *tx_ring,
+			  struct rnpgbe_tx_buffer *first, u32 *mac_ip_len,
+			  u32 *tx_flags)
+{
+	struct sk_buff *skb = first->skb;
+	u8 l4_proto = 0;
+	u8 ip_len = 0;
+	u8 mac_len = 0;
+	u8 *inner_mac = skb->data;
+	u8 *exthdr;
+	__be16 frag_off;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		struct udphdr *udp;
+		unsigned char *hdr;
+	} l4;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_transport_header(skb);
+
+	inner_mac = skb->data;
+
+#ifdef HAVE_ENCAP_CSUM_OFFLOAD
+	/* outer protocol */
+	if (skb->encapsulation) {
+		/* define outer network header type */
+		if (ip.v4->version == 4) {
+			l4_proto = ip.v4->protocol;
+		} else {
+			exthdr = ip.hdr + sizeof(*ip.v6);
+			l4_proto = ip.v6->nexthdr;
+			if (l4.hdr != exthdr)
+				ipv6_skip_exthdr(skb, exthdr - skb->data,
+						 &l4_proto, &frag_off);
+		}
+
+		/* define outer transport */
+		switch (l4_proto) {
+		case IPPROTO_UDP:
+			l4.udp->check = 0;
+			break;
+#ifdef HAVE_GRE_ENCAP_OFFLOAD
+		case IPPROTO_GRE:
+			/* There was a long-standing issue in GRE where GSO
+			 * was not setting the outer transport header unless
+			 * a GRE checksum was requested. This was fixed in
+			 * the 4.6 version of the kernel.  In the 4.7 kernel
+			 * support for GRE over IPv6 was added to GSO.  So we
+			 * can assume this workaround for all IPv4 headers
+			 * without impacting later versions of the GRE.
+			 */
+			if (ip.v4->version == 4)
+				l4.hdr = ip.hdr + (ip.v4->ihl * 4);
+			break;
+#endif
+		default:
+			skb_checksum_help(skb);
+			return -1;
+		}
+
+		/* switch IP header pointer from outer to inner header */
+		ip.hdr = skb_inner_network_header(skb);
+		l4.hdr = skb_inner_transport_header(skb);
+
+		inner_mac = skb_inner_mac_header(skb);
+		first->tunnel_hdr_len = inner_mac - skb->data;
+		first->ctx_flag = true;
+		tx_dbg("tunnel length is %d\n", first->tunnel_hdr_len);
+	}
+#endif /* HAVE_ENCAP_CSUM_OFFLOAD */
+
+	mac_len = (ip.hdr - inner_mac); /* mac length */
+	*mac_ip_len = (ip.hdr - inner_mac) << 9;
+	tx_dbg("inner checksum needed %d\n", skb_checksum_start_offset(skb));
+	tx_dbg("skb->encapsulation %d\n", skb->encapsulation);
+	ip_len = (l4.hdr - ip.hdr);
+	if (ip.v4->version == 4) {
+		l4_proto = ip.v4->protocol;
+	} else {
+		exthdr = ip.hdr + sizeof(*ip.v6);
+		l4_proto = ip.v6->nexthdr;
+		if (l4.hdr != exthdr)
+			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
+					 &frag_off);
+		*tx_flags |= RNP_TXD_FLAG_IPv6;
+	}
+	/* Enable L4 checksum offloads */
+	switch (l4_proto) {
+	case IPPROTO_TCP:
+		*tx_flags |= RNP_TXD_L4_TYPE_TCP | RNP_TXD_L4_CSUM;
+		break;
+	case IPPROTO_SCTP:
+		tx_dbg("sctp checksum packet\n");
+		*tx_flags |= RNP_TXD_L4_TYPE_SCTP | RNP_TXD_L4_CSUM;
+		break;
+	case IPPROTO_UDP:
+		*tx_flags |= RNP_TXD_L4_TYPE_UDP | RNP_TXD_L4_CSUM;
+		break;
+	default:
+		skb_checksum_help(skb);
+		return 0;
+	}
+
+	if ((tx_ring->ring_flags & RNP_RING_NO_TUNNEL_SUPPORT) &&
+	    (first->ctx_flag)) {
+		/* the hardware does not support tunnel offload */
+		if (!(first->priv_tags)) {
+			first->ctx_flag = false;
+			mac_len += first->tunnel_hdr_len;
+			first->tunnel_hdr_len = 0;
+		}
+	}
+	tx_dbg("mac length is %d\n", mac_len);
+	tx_dbg("ip length is %d\n", ip_len);
+	*mac_ip_len = (mac_len << 9) | ip_len;
+
+	return 0;
+}
+
+static int __rnpgbe_maybe_stop_tx(struct rnpgbe_ring *tx_ring, u16 size)
+{
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it.
+	 */
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available.
+	 */
+	if (likely(rnpgbe_desc_unused(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	++tx_ring->tx_stats.restart_queue;
+
+	return 0;
+}
+
+static inline int rnpgbe_maybe_stop_tx(struct rnpgbe_ring *tx_ring, u16 size)
+{
+	if (likely(rnpgbe_desc_unused(tx_ring) >= size))
+		return 0;
+	return __rnpgbe_maybe_stop_tx(tx_ring, size);
+}
+
+static int rnpgbe_tx_map(struct rnpgbe_ring *tx_ring,
+			 struct rnpgbe_tx_buffer *first, u32 mac_ip_len,
+			 u32 tx_flags)
+{
+	struct sk_buff *skb = first->skb;
+	struct rnpgbe_tx_buffer *tx_buffer;
+	struct rnpgbe_tx_desc *tx_desc;
+	skb_frag_t *frag;
+	dma_addr_t dma;
+	unsigned int data_len, size;
+	u16 i = tx_ring->next_to_use;
+	u64 fun_id = ((u64)(tx_ring->pfvfnum) << (56));
+
+	tx_desc = RNP_TX_DESC(tx_ring, i);
+	size = skb_headlen(skb);
+	data_len = skb->data_len;
+
+	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
+	tx_buffer = first;
+
+	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_error;
+
+		/* record length, and DMA address */
+		dma_unmap_len_set(tx_buffer, len, size);
+		dma_unmap_addr_set(tx_buffer, dma, dma);
+
+		/* 1st desc */
+		tx_desc->pkt_addr = cpu_to_le64(dma | fun_id);
+
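+		/* a buffer larger than RNP_MAX_DATA_PER_TXD is split across
+		 * several descriptors, advancing dma/size and wrapping the
+		 * ring index as needed
+		 */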
+		while (unlikely(size > RNP_MAX_DATA_PER_TXD)) {
+			tx_desc->vlan_cmd_bsz = build_ctob(
+				tx_flags, mac_ip_len, RNP_MAX_DATA_PER_TXD);
+			buf_dump_line("tx0  ", __LINE__, tx_desc,
+				      sizeof(*tx_desc));
+			i++;
+			tx_desc++;
+			if (i == tx_ring->count) {
+				tx_desc = RNP_TX_DESC(tx_ring, 0);
+				i = 0;
+			}
+			dma += RNP_MAX_DATA_PER_TXD;
+			size -= RNP_MAX_DATA_PER_TXD;
+
+			tx_desc->pkt_addr = cpu_to_le64(dma | fun_id);
+		}
+
+		buf_dump_line("tx1  ", __LINE__, tx_desc, sizeof(*tx_desc));
+		if (likely(!data_len))
+			break;
+		tx_desc->vlan_cmd_bsz = build_ctob(tx_flags, mac_ip_len, size);
+		buf_dump_line("tx2  ", __LINE__, tx_desc, sizeof(*tx_desc));
+
+		/* ==== frag== */
+		i++;
+		tx_desc++;
+		if (i == tx_ring->count) {
+			tx_desc = RNP_TX_DESC(tx_ring, 0);
+			i = 0;
+		}
+
+		size = skb_frag_size(frag);
+		data_len -= size;
+		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+				       DMA_TO_DEVICE);
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+	}
+
+	/* write last descriptor with RS and EOP bits */
+	tx_desc->vlan_cmd_bsz = build_ctob(
+		tx_flags | RNP_TXD_CMD_EOP | RNP_TXD_CMD_RS, mac_ip_len, size);
+	buf_dump_line("tx3  ", __LINE__, tx_desc, sizeof(*tx_desc));
+
+	/* set the timestamp */
+	first->time_stamp = jiffies;
+
+	tx_ring->tx_stats.send_bytes += first->bytecount;
+
+	/*
+	 * Force memory writes to complete before letting h/w know there
+	 * are new descriptors to fetch.  (Only applicable for weak-ordered
+	 * memory model archs, such as IA-64).
+	 *
+	 * We also need this memory barrier to make certain all of the
+	 * status bits have been updated before next_to_watch is written.
+	 */
+	/* timestamp the skb as late as possible, just prior to notifying
+	 * the MAC that it should transmit this packet
+	 */
+	wmb();
+	/* set next_to_watch value indicating a packet is present */
+	first->next_to_watch = tx_desc;
+
+	buf_dump_line("tx4  ", __LINE__, tx_desc, sizeof(*tx_desc));
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
+	tx_ring->next_to_use = i;
+
+	skb_tx_timestamp(skb);
+#ifdef SIMULATE_TX
+	napi_consume_skb(first->skb, 64);
+	dma_unmap_single(tx_ring->dev, dma_unmap_addr(first, dma),
+			 dma_unmap_len(first, len), DMA_TO_DEVICE);
+
+	tx_ring->stats.bytes += skb->len;
+	tx_ring->stats.packets += 1;
+	first->skb = NULL;
+#else
+
+#ifdef NO_BQL_TEST
+#else
+	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
+#endif
+	/* notify HW of packet */
+	rnpgbe_wr_reg(tx_ring->tail, i);
+
+#ifndef SPIN_UNLOCK_IMPLIES_MMIOWB
+	/* we need this if more than one processor can write to our tail
+	 * at a time, it synchronizes IO on IA64/Altix systems
+	 */
+	mmiowb();
+#endif
+
+#endif
+	return 0;
+dma_error:
+	dev_err(tx_ring->dev, "TX DMA map failed\n");
+
+	/* clear dma mappings for failed tx_buffer_info map */
+	for (;;) {
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+		rnpgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+		if (tx_buffer == first)
+			break;
+		if (i == 0)
+			i += tx_ring->count;
+		i--;
+	}
+	dev_kfree_skb_any(first->skb);
+	first->skb = NULL;
+	tx_ring->next_to_use = i;
+
+	return -1;
+}
+
+static netdev_tx_t rnpgbe_xmit_frame_ring(struct sk_buff *skb,
+				   struct rnpgbe_adapter *adapter,
+				   struct rnpgbe_ring *tx_ring, bool tx_padding)
+{
+	struct rnpgbe_tx_buffer *first;
+	int tso;
+	u32 tx_flags = 0;
+	unsigned short f;
+	u16 count = TXD_USE_COUNT(skb_headlen(skb));
+	__be16 protocol = skb->protocol;
+	u8 hdr_len = 0;
+	int ignore_vlan = 0;
+	/* default length must not be 0 (hardware requirement) */
+	u32 mac_ip_len = 20;
+
+	tx_dbg("=== begin ====\n");
+	tx_dbg("rnp skb:%p, skb->len:%d  headlen:%d, data_len:%d\n", skb,
+	       skb->len, skb_headlen(skb), skb->data_len);
+	tx_dbg("next_to_clean %d, next_to_use %d\n", tx_ring->next_to_clean,
+	       tx_ring->next_to_use);
+	/*
+	 * need: 1 descriptor per page * PAGE_SIZE/RNP_MAX_DATA_PER_TXD,
+	 *       + 1 desc for skb_headlen/RNP_MAX_DATA_PER_TXD,
+	 *       + 2 desc gap to keep tail from touching head,
+	 *       + 1 desc for context descriptor,
+	 * otherwise try next time
+	 */
+	if (adapter->tx_path_in_lpi_mode)
+		rnpgbe_disable_eee_mode(adapter);
+
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+		skb_frag_t *frag_temp = &skb_shinfo(skb)->frags[f];
+
+		count += TXD_USE_COUNT(skb_frag_size(frag_temp));
+		tx_dbg(" rnp #%d frag: size:%d\n", f, skb_frag_size(frag_temp));
+	}
+
+	if (rnpgbe_maybe_stop_tx(tx_ring, count + 3)) {
+		tx_ring->tx_stats.tx_busy++;
+		return NETDEV_TX_BUSY;
+	}
+
+	/* record the location of the first descriptor for this packet */
+	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+	first->skb = skb;
+	/* account for frames shorter than the 60-byte minimum */
+	first->bytecount = (skb->len > 60) ? skb->len : 60;
+	first->gso_segs = 1;
+	first->priv_tags = 0;
+	first->mss_len_vf_num = 0;
+	first->inner_vlan_tunnel_len = 0;
+	first->ctx_flag =
+		(adapter->flags & RNP_FLAG_SRIOV_ENABLED) ? true : false;
+
+	/* if we have a HW VLAN tag being added default to the HW one */
+	/* RNP_TXD_VLAN_VALID is used for veb */
+	/* setup padding flag */
+	if (adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING) {
+		first->ctx_flag = true;
+		first->gso_need_padding = tx_padding;
+	}
+
+	/* RNP_FLAG2_VLAN_STAGS_ENABLED and tx stags offload are not
+	 * supported together
+	 */
+	if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) {
+		/* always add a stags for any packets out */
+		first->inner_vlan_tunnel_len |= (adapter->stags_vid);
+		first->priv_tags = 1;
+		first->ctx_flag = true;
+
+		if (skb_vlan_tag_present(skb)) {
+			tx_flags |= RNP_TXD_VLAN_VALID |
+				    RNP_TXD_VLAN_CTRL_INSERT_VLAN;
+			tx_flags |= skb_vlan_tag_get(skb);
+			/* else if it is a SW VLAN check the next
+			 * protocol and store the tag
+			 */
+		} else if (protocol == htons(ETH_P_8021Q)) {
+			struct vlan_hdr *vhdr, _vhdr;
+
+			vhdr = skb_header_pointer(skb, ETH_HLEN,
+						  sizeof(_vhdr), &_vhdr);
+			if (!vhdr)
+				goto out_drop;
+
+			protocol = vhdr->h_vlan_encapsulated_proto;
+			tx_flags |= ntohs(vhdr->h_vlan_TCI);
+			tx_flags |= RNP_TXD_VLAN_VALID;
+		}
+
+	} else {
+		/* normal mode */
+		if (skb_vlan_tag_present(skb)) {
+#ifndef NO_SKB_VLAN_PROTO
+			if (skb->vlan_proto != htons(ETH_P_8021Q)) {
+				/* veb only use ctags */
+				tx_flags |= skb_vlan_tag_get(skb);
+				tx_flags |= RNP_TXD_SVLAN_TYPE |
+					    RNP_TXD_VLAN_CTRL_INSERT_VLAN;
+			} else {
+#endif
+				tx_flags |= skb_vlan_tag_get(skb);
+				tx_flags |= RNP_TXD_VLAN_VALID |
+					    RNP_TXD_VLAN_CTRL_INSERT_VLAN;
+#ifndef NO_SKB_VLAN_PROTO
+			}
+#endif
+			tx_ring->tx_stats.vlan_add++;
+			/* else if it is a SW VLAN check the next
+			 * protocol and store the tag
+			 */
+			/* veb only use ctags */
+		} else if (protocol == htons(ETH_P_8021Q)) {
+			struct vlan_hdr *vhdr, _vhdr;
+
+			vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr),
+						  &_vhdr);
+			if (!vhdr)
+				goto out_drop;
+
+			protocol = vhdr->h_vlan_encapsulated_proto;
+			tx_flags |= ntohs(vhdr->h_vlan_TCI);
+			tx_flags |= RNP_TXD_VLAN_VALID;
+			ignore_vlan = 1;
+		}
+	}
+	protocol = vlan_get_protocol(skb);
+#ifdef SKB_SHARED_TX_IS_UNION
+	if (unlikely(skb_tx(skb)->hardware) &&
+	    adapter->flags2 & RNP_FLAG2_PTP_ENABLED && adapter->ptp_tx_en) {
+		if (!test_and_set_bit_lock(__RNP_PTP_TX_IN_PROGRESS,
+					   &adapter->state)) {
+			skb_tx(skb)->in_progress = 1;
+
+#else
+	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+	    adapter->flags2 & RNP_FLAG2_PTP_ENABLED && adapter->ptp_tx_en) {
+		if (!test_and_set_bit_lock(__RNP_PTP_TX_IN_PROGRESS,
+					   &adapter->state)) {
+			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+#endif
+			tx_flags |= RNP_TXD_FLAG_PTP;
+			adapter->ptp_tx_skb = skb_get(skb);
+			adapter->tx_hwtstamp_start = jiffies;
+			schedule_work(&adapter->tx_hwtstamp_work);
+		} else {
+			netdev_dbg(tx_ring->netdev, "ptp_tx_skb miss\n");
+		}
+	}
+	/* record initial flags and protocol */
+	tso = rnpgbe_tso(tx_ring, first, &mac_ip_len, &hdr_len, &tx_flags);
+	if (tso < 0)
+		goto out_drop;
+	else if (!tso)
+		rnpgbe_tx_csum(tx_ring, first, &mac_ip_len, &tx_flags);
+	/* in SR-IOV mode, packets sent by the PF must carry the vf_num in the
+	 * context descriptor
+	 */
+	if (unlikely(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) {
+		first->ctx_flag = true;
+		first->mss_len_vf_num |= (adapter->vf_num_for_pf << 16);
+	}
+
+	/* add control desc */
+	rnpgbe_maybe_tx_ctxtdesc(tx_ring, first, ignore_vlan);
+
+	if (rnpgbe_tx_map(tx_ring, first, mac_ip_len, tx_flags))
+		goto cleanup_tx_tstamp;
+#ifndef HAVE_TRANS_START_IN_QUEUE
+	tx_ring->netdev->trans_start = jiffies;
+#endif
+	/* stop the queue early if descriptors are running low */
+	rnpgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+	tx_dbg("=== end ====\n\n\n\n");
+	return NETDEV_TX_OK;
+
+out_drop:
+	dev_kfree_skb_any(first->skb);
+	first->skb = NULL;
+cleanup_tx_tstamp:
+	if (unlikely(tx_flags & RNP_TXD_FLAG_PTP)) {
+		dev_kfree_skb_any(adapter->ptp_tx_skb);
+		adapter->ptp_tx_skb = NULL;
+		cancel_work_sync(&adapter->tx_hwtstamp_work);
+		clear_bit_unlock(__RNP_PTP_TX_IN_PROGRESS, &adapter->state);
+	}
+
+	return NETDEV_TX_OK;
+}
+
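+/* returns true when the frame carries SCTP; such frames are not padded in
+ * software (presumably so pad bytes do not disturb the SCTP checksum) and
+ * the padding request is handed to the hardware path instead
+ */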
+static bool check_sctp_no_padding(struct sk_buff *skb)
+{
+	bool no_padding = false;
+	u8 l4_proto = 0;
+	u8 *exthdr;
+	__be16 frag_off;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		struct udphdr *udp;
+		unsigned char *hdr;
+	} l4;
+
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_transport_header(skb);
+
+	if (ip.v4->version == 4) {
+		l4_proto = ip.v4->protocol;
+	} else {
+		exthdr = ip.hdr + sizeof(*ip.v6);
+		l4_proto = ip.v6->nexthdr;
+		if (l4.hdr != exthdr)
+			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
+					 &frag_off);
+	}
+	switch (l4_proto) {
+	case IPPROTO_SCTP:
+		no_padding = true;
+		break;
+	default:
+
+		break;
+	}
+
+	return no_padding;
+}
+
+static netdev_tx_t rnpgbe_xmit_frame(struct sk_buff *skb,
+				     struct net_device *netdev)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_ring *tx_ring;
+	bool tx_padding = false;
+
+	if (!netif_carrier_ok(netdev)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	/*
+	 * Pad short frames so the hardware minimum frame-length requirement
+	 * is met.
+	 */
+	if ((adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING) &&
+	    (!(adapter->priv_flags & RNP_PRIV_FLAG_SOFT_TX_PADDING))) {
+		if (skb->len < 60) {
+			if (!check_sctp_no_padding(skb)) {
+				if (skb_put_padto(skb, 60))
+					return NETDEV_TX_OK;
+
+			} else {
+				tx_padding = true;
+			}
+		}
+	} else {
+		if (skb->len < 33) {
+			if (skb_padto(skb, 33))
+				return NETDEV_TX_OK;
+			skb->len = 33;
+		}
+	}
+	tx_ring = adapter->tx_ring[skb->queue_mapping];
+
+	return rnpgbe_xmit_frame_ring(skb, adapter, tx_ring, tx_padding);
+}
+
+/**
+ * rnpgbe_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int rnpgbe_set_mac(struct net_device *netdev, void *p)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct sockaddr *addr = p;
+	u8 target_addr[ETH_ALEN];
+	bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED);
+
+	dbg("[%s] call set mac\n", netdev->name);
+
+	memcpy(target_addr, addr->sa_data, netdev->addr_len);
+
+	if (!is_valid_ether_addr(target_addr))
+		return -EADDRNOTAVAIL;
+
+	eth_hw_addr_set(netdev, target_addr);
+	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+	hw->ops.set_mac(hw, hw->mac.addr, sriov_flag);
+	/* reset veb table */
+	rnpgbe_configure_virtualization(adapter);
+	return 0;
+}
+
+static int rnpgbe_mdio_read(struct net_device *netdev, int prtad, int devad,
+			    u32 addr, u32 *phy_value)
+{
+	int rc = -EIO;
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u16 value;
+
+	rc = hw->ops.phy_read_reg(hw, addr, 0, &value);
+	*phy_value = value;
+
+	return rc;
+}
+
+static int rnpgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
+			     u16 addr, u16 value)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	return hw->ops.phy_write_reg(hw, addr, 0, value);
+}
+
+static int rnpgbe_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+			    int cmd)
+{
+	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr->ifr_data;
+	int prtad, devad, ret;
+	u32 phy_value;
+
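+	/* clause 45 phy_id encoding: port address in the upper bits, device
+	 * address in the low 5 bits; note the mdio helpers above currently
+	 * ignore prtad/devad
+	 */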
+	prtad = (mii->phy_id & MDIO_PHY_ID_PRTAD) >> 5;
+	devad = (mii->phy_id & MDIO_PHY_ID_DEVAD);
+
+	if (cmd == SIOCGMIIREG) {
+		ret = rnpgbe_mdio_read(netdev, prtad, devad, mii->reg_num,
+				       &phy_value);
+		if (ret < 0)
+			return ret;
+		mii->val_out = phy_value;
+		return 0;
+	} else {
+		return rnpgbe_mdio_write(netdev, prtad, devad, mii->reg_num,
+					 mii->val_in);
+	}
+}
+
+static int rnpgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+#ifdef HAVE_PTP_1588_CLOCK
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+#endif
+	/* PTP 1588 uses this */
+	switch (cmd) {
+#ifdef HAVE_PTP_1588_CLOCK
+#ifdef SIOCGHWTSTAMP
+	case SIOCGHWTSTAMP:
+		/* if null, return here */
+		if (!adapter->hwts_ops)
+			return -EINVAL;
+		if (module_enable_ptp)
+			return rnpgbe_ptp_get_ts_config(adapter, req);
+		break;
+#endif
+	case SIOCSHWTSTAMP:
+		/* if null, return here */
+		if (!adapter->hwts_ops)
+			return -EINVAL;
+		if (module_enable_ptp)
+			return rnpgbe_ptp_set_ts_config(adapter, req);
+		break;
+#endif
+	case SIOCGMIIPHY:
+		return 0;
+	case SIOCGMIIREG:
+		/* fall through */
+	case SIOCSMIIREG:
+		return rnpgbe_mii_ioctl(netdev, req, cmd);
+	}
+	return -EINVAL;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void rnpgbe_netpoll(struct net_device *netdev)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int i;
+
+	/* if interface is down do nothing */
+	if (test_bit(__RNP_DOWN, &adapter->state))
+		return;
+
+	adapter->flags |= RNP_FLAG_IN_NETPOLL;
+	for (i = 0; i < adapter->num_q_vectors; i++)
+		rnpgbe_msix_clean_rings(0, adapter->q_vector[i]);
+	adapter->flags &= ~RNP_FLAG_IN_NETPOLL;
+}
+
+#endif
+
+#ifdef HAVE_NDO_GET_STATS64
+#ifdef HAVE_VOID_NDO_GET_STATS64
+static void rnpgbe_get_stats64(struct net_device *netdev,
+			       struct rtnl_link_stats64 *stats)
+#else
+static struct rtnl_link_stats64 *
+rnpgbe_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+#endif
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int i;
+
+	rcu_read_lock();
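+	/* the u64_stats fetch/retry loop provides a consistent snapshot of the
+	 * per-ring counters, even on 32-bit systems where 64-bit reads are not
+	 * atomic
+	 */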
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct rnpgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
+		u64 bytes, packets;
+		unsigned int start;
+
+		if (ring) {
+			do {
+				start = u64_stats_fetch_begin(&ring->syncp);
+				packets = ring->stats.packets;
+				bytes = ring->stats.bytes;
+			} while (u64_stats_fetch_retry(&ring->syncp, start));
+			stats->rx_packets += packets;
+			stats->rx_bytes += bytes;
+		}
+	}
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct rnpgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
+		u64 bytes, packets;
+		unsigned int start;
+
+		if (ring) {
+			do {
+				start = u64_stats_fetch_begin(&ring->syncp);
+				packets = ring->stats.packets;
+				bytes = ring->stats.bytes;
+			} while (u64_stats_fetch_retry(&ring->syncp, start));
+			stats->tx_packets += packets;
+			stats->tx_bytes += bytes;
+		}
+	}
+	rcu_read_unlock();
+	/* the following stats are updated by rnpgbe_watchdog_subtask() */
+	stats->multicast = netdev->stats.multicast;
+	stats->rx_errors = netdev->stats.rx_errors;
+	stats->rx_length_errors = netdev->stats.rx_length_errors;
+	stats->rx_crc_errors = netdev->stats.rx_crc_errors;
+	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
+
+#ifndef HAVE_VOID_NDO_GET_STATS64
+	return stats;
+#endif
+}
+#else
+/**
+ * rnpgbe_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats *rnpgbe_get_stats(struct net_device *netdev)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	/* update the stats data */
+	rnpgbe_update_stats(adapter);
+
+#ifdef HAVE_NETDEV_STATS_IN_NETDEV
+	/* only return the current stats */
+	return &netdev->stats;
+#else
+	/* only return the current stats */
+	return &adapter->net_stats;
+#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
+}
+
+#endif
+
+/**
+ * rnpgbe_setup_tc - configure net_device for multiple traffic classes
+ * @dev: net device to configure
+ * @tc: number of traffic classes to enable
+ */
+int rnpgbe_setup_tc(struct net_device *dev, u8 tc)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int ret = 0;
+
+	if (tc)
+		return -EINVAL;
+
+	/* if we are already in force mode there is no need to force it again;
+	 * otherwise force it now
+	 */
+	if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) {
+		hw->ops.set_mac_rx(hw, false);
+		if (hw->ops.driver_status)
+			hw->ops.driver_status(hw, true,
+					      rnpgbe_driver_force_control_phy);
+	}
+
+	/* Hardware supports up to 8 traffic classes */
+	if ((tc > RNP_MAX_TCS_NUM) || (tc == 1))
+		return -EINVAL;
+	/* TC is not supported together with SR-IOV */
+	if ((tc) && (adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+		return -EINVAL;
+
+	/* Hardware has to reinitialize queues and interrupts to
+	 * match packet buffer alignment. Unfortunately, the
+	 * hardware is not flexible enough to do this dynamically.
+	 */
+	while (test_and_set_bit(__RNP_RESETTING, &adapter->state))
+		usleep_range(1000, 2000);
+
+	if (netif_running(dev))
+		rnpgbe_close(dev);
+
+	rnpgbe_fdir_filter_exit(adapter);
+	adapter->priv_flags &= (~RNP_PRIV_FLAG_TCP_SYNC);
+	remove_mbx_irq(adapter);
+	rnpgbe_clear_interrupt_scheme(adapter);
+	adapter->num_tc = tc;
+
+	if (tc) {
+		netdev_set_num_tc(dev, tc);
+		adapter->flags |= RNP_FLAG_DCB_ENABLED;
+
+	} else {
+		netdev_reset_tc(dev);
+
+		adapter->flags &= ~RNP_FLAG_DCB_ENABLED;
+	}
+
+	rnpgbe_init_interrupt_scheme(adapter);
+
+	register_mbx_irq(adapter);
+	/* rss table must reset */
+	adapter->rss_tbl_setup_flag = 0;
+
+	if (netif_running(dev))
+		ret = rnpgbe_open(dev);
+	if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) {
+		if (hw->ops.driver_status)
+			hw->ops.driver_status(hw, false,
+					      rnpgbe_driver_force_control_phy);
+	}
+
+	clear_bit(__RNP_RESETTING, &adapter->state);
+	return ret;
+}
+
+#ifdef CONFIG_PCI_IOV
+void rnpgbe_sriov_reinit(struct rnpgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	rtnl_lock();
+	rnpgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
+	rtnl_unlock();
+	usleep_range(10000, 20000);
+}
+#endif
+
+static void rnpgbe_do_reset(struct net_device *netdev)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (netif_running(netdev))
+		rnpgbe_reinit_locked(adapter);
+	else
+		rnpgbe_reset(adapter);
+}
+
+#ifdef HAVE_NDO_SET_FEATURES
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+static u32 rnpgbe_fix_features(struct net_device *netdev, u32 features)
+#else
+static netdev_features_t rnpgbe_fix_features(struct net_device *netdev,
+					     netdev_features_t features)
+#endif
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) ||                                    \
+	defined(NETIF_F_HW_VLAN_STAG_FILTER) ||                                \
+	defined(NETIF_F_HW_VLAN_CTAG_RX) ||                                    \
+	defined(NETIF_F_HW_VLAN_STAG_RX) ||                                    \
+	defined(NETIF_F_HW_VLAN_STAG_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX)
+	struct rnpgbe_hw *hw = &adapter->hw;
+#endif
+
+	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
+	if (!(features & NETIF_F_RXCSUM))
+		features &= ~NETIF_F_LRO;
+
+	/* Turn off LRO if not RSC capable */
+	if (!(adapter->flags2 & RNP_FLAG2_RSC_CAPABLE))
+		features &= ~NETIF_F_LRO;
+
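+	/* keep the CTAG and STAG filter/offload settings in sync below: when
+	 * one is cleared, also clear the matching flag of the other, as the
+	 * hardware appears to handle both VLAN types together
+	 */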
+#ifdef NETIF_F_HW_VLAN_CTAG_FILTER
+
+	if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
+#ifdef NETIF_F_HW_VLAN_STAG_FILTER
+		if (hw->feature_flags & RNP_NET_FEATURE_STAG_FILTER)
+			features &= ~NETIF_F_HW_VLAN_STAG_FILTER;
+#endif
+	}
+
+#endif
+
+#ifdef NETIF_F_HW_VLAN_STAG_FILTER
+	if (hw->feature_flags & RNP_NET_FEATURE_STAG_FILTER) {
+		if (!(features & NETIF_F_HW_VLAN_STAG_FILTER)) {
+#ifdef NETIF_F_HW_VLAN_CTAG_FILTER
+			features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+#endif
+		}
+	}
+#endif
+
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+	if (!(features & NETIF_F_HW_VLAN_CTAG_RX)) {
+#ifdef NETIF_F_HW_VLAN_STAG_RX
+		if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD)
+			features &= ~NETIF_F_HW_VLAN_STAG_RX;
+#endif
+	}
+#endif
+
+#ifdef NETIF_F_HW_VLAN_STAG_RX
+
+	if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) {
+		if (!(features & NETIF_F_HW_VLAN_STAG_RX)) {
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+			features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+#endif
+		}
+	}
+#endif
+
+#ifdef NETIF_F_HW_VLAN_CTAG_TX
+
+	if (!(features & NETIF_F_HW_VLAN_CTAG_TX)) {
+#ifdef NETIF_F_HW_VLAN_STAG_RX
+		if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD)
+			features &= ~NETIF_F_HW_VLAN_STAG_TX;
+#endif
+	}
+#endif
+
+#ifdef NETIF_F_HW_VLAN_STAG_TX
+
+	if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) {
+		if (!(features & NETIF_F_HW_VLAN_STAG_TX)) {
+#ifdef NETIF_F_HW_VLAN_CTAG_TX
+			features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+#endif
+		}
+	}
+#endif
+
+	return features;
+}
+
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+static int rnpgbe_set_features(struct net_device *netdev, u32 features)
+#else
+static int rnpgbe_set_features(struct net_device *netdev,
+			       netdev_features_t features)
+#endif
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	netdev_features_t changed = netdev->features ^ features;
+	bool need_reset = false;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	netdev->features = features;
+
+	/* if changed ntuple should close all */
+	if (changed & NETIF_F_NTUPLE) {
+		if (!(features & NETIF_F_NTUPLE))
+			rnpgbe_fdir_filter_exit(adapter);
+	}
+
+	switch (features & NETIF_F_NTUPLE) {
+	case NETIF_F_NTUPLE:
+		/* turn off ATR, enable perfect filters and reset */
+		if (!(adapter->flags & RNP_FLAG_FDIR_PERFECT_CAPABLE))
+			need_reset = true;
+
+		adapter->flags &= ~RNP_FLAG_FDIR_HASH_CAPABLE;
+		adapter->flags |= RNP_FLAG_FDIR_PERFECT_CAPABLE;
+		break;
+	default:
+		/* turn off perfect filters, enable ATR and reset */
+		if (adapter->flags & RNP_FLAG_FDIR_PERFECT_CAPABLE)
+			need_reset = true;
+
+		adapter->flags &= ~RNP_FLAG_FDIR_PERFECT_CAPABLE;
+
+		/* We cannot enable ATR if SR-IOV is enabled */
+		if (adapter->flags & RNP_FLAG_SRIOV_ENABLED)
+			break;
+
+		/* We cannot enable ATR if we have 2 or more traffic classes */
+		if (netdev_get_num_tc(netdev) > 1)
+			break;
+
+		/* A sample rate of 0 indicates ATR disabled */
+		if (!adapter->atr_sample_rate)
+			break;
+
+		adapter->flags |= RNP_FLAG_FDIR_HASH_CAPABLE;
+		break;
+	}
+
+#ifdef NETIF_F_HW_VLAN_CTAG_FILTER
+	/* vlan filter changed */
+	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+		if (features & (NETIF_F_HW_VLAN_CTAG_FILTER)) {
+			/* do not enable if in promiscuous mode */
+			if (!(netdev->flags & IFF_PROMISC))
+				hw->ops.set_vlan_filter_en(hw, true);
+		} else {
+			hw->ops.set_vlan_filter_en(hw, false);
+		}
+		rnpgbe_msg_post_status(adapter, PF_VLAN_FILTER_STATUS);
+	}
+#endif /* NETIF_F_HW_VLAN_CTAG_FILTER */
+
+	/* rss hash changed */
+	if (changed & (NETIF_F_RXHASH)) {
+		bool iov_en = (adapter->flags & RNP_FLAG_SRIOV_ENABLED) ? true :
+									  false;
+
+		if (netdev->features & (NETIF_F_RXHASH))
+			hw->ops.set_rx_hash(hw, true, iov_en);
+		else
+			hw->ops.set_rx_hash(hw, false, iov_en);
+	}
+
+	/* rx fcs changed */
+	/* in this mode rx l4/sctp checksum will get error */
+	if (changed & NETIF_F_RXFCS) {
+
+		if (features & NETIF_F_RXFCS) {
+			adapter->priv_flags |= RNP_PRIV_FLAG_RX_FCS;
+			hw->ops.set_fcs_mode(hw, true);
+		} else {
+			adapter->priv_flags &= (~RNP_PRIV_FLAG_RX_FCS);
+			hw->ops.set_fcs_mode(hw, false);
+		}
+		rnpgbe_msg_post_status(adapter, PF_FCS_STATUS);
+	}
+
+	if (changed & NETIF_F_RXALL)
+		need_reset = true;
+
+	if (features & NETIF_F_RXALL)
+		adapter->priv_flags |= RNP_PRIV_FLAG_RX_ALL;
+	else
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_RX_ALL);
+
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		rnpgbe_vlan_strip_enable(adapter);
+	else
+		rnpgbe_vlan_strip_disable(adapter);
+#endif
+
+	if (need_reset)
+		rnpgbe_do_reset(netdev);
+
+	return 0;
+}
+#endif /* HAVE_NDO_SET_FEATURES */
+
+#ifdef HAVE_BRIDGE_ATTRIBS
+#ifdef HAVE_NDO_BRIDGE_SETLINK_EXTACK
+static int
+rnpgbe_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+			  __always_unused u16 flags,
+			  struct netlink_ext_ack __always_unused *ext)
+#elif defined(HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS)
+static int rnpgbe_ndo_bridge_setlink(struct net_device *dev,
+				     struct nlmsghdr *nlh,
+				     __always_unused u16 flags)
+#else
+static int rnpgbe_ndo_bridge_setlink(struct net_device *dev,
+				     struct nlmsghdr *nlh)
+#endif /* HAVE_NDO_BRIDGE_SETLINK_EXTACK */
+
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct nlattr *attr, *br_spec;
+	int rem;
+
+	if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+		return -EOPNOTSUPP;
+
+	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+
+	nla_for_each_nested(attr, br_spec, rem) {
+		__u16 mode;
+
+		if (nla_type(attr) != IFLA_BRIDGE_MODE)
+			continue;
+
+		mode = nla_get_u16(attr);
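+		/* VEPA sets the DMA_VEB_BYPASS bit so the embedded switch is
+		 * bypassed and traffic goes out to the external switch; VEB
+		 * clears it (inferred from the register writes below)
+		 */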
+		if (mode == BRIDGE_MODE_VEPA) {
+			adapter->flags2 &= ~RNP_FLAG2_BRIDGE_MODE_VEB;
+			wr32(hw, RNP_DMA_CONFIG,
+			     rd32(hw, RNP_DMA_CONFIG) | DMA_VEB_BYPASS);
+		} else if (mode == BRIDGE_MODE_VEB) {
+			adapter->flags2 |= RNP_FLAG2_BRIDGE_MODE_VEB;
+			wr32(hw, RNP_DMA_CONFIG,
+			     rd32(hw, RNP_DMA_CONFIG) & (~DMA_VEB_BYPASS));
+
+		} else
+			return -EINVAL;
+
+		e_info(drv, "enabling bridge mode: %s\n",
+		       mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+	}
+
+	return 0;
+}
+
+#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
+static int rnpgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+				     struct net_device *dev,
+				     u32 __maybe_unused filter_mask,
+				     int nlflags)
+#elif defined(HAVE_BRIDGE_FILTER)
+static int rnpgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+				     struct net_device *dev,
+				     u32 __always_unused filter_mask)
+#else
+static int rnpgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+				     struct net_device *dev)
+#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+	u16 mode;
+
+	if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+		return 0;
+
+	if (adapter->flags2 & RNP_FLAG2_BRIDGE_MODE_VEB)
+		mode = BRIDGE_MODE_VEB;
+	else
+		mode = BRIDGE_MODE_VEPA;
+
+#ifdef HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
+	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags,
+				       filter_mask, NULL);
+#elif defined(HAVE_NDO_BRIDGE_GETLINK_NLFLAGS)
+	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags);
+#elif defined(HAVE_NDO_FDB_ADD_VID) ||                                         \
+	defined NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS
+	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0);
+#else
+	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
+#endif /* HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT */
+}
+
+#endif /* HAVE_BRIDGE_ATTRIBS */
+
+#ifdef HAVE_NDO_FEATURES_CHECK
+#define RNP_MAX_TUNNEL_HDR_LEN 80
+#ifdef NETIF_F_GSO_PARTIAL
+#define RNP_MAX_MAC_HDR_LEN 127
+#define RNP_MAX_NETWORK_HDR_LEN 511
+
+static netdev_features_t rnpgbe_features_check(struct sk_buff *skb,
+					       struct net_device *dev,
+					       netdev_features_t features)
+{
+	unsigned int network_hdr_len, mac_hdr_len;
+
+	/* Make certain the headers can be described by a context descriptor */
+	mac_hdr_len = skb_network_header(skb) - skb->data;
+	if (unlikely(mac_hdr_len > RNP_MAX_MAC_HDR_LEN))
+		return features &
+		       ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC |
+			 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_TSO | NETIF_F_TSO6);
+
+	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+	if (unlikely(network_hdr_len > RNP_MAX_NETWORK_HDR_LEN))
+		return features & ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC |
+				    NETIF_F_TSO | NETIF_F_TSO6);
+
+	/* We can only support IPV4 TSO in tunnels if we can mangle the
+	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
+	 */
+	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+		features &= ~NETIF_F_TSO;
+
+	return features;
+}
+#else
+static netdev_features_t rnpgbe_features_check(struct sk_buff *skb,
+					       struct net_device *dev,
+					       netdev_features_t features)
+{
+	if (!skb->encapsulation)
+		return features;
+
+	if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
+		     RNP_MAX_TUNNEL_HDR_LEN))
+		return features & ~NETIF_F_CSUM_MASK;
+
+	return features;
+}
+
+#endif /* NETIF_F_GSO_PARTIAL */
+#endif /* HAVE_NDO_FEATURES_CHECK */
+
+#ifdef HAVE_NET_DEVICE_OPS
+const struct net_device_ops rnpgbe_netdev_ops = {
+	.ndo_open = rnpgbe_open,
+	.ndo_stop = rnpgbe_close,
+	.ndo_start_xmit = rnpgbe_xmit_frame,
+	.ndo_set_rx_mode = rnpgbe_set_rx_mode,
+	.ndo_validate_addr = eth_validate_addr,
+
+#ifdef HAVE_NDO_ETH_IOCTL
+	.ndo_eth_ioctl = rnpgbe_ioctl,
+#else
+	.ndo_do_ioctl = rnpgbe_ioctl,
+#endif /* HAVE_NDO_ETH_IOCTL */
+
+#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT
+	/* RHEL7 requires this to be defined to enable extended ops.
+	 * RHEL7 uses the function get_ndo_ext to retrieve offsets for
+	 * extended fields from within the net_device_ops struct and
+	 * ndo_size is checked to determine whether or not
+	 * the offset is valid.
+	 */
+	.ndo_size = sizeof(const struct net_device_ops),
+#endif
+#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU
+	.extended.ndo_change_mtu = rnpgbe_change_mtu,
+#else
+	.ndo_change_mtu = rnpgbe_change_mtu,
+#endif
+#ifdef HAVE_NDO_GET_STATS64
+	.ndo_get_stats64 = rnpgbe_get_stats64,
+#else
+	.ndo_get_stats = rnpgbe_get_stats,
+#endif
+	.ndo_tx_timeout = rnpgbe_tx_timeout,
+#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_TX_MAXRATE
+	.extended.ndo_set_tx_maxrate = rnpgbe_tx_maxrate,
+#else
+#ifndef NO_TX_MAXRATE
+	.ndo_set_tx_maxrate = rnpgbe_tx_maxrate,
+#endif
+#endif
+	.ndo_set_mac_address = rnpgbe_set_mac,
+#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX)
+	.ndo_vlan_rx_add_vid = rnpgbe_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid = rnpgbe_vlan_rx_kill_vid,
+#endif
+
+#ifdef IFLA_VF_MAX
+	.ndo_set_vf_mac = rnpgbe_ndo_set_vf_mac,
+#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN
+	.extended.ndo_set_vf_vlan = rnpgbe_ndo_set_vf_vlan,
+#else
+	.ndo_set_vf_vlan = rnpgbe_ndo_set_vf_vlan,
+#endif
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+	.ndo_set_vf_rate = rnpgbe_ndo_set_vf_bw,
+#else
+	.ndo_set_vf_tx_rate = rnpgbe_ndo_set_vf_bw,
+#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
+#if defined(HAVE_VF_SPOOFCHK_CONFIGURE) && IS_ENABLED(CONFIG_PCI_IOV)
+	.ndo_set_vf_spoofchk = rnpgbe_ndo_set_vf_spoofchk,
+#endif
+
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+	.ndo_set_vf_link_state = rnpgbe_ndo_set_vf_link_state,
+#endif
+#ifdef HAVE_NDO_SET_VF_TRUST
+#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT
+	.extended.ndo_set_vf_trust = rnpgbe_ndo_set_vf_trust,
+#else
+	.ndo_set_vf_trust = rnpgbe_ndo_set_vf_trust,
+#endif /* HAVE_RHEL7_NET_DEVICE_OPS_EXT */
+#endif /* HAVE_NDO_SET_VF_TRUST */
+	.ndo_get_vf_config = rnpgbe_ndo_get_vf_config,
+#endif /* IFLA_VF_MAX */
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = rnpgbe_netpoll,
+#endif
+#ifdef HAVE_FDB_OPS
+//.ndo_fdb_add		= rnpgbe_ndo_fdb_add,
+#endif
+#ifdef HAVE_BRIDGE_ATTRIBS
+	.ndo_bridge_setlink = rnpgbe_ndo_bridge_setlink,
+	.ndo_bridge_getlink = rnpgbe_ndo_bridge_getlink,
+#endif
+#ifdef HAVE_NDO_FEATURES_CHECK
+	.ndo_features_check = rnpgbe_features_check,
+#endif /* HAVE_NDO_FEATURES_CHECK */
+
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+};
+
+/* RHEL6 keeps these operations in a separate structure */
+static const struct net_device_ops_ext rnpgbe_netdev_ops_ext = {
+	.size = sizeof(struct net_device_ops_ext),
+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
+#ifdef HAVE_NDO_SET_FEATURES
+	.ndo_set_features = rnpgbe_set_features,
+	.ndo_fix_features = rnpgbe_fix_features,
+#endif /* HAVE_NDO_SET_FEATURES */
+};
+#endif /* HAVE_NET_DEVICE_OPS */
+
+static void rnpgbe_assign_netdev_ops(struct net_device *dev)
+{
+	/* different hw types can assign different functions */
+#ifdef HAVE_NET_DEVICE_OPS
+	dev->netdev_ops = &rnpgbe_netdev_ops;
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+	set_netdev_ops_ext(dev, &rnpgbe_netdev_ops_ext);
+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
+#else /* HAVE_NET_DEVICE_OPS */
+	dev->open = &rnpgbe_open;
+	dev->stop = &rnpgbe_close;
+	dev->hard_start_xmit = &rnpgbe_xmit_frame;
+#ifdef HAVE_SET_RX_MODE
+	dev->set_rx_mode = &rnpgbe_set_rx_mode;
+#endif
+	dev->set_multicast_list = &rnpgbe_set_rx_mode;
+	dev->set_mac_address = &rnpgbe_set_mac;
+	dev->change_mtu = &rnpgbe_change_mtu;
+	dev->do_ioctl = &rnpgbe_ioctl;
+#ifdef HAVE_TX_TIMEOUT
+	dev->tx_timeout = &rnpgbe_tx_timeout;
+#endif
+#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX)
+	dev->vlan_rx_add_vid = &rnpgbe_vlan_rx_add_vid;
+	dev->vlan_rx_kill_vid = &rnpgbe_vlan_rx_kill_vid;
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = &rnpgbe_netpoll;
+#endif
+#ifdef HAVE_NETDEV_SELECT_QUEUE
+	dev->select_queue = &__netdev_pick_tx;
+#endif /* HAVE_NETDEV_SELECT_QUEUE */
+#endif /* HAVE_NET_DEVICE_OPS */
+	rnpgbe_set_ethtool_ops(dev);
+	dev->watchdog_timeo = 5 * HZ;
+}
+
+/**
+ * rnpgbe_wol_supported - Check whether device supports WoL
+ * @adapter: pointer to the adapter structure
+ * @device_id: the device ID
+ *
+ * This function is used by probe and ethtool to determine
+ * which devices have WoL support
+ *
+ **/
+int rnpgbe_wol_supported(struct rnpgbe_adapter *adapter, u16 device_id)
+{
+	int is_wol_supported = 0;
+
+	switch (device_id) {
+	case PCI_DEVICE_ID_N210:
+	case PCI_DEVICE_ID_N500_QUAD_PORT:
+	case PCI_DEVICE_ID_N500_DUAL_PORT:
+		is_wol_supported = 1;
+		break;
+	default:
+		is_wol_supported = 0;
+		break;
+	}
+
+	return is_wol_supported;
+}
+
+static inline unsigned long rnpgbe_tso_features(struct rnpgbe_hw *hw)
+{
+	unsigned long features = 0;
+
+#ifdef NETIF_F_TSO
+	if (hw->feature_flags & RNP_NET_FEATURE_TSO)
+		features |= NETIF_F_TSO;
+#endif /* NETIF_F_TSO */
+#ifdef NETIF_F_TSO6
+	if (hw->feature_flags & RNP_NET_FEATURE_TSO)
+		features |= NETIF_F_TSO6;
+#endif /* NETIF_F_TSO6 */
+#ifdef NETIF_F_GSO_PARTIAL
+	features |= NETIF_F_GSO_PARTIAL;
+	if (hw->feature_flags & RNP_NET_FEATURE_TX_UDP_TUNNEL)
+		features |= RNP_GSO_PARTIAL_FEATURES;
+#endif
+
+	return features;
+}
+
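+/* The first MSI-X vector (msix_entries[0]) is reserved as the "other"
+ * interrupt that carries firmware/VF mailbox events; these helpers attach
+ * and detach it.
+ */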
+static void remove_mbx_irq(struct rnpgbe_adapter *adapter)
+{
+	/* mbx */
+	if (adapter->num_other_vectors) {
+		/* only msix mode uses an independent interrupt */
+		if (adapter->flags & RNP_FLAG_MSIX_ENABLED) {
+			adapter->hw.mbx.ops.configure(
+				&adapter->hw, adapter->msix_entries[0].entry,
+				false);
+			if (adapter->hw.mbx.other_irq_enabled) {
+				free_irq(adapter->msix_entries[0].vector, adapter);
+				adapter->hw.mbx.other_irq_enabled = false;
+			}
+		}
+	}
+}
+
+static int register_mbx_irq(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	int err = 0;
+
+	/* for mbx:vector0 */
+	if (adapter->num_other_vectors) {
+		/* only do this in msix mode */
+		if (adapter->flags & RNP_FLAG_MSIX_ENABLED) {
+			err = request_irq(adapter->msix_entries[0].vector,
+					  rnpgbe_msix_other, 0, netdev->name,
+					  adapter);
+			if (err) {
+				e_err(probe,
+				      "request_irq for msix_other failed: %d\n",
+				      err);
+				goto err_mbx;
+			}
+			hw->mbx.ops.configure(
+				hw, adapter->msix_entries[0].entry, true);
+			adapter->hw.mbx.other_irq_enabled = true;
+		}
+	}
+
+err_mbx:
+	return err;
+}
+
+static int rnpgbe_rm_adpater(struct rnpgbe_adapter *adapter)
+{
+	struct net_device *netdev;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	netdev = adapter->netdev;
+
+	/* if not register, just return */
+	if (adapter->flags2 & RNP_FLAG2_NO_NET_REG) {
+		free_netdev(netdev);
+		return 0;
+	}
+
+	pr_info("= remove adapter:%s =\n", netdev->name);
+
+	rnpgbe_dbg_adapter_exit(adapter);
+
+	netif_carrier_off(netdev);
+
+	set_bit(__RNP_DOWN, &adapter->state);
+	set_bit(__RNP_REMOVE, &adapter->state);
+#ifdef HAVE_PTP_1588_CLOCK
+	if (module_enable_ptp) {
+		while (test_bit(__RNP_PTP_TX_IN_PROGRESS, &adapter->state))
+			usleep_range(10000, 20000);
+		cancel_work_sync(&adapter->tx_hwtstamp_work);
+	}
+#endif
+	if (adapter->eee_active) {
+		adapter->eee_active = 0;
+		rnpgbe_eee_init(adapter);
+	}
+	cancel_work_sync(&adapter->service_task);
+
+	del_timer_sync(&adapter->service_timer);
+#ifdef CONFIG_RNP_DCA
+	if (adapter->flags & RNP_FLAG_DCA_ENABLED) {
+		adapter->flags &= ~RNP_FLAG_DCA_ENABLED;
+		dca_remove_requester(&adapter->pdev->dev);
+		wr32(&adapter->hw, RNP_DCA_CTRL, 1);
+	}
+#endif
+	rnpgbe_sysfs_exit(adapter);
+
+	rnpgbe_fdir_filter_exit(adapter);
+	adapter->priv_flags &= (~RNP_PRIV_FLAG_TCP_SYNC);
+
+	if (netdev->reg_state == NETREG_REGISTERED)
+		unregister_netdev(netdev);
+
+	adapter->netdev = NULL;
+
+	if (hw->ops.driver_status)
+		hw->ops.driver_status(hw, false, rnpgbe_driver_insmod);
+
+	remove_mbx_irq(adapter);
+
+	rnpgbe_clear_interrupt_scheme(adapter);
+
+	if (adapter->io_addr)
+		iounmap(adapter->io_addr);
+
+	if (adapter->io_addr_bar0)
+		iounmap(adapter->io_addr_bar0);
+
+	free_netdev(netdev);
+
+	pr_info("remove complete\n");
+
+	return 0;
+}
+
+#ifndef NO_CM3_MBX
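+/* Validate a firmware image before it is written to flash: the image must
+ * start with the 0xa55aa55a magic word and, if it carries a device id,
+ * that id must match the chip variant reported by the flash window.
+ */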
+static int rnpgbe_check_fw_from_flash(struct rnpgbe_hw *hw, const u8 *data)
+{
+	u32 device_id;
+	int ret = 0;
+	u32 chip_data;
+
+	if (*((u32 *)(data)) != 0xa55aa55a)
+		return -EINVAL;
+
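+	/* the device id is stored as a u16 at byte offset 60 of the image */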
+	device_id = *((u16 *)data + 30);
+
+	/* if no device_id no check */
+	if ((device_id == 0) || (device_id == 0xffff))
+		return 0;
+
+#define CHIP_OFFSET (0x1f014 + 0x1000)
+	/* we should get hw_type from sfc-flash */
+	chip_data = ioread32(hw->hw_addr + CHIP_OFFSET);
+	if (chip_data == 0x11111111)
+		hw->hw_type = rnpgbe_hw_n210;
+	else if (chip_data == 0x0)
+		hw->hw_type = rnpgbe_hw_n210L;
+	else
+		return 0;
+
+	switch (hw->hw_type) {
+	case rnpgbe_hw_n210:
+		if (device_id != 0x8208)
+			ret = 1;
+		break;
+	case rnpgbe_hw_n210L:
+		if (device_id != 0x820a)
+			ret = 1;
+		break;
+	default:
+		ret = 1;
+	}
+
+	return ret;
+}
+
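+/* Copy a firmware image word-by-word into the device BAR at offset 0x1000,
+ * skipping the 0x1f000-0x1ffff region, then read everything back to verify
+ * that the write took effect.
+ */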
+static int rnpgbe_init_firmware(struct rnpgbe_hw *hw, const u8 *data,
+				int file_size)
+{
+	struct device *dev = &(hw->pdev->dev);
+	loff_t old_pos = 0;
+	loff_t pos = 0;
+	loff_t end_pos = file_size;
+	u32 rd_len = 0x1000;
+	int get_len = 0;
+	u32 iter = 0;
+	int err = 0;
+	u32 fw_off = 0;
+	u32 old_data = 0;
+	u32 new_data = 0;
+	char *buf = kzalloc(0x1000, GFP_KERNEL);
+
+	if (!buf)
+		return -ENOMEM;
+
+	dev_info(dev, "initializing firmware, which will take some time.");
+	/* copy the firmware image into the BAR */
+	while (pos < end_pos) {
+		if ((pos >= 0x1f000) && (pos < 0x20000)) {
+			pos += rd_len;
+			continue;
+		}
+
+		old_pos = pos;
+		if (end_pos - pos < rd_len)
+			get_len = end_pos - pos;
+		else
+			get_len = rd_len;
+
+		memcpy(buf, data + pos, get_len);
+
+		if (((get_len < rd_len) && ((old_pos + get_len) != end_pos)) ||
+		    (get_len < 0)) {
+			dev_err(dev, "read err, pos 0x%x, get len %d",
+				(u32)old_pos, get_len);
+			kfree(buf);
+			return -EIO;
+		}
+
+		for (iter = 0; iter < get_len; iter += 4) {
+			old_data = *((u32 *)(buf + iter));
+			fw_off = (u32)old_pos + iter + 0x1000;
+			iowrite32(old_data, (hw->hw_addr + fw_off));
+		}
+
+		if (pos == old_pos)
+			pos += get_len;
+	}
+
+	dev_info(dev, "Checking for firmware. Wait a moment, please.");
+	/* check */
+	pos = 0x0;
+	while (pos < end_pos) {
+		if ((pos >= 0x1f000) && (pos < 0x20000)) {
+			pos += rd_len;
+			continue;
+		}
+
+		old_pos = pos;
+
+		if (end_pos - pos < rd_len)
+			get_len = end_pos - pos;
+		else
+			get_len = rd_len;
+
+		memcpy(buf, data + pos, get_len);
+		if (((get_len < rd_len) && ((old_pos + get_len) != end_pos)) ||
+		    (get_len < 0)) {
+			dev_err(dev, "read err, pos 0x%x, get len %d",
+				(u32)old_pos, get_len);
+			kfree(buf);
+			err = -EIO;
+			return err;
+		}
+
+		for (iter = 0; iter < get_len; iter += 4) {
+			old_data = *((u32 *)(buf + iter));
+			fw_off = (u32)old_pos + iter + 0x1000;
+			new_data = ioread32(hw->hw_addr + fw_off);
+			if (old_data != new_data) {
+				dev_err(dev,
+					"Err at 0x%08x write:0x%08x read:0x%08x",
+					fw_off, old_data, new_data);
+				err = -EIO;
+			}
+		}
+
+		if (pos == old_pos)
+			pos += get_len;
+	}
+
+	kfree(buf);
+	return err;
+}
+#endif /* NO_CM3_MBX */
+
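+/**
+ * rnpgbe_add_adpater - allocate and set up one adapter instance
+ * @pdev: PCI device information struct
+ * @ii: board specific information for this device id
+ * @padapter: optional output pointer for the new adapter
+ *
+ * Allocates the net_device, maps the NIC BAR for the detected hw type,
+ * queries capabilities over the firmware mailbox, fills in the netdev
+ * feature flags and finally registers the net_device.
+ **/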
+static int rnpgbe_add_adpater(struct pci_dev *pdev, struct rnpgbe_info *ii,
+			      struct rnpgbe_adapter **padapter)
+{
+	int i, err = 0;
+	struct rnpgbe_adapter *adapter = NULL;
+	struct net_device *netdev;
+	struct rnpgbe_hw *hw;
+	u8 __iomem *hw_addr = NULL;
+	u8 __iomem *hw_addr_bar0 = NULL;
+
+	u32 dma_version = 0;
+	u32 nic_version = 0;
+	u32 queues = ii->total_queue_pair_cnts;
+	static int bd_number;
+	//netdev_features_t hw_enc_features = 0;
+#ifndef NETIF_F_GSO_PARTIAL
+#ifdef HAVE_NDO_SET_FEATURES
+#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+	netdev_features_t hw_features;
+#else
+	u32 hw_features;
+#endif
+#endif
+#endif /* NETIF_F_GSO_PARTIAL */
+
+	pr_info("====  add adapter queues:%d ====", queues);
+#ifdef HAVE_TX_MQ
+	netdev = alloc_etherdev_mq(sizeof(struct rnpgbe_adapter), queues);
+#else
+	queues = 1;
+	netdev = alloc_etherdev(sizeof(struct rnpgbe_adapter));
+#endif
+	if (!netdev)
+		return -ENOMEM;
+
+	if (!fix_eth_name)
+		SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	adapter = netdev_priv(netdev);
+
+	memset((char *)adapter, 0x00, sizeof(struct rnpgbe_adapter));
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+#ifdef HAVE_TX_MQ
+#ifndef HAVE_NETDEV_SELECT_QUEUE
+	adapter->indices = queues;
+#endif
+
+#endif
+	adapter->max_ring_pair_counts = queues;
+	if (padapter)
+		*padapter = adapter;
+
+	adapter->bd_number = bd_number++;
+	adapter->port = 0;
+	snprintf(adapter->name, sizeof(netdev->name), "%s%d",
+		 rnpgbe_driver_name, adapter->bd_number);
+	pci_set_drvdata(pdev, adapter);
+
+	hw = &adapter->hw;
+	hw->back = adapter;
+	/* first setup hw type */
+	hw->pdev = pdev;
+	hw->rss_type = ii->rss_type;
+	hw->hw_type = ii->hw_type;
+	switch (hw->hw_type) {
+	case rnpgbe_hw_n500:
+		/* n500 use bar2 */
+#define RNP_NIC_BAR_N500 2
+		hw_addr = ioremap(pci_resource_start(pdev, RNP_NIC_BAR_N500),
+				  pci_resource_len(pdev, RNP_NIC_BAR_N500));
+		if (!hw_addr) {
+			dev_err(&pdev->dev, "ioremap bar%d failed!\n",
+				RNP_NIC_BAR_N500);
+			err = -EIO;
+			goto err_free_net;
+		}
+		pr_info("[bar%d]: %p %llx len=%d kB\n", RNP_NIC_BAR_N500,
+			hw_addr,
+			(unsigned long long)pci_resource_start(
+				pdev, RNP_NIC_BAR_N500),
+			(int)pci_resource_len(pdev, RNP_NIC_BAR_N500) / 1024);
+		/* get dma version */
+		dma_version = rnpgbe_rd_reg(hw_addr);
+
+		hw->hw_addr = hw_addr;
+		/* setup msix base */
+		hw->ring_msix_base = hw->hw_addr + 0x28700;
+		//hw->ring_msix_base = hw->hw_addr + 0x2a000;
+
+		// todo n500 no need this ?
+		// hw->pfvfnum = PF_NUM_N500(rnpgbe_get_fuc(pdev));
+		hw->pfvfnum_system = PF_NUM_N500(rnpgbe_get_fuc(pdev));
+		nic_version = rd32(hw, RNP500_TOP_NIC_VERSION);
+		adapter->irq_mode = irq_mode_msix;
+		adapter->flags |= RNP_FLAG_MSI_CAPABLE | RNP_FLAG_MSIX_CAPABLE |
+				  RNP_FLAG_LEGACY_CAPABLE;
+		/* when FW_UART_SHOW_TSTAMPS is defined, map bar0 as well */
+#ifdef FW_UART_SHOW_TSTAMPS
+		hw_addr_bar0 = ioremap(pci_resource_start(pdev, 0),
+				       pci_resource_len(pdev, 0));
+#endif
+		break;
+
+	case rnpgbe_hw_n210:
+	case rnpgbe_hw_n210L:
+#define RNP_NIC_BAR_N210 2
+#ifndef NO_CM3_MBX
+		if (pci_resource_len(pdev, 0) == 0x100000) {
+			char *filename = "n210_driver_update.bin";
+			const struct firmware *fw;
+			int rc;
+
+			hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+					      pci_resource_len(pdev, 0));
+			if (!(hw->hw_addr)) {
+				dev_err(&pdev->dev, "ioremap bar0 failed");
+				err = -EIO;
+				goto err_free_net;
+			}
+
+			rc = request_firmware(&fw, filename, &pdev->dev);
+			if (rc != 0) {
+				dev_err(&pdev->dev,
+					"Error %d requesting firmware file: %s\n",
+					rc, filename);
+				iounmap(hw->hw_addr);
+				err = rc;
+				goto err_free_net;
+			}
+			dev_info(&pdev->dev, "%s size %zu", filename, fw->size);
+			// check hw_type and firmware here
+			if (rnpgbe_check_fw_from_flash(hw, fw->data)) {
+				dev_err(&pdev->dev, "firmware type error\n");
+				release_firmware(fw);
+				iounmap(hw->hw_addr);
+				err = -EIO;
+				goto err_free_net;
+			}
+			/* first protect off */
+			rsp_hal_sfc_write_protect(hw, 0);
+
+			err = rsp_hal_sfc_flash_erase(hw, fw->size);
+			if (err) {
+				release_firmware(fw);
+				dev_err(&pdev->dev, "erase flash failed!");
+				iounmap(hw->hw_addr);
+				goto err_free_net;
+			}
+
+			err = rnpgbe_init_firmware(hw, fw->data, fw->size);
+			if (err) {
+				release_firmware(fw);
+				dev_err(&pdev->dev, "init firmware failed!");
+				iounmap(hw->hw_addr);
+				goto err_free_net;
+			}
+			dev_info(&pdev->dev, "init firmware successfully.");
+			dev_info(&pdev->dev,
+				 "Please reboot. Then you can use the device.");
+
+			release_firmware(fw);
+			iounmap(hw->hw_addr);
+			adapter->flags2 |= RNP_FLAG2_NO_NET_REG;
+			return 0;
+		}
+#endif /* NO_CM3_MBX */
+		hw_addr = ioremap(pci_resource_start(pdev, RNP_NIC_BAR_N210),
+				  pci_resource_len(pdev, RNP_NIC_BAR_N210));
+		if (!hw_addr) {
+			dev_err(&pdev->dev, "ioremap bar%d failed!\n",
+				RNP_NIC_BAR_N210);
+			err = -EIO;
+			goto err_free_net;
+		}
+		pr_info("[bar%d]:%p %llx len=%d MB\n", RNP_NIC_BAR_N210,
+			hw_addr,
+			(unsigned long long)pci_resource_start(
+				pdev, RNP_NIC_BAR_N210),
+			(int)pci_resource_len(pdev, RNP_NIC_BAR_N210) / 1024 /
+				1024);
+		/* get dma version */
+		dma_version = rnpgbe_rd_reg(hw_addr);
+
+		hw->hw_addr = hw_addr;
+		/* setup msix base */
+		hw->ring_msix_base = hw->hw_addr + 0x29000;
+		//hw->ring_msix_base = hw->hw_addr + 0x2a000;
+
+		// todo n500 no need this ?
+		hw->pfvfnum_system = PF_NUM_N500(rnpgbe_get_fuc(pdev));
+		nic_version = rd32(hw, RNP500_TOP_NIC_VERSION);
+		adapter->irq_mode = irq_mode_msix;
+		adapter->flags |= RNP_FLAG_MSI_CAPABLE | RNP_FLAG_MSIX_CAPABLE |
+				  RNP_FLAG_LEGACY_CAPABLE;
+		break;
+	default:
+		dev_err(&pdev->dev, "unsupported hw type\n");
+		err = -EIO;
+		goto err_free_net;
+	}
+
+	/* assign to adapter */
+	adapter->io_addr = hw_addr;
+	adapter->io_addr_bar0 = hw_addr_bar0;
+
+	hw->dma_version = dma_version;
+	adapter->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV
+#ifdef MSG_PROBE_ENABLE
+			| NETIF_MSG_PROBE
+#endif
+#ifdef MSG_IFUP_ENABLE
+			| NETIF_MSG_IFUP
+#endif
+#ifdef MSG_IFDOWN_ENABLE
+			| NETIF_MSG_IFDOWN
+#endif
+	);
+
+	/* we have other irq */
+	adapter->num_other_vectors = 1;
+	/* get software info */
+	ii->get_invariants(hw);
+
+	spin_lock_init(&adapter->link_stat_lock);
+
+	if (adapter->num_other_vectors) {
+		/* Mailbox */
+		rnpgbe_init_mbx_params_pf(hw);
+		memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
+		if (dma_version >= 0x20210111) {
+#ifndef NO_CM3_MBX
+			rnpgbe_mbx_link_event_enable(hw, 0);
+			/* call driver status */
+			if (hw->ops.driver_status)
+				hw->ops.driver_status(hw, true, rnpgbe_driver_insmod);
+			if (rnpgbe_mbx_get_capability(hw, ii)) {
+				dev_err(&pdev->dev,
+					"rnpgbe_mbx_get_capability failed!\n");
+				err = -EIO;
+				goto err_free_net;
+			}
+
+			/* get lldp status if version is larger than 0.1.1.40 */
+			if ((hw->fw_version >= 0x00010128) &&
+			    ((hw->fw_version & 0xff000000) == 0))
+				rnpgbe_mbx_lldp_get(hw);
+
+			if (hw->lldp_status.enable)
+				adapter->priv_flags |= RNP_PRIV_FLAG_LLDP;
+
+			hw->usecstocount = hw->axi_mhz;
+
+			{
+				struct rnpgbe_eee_cap eee_cap;
+
+				memset(&eee_cap, 0x00,
+				       sizeof(struct rnpgbe_eee_cap));
+
+				if (hw->feature_flags & RNP_HW_FEATURE_EEE) {
+					rnpgbe_mbx_get_eee_capability(hw,
+								      &eee_cap);
+					if (eee_cap.local_capability) {
+						hw->eee_capability =
+							eee_cap.local_capability;
+						adapter->eee_enabled = 1;
+						adapter->local_eee =
+							eee_cap.local_eee;
+						adapter->partner_eee =
+							eee_cap.partner_eee;
+						rnpgbe_mbx_phy_eee_set(
+							hw,
+							adapter->tx_lpi_timer,
+							hw->eee_capability);
+					}
+				}
+			}
+			{
+				/* for pf0 we cannot detect running in a vm, so close the mask by default */
+				if (!hw->pfvfnum) {
+					hw->feature_flags &=
+						(~RNP_HW_SOFT_MASK_OTHER_IRQ);
+
+#ifdef FW_UART_SHOW_TSTAMPS
+					rnpgbe_wr_reg(hw_addr_bar0 + 0xc04,
+						      0xffff0101);
+#endif
+				} else {
+					if (hw->pfvfnum == hw->pfvfnum_system)
+						hw->feature_flags |=
+							RNP_HW_SOFT_MASK_OTHER_IRQ;
+				}
+			}
+
+			adapter->portid_of_card = hw->port_id[0];
+#else
+			hw->fw_version = rnpgbe_rd_reg(hw_addr + 0x8024);
+			rnpgbe_dbg("no mbx to get capability\n");
+#endif
+			adapter->portid_of_card = hw->pfvfnum >> 5;
+			adapter->wol = hw->wol;
+		}
+	}
+
+	if (hw->force_en)
+		adapter->priv_flags |= RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE;
+	hw->driver_version = driver_version;
+
+	hw->default_rx_queue = 0;
+	pr_info("%s %s: dma version:0x%x, nic version:0x%x, pfvfnum:0x%x\n",
+		adapter->name, pci_name(pdev), hw->dma_version, nic_version,
+		hw->pfvfnum);
+
+	/* Setup hw api */
+	hw->mac.type = ii->mac;
+	/* EEPROM */
+	if (ii->eeprom_ops)
+		memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
+
+	hw->phy.sfp_type = rnpgbe_sfp_type_unknown;
+	hw->ops.setup_ethtool(netdev);
+	rnpgbe_assign_netdev_ops(netdev);
+
+	rnpgbe_check_options(adapter);
+	/* set up the private structure; this is done only once */
+	err = rnpgbe_sw_init(adapter);
+	if (err)
+		goto err_sw_init;
+
+	err = hw->ops.reset_hw(hw);
+	hw->phy.reset_if_overtemp = false;
+	if (err) {
+		e_dev_err("HW Init failed: %d\n", err);
+		goto err_sw_init;
+	}
+	hw->ops.setup_link(hw, DEFAULT_ADV, 1, 0, 0);
+	hw->advertised_link = DEFAULT_ADV;
+	/* should force phy down first */
+	hw->ops.set_mbx_link_event(hw, 0);
+	hw->ops.set_mbx_ifup(hw, 0);
+
+#if defined(CONFIG_PCI_IOV)
+	if (adapter->num_other_vectors) {
+		rnpgbe_enable_sriov(adapter);
+		pci_sriov_set_totalvfs(pdev, hw->max_vfs - 1);
+	}
+#endif
+
+#ifdef HAVE_NETDEVICE_MIN_MAX_MTU
+	/* MTU range: 68 - 9710 */
+#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU
+	netdev->extended->min_mtu = hw->min_length;
+	netdev->extended->max_mtu =
+		hw->max_length - (ETH_HLEN + 2 * ETH_FCS_LEN);
+#else
+	netdev->min_mtu = hw->min_length;
+	netdev->max_mtu = hw->max_length - (ETH_HLEN + 2 * ETH_FCS_LEN);
+#endif
+#endif
+
+#ifdef NETIF_F_GSO_PARTIAL
+
+	if (hw->feature_flags & RNP_NET_FEATURE_SG)
+		netdev->features |= NETIF_F_SG;
+	if (hw->feature_flags & RNP_NET_FEATURE_TSO)
+		netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+	if (hw->feature_flags & RNP_NET_FEATURE_RX_HASH)
+		netdev->features |= NETIF_F_RXHASH;
+	if (hw->feature_flags & RNP_NET_FEATURE_RX_CHECKSUM)
+		netdev->features |= NETIF_F_RXCSUM;
+	if (hw->feature_flags & RNP_NET_FEATURE_TX_CHECKSUM)
+		netdev->features |= NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC;
+
+	if (hw->feature_flags & RNP_NET_FEATURE_USO) {
+#ifdef NETIF_F_GSO_UDP_L4
+		netdev->features |= NETIF_F_GSO_UDP_L4;
+#endif
+	}
+	if (enable_hi_dma)
+		netdev->features |= NETIF_F_HIGHDMA;
+
+	if (hw->feature_flags & RNP_NET_FEATURE_TX_UDP_TUNNEL) {
+		netdev->gso_partial_features = RNP_GSO_PARTIAL_FEATURES;
+		netdev->features |=
+			NETIF_F_GSO_PARTIAL | RNP_GSO_PARTIAL_FEATURES;
+	}
+
+	netdev->hw_features |= netdev->features;
+
+	//if (hw->feature_flags & RNP_NET_FEATURE_VLAN_FILTER)
+	//	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+	//if (hw->feature_flags & RNP_NET_FEATURE_STAG_FILTER)
+	//	netdev->hw_features |= NETIF_F_HW_VLAN_STAG_FILTER;
+	if (hw->feature_flags & RNP_NET_FEATURE_VLAN_OFFLOAD) {
+		if (!hw->ncsi_en) {
+			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+		}
+	}
+	if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) {
+		if (!hw->ncsi_en) {
+			netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+			netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+		}
+	}
+	netdev->hw_features |= NETIF_F_RXALL;
+	if (hw->feature_flags & RNP_NET_FEATURE_RX_NTUPLE_FILTER)
+		netdev->hw_features |= NETIF_F_NTUPLE;
+	/* only enable rx-fcs when NCSI is not enabled */
+	if ((hw->feature_flags & RNP_NET_FEATURE_RX_FCS) && (!hw->ncsi_en))
+		netdev->hw_features |= NETIF_F_RXFCS;
+#ifdef NETIF_F_HW_TC
+	if (hw->feature_flags & RNP_NET_FEATURE_HW_TC)
+		netdev->hw_features |= NETIF_F_HW_TC;
+#endif
+
+	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+	netdev->hw_enc_features |= netdev->vlan_features;
+	netdev->mpls_features |= NETIF_F_HW_CSUM;
+
+	if (hw->feature_flags & RNP_NET_FEATURE_VLAN_FILTER)
+		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+	if (hw->feature_flags & RNP_NET_FEATURE_STAG_FILTER)
+		netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
+	if (hw->feature_flags & RNP_NET_FEATURE_VLAN_OFFLOAD) {
+		if (!hw->ncsi_en) {
+			netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+			netdev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+		}
+	}
+	if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) {
+		if (!hw->ncsi_en) {
+			netdev->features |= NETIF_F_HW_VLAN_STAG_RX;
+			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
+		}
+	}
+
+	netdev->priv_flags |= IFF_UNICAST_FLT;
+	netdev->priv_flags |= IFF_SUPP_NOFCS;
+
+	if (adapter->flags2 & RNP_FLAG2_RSC_CAPABLE)
+		netdev->hw_features |= NETIF_F_LRO;
+
+#else /* NETIF_F_GSO_PARTIAL */
+
+	if (hw->feature_flags & RNP_NET_FEATURE_SG)
+		netdev->features |= NETIF_F_SG;
+	if (hw->feature_flags & RNP_NET_FEATURE_TX_CHECKSUM)
+		netdev->features |= NETIF_F_IP_CSUM;
+
+	if (enable_hi_dma)
+		netdev->features |= NETIF_F_HIGHDMA;
+
+	if (hw->feature_flags & RNP_NET_FEATURE_TX_UDP_TUNNEL) {
+		netdev->features |=
+			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM;
+	}
+
+#ifdef NETIF_F_IPV6_CSUM
+	if (hw->feature_flags & RNP_NET_FEATURE_TX_CHECKSUM)
+		netdev->features |= NETIF_F_IPV6_CSUM;
+#endif
+
+	if (hw->feature_flags & RNP_NET_FEATURE_TSO)
+		netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+#ifdef NETIF_F_GSO_UDP_L4
+	if (hw->feature_flags & RNP_NET_FEATURE_USO)
+		netdev->features |= NETIF_F_GSO_UDP_L4;
+#endif
+
+#ifdef NETIF_F_HW_VLAN_CTAG_TX
+
+	if (hw->feature_flags & RNP_NET_FEATURE_VLAN_FILTER)
+		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+	if (hw->feature_flags & RNP_NET_FEATURE_STAG_FILTER)
+		netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
+	if (hw->feature_flags & RNP_NET_FEATURE_VLAN_OFFLOAD) {
+		if (!hw->ncsi_en) {
+			netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+			netdev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+		}
+	}
+	if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) {
+		if (!hw->ncsi_en) {
+			netdev->features |= NETIF_F_HW_VLAN_STAG_RX;
+			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
+		}
+	}
+#endif
+	netdev->features |= rnpgbe_tso_features(hw);
+
+#ifdef NETIF_F_RXHASH
+	if (hw->feature_flags & RNP_NET_FEATURE_RX_HASH)
+		netdev->features |= NETIF_F_RXHASH;
+#endif /* NETIF_F_RXHASH */
+
+	if (hw->feature_flags & RNP_NET_FEATURE_RX_CHECKSUM)
+		netdev->features |= NETIF_F_RXCSUM;
+
+#ifdef HAVE_NDO_SET_FEATURES
+	/* copy netdev features into list of user selectable features */
+#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+	hw_features = netdev->hw_features;
+#else
+	hw_features = get_netdev_hw_features(netdev);
+#endif
+	hw_features |= netdev->features;
+
+	/* give us the option of enabling RSC/LRO later */
+	if (adapter->flags2 & RNP_FLAG2_RSC_CAPABLE)
+		hw_features |= NETIF_F_LRO;
+#else
+#ifdef NETIF_F_GRO
+	/* this is only needed on kernels prior to 2.6.39 */
+	netdev->features |= NETIF_F_GRO;
+#endif /* NETIF_F_GRO */
+#endif /* HAVE_NDO_SET_FEATURES */
+
+#ifdef HAVE_NDO_SET_FEATURES
+
+	if (hw->feature_flags & RNP_NET_FEATURE_TX_CHECKSUM)
+		hw_features |= NETIF_F_SCTP_CSUM;
+	if (hw->feature_flags & RNP_NET_FEATURE_RX_NTUPLE_FILTER)
+		hw_features |= NETIF_F_NTUPLE;
+#ifdef NETIF_F_HW_TC
+	if (hw->feature_flags & RNP_NET_FEATURE_HW_TC)
+		hw_features |= NETIF_F_HW_TC;
+	hw_features |= NETIF_F_RXALL;
+
+	if ((hw->feature_flags & RNP_NET_FEATURE_RX_FCS) && (!hw->ncsi_en))
+		hw_features |= NETIF_F_RXFCS;
+#endif
+#endif
+
+#ifdef HAVE_NDO_SET_FEATURES
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+	set_netdev_hw_features(netdev, hw_features);
+#else
+	netdev->hw_features = hw_features;
+#endif
+#endif
+
+#ifdef HAVE_NETDEV_VLAN_FEATURES
+
+	if (hw->feature_flags & RNP_NET_FEATURE_SG)
+		netdev->vlan_features |= NETIF_F_SG;
+	if (hw->feature_flags & RNP_NET_FEATURE_TX_CHECKSUM)
+		netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	if (hw->feature_flags & RNP_NET_FEATURE_TSO)
+		netdev->vlan_features |= NETIF_F_TSO | NETIF_F_TSO6;
+#ifdef NETIF_F_GSO_UDP_L4
+	if (hw->feature_flags & RNP_NET_FEATURE_USO)
+		netdev->vlan_features |= NETIF_F_GSO_UDP_L4;
+#endif
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+
+#ifdef HAVE_ENCAP_CSUM_OFFLOAD
+	if (hw->feature_flags & RNP_NET_FEATURE_SG)
+		netdev->hw_enc_features |= NETIF_F_SG;
+#endif /* HAVE_ENCAP_CSUM_OFFLOAD */
+
+#ifdef HAVE_VXLAN_RX_OFFLOAD
+	if (hw->feature_flags & RNP_NET_FEATURE_TX_CHECKSUM)
+		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
+
+#endif /* NETIF_F_GSO_PARTIAL */
+
+#ifdef IFF_UNICAST_FLT
+	netdev->priv_flags |= IFF_UNICAST_FLT;
+#endif
+#ifdef IFF_SUPP_NOFCS
+	netdev->priv_flags |= IFF_SUPP_NOFCS;
+#endif
+
+	if (adapter->flags2 & RNP_FLAG2_RSC_ENABLED)
+		netdev->features |= NETIF_F_LRO;
+
+	eth_hw_addr_set(netdev, hw->mac.perm_addr);
+#ifdef ETHTOOL_GPERMADDR
+	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
+#endif
+	pr_info("dev mac:%pM\n", netdev->dev_addr);
+
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+		e_dev_err("invalid MAC address\n");
+		err = -EIO;
+		goto err_sw_init;
+	}
+	ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
+
+	timer_setup(&adapter->service_timer, rnpgbe_service_timer, 0);
+
+#ifdef HAVE_PTP_1588_CLOCK
+	if (module_enable_ptp) {
+#endif
+		/* setup ptp_addr according to mac type */
+		switch (adapter->hw.mac.mac_type) {
+		case mac_dwc_xlg:
+			adapter->ptp_addr = adapter->hw.mac.mac_addr + 0xd00;
+			adapter->gmac4 = 1;
+			break;
+		case mac_dwc_g:
+			adapter->ptp_addr = adapter->hw.mac.mac_addr + 0x700;
+			adapter->gmac4 = 0;
+			break;
+		}
+#ifdef HAVE_PTP_1588_CLOCK
+		adapter->flags2 |= RNP_FLAG2_PTP_ENABLED;
+		if (adapter->flags2 & RNP_FLAG2_PTP_ENABLED) {
+			adapter->tx_timeout_factor = 10;
+			INIT_WORK(&adapter->tx_hwtstamp_work,
+				  rnpgbe_tx_hwtstamp_work);
+		}
+	}
+#endif
+
+	INIT_WORK(&adapter->service_task, rnpgbe_service_task);
+	clear_bit(__RNP_SERVICE_SCHED, &adapter->state);
+
+	if (fix_eth_name)
+		strncpy(netdev->name, adapter->name, sizeof(netdev->name) - 1);
+	else
+		strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
+
+	err = rnpgbe_init_interrupt_scheme(adapter);
+	if (err)
+		goto err_sw_init;
+
+	err = register_mbx_irq(adapter);
+	if (err)
+		goto err_register;
+
+#ifdef CONFIG_PCI_IOV
+	rnpgbe_enable_sriov_true(adapter);
+#endif
+
+	/* WOL not supported for all devices */
+	{
+		struct ethtool_wolinfo wol;
+
+		if (rnpgbe_wol_exclusion(adapter, &wol) ||
+		    !device_can_wakeup(&adapter->pdev->dev))
+			adapter->wol = 0;
+
+		device_set_wakeup_enable(&adapter->pdev->dev, !!adapter->wol);
+	}
+	/* reset the hardware with the new settings */
+	err = hw->ops.start_hw(hw);
+
+	/* start in the down state */
+	set_bit(__RNP_DOWN, &adapter->state);
+
+	if (!fix_eth_name)
+		strscpy(netdev->name, "eth%d", sizeof(netdev->name));
+	err = register_netdev(netdev);
+	if (err) {
+		e_dev_err("register_netdev failed!\n");
+		goto err_register;
+	}
+
+	/* power down the optics for n10 SFP+ fiber */
+	if (hw->ops.disable_tx_laser)
+		hw->ops.disable_tx_laser(hw);
+
+	/* carrier off reporting is important to ethtool even BEFORE open */
+	netif_carrier_off(netdev);
+
+#ifdef CONFIG_RNP_DCA
+	if (dca_add_requester(&pdev->dev) == 0) {
+		adapter->flags |= RNP_FLAG_DCA_ENABLED;
+		rnpgbe_setup_dca(adapter);
+	}
+#endif
+
+	if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) {
+		DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n",
+			adapter->num_vfs);
+		for (i = 0; i < adapter->num_vfs; i++)
+			rnpgbe_vf_configuration(pdev, (i | 0x10000000));
+	}
+
+	if (rnpgbe_sysfs_init(adapter))
+		e_err(probe, "failed to allocate sysfs resources\n");
+
+	rnpgbe_dbg_adapter_init(adapter);
+
+	return 0;
+err_register:
+	remove_mbx_irq(adapter);
+	rnpgbe_clear_interrupt_scheme(adapter);
+err_sw_init:
+	rnpgbe_disable_sriov(adapter);
+	adapter->flags2 &= ~RNP_FLAG2_SEARCH_FOR_SFP;
+err_free_net:
+	free_netdev(netdev);
+	return err;
+}
+
+/**
+ * rnpgbe_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in rnpgbe_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * rnpgbe_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+#ifdef HAVE_CONFIG_HOTPLUG
+static int __devinit rnpgbe_probe(struct pci_dev *pdev,
+				  const struct pci_device_id *id)
+#else
+static int rnpgbe_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+#endif
+{
+	struct rnpgbe_adapter *adapter;
+	struct rnpgbe_info *ii = rnpgbe_info_tbl[id->driver_data];
+	int err;
+
+	/* Catch broken hardware that put the wrong VF device ID in
+	 * the PCIe SR-IOV capability.
+	 */
+	if (pdev->is_virtfn) {
+		WARN(1, "%s (%hx:%hx) should not be a VF!\n", pci_name(pdev),
+		     pdev->vendor, pdev->device);
+		return -EINVAL;
+	}
+
+	err = pci_enable_device_mem(pdev);
+	if (err)
+		return err;
+
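+	/* prefer a 56-bit DMA mask and fall back to 32-bit if that fails */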
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(56)) &&
+	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(56))) {
+		enable_hi_dma = 1;
+	} else {
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			err = dma_set_coherent_mask(&pdev->dev,
+						    DMA_BIT_MASK(32));
+			if (err) {
+				dev_err(&pdev->dev,
+					"No usable DMA configuration, aborting\n");
+				goto err_dma;
+			}
+		}
+		enable_hi_dma = 0;
+	}
+
+	err = pci_request_mem_regions(pdev, rnpgbe_driver_name);
+	if (err) {
+		dev_err(&pdev->dev,
+			"pci_request_selected_regions failed 0x%x\n", err);
+		goto err_pci_reg;
+	}
+#ifndef NO_PCIE_ERROR_REPORTING
+	pci_enable_pcie_error_reporting(pdev);
+#endif
+	pci_set_master(pdev);
+	pci_save_state(pdev);
+
+	err = rnpgbe_add_adpater(pdev, ii, &adapter);
+	if (err)
+		goto err_regions;
+
+	return 0;
+err_regions:
+	pci_release_mem_regions(pdev);
+err_dma:
+err_pci_reg:
+	return err;
+}
+
+/**
+ * rnpgbe_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * rnpgbe_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+#ifdef HAVE_CONFIG_HOTPLUG
+static void __devexit rnpgbe_remove(struct pci_dev *pdev)
+#else
+static void rnpgbe_remove(struct pci_dev *pdev)
+#endif
+{
+	struct rnpgbe_adapter *adapter = pci_get_drvdata(pdev);
+
+	if (pci_channel_offline(pdev)) {
+		pr_info("%s:%s card plugged out, pci-err-stat:%d\n", __func__,
+			pci_name(pdev), pdev->error_state);
+	}
+
+#ifdef CONFIG_PCI_IOV
+	/*
+	 * Only disable SR-IOV on unload if the user specified the now
+	 * deprecated max_vfs module parameter.
+	 */
+	rnpgbe_disable_sriov(adapter);
+#endif
+	rnpgbe_rm_adpater(adapter);
+	pci_release_mem_regions(pdev);
+#ifndef NO_PCIE_ERROR_REPORTING
+	pci_disable_pcie_error_reporting(pdev);
+#endif
+	pci_disable_device(pdev);
+}
+
+/**
+ * rnpgbe_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t rnpgbe_io_error_detected(struct pci_dev *pdev,
+						 pci_channel_state_t state)
+{
+	struct rnpgbe_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+
+#ifdef CONFIG_PCI_IOV
+	struct pci_dev *bdev, *vfdev;
+	u32 dw0, dw1, dw2, dw3;
+	int vf, pos;
+	u16 req_id, pf_func;
+
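+	/* Walk up to the root port and inspect its AER header log: if one of
+	 * our VFs caused the error, issue an FLR to that VF instead of
+	 * resetting the whole PF.
+	 */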
+	if (adapter->num_vfs == 0)
+		goto skip_bad_vf_detection;
+
+	bdev = pdev->bus->self;
+	while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
+		bdev = bdev->bus->self;
+
+	if (!bdev)
+		goto skip_bad_vf_detection;
+
+	pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
+	if (!pos)
+		goto skip_bad_vf_detection;
+
+	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0);
+	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1);
+	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2);
+	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3);
+
+	req_id = dw1 >> 16;
+	/* On the n500, bit 7 of the requester ID being set indicates a VF */
+	if (!(req_id & 0x0080))
+		goto skip_bad_vf_detection;
+
+	pf_func = req_id & 0x01;
+	if ((pf_func & 1) == (pdev->devfn & 1)) {
+		unsigned int device_id;
+
+		vf = (req_id & 0x7F) >> 1;
+		e_dev_err("VF %d has caused a PCIe error\n", vf);
+		e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
+			  "%8.8x\tdw3: %8.8x\n",
+			  dw0, dw1, dw2, dw3);
+
+		device_id = PCI_DEVICE_ID_N500_VF;
+
+		/* Find the pci device of the offending VF */
+		vfdev = pci_get_device(PCI_VENDOR_ID_MUCSE, device_id, NULL);
+		while (vfdev) {
+			if (vfdev->devfn == (req_id & 0xFF))
+				break;
+			vfdev = pci_get_device(PCI_VENDOR_ID_MUCSE, device_id,
+					       vfdev);
+		}
+		/*
+		 * There's a slim chance the VF could have been hot plugged,
+		 * so if it is no longer present we don't need to issue the
+		 * VFLR.  Just clean up the AER in that case.
+		 */
+		if (vfdev) {
+			e_dev_err("Issuing VFLR to VF %d\n", vf);
+			pci_write_config_dword(vfdev, 0xA8, 0x00008000);
+			/* Free device reference count */
+			pci_dev_put(vfdev);
+		}
+
+		pci_aer_clear_nonfatal_status(pdev);
+	}
+
+	/*
+	 * Even though the error may have occurred on the other port
+	 * we still need to increment the vf error reference count for
+	 * both ports because the I/O resume function will be called
+	 * for both of them.
+	 */
+	adapter->vferr_refcount++;
+
+	return PCI_ERS_RESULT_RECOVERED;
+
+skip_bad_vf_detection:
+#endif /* CONFIG_PCI_IOV */
+	netif_device_detach(netdev);
+
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	if (netif_running(netdev))
+		rnpgbe_down(adapter);
+	pci_disable_device(pdev);
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * rnpgbe_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ */
+static pci_ers_result_t rnpgbe_io_slot_reset(struct pci_dev *pdev)
+{
+	pci_ers_result_t result = PCI_ERS_RESULT_NONE;
+
+	struct rnpgbe_adapter *adapter = pci_get_drvdata(pdev);
+
+	if (pci_enable_device_mem(pdev)) {
+		e_err(probe, "Cannot re-enable PCI device after reset.\n");
+		result = PCI_ERS_RESULT_DISCONNECT;
+	} else {
+		/* we need this */
+		smp_mb__before_atomic();
+
+		pci_set_master(pdev);
+		pci_restore_state(pdev);
+		pci_save_state(pdev);
+		pci_wake_from_d3(pdev, false);
+		rnpgbe_reset(adapter);
+		result = PCI_ERS_RESULT_RECOVERED;
+	}
+
+	pci_aer_clear_nonfatal_status(pdev);
+	return result;
+}
+
+/**
+ * rnpgbe_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it is OK to resume normal operation.
+ */
+static void rnpgbe_io_resume(struct pci_dev *pdev)
+{
+	struct rnpgbe_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+
+#ifdef CONFIG_PCI_IOV
+	if (adapter->vferr_refcount) {
+		e_info(drv, "Resuming after VF err\n");
+		adapter->vferr_refcount--;
+		return;
+	}
+
+#endif
+	if (netif_running(netdev))
+		rnpgbe_up(adapter);
+
+	netif_device_attach(netdev);
+}
+
+#ifdef CONFIG_PM
+#ifdef HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY
+static void rnpgbe_io_reset_notify(struct pci_dev *pdev, bool prepare)
+{
+	struct device *dev = pci_dev_to_dev(pdev);
+
+	if (prepare)
+		rnpgbe_suspend(dev);
+	else
+		rnpgbe_resume(dev);
+}
+#endif
+
+#ifdef HAVE_PCI_ERROR_HANDLER_RESET_PREPARE
+static void pci_io_reset_prepare(struct pci_dev *pdev)
+{
+#ifndef USE_LEGACY_PM_SUPPORT
+	struct device *dev = pci_dev_to_dev(pdev);
+
+	rnpgbe_suspend(dev);
+#else
+	pm_message_t a;
+
+	rnpgbe_suspend(pdev, a);
+#endif
+}
+
+static void pci_io_reset_done(struct pci_dev *pdev)
+{
+#ifndef USE_LEGACY_PM_SUPPORT
+	struct device *dev = pci_dev_to_dev(pdev);
+
+	rnpgbe_resume(dev);
+#else
+	rnpgbe_resume(pdev);
+#endif
+}
+#endif
+#endif
+
+static const struct pci_error_handlers rnpgbe_err_handler = {
+	.error_detected = rnpgbe_io_error_detected,
+	.slot_reset = rnpgbe_io_slot_reset,
+#ifdef CONFIG_PM
+#ifdef HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY
+	.reset_notify = rnpgbe_io_reset_notify,
+#endif
+#ifdef HAVE_PCI_ERROR_HANDLER_RESET_PREPARE
+	.reset_prepare = pci_io_reset_prepare,
+	.reset_done = pci_io_reset_done,
+#endif
+#endif
+	.resume = rnpgbe_io_resume,
+};
+
+#ifdef HAVE_RHEL6_SRIOV_CONFIGURE
+static struct pci_driver_rh rnpgbe_driver_rh = {
+	.sriov_configure = rnpgbe_pci_sriov_configure,
+};
+#endif
+
+#ifdef CONFIG_PM
+#ifndef USE_LEGACY_PM_SUPPORT
+static const struct dev_pm_ops rnpgbe_pm_ops = {
+	.suspend = rnpgbe_suspend,
+	.resume = rnpgbe_resume,
+	.freeze = rnpgbe_freeze,
+	.thaw = rnpgbe_thaw,
+	.poweroff = rnpgbe_suspend,
+	.restore = rnpgbe_resume,
+};
+#endif /* USE_LEGACY_PM_SUPPORT */
+#endif
+
+static struct pci_driver rnpgbe_driver = {
+	.name = rnpgbe_driver_name,
+	.id_table = rnpgbe_pci_tbl,
+	.probe = rnpgbe_probe,
+#ifdef HAVE_CONFIG_HOTPLUG
+	.remove = __devexit_p(rnpgbe_remove),
+#else
+	.remove = rnpgbe_remove,
+#endif
+#ifdef CONFIG_PM
+#ifndef USE_LEGACY_PM_SUPPORT
+	.driver = {
+		.pm = &rnpgbe_pm_ops,
+	},
+#else
+	.suspend = rnpgbe_suspend,
+	.resume = rnpgbe_resume,
+#endif /* USE_LEGACY_PM_SUPPORT */
+#endif
+#ifndef USE_REBOOT_NOTIFIER
+	.shutdown = rnpgbe_shutdown,
+#endif
+#if defined(HAVE_SRIOV_CONFIGURE)
+	.sriov_configure = rnpgbe_pci_sriov_configure,
+#elif defined(HAVE_RHEL6_SRIOV_CONFIGURE)
+	.rh_reserved = &rnpgbe_driver_rh,
+#endif /* HAVE_SRIOV_CONFIGURE */
+#ifdef HAVE_PCI_ERS
+	.err_handler = &rnpgbe_err_handler
+#endif
+};
+
+static int __init rnpgbe_init_module(void)
+{
+	int ret;
+
+	pr_info("%s - version %s\n", rnpgbe_driver_string,
+		rnpgbe_driver_version);
+	pr_info("%s\n", rnpgbe_copyright);
+	rnpgbe_wq = create_singlethread_workqueue(rnpgbe_driver_name);
+
+	if (!rnpgbe_wq) {
+		pr_err("%s: Failed to create workqueue\n", rnpgbe_driver_name);
+		return -ENOMEM;
+	}
+
+	rnpgbe_dbg_init();
+
+	ret = pci_register_driver(&rnpgbe_driver);
+	if (ret) {
+		destroy_workqueue(rnpgbe_wq);
+		rnpgbe_dbg_exit();
+		return ret;
+	}
+
+	return 0;
+}
+
+module_init(rnpgbe_init_module);
+
+static void __exit rnpgbe_exit_module(void)
+{
+	pci_unregister_driver(&rnpgbe_driver);
+
+	destroy_workqueue(rnpgbe_wq);
+
+	rnpgbe_dbg_exit();
+
+	rcu_barrier(); /* Wait for completion of call_rcu()'s */
+}
+
+module_exit(rnpgbe_exit_module);
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c
new file mode 100755
index 0000000000000000000000000000000000000000..7958bfdcfd7ba8b2cd333d8f69b79ef0cf341ac8
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c
@@ -0,0 +1,665 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include "rnpgbe.h"
+#include "rnpgbe_type.h"
+#include "rnpgbe_common.h"
+#include "rnpgbe_mbx.h"
+#include "rnpgbe_mbx_fw.h"
+
+#define VF2PF_MBOX_VEC(mbx, vf) (mbx->vf2pf_mbox_vec_base + 4 * (vf))
+#define CPU2PF_MBOX_VEC(mbx) (mbx->cpu2pf_mbox_vec)
+
+/* == PF <--> VF mailbox ==== */
+#define SHARE_MEM_BYTES 64
+#define PF_VF_SHM(mbx, vf)                                                     \
+	(mbx->pf_vf_shm_base +                                                 \
+	 mbx->mbx_mem_size * (vf))
+#define PF2VF_COUNTER(mbx, vf) (PF_VF_SHM(mbx, vf) + 0)
+#define VF2PF_COUNTER(mbx, vf) (PF_VF_SHM(mbx, vf) + 4)
+#define PF_VF_SHM_DATA(mbx, vf) (PF_VF_SHM(mbx, vf) + 8)
+#define PF2VF_MBOX_CTRL(mbx, vf) (mbx->pf2vf_mbox_ctrl_base + 4 * (vf))
+#define PF_VF_MBOX_MASK_LO(mbx) (mbx->pf_vf_mbox_mask_lo)
+#define PF_VF_MBOX_MASK_HI(mbx) (mbx->pf_vf_mbox_mask_hi)
+/* === CPU <--> PF === */
+#define CPU_PF_SHM(mbx) (mbx->cpu_pf_shm_base)
+#define CPU2PF_COUNTER(mbx) (CPU_PF_SHM(mbx) + 0)
+#define PF2CPU_COUNTER(mbx) (CPU_PF_SHM(mbx) + 4)
+#define CPU_PF_SHM_DATA(mbx) (CPU_PF_SHM(mbx) + 8)
+#define PF2CPU_MBOX_CTRL(mbx) (mbx->pf2cpu_mbox_ctrl)
+#define CPU_PF_MBOX_MASK(mbx) (mbx->cpu_pf_mbox_mask)
+#define MBOX_CTRL_REQ (1 << 0) /* WO */
+#define MBOX_CTRL_PF_HOLD_SHM (1 << 3) /* VF:RO, PF:WR */
+#define MBOX_IRQ_EN 0
+#define MBOX_IRQ_DISABLE 1
+#define mbx_prd32(hw, reg) prnpgbe_rd_reg((hw)->hw_addr + (reg))
+#define mbx_rd32(hw, reg) rnpgbe_rd_reg((hw)->hw_addr + (reg))
+#define mbx_pwr32(hw, reg, val) p_rnpgbe_wr_reg((hw)->hw_addr + (reg), (val))
+#define mbx_wr32(hw, reg, val) rnpgbe_wr_reg((hw)->hw_addr + (reg), (val))
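+
+/* Mailbox layout (see the macros above): every PF<->VF and CPU<->PF channel
+ * is a small shared-memory window.  The first two words are counter
+ * registers, one per direction, each holding a 16-bit request count in the
+ * low half and a 16-bit ack count in the high half; the data area starts at
+ * offset 8.  A sender bumps its request counter and rings the doorbell via
+ * the CTRL register (MBOX_CTRL_REQ); the receiver bumps the ack counter.
+ * MBOX_CTRL_PF_HOLD_SHM locks the shared buffer while it is in use.
+ */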
+
+/**
+ *  rnpgbe_read_mbx - Reads a message from the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox/vfnum to read
+ *
+ *  returns SUCCESS if it successfully read message from buffer
+ **/
+s32 rnpgbe_read_mbx(struct rnpgbe_hw *hw, u32 *msg, u16 size,
+		    enum MBX_ID mbx_id)
+{
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = RNP_ERR_MBX;
+
+	/* limit read to size of mailbox */
+	if (size > mbx->size)
+		size = mbx->size;
+
+	if (mbx->ops.read)
+		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  rnpgbe_write_mbx - Write a message to the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 rnpgbe_write_mbx(struct rnpgbe_hw *hw, u32 *msg, u16 size,
+		     enum MBX_ID mbx_id)
+{
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = 0;
+
+	if (size > mbx->size)
+		ret_val = RNP_ERR_MBX;
+	else if (mbx->ops.write)
+		ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+	return ret_val;
+}
+
+static inline u16 rnpgbe_mbx_get_req(struct rnpgbe_hw *hw, int reg)
+{
+	mb();
+	return ioread32(hw->hw_addr + reg) & 0xffff;
+}
+
+static inline u16 rnpgbe_mbx_get_ack(struct rnpgbe_hw *hw, int reg)
+{
+	mb();
+	return (mbx_rd32(hw, reg) >> 16);
+}
+
+static inline void rnpgbe_mbx_inc_pf_req(struct rnpgbe_hw *hw,
+					 enum MBX_ID mbx_id)
+{
+	u16 req;
+	int reg;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	u32 v;
+
+	reg = (mbx_id == MBX_CM3CPU) ? PF2CPU_COUNTER(mbx) :
+				       PF2VF_COUNTER(mbx, mbx_id);
+	v = mbx_rd32(hw, reg);
+
+	req = (v & 0xffff);
+	req++;
+	v &= ~(0x0000ffff);
+	v |= req;
+	mb();
+	mbx_wr32(hw, reg, v);
+
+	/* update stats */
+	hw->mbx.stats.msgs_tx++;
+}
+
+static inline void rnpgbe_mbx_inc_pf_ack(struct rnpgbe_hw *hw,
+					 enum MBX_ID mbx_id)
+{
+	u16 ack;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	int reg = (mbx_id == MBX_CM3CPU) ? PF2CPU_COUNTER(mbx) :
+					   PF2VF_COUNTER(mbx, mbx_id);
+	u32 v = mbx_rd32(hw, reg);
+
+	ack = (v >> 16) & 0xffff;
+	ack++;
+	v &= ~(0xffff0000);
+	v |= (ack << 16);
+	mb();
+	mbx_wr32(hw, reg, v);
+
+	/* update stats */
+	hw->mbx.stats.msgs_rx++;
+}
+
+/**
+ *  rnpgbe_check_for_msg - checks to see if someone sent us mail
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 rnpgbe_check_for_msg(struct rnpgbe_hw *hw, enum MBX_ID mbx_id)
+{
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = RNP_ERR_MBX;
+
+	if (mbx->ops.check_for_msg)
+		ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  rnpgbe_check_for_ack - checks to see if someone sent us ACK
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 rnpgbe_check_for_ack(struct rnpgbe_hw *hw, enum MBX_ID mbx_id)
+{
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = RNP_ERR_MBX;
+
+	if (mbx->ops.check_for_ack)
+		ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  rnpgbe_poll_for_msg - Wait for message notification
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification
+ **/
+static s32 rnpgbe_poll_for_msg(struct rnpgbe_hw *hw, enum MBX_ID mbx_id)
+{
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	if (!countdown || !mbx->ops.check_for_msg)
+		goto out;
+
+	while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+		countdown--;
+		if (!countdown)
+			break;
+		udelay(mbx->usec_delay);
+	}
+
+out:
+	return countdown ? 0 : -ETIME;
+}
+
+/**
+ *  rnpgbe_poll_for_ack - Wait for message acknowledgement
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message acknowledgement
+ **/
+static s32 rnpgbe_poll_for_ack(struct rnpgbe_hw *hw, enum MBX_ID mbx_id)
+{
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	if (!countdown || !mbx->ops.check_for_ack)
+		goto out;
+
+	while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+		countdown--;
+		if (!countdown) {
+			printk("wait ack timeout\n");
+			break;
+		}
+		udelay(mbx->usec_delay);
+	}
+
+out:
+	return countdown ? 0 : RNP_ERR_MBX;
+}
+
+/**
+ *  rnpgbe_read_posted_mbx - Wait for message notification and receive message
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification and
+ *  copied it into the receive buffer.
+ **/
+static s32 rnpgbe_read_posted_mbx(struct rnpgbe_hw *hw, u32 *msg, u16 size,
+				  enum MBX_ID mbx_id)
+{
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = RNP_ERR_MBX;
+
+	if (!mbx->ops.read)
+		goto out;
+
+	ret_val = rnpgbe_poll_for_msg(hw, mbx_id);
+
+	/* if ack received read message, otherwise we timed out */
+	if (!ret_val)
+		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+	return ret_val;
+}
+
+/**
+ *  rnpgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer and
+ *  received an ack to that message within delay * timeout period
+ **/
+static s32 rnpgbe_write_posted_mbx(struct rnpgbe_hw *hw, u32 *msg, u16 size,
+				   enum MBX_ID mbx_id)
+{
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = RNP_ERR_MBX;
+
+	/* if pcie is off, nothing to do */
+	if (pci_channel_offline(hw->pdev))
+		return -EIO;
+
+	/* exit if either we can't write or there isn't a defined timeout */
+	if (!mbx->ops.write || !mbx->timeout)
+		goto out;
+
+	/* send msg and hold buffer lock */
+	ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+	/* if msg sent wait until we receive an ack */
+	if (!ret_val)
+		ret_val = rnpgbe_poll_for_ack(hw, mbx_id);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  rnpgbe_check_for_msg_pf - checks to see if the VF has sent mail
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox (VF index or MBX_CM3CPU)
+ *
+ *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 rnpgbe_check_for_msg_pf(struct rnpgbe_hw *hw, enum MBX_ID mbx_id)
+{
+	s32 ret_val = RNP_ERR_MBX;
+	u16 hw_req_count = 0;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+
+	/* if pcie is off, nothing to do */
+	if (pci_channel_offline(hw->pdev))
+		return -EIO;
+
+	if (mbx_id == MBX_CM3CPU) {
+		hw_req_count = rnpgbe_mbx_get_req(hw, CPU2PF_COUNTER(mbx));
+		if (mbx->mbx_feature & MBX_FEATURE_NO_ZERO) {
+			if ((hw_req_count != 0) &&
+			    (hw_req_count != hw->mbx.cpu_req)) {
+				ret_val = 0;
+				hw->mbx.stats.reqs++;
+			}
+
+		} else {
+			if (hw_req_count != hw->mbx.cpu_req) {
+				ret_val = 0;
+				hw->mbx.stats.reqs++;
+			}
+		}
+	} else {
+		if (rnpgbe_mbx_get_req(hw, VF2PF_COUNTER(mbx, mbx_id)) !=
+		    hw->mbx.vf_req[mbx_id]) {
+			ret_val = 0;
+			hw->mbx.stats.reqs++;
+		}
+	}
+
+	return ret_val;
+}
+
+/**
+ *  rnpgbe_check_for_ack_pf - checks to see if the VF has ACKed
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox (VF index or MBX_CM3CPU)
+ *
+ *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 rnpgbe_check_for_ack_pf(struct rnpgbe_hw *hw, enum MBX_ID mbx_id)
+{
+	s32 ret_val = RNP_ERR_MBX;
+	u16 hw_cpu_ack = 0;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+
+	/* if pcie is off, nothing to do */
+	if (pci_channel_offline(hw->pdev))
+		return -EIO;
+
+	if (mbx_id == MBX_CM3CPU) {
+		hw_cpu_ack = rnpgbe_mbx_get_ack(hw, CPU2PF_COUNTER(mbx));
+		if ((hw_cpu_ack != 0) &&
+		   (hw_cpu_ack != hw->mbx.cpu_ack)) {
+			ret_val = 0;
+			hw->mbx.stats.acks++;
+		}
+	} else {
+		if (rnpgbe_mbx_get_ack(hw, VF2PF_COUNTER(mbx, mbx_id)) !=
+		    hw->mbx.vf_ack[mbx_id]) {
+			ret_val = 0;
+			hw->mbx.stats.acks++;
+		}
+	}
+
+	return ret_val;
+}
+
+/**
+ *  rnpgbe_obtain_mbx_lock_pf - obtain mailbox lock
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: the VF index or CPU
+ *
+ *  return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 rnpgbe_obtain_mbx_lock_pf(struct rnpgbe_hw *hw, enum MBX_ID mbx_id)
+{
+	int try_cnt = 5000;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL(mbx) :
+						PF2VF_MBOX_CTRL(mbx, mbx_id);
+
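+	/* the lock is granted once MBOX_CTRL_PF_HOLD_SHM reads back as set;
+	 * retry for up to try_cnt * 100us before giving up
+	 */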
+	while (try_cnt-- > 0) {
+		/* Take ownership of the buffer */
+		mbx_wr32(hw, CTRL_REG, MBOX_CTRL_PF_HOLD_SHM);
+		wmb();
+		/* reserve mailbox for cm3 use */
+		if (mbx_rd32(hw, CTRL_REG) & MBOX_CTRL_PF_HOLD_SHM)
+			return 0;
+		udelay(100);
+	}
+
+	rnpgbe_err("%s: faild to get:%d lock\n", __func__, mbx_id);
+	return -EPERM;
+}
+
+/**
+ *  rnpgbe_write_mbx_pf - Places a message in the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: the VF index
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 rnpgbe_write_mbx_pf(struct rnpgbe_hw *hw, u32 *msg, u16 size,
+			       enum MBX_ID mbx_id)
+{
+	s32 ret_val = 0;
+	u16 i;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	u32 DATA_REG = (mbx_id == MBX_CM3CPU) ? CPU_PF_SHM_DATA(mbx) :
+						PF_VF_SHM_DATA(mbx, mbx_id);
+	u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL(mbx) :
+						PF2VF_MBOX_CTRL(mbx, mbx_id);
+
+	/* if pcie is off, we cannot exchange with hw */
+	if (pci_channel_offline(hw->pdev))
+		return -EIO;
+
+	if (size > RNP_VFMAILBOX_SIZE) {
+		printk(KERN_DEBUG "%s: size:%d should <%d\n", __func__, size,
+		       RNP_VFMAILBOX_SIZE);
+		return -EINVAL;
+	}
+
+	/* lock the mailbox to prevent pf/vf/cpu race condition */
+	ret_val = rnpgbe_obtain_mbx_lock_pf(hw, mbx_id);
+	if (ret_val) {
+		printk(KERN_DEBUG
+		       "%s: get mbx:%d wlock failed. ret:%d. req:0x%08x-0x%08x\n",
+		       __func__, mbx_id, ret_val, msg[0], msg[1]);
+		goto out_no_write;
+	}
+
+	/* copy the caller specified message to the mailbox memory buffer */
+	for (i = 0; i < size; i++) {
+		mbx_wr32(hw, DATA_REG + i * 4, msg[i]);
+		rnpgbe_logd(LOG_MBX_OUT, "  w-mbx:0x%x <= 0x%x\n",
+			    DATA_REG + i * 4, msg[i]);
+	}
+
+	/* flush msg and acks as we are overwriting the message buffer */
+	if (mbx_id == MBX_CM3CPU) {
+		hw->mbx.cpu_ack = rnpgbe_mbx_get_ack(hw, CPU2PF_COUNTER(mbx));
+	} else {
+		hw->mbx.vf_ack[mbx_id] =
+			rnpgbe_mbx_get_ack(hw, VF2PF_COUNTER(mbx, mbx_id));
+	}
+	rnpgbe_mbx_inc_pf_req(hw, mbx_id);
+
+	/* Interrupt VF/CM3 to tell it a message
+	 * has been sent and release buffer
+	 */
+	if (mbx->mbx_feature & MBX_FEATURE_WRITE_DELAY)
+		udelay(300);
+	mbx_wr32(hw, CTRL_REG, MBOX_CTRL_REQ);
+
+out_no_write:
+	/* sometimes happen */
+
+	return ret_val;
+}
+
+/**
+ *  rnpgbe_read_mbx_pf - Read a message from the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox (VF index or MBX_CM3CPU)
+ *
+ *  This function copies a message from the mailbox buffer to the caller's
+ *  memory buffer.  The presumption is that the caller knows that there was
+ *  a message due to a VF/CPU request so no polling for message is needed.
+ **/
+static s32 rnpgbe_read_mbx_pf(struct rnpgbe_hw *hw, u32 *msg, u16 size,
+			      enum MBX_ID mbx_id)
+{
+	s32 ret_val = -EIO;
+	u32 i;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	u32 BUF_REG = (mbx_id == MBX_CM3CPU) ? CPU_PF_SHM_DATA(mbx) :
+					       PF_VF_SHM_DATA(mbx, mbx_id);
+	u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL(mbx) :
+						PF2VF_MBOX_CTRL(mbx, mbx_id);
+	/* if pcie is off, nothing to do */
+	if (pci_channel_offline(hw->pdev))
+		return -EIO;
+	if (size > RNP_VFMAILBOX_SIZE) {
+		printk(KERN_DEBUG "%s: size:%d should <%d\n", __func__, size,
+		       RNP_VFMAILBOX_SIZE);
+		return -EINVAL;
+	}
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = rnpgbe_obtain_mbx_lock_pf(hw, mbx_id);
+	if (ret_val)
+		goto out_no_read;
+
+	/* we need this */
+	mb();
+	/* copy the message from the mailbox memory buffer */
+	for (i = 0; i < size; i++) {
+		msg[i] = mbx_rd32(hw, BUF_REG + 4 * i);
+		rnpgbe_logd(LOG_MBX_IN, "  r-mbx:0x%x => 0x%x\n",
+			    BUF_REG + 4 * i, msg[i]);
+	}
+	mbx_wr32(hw, BUF_REG, 0);
+
+	/* update req. used by rnpvf_check_for_msg_vf  */
+	if (mbx_id == MBX_CM3CPU) {
+		hw->mbx.cpu_req = rnpgbe_mbx_get_req(hw, CPU2PF_COUNTER(mbx));
+	} else {
+		hw->mbx.vf_req[mbx_id] =
+			rnpgbe_mbx_get_req(hw, VF2PF_COUNTER(mbx, mbx_id));
+	}
+	/* this ack may be too early */
+	/* Acknowledge receipt and release mailbox, then we're done */
+	rnpgbe_mbx_inc_pf_ack(hw, mbx_id);
+
+	/* free ownership of the buffer */
+	mbx_wr32(hw, CTRL_REG, 0);
+
+out_no_read:
+
+	return ret_val;
+}
+
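+/* Resynchronise the driver's cached req/ack counters with the hardware
+ * counters, release any held mailbox control registers and restore the
+ * default interrupt masks.
+ */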
+static void rnpgbe_mbx_reset(struct rnpgbe_hw *hw)
+{
+	int idx, v;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+
+	for (idx = 0; idx < hw->max_vfs; idx++) {
+		v = mbx_rd32(hw, VF2PF_COUNTER(mbx, idx));
+		hw->mbx.vf_req[idx] = v & 0xffff;
+		hw->mbx.vf_ack[idx] = (v >> 16) & 0xffff;
+		mbx_wr32(hw, PF2VF_MBOX_CTRL(mbx, idx), 0);
+	}
+	v = mbx_rd32(hw, CPU2PF_COUNTER(mbx));
+	hw->mbx.cpu_req = v & 0xffff;
+	hw->mbx.cpu_ack = (v >> 16) & 0xffff;
+
+	printk(KERN_DEBUG "now mbx.cpu_req %d mbx.cpu_ack %d\n",
+	       hw->mbx.cpu_req, hw->mbx.cpu_ack);
+	mbx_wr32(hw, PF2CPU_MBOX_CTRL(mbx), 0);
+
+	if (PF_VF_MBOX_MASK_LO(mbx))
+		wr32(hw, PF_VF_MBOX_MASK_LO(mbx), 0);
+	if (PF_VF_MBOX_MASK_HI(mbx))
+		wr32(hw, PF_VF_MBOX_MASK_HI(mbx), 0);
+
+	wr32(hw, CPU_PF_MBOX_MASK(mbx), 0xffff0000);
+}
+
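+/* Route mailbox interrupts for the PF: on enable, snapshot the current
+ * req/ack counters, point every VF-to-PF vector and the CPU-to-PF vector
+ * at nr_vec and unmask them; on disable, mask everything and clear the
+ * mailbox control registers.
+ */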
+static int rnpgbe_mbx_configure_pf(struct rnpgbe_hw *hw, int nr_vec,
+				   bool enable)
+{
+	int idx = 0;
+	u32 v;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+
+	/* if pcie is off, nothing to do */
+	if (pci_channel_offline(hw->pdev))
+		return -EIO;
+	if (enable) {
+		for (idx = 0; idx < hw->max_vfs; idx++) {
+			v = mbx_rd32(hw, VF2PF_COUNTER(mbx, idx));
+			hw->mbx.vf_req[idx] = v & 0xffff;
+			hw->mbx.vf_ack[idx] = (v >> 16) & 0xffff;
+
+			mbx_wr32(hw, PF2VF_MBOX_CTRL(mbx, idx), 0);
+		}
+		v = mbx_rd32(hw, CPU2PF_COUNTER(mbx));
+		hw->mbx.cpu_req = v & 0xffff;
+		hw->mbx.cpu_ack = (v >> 16) & 0xffff;
+		mbx_wr32(hw, PF2CPU_MBOX_CTRL(mbx), 0);
+
+		/* vf to pf req interrupt */
+		for (idx = 0; idx < hw->max_vfs; idx++)
+			mbx_wr32(hw, VF2PF_MBOX_VEC(mbx, idx), nr_vec);
+
+		/* allow vf to vectors */
+		if (PF_VF_MBOX_MASK_LO(mbx))
+			wr32(hw, PF_VF_MBOX_MASK_LO(mbx), 0);
+
+		/* enable irq */
+		if (PF_VF_MBOX_MASK_HI(mbx))
+			wr32(hw, PF_VF_MBOX_MASK_HI(mbx), 0);
+
+		/* bind cm3cpu mbx to irq */
+		wr32(hw, CPU2PF_MBOX_VEC(mbx), nr_vec);
+		/* cm3 and VF63 share #63 irq */
+		/* allow CM3CPU to PF MBX IRQ */
+		wr32(hw, CPU_PF_MBOX_MASK(mbx), 0xffff0000);
+
+		rnpgbe_dbg("[%s] mbx-vector:%d\n", __func__, nr_vec);
+
+	} else {
+		/* disable irq */
+		if (PF_VF_MBOX_MASK_LO(mbx))
+			wr32(hw, PF_VF_MBOX_MASK_LO(mbx), 0xffffffff);
+		if (PF_VF_MBOX_MASK_HI(mbx))
+			wr32(hw, PF_VF_MBOX_MASK_HI(mbx), 0xffffffff);
+
+		/* disable CM3CPU to PF MBX IRQ */
+		wr32(hw, CPU_PF_MBOX_MASK(mbx), 0xfffffffe);
+
+		/* reset vf->pf status/ctrl */
+		for (idx = 0; idx < hw->max_vfs; idx++)
+			mbx_wr32(hw, PF2VF_MBOX_CTRL(mbx, idx), 0);
+		/* reset pf->cm3 ctrl */
+		mbx_wr32(hw, PF2CPU_MBOX_CTRL(mbx), 0);
+		/* used to sync link status */
+		wr32(hw, RNP_DMA_DUMY, 0);
+	}
+	return 0;
+}
+
+/**
+ *  rnpgbe_init_mbx_params_pf - set initial values for pf mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+s32 rnpgbe_init_mbx_params_pf(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+
+	mbx->usec_delay = 100;
+	mbx->timeout = (4 * 1000 * 1000) / mbx->usec_delay;
+	mbx->stats.msgs_tx = 0;
+	mbx->stats.msgs_rx = 0;
+	mbx->stats.reqs = 0;
+	mbx->stats.acks = 0;
+	mbx->stats.rsts = 0;
+	mbx->size = RNP_VFMAILBOX_SIZE;
+
+	mutex_init(&mbx->lock);
+	rnpgbe_mbx_reset(hw);
+	return 0;
+}
+
+struct rnpgbe_mbx_operations rnpgbe_mbx_ops_generic = {
+	.init_params = rnpgbe_init_mbx_params_pf,
+	.read = rnpgbe_read_mbx_pf,
+	.write = rnpgbe_write_mbx_pf,
+	.read_posted = rnpgbe_read_posted_mbx,
+	.write_posted = rnpgbe_write_posted_mbx,
+	.check_for_msg = rnpgbe_check_for_msg_pf,
+	.check_for_ack = rnpgbe_check_for_ack_pf,
+	.configure = rnpgbe_mbx_configure_pf,
+};
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h
new file mode 100755
index 0000000000000000000000000000000000000000..e0a2ca9abb168033740024ddf4de828c74b41d3c
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h
@@ -0,0 +1,222 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPGBE_MBX_H_
+#define _RNPGBE_MBX_H_
+
+#include "rnpgbe_type.h"
+#include "rnpgbe_mbx_fw.h"
+
+#define RNP_VFMAILBOX_SIZE 14 /* 14 32 bit words - 56 bytes */
+#define RNP_ERR_MBX -100
+#define RNP_VT_MSGTYPE_ACK 0x80000000
+/* Messages below or'd with this are the ACK */
+#define RNP_VT_MSGTYPE_NACK 0x40000000
+/* Messages below or'd with this are the NACK */
+#define RNP_VT_MSGTYPE_CTS 0x20000000
+/* Indicates that VF is still clear to send requests */
+#define RNP_VT_MSGINFO_SHIFT 14
+/* bits 20:14 are used for extra info for certain messages */
+#define RNP_VT_MSGINFO_MASK (0x7F << RNP_VT_MSGINFO_SHIFT)
+/* VLAN pool filtering masks */
+#define RNP_VLVF_VIEN 0x80000000 /* filter is valid */
+#define RNP_VLVF_ENTRIES 64
+#define RNP_VLVF_VLANID_MASK 0x00000FFF
+/* mailbox msg_data */
+#define RNP_VNUM_OFFSET (21)
+#define RNP_VF_MASK (0x7f << 21)
+#define RNP_MAIL_CMD_MASK 0x3fff
+/* mailbox API, legacy requests */
+#define RNP_VF_RESET 0x01 /* VF requests reset */
+#define RNP_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define RNP_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define RNP_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+/* mailbox API, version 1.0 VF requests */
+#define RNP_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define RNP_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+#define RNP_VF_GET_MACADDR 0x07 /* get vf macaddr */
+#define RNP_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+/* mailbox API, version 1.1 VF requests */
+#define RNP_VF_GET_QUEUES 0x09 /* get queue configuration */
+#define RNP_VF_SET_VLAN_STRIP 0x0a /* VF requests PF to set VLAN STRIP */
+#define RNP_VF_REG_RD 0x0b /* vf read reg */
+#define RNP_VF_GET_MTU 0x0c /* vf get pf ethtool setup */
+#define RNP_VF_SET_MTU 0x0d /* vf get pf ethtool setup */
+#define RNP_VF_GET_FW 0x0e /* vf get firmware version */
+#define RNP_VF_GET_LINK 0x10 /* get link status */
+#define RNP_VF_RESET_PF 0x11
+#define RNP_VF_GET_DMA_FRAG 0x12
+#define RNP_VF_SET_DMA_FRAG 0x13
+#define RNP_VF_SET_STATS_CLR 0x14 /* vf set stats status */
+#define RNP_VF_GET_STATS_CLR 0x15
+#define RNP_PF_SET_FCS 0x10 /* PF set fcs status */
+#define RNP_PF_SET_PAUSE 0x11 /* PF set pause status */
+#define RNP_PF_SET_FT_PADDING 0x12 /* PF set ft padding status */
+#define RNP_PF_SET_VLAN_FILTER 0x13 /* PF set vlan filter status */
+#define RNP_PF_SET_VLAN 0x14 /* PF set vlan status */
+#define RNP_PF_SET_LINK 0x15 /* PF set link status */
+#define RNP_PF_SET_MTU 0x16 /* PF set mtu status */
+#define RNP_PF_SET_RESET 0x17 /* PF set reset status */
+#define RNP_PF_LINK_UP (1 << 31)
+#define RNP_PF_REMOVE 0x0f
+/* GET_QUEUES return data indices within the mailbox */
+#define RNP_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define RNP_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define RNP_VF_TRANS_VLAN 3 /* Indication of port vlan */
+#define RNP_VF_DEF_QUEUE 4 /* Default queue offset */
+#define RNP_VF_QUEUE_START 5 /* First queue index */
+#define RNP_VF_QUEUE_DEPTH 6 /* ring depth */
+/* length of permanent address message returned from PF */
+#define RNP_VF_PERMADDR_MSG_LEN 11
+/* word in permanent address message with the current multicast type */
+#define RNP_VF_MC_TYPE_WORD 3
+#define RNP_VF_DMA_VERSION_WORD 4
+#define RNP_VF_VLAN_WORD 5
+#define RNP_VF_PHY_TYPE_WORD 6
+#define RNP_VF_FW_VERSION_WORD 7
+#define RNP_VF_LINK_STATUS_WORD 8
+#define RNP_VF_AXI_MHZ 9
+#define PF_FEATRURE_VLAN_FILTER BIT(0)
+#define PF_NCSI_EN BIT(1)
+#define RNP_VF_FEATURE 10
+#define RNP_PF_CONTROL_PRING_MSG 0x0100 /* PF control message */
+#define RNP_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define RNP_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+enum MBX_ID {
+	MBX_VF0 = 0,
+	MBX_VF1,
+	MBX_VF2,
+	MBX_VF3,
+	MBX_VF4,
+	MBX_VF5,
+	MBX_VF6,
+	MBX_VF7,
+	MBX_VF8,
+	MBX_VF9,
+	MBX_VF10,
+	MBX_VF11,
+	MBX_VF12,
+	MBX_VF13,
+	MBX_VF14,
+	MBX_VF15,
+	MBX_VF16,
+	MBX_VF17,
+	MBX_VF18,
+	MBX_VF19,
+	MBX_VF20,
+	MBX_VF21,
+	MBX_VF22,
+	MBX_VF23,
+	MBX_VF24,
+	MBX_VF25,
+	MBX_VF26,
+	MBX_VF27,
+	MBX_VF28,
+	MBX_VF29,
+	MBX_VF30,
+	MBX_VF31,
+	MBX_VF32,
+	MBX_VF33,
+	MBX_VF34,
+	MBX_VF35,
+	MBX_VF36,
+	MBX_VF37,
+	MBX_VF38,
+	MBX_VF39,
+	MBX_VF40,
+	MBX_VF41,
+	MBX_VF42,
+	MBX_VF43,
+	MBX_VF44,
+	MBX_VF45,
+	MBX_VF46,
+	MBX_VF47,
+	MBX_VF48,
+	MBX_VF49,
+	MBX_VF50,
+	MBX_VF51,
+	MBX_VF52,
+	MBX_VF53,
+	MBX_VF54,
+	MBX_VF55,
+	MBX_VF56,
+	MBX_VF57,
+	MBX_VF58,
+	MBX_VF59,
+	MBX_VF60,
+	MBX_VF61,
+	MBX_VF62,
+	MBX_VF63,
+	MBX_CM3CPU,
+	MBX_FW = MBX_CM3CPU,
+	MBX_VFCNT
+};
+
+enum PF_STATUS {
+	PF_FCS_STATUS,
+	PF_PAUSE_STATUS,
+	PF_FT_PADDING_STATUS,
+	PF_VLAN_FILTER_STATUS,
+	PF_SET_VLAN_STATUS,
+	PF_SET_LINK_STATUS,
+	PF_SET_MTU,
+	PF_SET_RESET,
+};
+
+s32 rnpgbe_read_mbx(struct rnpgbe_hw *hw, u32 *msg, u16 size, enum MBX_ID);
+s32 rnpgbe_write_mbx(struct rnpgbe_hw *hw, u32 *msg, u16 size, enum MBX_ID);
+s32 rnpgbe_check_for_msg(struct rnpgbe_hw *hw, enum MBX_ID);
+s32 rnpgbe_check_for_ack(struct rnpgbe_hw *hw, enum MBX_ID);
+s32 rnpgbe_check_for_rst(struct rnpgbe_hw *hw, enum MBX_ID);
+s32 rnpgbe_init_mbx_params_pf(struct rnpgbe_hw *hw);
+extern struct rnpgbe_mbx_operations rnpgbe_mbx_ops_generic;
+int rnpgbe_fw_get_macaddr(struct rnpgbe_hw *hw, int pfvfnum, u8 *mac_addr,
+			  int lane);
+int rnpgbe_mbx_fw_reset_phy(struct rnpgbe_hw *hw);
+unsigned int rnpgbe_mbx_change_timeout(struct rnpgbe_hw *hw, int timeout_ms);
+struct rnpgbe_info;
+int rnpgbe_mbx_get_capability(struct rnpgbe_hw *hw, struct rnpgbe_info *info);
+int rnpgbe_mbx_get_eee_capability(struct rnpgbe_hw *hw,
+				  struct rnpgbe_eee_cap *eee_cap);
+int rnpgbe_mbx_link_event_enable(struct rnpgbe_hw *hw, int enable);
+int rnpgbe_mbx_ifup_down(struct rnpgbe_hw *hw, int up);
+int rnpgbe_mbx_tstamps_show(struct rnpgbe_hw *hw, u32 sec, u32 nanosec);
+int rnpgbe_mbx_led_set(struct rnpgbe_hw *hw, int value);
+int rnpgbe_mbx_get_dump(struct rnpgbe_hw *hw, int flags, u32 *data_out,
+			int buflen);
+int rnpgbe_mbx_get_dump_flags(struct rnpgbe_hw *hw);
+int rnpgbe_mbx_set_dump(struct rnpgbe_hw *hw, int flag);
+int rnpgbe_mbx_sfp_write(struct rnpgbe_hw *hw, int sfp_addr, int reg, short v);
+int rnpgbe_mbx_sfp_module_eeprom_info(struct rnpgbe_hw *hw, int sfp_addr,
+				      int reg, int data_len, u8 *buf);
+int rnpgbe_mbx_get_temp(struct rnpgbe_hw *hw, int *voltage);
+int rnpgbe_mbx_phy_pause_set(struct rnpgbe_hw *hw, u32 pause_mode);
+int rnpgbe_mbx_phy_link_set(struct rnpgbe_hw *hw, int adv, int autoneg,
+			    int speed, int duplex, int mdix_ctrl);
+int rnpgbe_mbx_phy_pause_get(struct rnpgbe_hw *hw, u32 *pause_mode);
+int rnpgbe_mbx_phy_eee_set(struct rnpgbe_hw *hw, u32 tx_lpi_timer,
+			   u32 local_eee);
+int rnpgbe_maintain_req(struct rnpgbe_hw *hw, int cmd, int arg0,
+			int req_data_bytes, int reply_bytes,
+			dma_addr_t dma_phy_addr);
+int rnpgbe_mbx_get_lane_stat(struct rnpgbe_hw *hw);
+int rnpgbe_mbx_wol_set(struct rnpgbe_hw *hw, u32 mode);
+int rnpgbe_mbx_gephy_test_set(struct rnpgbe_hw *hw, u32 mode);
+int rnpgbe_mbx_lldp_set(struct rnpgbe_hw *hw, u32 enable);
+int rnpgbe_mbx_lldp_get(struct rnpgbe_hw *hw);
+int rnpgbe_mbx_ifsuspuse(struct rnpgbe_hw *hw, int status);
+int rnpgbe_mbx_ifinsmod(struct rnpgbe_hw *hw, int status);
+int rnpgbe_mbx_ifforce_control_mac(struct rnpgbe_hw *hw, int status);
+int rnpgbe_set_lane_fun(struct rnpgbe_hw *hw, int fun, int value0, int value1,
+			int value2, int value3);
+int rnpgbe_mbx_reg_write(struct rnpgbe_hw *hw, int fw_reg, int value);
+int rnpgbe_mbx_fw_reg_read(struct rnpgbe_hw *hw, int fw_reg);
+int rnpgbe_mbx_force_speed(struct rnpgbe_hw *hw, int speed);
+
+#endif /* _RNPGBE_MBX_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c
new file mode 100755
index 0000000000000000000000000000000000000000..8f06f61eb77bfaa399444683b13e07dada195346
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c
@@ -0,0 +1,1582 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/dma-mapping.h>
+
+#include "rnpgbe.h"
+#include "rnpgbe_mbx.h"
+#include "rnpgbe_mbx_fw.h"
+
+#define RNP_FW_MAILBOX_SIZE RNP_VFMAILBOX_SIZE
+
+#define dbg_here printk(KERN_DEBUG "%s %d\n", __func__, __LINE__)
+
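+/* allocate a request cookie used to match an asynchronous firmware
+ * reply with its request; the reply handler copies up to priv_len
+ * bytes of reply data into cookie->priv and wakes the waiter, the
+ * default completion timeout is 30 seconds
+ */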
+static struct mbx_req_cookie *mbx_cookie_zalloc(int priv_len)
+{
+	struct mbx_req_cookie *cookie =
+		kzalloc(sizeof(*cookie) + priv_len, GFP_KERNEL);
+
+	if (cookie) {
+		cookie->timeout_jiffes = 30 * HZ;
+		cookie->magic = COOKIE_MAGIC;
+		cookie->priv_len = priv_len;
+	}
+
+	return cookie;
+}
+
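+/* post a request to the firmware mailbox without waiting for a reply;
+ * the mailbox mutex serialises access and the write is attempted up to
+ * three times before giving up
+ */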
+static int rnpgbe_mbx_write_posted_locked(struct rnpgbe_hw *hw,
+					  struct mbx_fw_cmd_req *req)
+{
+	int err = 0;
+	int retry = 3;
+
+	/* if pcie is off, nothing to do */
+	if (pci_channel_offline(hw->pdev))
+		return -EIO;
+
+	if (mutex_lock_interruptible(&hw->mbx.lock)) {
+		rnpgbe_err("[%s] get mbx lock failed opcode:0x%x\n", __func__,
+			   req->opcode);
+		return -EAGAIN;
+	}
+
+	rnpgbe_logd(LOG_MBX_LOCK, "%s %d lock:%p hw:%p opcode:0x%x\n", __func__,
+		    hw->pfvfnum, &hw->mbx.lock, hw, req->opcode);
+
+try_again:
+	retry--;
+	if (retry < 0) {
+		mutex_unlock(&hw->mbx.lock);
+		rnpgbe_err("%s: write_posted failed! err:0x%x opcode:0x%x\n",
+			   __func__, err, req->opcode);
+		return -EIO;
+	}
+
+	err = hw->mbx.ops.write_posted(
+		hw, (u32 *)req, (req->datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+	if (err)
+		goto try_again;
+
+	mutex_unlock(&hw->mbx.lock);
+
+	return err;
+}
+
+/* force the firmware to report a link event to the driver */
+static void rnpgbe_link_stat_mark_reset(struct rnpgbe_hw *hw)
+{
+	wr32(hw, RNP_DMA_DUMY, 0xa0000000);
+}
+
+static void rnpgbe_link_stat_mark_disable(struct rnpgbe_hw *hw)
+{
+	wr32(hw, RNP_DMA_DUMY, 0);
+}
+
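+/* send a request to the firmware and sleep on the cookie wait queue
+ * until the mailbox interrupt handler delivers the reply or the cookie
+ * timeout expires; only used when the mailbox interrupt is enabled
+ */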
+static int rnpgbe_mbx_fw_post_req(struct rnpgbe_hw *hw,
+				  struct mbx_fw_cmd_req *req,
+				  struct mbx_req_cookie *cookie)
+{
+	int err = 0;
+	struct rnpgbe_adapter *adpt = hw->back;
+
+	/* if pcie is off, nothing to do */
+	if (pci_channel_offline(hw->pdev))
+		return -EIO;
+
+	cookie->errcode = 0;
+	cookie->done = 0;
+	init_waitqueue_head(&cookie->wait);
+
+	if (mutex_lock_interruptible(&hw->mbx.lock)) {
+		rnpgbe_err("[%s] get mbx lock failed opcode:0x%x\n", __func__,
+			   req->opcode);
+		return -EAGAIN;
+	}
+
+	rnpgbe_logd(LOG_MBX_LOCK, "%s %d lock:%p hw:%p opcode:0x%x\n", __func__,
+		    hw->pfvfnum, &hw->mbx.lock, hw, req->opcode);
+
+	err = rnpgbe_write_mbx(hw, (u32 *)req,
+			       (req->datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+	if (err) {
+		rnpgbe_err("rnpgbe_write_mbx failed! err:%d opcode:0x%x\n", err,
+			   req->opcode);
+		mutex_unlock(&hw->mbx.lock);
+		return err;
+	}
+
+	if (cookie->timeout_jiffes != 0) {
+	retry:
+		err = wait_event_interruptible_timeout(cookie->wait,
+						       cookie->done == 1,
+						       cookie->timeout_jiffes);
+		if (err == -ERESTARTSYS)
+			goto retry;
+		if (err == 0) {
+			rnpgbe_err(
+				"[%s] %s failed! pfvfnum:0x%x hw:%p timeout err:%d opcode:%x\n",
+				adpt->name, __func__, hw->pfvfnum, hw, err,
+				req->opcode);
+			err = -ETIME;
+		} else {
+			err = 0;
+		}
+	} else {
+		wait_event_interruptible(cookie->wait, cookie->done == 1);
+	}
+
+	mutex_unlock(&hw->mbx.lock);
+
+	if (cookie->errcode)
+		err = cookie->errcode;
+
+	return err;
+}
+
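+/* polled request/reply path used when the mailbox interrupt is not
+ * available: write the request, then read replies until one with a
+ * matching opcode arrives (at most three reads)
+ */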
+static int rnpgbe_fw_send_cmd_wait(struct rnpgbe_hw *hw,
+				   struct mbx_fw_cmd_req *req,
+				   struct mbx_fw_cmd_reply *reply)
+{
+	int err;
+	int retry_cnt = 3;
+
+	if (!hw || !req || !reply || !hw->mbx.ops.read_posted) {
+		printk(KERN_DEBUG "error: hw:%p req:%p reply:%p\n", hw, req,
+		       reply);
+		return -EINVAL;
+	}
+
+	/* if pcie is off, nothing to do */
+	if (pci_channel_offline(hw->pdev))
+		return -EIO;
+
+	if (mutex_lock_interruptible(&hw->mbx.lock)) {
+		rnpgbe_err("[%s] get mbx lock failed opcode:0x%x\n", __func__,
+			   req->opcode);
+		return -EAGAIN;
+	}
+
+	rnpgbe_logd(LOG_MBX_LOCK, "%s %d lock:%p hw:%p opcode:0x%x\n", __func__,
+		    hw->pfvfnum, &hw->mbx.lock, hw, req->opcode);
+	err = hw->mbx.ops.write_posted(
+		hw, (u32 *)req, (req->datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+	if (err) {
+		rnpgbe_err("%s: write_posted failed! err:0x%x opcode:0x%x\n",
+			   __func__, err, req->opcode);
+		mutex_unlock(&hw->mbx.lock);
+		return err;
+	}
+
+retry:
+	retry_cnt--;
+	if (retry_cnt < 0)
+		return -EIO;
+
+	err = hw->mbx.ops.read_posted(hw, (u32 *)reply, sizeof(*reply) / 4,
+				      MBX_FW);
+	if (err) {
+		rnpgbe_err("%s: read_posted failed! err:0x%x opcode:0x%x\n",
+			   __func__, err, req->opcode);
+		mutex_unlock(&hw->mbx.lock);
+		return err;
+	}
+	if (reply->opcode != req->opcode)
+		goto retry;
+
+	mutex_unlock(&hw->mbx.lock);
+
+	if (reply->error_code) {
+		rnpgbe_err("%s: reply err:0x%x req:0x%x\n", __func__,
+			   reply->error_code, req->opcode);
+		return -reply->error_code;
+	}
+	return 0;
+}
+
+int rnpgbe_mbx_get_lane_stat(struct rnpgbe_hw *hw)
+{
+	int err = 0;
+	struct mbx_fw_cmd_req req;
+	struct rnpgbe_adapter *adpt = hw->back;
+	struct lane_stat_data *st;
+	struct mbx_req_cookie *cookie = NULL;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+
+	if (hw->mbx.other_irq_enabled) {
+		cookie = mbx_cookie_zalloc(sizeof(struct lane_stat_data));
+
+		if (!cookie) {
+			rnpgbe_err("%s: no memory\n", __func__);
+			return -ENOMEM;
+		}
+
+		st = (struct lane_stat_data *)cookie->priv;
+
+		build_get_lane_status_req(&req, hw->nr_lane, cookie);
+
+		err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+
+		if (err) {
+			rnpgbe_err("%s: error:%d\n", __func__, err);
+			goto quit;
+		}
+	} else {
+		memset(&reply, 0, sizeof(reply));
+
+		build_get_lane_status_req(&req, hw->nr_lane, &req);
+		err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+		if (err) {
+			rnpgbe_err("%s: 1 error:%d\n", __func__, err);
+			goto quit;
+		}
+		st = (struct lane_stat_data *)&(reply.data);
+	}
+
+	hw->phy_type = st->phy_type;
+	hw->speed = adpt->speed = st->speed;
+	if (st->is_sgmii) {
+		adpt->phy_addr = st->phy_addr;
+	} else {
+		adpt->sfp.fault = st->sfp.fault;
+		adpt->sfp.los = st->sfp.los;
+		adpt->sfp.mod_abs = st->sfp.mod_abs;
+		adpt->sfp.tx_dis = st->sfp.tx_dis;
+	}
+	adpt->si.main = st->si_main;
+	adpt->si.pre = st->si_pre;
+	adpt->si.post = st->si_post;
+	adpt->si.tx_boost = st->si_tx_boost;
+	adpt->link_traing = st->link_traing;
+	adpt->fec = st->fec;
+	hw->is_sgmii = st->is_sgmii;
+	hw->pci_gen = st->pci_gen;
+	hw->pci_lanes = st->pci_lanes;
+	adpt->speed = st->speed;
+	adpt->hw.link = st->linkup;
+	hw->is_backplane = st->is_backplane;
+	hw->supported_link = st->supported_link;
+	hw->advertised_link = st->advertised_link;
+	hw->tp_mdx = st->tp_mdx;
+
+	rnpgbe_logd(
+		LOG_MBX_LINK_STAT,
+		"%s:pma_type:0x%x phy_type:0x%x,linkup:%d duplex:%d auton:%d "
+		"fec:%d an:%d lt:%d is_sgmii:%d supported_link:0x%x, backplane:%d "
+		"speed:%d sfp_connector:0x%x adv:0x%x\n",
+		adpt->name, st->pma_type, st->phy_type, st->linkup, st->duplex,
+		st->autoneg, st->fec, st->an, st->link_traing, st->is_sgmii,
+		hw->supported_link, hw->is_backplane, st->speed,
+		st->sfp_connector, hw->advertised_link);
+quit:
+	kfree(cookie);
+	return err;
+}
+
+int rnpgbe_mbx_fw_reset_phy(struct rnpgbe_hw *hw)
+{
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+	int ret;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	if (hw->mbx.other_irq_enabled) {
+		struct mbx_req_cookie *cookie = mbx_cookie_zalloc(0);
+
+		if (!cookie)
+			return -ENOMEM;
+
+		build_reset_phy_req(&req, cookie);
+
+		ret = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+		kfree(cookie);
+		return ret;
+
+	} else {
+		build_reset_phy_req(&req, &req);
+		return rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+	}
+}
+
+int rnpgbe_maintain_req(struct rnpgbe_hw *hw, int cmd, int arg0,
+			int req_data_bytes, int reply_bytes,
+			dma_addr_t dma_phy_addr)
+{
+	int err;
+	struct mbx_req_cookie *cookie = NULL;
+
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	cookie = mbx_cookie_zalloc(0);
+	if (!cookie)
+		return -ENOMEM;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+	cookie->timeout_jiffes = 60 * HZ;
+
+	build_maintain_req(&req, cookie, cmd, arg0, req_data_bytes, reply_bytes,
+			   dma_phy_addr & 0xffffffff,
+			   (dma_phy_addr >> 32) & 0xffffffff);
+
+	if (hw->mbx.other_irq_enabled) {
+		err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+	} else {
+		int old_mbx_timeout = hw->mbx.timeout;
+
+		hw->mbx.timeout =
+			(60 * 1000 * 1000) / hw->mbx.usec_delay; /* wait 60s */
+		err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+		hw->mbx.timeout = old_mbx_timeout;
+	}
+
+	kfree(cookie);
+
+	return (err) ? -EIO : 0;
+}
+
+int rnpgbe_fw_get_macaddr(struct rnpgbe_hw *hw, int pfvfnum, u8 *mac_addr,
+			  int nr_lane)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	rnpgbe_dbg("%s: pfvfnum:0x%x nr_lane:%d\n", __func__, pfvfnum, nr_lane);
+
+	if (!mac_addr) {
+		rnpgbe_err("%s: mac_addr is null\n", __func__);
+		return -EINVAL;
+	}
+
+	if (hw->mbx.other_irq_enabled) {
+		struct mbx_req_cookie *cookie =
+			mbx_cookie_zalloc(sizeof(reply.mac_addr));
+		struct mac_addr *mac;
+
+		if (!cookie)
+			return -ENOMEM;
+
+		mac = (struct mac_addr *)cookie->priv;
+
+		build_get_macaddress_req(&req, 1 << nr_lane, pfvfnum, cookie);
+
+		err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+		if (err) {
+			kfree(cookie);
+			return err;
+		}
+
+		if ((1 << nr_lane) & mac->lanes)
+			memcpy(mac_addr, mac->addrs[nr_lane].mac, 6);
+
+		kfree(cookie);
+		return 0;
+
+	} else {
+		build_get_macaddress_req(&req, 1 << nr_lane, pfvfnum, &req);
+
+		err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+		if (err) {
+			rnpgbe_err("%s: failed. err:%d\n", __func__, err);
+			return err;
+		}
+
+		if ((1 << nr_lane) & reply.mac_addr.lanes) {
+			memcpy(mac_addr, reply.mac_addr.addrs[nr_lane].mac, 6);
+			return 0;
+		}
+	}
+
+	return -ENODATA;
+}
+
+static int rnpgbe_mbx_sfp_read(struct rnpgbe_hw *hw, int sfp_i2c_addr, int reg,
+			       int cnt, u8 *out_buf)
+{
+	struct mbx_fw_cmd_req req;
+	int err = -EIO;
+	int nr_lane = hw->nr_lane;
+
+	if ((cnt > MBX_SFP_READ_MAX_CNT) || !out_buf) {
+		rnpgbe_err("%s: cnt:%d should <= %d out_buf:%p\n", __func__,
+			   cnt, MBX_SFP_READ_MAX_CNT, out_buf);
+		return -EINVAL;
+	}
+
+	memset(&req, 0, sizeof(req));
+
+	if (hw->mbx.other_irq_enabled) {
+		struct mbx_req_cookie *cookie = mbx_cookie_zalloc(cnt);
+
+		if (!cookie)
+			return -ENOMEM;
+
+		build_mbx_sfp_read(&req, nr_lane, sfp_i2c_addr, reg, cnt,
+				   cookie);
+
+		err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+		if (err) {
+			kfree(cookie);
+			return err;
+		}
+		memcpy(out_buf, cookie->priv, cnt);
+		err = 0;
+		kfree(cookie);
+	} else {
+		struct mbx_fw_cmd_reply reply;
+
+		memset(&reply, 0, sizeof(reply));
+		build_mbx_sfp_read(&req, nr_lane, sfp_i2c_addr, reg, cnt,
+				   &reply);
+
+		err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+		if (err == 0)
+			memcpy(out_buf, reply.sfp_read.value, cnt);
+	}
+
+	return err;
+}
+
+int rnpgbe_mbx_sfp_module_eeprom_info(struct rnpgbe_hw *hw, int sfp_addr,
+				      int reg, int data_len, u8 *buf)
+{
+	int left = data_len;
+	int cnt, err;
+
+	do {
+		cnt = (left > MBX_SFP_READ_MAX_CNT) ? MBX_SFP_READ_MAX_CNT :
+						      left;
+		err = rnpgbe_mbx_sfp_read(hw, sfp_addr, reg, cnt, buf);
+		if (err) {
+			rnpgbe_err("%s: error:%d\n", __func__, err);
+			return err;
+		}
+		reg += cnt;
+		buf += cnt;
+		left -= cnt;
+	} while (left > 0);
+
+	return 0;
+}
+
+int rnpgbe_mbx_sfp_write(struct rnpgbe_hw *hw, int sfp_addr, int reg, short v)
+{
+	struct mbx_fw_cmd_req req;
+	int err;
+	int nr_lane = hw->nr_lane;
+
+	memset(&req, 0, sizeof(req));
+
+	build_mbx_sfp_write(&req, nr_lane, sfp_addr, reg, v);
+
+	err = rnpgbe_mbx_write_posted_locked(hw, &req);
+	return err;
+}
+
+int rnpgbe_mbx_fw_reg_read(struct rnpgbe_hw *hw, int fw_reg)
+{
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+	int err, ret = 0xffffffff;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	if (hw->fw_version < 0x00050200)
+		return -EOPNOTSUPP;
+
+	if (hw->mbx.other_irq_enabled) {
+		struct mbx_req_cookie *cookie =
+			mbx_cookie_zalloc(sizeof(reply.r_reg));
+
+		if (!cookie)
+			return -ENOMEM;
+
+		build_readreg_req(&req, fw_reg, cookie);
+
+		err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+		if (err) {
+			kfree(cookie);
+			return ret;
+		}
+		ret = ((int *)(cookie->priv))[0];
+		kfree(cookie);
+	} else {
+		build_readreg_req(&req, fw_reg, &reply);
+		err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+		if (err) {
+			rnpgbe_err("%s: failed. err:%d\n", __func__, err);
+			return err;
+		}
+		ret = reply.r_reg.value[0];
+	}
+	return ret;
+}
+
+int rnpgbe_mbx_reg_write(struct rnpgbe_hw *hw, int fw_reg, int value)
+{
+	struct mbx_fw_cmd_req req;
+	int err;
+
+	memset(&req, 0, sizeof(req));
+	if (hw->fw_version < 0x00050200)
+		return -EOPNOTSUPP;
+
+	build_writereg_req(&req, NULL, fw_reg, 4, &value);
+
+	err = rnpgbe_mbx_write_posted_locked(hw, &req);
+	return err;
+}
+
+int rnpgbe_mbx_wol_set(struct rnpgbe_hw *hw, u32 mode)
+{
+	struct mbx_fw_cmd_req req;
+	int err;
+	int nr_lane = hw->nr_lane;
+
+	memset(&req, 0, sizeof(req));
+
+	build_mbx_wol_set(&req, nr_lane, mode);
+
+	err = rnpgbe_mbx_write_posted_locked(hw, &req);
+	return err;
+}
+
+int rnpgbe_mbx_gephy_test_set(struct rnpgbe_hw *hw, u32 mode)
+{
+	struct mbx_fw_cmd_req req;
+	int err;
+	int nr_lane = hw->nr_lane;
+
+	memset(&req, 0, sizeof(req));
+
+	build_mbx_gephy_test_set(&req, nr_lane, mode);
+
+	err = rnpgbe_mbx_write_posted_locked(hw, &req);
+	return err;
+}
+
+int rnpgbe_mbx_lldp_set(struct rnpgbe_hw *hw, u32 enable)
+{
+	struct mbx_fw_cmd_req req;
+	int err;
+	int nr_lane = hw->nr_lane;
+
+	memset(&req, 0, sizeof(req));
+
+	build_mbx_lldp_set(&req, nr_lane, enable);
+
+	err = rnpgbe_mbx_write_posted_locked(hw, &req);
+
+	return err;
+}
+
+int rnpgbe_mbx_lldp_get(struct rnpgbe_hw *hw)
+{
+	int err;
+	struct mbx_req_cookie *cookie = NULL;
+	struct mbx_fw_cmd_reply reply;
+	struct mbx_fw_cmd_req req;
+	struct get_lldp_reply *get_lldp;
+
+	cookie = mbx_cookie_zalloc(sizeof(*get_lldp));
+	if (!cookie)
+		return -ENOMEM;
+	get_lldp = (struct get_lldp_reply *)cookie->priv;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	build_get_lldp_req(&req, cookie, hw->nr_lane);
+
+	if (hw->mbx.other_irq_enabled) {
+		err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+	} else {
+		err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+		get_lldp = &reply.get_lldp;
+	}
+
+	if (err == 0) {
+		hw->lldp_status.enable = get_lldp->value;
+		hw->lldp_status.inteval = get_lldp->inteval;
+	}
+
+	kfree(cookie);
+
+	return err ? -err : 0;
+}
+
+int rnpgbe_mbx_set_dump(struct rnpgbe_hw *hw, int flag)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+
+	memset(&req, 0, sizeof(req));
+	build_set_dump(&req, hw->nr_lane, flag);
+
+	err = rnpgbe_mbx_write_posted_locked(hw, &req);
+
+	return err;
+}
+
+/*
+ * @speed :
+ * 0 : disable force speed
+ * 1000 : force 1000Mbps
+ * 10000 : force 10000Mbps
+ */
+int rnpgbe_mbx_force_speed(struct rnpgbe_hw *hw, int speed)
+{
+	int cmd = 0x01150000;
+
+	if (hw->force_10g_1g_speed_ablity == 0)
+		return -EINVAL;
+
+	if (speed == RNP_LINK_SPEED_10GB_FULL) {
+		cmd = 0x01150002;
+		hw->force_speed_stat = FORCE_SPEED_STAT_10G;
+	} else if (speed == RNP_LINK_SPEED_1GB_FULL) {
+		cmd = 0x01150001;
+		hw->force_speed_stat = FORCE_SPEED_STAT_1G;
+	} else {
+		cmd = 0x01150000;
+		hw->force_speed_stat = FORCE_SPEED_STAT_DISABLED;
+	}
+	return rnpgbe_mbx_set_dump(hw, cmd);
+}
+
+int rnpgbe_mbx_get_dump_flags(struct rnpgbe_hw *hw)
+{
+	int err;
+	struct mbx_req_cookie *cookie = NULL;
+	struct mbx_fw_cmd_reply reply;
+	struct mbx_fw_cmd_req req;
+	struct get_dump_reply *get_dump;
+
+	cookie = mbx_cookie_zalloc(sizeof(*get_dump));
+	if (!cookie)
+		return -ENOMEM;
+	get_dump = (struct get_dump_reply *)cookie->priv;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	build_get_dump_req(&req, cookie, hw->nr_lane, 0, 0, 0);
+
+	if (hw->mbx.other_irq_enabled) {
+		err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+	} else {
+		err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+		get_dump = &reply.get_dump;
+	}
+
+	if (err == 0) {
+		hw->dump.version = get_dump->version;
+		hw->dump.flag = get_dump->flags;
+		hw->dump.len = get_dump->bytes;
+	}
+	kfree(cookie);
+
+	return err ? -err : 0;
+}
+
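+/* fetch the firmware dump chunk by chunk (mbx->share_size bytes at a
+ * time) and copy each chunk out of the CPU/VF shared RAM window into
+ * data_out
+ */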
+int rnpgbe_mbx_get_dump(struct rnpgbe_hw *hw, int flags, u32 *data_out,
+			int bytes)
+{
+	int err;
+	struct mbx_req_cookie *cookie = NULL;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	struct mbx_fw_cmd_reply reply;
+	struct mbx_fw_cmd_req req;
+	struct get_dump_reply *get_dump;
+	int ram_size = mbx->share_size;
+	int i, offset = 0;
+
+	cookie = mbx_cookie_zalloc(sizeof(*get_dump));
+	if (!cookie)
+		return -ENOMEM;
+
+	get_dump = (struct get_dump_reply *)cookie->priv;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	do {
+		build_get_dump_req(&req, cookie, hw->nr_lane, offset, 0,
+				   ram_size);
+
+		if (hw->mbx.other_irq_enabled) {
+			err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+		} else {
+			err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+			get_dump = &reply.get_dump;
+		}
+
+		if (err == 0 && data_out) {
+			int len = ram_size;
+
+			if ((bytes - offset) < ram_size)
+				len = bytes - offset;
+
+			for (i = 0; i < len; i = i + 4)
+				*(data_out + offset / 4 + i / 4) =
+					rnpgbe_rd_reg(hw->hw_addr +
+						      mbx->cpu_vf_share_ram +
+						      i);
+		}
+
+		offset += ram_size;
+
+	} while (offset < bytes);
+
+	kfree(cookie);
+
+	return err ? -err : 0;
+}
+
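+/* update the flash image through the CPU/VF shared RAM window in
+ * mbx->share_size chunks; when the image is larger than one chunk the
+ * header block is skipped first (and erased on n210 parts) and written
+ * last
+ */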
+int rnp500_fw_update(struct rnpgbe_hw *hw, int partition, const u8 *fw_bin,
+		     int bytes)
+{
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	int err = 0;
+	int offset = 0, ram_size = mbx->share_size;
+	struct mbx_req_cookie *cookie = NULL;
+
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	int i;
+	u32 *msg = (u32 *)fw_bin;
+
+	cookie = mbx_cookie_zalloc(0);
+	if (!cookie) {
+		dev_err(&hw->pdev->dev, "%s: no memory:%d!", __func__, 0);
+		return -ENOMEM;
+	}
+
+	/* if the image is larger than ram_size, write the header block last */
+	if (bytes > ram_size) {
+		offset += ram_size;
+
+		/* n210 parts must clear the header block first */
+		if ((hw->hw_type == rnpgbe_hw_n210) ||
+		    (hw->hw_type == rnpgbe_hw_n210L)) {
+			memset(&req, 0, sizeof(req));
+			memset(&reply, 0, sizeof(reply));
+
+			for (i = 0; i < ram_size; i = i + 4)
+				rnpgbe_wr_reg(hw->hw_addr + mbx->cpu_vf_share_ram + i,
+						0xffffffff);
+
+			build_fw_update_n500_req(&req, cookie, partition, 0);
+			if (hw->mbx.other_irq_enabled) {
+				err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+			} else {
+				int old_mbx_timeout = hw->mbx.timeout;
+
+				hw->mbx.timeout = (20 * 1000 * 1000) /
+					hw->mbx.usec_delay; /* wait 20s */
+				err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+				hw->mbx.timeout = old_mbx_timeout;
+			}
+		}
+	}
+
+	while (offset < bytes) {
+		memset(&req, 0, sizeof(req));
+		memset(&reply, 0, sizeof(reply));
+
+		for (i = 0; i < ram_size; i = i + 4)
+			rnpgbe_wr_reg(hw->hw_addr + mbx->cpu_vf_share_ram + i,
+				      *(msg + offset / 4 + i / 4));
+
+		build_fw_update_n500_req(&req, cookie, partition, offset);
+		if (hw->mbx.other_irq_enabled) {
+			err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+		} else {
+			int old_mbx_timeout = hw->mbx.timeout;
+
+			hw->mbx.timeout = (20 * 1000 * 1000) /
+					  hw->mbx.usec_delay; /* wait 20s */
+			err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+			hw->mbx.timeout = old_mbx_timeout;
+		}
+
+		if (err)
+			goto out;
+		offset += ram_size;
+	}
+	/* write the header block last */
+	if (bytes > ram_size) {
+		offset = 0;
+		memset(&req, 0, sizeof(req));
+		memset(&reply, 0, sizeof(reply));
+
+		for (i = 0; i < ram_size; i = i + 4)
+			rnpgbe_wr_reg(hw->hw_addr + mbx->cpu_vf_share_ram + i,
+				      *(msg + offset / 4 + i / 4));
+
+		build_fw_update_n500_req(&req, cookie, partition, offset);
+		if (hw->mbx.other_irq_enabled) {
+			err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+		} else {
+			int old_mbx_timeout = hw->mbx.timeout;
+
+			hw->mbx.timeout = (20 * 1000 * 1000) /
+					  hw->mbx.usec_delay; /* wait 20s */
+			err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+			hw->mbx.timeout = old_mbx_timeout;
+		}
+	}
+out:
+	return err ? -err : 0;
+}
+
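+/* DMA based firmware update: copy the image into a coherent buffer and
+ * hand its bus address to the firmware, waiting up to 20 seconds for
+ * completion on the polled path
+ */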
+int rnpgbe_fw_update(struct rnpgbe_hw *hw, int partition, const u8 *fw_bin,
+		     int bytes)
+{
+	int err;
+	struct mbx_req_cookie *cookie = NULL;
+
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	void *dma_buf = NULL;
+	dma_addr_t dma_phy;
+
+	cookie = mbx_cookie_zalloc(0);
+	if (!cookie) {
+		dev_err(&hw->pdev->dev, "%s: no memory:%d!", __func__, 0);
+		return -ENOMEM;
+	}
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	dma_buf =
+		dma_alloc_coherent(&hw->pdev->dev, bytes, &dma_phy, GFP_ATOMIC);
+	if (!dma_buf) {
+		err = -ENOMEM;
+		goto quit;
+	}
+
+	memcpy(dma_buf, fw_bin, bytes);
+
+	build_fw_update_req(&req, cookie, partition, dma_phy & 0xffffffff,
+			    (dma_phy >> 32) & 0xffffffff, bytes);
+	if (hw->mbx.other_irq_enabled) {
+		err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+	} else {
+		int old_mbx_timeout = hw->mbx.timeout;
+
+		hw->mbx.timeout = (20 * 1000 * 1000) / hw->mbx.usec_delay;
+		err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+		hw->mbx.timeout = old_mbx_timeout;
+	}
+
+quit:
+	if (dma_buf)
+		dma_free_coherent(&hw->pdev->dev, bytes, dma_buf, dma_phy);
+
+	kfree(cookie);
+
+	printk(KERN_DEBUG "%s: %s (errcode:%d)\n", __func__,
+	       err ? "failed" : "success", err);
+	return (err) ? -EIO : 0;
+}
+
+int rnpgbe_mbx_link_event_enable(struct rnpgbe_hw *hw, int enable)
+{
+	struct mbx_fw_cmd_reply reply;
+	struct mbx_fw_cmd_req req;
+	int err;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	if (enable)
+		wr32(hw, RNP_DMA_DUMY, 0xa0000000);
+
+	build_link_set_event_mask(&req, BIT(EVT_LINK_UP),
+				  (enable & 1) << EVT_LINK_UP, &req);
+
+	err = rnpgbe_mbx_write_posted_locked(hw, &req);
+	if (!enable)
+		wr32(hw, RNP_DMA_DUMY, 0);
+
+	return err;
+}
+
+int rnpgbe_fw_get_capablity(struct rnpgbe_hw *hw, struct phy_abilities *abil)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+	build_phy_abalities_req(&req, &req);
+	err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+
+	if (err == 0)
+		memcpy(abil, &reply.phy_abilities, sizeof(*abil));
+
+	return err;
+}
+
+static int to_mac_type(struct phy_abilities *ability)
+{
+	int lanes = hweight_long(ability->lane_mask);
+
+	if ((ability->phy_type == PHY_TYPE_40G_BASE_KR4) ||
+	    (ability->phy_type == PHY_TYPE_40G_BASE_LR4) ||
+	    (ability->phy_type == PHY_TYPE_40G_BASE_CR4) ||
+	    (ability->phy_type == PHY_TYPE_40G_BASE_SR4)) {
+		if (lanes == 1)
+			return rnpgbe_mac_n10g_x8_40G;
+		else
+			return rnpgbe_mac_n10g_x8_10G;
+
+	} else if ((ability->phy_type == PHY_TYPE_10G_BASE_KR) ||
+		   (ability->phy_type == PHY_TYPE_10G_BASE_LR) ||
+		   (ability->phy_type == PHY_TYPE_10G_BASE_ER) ||
+		   (ability->phy_type == PHY_TYPE_10G_BASE_SR)) {
+		if (lanes == 1)
+			return rnpgbe_mac_n10g_x2_10G;
+		else if (lanes == 2)
+			return rnpgbe_mac_n10g_x4_10G;
+		else
+			return rnpgbe_mac_n10g_x8_10G;
+
+	} else if (ability->phy_type == PHY_TYPE_1G_BASE_KX) {
+		return rnpgbe_mac_n10l_x8_1G;
+
+	} else if (ability->phy_type == PHY_TYPE_SGMII) {
+		return rnpgbe_mac_n10l_x8_1G;
+	}
+	return rnpgbe_mac_unknown;
+}
+
+int rnpgbe_set_lane_fun(struct rnpgbe_hw *hw, int fun, int value0, int value1,
+			int value2, int value3)
+{
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+	build_set_lane_fun(&req, hw->nr_lane, fun, value0, value1, value2,
+			   value3);
+
+	return rnpgbe_mbx_write_posted_locked(hw, &req);
+}
+
+int rnpgbe_mbx_ifinsmod(struct rnpgbe_hw *hw, int status)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	build_ifinsmod(&req, hw->driver_version, status);
+
+	if (mutex_lock_interruptible(&hw->mbx.lock))
+		return -EAGAIN;
+
+	if (status) {
+		err = hw->mbx.ops.write_posted(
+			hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4,
+			MBX_FW);
+	} else {
+		err = hw->mbx.ops.write(hw, (u32 *)&req,
+					(req.datalen + MBX_REQ_HDR_LEN) / 4,
+					MBX_FW);
+	}
+
+	mutex_unlock(&hw->mbx.lock);
+
+	rnpgbe_logd(LOG_MBX_IFUP_DOWN, "%s: lane:%d status:%d\n", __func__,
+		    hw->nr_lane, status);
+
+	return err;
+}
+
+int rnpgbe_mbx_ifsuspuse(struct rnpgbe_hw *hw, int status)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	build_ifsuspuse(&req, hw->nr_lane, status);
+
+	if (mutex_lock_interruptible(&hw->mbx.lock))
+		return -EAGAIN;
+
+	err = hw->mbx.ops.write_posted(
+		hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+
+	mutex_unlock(&hw->mbx.lock);
+
+	rnpgbe_logd(LOG_MBX_IFUP_DOWN, "%s: lane:%d status:%d\n", __func__,
+		    hw->nr_lane, status);
+
+	return err;
+}
+
+int rnpgbe_mbx_ifforce_control_mac(struct rnpgbe_hw *hw, int status)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	build_ifforce(&req, hw->nr_lane, status);
+
+	if (mutex_lock_interruptible(&hw->mbx.lock))
+		return -EAGAIN;
+
+	err = hw->mbx.ops.write_posted(
+		hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+
+	mutex_unlock(&hw->mbx.lock);
+
+	rnpgbe_logd(LOG_MBX_IFUP_DOWN, "%s: lane:%d status:%d\n", __func__,
+		    hw->nr_lane, status);
+
+	return err;
+}
+
+int rnpgbe_mbx_tstamps_show(struct rnpgbe_hw *hw, u32 sec, u32 nanosec)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	build_tstamp_show(&req, sec, nanosec);
+
+	if (mutex_lock_interruptible(&hw->mbx.lock))
+		return -EAGAIN;
+
+	err = hw->mbx.ops.write_posted(
+		hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+
+	mutex_unlock(&hw->mbx.lock);
+
+	return err;
+}
+
+int rnpgbe_mbx_ifup_down(struct rnpgbe_hw *hw, int up)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	build_ifup_down(&req, hw->nr_lane, up);
+
+	if (mutex_lock_interruptible(&hw->mbx.lock))
+		return -EAGAIN;
+
+	err = hw->mbx.ops.write_posted(
+		hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+
+	mutex_unlock(&hw->mbx.lock);
+
+	rnpgbe_logd(LOG_MBX_IFUP_DOWN, "%s: lane:%d up:%d\n", __func__,
+		    hw->nr_lane, up);
+
+	if (up)
+		rnpgbe_link_stat_mark_reset(hw);
+
+	return err;
+}
+
+int rnpgbe_mbx_led_set(struct rnpgbe_hw *hw, int value)
+{
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	build_led_set(&req, hw->nr_lane, value, &reply);
+
+	return rnpgbe_mbx_write_posted_locked(hw, &req);
+}
+
+int rnpgbe_mbx_get_eee_capability(struct rnpgbe_hw *hw,
+				  struct rnpgbe_eee_cap *eee_cap)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	build_phy_eee_abalities_req(&req, &req);
+
+	err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+
+	if (err == 0) {
+		memcpy(eee_cap, &reply.phy_eee_abilities, sizeof(*eee_cap));
+		return 0;
+	}
+
+	return -EIO;
+}
+
+int rnpgbe_mbx_phy_eee_set(struct rnpgbe_hw *hw, u32 tx_lpi_timer,
+			   u32 local_eee)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+
+	memset(&req, 0, sizeof(req));
+
+	build_phy_eee_set(&req, local_eee, tx_lpi_timer, hw->nr_lane);
+
+	if (mutex_lock_interruptible(&hw->mbx.lock))
+		return -EAGAIN;
+
+	err = hw->mbx.ops.write_posted(
+		hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+
+	mutex_unlock(&hw->mbx.lock);
+
+	return err;
+}
+
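+/* query the firmware for the PHY/board abilities (up to three
+ * attempts) and derive the hw capability fields from the nic_mode bits
+ * and the reported firmware version
+ */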
+int rnpgbe_mbx_get_capability(struct rnpgbe_hw *hw, struct rnpgbe_info *info)
+{
+	int err;
+	struct phy_abilities ablity;
+	int try_cnt = 3;
+
+	memset(&ablity, 0, sizeof(ablity));
+	rnpgbe_link_stat_mark_disable(hw);
+
+	while (try_cnt--) {
+		err = rnpgbe_fw_get_capablity(hw, &ablity);
+		if (err == 0 && info) {
+			hw->lane_mask = ablity.lane_mask & 0xf;
+			info->mac = to_mac_type(&ablity);
+			info->adapter_cnt = hweight_long(hw->lane_mask);
+			hw->sfc_boot = (ablity.nic_mode & 0x1) ? 1 : 0;
+			hw->pxe_en = (ablity.nic_mode & 0x2) ? 1 : 0;
+			hw->ncsi_en = (ablity.nic_mode & 0x4) ? 1 : 0;
+			hw->pfvfnum = ablity.pfnum;
+			hw->speed = ablity.speed;
+			hw->nr_lane = 0; /* PF1 */
+			hw->fw_version = ablity.fw_version;
+			hw->mac_type = info->mac;
+			hw->phy_type = ablity.phy_type;
+			hw->axi_mhz = ablity.axi_mhz;
+			hw->port_ids = ablity.port_ids;
+			hw->bd_uid = ablity.bd_uid;
+			hw->phy_id = ablity.phy_id;
+
+			if ((hw->fw_version >= 0x00050201) &&
+			    (ablity.speed == SPEED_10000)) {
+				hw->force_speed_stat =
+					FORCE_SPEED_STAT_DISABLED;
+				hw->force_10g_1g_speed_ablity = 1;
+			}
+			if (hw->fw_version >= 0x0001012C) {
+				/* this version can get wol_en from hw */
+				hw->wol = ablity.wol_status & 0xff;
+				hw->wol_en = ablity.wol_status & 0x100;
+			} else {
+				/* older firmware: only pf0 or ncsi-enabled ports can wol */
+				hw->wol = ablity.wol_status & 0xff;
+				if ((hw->ncsi_en) || (!ablity.pfnum))
+					hw->wol_en = 1;
+			}
+			/* 0.1.5.0 can get force status from fw */
+			if (hw->fw_version >= 0x00010500) {
+				hw->force_en = ablity.e.force_down_en;
+				hw->force_cap = 1;
+			}
+
+			/* 0.1.6.0 can get trim valid from hw */
+			if (hw->fw_version >= 0x00010600)
+				hw->trim_valid = (ablity.nic_mode & 0x8) ? 1 : 0;
+
+			pr_info("%s: nic-mode:%d mac:%d adpt_cnt:%d lane_mask:0x%x, phy_type:0x%x, pfvfnum:0x%x, fw-version:0x%08x, axi:%d Mhz, port_id:%d bd_uid:0x%08x 0x%x ex-ability:0x%x fs:%d speed:%d\n",
+				__func__, hw->mode, info->mac,
+				info->adapter_cnt, hw->lane_mask, hw->phy_type,
+				hw->pfvfnum, ablity.fw_version, ablity.axi_mhz,
+				ablity.port_id[0], hw->bd_uid, ablity.phy_id,
+				ablity.ext_ablity,
+				hw->force_10g_1g_speed_ablity, ablity.speed);
+			if (info->adapter_cnt != 0)
+				return 0;
+		}
+	}
+
+	dev_err(&hw->pdev->dev, "%s: error!\n", __func__);
+	return -EIO;
+}
+
+int rnpgbe_mbx_get_temp(struct rnpgbe_hw *hw, int *voltage)
+{
+	int err;
+	struct mbx_req_cookie *cookie = NULL;
+	struct mbx_fw_cmd_reply reply;
+	struct mbx_fw_cmd_req req;
+	struct get_temp *temp;
+	int temp_v = 0;
+
+	cookie = mbx_cookie_zalloc(sizeof(*temp));
+	if (!cookie)
+		return -ENOMEM;
+	temp = (struct get_temp *)cookie->priv;
+
+	memset(&req, 0, sizeof(req));
+
+	build_get_temp(&req, cookie);
+
+	if (hw->mbx.other_irq_enabled) {
+		err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+	} else {
+		memset(&reply, 0, sizeof(reply));
+		err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+		temp = &reply.get_temp;
+	}
+
+	if (voltage)
+		*voltage = temp->volatage;
+	temp_v = temp->temp;
+
+	kfree(cookie);
+	return temp_v;
+}
+
+enum speed_enum {
+	speed_10,
+	speed_100,
+	speed_1000,
+	speed_10000,
+	speed_25000,
+	speed_40000,
+
+};
+
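+/* encode the current link state into the RNP_DMA_DUMY scratch
+ * register: bit 0 link up, bit 4 duplex, bits 8-11 speed code,
+ * bit 6 lldp offload flag and bits 24+ flow-control mode
+ */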
+static void rnpgbe_link_stat_mark(struct rnpgbe_hw *hw, int up)
+{
+	u32 v;
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+
+	v = rd32(hw, RNP_DMA_DUMY);
+	v &= ~(0x0f000f11);
+	v |= 0xa0000000;
+	if (up) {
+		v |= BIT(0);
+		switch (hw->speed) {
+		case 10:
+			v |= (speed_10 << 8);
+			break;
+		case 100:
+			v |= (speed_100 << 8);
+			break;
+		case 1000:
+			v |= (speed_1000 << 8);
+			break;
+		case 10000:
+			v |= (speed_10000 << 8);
+			break;
+		case 25000:
+			v |= (speed_25000 << 8);
+			break;
+		case 40000:
+			v |= (speed_40000 << 8);
+			break;
+		}
+		v |= (hw->duplex << 4);
+		v |= (hw->fc.current_mode << 24);
+	} else {
+		v &= ~BIT(0);
+	}
+	/* we should update lldp_status as well */
+	if (hw->fw_version >= 0x00010500) {
+		if (adapter->priv_flags & RNP_PRIV_FLAG_LLDP)
+			v |= BIT(6);
+		else
+			v &= (~BIT(6));
+	}
+	wr32(hw, RNP_DMA_DUMY, v);
+}
+
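+/* handle an unsolicited request from the firmware; only
+ * LINK_STATUS_EVENT is processed: update the cached link, speed,
+ * duplex (and, on n500/n210, pause, EEE and lldp) state and schedule
+ * the service task to propagate the change
+ */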
+static inline int rnpgbe_mbx_fw_req_handler(struct rnpgbe_adapter *adapter,
+		struct mbx_fw_cmd_req *req)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	switch (req->opcode) {
+	case LINK_STATUS_EVENT:
+		rnpgbe_logd(LOG_LINK_EVENT,
+			    "[LINK_STATUS_EVENT:0x%x] %s:link changed: changed_lane:0x%x, status:0x%x, speed:%d, duplex:%d\n",
+			    req->opcode, adapter->name,
+			    req->link_stat.changed_lanes,
+			    req->link_stat.lane_status, req->link_stat.st[0].speed,
+			    req->link_stat.st[0].duplex);
+
+		if (req->link_stat.lane_status)
+			adapter->hw.link = 1;
+		else
+			adapter->hw.link = 0;
+
+		if ((hw->hw_type == rnpgbe_hw_n500) ||
+		    (hw->hw_type == rnpgbe_hw_n210) ||
+		    (hw->hw_type == rnpgbe_hw_n210L)) {
+			adapter->local_eee = req->link_stat.st[0].local_eee;
+			adapter->partner_eee = req->link_stat.st[0].partner_eee;
+			/* firmware 0.1.5.0 and later reports lldp_status */
+			if (hw->fw_version >= 0x00010500) {
+				if (req->link_stat.st[0].lldp_status)
+					adapter->priv_flags |= RNP_PRIV_FLAG_LLDP;
+				else
+					adapter->priv_flags &= (~RNP_PRIV_FLAG_LLDP);
+			}
+		}
+
+		if (req->link_stat.port_st_magic == SPEED_VALID_MAGIC) {
+			hw->speed = req->link_stat.st[0].speed;
+			hw->duplex = req->link_stat.st[0].duplex;
+			/* n500 can update pause and tp */
+			if ((hw->hw_type == rnpgbe_hw_n500) ||
+			    (hw->hw_type == rnpgbe_hw_n210) ||
+			    (hw->hw_type == rnpgbe_hw_n210L)) {
+				hw->fc.current_mode =
+					req->link_stat.st[0].pause;
+				hw->tp_mdx = req->link_stat.st[0].tp_mdx;
+			}
+
+			switch (hw->speed) {
+			case 10:
+				adapter->speed = RNP_LINK_SPEED_10_FULL;
+				break;
+			case 100:
+				adapter->speed = RNP_LINK_SPEED_100_FULL;
+				break;
+			case 1000:
+				adapter->speed = RNP_LINK_SPEED_1GB_FULL;
+				break;
+			case 10000:
+				adapter->speed = RNP_LINK_SPEED_10GB_FULL;
+				break;
+			case 25000:
+				adapter->speed = RNP_LINK_SPEED_25GB_FULL;
+				break;
+			case 40000:
+				adapter->speed = RNP_LINK_SPEED_40GB_FULL;
+				break;
+			}
+		}
+		if (req->link_stat.lane_status)
+			rnpgbe_link_stat_mark(hw, 1);
+		else
+			rnpgbe_link_stat_mark(hw, 0);
+
+		adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE;
+		break;
+	}
+	rnpgbe_service_event_schedule(adapter);
+
+	return 0;
+}
+
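+/* complete a pending request: copy the reply payload into the cookie
+ * private buffer, record any error code and wake up the waiter
+ */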
+static inline int rnpgbe_mbx_fw_reply_handler(struct rnpgbe_adapter *adapter,
+					      struct mbx_fw_cmd_reply *reply)
+{
+	struct mbx_req_cookie *cookie;
+
+	cookie = reply->cookie;
+	if (!cookie || cookie->magic != COOKIE_MAGIC)
+		return -EIO;
+
+	if (cookie->priv_len > 0)
+		memcpy(cookie->priv, reply->data, cookie->priv_len);
+
+	cookie->done = 1;
+
+	if (reply->flags & FLAGS_ERR)
+		cookie->errcode = reply->error_code;
+	else
+		cookie->errcode = 0;
+
+	wake_up_interruptible(&cookie->wait);
+
+	return 0;
+}
+
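+/* read one message from the firmware mailbox and dispatch it: FLAGS_DD
+ * set means it is a reply to one of our requests, otherwise it is a
+ * firmware-initiated request
+ */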
+static inline int rnpgbe_rcv_msg_from_fw(struct rnpgbe_adapter *adapter)
+{
+	u32 msgbuf[RNP_FW_MAILBOX_SIZE];
+	struct rnpgbe_hw *hw = &adapter->hw;
+	s32 retval;
+
+	retval = rnpgbe_read_mbx(hw, msgbuf, RNP_FW_MAILBOX_SIZE, MBX_FW);
+	if (retval) {
+		printk(KERN_DEBUG "Error receiving message from FW:%d\n",
+		       retval);
+		return retval;
+	}
+
+	rnpgbe_logd(LOG_MBX_MSG_IN,
+		    "msg from fw: msg[0]=0x%08x_0x%08x_0x%08x_0x%08x\n",
+		    msgbuf[0], msgbuf[1], msgbuf[2], msgbuf[3]);
+
+	/* FLAGS_DD set means this is a reply to one of our requests */
+	if (((unsigned short *)msgbuf)[0] & FLAGS_DD) {
+		return rnpgbe_mbx_fw_reply_handler(
+			adapter, (struct mbx_fw_cmd_reply *)msgbuf);
+	} else {
+		return rnpgbe_mbx_fw_req_handler(
+			adapter, (struct mbx_fw_cmd_req *)msgbuf);
+	}
+}
+
+static void rnpgbe_rcv_ack_from_fw(struct rnpgbe_adapter *adapter)
+{
+	/* do-nothing */
+}
+
+int rnpgbe_fw_msg_handler(struct rnpgbe_adapter *adapter)
+{
+	/* == check fw-req */
+	if (!rnpgbe_check_for_msg(&adapter->hw, MBX_FW)) {
+		rnpgbe_rcv_msg_from_fw(adapter);
+	}
+
+	/* process any acks */
+	if (!rnpgbe_check_for_ack(&adapter->hw, MBX_FW))
+		rnpgbe_rcv_ack_from_fw(adapter);
+
+	return 0;
+}
+
+int rnpgbe_mbx_phy_link_set(struct rnpgbe_hw *hw, int adv, int autoneg,
+			    int speed, int duplex, int mdix_ctrl)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+
+	memset(&req, 0, sizeof(req));
+
+	build_phy_link_set(&req, adv, hw->nr_lane, autoneg, speed, duplex,
+			   mdix_ctrl);
+
+	if (mutex_lock_interruptible(&hw->mbx.lock))
+		return -EAGAIN;
+	err = hw->mbx.ops.write_posted(
+		hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+
+	mutex_unlock(&hw->mbx.lock);
+
+	return err;
+}
+
+int rnpgbe_mbx_phy_pause_set(struct rnpgbe_hw *hw, u32 pause_mode)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+
+	memset(&req, 0, sizeof(req));
+
+	build_phy_pause_set(&req, pause_mode, hw->nr_lane);
+
+	if (mutex_lock_interruptible(&hw->mbx.lock))
+		return -EAGAIN;
+	err = hw->mbx.ops.write_posted(
+		hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+
+	mutex_unlock(&hw->mbx.lock);
+
+	return err;
+}
+
+int rnpgbe_mbx_phy_pause_get(struct rnpgbe_hw *hw, u32 *pause_mode)
+{
+	struct mbx_fw_cmd_req req;
+	int err = -EIO;
+	struct mbx_req_cookie *cookie = NULL;
+	struct phy_pause_data *st;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+
+	if (hw->mbx.other_irq_enabled) {
+		cookie = mbx_cookie_zalloc(sizeof(struct phy_pause_data));
+
+		if (!cookie) {
+			rnpgbe_err("%s: no memory\n", __func__);
+			return -ENOMEM;
+		}
+
+		st = (struct phy_pause_data *)cookie->priv;
+		build_get_phy_pause_req(&req, hw->nr_lane, cookie);
+		err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+		if (err) {
+			rnpgbe_err("%s: error:%d\n", __func__, err);
+			goto quit;
+		}
+	} else {
+		memset(&reply, 0, sizeof(reply));
+
+		build_get_phy_pause_req(&req, hw->nr_lane, &req);
+		err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+		if (err) {
+			rnpgbe_err("%s: 1 error:%d\n", __func__, err);
+			goto quit;
+		}
+		st = (struct phy_pause_data *)&(reply.data);
+	}
+
+	*pause_mode = st->pause_mode;
+quit:
+	kfree(cookie);
+	return err;
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
new file mode 100755
index 0000000000000000000000000000000000000000..2303f385fea3af75c90d112ff8e607333734de0a
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
@@ -0,0 +1,1238 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPGBE_MBX_FW_H
+#define _RNPGBE_MBX_FW_H
+
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/bits.h>
+
+#ifndef _PACKED_ALIGN4
+#define _PACKED_ALIGN4 __attribute__((packed, aligned(4)))
+#endif
+
+struct mbx_fw_cmd_reply;
+typedef void (*cookie_cb)(struct mbx_fw_cmd_reply *reply, void *priv);
+
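+/* per-request context used to match an asynchronous firmware reply
+ * with the caller that issued the request
+ */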
+struct mbx_req_cookie {
+	int magic;
+#define COOKIE_MAGIC 0xCE
+	cookie_cb cb;
+	int timeout_jiffes;
+	int errcode;
+
+	wait_queue_head_t wait;
+
+	int done;
+	int priv_len;
+	char priv[64];
+};
+
+enum GENERIC_CMD {
+	/* general */
+	GET_VERSION = 0x0001,
+	READ_REG = 0xFF03,
+	WRITE_REG = 0xFF04,
+	MODIFY_REG = 0xFF07,
+
+	/* virtualization */
+	IFUP_DOWN = 0x0800,
+	SEND_TO_PF = 0x0801,
+	SEND_TO_VF = 0x0802,
+	DRIVER_INSMOD = 0x0803,
+	SYSTEM_SUSPUSE = 0x0804,
+	SYSTEM_FORCE = 0x0805,
+
+	/* link configuration admin commands */
+	GET_PHY_ABALITY = 0x0601,
+	GET_MAC_ADDRES = 0x0602,
+	RESET_PHY = 0x0603,
+	LED_SET = 0x0604,
+	GET_LINK_STATUS = 0x0607,
+	LINK_STATUS_EVENT = 0x0608,
+	SET_LANE_FUN = 0x0609,
+	GET_LANE_STATUS = 0x0610,
+	SFP_SPEED_CHANGED_EVENT = 0x0611,
+	SET_EVENT_MASK = 0x0613,
+	SET_LOOPBACK_MODE = 0x0618,
+	SET_PHY_REG = 0x0628,
+	GET_PHY_REG = 0x0629,
+	PHY_LINK_SET = 0x0630,
+	GET_PHY_STATISTICS = 0x0631,
+	PHY_PAUSE_SET = 0x0632,
+	PHY_PAUSE_GET = 0x0633,
+	PHY_EEE_SET = 0x0636,
+	PHY_EEE_GET = 0x0637,
+
+	/*sfp-module*/
+	SFP_MODULE_READ = 0x0900,
+	SFP_MODULE_WRITE = 0x0901,
+
+	/* fw update */
+	FW_UPDATE = 0x0700,
+	FW_MAINTAIN = 0x0701,
+	FW_UPDATE_N500 = 0x0702,
+	WOL_EN = 0x0910,
+	GET_DUMP = 0x0a00,
+	SET_DUMP = 0x0a10,
+	GET_TEMP = 0x0a11,
+	SET_WOL = 0x0a12,
+	SET_TEST_MODE = 0x0a13,
+	SHOW_TX_STAMP = 0x0a14,
+	LLDP_TX_CTRL = 0x0a15,
+};
+
+enum link_event_mask {
+	EVT_LINK_UP = 1,
+	EVT_NO_MEDIA = 2,
+	EVT_LINK_FAULT = 3,
+	EVT_PHY_TEMP_ALARM = 4,
+	EVT_EXCESSIVE_ERRORS = 5,
+	EVT_SIGNAL_DETECT = 6,
+	EVT_AUTO_NEGOTIATION_DONE = 7,
+	EVT_MODULE_QUALIFICATION_FAILD = 8,
+	EVT_PORT_TX_SUSPEND = 9,
+};
+
+enum pma_type {
+	PHY_TYPE_NONE = 0,
+	PHY_TYPE_1G_BASE_KX,
+	PHY_TYPE_SGMII,
+	PHY_TYPE_10G_BASE_KR,
+	PHY_TYPE_25G_BASE_KR,
+	PHY_TYPE_40G_BASE_KR4,
+	PHY_TYPE_10G_BASE_SR,
+	PHY_TYPE_40G_BASE_SR4,
+	PHY_TYPE_40G_BASE_CR4,
+	PHY_TYPE_40G_BASE_LR4,
+	PHY_TYPE_10G_BASE_LR,
+	PHY_TYPE_10G_BASE_ER,
+};
+
+struct phy_abilities {
+	unsigned char link_stat;
+	unsigned char lane_mask;
+
+	int speed;
+	short phy_type;
+	short nic_mode;
+	short pfnum;
+	unsigned int fw_version;
+	unsigned int axi_mhz;
+	union {
+		unsigned char port_id[4];
+		unsigned int port_ids;
+	};
+	unsigned int bd_uid;
+	int phy_id;
+	int wol_status;
+
+	union {
+		int ext_ablity;
+		struct {
+			unsigned int valid : 1; /* 0 */
+			unsigned int wol_en : 1; /* 1 */
+			unsigned int pci_preset_runtime_en : 1; /* 2 */
+			unsigned int smbus_en : 1; /* 3 */
+			unsigned int ncsi_en : 1; /* 4 */
+			unsigned int rpu_en : 1; /* 5 */
+			unsigned int v2 : 1; /* 6 */
+			unsigned int pxe_en : 1; /* 7 */
+			unsigned int mctp_en : 1; /* 8 */
+			unsigned int yt8614 : 1; /* 9 */
+			unsigned int pci_ext_reset : 1; /* 10 */
+			unsigned int rpu_availble : 1; /* 11 */
+			unsigned int fw_lldp_ablity : 1; /* 12 */
+			unsigned int lldp_enabled : 1; /* 13 */
+			unsigned int only_1g : 1; /* 14 */
+			unsigned int force_down_en: 1;
+		} e;
+	};
+
+} _PACKED_ALIGN4;
+
+enum LOOPBACK_LEVEL {
+	LOOPBACK_DISABLE = 0,
+	LOOPBACK_MAC = 1,
+	LOOPBACK_PCS = 5,
+	LOOPBACK_EXTERNAL = 6,
+};
+enum LOOPBACK_TYPE {
+	/* Tx->Rx */
+	LOOPBACK_TYPE_LOCAL = 0x0,
+};
+
+enum LOOPBACK_FORCE_SPEED {
+	LOOPBACK_FORCE_SPEED_NONE = 0x0,
+	LOOPBACK_FORCE_SPEED_1GBS = 0x1,
+	LOOPBACK_FORCE_SPEED_10GBS = 0x2,
+	LOOPBACK_FORCE_SPEED_40_25GBS = 0x3,
+};
+
+enum PHY_INTERFACE {
+	PHY_INTERNAL_PHY = 0,
+	PHY_EXTERNAL_PHY_MDIO = 1,
+};
+
+/* Table 3-54.  Get link status response (opcode: 0x0607) */
+struct link_stat_data {
+	char phy_type;
+	unsigned char speed;
+#define LNK_STAT_SPEED_UNKOWN 0
+#define LNK_STAT_SPEED_10 1
+#define LNK_STAT_SPEED_100 2
+#define LNK_STAT_SPEED_1000 3
+#define LNK_STAT_SPEED_10000 4
+#define LNK_STAT_SPEED_25000 5
+#define LNK_STAT_SPEED_40000 6
+
+	/* 2 */
+	char link_stat : 1;
+#define LINK_UP 1
+#define LINK_DOWN 0
+
+	char link_fault : 4;
+#define LINK_LINK_FAULT BIT(0)
+#define LINK_TX_FAULT BIT(1)
+#define LINK_RX_FAULT BIT(2)
+#define LINK_REMOTE_FAULT BIT(3)
+
+	char extern_link_stat : 1;
+	char media_availble : 1;
+
+	char rev1 : 1;
+
+	/* 3:ignore */
+	char an_completed : 1;
+	char lp_an_ablity : 1;
+	char parallel_detection_fault : 1;
+	char fec_enabled : 1;
+	char low_power_state : 1;
+	char link_pause_status : 2;
+	char qualified_odule : 1;
+
+	/* 4 */
+	char phy_temp_alarm : 1;
+	char excessive_link_errors : 1;
+	char port_tx_suspended : 2;
+	char force_40G_enabled : 1;
+	char external_25G_phy_err_code : 3;
+#define EXTERNAL_25G_PHY_NOT_PRESENT 1
+#define EXTERNAL_25G_PHY_NVM_CRC_ERR 2
+#define EXTERNAL_25G_PHY_MDIO_ACCESS_FAILD 6
+#define EXTERNAL_25G_PHY_INIT_SUCCED 7
+
+	/* 5 */
+	char loopback_enabled_status : 4;
+#define LOOPBACK_DISABLE 0x0
+#define LOOPBACK_MAC 0x1
+#define LOOPBACK_SERDES 0x2
+#define LOOPBACK_PHY_INTERNAL 0x3
+#define LOOPBACK_PHY_EXTERNAL 0x4
+	char loopback_type_status : 1;
+#define LOCAL_LOOPBACK 0 /* tx->rx */
+#define FAR_END_LOOPBACK 0 /* rx->Tx */
+	char rev3 : 1;
+	char external_dev_power_ability : 2;
+	/* 6-7 */
+	short max_frame_sz;
+	/* 8 */
+	char _25gb_kr_fec_enabled : 1;
+	char _25gb_rs_fec_enabled : 1;
+	char crc_enabled : 1;
+	char rev4 : 5;
+	/* 9 */
+	int link_type; /* same as Phy type */
+	char link_type_ext;
+} _PACKED_ALIGN4;
+
+struct port_stat {
+	u8 phyid;
+
+	u8 duplex : 1;
+	u8 autoneg : 1;
+	u8 fec : 1;
+	u16 speed;
+	u16 pause : 4;
+	u16 local_eee : 3;
+	u16 partner_eee : 3;
+	u16 tp_mdx : 2;
+	u16 lldp_status : 1;
+	u16 revs : 3;
+} __attribute__((packed));
+
+struct phy_pause_data {
+	u32 pause_mode;
+} __attribute__((packed));
+
+struct lane_stat_data {
+	u8 nr_lane;
+	u8 pci_gen : 4;
+	u8 pci_lanes : 4;
+	u8 pma_type;
+	u8 phy_type;
+
+	u16 linkup : 1;
+	u16 duplex : 1;
+	u16 autoneg : 1;
+	u16 fec : 1;
+	u16 an : 1;
+	u16 link_traing : 1;
+	u16 media_availble : 1;
+	u16 is_sgmii : 1;
+	u16 link_fault : 4;
+#define LINK_LINK_FAULT BIT(0)
+#define LINK_TX_FAULT BIT(1)
+#define LINK_RX_FAULT BIT(2)
+#define LINK_REMOTE_FAULT BIT(3)
+	u16 is_backplane : 1;
+	u16 tp_mdx : 2;
+
+	union {
+		u8 phy_addr;
+		struct {
+			u8 mod_abs : 1;
+			u8 fault : 1;
+			u8 tx_dis : 1;
+			u8 los : 1;
+		} sfp;
+	};
+	u8 sfp_connector;
+	u32 speed;
+
+	u32 si_main;
+	u32 si_pre;
+	u32 si_post;
+	u32 si_tx_boost;
+	u32 supported_link;
+	u32 phy_id;
+	u32 advertised_link;
+} __attribute__((packed));
+
+struct yt_phy_statistics {
+	u32 pkg_ib_valid; /* rx crc good and length 64-1518 */
+	u32 pkg_ib_os_good; /* rx crc good and length >1518 */
+	u32 pkg_ib_us_good; /* rx crc good and length <64 */
+	u16 pkg_ib_err; /* rx crc wrong and length 64-1518 */
+	u16 pkg_ib_os_bad; /* rx crc wrong and length >1518 */
+	u16 pkg_ib_frag; /* rx crc wrong and length <64 */
+	u16 pkg_ib_nosfd; /* rx sfd missed */
+	u32 pkg_ob_valid; /* tx crc good and length 64-1518 */
+	u32 pkg_ob_os_good; /* tx crc good and length >1518 */
+	u32 pkg_ob_us_good; /* tx crc good and length <64 */
+	u16 pkg_ob_err; /* tx crc wrong and length 64-1518 */
+	u16 pkg_ob_os_bad; /* tx crc wrong and length >1518 */
+	u16 pkg_ob_frag; /* tx crc wrong and length <64 */
+	u16 pkg_ob_nosfd; /* tx sfd missed */
+} __attribute__((packed));
+
+struct phy_statistics {
+	union {
+		struct yt_phy_statistics yt;
+	};
+} __attribute__((packed));
+/* == flags == */
+#define FLAGS_DD BIT(0) /* driver clear 0, FW must set 1 */
+#define FLAGS_CMP BIT(1) /* driver clear 0, FW must set */
+#define FLAGS_ERR                                                              \
+	BIT(2) /* driver clear 0, FW must set only if it is reporting an error */
+#define FLAGS_LB BIT(9)
+#define FLAGS_RD BIT(10) /* set if an additional buffer has command parameters */
+#define FLAGS_BUF BIT(12) /* set 1 on indirect command */
+#define FLAGS_SI BIT(13) /* do not raise an irq when the command completes */
+#define FLAGS_EI BIT(14) /* interrupt on error */
+#define FLAGS_FE BIT(15) /* flush error */
+
+#ifndef SHM_DATA_MAX_BYTES
+#define SHM_DATA_MAX_BYTES (64 - 2 * 4)
+#endif
+
+#define MBX_REQ_HDR_LEN 24
+#define MBX_REPLYHDR_LEN 16
+#define MBX_REQ_MAX_DATA_LEN (SHM_DATA_MAX_BYTES - MBX_REQ_HDR_LEN)
+#define MBX_REPLY_MAX_DATA_LEN (SHM_DATA_MAX_BYTES - MBX_REPLYHDR_LEN)
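+/* With the sizes above, SHM_DATA_MAX_BYTES is 64 - 8 = 56 bytes, so a request
+ * can carry at most 56 - 24 = 32 bytes of inline data and a reply at most
+ * 56 - 16 = 40 bytes; larger transfers are referenced by DMA address instead
+ * (see reply_lo/reply_hi and the maintain/fw_update commands below).
+ */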
+
+/* TODO: req is little-endian; big-endian hosts should be considered */
+
+struct mbx_fw_cmd_req {
+	unsigned short flags; /* 0-1 */
+	unsigned short opcode; /* 2-3 enum LINK_ADM_CMD */
+	unsigned short datalen; /* 4-5 */
+	unsigned short ret_value; /* 6-7 */
+	union {
+		struct {
+			unsigned int cookie_lo; /* 8-11 */
+			unsigned int cookie_hi; /* 12-15 */
+		};
+		void *cookie;
+	};
+	unsigned int reply_lo; /* 16-19 5dw */
+	unsigned int reply_hi; /* 20-23 */
+	/*=== data === 7dw [24-64] */
+	union {
+		char data[0];
+
+		struct {
+			unsigned int addr;
+			unsigned int bytes;
+		} r_reg;
+
+		struct {
+			unsigned int addr;
+			unsigned int bytes;
+			unsigned int data[4];
+		} w_reg;
+
+		struct {
+			unsigned int lanes;
+		} ptp;
+
+		struct {
+			int lane;
+			int up;
+		} ifup;
+		struct {
+			u32 sec;
+			u32 nanosec;
+
+		} tstamps;
+
+		struct {
+			int lane;
+			int status;
+		} ifinsmod;
+		struct {
+			int lane;
+			int status;
+		} ifforce;
+
+		struct {
+			int lane;
+			int status;
+		} ifsuspuse;
+
+		struct {
+			int nr_lane;
+		} get_lane_st;
+
+		struct {
+			int nr_lane;
+			int func;
+#define LANE_FUN_AN 0
+#define LANE_FUN_LINK_TRAING 1
+#define LANE_FUN_FEC 2
+#define LANE_FUN_SI 3
+#define LANE_FUN_SFP_TX_DISABLE 4
+#define LANE_FUN_PCI_LANE 5
+#define LANE_FUN_PRBS 6
+#define LANE_FUN_SPEED_CHANGE 7
+
+			int value0;
+			int value1;
+			int value2;
+			int value3;
+		} set_lane_fun;
+
+		struct {
+			int flag;
+			int nr_lane;
+		} set_dump;
+
+		struct {
+			int lane;
+			int enable;
+		} wol;
+
+		struct {
+			int lane;
+			int mode;
+		} gephy_test;
+
+		struct {
+			int lane;
+			int op;
+			int enable;
+			int inteval;
+		} lldp_tx;
+
+		struct {
+			unsigned int bytes;
+			unsigned int nr_lane;
+			unsigned int bin_offset;
+			unsigned int no_use;
+		} get_dump;
+
+		struct {
+			unsigned int nr_lane;
+			int value;
+#define LED_IDENTIFY_INACTIVE 0
+#define LED_IDENTIFY_ACTIVE 1
+#define LED_IDENTIFY_ON 2
+#define LED_IDENTIFY_OFF 3
+		} led_set;
+
+		struct {
+			unsigned int addr;
+			unsigned int data;
+			unsigned int mask;
+		} modify_reg;
+
+		struct {
+			unsigned int adv_speed_mask;
+			unsigned int autoneg;
+			unsigned int speed;
+			unsigned int duplex;
+			int nr_lane;
+			unsigned int tp_mdix_ctrl;
+		} phy_link_set;
+
+		struct {
+			unsigned int pause_mode;
+			int nr_lane;
+		} phy_pause_set;
+		struct {
+			unsigned int pause_mode;
+			int nr_lane;
+		} phy_pause_get;
+		struct {
+			u32 local_eee;
+			u32 tx_lpi_timer;
+			int nr_lane;
+		} phy_eee_set;
+		struct {
+			unsigned int nr_lane;
+			unsigned int sfp_adr; /* 0xa0 or 0xa2 */
+			unsigned int reg;
+			unsigned int cnt;
+		} sfp_read;
+
+		struct {
+			unsigned int nr_lane;
+			unsigned int sfp_adr; /* 0xa0 or 0xa2 */
+			unsigned int reg;
+			unsigned int val;
+		} sfp_write;
+
+		struct {
+			unsigned int nr_lane; /* 0-3 */
+		} get_linkstat;
+		struct {
+			unsigned short changed_lanes;
+			unsigned short lane_status;
+			unsigned int port_st_magic;
+#define SPEED_VALID_MAGIC 0xa4a6a8a9
+			struct port_stat st[4];
+		} link_stat; /* FW->RC */
+
+		struct {
+			unsigned short enable_stat;
+			unsigned short event_mask;
+		} stat_event_mask;
+
+		struct { /* set loopback */
+			unsigned char loopback_level;
+			unsigned char loopback_type;
+			unsigned char loopback_force_speed;
+
+			char loopback_force_speed_enable : 1;
+		} loopback;
+
+		struct {
+			int cmd;
+			int arg0;
+			int req_bytes;
+			int reply_bytes;
+			int ddr_lo;
+			int ddr_hi;
+		} maintain;
+
+		struct { /* set phy register */
+			char phy_interface;
+			union {
+				char page_num;
+				char external_phy_addr;
+			};
+			int phy_reg_addr;
+			int phy_w_data;
+			int reg_addr;
+			int w_data;
+			/* recall_qsfp_page: 1 = ignore page_num and reuse the
+			 * last QSFP page value, 0 = use page_num for QSFP
+			 */
+			char recall_qsfp_page : 1;
+			char nr_lane;
+		} set_phy_reg;
+		struct {
+		} get_phy_ablity;
+
+		struct {
+			int lane_mask;
+			int pfvf_num;
+		} get_mac_addr;
+
+		struct {
+			char phy_interface;
+			union {
+				char page_num;
+				char external_phy_addr;
+			};
+			int phy_reg_addr;
+			char nr_lane;
+		} get_phy_reg;
+
+		struct {
+			unsigned int nr_lane;
+		} phy_statistics;
+
+		struct {
+			char paration;
+			unsigned int bytes;
+			unsigned int bin_phy_lo;
+			unsigned int bin_phy_hi;
+		} fw_update;
+	};
+} _PACKED_ALIGN4;
+
+#define EEE_1000BT BIT(2)
+#define EEE_100BT BIT(1)
+
+struct rnpgbe_eee_cap {
+	unsigned int local_capability;
+	unsigned int local_eee;
+	unsigned int partner_eee;
+};
+
+/* firmware -> driver */
+struct mbx_fw_cmd_reply {
+	/* fw must set: DD, CMP, Error(if error), copy value */
+	unsigned short flags;
+	/* from command: LB,RD,VFC,BUF,SI,EI,FE */
+	unsigned short opcode; /* 2-3: copy from req */
+	unsigned short error_code; /* 4-5: 0 if no error */
+	unsigned short datalen; /* 6-7: */
+	union {
+		struct {
+			unsigned int cookie_lo; /* 8-11: */
+			unsigned int cookie_hi; /* 12-15: */
+		};
+		void *cookie;
+	};
+	/* ===== data ==== [16-64] */
+	union {
+		char data[0];
+
+		struct version {
+			unsigned int major;
+			unsigned int sub;
+			unsigned int modify;
+		} version;
+
+		struct {
+			unsigned int value[4];
+		} r_reg;
+
+		struct {
+			unsigned int new_value;
+		} modify_reg;
+
+		struct get_temp {
+			int temp;
+			int volatage;
+		} get_temp;
+
+		struct {
+#define MBX_SFP_READ_MAX_CNT 32
+			char value[MBX_SFP_READ_MAX_CNT];
+		} sfp_read;
+
+		struct mac_addr {
+			int lanes;
+			struct _addr {
+				/*
+				 * for macaddr:01:02:03:04:05:06
+				 * mac-hi=0x01020304 mac-lo=0x05060000
+				 */
+				unsigned char mac[8];
+			} addrs[4];
+		} mac_addr;
+
+		struct get_dump_reply {
+			int flags;
+			int version;
+			int bytes;
+			int data[4];
+		} get_dump;
+
+		struct get_lldp_reply {
+			int value;
+			int inteval;
+		} get_lldp;
+
+		struct rnpgbe_eee_cap phy_eee_abilities;
+		struct lane_stat_data lanestat;
+		struct link_stat_data linkstat;
+		struct phy_abilities phy_abilities;
+		struct phy_statistics phy_statistics;
+	};
+} _PACKED_ALIGN4;
+
+static inline void build_maintain_req(struct mbx_fw_cmd_req *req, void *cookie,
+				      int cmd, int arg0, int req_bytes,
+				      int reply_bytes, u32 dma_phy_lo,
+				      u32 dma_phy_hi)
+{
+	req->flags = 0;
+	req->opcode = FW_MAINTAIN;
+	req->datalen = sizeof(req->maintain);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->maintain.cmd = cmd;
+	req->maintain.arg0 = arg0;
+	req->maintain.req_bytes = req_bytes;
+	req->maintain.reply_bytes = reply_bytes;
+	req->maintain.ddr_lo = dma_phy_lo;
+	req->maintain.ddr_hi = dma_phy_hi;
+}
+
+static inline void build_fw_update_req(struct mbx_fw_cmd_req *req, void *cookie,
+				       int partition, u32 fw_bin_phy_lo,
+				       u32 fw_bin_phy_hi, int fw_bytes)
+{
+	req->flags = 0;
+	req->opcode = FW_UPDATE;
+	req->datalen = sizeof(req->fw_update);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->fw_update.paration = partition;
+	req->fw_update.bytes = fw_bytes;
+	req->fw_update.bin_phy_lo = fw_bin_phy_lo;
+	req->fw_update.bin_phy_hi = fw_bin_phy_hi;
+}
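+/* Illustrative use of the helper above (caller-side names are hypothetical):
+ * with a firmware image already DMA-mapped at dma_addr and fw_size bytes
+ * long, a caller might do
+ *
+ *	struct mbx_fw_cmd_req req = { 0 };
+ *
+ *	build_fw_update_req(&req, cookie, partition,
+ *			    lower_32_bits(dma_addr), upper_32_bits(dma_addr),
+ *			    fw_size);
+ */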
+
+static inline void build_fw_update_n500_req(struct mbx_fw_cmd_req *req,
+					    void *cookie, int partition,
+					    int fw_bytes)
+{
+	req->flags = 0;
+	req->opcode = FW_UPDATE_N500;
+	req->datalen = sizeof(req->fw_update);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->fw_update.paration = partition;
+	req->fw_update.bytes = fw_bytes;
+}
+
+static inline void build_reset_phy_req(struct mbx_fw_cmd_req *req, void *cookie)
+{
+	req->flags = 0;
+	req->opcode = RESET_PHY;
+	req->datalen = 0;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->cookie = cookie;
+}
+
+static inline void build_phy_eee_abalities_req(struct mbx_fw_cmd_req *req,
+					       void *cookie)
+{
+	req->flags = 0;
+	req->opcode = PHY_EEE_GET;
+	req->datalen = 0;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->cookie = cookie;
+}
+
+static inline void build_phy_abalities_req(struct mbx_fw_cmd_req *req,
+					   void *cookie)
+{
+	req->flags = 0;
+	req->opcode = GET_PHY_ABALITY;
+	req->datalen = 0;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->cookie = cookie;
+}
+
+static inline void build_get_macaddress_req(struct mbx_fw_cmd_req *req,
+					    int lane_mask, int pfvfnum,
+					    void *cookie)
+{
+	req->flags = 0;
+	req->opcode = GET_MAC_ADDRES;
+	req->datalen = sizeof(req->get_mac_addr);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+
+	req->get_mac_addr.lane_mask = lane_mask;
+	req->get_mac_addr.pfvf_num = pfvfnum;
+}
+
+static inline void build_version_req(struct mbx_fw_cmd_req *req, void *cookie)
+{
+	req->flags = 0;
+	req->opcode = GET_VERSION;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->datalen = 0;
+	req->cookie = cookie;
+}
+
+/* 7.10.11.8 Read register admin command */
+static inline void build_readreg_req(struct mbx_fw_cmd_req *req, int reg_addr,
+				     void *cookie)
+{
+	req->flags = 0;
+	req->opcode = READ_REG;
+	req->datalen = sizeof(req->r_reg);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->r_reg.addr = reg_addr & ~(3);
+	req->r_reg.bytes = 4;
+}
+
+static inline void mbx_fw_req_set_reply(struct mbx_fw_cmd_req *req,
+					dma_addr_t reply)
+{
+	u64 address = reply;
+
+	req->reply_hi = (address >> 32);
+	req->reply_lo = (address) & 0xffffffff;
+}
+
+/* 7.10.11.9 Write register admin command */
+static inline void build_writereg_req(struct mbx_fw_cmd_req *req, void *cookie,
+				      int reg_addr, int bytes, int value[4])
+{
+	int i;
+
+	req->flags = 0;
+	req->opcode = WRITE_REG;
+	req->datalen = sizeof(req->w_reg);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->w_reg.addr = reg_addr & ~3;
+	req->w_reg.bytes = bytes;
+	for (i = 0; i < bytes / 4; i++)
+		req->w_reg.data[i] = value[i];
+}
+
+/* 7.10.11.10 Modify register admin command */
+static inline void build_modifyreg_req(struct mbx_fw_cmd_req *req, void *cookie,
+				       int reg_addr, int value,
+				       unsigned int mask)
+{
+	req->flags = 0;
+	req->opcode = MODIFY_REG;
+	req->datalen = sizeof(req->modify_reg);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->modify_reg.addr = reg_addr;
+	req->modify_reg.data = value;
+	req->modify_reg.mask = mask;
+}
+
+static inline void build_get_lane_status_req(struct mbx_fw_cmd_req *req,
+					     int nr_lane, void *cookie)
+{
+	req->flags = 0;
+	req->opcode = GET_LANE_STATUS;
+	req->datalen = sizeof(req->get_lane_st);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->get_lane_st.nr_lane = nr_lane;
+}
+
+static inline void build_get_link_status_req(struct mbx_fw_cmd_req *req,
+					     int nr_lane, void *cookie)
+{
+	req->flags = 0;
+	req->opcode = GET_LINK_STATUS;
+	req->datalen = sizeof(req->get_linkstat);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->get_linkstat.nr_lane = nr_lane;
+}
+
+static inline void build_get_temp(struct mbx_fw_cmd_req *req, void *cookie)
+{
+	req->flags = 0;
+	req->opcode = GET_TEMP;
+	req->datalen = 0;
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+}
+static inline void build_get_dump_req(struct mbx_fw_cmd_req *req, void *cookie,
+				      int nr_lane, u32 fw_bin_phy_lo,
+				      u32 fw_bin_phy_hi, int bytes)
+{
+	req->flags = 0;
+	req->opcode = GET_DUMP;
+	req->datalen = sizeof(req->get_dump);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->get_dump.bytes = bytes;
+	req->get_dump.nr_lane = nr_lane;
+	req->get_dump.bin_offset = fw_bin_phy_lo;
+	req->get_dump.no_use = fw_bin_phy_hi;
+}
+
+static inline void build_set_dump(struct mbx_fw_cmd_req *req, int nr_lane,
+				  int flag)
+{
+	req->flags = 0;
+	req->opcode = SET_DUMP;
+	req->datalen = sizeof(req->set_dump);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->set_dump.flag = flag;
+	req->set_dump.nr_lane = nr_lane;
+}
+
+static inline void build_led_set(struct mbx_fw_cmd_req *req,
+				 unsigned int nr_lane, int value, void *cookie)
+{
+	req->flags = 0;
+	req->opcode = LED_SET;
+	req->datalen = sizeof(req->led_set);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->led_set.nr_lane = nr_lane;
+	req->led_set.value = value;
+}
+
+static inline void build_set_lane_fun(struct mbx_fw_cmd_req *req, int nr_lane,
+				      int fun, int value0, int value1,
+				      int value2, int value3)
+{
+	req->flags = 0;
+	req->opcode = SET_LANE_FUN;
+	req->datalen = sizeof(req->set_lane_fun);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->set_lane_fun.func = fun;
+	req->set_lane_fun.nr_lane = nr_lane;
+	req->set_lane_fun.value0 = value0;
+	req->set_lane_fun.value1 = value1;
+	req->set_lane_fun.value2 = value2;
+	req->set_lane_fun.value3 = value3;
+}
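+/* Illustrative use (the meaning of value0..value3 per function is an
+ * assumption): to toggle a lane function such as LANE_FUN_SFP_TX_DISABLE on
+ * lane 0, only value0 would typically carry the on/off flag:
+ *
+ *	build_set_lane_fun(&req, 0, LANE_FUN_SFP_TX_DISABLE, 1, 0, 0, 0);
+ */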
+
+static inline void build_set_phy_reg(struct mbx_fw_cmd_req *req, void *cookie,
+				     enum PHY_INTERFACE phy_inf, char nr_lane,
+				     int reg, int w_data, int recall_qsfp_page)
+{
+	req->flags = 0;
+	req->opcode = SET_PHY_REG;
+	req->datalen = sizeof(req->set_phy_reg);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+
+	req->set_phy_reg.phy_interface = phy_inf;
+	req->set_phy_reg.nr_lane = nr_lane;
+	req->set_phy_reg.phy_reg_addr = reg;
+	req->set_phy_reg.phy_w_data = w_data;
+
+	if (recall_qsfp_page)
+		req->set_phy_reg.recall_qsfp_page = 1;
+	else
+		req->set_phy_reg.recall_qsfp_page = 0;
+}
+
+static inline void build_get_phy_reg(struct mbx_fw_cmd_req *req, void *cookie,
+				     enum PHY_INTERFACE phy_inf, char nr_lane,
+				     int reg)
+{
+	req->flags = 0;
+	req->opcode = GET_PHY_REG;
+	req->datalen = sizeof(req->get_phy_reg);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+
+	req->get_phy_reg.phy_interface = phy_inf;
+
+	req->get_phy_reg.nr_lane = nr_lane;
+	req->get_phy_reg.phy_reg_addr = reg;
+}
+
+static inline void build_phy_pause_set(struct mbx_fw_cmd_req *req,
+				       int pause_mode, int nr_lane)
+{
+	req->flags = 0;
+	req->opcode = PHY_PAUSE_SET;
+	req->datalen = sizeof(req->phy_pause_set);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->phy_pause_set.nr_lane = nr_lane;
+	req->phy_pause_set.pause_mode = pause_mode;
+}
+
+static inline void build_get_phy_pause_req(struct mbx_fw_cmd_req *req,
+					   int nr_lane, void *cookie)
+{
+	req->flags = 0;
+	req->opcode = PHY_PAUSE_GET;
+	req->datalen = sizeof(req->phy_pause_get);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->phy_pause_get.nr_lane = nr_lane;
+	req->phy_pause_get.pause_mode = 0;
+}
+
+static inline void build_phy_eee_set(struct mbx_fw_cmd_req *req, u32 local_eee,
+				     u32 tx_lpi_timer, int nr_lane)
+{
+	req->flags = 0;
+	req->opcode = PHY_EEE_SET;
+	req->datalen = sizeof(req->phy_eee_set);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->phy_eee_set.nr_lane = nr_lane;
+	req->phy_eee_set.local_eee = local_eee;
+	req->phy_eee_set.tx_lpi_timer = tx_lpi_timer;
+}
+
+static inline void build_phy_link_set(struct mbx_fw_cmd_req *req,
+				      unsigned int adv, int nr_lane,
+				      unsigned int autoneg, unsigned int speed,
+				      unsigned int duplex,
+				      unsigned int tp_mdix_ctrl)
+{
+	req->flags = 0;
+	req->opcode = PHY_LINK_SET;
+	req->datalen = sizeof(req->phy_link_set);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->phy_link_set.nr_lane = nr_lane;
+	req->phy_link_set.adv_speed_mask = adv;
+	req->phy_link_set.autoneg = autoneg;
+	req->phy_link_set.speed = speed;
+	req->phy_link_set.duplex = duplex;
+	req->phy_link_set.tp_mdix_ctrl = tp_mdix_ctrl;
+}
+static inline void build_tstamp_show(struct mbx_fw_cmd_req *req, u32 sec,
+				     u32 nanosec)
+{
+	req->flags = 0;
+	req->opcode = SHOW_TX_STAMP;
+	req->datalen = sizeof(req->tstamps);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->tstamps.sec = sec;
+	req->tstamps.nanosec = nanosec;
+}
+
+static inline void build_ifup_down(struct mbx_fw_cmd_req *req,
+				   unsigned int nr_lane, int up)
+{
+	req->flags = 0;
+	req->opcode = IFUP_DOWN;
+	req->datalen = sizeof(req->ifup);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->ifup.lane = nr_lane;
+	req->ifup.up = up;
+}
+
+static inline void build_ifinsmod(struct mbx_fw_cmd_req *req,
+				  unsigned int nr_lane, int status)
+{
+	req->flags = 0;
+	req->opcode = DRIVER_INSMOD;
+	req->datalen = sizeof(req->ifinsmod);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->ifinsmod.lane = nr_lane;
+	req->ifinsmod.status = status;
+}
+
+static inline void build_ifsuspuse(struct mbx_fw_cmd_req *req,
+				   unsigned int nr_lane, int status)
+{
+	req->flags = 0;
+	req->opcode = SYSTEM_SUSPUSE;
+	req->datalen = sizeof(req->ifsuspuse);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->ifsuspuse.lane = nr_lane;
+	req->ifsuspuse.status = status;
+}
+
+static inline void build_ifforce(struct mbx_fw_cmd_req *req,
+				 unsigned int nr_lane, int status)
+{
+	req->flags = 0;
+	req->opcode = SYSTEM_FORCE;
+	req->datalen = sizeof(req->ifforce);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->ifforce.lane = nr_lane;
+	req->ifforce.status = status;
+}
+
+static inline void build_mbx_sfp_read(struct mbx_fw_cmd_req *req,
+				      unsigned int nr_lane, int sfp_addr,
+				      int reg, int cnt, void *cookie)
+{
+	req->flags = 0;
+	req->opcode = SFP_MODULE_READ;
+	req->datalen = sizeof(req->sfp_read);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->sfp_read.nr_lane = nr_lane;
+	req->sfp_read.sfp_adr = sfp_addr;
+	req->sfp_read.reg = reg;
+	req->sfp_read.cnt = cnt;
+}
+
+static inline void build_mbx_sfp_write(struct mbx_fw_cmd_req *req,
+				       unsigned int nr_lane, int sfp_addr,
+				       int reg, int v)
+{
+	req->flags = 0;
+	req->opcode = SFP_MODULE_WRITE;
+	req->datalen = sizeof(req->sfp_write);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->sfp_write.nr_lane = nr_lane;
+	req->sfp_write.sfp_adr = sfp_addr;
+	req->sfp_write.reg = reg;
+	req->sfp_write.val = v;
+}
+
+static inline void build_mbx_wol_set(struct mbx_fw_cmd_req *req,
+				     unsigned int nr_lane, u32 mode)
+{
+	req->flags = 0;
+	req->opcode = SET_WOL;
+	req->datalen = sizeof(req->sfp_write);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->wol.lane = nr_lane;
+	req->wol.enable = mode;
+}
+
+static inline void build_mbx_gephy_test_set(struct mbx_fw_cmd_req *req,
+					    unsigned int nr_lane, u32 mode)
+{
+	req->flags = 0;
+	req->opcode = SET_TEST_MODE;
+	req->datalen = sizeof(req->sfp_write);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->gephy_test.lane = nr_lane;
+	req->gephy_test.mode = mode;
+}
+static inline void build_get_lldp_req(struct mbx_fw_cmd_req *req, void *cookie,
+				      int nr_lane)
+{
+#define LLDP_TX_GET (1)
+
+	req->flags = 0;
+	req->opcode = LLDP_TX_CTRL;
+	req->datalen = sizeof(req->lldp_tx);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->lldp_tx.lane = nr_lane;
+	req->lldp_tx.op = LLDP_TX_GET;
+	req->lldp_tx.enable = 0;
+}
+
+static inline void build_mbx_lldp_set(struct mbx_fw_cmd_req *req,
+				      unsigned int nr_lane, u32 enable)
+{
+#define LLDP_TX_SET (0)
+	req->flags = 0;
+	req->opcode = LLDP_TX_CTRL;
+	req->datalen = sizeof(req->sfp_write);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->lldp_tx.lane = nr_lane;
+	req->lldp_tx.op = LLDP_TX_SET;
+	req->lldp_tx.enable = enable;
+	req->lldp_tx.inteval = 30;
+}
+
+/* enum link_event_mask or */
+static inline void build_link_set_event_mask(struct mbx_fw_cmd_req *req,
+					     unsigned short event_mask,
+					     unsigned short enable,
+					     void *cookie)
+{
+	req->flags = 0;
+	req->opcode = SET_EVENT_MASK;
+	req->datalen = sizeof(req->stat_event_mask);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->stat_event_mask.event_mask = event_mask;
+	req->stat_event_mask.enable_stat = enable;
+}
+
+static inline void
+build_link_set_loopback_req(struct mbx_fw_cmd_req *req, void *cookie,
+			    enum LOOPBACK_LEVEL level,
+			    enum LOOPBACK_FORCE_SPEED force_speed)
+{
+	req->flags = 0;
+	req->opcode = SET_LOOPBACK_MODE;
+	req->datalen = sizeof(req->loopback);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+
+	req->loopback.loopback_level = level;
+	req->loopback.loopback_type = LOOPBACK_TYPE_LOCAL;
+	if (force_speed != LOOPBACK_FORCE_SPEED_NONE) {
+		req->loopback.loopback_force_speed = force_speed;
+		req->loopback.loopback_force_speed_enable = 1;
+	}
+}
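+/* Illustrative use: MAC-level loopback with the link forced to 1G would be
+ * requested as
+ *
+ *	build_link_set_loopback_req(&req, cookie, LOOPBACK_MAC,
+ *				    LOOPBACK_FORCE_SPEED_1GBS);
+ *
+ * while LOOPBACK_DISABLE with LOOPBACK_FORCE_SPEED_NONE turns it back off.
+ */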
+
+/* =========== error codes =========== */
+enum MBX_ERR {
+	MBX_OK = 0,
+	MBX_ERR_NO_PERM,
+	MBX_ERR_INVAL_OPCODE,
+	MBX_ERR_INVALID_PARAM,
+	MBX_ERR_INVALID_ADDR,
+	MBX_ERR_INVALID_LEN,
+	MBX_ERR_NODEV,
+	MBX_ERR_IO,
+};
+int rnpgbe_fw_get_capablity(struct rnpgbe_hw *hw, struct phy_abilities *abil);
+
+#endif /* _RNPGBE_MBX_FW_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_param.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_param.c
new file mode 100755
index 0000000000000000000000000000000000000000..e77c21cb159680c2002a33d90eb844250472d6a2
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_param.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include 
+#include 
+
+#include "rnpgbe.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define RNP_MAX_NIC 32
+
+#define OPTION_UNSET -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED 1
+
+#define STRINGIFY(foo) #foo /* magic for getting defines into strings */
+#define XSTRINGIFY(bar) STRINGIFY(bar)
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define RNP_PARAM_INIT                                                         \
+	{                                                                      \
+		[0 ... RNP_MAX_NIC] = OPTION_UNSET                             \
+	}
+#ifndef module_param_array
+/* Module Parameters are always initialized to -1, so that the driver
+ * can tell the difference between no user specified value or the
+ * user asking for the default value.
+ * The true default values are loaded in when rnpgbe_check_options is called.
+ *
+ * This is a GCC extension to ANSI C.
+ * See the item "Labelled Elements in Initializers" in the section
+ * "Extensions to the C Language Family" of the GCC documentation.
+ */
+
+#define RNP_PARAM(X, desc)                                                     \
+	static const int __devinitdata X[RNP_MAX_NIC + 1] = RNP_PARAM_INIT;    \
+	MODULE_PARM(X, "1-" __MODULE_STRING(RNP_MAX_NIC) "i");                 \
+	MODULE_PARM_DESC(X, desc);
+#else
+#define RNP_PARAM(X, desc)                                                     \
+	static int __devinitdata X[RNP_MAX_NIC + 1] = RNP_PARAM_INIT;          \
+	static unsigned int num_##X;                                           \
+	module_param_array_named(X, X, int, &num_##X, 0);                      \
+	MODULE_PARM_DESC(X, desc);
+#endif
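+/* For reference, on kernels with module_param_array the macro above expands
+ * RNP_PARAM(IntMode, "...") into roughly (attribute qualifiers omitted):
+ *
+ *	static int IntMode[RNP_MAX_NIC + 1] = RNP_PARAM_INIT;
+ *	static unsigned int num_IntMode;
+ *	module_param_array_named(IntMode, IntMode, int, &num_IntMode, 0);
+ *	MODULE_PARM_DESC(IntMode, "...");
+ *
+ * i.e. every option is a per-board array plus a count of how many entries
+ * the user actually supplied.
+ */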
+/* IntMode (Interrupt Mode)
+ *
+ * Valid Range: 0-2
+ *  - 0 - Legacy Interrupt
+ *  - 1 - MSI Interrupt
+ *  - 2 - MSI-X Interrupt(s)
+ *
+ * Default Value: 2
+ */
+RNP_PARAM(IntMode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), "
+		   "default 2");
+#define RNP_INT_LEGACY 0
+#define RNP_INT_MSI 1
+#define RNP_INT_MSIX 2
+
+#ifdef CONFIG_PCI_IOV
+/* max_vfs - SR I/O Virtualization
+ *
+ * Valid Range: 0-63 for n10
+ * Valid Range: 0-7 for n400/n10
+ *  - 0 Disables SR-IOV
+ *  - 1-x - enables SR-IOV and sets the number of VFs enabled
+ *
+ * Default Value: 0
+ */
+
+RNP_PARAM(max_vfs, "Number of Virtual Functions: 0 = disable (default), "
+		   "1-" XSTRINGIFY(MAX_SRIOV_VFS) " = enable "
+						  "this many VFs");
+
+/* SRIOV_Mode (SRIOV Mode)
+ *
+ * Valid Range: 0-1
+ *  - 0 - MAC mode
+ *  - 1 - VLAN mode
+ *
+ * Default Value: 0
+ */
+RNP_PARAM(SRIOV_Mode, "Change SRIOV Mode (0=MAC_MODE, 1=VLAN_MODE), "
+		      "default 0");
+#define RNP_SRIOV_MAC_MODE 0
+#define RNP_SRIOV_VLAN_MODE 1
+#endif
+
+/* pf_msix_counts_set - Limit max msix counts
+ *
+ * Valid Range: 2-63 for n10
+ * Valid Range: 2-7 for n400/n10
+ *
+ * Default Value: 0 (unlimited)
+ */
+RNP_PARAM(pf_msix_counts_set, "Number of Max MSIX Count: (default unlimited)");
+#define RNP_INT_MIN 2
+#define RNP_INT_MAX 64
+
+/* eee_timer - LPI tx expiration time in msec
+ *
+ * Valid Range: 100-10000
+ *
+ * Default Value: 4000
+ */
+RNP_PARAM(eee_timer, "LPI tx expiration time in msec: (default 4000)");
+#define RNP_EEE_MIN (100)
+#define RNP_EEE_DEFAULT (4000)
+#define RNP_EEE_MAX (10000)
+
+/* rx_skip - priv header len to skip, in DW
+ *
+ * Valid Range: [0, 16]
+ *
+ * Default Value: 0
+ */
+RNP_PARAM(rx_skip, "rx_skip header in DW: (default 0)");
+#define RNP_RX_SKIP_MIN (0)
+#define RNP_RX_SKIP_DEFAULT (0)
+#define RNP_RX_SKIP_MAX (16)
+
+struct rnpgbe_option {
+	enum { enable_option, range_option, list_option } type;
+	const char *name;
+	const char *err;
+	const char *msg;
+	int def;
+	union {
+		struct { /* range_option info */
+			int min;
+			int max;
+		} r;
+		struct { /* list_option info */
+			int nr;
+			const struct rnpgbe_opt_list {
+				int i;
+				char *str;
+			} *p;
+		} l;
+	} arg;
+};
+
+#ifdef HAVE_CONFIG_HOTPLUG
+static int __devinit rnpgbe_validate_option(struct net_device *netdev,
+					    unsigned int *value,
+					    struct rnpgbe_option *opt)
+#else
+static int rnpgbe_validate_option(struct net_device *netdev,
+				  unsigned int *value,
+				  struct rnpgbe_option *opt)
+#endif
+{
+	if (*value == OPTION_UNSET) {
+		netdev_info(netdev, "Invalid %s specified (%d),  %s\n",
+			    opt->name, *value, opt->err);
+		*value = opt->def;
+		return 0;
+	}
+
+	switch (opt->type) {
+	case enable_option:
+		switch (*value) {
+		case OPTION_ENABLED:
+			netdev_info(netdev, "%s Enabled\n", opt->name);
+			return 0;
+		case OPTION_DISABLED:
+			netdev_info(netdev, "%s Disabled\n", opt->name);
+			return 0;
+		}
+		break;
+	case range_option:
+		if ((*value >= opt->arg.r.min && *value <= opt->arg.r.max) ||
+		    *value == opt->def) {
+			if (opt->msg)
+				netdev_info(netdev, "%s set to %d, %s\n",
+					    opt->name, *value, opt->msg);
+			else
+				netdev_info(netdev, "%s set to %d\n", opt->name,
+					    *value);
+			return 0;
+		}
+		break;
+	case list_option: {
+		int i;
+
+		for (i = 0; i < opt->arg.l.nr; i++) {
+			const struct rnpgbe_opt_list *ent = &opt->arg.l.p[i];
+
+			if (*value == ent->i) {
+				if (ent->str[0] != '\0')
+					netdev_info(netdev, "%s\n", ent->str);
+				return 0;
+			}
+		}
+	} break;
+	default:
+		BUG();
+	}
+
+	netdev_info(netdev, "Invalid %s specified (%d),  %s\n", opt->name,
+		    *value, opt->err);
+	*value = opt->def;
+	return -1;
+}
+
+#define LIST_LEN(l) (sizeof(l) / sizeof(l[0]))
+#define PSTR_LEN 10
+
+/**
+ * rnpgbe_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input.  If an invalid value is given, or if no user specified
+ * value exists, a default value is used.  The final value is stored
+ * in a variable in the adapter structure.
+ **/
+#ifdef HAVE_CONFIG_HOTPLUG
+void __devinit rnpgbe_check_options(struct rnpgbe_adapter *adapter)
+#else
+void rnpgbe_check_options(struct rnpgbe_adapter *adapter)
+#endif
+{
+	int bd = adapter->bd_number;
+	u32 *aflags = &adapter->flags;
+
+	if (bd >= RNP_MAX_NIC) {
+		netdev_notice(adapter->netdev,
+			      "Warning: no configuration for board #%d\n", bd);
+		netdev_notice(adapter->netdev,
+			      "Using defaults for all values\n");
+#ifndef module_param_array
+		bd = RNP_MAX_NIC;
+#endif
+	}
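+	/* Options are per-board arrays indexed by bd_number, so (assuming the
+	 * module is loaded as rnpgbe) something like
+	 *
+	 *	modprobe rnpgbe IntMode=1,2
+	 *
+	 * selects MSI for board 0 and MSI-X for board 1.
+	 */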
+
+	{ /* Interrupt Mode */
+		unsigned int int_mode;
+		static struct rnpgbe_option opt = {
+			.type = range_option,
+			.name = "Interrupt Mode",
+			.err = "using default of " __MODULE_STRING(
+				RNP_INT_MSIX),
+			.def = RNP_INT_MSIX,
+			.arg = { .r = { .min = RNP_INT_LEGACY,
+					.max = RNP_INT_MSIX } }
+		};
+
+#ifdef module_param_array
+		if (num_IntMode > bd) {
+#endif
+			int_mode = IntMode[bd];
+			if (int_mode == OPTION_UNSET)
+				int_mode = RNP_INT_MSIX;
+			rnpgbe_validate_option(adapter->netdev, &int_mode,
+					       &opt);
+			switch (int_mode) {
+			case RNP_INT_MSIX:
+				if (!(*aflags & RNP_FLAG_MSIX_CAPABLE)) {
+					netdev_info(adapter->netdev,
+						    "Ignoring MSI-X setting; "
+						    "support unavailable\n");
+				} else
+					adapter->irq_mode = irq_mode_msix;
+				break;
+			case RNP_INT_MSI:
+				if (!(*aflags & RNP_FLAG_MSI_CAPABLE)) {
+					netdev_info(adapter->netdev,
+						    "Ignoring MSI setting; "
+						    "support unavailable\n");
+				} else
+					adapter->irq_mode = irq_mode_msi;
+
+				break;
+			case RNP_INT_LEGACY:
+				if (!(*aflags & RNP_FLAG_LEGACY_CAPABLE)) {
+					netdev_info(adapter->netdev,
+						    "Ignoring Legacy setting; "
+						    "support unavailable\n");
+				} else
+					adapter->irq_mode = irq_mode_legency;
+
+				break;
+			}
+#ifdef module_param_array
+		} else {
+			/* default settings */
+			if (*aflags & RNP_FLAG_MSIX_CAPABLE)
+				adapter->irq_mode = irq_mode_msix;
+			else if (*aflags & RNP_FLAG_MSI_CAPABLE)
+				adapter->irq_mode = irq_mode_msi;
+			else
+				adapter->irq_mode = irq_mode_legency;
+		}
+#endif
+	}
+
+#ifdef CONFIG_PCI_IOV
+	{ /* Single Root I/O Virtualization (SR-IOV) */
+		struct rnpgbe_hw *hw = &adapter->hw;
+		static struct rnpgbe_option opt = {
+			.type = range_option,
+			.name = "I/O Virtualization (IOV)",
+			.err = "defaulting to Disabled",
+			.def = OPTION_DISABLED,
+			.arg = { .r = { .min = OPTION_DISABLED,
+					.max = OPTION_DISABLED } }
+		};
+
+		opt.arg.r.max = hw->max_vfs;
+#ifdef module_param_array
+		if (num_max_vfs > bd) {
+#endif
+			unsigned int vfs = max_vfs[bd];
+
+			if (rnpgbe_validate_option(adapter->netdev, &vfs,
+						   &opt)) {
+				vfs = 0;
+				DPRINTK(PROBE, INFO,
+					"max_vfs out of range, "
+					"disabling SR-IOV.\n");
+			}
+
+			adapter->num_vfs = vfs;
+
+			if (vfs)
+				*aflags |= RNP_FLAG_SRIOV_ENABLED;
+			else
+				*aflags &= ~RNP_FLAG_SRIOV_ENABLED;
+#ifdef module_param_array
+		} else {
+			if (opt.def == OPTION_DISABLED) {
+				adapter->num_vfs = 0;
+				*aflags &= ~RNP_FLAG_SRIOV_ENABLED;
+			} else {
+				adapter->num_vfs = opt.def;
+				*aflags |= RNP_FLAG_SRIOV_ENABLED;
+			}
+		}
+#endif
+	}
+
+	{ /* SRIOV Mode */
+		unsigned int sriov_mode;
+		static struct rnpgbe_option opt = {
+			.type = range_option,
+			.name = "SRIOV Mode",
+			.err = "using default of " __MODULE_STRING(
+				RNP_SRIOV_MAC_MODE),
+			.def = RNP_SRIOV_MAC_MODE,
+			.arg = { .r = { .min = RNP_SRIOV_MAC_MODE,
+					.max = RNP_SRIOV_VLAN_MODE } }
+		};
+
+#ifdef module_param_array
+		if (num_SRIOV_Mode > bd) {
+#endif
+			sriov_mode = SRIOV_Mode[bd];
+			if (sriov_mode == OPTION_UNSET)
+				sriov_mode = RNP_SRIOV_MAC_MODE;
+			rnpgbe_validate_option(adapter->netdev, &sriov_mode,
+					       &opt);
+
+			if (sriov_mode == RNP_SRIOV_VLAN_MODE)
+				adapter->priv_flags |=
+					RNP_PRIV_FLAG_SRIOV_VLAN_MODE;
+
+#ifdef module_param_array
+		} else {
+			/* default settings */
+			/* msix -> msi -> Legacy */
+			adapter->priv_flags &= (~RNP_PRIV_FLAG_SRIOV_VLAN_MODE);
+		}
+#endif
+	}
+#endif /* CONFIG_PCI_IOV */
+
+	{ /* max msix count setup */
+		unsigned int pf_msix_counts;
+		struct rnpgbe_hw *hw = &adapter->hw;
+		static struct rnpgbe_option opt = {
+			.type = range_option,
+			.name = "Limit Msix Count",
+			.err = "using default of unlimited",
+			.def = OPTION_DISABLED,
+			.arg = { .r = { .min = RNP_INT_MIN,
+					.max = RNP_INT_MAX } }
+		};
+
+		opt.arg.r.max = hw->max_msix_vectors;
+#ifdef module_param_array
+		if (num_pf_msix_counts_set > bd) {
+#endif
+			pf_msix_counts = pf_msix_counts_set[bd];
+			if (pf_msix_counts == OPTION_DISABLED)
+				pf_msix_counts = 0;
+			rnpgbe_validate_option(adapter->netdev, &pf_msix_counts,
+					       &opt);
+
+			if (pf_msix_counts) {
+				if (hw->ops.update_msix_count)
+					hw->ops.update_msix_count(
+						hw, pf_msix_counts);
+			}
+
+#ifdef module_param_array
+		} else {
+		}
+#endif
+	}
+
+	{ /* LPI tx expiration time in msec */
+		unsigned int eee_timer_delay;
+		static struct rnpgbe_option opt = {
+			.type = range_option,
+			.name = "eee timer exp",
+			.err = "using default of 1000",
+			.def = OPTION_DISABLED,
+			.arg = { .r = { .min = RNP_EEE_MIN,
+					.max = RNP_EEE_MAX } }
+		};
+
+#ifdef module_param_array
+		if (num_eee_timer > bd) {
+#endif
+			eee_timer_delay = eee_timer[bd];
+			if (eee_timer_delay == OPTION_DISABLED)
+				eee_timer_delay = RNP_EEE_DEFAULT;
+			rnpgbe_validate_option(adapter->netdev,
+					       &eee_timer_delay, &opt);
+			adapter->eee_timer = eee_timer_delay;
+#ifdef module_param_array
+		} else {
+			adapter->eee_timer = RNP_EEE_DEFAULT;
+		}
+#endif
+	}
+
+	{ /* rx_skip in DW */
+		unsigned int rx_skip_priv;
+		static struct rnpgbe_option opt = {
+			.type = range_option,
+			.name = "rx_skip in DW",
+			.err = "using default of 0",
+			.def = OPTION_DISABLED,
+			.arg = { .r = { .min = RNP_RX_SKIP_MIN,
+					.max = RNP_RX_SKIP_MAX } }
+		};
+
+#ifdef module_param_array
+		if (num_rx_skip > bd) {
+#endif
+			rx_skip_priv = rx_skip[bd];
+			if (rx_skip_priv == OPTION_DISABLED)
+				rx_skip_priv = RNP_RX_SKIP_DEFAULT;
+			rnpgbe_validate_option(adapter->netdev, &rx_skip_priv,
+					       &opt);
+			if (rx_skip_priv) {
+				adapter->priv_skip_count = rx_skip_priv - 1;
+				adapter->priv_flags |= RNP_PRIV_FLAG_RX_SKIP_EN;
+			} else
+				adapter->priv_flags &=
+					~RNP_PRIV_FLAG_RX_SKIP_EN;
+#ifdef module_param_array
+		} else {
+			adapter->priv_skip_count = RNP_RX_SKIP_DEFAULT;
+			adapter->priv_flags &= ~RNP_PRIV_FLAG_RX_SKIP_EN;
+		}
+#endif
+	}
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_phy.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_phy.h
new file mode 100755
index 0000000000000000000000000000000000000000..5fd912d35b7712b438c4b10f85a0a95779df29b1
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_phy.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPGBE_PHY_H_
+#define _RNPGBE_PHY_H_
+
+#include "rnpgbe_type.h"
+
+#define RNP_I2C_EEPROM_DEV_ADDR 0xA0
+#define RNP_I2C_EEPROM_DEV_ADDR2 0xA2
+#define RNP_YT8531_PHY_SPEC_CTRL 0x10
+#define RNP_YT8531_PHY_SPEC_CTRL_FORCE_MDIX 0x0020
+#define RNP_YT8531_PHY_SPEC_CTRL_AUTO_MDI_MDIX 0x0060
+#define RNP_YT8531_PHY_SPEC_CTRL_MDIX_CFG_MASK 0x0060
+/* EEPROM byte offsets */
+#define SFF_MODULE_ID_OFFSET 0x00
+#define SFF_DIAG_SUPPORT_OFFSET 0x5c
+#define SFF_MODULE_ID_SFP 0x3
+#define SFF_MODULE_ID_QSFP 0xc
+#define SFF_MODULE_ID_QSFP_PLUS 0xd
+#define SFF_MODULE_ID_QSFP28 0x11
+/* Bitmasks */
+#define RNP_SFF_DA_PASSIVE_CABLE 0x4
+#define RNP_SFF_DA_ACTIVE_CABLE 0x8
+#define RNP_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
+#define RNP_SFF_1GBASESX_CAPABLE 0x1
+#define RNP_SFF_1GBASELX_CAPABLE 0x2
+#define RNP_SFF_1GBASET_CAPABLE 0x8
+#define RNP_SFF_10GBASESR_CAPABLE 0x10
+#define RNP_SFF_10GBASELR_CAPABLE 0x20
+#define RNP_SFF_ADDRESSING_MODE 0x4
+#define RNP_I2C_EEPROM_READ_MASK 0x100
+#define RNP_I2C_EEPROM_STATUS_MASK 0x3
+#define RNP_I2C_EEPROM_STATUS_NO_OPERATION 0x0
+#define RNP_I2C_EEPROM_STATUS_PASS 0x1
+#define RNP_I2C_EEPROM_STATUS_FAIL 0x2
+#define RNP_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
+/* Flow control defines */
+#define RNP_TAF_SYM_PAUSE 0x400
+#define RNP_TAF_ASM_PAUSE 0x800
+/* Bit-shift macros */
+#define RNP_SFF_VENDOR_OUI_BYTE0_SHIFT 24
+#define RNP_SFF_VENDOR_OUI_BYTE1_SHIFT 16
+#define RNP_SFF_VENDOR_OUI_BYTE2_SHIFT 8
+/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
+#define RNP_SFF_VENDOR_OUI_TYCO 0x00407600
+#define RNP_SFF_VENDOR_OUI_FTL 0x00906500
+#define RNP_SFF_VENDOR_OUI_AVAGO 0x00176A00
+#define RNP_SFF_VENDOR_OUI_INTEL 0x001B2100
+/* I2C SDA and SCL timing parameters for standard mode */
+#define RNP_I2C_T_HD_STA 4
+#define RNP_I2C_T_LOW 5
+#define RNP_I2C_T_HIGH 4
+#define RNP_I2C_T_SU_STA 5
+#define RNP_I2C_T_HD_DATA 5
+#define RNP_I2C_T_SU_DATA 1
+#define RNP_I2C_T_RISE 1
+#define RNP_I2C_T_FALL 1
+#define RNP_I2C_T_SU_STO 4
+#define RNP_I2C_T_BUF 5
+#define RNP_TN_LASI_STATUS_REG 0x9005
+#define RNP_TN_LASI_STATUS_TEMP_ALARM 0x0008
+/* SFP+ SFF-8472 Compliance code */
+#define RNP_SFF_SFF_8472_UNSUP 0x00
+#endif /* _RNPGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ptp.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ptp.c
new file mode 100755
index 0000000000000000000000000000000000000000..9e0047d2d70c86cfd42a04344b6e5b065144c290
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ptp.c
@@ -0,0 +1,834 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "rnpgbe.h"
+#include "rnpgbe_regs.h"
+#include "rnpgbe_ptp.h"
+#include "rnpgbe_mbx.h"
+
+/* #define DEBUG_PTP_TX_TIMESTAMP */
+
+/* PTP and HW Timer ops */
+static void config_hw_tstamping(void __iomem *ioaddr, u32 data)
+{
+	writel(data, ioaddr + PTP_TCR);
+}
+
+static void config_sub_second_increment(void __iomem *ioaddr, u32 ptp_clock,
+					int gmac4, u32 *ssinc)
+{
+	u32 value = readl(ioaddr + PTP_TCR);
+	unsigned long data;
+	u32 reg_value;
+
+	/* For GMAC3.x, 4.x versions, in "fine adjustment mode" set sub-second
+	 * increment to twice the number of nanoseconds of a clock cycle.
+	 * The calculation of the default_addend value by the caller will set it
+	 * to mid-range = 2^31 when the remainder of this division is zero,
+	 * which will make the accumulator overflow once every 2 ptp_clock
+	 * cycles, adding twice the number of nanoseconds of a clock cycle :
+	 * 2000000000ULL / ptp_clock.
+	 */
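+	/* For example (illustrative numbers): with a 50 MHz ptp_clock in fine
+	 * update mode this yields data = 2000000000 / 50000000 = 40 ns added
+	 * per accumulator overflow.
+	 */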
+	if (value & RNP_PTP_TCR_TSCFUPDT)
+		data = (2000000000ULL / ptp_clock);
+	else
+		data = (1000000000ULL / ptp_clock);
+
+	/* 0.465ns accuracy */
+	if (!(value & RNP_PTP_TCR_TSCTRLSSR))
+		data = (data * 1000) / 465;
+
+	data &= RNP_PTP_SSIR_SSINC_MASK;
+
+	reg_value = data;
+	if (gmac4)
+		reg_value <<= RNP_PTP_SSIR_SSINC_SHIFT;
+
+	writel(reg_value, ioaddr + PTP_SSIR);
+
+	if (ssinc)
+		*ssinc = data;
+}
+
+static int config_addend(void __iomem *ioaddr, u32 addend)
+{
+	u32 value;
+	int limit;
+
+	writel(addend, ioaddr + PTP_TAR);
+	/* issue command to update the addend value */
+	value = readl(ioaddr + PTP_TCR);
+	value |= RNP_PTP_TCR_TSADDREG;
+	writel(value, ioaddr + PTP_TCR);
+
+	/* wait for present addend update to complete */
+	limit = 10;
+	while (limit--) {
+		if (!(readl(ioaddr + PTP_TCR) & RNP_PTP_TCR_TSADDREG))
+			break;
+		mdelay(10);
+	}
+	if (limit < 0)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
+{
+	int limit;
+	u32 value;
+
+	writel(sec, ioaddr + PTP_STSUR);
+	writel(nsec, ioaddr + PTP_STNSUR);
+	/* issue command to initialize the system time value */
+	value = readl(ioaddr + PTP_TCR);
+	value |= RNP_PTP_TCR_TSINIT;
+	writel(value, ioaddr + PTP_TCR);
+
+	/* wait for present system time initialize to complete */
+	limit = 10;
+	while (limit--) {
+		if (!(readl(ioaddr + PTP_TCR) & RNP_PTP_TCR_TSINIT))
+			break;
+		mdelay(10);
+	}
+	if (limit < 0)
+		return -EBUSY;
+#ifdef FW_UART_SHOW_TSTAMPS
+	/* setup pps control */
+	writel(0x1, ioaddr + PTP_PPS_CONTROL);
+#endif
+	return 0;
+}
+
+static void get_systime(void __iomem *ioaddr, u64 *systime)
+{
+	u64 ns;
+
+	/* Get the TSSS value */
+	ns = readl(ioaddr + PTP_STNSR);
+	/* Get the TSS and convert sec time value to nanosecond */
+	ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
+
+	if (systime)
+		*systime = ns;
+}
+
+static void config_mac_interrupt_enable(void __iomem *ioaddr, bool on)
+{
+	rnpgbe_wr_reg(ioaddr + RNP_MAC_INTERRUPT_ENABLE, on);
+}
+
+static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, int add_sub,
+			  int gmac4)
+{
+	u32 value;
+	int limit;
+
+	if (add_sub) {
+		/* If the new sec value needs to be subtracted from
+		 * the system time, then the MAC_STSUR reg should be
+		 * programmed with (2^32 - new_sec_value)
+		 */
+		if (gmac4)
+			sec = -sec;
+
+		value = readl(ioaddr + PTP_TCR);
+		if (value & RNP_PTP_TCR_TSCTRLSSR)
+			nsec = (RNP_PTP_DIGITAL_ROLLOVER_MODE - nsec);
+		else
+			nsec = (RNP_PTP_BINARY_ROLLOVER_MODE - nsec);
+	}
+
+	writel(sec, ioaddr + PTP_STSUR);
+	value = (add_sub << RNP_PTP_STNSUR_ADDSUB_SHIFT) | nsec;
+	writel(value, ioaddr + PTP_STNSUR);
+
+	/* issue command to initialize the system time value */
+	value = readl(ioaddr + PTP_TCR);
+	value |= RNP_PTP_TCR_TSUPDT;
+	writel(value, ioaddr + PTP_TCR);
+
+	/* wait for present system time adjust/update to complete */
+	limit = 10;
+	while (limit--) {
+		if (!(readl(ioaddr + PTP_TCR) & RNP_PTP_TCR_TSUPDT))
+			break;
+		mdelay(10);
+	}
+	if (limit < 0)
+		return -EBUSY;
+
+	return 0;
+}
+
+static const struct rnpgbe_hwtimestamp mac_ptp = {
+	.config_hw_tstamping = config_hw_tstamping,
+	.config_mac_irq_enable = config_mac_interrupt_enable,
+	.init_systime = init_systime,
+	.config_sub_second_increment = config_sub_second_increment,
+	.config_addend = config_addend,
+	.adjust_systime = adjust_systime,
+	.get_systime = get_systime,
+};
+
+#ifdef HAVE_PTP_CLOCK_INFO_ADJFINE
+static int rnpgbe_ptp_adjfreq(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+	struct rnpgbe_adapter *pf =
+		container_of(ptp, struct rnpgbe_adapter, ptp_clock_ops);
+	unsigned long flags;
+	u32 addend;
+
+	if (pf == NULL) {
+		printk(KERN_DEBUG "adapter container is null\n");
+		return 0;
+	}
+	addend = adjust_by_scaled_ppm(pf->default_addend, scaled_ppm);
+	spin_lock_irqsave(&pf->ptp_lock, flags);
+	pf->hwts_ops->config_addend(pf->ptp_addr, addend);
+	spin_unlock_irqrestore(&pf->ptp_lock, flags);
+
+	return 0;
+}
+#else /* HAVE_PTP_CLOCK_INFO_ADJFINE */
+static int rnpgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+	struct rnpgbe_adapter *pf =
+		container_of(ptp, struct rnpgbe_adapter, ptp_clock_ops);
+	unsigned long flags;
+	u32 diff, addend;
+	int neg_adj = 0;
+	u64 adj;
+
+	if (pf == NULL) {
+		printk(KERN_DEBUG "adapter container is null\n");
+		return 0;
+	}
+	if (ppb < 0) {
+		neg_adj = 1;
+		ppb = -ppb;
+	}
+
+	addend = pf->default_addend;
+	adj = addend;
+	adj *= ppb;
+
+	diff = div_u64(adj, 1000000000ULL);
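+	/* e.g. (illustrative): a request of ppb = 100000 (100 ppm) with the
+	 * mid-range addend of 2^31 gives diff = 2^31 * 100000 / 10^9,
+	 * i.e. 214748 after truncation.
+	 */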
+	addend = neg_adj ? (addend - diff) : (addend + diff);
+
+	spin_lock_irqsave(&pf->ptp_lock, flags);
+	pf->hwts_ops->config_addend(pf->ptp_addr, addend);
+	spin_unlock_irqrestore(&pf->ptp_lock, flags);
+
+	return 0;
+}
+#endif /* HAVE_PTP_CLOCK_INFO_ADJFINE */
+
+static int rnpgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	struct rnpgbe_adapter *pf =
+		container_of(ptp, struct rnpgbe_adapter, ptp_clock_ops);
+	unsigned long flags;
+	u32 sec, nsec;
+	u32 quotient, remainder;
+	int neg_adj = 0;
+
+	if (delta < 0) {
+		neg_adj = 1;
+		delta = -delta;
+	}
+
+	if (delta == 0)
+		return 0;
+
+	quotient = div_u64_rem(delta, 1000000000ULL, &remainder);
+	sec = quotient;
+	nsec = remainder;
+
+	spin_lock_irqsave(&pf->ptp_lock, flags);
+	pf->hwts_ops->adjust_systime(pf->ptp_addr, sec, nsec, neg_adj,
+				     pf->gmac4);
+	spin_unlock_irqrestore(&pf->ptp_lock, flags);
+
+	return 0;
+}
+
+static int rnpgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+	struct rnpgbe_adapter *pf =
+		container_of(ptp, struct rnpgbe_adapter, ptp_clock_ops);
+	unsigned long flags;
+	u64 ns = 0;
+
+	spin_lock_irqsave(&pf->ptp_lock, flags);
+
+	pf->hwts_ops->get_systime(pf->ptp_addr, &ns);
+
+	spin_unlock_irqrestore(&pf->ptp_lock, flags);
+
+	*ts = ns_to_timespec64(ns);
+
+	return 0;
+}
+
+static int rnpgbe_ptp_settime(struct ptp_clock_info *ptp,
+			      const struct timespec64 *ts)
+{
+	struct rnpgbe_adapter *pf =
+		container_of(ptp, struct rnpgbe_adapter, ptp_clock_ops);
+	unsigned long flags;
+
+	spin_lock_irqsave(&pf->ptp_lock, flags);
+	pf->hwts_ops->init_systime(pf->ptp_addr, ts->tv_sec, ts->tv_nsec);
+	spin_unlock_irqrestore(&pf->ptp_lock, flags);
+
+	return 0;
+}
+
+#ifndef HAVE_PTP_CLOCK_INFO_GETTIME64
+static int rnpgbe_ptp_gettime32(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+	struct timespec64 ts64;
+	int err;
+
+	err = rnpgbe_ptp_gettime(ptp, &ts64);
+	if (err)
+		return err;
+
+	*ts = timespec64_to_timespec(ts64);
+
+	return 0;
+}
+
+static int rnpgbe_ptp_settime32(struct ptp_clock_info *ptp,
+				const struct timespec *ts)
+{
+	struct timespec64 ts64;
+
+	ts64 = timespec_to_timespec64(*ts);
+	return rnpgbe_ptp_settime(ptp, &ts64);
+}
+#endif /* HAVE_PTP_CLOCK_INFO_GETTIME64 */
+
+static int rnpgbe_ptp_feature_enable(struct ptp_clock_info *ptp,
+				     struct ptp_clock_request *rq, int on)
+{
+	return -EOPNOTSUPP;
+}
+
+int rnpgbe_ptp_get_ts_config(struct rnpgbe_adapter *pf, struct ifreq *ifr)
+{
+	struct hwtstamp_config *config = &pf->tstamp_config;
+
+	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? -EFAULT :
+								      0;
+}
+
+static int rnpgbe_ptp_setup_ptp(struct rnpgbe_adapter *pf, u32 value)
+{
+	u32 sec_inc = 0;
+	u64 temp = 0;
+	struct timespec64 now;
+
+	/* For now just use the external clock (the kernel system clock) */
+	/* 1. Mask the timestamp trigger interrupt */
+	/* 2. Enable timestamping */
+	/* 2.1 Clear all bits of the timestamp control register */
+	pf->hwts_ops->config_hw_tstamping(pf->ptp_addr, value);
+	/* 3. Program the PTP clock frequency */
+	/* program the Sub Second Increment reg;
+	 * we use the kernel system clock
+	 */
+	pf->hwts_ops->config_sub_second_increment(
+		pf->ptp_addr, pf->clk_ptp_rate, pf->gmac4, &sec_inc);
+	/* 4. If using the fine correction approach, program the
+	 * MAC_Timestamp_Addend register
+	 */
+	if (sec_inc == 0) {
+		printk(KERN_DEBUG "%s:%d the sec_inc is zero this is a bug\n",
+		       __func__, __LINE__);
+		return -EFAULT;
+	}
+	temp = div_u64(1000000000ULL, sec_inc);
+	/* Store sub second increment and flags for later use */
+	pf->sub_second_inc = sec_inc;
+	pf->systime_flags = value;
+	/* calculate default added value:
+	 * formula is :
+	 * addend = (2^32)/freq_div_ratio;
+	 * where, freq_div_ratio = 1e9ns/sec_inc
+	 */
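+	/* Worked example (values as used elsewhere in this driver): with
+	 * clk_ptp_rate = 50 MHz in fine update mode, sec_inc = 40 ns, so
+	 * freq_div_ratio = 10^9 / 40 = 25 MHz and
+	 * addend = 2^32 * 25000000 / 50000000 = 2^31 (mid-range).
+	 */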
+	temp = (u64)(temp << 32);
+
+	if (pf->clk_ptp_rate == 0) {
+		pf->clk_ptp_rate = 1000;
+		printk(KERN_DEBUG "%s:%d clk_ptp_rate is zero\n", __func__,
+		       __LINE__);
+	}
+
+	pf->default_addend = div_u64(temp, pf->clk_ptp_rate);
+
+	pf->hwts_ops->config_addend(pf->ptp_addr, pf->default_addend);
+	/* 5. Poll/wait for the TCR addend update to complete */
+	/* 6. Enable the fine update method */
+	/* 7. Program the second and nanosecond registers */
+	/* TODO: enable one-step timestamping if needed */
+
+	/* initialize system time */
+	ktime_get_real_ts64(&now);
+
+	/* lower 32 bits of tv_sec are safe until y2106 */
+	pf->hwts_ops->init_systime(pf->ptp_addr, (u32)now.tv_sec, now.tv_nsec);
+
+	return 0;
+}
+
+int rnpgbe_ptp_set_ts_config(struct rnpgbe_adapter *pf, struct ifreq *ifr)
+{
+	struct hwtstamp_config config;
+	u32 ptp_v2 = 0;
+	u32 tstamp_all = 0;
+	u32 ptp_over_ipv4_udp = 0;
+	u32 ptp_over_ipv6_udp = 0;
+	u32 ptp_over_ethernet = 0;
+	u32 snap_type_sel = 0;
+	u32 ts_master_en = 0;
+	u32 ts_event_en = 0;
+	u32 value = 0;
+	s32 ret = -1;
+
+	if (!(pf->flags2 & RNP_FLAG2_PTP_ENABLED)) {
+		pci_alert(pf->pdev, "No support for HW time stamping\n");
+		pf->ptp_tx_en = 0;
+		pf->ptp_rx_en = 0;
+
+		return -EOPNOTSUPP;
+	}
+
+	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	netdev_info(pf->netdev,
+		    "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
+		    __func__, config.flags, config.tx_type, config.rx_filter);
+	/* reserved for future extensions */
+	if (config.flags)
+		return -EINVAL;
+
+	if (config.tx_type != HWTSTAMP_TX_OFF &&
+	    config.tx_type != HWTSTAMP_TX_ON)
+		return -ERANGE;
+
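+	/* The filter below comes from userspace via the SIOCSHWTSTAMP ioctl,
+	 * e.g. (illustrative userspace snippet):
+	 *
+	 *	struct hwtstamp_config cfg = {
+	 *		.tx_type = HWTSTAMP_TX_ON,
+	 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
+	 *	};
+	 *	ifr.ifr_data = (void *)&cfg;
+	 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
+	 */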
+	switch (config.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		/* time stamp no incoming packet at all */
+		config.rx_filter = HWTSTAMP_FILTER_NONE;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+		/* PTP v1, UDP, any kind of event packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+		/* 'mac' hardware can support Sync, Pdelay_Req and
+		 * Pdelay_resp by setting bit14 and bits17/16 to 01
+		 * This leaves Delay_Req timestamps out.
+		 * Enable all events *and* general purpose message
+		 * timestamping
+		 */
+		snap_type_sel = RNP_PTP_TCR_SNAPTYPSEL_1;
+		ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA;
+		ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+		/* PTP v1, UDP, Sync packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+		/* take time stamp for SYNC messages only */
+		ts_event_en = RNP_PTP_TCR_TSEVNTENA;
+
+		ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA;
+		ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+		/* PTP v1, UDP, Delay_req packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+		/* take time stamp for Delay_Req messages only */
+		ts_master_en = RNP_PTP_TCR_TSMSTRENA;
+		ts_event_en = RNP_PTP_TCR_TSEVNTENA;
+
+		ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA;
+		ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+		/* PTP v2, UDP, any kind of event packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+		ptp_v2 = RNP_PTP_TCR_TSVER2ENA;
+
+		/* take time stamp for all event messages */
+		snap_type_sel = RNP_PTP_TCR_SNAPTYPSEL_1;
+
+		ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA;
+		ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+		/* PTP v2, UDP, Sync packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+		ptp_v2 = RNP_PTP_TCR_TSVER2ENA;
+		/* take time stamp for SYNC messages only */
+		ts_event_en = RNP_PTP_TCR_TSEVNTENA;
+		ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA;
+		ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+		/* PTP v2, UDP, Delay_req packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+		ptp_v2 = RNP_PTP_TCR_TSVER2ENA;
+		/* take time stamp for Delay_Req messages only */
+		ts_master_en = RNP_PTP_TCR_TSMSTRENA;
+		ts_event_en = RNP_PTP_TCR_TSEVNTENA;
+		ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA;
+		ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+		/* PTP v2/802.AS1 any layer, any kind of event packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+		ptp_v2 = RNP_PTP_TCR_TSVER2ENA;
+		snap_type_sel = RNP_PTP_TCR_SNAPTYPSEL_1;
+		ts_event_en = RNP_PTP_TCR_TSEVNTENA;
+		ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA;
+		ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA;
+		ptp_over_ethernet = RNP_PTP_TCR_TSIPENA;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+		/* PTP v2/802.AS1, any layer, Sync packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+		ptp_v2 = RNP_PTP_TCR_TSVER2ENA;
+		/* take time stamp for SYNC messages only */
+		ts_event_en = RNP_PTP_TCR_TSEVNTENA;
+		ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA;
+		ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA;
+		ptp_over_ethernet = RNP_PTP_TCR_TSIPENA;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+		/* PTP v2/802.AS1, any layer, Delay_req packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+		ptp_v2 = RNP_PTP_TCR_TSVER2ENA;
+		/* take time stamp for Delay_Req messages only */
+		ts_master_en = RNP_PTP_TCR_TSMSTRENA;
+		ts_event_en = RNP_PTP_TCR_TSEVNTENA;
+
+		ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA;
+		ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA;
+		ptp_over_ethernet = RNP_PTP_TCR_TSIPENA;
+		break;
+
+#ifdef HWTSTAMP_FILTER_NTP_ALL
+	case HWTSTAMP_FILTER_NTP_ALL:
+#endif
+	case HWTSTAMP_FILTER_ALL:
+		/* time stamp any incoming packet */
+		config.rx_filter = HWTSTAMP_FILTER_ALL;
+		tstamp_all = RNP_PTP_TCR_TSENALL;
+		break;
+
+	default:
+		return -ERANGE;
+	}
+
+	pf->ptp_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
+	pf->ptp_tx_en = config.tx_type == HWTSTAMP_TX_ON;
+
+	netdev_info(
+		pf->netdev,
+		"ptp config rx filter 0x%.2x tx_type 0x%.2x rx_en[%d] tx_en[%d]\n",
+		config.rx_filter, config.tx_type, pf->ptp_rx_en, pf->ptp_tx_en);
+	if (!pf->ptp_rx_en && !pf->ptp_tx_en)
+		/* neither rx nor tx uses hardware ts, so clear the ptp register */
+		pf->hwts_ops->config_hw_tstamping(pf->ptp_addr, 0);
+	else {
+		value = (RNP_PTP_TCR_TSENA | RNP_PTP_TCR_TSCFUPDT |
+			 RNP_PTP_TCR_TSCTRLSSR | tstamp_all | ptp_v2 |
+			 ptp_over_ethernet | ptp_over_ipv6_udp |
+			 ptp_over_ipv4_udp | ts_master_en | snap_type_sel);
+
+		ret = rnpgbe_ptp_setup_ptp(pf, value);
+		if (ret < 0)
+			return ret;
+	}
+	pf->ptp_config_value = value;
+	memcpy(&pf->tstamp_config, &config, sizeof(config));
+
+	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT :
+								      0;
+}
+
+/* structure describing a PTP hardware clock */
+static struct ptp_clock_info rnpgbe_ptp_clock_ops = {
+	.owner = THIS_MODULE,
+	.name = "rnp ptp",
+	.max_adj = 50000000,
+	.n_alarm = 0,
+	.n_ext_ts = 0,
+	.n_per_out = 0, /* will be overwritten in stmmac_ptp_register */
+#ifndef COMPAT_PTP_NO_PINS
+	.n_pins = 0, /*should be 0 if not set*/
+#endif
+#ifdef HAVE_PTP_CLOCK_INFO_ADJFINE
+	.adjfine = rnpgbe_ptp_adjfreq,
+#else
+	.adjfreq = rnpgbe_ptp_adjfreq,
+#endif
+	.adjtime = rnpgbe_ptp_adjtime,
+
+#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64
+	.gettime64 = rnpgbe_ptp_gettime,
+	.settime64 = rnpgbe_ptp_settime,
+#else /* HAVE_PTP_CLOCK_INFO_GETTIME64 */
+	.gettime = rnpgbe_ptp_gettime32,
+	.settime = rnpgbe_ptp_settime32,
+#endif /* HAVE_PTP_CLOCK_INFO_GETTIME64 */
+	.enable = rnpgbe_ptp_feature_enable,
+};
+
+int rnpgbe_ptp_register(struct rnpgbe_adapter *pf)
+{
+	pf->hwts_ops = &mac_ptp;
+
+	pf->ptp_tx_en = 0;
+	pf->ptp_rx_en = 0;
+
+	spin_lock_init(&pf->ptp_lock);
+	pf->flags2 |= RNP_FLAG2_PTP_ENABLED;
+	pf->ptp_clock_ops = rnpgbe_ptp_clock_ops;
+
+	/* default MAC PTP clock rate is 50 MHz */
+	pf->clk_ptp_rate = 50000000;
+	/* pf->clk_ptp_rate = 62500000; */
+	if (pf->pdev == NULL)
+		printk(KERN_DEBUG "pdev dev is null\n");
+
+	pf->ptp_clock = ptp_clock_register(&pf->ptp_clock_ops, &pf->pdev->dev);
+	if (pf->ptp_clock == NULL)
+		pci_err(pf->pdev, "ptp clock register failed\n");
+
+	if (IS_ERR(pf->ptp_clock)) {
+		pci_err(pf->pdev, "ptp_clock_register failed\n");
+		pf->ptp_clock = NULL;
+	} else {
+		pci_info(pf->pdev, "registered PTP clock\n");
+	}
+
+	return 0;
+}
+
+void rnpgbe_ptp_unregister(struct rnpgbe_adapter *pf)
+{
+	/*1. stop the ptp module*/
+	if (pf->ptp_clock) {
+		ptp_clock_unregister(pf->ptp_clock);
+		pf->ptp_clock = NULL;
+		pr_debug("Removed PTP HW clock successfully on %s\n",
+			 "rnpgbe_ptp");
+		pf->hwts_ops = NULL;
+	}
+}
+
+#if defined(DEBUG_PTP_HARD_SOFTWAY_RX) || defined(DEBUG_PTP_HARD_SOFTWAY_TX)
+static u64 rnpgbe_get_software_ts(void)
+{
+	struct timespec64 ts;
+
+	ktime_get_real_ts64(&ts);
+	return (ts.tv_nsec + ts.tv_sec * 1000000000ULL);
+}
+#endif
+
+#if defined(DEBUG_PTP_TX_TIMESTAMP) || defined(DEBUG_PTP_RX_TIMESTAMP)
+#define TIME_ZONE_CHINA (8)
+static char *asctime(const struct tm *timeptr)
+{
+	static const char wday_name[][4] = { "Sun", "Mon", "Tue", "Wed",
+					     "Thu", "Fri", "Sat" };
+	static const char mon_name[][4] = { "Jan", "Feb", "Mar", "Apr",
+					    "May", "Jun", "Jul", "Aug",
+					    "Sep", "Oct", "Nov", "Dec" };
+	static char result[26];
+
+	snprintf(result, sizeof(result), "%.3s %.3s%3d %.2d:%.2d:%.2d %ld\n",
+		 wday_name[timeptr->tm_wday], mon_name[timeptr->tm_mon],
+		 timeptr->tm_mday, timeptr->tm_hour + TIME_ZONE_CHINA,
+		 timeptr->tm_min, timeptr->tm_sec, 1900 + timeptr->tm_year);
+	return result;
+}
+
+static void rnpgbe_print_human_timestamp(u64 ns, const char *direct)
+{
+	struct timespec64 ts;
+	struct tm tms;
+	ktime_t ktm = ns_to_ktime(ns);
+
+	ts = ktime_to_timespec64(ktm);
+	time64_to_tm(ts.tv_sec, ts.tv_nsec / 1000000000ULL, &tms);
+	printk(KERN_DEBUG "[%s] %s ------\n", direct, asctime(&tms));
+}
+#endif
+
+void rnpgbe_tx_hwtstamp_work(struct work_struct *work)
+{
+	struct rnpgbe_adapter *adapter =
+		container_of(work, struct rnpgbe_adapter, tx_hwtstamp_work);
+#ifdef FW_UART_SHOW_TSTAMPS
+	struct rnpgbe_hw *hw = &adapter->hw;
+#endif
+	void __iomem *ioaddr = adapter->hw.hw_addr;
+
+	/* 1. read the timestamp status register for this port
+	 * 2. if a timestamp is ready, read the nsec and sec registers
+	 */
+	u64 nanosec = 0, sec = 0;
+
+	if (!adapter->ptp_tx_skb) {
+		clear_bit_unlock(__RNP_PTP_TX_IN_PROGRESS, &adapter->state);
+		return;
+	}
+
+	if (rnpgbe_rd_reg(ioaddr + RNP_ETH_PTP_TX_TSVALUE_STATUS(0)) & 0x01) {
+		struct sk_buff *skb = adapter->ptp_tx_skb;
+		struct skb_shared_hwtstamps shhwtstamps;
+		u64 txstmp = 0;
+		/* read nsec and sec and combine them into one nanosecond value */
+
+		nanosec = rnpgbe_rd_reg(ioaddr + RNP_ETH_PTP_TX_LTIMES(0));
+		sec = rnpgbe_rd_reg(ioaddr + RNP_ETH_PTP_TX_HTIMES(0));
+		/* once the timestamp has been read, tell the hardware it may
+		 * capture a new one by toggling the tx_hwts clear register
+		 * from high to low
+		 */
+		rnpgbe_wr_reg(ioaddr + RNP_ETH_PTP_TX_CLEAR(0),
+			      PTP_GET_TX_HWTS_FINISH);
+		rnpgbe_wr_reg(ioaddr + RNP_ETH_PTP_TX_CLEAR(0),
+			      PTP_GET_TX_HWTS_UPDATE);
+
+		txstmp = nanosec & PTP_HWTX_TIME_VALUE_MASK;
+		txstmp += (sec & PTP_HWTX_TIME_VALUE_MASK) * 1000000000ULL;
+
+		/* Clear the global tx_hwtstamp_skb pointer and force writes
+		 * prior to notifying the stack of a Tx timestamp.
+		 */
+		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+		shhwtstamps.hwtstamp = ns_to_ktime(txstmp);
+		adapter->ptp_tx_skb = NULL;
+#ifdef DEBUG_PTP_TX_TIMESTAMP
+		rnpgbe_print_human_timestamp(txstmp, "TX");
+#endif
+		/* force the write before skb_tstamp_tx(), because the xmit
+		 * path will reuse this pointer to store the next PTP skb
+		 */
+		wmb();
+
+		skb_tstamp_tx(skb, &shhwtstamps);
+		dev_consume_skb_any(skb);
+		clear_bit_unlock(__RNP_PTP_TX_IN_PROGRESS, &adapter->state);
+		/* send tstamps to hw */
+#ifdef FW_UART_SHOW_TSTAMPS
+		rnpgbe_mbx_tstamps_show(hw, sec, nanosec);
+#endif
+	} else if (time_after(jiffies,
+			      adapter->tx_hwtstamp_start +
+				      adapter->tx_timeout_factor * HZ)) {
+		/* the timestamp never arrived in time; drop the skb */
+		if (adapter->ptp_tx_skb)
+			dev_kfree_skb_any(adapter->ptp_tx_skb);
+		adapter->ptp_tx_skb = NULL;
+		adapter->tx_hwtstamp_timeouts++;
+		clear_bit_unlock(__RNP_PTP_TX_IN_PROGRESS, &adapter->state);
+		netdev_warn(adapter->netdev, "clearing Tx timestamp hang\n");
+	} else {
+		/* reschedule to check later */
+#ifdef DEBUG_PTP_HARD_SOFTWAY_TX
+		struct skb_shared_hwtstamps shhwtstamp;
+		u64 ns = 0;
+
+		ns = rnpgbe_get_software_ts();
+		shhwtstamp.hwtstamp = ns_to_ktime(ns);
+		if (adapter->ptp_tx_skb) {
+			skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamp);
+			dev_consume_skb_any(adapter->ptp_tx_skb);
+			adapter->ptp_tx_skb = NULL;
+		}
+#else
+		schedule_work(&adapter->tx_hwtstamp_work);
+#endif
+	}
+}
+
+void rnpgbe_ptp_get_rx_hwstamp(struct rnpgbe_adapter *adapter,
+			       union rnpgbe_rx_desc *desc, struct sk_buff *skb)
+{
+	u64 ns = 0;
+	u64 tsvalueh = 0, tsvaluel = 0;
+	struct skb_shared_hwtstamps *hwtstamps = NULL;
+
+	if (!skb || !adapter->ptp_rx_en) {
+		netdev_dbg(adapter->netdev,
+			   "hwstamp skb is null or rx_en is zero %u\n",
+			   adapter->ptp_rx_en);
+		return;
+	}
+
+	hwtstamps = skb_hwtstamps(skb);
+
+#ifdef DEBUG_PTP_HARD_SOFTWAY_RX
+	ns = rnpgbe_get_software_ts();
+#else
+	if (likely(!((desc->wb.cmd) & RNP_RXD_STAT_PTP)))
+		return;
+	/* the rx hardware timestamp is stored in front of the mac header;
+	 * skb->head and skb->data point to the same place after alloc_skb,
+	 * so pull skb->data forward past the timestamp bytes to reach the
+	 * mac header while leaving skb->head untouched
+	 */
+	/* the first 8 bytes are reserved and the next 8 bytes hold the
+	 * timestamp: the high 32 bits are seconds, the low 32 bits are
+	 * nanoseconds
+	 */
+	skb_copy_from_linear_data_offset(skb, RNP_RX_TIME_RESERVE, &tsvalueh,
+					 RNP_RX_SEC_SIZE);
+	skb_copy_from_linear_data_offset(skb,
+					 RNP_RX_TIME_RESERVE + RNP_RX_SEC_SIZE,
+					 &tsvaluel, RNP_RX_NANOSEC_SIZE);
+	skb_pull(skb, RNP_RX_HWTS_OFFSET);
+	tsvalueh = ntohl(tsvalueh);
+	tsvaluel = ntohl(tsvaluel);
+
+	ns = tsvaluel & RNP_RX_NSEC_MASK;
+	ns += ((tsvalueh & RNP_RX_SEC_MASK) * 1000000000ULL);
+
+	netdev_dbg(adapter->netdev,
+		   "ptp get hardware ts-sec %llu ts-nanosec %llu\n", tsvalueh,
+		   tsvaluel);
+#endif
+	hwtstamps->hwtstamp = ns_to_ktime(ns);
+#ifdef DEBUG_PTP_RX_TIMESTAMP
+	rnpgbe_print_human_timestamp(ns, "RX");
+#endif
+}
+
+void rnpgbe_ptp_reset(struct rnpgbe_adapter *adapter)
+{
+	rnpgbe_ptp_setup_ptp(adapter, adapter->ptp_config_value);
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ptp.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ptp.h
new file mode 100755
index 0000000000000000000000000000000000000000..8acd996c8fca7387a09d0cd18686bab4e833c785
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ptp.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef __RNPGBE_PTP_H__
+#define __RNPGBE_PTP_H__
+
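+/* low-level MAC IEEE 1588 timestamp register operations */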
+struct rnpgbe_hwtimestamp {
+	void (*config_hw_tstamping)(void __iomem *ioaddr, u32 data);
+	void (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock,
+					    int gmac4, u32 *ssinc);
+	void (*config_mac_irq_enable)(void __iomem *ioaddr, bool on);
+	int (*init_systime)(void __iomem *ioaddr, u32 sec, u32 nsec);
+	int (*config_addend)(void __iomem *ioaddr, u32 addend);
+	int (*adjust_systime)(void __iomem *ioaddr, u32 sec, u32 nsec,
+			      int add_sub, int gmac4);
+	void (*get_systime)(void __iomem *ioaddr, u64 *systime);
+};
+/* IEEE 1588 PTP register offsets */
+#define PTP_TCR 0x00 /* Timestamp Control Reg */
+#define PTP_SSIR 0x04 /* Sub-Second Increment Reg */
+#define PTP_STSR 0x08 /* System Time – Seconds Reg */
+#define PTP_STNSR 0x0c /* System Time – Nanoseconds Reg */
+#define PTP_STSUR 0x10 /* System Time – Seconds Update Reg */
+#define PTP_STNSUR 0x14 /* System Time – Nanoseconds Update Reg */
+#define PTP_TAR 0x18 /* Timestamp Addend Reg */
+#define PTP_PPS_CONTROL 0x2c
+#define RNP_PTP_STNSUR_ADDSUB_SHIFT 31
+#define RNP_PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 10e9-1 ns */
+#define RNP_PTP_BINARY_ROLLOVER_MODE 0x80000000 /* ~0.466 ns */
+/* PTP Timestamp control register defines */
+#define RNP_PTP_TCR_TSENA BIT(0) /*Timestamp Enable*/
+#define RNP_PTP_TCR_TSCFUPDT BIT(1) /* Timestamp Fine/Coarse Update */
+#define RNP_PTP_TCR_TSINIT BIT(2) /* Timestamp Initialize */
+#define RNP_PTP_TCR_TSUPDT BIT(3) /* Timestamp Update */
+#define RNP_PTP_TCR_TSTRIG BIT(4) /* Timestamp Interrupt Trigger Enable */
+#define RNP_PTP_TCR_TSADDREG BIT(5) /* Addend Reg Update */
+#define RNP_PTP_TCR_TSENALL BIT(8) /* Enable Timestamp for All Frames */
+#define RNP_PTP_TCR_TSCTRLSSR BIT(9) /* Digital or Binary Rollover Control */
+#define RNP_PTP_TCR_TSVER2ENA                                                  \
+	BIT(10) /* Enable PTP packet Processing for Version 2 Format */
+#define RNP_PTP_TCR_TSIPENA                                                    \
+	BIT(11) /* Enable Processing of PTP over Ethernet Frames */
+#define RNP_PTP_TCR_TSIPV6ENA                                                  \
+	BIT(12) /* Enable Processing of PTP Frames Sent over IPv6-UDP */
+#define RNP_PTP_TCR_TSIPV4ENA                                                  \
+	BIT(13) /* Enable Processing of PTP Frames Sent over IPv4-UDP */
+#define RNP_PTP_TCR_TSEVNTENA                                                  \
+	BIT(14) /* Enable Timestamp Snapshot for Event Messages */
+#define RNP_PTP_TCR_TSMSTRENA                                                  \
+	BIT(15) /* Enable Snapshot for Messages Relevant to Master */
+/* Note: 802.1AS runs over Ethernet frames, while normal PTP runs over UDP */
+
+/* Select which PTP packets take snapshots.
+ * On this MAC:
+ * SYNC, Pdelay_Req and Pdelay_Resp are timestamped when TSEVNTENA is set;
+ * otherwise SYNC, Follow_Up, Delay_Req, Delay_Resp, Pdelay_Req, Pdelay_Resp
+ * and Pdelay_Resp_Follow_Up are timestamped.
+ */
+#define RNP_PTP_TCR_SNAPTYPSEL_1 BIT(16)
+#define RNP_PTP_TCR_TSENMACADDR                                                \
+	BIT(18) /* Enable MAC address for PTP Frame Filtering */
+#define RNP_PTP_TCR_ESTI                                                       \
+	BIT(20) /* External System Time Input Or MAC Internal Clock*/
+#define RNP_PTP_TCR_AV8021ASMEN BIT(28) /* AV802.1 AS Mode Enable*/
+/* Sub-second increment defines */
+#define RNP_PTP_SSIR_SSINC_MASK (0xff) /* Sub-second increment value */
+#define RNP_PTP_SSIR_SSINC_SHIFT (16) /* Sub-second increment offset */
+#define RNP_MAC_TXTSC BIT(15) /* TX timestamp reg is fill complete */
+#define RNP_MAC_TXTSSTSLO GENMASK(30, 0) /* valid nanosecond bits */
+#define RNP_RX_SEC_MASK GENMASK(30, 0)
+#define RNP_RX_NSEC_MASK GENMASK(30, 0)
+#define RNP_RX_TIME_RESERVE (8)
+#define RNP_RX_SEC_SIZE (4)
+#define RNP_RX_NANOSEC_SIZE (4)
+#define RNP_RX_HWTS_OFFSET                                                     \
+	(RNP_RX_SEC_SIZE + RNP_RX_NANOSEC_SIZE + RNP_RX_TIME_RESERVE)
+#define PTP_HWTX_TIME_VALUE_MASK GENMASK(31, 0)
+#define PTP_GET_TX_HWTS_FINISH (1)
+#define PTP_GET_TX_HWTS_UPDATE (0)
+/* hardware timestamping unavailable, so fake timestamps from the software clock */
+#define DEBUG_PTP_HARD_SOFTWAY
+
+#ifdef HAVE_PTP_1588_CLOCK
+int rnpgbe_ptp_get_ts_config(struct rnpgbe_adapter *pf, struct ifreq *ifr);
+int rnpgbe_ptp_set_ts_config(struct rnpgbe_adapter *pf, struct ifreq *ifr);
+int rnpgbe_ptp_register(struct rnpgbe_adapter *pf);
+void rnpgbe_ptp_unregister(struct rnpgbe_adapter *pf);
+void rnpgbe_ptp_get_rx_hwstamp(struct rnpgbe_adapter *pf,
+			       union rnpgbe_rx_desc *desc, struct sk_buff *skb);
+void rnpgbe_tx_hwtstamp_work(struct work_struct *work);
+void rnpgbe_ptp_reset(struct rnpgbe_adapter *adapter);
+#endif
+#endif /* __RNPGBE_PTP_H__ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_regs.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_regs.h
new file mode 100755
index 0000000000000000000000000000000000000000..b82919c4c03582f202b1880731e9bffe6abe49f6
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_regs.h
@@ -0,0 +1,774 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef RNPGBE_REGS_H
+#define RNPGBE_REGS_H
+
+/*             BAR2 memory                   */
+/* ------------------------------------------*/
+/*	module  | size  |  start   |    end  */
+/*	DMA	| 32KB	| 0_0000H  | 0_7FFFH */
+/*	REG	| 32KB	| 0_8000H  | 0_FFFFH */
+/*	ETH	| 64KB	| 1_0000H  | 1_FFFFH */
+/*	GMAC	| 32KB	| 2_0000H  | 2_7FFFH */
+/*	MSIX    | 32KB  | 2_8000H  | 2_FFFFH */
+/* ------------------------------------------*/
+
+/* ==================== RNP-DMA Global Registers ==================== */
+/* n10 */
+#define RNP10_RING_BASE (0x8000)
+/* n20 */
+#define RNP20_RING_BASE (0x8000)
+/* n500 */
+#define RNP500_RING_BASE (0x1000)
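+/* each DMA ring occupies a 0x100-byte register window above the ring base */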
+#define RING_OFFSET(queue_idx) (0x100 * (queue_idx))
+#define RNP_DMA_VERSION (0x0000)
+#define RNP_DMA_CONFIG (0x0004)
+#define DMA_MAC_LOOPBACK (1 << 0)
+#define DMA_SWITCH_LOOPBACK (1 << 1)
+#define DMA_VEB_BYPASS (1 << 4)
+#define DMA_AXI_ORDER (1 << 5)
+#define DMA_RX_PADDING (1 << 8)
+#define DMA_MAP_MODE(n) (n << 12)
+#define DMA_RX_FRAGMENT_BYTES(n) (((n) / 16) << 16)
+#define RNP_DMA_STATUS (0x0008)
+#define RNP_DMA_RX_DATA_PROG_FULL_THRESH (0x00a0)
+#define DMA_RING_NUM (0xff << 24)
+#define RC_CONTROL_HW (0x01)
+#define RC_CONTROL_PHY_DRIVER (0x02)
+#define RC_JUMP_STATUS (0x04)
+#define RC_PHY_LINK_DONE (0x08)
+#define RC_LINK_CHANGE (0x10)
+#define RNP_DMA_DUMY (0x000c)
+#define RNP_DMA_RX_START (0x10)
+#define RNP_DMA_RX_READY (0x14)
+#define RNP_DMA_TX_START (0x18)
+#define RNP_DMA_TX_READY (0x1c)
+#define RNP_DMA_INT_STAT (0x20)
+#define RNP_DMA_INT_MASK (0x24)
+#define TX_INT_MASK (1 << 1)
+#define RX_INT_MASK (1 << 0)
+#define RNP_DMA_INT_CLR (0x28)
+#define RNP_DMA_INT_TRIG (0x2c)
+#define RNP_DMA_AXI_EN (0x0010)
+#define RX_AXI_RW_EN (0x03 << 0)
+#define TX_AXI_RW_EN (0x03 << 2)
+#define RNP_DMA_AXI_STAT (0x0014)
+#define RNP_VEB_MAC_MASK_LO (0x0020)
+#define RNP_VEB_MAC_MASK_HI (0x0024)
+#define RNP_VEB_VLAN_MASK (0x0028)
+#define DEBUG_PROBE_NUM 16
+#define RNP_DMA_DEBUG_PROBE_LO_REG(n) (0x0100 + 0x08 * (n))
+#define RNP_DMA_DEBUG_PROBE_HI_REG(n) (0x0100 + 0x08 * (n))
+#define DEBUG_CNT_NUM 76
+#define RNP_DMA_DEBUG_CNT(n) (0x0200 + 0x04 * (n))
+#define RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_0 (RNP_DMA_DEBUG_CNT(17))
+#define RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_1 (RNP_DMA_DEBUG_CNT(18))
+#define RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_2 (RNP_DMA_DEBUG_CNT(19))
+#define RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_3 (RNP_DMA_DEBUG_CNT(20))
+#define RNP_DMA_STATS_DMA_TO_SWITCH (RNP_DMA_DEBUG_CNT(21))
+#define RNP_DMA_STATS_MAC_TO_DMA (RNP_DMA_DEBUG_CNT(22))
+#define RNP_DMA_STATS_SWITCH_TO_DMA (RNP_DMA_DEBUG_CNT(23))
+#define RNP_PCI_WR_TO_HOST (RNP_DMA_DEBUG_CNT(34))
+/* RX-Queue Registers */
+#define RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_HI (0x30)
+#define RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_LO (0x34)
+#define RNP_DMA_REG_RX_DESC_BUF_LEN (0x38)
+#define RNP_DMA_REG_RX_DESC_BUF_HEAD (0x3c)
+#define RNP_DMA_REG_RX_DESC_BUF_TAIL (0x40)
+#define RNP_DMA_REG_RX_DESC_FETCH_CTRL (0x44)
+#define RNP_DMA_REG_RX_INT_DELAY_TIMER (0x48)
+#define RNP_DMA_REG_RX_INT_DELAY_PKTCNT (0x4c)
+#define RNP_DMA_REG_RX_ARB_DEF_LVL (0x50)
+#define PCI_DMA_REG_RX_DESC_TIMEOUT_TH (0x54)
+#define PCI_DMA_REG_RX_SCATTER_LENGTH (0x58)
+/* TX-Queue Registers */
+#define RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_HI (0x60)
+#define RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_LO (0x64)
+#define RNP_DMA_REG_TX_DESC_BUF_LEN (0x68)
+#define RNP_DMA_REG_TX_DESC_BUF_HEAD (0x6c)
+#define RNP_DMA_REG_TX_DESC_BUF_TAIL (0x70)
+#define RNP_DMA_REG_TX_DESC_FETCH_CTRL (0x74)
+#define RNP_DMA_REG_TX_INT_DELAY_TIMER (0x78)
+#define RNP_DMA_REG_TX_INT_DELAY_PKTCNT (0x7c)
+#define RNP_DMA_REG_TX_ARB_DEF_LVL (0x80)
+#define RNP_DMA_REG_TX_FLOW_CTRL_TH (0x84)
+#define RNP_DMA_REG_TX_FLOW_CTRL_TM (0x88)
+#define RNP_DMA_PKT_FIFO_DATA_PROG_FULL_THRESH (0x0098)
+/* VEB Registers */
+#define VEB_TBL_CNTS 64
+#define RNP_DMA_PORT_VBE_MAC_LO_TBL(port, vf)                                  \
+	(0x80A0 + 4 * (port) + 0x100 * (vf))
+#define RNP_DMA_PORT_VBE_MAC_HI_TBL(port, vf)                                  \
+	(0x80B0 + 4 * (port) + 0x100 * (vf))
+#define RNP_DMA_PORT_VEB_VID_TBL(port, vf) (0x80C0 + 4 * (port) + 0x100 * (vf))
+#define RNP_DMA_PORT_VEB_VF_RING_TBL(port, vf)                                 \
+	(0x80D0 + 4 * (port) + 0x100 * (vf))
+#define RNP_DMA_STATS_MAC_TO_MAC (0x1b0)
+#define RNP_DMA_STATS_SWITCH_TO_SWITCH (0x1a4)
+
+/* ================================================================== */
+#define RNP500_NIC_BASE (0x8000)
+#define RNP500_TOP_NIC_REST_N (0x8010 - RNP500_NIC_BASE)
+#define RNP500_TOP_MAC_OUI (0xc004 - RNP500_NIC_BASE)
+#define RNP500_TOP_MAC_SN (0xc008 - RNP500_NIC_BASE)
+#define RNP500_TOP_NIC_CONFIG (0x0004)
+
+/* ==================== RNP-ETH Global Registers ==================== */
+#define RNP_ETH_BASE (0x10000)
+
+/*
+ * [3:0]:
+ * 4'b0000:RSS disable
+ * 4'b0001:RSS only
+ * 4'b0100:DCB and RSS--8*16
+ * 4'b1010:POOLS and RSS--32*4
+ * [3] :virtual enable
+ * [16]:ipv4_hash_tcp_enable
+ * [17]:ipv4_hash_enable
+ * [20]:ipv6_hash_enable
+ * [21]:ipv6_hash_tcp_enable
+ * [22]:ipv4_hash_udp_enable
+ * [23]:ipv6_hash_udp_enable
+ * [24]:ipv4_hash_sctp_enable
+ * [25]:ipv6_hash_sctp_enable
+ */
+
+#define INNER_L4_BIT BIT(6)
+#define PKT_LEN_ERR (2)
+#define HDR_LEN_ERR (1)
+#define DROP_ALL_THRESH (2046) /* drop all rx */
+#define RECEIVE_ALL_THRESH (0x270) /* receive all rx */
+#define RNP500_VEB_TBL_CNTS 8
+#define RNP500_DMA_RBUF_FIFO (0x00b0)
+#define RNP500_DMA_PORT_VBE_MAC_LO_TBL(port, vf)                               \
+	(0x10c0 + 4 * (port) + 0x100 * (vf))
+#define RNP500_DMA_PORT_VBE_MAC_HI_TBL(port, vf)                               \
+	(0x10c4 + 4 * (port) + 0x100 * (vf))
+#define RNP500_DMA_PORT_VEB_VID_TBL(port, vf)                                  \
+	(0x10C8 + 4 * (port) + 0x100 * (vf))
+#define RNP500_DMA_PORT_VEB_VF_RING_TBL(port, vf)                              \
+	(0x10cc + 4 * (port) + 0x100 * (vf))
+#define RNP500_ETH_BASE (0x10000)
+#define RNP500_ETH_TUPLE5_SAQF(n) (0xc000 + 0x04 * (n))
+#define RNP500_ETH_TUPLE5_DAQF(n) (0xc400 + 0x04 * (n))
+#define RNP500_ETH_TUPLE5_SDPQF(n) (0xc800 + 0x04 * (n))
+#define RNP500_ETH_TUPLE5_FTQF(n) (0xcc00 + 0x04 * (n))
+#define RNP500_ETH_TUPLE5_POLICY(n) (0xce00 + 0x04 * (n))
+#define RNP500_ETH_DEFAULT_RX_MIN_LEN (0x80f0)
+#define RNP500_ETH_DEFAULT_RX_MAX_LEN (0x80f4)
+#define RNP500_ETH_VLAN_VME_REG(n) (0x8040 + 0x04 * (n))
+#define RNP500_ETH_ERR_MASK_VECTOR (0x8060)
+#define RNP500_ETH_RSS_MASK (0x3ff0001)
+#define RNP500_ETH_ENABLE_RSS_ONLY (0x3f30001)
+#define RNP500_ETH_RSS_CONTROL (0x92a0)
+#define RNP500_MRQC_IOV_EN (0x92a0)
+#define RNP500_IOV_ENABLED (1 << 3)
+#define RNP500_ETH_DISABLE_RSS (0)
+#define RNP500_ETH_SYNQF (0x9290)
+#define RNP500_ETH_SYNQF_PRIORITY (0x9294)
+#define RNP500_ETH_FCS_EN (0x804c)
+#define RNP500_ETH_HIGH_WATER(n) (0x80c0 + n * (0x08))
+#define RNP500_ETH_LOW_WATER(n) (0x80c4 + n * (0x08))
+#define RNP500_ETH_WRAP_FIELD_TYPE (0x805c)
+#define RNP500_ETH_TX_VLAN_CONTROL_EANBLE (0x0070)
+#define RNP500_ETH_TX_VLAN_TYPE (0x0074)
+#define RNP500_ETH_RX_MAC_LEN_REG (0x80e0)
+#define RNP500_ETH_WHOLE_PKT_LEN_ERR_DROP (0x807c)
+#define RNP500_RAH_AV 0x80000000
+#define RNP500_ETH_RAR_RL(n) (0xa000 + 0x04 * n)
+#define RNP500_ETH_RAR_RH(n) (0xa400 + 0x04 * n)
+#define RNP500_FCTRL_BPE BIT(10)
+#define RNP500_FCTRL_UPE BIT(9)
+#define RNP500_FCTRL_MPE BIT(8)
+#define RNP500_ETH_DMAC_FCTRL (0x9110)
+#define RNP500_ETH_DMAC_MCSTCTRL (0x9114)
+#define RNP500_MCSTCTRL_MULTICASE_TBL_EN (1 << 4)
+#define RNP500_MCSTCTRL_UNICASE_TBL_EN (1 << 3)
+#define RNP500_VM_DMAC_MPSAR_RING(entry)                                       \
+	(0xb400 + (4 * (entry)))
+#define RNP500_ETH_MUTICAST_HASH_TABLE(n) (0xac00 + 0x04 * n)
+#define RNP500_ETH_RSS_KEY (0x92d0)
+#define RNP500_ETH_TC_IPH_OFFSET_TABLE(n) (0xe800 + 0x04 * (n))
+#define RNP500_ETH_RSS_INDIR_TBL(n) (0xe000 + 0x04 * (n))
+#define RNP500_ETH_VLAN_FILTER_TABLE(n) (0xb000 + 0x04 * (n))
+#define RNP500_VFTA RNP500_ETH_VLAN_FILTER_TABLE
+#define RNP500_VLVF(idx) (0xb600 + 4 * (idx))
+#define RNP500_VLVF_TABLE(idx) (0xb700 + 4 * (idx))
+#define RNP500_ETH_VLAN_FILTER_ENABLE (0x9118)
+#define RNP500_PRIORITY_1_MARK (0x8080)
+#define RNP500_PRIORITY_1 (400)
+#define RNP500_PRIORITY_0 (300)
+#define RNP500_PRIORITY_0_MARK (0x8084)
+#define RNP500_PRIORITY_EN (0x8088)
+#define RNP500_PRIORITY_EN_8023 (0x808c)
+#define RNP500_ETH_LAYER2_ETQF(n) (0x9200 + 0x04 * (n))
+#define RNP500_ETH_LAYER2_ETQS(n) (0x9240 + 0x04 * (n))
+#define RNP500_ETH_BYPASS (0x8000)
+#define RNP500_ETH_ERR_MASK_VECTOR (0x8060)
+#define RNP500_ETH_PRIV_DATA_CONTROL_REG (0x8068)
+#define RNP500_ETH_DEFAULT_RX_RING (0x806c)
+#define RNP500_ETH_DOUBLE_VLAN_DROP (0x8078)
+#define RNP500_HOST_FILTER_EN (0x800c)
+#define RNP500_BAD_PACKETS_RECEIVE_EN (0x8024)
+#define RNP500_REDIR_EN (0x8030)
+#define WATCHDOG_TIMER_ERROR BIT(0)
+#define RUN_FRAME_ERROR BIT(1)
+#define GAINT_FRAME_ERROR BIT(2)
+#define LATE_COLLISION_ERROR BIT(3)
+#define GMII_ERROR BIT(4)
+#define DRIBBLING_BIT_ERROR BIT(5)
+#define CRC_ERROR BIT(6)
+#define LENGTH_ERROR BIT(8)
+#define DA_FILTER_ERROR BIT(9)
+#define SA_FILTER_ERROR BIT(10)
+#define RNP500_MAC_ERR_MASK (0x8034)
+#define RNP500_ETH_SCTP_CHECKSUM_EN (0x8038)
+#define RNP500_ETH_VLAN_RM_TYPE (0x8054)
+#define RNP500_ETH_EXCEPT_DROP_PROC (0x0470)
+#define RNP500_ETH_EMAC_PARSE_PROGFULL_THRESH (0x8098)
+#define RNP500_ETH_TX_MUX_DROP (0x98)
+#define RNP500_VEB_VFMPRC(n) (0x4018 + 0x100 * n)
+#define RNP500_VEB_VFBPRC(n) (0x401c + 0x100 * n)
+#define RNP500_RX_TIMEOUT_DROP(n) (0x404c + 0x100 * n)
+#define RNP500_STATISTIC_CRL(n) (0x4048 + 0x100 * n)
+/* n500 statistics REG */
+#define RNP500_RX_MULTI_PKT_NUM (0x8224)
+#define RNP500_RX_BROAD_PKT_NUM (0x8228)
+#define RNP500_RX_MAC_CUT_NUM (0x8304)
+#define RNP500_RX_MAC_LCS_ERR_NUM (0x8308)
+#define RNP500_RX_MAC_LEN_ERR_NUM (0X830C)
+#define RNP500_RX_MAC_SLEN_ERR_NUM (0x8310)
+#define RNP500_RX_MAC_GLEN_ERR_NUM (0x8314)
+#define RNP500_RX_MAC_FCS_ERR_NUM (0x8318)
+#define RNP500_RX_MAC_SFCS_ERR_NUM (0x831c)
+#define RNP500_RX_MAC_GFCS_ERR_NUM (0x8320)
+#define RNP500_TX_MULTI_NUM (0x214)
+#define RNP500_TX_BROADCAST_NUM (0x218)
+#define RNP500_RX_DROP_PKT_NUM (0X8230)
+#define RNP500_RXTRANS_DROP (0x8908)
+#define RNP500_RXTRANS_CUT_ERR_PKTS (0x894c)
+#define RNP500_DECAP_PKT_DROP1_NUM (0X82ec)
+#define RNP500_MAC_COUNT_CONTROL (0x0100)
+#define RNP500_MAC_GLEN_ERR_NUM (0X01a8)
+#define RNP500_RX_DEBUG(n) (0x8400 + 0x04 * n)
+#define RNP500_ETH_HOST_L2_DROP_PKTS RNP500_RX_DEBUG(4)
+#define RNP500_ETH_REDIR_INPUT_MATCH_DROP_PKTS RNP500_RX_DEBUG(5)
+#define RNP500_ETH_ETYPE_DROP_PKTS RNP500_RX_DEBUG(6)
+#define RNP500_ETH_TCP_SYN_DROP_PKTS RNP500_RX_DEBUG(7)
+#define RNP500_ETH_REDIR_TUPLE5_DROP_PKTS RNP500_RX_DEBUG(8)
+
+/* ================================================================== */
+#define ETH_ERR_SCTP (1 << 4)
+#define ETH_ERR_L4 (1 << 3)
+#define ETH_ERR_L3 (1 << 2)
+#define ETH_ERR_PKT_LEN_ERR (1 << 1)
+#define ETH_ERR_HDR_LEN_ERR (1 << 0)
+#define ETH_IGNORE_ALL_ERR                                                     \
+	(ETH_ERR_SCTP | ETH_ERR_L4 | ETH_ERR_L3 | ETH_ERR_PKT_LEN_ERR |        \
+	 ETH_ERR_HDR_LEN_ERR)
+#define VM_DMAC_TBL_SZ 128
+#define RNP_ETH_ENABLE_RSS_ONLY (0x3f30001)
+#define RNP_ETH_DISABLE_RSS (0)
+#define RNP_ETH_TX_PROGFULL_THRESH_PORT(n) (RNP_ETH_BASE + 0x0060 + 0x08 * (n))
+#define RNP_ETH_TX_PROGEMPTY_THRESH_PORT(n) (RNP_ETH_BASE + 0x0064 + 0x08 * (n))
+#define RNP_ETH_EMAC_DMA_PROFULL_THRESH (RNP_ETH_BASE + 0x0080)
+#define RNP_ETH_EMAC_DMA_PROEMPTY_THRESH (RNP_ETH_BASE + 0x0084)
+#define RNP_ETH_EMAC_SW_PROFULL_THRESH (RNP_ETH_BASE + 0x0088)
+#define RNP_ETH_EMAC_SW_PROEMPTY_THRESH (RNP_ETH_BASE + 0x008c)
+#define RNP_ETH_EMAC_BMC_TX_PROFULL_THRESH (RNP_ETH_BASE + 0x0090)
+#define RNP_ETH_EMAC_BMC_TX_PROEMPTY_THRESH (RNP_ETH_BASE + 0x0094)
+#define RNP_ETH_CNT_PKT_EMAC_TX(n) (RNP_ETH_BASE + 0x00a0 + 0x04 * (n))
+#define RNP_ETH_CNT_PKT_PECL_TX(n) (RNP_ETH_BASE + 0x00b0 + 0x04 * (n))
+#define RNP_ETH_STATUS_TX_FLOWCTRL(n) (RNP_ETH_BASE + 0x00c0 + 0x04 * (n))
+#define RNP_ETH_VERSION_FLOWWCTRL (RNP_ETH_BASE + 0x00d0)
+#define RNP_ETH_CFG_ETH_MAC (RNP_ETH_BASE + 0x00d4)
+#define RNP_ETH_SCA_TX_CS(port) (RNP_ETH_BASE + 0x0100 + 0x08 * (port))
+#define RNP_ETH_SCA_TX_NS(port) (RNP_ETH_BASE + 0x0104 + 0x08 * (port))
+#define RNP_ETH_TXTRANS_CS(port) (RNP_ETH_BASE + 0x0120 + 0x08 * (port))
+#define RNP_ETH_TXTRANS_NS(port) (RNP_ETH_BASE + 0x0124 + 0x08 * (port))
+#define RNP_ETH_1TO4_INST0_IN_PKTS (RNP_ETH_BASE + 0x0200)
+#define RNP_ETH_1TO4_INST1_IN_PKTS (RNP_ETH_BASE + 0x0204)
+#define RNP_ETH_1TO4_INST2_IN_PKTS (RNP_ETH_BASE + 0x0208)
+#define RNP_ETH_1TO4_INST3_IN_PKTS (RNP_ETH_BASE + 0x020c)
+#define RNP_ETH_IN_0_TX_PKT_NUM(port) (RNP_ETH_BASE + 0x0210 + 0x10 * (port))
+#define RNP_ETH_IN_1_TX_PKT_NUM(port) (RNP_ETH_BASE + 0x0214 + 0x10 * (port))
+#define RNP_ETH_IN_2_TX_PKT_NUM(port) (RNP_ETH_BASE + 0x0218 + 0x10 * (port))
+#define RNP_ETH_IN_3_TX_PKT_NUM(port) (RNP_ETH_BASE + 0x021c + 0x10 * (port))
+#define RNP_ETH_EMAC_TX_TO_PHY_PKTS(port) (RNP_ETH_BASE + 0x0250 + 4 * (port))
+#define RNP_ETH_TXTRANS_PTP_PKT_NUM(port) (RNP_ETH_BASE + 0x0260 + 4 * (port))
+#define RNP_ETH_TX_DEBUG(n) (RNP_ETH_BASE + 0x0300 + 0x04 * (n))
+#define RNP_ETH_PTP_TX_STATUS(n) (RNP_ETH_BASE + 0x0400)
+#define RNP_ETH_PTP_TX_HTIMES(n) (RNP_ETH_BASE + 0x0404)
+#define RNP_ETH_PTP_TX_LTIMES(n) (RNP_ETH_BASE + 0x0408)
+#define RNP_ETH_PTP_TX_TSVALUE_STATUS(n) (RNP_ETH_BASE + 0x040c)
+#define RNP_ETH_PTP_TX_CLEAR(n) (RNP_ETH_BASE + 0x0410)
+#define RNP_ETH_MAC_SPEED_PORT(n) (RNP_ETH_BASE + 0x0450 + 0x04 * (n))
+#define RNP_ETH_MAC_LOOPBACK_MODE_PORT(n) (RNP_ETH_BASE + 0x0460 + 0x04 * (n))
+#define RNP_ETH_EXCEPT_DROP_PROC (RNP_ETH_BASE + 0x0470)
+#define RNP_ETH_IPP (RNP_ETH_BASE + 0x8000)
+#define RNP_ETH_BYPASS (RNP_ETH_BASE + 0x8000)
+#define RNP_ETH_TUNNEL_MOD (RNP_ETH_BASE + 0x8004)
+#define RNP_ETH_LOOPBACK_EN (RNP_ETH_BASE + 0x8008)
+#define RNP_FIFO_CTRL_MODE (RNP_ETH_BASE + 0x800c)
+#define RNP_ETH_VXLAN_PORT (RNP_ETH_BASE + 0x8010)
+#define RNP_ETH_NVGRE_PORT (RNP_ETH_BASE + 0x8014)
+#define RNP_ETH_RDMA_PORT (RNP_ETH_BASE + 0x8018)
+#define RNP_HOST_FILTER_EN (RNP_ETH_BASE + 0x801c)
+#define RNP_MNG_FILTER_EN (RNP_ETH_BASE + 0x8020)
+#define RNP_ETH_TCAM_EN (RNP_ETH_BASE + 0x8024)
+#define RNP_CONGEST_DROP_EN (RNP_ETH_BASE + 0x8028)
+#define RNP_REDIR_EN (RNP_ETH_BASE + 0x8030)
+#define RNP_ETH_SCTP_CHECKSUM_EN (RNP_ETH_BASE + 0x8038)
+#define RNP_ETH_ARP_FUNC_EN (RNP_ETH_BASE + 0x803c)
+#define RNP_ETH_VLAN_VME_REG(n) (RNP_ETH_BASE + 0x8040 + 0x04 * (n))
+#define RNP_ETH_CVLAN_RM_EN (RNP_ETH_BASE + 0x8050)
+#define RNP_ETH_VLAN_RM_TYPE (RNP_ETH_BASE + 0x8054)
+#define RNP_ETH_WRAP_FIELD_TYPE (RNP_ETH_BASE + 0x805c)
+#define RNP_ETH_ERR_MASK_VECTOR (RNP_ETH_BASE + 0x8060)
+#define RNP_ETH_DEFAULT_RX_RING (RNP_ETH_BASE + 0x806c)
+#define RNP_ETH_RX_PROGFULL_THRESH_PORT(n) (RNP_ETH_BASE + 0x8070 + 0x08 * (n))
+#define RNP_ETH_RX_PROGEMPTY_THRESH_PORT(n) (RNP_ETH_BASE + 0x8074 + 0x08 * (n))
+#define RNP_ETH_EMAC_GAT_PROGFULL_THRESH (RNP_ETH_BASE + 0x8090)
+#define RNP_ETH_EMAC_GAT_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x8094)
+#define RNP_ETH_EMAC_PARSE_PROGFULL_THRESH (RNP_ETH_BASE + 0x8098)
+#define RNP_ETH_EMAC_PARSE_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x809c)
+#define RNP_ETH_FC_PROGFULL_THRESH (RNP_ETH_BASE + 0x80a0)
+#define RNP_ETH_FC_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x80a4)
+#define RNP_ETH_DIS_PROGFULL_THRESH (RNP_ETH_BASE + 0x80a8)
+#define RNP_ETH_DIS_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x80ac)
+#define RNP_ETH_COV_PROGFULL_THRESH (RNP_ETH_BASE + 0x80b0)
+#define RNP_ETH_COV_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x80b4)
+#define RNP_ETH_BMC_RX_PROGFULL_THRESH (RNP_ETH_BASE + 0x80b8)
+#define RNP_ETH_BMC_RX_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x80bc)
+#define RNP_ETH_HIGH_WATER(n) (RNP_ETH_BASE + 0x80c0 + n * (0x08))
+#define RNP_ETH_LOW_WATER(n) (RNP_ETH_BASE + 0x80c4 + n * (0x08))
+#define RNP_ETH_DEFAULT_RX_MIN_LEN (RNP_ETH_BASE + 0x80f0)
+#define RNP_ETH_DEFAULT_RX_MAX_LEN (RNP_ETH_BASE + 0x80f4)
+#define RNP_ETH_PTP_EVENT_PORT (RNP_ETH_BASE + 0x80f8)
+#define RNP_ETH_PTP_GENER_PORT_REG (RNP_ETH_BASE + 0x80fc)
+#define RNP_ETH_RX_TRANS_CS_PORT(n) (RNP_ETH_BASE + 0x8100 + 0x08 * (n))
+#define RNP_ETH_RX_TRANS_NS_PORT(n) (RNP_ETH_BASE + 0x8104 + 0x08 * (n))
+#define RNP_ETH_GAT_RX_CS (RNP_ETH_BASE + 0x8120)
+#define RNP_ETH_GAT_RX_NS (RNP_ETH_BASE + 0x8124)
+#define RNP_ETH_EMAC_PIP_CS (RNP_ETH_BASE + 0x8128)
+#define RNP_ETH_EMAC_PIP_NS (RNP_ETH_BASE + 0x812c)
+#define RNP_ETH_EMAC_FC_CS (RNP_ETH_BASE + 0x8138)
+#define RNP_ETH_EMAC_FC_NS (RNP_ETH_BASE + 0x813c)
+#define RNP_ETH_EMAC_DIS_CS (RNP_ETH_BASE + 0x8140)
+#define RNP_ETH_EMAC_DIS_NS (RNP_ETH_BASE + 0x8144)
+#define RNP_ETH_HOST_L2_FILTER_CS (RNP_ETH_BASE + 0x8150)
+#define RNP_ETH_HOST_L2_FILTER_NS (RNP_ETH_BASE + 0x8154)
+#define RNP_ETH_EMAC_DECAP_CS (RNP_ETH_BASE + 0x8158)
+#define RNP_ETH_EMAC_DECAP_NS (RNP_ETH_BASE + 0x815c)
+#define RNP_ETH_PFC_CONFIG_PROT(n) (RNP_ETH_BASE + 0x8180 + n * (0x04))
+#define RNP_ETH_RX_PKT_NUM(port) (RNP_ETH_BASE + 0x8220 + 0x04 * (port))
+#define RNP_ETH_RX_DROP_PKT_NUM(port) (RNP_ETH_BASE + 0x8230 + 0x04 * (port))
+#define RNP_ETH_TOTAL_GAT_RX_PKT_NUM (RNP_ETH_BASE + 0x8240)
+#define RNP_ETH_PKT_ARP_REQ_NUM (RNP_ETH_BASE + 0x8250)
+#define RNP_ETH_PKT_ARP_RESPONSE_NUM (RNP_ETH_BASE + 0x8254)
+#define RNP_ETH_ICMP_NUM (RNP_ETH_BASE + 0x8258)
+#define RNP_ETH_PKT_UDP_NUM (RNP_ETH_BASE + 0x825c)
+#define RNP_ETH_PKT_TCP_NUM (RNP_ETH_BASE + 0x8260)
+#define RNP_ETH_PKT_ESP_NUM (RNP_ETH_BASE + 0x8264)
+#define RNP_ETH_PKT_GRE_NUM (RNP_ETH_BASE + 0x8268)
+#define RNP_ETH_PKT_SCTP_NUM (RNP_ETH_BASE + 0x826c)
+#define RNP_ETH_PKT_TCPSYN_NUM (RNP_ETH_BASE + 0x8270)
+#define RNP_ETH_PKT_VXLAN_NUM (RNP_ETH_BASE + 0x8274)
+#define RNP_ETH_PKT_NVGRE_NUM (RNP_ETH_BASE + 0x8278)
+#define RNP_ETH_PKT_FRAGMENT_NUM (RNP_ETH_BASE + 0x827c)
+#define RNP_ETH_PKT_LAYER1_VLAN_NUM (RNP_ETH_BASE + 0x8280)
+#define RNP_ETH_PKT_LAYER2_VLAN_NUM (RNP_ETH_BASE + 0x8284)
+#define RNP_ETH_PKT_IPV4_NUM (RNP_ETH_BASE + 0x8288)
+#define RNP_ETH_PKT_IPV6_NUM (RNP_ETH_BASE + 0x828c)
+#define RNP_ETH_PKT_INGRESS_NUM (RNP_ETH_BASE + 0x8290)
+#define RNP_ETH_PKT_EGRESS_NUM (RNP_ETH_BASE + 0x8294)
+#define RNP_ETH_PKT_IP_HDR_LEN_ERR_NUM (RNP_ETH_BASE + 0x8298)
+#define RNP_ETH_PKT_IP_PKT_LEN_ERR_NUM (RNP_ETH_BASE + 0x829c)
+#define RNP_ETH_PKT_L3_HDR_CHK_ERR_NUM (RNP_ETH_BASE + 0x82a0)
+#define RNP_ETH_PKT_L4_HDR_CHK_ERR_NUM (RNP_ETH_BASE + 0x82a4)
+#define RNP_ETH_PKT_SCTP_CHK_ERR_NUM (RNP_ETH_BASE + 0x82a8)
+#define RNP_ETH_PKT_VLAN_ERR_NUM (RNP_ETH_BASE + 0x82ac)
+#define RNP_ETH_PKT_RDMA_NUM (RNP_ETH_BASE + 0x82b0)
+#define RNP_ETH_PKT_ARP_AUTO_RESPONSE_NUM (RNP_ETH_BASE + 0x82b4)
+#define RNP_ETH_PKT_ICMPV6_NUM (RNP_ETH_BASE + 0x82b8)
+#define RNP_ETH_PKT_IPV6_EXTEND_NUM (RNP_ETH_BASE + 0x82bc)
+#define RNP_ETH_PKT_802_3_NUM (RNP_ETH_BASE + 0x82c0)
+#define RNP_ETH_PKT_EXCEPT_SHORT_NUM (RNP_ETH_BASE + 0x82c4)
+#define RNP_ETH_PKT_PTP_NUM (RNP_ETH_BASE + 0x82c8)
+#define RNP_ETH_DECAP_PKT_IN_NUM (RNP_ETH_BASE + 0x82d0)
+#define RNP_ETH_DECAP_PKT_OUT_NUM (RNP_ETH_BASE + 0x82d4)
+#define RNP_ETH_DECAP_DMAC_OUT_NUM (RNP_ETH_BASE + 0x82d8)
+#define RNP_ETH_DECAP_BMC_OUT_NUM (RNP_ETH_BASE + 0x82dc)
+#define RNP_ETH_DECAP_SW_OUT_NUM (RNP_ETH_BASE + 0x82e0)
+#define RNP_ETH_DECAP_MIRROR_OUT_NUM (RNP_ETH_BASE + 0x82e4)
+#define RNP_ETH_DECAP_PKT_DROP_NUM(port) (RNP_ETH_BASE + 0x82e8 + 0x04 * (port))
+#define RNP_ETH_INVALID_DROP_PKTS RNP_ETH_DECAP_PKT_DROP_NUM(0)
+#define RNP_ETH_FILTER_DROP_PKTS RNP_ETH_DECAP_PKT_DROP_NUM(1)
+#define RNP_ETH_DECAP_DMAC_DROP_NUM (RNP_ETH_BASE + 0x82f0)
+#define RNP_ETH_DECAP_BMC_DROP_NUM (RNP_ETH_BASE + 0x82f4)
+#define RNP_ETH_DECAP_SWITCH_DROP_NUM (RNP_ETH_BASE + 0x82f8)
+#define RNP_ETH_DECAP_RM_VLAN_NUM (RNP_ETH_BASE + 0x82fc)
+#define RNP_ETH_RX_FC_PKT_IN_NUM (RNP_ETH_BASE + 0x8300)
+#define RNP_ETH_RX_FC_PKT_OUT_NUM (RNP_ETH_BASE + 0x8304)
+#define RNP_ETH_RX_FC_PKT_DROP0_NUM (RNP_ETH_BASE + 0x8308)
+#define RNP_ETH_RX_FC_PKT_DROP1_NUM (RNP_ETH_BASE + 0x830c)
+#define RNP_ETH_RING_FC_STATUS0 (RNP_ETH_BASE + 0x8310)
+#define RNP_ETH_RING_FC_STATUS1 (RNP_ETH_BASE + 0x8314)
+#define RNP_ETH_RING_FC_STATUS2 (RNP_ETH_BASE + 0x8318)
+#define RNP_ETH_RING_FC_STATUS3 (RNP_ETH_BASE + 0x831c)
+#define RNP_ETH_RX_DEBUG(n) (RNP_ETH_BASE + 0x8400 + 0x04 * (n))
+#define RNP_ETH_RX_FC_DEBUG0_NUM RNP_ETH_RX_DEBUG(0)
+#define RNP_ETH_RX_FC_DEBUG1_NUM RNP_ETH_RX_DEBUG(1)
+#define RNP_ETH_RX_DIS_DEBUG0_NUM RNP_ETH_RX_DEBUG(2)
+#define RNP_ETH_RX_DIS_DEBUG1_NUM RNP_ETH_RX_DEBUG(3)
+#define RNP_ETH_HOST_L2_DROP_PKTS RNP_ETH_RX_DEBUG(4)
+#define RNP_ETH_REDIR_INPUT_MATCH_DROP_PKTS RNP_ETH_RX_DEBUG(5)
+#define RNP_ETH_ETYPE_DROP_PKTS RNP_ETH_RX_DEBUG(6)
+#define RNP_ETH_TCP_SYN_DROP_PKTS RNP_ETH_RX_DEBUG(7)
+#define RNP_ETH_REDIR_TUPLE5_DROP_PKTS RNP_ETH_RX_DEBUG(8)
+#define RNP_ETH_REDIR_TCAM_DROP_PKTS RNP_ETH_RX_DEBUG(9)
+#define RNP_ETH_VMARK_TC(n) (RNP_ETH_BASE + 0x8500 + 0x04 * (n))
+#define RNP_RING_FC_ENABLE (RNP_ETH_BASE + 0x8520)
+#define RNP_SELECT_RING_EN(n) (RNP_ETH_BASE + 0x8524 + (0x4 * n))
+#define RNP_TC_FC_SW_EN (RNP_ETH_BASE + 0x8534)
+#define RNP_ETH_LOCAL_DIP(n) (RNP_ETH_BASE + 0x8600 + 0x04 * (n))
+#define RNP_ETH_LOCAL_DMAC_H(n) (RNP_ETH_BASE + 0x8700 + 0x04 * (n))
+#define RNP_ETH_LOCAL_DMAC_L(n) (RNP_ETH_BASE + 0x8800 + 0x04 * (n))
+#define RNP_RXTRANS_RX_PKTS(port) (RNP_ETH_BASE + 0x8900 + 0x40 * (port))
+#define RNP_RXTRANS_DROP_PKTS(port) (RNP_ETH_BASE + 0x8904 + 0x40 * (port))
+#define RNP_RXTRANS_WDT_ERR_PKTS(port) (RNP_ETH_BASE + 0x8908 + 0x40 * (port))
+#define RNP_RXTRANS_CODE_ERR_PKTS(port) (RNP_ETH_BASE + 0x890c + 0x40 * (port))
+#define RNP_RXTRANS_CRC_ERR_PKTS(port) (RNP_ETH_BASE + 0x8910 + 0x40 * (port))
+#define RNP_RXTRANS_SLEN_ERR_PKTS(port) (RNP_ETH_BASE + 0x8914 + 0x40 * (port))
+#define RNP_RXTRANS_GLEN_ERR_PKTS(port) (RNP_ETH_BASE + 0x8918 + 0x40 * (port))
+#define RNP_RXTRANS_IPH_ERR_PKTS(port) (RNP_ETH_BASE + 0x891c + 0x40 * (port))
+#define RNP_RXTRANS_CSUM_ERR_PKTS(port) (RNP_ETH_BASE + 0x8920 + 0x40 * (port))
+#define RNP_RXTRANS_LEN_ERR_PKTS(port) (RNP_ETH_BASE + 0x8924 + 0x40 * (port))
+#define RNP_RXTRANS_CUT_ERR_PKTS(port) (RNP_ETH_BASE + 0x8928 + 0x40 * (port))
+#define RNP_RXTRANS_EXCEPT_BYTES(port) (RNP_ETH_BASE + 0x892c + 0x40 * (port))
+#define RNP_RXTRANS_G1600_BYTES_PKTS(port)                                     \
+	(RNP_ETH_BASE + 0x8930 + 0x40 * (port))
+#define RNP_RX_RING_MAXRATE(n) (RNP_ETH_BASE + 0x8a00 + (0x4 * n))
+#define RNP_ETH_RX_PROGFULL_RTRN(n) (RNP_ETH_BASE + 0x8c00 + 0x04 * (n))
+#define RNP_ETH_CNT_PKT_EMAC_RX(n) (RNP_ETH_BASE + 0x8c10 + 0x04 * (n))
+#define RNP_ETH_CNT_PKT_PECL_RX(n) (RNP_ETH_BASE + 0x8c20 + 0x04 * (n))
+#define RNP_ETH_STATUS_RX_FLOWCTRL(n) (RNP_ETH_BASE + 0x8c30 + 0x04 * (n))
+#define RNP_ETH_DMAC_FCTRL (RNP_ETH_BASE + 0x9110)
+#define RNP_ETH_DMAC_MCSTCTRL (RNP_ETH_BASE + 0x9114)
+#define RNP_MCSTCTRL_MULTICASE_TBL_EN (1 << 2)
+#define RNP_MCSTCTRL_UNICASE_TBL_EN (1 << 3)
+#define RNP_MCSTCTRL_DMAC_47 0x00
+#define RNP_MCSTCTRL_DMAC_46 0x01
+#define RNP_MCSTCTRL_DMAC_45 0x02
+#define RNP_MCSTCTRL_DMAC_43 0x03
+#define RNP_ETH_VLAN_FILTER_ENABLE (RNP_ETH_BASE + 0x9118)
+#define RNP_ETH_INPORT_POLICY_VAL (RNP_ETH_BASE + 0x91d0)
+#define RNP_ETH_INPORT_POLICY_REG(n) (RNP_ETH_BASE + 0x91e0 + 0x04 * (n))
+#define ETH_LAYER2_NUM (16)
+#define RNP_ETH_LAYER2_ETQF(n) (RNP_ETH_BASE + 0x9200 + 0x04 * (n))
+#define RNP_ETH_LAYER2_ETQS(n) (RNP_ETH_BASE + 0x9240 + 0x04 * (n))
+#define RNP_ETH_LAYER2_ETQS_DEFAULT (RNP_ETH_BASE + 0x9280)
+#define RNP_ETH_ETQF_DEFAULT (RNP_ETH_BASE + 0x9284)
+#define RNP_ETH_SYNQF (RNP_ETH_BASE + 0x9290)
+#define RNP_ETH_SYNQF_PRIORITY (RNP_ETH_BASE + 0x9294)
+/*
+ * [3:0]:
+ * 4'b0000:RSS disable
+ * 4'b0001:RSS only
+ * 4'b0100:DCB and RSS--8*16
+ * 4'b1010:POOLS and RSS--32*4
+ * [3] :virtual enable
+ * [16]:ipv4_hash_tcp_enable
+ * [17]:ipv4_hash_enable
+ * [20]:ipv6_hash_enable
+ * [21]:ipv6_hash_tcp_enable
+ * [22]:ipv4_hash_udp_enable
+ * [23]:ipv6_hash_udp_enable
+ * [24]:ipv4_hash_sctp_enable
+ * [25]:ipv6_hash_sctp_enable
+ */
+#define RNP_ETH_RSS_CONTROL (RNP_ETH_BASE + 0x92a0)
+#define RNP_MRQC_IOV_EN (RNP_ETH_BASE + 0x92a0)
+#define RNP_IOV_ENABLED (1 << 3)
+#define RNP_ETH_RSS_KEY (RNP_ETH_BASE + 0x92d0)
+#define RNP_ETH_RAR_RL(n) (RNP_ETH_BASE + 0xa000 + 0x04 * n)
+#define RNP_ETH_RAR_RH(n) (RNP_ETH_BASE + 0xa400 + 0x04 * n)
+#define RNP_ETH_UTA(n) (RNP_ETH_BASE + 0xa800 + 0x04 * n)
+#define RNP_ETH_MUTICAST_HASH_TABLE(n) (RNP_ETH_BASE + 0xac00 + 0x04 * n)
+#define RNP_MTA(n) RNP_ETH_MUTICAST_HASH_TABLE(n)
+#define RNP_ETH_VLAN_FILTER_TABLE(n) (RNP_ETH_BASE + 0xb000 + 0x04 * (n))
+#define RNP_VFTA RNP_ETH_VLAN_FILTER_TABLE
+#define RNP_FCTRL_MULTICASE_BYPASS (1 << 8)
+#define RNP_FCTRL_UNICASE_BYPASS (1 << 9)
+#define RNP_FCTRL_BROADCASE_BYPASS (1 << 10)
+#define RNP_ETH_ETYPE_TABLE(n) (RNP_ETH_BASE + 0xb300 + 0x04 * (n))
+#define RNP_VM_DMAC_MPSAR_RING(entry)                                          \
+	(RNP_ETH_BASE + 0xb400 + (4 * (entry)))
+#define RNP_VLVF(idx) (RNP_ETH_BASE + 0xb600 + 4 * (idx))
+#define RNP_VLVFB(idx) (RNP_ETH_BASE + 0xb700 + 4 * (idx))
+#define RNP_VM_TUNNEL_PFVLVF_L(n) (RNP_ETH_BASE + 0xb800 + 0x04 * (n))
+#define RNP_VM_TUNNEL_PFVLVF_H(n) (RNP_ETH_BASE + 0xb900 + 0x04 * (n))
+/* 5 tuple */
+#define ETH_TUPLE5_NUM 128
+#define RNP_ETH_TUPLE5_SAQF(n) (RNP_ETH_BASE + 0xc000 + 0x04 * (n))
+#define RNP_ETH_TUPLE5_DAQF(n) (RNP_ETH_BASE + 0xc400 + 0x04 * (n))
+#define RNP_ETH_TUPLE5_SDPQF(n) (RNP_ETH_BASE + 0xc800 + 0x04 * (n))
+#define RNP_ETH_TUPLE5_FTQF(n) (RNP_ETH_BASE + 0xcc00 + 0x04 * (n))
+#define RNP_ETH_TUPLE5_POLICY(n) (RNP_ETH_BASE + 0xd000 + 0x04 * (n))
+#define RNP_ETH_RSS_INDIR_TBL(p, n)                                            \
+	(RNP_ETH_BASE + 0xe000 + 0x04 * (n) + 0x200 * (p))
+#define RNP_ETH_TC_IPH_OFFSET_TABLE(n) (RNP_ETH_BASE + 0xe800 + 0x04 * (n))
+#define RNP_ETH_TC_VLAN_OFFSET_TABLE(n) (RNP_ETH_BASE + 0xe820 + 0x04 * (n))
+#define RNP_ETH_TC_PORT_OFFSET_TABLE(n) (RNP_ETH_BASE + 0xe840 + 0x04 * (n))
+#define RNP_REDIR_RING_MASK (RNP_ETH_BASE + 0xe860)
+#define RNP_ETH_RSS_MODE (0x6fe00)
+#define RNP_ETH_RSS_INDIR_TBL_UV3P(n) (0x6ff00 + 0x04 * (n))
+
+/* ================================================================== */
+
+/* ==================== RNP-REG Global Registers ==================== */
+#define RNP_COMM_REG0 0x30000
+#define RNP_TOP_NIC_VERSION (RNP_COMM_REG0 + 0x0000)
+#define RNP500_PHY_RELEASE (0x30000)
+#define RNP500_TP_SFP (0x30200)
+#define RNP500_TOP_NIC_VERSION (0x8000 + 0x0000)
+#define RNP500_FPGA_VERSION (0x8020)
+#define RNP500_FPGA_TIME (0x8024)
+#define RNP500_LEGANCY_TIME (0xd000)
+#define RNP500_LEGANCY_ENABLE (0xd004)
+#define RNP_TOP_NIC_CONFIG (RNP_COMM_REG0 + 0x0004)
+#define RNP_TOP_NIC_STAT (RNP_COMM_REG0 + 0x0008)
+#define RNP_TOP_NIC_DUMMY (RNP_COMM_REG0 + 0x000c)
+#define RNP_TOP_NIC_REST_N (RNP_COMM_REG0 + 0x0010)
+#define NIC_RESET 0
+#define RNP_TOP_DMA_MEM_SLP (RNP_COMM_REG0 + 0x4004)
+#define RNP_TOP_DMA_MEM_SD (RNP_COMM_REG0 + 0x4008)
+#define RNP_TOP_ETH_TIMESTAMP_SEL (RNP_COMM_REG0 + 0x8010)
+#define RNP_TOP_ETH_MAC_CLK_SEL (RNP_COMM_REG0 + 0x8014)
+#define RNP_TOP_ETH_INF_ETH_STATUS (RNP_COMM_REG0 + 0x8018)
+#define RNP_TOP_ETH_BUG_40G_PATCH (RNP_COMM_REG0 + 0x801c)
+#define RNP_TOP_ETH_PWR_PORT_NUM (4)
+#define RNP_TOP_ETH_PWR_CLAMP_CTRL_PORT(n) (RNP_COMM_REG0 + 0x8020 + 0xc * (n))
+#define RNP_TOP_ETH_PWR_ISOLATE_PORT(n) (RNP_COMM_REG0 + 0x8024 + 0xc * (n))
+#define RNP_TOP_ETH_PWR_DOWN_PORT(n) (RNP_COMM_REG0 + 0x8028 + 0xc * (n))
+#define RNP_TOP_ETH_TCAM_CONFIG_ENABLE (RNP_COMM_REG0 + 0x8050)
+#define RNP_TOP_ETH_SLIP (RNP_COMM_REG0 + 0x8060)
+#define RNP_TOP_ETH_SHUT_DOWN (RNP_COMM_REG0 + 0x8064)
+#define RNP_TOP_ETH_OVS_SLIP (RNP_COMM_REG0 + 0x8068)
+#define RNP_TOP_ETH_OVS_SHUT_DOWN (RNP_COMM_REG0 + 0x806c)
+#define RNP_FC_PORT_ENABLE (RNP_COMM_REG0 + 0x9004)
+#define RNP_FC_PORT_PRIO_MAP(n) (RNP_COMM_REG0 + 0x9008 + (0x04 * n))
+#define RNP_FC_EN_CONF_AVAILBLE (RNP_COMM_REG0 + 0x9018)
+#define RNP_FC_UNCTAGS_MAP_OFFSET (16)
+#define RNP_TOP_MAC_OUI (RNP_COMM_REG0 + 0xc004)
+#define RNP_TOP_MAC_SN (RNP_COMM_REG0 + 0xc008)
+/* ================================================================== */
+
+/* ==================== RNP-SERDES Global Registers ================= */
+#define RNP_SERDES (0x40000)
+#define RNP_PCS_OFFSET (0x1000)
+#define RNP_PCS_BASE(i) (RNP_SERDES + RNP_PCS_OFFSET * i)
+#define RNP_PCS_1G_OR_10G BIT(13)
+#define RNP_PCS_SPPEED_MASK (0x1c)
+#define RNP_PCS_SPPEED_10G (0x0)
+#define RNP_PCS_SPPEED_40G (0xc)
+#define RNP_PCS_LINK_SPEED (0x30000)
+#define RNP_PCS_LINKUP BIT(2)
+#define RNP_PCS_LINK_STATUS (0x30001)
+/* ================================================================== */
+
+/* ==================== RNP-MAC Global Registers ==================== */
+#define RNP10_MAC_BASE (0x60000)
+#define RNP_XLMAC (0x60000)
+#define RNP10_MAC_TX_CFG (0x0000)
+#define RNP10_MAC_RX_CFG (0x0004)
+#define RNP_RX_ALL BIT(31)
+#define RNP_RX_ALL_MUL BIT(4)
+#define RNP10_MAC_PKT_FLT (0x0008)
+#define RNP10_MAC_LPI_CTRL (0x00d0)
+#define RNP10_MAC_Q0_TX_FLOW_CTRL(i) (0x0070 + 0x04 * (i))
+#define RNP10_MAC_RX_FLOW_CTRL (0x0090)
+#define RNP10_TX_FLOW_ENABLE_MASK (0x2)
+#define RNP10_RX_FLOW_ENABLE_MASK (0x1)
+#define RNP10_MAC_TX_VLAN_TAG (0x0050)
+#define RNP10_MAC_TX_VLAN_MODE (0x0060)
+#define RNP10_MAC_INNER_VLAN_INCL (0x0064)
+#define RNP10_MAC_UNICAST_LOW(i) (0x304 + i * 0x08)
+#define RNP10_MAC_UNICAST_HIGH(i) (0x300 + i * 0x08)
+#define RNP500_MAC_BASE (0x20000)
+#define RNP_MODE_NO_SA_INSER (0x0)
+#define RNP_SARC_OFFSET (28)
+#define RNP_TWOKPE_MASK BIT(27)
+#define RNP_SFTERR_MASK BIT(26)
+#define RNP_CST_MASK BIT(25)
+#define RNP_TC_MASK BIT(24)
+#define RNP_WD_MASK BIT(23)
+#define RNP_JD_MASK BIT(22)
+#define RNP_BE_MASK BIT(21)
+#define RNP_JE_MASK BIT(20)
+#define RNP_IFG_96 (0x00)
+#define RNP_IFG_OFFSET (17)
+#define RNP_DCRS_MASK BIT(16)
+#define RNP_PS_MASK BIT(15)
+#define RNP_FES_MASK BIT(14)
+#define RNP_DO_MASK BIT(13)
+#define RNP_LM_MASK BIT(12)
+#define RNP_DM_MASK BIT(11)
+#define RNP_IPC_MASK BIT(10)
+#define RNP_DR_MASK BIT(9)
+#define RNP_LUD_MASK BIT(8)
+#define RNP_ACS_MASK BIT(7)
+#define RNP_BL_MODE (0x00)
+#define RNP_BL_OFFSET (5)
+#define RNP_DC_MASK BIT(4)
+#define RNP_TE_MASK BIT(3)
+#define RNP_RE_MASK BIT(2)
+#define RNP_PRELEN_MODE (0)
+#define RNP500_MAC_UNICAST_LOW(i) (0x44 + i * 0x08)
+#define RNP500_MAC_UNICAST_HIGH(i) (0x40 + i * 0x08)
+#define GMAC_CONTROL 0x00000000 /* Configuration */
+#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */
+#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
+#define GMAC_HASH_LOW 0x0000000c /* Multicast Hash Table Low */
+#define GMAC_MII_ADDR 0x00000010 /* MII Address */
+#define GMAC_MII_DATA 0x00000014 /* MII Data */
+#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */
+#define GMAC_PMT 0x0000002c
+
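+/* GMAC PMT (power management / wake-up) register bit definitions */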
+enum power_event {
+	pointer_reset = 0x80000000,
+	global_unicast = 0x00000200,
+	wake_up_rx_frame = 0x00000040,
+	magic_frame = 0x00000020,
+	wake_up_frame_en = 0x00000004,
+	magic_pkt_en = 0x00000002,
+	power_down = 0x00000001,
+};
+
+#define GMAC_VTHM_MASK BIT(19)
+#define GMAC_ESVL_MASK BIT(18)
+#define GMAC_VTIM_MASK BIT(17)
+#define GMAC_ETV_MASK BIT(16)
+#define GMAC_VLAN_TAG_CTRL 0x0000001c
+#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense */
+#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMI 1:MII */
+#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */
+#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */
+#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
+#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */
+#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
+#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */
+#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */
+#define GMAC_CONTROL_ACS 0x00000080 /* Auto Pad/FCS Stripping */
+#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */
+#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
+#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
+/* GMAC Frame Filter defines */
+#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
+#define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
+#define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
+#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
+#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
+#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
+#define GMAC_FRAME_FILTER_PCF 0x00000080 /* Pass Control frames */
+#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
+#define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
+#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
+#define GMAC_FRAME_FILTER_VLAN 0x00010000 /* vlan filter open */
+#define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
+/* GMII ADDR  defines */
+#define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
+#define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
+/* GMAC FLOW CTRL defines */
+#define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
+#define GMAC_FLOW_CTRL_PT_SHIFT 16
+#define GMAC_FLOW_CTRL_UP 0x00000008 /* Unicast pause frame enable */
+#define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
+#define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
+#define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
+/* Energy Efficient Ethernet (EEE)
+ *
+ * LPI status, timer and control register offset
+ */
+/* EEE and LPI defines */
+#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0)
+#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1)
+#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2)
+#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3)
+#define GMAC_LPI_CTRL_STATUS 0x0030
+#define GMAC_LPI_TIMER_CTRL 0x0034
+#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
+#define GMAC_INT_STATUS_PMT BIT(3)
+#define GMAC_INT_STATUS_MMCIS BIT(4)
+#define GMAC_INT_STATUS_MMCRIS BIT(5)
+#define GMAC_INT_STATUS_MMCTIS BIT(6)
+#define GMAC_INT_STATUS_MMCCSUM BIT(7)
+#define GMAC_INT_STATUS_TSTAMP BIT(9)
+#define GMAC_INT_STATUS_LPIIS BIT(10)
+/* LPI control and status defines */
+#define LPI_CTRL_STATUS_LPITXA 0x00080000 /* Enable LPI TX Automate */
+#define LPI_CTRL_STATUS_PLSEN 0x00040000 /* Enable PHY Link Status */
+#define LPI_CTRL_STATUS_PLS 0x00020000 /* PHY Link Status */
+#define LPI_CTRL_STATUS_LPIEN 0x00010000 /* LPI Enable */
+#define LPI_CTRL_STATUS_RLPIST 0x00000200 /* Receive LPI state */
+#define LPI_CTRL_STATUS_TLPIST 0x00000100 /* Transmit LPI state */
+#define LPI_CTRL_STATUS_RLPIEX 0x00000008 /* Receive LPI Exit */
+#define LPI_CTRL_STATUS_RLPIEN 0x00000004 /* Receive LPI Entry */
+#define LPI_CTRL_STATUS_TLPIEX 0x00000002 /* Transmit LPI Exit */
+#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
+#define GMAC_MANAGEMENT_RX_UNDERSIZE (0x01a4)
+#define GMAC_MANAGEMENT_TX_PAUSE (0x170)
+#define GMAC_MANAGEMENT_RX_PAUSE (0x1D0)
+#define RNP_MAC_TX_CFG (RNP_XLMAC + 0x0000)
+#define RNP_MAC_RX_CFG (RNP_XLMAC + 0x0004)
+#define RNP_MAC_PKT_FLT (RNP_XLMAC + 0x0008)
+#define RNP_MAC_LPI_CTRL (RNP_XLMAC + 0x00d0)
+#define RNP_MAC_TX_VLAN_TAG (RNP_XLMAC + 0x0050)
+#define RNP_MAC_TX_VLAN_MODE (RNP_XLMAC + 0x0060)
+#define RNP_MAC_INNER_VLAN_INCL (RNP_XLMAC + 0x0064)
+#define RNP_MAC_Q0_TX_FLOW_CTRL(i) (RNP_XLMAC + 0x0070 + 0x04 * (i))
+#define RNP_MAC_RX_FLOW_CTRL (RNP_XLMAC + 0x0090)
+#define RNP_MAC_HW_FEATURE (RNP_XLMAC + 0x0120)
+/*1588 */
+#define RNP_MAC_TS_CTRL (RNP_XLMAC + 0X0d00)
+#define RNP_MAC_SUB_SECOND_INCREMENT (RNP_XLMAC + 0x0d04)
+#define RNP_MAC_SYS_TIME_SEC_CFG (RNP_XLMAC + 0x0d08)
+#define RNP_MAC_SYS_TIME_NANOSEC_CFG (RNP_XLMAC + 0x0d0c)
+#define RNP_MAC_SYS_TIME_SEC_UPDATE (RNP_XLMAC + 0x0d10)
+#define RNP_MAC_SYS_TIME_NANOSEC_UPDATE (RNP_XLMAC + 0x0d14)
+#define RNP_MAC_TS_ADDEND (RNP_XLMAC + 0x0d18)
+#define RNP_MAC_TS_STATS (RNP_XLMAC + 0x0d20)
+#define RNP_MAC_INTERRUPT_ENABLE (RNP_XLMAC + 0x00b4)
+#define RNP_MAC_STATS_BROADCAST_LOW (RNP_XLMAC + 0x0918)
+#define RNP_MAC_STATS_BROADCAST_HIGH (RNP_XLMAC + 0x091c)
+#define RNP_MAC_STATS_MULTICAST_LOW (RNP_XLMAC + 0x0920)
+#define RNP_MAC_STATS_MULTICAST_HIGH (RNP_XLMAC + 0x0924)
+#define RNP_TX_FLOW_ENABLE_MASK (0x2)
+#define RNP_RX_FLOW_ENABLE_MASK (0x1)
+/* ================================================================== */
+
+/* ==================== RNP-MSIX Global Registers ==================== */
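+/* one 32-bit ring-to-MSI-X-vector mapping register per ring */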
+#define RING_VECTOR(n) (0x04 * (n))
+/* ================================================================== */
+
+/* ==================== OTHER Global Registers ==================== */
+/* =====  PF-VF Functions ==== */
+#define VF_NUM_REG 0xa3000
+/* 8 bits: 7:vf_active 6:fun0/fun1 [5:0]:vf_num */
+#define VF_NUM(vfnum, fun) ((1 << 7) | (((fun) & 0x1) << 6) | ((vfnum) & 0x3f))
+#define PF_NUM(fun) (((fun) & 0x1) << 6)
+#define IS_VF(vfnum) (((vfnum) & (1 << 7)) ? 1 : 0)
+/* 8 bits: 7:vf_active [6:5]:fun0/fun1 [4:0]:vf_num */
+#define PF_NUM_N500(fun) (((fun) & 0x3) << 5)
+/* PFC Flow Control*/
+
+enum NIC_MODE {
+	MODE_NIC_MODE_2PORT_40G = 0,
+	MODE_NIC_MODE_2PORT_10G = 1,
+	MODE_NIC_MODE_4PORT_10G = 2,
+	MODE_NIC_MODE_8PORT_10G = 3,
+};
+/* ================================================================== */
+
+#endif /* RNPGBE_REGS_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.c
new file mode 100755
index 0000000000000000000000000000000000000000..f51e5238a76c39d60cda02b9fd36eb33f246b0db
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include "rnpgbe_sfc.h"
+#include "rnpgbe.h"
+
+#ifndef NO_CM3_MBX
+static inline void rsp_hal_sfc_command(u8 __iomem *hw_addr, u32 cmd)
+{
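+	/* latch the SPI command, start the transaction, then poll the
+	 * busy register until the controller reports idle
+	 */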
+	iowrite32(cmd, (hw_addr + 0x8));
+	iowrite32(1, (hw_addr + 0x0));
+	while (ioread32(hw_addr) != 0)
+		cpu_relax();
+}
+
+static inline void rsp_hal_sfc_flash_write_disable(u8 __iomem *hw_addr)
+{
+	iowrite32(CMD_CYCLE(8), (hw_addr + 0x10));
+	iowrite32(WR_DATA_CYCLE(0), (hw_addr + 0x14));
+
+	rsp_hal_sfc_command(hw_addr, CMD_WRITE_DISABLE);
+}
+
+static int32_t rsp_hal_sfc_flash_wait_idle(u8 __iomem *hw_addr)
+{
+	iowrite32(CMD_CYCLE(8), (hw_addr + 0x10));
+	iowrite32(RD_DATA_CYCLE(8), (hw_addr + 0x14));
+
+	while (1) {
+		rsp_hal_sfc_command(hw_addr, CMD_READ_STATUS);
+		if ((ioread32(hw_addr + 0x4) & 0x1) == 0)
+			break;
+	}
+	return HAL_OK;
+}
+
+static inline void rsp_hal_sfc_flash_write_enable(u8 __iomem *hw_addr)
+{
+	iowrite32(CMD_CYCLE(8), (hw_addr + 0x10));
+	iowrite32(0x1f, (hw_addr + 0x18));
+	iowrite32(0x100000, (hw_addr + 0x14));
+
+	rsp_hal_sfc_command(hw_addr, CMD_WRITE_ENABLE);
+}
+
+static int rsp_hal_sfc_flash_erase_sector_internal(u8 __iomem *hw_addr,
+						   u32 address)
+{
+	if (address >= RSP_FLASH_HIGH_16M_OFFSET)
+		return HAL_EINVAL;
+
+	if (address % 4096)
+		return HAL_EINVAL;
+
+	rsp_hal_sfc_flash_write_enable(hw_addr);
+
+	iowrite32((CMD_CYCLE(8) | ADDR_CYCLE(24)), (hw_addr + 0x10));
+	iowrite32((RD_DATA_CYCLE(0) | WR_DATA_CYCLE(0)), (hw_addr + 0x14));
+	iowrite32(SFCADDR(address), (hw_addr + 0xc));
+	rsp_hal_sfc_command(hw_addr, CMD_SECTOR_ERASE);
+	rsp_hal_sfc_flash_wait_idle(hw_addr);
+	rsp_hal_sfc_flash_write_disable(hw_addr);
+
+	return HAL_OK;
+}
+
+int rsp_hal_sfc_write_protect(struct rnpgbe_hw *hw, u32 value)
+{
+	rsp_hal_sfc_flash_write_enable(hw->hw_addr);
+
+	iowrite32(CMD_CYCLE(8), (hw->hw_addr + 0x10));
+	iowrite32(WR_DATA_CYCLE(8), (hw->hw_addr + 0x14));
+	iowrite32((value << 24), (hw->hw_addr + 0x04));
+	rsp_hal_sfc_command(hw->hw_addr, CMD_WRITE_STATUS);
+
+	return 0;
+}
+
+int rsp_hal_sfc_flash_erase(struct rnpgbe_hw *hw, u32 size)
+{
+	u32 addr = SFC_MEM_BASE;
+	u32 i = 0;
+	u32 page_size = 0x1000;
+
+	size = ((size + (page_size - 1)) / page_size) * page_size;
+
+	addr = addr - SFC_MEM_BASE;
+
+	if (size == 0)
+		return HAL_EINVAL;
+
+	if ((addr + size) > RSP_FLASH_HIGH_16M_OFFSET)
+		return HAL_EINVAL;
+
+	if (addr % page_size)
+		return HAL_EINVAL;
+
+	if (size % page_size)
+		return HAL_EINVAL;
+
+	/* flash write protection is expected to be cleared before erasing */
+
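+	/* erase one 4 KiB sector at a time, leaving the 0x1f000 - 0x20000
+	 * range untouched
+	 */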
+	for (i = 0; i < size; i += page_size) {
+		if ((i >= 0x1f000) && (i < 0x20000))
+			continue;
+
+		rsp_hal_sfc_flash_erase_sector_internal(hw->hw_addr,
+							(addr + i));
+	}
+
+	return HAL_OK;
+}
+
+#endif /*NO_CM3_MBX*/
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.h
new file mode 100755
index 0000000000000000000000000000000000000000..b3af1d05e7e141691293e3a06705ac8faca03680
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPGBE_SFC_H
+#define _RNPGBE_SFC_H
+
+/* Return value */
+#define HAL_OK 0
+#define HAL_EINVAL (-3) /* Invalid argument */
+#define HAL_ETIME (-6) /* Timer expired */
+#define RSP_FLASH_HIGH_16M_OFFSET 0x1000000
+#define SFC_MEM_BASE 0x28000000
+#define RSP_FLASH_SIZE 0x1000000
+#define CMD_WRITE_DISABLE 0x04000000
+#define CMD_READ_STATUS 0x05000000
+#define CMD_WRITE_STATUS 0x01000000
+#define CMD_WRITE_ENABLE 0x06000000
+#define CMD_SECTOR_ERASE 0x20000000
+#define CMD_BLOCK_ERASE_64K 0xd8000000
+#define SFCADDR(a) ((a) << 8)
+#define CMD_CYCLE(c) (((c) & 0xff) << 0)
+#define RD_DATA_CYCLE(c) (((c) & 0xff) << 8)
+#define WR_DATA_CYCLE(c) (((c) & 0xff) << 0)
+#define ADDR_CYCLE(c) (((c) & 0xff) << 16)
+
+#endif /* _RNPGBE_SFC_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sriov.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sriov.c
new file mode 100755
index 0000000000000000000000000000000000000000..147c735a296af1401cb1c547cad18cc6f49ec0ce
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sriov.c
@@ -0,0 +1,1689 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#ifdef NETIF_F_HW_VLAN_CTAG_TX
+#include 
+#endif /* NETIF_F_HW_VLAN_CTAG_TX */
+
+#include "rnpgbe.h"
+#include "rnpgbe_type.h"
+#include "rnpgbe_sriov.h"
+
+#ifdef CONFIG_PCI_IOV
+static int __rnpgbe_enable_sriov(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int num_vf_macvlans, i, num_vebvlans;
+	struct vf_macvlans *mv_list;
+	struct vf_vebvlans *vv_list = NULL;
+
+	dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags);
+	/* SR-IOV and DCB cannot be enabled together, so reset the TC config */
+	adapter->flags &= (~RNP_FLAG_DCB_ENABLED);
+	netdev_reset_tc(adapter->netdev);
+
+	e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);
+
+	dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags);
+
+	/* Enable VMDq flag so device will be set in VM mode */
+	adapter->flags |= RNP_FLAG_VMDQ_ENABLED;
+	if (!adapter->ring_feature[RING_F_VMDQ].limit)
+		adapter->ring_feature[RING_F_VMDQ].limit = 1;
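+	/* place the PF's VMDq pool at index 0 for the fixed-VF layout,
+	 * otherwise at the last pool (hw->max_vfs - 1)
+	 */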
+	if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED)
+		adapter->ring_feature[RING_F_VMDQ].offset = 0;
+	else
+		adapter->ring_feature[RING_F_VMDQ].offset = hw->max_vfs - 1;
+
+	num_vf_macvlans = hw->num_rar_entries -
+			  (hw->max_pf_macvlans + 1 + adapter->num_vfs);
+	num_vebvlans = hw->num_vebvlan_entries;
+
+	adapter->mv_list = mv_list = kcalloc(
+		num_vf_macvlans, sizeof(struct vf_macvlans), GFP_KERNEL);
+	if (num_vebvlans)
+		hw->vv_list = vv_list = kcalloc(
+			num_vebvlans, sizeof(struct vf_vebvlans), GFP_KERNEL);
+
+	if (mv_list) {
+		/* Initialize list of VF macvlans */
+		INIT_LIST_HEAD(&adapter->vf_mvs.l);
+		for (i = 0; i < num_vf_macvlans; i++) {
+			mv_list->vf = -1;
+			mv_list->free = true;
+			mv_list->rar_entry = hw->mac.num_rar_entries -
+					     (i + adapter->num_vfs + 1);
+			list_add(&mv_list->l, &adapter->vf_mvs.l);
+			mv_list++;
+		}
+	}
+
+	if (vv_list) {
+		/* Initialize list of VF veb vlans */
+		INIT_LIST_HEAD(&hw->vf_vas.l);
+		for (i = 0; i < num_vebvlans; i++) {
+			vv_list->vid = 0;
+			vv_list->free = true;
+			vv_list->veb_entry = i;
+			list_add(&vv_list->l, &hw->vf_vas.l);
+			vv_list++;
+		}
+	}
+
+	adapter->flags2 |= RNP_FLAG2_BRIDGE_MODE_VEB;
+	dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags);
+
+	hw->ops.set_sriov_status(hw, true);
+
+	adapter->vfinfo = kcalloc(adapter->num_vfs,
+				  sizeof(struct vf_data_storage), GFP_KERNEL);
+	if (adapter->vfinfo) {
+		/* limit traffic classes based on VFs enabled */
+		/* TODO analyze VF need support pfc or traffic classes */
+		/* We do not support RSS w/ SR-IOV */
+		adapter->ring_feature[RING_F_RSS].limit = hw->sriov_ring_limit;
+
+		/* Disable RSC when in SR-IOV mode */
+		adapter->flags2 &=
+			~(RNP_FLAG2_RSC_CAPABLE | RNP_FLAG2_RSC_ENABLED);
+
+		adapter->flags |= RNP_FLAG_SRIOV_ENABLED;
+		/* force close eee if open sriov */
+		adapter->eee_enabled = 0;
+		return 0;
+	}
+
+	/* the enable flags are only set above, after vfinfo is allocated,
+	 * so adapter->vfinfo is never dereferenced while NULL
+	 */
+	dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags);
+	return -ENOMEM;
+}
+
+void rnpgbe_enable_sriov_true(struct rnpgbe_adapter *adapter)
+{
+	int err = 0;
+
+	if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+		return;
+
+	adapter->flags |= RNP_FLAG_SRIOV_INIT_DONE;
+
+	err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+	if (err) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to enable PCI SR-IOV: %d (num_vfs %d); this error cannot be handled here\n",
+			err, adapter->num_vfs);
+	}
+
+	adapter->flags |= RNP_FLAG_VF_INIT_DONE;
+}
+
+/* Note: this function is called when the user wants to enable SR-IOV
+ * VFs using the now deprecated module parameter; it is currently unused.
+ */
+void rnpgbe_enable_sriov(struct rnpgbe_adapter *adapter)
+{
+	int pre_existing_vfs = 0;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	pre_existing_vfs = pci_num_vf(adapter->pdev);
+	if (!pre_existing_vfs && !adapter->num_vfs)
+		return;
+
+	dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags);
+	if (!pre_existing_vfs)
+		dev_warn(&adapter->pdev->dev,
+			 "Enabling SR-IOV VFs using the module parameter is deprecated"
+			 "- please use the pci sysfs interface.\n");
+
+	dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags);
+	/* If there are pre-existing VFs then we have to force
+	 * use of that many - override any module parameter value.
+	 * This may result from the user unloading the PF driver
+	 * while VFs were assigned to guest VMs or because the VFs
+	 * have been created via the new PCI SR-IOV sysfs interface.
+	 */
+	if (pre_existing_vfs) {
+		adapter->num_vfs = pre_existing_vfs;
+		dev_warn(&adapter->pdev->dev,
+			 "Virtual Functions already enabled for this device - Please"
+			 "reload all VF drivers to avoid spoofed packet errors\n");
+	} else {
+		int i;
+		/*
+		 * The hardware reports up to hw->max_vfs VFs per physical
+		 * function, but this implementation limits allocation to
+		 * hw->max_vfs - 1 so that basic networking resources are
+		 * still available to the physical function.  Larger requests
+		 * are clamped to that limit.
+		 */
+		adapter->num_vfs =
+			min_t(unsigned int, adapter->num_vfs, hw->max_vfs - 1);
+
+		if (__rnpgbe_enable_sriov(adapter)) {
+			e_err(probe, "Failed to alloc memory for sriov\n");
+			adapter->num_vfs = 0;
+		}
+
+		for (i = 0; i < adapter->num_vfs; i++)
+			rnpgbe_vf_configuration(adapter->pdev,
+						(i | 0x10000000));
+
+		dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags);
+	}
+}
+
+static bool rnpgbe_vfs_are_assigned(struct rnpgbe_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	struct pci_dev *vfdev;
+	unsigned int dev_id = RNP_DEV_ID_N10_PF0_VF_N;
+	unsigned int vendor_id = PCI_VENDOR_ID_MUCSE;
+
+	switch (adapter->pdev->device) {
+	case RNP_DEV_ID_N10_PF0:
+	case RNP_DEV_ID_N10_PF1:
+		vendor_id = 0x1dab;
+		if (rnpgbe_is_pf1(pdev))
+			dev_id = RNP_DEV_ID_N10_PF1_VF;
+		else
+			dev_id = RNP_DEV_ID_N10_PF0_VF;
+		break;
+	case PCI_DEVICE_ID_N10_PF0:
+	case PCI_DEVICE_ID_N10_PF1:
+		vendor_id = PCI_VENDOR_ID_MUCSE;
+		if (rnpgbe_is_pf1(pdev))
+			dev_id = RNP_DEV_ID_N10_PF1_VF_N;
+		else
+			dev_id = RNP_DEV_ID_N10_PF0_VF_N;
+	}
+
+	/* loop through all the VFs to see if we own any that are assigned */
+	vfdev = pci_get_device(vendor_id, dev_id, NULL);
+	while (vfdev) {
+		/* if we don't own it we don't care */
+		if (vfdev->is_virtfn && vfdev->physfn == pdev) {
+			/* if it is assigned we cannot release it */
+			if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
+				return true;
+		}
+
+		vfdev = pci_get_device(vendor_id, dev_id, vfdev);
+	}
+
+	return false;
+}
+#endif /* #ifdef CONFIG_PCI_IOV */
+
+int rnpgbe_disable_sriov(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int rss;
+	int time = 0;
+
+	if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+		return 0;
+
+	adapter->num_vfs = 0;
+	adapter->flags &= ~RNP_FLAG_SRIOV_ENABLED;
+	adapter->flags &= ~RNP_FLAG_SRIOV_INIT_DONE;
+	adapter->flags &= ~RNP_FLAG_VF_INIT_DONE;
+	adapter->vlan_count = 0;
+	msleep(100);
+
+	if (!pci_channel_offline(adapter->pdev)) {
+		if (!hw->ncsi_en)
+			hw->ops.set_mac_rx(hw, false);
+		hw->ops.set_sriov_status(hw, false);
+	}
+
+	/* wait until vfinfo is no longer in use before freeing it */
+	while (test_and_set_bit(__RNP_USE_VFINFI, &adapter->state)) {
+		msleep(100);
+		time++;
+
+		if (time > 100) {
+			printk(KERN_DEBUG "wait flags timeout\n");
+			break;
+		}
+	}
+	if (time < 100)
+		clear_bit(__RNP_USE_VFINFI, &adapter->state);
+
+	/* free VF control structures */
+	kfree(adapter->vfinfo);
+	adapter->vfinfo = NULL;
+
+	/* free macvlan list */
+	if (hw->vv_list) {
+		kfree(hw->vv_list);
+		hw->vv_list = NULL;
+	}
+
+	if (adapter->mv_list) {
+		kfree(adapter->mv_list);
+		adapter->mv_list = NULL;
+	}
+
+	dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags);
+#ifdef CONFIG_PCI_IOV
+	/*
+	 * If our VFs are assigned we cannot shut down SR-IOV
+	 * without causing issues, so just leave the hardware
+	 * available but disabled
+	 */
+	if (rnpgbe_vfs_are_assigned(adapter)) {
+		e_dev_warn("Unloading driver while VFs are assigned"
+			   "- VFs will not be "
+			   "deallocated\n");
+		return -EPERM;
+	}
+	/* disable iov and allow time for transactions to clear */
+	pci_disable_sriov(adapter->pdev);
+#endif
+	dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags);
+
+	/* set default pool back to 0 */
+
+	/* Clear the VMDq flag so the device returns to non-VM mode */
+	if (adapter->ring_feature[RING_F_VMDQ].limit == 1)
+		adapter->flags &= ~RNP_FLAG_VMDQ_ENABLED;
+	adapter->ring_feature[RING_F_VMDQ].offset = 0;
+
+	rss = min_t(int, adapter->max_ring_pair_counts, num_online_cpus());
+
+	rss = min_t(int, rss,
+		    hw->mac.max_msix_vectors - adapter->num_other_vectors);
+
+	adapter->ring_feature[RING_F_RSS].limit = rss;
+
+	/* take a breather then clean up driver data */
+	msleep(100);
+
+	dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags);
+	return 0;
+}
+
+static bool check_ari_mode(struct pci_dev *dev)
+{
+	struct pci_bus *bus = dev->bus;
+
+	return bus->self && bus->self->ari_enabled;
+}
+
+static int rnpgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
+{
+#ifdef CONFIG_PCI_IOV
+	struct rnpgbe_adapter *adapter = pci_get_drvdata(dev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int err = 0;
+	int i;
+	int pre_existing_vfs = pci_num_vf(dev);
+
+	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+		err = rnpgbe_disable_sriov(adapter);
+	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+		goto out;
+
+	if (hw->feature_flags & RNP_VEB_VLAN_MASK_EN) {
+		if (adapter->vlan_count > hw->max_vfs - 1) {
+			dev_err(&adapter->pdev->dev,
+				"vlans is too much, delete less than %d vlans\n",
+				hw->max_vfs - 1);
+
+			err = -EOPNOTSUPP;
+			goto err_out;
+		}
+
+	} else if (adapter->vlan_count > 1) {
+		dev_err(&adapter->pdev->dev,
+			"only 1 vlan in sriov mode, delete other vlans\n");
+		dev_err(&adapter->pdev->dev, "please delete all vlans first\n");
+
+		err = -EOPNOTSUPP;
+		goto err_out;
+	}
+
+	adapter->vlan_count = 0;
+	if (err)
+		goto err_out;
+
+	/* Whatever total the SR-IOV capability structure reports, the
+	 * number of VFs that can actually be allocated is limited to
+	 * hw->max_vfs - 1 (or hw->max_vfs_noari without ARI) so that some
+	 * transmit/receive resources remain reserved for the PF.  The PCI
+	 * bus driver already checks for other out-of-range values.
+	 */
+
+	if (check_ari_mode(dev)) {
+		if (num_vfs > (hw->max_vfs - 1)) {
+			err = -EPERM;
+			goto err_out;
+		}
+	} else {
+		if (num_vfs > hw->max_vfs_noari) {
+			err = -EPERM;
+			goto err_out;
+		}
+	}
+
+	/* num_vfs must be set before __rnpgbe_enable_sriov() uses it */
+	adapter->num_vfs = num_vfs;
+
+	err = __rnpgbe_enable_sriov(adapter);
+	if (err)
+		goto err_out;
+
+	for (i = 0; i < adapter->num_vfs; i++)
+		rnpgbe_vf_configuration(dev, (i | 0x10000000));
+	dbg("flags:0x%x\n", adapter->flags);
+	if (hw->ops.clr_rar_all)
+		hw->ops.clr_rar_all(hw);
+
+	rnpgbe_sriov_reinit(adapter);
+
+	adapter->flags |= RNP_FLAG_SRIOV_INIT_DONE;
+	err = pci_enable_sriov(dev, num_vfs);
+	if (err) {
+		e_dev_warn("Failed to enable PCI sriov: %d num %d\n", err,
+			   num_vfs);
+		rnpgbe_disable_sriov(adapter);
+		rnpgbe_sriov_reinit(adapter);
+		goto err_out;
+	}
+	adapter->flags |= RNP_FLAG_VF_INIT_DONE;
+
+out:
+	return num_vfs;
+
+err_out:
+	return err;
+#endif
+	return 0;
+}
+
+static int rnpgbe_pci_sriov_disable(struct pci_dev *dev)
+{
+	struct rnpgbe_adapter *adapter = pci_get_drvdata(dev);
+	int err;
+	u32 current_flags = adapter->flags;
+
+	err = rnpgbe_disable_sriov(adapter);
+
+	/* Only reinit if no error and state changed */
+	if (!err && current_flags != adapter->flags) {
+		/* rnpgbe_disable_sriov() doesn't clear VMDQ flag */
+		adapter->flags &= ~RNP_FLAG_VMDQ_ENABLED;
+#ifdef CONFIG_PCI_IOV
+		rnpgbe_sriov_reinit(adapter);
+#endif
+	}
+
+	return err;
+}
+
+static int rnpgbe_set_vf_multicasts(struct rnpgbe_adapter *adapter,
+				    u32 *msgbuf,
+				    u32 vf)
+{
+	int entries = (msgbuf[0] & RNP_VT_MSGINFO_MASK) >> RNP_VT_MSGINFO_SHIFT;
+	u16 *hash_list = (u16 *)&msgbuf[1];
+	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int i;
+
+	/* only so many hash values supported */
+	entries = min(entries, RNP_MAX_VF_MC_ENTRIES);
+
+	/*
+	 * salt away the number of multicast addresses assigned
+	 * to this VF for later use to restore when the PF multicast
+	 * list changes
+	 */
+	vfinfo->num_vf_mc_hashes = entries;
+
+	/*
+	 * VFs are limited to using the MTA hash table for their multicast
+	 * addresses
+	 */
+	for (i = 0; i < entries; i++)
+		vfinfo->vf_mc_hashes[i] = hash_list[i];
+
+	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++)
+		hw->ops.set_sriov_vf_mc(hw, vfinfo->vf_mc_hashes[i]);
+
+	return 0;
+}
+
+void rnpgbe_restore_vf_macs(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int vf;
+	u8 *mac_addr;
+	int rar_entry;
+
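+	/* VF primary MACs occupy the top of the RAR table: VF 0 uses the
+	 * last entry, VF 1 the one before it, and so on (see the rar_entry
+	 * calculation below).
+	 */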
+	for (vf = 0; vf < adapter->num_vfs; vf++) {
+		mac_addr = adapter->vfinfo[vf].vf_mac_addresses;
+		rar_entry = hw->mac.num_rar_entries - (vf + 1);
+		/* setup to the hw */
+		if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED)
+			hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, vf + 1,
+						true);
+		else
+			hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, vf,
+						true);
+	}
+}
+
+void rnpgbe_restore_vf_macvlans(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct list_head *pos;
+	struct vf_macvlans *entry;
+
+	hw_dbg(hw, "%s Staring..\n", __func__);
+
+	list_for_each(pos, &adapter->vf_mvs.l) {
+		entry = list_entry(pos, struct vf_macvlans, l);
+		if (!entry->free) {
+			hw_dbg(hw, "  vf:%d MACVLAN: RAR[%d] <= %pM\n",
+			       entry->vf, entry->rar_entry, entry->vf_macvlan);
+
+			if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) {
+				hw->ops.set_rar_with_vf(hw, entry->vf_macvlan,
+							entry->rar_entry,
+							entry->vf + 1, true);
+			} else {
+				hw->ops.set_rar_with_vf(hw, entry->vf_macvlan,
+							entry->rar_entry,
+							entry->vf, true);
+			}
+		}
+	}
+	hw_dbg(hw, "%s Done\n", __func__);
+}
+
+void rnpgbe_restore_vf_multicasts(struct rnpgbe_adapter *adapter)
+{
+	/* Restore any VF macvlans */
+	rnpgbe_restore_vf_macvlans(adapter);
+}
+
+static int rnpgbe_set_vf_vlan(struct rnpgbe_adapter *adapter, int add, int vid,
+			      u32 vf)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int true_handle = 1;
+	int i;
+	/* VLAN 0 is a special case, don't allow it to be removed */
+	if (!vid && !add)
+		return 0;
+
+	/* check whether other VFs still use this VLAN */
+	if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED)) {
+		/* if another VF still uses this VLAN, skip the real removal */
+		if (!add) {
+			/* is it equal to the PF VLAN? */
+			if (vid == adapter->vf_vlan)
+				true_handle = 0;
+			if (!test_and_set_bit(__RNP_USE_VFINFI,
+					      &adapter->state)) {
+				for (i = 0; i < adapter->num_vfs; i++) {
+					/* check if other vf_vlan still valid */
+					if ((i != vf) &&
+					    (vid == adapter->vfinfo[i].vf_vlan))
+						true_handle = 0;
+					/* check if other pf_vlan still valid */
+					if ((i != vf) &&
+					    (vid == adapter->vfinfo[i].pf_vlan))
+						true_handle = 0;
+				}
+				clear_bit(__RNP_USE_VFINFI, &adapter->state);
+			}
+		}
+	}
+	if (true_handle)
+		hw->ops.set_vf_vlan_filter(hw, vid, vf, (bool)add, false);
+
+	if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) {
+		if (hw->ops.set_vf_vlan_mode) {
+			if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED)
+				hw->ops.set_vf_vlan_mode(hw, vid, vf + 1,
+							 (bool)add);
+			else
+				hw->ops.set_vf_vlan_mode(hw, vid, vf,
+							 (bool)add);
+		}
+	}
+
+	return 0;
+}
+
+static inline void rnpgbe_vf_reset_event(struct rnpgbe_adapter *adapter, u32 vf)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+	int i;
+
+	/* reset multicast table array for vf */
+	adapter->vfinfo[vf].num_vf_mc_hashes = 0;
+
+	/* Flush and reset the mta with the new values */
+	rnpgbe_set_rx_mode(adapter->netdev);
+
+	/* clear this rar_entry */
+	hw->ops.clr_rar(hw, rar_entry);
+
+	/* reset VF api back to unknown */
+	adapter->vfinfo[vf].vf_api = 0;
+	/* clear vf multicast */
+	for (i = 0; i < RNP_MAX_VF_MC_ENTRIES; i++)
+		adapter->vfinfo[vf].vf_mc_hashes[i] = 0;
+	/* clear vf vlan setup */
+	adapter->vfinfo[vf].vf_vlan = 0;
+	adapter->vfinfo[vf].vlan_count = 0;
+}
+
+static int rnpgbe_set_vf_mac(struct rnpgbe_adapter *adapter, int vf,
+			     unsigned char *mac_addr)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	/* this rar_entry may conflict with a PF MAC/VLAN entry */
+	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+
+	memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
+
+	/* setup to the hw */
+	if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED)
+		hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, vf + 1, true);
+	else
+		hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, vf, true);
+
+	return 0;
+}
+
+static int rnpgbe_set_vf_macvlan(struct rnpgbe_adapter *adapter, int vf,
+				 int index, unsigned char *mac_addr)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct list_head *pos;
+	struct vf_macvlans *entry;
+	/* index == 0: only erase */
+	/* index == 1: erase and then set */
+	if (index <= 1) {
+		list_for_each(pos, &adapter->vf_mvs.l) {
+			entry = list_entry(pos, struct vf_macvlans, l);
+			if (entry->vf == vf) {
+				entry->vf = -1;
+				entry->free = true;
+				entry->is_macvlan = false;
+				hw->ops.clr_rar(hw, entry->rar_entry);
+			}
+		}
+	}
+
+	/*
+	 * If index was zero then we were asked to clear the uc list
+	 * for the VF.  We're done.
+	 */
+	if (!index)
+		return 0;
+
+	entry = NULL;
+
+	list_for_each(pos, &adapter->vf_mvs.l) {
+		entry = list_entry(pos, struct vf_macvlans, l);
+		if (entry->free)
+			break;
+	}
+
+	/*
+	 * If we traversed the entire list and didn't find a free entry
+	 * then we're out of space on the RAR table.  Also entry may
+	 * be NULL because the original memory allocation for the list
+	 * failed, which is not fatal but does mean we can't support
+	 * VF requests for MACVLAN because we couldn't allocate
+	 * memory for the list management required.
+	 */
+	if (!entry || !entry->free)
+		return -ENOSPC;
+
+	entry->free = false;
+	entry->is_macvlan = true;
+	entry->vf = vf;
+	memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
+
+	if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) {
+		hw->ops.set_rar_with_vf(hw, entry->vf_macvlan, entry->rar_entry,
+					entry->vf + 1, true);
+	} else {
+		hw->ops.set_rar_with_vf(hw, entry->vf_macvlan, entry->rar_entry,
+					entry->vf, true);
+	}
+	return 0;
+}
+
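+/* event_mask encoding used by the callers in this file: bits [5:0]
+ * carry the VF number and bit 28 (0x10000000) requests MAC address
+ * assignment; the generated address is derived from the PF permanent
+ * MAC with the VF number and PCI devfn folded into the low bytes.
+ */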
+int rnpgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
+{
+	unsigned char vf_mac_addr[6];
+	struct rnpgbe_adapter *adapter = pci_get_drvdata(pdev);
+	unsigned int vfn = (event_mask & 0x3f);
+
+	bool enable = ((event_mask & 0x10000000U) != 0);
+
+	if (enable) {
+		eth_zero_addr(vf_mac_addr);
+		memcpy(vf_mac_addr, adapter->hw.mac.perm_addr, 6);
+		vf_mac_addr[5] = vf_mac_addr[5] + (0x80 | vfn);
+		vf_mac_addr[4] = vf_mac_addr[4] + (pdev->devfn);
+
+		memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
+	}
+
+	return 0;
+}
+
+static int rnpgbe_vf_reset_msg(struct rnpgbe_adapter *adapter, u32 vf)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
+	u32 msgbuf[RNP_VF_PERMADDR_MSG_LEN];
+	u8 *addr = (u8 *)(&msgbuf[1]);
+
+	/* reset the filters for the device */
+	rnpgbe_vf_reset_event(adapter, vf);
+
+	/* set vf mac address */
+	if (!is_zero_ether_addr(vf_mac))
+		rnpgbe_set_vf_mac(adapter, vf, vf_mac);
+
+	/* enable VF mailbox for further messages */
+	adapter->vfinfo[vf].clear_to_send = true;
+
+	/* Enable counting of spoofed packets in the SSVPC register */
+
+	/* reply to reset with ack and vf mac address */
+	msgbuf[0] = RNP_VF_RESET;
+	if (!is_zero_ether_addr(vf_mac)) {
+		msgbuf[0] |= RNP_VT_MSGTYPE_ACK;
+		memcpy(addr, vf_mac, ETH_ALEN);
+	} else {
+		msgbuf[0] |= RNP_VT_MSGTYPE_NACK;
+		dev_warn(&adapter->pdev->dev,
+			 "VF %d has no MAC address assigned, you have to assign"
+			 "one manually\n",
+			 vf);
+	}
+
+	/*
+	 * Piggyback the multicast filter type so VF can compute the
+	 * correct vectors
+	 */
+	msgbuf[RNP_VF_MC_TYPE_WORD] = 0;
+	/* RNP_VF_MC_TYPE_WORD also carries the pause mode in bits [23:16],
+	 * the FT padding flag in bit 8 and the multicast filter type in
+	 * bits [1:0]; the link status is reported separately below.
+	 */
+	msgbuf[RNP_VF_MC_TYPE_WORD] |= (0xff & hw->fc.current_mode) << 16;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING)
+		msgbuf[RNP_VF_MC_TYPE_WORD] |= (0x01 << 8);
+	else
+		msgbuf[RNP_VF_MC_TYPE_WORD] |= (0x00 << 8);
+	/* mc_type */
+	msgbuf[RNP_VF_MC_TYPE_WORD] |= rd32(hw, RNP_ETH_DMAC_MCSTCTRL) & 0x03;
+	msgbuf[RNP_VF_DMA_VERSION_WORD] = rd32(hw, RNP_DMA_VERSION);
+	msgbuf[RNP_VF_VLAN_WORD] = adapter->vfinfo[vf].pf_vlan;
+	msgbuf[RNP_VF_PHY_TYPE_WORD] = (hw->mac_type << 16) | hw->phy_type;
+	msgbuf[RNP_VF_FW_VERSION_WORD] = (hw->fw_version);
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+	if (adapter->vfinfo[vf].link_state == rnpgbe_link_state_auto) {
+		msgbuf[RNP_VF_LINK_STATUS_WORD] =
+			(adapter->link_up ? RNP_PF_LINK_UP : 0) |
+			adapter->link_speed;
+	} else if (adapter->vfinfo[vf].link_state == rnpgbe_link_state_on) {
+		msgbuf[RNP_VF_LINK_STATUS_WORD] =
+			RNP_PF_LINK_UP | adapter->link_speed;
+
+	} else {
+		msgbuf[RNP_VF_LINK_STATUS_WORD] = 0;
+	}
+#else
+	msgbuf[RNP_VF_LINK_STATUS_WORD] = 0;
+#endif
+
+	msgbuf[RNP_VF_AXI_MHZ] = hw->usecstocount;
+	msgbuf[RNP_VF_FEATURE] = 0;
+#ifdef NETIF_F_HW_VLAN_CTAG_FILTER
+	if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+		msgbuf[RNP_VF_FEATURE] |= PF_FEATRURE_VLAN_FILTER;
+#endif
+	if (hw->ncsi_en)
+		msgbuf[RNP_VF_FEATURE] |= PF_NCSI_EN;
+
+	/* the VF may not have an IRQ handler yet if this is its first reset */
+	rnpgbe_write_mbx(hw, msgbuf, RNP_VF_PERMADDR_MSG_LEN, vf);
+
+	return 0;
+}
+
+static int rnpgbe_get_vf_mac_addr(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+				  u32 vf)
+{
+	u8 *mac = ((u8 *)(&msgbuf[1]));
+
+	memcpy(mac, adapter->vfinfo[vf].vf_mac_addresses, 6);
+
+	return 0;
+}
+
+/* the VF requested to set a new MAC address */
+static int rnpgbe_set_vf_mac_addr(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+				  u32 vf)
+{
+	u8 *new_mac = ((u8 *)(&msgbuf[1]));
+
+	if (!is_valid_ether_addr(new_mac)) {
+		e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
+		return -1;
+	}
+
+	if (adapter->vfinfo[vf].pf_set_mac &&
+	    memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac, ETH_ALEN)) {
+		e_warn(drv,
+		       "VF %d attempted to override administratively set MAC address\n"
+		       "Reload the VF driver to resume operations\n",
+		       vf);
+		return -1;
+	}
+	rnpgbe_set_vf_mac(adapter, vf, new_mac);
+
+	return 0;
+}
+
+static int rnpgbe_set_vf_vlan_msg(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+				  u32 vf)
+{
+	int add = ((msgbuf[0] & RNP_VT_MSGINFO_MASK) >> RNP_VT_MSGINFO_SHIFT);
+	int vid = (msgbuf[1] & RNP_VLVF_VLANID_MASK);
+	int err;
+
+	if (adapter->vfinfo[vf].pf_vlan) {
+		e_warn(drv,
+		       "VF %d attempted to override administratively set VLAN "
+		       "configuration\n"
+		       "Reload the VF driver to resume operations\n",
+		       vf);
+		return -1;
+	}
+
+	if ((add) && (adapter->vfinfo[vf].vlan_count)) {
+		e_warn(drv, "VF %d attempted to set more than 1 vlan", vf);
+		e_warn(drv, " vlan now %d, try to set %d\n",
+		       adapter->vfinfo[vf].vf_vlan, vid);
+		return -1;
+	}
+
+	/* VLAN 0 requires no further work */
+	if (!vid)
+		return 0;
+	if (add) {
+		adapter->vfinfo[vf].vlan_count++;
+		/* store vf vlan setup */
+		adapter->vfinfo[vf].vf_vlan = vid;
+	} else if (adapter->vfinfo[vf].vlan_count) {
+		adapter->vfinfo[vf].vf_vlan = 0;
+		adapter->vfinfo[vf].vlan_count--;
+	}
+
+	err = rnpgbe_set_vf_vlan(adapter, add, vid, vf);
+
+	return err;
+}
+
+static int rnpgbe_set_vf_vlan_strip_msg(struct rnpgbe_adapter *adapter,
+					u32 *msgbuf, u32 vf)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int vlan_strip_on = !!(msgbuf[1] >> 31);
+	int queue_cnt = msgbuf[1] & 0xffff;
+	int err = 0, i;
+
+	vf_dbg("strip_on:%d queeu_cnt:%d, %d %d\n", vlan_strip_on, queue_cnt,
+	       msgbuf[2], msgbuf[3]);
+
+	for (i = 0; i < queue_cnt; i++) {
+		if (vlan_strip_on)
+			hw->ops.set_vlan_strip(hw, msgbuf[2 + i], true);
+		else
+			hw->ops.set_vlan_strip(hw, msgbuf[2 + i], false);
+	}
+
+	return err;
+}
+
+static int rnpgbe_set_vf_macvlan_msg(struct rnpgbe_adapter *adapter,
+				     u32 *msgbuf, u32 vf)
+{
+	u8 *new_mac = ((u8 *)(&msgbuf[1]));
+	int index = (msgbuf[0] & RNP_VT_MSGINFO_MASK) >> RNP_VT_MSGINFO_SHIFT;
+	int err;
+
+	if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
+		e_warn(drv,
+		       "VF %d requested MACVLAN filter but is administratively denied\n",
+		       vf);
+		return -1;
+	}
+
+	/* A non-zero index indicates the VF is setting a filter */
+	if (index) {
+		if (!is_valid_ether_addr(new_mac)) {
+			e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
+			return -1;
+		}
+	}
+
+	err = rnpgbe_set_vf_macvlan(adapter, vf, index, new_mac);
+	if (err == -ENOSPC)
+		e_warn(drv, "VF %d has requested a MACVLAN filter but "
+			    "there is no space for it\n", vf);
+
+	return err < 0;
+}
+
+static int rnpgbe_negotiate_vf_api(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+				   u32 vf)
+{
+	adapter->vfinfo[vf].vf_api = 0;
+
+	return 0;
+}
+
+static int rnpgbe_get_vf_reg(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+			     u32 vf)
+{
+	u32 reg = msgbuf[1];
+
+	msgbuf[1] = rd32(&adapter->hw, reg);
+
+	return 0;
+}
+
+static int rnpgbe_set_vf_mtu(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+			     u32 vf)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	if (msgbuf[1] > netdev->mtu) {
+		e_dev_warn("vf %d try to change %d mtu to %d (too large)\n",
+			   vf, netdev->mtu, msgbuf[1]);
+		return -1;
+	} else {
+		return 0;
+	}
+}
+
+static int rnpgbe_get_vf_mtu(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+			     u32 vf)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	msgbuf[1] = netdev->mtu;
+
+	return 0;
+}
+
+static int rnpgbe_get_vf_fw(struct rnpgbe_adapter *adapter, u32 *msgbuf, u32 vf)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	msgbuf[1] = hw->fw_version;
+
+	return 0;
+}
+
+static int rnpgbe_get_vf_link(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+			      u32 vf)
+{
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+	if (adapter->vfinfo[vf].link_state == rnpgbe_link_state_auto) {
+		msgbuf[1] = (adapter->link_up ? RNP_PF_LINK_UP : 0) |
+			    adapter->link_speed;
+	} else if (adapter->vfinfo[vf].link_state == rnpgbe_link_state_on) {
+		msgbuf[1] = RNP_PF_LINK_UP | adapter->link_speed;
+
+	} else {
+		msgbuf[1] = 0;
+	}
+#else /* HAVE_NDO_SET_VF_LINK_STATE */
+	msgbuf[1] = 0;
+
+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
+	return 0;
+}
+
+static int rnpgbe_get_vf_dma_frag(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+				  u32 vf)
+{
+	/* the DMA fragment size is fixed at 1536 bytes */
+	msgbuf[1] = 1536;
+	return 0;
+}
+
+static int rnpgbe_vf_get_stats_clr(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+				   u32 vf)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+
+	if (dma_rd32(dma, RNP500_STATISTIC_CRL(vf)))
+		msgbuf[1] = 1;
+	else
+		msgbuf[1] = 0;
+
+	return 0;
+}
+
+static int rnpgbe_vf_set_stats_clr(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+				   u32 vf)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+
+	if (msgbuf[1])
+		dma_wr32(dma, RNP500_STATISTIC_CRL(vf), 1);
+	else
+		dma_wr32(dma, RNP500_STATISTIC_CRL(vf), 0);
+
+	return 0;
+}
+
+static int rnpgbe_get_vf_queues(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+				u32 vf)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	msgbuf[RNP_VF_TX_QUEUES] = hw->sriov_ring_limit;
+	msgbuf[RNP_VF_RX_QUEUES] = hw->sriov_ring_limit;
+	msgbuf[RNP_VF_TRANS_VLAN] = adapter->vfinfo[vf].pf_vlan;
+	msgbuf[RNP_VF_DEF_QUEUE] = 0;
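+	/* Queue numbering: with the fixed-VF layout VF n starts at
+	 * (n + 1) * sriov_ring_limit (the first group is presumably kept
+	 * by the PF); otherwise VF n starts at n * sriov_ring_limit.
+	 */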
+	if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED)
+		msgbuf[RNP_VF_QUEUE_START] = vf * hw->sriov_ring_limit +
+			hw->sriov_ring_limit;
+	else
+		msgbuf[RNP_VF_QUEUE_START] = vf * hw->sriov_ring_limit;
+	msgbuf[RNP_VF_QUEUE_DEPTH] = (adapter->tx_ring_item_count << 16) |
+				     adapter->rx_ring_item_count;
+
+	return 0;
+}
+
+static int rnpgbe_rcv_msg_from_vf(struct rnpgbe_adapter *adapter, u32 vf)
+{
+	u32 mbx_size = RNP_VFMAILBOX_SIZE;
+	u32 msgbuf[RNP_VFMAILBOX_SIZE];
+	struct rnpgbe_hw *hw = &adapter->hw;
+	s32 retval;
+
+	vf_dbg("msg from vf:%d\n", vf);
+
+	retval = rnpgbe_read_mbx(hw, msgbuf, mbx_size, vf);
+	if (retval) {
+		pr_err("Error receiving message from VF\n");
+		return retval;
+	}
+	vf_dbg("msg[0]=0x%08x\n", msgbuf[0]);
+
+	/* this is a message we already processed, do nothing */
+	if (msgbuf[0] & (RNP_VT_MSGTYPE_ACK | RNP_VT_MSGTYPE_NACK))
+		return retval;
+
+	/* flush the ack before we write any messages back */
+
+	/* clear vf_num */
+	msgbuf[0] &= (~RNP_VF_MASK);
+
+	/* this is a vf reset irq */
+	if ((msgbuf[0] & RNP_MAIL_CMD_MASK) == RNP_VF_RESET) {
+		vf_dbg("vf %d up\n", vf);
+		return rnpgbe_vf_reset_msg(adapter, vf);
+	}
+
+	/*
+	 * until the vf completes a virtual function reset it should not be
+	 * allowed to start any configuration.
+	 */
+	if (!adapter->vfinfo[vf].clear_to_send) {
+		vf_dbg("wait vf clear to send\n");
+		msgbuf[0] |= RNP_VT_MSGTYPE_NACK;
+		rnpgbe_write_mbx(hw, msgbuf, 1, vf);
+		return retval;
+	}
+
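+	/* msgbuf[0] framing: the low bits select the command
+	 * (RNP_MAIL_CMD_MASK); the reply reuses the same word with the VF
+	 * number placed at bit 21 plus the ACK/NACK and CTS flags.
+	 */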
+	switch ((msgbuf[0] & RNP_MAIL_CMD_MASK)) {
+	case RNP_VF_SET_MAC_ADDR:
+		retval = rnpgbe_set_vf_mac_addr(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_SET_MULTICAST:
+		retval = rnpgbe_set_vf_multicasts(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_SET_VLAN:
+		retval = rnpgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_SET_VLAN_STRIP:
+		retval = rnpgbe_set_vf_vlan_strip_msg(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_GET_MACADDR:
+		retval = rnpgbe_get_vf_mac_addr(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_SET_MACVLAN:
+		retval = rnpgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_API_NEGOTIATE:
+		retval = rnpgbe_negotiate_vf_api(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_GET_QUEUES:
+		retval = rnpgbe_get_vf_queues(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_REG_RD:
+		retval = rnpgbe_get_vf_reg(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_GET_MTU:
+		retval = rnpgbe_get_vf_mtu(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_SET_MTU:
+		retval = rnpgbe_set_vf_mtu(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_GET_FW:
+		retval = rnpgbe_get_vf_fw(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_GET_LINK:
+		retval = rnpgbe_get_vf_link(adapter, msgbuf, vf);
+		break;
+	case RNP_PF_REMOVE:
+		vf_dbg("vf %d removed\n", vf);
+		adapter->vfinfo[vf].clear_to_send = false;
+		adapter->vfinfo[vf].vf_vlan = 0;
+		/* TODO: clean up VF info */
+		retval = 1;
+		break;
+	case RNP_VF_RESET_PF:
+		adapter->flags2 |= RNP_FLAG2_RESET_PF;
+		retval = 1;
+		break;
+	case RNP_VF_GET_DMA_FRAG:
+		retval = rnpgbe_get_vf_dma_frag(adapter, msgbuf, vf);
+
+		break;
+	case RNP_VF_SET_STATS_CLR:
+		retval = rnpgbe_vf_set_stats_clr(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_GET_STATS_CLR:
+		retval = rnpgbe_vf_get_stats_clr(adapter, msgbuf, vf);
+		break;
+	default:
+		e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
+		retval = RNP_ERR_MBX;
+		break;
+	}
+
+	/* notify the VF of the results of what it sent us */
+	if (retval)
+		msgbuf[0] |= RNP_VT_MSGTYPE_NACK;
+	else
+		msgbuf[0] |= RNP_VT_MSGTYPE_ACK;
+
+	/* write vf_num */
+	msgbuf[0] |= (vf << 21);
+
+	msgbuf[0] |= RNP_VT_MSGTYPE_CTS;
+
+	if ((msgbuf[0] & RNP_MAIL_CMD_MASK) != RNP_PF_REMOVE)
+		rnpgbe_write_mbx(hw, msgbuf, mbx_size, vf);
+
+	return retval;
+}
+
+static void rnpgbe_rcv_ack_from_vf(struct rnpgbe_adapter *adapter, u32 vf)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 msg = RNP_VT_MSGTYPE_NACK;
+
+	/* if device isn't clear to send it shouldn't be reading either */
+	if (!adapter->vfinfo[vf].clear_to_send)
+		rnpgbe_write_mbx(hw, &msg, 1, vf);
+}
+
+void rnpgbe_msg_task(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 vf;
+
+	rnpgbe_fw_msg_handler(adapter);
+
+	if (!(adapter->flags & RNP_FLAG_SRIOV_INIT_DONE))
+		return;
+	for (vf = 0; vf < adapter->num_vfs; vf++) {
+		if (test_and_set_bit(__VF_MBX_USED,
+				     &adapter->vfinfo[vf].status)) {
+			adapter->miss_time++;
+			e_info(drv, "we missed some irqs %d\n", vf);
+			continue;
+		}
+
+		/* process any messages pending */
+		if (!rnpgbe_check_for_msg(hw, vf))
+			rnpgbe_rcv_msg_from_vf(adapter, vf);
+
+		/* process any acks */
+		if (!rnpgbe_check_for_ack(hw, vf))
+			rnpgbe_rcv_ack_from_vf(adapter, vf);
+		clear_bit(__VF_MBX_USED, &adapter->vfinfo[vf].status);
+	}
+}
+
+static int rnpgbe_msg_post_status_signle_link(struct rnpgbe_adapter *adapter,
+					      int vf,
+					      int link_state)
+{
+	u32 msgbuf[RNP_VFMAILBOX_SIZE];
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+
+	msgbuf[0] = RNP_PF_SET_LINK | (vf << RNP_VNUM_OFFSET);
+	switch (link_state) {
+	case rnpgbe_link_state_on:
+		msgbuf[1] = RNP_PF_LINK_UP | adapter->link_speed;
+		break;
+	case rnpgbe_link_state_off:
+		msgbuf[1] = 0;
+		break;
+	case rnpgbe_link_state_auto:
+		if (adapter->link_up)
+			msgbuf[1] = RNP_PF_LINK_UP | adapter->link_speed;
+		else
+			msgbuf[1] = 0;
+		break;
+	}
+	return mbx->ops.write(hw, msgbuf, 2, vf);
+}
+
+static int rnpgbe_msg_post_status_signle(struct rnpgbe_adapter *adapter,
+					 enum PF_STATUS status, int vf)
+{
+	u32 msgbuf[RNP_VFMAILBOX_SIZE];
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+
+	switch (status) {
+	case PF_FCS_STATUS:
+		msgbuf[0] = RNP_PF_SET_FCS | (vf << RNP_VNUM_OFFSET);
+		if (adapter->netdev->features & NETIF_F_RXFCS)
+			msgbuf[1] = 1;
+		else
+			msgbuf[1] = 0;
+		break;
+	case PF_PAUSE_STATUS:
+		msgbuf[0] = RNP_PF_SET_PAUSE | (vf << RNP_VNUM_OFFSET);
+		msgbuf[1] = hw->fc.requested_mode;
+		break;
+	case PF_FT_PADDING_STATUS:
+		msgbuf[0] = RNP_PF_SET_FT_PADDING | (vf << RNP_VNUM_OFFSET);
+		if (adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING)
+			msgbuf[1] = 1;
+		else
+			msgbuf[1] = 0;
+
+		break;
+	case PF_VLAN_FILTER_STATUS:
+		msgbuf[0] = RNP_PF_SET_VLAN_FILTER | (vf << RNP_VNUM_OFFSET);
+#ifdef NETIF_F_HW_VLAN_CTAG_FILTER
+		if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+			msgbuf[1] = 1;
+		else
+			msgbuf[1] = 0;
+#else
+		msgbuf[1] = 0;
+#endif
+
+		break;
+	case PF_SET_VLAN_STATUS:
+		msgbuf[0] = RNP_PF_SET_VLAN | (vf << RNP_VNUM_OFFSET);
+
+		msgbuf[1] = adapter->vfinfo[vf].pf_vlan;
+		break;
+	case PF_SET_LINK_STATUS:
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+		if (adapter->vfinfo[vf].link_state != rnpgbe_link_state_auto)
+			return 0;
+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
+		/* only update link state if in auto mode */
+		msgbuf[0] = RNP_PF_SET_LINK | (vf << RNP_VNUM_OFFSET);
+		if (adapter->link_up)
+			msgbuf[1] = RNP_PF_LINK_UP | adapter->link_speed;
+		else
+			msgbuf[1] = 0;
+		break;
+	case PF_SET_MTU:
+		msgbuf[0] = RNP_PF_SET_MTU | (vf << RNP_VNUM_OFFSET);
+		msgbuf[1] = adapter->netdev->mtu;
+		break;
+	case PF_SET_RESET:
+		msgbuf[0] = RNP_PF_SET_RESET | (vf << RNP_VNUM_OFFSET);
+		msgbuf[1] = 0;
+
+		break;
+	}
+	return mbx->ops.write_posted(hw, msgbuf, 2, vf);
+}
+
+/* try to send a mailbox message to all active VFs */
+int rnpgbe_msg_post_status(struct rnpgbe_adapter *adapter,
+			   enum PF_STATUS status)
+{
+	u32 vf;
+	int err = 0;
+
+	if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+		return 0;
+	/* broadcast */
+	for (vf = 0; vf < adapter->num_vfs; vf++) {
+		if (!(adapter->vfinfo[vf].clear_to_send))
+			continue;
+
+		if (!test_bit(__RNP_IN_IRQ, &adapter->state)) {
+			if (test_and_set_bit(__VF_MBX_USED,
+					     &adapter->vfinfo[vf].status)) {
+				adapter->miss_time++;
+				printk(KERN_DEBUG "send miss \n");
+				return -1;
+			}
+			err |= rnpgbe_msg_post_status_signle(adapter, status,
+							     vf);
+			clear_bit(__VF_MBX_USED, &adapter->vfinfo[vf].status);
+		}
+	}
+	return err;
+}
+
+void rnpgbe_ping_all_vfs(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 ping;
+	int i;
+
+	for (i = 0; i < adapter->num_vfs; i++) {
+		ping = RNP_PF_CONTROL_PRING_MSG;
+		/* only send to active vf */
+		ping |= RNP_VT_MSGTYPE_CTS;
+		rnpgbe_write_mbx(hw, &ping, 1, i);
+	}
+}
+
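+/* The ring index currently maps 1:1 to the VF number; the num argument
+ * is accepted for interface symmetry but is not used.
+ */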
+int rnpgbe_get_vf_ringnum(struct rnpgbe_hw *hw, int vf, int num)
+{
+	return vf;
+}
+
+int rnpgbe_setup_ring_maxrate(struct rnpgbe_adapter *adapter, int ring,
+			      u64 max_rate)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	int samples_1sec = adapter->hw.usecstocount * 100000;
+
+	dma_ring_wr32(dma, RING_OFFSET(ring) + RNP_DMA_REG_TX_FLOW_CTRL_TM,
+		      samples_1sec);
+	dma_ring_wr32(dma, RING_OFFSET(ring) + RNP_DMA_REG_TX_FLOW_CTRL_TH,
+		      max_rate);
+	return 0;
+}
+
+static int rnpgbe_disable_port_vlan(struct rnpgbe_adapter *adapter, int vf)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int err;
+
+	err = rnpgbe_set_vf_vlan(adapter, false, adapter->vfinfo[vf].pf_vlan,
+				 vf);
+
+	if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) {
+		if (hw->ops.set_vf_vlan_mode) {
+			if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED)
+				hw->ops.set_vf_vlan_mode(
+					hw, adapter->vfinfo[vf].pf_vlan, vf + 1,
+					false);
+			else
+				hw->ops.set_vf_vlan_mode(
+					hw, adapter->vfinfo[vf].pf_vlan, vf,
+					false);
+		}
+	}
+	adapter->vfinfo[vf].pf_vlan = 0;
+	adapter->vfinfo[vf].pf_qos = 0;
+	/* clear veb */
+	hw->ops.set_vf_vlan_filter(hw, 0, vf, false, true);
+
+	return err;
+}
+
+static int rnpgbe_enable_port_vlan(struct rnpgbe_adapter *adapter, int vf,
+				   u16 vlan, u8 qos)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int err;
+
+	err = rnpgbe_set_vf_vlan(adapter, true, vlan, vf);
+	if (err)
+		goto out;
+
+	adapter->vfinfo[vf].pf_vlan = vlan;
+	adapter->vfinfo[vf].pf_qos = qos;
+	dev_info(pci_dev_to_dev(adapter->pdev),
+		 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
+	if (test_bit(__RNP_DOWN, &adapter->state)) {
+		dev_warn(
+			pci_dev_to_dev(adapter->pdev),
+			"The VF VLAN has been set, but the PF device is not up.\n");
+		dev_warn(
+			pci_dev_to_dev(adapter->pdev),
+			"Bring the PF device up before attempting to use the VF device.\n");
+	}
+	/* setup veb only */
+	hw->ops.set_vf_vlan_filter(hw, vlan, vf, true, true);
+
+	/* in SR-IOV VLAN mode the PF VLVF table must also be set up */
+	if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) {
+		if (hw->ops.set_vf_vlan_mode) {
+			if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED)
+				hw->ops.set_vf_vlan_mode(hw, vlan, vf + 1,
+							 true);
+			else
+				hw->ops.set_vf_vlan_mode(hw, vlan, vf, true);
+		}
+	}
+out:
+	return err;
+}
+
+#ifdef IFLA_VF_VLAN_INFO_MAX
+int rnpgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+			   __be16 vlan_proto)
+#else
+int rnpgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
+#endif
+{
+	int err = 0;
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	/* VLAN IDs accepted range 0-4094 */
+	if (vf < 0 || vf >= adapter->num_vfs || vlan > VLAN_VID_MASK - 1 ||
+	    qos > 7)
+		return -EINVAL;
+
+#ifdef IFLA_VF_VLAN_INFO_MAX
+	if (vlan_proto != htons(ETH_P_8021Q))
+		return -EPROTONOSUPPORT;
+#endif
+	if (vlan || qos) {
+		/*
+		 * Check if there is already a port VLAN set, if so
+		 * we have to delete the old one first before we
+		 * can set the new one.  The usage model had
+		 * previously assumed the user would delete the
+		 * old port VLAN before setting a new one but this
+		 * is not necessarily the case.
+		 */
+		if (adapter->vfinfo[vf].vf_vlan) {
+			dev_err(&adapter->pdev->dev,
+				"vf set vlan before, delete it before add new\n");
+			err = -EINVAL;
+			goto out;
+		}
+		if (adapter->vfinfo[vf].pf_vlan)
+			err = rnpgbe_disable_port_vlan(adapter, vf);
+		if (err)
+			goto out;
+		err = rnpgbe_enable_port_vlan(adapter, vf, vlan, qos);
+
+	} else {
+		/* if the VF itself set a VLAN, return an error */
+		if ((!adapter->vfinfo[vf].pf_vlan) &&
+		    (adapter->vfinfo[vf].vf_vlan)) {
+			dev_err(&adapter->pdev->dev,
+				"pf cann't delete vf set vlan\n");
+
+			err = -EINVAL;
+			goto out;
+		} else if (adapter->vfinfo[vf].pf_vlan)
+			err = rnpgbe_disable_port_vlan(adapter, vf);
+	}
+	/* send mbx to vf */
+	if (adapter->vfinfo[vf].clear_to_send)
+		rnpgbe_msg_post_status_signle(adapter, PF_SET_VLAN_STATUS, vf);
+out:
+	return err;
+}
+
+#if IS_ENABLED(CONFIG_PCI_IOV)
+int rnpgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (vf < 0 || vf >= adapter->num_vfs)
+		return -EINVAL;
+
+	/* the hardware may not support this, only record the setting */
+	adapter->vfinfo[vf].spoofchk_enabled = setting;
+
+	return 0;
+}
+
+#endif /* CONFIG_PCI_IOV */
+
+#ifdef HAVE_NDO_SET_VF_TRUST
+int rnpgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (vf < 0 || vf >= adapter->num_vfs)
+		return -EINVAL;
+
+	/* nothing to do */
+	if (adapter->vfinfo[vf].trusted == setting)
+		return 0;
+
+	adapter->vfinfo[vf].trusted = setting;
+	e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not ");
+
+	return 0;
+}
+
+#endif
+
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+
+int rnpgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = 0;
+
+	if (vf < 0 || vf >= adapter->num_vfs) {
+		dev_err(pci_dev_to_dev(adapter->pdev),
+			"NDO set VF link - invalid VF identifier %d\n", vf);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	switch (state) {
+	case IFLA_VF_LINK_STATE_ENABLE:
+		dev_info(pci_dev_to_dev(adapter->pdev),
+			 "NDO set VF %d link state %d\n", vf, state);
+		adapter->vfinfo[vf].link_state = rnpgbe_link_state_on;
+		rnpgbe_msg_post_status_signle_link(adapter, vf,
+						   rnpgbe_link_state_on);
+		break;
+	case IFLA_VF_LINK_STATE_DISABLE:
+		dev_info(pci_dev_to_dev(adapter->pdev),
+			 "NDO set VF %d link state disable\n", vf);
+		adapter->vfinfo[vf].link_state = rnpgbe_link_state_off;
+		rnpgbe_msg_post_status_signle_link(adapter, vf,
+						   rnpgbe_link_state_off);
+		break;
+	case IFLA_VF_LINK_STATE_AUTO:
+		dev_info(pci_dev_to_dev(adapter->pdev),
+			 "NDO set VF %d link state auto\n", vf);
+		adapter->vfinfo[vf].link_state = rnpgbe_link_state_auto;
+		rnpgbe_msg_post_status_signle_link(adapter, vf,
+						   rnpgbe_link_state_auto);
+		break;
+	default:
+		dev_err(pci_dev_to_dev(adapter->pdev),
+			"NDO set VF %d - invalid link state %d\n", vf, state);
+		ret = -EINVAL;
+	}
+out:
+	return ret;
+}
+
+#endif
+
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+int rnpgbe_ndo_set_vf_bw(struct net_device *netdev, int vf,
+			 int __always_unused min_tx_rate, int max_tx_rate)
+#else
+int rnpgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int max_tx_rate)
+#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	/* limit vf ring rate */
+	int ring_max_rate;
+	int vf_ring;
+	int link_speed = 0;
+	u64 real_rate = 0;
+
+	if (vf >= hw->max_vfs - 1)
+		return -EINVAL;
+
+	switch (adapter->link_speed) {
+	case RNP_LINK_SPEED_40GB_FULL:
+		link_speed = 40000;
+		break;
+	case RNP_LINK_SPEED_25GB_FULL:
+		link_speed = 25000;
+		break;
+	case RNP_LINK_SPEED_10GB_FULL:
+		link_speed = 10000;
+		break;
+	case RNP_LINK_SPEED_1GB_FULL:
+		link_speed = 1000;
+		break;
+	case RNP_LINK_SPEED_100_FULL:
+		link_speed = 100;
+		break;
+	}
+	/* rate limit cannot be greater than link speed */
+	if (max_tx_rate && (max_tx_rate > link_speed))
+		return -EINVAL;
+
+	adapter->vfinfo[vf].tx_rate = max_tx_rate;
+
+	ring_max_rate = max_tx_rate / hw->sriov_ring_limit;
+
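+	/* Scale the per-ring rate into the value programmed into the ring
+	 * rate-limit threshold: the shift by 3 presumably converts bits to
+	 * bytes, and the 85-99% factor, which grows with the requested
+	 * rate, presumably compensates for framing overhead.
+	 */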
+	if (max_tx_rate <= 10)
+		real_rate = (ring_max_rate * 1000 * 85) >> 3;
+	else if (max_tx_rate <= 50)
+		real_rate = (ring_max_rate * 1000 * 90) >> 3;
+	else if (max_tx_rate <= 100)
+		real_rate = (ring_max_rate * 1000 * 94) >> 3;
+	else
+		real_rate = (ring_max_rate * 1000 * 99) >> 3;
+
+	vf_ring = rnpgbe_get_vf_ringnum(hw, vf, 0);
+	rnpgbe_setup_ring_maxrate(adapter, vf_ring, real_rate);
+
+	return 0;
+}
+
+int rnpgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
+		return -EINVAL;
+	adapter->vfinfo[vf].pf_set_mac = true;
+	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
+	dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
+				      " change effective.");
+	if (test_bit(__RNP_DOWN, &adapter->state)) {
+		dev_warn(&adapter->pdev->dev,
+			 "The VF MAC address has been set,"
+			 " but the PF device is not up.\n");
+		dev_warn(&adapter->pdev->dev,
+			 "Bring the PF device up before"
+			 " attempting to use the VF device.\n");
+	}
+	rnpgbe_set_vf_mac(adapter, vf, mac);
+	/* send reset to vf only vf is up */
+	if (adapter->vfinfo[vf].clear_to_send)
+		rnpgbe_msg_post_status_signle(adapter, PF_SET_RESET, vf);
+
+	return 0;
+}
+
+int rnpgbe_ndo_get_vf_config(struct net_device *netdev, int vf,
+			     struct ifla_vf_info *ivi)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (vf >= adapter->num_vfs)
+		return -EINVAL;
+	ivi->vf = vf;
+	memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+	ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate;
+	ivi->min_tx_rate = 0;
+#else
+	ivi->tx_rate = adapter->vfinfo[vf].tx_rate;
+#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
+
+	if (adapter->vfinfo[vf].pf_vlan)
+		ivi->vlan = adapter->vfinfo[vf].pf_vlan;
+	else
+		ivi->vlan = adapter->vfinfo[vf].vf_vlan;
+
+	ivi->qos = adapter->vfinfo[vf].pf_qos;
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+	ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
+#endif
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+	ivi->linkstate = adapter->vfinfo[vf].link_state;
+#endif
+#ifdef HAVE_NDO_SET_VF_TRUST
+	ivi->trusted = adapter->vfinfo[vf].trusted;
+#endif
+
+	return 0;
+}
+
+int rnpgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+	vf_dbg("\n\n !!!! %s:%d num_vfs:%d\n", __func__, __LINE__, num_vfs);
+	if (num_vfs == 0)
+		return rnpgbe_pci_sriov_disable(dev);
+	else
+		return rnpgbe_pci_sriov_enable(dev, num_vfs);
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sriov.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sriov.h
new file mode 100755
index 0000000000000000000000000000000000000000..63ba94477e7ebef5d9f9445899f66f2234a0a6d3
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sriov.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPGBE_SRIOV_H_
+#define _RNPGBE_SRIOV_H_
+
+int rnpgbe_setup_ring_maxrate(struct rnpgbe_adapter *adapter, int ring,
+			      u64 max_rate);
+int rnpgbe_get_vf_ringnum(struct rnpgbe_hw *hw, int vf, int num);
+void rnpgbe_restore_vf_macs(struct rnpgbe_adapter *adapter);
+void rnpgbe_restore_vf_multicasts(struct rnpgbe_adapter *adapter);
+void rnpgbe_restore_vf_macvlans(struct rnpgbe_adapter *adapter);
+void rnpgbe_msg_task(struct rnpgbe_adapter *adapter);
+int rnpgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
+void rnpgbe_ping_all_vfs(struct rnpgbe_adapter *adapter);
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+int rnpgbe_ndo_set_vf_bw(struct net_device *netdev, int vf,
+			 int __always_unused min_tx_rate, int max_tx_rate);
+#else /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
+int rnpgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int max_tx_rate);
+#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
+int rnpgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
+int rnpgbe_msg_post_status(struct rnpgbe_adapter *adapter,
+			   enum PF_STATUS status);
+int rnpgbe_ndo_get_vf_config(struct net_device *netdev, int vf,
+			     struct ifla_vf_info *ivi);
+void rnpgbe_check_vf_rate_limit(struct rnpgbe_adapter *adapter);
+int rnpgbe_disable_sriov(struct rnpgbe_adapter *adapter);
+#ifdef CONFIG_PCI_IOV
+void rnpgbe_enable_sriov_true(struct rnpgbe_adapter *adapter);
+void rnpgbe_enable_sriov(struct rnpgbe_adapter *adapter);
+#endif /* CONFIG_PCI_IOV */
+int rnpgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+#ifdef IFLA_VF_VLAN_INFO_MAX
+int rnpgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+			   __be16 vlan_proto);
+#else /* IFLA_VF_VLAN_INFO_MAX */
+int rnpgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
+#endif /* IFLA_VF_VLAN_INFO_MAX */
+int rnpgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state);
+#if IS_ENABLED(CONFIG_PCI_IOV)
+int rnpgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
+#endif /* IS_ENABLED(CONFIG_PCI_IOV) */
+#ifdef HAVE_NDO_SET_VF_TRUST
+int rnpgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
+#endif /* HAVE_NDO_SET_VF_TRUST */
+
+#endif /* _RNPGBE_SRIOV_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sysfs.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sysfs.c
new file mode 100755
index 0000000000000000000000000000000000000000..9d0ba98d8e22c19c23e286d82c3a6680c75357cf
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sysfs.c
@@ -0,0 +1,1395 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "rnpgbe.h"
+#include "rnpgbe_common.h"
+#include "rnpgbe_type.h"
+#include "version.h"
+#include "rnpgbe_mbx.h"
+#include "rnpgbe_mbx_fw.h"
+
+struct maintain_req {
+	int magic;
+#define MAINTAIN_MAGIC 0xa6a7a8a9
+
+	int cmd;
+	int arg0;
+	int req_data_bytes;
+	int reply_bytes;
+	char data[0];
+} __attribute__((packed));
+
+struct maintain_reply {
+	int magic;
+#define MAINTAIN_REPLY_MAGIC 0xB6B7B8B9
+	int cmd;
+	int arg0;
+	int data_bytes;
+	int rev;
+	int data[0];
+} __attribute__((packed));
+
+struct ucfg_mac_sn {
+	unsigned char macaddr[64];
+	unsigned char sn[32];
+	int magic;
+#define MAC_SN_MAGIC 0x87654321
+	char rev[52];
+	unsigned char pn[32];
+} __attribute__((packed, aligned(4)));
+
+static int print_desc(char *buf, void *data, int len)
+{
+	u8 *ptr = (u8 *)data;
+	int ret = 0;
+	int i = 0;
+
+	for (i = 0; i < len; i++)
+		ret += sprintf(buf + ret, "%02x ", *(ptr + i));
+
+	return ret;
+}
+
+#ifdef RNPGBE_HWMON
+static ssize_t rnpgbe_hwmon_show_location(struct device __always_unused *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct hwmon_attr *rnpgbe_attr =
+		container_of(attr, struct hwmon_attr, dev_attr);
+
+	return snprintf(buf, PAGE_SIZE, "loc%u\n",
+			rnpgbe_attr->sensor->location);
+}
+
+static ssize_t rnpgbe_hwmon_show_name(struct device __always_unused *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "rnpgbe\n");
+}
+
+static ssize_t rnpgbe_hwmon_show_temp(struct device __always_unused *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct hwmon_attr *rnpgbe_attr =
+		container_of(attr, struct hwmon_attr, dev_attr);
+	unsigned int value;
+
+	/* reset the temp field */
+	rnpgbe_attr->hw->ops.get_thermal_sensor_data(rnpgbe_attr->hw);
+
+	value = rnpgbe_attr->sensor->temp;
+	/* display millidegree */
+	value *= 1000;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", value);
+}
+
+static ssize_t
+rnpgbe_hwmon_show_cautionthresh(struct device __always_unused *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct hwmon_attr *rnpgbe_attr =
+		container_of(attr, struct hwmon_attr, dev_attr);
+	unsigned int value = rnpgbe_attr->sensor->caution_thresh;
+	/* display millidegree */
+	value *= 1000;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", value);
+}
+
+static ssize_t rnpgbe_hwmon_show_maxopthresh(struct device __always_unused *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	struct hwmon_attr *rnpgbe_attr =
+		container_of(attr, struct hwmon_attr, dev_attr);
+	unsigned int value = rnpgbe_attr->sensor->max_op_thresh;
+
+	/* display millidegree */
+	value *= 1000;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", value);
+}
+
+/**
+ * rnpgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
+ * @adapter: pointer to the adapter structure
+ * @offset: offset in the eeprom sensor data table
+ * @type: type of sensor data to display
+ *
+ * For each file we want in hwmon's sysfs interface we need a device_attribute
+ * This is included in our hwmon_attr struct that contains the references to
+ * the data structures we need to get the data to display.
+ */
+static int rnpgbe_add_hwmon_attr(struct rnpgbe_adapter *adapter,
+				 unsigned int offset, int type)
+{
+	unsigned int n_attr;
+	struct hwmon_attr *rnpgbe_attr;
+#ifdef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS
+
+	n_attr = adapter->rnpgbe_hwmon_buff->n_hwmon;
+	rnpgbe_attr = &adapter->rnpgbe_hwmon_buff->hwmon_list[n_attr];
+#else /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */
+	int rc;
+
+	n_attr = adapter->rnpgbe_hwmon_buff.n_hwmon;
+	rnpgbe_attr = &adapter->rnpgbe_hwmon_buff.hwmon_list[n_attr];
+#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */
+
+	switch (type) {
+	case RNPGBE_HWMON_TYPE_LOC:
+		rnpgbe_attr->dev_attr.show = rnpgbe_hwmon_show_location;
+		snprintf(rnpgbe_attr->name, sizeof(rnpgbe_attr->name),
+			 "temp%u_label", offset + 1);
+		break;
+	case RNPGBE_HWMON_TYPE_NAME:
+		rnpgbe_attr->dev_attr.show = rnpgbe_hwmon_show_name;
+		snprintf(rnpgbe_attr->name, sizeof(rnpgbe_attr->name), "name");
+		break;
+	case RNPGBE_HWMON_TYPE_TEMP:
+		rnpgbe_attr->dev_attr.show = rnpgbe_hwmon_show_temp;
+		snprintf(rnpgbe_attr->name, sizeof(rnpgbe_attr->name),
+			 "temp%u_input", offset + 1);
+		break;
+	case RNPGBE_HWMON_TYPE_CAUTION:
+		rnpgbe_attr->dev_attr.show = rnpgbe_hwmon_show_cautionthresh;
+		snprintf(rnpgbe_attr->name, sizeof(rnpgbe_attr->name),
+			 "temp%u_max", offset + 1);
+		break;
+	case RNPGBE_HWMON_TYPE_MAX:
+		rnpgbe_attr->dev_attr.show = rnpgbe_hwmon_show_maxopthresh;
+		snprintf(rnpgbe_attr->name, sizeof(rnpgbe_attr->name),
+			 "temp%u_crit", offset + 1);
+		break;
+	default:
+		return -EPERM;
+	}
+
+	/* These are always the same regardless of type */
+	rnpgbe_attr->sensor = &adapter->hw.thermal_sensor_data.sensor[offset];
+	rnpgbe_attr->hw = &adapter->hw;
+	rnpgbe_attr->dev_attr.store = NULL;
+	rnpgbe_attr->dev_attr.attr.mode = 0444;
+	rnpgbe_attr->dev_attr.attr.name = rnpgbe_attr->name;
+
+#ifdef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS
+	sysfs_attr_init(&rnpgbe_attr->dev_attr.attr);
+
+	adapter->rnpgbe_hwmon_buff->attrs[n_attr] = &rnpgbe_attr->dev_attr.attr;
+
+	++adapter->rnpgbe_hwmon_buff->n_hwmon;
+
+	return 0;
+#else /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */
+	rc = device_create_file(pci_dev_to_dev(adapter->pdev),
+				&rnpgbe_attr->dev_attr);
+
+	if (rc == 0)
+		++adapter->rnpgbe_hwmon_buff.n_hwmon;
+
+	return rc;
+#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */
+}
+#endif /* RNPGBE_HWMON */
+
+#define to_net_device(n) container_of(n, struct net_device, dev)
+
+#ifndef NO_BIT_ATTRS
+static ssize_t maintain_read(struct file *filp, struct kobject *kobj,
+			     struct bin_attribute *attr, char *buf, loff_t off,
+			     size_t count)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int rbytes = count;
+
+	if (adapter->maintain_buf == NULL)
+		return 0;
+
+	if (off + count > adapter->maintain_buf_len)
+		rbytes = adapter->maintain_buf_len - off;
+
+	memcpy(buf, adapter->maintain_buf + off, rbytes);
+
+	if ((off + rbytes) >= adapter->maintain_buf_len) {
+		kfree(adapter->maintain_buf);
+		adapter->maintain_buf = NULL;
+		adapter->maintain_buf_len = 0;
+	}
+
+	return rbytes;
+}
+
+static void n500_exchange_share_ram(struct rnpgbe_hw *hw,
+				    u32 *buf, int flag,
+				    int len)
+{
+	int i;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+
+	if (len > mbx->share_size)
+		return;
+	/* write */
+	if (flag) {
+		for (i = 0; i < len; i = i + 4)
+			rnpgbe_wr_reg(hw->hw_addr + mbx->cpu_vf_share_ram + i,
+				      *(buf + i / 4));
+	} else {
+		/* read */
+		for (i = 0; i < len; i = i + 4)
+			*(buf + i / 4) = rnpgbe_rd_reg(
+				hw->hw_addr + mbx->cpu_vf_share_ram + i);
+	}
+}
+
+static void n210_clean_share_ram(struct rnpgbe_hw *hw)
+{
+	int i;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	int len = mbx->share_size;
+
+	for (i = 0; i < len; i = i + 4)
+		rnpgbe_wr_reg(hw->hw_addr + mbx->cpu_vf_share_ram + i,
+			      0xffffffff);
+}
+
+static int check_fw_type(struct rnpgbe_hw *hw, const u8 *data)
+{
+	u32 device_id;
+	int ret = 0;
+
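+	/* The device ID used for this check is stored at 16-bit word
+	 * offset 30 (byte offset 60) of the firmware image.
+	 */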
+	device_id = *((u16 *)data + 30);
+
+	/* skip the check if no device ID is present */
+	if ((device_id == 0) || (device_id == 0xffff))
+		return 0;
+
+	switch (hw->hw_type) {
+	case rnpgbe_hw_n500:
+		if (device_id != 0x8308)
+			ret = 1;
+		break;
+	case rnpgbe_hw_n210:
+		if (device_id != 0x8208)
+			ret = 1;
+		break;
+	case rnpgbe_hw_n210L:
+		if (device_id != 0x820a)
+			ret = 1;
+		break;
+
+	default:
+		ret = 1;
+	}
+
+	return ret;
+}
+
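+/* The "maintain" bin attribute implements a simple request/reply
+ * protocol with the firmware: userspace writes a struct maintain_req
+ * header (MAINTAIN_MAGIC) followed by req_data_bytes of payload, which
+ * is staged in a DMA buffer and pushed through the shared mailbox RAM
+ * in share_size chunks; for read commands the reply is buffered and
+ * later returned by maintain_read().
+ */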
+static ssize_t maintain_write(struct file *filp, struct kobject *kobj,
+			      struct bin_attribute *attr, char *buf,
+			      loff_t off,
+			      size_t count)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	int err = -EINVAL;
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct maintain_req *req;
+	void *dma_buf = NULL;
+	dma_addr_t dma_phy;
+	int bytes;
+
+	if (off == 0) {
+		if (count < sizeof(*req))
+			return -EINVAL;
+		req = (struct maintain_req *)buf;
+		if (req->magic != MAINTAIN_MAGIC)
+			return -EINVAL;
+
+		bytes = max_t(int, req->req_data_bytes, req->reply_bytes);
+		bytes += sizeof(*req);
+
+		if (adapter->maintain_buf) {
+			kfree(adapter->maintain_buf);
+			adapter->maintain_buf = NULL;
+			adapter->maintain_buf_len = 0;
+		}
+
+		dma_buf = dma_alloc_coherent(&hw->pdev->dev, bytes, &dma_phy,
+					     GFP_ATOMIC);
+		if (!dma_buf) {
+			netdev_err(netdev, "%s: alloc dma_buf failed:%d!",
+				   __func__,
+				   bytes);
+			return -ENOMEM;
+		}
+
+		adapter->maintain_dma_buf = dma_buf;
+		adapter->maintain_dma_phy = dma_phy;
+		adapter->maintain_dma_size = bytes;
+		adapter->maintain_in_bytes = req->req_data_bytes + sizeof(*req);
+
+		memcpy(dma_buf + off, buf, count);
+
+		if (count < adapter->maintain_in_bytes)
+			return count;
+	}
+
+	dma_buf = adapter->maintain_dma_buf;
+	dma_phy = adapter->maintain_dma_phy;
+	req = (struct maintain_req *)dma_buf;
+	memcpy(dma_buf + off, buf, count);
+
+	/* all data got, send req */
+	if ((off + count) >= adapter->maintain_in_bytes) {
+		int reply_bytes = req->reply_bytes;
+		int offset;
+		struct rnpgbe_mbx_info *mbx = &hw->mbx;
+		/* check that the firmware image matches this device (n210/n500) */
+		if (req->cmd == 1) {
+			if (check_fw_type(hw, (u8 *)(dma_buf + sizeof(*req)))) {
+				err = -EINVAL;
+				goto err_quit;
+			}
+		}
+
+		if (req->cmd) {
+			int data_len;
+			int ram_size = mbx->share_size;
+
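+			/* Images larger than the shared RAM window are
+			 * pushed in share_size chunks; for flash updates
+			 * (cmd == 1) the first chunk is skipped here and
+			 * written last, presumably so a partially written
+			 * image never carries a valid header.
+			 */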
+			offset = 0;
+			if ((req->req_data_bytes > ram_size) &&
+			    (req->cmd == 1)) {
+				offset += ram_size;
+				/* on n210 clear the shared RAM header area first */
+				if ((hw->hw_type == rnpgbe_hw_n210) ||
+				    (hw->hw_type == rnpgbe_hw_n210L)) {
+					n210_clean_share_ram(hw);
+					err = rnpgbe_maintain_req(hw, req->cmd,
+							req->arg0,
+							0, 0, 0);
+					if (err != 0)
+						goto err_quit;
+				}
+			}
+
+			while (offset < req->req_data_bytes) {
+				data_len =
+					(req->req_data_bytes - offset) >
+					ram_size ?
+					ram_size :
+					(req->req_data_bytes -
+					 offset);
+				/* copy to ram */
+				n500_exchange_share_ram(hw,
+						(u32 *)(dma_buf + offset +
+							sizeof(*req)),
+						1, data_len);
+				err = rnpgbe_maintain_req(hw, req->cmd,
+						req->arg0,
+						offset, 0, 0);
+				if (err != 0)
+					goto err_quit;
+
+				offset += data_len;
+			}
+			/* for flash updates, write the header chunk last */
+			if ((req->req_data_bytes > ram_size) && (req->cmd == 1)) {
+				offset = 0;
+				data_len = ram_size;
+				/* copy to ram */
+				n500_exchange_share_ram(hw,
+						(u32 *)(dma_buf + offset +
+							sizeof(*req)),
+						1, data_len);
+				err = rnpgbe_maintain_req(hw, req->cmd,
+						req->arg0,
+						offset, 0, 0);
+				if (err != 0)
+					goto err_quit;
+			}
+
+		} else {
+			/* read request: copy the reply back from shared ram */
+			int data_len;
+			int ram_size = mbx->share_size;
+			struct maintain_reply reply;
+
+			adapter->maintain_buf_len = (reply_bytes + 3) & (~3);
+			adapter->maintain_buf = kmalloc(
+					adapter->maintain_buf_len, GFP_KERNEL);
+			if (!adapter->maintain_buf) {
+				netdev_err(netdev, "alloc failed for maintain buf:%d\n",
+					   adapter->maintain_buf_len);
+				err = -ENOMEM;
+
+				goto err_quit;
+			}
+			reply.magic = MAINTAIN_REPLY_MAGIC;
+			reply.cmd = req->cmd;
+			reply.arg0 = req->arg0;
+			reply.data_bytes = req->reply_bytes;
+			memcpy(adapter->maintain_buf, &reply,
+					sizeof(struct maintain_reply));
+
+			reply_bytes = reply_bytes - sizeof(*req);
+			/* copy req first */
+			offset = 0;
+			while (offset < reply_bytes) {
+				data_len = (reply_bytes - offset) >
+					   ram_size ?
+					   ram_size :
+					   (reply_bytes - offset);
+				err = rnpgbe_maintain_req(hw, req->cmd,
+						req->arg0, 0,
+						offset, 0);
+				if (err != 0)
+					goto err_quit;
+				/* copy to ram */
+				n500_exchange_share_ram(hw,
+						(u32 *)(adapter->maintain_buf +
+							offset + sizeof(*req)),
+						0, data_len);
+				offset += data_len;
+			}
+		}
+		if (dma_buf) {
+			dma_free_coherent(&hw->pdev->dev,
+					adapter->maintain_dma_size,
+					dma_buf, dma_phy);
+		}
+		adapter->maintain_dma_buf = NULL;
+
+	}
+
+	return count;
+err_quit:
+	if (dma_buf) {
+		dma_free_coherent(&hw->pdev->dev, adapter->maintain_dma_size,
+				dma_buf, dma_phy);
+		adapter->maintain_dma_buf = NULL;
+	}
+	return err;
+}
+
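+/* "maintain" is a 1 MiB binary sysfs attribute; writes feed requests to
+ * maintain_write() above and reads return the reply buffered in
+ * adapter->maintain_buf
+ */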
+static BIN_ATTR(maintain, 0644, maintain_read, maintain_write, 1 * 1024 * 1024);
+#endif
+
+static ssize_t version_info_show(struct device *dev, struct device_attribute *attr,
+				 char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int ret = 0;
+
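+	/* fw_version is printed as its four bytes, most significant first;
+	 * the hex field is the board UID with the sfc/pxe/ncsi/trim status
+	 * flags ORed into its top bits
+	 */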
+	ret += sprintf(buf + ret, "drver: %s %s\n",
+			rnpgbe_driver_version, GIT_COMMIT);
+
+	ret += sprintf(buf + ret, "fw   : %d.%d.%d.%d 0x%08x\n",
+		       ((char *)&(hw->fw_version))[3],
+		       ((char *)&(hw->fw_version))[2],
+		       ((char *)&(hw->fw_version))[1],
+		       ((char *)&(hw->fw_version))[0],
+		       hw->bd_uid | (hw->sfc_boot ? 0x80000000 : 0) |
+		       (hw->pxe_en ? 0x40000000 : 0) |
+		       (hw->ncsi_en ? 0x20000000 : 0) |
+		       (hw->trim_valid ? 0x10000000 : 0));
+
+	return ret;
+}
+
+#ifdef TEST_PF_RESET
+static ssize_t test_info_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	int ret = 0;
+	int i;
+	struct rnpgbe_q_vector *q_vector;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		q_vector = adapter->q_vector[i];
+		ret += sprintf(buf + ret, "q_vector %d itr  %d\n",
+				q_vector->v_idx, q_vector->itr_rx >> 2);
+	}
+
+	return ret;
+}
+
+static ssize_t test_info_store(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf,
+			       size_t count)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = count;
+
+	return ret;
+}
+#endif
+
+static ssize_t rx_desc_info_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	u32 rx_ring_num = adapter->sysfs_rx_ring_num;
+	u32 rx_desc_num = adapter->sysfs_rx_desc_num;
+	struct rnpgbe_ring *ring = adapter->rx_ring[rx_ring_num];
+	int ret = 0;
+	union rnpgbe_rx_desc *desc;
+
+	if (test_bit(__RNP_DOWN, &adapter->state)) {
+		ret += sprintf(buf + ret, "port not up\n");
+
+		return ret;
+	}
+
+	desc = RNP_RX_DESC(ring, rx_desc_num);
+	ret += sprintf(buf + ret, "rx ring %d desc %d:\n", rx_ring_num,
+			rx_desc_num);
+	ret += print_desc(buf + ret, desc, sizeof(*desc));
+	ret += sprintf(buf + ret, "\n");
+
+	return ret;
+}
+
+static ssize_t rx_desc_info_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = count;
+
+	u32 rx_desc_num = adapter->sysfs_rx_desc_num;
+	u32 rx_ring_num = adapter->sysfs_rx_ring_num;
+
+	struct rnpgbe_ring *ring = adapter->rx_ring[rx_ring_num];
+
+	if (kstrtou32(buf, 0, &rx_desc_num) != 0)
+		return -EINVAL;
+	if (rx_desc_num < ring->count)
+		adapter->sysfs_rx_desc_num = rx_desc_num;
+	else
+		ret = -EINVAL;
+
+	return ret;
+}
+
+static ssize_t tcp_sync_info_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = 0;
+
+	if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC)
+		ret += sprintf(buf + ret, "tcp syn to queue %d prio %s\n",
+			       adapter->tcp_sync_queue,
+			       (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC_PRIO) ?
+			       "ON" :
+			       "OFF");
+	else
+		ret += sprintf(buf + ret, "tcp sync remap off\n");
+
+	return ret;
+}
+
+static ssize_t tcp_sync_info_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int ret = count;
+	u32 tcp_sync_queue;
+
+	if (kstrtou32(buf, 0, &tcp_sync_queue) != 0)
+		return -EINVAL;
+
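+	/* a queue index below num_rx_queues enables TCP SYN remapping to that
+	 * queue (with priority if the PRIO flag is set); any larger value
+	 * turns the remapping off
+	 */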
+	if (tcp_sync_queue < adapter->num_rx_queues) {
+		adapter->tcp_sync_queue = tcp_sync_queue;
+		adapter->priv_flags |= RNP_PRIV_FLAG_TCP_SYNC;
+
+		if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC_PRIO)
+			hw->ops.set_tcp_sync_remapping(hw,
+						       adapter->tcp_sync_queue,
+						       true, true);
+		else
+			hw->ops.set_tcp_sync_remapping(hw,
+						       adapter->tcp_sync_queue,
+						       true, false);
+
+	} else {
+		adapter->priv_flags &= ~RNP_PRIV_FLAG_TCP_SYNC;
+
+		hw->ops.set_tcp_sync_remapping(hw, adapter->tcp_sync_queue,
+				false, false);
+	}
+
+	return ret;
+}
+
+static ssize_t rx_skip_info_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = 0;
+
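+	/* priv_skip_count is kept in 16-byte units minus one, hence the
+	 * 16 * (count + 1) conversion below
+	 */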
+	if (adapter->priv_flags & RNP_PRIV_FLAG_RX_SKIP_EN)
+		ret += sprintf(buf + ret, "rx skip bytes: %d\n",
+			       16 * (adapter->priv_skip_count + 1));
+	else
+		ret += sprintf(buf + ret, "rx skip off\n");
+
+	return ret;
+}
+
+static ssize_t rx_drop_info_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = 0;
+
+	ret += sprintf(buf + ret, "rx_drop_status %llx\n",
+		       adapter->rx_drop_status);
+
+	return ret;
+}
+
+static ssize_t rx_drop_info_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int ret = count;
+	u64 rx_drop_status;
+
+	if (kstrtou64(buf, 0, &rx_drop_status) != 0)
+		return -EINVAL;
+
+	adapter->rx_drop_status = rx_drop_status;
+
+	hw->ops.update_rx_drop(hw);
+
+	return ret;
+}
+
+static ssize_t outer_vlan_info_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = 0;
+
+	if (adapter->priv_flags & RNP_PRIV_FLAG_DOUBLE_VLAN)
+		ret += sprintf(buf + ret, "double vlan on\n");
+	else
+		ret += sprintf(buf + ret, "double vlan off\n");
+
+	switch (adapter->outer_vlan_type) {
+	case outer_vlan_type_88a8:
+		ret += sprintf(buf + ret, "outer vlan 0x88a8\n");
+
+		break;
+#ifdef ETH_P_QINQ1
+	case outer_vlan_type_9100:
+		ret += sprintf(buf + ret, "outer vlan 0x9100\n");
+
+		break;
+#endif
+#ifdef ETH_P_QINQ2
+	case outer_vlan_type_9200:
+		ret += sprintf(buf + ret, "outer vlan 0x9200\n");
+
+		break;
+#endif
+	default:
+		ret += sprintf(buf + ret, "outer vlan error\n");
+		break;
+	}
+	return ret;
+}
+
+static ssize_t outer_vlan_info_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int ret = count;
+	u32 outer_vlan_type;
+
+	if (kstrtou32(buf, 0, &outer_vlan_type) != 0)
+		return -EINVAL;
+	if (outer_vlan_type < outer_vlan_type_max)
+		adapter->outer_vlan_type = outer_vlan_type;
+	else
+		ret = -EINVAL;
+	/* program only the validated value so an out-of-range write
+	 * cannot reach the hardware
+	 */
+	if (hw->ops.set_outer_vlan_type)
+		hw->ops.set_outer_vlan_type(hw, adapter->outer_vlan_type);
+
+	return ret;
+}
+
+static ssize_t tx_stags_info_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = 0;
+
+	if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED)
+		ret += sprintf(buf + ret, "tx stags on\n");
+	else
+		ret += sprintf(buf + ret, "tx stags off\n");
+
+	ret += sprintf(buf + ret, "vid 0x%x\n", adapter->stags_vid);
+
+	return ret;
+}
+
+static ssize_t tx_stags_info_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	int ret = count;
+	u16 tx_stags;
+
+	if (kstrtou16(buf, 0, &tx_stags) != 0)
+		return -EINVAL;
+	if (tx_stags < VLAN_N_VID)
+		adapter->stags_vid = tx_stags;
+	else
+		ret = -EINVAL;
+
+	eth->ops.set_vfta(eth, adapter->stags_vid, true);
+
+	return ret;
+}
+
+static ssize_t gephy_test_info_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = 0;
+
+	if (adapter->gephy_test_mode)
+		ret += sprintf(buf + ret, "gephy_test on: %d\n",
+			       adapter->gephy_test_mode);
+	else
+		ret += sprintf(buf + ret, "gephy_test off\n");
+
+	return ret;
+}
+
+static ssize_t gephy_test_info_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int ret = count;
+	u32 test_mode;
+
+#define MAX_MODE (5)
+	if (kstrtou32(buf, 0, &test_mode) != 0)
+		return -EINVAL;
+	if (test_mode < MAX_MODE)
+		adapter->gephy_test_mode = test_mode;
+	else
+		ret = -EINVAL;
+
+	rnpgbe_mbx_gephy_test_set(hw, test_mode);
+
+	return ret;
+}
+
+static ssize_t tx_desc_info_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	u32 tx_ring_num = adapter->sysfs_tx_ring_num;
+	u32 tx_desc_num = adapter->sysfs_tx_desc_num;
+	struct rnpgbe_ring *ring = adapter->tx_ring[tx_ring_num];
+	int ret = 0;
+	struct rnpgbe_tx_desc *desc;
+
+	if (test_bit(__RNP_DOWN, &adapter->state)) {
+		ret += sprintf(buf + ret, "port not up\n");
+
+		return ret;
+	}
+
+	desc = RNP_TX_DESC(ring, tx_desc_num);
+	ret += sprintf(buf + ret, "tx ring %d desc %d:\n", tx_ring_num,
+		       tx_desc_num);
+	ret += print_desc(buf + ret, desc, sizeof(*desc));
+	ret += sprintf(buf + ret, "\n");
+
+	return ret;
+}
+
+static ssize_t tx_desc_info_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = count;
+	u32 tx_desc_num = adapter->sysfs_tx_desc_num;
+	u32 tx_ring_num = adapter->sysfs_tx_ring_num;
+	struct rnpgbe_ring *ring = adapter->tx_ring[tx_ring_num];
+
+	if (kstrtou32(buf, 0, &tx_desc_num) != 0)
+		return -EINVAL;
+	if (tx_desc_num < ring->count)
+		adapter->sysfs_tx_desc_num = tx_desc_num;
+	else
+		ret = -EINVAL;
+
+	return ret;
+}
+
+static ssize_t rx_ring_info_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	u32 rx_ring_num = adapter->sysfs_rx_ring_num;
+	struct rnpgbe_ring *ring = adapter->rx_ring[rx_ring_num];
+	int ret = 0;
+	union rnpgbe_rx_desc *rx_desc;
+
+	if (test_bit(__RNP_DOWN, &adapter->state)) {
+		ret += sprintf(buf + ret, "port not up\n");
+
+		return ret;
+	}
+
+	ret += sprintf(buf + ret, "queue %d info:\n", rx_ring_num);
+	ret += sprintf(buf + ret, "next_to_use %d\n", ring->next_to_use);
+	ret += sprintf(buf + ret, "next_to_clean %d\n", ring->next_to_clean);
+	rx_desc = RNP_RX_DESC(ring, ring->next_to_clean);
+	ret += sprintf(buf + ret, "next_to_clean desc: ");
+	ret += print_desc(buf + ret, rx_desc, sizeof(*rx_desc));
+	ret += sprintf(buf + ret, "\n");
+
+	return ret;
+}
+
+static ssize_t rx_ring_info_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = count;
+
+	u32 rx_ring_num = adapter->sysfs_rx_ring_num;
+
+	if (kstrtou32(buf, 0, &rx_ring_num) != 0)
+		return -EINVAL;
+	if (rx_ring_num < adapter->num_rx_queues)
+		adapter->sysfs_rx_ring_num = rx_ring_num;
+	else
+		ret = -EINVAL;
+
+	return ret;
+}
+
+static ssize_t tx_ring_info_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	u32 tx_ring_num = adapter->sysfs_tx_ring_num;
+	struct rnpgbe_ring *ring = adapter->tx_ring[tx_ring_num];
+	int ret = 0;
+	struct rnpgbe_tx_buffer *tx_buffer;
+	struct rnpgbe_tx_desc *eop_desc;
+
+	if (test_bit(__RNP_DOWN, &adapter->state)) {
+		ret += sprintf(buf + ret, "port not up\n");
+
+		return ret;
+	}
+
+	ret += sprintf(buf + ret, "queue %d info:\n", tx_ring_num);
+	ret += sprintf(buf + ret, "next_to_use %d\n", ring->next_to_use);
+	ret += sprintf(buf + ret, "next_to_clean %d\n", ring->next_to_clean);
+
+	tx_buffer = &ring->tx_buffer_info[ring->next_to_clean];
+	eop_desc = tx_buffer->next_to_watch;
+	/* if have watch desc */
+	if (eop_desc) {
+		ret += sprintf(buf + ret, "next_to_watch:\n");
+		ret += print_desc(buf + ret, eop_desc, sizeof(*eop_desc));
+		ret += sprintf(buf + ret, "\n");
+	} else {
+		ret += sprintf(buf + ret, "no next_to_watch data\n");
+	}
+
+	return ret;
+}
+
+static ssize_t tx_ring_info_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = count;
+
+	u32 tx_ring_num = adapter->sysfs_tx_ring_num;
+
+	if (kstrtou32(buf, 0, &tx_ring_num) != 0)
+		return -EINVAL;
+
+	if (tx_ring_num < adapter->num_tx_queues)
+		adapter->sysfs_tx_ring_num = tx_ring_num;
+	else
+		ret = -EINVAL;
+
+	return ret;
+}
+
+static ssize_t active_vid_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+#ifndef HAVE_VLAN_RX_REGISTER
+	u16 vid;
+#endif /* HAVE_VLAN_RX_REGISTER */
+	u16 current_vid = 0;
+	int ret = 0;
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u8 vfnum = hw->max_vfs - 1;
+	/* use the last VF's VEB table entry */
+
+	if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED)) {
+		current_vid = rd32(hw, RNP_DMA_PORT_VEB_VID_TBL(adapter->port,
+								vfnum));
+	}
+
+#ifndef HAVE_VLAN_RX_REGISTER
+	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) {
+		ret += sprintf(buf + ret, "%u%s ", vid,
+			       (current_vid == vid ? "*" : ""));
+	}
+#endif /* HAVE_VLAN_RX_REGISTER */
+	ret += sprintf(buf + ret, "\n");
+	return ret;
+}
+
+static ssize_t active_vid_store(struct device *dev,
+				struct device_attribute *attr, const char *buf,
+				size_t count)
+{
+	u16 vid;
+	int err = -EINVAL;
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+#ifndef HAVE_VLAN_RX_REGISTER
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u8 vfnum = hw->max_vfs - 1;
+	/* use the last VF's VEB table entry */
+	int port = 0;
+#endif /* HAVE_VLAN_RX_REGISTER */
+
+	if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+		return -EIO;
+
+	if (kstrtou16(buf, 0, &vid) != 0)
+		return -EINVAL;
+
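+	/* only a VID that is already active on the interface may be chosen;
+	 * newer DMA versions program it into every port's VEB VID table,
+	 * older ones only into this port's entry
+	 */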
+#ifndef HAVE_VLAN_RX_REGISTER
+	if ((vid < 4096) && test_bit(vid, adapter->active_vlans)) {
+		if (rd32(hw, RNP_DMA_VERSION) >= 0x20201231) {
+			for (port = 0; port < 4; port++)
+				wr32(hw, RNP_DMA_PORT_VEB_VID_TBL(port, vfnum),
+				     vid);
+		} else {
+			wr32(hw, RNP_DMA_PORT_VEB_VID_TBL(adapter->port, vfnum),
+			     vid);
+		}
+		err = 0;
+	}
+#endif /* HAVE_VLAN_RX_REGISTER */
+
+	return err ? err : count;
+}
+
+static ssize_t port_idx_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	int ret = 0;
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	ret += sprintf(buf, "%d\n", adapter->portid_of_card);
+	return ret;
+}
+
+static DEVICE_ATTR(port_idx, 0644, port_idx_show, NULL);
+
+static ssize_t debug_link_stat_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	int ret = 0;
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	ret += sprintf(buf, "%d %d dumy:0x%x up-flag:%d carry:%d\n",
+		       adapter->link_up, adapter->hw.link, rd32(hw, 0xc),
+		       adapter->flags & RNP_FLAG_NEED_LINK_UPDATE,
+		       netif_carrier_ok(netdev));
+	return ret;
+}
+
+static DEVICE_ATTR(debug_link_stat, 0644, debug_link_stat_show, NULL);
+
+static ssize_t pci_store(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	int err = -EINVAL;
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int gen = 3, lanes = 8;
+
+	if (count > 30)
+		return -EINVAL;
+
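+	/* expected input is "gen<G>x<L>", e.g. "gen3x8"; the request is
+	 * handed to rnpgbe_set_lane_fun() with LANE_FUN_PCI_LANE
+	 */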
+	if (sscanf(buf, "gen%dx%d", &gen, &lanes) != 2) {
+		netdev_err(netdev, "invalid input, expected format like gen3x8\n");
+		return -EINVAL;
+	}
+	if (gen > 3 || lanes > 8)
+		return -EINVAL;
+
+	err = rnpgbe_set_lane_fun(hw, LANE_FUN_PCI_LANE, gen, lanes, 0, 0);
+
+	return err ? err : count;
+}
+
+static ssize_t pci_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	int ret = 0;
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	if (rnpgbe_mbx_get_lane_stat(hw) != 0)
+		ret += sprintf(buf, " IO Error\n");
+	else
+		ret += sprintf(buf, "gen%dx%d\n", hw->pci_gen, hw->pci_lanes);
+
+	return ret;
+}
+
+static DEVICE_ATTR(pci, 0644, pci_show, pci_store);
+
+static ssize_t temperature_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int ret = 0, temp = 0, voltage = 0;
+
+	temp = rnpgbe_mbx_get_temp(hw, &voltage);
+
+	ret += sprintf(buf, "temp:%d oC \n", temp);
+	return ret;
+}
+
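+/* walk up dev->bus->self until a PCIe root port is found;
+ * returns NULL if the device is not behind one
+ */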
+static struct pci_dev *pcie_find_root_port_old(struct pci_dev *dev)
+{
+	while (1) {
+		if (!pci_is_pcie(dev))
+			break;
+		if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+			return dev;
+		if (!dev->bus->self)
+			break;
+		dev = dev->bus->self;
+	}
+	return NULL;
+}
+
+static ssize_t root_slot_info_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = 0;
+	struct pci_dev *root_pdev = pcie_find_root_port_old(adapter->pdev);
+
+	if (root_pdev) {
+		ret += sprintf(buf + ret, "%02x:%02x.%x\n",
+			       root_pdev->bus->number,
+			       PCI_SLOT(root_pdev->devfn),
+			       PCI_FUNC(root_pdev->devfn));
+	}
+	return ret;
+}
+
+static DEVICE_ATTR(root_slot_info, 0644, root_slot_info_show, NULL);
+static DEVICE_ATTR(temperature, 0644, temperature_show, NULL);
+static DEVICE_ATTR(active_vid, 0644, active_vid_show, active_vid_store);
+static DEVICE_ATTR(tx_ring_info, 0644, tx_ring_info_show, tx_ring_info_store);
+static DEVICE_ATTR(rx_ring_info, 0644, rx_ring_info_show, rx_ring_info_store);
+static DEVICE_ATTR(tx_desc_info, 0644, tx_desc_info_show, tx_desc_info_store);
+static DEVICE_ATTR(rx_desc_info, 0644, rx_desc_info_show, rx_desc_info_store);
+static DEVICE_ATTR(rx_drop_info, 0644, rx_drop_info_show, rx_drop_info_store);
+static DEVICE_ATTR(outer_vlan_info, 0644, outer_vlan_info_show,
+		   outer_vlan_info_store);
+static DEVICE_ATTR(tcp_sync_info, 0644, tcp_sync_info_show,
+		   tcp_sync_info_store);
+static DEVICE_ATTR(rx_skip_info, 0644, rx_skip_info_show, NULL);
+static DEVICE_ATTR(tx_stags_info, 0644, tx_stags_info_show,
+		   tx_stags_info_store);
+static DEVICE_ATTR(gephy_test_info, 0644, gephy_test_info_show,
+		   gephy_test_info_store);
+#ifdef TEST_PF_RESET
+static DEVICE_ATTR(test_info, 0644, test_info_show, test_info_store);
+#endif
+static DEVICE_ATTR(version_info, 0644, version_info_show, NULL);
+
+static struct attribute *vendor_dev_attrs[] = {
+	&dev_attr_pci.attr,
+	&dev_attr_temperature.attr,
+	&dev_attr_tx_ring_info.attr,
+	&dev_attr_rx_ring_info.attr,
+	&dev_attr_tx_desc_info.attr,
+	&dev_attr_rx_desc_info.attr,
+	&dev_attr_tcp_sync_info.attr,
+	&dev_attr_rx_drop_info.attr,
+	&dev_attr_outer_vlan_info.attr,
+	&dev_attr_rx_skip_info.attr,
+	&dev_attr_debug_link_stat.attr,
+	NULL,
+};
+
+static struct attribute *dev_attrs[] = {
+	&dev_attr_tx_stags_info.attr,
+	&dev_attr_gephy_test_info.attr,
+#ifdef TEST_PF_RESET
+	&dev_attr_test_info.attr,
+#endif
+	&dev_attr_version_info.attr,
+	&dev_attr_root_slot_info.attr,
+	&dev_attr_active_vid.attr,
+	&dev_attr_port_idx.attr,
+	NULL,
+};
+
+#ifndef NO_BIT_ATTRS
+static struct bin_attribute *dev_bin_attrs[] = {
+	&bin_attr_maintain,
+	NULL,
+};
+#endif /* NO_BIT_ATTRS */
+static struct attribute_group dev_attr_grp = {
+	.attrs = dev_attrs,
+#ifndef NO_BIT_ATTRS
+	.bin_attrs = dev_bin_attrs,
+#endif /* NO_BIT_ATTRS */
+};
+
+static const struct attribute_group vendor_attr_grp = {
+	.name = "vendor",
+	.attrs = vendor_dev_attrs,
+};
+
+static const struct attribute_group *attr_grps[] = {
+	&dev_attr_grp,
+	&vendor_attr_grp,
+	NULL,
+};
+
+static void
+rnpgbe_sysfs_del_adapter(struct rnpgbe_adapter __maybe_unused *adapter)
+{
+#ifdef RNPGBE_HWMON
+#ifndef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS
+	int i;
+
+	if (adapter == NULL)
+		return;
+
+	for (i = 0; i < adapter->rnpgbe_hwmon_buff.n_hwmon; i++) {
+		device_remove_file(
+			pci_dev_to_dev(adapter->pdev),
+			&adapter->rnpgbe_hwmon_buff.hwmon_list[i].dev_attr);
+	}
+
+	kfree(adapter->rnpgbe_hwmon_buff.hwmon_list);
+
+	if (adapter->rnpgbe_hwmon_buff.device)
+		hwmon_device_unregister(adapter->rnpgbe_hwmon_buff.device);
+#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */
+#endif /* RNPGBE_HWMON */
+}
+
+/* called from rnpgbe_main.c */
+void rnpgbe_sysfs_exit(struct rnpgbe_adapter *adapter)
+{
+	rnpgbe_sysfs_del_adapter(adapter);
+	sysfs_remove_groups(&adapter->netdev->dev.kobj, &attr_grps[0]);
+	if (adapter->maintain_buf) {
+		kfree(adapter->maintain_buf);
+		adapter->maintain_buf = NULL;
+		adapter->maintain_buf_len = 0;
+	}
+}
+
+/* called from rnpgbe_main.c */
+int rnpgbe_sysfs_init(struct rnpgbe_adapter *adapter)
+{
+	int rc = 0;
+	int flag;
+#ifdef RNPGBE_HWMON
+#ifdef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS
+	struct hwmon_buff *rnpgbe_hwmon;
+	struct device *hwmon_dev;
+#else /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */
+	struct hwmon_buff *rnpgbe_hwmon = &adapter->rnpgbe_hwmon_buff;
+	int n_attrs;
+#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */
+	unsigned int i;
+#endif /* RNPGBE_HWMON */
+
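+	/* register the base attribute group plus the "vendor" sub-group
+	 * (attr_grps is NULL-terminated) under the netdev's kobject
+	 */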
+	flag = sysfs_create_groups(&adapter->netdev->dev.kobj, &attr_grps[0]);
+	if (flag != 0) {
+		dev_err(&adapter->netdev->dev,
+			"sysfs_create_groups failed: %d\n", flag);
+		return flag;
+	}
+#ifdef RNPGBE_HWMON
+	/* If this method isn't defined we don't support thermals */
+	if (adapter->hw.ops.init_thermal_sensor_thresh == NULL)
+		goto no_thermal;
+
+	/* Don't create thermal hwmon interface if no sensors present */
+	if (adapter->hw.ops.init_thermal_sensor_thresh(&adapter->hw))
+		goto no_thermal;
+
+#ifdef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS
+	rnpgbe_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*rnpgbe_hwmon),
+				    GFP_KERNEL);
+
+	if (!rnpgbe_hwmon) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	adapter->rnpgbe_hwmon_buff = rnpgbe_hwmon;
+#else /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */
+	/*
+	 * Allocate space for the maximum number of attributes:
+	 * max num sensors * values (loc, temp, max, caution)
+	 */
+	n_attrs = RNPGBE_MAX_SENSORS * 4;
+	rnpgbe_hwmon->hwmon_list =
+		kcalloc(n_attrs, sizeof(struct hwmon_attr), GFP_KERNEL);
+
+	if (!rnpgbe_hwmon->hwmon_list) {
+		rc = -ENOMEM;
+		goto err;
+	}
+#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */
+
+	for (i = 0; i < RNPGBE_MAX_SENSORS; i++) {
+		/*
+		 * Only create hwmon sysfs entries for sensors that have
+		 * meaningful data.
+		 */
+		if (adapter->hw.thermal_sensor_data.sensor[i].location == 0)
+			continue;
+
+		/* Bail if any hwmon attr struct fails to initialize */
+		rc = rnpgbe_add_hwmon_attr(adapter, i,
+					   RNPGBE_HWMON_TYPE_CAUTION);
+		if (rc)
+			goto err;
+		rc = rnpgbe_add_hwmon_attr(adapter, i, RNPGBE_HWMON_TYPE_LOC);
+		if (rc)
+			goto err;
+		rc = rnpgbe_add_hwmon_attr(adapter, i, RNPGBE_HWMON_TYPE_TEMP);
+		if (rc)
+			goto err;
+		rc = rnpgbe_add_hwmon_attr(adapter, i, RNPGBE_HWMON_TYPE_MAX);
+		if (rc)
+			goto err;
+	}
+
+#ifdef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS
+	rnpgbe_hwmon->groups[0] = &rnpgbe_hwmon->group;
+	rnpgbe_hwmon->group.attrs = rnpgbe_hwmon->attrs;
+
+	hwmon_dev = devm_hwmon_device_register_with_groups(
+		&adapter->pdev->dev, "rnpgbe", rnpgbe_hwmon, rnpgbe_hwmon->groups);
+
+	if (IS_ERR(hwmon_dev)) {
+		rc = PTR_ERR(hwmon_dev);
+		goto exit;
+	}
+
+#else /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */
+	rnpgbe_hwmon->device =
+		hwmon_device_register(pci_dev_to_dev(adapter->pdev));
+
+	if (IS_ERR(rnpgbe_hwmon->device)) {
+		rc = PTR_ERR(rnpgbe_hwmon->device);
+		goto err;
+	}
+
+#endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */
+no_thermal:
+#endif /* RNPGBE_HWMON */
+	goto exit;
+
+err:
+	rnpgbe_sysfs_exit(adapter);
+exit:
+	return rc;
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_type.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_type.h
new file mode 100755
index 0000000000000000000000000000000000000000..db9dc60ad18ae3dedc1994aa0997dc8f6f9efe13
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_type.h
@@ -0,0 +1,1346 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPGBE_TYPE_H_
+#define _RNPGBE_TYPE_H_
+
+#include <linux/types.h>
+#include <linux/mdio.h>
+#include <linux/netdevice.h>
+
+//#define OPTM_WITH_LPAGE
+
+#if defined(CONFIG_MGBE_OPTM_WITH_LPAGE) && !defined(OPTM_WITH_LPAGE)
+#define OPTM_WITH_LPAGE
+#endif
+
+#if defined(CONFIG_MXGBE_MSIX_COUNT)
+#define RNP_N10_MSIX_VECTORS CONFIG_MXGBE_MSIX_COUNT
+#endif
+
+//#define DISABLE_PACKET_SPLIT
+
+/* On Kylin OS, consider enabling OPTM_WITH_LPAGE to reduce memory cost */
+#if (PAGE_SIZE < 8192)
+/* OPTM_WITH_LPAGE cannot be used when PAGE_SIZE is smaller than 8192 */
+#ifdef OPTM_WITH_LPAGE
+#undef OPTM_WITH_LPAGE
+#endif
+#endif
+
+/* OPTM_WITH_LPAGE should never be defined along
+ * with CONFIG_RNP_DISABLE_PACKET_SPLIT
+ */
+
+#include "rnpgbe_regs.h"
+#include "rnp_compat.h"
+
+/* Device IDs */
+#define PCI_VENDOR_ID_MUCSE 0x8848
+#define PCI_DEVICE_ID_N10_PF0 0x1000
+#define PCI_DEVICE_ID_N10_PF1 0x1001
+
+#define RNP_DEV_ID_N10_PF0 0x7001
+#define RNP_DEV_ID_N10_PF1 0x7002
+
+#define PCI_DEVICE_ID_N10 0x1000
+#define PCI_DEVICE_ID_N10C 0x1C00
+#define PCI_DEVICE_ID_N400 0x1001
+#define PCI_DEVICE_ID_N500_QUAD_PORT 0x8308
+#define PCI_DEVICE_ID_N500_DUAL_PORT 0x8318
+#define PCI_DEVICE_ID_N500_VF 0x8309
+#define PCI_DEVICE_ID_N210 0x8208
+#define PCI_DEVICE_ID_N210L 0x820a
+/* Wake Up Control */
+#define RNP_WUC_PME_EN 0x00000002 /* PME Enable */
+#define RNP_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define RNP_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion  */
+
+/* Wake Up Filter Control */
+#define RNP_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define RNP_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define RNP_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define RNP_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define RNP_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define RNP_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define RNP_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define RNP_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define RNP_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */
+
+#define RNP_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
+#define RNP_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define RNP_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define RNP_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define RNP_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define RNP_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
+#define RNP_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
+#define RNP_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
+#define RNP_WUFC_FLX_FILTERS_6 0x003F0000 /* Mask for 6 flex filters */
+#define RNP_WUFC_FLX_FILTERS_8 0x00FF0000 /* Mask for 8 flex filters */
+#define RNP_WUFC_FW_RST_WK 0x80000000 /* Ena wake on FW reset assertion */
+/* Mask for Ext. flex filters */
+#define RNP_WUFC_EXT_FLX_FILTERS 0x00300000
+#define RNP_WUFC_ALL_FILTERS 0x000F00FF /* Mask all 4 flex filters */
+#define RNP_WUFC_ALL_FILTERS_6 0x003F00FF /* Mask all 6 flex filters */
+#define RNP_WUFC_ALL_FILTERS_8 0x00FF00FF /* Mask all 8 flex filters */
+#define RNP_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
+
+#define ADVERTISE_10_HALF 0x0001
+#define ADVERTISE_10_FULL 0x0002
+#define ADVERTISE_100_HALF 0x0004
+#define ADVERTISE_100_FULL 0x0008
+#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL 0x0020
+#define ADVERTISE_2500_HALF 0x0040 /* NOT used, just FYI */
+#define ADVERTISE_2500_FULL 0x0080
+
+#define RNPGBE_MAX_SENSORS 1
+struct rnpgbe_thermal_diode_data {
+	unsigned int location;
+	unsigned int temp;
+	unsigned int caution_thresh;
+	unsigned int max_op_thresh;
+};
+
+struct rnpgbe_thermal_sensor_data {
+	struct rnpgbe_thermal_diode_data sensor[RNPGBE_MAX_SENSORS];
+};
+
+/* Proxy Status */
+#define RNP_PROXYS_EX 0x00000004 /* Exact packet received */
+#define RNP_PROXYS_ARP_DIR 0x00000020 /* ARP w/filter match received */
+#define RNP_PROXYS_NS 0x00000200 /* IPV6 NS received */
+#define RNP_PROXYS_NS_DIR 0x00000400 /* IPV6 NS w/DA match received */
+#define RNP_PROXYS_ARP 0x00000800 /* ARP request packet received */
+#define RNP_PROXYS_MLD 0x00001000 /* IPv6 MLD packet received */
+
+/* Proxying Filter Control */
+#define RNP_PROXYFC_ENABLE 0x00000001 /* Port Proxying Enable */
+#define RNP_PROXYFC_EX 0x00000004 /* Directed Exact Proxy Enable */
+#define RNP_PROXYFC_ARP_DIR 0x00000020 /* Directed ARP Proxy Enable */
+#define RNP_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */
+#define RNP_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Enable */
+#define RNP_PROXYFC_MLD 0x00000800 /* IPv6 MLD Proxy Enable */
+#define RNP_PROXYFC_NO_TCO 0x00008000 /* Ignore TCO packets */
+
+#define RNP_WUPL_LENGTH_MASK 0xFFFF
+
+/* max 4 in n10 */
+#define RNP_MAX_TRAFFIC_CLASS 4
+#define TSRN10_TX_DEFAULT_BURST 8
+
+#ifndef TSRN10_RX_DEFAULT_BURST
+#define TSRN10_RX_DEFAULT_BURST 16
+#endif
+
+#ifndef TSRN10_RX_DEFAULT_LINE
+#define TSRN10_RX_DEFAULT_LINE 32
+#endif
+
+#ifndef RNP_PKT_TIMEOUT
+#define RNP_PKT_TIMEOUT 30
+#endif
+
+#ifndef RNP_RX_PKT_POLL_BUDGET
+#define RNP_RX_PKT_POLL_BUDGET 64
+#endif
+
+#ifndef RNP_TX_PKT_POLL_BUDGET
+#define RNP_TX_PKT_POLL_BUDGET 0x30
+#endif
+
+#ifndef RNP_PKT_TIMEOUT_TX
+#define RNP_PKT_TIMEOUT_TX 200
+#endif
+/* VF Device IDs */
+#define RNP_DEV_ID_N10_PF0_VF 0x8001
+#define RNP_DEV_ID_N10_PF1_VF 0x8002
+
+#define RNP_DEV_ID_N10_PF0_VF_N 0x1010
+#define RNP_DEV_ID_N10_PF1_VF_N 0x1011
+
+/* Transmit Descriptor - Advanced */
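+/* 16-byte descriptor: one 64-bit DMA address plus a 64-bit word packing
+ * the buffer/MAC/IP lengths and the vlan/command bits (RNP_TXD_*) below
+ */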
+struct rnpgbe_tx_desc {
+	union {
+		__le64 pkt_addr; // Packet buffer address
+		struct {
+			__le32 adr_lo;
+			__le32 adr_hi;
+		};
+	};
+	union {
+		__le64 vlan_cmd_bsz;
+		struct {
+			__le32 blen_mac_ip_len;
+			__le32 vlan_cmd;
+		};
+	};
+#define RNP_TXD_FLAGS_VLAN_PRIO_MASK 0xe000
+#define RNP_TX_FLAGS_VLAN_PRIO_SHIFT 13
+#define RNP_TX_FLAGS_VLAN_CFI_SHIFT 12
+
+#define RNP_TXD_VLAN_VALID (0x80000000)
+#define RNP_TXD_SVLAN_TYPE (0x02000000)
+#define RNP_TXD_VLAN_CTRL_NOP (0x00 << 13)
+#define RNP_TXD_VLAN_CTRL_RM_VLAN (0x20000000)
+#define RNP_TXD_VLAN_CTRL_INSERT_VLAN (0x40000000)
+
+#define RNP_TXD_L4_CSUM (0x10000000) //udp tcp sctp csum
+#define RNP_TXD_IP_CSUM (0x8000000)
+#define RNP_TXD_TUNNEL_MASK (0x3000000)
+#define RNP_TXD_TUNNEL_VXLAN (0x1000000)
+#define RNP_TXD_TUNNEL_NVGRE (0x2000000)
+#define RNP_TXD_L4_TYPE_UDP (0xc00000)
+#define RNP_TXD_L4_TYPE_TCP (0x400000)
+#define RNP_TXD_L4_TYPE_SCTP (0x800000)
+#define RNP_TXD_FLAG_IPv4 (0)
+#define RNP_TXD_FLAG_IPv6 (0x200000)
+#define RNP_TXD_FLAG_TSO (0x100000)
+#define RNP_TXD_FLAG_PTP (0x4000000)
+#define RNP_TXD_CMD_RS (0x040000)
+#define RNP_TXD_CMD_INNER_VLAN (0x08000000)
+#define RNP_TXD_STAT_DD (0x020000)
+#define RNP_TXD_CMD_EOP (0x010000)
+#define RNP_TXD_PAD_CTRL (0x01000000)
+};
+
+struct rnpgbe_tx_ctx_desc {
+	__le32 mss_len_vf_num;
+	__le32 inner_vlan_tunnel_len;
+#define VF_VEB_MARK (1 << 24) //bit 56
+#define VF_VEB_IGNORE_VLAN (1 << 25) //bit 57
+	__le32 resv;
+	__le32 resv_cmd;
+#define RNP_TXD_FLAG_TO_RPU (1 << 15)
+#define RNP_TXD_SMAC_CTRL_NOP (0x00 << 12)
+#define RNP_TXD_SMAC_CTRL_REPLACE_MACADDR0 (0x02 << 12)
+#define RNP_TXD_SMAC_CTRL_REPLACE_MACADDR1 (0x06 << 12)
+#define RNP_TXD_CTX_VLAN_CTRL_NOP (0x00 << 10)
+#define RNP_TXD_CTX_VLAN_CTRL_RM_VLAN (0x01 << 10)
+#define RNP_TXD_CTX_VLAN_CTRL_INSERT_VLAN (0x02 << 10)
+#define RNP_TXD_MTI_CRC_PAD_CTRL (0x01000000)
+#define RNP_TXD_CTX_CTRL_DESC (0x080000)
+#define RNP_TXD_CMD_RS (0x040000)
+#define RNP_TXD_STAT_DD (0x020000)
+};
+
+/* Receive Descriptor - Advanced */
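+/* the first (anonymous) struct is the read format written by the driver
+ * (packet buffer address); .wb is the write-back format filled in by
+ * hardware on completion
+ */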
+union rnpgbe_rx_desc {
+	struct {
+		union {
+			__le64 pkt_addr; /* Packet buffer address */
+			struct {
+				__le32 addr_lo;
+				__le32 addr_hi;
+			};
+		};
+		__le64 resv_cmd;
+#define RNP_RXD_FLAG_RS (0)
+	};
+
+	struct {
+		__le32 rss_hash;
+		__le16 mark;
+		__le16 rev1;
+#define RNP_RX_L3_TYPE_MASK (1 << 15) // 1 is ipv4
+#define VEB_VF_PKG (1 << 1) // bit 49
+#define VEB_VF_IGNORE_VLAN (1 << 0) //bit 48
+#define REV_OUTER_VLAN (1 << 5)
+		__le16 len;
+		__le16 padding_len;
+		__le16 vlan;
+		__le16 cmd;
+#define RNP_RXD_STAT_VLAN_VALID (1 << 15)
+#define RNP_RXD_STAT_STAG (0x01 << 14)
+#define RNP_RXD_STAT_TUNNEL_NVGRE (0x02 << 13)
+#define RNP_RXD_STAT_TUNNEL_VXLAN (0x01 << 13)
+#define RNP_RXD_STAT_TUNNEL_MASK (0x03 << 13)
+#define RNP_RXD_STAT_ERR_MASK (0x1f << 8)
+#define RNP_RXD_STAT_SCTP_MASK (0x04 << 8)
+#define RNP_RXD_STAT_L4_MASK (0x02 << 8)
+#define RNP_RXD_STAT_L4_SCTP (0x02 << 6)
+#define RNP_RXD_STAT_L4_TCP (0x01 << 6)
+#define RNP_RXD_STAT_L4_UDP (0x03 << 6)
+#define RNP_RXD_STAT_IPV6 (1 << 5)
+#define RNP_RXD_STAT_IPV4 (0 << 5)
+#define RNP_RXD_STAT_PTP (1 << 4)
+#define RNP_RXD_STAT_DD (1 << 1)
+#define RNP_RXD_STAT_EOP (1 << 0)
+	} wb;
+} __packed;
+
+/* Host Interface Command Structures */
+struct rnpgbe_hic_hdr {
+	u8 cmd;
+	u8 buf_len;
+	union {
+		u8 cmd_resv;
+		u8 ret_status;
+	} cmd_or_resp;
+	u8 checksum;
+};
+
+struct rnpgbe_hic_drv_info {
+	struct rnpgbe_hic_hdr hdr;
+	u8 port_num;
+	u8 ver_sub;
+	u8 ver_build;
+	u8 ver_min;
+	u8 ver_maj;
+	u8 pad; /* end spacing to ensure length is mult. of dword */
+	u16 pad2; /* end spacing to ensure length is mult. of dword2 */
+};
+
+/* Context descriptors */
+struct rnpgbe_adv_tx_context_desc {
+	__le32 vlan_macip_lens;
+	__le32 seqnum_seed;
+	__le32 type_tucmd_mlhl;
+	__le32 mss_l4len_idx;
+};
+
+/* RAH */
+#define RNP_RAH_VIND_MASK 0x003C0000
+#define RNP_RAH_VIND_SHIFT 18
+#define RNP_RAH_AV 0x80000000
+#define RNP_CLEAR_VMDQ_ALL 0xFFFFFFFF
+
+/* Autonegotiation advertised speeds */
+typedef u32 rnpgbe_autoneg_advertised;
+/* Link speed */
+typedef u32 rnpgbe_link_speed;
+#define RNP_LINK_SPEED_UNKNOWN 0
+#define RNP_LINK_SPEED_10_FULL BIT(2)
+#define RNP_LINK_SPEED_100_FULL BIT(3)
+#define RNP_LINK_SPEED_1GB_FULL BIT(4)
+#define RNP_LINK_SPEED_10GB_FULL BIT(5)
+#define RNP_LINK_SPEED_40GB_FULL BIT(6)
+#define RNP_LINK_SPEED_25GB_FULL BIT(7)
+#define RNP_LINK_SPEED_50GB_FULL BIT(8)
+#define RNP_LINK_SPEED_100GB_FULL BIT(9)
+#define RNP_LINK_SPEED_10_HALF BIT(10)
+#define RNP_LINK_SPEED_100_HALF BIT(11)
+#define RNP_LINK_SPEED_1GB_HALF BIT(12)
+#define RNP_SFP_MODE_10G_LR BIT(13)
+#define RNP_SFP_MODE_10G_SR BIT(14)
+#define RNP_SFP_MODE_10G_LRM BIT(15)
+#define RNP_SFP_MODE_1G_T BIT(16)
+#define RNP_SFP_MODE_1G_KX BIT(17)
+#define RNP_SFP_MODE_1G_SX BIT(18)
+#define RNP_SFP_MODE_1G_LX BIT(19)
+#define RNP_SFP_MODE_40G_SR4 BIT(20)
+#define RNP_SFP_MODE_40G_CR4 BIT(21)
+#define RNP_SFP_MODE_40G_LR4 BIT(22)
+#define RNP_SFP_MODE_1G_CX BIT(23)
+#define RNP_SFP_MODE_10G_BASE_T BIT(24)
+#define RNP_SFP_MODE_FIBER_CHANNEL_SPEED BIT(25) // sfp-a0-10 != 0
+#define RNP_SFP_CONNECTOR_DAC BIT(26)
+#define RNP_SFP_TO_SGMII BIT(27)
+#define RNP_SFP_25G_SR BIT(28)
+#define RNP_SFP_25G_KR BIT(29)
+#define RNP_SFP_25G_CR BIT(30)
+
+/* Flow Control Data Sheet defined values
+ * Calculation and defines taken from 802.1bb Annex O
+ */
+
+enum rnpgbe_atr_flow_type {
+	RNP_ATR_FLOW_TYPE_IPV4 = 0x0,
+	RNP_ATR_FLOW_TYPE_UDPV4 = 0x1,
+	RNP_ATR_FLOW_TYPE_TCPV4 = 0x2,
+	RNP_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+	RNP_ATR_FLOW_TYPE_IPV6 = 0x4,
+	RNP_ATR_FLOW_TYPE_UDPV6 = 0x5,
+	RNP_ATR_FLOW_TYPE_TCPV6 = 0x6,
+	RNP_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+	RNP_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10,
+	RNP_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11,
+	RNP_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12,
+	RNP_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13,
+	RNP_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14,
+	RNP_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15,
+	RNP_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16,
+	RNP_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17,
+	RNP_ATR_FLOW_TYPE_ETHER = 0x18,
+	RNP_ATR_FLOW_TYPE_USERDEF = 0x19,
+};
+
+#define RNP_FDIR_DROP_QUEUE (200)
+
+enum {
+	fdir_mode_tcam = 0,
+	fdir_mode_tuple5,
+};
+/* Flow Director ATR input struct. */
+union rnpgbe_atr_input {
+	/*
+	 * Byte layout in order, all values with MSB first:
+	 *
+	 * vm_pool      - 1 byte
+	 * flow_type    - 1 byte
+	 * vlan_id      - 2 bytes
+	 * src_ip       - 16 bytes
+	 * inner_mac    - 6 bytes
+	 * cloud_mode   - 2 bytes
+	 * tni_vni      - 4 bytes
+	 * dst_ip       - 16 bytes
+	 * src_port     - 2 bytes
+	 * dst_port     - 2 bytes
+	 * flex_bytes   - 2 bytes
+	 * bkt_hash     - 2 bytes
+	 */
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+		__be32 dst_ip[4];
+		__be32 dst_ip_mask[4];
+		__be32 src_ip[4];
+		__be32 src_ip_mask[4];
+		u8 inner_mac[6];
+		u8 inner_mac_mask[6];
+		__be16 tunnel_type;
+		__be32 tni_vni;
+		__be16 src_port;
+		__be16 src_port_mask;
+		__be16 dst_port;
+		__be16 dst_port_mask;
+		__be16 flex_bytes;
+		__be16 bkt_hash;
+	} formatted;
+	struct {
+		u8 vm_poll;
+		u8 flow_type;
+		u16 vlan_id;
+		__be16 proto;
+		__be16 resv;
+		__be32 nouse[12];
+	} layer2_formate;
+	__be32 dword_stream[14];
+};
+
+/* BitTimes (BT) conversion */
+#define RNP_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024))
+#define RNP_B2BT(BT) (BT * 8)
+
+/* Calculate Delay to respond to PFC */
+#define RNP_PFC_D 672
+
+/* Calculate Cable Delay */
+#define RNP_CABLE_DC 5556 /* Delay Copper */
+#define RNP_CABLE_DO 5000 /* Delay Optical */
+
+/* Calculate Interface Delay X540 */
+#define RNP_PHY_DC 25600 /* Delay 10G BASET */
+#define RNP_MAC_DC 8192 /* Delay Copper XAUI interface */
+#define RNP_XAUI_DC (2 * 2048) /* Delay Copper Phy */
+
+#define RNP_ID_X540 (RNP_MAC_DC + RNP_XAUI_DC + RNP_PHY_DC)
+
+/* Calculate Interface Delay 82598, n10 */
+#define RNP_PHY_D 12800
+#define RNP_MAC_D 4096
+#define RNP_XAUI_D (2 * 1024)
+
+/* PHY MDI STANDARD CONFIG */
+#define RNP_MDI_PHY_ID1_OFFSET 2
+#define RNP_MDI_PHY_ID2_OFFSET 3
+#define RNP_MDI_PHY_ID_MASK 0xFFFFFC00U
+#define RNP_MDI_PHY_SPEED_SELECT1 0x0040
+#define RNP_MDI_PHY_DUPLEX 0x0100
+#define RNP_MDI_PHY_RESTART_AN 0x0200
+#define RNP_MDI_PHY_ANE 0x1000
+#define RNP_MDI_PHY_SPEED_SELECT0 0x2000
+#define RNP_MDI_PHY_RESET
+
+#define NGBE_PHY_RST_WAIT_PERIOD 50
+
+#define RNP_ID (RNP_MAC_D + RNP_XAUI_D + RNP_PHY_D)
+
+/* Calculate Delay incurred from higher layer */
+#define RNP_HD 6144
+
+/* Calculate PCI Bus delay for low thresholds */
+#define RNP_PCI_DELAY 10000
+
+/* Flow Director compressed ATR hash input struct */
+union rnpgbe_atr_hash_dword {
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+	} formatted;
+	__be32 ip;
+	struct {
+		__be16 src;
+		__be16 dst;
+	} port;
+	__be16 flex_bytes;
+	__be32 dword;
+};
+
+enum rnpgbe_eeprom_type {
+	rnpgbe_eeprom_uninitialized = 0,
+	rnpgbe_eeprom_spi,
+	rnpgbe_flash,
+	rnpgbe_eeprom_none /* No NVM support */
+};
+
+enum mac_type {
+	mac_dwc_xlg,
+	mac_dwc_g,
+};
+
+enum rnpgbe_mac_type {
+	rnpgbe_mac_unknown = 0,
+	rnpgbe_mac_n10g_x8_40G,
+	rnpgbe_mac_n10g_x2_10G,
+	rnpgbe_mac_n10g_x4_10G,
+	rnpgbe_mac_n10g_x8_10G,
+	rnpgbe_mac_n10l_x8_1G,
+	rnpgbe_num_macs
+};
+
+enum rnpgbe_rss_type {
+	rnpgbe_rss_uv440 = 0,
+	rnpgbe_rss_uv3p,
+	rnpgbe_rss_n10,
+	rnpgbe_rss_n20,
+	rnpgbe_rss_n500
+};
+
+enum rnpgbe_hw_type {
+	rnpgbe_hw_n500 = 0,
+	rnpgbe_hw_n210,
+	rnpgbe_hw_n210L,
+};
+
+enum rnpgbe_eth_type { rnpgbe_eth_n10 = 0, rnpgbe_eth_n500 };
+
+enum rnpgbe_phy_type {
+	rnpgbe_phy_unknown = 0,
+	rnpgbe_phy_none,
+	rnpgbe_phy_sfp,
+	rnpgbe_phy_sfp_unsupported,
+	rnpgbe_phy_generic,
+	rnpgbe_phy_sfp_unknown,
+	rnpgbe_phy_sgmii,
+};
+
+enum rnpgbe_sfp_type {
+	rnpgbe_sfp_type_da_cu = 0,
+	rnpgbe_sfp_type_sr = 1,
+	rnpgbe_sfp_type_lr = 2,
+	rnpgbe_sfp_type_da_cu_core0 = 3,
+	rnpgbe_sfp_type_da_cu_core1 = 4,
+	rnpgbe_sfp_type_srlr_core0 = 5,
+	rnpgbe_sfp_type_srlr_core1 = 6,
+	rnpgbe_sfp_type_da_act_lmt_core0 = 7,
+	rnpgbe_sfp_type_da_act_lmt_core1 = 8,
+	rnpgbe_sfp_type_1g_cu_core0 = 9,
+	rnpgbe_sfp_type_1g_cu_core1 = 10,
+	rnpgbe_sfp_type_1g_sx_core0 = 11,
+	rnpgbe_sfp_type_1g_sx_core1 = 12,
+	rnpgbe_sfp_type_1g_lx_core0 = 13,
+	rnpgbe_sfp_type_1g_lx_core1 = 14,
+	rnpgbe_sfp_type_not_present = 0xFFFE,
+	rnpgbe_sfp_type_unknown = 0xFFFF
+};
+
+enum rnpgbe_media_type {
+	rnpgbe_media_type_unknown = 0,
+	rnpgbe_media_type_fiber,
+	rnpgbe_media_type_copper,
+	rnpgbe_media_type_backplane,
+	rnpgbe_media_type_cx4,
+	rnpgbe_media_type_da,
+	rnpgbe_media_type_virtual
+};
+
+/* Flow Control Settings */
+enum rnpgbe_fc_mode {
+	rnpgbe_fc_none = 0,
+	rnpgbe_fc_rx_pause,
+	rnpgbe_fc_tx_pause,
+	rnpgbe_fc_full,
+	rnpgbe_fc_default
+};
+
+#define PAUSE_TX (0x1)
+#define PAUSE_RX (0x2)
+#define PAUSE_AUTO (0x10)
+
+#define ASYM_PAUSE BIT(11)
+#define SYM_PAUSE BIT(10)
+
+struct rnpgbe_addr_filter_info {
+	u32 num_mc_addrs;
+	u32 rar_used_count;
+	u32 mta_in_use;
+	u32 overflow_promisc;
+	bool uc_set_promisc;
+	bool user_set_promisc;
+};
+
+/* Bus parameters */
+struct rnpgbe_bus_info {
+	u16 func;
+	u16 lan_id;
+};
+
+/* Flow control parameters */
+struct rnpgbe_fc_info {
+	u32 high_water[RNP_MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
+	u32 low_water[RNP_MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */
+	u16 pause_time; /* Flow Control Pause timer */
+	bool send_xon; /* Flow control send XON */
+	bool strict_ieee; /* Strict IEEE mode */
+	bool disable_fc_autoneg; /* Do not autonegotiate FC */
+	bool fc_was_autonegged; /* Is current_mode the result of autonegging? */
+	enum rnpgbe_fc_mode current_mode; /* FC mode in effect */
+	u32 requested_mode; /* FC mode requested by caller */
+};
+
+/* Statistics counters collected by the MAC */
+struct rnpgbe_hw_stats {
+	u64 dma_to_dma;
+	u64 dma_to_switch;
+	u64 mac_to_mac;
+	u64 switch_to_switch;
+	u64 mac_to_dma;
+	u64 switch_to_dma;
+	u64 vlan_add_cnt;
+	u64 vlan_strip_cnt;
+	//=== error
+	u64 invalid_droped_packets;
+	u64 filter_dropped_packets;
+	//== drop ==
+	u64 rx_capabity_lost;
+	u64 host_l2_match_drop;
+	u64 redir_input_match_drop;
+	u64 redir_etype_match_drop;
+	u64 redir_tcp_syn_match_drop;
+	u64 redir_tuple5_match_drop;
+	u64 redir_tcam_match_drop;
+
+	u64 bmc_dropped_packets;
+	u64 switch_dropped_packets;
+	//=== rx
+	u64 dma_to_host;
+	//=== dma-tx ==
+	u64 port0_tx_packets;
+	u64 port1_tx_packets;
+	u64 port2_tx_packets;
+	u64 port3_tx_packets;
+	//=== emac 1to4 tx ==
+	u64 in0_tx_pkts;
+	u64 in1_tx_pkts;
+	u64 in2_tx_pkts;
+	u64 in3_tx_pkts;
+	//=== phy tx ==
+	u64 port0_to_phy_pkts;
+	u64 port1_to_phy_pkts;
+	u64 port2_to_phy_pkts;
+	u64 port3_to_phy_pkts;
+	//=== mac rx ===
+	u64 mac_rx_broadcast;
+	u64 mac_rx_multicast;
+	u64 tx_broadcast;
+	u64 tx_multicast;
+	// n500 use this
+	u64 ultra_short_cnt;
+	u64 jumbo_cnt;
+
+	u64 dma_rx_drop_cnt_0;
+	u64 dma_rx_drop_cnt_1;
+	u64 dma_rx_drop_cnt_2;
+	u64 dma_rx_drop_cnt_3;
+	u64 dma_rx_drop_cnt_4;
+	u64 dma_rx_drop_cnt_5;
+	u64 dma_rx_drop_cnt_6;
+	u64 dma_rx_drop_cnt_7;
+	u64 tx_pause;
+	u64 rx_pause;
+};
+
+/* forward declaration */
+struct rnpgbe_hw;
+struct rnpgbe_eth_info;
+struct rnpgbe_dma_info;
+struct rnpgbe_mac_info;
+
+/* iterator type for walking multicast address lists */
+typedef u8 *(*rnpgbe_mc_addr_itr)(struct rnpgbe_hw *hw, u8 **mc_addr_ptr,
+				  u32 *vmdq);
+
+/* Function pointer table */
+struct rnpgbe_eeprom_operations {
+	s32 (*init_params)(struct rnpgbe_hw *hw);
+	//	s32 (*read)(struct rnpgbe_hw *hw, u16, u16 *);
+	//	s32 (*read_buffer)(struct rnpgbe_hw *hw, u16, u16, u16 *);
+	//	s32 (*write)(struct rnpgbe_hw *hw, u16, u16);
+	//	s32 (*write_buffer)(struct rnpgbe_hw *hw, u16, u16, u16 *);
+	//	s32 (*validate_checksum)(struct rnpgbe_hw *hw, u16 *);
+	s32 (*update_checksum)(struct rnpgbe_hw *hw);
+	u16 (*calc_checksum)(struct rnpgbe_hw *hw);
+};
+
+/* add nic operations */
+struct rnpgbe_eth_operations {
+	/* RAR, Multicast, VLAN */
+	s32 (*get_mac_addr)(struct rnpgbe_eth_info *eth, u8 *addr);
+	s32 (*set_rar)(struct rnpgbe_eth_info *eth, u32 index, u8 *addr,
+		       bool enable_addr);
+	s32 (*clear_rar)(struct rnpgbe_eth_info *eth, u32 index);
+	s32 (*set_vmdq)(struct rnpgbe_eth_info *eth, u32 rar, u32 vmdq);
+	s32 (*clear_vmdq)(struct rnpgbe_eth_info *eth, u32 rar, u32 vmdq);
+
+	s32 (*update_mc_addr_list)(struct rnpgbe_eth_info *eth,
+				   struct net_device *netdev, bool sriov_on);
+	void (*clr_mc_addr)(struct rnpgbe_eth_info *eth);
+
+	int (*set_rss_hfunc)(struct rnpgbe_eth_info *eth, int hfunc);
+	void (*set_rss_key)(struct rnpgbe_eth_info *eth, bool sriov_flag);
+	void (*set_rss_table)(struct rnpgbe_eth_info *eth);
+	void (*set_rx_hash)(struct rnpgbe_eth_info *eth, bool status,
+			    bool sriov_flag);
+
+	// ntuple function
+	void (*set_layer2_remapping)(struct rnpgbe_eth_info *eth,
+				     union rnpgbe_atr_input *input, u16 pri_id,
+				     u8 queue, bool prio_flag);
+	void (*clr_layer2_remapping)(struct rnpgbe_eth_info *eth, u16 pri_id);
+	void (*clr_all_layer2_remapping)(struct rnpgbe_eth_info *eth);
+	void (*set_tuple5_remapping)(struct rnpgbe_eth_info *eth,
+				     union rnpgbe_atr_input *input, u16 pri_id,
+				     u8 queue, bool prio_flag);
+	void (*clr_tuple5_remapping)(struct rnpgbe_eth_info *eth, u16 pri_id);
+	void (*clr_all_tuple5_remapping)(struct rnpgbe_eth_info *eth);
+	void (*set_tcp_sync_remapping)(struct rnpgbe_eth_info *eth, int queue,
+				       bool flag, bool prio);
+	void (*set_rx_skip)(struct rnpgbe_eth_info *eth, int count, bool flag);
+
+	void (*set_min_max_packet)(struct rnpgbe_eth_info *eth, int min,
+				   int max);
+	void (*set_vlan_strip)(struct rnpgbe_eth_info *eth, u16 queue,
+			       bool enable);
+	s32 (*set_vfta)(struct rnpgbe_eth_info *eth, u32 vlan, bool vlan_on);
+	void (*clr_vfta)(struct rnpgbe_eth_info *eth);
+	void (*set_vlan_filter)(struct rnpgbe_eth_info *eth, bool status);
+	void (*set_outer_vlan_type)(struct rnpgbe_eth_info *eth, int type);
+	void (*set_double_vlan)(struct rnpgbe_eth_info *eth, bool on);
+	void (*set_vxlan_port)(struct rnpgbe_eth_info *eth, u32 port);
+	void (*set_vxlan_mode)(struct rnpgbe_eth_info *eth, bool inner);
+	s32 (*set_fc_mode)(struct rnpgbe_eth_info *eth);
+
+	void (*set_rx)(struct rnpgbe_eth_info *eth, bool status);
+	void (*set_fcs)(struct rnpgbe_eth_info *eth, bool status);
+
+	void (*set_vf_vlan_mode)(struct rnpgbe_eth_info *eth, u16 vlan, int vf,
+				 bool enable);
+};
+
+enum {
+	rnpgbe_driver_insmod,
+	rnpgbe_driver_suspuse,
+	rnpgbe_driver_force_control_phy,
+};
+
+struct rnpgbe_hw_operations {
+	s32 (*init_hw)(struct rnpgbe_hw *hw);
+	s32 (*reset_hw)(struct rnpgbe_hw *hw);
+	s32 (*start_hw)(struct rnpgbe_hw *hw);
+
+	void (*set_mtu)(struct rnpgbe_hw *hw, int mtu);
+	void (*set_vlan_filter_en)(struct rnpgbe_hw *hw, bool enable);
+	void (*set_vlan_filter)(struct rnpgbe_hw *hw, u16 vid, bool enable,
+				bool sriov_flag);
+	int (*set_veb_vlan_mask)(struct rnpgbe_hw *hw, u16 vid, int vf,
+				 bool enable);
+	void (*set_vf_vlan_filter)(struct rnpgbe_hw *hw, u16 vid, int vf,
+				   bool enable, bool veb_only);
+	void (*clr_vfta)(struct rnpgbe_hw *hw);
+	void (*set_vlan_strip)(struct rnpgbe_hw *hw, u16 queue, bool strip);
+	void (*set_mac)(struct rnpgbe_hw *hw, u8 *mac, bool sriov_flag);
+	void (*set_rx_mode)(struct rnpgbe_hw *hw, struct net_device *netdev,
+			    bool sriov_flag);
+	void (*set_rar_with_vf)(struct rnpgbe_hw *hw, u8 *mac, int idx,
+				u32 vfnum, bool enable);
+	void (*clr_rar)(struct rnpgbe_hw *hw, int idx);
+	void (*clr_rar_all)(struct rnpgbe_hw *hw);
+	void (*clr_vlan_veb)(struct rnpgbe_hw *hw);
+	void (*set_txvlan_mode)(struct rnpgbe_hw *hw, bool vlan);
+	void (*set_tx_maxrate)(struct rnpgbe_hw *hw, bool flag);
+	void (*set_fcs_mode)(struct rnpgbe_hw *hw, bool status);
+	void (*set_vxlan_port)(struct rnpgbe_hw *hw, u32 port);
+	void (*set_vxlan_mode)(struct rnpgbe_hw *hw, bool inner);
+	void (*set_mac_speed)(struct rnpgbe_hw *hw, bool link, u32 speed,
+			      bool duplex);
+	void (*set_mac_rx)(struct rnpgbe_hw *hw, bool status);
+	void (*update_sriov_info)(struct rnpgbe_hw *hw);
+
+	void (*set_sriov_status)(struct rnpgbe_hw *hw, bool status);
+	//void (*set_sriov_vf_mac)(struct rnpgbe_hw *, u8 *, int, bool);
+	void (*set_sriov_vf_mc)(struct rnpgbe_hw *hw, u16 mc_addr);
+
+	void (*set_pause_mode)(struct rnpgbe_hw *hw);
+	void (*get_pause_mode)(struct rnpgbe_hw *hw);
+	void (*update_hw_info)(struct rnpgbe_hw *hw);
+	void (*set_rx_hash)(struct rnpgbe_hw *hw, bool status, bool sriov_flag);
+	int (*set_rss_hfunc)(struct rnpgbe_hw *hw, u8 hfunc);
+	void (*set_rss_key)(struct rnpgbe_hw *hw, bool sriov_flag);
+	void (*set_rss_table)(struct rnpgbe_hw *hw);
+
+	//MBX_ID
+	void (*set_mbx_link_event)(struct rnpgbe_hw *hw, int enable);
+	void (*set_mbx_ifup)(struct rnpgbe_hw *hw, int enable);
+
+	s32 (*get_thermal_sensor_data)(struct rnpgbe_hw *hw);
+	s32 (*init_thermal_sensor_thresh)(struct rnpgbe_hw *hw);
+
+	void (*disable_tx_laser)(struct rnpgbe_hw *hw);
+	void (*enable_tx_laser)(struct rnpgbe_hw *hw);
+	void (*flap_tx_laser)(struct rnpgbe_hw *hw);
+	s32 (*check_link)(struct rnpgbe_hw *hw, rnpgbe_link_speed *speed,
+			  bool *link_up, bool *duplex,
+			  bool link_up_wait_to_complete);
+	s32 (*setup_link)(struct rnpgbe_hw *hw, rnpgbe_link_speed adv,
+			  u32 autoneg, u32 speed, u32 duplex);
+	void (*clean_link)(struct rnpgbe_hw *hw);
+	s32 (*get_link_capabilities)(struct rnpgbe_hw *hw,
+				     rnpgbe_link_speed *speed, bool *autoneg);
+	s32 (*init_rx_addrs)(struct rnpgbe_hw *hw);
+
+	// ntuple function
+	void (*set_layer2_remapping)(struct rnpgbe_hw *hw,
+				     union rnpgbe_atr_input *input, u16 pri_id,
+				     u8 queue, bool prio_flag);
+	void (*clr_layer2_remapping)(struct rnpgbe_hw *hw, u16 pri_id);
+	void (*clr_all_layer2_remapping)(struct rnpgbe_hw *hw);
+	void (*set_tuple5_remapping)(struct rnpgbe_hw *hw,
+				     union rnpgbe_atr_input *input, u16 pri_id,
+				     u8 queue, bool prio_flag);
+	void (*clr_tuple5_remapping)(struct rnpgbe_hw *hw, u16 pri_id);
+	void (*clr_all_tuple5_remapping)(struct rnpgbe_hw *hw);
+	void (*set_tcp_sync_remapping)(struct rnpgbe_hw *hw, int queue,
+				       bool flag, bool prio);
+	void (*set_rx_skip)(struct rnpgbe_hw *hw, int count, bool flag);
+	void (*set_outer_vlan_type)(struct rnpgbe_hw *hw, int type);
+
+	void (*update_hw_status)(struct rnpgbe_hw *hw,
+				 struct rnpgbe_hw_stats *hw_stats,
+				 struct net_device_stats *net_stats);
+	void (*update_msix_count)(struct rnpgbe_hw *hw, int msix_count);
+
+	void (*update_rx_drop)(struct rnpgbe_hw *hw);
+
+	// ethtool
+	void (*setup_ethtool)(struct net_device *netdev);
+
+	s32 (*phy_read_reg)(struct rnpgbe_hw *hw, u32 reg_addr, u32 device_type,
+			    u16 *phy_data);
+	s32 (*phy_write_reg)(struct rnpgbe_hw *hw, u32 reg_addr,
+			     u32 device_type, u16 phy_data);
+	void (*setup_wol)(struct rnpgbe_hw *hw, u32 mode);
+	void (*set_vf_vlan_mode)(struct rnpgbe_hw *hw, u16 vlan, int vf,
+				 bool enable);
+	void (*driver_status)(struct rnpgbe_hw *hw, bool enable, int mode);
+
+	void (*setup_eee)(struct rnpgbe_hw *hw, int ls, int tw, u32 local_eee);
+
+	void (*set_eee_mode)(struct rnpgbe_hw *hw, bool en_tx_lpi_clockgating);
+	void (*reset_eee_mode)(struct rnpgbe_hw *hw);
+	void (*set_eee_timer)(struct rnpgbe_hw *hw, int ls, int tw);
+	void (*set_eee_pls)(struct rnpgbe_hw *hw, int link);
+
+	u32 (*get_lpi_status)(struct rnpgbe_hw *hw);
+
+	int (*get_ncsi_mac)(struct rnpgbe_hw *hw, u8 *addr, int idx);
+	int (*get_ncsi_vlan)(struct rnpgbe_hw *hw, u16 *vlan, int idx);
+
+	void (*set_lldp)(struct rnpgbe_hw *hw, bool enable);
+	void (*get_lldp)(struct rnpgbe_hw *hw);
+};
+
+struct rnpgbe_mac_operations {
+	void (*set_mac_rx)(struct rnpgbe_mac_info *mac, bool status);
+	void (*set_mac_speed)(struct rnpgbe_mac_info *mac, bool link, u32 speed,
+			      bool duplex);
+	void (*set_mac_fcs)(struct rnpgbe_mac_info *mac, bool status);
+	s32 (*set_fc_mode)(struct rnpgbe_mac_info *mac);
+	void (*check_link)(struct rnpgbe_mac_info *mac,
+			   rnpgbe_link_speed *speed, bool *link_up,
+			   bool link_up_wait_to_complete);
+	void (*set_mac)(struct rnpgbe_mac_info *mac, u8 *addr, int index);
+	int (*mdio_write)(struct rnpgbe_mac_info *mac, int phyreg, int phydata);
+	int (*mdio_read)(struct rnpgbe_mac_info *mac, u32 phyreg,
+			 u32 *regvalue);
+	void (*pmt)(struct rnpgbe_mac_info *mac, u32 mode, bool ncsi_en);
+	void (*set_eee_mode)(struct rnpgbe_mac_info *mac,
+			     bool en_tx_lpi_clockgating);
+	void (*reset_eee_mode)(struct rnpgbe_mac_info *mac);
+	void (*set_eee_timer)(struct rnpgbe_mac_info *mac, int ls, int tw);
+	void (*set_eee_pls)(struct rnpgbe_mac_info *mac, int link);
+	u32 (*get_lpi_status)(struct rnpgbe_mac_info *mac);
+};
+
+struct rnpgbe_phy_operations {
+	s32 (*identify)(struct rnpgbe_hw *hw);
+	s32 (*identify_sfp)(struct rnpgbe_hw *hw);
+	s32 (*init)(struct rnpgbe_hw *hw);
+	s32 (*reset)(struct rnpgbe_hw *hw);
+	s32 (*read_reg)(struct rnpgbe_hw *hw, u32 reg_addr, u32 device_type,
+			u16 *phy_data);
+	s32 (*write_reg)(struct rnpgbe_hw *hw, u32 reg_addr, u32 device_type,
+			 u16 phy_data);
+	s32 (*setup_link)(struct rnpgbe_hw *hw);
+	s32 (*setup_link_speed)(struct rnpgbe_hw *hw, rnpgbe_link_speed speed,
+				bool wait_to_complete);
+	s32 (*check_link)(struct rnpgbe_hw *hw, rnpgbe_link_speed *speed,
+			  bool *flag);
+	s32 (*get_firmware_version)(struct rnpgbe_hw *hw, u16 *version);
+	s32 (*read_i2c_byte)(struct rnpgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+			     u8 *data);
+	s32 (*write_i2c_byte)(struct rnpgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+			      u8 data);
+	s32 (*read_i2c_sff8472)(struct rnpgbe_hw *hw, u8 byte_offset, u8 *data);
+	s32 (*read_i2c_eeprom)(struct rnpgbe_hw *hw, u8 byte_offset, u8 *data);
+	s32 (*write_i2c_eeprom)(struct rnpgbe_hw *hw, u8 byte_offset, u8 data);
+	s32 (*check_overtemp)(struct rnpgbe_hw *hw);
+};
+
+struct rnpgbe_eeprom_info {
+	struct rnpgbe_eeprom_operations ops;
+	enum rnpgbe_eeprom_type type;
+	u32 semaphore_delay;
+	u16 word_size;
+	u16 address_bits;
+	u16 word_page_size;
+};
+
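+/* DMA block callbacks: per-queue TX rate limiting and VEB (embedded
+ * switch) MAC/VLAN table maintenance used with SR-IOV.
+ */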
+struct rnpgbe_dma_operations {
+	void (*set_tx_maxrate)(struct rnpgbe_dma_info *dma, u16 queue,
+			       u32 max_rate);
+	void (*set_veb_mac)(struct rnpgbe_dma_info *dma, u8 *mac, u32 vfnum,
+			    u32 ring);
+	/* sets only the VF's own VLAN */
+	void (*set_veb_vlan)(struct rnpgbe_dma_info *dma, u16 vlan, u32 vfnum);
+	void (*set_veb_vlan_mask)(struct rnpgbe_dma_info *dma, u16 vlan,
+				  u16 mask, int entry);
+	void (*clr_veb_all)(struct rnpgbe_dma_info *dma);
+};
+
+struct rnpgbe_dma_info {
+	struct rnpgbe_dma_operations ops;
+	u8 __iomem *dma_base_addr;
+	u8 __iomem *dma_ring_addr;
+	void *back;
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	u32 dma_version;
+};
+
+#define RNP_MAX_MTA 128
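+/* ETH (filter) block state: multicast table shadow plus RAR/VLAN
+ * filter table sizing and queue limits.
+ */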
+struct rnpgbe_eth_info {
+	struct rnpgbe_eth_operations ops;
+	u8 __iomem *eth_base_addr;
+	enum rnpgbe_eth_type eth_type;
+	void *back;
+
+	u32 mta_shadow[RNP_MAX_MTA];
+	s32 mc_filter_type;
+	u32 mcft_size;
+	u32 vft_size;
+	u32 num_rar_entries;
+	u32 rar_highwater;
+	u32 rx_pb_size;
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	u32 reg_off;
+	u32 orig_autoc;
+	u32 cached_autoc;
+	u32 orig_autoc2;
+};
+
+struct rnpgbe_nic_info {
+	u8 __iomem *nic_base_addr;
+};
+
+struct mii_regs {
+	unsigned int addr; /* MII Address */
+	unsigned int data; /* MII Data */
+	unsigned int addr_shift; /* MII address shift */
+	unsigned int reg_shift; /* MII reg shift */
+	unsigned int addr_mask; /* MII address mask */
+	unsigned int reg_mask; /* MII reg mask */
+	unsigned int clk_csr_shift;
+	unsigned int clk_csr_mask;
+};
+
+#define RNP_FLAGS_DOUBLE_RESET_REQUIRED 0x01
+#define RNP_FLAGS_INIT_MAC_ADDRESS 0x02
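+/* MAC block state: station/permanent MAC addresses, MII access
+ * parameters and filter table sizing.
+ */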
+struct rnpgbe_mac_info {
+	struct rnpgbe_mac_operations ops;
+	u8 __iomem *mac_addr;
+	void *back;
+	struct mii_regs mii;
+	int phy_addr;
+	int clk_csr;
+	enum rnpgbe_mac_type type;
+	enum mac_type mac_type;
+	u8 addr[ETH_ALEN];
+	u8 perm_addr[ETH_ALEN];
+	/* prefix for World Wide Node Name (WWNN) */
+	u16 wwnn_prefix;
+	/* prefix for World Wide Port Name (WWPN) */
+	u16 wwpn_prefix;
+	u16 max_msix_vectors;
+	u32 mta_shadow[RNP_MAX_MTA];
+	s32 mc_filter_type;
+	u32 mcft_size;
+	u32 vft_size;
+	u32 num_rar_entries;
+	u32 rar_highwater;
+	u32 rx_pb_size;
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	u32 reg_off;
+	u32 orig_autoc;
+	u32 cached_autoc;
+	u32 orig_autoc2;
+	bool orig_link_settings_stored;
+	bool autotry_restart;
+	u8 mac_flags;
+};
+
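+/* PHY state: type/id/address, MDI-X status and SFP module information. */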
+struct rnpgbe_phy_info {
+	struct rnpgbe_phy_operations ops;
+	struct mdio_if_info mdio;
+	enum rnpgbe_phy_type type;
+	u32 id;
+	u32 phy_addr;
+	bool is_mdix;
+	u8 mdix;
+	enum rnpgbe_sfp_type sfp_type;
+	bool sfp_setup_needed;
+	u32 revision;
+	enum rnpgbe_media_type media_type;
+	bool reset_disable;
+	rnpgbe_autoneg_advertised autoneg_advertised;
+	bool smart_speed_active;
+	bool multispeed_fiber;
+	bool reset_if_overtemp;
+};
+
+#include "rnpgbe_mbx.h"
+
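+/* Raw PCS register accessors; @num selects the PCS instance. */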
+struct rnpgbe_pcs_operations {
+	u32 (*read)(struct rnpgbe_hw *hw, int num, u32 addr);
+	void (*write)(struct rnpgbe_hw *hw, int num, u32 addr, u32 value);
+};
+
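+/* Mailbox transport callbacks for PF <-> VF and PF <-> firmware (CM3)
+ * messaging.
+ */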
+struct rnpgbe_mbx_operations {
+	s32 (*init_params)(struct rnpgbe_hw *hw);
+	s32 (*read)(struct rnpgbe_hw *hw, u32 *msg, u16 size, enum MBX_ID);
+	s32 (*write)(struct rnpgbe_hw *hw, u32 *msg, u16 size, enum MBX_ID);
+	s32 (*read_posted)(struct rnpgbe_hw *hw, u32 *msg, u16 size,
+			   enum MBX_ID);
+	s32 (*write_posted)(struct rnpgbe_hw *hw, u32 *msg, u16 size,
+			    enum MBX_ID);
+	s32 (*check_for_msg)(struct rnpgbe_hw *hw, enum MBX_ID);
+	s32 (*check_for_ack)(struct rnpgbe_hw *hw, enum MBX_ID);
+	/* s32 (*check_for_rst)(struct rnpgbe_hw *, enum MBX_ID); */
+	s32 (*configure)(struct rnpgbe_hw *hw, int nr_vec, bool enable);
+};
+
+struct rnpgbe_mbx_stats {
+	u32 msgs_tx;
+	u32 msgs_rx;
+
+	u32 acks;
+	u32 reqs;
+	u32 rsts;
+};
+
+struct rnpgbe_pcs_info {
+	struct rnpgbe_pcs_operations ops;
+	int pcs_count;
+};
+
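+/* Mailbox state: timeouts, per-VF request/ack tracking and register
+ * offsets for the firmware and VF mailboxes.
+ */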
+struct rnpgbe_mbx_info {
+	struct rnpgbe_mbx_operations ops;
+	struct rnpgbe_mbx_stats stats;
+	u32 timeout;
+	u32 usec_delay;
+	u32 v2p_mailbox;
+	u16 size;
+
+	u16 vf_req[64];
+	u16 vf_ack[64];
+	u16 cpu_req;
+	u16 cpu_ack;
+
+	struct mutex lock;
+
+	bool other_irq_enabled;
+	/* mailbox register layout */
+	int mbx_size;
+
+	int mbx_mem_size;
+#define MBX_FEATURE_NO_ZERO BIT(0)
+#define MBX_FEATURE_WRITE_DELAY BIT(1)
+	u32 mbx_feature;
+	/* CM3 (firmware) <-> PF mailbox registers */
+	u32 cpu_pf_shm_base;
+	u32 pf2cpu_mbox_ctrl;
+	u32 pf2cpu_mbox_mask;
+	u32 cpu_pf_mbox_mask;
+	u32 cpu2pf_mbox_vec;
+
+	/* PF <-> VF mailbox registers */
+	u32 pf_vf_shm_base;
+	u32 pf2vf_mbox_ctrl_base;
+	u32 pf_vf_mbox_mask_lo;
+	u32 pf_vf_mbox_mask_hi;
+	u32 pf2vf_mbox_vec_base;
+	u32 vf2pf_mbox_vec_base;
+
+	u32 cpu_vf_share_ram;
+	int share_size;
+};
+
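+/* One VEB VLAN table entry, tracked on a list with its free/used state. */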
+struct vf_vebvlans {
+	struct list_head l;
+	bool free;
+	int veb_entry;
+	u16 vid;
+	u16 mask;
+};
+
+struct lldp_status {
+	int enable;
+	int inteval;
+};
+
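+/* Top-level per-function hardware state: device identification, link,
+ * RSS and SR-IOV parameters, and the NIC/DMA/ETH/MAC/PHY/mailbox/PCS
+ * sub-block info structures.
+ */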
+struct rnpgbe_hw {
+	void *back;
+	u8 __iomem *hw_addr;
+	u8 __iomem *ring_msix_base;
+	u8 __iomem *rpu_addr; /* 0x4000_0000 */
+	u8 pfvfnum; /* PF/VF function number */
+	u8 pfvfnum_system;
+	struct pci_dev *pdev;
+
+	u16 device_id;
+	u16 vendor_id;
+	u16 subsystem_device_id;
+	u16 subsystem_vendor_id;
+	char lane_mask;
+	u16 mac_type;
+	u16 phy_type;
+	int nr_lane;
+	int sfc_boot;
+	int pxe_en;
+	int ncsi_en;
+	int trim_valid;
+
+	u8 is_backplane : 1;
+	u8 is_sgmii : 1;
+	u8 force_10g_1g_speed_ablity : 1;
+	u8 force_speed_stat : 2;
+#define FORCE_SPEED_STAT_DISABLED 0
+#define FORCE_SPEED_STAT_1G 1
+#define FORCE_SPEED_STAT_10G 2
+
+	u32 supported_link;
+	u32 advertised_link;
+	u32 autoneg;
+	u32 fake_autoneg;
+	u32 tp_mdx;
+	u32 tp_mdix_ctrl;
+	u32 phy_id;
+
+	u32 eee_capability;
+
+	u8 link;
+	u8 pci_gen;
+	u8 pci_lanes;
+	u16 max_msix_vectors;
+
+	int speed;
+	int duplex;
+	u32 dma_version;
+	u32 wol;
+	u32 wol_en;
+	u16 min_length;
+	u16 max_length;
+	u16 min_length_current;
+	u16 max_length_current;
+	/* rss info */
+#define HW_MAX_RETA_ENTRIES 512
+	u8 rss_indir_tbl[HW_MAX_RETA_ENTRIES];
+#define HW_MAX_TC_ENTRIES 8
+	u8 rss_tc_tbl[HW_MAX_TC_ENTRIES];
+	int rss_indir_tbl_num;
+	int rss_tc_tbl_num;
+	u32 rss_tbl_setup_flag;
+#define HW_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
+	u8 rss_key[HW_RSS_KEY_SIZE];
+	u32 rss_key_setup_flag;
+	u32 vfnum;
+	int num_rar_entries;
+	int max_vfs;
+	int max_vfs_noari;
+	int sriov_ring_limit;
+	int max_pf_macvlans;
+	int num_vebvlan_entries;
+
+	int fdir_mode;
+	int layer2_count;
+	int tuple5_count;
+	int veb_ring;
+
+	struct lldp_status lldp_status;
+
+	u32 fdir_pballoc; /* total count */
+	enum rnpgbe_rss_type rss_type;
+	enum rnpgbe_hw_type hw_type;
+	struct rnpgbe_hw_operations ops;
+	struct rnpgbe_nic_info nic;
+	struct rnpgbe_dma_info dma;
+	struct rnpgbe_eth_info eth;
+	struct rnpgbe_mac_info mac;
+	struct rnpgbe_addr_filter_info addr_ctrl;
+	struct rnpgbe_fc_info fc;
+	struct rnpgbe_phy_info phy;
+	struct rnpgbe_eeprom_info eeprom;
+	struct rnpgbe_bus_info bus;
+	struct rnpgbe_mbx_info mbx;
+	struct rnpgbe_pcs_info pcs;
+	bool adapter_stopped;
+	bool force_full_reset;
+	bool mng_fw_enabled;
+	bool wol_enabled;
+	unsigned long wol_supported;
+	int fw_version;
+	int force_en;
+	int force_cap;
+	u32 driver_version;
+	u8 sfp_connector;
+
+	struct vf_vebvlans vf_vas;
+	struct vf_vebvlans *vv_list;
+
+	u32 axi_mhz;
+	u32 bd_uid;
+	union {
+		u8 port_id[4];
+		u32 port_ids;
+	};
+
+	int mode;
+	int default_rx_queue;
+	u32 usecstocount;
+#define RNP_NET_FEATURE_SG BIT(0)
+#define RNP_NET_FEATURE_TX_CHECKSUM BIT(1)
+#define RNP_NET_FEATURE_RX_CHECKSUM BIT(2)
+#define RNP_NET_FEATURE_TSO BIT(3)
+#define RNP_NET_FEATURE_TX_UDP_TUNNEL BIT(4)
+#define RNP_NET_FEATURE_VLAN_FILTER BIT(5)
+#define RNP_NET_FEATURE_VLAN_OFFLOAD BIT(6)
+#define RNP_NET_FEATURE_RX_NTUPLE_FILTER BIT(7)
+#define RNP_NET_FEATURE_TCAM BIT(8)
+#define RNP_NET_FEATURE_RX_HASH BIT(9)
+#define RNP_NET_FEATURE_RX_FCS BIT(10)
+#define RNP_NET_FEATURE_HW_TC BIT(11)
+#define RNP_NET_FEATURE_USO BIT(12)
+#define RNP_NET_FEATURE_STAG_FILTER BIT(13)
+#define RNP_NET_FEATURE_STAG_OFFLOAD BIT(14)
+#define RNP_NET_FEATURE_VF_FIXED BIT(15)
+#define RNP_VEB_VLAN_MASK_EN BIT(16)
+#define RNP_HW_FEATURE_EEE BIT(17)
+#define RNP_HW_SOFT_MASK_OTHER_IRQ BIT(18)
+
+	u32 feature_flags;
+	struct rnpgbe_thermal_sensor_data thermal_sensor_data;
+
+	struct {
+		int version;
+		int len;
+		int flag;
+	} dump;
+};
+
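+/* Static per-device description: MAC/RSS/hw type plus the operation
+ * tables and get_invariants() hook for a given device family.
+ */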
+struct rnpgbe_info {
+	enum rnpgbe_mac_type mac;
+	enum rnpgbe_rss_type rss_type;
+	enum rnpgbe_hw_type hw_type;
+	s32 (*get_invariants)(struct rnpgbe_hw *hw);
+	struct rnpgbe_mac_operations *mac_ops;
+	struct rnpgbe_eeprom_operations *eeprom_ops;
+	struct rnpgbe_phy_operations *phy_ops;
+	struct rnpgbe_mbx_operations *mbx_ops;
+	struct rnpgbe_pcs_operations *pcs_ops;
+
+	bool one_pf_with_two_dma;
+	int reg_off;
+	int adapter_cnt;
+	char lane_mask;
+	int hi_dma;
+	int total_queue_pair_cnts;
+	int dma2_in_1pf;
+	char *hw_addr;
+};
+
+/* Error Codes */
+#define RNP_ERR_EEPROM -1
+#define RNP_ERR_EEPROM_CHECKSUM -2
+#define RNP_ERR_PHY -3
+#define RNP_ERR_CONFIG -4
+#define RNP_ERR_PARAM -5
+#define RNP_ERR_MAC_TYPE -6
+#define RNP_ERR_UNKNOWN_PHY -7
+#define RNP_ERR_LINK_SETUP -8
+#define RNP_ERR_ADAPTER_STOPPED -9
+#define RNP_ERR_INVALID_MAC_ADDR -10
+#define RNP_ERR_DEVICE_NOT_SUPPORTED -11
+#define RNP_ERR_MASTER_REQUESTS_PENDING -12
+#define RNP_ERR_INVALID_LINK_SETTINGS -13
+#define RNP_ERR_AUTONEG_NOT_COMPLETE -14
+#define RNP_ERR_RESET_FAILED -15
+#define RNP_ERR_SWFW_SYNC -16
+#define RNP_ERR_PHY_ADDR_INVALID -17
+#define RNP_ERR_I2C -18
+#define RNP_ERR_SFP_NOT_SUPPORTED -19
+#define RNP_ERR_SFP_NOT_PRESENT -20
+#define RNP_ERR_SFP_NO_INIT_SEQ_PRESENT -21
+#define RNP_ERR_FDIR_REINIT_FAILED -23
+#define RNP_ERR_EEPROM_VERSION -24
+#define RNP_ERR_NO_SPACE -25
+#define RNP_ERR_OVERTEMP -26
+#define RNP_ERR_FC_NOT_NEGOTIATED -27
+#define RNP_ERR_FC_NOT_SUPPORTED -28
+#define RNP_ERR_SFP_SETUP_NOT_COMPLETE -30
+#define RNP_ERR_PBA_SECTION -31
+#define RNP_ERR_INVALID_ARGUMENT -32
+#define RNP_ERR_HOST_INTERFACE_COMMAND -33
+#define RNP_NOT_IMPLEMENTED 0x7FFFFFFF
+
+#define RNP_RAH_AV 0x80000000
+/* eth fix code */
+#define RNP_FCTRL_BPE BIT(10)
+#define RNP_FCTRL_UPE BIT(9)
+#define RNP_FCTRL_MPE BIT(8)
+#define RNP_MCSTCTRL_MTA BIT(2)
+#define RNP_MCSTCTRL_UTA BIT(3)
+#define RNP_MAX_LAYER2_FILTERS (16)
+#define RNP_MAX_TUPLE5_FILTERS (128)
+#define RNP_MAX_TCAM_FILTERS (4096)
+#define RNP_SRC_IP_MASK BIT(0)
+#define RNP_DST_IP_MASK BIT(1)
+#define RNP_SRC_PORT_MASK BIT(2)
+#define RNP_DST_PORT_MASK BIT(3)
+#define RNP_L4_PROTO_MASK BIT(4)
+#endif /* _RNPGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/version.h b/drivers/net/ethernet/mucse/rnpgbe/version.h
new file mode 100755
index 0000000000000000000000000000000000000000..e64c85211ab4d8b60c9867cf5c0e73a196073d40
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/version.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef VERSION_H
+#define VERSION_H
+#define GIT_COMMIT " 621bb20"
+#endif /* VERSION_H */