diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile index 9d4e5b080f38c5eed91d956b12d379904d842bd5..0ed2cc1350fb35b8f7927dfb45ddbd05c867da56 100644 --- a/drivers/net/ethernet/wangxun/txgbe/Makefile +++ b/drivers/net/ethernet/wangxun/txgbe/Makefile @@ -25,4 +25,8 @@ txgbe-objs := txgbe_main.o \ txgbe_xsk.o \ txgbe_lib.o \ txgbe_pcierr.o \ - txgbe_kcompat.o \ No newline at end of file + txgbe_e56.o \ + txgbe_e56_bp.o \ + txgbe_aml.o \ + txgbe_aml40.o \ + txgbe_kcompat.o diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h index f144b654b12d20142ae088848a1447884b91cfa9..464db2d3ecfd533c2190c418a3c87fdc18283192 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -86,11 +86,12 @@ DECLARE_STATIC_KEY_FALSE(txgbe_xdp_locking_key); #define TXGBE_DEFAULT_TXD DEFAULT_TXD #define TXGBE_DEFAULT_TX_WORK DEFAULT_TX_WORK #else -#define TXGBE_DEFAULT_TXD 512 +#define TXGBE_DEFAULT_TXD 1024 #define TXGBE_DEFAULT_TX_WORK 256 #endif #define TXGBE_MAX_TXD 8192 #define TXGBE_MIN_TXD 128 +#define TXGBE_MAX_TX_WORK 65535 #if (PAGE_SIZE < 8192) #define TXGBE_DEFAULT_RXD 512 @@ -207,6 +208,8 @@ enum txgbe_tx_flags { #define VMDQ_P(p) (p) #endif +#define TXGBE_VF_MAX_TX_QUEUES 4 + struct vf_data_storage { struct pci_dev *vfdev; u8 IOMEM *b4_addr; @@ -220,6 +223,7 @@ struct vf_data_storage { bool pf_set_mac; u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ u16 pf_qos; + __be16 vlan_proto; u16 min_tx_rate; u16 max_tx_rate; u16 vlan_count; @@ -233,6 +237,8 @@ struct vf_data_storage { u8 trusted; int xcast_mode; unsigned int vf_api; + u16 ft_filter_idx[TXGBE_MAX_RDB_5T_CTL0_FILTERS]; + u16 queue_max_tx_rate[TXGBE_VF_MAX_TX_QUEUES]; }; struct vf_macvlans { @@ -286,10 +292,14 @@ struct txgbe_lro_list { #define DESC_NEEDED (MAX_SKB_FRAGS + 4) #endif +#define DESC_RESERVED 96 +#define DESC_RESERVED_AML 192 + /* wrapper around a pointer to a socket buffer, * so a DMA handle can be stored along with the buffer */ struct txgbe_tx_buffer { union txgbe_tx_desc *next_to_watch; + u32 next_eop; unsigned long time_stamp; union { struct sk_buff *skb; @@ -305,6 +315,7 @@ struct txgbe_tx_buffer { __be16 protocol; DEFINE_DMA_UNMAP_ADDR(dma); DEFINE_DMA_UNMAP_LEN(len); + void *va; u32 tx_flags; }; @@ -451,6 +462,8 @@ struct txgbe_ring { */ u16 next_to_use; u16 next_to_clean; + u16 next_to_free; + u16 rx_offset; #ifdef HAVE_PTP_1588_CLOCK unsigned long last_rx_timestamp; @@ -458,8 +471,11 @@ struct txgbe_ring { #endif u16 rx_buf_len; union { -#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT - u16 next_to_alloc; +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + union { + u16 next_to_alloc; + u16 next_rs_idx; + }; #endif struct { u8 atr_sample_rate; @@ -495,6 +511,8 @@ struct txgbe_ring { #endif #endif #endif + dma_addr_t headwb_dma; + u32 *headwb_mem; } ____cacheline_internodealigned_in_smp; enum txgbe_ring_f_enum { @@ -551,6 +569,7 @@ struct txgbe_ring_feature { #if (PAGE_SIZE < 8192) +#define TXGBE_MAX_2K_FRAME_BUILD_SKB (TXGBE_RXBUFFER_1536 - NET_IP_ALIGN) #define TXGBE_2K_TOO_SMALL_WITH_PADDING \ ((NET_SKB_PAD + TXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(TXGBE_RXBUFFER_2K)) @@ -601,14 +620,11 @@ static inline unsigned int txgbe_rx_bufsz(struct txgbe_ring __maybe_unused *ring #if MAX_SKB_FRAGS < 8 return ALIGN(TXGBE_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024); #else -#if IS_ENABLED(CONFIG_FCOE) - if (test_bit(__TXGBE_RX_FCOE, &ring->state)) - return 
(PAGE_SIZE < 8192) ? TXGBE_RXBUFFER_4K : - TXGBE_RXBUFFER_3K; -#endif -#ifdef HAVE_XDP_SUPPORT if (test_bit(__TXGBE_RX_3K_BUFFER, &ring->state)) return TXGBE_RXBUFFER_3K; +#if (PAGE_SIZE < 8192) + if (ring_uses_build_skb(ring)) + return TXGBE_MAX_2K_FRAME_BUILD_SKB; #endif return TXGBE_RXBUFFER_2K; #endif @@ -616,10 +632,6 @@ static inline unsigned int txgbe_rx_bufsz(struct txgbe_ring __maybe_unused *ring static inline unsigned int txgbe_rx_pg_order(struct txgbe_ring __maybe_unused *ring) { -#if IS_ENABLED(CONFIG_FCOE) - if (test_bit(__TXGBE_RX_FCOE, &ring->state)) - return (PAGE_SIZE < 8192) ? 1 : 0; -#endif #if (PAGE_SIZE < 8192) if (test_bit(__TXGBE_RX_3K_BUFFER, &ring->state)) return 1; @@ -630,16 +642,14 @@ static inline unsigned int txgbe_rx_pg_order(struct txgbe_ring __maybe_unused *r static inline unsigned int txgbe_rx_offset(struct txgbe_ring *rx_ring) { - if (rx_ring->xdp_prog) - return TXGBE_SKB_PAD; - else - return 0; + return ring_uses_build_skb(rx_ring) ? TXGBE_SKB_PAD : 0; } #endif struct txgbe_ring_container { struct txgbe_ring *ring; /* pointer to linked list of rings */ + unsigned long next_update; /* jiffies value of last update */ unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_packets; /* total packets processed this int */ u16 work_limit; /* total work allowed per interrupt */ @@ -797,6 +807,14 @@ struct hwmon_buff { #define TXGBE_16K_ITR 248 #define TXGBE_12K_ITR 336 +#define TXGBE_ITR_ADAPTIVE_MIN_INC 2 +#define TXGBE_ITR_ADAPTIVE_MIN_USECS 10 +#define TXGBE_ITR_ADAPTIVE_MAX_USECS 84 +#define TXGBE_ITR_ADAPTIVE_LATENCY 0x80 +#define TXGBE_ITR_ADAPTIVE_BULK 0x00 +#define TXGBE_ITR_ADAPTIVE_MASK_USECS (TXGBE_ITR_ADAPTIVE_LATENCY - \ + TXGBE_ITR_ADAPTIVE_MIN_INC) + /* txgbe_test_staterr - tests bits in Rx descriptor status and error fields */ static inline __le32 txgbe_test_staterr(union txgbe_rx_desc *rx_desc, const u32 stat_err_bits) @@ -931,6 +949,7 @@ struct txgbe_therm_proc_data { #define 
TXGBE_FLAG2_RSS_FIELD_IPV6_UDP (1U << 10) #define TXGBE_FLAG2_RSS_ENABLED (1U << 12) #define TXGBE_FLAG2_PTP_PPS_ENABLED (1U << 11) + #define TXGBE_FLAG2_EEE_CAPABLE (1U << 14) #define TXGBE_FLAG2_EEE_ENABLED (1U << 15) #define TXGBE_FLAG2_VXLAN_REREG_NEEDED (1U << 16) @@ -947,10 +966,19 @@ struct txgbe_therm_proc_data { #define TXGBE_FLAG2_KR_PRO_DOWN (1U << 27) #define TXGBE_FLAG2_KR_PRO_REINIT (1U << 28) #define TXGBE_FLAG2_ECC_ERR_RESET (1U << 29) +#define TXGBE_FLAG2_RX_LEGACY (1U << 30) #define TXGBE_FLAG2_PCIE_NEED_RECOVER (1U << 31) +/* amlite: new SW-FW mbox */ +//#define TXGBE_FLAG2_SWFW_MBOX_REPLY (1U << 30) +#define TXGBE_FLAG2_SERVICE_RUNNING (1U << 13) +/* amlite: dma reset */ +#define TXGBE_FLAG2_DMA_RESET_REQUESTED (1U << 2) +#define TXGBE_FLAG2_PCIE_NEED_Q_RESET (1U << 30) /* FIXME(review): bit 30 collides with TXGBE_FLAG2_RX_LEGACY above; pick a free bit */ +#define TXGBE_FLAG3_PHY_EVENT (1U << 0) +#define TXGBE_FLAG3_TEMP_SENSOR_INPROGRESS (1U << 1) #define TXGBE_SET_FLAG(_input, _flag, _result) \ ((_flag <= _result) ? \ @@ -964,6 +992,11 @@ enum txgbe_isb_idx { TXGBE_ISB_VEC1, TXGBE_ISB_MAX }; +#define TXGBE_PHY_FEC_RS (1U) +#define TXGBE_PHY_FEC_BASER (1U << 1) +#define TXGBE_PHY_FEC_OFF (1U << 2) +#define TXGBE_PHY_FEC_AUTO (TXGBE_PHY_FEC_OFF | TXGBE_PHY_FEC_BASER |\ + TXGBE_PHY_FEC_RS) /* board specific private data structure */ struct txgbe_adapter { @@ -981,22 +1014,29 @@ struct txgbe_adapter { struct pci_dev *pdev; unsigned long state; - + u32 bp_link_mode; + u32 curbp_link_mode; /* Some features need tri-state capability, * thus the additional *_CAPABLE flags. 
*/ u32 flags; u32 flags2; + u32 flags3; + u8 tx_unidir_mode; u8 an73_mode; u8 backplane_an; u8 an73; - u8 an37; + u8 autoneg; u16 ffe_main; u16 ffe_pre; u16 ffe_post; u8 ffe_set; + u16 fec_mode; u8 backplane_mode; u8 backplane_auto; + struct phytxeq aml_txeq; + bool an_done; + u32 fsm; bool cloud_mode; @@ -1091,13 +1131,16 @@ struct txgbe_adapter { unsigned int rx_ring_count; u32 link_speed; + u32 speed; bool link_up; unsigned long sfp_poll_time; unsigned long link_check_timeout; + struct mutex e56_lock; struct timer_list service_timer; struct work_struct service_task; struct work_struct sfp_sta_task; + struct work_struct temp_task; #ifdef POLL_LINK_STATUS struct timer_list link_check_timer; #endif @@ -1109,6 +1152,9 @@ struct txgbe_adapter { u32 atr_sample_rate; spinlock_t fdir_perfect_lock; + struct txgbe_etype_filter_info etype_filter_info; + struct txgbe_5tuple_filter_info ft_filter_info; + #if IS_ENABLED(CONFIG_FCOE) struct txgbe_fcoe fcoe; #endif /* CONFIG_FCOE */ @@ -1116,14 +1162,16 @@ struct txgbe_adapter { u32 wol; u16 bd_number; - #ifdef HAVE_BRIDGE_ATTRIBS u16 bridge_mode; #endif - + u8 fec_link_mode; + u8 cur_fec_link; + bool link_valid; + u32 etrack_id; char eeprom_id[32]; - char fw_version[32]; - u16 eeprom_cap; + char fl_version[16]; + char fw_version[64]; bool netdev_registered; u32 interrupt_event; #ifdef HAVE_ETHTOOL_SET_PHYS_ID @@ -1147,11 +1195,17 @@ struct txgbe_adapter { u32 tx_hwtstamp_skipped; u32 rx_hwtstamp_cleared; void (*ptp_setup_sdp) (struct txgbe_adapter *); + u64 pps_edge_start; + u64 pps_edge_end; + u64 sec_to_cc; + u8 pps_enabled; #endif /* HAVE_PTP_1588_CLOCK */ DECLARE_BITMAP(active_vfs, TXGBE_MAX_VF_FUNCTIONS); unsigned int num_vfs; + unsigned int max_vfs; struct vf_data_storage *vfinfo; + int vf_rate_link_speed; struct vf_macvlans vf_mvs; struct vf_macvlans *mv_list; #ifdef CONFIG_PCI_IOV @@ -1206,6 +1260,7 @@ struct txgbe_adapter { u64 eth_priv_flags; #define TXGBE_ETH_PRIV_FLAG_LLDP BIT(0) +#define 
TXGBE_ETH_PRIV_FLAG_LEGACY_RX BIT(1) #ifdef HAVE_AF_XDP_ZC_SUPPORT /* AF_XDP zero-copy */ @@ -1218,6 +1273,21 @@ struct txgbe_adapter { u16 num_xsk_pools; #endif bool cmplt_to_dis; + u8 i2c_eeprom[512]; + u32 eeprom_len; + u32 eeprom_type; + + /* amlite: new SW-FW mbox */ +/* u32 swfw_mbox_buf[64]; */ + u8 swfw_index; + u8 desc_reserved; + + int amlite_temp; + + int vlan_rate_link_speed; + DECLARE_BITMAP(limited_vlans, 4096); + int active_vlan_limited; + int queue_rate_limit[64]; // From back to front }; static inline u32 txgbe_misc_isb(struct txgbe_adapter *adapter, @@ -1245,7 +1315,7 @@ struct txgbe_fdir_filter { struct hlist_node fdir_node; union txgbe_atr_input filter; u16 sw_idx; - u16 action; + u64 action; }; enum txgbe_state_t { @@ -1262,6 +1332,7 @@ enum txgbe_state_t { __TXGBE_PTP_RUNNING, __TXGBE_PTP_TX_IN_PROGRESS, #endif + __TXGBE_SWFW_BUSY, }; struct txgbe_cb { @@ -1317,6 +1388,7 @@ void txgbe_assign_netdev_ops(struct net_device *netdev); extern char txgbe_driver_name[]; extern const char txgbe_driver_version[]; +void txgbe_service_event_schedule(struct txgbe_adapter *adapter); void txgbe_irq_disable(struct txgbe_adapter *adapter); void txgbe_irq_enable(struct txgbe_adapter *adapter, bool queues, bool flush); int txgbe_open(struct net_device *netdev); @@ -1345,7 +1417,7 @@ netdev_tx_t txgbe_xmit_frame_ring(struct sk_buff *, struct txgbe_ring *); void txgbe_unmap_and_free_tx_resource(struct txgbe_ring *, struct txgbe_tx_buffer *); -void txgbe_alloc_rx_buffers(struct txgbe_ring *, u16); +bool txgbe_alloc_rx_buffers(struct txgbe_ring *rx_ring, u16 cleaned_count); void txgbe_configure_rscctl(struct txgbe_adapter *adapter, struct txgbe_ring *); void txgbe_clear_rscctl(struct txgbe_adapter *adapter, @@ -1410,6 +1482,7 @@ void txgbe_dbg_init(void); void txgbe_dbg_exit(void); #endif /* HAVE_TXGBE_DEBUG_FS */ void txgbe_dump(struct txgbe_adapter *adapter); +void txgbe_setup_reta(struct txgbe_adapter *adapter); static inline struct netdev_queue *txring_txq(const 
struct txgbe_ring *ring) { @@ -1428,9 +1501,9 @@ int txgbe_get_settings(struct net_device *netdev, int txgbe_write_uc_addr_list(struct net_device *netdev, int pool); void txgbe_full_sync_mac_table(struct txgbe_adapter *adapter); int txgbe_add_mac_filter(struct txgbe_adapter *adapter, - u8 *addr, u16 pool); + const u8 *addr, u16 pool); int txgbe_del_mac_filter(struct txgbe_adapter *adapter, - u8 *addr, u16 pool); + const u8 *addr, u16 pool); int txgbe_available_rars(struct txgbe_adapter *adapter); #ifndef HAVE_VLAN_RX_REGISTER void txgbe_vlan_mode(struct net_device *, u32); @@ -1457,6 +1530,7 @@ void txgbe_set_rx_drop_en(struct txgbe_adapter *adapter); u32 txgbe_rss_indir_tbl_entries(struct txgbe_adapter *adapter); void txgbe_store_reta(struct txgbe_adapter *adapter); +void txgbe_store_vfreta(struct txgbe_adapter *adapter); int txgbe_setup_isb_resources(struct txgbe_adapter *adapter); void txgbe_free_isb_resources(struct txgbe_adapter *adapter); @@ -1465,6 +1539,14 @@ void txgbe_configure_isb(struct txgbe_adapter *adapter); void txgbe_clean_tx_ring(struct txgbe_ring *tx_ring); void txgbe_clean_rx_ring(struct txgbe_ring *rx_ring); u32 txgbe_tx_cmd_type(u32 tx_flags); +void txgbe_free_headwb_resources(struct txgbe_ring *ring); +u16 txgbe_frac_to_bi(u16 frac, u16 denom, int max_bits); +int txgbe_link_mbps(struct txgbe_adapter *adapter); + +int txgbe_find_nth_limited_vlan(struct txgbe_adapter *adapter, int vlan); +void txgbe_del_vlan_limit(struct txgbe_adapter *adapter, int vlan); +void txgbe_set_vlan_limit(struct txgbe_adapter *adapter, int vlan, int rate_limit); +void txgbe_check_vlan_rate_limit(struct txgbe_adapter *adapter); /** * interrupt masking operations. each bit in PX_ICn correspond to a interrupt. 
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c new file mode 100644 index 0000000000000000000000000000000000000000..a1a9751db0581cefa41f6a42eeaa9d6cba5185de --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c @@ -0,0 +1,463 @@ +#include "txgbe_type.h" +#include "txgbe_hw.h" +#include "txgbe_aml.h" +#include "txgbe_e56.h" +#include "txgbe_e56_bp.h" +#include "txgbe_phy.h" + +#include "txgbe.h" + +/** + * txgbe_get_media_type_aml - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +static enum txgbe_media_type txgbe_get_media_type_aml(struct txgbe_hw *hw) +{ + u8 device_type = hw->subsystem_device_id & 0xF0; + enum txgbe_media_type media_type; + + switch (device_type) { + case TXGBE_ID_KR_KX_KX4: + media_type = txgbe_media_type_backplane; + break; + case TXGBE_ID_SFP: + media_type = txgbe_media_type_fiber; + break; + default: + media_type = txgbe_media_type_unknown; + break; + } + + return media_type; +} + +/** + * txgbe_setup_mac_link_aml - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +static s32 txgbe_setup_mac_link_aml(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + u32 link_capabilities = TXGBE_LINK_SPEED_UNKNOWN; + u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN; + struct txgbe_adapter *adapter = hw->back; + bool link_up = false; + bool autoneg = false; + s32 ret_status = 0; + int i = 0; + s32 status = 0; + u32 value = 0; + + /* Check to see if speed passed in is supported. 
*/ + status = TCALL(hw, mac.ops.get_link_capabilities, + &link_capabilities, &autoneg); + if (status) + goto out; + speed &= link_capabilities; + + if (speed == TXGBE_LINK_SPEED_UNKNOWN) { + status = TXGBE_ERR_LINK_SETUP; + goto out; + } + + if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1 || + txgbe_is_backplane(hw)) { + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + if (!adapter->backplane_an) { + if ((link_speed == speed) && link_up) + goto out; + } else { + if (link_up && adapter->an_done) + goto out; + } + mutex_lock(&adapter->e56_lock); + txgbe_e56_set_phylinkmode(adapter, 25, hw->bypassCtle); + mutex_unlock(&adapter->e56_lock); + return 0; + } + + value = rd32(hw, TXGBE_GPIO_EXT); + if (value & (TXGBE_SFP1_MOD_ABS_LS | TXGBE_SFP1_RX_LOS_LS)) + goto out; + + for (i = 0; i < 4; i++) { + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + if (link_up) + break; + msleep(250); + } + + if (speed == TXGBE_LINK_SPEED_25GB_FULL) + adapter->cur_fec_link = txgbe_get_cur_fec_mode(hw); + + if ((link_speed == speed) && link_up && + !(speed == TXGBE_LINK_SPEED_25GB_FULL && + !(adapter->fec_link_mode & adapter->cur_fec_link))) + goto out; + + if (speed == TXGBE_LINK_SPEED_25GB_FULL && + link_speed == TXGBE_LINK_SPEED_25GB_FULL) { + txgbe_e56_fec_mode_polling(hw, &link_up); + + if (link_up) + goto out; + } + + mutex_lock(&adapter->e56_lock); + ret_status = txgbe_set_link_to_amlite(hw, speed); + mutex_unlock(&adapter->e56_lock); + + if (ret_status == TXGBE_ERR_PHY_INIT_NOT_DONE) + goto out; + + if (ret_status == TXGBE_ERR_TIMEOUT) { + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + goto out; + } + + if (speed == TXGBE_LINK_SPEED_25GB_FULL) { + txgbe_e56_fec_mode_polling(hw, &link_up); + } else { + for (i = 0; i < 4; i++) { + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + if (link_up) + goto out; + msleep(250); + } + } + +out: + return status; +} + +/** + * txgbe_get_link_capabilities_aml - Determines 
link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + * + * Determines the link capabilities by reading the AUTOC register. + **/ +static s32 txgbe_get_link_capabilities_aml(struct txgbe_hw *hw, + u32 *speed, + bool *autoneg) +{ + struct txgbe_adapter *adapter = hw->back; + s32 status = 0; + + if (hw->phy.multispeed_fiber) { + *speed = TXGBE_LINK_SPEED_10GB_FULL | + TXGBE_LINK_SPEED_25GB_FULL; + *autoneg = true; + } else if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) { + + if (hw->phy.fiber_suppport_speed == + TXGBE_LINK_SPEED_10GB_FULL && AUTO <= 1) { + adapter->backplane_an = false; + *autoneg = false; + } else { + *autoneg = true; + } + *speed = hw->phy.fiber_suppport_speed; + } else if (hw->phy.sfp_type == txgbe_sfp_type_25g_sr_core0 || + hw->phy.sfp_type == txgbe_sfp_type_25g_sr_core1 || + hw->phy.sfp_type == txgbe_sfp_type_25g_lr_core0 || + hw->phy.sfp_type == txgbe_sfp_type_25g_lr_core1) { + *speed = TXGBE_LINK_SPEED_25GB_FULL; + *autoneg = false; + } else if (hw->phy.sfp_type == txgbe_sfp_type_25g_aoc_core0 || + hw->phy.sfp_type == txgbe_sfp_type_25g_aoc_core1) { + *speed = TXGBE_LINK_SPEED_25GB_FULL; + *autoneg = false; + } else { + /* SFP */ + if (hw->phy.sfp_type == txgbe_sfp_type_not_present) + *speed = TXGBE_LINK_SPEED_25GB_FULL; + else + *speed = TXGBE_LINK_SPEED_10GB_FULL; + *autoneg = true; + } + + return status; +} + +/** + * txgbe_check_mac_link_aml - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +static s32 txgbe_check_mac_link_aml(struct txgbe_hw *hw, u32 *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + u32 links_reg = 0; + u32 i; + + if 
(link_up_wait_to_complete) { + for (i = 0; i < TXGBE_LINK_UP_TIME; i++) { + links_reg = rd32(hw, + TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + } + } else { + links_reg = rd32(hw, TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) { + *link_up = true; + } else { + *link_up = false; + } + } + + if ((hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core0) || + (hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core1)) { + *link_up = hw->f2c_mod_status; + + if (*link_up) { + /* recover led configure when link up */ + wr32(hw, TXGBE_CFG_LED_CTL, 0); + } else { + /* over write led when link down */ + TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_UP | TXGBE_AMLITE_LED_LINK_25G | + TXGBE_AMLITE_LED_LINK_10G | TXGBE_AMLITE_LED_LINK_ACTIVE); + } + } + + if (*link_up) { + if ((links_reg & TXGBE_CFG_PORT_ST_AML_LINK_25G) == + TXGBE_CFG_PORT_ST_AML_LINK_25G) + *speed = TXGBE_LINK_SPEED_25GB_FULL; + else if ((links_reg & TXGBE_CFG_PORT_ST_AML_LINK_10G) == + TXGBE_CFG_PORT_ST_AML_LINK_10G) + *speed = TXGBE_LINK_SPEED_10GB_FULL; + } else { + *speed = TXGBE_LINK_SPEED_UNKNOWN; + } + + return 0; +} + +/** + * txgbe_setup_mac_link_multispeed_fiber_aml - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the MAC and/or PHY register and restarts link. 
+ **/ +static s32 txgbe_setup_mac_link_multispeed_fiber_aml(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + u32 highest_link_speed = TXGBE_LINK_SPEED_UNKNOWN; + u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN; + struct txgbe_adapter *adapter = hw->back; + bool autoneg, link_up = false; + u32 speedcnt = 0; + s32 status = 0; + + /* Mask off requested but non-supported speeds */ + status = TCALL(hw, mac.ops.get_link_capabilities, + &link_speed, &autoneg); + if (status != 0) + return status; + + speed &= link_speed; + + /* Try each speed one by one, highest priority first. We do this in + * software because 10Gb fiber doesn't support speed autonegotiation. + */ + if (speed & TXGBE_LINK_SPEED_25GB_FULL) { + speedcnt++; + highest_link_speed = TXGBE_LINK_SPEED_25GB_FULL; + + /* If we already have link at this speed, just jump out */ + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + + adapter->cur_fec_link = txgbe_get_cur_fec_mode(hw); + + if ((link_speed == TXGBE_LINK_SPEED_25GB_FULL) && link_up && + adapter->fec_link_mode & adapter->cur_fec_link) + goto out; + + /* Allow module to change analog characteristics (10G->25G) */ + msec_delay(40); + + status = TCALL(hw, mac.ops.setup_mac_link, + TXGBE_LINK_SPEED_25GB_FULL, + autoneg_wait_to_complete); + if (status != 0) + return status; + + /*aml wait link in setup,no need to repeatly wait*/ + /* If we have link, just jump out */ + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + + if (link_up) + goto out; + } + + if (speed & TXGBE_LINK_SPEED_10GB_FULL) { + speedcnt++; + if (highest_link_speed == TXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = TXGBE_LINK_SPEED_10GB_FULL; + + /* If we already have link at this speed, just jump out */ + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + + if ((link_speed == TXGBE_LINK_SPEED_10GB_FULL) && link_up) + goto out; + + /* Allow module to change analog characteristics (25G->10G) */ + msec_delay(40); + + status = TCALL(hw, mac.ops.setup_mac_link, + 
TXGBE_LINK_SPEED_10GB_FULL, + autoneg_wait_to_complete); + if (status != 0) + return status; + + /*aml wait link in setup,no need to repeatly wait*/ + /* If we have link, just jump out */ + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + + if (link_up) { + adapter->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG; + goto out; + } + } + + /* We didn't get link. Configure back to the highest speed we tried, + * (if there was more than one). We call ourselves back with just the + * single highest speed that the user requested. + */ + if (speedcnt > 1) + status = txgbe_setup_mac_link_multispeed_fiber_aml(hw, + highest_link_speed, + autoneg_wait_to_complete); + +out: + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & TXGBE_LINK_SPEED_25GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_25GB_FULL; + + if (speed & TXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_10GB_FULL; + + return status; +} + + +static void txgbe_init_mac_link_ops_aml(struct txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + + if (mac->ops.get_media_type(hw) == txgbe_media_type_fiber) { + mac->ops.disable_tx_laser = + txgbe_disable_tx_laser_multispeed_fiber; + mac->ops.enable_tx_laser = + txgbe_enable_tx_laser_multispeed_fiber; + mac->ops.flap_tx_laser = txgbe_flap_tx_laser_multispeed_fiber; + + if (hw->phy.multispeed_fiber) { + /* Set up dual speed SFP+ support */ + mac->ops.setup_link = txgbe_setup_mac_link_multispeed_fiber_aml; + mac->ops.setup_mac_link = txgbe_setup_mac_link_aml; + mac->ops.set_rate_select_speed = + txgbe_set_hard_rate_select_speed; + } else { + mac->ops.setup_link = txgbe_setup_mac_link_aml; + mac->ops.set_rate_select_speed = + txgbe_set_hard_rate_select_speed; + } + } +} + +static s32 txgbe_setup_sfp_modules_aml(struct txgbe_hw *hw) +{ + s32 ret_val = 0; + + DEBUGFUNC("txgbe_setup_sfp_modules_aml"); + + if (hw->phy.sfp_type != txgbe_sfp_type_unknown) { + 
txgbe_init_mac_link_ops_aml(hw); + + hw->phy.ops.reset = NULL; + } + + return ret_val; +} + +/** + * txgbe_init_phy_ops - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. + * + **/ +static s32 txgbe_init_phy_ops_aml(struct txgbe_hw *hw) +{ + s32 ret_val = 0; + + txgbe_init_i2c(hw); + wr32(hw, TXGBE_MAC_MDIO_CLAUSE_22_PORT, + TXGBE_MAC_MDIO_CLAUSE_ALL_PRTCL22); + + /* Identify the PHY or SFP module */ + ret_val = TCALL(hw, phy.ops.identify); + if (ret_val == TXGBE_ERR_SFP_NOT_SUPPORTED) + goto init_phy_ops_out; + + /* Setup function pointers based on detected SFP module and speeds */ + txgbe_init_mac_link_ops_aml(hw); + if (hw->phy.sfp_type != txgbe_sfp_type_unknown) + hw->phy.ops.reset = NULL; + +init_phy_ops_out: + return ret_val; +} + +s32 txgbe_init_ops_aml(struct txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + struct txgbe_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + ret_val = txgbe_init_ops_generic(hw); + + /* PHY */ + phy->ops.init = txgbe_init_phy_ops_aml; + + /* MAC */ + mac->ops.get_media_type = txgbe_get_media_type_aml; + mac->ops.setup_sfp = txgbe_setup_sfp_modules_aml; + + /* LINK */ + mac->ops.get_link_capabilities = txgbe_get_link_capabilities_aml; + mac->ops.setup_link = txgbe_setup_mac_link_aml; + mac->ops.check_link = txgbe_check_mac_link_aml; + + return ret_val; +} + diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h new file mode 100644 index 0000000000000000000000000000000000000000..4a65d60e204a85ec3eebd5a06d129c7de2df5110 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h @@ -0,0 +1,7 @@ +#ifndef _TXGBE_AML_H_ +#define _TXGBE_AML_H_ + +s32 txgbe_init_ops_aml(struct txgbe_hw *hw); + +#endif /* _TXGBE_AML_H_ */ + diff --git 
a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml40.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml40.c new file mode 100644 index 0000000000000000000000000000000000000000..996c565e95e1ab3a6357754c033c66b7dcfe433e --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml40.c @@ -0,0 +1,293 @@ +#include "txgbe_type.h" +#include "txgbe_hw.h" +#include "txgbe_aml40.h" +#include "txgbe_e56.h" +#include "txgbe_e56_bp.h" +#include "txgbe_phy.h" + +#include "txgbe.h" + +/** + * txgbe_get_media_type_aml40 - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +enum txgbe_media_type txgbe_get_media_type_aml40(struct txgbe_hw *hw) +{ + u8 device_type = hw->subsystem_device_id & 0xF0; + enum txgbe_media_type media_type; + + switch (device_type) { + case TXGBE_ID_KR_KX_KX4: + media_type = txgbe_media_type_backplane; + break; + case TXGBE_ID_SFP: + media_type = txgbe_media_type_fiber_qsfp; + break; + default: + media_type = txgbe_media_type_unknown; + break; + } + + return media_type; +} + +/** + * txgbe_setup_mac_link_aml - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +static s32 txgbe_setup_mac_link_aml40(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + u32 link_capabilities = TXGBE_LINK_SPEED_UNKNOWN; + u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN; + struct txgbe_adapter *adapter = hw->back; + bool link_up = false; + bool autoneg = false; + s32 ret_status = 0; + int i = 0; + s32 status = 0; + + /* Check to see if speed passed in is supported. 
*/ + status = TCALL(hw, mac.ops.get_link_capabilities, + &link_capabilities, &autoneg); + if (status) + goto out; + + speed &= link_capabilities; + + if (speed == TXGBE_LINK_SPEED_UNKNOWN) { + status = TXGBE_ERR_LINK_SETUP; + goto out; + } + + if (hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core0 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core1 || + txgbe_is_backplane(hw)) { + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + if (!adapter->backplane_an) { + if ((link_speed == speed) && link_up) + goto out; + } else { + if (link_up && adapter->an_done) + goto out; + } + mutex_lock(&adapter->e56_lock); + txgbe_e56_set_phylinkmode(adapter, 40, hw->bypassCtle); + mutex_unlock(&adapter->e56_lock); + goto out; + } + + for (i = 0; i < 4; i++) { + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + if (link_up) + break; + msleep(250); + } + + if ((link_speed == speed) && link_up) + goto out; + + mutex_lock(&adapter->e56_lock); + ret_status = txgbe_set_link_to_amlite(hw, speed); + mutex_unlock(&adapter->e56_lock); + + if (ret_status == TXGBE_ERR_TIMEOUT) + adapter->link_valid = false; + + for (i = 0; i < 4; i++) { + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + if (link_up) + goto out; + msleep(250); + } + + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + +out: + return status; +} + +/** + * txgbe_get_link_capabilities_aml40 - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + * + * Determines the link capabilities by reading the AUTOC register. 
+ **/ +static s32 txgbe_get_link_capabilities_aml40(struct txgbe_hw *hw, + u32 *speed, + bool *autoneg) +{ + s32 status = 0; + + if (hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core0 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core1) { + *autoneg = true; + *speed = TXGBE_LINK_SPEED_40GB_FULL; + } else if (txgbe_is_backplane(hw)) { + *speed = TXGBE_LINK_SPEED_40GB_FULL; + *autoneg = true; + } else { + *speed = TXGBE_LINK_SPEED_40GB_FULL; + *autoneg = true; + } + + return status; +} + +/** + * txgbe_check_mac_link_aml40 - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +static s32 txgbe_check_mac_link_aml40(struct txgbe_hw *hw, u32 *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + struct txgbe_adapter *adapter = hw->back; + u32 links_reg = 0; + u32 i; + + if (link_up_wait_to_complete) { + for (i = 0; i < TXGBE_LINK_UP_TIME; i++) { + links_reg = rd32(hw, + TXGBE_CFG_PORT_ST); + + if (!adapter->link_valid) { + *link_up = false; + + msleep(100); + continue; + } + + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + } + } else { + links_reg = rd32(hw, TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) + *link_up = true; + else + *link_up = false; + } + + if (!adapter->link_valid) + *link_up = false; + + if (*link_up) { + if ((links_reg & TXGBE_CFG_PORT_ST_AML_LINK_40G) == + TXGBE_CFG_PORT_ST_AML_LINK_40G) + *speed = TXGBE_LINK_SPEED_40GB_FULL; + } else { + *speed = TXGBE_LINK_SPEED_UNKNOWN; + } + + if (txgbe_is_backplane(hw)) { + if (!adapter->an_done) { + *link_up = false; + *speed = TXGBE_LINK_SPEED_UNKNOWN; + } + } + + return 0; +} + +static void txgbe_init_mac_link_ops_aml40(struct txgbe_hw *hw) +{ + struct txgbe_mac_info 
*mac = &hw->mac; + + mac->ops.disable_tx_laser = + txgbe_disable_tx_laser_multispeed_fiber; + mac->ops.enable_tx_laser = + txgbe_enable_tx_laser_multispeed_fiber; + mac->ops.flap_tx_laser = txgbe_flap_tx_laser_multispeed_fiber; + + mac->ops.setup_link = txgbe_setup_mac_link_aml40; + mac->ops.set_rate_select_speed = txgbe_set_hard_rate_select_speed; +} + +static s32 txgbe_setup_sfp_modules_aml40(struct txgbe_hw *hw) +{ + s32 ret_val = 0; + + DEBUGFUNC("txgbe_setup_sfp_modules_aml40"); + + if (hw->phy.sfp_type != txgbe_sfp_type_unknown) { + txgbe_init_mac_link_ops_aml40(hw); + + hw->phy.ops.reset = NULL; + } + + return ret_val; +} + +/** + * txgbe_init_phy_ops - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. + * + **/ +s32 txgbe_init_phy_ops_aml40(struct txgbe_hw *hw) +{ + s32 ret_val = 0; + + txgbe_init_i2c(hw); + wr32(hw, TXGBE_MAC_MDIO_CLAUSE_22_PORT, + TXGBE_MAC_MDIO_CLAUSE_ALL_PRTCL22); + + /* Identify the PHY or SFP module */ + ret_val = TCALL(hw, phy.ops.identify); + if (ret_val == TXGBE_ERR_SFP_NOT_SUPPORTED) + goto init_phy_ops_out; + + /* Setup function pointers based on detected SFP module and speeds */ + txgbe_init_mac_link_ops_aml40(hw); + if (hw->phy.sfp_type != txgbe_sfp_type_unknown) + hw->phy.ops.reset = NULL; + +init_phy_ops_out: + return ret_val; +} + +s32 txgbe_init_ops_aml40(struct txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + struct txgbe_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + ret_val = txgbe_init_ops_generic(hw); + + /* PHY */ + phy->ops.init = txgbe_init_phy_ops_aml40; + + /* MAC */ + mac->ops.get_media_type = txgbe_get_media_type_aml40; + mac->ops.setup_sfp = txgbe_setup_sfp_modules_aml40; + + /* LINK */ + mac->ops.check_link = txgbe_check_mac_link_aml40; + mac->ops.setup_link = txgbe_setup_mac_link_aml40; + 
mac->ops.get_link_capabilities = txgbe_get_link_capabilities_aml40; + + return ret_val; +} + diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml40.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml40.h new file mode 100644 index 0000000000000000000000000000000000000000..b264ee0db1cbe85bfe6c6a41638fead386bdd72b --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml40.h @@ -0,0 +1,8 @@ +#ifndef _TXGBE_AML40_H_ +#define _TXGBE_AML40_H_ + +enum txgbe_media_type txgbe_get_media_type_aml40(struct txgbe_hw *hw); +s32 txgbe_init_ops_aml40(struct txgbe_hw *hw); +s32 txgbe_init_phy_ops_aml40(struct txgbe_hw *hw); +#endif /* _TXGBE_AML40_H_ */ + diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_bp.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_bp.c index f12309c769703f539b32a47aaf0f2bdee6b46294..9618c47e553434ad1d7a469a6083e1094e9c68e4 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_bp.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_bp.c @@ -14,8 +14,22 @@ int txgbe_bp_mode_setting(struct txgbe_adapter *adapter) struct txgbe_hw *hw = &adapter->hw; /*default to open an73*/ - adapter->backplane_an = AUTO?1:0; - adapter->an37 = AUTO?1:0; + if ((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_KR_KX_KX4) + adapter->backplane_an = AUTO ? 1 : 0; + + switch (hw->mac.type) { + case txgbe_mac_sp: + if (AUTO > 1) + adapter->backplane_an = AUTO ? 1 : 0; + break; + case txgbe_mac_aml40: + case txgbe_mac_aml: + default: + adapter->backplane_an = AUTO ? 1 : 0; + break; + } + + adapter->autoneg = AUTO ? 
1 : 0; switch (adapter->backplane_mode) { case TXGBE_BP_M_KR: hw->subsystem_device_id = TXGBE_ID_WX1820_KR_KX_KX4; @@ -35,13 +49,13 @@ int txgbe_bp_mode_setting(struct txgbe_adapter *adapter) if (adapter->backplane_auto == TXGBE_BP_M_AUTO) { adapter->backplane_an = 1; - adapter->an37 = 1; + adapter->autoneg = 1; } else if (adapter->backplane_auto == TXGBE_BP_M_NAUTO) { adapter->backplane_an = 0; - adapter->an37 = 0; + adapter->autoneg = 0; } - if (adapter->ffe_set == 0) + if ((adapter->ffe_set == 0) && (KR_SET == 0)) return 0; if (KR_SET == 1) { @@ -66,27 +80,23 @@ int txgbe_bp_mode_setting(struct txgbe_adapter *adapter) void txgbe_bp_watchdog_event(struct txgbe_adapter *adapter) { - u32 value = 0; struct txgbe_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; + u32 value = 0; + int ret = 0; /* only continue if link is down */ - if (netif_carrier_ok(netdev)) + if (netif_carrier_ok(adapter->netdev)) return; - if (KR_POLLING == 1) { + if (adapter->flags2 & TXGBE_FLAG2_KR_TRAINING) { value = txgbe_rd32_epcs(hw, 0x78002); - value = value & 0x4; - if (value == 0x4) { - e_dev_info("Enter training\n"); - handle_bkp_an73_flow(0, adapter); - } - } else { - if(adapter->flags2 & TXGBE_FLAG2_KR_TRAINING){ - e_dev_info("Enter training\n"); - handle_bkp_an73_flow(0, adapter); - adapter->flags2 &= ~TXGBE_FLAG2_KR_TRAINING; + if ((value & BIT(2)) == BIT(2)) { + e_info(hw, "Enter training\n"); + ret = handle_bkp_an73_flow(0, adapter); + if (ret) + txgbe_set_link_to_kr(hw, 1); } + adapter->flags2 &= ~TXGBE_FLAG2_KR_TRAINING; } } @@ -98,30 +108,24 @@ void txgbe_bp_down_event(struct txgbe_adapter *adapter) if (adapter->backplane_an == 0) return; - switch (KR_RESTART_T_MODE) { - case 1: - txgbe_wr32_epcs(hw, TXGBE_VR_AN_KR_MODE_CL, 0x0000); - txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0000); - txgbe_wr32_epcs(hw, 0x78001, 0x0000); + val = txgbe_rd32_epcs(hw, 0x78002); + val1 = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_CTL); + kr_dbg(KR_MODE, "AN INT : %x - AN CTL : %x - 
PL : %x\n", + val, val1, txgbe_rd32_epcs(hw, 0x70012)); + switch (AN73_TRAINNING_MODE) { + case 0: msleep(1000); - txgbe_set_link_to_kr(hw, 1); - break; - case 2: - txgbe_wr32_epcs(hw, TXGBE_VR_AN_KR_MODE_CL, 0x0000); - txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0000); - txgbe_wr32_epcs(hw, 0x78001, 0x0000); - msleep(1050); - txgbe_wr32_epcs(hw, TXGBE_VR_AN_KR_MODE_CL, 0x0001); - txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x3200); - txgbe_wr32_epcs(hw, 0x78001, 0x0007); + if ((val & BIT(2)) == BIT(2)) { + if (!(adapter->flags2 & TXGBE_FLAG2_KR_TRAINING)) + adapter->flags2 |= TXGBE_FLAG2_KR_TRAINING; + } else { + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0); + txgbe_wr32_epcs(hw, 0x78002, 0x0000); + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x3000); + } break; - default: - if (AN73_TRAINNING_MODE == 1) - msleep(100); - else - msleep(1000); - val = txgbe_rd32_epcs(hw, 0x78002); - val1 = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_CTL); + case 1: + msleep(100); if ((val & BIT(2)) == BIT(2)) { if (!(adapter->flags2 & TXGBE_FLAG2_KR_TRAINING)) adapter->flags2 |= TXGBE_FLAG2_KR_TRAINING; @@ -130,8 +134,8 @@ void txgbe_bp_down_event(struct txgbe_adapter *adapter) txgbe_wr32_epcs(hw, 0x78002, 0x0000); txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x3000); } - kr_dbg(KR_MODE, "0x78002 : %x - 0x70000 : %x\n", val, val1); - kr_dbg(KR_MODE, "0x70012 : %x\n", txgbe_rd32_epcs(hw, 0x70012)); + break; + default: break; } } @@ -209,6 +213,83 @@ int chk_bkp_an73_ability(bkpan73ability tBkpAn73Ability, bkpan73ability tLpBkpAn return 0; } +static void txgbe_bp_print_page_status(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 rdata = 0; + + rdata = txgbe_rd32_epcs(hw, 0x70010); + kr_dbg(KR_MODE, "read 70010 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70011); + kr_dbg(KR_MODE, "read 70011 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70012); + kr_dbg(KR_MODE, "read 70012 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70013); + kr_dbg(KR_MODE, "read 
70013 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70014); + kr_dbg(KR_MODE, "read 70014 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70015); + kr_dbg(KR_MODE, "read 70015 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70016); + kr_dbg(KR_MODE, "read 70016 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70017); + kr_dbg(KR_MODE, "read 70017 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70018); + kr_dbg(KR_MODE, "read 70018 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70019); + kr_dbg(KR_MODE, "read 70019 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70020); + kr_dbg(KR_MODE, "read 70020 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70021); + kr_dbg(KR_MODE, "read 70021 data %0x\n", rdata); +} + +static void txgbe_bp_exchange_page(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 an_int, base_page = 0; + int count = 0; + + an_int = txgbe_rd32_epcs(hw, 0x78002); + if (!(an_int & BIT(2))) + return; + /* 500ms timeout */ + for (count = 0; count < 5000; count++) { + kr_dbg(KR_MODE, "-----count----- %d\n", count); + if (an_int & BIT(2)) { + u8 next_page = 0; + u32 rdata, addr; + + txgbe_bp_print_page_status(adapter); + addr = base_page == 0 ? 
0x70013 : 0x70019; + rdata = txgbe_rd32_epcs(hw, addr); + if (rdata & BIT(14)) { + if (rdata & BIT(15)) { + /* always set null message */ + txgbe_wr32_epcs(hw, 0x70016, 0x2001); + kr_dbg(KR_MODE, "write 70016 0x%0x\n", + 0x2001); + rdata = txgbe_rd32_epcs(hw, 0x70010); + txgbe_wr32_epcs(hw, 0x70010, + rdata | BIT(15)); + kr_dbg(KR_MODE, "write 70010 0x%0x\n", + rdata); + next_page = 1; + } else { + next_page = 0; + } + base_page = 1; + } + /* clear an pacv int */ + txgbe_wr32_epcs(hw, 0x78002, 0x0000); + kr_dbg(KR_MODE, "write 78002 0x%0x\n", 0x0000); + usec_delay(100); + if (next_page == 0) + return; + } + usec_delay(100); + } +} /*Get Ethernet Backplane AN73 Base Page Ability **byLinkPartner: @@ -235,6 +316,10 @@ int get_bkp_an73_ability(bkpan73ability *pt_bkp_an73_ability, unsigned char byLi pt_bkp_an73_ability->nextPage = (rdata >> 15) & 0x01; kr_dbg(KR_MODE, " Next Page (bit15): %d\n", pt_bkp_an73_ability->nextPage); + /* if have next pages, exchange next pages. */ + if (pt_bkp_an73_ability->nextPage) + txgbe_bp_exchange_page(adapter); + rdata = txgbe_rd32_epcs(hw, 0x70014); kr_dbg(KR_MODE, "SR AN MMD LP Base Page Ability Register 2: 0x%x\n", rdata); pt_bkp_an73_ability->linkAbility = rdata & 0xE0; @@ -308,33 +393,7 @@ static void set_fields( } } -/*Clear Ethernet Backplane AN73 Interrupt status -**- intIndexHi =0, only intIndex bit will be cleared -**- intIndexHi !=0, the [intIndexHi, intIndex] range will be cleared -*/ -int clr_bkp_an73_int(unsigned int intIndex, unsigned int intIndexHi, struct txgbe_adapter * adapter) -{ - struct txgbe_hw *hw = &adapter->hw; - unsigned int rdata, wdata; - int status = 0; - - rdata = txgbe_rd32_epcs(hw, 0x78002); - kr_dbg(KR_MODE, "[Before clear] Read VR AN MMD Interrupt Register: 0x%x\n", rdata); - - wdata = rdata; - if (intIndexHi) - set_fields(&wdata, intIndexHi, intIndex, 0); - else - set_fields(&wdata, intIndex, intIndex, 0); - - txgbe_wr32_epcs(hw, 0x78002, wdata); - rdata = txgbe_rd32_epcs(hw, 0x78002); - 
kr_dbg(KR_MODE, "[After clear] Read VR AN MMD Interrupt Register: 0x%x\n", rdata); - - return status; -} - -void read_phy_lane_txeq(unsigned short lane, struct txgbe_adapter *adapter) +static void read_phy_lane_txeq(unsigned short lane, struct txgbe_adapter *adapter) { struct txgbe_hw *hw = &adapter->hw; unsigned int addr, rdata; @@ -369,7 +428,7 @@ void read_phy_lane_txeq(unsigned short lane, struct txgbe_adapter *adapter) **- bits[1:0] =2'b11: Enable the CL72 KR training **- bits[1:0] =2'b01: Disable the CL72 KR training */ -int en_cl72_krtr(unsigned int enable, struct txgbe_adapter *adapter) +static int en_cl72_krtr(unsigned int enable, struct txgbe_adapter *adapter) { struct txgbe_hw *hw = &adapter->hw; unsigned int wdata = 0; @@ -409,7 +468,7 @@ int en_cl72_krtr(unsigned int enable, struct txgbe_adapter *adapter) return 0; } -int chk_cl72_krtr_status(struct txgbe_adapter *adapter) +static int chk_cl72_krtr_status(struct txgbe_adapter *adapter) { struct txgbe_hw *hw = &adapter->hw; unsigned int rdata = 0, rdata1; @@ -437,7 +496,6 @@ int chk_cl72_krtr_status(struct txgbe_adapter *adapter) kr_dbg(KR_MODE, " Start-Up Protocol Status (bit2): %d\n", ((rdata >> 2) & 0x01)); kr_dbg(KR_MODE, " Frame Lock (bit1): %d\n", ((rdata >> 1) & 0x01)); kr_dbg(KR_MODE, " Receiver Status (bit0): %d\n", ((rdata >> 0) & 0x01)); - /*If bit3 is set, Training is completed with failure*/ if ((rdata1 >> 3) & 0x01) { kr_dbg(KR_MODE, "Training is completed with failure!!!\n"); @@ -448,7 +506,7 @@ int chk_cl72_krtr_status(struct txgbe_adapter *adapter) /*If bit0 is set, Receiver trained and ready to receive data*/ if ((rdata1 >> 0) & 0x01) { kr_dbg(KR_MODE, "Receiver trained and ready to receive data ^_^\n"); - e_dev_info("Receiver ready.\n"); + e_info(hw, "Receiver ready.\n"); read_phy_lane_txeq(0, adapter); return status; } @@ -459,21 +517,64 @@ int chk_cl72_krtr_status(struct txgbe_adapter *adapter) return status; } +static int txgbe_cl72_trainning(struct txgbe_adapter *adapter) +{ + 
struct txgbe_hw *hw = &adapter->hw; + u32 rdata = 0, rdata1 = 0; + bool lpld_all_rd = false; + int ret = 0; + + if (AN73_TRAINNING_MODE == 1) + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0); + + ret |= en_cl72_krtr(3, adapter); + kr_dbg(KR_MODE, "\nCheck the Clause 72 KR Training status ...\n"); + ret |= chk_cl72_krtr_status(adapter); + + ret = read_poll_timeout(txgbe_rd32_epcs, rdata, (rdata & 0x8000), 1000, + 200000, false, hw, 0x10099); + if (!ret) { + rdata1 = txgbe_rd32_epcs(hw, 0x1009b) & 0x8000; + if (rdata1 == 0x8000) + lpld_all_rd = true; + } + + if (lpld_all_rd) { + rdata = rd32_ephy(hw, 0x100E); + rdata1 = rd32_ephy(hw, 0x100F); + e_dev_info("Lp and Ld all Ready, FFE : %d-%d-%d.\n", + (rdata >> 6) & 0x3F, rdata1 & 0x3F, (rdata1 >> 6) & 0x3F); + if (AN73_TRAINNING_MODE == 1 && hw->dac_sfp == false) + if ((((rdata >> 6) & 0x3F) == 27) && + ((rdata1 & 0x3F) == 8) && + (((rdata1 >> 6) & 0x3F)) == 44) + return -1; + /* clear an pacv int */ + txgbe_wr32_epcs(hw, 0x78002, 0x0000); + ret = read_poll_timeout(txgbe_rd32_epcs, rdata, (rdata & 0x1000), 1000, + 100000, false, hw, 0x30020); + if (!ret) + e_dev_info("INT_AN_INT_CMPLT =1, AN73 Done Success.\n"); + return 0; + } + /* clear an pacv int */ + txgbe_wr32_epcs(hw, 0x78002, 0x0000); + if (AN73_TRAINNING_MODE == 0) + en_cl72_krtr(1, adapter); + + return -1; +} + int handle_bkp_an73_flow(unsigned char bp_link_mode, struct txgbe_adapter *adapter) { bkpan73ability tBkpAn73Ability , tLpBkpAn73Ability ; - u32 rdata = 0, rdata1 = 0, round = 1; struct txgbe_hw *hw = &adapter->hw; - bool lpld_all_rd = false; - unsigned int addr, data; - int status = 0, k; + bool fec_en = false; + u32 fecAbility = 0; + int ret = 0; tBkpAn73Ability.currentLinkMode = bp_link_mode; - if (AN73_TRAINNING_MODE == 1) { - round = 2; - txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0); - } kr_dbg(KR_MODE, "HandleBkpAn73Flow().\n"); kr_dbg(KR_MODE, "---------------------------------\n"); @@ -482,8 +583,6 @@ int handle_bkp_an73_flow(unsigned char 
bp_link_mode, struct txgbe_adapter *adapt get_bkp_an73_ability(&tBkpAn73Ability, 0, adapter); /*2. Check the AN73 Interrupt Status*/ kr_dbg(KR_MODE, "<2>. Check the AN73 Interrupt Status ...\n"); - /*3.Clear the AN_PG_RCV interrupt*/ - clr_bkp_an73_int(2, 0x0, adapter); /*3.1. Get the link partner AN73 Base Page Ability*/ kr_dbg(KR_MODE, "<3.1>. Get the link partner AN73 Base Page Ability ...\n"); @@ -497,57 +596,20 @@ int handle_bkp_an73_flow(unsigned char bp_link_mode, struct txgbe_adapter *adapt chk_bkp_an73_ability(tBkpAn73Ability, tLpBkpAn73Ability, adapter); /*Check the FEC and KR Training for KR mode*/ - /* FEC handling */ kr_dbg(KR_MODE, "<3.3>. Check the FEC for KR mode ...\n"); - tBkpAn73Ability.fecAbility = 0x3; - tLpBkpAn73Ability.fecAbility = 0x3; - if (((tBkpAn73Ability.fecAbility & tLpBkpAn73Ability.fecAbility) == 0x03) - && (KR_FEC == 1)) { - e_dev_info("Enable KR FEC ...\n"); - //Write 1 to SR_PMA_KR_FEC_CTRL bit0 to enable the FEC - data = 1; - addr = 0x100ab; //SR_PMA_KR_FEC_CTRL - txgbe_wr32_epcs(hw, addr, data); - } else { - e_dev_info("KR FEC is disabled.\n"); - } - kr_dbg(KR_MODE, "\n<3.4>. Check the CL72 KR Training for KR mode ...\n"); + fecAbility = tBkpAn73Ability.fecAbility & tLpBkpAn73Ability.fecAbility; + fec_en = fecAbility >= 0x1 ? TRUE : FALSE; + adapter->cur_fec_link = fec_en ? + TXGBE_PHY_FEC_BASER : TXGBE_PHY_FEC_OFF; + /* SR_PMA_KR_FEC_CTRL bit0 */ + txgbe_wr32_epcs(hw, 0x100ab, fec_en); + e_dev_info("KR FEC is %s.\n", fec_en ? 
"endabled" : "disabled"); - for (k = 0; k < round; k++) { - status |= en_cl72_krtr(3, adapter); - kr_dbg(KR_MODE, "\nCheck the Clause 72 KR Training status ...\n"); - status |= chk_cl72_krtr_status(adapter); - - status = read_poll_timeout(txgbe_rd32_epcs, rdata, (rdata & 0x8000), 1000, - 200000, false, hw, 0x10099); - if (!status) { - rdata1 = txgbe_rd32_epcs(hw, 0x1009b) & 0x8000; - if (rdata1 == 0x8000) - lpld_all_rd = true; - } - - if (lpld_all_rd) { - rdata = rd32_ephy(hw, 0x100E); - rdata1 = rd32_ephy(hw, 0x100F); - e_dev_info("Lp and Ld all Ready, FFE : %d-%d-%d.\n", - (rdata >> 6) & 0x3F, rdata1 & 0x3F, (rdata1 >> 6) & 0x3F); - clr_bkp_an73_int(2, 0, adapter); - clr_bkp_an73_int(1, 0, adapter); - clr_bkp_an73_int(0, 0, adapter); - status = read_poll_timeout(txgbe_rd32_epcs, rdata, (rdata & 0x1000), 1000, - 100000, false, hw, 0x30020); - if (!status) - e_dev_info("INT_AN_INT_CMPLT =1, AN73 Done Success.\n"); - return 0; - } - clr_bkp_an73_int(2, 0, adapter); - clr_bkp_an73_int(1, 0, adapter); - clr_bkp_an73_int(0, 0, adapter); - } - e_dev_info("Trainning failure\n"); + kr_dbg(KR_MODE, "\n<3.4>. Check the CL72 KR Training for KR mode ...\n"); - if (AN73_TRAINNING_MODE == 0) - status |= en_cl72_krtr(1, adapter); + ret = txgbe_cl72_trainning(adapter); + if (ret) + kr_dbg(KR_MODE, "Trainning failure\n"); - return status; + return ret; } diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_bp.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_bp.h index 7d66c671ebe19b83c694999db98b0f9ea12165d5..d73cb2ba02963a3c74ecf86ff269e29622710768 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_bp.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_bp.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ +/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. 
*/ #ifndef _TXGBE_BP_H_ #define _TXGBE_BP_H_ @@ -7,6 +7,21 @@ #include "txgbe_type.h" #include "txgbe_hw.h" +typedef enum { + ABILITY_1000BASE_KX, + ABILITY_10GBASE_KX4, + ABILITY_10GBASE_KR, + ABILITY_40GBASE_KR4, + ABILITY_40GBASE_CR4, + ABILITY_100GBASE_CR10, + ABILITY_100GBASE_KP4, + ABILITY_100GBASE_KR4, + ABILITY_100GBASE_CR4, + ABILITY_25GBASE_KRCR_S, + ABILITY_25GBASE_KRCR, + ABILITY_MAX, +} ability_filed_encding; + /* Backplane AN73 Base Page Ability struct*/ typedef struct TBKPAN73ABILITY { unsigned int nextPage; //Next Page (bit0) @@ -15,33 +30,6 @@ typedef struct TBKPAN73ABILITY { unsigned int currentLinkMode; //current link mode for local device } bkpan73ability; -#ifndef read_poll_timeout -#define read_poll_timeout(op, val, cond, sleep_us, timeout_us, \ - sleep_before_read, args...) \ -({ \ - u64 __timeout_us = (timeout_us); \ - unsigned long __sleep_us = (sleep_us); \ - ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ - might_sleep_if((__sleep_us) != 0); \ - if (sleep_before_read && __sleep_us) \ - usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ - for (;;) { \ - (val) = op(args); \ - if (cond) \ - break; \ - if (__timeout_us && \ - ktime_compare(ktime_get(), __timeout) > 0) { \ - (val) = op(args); \ - break; \ - } \ - if (__sleep_us) \ - usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ - cpu_relax(); \ - } \ - (cond) ? 0 : -ETIMEDOUT; \ -}) -#endif - #define kr_dbg(KR_MODE, fmt, arg...) 
\ do { \ if (KR_MODE) \ @@ -55,8 +43,6 @@ void txgbe_bp_close_protect(struct txgbe_adapter *adapter); int handle_bkp_an73_flow(unsigned char bp_link_mode, struct txgbe_adapter *adapter); int get_bkp_an73_ability(bkpan73ability *pt_bkp_an73_ability, unsigned char byLinkPartner, struct txgbe_adapter *adapter); -int clr_bkp_an73_int(unsigned int intIndex, unsigned int intIndexHi, - struct txgbe_adapter *adapter); int chk_bkp_an73_ability(bkpan73ability tBkpAn73Ability, bkpan73ability tLpBkpAn73Ability, struct txgbe_adapter *adapter); #endif diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.c index b7351e5f0d66a2be25ac8c426835f6a2429949bc..0c5ac375427b0b4eab2ebd774d6eb31a5e61f0af 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.c @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -14,7 +14,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * based on ixgbe_dcb.c, Copyright(c) 1999 - 2017 Intel Corporation. + * based on txgbe_dcb.c, Copyright(c) 1999 - 2017 Intel Corporation. 
* Contact Information: * Linux NICS * e1000-devel Mailing List diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.h index e912c076521de6afa96d2c6424366e5b8f52289d..7bb35bdfa1c30c53dde5e6a3b9c6976ebb6b21cd 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.h @@ -1,6 +1,6 @@ /* * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -14,7 +14,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * based on ixgbe_dcb.h, Copyright(c) 1999 - 2017 Intel Corporation. + * based on txgbe_dcb.h, Copyright(c) 1999 - 2017 Intel Corporation. * Contact Information: * Linux NICS * e1000-devel Mailing List diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb_nl.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb_nl.c index 0efb05b9977c30ef5191b341b5a0ae814c955b52..0ecca0a348cbbaac4613d517b22a113e80598848 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb_nl.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb_nl.c @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -14,7 +14,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * based on ixgbe_dcb_nl.c, Copyright(c) 1999 - 2017 Intel Corporation. 
+ * based on txgbe_dcb_nl.c, Copyright(c) 1999 - 2017 Intel Corporation. * Contact Information: * Linux NICS * e1000-devel Mailing List diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_debugfs.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_debugfs.c index 3342d88073a445310c77c493c94fafa13108d032..8e78215d4033de799468d00244432b928ef38ac8 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_debugfs.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_debugfs.c @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -14,7 +14,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * based on ixgbe_debugfs.c, Copyright(c) 1999 - 2017 Intel Corporation. + * based on txgbe_debugfs.c, Copyright(c) 1999 - 2017 Intel Corporation. 
* Contact Information: * Linux NICS * e1000-devel Mailing List @@ -45,7 +45,7 @@ enum txgbe_data_func { /** * data operation **/ -ssize_t +static ssize_t txgbe_simple_read_from_pcibar(struct txgbe_adapter *adapter, int res, void __user *buf, size_t size, loff_t *ppos) { @@ -71,7 +71,7 @@ txgbe_simple_read_from_pcibar(struct txgbe_adapter *adapter, int res, return size; } -ssize_t +static ssize_t txgbe_simple_read_from_flash(struct txgbe_adapter *adapter, void __user *buf, size_t size, loff_t *ppos) { @@ -116,7 +116,7 @@ txgbe_simple_read_from_flash(struct txgbe_adapter *adapter, return size; } -ssize_t +static ssize_t txgbe_simple_write_to_flash(struct txgbe_adapter *adapter, const void __user *from, size_t size, loff_t *ppos, size_t available) { diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c new file mode 100644 index 0000000000000000000000000000000000000000..c82417726b989e398e5e6ac8d31fa4134b3f8c77 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c @@ -0,0 +1,3762 @@ +#include "txgbe_e56.h" +#include "txgbe_hw.h" + +#include + +void field_set(u32 *psrcdata, u32 bithigh, u32 bitlow, u32 setvalue) +{ + *psrcdata &= ~GENMASK(bithigh, bitlow); + *psrcdata |= FIELD_PREP_M(GENMASK(bithigh, bitlow), setvalue); +} + +s32 txgbe_e56_check_phy_link(struct txgbe_hw *hw, u32 *speed, + bool *link_up) +{ + struct txgbe_adapter *adapter = hw->back; + u32 rdata = 0; + u32 links_reg = 0; + + /* must read it twice because the state may + * not be correct the first time you read it + */ + rdata = txgbe_rd32_epcs(hw, 0x30001); + rdata = txgbe_rd32_epcs(hw, 0x30001); + + if (rdata & TXGBE_E56_PHY_LINK_UP) + *link_up = true; + else + *link_up = false; + + if (!adapter->link_valid) + *link_up = false; + + links_reg = rd32(hw, TXGBE_CFG_PORT_ST); + if (*link_up) { + if ((links_reg & TXGBE_CFG_PORT_ST_AML_LINK_40G) == + TXGBE_CFG_PORT_ST_AML_LINK_40G) + *speed = TXGBE_LINK_SPEED_40GB_FULL; + else if ((links_reg 
& TXGBE_CFG_PORT_ST_AML_LINK_25G) == + TXGBE_CFG_PORT_ST_AML_LINK_25G) + *speed = TXGBE_LINK_SPEED_25GB_FULL; + else if ((links_reg & TXGBE_CFG_PORT_ST_AML_LINK_10G) == + TXGBE_CFG_PORT_ST_AML_LINK_10G) + *speed = TXGBE_LINK_SPEED_10GB_FULL; + } else { + *speed = TXGBE_LINK_SPEED_UNKNOWN; + } + + return 0; +} + + +static u32 E56phyTxFfeCfg(struct txgbe_hw *hw, u32 speed) +{ + struct txgbe_adapter *adapter = hw->back; + u32 addr; + + if (speed == TXGBE_LINK_SPEED_10GB_FULL) { + adapter->aml_txeq.main = S10G_TX_FFE_CFG_MAIN; + adapter->aml_txeq.pre1 = S10G_TX_FFE_CFG_PRE1; + adapter->aml_txeq.pre2 = S10G_TX_FFE_CFG_PRE2; + adapter->aml_txeq.post = S10G_TX_FFE_CFG_POST; + } else if (speed == TXGBE_LINK_SPEED_25GB_FULL) { + adapter->aml_txeq.main = S25G_TX_FFE_CFG_MAIN; + adapter->aml_txeq.pre1 = S25G_TX_FFE_CFG_PRE1; + adapter->aml_txeq.pre2 = S25G_TX_FFE_CFG_PRE2; + adapter->aml_txeq.post = S25G_TX_FFE_CFG_POST; + + if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1 || + txgbe_is_backplane(hw)) { + adapter->aml_txeq.main = S25G_TX_FFE_CFG_DAC_MAIN; + adapter->aml_txeq.pre1 = S25G_TX_FFE_CFG_DAC_PRE1; + adapter->aml_txeq.pre2 = S25G_TX_FFE_CFG_DAC_PRE2; + adapter->aml_txeq.post = S25G_TX_FFE_CFG_DAC_POST; + } + } else if (speed == TXGBE_LINK_SPEED_40GB_FULL) { + adapter->aml_txeq.main = S10G_TX_FFE_CFG_MAIN; + adapter->aml_txeq.pre1 = S10G_TX_FFE_CFG_PRE1; + adapter->aml_txeq.pre2 = S10G_TX_FFE_CFG_PRE2; + adapter->aml_txeq.post = S10G_TX_FFE_CFG_POST; + + if (hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core0 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core1 || + txgbe_is_backplane(hw)) { + adapter->aml_txeq.main = 0x2b2b2b2b; + adapter->aml_txeq.pre1 = 0x03030303; + adapter->aml_txeq.pre2 = 0; + adapter->aml_txeq.post = 0x11111111; + } + } else { + return 0; + } + + addr = 0x141c; + txgbe_wr32_ephy(hw, addr, adapter->aml_txeq.main); + + addr = 0x1420; + txgbe_wr32_ephy(hw, addr, adapter->aml_txeq.pre1); + + addr 
= 0x1424; + txgbe_wr32_ephy(hw, addr, adapter->aml_txeq.pre2); + + addr = 0x1428; + txgbe_wr32_ephy(hw, addr, adapter->aml_txeq.post); + + return 0; +} + +int txgbe_e56_get_temp(struct txgbe_hw *hw, int *pTempData) +{ + int data_code, temp_data, temp_fraction; + u32 rdata; + u32 timer = 0; + + while(1) { + rdata = rd32(hw, 0x1033c); + if (((rdata >> 12) & 0x1 )!= 0) + break; + + if (timer++ > PHYINIT_TIMEOUT) { + return -ETIMEDOUT; + } + } + + data_code = rdata & 0xFFF; + temp_data = 419400 + 2205 * (data_code * 1000 / 4094 - 500); + + //Change double Temperature to int + *pTempData = temp_data/10000; + temp_fraction = temp_data - (*pTempData * 10000); + if (temp_fraction >= 5000) + *pTempData += 1; + + return 0; +} + +u32 txgbe_e56_cfg_40g(struct txgbe_hw *hw) +{ + u32 addr; + u32 rdata = 0; + int i; + + //CMS Config Master + addr = E56G_CMS_ANA_OVRDVAL_7_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_CMS_ANA_OVRDVAL_7 *)&rdata)->ana_lcpll_lf_vco_swing_ctrl_i = 0xf; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_CMS_ANA_OVRDEN_1 *)&rdata)->ovrd_en_ana_lcpll_lf_vco_swing_ctrl_i = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G_CMS_ANA_OVRDVAL_9_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 23, 0, 0x260000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_CMS_ANA_OVRDEN_1 *)&rdata)->ovrd_en_ana_lcpll_lf_test_in_i = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + //TXS Config Master + for (i = 0; i < 4; i++) { + addr = E56PHY_TXS_TXS_CFG_1_ADDR + (E56PHY_TXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_TXS_CFG_1_ADAPTATION_WAIT_CNT_X256, 0xf); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_WKUP_CNT_ADDR + (E56PHY_TXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_WKUP_CNTLDO_WKUP_CNT_X32, 0xff); + field_set(&rdata, E56PHY_TXS_WKUP_CNTDCC_WKUP_CNT_X32, 
0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_PIN_OVRDVAL_6_ADDR + (E56PHY_TXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 19, 16, 0x6); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_PIN_OVRDEN_0_ADDR + (E56PHY_TXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_PIN_OVRDEN_0_OVRD_EN_TX0_EFUSE_BITS_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_ANA_OVRDVAL_1_ADDR + (E56PHY_TXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_ANA_OVRDVAL_1_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_ANA_OVRDEN_0_ADDR + (E56PHY_TXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + } + //Setting TX FFE + E56phyTxFfeCfg(hw, TXGBE_LINK_SPEED_40GB_FULL); + + //RXS Config master + for (i = 0; i < 4; i++) { + addr = E56PHY_RXS_RXS_CFG_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_RXS_CFG_0_DSER_DATA_SEL, 0x0); + field_set(&rdata, E56PHY_RXS_RXS_CFG_0_TRAIN_CLK_GATE_BYPASS_EN, 0x1fff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_OSC_CAL_N_CDR_0 *)&rdata)->prediv0 = 0xfa0; + ((E56G_RXS0_OSC_CAL_N_CDR_0 *)&rdata)->target_cnt0= 0x203a; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_4_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->osc_range_sel0= 0x2; + ((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->vco_code_init= 0x7ff; + ((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->osc_current_boost_en0= 0x1; + ((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->bbcdr_current_boost0 = 0x0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + 
field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_SDM_WIDTH, 0x3); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_PRELOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_POSTLOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_POSTLOCK, 0xc); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_PRELOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BBCDR_RDY_CNT, 0x3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_6_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_PRELOCK, 0x7); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_POSTLOCK, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_INTL_CONFIG_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_INTL_CONFIG_0 *)&rdata)->adc_intl2slice_delay0 = 0x5555; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_INTL_CONFIG_2_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_INTL_CONFIG_2 *)&rdata)->interleaver_hbw_disable0 = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_LTH, 0x56); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_UTH, 0x6a); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_LTH, 0x1e8); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_UTH, 0x78); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_2_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_LTH, 0x100); + field_set(&rdata, 
E56PHY_RXS_TXFFE_TRAINING_2_CM1_UTH, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_3_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_LTH, 0x4); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_UTH, 0x37); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_TXFFE_TRAIN_MOD_TYPE, 0x38); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_VGA_TRAINING_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_0_VGA_TARGET, 0x34); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_VGA_TRAINING_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT0, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT0, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT123, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT123, 0xa); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT0, 0x9); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT123, 0x9); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_1_LFEQ_LUT, 0x1ffffea); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_2_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P1, + S10G_PHY_RX_CTLE_TAP_FRACP1); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P2, + S10G_PHY_RX_CTLE_TAP_FRACP2); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P3, + S10G_PHY_RX_CTLE_TAP_FRACP3); + txgbe_wr32_ephy(hw, addr, rdata); 
+ + addr = E56PHY_RXS_CTLE_TRAINING_3_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P1, + S10G_PHY_RX_CTLE_TAPWT_WEIGHT1); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P2, + S10G_PHY_RX_CTLE_TAPWT_WEIGHT2); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P3, + S10G_PHY_RX_CTLE_TAPWT_WEIGHT3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_SLICE_DATA_AVG_CNT, + 0x3); + field_set(&rdata, + E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_DATA_AVG_CNT, 0x3); + field_set(&rdata, + E56PHY_RXS_OFFSET_N_GAIN_CAL_0_FE_OFFSET_DAC_CLK_CNT_X8, + 0xc); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_1_SAMP_ADAPT_CFG, + 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_FFE_TRAINING_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_FFE_TRAINING_0_FFE_TAP_EN, 0xf9ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_IDLE_DETECT_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX, 0xa); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__RXS3_ANA_OVRDVAL_11_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw ,addr); + ((E56G__RXS3_ANA_OVRDVAL_11 *)&rdata)->ana_test_adc_clkgen_i = 0x0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__RXS0_ANA_OVRDEN_2_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw ,addr); + ((E56G__RXS0_ANA_OVRDEN_2 *)&rdata)->ovrd_en_ana_test_adc_clkgen_i = 0x0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_0_ADDR + (E56PHY_RXS_OFFSET * 
i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_0_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_6_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 4, 0, 0x6); + field_set(&rdata, 14, 13, 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_BBCDR_VCOFILT_BYP_I, 0x1); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_TEST_BBCDR_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_15_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 2, 0, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_17_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_17_ANA_VGA2_BOOST_CSTM_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_3_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_ANABS_CONFIG_I, 0x1); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_VGA2_BOOST_CSTM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_14_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_4_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_EYE_SCAN_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 
E56PHY_RXS_EYE_SCAN_1_EYE_SCAN_REF_TIMER, 0x400); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_RINGO_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 9, 4, 0x366); + txgbe_wr32_ephy(hw, addr, rdata); + } + + // PDIG Config master + addr = E56PHY_PMD_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_3_CTRL_FSM_TIMEOUT_X64K, 0x80); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_ON_PERIOD_X64K, 0x18); + field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_PERIOD_X512K, 0x3e); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_5_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_5_USE_RECENT_MARKER_OFFSET, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_CONT_ON_ADC_GAIN_CAL_ERR, 0x1); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_DO_RX_ADC_OFST_CAL, 0x3); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_RX_ERR_ACTION_EN, 0x40); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST0_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST1_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST2_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST3_WAIT_CNT_X4096, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST4_WAIT_CNT_X4096, 0x1); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST5_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST6_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST7_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 
E56PHY_CTRL_FSM_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST8_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST9_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST10_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST11_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST12_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST13_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST14_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST15_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_7_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST4_EN, 0x4bf); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST5_EN, 0xc4bf); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_8_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_8_TRAIN_ST7_EN, 0x47ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_12_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_12_TRAIN_ST15_EN, 0x67ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_13_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST0_DONE_EN, 0x8001); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST1_DONE_EN, 0x8002); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_14_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_14_TRAIN_ST3_DONE_EN, 0x8008); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_15_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_15_TRAIN_ST4_DONE_EN, 0x8004); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 
E56PHY_CTRL_FSM_CFG_17_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_17_TRAIN_ST8_DONE_EN, 0x20c0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_18_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_18_TRAIN_ST10_DONE_EN, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_29_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_29_TRAIN_ST15_DC_EN, 0x3f6d); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_33_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN0_RATE_SEL, 0x8000); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN1_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_34_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN2_RATE_SEL, 0x8000); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN3_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_KRT_TFSM_CFG_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X1000K, 0x49); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X8000K, 0x37); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_HOLDOFF_TIMER_X256K, 0x2f); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_FETX_FFE_TRAIN_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_FETX_FFE_TRAIN_CFG_0_KRT_FETX_INIT_FFE_CFG_2, 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + return 0; +} + +u32 txgbe_e56_cfg_25g(struct txgbe_hw *hw) +{ + u32 addr; + u32 rdata = 0; + + addr = E56PHY_CMS_PIN_OVRDVAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_PIN_OVRDVAL_0_INT_PLL0_TX_SIGNAL_TYPE_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CMS_PIN_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_PIN_OVRDEN_0_OVRD_EN_PLL0_TX_SIGNAL_TYPE_I, 0x0); + txgbe_wr32_ephy(hw, addr, 
rdata); + + addr = E56PHY_CMS_ANA_OVRDVAL_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDVAL_2_ANA_LCPLL_HF_VCO_SWING_CTRL_I, 0xf); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CMS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_VCO_SWING_CTRL_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CMS_ANA_OVRDVAL_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 23, 0, 0x260000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_HF_TEST_IN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_TXS_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_TXS_CFG_1_ADAPTATION_WAIT_CNT_X256, 0xf); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_WKUP_CNT_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_WKUP_CNTLDO_WKUP_CNT_X32, 0xff); + field_set(&rdata, E56PHY_TXS_WKUP_CNTDCC_WKUP_CNT_X32, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_PIN_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 27, 24, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_PIN_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_PIN_OVRDEN_0_OVRD_EN_TX0_EFUSE_BITS_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_ANA_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_ANA_OVRDVAL_1_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + E56phyTxFfeCfg(hw, TXGBE_LINK_SPEED_25GB_FULL); + + addr = E56PHY_RXS_RXS_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 
E56PHY_RXS_RXS_CFG_0_DSER_DATA_SEL, 0x0); + field_set(&rdata, E56PHY_RXS_RXS_CFG_0_TRAIN_CLK_GATE_BYPASS_EN, 0x1fff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_1_PREDIV1, 0x700); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_1_TARGET_CNT1, 0x2418); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_OSC_RANGE_SEL1, 0x1); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_VCO_CODE_INIT, 0x7fb); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_OSC_CURRENT_BOOST_EN1, 0x0); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_BBCDR_CURRENT_BOOST1, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_5_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_SDM_WIDTH, 0x3); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_PRELOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_POSTLOCK, 0x3); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_POSTLOCK, 0xa); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_PRELOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BBCDR_RDY_CNT, 0x3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_PRELOCK, 0x7); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_POSTLOCK, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_INTL_CONFIG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_INTL_CONFIG_0_ADC_INTL2SLICE_DELAY1, 0x3333); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_INTL_CONFIG_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_INTL_CONFIG_2_INTERLEAVER_HBW_DISABLE1, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + 
addr = E56PHY_RXS_TXFFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_LTH, 0x56); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_UTH, 0x6a); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_TXFFE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_LTH, 0x1f8); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_UTH, 0xf0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_TXFFE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_LTH, 0x100); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_UTH, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_TXFFE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_LTH, 0x4); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_UTH, 0x37); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_TXFFE_TRAIN_MOD_TYPE, 0x38); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__RXS0_FOM_18__ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56G__RXS0_FOM_18__DFE_COEFFL_HINT__MSB, + E56G__RXS0_FOM_18__DFE_COEFFL_HINT__LSB, 0x0); + //change 0x90 to 0x0 to fix 25G link up keep when cable unplugged + field_set(&rdata, E56G__RXS0_FOM_18__DFE_COEFFH_HINT__MSB, + E56G__RXS0_FOM_18__DFE_COEFFH_HINT__LSB, 0x0); + field_set(&rdata, E56G__RXS0_FOM_18__DFE_COEFF_HINT_LOAD__MSB, + E56G__RXS0_FOM_18__DFE_COEFF_HINT_LOAD__LSB, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_VGA_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_0_VGA_TARGET, 0x34); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_VGA_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT0, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT0, 0xa); + field_set(&rdata, 
E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT123, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT123, 0xa); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT0, 0x9); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT123, 0x9); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_1_LFEQ_LUT, 0x1ffffea); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P1, S25G_PHY_RX_CTLE_TAP_FRACP1); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P2, S25G_PHY_RX_CTLE_TAP_FRACP2); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P3, S25G_PHY_RX_CTLE_TAP_FRACP3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P1, S25G_PHY_RX_CTLE_TAPWT_WEIGHT1); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P2, S25G_PHY_RX_CTLE_TAPWT_WEIGHT2); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P3, S25G_PHY_RX_CTLE_TAPWT_WEIGHT3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_SLICE_DATA_AVG_CNT, 0x3); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_DATA_AVG_CNT, 0x3); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_FE_OFFSET_DAC_CLK_CNT_X8, 0xc); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_1_SAMP_ADAPT_CFG, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_FFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + 
field_set(&rdata, E56PHY_RXS_FFE_TRAINING_0_FFE_TAP_EN, 0xf9ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_IDLE_DETECT_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX, 0xa); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDVAL_11, ana_test_adc_clkgen_i, 0x0); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_2, ovrd_en_ana_test_adc_clkgen_i, 0x0); + + addr = E56PHY_RXS_ANA_OVRDVAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_0_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 4, 0, 0x0); + field_set(&rdata, 14, 13, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_BBCDR_VCOFILT_BYP_I, 0x1); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_TEST_BBCDR_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_15_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 2, 0, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_17_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_17_ANA_VGA2_BOOST_CSTM_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_ANABS_CONFIG_I, 0x1); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_VGA2_BOOST_CSTM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_14_ADDR; + rdata = 
rd32_ephy(hw, addr); + field_set(&rdata, 13, 13, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_EYE_SCAN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_EYE_SCAN_1_EYE_SCAN_REF_TIMER, 0x400); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_RINGO_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 21, 12, 0x366); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_3_CTRL_FSM_TIMEOUT_X64K, 0x80); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_ON_PERIOD_X64K, 0x18); + field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_PERIOD_X512K, 0x3e); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_5_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_5_USE_RECENT_MARKER_OFFSET, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_CONT_ON_ADC_GAIN_CAL_ERR, 0x1); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_DO_RX_ADC_OFST_CAL, 0x3); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_RX_ERR_ACTION_EN, 0x40); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST0_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST1_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST2_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST3_WAIT_CNT_X4096, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST4_WAIT_CNT_X4096, 0x1); + 
field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST5_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST6_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST7_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST8_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST9_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST10_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST11_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST12_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST13_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST14_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST15_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_7_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST4_EN, 0x4bf); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST5_EN, 0xc4bf); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_8_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_8_TRAIN_ST7_EN, 0x47ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_12_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_12_TRAIN_ST15_EN, 0x67ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_13_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST0_DONE_EN, 0x8001); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST1_DONE_EN, 0x8002); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_14_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 
E56PHY_CTRL_FSM_CFG_14_TRAIN_ST3_DONE_EN, 0x8008); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_15_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_15_TRAIN_ST4_DONE_EN, 0x8004); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_17_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_17_TRAIN_ST8_DONE_EN, 0x20c0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_18_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_18_TRAIN_ST10_DONE_EN, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_29_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_29_TRAIN_ST15_DC_EN, 0x3f6d); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_33_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN0_RATE_SEL, 0x8000); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN1_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_34_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN2_RATE_SEL, 0x8000); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN3_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_KRT_TFSM_CFG_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X1000K, 0x49); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X8000K, 0x37); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_HOLDOFF_TIMER_X256K, 0x2f); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_FETX_FFE_TRAIN_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_FETX_FFE_TRAIN_CFG_0_KRT_FETX_INIT_FFE_CFG_2, 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + return 0; +} + +u32 txgbe_e56_cfg_10g(struct txgbe_hw *hw) +{ + u32 addr; + u32 rdata = 0; + + addr = E56G_CMS_ANA_OVRDVAL_7_ADDR; + rdata = rd32_ephy(hw, addr); + 
((E56G_CMS_ANA_OVRDVAL_7 *)&rdata)->ana_lcpll_lf_vco_swing_ctrl_i = 0xf; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_CMS_ANA_OVRDEN_1 *)&rdata)->ovrd_en_ana_lcpll_lf_vco_swing_ctrl_i = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G_CMS_ANA_OVRDVAL_9_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 23, 0, 0x260000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_CMS_ANA_OVRDEN_1 *)&rdata)->ovrd_en_ana_lcpll_lf_test_in_i = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_TXS_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_TXS_CFG_1_ADAPTATION_WAIT_CNT_X256, 0xf); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_WKUP_CNT_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_WKUP_CNTLDO_WKUP_CNT_X32, 0xff); + field_set(&rdata, E56PHY_TXS_WKUP_CNTDCC_WKUP_CNT_X32, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_PIN_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 19, 16, 0x6); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_PIN_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_PIN_OVRDEN_0_OVRD_EN_TX0_EFUSE_BITS_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_ANA_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_ANA_OVRDVAL_1_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //Setting TX FFE + E56phyTxFfeCfg(hw, TXGBE_LINK_SPEED_10GB_FULL); + + addr = E56PHY_RXS_RXS_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_RXS_CFG_0_DSER_DATA_SEL, 0x0); + field_set(&rdata, E56PHY_RXS_RXS_CFG_0_TRAIN_CLK_GATE_BYPASS_EN, 0x1fff); + 
txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_OSC_CAL_N_CDR_0 *)&rdata)->prediv0 = 0xfa0; + ((E56G_RXS0_OSC_CAL_N_CDR_0 *)&rdata)->target_cnt0= 0x203a; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_4_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->osc_range_sel0= 0x2; + ((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->vco_code_init= 0x7ff; + ((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->osc_current_boost_en0= 0x1; + ((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->bbcdr_current_boost0 = 0x0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_5_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_SDM_WIDTH, 0x3); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_PRELOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_POSTLOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_POSTLOCK, 0xc); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_PRELOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BBCDR_RDY_CNT, 0x3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_PRELOCK, 0x7); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_POSTLOCK, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_INTL_CONFIG_0_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_INTL_CONFIG_0 *)&rdata)->adc_intl2slice_delay0 = 0x5555; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_INTL_CONFIG_2_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_INTL_CONFIG_2 *)&rdata)->interleaver_hbw_disable0 = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_LTH, 0x56); + 
field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_UTH, 0x6a); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_LTH, 0x1e8); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_UTH, 0x78); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_LTH, 0x100); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_UTH, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_LTH, 0x4); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_UTH, 0x37); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_TXFFE_TRAIN_MOD_TYPE, 0x38); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_VGA_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_0_VGA_TARGET, 0x34); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_VGA_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT0, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT0, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT123, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT123, 0xa); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT0, 0x9); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT123, 0x9); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_1_LFEQ_LUT, 0x1ffffea); + txgbe_wr32_ephy(hw, addr, rdata); + + 
addr = E56PHY_RXS_CTLE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P1, S10G_PHY_RX_CTLE_TAP_FRACP1); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P2, S10G_PHY_RX_CTLE_TAP_FRACP2); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P3, S10G_PHY_RX_CTLE_TAP_FRACP3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P1, S10G_PHY_RX_CTLE_TAPWT_WEIGHT1); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P2, S10G_PHY_RX_CTLE_TAPWT_WEIGHT2); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P3, S10G_PHY_RX_CTLE_TAPWT_WEIGHT3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_SLICE_DATA_AVG_CNT, 0x3); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_DATA_AVG_CNT, 0x3); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_FE_OFFSET_DAC_CLK_CNT_X8, 0xc); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_1_SAMP_ADAPT_CFG, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_FFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_FFE_TRAINING_0_FFE_TAP_EN, 0xf9ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_IDLE_DETECT_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX, 0xa); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDVAL_11, ana_test_adc_clkgen_i, 0x0); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_2, ovrd_en_ana_test_adc_clkgen_i, 0x0); + + addr = E56PHY_RXS_ANA_OVRDVAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + 
field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_0_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 4, 0, 0x6); + field_set(&rdata, 14, 13, 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_BBCDR_VCOFILT_BYP_I, 0x1); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_TEST_BBCDR_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_15_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 2, 0, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_17_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_17_ANA_VGA2_BOOST_CSTM_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_ANABS_CONFIG_I, 0x1); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_VGA2_BOOST_CSTM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_14_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_EYE_SCAN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_EYE_SCAN_1_EYE_SCAN_REF_TIMER, 0x400); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_RINGO_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 9, 4, 0x366); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + 
field_set(&rdata, E56PHY_PMD_CFG_3_CTRL_FSM_TIMEOUT_X64K, 0x80); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_ON_PERIOD_X64K, 0x18); + field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_PERIOD_X512K, 0x3e); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_5_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_5_USE_RECENT_MARKER_OFFSET, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_CONT_ON_ADC_GAIN_CAL_ERR, 0x1); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_DO_RX_ADC_OFST_CAL, 0x3); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_RX_ERR_ACTION_EN, 0x40); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST0_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST1_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST2_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST3_WAIT_CNT_X4096, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST4_WAIT_CNT_X4096, 0x1); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST5_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST6_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST7_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST8_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST9_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST10_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST11_WAIT_CNT_X4096, 0x4); 
+ txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST12_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST13_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST14_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST15_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_7_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST4_EN, 0x4bf); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST5_EN, 0xc4bf); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_8_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_8_TRAIN_ST7_EN, 0x47ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_12_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_12_TRAIN_ST15_EN, 0x67ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_13_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST0_DONE_EN, 0x8001); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST1_DONE_EN, 0x8002); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_14_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_14_TRAIN_ST3_DONE_EN, 0x8008); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_15_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_15_TRAIN_ST4_DONE_EN, 0x8004); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_17_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_17_TRAIN_ST8_DONE_EN, 0x20c0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_18_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_18_TRAIN_ST10_DONE_EN, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 
E56PHY_CTRL_FSM_CFG_29_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_29_TRAIN_ST15_DC_EN, 0x3f6d); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_33_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN0_RATE_SEL, 0x8000); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN1_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_34_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN2_RATE_SEL, 0x8000); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN3_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_KRT_TFSM_CFG_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X1000K, 0x49); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X8000K, 0x37); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_HOLDOFF_TIMER_X256K, 0x2f); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_FETX_FFE_TRAIN_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_FETX_FFE_TRAIN_CFG_0_KRT_FETX_INIT_FFE_CFG_2, 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + return 0; +} + +static int E56phyRxsOscInitForTempTrackRange(struct txgbe_hw *hw, u32 speed) +{ + int status = 0; + unsigned int addr, rdata, timer; + int T=40; + int RX_COARSE_MID_TD, CMVAR_RANGE_H = 0, CMVAR_RANGE_L = 0; + int OFFSET_CENTRE_RANGE_H, OFFSET_CENTRE_RANGE_L, RANGE_FINAL; + int osc_freq_err_occur; + int i = 0; + int lane_num = 1; + //1. Read the temperature T just before RXS is enabled. + txgbe_e56_get_temp(hw, &T); + + //2. 
Define software variable RX_COARSE_MID_TD (RX Coarse Code mid value dependent upon temperature) + if(T < -5) { RX_COARSE_MID_TD = 10; } + else if(T < 30) { RX_COARSE_MID_TD = 9; } + else if(T < 65) { RX_COARSE_MID_TD = 8; } + else if(T < 100) { RX_COARSE_MID_TD = 7; } + else { RX_COARSE_MID_TD = 6; } + + //Set CMVAR_RANGE_H/L based on the link speed mode + if (speed == TXGBE_LINK_SPEED_10GB_FULL || speed == TXGBE_LINK_SPEED_40GB_FULL) { //10G mode + CMVAR_RANGE_H = S10G_CMVAR_RANGE_H; + CMVAR_RANGE_L = S10G_CMVAR_RANGE_L; + } else if (speed == TXGBE_LINK_SPEED_25GB_FULL) { //25G mode + CMVAR_RANGE_H = S25G_CMVAR_RANGE_H; + CMVAR_RANGE_L = S25G_CMVAR_RANGE_L; + } + + if (speed == TXGBE_LINK_SPEED_40GB_FULL) + lane_num = 4; + // TBD select all lane + //3. Program ALIAS::RXS::RANGE_SEL = CMVAR::RANGE_H + // RXS0_ANA_OVRDVAL[5] + // ana_bbcdr_osc_range_sel_i[1:0] + for (i = 0; i < lane_num; i++) { + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS_ANA_OVRDVAL_5_ANA_BBCDR_OSC_RANGE_SEL_I, + CMVAR_RANGE_H); + txgbe_wr32_ephy(hw, addr, rdata); + + // RXS0_ANA_OVRDEN[0] + // [29] ovrd_en_ana_bbcdr_osc_range_sel_i + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_BBCDR_OSC_RANGE_SEL_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //4. Do SEQ::RX_ENABLE to enable RXS, and let it stop after oscillator calibration. + //This needs to be done by blocking the RX power-up fsm at the state following the oscillator calibration state. + //Follow below steps to do the same before SEQ::RX_ENABLE. + //a. 
ALIAS::PDIG::CTRL_FSM_RX_ST can be stopped at RX_SAMP_CAL_ST which is the state + //after RX_OSC_CAL_ST by configuring ALIAS::RXS::SAMP_CAL_DONE=0b0 + + // RXS0_OVRDVAL[0] + // [22] rxs0_rx0_samp_cal_done_o + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_0_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_0_RXS0_RX0_SAMP_CAL_DONE_O, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + // RXS0_OVRDEN[0] + // [27] ovrd_en_rxs0_rx0_samp_cal_done_o + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDEN_0_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDEN_0_OVRD_EN_RXS0_RX0_SAMP_CAL_DONE_O, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //Do SEQ::RX_ENABLE to enable RXS + rdata = 0; + addr = E56PHY_PMD_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, (0x1 << i)); + txgbe_wr32_ephy(hw, addr, rdata); + + //b. Poll ALIAS::PDIG::CTRL_FSM_RX_ST and confirm its value is RX_SAMP_CAL_ST + // poll CTRL_FSM_RX_ST + rdata = 0; + timer = 0; + osc_freq_err_occur = 0; + while((rdata >> (i * 8) & 0x3f) != 0x9) { //Bit[5:0]!= 0x9 + udelay(500); + // INTR[0] + // [11:8] intr_rx_osc_freq_err + rdata = 0; + addr = E56PHY_INTR_0_ADDR; + rdata = rd32_ephy(hw, addr); + // TBD is always osc_freq_err occur? + if(rdata & (0x100 << i)) { + osc_freq_err_occur = 1; + break; + } + rdata = 0; + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + rdata = rd32_ephy(hw, addr); + + if (timer++ > PHYINIT_TIMEOUT) { + printk("ERROR: Wait E56PHY_CTRL_FSM_RX_STAT_0_ADDR Timeout!!!\n"); + break; + return -1; + } + } + + //5/6.Define software variable as OFFSET_CENTRE_RANGE_H = ALIAS::RXS::COARSE + //- RX_COARSE_MID_TD. Clear the INTR. 
+ rdata = 0; + addr = E56PHY_RXS_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + OFFSET_CENTRE_RANGE_H = (rdata >> 4) & 0xf; + if(OFFSET_CENTRE_RANGE_H > RX_COARSE_MID_TD) { + OFFSET_CENTRE_RANGE_H = OFFSET_CENTRE_RANGE_H - RX_COARSE_MID_TD; + } else { + OFFSET_CENTRE_RANGE_H = RX_COARSE_MID_TD - OFFSET_CENTRE_RANGE_H; + } + + //7. Do SEQ::RX_DISABLE to disable RXS. Poll ALIAS::PDIG::CTRL_FSM_RX_ST and confirm + //its value is POWERDN_ST + + rdata = 0; + addr = E56PHY_PMD_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + timer = 0; + while(1) { + udelay(500); + rdata = 0; + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + rdata = rd32_ephy(hw, addr); + if(((rdata >> (i * 8)) & 0x3f) == 0x21) { break; } + if (timer++ > PHYINIT_TIMEOUT) { + printk("ERROR: Wait E56PHY_CTRL_FSM_RX_STAT_0_ADDR Timeout!!!\n"); + break; + return -1; + } + } + + //8. Since RX power-up fsm is stopped in RX_SAMP_CAL_ST, it is possible the timeout interrupt is set. + //Clear the same by clearing ALIAS::PDIG::INTR_CTRL_FSM_RX_ERR. + //Also clear ALIAS::PDIG::INTR_RX_OSC_FREQ_ERR which could also be set. + udelay(500); + rdata = 0; + addr = E56PHY_INTR_0_ADDR; + rdata = rd32_ephy(hw, addr); + + udelay(500); + addr = E56PHY_INTR_0_ADDR; + txgbe_wr32_ephy(hw, addr, rdata); + + udelay(500); + rdata = 0; + addr = E56PHY_INTR_0_ADDR; + rdata = rd32_ephy(hw, addr); + // next round + + //9. 
Program ALIAS::RXS::RANGE_SEL = CMVAR::RANGE_L + // RXS0_ANA_OVRDVAL[5] + // ana_bbcdr_osc_range_sel_i[1:0] + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS_ANA_OVRDVAL_5_ANA_BBCDR_OSC_RANGE_SEL_I, + CMVAR_RANGE_L); + txgbe_wr32_ephy(hw, addr, rdata); + + // RXS0_ANA_OVRDEN[0] + // [29] ovrd_en_ana_bbcdr_osc_range_sel_i + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_BBCDR_OSC_RANGE_SEL_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //10. Do SEQ::RX_ENABLE to enable RXS, and let it stop after oscillator calibration. + // RXS0_OVRDVAL[0] + // [22] rxs0_rx0_samp_cal_done_o + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_0_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_0_RXS0_RX0_SAMP_CAL_DONE_O, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + // RXS0_OVRDEN[0] + // [27] ovrd_en_rxs0_rx0_samp_cal_done_o + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDEN_0_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDEN_0_OVRD_EN_RXS0_RX0_SAMP_CAL_DONE_O, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0; + addr = E56PHY_PMD_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, (0x1 << i)); + txgbe_wr32_ephy(hw, addr, rdata); + + // poll CTRL_FSM_RX_ST + timer = 0; + osc_freq_err_occur = 0; + while(((rdata >> (i * 8)) & 0x3f) != 0x9) { //Bit[5:0]!= 0x9 + udelay(500); + // INTR[0] + // [11:8] intr_rx_osc_freq_err + rdata = 0; + addr = E56PHY_INTR_0_ADDR; + rdata = rd32_ephy(hw, addr); + // TBD is always osc_freq_err occur? 
+ if((rdata & 0x100) == 0x100) { + osc_freq_err_occur = 1; + break; + } + rdata = 0; + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + rdata = rd32_ephy(hw, addr); + if (timer++ > PHYINIT_TIMEOUT) { + printk("ERROR: Wait E56PHY_CTRL_FSM_RX_STAT_0_ADDR Timeout!!!\n"); + break; + return -1; + } //if (timer++ > PHYINIT_TIMEOUT) { + } + + //11/12.Define software variable as OFFSET_CENTRE_RANGE_L = ALIAS::RXS::COARSE - + //RX_COARSE_MID_TD. Clear the INTR. + rdata = 0; + addr = E56PHY_RXS_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + OFFSET_CENTRE_RANGE_L = (rdata >> 4) & 0xf; + if(OFFSET_CENTRE_RANGE_L > RX_COARSE_MID_TD) { + OFFSET_CENTRE_RANGE_L = OFFSET_CENTRE_RANGE_L - RX_COARSE_MID_TD; + } else { + OFFSET_CENTRE_RANGE_L = RX_COARSE_MID_TD - OFFSET_CENTRE_RANGE_L; + } + + //13. Perform below calculation in software. Goal is to pick range value which is closer to RX_COARSE_MID_TD + if (OFFSET_CENTRE_RANGE_L < OFFSET_CENTRE_RANGE_H) { + RANGE_FINAL = CMVAR_RANGE_L; + } + else { + RANGE_FINAL = CMVAR_RANGE_H; + } + + //14. Do SEQ::RX_DISABLE to disable RXS. Poll ALIAS::PDIG::CTRL_FSM_RX_ST + //and confirm its value is POWERDN_ST + rdata = 0; + addr = E56PHY_PMD_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + timer = 0; + while(1) { + udelay(500); + rdata = 0; + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + rdata = rd32_ephy(hw, addr); + if(((rdata >> (i * 8)) & 0x3f) == 0x21) { break; } + if (timer++ > PHYINIT_TIMEOUT) { + printk("ERROR: Wait E56PHY_CTRL_FSM_RX_STAT_0_ADDR Timeout!!!\n"); + break; + return -1; + } //if (timer++ > PHYINIT_TIMEOUT) { + } + + //15. Since RX power-up fsm is stopped in RX_SAMP_CAL_ST, + //it is possible the timeout interrupt is set. Clear the same by clearing + //ALIAS::PDIG::INTR_CTRL_FSM_RX_ERR. Also clear ALIAS::PDIG::INTR_RX_OSC_FREQ_ERR + //which could also be set. 
+ udelay(500); + rdata = 0; + addr = E56PHY_INTR_0_ADDR; + rdata = rd32_ephy(hw, addr); + udelay(500); + txgbe_wr32_ephy(hw, addr, rdata); + + udelay(500); + rdata = 0; + addr = E56PHY_INTR_0_ADDR; + rdata = rd32_ephy(hw, addr); + + //16. Program ALIAS::RXS::RANGE_SEL = RANGE_FINAL + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_5_ANA_BBCDR_OSC_RANGE_SEL_I, RANGE_FINAL); + txgbe_wr32_ephy(hw, addr, rdata); + + //17. Program following before enabling RXS. Purpose is to disable power-up FSM control on ADC offset adaptation + //Note: this step will be done in 2.3.3 RXS calibration and adaptation sequence + + //18. After this SEQ::RX_ENABLE can be done at any time. Note to ensure that ALIAS::RXS::RANGE_SEL = RANGE_FINAL configuration is retained. + //Rmove the OVRDEN on rxs0_rx0_samp_cal_done_o + + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDEN_0_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDEN_0_OVRD_EN_RXS0_RX0_SAMP_CAL_DONE_O, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + } + //Do SEQ::RX_ENABLE + rdata = 0; + addr = E56PHY_PMD_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + if (speed == TXGBE_LINK_SPEED_40GB_FULL) + field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, 0xf); + else + field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + return status; +} + +static int E56phySetRxsUfineLeMax40G(struct txgbe_hw *hw, u32 speed) +{ + int status = 0; + unsigned int rdata; + unsigned int ULTRAFINE_CODE; + int i = 0; + unsigned int CMVAR_UFINE_MAX = 0; + u32 addr; + + for (i = 0; i < 4; i++) { + if (speed == TXGBE_LINK_SPEED_10GB_FULL || speed == TXGBE_LINK_SPEED_40GB_FULL ) { + CMVAR_UFINE_MAX = S10G_CMVAR_UFINE_MAX; + } + else if (speed == TXGBE_LINK_SPEED_25GB_FULL) { + CMVAR_UFINE_MAX = S25G_CMVAR_UFINE_MAX; + } + + //a. Assign software defined variables as below �C + //ii. 
ULTRAFINE_CODE = ALIAS::RXS::ULTRAFINE + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + ULTRAFINE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i); + + //b. Perform the below logic sequence �C + while (ULTRAFINE_CODE > CMVAR_UFINE_MAX) { + ULTRAFINE_CODE = ULTRAFINE_CODE - 1; + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = ULTRAFINE_CODE; + txgbe_wr32_ephy(hw, addr, rdata); + + //Set ovrd_en=1 to overide ASIC value + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + + // Wait until 1milliseconds or greater + msleep(10); + } + } + return status; +} + +static int E56phySetRxsUfineLeMax(struct txgbe_hw *hw, u32 speed) +{ + int status = 0; + unsigned int rdata; + unsigned int ULTRAFINE_CODE; + + unsigned int CMVAR_UFINE_MAX = 0; + + if (speed == TXGBE_LINK_SPEED_10GB_FULL) { + CMVAR_UFINE_MAX = S10G_CMVAR_UFINE_MAX; + } + else if (speed == TXGBE_LINK_SPEED_25GB_FULL) { + CMVAR_UFINE_MAX = S25G_CMVAR_UFINE_MAX; + } + + //a. Assign software defined variables as below �C + //ii. ULTRAFINE_CODE = ALIAS::RXS::ULTRAFINE + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + ULTRAFINE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i); + + //b. 
Perform the below logic sequence �C + while (ULTRAFINE_CODE > CMVAR_UFINE_MAX) { + ULTRAFINE_CODE = ULTRAFINE_CODE - 1; + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i, ULTRAFINE_CODE); + //Set ovrd_en=1 to overide ASIC value + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i, 1); + // Wait until 1milliseconds or greater + msleep(10); + } + + return status; +} + +//-------------------------------------------------------------- +//compare function for qsort() +//-------------------------------------------------------------- +static int compare(const void *a, const void *b) +{ + const int *num1 = (const int *)a; + const int *num2 = (const int *)b; + + if (*num1 < *num2) { + return -1; + } else if (*num1 > *num2) { + return 1; + } else { + return 0; + } +} +static int E56phyRxRdSecondCode40g(struct txgbe_hw *hw, int *SECOND_CODE, int lane) +{ + int status = 0, i, N, median; + unsigned int rdata; + u32 addr; + int arraySize, RXS_BBCDR_SECOND_ORDER_ST[5]; + + + //Set ovrd_en=0 to read ASIC value + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + (lane * E56PHY_RXS_OFFSET); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_int_cstm_i) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + //As status update from RXS hardware is asynchronous to read status of SECOND_ORDER, follow sequence mentioned below. + N =5; + for (i=0; i5degC after the CDR locks for the first time or after the +//ious time this sequence was run. It is recommended to call this sequence periodically (eg: once every 100ms) or trigger +// sequence if the temperature drifts by >=5degC. Temperature must be read from an on-die temperature sensor. 
+//-------------------------------------------------------------- +int txgbe_temp_track_seq_40g(struct txgbe_hw *hw, u32 speed) +{ + int status = 0; + unsigned int rdata; + int SECOND_CODE; + int COARSE_CODE; + int FINE_CODE; + int ULTRAFINE_CODE; + + int CMVAR_SEC_LOW_TH ; + int CMVAR_UFINE_MAX = 0; + int CMVAR_FINE_MAX ; + int CMVAR_UFINE_UMAX_WRAP = 0; + int CMVAR_COARSE_MAX ; + int CMVAR_UFINE_FMAX_WRAP = 0; + int CMVAR_FINE_FMAX_WRAP = 0; + int CMVAR_SEC_HIGH_TH ; + int CMVAR_UFINE_MIN ; + int CMVAR_FINE_MIN ; + int CMVAR_UFINE_UMIN_WRAP ; + int CMVAR_COARSE_MIN ; + int CMVAR_UFINE_FMIN_WRAP ; + int CMVAR_FINE_FMIN_WRAP ; + int i; + u32 addr; + int temp; + + struct txgbe_adapter *adapter = hw->back; + + for (i = 0; i < 4; i++) { + if(speed == TXGBE_LINK_SPEED_10GB_FULL || speed == TXGBE_LINK_SPEED_40GB_FULL) { + CMVAR_SEC_LOW_TH = S10G_CMVAR_SEC_LOW_TH ; + CMVAR_UFINE_MAX = S10G_CMVAR_UFINE_MAX ; + CMVAR_FINE_MAX = S10G_CMVAR_FINE_MAX ; + CMVAR_UFINE_UMAX_WRAP = S10G_CMVAR_UFINE_UMAX_WRAP; + CMVAR_COARSE_MAX = S10G_CMVAR_COARSE_MAX ; + CMVAR_UFINE_FMAX_WRAP = S10G_CMVAR_UFINE_FMAX_WRAP; + CMVAR_FINE_FMAX_WRAP = S10G_CMVAR_FINE_FMAX_WRAP ; + CMVAR_SEC_HIGH_TH = S10G_CMVAR_SEC_HIGH_TH ; + CMVAR_UFINE_MIN = S10G_CMVAR_UFINE_MIN ; + CMVAR_FINE_MIN = S10G_CMVAR_FINE_MIN ; + CMVAR_UFINE_UMIN_WRAP = S10G_CMVAR_UFINE_UMIN_WRAP; + CMVAR_COARSE_MIN = S10G_CMVAR_COARSE_MIN ; + CMVAR_UFINE_FMIN_WRAP = S10G_CMVAR_UFINE_FMIN_WRAP; + CMVAR_FINE_FMIN_WRAP = S10G_CMVAR_FINE_FMIN_WRAP ; + } + else if (speed == TXGBE_LINK_SPEED_25GB_FULL) { + CMVAR_SEC_LOW_TH = S25G_CMVAR_SEC_LOW_TH ; + CMVAR_UFINE_MAX = S25G_CMVAR_UFINE_MAX ; + CMVAR_FINE_MAX = S25G_CMVAR_FINE_MAX ; + CMVAR_UFINE_UMAX_WRAP = S25G_CMVAR_UFINE_UMAX_WRAP; + CMVAR_COARSE_MAX = S25G_CMVAR_COARSE_MAX ; + CMVAR_UFINE_FMAX_WRAP = S25G_CMVAR_UFINE_FMAX_WRAP; + CMVAR_FINE_FMAX_WRAP = S25G_CMVAR_FINE_FMAX_WRAP ; + CMVAR_SEC_HIGH_TH = S25G_CMVAR_SEC_HIGH_TH ; + CMVAR_UFINE_MIN = S25G_CMVAR_UFINE_MIN ; + CMVAR_FINE_MIN = 
S25G_CMVAR_FINE_MIN ; + CMVAR_UFINE_UMIN_WRAP = S25G_CMVAR_UFINE_UMIN_WRAP; + CMVAR_COARSE_MIN = S25G_CMVAR_COARSE_MIN ; + CMVAR_UFINE_FMIN_WRAP = S25G_CMVAR_UFINE_FMIN_WRAP; + CMVAR_FINE_FMIN_WRAP = S25G_CMVAR_FINE_FMIN_WRAP ; + } else { + printk("Error Speed\n"); + return 0; + } + + status = txgbe_e56_get_temp(hw, &temp); + if (status) + return 0; + + adapter->amlite_temp = temp; + + //Assign software defined variables as below �C + //a. SECOND_CODE = ALIAS::RXS::SECOND_ORDER + status |= E56phyRxRdSecondCode40g(hw, &SECOND_CODE, i); + + //b. COARSE_CODE = ALIAS::RXS::COARSE + //c. FINE_CODE = ALIAS::RXS::FINE + //d. ULTRAFINE_CODE = ALIAS::RXS::ULTRAFINE + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + COARSE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_coarse_i); + FINE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i); + ULTRAFINE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i); + + if (SECOND_CODE <= CMVAR_SEC_LOW_TH) { + if (ULTRAFINE_CODE < CMVAR_UFINE_MAX) { + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = ULTRAFINE_CODE + 1; + txgbe_wr32_ephy(hw, addr, rdata); + + //Set ovrd_en=1 to overide ASIC value + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + } else if (FINE_CODE < CMVAR_FINE_MAX) { + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_UMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = FINE_CODE + 1; + txgbe_wr32_ephy(hw, addr, rdata); + //Note: All two of above code updates should be written in a single register write + //Set ovrd_en=1 to overide ASIC value + addr = 
E56G__RXS0_ANA_OVRDEN_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + } else if (COARSE_CODE < CMVAR_COARSE_MAX) { + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_FMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = CMVAR_FINE_FMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_coarse_i) = COARSE_CODE + 1; + txgbe_wr32_ephy(hw, addr, rdata); + + //Note: All three of above code updates should be written in a single register write + //Set ovrd_en=1 to overide ASIC value + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_coarse_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + } else { + printk("ERROR: (SECOND_CODE <= CMVAR_SEC_LOW_TH) temperature tracking occurs Error condition\n"); + } + } else if (SECOND_CODE >= CMVAR_SEC_HIGH_TH) { + if (ULTRAFINE_CODE > CMVAR_UFINE_MIN) { + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = ULTRAFINE_CODE - 1; + txgbe_wr32_ephy(hw, addr, rdata); + + //Set ovrd_en=1 to overide ASIC value + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + } else if (FINE_CODE > CMVAR_FINE_MIN) { + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + 
EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_UMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = FINE_CODE - 1; + txgbe_wr32_ephy(hw, addr, rdata); + + //Note: All two of above code updates should be written in a single register write + //Set ovrd_en=1 to overide ASIC value + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + } else if (COARSE_CODE > CMVAR_COARSE_MIN) { + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_FMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = CMVAR_FINE_FMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_coarse_i) = COARSE_CODE - 1; + txgbe_wr32_ephy(hw, addr, rdata); + + //Note: All three of above code updates should be written in a single register write + //Set ovrd_en=1 to overide ASIC value + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_coarse_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + } else { + printk("ERROR: (SECOND_CODE >= CMVAR_SEC_HIGH_TH) temperature tracking occurs Error condition\n"); + } + } + } + return status; +} + + +//-------------------------------------------------------------- +//2.3.4 RXS post CDR lock temperature tracking sequence +// +//Below sequence must be run before the temperature drifts by >5degC after the CDR locks for the first time or after the +//ious time this sequence was run. 
It is recommended to call this sequence periodically (eg: once every 100ms) or trigger +// sequence if the temperature drifts by >=5degC. Temperature must be read from an on-die temperature sensor. +//-------------------------------------------------------------- +int txgbe_temp_track_seq(struct txgbe_hw *hw, u32 speed) +{ + struct txgbe_adapter *adapter = hw->back; + int status = 0; + unsigned int rdata; + int SECOND_CODE; + int COARSE_CODE; + int FINE_CODE; + int ULTRAFINE_CODE; + + int CMVAR_SEC_LOW_TH ; + int CMVAR_UFINE_MAX = 0; + int CMVAR_FINE_MAX ; + int CMVAR_UFINE_UMAX_WRAP = 0; + int CMVAR_COARSE_MAX ; + int CMVAR_UFINE_FMAX_WRAP = 0; + int CMVAR_FINE_FMAX_WRAP = 0; + int CMVAR_SEC_HIGH_TH ; + int CMVAR_UFINE_MIN ; + int CMVAR_FINE_MIN ; + int CMVAR_UFINE_UMIN_WRAP ; + int CMVAR_COARSE_MIN ; + int CMVAR_UFINE_FMIN_WRAP ; + int CMVAR_FINE_FMIN_WRAP ; + int temp; + + if(speed == TXGBE_LINK_SPEED_10GB_FULL) { + CMVAR_SEC_LOW_TH = S10G_CMVAR_SEC_LOW_TH ; + CMVAR_UFINE_MAX = S10G_CMVAR_UFINE_MAX ; + CMVAR_FINE_MAX = S10G_CMVAR_FINE_MAX ; + CMVAR_UFINE_UMAX_WRAP = S10G_CMVAR_UFINE_UMAX_WRAP; + CMVAR_COARSE_MAX = S10G_CMVAR_COARSE_MAX ; + CMVAR_UFINE_FMAX_WRAP = S10G_CMVAR_UFINE_FMAX_WRAP; + CMVAR_FINE_FMAX_WRAP = S10G_CMVAR_FINE_FMAX_WRAP ; + CMVAR_SEC_HIGH_TH = S10G_CMVAR_SEC_HIGH_TH ; + CMVAR_UFINE_MIN = S10G_CMVAR_UFINE_MIN ; + CMVAR_FINE_MIN = S10G_CMVAR_FINE_MIN ; + CMVAR_UFINE_UMIN_WRAP = S10G_CMVAR_UFINE_UMIN_WRAP; + CMVAR_COARSE_MIN = S10G_CMVAR_COARSE_MIN ; + CMVAR_UFINE_FMIN_WRAP = S10G_CMVAR_UFINE_FMIN_WRAP; + CMVAR_FINE_FMIN_WRAP = S10G_CMVAR_FINE_FMIN_WRAP ; + } + else if (speed == TXGBE_LINK_SPEED_25GB_FULL) { + CMVAR_SEC_LOW_TH = S25G_CMVAR_SEC_LOW_TH ; + CMVAR_UFINE_MAX = S25G_CMVAR_UFINE_MAX ; + CMVAR_FINE_MAX = S25G_CMVAR_FINE_MAX ; + CMVAR_UFINE_UMAX_WRAP = S25G_CMVAR_UFINE_UMAX_WRAP; + CMVAR_COARSE_MAX = S25G_CMVAR_COARSE_MAX ; + CMVAR_UFINE_FMAX_WRAP = S25G_CMVAR_UFINE_FMAX_WRAP; + CMVAR_FINE_FMAX_WRAP = S25G_CMVAR_FINE_FMAX_WRAP ; + 
CMVAR_SEC_HIGH_TH = S25G_CMVAR_SEC_HIGH_TH ; + CMVAR_UFINE_MIN = S25G_CMVAR_UFINE_MIN ; + CMVAR_FINE_MIN = S25G_CMVAR_FINE_MIN ; + CMVAR_UFINE_UMIN_WRAP = S25G_CMVAR_UFINE_UMIN_WRAP; + CMVAR_COARSE_MIN = S25G_CMVAR_COARSE_MIN ; + CMVAR_UFINE_FMIN_WRAP = S25G_CMVAR_UFINE_FMIN_WRAP; + CMVAR_FINE_FMIN_WRAP = S25G_CMVAR_FINE_FMIN_WRAP ; + } else { + printk("Error Speed\n"); + return 0; + } + + status = txgbe_e56_get_temp(hw, &temp); + if (status) + return 0; + + adapter->amlite_temp = temp; + + //Assign software defined variables as below �C + //a. SECOND_CODE = ALIAS::RXS::SECOND_ORDER + status |= E56phyRxRdSecondCode(hw, &SECOND_CODE); + + //b. COARSE_CODE = ALIAS::RXS::COARSE + //c. FINE_CODE = ALIAS::RXS::FINE + //d. ULTRAFINE_CODE = ALIAS::RXS::ULTRAFINE + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + COARSE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_coarse_i); + FINE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i); + ULTRAFINE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i); + + if (SECOND_CODE <= CMVAR_SEC_LOW_TH) { + if (ULTRAFINE_CODE < CMVAR_UFINE_MAX) { + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i, ULTRAFINE_CODE + 1); + //Set ovrd_en=1 to overide ASIC value + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (FINE_CODE < CMVAR_FINE_MAX) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_UMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = FINE_CODE + 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + //Note: All two of above code updates should be written in a single register write + //Set ovrd_en=1 to overide ASIC value + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + 
EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (COARSE_CODE < CMVAR_COARSE_MAX) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_FMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = CMVAR_FINE_FMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_coarse_i) = COARSE_CODE + 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + //Note: All three of above code updates should be written in a single register write + //Set ovrd_en=1 to overide ASIC value + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_coarse_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else { + printk("ERROR: (SECOND_CODE <= CMVAR_SEC_LOW_TH) temperature tracking occurs Error condition\n"); + } + } else if (SECOND_CODE >= CMVAR_SEC_HIGH_TH) { + if (ULTRAFINE_CODE > CMVAR_UFINE_MIN) { + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i, ULTRAFINE_CODE - 1); + //Set ovrd_en=1 to overide ASIC value + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (FINE_CODE > CMVAR_FINE_MIN) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_UMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = FINE_CODE - 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + //Note: All two of above code updates should be written in a single register write + //Set ovrd_en=1 to overide ASIC value + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (COARSE_CODE > CMVAR_COARSE_MIN) { + 
EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_FMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = CMVAR_FINE_FMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_coarse_i) = COARSE_CODE - 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + //Note: All three of above code updates should be written in a single register write + //Set ovrd_en=1 to overide ASIC value + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_coarse_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else { + printk("ERROR: (SECOND_CODE >= CMVAR_SEC_HIGH_TH) temperature tracking occurs Error condition\n"); + } + } + + return status; +} + +static int E56phyCtleBypassSeq(struct txgbe_hw *hw, u32 speed) +{ + int status = 0; + unsigned int rdata; + + + //1. Program the following RXS registers as mentioned below. + //RXS::ANA_OVRDVAL[0]::ana_ctle_bypass_i = 1��b1 + //RXS::ANA_OVRDEN[0]::ovrd_en_ana_ctle_bypass_i = 1��b1 + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_0, ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_0, ovrd_en_ana_ctle_bypass_i, 1); + + //RXS::ANA_OVRDVAL[3]::ana_ctle_cz_cstm_i[4:0] = 0 + //RXS::ANA_OVRDEN[0]::ovrd_en_ana_ctle_cz_cstm_i = 1��b1 + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_3, ana_ctle_cz_cstm_i, 0); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_0, ovrd_en_ana_ctle_cz_cstm_i, 1); + + //2. Program the following PDIG registers as mentioned below. 
+ //PDIG::RXS_OVRDVAL[1]::rxs_rx0_ctle_train_en_i = 1��b0 + //PDIG::RXS_OVRDEN[1]::ovrd_en_rxs_rx0_ctle_train_en_i = 1��b1 + // + //PDIG::RXS_OVRDVAL[1]::rxs_rx0_ctle_train_done_o = 1��b1 + //PDIG::RXS_OVRDEN[1]::ovrd_en_rxs_rx0_ctle_train_done_o = 1��b1 + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_en_i) = 0; + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS0_OVRDVAL_1); + + EPHY_RREG(E56G__PMD_RXS0_OVRDEN_1); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_en_i) = 1; + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS0_OVRDEN_1); + + if (speed == TXGBE_LINK_SPEED_40GB_FULL) { + //1. Program the following RXS registers as mentioned below. + //RXS::ANA_OVRDVAL[0]::ana_ctle_bypass_i = 1��b1 + //RXS::ANA_OVRDEN[0]::ovrd_en_ana_ctle_bypass_i = 1��b1 + txgbe_e56_ephy_config(E56G__RXS1_ANA_OVRDVAL_0, ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS1_ANA_OVRDEN_0, ovrd_en_ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS2_ANA_OVRDVAL_0, ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS2_ANA_OVRDEN_0, ovrd_en_ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDVAL_0, ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDEN_0, ovrd_en_ana_ctle_bypass_i, 1); + + //RXS::ANA_OVRDVAL[3]::ana_ctle_cz_cstm_i[4:0] = 0 + //RXS::ANA_OVRDEN[0]::ovrd_en_ana_ctle_cz_cstm_i = 1��b1 + txgbe_e56_ephy_config(E56G__RXS1_ANA_OVRDVAL_3, ana_ctle_cz_cstm_i, 0); + txgbe_e56_ephy_config(E56G__RXS1_ANA_OVRDEN_0, ovrd_en_ana_ctle_cz_cstm_i, 1); + txgbe_e56_ephy_config(E56G__RXS2_ANA_OVRDVAL_3, ana_ctle_cz_cstm_i, 0); + txgbe_e56_ephy_config(E56G__RXS2_ANA_OVRDEN_0, ovrd_en_ana_ctle_cz_cstm_i, 1); + txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDVAL_3, ana_ctle_cz_cstm_i, 0); + txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDEN_0, ovrd_en_ana_ctle_cz_cstm_i, 1); + + //2. 
Program the following PDIG registers as mentioned below. + //PDIG::RXS_OVRDVAL[1]::rxs_rx0_ctle_train_en_i = 1��b0 + //PDIG::RXS_OVRDEN[1]::ovrd_en_rxs_rx0_ctle_train_en_i = 1��b1 + // + //PDIG::RXS_OVRDVAL[1]::rxs_rx0_ctle_train_done_o = 1��b1 + //PDIG::RXS_OVRDEN[1]::ovrd_en_rxs_rx0_ctle_train_done_o = 1��b1 + EPHY_RREG(E56G__PMD_RXS1_OVRDVAL_1); + EPHY_XFLD(E56G__PMD_RXS1_OVRDVAL_1, rxs1_rx0_ctle_train_en_i) = 0; + EPHY_XFLD(E56G__PMD_RXS1_OVRDVAL_1, rxs1_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS1_OVRDVAL_1); + EPHY_RREG(E56G__PMD_RXS2_OVRDVAL_1); + EPHY_XFLD(E56G__PMD_RXS2_OVRDVAL_1, rxs2_rx0_ctle_train_en_i) = 0; + EPHY_XFLD(E56G__PMD_RXS2_OVRDVAL_1, rxs2_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS2_OVRDVAL_1); + EPHY_RREG(E56G__PMD_RXS3_OVRDVAL_1); + EPHY_XFLD(E56G__PMD_RXS3_OVRDVAL_1, rxs3_rx0_ctle_train_en_i) = 0; + EPHY_XFLD(E56G__PMD_RXS3_OVRDVAL_1, rxs3_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS3_OVRDVAL_1); + + EPHY_RREG(E56G__PMD_RXS1_OVRDEN_1); + EPHY_XFLD(E56G__PMD_RXS1_OVRDEN_1, ovrd_en_rxs1_rx0_ctle_train_en_i) = 1; + EPHY_XFLD(E56G__PMD_RXS1_OVRDEN_1, ovrd_en_rxs1_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS1_OVRDEN_1); + EPHY_RREG(E56G__PMD_RXS2_OVRDEN_1); + EPHY_XFLD(E56G__PMD_RXS2_OVRDEN_1, ovrd_en_rxs2_rx0_ctle_train_en_i) = 1; + EPHY_XFLD(E56G__PMD_RXS2_OVRDEN_1, ovrd_en_rxs2_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS2_OVRDEN_1); + EPHY_RREG(E56G__PMD_RXS3_OVRDEN_1); + EPHY_XFLD(E56G__PMD_RXS3_OVRDEN_1, ovrd_en_rxs3_rx0_ctle_train_en_i) = 1; + EPHY_XFLD(E56G__PMD_RXS3_OVRDEN_1, ovrd_en_rxs3_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS3_OVRDEN_1); + + } + return status; +} + +static int E56phyRxsCalibAdaptSeq40G(struct txgbe_hw *hw, u32 speed) +{ + int status = 0, i, j; + u32 addr, timer; + u32 rdata = 0x0; + u32 bypassCtle = true; + + for (i = 0; i < 4; i++) { + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + 
field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_OFST_ADAPT_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_2_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_OFST_ADAPT_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_2_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_DONE_O, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_DONE_O, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_ADAPT_EN_I, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_2_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + 
E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_INTL_ADAPT_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + } + + if (bypassCtle == 1) + E56phyCtleBypassSeq(hw, speed); + + //2. Follow sequence described in 2.3.2 RXS Osc Initialization for temperature tracking range here. RXS would be enabled at the end of this sequence. For the case when PAM4 KR training is not enabled (including PAM4 mode without KR training), wait until ALIAS::PDIG::CTRL_FSM_RX_ST would return RX_TRAIN_15_ST (RX_RDY_ST). + E56phyRxsOscInitForTempTrackRange(hw, speed); + + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + timer = 0; + rdata = 0; + while(EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx0_st) != E56PHY_RX_RDY_ST || + EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx1_st) != E56PHY_RX_RDY_ST || + EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx2_st) != E56PHY_RX_RDY_ST || + EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx3_st) != E56PHY_RX_RDY_ST) { + rdata = rd32_ephy(hw, addr); + udelay(500); + if (timer++ > PHYINIT_TIMEOUT) { + //Do SEQ::RX_DISABLE + rdata = 0; + addr = E56PHY_PMD_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + return TXGBE_ERR_TIMEOUT; + } + } + + //RXS ADC adaptation sequence + //E56phyRxsAdcAdaptSeq + rdata = 0; + timer = 0; + while(EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_cdr_rdy_o) != 1) { + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + udelay(500); + if (timer++ > PHYINIT_TIMEOUT) + return TXGBE_ERR_TIMEOUT; + } + + rdata = 0; + timer = 0; + while(EPHY_XFLD(E56G__PMD_RXS1_OVRDVAL_1, rxs1_rx0_cdr_rdy_o) != 1) { + EPHY_RREG(E56G__PMD_RXS1_OVRDVAL_1); + udelay(500); + if (timer++ > PHYINIT_TIMEOUT) + return TXGBE_ERR_TIMEOUT; + } + rdata = 0; + timer = 0; + while(EPHY_XFLD(E56G__PMD_RXS2_OVRDVAL_1, rxs2_rx0_cdr_rdy_o) != 1) { + EPHY_RREG(E56G__PMD_RXS2_OVRDVAL_1); + udelay(500); + if (timer++ > PHYINIT_TIMEOUT) + return TXGBE_ERR_TIMEOUT; + } + + rdata = 0; + timer = 0; + 
while(EPHY_XFLD(E56G__PMD_RXS3_OVRDVAL_1, rxs3_rx0_cdr_rdy_o) != 1) { + EPHY_RREG(E56G__PMD_RXS3_OVRDVAL_1); + udelay(500); + if (timer++ > PHYINIT_TIMEOUT) + return TXGBE_ERR_TIMEOUT; + } + + for (i = 0; i < 4; i++) { + //4. Disable VGA and CTLE training so that they don't interfere with ADC calibration + //a. Set ALIAS::RXS::VGA_TRAIN_EN = 0b0 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_VGA_TRAIN_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_VGA_TRAIN_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //b. Set ALIAS::RXS::CTLE_TRAIN_EN = 0b0 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_CTLE_TRAIN_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_CTLE_TRAIN_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //5. Perform ADC interleaver calibration + //a. 
Remove the OVERRIDE on ALIAS::RXS::ADC_INTL_CAL_DONE + addr = E56PHY_RXS0_OVRDEN_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_DONE_O, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + timer = 0; + while(((rdata >> E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_DONE_O_LSB) & 1) != 1) { + rdata = rd32_ephy(hw, addr); + udelay(1000); + + if (timer++ > PHYINIT_TIMEOUT) { + break; + } + } + + //6. Perform ADC offset adaptation and ADC gain adaptation, repeat them a few times and after that keep it disabled. + for(j = 0; j < 16; j++) { + //a. ALIAS::RXS::ADC_OFST_ADAPT_EN = 0b1 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_OFST_ADAPT_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //b. Wait for 1ms or greater + addr = E56G__PMD_RXS0_OVRDEN_2_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_ofst_adapt_done_o) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0; + addr = E56G__PMD_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + timer = 0; + while(EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_adc_ofst_adapt_done_o) != 1) { + rdata = rd32_ephy(hw, addr); + udelay(500); + if (timer++ > PHYINIT_TIMEOUT) { + break; + } + } + + //c. ALIAS::RXS::ADC_OFST_ADAPT_EN = 0b0 + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_OFST_ADAPT_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + //d. 
ALIAS::RXS::ADC_GAIN_ADAPT_EN = 0b1 + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //e. Wait for 1ms or greater + addr = E56G__PMD_RXS0_OVRDEN_2_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_ofst_adapt_done_o) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0; + timer = 0; + addr = E56G__PMD_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + while(EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_adc_gain_adapt_done_o) != 1) { + rdata = rd32_ephy(hw, addr); + udelay(500); + + if (timer++ > PHYINIT_TIMEOUT) { + break; + } + } + + //f. ALIAS::RXS::ADC_GAIN_ADAPT_EN = 0b0 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + } + //g. Repeat #a to #f total 16 times + + + //7. Perform ADC interleaver adaptation for 10ms or greater, and after that disable it + //a. ALIAS::RXS::ADC_INTL_ADAPT_EN = 0b1 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_ADAPT_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + //b. Wait for 10ms or greater + msleep(10); + + //c. ALIAS::RXS::ADC_INTL_ADAPT_EN = 0b0 + addr = E56G__PMD_RXS0_OVRDEN_2_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_intl_adapt_en_i) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + //8. 
Now re-enable VGA and CTLE trainings, so that it continues to adapt tracking changes in temperature or voltage + //<1>Set ALIAS::RXS::VGA_TRAIN_EN = 0b1 + // Set ALIAS::RXS::CTLE_TRAIN_EN = 0b1 + addr = E56G__PMD_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_vga_train_en_i) = 1; + if(bypassCtle == 0) { + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_en_i) = 1; + } + //printf("Setting RXS0_OVRDVAL[1]::rxs0_rx0_ffe_train_en_i to 1\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ffe_train_en_i) = 1; + //printf("Setting RXS0_OVRDVAL[1]::rxs0_rx0_dfe_train_en_i to 1\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_dfe_train_en_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + + // + //EPHY_RREG(E56G__PMD_RXS0_OVRDEN_1); + //printf("Setting RXS0_OVRDEN[1]::ovrd_en_rxs0_rx0_ffe_train_en_i to 1\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ffe_train_en_i) = 1; + //printf("Setting RXS0_OVRDEN[1]::ovrd_en_rxs0_rx0_dfe_train_en_i to 1\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_dfe_train_en_i) = 1; + //EPHY_WREG(E56G__PMD_RXS0_OVRDEN_1); + + + //<2>wait for ALIAS::RXS::VGA_TRAIN_DONE = 1 + // wait for ALIAS::RXS::CTLE_TRAIN_DONE = 1 + addr = E56G__PMD_RXS0_OVRDEN_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_vga_train_done_o) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0; + timer = 0; + addr = E56G__PMD_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + while(EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_vga_train_done_o) != 1) { + rdata = rd32_ephy(hw, addr); + udelay(500); + + if (timer++ > PHYINIT_TIMEOUT) { + break; + //return -1; + } //if (timer++ > PHYINIT_TIMEOUT) { + } //while + + if(bypassCtle == 0) { + addr = E56G__PMD_RXS0_OVRDEN_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, 
ovrd_en_rxs0_rx0_ctle_train_done_o) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0; + timer = 0; + addr = E56G__PMD_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + while(EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_done_o) != 1) { + rdata = rd32_ephy(hw, addr); + udelay(500); + + if (timer++ > PHYINIT_TIMEOUT) { + break; + //return -1; + } //if (timer++ > PHYINIT_TIMEOUT) { + } //while + } + + //a. Remove the OVERRIDE on ALIAS::RXS::VGA_TRAIN_EN + addr = E56G__PMD_RXS0_OVRDEN_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_vga_train_en_i) = 0; + //b. Remove the OVERRIDE on ALIAS::RXS::CTLE_TRAIN_EN + if(bypassCtle == 0) { + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_en_i) = 0; + } + ////Remove the OVERRIDE on ALIAS::RXS::FFE_TRAIN_EN + //printf("Setting RXS0_OVRDEN[1]::ovrd_en_rxs0_rx0_ffe_train_en_i to 0\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ffe_train_en_i) = 0; + ////Remove the OVERRIDE on ALIAS::RXS::DFE_TRAIN_EN + //printf("Setting RXS0_OVRDEN[1]::ovrd_en_rxs0_rx0_dfe_train_en_i to 0\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_dfe_train_en_i) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + } + return status; +} + +static int E56phyRxsCalibAdaptSeq(struct txgbe_hw *hw, u32 speed) +{ + int status = 0, i; + u32 addr, timer; + u32 rdata = 0x0; + u32 bypassCtle = true; + + if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) { + bypassCtle = false; + } else { + bypassCtle = true; + } + + if (hw->mac.type == txgbe_mac_aml) { + msleep(350); + rdata = rd32(hw, TXGBE_GPIO_EXT); + if (rdata & (TXGBE_SFP1_MOD_ABS_LS | TXGBE_SFP1_RX_LOS_LS)) { + return TXGBE_ERR_PHY_INIT_NOT_DONE; + } + } + + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_OFST_ADAPT_EN_I, 0x0); + txgbe_wr32_ephy(hw, 
addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_OFST_ADAPT_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_DONE_O, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_DONE_O, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_ADAPT_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_INTL_ADAPT_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + if (bypassCtle == 1) + E56phyCtleBypassSeq(hw, speed); + + //2. Follow sequence described in 2.3.2 RXS Osc Initialization for temperature tracking range here. RXS would be enabled at the end of this sequence. 
For the case when PAM4 KR training is not enabled (including PAM4 mode without KR training), wait until ALIAS::PDIG::CTRL_FSM_RX_ST would return RX_TRAIN_15_ST (RX_RDY_ST). + E56phyRxsOscInitForTempTrackRange(hw, speed); + + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + timer = 0; + rdata = 0; + while(EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx0_st) != E56PHY_RX_RDY_ST) { + rdata = rd32_ephy(hw, addr); + udelay(500); + EPHY_RREG(E56G__PMD_CTRL_FSM_RX_STAT_0); + if (timer++ > PHYINIT_TIMEOUT) { + //Do SEQ::RX_DISABLE + rdata = 0; + addr = E56PHY_PMD_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + return TXGBE_ERR_TIMEOUT; + } + } + + //RXS ADC adaptation sequence + //E56phyRxsAdcAdaptSeq + rdata = 0; + timer = 0; + while(EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_cdr_rdy_o) != 1) { + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + udelay(500); + if (timer++ > PHYINIT_TIMEOUT) + return TXGBE_ERR_TIMEOUT; + } + + //4. Disable VGA and CTLE training so that they don't interfere with ADC calibration + //a. Set ALIAS::RXS::VGA_TRAIN_EN = 0b0 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_VGA_TRAIN_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_VGA_TRAIN_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //b. Set ALIAS::RXS::CTLE_TRAIN_EN = 0b0 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_CTLE_TRAIN_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_CTLE_TRAIN_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //5. Perform ADC interleaver calibration + //a. 
Remove the OVERRIDE on ALIAS::RXS::ADC_INTL_CAL_DONE + addr = E56PHY_RXS0_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_DONE_O, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + timer = 0; + while(((rdata >> E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_DONE_O_LSB) & 1) != 1) { + rdata = rd32_ephy(hw, addr); + udelay(1000); + + if (timer++ > PHYINIT_TIMEOUT) { + break; + } + } + + //6. Perform ADC offset adaptation and ADC gain adaptation, repeat them a few times and after that keep it disabled. + for(i = 0; i < 16; i++) { + //a. ALIAS::RXS::ADC_OFST_ADAPT_EN = 0b1 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_OFST_ADAPT_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //b. Wait for 1ms or greater + txgbe_e56_ephy_config(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_ofst_adapt_done_o, 0); + rdata = 0; + timer = 0; + while(EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_adc_ofst_adapt_done_o) != 1) { + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + udelay(500); + if (timer++ > PHYINIT_TIMEOUT) { + break; + } + } + + //c. ALIAS::RXS::ADC_OFST_ADAPT_EN = 0b0 + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_OFST_ADAPT_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + //d. ALIAS::RXS::ADC_GAIN_ADAPT_EN = 0b1 + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //e. 
Wait for 1ms or greater + txgbe_e56_ephy_config(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_ofst_adapt_done_o, 0); + rdata = 0; + timer = 0; + while(EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_adc_gain_adapt_done_o) != 1) { + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + udelay(500); + + if (timer++ > PHYINIT_TIMEOUT) { + break; + } + } + + //f. ALIAS::RXS::ADC_GAIN_ADAPT_EN = 0b0 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + } + //g. Repeat #a to #f total 16 times + + + //7. Perform ADC interleaver adaptation for 10ms or greater, and after that disable it + //a. ALIAS::RXS::ADC_INTL_ADAPT_EN = 0b1 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_ADAPT_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + //b. Wait for 10ms or greater + msleep(10); + + //c. ALIAS::RXS::ADC_INTL_ADAPT_EN = 0b0 + txgbe_e56_ephy_config(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_intl_adapt_en_i, 0); + + //8. 
Now re-enable VGA and CTLE trainings, so that it continues to adapt tracking changes in temperature or voltage + //<1>Set ALIAS::RXS::VGA_TRAIN_EN = 0b1 + // Set ALIAS::RXS::CTLE_TRAIN_EN = 0b1 + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_vga_train_en_i) = 1; + if(bypassCtle == 0) { + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_en_i) = 1; + } + //printf("Setting RXS0_OVRDVAL[1]::rxs0_rx0_ffe_train_en_i to 1\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ffe_train_en_i) = 1; + //printf("Setting RXS0_OVRDVAL[1]::rxs0_rx0_dfe_train_en_i to 1\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_dfe_train_en_i) = 1; + EPHY_WREG(E56G__PMD_RXS0_OVRDVAL_1); + // + //EPHY_RREG(E56G__PMD_RXS0_OVRDEN_1); + //printf("Setting RXS0_OVRDEN[1]::ovrd_en_rxs0_rx0_ffe_train_en_i to 1\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ffe_train_en_i) = 1; + //printf("Setting RXS0_OVRDEN[1]::ovrd_en_rxs0_rx0_dfe_train_en_i to 1\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_dfe_train_en_i) = 1; + //EPHY_WREG(E56G__PMD_RXS0_OVRDEN_1); + + + //<2>wait for ALIAS::RXS::VGA_TRAIN_DONE = 1 + // wait for ALIAS::RXS::CTLE_TRAIN_DONE = 1 + txgbe_e56_ephy_config(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_vga_train_done_o, 0); + rdata = 0; + timer = 0; + while(EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_vga_train_done_o) != 1) { + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + udelay(500); + + if (timer++ > PHYINIT_TIMEOUT) { + break; + //return -1; + } //if (timer++ > PHYINIT_TIMEOUT) { + } //while + + if(bypassCtle == 0) { + txgbe_e56_ephy_config(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_done_o, 0); + rdata = 0; + timer = 0; + while(EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_done_o) != 1) { + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + udelay(500); + + if (timer++ > PHYINIT_TIMEOUT) { + break; + //return -1; + } //if (timer++ > PHYINIT_TIMEOUT) { + } //while + } + + //a. 
Remove the OVERRIDE on ALIAS::RXS::VGA_TRAIN_EN + EPHY_RREG(E56G__PMD_RXS0_OVRDEN_1); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_vga_train_en_i) = 0; + //b. Remove the OVERRIDE on ALIAS::RXS::CTLE_TRAIN_EN + if(bypassCtle == 0) { + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_en_i) = 0; + } + ////Remove the OVERRIDE on ALIAS::RXS::FFE_TRAIN_EN + //printf("Setting RXS0_OVRDEN[1]::ovrd_en_rxs0_rx0_ffe_train_en_i to 0\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ffe_train_en_i) = 0; + ////Remove the OVERRIDE on ALIAS::RXS::DFE_TRAIN_EN + //printf("Setting RXS0_OVRDEN[1]::ovrd_en_rxs0_rx0_dfe_train_en_i to 0\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_dfe_train_en_i) = 0; + EPHY_WREG(E56G__PMD_RXS0_OVRDEN_1); + + return status; +} + +u32 txgbe_e56_cfg_temp(struct txgbe_hw *hw) +{ + u32 status; + u32 value; + int temp; + + status = txgbe_e56_get_temp(hw, &temp); + if (status) + temp = DEFAULT_TEMP; + + if (temp < DEFAULT_TEMP) { + value = rd32_ephy(hw, CMS_ANA_OVRDEN0); + field_set(&value, 25, 25, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDEN0, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL2); + field_set(&value, 20, 16, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL2, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDEN1); + field_set(&value, 12, 12, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDEN1, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL7); + field_set(&value, 8, 4, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL7, value); + } else if (temp > HIGH_TEMP) { + value = rd32_ephy(hw, CMS_ANA_OVRDEN0); + field_set(&value, 25, 25, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDEN0, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL2); + field_set(&value, 20, 16, 0x3); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL2, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDEN1); + field_set(&value, 12, 12, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDEN1, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL7); + field_set(&value, 8, 4, 0x3); + 
txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL7, value); + } else { + value = rd32_ephy(hw, CMS_ANA_OVRDEN1); + field_set(&value, 4, 4, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDEN1, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL4); + field_set(&value, 24, 24, 0x1); + field_set(&value, 31, 29, 0x4); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL4, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL5); + field_set(&value, 1, 0, 0x0); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL5, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDEN1); + field_set(&value, 23, 23, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDEN1, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL9); + field_set(&value, 24, 24, 0x1); + field_set(&value, 31, 29, 0x4); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL9, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL10); + field_set(&value, 1, 0, 0x0); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL10, value); + } + + return 0; +} + +int txgbe_e56_config_rx_40G(struct txgbe_hw *hw, u32 speed) +{ + struct txgbe_adapter *adapter = hw->back; + s32 status; + + status = E56phyRxsCalibAdaptSeq40G(hw, speed); + if (status) + return status; + + //Step 2 of 2.3.4 + E56phySetRxsUfineLeMax40G(hw, speed); + + //2.3.4 RXS post CDR lock temperature tracking sequence + txgbe_temp_track_seq_40g(hw, speed); + + adapter->link_valid = true; + return 0; +} + +static int txgbe_e56_config_rx(struct txgbe_hw *hw, u32 speed) +{ + s32 status; + + if (speed == TXGBE_LINK_SPEED_40GB_FULL) { + txgbe_e56_config_rx_40G(hw, speed); + } else { + status = E56phyRxsCalibAdaptSeq(hw, speed); + if (status) + return status; + + //Step 2 of 2.3.4 + E56phySetRxsUfineLeMax(hw, speed); + + //2.3.4 RXS post CDR lock temperature tracking sequence + txgbe_temp_track_seq(hw, speed); + } + return 0; +} + +//-------------------------------------------------------------- +//2.2.10 SEQ::RX_DISABLE +//Use PDIG::PMD_CFG[0]::rx_en_cfg[] = 0b0 to powerdown specific RXS lanes. 
//Completion of RXS powerdown can be confirmed by observing ALIAS::PDIG::CTRL_FSM_RX_ST = POWERDN_ST
//--------------------------------------------------------------
//Tear down all four RXS lanes of the 40G PHY: clear every training/CDR
//override, disable the pattern checker and serial loopbacks, then bypass
//the RXS clock gates so the powerdown FSM can run.
static int txgbe_e56_disable_rx40G(struct txgbe_hw *hw)
{
	int status = 0;
	unsigned int rdata, timer;
	unsigned int addr, temp;
	int i;

	//Per-lane register blocks are E56PHY_RXS_OFFSET / E56PHY_PMD_RX_OFFSET apart.
	for (i = 0; i < 4; i++) {
		//1. Disable OVERRIDE on below aliases
		//a. ALIAS::RXS::RANGE_SEL
		rdata = 0x0000;
		addr = E56G__RXS0_ANA_OVRDEN_0_ADDR + (i * E56PHY_RXS_OFFSET);
		rdata = rd32_ephy(hw, addr);
		EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_0, ovrd_en_ana_bbcdr_osc_range_sel_i) = 0;
		txgbe_wr32_ephy(hw, addr, rdata);

		addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + (i * E56PHY_RXS_OFFSET);
		rdata = rd32_ephy(hw, addr);
		//b. ALIAS::RXS::COARSE
		EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_coarse_i) = 0;
		//c. ALIAS::RXS::FINE
		EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 0;
		//d. ALIAS::RXS::ULTRAFINE
		EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 0;
		txgbe_wr32_ephy(hw, addr, rdata);

		//e. ALIAS::RXS::SAMP_CAL_DONE
		addr = E56G__PMD_RXS0_OVRDEN_0_ADDR + (i * E56PHY_PMD_RX_OFFSET);
		rdata = rd32_ephy(hw, addr);
		EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_0, ovrd_en_rxs0_rx0_samp_cal_done_o) = 0;
		txgbe_wr32_ephy(hw, addr, rdata);

		addr = E56G__PMD_RXS0_OVRDEN_2_ADDR + (i * E56PHY_PMD_RX_OFFSET);
		rdata = rd32_ephy(hw, addr);
		//f. ALIAS::RXS::ADC_OFST_ADAPT_EN
		EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_ofst_adapt_en_i) = 0;
		//g. ALIAS::RXS::ADC_GAIN_ADAPT_EN
		EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_gain_adapt_en_i) = 0;
		//j. ALIAS::RXS::ADC_INTL_ADAPT_EN
		EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_intl_adapt_en_i) = 0;
		txgbe_wr32_ephy(hw, addr, rdata);

		addr = E56G__PMD_RXS0_OVRDEN_1_ADDR + (i * E56PHY_PMD_RX_OFFSET);
		rdata = rd32_ephy(hw, addr);
		//h. ALIAS::RXS::ADC_INTL_CAL_EN
		EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_adc_intl_cal_en_i) = 0;
		//i. ALIAS::RXS::ADC_INTL_CAL_DONE
		EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_adc_intl_cal_done_o) = 0;
		//k. ALIAS::RXS::CDR_EN
		EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_cdr_en_i) = 0;
		//l. ALIAS::RXS::VGA_TRAIN_EN
		EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_vga_train_en_i) = 0;
		//m. ALIAS::RXS::CTLE_TRAIN_EN
		EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_en_i) = 0;
		//p. ALIAS::RXS::RX_FETX_TRAIN_DONE
		EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_txffe_train_done_o) = 0;
		//r. ALIAS::RXS::RX_TXFFE_COEFF_CHANGE
		EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_txffe_coeff_change_o) = 0;
		//s. ALIAS::RXS::RX_TXFFE_TRAIN_ENACK
		EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_txffe_train_enack_o) = 0;
		txgbe_wr32_ephy(hw, addr, rdata);

		addr = E56G__PMD_RXS0_OVRDEN_3_ADDR + (i * E56PHY_PMD_RX_OFFSET);
		rdata = rd32_ephy(hw, addr);
		//n. ALIAS::RXS::RX_FETX_MOD_TYPE
		//o. ALIAS::RXS::RX_FETX_MOD_TYPE_UPDATE
		//Clear bits 6:4 of the spareout override field, preserving the rest.
		temp = EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_3, ovrd_en_rxs0_rx0_spareout_o);
		EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_3, ovrd_en_rxs0_rx0_spareout_o) = temp & 0x8F;
		txgbe_wr32_ephy(hw, addr, rdata);

		addr = E56G__RXS0_DIG_OVRDEN_1_ADDR + (i * E56PHY_RXS_OFFSET);
		rdata = rd32_ephy(hw, addr);
		//q. ALIAS::RXS::SLICER_THRESHOLD_OVRD_EN
		EPHY_XFLD(E56G__RXS0_DIG_OVRDEN_1, top_comp_th_ovrd_en) = 0;
		EPHY_XFLD(E56G__RXS0_DIG_OVRDEN_1, mid_comp_th_ovrd_en) = 0;
		EPHY_XFLD(E56G__RXS0_DIG_OVRDEN_1, bot_comp_th_ovrd_en) = 0;
		txgbe_wr32_ephy(hw, addr, rdata);

		//2. Disable pattern checker -
		addr = E56G__RXS0_DFT_1_ADDR + (i * E56PHY_RXS_OFFSET);
		rdata = rd32_ephy(hw, addr);
		EPHY_XFLD(E56G__RXS0_DFT_1, ber_en) = 0;
		txgbe_wr32_ephy(hw, addr, rdata);

		//3. Disable internal serial loopback mode -
		addr = E56G__RXS0_ANA_OVRDEN_3_ADDR + (i * E56PHY_RXS_OFFSET);
		rdata = rd32_ephy(hw, addr);
		EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_3, ovrd_en_ana_sel_lpbk_i) = 0;
		txgbe_wr32_ephy(hw, addr, rdata);

		addr = E56G__RXS0_ANA_OVRDEN_2_ADDR + (i * E56PHY_RXS_OFFSET);
		rdata = rd32_ephy(hw, addr);
		EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_2, ovrd_en_ana_en_adccal_lpbk_i) = 0;
		txgbe_wr32_ephy(hw, addr, rdata);

		//4. Enable bypass of clock gates in RXS -
		addr = E56G__RXS0_RXS_CFG_0_ADDR + (i * E56PHY_RXS_OFFSET);
		rdata = rd32_ephy(hw, addr);
		EPHY_XFLD(E56G__RXS0_RXS_CFG_0, train_clk_gate_bypass_en) = 0x1FFF;
		txgbe_wr32_ephy(hw, addr, rdata);
	}

	//5. Disable KR training mode -
	//a. ALIAS::PDIG::KR_TRAINING_MODE = 0b0
	addr = E56G__PMD_BASER_PMD_CONTROL_ADDR;
	rdata = rd32_ephy(hw, addr);
	EPHY_XFLD(E56G__PMD_BASER_PMD_CONTROL, training_enable_ln0) = 0;
	EPHY_XFLD(E56G__PMD_BASER_PMD_CONTROL, training_enable_ln1) = 0;
	EPHY_XFLD(E56G__PMD_BASER_PMD_CONTROL, training_enable_ln2) = 0;
	EPHY_XFLD(E56G__PMD_BASER_PMD_CONTROL, training_enable_ln3) = 0;
	txgbe_wr32_ephy(hw, addr, rdata);

	//6. Disable RX to TX parallel loopback -
	//a. ALIAS::PDIG::RX_TO_TX_LPBK_EN = 0b0
	addr = E56G__PMD_PMD_CFG_5_ADDR;
	rdata = rd32_ephy(hw, addr);
	EPHY_XFLD(E56G__PMD_PMD_CFG_5, rx_to_tx_lpbk_en) = 0x0;
	txgbe_wr32_ephy(hw, addr, rdata);

	//The FSM to disable RXS is present in PDIG.
The FSM disables the RXS when �C + //PDIG::PMD_CFG[0]::rx_en_cfg[] = 0b0 + txgbe_e56_ephy_config(E56G__PMD_PMD_CFG_0, rx_en_cfg, 0); + + //Wait RX FSM to be POWERDN_ST + timer = 0; + + while (EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx0_st) != 0x21 || + EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx1_st) != 0x21 || + EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx2_st) != 0x21 || + EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx3_st) != 0x21) { + rdata = 0; + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + rdata = rd32_ephy(hw, addr); + udelay(100); + if (timer++ > PHYINIT_TIMEOUT) { + printk("ERROR: Wait E56PHY_CTRL_FSM_RX_STAT_0_ADDR Timeout!!!\n"); + break; + } + } + + return status; +} + +//-------------------------------------------------------------- +//2.2.10 SEQ::RX_DISABLE +//Use PDIG::PMD_CFG[0]::rx_en_cfg[] = 0b0 to powerdown specific RXS lanes. +//Completion of RXS powerdown can be confirmed by observing ALIAS::PDIG::CTRL_FSM_RX_ST = POWERDN_ST +//-------------------------------------------------------------- +static int txgbe_e56_disable_rx(struct txgbe_hw *hw) +{ + int status = 0; + unsigned int rdata, timer; + unsigned int addr, temp; + + //1. Disable OVERRIDE on below aliases + //a. ALIAS::RXS::RANGE_SEL + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_0, ovrd_en_ana_bbcdr_osc_range_sel_i, 0); + + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + //b. ALIAS::RXS::COARSE + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_coarse_i) = 0; + //c. ALIAS::RXS::FINE + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 0; + //d. ALIAS::RXS::ULTRAFINE + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 0; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + + //e. ALIAS::RXS::SAMP_CAL_DONE + txgbe_e56_ephy_config(E56G__PMD_RXS0_OVRDEN_0, ovrd_en_rxs0_rx0_samp_cal_done_o, 0); + + EPHY_RREG(E56G__PMD_RXS0_OVRDEN_2); + //f. ALIAS::RXS::ADC_OFST_ADAPT_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_ofst_adapt_en_i) = 0; + //g. 
ALIAS::RXS::ADC_GAIN_ADAPT_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_gain_adapt_en_i) = 0; + //j. ALIAS::RXS::ADC_INTL_ADAPT_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, ovrd_en_rxs0_rx0_adc_intl_adapt_en_i) = 0; + EPHY_WREG(E56G__PMD_RXS0_OVRDEN_2); + + EPHY_RREG(E56G__PMD_RXS0_OVRDEN_1); + //h. ALIAS::RXS::ADC_INTL_CAL_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_adc_intl_cal_en_i) = 0; + //i. ALIAS::RXS::ADC_INTL_CAL_DONE + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_adc_intl_cal_done_o) = 0; + //k. ALIAS::RXS::CDR_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_cdr_en_i) = 0; + //l. ALIAS::RXS::VGA_TRAIN_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_vga_train_en_i) = 0; + //m. ALIAS::RXS::CTLE_TRAIN_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_en_i) = 0; + //p. ALIAS::RXS::RX_FETX_TRAIN_DONE + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_txffe_train_done_o) = 0; + //r. ALIAS::RXS::RX_TXFFE_COEFF_CHANGE + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_txffe_coeff_change_o) = 0; + //s. ALIAS::RXS::RX_TXFFE_TRAIN_ENACK + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_txffe_train_enack_o) = 0; + EPHY_WREG(E56G__PMD_RXS0_OVRDEN_1); + + EPHY_RREG(E56G__PMD_RXS0_OVRDEN_3); + //n. ALIAS::RXS::RX_FETX_MOD_TYPE + //o. ALIAS::RXS::RX_FETX_MOD_TYPE_UPDATE + temp = EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_3, ovrd_en_rxs0_rx0_spareout_o); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_3, ovrd_en_rxs0_rx0_spareout_o) = temp & 0x8F; + EPHY_WREG(E56G__PMD_RXS0_OVRDEN_3); + + //q. ALIAS::RXS::SLICER_THRESHOLD_OVRD_EN + EPHY_RREG(E56G__RXS0_DIG_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_DIG_OVRDEN_1, top_comp_th_ovrd_en) = 0; + EPHY_XFLD(E56G__RXS0_DIG_OVRDEN_1, mid_comp_th_ovrd_en) = 0; + EPHY_XFLD(E56G__RXS0_DIG_OVRDEN_1, bot_comp_th_ovrd_en) = 0; + EPHY_WREG(E56G__RXS0_DIG_OVRDEN_1); + + //2. Disable pattern checker �C + txgbe_e56_ephy_config(E56G__RXS0_DFT_1, ber_en, 0); + + //3. 
Disable internal serial loopback mode �C + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_3, ovrd_en_ana_sel_lpbk_i, 0); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_2, ovrd_en_ana_en_adccal_lpbk_i, 0); + + //4. Enable bypass of clock gates in RXS - + txgbe_e56_ephy_config(E56G__RXS0_RXS_CFG_0, train_clk_gate_bypass_en, 0x1FFF); + + //5. Disable KR training mode �C + //a. ALIAS::PDIG::KR_TRAINING_MODE = 0b0 + txgbe_e56_ephy_config(E56G__PMD_BASER_PMD_CONTROL, training_enable_ln0, 0); + + //6. Disable RX to TX parallel loopback �C + //a. ALIAS::PDIG::RX_TO_TX_LPBK_EN = 0b0 + txgbe_e56_ephy_config(E56G__PMD_PMD_CFG_5, rx_to_tx_lpbk_en, 0); + + //The FSM to disable RXS is present in PDIG. The FSM disables the RXS when �C + //PDIG::PMD_CFG[0]::rx_en_cfg[] = 0b0 + txgbe_e56_ephy_config(E56G__PMD_PMD_CFG_0, rx_en_cfg, 0); + + //Wait RX FSM to be POWERDN_ST + timer = 0; + while(1) { + rdata = 0; + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + rdata = rd32_ephy(hw, addr); + if((rdata & 0x3f) == 0x21) { break; } + udelay(100); + if (timer++ > PHYINIT_TIMEOUT) { + printk("ERROR: Wait E56PHY_CTRL_FSM_RX_STAT_0_ADDR Timeout!!!\n"); + break; + } + } + + return status; +} + +int txgbe_e56_reconfig_rx(struct txgbe_hw *hw, u32 speed) +{ + int status = 0; + u32 rdata; + u32 addr; + + wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, + ~TXGBE_MAC_TX_CFG_TE); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, + ~TXGBE_MAC_RX_CFG_RE); + + TCALL(hw, mac.ops.disable_sec_tx_path); + + if (hw->mac.type == txgbe_mac_aml) { + rdata = rd32(hw, TXGBE_GPIO_EXT); + if (rdata & (TXGBE_SFP1_MOD_ABS_LS | TXGBE_SFP1_RX_LOS_LS)) { + return TXGBE_ERR_TIMEOUT; + } + } + + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ENABLE_ADDR, 0x0); + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ENABLE_ADDR, 0x0); + + + if (hw->mac.type == txgbe_mac_aml40) { + //14. Do SEQ::RX_DISABLE to disable RXS. 
Poll ALIAS::PDIG::CTRL_FSM_RX_ST + //and confirm its value is POWERDN_ST + txgbe_e56_disable_rx40G(hw); + status = txgbe_e56_config_rx_40G(hw, speed); + } else { + //14. Do SEQ::RX_DISABLE to disable RXS. Poll ALIAS::PDIG::CTRL_FSM_RX_ST + //and confirm its value is POWERDN_ST + txgbe_e56_disable_rx(hw); + status = txgbe_e56_config_rx(hw, speed); + } + + addr = E56PHY_INTR_0_ADDR; + txgbe_wr32_ephy(hw, addr, E56PHY_INTR_0_IDLE_ENTRY1); + + addr = E56PHY_INTR_1_ADDR; + txgbe_wr32_ephy(hw, addr, E56PHY_INTR_1_IDLE_EXIT1); + + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ENABLE_ADDR, E56PHY_INTR_0_IDLE_ENTRY1); + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ENABLE_ADDR, E56PHY_INTR_1_IDLE_EXIT1); + + TCALL(hw, mac.ops.enable_sec_tx_path); + + return status; +} + +//Reference setting code for SFP mode +int txgbe_set_link_to_amlite(struct txgbe_hw *hw, u32 speed) +{ + struct txgbe_adapter *adapter = hw->back; + u32 value = 0; + u32 ppl_lock = false; + int status = 0; + u32 reset = 0; + + if ((rd32(hw, TXGBE_EPHY_STAT) & TXGBE_EPHY_STAT_PPL_LOCK) + == TXGBE_EPHY_STAT_PPL_LOCK) { + ppl_lock = true; + wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, + ~TXGBE_MAC_TX_CFG_TE); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, + ~TXGBE_MAC_RX_CFG_RE); + + TCALL(hw, mac.ops.disable_sec_tx_path); + } + TCALL(hw, mac.ops.disable_tx_laser); + + if (hw->bus.lan_id == 0) { + reset = TXGBE_MIS_RST_LAN0_EPHY_RST; + } else { + reset = TXGBE_MIS_RST_LAN1_EPHY_RST; + } + + wr32(hw, TXGBE_MIS_RST, + reset | rd32(hw, TXGBE_MIS_RST)); + TXGBE_WRITE_FLUSH(hw); + usec_delay(10); + + /////////////////////////// XLGPCS REGS Start + value = txgbe_rd32_epcs(hw, VR_PCS_DIG_CTRL1); + value |= 0x8000; + txgbe_wr32_epcs(hw, VR_PCS_DIG_CTRL1, value); + + udelay(1000); + value = txgbe_rd32_epcs(hw, VR_PCS_DIG_CTRL1); + if ((value & 0x8000)) { + status = TXGBE_ERR_PHY_INIT_NOT_DONE; + TCALL(hw, mac.ops.enable_tx_laser); + goto out; + } + + value = txgbe_rd32_epcs(hw, SR_AN_CTRL); + field_set(&value, 12, 12, 0); + 
	txgbe_wr32_epcs(hw, SR_AN_CTRL, value);

	//Per-speed PCS/PLL/datapath programming.  Each branch selects the PCS
	//type, sets PLL dividers and datapath widths, applies the temperature
	//compensation, then enables the TX lanes via PMD_CFG0.
	if (speed == TXGBE_LINK_SPEED_40GB_FULL) {
		value = txgbe_rd32_epcs(hw, SR_PCS_CTRL1);
		field_set(&value, 5, 2, 0x3);
		txgbe_wr32_epcs(hw, SR_PCS_CTRL1, value);

		value = txgbe_rd32_epcs(hw, SR_PCS_CTRL2);
		field_set(&value, 3, 0, 0x4);
		txgbe_wr32_epcs(hw, SR_PCS_CTRL2, value);

		value = rd32_ephy(hw, ANA_OVRDVAL0);
		field_set(&value, 29, 29, 0x1);
		field_set(&value, 1, 1, 0x1);
		txgbe_wr32_ephy(hw, ANA_OVRDVAL0, value);

		value = rd32_ephy(hw, ANA_OVRDVAL5);
		field_set(&value, 24, 24, 0x0);
		txgbe_wr32_ephy(hw, ANA_OVRDVAL5, value);

		value = rd32_ephy(hw, ANA_OVRDEN0);
		field_set(&value, 1, 1, 0x1);
		txgbe_wr32_ephy(hw, ANA_OVRDEN0, value);

		value = rd32_ephy(hw, ANA_OVRDEN1);
		field_set(&value, 30, 30, 0x1);
		field_set(&value, 25, 25, 0x1);
		txgbe_wr32_ephy(hw, ANA_OVRDEN1, value);

		value = rd32_ephy(hw, PLL0_CFG0);
		field_set(&value, 25, 24, 0x1);
		field_set(&value, 17, 16, 0x3);
		txgbe_wr32_ephy(hw, PLL0_CFG0, value);

		value = rd32_ephy(hw, PLL0_CFG2);
		field_set(&value, 12, 8, 0x4);
		txgbe_wr32_ephy(hw, PLL0_CFG2, value);

		value = rd32_ephy(hw, PLL1_CFG0);
		field_set(&value, 25, 24, 0x1);
		field_set(&value, 17, 16, 0x3);
		txgbe_wr32_ephy(hw, PLL1_CFG0, value);

		value = rd32_ephy(hw, PLL1_CFG2);
		field_set(&value, 12, 8, 0x8);
		txgbe_wr32_ephy(hw, PLL1_CFG2, value);

		value = rd32_ephy(hw, PLL0_DIV_CFG0);
		field_set(&value, 18, 8, 0x294);
		field_set(&value, 4, 0, 0x8);
		txgbe_wr32_ephy(hw, PLL0_DIV_CFG0, value);

		value = rd32_ephy(hw, DATAPATH_CFG0);
		field_set(&value, 30, 28, 0x7);
		field_set(&value, 26, 24, 0x5);
		field_set(&value, 18, 16, 0x5);
		field_set(&value, 14, 12, 0x5);
		field_set(&value, 10, 8, 0x5);
		txgbe_wr32_ephy(hw, DATAPATH_CFG0, value);

		value = rd32_ephy(hw, DATAPATH_CFG1);
		field_set(&value, 26, 24, 0x5);
		field_set(&value, 10, 8, 0x5);
		field_set(&value, 18, 16, 0x5);
		field_set(&value, 2, 0, 0x5);
		txgbe_wr32_ephy(hw, DATAPATH_CFG1, value);

		value = rd32_ephy(hw, AN_CFG1);
		field_set(&value, 4, 0, 0x2);
		txgbe_wr32_ephy(hw, AN_CFG1, value);

		txgbe_e56_cfg_temp(hw);
		txgbe_e56_cfg_40g(hw);

		value = rd32_ephy(hw, PMD_CFG0);
		field_set(&value, 21, 20, 0x3);
		field_set(&value, 19, 12, 0xf); //TX_EN set (all 4 lanes)
		field_set(&value, 8, 8, 0x0);
		field_set(&value, 1, 1, 0x1);
		txgbe_wr32_ephy(hw, PMD_CFG0, value);
	}

	if (speed == TXGBE_LINK_SPEED_25GB_FULL) {
		value = txgbe_rd32_epcs(hw, SR_PCS_CTRL1);
		field_set(&value, 5, 2, 5);
		txgbe_wr32_epcs(hw, SR_PCS_CTRL1, value);

		value = txgbe_rd32_epcs(hw, SR_PCS_CTRL2);
		field_set(&value, 3, 0, 7);
		txgbe_wr32_epcs(hw, SR_PCS_CTRL2, value);

		value = txgbe_rd32_epcs(hw, SR_PMA_CTRL2);
		field_set(&value, 6, 0, 0x39);
		txgbe_wr32_epcs(hw, SR_PMA_CTRL2, value);

		value = rd32_ephy(hw, ANA_OVRDVAL0);
		field_set(&value, 29, 29, 0x1);
		field_set(&value, 1, 1, 0x1);
		txgbe_wr32_ephy(hw, ANA_OVRDVAL0, value);

		value = rd32_ephy(hw, ANA_OVRDVAL5);
		//Update to 0 from SNPS for PIN CLKP/N: Enable the termination of the input buffer
		field_set(&value, 24, 24, 0x0);
		txgbe_wr32_ephy(hw, ANA_OVRDVAL5, value);

		value = rd32_ephy(hw, ANA_OVRDEN0);
		field_set(&value, 1, 1, 0x1);
		txgbe_wr32_ephy(hw, ANA_OVRDEN0, value);

		value = rd32_ephy(hw, ANA_OVRDEN1);
		field_set(&value, 30, 30, 0x1);
		field_set(&value, 25, 25, 0x1);
		txgbe_wr32_ephy(hw, ANA_OVRDEN1, value);

		value = rd32_ephy(hw, PLL0_CFG0);
		field_set(&value, 25, 24, 0x1);
		field_set(&value, 17, 16, 0x3);
		txgbe_wr32_ephy(hw, PLL0_CFG0, value);

		value = rd32_ephy(hw, PLL0_CFG2);
		field_set(&value, 12, 8, 0x4);
		txgbe_wr32_ephy(hw, PLL0_CFG2, value);

		value = rd32_ephy(hw, PLL1_CFG0);
		field_set(&value, 25, 24, 0x1);
		field_set(&value, 17, 16, 0x3);
		txgbe_wr32_ephy(hw, PLL1_CFG0, value);

		value = rd32_ephy(hw, PLL1_CFG2);
		field_set(&value, 12, 8, 0x8);
		txgbe_wr32_ephy(hw, PLL1_CFG2, value);

		value = rd32_ephy(hw, PLL0_DIV_CFG0);
		field_set(&value, 18, 8, 0x294);
		field_set(&value, 4, 0, 0x8);
		txgbe_wr32_ephy(hw, PLL0_DIV_CFG0, value);

		value = rd32_ephy(hw, DATAPATH_CFG0);
		field_set(&value, 30, 28, 0x7);
		field_set(&value, 26, 24, 0x5);
		field_set(&value, 18, 16, 0x3);
		field_set(&value, 14, 12, 0x5);
		field_set(&value, 10, 8, 0x5);
		txgbe_wr32_ephy(hw, DATAPATH_CFG0, value);

		value = rd32_ephy(hw, DATAPATH_CFG1);
		field_set(&value, 26, 24, 0x5);
		field_set(&value, 10, 8, 0x5);
		field_set(&value, 18, 16, 0x3);
		field_set(&value, 2, 0, 0x3);
		txgbe_wr32_ephy(hw, DATAPATH_CFG1, value);

		value = rd32_ephy(hw, AN_CFG1);
		field_set(&value, 4, 0, 0x9);
		txgbe_wr32_ephy(hw, AN_CFG1, value);

		txgbe_e56_cfg_temp(hw);
		txgbe_e56_cfg_25g(hw);

		value = rd32_ephy(hw, PMD_CFG0);
		field_set(&value, 21, 20, 0x3);
		field_set(&value, 19, 12, 0x1); //TX_EN set (lane 0 only)
		field_set(&value, 8, 8, 0x0);
		field_set(&value, 1, 1, 0x1);
		txgbe_wr32_ephy(hw, PMD_CFG0, value);
	}

	if (speed == TXGBE_LINK_SPEED_10GB_FULL) {
		value = txgbe_rd32_epcs(hw, SR_PCS_CTRL1);
		field_set(&value, 5, 2, 0);
		txgbe_wr32_epcs(hw, SR_PCS_CTRL1, value);

		value = txgbe_rd32_epcs(hw, SR_PCS_CTRL2);
		field_set(&value, 3, 0, 0);
		txgbe_wr32_epcs(hw, SR_PCS_CTRL2, value);

		value = txgbe_rd32_epcs(hw, SR_PMA_CTRL2);
		field_set(&value, 6, 0, 0xb);
		txgbe_wr32_epcs(hw, SR_PMA_CTRL2, value);

		value = rd32_ephy(hw, ANA_OVRDVAL0);
		field_set(&value, 29, 29, 0x1);
		field_set(&value, 1, 1, 0x1);
		txgbe_wr32_ephy(hw, ANA_OVRDVAL0, value);

		value = rd32_ephy(hw, ANA_OVRDVAL5);
		field_set(&value, 24, 24, 0x0);
		txgbe_wr32_ephy(hw, ANA_OVRDVAL5, value);

		value = rd32_ephy(hw, ANA_OVRDEN0);
		field_set(&value, 1, 1, 0x1);
		txgbe_wr32_ephy(hw, ANA_OVRDEN0, value);

		value = rd32_ephy(hw, ANA_OVRDEN1);
		field_set(&value, 30, 30, 0x1);
		field_set(&value, 25, 25, 0x1);
		txgbe_wr32_ephy(hw, ANA_OVRDEN1, value);

		value = rd32_ephy(hw, PLL0_CFG0);
		field_set(&value, 25, 24, 0x1);
		field_set(&value, 17, 16, 0x3);
		txgbe_wr32_ephy(hw, PLL0_CFG0, value);

		value = rd32_ephy(hw, PLL0_CFG2);
		field_set(&value, 12, 8, 0x4);
		txgbe_wr32_ephy(hw, PLL0_CFG2, value);

		value = rd32_ephy(hw, PLL1_CFG0);
		field_set(&value, 25, 24, 0x1);
		field_set(&value, 17, 16, 0x3);
		txgbe_wr32_ephy(hw, PLL1_CFG0, value);

		value = rd32_ephy(hw, PLL1_CFG2);
		field_set(&value, 12, 8, 0x8);
		txgbe_wr32_ephy(hw, PLL1_CFG2, value);

		value = rd32_ephy(hw, PLL0_DIV_CFG0);
		field_set(&value, 18, 8, 0x294);
		field_set(&value, 4, 0, 0x8);
		txgbe_wr32_ephy(hw, PLL0_DIV_CFG0, value);

		value = rd32_ephy(hw, DATAPATH_CFG0);
		field_set(&value, 30, 28, 0x7);
		field_set(&value, 26, 24, 0x5);
		field_set(&value, 18, 16, 0x5);
		field_set(&value, 14, 12, 0x5);
		field_set(&value, 10, 8, 0x5);
		txgbe_wr32_ephy(hw, DATAPATH_CFG0, value);

		value = rd32_ephy(hw, DATAPATH_CFG1);
		field_set(&value, 26, 24, 0x5);
		field_set(&value, 10, 8, 0x5);
		field_set(&value, 18, 16, 0x5);
		field_set(&value, 2, 0, 0x5);
		txgbe_wr32_ephy(hw, DATAPATH_CFG1, value);

		value = rd32_ephy(hw, AN_CFG1);
		field_set(&value, 4, 0, 0x2);
		txgbe_wr32_ephy(hw, AN_CFG1, value);

		txgbe_e56_cfg_temp(hw);
		txgbe_e56_cfg_10g(hw);

		value = rd32_ephy(hw, PMD_CFG0);
		field_set(&value, 21, 20, 0x3);
		field_set(&value, 19, 12, 0x1); //TX_EN set (lane 0 only)
		field_set(&value, 8, 8, 0x0);
		field_set(&value, 1, 1, 0x1);
		txgbe_wr32_ephy(hw, PMD_CFG0, value);
	}

	TCALL(hw, mac.ops.enable_tx_laser);

	status = txgbe_e56_config_rx(hw, speed);

	//Idle-detect thresholds, then re-arm the idle entry/exit interrupts.
	value = rd32_ephy(hw, E56PHY_RXS_IDLE_DETECT_1_ADDR);
	field_set(&value, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX, 0x28);
	field_set(&value, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN, 0xa);
	txgbe_wr32_ephy(hw, E56PHY_RXS_IDLE_DETECT_1_ADDR, value);

	txgbe_wr32_ephy(hw, E56PHY_INTR_0_ADDR, E56PHY_INTR_0_IDLE_ENTRY1);
	txgbe_wr32_ephy(hw, E56PHY_INTR_1_ADDR, E56PHY_INTR_1_IDLE_EXIT1);
	txgbe_wr32_ephy(hw, E56PHY_INTR_0_ENABLE_ADDR, E56PHY_INTR_0_IDLE_ENTRY1);
	txgbe_wr32_ephy(hw, E56PHY_INTR_1_ENABLE_ADDR, E56PHY_INTR_1_IDLE_EXIT1);

	if (adapter->fec_link_mode != TXGBE_PHY_FEC_AUTO) {
		adapter->cur_fec_link = adapter->fec_link_mode;
		txgbe_e56_set_fec_mode(hw, adapter->cur_fec_link);
	}

	//NOTE(review): this goto is redundant — control falls through to
	//the same label either way.
	if (status)
		goto out;

out:
	if (ppl_lock) {
		TCALL(hw, mac.ops.enable_sec_tx_path);
	}

	return status;
}

//Report the FEC mode currently programmed in the PCS: RS-FEC takes
//precedence over BASE-R, otherwise FEC is off.  Register reads are
//serialized with the e56_lock mutex.
int txgbe_get_cur_fec_mode(struct txgbe_hw *hw)
{
	struct txgbe_adapter *adapter = hw->back;
	u32 value = 0;

	mutex_lock(&adapter->e56_lock);
	value = txgbe_rd32_epcs(hw, SR_PMA_RS_FEC_CTRL);
	mutex_unlock(&adapter->e56_lock);

	if (value & 0x4)
		return TXGBE_PHY_FEC_RS;

	mutex_lock(&adapter->e56_lock);
	value = txgbe_rd32_epcs(hw, SR_PMA_KR_FEC_CTRL);
	mutex_unlock(&adapter->e56_lock);

	if (value & 0x1)
		return TXGBE_PHY_FEC_BASER;

	return TXGBE_PHY_FEC_OFF;
}

//Program the PCS for the requested FEC mode (RS, BASE-R, or none).
//The two modes are mutually exclusive, so the other one is always
//disabled first.  Always returns 0.
int txgbe_e56_set_fec_mode(struct txgbe_hw *hw, u8 fec_mode)
{
	u32 value = 0;

	if (fec_mode & TXGBE_PHY_FEC_RS) {
		//disable BASER FEC
		value = txgbe_rd32_epcs(hw, SR_PMA_KR_FEC_CTRL);
		field_set(&value, 0, 0, 0);
		txgbe_wr32_epcs(hw, SR_PMA_KR_FEC_CTRL, value);

		//enable RS FEC
		txgbe_wr32_epcs(hw, 0x180a3, 0x68c1);
		txgbe_wr32_epcs(hw, 0x180a4, 0x3321);
		txgbe_wr32_epcs(hw, 0x180a5, 0x973e);
		txgbe_wr32_epcs(hw, 0x180a6, 0xccde);

		txgbe_wr32_epcs(hw, 0x38018, 1024);
		value = txgbe_rd32_epcs(hw, 0x100c8);
		field_set(&value, 2, 2, 1);
		txgbe_wr32_epcs(hw, 0x100c8, value);
	} else if (fec_mode & TXGBE_PHY_FEC_BASER) {
		//disable RS FEC
		txgbe_wr32_epcs(hw, 0x180a3, 0x7690);
		txgbe_wr32_epcs(hw, 0x180a4, 0x3347);
		txgbe_wr32_epcs(hw, 0x180a5, 0x896f);
		txgbe_wr32_epcs(hw, 0x180a6, 0xccb8);
		txgbe_wr32_epcs(hw, 0x38018, 0x3fff);
		value = txgbe_rd32_epcs(hw, 0x100c8);
		field_set(&value, 2, 2, 0);
		txgbe_wr32_epcs(hw, 0x100c8, value);

		//enable BASER FEC
		value = txgbe_rd32_epcs(hw, SR_PMA_KR_FEC_CTRL);
		field_set(&value, 0, 0, 1);
		txgbe_wr32_epcs(hw,
SR_PMA_KR_FEC_CTRL, value); + } else { + //disable RS FEC + txgbe_wr32_epcs(hw, 0x180a3, 0x7690); + txgbe_wr32_epcs(hw, 0x180a4, 0x3347); + txgbe_wr32_epcs(hw, 0x180a5, 0x896f); + txgbe_wr32_epcs(hw, 0x180a6, 0xccb8); + txgbe_wr32_epcs(hw, 0x38018, 0x3fff); + value = txgbe_rd32_epcs(hw, 0x100c8); + field_set(&value, 2, 2, 0); + txgbe_wr32_epcs(hw, 0x100c8, value); + + //disable BASER FEC + value = txgbe_rd32_epcs(hw, SR_PMA_KR_FEC_CTRL); + field_set(&value, 0, 0, 0); + txgbe_wr32_epcs(hw, SR_PMA_KR_FEC_CTRL, value); + } + + return 0; +} + +int txgbe_e56_fec_mode_polling(struct txgbe_hw *hw, bool *link_up) +{ + struct txgbe_adapter *adapter = hw->back; + int i = 0, j = 0; + u32 speed; + + do { + if (!(adapter->fec_link_mode & BIT(j))) { + j += 1; + continue; + } + + adapter->cur_fec_link = adapter->fec_link_mode & BIT(j); + + mutex_lock(&adapter->e56_lock); + txgbe_e56_set_fec_mode(hw, adapter->cur_fec_link); + mutex_unlock(&adapter->e56_lock); + + for (i = 0; i < 4; i++) { + msleep(250); + txgbe_e56_check_phy_link(hw, &speed, link_up); + if (*link_up) + return 0; + } + + j += 1; + } while (j < 3); + + return 0; +} + diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.h new file mode 100644 index 0000000000000000000000000000000000000000..4c03564ff1859692cbcc404283ffb92e0f743b70 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.h @@ -0,0 +1,1809 @@ +#ifndef _TXGBE_E56_H_ +#define _TXGBE_E56_H_ + +#include "txgbe_type.h" +#include "txgbe.h" + +#define EPHY_RREG(REG) \ +do {\ + rdata = 0; \ + rdata = rd32_ephy(hw, REG##_ADDR); \ +} while(0) + +#define EPHY_WREG(REG) \ +do { \ + txgbe_wr32_ephy(hw, REG##_ADDR, rdata); \ +} while(0) + +#define EPCS_RREG(REG) \ +do {\ + rdata = 0; \ + rdata = txgbe_rd32_epcs(hw, REG##_ADDR); \ +} while(0) + +#define EPCS_WREG(REG) \ +do { \ + txgbe_wr32_epcs(hw, REG##_ADDR, rdata); \ +} while(0) + +#define txgbe_e56_ephy_config(reg, field, val) \ +do { \ + 
	EPHY_RREG(reg); \
	EPHY_XFLD(reg, field) = (val); \
	EPHY_WREG(reg); \
} while(0)

#define txgbe_e56_epcs_config(reg, field, val) \
do { \
	EPCS_RREG(reg); \
	EPCS_XFLD(reg, field) = (val); \
	EPCS_WREG(reg); \
} while(0)

//--------------------------------
//LAN GPIO define for SFP+ module
//--------------------------------
//-- Fields (msb,lsb pairs consumed by field_set()-style helpers)
#define SFP1_RS0 5,5
#define SFP1_RS1 4,4
#define SFP1_RX_LOS 3,3
#define SFP1_MOD_ABS 2,2
#define SFP1_TX_DISABLE 1,1
#define SFP1_TX_FAULT 0,0
//Access a named bitfield of the local `rdata` via the register's
//union/bitfield type.
#define EPHY_XFLD(REG, FLD) ((REG *)&rdata)->FLD
#define EPCS_XFLD(REG, FLD) ((REG *)&rdata)->FLD

//Bit layout of CMS_ANA_OVRDVAL_0 (0xcb0).
typedef union {
	struct {
		u32 ana_refclk_buf_daisy_en_i : 1;
		u32 ana_refclk_buf_pad_en_i : 1;
		u32 ana_vddinoff_dcore_dig_o : 1;
		u32 ana_lcpll_en_clkout_hf_left_top_i : 1;
		u32 ana_lcpll_en_clkout_hf_right_top_i : 1;
		u32 ana_lcpll_en_clkout_hf_left_bot_i : 1;
		u32 ana_lcpll_en_clkout_hf_right_bot_i : 1;
		u32 ana_lcpll_en_clkout_lf_left_top_i : 1;
		u32 ana_lcpll_en_clkout_lf_right_top_i : 1;
		u32 ana_lcpll_en_clkout_lf_left_bot_i : 1;
		u32 ana_lcpll_en_clkout_lf_right_bot_i : 1;
		u32 ana_bg_en_i : 1;
		u32 ana_en_rescal_i : 1;
		u32 ana_rescal_comp_o : 1;
		u32 ana_en_ldo_core_i : 1;
		u32 ana_lcpll_hf_en_bias_i : 1;
		u32 ana_lcpll_hf_en_loop_i : 1;
		u32 ana_lcpll_hf_en_cp_i : 1;
		u32 ana_lcpll_hf_set_lpf_i : 1;
		u32 ana_lcpll_hf_en_vco_i : 1;
		u32 ana_lcpll_hf_vco_amp_status_o : 1;
		u32 ana_lcpll_hf_en_odiv_i : 1;
		u32 ana_lcpll_lf_en_bias_i : 1;
		u32 ana_lcpll_lf_en_loop_i : 1;
		u32 ana_lcpll_lf_en_cp_i : 1;
		u32 ana_lcpll_lf_set_lpf_i : 1;
		u32 ana_lcpll_lf_en_vco_i : 1;
		u32 ana_lcpll_lf_vco_amp_status_o : 1;
		u32 ana_lcpll_lf_en_odiv_i : 1;
		u32 ana_lcpll_hf_refclk_select_i : 1;
		u32 ana_lcpll_lf_refclk_select_i : 1;
		u32 rsvd0 : 1;
	};
	u32 reg;
} E56G_CMS_ANA_OVRDVAL_0;

#define E56G_CMS_ANA_OVRDVAL_0_ADDR 0xcb0
/* AMLITE ETH PHY Registers */
#define SR_PMA_KR_FEC_CTRL 0x100ab
#define SR_AN_CTRL 0x70000
#define VR_PCS_DIG_CTRL1 0x38000
#define SR_PCS_CTRL1 0x30000
#define SR_PCS_CTRL2 0x30007
#define SR_PMA_CTRL2 0x10007
#define VR_PCS_DIG_CTRL3 0x38003
#define VR_PMA_CTRL3 0x180a8
#define VR_PMA_CTRL4 0x180a9
#define SR_PMA_RS_FEC_CTRL 0x100c8
#define CMS_ANA_OVRDEN0 0xca4
#define ANA_OVRDEN1 0xca8
#define ANA_OVRDVAL0 0xcb0
#define ANA_OVRDVAL5 0xcc4
#define OSC_CAL_N_CDR4 0x14
#define PLL0_CFG0 0xc10
#define PLL0_CFG2 0xc18
#define PLL0_DIV_CFG0 0xc1c
#define PLL1_CFG0 0xc48
#define PLL1_CFG2 0xc50
#define CMS_PIN_OVRDEN0 0xc8c
#define CMS_PIN_OVRDVAL0 0xc94
#define DATAPATH_CFG0 0x142c
#define DATAPATH_CFG1 0x1430
#define AN_CFG1 0x1438
#define SPARE52 0x16fc
#define RXS_CFG0 0x000
#define PMD_CFG0 0x1400
#define SR_PCS_STS1 0x30001
#define PMD_CTRL_FSM_TX_STAT0 0x14dc
#define CMS_ANA_OVRDEN0 0xca4
#define CMS_ANA_OVRDEN1 0xca8
#define CMS_ANA_OVRDVAL2 0xcb8
#define CMS_ANA_OVRDVAL4 0xcc0
#define CMS_ANA_OVRDVAL5 0xcc4
#define CMS_ANA_OVRDVAL7 0xccc
#define CMS_ANA_OVRDVAL9 0xcd4
#define CMS_ANA_OVRDVAL10 0xcd8

#define TXS_TXS_CFG1 0x804
#define TXS_WKUP_CNT 0x808
#define TXS_PIN_OVRDEN0 0x80c
#define TXS_PIN_OVRDVAL6 0x82c
#define TXS_ANA_OVRDVAL1 0x854

//CMS (common subsystem) block.
#define E56PHY_CMS_BASE_ADDR 0x0C00

#define E56PHY_CMS_PIN_OVRDEN_0_ADDR (E56PHY_CMS_BASE_ADDR+0x8C)
#define E56PHY_CMS_PIN_OVRDEN_0_OVRD_EN_PLL0_TX_SIGNAL_TYPE_I 12,12

#define E56PHY_CMS_PIN_OVRDVAL_0_ADDR (E56PHY_CMS_BASE_ADDR+0x94)
#define E56PHY_CMS_PIN_OVRDVAL_0_INT_PLL0_TX_SIGNAL_TYPE_I 10,10

#define E56PHY_CMS_ANA_OVRDEN_0_ADDR (E56PHY_CMS_BASE_ADDR+0xA4)

#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_VCO_SWING_CTRL_I 29,29


#define E56PHY_CMS_ANA_OVRDEN_1_ADDR (E56PHY_CMS_BASE_ADDR+0xA8)
#define E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_HF_TEST_IN_I 4,4

#define E56PHY_CMS_ANA_OVRDVAL_2_ADDR (E56PHY_CMS_BASE_ADDR+0xB8)

#define E56PHY_CMS_ANA_OVRDVAL_2_ANA_LCPLL_HF_VCO_SWING_CTRL_I 31,28

#define E56PHY_CMS_ANA_OVRDVAL_4_ADDR (E56PHY_CMS_BASE_ADDR+0xC0)


//TX serdes (TXS) blocks, one per lane, E56PHY_TXS_OFFSET apart.
#define E56PHY_TXS_BASE_ADDR 0x0800
#define E56PHY_TXS1_BASE_ADDR 0x0900
#define E56PHY_TXS2_BASE_ADDR 0x0A00
#define E56PHY_TXS3_BASE_ADDR 0x0B00
#define E56PHY_TXS_OFFSET 0x0100

#define E56PHY_PMD_RX_OFFSET 0x02C

#define E56PHY_TXS_TXS_CFG_1_ADDR (E56PHY_TXS_BASE_ADDR+0x04)
#define E56PHY_TXS_TXS_CFG_1_ADAPTATION_WAIT_CNT_X256 7,4
#define E56PHY_TXS_WKUP_CNT_ADDR (E56PHY_TXS_BASE_ADDR+0x08)
#define E56PHY_TXS_WKUP_CNTLDO_WKUP_CNT_X32 7,0
#define E56PHY_TXS_WKUP_CNTDCC_WKUP_CNT_X32 15,8


#define E56PHY_TXS_PIN_OVRDEN_0_ADDR (E56PHY_TXS_BASE_ADDR+0x0C)
#define E56PHY_TXS_PIN_OVRDEN_0_OVRD_EN_TX0_EFUSE_BITS_I 28,28

#define E56PHY_TXS_PIN_OVRDVAL_6_ADDR (E56PHY_TXS_BASE_ADDR+0x2C)

#define E56PHY_TXS_ANA_OVRDVAL_1_ADDR (E56PHY_TXS_BASE_ADDR+0x54)
#define E56PHY_TXS_ANA_OVRDVAL_1_ANA_TEST_DAC_I 23,8

#define E56PHY_TXS_ANA_OVRDEN_0_ADDR (E56PHY_TXS_BASE_ADDR+0x44)
#define E56PHY_TXS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_DAC_I 13,13

//RX serdes (RXS) blocks, one per lane, E56PHY_RXS_OFFSET apart.
#define E56PHY_RXS_BASE_ADDR 0x0000
#define E56PHY_RXS1_BASE_ADDR 0x0200
#define E56PHY_RXS2_BASE_ADDR 0x0400
#define E56PHY_RXS3_BASE_ADDR 0x0600
#define E56PHY_RXS_OFFSET 0x0200

#define E56PHY_RXS_RXS_CFG_0_ADDR (E56PHY_RXS_BASE_ADDR+0x000)
#define E56PHY_RXS_RXS_CFG_0_DSER_DATA_SEL 1,1
#define E56PHY_RXS_RXS_CFG_0_TRAIN_CLK_GATE_BYPASS_EN 17,4

#define E56PHY_RXS_OSC_CAL_N_CDR_1_ADDR (E56PHY_RXS_BASE_ADDR+0x008)
#define E56PHY_RXS_OSC_CAL_N_CDR_1_PREDIV1 15,0
#define E56PHY_RXS_OSC_CAL_N_CDR_1_PREDIV1_LSB 0
#define E56PHY_RXS_OSC_CAL_N_CDR_1_TARGET_CNT1 31,16
#define E56PHY_RXS_OSC_CAL_N_CDR_1_TARGET_CNT1_LSB 16


#define E56PHY_RXS_OSC_CAL_N_CDR_4_ADDR (E56PHY_RXS_BASE_ADDR+0x014)
#define E56PHY_RXS_OSC_CAL_N_CDR_4_OSC_RANGE_SEL1 3,2
#define E56PHY_RXS_OSC_CAL_N_CDR_4_VCO_CODE_INIT 18,8
#define E56PHY_RXS_OSC_CAL_N_CDR_4_OSC_CURRENT_BOOST_EN1 21,21
#define E56PHY_RXS_OSC_CAL_N_CDR_4_BBCDR_CURRENT_BOOST1 27,26

#define E56PHY_RXS_OSC_CAL_N_CDR_5_ADDR (E56PHY_RXS_BASE_ADDR+0x018)
#define E56PHY_RXS_OSC_CAL_N_CDR_5_SDM_WIDTH 3,2
#define E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_PRELOCK 15,12
#define E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_POSTLOCK 19,16
#define E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_POSTLOCK 23,20
#define E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_PRELOCK 27,24
#define E56PHY_RXS_OSC_CAL_N_CDR_5_BBCDR_RDY_CNT 30,28

#define E56PHY_RXS_OSC_CAL_N_CDR_6_ADDR (E56PHY_RXS_BASE_ADDR+0x01C)
#define E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_PRELOCK 3,0
#define E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_POSTLOCK 7,4

#define E56PHY_RXS_INTL_CONFIG_0_ADDR (E56PHY_RXS_BASE_ADDR+0x020)
#define E56PHY_RXS_INTL_CONFIG_0_ADC_INTL2SLICE_DELAY1 31,16

#define E56PHY_RXS_INTL_CONFIG_2_ADDR (E56PHY_RXS_BASE_ADDR+0x028)
#define E56PHY_RXS_INTL_CONFIG_2_INTERLEAVER_HBW_DISABLE1 1,1

#define E56PHY_RXS_TXFFE_TRAINING_0_ADDR (E56PHY_RXS_BASE_ADDR+0x02C)
#define E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_LTH 18,12
#define E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_UTH 26,20

#define E56PHY_RXS_TXFFE_TRAINING_1_ADDR (E56PHY_RXS_BASE_ADDR+0x030)
#define E56PHY_RXS_TXFFE_TRAINING_1_C1_LTH 8,0
#define E56PHY_RXS_TXFFE_TRAINING_1_C1_UTH 20,12

#define E56PHY_RXS_TXFFE_TRAINING_2_ADDR (E56PHY_RXS_BASE_ADDR+0x034)
#define E56PHY_RXS_TXFFE_TRAINING_2_CM1_LTH 8,0
#define E56PHY_RXS_TXFFE_TRAINING_2_CM1_UTH 20,12


#define E56PHY_RXS_TXFFE_TRAINING_3_ADDR (E56PHY_RXS_BASE_ADDR+0x038)
#define E56PHY_RXS_TXFFE_TRAINING_3_CM2_LTH 8,0
#define E56PHY_RXS_TXFFE_TRAINING_3_CM2_UTH 20,12
#define E56PHY_RXS_TXFFE_TRAINING_3_TXFFE_TRAIN_MOD_TYPE 26,21

#define E56PHY_RXS_VGA_TRAINING_0_ADDR (E56PHY_RXS_BASE_ADDR+0x04C)
#define E56PHY_RXS_VGA_TRAINING_0_VGA_TARGET 18,12


#define E56PHY_RXS_VGA_TRAINING_1_ADDR (E56PHY_RXS_BASE_ADDR+0x050)
#define E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT0 4,0
#define E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT0 12,8
#define E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT123 20,16
#define E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT123 28,24

#define E56PHY_RXS_CTLE_TRAINING_0_ADDR (E56PHY_RXS_BASE_ADDR+0x054)
#define E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT0 24,20
#define E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT123 31,27

#define E56PHY_RXS_CTLE_TRAINING_1_ADDR (E56PHY_RXS_BASE_ADDR+0x058)
#define E56PHY_RXS_CTLE_TRAINING_1_LFEQ_LUT 24,0

#define E56PHY_RXS_CTLE_TRAINING_2_ADDR (E56PHY_RXS_BASE_ADDR+0x05C)
#define E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P1 5,0
#define E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P2 13,8
#define E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P3 21,16


#define E56PHY_RXS_CTLE_TRAINING_3_ADDR (E56PHY_RXS_BASE_ADDR+0x060)
#define E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P1 9,8
#define E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P2 11,10
#define E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P3 13,12

#define E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADDR (E56PHY_RXS_BASE_ADDR+0x064)
#define E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_SLICE_DATA_AVG_CNT 5,4
#define E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_DATA_AVG_CNT 9,8
#define E56PHY_RXS_OFFSET_N_GAIN_CAL_0_FE_OFFSET_DAC_CLK_CNT_X8 31,28

#define E56PHY_RXS_OFFSET_N_GAIN_CAL_1_ADDR (E56PHY_RXS_BASE_ADDR+0x068)
#define E56PHY_RXS_OFFSET_N_GAIN_CAL_1_SAMP_ADAPT_CFG 31,28

#define E56PHY_RXS_FFE_TRAINING_0_ADDR (E56PHY_RXS_BASE_ADDR+0x070)
#define E56PHY_RXS_FFE_TRAINING_0_FFE_TAP_EN 23,8

#define E56PHY_RXS_IDLE_DETECT_1_ADDR (E56PHY_RXS_BASE_ADDR+0x088)
#define E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX 22,16
#define E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN 30,24

#define E56PHY_RXS_ANA_OVRDEN_0_ADDR (E56PHY_RXS_BASE_ADDR+0x08C)
#define E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RTERM_I 0,0
#define E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_TRIM_RTERM_I 1,1
#define E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_BBCDR_OSC_RANGE_SEL_I 29,29

#define E56PHY_RXS_ANA_OVRDEN_1_ADDR (E56PHY_RXS_BASE_ADDR+0x090)
#define
E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_BBCDR_VCOFILT_BYP_I 0,0 +#define E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_TEST_BBCDR_I 9,9 + +#define E56PHY_RXS_ANA_OVRDEN_3_ADDR (E56PHY_RXS_BASE_ADDR+0x098) +#define E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_ANABS_CONFIG_I 15,15 +#define E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_VGA2_BOOST_CSTM_I 25,25 + +#define E56PHY_RXS_ANA_OVRDEN_4_ADDR (E56PHY_RXS_BASE_ADDR+0x09C) +#define E56PHY_RXS_ANA_OVRDVAL_0_ADDR (E56PHY_RXS_BASE_ADDR+0x0A0) +#define E56PHY_RXS_ANA_OVRDVAL_0_ANA_EN_RTERM_I 0,0 + +#define E56PHY_RXS_ANA_OVRDVAL_6_ADDR (E56PHY_RXS_BASE_ADDR+0x0B8) +#define E56PHY_RXS_ANA_OVRDVAL_14_ADDR (E56PHY_RXS_BASE_ADDR+0x0D8) +#define E56PHY_RXS_ANA_OVRDVAL_15_ADDR (E56PHY_RXS_BASE_ADDR+0x0DC) +#define E56PHY_RXS_ANA_OVRDVAL_17_ADDR (E56PHY_RXS_BASE_ADDR+0x0E4) +#define E56PHY_RXS_ANA_OVRDVAL_17_ANA_VGA2_BOOST_CSTM_I 18,16 + +#define E56PHY_RXS_EYE_SCAN_1_ADDR (E56PHY_RXS_BASE_ADDR+0x1A4) +#define E56PHY_RXS_EYE_SCAN_1_EYE_SCAN_REF_TIMER 31,0 + +#define E56PHY_RXS_ANA_OVRDVAL_5_ADDR (E56PHY_RXS_BASE_ADDR+0x0B4) +#define E56PHY_RXS_ANA_OVRDVAL_5_ANA_BBCDR_OSC_RANGE_SEL_I 1,0 + +#define E56PHY_RXS_RINGO_0_ADDR (E56PHY_RXS_BASE_ADDR+0x1FC) + +#define E56PHY_PMD_BASE_ADDR 0x1400 +#define E56PHY_PMD_CFG_0_ADDR (E56PHY_PMD_BASE_ADDR+0x000) +#define E56PHY_PMD_CFG_0_RX_EN_CFG 19,16 + +#define E56PHY_PMD_CFG_3_ADDR (E56PHY_PMD_BASE_ADDR+0x00C) +#define E56PHY_PMD_CFG_3_CTRL_FSM_TIMEOUT_X64K 31,24 +#define E56PHY_PMD_CFG_4_ADDR (E56PHY_PMD_BASE_ADDR+0x010) +#define E56PHY_PMD_CFG_4_TRAIN_DC_ON_PERIOD_X64K 7,0 +#define E56PHY_PMD_CFG_4_TRAIN_DC_PERIOD_X512K 15,8 +#define E56PHY_PMD_CFG_5_ADDR (E56PHY_PMD_BASE_ADDR+0x014) +#define E56PHY_PMD_CFG_5_USE_RECENT_MARKER_OFFSET 12,12 +#define E56PHY_CTRL_FSM_CFG_0_ADDR (E56PHY_PMD_BASE_ADDR+0x040) +#define E56PHY_CTRL_FSM_CFG_0_CONT_ON_ADC_OFST_CAL_ERR 4,4 +#define E56PHY_CTRL_FSM_CFG_0_CONT_ON_ADC_GAIN_CAL_ERR 5,5 +#define E56PHY_CTRL_FSM_CFG_0_DO_RX_ADC_OFST_CAL 9,8 +#define 
E56PHY_CTRL_FSM_CFG_0_RX_ERR_ACTION_EN 31,24 + + +#define E56PHY_CTRL_FSM_CFG_1_ADDR (E56PHY_PMD_BASE_ADDR+0x044) +#define E56PHY_CTRL_FSM_CFG_1_TRAIN_ST0_WAIT_CNT_X4096 7,0 +#define E56PHY_CTRL_FSM_CFG_1_TRAIN_ST1_WAIT_CNT_X4096 15,8 +#define E56PHY_CTRL_FSM_CFG_1_TRAIN_ST2_WAIT_CNT_X4096 23,16 +#define E56PHY_CTRL_FSM_CFG_1_TRAIN_ST3_WAIT_CNT_X4096 31,24 + +#define E56PHY_CTRL_FSM_CFG_2_ADDR (E56PHY_PMD_BASE_ADDR+0x048) +#define E56PHY_CTRL_FSM_CFG_2_TRAIN_ST4_WAIT_CNT_X4096 7,0 +#define E56PHY_CTRL_FSM_CFG_2_TRAIN_ST5_WAIT_CNT_X4096 15,8 +#define E56PHY_CTRL_FSM_CFG_2_TRAIN_ST6_WAIT_CNT_X4096 23,16 +#define E56PHY_CTRL_FSM_CFG_2_TRAIN_ST7_WAIT_CNT_X4096 31,24 + +#define E56PHY_CTRL_FSM_CFG_3_ADDR (E56PHY_PMD_BASE_ADDR+0x04C) +#define E56PHY_CTRL_FSM_CFG_3_TRAIN_ST8_WAIT_CNT_X4096 7,0 + +#define E56PHY_CTRL_FSM_CFG_3_TRAIN_ST9_WAIT_CNT_X4096 15,8 +#define E56PHY_CTRL_FSM_CFG_3_TRAIN_ST10_WAIT_CNT_X4096 23,16 +#define E56PHY_CTRL_FSM_CFG_3_TRAIN_ST11_WAIT_CNT_X4096 31,24 + +#define E56PHY_CTRL_FSM_CFG_4_ADDR (E56PHY_PMD_BASE_ADDR+0x050) +#define E56PHY_CTRL_FSM_CFG_4_TRAIN_ST12_WAIT_CNT_X4096 7,0 +#define E56PHY_CTRL_FSM_CFG_4_TRAIN_ST13_WAIT_CNT_X4096 15,8 +#define E56PHY_CTRL_FSM_CFG_4_TRAIN_ST14_WAIT_CNT_X4096 23,16 +#define E56PHY_CTRL_FSM_CFG_4_TRAIN_ST15_WAIT_CNT_X4096 31,24 + +#define E56PHY_CTRL_FSM_CFG_7_ADDR (E56PHY_PMD_BASE_ADDR+0x05C) +#define E56PHY_CTRL_FSM_CFG_7_TRAIN_ST4_EN 15,0 +#define E56PHY_CTRL_FSM_CFG_7_TRAIN_ST5_EN 31,16 + +#define E56PHY_CTRL_FSM_CFG_8_ADDR (E56PHY_PMD_BASE_ADDR+0x060) +#define E56PHY_CTRL_FSM_CFG_8_TRAIN_ST7_EN 31,16 + +#define E56PHY_CTRL_FSM_CFG_12_ADDR (E56PHY_PMD_BASE_ADDR+0x070) +#define E56PHY_CTRL_FSM_CFG_12_TRAIN_ST15_EN 31,16 + +#define E56PHY_CTRL_FSM_CFG_13_ADDR (E56PHY_PMD_BASE_ADDR+0x074) +#define E56PHY_CTRL_FSM_CFG_13_TRAIN_ST0_DONE_EN 15,0 +#define E56PHY_CTRL_FSM_CFG_13_TRAIN_ST1_DONE_EN 31,16 + +#define E56PHY_CTRL_FSM_CFG_14_ADDR (E56PHY_PMD_BASE_ADDR+0x078) +#define 
E56PHY_CTRL_FSM_CFG_14_TRAIN_ST3_DONE_EN 31,16 + +#define E56PHY_CTRL_FSM_CFG_15_ADDR (E56PHY_PMD_BASE_ADDR+0x07C) +#define E56PHY_CTRL_FSM_CFG_15_TRAIN_ST4_DONE_EN 15,0 + +#define E56PHY_CTRL_FSM_CFG_17_ADDR (E56PHY_PMD_BASE_ADDR+0x084) +#define E56PHY_CTRL_FSM_CFG_17_TRAIN_ST8_DONE_EN 15,0 + +#define E56PHY_CTRL_FSM_CFG_18_ADDR (E56PHY_PMD_BASE_ADDR+0x088) +#define E56PHY_CTRL_FSM_CFG_18_TRAIN_ST10_DONE_EN 15,0 + +#define E56PHY_CTRL_FSM_CFG_29_ADDR (E56PHY_PMD_BASE_ADDR+0x0B4) +#define E56PHY_CTRL_FSM_CFG_29_TRAIN_ST15_DC_EN 31,16 + +#define E56PHY_CTRL_FSM_CFG_33_ADDR (E56PHY_PMD_BASE_ADDR+0x0C4) +#define E56PHY_CTRL_FSM_CFG_33_TRAIN0_RATE_SEL 15,0 +#define E56PHY_CTRL_FSM_CFG_33_TRAIN1_RATE_SEL 31,16 + +#define E56PHY_CTRL_FSM_CFG_34_ADDR (E56PHY_PMD_BASE_ADDR+0x0C8) +#define E56PHY_CTRL_FSM_CFG_34_TRAIN2_RATE_SEL 15,0 +#define E56PHY_CTRL_FSM_CFG_34_TRAIN3_RATE_SEL 31,16 + +#define E56PHY_CTRL_FSM_RX_STAT_0_ADDR (E56PHY_PMD_BASE_ADDR+0x0FC) +#define E56PHY_RXS0_OVRDEN_0_ADDR (E56PHY_PMD_BASE_ADDR+0x130) +#define E56PHY_RXS0_OVRDEN_0_OVRD_EN_RXS0_RX0_SAMP_CAL_DONE_O 27,27 + +#define E56PHY_RXS0_OVRDEN_1_ADDR (E56PHY_PMD_BASE_ADDR+0x134) +#define E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_VGA_TRAIN_EN_I 14,14 +#define E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_CTLE_TRAIN_EN_I 16,16 +#define E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_CDR_EN_I 18,18 +#define E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_EN_I 23,23 +#define E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_DONE_O 24,24 +#define E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_DONE_O_LSB 24 + + +#define E56PHY_RXS0_OVRDEN_2_ADDR (E56PHY_PMD_BASE_ADDR+0x138) +#define E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_OFST_ADAPT_EN_I 0,0 +#define E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_GAIN_ADAPT_EN_I 3,3 +#define E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_INTL_ADAPT_EN_I 6,6 + +#define E56PHY_RXS0_OVRDVAL_0_ADDR (E56PHY_PMD_BASE_ADDR+0x140) +#define E56PHY_RXS0_OVRDVAL_0_RXS0_RX0_SAMP_CAL_DONE_O 22,22 + +#define 
E56PHY_RXS0_OVRDVAL_1_ADDR (E56PHY_PMD_BASE_ADDR+0x144) +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_VGA_TRAIN_EN_I 7,7 +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_CTLE_TRAIN_EN_I 9,9 +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_CDR_EN_I 11,11 +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_EN_I 16,16 +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_DONE_O 17,17 +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_DONE_O_LSB 17 +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_OFST_ADAPT_EN_I 25,25 +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_GAIN_ADAPT_EN_I 28,28 +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_ADAPT_EN_I 31,31 + +#define E56PHY_INTR_0_IDLE_ENTRY1 0x10000000 +#define E56PHY_INTR_0_ADDR (E56PHY_PMD_BASE_ADDR+0x1EC) +#define E56PHY_INTR_0_ENABLE_ADDR (E56PHY_PMD_BASE_ADDR+0x1E0) + +#define E56PHY_INTR_1_IDLE_EXIT1 0x1 +#define E56PHY_INTR_1_ADDR (E56PHY_PMD_BASE_ADDR+0x1F0) +#define E56PHY_INTR_1_ENABLE_ADDR (E56PHY_PMD_BASE_ADDR+0x1E4) + +#define E56PHY_KRT_TFSM_CFG_ADDR (E56PHY_PMD_BASE_ADDR+0x2B8) +#define E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X1000K 7,0 +#define E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X8000K 15,8 +#define E56PHY_KRT_TFSM_CFGKRT_TFSM_HOLDOFF_TIMER_X256K 23,16 + +#define E56PHY_FETX_FFE_TRAIN_CFG_0_ADDR (E56PHY_PMD_BASE_ADDR+0x2BC) +#define E56PHY_FETX_FFE_TRAIN_CFG_0_KRT_FETX_INIT_FFE_CFG_2 9,8 +#define E56PHY_FETX_FFE_TRAIN_CFG_0_KRT_FETX_INIT_FFE_CFG_3 13,12 + +#define PHYINIT_TIMEOUT 1000 //PHY initialization timeout value in 0.5ms unit + +#define E56G__BASEADDR 0x0 + +typedef union { + struct { + u32 ana_lcpll_lf_vco_swing_ctrl_i : 4; + u32 ana_lcpll_lf_lpf_setcode_calib_i : 5; + u32 rsvd0 : 3; + u32 ana_lcpll_lf_vco_coarse_bin_i : 5; + u32 rsvd1 : 3; + u32 ana_lcpll_lf_vco_fine_therm_i : 8; + u32 ana_lcpll_lf_clkout_fb_ctrl_i : 2; + u32 rsvd2 : 2; + }; + u32 reg; +} E56G_CMS_ANA_OVRDVAL_7; +#define E56G_CMS_ANA_OVRDVAL_7_ADDR (E56G__BASEADDR+0xccc) + +typedef union { + struct { + u32 ovrd_en_ana_lcpll_hf_vco_amp_status_o : 1; + 
u32 ovrd_en_ana_lcpll_hf_clkout_fb_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_hf_clkdiv_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_hf_en_odiv_i : 1; + u32 ovrd_en_ana_lcpll_hf_test_in_i : 1; + u32 ovrd_en_ana_lcpll_hf_test_out_o : 1; + u32 ovrd_en_ana_lcpll_lf_en_bias_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_loop_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_cp_i : 1; + u32 ovrd_en_ana_lcpll_lf_icp_base_i : 1; + u32 ovrd_en_ana_lcpll_lf_icp_fine_i : 1; + u32 ovrd_en_ana_lcpll_lf_lpf_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_lpf_setcode_calib_i : 1; + u32 ovrd_en_ana_lcpll_lf_set_lpf_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_vco_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_sel_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_swing_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_coarse_bin_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_fine_therm_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_amp_status_o : 1; + u32 ovrd_en_ana_lcpll_lf_clkout_fb_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_clkdiv_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_odiv_i : 1; + u32 ovrd_en_ana_lcpll_lf_test_in_i : 1; + u32 ovrd_en_ana_lcpll_lf_test_out_o : 1; + u32 ovrd_en_ana_lcpll_hf_refclk_select_i : 1; + u32 ovrd_en_ana_lcpll_lf_refclk_select_i : 1; + u32 ovrd_en_ana_lcpll_hf_clk_ref_sel_i : 1; + u32 ovrd_en_ana_lcpll_lf_clk_ref_sel_i : 1; + u32 ovrd_en_ana_test_bias_i : 1; + u32 ovrd_en_ana_test_slicer_i : 1; + u32 ovrd_en_ana_test_sampler_i : 1; + }; + u32 reg; +} E56G_CMS_ANA_OVRDEN_1; + +#define E56G_CMS_ANA_OVRDEN_1_ADDR (E56G__BASEADDR+0xca8) + +typedef union { + struct { + u32 ana_lcpll_lf_test_in_i : 32; + }; + u32 reg; +} E56G_CMS_ANA_OVRDVAL_9; + +#define E56G_CMS_ANA_OVRDVAL_9_ADDR (E56G__BASEADDR+0xcd4) + +typedef union { + struct { + u32 ovrd_en_ana_bbcdr_vcofilt_byp_i : 1; + u32 ovrd_en_ana_bbcdr_coarse_i : 1; + u32 ovrd_en_ana_bbcdr_fine_i : 1; + u32 ovrd_en_ana_bbcdr_ultrafine_i : 1; + u32 ovrd_en_ana_en_bbcdr_i : 1; + u32 ovrd_en_ana_bbcdr_divctrl_i : 1; + u32 ovrd_en_ana_bbcdr_int_cstm_i : 1; + u32 ovrd_en_ana_bbcdr_prop_step_i : 1; + u32 
ovrd_en_ana_en_bbcdr_clk_i : 1; + u32 ovrd_en_ana_test_bbcdr_i : 1; + u32 ovrd_en_ana_bbcdr_en_elv_cnt_ping0_pong1_i : 1; + u32 ovrd_en_ana_bbcdr_clrz_elv_cnt_ping_i : 1; + u32 ovrd_en_ana_bbcdr_clrz_elv_cnt_pong_i : 1; + u32 ovrd_en_ana_bbcdr_clrz_cnt_sync_i : 1; + u32 ovrd_en_ana_bbcdr_en_elv_cnt_rd_i : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_rdout_0_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_rdout_90_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_rdout_180_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_rdout_270_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_ping_0_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_ping_90_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_ping_180_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_ping_270_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_pong_0_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_pong_90_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_pong_180_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_pong_270_o : 1; + u32 ovrd_en_ana_en_bbcdr_samp_dac_i : 1; + u32 ovrd_en_ana_bbcdr_dac0_i : 1; + u32 ovrd_en_ana_bbcdr_dac90_i : 1; + u32 ovrd_en_ana_vga2_cload_in_cstm_i : 1; + u32 ovrd_en_ana_intlvr_cut_bw_i : 1; + }; + u32 reg; +} E56G__RXS0_ANA_OVRDEN_1; + +#define E56G__RXS0_ANA_OVRDEN_1_ADDR (E56G__BASEADDR+0x90) + +//-----Access structure typedef for Register:E56G__RXS0_OSC_CAL_N_CDR_0 +typedef union { + struct { + u32 prediv0 : 16; + u32 target_cnt0 : 16; + }; + u32 reg; +} E56G_RXS0_OSC_CAL_N_CDR_0; +//-----MACRO defines for Register:E56G__RXS0_OSC_CAL_N_CDR_0 +#define E56G_RXS0_OSC_CAL_N_CDR_0_ADDR (E56G__BASEADDR+0x4) + +typedef union { + struct { + u32 osc_range_sel0 : 2; + u32 osc_range_sel1 : 2; + u32 osc_range_sel2 : 2; + u32 osc_range_sel3 : 2; + u32 vco_code_init : 11; + u32 calibrate_range_sel : 1; + u32 osc_current_boost_en0 : 1; + u32 osc_current_boost_en1 : 1; + u32 osc_current_boost_en2 : 1; + u32 osc_current_boost_en3 : 1; + u32 bbcdr_current_boost0 : 2; + u32 bbcdr_current_boost1 : 2; + u32 bbcdr_current_boost2 : 2; + u32 bbcdr_current_boost3 : 2; + }; + u32 reg; +} E56G_RXS0_OSC_CAL_N_CDR_4; 
+//-----MACRO defines for Register:E56G__RXS0_OSC_CAL_N_CDR_4 +#define E56G_RXS0_OSC_CAL_N_CDR_4_ADDR (E56G__BASEADDR+0x14) + +//-----Access structure typedef for Register:E56G__RXS0_INTL_CONFIG_0 +typedef union { + struct { + u32 adc_intl2slice_delay0 : 16; + u32 adc_intl2slice_delay1 : 16; + }; + u32 reg; +} E56G_RXS0_INTL_CONFIG_0; +//-----MACRO defines for Register:E56G__RXS0_INTL_CONFIG_0 +#define E56G_RXS0_INTL_CONFIG_0_ADDR (E56G__BASEADDR+0x20) + +//-----Access structure typedef for Register:E56G__RXS0_INTL_CONFIG_2 +typedef union { + struct { + u32 interleaver_hbw_disable0 : 1; + u32 interleaver_hbw_disable1 : 1; + u32 interleaver_hbw_disable2 : 1; + u32 interleaver_hbw_disable3 : 1; + u32 rsvd0 : 28; + }; + u32 reg; +} E56G_RXS0_INTL_CONFIG_2; +//-----MACRO defines for Register:E56G__RXS0_INTL_CONFIG_2 +#define E56G_RXS0_INTL_CONFIG_2_ADDR (E56G__BASEADDR+0x28) + +typedef union { + struct { + u32 ovrd_en_ana_bbcdr_dac180_i : 1; + u32 ovrd_en_ana_bbcdr_dac270_i : 1; + u32 ovrd_en_ana_bbcdr_en_samp_cal_cnt_i : 1; + u32 ovrd_en_ana_bbcdr_clrz_samp_cal_cnt_i : 1; + u32 ovrd_en_ana_bbcdr_samp_cnt_0_o : 1; + u32 ovrd_en_ana_bbcdr_samp_cnt_90_o : 1; + u32 ovrd_en_ana_bbcdr_samp_cnt_180_o : 1; + u32 ovrd_en_ana_bbcdr_samp_cnt_270_o : 1; + u32 ovrd_en_ana_en_adcbuf1_i : 1; + u32 ovrd_en_ana_test_adcbuf1_i : 1; + u32 ovrd_en_ana_en_adc_clk4ui_i : 1; + u32 ovrd_en_ana_adc_clk_skew0_i : 1; + u32 ovrd_en_ana_adc_clk_skew90_i : 1; + u32 ovrd_en_ana_adc_clk_skew180_i : 1; + u32 ovrd_en_ana_adc_clk_skew270_i : 1; + u32 ovrd_en_ana_adc_update_skew_i : 1; + u32 ovrd_en_ana_en_adc_pi_i : 1; + u32 ovrd_en_ana_adc_pictrl_quad_i : 1; + u32 ovrd_en_ana_adc_pctrl_code_i : 1; + u32 ovrd_en_ana_adc_clkdiv_i : 1; + u32 ovrd_en_ana_test_adc_clkgen_i : 1; + u32 ovrd_en_ana_en_adc_i : 1; + u32 ovrd_en_ana_en_adc_vref_i : 1; + u32 ovrd_en_ana_vref_cnfg_i : 1; + u32 ovrd_en_ana_adc_data_cstm_o : 1; + u32 ovrd_en_ana_en_adccal_lpbk_i : 1; + u32 ovrd_en_ana_sel_adcoffset_cal_i : 1; + u32 
ovrd_en_ana_sel_adcgain_cal_i : 1; + u32 ovrd_en_ana_adcgain_cal_swing_ctrl_i : 1; + u32 ovrd_en_ana_adc_gain_i : 1; + u32 ovrd_en_ana_vga_cload_out_cstm_i : 1; + u32 ovrd_en_ana_vga2_cload_out_cstm_i : 1; + }; + u32 reg; +} E56G__RXS0_ANA_OVRDEN_2; +//-----MACRO defines for Register:E56G__RXS0_ANA_OVRDEN_2 +#define E56G__RXS0_ANA_OVRDEN_2_ADDR (E56G__BASEADDR+0x94) + +//-----Access structure typedef for Register:E56G__RXS0_ANA_OVRDEN_3 +typedef union { + struct { + u32 ovrd_en_ana_adc_offset_i : 1; + u32 ovrd_en_ana_adc_slice_addr_i : 1; + u32 ovrd_en_ana_slice_wr_i : 1; + u32 ovrd_en_ana_test_adc_i : 1; + u32 ovrd_en_ana_test_adc_o : 1; + u32 ovrd_en_ana_spare_o : 8; + u32 ovrd_en_ana_sel_lpbk_i : 1; + u32 ovrd_en_ana_ana_debug_sel_i : 1; + u32 ovrd_en_ana_anabs_config_i : 1; + u32 ovrd_en_ana_en_anabs_i : 1; + u32 ovrd_en_ana_anabs_rxn_o : 1; + u32 ovrd_en_ana_anabs_rxp_o : 1; + u32 ovrd_en_ana_dser_clk_en_i : 1; + u32 ovrd_en_ana_dser_clk_config_i : 1; + u32 ovrd_en_ana_en_mmcdr_clk_obs_i : 1; + u32 ovrd_en_ana_skew_coarse0_fine1_i : 1; + u32 ovrd_en_ana_vddinoff_acore_dig_o : 1; + u32 ovrd_en_ana_vddinoff_dcore_dig_o : 1; + u32 ovrd_en_ana_vga2_boost_cstm_i : 1; + u32 ovrd_en_ana_adc_sel_vbgr_bias_i : 1; + u32 ovrd_en_ana_adc_nbuf_cnfg_i : 1; + u32 ovrd_en_ana_adc_pbuf_cnfg_i : 1; + u32 rsvd0 : 3; + }; + u32 reg; +} E56G__RXS0_ANA_OVRDEN_3; +//-----MACRO defines for Register:E56G__RXS0_ANA_OVRDEN_3 +#define E56G__RXS0_ANA_OVRDEN_3_NUM 1 +#define E56G__RXS0_ANA_OVRDEN_3_ADDR (E56G__BASEADDR+0x98) + +//-----Access structure typedef for Register:E56G__RXS0_RXS_CFG_0 +typedef union { + struct { + u32 pam4_ab_swap_en : 1; + u32 dser_data_sel : 1; + u32 signal_type : 1; + u32 precode_en : 1; + u32 train_clk_gate_bypass_en : 14; + u32 rsvd0 : 14; + }; + u32 reg; +} E56G__RXS0_RXS_CFG_0; +//-----MACRO defines for Register:E56G__RXS0_RXS_CFG_0 +#define E56G__RXS0_RXS_CFG_0_NUM 1 +#define E56G__RXS0_RXS_CFG_0_ADDR (E56G__BASEADDR+0x0) + +//-----Access structure typedef 
for Register:E56G__PMD_BASER_PMD_CONTROL +typedef union { + struct { + u32 restart_training_ln0 : 1; + u32 training_enable_ln0 : 1; + u32 restart_training_ln1 : 1; + u32 training_enable_ln1 : 1; + u32 restart_training_ln2 : 1; + u32 training_enable_ln2 : 1; + u32 restart_training_ln3 : 1; + u32 training_enable_ln3 : 1; + u32 rsvd0 : 24; + }; + u32 reg; +} E56G__PMD_BASER_PMD_CONTROL; +//-----MACRO defines for Register:E56G__PMD_BASER_PMD_CONTROL +#define E56G__PMD_BASER_PMD_CONTROL_NUM 1 +#define E56G__PMD_BASER_PMD_CONTROL_ADDR (E56G__BASEADDR+0x1640) + +//-----Access structure typedef for Register:E56G__PMD_PMD_CFG_5 +typedef union { + struct { + u32 rx_to_tx_lpbk_en : 4; + u32 sel_wp_pmt_out : 4; + u32 sel_wp_pmt_clkout : 4; + u32 use_recent_marker_offset : 1; + u32 interrupt_debug_mode : 1; + u32 rsvd0 : 2; + u32 tx_ffe_coeff_update : 4; + u32 rsvd1 : 12; + }; + u32 reg; +} E56G__PMD_PMD_CFG_5; +//-----MACRO defines for Register:E56G__PMD_PMD_CFG_5 +#define E56G__PMD_PMD_CFG_5_NUM 1 +#define E56G__PMD_PMD_CFG_5_ADDR (E56G__BASEADDR+0x1414) + +//-----Access structure typedef for Register:E56G__PMD_PMD_CFG_0 +typedef union { + struct { + u32 soft_reset : 1; + u32 pmd_en : 1; + u32 rsvd0 : 2; + u32 pll_refclk_sel : 2; + u32 rsvd1 : 2; + u32 pmd_mode : 1; + u32 rsvd2 : 3; + u32 tx_en_cfg : 4; + u32 rx_en_cfg : 4; + u32 pll_en_cfg : 2; + u32 rsvd3 : 2; + u32 pam4_precode_no_krt_en : 4; + u32 rsvd4 : 4; + }; + u32 reg; +} E56G__PMD_PMD_CFG_0; +//-----MACRO defines for Register:E56G__PMD_PMD_CFG_0 +#define E56G__PMD_PMD_CFG_0_NUM 1 +#define E56G__PMD_PMD_CFG_0_ADDR (E56G__BASEADDR+0x1400) + +//-----Access structure typedef for Register:E56G__PMD_RXS0_OVRDEN_2 +typedef union { + struct { + u32 ovrd_en_rxs0_rx0_adc_ofst_adapt_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_ofst_adapt_done_o : 1; + u32 ovrd_en_rxs0_rx0_adc_ofst_adapt_error_o : 1; + u32 ovrd_en_rxs0_rx0_adc_gain_adapt_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_gain_adapt_done_o : 1; + u32 
ovrd_en_rxs0_rx0_adc_gain_adapt_error_o : 1; + u32 ovrd_en_rxs0_rx0_adc_intl_adapt_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_intl_adapt_done_o : 1; + u32 ovrd_en_rxs0_rx0_adc_intl_adapt_error_o : 1; + u32 ovrd_en_rxs0_rx0_fe_ofst_adapt_en_i : 1; + u32 ovrd_en_rxs0_rx0_fe_ofst_adapt_done_o : 1; + u32 ovrd_en_rxs0_rx0_fe_ofst_adapt_error_o : 1; + u32 ovrd_en_rxs0_rx0_samp_th_adapt_en_i : 1; + u32 ovrd_en_rxs0_rx0_samp_th_adapt_done_o : 1; + u32 ovrd_en_rxs0_rx0_efuse_bits_i : 1; + u32 ovrd_en_rxs0_rx0_wp_pmt_in_i : 1; + u32 ovrd_en_rxs0_rx0_wp_pmt_out_o : 1; + u32 rsvd0 : 15; + }; + u32 reg; +} E56G__PMD_RXS0_OVRDEN_2; +//-----MACRO defines for Register:E56G__PMD_RXS0_OVRDEN_2 +#define E56G__PMD_RXS0_OVRDEN_2_ADDR (E56G__BASEADDR+0x1538) + +typedef union { + struct { + u32 ana_bbcdr_osc_range_sel_i : 2; + u32 rsvd0 : 2; + u32 ana_bbcdr_coarse_i : 4; + u32 ana_bbcdr_fine_i : 3; + u32 rsvd1 : 1; + u32 ana_bbcdr_ultrafine_i : 3; + u32 rsvd2 : 1; + u32 ana_bbcdr_divctrl_i : 2; + u32 rsvd3 : 2; + u32 ana_bbcdr_int_cstm_i : 5; + u32 rsvd4 : 3; + u32 ana_bbcdr_prop_step_i : 4; + }; + u32 reg; +} E56G__RXS0_ANA_OVRDVAL_5; +//-----MACRO defines for Register:E56G__RXS0_ANA_OVRDVAL_5 +#define E56G__RXS0_ANA_OVRDVAL_5_ADDR (E56G__BASEADDR+0xb4) + +typedef union { + struct { + u32 ana_adc_pictrl_quad_i : 2; + u32 rsvd0 : 2; + u32 ana_adc_clkdiv_i : 2; + u32 rsvd1 : 2; + u32 ana_test_adc_clkgen_i : 4; + u32 ana_vref_cnfg_i : 4; + u32 ana_adcgain_cal_swing_ctrl_i : 4; + u32 ana_adc_gain_i : 4; + u32 ana_adc_offset_i : 4; + u32 ana_ana_debug_sel_i : 4; + }; + u32 reg; +} E56G__RXS3_ANA_OVRDVAL_11; +//-----MACRO defines for Register:E56G__RXS3_ANA_OVRDVAL_11 +#define E56G__RXS3_ANA_OVRDVAL_11_ADDR (E56G__BASEADDR+0x6cc) + +typedef union { + struct { + u32 rxs0_rx0_fe_ofst_cal_error_o : 1; + u32 rxs0_rx0_fom_en_i : 1; + u32 rxs0_rx0_idle_detect_en_i : 1; + u32 rxs0_rx0_idle_o : 1; + u32 rxs0_rx0_txffe_train_en_i : 1; + u32 rxs0_rx0_txffe_train_enack_o : 1; + u32 rxs0_rx0_txffe_train_done_o 
: 1; + u32 rxs0_rx0_vga_train_en_i : 1; + u32 rxs0_rx0_vga_train_done_o : 1; + u32 rxs0_rx0_ctle_train_en_i : 1; + u32 rxs0_rx0_ctle_train_done_o : 1; + u32 rxs0_rx0_cdr_en_i : 1; + u32 rxs0_rx0_cdr_rdy_o : 1; + u32 rxs0_rx0_ffe_train_en_i : 1; + u32 rxs0_rx0_ffe_train_done_o : 1; + u32 rxs0_rx0_mmpd_en_i : 1; + u32 rxs0_rx0_adc_intl_cal_en_i : 1; + u32 rxs0_rx0_adc_intl_cal_done_o : 1; + u32 rxs0_rx0_adc_intl_cal_error_o : 1; + u32 rxs0_rx0_dfe_train_en_i : 1; + u32 rxs0_rx0_dfe_train_done_o : 1; + u32 rxs0_rx0_vga_adapt_en_i : 1; + u32 rxs0_rx0_vga_adapt_done_o : 1; + u32 rxs0_rx0_ctle_adapt_en_i : 1; + u32 rxs0_rx0_ctle_adapt_done_o : 1; + u32 rxs0_rx0_adc_ofst_adapt_en_i : 1; + u32 rxs0_rx0_adc_ofst_adapt_done_o : 1; + u32 rxs0_rx0_adc_ofst_adapt_error_o : 1; + u32 rxs0_rx0_adc_gain_adapt_en_i : 1; + u32 rxs0_rx0_adc_gain_adapt_done_o : 1; + u32 rxs0_rx0_adc_gain_adapt_error_o : 1; + u32 rxs0_rx0_adc_intl_adapt_en_i : 1; + }; + u32 reg; +} E56G__PMD_RXS0_OVRDVAL_1; +#define E56G__PMD_RXS0_OVRDVAL_1_ADDR (E56G__BASEADDR+0x1544) + +//-----Access structure typedef for Register:E56G__PMD_RXS1_OVRDVAL_1 +typedef union { + struct { + u32 rxs1_rx0_fe_ofst_cal_error_o : 1; + u32 rxs1_rx0_fom_en_i : 1; + u32 rxs1_rx0_idle_detect_en_i : 1; + u32 rxs1_rx0_idle_o : 1; + u32 rxs1_rx0_txffe_train_en_i : 1; + u32 rxs1_rx0_txffe_train_enack_o : 1; + u32 rxs1_rx0_txffe_train_done_o : 1; + u32 rxs1_rx0_vga_train_en_i : 1; + u32 rxs1_rx0_vga_train_done_o : 1; + u32 rxs1_rx0_ctle_train_en_i : 1; + u32 rxs1_rx0_ctle_train_done_o : 1; + u32 rxs1_rx0_cdr_en_i : 1; + u32 rxs1_rx0_cdr_rdy_o : 1; + u32 rxs1_rx0_ffe_train_en_i : 1; + u32 rxs1_rx0_ffe_train_done_o : 1; + u32 rxs1_rx0_mmpd_en_i : 1; + u32 rxs1_rx0_adc_intl_cal_en_i : 1; + u32 rxs1_rx0_adc_intl_cal_done_o : 1; + u32 rxs1_rx0_adc_intl_cal_error_o : 1; + u32 rxs1_rx0_dfe_train_en_i : 1; + u32 rxs1_rx0_dfe_train_done_o : 1; + u32 rxs1_rx0_vga_adapt_en_i : 1; + u32 rxs1_rx0_vga_adapt_done_o : 1; + u32 rxs1_rx0_ctle_adapt_en_i : 
1; + u32 rxs1_rx0_ctle_adapt_done_o : 1; + u32 rxs1_rx0_adc_ofst_adapt_en_i : 1; + u32 rxs1_rx0_adc_ofst_adapt_done_o : 1; + u32 rxs1_rx0_adc_ofst_adapt_error_o : 1; + u32 rxs1_rx0_adc_gain_adapt_en_i : 1; + u32 rxs1_rx0_adc_gain_adapt_done_o : 1; + u32 rxs1_rx0_adc_gain_adapt_error_o : 1; + u32 rxs1_rx0_adc_intl_adapt_en_i : 1; + }; + u32 reg; +} E56G__PMD_RXS1_OVRDVAL_1; +//-----MACRO defines for Register:E56G__PMD_RXS1_OVRDVAL_1 +#define E56G__PMD_RXS1_OVRDVAL_1_ADDR (E56G__BASEADDR+0x1570) + +//-----Access structure typedef for Register:E56G__PMD_RXS2_OVRDVAL_1 +typedef union { + struct { + u32 rxs2_rx0_fe_ofst_cal_error_o : 1; + u32 rxs2_rx0_fom_en_i : 1; + u32 rxs2_rx0_idle_detect_en_i : 1; + u32 rxs2_rx0_idle_o : 1; + u32 rxs2_rx0_txffe_train_en_i : 1; + u32 rxs2_rx0_txffe_train_enack_o : 1; + u32 rxs2_rx0_txffe_train_done_o : 1; + u32 rxs2_rx0_vga_train_en_i : 1; + u32 rxs2_rx0_vga_train_done_o : 1; + u32 rxs2_rx0_ctle_train_en_i : 1; + u32 rxs2_rx0_ctle_train_done_o : 1; + u32 rxs2_rx0_cdr_en_i : 1; + u32 rxs2_rx0_cdr_rdy_o : 1; + u32 rxs2_rx0_ffe_train_en_i : 1; + u32 rxs2_rx0_ffe_train_done_o : 1; + u32 rxs2_rx0_mmpd_en_i : 1; + u32 rxs2_rx0_adc_intl_cal_en_i : 1; + u32 rxs2_rx0_adc_intl_cal_done_o : 1; + u32 rxs2_rx0_adc_intl_cal_error_o : 1; + u32 rxs2_rx0_dfe_train_en_i : 1; + u32 rxs2_rx0_dfe_train_done_o : 1; + u32 rxs2_rx0_vga_adapt_en_i : 1; + u32 rxs2_rx0_vga_adapt_done_o : 1; + u32 rxs2_rx0_ctle_adapt_en_i : 1; + u32 rxs2_rx0_ctle_adapt_done_o : 1; + u32 rxs2_rx0_adc_ofst_adapt_en_i : 1; + u32 rxs2_rx0_adc_ofst_adapt_done_o : 1; + u32 rxs2_rx0_adc_ofst_adapt_error_o : 1; + u32 rxs2_rx0_adc_gain_adapt_en_i : 1; + u32 rxs2_rx0_adc_gain_adapt_done_o : 1; + u32 rxs2_rx0_adc_gain_adapt_error_o : 1; + u32 rxs2_rx0_adc_intl_adapt_en_i : 1; + }; + u32 reg; +} E56G__PMD_RXS2_OVRDVAL_1; +//-----MACRO defines for Register:E56G__PMD_RXS2_OVRDVAL_1 +#define E56G__PMD_RXS2_OVRDVAL_1_ADDR (E56G__BASEADDR+0x159c) + +//-----Access structure typedef for 
Register:E56G__PMD_RXS3_OVRDVAL_1 +typedef union { + struct { + u32 rxs3_rx0_fe_ofst_cal_error_o : 1; + u32 rxs3_rx0_fom_en_i : 1; + u32 rxs3_rx0_idle_detect_en_i : 1; + u32 rxs3_rx0_idle_o : 1; + u32 rxs3_rx0_txffe_train_en_i : 1; + u32 rxs3_rx0_txffe_train_enack_o : 1; + u32 rxs3_rx0_txffe_train_done_o : 1; + u32 rxs3_rx0_vga_train_en_i : 1; + u32 rxs3_rx0_vga_train_done_o : 1; + u32 rxs3_rx0_ctle_train_en_i : 1; + u32 rxs3_rx0_ctle_train_done_o : 1; + u32 rxs3_rx0_cdr_en_i : 1; + u32 rxs3_rx0_cdr_rdy_o : 1; + u32 rxs3_rx0_ffe_train_en_i : 1; + u32 rxs3_rx0_ffe_train_done_o : 1; + u32 rxs3_rx0_mmpd_en_i : 1; + u32 rxs3_rx0_adc_intl_cal_en_i : 1; + u32 rxs3_rx0_adc_intl_cal_done_o : 1; + u32 rxs3_rx0_adc_intl_cal_error_o : 1; + u32 rxs3_rx0_dfe_train_en_i : 1; + u32 rxs3_rx0_dfe_train_done_o : 1; + u32 rxs3_rx0_vga_adapt_en_i : 1; + u32 rxs3_rx0_vga_adapt_done_o : 1; + u32 rxs3_rx0_ctle_adapt_en_i : 1; + u32 rxs3_rx0_ctle_adapt_done_o : 1; + u32 rxs3_rx0_adc_ofst_adapt_en_i : 1; + u32 rxs3_rx0_adc_ofst_adapt_done_o : 1; + u32 rxs3_rx0_adc_ofst_adapt_error_o : 1; + u32 rxs3_rx0_adc_gain_adapt_en_i : 1; + u32 rxs3_rx0_adc_gain_adapt_done_o : 1; + u32 rxs3_rx0_adc_gain_adapt_error_o : 1; + u32 rxs3_rx0_adc_intl_adapt_en_i : 1; + }; + u32 reg; +} E56G__PMD_RXS3_OVRDVAL_1; +//-----MACRO defines for Register:E56G__PMD_RXS3_OVRDVAL_1 +#define E56G__PMD_RXS3_OVRDVAL_1_ADDR (E56G__BASEADDR+0x15c8) + +//-----Access structure typedef for Register:E56G__PMD_CTRL_FSM_RX_STAT_0 +typedef union { + struct { + u32 ctrl_fsm_rx0_st : 6; + u32 rsvd0 : 2; + u32 ctrl_fsm_rx1_st : 6; + u32 rsvd1 : 2; + u32 ctrl_fsm_rx2_st : 6; + u32 rsvd2 : 2; + u32 ctrl_fsm_rx3_st : 6; + u32 rsvd3 : 2; + }; + u32 reg; +} E56G__PMD_CTRL_FSM_RX_STAT_0; +//-----MACRO defines for Register:E56G__PMD_CTRL_FSM_RX_STAT_0 +#define E56G__PMD_CTRL_FSM_RX_STAT_0_ADDR (E56G__BASEADDR+0x14fc) + +typedef union { + struct { + u32 ana_en_rterm_i : 1; + u32 ana_en_bias_i : 1; + u32 ana_en_ldo_i : 1; + u32 ana_rstn_i : 
1; + u32 ana_en_blwc_i : 1; + u32 ana_en_acc_amp_i : 1; + u32 ana_en_acc_dac_i : 1; + u32 ana_en_afe_offset_cal_i : 1; + u32 ana_clk_offsetcal_i : 1; + u32 ana_acc_os_comp_o : 1; + u32 ana_en_ctle_i : 1; + u32 ana_ctle_bypass_i : 1; + u32 ana_en_ctlecdr_i : 1; + u32 ana_cdr_ctle_boost_i : 1; + u32 ana_en_vga_i : 1; + u32 ana_en_bbcdr_vco_i : 1; + u32 ana_bbcdr_vcofilt_byp_i : 1; + u32 ana_en_bbcdr_i : 1; + u32 ana_en_bbcdr_clk_i : 1; + u32 ana_bbcdr_en_elv_cnt_ping0_pong1_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_ping_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_pong_i : 1; + u32 ana_bbcdr_clrz_cnt_sync_i : 1; + u32 ana_bbcdr_en_elv_cnt_rd_i : 1; + u32 ana_bbcdr_elv_cnt_ping_0_o : 1; + u32 ana_bbcdr_elv_cnt_ping_90_o : 1; + u32 ana_bbcdr_elv_cnt_ping_180_o : 1; + u32 ana_bbcdr_elv_cnt_ping_270_o : 1; + u32 ana_bbcdr_elv_cnt_pong_0_o : 1; + u32 ana_bbcdr_elv_cnt_pong_90_o : 1; + u32 ana_bbcdr_elv_cnt_pong_180_o : 1; + u32 ana_bbcdr_elv_cnt_pong_270_o : 1; + }; + u32 reg; +} E56G__RXS0_ANA_OVRDVAL_0; +#define E56G__RXS0_ANA_OVRDVAL_0_ADDR (E56G__BASEADDR+0xa0) + +//-----Access structure typedef for Register:E56G__RXS1_ANA_OVRDVAL_0 +typedef union { + struct { + u32 ana_en_rterm_i : 1; + u32 ana_en_bias_i : 1; + u32 ana_en_ldo_i : 1; + u32 ana_rstn_i : 1; + u32 ana_en_blwc_i : 1; + u32 ana_en_acc_amp_i : 1; + u32 ana_en_acc_dac_i : 1; + u32 ana_en_afe_offset_cal_i : 1; + u32 ana_clk_offsetcal_i : 1; + u32 ana_acc_os_comp_o : 1; + u32 ana_en_ctle_i : 1; + u32 ana_ctle_bypass_i : 1; + u32 ana_en_ctlecdr_i : 1; + u32 ana_cdr_ctle_boost_i : 1; + u32 ana_en_vga_i : 1; + u32 ana_en_bbcdr_vco_i : 1; + u32 ana_bbcdr_vcofilt_byp_i : 1; + u32 ana_en_bbcdr_i : 1; + u32 ana_en_bbcdr_clk_i : 1; + u32 ana_bbcdr_en_elv_cnt_ping0_pong1_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_ping_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_pong_i : 1; + u32 ana_bbcdr_clrz_cnt_sync_i : 1; + u32 ana_bbcdr_en_elv_cnt_rd_i : 1; + u32 ana_bbcdr_elv_cnt_ping_0_o : 1; + u32 ana_bbcdr_elv_cnt_ping_90_o : 1; + u32 
ana_bbcdr_elv_cnt_ping_180_o : 1; + u32 ana_bbcdr_elv_cnt_ping_270_o : 1; + u32 ana_bbcdr_elv_cnt_pong_0_o : 1; + u32 ana_bbcdr_elv_cnt_pong_90_o : 1; + u32 ana_bbcdr_elv_cnt_pong_180_o : 1; + u32 ana_bbcdr_elv_cnt_pong_270_o : 1; + }; + u32 reg; +} E56G__RXS1_ANA_OVRDVAL_0; +//-----MACRO defines for Register:E56G__RXS1_ANA_OVRDVAL_0 +#define E56G__RXS1_ANA_OVRDVAL_0_ADDR (E56G__BASEADDR+0x2a0) + +//-----Access structure typedef for Register:E56G__RXS2_ANA_OVRDVAL_0 +typedef union { + struct { + u32 ana_en_rterm_i : 1; + u32 ana_en_bias_i : 1; + u32 ana_en_ldo_i : 1; + u32 ana_rstn_i : 1; + u32 ana_en_blwc_i : 1; + u32 ana_en_acc_amp_i : 1; + u32 ana_en_acc_dac_i : 1; + u32 ana_en_afe_offset_cal_i : 1; + u32 ana_clk_offsetcal_i : 1; + u32 ana_acc_os_comp_o : 1; + u32 ana_en_ctle_i : 1; + u32 ana_ctle_bypass_i : 1; + u32 ana_en_ctlecdr_i : 1; + u32 ana_cdr_ctle_boost_i : 1; + u32 ana_en_vga_i : 1; + u32 ana_en_bbcdr_vco_i : 1; + u32 ana_bbcdr_vcofilt_byp_i : 1; + u32 ana_en_bbcdr_i : 1; + u32 ana_en_bbcdr_clk_i : 1; + u32 ana_bbcdr_en_elv_cnt_ping0_pong1_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_ping_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_pong_i : 1; + u32 ana_bbcdr_clrz_cnt_sync_i : 1; + u32 ana_bbcdr_en_elv_cnt_rd_i : 1; + u32 ana_bbcdr_elv_cnt_ping_0_o : 1; + u32 ana_bbcdr_elv_cnt_ping_90_o : 1; + u32 ana_bbcdr_elv_cnt_ping_180_o : 1; + u32 ana_bbcdr_elv_cnt_ping_270_o : 1; + u32 ana_bbcdr_elv_cnt_pong_0_o : 1; + u32 ana_bbcdr_elv_cnt_pong_90_o : 1; + u32 ana_bbcdr_elv_cnt_pong_180_o : 1; + u32 ana_bbcdr_elv_cnt_pong_270_o : 1; + }; + u32 reg; +} E56G__RXS2_ANA_OVRDVAL_0; +//-----MACRO defines for Register:E56G__RXS2_ANA_OVRDVAL_0 +#define E56G__RXS2_ANA_OVRDVAL_0_ADDR (E56G__BASEADDR+0x4a0) + +//-----Access structure typedef for Register:E56G__RXS3_ANA_OVRDVAL_0 +typedef union { + struct { + u32 ana_en_rterm_i : 1; + u32 ana_en_bias_i : 1; + u32 ana_en_ldo_i : 1; + u32 ana_rstn_i : 1; + u32 ana_en_blwc_i : 1; + u32 ana_en_acc_amp_i : 1; + u32 ana_en_acc_dac_i : 1; + u32 
ana_en_afe_offset_cal_i : 1; + u32 ana_clk_offsetcal_i : 1; + u32 ana_acc_os_comp_o : 1; + u32 ana_en_ctle_i : 1; + u32 ana_ctle_bypass_i : 1; + u32 ana_en_ctlecdr_i : 1; + u32 ana_cdr_ctle_boost_i : 1; + u32 ana_en_vga_i : 1; + u32 ana_en_bbcdr_vco_i : 1; + u32 ana_bbcdr_vcofilt_byp_i : 1; + u32 ana_en_bbcdr_i : 1; + u32 ana_en_bbcdr_clk_i : 1; + u32 ana_bbcdr_en_elv_cnt_ping0_pong1_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_ping_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_pong_i : 1; + u32 ana_bbcdr_clrz_cnt_sync_i : 1; + u32 ana_bbcdr_en_elv_cnt_rd_i : 1; + u32 ana_bbcdr_elv_cnt_ping_0_o : 1; + u32 ana_bbcdr_elv_cnt_ping_90_o : 1; + u32 ana_bbcdr_elv_cnt_ping_180_o : 1; + u32 ana_bbcdr_elv_cnt_ping_270_o : 1; + u32 ana_bbcdr_elv_cnt_pong_0_o : 1; + u32 ana_bbcdr_elv_cnt_pong_90_o : 1; + u32 ana_bbcdr_elv_cnt_pong_180_o : 1; + u32 ana_bbcdr_elv_cnt_pong_270_o : 1; + }; + u32 reg; +} E56G__RXS3_ANA_OVRDVAL_0; +//-----MACRO defines for Register:E56G__RXS3_ANA_OVRDVAL_0 +#define E56G__RXS3_ANA_OVRDVAL_0_ADDR (E56G__BASEADDR+0x6a0) + +//-----Access structure typedef for Register:E56G__RXS0_ANA_OVRDEN_0 +typedef union { + struct { + u32 ovrd_en_ana_en_rterm_i : 1; + u32 ovrd_en_ana_trim_rterm_i : 1; + u32 ovrd_en_ana_en_bias_i : 1; + u32 ovrd_en_ana_test_bias_i : 1; + u32 ovrd_en_ana_en_ldo_i : 1; + u32 ovrd_en_ana_test_ldo_i : 1; + u32 ovrd_en_ana_rstn_i : 1; + u32 ovrd_en_ana_en_blwc_i : 1; + u32 ovrd_en_ana_en_acc_amp_i : 1; + u32 ovrd_en_ana_en_acc_dac_i : 1; + u32 ovrd_en_ana_en_afe_offset_cal_i : 1; + u32 ovrd_en_ana_clk_offsetcal_i : 1; + u32 ovrd_en_ana_acc_os_code_i : 1; + u32 ovrd_en_ana_acc_os_comp_o : 1; + u32 ovrd_en_ana_test_acc_i : 1; + u32 ovrd_en_ana_en_ctle_i : 1; + u32 ovrd_en_ana_ctle_bypass_i : 1; + u32 ovrd_en_ana_ctle_cz_cstm_i : 1; + u32 ovrd_en_ana_ctle_cload_cstm_i : 1; + u32 ovrd_en_ana_test_ctle_i : 1; + u32 ovrd_en_ana_lfeq_ctrl_cstm_i : 1; + u32 ovrd_en_ana_en_ctlecdr_i : 1; + u32 ovrd_en_ana_cdr_ctle_boost_i : 1; + u32 ovrd_en_ana_test_ctlecdr_i : 1; + 
u32 ovrd_en_ana_en_vga_i : 1; + u32 ovrd_en_ana_vga_gain_cstm_i : 1; + u32 ovrd_en_ana_vga_cload_in_cstm_i : 1; + u32 ovrd_en_ana_test_vga_i : 1; + u32 ovrd_en_ana_en_bbcdr_vco_i : 1; + u32 ovrd_en_ana_bbcdr_osc_range_sel_i : 1; + u32 ovrd_en_ana_sel_vga_gain_byp_i : 1; + u32 ovrd_en_ana_vga2_gain_cstm_i : 1; + }; + u32 reg; +} E56G__RXS0_ANA_OVRDEN_0; +//-----MACRO defines for Register:E56G__RXS0_ANA_OVRDEN_0 +#define E56G__RXS0_ANA_OVRDEN_0_ADDR (E56G__BASEADDR+0x8c) + +//-----Access structure typedef for Register:E56G__RXS1_ANA_OVRDEN_0 +typedef union { + struct { + u32 ovrd_en_ana_en_rterm_i : 1; + u32 ovrd_en_ana_trim_rterm_i : 1; + u32 ovrd_en_ana_en_bias_i : 1; + u32 ovrd_en_ana_test_bias_i : 1; + u32 ovrd_en_ana_en_ldo_i : 1; + u32 ovrd_en_ana_test_ldo_i : 1; + u32 ovrd_en_ana_rstn_i : 1; + u32 ovrd_en_ana_en_blwc_i : 1; + u32 ovrd_en_ana_en_acc_amp_i : 1; + u32 ovrd_en_ana_en_acc_dac_i : 1; + u32 ovrd_en_ana_en_afe_offset_cal_i : 1; + u32 ovrd_en_ana_clk_offsetcal_i : 1; + u32 ovrd_en_ana_acc_os_code_i : 1; + u32 ovrd_en_ana_acc_os_comp_o : 1; + u32 ovrd_en_ana_test_acc_i : 1; + u32 ovrd_en_ana_en_ctle_i : 1; + u32 ovrd_en_ana_ctle_bypass_i : 1; + u32 ovrd_en_ana_ctle_cz_cstm_i : 1; + u32 ovrd_en_ana_ctle_cload_cstm_i : 1; + u32 ovrd_en_ana_test_ctle_i : 1; + u32 ovrd_en_ana_lfeq_ctrl_cstm_i : 1; + u32 ovrd_en_ana_en_ctlecdr_i : 1; + u32 ovrd_en_ana_cdr_ctle_boost_i : 1; + u32 ovrd_en_ana_test_ctlecdr_i : 1; + u32 ovrd_en_ana_en_vga_i : 1; + u32 ovrd_en_ana_vga_gain_cstm_i : 1; + u32 ovrd_en_ana_vga_cload_in_cstm_i : 1; + u32 ovrd_en_ana_test_vga_i : 1; + u32 ovrd_en_ana_en_bbcdr_vco_i : 1; + u32 ovrd_en_ana_bbcdr_osc_range_sel_i : 1; + u32 ovrd_en_ana_sel_vga_gain_byp_i : 1; + u32 ovrd_en_ana_vga2_gain_cstm_i : 1; + }; + u32 reg; +} E56G__RXS1_ANA_OVRDEN_0; +//-----MACRO defines for Register:E56G__RXS1_ANA_OVRDEN_0 +#define E56G__RXS1_ANA_OVRDEN_0_ADDR (E56G__BASEADDR+0x28c) + +//-----Access structure typedef for Register:E56G__RXS2_ANA_OVRDEN_0 +typedef 
union { + struct { + u32 ovrd_en_ana_en_rterm_i : 1; + u32 ovrd_en_ana_trim_rterm_i : 1; + u32 ovrd_en_ana_en_bias_i : 1; + u32 ovrd_en_ana_test_bias_i : 1; + u32 ovrd_en_ana_en_ldo_i : 1; + u32 ovrd_en_ana_test_ldo_i : 1; + u32 ovrd_en_ana_rstn_i : 1; + u32 ovrd_en_ana_en_blwc_i : 1; + u32 ovrd_en_ana_en_acc_amp_i : 1; + u32 ovrd_en_ana_en_acc_dac_i : 1; + u32 ovrd_en_ana_en_afe_offset_cal_i : 1; + u32 ovrd_en_ana_clk_offsetcal_i : 1; + u32 ovrd_en_ana_acc_os_code_i : 1; + u32 ovrd_en_ana_acc_os_comp_o : 1; + u32 ovrd_en_ana_test_acc_i : 1; + u32 ovrd_en_ana_en_ctle_i : 1; + u32 ovrd_en_ana_ctle_bypass_i : 1; + u32 ovrd_en_ana_ctle_cz_cstm_i : 1; + u32 ovrd_en_ana_ctle_cload_cstm_i : 1; + u32 ovrd_en_ana_test_ctle_i : 1; + u32 ovrd_en_ana_lfeq_ctrl_cstm_i : 1; + u32 ovrd_en_ana_en_ctlecdr_i : 1; + u32 ovrd_en_ana_cdr_ctle_boost_i : 1; + u32 ovrd_en_ana_test_ctlecdr_i : 1; + u32 ovrd_en_ana_en_vga_i : 1; + u32 ovrd_en_ana_vga_gain_cstm_i : 1; + u32 ovrd_en_ana_vga_cload_in_cstm_i : 1; + u32 ovrd_en_ana_test_vga_i : 1; + u32 ovrd_en_ana_en_bbcdr_vco_i : 1; + u32 ovrd_en_ana_bbcdr_osc_range_sel_i : 1; + u32 ovrd_en_ana_sel_vga_gain_byp_i : 1; + u32 ovrd_en_ana_vga2_gain_cstm_i : 1; + }; + u32 reg; +} E56G__RXS2_ANA_OVRDEN_0; +//-----MACRO defines for Register:E56G__RXS2_ANA_OVRDEN_0 +#define E56G__RXS2_ANA_OVRDEN_0_ADDR (E56G__BASEADDR+0x48c) + +//-----Access structure typedef for Register:E56G__RXS3_ANA_OVRDEN_0 +typedef union { + struct { + u32 ovrd_en_ana_en_rterm_i : 1; + u32 ovrd_en_ana_trim_rterm_i : 1; + u32 ovrd_en_ana_en_bias_i : 1; + u32 ovrd_en_ana_test_bias_i : 1; + u32 ovrd_en_ana_en_ldo_i : 1; + u32 ovrd_en_ana_test_ldo_i : 1; + u32 ovrd_en_ana_rstn_i : 1; + u32 ovrd_en_ana_en_blwc_i : 1; + u32 ovrd_en_ana_en_acc_amp_i : 1; + u32 ovrd_en_ana_en_acc_dac_i : 1; + u32 ovrd_en_ana_en_afe_offset_cal_i : 1; + u32 ovrd_en_ana_clk_offsetcal_i : 1; + u32 ovrd_en_ana_acc_os_code_i : 1; + u32 ovrd_en_ana_acc_os_comp_o : 1; + u32 ovrd_en_ana_test_acc_i : 1; + u32 
ovrd_en_ana_en_ctle_i : 1; + u32 ovrd_en_ana_ctle_bypass_i : 1; + u32 ovrd_en_ana_ctle_cz_cstm_i : 1; + u32 ovrd_en_ana_ctle_cload_cstm_i : 1; + u32 ovrd_en_ana_test_ctle_i : 1; + u32 ovrd_en_ana_lfeq_ctrl_cstm_i : 1; + u32 ovrd_en_ana_en_ctlecdr_i : 1; + u32 ovrd_en_ana_cdr_ctle_boost_i : 1; + u32 ovrd_en_ana_test_ctlecdr_i : 1; + u32 ovrd_en_ana_en_vga_i : 1; + u32 ovrd_en_ana_vga_gain_cstm_i : 1; + u32 ovrd_en_ana_vga_cload_in_cstm_i : 1; + u32 ovrd_en_ana_test_vga_i : 1; + u32 ovrd_en_ana_en_bbcdr_vco_i : 1; + u32 ovrd_en_ana_bbcdr_osc_range_sel_i : 1; + u32 ovrd_en_ana_sel_vga_gain_byp_i : 1; + u32 ovrd_en_ana_vga2_gain_cstm_i : 1; + }; + u32 reg; +} E56G__RXS3_ANA_OVRDEN_0; +//-----MACRO defines for Register:E56G__RXS3_ANA_OVRDEN_0 +#define E56G__RXS3_ANA_OVRDEN_0_NUM 1 +#define E56G__RXS3_ANA_OVRDEN_0_ADDR (E56G__BASEADDR+0x68c) + +//-----Access structure typedef for Register:E56G__RXS0_ANA_OVRDVAL_3 +typedef union { + struct { + u32 ana_ctle_cz_cstm_i : 5; + u32 rsvd0 : 3; + u32 ana_ctle_cload_cstm_i : 5; + u32 rsvd1 : 3; + u32 ana_test_ctle_i : 2; + u32 rsvd2 : 2; + u32 ana_lfeq_ctrl_cstm_i : 4; + u32 ana_test_ctlecdr_i : 2; + u32 rsvd3 : 2; + u32 ana_vga_cload_in_cstm_i : 3; + u32 rsvd4 : 1; + }; + u32 reg; +} E56G__RXS0_ANA_OVRDVAL_3; +//-----MACRO defines for Register:E56G__RXS0_ANA_OVRDVAL_3 +#define E56G__RXS0_ANA_OVRDVAL_3_NUM 1 +#define E56G__RXS0_ANA_OVRDVAL_3_ADDR (E56G__BASEADDR+0xac) + +//-----Access structure typedef for Register:E56G__RXS1_ANA_OVRDVAL_3 +typedef union { + struct { + u32 ana_ctle_cz_cstm_i : 5; + u32 rsvd0 : 3; + u32 ana_ctle_cload_cstm_i : 5; + u32 rsvd1 : 3; + u32 ana_test_ctle_i : 2; + u32 rsvd2 : 2; + u32 ana_lfeq_ctrl_cstm_i : 4; + u32 ana_test_ctlecdr_i : 2; + u32 rsvd3 : 2; + u32 ana_vga_cload_in_cstm_i : 3; + u32 rsvd4 : 1; + }; + u32 reg; +} E56G__RXS1_ANA_OVRDVAL_3; +//-----MACRO defines for Register:E56G__RXS1_ANA_OVRDVAL_3 +#define E56G__RXS1_ANA_OVRDVAL_3_ADDR (E56G__BASEADDR+0x2ac) + +//-----Access structure 
typedef for Register:E56G__RXS2_ANA_OVRDVAL_3 +typedef union { + struct { + u32 ana_ctle_cz_cstm_i : 5; + u32 rsvd0 : 3; + u32 ana_ctle_cload_cstm_i : 5; + u32 rsvd1 : 3; + u32 ana_test_ctle_i : 2; + u32 rsvd2 : 2; + u32 ana_lfeq_ctrl_cstm_i : 4; + u32 ana_test_ctlecdr_i : 2; + u32 rsvd3 : 2; + u32 ana_vga_cload_in_cstm_i : 3; + u32 rsvd4 : 1; + }; + u32 reg; +} E56G__RXS2_ANA_OVRDVAL_3; +//-----MACRO defines for Register:E56G__RXS2_ANA_OVRDVAL_3 +#define E56G__RXS2_ANA_OVRDVAL_3_ADDR (E56G__BASEADDR+0x4ac) + +//-----Access structure typedef for Register:E56G__RXS3_ANA_OVRDVAL_3 +typedef union { + struct { + u32 ana_ctle_cz_cstm_i : 5; + u32 rsvd0 : 3; + u32 ana_ctle_cload_cstm_i : 5; + u32 rsvd1 : 3; + u32 ana_test_ctle_i : 2; + u32 rsvd2 : 2; + u32 ana_lfeq_ctrl_cstm_i : 4; + u32 ana_test_ctlecdr_i : 2; + u32 rsvd3 : 2; + u32 ana_vga_cload_in_cstm_i : 3; + u32 rsvd4 : 1; + }; + u32 reg; +} E56G__RXS3_ANA_OVRDVAL_3; +//-----MACRO defines for Register:E56G__RXS3_ANA_OVRDVAL_3 +#define E56G__RXS3_ANA_OVRDVAL_3_ADDR (E56G__BASEADDR+0x6ac) + +//-----Access structure typedef for Register:E56G__PMD_RXS0_OVRDEN_0 +typedef union { + struct { + u32 ovrd_en_rxs0_rx0_rstn_i : 1; + u32 ovrd_en_rxs0_rx0_bitclk_divctrl_i : 1; + u32 ovrd_en_rxs0_rx0_bitclk_rate_i : 1; + u32 ovrd_en_rxs0_rx0_symdata_width_i : 1; + u32 ovrd_en_rxs0_rx0_symdata_o : 1; + u32 ovrd_en_rxs0_rx0_precode_en_i : 1; + u32 ovrd_en_rxs0_rx0_signal_type_i : 1; + u32 ovrd_en_rxs0_rx0_sync_detect_en_i : 1; + u32 ovrd_en_rxs0_rx0_sync_o : 1; + u32 ovrd_en_rxs0_rx0_rate_select_i : 1; + u32 ovrd_en_rxs0_rx0_rterm_en_i : 1; + u32 ovrd_en_rxs0_rx0_bias_en_i : 1; + u32 ovrd_en_rxs0_rx0_ldo_en_i : 1; + u32 ovrd_en_rxs0_rx0_ldo_rdy_i : 1; + u32 ovrd_en_rxs0_rx0_blwc_en_i : 1; + u32 ovrd_en_rxs0_rx0_ctle_en_i : 1; + u32 ovrd_en_rxs0_rx0_vga_en_i : 1; + u32 ovrd_en_rxs0_rx0_osc_sel_i : 1; + u32 ovrd_en_rxs0_rx0_osc_en_i : 1; + u32 ovrd_en_rxs0_rx0_clkgencdr_en_i : 1; + u32 ovrd_en_rxs0_rx0_ctlecdr_en_i : 1; + u32 
ovrd_en_rxs0_rx0_samp_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_en_i : 1; + u32 ovrd_en_rxs0_rx0_osc_cal_en_i : 1; + u32 ovrd_en_rxs0_rx0_osc_cal_done_o : 1; + u32 ovrd_en_rxs0_rx0_osc_freq_error_o : 1; + u32 ovrd_en_rxs0_rx0_samp_cal_en_i : 1; + u32 ovrd_en_rxs0_rx0_samp_cal_done_o : 1; + u32 ovrd_en_rxs0_rx0_samp_cal_err_o : 1; + u32 ovrd_en_rxs0_rx0_adc_ofst_cal_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_ofst_cal_done_o : 1; + u32 ovrd_en_rxs0_rx0_adc_ofst_cal_error_o : 1; + }; + u32 reg; +} E56G__PMD_RXS0_OVRDEN_0; +//-----MACRO defines for Register:E56G__PMD_RXS0_OVRDEN_0 +#define E56G__PMD_RXS0_OVRDEN_0_NUM 1 +#define E56G__PMD_RXS0_OVRDEN_0_ADDR (E56G__BASEADDR+0x1530) + +//-----Access structure typedef for Register:E56G__RXS0_DFT_1 +typedef union { + struct { + u32 ber_en : 1; + u32 rsvd0 : 3; + u32 read_mode_en : 1; + u32 rsvd1 : 3; + u32 err_cnt_mode_all0_one1 : 1; + u32 rsvd2 : 3; + u32 init_lfsr_mode_continue0_restart1 : 1; + u32 rsvd3 : 3; + u32 pattern_sel : 4; + u32 rsvd4 : 12; + }; + u32 reg; +} E56G__RXS0_DFT_1; +//-----MACRO defines for Register:E56G__RXS0_DFT_1 +#define E56G__RXS0_DFT_1_NUM 1 +#define E56G__RXS0_DFT_1_ADDR (E56G__BASEADDR+0xec) + +//-----Access structure typedef for Register:E56G__PMD_RXS0_OVRDEN_1 +typedef union { + struct { + u32 ovrd_en_rxs0_rx0_adc_gain_cal_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_gain_cal_done_o : 1; + u32 ovrd_en_rxs0_rx0_adc_gain_cal_error_o : 1; + u32 ovrd_en_rxs0_rx0_fe_ofst_cal_en_i : 1; + u32 ovrd_en_rxs0_rx0_fe_ofst_cal_done_o : 1; + u32 ovrd_en_rxs0_rx0_fe_ofst_cal_error_o : 1; + u32 ovrd_en_rxs0_rx0_fom_en_i : 1; + u32 ovrd_en_rxs0_rx0_idle_detect_en_i : 1; + u32 ovrd_en_rxs0_rx0_idle_o : 1; + u32 ovrd_en_rxs0_rx0_txffe_train_en_i : 1; + u32 ovrd_en_rxs0_rx0_txffe_coeff_rst_i : 1; + u32 ovrd_en_rxs0_rx0_txffe_train_enack_o : 1; + u32 ovrd_en_rxs0_rx0_txffe_train_done_o : 1; + u32 ovrd_en_rxs0_rx0_txffe_coeff_change_o : 1; + u32 ovrd_en_rxs0_rx0_vga_train_en_i : 1; + u32 ovrd_en_rxs0_rx0_vga_train_done_o : 1; + u32 
ovrd_en_rxs0_rx0_ctle_train_en_i : 1; + u32 ovrd_en_rxs0_rx0_ctle_train_done_o : 1; + u32 ovrd_en_rxs0_rx0_cdr_en_i : 1; + u32 ovrd_en_rxs0_rx0_cdr_rdy_o : 1; + u32 ovrd_en_rxs0_rx0_ffe_train_en_i : 1; + u32 ovrd_en_rxs0_rx0_ffe_train_done_o : 1; + u32 ovrd_en_rxs0_rx0_mmpd_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_intl_cal_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_intl_cal_done_o : 1; + u32 ovrd_en_rxs0_rx0_adc_intl_cal_error_o : 1; + u32 ovrd_en_rxs0_rx0_dfe_train_en_i : 1; + u32 ovrd_en_rxs0_rx0_dfe_train_done_o : 1; + u32 ovrd_en_rxs0_rx0_vga_adapt_en_i : 1; + u32 ovrd_en_rxs0_rx0_vga_adapt_done_o : 1; + u32 ovrd_en_rxs0_rx0_ctle_adapt_en_i : 1; + u32 ovrd_en_rxs0_rx0_ctle_adapt_done_o : 1; + }; + u32 reg; +} E56G__PMD_RXS0_OVRDEN_1; +//-----MACRO defines for Register:E56G__PMD_RXS0_OVRDEN_1 +#define E56G__PMD_RXS0_OVRDEN_1_NUM 1 +#define E56G__PMD_RXS0_OVRDEN_1_ADDR (E56G__BASEADDR+0x1534) + +//-----Access structure typedef for Register:E56G__PMD_RXS0_OVRDEN_3 +typedef union { + struct { + u32 ovrd_en_rxs0_rx0_sparein_i : 8; + u32 ovrd_en_rxs0_rx0_spareout_o : 8; + u32 rsvd0 : 16; + }; + u32 reg; +} E56G__PMD_RXS0_OVRDEN_3; +//-----MACRO defines for Register:E56G__PMD_RXS0_OVRDEN_3 +#define E56G__PMD_RXS0_OVRDEN_3_NUM 1 +#define E56G__PMD_RXS0_OVRDEN_3_ADDR (E56G__BASEADDR+0x153c) + +//-----Access structure typedef for Register:E56G__RXS0_DIG_OVRDEN_1 +typedef union { + struct { + u32 vco_code_cont_adj_done_ovrd_en : 1; + u32 dfe_coeffl_ovrd_en : 1; + u32 dfe_coeffh_ovrd_en : 1; + u32 rsvd0 : 1; + u32 top_comp_th_ovrd_en : 1; + u32 mid_comp_th_ovrd_en : 1; + u32 bot_comp_th_ovrd_en : 1; + u32 rsvd1 : 1; + u32 level_target_ovrd_en : 4; + u32 ffe_coeff_c0to3_ovrd_en : 4; + u32 ffe_coeff_c4to7_ovrd_en : 4; + u32 ffe_coeff_c8to11_ovrd_en : 4; + u32 ffe_coeff_c12to15_ovrd_en : 4; + u32 ffe_coeff_update_ovrd_en : 1; + u32 rsvd2 : 3; + }; + u32 reg; +} E56G__RXS0_DIG_OVRDEN_1; +//-----MACRO defines for Register:E56G__RXS0_DIG_OVRDEN_1 +#define E56G__RXS0_DIG_OVRDEN_1_NUM 1 
+#define E56G__RXS0_DIG_OVRDEN_1_ADDR (E56G__BASEADDR+0x160) + +//-----Access structure typedef for Register:E56G__PMD_RXS1_OVRDEN_1 +typedef union { + struct { + u32 ovrd_en_rxs1_rx0_adc_gain_cal_en_i : 1; + u32 ovrd_en_rxs1_rx0_adc_gain_cal_done_o : 1; + u32 ovrd_en_rxs1_rx0_adc_gain_cal_error_o : 1; + u32 ovrd_en_rxs1_rx0_fe_ofst_cal_en_i : 1; + u32 ovrd_en_rxs1_rx0_fe_ofst_cal_done_o : 1; + u32 ovrd_en_rxs1_rx0_fe_ofst_cal_error_o : 1; + u32 ovrd_en_rxs1_rx0_fom_en_i : 1; + u32 ovrd_en_rxs1_rx0_idle_detect_en_i : 1; + u32 ovrd_en_rxs1_rx0_idle_o : 1; + u32 ovrd_en_rxs1_rx0_txffe_train_en_i : 1; + u32 ovrd_en_rxs1_rx0_txffe_coeff_rst_i : 1; + u32 ovrd_en_rxs1_rx0_txffe_train_enack_o : 1; + u32 ovrd_en_rxs1_rx0_txffe_train_done_o : 1; + u32 ovrd_en_rxs1_rx0_txffe_coeff_change_o : 1; + u32 ovrd_en_rxs1_rx0_vga_train_en_i : 1; + u32 ovrd_en_rxs1_rx0_vga_train_done_o : 1; + u32 ovrd_en_rxs1_rx0_ctle_train_en_i : 1; + u32 ovrd_en_rxs1_rx0_ctle_train_done_o : 1; + u32 ovrd_en_rxs1_rx0_cdr_en_i : 1; + u32 ovrd_en_rxs1_rx0_cdr_rdy_o : 1; + u32 ovrd_en_rxs1_rx0_ffe_train_en_i : 1; + u32 ovrd_en_rxs1_rx0_ffe_train_done_o : 1; + u32 ovrd_en_rxs1_rx0_mmpd_en_i : 1; + u32 ovrd_en_rxs1_rx0_adc_intl_cal_en_i : 1; + u32 ovrd_en_rxs1_rx0_adc_intl_cal_done_o : 1; + u32 ovrd_en_rxs1_rx0_adc_intl_cal_error_o : 1; + u32 ovrd_en_rxs1_rx0_dfe_train_en_i : 1; + u32 ovrd_en_rxs1_rx0_dfe_train_done_o : 1; + u32 ovrd_en_rxs1_rx0_vga_adapt_en_i : 1; + u32 ovrd_en_rxs1_rx0_vga_adapt_done_o : 1; + u32 ovrd_en_rxs1_rx0_ctle_adapt_en_i : 1; + u32 ovrd_en_rxs1_rx0_ctle_adapt_done_o : 1; + }; + u32 reg; +} E56G__PMD_RXS1_OVRDEN_1; +//-----MACRO defines for Register:E56G__PMD_RXS1_OVRDEN_1 +#define E56G__PMD_RXS1_OVRDEN_1_ADDR (E56G__BASEADDR+0x1560) + +//-----Access structure typedef for Register:E56G__PMD_RXS2_OVRDEN_1 +typedef union { + struct { + u32 ovrd_en_rxs2_rx0_adc_gain_cal_en_i : 1; + u32 ovrd_en_rxs2_rx0_adc_gain_cal_done_o : 1; + u32 ovrd_en_rxs2_rx0_adc_gain_cal_error_o : 1; + u32 
ovrd_en_rxs2_rx0_fe_ofst_cal_en_i : 1; + u32 ovrd_en_rxs2_rx0_fe_ofst_cal_done_o : 1; + u32 ovrd_en_rxs2_rx0_fe_ofst_cal_error_o : 1; + u32 ovrd_en_rxs2_rx0_fom_en_i : 1; + u32 ovrd_en_rxs2_rx0_idle_detect_en_i : 1; + u32 ovrd_en_rxs2_rx0_idle_o : 1; + u32 ovrd_en_rxs2_rx0_txffe_train_en_i : 1; + u32 ovrd_en_rxs2_rx0_txffe_coeff_rst_i : 1; + u32 ovrd_en_rxs2_rx0_txffe_train_enack_o : 1; + u32 ovrd_en_rxs2_rx0_txffe_train_done_o : 1; + u32 ovrd_en_rxs2_rx0_txffe_coeff_change_o : 1; + u32 ovrd_en_rxs2_rx0_vga_train_en_i : 1; + u32 ovrd_en_rxs2_rx0_vga_train_done_o : 1; + u32 ovrd_en_rxs2_rx0_ctle_train_en_i : 1; + u32 ovrd_en_rxs2_rx0_ctle_train_done_o : 1; + u32 ovrd_en_rxs2_rx0_cdr_en_i : 1; + u32 ovrd_en_rxs2_rx0_cdr_rdy_o : 1; + u32 ovrd_en_rxs2_rx0_ffe_train_en_i : 1; + u32 ovrd_en_rxs2_rx0_ffe_train_done_o : 1; + u32 ovrd_en_rxs2_rx0_mmpd_en_i : 1; + u32 ovrd_en_rxs2_rx0_adc_intl_cal_en_i : 1; + u32 ovrd_en_rxs2_rx0_adc_intl_cal_done_o : 1; + u32 ovrd_en_rxs2_rx0_adc_intl_cal_error_o : 1; + u32 ovrd_en_rxs2_rx0_dfe_train_en_i : 1; + u32 ovrd_en_rxs2_rx0_dfe_train_done_o : 1; + u32 ovrd_en_rxs2_rx0_vga_adapt_en_i : 1; + u32 ovrd_en_rxs2_rx0_vga_adapt_done_o : 1; + u32 ovrd_en_rxs2_rx0_ctle_adapt_en_i : 1; + u32 ovrd_en_rxs2_rx0_ctle_adapt_done_o : 1; + }; + u32 reg; +} E56G__PMD_RXS2_OVRDEN_1; +//-----MACRO defines for Register:E56G__PMD_RXS2_OVRDEN_1 +#define E56G__PMD_RXS2_OVRDEN_1_ADDR (E56G__BASEADDR+0x158c) + +//-----Access structure typedef for Register:E56G__PMD_RXS3_OVRDEN_1 +typedef union { + struct { + u32 ovrd_en_rxs3_rx0_adc_gain_cal_en_i : 1; + u32 ovrd_en_rxs3_rx0_adc_gain_cal_done_o : 1; + u32 ovrd_en_rxs3_rx0_adc_gain_cal_error_o : 1; + u32 ovrd_en_rxs3_rx0_fe_ofst_cal_en_i : 1; + u32 ovrd_en_rxs3_rx0_fe_ofst_cal_done_o : 1; + u32 ovrd_en_rxs3_rx0_fe_ofst_cal_error_o : 1; + u32 ovrd_en_rxs3_rx0_fom_en_i : 1; + u32 ovrd_en_rxs3_rx0_idle_detect_en_i : 1; + u32 ovrd_en_rxs3_rx0_idle_o : 1; + u32 ovrd_en_rxs3_rx0_txffe_train_en_i : 1; + u32 
ovrd_en_rxs3_rx0_txffe_coeff_rst_i : 1; + u32 ovrd_en_rxs3_rx0_txffe_train_enack_o : 1; + u32 ovrd_en_rxs3_rx0_txffe_train_done_o : 1; + u32 ovrd_en_rxs3_rx0_txffe_coeff_change_o : 1; + u32 ovrd_en_rxs3_rx0_vga_train_en_i : 1; + u32 ovrd_en_rxs3_rx0_vga_train_done_o : 1; + u32 ovrd_en_rxs3_rx0_ctle_train_en_i : 1; + u32 ovrd_en_rxs3_rx0_ctle_train_done_o : 1; + u32 ovrd_en_rxs3_rx0_cdr_en_i : 1; + u32 ovrd_en_rxs3_rx0_cdr_rdy_o : 1; + u32 ovrd_en_rxs3_rx0_ffe_train_en_i : 1; + u32 ovrd_en_rxs3_rx0_ffe_train_done_o : 1; + u32 ovrd_en_rxs3_rx0_mmpd_en_i : 1; + u32 ovrd_en_rxs3_rx0_adc_intl_cal_en_i : 1; + u32 ovrd_en_rxs3_rx0_adc_intl_cal_done_o : 1; + u32 ovrd_en_rxs3_rx0_adc_intl_cal_error_o : 1; + u32 ovrd_en_rxs3_rx0_dfe_train_en_i : 1; + u32 ovrd_en_rxs3_rx0_dfe_train_done_o : 1; + u32 ovrd_en_rxs3_rx0_vga_adapt_en_i : 1; + u32 ovrd_en_rxs3_rx0_vga_adapt_done_o : 1; + u32 ovrd_en_rxs3_rx0_ctle_adapt_en_i : 1; + u32 ovrd_en_rxs3_rx0_ctle_adapt_done_o : 1; + }; + u32 reg; +} E56G__PMD_RXS3_OVRDEN_1; +//-----MACRO defines for Register:E56G__PMD_RXS3_OVRDEN_1 +#define E56G__PMD_RXS3_OVRDEN_1_ADDR (E56G__BASEADDR+0x15b8) + +#define E56G__RXS0_FOM_18__ADDR (E56G__BASEADDR+0x1f8) +#define E56G__RXS0_FOM_18__DFE_COEFFL_HINT__MSB 11 +#define E56G__RXS0_FOM_18__DFE_COEFFL_HINT__LSB 0 +#define E56G__RXS0_FOM_18__DFE_COEFFH_HINT__MSB 23 +#define E56G__RXS0_FOM_18__DFE_COEFFH_HINT__LSB 12 +#define E56G__RXS0_FOM_18__DFE_COEFF_HINT_LOAD__MSB 25 +#define E56G__RXS0_FOM_18__DFE_COEFF_HINT_LOAD__LSB 25 + +#define DEFAULT_TEMP 40 +#define HIGH_TEMP 70 + +#define E56PHY_RX_RDY_ST 0x1B + +#define S10G_CMVAR_RANGE_H 0x3 +#define S10G_CMVAR_RANGE_L 0x2 +#define S25G_CMVAR_RANGE_H 0x1 +#define S25G_CMVAR_RANGE_L 0x0 + +#define S25G_CMVAR_RANGE_H 0x1 +#define S25G_CMVAR_RANGE_L 0x0 +#define S25G_CMVAR_SEC_LOW_TH 0x1A +#define S25G_CMVAR_SEC_HIGH_TH 0x1D +#define S25G_CMVAR_UFINE_MAX 0x2 +#define S25G_CMVAR_FINE_MAX 0x7 +#define S25G_CMVAR_COARSE_MAX 0xF +#define 
S25G_CMVAR_UFINE_UMAX_WRAP 0x0 +#define S25G_CMVAR_UFINE_FMAX_WRAP 0x0 +#define S25G_CMVAR_FINE_FMAX_WRAP 0x2 +#define S25G_CMVAR_UFINE_MIN 0x0 +#define S25G_CMVAR_FINE_MIN 0x0 +#define S25G_CMVAR_COARSE_MIN 0x1 +#define S25G_CMVAR_UFINE_UMIN_WRAP 0x2 +#define S25G_CMVAR_UFINE_FMIN_WRAP 0x2 +#define S25G_CMVAR_FINE_FMIN_WRAP 0x5 + +#define S10G_CMVAR_RANGE_H 0x3 +#define S10G_CMVAR_RANGE_L 0x2 +#define S10G_CMVAR_SEC_LOW_TH 0x1A +#define S10G_CMVAR_SEC_HIGH_TH 0x1D +#define S10G_CMVAR_UFINE_MAX 0x7 +#define S10G_CMVAR_FINE_MAX 0x7 +#define S10G_CMVAR_COARSE_MAX 0xF +#define S10G_CMVAR_UFINE_UMAX_WRAP 0x6 +#define S10G_CMVAR_UFINE_FMAX_WRAP 0x7 +#define S10G_CMVAR_FINE_FMAX_WRAP 0x1 +#define S10G_CMVAR_UFINE_MIN 0x0 +#define S10G_CMVAR_FINE_MIN 0x0 +#define S10G_CMVAR_COARSE_MIN 0x1 +#define S10G_CMVAR_UFINE_UMIN_WRAP 0x2 +#define S10G_CMVAR_UFINE_FMIN_WRAP 0x2 +#define S10G_CMVAR_FINE_FMIN_WRAP 0x5 + +#define S10G_TX_FFE_CFG_MAIN 0x2c2c2c2c +#define S10G_TX_FFE_CFG_PRE1 0x0 +#define S10G_TX_FFE_CFG_PRE2 0x0 +#define S10G_TX_FFE_CFG_POST 0x6060606 +#define S25G_TX_FFE_CFG_MAIN 49 +#define S25G_TX_FFE_CFG_PRE1 4 +#define S25G_TX_FFE_CFG_PRE2 1 +#define S25G_TX_FFE_CFG_POST 9 + +/* for dac test*/ +#define S25G_TX_FFE_CFG_DAC_MAIN 0x2a +#define S25G_TX_FFE_CFG_DAC_PRE1 0x3 +#define S25G_TX_FFE_CFG_DAC_PRE2 0x0 +#define S25G_TX_FFE_CFG_DAC_POST 0x11 + +#define BYPASS_CTLE_TAG 0x0 + +#define S10G_PHY_RX_CTLE_TAPWT_WEIGHT1 0x1 +#define S10G_PHY_RX_CTLE_TAPWT_WEIGHT2 0x0 +#define S10G_PHY_RX_CTLE_TAPWT_WEIGHT3 0x0 +#define S10G_PHY_RX_CTLE_TAP_FRACP1 0x18 +#define S10G_PHY_RX_CTLE_TAP_FRACP2 0x0 +#define S10G_PHY_RX_CTLE_TAP_FRACP3 0x0 + +#define S25G_PHY_RX_CTLE_TAPWT_WEIGHT1 0x1 +#define S25G_PHY_RX_CTLE_TAPWT_WEIGHT2 0x0 +#define S25G_PHY_RX_CTLE_TAPWT_WEIGHT3 0x0 +#define S25G_PHY_RX_CTLE_TAP_FRACP1 0x18 +#define S25G_PHY_RX_CTLE_TAP_FRACP2 0x0 +#define S25G_PHY_RX_CTLE_TAP_FRACP3 0x0 + +#define TXGBE_E56_PHY_LINK_UP 0x4 + +#define __bf_shf_m(x) (__builtin_ffsll(x) - 
1) + +#define FIELD_PREP_M(_mask, _val) \ + ({ \ + ((typeof(_mask))(_val) << __bf_shf_m(_mask)) & (_mask); \ + }) + +/** + * FIELD_GET_M() - extract a bitfield element + * @_mask: shifted mask defining the field's length and position + * @_reg: value of entire bitfield + * + * FIELD_GET_M() extracts the field specified by @_mask from the + * bitfield passed in as @_reg by masking and shifting it down. + */ +#define FIELD_GET_M(_mask, _reg) \ + ({ \ + (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf_m(_mask)); \ + }) + +void field_set(u32 *psrcdata, u32 bithigh, u32 bitlow, u32 setvalue); +int E56phyRxRdSecondCode(struct txgbe_hw *hw, int *SECOND_CODE); +u32 txgbe_e56_cfg_25g(struct txgbe_hw *hw); +u32 txgbe_e56_cfg_10g(struct txgbe_hw *hw); +u32 txgbe_e56_cfg_40g(struct txgbe_hw *hw); + +int txgbe_set_link_to_amlite(struct txgbe_hw *hw, u32 speed); +u32 txgbe_e56_cfg_temp(struct txgbe_hw *hw); +int txgbe_e56_get_temp(struct txgbe_hw *hw, int *pTempData); +int txgbe_e56_reconfig_rx(struct txgbe_hw *hw, u32 speed); +int txgbe_e56_config_rx_40G(struct txgbe_hw *hw, u32 speed); +int txgbe_temp_track_seq(struct txgbe_hw *hw, u32 speed); +int txgbe_temp_track_seq_40g(struct txgbe_hw *hw, u32 speed); +int txgbe_get_cur_fec_mode(struct txgbe_hw *hw); +int txgbe_e56_set_fec_mode(struct txgbe_hw *hw, u8 fec_mode); +int txgbe_e56_fec_mode_polling(struct txgbe_hw *hw, bool *link_up); +s32 txgbe_e56_check_phy_link(struct txgbe_hw *hw, u32 *speed, + bool *link_up); + +#endif /* _TXGBE_E56_H_ */ + diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_e56_bp.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56_bp.c new file mode 100644 index 0000000000000000000000000000000000000000..6b6a99ba7bc544203167b08de4cf88f78c26a2d1 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56_bp.c @@ -0,0 +1,2679 @@ +#include "txgbe_e56.h" +#include "txgbe_hw.h" + +#include "txgbe.h" +#include "txgbe_type.h" +#include "txgbe_e56_bp.h" +#include "txgbe_bp.h" + +static int 
E56phySetRxsUfineLeMax(struct txgbe_adapter *adapter, u32 speed)
{
	/* Walk each lane's CDR ultra-fine code down until it is no greater
	 * than the per-speed maximum, overriding the ASIC value each step.
	 */
	struct txgbe_hw *hw = &adapter->hw;
	u32 rdata, addr;
	u32 ULTRAFINE_CODE[4] = {0};
	int lane_num = 0, lane_idx = 0;
	u32 CMVAR_UFINE_MAX = 0;

	switch (speed) {
	case 10:
		CMVAR_UFINE_MAX = S10G_CMVAR_UFINE_MAX;
		lane_num = 1;
		break;
	case 40:
		CMVAR_UFINE_MAX = S10G_CMVAR_UFINE_MAX;
		lane_num = 4;
		break;
	case 25:
		CMVAR_UFINE_MAX = S25G_CMVAR_UFINE_MAX;
		lane_num = 1;
		break;
	default:
		/* lane_num stays 0, so the loops below do nothing */
		kr_dbg(KR_MODE, "%s %d :Invalid speed\n", __func__, __LINE__);
		break;
	}

	for (lane_idx = 0; lane_idx < lane_num; lane_idx++) {
		/* ii get rx ana_bbcdr_ultrafine_i[14, 12] per lane */
		addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * lane_idx);
		rdata = rd32_ephy(hw, addr);
		ULTRAFINE_CODE[lane_idx] = FIELD_GET_M(GENMASK(14, 12), rdata);
		kr_dbg(KR_MODE, "ULTRAFINE_CODE[%d] = %d, CMVAR_UFINE_MAX: %x\n",
		       lane_idx, ULTRAFINE_CODE[lane_idx], CMVAR_UFINE_MAX);
	}

	for (lane_idx = 0; lane_idx < lane_num; lane_idx++) {
		/* b. Perform the below logic sequence */
		while (ULTRAFINE_CODE[lane_idx] > CMVAR_UFINE_MAX) {
			ULTRAFINE_CODE[lane_idx] -= 1;
			addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR +
			       (E56PHY_RXS_OFFSET * lane_idx);
			rdata = rd32_ephy(hw, addr);
			field_set(&rdata, 14, 12, ULTRAFINE_CODE[lane_idx]);
			txgbe_wr32_ephy(hw, addr, rdata);

			/* ovrd_en_ana_bbcdr_ultrafine=1 override ASIC value */
			addr = E56G__RXS0_ANA_OVRDEN_1_ADDR +
			       (E56PHY_RXS_OFFSET * lane_idx);
			rdata = rd32_ephy(hw, addr);
			txgbe_wr32_ephy(hw, addr, rdata | BIT(3));

			/* Wait until 1 milliseconds or greater */
			usec_delay(1000);
		}
	}

	return 0;
}

static int E56phyRxsOscInitForTempTrackRange(struct txgbe_adapter *adapter,
					     u32 speed)
{
	/* Try both candidate CDR VCO coarse ranges (high and low), measure
	 * how far each lane's calibrated coarse code lands from the
	 * temperature-dependent mid code, and lock in whichever range is
	 * closer for each lane before the final RX enable.
	 */
	int OFFSET_CENTRE_RANGE_H[4] = {0}, OFFSET_CENTRE_RANGE_L[4] = {},
	    RANGE_FINAL[4] = {};
	int RX_COARSE_MID_TD, CMVAR_RANGE_H = 0, CMVAR_RANGE_L = 0;
	struct txgbe_hw *hw = &adapter->hw;
	int status = 0, lane_num = 0;
	int T = 40, lane_id = 0;
	u32 addr, rdata;

	/* Set CMVAR_RANGE_H/L based on the link speed mode */
	switch (speed) {
	case 10:
		CMVAR_RANGE_H = S10G_CMVAR_RANGE_H;
		CMVAR_RANGE_L = S10G_CMVAR_RANGE_L;
		lane_num = 1;
		break;
	case 40:
		CMVAR_RANGE_H = S10G_CMVAR_RANGE_H;
		CMVAR_RANGE_L = S10G_CMVAR_RANGE_L;
		lane_num = 4;
		break;
	case 25:
		CMVAR_RANGE_H = S25G_CMVAR_RANGE_H;
		CMVAR_RANGE_L = S25G_CMVAR_RANGE_L;
		lane_num = 1;
		break;
	default:
		kr_dbg(KR_MODE, "%s %d :Invalid speed\n", __func__, __LINE__);
		break;
	}

	/* 1. Read the temperature T just before RXS is enabled. */
	txgbe_e56_get_temp(hw, &T);

	/* 2. Define software variable RX_COARSE_MID_TD */
	if (T < -5)
		RX_COARSE_MID_TD = 10;
	else if (T < 30)
		RX_COARSE_MID_TD = 9;
	else if (T < 65)
		RX_COARSE_MID_TD = 8;
	else if (T < 100)
		RX_COARSE_MID_TD = 7;
	else
		RX_COARSE_MID_TD = 6;

	/* 3. Program the HIGH range candidate on every lane, then enable RX */
	for (lane_id = 0; lane_id < lane_num; lane_id++) {
		addr = 0x0b4 + (0x200 * lane_id);
		rdata = rd32_ephy(hw, addr);
		field_set(&rdata, 1, 0, CMVAR_RANGE_H);
		txgbe_wr32_ephy(hw, addr, rdata);

		addr = 0x08c + (0x200 * lane_id);
		rdata = rd32_ephy(hw, addr);
		field_set(&rdata, 29, 29, 0x1);
		txgbe_wr32_ephy(hw, addr, rdata);

		addr = 0x1540 + (0x02c * lane_id);
		rdata = rd32_ephy(hw, addr);
		field_set(&rdata, 22, 22, 0x0);
		txgbe_wr32_ephy(hw, addr, rdata);

		addr = 0x1530 + (0x02c * lane_id);
		rdata = rd32_ephy(hw, addr);
		field_set(&rdata, 27, 27, 0x1);
		txgbe_wr32_ephy(hw, addr, rdata);
	}
	rdata = rd32_ephy(hw, 0x1400);
	field_set(&rdata, 19, 16, GENMASK(lane_num - 1, 0));
	txgbe_wr32_ephy(hw, 0x1400, rdata);
	status |= read_poll_timeout(rd32_ephy, rdata,
				    (((rdata & 0x3f3f3f3f) & GENMASK(8 * lane_num - 1, 0))
				     == (0x09090909 & GENMASK(8 * lane_num - 1, 0))),
				    100, 200000, false, hw,
				    E56PHY_CTRL_FSM_RX_STAT_0_ADDR);
	if (status)
		kr_dbg(KR_MODE, "Wait fsm_rx_sts 1 = %x : %d, Wait rx_sts %s.\n",
		       rdata, status, status ? "FAILED" : "SUCCESS");

	/* 4. Distance of each lane's calibrated coarse code from the mid code */
	for (lane_id = 0; lane_id < lane_num; lane_id++) {
		addr = 0x0b4 + (0x0200 * lane_id);
		rdata = rd32_ephy(hw, addr);
		OFFSET_CENTRE_RANGE_H[lane_id] = (rdata >> 4) & 0xf;
		if (OFFSET_CENTRE_RANGE_H[lane_id] > RX_COARSE_MID_TD)
			OFFSET_CENTRE_RANGE_H[lane_id] = OFFSET_CENTRE_RANGE_H[lane_id] -
							 RX_COARSE_MID_TD;
		else
			OFFSET_CENTRE_RANGE_H[lane_id] = RX_COARSE_MID_TD -
							 OFFSET_CENTRE_RANGE_H[lane_id];
	}

	/* 7. Do SEQ::RX_DISABLE to disable RXS. */
	rdata = rd32_ephy(hw, 0x1400);
	field_set(&rdata, 19, 16, 0x0);
	txgbe_wr32_ephy(hw, 0x1400, rdata);
	status |= read_poll_timeout(rd32_ephy, rdata,
				    (((rdata & 0x3f3f3f3f) & GENMASK(8 * lane_num - 1, 0))
				     == (0x21212121 & GENMASK(8 * lane_num - 1, 0))),
				    100, 200000, false, hw,
				    E56PHY_CTRL_FSM_RX_STAT_0_ADDR);
	if (status)
		kr_dbg(KR_MODE, "Wait fsm_rx_sts 2 = %x : %d, Wait rx_sts %s.\n",
		       rdata, status, status ? "FAILED" : "SUCCESS");
	/* read-modify-write of 0x15ec; presumably write-1-to-clear latched
	 * status — TODO confirm against register spec
	 */
	rdata = rd32_ephy(hw, 0x15ec);
	txgbe_wr32_ephy(hw, 0x15ec, rdata);

	/* Repeat the calibration with the LOW range candidate */
	for (lane_id = 0; lane_id < lane_num; lane_id++) {
		addr = 0x0b4 + (0x200 * lane_id);
		rdata = rd32_ephy(hw, addr);
		field_set(&rdata, 1, 0, CMVAR_RANGE_L);
		txgbe_wr32_ephy(hw, addr, rdata);

		addr = 0x08c + (0x200 * lane_id);
		rdata = rd32_ephy(hw, addr);
		field_set(&rdata, 29, 29, 0x1);
		txgbe_wr32_ephy(hw, addr, rdata);

		addr = 0x1540 + (0x02c * lane_id);
		rdata = rd32_ephy(hw, addr);
		field_set(&rdata, 22, 22, 0x0);
		txgbe_wr32_ephy(hw, addr, rdata);

		addr = 0x1530 + (0x02c * lane_id);
		rdata = rd32_ephy(hw, addr);
		field_set(&rdata, 27, 27, 0x1);
		txgbe_wr32_ephy(hw, addr, rdata);
	}
	rdata = rd32_ephy(hw, 0x1400);
	/* NOTE(review): enables all four lanes (0xf) here, unlike the first
	 * pass which used GENMASK(lane_num - 1, 0) — confirm intentional
	 */
	field_set(&rdata, 19, 16, 0xf);
	txgbe_wr32_ephy(hw, 0x1400, rdata);
	status |= read_poll_timeout(rd32_ephy, rdata,
				    (((rdata & 0x3f3f3f3f) & GENMASK(8 * lane_num - 1, 0))
				     == (0x09090909 & GENMASK(8 * lane_num - 1, 0))),
				    100, 200000, false, hw,
				    E56PHY_CTRL_FSM_RX_STAT_0_ADDR);
	if (status)
		kr_dbg(KR_MODE, "Wait fsm_rx_sts 3 = %x : %d, Wait rx_sts %s.\n",
		       rdata, status, status ? "FAILED" : "SUCCESS");

	for (lane_id = 0; lane_id < lane_num; lane_id++) {
		addr = 0x0b4 + (0x0200 * lane_id);
		rdata = rd32_ephy(hw, addr);
		OFFSET_CENTRE_RANGE_L[lane_id] = (rdata >> 4) & 0xf;
		if (OFFSET_CENTRE_RANGE_L[lane_id] > RX_COARSE_MID_TD)
			OFFSET_CENTRE_RANGE_L[lane_id] = OFFSET_CENTRE_RANGE_L[lane_id] -
							 RX_COARSE_MID_TD;
		else
			OFFSET_CENTRE_RANGE_L[lane_id] = RX_COARSE_MID_TD -
							 OFFSET_CENTRE_RANGE_L[lane_id];
	}

	/* Pick whichever range landed closer to the mid code, per lane */
	for (lane_id = 0; lane_id < lane_num; lane_id++) {
		RANGE_FINAL[lane_id] = OFFSET_CENTRE_RANGE_L[lane_id] <
				       OFFSET_CENTRE_RANGE_H[lane_id] ?
				       CMVAR_RANGE_L : CMVAR_RANGE_H;
		kr_dbg(KR_MODE, "lane_id:%d-RANGE_L:%x-RANGE_H:%x-RANGE_FINAL:%x\n",
		       lane_id, OFFSET_CENTRE_RANGE_L[lane_id],
		       OFFSET_CENTRE_RANGE_H[lane_id], RANGE_FINAL[lane_id]);
	}

	/* 7. Do SEQ::RX_DISABLE to disable RXS. */
	rdata = rd32_ephy(hw, 0x1400);
	field_set(&rdata, 19, 16, 0x0);
	txgbe_wr32_ephy(hw, 0x1400, rdata);
	status |= read_poll_timeout(rd32_ephy, rdata,
				    (((rdata & 0x3f3f3f3f) & GENMASK(8 * lane_num - 1, 0))
				     == (0x21212121 & GENMASK(8 * lane_num - 1, 0))),
				    100, 200000, false, hw,
				    E56PHY_CTRL_FSM_RX_STAT_0_ADDR);
	if (status)
		kr_dbg(KR_MODE, "Wait fsm_rx_sts 4 = %x : %d, Wait rx_sts %s.\n",
		       rdata, status, status ? "FAILED" : "SUCCESS");
	rdata = rd32_ephy(hw, 0x15ec);
	txgbe_wr32_ephy(hw, 0x15ec, rdata);

	/* Program the winning range and release the per-lane overrides */
	for (lane_id = 0; lane_id < lane_num; lane_id++) {
		addr = 0x0b4 + (0x0200 * lane_id);
		rdata = rd32_ephy(hw, addr);
		field_set(&rdata, 1, 0, RANGE_FINAL[lane_id]);
		txgbe_wr32_ephy(hw, addr, rdata);

		rdata = 0x0000;
		addr = 0x1544 + (lane_id * E56PHY_PMD_RX_OFFSET);
		rdata = rd32_ephy(hw, addr);
		field_set(&rdata, 25, 25, 0x0);
		txgbe_wr32_ephy(hw, addr, rdata);

		addr = 0x1538 + (lane_id * E56PHY_PMD_RX_OFFSET);
		rdata = rd32_ephy(hw, addr);
		field_set(&rdata, 0, 0, 0x1);
		txgbe_wr32_ephy(hw, addr, rdata);

		addr = 0x1544 + (lane_id * E56PHY_PMD_RX_OFFSET);
		rdata = rd32_ephy(hw, addr);
		field_set(&rdata, 28, 28, 0x0);
		txgbe_wr32_ephy(hw, addr, rdata);

		addr = 0x1538 + (lane_id * E56PHY_PMD_RX_OFFSET);
		rdata = rd32_ephy(hw, addr);
		field_set(&rdata, 3, 3, 0x1);
		txgbe_wr32_ephy(hw, addr, rdata);

		rdata = 0x0000;
		addr = 0x1544 + (lane_id * E56PHY_PMD_RX_OFFSET);
		rdata = rd32_ephy(hw, addr);
		field_set(&rdata, 16, 16, 0x0);
		txgbe_wr32_ephy(hw, addr, rdata);

		addr = 0x1534 + (lane_id * E56PHY_PMD_RX_OFFSET);
		rdata = rd32_ephy(hw, addr);
		field_set(&rdata, 23, 23, 0x1);
		txgbe_wr32_ephy(hw, addr, rdata);

		addr = 0x1544 + (lane_id * E56PHY_PMD_RX_OFFSET);
		rdata = rd32_ephy(hw, addr);
		field_set(&rdata, 17, 17, 0x1);
		txgbe_wr32_ephy(hw, addr, rdata);

		addr = 0x1534 + (lane_id * E56PHY_PMD_RX_OFFSET);
		rdata = rd32_ephy(hw, addr);
		field_set(&rdata, 24, 24, 0x1);
		txgbe_wr32_ephy(hw, addr, rdata);

		addr = 0x1544 + (lane_id * E56PHY_PMD_RX_OFFSET);
		rdata = rd32_ephy(hw, addr);
		field_set(&rdata, 31, 31, 0x0);
		txgbe_wr32_ephy(hw, addr, rdata);

		addr = 0x1538 + (lane_id * E56PHY_PMD_RX_OFFSET);
		rdata = rd32_ephy(hw, addr);
		field_set(&rdata, 6, 6, 0x1);
		txgbe_wr32_ephy(hw, addr, rdata);

		addr = 0x1530 + (0x02c * lane_id);
		rdata = rd32_ephy(hw, addr);
		field_set(&rdata, 27, 27, 0x0);
		txgbe_wr32_ephy(hw, addr, rdata);
	}

	/* Do SEQ::RX_ENABLE */
	rdata = rd32_ephy(hw, 0x1400);
	field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, GENMASK(lane_num - 1, 0));
	txgbe_wr32_ephy(hw, 0x1400, rdata);

	return status;
}

static int E56phyRxsPostCdrLockTempTrackSeq(struct txgbe_adapter *adapter,
					    u32 speed)
{
	struct txgbe_hw *hw = &adapter->hw;

	int status = 0;
	u32 rdata;
	int SECOND_CODE;
	int COARSE_CODE;
	int FINE_CODE;
	int ULTRAFINE_CODE;

	int CMVAR_SEC_LOW_TH;
	int CMVAR_UFINE_MAX = 0;
	int CMVAR_FINE_MAX;
	int CMVAR_UFINE_UMAX_WRAP = 0;
	int CMVAR_COARSE_MAX;
	int CMVAR_UFINE_FMAX_WRAP = 0;
	int CMVAR_FINE_FMAX_WRAP = 0;
	int CMVAR_SEC_HIGH_TH;
	int CMVAR_UFINE_MIN;
	int CMVAR_FINE_MIN;
	int CMVAR_UFINE_UMIN_WRAP;
	int CMVAR_COARSE_MIN;
	int CMVAR_UFINE_FMIN_WRAP;
	int CMVAR_FINE_FMIN_WRAP;

	if (speed == 10) {
		CMVAR_SEC_LOW_TH = S10G_CMVAR_SEC_LOW_TH;
		CMVAR_UFINE_MAX = S10G_CMVAR_UFINE_MAX;
		CMVAR_FINE_MAX = S10G_CMVAR_FINE_MAX;
		CMVAR_UFINE_UMAX_WRAP = S10G_CMVAR_UFINE_UMAX_WRAP;
		CMVAR_COARSE_MAX = S10G_CMVAR_COARSE_MAX;
		CMVAR_UFINE_FMAX_WRAP = S10G_CMVAR_UFINE_FMAX_WRAP;
		CMVAR_FINE_FMAX_WRAP = S10G_CMVAR_FINE_FMAX_WRAP;
		CMVAR_SEC_HIGH_TH = S10G_CMVAR_SEC_HIGH_TH;
		CMVAR_UFINE_MIN = S10G_CMVAR_UFINE_MIN;
		CMVAR_FINE_MIN = S10G_CMVAR_FINE_MIN;
		CMVAR_UFINE_UMIN_WRAP = S10G_CMVAR_UFINE_UMIN_WRAP;
		CMVAR_COARSE_MIN = S10G_CMVAR_COARSE_MIN;
		CMVAR_UFINE_FMIN_WRAP = S10G_CMVAR_UFINE_FMIN_WRAP;
		CMVAR_FINE_FMIN_WRAP = S10G_CMVAR_FINE_FMIN_WRAP;
	} else if (speed == 25) {
		CMVAR_SEC_LOW_TH = S25G_CMVAR_SEC_LOW_TH;
		CMVAR_UFINE_MAX = S25G_CMVAR_UFINE_MAX;
		CMVAR_FINE_MAX = S25G_CMVAR_FINE_MAX;
		CMVAR_UFINE_UMAX_WRAP = S25G_CMVAR_UFINE_UMAX_WRAP;
		CMVAR_COARSE_MAX = S25G_CMVAR_COARSE_MAX;
		CMVAR_UFINE_FMAX_WRAP = S25G_CMVAR_UFINE_FMAX_WRAP;
		CMVAR_FINE_FMAX_WRAP = S25G_CMVAR_FINE_FMAX_WRAP;
		CMVAR_SEC_HIGH_TH = S25G_CMVAR_SEC_HIGH_TH;
		CMVAR_UFINE_MIN = S25G_CMVAR_UFINE_MIN;
CMVAR_FINE_MIN = S25G_CMVAR_FINE_MIN; + CMVAR_UFINE_UMIN_WRAP = S25G_CMVAR_UFINE_UMIN_WRAP; + CMVAR_COARSE_MIN = S25G_CMVAR_COARSE_MIN; + CMVAR_UFINE_FMIN_WRAP = S25G_CMVAR_UFINE_FMIN_WRAP; + CMVAR_FINE_FMIN_WRAP = S25G_CMVAR_FINE_FMIN_WRAP; + } + + status |= E56phyRxRdSecondCode(hw, &SECOND_CODE); + + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + COARSE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_coarse_i); + FINE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i); + ULTRAFINE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i); + + if (SECOND_CODE <= CMVAR_SEC_LOW_TH) { + if (ULTRAFINE_CODE < CMVAR_UFINE_MAX) { + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i, ULTRAFINE_CODE + 1); + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (FINE_CODE < CMVAR_FINE_MAX) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_UMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = FINE_CODE + 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (COARSE_CODE < CMVAR_COARSE_MAX) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_FMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = CMVAR_FINE_FMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_coarse_i) = COARSE_CODE + 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_coarse_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) 
= 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else { + kr_dbg(KR_MODE, "ERROR: (SECOND_CODE <= CMVAR_SEC_LOW_TH) temperature tracking occurs Error condition\n"); + } + } else if (SECOND_CODE >= CMVAR_SEC_HIGH_TH) { + if (ULTRAFINE_CODE > CMVAR_UFINE_MIN) { + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i, ULTRAFINE_CODE - 1); + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (FINE_CODE > CMVAR_FINE_MIN) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_UMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = FINE_CODE - 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (COARSE_CODE > CMVAR_COARSE_MIN) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i) = CMVAR_UFINE_FMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = CMVAR_FINE_FMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_coarse_i) = COARSE_CODE - 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_coarse_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else { + kr_dbg(KR_MODE, "ERROR: (SECOND_CODE >= CMVAR_SEC_HIGH_TH) temperature tracking occurs Error condition\n"); + } + } + + return status; +} + +static int E56phyCtleBypassSeq(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int status = 0; + u32 rdata; + + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_0, 
ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_0, ovrd_en_ana_ctle_bypass_i, 1); + + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_3, ana_ctle_cz_cstm_i, 0); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_0, ovrd_en_ana_ctle_cz_cstm_i, 1); + + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_en_i) = 0; + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS0_OVRDVAL_1); + + EPHY_RREG(E56G__PMD_RXS0_OVRDEN_1); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_en_i) = 1; + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS0_OVRDEN_1); + + return status; +} + +static int E56phyRxsAdcAdaptSeq(struct txgbe_adapter *adapter, u32 bypassCtle) +{ + struct txgbe_hw *hw = &adapter->hw; + int lane_num = 0, lane_idx = 0; + u32 rdata = 0, addr = 0; + int status = 0; + + int timer = 0, j = 0; + + switch (adapter->bp_link_mode) { + case 10: + lane_num = 1; + break; + case 40: + lane_num = 4; + break; + case 25: + lane_num = 1; + break; + default: + kr_dbg(KR_MODE, "%s %d :Invalid speed\n", __func__, __LINE__); + break; + } + + for (lane_idx = 0; lane_idx < lane_num; lane_idx++) { + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + /* Wait RXS0-3_OVRDVAL[1]::rxs0-3_rx0_cdr_rdy_o = 1 */ + status = read_poll_timeout(rd32_ephy, rdata, (rdata & BIT(12)), + 100, 200000, false, hw, 0x1544); + if (status) + kr_dbg(KR_MODE, "rxs%d_rx0_cdr_rdy_o = %x, %s.\n", + lane_idx, rdata, + status ? "FAILED" : "SUCCESS"); + } + for (lane_idx = 0; lane_idx < lane_num; lane_idx++) { + //4. Disable VGA and CTLE training so that they don't interfere with ADC calibration + //a. 
Set ALIAS::RXS::VGA_TRAIN_EN = 0b0 + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 7, 7, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1534 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 14, 14, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //b. Set ALIAS::RXS::CTLE_TRAIN_EN = 0b0 + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 9, 9, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1534 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 16, 16, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //5. Perform ADC interleaver calibration + //a. Remove the OVERRIDE on ALIAS::RXS::ADC_INTL_CAL_DONE + addr = 0x1534 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 24, 24, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 16, 16, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + /* Wait rxs0_rx0_adc_intl_cal_done_o bit17 = 1 */ + status = read_poll_timeout(rd32_ephy, rdata, (rdata & BIT(17)), + 100, 200000, false, hw, addr); + if (status) + kr_dbg(KR_MODE, "rxs0_rx0_adc_intl_cal_done_o = %x, %s.\n", rdata, + status ? "FAILED" : "SUCCESS"); + + /* 6. Perform ADC offset adaptation and ADC gain adaptation, + * repeat them a few times and after that keep it disabled. + */ + for (j = 0; j < 16; j++) { + //a. ALIAS::RXS::ADC_OFST_ADAPT_EN = 0b1 + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 25, 25, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //b. 
Wait for 1ms or greater + //usec_delay(1000); + /* set ovrd_en_rxs0_rx0_adc_ofst_adapt_done_o bit1=0 */ + addr = 0x1538 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 1, 1, 0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + /* Wait rxs0_rx0_adc_ofst_adapt_done_o bit26 = 0 */ + status = read_poll_timeout(rd32_ephy, rdata, + !(rdata & BIT(26)), + 100, 200000, false, hw, addr); + if (status) + kr_dbg(KR_MODE, + "rxs0_rx0_adc_ofst_adapt_done_o %d = %x, %s.\n", + j, rdata, status ? "FAILED" : "SUCCESS"); + + //c. ALIAS::RXS::ADC_OFST_ADAPT_EN = 0b0 + rdata = 0x0000; + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 25, 25, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + //d. ALIAS::RXS::ADC_GAIN_ADAPT_EN = 0b1 + rdata = 0x0000; + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 28, 28, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //e. Wait for 1ms or greater + //usec_delay(1000); + /* set ovrd_en_rxs0_rx0_adc_ofst_adapt_done_o bit1=0 */ + addr = 0x1538 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 1, 1, 0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + /* Wait rxs0_rx0_adc_gain_adapt_done_o bit29 = 0 */ + status = read_poll_timeout(rd32_ephy, rdata, !(rdata & BIT(29)), + 100, 200000, false, hw, addr); + if (status) + kr_dbg(KR_MODE, + "rxs0_rx0_adc_gain_adapt_done_o %d = %x, %s.\n", + j, rdata, status ? "FAILED" : "SUCCESS"); + + //f. ALIAS::RXS::ADC_GAIN_ADAPT_EN = 0b0 + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 28, 28, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + } + //g. Repeat #a to #f total 16 times + + /* 7. Perform ADC interleaver adaptation for 10ms or greater, + * and after that disable it + */ + //a. 
ALIAS::RXS::ADC_INTL_ADAPT_EN = 0b1 + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 31, 31, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + //b. Wait for 10ms or greater + msleep(20); + + //c. ALIAS::RXS::ADC_INTL_ADAPT_EN = 0b0 + /* set ovrd_en_rxs0_rx0_adc_intl_adapt_en_i=0*/ + addr = 0x1538 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 6, 6, 0); + txgbe_wr32_ephy(hw, addr, rdata); + + /* 8. Now re-enable VGA and CTLE trainings, so that it continues + * to adapt tracking changes in temperature or voltage + * <1>Set ALIAS::RXS::VGA_TRAIN_EN = 0b1 + */ + /* set rxs0_rx0_vga_train_en_i=1*/ + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 7, 7, 0x1); + if (bypassCtle == 0) + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_en_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + + //<2>wait for ALIAS::RXS::VGA_TRAIN_DONE = 1 + /* set ovrd_en_rxs0_rx0_vga_train_done_o = 0*/ + addr = 0x1534 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 15, 15, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + /* Wait rxs0_rx0_vga_train_done_o bit8 = 0 */ + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + status = read_poll_timeout(rd32_ephy, rdata, (rdata & BIT(8)), + 100, 300000, false, hw, addr); + if (status) + kr_dbg(KR_MODE, "rxs0_rx0_vga_train_done_o = %x, %s.\n", rdata, + status ? "FAILED" : "SUCCESS"); + + if (bypassCtle == 0) { + addr = 0x1534 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_ctle_train_done_o) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0; + timer = 0; + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + while (EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, + rxs0_rx0_ctle_train_done_o) != 1) { + rdata = rd32_ephy(hw, addr); + udelay(500); + + if (timer++ > PHYINIT_TIMEOUT) + break; + } + } + + //a. 
Remove the OVERRIDE on ALIAS::RXS::VGA_TRAIN_EN + addr = 0x1534 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 15, 15, 0); + //b. Remove the OVERRIDE on ALIAS::RXS::CTLE_TRAIN_EN + if (bypassCtle == 0) + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_ctle_train_en_i) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + } + + return status; +} + +static int E56phyRxsCalibAdaptSeq(struct txgbe_adapter *adapter, u8 bplinkmode, + u32 bypassCtle) +{ + struct txgbe_hw *hw = &adapter->hw; + int lane_num = 0, lane_idx = 0; + int status = 0; + u32 rdata, addr; + + switch (bplinkmode) { + case 10: + lane_num = 1; + break; + case 40: + lane_num = 4; + break; + case 25: + lane_num = 1; + break; + default: + kr_dbg(KR_MODE, "%s %d :Invalid speed\n", __func__, __LINE__); + break; + } + + for (lane_idx = 0; lane_idx < lane_num; lane_idx++) { + rdata = 0x0000; + addr = 0x1544 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 25, 25, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1538 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 0, 0, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 28, 28, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1538 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 3, 3, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = 0x1544 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 16, 16, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1534 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 23, 23, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 17, 17, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); 
+ + addr = 0x1534 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 24, 24, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 31, 31, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1538 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 6, 6, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + } + if (bypassCtle != 0) + status |= E56phyCtleBypassSeq(adapter); + + status |= E56phyRxsOscInitForTempTrackRange(adapter, bplinkmode); + + /* Wait an fsm_rx_sts 25G */ + kr_dbg(KR_MODE, + "Wait CTRL_FSM_RX_STAT[0]::ctrl_fsm_rx0_st to be ready ...\n"); + + status |= read_poll_timeout(rd32_ephy, rdata, + (((rdata & 0x3f3f3f3f) & GENMASK(8 * lane_num - 1, 0)) + == (0x1b1b1b1b & GENMASK(8 * lane_num - 1, 0))), + 1000, 300000, false, hw, + E56PHY_CTRL_FSM_RX_STAT_0_ADDR); + kr_dbg(KR_MODE, "wait ctrl_fsm_rx0_st = %x, %s.\n", + rdata, status ? 
"FAILED" : "SUCCESS"); + + + return status; +} + +static int E56phyCmsCfgForTempTrackRange(struct txgbe_adapter *adapter, u8 bplinkmode) +{ + struct txgbe_hw *hw = &adapter->hw; + int status = 0, T = 40; + u32 addr, rdata; + + status = txgbe_e56_get_temp(hw, &T); + if (T < 40) { + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_LPF_SETCODE_CALIB_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDVAL_2_ANA_LCPLL_HF_LPF_SETCODE_CALIB_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_LF_LPF_SETCODE_CALIB_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_7_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDVAL_7_ANA_LCPLL_LF_LPF_SETCODE_CALIB_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + } else if (T > 70) { + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_LPF_SETCODE_CALIB_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDVAL_2_ANA_LCPLL_HF_LPF_SETCODE_CALIB_I, 0x3); + txgbe_wr32_ephy(hw, addr, rdata); + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, + E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_LF_LPF_SETCODE_CALIB_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_7_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDVAL_7_ANA_LCPLL_LF_LPF_SETCODE_CALIB_I, 0x3); + 
txgbe_wr32_ephy(hw, addr, rdata); + } else { + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_HF_TEST_IN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 24, 24, 0x1); + field_set(&rdata, 31, 29, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_5_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 1, 0, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_LF_TEST_IN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_9_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 24, 24, 0x1); + field_set(&rdata, 31, 29, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_10_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 1, 0, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + } + return status; +} + +static int E56phyTxFfeCfg(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + /* Setting the TX EQ main/pre1/pre2/post value */ + adapter->aml_txeq.main = S25G_TX_FFE_CFG_DAC_MAIN; + adapter->aml_txeq.pre1 = S25G_TX_FFE_CFG_DAC_PRE1; + adapter->aml_txeq.pre2 = S25G_TX_FFE_CFG_DAC_PRE2; + adapter->aml_txeq.post = S25G_TX_FFE_CFG_DAC_POST; + txgbe_wr32_ephy(hw, 0x141c, adapter->aml_txeq.main); + txgbe_wr32_ephy(hw, 0x1420, adapter->aml_txeq.pre1); + txgbe_wr32_ephy(hw, 0x1424, adapter->aml_txeq.pre2); + txgbe_wr32_ephy(hw, 0x1428, adapter->aml_txeq.post); + + return 0; +} + + +static int E56phy25gCfg(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 addr, rdata; + + rdata = 0x0000; + addr = E56PHY_CMS_PIN_OVRDVAL_0_ADDR; + rdata = rd32_ephy(hw, 
addr); + field_set(&rdata, E56PHY_CMS_PIN_OVRDVAL_0_INT_PLL0_TX_SIGNAL_TYPE_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_PIN_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_PIN_OVRDEN_0_OVRD_EN_PLL0_TX_SIGNAL_TYPE_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDVAL_2_ANA_LCPLL_HF_VCO_SWING_CTRL_I, 0xf); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_VCO_SWING_CTRL_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 23, 0, 0x260000); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_HF_TEST_IN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_TXS_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_TXS_CFG_1_ADAPTATION_WAIT_CNT_X256, 0xf); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_WKUP_CNT_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_WKUP_CNTLDO_WKUP_CNT_X32, 0xff); + field_set(&rdata, E56PHY_TXS_WKUP_CNTDCC_WKUP_CNT_X32, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_PIN_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 27, 24, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_PIN_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_PIN_OVRDEN_0_OVRD_EN_TX0_EFUSE_BITS_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_ANA_OVRDVAL_1_ADDR; + 
rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_ANA_OVRDVAL_1_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + E56phyTxFfeCfg(adapter); + + rdata = 0x0000; + addr = E56PHY_RXS_RXS_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_RXS_CFG_0_DSER_DATA_SEL, 0x0); + field_set(&rdata, E56PHY_RXS_RXS_CFG_0_TRAIN_CLK_GATE_BYPASS_EN, 0x1fff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_1_PREDIV1, 0x700); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_1_TARGET_CNT1, 0x2418); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_OSC_RANGE_SEL1, 0x1); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_VCO_CODE_INIT, 0x7fb); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_OSC_CURRENT_BOOST_EN1, 0x0); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_BBCDR_CURRENT_BOOST1, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_5_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_SDM_WIDTH, 0x3); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_PRELOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_POSTLOCK, 0x3); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_POSTLOCK, 0xa); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_PRELOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BBCDR_RDY_CNT, 0x3); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 
E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_PRELOCK, 0x7); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_POSTLOCK, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_INTL_CONFIG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_INTL_CONFIG_0_ADC_INTL2SLICE_DELAY1, 0x3333); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_INTL_CONFIG_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_INTL_CONFIG_2_INTERLEAVER_HBW_DISABLE1, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_LTH, 0x56); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_UTH, 0x6a); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_LTH, 0x1f8); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_UTH, 0xf0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_LTH, 0x100); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_UTH, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_LTH, 0x4); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_UTH, 0x37); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_TXFFE_TRAIN_MOD_TYPE, 0x38); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56G__RXS0_FOM_18__ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56G__RXS0_FOM_18__DFE_COEFFL_HINT__MSB, + E56G__RXS0_FOM_18__DFE_COEFFL_HINT__LSB, 0x0); + //change 0x90 to 0x0 to fix 25G link up keep when cable unplugged + field_set(&rdata, 
E56G__RXS0_FOM_18__DFE_COEFFH_HINT__MSB, + E56G__RXS0_FOM_18__DFE_COEFFH_HINT__LSB, 0x0); + field_set(&rdata, E56G__RXS0_FOM_18__DFE_COEFF_HINT_LOAD__MSB, + E56G__RXS0_FOM_18__DFE_COEFF_HINT_LOAD__LSB, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_VGA_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_0_VGA_TARGET, 0x34); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_VGA_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT0, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT0, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT123, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT123, 0xa); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT0, 0x9); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT123, 0x9); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_1_LFEQ_LUT, 0x1ffffea); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P1, 18); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P2, 0); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P3, 0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P1, 1); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P2, 0); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P3, 0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr 
= E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_SLICE_DATA_AVG_CNT, 0x3); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_DATA_AVG_CNT, 0x3); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_FE_OFFSET_DAC_CLK_CNT_X8, 0xc); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_1_SAMP_ADAPT_CFG, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_FFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_FFE_TRAINING_0_FFE_TAP_EN, 0xf9ff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_IDLE_DETECT_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX, 0xa); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + //txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDVAL_11, ana_test_adc_clkgen_i, 0x0); + //txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_2, ovrd_en_ana_test_adc_clkgen_i, 0x0); + addr = 0x6cc; + rdata = 0x8020000; + txgbe_wr32_ephy(hw, addr, rdata); + addr = 0x94; + rdata = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_0_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 4, 0, 0x0); + field_set(&rdata, 14, 13, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, 
addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_BBCDR_VCOFILT_BYP_I, 0x1); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_TEST_BBCDR_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_15_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 2, 0, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_17_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_17_ANA_VGA2_BOOST_CSTM_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_ANABS_CONFIG_I, 0x1); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_VGA2_BOOST_CSTM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_14_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 13, 13, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_EYE_SCAN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_EYE_SCAN_1_EYE_SCAN_REF_TIMER, 0x400); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_RINGO_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 21, 12, 0x366); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_PMD_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_3_CTRL_FSM_TIMEOUT_X64K, 0x80); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_PMD_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_ON_PERIOD_X64K, 0x18); + field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_PERIOD_X512K, 0x3e); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = 
E56PHY_PMD_CFG_5_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_5_USE_RECENT_MARKER_OFFSET, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_CONT_ON_ADC_GAIN_CAL_ERR, 0x1); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_DO_RX_ADC_OFST_CAL, 0x3); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_RX_ERR_ACTION_EN, 0x40); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST0_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST1_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST2_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST3_WAIT_CNT_X4096, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST4_WAIT_CNT_X4096, 0x1); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST5_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST6_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST7_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST8_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST9_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST10_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST11_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST12_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST13_WAIT_CNT_X4096, 0x4); + 
field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST14_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST15_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_7_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST4_EN, 0x4bf); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST5_EN, 0xc4bf); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_8_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_8_TRAIN_ST7_EN, 0x47ff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_12_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_12_TRAIN_ST15_EN, 0x67ff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_13_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST0_DONE_EN, 0x8001); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST1_DONE_EN, 0x8002); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_14_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_14_TRAIN_ST3_DONE_EN, 0x8008); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_15_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_15_TRAIN_ST4_DONE_EN, 0x8004); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_17_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_17_TRAIN_ST8_DONE_EN, 0x20c0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_18_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_18_TRAIN_ST10_DONE_EN, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_29_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 
E56PHY_CTRL_FSM_CFG_29_TRAIN_ST15_DC_EN, 0x3f6d); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_33_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN0_RATE_SEL, 0x8000); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN1_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_34_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN2_RATE_SEL, 0x8000); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN3_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_KRT_TFSM_CFG_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X1000K, 0x49); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X8000K, 0x37); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_HOLDOFF_TIMER_X256K, 0x2f); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_FETX_FFE_TRAIN_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_FETX_FFE_TRAIN_CFG_0_KRT_FETX_INIT_FFE_CFG_2, 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + return 0; +} + +static int E56phy10gCfg(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 addr, rdata; + + rdata = 0x0000; + addr = E56G__CMS_ANA_OVRDVAL_7_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G__CMS_ANA_OVRDVAL_7 *)&rdata)->ana_lcpll_lf_vco_swing_ctrl_i = 0xf; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56G__CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G__CMS_ANA_OVRDEN_1 *)&rdata)->ovrd_en_ana_lcpll_lf_vco_swing_ctrl_i = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56G__CMS_ANA_OVRDVAL_9_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 23, 0, 0x260000); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56G__CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G__CMS_ANA_OVRDEN_1 
*)&rdata)->ovrd_en_ana_lcpll_lf_test_in_i = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_TXS_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_TXS_CFG_1_ADAPTATION_WAIT_CNT_X256, 0xf); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_WKUP_CNT_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_WKUP_CNTLDO_WKUP_CNT_X32, 0xff); + field_set(&rdata, E56PHY_TXS_WKUP_CNTDCC_WKUP_CNT_X32, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_PIN_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 19, 16, 0x6); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_PIN_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_PIN_OVRDEN_0_OVRD_EN_TX0_EFUSE_BITS_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_ANA_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_ANA_OVRDVAL_1_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_TXS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + E56phyTxFfeCfg(adapter); + + rdata = 0x0000; + addr = E56PHY_RXS_RXS_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_RXS_CFG_0_DSER_DATA_SEL, 0x0); + field_set(&rdata, E56PHY_RXS_RXS_CFG_0_TRAIN_CLK_GATE_BYPASS_EN, 0x1fff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_OSC_CAL_N_CDR_0 *)&rdata)->prediv0 = 0xfa0; + ((E56G_RXS0_OSC_CAL_N_CDR_0 *)&rdata)->target_cnt0= 0x203a; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_4_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->osc_range_sel0= 0x2; + 
((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->vco_code_init= 0x7ff; + ((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->osc_current_boost_en0= 0x1; + ((E56G_RXS0_OSC_CAL_N_CDR_4 *)&rdata)->bbcdr_current_boost0 = 0x0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_5_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_SDM_WIDTH, 0x3); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_PRELOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_POSTLOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_POSTLOCK, 0xc); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_PRELOCK, 0xf); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BBCDR_RDY_CNT, 0x3); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_PRELOCK, 0x7); + field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_POSTLOCK, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_INTL_CONFIG_0_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_INTL_CONFIG_0 *)&rdata)->adc_intl2slice_delay0 = 0x5555; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_INTL_CONFIG_2_ADDR; + rdata = rd32_ephy(hw, addr); + ((E56G_RXS0_INTL_CONFIG_2 *)&rdata)->interleaver_hbw_disable0 = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_LTH, 0x56); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_UTH, 0x6a); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_LTH, 0x1e8); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_UTH, 0x78); + 
txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_LTH, 0x100); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_UTH, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_LTH, 0x4); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_UTH, 0x37); + field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_TXFFE_TRAIN_MOD_TYPE, 0x38); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_VGA_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_0_VGA_TARGET, 0x34); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_VGA_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT0, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT0, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT123, 0xa); + field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT123, 0xa); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT0, 0x9); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT123, 0x9); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_1_LFEQ_LUT, 0x1ffffea); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P1, 0x18); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P2, 0); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P3, 0); + 
txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P1, 1); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P2, 0); + field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P3, 0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_SLICE_DATA_AVG_CNT, 0x3); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_DATA_AVG_CNT, 0x3); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_FE_OFFSET_DAC_CLK_CNT_X8, 0xc); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_1_SAMP_ADAPT_CFG, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_FFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_FFE_TRAINING_0_FFE_TAP_EN, 0xf9ff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_IDLE_DETECT_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX, 0xa); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + //txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDVAL_11, ana_test_adc_clkgen_i, 0x0); + //txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_2, ovrd_en_ana_test_adc_clkgen_i, 0x0); + addr = 0x6cc; + rdata = 0x8020000; + txgbe_wr32_ephy(hw, addr, rdata); + addr = 0x94; + rdata = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_0_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); 
+ field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 4, 0, 0x6); + field_set(&rdata, 14, 13, 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_BBCDR_VCOFILT_BYP_I, 0x1); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_TEST_BBCDR_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_15_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 2, 0, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_17_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_17_ANA_VGA2_BOOST_CSTM_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_ANABS_CONFIG_I, 0x1); + field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_VGA2_BOOST_CSTM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_14_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_EYE_SCAN_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_RXS_EYE_SCAN_1_EYE_SCAN_REF_TIMER, 0x400); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_RINGO_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 21, 12, 0x366); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_PMD_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + 
field_set(&rdata, E56PHY_PMD_CFG_3_CTRL_FSM_TIMEOUT_X64K, 0x80); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_PMD_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_ON_PERIOD_X64K, 0x18); + field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_PERIOD_X512K, 0x3e); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_PMD_CFG_5_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_PMD_CFG_5_USE_RECENT_MARKER_OFFSET, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_CONT_ON_ADC_GAIN_CAL_ERR, 0x1); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_DO_RX_ADC_OFST_CAL, 0x3); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_RX_ERR_ACTION_EN, 0x40); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST0_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST1_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST2_WAIT_CNT_X4096, 0xff); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST3_WAIT_CNT_X4096, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_2_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST4_WAIT_CNT_X4096, 0x1); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST5_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST6_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST7_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST8_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST9_WAIT_CNT_X4096, 0x4); + field_set(&rdata, 
E56PHY_CTRL_FSM_CFG_3_TRAIN_ST10_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST11_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST12_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST13_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST14_WAIT_CNT_X4096, 0x4); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST15_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_7_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST4_EN, 0x4bf); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST5_EN, 0xc4bf); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_8_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_8_TRAIN_ST7_EN, 0x47ff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_12_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_12_TRAIN_ST15_EN, 0x67ff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_13_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST0_DONE_EN, 0x8001); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST1_DONE_EN, 0x8002); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_14_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_14_TRAIN_ST3_DONE_EN, 0x8008); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_15_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_15_TRAIN_ST4_DONE_EN, 0x8004); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_17_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, 
E56PHY_CTRL_FSM_CFG_17_TRAIN_ST8_DONE_EN, 0x20c0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_18_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_18_TRAIN_ST10_DONE_EN, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_29_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_29_TRAIN_ST15_DC_EN, 0x3f6d); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_33_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN0_RATE_SEL, 0x8000); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN1_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_34_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN2_RATE_SEL, 0x8000); + field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN3_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_KRT_TFSM_CFG_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X1000K, 0x49); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X8000K, 0x37); + field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_HOLDOFF_TIMER_X256K, 0x2f); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_FETX_FFE_TRAIN_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + field_set(&rdata, E56PHY_FETX_FFE_TRAIN_CFG_0_KRT_FETX_INIT_FFE_CFG_2, 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + return 0; +} + +static int setphylinkmode(struct txgbe_adapter *adapter, + u8 bplinkmode, u32 bypassCtle) +{ + struct txgbe_hw *hw = &adapter->hw; + int lane_num = 0, status = 0; + u32 rdata = 0; + + u32 speed_select = 0; + u32 pcs_type_sel = 0; + u32 cns_en = 0; + u32 rsfec_en = 0; + u32 pma_type = 0; + u32 an0_rate_select = 0; + + switch (bplinkmode) { + case 10: + bplinkmode = 10; + lane_num = 1; + speed_select = 0; /* 10 
Gb/s */ + pcs_type_sel = 0; /* 10GBASE-R PCS Type */ + cns_en = 0; /* CNS_EN disable */ + rsfec_en = 0; /* RS-FEC disable */ + pma_type = 0xb; /* 10GBASE-KR PMA/PMD type */ + an0_rate_select = 2; /* 10G-KR */ + break; + case 40: + bplinkmode = 40; + lane_num = 4; + speed_select = 3; /* 40 Gb/s */ + pcs_type_sel = 4; /* 40GBASE-R PCS Type */ + cns_en = 0; /* CNS_EN disable */ + rsfec_en = 0; /* RS-FEC disable */ + pma_type = 0b0100001; /* 40GBASE-CR PMA/PMD type */ + an0_rate_select = 4; /* 40G-KR: 3 40G-CR: 4 */ + break; + case 25: + bplinkmode = 25; + lane_num = 1; + speed_select = 5; /* 25 Gb/s */ + pcs_type_sel = 7; /* 25GBASE-R PCS Type */ + cns_en = 1; /* CNS_EN */ + rsfec_en = 1; /* RS-FEC enable*/ + pma_type = 0b0111001; /* 25GBASE-KR PMA/PMD type */ + an0_rate_select = 9; /* 9/10/17 25GK/CR-S or 25GK/CR */ + break; + default: + kr_dbg(KR_MODE, "%s %d :Invalid bplinkmode\n", __func__, __LINE__); + break; + } + + adapter->curbp_link_mode = bplinkmode; + /* To switch to the 40G mode Ethernet operation, complete the following steps:*/ + /* 1. Initiate the vendor-specific software reset by programming + * the VR_RST field (bit [15]) of the VR_PCS_DIG_CTRL1 register to 1. + */ + rdata = txgbe_rd32_epcs(hw, 0x038000); + txgbe_wr32_epcs(hw, 0x038000, rdata | BIT(15)); + + /* 2. Wait for the hardware to clear the value for the VR_RST + * field (bit [15]) of the VR_PCS_DIG_CTRL1 register. + */ + kr_dbg(KR_MODE, "Wait for the bit [15] (VR_RST) to get cleared.\n"); + status = read_poll_timeout(txgbe_rd32_epcs, rdata, + FIELD_GET_M(BIT(15), rdata) == 0, 100, + 200000, false, hw, + 0x038000); + kr_dbg(KR_MODE, "Wait PHY VR_RST = %x, Wait VR_RST %s.\n", + rdata, status ? "FAILED" : "SUCCESS"); + + /* wait rx/tx/cm powerdn_st according pmd 50 2.0.5 */ + status = read_poll_timeout(rd32_ephy, rdata, + (rdata & GENMASK(3, 0)) == 0x9, 100, + 200000, false, hw, 0x14d4); + kr_dbg(KR_MODE, "wait ctrl_fsm_cm_st = %x, %s.\n", + rdata, status ? "FAILED" : "SUCCESS"); + + /* 3. 
Write 4'b0011 to bits [5:2] of the SR_PCS_CTRL1 register. + * 10G: 0 25G: 5 40G: 3 + */ + rdata = txgbe_rd32_epcs(hw, 0x030000); + field_set(&rdata, 5, 2, speed_select); + txgbe_wr32_epcs(hw, 0x030000, rdata); + + /* 4. Write pcs mode sel to bits [3:0] of the SR_PCS_CTRL2 register. + * 10G: 0 25G: 4'b0111 40G: 4'b0100 + */ + rdata = txgbe_rd32_epcs(hw, 0x030007); + field_set(&rdata, 3, 0, pcs_type_sel); + txgbe_wr32_epcs(hw, 0x030007, rdata); + + /* 0 1 1 1 0 0 1 : 25GBASE-KR or 25GBASE-KR-S PMA/PMD type + * 0 1 1 1 0 0 0 : 25GBASE-CR or 25GBASE-CR-S PMA/PMD type + * 0 1 0 0 0 0 1 : 40GBASE-CR4 PMA/PMD type + * 0 1 0 0 0 0 0 : 40GBASE-KR4 PMA/PMD type + * 0 0 0 1 0 1 1 : 10GBASE-KR PMA/PMD type + */ + rdata = txgbe_rd32_epcs(hw, 0x010007); + field_set(&rdata, 6, 0, pma_type); + txgbe_wr32_epcs(hw, 0x010007, rdata); + + /* 5. Write only 25g en to Bits [1:0] of VR_PCS_DIG_CTRL3 register. */ + rdata = txgbe_rd32_epcs(hw, 0x38003); + field_set(&rdata, 1, 0, cns_en); + txgbe_wr32_epcs(hw, 0x38003, rdata); + + /* 6. Program PCS_AM_CNT field of VR_PCS_AM_CNT register to 'd16383 to + * configure the alignment marker interval. To speed-up simulation, + * program a smaller value to this field. + */ + if (bplinkmode == 40) + txgbe_wr32_epcs(hw, 0x38018, 16383); + + /* 7. Program bit [2] of SR_PMA_RS_FEC_CTRL register to 0 + * if previously 1 (as RS-FEC is supported in 25G Mode). + */ + + rdata = txgbe_rd32_epcs(hw, 0x100c8); + field_set(&rdata, 2, 2, rsfec_en); + txgbe_wr32_epcs(hw, 0x100c8, rdata); + + /* 8. To enable BASE-R FEC (if desired), set bit [0]. + * in SR_PMA_KR_FEC_CTRL register + */ + + /* 3. temp applied */ + //status = E56phyCmsCfgForTempTrackRange(adapter, bplinkmode); + + /* 4. set phy an status to 0 */ + //txgbe_wr32_ephy(hw, 0x1640, 0x0000); + rdata = rd32_ephy(hw, 0x1434); + field_set(&rdata, 7, 4, 0xe); // anstatus in single mode just set to 0xe + txgbe_wr32_ephy(hw, 0x1434, rdata); + + /* 9. 
Program Enterprise 56G PHY regs through its own APB interface: + * a. Program PHY registers as mentioned in Table 6-6 on page 1197 to + * configure the PHY to 40G + * Mode. For fast-simulation mode, additionally program, + * the registers shown in the Table 6-7 on page 1199 + * b. Enable the PMD by setting pmd_en field in PMD_CFG[0] (0x1400) + * register + */ + + rdata = 0x0000; + rdata = rd32_ephy(hw, ANA_OVRDVAL0); + field_set(&rdata, 29, 29, 0x1); + field_set(&rdata, 1, 1, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDVAL0, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, ANA_OVRDVAL5); + field_set(&rdata, 24, 24, 0x0); + txgbe_wr32_ephy(hw, ANA_OVRDVAL5, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, ANA_OVRDEN0); + field_set(&rdata, 1, 1, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDEN0, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, ANA_OVRDEN1); + field_set(&rdata, 30, 30, 0x1); + field_set(&rdata, 25, 25, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDEN1, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, PLL0_CFG0); + field_set(&rdata, 25, 24, 0x1); + field_set(&rdata, 17, 16, 0x3); + txgbe_wr32_ephy(hw, PLL0_CFG0, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, PLL0_CFG2); + field_set(&rdata, 12, 8, 0x4); + txgbe_wr32_ephy(hw, PLL0_CFG2, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, PLL1_CFG0); + field_set(&rdata, 25, 24, 0x1); + field_set(&rdata, 17, 16, 0x3); + txgbe_wr32_ephy(hw, PLL1_CFG0, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, PLL1_CFG2); + field_set(&rdata, 12, 8, 0x8); + txgbe_wr32_ephy(hw, PLL1_CFG2, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, PLL0_DIV_CFG0); + field_set(&rdata, 18, 8, 0x294); + field_set(&rdata, 4, 0, 0x8); + txgbe_wr32_ephy(hw, PLL0_DIV_CFG0, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, DATAPATH_CFG0); + field_set(&rdata, 30, 28, 0x7); + field_set(&rdata, 26, 24, 0x5); + if (bplinkmode == 10 || bplinkmode == 40) + field_set(&rdata, 18, 16, 0x5); + else if (bplinkmode == 25) + field_set(&rdata, 18, 16, 
0x3); + field_set(&rdata, 14, 12, 0x5); + field_set(&rdata, 10, 8, 0x5); + txgbe_wr32_ephy(hw, DATAPATH_CFG0, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, DATAPATH_CFG1); + field_set(&rdata, 26, 24, 0x5); + field_set(&rdata, 10, 8, 0x5); + if (bplinkmode == 10 || bplinkmode == 40) { + field_set(&rdata, 18, 16, 0x5); + field_set(&rdata, 2, 0, 0x5); + } else if (bplinkmode == 25) { + field_set(&rdata, 18, 16, 0x3); + field_set(&rdata, 2, 0, 0x3); + } + txgbe_wr32_ephy(hw, DATAPATH_CFG1, rdata); + + rdata = rd32_ephy(hw, AN_CFG1); + field_set(&rdata, 4, 0, an0_rate_select); + txgbe_wr32_ephy(hw, AN_CFG1, rdata); + + status = E56phyCmsCfgForTempTrackRange(adapter, bplinkmode); + + if (bplinkmode == 10) + E56phy10gCfg(adapter); + else if (bplinkmode == 25) + E56phy25gCfg(adapter); + else if (bplinkmode == 40) + txgbe_e56_cfg_40g(hw); + + return status; +} + +int txgbe_e56_set_phylinkmode(struct txgbe_adapter *adapter, + u8 bplinkmode, u32 bypassCtle) +{ + struct txgbe_hw *hw = &adapter->hw; + int status = 0; + u32 rdata; + + switch (bplinkmode) { + case TXGBE_LINK_SPEED_10GB_FULL: + case 10: + bplinkmode = 10; + break; + case TXGBE_LINK_SPEED_40GB_FULL: + case 40: + bplinkmode = 40; + break; + case TXGBE_LINK_SPEED_25GB_FULL: + case 25: + bplinkmode = 25; + break; + default: + kr_dbg(KR_MODE, "%s %d :Invalid bplinkmode\n", __func__, __LINE__); + break; + } + + adapter->an_done = false; + if (adapter->curbp_link_mode == 10) + return 0; + kr_dbg(KR_MODE, "Setup to backplane mode ==========\n"); + + if (adapter->backplane_an) { + u32 backplane_mode = 0; + u32 fec_advertise = 0; + + adapter->an_done = false; + /* pcs + phy rst */ + rdata = rd32(hw, 0x1000c); + if (hw->bus.lan_id == 1) + rdata |= BIT(16); + else + rdata |= BIT(19); + wr32(hw, 0x1000c, rdata); + msleep(20); + + /* clear interrupt */ + txgbe_wr32_epcs(hw, 0x070000, 0); + txgbe_wr32_epcs(hw, 0x030000, 0x8000); + rdata = txgbe_rd32_epcs(hw, 0x070000); + field_set(&rdata, 12, 12, 0x1); + 
txgbe_wr32_epcs(hw, 0x070000, rdata); + txgbe_wr32_epcs(hw, 0x078002, 0x0000); + /* pcs case fec en to work around first */ + txgbe_wr32_epcs(hw, 0x100ab, 1); + + if (txgbe_is_backplane(hw)) { + if ((hw->device_id & 0xFF) == 0x10) { + backplane_mode |= 0x80; + fec_advertise |= TXGBE_10G_FEC_ABL; + } else if ((hw->device_id & 0xFF) == 0x25) { + backplane_mode |= 0xc000; + fec_advertise |= TXGBE_25G_RS_FEC_REQ | + TXGBE_25G_BASE_FEC_REQ; + } else if ((hw->device_id & 0xFF) == 0x40) { + backplane_mode |= BIT(9) | BIT(8); + fec_advertise |= TXGBE_10G_FEC_ABL; + } + } else { + if ((hw->phy.fiber_suppport_speed & TXGBE_LINK_SPEED_10GB_FULL) + == TXGBE_LINK_SPEED_10GB_FULL) { + backplane_mode |= 0x80; + fec_advertise |= TXGBE_10G_FEC_ABL; + } + + if ((hw->phy.fiber_suppport_speed & TXGBE_LINK_SPEED_25GB_FULL) + == TXGBE_LINK_SPEED_25GB_FULL) { + backplane_mode |= 0xc000; + fec_advertise |= TXGBE_25G_RS_FEC_REQ | + TXGBE_25G_BASE_FEC_REQ; + } + + if ((hw->phy.fiber_suppport_speed & TXGBE_LINK_SPEED_40GB_FULL) + == TXGBE_LINK_SPEED_40GB_FULL) { + backplane_mode |= BIT(9) | BIT(8); + fec_advertise |= TXGBE_10G_FEC_ABL; + } + } + + txgbe_wr32_epcs(hw, 0x070010, 0x0001); + + /* 10GKR:7-25KR:14/15-40GKR:8-40GCR:9 */ + txgbe_wr32_epcs(hw, 0x070011, backplane_mode | 0x11); + + /* BASE-R FEC */ + rdata = txgbe_rd32_epcs(hw, 0x70012); + txgbe_wr32_epcs(hw, 0x70012, fec_advertise); + + txgbe_wr32_epcs(hw, 0x070016, 0x0000); + txgbe_wr32_epcs(hw, 0x070017, 0x0); + txgbe_wr32_epcs(hw, 0x070018, 0x0); + + /* config timer */ + txgbe_wr32_epcs(hw, 0x078004, 0x003c); + txgbe_wr32_epcs(hw, 0x078005, CL74_KRTR_TRAINNING_TIMEOUT); + txgbe_wr32_epcs(hw, 0x078006, 25); + txgbe_wr32_epcs(hw, 0x078000, 0x0008 | BIT(2)); + + kr_dbg(KR_MODE, "1.2 Wait 10G KR phy/pcs mode init ....\n"); + status = setphylinkmode(adapter, 10, bypassCtle); + if (status) + return status; + + /* 5. 
CM_ENABLE */ + rdata = rd32_ephy(hw, 0x1400); + field_set(&rdata, 21, 20, 0x3);//pll en + field_set(&rdata, 19, 12, 0x0);// tx disable + field_set(&rdata, 8, 8, 0x0);// pmd mode + field_set(&rdata, 1, 1, 0x1);// pmd en + txgbe_wr32_ephy(hw, 0x1400, rdata); + + /* 6, TX_ENABLE */ + rdata = rd32_ephy(hw, 0x1400); + field_set(&rdata, 19, 12, 0x1);// tx en + txgbe_wr32_ephy(hw, 0x1400, rdata); + + kr_dbg(KR_MODE, "1.3 Wait 10G PHY RXS....\n"); + status = E56phyRxsOscInitForTempTrackRange(adapter, 10); + if (status) + return status; + + /* Wait an 10g fsm_rx_sts */ + status = read_poll_timeout(rd32_ephy, rdata, + ((rdata & 0x3f) == 0xb), 1000, + 200000, false, hw, + E56PHY_CTRL_FSM_RX_STAT_0_ADDR); + kr_dbg(KR_MODE, "Wait 10g fsm_rx_sts = %x, Wait rx_sts %s.\n", rdata, + status ? "FAILED" : "SUCCESS"); + + rdata = txgbe_rd32_epcs(hw, 0x070000); + field_set(&rdata, 12, 12, 0x1); + txgbe_wr32_epcs(hw, 0x070000, rdata); + kr_dbg(KR_MODE, "Setup the backplane mode========end ==\n"); + } else { + if ((hw->phy.fiber_suppport_speed & TXGBE_LINK_SPEED_40GB_FULL) + == TXGBE_LINK_SPEED_40GB_FULL) + txgbe_set_link_to_amlite(hw, + TXGBE_LINK_SPEED_40GB_FULL); + else if ((hw->phy.fiber_suppport_speed & TXGBE_LINK_SPEED_25GB_FULL) + == TXGBE_LINK_SPEED_25GB_FULL) + txgbe_set_link_to_amlite(hw, + TXGBE_LINK_SPEED_25GB_FULL); + else if (hw->phy.fiber_suppport_speed == + TXGBE_LINK_SPEED_10GB_FULL) + txgbe_set_link_to_amlite(hw, + TXGBE_LINK_SPEED_10GB_FULL); + } + + return status; +} + +static void txgbe_e56_print_page_status(struct txgbe_adapter *adapter, + bkpan73ability *tBkpAn73Ability, + bkpan73ability *tLpBkpAn73Ability) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 rdata = 0; + + /* Read the local AN73 Base Page Ability Registers */ + kr_dbg(KR_MODE, "Read the local Base Page Ability Registers\n"); + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG1); + tBkpAn73Ability->nextPage = (rdata & BIT(15)) ? 
1 : 0; + kr_dbg(KR_MODE, "\tread 70010 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG2); + kr_dbg(KR_MODE, "\tread 70011 data %0x\n", rdata); + tBkpAn73Ability->linkAbility = (rdata >> 5) & GENMASK(10, 0); + /* amber-lite only support 10GKR - 25GKR/CR - 25GKR-S/CR-S */ + kr_dbg(KR_MODE, "\t10GKR : %x\t25GKR/CR-S: %x\t25GKR/CR : %x\n", + tBkpAn73Ability->linkAbility & BIT(ABILITY_10GBASE_KR) ? 1 : 0, + tBkpAn73Ability->linkAbility & BIT(ABILITY_25GBASE_KRCR_S) ? 1 : 0, + tBkpAn73Ability->linkAbility & BIT(ABILITY_25GBASE_KRCR) ? 1 : 0); + kr_dbg(KR_MODE, "\t40GCR4 : %x\t40GKR4 : %x\n", + tBkpAn73Ability->linkAbility & BIT(ABILITY_40GBASE_CR4) ? 1 : 0, + tBkpAn73Ability->linkAbility & BIT(ABILITY_40GBASE_KR4) ? 1 : 0); + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG3); + kr_dbg(KR_MODE, "\tF1:FEC Req\tF0:FEC Sup\tF3:25GFEC\tF2:25GRS\n"); + kr_dbg(KR_MODE, "\tF1: %d\t\tF0: %d\t\tF3: %d\t\tF2: %d\n", + ((rdata >> 15) & 0x01), ((rdata >> 14) & 0x01), + ((rdata >> 13) & 0x01), ((rdata >> 12) & 0x01)); + tBkpAn73Ability->fecAbility = rdata; + kr_dbg(KR_MODE, "\tread 70012 data %0x\n", rdata); + + /* Read the link partner AN73 Base Page Ability Registers */ + kr_dbg(KR_MODE, "Read the link partner Base Page Ability Registers\n"); + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_LP_ABL1); + tLpBkpAn73Ability->nextPage = (rdata & BIT(15)) ? 1 : 0; + kr_dbg(KR_MODE, "\tread 70013 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_LP_ABL2); + tLpBkpAn73Ability->linkAbility = (rdata >> 5) & GENMASK(10, 0); + kr_dbg(KR_MODE, "\tread 70014 data %0x\n", rdata); + kr_dbg(KR_MODE, "\tKX : %x\tKX4 : %x\n", + tLpBkpAn73Ability->linkAbility & BIT(ABILITY_1000BASE_KX) ? 1 : 0, + tLpBkpAn73Ability->linkAbility & BIT(ABILITY_10GBASE_KX4) ? 1 : 0); + kr_dbg(KR_MODE, "\t10GKR : %x\t25GKR/CR-S: %x\t25GKR/CR : %x\n", + tLpBkpAn73Ability->linkAbility & BIT(ABILITY_10GBASE_KR) ? 
1 : 0, + tLpBkpAn73Ability->linkAbility & BIT(ABILITY_25GBASE_KRCR_S) ? 1 : 0, + tLpBkpAn73Ability->linkAbility & BIT(ABILITY_25GBASE_KRCR) ? 1 : 0); + kr_dbg(KR_MODE, "\t40GCR4 : %x\t40GKR4 : %x\n", + tLpBkpAn73Ability->linkAbility & BIT(ABILITY_40GBASE_CR4) ? 1 : 0, + tLpBkpAn73Ability->linkAbility & BIT(ABILITY_40GBASE_KR4) ? 1 : 0); + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_LP_ABL3); + kr_dbg(KR_MODE, "\tF1:FEC Req\tF0:FEC Sup\tF3:25GFEC\tF2:25GRS\n"); + kr_dbg(KR_MODE, "\tF1: %d\t\tF0: %d\t\tF3: %d\t\tF2: %d\n", + ((rdata >> 15) & 0x01), ((rdata >> 14) & 0x01), + ((rdata >> 13) & 0x01), ((rdata >> 12) & 0x01)); + tLpBkpAn73Ability->fecAbility = rdata; + adapter->fec_mode = 0; + if (rdata & TXGBE_25G_RS_FEC_REQ) + adapter->fec_mode |= TXGBE_25G_RS_FEC_REQ; + if (rdata & TXGBE_25G_BASE_FEC_REQ) + adapter->fec_mode |= TXGBE_25G_BASE_FEC_REQ; + if (rdata & TXGBE_10G_FEC_ABL) + adapter->fec_mode |= TXGBE_10G_FEC_ABL; + if (rdata & TXGBE_10G_FEC_REQ) + adapter->fec_mode |= TXGBE_10G_FEC_REQ; + kr_dbg(KR_MODE, "\tread 70015 data %0x\n", rdata); + + kr_dbg(KR_MODE, "\tread 70016 data %0x\n", txgbe_rd32_epcs(hw, 0x70016)); + kr_dbg(KR_MODE, "\tread 70017 data %0x\n", txgbe_rd32_epcs(hw, 0x70017)); + kr_dbg(KR_MODE, "\tread 70018 data %0x\n", txgbe_rd32_epcs(hw, 0x70018)); + kr_dbg(KR_MODE, "\tread 70019 data %0x\n", txgbe_rd32_epcs(hw, 0x70019)); + kr_dbg(KR_MODE, "\tread 7001a data %0x\n", txgbe_rd32_epcs(hw, 0x7001a)); + kr_dbg(KR_MODE, "\tread 7001b data %0x\n", txgbe_rd32_epcs(hw, 0x7001b)); + +} + +static int chk_bkp_ability(struct txgbe_adapter *adapter, + bkpan73ability tBkpAn73Ability, + bkpan73ability tLpBkpAn73Ability) +{ + unsigned int comLinkAbility; + + kr_dbg(KR_MODE, "CheckBkpAn73Ability():\n"); + /* Check the common link ability and take action based on the result*/ + comLinkAbility = tBkpAn73Ability.linkAbility & + tLpBkpAn73Ability.linkAbility; + kr_dbg(KR_MODE, "comAbility= 0x%x, Ability= 0x%x, lpAbility= 0x%x\n", + comLinkAbility, 
tBkpAn73Ability.linkAbility, + tLpBkpAn73Ability.linkAbility); + + if (comLinkAbility == 0) { + adapter->bp_link_mode = 0; + kr_dbg(KR_MODE, "Do not support any compatible speed mode!\n"); + return -EINVAL; + } else if (comLinkAbility & BIT(ABILITY_40GBASE_CR4)) { + kr_dbg(KR_MODE, "Link mode is [ABILITY_40GBASE_CR4].\n"); + adapter->bp_link_mode = 40; + } else if (comLinkAbility & BIT(ABILITY_25GBASE_KRCR_S)) { + kr_dbg(KR_MODE, "Link mode is [ABILITY_25GBASE_KRCR_S].\n"); + adapter->fec_mode = TXGBE_25G_RS_FEC_REQ; + adapter->bp_link_mode = 25; + } else if (comLinkAbility & BIT(ABILITY_25GBASE_KRCR)) { + kr_dbg(KR_MODE, "Link mode is [ABILITY_25GBASE_KRCR].\n"); + adapter->bp_link_mode = 25; + } else if (comLinkAbility & BIT(ABILITY_10GBASE_KR)) { + kr_dbg(KR_MODE, "Link mode is [ABILITY_10GBASE_KR].\n"); + adapter->bp_link_mode = 10; + } + + return 0; +} + +static int txgbe_e56_exchange_page(struct txgbe_adapter *adapter) +{ + bkpan73ability tBkpAn73Ability = {0}, tLpBkpAn73Ability = {0}; + struct txgbe_hw *hw = &adapter->hw; + u32 an_int, base_page = 0; + int count = 0; + + an_int = txgbe_rd32_epcs(hw, 0x78002); + if (!(an_int & TXGBE_E56_AN_PG_RCV)) + return -EINVAL; + + /* 500ms timeout */ + for (count = 0; count < 5000; count++) { + u32 fsm = txgbe_rd32_epcs(hw, 0x78010); + kr_dbg(KR_MODE, "-----count----- %d - fsm: %x\n", + count, fsm); + if (an_int & TXGBE_E56_AN_PG_RCV) { + u8 next_page = 0; + u32 rdata, addr; + + txgbe_e56_print_page_status(adapter, &tBkpAn73Ability, + &tLpBkpAn73Ability); + addr = base_page == 0 ? 
0x70013 : 0x70019; + rdata = txgbe_rd32_epcs(hw, addr); + if (rdata & BIT(14)) { + if (rdata & BIT(15)) { + /* always set null message */ + txgbe_wr32_epcs(hw, 0x70016, 0x2001); + kr_dbg(KR_MODE, "write 70016 0x%0x\n", + 0x2001); + rdata = txgbe_rd32_epcs(hw, 0x70010); + txgbe_wr32_epcs(hw, 0x70010, + rdata | BIT(15)); + kr_dbg(KR_MODE, "write 70010 0x%0x\n", + rdata); + next_page = 1; + } else { + next_page = 0; + } + base_page = 1; + } + /* clear an pacv int */ + rdata = txgbe_rd32_epcs(hw, 0x78002); + kr_dbg(KR_MODE, "read 78002 data %0x and clear pacv\n", rdata); + field_set(&rdata, 2, 2, 0x0); + txgbe_wr32_epcs(hw, 0x78002, rdata); + if (next_page == 0) { + if ((fsm & 0x8) == 0x8) { + adapter->fsm = 0x8; + goto check_ability; + } + } + } + usec_delay(100); + } + +check_ability: + return chk_bkp_ability(adapter, tBkpAn73Ability, tLpBkpAn73Ability); +} + +static int txgbe_e56_cl72_trainning(struct txgbe_adapter *adapter) +{ + u32 bylinkmode = adapter->bp_link_mode; + struct txgbe_hw *hw = &adapter->hw; + u8 bypassCtle = hw->bypassCtle; + int status = 0, pTempData = 0; + u32 lane_num = 0, lane_idx = 0; + u32 pmd_ctrl = 0; + u32 txffe = 0; + int ret = 0; + u32 rdata; + + u8 pll_en_cfg = 0; + u8 pmd_mode = 0; + + switch (bylinkmode) { + case 10: + bylinkmode = 10; + lane_num = 1; + pll_en_cfg = 3; + pmd_mode = 0; + break; + case 40: + bylinkmode = 40; + lane_num = 4; + pll_en_cfg = 0; /* pll_en_cfg : single link to 0 */ + pmd_mode = 1; /* pmd mode : 1 - single link */ + break; + case 25: + bylinkmode = 25; + lane_num = 1; + pll_en_cfg = 3; + pmd_mode = 0; + break; + default: + kr_dbg(KR_MODE, "%s %d :Invalid speed\n", __func__, __LINE__); + break; + } + + kr_dbg(KR_MODE, "2.3 Wait %dG KR phy mode init ....\n", bylinkmode); + status = setphylinkmode(adapter, bylinkmode, bypassCtle); + + /* 13. 
set phy an status to 1 - AN_CFG[0]: 4-7 lane0-lane3 */ + rdata = rd32_ephy(hw, 0x1434); + field_set(&rdata, 7, 4, GENMASK(lane_num - 1, 0)); + txgbe_wr32_ephy(hw, 0x1434, rdata); + + /* 14 and 15. kr training: set BASER_PMD_CONTROL[0, 7] for lane0-4 */ + rdata = rd32_ephy(hw, 0x1640); + field_set(&rdata, 7, 0, GENMASK(2 * lane_num - 1, 0)); + txgbe_wr32_ephy(hw, 0x1640, rdata); + + /* 16. enable CMS and its internal PLL */ + rdata = rd32_ephy(hw, 0x1400); + field_set(&rdata, 21, 20, pll_en_cfg); + field_set(&rdata, 19, 12, 0); /* tx/rx off */ + field_set(&rdata, 8, 8, pmd_mode); + field_set(&rdata, 1, 1, 0x1); /* pmd en */ + txgbe_wr32_ephy(hw, 0x1400, rdata); + + /* 17. tx enable PMD_CFG[0] */ + rdata = rd32_ephy(hw, 0x1400); + field_set(&rdata, 15, 12, GENMASK(lane_num - 1, 0)); /* tx en */ + txgbe_wr32_ephy(hw, 0x1400, rdata); + + /* 18 */ + /* 19. rxs calibration and adaotation sequeence */ + kr_dbg(KR_MODE, "2.4 Wait %dG RXS.... fsm: %x\n", + bylinkmode, txgbe_rd32_epcs(hw, 0x78010)); + status = E56phyRxsCalibAdaptSeq(adapter, bylinkmode, bypassCtle); + ret |= status; + /* 20 */ + kr_dbg(KR_MODE, "2.5 Wait %dG phy calibration.... fsm: %x\n", + bylinkmode, txgbe_rd32_epcs(hw, 0x78010)); + E56phySetRxsUfineLeMax(adapter, bylinkmode); + status = txgbe_e56_get_temp(hw, &pTempData); + if (bylinkmode == 40) + status = txgbe_temp_track_seq_40g(hw, TXGBE_LINK_SPEED_40GB_FULL); + else + status = E56phyRxsPostCdrLockTempTrackSeq(adapter, bylinkmode); + /* 21 */ + kr_dbg(KR_MODE, "2.6 Wait %dG phy kr training check.... fsm: %x\n", + bylinkmode, txgbe_rd32_epcs(hw, 0x78010)); + status = read_poll_timeout(rd32_ephy, rdata, + ((rdata & 0xe) & GENMASK(lane_num, 1)) == + (0xe & GENMASK(lane_num, 1)), 100, + 200000, false, hw, 0x163c); + pmd_ctrl = rd32_ephy(hw, 0x1644); + kr_dbg(KR_MODE, "KR TRAINNING CHECK = %x, %s. pmd_ctrl:%lx-%lx-%lx-%lx\n", + rdata, status ? 
"FAILED" : "SUCCESS", + FIELD_GET_M(GENMASK(3, 0), pmd_ctrl), + FIELD_GET_M(GENMASK(7, 4), pmd_ctrl), + FIELD_GET_M(GENMASK(11, 8), pmd_ctrl), + FIELD_GET_M(GENMASK(15, 12), pmd_ctrl)); + ret |= status; + kr_dbg(KR_MODE, "before: %x-%x-%x-%x\n", + rd32_ephy(hw, 0x141c), rd32_ephy(hw, 0x1420), + rd32_ephy(hw, 0x1424), rd32_ephy(hw, 0x1428)); + + for (lane_idx = 0; lane_idx < lane_num; lane_idx++) { + txffe = rd32_ephy(hw, 0x828 + lane_idx * 0x100); + kr_dbg(KR_MODE, "after[%x]: %lx-%lx-%lx-%lx\n", lane_idx, + FIELD_GET_M(GENMASK(6, 0), txffe), + FIELD_GET_M(GENMASK(21, 16), txffe), + FIELD_GET_M(GENMASK(29, 24), txffe), + FIELD_GET_M(GENMASK(13, 8), txffe)); + } + + /* 22 */ + kr_dbg(KR_MODE, "2.7 Wait %dG phy Rx adc.... fsm:%x\n", + bylinkmode, txgbe_rd32_epcs(hw, 0x78010)); + status = E56phyRxsAdcAdaptSeq(adapter, bypassCtle); + + return ret; +} + +static int handle_e56_bkp_an73_flow(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int status = 0; + u32 rdata; + + kr_dbg(KR_MODE, "2.1 Wait page changed ....\n"); + status = txgbe_e56_exchange_page(adapter); + if (status) { + kr_dbg(KR_MODE, "Exchange page failed\n"); + return status; + } + + kr_dbg(KR_MODE, "2.2 Wait page changed ..done..\n"); + txgbe_wr32_epcs(hw, 0x100ab, 0); + if (AN_TRAINNING_MODE) { + rdata = txgbe_rd32_epcs(hw, 0x70000); + kr_dbg(KR_MODE, "read 0x70000 data %0x\n", rdata); + txgbe_wr32_epcs(hw, 0x70000, 0); + kr_dbg(KR_MODE, "write 0x70000 0x%0x\n", 0); + } + + rdata = txgbe_rd32_epcs(hw, 0x78002); + kr_dbg(KR_MODE, "read 78002 data %0x and clear page int\n", rdata); + field_set(&rdata, 2, 2, 0x0); + txgbe_wr32_epcs(hw, 0x78002, rdata); + + /* 10 RXS_DISABLE - TXS_DISABLE - CMS_DISABLE */ + /* dis phy tx/rx lane */ + rdata = rd32_ephy(hw, 0x1400); + field_set(&rdata, 19, 16, 0x0); + field_set(&rdata, 15, 12, 0x0); + field_set(&rdata, 1, 1, 0x0); + txgbe_wr32_ephy(hw, 0x1400, rdata); + kr_dbg(KR_MODE, "Ephy Write A: 0x%x, D: 0x%x\n", 0x1400, rdata); + /* wait rx/tx/cm 
powerdn_st according pmd 50 2.0.5 */ + status = read_poll_timeout(rd32_ephy, rdata, + (rdata & GENMASK(3, 0)) == 0x9, 100, + 200000, false, hw, 0x14d4); + kr_dbg(KR_MODE, "wait ctrl_fsm_cm_st = %x, %s.\n", + rdata, status ? "FAILED" : "SUCCESS"); + + if (adapter->fec_mode & TXGBE_25G_RS_FEC_REQ) { + txgbe_wr32_epcs(hw, 0x180a3, 0x68c1); + txgbe_wr32_epcs(hw, 0x180a4, 0x3321); + txgbe_wr32_epcs(hw, 0x180a5, 0x973e); + txgbe_wr32_epcs(hw, 0x180a6, 0xccde); + + txgbe_wr32_epcs(hw, 0x38018, 1024); + rdata = txgbe_rd32_epcs(hw, 0x100c8); + field_set(&rdata, 2, 2, 1); + txgbe_wr32_epcs(hw, 0x100c8, rdata); + kr_dbg(KR_MODE, "Advertised FEC modes : %s\n", "RS-FEC"); + adapter->cur_fec_link = TXGBE_PHY_FEC_RS; + } else if (adapter->fec_mode & TXGBE_25G_BASE_FEC_REQ) { + /* FEC: FC-FEC/BASE-R */ + txgbe_wr32_epcs(hw, 0x100ab, BIT(0)); + kr_dbg(KR_MODE, "Epcs Write A: 0x%x, D: 0x%x\n", 0x100ab, 1); + kr_dbg(KR_MODE, "Advertised FEC modes : %s\n", "25GBASE-R"); + adapter->cur_fec_link = TXGBE_PHY_FEC_BASER; + } else if (adapter->fec_mode & (TXGBE_10G_FEC_REQ)) { + /* FEC: FC-FEC/BASE-R */ + txgbe_wr32_epcs(hw, 0x100ab, BIT(0)); + kr_dbg(KR_MODE, "Epcs Write A: 0x%x, D: 0x%x\n", 0x100ab, 1); + kr_dbg(KR_MODE, "Advertised FEC modes : %s\n", "BASE-R"); + adapter->cur_fec_link = TXGBE_PHY_FEC_BASER; + } else { + kr_dbg(KR_MODE, "Advertised FEC modes : %s\n", "NONE"); + adapter->cur_fec_link = TXGBE_PHY_FEC_OFF; + } + + status = txgbe_e56_cl72_trainning(adapter); + rdata = rd32_ephy(hw, E56PHY_RXS_IDLE_DETECT_1_ADDR); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX, 0x28); + field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN, 0xa); + txgbe_wr32_ephy(hw, E56PHY_RXS_IDLE_DETECT_1_ADDR, rdata); + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ADDR, E56PHY_INTR_0_IDLE_ENTRY1); + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ADDR, E56PHY_INTR_1_IDLE_EXIT1); + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ENABLE_ADDR, E56PHY_INTR_0_IDLE_ENTRY1); + txgbe_wr32_ephy(hw, 
E56PHY_INTR_1_ENABLE_ADDR, E56PHY_INTR_1_IDLE_EXIT1); + + return status; +} + +void txgbe_e56_bp_watchdog_event(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u32 rlu = 0, an_int = 0, an_int1 = 0; + struct txgbe_hw *hw = &adapter->hw; + u32 value = 0, fsm = 0; + int ret = 0; + + if (!(hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core0 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core1 || + txgbe_is_backplane(hw))) + return; + + /* only continue if link is down */ + if (netif_carrier_ok(netdev)) + return; + + if (!adapter->backplane_an) + return; + + value = txgbe_rd32_epcs(hw, 0x78002); + an_int = value; + if (value & TXGBE_E56_AN_INT_CMPLT) { + adapter->an_done = true; + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + field_set(&value, 0, 0, 0); + txgbe_wr32_epcs(hw, 0x78002, value); + } + + if (value & TXGBE_E56_AN_INC_LINK) { + field_set(&value, 1, 1, 0); + txgbe_wr32_epcs(hw, 0x78002, value); + } + + if (value & TXGBE_E56_AN_TXDIS) { + field_set(&value, 3, 3, 0); + txgbe_wr32_epcs(hw, 0x78002, value); + mutex_lock(&adapter->e56_lock); + txgbe_e56_set_phylinkmode(adapter, 10, hw->bypassCtle); + mutex_unlock(&adapter->e56_lock); + goto an_status; + } + + if (value & TXGBE_E56_AN_PG_RCV) { + kr_dbg(KR_MODE, "Enter training\n"); + ret = handle_e56_bkp_an73_flow(adapter); + if (!AN_TRAINNING_MODE) { + fsm = txgbe_rd32_epcs(hw, 0x78010); + if (fsm & 0x8) + goto an_status; + if (ret) { + kr_dbg(KR_MODE, "Training FAILED, do reset\n"); + mutex_lock(&adapter->e56_lock); + txgbe_e56_set_phylinkmode(adapter, 10, hw->bypassCtle); + mutex_unlock(&adapter->e56_lock); + } else { + kr_dbg(KR_MODE, "ALL SUCCESSED\n"); + } + } else { + if (ret) { + kr_dbg(KR_MODE, "Training FAILED, do reset\n"); + mutex_lock(&adapter->e56_lock); + txgbe_e56_set_phylinkmode(adapter, 10, hw->bypassCtle); + mutex_unlock(&adapter->e56_lock); + } else { + 
adapter->an_done = true; + } + } + } + +an_status: + an_int1 = txgbe_rd32_epcs(hw, 0x78002); + if (an_int1 & TXGBE_E56_AN_INT_CMPLT) { + adapter->an_done = true; + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + } + rlu = txgbe_rd32_epcs(hw, 0x30001); + kr_dbg(KR_MODE, "RLU:%x MLU:%x INT:%x-%x CTL:%x fsm:%x pmd_cfg0:%x an_done:%d by:%d\n", + txgbe_rd32_epcs(hw, 0x30001), rd32(hw, 0x14404), + an_int, an_int1, + txgbe_rd32_epcs(hw, 0x70000), + txgbe_rd32_epcs(hw, 0x78010), + rd32_ephy(hw, 0x1400), + adapter->an_done, + hw->bypassCtle); +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_e56_bp.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56_bp.h new file mode 100644 index 0000000000000000000000000000000000000000..ec43df3da0ce45c9d22499510032576de16c4ba4 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56_bp.h @@ -0,0 +1,268 @@ +#ifndef _TXGBE_E56_BP_H_ +#define _TXGBE_E56_BP_H_ + +#define TXGBE_E56_AN_TXDIS BIT(3) +#define TXGBE_E56_AN_PG_RCV BIT(2) +#define TXGBE_E56_AN_INC_LINK BIT(1) +#define TXGBE_E56_AN_INT_CMPLT BIT(0) + +#define TXGBE_10G_FEC_REQ BIT(15) +#define TXGBE_10G_FEC_ABL BIT(14) +#define TXGBE_25G_BASE_FEC_REQ BIT(13) +#define TXGBE_25G_RS_FEC_REQ BIT(12) + +typedef union { + struct { + u32 tx0_cursor_factor : 7; + u32 rsvd0 : 1; + u32 tx1_cursor_factor : 7; + u32 rsvd1 : 1; + u32 tx2_cursor_factor : 7; + u32 rsvd2 : 1; + u32 tx3_cursor_factor : 7; + u32 rsvd3 : 1; + }; + u32 reg; +} E56G__PMD_TX_FFE_CFG_1; + +#define E56G__PMD_TX_FFE_CFG_1_NUM 1 +#define E56G__PMD_TX_FFE_CFG_1_ADDR (E56G__BASEADDR+0x141c) +#define E56G__PMD_TX_FFE_CFG_1_PTR ((E56G__PMD_TX_FFE_CFG_1 *)(E56G__PMD_TX_FFE_CFG_1_ADDR)) +#define E56G__PMD_TX_FFE_CFG_1_STRIDE 4 +#define E56G__PMD_TX_FFE_CFG_1_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_1_ACC_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_1_READ_MSB 30 +#define E56G__PMD_TX_FFE_CFG_1_READ_LSB 0 +#define E56G__PMD_TX_FFE_CFG_1_WRITE_MSB 30 +#define E56G__PMD_TX_FFE_CFG_1_WRITE_LSB 0 +#define 
E56G__PMD_TX_FFE_CFG_1_RESET_VALUE 0x3f3f3f3f + +typedef union { + struct { + u32 tx0_precursor1_factor : 6; + u32 rsvd0 : 2; + u32 tx1_precursor1_factor : 6; + u32 rsvd1 : 2; + u32 tx2_precursor1_factor : 6; + u32 rsvd2 : 2; + u32 tx3_precursor1_factor : 6; + u32 rsvd3 : 2; + }; + u32 reg; +} E56G__PMD_TX_FFE_CFG_2; + +#define E56G__PMD_TX_FFE_CFG_2_NUM 1 +#define E56G__PMD_TX_FFE_CFG_2_ADDR (E56G__BASEADDR+0x1420) +#define E56G__PMD_TX_FFE_CFG_2_PTR ((E56G__PMD_TX_FFE_CFG_2 *)(E56G__PMD_TX_FFE_CFG_2_ADDR)) +#define E56G__PMD_TX_FFE_CFG_2_STRIDE 4 +#define E56G__PMD_TX_FFE_CFG_2_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_2_ACC_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_2_READ_MSB 29 +#define E56G__PMD_TX_FFE_CFG_2_READ_LSB 0 +#define E56G__PMD_TX_FFE_CFG_2_WRITE_MSB 29 +#define E56G__PMD_TX_FFE_CFG_2_WRITE_LSB 0 +#define E56G__PMD_TX_FFE_CFG_2_RESET_VALUE 0x0 + +typedef union { + struct { + u32 tx0_precursor2_factor : 6; + u32 rsvd0 : 2; + u32 tx1_precursor2_factor : 6; + u32 rsvd1 : 2; + u32 tx2_precursor2_factor : 6; + u32 rsvd2 : 2; + u32 tx3_precursor2_factor : 6; + u32 rsvd3 : 2; + }; + u32 reg; +} E56G__PMD_TX_FFE_CFG_3; +#define E56G__PMD_TX_FFE_CFG_3_NUM 1 +#define E56G__PMD_TX_FFE_CFG_3_ADDR (E56G__BASEADDR+0x1424) +#define E56G__PMD_TX_FFE_CFG_3_PTR ((E56G__PMD_TX_FFE_CFG_3 *)(E56G__PMD_TX_FFE_CFG_3_ADDR)) +#define E56G__PMD_TX_FFE_CFG_3_STRIDE 4 +#define E56G__PMD_TX_FFE_CFG_3_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_3_ACC_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_3_READ_MSB 29 +#define E56G__PMD_TX_FFE_CFG_3_READ_LSB 0 +#define E56G__PMD_TX_FFE_CFG_3_WRITE_MSB 29 +#define E56G__PMD_TX_FFE_CFG_3_WRITE_LSB 0 +#define E56G__PMD_TX_FFE_CFG_3_RESET_VALUE 0x0 + +typedef union { + struct { + u32 tx0_postcursor_factor : 6; + u32 rsvd0 : 2; + u32 tx1_postcursor_factor : 6; + u32 rsvd1 : 2; + u32 tx2_postcursor_factor : 6; + u32 rsvd2 : 2; + u32 tx3_postcursor_factor : 6; + u32 rsvd3 : 2; + }; + u32 reg; +} E56G__PMD_TX_FFE_CFG_4; +#define E56G__PMD_TX_FFE_CFG_4_NUM 1 +#define 
E56G__PMD_TX_FFE_CFG_4_ADDR (E56G__BASEADDR+0x1428) +#define E56G__PMD_TX_FFE_CFG_4_PTR ((E56G__PMD_TX_FFE_CFG_4 *)(E56G__PMD_TX_FFE_CFG_4_ADDR)) +#define E56G__PMD_TX_FFE_CFG_4_STRIDE 4 +#define E56G__PMD_TX_FFE_CFG_4_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_4_ACC_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_4_READ_MSB 29 +#define E56G__PMD_TX_FFE_CFG_4_READ_LSB 0 +#define E56G__PMD_TX_FFE_CFG_4_WRITE_MSB 29 +#define E56G__PMD_TX_FFE_CFG_4_WRITE_LSB 0 +#define E56G__PMD_TX_FFE_CFG_4_RESET_VALUE 0x0 + +typedef union { + struct { + u32 ana_lcpll_lf_vco_swing_ctrl_i : 4; + u32 ana_lcpll_lf_lpf_setcode_calib_i : 5; + u32 rsvd0 : 3; + u32 ana_lcpll_lf_vco_coarse_bin_i : 5; + u32 rsvd1 : 3; + u32 ana_lcpll_lf_vco_fine_therm_i : 8; + u32 ana_lcpll_lf_clkout_fb_ctrl_i : 2; + u32 rsvd2 : 2; + }; + u32 reg; +} E56G__CMS_ANA_OVRDVAL_7; +#define E56G__CMS_ANA_OVRDVAL_7_NUM 1 +#define E56G__CMS_ANA_OVRDVAL_7_ADDR (E56G__BASEADDR+0xccc) +#define E56G__CMS_ANA_OVRDVAL_7_PTR ((E56G__CMS_ANA_OVRDVAL_7 *)(E56G__CMS_ANA_OVRDVAL_7_ADDR)) +#define E56G__CMS_ANA_OVRDVAL_7_STRIDE 4 +#define E56G__CMS_ANA_OVRDVAL_7_SIZE 32 +#define E56G__CMS_ANA_OVRDVAL_7_ACC_SIZE 32 +#define E56G__CMS_ANA_OVRDVAL_7_READ_MSB 29 +#define E56G__CMS_ANA_OVRDVAL_7_READ_LSB 0 +#define E56G__CMS_ANA_OVRDVAL_7_WRITE_MSB 29 +#define E56G__CMS_ANA_OVRDVAL_7_WRITE_LSB 0 +#define E56G__CMS_ANA_OVRDVAL_7_RESET_VALUE 0x0 + +typedef union { + struct { + u32 ovrd_en_ana_lcpll_hf_vco_amp_status_o : 1; + u32 ovrd_en_ana_lcpll_hf_clkout_fb_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_hf_clkdiv_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_hf_en_odiv_i : 1; + u32 ovrd_en_ana_lcpll_hf_test_in_i : 1; + u32 ovrd_en_ana_lcpll_hf_test_out_o : 1; + u32 ovrd_en_ana_lcpll_lf_en_bias_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_loop_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_cp_i : 1; + u32 ovrd_en_ana_lcpll_lf_icp_base_i : 1; + u32 ovrd_en_ana_lcpll_lf_icp_fine_i : 1; + u32 ovrd_en_ana_lcpll_lf_lpf_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_lpf_setcode_calib_i : 1; + u32 
ovrd_en_ana_lcpll_lf_set_lpf_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_vco_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_sel_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_swing_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_coarse_bin_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_fine_therm_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_amp_status_o : 1; + u32 ovrd_en_ana_lcpll_lf_clkout_fb_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_clkdiv_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_odiv_i : 1; + u32 ovrd_en_ana_lcpll_lf_test_in_i : 1; + u32 ovrd_en_ana_lcpll_lf_test_out_o : 1; + u32 ovrd_en_ana_lcpll_hf_refclk_select_i : 1; + u32 ovrd_en_ana_lcpll_lf_refclk_select_i : 1; + u32 ovrd_en_ana_lcpll_hf_clk_ref_sel_i : 1; + u32 ovrd_en_ana_lcpll_lf_clk_ref_sel_i : 1; + u32 ovrd_en_ana_test_bias_i : 1; + u32 ovrd_en_ana_test_slicer_i : 1; + u32 ovrd_en_ana_test_sampler_i : 1; + }; + u32 reg; +} E56G__CMS_ANA_OVRDEN_1; +#define E56G__CMS_ANA_OVRDEN_1_NUM 1 +#define E56G__CMS_ANA_OVRDEN_1_ADDR (E56G__BASEADDR+0xca8) +#define E56G__CMS_ANA_OVRDEN_1_PTR ((E56G__CMS_ANA_OVRDEN_1 *)(E56G__CMS_ANA_OVRDEN_1_ADDR)) +#define E56G__CMS_ANA_OVRDEN_1_STRIDE 4 +#define E56G__CMS_ANA_OVRDEN_1_SIZE 32 +#define E56G__CMS_ANA_OVRDEN_1_ACC_SIZE 32 +#define E56G__CMS_ANA_OVRDEN_1_READ_MSB 31 +#define E56G__CMS_ANA_OVRDEN_1_READ_LSB 0 +#define E56G__CMS_ANA_OVRDEN_1_WRITE_MSB 31 +#define E56G__CMS_ANA_OVRDEN_1_WRITE_LSB 0 +#define E56G__CMS_ANA_OVRDEN_1_RESET_VALUE 0x0 + +typedef union { + struct { + u32 ana_lcpll_lf_test_in_i : 32; + }; + u32 reg; +} E56G__CMS_ANA_OVRDVAL_9; +#define E56G__CMS_ANA_OVRDVAL_9_NUM 1 +#define E56G__CMS_ANA_OVRDVAL_9_ADDR (E56G__BASEADDR+0xcd4) +#define E56G__CMS_ANA_OVRDVAL_9_PTR ((E56G__CMS_ANA_OVRDVAL_9 *)(E56G__CMS_ANA_OVRDVAL_9_ADDR)) +#define E56G__CMS_ANA_OVRDVAL_9_STRIDE 4 +#define E56G__CMS_ANA_OVRDVAL_9_SIZE 32 +#define E56G__CMS_ANA_OVRDVAL_9_ACC_SIZE 32 +#define E56G__CMS_ANA_OVRDVAL_9_READ_MSB 31 +#define E56G__CMS_ANA_OVRDVAL_9_READ_LSB 0 +#define E56G__CMS_ANA_OVRDVAL_9_WRITE_MSB 31 +#define 
E56G__CMS_ANA_OVRDVAL_9_WRITE_LSB 0 +#define E56G__CMS_ANA_OVRDVAL_9_RESET_VALUE 0x0 + +#define SFP2_RS0 5 +#define SFP2_RS1 4 +#define SFP2_TX_DISABLE 1 +#define SFP2_TX_FAULT 0 +#define SFP2_RX_LOS_BIT 3 +#ifdef PHYINIT_TIMEOUT +#undef PHYINIT_TIMEOUT +#define PHYINIT_TIMEOUT 2000 +#endif + +#define E56PHY_CMS_ANA_OVRDEN_0_ADDR (E56PHY_CMS_BASE_ADDR+0xA4) +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_REFCLK_BUF_DAISY_EN_I 0, 0 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_REFCLK_BUF_PAD_EN_I 1, 1 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_REFCLK_BUF_PAD_EN_I_LSB 1 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_VDDINOFF_DCORE_DIG_O 2, 2 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_BG_EN_I 11, 11 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_BG_EN_I_LSB 11 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_BG_TESTIN_I 12, 12 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_BG_TESTIN_I_LSB 12 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RESCAL_I 13, 13 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RESCAL_I_LSB 13 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_RESCAL_COMP_O 14, 14 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_RESCAL_COMP_O_LSB 14 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_RESCAL_CODE_I 15, 15 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_RESCAL_CODE_I_LSB 15 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_LDO_CORE_I 16, 16 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_LDO_CORE_I_LSB 16 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_LDO_I 17, 17 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_LDO_I_LSB 17 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_ANA_DEBUG_SEL_I 18, 18 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_ANA_DEBUG_SEL_I_LSB 18 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_EN_BIAS_I 19, 19 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_EN_BIAS_I_LSB 19 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_EN_LOOP_I 20, 20 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_EN_LOOP_I_LSB 20 +#define 
E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_EN_CP_I 21, 21 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_EN_CP_I_LSB 21 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_ICP_BASE_I 22, 22 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_ICP_BASE_I_LSB 22 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_ICP_FINE_I 23, 23 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_ICP_FINE_I_LSB 23 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_LPF_CTRL_I 24, 24 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_LPF_CTRL_I_LSB 24 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_LPF_SETCODE_CALIB_I 25, 25 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_LPF_SETCODE_CALIB_I_LSB 25 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_SET_LPF_I 26, 26 + +#define E56PHY_CMS_ANA_OVRDVAL_2_ANA_LCPLL_HF_LPF_SETCODE_CALIB_I 20, 16 +#define E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_LF_LPF_SETCODE_CALIB_I 12, 12 +#define E56PHY_CMS_ANA_OVRDVAL_7_ADDR (E56PHY_CMS_BASE_ADDR+0xCC) +#define E56PHY_CMS_ANA_OVRDVAL_5_ADDR (E56PHY_CMS_BASE_ADDR+0xC4) +#define E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_LF_TEST_IN_I 23, 23 +#define E56PHY_CMS_ANA_OVRDVAL_9_ADDR (E56PHY_CMS_BASE_ADDR+0xD4) +#define E56PHY_CMS_ANA_OVRDVAL_10_ADDR (E56PHY_CMS_BASE_ADDR+0xD8) +#define E56PHY_CMS_ANA_OVRDVAL_7_ANA_LCPLL_LF_LPF_SETCODE_CALIB_I 8, 4 + +int txgbe_e56_set_phylinkmode(struct txgbe_adapter *adapter, + unsigned char byLinkMode, unsigned int bypassCtle); +void txgbe_e56_bp_watchdog_event(struct txgbe_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 1367acf48766e3a270ae5ff6dfcafae57a3e55b8..3138c8d19dc93d4bc0effda7fecfc20b4955754a 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun 
Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -14,7 +14,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * based on ixgbe_ethtool.c, Copyright(c) 1999 - 2017 Intel Corporation. + * based on txgbe_ethtool.c, Copyright(c) 1999 - 2017 Intel Corporation. * Contact Information: * Linux NICS * e1000-devel Mailing List @@ -38,6 +38,7 @@ #include "txgbe_hw.h" #if defined(ETHTOOL_GMODULEINFO)||defined(HAVE_ETHTOOL_SET_PHYS_ID) #include "txgbe_phy.h" +#include "txgbe_e56.h" #endif #ifdef HAVE_ETHTOOL_GET_TS_INFO #include @@ -107,6 +108,15 @@ static struct txgbe_stats txgbe_gstrings_stats[] = { TXGBE_STAT("tx_broadcast", stats.bptc), TXGBE_STAT("rx_multicast", stats.mprc), TXGBE_STAT("tx_multicast", stats.mptc), + TXGBE_STAT("rx_mac_good", stats.tpr), + TXGBE_STAT("rdb_pkts", stats.rdpc), + TXGBE_STAT("rdb_drop", stats.rddc), + TXGBE_STAT("tdm_pkts", stats.tdmpc), + TXGBE_STAT("tdm_drop", stats.tdmdc), + TXGBE_STAT("tdb_pkts", stats.tdbpc), + TXGBE_STAT("rx_parser_pkts", stats.psrpc), + TXGBE_STAT("rx_parser_drop", stats.psrdc), + TXGBE_STAT("lsec_untag_pkts", stats.untag), TXGBE_STAT("rx_no_buffer_count", stats.rnbc[0]), TXGBE_STAT("tx_timeout_count", tx_timeout_count), TXGBE_STAT("tx_restart_queue", restart_queue), @@ -214,6 +224,9 @@ struct txgbe_priv_flags { static const struct txgbe_priv_flags txgbe_gstrings_priv_flags[] = { TXGBE_PRIV_FLAG("lldp", TXGBE_ETH_PRIV_FLAG_LLDP, 0), +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC + TXGBE_PRIV_FLAG("legacy-rx", TXGBE_ETH_PRIV_FLAG_LEGACY_RX, 0), +#endif }; #define TXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(txgbe_gstrings_priv_flags) @@ -227,7 +240,7 @@ static const struct txgbe_priv_flags txgbe_gstrings_priv_flags[] = { #define 
txgbe_isbackplane(type) \ ((type == txgbe_media_type_backplane) ? true : false) -#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +#ifdef ETHTOOL_GLINKSETTINGS static int txgbe_set_advertising_1g_10gtypes(struct txgbe_hw *hw, struct ethtool_link_ksettings *cmd, u32 advertised_speed) { @@ -251,6 +264,8 @@ static int txgbe_set_advertising_1g_10gtypes(struct txgbe_hw *hw, } break; case txgbe_sfp_type_sr: + case txgbe_sfp_type_25g_sr_core0: + case txgbe_sfp_type_25g_sr_core1: if (advertised_speed & TXGBE_LINK_SPEED_10GB_FULL) { ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseSR_Full); @@ -261,6 +276,8 @@ static int txgbe_set_advertising_1g_10gtypes(struct txgbe_hw *hw, } break; case txgbe_sfp_type_lr: + case txgbe_sfp_type_25g_lr_core0: + case txgbe_sfp_type_25g_lr_core1: if (advertised_speed & TXGBE_LINK_SPEED_10GB_FULL) { ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseLR_Full); @@ -308,10 +325,14 @@ static int txgbe_set_supported_1g_10gtypes(struct txgbe_hw *hw, 10000baseLR_Full); break; case txgbe_sfp_type_sr: + case txgbe_sfp_type_25g_sr_core0: + case txgbe_sfp_type_25g_sr_core1: ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseSR_Full); break; case txgbe_sfp_type_lr: + case txgbe_sfp_type_25g_lr_core0: + case txgbe_sfp_type_25g_lr_core1: ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseLR_Full); break; @@ -336,8 +357,8 @@ static int txgbe_set_supported_1g_10gtypes(struct txgbe_hw *hw, return 0; } -int txgbe_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *cmd) +static int txgbe_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct txgbe_adapter *adapter = netdev_priv(netdev); struct txgbe_hw *hw = &adapter->hw; @@ -354,107 +375,158 @@ int txgbe_get_link_ksettings(struct net_device *netdev, if((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4) autoneg = adapter->backplane_an ? 
1:0; else if((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) - autoneg = adapter->an37?1:0; + autoneg = adapter->autoneg?1:0; - /* set the supported link speeds */ if (hw->phy.media_type == txgbe_media_type_copper) { - if (supported_link & TXGBE_LINK_SPEED_10GB_FULL) - ethtool_link_ksettings_add_link_mode(cmd, supported, + if (supported_link & TXGBE_LINK_SPEED_10GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full); - if (supported_link & TXGBE_LINK_SPEED_1GB_FULL) - ethtool_link_ksettings_add_link_mode(cmd, supported, + if (supported_link & TXGBE_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); - if (supported_link & TXGBE_LINK_SPEED_100_FULL) - ethtool_link_ksettings_add_link_mode(cmd, supported, - 100baseT_Full); + if (supported_link & TXGBE_LINK_SPEED_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 100baseT_Full); - if (supported_link & TXGBE_LINK_SPEED_10_FULL) - ethtool_link_ksettings_add_link_mode(cmd, supported, + if (supported_link & TXGBE_LINK_SPEED_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); - }else if (hw->phy.media_type == txgbe_media_type_fiber) { + } else if (hw->phy.media_type == txgbe_media_type_fiber_qsfp) { + if (supported_link & TXGBE_LINK_SPEED_40GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 40000baseSR4_Full); + } else if (hw->phy.media_type == txgbe_media_type_fiber) { + if (supported_link & TXGBE_LINK_SPEED_25GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 25000baseSR_Full); + if ((supported_link & TXGBE_LINK_SPEED_10GB_FULL) || - (supported_link & TXGBE_LINK_SPEED_1GB_FULL)) + (supported_link & TXGBE_LINK_SPEED_1GB_FULL)) txgbe_set_supported_1g_10gtypes(hw, cmd); - if (hw->phy.multispeed_fiber) - ethtool_link_ksettings_add_link_mode(cmd, supported, + if (hw->phy.multispeed_fiber && hw->mac.type == txgbe_mac_sp) + ethtool_link_ksettings_add_link_mode(cmd, supported, 
1000baseX_Full); - }else { - ethtool_link_ksettings_add_link_mode(cmd, supported, - 10000baseKR_Full); - ethtool_link_ksettings_add_link_mode(cmd, supported, + } else { + switch (hw->phy.link_mode) { + case TXGBE_PHYSICAL_LAYER_10GBASE_KX4: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseKX4_Full); + break; + case TXGBE_PHYSICAL_LAYER_10GBASE_KR: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseKR_Full); + break; + case TXGBE_PHYSICAL_LAYER_1000BASE_KX: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseKX_Full); + break; + default: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseKX4_Full); + break; + } } - - /* set the advertised speeds */ + + /* set the advertised speeds */ if (hw->phy.autoneg_advertised) { + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_40GB_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 40000baseSR4_Full); + } + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_25GB_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 25000baseSR_Full); + } if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_10GB_FULL) { if (hw->phy.media_type == txgbe_media_type_copper) { - ethtool_link_ksettings_add_link_mode(cmd, advertising, + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full); } else if (hw->phy.media_type == txgbe_media_type_fiber) { - txgbe_set_advertising_1g_10gtypes(hw, cmd, + txgbe_set_advertising_1g_10gtypes(hw, cmd, hw->phy.autoneg_advertised); } else { - ethtool_link_ksettings_add_link_mode(cmd, advertising, + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseKR_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseKX4_Full); } } if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_1GB_FULL) { if (hw->phy.media_type == txgbe_media_type_copper) - 
ethtool_link_ksettings_add_link_mode(cmd, advertising, + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); else if (hw->phy.media_type == txgbe_media_type_fiber) - txgbe_set_advertising_1g_10gtypes(hw, cmd, + txgbe_set_advertising_1g_10gtypes(hw, cmd, hw->phy.autoneg_advertised); else - ethtool_link_ksettings_add_link_mode(cmd, advertising, + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseKX_Full); } if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_100_FULL) { - ethtool_link_ksettings_add_link_mode(cmd, advertising, + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); } if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_10_FULL) { ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); - } + } } else { + if (supported_link & TXGBE_LINK_SPEED_40GB_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 40000baseSR4_Full); + } + if (supported_link & TXGBE_LINK_SPEED_25GB_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 25000baseSR_Full); + } if (supported_link & TXGBE_LINK_SPEED_10GB_FULL) { if (hw->phy.media_type == txgbe_media_type_copper) { - ethtool_link_ksettings_add_link_mode(cmd, advertising, + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full); } else if (hw->phy.media_type == txgbe_media_type_fiber) { - txgbe_set_advertising_1g_10gtypes(hw, cmd, + txgbe_set_advertising_1g_10gtypes(hw, cmd, TXGBE_LINK_SPEED_10GB_FULL); } else { - ethtool_link_ksettings_add_link_mode(cmd, advertising, + switch (hw->phy.link_mode) { + case TXGBE_PHYSICAL_LAYER_10GBASE_KX4: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseKX4_Full); + break; + case TXGBE_PHYSICAL_LAYER_10GBASE_KR: + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseKR_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, + break; + default: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseKR_Full); + 
ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseKX4_Full); + break; + } } } if (supported_link & TXGBE_LINK_SPEED_1GB_FULL) { if (hw->phy.media_type == txgbe_media_type_copper) - ethtool_link_ksettings_add_link_mode(cmd, advertising, + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); else if (hw->phy.media_type == txgbe_media_type_fiber) - txgbe_set_advertising_1g_10gtypes(hw, cmd, + txgbe_set_advertising_1g_10gtypes(hw, cmd, TXGBE_LINK_SPEED_1GB_FULL); else - ethtool_link_ksettings_add_link_mode(cmd, advertising, + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseKX_Full); } if (supported_link & TXGBE_LINK_SPEED_100_FULL) { if (hw->phy.media_type == txgbe_media_type_copper) - ethtool_link_ksettings_add_link_mode(cmd, advertising, + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); } if (supported_link & TXGBE_LINK_SPEED_10_FULL) { if (hw->phy.media_type == txgbe_media_type_copper) - ethtool_link_ksettings_add_link_mode(cmd, advertising, + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); } } @@ -466,7 +538,6 @@ int txgbe_get_link_ksettings(struct net_device *netdev, } else cmd->base.autoneg = AUTONEG_DISABLE; - /* Determine the remaining settings based on the PHY type. 
*/ switch (adapter->hw.phy.type) { case txgbe_phy_tn: @@ -484,6 +555,8 @@ int txgbe_get_link_ksettings(struct net_device *netdev, case txgbe_phy_nl: case txgbe_phy_sfp_passive_tyco: case txgbe_phy_sfp_passive_unknown: + case txgbe_phy_sfp_active_unknown: + case txgbe_phy_sfp_ftl_active: case txgbe_phy_sfp_ftl: case txgbe_phy_sfp_avago: case txgbe_phy_sfp_intel: @@ -493,6 +566,8 @@ int txgbe_get_link_ksettings(struct net_device *netdev, case txgbe_sfp_type_da_cu: case txgbe_sfp_type_da_cu_core0: case txgbe_sfp_type_da_cu_core1: + case txgbe_qsfp_type_40g_cu_core0: + case txgbe_qsfp_type_40g_cu_core1: ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); cmd->base.port = PORT_DA; @@ -505,6 +580,20 @@ int txgbe_get_link_ksettings(struct net_device *netdev, case txgbe_sfp_type_1g_sx_core1: case txgbe_sfp_type_1g_lx_core0: case txgbe_sfp_type_1g_lx_core1: + case txgbe_sfp_type_da_act_lmt_core0: + case txgbe_sfp_type_da_act_lmt_core1: + case txgbe_sfp_type_25g_sr_core0: + case txgbe_sfp_type_25g_sr_core1: + case txgbe_sfp_type_25g_lr_core0: + case txgbe_sfp_type_25g_lr_core1: + case txgbe_sfp_type_25g_aoc_core0: + case txgbe_sfp_type_25g_aoc_core1: + case txgbe_qsfp_type_40g_sr_core0: + case txgbe_qsfp_type_40g_sr_core1: + case txgbe_qsfp_type_40g_lr_core0: + case txgbe_qsfp_type_40g_lr_core1: + case txgbe_qsfp_type_40g_active_core0: + case txgbe_qsfp_type_40g_active_core1: ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); cmd->base.port = PORT_FIBRE; @@ -566,7 +655,7 @@ int txgbe_get_link_ksettings(struct net_device *netdev, /* Indicate pause support */ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); - + switch (hw->fc.requested_mode) { case txgbe_fc_full: ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); @@ -588,6 +677,12 @@ int txgbe_get_link_ksettings(struct net_device *netdev, if (link_up) { switch 
(link_speed) { + case TXGBE_LINK_SPEED_40GB_FULL: + cmd->base.speed = SPEED_40000; + break; + case TXGBE_LINK_SPEED_25GB_FULL: + cmd->base.speed = SPEED_25000; + break; case TXGBE_LINK_SPEED_10GB_FULL: cmd->base.speed = SPEED_10000; break; @@ -602,35 +697,41 @@ int txgbe_get_link_ksettings(struct net_device *netdev, break; default: break; - } + } cmd->base.duplex = DUPLEX_FULL; } else { cmd->base.speed = -1; cmd->base.duplex = -1; } - if(!(ethtool_link_ksettings_test_link_mode(cmd, advertising, - 10000baseT_Full) || - ethtool_link_ksettings_test_link_mode(cmd, advertising, - 10000baseKR_Full)|| - ethtool_link_ksettings_test_link_mode(cmd, advertising, - 10000baseKX4_Full)|| - ethtool_link_ksettings_test_link_mode(cmd, advertising, - 10000baseLR_Full))&& - (ethtool_link_ksettings_test_link_mode(cmd, advertising, - 1000baseT_Full) || - ethtool_link_ksettings_test_link_mode(cmd, advertising, - 1000baseKX_Full)|| - ethtool_link_ksettings_test_link_mode(cmd, advertising, - 1000baseX_Full)) - ){ - if(!adapter->an37) - ethtool_link_ksettings_del_link_mode(cmd, advertising, Autoneg); - else ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); - cmd->base.autoneg = adapter->an37; - } + + if (!netif_carrier_ok(netdev)) { + cmd->base.speed = -1; + cmd->base.duplex = -1; + } + +#ifdef ETHTOOL_GFECPARAM + if (hw->mac.type == txgbe_mac_aml) { + ethtool_link_ksettings_add_link_mode(cmd, supported, FEC_NONE); + ethtool_link_ksettings_add_link_mode(cmd, supported, FEC_RS); + ethtool_link_ksettings_add_link_mode(cmd, supported, FEC_BASER); + if (adapter->fec_link_mode & TXGBE_PHY_FEC_OFF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, FEC_NONE); + if (adapter->fec_link_mode & TXGBE_PHY_FEC_RS) + ethtool_link_ksettings_add_link_mode(cmd, advertising, FEC_RS); + if (adapter->fec_link_mode & TXGBE_PHY_FEC_BASER) + ethtool_link_ksettings_add_link_mode(cmd, advertising, FEC_BASER); + } +#endif + + if(!adapter->autoneg) + ethtool_link_ksettings_del_link_mode(cmd, 
advertising, Autoneg); + else + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + cmd->base.autoneg = adapter->autoneg; + return 0; } -#else /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ +#else /* !ETHTOOL_GLINKSETTINGS */ static __u32 txgbe_backplane_type(struct txgbe_hw *hw) { __u32 mode = 0x00; @@ -652,7 +753,7 @@ static __u32 txgbe_backplane_type(struct txgbe_hw *hw) } return mode; } - + int txgbe_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { @@ -668,7 +769,7 @@ int txgbe_get_settings(struct net_device *netdev, if((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4) autoneg = adapter->backplane_an ? 1:0; else if((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) - autoneg = adapter->an37?1:0; + autoneg = adapter->autoneg?1:0; /* set the supported link speeds */ if (supported_link & TXGBE_LINK_SPEED_10GB_FULL) @@ -703,9 +804,14 @@ int txgbe_get_settings(struct net_device *netdev, } else { /* default modes in case phy.autoneg_advertised isn't set */ if (supported_link & TXGBE_LINK_SPEED_10GB_FULL) - ecmd->advertising |= ADVERTISED_10000baseT_Full; - if (supported_link & TXGBE_LINK_SPEED_1GB_FULL) - ecmd->advertising |= ADVERTISED_1000baseT_Full; + ecmd->advertising |= (txgbe_isbackplane(hw->phy.media_type)) ? 
+ txgbe_backplane_type(hw) : SUPPORTED_10000baseT_Full; + if (supported_link & TXGBE_LINK_SPEED_1GB_FULL) { + if (ecmd->supported & SUPPORTED_1000baseKX_Full) + ecmd->advertising |= ADVERTISED_1000baseKX_Full; + else + ecmd->advertising |= ADVERTISED_1000baseT_Full; + } if (supported_link & TXGBE_LINK_SPEED_100_FULL) ecmd->advertising |= ADVERTISED_100baseT_Full; if (hw->phy.multispeed_fiber && !autoneg) { @@ -742,6 +848,7 @@ int txgbe_get_settings(struct net_device *netdev, case txgbe_phy_nl: case txgbe_phy_sfp_passive_tyco: case txgbe_phy_sfp_passive_unknown: + case txgbe_phy_sfp_ftl_active: case txgbe_phy_sfp_ftl: case txgbe_phy_sfp_avago: case txgbe_phy_sfp_intel: @@ -751,6 +858,8 @@ int txgbe_get_settings(struct net_device *netdev, case txgbe_sfp_type_da_cu: case txgbe_sfp_type_da_cu_core0: case txgbe_sfp_type_da_cu_core1: + case txgbe_qsfp_type_40g_cu_core0: + case txgbe_qsfp_type_40g_cu_core1: ecmd->supported |= SUPPORTED_FIBRE; ecmd->advertising |= ADVERTISED_FIBRE; ecmd->port = PORT_DA; @@ -763,6 +872,20 @@ int txgbe_get_settings(struct net_device *netdev, case txgbe_sfp_type_1g_sx_core1: case txgbe_sfp_type_1g_lx_core0: case txgbe_sfp_type_1g_lx_core1: + case txgbe_sfp_type_da_act_lmt_core0: + case txgbe_sfp_type_da_act_lmt_core1: + case txgbe_sfp_type_25g_sr_core0: + case txgbe_sfp_type_25g_sr_core1: + case txgbe_sfp_type_25g_lr_core0: + case txgbe_sfp_type_25g_lr_core1: + case txgbe_sfp_type_25g_aoc_core0: + case txgbe_sfp_type_25g_aoc_core1: + case txgbe_qsfp_type_40g_sr_core0: + case txgbe_qsfp_type_40g_sr_core1: + case txgbe_qsfp_type_40g_lr_core0: + case txgbe_qsfp_type_40g_lr_core1: + case txgbe_qsfp_type_40g_active_core0: + case txgbe_qsfp_type_40g_active_core1: ecmd->supported |= SUPPORTED_FIBRE; ecmd->advertising |= ADVERTISED_FIBRE; ecmd->port = PORT_FIBRE; @@ -842,6 +965,9 @@ int txgbe_get_settings(struct net_device *netdev, if (link_up) { switch (link_speed) { + case TXGBE_LINK_SPEED_25GB_FULL: + ecmd->speed = SPEED_25000; + break; case 
TXGBE_LINK_SPEED_10GB_FULL: ecmd->speed = SPEED_10000; break; @@ -862,49 +988,61 @@ int txgbe_get_settings(struct net_device *netdev, ecmd->speed = -1; ecmd->duplex = -1; } - if((ecmd->advertising & ETHTOOL_LINK_MODE_SPEED_MASK) == ADVERTISED_1000baseT_Full || - (ecmd->advertising & ETHTOOL_LINK_MODE_SPEED_MASK) == ADVERTISED_1000baseKX_Full){ - if(!adapter->an37) - ecmd->advertising &= ~ADVERTISED_Autoneg; - } - ecmd->autoneg = adapter->an37?AUTONEG_ENABLE:AUTONEG_DISABLE; + if(!adapter->autoneg) + ecmd->advertising &= ~ADVERTISED_Autoneg; + ecmd->autoneg = adapter->autoneg?AUTONEG_ENABLE:AUTONEG_DISABLE; return 0; } -#endif /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ +#endif /* !ETHTOOL_GLINKSETTINGS */ + +#ifdef ETHTOOL_GLINKSETTINGS -#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE static int txgbe_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { struct txgbe_adapter *adapter = netdev_priv(netdev); struct txgbe_hw *hw = &adapter->hw; - u32 advertised, old; + u32 advertised, old, link_support; + bool autoneg; s32 err = 0; struct ethtool_link_ksettings temp_ks; u32 curr_autoneg = 2; if((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4) - adapter->backplane_an = cmd->base.autoneg?1:0; - + adapter->backplane_an = cmd->base.autoneg ? 1 : 0; + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) + adapter->autoneg = cmd->base.autoneg ? 
1 : 0; if ((hw->phy.media_type == txgbe_media_type_copper) || (hw->phy.multispeed_fiber)) { memcpy(&temp_ks, cmd, sizeof(struct ethtool_link_ksettings)); /* To be compatible with test cases */ if (hw->phy.media_type == txgbe_media_type_fiber) { + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 25000baseSR_Full)) { + ethtool_link_ksettings_add_link_mode(&temp_ks, supported, + 25000baseSR_Full); + } + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseT_Full)) { ethtool_link_ksettings_add_link_mode(&temp_ks, supported, 10000baseT_Full); +#ifndef HAVE_NO_ETHTOOL_10000SR ethtool_link_ksettings_del_link_mode(&temp_ks, supported, 10000baseSR_Full); +#endif +#ifndef HAVE_NO_ETHTOOL_10000LR ethtool_link_ksettings_del_link_mode(&temp_ks, supported, 10000baseLR_Full); +#endif } if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 1000baseT_Full)) { ethtool_link_ksettings_add_link_mode(&temp_ks, supported, 1000baseT_Full); +#ifndef HAVE_NO_ETHTOOL_1000X ethtool_link_ksettings_del_link_mode(&temp_ks, supported, 1000baseX_Full); +#endif } } @@ -916,52 +1054,61 @@ static int txgbe_set_link_ksettings(struct net_device *netdev, __ETHTOOL_LINK_MODE_MASK_NBITS)) return -EINVAL; - /* only allow one speed at a time if no autoneg */ - if (!cmd->base.autoneg && hw->phy.multispeed_fiber) { - if ((ethtool_link_ksettings_test_link_mode(cmd, advertising, - 10000baseSR_Full) && - ethtool_link_ksettings_test_link_mode(cmd, advertising, - 1000baseX_Full)) | - (ethtool_link_ksettings_test_link_mode(cmd, advertising, - 10000baseLR_Full) && - ethtool_link_ksettings_test_link_mode(cmd, advertising, - 1000baseX_Full))) - return -EINVAL; - } old = hw->phy.autoneg_advertised; advertised = 0; + if (!cmd->base.autoneg) { + if (cmd->base.speed == SPEED_25000) + advertised |= TXGBE_LINK_SPEED_25GB_FULL; + else if (cmd->base.speed == SPEED_10000) + advertised |= TXGBE_LINK_SPEED_10GB_FULL; + else if (cmd->base.speed == SPEED_1000) + advertised |= 
TXGBE_LINK_SPEED_1GB_FULL; + else + advertised |= old; + }else{ + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 25000baseSR_Full)) + advertised |= TXGBE_LINK_SPEED_25GB_FULL; - if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseSR_Full) || - ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseLR_Full) || - ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseT_Full)) - advertised |= TXGBE_LINK_SPEED_10GB_FULL; - - if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 1000baseX_Full) || - ethtool_link_ksettings_test_link_mode(cmd, advertising, 1000baseT_Full)) - advertised |= TXGBE_LINK_SPEED_1GB_FULL; + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseSR_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseLR_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseT_Full)) + advertised |= TXGBE_LINK_SPEED_10GB_FULL; - if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 100baseT_Full)) - advertised |= TXGBE_LINK_SPEED_100_FULL; + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 1000baseX_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, 1000baseT_Full)) + advertised |= TXGBE_LINK_SPEED_1GB_FULL; - if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 10baseT_Full)) - advertised |= TXGBE_LINK_SPEED_10_FULL; + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 100baseT_Full)) + advertised |= TXGBE_LINK_SPEED_100_FULL; - if (((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) || - ((advertised & TXGBE_LINK_SPEED_1GB_FULL) && hw->phy.multispeed_fiber)) - adapter->an37 = cmd->base.autoneg ? 
1 : 0; + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 10baseT_Full)) + advertised |= TXGBE_LINK_SPEED_10_FULL; + } if (advertised == TXGBE_LINK_SPEED_1GB_FULL && hw->phy.media_type != txgbe_media_type_copper) { curr_autoneg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); curr_autoneg = !!(curr_autoneg & (0x1 << 12)); - if (old == advertised && (curr_autoneg == adapter->an37)) - return -EINVAL; + if (old == advertised && (curr_autoneg == !!(cmd->base.autoneg))) + return 0; } + + err = TCALL(hw, mac.ops.get_link_capabilities, + &link_support, &autoneg); + if (err) + e_info(probe, "get link capabilities failed with code %d\n", err); + if (!(link_support & advertised)) { + e_info(probe, "unsupported advertised: %x", advertised); + return -EINVAL; + } + /* this sets the link speed and restarts auto-neg */ while (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) usleep_range(1000, 2000); - + adapter->autoneg = cmd->base.autoneg ? 1 : 0; hw->mac.autotry_restart = true; + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + txgbe_service_event_schedule(adapter); err = TCALL(hw, mac.ops.setup_link, advertised, true); if (err) { e_info(probe, "setup link failed with code %d\n", err); @@ -969,47 +1116,51 @@ static int txgbe_set_link_ksettings(struct net_device *netdev, } if ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) TCALL(hw, mac.ops.flap_tx_laser); + + /* notify fw autoneg status */ + txgbe_hic_write_autoneg_status(hw, cmd->base.autoneg); + clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); } else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4 || (hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) { if (!cmd->base.autoneg) { if (ethtool_link_ksettings_test_link_mode(cmd, advertising, - 10000baseKR_Full) | + 10000baseKR_Full) & ethtool_link_ksettings_test_link_mode(cmd, advertising, - 1000baseKX_Full) | + 1000baseKX_Full) & ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseKX4_Full)) return -EINVAL; - } else { - 
err = txgbe_set_link_to_kr(hw, 1); - return -EINVAL; } + advertised = 0; if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseKR_Full)) { err = txgbe_set_link_to_kr(hw, 1); advertised |= TXGBE_LINK_SPEED_10GB_FULL; - return -EINVAL; } else if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseKX4_Full)) { err = txgbe_set_link_to_kx4(hw, 1); advertised |= TXGBE_LINK_SPEED_10GB_FULL; - return -EINVAL; } else if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 1000baseKX_Full)) { advertised |= TXGBE_LINK_SPEED_1GB_FULL; err = txgbe_set_link_to_kx(hw, TXGBE_LINK_SPEED_1GB_FULL, 0); - return -EINVAL; + txgbe_set_sgmii_an37_ability(hw); } if (err) - return -EINVAL; + err = -EACCES; + return err; } else { /* in this case we currently only support 10Gb/FULL */ u32 speed = cmd->base.speed; - if ((ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseT_Full) || + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + return -EINVAL; + } else if ((ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseT_Full) || ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseKR_Full) || ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseKX4_Full) || - ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseLR_Full))) { + ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseLR_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseSR_Full))) { if ((cmd->base.autoneg == AUTONEG_ENABLE) || (!ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseT_Full)) || @@ -1027,8 +1178,12 @@ static int txgbe_set_link_ksettings(struct net_device *netdev, 1000baseT_Full)) { ethtool_link_ksettings_add_link_mode(&temp_ks, supported, 1000baseT_Full); +#ifndef HAVE_NO_ETHTOOL_1000X ethtool_link_ksettings_del_link_mode(&temp_ks, supported, 1000baseX_Full); +#endif + ethtool_link_ksettings_del_link_mode(&temp_ks, supported, + 1000baseKX_Full); } if 
(!bitmap_subset(cmd->link_modes.advertising, @@ -1044,18 +1199,24 @@ static int txgbe_set_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_test_link_mode(cmd, advertising, 1000baseT_Full)) advertised |= TXGBE_LINK_SPEED_1GB_FULL; - adapter->an37 = cmd->base.autoneg?1:0; +#if 0 + if (hw->mac.type == txgbe_mac_aml) { + curr_autoneg = txgbe_rd32_epcs(hw, SR_AN_CTRL); + curr_autoneg = !!(curr_autoneg & (0x1 << 12)); + } +#endif if (advertised == TXGBE_LINK_SPEED_1GB_FULL) { curr_autoneg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); curr_autoneg = !!(curr_autoneg & (0x1 << 12)); } - if (old == advertised && (curr_autoneg == adapter->an37)) + if (old == advertised && (curr_autoneg == !!cmd->base.autoneg)) return -EINVAL; /* this sets the link speed and restarts auto-neg */ while (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) usleep_range(1000, 2000); + adapter->autoneg = cmd->base.autoneg?1:0; hw->mac.autotry_restart = true; err = TCALL(hw, mac.ops.setup_link, advertised, true); if (err) { @@ -1064,15 +1225,20 @@ static int txgbe_set_link_ksettings(struct net_device *netdev, } if ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) TCALL(hw, mac.ops.flap_tx_laser); + + /* notify fw autoneg status */ + txgbe_hic_write_autoneg_status(hw, cmd->base.autoneg); + clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); } + adapter->autoneg = cmd->base.autoneg?1:0; } if (err) return -EINVAL; return err; } -#else /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ +#else /* !ETHTOOL_GLINKSETTINGS */ static int txgbe_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { @@ -1083,7 +1249,9 @@ static int txgbe_set_settings(struct net_device *netdev, s32 err = 0; if((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4) - adapter->backplane_an = ecmd->autoneg?1:0; + adapter->backplane_an = ecmd->autoneg ? 1 : 0; + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) + adapter->autoneg = ecmd->autoneg? 
1 : 0; if ((hw->phy.media_type == txgbe_media_type_copper) || (hw->phy.multispeed_fiber)) { @@ -1103,6 +1271,7 @@ static int txgbe_set_settings(struct net_device *netdev, old = hw->phy.autoneg_advertised; advertised = 0; + if (ecmd->advertising & ADVERTISED_10000baseT_Full) advertised |= TXGBE_LINK_SPEED_10GB_FULL; @@ -1115,21 +1284,23 @@ static int txgbe_set_settings(struct net_device *netdev, if (ecmd->advertising & ADVERTISED_10baseT_Full) advertised |= TXGBE_LINK_SPEED_10_FULL; - if (((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) || - ((advertised & TXGBE_LINK_SPEED_1GB_FULL) && hw->phy.multispeed_fiber)) - adapter->an37 = ecmd->autoneg ? 1 : 0; - if (advertised == TXGBE_LINK_SPEED_1GB_FULL && hw->phy.media_type != txgbe_media_type_copper) { curr_autoneg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); curr_autoneg = !!(curr_autoneg & (0x1 << 12)); - if (old == advertised && (curr_autoneg == adapter->an37)) + if (old == advertised && (curr_autoneg == !!ecmd->autoneg)) return err; } + + if (advertised == TXGBE_LINK_SPEED_10GB_FULL && + ecmd->autoneg == AUTONEG_DISABLE) + return -EINVAL; + /* this sets the link speed and restarts auto-neg */ while (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) usleep_range(1000, 2000); + adapter->autoneg = ecmd->autoneg ? 
1 : 0; hw->mac.autotry_restart = true; err = TCALL(hw, mac.ops.setup_link, advertised, true); if (err) { @@ -1139,6 +1310,10 @@ static int txgbe_set_settings(struct net_device *netdev, if ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) TCALL(hw, mac.ops.flap_tx_laser); + + /* notify fw autoneg status */ + txgbe_hic_write_autoneg_status(hw, ecmd->autoneg); + clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); } else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4 || (hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) { @@ -1147,33 +1322,27 @@ static int txgbe_set_settings(struct net_device *netdev, (ADVERTISED_10000baseKR_Full | ADVERTISED_1000baseKX_Full | ADVERTISED_10000baseKX4_Full)) return -EINVAL; - } else { - err = txgbe_set_link_to_kr(hw, 1); - return err; } + advertised = 0; - if (ecmd->advertising & ADVERTISED_10000baseKR_Full){ + if (ecmd->advertising & ADVERTISED_10000baseKR_Full) { err = txgbe_set_link_to_kr(hw, 1); advertised |= TXGBE_LINK_SPEED_10GB_FULL; - return err; - } else if (ecmd->advertising & ADVERTISED_10000baseKX4_Full){ + } else if (ecmd->advertising & ADVERTISED_10000baseKX4_Full) { err = txgbe_set_link_to_kx4(hw, 1); advertised |= TXGBE_LINK_SPEED_10GB_FULL; - return err; - } else if (ecmd->advertising & ADVERTISED_1000baseKX_Full){ + } else if (ecmd->advertising & ADVERTISED_1000baseKX_Full) { advertised |= TXGBE_LINK_SPEED_1GB_FULL; err = txgbe_set_link_to_kx(hw, TXGBE_LINK_SPEED_1GB_FULL, 0); - return err; + txgbe_set_sgmii_an37_ability(hw); } + if (err) + return -EACCES; return err; } else { /* in this case we currently only support 10Gb/FULL and 1Gb/FULL*/ - u32 speed = ethtool_cmd_speed(ecmd); - if(ecmd->advertising & ADVERTISED_10000baseT_Full){ - if ((ecmd->autoneg == AUTONEG_ENABLE) || - (ecmd->advertising != ADVERTISED_10000baseT_Full) || - (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) - return -EINVAL; + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + return -EINVAL; 
} else if (ecmd->advertising & ADVERTISED_1000baseT_Full) { if (ecmd->advertising & ~ecmd->supported) return -EINVAL; @@ -1184,17 +1353,17 @@ static int txgbe_set_settings(struct net_device *netdev, if (ecmd->advertising & ADVERTISED_1000baseT_Full) advertised |= TXGBE_LINK_SPEED_1GB_FULL; - adapter->an37 = ecmd->autoneg ? 1 : 0; if (advertised == TXGBE_LINK_SPEED_1GB_FULL) { curr_autoneg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); curr_autoneg = !!(curr_autoneg & (0x1 << 12)); } - if (old == advertised && (curr_autoneg == adapter->an37)) + if (old == advertised && (curr_autoneg == !!ecmd->autoneg)) return err; /* this sets the link speed and restarts auto-neg */ while (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) usleep_range(1000, 2000); + adapter->autoneg = ecmd->autoneg ? 1 : 0; hw->mac.autotry_restart = true; err = TCALL(hw, mac.ops.setup_link, advertised, true); if (err) { @@ -1203,8 +1372,13 @@ static int txgbe_set_settings(struct net_device *netdev, } if ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) TCALL(hw, mac.ops.flap_tx_laser); + + /* notify fw autoneg status */ + txgbe_hic_write_autoneg_status(hw, ecmd->autoneg); + clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); } + adapter->autoneg = ecmd->autoneg ? 
1 : 0; } if (err) @@ -1212,8 +1386,109 @@ static int txgbe_set_settings(struct net_device *netdev, return err; } -#endif /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ +#endif /* !ETHTOOL_GLINKSETTINGS */ +#ifdef ETHTOOL_GFECPARAM +static int txgbe_get_fec_param(struct net_device *netdev, + struct ethtool_fecparam *fecparam) +{ + int err = 0; + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u32 supported_link = 0; + bool autoneg = false; + u32 speed = 0; + bool link_up; + + TCALL(hw, mac.ops.get_link_capabilities, &supported_link, &autoneg); + + if (hw->mac.type != txgbe_mac_aml) { + err = -EAGAIN; + goto done; + } + TCALL(hw, mac.ops.check_link, &speed, &link_up, false); + fecparam->fec = 0; + if (speed == TXGBE_LINK_SPEED_10GB_FULL) { + fecparam->fec |= ETHTOOL_FEC_OFF; + fecparam->active_fec = ETHTOOL_FEC_OFF; + goto done; + } + if (adapter->fec_link_mode == TXGBE_PHY_FEC_AUTO) + fecparam->fec |= ETHTOOL_FEC_AUTO; + else if (adapter->fec_link_mode & TXGBE_PHY_FEC_BASER) + fecparam->fec |= ETHTOOL_FEC_BASER; + else if (adapter->fec_link_mode & TXGBE_PHY_FEC_RS) + fecparam->fec |= ETHTOOL_FEC_RS; + else + fecparam->fec |= ETHTOOL_FEC_OFF; + + if (!link_up) { + fecparam->active_fec = ETHTOOL_FEC_OFF; + goto done; + } + switch (adapter->cur_fec_link) { + case TXGBE_PHY_FEC_BASER: + fecparam->active_fec = ETHTOOL_FEC_BASER; + break; + case TXGBE_PHY_FEC_RS: + fecparam->active_fec = ETHTOOL_FEC_RS; + break; + case TXGBE_PHY_FEC_OFF: + fecparam->active_fec = ETHTOOL_FEC_OFF; + break; + default: + fecparam->active_fec = ETHTOOL_FEC_OFF; + break; + } +done: + return err; +} + +static int txgbe_set_fec_param(struct net_device *netdev, + struct ethtool_fecparam *fecparam) +{ + int err = 0; + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u8 cur_fec_mode = adapter->fec_link_mode; + bool autoneg = false; + u32 supported_link = 0; + + TCALL(hw, mac.ops.get_link_capabilities, 
&supported_link, &autoneg); + + if (hw->mac.type != txgbe_mac_aml) { + err = -EAGAIN; + goto done; + } + switch (fecparam->fec) { + case ETHTOOL_FEC_AUTO: + adapter->fec_link_mode = TXGBE_PHY_FEC_AUTO; + break; + case ETHTOOL_FEC_BASER: + adapter->fec_link_mode = TXGBE_PHY_FEC_BASER; + break; + case ETHTOOL_FEC_OFF: + case ETHTOOL_FEC_NONE: + adapter->fec_link_mode = TXGBE_PHY_FEC_OFF; + break; + case ETHTOOL_FEC_RS: + adapter->fec_link_mode = TXGBE_PHY_FEC_RS; + break; + default: + e_warn(drv, "Unsupported FEC mode: %d", + fecparam->fec); + err = -EINVAL; + goto done; + } + if (cur_fec_mode != adapter->fec_link_mode) { + /* reset link */ + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + txgbe_service_event_schedule(adapter); + } +done: + return err; +} +#endif /* ETHTOOL_GFECPARAM */ static void txgbe_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { @@ -1810,10 +2085,10 @@ static int txgbe_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) #endif { + struct txgbe_ring *tx_ring = NULL, *rx_ring = NULL; struct txgbe_adapter *adapter = netdev_priv(netdev); - struct txgbe_ring *temp_ring; - int i, err = 0; u32 new_rx_count, new_tx_count; + int i, j, err = 0; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; @@ -1854,19 +2129,11 @@ static int txgbe_set_ringparam(struct net_device *netdev, adapter->tx_ring_count = new_tx_count; adapter->xdp_ring_count = new_tx_count; adapter->rx_ring_count = new_rx_count; - goto clear_reset; - } - - /* allocate temporary buffer to store rings in */ - i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); - temp_ring = vmalloc(i * sizeof(struct txgbe_ring)); - - if (!temp_ring) { - err = -ENOMEM; - goto clear_reset; + goto done; } - txgbe_down(adapter); + i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues, + adapter->num_rx_queues); /* * Setup new Tx resources and free the old Tx resources in that order. 
@@ -1875,66 +2142,181 @@ static int txgbe_set_ringparam(struct net_device *netdev, * have resources even in the case of an allocation failure. */ if (new_tx_count != adapter->tx_ring_count) { + netdev_info(netdev, + "Changing Tx descriptor count from %d to %d.\n", + adapter->tx_ring[0]->count, new_tx_count); + tx_ring = kcalloc(i, sizeof(struct txgbe_ring), GFP_KERNEL); + if (!tx_ring) { + err = -ENOMEM; + goto done; + } + for (i = 0; i < adapter->num_tx_queues; i++) { - memcpy(&temp_ring[i], adapter->tx_ring[i], + memcpy(&tx_ring[i], adapter->tx_ring[i], sizeof(struct txgbe_ring)); - temp_ring[i].count = new_tx_count; - err = txgbe_setup_tx_resources(&temp_ring[i]); + tx_ring[i].count = new_tx_count; + /* the desc and bi pointers will be reallocated + * in the setup call + */ + tx_ring[i].desc = NULL; + tx_ring[i].tx_buffer_info = NULL; + err = txgbe_setup_tx_resources(&tx_ring[i]); if (err) { while (i) { i--; - txgbe_free_tx_resources(&temp_ring[i]); + txgbe_free_tx_resources(&tx_ring[i]); } - goto err_setup; + + kfree(tx_ring); + tx_ring = NULL; + err = -ENOMEM; + + goto done; } } - for (i = 0; i < adapter->num_tx_queues; i++) { - txgbe_free_tx_resources(adapter->tx_ring[i]); - - memcpy(adapter->tx_ring[i], &temp_ring[i], + for (j = 0; j < adapter->num_xdp_queues; j++, i++) { + memcpy(&tx_ring[i], adapter->xdp_ring[j], sizeof(struct txgbe_ring)); - } - adapter->tx_ring_count = new_tx_count; + tx_ring[i].count = new_tx_count; + /* the desc and bi pointers will be reallocated + * in the setup call + */ + tx_ring[i].desc = NULL; + tx_ring[i].tx_buffer_info = NULL; + err = txgbe_setup_tx_resources(&tx_ring[i]); + if (err) { + while (i) { + i--; + txgbe_free_tx_resources(&tx_ring[i]); + } + + kfree(tx_ring); + tx_ring = NULL; + err = -ENOMEM; + + goto done; + } + } } /* Repeat the process for the Rx rings if needed */ if (new_rx_count != adapter->rx_ring_count) { + netdev_info(netdev, + "Changing Rx descriptor count from %d to %d\n", + adapter->rx_ring[0]->count, 
new_rx_count); + rx_ring = kcalloc(i, sizeof(struct txgbe_ring), GFP_KERNEL); + if (!rx_ring) { + err = -ENOMEM; + goto free_tx; + } + for (i = 0; i < adapter->num_rx_queues; i++) { - memcpy(&temp_ring[i], adapter->rx_ring[i], + u16 unused; + + memcpy(&rx_ring[i], adapter->rx_ring[i], sizeof(struct txgbe_ring)); #ifdef HAVE_XDP_BUFF_RXQ - xdp_rxq_info_unreg(&temp_ring[i].xdp_rxq); + xdp_rxq_info_unreg(&rx_ring[i].xdp_rxq); #endif - temp_ring[i].count = new_rx_count; - err = txgbe_setup_rx_resources(&temp_ring[i]); + rx_ring[i].count = new_rx_count; + /* the desc and bi pointers will be reallocated + * in the setup call + */ + rx_ring[i].desc = NULL; + rx_ring[i].rx_buffer_info = NULL; + err = txgbe_setup_rx_resources(&rx_ring[i]); + if (err) + goto rx_unwind; + + unused = txgbe_desc_unused(&rx_ring[i]); + err = txgbe_alloc_rx_buffers(&rx_ring[i], unused); +rx_unwind: if (err) { - while (i) { - i--; - txgbe_free_rx_resources(&temp_ring[i]); - } - goto err_setup; + err = -ENOMEM; + + do { + txgbe_free_rx_resources(&rx_ring[i]); + } while (i--); + kfree(rx_ring); + rx_ring = NULL; + + goto free_tx; } } + } + + /* Bring interface down, copy in the new ring info, + * then restore the interface + */ + txgbe_down(adapter); + + if (tx_ring) { + for (i = 0; i < adapter->num_tx_queues; i++) { + txgbe_free_tx_resources(adapter->tx_ring[i]); + memcpy(adapter->tx_ring[i], &tx_ring[i], + sizeof(struct txgbe_ring)); + } + + for (j = 0; j < adapter->num_xdp_queues; j++, i++) { + txgbe_free_tx_resources(adapter->xdp_ring[j]); + memcpy(adapter->xdp_ring[j], &tx_ring[i], + sizeof(struct txgbe_ring)); + } + kfree(tx_ring); + tx_ring = NULL; + } + if (rx_ring) { for (i = 0; i < adapter->num_rx_queues; i++) { txgbe_free_rx_resources(adapter->rx_ring[i]); + /* this is to fake out the allocation routine + * into thinking it has to realloc everything + * but the recycling logic will let us re-use + * the buffers allocated above + */ + rx_ring[i].next_to_use = 0; + rx_ring[i].next_to_clean 
= 0; + rx_ring[i].next_to_alloc = 0; + /* do a struct copy */ + memcpy(adapter->rx_ring[i], &rx_ring[i], + sizeof(struct txgbe_ring)); + } + kfree(rx_ring); + rx_ring = NULL; + } - memcpy(adapter->rx_ring[i], &temp_ring[i], - sizeof(struct txgbe_ring)); + adapter->tx_ring_count = new_tx_count; + adapter->xdp_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + + txgbe_up(adapter); + +free_tx: +/* error cleanup if the Rx allocations failed after getting Tx */ + if (tx_ring) { + for (i = 0; i < adapter->num_tx_queues; i++) { + txgbe_free_tx_resources(adapter->tx_ring[i]); + memcpy(adapter->tx_ring[i], &tx_ring[i], + sizeof(struct txgbe_ring)); } - adapter->rx_ring_count = new_rx_count; + for (j = 0; j < adapter->num_xdp_queues; j++, i++) { + txgbe_free_tx_resources(adapter->xdp_ring[j]); + memcpy(adapter->xdp_ring[j], &tx_ring[i], + sizeof(struct txgbe_ring)); + } + + kfree(tx_ring); + tx_ring = NULL; } -err_setup: - txgbe_up(adapter); - vfree(temp_ring); -clear_reset: +done: clear_bit(__TXGBE_RESETTING, &adapter->state); + return err; } @@ -1991,10 +2373,7 @@ static int txgbe_get_sset_count(struct net_device *netdev, int sset) static u32 txgbe_get_priv_flags(struct net_device *dev) { struct txgbe_adapter *adapter = netdev_priv(dev); - struct txgbe_hw *hw = &adapter->hw; - u32 i , ret_flags = 0; - if(txgbe_is_lldp(hw)) - e_err(drv, "Can not get lldp flags from flash\n"); + u32 i, ret_flags = 0; for (i = 0; i < TXGBE_PRIV_FLAGS_STR_LEN; i++) { const struct txgbe_priv_flags *priv_flags; @@ -2035,7 +2414,7 @@ static int txgbe_set_priv_flags(struct net_device *dev, u32 flags) /* If this is a read-only flag, it can't be changed */ if (priv_flags->read_only && - ((orig_flags ^ new_flags) & ~BIT(i))) + ((orig_flags ^ new_flags) & BIT(i))) return -EOPNOTSUPP; } @@ -2046,12 +2425,26 @@ static int txgbe_set_priv_flags(struct net_device *dev, u32 flags) if (changed_flags & TXGBE_ETH_PRIV_FLAG_LLDP) reset_needed = 1; - if (changed_flags & 
TXGBE_ETH_PRIV_FLAG_LLDP) + if (changed_flags & TXGBE_ETH_PRIV_FLAG_LLDP) { status = txgbe_hic_write_lldp(&adapter->hw, (u32)(new_flags & TXGBE_ETH_PRIV_FLAG_LLDP)); + if (!status) + adapter->eth_priv_flags = new_flags; + } - if(!status) +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC + if (changed_flags & TXGBE_ETH_PRIV_FLAG_LEGACY_RX) { adapter->eth_priv_flags = new_flags; + if (adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LEGACY_RX) + adapter->flags2 |= TXGBE_FLAG2_RX_LEGACY; + else + adapter->flags2 &= ~TXGBE_FLAG2_RX_LEGACY; + + /* reset interface to repopulate queues */ + if (netif_running(dev)) + txgbe_reinit_locked(adapter); + } +#endif return status; } @@ -2576,36 +2969,137 @@ static void txgbe_free_desc_rings(struct txgbe_adapter *adapter) txgbe_free_rx_resources(&adapter->test_rx_ring); } -static int txgbe_setup_desc_rings(struct txgbe_adapter *adapter) +static void txgbe_loopback_configure_tx_ring(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) { - struct txgbe_ring *tx_ring = &adapter->test_tx_ring; - struct txgbe_ring *rx_ring = &adapter->test_rx_ring; struct txgbe_hw *hw = &adapter->hw; - int ret_val; - int err; + u64 tdba = ring->dma; + int wait_loop = 10; + u32 txdctl = TXGBE_PX_TR_CFG_ENABLE; + u8 reg_idx = ring->reg_idx; +#ifdef HAVE_AF_XDP_ZC_SUPPORT + ring->xsk_pool = NULL; + if (ring_is_xdp(ring)) + ring->xsk_pool = txgbe_xsk_umem(adapter, ring); +#endif + /* disable queue to avoid issues while updating state */ + wr32(hw, TXGBE_PX_TR_CFG(reg_idx), TXGBE_PX_TR_CFG_SWFLSH); + TXGBE_WRITE_FLUSH(hw); - TCALL(hw, mac.ops.setup_rxpba, 0, 0, PBA_STRATEGY_EQUAL); + wr32(hw, TXGBE_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32)); + wr32(hw, TXGBE_PX_TR_BAH(reg_idx), tdba >> 32); - /* Setup Tx descriptor ring and Tx buffers */ - tx_ring->count = TXGBE_DEFAULT_TXD; - tx_ring->queue_index = 0; - tx_ring->dev = pci_dev_to_dev(adapter->pdev); - tx_ring->netdev = adapter->netdev; - tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; + /* reset head and tail pointers 
*/ + wr32(hw, TXGBE_PX_TR_RP(reg_idx), 0); + wr32(hw, TXGBE_PX_TR_WP(reg_idx), 0); + ring->tail = adapter->io_addr + TXGBE_PX_TR_WP(reg_idx); - err = txgbe_setup_tx_resources(tx_ring); - if (err) - return 1; + /* reset ntu and ntc to place SW in sync with hardwdare */ + ring->next_to_clean = 0; + ring->next_to_use = 0; - wr32m(&adapter->hw, TXGBE_TDM_CTL, - TXGBE_TDM_CTL_TE, TXGBE_TDM_CTL_TE); + txdctl |= TXGBE_RING_SIZE(ring) << TXGBE_PX_TR_CFG_TR_SIZE_SHIFT; - txgbe_configure_tx_ring(adapter, tx_ring); + /* + * set WTHRESH to encourage burst writeback, it should not be set + * higher than 1 when: + * - ITR is 0 as it could cause false TX hangs + * - ITR is set to > 100k int/sec and BQL is enabled + * + * In order to avoid issues WTHRESH + PTHRESH should always be equal + * to or less than the number of on chip descriptors, which is + * currently 40. + */ + + txdctl |= 0x20 << TXGBE_PX_TR_CFG_WTHRESH_SHIFT; + + /* reinitialize flowdirector state */ + if (adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE) { + ring->atr_sample_rate = adapter->atr_sample_rate; + ring->atr_count = 0; + set_bit(__TXGBE_TX_FDIR_INIT_DONE, &ring->state); + } else { + ring->atr_sample_rate = 0; + } + + /* initialize XPS */ + if (!test_and_set_bit(__TXGBE_TX_XPS_INIT_DONE, &ring->state)) { + struct txgbe_q_vector *q_vector = ring->q_vector; + + if (q_vector) + netif_set_xps_queue(adapter->netdev, + &q_vector->affinity_mask, + ring->queue_index); + } + + clear_bit(__TXGBE_HANG_CHECK_ARMED, &ring->state); + + /* enable queue */ + wr32(hw, TXGBE_PX_TR_CFG(reg_idx), txdctl); + + /* poll to verify queue is enabled */ + do { + msleep(20); + txdctl = rd32(hw, TXGBE_PX_TR_CFG(reg_idx)); + } while (--wait_loop && !(txdctl & TXGBE_PX_TR_CFG_ENABLE)); + if (!wait_loop) + e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); +} + + +static int txgbe_setup_desc_rings(struct txgbe_adapter *adapter) +{ + struct txgbe_ring *tx_ring = &adapter->test_tx_ring; + struct txgbe_ring *rx_ring = 
&adapter->test_rx_ring; + struct txgbe_hw *hw = &adapter->hw; + int ret_val; + int err; + + TCALL(hw, mac.ops.setup_rxpba, 0, 0, PBA_STRATEGY_EQUAL); + + /* Setup Tx descriptor ring and Tx buffers */ + tx_ring->count = TXGBE_DEFAULT_TXD; + tx_ring->queue_index = 0; + tx_ring->dev = pci_dev_to_dev(adapter->pdev); + tx_ring->netdev = adapter->netdev; + tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; + + err = txgbe_setup_tx_resources(tx_ring); + if (err) + return 1; + + wr32m(&adapter->hw, TXGBE_TDM_CTL, + TXGBE_TDM_CTL_TE, TXGBE_TDM_CTL_TE); + + txgbe_loopback_configure_tx_ring(adapter, tx_ring); /* enable mac transmitter */ - wr32m(hw, TXGBE_MAC_TX_CFG, - TXGBE_MAC_TX_CFG_TE | TXGBE_MAC_TX_CFG_SPEED_MASK, - TXGBE_MAC_TX_CFG_TE | TXGBE_MAC_TX_CFG_SPEED_10G); + + if (hw->mac.type == txgbe_mac_aml40) { + wr32(hw, TXGBE_MAC_TX_CFG, (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_AML_SPEED_40G); + } else if (hw->mac.type == txgbe_mac_aml) { + if ((rd32(hw, TXGBE_CFG_PORT_ST) & TXGBE_CFG_PORT_ST_AML_LINK_10G) == + TXGBE_CFG_PORT_ST_AML_LINK_10G) + wr32(hw, TXGBE_MAC_TX_CFG, (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_AML_SPEED_10G); + else + wr32(hw, TXGBE_MAC_TX_CFG, (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_AML_SPEED_25G); + } else { + if(txgbe_check_reset_blocked(hw) && (hw->phy.autoneg_advertised == TXGBE_LINK_SPEED_1GB_FULL || + adapter->link_speed == TXGBE_LINK_SPEED_1GB_FULL)) + wr32m(hw, TXGBE_MAC_TX_CFG, + TXGBE_MAC_TX_CFG_TE | TXGBE_MAC_TX_CFG_SPEED_MASK, + TXGBE_MAC_TX_CFG_TE | TXGBE_MAC_TX_CFG_SPEED_1G); + else + wr32m(hw, TXGBE_MAC_TX_CFG, + TXGBE_MAC_TX_CFG_TE | TXGBE_MAC_TX_CFG_SPEED_MASK, + TXGBE_MAC_TX_CFG_TE | TXGBE_MAC_TX_CFG_SPEED_10G); + } /* Setup Rx Descriptor ring and Rx buffers */ rx_ring->count = TXGBE_DEFAULT_RXD; @@ -2662,6 +3156,14 @@ static int 
txgbe_setup_config(struct txgbe_adapter *adapter) wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL, TXGBE_CFG_PORT_CTL_FORCE_LKUP, ~TXGBE_CFG_PORT_CTL_FORCE_LKUP); + /* enable mac transmitter */ + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + wr32m(hw, TXGBE_TSC_CTL, + TXGBE_TSC_CTL_TX_DIS | TXGBE_TSC_MACTX_AFIFO_RD_WTRMRK, 0xd0000); + + wr32m(hw, TXGBE_RSC_CTL, + TXGBE_RSC_CTL_RX_DIS, 0); + } TXGBE_WRITE_FLUSH(hw); usleep_range(10000, 20000); @@ -2669,6 +3171,28 @@ static int txgbe_setup_config(struct txgbe_adapter *adapter) return 0; } +static int txgbe_setup_mac_loopback_test(struct txgbe_adapter *adapter) +{ + wr32m(&adapter->hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_LM | TXGBE_MAC_RX_CFG_RE, + TXGBE_MAC_RX_CFG_LM | TXGBE_MAC_RX_CFG_RE); + + wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_FORCE_LKUP, TXGBE_CFG_PORT_CTL_FORCE_LKUP); + + return 0; +} + +static void txgbe_mac_loopback_cleanup(struct txgbe_adapter *adapter) +{ + wr32m(&adapter->hw, TXGBE_TSC_CTL, + TXGBE_TSC_MACTX_AFIFO_RD_WTRMRK, 0x20000); + wr32m(&adapter->hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_LM, ~TXGBE_MAC_RX_CFG_LM); + wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_FORCE_LKUP, ~TXGBE_CFG_PORT_CTL_FORCE_LKUP); +} + static int txgbe_setup_phy_loopback_test(struct txgbe_adapter *adapter) { struct txgbe_hw *hw = &adapter->hw; @@ -2883,25 +3407,31 @@ static int txgbe_run_loopback_test(struct txgbe_adapter *adapter) static int txgbe_loopback_test(struct txgbe_adapter *adapter, u64 *data) { + struct txgbe_hw *hw = &adapter->hw; /* Let firmware know the driver has taken over */ wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL, TXGBE_CFG_PORT_CTL_DRV_LOAD, TXGBE_CFG_PORT_CTL_DRV_LOAD); - - *data = txgbe_setup_desc_rings(adapter); - if (*data) - goto out; - *data = txgbe_setup_config(adapter); if (*data) goto err_loopback; - *data = txgbe_setup_phy_loopback_test(adapter); + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + *data = 
txgbe_setup_mac_loopback_test(adapter); + else + *data = txgbe_setup_phy_loopback_test(adapter); if (*data) goto err_loopback; + + *data = txgbe_setup_desc_rings(adapter); + if (*data) + goto out; *data = txgbe_run_loopback_test(adapter); if (*data) e_info(hw, "phy loopback testing failed\n"); - txgbe_phy_loopback_cleanup(adapter); + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + txgbe_mac_loopback_cleanup(adapter); + else + txgbe_phy_loopback_cleanup(adapter); err_loopback: txgbe_free_desc_rings(adapter); @@ -2988,8 +3518,7 @@ static void txgbe_diag_test(struct net_device *netdev, eth_test->flags |= ETH_TEST_FL_FAILED; if (((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || - ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP) || - (adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LLDP)){ + ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP)){ e_info(hw, "skip MAC loopback diagnostic when veto set\n"); data[3] = 0; goto skip_loopback; @@ -3015,8 +3544,6 @@ static void txgbe_diag_test(struct net_device *netdev, clear_bit(__TXGBE_TESTING, &adapter->state); if (if_running) txgbe_open(netdev); - else - TCALL(hw, mac.ops.disable_tx_laser); } else { e_info(hw, "online testing starting\n"); @@ -3037,21 +3564,6 @@ static void txgbe_diag_test(struct net_device *netdev, msleep_interruptible(4 * 1000); } - -static int txgbe_wol_exclusion(struct txgbe_adapter *adapter, - struct ethtool_wolinfo *wol) -{ - int retval = 0; - - /* WOL not supported for all devices */ - if (!txgbe_wol_supported(adapter)) { - retval = 1; - wol->supported = 0; - } - - return retval; -} - static void txgbe_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { @@ -3062,9 +3574,9 @@ static void txgbe_get_wol(struct net_device *netdev, WAKE_BCAST | WAKE_MAGIC; wol->wolopts = 0; - if (txgbe_wol_exclusion(adapter, wol) || - !device_can_wakeup(pci_dev_to_dev(adapter->pdev))) + if (!device_can_wakeup(pci_dev_to_dev(adapter->pdev))) return; 
+ if((hw->subsystem_device_id & TXGBE_WOL_MASK) != TXGBE_WOL_SUP) return; @@ -3082,16 +3594,12 @@ static void txgbe_get_wol(struct net_device *netdev, static int txgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { - - struct txgbe_adapter *adapter = netdev_priv(netdev); struct txgbe_hw *hw = &adapter->hw; if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) return -EOPNOTSUPP; - if (txgbe_wol_exclusion(adapter, wol)) - return wol->wolopts ? -EOPNOTSUPP : 0; if((hw->subsystem_device_id & TXGBE_WOL_MASK) != TXGBE_WOL_SUP) return -EOPNOTSUPP; @@ -3135,6 +3643,8 @@ static int txgbe_set_phys_id(struct net_device *netdev, switch (state) { case ETHTOOL_ID_ACTIVE: + if (hw->mac.type == txgbe_mac_aml || (hw->mac.type == txgbe_mac_aml40)) + txgbe_hic_notify_led_active(hw, 1); adapter->led_reg = rd32(hw, TXGBE_CFG_LED_CTL); return 2; @@ -3186,6 +3696,8 @@ static int txgbe_set_phys_id(struct net_device *netdev, case ETHTOOL_ID_INACTIVE: /* Restore LED settings */ + if (hw->mac.type == txgbe_mac_aml || (hw->mac.type == txgbe_mac_aml40)) + txgbe_hic_notify_led_active(hw, 0); wr32(&adapter->hw, TXGBE_CFG_LED_CTL, adapter->led_reg); if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) { @@ -3278,6 +3790,9 @@ static int txgbe_get_coalesce(struct net_device *netdev, else ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + if (adapter->rx_itr_setting == 1) + ec->use_adaptive_rx_coalesce = 1; + /* if in mixed tx/rx queues per vector mode, report only rx settings */ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) return 0; @@ -3343,14 +3858,14 @@ static int txgbe_set_coalesce(struct net_device *netdev, u16 tx_itr_param, rx_itr_param; u16 tx_itr_prev; bool need_reset = false; - +#if 0 if(ec->tx_max_coalesced_frames_irq == adapter->tx_work_limit && ((adapter->rx_itr_setting <= 1) ? 
(ec->rx_coalesce_usecs == adapter->rx_itr_setting) : (ec->rx_coalesce_usecs == adapter->rx_itr_setting >> 2))) { e_info(probe, "no coalesce parameters changed, aborting\n"); return -EINVAL; } - +#endif if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) { /* reject Tx specific changes in case of mixed RxTx vectors */ if (ec->tx_coalesce_usecs) @@ -3360,13 +3875,35 @@ static int txgbe_set_coalesce(struct net_device *netdev, tx_itr_prev = adapter->tx_itr_setting; } - if (ec->tx_max_coalesced_frames_irq) - adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; + if (ec->tx_max_coalesced_frames_irq) { + if (ec->tx_max_coalesced_frames_irq <= TXGBE_MAX_TX_WORK) + adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; + else + return -EINVAL; + } else + return -EINVAL; if ((ec->rx_coalesce_usecs > (TXGBE_MAX_EITR >> 2)) || (ec->tx_coalesce_usecs > (TXGBE_MAX_EITR >> 2))) return -EINVAL; + if (ec->use_adaptive_tx_coalesce) + return -EINVAL; + + if (ec->use_adaptive_rx_coalesce) { + adapter->rx_itr_setting = 1; + return 0; + } else { + /* restore to default rxusecs value when adaptive itr turn off */ + /* user shall turn off adaptive itr and set user-defined rx usecs value + * in two cmds separately. 
+ */ + if (adapter->rx_itr_setting == 1) { + adapter->rx_itr_setting = TXGBE_20K_ITR; + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + } + } + if (ec->rx_coalesce_usecs > 1) adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; else @@ -3676,6 +4213,38 @@ static int txgbe_set_flags(struct net_device *netdev, u32 data) #endif /* ETHTOOL_GFLAGS */ #endif /* HAVE_NDO_SET_FEATURES */ #ifdef ETHTOOL_GRXRINGS +static int txgbe_match_etype_entry(struct txgbe_adapter *adapter, u16 sw_idx) +{ + struct txgbe_etype_filter_info *ef_info = &adapter->etype_filter_info; + int i; + + for (i = 0; i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS; i++) { + if (ef_info->etype_filters[i].rule_idx == sw_idx) + break; + } + + return i; +} + +static int txgbe_get_etype_rule(struct txgbe_adapter *adapter, + struct ethtool_rx_flow_spec *fsp, int ef_idx) +{ + struct txgbe_etype_filter_info *ef_info = &adapter->etype_filter_info; + u8 mask[6] = {0, 0, 0, 0, 0, 0}; + u8 mac[6] = {0, 0, 0, 0, 0, 0}; + + fsp->flow_type = ETHER_FLOW; + ether_addr_copy(fsp->h_u.ether_spec.h_dest, mac); + ether_addr_copy(fsp->m_u.ether_spec.h_dest, mask); + ether_addr_copy(fsp->h_u.ether_spec.h_source, mac); + ether_addr_copy(fsp->m_u.ether_spec.h_source, mask); + fsp->h_u.ether_spec.h_proto = htons(ef_info->etype_filters[ef_idx].ethertype); + fsp->m_u.ether_spec.h_proto = 0xFFFF; + fsp->ring_cookie = ef_info->etype_filters[ef_idx].action; + + return 0; +} + static int txgbe_get_ethtool_fdir_entry(struct txgbe_adapter *adapter, struct ethtool_rxnfc *cmd) { @@ -3685,6 +4254,14 @@ static int txgbe_get_ethtool_fdir_entry(struct txgbe_adapter *adapter, struct hlist_node *node; struct txgbe_fdir_filter *rule = NULL; + if (adapter->etype_filter_info.count > 0) { + int ef_idx; + + ef_idx = txgbe_match_etype_entry(adapter, fsp->location); + if (ef_idx < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS) + return txgbe_get_etype_rule(adapter, fsp, ef_idx); + } + /* report total rule count */ cmd->data = (1024 << adapter->fdir_pballoc) - 2; @@ 
-3747,9 +4324,10 @@ static int txgbe_get_ethtool_fdir_all(struct txgbe_adapter *adapter, struct ethtool_rxnfc *cmd, u32 *rule_locs) { + struct txgbe_etype_filter_info *ef_info = &adapter->etype_filter_info; struct hlist_node *node; struct txgbe_fdir_filter *rule; - int cnt = 0; + int cnt = 0, i; /* report total rule count */ cmd->data = (1024 << adapter->fdir_pballoc) - 2; @@ -3762,6 +4340,13 @@ static int txgbe_get_ethtool_fdir_all(struct txgbe_adapter *adapter, cnt++; } + for (i = 0; i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS; i++) { + if (ef_info->ethertype_mask & (1 << i)) { + rule_locs[cnt] = ef_info->etype_filters[i].rule_idx; + cnt++; + } + } + cmd->rule_cnt = cnt; return 0; @@ -3825,7 +4410,8 @@ static int txgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, ret = 0; break; case ETHTOOL_GRXCLSRLCNT: - cmd->rule_cnt = adapter->fdir_filter_count; + cmd->rule_cnt = adapter->fdir_filter_count + + adapter->etype_filter_info.count; ret = 0; break; case ETHTOOL_GRXCLSRULE: @@ -3845,6 +4431,161 @@ static int txgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return ret; } +static int +txgbe_ethertype_filter_lookup(struct txgbe_etype_filter_info *ef_info, + u16 ethertype) +{ + int i; + + for (i = 0; i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS; i++) { + if (ef_info->etype_filters[i].ethertype == ethertype && + (ef_info->ethertype_mask & (1 << i))) + return i; + } + return -1; +} + +static int +txgbe_ethertype_filter_insert(struct txgbe_etype_filter_info *ef_info, + struct txgbe_ethertype_filter *etype_filter) +{ + int i; + + for (i = 0; i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS; i++) { + if (ef_info->ethertype_mask & (1 << i)) { + continue; + } + ef_info->ethertype_mask |= 1 << i; + ef_info->etype_filters[i].ethertype = etype_filter->ethertype; + ef_info->etype_filters[i].etqf = etype_filter->etqf; + ef_info->etype_filters[i].etqs = etype_filter->etqs; + ef_info->etype_filters[i].rule_idx = etype_filter->rule_idx; + ef_info->etype_filters[i].action = 
etype_filter->action; + break; + } + + return (i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS ? i : -1); +} + +static int txgbe_add_ethertype_filter(struct txgbe_adapter *adapter, + struct ethtool_rx_flow_spec *fsp) +{ + struct txgbe_etype_filter_info *ef_info = &adapter->etype_filter_info; + struct txgbe_ethertype_filter etype_filter; + struct txgbe_hw *hw = &adapter->hw; + u16 ethertype; + u32 etqf = 0; + u32 etqs = 0; + u8 queue, vf; + u32 ring; + int ret; + + ethertype = ntohs(fsp->h_u.ether_spec.h_proto); + if (!ethertype) { + e_err(drv, "protocol number is missing for ethertype filter\n"); + return -EINVAL; + } + if (ethertype == ETH_P_IP || ethertype == ETH_P_IPV6) { + e_err(drv, "unsupported ether_type(0x%04x) in ethertype filter\n", + ethertype); + return -EINVAL; + } + + ret = txgbe_ethertype_filter_lookup(ef_info, ethertype); + if (ret >= 0) { + e_err(drv, "ethertype (0x%04x) filter exists.", ethertype); + return -EEXIST; + } + + /* ring_cookie is a masked into a set of queues and txgbe pools */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { + e_err(drv, "drop option is unsupported."); + return -EINVAL; + } + + ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); + vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); + if (!vf && ring >= adapter->num_rx_queues) + return -EINVAL; + else if (vf && ((vf > adapter->num_vfs) || + ring >= adapter->num_rx_queues_per_pool)) + return -EINVAL; + + /* Map the ring onto the absolute queue index */ + if (!vf) + queue = adapter->rx_ring[ring]->reg_idx; + else + queue = ((vf - 1) * adapter->num_rx_queues_per_pool) + ring; + + etqs |= queue << TXGBE_RDB_ETYPE_CLS_RX_QUEUE_SHIFT; + etqs |= TXGBE_RDB_ETYPE_CLS_QUEUE_EN; + etqf = TXGBE_PSR_ETYPE_SWC_FILTER_EN | ethertype; + if (adapter->num_vfs) { + u8 pool; + + if (!vf) + pool = adapter->num_vfs; + else + pool = vf - 1; + + etqf |= TXGBE_PSR_ETYPE_SWC_POOL_ENABLE; + etqf |= pool << TXGBE_PSR_ETYPE_SWC_POOL_SHIFT; + } + + etype_filter.ethertype = ethertype; + etype_filter.etqf = 
etqf; + etype_filter.etqs = etqs; + etype_filter.rule_idx = fsp->location; + etype_filter.action = fsp->ring_cookie; + ret = txgbe_ethertype_filter_insert(ef_info, &etype_filter); + if (ret < 0) { + e_err(drv, "ethertype filters are full."); + return -ENOSPC; + } + + wr32(hw, TXGBE_PSR_ETYPE_SWC(ret), etqf); + wr32(hw, TXGBE_RDB_ETYPE_CLS(ret), etqs); + TXGBE_WRITE_FLUSH(hw); + + ef_info->count++; + + return 0; +} + +static int txgbe_del_ethertype_filter(struct txgbe_adapter *adapter, u16 sw_idx) +{ + struct txgbe_etype_filter_info *ef_info = &adapter->etype_filter_info; + struct txgbe_hw *hw = &adapter->hw; + u16 ethertype; + int idx; + + idx = txgbe_match_etype_entry(adapter, sw_idx); + if (idx == TXGBE_MAX_PSR_ETYPE_SWC_FILTERS) + return -EINVAL; + + ethertype = ef_info->etype_filters[idx].ethertype; + if (!ethertype) { + e_err(drv, "ethertype filter doesn't exist."); + return -ENOENT; + } + + ef_info->ethertype_mask &= ~(1 << idx); + ef_info->etype_filters[idx].ethertype = 0; + ef_info->etype_filters[idx].etqf = 0; + ef_info->etype_filters[idx].etqs = 0; + ef_info->etype_filters[idx].etqs = FALSE; + ef_info->etype_filters[idx].rule_idx = 0; + + wr32(hw, TXGBE_PSR_ETYPE_SWC(idx), 0); + wr32(hw, TXGBE_RDB_ETYPE_CLS(idx), 0); + TXGBE_WRITE_FLUSH(hw); + + ef_info->count--; + + return 0; + +} + static int txgbe_update_ethtool_fdir_entry(struct txgbe_adapter *adapter, struct txgbe_fdir_filter *input, u16 sw_idx) @@ -3982,10 +4723,13 @@ static int txgbe_add_ethtool_fdir_entry(struct txgbe_adapter *adapter, int err; u16 ptype = 0; + if ((fsp->flow_type & ~FLOW_EXT) == ETHER_FLOW) + return txgbe_add_ethertype_filter(adapter, fsp); + if (!(adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE)) return -EOPNOTSUPP; - /* ring_cookie is a masked into a set of queues and ixgbe pools or + /* ring_cookie is a masked into a set of queues and txgbe pools or * we use drop index */ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { @@ -4162,6 +4906,12 @@ static int 
txgbe_del_ethtool_fdir_entry(struct txgbe_adapter *adapter, (struct ethtool_rx_flow_spec *)&cmd->fs; int err; + if (adapter->etype_filter_info.count > 0) { + err = txgbe_del_ethertype_filter(adapter, fsp->location); + if (!err) + return 0; + } + spin_lock(&adapter->fdir_perfect_lock); err = txgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location); spin_unlock(&adapter->fdir_perfect_lock); @@ -4340,18 +5090,27 @@ static void txgbe_get_reta(struct txgbe_adapter *adapter, u32 *indir) indir[i] = adapter->rss_indir_tbl[i]; } +#ifdef HAVE_ETHTOOL_RXFH_RXFHPARAMS +static int txgbe_get_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh) +#else #ifdef HAVE_RXFH_HASHFUNC static int txgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) #else /* HAVE_RXFH_HASHFUNC */ static int txgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) #endif /* HAVE_RXFH_HASHFUNC */ +#endif /* HAVE_ETHTOOL_RXFH_RXFHPARAMS */ { struct txgbe_adapter *adapter = netdev_priv(netdev); - +#ifdef HAVE_ETHTOOL_RXFH_RXFHPARAMS + u8 *key = rxfh->key; + u32 *indir = rxfh->indir; +#else #ifdef HAVE_RXFH_HASHFUNC if (hfunc) *hfunc = ETH_RSS_HASH_TOP; +#endif #endif if (indir) @@ -4362,6 +5121,11 @@ static int txgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) return 0; } +#ifdef HAVE_ETHTOOL_RXFH_RXFHPARAMS +static int txgbe_set_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +#else #ifdef HAVE_RXFH_HASHFUNC static int txgbe_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) @@ -4373,12 +5137,19 @@ static int txgbe_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key) #endif /* HAVE_RXFH_NONCONST */ #endif /* HAVE_RXFH_HASHFUNC */ +#endif /* HAVE_ETHTOOL_RXFH_RXFHPARAMS */ { struct txgbe_adapter *adapter = netdev_priv(netdev); int i; u32 reta_entries = txgbe_rss_indir_tbl_entries(adapter); + struct txgbe_hw *hw = &adapter->hw; +#ifdef 
HAVE_ETHTOOL_RXFH_RXFHPARAMS + u8 hfunc = rxfh->hfunc; + u8 *key = rxfh->key; + u32 *indir = rxfh->indir; +#endif -#ifdef HAVE_RXFH_HASHFUNC +#if (defined(HAVE_RXFH_HASHFUNC) || defined(HAVE_ETHTOOL_RXFH_RXFHPARAMS)) if (hfunc) return -EINVAL; #endif @@ -4398,22 +5169,43 @@ static int txgbe_set_rxfh(struct net_device *netdev, const u32 *indir, if (indir[i] >= max_queues) return -EINVAL; - for (i = 0; i < reta_entries; i++) - adapter->rss_indir_tbl[i] = indir[i]; + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) { + for (i = 0; i < reta_entries; i++) + adapter->rss_indir_tbl[i] = indir[i]; + txgbe_store_vfreta(adapter); + } else { + for (i = 0; i < reta_entries; i++) + adapter->rss_indir_tbl[i] = indir[i]; + txgbe_store_reta(adapter); + } } - if (key) + if (key) { memcpy(adapter->rss_key, key, txgbe_get_rxfh_key_size(netdev)); - txgbe_store_reta(adapter); + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) { + unsigned int pf_pool = adapter->num_vfs; + for (i = 0; i < 10; i++) + wr32(hw, TXGBE_RDB_VMRSSRK(i, pf_pool), adapter->rss_key[i]); + } else { + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + wr32(hw, TXGBE_RDB_RSSRK(i), adapter->rss_key[i]); + } + } return 0; } #endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ #ifdef HAVE_ETHTOOL_GET_TS_INFO +#ifdef HAVE_KERNEL_ETHTOOL_TS_INFO +static int txgbe_get_ts_info(struct net_device *dev, + struct kernel_ethtool_ts_info *info) +#else static int txgbe_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) +#endif { struct txgbe_adapter *adapter = netdev_priv(dev); @@ -4469,7 +5261,7 @@ static unsigned int txgbe_max_channels(struct txgbe_adapter *adapter) max_combined = 1; } else if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) { /* SR-IOV currently only allows one queue on the PF */ - max_combined = 1; + max_combined = adapter->ring_feature[RING_F_RSS].mask + 1; } else if (tcs > 1) { /* For DCB report channels per traffic class */ if (tcs > 4) { @@ -4548,6 +5340,11 @@ static int 
txgbe_set_channels(struct net_device *dev, if (count > txgbe_max_channels(adapter)) return -EINVAL; + if (count < adapter->active_vlan_limited + 1) { + e_dev_info("vlan rate limit active, can't set less than active " + "limited vlan + 1:%d", (adapter->active_vlan_limited + 1)); + return -EINVAL; + } /* update feature limits from largest to smallest supported values */ adapter->ring_feature[RING_F_FDIR].limit = count; @@ -4576,49 +5373,106 @@ static int txgbe_get_module_info(struct net_device *dev, struct txgbe_hw *hw = &adapter->hw; u32 status; u8 sff8472_rev, addr_mode; + u8 identifier = 0; + u8 sff8636_rev = 0; bool page_swap = false; u32 swfw_mask = hw->phy.phy_semaphore_mask; + u32 value; - if (0 != TCALL(hw, mac.ops.acquire_swfw_sync, swfw_mask)) - return -EBUSY; - - if (!test_bit(__TXGBE_DOWN, &adapter->state)) - cancel_work_sync(&adapter->sfp_sta_task); - - /* Check whether we support SFF-8472 or not */ - status = TCALL(hw, phy.ops.read_i2c_eeprom, - TXGBE_SFF_SFF_8472_COMP, - &sff8472_rev); - if (status != 0) - goto ERROR_IO; - - /* addressing mode is not supported */ - status = TCALL(hw, phy.ops.read_i2c_eeprom, - TXGBE_SFF_SFF_8472_SWAP, - &addr_mode); - if (status != 0) - goto ERROR_IO; - - - if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) { - e_err(drv, "Address change required to access page 0xA2, " - "but not supported. Please report the module type to the " - "driver maintainers.\n"); - page_swap = true; - } - - if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap || - !(addr_mode & TXGBE_SFF_DDM_IMPLEMENTED)) { - /* We have a SFP, but it does not support SFF-8472 */ - modinfo->type = ETH_MODULE_SFF_8079; - modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; - } else { - /* We have a SFP which supports a revision of SFF-8472. 
*/ - modinfo->type = ETH_MODULE_SFF_8472; - modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + if (hw->mac.type == txgbe_mac_aml40) { + value = rd32(hw, TXGBE_GPIO_EXT); + if (value & TXGBE_SFP1_MOD_PRST_LS) { + return -EIO; + } + + if (!netif_carrier_ok(dev)) { + e_err(drv, "\"Ethool -m\" is supported only when link is up for 40G.\n"); + return -EIO; + } } - TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); + if (hw->mac.type == txgbe_mac_aml) { + value = rd32(hw, TXGBE_GPIO_EXT); + if (value & TXGBE_SFP1_MOD_ABS_LS) { + return -EIO; + } + } + + if (hw->mac.type != txgbe_mac_sp) { + if (0 != TCALL(hw, mac.ops.acquire_swfw_sync, swfw_mask)) + return -EBUSY; + + if (!test_bit(__TXGBE_DOWN, &adapter->state)) + cancel_work_sync(&adapter->sfp_sta_task); + + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_IDENTIFIER, + &identifier); + if (status != 0) + goto ERROR_IO; + + switch (identifier) { + case TXGBE_SFF_IDENTIFIER_SFP: + /* Check whether we support SFF-8472 or not */ + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_SFF_8472_COMP, + &sff8472_rev); + if (status != 0) + goto ERROR_IO; + + /* addressing mode is not supported */ + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_SFF_8472_SWAP, + &addr_mode); + if (status != 0) + goto ERROR_IO; + + if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) { + e_err(drv, "Address change required to access page 0xA2, " + "but not supported. Please report the module type to the " + "driver maintainers.\n"); + page_swap = true; + } + + if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap || + !(addr_mode & TXGBE_SFF_DDM_IMPLEMENTED)) { + /* We have a SFP, but it does not support SFF-8472 */ + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + /* We have a SFP which supports a revision of SFF-8472. 
*/ + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + break; + case TXGBE_SFF_IDENTIFIER_QSFP: + case TXGBE_SFF_IDENTIFIER_QSFP_PLUS: + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_SFF_REVISION_ADDR, + &sff8636_rev); + if (status != 0) + goto ERROR_IO; + + /* Check revision compliance */ + if (sff8636_rev > 0x02) { + /* Module is SFF-8636 compliant */ + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = TXGBE_MODULE_QSFP_MAX_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = TXGBE_MODULE_QSFP_MAX_LEN; + } + break; + default: + e_err(drv, "SFF Module Type not recognized.\n"); + return -EINVAL; + } + + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); + } else { + modinfo->type = adapter->eeprom_type; + modinfo->eeprom_len = adapter->eeprom_len; + } return 0; @@ -4627,81 +5481,208 @@ static int txgbe_get_module_info(struct net_device *dev, return -EIO; } +#define SFF_A2_ALRM_FLG 0x170 +#define SFF_A2_WARN_FLG 0x174 +#define SFF_A2_TEMP 0x160 +#define SFF_A2_RX_PWR 0x169 + static int txgbe_get_module_eeprom(struct net_device *dev, struct ethtool_eeprom *ee, u8 *data) { struct txgbe_adapter *adapter = netdev_priv(dev); struct txgbe_hw *hw = &adapter->hw; - u32 status = TXGBE_ERR_PHY_ADDR_INVALID; - u8 databyte = 0xFF; int i = 0; + bool is_sfp = false; + u32 value; + u8 identifier = 0; u32 swfw_mask = hw->phy.phy_semaphore_mask; + u8 databyte; + s32 status = 0; - if (0 != TCALL(hw, mac.ops.acquire_swfw_sync, swfw_mask)) - return -EBUSY; - - if (!test_bit(__TXGBE_DOWN, &adapter->state)) - cancel_work_sync(&adapter->sfp_sta_task); + if (hw->mac.type == txgbe_mac_aml40) { + value = rd32(hw, TXGBE_GPIO_EXT); + if (value & TXGBE_SFP1_MOD_PRST_LS) { + return -EIO; + } + } + if (hw->mac.type == txgbe_mac_aml) { + value = rd32(hw, TXGBE_GPIO_EXT); + if (value & TXGBE_SFP1_MOD_ABS_LS) { + return -EIO; + } + } - if (ee->len == 0) - goto ERROR_INVAL; + if (hw->mac.type != txgbe_mac_sp) { + 
if (0 != TCALL(hw, mac.ops.acquire_swfw_sync, swfw_mask)) + return -EBUSY; - for (i = ee->offset; i < ee->offset + ee->len; i++) { - /* I2C reads can take long time */ - if (test_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) - goto ERROR_BUSY; + if (!test_bit(__TXGBE_DOWN, &adapter->state)) + cancel_work_sync(&adapter->sfp_sta_task); - if (i < ETH_MODULE_SFF_8079_LEN) - status = TCALL(hw, phy.ops.read_i2c_eeprom, i, - &databyte); - else - status = TCALL(hw, phy.ops.read_i2c_sff8472, i, - &databyte); + if (ee->len == 0) + goto ERROR_INVAL; + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_IDENTIFIER, + &identifier); if (status != 0) goto ERROR_IO; - data[i - ee->offset] = databyte; - } + if (identifier == TXGBE_SFF_IDENTIFIER_SFP) + is_sfp = true; - TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); + memset(data, 0, ee->len); + for (i = 0; i < ee->len; i++) { + u32 offset = i + ee->offset; + u32 page = 0; - return 0; + /* I2C reads can take long time */ + if (test_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + goto ERROR_BUSY; -ERROR_INVAL: + if (is_sfp) { + if (offset < ETH_MODULE_SFF_8079_LEN) + status = TCALL(hw, phy.ops.read_i2c_eeprom, offset, + &databyte); + else + status = TCALL(hw, phy.ops.read_i2c_sff8472, offset, + &databyte); + + if (status != 0) + goto ERROR_IO; + } else { + while (offset >= ETH_MODULE_SFF_8436_LEN) { + offset -= ETH_MODULE_SFF_8436_LEN / 2; + page++; + } + + if (page == 0 || !(data[0x2] & 0x4)) { + status = TCALL(hw, phy.ops.read_i2c_sff8636, page, offset, + &databyte); + + if (status != 0) + goto ERROR_IO; + } + } + data[i] = databyte; + } + } else { + if (ee->len == 0) + goto ERROR_INVAL; + + if (0 != TCALL(hw, mac.ops.acquire_swfw_sync, swfw_mask)) + return -EBUSY; + + /*when down, can't know sfp change, get eeprom from i2c*/ + if (test_bit(__TXGBE_DOWN, &adapter->state)) { + for (i = ee->offset; i < ee->offset + ee->len; i++) { + /* I2C reads can take long time */ + if (test_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + goto 
ERROR_BUSY; + + if (i < ETH_MODULE_SFF_8079_LEN) + status = TCALL(hw, phy.ops.read_i2c_eeprom, i, + &databyte); + else + status = TCALL(hw, phy.ops.read_i2c_sff8472, i, + &databyte); + + if (status != 0) + goto ERROR_IO; + + data[i - ee->offset] = databyte; + } + } else { + if (adapter->eeprom_type == ETH_MODULE_SFF_8472) { + + cancel_work_sync(&adapter->sfp_sta_task); + + /*alarm flag*/ + for (i = SFF_A2_ALRM_FLG; i <= SFF_A2_ALRM_FLG + 1; i++){ + status = TCALL(hw, phy.ops.read_i2c_sff8472, i, + &databyte); + + if (status != 0) + goto ERROR_IO; + + adapter->i2c_eeprom[i] = databyte; + } + /*warm flag*/ + for (i = SFF_A2_WARN_FLG; i <= SFF_A2_WARN_FLG + 1; i++){ + status = TCALL(hw, phy.ops.read_i2c_sff8472, i, + &databyte); + + if (status != 0) + goto ERROR_IO; + + adapter->i2c_eeprom[i] = databyte; + } + /*dom monitor value*/ + for (i = SFF_A2_TEMP; i <= SFF_A2_RX_PWR + 1; i++){ + status = TCALL(hw, phy.ops.read_i2c_sff8472, i, + &databyte); + + if (status != 0) + goto ERROR_IO; + + adapter->i2c_eeprom[i] = databyte; + } + } + for (i = ee->offset; i < ee->offset + ee->len; i++) + data[i - ee->offset] = adapter->i2c_eeprom[i]; + } + } TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); - return -EINVAL; + return 0; ERROR_BUSY: TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); return -EBUSY; ERROR_IO: TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); return -EIO; +ERROR_INVAL: + return -EINVAL; } #endif /* ETHTOOL_GMODULEINFO */ #ifdef ETHTOOL_GEEE +#ifdef HAVE_ETHTOOL_KEEE +static int txgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata) +#else static int txgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +#endif { return 0; } #endif /* ETHTOOL_GEEE */ #ifdef ETHTOOL_SEEE +#ifdef HAVE_ETHTOOL_KEEE +static int txgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata) +#else static int txgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) +#endif { struct txgbe_adapter *adapter = netdev_priv(netdev); struct 
txgbe_hw *hw = &adapter->hw; +#ifdef HAVE_ETHTOOL_KEEE + struct ethtool_keee eee_data; +#else struct ethtool_eee eee_data; +#endif s32 ret_val; if (!(hw->mac.ops.setup_eee && (adapter->flags2 & TXGBE_FLAG2_EEE_CAPABLE))) return -EOPNOTSUPP; +#ifdef HAVE_ETHTOOL_KEEE + memset(&eee_data, 0, sizeof(struct ethtool_keee)); +#else memset(&eee_data, 0, sizeof(struct ethtool_eee)); +#endif ret_val = txgbe_get_eee(netdev, &eee_data); if (ret_val) @@ -4775,20 +5756,17 @@ static int txgbe_set_flash(struct net_device *netdev, struct ethtool_flash *ef) static struct ethtool_ops txgbe_ethtool_ops = { -#if (defined ETHTOOL_COALESCE_USECS) && (defined ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ) - .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, -#elif (defined ETHTOOL_COALESCE_USECS) - .supported_coalesce_params = ETHTOOL_COALESCE_USECS, -#elif (defined ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ) - .supported_coalesce_params = ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, -#endif -#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +#ifdef ETHTOOL_GLINKSETTINGS .get_link_ksettings = txgbe_get_link_ksettings, .set_link_ksettings = txgbe_set_link_ksettings, #else .get_settings = txgbe_get_settings, .set_settings = txgbe_set_settings, #endif +#ifdef ETHTOOL_GFECPARAM + .get_fecparam = txgbe_get_fec_param, + .set_fecparam = txgbe_set_fec_param, +#endif /* ETHTOOL_GFECPARAM */ .get_drvinfo = txgbe_get_drvinfo, .get_regs_len = txgbe_get_regs_len, .get_regs = txgbe_get_regs, @@ -4828,6 +5806,12 @@ static struct ethtool_ops txgbe_ethtool_ops = { #ifdef HAVE_ETHTOOL_GET_PERM_ADDR .get_perm_addr = ethtool_op_get_perm_addr, #endif + +#ifdef HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES_IRQ | + ETHTOOL_COALESCE_USE_ADAPTIVE, +#endif .get_coalesce = txgbe_get_coalesce, .set_coalesce = txgbe_set_coalesce, #ifndef HAVE_NDO_SET_FEATURES diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_fcoe.c 
b/drivers/net/ethernet/wangxun/txgbe/txgbe_fcoe.c index f9cfe1581b5bf75c133d19d936beaa24f6a3b7aa..2f885119ea927ede1a5fb11b5ee079f59a0f07c5 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_fcoe.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_fcoe.c @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -14,7 +14,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * based on ixgbe_fcoe.c, Copyright(c) 1999 - 2017 Intel Corporation. + * based on txgbe_fcoe.c, Copyright(c) 1999 - 2017 Intel Corporation. * Contact Information: * Linux NICS * e1000-devel Mailing List @@ -821,7 +821,7 @@ static void txgbe_fcoe_ddp_disable(struct txgbe_adapter *adapter) * txgbe_fcoe_enable - turn on FCoE offload feature * @netdev: the corresponding netdev * - * Turns on FCoE offload feature in sapphire. + * Turns on FCoE offload feature in sapphire/amber-lite. * * Returns : 0 indicates success or -EINVAL on failure */ @@ -868,7 +868,7 @@ int txgbe_fcoe_enable(struct net_device *netdev) * txgbe_fcoe_disable - turn off FCoE offload feature * @netdev: the corresponding netdev * - * Turns off FCoE offload feature in sapphire. + * Turns off FCoE offload feature in sapphire/amber-lite. 
* * Returns : 0 indicates success or -EINVAL on failure */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_fcoe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_fcoe.h index 93ebc91c85dd69d94fefd29340adb2fce92aff49..bb0f4c7997ebaec76fc041e00a7067a381bb1460 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_fcoe.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_fcoe.h @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -14,7 +14,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * based on ixgbe_fcoe.h, Copyright(c) 1999 - 2017 Intel Corporation. + * based on txgbe_fcoe.h, Copyright(c) 1999 - 2017 Intel Corporation. * Contact Information: * Linux NICS * e1000-devel Mailing List diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index d380ac75e2525da91cd50a71ca705991d2515791..9eaf70a435eda119a1cb40948a220ea07c4279bc 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -14,7 +14,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". 
* - * based on ixgbe_82599.c, Copyright(c) 1999 - 2017 Intel Corporation. + * based on txgbe_82599.c, Copyright(c) 1999 - 2017 Intel Corporation. * Contact Information: * Linux NICS * e1000-devel Mailing List @@ -26,6 +26,8 @@ #include "txgbe_hw.h" #include "txgbe_phy.h" #include "txgbe_dcb.h" +#include "txgbe_e56.h" +#include "txgbe_e56_bp.h" #include "txgbe.h" #define TXGBE_SP_MAX_TX_QUEUES 128 @@ -45,7 +47,7 @@ STATIC s32 txgbe_get_san_mac_addr_offset(struct txgbe_hw *hw, STATIC s32 txgbe_setup_copper_link(struct txgbe_hw *hw, u32 speed, bool autoneg_wait_to_complete); -s32 txgbe_check_mac_link(struct txgbe_hw *hw, u32 *speed, +s32 txgbe_check_mac_link_sp(struct txgbe_hw *hw, u32 *speed, bool *link_up, bool link_up_wait_to_complete); @@ -112,7 +114,74 @@ void txgbe_wr32_epcs(struct txgbe_hw *hw, u32 addr, u32 data) wr32(hw, portRegOffset, data); } +#if 0 +s32 txgbe_set_amlite_pcs_mode(struct txgbe_hw *hw, int eth_mode) { + + u32 ss52, data; + u32 speed_select, pcs_type_select, pma_type; + u32 pcs_dig_ctrl3, vr_pcs_ctrl3, vr_pcs_ctrl3; + u32 sr_pma_rs_fec_ctl; + + ss52 = txgbe_rd32_epcs(hw, SR_PCS_CTL1); + data = txgbe_rd32_epcs(hw, SR_PCS_CTL2); + + switch (eth_mode) { + case ETH_RATE_10G: + speed_select = 0x0; + pcs_type_select = 0x0; + pma_type = 0xb; + pcs_dig_ctrl3 = 0x0; + break; + case ETH_RATE_25G: + speed_select = 0x5; + pcs_type_select = 0x7; + pma_type = 0x39; + pcs_dig_ctrl3 = 0x0; + break; + default: + ERROR_REPORT2(TXGBE_ERROR_UNSUPPORTED, + "Erroe Eth_mode"); + return -1; + } + +} + +s32 txgbe_set_amlite_phy_mode(struct txgbe_hw *hw, int eth_mode) { + + u32 ss52, data; + u32 pll0_div_cfg, pin_ovrden, pin_ovrdval; + u32 datapath_cfg0, an_cfg; + + ss52 = txgbe_rd32_epcs(hw, SR_PCS_CTL1); + + switch (eth_mode) { + case ETH_RATE_10G: + pll0_div_cfg = 0x29408; + pin_ovrden = 0x0; + pin_ovrdval = 0x0; + datapath_cfg0; + break; + case ETH_RATE_25G: + pll0_div_cfg = 0x29408; + pin_ovrden = 0x0; + pin_ovrdval = 0x0; + datapath_cfg0; + break; + default: + 
ERROR_REPORT2(TXGBE_ERROR_UNSUPPORTED, + "Erroe Eth_mode"); + return -1; + } + + + +} +s32 txgbe_set_amlite_an_status(struct txgbe_hw *hw, bool autoneg) { + if (autoneg) + else +} +#endif /** * txgbe_dcb_get_rtrup2tc - read rtrup2tc reg * @hw: pointer to hardware structure @@ -257,9 +326,11 @@ bool txgbe_device_supports_autoneg_fc(struct txgbe_hw *hw) u8 device_type = hw->subsystem_device_id & 0xF0; switch (hw->phy.media_type) { + case txgbe_media_type_fiber_qsfp: case txgbe_media_type_fiber: TCALL(hw, mac.ops.check_link, &speed, &link_up, false); /* if link is down, assume supported */ + /* amlite TODO*/ if (link_up) supported = speed == TXGBE_LINK_SPEED_1GB_FULL ? true : false; @@ -297,6 +368,10 @@ s32 txgbe_setup_fc(struct txgbe_hw *hw) u32 value = 0; u32 pcap_backplane = 0; + /*amlite TODO*/ + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + return 0; + /* Validate the requested mode */ if (hw->fc.strict_ieee && hw->fc.requested_mode == txgbe_fc_rx_pause) { ERROR_REPORT1(TXGBE_ERROR_UNSUPPORTED, @@ -558,6 +633,7 @@ s32 txgbe_get_mac_addr(struct txgbe_hw *hw, u8 *mac_addr) **/ void txgbe_set_pci_config_data(struct txgbe_hw *hw, u16 link_status) { + /* amlite: TODO */ if (hw->bus.type == txgbe_bus_type_unknown) hw->bus.type = txgbe_bus_type_pci_express; @@ -813,14 +889,15 @@ STATIC s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw) */ if (i >= timeout) { ERROR_REPORT1(TXGBE_ERROR_POLLING, - "SWESMBI Software EEPROM semaphore not granted.\n"); + "SWESMBI Software EEPROM semaphore not granted, MNG_SW_SM_SM is 0x%08x.\n", + swsm); txgbe_release_eeprom_semaphore(hw); status = TXGBE_ERR_EEPROM; } } else { ERROR_REPORT1(TXGBE_ERROR_POLLING, "Software semaphore SMBI between device drivers " - "not granted.\n"); + "not granted, MNG_SW_SM_SM is 0x%08x.\n", swsm); } return status; @@ -1041,7 +1118,7 @@ s32 txgbe_init_rx_addrs(struct txgbe_hw *hw) * * Adds it to unused receive address register or goes into promiscuous mode. 
**/ -void txgbe_add_uc_addr(struct txgbe_hw *hw, u8 *addr, u32 vmdq) +static void txgbe_add_uc_addr(struct txgbe_hw *hw, u8 *addr, u32 vmdq) { u32 rar_entries = hw->mac.num_rar_entries; u32 rar; @@ -1179,7 +1256,7 @@ STATIC s32 txgbe_mta_vector(struct txgbe_hw *hw, u8 *mc_addr) * * Sets the bit-vector in the multicast table. **/ -void txgbe_set_mta(struct txgbe_hw *hw, u8 *mc_addr) +static void txgbe_set_mta(struct txgbe_hw *hw, u8 *mc_addr) { u32 vector; u32 vector_bit; @@ -1310,11 +1387,13 @@ s32 txgbe_disable_mc(struct txgbe_hw *hw) **/ s32 txgbe_fc_enable(struct txgbe_hw *hw) { + u32 mflcn_reg = 0; + u32 fccfg_reg = 0; s32 ret_val = 0; - u32 mflcn_reg, fccfg_reg; - u32 reg; - u32 fcrtl, fcrth; - int i; + u32 fcrtl = 0; + u32 fcrth = 0; + u32 reg = 0; + int i = 0; /* Validate the water mark configuration */ if (!hw->fc.pause_time) { @@ -1403,8 +1482,8 @@ s32 txgbe_fc_enable(struct txgbe_hw *hw) for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { if ((hw->fc.current_mode & txgbe_fc_tx_pause) && hw->fc.high_water[i]) { - fcrtl = (hw->fc.low_water[i] << 10) | - TXGBE_RDB_RFCL_XONE; + fcrtl = (hw->fc.low_water[i] << 10); + wr32(hw, TXGBE_RDB_RFCL(i), fcrtl); fcrth = (hw->fc.high_water[i] << 10) | TXGBE_RDB_RFCH_XOFFE; @@ -2584,139 +2663,242 @@ u8 txgbe_calculate_checksum(u8 *buffer, u32 length) s32 txgbe_host_interface_command(struct txgbe_hw *hw, u32 *buffer, u32 length, u32 timeout, bool return_data) { - u32 hicr, i, bi; + struct txgbe_adapter *adapter = (struct txgbe_adapter *)hw->back; + struct txgbe_hic_hdr *send_hdr = (struct txgbe_hic_hdr *)buffer; u32 hdr_size = sizeof(struct txgbe_hic_hdr); - u16 buf_len; - u32 dword_len; - s32 status = 0; + struct txgbe_hic_hdr *recv_hdr; u32 buf[64] = {}; + u32 hicr, i, bi; + s32 status = 0; + u32 dword_len; + u16 buf_len; + u8 send_cmd; if (length == 0 || length > TXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { - DEBUGOUT1("Buffer length failure buffersize=%d.\n", length); + ERROR_REPORT1(TXGBE_ERROR_CAUTION, + "Buffer length failure 
buffersize=%d.\n", length); return TXGBE_ERR_HOST_INTERFACE_COMMAND; } - if (TCALL(hw, mac.ops.acquire_swfw_sync, TXGBE_MNG_SWFW_SYNC_SW_MB) - != 0) { - return TXGBE_ERR_SWFW_SYNC; - } - + if (hw->mac.type == txgbe_mac_sp) + if (TCALL(hw, mac.ops.acquire_swfw_sync, TXGBE_MNG_SWFW_SYNC_SW_MB) + != 0) { + return TXGBE_ERR_SWFW_SYNC; + } /* Calculate length in DWORDs. We must be DWORD aligned */ if ((length % (sizeof(u32))) != 0) { - DEBUGOUT("Buffer length failure, not aligned to dword"); + ERROR_REPORT1(TXGBE_ERROR_CAUTION, + "Buffer length failure, not aligned to dword"); status = TXGBE_ERR_INVALID_ARGUMENT; goto rel_out; } dword_len = length >> 2; - /* The device driver writes the relevant command block - * into the ram area. - */ - for (i = 0; i < dword_len; i++) { - if (txgbe_check_mng_access(hw)) { - wr32a(hw, TXGBE_MNG_MBOX, + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + /* try to get lock and lock */ + /* wait max to 50ms to get lock */ + WARN_ON(in_interrupt()); + while (test_and_set_bit(__TXGBE_SWFW_BUSY, &adapter->state)) { + timeout--; + if (!timeout) + return TXGBE_ERR_TIMEOUT; + usleep_range(1000, 2000); + } + + /* index to unique seq id for each mbox message */ + send_hdr->cksum_or_index.index = adapter->swfw_index; + send_cmd = send_hdr->cmd; + + /* write data to SW-FW mbox array */ + for (i = 0; i < dword_len; i++) { + wr32a(hw, TXGBE_AML_MNG_MBOX_SW2FW, i, TXGBE_CPU_TO_LE32(buffer[i])); /* write flush */ - buf[i] = rd32a(hw, TXGBE_MNG_MBOX, i); - } - else { - status = TXGBE_ERR_MNG_ACCESS_FAILED; - goto rel_out; + buf[i] = rd32a(hw, TXGBE_AML_MNG_MBOX_SW2FW, i); } - } - /* Setting this bit tells the ARC that a new command is pending. 
*/ - if (txgbe_check_mng_access(hw)) - wr32m(hw, TXGBE_MNG_MBOX_CTL, - TXGBE_MNG_MBOX_CTL_SWRDY, TXGBE_MNG_MBOX_CTL_SWRDY); - else { - status = TXGBE_ERR_MNG_ACCESS_FAILED; - goto rel_out; - } - for (i = 0; i < timeout; i++) { - if (txgbe_check_mng_access(hw)) { - hicr = rd32(hw, TXGBE_MNG_MBOX_CTL); - if ((hicr & TXGBE_MNG_MBOX_CTL_FWRDY)) + /* amlite: generate interrupt to notify FW */ + wr32m(hw, TXGBE_AML_MNG_MBOX_CTL_SW2FW, + TXGBE_AML_MNG_MBOX_NOTIFY, 0); + wr32m(hw, TXGBE_AML_MNG_MBOX_CTL_SW2FW, + TXGBE_AML_MNG_MBOX_NOTIFY, TXGBE_AML_MNG_MBOX_NOTIFY); + + /* Calculate length in DWORDs */ + dword_len = hdr_size >> 2; + + /* polling reply from FW */ + timeout = 50; + do { + timeout--; + usleep_range(1000, 2000); + + /* read hdr */ + for (bi = 0; bi < dword_len; bi++) { + buffer[bi] = rd32a(hw, TXGBE_AML_MNG_MBOX_FW2SW, bi); + TXGBE_LE32_TO_CPUS(&buffer[bi]); + } + + /* check hdr */ + recv_hdr = (struct txgbe_hic_hdr *)buffer; + + if ((recv_hdr->cmd == send_cmd) && + (recv_hdr->cksum_or_index.index == adapter->swfw_index)) { break; + } + } while (timeout); + + if (!timeout) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, + "Polling from FW messages timeout, cmd is 0x%x, index is %d\n", + send_cmd, adapter->swfw_index); + status = TXGBE_ERR_TIMEOUT; + goto rel_out; } - msec_delay(1); - } - buf[0] = rd32(hw, TXGBE_MNG_MBOX); + /* expect no reply from FW then return */ + /* release lock if return */ + if (!return_data) + goto rel_out; - if ((buf[0] & 0xff0000) >> 16 == 0x80) { - DEBUGOUT("It's unknown cmd.\n"); - status = TXGBE_ERR_MNG_ACCESS_FAILED; - goto rel_out; - } - /* Check command completion */ - if (timeout != 0 && i == timeout) { - ERROR_REPORT1(TXGBE_ERROR_CAUTION, - "Command has failed with no status valid.\n"); + /* If there is any thing in data position pull it in */ + buf_len = recv_hdr->buf_len; + if (buf_len == 0) { + goto rel_out; + } + if (length < buf_len + hdr_size) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, + "Buffer not large enough for reply 
message.\n"); + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; + } - ERROR_REPORT1(TXGBE_ERROR_CAUTION, "write value:\n"); - for (i = 0; i < dword_len; i++) { - ERROR_REPORT1(TXGBE_ERROR_CAUTION, "%x ", buffer[i]); + /* Calculate length in DWORDs, add 3 for odd lengths */ + dword_len = (buf_len + 3) >> 2; + for (; bi <= dword_len; bi++) { + buffer[bi] = rd32a(hw, TXGBE_AML_MNG_MBOX_FW2SW, bi); + TXGBE_LE32_TO_CPUS(&buffer[bi]); } - ERROR_REPORT1(TXGBE_ERROR_CAUTION, "read value:\n"); + } else if (hw->mac.type == txgbe_mac_sp) { + /* legacy sw-fw mbox */ + /* The device driver writes the relevant command block + * into the ram area. + */ for (i = 0; i < dword_len; i++) { - ERROR_REPORT1(TXGBE_ERROR_CAUTION, "%x ", buf[i]); + if (txgbe_check_mng_access(hw)) { + wr32a(hw, TXGBE_MNG_MBOX, + i, TXGBE_CPU_TO_LE32(buffer[i])); + /* write flush */ + buf[i] = rd32a(hw, TXGBE_MNG_MBOX, i); + } + else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } } - printk("===%x= %x=\n", buffer[0] & 0xff, (~buf[0] >> 24)); - if( (buffer[0] & 0xff) != (~buf[0] >> 24)) { - status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + /* Setting this bit tells the ARC that a new command is pending. 
*/ + if (txgbe_check_mng_access(hw)) + wr32m(hw, TXGBE_MNG_MBOX_CTL, + TXGBE_MNG_MBOX_CTL_SWRDY, TXGBE_MNG_MBOX_CTL_SWRDY); + else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; goto rel_out; } - } - if (!return_data) - goto rel_out; + for (i = 0; i < timeout; i++) { + if (txgbe_check_mng_access(hw)) { + hicr = rd32(hw, TXGBE_MNG_MBOX_CTL); + if ((hicr & TXGBE_MNG_MBOX_CTL_FWRDY)) + break; + } + msec_delay(1); + } - /* Calculate length in DWORDs */ - dword_len = hdr_size >> 2; + buf[0] = rd32(hw, TXGBE_MNG_MBOX); - /* first pull in the header so we know the buffer length */ - for (bi = 0; bi < dword_len; bi++) { - if (txgbe_check_mng_access(hw)) { - buffer[bi] = rd32a(hw, TXGBE_MNG_MBOX, - bi); - TXGBE_LE32_TO_CPUS(&buffer[bi]); - } else { + if ((buf[0] & 0xff0000) >> 16 == 0x80) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, "It's unknown cmd.\n"); status = TXGBE_ERR_MNG_ACCESS_FAILED; goto rel_out; } - } + /* Check command completion */ + if (timeout != 0 && i == timeout) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, + "Command has failed with no status valid.\n"); + + ERROR_REPORT1(TXGBE_ERROR_CAUTION, "write value:\n"); + for (i = 0; i < dword_len; i++) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, "%x ", buffer[i]); + } + ERROR_REPORT1(TXGBE_ERROR_CAUTION, "read value:\n"); + for (i = 0; i < dword_len; i++) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, "%x ", buf[i]); + } + ERROR_REPORT1(TXGBE_ERROR_CAUTION, + "===%x= %x=\n", buffer[0] & 0xff, (~buf[0] >> 24)); + if( (buffer[0] & 0xff) != (~buf[0] >> 24)) { + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; + } + } - /* If there is any thing in data position pull it in */ - buf_len = ((struct txgbe_hic_hdr *)buffer)->buf_len; - if (buf_len == 0) - goto rel_out; + if (!return_data) + goto rel_out; - if (length < buf_len + hdr_size) { - DEBUGOUT("Buffer not large enough for reply message.\n"); - status = TXGBE_ERR_HOST_INTERFACE_COMMAND; - goto rel_out; - } + /* Calculate length in DWORDs */ + dword_len = hdr_size >> 2; + + /* first 
pull in the header so we know the buffer length */ + for (bi = 0; bi < dword_len; bi++) { + if (txgbe_check_mng_access(hw)) { + buffer[bi] = rd32a(hw, TXGBE_MNG_MBOX, + bi); + TXGBE_LE32_TO_CPUS(&buffer[bi]); + } else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } - /* Calculate length in DWORDs, add 3 for odd lengths */ - dword_len = (buf_len + 3) >> 2; + /* If there is any thing in data position pull it in */ + buf_len = ((struct txgbe_hic_hdr *)buffer)->buf_len; + if (buf_len == 0) + goto rel_out; - /* Pull in the rest of the buffer (bi is where we left off) */ - for (; bi <= dword_len; bi++) { - if (txgbe_check_mng_access(hw)) { - buffer[bi] = rd32a(hw, TXGBE_MNG_MBOX, - bi); - TXGBE_LE32_TO_CPUS(&buffer[bi]); - } else { - status = TXGBE_ERR_MNG_ACCESS_FAILED; + if (length < buf_len + hdr_size) { + DEBUGOUT("Buffer not large enough for reply message.\n"); + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; goto rel_out; } + + /* Calculate length in DWORDs, add 3 for odd lengths */ + dword_len = (buf_len + 3) >> 2; + + /* Pull in the rest of the buffer (bi is where we left off) */ + for (; bi <= dword_len; bi++) { + if (txgbe_check_mng_access(hw)) { + buffer[bi] = rd32a(hw, TXGBE_MNG_MBOX, + bi); + TXGBE_LE32_TO_CPUS(&buffer[bi]); + } else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } } rel_out: - TCALL(hw, mac.ops.release_swfw_sync, TXGBE_MNG_SWFW_SYNC_SW_MB); + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + /* index++, index replace txgbe_hic_hdr.checksum */ + adapter->swfw_index = send_hdr->cksum_or_index.index == TXGBE_HIC_HDR_INDEX_MAX ? 
+ 0 : send_hdr->cksum_or_index.index + 1; + + clear_bit(__TXGBE_SWFW_BUSY, &adapter->state); + } else + TCALL(hw, mac.ops.release_swfw_sync, TXGBE_MNG_SWFW_SYNC_SW_MB); + return status; } @@ -2748,12 +2930,15 @@ s32 txgbe_set_fw_drv_ver(struct txgbe_hw *hw, u8 maj, u8 min, fw_cmd.ver_min = min; fw_cmd.ver_build = build; fw_cmd.ver_sub = sub; - fw_cmd.hdr.checksum = 0; - fw_cmd.hdr.checksum = txgbe_calculate_checksum((u8 *)&fw_cmd, - (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); fw_cmd.pad = 0; fw_cmd.pad2 = 0; + if (hw->mac.type == txgbe_mac_sp) { + fw_cmd.hdr.cksum_or_index.checksum = 0; + fw_cmd.hdr.cksum_or_index.checksum = txgbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); + } + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { ret_val = txgbe_host_interface_command(hw, (u32 *)&fw_cmd, sizeof(fw_cmd), @@ -2794,9 +2979,11 @@ s32 txgbe_reset_hostif(struct txgbe_hw *hw) reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; reset_cmd.lan_id = hw->bus.lan_id; reset_cmd.reset_type = (u16)hw->reset_type; - reset_cmd.hdr.checksum = 0; - reset_cmd.hdr.checksum = txgbe_calculate_checksum((u8 *)&reset_cmd, - (FW_CEM_HDR_LEN + reset_cmd.hdr.buf_len)); + if (hw->mac.type == txgbe_mac_sp) { + reset_cmd.hdr.cksum_or_index.checksum = 0; + reset_cmd.hdr.cksum_or_index.checksum = txgbe_calculate_checksum((u8 *)&reset_cmd, + (FW_CEM_HDR_LEN + reset_cmd.hdr.buf_len)); + } for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { status = txgbe_host_interface_command(hw, (u32 *)&reset_cmd, @@ -2820,45 +3007,7 @@ s32 txgbe_reset_hostif(struct txgbe_hw *hw) return status; } - -s32 txgbe_setup_mac_link_hostif(struct txgbe_hw *hw, u32 speed) -{ - struct txgbe_hic_phy_cfg cmd; - int i; - s32 status = 0; - - cmd.hdr.cmd = FW_SETUP_MAC_LINK_CMD; - cmd.hdr.buf_len = FW_SETUP_MAC_LINK_LEN; - cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; - cmd.lan_id = hw->bus.lan_id; - cmd.phy_mode = 0; - cmd.phy_speed = (u16)speed; - cmd.hdr.checksum = 0; - cmd.hdr.checksum = 
txgbe_calculate_checksum((u8 *)&cmd, - (FW_CEM_HDR_LEN + cmd.hdr.buf_len)); - - for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { - status = txgbe_host_interface_command(hw, (u32 *)&cmd, - sizeof(cmd), - TXGBE_HI_COMMAND_TIMEOUT, - true); - if (status != 0) - continue; - - if (cmd.hdr.cmd_or_resp.ret_status == - FW_CEM_RESP_STATUS_SUCCESS) - status = 0; - else - status = TXGBE_ERR_HOST_INTERFACE_COMMAND; - - break; - } - - return status; - -} - -u16 txgbe_crc16_ccitt(const u8 *buf, int size) +static u16 txgbe_crc16_ccitt(const u8 *buf, int size) { u16 crc = 0; int i; @@ -2887,9 +3036,13 @@ s32 txgbe_upgrade_flash_hostif(struct txgbe_hw *hw, u32 region, start_cmd.hdr.buf_len = FW_FLASH_UPGRADE_START_LEN; start_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; start_cmd.module_id = (u8)region; - start_cmd.hdr.checksum = 0; - start_cmd.hdr.checksum = txgbe_calculate_checksum((u8 *)&start_cmd, - (FW_CEM_HDR_LEN + start_cmd.hdr.buf_len)); + + if (hw->mac.type == txgbe_mac_sp) { + start_cmd.hdr.cksum_or_index.checksum = 0; + start_cmd.hdr.cksum_or_index.checksum = txgbe_calculate_checksum((u8 *)&start_cmd, + (FW_CEM_HDR_LEN + start_cmd.hdr.buf_len)); + } + start_cmd.pad2 = 0; start_cmd.pad3 = 0; @@ -2951,8 +3104,9 @@ s32 txgbe_upgrade_flash_hostif(struct txgbe_hw *hw, u32 region, return status; } - verify_cmd.hdr.checksum = txgbe_calculate_checksum((u8 *)&verify_cmd, - (FW_CEM_HDR_LEN + verify_cmd.hdr.buf_len)); + if (hw->mac.type == txgbe_mac_sp) + verify_cmd.hdr.cksum_or_index.checksum = txgbe_calculate_checksum((u8 *)&verify_cmd, + (FW_CEM_HDR_LEN + verify_cmd.hdr.buf_len)); status = txgbe_host_interface_command(hw, (u32 *)&verify_cmd, sizeof(verify_cmd), @@ -2985,7 +3139,7 @@ static int txgbe_fmgr_cmd_op(struct txgbe_hw *hw, u32 cmd, u32 cmd_addr) return -ETIMEDOUT; time_out = time_out + 1; - udelay(5); + udelay(50); } return 0; @@ -3039,66 +3193,7 @@ int txgbe_flash_read_dword(struct txgbe_hw *hw, u32 addr, u32 *data) return ret; } - -int txgbe_flash_write_cab(struct 
txgbe_hw *hw,u32 addr, u32 value,u16 lan_id) -{ - int status; - struct txgbe_hic_read_cab buffer; - - buffer.hdr.req.cmd = 0xE2; - buffer.hdr.req.buf_lenh = 0x6; - buffer.hdr.req.buf_lenl = 0x0; - buffer.hdr.req.checksum = 0xFF; - - /* convert offset from words to bytes */ - buffer.dbuf.d16[0] = cpu_to_le16(lan_id); - /* one word */ - buffer.dbuf.d32[0] = htonl(addr); - buffer.dbuf.d32[1] = htonl(value); - - status = txgbe_host_interface_command(hw, (u32 *)&buffer, - sizeof(buffer), 5000, true); - printk("0x1e100 :%08x\n",rd32(hw,0x1e100)); - printk("0x1e104 :%08x\n",rd32(hw,0x1e104)); - printk("0x1e108 :%08x\n",rd32(hw,0x1e108)); - printk("0x1e10c :%08x\n",rd32(hw,0x1e10c)); - - return status; -} - -int txgbe_flash_read_cab(struct txgbe_hw *hw, u32 addr ,u16 lan_id ) -{ - int status; - struct txgbe_hic_read_cab buffer; - - buffer.hdr.req.cmd = 0xE1; - buffer.hdr.req.buf_lenh = 0xaa; - buffer.hdr.req.buf_lenl = 0; - buffer.hdr.req.checksum = 0xFF; - - /* convert offset from words to bytes */ - buffer.dbuf.d16[0] = cpu_to_le16(lan_id); - /* one word */ - buffer.dbuf.d32[0] = htonl(addr); - - status = txgbe_host_interface_command(hw, (u32 *)&buffer, - sizeof(buffer), 5000, true); - - if (status) - return status; - if (txgbe_check_mng_access(hw)) { - printk("0x1e100 :%08x\n",rd32(hw,0x1e100)); - printk("0x1e104 :%08x\n",rd32(hw,0x1e104)); - printk("0x1e108 :%08x\n",rd32(hw,0x1e108)); - printk("0x1e10c :%08x\n",rd32(hw,0x1e10c)); - } else { - status = -147; - return status; - } - - return rd32(hw,0x1e108); -} -int txgbe_flash_write_unlock(struct txgbe_hw *hw) +static int txgbe_flash_write_unlock(struct txgbe_hw *hw) { int status; struct txgbe_hic_read_shadow_ram buffer; @@ -3106,28 +3201,9 @@ int txgbe_flash_write_unlock(struct txgbe_hw *hw) buffer.hdr.req.cmd = 0x40; buffer.hdr.req.buf_lenh = 0; buffer.hdr.req.buf_lenl = 0; - buffer.hdr.req.checksum = 0xFF; - - /* convert offset from words to bytes */ - buffer.address = 0; - /* one word */ - buffer.length = 0; - - 
status = txgbe_host_interface_command(hw, (u32 *)&buffer, - sizeof(buffer), 5000,false); - - return status; -} - -int txgbe_flash_write_lock(struct txgbe_hw *hw) -{ - int status; - struct txgbe_hic_read_shadow_ram buffer; - buffer.hdr.req.cmd = 0x39; - buffer.hdr.req.buf_lenh = 0; - buffer.hdr.req.buf_lenl = 0; - buffer.hdr.req.checksum = 0xFF; + if (hw->mac.type == txgbe_mac_sp) + buffer.hdr.req.cksum_or_index.checksum = 0xFF; /* convert offset from words to bytes */ buffer.address = 0; @@ -3135,7 +3211,7 @@ int txgbe_flash_write_lock(struct txgbe_hw *hw) buffer.length = 0; status = txgbe_host_interface_command(hw, (u32 *)&buffer, - sizeof(buffer), 5000, false); + sizeof(buffer), 5000,false); return status; } @@ -3145,39 +3221,70 @@ int txgbe_upgrade_flash(struct txgbe_hw *hw, u32 region, { u32 mac_addr0_dword0_t, mac_addr0_dword1_t, mac_addr1_dword0_t, mac_addr1_dword1_t; u32 serial_num_dword0_t, serial_num_dword1_t, serial_num_dword2_t; + struct txgbe_adapter *adapter = hw->back; u8 status = 0, skip = 0, flash_vendor = 0; u32 sector_num = 0, read_data = 0, i = 0; + u32 sn[24]; + char sn_str[40]; + u8 sn_is_str = true; + u8 *vpd_tend = NULL; + u32 curadr = 0; + u32 vpdadr = 0; + u8 id_str_len, pn_str_len, sn_str_len, rv_str_len; + u32 mac_addr0_dword0_addr, mac_addr0_dword1_addr; + u32 mac_addr1_dword0_addr, mac_addr1_dword1_addr; + u16 subsystem_device_id; + u16 device_id; + u16 vpd_ro_len; + u32 chksum = 0; + u32 upgrade_check = 0x0; + int err = 0; + + if (hw->mac.type == txgbe_mac_sp) { + upgrade_check = PRB_CTL; + subsystem_device_id = data[0xfffdc] << 8 | data[0xfffdd]; + device_id = data[0xfffde] << 8 | data[0xfffdf]; + } else { + upgrade_check = PRB_SCRATCH; + if (data[0x3000] == 0x25 && data[0x3001] == 0x20) { + subsystem_device_id = data[0x302c] << 8 | data[0x302d]; + device_id = data[0x302e] << 8 | data[0x302f]; + } else { + subsystem_device_id = data[0xfffdc] << 8 | data[0xfffdd]; + device_id = data[0xfffde] << 8 | data[0xfffdf]; + } + } - read_data = 
rd32(hw, 0x10200); + read_data = rd32(hw, upgrade_check); if (read_data & 0x80000000) { - printk("The flash has been successfully upgraded once, please reboot to make it work.\n"); + e_info(drv, "The flash has been successfully upgraded once, please reboot to make it work.\n"); return -EOPNOTSUPP; } /*check sub_id*/; - printk("Checking sub_id .......\n"); - printk("The card's sub_id : %04x\n", hw->subsystem_device_id); - printk("The image's sub_id : %04x\n", data[0xfffdc] << 8 | data[0xfffdd]); - if ((hw->subsystem_device_id & 0xfff) == - ((data[0xfffdc] << 8 | data[0xfffdd]) & 0xfff)){ - printk("It is a right image\n"); - } else if (hw->subsystem_device_id == 0xffff){ - printk("update anyway\n"); + e_info(drv, "Checking sub_id .......\n"); + e_info(drv, "The card's sub_id : %04x\n", hw->subsystem_device_id); + e_info(drv, "The image's sub_id : %04x\n", subsystem_device_id); + + if ((hw->subsystem_device_id & 0xfff) == (subsystem_device_id & 0xfff)) { + e_info(drv, "It is a right image\n"); + } else if (hw->subsystem_device_id == 0xffff) { + e_info(drv, "update anyway\n"); } else { - printk("====The Gigabit image is not match the Gigabit card====\n"); - printk("====Please check your image====\n"); + e_err(drv, "====The Gigabit image is not match the Gigabit card====\n"); + e_err(drv, "====Please check your image====\n"); return -EOPNOTSUPP; } - + /*check dev_id*/ - printk("Checking dev_id .......\n"); - printk("The image's dev_id : %04x\n", data[0xfffde] << 8 | data[0xfffdf]); - printk("The card's dev_id : %04x\n", hw->device_id); - if (!((hw->device_id & 0xfff0) == ((data[0xfffde] << 8 | data[0xfffdf]) & 0xfff0)) && + e_info(drv, "Checking dev_id .......\n"); + e_info(drv, "The image's dev_id : %04x\n", device_id); + e_info(drv, "The card's dev_id : %04x\n", hw->device_id); + if (!((hw->device_id & 0xfff0) == (device_id & 0xfff0)) && !(hw->device_id == 0xffff)) { - printk("====The Gigabit image is not match the Gigabit card====\n"); - printk("====Please check your 
image====\n"); + e_err(drv, "====The Gigabit image is not match the Gigabit card====\n"); + e_err(drv, "====Please check your image====\n"); return -EOPNOTSUPP; } @@ -3187,29 +3294,123 @@ int txgbe_upgrade_flash(struct txgbe_hw *hw, u32 region, msleep(1000); - txgbe_flash_read_dword(hw, MAC_ADDR0_WORD0_OFFSET_1G, &mac_addr0_dword0_t); - txgbe_flash_read_dword(hw, MAC_ADDR0_WORD1_OFFSET_1G, &mac_addr0_dword1_t); + switch (hw->mac.type) { + case txgbe_mac_sp: + mac_addr0_dword0_addr = MAC_ADDR0_WORD0_OFFSET_1G; + mac_addr0_dword1_addr = MAC_ADDR0_WORD1_OFFSET_1G; + mac_addr1_dword0_addr = MAC_ADDR1_WORD0_OFFSET_1G; + mac_addr1_dword1_addr = MAC_ADDR1_WORD1_OFFSET_1G; + break; + case txgbe_mac_aml: + case txgbe_mac_aml40: + mac_addr0_dword0_addr = AMLITE_MAC_ADDR0_WORD0_OFFSET; + mac_addr0_dword1_addr = AMLITE_MAC_ADDR0_WORD1_OFFSET; + mac_addr1_dword0_addr = AMLITE_MAC_ADDR1_WORD0_OFFSET; + mac_addr1_dword1_addr = AMLITE_MAC_ADDR1_WORD1_OFFSET; + break; + default: + e_err(drv, "====Error mac type====\n"); + return -EOPNOTSUPP; + } + + txgbe_flash_read_dword(hw, mac_addr0_dword0_addr, &mac_addr0_dword0_t); + txgbe_flash_read_dword(hw, mac_addr0_dword1_addr, &mac_addr0_dword1_t); mac_addr0_dword1_t = mac_addr0_dword1_t & U16_MAX; - txgbe_flash_read_dword(hw, MAC_ADDR1_WORD0_OFFSET_1G, &mac_addr1_dword0_t); - txgbe_flash_read_dword(hw, MAC_ADDR1_WORD1_OFFSET_1G, &mac_addr1_dword1_t); + txgbe_flash_read_dword(hw, mac_addr1_dword0_addr, &mac_addr1_dword0_t); + txgbe_flash_read_dword(hw, mac_addr1_dword1_addr, &mac_addr1_dword1_t); mac_addr1_dword1_t = mac_addr1_dword1_t & U16_MAX; + for (i = 0; i < 24; i++) { + txgbe_flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 4 * i, &sn[i]); + } + if (sn[23] == U32_MAX) + sn_is_str = false; + txgbe_flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G, &serial_num_dword0_t); txgbe_flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 4, &serial_num_dword1_t); txgbe_flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 8, 
&serial_num_dword2_t); - printk("Old: MAC Address0 is: 0x%04x%08x\n", mac_addr0_dword1_t, mac_addr0_dword0_t); - printk(" MAC Address1 is: 0x%04x%08x\n", mac_addr1_dword1_t, mac_addr1_dword0_t); + e_info(drv, "Old: MAC Address0 is: 0x%04x%08x\n", mac_addr0_dword1_t, mac_addr0_dword0_t); + e_info(drv, " MAC Address1 is: 0x%04x%08x\n", mac_addr1_dword1_t, mac_addr1_dword0_t); - status = fmgr_usr_cmd_op(hw, 0x6); /* write enable*/ status = fmgr_usr_cmd_op(hw, 0x98); /* global protection un-lock*/ txgbe_flash_write_unlock(hw); msleep(1000); - + + //rebuild vpd + vpd_tend = kcalloc(256, sizeof(u8), GFP_KERNEL); + if (!vpd_tend) { + e_info(drv, "failed to allocate memory for vpd\n"); + return -ENOMEM; + } + memset(vpd_tend, 0xff, 256 * sizeof(u8)); + + curadr = TXGBE_VPD_OFFSET + 1; + id_str_len = data[curadr] | data[curadr + 1] << 8; + curadr += (7 + id_str_len); + pn_str_len = data[curadr]; + curadr += 1 + pn_str_len; + + for (i = 0; i < curadr - TXGBE_VPD_OFFSET; i++) { + vpd_tend[i] = data[TXGBE_VPD_OFFSET + i]; + } + + memset(sn_str, 0x0, sizeof(sn_str)); + if (sn_is_str) { + for (i = 0; i < 24; i++) { + sn_str[i] = sn[23-i]; + } + sn_str_len = strlen(sn_str); + } else { + sn_str_len = 0x12; + sprintf(sn_str ,"%02x%08x%08x",(serial_num_dword2_t & 0xff), serial_num_dword1_t, serial_num_dword0_t); + } + + vpdadr = curadr - TXGBE_VPD_OFFSET; + + if (data[curadr] == 'S' && data[curadr + 1] == 'N') { + if (data[curadr + 2]) { + for (i = sn_str_len; i < data[curadr + 2]; i++) + sn_str[i] = 0x20; + sn_str_len = data[curadr + 2]; + } + curadr += 3 + data[curadr + 2]; + rv_str_len = data[2 + curadr]; + } else { + rv_str_len = data[2 + curadr]; + } + + vpd_tend[vpdadr] = 'S'; + vpd_tend[vpdadr + 1] = 'N'; + vpd_tend[vpdadr + 2] = sn_str_len; + + for (i = 0; i < sn_str_len; i++) + vpd_tend[vpdadr + 3 + i] = sn_str[i]; + + vpdadr = vpdadr+ 3 + sn_str_len; + + for (i = 0; i < 3; i++) + vpd_tend[vpdadr + i] = data [curadr + i]; + + vpdadr += 3; + for (i = 0; i < rv_str_len; i++) 
+ vpd_tend[vpdadr + i] = 0x0; + + vpdadr += rv_str_len; + vpd_ro_len = pn_str_len + sn_str_len + rv_str_len + 9; + vpd_tend[4 + id_str_len] = vpd_ro_len & 0xff; + vpd_tend[5 + id_str_len] = (vpd_ro_len >> 8) & 0xff; + + for (i = 0; i < vpdadr; i++) + chksum += vpd_tend[i]; + chksum = ~(chksum & 0xff) + 1; + vpd_tend[vpdadr - rv_str_len] = chksum; + vpd_tend[vpdadr] = 0x78; + /*Note: for Spanish FLASH, first 8 sectors (4KB) in sector0 (64KB) need to use a special erase command (4K sector erase)*/ if (flash_vendor == 1) { - wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c720); + wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c720); for (i = 0; i < 8; i++) { txgbe_flash_erase_sector(hw, i * 128); msleep(20); // 20 ms @@ -3221,13 +3422,14 @@ int txgbe_upgrade_flash(struct txgbe_hw *hw, u32 region, /* Winbond Flash, erase chip command is okay, but erase sector doestn't work*/ if (flash_vendor == 2) { status = txgbe_flash_erase_chip(hw); - printk("Erase chip command, return status = %0d\n", status); + e_err(drv, "Erase chip command, return status = %0d\n", status); msleep(1000); } else { wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c720); for (i = 0; i < sector_num; i++) { status = txgbe_flash_erase_sector(hw, i * SPI_SECTOR_SIZE); - printk("Erase sector[%2d] command, return status = %0d\n", i, status); + if (status) + e_err(drv, "Erase sector[%2d] command, return status = %0d\n", i, status); msleep(50); } wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c7d8); @@ -3237,34 +3439,72 @@ int txgbe_upgrade_flash(struct txgbe_hw *hw, u32 region, for (i = 0; i < size / 4; i++) { read_data = data[4 * i + 3] << 24 | data[4 * i + 2] << 16 | data[4 * i + 1] << 8 | data[4 * i]; read_data = __le32_to_cpu(read_data); - skip = ((i * 4 == MAC_ADDR0_WORD0_OFFSET_1G) || (i * 4 == MAC_ADDR0_WORD1_OFFSET_1G) || - (i * 4 == MAC_ADDR1_WORD0_OFFSET_1G) || (i * 4 == MAC_ADDR1_WORD1_OFFSET_1G) || - (i * 4 >= PRODUCT_SERIAL_NUM_OFFSET_1G && i * 4 <= PRODUCT_SERIAL_NUM_OFFSET_1G + 8)); + skip = ((i * 4 == mac_addr0_dword0_addr) || (i * 4 == 
mac_addr0_dword1_addr) || + (i * 4 == mac_addr1_dword0_addr) || (i * 4 == mac_addr1_dword1_addr) || + (i * 4 >= PRODUCT_SERIAL_NUM_OFFSET_1G && i * 4 <= PRODUCT_SERIAL_NUM_OFFSET_1G + 92) || + (i * 4 >= TXGBE_VPD_OFFSET && i * 4 < TXGBE_VPD_END) || + (i * 4 == 0x15c)); if (read_data != U32_MAX && !skip) { status = txgbe_flash_write_dword(hw, i * 4, read_data); if (status) { - printk("ERROR: Program 0x%08x @addr: 0x%08x is failed !!\n", read_data, i * 4); + e_err(drv, "ERROR: Program 0x%08x @addr: 0x%08x is failed !!\n", read_data, i * 4); txgbe_flash_read_dword(hw, i * 4, &read_data); - printk(" Read data from Flash is: 0x%08x\n", read_data); - return 1; + e_err(drv, " Read data from Flash is: 0x%08x\n", read_data); + err = -EBUSY; + goto err_exit; } } - if (i % 1024 == 0) { - printk("\b\b\b\b%3d%%", (int)(i * 4 * 100 / size)); - } } - txgbe_flash_write_dword(hw, MAC_ADDR0_WORD0_OFFSET_1G, mac_addr0_dword0_t); - txgbe_flash_write_dword(hw, MAC_ADDR0_WORD1_OFFSET_1G, (mac_addr0_dword1_t | 0x80000000));//lan0 - txgbe_flash_write_dword(hw, MAC_ADDR1_WORD0_OFFSET_1G, mac_addr1_dword0_t); - txgbe_flash_write_dword(hw, MAC_ADDR1_WORD1_OFFSET_1G, (mac_addr1_dword1_t | 0x80000000));//lan1 - txgbe_flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G, serial_num_dword0_t); - txgbe_flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 4, serial_num_dword1_t); - txgbe_flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 8, serial_num_dword2_t); - - wr32(hw, 0x10200, rd32(hw, 0x10200) | 0x80000000); + for (i = 0; i < 256 / 4; i++) { + read_data = vpd_tend[4 * i + 3] << 24 | vpd_tend[4 * i + 2] << 16 | vpd_tend[4 * i + 1] << 8 | vpd_tend[4 * i]; + read_data = __le32_to_cpu(read_data); + if (read_data != U32_MAX) { + status = txgbe_flash_write_dword(hw, TXGBE_VPD_OFFSET + i * 4, read_data); + if (status) { + e_err(drv, "ERROR: Program 0x%08x @addr: 0x%08x is failed !!\n", read_data, i * 4); + txgbe_flash_read_dword(hw, i * 4, &read_data); + e_err(drv, " Read data from Flash is: 
0x%08x\n", read_data); + err = -EBUSY; + goto err_exit; + } + } + } - return 0; + chksum = 0; + for (i = 0; i< 0x1000; i += 2) { + if (i >= TXGBE_VPD_OFFSET && i < TXGBE_VPD_END) { + chksum += (vpd_tend[i - TXGBE_VPD_OFFSET + 1] << 8 | vpd_tend[i - TXGBE_VPD_OFFSET]); + } else if (i == 0x15e) { + continue; + } else { + chksum += (data[i + 1] << 8 | data[i]); + } + } + chksum = 0xbaba - chksum; + chksum &= 0xffff; + status = txgbe_flash_write_dword(hw, 0x15e, 0xffff0000 | chksum); + + txgbe_flash_write_dword(hw, mac_addr0_dword0_addr, mac_addr0_dword0_t); + txgbe_flash_write_dword(hw, mac_addr0_dword1_addr, (mac_addr0_dword1_t | 0x80000000));//lan0 + txgbe_flash_write_dword(hw, mac_addr1_dword0_addr, mac_addr1_dword0_t); + txgbe_flash_write_dword(hw, mac_addr1_dword1_addr, (mac_addr1_dword1_t | 0x80000000));//lan1 + if (sn_is_str) { + for (i = 0; i < 24; i++) { + txgbe_flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 4 * i, sn[i]); + } + } else { + txgbe_flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G, serial_num_dword0_t); + txgbe_flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 4, serial_num_dword1_t); + txgbe_flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 8, serial_num_dword2_t); + } + + wr32(hw, upgrade_check, rd32(hw, upgrade_check) | 0x80000000); + +err_exit: + if (vpd_tend) + kfree(vpd_tend); + return err; } @@ -3360,44 +3600,70 @@ s32 txgbe_get_thermal_sensor_data(struct txgbe_hw *hw) s64 tsv; int i = 0; struct txgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + u32 data_code; + int temp_data, temp_fraction; /* Only support thermal sensors attached to physical port 0 */ if (hw->bus.lan_id) return TXGBE_NOT_IMPLEMENTED; - tsv = (s64)(rd32(hw, TXGBE_TS_ST) & - TXGBE_TS_ST_DATA_OUT_MASK); - - tsv = tsv < 1200 ? 
tsv : 1200; - tsv = -(48380 << 8) / 1000 - + tsv * (31020 << 8) / 100000 - - tsv * tsv * (18201 << 8) / 100000000 - + tsv * tsv * tsv * (81542 << 8) / 1000000000000 - - tsv * tsv * tsv * tsv * (16743 << 8) / 1000000000000000; - tsv >>= 8; - - data->sensor.temp = (s16)tsv; - - for (i = 0; i < 100 ; i++){ - tsv = (s64)rd32(hw, TXGBE_TS_ST); - if( tsv >> 16 == 0x1 ){ - tsv = tsv & TXGBE_TS_ST_DATA_OUT_MASK; - tsv = tsv < 1200 ? tsv : 1200; - tsv = -(48380 << 8) / 1000 - + tsv * (31020 << 8) / 100000 - - tsv * tsv * (18201 << 8) / 100000000 - + tsv * tsv * tsv * (81542 << 8) / 1000000000000 - - tsv * tsv * tsv * tsv * (16743 << 8) / 1000000000000000; - tsv >>= 8; - - data->sensor.temp = (s16)tsv; + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + wr32(hw, TXGBE_AML_TS_ENA, 0x0001); + + while(1) { + data_code = rd32(hw, TXGBE_AML_TS_STS); + if ((data_code & TXGBE_AML_TS_STS_VLD) != 0) break; - }else{ - msleep(1); - continue; + msleep(1); + if (i++ > PHYINIT_TIMEOUT) { + printk("ERROR: Wait 0x1033c Timeout!!!\n"); + return -1; } + } + + data_code = data_code & 0xFFF; + temp_data = 419400 + 2205 * (data_code * 1000 / 4094 - 500); + + //Change double Temperature to int + tsv = temp_data/10000; + temp_fraction = temp_data - (tsv * 10000); + if (temp_fraction >= 5000) { + tsv += 1; + } + data->sensor.temp = (s16)tsv; + } else { + tsv = (s64)(rd32(hw, TXGBE_TS_ST) & + TXGBE_TS_ST_DATA_OUT_MASK); + + tsv = tsv < 1200 ? tsv : 1200; + tsv = -(48380 << 8) / 1000 + + tsv * (31020 << 8) / 100000 + - tsv * tsv * (18201 << 8) / 100000000 + + tsv * tsv * tsv * (81542 << 8) / 1000000000000 + - tsv * tsv * tsv * tsv * (16743 << 8) / 1000000000000000; + tsv >>= 8; + + data->sensor.temp = (s16)tsv; + + for (i = 0; i < 100 ; i++){ + tsv = (s64)rd32(hw, TXGBE_TS_ST); + if( tsv >> 16 == 0x1 ){ + tsv = tsv & TXGBE_TS_ST_DATA_OUT_MASK; + tsv = tsv < 1200 ? 
tsv : 1200; + tsv = -(48380 << 8) / 1000 + + tsv * (31020 << 8) / 100000 + - tsv * tsv * (18201 << 8) / 100000000 + + tsv * tsv * tsv * (81542 << 8) / 1000000000000 + - tsv * tsv * tsv * tsv * (16743 << 8) / 1000000000000000; + tsv >>= 8; + data->sensor.temp = (s16)tsv; + break; + }else{ + msleep(1); + continue; + } + } } - return 0; } @@ -3420,17 +3686,30 @@ s32 txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw) if (hw->bus.lan_id) return TXGBE_NOT_IMPLEMENTED; - wr32(hw, TXGBE_TS_CTL, TXGBE_TS_CTL_EVAL_MD); - wr32(hw, TXGBE_TS_INT_EN, - TXGBE_TS_INT_EN_ALARM_INT_EN | TXGBE_TS_INT_EN_DALARM_INT_EN); - wr32(hw, TXGBE_TS_EN, TXGBE_TS_EN_ENA); - - data->sensor.alarm_thresh = 100; - wr32(hw, TXGBE_TS_ALARM_THRE, 677); data->sensor.dalarm_thresh = 90; - wr32(hw, TXGBE_TS_DALARM_THRE, 614); + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + wr32(hw, TXGBE_AML_TS_ENA, 0x0); + wr32(hw, TXGBE_AML_INTR_RAW_LO, TXGBE_AML_INTR_CL_LO); + wr32(hw, TXGBE_AML_INTR_RAW_HI, TXGBE_AML_INTR_CL_HI); + + wr32(hw, TXGBE_AML_INTR_HIGH_EN, TXGBE_AML_INTR_EN_HI); + wr32(hw, TXGBE_AML_INTR_LOW_EN, TXGBE_AML_INTR_EN_LO); + + wr32m(hw, TXGBE_AML_TS_CTL1, TXGBE_AML_EVAL_MODE_MASK, 0x10); + wr32m(hw, TXGBE_AML_TS_CTL1, TXGBE_AML_ALARM_THRE_MASK, 0x186a0000); //100 degree centigrade + wr32m(hw, TXGBE_AML_TS_CTL1, TXGBE_AML_DALARM_THRE_MASK, 0x16f60); //90 degree centigrade + wr32(hw, TXGBE_AML_TS_ENA, 0x1); + } else { + wr32(hw, TXGBE_TS_CTL, TXGBE_TS_CTL_EVAL_MD); + wr32(hw, TXGBE_TS_INT_EN, + TXGBE_TS_INT_EN_ALARM_INT_EN | TXGBE_TS_INT_EN_DALARM_INT_EN); + wr32(hw, TXGBE_TS_EN, TXGBE_TS_EN_ENA); + + wr32(hw, TXGBE_TS_ALARM_THRE, 677); + wr32(hw, TXGBE_TS_DALARM_THRE, 614); + } return status; } @@ -3576,10 +3855,55 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, /* Try each speed one by one, highest priority first. We do this in * software because 10Gb fiber doesn't support speed autonegotiation. 
- */ + */ + if (speed & TXGBE_LINK_SPEED_25GB_FULL) { + speedcnt++; + highest_link_speed = TXGBE_LINK_SPEED_25GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = TCALL(hw, mac.ops.check_link, + &link_speed, &link_up, false); + if (status != 0) + return status; + + if ((link_speed == TXGBE_LINK_SPEED_25GB_FULL) && link_up) + goto out; + + /* Allow module to change analog characteristics (1G->10G) */ + msec_delay(40); + + status = TCALL(hw, mac.ops.setup_mac_link, + TXGBE_LINK_SPEED_25GB_FULL, + autoneg_wait_to_complete); + if (status != 0) + return status; + + /* Flap the Tx laser if it has not already been done */ + TCALL(hw, mac.ops.flap_tx_laser); + + /* Wait for the controller to acquire link. Per IEEE 802.3ap, + * Section 73.10.2, we may have to wait up to 500ms if KR is + * attempted. sapphire uses the same timing for 10g SFI. + */ + for (i = 0; i < 5; i++) { + /* Wait for the link partner to also set speed */ + msec_delay(100); + + /* If we have link, just jump out */ + status = TCALL(hw, mac.ops.check_link, + &link_speed, &link_up, false); + if (status != 0) + return status; + + if (link_up) + goto out; + } + } + if (speed & TXGBE_LINK_SPEED_10GB_FULL) { speedcnt++; - highest_link_speed = TXGBE_LINK_SPEED_10GB_FULL; + if (highest_link_speed == TXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = TXGBE_LINK_SPEED_10GB_FULL; /* If we already have link at this speed, just jump out */ status = TCALL(hw, mac.ops.check_link, @@ -3633,13 +3957,13 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, if (status != 0) return status; - if (link_speed == TXGBE_LINK_SPEED_1GB_FULL){ + if (link_speed == TXGBE_LINK_SPEED_1GB_FULL) { curr_autoneg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); curr_autoneg = !!(curr_autoneg & (0x1 << 12)); } if ((link_speed == TXGBE_LINK_SPEED_1GB_FULL) && link_up - &&(adapter->an37 == curr_autoneg)) + && (adapter->autoneg == curr_autoneg)) goto out; /* Allow module to change analog characteristics 
(10G->1G) */ @@ -3680,6 +4004,12 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, /* Set autoneg_advertised value based on input link speed */ hw->phy.autoneg_advertised = 0; + if (speed & TXGBE_LINK_SPEED_40GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_40GB_FULL; + + if (speed & TXGBE_LINK_SPEED_25GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_25GB_FULL; + if (speed & TXGBE_LINK_SPEED_10GB_FULL) hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_10GB_FULL; @@ -4037,7 +4367,7 @@ txgbe_dptype txgbe_ptype_lookup[256] = { }; -void txgbe_init_mac_link_ops(struct txgbe_hw *hw) +void txgbe_init_mac_link_ops_sp(struct txgbe_hw *hw) { struct txgbe_mac_info *mac = &hw->mac; @@ -4054,11 +4384,11 @@ void txgbe_init_mac_link_ops(struct txgbe_hw *hw) if (hw->phy.multispeed_fiber) { /* Set up dual speed SFP+ support */ mac->ops.setup_link = txgbe_setup_mac_link_multispeed_fiber; - mac->ops.setup_mac_link = txgbe_setup_mac_link; + mac->ops.setup_mac_link = txgbe_setup_mac_link_sp; mac->ops.set_rate_select_speed = txgbe_set_hard_rate_select_speed; } else { - mac->ops.setup_link = txgbe_setup_mac_link; + mac->ops.setup_link = txgbe_setup_mac_link_sp; mac->ops.set_rate_select_speed = txgbe_set_hard_rate_select_speed; } @@ -4073,19 +4403,18 @@ void txgbe_init_mac_link_ops(struct txgbe_hw *hw) * not known. Perform the SFP init if necessary. 
* **/ -s32 txgbe_init_phy_ops(struct txgbe_hw *hw) +s32 txgbe_init_phy_ops_sp(struct txgbe_hw *hw) { struct txgbe_mac_info *mac = &hw->mac; s32 ret_val = 0; - txgbe_init_i2c(hw); /* Identify the PHY or SFP module */ ret_val = TCALL(hw, phy.ops.identify); if (ret_val == TXGBE_ERR_SFP_NOT_SUPPORTED) goto init_phy_ops_out; /* Setup function pointers based on detected SFP module and speeds */ - txgbe_init_mac_link_ops(hw); + txgbe_init_mac_link_ops_sp(hw); if (hw->phy.sfp_type != txgbe_sfp_type_unknown) hw->phy.ops.reset = NULL; @@ -4103,16 +4432,111 @@ s32 txgbe_init_phy_ops(struct txgbe_hw *hw) return ret_val; } +static s32 txgbe_setup_sfp_modules_sp(struct txgbe_hw *hw) +{ + s32 ret_val = 0; + + DEBUGFUNC("txgbe_setup_sfp_modules_sp"); + + if (hw->phy.sfp_type != txgbe_sfp_type_unknown) { + txgbe_init_mac_link_ops_sp(hw); + } + + return ret_val; +} + /** - * txgbe_init_ops - Inits func ptrs and MAC type + * txgbe_init_ops_sp - Inits func ptrs and MAC type * @hw: pointer to hardware structure * * Initialize the function pointers and assign the MAC type for sapphire. * Does not touch the hardware. 
**/ -s32 txgbe_init_ops(struct txgbe_hw *hw) +static s32 txgbe_init_ops_sp(struct txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + struct txgbe_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + ret_val = txgbe_init_ops_generic(hw); + + /* PHY */ + phy->ops.init = txgbe_init_phy_ops_sp; + + /* MAC */ + mac->ops.get_media_type = txgbe_get_media_type_sp; + mac->ops.setup_sfp = txgbe_setup_sfp_modules_sp; + + /* LINK */ + mac->ops.get_link_capabilities = txgbe_get_link_capabilities_sp; + mac->ops.setup_link = txgbe_setup_mac_link_sp; + mac->ops.check_link = txgbe_check_mac_link_sp; + + return ret_val; +} + +static void txgbe_set_mac_type(struct txgbe_hw *hw) +{ + switch (hw->device_id) { + case TXGBE_DEV_ID_SP1000: + case TXGBE_DEV_ID_WX1820: + hw->mac.type = txgbe_mac_sp; + break; + case TXGBE_DEV_ID_AML: + case TXGBE_DEV_ID_AML5025: + case TXGBE_DEV_ID_AML5125: + hw->mac.type = txgbe_mac_aml; + break; + case TXGBE_DEV_ID_AML5040: + case TXGBE_DEV_ID_AML5140: + hw->mac.type = txgbe_mac_aml40; + break; + default: + hw->mac.type = txgbe_mac_unknown; + break; + } +} + +/** + * txgbe_init_shared_code - Initialize the shared code + * @hw: pointer to hardware structure + * + * This will assign function pointers and assign the MAC type and PHY code. + * Does not touch the hardware. This function must be called prior to any + * other function in the shared code. The txgbe_hw structure should be + * memset to 0 prior to calling this function. 
The following fields in + * hw structure should be filled in prior to calling this function: + * hw_addr, back, device_id, vendor_id, subsystem_device_id, + * subsystem_vendor_id, and revision_id + **/ +int txgbe_init_shared_code(struct txgbe_hw *hw) +{ + s32 status; + + txgbe_set_mac_type(hw); + + switch (hw->mac.type) { + case txgbe_mac_sp: + status = txgbe_init_ops_sp(hw); + break; + case txgbe_mac_aml: + status = txgbe_init_ops_aml(hw); + break; + case txgbe_mac_aml40: + status = txgbe_init_ops_aml40(hw); + break; + default: + status = TXGBE_ERR_DEVICE_NOT_SUPPORTED; + break; + } + + return status; +} + + +s32 txgbe_init_ops_generic(struct txgbe_hw *hw) { struct txgbe_mac_info *mac = &hw->mac; struct txgbe_phy_info *phy = &hw->phy; @@ -4132,6 +4556,7 @@ s32 txgbe_init_ops(struct txgbe_hw *hw) phy->ops.read_i2c_byte = txgbe_read_i2c_byte; phy->ops.write_i2c_byte = txgbe_write_i2c_byte; phy->ops.read_i2c_sff8472 = txgbe_read_i2c_sff8472; + phy->ops.read_i2c_sff8636 = txgbe_read_i2c_sff8636; phy->ops.read_i2c_eeprom = txgbe_read_i2c_eeprom; phy->ops.read_i2c_sfp_phy = txgbe_read_i2c_sfp_phy; phy->ops.write_i2c_eeprom = txgbe_write_i2c_eeprom; @@ -4139,7 +4564,6 @@ s32 txgbe_init_ops(struct txgbe_hw *hw) phy->sfp_type = txgbe_sfp_type_unknown; phy->ops.check_overtemp = txgbe_tn_check_overtemp; phy->ops.identify = txgbe_identify_phy; - phy->ops.init = txgbe_init_phy_ops; /* MAC */ mac->ops.init_hw = txgbe_init_hw; @@ -4151,7 +4575,7 @@ s32 txgbe_init_ops(struct txgbe_hw *hw) mac->ops.acquire_swfw_sync = txgbe_acquire_swfw_sync; mac->ops.release_swfw_sync = txgbe_release_swfw_sync; mac->ops.reset_hw = txgbe_reset_hw; - mac->ops.get_media_type = txgbe_get_media_type; + mac->ops.get_media_type = NULL; mac->ops.disable_sec_rx_path = txgbe_disable_sec_rx_path; mac->ops.enable_sec_rx_path = txgbe_enable_sec_rx_path; mac->ops.disable_sec_tx_path = txgbe_disable_sec_tx_path; @@ -4195,8 +4619,8 @@ s32 txgbe_init_ops(struct txgbe_hw *hw) mac->ops.setup_fc = txgbe_setup_fc; /* 
Link */ - mac->ops.get_link_capabilities = txgbe_get_link_capabilities; - mac->ops.check_link = txgbe_check_mac_link; + mac->ops.get_link_capabilities = NULL; + mac->ops.check_link = NULL; mac->ops.setup_rxpba = txgbe_set_rxpba; mac->mcft_size = TXGBE_SP_MC_TBL_SIZE; mac->vft_size = TXGBE_SP_VFT_TBL_SIZE; @@ -4247,7 +4671,7 @@ s32 txgbe_init_ops(struct txgbe_hw *hw) * * Determines the link capabilities by reading the AUTOC register. **/ -s32 txgbe_get_link_capabilities(struct txgbe_hw *hw, +s32 txgbe_get_link_capabilities_sp(struct txgbe_hw *hw, u32 *speed, bool *autoneg) { @@ -4255,27 +4679,31 @@ s32 txgbe_get_link_capabilities(struct txgbe_hw *hw, u32 sr_pcs_ctl, sr_pma_mmd_ctl1, sr_an_mmd_ctl; u32 sr_an_mmd_adv_reg2; - /* Check if 1G SFP module. */ - if (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 || + if (hw->phy.multispeed_fiber) { + *speed = TXGBE_LINK_SPEED_10GB_FULL | + TXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + } else if (hw->dac_sfp) { + *autoneg = true; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_KR; + *speed = TXGBE_LINK_SPEED_10GB_FULL; + } else if (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 || hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1 || hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core0 || hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core1 || hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core0 || hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core1) { + /* Check if 1G SFP module. 
*/ *speed = TXGBE_LINK_SPEED_1GB_FULL; *autoneg = true; - } else if (hw->phy.multispeed_fiber) { - *speed = TXGBE_LINK_SPEED_10GB_FULL | - TXGBE_LINK_SPEED_1GB_FULL; - *autoneg = true; - } + } /* SFP */ - else if (txgbe_get_media_type(hw) == txgbe_media_type_fiber) { + else if (TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_fiber) { *speed = TXGBE_LINK_SPEED_10GB_FULL; *autoneg = false; } /* XAUI */ - else if ((txgbe_get_media_type(hw) == txgbe_media_type_copper) && + else if ((TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_copper) && ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI || (hw->subsystem_device_id & 0xF0) == TXGBE_ID_SFI_XAUI)) { *speed = TXGBE_LINK_SPEED_10GB_FULL; @@ -4374,12 +4802,12 @@ s32 txgbe_get_link_capabilities(struct txgbe_hw *hw, } /** - * txgbe_get_media_type - Get media type + * txgbe_get_media_type_sp - Get media type * @hw: pointer to hardware structure * * Returns the media type (fiber, copper, backplane) **/ -enum txgbe_media_type txgbe_get_media_type(struct txgbe_hw *hw) +enum txgbe_media_type txgbe_get_media_type_sp(struct txgbe_hw *hw) { enum txgbe_media_type media_type; u8 device_type = hw->subsystem_device_id & 0xF0; @@ -4449,17 +4877,33 @@ void txgbe_disable_tx_laser_multispeed_fiber(struct txgbe_hw *hw) { u32 esdp_reg = rd32(hw, TXGBE_GPIO_DR); - if (!(TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_fiber)) + if (!((TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_fiber) || + (TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_fiber_qsfp))) return; /* Blocked by MNG FW so bail */ txgbe_check_reset_blocked(hw); - if (txgbe_close_notify(hw)) + if (txgbe_close_notify(hw)) { /* over write led when ifconfig down */ - TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_UP | TXGBE_LED_LINK_10G | TXGBE_LED_LINK_1G | TXGBE_LED_LINK_ACTIVE); - + if (hw->mac.type == txgbe_mac_aml40) { + TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_UP | TXGBE_AMLITE_LED_LINK_40G | + TXGBE_AMLITE_LED_LINK_ACTIVE); + } else if (hw->mac.type 
== txgbe_mac_aml) + TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_UP | TXGBE_AMLITE_LED_LINK_25G | + TXGBE_AMLITE_LED_LINK_10G | TXGBE_AMLITE_LED_LINK_ACTIVE); + else + TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_UP | TXGBE_LED_LINK_10G | TXGBE_LED_LINK_1G | TXGBE_LED_LINK_ACTIVE); + } /* Disable Tx laser; allow 100us to go dark per spec */ - esdp_reg |= TXGBE_GPIO_DR_1 | TXGBE_GPIO_DR_0; + if (hw->mac.type == txgbe_mac_aml40) { + wr32m(hw, TXGBE_GPIO_DDR, TXGBE_GPIO_DR_1, TXGBE_GPIO_DR_1); + esdp_reg &= ~TXGBE_GPIO_DR_1; + } else if (hw->mac.type == txgbe_mac_aml) { + esdp_reg |= TXGBE_GPIO_DR_1; + } else { + esdp_reg |= TXGBE_GPIO_DR_1 | TXGBE_GPIO_DR_0; + } + wr32(hw, TXGBE_GPIO_DR, esdp_reg); TXGBE_WRITE_FLUSH(hw); usec_delay(100); @@ -4475,15 +4919,22 @@ void txgbe_disable_tx_laser_multispeed_fiber(struct txgbe_hw *hw) **/ void txgbe_enable_tx_laser_multispeed_fiber(struct txgbe_hw *hw) { - if (!(TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_fiber)) + if (!((TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_fiber) || + (TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_fiber_qsfp))) return; + if (txgbe_open_notify(hw)) /* recover led configure when ifconfig up */ wr32(hw, TXGBE_CFG_LED_CTL, 0); /* Enable Tx laser; allow 100ms to light up */ - wr32m(hw, TXGBE_GPIO_DR, - TXGBE_GPIO_DR_0 | TXGBE_GPIO_DR_1, 0); + if (hw->mac.type == txgbe_mac_aml40) { + wr32m(hw, TXGBE_GPIO_DDR, TXGBE_GPIO_DR_1, TXGBE_GPIO_DR_1); + wr32m(hw, TXGBE_GPIO_DR, TXGBE_GPIO_DR_1, TXGBE_GPIO_DR_1); + } else { + wr32m(hw, TXGBE_GPIO_DR, + TXGBE_GPIO_DR_0 | TXGBE_GPIO_DR_1, 0); + } TXGBE_WRITE_FLUSH(hw); msec_delay(100); } @@ -4502,7 +4953,8 @@ void txgbe_enable_tx_laser_multispeed_fiber(struct txgbe_hw *hw) **/ void txgbe_flap_tx_laser_multispeed_fiber(struct txgbe_hw *hw) { - if (!(TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_fiber)) + if (!((TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_fiber) || + (TCALL(hw, mac.ops.get_media_type) == 
txgbe_media_type_fiber_qsfp))) return; /* Blocked by MNG FW so bail */ @@ -4528,6 +4980,9 @@ void txgbe_set_hard_rate_select_speed(struct txgbe_hw *hw, u32 esdp_reg = rd32(hw, TXGBE_GPIO_DR); switch (speed) { + case TXGBE_LINK_SPEED_25GB_FULL: + /*amlite TODO*/ + break; case TXGBE_LINK_SPEED_10GB_FULL: esdp_reg |= TXGBE_GPIO_DR_5 | TXGBE_GPIO_DR_4; break; @@ -4547,28 +5002,6 @@ void txgbe_set_hard_rate_select_speed(struct txgbe_hw *hw, TXGBE_WRITE_FLUSH(hw); } - -s32 txgbe_enable_rx_adapter(struct txgbe_hw *hw) -{ - u32 value; - - value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL); - value |= 1 << 12; - txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, value); - - value = 0; - while (!(value >> 11)) { - value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_AD_ACK); - msleep(1); - } - - value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL); - value &= ~(1 << 12); - txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, value); - - return 0; -} - s32 txgbe_set_sgmii_an37_ability(struct txgbe_hw *hw) { u32 value; @@ -4579,7 +5012,7 @@ s32 txgbe_set_sgmii_an37_ability(struct txgbe_hw *hw) /* for sgmii + external phy, set to 0x0105 (phy sgmii mode) */ /* for sgmii direct link, set to 0x010c (mac sgmii mode) */ if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII || - txgbe_get_media_type(hw) == txgbe_media_type_fiber) { + TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_fiber) { txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_AN_CTL, 0x010c); } else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_SGMII || (hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) { @@ -4590,7 +5023,7 @@ s32 txgbe_set_sgmii_an37_ability(struct txgbe_hw *hw) value = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); value = (value & ~0x1200) | (0x1 << 9); - if(adapter->an37) + if(adapter->autoneg) value |= (0x1 << 12); txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_CTL, value); @@ -4598,6 +5031,27 @@ s32 txgbe_set_sgmii_an37_ability(struct txgbe_hw *hw) return 0; } +int txgbe_enable_rx_adapter(struct txgbe_hw *hw) +{ + int ret = 0; + u32 value; + + value 
= txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL); + value |= BIT(12); + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, value); + + value = 0; + ret = read_poll_timeout(txgbe_rd32_epcs, value, (value & BIT(11)), 1000, + 200000, false, hw, TXGBE_PHY_RX_AD_ACK); + if (ret) + return -ETIMEDOUT; + + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL); + value &= ~BIT(12); + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, value); + + return 0; +} s32 txgbe_set_link_to_kr(struct txgbe_hw *hw, bool autoneg) { @@ -4618,18 +5072,19 @@ s32 txgbe_set_link_to_kr(struct txgbe_hw *hw, bool autoneg) status = TXGBE_ERR_XPCS_POWER_UP_FAILED; goto out; } - e_dev_info("It is set to kr.\n"); txgbe_wr32_epcs(hw, 0x78002, 0x0); txgbe_wr32_epcs(hw, 0x78001, 0x7); if (AN73_TRAINNING_MODE == 1) - txgbe_wr32_epcs(hw, 0x78003, 0x1); + txgbe_wr32_epcs(hw, TXGBE_VR_AN_KR_MODE_CL, 0x1); /* 2. Disable xpcs AN-73 */ - if (adapter->backplane_an == 1){ + if (adapter->backplane_an == 1) { txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x3000); + /* bit8:CA_TX_EQ bit7:an_preset bit6:TX_EQ_OVR_RIDE */ value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1); value &= ~0x40; + value |= BIT(8); txgbe_wr32_epcs(hw, 0x18037, value); } else { txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0); @@ -4638,13 +5093,9 @@ s32 txgbe_set_link_to_kr(struct txgbe_hw *hw, bool autoneg) if (KR_FEC == 1) txgbe_wr32_epcs(hw, 0x70012, 0xc000 | txgbe_rd32_epcs(hw, 0x70012)); - if (KR_AN73_PRESET == 1) - txgbe_wr32_epcs(hw, 0x18037, 0x80 | txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1)); - if (KR_POLLING == 1) { - txgbe_wr32_epcs(hw, 0x18006, 0xffff); - txgbe_wr32_epcs(hw, 0x18008, 0xA697); - } + //txgbe_wr32_epcs(hw, 0x18006, 0xffff); + //txgbe_wr32_epcs(hw, 0x18008, 0xA697); /* 3. 
Set VR_XS_PMA_Gen5_12G_MPLLA_CTRL3 Register */ /* Bit[10:0](MPLLA_BANDWIDTH) = 11'd123 (default: 11'd16) */ @@ -4685,7 +5136,7 @@ s32 txgbe_set_link_to_kr(struct txgbe_hw *hw, bool autoneg) } if ((KR_SET == 1) || (adapter->ffe_set == TXGBE_BP_M_KR)) { - e_dev_info("Set KR TX_EQ MAIN:%d PRE:%d POST:%d\n", + e_info(hw, "Set KR TX_EQ MAIN:%d PRE:%d POST:%d\n", adapter->ffe_main,adapter->ffe_pre,adapter->ffe_post); value = (0x1804 & ~0x3F3F); value |= adapter->ffe_main << 8 | adapter->ffe_pre; @@ -4930,7 +5381,8 @@ s32 txgbe_set_link_to_kx(struct txgbe_hw *hw, struct txgbe_adapter *adapter = hw->back; /* check link status, if already set, skip setting it again */ - if (hw->link_status == TXGBE_LINK_STATUS_KX) { + if (hw->link_status == TXGBE_LINK_STATUS_KX && + (hw->subsystem_device_id & 0xF0) != TXGBE_ID_MAC_SGMII) { goto out; } e_dev_info("It is set to kx. speed =0x%x\n", speed); @@ -5146,7 +5598,7 @@ s32 txgbe_set_link_to_kx(struct txgbe_hw *hw, return status; } -s32 txgbe_set_link_to_sfi(struct txgbe_hw *hw, +static s32 txgbe_set_link_to_sfi(struct txgbe_hw *hw, u32 speed) { u32 i; @@ -5157,7 +5609,7 @@ s32 txgbe_set_link_to_sfi(struct txgbe_hw *hw, /* Set the module link speed */ TCALL(hw, mac.ops.set_rate_select_speed, speed); - + /* 1. Wait xpcs power-up good */ for (i = 0; i < TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME; i++) { if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS) & @@ -5444,7 +5896,7 @@ s32 txgbe_set_link_to_sfi(struct txgbe_hw *hw, * * Set the link speed in the AUTOC register and restarts link. **/ -s32 txgbe_setup_mac_link(struct txgbe_hw *hw, +s32 txgbe_setup_mac_link_sp(struct txgbe_hw *hw, u32 speed, bool autoneg_wait_to_complete) { @@ -5471,24 +5923,27 @@ s32 txgbe_setup_mac_link(struct txgbe_hw *hw, if ( ! 
(((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_KR_KX_KX4) || ((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_MAC_XAUI) || - ((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_MAC_SGMII))){ + ((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_MAC_SGMII) || + hw->dac_sfp)) { status = TCALL(hw, mac.ops.check_link, &link_speed, &link_up, false); - if (link_speed == TXGBE_LINK_SPEED_1GB_FULL) { - curr_autoneg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); - curr_autoneg = !!(curr_autoneg & (0x1 << 12)); - } + if (link_speed == TXGBE_LINK_SPEED_1GB_FULL) { + curr_autoneg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); + curr_autoneg = !!(curr_autoneg & (0x1 << 12)); + } if (status != 0) goto out; + if ((link_speed == speed) && link_up && !(speed == TXGBE_LINK_SPEED_1GB_FULL && - (adapter->an37 != curr_autoneg))) - goto out; + (adapter->autoneg != curr_autoneg))) + goto out; } - if ((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_KR_KX_KX4) { + if ((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_KR_KX_KX4 || + hw->dac_sfp) { txgbe_set_link_to_kr(hw, autoneg); #if 0 if (!autoneg) { @@ -5514,7 +5969,7 @@ s32 txgbe_setup_mac_link(struct txgbe_hw *hw, ((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_MAC_XAUI) || (hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_SGMII || ((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_MAC_SGMII) || - (txgbe_get_media_type(hw) == txgbe_media_type_copper && + (TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_copper && (hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_SFI_XAUI)) { if (speed == TXGBE_LINK_SPEED_10GB_FULL) { txgbe_set_link_to_kx4(hw, 0); @@ -5523,7 +5978,7 @@ s32 txgbe_setup_mac_link(struct txgbe_hw *hw, txgbe_set_sgmii_an37_ability(hw); hw->phy.autoneg_advertised |= speed; } - } else if (txgbe_get_media_type(hw) == txgbe_media_type_fiber) { + } else if (TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_fiber) { if (!((hw->subsystem_device_id & TXGBE_NCSI_MASK) 
== TXGBE_NCSI_SUP && (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 || hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1))) { @@ -5539,6 +5994,7 @@ s32 txgbe_setup_mac_link(struct txgbe_hw *hw, return status; } + /** * txgbe_setup_copper_link - Set the PHY autoneg advertised field * @hw: pointer to hardware structure @@ -5560,21 +6016,79 @@ STATIC s32 txgbe_setup_copper_link(struct txgbe_hw *hw, if (link_speed != TXGBE_LINK_SPEED_UNKNOWN) /* Set up MAC */ - status = txgbe_setup_mac_link(hw, link_speed, autoneg_wait_to_complete); + status = txgbe_setup_mac_link_sp(hw, link_speed, autoneg_wait_to_complete); else { status = 0; } return status; } -int txgbe_reset_misc(struct txgbe_hw *hw) +int txgbe_reconfig_mac(struct txgbe_hw *hw) { - int i; + u32 mac_wdg_timeout; + u32 mac_flow_ctrl; + + mac_wdg_timeout = rd32(hw, TXGBE_MAC_WDG_TIMEOUT); + mac_flow_ctrl = rd32(hw, TXGBE_MAC_RX_FLOW_CTRL); + + if (hw->bus.lan_id == 0) + wr32(hw, TXGBE_MIS_RST, TXGBE_MIS_RST_LAN0_MAC_RST); + else if (hw->bus.lan_id == 1) + wr32(hw, TXGBE_MIS_RST, TXGBE_MIS_RST_LAN1_MAC_RST); + + /* wait for mac rst complete */ + usec_delay(1500); + wr32m(hw, TXGBE_MAC_MISC_CTL, TXGBE_MAC_MISC_LINK_STS_MOD, + TXGBE_LINK_BOTH_PCS_MAC); + + /* receive packets that size > 2048 */ + wr32m(hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_JE, TXGBE_MAC_RX_CFG_JE); + + /* clear counters on read */ + wr32m(hw, TXGBE_MMC_CONTROL, + TXGBE_MMC_CONTROL_RSTONRD, TXGBE_MMC_CONTROL_RSTONRD); + + wr32m(hw, TXGBE_MAC_RX_FLOW_CTRL, + TXGBE_MAC_RX_FLOW_CTRL_RFE, TXGBE_MAC_RX_FLOW_CTRL_RFE); + + wr32(hw, TXGBE_MAC_PKT_FLT, + TXGBE_MAC_PKT_FLT_PR); + + wr32(hw, TXGBE_MAC_WDG_TIMEOUT, mac_wdg_timeout); + wr32(hw, TXGBE_MAC_RX_FLOW_CTRL, mac_flow_ctrl); + + return 0; +} + +static int txgbe_reset_misc(struct txgbe_hw *hw) +{ + struct txgbe_adapter *adapter = hw->back; u32 value; + u32 err; + int i; - value = txgbe_rd32_epcs(hw, TXGBE_SR_PCS_CTL2); - if ((value & 0x3) != TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X) { - hw->link_status = 
TXGBE_LINK_STATUS_NONE; + if (hw->mac.type == txgbe_mac_aml40) { + if (!(rd32(hw, TXGBE_EPHY_STAT) & TXGBE_EPHY_STAT_PPL_LOCK)) { + err = TCALL(hw, mac.ops.setup_link, TXGBE_LINK_SPEED_40GB_FULL, false); + if (err) { + e_dev_info("txgbe_reset_misc setup phy failed\n"); + return err; + } + } + } else if (hw->mac.type == txgbe_mac_aml) { + if ((rd32(hw, TXGBE_EPHY_STAT) & TXGBE_EPHY_STAT_PPL_LOCK) + != TXGBE_EPHY_STAT_PPL_LOCK) { + err = TCALL(hw, mac.ops.setup_link, TXGBE_LINK_SPEED_AMLITE_AUTONEG, false); + if (err) { + e_dev_info("txgbe_reset_misc setup phy failed\n"); + return err; + } + } + } else { + value = txgbe_rd32_epcs(hw, TXGBE_SR_PCS_CTL2); + if ((value & 0x3) != TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X) + hw->link_status = TXGBE_LINK_STATUS_NONE; } /* receive packets that size > 2048 */ @@ -5594,13 +6108,6 @@ int txgbe_reset_misc(struct txgbe_hw *hw) wr32m(hw, TXGBE_MIS_RST_ST, TXGBE_MIS_RST_ST_RST_INIT, 0xA00); - /* errata 4: initialize mng flex tbl and wakeup flex tbl*/ - wr32(hw, TXGBE_PSR_MNG_FLEX_SEL, 0); - for (i = 0; i < 16; i++) { - wr32(hw, TXGBE_PSR_MNG_FLEX_DW_L(i), 0); - wr32(hw, TXGBE_PSR_MNG_FLEX_DW_H(i), 0); - wr32(hw, TXGBE_PSR_MNG_FLEX_MSK(i), 0); - } wr32(hw, TXGBE_PSR_LAN_FLEX_SEL, 0); for (i = 0; i < 16; i++) { wr32(hw, TXGBE_PSR_LAN_FLEX_DW_L(i), 0); @@ -5627,17 +6134,18 @@ int txgbe_reset_misc(struct txgbe_hw *hw) **/ s32 txgbe_reset_hw(struct txgbe_hw *hw) { - s32 status; - u32 reset = 0; - u32 i; u32 sr_pcs_ctl, sr_pma_mmd_ctl1, sr_an_mmd_ctl, sr_an_mmd_adv_reg2; - u32 vr_xs_or_pcs_mmd_digi_ctl1, curr_vr_xs_or_pcs_mmd_digi_ctl1; - u32 curr_sr_pcs_ctl, curr_sr_pma_mmd_ctl1; - u32 curr_sr_an_mmd_ctl, curr_sr_an_mmd_adv_reg2; + u32 curr_sr_an_mmd_ctl = 0, curr_sr_an_mmd_adv_reg2 = 0; + u32 curr_sr_pcs_ctl = 0, curr_sr_pma_mmd_ctl1 = 0; + struct txgbe_adapter *adapter = hw->back; + u32 curr_vr_xs_or_pcs_mmd_digi_ctl1 = 0; + u32 vr_xs_or_pcs_mmd_digi_ctl1; u32 reset_status = 0; u32 rst_delay = 0; - struct txgbe_adapter *adapter = hw->back; + 
u32 reset = 0; + s32 status; u32 value; + u32 i; /* Call adapter stop to disable tx/rx and clear interrupts */ status = TCALL(hw, mac.ops.stop_adapter); @@ -5650,15 +6158,16 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw) if (status == TXGBE_ERR_SFP_NOT_SUPPORTED) goto reset_hw_out; - /* remember internel phy regs from before we reset */ - curr_sr_pcs_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_PCS_CTL2); - curr_sr_pma_mmd_ctl1 = txgbe_rd32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1); - curr_sr_an_mmd_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_CTL); - curr_sr_an_mmd_adv_reg2 = txgbe_rd32_epcs(hw, - TXGBE_SR_AN_MMD_ADV_REG2); - curr_vr_xs_or_pcs_mmd_digi_ctl1 = - txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1); - + if (hw->mac.type == txgbe_mac_sp) { + /* remember internel phy regs from before we reset */ + curr_sr_pcs_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_PCS_CTL2); + curr_sr_pma_mmd_ctl1 = txgbe_rd32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1); + curr_sr_an_mmd_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_CTL); + curr_sr_an_mmd_adv_reg2 = txgbe_rd32_epcs(hw, + TXGBE_SR_AN_MMD_ADV_REG2); + curr_vr_xs_or_pcs_mmd_digi_ctl1 = + txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1); + } /* * Issue global reset to the MAC. Needs to be SW reset if link is up. * If link reset is used when link is up, it might reset the PHY when @@ -5734,94 +6243,108 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw) if (status != 0) goto reset_hw_out; - /* - * Store the original AUTOC/AUTOC2 values if they have not been - * stored off yet. Otherwise restore the stored original - * values since the reset operation sets back to defaults. 
- */ - sr_pcs_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_PCS_CTL2); - sr_pma_mmd_ctl1 = txgbe_rd32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1); - sr_an_mmd_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_CTL); - sr_an_mmd_adv_reg2 = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG2); - vr_xs_or_pcs_mmd_digi_ctl1 = - txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1); - - if (hw->mac.orig_link_settings_stored == false) { - hw->mac.orig_sr_pcs_ctl2 = sr_pcs_ctl; - hw->mac.orig_sr_pma_mmd_ctl1 = sr_pma_mmd_ctl1; - hw->mac.orig_sr_an_mmd_ctl = sr_an_mmd_ctl; - hw->mac.orig_sr_an_mmd_adv_reg2 = sr_an_mmd_adv_reg2; - hw->mac.orig_vr_xs_or_pcs_mmd_digi_ctl1 = - vr_xs_or_pcs_mmd_digi_ctl1; - hw->mac.orig_link_settings_stored = true; + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + wr32(hw, TXGBE_LINKUP_FILTER, TXGBE_LINKUP_FILTER_TIME); + wr32m(hw, TXGBE_MAC_MISC_CTL, TXGBE_MAC_MISC_LINK_STS_MOD, + TXGBE_LINK_BOTH_PCS_MAC); + /* amlite: bme */ + wr32(hw, PX_PF_BME, 0x1); + /* amlite: rdm_rsc_ctl_free_ctl set to 1 */ + wr32m(hw, TXGBE_RDM_RSC_CTL, TXGBE_RDM_RSC_CTL_FREE_CTL, + TXGBE_RDM_RSC_CTL_FREE_CTL); + adapter->an_done = false; + adapter->cur_fec_link = TXGBE_PHY_FEC_AUTO; } else { - - /* If MNG FW is running on a multi-speed device that - * doesn't autoneg with out driver support we need to - * leave LMS in the state it was before we MAC reset. - * Likewise if we support WoL we don't want change the - * LMS state. + /* + * Store the original AUTOC/AUTOC2 values if they have not been + * stored off yet. Otherwise restore the stored original + * values since the reset operation sets back to defaults. 
*/ + sr_pcs_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_PCS_CTL2); + sr_pma_mmd_ctl1 = txgbe_rd32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1); + sr_an_mmd_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_CTL); + sr_an_mmd_adv_reg2 = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG2); + vr_xs_or_pcs_mmd_digi_ctl1 = + txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1); + + if (hw->mac.orig_link_settings_stored == false) { + hw->mac.orig_sr_pcs_ctl2 = sr_pcs_ctl; + hw->mac.orig_sr_pma_mmd_ctl1 = sr_pma_mmd_ctl1; + hw->mac.orig_sr_an_mmd_ctl = sr_an_mmd_ctl; + hw->mac.orig_sr_an_mmd_adv_reg2 = sr_an_mmd_adv_reg2; + hw->mac.orig_vr_xs_or_pcs_mmd_digi_ctl1 = + vr_xs_or_pcs_mmd_digi_ctl1; + hw->mac.orig_link_settings_stored = true; + } else { - hw->mac.orig_sr_pcs_ctl2 = curr_sr_pcs_ctl; - hw->mac.orig_sr_pma_mmd_ctl1 = curr_sr_pma_mmd_ctl1; - hw->mac.orig_sr_an_mmd_ctl = curr_sr_an_mmd_ctl; - hw->mac.orig_sr_an_mmd_adv_reg2 = - curr_sr_an_mmd_adv_reg2; - hw->mac.orig_vr_xs_or_pcs_mmd_digi_ctl1 = - curr_vr_xs_or_pcs_mmd_digi_ctl1; + /* If MNG FW is running on a multi-speed device that + * doesn't autoneg with out driver support we need to + * leave LMS in the state it was before we MAC reset. + * Likewise if we support WoL we don't want change the + * LMS state. + */ - } + hw->mac.orig_sr_pcs_ctl2 = curr_sr_pcs_ctl; + hw->mac.orig_sr_pma_mmd_ctl1 = curr_sr_pma_mmd_ctl1; + hw->mac.orig_sr_an_mmd_ctl = curr_sr_an_mmd_ctl; + hw->mac.orig_sr_an_mmd_adv_reg2 = + curr_sr_an_mmd_adv_reg2; + hw->mac.orig_vr_xs_or_pcs_mmd_digi_ctl1 = + curr_vr_xs_or_pcs_mmd_digi_ctl1; + } + } /*make sure phy power is up*/ msleep(100); + + if (hw->mac.type == txgbe_mac_sp) { /*A temporary solution for set to sfi*/ - if(SFI_SET == 1 || adapter->ffe_set == TXGBE_BP_M_SFI) { - e_dev_info("Set SFI TX_EQ MAIN:%d PRE:%d POST:%d\n", - adapter->ffe_main,adapter->ffe_pre,adapter->ffe_post); - /* 5. 
Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL0 Register Bit[13:8](TX_EQ_MAIN) - * = 6'd30, Bit[5:0](TX_EQ_PRE) = 6'd4 - */ - value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0); - value = (value & ~0x3F3F) | (adapter->ffe_main << 8) | adapter->ffe_pre; - txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); - /* 6. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL1 Register Bit[6](TX_EQ_OVR_RIDE) - * = 1'b1, Bit[5:0](TX_EQ_POST) = 6'd36 - */ - value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1); - value = (value & ~0x7F) | adapter->ffe_post | (1 << 6); - txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); - } + if(SFI_SET == 1 || adapter->ffe_set == TXGBE_BP_M_SFI) { + e_dev_info("Set SFI TX_EQ MAIN:%d PRE:%d POST:%d\n", + adapter->ffe_main,adapter->ffe_pre,adapter->ffe_post); + /* 5. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL0 Register Bit[13:8](TX_EQ_MAIN) + * = 6'd30, Bit[5:0](TX_EQ_PRE) = 6'd4 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0); + value = (value & ~0x3F3F) | (adapter->ffe_main << 8) | adapter->ffe_pre; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + /* 6. 
Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL1 Register Bit[6](TX_EQ_OVR_RIDE) + * = 1'b1, Bit[5:0](TX_EQ_POST) = 6'd36 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1); + value = (value & ~0x7F) | adapter->ffe_post | (1 << 6); + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } - if (KR_SET == 1 || adapter->ffe_set == TXGBE_BP_M_KR) { - e_dev_info("Set KR TX_EQ MAIN:%d PRE:%d POST:%d\n", - adapter->ffe_main,adapter->ffe_pre,adapter->ffe_post); - value = (0x1804 & ~0x3F3F); - value |= adapter->ffe_main << 8 | adapter->ffe_pre; - txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + if (KR_SET == 1 || adapter->ffe_set == TXGBE_BP_M_KR) { + e_dev_info("Set KR TX_EQ MAIN:%d PRE:%d POST:%d\n", + adapter->ffe_main,adapter->ffe_pre,adapter->ffe_post); + value = (0x1804 & ~0x3F3F); + value |= adapter->ffe_main << 8 | adapter->ffe_pre; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); - value = (0x50 & ~0x7F) | (1 << 6)| adapter->ffe_post; - txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); - } + value = (0x50 & ~0x7F) | (1 << 6)| adapter->ffe_post; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } - if(KX_SET == 1 || adapter->ffe_set == TXGBE_BP_M_KX) { - e_dev_info("Set KX TX_EQ MAIN:%d PRE:%d POST:%d\n", - adapter->ffe_main,adapter->ffe_pre,adapter->ffe_post); - /* 5. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL0 Register Bit[13:8](TX_EQ_MAIN) - * = 6'd30, Bit[5:0](TX_EQ_PRE) = 6'd4 - */ - value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0); - value = (value & ~0x3F3F) | (adapter->ffe_main << 8) | adapter->ffe_pre; - txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); - /* 6. 
Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL1 Register Bit[6](TX_EQ_OVR_RIDE) - * = 1'b1, Bit[5:0](TX_EQ_POST) = 6'd36 - */ - value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1); - value = (value & ~0x7F) | adapter->ffe_post | (1 << 6); - txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); - + if(KX_SET == 1 || adapter->ffe_set == TXGBE_BP_M_KX) { + e_dev_info("Set KX TX_EQ MAIN:%d PRE:%d POST:%d\n", + adapter->ffe_main,adapter->ffe_pre,adapter->ffe_post); + /* 5. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL0 Register Bit[13:8](TX_EQ_MAIN) + * = 6'd30, Bit[5:0](TX_EQ_PRE) = 6'd4 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0); + value = (value & ~0x3F3F) | (adapter->ffe_main << 8) | adapter->ffe_pre; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + /* 6. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL1 Register Bit[6](TX_EQ_OVR_RIDE) + * = 1'b1, Bit[5:0](TX_EQ_POST) = 6'd36 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1); + value = (value & ~0x7F) | adapter->ffe_post | (1 << 6); + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + + } } - /* * Store MAC address from RAR0, clear receive address registers, and * clear the multicast table. Also reset num_rar_entries to 128, @@ -5913,8 +6436,9 @@ s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw) wr32(hw, TXGBE_RDB_FDIR_FREE, 0); TXGBE_WRITE_FLUSH(hw); /* - * sapphire adapters flow director init flow cannot be restarted, - * Workaround sapphire silicon errata by performing the following steps + * sapphire/amber-lite adapters flow director init flow cannot be + * restarted, Workaround sapphire/amber-lite + * silicon errata by performing the following steps * before re-writing the FDIRCTRL control register with the same value. 
* - write 1 to bit 8 of FDIRCMD register & * - write 0 to bit 8 of FDIRCMD register @@ -6587,10 +7111,12 @@ s32 txgbe_start_hw(struct txgbe_hw *hw) /* Setup flow control */ ret_val = TCALL(hw, mac.ops.setup_fc); - /* Clear the rate limiters */ - for (i = 0; i < hw->mac.max_tx_queues; i++) { - wr32(hw, TXGBE_TDM_RP_IDX, i); - wr32(hw, TXGBE_TDM_RP_RATE, 0); + if (hw->mac.type == txgbe_mac_sp) { + /* Clear the rate limiters */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + wr32(hw, TXGBE_TDM_RP_IDX, i); + wr32(hw, TXGBE_TDM_RP_RATE, 0); + } } TXGBE_WRITE_FLUSH(hw); @@ -6639,7 +7165,8 @@ s32 txgbe_identify_phy(struct txgbe_hw *hw) txgbe_get_phy_id(hw); hw->phy.type = txgbe_get_phy_type_from_id(hw); status = 0; - } else if (media_type == txgbe_media_type_fiber) { + } else if (media_type == txgbe_media_type_fiber || + media_type == txgbe_media_type_fiber_qsfp) { status = txgbe_identify_module(hw); } else { hw->phy.type = txgbe_phy_none; @@ -6659,21 +7186,57 @@ s32 txgbe_identify_phy(struct txgbe_hw *hw) return status; } +int txgbe_set_pps(struct txgbe_hw *hw, bool enable, u64 nsec, u64 cycles) +{ + int status; + struct txgbe_hic_set_pps pps_cmd; + int i; + + pps_cmd.hdr.cmd = FW_PPS_SET_CMD; + pps_cmd.hdr.buf_len = FW_PPS_SET_LEN; + pps_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + pps_cmd.lan_id = hw->bus.lan_id; + pps_cmd.enable = enable; + pps_cmd.nsec = nsec; + pps_cmd.cycles = cycles; + pps_cmd.hdr.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; + + /* send reset request to FW and wait for response */ + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + status = txgbe_host_interface_command(hw, (u32 *)&pps_cmd, + sizeof(pps_cmd), + TXGBE_HI_COMMAND_TIMEOUT, + true); + msleep(1); + if (status != 0) + continue; + + if (pps_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + break; + } + + return status; + +} /** - * txgbe_enable_rx_dma - Enable the Rx DMA unit on sapphire + * 
txgbe_enable_rx_dma - Enable the Rx DMA unit on sapphire/amber-lite * @hw: pointer to hardware structure * @regval: register value to write to RXCTRL * - * Enables the Rx DMA unit for sapphire + * Enables the Rx DMA unit for sapphire/amber-lite **/ s32 txgbe_enable_rx_dma(struct txgbe_hw *hw, u32 regval) { /* - * Workaround for sapphire silicon errata when enabling the Rx datapath. - * If traffic is incoming before we enable the Rx unit, it could hang - * the Rx DMA unit. Therefore, make sure the security engine is - * completely disabled prior to enabling the Rx unit. + * Workaround for sapphire/amber-lite silicon errata when enabling the + * Rx datapath. If traffic is incoming before we enable the Rx unit, it + * could hang the Rx DMA unit. Therefore, make sure the security engine + * is completely disabled prior to enabling the Rx unit. */ TCALL(hw, mac.ops.disable_sec_rx_path); @@ -6846,7 +7409,7 @@ s32 txgbe_init_eeprom_params(struct txgbe_hw *hw) * * Reads a 16 bit word from the EEPROM using the hostif. 
**/ -s32 txgbe_read_ee_hostif_data(struct txgbe_hw *hw, u16 offset, +static s32 txgbe_read_ee_hostif_data(struct txgbe_hw *hw, u16 offset, u16 *data) { s32 status; @@ -6855,7 +7418,9 @@ s32 txgbe_read_ee_hostif_data(struct txgbe_hw *hw, u16 offset, buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; buffer.hdr.req.buf_lenh = 0; buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; - buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + if (hw->mac.type == txgbe_mac_sp) + buffer.hdr.req.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; /* convert offset from words to bytes */ buffer.address = TXGBE_CPU_TO_BE32(offset * 2); @@ -6868,10 +7433,14 @@ s32 txgbe_read_ee_hostif_data(struct txgbe_hw *hw, u16 offset, if (status) return status; - if (txgbe_check_mng_access(hw)) - *data = (u16)rd32a(hw, TXGBE_MNG_MBOX, - FW_NVM_DATA_OFFSET); - else { + if (txgbe_check_mng_access(hw)) { + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + *data = (u16)rd32a(hw, TXGBE_AML_MNG_MBOX_FW2SW, + FW_NVM_DATA_OFFSET); + else if (hw->mac.type == txgbe_mac_sp) + *data = (u16)rd32a(hw, TXGBE_MNG_MBOX, + FW_NVM_DATA_OFFSET); + } else { status = TXGBE_ERR_MNG_ACCESS_FAILED; return status; } @@ -6921,6 +7490,7 @@ s32 txgbe_read_ee_hostif_buffer(struct txgbe_hw *hw, u32 current_word = 0; u16 words_to_read; s32 status; + u32 reg; u32 i; u32 value = 0; @@ -6940,7 +7510,9 @@ s32 txgbe_read_ee_hostif_buffer(struct txgbe_hw *hw, buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; buffer.hdr.req.buf_lenh = 0; buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; - buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + if (hw->mac.type == txgbe_mac_sp) + buffer.hdr.req.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; /* convert offset from words to bytes */ buffer.address = TXGBE_CPU_TO_BE32((offset + current_word) * 2); @@ -6956,11 +7528,14 @@ s32 txgbe_read_ee_hostif_buffer(struct txgbe_hw *hw, goto out; } + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + reg = TXGBE_AML_MNG_MBOX_FW2SW; 
+ else + reg = TXGBE_MNG_MBOX; + for (i = 0; i < words_to_read; i++) { - u32 reg = TXGBE_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + - 2 * i; if (txgbe_check_mng_access(hw)) - value = rd32(hw, reg); + value = rd32(hw, reg + (FW_NVM_DATA_OFFSET << 2) + 2 * i); else { status = TXGBE_ERR_MNG_ACCESS_FAILED; return status; @@ -6991,7 +7566,7 @@ s32 txgbe_read_ee_hostif_buffer(struct txgbe_hw *hw, * * Write a 16 bit word to the EEPROM using the hostif. **/ -s32 txgbe_write_ee_hostif_data(struct txgbe_hw *hw, u16 offset, +static s32 txgbe_write_ee_hostif_data(struct txgbe_hw *hw, u16 offset, u16 data) { s32 status; @@ -7000,7 +7575,9 @@ s32 txgbe_write_ee_hostif_data(struct txgbe_hw *hw, u16 offset, buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; buffer.hdr.req.buf_lenh = 0; buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; - buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; +#ifndef TXGBE_SWFW_MBOX_AML + buffer.hdr.req.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; +#endif /* one word */ buffer.length = TXGBE_CPU_TO_BE16(sizeof(u16)); @@ -7023,7 +7600,9 @@ s32 txgbe_close_notify(struct txgbe_hw *hw) buffer.hdr.req.cmd = FW_DW_CLOSE_NOTIFY; buffer.hdr.req.buf_lenh = 0; buffer.hdr.req.buf_lenl = 0; - buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + if (hw->mac.type == txgbe_mac_sp) + buffer.hdr.req.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; /* one word */ buffer.length = 0; @@ -7058,7 +7637,9 @@ s32 txgbe_open_notify(struct txgbe_hw *hw) buffer.hdr.req.cmd = FW_DW_OPEN_NOTIFY; buffer.hdr.req.buf_lenh = 0; buffer.hdr.req.buf_lenl = 0; - buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + if (hw->mac.type == txgbe_mac_sp) + buffer.hdr.req.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; /* one word */ buffer.length = 0; @@ -7191,9 +7772,13 @@ s32 txgbe_calc_eeprom_checksum(struct txgbe_hw *hw) local_buffer = buffer; } - for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++) + for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++) { + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == 
txgbe_mac_aml40) + if ((i > (TXGBE_SHOWROM_I2C_PTR / 2)) && (i < (TXGBE_SHOWROM_I2C_END / 2))) + local_buffer[i] = 0xffff; if (i != hw->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM) checksum += local_buffer[i]; + } checksum = (u16)TXGBE_EEPROM_SUM - checksum; if (eeprom_ptrs) @@ -7304,7 +7889,9 @@ s32 txgbe_update_flash(struct txgbe_hw *hw) buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD; buffer.req.buf_lenh = 0; buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN; - buffer.req.checksum = FW_DEFAULT_CHECKSUM; + + if (hw->mac.type == txgbe_mac_sp) + buffer.req.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; status = txgbe_host_interface_command(hw, (u32 *)&buffer, sizeof(buffer), @@ -7323,12 +7910,12 @@ s32 txgbe_update_flash(struct txgbe_hw *hw) * * Reads the links register to determine if link is up and the current speed **/ -s32 txgbe_check_mac_link(struct txgbe_hw *hw, u32 *speed, +s32 txgbe_check_mac_link_sp(struct txgbe_hw *hw, u32 *speed, bool *link_up, bool link_up_wait_to_complete) { u32 links_reg = 0; - u32 i; u16 value = 0; + u32 i; if (link_up_wait_to_complete) { for (i = 0; i < TXGBE_LINK_UP_TIME; i++) { @@ -7336,14 +7923,14 @@ s32 txgbe_check_mac_link(struct txgbe_hw *hw, u32 *speed, ((hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI)) { /* read ext phy link status */ txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 0x03, 0x8008, &value); - if (value & 0x400) { + if (value & 0x400) *link_up = true; - } else { + else *link_up = false; - } } else { *link_up = true; } + if (*link_up) { links_reg = rd32(hw, TXGBE_CFG_PORT_ST); @@ -7361,24 +7948,22 @@ s32 txgbe_check_mac_link(struct txgbe_hw *hw, u32 *speed, ((hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI)) { /* read ext phy link status */ txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 0x03, 0x8008, &value); - if (value & 0x400) { + if (value & 0x400) *link_up = true; - } else { + else *link_up = false; - } } else { *link_up = true; } if (*link_up) { links_reg = rd32(hw, TXGBE_CFG_PORT_ST); - if (links_reg & 
TXGBE_CFG_PORT_ST_LINK_UP) { + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) *link_up = true; - } else { + else *link_up = false; - } } } - + /* sync link status to fw for ocp card */ if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) wr32(hw, TXGBE_TSC_LSEC_PKTNUM0, value); @@ -7389,43 +7974,42 @@ s32 txgbe_check_mac_link(struct txgbe_hw *hw, u32 *speed, (hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core1)) { *link_up = hw->f2c_mod_status; - if (*link_up) { + if (*link_up) /* recover led configure when link up */ wr32(hw, TXGBE_CFG_LED_CTL, 0); - } else { + else /* over write led when link down */ - TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_UP | TXGBE_LED_LINK_10G | - TXGBE_LED_LINK_1G | TXGBE_LED_LINK_ACTIVE); - } + TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_UP | TXGBE_LED_LINK_10G | + TXGBE_LED_LINK_1G | TXGBE_LED_LINK_ACTIVE); } if (*link_up) { if (TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_copper && ((hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI)) { - if ((value & 0xc000) == 0xc000) { + if ((value & 0xc000) == 0xc000) *speed = TXGBE_LINK_SPEED_10GB_FULL; - } else if ((value & 0xc000) == 0x8000) { + else if ((value & 0xc000) == 0x8000) *speed = TXGBE_LINK_SPEED_1GB_FULL; - } else if ((value & 0xc000) == 0x4000) { + else if ((value & 0xc000) == 0x4000) *speed = TXGBE_LINK_SPEED_100_FULL; - } else if ((value & 0xc000) == 0x0000) { + else if ((value & 0xc000) == 0x0000) *speed = TXGBE_LINK_SPEED_10_FULL; - } } else { if ((links_reg & TXGBE_CFG_PORT_ST_LINK_10G) == - TXGBE_CFG_PORT_ST_LINK_10G) { + TXGBE_CFG_PORT_ST_LINK_10G) *speed = TXGBE_LINK_SPEED_10GB_FULL; - } else if ((links_reg & TXGBE_CFG_PORT_ST_LINK_1G) == - TXGBE_CFG_PORT_ST_LINK_1G){ + else if ((links_reg & TXGBE_CFG_PORT_ST_LINK_1G) == + TXGBE_CFG_PORT_ST_LINK_1G) *speed = TXGBE_LINK_SPEED_1GB_FULL; - } else if ((links_reg & TXGBE_CFG_PORT_ST_LINK_100M) == - TXGBE_CFG_PORT_ST_LINK_100M){ + else if ((links_reg & TXGBE_CFG_PORT_ST_LINK_100M) == + TXGBE_CFG_PORT_ST_LINK_100M) *speed = 
TXGBE_LINK_SPEED_100_FULL; - } else + else *speed = TXGBE_LINK_SPEED_10_FULL; } - } else + } else { *speed = TXGBE_LINK_SPEED_UNKNOWN; + } return 0; } @@ -7457,7 +8041,10 @@ s32 txgbe_hic_write_lldp(struct txgbe_hw *hw,u32 open) buffer.hdr.cmd = 0xf1 - open; buffer.hdr.buf_len = 0x1; buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; - buffer.hdr.checksum = FW_DEFAULT_CHECKSUM; + + if (hw->mac.type == txgbe_mac_sp) + buffer.hdr.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; + buffer.func = PCI_FUNC(pdev->devfn); status = txgbe_host_interface_command(hw, (u32 *)&buffer, sizeof(buffer), 5000, false); @@ -7465,25 +8052,161 @@ s32 txgbe_hic_write_lldp(struct txgbe_hw *hw,u32 open) } +static int txgbe_hic_get_lldp(struct txgbe_hw *hw) +{ + int status; + struct txgbe_hic_write_lldp buffer; + + buffer.hdr.cmd = 0xf2; + buffer.hdr.buf_len = 0x1; + buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + + if (hw->mac.type == txgbe_mac_sp) + buffer.hdr.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; + + buffer.func = hw->bus.lan_id; + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), 5000, true); + if (buffer.hdr.cmd_or_resp.ret_status != FW_CEM_RESP_STATUS_SUCCESS) + return -1; + else + return (int)buffer.func; +} + int txgbe_is_lldp(struct txgbe_hw *hw) { u32 tmp = 0, lldp_flash_data = 0, i = 0; struct txgbe_adapter *adapter = hw->back; s32 status = 0; - for (; i < 0x1000 / sizeof(u32); i++) { - status = txgbe_flash_read_dword(hw, TXGBE_LLDP_REG + i * 4, &tmp); - if(status) - return status; - if (tmp == U32_MAX) - break; - lldp_flash_data = tmp; + status = txgbe_hic_get_lldp(hw); + if (status != -1) { + if (status) + adapter->eth_priv_flags |= TXGBE_ETH_PRIV_FLAG_LLDP; + else + adapter->eth_priv_flags &= ~TXGBE_ETH_PRIV_FLAG_LLDP; + return 0; + } else { + for (; i < 0x1000 / sizeof(u32); i++) { + status = txgbe_flash_read_dword(hw, TXGBE_LLDP_REG + i * 4, &tmp); + if(status) + return status; + if (tmp == U32_MAX) + break; + lldp_flash_data = 
tmp; + + } + if (lldp_flash_data & BIT(hw->bus.lan_id)) + adapter->eth_priv_flags |= TXGBE_ETH_PRIV_FLAG_LLDP; + else + adapter->eth_priv_flags &= ~TXGBE_ETH_PRIV_FLAG_LLDP; } - if (lldp_flash_data & BIT(hw->bus.lan_id)) - adapter->eth_priv_flags |= TXGBE_ETH_PRIV_FLAG_LLDP; - else - adapter->eth_priv_flags &= ~TXGBE_ETH_PRIV_FLAG_LLDP; + return 0; +} + +void txgbe_hic_write_autoneg_status(struct txgbe_hw *hw, bool autoneg) +{ + struct txgbe_adapter *adapter = hw->back; + struct txgbe_hic_write_autoneg buffer; + + /* only support sp temporarily */ + if (hw->mac.type != txgbe_mac_sp) + return; + + /* only 0x64e20011 and above 0x20011 support */ + if (adapter->etrack_id != 0x64e20011 && + (adapter->etrack_id & 0xfffff) < 0x20012) + return; + + buffer.hdr.cmd = FW_AN_STA_CMD; + buffer.hdr.buf_len = FW_AN_STA_LEN; + buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + buffer.lan_id = hw->bus.lan_id; + buffer.autoneg = autoneg; + buffer.hdr.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; + + txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), 5000, false); +} + +void txgbe_set_queue_rate_limit(struct txgbe_hw *hw, int queue, u16 max_tx_rate) +{ + struct txgbe_adapter *adapter = hw->back; + int factor_int; + int factor_fra; + int link_speed; + int bcnrc_val; + + /* + * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported + * and 0x004 otherwise. 
+ */ + + wr32(hw, TXGBE_TDM_MMW, 0x14); + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + if (max_tx_rate) { + u16 frac; + + link_speed = txgbe_link_mbps(adapter); + max_tx_rate = max_tx_rate * 105 / 100; //necessary offset by test + /* Calculate the rate factor values to set */ + factor_int = link_speed / max_tx_rate; + frac = (link_speed % max_tx_rate) * 10000 / max_tx_rate; + factor_fra = txgbe_frac_to_bi(frac, 10000, 14); + if (max_tx_rate > link_speed) { + factor_int = 1; + factor_fra = 0; + } + + wr32(hw, TXGBE_TDM_RL_QUEUE_IDX, queue); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_FACTOR_INT_MASK, factor_int << TXGBE_TDM_FACTOR_INT_SHIFT); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_FACTOR_FRA_MASK, factor_fra << TXGBE_TDM_FACTOR_FRA_SHIFT); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_RL_EN, TXGBE_TDM_RL_EN); + } else { + wr32(hw, TXGBE_TDM_RL_QUEUE_IDX, queue); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_RL_EN, 0); + } + } else { + bcnrc_val = TXGBE_TDM_RP_RATE_MAX(max_tx_rate); + + wr32(hw, TXGBE_TDM_RP_IDX, queue); + wr32(hw, TXGBE_TDM_RP_RATE, bcnrc_val); + if (max_tx_rate) + wr32m(hw, TXGBE_TDM_RP_CTL, + TXGBE_TDM_RP_CTL_RLEN, TXGBE_TDM_RP_CTL_RLEN); + else + wr32m(hw, TXGBE_TDM_RP_CTL, + TXGBE_TDM_RP_CTL_RLEN, 0); + } + +} + +int txgbe_hic_notify_led_active(struct txgbe_hw *hw, int active_flag) +{ + int status; + struct txgbe_led_active_set buffer; + + buffer.hdr.cmd = 0xf8; + buffer.hdr.buf_len = 0x1; + buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + buffer.active_flag = active_flag; + + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), 5000, true); return 0; } + +bool txgbe_is_backplane(struct txgbe_hw *hw) +{ + + return hw->mac.ops.get_media_type(hw) == txgbe_media_type_backplane ? 
+ true : false; +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h index 89c4899cb94808ac8e1372b153db2e1e2532b931..3250a90fc55b246cf4be1209b8bc8d4e7b6d078e 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -47,12 +47,22 @@ #define SPI_CMD_CFG1_ADDR 0x10118 // Flash command configuration register 1 #define MISC_RST_REG_ADDR 0x1000c // Misc reset register address #define MGR_FLASH_RELOAD_REG_ADDR 0x101a0 // MGR reload flash read +#define PRB_CTL 0x10200 // used to check whether has been upgraded +#define PRB_SCRATCH 0x10230 // used to check whether has been upgraded #define MAC_ADDR0_WORD0_OFFSET_1G 0x006000c // MAC Address for LAN0, stored in external FLASH #define MAC_ADDR0_WORD1_OFFSET_1G 0x0060014 #define MAC_ADDR1_WORD0_OFFSET_1G 0x007000c // MAC Address for LAN1, stored in external FLASH #define MAC_ADDR1_WORD1_OFFSET_1G 0x0070014 + +#define AMLITE_MAC_ADDR0_WORD0_OFFSET 0x00f010c // MAC Address for LAN0, stored in external FLASH +#define AMLITE_MAC_ADDR0_WORD1_OFFSET 0x00f0114 +#define AMLITE_MAC_ADDR1_WORD0_OFFSET 0x00f020c // MAC Address for LAN1, stored in external FLASH +#define AMLITE_MAC_ADDR1_WORD1_OFFSET 0x00f0214 + #define PRODUCT_SERIAL_NUM_OFFSET_1G 0x00f0000 // Product Serial Number, stored in external FLASH last sector +#define TXGBE_VPD_OFFSET 0x500 +#define TXGBE_VPD_END 0x600 struct txgbe_hic_read_cab { union txgbe_hic_hdr2 hdr; @@ -63,6 +73,32 @@ struct txgbe_hic_read_cab { } dbuf; }; +#ifndef read_poll_timeout +#define 
read_poll_timeout(op, val, cond, sleep_us, timeout_us, \ + sleep_before_read, args...) \ +({ \ + u64 __timeout_us = (timeout_us); \ + unsigned long __sleep_us = (sleep_us); \ + ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ + might_sleep_if((__sleep_us) != 0); \ + if (sleep_before_read && __sleep_us) \ + usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ + for (;;) { \ + (val) = op(args); \ + if (cond) \ + break; \ + if (__timeout_us && \ + ktime_compare(ktime_get(), __timeout) > 0) { \ + (val) = op(args); \ + break; \ + } \ + if (__sleep_us) \ + usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ + cpu_relax(); \ + } \ + (cond) ? 0 : -ETIMEDOUT; \ +}) +#endif /** * Packet Type decoding @@ -125,6 +161,10 @@ struct txgbe_dec_ptype { }; typedef struct txgbe_dec_ptype txgbe_dptype; +u32 rd32_ephy(struct txgbe_hw *hw, u32 addr); +u32 txgbe_rd32_epcs(struct txgbe_hw *hw, u32 addr); +void txgbe_wr32_ephy(struct txgbe_hw *hw, u32 addr, u32 data); +void txgbe_wr32_epcs(struct txgbe_hw *hw, u32 addr, u32 data); void txgbe_dcb_get_rtrup2tc(struct txgbe_hw *hw, u8 *map); u16 txgbe_get_pcie_msix_count(struct txgbe_hw *hw); @@ -248,23 +288,25 @@ void txgbe_atr_compute_perfect_hash(union txgbe_atr_input *input, u32 txgbe_atr_compute_sig_hash(union txgbe_atr_hash_dword input, union txgbe_atr_hash_dword common); -s32 txgbe_get_link_capabilities(struct txgbe_hw *hw, +s32 txgbe_get_link_capabilities_sp(struct txgbe_hw *hw, u32 *speed, bool *autoneg); -enum txgbe_media_type txgbe_get_media_type(struct txgbe_hw *hw); +enum txgbe_media_type txgbe_get_media_type_sp(struct txgbe_hw *hw); void txgbe_disable_tx_laser_multispeed_fiber(struct txgbe_hw *hw); void txgbe_enable_tx_laser_multispeed_fiber(struct txgbe_hw *hw); void txgbe_flap_tx_laser_multispeed_fiber(struct txgbe_hw *hw); void txgbe_set_hard_rate_select_speed(struct txgbe_hw *hw, u32 speed); -s32 txgbe_setup_mac_link(struct txgbe_hw *hw, u32 speed, +int txgbe_init_shared_code(struct txgbe_hw *hw); +s32 
txgbe_setup_mac_link_sp(struct txgbe_hw *hw, u32 speed, bool autoneg_wait_to_complete); -void txgbe_init_mac_link_ops(struct txgbe_hw *hw); +void txgbe_init_mac_link_ops_sp(struct txgbe_hw *hw); s32 txgbe_reset_hw(struct txgbe_hw *hw); s32 txgbe_identify_phy(struct txgbe_hw *hw); -s32 txgbe_init_phy_ops(struct txgbe_hw *hw); +s32 txgbe_init_phy_ops_sp(struct txgbe_hw *hw); s32 txgbe_enable_rx_dma(struct txgbe_hw *hw, u32 regval); -s32 txgbe_init_ops(struct txgbe_hw *hw); +s32 txgbe_init_ops_generic(struct txgbe_hw *hw); s32 txgbe_setup_eee(struct txgbe_hw *hw, bool enable_eee); +int txgbe_reconfig_mac(struct txgbe_hw *hw); s32 txgbe_init_flash_params(struct txgbe_hw *hw); s32 txgbe_read_flash_buffer(struct txgbe_hw *hw, u32 offset, @@ -311,6 +353,17 @@ s32 txgbe_set_link_to_kx(struct txgbe_hw *hw, int txgbe_flash_read_dword(struct txgbe_hw *hw, u32 addr, u32 *data); s32 txgbe_hic_write_lldp(struct txgbe_hw *hw,u32 open); int txgbe_is_lldp(struct txgbe_hw *hw); +s32 txgbe_set_sgmii_an37_ability(struct txgbe_hw *hw); +int txgbe_set_pps(struct txgbe_hw *hw, bool enable, u64 nsec, u64 cycles); +void txgbe_hic_write_autoneg_status(struct txgbe_hw *hw,bool autoneg); +int txgbe_enable_rx_adapter(struct txgbe_hw *hw); + +extern s32 txgbe_init_ops_aml(struct txgbe_hw *hw); +extern s32 txgbe_init_ops_aml40(struct txgbe_hw *hw); + +void txgbe_set_queue_rate_limit(struct txgbe_hw *hw, int queue, u16 max_tx_rate); +int txgbe_hic_notify_led_active(struct txgbe_hw *hw, int active_flag); +bool txgbe_is_backplane(struct txgbe_hw *hw); #endif /* _TXGBE_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_kcompat.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_kcompat.h index 877a03c3cc47cdce782002186760461c4904e5ee..f9907bb60c420e030ff5f4902a6cce4320cffcc1 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_kcompat.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_kcompat.h @@ -125,6 +125,37 @@ #define CL72_KRTR_PRBS31_EN 0 #endif +#ifndef TXGBE_SWFW_MBOX_AML +#define 
TXGBE_SWFW_MBOX_AML +#endif + +#ifndef TXGBE_DMA_RESET +#define TXGBE_DMA_RESET 1 +#endif + +#ifndef TXGBE_1588_PPS_LEVEL +#define TXGBE_1588_PPS_LEVEL 1 +#endif + +#ifndef TXGBE_1588_PPS_WIDTH +#define TXGBE_1588_PPS_WIDTH 100 +#endif + +#ifndef TXGBE_1588_TOD_ENABLE +#define TXGBE_1588_TOD_ENABLE 1 +#endif + +#ifndef CL72_KRTR_PRBS_MODE_EN +#define CL72_KRTR_PRBS_MODE_EN 0xffff /*open kr prbs check */ +#endif + +#ifndef CL74_KRTR_TRAINNING_TIMEOUT +#define CL74_KRTR_TRAINNING_TIMEOUT 3000 +#endif +#ifndef AN_TRAINNING_MODE +#define AN_TRAINNING_MODE 0 +#endif + /**************************performance************************************/ /**************************sfi************************************/ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c index e93909431305ed2cecbd4a67bc9dcc14d4123615..f70d309baa35380f2cbf23a65957db76eedd6e72 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -14,7 +14,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * based on ixgbe_lib.c, Copyright(c) 1999 - 2017 Intel Corporation. + * based on txgbe_lib.c, Copyright(c) 1999 - 2017 Intel Corporation. 
* Contact Information: * Linux NICS * e1000-devel Mailing List @@ -799,6 +799,7 @@ static void txgbe_add_ring(struct txgbe_ring *ring, ring->next = head->ring; head->ring = ring; head->count++; + head->next_update = jiffies + 1; } /** @@ -821,7 +822,11 @@ static int txgbe_alloc_q_vector(struct txgbe_adapter *adapter, { struct txgbe_q_vector *q_vector; struct txgbe_ring *ring; +#ifdef HAVE_CPUMASK_LOCAL_SPREAD + int node = dev_to_node(&adapter->pdev->dev); +#else int node = -1; +#endif #ifdef HAVE_IRQ_AFFINITY_HINT int cpu = -1; u8 tcs = netdev_get_num_tc(adapter->netdev); @@ -839,7 +844,11 @@ static int txgbe_alloc_q_vector(struct txgbe_adapter *adapter, u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; if (rss_i > 1 && adapter->atr_sample_rate) { if (cpu_online(v_idx)) { +#ifdef HAVE_CPUMASK_LOCAL_SPREAD + cpu = cpumask_local_spread(v_idx, node); +#else cpu = v_idx; +#endif node = cpu_to_node(cpu); } } @@ -896,8 +905,11 @@ static int txgbe_alloc_q_vector(struct txgbe_adapter *adapter, q_vector->tx.work_limit = adapter->tx_work_limit; q_vector->rx.work_limit = adapter->rx_work_limit; - /* initialize pointer to rings */ - ring = q_vector->ring; + /* Initialize setting for adaptive ITR */ + q_vector->tx.itr = TXGBE_ITR_ADAPTIVE_MAX_USECS | + TXGBE_ITR_ADAPTIVE_LATENCY; + q_vector->rx.itr = TXGBE_ITR_ADAPTIVE_MAX_USECS | + TXGBE_ITR_ADAPTIVE_LATENCY; /* intialize ITR */ if (txr_count && !rxr_count) { @@ -914,6 +926,9 @@ static int txgbe_alloc_q_vector(struct txgbe_adapter *adapter, q_vector->itr = adapter->rx_itr_setting; } + /* initialize pointer to rings */ + ring = q_vector->ring; + while (txr_count) { /* assign generic ring traits */ ring->dev = pci_dev_to_dev(adapter->pdev); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index b0fdc93954d0a78bdd522893cf2b30f1d4beff64..50b14823e4ab5e96596bb45957c0cad443c2d8de 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ 
b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -14,7 +14,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * based on ixgbe_main.c, Copyright(c) 1999 - 2017 Intel Corporation. + * based on txgbe_main.c, Copyright(c) 1999 - 2017 Intel Corporation. * Contact Information: * Linux NICS * e1000-devel Mailing List @@ -74,17 +74,18 @@ #include #endif /* NETIF_F_HW_TC */ - #include "txgbe_dcb.h" #include "txgbe_sriov.h" #include "txgbe_hw.h" #include "txgbe_phy.h" #include "txgbe_pcierr.h" #include "txgbe_bp.h" +#include "txgbe_e56.h" +#include "txgbe_e56_bp.h" char txgbe_driver_name[32] = TXGBE_NAME; static const char txgbe_driver_string[] = - "WangXun 10 Gigabit PCI Express Network Driver"; + "WangXun RP1000/RP2000/FF50XX PCI Express Network Driver"; #define DRV_HW_PERF @@ -97,17 +98,17 @@ static const char txgbe_driver_string[] = #define RELEASE_TAG #if (defined(TXGBE_SUPPORT_KYLIN_FT) || defined(TXGBE_SUPPORT_KYLIN_LX)) -#define DRV_VERSION __stringify(1.3.5.1oc) +#define DRV_VERSION __stringify(2.1.1klos) #elif defined(CONFIG_EULER_KERNEL) -#define DRV_VERSION __stringify(1.3.5.1oc) +#define DRV_VERSION __stringify(2.1.1elos) #elif defined(CONFIG_UOS_KERNEL) -#define DRV_VERSION __stringify(1.3.5.1oc) +#define DRV_VERSION __stringify(2.1.1uos) #else -#define DRV_VERSION __stringify(1.3.5.1oc) +#define DRV_VERSION __stringify(2.1.1) #endif const char txgbe_driver_version[32] = DRV_VERSION; static const char txgbe_copyright[] = - "Copyright (c) 2015 -2017 Beijing WangXun Technology Co., Ltd"; + "Copyright 
(c) 2015 - 2025 Beijing WangXun Technology Co., Ltd"; static const char txgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. " "If the problem persists, restart the computer, or " @@ -127,6 +128,11 @@ static const char txgbe_underheat_msg[] = static const struct pci_device_id txgbe_pci_tbl[] = { { PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_SP1000), 0}, { PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_WX1820), 0}, + { PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_AML), 0}, + { PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_AML5025), 0}, + { PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_AML5125), 0}, + { PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_AML5040), 0}, + { PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_AML5140), 0}, /* required last entry */ { .device = 0 } }; @@ -134,7 +140,7 @@ MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl); MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, "); -MODULE_DESCRIPTION("WangXun(R) 10 Gigabit PCI Express Network Driver"); +MODULE_DESCRIPTION("WangXun(R) RP1000/RP2000/FF50XX PCI Express Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); @@ -195,9 +201,53 @@ void txgbe_print_tx_hang_status(struct txgbe_adapter *adapter) e_info(probe, "Tx flow control Status[TDB_TFCS 0xCE00]: 0x%x\n", rd32(&adapter->hw, TXGBE_TDB_TFCS)); + + e_info(tx_err, "tdm_desc_fatal_0: 0x%x\n", + rd32(&adapter->hw, 0x180d0)); + e_info(tx_err, "tdm_desc_fatal_1: 0x%x\n", + rd32(&adapter->hw, 0x180d4)); + e_info(tx_err, "tdm_desc_fatal_2: 0x%x\n", + rd32(&adapter->hw, 0x180d8)); + e_info(tx_err, "tdm_desc_fatal_3: 0x%x\n", + rd32(&adapter->hw, 0x180dc)); + e_info(tx_err, "tdm_desc_nonfatal_0: 0x%x\n", + rd32(&adapter->hw, 0x180c0)); + e_info(tx_err, "tdm_desc_nonfatal_1: 0x%x\n", + rd32(&adapter->hw, 0x180c4)); + e_info(tx_err, "tdm_desc_nonfatal_2: 0x%x\n", + rd32(&adapter->hw, 0x180c8)); + e_info(tx_err, "tdm_desc_nonfatal_3: 0x%x\n", + rd32(&adapter->hw, 0x180cc)); + return; } +static void txgbe_dump_all_ring_desc(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = 
&adapter->hw; + union txgbe_tx_desc *tx_desc; + struct txgbe_ring *tx_ring; + int i, j; + + if (!netif_msg_tx_err(adapter)) + return; + + e_warn(tx_err, "Dump desc base addr\n"); + + for (i = 0; i < adapter->num_tx_queues; i++) { + e_warn(tx_err, "q_%d:0x%x%x\n", i, rd32(hw, TXGBE_PX_TR_BAH(i)), rd32(hw, TXGBE_PX_TR_BAL(i))); + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + tx_ring = adapter->tx_ring[i]; + for (j = 0; j < tx_ring->count; j++) { + tx_desc = TXGBE_TX_DESC(tx_ring, j); + if (tx_desc->read.olinfo_status != 0x1) + e_warn(tx_err, "queue[%d][%d]:0x%llx, 0x%x, 0x%x\n", + i, j, tx_desc->read.buffer_addr, tx_desc->read.cmd_type_len, tx_desc->read.olinfo_status); + } + } +} static void txgbe_check_minimum_link(struct txgbe_adapter *adapter, int expected_gts) @@ -421,6 +471,7 @@ void txgbe_unmap_and_free_tx_resource(struct txgbe_ring *ring, tx_buffer->next_to_watch = NULL; tx_buffer->skb = NULL; dma_unmap_len_set(tx_buffer, len, 0); + tx_buffer->va = NULL; /* tx_buffer must be completely set up in the transmit path */ } @@ -556,9 +607,10 @@ static inline bool txgbe_check_tx_hang(struct txgbe_ring *tx_ring) static void txgbe_tx_timeout_dorecovery(struct txgbe_adapter *adapter) { /* schedule immediate reset if we believe we hung */ - if (adapter->hw.bus.lan_id == 0) + + if (adapter->hw.bus.lan_id == 0) { adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; - else + } else wr32(&adapter->hw, TXGBE_MIS_PF_SM, 1); txgbe_service_event_schedule(adapter); } @@ -569,9 +621,16 @@ static void txgbe_tx_timeout_dorecovery(struct txgbe_adapter *adapter) **/ static void txgbe_tx_timeout_reset(struct txgbe_adapter *adapter) { + struct txgbe_hw *hw = &adapter->hw; + if (!test_bit(__TXGBE_DOWN, &adapter->state)) { - adapter->flags2 |= TXGBE_FLAG2_PF_RESET_REQUESTED; - e_warn(drv, "initiating reset due to tx timeout\n"); + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + adapter->flags2 |= TXGBE_FLAG2_DMA_RESET_REQUESTED; + e_warn(drv, "initiating 
dma reset due to tx timeout\n"); + } else { + adapter->flags2 |= TXGBE_FLAG2_PF_RESET_REQUESTED; + e_warn(drv, "initiating reset due to tx timeout\n"); + } txgbe_service_event_schedule(adapter); } } @@ -588,16 +647,13 @@ static void txgbe_tx_timeout(struct net_device *netdev) { struct txgbe_adapter *adapter = netdev_priv(netdev); struct txgbe_hw *hw = &adapter->hw; - bool real_tx_hang = false; - int i; - u16 value = 0; + bool tdm_desc_fatal = false; u32 value2 = 0, value3 = 0; + bool real_tx_hang = false; + u16 pci_cmd = 0; u32 head, tail; u16 vid = 0; - u32 value_uncor = 0; - u32 value_cor = 0; - int pos; - bool ims_status =false; + int i; #define TX_TIMEO_LIMIT 16000 for (i = 0; i < adapter->num_tx_queues; i++) { @@ -609,19 +665,21 @@ static void txgbe_tx_timeout(struct net_device *netdev) pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &vid); ERROR_REPORT1(TXGBE_ERROR_POLLING, "pci vendor id is 0x%x\n", vid); - pci_read_config_word(adapter->pdev, PCI_COMMAND, &value); - ERROR_REPORT1(TXGBE_ERROR_POLLING, "pci command reg is 0x%x.\n", value); + pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); + ERROR_REPORT1(TXGBE_ERROR_POLLING, "pci command reg is 0x%x.\n", pci_cmd); - value2 = rd32(&adapter->hw,0x10000); - ERROR_REPORT1(TXGBE_ERROR_POLLING, "reg 0x10000 value is 0x%08x\n", value2); - value2 = rd32(&adapter->hw,0x180d0); - ERROR_REPORT1(TXGBE_ERROR_POLLING, "reg 0x180d0 value is 0x%08x\n", value2); - value2 = rd32(&adapter->hw,0x180d4); - ERROR_REPORT1(TXGBE_ERROR_POLLING, "reg 0x180d4 value is 0x%08x\n", value2); - value2 = rd32(&adapter->hw,0x180d8); - ERROR_REPORT1(TXGBE_ERROR_POLLING, "reg 0x180d8 value is 0x%08x\n", value2); - value2 = rd32(&adapter->hw,0x180dc); - ERROR_REPORT1(TXGBE_ERROR_POLLING, "reg 0x180dc value is 0x%08x\n", value2); + if (hw->mac.type == txgbe_mac_sp) { + value2 = rd32(&adapter->hw,0x10000); + ERROR_REPORT1(TXGBE_ERROR_POLLING, "reg 0x10000 value is 0x%08x\n", value2); + value2 = rd32(&adapter->hw,0x180d0); + 
ERROR_REPORT1(TXGBE_ERROR_POLLING, "reg 0x180d0 value is 0x%08x\n", value2); + value2 = rd32(&adapter->hw,0x180d4); + ERROR_REPORT1(TXGBE_ERROR_POLLING, "reg 0x180d4 value is 0x%08x\n", value2); + value2 = rd32(&adapter->hw,0x180d8); + ERROR_REPORT1(TXGBE_ERROR_POLLING, "reg 0x180d8 value is 0x%08x\n", value2); + value2 = rd32(&adapter->hw,0x180dc); + ERROR_REPORT1(TXGBE_ERROR_POLLING, "reg 0x180dc value is 0x%08x\n", value2); + } for (i = 0; i < adapter->num_tx_queues; i++) { head = rd32(&adapter->hw, TXGBE_PX_TR_RP(adapter->tx_ring[i]->reg_idx)); @@ -639,40 +697,16 @@ static void txgbe_tx_timeout(struct net_device *netdev) ERROR_REPORT1(TXGBE_ERROR_POLLING, "PX_IMS0 value is 0x%08x, PX_IMS1 value is 0x%08x\n", value2, value3); - pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_ERR); - if (!pos) - return; - - pci_read_config_dword(adapter->pdev, pos + PCI_ERR_UNCOR_STATUS, &value_uncor); - pci_read_config_dword(adapter->pdev, pos + PCI_ERR_COR_STATUS, &value_cor); - - if (adapter->num_tx_queues <= 32) - ims_status = (value2 != TXGBE_FAILED_READ_CFG_DWORD) ? true : false; - else - ims_status = ((value2 != TXGBE_FAILED_READ_CFG_DWORD) && - (value3 != TXGBE_FAILED_READ_CFG_DWORD)) ? - true : false; - - /* only ims is not equal to zero, - * can access pcie configuration space, - * and aer error is not detected, - * lan reset or do recovery can be skipped. 
- */ - if ((value2 || value3) && - ims_status && - !value_uncor && - !value_cor) { - ERROR_REPORT1(TXGBE_ERROR_POLLING, "clear interrupt mask.\n"); - wr32(&adapter->hw, TXGBE_PX_ICS(0), value2); - wr32(&adapter->hw, TXGBE_PX_IMC(0), value2); - wr32(&adapter->hw, TXGBE_PX_ICS(1), value3); - wr32(&adapter->hw, TXGBE_PX_IMC(1), value3); - - goto out; - } + /* only check pf queue tdm desc error */ + if ((rd32(&adapter->hw, TXGBE_TDM_DESC_FATAL(0)) & 0xffffffff) || + (rd32(&adapter->hw, TXGBE_TDM_DESC_FATAL(1)) & 0xffffffff)) + tdm_desc_fatal = true; + /* PCIe link loss, tdm desc fatal error or memory space can't access */ if (TXGBE_RECOVER_CHECK == 1) { - if (vid == TXGBE_FAILED_READ_CFG_WORD) { + if (vid == TXGBE_FAILED_READ_CFG_WORD || + tdm_desc_fatal || + !(pci_cmd & 0x2)) { txgbe_tx_timeout_dorecovery(adapter); } else { txgbe_print_tx_hang_status(adapter); @@ -682,17 +716,22 @@ static void txgbe_tx_timeout(struct net_device *netdev) txgbe_tx_timeout_dorecovery(adapter); } -out: return; } + +static inline u16 txgbe_desc_buf_unmapped(struct txgbe_ring *ring, u16 ntc, u16 ntf) +{ + return ((ntc >= ntf) ? 
0 : ring->count) + ntc - ntf; +} + /** * txgbe_ - Reclaim resources after transmit completes * @q_vector: structure containing interrupt and ring information * @tx_ring: tx ring to clean **/ static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector, - struct txgbe_ring *tx_ring, int napi_budget) + struct txgbe_ring *tx_ring, int napi_budget) { struct txgbe_adapter *adapter = q_vector->adapter; struct txgbe_tx_buffer *tx_buffer; @@ -701,6 +740,18 @@ static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector, unsigned int budget = q_vector->tx.work_limit; unsigned int i = tx_ring->next_to_clean; u16 vid = 0; + int j = 0; + u32 size; + unsigned int ntf; + struct txgbe_tx_buffer *free_tx_buffer; + u32 unmapped_descs = 0; + bool first_dma; +#ifdef TXGBE_TXHEAD_WB + u32 head = 0; + u32 temp = tx_ring->next_to_clean; + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + head = *(tx_ring->headwb_mem); +#endif if (test_bit(__TXGBE_DOWN, &adapter->state)) return true; @@ -719,9 +770,21 @@ static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector, /* prevent any other reads prior to eop_desc */ smp_rmb(); - /* if DD is not set pending work has not been completed */ - if (!(eop_desc->wb.status & cpu_to_le32(TXGBE_TXD_STAT_DD))) - break; +#ifdef TXGBE_TXHEAD_WB + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + /* we have caught up to head, no work left to do */ + if (temp == head) { + break; + } else if (head > temp && !(tx_buffer->next_eop >= temp && (tx_buffer->next_eop < head))) { + break; + } else if (!(tx_buffer->next_eop >= temp || (tx_buffer->next_eop < head))) { + break; + } + } else +#endif + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(TXGBE_TXD_STAT_DD))) + break; /* clear next_to_watch to prevent false hangs */ tx_buffer->next_to_watch = NULL; @@ -730,38 +793,21 @@ static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector, total_bytes += 
tx_buffer->bytecount; total_packets += tx_buffer->gso_segs; -#ifdef HAVE_XDP_SUPPORT - if (ring_is_xdp(tx_ring)) -#ifdef HAVE_XDP_FRAME_STRUCT - xdp_return_frame(tx_buffer->xdpf); -#else - page_frag_free(tx_buffer->data); -#endif - else - napi_consume_skb(tx_buffer->skb, napi_budget); + if (tx_buffer->skb) { +#ifdef HAVE_PTP_1588_CLOCK + if (!ring_is_xdp(tx_ring) && +#ifdef SKB_SHARED_TX_IS_UNION + !(skb_tx(tx_buffer->skb)->in_progress == 1)) #else - napi_consume_skb(tx_buffer->skb, napi_budget); + !(skb_shinfo(tx_buffer->skb)->tx_flags & SKBTX_IN_PROGRESS)) #endif - - /* unmap skb header data */ - dma_unmap_single(tx_ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - - /* clear tx_buffer data */ -#ifdef HAVE_XDP_SUPPORT - if (ring_is_xdp(tx_ring)) -#ifdef HAVE_XDP_FRAME_STRUCT - tx_buffer->xdpf = NULL; #else - tx_buffer->data = NULL; -#endif - else + if (!ring_is_xdp(tx_ring)) #endif - tx_buffer->skb = NULL; - dma_unmap_len_set(tx_buffer, len, 0); - + skb_orphan(tx_buffer->skb); + } else{ + dev_err(tx_ring->dev, "skb is NULL.\n"); + } /* unmap remaining buffers */ while (tx_desc != eop_desc) { tx_buffer++; @@ -773,16 +819,7 @@ static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector, tx_desc = TXGBE_TX_DESC(tx_ring, 0); } - /* unmap any remaining paged data */ - if (dma_unmap_len(tx_buffer, len)) { - dma_unmap_page(tx_ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - dma_unmap_len_set(tx_buffer, len, 0); - } } - /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; tx_desc++; @@ -801,7 +838,89 @@ static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector, } while (likely(budget)); i += tx_ring->count; + + first_dma = false; + ntf = tx_ring->next_to_free; + free_tx_buffer = &tx_ring->tx_buffer_info[ntf]; + ntf -= tx_ring->count; + unmapped_descs = txgbe_desc_buf_unmapped(tx_ring, i, tx_ring->next_to_free); + while (unmapped_descs > 
adapter->desc_reserved) { +#ifdef HAVE_XDP_SUPPORT + if (ring_is_xdp(tx_ring)) { +#ifdef HAVE_XDP_FRAME_STRUCT + if (free_tx_buffer->xdpf) { + xdp_return_frame(free_tx_buffer->xdpf); + first_dma = true; + } +#else + if (free_tx_buffer->data) { + page_frag_free(free_tx_buffer->data); + first_dma = true; + } +#endif + } else + if (free_tx_buffer->skb) { + dev_consume_skb_any(free_tx_buffer->skb); + first_dma = true; + } +#else + if (free_tx_buffer->skb) { + dev_consume_skb_any(free_tx_buffer->skb); + first_dma = true; + } +#endif + if (first_dma) { + if (dma_unmap_len(free_tx_buffer, len)) { + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(free_tx_buffer, dma), + dma_unmap_len(free_tx_buffer, len), + DMA_TO_DEVICE); + } + /* clear tx_buffer data */ +#ifdef HAVE_XDP_SUPPORT + if (ring_is_xdp(tx_ring)) +#ifdef HAVE_XDP_FRAME_STRUCT + free_tx_buffer->xdpf = NULL; +#else + free_tx_buffer->data = NULL; +#endif + else +#endif + /* clear tx_buffer data */ + free_tx_buffer->skb = NULL; + dma_unmap_len_set(free_tx_buffer, len, 0); + free_tx_buffer->va = NULL; + first_dma = false; + } else { + /* unmap any remaining paged data */ + if (dma_unmap_len(free_tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(free_tx_buffer, dma), + dma_unmap_len(free_tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(free_tx_buffer, len, 0); + free_tx_buffer->va = NULL; + } + + } + + free_tx_buffer++; + ntf++; + if (unlikely(!ntf)) { + ntf -= tx_ring->count; + free_tx_buffer = tx_ring->tx_buffer_info; + } + + unmapped_descs--; + }; + + ntf += tx_ring->count; + tx_ring->next_to_free = ntf; + /* need update next_to_free before next_to_clean */ + wmb(); tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); tx_ring->stats.bytes += total_bytes; tx_ring->stats.packets += total_packets; @@ -812,7 +931,6 @@ static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector, if (check_for_tx_hang(tx_ring) && txgbe_check_tx_hang(tx_ring)) { /* 
schedule immediate reset if we believe we hung */ struct txgbe_hw *hw = &adapter->hw; -// u16 value = 0; e_err(drv, "Detected Tx Unit Hang%s\n" " Tx Queue <%d>\n" @@ -829,6 +947,36 @@ static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector, tx_ring->next_to_use, i, tx_ring->tx_buffer_info[i].time_stamp, jiffies); + if (netif_msg_tx_err(adapter)) { + for (j = 0; j < tx_ring->count; j++) { + tx_desc = TXGBE_TX_DESC(tx_ring, j); + if (tx_desc->read.olinfo_status != 0x1) + e_warn(tx_err, "q_[%d][%d]:0x%llx, 0x%x, 0x%x\n", + tx_ring->reg_idx, j, tx_desc->read.buffer_addr, tx_desc->read.cmd_type_len, tx_desc->read.olinfo_status); + } + } + + if (netif_msg_pktdata(adapter)) { + for (j = 0; j < tx_ring->count; j++) { + tx_buffer = &tx_ring->tx_buffer_info[j]; + size = dma_unmap_len(tx_buffer, len); + if (size != 0 && tx_buffer->va) { + e_err(pktdata, "tx buffer[%d][%d]: \n", tx_ring->queue_index, j); + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, tx_buffer->va, size, true); + } + } + + for (j = 0; j < tx_ring->count; j++) { + tx_buffer = &tx_ring->tx_buffer_info[j]; + if (tx_buffer->skb) { + e_err(pktdata, "****skb in tx buffer[%d][%d]: *******\n", tx_ring->queue_index, j); + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, tx_buffer->skb, sizeof(struct sk_buff), true); + } + } + } + pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &vid); if (vid == TXGBE_FAILED_READ_CFG_WORD) { e_info(hw, "pcie link has been lost.\n"); @@ -840,22 +988,16 @@ static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector, e_info(probe, "tx hang %d detected on queue %d, resetting adapter\n", adapter->tx_timeout_count + 1, tx_ring->queue_index); - -#ifdef TXGBE_RECOVER_CHECK - if (vid == TXGBE_FAILED_READ_CFG_WORD) { -#endif - /* schedule immediate reset if we believe we hung */ - if (adapter->hw.bus.lan_id == 0) { - adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; - 
txgbe_service_event_schedule(adapter); - } else - wr32(&adapter->hw, TXGBE_MIS_PF_SM, 1); -#ifdef TXGBE_RECOVER_CHECK + if (TXGBE_RECOVER_CHECK == 1) { + if (vid == TXGBE_FAILED_READ_CFG_WORD) { + txgbe_tx_timeout_dorecovery(adapter); + } else { + txgbe_print_tx_hang_status(adapter); + txgbe_tx_timeout_reset(adapter); + } } else { - txgbe_print_tx_hang_status(adapter); - txgbe_tx_timeout_reset(adapter); + txgbe_tx_timeout_dorecovery(adapter); } -#endif /* the adapter is about to reset, no point in enabling stuff */ return true; @@ -976,7 +1118,8 @@ static inline void txgbe_rx_checksum(struct txgbe_ring *ring, return; /*likely incorrect csum if IPv6 Dest Header found */ - if (dptype.prot != TXGBE_DEC_PTYPE_PROT_SCTP && TXGBE_RXD_IPV6EX(rx_desc)) + if (dptype.prot != TXGBE_DEC_PTYPE_PROT_SCTP && + txgbe_test_staterr(rx_desc, TXGBE_RXD_IPV6EX)) return; /* if L4 checksum error */ @@ -1048,6 +1191,12 @@ static bool txgbe_alloc_mapped_page(struct txgbe_ring *rx_ring, { struct page *page = bi->page; dma_addr_t dma; +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif /* since we are recycling buffers we should seldom need to alloc */ if (likely(page)) @@ -1061,8 +1210,14 @@ static bool txgbe_alloc_mapped_page(struct txgbe_ring *rx_ring, } /* map page for use */ - dma = dma_map_page(rx_ring->dev, page, 0, - txgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + txgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + TXGBE_RX_DMA_ATTR); +#endif /* * if mapping failed free memory back to system since @@ -1088,12 +1243,34 @@ static bool txgbe_alloc_mapped_page(struct txgbe_ring *rx_ring, } #endif +/** + * txgbe_release_rx_desc - Store the new tail and head values + * @rx_ring: ring to bump + * @val: 
new head index + **/ +static void txgbe_release_rx_desc(struct txgbe_ring *rx_ring, u32 val) +{ + rx_ring->next_to_use = val; +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; +#endif + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(val, rx_ring->tail); +} + /** * txgbe_alloc_rx_buffers - Replace used receive buffers * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace **/ -void txgbe_alloc_rx_buffers(struct txgbe_ring *rx_ring, u16 cleaned_count) +bool txgbe_alloc_rx_buffers(struct txgbe_ring *rx_ring, u16 cleaned_count) { union txgbe_rx_desc *rx_desc; struct txgbe_rx_buffer *bi; @@ -1101,7 +1278,7 @@ void txgbe_alloc_rx_buffers(struct txgbe_ring *rx_ring, u16 cleaned_count) /* nothing to do */ if (!cleaned_count) - return; + return false; rx_desc = TXGBE_RX_DESC(rx_ring, i); bi = &rx_ring->rx_buffer_info[i]; @@ -1110,18 +1287,18 @@ void txgbe_alloc_rx_buffers(struct txgbe_ring *rx_ring, u16 cleaned_count) do { #ifdef CONFIG_TXGBE_DISABLE_PACKET_SPLIT if (!txgbe_alloc_mapped_skb(rx_ring, bi)) - break; + goto no_buffers; rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); #else if (ring_is_hs_enabled(rx_ring)) { if (!txgbe_alloc_mapped_skb(rx_ring, bi)) - break; + goto no_buffers; rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); } if (!txgbe_alloc_mapped_page(rx_ring, bi)) - break; + goto no_buffers; /* sync the buffer for use by the device */ dma_sync_single_range_for_device(rx_ring->dev, bi->page_dma, @@ -1144,26 +1321,26 @@ void txgbe_alloc_rx_buffers(struct txgbe_ring *rx_ring, u16 cleaned_count) /* clear the status bits for the next_to_use descriptor */ rx_desc->wb.upper.status_error = 0; + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; cleaned_count--; } while 
(cleaned_count); i += rx_ring->count; - if (rx_ring->next_to_use != i) { - rx_ring->next_to_use = i; -#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT - /* update next to alloc since we have filled the ring */ - rx_ring->next_to_alloc = i; -#endif - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). - */ - wmb(); - writel(i, rx_ring->tail); - } + if (rx_ring->next_to_use != i) + txgbe_release_rx_desc(rx_ring, i); + + return false; + +no_buffers: + i += rx_ring->count; + + if (rx_ring->next_to_use != i) + txgbe_release_rx_desc(rx_ring, i); + + return true; } static inline u16 txgbe_get_hlen(struct txgbe_ring *rx_ring, @@ -1666,7 +1843,7 @@ static void txgbe_lro_receive(struct txgbe_q_vector *q_vector, static void txgbe_set_rsc_gso_size(struct txgbe_ring __maybe_unused *ring, struct sk_buff *skb) { - u16 hdr_len = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb)); + u16 hdr_len = skb_headlen(skb); /* set gso_size to avoid messing up TCP MSS */ skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), @@ -1998,7 +2175,7 @@ bool txgbe_cleanup_headers(struct txgbe_ring *rx_ring, } /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb) && !skb_headlen(skb)) + if (!skb_headlen(skb)) txgbe_pull_tail(skb); #if IS_ENABLED(CONFIG_FCOE) @@ -2032,21 +2209,18 @@ static void txgbe_reuse_rx_page(struct txgbe_ring *rx_ring, /* update, and store next to alloc */ nta++; rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; - /* transfer page from old buffer to new buffer */ -#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT - new_buff->page_dma = old_buff->page_dma; - new_buff->page = old_buff->page; - new_buff->page_offset = old_buff->page_offset; - new_buff->pagecnt_bias = old_buff->pagecnt_bias; -#endif - /* sync the buffer for use by the device */ - dma_sync_single_range_for_device(rx_ring->dev, new_buff->page_dma, - new_buff->page_offset, - txgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls and unnecessary copy of skb. + */ + new_buff->page_dma = old_buff->page_dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; } + static inline bool txgbe_page_is_reserved(struct page *page) { return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); @@ -2067,36 +2241,63 @@ static inline bool txgbe_page_is_reserved(struct page *page) * The function will then update the page offset if necessary and return * true if the buffer can be reused by the adapter. **/ -static bool txgbe_add_rx_frag(struct txgbe_ring *rx_ring, +static void txgbe_add_rx_frag(struct txgbe_ring *rx_ring, struct txgbe_rx_buffer *rx_buffer, - union txgbe_rx_desc *rx_desc, - struct sk_buff *skb) + struct sk_buff *skb, + unsigned int size) { - struct page *page = rx_buffer->page; - unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) - unsigned int truesize = txgbe_rx_bufsz(rx_ring); + unsigned int truesize = txgbe_rx_pg_size(rx_ring) / 2; #else - unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); - unsigned int last_offset = txgbe_rx_pg_size(rx_ring) - - txgbe_rx_bufsz(rx_ring); + unsigned int truesize = rx_ring->rx_offset ? 
+ SKB_DATA_ALIGN(txgbe_rx_offset(rx_ring) + size) : + SKB_DATA_ALIGN(size); #endif - if ((size <= TXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb) && - !ring_is_hs_enabled(rx_ring)) { - unsigned char *va = page_address(page) + rx_buffer->page_offset; - - memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - rx_buffer->pagecnt_bias++; + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); - /* page is not reserved, we can reuse buffer as-is */ - if (likely(!txgbe_page_is_reserved(page))) - return true; - return false; - } +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rx_buffer->page_offset, size, truesize); +static unsigned int txgbe_rx_frame_truesize(struct txgbe_ring *rx_ring, + unsigned int size) +{ + + unsigned int truesize; +#if (PAGE_SIZE < 8192) + truesize = txgbe_rx_pg_size(rx_ring) / 2; +#else + truesize = SKB_DATA_ALIGN(TXGBE_SKB_PAD + size) +#ifdef HAVE_XDP_BUFF_FRAME_SZ + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +#endif + ; +#endif + return truesize; +} + +static void txgbe_rx_buffer_flip(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *rx_buffer, + unsigned int size) +{ + unsigned int truesize = txgbe_rx_frame_truesize(rx_ring, size); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + + +static bool txgbe_can_reuse_rx_page(struct txgbe_rx_buffer *rx_buffer) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; /* avoid re-using remote pages */ if (unlikely(txgbe_page_is_reserved(page))) @@ -2105,21 +2306,21 @@ static bool txgbe_add_rx_frag(struct txgbe_ring *rx_ring, #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ #ifdef HAVE_PAGE_COUNT_BULK_UPDATE - if (unlikely((page_ref_count(page) - rx_buffer->pagecnt_bias) > 1)) + 
if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) #else - if (unlikely((page_count(page) - rx_buffer->pagecnt_bias) > 1)) + if (unlikely((page_count(page) - pagecnt_bias) > 1)) #endif return false; - - /* flip page offset to other buffer */ - rx_buffer->page_offset ^= truesize; #else - /* move offset up to the next cache line */ - rx_buffer->page_offset += truesize; - - if (rx_buffer->page_offset > last_offset) + /* The last offset is a bit aggressive in that we assume the + * worst case of FCoE being enabled and using a 3K buffer. + * However this should have minimal impact as the 1K extra is + * still less than one buffer in size. + */ +#define TXGBE_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - TXGBE_RXBUFFER_3K) + if (rx_buffer->page_offset > TXGBE_LAST_OFFSET) return false; - #endif #ifdef HAVE_PAGE_COUNT_BULK_UPDATE @@ -2127,7 +2328,7 @@ static bool txgbe_add_rx_frag(struct txgbe_ring *rx_ring, * the pagecnt_bias and page count so that we fully restock the * number of references the driver holds. */ - if (unlikely(rx_buffer->pagecnt_bias == 1)) { + if (unlikely(pagecnt_bias == 1)) { page_ref_add(page, USHRT_MAX - 1); rx_buffer->pagecnt_bias = USHRT_MAX; } @@ -2135,7 +2336,7 @@ static bool txgbe_add_rx_frag(struct txgbe_ring *rx_ring, /* Even if we own the page, we are not allowed to use atomic_set() * This would break get_page_unless_zero() users. 
*/ - if (likely(!rx_buffer->pagecnt_bias)) { + if (likely(!pagecnt_bias)) { page_ref_inc(page); rx_buffer->pagecnt_bias = 1; } @@ -2144,285 +2345,45 @@ static bool txgbe_add_rx_frag(struct txgbe_ring *rx_ring, return true; } -static struct sk_buff *txgbe_fetch_rx_buffer(struct txgbe_ring *rx_ring, - union txgbe_rx_desc *rx_desc) +static void txgbe_put_rx_buffer(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *rx_buffer, + struct sk_buff *skb) { - struct txgbe_rx_buffer *rx_buffer; - struct sk_buff *skb; - struct page *page; - - rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; - page = rx_buffer->page; - prefetchw(page); +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) - skb = rx_buffer->skb; + DEFINE_DMA_ATTRS(attrs); - if (likely(!skb)) { - void *page_addr = page_address(page) + - rx_buffer->page_offset; + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); - /* prefetch first cache line of first page */ - prefetch(page_addr); -#if L1_CACHE_BYTES < 128 - prefetch(page_addr + L1_CACHE_BYTES); #endif - - /* allocate a skb to store the frags */ - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - TXGBE_RX_HDR_SIZE); - if (unlikely(!skb)) { - rx_ring->rx_stats.alloc_rx_buff_failed++; - return NULL; - } - - /* - * we will be copying header into skb->data in - * pskb_may_pull so it is in our interest to prefetch - * it now to avoid a possible cache miss - */ - prefetchw(skb->data); - - /* - * Delay unmapping of the first packet. It carries the - * header information, HW may still access the header - * after the writeback. 
Only unmap it when EOP is - * reached - */ - if (likely(txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_EOP))) - goto dma_sync; - - TXGBE_CB(skb)->dma = rx_buffer->page_dma; - } else { - if (txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_EOP)) - txgbe_dma_sync_frag(rx_ring, skb); - -dma_sync: - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->page_dma, - rx_buffer->page_offset, - txgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); - rx_buffer->skb = NULL; - } - if(!rx_ring->xdp_prog) - rx_buffer->pagecnt_bias--; - - /* pull page into skb */ - if (txgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { - /* hand second half of page back to the ring */ - txgbe_reuse_rx_page(rx_ring, rx_buffer); - } else{ - if (TXGBE_CB(skb)->dma == rx_buffer->page_dma) { - /* the page has been released from the ring */ - TXGBE_CB(skb)->page_released = true; - } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page(rx_ring->dev, rx_buffer->page_dma, - txgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE); - } - __page_frag_cache_drain(rx_buffer->page, - rx_buffer->pagecnt_bias); - } - /* clear contents of buffer_info */ - rx_buffer->page = NULL; - - return skb; -} - -static struct sk_buff *txgbe_fetch_rx_buffer_hs(struct txgbe_ring *rx_ring, - union txgbe_rx_desc *rx_desc) -{ - struct txgbe_rx_buffer *rx_buffer; - struct sk_buff *skb; - struct page *page; - int hdr_len = 0; - - rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; - page = rx_buffer->page; - prefetchw(page); - - skb = rx_buffer->skb; - rx_buffer->skb = NULL; - prefetchw(skb->data); - - if (!skb_is_nonlinear(skb)) { - hdr_len = txgbe_get_hlen(rx_ring, rx_desc); - if (hdr_len > 0) { - __skb_put(skb, hdr_len); - TXGBE_CB(skb)->dma_released = true; - TXGBE_CB(skb)->dma = rx_buffer->dma; - rx_buffer->dma = 0; - } else { - dma_unmap_single(rx_ring->dev, - rx_buffer->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_buffer->dma = 0; - if 
(likely(txgbe_test_staterr(rx_desc, - TXGBE_RXD_STAT_EOP))) - goto dma_sync; - TXGBE_CB(skb)->dma = rx_buffer->page_dma; - goto add_frag; - } - } - - if (txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_EOP)) { - if (skb_headlen(skb)) { - if (TXGBE_CB(skb)->dma_released == true) { - dma_unmap_single(rx_ring->dev, - TXGBE_CB(skb)->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - TXGBE_CB(skb)->dma = 0; - TXGBE_CB(skb)->dma_released = false; - } - } else - txgbe_dma_sync_frag(rx_ring, skb); - } - rx_buffer->pagecnt_bias--; -dma_sync: - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->page_dma, - rx_buffer->page_offset, - txgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); -add_frag: - /* pull page into skb */ - if (txgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + if (txgbe_can_reuse_rx_page(rx_buffer)) { /* hand second half of page back to the ring */ txgbe_reuse_rx_page(rx_ring, rx_buffer); } else { - if (TXGBE_CB(skb)->dma == rx_buffer->page_dma) { + if (!IS_ERR(skb) && TXGBE_CB(skb)->dma == rx_buffer->page_dma) { /* the page has been released from the ring */ TXGBE_CB(skb)->page_released = true; } else { /* we are not reusing the buffer so unmap it */ - dma_unmap_page(rx_ring->dev, rx_buffer->page_dma, - txgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE); - } - __page_frag_cache_drain(rx_buffer->page, - rx_buffer->pagecnt_bias); - } - - /* clear contents of buffer_info */ - rx_buffer->page = NULL; - - return skb; -} - -static unsigned int txgbe_rx_frame_truesize(struct txgbe_ring *rx_ring, - unsigned int size) -{ - - unsigned int truesize; -#if (PAGE_SIZE < 8192) - truesize = txgbe_rx_pg_size(rx_ring) / 2; -#else - truesize = SKB_DATA_ALIGN(TXGBE_SKB_PAD + size) -#ifdef HAVE_XDP_BUFF_FRAME_SZ - + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) -#endif - ; -#endif - return truesize; -} - -static void txgbe_rx_buffer_flip(struct txgbe_ring *rx_ring, - struct txgbe_rx_buffer *rx_buffer, - unsigned int size) -{ - 
unsigned int truesize = txgbe_rx_frame_truesize(rx_ring, size); -#if (PAGE_SIZE < 8192) - rx_buffer->page_offset ^= truesize; -#else - rx_buffer->page_offset += truesize; -#endif -} - - -static bool txgbe_can_reuse_rx_page(struct txgbe_rx_buffer *rx_buffer, - struct txgbe_ring *rx_ring) -{ - unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; - struct page *page = rx_buffer->page; -#if (PAGE_SIZE < 8192) - /* if we are only owner of page we can reuse it */ -#ifdef HAVE_PAGE_COUNT_BULK_UPDATE - if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) -#else - if (unlikely((page_count(page) - pagecnt_bias) > 1)) -#endif - return false; -#else - unsigned int last_offset = SKB_WITH_OVERHEAD(PAGE_SIZE) - TXGBE_RXBUFFER_3K; - - if (rx_buffer->page_offset > last_offset) - return false; -#endif - - /* avoid re-using remote pages */ - if (unlikely(txgbe_page_is_reserved(page))) - return false; - -#ifdef HAVE_PAGE_COUNT_BULK_UPDATE - /* If we have drained the page fragment pool we need to update - * the pagecnt_bias and page count so that we fully restock the - * number of references the driver holds. - */ - if (unlikely(pagecnt_bias == 1)) { - page_ref_add(page, USHRT_MAX - 1); - rx_buffer->pagecnt_bias = USHRT_MAX; - } -#else - /* Even if we own the page, we are not allowed to use atomic_set() - * This would break get_page_unless_zero() users. 
- */ - if (likely(!pagecnt_bias)) { - page_ref_inc(page); - rx_buffer->pagecnt_bias = 1; - } -#endif - return true; -} - -static void txgbe_put_rx_buffer(struct txgbe_ring *rx_ring, - struct txgbe_rx_buffer *rx_buffer, - struct sk_buff *skb) -{ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->page_dma, + txgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, #if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) - - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); - dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); - + &attrs); +#else + TXGBE_RX_DMA_ATTR); #endif - if (txgbe_can_reuse_rx_page(rx_buffer, rx_ring)) { - /* hand second half of page back to the ring */ - txgbe_reuse_rx_page(rx_ring, rx_buffer); - } else { - /* We are not reusing the buffer so unmap it and free - * any references we are holding to it - */ - if (IS_ERR(skb)) - dma_unmap_page(rx_ring->dev, rx_buffer->page_dma, - txgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE); + } __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); } /* clear contents of rx_buffer */ - rx_buffer->page_dma = 0; rx_buffer->page = NULL; rx_buffer->skb = NULL; } - #ifdef HAVE_XDP_SUPPORT #ifdef HAVE_XDP_FRAME_STRUCT int txgbe_xmit_xdp_ring(struct txgbe_ring *ring, struct xdp_frame *xdpf) @@ -2496,6 +2457,9 @@ int txgbe_xmit_xdp_ring(struct txgbe_ring *ring, struct xdp_buff *xdp) i = 0; tx_buffer->next_to_watch = tx_desc; +#ifdef TXGBE_TXHEAD_WB + tx_buffer->next_eop = i; +#endif ring->next_to_use = i; return TXGBE_XDP_TX; @@ -2575,6 +2539,157 @@ txgbe_run_xdp(struct txgbe_adapter __maybe_unused *adapter, return ERR_PTR(-result); } +static struct txgbe_rx_buffer *txgbe_get_rx_buffer(struct txgbe_ring *rx_ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff **skb, + const unsigned int size) +{ + struct txgbe_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + prefetchw(rx_buffer->page); + *skb = rx_buffer->skb; + + /* Delay unmapping of the first 
packet. It carries the header + * information, HW may still access the header after the writeback. + * Only unmap it when EOP is reached + */ + if (!txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_EOP)) { + if (!*skb) + goto skip_sync; + } else { + if (*skb) + txgbe_dma_sync_frag(rx_ring, *skb); + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->page_dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); +skip_sync: + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC +static struct sk_buff *txgbe_build_skb(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + union txgbe_rx_desc *rx_desc) +{ +#ifdef HAVE_XDP_BUFF_DATA_META + unsigned int metasize = xdp->data - xdp->data_meta; + void *va = xdp->data_meta; +#else + void *va = xdp->data; +#endif +#if (PAGE_SIZE < 8192) + unsigned int truesize = txgbe_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); +#endif + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* build an skb around the page buffer */ + skb = build_skb(xdp->data_hard_start, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, xdp->data - xdp->data_hard_start); + __skb_put(skb, xdp->data_end - xdp->data); +#ifdef HAVE_XDP_BUFF_DATA_META + if (metasize) + skb_metadata_set(skb, metasize); +#endif + + /* record DMA address if this is the start of a chain of buffers */ + if (!txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_EOP)) + TXGBE_CB(skb)->dma = rx_buffer->page_dma; + + /* update buffer offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + + return skb; 
+} +#endif + +static struct sk_buff *txgbe_construct_skb(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + union txgbe_rx_desc *rx_desc) +{ + unsigned int size = xdp->data_end - xdp->data; +#if (PAGE_SIZE < 8192) + unsigned int truesize = txgbe_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); +#endif + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(xdp->data); +#if L1_CACHE_BYTES < 128 + prefetch(xdp->data + L1_CACHE_BYTES); +#endif + /* Note, we get here by enabling legacy-rx via: + * + * ethtool --set-priv-flags legacy-rx on + * + * In this mode, we currently get 0 extra XDP headroom as + * opposed to having legacy-rx off, where we process XDP + * packets going to stack via txgbe_build_skb(). The latter + * provides us currently with 192 bytes of headroom. + * + * For txgbe_construct_skb() mode it means that the + * xdp->data_meta will always point to xdp->data, since + * the helper cannot expand the head. Should this ever + * change in future for legacy-rx mode on, then lets also + * add xdp->data_meta handling here. 
+ */ + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, TXGBE_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + if (size > TXGBE_RX_HDR_SIZE) { + if (!txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_EOP)) + TXGBE_CB(skb)->dma = rx_buffer->page_dma; + + skb_add_rx_frag(skb, 0, rx_buffer->page, + xdp->data - page_address(rx_buffer->page), + size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + memcpy(__skb_put(skb, size), + xdp->data, ALIGN(size, sizeof(long))); + rx_buffer->pagecnt_bias++; + } + + return skb; +} + /** * txgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @q_vector: structure containing interrupt and ring information @@ -2595,6 +2710,7 @@ static int txgbe_clean_rx_irq(struct txgbe_q_vector *q_vector, unsigned int total_rx_bytes = 0, total_rx_packets = 0, xdp_xmit = 0; u16 cleaned_count = txgbe_desc_unused(rx_ring); struct txgbe_adapter *adapter = q_vector->adapter; + unsigned int offset = txgbe_rx_offset(rx_ring); #if IS_ENABLED(CONFIG_FCOE) int ddp_bytes; unsigned int mss = 0; @@ -2605,20 +2721,20 @@ static int txgbe_clean_rx_irq(struct txgbe_q_vector *q_vector, #ifdef HAVE_XDP_BUFF_RXQ xdp.rxq = &rx_ring->xdp_rxq; #endif -#ifdef HAVE_XDP_BUFF_FRAME_SZ + /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ +#ifdef HAVE_XDP_BUFF_FRAME_SZ #if (PAGE_SIZE < 8192) - if(rx_ring->xdp_prog) xdp.frame_sz = txgbe_rx_frame_truesize(rx_ring, 0); #endif #endif - do { - struct txgbe_rx_buffer *rx_buffer; - union txgbe_rx_desc *rx_desc; - struct sk_buff *skb = NULL; - unsigned int size = 0; - rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + while (likely(total_rx_packets < budget)) { + union txgbe_rx_desc *rx_desc; + struct txgbe_rx_buffer *rx_buffer; + struct sk_buff *skb; + unsigned int size; + /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= 
TXGBE_RX_BUFFER_WRITE) { @@ -2627,30 +2743,30 @@ static int txgbe_clean_rx_irq(struct txgbe_q_vector *q_vector, } rx_desc = TXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); - +#if 0 if (!txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_DD)) break; - +#endif size = le16_to_cpu(rx_desc->wb.upper.length); - if (!size) { + if (!size) break; - } + /* This memory barrier is needed to keep us from reading * any other fields out of the rx_desc until we know the * descriptor has been written back */ dma_rmb(); + rx_buffer = txgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size); - if (adapter->xdp_prog) { - prefetchw(rx_buffer->page); - rx_buffer->pagecnt_bias--; + /* retrieve a buffer from the ring */ + if (!skb) { xdp.data = page_address(rx_buffer->page) + rx_buffer->page_offset; #ifdef HAVE_XDP_BUFF_DATA_META xdp.data_meta = xdp.data; -#endif /* HAVE_XDP_BUFF_DATA_META */ - xdp.data_hard_start = xdp.data - txgbe_rx_offset(rx_ring); +#endif + xdp.data_hard_start = xdp.data - offset; xdp.data_end = xdp.data + size; #ifdef HAVE_XDP_BUFF_FRAME_SZ @@ -2658,12 +2774,14 @@ static int txgbe_clean_rx_irq(struct txgbe_q_vector *q_vector, /* At larger PAGE_SIZE, frame_sz depend on len size */ xdp.frame_sz = txgbe_rx_frame_truesize(rx_ring, size); #endif -#endif +#endif + //skb = txgbe_run_xdp(adapter, rx_ring, &xdp); skb = txgbe_run_xdp(adapter, rx_ring, rx_buffer, &xdp); } if (IS_ERR(skb)) { unsigned int xdp_res = -PTR_ERR(skb); + if (xdp_res & (TXGBE_XDP_TX | TXGBE_XDP_REDIR)) { xdp_xmit |= xdp_res; txgbe_rx_buffer_flip(rx_ring, rx_buffer, size); @@ -2672,22 +2790,26 @@ static int txgbe_clean_rx_irq(struct txgbe_q_vector *q_vector, } total_rx_packets++; total_rx_bytes += size; + } else if (skb) { + txgbe_add_rx_frag(rx_ring, rx_buffer, skb, size); +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC + } else if (ring_uses_build_skb(rx_ring)) { + skb = txgbe_build_skb(rx_ring, rx_buffer, + &xdp, rx_desc); +#endif } else { - /* retrieve a buffer from the ring */ - if (ring_is_hs_enabled(rx_ring)) - skb = 
txgbe_fetch_rx_buffer_hs(rx_ring, rx_desc); - else - skb = txgbe_fetch_rx_buffer(rx_ring, rx_desc); + skb = txgbe_construct_skb(rx_ring, rx_buffer, + &xdp, rx_desc); } + /* exit if we failed to retrieve a buffer */ if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; rx_buffer->pagecnt_bias++; break; } - if (IS_ERR(skb)) { - txgbe_put_rx_buffer(rx_ring, rx_buffer, skb); - } + txgbe_put_rx_buffer(rx_ring, rx_buffer, skb); cleaned_count++; /* place incomplete frames back on ring for completion */ @@ -2697,6 +2819,7 @@ static int txgbe_clean_rx_irq(struct txgbe_q_vector *q_vector, /* verify the packet layout is correct */ if (txgbe_cleanup_headers(rx_ring, rx_desc, skb)) continue; + /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; @@ -2736,7 +2859,7 @@ static int txgbe_clean_rx_irq(struct txgbe_q_vector *q_vector, /* update budget accounting */ total_rx_packets++; - } while (likely(total_rx_packets < budget)); + } #ifdef HAVE_XDP_SUPPORT if (xdp_xmit & TXGBE_XDP_TX) { @@ -3009,6 +3132,7 @@ enum latency_range { latency_invalid = 255 }; +#if 0 /** * txgbe_update_itr - update the dynamic ITR value based on statistics * @q_vector: structure containing interrupt and ring information @@ -3033,46 +3157,248 @@ static void txgbe_update_itr(struct txgbe_q_vector *q_vector, u64 bytes_perint; u8 itr_setting = ring_container->itr; - if (packets == 0) - return; + if (packets == 0) + return; + + /* simple throttlerate management + * 0-10MB/s lowest (100000 ints/s) + * 10-20MB/s low (20000 ints/s) + * 20-1249MB/s bulk (12000 ints/s) + */ + /* what was last interrupt timeslice? 
*/ + timepassed_us = q_vector->itr >> 2; + if (timepassed_us == 0) + return; + bytes_perint = bytes / timepassed_us; /* bytes/usec */ + + switch (itr_setting) { + case lowest_latency: + if (bytes_perint > 10) { + itr_setting = low_latency; + } + break; + case low_latency: + if (bytes_perint > 20) { + itr_setting = bulk_latency; + } else if (bytes_perint <= 10) { + itr_setting = lowest_latency; + } + break; + case bulk_latency: + if (bytes_perint <= 20) { + itr_setting = low_latency; + } + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itr_setting; +} +#endif + +static inline bool txgbe_container_is_rx(struct txgbe_q_vector *q_vector, + struct txgbe_ring_container *rc) +{ + return &q_vector->rx == rc; +} +/** + * ixgbe_update_itr - update the dynamic ITR value based on statistics + * @q_vector: structure containing interrupt and ring information + * @ring_container: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + **/ +static void txgbe_update_itr(struct txgbe_q_vector *q_vector, + struct txgbe_ring_container *ring_container) +{ + unsigned int itr = TXGBE_ITR_ADAPTIVE_MIN_USECS | + TXGBE_ITR_ADAPTIVE_LATENCY; + unsigned int avg_wire_size, packets, bytes; + unsigned long next_update = jiffies; + + /* If we don't have any rings just leave ourselves set for maximum + * possible latency so we take ourselves out of the equation. 
+ */ + if (!ring_container->ring) + return; + + /* If we didn't update within up to 1 - 2 jiffies we can assume + * that either packets are coming in so slow there hasn't been + * any work, or that there is so much work that NAPI is dealing + * with interrupt moderation and we don't need to do anything. + */ + if (time_after(next_update, ring_container->next_update)) + goto clear_counts; + + packets = ring_container->total_packets; + bytes = ring_container->total_bytes; + + if (txgbe_container_is_rx(q_vector, ring_container)) { + /* If Rx and there are 1 to 23 packets and bytes are less than + * 12112 assume insufficient data to use bulk rate limiting + * approach. Instead we will focus on simply trying to target + * receiving 8 times as much data in the next interrupt. + */ + if (packets && packets < 24 && bytes < 12112) { + itr = TXGBE_ITR_ADAPTIVE_LATENCY; + avg_wire_size = (bytes + packets * 24) * 2; + avg_wire_size = clamp_t(unsigned int, + avg_wire_size, 2560, 12800); + goto adjust_for_speed; + } + } + + /* Less than 48 packets we can assume that our current interrupt delay + * is only slightly too low. As such we should increase it by a small + * fixed amount. + */ + if (packets < 48) { + itr = (q_vector->itr >> 2) + TXGBE_ITR_ADAPTIVE_MIN_INC; + if (itr > TXGBE_ITR_ADAPTIVE_MAX_USECS) + itr = TXGBE_ITR_ADAPTIVE_MAX_USECS; + + /* If sample size is 0 - 7 we should probably switch + * to latency mode instead of trying to control + * things as though we are in bulk. + * + * Otherwise if the number of packets is less than 48 + * we should maintain whatever mode we are currently + * in. The range between 8 and 48 is the cross-over + * point between latency and bulk traffic. 
+ */ + if (packets < 8) + itr += TXGBE_ITR_ADAPTIVE_LATENCY; + else + itr += ring_container->itr & TXGBE_ITR_ADAPTIVE_LATENCY; + goto clear_counts; + } - /* simple throttlerate management - * 0-10MB/s lowest (100000 ints/s) - * 10-20MB/s low (20000 ints/s) - * 20-1249MB/s bulk (12000 ints/s) + /* Between 48 and 96 is our "goldilocks" zone where we are working + * out "just right". Just report that our current ITR is good for us. */ - /* what was last interrupt timeslice? */ - timepassed_us = q_vector->itr >> 2; - if (timepassed_us == 0) - return; - bytes_perint = bytes / timepassed_us; /* bytes/usec */ + if (packets < 96) { + itr = q_vector->itr >> 2; + goto clear_counts; + } - switch (itr_setting) { - case lowest_latency: - if (bytes_perint > 10) { - itr_setting = low_latency; - } + /* If packet count is 96 or greater we are likely looking at a slight + * overrun of the delay we want. Try halving our delay to see if that + * will cut the number of packets in half per interrupt. + */ + if (packets < 256) { + itr = q_vector->itr >> 3; + if (itr < TXGBE_ITR_ADAPTIVE_MIN_USECS) + itr = TXGBE_ITR_ADAPTIVE_MIN_USECS; + goto clear_counts; + } + + /* The paths below assume we are dealing with a bulk ITR since number + * of packets is 256 or greater. We are just going to have to compute + * a value and try to bring the count under control, though for smaller + * packet sizes there isn't much we can do as NAPI polling will likely + * be kicking in sooner rather than later. + */ + itr = TXGBE_ITR_ADAPTIVE_BULK; + + /* If packet counts are 256 or greater we can assume we have a gross + * overestimation of what the rate should be. Instead of trying to fine + * tune it just use the formula below to try and dial in an exact value + * give the current packet size of the frame. 
+ */ + avg_wire_size = bytes / packets; + + /* The following is a crude approximation of: + * wmem_default / (size + overhead) = desired_pkts_per_int + * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate + * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value + * + * Assuming wmem_default is 212992 and overhead is 640 bytes per + * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the + * formula down to + * + * (170 * (size + 24)) / (size + 640) = ITR + * + * We first do some math on the packet size and then finally bitshift + * by 8 after rounding up. We also have to account for PCIe link speed + * difference as ITR scales based on this. + */ + if (avg_wire_size <= 60) { + /* Start at 50k ints/sec */ + avg_wire_size = 5120; + } else if (avg_wire_size <= 316) { + /* 50K ints/sec to 16K ints/sec */ + avg_wire_size *= 40; + avg_wire_size += 2720; + } else if (avg_wire_size <= 1084) { + /* 16K ints/sec to 9.2K ints/sec */ + avg_wire_size *= 15; + avg_wire_size += 11452; + } else if (avg_wire_size <= 1980) { + /* 9.2K ints/sec to 8K ints/sec */ + avg_wire_size *= 5; + avg_wire_size += 22420; + } else { + /* plateau at a limit of 8K ints/sec */ + avg_wire_size = 32256; + } + +adjust_for_speed: + /* Resultant value is 256 times larger than it needs to be. This + * gives us room to adjust the value as needed to either increase + * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc. + * + * Use addition as we have already recorded the new latency flag + * for the ITR value. 
+ */ + switch (q_vector->adapter->link_speed) { + case TXGBE_LINK_SPEED_25GB_FULL: + itr += DIV_ROUND_UP(avg_wire_size, + TXGBE_ITR_ADAPTIVE_MIN_INC * 512) * + TXGBE_ITR_ADAPTIVE_MIN_INC; break; - case low_latency: - if (bytes_perint > 20) { - itr_setting = bulk_latency; - } else if (bytes_perint <= 10) { - itr_setting = lowest_latency; - } + case TXGBE_LINK_SPEED_10GB_FULL: + case TXGBE_LINK_SPEED_100_FULL: + default: + itr += DIV_ROUND_UP(avg_wire_size, + TXGBE_ITR_ADAPTIVE_MIN_INC * 256) * + TXGBE_ITR_ADAPTIVE_MIN_INC; break; - case bulk_latency: - if (bytes_perint <= 20) { - itr_setting = low_latency; - } + case TXGBE_LINK_SPEED_1GB_FULL: + case TXGBE_LINK_SPEED_10_FULL: + itr += DIV_ROUND_UP(avg_wire_size, + TXGBE_ITR_ADAPTIVE_MIN_INC * 64) * + TXGBE_ITR_ADAPTIVE_MIN_INC; break; } - /* clear work counters since we have the values we need */ + /* In the case of a latency specific workload only allow us to + * reduce the ITR by at most 2us. By doing this we should dial + * in so that our number of interrupts is no more than 2x the number + * of packets for the least busy workload. So for example in the case + * of a TCP worload the ack packets being received would set the + * the interrupt rate as they are a latency specific workload. 
+ */ + if ((itr & TXGBE_ITR_ADAPTIVE_LATENCY) && itr < ring_container->itr) + itr = ring_container->itr - TXGBE_ITR_ADAPTIVE_MIN_INC; +clear_counts: + /* write back value */ + ring_container->itr = itr; + + /* next update should occur within next jiffy */ + ring_container->next_update = next_update + 1; + ring_container->total_bytes = 0; ring_container->total_packets = 0; - - /* write updated itr to ring container */ - ring_container->itr = itr_setting; } /** @@ -3088,13 +3414,18 @@ void txgbe_write_eitr(struct txgbe_q_vector *q_vector) struct txgbe_adapter *adapter = q_vector->adapter; struct txgbe_hw *hw = &adapter->hw; int v_idx = q_vector->v_idx; - u32 itr_reg = q_vector->itr & TXGBE_MAX_EITR; + u32 itr_reg; + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + itr_reg = (q_vector->itr >> 3) & TXGBE_AMLITE_MAX_EITR; + else + itr_reg = q_vector->itr & TXGBE_MAX_EITR; itr_reg |= TXGBE_PX_ITR_CNT_WDIS; wr32(hw, TXGBE_PX_ITR(v_idx), itr_reg); } +#if 0 static void txgbe_set_itr(struct txgbe_q_vector *q_vector) { u16 new_itr = q_vector->itr; @@ -3131,7 +3462,29 @@ static void txgbe_set_itr(struct txgbe_q_vector *q_vector) txgbe_write_eitr(q_vector); } } +#endif + +static void txgbe_set_itr(struct txgbe_q_vector *q_vector) +{ + u32 new_itr; + + txgbe_update_itr(q_vector, &q_vector->tx); + txgbe_update_itr(q_vector, &q_vector->rx); + + /* use the smallest value of new ITR delay calculations */ + new_itr = min(q_vector->rx.itr, q_vector->tx.itr); + /* Clear latency flag if set, shift into correct position */ + new_itr &= TXGBE_ITR_ADAPTIVE_MASK_USECS; + new_itr <<= 2; + + if (new_itr != q_vector->itr) { + /* save the algorithm value here */ + q_vector->itr = new_itr; + + txgbe_write_eitr(q_vector); + } +} /** * txgbe_check_overtemp_subtask - check for over temperature * @adapter: pointer to adapter @@ -3141,6 +3494,7 @@ static void txgbe_check_overtemp_subtask(struct txgbe_adapter *adapter) struct txgbe_hw *hw = &adapter->hw; u32 eicr = 
adapter->interrupt_event; s32 temp_state; + u16 value = 0; #ifdef HAVE_VIRTUAL_STATION struct net_device *upper; struct list_head *iter; @@ -3150,20 +3504,27 @@ static void txgbe_check_overtemp_subtask(struct txgbe_adapter *adapter) return; if (!(adapter->flags2 & TXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) return; - if (!(adapter->flags2 & TXGBE_FLAG2_TEMP_SENSOR_EVENT)) - return; + /*when pci lose link, not check over heat*/ + pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &value); + if (value == TXGBE_FAILED_READ_CFG_WORD) + return ; - adapter->flags2 &= ~TXGBE_FLAG2_TEMP_SENSOR_EVENT; + if (!(adapter->flags3 & TXGBE_FLAG3_TEMP_SENSOR_INPROGRESS)) { + if (!(adapter->flags2 & TXGBE_FLAG2_TEMP_SENSOR_EVENT)) + return; - /* - * Since the warning interrupt is for both ports - * we don't have to check if: - * - This interrupt wasn't for our port. - * - We may have missed the interrupt so always have to - * check if we got a LSC - */ - if (!(eicr & TXGBE_PX_MISC_IC_OVER_HEAT)) - return; + adapter->flags2 &= ~TXGBE_FLAG2_TEMP_SENSOR_EVENT; + + /* + * Since the warning interrupt is for both ports + * we don't have to check if: + * - This interrupt wasn't for our port. 
+ * - We may have missed the interrupt so always have to + * check if we got a LSC + */ + if (!(eicr & TXGBE_PX_MISC_IC_OVER_HEAT)) + return; + } temp_state = TCALL(hw, phy.ops.check_overtemp); if (!temp_state || temp_state == TXGBE_NOT_IMPLEMENTED) @@ -3171,7 +3532,15 @@ static void txgbe_check_overtemp_subtask(struct txgbe_adapter *adapter) if (temp_state == TXGBE_ERR_UNDERTEMP && test_bit(__TXGBE_HANGING, &adapter->state)) { + if (hw->mac.type == txgbe_mac_aml || + hw->mac.type == txgbe_mac_aml40) { + adapter->flags3 &= ~TXGBE_FLAG3_TEMP_SENSOR_INPROGRESS; + // re-enable over_heat misx itr + wr32m(&adapter->hw, TXGBE_PX_MISC_IEN, TXGBE_PX_MISC_IEN_OVER_HEAT, + TXGBE_PX_MISC_IEN_OVER_HEAT); + } e_crit(drv, "%s\n", txgbe_underheat_msg); + wr32m(&adapter->hw, TXGBE_RDB_PB_CTL, TXGBE_RDB_PB_CTL_RXEN, TXGBE_RDB_PB_CTL_RXEN); netif_carrier_on(adapter->netdev); @@ -3186,6 +3555,9 @@ static void txgbe_check_overtemp_subtask(struct txgbe_adapter *adapter) clear_bit(__TXGBE_HANGING, &adapter->state); } else if (temp_state == TXGBE_ERR_OVERTEMP && !test_and_set_bit(__TXGBE_HANGING, &adapter->state)) { + if (hw->mac.type == txgbe_mac_aml || + hw->mac.type == txgbe_mac_aml40) + adapter->flags3 |= TXGBE_FLAG3_TEMP_SENSOR_INPROGRESS; e_crit(drv, "%s\n", txgbe_overheat_msg); netif_carrier_off(adapter->netdev); #ifdef HAVE_VIRTUAL_STATION @@ -3224,32 +3596,47 @@ static void txgbe_check_sfp_event(struct txgbe_adapter *adapter, u32 eicr) u32 eicr_mask = TXGBE_PX_MISC_IC_GPIO; u32 reg; - if (eicr & eicr_mask) { - if (!test_bit(__TXGBE_DOWN, &adapter->state)) { - wr32(hw, TXGBE_GPIO_INTMASK, 0xFF); - reg = rd32(hw, TXGBE_GPIO_INTSTATUS); - if (reg & TXGBE_GPIO_INTSTATUS_2) { - adapter->flags2 |= TXGBE_FLAG2_SFP_NEEDS_RESET; - wr32(hw, TXGBE_GPIO_EOI, - TXGBE_GPIO_EOI_2); - adapter->sfp_poll_time = 0; - txgbe_service_event_schedule(adapter); - } - if (reg & TXGBE_GPIO_INTSTATUS_3) { - adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; - wr32(hw, TXGBE_GPIO_EOI, - TXGBE_GPIO_EOI_3); - 
txgbe_service_event_schedule(adapter); + if (hw->mac.type == txgbe_mac_aml40) { + if (eicr & eicr_mask) { + if (!test_bit(__TXGBE_DOWN, &adapter->state)) { + wr32(hw, TXGBE_GPIO_INTMASK, 0xFF); + reg = rd32(hw, TXGBE_GPIO_INTSTATUS); + if (reg & TXGBE_GPIO_INTSTATUS_4) { + adapter->flags2 |= TXGBE_FLAG2_SFP_NEEDS_RESET; + wr32(hw, TXGBE_GPIO_EOI, + TXGBE_GPIO_EOI_4); + adapter->sfp_poll_time = 0; + txgbe_service_event_schedule(adapter); + } } + } + } else if (hw->mac.type == txgbe_mac_sp || hw->mac.type == txgbe_mac_aml) { + if (eicr & eicr_mask) { + if (!test_bit(__TXGBE_DOWN, &adapter->state)) { + wr32(hw, TXGBE_GPIO_INTMASK, 0xFF); + reg = rd32(hw, TXGBE_GPIO_INTSTATUS); + if (reg & TXGBE_GPIO_INTSTATUS_2) { + adapter->flags2 |= TXGBE_FLAG2_SFP_NEEDS_RESET; + wr32(hw, TXGBE_GPIO_EOI, + TXGBE_GPIO_EOI_2); + adapter->sfp_poll_time = 0; + txgbe_service_event_schedule(adapter); + } + if (reg & TXGBE_GPIO_INTSTATUS_3) { + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + wr32(hw, TXGBE_GPIO_EOI, + TXGBE_GPIO_EOI_3); + txgbe_service_event_schedule(adapter); + } - if (reg & TXGBE_GPIO_INTSTATUS_6) { - wr32(hw, TXGBE_GPIO_EOI, - TXGBE_GPIO_EOI_6); - adapter->flags |= - TXGBE_FLAG_NEED_LINK_CONFIG; - txgbe_service_event_schedule(adapter); + if (reg & TXGBE_GPIO_INTSTATUS_6) { + wr32(hw, TXGBE_GPIO_EOI, + TXGBE_GPIO_EOI_6); + adapter->flags |= + TXGBE_FLAG_NEED_LINK_CONFIG; + txgbe_service_event_schedule(adapter); + } } - wr32(hw, TXGBE_GPIO_INTMASK, 0x0); } } } @@ -3257,13 +3644,26 @@ static void txgbe_check_sfp_event(struct txgbe_adapter *adapter, u32 eicr) static void txgbe_check_lsc(struct txgbe_adapter *adapter) { adapter->lsc_int++; + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; if (!test_bit(__TXGBE_DOWN, &adapter->state)) { txgbe_service_event_schedule(adapter); } } +static void txgbe_check_phy_event(struct txgbe_adapter *adapter) +{ + + adapter->flags3 |= TXGBE_FLAG3_PHY_EVENT; + + if (!test_bit(__TXGBE_DOWN, 
&adapter->state)) { + txgbe_service_event_schedule(adapter); + } + +} + /** * txgbe_irq_enable - Enable default interrupt generation settings * @adapter: board private structure @@ -3274,16 +3674,9 @@ void txgbe_irq_enable(struct txgbe_adapter *adapter, bool queues, bool flush) struct txgbe_hw *hw = &adapter->hw; u8 device_type = hw->subsystem_device_id & 0xF0; - /* enable gpio interrupt */ - if (device_type != TXGBE_ID_MAC_XAUI && - device_type != TXGBE_ID_MAC_SGMII) { - mask |= TXGBE_GPIO_INTEN_2; - mask |= TXGBE_GPIO_INTEN_3; - mask |= TXGBE_GPIO_INTEN_6; - } - wr32(&adapter->hw, TXGBE_GPIO_INTEN, mask); - - if (device_type != TXGBE_ID_MAC_XAUI && + if (hw->mac.type == txgbe_mac_aml40) { + mask = TXGBE_GPIO_INTTYPE_LEVEL_4; + } else if (device_type != TXGBE_ID_MAC_XAUI && device_type != TXGBE_ID_MAC_SGMII) { mask = TXGBE_GPIO_INTTYPE_LEVEL_2 | TXGBE_GPIO_INTTYPE_LEVEL_3 | TXGBE_GPIO_INTTYPE_LEVEL_6; @@ -3293,9 +3686,15 @@ void txgbe_irq_enable(struct txgbe_adapter *adapter, bool queues, bool flush) /* enable misc interrupt */ mask = TXGBE_PX_MISC_IEN_MASK; + if (hw->mac.type != txgbe_mac_sp) + mask &= ~TXGBE_PX_MISC_IEN_ETH_EVENT; + if (adapter->flags2 & TXGBE_FLAG2_TEMP_SENSOR_CAPABLE) mask |= TXGBE_PX_MISC_IEN_OVER_HEAT; + if (adapter->flags3 & TXGBE_FLAG3_TEMP_SENSOR_INPROGRESS) + mask &= ~TXGBE_PX_MISC_IEN_OVER_HEAT; + if ((adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE) && !(adapter->flags2 & TXGBE_FLAG2_FDIR_REQUIRES_REINIT)) mask |= TXGBE_PX_MISC_IEN_FLOW_DIR; @@ -3304,6 +3703,9 @@ void txgbe_irq_enable(struct txgbe_adapter *adapter, bool queues, bool flush) mask |= TXGBE_PX_MISC_IEN_TIMESYNC; #endif /* HAVE_PTP_1588_CLOCK */ + if (netif_msg_tx_err(adapter)) + mask |= TXGBE_PX_MISC_IEN_TXDESC; + wr32(&adapter->hw, TXGBE_PX_MISC_IEN, mask); /* unmask interrupt */ txgbe_intr_enable(&adapter->hw, TXGBE_INTR_MISC(adapter)); @@ -3313,6 +3715,68 @@ void txgbe_irq_enable(struct txgbe_adapter *adapter, bool queues, bool flush) /* flush configuration */ if (flush) 
TXGBE_WRITE_FLUSH(&adapter->hw); + + /* enable gpio interrupt */ + if (hw->mac.type == txgbe_mac_aml40) { + mask = TXGBE_GPIO_INTEN_4; + } else if (device_type != TXGBE_ID_MAC_XAUI && + device_type != TXGBE_ID_MAC_SGMII) { + mask = TXGBE_GPIO_INTEN_2 | TXGBE_GPIO_INTEN_3 | + TXGBE_GPIO_INTEN_6; + } + wr32(&adapter->hw, TXGBE_GPIO_INTEN, mask); + +} + +static void txgbe_do_lan_reset(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_ring *tx_ring; + u32 reset = 0; + u32 i; + + for (i = 0; i < adapter->num_tx_queues; i++) + tx_ring = adapter->tx_ring[i]; + + usec_delay(1000); + if (hw->bus.lan_id == 0) { + reset = TXGBE_MIS_RST_LAN0_RST; + } else { + reset = TXGBE_MIS_RST_LAN1_RST; + } + + wr32(hw, TXGBE_MIS_RST, + reset | rd32(hw, TXGBE_MIS_RST)); + TXGBE_WRITE_FLUSH(hw); + usec_delay(10); +} + +static void txgbe_tx_ring_recovery(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 desc_error[4] = {0, 0, 0, 0}; + u32 i; + + /* check tdm fatal error */ + for (i = 0; i < 4; i++) { + desc_error[i] = rd32(hw, TXGBE_TDM_DESC_FATAL(i)); + if (desc_error[i] != 0) { + e_err(drv, "TDM fatal error queue\n"); + txgbe_tx_timeout_reset(adapter); + return; + } + } + + /* check tdm non-fatal error */ + for (i = 0; i < 4; i++) + desc_error[i] = rd32(hw, TXGBE_TDM_DESC_NONFATAL(i)); + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (desc_error[i / 32] & (1 << i % 32)) { + adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_Q_RESET; + e_err(drv, "TDM non-fatal error, queue[%d]", i); + } + } } static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data) @@ -3322,20 +3786,12 @@ static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data) u32 eicr; u32 ecc; u32 value = 0; + u16 vid; eicr = txgbe_misc_isb(adapter, TXGBE_ISB_MISC); - if (eicr & TXGBE_PX_MISC_IC_ETH_AN) { - if (adapter->backplane_an == 1 && (KR_POLLING == 0)) { - value = txgbe_rd32_epcs(hw, 0x78002); - value = value & 0x4; - if (value == 0x4) { 
- if (!(adapter->flags2 & TXGBE_FLAG2_KR_TRAINING)) { - adapter->flags2 |= TXGBE_FLAG2_KR_TRAINING; - txgbe_service_event_schedule(adapter); - } - } - } + if (adapter->backplane_an) + txgbe_check_lsc(adapter); } if(BOND_CHECK_LINK_MODE == 1){ @@ -3349,8 +3805,17 @@ static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data) } } } else { - if (eicr & (TXGBE_PX_MISC_IC_ETH_LK | TXGBE_PX_MISC_IC_ETH_LKDN)) - txgbe_check_lsc(adapter); + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + if (eicr & TXGBE_PX_MISC_AML_ETH_LK_CHANGE) + txgbe_check_lsc(adapter); + if (eicr & TXGBE_PX_MISC_AML_ETH_PHY_EVENT) + txgbe_check_phy_event(adapter); + } else { + if (eicr & (TXGBE_PX_MISC_IC_ETH_LK | + TXGBE_PX_MISC_IC_ETH_LKDN | + TXGBE_PX_MISC_IC_ETH_EVENT)) + txgbe_check_lsc(adapter); + } } if (eicr & TXGBE_PX_MISC_IC_VF_MBOX) @@ -3359,11 +3824,18 @@ static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data) if (eicr & TXGBE_PX_MISC_IC_PCIE_REQ_ERR) { ERROR_REPORT1(TXGBE_ERROR_POLLING, "lan id %d, PCIe request error founded.\n", hw->bus.lan_id); - if (hw->bus.lan_id == 0) { - adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; - txgbe_service_event_schedule(adapter); - } else - wr32(&adapter->hw, TXGBE_MIS_PF_SM, 1); + pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &vid); + if (vid == TXGBE_FAILED_READ_CFG_WORD) { + ERROR_REPORT1(TXGBE_ERROR_POLLING, "PCIe link is lost.\n"); + /*when pci lose link, not check over heat*/ + if (hw->bus.lan_id == 0) { + adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; + txgbe_service_event_schedule(adapter); + } else + wr32(&adapter->hw, TXGBE_MIS_PF_SM, 1); + } else { + adapter->flags2 |= TXGBE_FLAG2_DMA_RESET_REQUESTED; + } } if (eicr & TXGBE_PX_MISC_IC_INT_ERR) { @@ -3378,15 +3850,22 @@ static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data) txgbe_service_event_schedule(adapter); } if (eicr & TXGBE_PX_MISC_IC_DEV_RST) { - adapter->flags2 |= TXGBE_FLAG2_RESET_INTR_RECEIVED; - 
txgbe_service_event_schedule(adapter); + value = rd32(hw, TXGBE_TSC_LSEC_PKTNUM1); //This reg is used by fw to tell drv not to drv rst + if(!(value & 0x1)){ + adapter->flags2 |= TXGBE_FLAG2_RESET_INTR_RECEIVED; + txgbe_service_event_schedule(adapter); + } } - if ((eicr & TXGBE_PX_MISC_IC_STALL) || - (eicr & TXGBE_PX_MISC_IC_ETH_EVENT)) { + if (eicr & TXGBE_PX_MISC_IC_STALL) { adapter->flags2 |= TXGBE_FLAG2_PF_RESET_REQUESTED; txgbe_service_event_schedule(adapter); } + if (eicr & TXGBE_PX_MISC_IC_TXDESC) { + txgbe_tx_ring_recovery(adapter); + txgbe_service_event_schedule(adapter); + } + #ifdef HAVE_TX_MQ /* Handle Flow Director Full threshold interrupt */ if (eicr & TXGBE_PX_MISC_IC_FLOW_DIR) { @@ -3408,6 +3887,15 @@ static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data) } } #endif + +#if 0 + /* amlite: add SWFW mbox int */ + if (hw->mac.type == txgbe_mac_aml && eicr & TXGBE_PX_MISC_IC_MNG_HOST_MBOX) { + adapter->flags |= TXGBE_FLAG_SWFW_MBOX_NOTIFY; + txgbe_service_event_schedule(adapter); + } +#endif + txgbe_check_sfp_event(adapter, eicr); txgbe_check_overtemp_event(adapter, eicr); @@ -3420,6 +3908,8 @@ static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data) if (!test_bit(__TXGBE_DOWN, &adapter->state)) txgbe_irq_enable(adapter, false, false); + wr32(hw, TXGBE_GPIO_INTMASK, 0x0); + return IRQ_HANDLED; } @@ -3475,6 +3965,9 @@ int txgbe_poll(struct napi_struct *napi, int budget) if (!txgbe_qv_lock_napi(q_vector)) return budget; #endif + /* Exit if we are called by netpoll */ + if (budget <= 0) + return budget; /* attempt to distribute budget to each queue fairly, but don't allow * the budget to go below 1 because we'll exit polling */ @@ -3619,16 +4112,8 @@ static irqreturn_t txgbe_intr(int __always_unused irq, void *data) eicr_misc = txgbe_misc_isb(adapter, TXGBE_ISB_MISC); if (eicr_misc & TXGBE_PX_MISC_IC_ETH_AN) { - if (adapter->backplane_an == 1 && (KR_POLLING == 0)) { - value = txgbe_rd32_epcs(hw, 0x78002); - value = value & 
0x4; - if (value == 0x4) { - if (!(adapter->flags2 & TXGBE_FLAG2_KR_TRAINING)) { - adapter->flags2 |= TXGBE_FLAG2_KR_TRAINING; - txgbe_service_event_schedule(adapter); - } - } - } + if (adapter->backplane_an) + txgbe_service_event_schedule(adapter); } if(BOND_CHECK_LINK_MODE == 1){ @@ -3655,8 +4140,11 @@ static irqreturn_t txgbe_intr(int __always_unused irq, void *data) } if (eicr_misc & TXGBE_PX_MISC_IC_DEV_RST) { - adapter->flags2 |= TXGBE_FLAG2_RESET_INTR_RECEIVED; - txgbe_service_event_schedule(adapter); + value = rd32(hw, TXGBE_TSC_LSEC_PKTNUM1); //This reg is used by fw to tell drv not to drv rst + if(!(value & 0x1)){ + adapter->flags2 |= TXGBE_FLAG2_RESET_INTR_RECEIVED; + txgbe_service_event_schedule(adapter); + } } txgbe_check_sfp_event(adapter, eicr_misc); txgbe_check_overtemp_event(adapter, eicr_misc); @@ -3778,6 +4266,45 @@ static void txgbe_configure_msi_and_legacy(struct txgbe_adapter *adapter) e_info(hw, "Legacy interrupt IVAR setup done\n"); } +/* amlite: tx header wb */ +#ifdef TXGBE_TXHEAD_WB +static int txgbe_setup_headwb_resources(struct txgbe_ring *ring) +{ + struct txgbe_adapter *adapter; + struct txgbe_hw *hw; + struct device *dev = ring->dev; + u8 headwb_size = 0; + + if (ring->q_vector) { + adapter = ring->q_vector->adapter; + hw = &adapter->hw; + if (hw->mac.type == txgbe_mac_sp) + return 0; + } else { + return 0; + } + + if (TXGBE_TXHEAD_WB == 1) + headwb_size = 16; + else if (TXGBE_TXHEAD_WB == 2) + headwb_size = 16; + else + headwb_size = 1; + + ring->headwb_mem = dma_alloc_coherent(dev, + sizeof(u32) * headwb_size, + &ring->headwb_dma, + GFP_KERNEL); + if (!ring->headwb_mem) { + e_err(drv, "txgbe_setup_headwb_resources no mem\n"); + return -ENOMEM; + } + memset(ring->headwb_mem, 0, sizeof(u32) * headwb_size); + + return 0; +} +#endif + /** * txgbe_configure_tx_ring - Configure Tx ring after Reset * @adapter: board private structure @@ -3814,6 +4341,7 @@ void txgbe_configure_tx_ring(struct txgbe_adapter *adapter, /* reset ntu and ntc to 
place SW in sync with hardwdare */ ring->next_to_clean = 0; ring->next_to_use = 0; + ring->next_to_free = 0; txdctl |= TXGBE_RING_SIZE(ring) << TXGBE_PX_TR_CFG_TR_SIZE_SHIFT; @@ -3851,10 +4379,22 @@ void txgbe_configure_tx_ring(struct txgbe_adapter *adapter, clear_bit(__TXGBE_HANG_CHECK_ARMED, &ring->state); +#ifdef TXGBE_TXHEAD_WB + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + wr32(hw, TXGBE_PX_TR_HEAD_ADDRL(reg_idx), + ring->headwb_dma & DMA_BIT_MASK(32)); + wr32(hw, TXGBE_PX_TR_HEAD_ADDRH(reg_idx), ring->headwb_dma >> 32); + + if (TXGBE_TXHEAD_WB == 1) + txdctl |= TXGBE_PX_TR_CFG_HEAD_WB | TXGBE_PX_TR_CFG_HEAD_WB_64BYTE; + else + txdctl |= TXGBE_PX_TR_CFG_HEAD_WB; + } +#endif + /* enable queue */ wr32(hw, TXGBE_PX_TR_CFG(reg_idx), txdctl); - /* poll to verify queue is enabled */ do { msleep(1); @@ -3864,8 +4404,6 @@ void txgbe_configure_tx_ring(struct txgbe_adapter *adapter, e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); } - - /** * txgbe_configure_tx - Configure Transmit Unit after Reset * @adapter: board private structure @@ -3894,6 +4432,11 @@ static void txgbe_configure_tx(struct txgbe_adapter *adapter) for (i = 0; i < adapter->num_xdp_queues; i++) txgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]); wr32m(hw, TXGBE_TSC_BUF_AE, 0x3FF, 0x10); + + /* enable mac transmitter */ + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + TCALL(hw, mac.ops.enable_sec_tx_path); + /* enable mac transmitter */ wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, TXGBE_MAC_TX_CFG_TE); @@ -3993,7 +4536,12 @@ static void txgbe_configure_srrctl(struct txgbe_adapter *adapter, srrctl |= xsk_buf_len >> TXGBE_PX_RR_CFG_BSIZEPKT_SHIFT; } else { #endif /* HAVE_AF_XDP_ZC_SUPPORT */ - srrctl |= txgbe_rx_bufsz(rx_ring) >> TXGBE_PX_RR_CFG_BSIZEPKT_SHIFT; + //srrctl |= txgbe_rx_bufsz(rx_ring) >> TXGBE_PX_RR_CFG_BSIZEPKT_SHIFT; + if (test_bit(__TXGBE_RX_3K_BUFFER, &rx_ring->state)) { + srrctl |= TXGBE_RXBUFFER_3K >> 
TXGBE_PX_RR_CFG_BSIZEPKT_SHIFT; + } else { + srrctl |= TXGBE_RXBUFFER_2K >> TXGBE_PX_RR_CFG_BSIZEPKT_SHIFT; + } if (ring_is_hs_enabled(rx_ring)) srrctl |= TXGBE_PX_RR_CFG_SPLIT_MODE; #if 0 @@ -4013,7 +4561,10 @@ static void txgbe_configure_srrctl(struct txgbe_adapter *adapter, */ u32 txgbe_rss_indir_tbl_entries(struct txgbe_adapter *adapter) { - return 128; + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) + return 64; + else + return 128; } /** @@ -4025,26 +4576,45 @@ u32 txgbe_rss_indir_tbl_entries(struct txgbe_adapter *adapter) */ void txgbe_store_reta(struct txgbe_adapter *adapter) { - u32 i, reta_entries = txgbe_rss_indir_tbl_entries(adapter); + u32 i, reta_entries = txgbe_rss_indir_tbl_entries(adapter); + struct txgbe_hw *hw = &adapter->hw; + u32 reta = 0; + u8 *indir_tbl = adapter->rss_indir_tbl; + + /* Fill out the redirection table as follows: + * - 8 bit wide entries containing 4 bit RSS index + */ + + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + reta |= indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + wr32(hw, TXGBE_RDB_RSSTBL(i >> 2), reta); + reta = 0; + } + } +} + +void txgbe_store_vfreta(struct txgbe_adapter *adapter) +{ + u32 reta_entries = txgbe_rss_indir_tbl_entries(adapter); + unsigned int pf_pool = adapter->num_vfs; + u8 *indir_tbl = adapter->rss_indir_tbl; struct txgbe_hw *hw = &adapter->hw; u32 reta = 0; - u8 *indir_tbl = adapter->rss_indir_tbl; - - /* Fill out the redirection table as follows: - * - 8 bit wide entries containing 4 bit RSS index - */ + u32 i; /* Write redirection table to HW */ for (i = 0; i < reta_entries; i++) { reta |= indir_tbl[i] << (i & 0x3) * 8; if ((i & 3) == 3) { - wr32(hw, TXGBE_RDB_RSSTBL(i >> 2), reta); + wr32(hw, TXGBE_RDB_VMRSSTBL(i >> 2, pf_pool), reta); reta = 0; } } } -static void txgbe_setup_reta(struct txgbe_adapter *adapter) +void txgbe_setup_reta(struct txgbe_adapter *adapter) { struct txgbe_hw *hw = &adapter->hw; u32 i, j; @@ -4076,6 +4646,28 @@ static void 
txgbe_setup_reta(struct txgbe_adapter *adapter) txgbe_store_reta(adapter); } +static void txgbe_setup_vfreta(struct txgbe_adapter *adapter) +{ + u32 reta_entries = txgbe_rss_indir_tbl_entries(adapter); + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + unsigned int pf_pool = adapter->num_vfs; + struct txgbe_hw *hw = &adapter->hw; + u32 i, j; + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + wr32(hw, TXGBE_RDB_VMRSSRK(i, pf_pool), *(adapter->rss_key + i)); + + for (i = 0, j = 0; i < reta_entries; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + } + + txgbe_store_vfreta(adapter); +} + static void txgbe_setup_mrqc(struct txgbe_adapter *adapter) { struct txgbe_hw *hw = &adapter->hw; @@ -4102,15 +4694,28 @@ static void txgbe_setup_mrqc(struct txgbe_adapter *adapter) rss_field |= TXGBE_RDB_RA_CTL_RSS_IPV6_UDP; netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); - - txgbe_setup_reta(adapter); - /* Consistent with the X710 that vf do not make its own receive-hash rules */ - //rss_field |= TXGBE_RDB_RA_CTL_MULTI_RSS; + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) { + unsigned int pool = adapter->num_vfs; + u32 vfmrqc; + + /* Setup RSS through the VF registers */ + txgbe_setup_vfreta(adapter); + + vfmrqc = rd32(hw , TXGBE_RDB_PL_CFG(pool)); + vfmrqc &= ~TXGBE_RDB_PL_CFG_RSS_MASK; + vfmrqc |= rss_field | TXGBE_RDB_PL_CFG_RSS_EN; + wr32(hw, TXGBE_RDB_PL_CFG(pool), vfmrqc); + + /* Enable VF RSS mode */ + rss_field |= TXGBE_RDB_RA_CTL_MULTI_RSS; + } else { + txgbe_setup_reta(adapter); + } if (adapter->flags2 & TXGBE_FLAG2_RSS_ENABLED) rss_field |= TXGBE_RDB_RA_CTL_RSS_EN; - + wr32(hw, TXGBE_RDB_RA_CTL, rss_field); } @@ -4195,37 +4800,6 @@ static void txgbe_rx_desc_queue_enable(struct txgbe_adapter *adapter, "not set within the polling period\n", reg_idx); } } - -/* disable the specified tx ring/queue */ -void txgbe_disable_tx_queue(struct txgbe_adapter *adapter, - struct txgbe_ring *ring) -{ - struct txgbe_hw 
*hw = &adapter->hw; - int wait_loop = TXGBE_MAX_RX_DESC_POLL; - u32 rxdctl, reg_offset, enable_mask; - u8 reg_idx = ring->reg_idx; - - if (TXGBE_REMOVED(hw->hw_addr)) - return; - - reg_offset = TXGBE_PX_TR_CFG(reg_idx); - enable_mask = TXGBE_PX_TR_CFG_ENABLE; - - /* write value back with TDCFG.ENABLE bit cleared */ - wr32m(hw, reg_offset, enable_mask, 0); - - /* the hardware may take up to 100us to really disable the tx queue */ - do { - udelay(10); - rxdctl = rd32(hw, reg_offset); - } while (--wait_loop && (rxdctl & enable_mask)); - - if (!wait_loop) { - e_err(drv, "TDCFG.ENABLE on Tx queue %d not cleared within " - "the polling period\n", reg_idx); - } -} - /* disable the specified rx ring/queue */ void txgbe_disable_rx_queue(struct txgbe_adapter *adapter, struct txgbe_ring *ring) @@ -4258,9 +4832,13 @@ void txgbe_configure_rx_ring(struct txgbe_adapter *adapter, struct txgbe_ring *ring) { struct txgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; u64 rdba = ring->dma; u32 rxdctl; u16 reg_idx = ring->reg_idx; +#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) || defined(NETIF_F_HW_VLAN_STAG_FILTER) || defined(NETIF_F_HW_VLAN_FILTER) + netdev_features_t features = netdev->features; +#endif /* disable queue to avoid issues while updating state */ rxdctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx)); @@ -4298,7 +4876,24 @@ void txgbe_configure_rx_ring(struct txgbe_adapter *adapter, else rxdctl |= (ring->count / 128) << TXGBE_PX_RR_CFG_RR_SIZE_SHIFT; +#if (defined NETIF_F_HW_VLAN_CTAG_RX) + if (features & NETIF_F_HW_VLAN_CTAG_RX) +#elif (defined NETIF_F_HW_VLAN_STAG_RX) + if (features & NETIF_F_HW_VLAN_STAG_RX) +#else + if (features & NETIF_F_HW_VLAN_RX) +#endif + rxdctl |= TXGBE_PX_RR_CFG_VLAN; + else + rxdctl &= ~TXGBE_PX_RR_CFG_VLAN; + rxdctl |= 0x1 << TXGBE_PX_RR_CFG_RR_THER_SHIFT; + +#ifdef TXGBE_TXHEAD_WB + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + rxdctl |= TXGBE_PX_RR_CFG_DESC_MERGE; +#endif + wr32(hw, 
TXGBE_PX_RR_CFG(reg_idx), rxdctl); /* reset head and tail pointers */ @@ -4311,6 +4906,7 @@ void txgbe_configure_rx_ring(struct txgbe_adapter *adapter, ring->next_to_use = 0; #ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT ring->next_to_alloc = 0; + ring->rx_offset = txgbe_rx_offset(ring); #endif txgbe_configure_srrctl(adapter, ring); @@ -4531,9 +5127,9 @@ static void txgbe_set_rx_buffer_len(struct txgbe_adapter *adapter) */ for (i = 0; i < adapter->num_rx_queues; i++) { rx_ring = adapter->rx_ring[i]; - +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT clear_bit(__TXGBE_RX_3K_BUFFER, &rx_ring->state); - +#endif if (adapter->flags & TXGBE_FLAG_RX_HS_ENABLED) { rx_ring->rx_buf_len = TXGBE_RX_HDR_SIZE; set_ring_hs_enabled(rx_ring); @@ -4545,19 +5141,36 @@ static void txgbe_set_rx_buffer_len(struct txgbe_adapter *adapter) else clear_ring_rsc_enabled(rx_ring); -#ifdef HAVE_XDP_SUPPORT -#if (PAGE_SIZE < 8192) - if(adapter->xdp_prog) - if (TXGBE_2K_TOO_SMALL_WITH_PADDING || - (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) - set_bit(__TXGBE_RX_3K_BUFFER, &rx_ring->state); -#endif +#ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT + clear_bit(__TXGBE_RX_3K_BUFFER, &rx_ring->state); + clear_bit(__TXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); +#if IS_ENABLED(CONFIG_FCOE) + + if (test_bit(__TXGBE_RX_FCOE, &rx_ring->state)) + set_bit(__TXGBE_RX_3K_BUFFER, &rx_ring->state); #endif -#ifdef CONFIG_TXGBE_DISABLE_PACKET_SPLIT +#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC + if (adapter->flags2 & TXGBE_FLAG2_RX_LEGACY) + continue; - rx_ring->rx_buf_len = rx_buf_len; + set_bit(__TXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); + +#if (PAGE_SIZE < 8192) + if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) + set_bit(__TXGBE_RX_3K_BUFFER, &rx_ring->state); + if (TXGBE_2K_TOO_SMALL_WITH_PADDING || + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) + set_bit(__TXGBE_RX_3K_BUFFER, &rx_ring->state); +#endif +#else /* !HAVE_SWIOTLB_SKIP_CPU_SYNC */ + adapter->flags2 |= TXGBE_FLAG2_RX_LEGACY; + adapter->eth_priv_flags |= 
TXGBE_ETH_PRIV_FLAG_LEGACY_RX; +#endif /* !HAVE_SWIOTLB_SKIP_CPU_SYNC */ +#else /* CONFIG_TXGBE_DISABLE_PACKET_SPLIT */ + + rx_ring->rx_buf_len = rx_buf_len; #if IS_ENABLED(CONFIG_FCOE) if (test_bit(__TXGBE_RX_FCOE, &rx_ring->state) && (rx_buf_len < TXGBE_FCOE_JUMBO_FRAME_SIZE)) @@ -4601,6 +5214,12 @@ static void txgbe_configure_rx(struct txgbe_adapter *adapter) /* set_rx_buffer_len must be called before ring initialization */ txgbe_set_rx_buffer_len(adapter); + wr32(hw, TXGBE_RDM_DCACHE_CTL, TXGBE_RDM_DCACHE_CTL_EN); + wr32m(hw, TXGBE_RDM_RSC_CTL, TXGBE_RDM_RSC_CTL_FREE_CTL, + TXGBE_RDM_RSC_CTL_FREE_CTL); + wr32m(hw, TXGBE_RDM_RSC_CTL, TXGBE_RDM_RSC_CTL_FREE_CNT_DIS, + ~TXGBE_RDM_RSC_CTL_FREE_CNT_DIS); + /* * Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring @@ -5016,7 +5635,7 @@ static void txgbe_mac_set_default_filter(struct txgbe_adapter *adapter, TXGBE_PSR_MAC_SWC_AD_H_AV); } -int txgbe_add_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool) +int txgbe_add_mac_filter(struct txgbe_adapter *adapter, const u8 *addr, u16 pool) { struct txgbe_hw *hw = &adapter->hw; u32 i; @@ -5025,11 +5644,10 @@ int txgbe_add_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool) return -EINVAL; for (i = 0; i < hw->mac.num_rar_entries; i++) { - - if (adapter->mac_table[i].state & TXGBE_MAC_STATE_IN_USE) - { + if (adapter->mac_table[i].state & TXGBE_MAC_STATE_IN_USE) { if (ether_addr_equal(addr, adapter->mac_table[i].addr)) { if (adapter->mac_table[i].pools != (1ULL << pool)) { + adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED; memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN); adapter->mac_table[i].pools |= (1ULL << pool); txgbe_sync_mac_table(adapter); @@ -5037,7 +5655,9 @@ int txgbe_add_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool) } } } + } + for (i = 0; i < hw->mac.num_rar_entries; i++) { if (adapter->mac_table[i].state & TXGBE_MAC_STATE_IN_USE) { continue; } @@ -5065,7 +5685,7 @@ 
static void txgbe_flush_sw_mac_table(struct txgbe_adapter *adapter) txgbe_sync_mac_table(adapter); } -int txgbe_del_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool) +int txgbe_del_mac_filter(struct txgbe_adapter *adapter, const u8 *addr, u16 pool) { /* search table for addr, if found, set to 0 and sync */ u32 i; @@ -5078,7 +5698,8 @@ int txgbe_del_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool) if (ether_addr_equal(addr, adapter->mac_table[i].addr)){ if (adapter->mac_table[i].pools & (1ULL << pool)) { adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED; - adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE; + if (adapter->mac_table[i].pools == (1ULL << pool)) + adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE; adapter->mac_table[i].pools &= ~(1ULL << pool) ; txgbe_sync_mac_table(adapter); @@ -5142,35 +5763,40 @@ int txgbe_write_uc_addr_list(struct net_device *netdev, int pool) return count; } -#endif - -int txgbe_add_cloud_switcher(struct txgbe_adapter *adapter, u32 key, u16 pool) +static int txgbe_uc_sync(struct net_device *netdev, const unsigned char *addr) { - struct txgbe_hw *hw = &adapter->hw; + struct txgbe_adapter *adapter = netdev_priv(netdev); + int ret; - UNREFERENCED_PARAMETER(pool); + ret = txgbe_add_mac_filter(adapter, addr, VMDQ_P(0)); - wr32(hw, TXGBE_PSR_CL_SWC_IDX, 0); - wr32(hw, TXGBE_PSR_CL_SWC_KEY, key); - wr32(hw, TXGBE_PSR_CL_SWC_CTL, - TXGBE_PSR_CL_SWC_CTL_VLD | TXGBE_PSR_CL_SWC_CTL_DST_MSK); - wr32(hw, TXGBE_PSR_CL_SWC_VM_L, 0x1); - wr32(hw, TXGBE_PSR_CL_SWC_VM_H, 0x0); + return min_t(int, ret, 0); +} + +static int txgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + txgbe_del_mac_filter(adapter, addr, VMDQ_P(0)); return 0; } +#endif -int txgbe_del_cloud_switcher(struct txgbe_adapter *adapter, u32 key, u16 pool) +static int txgbe_add_cloud_switcher(struct txgbe_adapter *adapter, + u32 key, u16 pool) { - /* search table for 
addr, if found, set to 0 and sync */ struct txgbe_hw *hw = &adapter->hw; - UNREFERENCED_PARAMETER(key); UNREFERENCED_PARAMETER(pool); wr32(hw, TXGBE_PSR_CL_SWC_IDX, 0); - wr32(hw, TXGBE_PSR_CL_SWC_CTL, 0); + wr32(hw, TXGBE_PSR_CL_SWC_KEY, key); + wr32(hw, TXGBE_PSR_CL_SWC_CTL, + TXGBE_PSR_CL_SWC_CTL_VLD | TXGBE_PSR_CL_SWC_CTL_DST_MSK); + wr32(hw, TXGBE_PSR_CL_SWC_VM_L, 0x1); + wr32(hw, TXGBE_PSR_CL_SWC_VM_H, 0x0); return 0; } @@ -5181,7 +5807,6 @@ static void txgbe_vlan_promisc_enable(struct txgbe_adapter *adapter) { struct txgbe_hw *hw = &adapter->hw; u32 vlnctrl, i; - u32 vlvfb; u32 vind; u32 bits; @@ -5209,7 +5834,6 @@ static void txgbe_vlan_promisc_enable(struct txgbe_adapter *adapter) vind = VMDQ_P(0); for (i = TXGBE_PSR_VLAN_SWC_ENTRIES; --i;) { wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, i); - vlvfb = rd32(hw, TXGBE_PSR_VLAN_SWC_IDX); if (vind < 32) { bits = rd32(hw, @@ -5244,7 +5868,7 @@ static void txgbe_scrub_vfta(struct txgbe_adapter *adapter) for (i = TXGBE_PSR_VLAN_SWC_ENTRIES; --i;) { wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, i); - vlvf = rd32(hw, TXGBE_PSR_VLAN_SWC_IDX); + vlvf = rd32(hw, TXGBE_PSR_VLAN_SWC); /* pull VLAN ID from VLVF */ vid = vlvf & ~TXGBE_PSR_VLAN_SWC_VIEN; @@ -5397,10 +6021,12 @@ void txgbe_set_rx_mode(struct net_device *netdev) * sufficient space to store all the addresses then enable * unicast promiscuous mode */ - count = txgbe_write_uc_addr_list(netdev, VMDQ_P(0)); - if (count < 0) { + if (__dev_uc_sync(netdev, txgbe_uc_sync, txgbe_uc_unsync)) { vmolr &= ~TXGBE_PSR_VM_L2CTL_ROPE; - vmolr |= TXGBE_PSR_VM_L2CTL_UPE; + fctrl |= TXGBE_PSR_CTL_UPE; + e_dev_warn("netdev uc count is %d, hw available mac entry count is %d," + "enable promisc mode\n", + netdev_uc_count(netdev), txgbe_available_rars(adapter)); } /* @@ -5418,30 +6044,6 @@ void txgbe_set_rx_mode(struct net_device *netdev) wr32(hw, TXGBE_PSR_CTL, fctrl); wr32(hw, TXGBE_PSR_VM_L2CTL(VMDQ_P(0)), vmolr); -#if (defined NETIF_F_HW_VLAN_CTAG_RX) && (defined NETIF_F_HW_VLAN_STAG_RX) - if ((features 
& NETIF_F_HW_VLAN_CTAG_RX) && - (features & NETIF_F_HW_VLAN_STAG_RX)) -#elif (defined NETIF_F_HW_VLAN_CTAG_RX) - if (features & NETIF_F_HW_VLAN_CTAG_RX) -#elif (defined NETIF_F_HW_VLAN_STAG_RX) - if (features & NETIF_F_HW_VLAN_STAG_RX) -#else - if (features & NETIF_F_HW_VLAN_RX) -#endif - txgbe_vlan_strip_enable(adapter); - else - txgbe_vlan_strip_disable(adapter); - -#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) - if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { -#if defined(NETIF_F_HW_VLAN_STAG_FILTER) - netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER; - } else { - netdev->features &= ~NETIF_F_HW_VLAN_STAG_FILTER; -#endif - } -#endif - #if defined(NETIF_F_HW_VLAN_CTAG_FILTER) if (features & NETIF_F_HW_VLAN_CTAG_FILTER) txgbe_vlan_promisc_disable(adapter); @@ -5833,6 +6435,23 @@ static void txgbe_configure_pb(struct txgbe_adapter *adapter) txgbe_pbthresh_setup(adapter); } +static void txgbe_ethertype_filter_restore(struct txgbe_adapter *adapter) +{ + struct txgbe_etype_filter_info *filter_info = &adapter->etype_filter_info; + struct txgbe_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS; i++) { + if (filter_info->ethertype_mask & (1 << i)) { + wr32(hw, TXGBE_PSR_ETYPE_SWC(i), + filter_info->etype_filters[i].etqf); + wr32(hw, TXGBE_RDB_ETYPE_CLS(i), + filter_info->etype_filters[i].etqs); + TXGBE_WRITE_FLUSH(hw); + } + } +} + static void txgbe_fdir_filter_restore(struct txgbe_adapter *adapter) { struct txgbe_hw *hw = &adapter->hw; @@ -5896,7 +6515,7 @@ void txgbe_configure_isb(struct txgbe_adapter *adapter) wr32(hw, TXGBE_PX_ISB_ADDR_H, adapter->isb_dma >> 32); } -void txgbe_configure_port(struct txgbe_adapter *adapter) +static void txgbe_configure_port(struct txgbe_adapter *adapter) { struct txgbe_hw *hw = &adapter->hw; u32 value, i; @@ -5936,6 +6555,9 @@ void txgbe_configure_port(struct txgbe_adapter *adapter) TXGBE_CFG_PORT_CTL_D_VLAN | TXGBE_CFG_PORT_CTL_QINQ, value); + if (adapter->tx_unidir_mode) + wr32m(hw, 
TXGBE_CFG_PORT_CTL, TXGBE_CFG_PORT_CTL_FORCE_LKUP, + TXGBE_CFG_PORT_CTL_FORCE_LKUP); wr32(hw, TXGBE_CFG_TAG_TPID(0), ETH_P_8021Q | ETH_P_8021AD << 16); @@ -6123,6 +6745,20 @@ static void txgbe_configure_dfwd(struct txgbe_adapter *adapter) } #endif /*HAVE_VIRTUAL_STATION*/ +static void txgbe_configure_desc_chk(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i; + + if (!netif_msg_tx_err(adapter)) + return; + + for (i = 0; i < 4; i++) + wr32(hw, TXGBE_TDM_DESC_CHK(i), 0xFFFFFFFF); + + e_info(drv, "enable desc check\n"); +} + static void txgbe_configure(struct txgbe_adapter *adapter) { struct txgbe_hw *hw = &adapter->hw; @@ -6145,6 +6781,7 @@ static void txgbe_configure(struct txgbe_adapter *adapter) TCALL(hw, mac.ops.disable_sec_rx_path); + txgbe_ethertype_filter_restore(adapter); if (adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE) { txgbe_init_fdir_signature(&adapter->hw, adapter->fdir_pballoc); @@ -6174,6 +6811,7 @@ static void txgbe_configure(struct txgbe_adapter *adapter) txgbe_configure_tx(adapter); txgbe_configure_rx(adapter); + txgbe_configure_desc_chk(adapter); txgbe_configure_isb(adapter); #ifdef HAVE_VIRTUAL_STATION txgbe_configure_dfwd(adapter); @@ -6183,6 +6821,7 @@ static void txgbe_configure(struct txgbe_adapter *adapter) static bool txgbe_is_sfp(struct txgbe_hw *hw) { switch (TCALL(hw, mac.ops.get_media_type)) { + case txgbe_media_type_fiber_qsfp: case txgbe_media_type_fiber: return true; default: @@ -6190,16 +6829,6 @@ static bool txgbe_is_sfp(struct txgbe_hw *hw) } } -static bool txgbe_is_backplane(struct txgbe_hw *hw) -{ - switch (TCALL(hw, mac.ops.get_media_type)) { - case txgbe_media_type_backplane: - return true; - default: - return false; - } -} - /** * txgbe_sfp_link_config - set up SFP+ link * @adapter: pointer to private adapter struct @@ -6320,9 +6949,6 @@ static void txgbe_up_complete(struct txgbe_adapter *adapter) u32 links_reg; u16 value; - /* workaround gpio int lost in lldp-on condition */ - 
reinit_gpio_int(adapter); - txgbe_get_hw_control(adapter); txgbe_setup_gpie(adapter); if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) @@ -6330,14 +6956,26 @@ static void txgbe_up_complete(struct txgbe_adapter *adapter) else txgbe_configure_msi_and_legacy(adapter); - /* enable the optics for SFP+ fiber */ - TCALL(hw, mac.ops.enable_tx_laser); + /* enable the optics for SFP+ fiber + * or power up mv phy + */ + if (hw->phy.media_type == txgbe_media_type_fiber || + hw->phy.media_type == txgbe_media_type_fiber_qsfp) + TCALL(hw, mac.ops.enable_tx_laser); + if(!((( hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || + adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LLDP)) { + if (hw->phy.media_type == txgbe_media_type_copper && + (hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI) + txgbe_external_phy_resume(hw); + } + smp_mb__before_atomic(); clear_bit(__TXGBE_DOWN, &adapter->state); txgbe_napi_enable_all(adapter); #ifndef TXGBE_NO_LLI txgbe_configure_lli(adapter); #endif + if (txgbe_is_sfp(hw)) { txgbe_sfp_link_config(adapter); } else if (txgbe_is_backplane(hw)) { @@ -6348,21 +6986,66 @@ static void txgbe_up_complete(struct txgbe_adapter *adapter) if (err) e_err(probe, "link_config FAILED %d\n", err); } - links_reg = rd32(hw, TXGBE_CFG_PORT_ST); - if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) { - if (links_reg & TXGBE_CFG_PORT_ST_LINK_10G) { - wr32(hw, TXGBE_MAC_TX_CFG, - (rd32(hw, TXGBE_MAC_TX_CFG) & - ~TXGBE_MAC_TX_CFG_SPEED_MASK) | - TXGBE_MAC_TX_CFG_SPEED_10G); - } else if (links_reg & (TXGBE_CFG_PORT_ST_LINK_1G | TXGBE_CFG_PORT_ST_LINK_100M)) { - wr32(hw, TXGBE_MAC_TX_CFG, - (rd32(hw, TXGBE_MAC_TX_CFG) & - ~TXGBE_MAC_TX_CFG_SPEED_MASK) | - TXGBE_MAC_TX_CFG_SPEED_1G); + + if (hw->mac.type == txgbe_mac_aml40) { + hw->mac.ops.clear_hw_cntrs(hw); + links_reg = rd32(hw, TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) { + if (links_reg & TXGBE_CFG_PORT_ST_AML_LINK_40G) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + 
~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | + TXGBE_MAC_TX_CFG_AML_SPEED_40G); + } + } + + wr32(hw, TXGBE_GPIO_DDR, + TXGBE_GPIO_DDR_0 | TXGBE_GPIO_DDR_1 | TXGBE_GPIO_DDR_3); + wr32(hw, TXGBE_GPIO_DR, TXGBE_GPIO_DR_1); + } else if (hw->mac.type == txgbe_mac_aml) { + hw->mac.ops.clear_hw_cntrs(hw); + links_reg = rd32(hw, TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) { + if (links_reg & TXGBE_CFG_PORT_ST_AML_LINK_25G) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | + TXGBE_MAC_TX_CFG_AML_SPEED_25G); + } else if (links_reg & TXGBE_CFG_PORT_ST_AML_LINK_10G) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | + TXGBE_MAC_TX_CFG_AML_SPEED_10G); + } + } + + wr32(hw, TXGBE_GPIO_INT_POLARITY, 0x0); + wr32(hw, TXGBE_GPIO_DDR, + TXGBE_GPIO_DDR_0 | TXGBE_GPIO_DDR_1 | TXGBE_GPIO_DDR_4 | TXGBE_GPIO_DDR_5); + wr32(hw, TXGBE_GPIO_DR, TXGBE_GPIO_DR_4 | TXGBE_GPIO_DR_5); + + msleep(10); + wr32(hw, TXGBE_GPIO_DR, TXGBE_GPIO_DR_0); + } else { + links_reg = rd32(hw, TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) { + if (links_reg & TXGBE_CFG_PORT_ST_LINK_10G) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_SPEED_MASK) | + TXGBE_MAC_TX_CFG_SPEED_10G); + } else if (links_reg & (TXGBE_CFG_PORT_ST_LINK_1G | TXGBE_CFG_PORT_ST_LINK_100M)) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_SPEED_MASK) | + TXGBE_MAC_TX_CFG_SPEED_1G); + } } } + /* workaround gpio int lost in lldp-on condition */ + reinit_gpio_int(adapter); + /* clear any pending interrupts, may auto mask */ rd32(hw, TXGBE_PX_IC(0)); rd32(hw, TXGBE_PX_IC(1)); @@ -6374,9 +7057,9 @@ static void txgbe_up_complete(struct txgbe_adapter *adapter) if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) { txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 0x03, 0x8011, &value); /* only enable T unit int */ - txgbe_write_mdio(&hw->phy_dev, 
hw->phy.addr, 31, 0xf043, 0x1); + txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xf043, 0x1); /* active high */ - txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xf041, 0x0); + txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xf041, 0x0); /* enable AN complete and link status change int */ txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 0x03, 0x8010, 0xc00); } @@ -6391,6 +7074,8 @@ static void txgbe_up_complete(struct txgbe_adapter *adapter) #ifdef POLL_LINK_STATUS mod_timer(&adapter->link_check_timer,jiffies); #endif + adapter->flags2 |= TXGBE_FLAG2_SERVICE_RUNNING; + /* PCIE recovery: record lan status */ if (hw->bus.lan_id == 0) { wr32m(hw, TXGBE_MIS_PRB_CTL, @@ -6445,6 +7130,69 @@ void txgbe_reinit_locked(struct txgbe_adapter *adapter) adapter->flags2 &= ~TXGBE_FLAG2_KR_PRO_REINIT; } +static void txgbe_reinit_locked_dma_reset(struct txgbe_adapter *adapter) +{ +#ifdef TXGBE_DMA_RESET + struct txgbe_hw *hw = &adapter->hw; + int i; +#endif + + if (adapter->flags2 & TXGBE_FLAG2_KR_PRO_REINIT) { + return; + } + + adapter->flags2 |= TXGBE_FLAG2_KR_PRO_REINIT; + + WARN_ON(in_interrupt()); + /* put off any impending NetWatchDogTimeout */ +#ifdef HAVE_NETIF_TRANS_UPDATE + netif_trans_update(adapter->netdev); +#else + adapter->netdev->trans_start = jiffies; +#endif + + while (test_and_set_bit(__TXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + txgbe_down(adapter); + +#ifdef TXGBE_DMA_RESET + if (TXGBE_DMA_RESET == 1) { + e_info(probe, "dma reset\n"); + + if (rd32(hw, PX_PF_PEND) & 0x3) { + e_dev_err("PX_PF_PEND case dma reset exit\n"); + goto skip_dma_rst; + } + + for (i = 0; i < 4; i++) { + if (rd32(hw, PX_VF_PEND(i))) { + e_dev_err("PX_VF_PEND case dma reset exit\n"); + goto skip_dma_rst; + } + } + wr32(hw, TXGBE_MIS_RST, + 1 << 4); + TXGBE_WRITE_FLUSH(hw); + msleep(1000); + + /* amlite: bme */ + wr32(hw, PX_PF_BME, 0x1); + } +skip_dma_rst: +#endif + /* + * If SR-IOV enabled then wait a bit before bringing the adapter + * back up to give the VFs time 
to respond to the reset. The + * two second wait is based upon the watchdog timer cycle in + * the VF driver. + */ + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) + msleep(2000); + txgbe_up(adapter); + clear_bit(__TXGBE_RESETTING, &adapter->state); + adapter->flags2 &= ~TXGBE_FLAG2_KR_PRO_REINIT; +} + void txgbe_up(struct txgbe_adapter *adapter) { /* hardware has been reset, we need to reload some things */ @@ -6479,6 +7227,7 @@ void txgbe_reset(struct txgbe_adapter *adapter) break; case TXGBE_ERR_MASTER_REQUESTS_PENDING: e_dev_err("master disable timed out\n"); + txgbe_tx_timeout_dorecovery(adapter); break; case TXGBE_ERR_EEPROM_VERSION: /* We are running on a pre-production device, log a warning */ @@ -6507,8 +7256,6 @@ void txgbe_reset(struct txgbe_adapter *adapter) hw->mac.dmac_config.link_speed = 0; hw->mac.dmac_config.fcoe_tc = 0; hw->mac.dmac_config.num_tcs = 0; - if (txgbe_is_lldp(hw)) - e_dev_err("Can not get lldp flags from flash\n"); #ifdef HAVE_PTP_1588_CLOCK if (test_bit(__TXGBE_PTP_RUNNING, &adapter->state)) @@ -6526,6 +7273,13 @@ void txgbe_clean_rx_ring(struct txgbe_ring *rx_ring) unsigned long size; u16 i; +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif + #ifdef HAVE_AF_XDP_ZC_SUPPORT if (rx_ring->xsk_pool) { txgbe_xsk_clean_rx_ring(rx_ring); @@ -6551,20 +7305,16 @@ void txgbe_clean_rx_ring(struct txgbe_ring *rx_ring) if (rx_buffer->skb) { struct sk_buff *skb = rx_buffer->skb; #ifndef CONFIG_TXGBE_DISABLE_PACKET_SPLIT - if (TXGBE_CB(skb)->dma_released) { - dma_unmap_single(dev, - TXGBE_CB(skb)->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - TXGBE_CB(skb)->dma = 0; - TXGBE_CB(skb)->dma_released = false; - } - if (TXGBE_CB(skb)->page_released) - dma_unmap_page(dev, - TXGBE_CB(skb)->dma, - txgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); + dma_unmap_page_attrs(rx_ring->dev, + 
TXGBE_CB(skb)->dma, + txgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + TXGBE_RX_DMA_ATTR); +#endif #else /* We need to clean up RSC frag lists */ skb = txgbe_merge_active_tail(skb); @@ -6583,9 +7333,25 @@ void txgbe_clean_rx_ring(struct txgbe_ring *rx_ring) if (!rx_buffer->page) continue; - dma_unmap_page(dev, rx_buffer->page_dma, - txgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE); + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->page_dma, + rx_buffer->page_offset, + txgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->page_dma, + txgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#if defined(HAVE_STRUCT_DMA_ATTRS) && defined(HAVE_SWIOTLB_SKIP_CPU_SYNC) + &attrs); +#else + TXGBE_RX_DMA_ATTR); +#endif + __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); rx_buffer->page = NULL; @@ -6698,7 +7464,7 @@ static void txgbe_fdir_filter_exit(struct txgbe_adapter *adapter) spin_unlock(&adapter->fdir_perfect_lock); } -void txgbe_disable_device(struct txgbe_adapter *adapter) +static void txgbe_disable_device(struct txgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct txgbe_hw *hw = &adapter->hw; @@ -6761,6 +7527,7 @@ void txgbe_disable_device(struct txgbe_adapter *adapter) #ifdef POLL_LINK_STATUS del_timer_sync(&adapter->link_check_timer); #endif + adapter->flags2 &= ~TXGBE_FLAG2_SERVICE_RUNNING; hw->f2c_mod_status = false; cancel_work_sync(&adapter->sfp_sta_task); @@ -6783,11 +7550,7 @@ void txgbe_disable_device(struct txgbe_adapter *adapter) for (i = 0 ; i < adapter->num_vfs; i++) adapter->vfinfo[i].clear_to_send = 0; - /* ping all the active vfs to let them know we are going down */ - txgbe_ping_all_vfs(adapter); - /* Disable all VFTE/VFRE 
TX/RX */ - //txgbe_disable_tx_rx(adapter); txgbe_set_all_vfs(adapter); } @@ -6824,35 +7587,21 @@ void txgbe_down(struct txgbe_adapter *adapter) #endif txgbe_reset(adapter); - if(!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || - adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LLDP)) - /* power down the optics for SFP+ fiber */ - TCALL(&adapter->hw, mac.ops.disable_tx_laser); + /* power down the optics for SFP+ fiber or mv phy */ + if(!((( hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || + adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LLDP)) { + if (hw->phy.media_type == txgbe_media_type_fiber || + hw->phy.media_type == txgbe_media_type_fiber_qsfp) + TCALL(hw, mac.ops.disable_tx_laser); + else if (hw->phy.media_type == txgbe_media_type_copper && + (hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI) + txgbe_external_phy_suspend(hw); + } txgbe_clean_all_tx_rings(adapter); txgbe_clean_all_rx_rings(adapter); } -/** - * txgbe_init_shared_code - Initialize the shared code - * @hw: pointer to hardware structure - * - * This will assign function pointers and assign the MAC type and PHY code. - * Does not touch the hardware. This function must be called prior to any - * other function in the shared code. The txgbe_hw structure should be - * memset to 0 prior to calling this function. 
The following fields in - * hw structure should be filled in prior to calling this function: - * hw_addr, back, device_id, vendor_id, subsystem_device_id, - * subsystem_vendor_id, and revision_id - **/ -s32 txgbe_init_shared_code(struct txgbe_hw *hw) -{ - s32 status; - - status = txgbe_init_ops(hw); - return status; -} - /** * txgbe_sw_init - Initialize general software structures (struct txgbe_adapter) * @adapter: board private structure to initialize @@ -6879,6 +7628,8 @@ static int __devinit txgbe_sw_init(struct txgbe_adapter *adapter) int j, bwg_pct; #endif /* CONFIG_DCB */ u32 fw_version; + u32 flash_header; + u32 flash_header_index; /* PCI config space info */ hw->vendor_id = pdev->vendor; @@ -6891,13 +7642,29 @@ static int __devinit txgbe_sw_init(struct txgbe_adapter *adapter) goto out; } + err = txgbe_init_shared_code(hw); + if (err) { + e_err(probe, "init_shared_code failed: %d\n", err); + goto out; + } + + txgbe_flash_read_dword(hw, 0x0, &flash_header); + if (((flash_header >> 16) & 0xffff) == TXGBE_FLASH_HEADER_FLAG) + flash_header_index = 0x0; + else + flash_header_index = 0x1; + hw->oem_svid = pdev->subsystem_vendor; - hw->oem_ssid = pdev->subsystem_device; + hw->oem_ssid = pdev->subsystem_device; if (pdev->subsystem_vendor == 0x8088) { hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_device_id = pdev->subsystem_device; } else { - txgbe_flash_read_dword(hw, 0xfffdc, &ssid); + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + txgbe_flash_read_dword(hw, (flash_header_index * 0x10000) + 0x302c, &ssid); + else + txgbe_flash_read_dword(hw, 0xfffdc, &ssid); + if (ssid == 0x1) { e_err(probe, "read of internel subsystem device id failed\n"); err = -ENODEV; @@ -6908,16 +7675,10 @@ static int __devinit txgbe_sw_init(struct txgbe_adapter *adapter) hw->subsystem_device_id << 8; } - txgbe_flash_read_dword(hw, 0x13a, &fw_version); - snprintf(adapter->fw_version, sizeof(adapter->fw_version), + txgbe_flash_read_dword(hw, 
(flash_header_index * 0x10000) + 0x13a, &fw_version); + snprintf(adapter->fl_version, sizeof(adapter->fl_version), "0x%08x", fw_version); - - err = txgbe_init_shared_code(hw); - if (err) { - e_err(probe, "init_shared_code failed: %d\n", err); - goto out; - } adapter->mac_table = kzalloc(sizeof(struct txgbe_mac_addr) * hw->mac.num_rar_entries, GFP_ATOMIC); @@ -6928,7 +7689,7 @@ static int __devinit txgbe_sw_init(struct txgbe_adapter *adapter) } memcpy(adapter->rss_key, def_rss_key, sizeof(def_rss_key)); -#ifndef HAVE_NO_BITMAP +#ifdef HAVE_AF_XDP_SUPPORT adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL); if (!adapter->af_xdp_zc_qps) return -ENOMEM; @@ -6957,8 +7718,6 @@ static int __devinit txgbe_sw_init(struct txgbe_adapter *adapter) adapter->flags2 |= TXGBE_FLAG2_TEMP_SENSOR_CAPABLE; hw->phy.smart_speed = txgbe_smart_speed_off; adapter->flags2 |= TXGBE_FLAG2_EEE_CAPABLE; - if(txgbe_is_lldp(hw)) - e_dev_err("Can not get lldp flags from flash\n"); #if IS_ENABLED(CONFIG_FCOE) /* FCoE support exists, always init the FCoE lock */ @@ -6968,6 +7727,8 @@ static int __devinit txgbe_sw_init(struct txgbe_adapter *adapter) /* n-tuple support exists, always init our spinlock */ spin_lock_init(&adapter->fdir_perfect_lock); + mutex_init(&adapter->e56_lock); + #if IS_ENABLED(CONFIG_DCB) adapter->dcb_cfg.num_tcs.pg_tcs = 8; @@ -7019,6 +7780,8 @@ static int __devinit txgbe_sw_init(struct txgbe_adapter *adapter) hw->fc.send_xon = true; hw->fc.disable_fc_autoneg = false; + hw->dac_sfp = false; + /* set default ring sizes */ adapter->tx_ring_count = TXGBE_DEFAULT_TXD; adapter->rx_ring_count = TXGBE_DEFAULT_RXD; @@ -7034,6 +7797,20 @@ static int __devinit txgbe_sw_init(struct txgbe_adapter *adapter) adapter->num_vmdqs = 1; set_bit(0, &adapter->fwd_bitmask); set_bit(__TXGBE_DOWN, &adapter->state); + + adapter->fec_link_mode = TXGBE_PHY_FEC_AUTO; + adapter->cur_fec_link = TXGBE_PHY_FEC_AUTO; + + adapter->link_valid = true; + + if (hw->mac.type == txgbe_mac_sp) + 
adapter->desc_reserved = DESC_RESERVED; + else + adapter->desc_reserved = DESC_RESERVED_AML; + + bitmap_zero(adapter->limited_vlans, 4096); + + memset(adapter->i2c_eeprom, 0, sizeof(u8)*512); out: return err; } @@ -7078,6 +7855,10 @@ int txgbe_setup_tx_resources(struct txgbe_ring *tx_ring) if (!tx_ring->desc) goto err; +#ifdef TXGBE_TXHEAD_WB + txgbe_setup_headwb_resources(tx_ring); +#endif + return 0; err: @@ -7175,6 +7956,9 @@ int txgbe_setup_rx_resources(struct txgbe_ring *rx_ring) if (!rx_ring->desc) goto err; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + if (!rx_ring->q_vector) return 0; #ifdef HAVE_XDP_BUFF_RXQ @@ -7274,6 +8058,37 @@ void txgbe_free_isb_resources(struct txgbe_adapter *adapter) adapter->isb_mem = NULL; } +#ifdef TXGBE_TXHEAD_WB +void txgbe_free_headwb_resources(struct txgbe_ring *ring) +{ + u8 headwb_size = 0; + struct txgbe_adapter *adapter; + struct txgbe_hw *hw; + + if (ring->q_vector) { + adapter = ring->q_vector->adapter; + hw = &adapter->hw; + if (hw->mac.type == txgbe_mac_sp) + return; + } else { + return; + } + + if (TXGBE_TXHEAD_WB == 1) + headwb_size = 16; + else if (TXGBE_TXHEAD_WB == 2) + headwb_size = 16; + else + headwb_size = 1; + + if (ring->headwb_mem) { + dma_free_coherent(ring->dev, sizeof(u32) * headwb_size, + ring->headwb_mem, ring->headwb_dma); + ring->headwb_mem = NULL; + } +} +#endif + /** * txgbe_free_tx_resources - Free Tx Resources per Queue * @tx_ring: Tx descriptor ring for a specific queue @@ -7294,6 +8109,10 @@ void txgbe_free_tx_resources(struct txgbe_ring *tx_ring) dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); tx_ring->desc = NULL; + +#ifdef TXGBE_TXHEAD_WB + txgbe_free_headwb_resources(tx_ring); +#endif } /** @@ -7497,7 +8316,7 @@ int txgbe_open(struct net_device *netdev) vxlan_get_rx_port(netdev); #endif /* HAVE_UDP_ENC_RX_OFFLOAD */ #endif /* HAVE_UDP_ENC_RX_OFFLOAD && HAVE_UDP_TUNNEL_NIC_INFO */ - + return 0; err_set_queues: @@ -7530,9 +8349,18 @@ static void 
txgbe_close_suspend(struct txgbe_adapter *adapter) txgbe_ptp_suspend(adapter); #endif txgbe_disable_device(adapter); + + /* power down the optics for SFP+ fiber or mv phy */ if(!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || - adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LLDP)) - TCALL(hw, mac.ops.disable_tx_laser); + adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LLDP)) { + if (hw->phy.media_type == txgbe_media_type_fiber || + hw->phy.media_type == txgbe_media_type_fiber_qsfp) + TCALL(hw, mac.ops.disable_tx_laser); + else if (hw->phy.media_type == txgbe_media_type_copper && + (hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI) + txgbe_external_phy_suspend(hw); + } + txgbe_clean_all_tx_rings(adapter); txgbe_clean_all_rx_rings(adapter); @@ -7559,8 +8387,9 @@ int txgbe_close(struct net_device *netdev) struct txgbe_adapter *adapter = netdev_priv(netdev); struct txgbe_hw *hw = &adapter->hw; - if ( hw->subsystem_device_id == TXGBE_ID_WX1820_KR_KX_KX4 || - hw->subsystem_device_id == TXGBE_ID_SP1000_KR_KX_KX4 ){ + if (hw->subsystem_device_id == TXGBE_ID_WX1820_KR_KX_KX4 || + hw->subsystem_device_id == TXGBE_ID_SP1000_KR_KX_KX4 || + hw->dac_sfp) { txgbe_bp_close_protect(adapter); } @@ -7576,6 +8405,8 @@ int txgbe_close(struct net_device *netdev) txgbe_free_all_tx_resources(adapter); txgbe_fdir_filter_exit(adapter); + memset(&adapter->ft_filter_info, 0, + sizeof(struct txgbe_5tuple_filter_info)); txgbe_release_hw_control(adapter); @@ -7920,6 +8751,7 @@ void txgbe_update_stats(struct txgbe_adapter *adapter) #ifndef TXGBE_NO_LRO u32 flushed = 0, coal = 0; #endif + u8 pf_queue_offset = 0; if (test_bit(__TXGBE_DOWN, &adapter->state) || test_bit(__TXGBE_RESETTING, &adapter->state)) @@ -8057,11 +8889,21 @@ void txgbe_update_stats(struct txgbe_adapter *adapter) bprc = rd32(hw, TXGBE_RX_BC_FRAMES_GOOD_LOW); hwstats->bprc += bprc; hwstats->mprc = 0; - - for (i = 0; i < 128; i++) + hwstats->rdpc += rd32(hw, TXGBE_RDB_PKT_CNT); + hwstats->rddc += rd32(hw, 
TXGBE_RDB_DRP_CNT); + hwstats->psrpc += rd32(hw, TXGBE_PSR_PKT_CNT); + hwstats->psrdc += rd32(hw, TXGBE_PSR_DBG_DRP_CNT); + hwstats->untag += rd32(hw, TXGBE_RSEC_LSEC_UNTAG_PKT); + hwstats->tdmpc += rd32(hw, TXGBE_TDM_PKT_CNT); + hwstats->tdmdc += rd32(hw, TXGBE_TDM_DRP_CNT); + hwstats->tdbpc += rd32(hw, TXGBE_TDB_OUT_PKT_CNT); + + pf_queue_offset = adapter->ring_feature[RING_F_VMDQ].offset * + (adapter->ring_feature[RING_F_RSS].mask + 1); + + for (i = pf_queue_offset; i < 128; i++) hwstats->mprc += rd32(hw, TXGBE_PX_MPRC(i)); - hwstats->roc += rd32(hw, TXGBE_RX_OVERSIZE_FRAMES_GOOD); hwstats->rlec += rd32(hw, TXGBE_RX_LEN_ERROR_FRAMES_LOW); lxon = rd32(hw, TXGBE_RDB_LXONTXC); @@ -8126,6 +8968,20 @@ static void txgbe_fdir_reinit_subtask(struct txgbe_adapter *adapter) } #endif /* HAVE_TX_MQ */ +void txgbe_irq_rearm_queues(struct txgbe_adapter *adapter, + u64 qmask) +{ + u32 mask; + + mask = (qmask & 0xFFFFFFFF); + wr32(&adapter->hw, TXGBE_PX_IMC(0), mask); + wr32(&adapter->hw, TXGBE_PX_ICS(0), mask); + + mask = (qmask >> 32); + wr32(&adapter->hw, TXGBE_PX_IMC(1), mask); + wr32(&adapter->hw, TXGBE_PX_ICS(1), mask); +} + /** * txgbe_check_hang_subtask - check for hung queues and dropped interrupts * @adapter - pointer to the device adapter structure @@ -8138,6 +8994,7 @@ static void txgbe_fdir_reinit_subtask(struct txgbe_adapter *adapter) static void txgbe_check_hang_subtask(struct txgbe_adapter *adapter) { int i; + u64 eics = 0; /* If we're down or resetting, just bail */ if (test_bit(__TXGBE_DOWN, &adapter->state) || @@ -8153,6 +9010,17 @@ static void txgbe_check_hang_subtask(struct txgbe_adapter *adapter) set_check_for_tx_hang(adapter->xdp_ring[i]); } + if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) { + /* get one bit for every active tx/rx interrupt vector */ + for (i = 0; i < adapter->num_q_vectors; i++) { + struct txgbe_q_vector *qv = adapter->q_vector[i]; + if (qv->rx.ring || qv->tx.ring) + eics |= BIT_ULL(i); + } + } + /* Cause software interrupt to ensure 
rings are cleaned */ + txgbe_irq_rearm_queues(adapter, eics); + } /** @@ -8172,6 +9040,9 @@ static void txgbe_watchdog_update_link(struct txgbe_adapter *adapter) #ifndef POLL_LINK_STATUS if (!(adapter->flags & TXGBE_FLAG_NEED_LINK_UPDATE)) return; + + if (test_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + msleep(20); #endif link_speed = TXGBE_LINK_SPEED_10GB_FULL; @@ -8184,10 +9055,13 @@ static void txgbe_watchdog_update_link(struct txgbe_adapter *adapter) adapter->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE; } - for(i=0;i<3;i++){ + for(i = 0;i < 3;i++){ TCALL(hw, mac.ops.check_link, &link_speed, &link_up, false); - msleep(1); + msleep(10); } +#else + if (adapter->link_up == link_up && adapter->link_speed == link_speed) + return; #endif adapter->link_up = link_up; @@ -8212,25 +9086,68 @@ static void txgbe_watchdog_update_link(struct txgbe_adapter *adapter) txgbe_ptp_start_cyclecounter(adapter); #endif - if (link_speed & TXGBE_LINK_SPEED_10GB_FULL) { - wr32(hw, TXGBE_MAC_TX_CFG, - (rd32(hw, TXGBE_MAC_TX_CFG) & - ~TXGBE_MAC_TX_CFG_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | - TXGBE_MAC_TX_CFG_SPEED_10G); - } else if (link_speed & (TXGBE_LINK_SPEED_1GB_FULL | - TXGBE_LINK_SPEED_100_FULL | TXGBE_LINK_SPEED_10_FULL)) { - wr32(hw, TXGBE_MAC_TX_CFG, - (rd32(hw, TXGBE_MAC_TX_CFG) & - ~TXGBE_MAC_TX_CFG_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | - TXGBE_MAC_TX_CFG_SPEED_1G); - } + if (hw->mac.type == txgbe_mac_aml40) { + if (!(hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core0 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core1 || + txgbe_is_backplane(hw))) + txgbe_reconfig_mac(hw); + + if (link_speed & TXGBE_LINK_SPEED_40GB_FULL) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_AML_SPEED_40G); + } + /* enable mac receiver */ + wr32m(hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_RE, TXGBE_MAC_RX_CFG_RE); + } else if (hw->mac.type == txgbe_mac_aml) { + if (!(hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + 
hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1 || + txgbe_is_backplane(hw))) + txgbe_reconfig_mac(hw); + + if (link_speed & TXGBE_LINK_SPEED_25GB_FULL) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_AML_SPEED_25G); + } else if (link_speed & TXGBE_LINK_SPEED_10GB_FULL) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_AML_SPEED_10G); + } else { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_AML_SPEED_1G); + } + + /* enable mac receiver */ + wr32m(hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_RE, TXGBE_MAC_RX_CFG_RE); + } else { + if (link_speed & TXGBE_LINK_SPEED_10GB_FULL) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_SPEED_10G); + } else if (link_speed & (TXGBE_LINK_SPEED_1GB_FULL | + TXGBE_LINK_SPEED_100_FULL | TXGBE_LINK_SPEED_10_FULL)) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_SPEED_1G); + } - /* Re configure MAC RX */ - reg = rd32(hw, TXGBE_MAC_RX_CFG); - wr32(hw, TXGBE_MAC_RX_CFG, reg); - wr32(hw, TXGBE_MAC_PKT_FLT, TXGBE_MAC_PKT_FLT_PR); - reg = rd32(hw, TXGBE_MAC_WDG_TIMEOUT); - wr32(hw, TXGBE_MAC_WDG_TIMEOUT, reg); + /* Re configure MAC RX */ + reg = rd32(hw, TXGBE_MAC_RX_CFG); + wr32(hw, TXGBE_MAC_RX_CFG, reg); + wr32(hw, TXGBE_MAC_PKT_FLT, TXGBE_MAC_PKT_FLT_PR); + reg = rd32(hw, TXGBE_MAC_WDG_TIMEOUT); + wr32(hw, TXGBE_MAC_WDG_TIMEOUT, reg); + } } if (hw->mac.ops.dmac_config && hw->mac.dmac_config.watchdog_timer) { @@ -8303,7 +9220,33 @@ static void txgbe_watchdog_link_is_up(struct txgbe_adapter *adapter) flow_rx = (rd32(hw, TXGBE_MAC_RX_FLOW_CTRL) & 0x101) == 0x1; flow_tx = 
!!(TXGBE_RDB_RFCC_RFCE_802_3X & rd32(hw, TXGBE_RDB_RFCC)); - e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", + + switch (adapter->link_speed) { + case TXGBE_LINK_SPEED_40GB_FULL: + adapter->speed = SPEED_40000; + break; + case TXGBE_LINK_SPEED_25GB_FULL: + adapter->speed = SPEED_25000; + break; + case TXGBE_LINK_SPEED_10GB_FULL: + adapter->speed = SPEED_10000; + break; + case TXGBE_LINK_SPEED_1GB_FULL: + default: + adapter->speed = SPEED_1000; + break; + } + +#ifndef POLL_LINK_STATUS + if (hw->mac.type == txgbe_mac_aml) + adapter->cur_fec_link = txgbe_get_cur_fec_mode(hw); +#endif + + e_info(drv, "NIC Link is Up %s, Flow Control: %s%s\n", + (link_speed == TXGBE_LINK_SPEED_40GB_FULL ? + "40 Gbps" : + (link_speed == TXGBE_LINK_SPEED_25GB_FULL ? + "25 Gbps" : (link_speed == TXGBE_LINK_SPEED_10GB_FULL ? "10 Gbps" : (link_speed == TXGBE_LINK_SPEED_1GB_FULL ? @@ -8312,12 +9255,30 @@ static void txgbe_watchdog_link_is_up(struct txgbe_adapter *adapter) "100 Mbps" : (link_speed == TXGBE_LINK_SPEED_10_FULL ? "10 Mbps" : - "unknown speed")))), + "unknown speed")))))), ((flow_rx && flow_tx) ? "RX/TX" : (flow_rx ? "RX" : - (flow_tx ? "TX" : "None")))); - + (flow_tx ? "TX" : "None"))), + ((hw->mac.type == txgbe_mac_aml && link_speed == TXGBE_LINK_SPEED_25GB_FULL) ? + ((adapter->cur_fec_link == TXGBE_PHY_FEC_BASER) ? ", FEC: BASE-R" :\ + (adapter->cur_fec_link == TXGBE_PHY_FEC_RS) ? ", FEC: RS" :\ + (adapter->cur_fec_link == TXGBE_PHY_FEC_OFF) ? 
", FEC: OFF":"") : "")); + + if (!adapter->backplane_an && + (hw->dac_sfp || + (hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_KR_KX_KX4) + && hw->mac.type == txgbe_mac_sp) + txgbe_enable_rx_adapter(hw); + + if (adapter->tx_unidir_mode) { + wr32m(hw, 0x11004, BIT(10), BIT(10)); + wr32m(hw, 0x11004, BIT(0), BIT(0)); + e_dev_info("Enable loopback and disable rx : %x\n.", + rd32(hw, 0x11004)); + } + txgbe_check_vlan_rate_limit(adapter); netif_carrier_on(netdev); + txgbe_check_vf_rate_limit(adapter); netif_tx_wake_all_queues(netdev); #ifdef HAVE_VIRTUAL_STATION @@ -8337,7 +9298,26 @@ static void txgbe_watchdog_link_is_up(struct txgbe_adapter *adapter) txgbe_update_default_up(adapter); /* ping all the active vfs to let them know link has changed */ - txgbe_ping_all_vfs(adapter); + //txgbe_ping_all_vfs(adapter); + txgbe_ping_all_vfs_with_link_status(adapter, true); +} + +static void txgbe_link_down_flush_tx(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + if (hw->mac.type != txgbe_mac_aml && + hw->mac.type != txgbe_mac_aml40) + return; + + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, + ~TXGBE_MAC_RX_CFG_RE); + wr32m(hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_LM, TXGBE_MAC_RX_CFG_LM); + + mdelay(20); + + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_LM, 0); } /** @@ -8349,29 +9329,42 @@ static void txgbe_watchdog_link_is_down(struct txgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct txgbe_hw *hw = &adapter->hw; + adapter->link_up = false; adapter->link_speed = 0; - if ( hw->subsystem_device_id == TXGBE_ID_WX1820_KR_KX_KX4 || - hw->subsystem_device_id == TXGBE_ID_SP1000_KR_KX_KX4 ){ - txgbe_bp_down_event(adapter); - } + if (hw->mac.type == txgbe_mac_sp) + if (hw->subsystem_device_id == TXGBE_ID_WX1820_KR_KX_KX4 || + hw->subsystem_device_id == TXGBE_ID_SP1000_KR_KX_KX4 || + hw->dac_sfp) + txgbe_bp_down_event(adapter); /* only continue if link was up previously */ if (!netif_carrier_ok(netdev)) return; + if 
(hw->mac.type == txgbe_mac_aml || + hw->mac.type == txgbe_mac_aml40) + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + #ifdef HAVE_PTP_1588_CLOCK if (test_bit(__TXGBE_PTP_RUNNING, &adapter->state)) txgbe_ptp_start_cyclecounter(adapter); #endif + if (hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core0 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core1 || + txgbe_is_backplane(hw)) + adapter->an_done = false; + e_info(drv, "NIC Link is Down\n"); netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); + txgbe_link_down_flush_tx(adapter); /* ping all the active vfs to let them know link has changed */ - txgbe_ping_all_vfs(adapter); + //txgbe_ping_all_vfs(adapter); + txgbe_ping_all_vfs_with_link_status(adapter, false); } static bool txgbe_ring_tx_pending(struct txgbe_adapter *adapter) @@ -8509,10 +9502,16 @@ static void txgbe_watchdog_subtask(struct txgbe_adapter *adapter) test_bit(__TXGBE_RESETTING, &adapter->state)) return; - if ( hw->subsystem_device_id == TXGBE_ID_WX1820_KR_KX_KX4 || - hw->subsystem_device_id == TXGBE_ID_SP1000_KR_KX_KX4 ){ - txgbe_bp_watchdog_event(adapter); - } + if (hw->mac.type == txgbe_mac_sp) + if (hw->subsystem_device_id == TXGBE_ID_WX1820_KR_KX_KX4 || + hw->subsystem_device_id == TXGBE_ID_SP1000_KR_KX_KX4 || + hw->dac_sfp) + txgbe_bp_watchdog_event(adapter); + + if (hw->mac.type == txgbe_mac_aml || + hw->mac.type == txgbe_mac_aml40) + txgbe_e56_bp_watchdog_event(adapter); + #ifndef POLL_LINK_STATUS if(BOND_CHECK_LINK_MODE == 1){ value = rd32(hw, 0x14404); @@ -8522,7 +9521,7 @@ static void txgbe_watchdog_subtask(struct txgbe_adapter *adapter) } if (!(adapter->flags2 & TXGBE_FLAG2_LINK_DOWN)) txgbe_watchdog_update_link(adapter); - + if (adapter->link_up) txgbe_watchdog_link_is_up(adapter); else @@ -8533,11 +9532,47 @@ static void txgbe_watchdog_subtask(struct txgbe_adapter *adapter) txgbe_spoof_check(adapter); #endif /* CONFIG_PCI_IOV */ + txgbe_update_stats(adapter); txgbe_watchdog_flush_tx(adapter); } +static void 
txgbe_phy_event_subtask(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 rdata; + + /* if interface is down do nothing */ + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_REMOVING, &adapter->state) || + test_bit(__TXGBE_RESETTING, &adapter->state)) + return; + + if (!(adapter->flags3 & TXGBE_FLAG3_PHY_EVENT)) + return; + + adapter->flags3 &= ~TXGBE_FLAG3_PHY_EVENT; + + mutex_lock(&adapter->e56_lock); + rdata = rd32_ephy(hw, E56PHY_INTR_0_ADDR); + if (rdata & E56PHY_INTR_0_IDLE_ENTRY1) { + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ENABLE_ADDR, 0x0); + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ADDR, E56PHY_INTR_0_IDLE_ENTRY1); + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ENABLE_ADDR, E56PHY_INTR_0_IDLE_ENTRY1); + } + + rdata = rd32_ephy(hw, E56PHY_INTR_1_ADDR); + if (rdata & E56PHY_INTR_1_IDLE_EXIT1) { + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ENABLE_ADDR, 0x0); + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ADDR, E56PHY_INTR_1_IDLE_EXIT1); + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ENABLE_ADDR, E56PHY_INTR_1_IDLE_EXIT1); + + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + } + mutex_unlock(&adapter->e56_lock); +} + /** * txgbe_sfp_detection_subtask - poll for SFP+ cable * @adapter - the txgbe adapter structure @@ -8545,7 +9580,7 @@ static void txgbe_watchdog_subtask(struct txgbe_adapter *adapter) static void txgbe_sfp_detection_subtask(struct txgbe_adapter *adapter) { struct txgbe_hw *hw = &adapter->hw; - struct txgbe_mac_info *mac = &hw->mac; + u32 value = 0; s32 err; /* not searching for SFP so there is nothing to do here */ @@ -8563,6 +9598,32 @@ static void txgbe_sfp_detection_subtask(struct txgbe_adapter *adapter) adapter->sfp_poll_time = jiffies + TXGBE_SFP_POLL_JIFFIES - 1; + if (hw->mac.type == txgbe_mac_aml40) { + value = rd32(hw, TXGBE_GPIO_EXT); + if (value & TXGBE_SFP1_MOD_PRST_LS) { + err = TXGBE_ERR_SFP_NOT_PRESENT; + adapter->flags2 &= ~TXGBE_FLAG2_SFP_NEEDS_RESET; + goto sfp_out; + } + } + + if (hw->mac.type == txgbe_mac_aml) { + value = rd32(hw, 
TXGBE_GPIO_EXT); + if (value & TXGBE_SFP1_MOD_ABS_LS) { + err = TXGBE_ERR_SFP_NOT_PRESENT; + adapter->flags2 &= ~TXGBE_FLAG2_SFP_NEEDS_RESET; + goto sfp_out; + } + } + + /* wait for sfp module ready*/ + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + msleep(200); + + adapter->eeprom_type = 0; + adapter->eeprom_len = 0; + memset(adapter->i2c_eeprom, 0, sizeof(u8)*512); + err = TCALL(hw, phy.ops.identify_sfp); if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) goto sfp_out; @@ -8583,19 +9644,9 @@ static void txgbe_sfp_detection_subtask(struct txgbe_adapter *adapter) adapter->flags2 &= ~TXGBE_FLAG2_SFP_NEEDS_RESET; - if (hw->phy.multispeed_fiber) { - /* Set up dual speed SFP+ support */ - mac->ops.setup_link = txgbe_setup_mac_link_multispeed_fiber; - mac->ops.setup_mac_link = txgbe_setup_mac_link; - mac->ops.set_rate_select_speed = - txgbe_set_hard_rate_select_speed; - } else { - mac->ops.setup_link = txgbe_setup_mac_link; - mac->ops.set_rate_select_speed = - txgbe_set_hard_rate_select_speed; - hw->phy.autoneg_advertised = 0; - } + err = hw->mac.ops.setup_sfp(hw); + hw->phy.autoneg_advertised = 0; adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type); @@ -8619,13 +9670,14 @@ static void txgbe_sfp_link_config_subtask(struct txgbe_adapter *adapter) u32 speed; bool autoneg = false; u16 value; + u32 gssr = hw->phy.phy_semaphore_mask; u8 device_type = hw->subsystem_device_id & 0xF0; if (!(adapter->flags & TXGBE_FLAG_NEED_LINK_CONFIG)) return; /* someone else is in init, wait until next service event */ - if (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + if (test_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) return; adapter->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG; @@ -8636,34 +9688,48 @@ static void txgbe_sfp_link_config_subtask(struct txgbe_adapter *adapter) if (value & 0x400) adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; if (!(value & 0x800)) { - clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); return; 
} } - if(device_type == TXGBE_ID_MAC_XAUI || - (txgbe_get_media_type(hw) == txgbe_media_type_copper && + if(device_type == TXGBE_ID_MAC_XAUI || + (TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_copper && device_type == TXGBE_ID_SFI_XAUI)) { - speed = TXGBE_LINK_SPEED_10GB_FULL; - } else if (device_type == TXGBE_ID_MAC_SGMII) { - speed = TXGBE_LINK_SPEED_1GB_FULL; - }else { - speed = hw->phy.autoneg_advertised; - if ((!speed) && (hw->mac.ops.get_link_capabilities)) { - TCALL(hw, mac.ops.get_link_capabilities, &speed, &autoneg); - /* setup the highest link when no autoneg */ - if (!autoneg) { - if (speed & TXGBE_LINK_SPEED_10GB_FULL) - speed = TXGBE_LINK_SPEED_10GB_FULL; - } - } - } + speed = TXGBE_LINK_SPEED_10GB_FULL; + } else if (device_type == TXGBE_ID_MAC_SGMII) { + speed = TXGBE_LINK_SPEED_1GB_FULL; + } else { + speed = hw->phy.autoneg_advertised; + if ((!speed) && (hw->mac.ops.get_link_capabilities)) { + TCALL(hw, mac.ops.get_link_capabilities, &speed, &autoneg); + /* setup the highest link when no autoneg */ + if (!autoneg) { + if (speed & TXGBE_LINK_SPEED_25GB_FULL) + speed = TXGBE_LINK_SPEED_25GB_FULL; + else if (speed & TXGBE_LINK_SPEED_10GB_FULL) + speed = TXGBE_LINK_SPEED_10GB_FULL; + } + } + } + + /* firmware is configuring phy now, delay host driver config action */ + if (hw->mac.type == txgbe_mac_aml || + hw->mac.type == txgbe_mac_aml40) { + if (TCALL(hw, mac.ops.acquire_swfw_sync, gssr) != 0) { + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + e_warn(probe, "delay config ephy\n"); + return; + } + } TCALL(hw, mac.ops.setup_link, speed, false); + if (hw->mac.type == txgbe_mac_aml || + hw->mac.type == txgbe_mac_aml40) + TCALL(hw, mac.ops.release_swfw_sync, gssr); + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; adapter->link_check_timeout = jiffies; - clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); } static void txgbe_sfp_reset_eth_phy_subtask(struct txgbe_adapter *adapter) @@ -8678,6 +9744,9 @@ static void 
txgbe_sfp_reset_eth_phy_subtask(struct txgbe_adapter *adapter) adapter->flags2 &= ~TXGBE_FLAG_NEED_ETH_PHY_RESET; + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + return; + TCALL(hw, mac.ops.check_link, &speed, &linkup, false); if (!linkup) { txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, @@ -8707,7 +9776,7 @@ static void txgbe_service_timer(struct timer_list *t) /* poll faster when waiting for link */ if (adapter->flags & TXGBE_FLAG_NEED_LINK_UPDATE) { if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4) - next_event_offset = HZ ; + next_event_offset = HZ / 10; else if (BOND_CHECK_LINK_MODE == 1) next_event_offset = HZ / 100; else @@ -8721,12 +9790,12 @@ static void txgbe_service_timer(struct timer_list *t) TXGBE_MIS_PRB_CTL_LAN1_UP); if (val & TXGBE_MIS_PRB_CTL_LAN0_UP) { if (hw->bus.lan_id == 0) { - adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; + adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; e_info(probe, "txgbe_service_timer: set recover on Lan0\n"); } } else if (val & TXGBE_MIS_PRB_CTL_LAN1_UP) { if (hw->bus.lan_id == 1) { - adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; + adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; e_info(probe, "txgbe_service_timer: set recover on Lan1\n"); } } @@ -8740,7 +9809,6 @@ static void txgbe_service_timer(struct timer_list *t) (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1) || (hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core0) || (hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core1)) { - next_event_offset = HZ/10; queue_work(txgbe_wq, &adapter->sfp_sta_task); } } @@ -8805,6 +9873,46 @@ static void txgbe_sfp_phy_status_work(struct work_struct *work) } } +static void txgbe_amlit_temp_subtask(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 link_speed = 0, val = 0; + s32 status = 0; + int temp; + + if (hw->mac.type != txgbe_mac_aml && + hw->mac.type != txgbe_mac_aml40) + return; + + if (!netif_carrier_ok(adapter->netdev)) + return; + + status = 
txgbe_e56_get_temp(hw, &temp); + if (status) + return; + + if (!(temp - adapter->amlite_temp > 4 || + adapter->amlite_temp - temp > 4)) + return; + + adapter->amlite_temp = temp; + val = rd32(hw, TXGBE_CFG_PORT_ST); + if (val & TXGBE_AMLITE_CFG_LED_CTL_LINK_40G_SEL) + link_speed = TXGBE_LINK_SPEED_40GB_FULL; + else if (val & TXGBE_AMLITE_CFG_LED_CTL_LINK_25G_SEL) + link_speed = TXGBE_LINK_SPEED_25GB_FULL; + else + link_speed = TXGBE_LINK_SPEED_10GB_FULL; + + mutex_lock(&adapter->e56_lock); + if (hw->mac.type == txgbe_mac_aml) + txgbe_temp_track_seq(hw, link_speed); + else if (hw->mac.type == txgbe_mac_aml40) + txgbe_temp_track_seq_40g(hw, link_speed); + mutex_unlock(&adapter->e56_lock); + +} + #ifdef POLL_LINK_STATUS /** * txgbe_service_timer - Timer Call-back @@ -8836,11 +9944,19 @@ static void txgbe_reset_subtask(struct txgbe_adapter *adapter) { u32 reset_flag = 0; u32 value = 0; + union txgbe_tx_desc *tx_desc; + int i, j; + u32 desc_error[4] = {0, 0, 0, 0}; + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_ring *tx_ring; + struct txgbe_tx_buffer *tx_buffer; + u32 size; if (!(adapter->flags2 & (TXGBE_FLAG2_PF_RESET_REQUESTED | TXGBE_FLAG2_DEV_RESET_REQUESTED | TXGBE_FLAG2_GLOBAL_RESET_REQUESTED | - TXGBE_FLAG2_RESET_INTR_RECEIVED))) + TXGBE_FLAG2_RESET_INTR_RECEIVED | + TXGBE_FLAG2_DMA_RESET_REQUESTED))) return; /* If we're already down, just bail */ @@ -8848,6 +9964,49 @@ static void txgbe_reset_subtask(struct txgbe_adapter *adapter) test_bit(__TXGBE_REMOVING, &adapter->state)) return; + if (netif_msg_tx_err(adapter)) { + for (i = 0; i < 4; i++) + desc_error[i] = rd32(hw, TXGBE_TDM_DESC_FATAL(i)); + + /* check tdm fatal error */ + for (i = 0; i < adapter->num_tx_queues; i++) { + if (desc_error[i / 32] & (1 << i % 32)) { + e_err(tx_err, "TDM fatal error queue[%d]", i); + tx_ring = adapter->tx_ring[i]; + e_warn(tx_err, "queue[%d] RP = 0x%x\n", i , + rd32(&adapter->hw, TXGBE_PX_TR_RP(adapter->tx_ring[i]->reg_idx))); + for (j = 0; j < tx_ring->count; j++) { + 
tx_desc = TXGBE_TX_DESC(tx_ring, j); + if (tx_desc->read.olinfo_status != 1) + e_warn(tx_err, "queue[%d][%d]:0x%llx, 0x%x, 0x%x\n", + i, j, tx_desc->read.buffer_addr, tx_desc->read.cmd_type_len, tx_desc->read.olinfo_status); + } + for (j = 0; j < tx_ring->count; j++) { + tx_buffer = &tx_ring->tx_buffer_info[j]; + size = dma_unmap_len(tx_buffer, len); + if (size != 0 && tx_buffer->va) { + e_err(pktdata, "tx buffer[%d][%d]: \n", i, j); + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, tx_buffer->va, size, true); + } + } + + for (j = 0; j < tx_ring->count; j++) { + tx_buffer = &tx_ring->tx_buffer_info[j]; + if (tx_buffer->skb) { + e_err(pktdata, "****skb in tx buffer[%d][%d]: *******\n", i, j); + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, tx_buffer->skb, sizeof(struct sk_buff), true); + } + } + netif_stop_subqueue(tx_ring->netdev, i); + if (!ring_is_xdp(tx_ring)) + netdev_tx_reset_queue(txring_txq(tx_ring)); + txgbe_do_lan_reset(adapter); + } + } + } + netdev_err(adapter->netdev, "Reset adapter\n"); adapter->tx_timeout_count++; @@ -8864,6 +10023,10 @@ static void txgbe_reset_subtask(struct txgbe_adapter *adapter) reset_flag |= TXGBE_FLAG2_PF_RESET_REQUESTED; adapter->flags2 &= ~TXGBE_FLAG2_PF_RESET_REQUESTED; } + if (adapter->flags2 & TXGBE_FLAG2_DMA_RESET_REQUESTED) { + reset_flag |= TXGBE_FLAG2_DMA_RESET_REQUESTED; + adapter->flags2 &= ~TXGBE_FLAG2_DMA_RESET_REQUESTED; + } if (adapter->flags2 & TXGBE_FLAG2_RESET_INTR_RECEIVED) { /* If there's a recovery already waiting, it takes @@ -8909,6 +10072,8 @@ static void txgbe_reset_subtask(struct txgbe_adapter *adapter) /*debug to up*/ /*txgbe_dump(adapter);*/ txgbe_reinit_locked(adapter); + } else if (reset_flag & TXGBE_FLAG2_DMA_RESET_REQUESTED) { + txgbe_reinit_locked_dma_reset(adapter); } else if (reset_flag & TXGBE_FLAG2_GLOBAL_RESET_REQUESTED) { /* Request a Global Reset * @@ -8940,21 +10105,124 @@ static void 
txgbe_check_pcie_subtask(struct txgbe_adapter *adapter) if (!(adapter->flags2 & TXGBE_FLAG2_PCIE_NEED_RECOVER)) return; - txgbe_print_tx_hang_status(adapter); + txgbe_print_tx_hang_status(adapter); + txgbe_dump_all_ring_desc(adapter); + + wr32m(&adapter->hw, TXGBE_MIS_PF_SM, TXGBE_MIS_PF_SM_SM, 0); + + if ((TXGBE_PCIE_RECOVER == 1) && !(adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)) { + status = txgbe_check_recovery_capability(adapter->pdev); + if (status) { + e_info(probe, "do recovery\n"); + txgbe_pcie_do_recovery(adapter->pdev); + } else { + e_err(drv, "This platform can't support pcie recovery, skip it\n"); + } + } + + adapter->flags2 &= ~TXGBE_FLAG2_PCIE_NEED_RECOVER; +} +#if 0 +static void txgbe_swfw_mbox_subtask(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 bi; + u32 hdr_size = sizeof(struct txgbe_hic_hdr); + u16 buf_len; + u32 dword_len; + + if (!(adapter->flags & TXGBE_FLAG_SWFW_MBOX_NOTIFY)) + return; + + printk("recv a mbox notify\n"); + + /* Calculate length in DWORDs */ + dword_len = hdr_size >> 2; + + /* first pull in the header so we know the buffer length */ + for (bi = 0; bi < dword_len; bi++) { + adapter->swfw_mbox_buf[bi] = rd32a(hw, TXGBE_AML_MNG_MBOX_FW2SW, bi); + TXGBE_LE32_TO_CPUS(&adapter->swfw_mbox_buf[bi]); + } + + /* If there is any thing in data position pull it in */ + buf_len = ((struct txgbe_hic_hdr *)adapter->swfw_mbox_buf)->buf_len; + if (buf_len == 0) + return; + + /* Calculate length in DWORDs, add 3 for odd lengths */ + dword_len = (buf_len + 3) >> 2; + + /* Pull in the rest of the buffer (bi is where we left off) */ + for (; bi <= dword_len; bi++) { + adapter->swfw_mbox_buf[bi] = rd32a(hw, TXGBE_AML_MNG_MBOX_FW2SW, + bi); + TXGBE_LE32_TO_CPUS(&adapter->swfw_mbox_buf[bi]); + } + + printk("recv mbox data, store in swfw_mbox_buf\n"); + + + /* amlite: check if it is a reply, then inform */ + adapter->flags2 |= TXGBE_FLAG2_SWFW_MBOX_REPLY; + + adapter->flags &= ~TXGBE_FLAG_SWFW_MBOX_NOTIFY; +} +#endif 
+static void txgbe_tx_queue_clear_error_task(struct txgbe_adapter *adapter) { + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_ring *tx_ring; + u32 desc_error[4] = {0, 0, 0, 0}; + union txgbe_tx_desc *tx_desc; + u32 i, j; + struct txgbe_tx_buffer *tx_buffer; + u32 size; + + for (i = 0; i < 4; i++) + desc_error[i] = rd32(hw, TXGBE_TDM_DESC_NONFATAL(i)); + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (desc_error[i / 32] & (1 << i % 32)) { + tx_ring = adapter->tx_ring[i]; + netif_stop_subqueue(tx_ring->netdev, i); + msec_delay(10); + + e_err(tx_err, "queue[%d] RP = 0x%x\n", i , + rd32(&adapter->hw, TXGBE_PX_TR_RP(adapter->tx_ring[i]->reg_idx))); + for (j = 0; j < tx_ring->count; j++) { + tx_desc = TXGBE_TX_DESC(tx_ring, j); + if (tx_desc->read.olinfo_status != 0x1) + e_warn(tx_err, "queue[%d][%d]:0x%llx, 0x%x, 0x%x\n", + i, j, tx_desc->read.buffer_addr, tx_desc->read.cmd_type_len, tx_desc->read.olinfo_status); + } + + for (j = 0; j < tx_ring->count; j++) { + tx_buffer = &tx_ring->tx_buffer_info[j]; + size = dma_unmap_len(tx_buffer, len); + if (size != 0 && tx_buffer->va) { + e_warn(pktdata, "tx buffer[%d][%d]: \n", i, j); + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, tx_buffer->va, size, true); + } + } + + for (j = 0; j < tx_ring->count; j++) { + tx_buffer = &tx_ring->tx_buffer_info[j]; + if (tx_buffer->skb) { + e_err(pktdata, "****skb in tx buffer[%d][%d]: *******\n", i, j); + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, tx_buffer->skb, sizeof(struct sk_buff), true); + } + } + + wr32(hw, TXGBE_TDM_DESC_NONFATAL(i / 32), BIT(i % 32)); - wr32m(&adapter->hw, TXGBE_MIS_PF_SM, TXGBE_MIS_PF_SM_SM, 0); + txgbe_clean_tx_ring(tx_ring); - if ((TXGBE_PCIE_RECOVER == 1) && !(adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)) { - status = txgbe_check_recovery_capability(adapter->pdev); - if (status) { - e_info(probe, "do recovery\n"); - txgbe_pcie_do_recovery(adapter->pdev); - } else 
{ - e_err(drv, "This platform can't support pcie recovery, skip it\n"); + txgbe_configure_tx_ring(adapter, tx_ring); + netif_start_subqueue(tx_ring->netdev, i); } } - - adapter->flags2 &= ~TXGBE_FLAG2_PCIE_NEED_RECOVER; } /** @@ -8966,6 +10234,8 @@ static void txgbe_service_task(struct work_struct *work) struct txgbe_adapter *adapter = container_of(work, struct txgbe_adapter, service_task); + struct txgbe_hw *hw = &adapter->hw; + if (TXGBE_REMOVED(adapter->hw.hw_addr)) { if (!test_bit(__TXGBE_DOWN, &adapter->state)) { rtnl_lock(); @@ -8991,8 +10261,17 @@ static void txgbe_service_task(struct work_struct *work) #endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ txgbe_check_pcie_subtask(adapter); +/* txgbe_swfw_mbox_subtask(adapter); */ txgbe_reset_subtask(adapter); + txgbe_phy_event_subtask(adapter); txgbe_sfp_detection_subtask(adapter); + if (!(hw->mac.type == txgbe_mac_sp || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core0 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core1 || + txgbe_is_backplane(hw))) + txgbe_watchdog_subtask(adapter); txgbe_sfp_link_config_subtask(adapter); txgbe_sfp_reset_eth_phy_subtask(adapter); txgbe_check_overtemp_subtask(adapter); @@ -9009,41 +10288,12 @@ static void txgbe_service_task(struct work_struct *work) txgbe_ptp_rx_hang(adapter); } #endif /* HAVE_PTP_1588_CLOCK */ + txgbe_tx_queue_clear_error_task(adapter); + txgbe_amlit_temp_subtask(adapter); txgbe_service_event_complete(adapter); } -static u8 get_ipv6_proto(struct sk_buff *skb, int offset) -{ - struct ipv6hdr *hdr = (struct ipv6hdr *)(skb->data + offset); - u8 nexthdr = hdr->nexthdr; - - offset += sizeof(struct ipv6hdr); - - while (ipv6_ext_hdr(nexthdr)) { - struct ipv6_opt_hdr _hdr, *hp; - - if (nexthdr == NEXTHDR_NONE) - break; - - hp = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr); - if (!hp) - break; - - if (nexthdr == NEXTHDR_FRAGMENT) { - break; - 
} else if (nexthdr == NEXTHDR_AUTH) { - offset += ipv6_authlen(hp); - } else { - offset += ipv6_optlen(hp); - } - - nexthdr = hp->nexthdr; - } - - return nexthdr; -} - union network_header { struct iphdr *ipv4; struct ipv6hdr *ipv6; @@ -9061,6 +10311,9 @@ static txgbe_dptype encode_tx_desc_ptype(const struct txgbe_tx_buffer *first) #endif u8 l4_prot = 0; u8 ptype = 0; + unsigned char *exthdr; + unsigned char *l4_hdr; + __be16 frag_off; #ifdef HAVE_ENCAP_TSO_OFFLOAD if (skb->encapsulation) { @@ -9074,7 +10327,12 @@ static txgbe_dptype encode_tx_desc_ptype(const struct txgbe_tx_buffer *first) ptype = TXGBE_PTYPE_TUN_IPV4; break; case __constant_htons(ETH_P_IPV6): - tun_prot = get_ipv6_proto(skb, skb_network_offset(skb)); + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb)+ sizeof(struct ipv6hdr); + tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &tun_prot, &frag_off); if (tun_prot == NEXTHDR_FRAGMENT) goto encap_frag; ptype = TXGBE_PTYPE_TUN_IPV6; @@ -9083,7 +10341,8 @@ static txgbe_dptype encode_tx_desc_ptype(const struct txgbe_tx_buffer *first) goto exit; } - if (tun_prot == IPPROTO_IPIP) { + if (tun_prot == IPPROTO_IPIP || + tun_prot == IPPROTO_IPV6) { hdr.raw = (void *)inner_ip_hdr(skb); ptype |= TXGBE_PTYPE_PKT_IPIP; } else if (tun_prot == IPPROTO_UDP) { @@ -9132,8 +10391,12 @@ static txgbe_dptype encode_tx_desc_ptype(const struct txgbe_tx_buffer *first) } break; case 6: - l4_prot = get_ipv6_proto(skb, - skb_inner_network_offset(skb)); + l4_hdr = skb_inner_transport_header(skb); + exthdr = skb_inner_network_header(skb) + sizeof(struct ipv6hdr); + l4_prot = inner_ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_prot, &frag_off); ptype |= TXGBE_PTYPE_PKT_IPV6; if (l4_prot == NEXTHDR_FRAGMENT) { ptype |= TXGBE_PTYPE_TYP_IPFRAG; @@ -9157,7 +10420,13 @@ static txgbe_dptype encode_tx_desc_ptype(const struct txgbe_tx_buffer *first) break; 
#ifdef NETIF_F_IPV6_CSUM case __constant_htons(ETH_P_IPV6): - l4_prot = get_ipv6_proto(skb, skb_network_offset(skb)); + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb)+ sizeof(struct ipv6hdr); + l4_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_prot, &frag_off); + ptype = TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6; if (l4_prot == NEXTHDR_FRAGMENT) { ptype |= TXGBE_PTYPE_TYP_IPFRAG; @@ -9227,6 +10496,9 @@ static int txgbe_tso(struct txgbe_ring *tx_ring, u32 tunhdr_eiplen_tunlen = 0; #ifdef HAVE_ENCAP_TSO_OFFLOAD u8 tun_prot = 0; + unsigned char *exthdr; + unsigned char *l4_hdr; + __be16 frag_off; bool enc = skb->encapsulation; #endif /* HAVE_ENCAP_TSO_OFFLOAD */ #ifdef NETIF_F_TSO6 @@ -9322,7 +10594,12 @@ static int txgbe_tso(struct txgbe_ring *tx_ring, first->tx_flags |= TXGBE_TX_FLAGS_OUTER_IPV4; break; case __constant_htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb)+ sizeof(struct ipv6hdr); tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &tun_prot, &frag_off); break; default: break; @@ -9347,6 +10624,7 @@ static int txgbe_tso(struct txgbe_ring *tx_ring, TXGBE_TXD_TUNNEL_LEN_SHIFT); break; case IPPROTO_IPIP: + case IPPROTO_IPV6: tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb)- (char *)ip_hdr(skb)) >> 2) << TXGBE_TXD_OUTER_IPLEN_SHIFT; @@ -9361,7 +10639,7 @@ static int txgbe_tso(struct txgbe_ring *tx_ring, #else vlan_macip_lens = skb_network_header_len(skb) >> 1; #endif /* HAVE_ENCAP_TSO_OFFLOAD */ - vlan_macip_lens |= skb_network_offset(skb) << TXGBE_TXD_MACLEN_SHIFT; + vlan_macip_lens |= skb_network_offset(skb) << TXGBE_TXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & TXGBE_TX_FLAGS_VLAN_MASK; type_tucmd = dptype.ptype << 24; @@ -9371,6 +10649,7 @@ static int txgbe_tso(struct txgbe_ring *tx_ring, TXGBE_TX_FLAGS_HW_VLAN, 0x1 << TXGBE_TXD_TAG_TPID_SEL_SHIFT); #endif + 
txgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, type_tucmd, mss_l4len_idx); @@ -9391,6 +10670,7 @@ static void txgbe_tx_csum(struct txgbe_ring *tx_ring, u32 type_tucmd; if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: if (!(first->tx_flags & TXGBE_TX_FLAGS_HW_VLAN) && !(first->tx_flags & TXGBE_TX_FLAGS_CC)) return; @@ -9398,6 +10678,9 @@ static void txgbe_tx_csum(struct txgbe_ring *tx_ring, TXGBE_TXD_MACLEN_SHIFT; } else { u8 l4_prot = 0; + unsigned char *exthdr; + unsigned char *l4_hdr; + __be16 frag_off; #ifdef HAVE_ENCAP_TSO_OFFLOAD union { struct iphdr *ipv4; @@ -9408,7 +10691,7 @@ static void txgbe_tx_csum(struct txgbe_ring *tx_ring, struct tcphdr *tcphdr; u8 *raw; } transport_hdr; - + if (skb->encapsulation) { network_hdr.raw = skb_inner_network_header(skb); transport_hdr.raw = skb_inner_transport_header(skb); @@ -9419,7 +10702,12 @@ static void txgbe_tx_csum(struct txgbe_ring *tx_ring, tun_prot = ip_hdr(skb)->protocol; break; case __constant_htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb)+ sizeof(struct ipv6hdr); tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &tun_prot, &frag_off); break; default: if (unlikely(net_ratelimit())) { @@ -9449,6 +10737,7 @@ static void txgbe_tx_csum(struct txgbe_ring *tx_ring, TXGBE_TXD_TUNNEL_LEN_SHIFT); break; case IPPROTO_IPIP: + case IPPROTO_IPV6: tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb)- (char *)ip_hdr(skb)) >> 2) << @@ -9474,7 +10763,11 @@ static void txgbe_tx_csum(struct txgbe_ring *tx_ring, case 6: vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1; + exthdr = network_hdr.raw + sizeof(struct ipv6hdr); l4_prot = network_hdr.ipv6->nexthdr; + if (transport_hdr.raw != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_prot, &frag_off); break; default: break; @@ -9489,7 +10782,12 @@ static void txgbe_tx_csum(struct txgbe_ring *tx_ring, #ifdef NETIF_F_IPV6_CSUM case 
__constant_htons(ETH_P_IPV6): vlan_macip_lens |= skb_network_header_len(skb) >> 1; + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb)+ sizeof(struct ipv6hdr); l4_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_prot, &frag_off); break; #endif /* NETIF_F_IPV6_CSUM */ default: @@ -9518,7 +10816,8 @@ static void txgbe_tx_csum(struct txgbe_ring *tx_ring, TXGBE_TXD_L4LEN_SHIFT; break; default: - break; + skb_checksum_help(skb); + goto csum_failed; } /* update TX checksum flag */ @@ -9633,6 +10932,7 @@ static int txgbe_tx_map(struct txgbe_ring *tx_ring, struct txgbe_tx_buffer *first, const u8 hdr_len) { + struct txgbe_adapter *adapter = netdev_priv(tx_ring->netdev); struct sk_buff *skb = first->skb; struct txgbe_tx_buffer *tx_buffer; union txgbe_tx_desc *tx_desc; @@ -9665,10 +10965,13 @@ static int txgbe_tx_map(struct txgbe_ring *tx_ring, tx_buffer = first; + tx_buffer->va = skb->data; + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { - if (dma_mapping_error(tx_ring->dev, dma)) + if (dma_mapping_error(tx_ring->dev, dma)) { + tx_buffer->va = NULL; goto dma_error; - + } /* record length, and DMA address */ dma_unmap_len_set(tx_buffer, len, size); dma_unmap_addr_set(tx_buffer, dma, dma); @@ -9717,6 +11020,7 @@ static int txgbe_tx_map(struct txgbe_ring *tx_ring, DMA_TO_DEVICE); tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_buffer->va = skb_frag_address_safe(frag); } /* write last descriptor with RS and EOP bits */ @@ -9744,15 +11048,21 @@ static int txgbe_tx_map(struct txgbe_ring *tx_ring, /* set next_to_watch value indicating a packet is present */ first->next_to_watch = tx_desc; + /* set next_eop for amlite tx head wb*/ +#ifdef TXGBE_TXHEAD_WB + first->next_eop = i; +#endif + i++; if (i == tx_ring->count) i = 0; tx_ring->next_to_use = i; - txgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); + txgbe_maybe_stop_tx(tx_ring, adapter->desc_reserved + DESC_NEEDED); - if 
(netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more() || + (txgbe_desc_unused(tx_ring) <= (tx_ring->count >> 1))) { writel(i, tx_ring->tail); #ifndef SPIN_UNLOCK_IMPLIES_MMIOWB @@ -9782,6 +11092,7 @@ static int txgbe_tx_map(struct txgbe_ring *tx_ring, dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); dma_unmap_len_set(tx_buffer, len, 0); + tx_buffer->va = NULL; if (tx_buffer == first) break; if (i == 0) @@ -10141,7 +11452,6 @@ static void txgbe_xdp_flush(struct net_device *dev) #endif /*HAVE_XDP_SUPPORT*/ #ifdef HAVE_NETDEV_SELECT_QUEUE -#if IS_ENABLED(CONFIG_FCOE) #if defined(HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED) static u16 txgbe_select_queue(struct net_device *dev, struct sk_buff *skb, @@ -10162,9 +11472,32 @@ static u16 txgbe_select_queue(struct net_device *dev, struct sk_buff *skb) #endif /* HAVE_NDO_SELECT_QUEUE_ACCEL */ { struct txgbe_adapter *adapter = netdev_priv(dev); + int queue; +#if IS_ENABLED(CONFIG_FCOE) struct txgbe_ring_feature *f; int txq; +#endif + + if (adapter->vlan_rate_link_speed) { + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED || + adapter->flags & TXGBE_FLAG_FCOE_ENABLED) +#if IS_ENABLED(CONFIG_FCOE) + goto fcoe; +#else + goto skip_select; +#endif + if (skb_vlan_tag_present(skb)) { + u16 vlan_id = skb_vlan_tag_get_id(skb); + if (test_bit(vlan_id, adapter->limited_vlans)) { + int r_idx = adapter->num_tx_queues - 1 - + txgbe_find_nth_limited_vlan(adapter, vlan_id); + return r_idx; + } + } + } +#if IS_ENABLED(CONFIG_FCOE) +fcoe: /* * only execute the code below if protocol is FCoE * or FIP and we have FCoE enabled on the adapter @@ -10178,28 +11511,35 @@ static u16 txgbe_select_queue(struct net_device *dev, struct sk_buff *skb) break; fallthrough; default: -#if defined(HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED) - return netdev_pick_tx(dev, skb, sb_dev); -#elif defined(HAVE_NDO_SELECT_QUEUE_SB_DEV) - return fallback(dev, skb, sb_dev); -#elif 
defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) - return fallback(dev, skb); -#else - return __netdev_pick_tx(dev, skb); -#endif + goto skip_select; } f = &adapter->ring_feature[RING_F_FCOE]; txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : - smp_processor_id(); + smp_processor_id(); while (txq >= f->indices) txq -= f->indices; return txq + f->offset; +#endif/*FCOE*/ +skip_select: +#if defined(HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED) + queue = netdev_pick_tx(dev, skb, sb_dev); +#elif defined(HAVE_NDO_SELECT_QUEUE_SB_DEV) + queue = fallback(dev, skb, sb_dev); +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) + queue = fallback(dev, skb); +#else + queue = __netdev_pick_tx(dev, skb); +#endif + if (adapter->vlan_rate_link_speed) + queue = queue % (adapter->num_tx_queues - + adapter->active_vlan_limited); + + return queue; } -#endif /* CONFIG_FCOE */ #endif /* HAVE_NETDEV_SELECT_QUEUE */ /** @@ -10214,7 +11554,7 @@ static u16 txgbe_select_queue(struct net_device *dev, struct sk_buff *skb) * May return error in out of memory cases. The skb is freed on error. 
*/ -int txgbe_skb_pad_nonzero(struct sk_buff *skb, int pad) +static int txgbe_skb_pad_nonzero(struct sk_buff *skb, int pad) { int err; int ntail; @@ -10282,7 +11622,7 @@ netdev_tx_t txgbe_xmit_frame_ring(struct sk_buff *skb, count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)-> frags[f])); - if (txgbe_maybe_stop_tx(tx_ring, count + 3)) { + if (txgbe_maybe_stop_tx(tx_ring, count + adapter->desc_reserved + 3)) { tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; } @@ -10308,19 +11648,17 @@ netdev_tx_t txgbe_xmit_frame_ring(struct sk_buff *skb, tx_flags |= ntohs(vhdr->h_vlan_TCI) << TXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= TXGBE_TX_FLAGS_SW_VLAN; + vlan_addlen += VLAN_HLEN; } - if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) { - struct vlan_hdr *vhdr, _vhdr; - vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); - if (!vhdr) - goto out_drop; - - protocol = vhdr->h_vlan_encapsulated_proto; + if (protocol == htons(ETH_P_8021Q) || + protocol == htons(ETH_P_8021AD)) { tx_flags |= TXGBE_TX_FLAGS_SW_VLAN; vlan_addlen += VLAN_HLEN; } + protocol = vlan_get_protocol(skb); + #ifdef HAVE_PTP_1588_CLOCK #ifdef SKB_SHARED_TX_IS_UNION if (unlikely(skb_tx(skb)->hardware) && @@ -10482,6 +11820,11 @@ static netdev_tx_t txgbe_xmit_frame(struct sk_buff *skb, return NETDEV_TX_OK; #ifdef HAVE_TX_MQ + if (!adapter->num_tx_queues) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + if (r_idx >= adapter->num_tx_queues) r_idx = r_idx % adapter->num_tx_queues; tx_ring = adapter->tx_ring[r_idx]; @@ -10520,7 +11863,7 @@ static int txgbe_set_mac(struct net_device *netdev, void *p) txgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); //memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - eth_hw_addr_set(netdev, addr->sa_data); + eth_hw_addr_set(netdev, (u8 *)addr->sa_data); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); txgbe_mac_set_default_filter(adapter, hw->mac.addr); @@ -10612,11 +11955,214 @@ static int txgbe_mii_ioctl(struct 
net_device *netdev, struct ifreq *ifr, } } +int txgbe_find_nth_limited_vlan(struct txgbe_adapter *adapter, int vlan) +{ + return bitmap_weight(adapter->limited_vlans, vlan+1) - 1; +} + +void txgbe_del_vlan_limit(struct txgbe_adapter *adapter, int vlan) +{ + int new_queue_rate_limit[64]; + int idx = 0; + int i = 0, j = 0; + + if (!test_bit(vlan, adapter->limited_vlans)) + return; + + idx = txgbe_find_nth_limited_vlan(adapter, vlan); + for (; i < bitmap_weight(adapter->limited_vlans, 4096); i++) { + if (i != idx) + new_queue_rate_limit[j++] = adapter->queue_rate_limit[i]; + } + + memcpy(adapter->queue_rate_limit, new_queue_rate_limit, sizeof(int) * 64); + clear_bit(vlan, adapter->limited_vlans); + +} + +void txgbe_set_vlan_limit(struct txgbe_adapter *adapter, int vlan, int rate_limit) +{ + int new_queue_rate_limit[64]; + int idx = 0; + int i = 0, j = 0; + + if (test_and_set_bit(vlan, adapter->limited_vlans)) { + idx = txgbe_find_nth_limited_vlan(adapter, vlan); + adapter->queue_rate_limit[idx] = rate_limit; + return; + } + + idx = txgbe_find_nth_limited_vlan(adapter, vlan); + for (; j < bitmap_weight(adapter->limited_vlans, 4096); j++) { + if (j == idx) + new_queue_rate_limit[j] = rate_limit; + else + new_queue_rate_limit[j] = adapter->queue_rate_limit[i++]; + } + + memcpy(adapter->queue_rate_limit, new_queue_rate_limit, sizeof(int) * 64); + +} + +void txgbe_check_vlan_rate_limit(struct txgbe_adapter *adapter) +{ + int i; + + if (!adapter->vlan_rate_link_speed) + return; + + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED || + adapter->flags & TXGBE_FLAG_FCOE_ENABLED) { + e_dev_info("Can't limit vlan rate when enable SRIOV or FCOE"); + goto resume_rate; + } + + if (txgbe_link_mbps(adapter) != adapter->vlan_rate_link_speed) { + dev_info(pci_dev_to_dev(adapter->pdev), + "Link speed has been changed. 
vlan Transmit rate is disabled\n"); + goto resume_rate; + } + + if (adapter->active_vlan_limited > adapter->num_tx_queues) { + e_dev_err("limited vlan bigger than num of tx ring, " + "disabled vlan limit\n"); + goto resume_rate; + } + + for (i = 0; i < adapter->active_vlan_limited; i++) { + txgbe_set_queue_rate_limit(&adapter->hw, + (adapter->num_tx_queues - i - 1), adapter->queue_rate_limit[i]); + } + return; +resume_rate: + e_dev_info("clear all vlan limit"); + bitmap_zero(adapter->limited_vlans, 4096); + adapter->active_vlan_limited = bitmap_weight(adapter->limited_vlans, 4096); + adapter->vlan_rate_link_speed = 0; + memset(adapter->queue_rate_limit, 0, sizeof(int) * 64); + adapter->vlan_rate_link_speed = 0; + for (i = 0; i < adapter->num_tx_queues; i++) + txgbe_set_queue_rate_limit(&adapter->hw, i , 0); + +} + +struct vlan_rate_param { + int count; + unsigned short vlans[64]; + unsigned int rates[64]; +}; + +#define SIOCSVLANRATE (SIOCDEVPRIVATE+0xe) +#define SIOCGVLANRATE (SIOCDEVPRIVATE+0xf) + +static int txgbe_vlan_rate_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct vlan_rate_param param; + int i; + int link_speed; + int set_num = 0; + + if (cmd != SIOCSVLANRATE) + return -EOPNOTSUPP; + + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED || + adapter->flags & TXGBE_FLAG_FCOE_ENABLED){ + e_dev_err("Not support vlan limit when enable SRIOV of FCOE"); + return -EINVAL; + } + + if (!netif_carrier_ok(netdev) || + adapter->link_speed < TXGBE_LINK_SPEED_1GB_FULL) { + e_dev_err("please set vlan rate limit when link up, speed 1G not support"); + return -EINVAL; + } + + link_speed = txgbe_link_mbps(adapter); + + if (copy_from_user(&param, ifr->ifr_data, sizeof(param))) + return -EFAULT; + + if (param.count == 0) { + e_dev_info("clear all vlan limit"); + bitmap_zero(adapter->limited_vlans, 4096); + adapter->vlan_rate_link_speed = 0; + memset(adapter->queue_rate_limit, 0, sizeof(int) * 64); + goto 
after_set; + } + + for (i = 0; i < param.count; i++) { + if ((param.vlans[i] > 4095) || + (param.rates[i] != 0 && param.rates[i] <= 10) || + (param.rates[i] > link_speed)) { + e_dev_err("Invalid param: VLAN_ID(0~4095): %d, rate(0,11~linkspeed):%d\n", + param.vlans[i], param.rates[i]); + return -EINVAL; + } + } + + for (i = 0; i < param.count; i++) + if (param.rates[i]) + set_num++; + else + if (test_bit(param.vlans[i], adapter->limited_vlans) && + param.rates[i] == 0) + set_num--; + + if (param.count <= 0 || param.count > 64 || + (set_num + adapter->active_vlan_limited > adapter->num_tx_queues - 1)) { + e_dev_err("Invalid VLAN set count: %d, now active limited vlan count:%d " + "total num of limited vlan should not bigger than (num_of_txring - 1):%d", + set_num, adapter->active_vlan_limited, adapter->num_tx_queues - 1); + return -EINVAL; + } + + adapter->vlan_rate_link_speed = link_speed; + for (i = 0; i < param.count; i++) + if (param.rates[i]) + txgbe_set_vlan_limit(adapter, param.vlans[i], param.rates[i]); + else + txgbe_del_vlan_limit(adapter, param.vlans[i]); +after_set: + /*clear all rate limit*/ + for (i = 0; i < adapter->num_tx_queues; i++) + txgbe_set_queue_rate_limit(&adapter->hw, i, 0); + + adapter->active_vlan_limited = bitmap_weight(adapter->limited_vlans, 4096); + + for (i = 0; i < adapter->active_vlan_limited; i++) { + txgbe_set_queue_rate_limit(&adapter->hw, + adapter->num_tx_queues - i - 1, adapter->queue_rate_limit[i]); + } + return 0; +} + +static int txgbe_get_vlan_rate_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct vlan_rate_param param; + int i = 0, n = 0; + + if (cmd != SIOCGVLANRATE) + return -EOPNOTSUPP; + + for_each_set_bit(i, adapter->limited_vlans, 4096) { + param.vlans[n] = i; + param.rates[n] = adapter->queue_rate_limit[n]; + n++; + } + param.count = n; + + if (copy_to_user(ifr->ifr_data, &param, sizeof(param))) + return -EFAULT; + + return 0; +} + static int 
txgbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { #ifdef HAVE_PTP_1588_CLOCK struct txgbe_adapter *adapter = netdev_priv(netdev); - #endif switch (cmd) { #ifdef HAVE_PTP_1588_CLOCK @@ -10634,11 +12180,24 @@ static int txgbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) case SIOCGMIIREG: case SIOCSMIIREG: return txgbe_mii_ioctl(netdev, ifr, cmd); + case SIOCSVLANRATE: + return txgbe_vlan_rate_ioctl(netdev, ifr, cmd); + case SIOCGVLANRATE: + return txgbe_get_vlan_rate_ioctl(netdev, ifr, cmd); default: return -EOPNOTSUPP; } } + +#ifdef HAVE_NDO_IOCTLPRIVATE +static int txgbe_siocdevprivate(struct net_device *netdev, struct ifreq *ifr, + void __user *data, int cmd) +{ + return txgbe_ioctl(netdev, ifr, cmd); +} +#endif + #ifdef CONFIG_NET_POLL_CONTROLLER /* * Polling 'interrupt' - used by things like netconsole to send skbs @@ -10898,6 +12457,17 @@ static netdev_features_t txgbe_fix_features(struct net_device *netdev, features &= ~NETIF_F_LRO; } } + +#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { +#if defined(NETIF_F_HW_VLAN_STAG_FILTER) + features |= NETIF_F_HW_VLAN_STAG_FILTER; + } else { + features &= ~NETIF_F_HW_VLAN_STAG_FILTER; +#endif + } +#endif + #if (defined NETIF_F_HW_VLAN_CTAG_RX) && (defined NETIF_F_HW_VLAN_STAG_RX) if (!(features & NETIF_F_HW_VLAN_CTAG_RX)) features &= ~NETIF_F_HW_VLAN_STAG_RX; @@ -11010,16 +12580,23 @@ static int txgbe_set_features(struct net_device *netdev, netdev->features = features; +#ifdef NETIF_F_HW_VLAN_CTAG_FILTER + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + need_reset = true; +#endif +#ifdef NETIF_F_HW_VLAN_FILTER + if (changed & NETIF_F_HW_VLAN_RX) + need_reset = true; +#endif + if (need_reset) txgbe_do_reset(netdev); #ifdef NETIF_F_HW_VLAN_CTAG_FILTER - else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER)) + else if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) txgbe_set_rx_mode(netdev); #endif #ifdef NETIF_F_HW_VLAN_FILTER - else if 
(changed & (NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER)) + else if (changed & NETIF_F_HW_VLAN_FILTER) txgbe_set_rx_mode(netdev); #endif return 0; @@ -11040,9 +12617,6 @@ static void txgbe_add_udp_tunnel_port(struct net_device *dev, struct txgbe_hw *hw = &adapter->hw; u16 port = ntohs(ti->port); - if (ti->sa_family != AF_INET) - return; - switch (ti->type) { case UDP_TUNNEL_TYPE_VXLAN: if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) @@ -11088,7 +12662,7 @@ static void txgbe_add_udp_tunnel_port(struct net_device *dev, } /** - * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports + * txgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports * @dev: The port's netdev * @ti: Tunnel endpoint information **/ @@ -11101,15 +12675,12 @@ static void txgbe_del_udp_tunnel_port(struct net_device *dev, ti->type != UDP_TUNNEL_TYPE_GENEVE) return; - if (ti->sa_family != AF_INET) - return; - switch (ti->type) { case UDP_TUNNEL_TYPE_VXLAN: if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) return; - if (adapter->vxlan_port != ti->port) { + if (adapter->vxlan_port != ntohs(ti->port)) { netdev_info(dev, "VXLAN port %d not found\n", ntohs(ti->port)); return; @@ -11122,14 +12693,13 @@ static void txgbe_del_udp_tunnel_port(struct net_device *dev, // if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) // return; - if (adapter->geneve_port != ti->port) { + if (adapter->geneve_port != ntohs(ti->port)) { netdev_info(dev, "GENEVE port %d not found\n", ntohs(ti->port)); return; } adapter->geneve_port = 0; - wr32(&adapter->hw, TXGBE_CFG_GENEVE, 0); break; default: return; @@ -11141,11 +12711,6 @@ static int txgbe_udp_tunnel_set(struct net_device *dev, unsigned int table, unsigned int entry, struct udp_tunnel_info *ti) { - const struct udp_tunnel_nic_info *tni = dev->udp_tunnel_nic_info; - - if (tni->flags & UDP_TUNNEL_NIC_INFO_IPV4_ONLY) - ti->sa_family = AF_INET; - txgbe_add_udp_tunnel_port(dev, ti); return 0; } @@ 
-11154,11 +12719,6 @@ static int txgbe_udp_tunnel_unset(struct net_device *dev, unsigned int table, unsigned int entry, struct udp_tunnel_info *ti) { - const struct udp_tunnel_nic_info *tni = dev->udp_tunnel_nic_info; - - if (tni->flags & UDP_TUNNEL_NIC_INFO_IPV4_ONLY) - ti->sa_family = AF_INET; - txgbe_del_udp_tunnel_port(dev, ti); return 0; @@ -11167,7 +12727,7 @@ static int txgbe_udp_tunnel_unset(struct net_device *dev, static const struct udp_tunnel_nic_info txgbe_udp_tunnels = { .set_port = txgbe_udp_tunnel_set, .unset_port = txgbe_udp_tunnel_unset, - .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY, + .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP, .tables = { { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, @@ -11378,6 +12938,7 @@ static netdev_features_t txgbe_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { +#ifndef HAVE_VLAN_NUM_ERROR u32 vlan_num = 0; u16 vlan_depth = skb->mac_len; __be16 type = skb->protocol; @@ -11398,12 +12959,12 @@ txgbe_features_check(struct sk_buff *skb, struct net_device *dev, vh = (struct vlan_hdr *)(skb->data + vlan_depth); type = vh->h_vlan_encapsulated_proto; vlan_depth += VLAN_HLEN; - } if (vlan_num > 2) features &= ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); +#endif if (skb->encapsulation) { if (unlikely(skb_inner_mac_header(skb) - @@ -11411,6 +12972,15 @@ txgbe_features_check(struct sk_buff *skb, struct net_device *dev, TXGBE_MAX_TUNNEL_HDR_LEN)) return features & ~NETIF_F_CSUM_MASK; } + + if (skb->encapsulation) { + if (skb->inner_protocol_type == ENCAP_TYPE_ETHER && + skb->inner_protocol != htons(ETH_P_IP) && + skb->inner_protocol != htons(ETH_P_IPV6) && + skb->inner_protocol != htons(ETH_P_TEB)) + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + } + return features; } #endif /* HAVE_NDO_FEATURES_CHECK */ @@ -11536,13 +13106,9 @@ static const struct net_device_ops txgbe_netdev_ops = { .ndo_stop = 
txgbe_close, .ndo_start_xmit = txgbe_xmit_frame, #ifdef HAVE_NETDEV_SELECT_QUEUE -#if IS_ENABLED(CONFIG_FCOE) .ndo_select_queue = txgbe_select_queue, #else -#ifndef HAVE_MQPRIO .ndo_select_queue = __netdev_pick_tx, -#endif -#endif #endif /* HAVE_NETDEV_SELECT_QUEUE */ .ndo_set_rx_mode = txgbe_set_rx_mode, .ndo_validate_addr = eth_validate_addr, @@ -11558,7 +13124,14 @@ static const struct net_device_ops txgbe_netdev_ops = { .ndo_vlan_rx_add_vid = txgbe_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = txgbe_vlan_rx_kill_vid, #endif - .ndo_do_ioctl = txgbe_ioctl, +#ifdef HAVE_NDO_ETH_IOCTL + .ndo_eth_ioctl = txgbe_ioctl, +#else + .ndo_do_ioctl = txgbe_ioctl, +#endif /* HAVE_NDO_ETH_IOCTL */ +#ifdef HAVE_NDO_IOCTLPRIVATE + .ndo_siocdevprivate = txgbe_siocdevprivate, +#endif #ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT /* RHEL7 requires this to be defined to enable extended ops. RHEL7 uses the * function get_ndo_ext to retrieve offsets for extended fields from with the @@ -11737,7 +13310,7 @@ void txgbe_assign_netdev_ops(struct net_device *dev) dev->poll_controller = &txgbe_netpoll; #endif #ifdef HAVE_NETDEV_SELECT_QUEUE -#if IS_ENABLED(CONFIG_FCOE) +#if HAVE_NETDEV_SELECT_QUEUE dev->select_queue = &txgbe_select_queue; #else dev->select_queue = &__netdev_pick_tx; @@ -11755,30 +13328,6 @@ void txgbe_assign_netdev_ops(struct net_device *dev) dev->watchdog_timeo = 5 * HZ; } -/** - * txgbe_wol_supported - Check whether device supports WoL - * @adapter: the adapter private structure - * @device_id: the device ID - * @subdev_id: the subsystem device ID - * - * This function is used by probe and ethtool to determine - * which devices have WoL support - * - **/ -int txgbe_wol_supported(struct txgbe_adapter *adapter) -{ - struct txgbe_hw *hw = &adapter->hw; - u16 wol_cap = adapter->eeprom_cap & TXGBE_DEVICE_CAPS_WOL_MASK; - - /* check eeprom to see if WOL is enabled */ - if ((wol_cap == TXGBE_DEVICE_CAPS_WOL_PORT0_1) || - ((wol_cap == TXGBE_DEVICE_CAPS_WOL_PORT0) && - (hw->bus.func == 0))) - 
return true; - else - return false; -} - /** * txgbe_probe - Device Initialization Routine * @pdev: PCI device information struct @@ -11828,7 +13377,6 @@ static int __devinit txgbe_probe(struct pci_dev *pdev, if (err) return err; - if (!dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)) && !dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64))) { pci_using_dac = 1; @@ -11854,7 +13402,6 @@ static int __devinit txgbe_probe(struct pci_dev *pdev, goto err_pci_reg; } - hw = vmalloc(sizeof(struct txgbe_hw)); if (!hw) { pr_info("Unable to allocate memory for early mac " @@ -11865,6 +13412,9 @@ static int __devinit txgbe_probe(struct pci_dev *pdev, vfree(hw); } +#ifdef HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING + pci_enable_pcie_error_reporting(pdev); +#endif pci_set_master(pdev); /* errata 16 */ if (MAX_REQUEST_SIZE == 512) { @@ -11896,6 +13446,7 @@ static int __devinit txgbe_probe(struct pci_dev *pdev, adapter->indices = indices; #endif #endif /* HAVE_TX_MQ */ + adapter->netdev = netdev; adapter->pdev = pdev; hw = &adapter->hw; @@ -11941,7 +13492,12 @@ static int __devinit txgbe_probe(struct pci_dev *pdev, goto err_sw_init; /* reset_hw fills in the perm_addr as well */ hw->phy.reset_if_overtemp = true; + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + txgbe_get_hw_control(adapter); err = TCALL(hw, mac.ops.reset_hw); + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + txgbe_release_hw_control(adapter); + /* Store the permanent mac address */ TCALL(hw, mac.ops.get_mac_addr, hw->mac.perm_addr); hw->phy.reset_if_overtemp = false; @@ -11958,15 +13514,17 @@ static int __devinit txgbe_probe(struct pci_dev *pdev, goto err_sw_init; } + if (txgbe_is_lldp(hw)) + e_dev_err("Can not get lldp flags from flash\n"); #ifdef CONFIG_PCI_IOV #ifdef HAVE_SRIOV_CONFIGURE - if (adapter->num_vfs > 0) { + if (adapter->max_vfs > 0) { e_dev_warn("Enabling SR-IOV VFs using the max_vfs module " "parameter is deprecated.\n"); e_dev_warn("Please use 
the pci sysfs interface instead. Ex:\n"); e_dev_warn("echo '%d' > /sys/bus/pci/devices/%04x:%02x:%02x.%1x" "/sriov_numvfs\n", - adapter->num_vfs, + adapter->max_vfs, pci_domain_nr(pdev->bus), pdev->bus->number, PCI_SLOT(pdev->devfn), @@ -12039,9 +13597,9 @@ static int __devinit txgbe_probe(struct pci_dev *pdev, NETIF_F_HW_VLAN_CTAG_RX; #endif -#ifdef NETIF_F_HW_VLAN_STAG_TX - netdev->features |= NETIF_F_HW_VLAN_STAG_TX | - NETIF_F_HW_VLAN_STAG_RX; +#ifdef NETIF_F_HW_VLAN_CTAG_TX + /* set this bit last since it cannot be part of hw_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; #endif #ifdef NETIF_F_HW_VLAN_TX @@ -12079,10 +13637,11 @@ static int __devinit txgbe_probe(struct pci_dev *pdev, #endif #endif /* HAVE_NDO_SET_FEATURES */ -#ifdef NETIF_F_HW_VLAN_CTAG_TX - /* set this bit last since it cannot be part of hw_features */ - netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; +#ifdef NETIF_F_HW_VLAN_STAG_TX + netdev->features |= NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_HW_VLAN_STAG_RX; #endif + #ifdef NETIF_F_HW_VLAN_STAG_TX netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER; #endif @@ -12196,12 +13755,14 @@ static int __devinit txgbe_probe(struct pci_dev *pdev, TCALL(hw, eeprom.ops.init_params); /* make sure the EEPROM is good */ + if (TCALL(hw, eeprom.ops.validate_checksum, NULL)) { e_dev_err("The EEPROM Checksum Is Not Valid\n"); wr32(hw, TXGBE_MIS_RST, TXGBE_MIS_RST_SW_RST); err = -EIO; goto err_sw_init; } + eth_hw_addr_set(netdev, hw->mac.perm_addr); if (!is_valid_ether_addr(netdev->dev_addr)) { @@ -12211,6 +13772,10 @@ static int __devinit txgbe_probe(struct pci_dev *pdev, } txgbe_mac_set_default_filter(adapter, hw->mac.perm_addr); + memset(&adapter->etype_filter_info, 0, + sizeof(struct txgbe_etype_filter_info)); + memset(&adapter->ft_filter_info, 0, + sizeof(struct txgbe_5tuple_filter_info)); timer_setup(&adapter->service_timer, txgbe_service_timer, 0); @@ -12233,12 +13798,8 @@ static int __devinit txgbe_probe(struct pci_dev *pdev, /* WOL not 
supported for all devices */ adapter->wol = 0; - TCALL(hw, eeprom.ops.read, - hw->eeprom.sw_region_offset + TXGBE_DEVICE_CAPS, - &adapter->eeprom_cap); - if((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP && - hw->bus.lan_id == 0) { + if ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP) { adapter->wol = TXGBE_PSR_WKUP_CTL_MAG; wr32(hw, TXGBE_PSR_WKUP_CTL, adapter->wol); } @@ -12284,8 +13845,21 @@ static int __devinit txgbe_probe(struct pci_dev *pdev, "0x%08x", etrack_id); } - if (hw->bus.lan_id == 0) - e_dev_info("Shadow Ram Firmware Version: %s\n", adapter->eeprom_id); + adapter->etrack_id = etrack_id; + + if (strcmp(adapter->eeprom_id, adapter->fl_version) == 0) { + memcpy(adapter->fw_version, adapter->eeprom_id, sizeof(adapter->eeprom_id)); + + if (hw->bus.lan_id == 0) + e_dev_info("Running Firmware Version: %s\n", adapter->eeprom_id); + } else { + snprintf(adapter->fw_version, sizeof(adapter->fw_version), "%s,ACT.%s", + adapter->fl_version, adapter->eeprom_id); + + if (hw->bus.lan_id == 0) + e_dev_info("Running Firmware Version: %s, Flash Firmware Version: %s\n", + adapter->eeprom_id, adapter->fl_version); + } /* reset the hardware with the new settings */ err = TCALL(hw, mac.ops.start_hw); @@ -12320,10 +13894,17 @@ static int __devinit txgbe_probe(struct pci_dev *pdev, pci_save_state(pdev); #endif + /* power down the optics for SFP+ fiber or mv phy */ if(!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || - adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LLDP)) - /* power down the optics for SFP+ fiber */ - TCALL(hw, mac.ops.disable_tx_laser); + adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LLDP)) { + if (hw->phy.media_type == txgbe_media_type_fiber || + hw->phy.media_type == txgbe_media_type_fiber_qsfp) + TCALL(hw, mac.ops.disable_tx_laser); + else if (hw->phy.media_type == txgbe_media_type_copper && + (hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI) + txgbe_external_phy_suspend(hw); + } + /* carrier off reporting 
is important to ethtool even BEFORE open */ netif_carrier_off(netdev); @@ -12427,14 +14008,13 @@ static int __devinit txgbe_probe(struct pci_dev *pdev, txgbe_vf_configuration(pdev, (i | 0x10000000)); } #endif - /* firmware requires blank driver version */ - TCALL(hw, mac.ops.set_fw_drv_ver, 0xFF, 0xFF, 0xFF, 0xFF); + #if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) /* add san mac addr to netdev */ txgbe_add_sanmac_netdev(netdev); #endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && (NETDEV_HW_ADDR_T_SAN) */ - e_info(probe, "WangXun(R) 10 Gigabit Network Connection\n"); + e_info(probe, "WangXun(R) RP1000/RP2000/FF50XX Network Connection\n"); cards_found++; #ifdef TXGBE_SYSFS if (txgbe_sysfs_init(adapter)) @@ -12453,20 +14033,21 @@ static int __devinit txgbe_probe(struct pci_dev *pdev, if (txgbe_mng_present(hw) && txgbe_is_sfp(hw) && ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP)) TCALL(hw, mac.ops.setup_link, - TXGBE_LINK_SPEED_10GB_FULL | TXGBE_LINK_SPEED_1GB_FULL, - true); + TXGBE_LINK_SPEED_25GB_FULL | TXGBE_LINK_SPEED_10GB_FULL | + TXGBE_LINK_SPEED_1GB_FULL, true); TCALL(hw, mac.ops.setup_eee, (adapter->flags2 & TXGBE_FLAG2_EEE_CAPABLE) && (adapter->flags2 & TXGBE_FLAG2_EEE_ENABLED)); - if (TXGBE_DIS_COMP_TIMEOUT == 1) { - pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &pvalue); - pvalue = pvalue | 0x10; - pcie_capability_write_word(pdev, PCI_EXP_DEVCTL2, pvalue); - adapter->cmplt_to_dis = true; - e_info(probe, "disable completion timeout\n"); - } + if (hw->mac.type == txgbe_mac_sp) + if (TXGBE_DIS_COMP_TIMEOUT == 1) { + pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &pvalue); + pvalue = pvalue | 0x10; + pcie_capability_write_word(pdev, PCI_EXP_DEVCTL2, pvalue); + adapter->cmplt_to_dis = true; + e_info(probe, "disable completion timeout\n"); + } return 0; @@ -12517,6 +14098,8 @@ static void __devexit txgbe_remove(struct pci_dev *pdev) if (!adapter) return; + mutex_destroy(&adapter->e56_lock); + hw = &adapter->hw; 
txgbe_mac_set_default_filter(adapter, hw->mac.perm_addr); @@ -12581,7 +14164,9 @@ static void __devexit txgbe_remove(struct pci_dev *pdev) #endif disable_dev = !test_and_set_bit(__TXGBE_DISABLED, &adapter->state); free_netdev(netdev); - +#ifdef HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING + pci_disable_pcie_error_reporting(pdev); +#endif if (disable_dev) pci_disable_device(pdev); } @@ -12772,12 +14357,13 @@ static pci_ers_result_t txgbe_io_slot_reset(struct pci_dev *pdev) e_info(hw, "in txgbe_io_slot_reset\n"); - if (adapter->cmplt_to_dis) { - pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &value); - value |= 0x10; - pcie_capability_write_word(adapter->pdev, PCI_EXP_DEVCTL2, value); - adapter->cmplt_to_dis = false; - } + if (adapter->hw.mac.type == txgbe_mac_sp) + if (adapter->cmplt_to_dis) { + pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &value); + value |= 0x10; + pcie_capability_write_word(adapter->pdev, PCI_EXP_DEVCTL2, value); + adapter->cmplt_to_dis = false; + } if (pci_enable_device_mem(pdev)) { e_err(probe, "Cannot re-enable PCI device after reset.\n"); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.c index 691e6957a24a5d3061a1618e1cb46a71830aad82..588c0331f97dca6140861ff7fd1516dbdce244b7 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.c @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -14,7 +14,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". 
* - * based on ixgbe_mbx.c, Copyright(c) 1999 - 2017 Intel Corporation. + * based on txgbe_mbx.c, Copyright(c) 1999 - 2017 Intel Corporation. * Contact Information: * Linux NICS * e1000-devel Mailing List @@ -130,7 +130,7 @@ int txgbe_check_for_rst(struct txgbe_hw *hw, u16 mbx_id) * * returns SUCCESS if it successfully received a message notification **/ -int txgbe_poll_for_msg(struct txgbe_hw *hw, u16 mbx_id) +static int txgbe_poll_for_msg(struct txgbe_hw *hw, u16 mbx_id) { struct txgbe_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; @@ -160,7 +160,7 @@ int txgbe_poll_for_msg(struct txgbe_hw *hw, u16 mbx_id) * * returns SUCCESS if it successfully received a message acknowledgement **/ -int txgbe_poll_for_ack(struct txgbe_hw *hw, u16 mbx_id) +static int txgbe_poll_for_ack(struct txgbe_hw *hw, u16 mbx_id) { struct txgbe_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; @@ -261,7 +261,7 @@ void txgbe_init_mbx_ops(struct txgbe_hw *hw) * This function is used to read the v2p mailbox without losing the read to * clear status bits. **/ -u32 txgbe_read_v2p_mailbox(struct txgbe_hw *hw) +static u32 txgbe_read_v2p_mailbox(struct txgbe_hw *hw) { u32 v2p_mailbox = rd32(hw, TXGBE_VXMAILBOX); @@ -282,7 +282,7 @@ u32 txgbe_read_v2p_mailbox(struct txgbe_hw *hw) * This function is used to check for the read to clear bits within * the V2P mailbox. 
**/ -int txgbe_check_for_bit_vf(struct txgbe_hw *hw, u32 mask) +static int txgbe_check_for_bit_vf(struct txgbe_hw *hw, u32 mask) { u32 mailbox = txgbe_read_v2p_mailbox(hw); @@ -298,7 +298,7 @@ int txgbe_check_for_bit_vf(struct txgbe_hw *hw, u32 mask) * * returns SUCCESS if the PF has set the Status bit or else ERR_MBX **/ -int txgbe_check_for_msg_vf(struct txgbe_hw *hw, u16 mbx_id) +static int txgbe_check_for_msg_vf(struct txgbe_hw *hw, u16 mbx_id) { int err = TXGBE_ERR_MBX; @@ -320,7 +320,7 @@ int txgbe_check_for_msg_vf(struct txgbe_hw *hw, u16 mbx_id) * * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX **/ -int txgbe_check_for_ack_vf(struct txgbe_hw *hw, u16 mbx_id) +static int txgbe_check_for_ack_vf(struct txgbe_hw *hw, u16 mbx_id) { int err = TXGBE_ERR_MBX; @@ -342,7 +342,7 @@ int txgbe_check_for_ack_vf(struct txgbe_hw *hw, u16 mbx_id) * * returns true if the PF has set the reset done bit or else false **/ -int txgbe_check_for_rst_vf(struct txgbe_hw *hw, u16 mbx_id) +static int txgbe_check_for_rst_vf(struct txgbe_hw *hw, u16 mbx_id) { int err = TXGBE_ERR_MBX; @@ -362,7 +362,7 @@ int txgbe_check_for_rst_vf(struct txgbe_hw *hw, u16 mbx_id) * * return SUCCESS if we obtained the mailbox lock **/ -int txgbe_obtain_mbx_lock_vf(struct txgbe_hw *hw) +static int txgbe_obtain_mbx_lock_vf(struct txgbe_hw *hw) { int err = TXGBE_ERR_MBX; u32 mailbox; @@ -390,7 +390,7 @@ int txgbe_obtain_mbx_lock_vf(struct txgbe_hw *hw) * * returns SUCCESS if it successfully copied message into the buffer **/ -int txgbe_write_mbx_vf(struct txgbe_hw *hw, u32 *msg, u16 size, +static int txgbe_write_mbx_vf(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) { int err; @@ -430,7 +430,7 @@ int txgbe_write_mbx_vf(struct txgbe_hw *hw, u32 *msg, u16 size, * * returns SUCCESS if it successfuly read message from buffer **/ -int txgbe_read_mbx_vf(struct txgbe_hw *hw, u32 *msg, u16 size, +static int txgbe_read_mbx_vf(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) { int err = 0; @@ 
-488,7 +488,7 @@ void txgbe_init_mbx_params_vf(struct txgbe_hw *hw) mbx->stats.rsts = 0; } -int txgbe_check_for_bit_pf(struct txgbe_hw *hw, u32 mask, int index) +static int txgbe_check_for_bit_pf(struct txgbe_hw *hw, u32 mask, int index) { u32 mbvficr = rd32(hw, TXGBE_MBVFICR(index)); int err = TXGBE_ERR_MBX; @@ -508,7 +508,7 @@ int txgbe_check_for_bit_pf(struct txgbe_hw *hw, u32 mask, int index) * * returns SUCCESS if the VF has set the Status bit or else ERR_MBX **/ -int txgbe_check_for_msg_pf(struct txgbe_hw *hw, u16 vf) +static int txgbe_check_for_msg_pf(struct txgbe_hw *hw, u16 vf) { int err = TXGBE_ERR_MBX; int index = TXGBE_MBVFICR_INDEX(vf); @@ -530,7 +530,7 @@ int txgbe_check_for_msg_pf(struct txgbe_hw *hw, u16 vf) * * returns SUCCESS if the VF has set the Status bit or else ERR_MBX **/ -int txgbe_check_for_ack_pf(struct txgbe_hw *hw, u16 vf) +static int txgbe_check_for_ack_pf(struct txgbe_hw *hw, u16 vf) { int err = TXGBE_ERR_MBX; int index = TXGBE_MBVFICR_INDEX(vf); @@ -552,7 +552,7 @@ int txgbe_check_for_ack_pf(struct txgbe_hw *hw, u16 vf) * * returns SUCCESS if the VF has set the Status bit or else ERR_MBX **/ -int txgbe_check_for_rst_pf(struct txgbe_hw *hw, u16 vf) +static int txgbe_check_for_rst_pf(struct txgbe_hw *hw, u16 vf) { u32 reg_offset = (vf < 32) ? 
0 : 1; u32 vf_shift = vf % 32; @@ -577,22 +577,31 @@ int txgbe_check_for_rst_pf(struct txgbe_hw *hw, u16 vf) * * return SUCCESS if we obtained the mailbox lock **/ -int txgbe_obtain_mbx_lock_pf(struct txgbe_hw *hw, u16 vf) +static int txgbe_obtain_mbx_lock_pf(struct txgbe_hw *hw, u16 vf) { + struct txgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; int err = TXGBE_ERR_MBX; u32 mailbox; - /* Take ownership of the buffer */ - wr32(hw, TXGBE_PXMAILBOX(vf), TXGBE_PXMAILBOX_PFU); + while (countdown--) { + /* Take ownership of the buffer */ + wr32(hw, TXGBE_PXMAILBOX(vf), TXGBE_PXMAILBOX_PFU); - /* reserve mailbox for vf use */ - mailbox = rd32(hw, TXGBE_PXMAILBOX(vf)); - if (mailbox & TXGBE_PXMAILBOX_PFU) - err = 0; - else - ERROR_REPORT2(TXGBE_ERROR_POLLING, - "Failed to obtain mailbox lock for PF%d", vf); + /* reserve mailbox for vf use */ + mailbox = rd32(hw, TXGBE_PXMAILBOX(vf)); + if (mailbox & TXGBE_PXMAILBOX_PFU) { + err = 0; + break; + } + /* Wait a bit before trying again */ + usec_delay(mbx->udelay); + } + + if (err) + ERROR_REPORT2(TXGBE_ERROR_POLLING, + "Failed to obtain mailbox lock for PF%d", vf); return err; } @@ -606,7 +615,7 @@ int txgbe_obtain_mbx_lock_pf(struct txgbe_hw *hw, u16 vf) * * returns SUCCESS if it successfully copied message into the buffer **/ -int txgbe_write_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size, +static int txgbe_write_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size, u16 vf) { int err; @@ -649,7 +658,7 @@ int txgbe_write_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size, * memory buffer. The presumption is that the caller knows that there was * a message due to a VF request so no polling for message is needed. 
**/ -int txgbe_read_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size, +static int txgbe_read_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size, u16 vf) { int err; @@ -686,8 +695,8 @@ void txgbe_init_mbx_params_pf(struct txgbe_hw *hw) { struct txgbe_mbx_info *mbx = &hw->mbx; - mbx->timeout = 0; - mbx->udelay = 0; + mbx->timeout = TXGBE_VF_MBX_INIT_TIMEOUT; + mbx->udelay = TXGBE_VF_MBX_INIT_DELAY; mbx->size = TXGBE_VXMAILBOX_SIZE; diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.h index 70a355d3b26845d536520f5bfefa4b0e8ad4141e..4fd40404a28b6e77143fae872d4fe4d49366e39f 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.h @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -14,7 +14,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * based on ixgbe_mbx.h, Copyright(c) 1999 - 2017 Intel Corporation. + * based on txgbe_mbx.h, Copyright(c) 1999 - 2017 Intel Corporation. 
* Contact Information: * Linux NICS * e1000-devel Mailing List @@ -81,6 +81,7 @@ #define TXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still * clear to send requests */ #define TXGBE_VT_MSGINFO_SHIFT 16 +#define TXGBE_VT_MSGINFO_VLAN_OFFLOAD_SHIFT 17 /* bits 23:16 are used for extra info for certain messages */ #define TXGBE_VT_MSGINFO_MASK (0xFF << TXGBE_VT_MSGINFO_SHIFT) @@ -97,6 +98,8 @@ enum txgbe_pfvf_api_rev { txgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ txgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ txgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ + txgbe_mbox_api_21, /* API version 2.1 */ + txgbe_mbox_api_22, /* API version 2.2 */ txgbe_mbox_api_unknown, /* indicates that API version is not known */ }; @@ -120,9 +123,17 @@ enum txgbe_pfvf_api_rev { #define TXGBE_VF_UPDATE_XCAST_MODE 0x0c #define TXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */ #define TXGBE_VF_GET_FW_VERSION 0x11 /* get fw version */ + +/* mailbox API, version 2.1 VF requests */ +#define TXGBE_VF_SET_5TUPLE 0x20 /* VF request PF for 5-tuple filter */ +#define TXGBE_VF_QUEUE_RATE_LIMIT 0x21 /* VF request PF to set vf-queue rate limit */ + +/* mailbox API, version 2.2 VF requests */ +#define TXGBE_VF_QUEUE_RATE_LIMIT 0x21 /* VF request PF to set vf-queue rate limit */ + #define TXGBE_VF_BACKUP 0x8001 /* VF requests backup */ -/* mode choices for IXGBE_VF_UPDATE_XCAST_MODE */ +/* mode choices for TXGBE_VF_UPDATE_XCAST_MODE */ enum txgbevf_xcast_modes { TXGBEVF_XCAST_MODE_NONE = 0, TXGBEVF_XCAST_MODE_MULTI, @@ -130,6 +141,25 @@ enum txgbevf_xcast_modes { TXGBEVF_XCAST_MODE_PROMISC, }; +enum txgbevf_5tuple_msg { + TXGBEVF_5T_REQ = 0, + TXGBEVF_5T_CMD, + TXGBEVF_5T_CTRL0, + TXGBEVF_5T_CTRL1, + TXGBEVF_5T_PORT, + TXGBEVF_5T_DA, + TXGBEVF_5T_SA, + TXGBEVF_5T_MAX, /* must be last */ +}; + +#define TXGBEVF_5T_ADD_SHIFT 31 + +enum txgbevf_queue_rate_limit_msg { + TXGBEVF_Q_RATE_REQ = 0, + TXGBEVF_Q_RATE_INDEX, + 
TXGBEVF_Q_RATE_LIMIT, +}; + /* GET_QUEUES return data indices within the mailbox */ #define TXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ #define TXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ @@ -142,6 +172,9 @@ enum txgbevf_xcast_modes { #define TXGBE_VF_MC_TYPE_WORD 3 #define TXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ +#define TXGBE_PF_NOFITY_VF_LINK_STATUS 0x1 +#define TXGBE_PF_NOFITY_VF_NET_NOT_RUNNING BIT(31) + /* mailbox API, version 2.0 VF requests */ #define TXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ @@ -157,7 +190,7 @@ enum txgbevf_xcast_modes { #define TXGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */ #define TXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ -#define TXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ +#define TXGBE_VF_MBX_INIT_DELAY 50 /* microseconds between retries */ int txgbe_read_mbx(struct txgbe_hw *, u32 *, u16, u16); int txgbe_write_mbx(struct txgbe_hw *, u32 *, u16, u16); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_mtd.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_mtd.c index 2671cbfbdbd5a92cd5cffeff46aa18105b612f76..2a4cb1c13a5857e5a23048dcfbc20e087639eaea 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_mtd.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_mtd.c @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -186,7 +186,7 @@ MTD_STATUS mtdWait(IN MTD_UINT x) #define MTD_REG_CCCR9 31, 0xF05E /* do not enclose in parentheses */ #define MTD_REG_SCR 31, 0xF0F0 /* do not enclose in parentheses */ #define MTD_REG_ECSR 31, 0xF0F5 /* do not enclose in parentheses */ -MTD_STATUS mtdCheckDeviceCapabilities( +static MTD_STATUS mtdCheckDeviceCapabilities( IN MTD_DEV_PTR devPtr, IN MTD_U16 port, OUT MTD_BOOL * phyHasMacsec, @@ -298,7 +298,7 @@ MTD_STATUS mtdCheckDeviceCapabilities( return MTD_OK; } -MTD_STATUS mtdIsPhyReadyAfterReset( +static MTD_STATUS mtdIsPhyReadyAfterReset( IN MTD_DEV_PTR devPtr, IN MTD_U16 port, OUT MTD_BOOL * phyReady) @@ -351,7 +351,7 @@ MTD_STATUS mtdSoftwareReset( } } -MTD_STATUS mtdIsPhyReadyAfterHardwareReset( +static MTD_STATUS mtdIsPhyReadyAfterHardwareReset( IN MTD_DEV_PTR devPtr, IN MTD_U16 port, OUT MTD_BOOL *phyReady) @@ -931,7 +931,7 @@ MTD_STATUS mtdSetPauseAdvertisement( autonegotiation like fast retrain, fast retrain type, etc. 
******************************************************************************/ -MTD_STATUS mtdAutonegIsCompleted( +static MTD_STATUS mtdAutonegIsCompleted( IN MTD_DEV_PTR devPtr, IN MTD_U16 port, OUT MTD_BOOL *anStatusReady) @@ -1219,7 +1219,7 @@ MTD_STATUS mtdIsPhyRevisionValid(IN MTD_DEVICE_ID phyRev) } /* mtdCunit.c */ -MTD_STATUS mtdCunitSwReset( +static MTD_STATUS mtdCunitSwReset( IN MTD_DEV_PTR devPtr, IN MTD_U16 port) { @@ -1227,7 +1227,7 @@ MTD_STATUS mtdCunitSwReset( } /* mtdHxunit.c */ -MTD_STATUS mtdRerunSerdesAutoInitializationUseAutoMode( +static MTD_STATUS mtdRerunSerdesAutoInitializationUseAutoMode( IN MTD_DEV_PTR devPtr, IN MTD_U16 port) { @@ -1375,7 +1375,7 @@ MTD_STATUS mtdSetMacInterfaceControl( * None * *******************************************************************************/ -MTD_SEM mtdSemCreate( +static MTD_SEM mtdSemCreate( IN MTD_DEV * dev, IN MTD_SEM_BEGIN_STATE state) { @@ -1405,7 +1405,7 @@ MTD_SEM mtdSemCreate( * None * *******************************************************************************/ -MTD_STATUS mtdSemDelete( +static MTD_STATUS mtdSemDelete( IN MTD_DEV * dev, IN MTD_SEM smid) { diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_mtd.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_mtd.h index 4c305e574c5df22f6e96e7ead1096501d634b7ab..c9f2e2f2c5c050261fd6aa1ebdf54a6528161270 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_mtd.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_mtd.h @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_osdep.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_osdep.h index 826053e9b8d66b50c9e439f4d99a85b58624e775..aea928277e55320dffaf9eef7c86bf50e0fb44a1 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_osdep.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_osdep.h @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -14,7 +14,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * based on ixgbe_osdep.h, Copyright(c) 1999 - 2017 Intel Corporation. + * based on txgbe_osdep.h, Copyright(c) 1999 - 2017 Intel Corporation. * Contact Information: * Linux NICS * e1000-devel Mailing List diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_param.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_param.c index 20e0d46877f45cf1114ebad49f988763fb6fd87d..35a43ab9e0d9d11132f83b5ed51235bb66280f18 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_param.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_param.c @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -14,7 +14,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * based on ixgbe_param.c, Copyright(c) 1999 - 2017 Intel Corporation. + * based on txgbe_param.c, Copyright(c) 1999 - 2017 Intel Corporation. * Contact Information: * Linux NICS * e1000-devel Mailing List @@ -67,6 +67,15 @@ MODULE_PARM_DESC(X, desc); #endif /* module_param_array */ +/* Tx unidirectional mode + * + * Valid Range: [0, 1] + * + * Default Value: 0 + */ +TXGBE_PARAM(TX_UNIDIR_MODE, "Tx Unidirectional Mode [0, 1]"); +#define TX_DEFAULT_UNIDIR_MODE 0 + /* ffe_main (KR/KX4/KX/SFI) * * Valid Range: 0-60 @@ -523,6 +532,33 @@ void __devinit txgbe_check_options(struct txgbe_adapter *adapter) bd = TXGBE_MAX_NIC; #endif } + { + u32 tx_unidir_mode; + static struct txgbe_option opt = { + .type = range_option, + .name = "TX_UNIDIR_MODE", + .err = + "using default of "__MODULE_STRING(TX_DEFAULT_UNIDIR_MODE), + .def = 0, + .arg = { .r = { .min = 0, + .max = 1} } + }; + +#ifdef module_param_array + if (num_TX_UNIDIR_MODE > bd) { +#endif + tx_unidir_mode = TX_UNIDIR_MODE[bd]; + if (tx_unidir_mode == OPTION_UNSET) + tx_unidir_mode = TX_UNIDIR_MODE[bd]; + txgbe_validate_option(&tx_unidir_mode, &opt); + adapter->tx_unidir_mode = tx_unidir_mode; +#ifdef module_param_array + } else { + adapter->tx_unidir_mode = 0; + } +#endif + } + { /* MAIN */ u32 ffe_main; static struct txgbe_option opt = { @@ -831,7 +867,8 @@ void __devinit txgbe_check_options(struct txgbe_adapter *adapter) .arg = { .r = { .min = 0, .max = 1} } }; - u32 rss = RSS[bd]; + u32 rss = min_t(int, txgbe_max_rss_indices(adapter), + num_online_cpus()); /* adjust Max allowed RSS queues based on MAC type */ opt.arg.r.max = min_t(int, txgbe_max_rss_indices(adapter), num_online_cpus()); @@ -839,6 +876,7 @@ void __devinit txgbe_check_options(struct 
txgbe_adapter *adapter) #ifdef module_param_array if (num_RSS > bd) { #endif + rss = RSS[bd]; txgbe_validate_option(&rss, &opt); /* base it off num_online_cpus() with hardware limit */ if (!rss) @@ -937,7 +975,7 @@ void __devinit txgbe_check_options(struct txgbe_adapter *adapter) "Disabling SR-IOV.\n"); } - adapter->num_vfs = vfs; + adapter->max_vfs = vfs; if (vfs) *aflags |= TXGBE_FLAG_SRIOV_ENABLED; @@ -946,10 +984,10 @@ void __devinit txgbe_check_options(struct txgbe_adapter *adapter) #ifdef module_param_array } else { if (opt.def == OPTION_DISABLED) { - adapter->num_vfs = 0; + adapter->max_vfs = 0; *aflags &= ~TXGBE_FLAG_SRIOV_ENABLED; } else { - adapter->num_vfs = opt.def; + adapter->max_vfs = opt.def; *aflags |= TXGBE_FLAG_SRIOV_ENABLED; } } @@ -962,14 +1000,14 @@ void __devinit txgbe_check_options(struct txgbe_adapter *adapter) "IOV is not supported on this " "hardware. Disabling IOV.\n"); *aflags &= ~TXGBE_FLAG_SRIOV_ENABLED; - adapter->num_vfs = 0; + adapter->max_vfs = 0; } else if (!(*aflags & TXGBE_FLAG_MQ_CAPABLE)) { DPRINTK(PROBE, INFO, "IOV is not supported while multiple " "queues are disabled. " "Disabling IOV.\n"); *aflags &= ~TXGBE_FLAG_SRIOV_ENABLED; - adapter->num_vfs = 0; + adapter->max_vfs = 0; } } } diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index ed5473ba924a012962bb1690e3dd0ba6f7074ad5..54444bb43e9f6227dc341aa993009c1f3f9a0d53 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -14,7 +14,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * based on ixgbe_phy.c, Copyright(c) 1999 - 2017 Intel Corporation. + * based on txgbe_phy.c, Copyright(c) 1999 - 2017 Intel Corporation. * Contact Information: * Linux NICS * e1000-devel Mailing List @@ -23,6 +23,7 @@ #include "txgbe_phy.h" #include "txgbe_mtd.h" +#include "txgbe.h" /** * txgbe_check_reset_blocked - check status of MNG FW veto bit @@ -57,6 +58,42 @@ s32 txgbe_get_phy_id(struct txgbe_hw *hw) u16 phy_id_high = 0; u16 phy_id_low = 0; u8 numport, thisport; + u32 i = 0; + + if (hw->mac.type == txgbe_mac_aml) { + hw->phy.addr = 0; + + for (i = 0; i < 32; i++) { + hw->phy.addr = i; + status = txgbe_read_phy_reg_mdi(hw, TXGBE_MDIO_PHY_ID_HIGH, 0, &phy_id_high); + if (status) { + printk("txgbe_read_phy_reg_mdi failed 1\n"); + return status; + } + printk("%d: phy_id_high 0x%x\n", i, phy_id_high); + if ((phy_id_high & 0xFFFF) == 0x0141) { + break; + } + } + + if (i == 32) { + printk("txgbe_read_phy_reg_mdi failed\n"); + return TXGBE_ERR_PHY; + } + + status = txgbe_read_phy_reg_mdi(hw, TXGBE_MDIO_PHY_ID_LOW, 0, &phy_id_low); + if (status) { + printk("txgbe_read_phy_reg_mdi failed 2\n"); + return status; + } + hw->phy.id = (u32)(phy_id_high & 0xFFFF) << 6; + hw->phy.id |= (u32)((phy_id_low & 0xFC00) >> 10); + + printk("txgbe_get_phy_id: phy_id 0x%x", hw->phy.id); + + return status; + + } status = mtdHwXmdioRead(&hw->phy_dev, hw->phy.addr, TXGBE_MDIO_PMA_PMD_DEV_TYPE, @@ -463,10 +500,12 @@ s32 txgbe_identify_module(struct txgbe_hw *hw) s32 status = TXGBE_ERR_SFP_NOT_PRESENT; switch (TCALL(hw, mac.ops.get_media_type)) { + case txgbe_media_type_fiber_qsfp: + status = txgbe_identify_qsfp_module(hw); + break; case txgbe_media_type_fiber: status = txgbe_identify_sfp_module(hw); break; - default: 
hw->phy.sfp_type = txgbe_sfp_type_not_present; status = TXGBE_ERR_SFP_NOT_PRESENT; @@ -490,15 +529,30 @@ s32 txgbe_identify_sfp_module(struct txgbe_hw *hw) u8 identifier = 0; u8 comp_codes_1g = 0; u8 comp_codes_10g = 0; + u8 comp_codes_25g = 0; + u8 comp_copper_len = 0; u8 oui_bytes[3] = {0, 0, 0}; u8 cable_tech = 0; u8 cable_spec = 0; u8 vendor_name[3] = {0, 0, 0}; u16 phy_data = 0; u32 swfw_mask = hw->phy.phy_semaphore_mask; + u32 value; + u8 sff8472_rev, addr_mode, databyte; + bool page_swap = false; + struct txgbe_adapter *adapter = hw->back; + int i; + + if (hw->mac.type == txgbe_mac_aml) { + value = rd32(hw, TXGBE_GPIO_EXT); + if (value & TXGBE_SFP1_MOD_ABS_LS) { + hw->phy.sfp_type = txgbe_sfp_type_not_present; + return TXGBE_ERR_SFP_NOT_PRESENT; + } + } if (0 != TCALL(hw, mac.ops.acquire_swfw_sync, swfw_mask)) - return TXGBE_ERR_SWFW_SYNC; + return TXGBE_ERR_SWFW_SYNC; if (TCALL(hw, mac.ops.get_media_type) != txgbe_media_type_fiber) { hw->phy.sfp_type = txgbe_sfp_type_not_present; @@ -531,6 +585,18 @@ s32 txgbe_identify_sfp_module(struct txgbe_hw *hw) if (status != 0) goto err_read_i2c_eeprom; + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_25GBE_COMP_CODES, + &comp_codes_25g); + if (status != 0) + goto err_read_i2c_eeprom; + + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_COPPER_LENGTH, + &comp_copper_len); + if (status != 0) + goto err_read_i2c_eeprom; + status = TCALL(hw, phy.ops.read_i2c_eeprom, TXGBE_SFF_CABLE_TECHNOLOGY, &cable_tech); @@ -561,7 +627,36 @@ s32 txgbe_identify_sfp_module(struct txgbe_hw *hw) else hw->phy.sfp_type = txgbe_sfp_type_da_cu_core1; + + if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) { + hw->dac_sfp = true; + } + + if (comp_copper_len == 0x1) + hw->bypassCtle = true; + else + hw->bypassCtle = false; + + if (comp_codes_25g == TXGBE_SFF_25GBASECR_91FEC || + comp_codes_25g == TXGBE_SFF_25GBASECR_74FEC || + comp_codes_25g == TXGBE_SFF_25GBASECR_NOFEC) { + 
hw->phy.fiber_suppport_speed = + TXGBE_LINK_SPEED_25GB_FULL | + TXGBE_LINK_SPEED_10GB_FULL; + } else { + hw->phy.fiber_suppport_speed |= + TXGBE_LINK_SPEED_10GB_FULL; + } + if (!AUTO) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = txgbe_sfp_type_25g_sr_core0; + else + hw->phy.sfp_type = txgbe_sfp_type_25g_sr_core1; + } + } else if (cable_tech & TXGBE_SFF_DA_ACTIVE_CABLE) { + hw->dac_sfp = false; TCALL(hw, phy.ops.read_i2c_eeprom, TXGBE_SFF_CABLE_SPEC_COMP, &cable_spec); @@ -569,14 +664,44 @@ s32 txgbe_identify_sfp_module(struct txgbe_hw *hw) TXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) { if (hw->bus.lan_id == 0) hw->phy.sfp_type = - txgbe_sfp_type_da_act_lmt_core0; + txgbe_sfp_type_da_act_lmt_core0; else hw->phy.sfp_type = txgbe_sfp_type_da_act_lmt_core1; } else { - hw->phy.sfp_type = - txgbe_sfp_type_unknown; + hw->phy.sfp_type = txgbe_sfp_type_unknown; } + + if (comp_codes_25g == TXGBE_SFF_25GAUI_C2M_AOC_BER_5 || + comp_codes_25g == TXGBE_SFF_25GAUI_C2M_ACC_BER_5 || + comp_codes_25g == TXGBE_SFF_25GAUI_C2M_AOC_BER_12 || + comp_codes_25g == TXGBE_SFF_25GAUI_C2M_ACC_BER_12) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_25g_aoc_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_25g_aoc_core1; + } + } else if (comp_codes_25g == TXGBE_SFF_25GAUI_C2M_AOC_BER_5 || + comp_codes_25g == TXGBE_SFF_25GAUI_C2M_ACC_BER_5 || + comp_codes_25g == TXGBE_SFF_25GAUI_C2M_AOC_BER_12 || + comp_codes_25g == TXGBE_SFF_25GAUI_C2M_ACC_BER_12) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = txgbe_sfp_type_25g_aoc_core0; + else + hw->phy.sfp_type = txgbe_sfp_type_25g_aoc_core1; + } else if (comp_codes_25g == TXGBE_SFF_25GBASESR_CAPABLE || + comp_codes_25g == TXGBE_SFF_25GBASEER_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = txgbe_sfp_type_25g_sr_core0; + else + hw->phy.sfp_type = txgbe_sfp_type_25g_sr_core1; + } else if (comp_codes_25g == TXGBE_SFF_25GBASELR_CAPABLE ) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = txgbe_sfp_type_25g_lr_core0; + else + 
hw->phy.sfp_type = txgbe_sfp_type_25g_lr_core1; } else if (comp_codes_10g & (TXGBE_SFF_10GBASESR_CAPABLE | TXGBE_SFF_10GBASELR_CAPABLE)) { @@ -617,12 +742,20 @@ s32 txgbe_identify_sfp_module(struct txgbe_hw *hw) /* Determine if the SFP+ PHY is dual speed or not. */ hw->phy.multispeed_fiber = false; - if (((comp_codes_1g & TXGBE_SFF_1GBASESX_CAPABLE) && - (comp_codes_10g & TXGBE_SFF_10GBASESR_CAPABLE)) || - ((comp_codes_1g & TXGBE_SFF_1GBASELX_CAPABLE) && - (comp_codes_10g & TXGBE_SFF_10GBASELR_CAPABLE))) - hw->phy.multispeed_fiber = true; - + if (hw->mac.type == txgbe_mac_aml) { + if ((comp_codes_25g == TXGBE_SFF_25GBASESR_CAPABLE || + comp_codes_25g == TXGBE_SFF_25GBASELR_CAPABLE || + comp_codes_25g == TXGBE_SFF_25GBASEER_CAPABLE) && + ((comp_codes_10g & TXGBE_SFF_10GBASESR_CAPABLE) || + (comp_codes_10g & TXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = true; + } else { + if (((comp_codes_1g & TXGBE_SFF_1GBASESX_CAPABLE) && + (comp_codes_10g & TXGBE_SFF_10GBASESR_CAPABLE)) || + ((comp_codes_1g & TXGBE_SFF_1GBASELX_CAPABLE) && + (comp_codes_10g & TXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = true; + } /* Determine PHY vendor */ if (hw->phy.type != txgbe_phy_nl) { hw->phy.id = identifier; @@ -725,7 +858,7 @@ s32 txgbe_identify_sfp_module(struct txgbe_hw *hw) } /* Verify supported 1G SFP modules */ - if (comp_codes_10g == 0 && + if (comp_codes_10g == 0 && comp_codes_25g == 0 && !(hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1 || hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 || hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core0 || @@ -737,7 +870,52 @@ s32 txgbe_identify_sfp_module(struct txgbe_hw *hw) goto out; } } + if (hw->mac.type == txgbe_mac_sp) { + /*record eeprom info*/ + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_SFF_8472_COMP, + &sff8472_rev); + if (status != 0) + goto err_read_i2c_eeprom; + + /* addressing mode is not supported */ + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_SFF_8472_SWAP, + &addr_mode); + if 
(status != 0) + goto err_read_i2c_eeprom; + if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) { + e_err(drv, "Address change required to access page 0xA2, " + "but not supported. Please report the module type to the " + "driver maintainers.\n"); + page_swap = true; + } + + if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap || + !(addr_mode & TXGBE_SFF_DDM_IMPLEMENTED)) { + /* We have a SFP, but it does not support SFF-8472 */ + adapter->eeprom_type = ETH_MODULE_SFF_8079; + adapter->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + /* We have a SFP which supports a revision of SFF-8472. */ + adapter->eeprom_type = ETH_MODULE_SFF_8472; + adapter->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + for (i = 0; i < adapter->eeprom_len; i++) { + if (i < ETH_MODULE_SFF_8079_LEN) + status = TCALL(hw, phy.ops.read_i2c_eeprom, i, + &databyte); + else + status = TCALL(hw, phy.ops.read_i2c_sff8472, i, + &databyte); + + if (status != 0) + goto err_read_i2c_eeprom; + + adapter->i2c_eeprom[i] = databyte; + } + } out: TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); @@ -754,6 +932,106 @@ s32 txgbe_identify_sfp_module(struct txgbe_hw *hw) return TXGBE_ERR_SFP_NOT_PRESENT; } +s32 txgbe_identify_qsfp_module(struct txgbe_hw *hw) +{ + s32 status = TXGBE_ERR_PHY_ADDR_INVALID; + u8 identifier = 0, transceiver_type = 0; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + u32 value; + + if (hw->mac.type == txgbe_mac_aml40) { + value = rd32(hw, TXGBE_GPIO_EXT); + if (value & TXGBE_SFP1_MOD_PRST_LS) { + hw->phy.sfp_type = txgbe_sfp_type_not_present; + return TXGBE_ERR_SFP_NOT_PRESENT; + } + } + + if (0 != TCALL(hw, mac.ops.acquire_swfw_sync, swfw_mask)) + return TXGBE_ERR_SWFW_SYNC; + + if (TCALL(hw, mac.ops.get_media_type) != txgbe_media_type_fiber_qsfp) { + hw->phy.sfp_type = txgbe_sfp_type_not_present; + status = TXGBE_ERR_SFP_NOT_PRESENT; + goto out; + } + + /* LAN ID is needed for I2C access */ + txgbe_init_i2c(hw); + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_IDENTIFIER, + 
&identifier); + + if (status != 0) + goto err_read_i2c_eeprom; + + if (identifier == TXGBE_SFF_IDENTIFIER_QSFP || + identifier == TXGBE_SFF_IDENTIFIER_QSFP_PLUS) { + hw->phy.type = txgbe_phy_sfp_unknown; + + status = hw->phy.ops.read_i2c_eeprom(hw, + TXGBE_ETHERNET_COMP_OFFSET, + &transceiver_type); + if (status != 0) + goto err_read_i2c_eeprom; + + if (transceiver_type & TXGBE_SFF_ETHERNET_40G_CR4) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = txgbe_qsfp_type_40g_cu_core0; + else + hw->phy.sfp_type = txgbe_qsfp_type_40g_cu_core1; + hw->phy.fiber_suppport_speed = + TXGBE_LINK_SPEED_40GB_FULL | + TXGBE_LINK_SPEED_10GB_FULL; + + if (!AUTO) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = txgbe_qsfp_type_40g_sr_core0; + else + hw->phy.sfp_type = txgbe_qsfp_type_40g_sr_core1; + } + } + + if (transceiver_type & TXGBE_SFF_ETHERNET_40G_SR4) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = txgbe_qsfp_type_40g_sr_core0; + else + hw->phy.sfp_type = txgbe_qsfp_type_40g_sr_core1; + } + + if (transceiver_type & TXGBE_SFF_ETHERNET_40G_LR4) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = txgbe_qsfp_type_40g_lr_core0; + else + hw->phy.sfp_type = txgbe_qsfp_type_40g_lr_core1; + } + + if (transceiver_type & TXGBE_SFF_ETHERNET_40G_ACTIVE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = txgbe_qsfp_type_40g_active_core0; + else + hw->phy.sfp_type = txgbe_qsfp_type_40g_active_core1; + } + + } else { + hw->phy.type = txgbe_phy_sfp_unsupported; + status = TXGBE_ERR_SFP_NOT_SUPPORTED; + } +out: + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); + + return status; + +err_read_i2c_eeprom: + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); + + hw->phy.sfp_type = txgbe_sfp_type_not_present; + hw->phy.id = 0; + hw->phy.type = txgbe_phy_unknown; + + return TXGBE_ERR_SFP_NOT_PRESENT; +} + s32 txgbe_init_i2c(struct txgbe_hw *hw) { wr32(hw, TXGBE_I2C_ENABLE, 0); @@ -771,12 +1049,20 @@ s32 txgbe_init_i2c(struct txgbe_hw *hw) * SCL_Low_time = [(LCNT + 1) * ic_clk] - SCL_Fall_time + 
SCL_Rise_time * set I2C Frequency to Standard Speed Mode 100KHz */ - wr32(hw, TXGBE_I2C_SS_SCL_HCNT, 780); - wr32(hw, TXGBE_I2C_SS_SCL_LCNT, 780); - + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + wr32(hw, TXGBE_I2C_SS_SCL_HCNT, 2000); + wr32(hw, TXGBE_I2C_SS_SCL_LCNT, 2000); + + wr32m(hw, TXGBE_I2C_SDA_HOLD, + TXGBE_I2C_SDA_RX_HOLD | TXGBE_I2C_SDA_TX_HOLD, 0x640064); + } else if (hw->mac.type == txgbe_mac_sp) { + wr32(hw, TXGBE_I2C_SS_SCL_HCNT, 780); + wr32(hw, TXGBE_I2C_SS_SCL_LCNT, 780); + } + wr32(hw, TXGBE_I2C_RX_TL, 0); /* 1byte for rx full signal */ wr32(hw, TXGBE_I2C_TX_TL, 4); - + wr32(hw, TXGBE_I2C_SCL_STUCK_TIMEOUT, 0xFFFFFF); wr32(hw, TXGBE_I2C_SDA_STUCK_TIMEOUT, 0xFFFFFF); @@ -863,7 +1149,29 @@ s32 txgbe_read_i2c_sff8472(struct txgbe_hw *hw, u8 byte_offset, TXGBE_I2C_EEPROM_DEV_ADDR2, sff8472_data); } - + +/** + * txgbe_read_i2c_sff8636 - Reads 8 bit word over I2C interface + * @hw: pointer to hardware structure + * @page: SFF-8636 page to select (written to TXGBE_SFF_QSFP_PAGE_SELECT) + * @byte_offset: byte offset within the selected page + * @sff8636_data: value read + * + * Performs byte read operation to QSFP module's SFF-8636 data over I2C + **/ +s32 txgbe_read_i2c_sff8636(struct txgbe_hw *hw, u8 page ,u8 byte_offset, + u8 *sff8636_data) +{ + txgbe_init_i2c(hw); + TCALL(hw, phy.ops.write_i2c_byte, TXGBE_SFF_QSFP_PAGE_SELECT, + TXGBE_I2C_EEPROM_DEV_ADDR, + page); + + return TCALL(hw, phy.ops.read_i2c_byte, byte_offset, + TXGBE_I2C_EEPROM_DEV_ADDR, + sff8636_data); +} + + /** * txgbe_read_i2c_sfp_phy - Reads 16 bit word over I2C interface * @hw: pointer to hardware structure * @byte_offset: byte offset at address 0xA2 * * Performs byte read operation to SFP module's SFF-8472 data over I2C
**/ STATIC s32 txgbe_write_i2c_byte_int(struct txgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 data, bool lock) + u8 dev_addr, u8 data) { s32 status = 0; - u32 swfw_mask = hw->phy.phy_semaphore_mask; UNREFERENCED_PARAMETER(dev_addr); - if (lock && 0 != TCALL(hw, mac.ops.acquire_swfw_sync, swfw_mask)) - return TXGBE_ERR_SWFW_SYNC; - /* wait tx empty */ status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, TXGBE_I2C_INTR_STAT_TX_EMPTY, TXGBE_I2C_INTR_STAT_TX_EMPTY, @@ -1126,8 +1430,7 @@ STATIC s32 txgbe_write_i2c_byte_int(struct txgbe_hw *hw, u8 byte_offset, if (status != 0) goto out; - wr32(hw, TXGBE_I2C_DATA_CMD, - byte_offset | TXGBE_I2C_DATA_CMD_STOP); + wr32(hw, TXGBE_I2C_DATA_CMD, byte_offset); wr32(hw, TXGBE_I2C_DATA_CMD, data | TXGBE_I2C_DATA_CMD_WRITE); @@ -1135,11 +1438,7 @@ STATIC s32 txgbe_write_i2c_byte_int(struct txgbe_hw *hw, u8 byte_offset, status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, TXGBE_I2C_INTR_STAT_RX_FULL, TXGBE_I2C_INTR_STAT_RX_FULL, TXGBE_I2C_TIMEOUT, 10); - out: - if (lock) - TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); - return status; } @@ -1156,7 +1455,7 @@ s32 txgbe_write_i2c_byte(struct txgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data) { return txgbe_write_i2c_byte_int(hw, byte_offset, dev_addr, - data, true); + data); } @@ -1172,14 +1471,28 @@ s32 txgbe_tn_check_overtemp(struct txgbe_hw *hw) s32 status = 0; u32 ts_state; - /* Check that the LASI temp alarm status was triggered */ - ts_state = rd32(hw, TXGBE_TS_ALARM_ST); - - if (ts_state & TXGBE_TS_ALARM_ST_DALARM) - status = TXGBE_ERR_UNDERTEMP; - else if (ts_state & TXGBE_TS_ALARM_ST_ALARM) - status = TXGBE_ERR_OVERTEMP; + if (hw->mac.type == txgbe_mac_aml || + hw->mac.type == txgbe_mac_aml40) { + ts_state = rd32(hw, TXGBE_AML_INTR_HIGH_STS); + if (ts_state) { + wr32(hw, TXGBE_AML_INTR_RAW_HI, TXGBE_AML_INTR_CL_HI); + wr32(hw, TXGBE_AML_INTR_RAW_LO, TXGBE_AML_INTR_CL_LO); + status = TXGBE_ERR_OVERTEMP; + } else { + ts_state = rd32(hw, TXGBE_AML_INTR_LOW_STS); + if (ts_state) { + status = 
TXGBE_ERR_UNDERTEMP; + } + } + } else { + /* Check that the LASI temp alarm status was triggered */ + ts_state = rd32(hw, TXGBE_TS_ALARM_ST); + if (ts_state & TXGBE_TS_ALARM_ST_DALARM) + status = TXGBE_ERR_UNDERTEMP; + else if (ts_state & TXGBE_TS_ALARM_ST_ALARM) + status = TXGBE_ERR_OVERTEMP; + } return status; } @@ -1240,3 +1553,52 @@ s32 txgbe_get_lp_advertised_pause(struct txgbe_hw *hw, u8 *pause_bit) { return mtdGetLPAdvertisedPause(&hw->phy_dev, hw->phy.addr, pause_bit); } + +s32 txgbe_external_phy_suspend(struct txgbe_hw *hw) +{ + s32 status = 0; + u16 value = 0; + + status = mtdHwXmdioRead(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_VENDOR_SPECIFIC_2_DEV_TYPE, + TXGBE_MDIO_VENDOR_SPECIFIC_2_PORT_CTRL, &value); + + if (status) + goto out; + + value |= TXGBE_MDIO_VENDOR_SPECIFIC_2_POWER; + + status = mtdHwXmdioWrite(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_VENDOR_SPECIFIC_2_DEV_TYPE, + TXGBE_MDIO_VENDOR_SPECIFIC_2_PORT_CTRL, value); + +out: + return status; +} + +s32 txgbe_external_phy_resume(struct txgbe_hw *hw) +{ + s32 status = 0; + u16 value = 0; + + status = mtdHwXmdioRead(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_VENDOR_SPECIFIC_2_DEV_TYPE, + TXGBE_MDIO_VENDOR_SPECIFIC_2_PORT_CTRL, &value); + + if (status) + goto out; + + if (!(value & ~TXGBE_MDIO_VENDOR_SPECIFIC_2_POWER)) + goto out; + + value |= TXGBE_MDIO_VENDOR_SPECIFIC_2_SW_RST; + value &= ~TXGBE_MDIO_VENDOR_SPECIFIC_2_POWER; + + status = mtdHwXmdioWrite(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_VENDOR_SPECIFIC_2_DEV_TYPE, + TXGBE_MDIO_VENDOR_SPECIFIC_2_PORT_CTRL, value); + +out: + return status; +} + diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h index 67647c7a986d0b1f2662c4113e425016079bdc2a..52dee34ad32f82aaf52e66f8e9d376167fc593eb 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 
Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -14,7 +14,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * based on ixgbe_phy.h, Copyright(c) 1999 - 2017 Intel Corporation. + * based on txgbe_phy.h, Copyright(c) 1999 - 2017 Intel Corporation. * Contact Information: * Linux NICS * e1000-devel Mailing List @@ -39,11 +39,15 @@ /* EEPROM byte offsets */ #define TXGBE_SFF_IDENTIFIER 0x0 #define TXGBE_SFF_IDENTIFIER_SFP 0x3 +#define TXGBE_SFF_IDENTIFIER_QSFP 0xC +#define TXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD #define TXGBE_SFF_VENDOR_OUI_BYTE0 0x25 #define TXGBE_SFF_VENDOR_OUI_BYTE1 0x26 #define TXGBE_SFF_VENDOR_OUI_BYTE2 0x27 #define TXGBE_SFF_1GBE_COMP_CODES 0x6 #define TXGBE_SFF_10GBE_COMP_CODES 0x3 +#define TXGBE_SFF_25GBE_COMP_CODES 0x24 +#define TXGBE_SFF_COPPER_LENGTH 0x12 #define TXGBE_SFF_CABLE_TECHNOLOGY 0x8 #define TXGBE_SFF_CABLE_SPEC_COMP 0x3C #define TXGBE_SFF_DDM_IMPLEMENTED 0x40 @@ -51,6 +55,11 @@ #define TXGBE_SFF_SFF_8472_COMP 0x5E #define TXGBE_SFF_SFF_8472_OSCB 0x6E #define TXGBE_SFF_SFF_8472_ESCB 0x76 +#define TXGBE_SFF_SFF_REVISION_ADDR 0x01 +#define TXGBE_SFF_QSFP_PAGE_SELECT 0x7F + +#define TXGBE_MODULE_QSFP_MAX_LEN 640 + #define TXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD #define TXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5 #define TXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6 @@ -67,12 +76,37 @@ /* Bitmasks */ #define TXGBE_SFF_DA_PASSIVE_CABLE 0x4 #define TXGBE_SFF_DA_ACTIVE_CABLE 0x8 -#define TXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 #define TXGBE_SFF_1GBASESX_CAPABLE 0x1 #define TXGBE_SFF_1GBASELX_CAPABLE 0x2 #define TXGBE_SFF_1GBASET_CAPABLE 0x8 #define TXGBE_SFF_10GBASESR_CAPABLE 0x10 #define TXGBE_SFF_10GBASELR_CAPABLE 0x20 +#define 
TXGBE_SFF_25GBASESR_CAPABLE 0x2 +#define TXGBE_SFF_25GBASELR_CAPABLE 0x3 +#define TXGBE_SFF_25GBASEER_CAPABLE 0x4 +#define TXGBE_SFF_25GBASECR_91FEC 0xB +#define TXGBE_SFF_25GBASECR_74FEC 0xC +#define TXGBE_SFF_25GBASECR_NOFEC 0xD +#define TXGBE_SFF_40GBASE_SR_CAPABLE 0x10 +#define TXGBE_SFF_4x10GBASESR_CAP 0x11 +#define TXGBE_SFF_40GBASEPSM4_Parallel 0x12 +#define TXGBE_SFF_40GBASE_SWMD4_CAP 0x1f +#define TXGBE_SFF_COPPER_5M 0x5 +#define TXGBE_SFF_COPPER_3M 0x3 +#define TXGBE_SFF_COPPER_1M 0x1 + +#define TXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 +#define TXGBE_SFF_25GAUI_C2M_AOC_BER_5 0x1 +#define TXGBE_SFF_25GAUI_C2M_ACC_BER_5 0x8 +#define TXGBE_SFF_25GAUI_C2M_AOC_BER_12 0x18 +#define TXGBE_SFF_25GAUI_C2M_ACC_BER_12 0x19 + +#define TXGBE_ETHERNET_COMP_OFFSET 0x83 +#define TXGBE_SFF_ETHERNET_40G_CR4 BIT(3) +#define TXGBE_SFF_ETHERNET_40G_SR4 BIT(2) +#define TXGBE_SFF_ETHERNET_40G_LR4 BIT(1) +#define TXGBE_SFF_ETHERNET_40G_ACTIVE BIT(0) + #define TXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 #define TXGBE_SFF_SOFT_RS_SELECT_10G 0x8 #define TXGBE_SFF_SOFT_RS_SELECT_1G 0x0 @@ -168,6 +202,7 @@ s32 txgbe_get_phy_firmware_version(struct txgbe_hw *hw, s32 txgbe_identify_module(struct txgbe_hw *hw); s32 txgbe_identify_sfp_module(struct txgbe_hw *hw); +s32 txgbe_identify_qsfp_module(struct txgbe_hw *hw); s32 txgbe_tn_check_overtemp(struct txgbe_hw *hw); s32 txgbe_init_i2c(struct txgbe_hw *hw); s32 txgbe_clear_i2c(struct txgbe_hw *hw); @@ -186,6 +221,8 @@ s32 txgbe_write_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset, u8 eeprom_data); s32 txgbe_read_i2c_sff8472(struct txgbe_hw *hw, u8 byte_offset, u8 *sff8472_data); +s32 txgbe_read_i2c_sff8636(struct txgbe_hw *hw, u8 page ,u8 byte_offset, + u8 *sff8636_data); s32 txgbe_read_i2c_sfp_phy(struct txgbe_hw *hw, u16 byte_offset, u16 *data); @@ -194,6 +231,8 @@ s32 txgbe_uninit_external_phy(struct txgbe_hw *hw); s32 txgbe_set_phy_pause_advertisement(struct txgbe_hw *hw, u32 pause_bit); s32 txgbe_get_phy_advertised_pause(struct txgbe_hw *hw, u8 
*pause_bit); s32 txgbe_get_lp_advertised_pause(struct txgbe_hw *hw, u8 *pause_bit); +s32 txgbe_external_phy_suspend(struct txgbe_hw *hw); +s32 txgbe_external_phy_resume(struct txgbe_hw *hw); MTD_STATUS txgbe_read_mdio( MTD_DEV * dev, diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_procfs.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_procfs.c index c569cfc340f01070b78c7429a4e59e9b53c6e8b0..bae9c0ec4c8a66055606895c8dd1153666cab480 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_procfs.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_procfs.c @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -14,7 +14,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * based on ixgbe_procfs.h, Copyright(c) 1999 - 2017 Intel Corporation. + * based on txgbe_procfs.h, Copyright(c) 1999 - 2017 Intel Corporation. * Contact Information: * Linux NICS * e1000-devel Mailing List diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ptp.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ptp.c index dfefe228afc52ebd5854d31985f11368a5b86bbb..24474c0cae2d2fe5c5855269a6460ba4ecaeb930 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ptp.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ptp.c @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -14,7 +14,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * based on ixgbe_ptp.c, Copyright(c) 1999 - 2017 Intel Corporation. + * based on txgbe_ptp.c, Copyright(c) 1999 - 2017 Intel Corporation. * Contact Information: * Linux NICS * e1000-devel Mailing List @@ -23,6 +23,7 @@ #include "txgbe.h" +#include "txgbe_hw.h" #include /* @@ -55,7 +56,7 @@ * 100 Mbps 6.25 MHz 160*10^-9 0xA00000(0xFFFF/ns) * 10 Mbps 0.625 MHz 1600*10^-9 0xC7F380(0xFFF/ns) * FPGA 31.25 MHz 32 *10^-9 0x800000(0x3FFFF/ns) - * + * AMLITE 400MHZ 2.5*10^-9 0x0A0000 * These diagrams are only for the 10Gb link period * * +--------------+ +--------------+ @@ -71,16 +72,104 @@ #define TXGBE_INCVAL_100 0xA00000 #define TXGBE_INCVAL_10 0xC7F380 #define TXGBE_INCVAL_FPGA 0x800000 +#define TXGBE_INCVAL_AML 0xA00000 #define TXGBE_INCVAL_SHIFT_10GB 20 #define TXGBE_INCVAL_SHIFT_1GB 18 #define TXGBE_INCVAL_SHIFT_100 15 #define TXGBE_INCVAL_SHIFT_10 12 #define TXGBE_INCVAL_SHIFT_FPGA 17 +#define TXGBE_INCVAL_SHIFT_AML 21 #define TXGBE_OVERFLOW_PERIOD (HZ * 30) #define TXGBE_PTP_TX_TIMEOUT (HZ) +#define NS_PER_SEC 1000000000ULL +#define NS_PER_MSEC 1000000ULL + +static void txgbe_ptp_setup_sdp(struct txgbe_adapter *adapter) +{ + struct cyclecounter *cc = &adapter->hw_cc; + struct txgbe_hw *hw = &adapter->hw; + u32 tsauxc, rem, tssdp, tssdp1; + u32 trgttiml0,trgttimh0, trgttiml1, trgttimh1; + u64 ns = 0; + unsigned long flags; + + if (hw->mac.type != txgbe_mac_aml && + hw->mac.type != txgbe_mac_aml40) + return; + + if (TXGBE_1588_PPS_WIDTH * NS_PER_MSEC >= NS_PER_SEC) { + e_dev_err("PTP pps width cannot be longer than 1s!\n"); + return; + } + + /* disable the pin first */ + wr32(hw, TXGBE_TSEC_1588_AUX_CTL, 0); + TXGBE_WRITE_FLUSH(hw); + + if (!(adapter->flags2 & TXGBE_FLAG2_PTP_PPS_ENABLED)) { + if 
(adapter->pps_enabled == 1) { + adapter->pps_enabled = 0; + if (TXGBE_1588_TOD_ENABLE) + txgbe_set_pps(hw, adapter->pps_enabled, 0, 0); + } + return; + } + + adapter->pps_enabled = 1; + + tssdp = TXGBE_TSEC_1588_SDP_FUN_SEL_TT0; + tssdp |= TXGBE_1588_PPS_LEVEL ? TXGBE_TSEC_1588_SDP_OUT_LEVEL_HIGH : TXGBE_TSEC_1588_SDP_OUT_LEVEL_LOW; + tsauxc = TXGBE_TSEC_1588_AUX_CTL_PLSG | TXGBE_TSEC_1588_AUX_CTL_EN_TT0 | + TXGBE_TSEC_1588_AUX_CTL_EN_TT1 | TXGBE_TSEC_1588_AUX_CTL_EN_TS0; + + tssdp1 = TXGBE_TSEC_1588_SDP_FUN_SEL_TS0; + + /* Read the current clock time, and save the cycle counter value */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->hw_tc); + adapter->pps_edge_start = adapter->hw_tc.cycle_last; + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + adapter->pps_edge_end = adapter->pps_edge_start; + + /* Figure out how far past the next second we are */ + div_u64_rem(ns, NS_PER_SEC, &rem); + + /* Figure out how many nanoseconds to add to round the clock edge up + * to the next full second + */ + rem = (NS_PER_SEC - rem); + + /* Adjust the clock edge to align with the next full second. 
*/ + adapter->pps_edge_start += div_u64(((u64)rem << cc->shift), cc->mult); + trgttiml0 = (u32)adapter->pps_edge_start; + trgttimh0 = (u32)(adapter->pps_edge_start >> 32); + + if (TXGBE_1588_TOD_ENABLE) + txgbe_set_pps(hw, adapter->pps_enabled, ns + rem, adapter->pps_edge_start); + + rem += TXGBE_1588_PPS_WIDTH * NS_PER_MSEC; + adapter->pps_edge_end += div_u64(((u64)rem << cc->shift), cc->mult); + trgttiml1 = (u32)adapter->pps_edge_end; + trgttimh1 = (u32)(adapter->pps_edge_end >> 32); + + wr32(hw, TXGBE_TSEC_1588_TRGT_L(0), trgttiml0); + wr32(hw, TXGBE_TSEC_1588_TRGT_H(0), trgttimh0); + wr32(hw, TXGBE_TSEC_1588_TRGT_L(1), trgttiml1); + wr32(hw, TXGBE_TSEC_1588_TRGT_H(1), trgttimh1); + wr32(hw, TXGBE_TSEC_1588_SDP(0), tssdp); + wr32(hw, TXGBE_TSEC_1588_SDP(1), tssdp1); + wr32(hw, TXGBE_TSEC_1588_AUX_CTL, tsauxc); + wr32(hw, TXGBE_TSEC_1588_INT_EN, TXGBE_TSEC_1588_INT_EN_TT1); + TXGBE_WRITE_FLUSH(hw); + + rem = NS_PER_SEC; + /* Adjust the clock edge to align with the next full second. */ + adapter->sec_to_cc = div_u64(((u64)rem << cc->shift), cc->mult); +} + /** * txgbe_ptp_read - read raw cycle counter (to be used by time counter) * @hw_cc: the cyclecounter structure @@ -135,6 +224,35 @@ static void txgbe_ptp_convert_to_hwtstamp(struct txgbe_adapter *adapter, hwtstamp->hwtstamp = ns_to_ktime(ns); } +#ifdef HAVE_PTP_CLOCK_INFO_ADJFINE +/** + * txgbe_ptp_adjfreq + * @ptp: the ptp clock structure + * @ppb: parts per billion adjustment from base + * + * adjust the frequency of the ptp cycle counter by the + * indicated ppb from the base frequency. 
+ */ +static int txgbe_ptp_adjfine(struct ptp_clock_info *ptp, long ppb) +{ + struct txgbe_adapter *adapter = + container_of(ptp, struct txgbe_adapter, ptp_caps); + struct txgbe_hw *hw = &adapter->hw; + u64 incval; + + smp_mb(); + incval = READ_ONCE(adapter->base_incval); + incval = adjust_by_scaled_ppm(incval, ppb); + + if (incval > TXGBE_TSC_1588_INC_IV(~0)) + e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); + wr32(hw, TXGBE_TSC_1588_INC, + TXGBE_TSC_1588_INC_IVP(incval, 2)); + + return 0; +} + +#else /** * txgbe_ptp_adjfreq * @ptp: the ptp clock structure @@ -143,7 +261,6 @@ static void txgbe_ptp_convert_to_hwtstamp(struct txgbe_adapter *adapter, * adjust the frequency of the ptp cycle counter by the * indicated ppb from the base frequency. */ -#ifndef HAVE_NOT_PTT_ADJFREQ static int txgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) { struct txgbe_adapter *adapter = @@ -194,8 +311,50 @@ static int txgbe_ptp_adjtime(struct ptp_clock_info *ptp, timecounter_adjtime(&adapter->hw_tc, delta); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); + + return 0; +} + +#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 +#ifdef HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +/** + * txgbe_ptp_gettimex + * @ptp: the ptp clock structure + * @ts: timespec to hold the PHC timestamp + * @sts: structure to hold the system time before and after reading the PHC + * + * read the timecounter and return the correct value on ns, + * after converting it into a struct timespec. 
+ */ +static int txgbe_ptp_gettimex(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct txgbe_adapter *adapter = + container_of(ptp, struct txgbe_adapter, ptp_caps); + struct txgbe_hw *hw = &adapter->hw; + unsigned long flags; + u64 ns, stamp; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + ptp_read_system_prets(sts); + stamp = rd32(hw, TXGBE_TSC_1588_SYSTIML); + ptp_read_system_postts(sts); + stamp |= (u64)rd32(hw, TXGBE_TSC_1588_SYSTIMH) << 32; + + ns = timecounter_cyc2time(&adapter->hw_tc, stamp); + + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + *ts = ns_to_timespec64(ns); + return 0; } +#endif +#endif /** * txgbe_ptp_gettime64 @@ -245,6 +404,9 @@ static int txgbe_ptp_settime64(struct ptp_clock_info *ptp, timecounter_init(&adapter->hw_tc, &adapter->hw_cc, ns); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); + return 0; } @@ -285,6 +447,28 @@ static int txgbe_ptp_settime(struct ptp_clock_info *ptp, static int txgbe_ptp_feature_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { + struct txgbe_adapter *adapter = + container_of(ptp, struct txgbe_adapter, ptp_caps); + struct txgbe_hw *hw = &adapter->hw; + /** + * When PPS is enabled, unmask the interrupt for the ClockOut + * feature, so that the interrupt handler can send the PPS + * event when the clock SDP triggers. 
Clear mask when PPS is + * disabled + */ + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + if (rq->type != PTP_CLK_REQ_PPS || !adapter->ptp_setup_sdp) + return -ENOTSUPP; + + if (on) + adapter->flags2 |= TXGBE_FLAG2_PTP_PPS_ENABLED; + else + adapter->flags2 &= ~TXGBE_FLAG2_PTP_PPS_ENABLED; + + adapter->ptp_setup_sdp(adapter); + return 0; + } + return -ENOTSUPP; } @@ -298,9 +482,12 @@ static int txgbe_ptp_feature_enable(struct ptp_clock_info *ptp, */ void txgbe_ptp_check_pps_event(struct txgbe_adapter *adapter) { - struct ptp_clock_event event; - - event.type = PTP_CLOCK_PPS; + struct txgbe_hw *hw = &adapter->hw; + struct cyclecounter *cc = &adapter->hw_cc; + u32 tsauxc, rem, int_status; + u32 trgttiml0,trgttimh0, trgttiml1, trgttimh1; + u64 ns = 0; + unsigned long flags; /* this check is necessary in case the interrupt was enabled via some * alternative means (ex. debug_fs). Better to check here than @@ -309,7 +496,54 @@ void txgbe_ptp_check_pps_event(struct txgbe_adapter *adapter) if (!adapter->ptp_clock) return; - /* we don't config PPS on SDP yet, so just return. 
+ if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + int_status = rd32(hw, TXGBE_TSEC_1588_INT_ST); + if (int_status & TXGBE_TSEC_1588_INT_ST_TT1) { + /* disable the pin first */ + wr32(hw, TXGBE_TSEC_1588_AUX_CTL, 0); + TXGBE_WRITE_FLUSH(hw); + + tsauxc = TXGBE_TSEC_1588_AUX_CTL_PLSG | TXGBE_TSEC_1588_AUX_CTL_EN_TT0 | + TXGBE_TSEC_1588_AUX_CTL_EN_TT1 | TXGBE_TSEC_1588_AUX_CTL_EN_TS0; + + /* Read the current clock time, and save the cycle counter value */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->hw_tc); + adapter->pps_edge_start = adapter->hw_tc.cycle_last; + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + adapter->pps_edge_end = adapter->pps_edge_start; + + /* Figure out how far past the next second we are */ + div_u64_rem(ns, NS_PER_SEC, &rem); + + /* Figure out how many nanoseconds to add to round the clock edge up + * to the next full second + */ + rem = (NS_PER_SEC - rem); + + /* Adjust the clock edge to align with the next full second. */ + adapter->pps_edge_start += div_u64(((u64)rem << cc->shift), cc->mult); + + /* Adjust the clock edge to align with the next full second. */ + trgttiml0 = (u32)adapter->pps_edge_start; + trgttimh0 = (u32)(adapter->pps_edge_start >> 32); + + rem += TXGBE_1588_PPS_WIDTH * NS_PER_MSEC; + adapter->pps_edge_end += div_u64(((u64)rem << cc->shift), cc->mult); + + trgttiml1 = (u32)adapter->pps_edge_end; + trgttimh1 = (u32)(adapter->pps_edge_end >> 32); + + wr32(hw, TXGBE_TSEC_1588_TRGT_L(0), trgttiml0); + wr32(hw, TXGBE_TSEC_1588_TRGT_H(0), trgttimh0); + wr32(hw, TXGBE_TSEC_1588_TRGT_L(1), trgttiml1); + wr32(hw, TXGBE_TSEC_1588_TRGT_H(1), trgttimh1); + + wr32(hw, TXGBE_TSEC_1588_AUX_CTL, tsauxc); + TXGBE_WRITE_FLUSH(hw); + } + } + /* we don't config PPS on SDP for txgbe_mac_sp yet, so just return. 
* ptp_clock_event(adapter->ptp_clock, &event); */ } @@ -672,6 +906,7 @@ int txgbe_ptp_set_ts_config(struct txgbe_adapter *adapter, struct ifreq *ifr) static void txgbe_ptp_link_speed_adjust(struct txgbe_adapter *adapter, u32 *shift, u32 *incval) { + struct txgbe_hw *hw = &adapter->hw; /** * Scale the NIC cycle counter by a large factor so that * relatively small corrections to the frequency can be added @@ -686,26 +921,32 @@ static void txgbe_ptp_link_speed_adjust(struct txgbe_adapter *adapter, * link speed is 10Gb. Set the registers correctly even when link is * down to preserve the clock setting */ - switch (adapter->link_speed) { - case TXGBE_LINK_SPEED_10_FULL: - *shift = TXGBE_INCVAL_SHIFT_10; - *incval = TXGBE_INCVAL_10; - break; - case TXGBE_LINK_SPEED_100_FULL: - *shift = TXGBE_INCVAL_SHIFT_100; - *incval = TXGBE_INCVAL_100; - break; - case TXGBE_LINK_SPEED_1GB_FULL: - *shift = TXGBE_INCVAL_SHIFT_1GB; - *incval = TXGBE_INCVAL_1GB; - break; - case TXGBE_LINK_SPEED_10GB_FULL: - default: /* TXGBE_LINK_SPEED_10GB_FULL */ - *shift = TXGBE_INCVAL_SHIFT_10GB; - *incval = TXGBE_INCVAL_10GB; - break; - } + /*amlite TODO*/ + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + *shift = TXGBE_INCVAL_SHIFT_AML; + *incval = TXGBE_INCVAL_AML; + } else { + switch (adapter->link_speed) { + case TXGBE_LINK_SPEED_10_FULL: + *shift = TXGBE_INCVAL_SHIFT_10; + *incval = TXGBE_INCVAL_10; + break; + case TXGBE_LINK_SPEED_100_FULL: + *shift = TXGBE_INCVAL_SHIFT_100; + *incval = TXGBE_INCVAL_100; + break; + case TXGBE_LINK_SPEED_1GB_FULL: + *shift = TXGBE_INCVAL_SHIFT_1GB; + *incval = TXGBE_INCVAL_1GB; + break; + case TXGBE_LINK_SPEED_10GB_FULL: + default: /* TXGBE_LINK_SPEED_10GB_FULL */ + *shift = TXGBE_INCVAL_SHIFT_10GB; + *incval = TXGBE_INCVAL_10GB; + break; + } + } return; } @@ -756,6 +997,14 @@ void txgbe_ptp_start_cyclecounter(struct txgbe_adapter *adapter) spin_unlock_irqrestore(&adapter->tmreg_lock, flags); } +static void txgbe_ptp_init_systime(struct 
/**
 * txgbe_ptp_init_systime - reset the hardware SYSTIME counter to zero
 * @adapter: the txgbe private board structure
 *
 * Zeroes both halves of the 64-bit SYSTIME register (low word first,
 * then high word) and flushes the posted writes so the counter restarts
 * from a known value.  Called from txgbe_ptp_reset() for aml/aml40 MACs
 * only; the software timecounter is re-initialized by the caller
 * immediately afterwards.
 */
static void txgbe_ptp_init_systime(struct txgbe_adapter *adapter)
{
	struct txgbe_hw *hw = &adapter->hw;
	wr32(hw, TXGBE_TSC_1588_SYSTIML, 0);
	wr32(hw, TXGBE_TSC_1588_SYSTIMH, 0);
	/* flush posted MMIO writes before the caller reprograms the clock */
	TXGBE_WRITE_FLUSH(hw);
}
adapter->ptp_caps.adjtime = txgbe_ptp_adjtime; #ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 +#ifdef HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL + adapter->ptp_caps.gettimex64 = txgbe_ptp_gettimex; +#else adapter->ptp_caps.gettime64 = txgbe_ptp_gettime64; +#endif /* HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL */ adapter->ptp_caps.settime64 = txgbe_ptp_settime64; #else adapter->ptp_caps.gettime = txgbe_ptp_gettime; adapter->ptp_caps.settime = txgbe_ptp_settime; #endif adapter->ptp_caps.enable = txgbe_ptp_feature_enable; + adapter->ptp_setup_sdp = txgbe_ptp_setup_sdp; adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, pci_dev_to_dev(adapter->pdev)); @@ -891,6 +1164,9 @@ void txgbe_ptp_suspend(struct txgbe_adapter *adapter) adapter->flags2 &= ~TXGBE_FLAG2_PTP_PPS_ENABLED; + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); + cancel_work_sync(&adapter->ptp_tx_work); txgbe_ptp_clear_tx_timestamp(adapter); } diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c index a1bcaadfcc7d2ae16344770fa1d3ec7b46bd8359..2c5e0f8542d29445023b6f7f5ec61f8ae4dc1e05 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -14,7 +14,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * based on ixgbe_sriov.c, Copyright(c) 1999 - 2017 Intel Corporation. + * based on txgbe_sriov.c, Copyright(c) 1999 - 2017 Intel Corporation. 
* Contact Information: * Linux NICS * e1000-devel Mailing List @@ -38,10 +38,12 @@ #include "txgbe_sriov.h" static void txgbe_set_vf_rx_tx(struct txgbe_adapter *adapter, int vf); - +static int txgbe_set_queue_rate_limit_vf(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf); #ifdef CONFIG_PCI_IOV -static int __txgbe_enable_sriov(struct txgbe_adapter *adapter) +static int __txgbe_enable_sriov(struct txgbe_adapter *adapter, + unsigned int num_vfs) { struct txgbe_hw *hw = &adapter->hw; int num_vf_macvlans, i; @@ -49,9 +51,9 @@ static int __txgbe_enable_sriov(struct txgbe_adapter *adapter) u32 value = 0; adapter->flags |= TXGBE_FLAG_SRIOV_ENABLED; - e_dev_info("SR-IOV enabled with %d VFs\n", adapter->num_vfs); + e_dev_info("SR-IOV enabled with %d VFs\n", num_vfs); - if (adapter->num_vfs != 1) { + if (num_vfs != 1) { if (adapter->ring_feature[RING_F_RSS].indices == 4) value = TXGBE_CFG_PORT_CTL_NUM_VT_32; else /* adapter->ring_feature[RING_F_RSS].indices <= 2 */ @@ -65,10 +67,10 @@ static int __txgbe_enable_sriov(struct txgbe_adapter *adapter) adapter->flags |= TXGBE_FLAG_VMDQ_ENABLED; if (!adapter->ring_feature[RING_F_VMDQ].limit) adapter->ring_feature[RING_F_VMDQ].limit = 1; - adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs; + adapter->ring_feature[RING_F_VMDQ].offset = num_vfs; num_vf_macvlans = hw->mac.num_rar_entries - - (TXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs); + (TXGBE_MAX_PF_MACVLANS + 1 + num_vfs); adapter->mv_list = mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans), @@ -91,11 +93,13 @@ static int __txgbe_enable_sriov(struct txgbe_adapter *adapter) /* If call to enable VFs succeeded then allocate memory * for per VF control structures. 
*/ - adapter->vfinfo = kcalloc(adapter->num_vfs, + adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage), GFP_KERNEL); if (!adapter->vfinfo) return -ENOMEM; + adapter->num_vfs = num_vfs; + /* enable L2 switch and replication */ adapter->flags |= TXGBE_FLAG_SRIOV_L2SWITCH_ENABLE | TXGBE_FLAG_SRIOV_REPLICATION_ENABLE; @@ -129,6 +133,7 @@ static int __txgbe_enable_sriov(struct txgbe_adapter *adapter) /* enable spoof checking for all VFs */ adapter->vfinfo[i].spoofchk_enabled = true; adapter->vfinfo[i].link_enable = true; + adapter->vfinfo[i].link_state = TXGBE_VF_LINK_STATE_AUTO; #ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN /* We support VF RSS querying only for 82599 and x540 @@ -255,9 +260,10 @@ static void txgbe_put_vfs(struct txgbe_adapter *adapter) void txgbe_enable_sriov(struct txgbe_adapter *adapter) { int pre_existing_vfs = 0; + unsigned int num_vfs; pre_existing_vfs = pci_num_vf(adapter->pdev); - if (!pre_existing_vfs && !adapter->num_vfs) + if (!pre_existing_vfs && !adapter->max_vfs) return; /* If there are pre-existing VFs then we have to force @@ -267,7 +273,7 @@ void txgbe_enable_sriov(struct txgbe_adapter *adapter) * have been created via the new PCI SR-IOV sysfs interface. */ if (pre_existing_vfs) { - adapter->num_vfs = pre_existing_vfs; + num_vfs = pre_existing_vfs; dev_warn(&adapter->pdev->dev, "Virtual Functions already enabled for this device -" "Please reload all VF drivers to avoid spoofed packet " @@ -275,16 +281,16 @@ void txgbe_enable_sriov(struct txgbe_adapter *adapter) } else { int err; /* - * The sapphire supports up to 64 VFs per physical function - * but this implementation limits allocation to 63 so that - * basic networking resources are still available to the + * The sapphire/amber-lite supports up to 64 VFs per physical + * function but this implementation limits allocation to 63 so + * that basic networking resources are still available to the * physical function. 
If the user requests greater thn * 63 VFs then it is an error - reset to default of zero. */ - adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, + num_vfs = min_t(unsigned int, adapter->max_vfs, TXGBE_MAX_VFS_DRV_LIMIT); - err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); + err = pci_enable_sriov(adapter->pdev, num_vfs); if (err) { e_err(probe, "Failed to enable PCI sriov: %d\n", err); adapter->num_vfs = 0; @@ -292,7 +298,7 @@ void txgbe_enable_sriov(struct txgbe_adapter *adapter) } } - if (!__txgbe_enable_sriov(adapter)) { + if (!__txgbe_enable_sriov(adapter, num_vfs)) { txgbe_get_vfs(adapter); return; } @@ -476,47 +482,15 @@ static int txgbe_set_vf_lpe(struct txgbe_adapter *adapter, u32 max_frame, u32 max_frs, reg_val; /* - * For sapphire we have to keep all PFs and VFs operating with - * the same max_frame value in order to avoid sending an oversize + * For sapphire/amber-lite we have to keep all PFs and VFs operating + * with the same max_frame value in order to avoid sending an oversize * frame to a VF. 
In order to guarantee this is handled correctly * for all cases we have several special exceptions to take into * account before we can enable the VF for receive */ - struct net_device *dev = adapter->netdev; - int pf_max_frame = dev->mtu + ETH_HLEN; u32 reg_offset, vf_shift, vfre; s32 err = 0; -#if IS_ENABLED(CONFIG_FCOE) - if (dev->features & NETIF_F_FCOE_MTU) - pf_max_frame = max_t(int, pf_max_frame, - TXGBE_FCOE_JUMBO_FRAME_SIZE); -#endif /* CONFIG_FCOE */ - - switch (adapter->vfinfo[vf].vf_api) { - case txgbe_mbox_api_11: - case txgbe_mbox_api_12: - case txgbe_mbox_api_13: - /* - * Version 1.1 supports jumbo frames on VFs if PF has - * jumbo frames enabled which means legacy VFs are - * disabled - */ - if (pf_max_frame > ETH_FRAME_LEN) - break; - fallthrough; - default: - /* - * If the PF or VF are running w/ jumbo frames enabled - * we need to shut down the VF Rx path as we cannot - * support jumbo frames on legacy VFs - */ - if ((pf_max_frame > ETH_FRAME_LEN) || - (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) - err = -EINVAL; - break; - } - /* determine VF receive enable location */ vf_shift = vf % 32; reg_offset = vf / 32; @@ -529,9 +503,10 @@ static int txgbe_set_vf_lpe(struct txgbe_adapter *adapter, u32 max_frame, vfre |= 1 << vf_shift; wr32(hw, TXGBE_RDM_VF_RE(reg_offset), vfre); - if (err) { - e_err(drv, "VF max_frame %d out of range\n", max_frame); - return err; + /* pull current max frame size from hardware */ + max_frs = rd32(hw, TXGBE_PSR_MAX_SZ); + if (max_frs < max_frame) { + wr32(hw, TXGBE_PSR_MAX_SZ, max_frame); } /* pull current max frame size from hardware */ @@ -560,12 +535,14 @@ void txgbe_set_vmolr(struct txgbe_hw *hw, u16 vf, bool aupe) } static void txgbe_set_vmvir(struct txgbe_adapter *adapter, - u16 vid, u16 qos, u16 vf) + u16 vid, u16 qos, u16 vf, __be16 vlan_proto) { struct txgbe_hw *hw = &adapter->hw; u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | TXGBE_TDM_VLAN_INS_VLANA_DEFAULT; + if (vlan_proto == htons(ETH_P_8021AD)) + vmvir |= 1 << 
TXGBE_TDM_VLAN_INS_TPID_SEL_SHIFT; wr32(hw, TXGBE_TDM_VLAN_INS(vf), vmvir); } @@ -594,10 +571,10 @@ static inline void txgbe_vf_reset_event(struct txgbe_adapter *adapter, u16 vf) } else { if (vfinfo->pf_qos || !num_tcs) txgbe_set_vmvir(adapter, vfinfo->pf_vlan, - vfinfo->pf_qos, vf); + vfinfo->pf_qos, vf, vfinfo->vlan_proto); else txgbe_set_vmvir(adapter, vfinfo->pf_vlan, - adapter->default_up, vf); + adapter->default_up, vf, vfinfo->vlan_proto); if (vfinfo->spoofchk_enabled) TCALL(hw, mac.ops.set_vlan_anti_spoofing, true, vf); @@ -640,6 +617,8 @@ static int txgbe_negotiate_vf_api(struct txgbe_adapter *adapter, case txgbe_mbox_api_11: case txgbe_mbox_api_12: case txgbe_mbox_api_13: + case txgbe_mbox_api_21: + case txgbe_mbox_api_22: adapter->vfinfo[vf].vf_api = api; return 0; default: @@ -661,6 +640,8 @@ static int txgbe_get_vf_queues(struct txgbe_adapter *adapter, /* verify the PF is supporting the correct APIs */ switch (adapter->vfinfo[vf].vf_api) { + case txgbe_mbox_api_22: + case txgbe_mbox_api_21: case txgbe_mbox_api_20: case txgbe_mbox_api_13: case txgbe_mbox_api_12: @@ -891,7 +872,7 @@ static int txgbe_set_vf_mac_addr(struct txgbe_adapter *adapter, return -1; } - if (adapter->vfinfo[vf].pf_set_mac && + if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac, ETH_ALEN)) { u8 *pm = adapter->vfinfo[vf].vf_mac_addresses; @@ -940,16 +921,21 @@ static int txgbe_set_vf_vlan_msg(struct txgbe_adapter *adapter, struct txgbe_hw *hw = &adapter->hw; int add = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK) >> TXGBE_VT_MSGINFO_SHIFT; int vid = (msgbuf[1] & TXGBE_PSR_VLAN_SWC_VLANID_MASK); + int vlan_offload = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK) >> TXGBE_VT_MSGINFO_VLAN_OFFLOAD_SHIFT; int err; u8 tcs = netdev_get_num_tc(adapter->netdev); if (adapter->vfinfo[vf].pf_vlan || tcs) { - e_warn(drv, - "VF %d attempted to override administratively set VLAN " - "configuration\n" - "Reload the VF driver to resume operations\n", 
- vf); - return 0; + if (!vlan_offload) + return 0; + else { + e_warn(drv, + "VF %d attempted to override administratively set VLAN " + "configuration\n" + "Reload the VF driver to resume operations\n", + vf); + return -1; + } } if (add) @@ -1020,7 +1006,8 @@ static int txgbe_set_vf_macvlan_msg(struct txgbe_adapter *adapter, TXGBE_VT_MSGINFO_SHIFT; int err; - if (adapter->vfinfo[vf].pf_set_mac && index > 0) { + if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted + && index > 0) { e_warn(drv, "VF %d requested MACVLAN filter but is administratively denied\n", vf); @@ -1068,10 +1055,19 @@ static int txgbe_update_vf_xcast_mode(struct txgbe_adapter *adapter, return -EOPNOTSUPP; /* Fall threw */ case txgbe_mbox_api_13: + case txgbe_mbox_api_20: + case txgbe_mbox_api_21: + case txgbe_mbox_api_22: break; default: return -EOPNOTSUPP; } + + if (xcast_mode > TXGBEVF_XCAST_MODE_MULTI && + !adapter->vfinfo[vf].trusted) { + xcast_mode = TXGBEVF_XCAST_MODE_MULTI; + } + if (adapter->vfinfo[vf].xcast_mode == xcast_mode) goto out; @@ -1086,8 +1082,9 @@ static int txgbe_update_vf_xcast_mode(struct txgbe_adapter *adapter, enable = TXGBE_PSR_VM_L2CTL_BAM | TXGBE_PSR_VM_L2CTL_ROMPE; break; case TXGBEVF_XCAST_MODE_ALLMULTI: - disable = TXGBE_PSR_VM_L2CTL_UPE | TXGBE_PSR_VM_L2CTL_VPE; - enable = TXGBE_PSR_VM_L2CTL_BAM | TXGBE_PSR_VM_L2CTL_ROMPE | TXGBE_PSR_VM_L2CTL_MPE; + disable = TXGBE_PSR_VM_L2CTL_UPE; + enable = TXGBE_PSR_VM_L2CTL_BAM | TXGBE_PSR_VM_L2CTL_ROMPE | + TXGBE_PSR_VM_L2CTL_MPE | TXGBE_PSR_VM_L2CTL_VPE; break; case TXGBEVF_XCAST_MODE_PROMISC: disable = 0; @@ -1120,12 +1117,13 @@ static int txgbe_get_vf_link_state(struct txgbe_adapter *adapter, switch (adapter->vfinfo[vf].vf_api) { case txgbe_mbox_api_12: case txgbe_mbox_api_13: + case txgbe_mbox_api_21: + case txgbe_mbox_api_22: break; default: return -EOPNOTSUPP; } - - *link_state = adapter->vfinfo[vf].link_enable; + *link_state = adapter->vfinfo[vf].link_state; return 0; } @@ -1140,6 +1138,8 @@ static int 
txgbe_get_fw_version(struct txgbe_adapter *adapter, switch (adapter->vfinfo[vf].vf_api) { case txgbe_mbox_api_12: case txgbe_mbox_api_13: + case txgbe_mbox_api_21: + case txgbe_mbox_api_22: break; default: return -EOPNOTSUPP; @@ -1152,6 +1152,89 @@ static int txgbe_get_fw_version(struct txgbe_adapter *adapter, return 0; } +static int txgbe_add_5tuple_filter_vf(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct txgbe_5tuple_filter_info *filter = &adapter->ft_filter_info; + struct txgbe_hw *hw = &adapter->hw; + u16 index, sw_idx, i, j; + + /* + * look for an unused 5tuple filter index, + * and insert the filter to list. + */ + for (sw_idx = 0; sw_idx < TXGBE_MAX_RDB_5T_CTL0_FILTERS; sw_idx++) { + i = sw_idx / (sizeof(uint32_t) * 8); + j = sw_idx % (sizeof(uint32_t) * 8); + if (!(filter->fivetuple_mask[i] & (1 << j))) { + filter->fivetuple_mask[i] |= 1 << j; + break; + } + } + if (sw_idx >= TXGBE_MAX_RDB_5T_CTL0_FILTERS) { + e_err(drv, "5tuple filters are full.\n"); + return -ENOSYS; + } + + /* convert filter index on each vf to the global index */ + index = msgbuf[TXGBEVF_5T_CMD] & 0xFFFF; + adapter->vfinfo[vf].ft_filter_idx[index] = sw_idx; + + /* pool index */ + msgbuf[TXGBEVF_5T_CTRL0] |= vf << TXGBE_RDB_5T_CTL0_POOL_SHIFT; + /* compute absolute queue index */ + msgbuf[TXGBEVF_5T_CTRL1] += (vf * adapter->num_rx_queues_per_pool) << + TXGBE_RDB_5T_CTL1_RING_SHIFT; + + wr32(hw, TXGBE_RDB_5T_CTL0(sw_idx), msgbuf[TXGBEVF_5T_CTRL0]); + wr32(hw, TXGBE_RDB_5T_CTL1(sw_idx), msgbuf[TXGBEVF_5T_CTRL1]); + wr32(hw, TXGBE_RDB_5T_SDP(sw_idx), msgbuf[TXGBEVF_5T_PORT]); + wr32(hw, TXGBE_RDB_5T_DA(sw_idx), msgbuf[TXGBEVF_5T_DA]); + wr32(hw, TXGBE_RDB_5T_SA(sw_idx), msgbuf[TXGBEVF_5T_SA]); + + return 0; +} + +static void txgbe_del_5tuple_filter_vf(struct txgbe_adapter *adapter, + u32 cmd, u32 vf) +{ + struct txgbe_5tuple_filter_info *filter = &adapter->ft_filter_info; + struct txgbe_hw *hw = &adapter->hw; + u16 index, sw_idx; + + /* convert the global index to filter 
index on each vf */ + index = cmd & 0xFFFF; + if (index >= TXGBE_MAX_RDB_5T_CTL0_FILTERS) return; + sw_idx = adapter->vfinfo[vf].ft_filter_idx[index]; + filter->fivetuple_mask[sw_idx / (sizeof(uint32_t) * 8)] &= + ~(1 << (sw_idx % (sizeof(uint32_t) * 8))); + + wr32(hw, TXGBE_RDB_5T_CTL0(sw_idx), 0); + wr32(hw, TXGBE_RDB_5T_CTL1(sw_idx), 0); + wr32(hw, TXGBE_RDB_5T_SDP(sw_idx), 0); + wr32(hw, TXGBE_RDB_5T_DA(sw_idx), 0); + wr32(hw, TXGBE_RDB_5T_SA(sw_idx), 0); +} + +static int txgbe_set_5tuple_filter_vf(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u32 cmd = msgbuf[TXGBEVF_5T_CMD]; + bool add; + + /* verify the PF is supporting the correct API */ + if (adapter->vfinfo[vf].vf_api < txgbe_mbox_api_21) + return -EOPNOTSUPP; + + add = !!(cmd & BIT(TXGBEVF_5T_ADD_SHIFT)); + if (add) + return txgbe_add_5tuple_filter_vf(adapter, msgbuf, vf); + + txgbe_del_5tuple_filter_vf(adapter, cmd, vf); + + return 0; +} + static int txgbe_rcv_msg_from_vf(struct txgbe_adapter *adapter, u16 vf) { u16 mbx_size = TXGBE_VXMAILBOX_SIZE; @@ -1222,6 +1305,12 @@ static int txgbe_rcv_msg_from_vf(struct txgbe_adapter *adapter, u16 vf) case TXGBE_VF_GET_FW_VERSION: retval = txgbe_get_fw_version(adapter, msgbuf, vf); break; + case TXGBE_VF_SET_5TUPLE: + retval = txgbe_set_5tuple_filter_vf(adapter, msgbuf, vf); + break; + case TXGBE_VF_QUEUE_RATE_LIMIT: + retval = txgbe_set_queue_rate_limit_vf(adapter, msgbuf, vf); + break; case TXGBE_VF_BACKUP: #ifdef CONFIG_PCI_IOV retval = txgbe_vf_backup(adapter, vf); @@ -1313,8 +1402,47 @@ void txgbe_ping_all_vfs(struct txgbe_adapter *adapter) } } + +void txgbe_ping_vf_with_link_status(struct txgbe_adapter *adapter, bool link_up, u16 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 msgbuf[2] = {0, 0}; + + if (vf >= adapter->num_vfs) + return; + + msgbuf[0] = TXGBE_PF_NOFITY_VF_LINK_STATUS | TXGBE_PF_CONTROL_MSG; + msgbuf[1] = (adapter->speed << 1) | link_up; + //if (adapter->notify_down) + // msgbuf[1] |= TXGBE_PF_NOFITY_VF_NET_NOT_RUNNING; + if (adapter->vfinfo[vf].clear_to_send) + msgbuf[0] |=
TXGBE_VT_MSGTYPE_CTS; + txgbe_write_mbx(hw, msgbuf, 2, vf); +} + +void txgbe_ping_all_vfs_with_link_status(struct txgbe_adapter *adapter, bool link_up) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 msgbuf[2] = {0, 0}; + u16 i; + + if (!adapter->num_vfs) + return; + + msgbuf[0] = TXGBE_PF_NOFITY_VF_LINK_STATUS | TXGBE_PF_CONTROL_MSG; + if (link_up) + msgbuf[1] = (adapter->speed << 1) | link_up; + //if (adapter->notify_down) + // msgbuf[1] |= TXGBE_PF_NOFITY_VF_NET_NOT_RUNNING; + for (i = 0 ; i < adapter->num_vfs; i++) { + msgbuf[0] = TXGBE_PF_NOFITY_VF_LINK_STATUS | TXGBE_PF_CONTROL_MSG; + msgbuf[0] |= adapter->vfinfo[i].clear_to_send ? TXGBE_VT_MSGTYPE_CTS : 0; + txgbe_write_mbx(hw, msgbuf, 2, i); + } +} + /** - * ixgbe_set_all_vfs - update vfs queues + * txgbe_set_all_vfs - update vfs queues * @adapter: Pointer to adapter struct * * Update setting transmit and receive queues for all vfs @@ -1386,9 +1514,7 @@ static int txgbe_pci_sriov_enable(struct pci_dev __maybe_unused *dev, goto err_out; } - adapter->num_vfs = num_vfs; - - err = __txgbe_enable_sriov(adapter); + err = __txgbe_enable_sriov(adapter, num_vfs); if (err) goto err_out; @@ -1447,35 +1573,56 @@ int txgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) s32 retval = 0; struct txgbe_adapter *adapter = netdev_priv(netdev); - if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs)) + if (vf < 0 || (vf >= adapter->num_vfs)) return -EINVAL; - dev_info(pci_dev_to_dev(adapter->pdev), - "setting MAC %pM on VF %d\n", mac, vf); - dev_info(pci_dev_to_dev(adapter->pdev), - "Reload the VF driver to make this change effective.\n"); - retval = txgbe_set_vf_mac(adapter, vf, mac); - if (retval >= 0) { - adapter->vfinfo[vf].pf_set_mac = true; - if (test_bit(__TXGBE_DOWN, &adapter->state)) { - dev_warn(pci_dev_to_dev(adapter->pdev), - "The VF MAC address has been set, but the PF " - "device is not up.\n"); + if (is_valid_ether_addr(mac)) { + dev_info(pci_dev_to_dev(adapter->pdev), + "setting MAC %pM on VF %d\n", mac, vf); + dev_info(pci_dev_to_dev(adapter->pdev), + "Reload the
VF driver to make this change effective.\n"); + retval = txgbe_set_vf_mac(adapter, vf, mac); + if (retval >= 0) { + adapter->vfinfo[vf].pf_set_mac = true; + if (test_bit(__TXGBE_DOWN, &adapter->state)) { + dev_warn(pci_dev_to_dev(adapter->pdev), + "The VF MAC address has been set, but the PF " + "device is not up.\n"); + dev_warn(pci_dev_to_dev(adapter->pdev), + "Bring the PF device up before attempting to " + "use the VF device.\n"); + } + } else { dev_warn(pci_dev_to_dev(adapter->pdev), - "Bring the PF device up before attempting to " - "use the VF device.\n"); + "The VF MAC address was NOT set due to invalid or " + "duplicate MAC address.\n"); + } + } else if (is_zero_ether_addr(mac)) { + unsigned char *vf_mac_addr = + adapter->vfinfo[vf].vf_mac_addresses; + + /* nothing to do */ + if (is_zero_ether_addr(vf_mac_addr)) + return 0; + + dev_info(pci_dev_to_dev(adapter->pdev), "removing MAC on VF %d\n", + vf); + + retval = txgbe_del_mac_filter(adapter, vf_mac_addr, vf); + if (retval >= 0) { + adapter->vfinfo[vf].pf_set_mac = false; + memcpy(vf_mac_addr, mac, ETH_ALEN); + } else { + dev_warn(pci_dev_to_dev(adapter->pdev), "Could NOT remove the VF MAC address.\n"); } } else { - dev_warn(pci_dev_to_dev(adapter->pdev), - "The VF MAC address was NOT set due to invalid or " - "duplicate MAC address.\n"); + retval = -EINVAL; } - return retval; } static int txgbe_enable_port_vlan(struct txgbe_adapter *adapter, - int vf, u16 vlan, u8 qos) + int vf, u16 vlan, u8 qos, __be16 vlan_proto) { struct txgbe_hw *hw = &adapter->hw; int err; @@ -1483,7 +1630,7 @@ static int txgbe_enable_port_vlan(struct txgbe_adapter *adapter, err = txgbe_set_vf_vlan(adapter, true, vlan, vf); if (err) goto out; - txgbe_set_vmvir(adapter, vlan, qos, vf); + txgbe_set_vmvir(adapter, vlan, qos, vf, vlan_proto); txgbe_set_vmolr(hw, vf, false); if (adapter->vfinfo[vf].spoofchk_enabled) TCALL(hw, mac.ops.set_vlan_anti_spoofing, true, vf); @@ -1493,6 +1640,7 @@ static int txgbe_enable_port_vlan(struct 
txgbe_adapter *adapter, txgbe_write_hide_vlan(adapter, vf, 1); adapter->vfinfo[vf].pf_vlan = vlan; adapter->vfinfo[vf].pf_qos = qos; + adapter->vfinfo[vf].vlan_proto = vlan_proto; dev_info(pci_dev_to_dev(adapter->pdev), "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); if (test_bit(__TXGBE_DOWN, &adapter->state)) { @@ -1524,6 +1672,7 @@ static int txgbe_disable_port_vlan(struct txgbe_adapter *adapter, int vf) txgbe_write_hide_vlan(adapter, vf, 0); adapter->vfinfo[vf].pf_vlan = 0; adapter->vfinfo[vf].pf_qos = 0; + adapter->vfinfo[vf].vlan_proto = 0; return err; } @@ -1535,8 +1684,11 @@ int txgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, int txgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) #endif { - int err = 0; struct txgbe_adapter *adapter = netdev_priv(netdev); +#ifndef IFLA_VF_VLAN_INFO_MAX + __be16 vlan_proto = htons(ETH_P_8021Q); +#endif + int err = 0; /* VLAN IDs accepted range 0-4094 */ if ((vf >= adapter->num_vfs) || (vlan > VLAN_VID_MASK-1) || (qos > 7)) @@ -1545,6 +1697,7 @@ int txgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) if (vlan_proto != htons(ETH_P_8021Q) && vlan_proto != htons(ETH_P_8021AD)) return -EPROTONOSUPPORT; #endif + if (vlan || qos) { /* * Check if there is already a port VLAN set, if so @@ -1558,8 +1711,7 @@ int txgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) err = txgbe_disable_port_vlan(adapter, vf); if (err) goto out; - err = txgbe_enable_port_vlan(adapter, vf, vlan, qos); - + err = txgbe_enable_port_vlan(adapter, vf, vlan, qos, vlan_proto); } else { err = txgbe_disable_port_vlan(adapter, vf); } @@ -1568,11 +1720,46 @@ int txgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) } #endif /* IFLA_VF_MAX */ +int txgbe_link_mbps(struct txgbe_adapter *adapter) +{ + switch (adapter->link_speed) { + case TXGBE_LINK_SPEED_40GB_FULL: + return 40000; + case TXGBE_LINK_SPEED_25GB_FULL: + return 25000; + case 
TXGBE_LINK_SPEED_10GB_FULL: + return 10000; + case TXGBE_LINK_SPEED_1GB_FULL: + return 1000; + default: + return 0; + } +} + +u16 txgbe_frac_to_bi(u16 frac, u16 denom, int max_bits) +{ + u16 value = 0; + + while (frac > 0 && max_bits > 0) { + max_bits -= 1; + frac *= 2; + if (frac >= denom) { + value |= BIT(max_bits); + frac -= denom; + } + } + + return value; +} + static void txgbe_set_vf_rate_limit(struct txgbe_adapter *adapter, int vf) { struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; struct txgbe_hw *hw = &adapter->hw; u32 bcnrc_val; + int factor_int; + int factor_fra; + int link_speed; u16 queue, queues_per_pool; u16 max_tx_rate = adapter->vfinfo[vf].max_tx_rate; #ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE @@ -1582,12 +1769,106 @@ static void txgbe_set_vf_rate_limit(struct txgbe_adapter *adapter, int vf) /* determine how many queues per pool based on VMDq mask */ queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); - max_tx_rate /= queues_per_pool; - bcnrc_val = TXGBE_TDM_RP_RATE_MAX(max_tx_rate); + /* + * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported + * and 0x004 otherwise. 
+ */ + wr32(hw, TXGBE_TDM_MMW, 0x14); + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + if (max_tx_rate) { + u16 frac; + + link_speed = adapter->vf_rate_link_speed / 1000 * 1024; + + /* Calculate the rate factor values to set */ + factor_int = link_speed / max_tx_rate; + frac = (link_speed % max_tx_rate) * 10000 / max_tx_rate; + factor_fra = txgbe_frac_to_bi(frac, 10000, 14); + + wr32(hw, TXGBE_TDM_RL_VM_IDX, vf); + wr32m(hw, TXGBE_TDM_RL_VM_CFG, + TXGBE_TDM_FACTOR_INT_MASK, factor_int << TXGBE_TDM_FACTOR_INT_SHIFT); + wr32m(hw, TXGBE_TDM_RL_VM_CFG, + TXGBE_TDM_FACTOR_FRA_MASK, factor_fra << TXGBE_TDM_FACTOR_FRA_SHIFT); + wr32m(hw, TXGBE_TDM_RL_VM_CFG, + TXGBE_TDM_RL_EN, TXGBE_TDM_RL_EN); + } else { + wr32(hw, TXGBE_TDM_RL_VM_IDX, vf); + wr32m(hw, TXGBE_TDM_RL_VM_CFG, + TXGBE_TDM_RL_EN, 0); + } + } else { + max_tx_rate /= queues_per_pool; + bcnrc_val = TXGBE_TDM_RP_RATE_MAX(max_tx_rate); #ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE - min_tx_rate /= queues_per_pool; - bcnrc_val |= TXGBE_TDM_RP_RATE_MIN(min_tx_rate); + min_tx_rate /= queues_per_pool; + bcnrc_val |= TXGBE_TDM_RP_RATE_MIN(min_tx_rate); #endif + /* write value for all Tx queues belonging to VF */ + for (queue = 0; queue < queues_per_pool; queue++) { + unsigned int reg_idx = (vf * queues_per_pool) + queue; + + wr32(hw, TXGBE_TDM_RP_IDX, reg_idx); + wr32(hw, TXGBE_TDM_RP_RATE, bcnrc_val); + if (max_tx_rate) + wr32m(hw, TXGBE_TDM_RP_CTL, + TXGBE_TDM_RP_CTL_RLEN, TXGBE_TDM_RP_CTL_RLEN); + else + wr32m(hw, TXGBE_TDM_RP_CTL, + TXGBE_TDM_RP_CTL_RLEN, 0); + } + } +} + +void txgbe_check_vf_rate_limit(struct txgbe_adapter *adapter) +{ + int i; + + /* VF Tx rate limit was not set */ + if (!adapter->vf_rate_link_speed) + return; + + if (txgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) { + adapter->vf_rate_link_speed = 0; + dev_info(pci_dev_to_dev(adapter->pdev), + "Link speed has been changed. 
VF Transmit rate is disabled\n"); + } + + for (i = 0; i < adapter->num_vfs; i++) { + if (!adapter->vf_rate_link_speed) + adapter->vfinfo[i].max_tx_rate = 0; + + txgbe_set_vf_rate_limit(adapter, i); + } +} + +static int +txgbe_set_queue_rate_limit_vf(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct txgbe_hw *hw = &adapter->hw; + u16 queue, queues_per_pool, max_tx_rate; + int factor_int, factor_fra, link_speed; + u32 reg_idx; + + if (hw->mac.type != txgbe_mac_aml) + return -EOPNOTSUPP; + + /* verify the PF is supporting the correct API */ + if (adapter->vfinfo[vf].vf_api < txgbe_mbox_api_22) + return -EOPNOTSUPP; + + /* determine how many queues per pool based on VMDq mask */ + queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + + queue = msgbuf[TXGBEVF_Q_RATE_INDEX]; + max_tx_rate = msgbuf[TXGBEVF_Q_RATE_LIMIT]; + + /* convert queue index on each vf to the global index */ + reg_idx = (vf * queues_per_pool) + queue; /* * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM @@ -1596,19 +1877,34 @@ static void txgbe_set_vf_rate_limit(struct txgbe_adapter *adapter, int vf) */ wr32(hw, TXGBE_TDM_MMW, 0x14); - /* write value for all Tx queues belonging to VF */ - for (queue = 0; queue < queues_per_pool; queue++) { - unsigned int reg_idx = (vf * queues_per_pool) + queue; + if (max_tx_rate) { + u16 frac; - wr32(hw, TXGBE_TDM_RP_IDX, reg_idx); - wr32(hw, TXGBE_TDM_RP_RATE, bcnrc_val); - if (max_tx_rate) - wr32m(hw, TXGBE_TDM_RP_CTL, - TXGBE_TDM_RP_CTL_RLEN, TXGBE_TDM_RP_CTL_RLEN); - else - wr32m(hw, TXGBE_TDM_RP_CTL, - TXGBE_TDM_RP_CTL_RLEN, 0); + link_speed = txgbe_link_mbps(adapter) / 1000 * 1024; + + /* Calculate the rate factor values to set */ + factor_int = link_speed / max_tx_rate; + frac = (link_speed % max_tx_rate) * 10000 / max_tx_rate; + factor_fra = txgbe_frac_to_bi(frac, 10000, 14); + + wr32(hw, TXGBE_TDM_RL_QUEUE_IDX, reg_idx); + wr32m(hw, 
TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_FACTOR_INT_MASK, factor_int << TXGBE_TDM_FACTOR_INT_SHIFT); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_FACTOR_FRA_MASK, factor_fra << TXGBE_TDM_FACTOR_FRA_SHIFT); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_RL_EN, TXGBE_TDM_RL_EN); + } else { + wr32(hw, TXGBE_TDM_RL_QUEUE_IDX, reg_idx); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_RL_EN, 0); } + + adapter->vfinfo[vf].queue_max_tx_rate[queue] = max_tx_rate; + e_info(drv, "set vf %d queue %d max_tx_rate to %d Mbps", + vf, queue, max_tx_rate); + + return 0; } #ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE @@ -1621,6 +1917,7 @@ int txgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int max_tx_rate) #endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ { struct txgbe_adapter *adapter = netdev_priv(netdev); + int link_speed; /* verify VF is active */ if (vf >= adapter->num_vfs) @@ -1634,10 +1931,16 @@ int txgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int max_tx_rate) if (adapter->link_speed < TXGBE_LINK_SPEED_1GB_FULL) return -EINVAL; + link_speed = txgbe_link_mbps(adapter); + /* rate limit cannot be less than 10Mbs or greater than link speed */ + if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed))) + return -EINVAL; + /* store values */ #ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE adapter->vfinfo[vf].min_tx_rate = min_tx_rate; #endif + adapter->vf_rate_link_speed = link_speed; adapter->vfinfo[vf].max_tx_rate = max_tx_rate; /* update hardware configuration */ @@ -1682,7 +1985,7 @@ int txgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) #endif /* HAVE_VF_SPOOFCHK_CONFIGURE */ /** - * ixgbe_set_vf_rx_tx - Set VF rx tx + * txgbe_set_vf_rx_tx - Set VF rx tx * @adapter: Pointer to adapter struct * @vf: VF identifier * @@ -1717,6 +2020,15 @@ static void txgbe_set_vf_rx_tx(struct txgbe_adapter *adapter, int vf) if (reg_cur_rx & reg_req_rx) wr32(hw, TXGBE_RDM_VFRE_CLR(reg_offset), reg_req_rx); } + if(adapter->vfinfo[vf].link_state == 
TXGBE_VF_LINK_STATE_ENABLE && + !(rd32(hw,TXGBE_MAC_TX_CFG) & TXGBE_MAC_TX_CFG_TE)) { + wr32m(hw,TXGBE_MAC_TX_CFG,TXGBE_MAC_TX_CFG_TE, + TXGBE_MAC_TX_CFG_TE); + TXGBE_WRITE_FLUSH(hw); + wr32m(hw,TXGBE_MAC_TX_CFG,TXGBE_MAC_TX_CFG_TE, + TXGBE_MAC_TX_CFG_TE); + } + } @@ -1730,29 +2042,35 @@ static void txgbe_set_vf_rx_tx(struct txgbe_adapter *adapter, int vf) **/ void txgbe_set_vf_link_state(struct txgbe_adapter *adapter, int vf, int state) { + bool link_up = adapter->link_up; adapter->vfinfo[vf].link_state = state; switch (state) { - case IFLA_VF_LINK_STATE_AUTO: - if (test_bit(__TXGBE_DOWN, &adapter->state)) + case TXGBE_VF_LINK_STATE_AUTO: + if (test_bit(__TXGBE_DOWN, &adapter->state)) { adapter->vfinfo[vf].link_enable = false; - else + } else { + link_up = adapter->link_up; adapter->vfinfo[vf].link_enable = true; + } break; - case IFLA_VF_LINK_STATE_ENABLE: + case TXGBE_VF_LINK_STATE_ENABLE: adapter->vfinfo[vf].link_enable = true; + link_up = true; break; - case IFLA_VF_LINK_STATE_DISABLE: + case TXGBE_VF_LINK_STATE_DISABLE: adapter->vfinfo[vf].link_enable = false; + link_up = false; break; } - txgbe_set_vf_rx_tx(adapter, vf); - /* restart the VF */ adapter->vfinfo[vf].clear_to_send = false; txgbe_ping_vf(adapter, vf); + txgbe_ping_vf_with_link_status(adapter, link_up, vf); + + txgbe_set_vf_rx_tx(adapter, vf); } #ifdef HAVE_NDO_SET_VF_LINK_STATE @@ -1780,18 +2098,18 @@ int txgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state) switch (state) { case IFLA_VF_LINK_STATE_ENABLE: dev_info(pci_dev_to_dev(adapter->pdev), - "NDO set VF %d link state %d - not supported\n", - vf, state); + "NDO set VF %d link state enable\n", vf); + txgbe_set_vf_link_state(adapter, vf, TXGBE_VF_LINK_STATE_ENABLE); break; case IFLA_VF_LINK_STATE_DISABLE: dev_info(pci_dev_to_dev(adapter->pdev), "NDO set VF %d link state disable\n", vf); - txgbe_set_vf_link_state(adapter, vf, state); + txgbe_set_vf_link_state(adapter, vf, TXGBE_VF_LINK_STATE_DISABLE); break; case
IFLA_VF_LINK_STATE_AUTO: dev_info(pci_dev_to_dev(adapter->pdev), "NDO set VF %d link state auto\n", vf); - txgbe_set_vf_link_state(adapter, vf, state); + txgbe_set_vf_link_state(adapter, vf, TXGBE_VF_LINK_STATE_AUTO); break; default: dev_err(pci_dev_to_dev(adapter->pdev), @@ -1803,6 +2121,18 @@ int txgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state) } #endif /* HAVE_NDO_SET_VF_LINK_STATE */ +int txgbe_trans_vf_link_state(int state) +{ + switch(state){ + case TXGBE_VF_LINK_STATE_ENABLE: + return IFLA_VF_LINK_STATE_ENABLE; + case TXGBE_VF_LINK_STATE_DISABLE: + return IFLA_VF_LINK_STATE_DISABLE; + case TXGBE_VF_LINK_STATE_AUTO: + return IFLA_VF_LINK_STATE_AUTO; + } + return IFLA_VF_LINK_STATE_AUTO; +} int txgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) @@ -1822,6 +2152,9 @@ int txgbe_ndo_get_vf_config(struct net_device *netdev, ivi->vlan = adapter->vfinfo[vf].pf_vlan; ivi->qos = adapter->vfinfo[vf].pf_qos; +#ifdef IFLA_VF_VLAN_INFO_MAX + ivi->vlan_proto = adapter->vfinfo[vf].vlan_proto; +#endif #ifdef HAVE_VF_SPOOFCHK_CONFIGURE ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; #endif @@ -1829,7 +2162,7 @@ int txgbe_ndo_get_vf_config(struct net_device *netdev, ivi->trusted = adapter->vfinfo[vf].trusted; #endif #ifdef HAVE_NDO_SET_VF_LINK_STATE - ivi->linkstate = adapter->vfinfo[vf].link_state; + ivi->linkstate = txgbe_trans_vf_link_state(adapter->vfinfo[vf].link_state); #endif return 0; diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.h index 7c64f68605b54d30fb487bdcc1cbdf21a8eed42c..1e119cc5c0afb633d12f1d8994b0ddf46dd38fcd 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.h @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. 
+ * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -24,6 +24,10 @@ */ #define TXGBE_MAX_VFS_DRV_LIMIT (TXGBE_MAX_VF_FUNCTIONS - 1) +#define TXGBE_VF_LINK_STATE_DISABLE 0 +#define TXGBE_VF_LINK_STATE_AUTO 1 +#define TXGBE_VF_LINK_STATE_ENABLE 2 + void txgbe_restore_vf_multicasts(struct txgbe_adapter *adapter); int txgbe_set_vf_vlan(struct txgbe_adapter *adapter, int add, int vid, u16 vf); void txgbe_set_vmolr(struct txgbe_hw *hw, u16 vf, bool aupe); @@ -32,6 +36,9 @@ int txgbe_set_vf_mac(struct txgbe_adapter *adapter, u16 vf, unsigned char *mac_addr); void txgbe_disable_tx_rx(struct txgbe_adapter *adapter); void txgbe_ping_all_vfs(struct txgbe_adapter *adapter); +void txgbe_ping_all_vfs_with_link_status(struct txgbe_adapter *adapter, bool link_up); +void txgbe_ping_vf_with_link_status(struct txgbe_adapter *adapter, bool link_up, u16 vf); +int txgbe_trans_vf_link_state(int state); void txgbe_set_all_vfs(struct txgbe_adapter *adapter); #ifdef IFLA_VF_MAX int txgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); @@ -66,6 +73,7 @@ int txgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); void txgbe_enable_sriov(struct txgbe_adapter *adapter); #endif int txgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +void txgbe_check_vf_rate_limit(struct txgbe_adapter *adapter); void txgbe_set_vf_link_state(struct txgbe_adapter *adapter, int vf, int state); /* diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_sysfs.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_sysfs.c index bc9c946080469aec0a95b132ed55c9105cc07d49..04a00fed59c27a149a0b2e288898d7dacaef2b59 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_sysfs.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_sysfs.c @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI 
Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -14,7 +14,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * based on ixgbe_sysfs.c, Copyright(c) 1999 - 2017 Intel Corporation. + * based on txgbe_sysfs.c, Copyright(c) 1999 - 2017 Intel Corporation. * Contact Information: * Linux NICS * e1000-devel Mailing List diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index aa7a8ff22eb404ec6f152240f20ee77a614a2160..fa2ebe2e907863fd283d202f8d9698333de042a0 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -1,6 +1,6 @@ /* - * WangXun 10 Gigabit PCI Express Linux driver - * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * WangXun RP1000/RP2000/FF50XX PCI Express Linux driver + * Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -14,7 +14,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * based on ixgbe_type.h, Copyright(c) 1999 - 2017 Intel Corporation. + * based on txgbe_type.h, Copyright(c) 1999 - 2017 Intel Corporation. 
* Contact Information: * Linux NICS * e1000-devel Mailing List @@ -99,6 +99,11 @@ /* Device IDs */ #define TXGBE_DEV_ID_SP1000 0x1001 #define TXGBE_DEV_ID_WX1820 0x2001 +#define TXGBE_DEV_ID_AML 0x5000 +#define TXGBE_DEV_ID_AML5025 0x5025 +#define TXGBE_DEV_ID_AML5125 0x5125 +#define TXGBE_DEV_ID_AML5040 0x5040 +#define TXGBE_DEV_ID_AML5140 0x5140 /* Subsystem IDs */ /* SFP */ @@ -131,12 +136,14 @@ #define TXGBE_WOL_MASK 0x4000 #define TXGBE_DEV_MASK 0xf0 +#define TXGBE_FLASH_HEADER_FLAG 0x5aa5 + /* Combined interface*/ #define TXGBE_ID_SFI_XAUI 0x50 /* Revision ID */ -#define TXGBE_SP_MPW 1 +#define TXGBE_SP_MPW 0xfe /* MDIO Manageable Devices (MMDs). */ #define TXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 /* PMA and PMD */ @@ -144,6 +151,7 @@ #define TXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 /* PHY Extender Sublayer */ #define TXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 /* Auto-Negotiation */ #define TXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Vendor specific 1 */ +#define TXGBE_MDIO_VENDOR_SPECIFIC_2_DEV_TYPE 0x1F /* Vendor specific 2 */ /* phy register definitions */ /* VENDOR_SPECIFIC_1_DEV regs */ @@ -151,6 +159,11 @@ #define TXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ #define TXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0-10G, 1-1G */ +/* VENDOR_SPECIFIC_2_DEV regs */ +#define TXGBE_MDIO_VENDOR_SPECIFIC_2_PORT_CTRL 0xF001 +#define TXGBE_MDIO_VENDOR_SPECIFIC_2_SW_RST BIT(15) +#define TXGBE_MDIO_VENDOR_SPECIFIC_2_POWER BIT(11) + /* AUTO_NEG_DEV regs */ #define TXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ #define TXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ @@ -210,6 +223,36 @@ #define TNX_FW_REV 0xB #define AQ_FW_REV 0x20 +/* AMLITE ETH PHY Registers */ +#define SR_AN_CTRL 0x70000 +#define VR_PCS_DIG_CTRL1 0x38000 +#define SR_PCS_CTRL1 0x30000 +#define SR_PCS_CTRL2 0x30007 +#define SR_PMA_CTRL2 0x10007 +#define VR_PCS_DIG_CTRL3 0x38003 +#define VR_PMA_CTRL3 0x180a8 +#define VR_PMA_CTRL4 0x180a9 +#define SR_PMA_RS_FEC_CTRL 0x100c8 +#define 
ANA_OVRDEN0 0xca4 +#define ANA_OVRDEN1 0xca8 +#define ANA_OVRDVAL0 0xcb0 +#define ANA_OVRDVAL5 0xcc4 +#define OSC_CAL_N_CDR4 0x14 +#define PLL0_CFG0 0xc10 +#define PLL0_CFG2 0xc18 +#define PLL0_DIV_CFG0 0xc1c +#define PLL1_CFG0 0xc48 +#define PLL1_CFG2 0xc50 +#define PIN_OVRDEN0 0xc8c +#define PIN_OVRDVAL0 0xc94 +#define DATAPATH_CFG0 0x142c +#define DATAPATH_CFG1 0x1430 +#define AN_CFG1 0x1438 +#define SPARE52 0x16fc +#define RXS_CFG0 0x000 +#define PMD_CFG0 0x1400 +#define SR_PCS_STS1 0x30001 + /* ETH PHY Registers */ #define TXGBE_SR_XS_PCS_MMD_STATUS1 0x30001 #define TXGBE_SR_PCS_CTL2 0x30007 @@ -228,7 +271,10 @@ #define TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM 0x400 #define TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM 0x800 #define TXGBE_SR_AN_MMD_ADV_REG2 0x70011 +#define TXGBE_SR_AN_MMD_ADV_REG3 0x70012 #define TXGBE_SR_AN_MMD_LP_ABL1 0x70013 +#define TXGBE_SR_AN_MMD_LP_ABL2 0x70014 +#define TXGBE_SR_AN_MMD_LP_ABL3 0x70015 #define TXGBE_VR_AN_KR_MODE_CL 0x78003 #define TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1 0x38000 #define TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS 0x38010 @@ -354,6 +400,92 @@ #define TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME 100 #define TXGBE_PHY_INIT_DONE_POLLING_TIME 100 +/* amlite: FPGA */ +/* PHY MDI STANDARD CONFIG */ +#define TXGBE_MDI_PHY_ID1_OFFSET 2 +#define TXGBE_MDI_PHY_ID2_OFFSET 3 +#define TXGBE_MDI_PHY_ID_MASK 0xFFFFFC00U +#define TXGBE_MDI_PHY_SPEED_SELECT1 0x0040 +#define TXGBE_MDI_PHY_DUPLEX 0x0100 +#define TXGBE_MDI_PHY_RESTART_AN 0x0200 +#define TXGBE_MDI_PHY_ANE 0x1000 +#define TXGBE_MDI_PHY_SPEED_SELECT0 0x2000 +#define TXGBE_MDI_PHY_RESET 0x8000 + +#define TXGBE_PHY_RST_WAIT_PERIOD 50 + +#define TXGBE_MDI_PHY_INT_LSC 0x0400 +#define TXGBE_MDI_PHY_INT_ANC 0x0800 + +#define MV1119_CTRL 0 /* Page Any, Control reg */ +#define MV1119_STUS 1 /* Page Any, Status reg */ +#define MV1119_PHY_ID_1 2 /* Page Any, Phy Identifier 1 */ +#define MV1119_PHY_ID_2 3 /* Page Any, Phy Identifier 2 */ +#define MV1119_AUTO_NEGO_ADVER 4 /* Page Any, Auto-Negotiation 
Advertisement reg */ +#define MV1119_LK_PARTNER_ABILITY 5 /* Page Any, Link Partner Ability reg */ +#define MV1119_AUTO_NEGO_EX 6 /* Page Any, Auto-Negotiation Expansion reg */ +#define MV1119_NEXT_PAGE_TRANS 7 /* Page Any, Next Page Transmit reg */ +#define MV1119_LK_PARTNER_NEXT_PAGE 8 /* Page Any, Link Partner Next Page reg */ +#define MV1119_1000BASE_T_CTRL 9 /* Page Any, 1000BASE-T Control reg */ +#define MV1119_1000BASE_T_STUS 10 /* Page Any, 1000BASE-T Status reg */ +#define MV1119_EX_STUS 15 /* Page Any, Extended Status reg */ +#define MV1119_CO_SPEC_CTRL_1 16 /* Page 0, Copper Specific Control reg 1 */ +#define MV1119_CO_SPEC_STUS_1 17 /* Page 0, Copper Specific Status reg 1 */ +#define MV1119_CO_SPEC_INT_EN 18 /* Page 0, Copper Specific Interrupt Enable reg */ +#define MV1119_CO_SPEC_STUS_2 19 /* Page 0, Copper Specific Status reg 2 */ +#define MV1119_CO_SPEC_CTRL_3 20 /* Page 0, Copper Specific Control reg 3 */ +#define MV1119_RECE_ERR_COUT 21 /* Page 0, Receive Error Counter reg */ +#define MV1119_PAGE_ADD 22 /* Page Any, Page Address */ +#define MV1119_GLO_INT_STUS 23 /* Page 0,2, Global Interrupt Status */ +#define MV1119_CO_SPEC_CTRL_2 26 /* Page 0, Copper Specific Control reg 2 */ +#define MV1119_MAC_SPEC_CTRL_1 16 /* Page 2, MAC Specific Control reg 1 */ +#define MV1119_MAC_SPEC_INT_EN 18 /* Page 2, MAC Specific Interrupt Enable reg */ +#define MV1119_MAC_SPEC_STUS_2 19 /* Page 2, MAC Specific Status reg 2 */ +#define MV1119_MAC_SPEC_CTRL 21 /* Page 2, MAC Specific Control reg */ +#define MV1119_LED_FUN_CTRL 16 /* Page 3, LED Function Control reg */ +#define MV1119_LED_POLAR_CTRL 17 /* Page 3, LED Polarity Control reg */ +#define MV1119_LED_TIME_CTRL 18 /* Page 3, LED Timer Control reg */ + +#define CBIT(_x) (1 << (_x)) + +#define MV1119_C_RESET CBIT(15) +#define MV1119_C_LOOPBACK CBIT(14) +#define MV1119_C_AUTO_NE_EN CBIT(12) +#define MV1119_C_POWER_DOWN CBIT(11) +#define MV1119_C_RE_CO_AUTO_NE CBIT(9) +#define MV1119_C_CO_DUPLEX_MODE CBIT(8) 
+#define MV1119_C_SPEED_SELECT1 CBIT(6) +#define MV1119_C_10M 0x00 +#define MV1119_C_100M CBIT(13) +#define MV1119_C_1000M CBIT(6) +#define MV1119_C_FULL_DUP CBIT(8) +#define MV1119_C_HALF_DUP 0x00 +#define MV1119_ANA_ASYM_PAUSE CBIT(11) +#define MV1119_ANA_PAUSE CBIT(10) +#define MV1119_ANA_100FULL CBIT(8) +#define MV1119_ANA_100HALF CBIT(7) +#define MV1119_ANA_10FULL CBIT(6) +#define MV1119_ANA_10HALF CBIT(5) +#define MV1119_1000BC_1000FULL CBIT(9) +#define MV1119_1000BC_1000HALF CBIT(8) +#define MV1119_CSS1_SPEED (CBIT(14) | CBIT(15)) +#define MV1119_CSS1_DUPLEX CBIT(13) +#define MV1119_CSS1_LINK CBIT(10) +#define MV1119_CSS2_AUTO_NE_ERR CBIT(15) +#define MV1119_CSS2_SPEED_CH CBIT(14) +#define MV1119_CSS2_DUPLEX_CH CBIT(13) +#define MV1119_CSS2_AUTO_NE_COMPLETE CBIT(11) +#define MV1119_CSS2_CO_LINK_STATUS_CH CBIT(10) +#define MV1119_CSC_DOWNSHIFT_COUNT (CBIT(12) | CBIT(13) | CBIT(14)) +#define MV1119_CSC_DOWNSHIFT_EN CBIT(11) +#define MV1119_CSC_POWER_DOWN CBIT(2) + + +#define MV1119_ANA_100 (MV1119_ANA_100FULL | MV1119_ANA_100HALF) +#define MV1119_ANA_10 (MV1119_ANA_10FULL | MV1119_ANA_10HALF) +#define MV1119_ANA_100_AND_10 (MV1119_ANA_100 | MV1119_ANA_10) +#define MV1119_1000BC_1000 (MV1119_1000BC_1000FULL | MV1119_1000BC_1000HALF) + /**************** Global Registers ****************************/ /* chip control Registers */ #define TXGBE_MIS_RST 0x1000C @@ -365,9 +497,17 @@ #define TXGBE_MIS_SWSM 0x1002C #define TXGBE_MIS_RST_ST 0x10030 +#define PX_PF_PEND 0x4C0 +#define PX_VF_PEND(i) (0x4D0 + 4 * (i)) /* i = [0,3]*/ +#define PX_PF_BME 0x4B8 + #define TXGBE_MIS_RST_SW_RST 0x00000001U #define TXGBE_MIS_RST_LAN0_RST 0x00000002U #define TXGBE_MIS_RST_LAN1_RST 0x00000004U +#define TXGBE_MIS_RST_LAN0_EPHY_RST 0x00080000U +#define TXGBE_MIS_RST_LAN1_EPHY_RST 0x00010000U +#define TXGBE_MIS_RST_LAN0_MAC_RST 0x00100000U +#define TXGBE_MIS_RST_LAN1_MAC_RST 0x00020000U #define TXGBE_MIS_RST_LAN0_CHG_ETH_MODE 0x20000000U #define TXGBE_MIS_RST_LAN1_CHG_ETH_MODE 
0x40000000U #define TXGBE_MIS_RST_GLOBAL_RST 0x80000000U @@ -420,6 +560,33 @@ #define TXGBE_TS_INT_EN_DALARM_INT_EN 0x00000002U #define TXGBE_TS_INT_EN_ALARM_INT_EN 0x00000001U +/* Sensors for AMLITE PVT(Process Voltage Temperature) */ +#define TXGBE_AML_INTR_RAW_HI 0x10300 +#define TXGBE_AML_INTR_RAW_ME 0x10304 +#define TXGBE_AML_INTR_RAW_LO 0x10308 +#define TXGBE_AML_TS_CTL1 0x10330 +#define TXGBE_AML_TS_CTL2 0x10334 +#define TXGBE_AML_TS_ENA 0x10338 +#define TXGBE_AML_TS_STS 0x1033C +#define TXGBE_AML_INTR_HIGH_EN 0x10318 +#define TXGBE_AML_INTR_MED_EN 0x1031C +#define TXGBE_AML_INTR_LOW_EN 0x10320 +#define TXGBE_AML_INTR_HIGH_STS 0x1030C +#define TXGBE_AML_INTR_MED_STS 0x10310 +#define TXGBE_AML_INTR_LOW_STS 0x10314 + +#define TXGBE_AML_TS_STS_VLD 0x1000 +#define TXGBE_AML_INTR_EN_HI 0x00000002U +#define TXGBE_AML_INTR_EN_ME 0x00000001U +#define TXGBE_AML_INTR_EN_LO 0x00000001U +#define TXGBE_AML_INTR_CL_HI 0x00000002U +#define TXGBE_AML_INTR_CL_ME 0x00000001U +#define TXGBE_AML_INTR_CL_LO 0x00000001U +#define TXGBE_AML_EVAL_MODE_MASK 0x010U +#define TXGBE_AML_CAL_MODE_MASK 0x08U +#define TXGBE_AML_ALARM_THRE_MASK 0x1FFE0000U +#define TXGBE_AML_DALARM_THRE_MASK 0x0001FFE0U + struct txgbe_thermal_diode_data { s16 temp; s16 alarm_thresh; @@ -513,6 +680,9 @@ struct txgbe_thermal_sensor_data { #define TXGBE_I2C_TXFLR 0x14974 /* Transmit FIFO Level Reg */ #define TXGBE_I2C_RXFLR 0x14978 /* Receive FIFO Level Reg */ #define TXGBE_I2C_SDA_HOLD 0x1497C /* SDA hold time length reg */ +#define TXGBE_I2C_SDA_RX_HOLD 0xff0000 /* SDA rx hold time length reg */ +#define TXGBE_I2C_SDA_TX_HOLD 0xffff /* SDA tx hold time length reg */ + #define TXGBE_I2C_TX_ABRT_SOURCE 0x14980 /* I2C TX Abort Status Reg */ #define TXGBE_I2C_SDA_SETUP 0x14994 /* I2C SDA Setup Register */ #define TXGBE_I2C_ENABLE_STATUS 0x1499C /* I2C Enable Status Register */ @@ -544,7 +714,15 @@ struct txgbe_thermal_sensor_data { #define TXGBE_CFG_GENEVE 0x14418 #define TXGBE_CFG_TEREDO 0x1441C #define 
TXGBE_CFG_TCP_TIME 0x14420 +#define TXGBE_LINKUP_FILTER 0x14428 +#define TXGBE_LINKUP_FILTER_TIME 30 #define TXGBE_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4)) + +/*AML LINK STATUS OVERWRITE*/ +#define TXGBE_AML_EPCS_MISC_CTL 0x13240 +#define TXGBE_AML_LINK_STATUS_OVRD_EN 0x00000020 +#define TXGBE_AML_LINK_STATUS_OVRD_VAL 0x00000010 + /* port cfg bit */ #define TXGBE_CFG_PORT_CTL_PFRSTD 0x00004000U /* Phy Function Reset Done */ #define TXGBE_CFG_PORT_CTL_D_VLAN 0x00000001U /* double vlan*/ @@ -568,13 +746,27 @@ struct txgbe_thermal_sensor_data { #define TXGBE_CFG_PORT_ST_LINK_100M 0x00000008U #define TXGBE_CFG_PORT_ST_LAN_ID(_r) ((0x00000100U & (_r)) >> 8) #define TXGBE_LINK_UP_TIME 90 + +/* amlite: diff from sapphire */ +#define TXGBE_CFG_PORT_ST_AML_LINK_10G 0x00000010U +#define TXGBE_CFG_PORT_ST_AML_LINK_25G 0x00000008U +#define TXGBE_CFG_PORT_ST_AML_LINK_40G 0x00000004U +#define TXGBE_CFG_PORT_ST_AML_LINK_50G 0x00000002U + /* LED CTL Bit */ -#define TXGBE_CFG_LED_CTL_LINK_BSY_SEL 0x00000010U -#define TXGBE_CFG_LED_CTL_LINK_100M_SEL 0x00000008U -#define TXGBE_CFG_LED_CTL_LINK_1G_SEL 0x00000004U -#define TXGBE_CFG_LED_CTL_LINK_10G_SEL 0x00000002U -#define TXGBE_CFG_LED_CTL_LINK_UP_SEL 0x00000001U -#define TXGBE_CFG_LED_CTL_LINK_OD_SHIFT 16 +#define TXGBE_CFG_LED_CTL_LINK_BSY_SEL 0x00000010U +#define TXGBE_CFG_LED_CTL_LINK_100M_SEL 0x00000008U +#define TXGBE_CFG_LED_CTL_LINK_1G_SEL 0x00000004U +#define TXGBE_CFG_LED_CTL_LINK_10G_SEL 0x00000002U +#define TXGBE_CFG_LED_CTL_LINK_UP_SEL 0x00000001U +#define TXGBE_CFG_LED_CTL_LINK_OD_SHIFT 16 + +#define TXGBE_AMLITE_CFG_LED_CTL_LINK_BSY_SEL 0x00000020U +#define TXGBE_AMLITE_CFG_LED_CTL_LINK_10G_SEL 0x00000010U +#define TXGBE_AMLITE_CFG_LED_CTL_LINK_25G_SEL 0x00000008U +#define TXGBE_AMLITE_CFG_LED_CTL_LINK_40G_SEL 0x00000004U +#define TXGBE_AMLITE_CFG_LED_CTL_LINK_50G_SEL 0x00000002U + /* LED modes */ #define TXGBE_LED_LINK_UP TXGBE_CFG_LED_CTL_LINK_UP_SEL #define TXGBE_LED_LINK_10G TXGBE_CFG_LED_CTL_LINK_10G_SEL @@ -582,6 
+774,12 @@ struct txgbe_thermal_sensor_data { #define TXGBE_LED_LINK_1G TXGBE_CFG_LED_CTL_LINK_1G_SEL #define TXGBE_LED_LINK_100M TXGBE_CFG_LED_CTL_LINK_100M_SEL +#define TXGBE_AMLITE_LED_LINK_ACTIVE TXGBE_AMLITE_CFG_LED_CTL_LINK_BSY_SEL +#define TXGBE_AMLITE_LED_LINK_10G TXGBE_AMLITE_CFG_LED_CTL_LINK_10G_SEL +#define TXGBE_AMLITE_LED_LINK_25G TXGBE_AMLITE_CFG_LED_CTL_LINK_25G_SEL +#define TXGBE_AMLITE_LED_LINK_40G TXGBE_AMLITE_CFG_LED_CTL_LINK_40G_SEL +#define TXGBE_AMLITE_LED_LINK_50G TXGBE_AMLITE_CFG_LED_CTL_LINK_50G_SEL + /* GPIO Registers */ #define TXGBE_GPIO_DR 0x14800 #define TXGBE_GPIO_DDR 0x14804 @@ -589,8 +787,12 @@ struct txgbe_thermal_sensor_data { #define TXGBE_GPIO_INTEN 0x14830 #define TXGBE_GPIO_INTMASK 0x14834 #define TXGBE_GPIO_INTTYPE_LEVEL 0x14838 +#define TXGBE_GPIO_INT_POLARITY 0x1483C #define TXGBE_GPIO_INTSTATUS 0x14844 +#define TXGBE_GPIO_DEBOUNCE 0x14848 #define TXGBE_GPIO_EOI 0x1484C +#define TXGBE_GPIO_EXT 0x14850 + /*GPIO bit */ #define TXGBE_GPIO_DR_0 0x00000001U /* SDP0 Data Value */ #define TXGBE_GPIO_DR_1 0x00000002U /* SDP1 Data Value */ @@ -612,21 +814,33 @@ struct txgbe_thermal_sensor_data { #define TXGBE_GPIO_INTEN_1 0x00000002U /* SDP1 interrupt enable */ #define TXGBE_GPIO_INTEN_2 0x00000004U /* SDP2 interrupt enable */ #define TXGBE_GPIO_INTEN_3 0x00000008U /* SDP3 interrupt enable */ +#define TXGBE_GPIO_INTEN_4 0x00000010U /* SDP4 interrupt enable */ #define TXGBE_GPIO_INTEN_5 0x00000020U /* SDP5 interrupt enable */ #define TXGBE_GPIO_INTEN_6 0x00000040U /* SDP6 interrupt enable */ #define TXGBE_GPIO_INTTYPE_LEVEL_2 0x00000004U /* SDP2 interrupt type level */ #define TXGBE_GPIO_INTTYPE_LEVEL_3 0x00000008U /* SDP3 interrupt type level */ +#define TXGBE_GPIO_INTTYPE_LEVEL_4 0x00000010U /* SDP4 interrupt type level */ #define TXGBE_GPIO_INTTYPE_LEVEL_5 0x00000020U /* SDP5 interrupt type level */ #define TXGBE_GPIO_INTTYPE_LEVEL_6 0x00000040U /* SDP6 interrupt type level */ +#define TXGBE_GPIO_INT_POLARITY_3 0x00000008U
+#define TXGBE_GPIO_INT_POLARITY_4 0x00000010U +#define TXGBE_GPIO_INT_DEBOUNCE_2 0x00000004U +#define TXGBE_GPIO_INT_DEBOUNCE_3 0x00000008U #define TXGBE_GPIO_INTSTATUS_1 0x00000002U /* SDP1 interrupt status */ #define TXGBE_GPIO_INTSTATUS_2 0x00000004U /* SDP2 interrupt status */ #define TXGBE_GPIO_INTSTATUS_3 0x00000008U /* SDP3 interrupt status */ +#define TXGBE_GPIO_INTSTATUS_4 0x00000010U /* SDP4 interrupt status */ #define TXGBE_GPIO_INTSTATUS_5 0x00000020U /* SDP5 interrupt status */ #define TXGBE_GPIO_INTSTATUS_6 0x00000040U /* SDP6 interrupt status */ #define TXGBE_GPIO_EOI_2 0x00000004U /* SDP2 interrupt clear */ #define TXGBE_GPIO_EOI_3 0x00000008U /* SDP3 interrupt clear */ +#define TXGBE_GPIO_EOI_4 0x00000010U /* SDP4 interrupt clear */ #define TXGBE_GPIO_EOI_5 0x00000020U /* SDP5 interrupt clear */ #define TXGBE_GPIO_EOI_6 0x00000040U /* SDP6 interrupt clear */ +#define TXGBE_SFP1_MOD_ABS_LS 0x00000004U /* GPIO_EXT SFP ABSENT*/ +#define TXGBE_SFP1_RX_LOS_LS 0x00000008U /* GPIO_EXT RX LOSS */ + +#define TXGBE_SFP1_MOD_PRST_LS 0x00000010U /* GPIO_EXT SFP PRESENT*/ /* TPH registers */ #define TXGBE_CFG_TPH_TDESC 0x14F00 /* TPH conf for Tx desc write back */ @@ -665,6 +879,8 @@ struct txgbe_thermal_sensor_data { #define TXGBE_TDM_TCP_FLG_L 0x18078 #define TXGBE_TDM_TCP_FLG_H 0x1807C #define TXGBE_TDM_VLAN_INS(_i) (0x18100 + ((_i) * 4)) /* 64 of these 0 - 63 */ +#define TXGBE_TDM_DESC_FATAL(i) (0x0180D0 + (i) * 4) /*0-3*/ + /* TDM CTL BIT */ #define TXGBE_TDM_CTL_TE 0x1 /* Transmit Enable */ #define TXGBE_TDM_CTL_PADDING 0x2 /* Padding byte number for ipsec ESP */ @@ -672,6 +888,7 @@ struct txgbe_thermal_sensor_data { /* Per VF Port VLAN insertion rules */ #define TXGBE_TDM_VLAN_INS_VLANA_DEFAULT 0x40000000U /*Always use default VLAN*/ #define TXGBE_TDM_VLAN_INS_VLANA_NEVER 0x80000000U /* Never insert VLAN tag */ +#define TXGBE_TDM_VLAN_INS_TPID_SEL_SHIFT 24 /*Tag tpid sel*/ #define TXGBE_TDM_RP_CTL 0x18400 #define TXGBE_TDM_RP_CTL_RST ((0x1) << 0) @@
-682,6 +899,22 @@ struct txgbe_thermal_sensor_data { #define TXGBE_TDM_RP_RATE_MIN(v) ((0x3FFF & (v))) #define TXGBE_TDM_RP_RATE_MAX(v) ((0x3FFF & (v)) << 16) +#define TXGBE_TDM_RL_QUEUE_IDX 0x18210 +#define TXGBE_TDM_RL_QUEUE_CFG 0x18214 + +#define TXGBE_TDM_RL_VM_IDX 0x18218 +#define TXGBE_TDM_RL_VM_CFG 0x1821C +#define TXGBE_TDM_RL_CFG 0x18400 +#define TXGBE_TDM_RL_EN 0x00000001U +#define TXGBE_TDM_FACTOR_INT 0x00000001U +#define TXGBE_TDM_FACTOR_FRA 0x00000001U +#define TXGBE_TDM_FACTOR_INT_SHIFT 16 +#define TXGBE_TDM_FACTOR_FRA_SHIFT 2 +#define TXGBE_TDM_FACTOR_INT_MASK 0xffff0000 +#define TXGBE_TDM_FACTOR_FRA_MASK 0xfffc + +#define TXGBE_TDM_RL_EN 0x00000001U + /* qos */ #define TXGBE_TDM_PBWARB_CTL 0x18200 #define TXGBE_TDM_PBWARB_CFG(_i) (0x18220 + ((_i) * 4)) /* 8 of these (0-7) */ @@ -694,6 +927,7 @@ struct txgbe_thermal_sensor_data { /* etag */ #define TXGBE_TDM_ETAG_INS(_i) (0x18700 + ((_i) * 4)) /* 64 of these 0 - 63 */ /* statistic */ +#define TXGBE_TDM_DRP_CNT 0x18300 #define TXGBE_TDM_SEC_DRP 0x18304 #define TXGBE_TDM_PKT_CNT 0x18308 #define TXGBE_TDM_OS2BMC_CNT 0x18314 @@ -710,6 +944,13 @@ struct txgbe_thermal_sensor_data { /* VFRE bitmask */ #define TXGBE_RDM_VF_RE_ENABLE_ALL 0xFFFFFFFFU +#define TXGBE_RDM_DCACHE_CTL 0x120A8 +#define TXGBE_RDM_DCACHE_CTL_EN 0x1 +#define TXGBE_RDM_RSC_CTL_FREE_CNT_DIS 0x100 + +/* amlite: rdm_rsc_ctl_free_ctl */ +#define TXGBE_RDM_RSC_CTL_FREE_CTL 0x00000080U + /* FCoE DMA Context Registers */ #define TXGBE_RDM_FCPTRL 0x12410 #define TXGBE_RDM_FCPTRH 0x12414 @@ -799,6 +1040,8 @@ struct txgbe_thermal_sensor_data { #define TXGBE_RDB_FCRE_TBL_RING(_v) (((_v) & 0x7F)) /* output queue number */ /* statistic */ #define TXGBE_RDB_MPCNT(_i) (0x19040 + ((_i) * 4)) /* 8 of 3FA0-3FBC*/ +#define TXGBE_RDB_PKT_CNT 0x19060 +#define TXGBE_RDB_DRP_CNT 0x19068 #define TXGBE_RDB_LXONTXC 0x1921C #define TXGBE_RDB_LXOFFTXC 0x19218 #define TXGBE_RDB_PXON2OFFCNT(_i) (0x19280 + ((_i) * 4)) /* 8 of these */ @@ -816,6 +1059,8 @@ struct 
txgbe_thermal_sensor_data { #define TXGBE_RDB_PL_CFG_TUN_TUNHDR 0x10 #define TXGBE_RDB_PL_CFG_RSS_PL_MASK 0x7 #define TXGBE_RDB_PL_CFG_RSS_PL_SHIFT 29 +#define TXGBE_RDB_PL_CFG_RSS_EN 0x1000000 +#define TXGBE_RDB_PL_CFG_RSS_MASK 0xFF0000 /* RQTC Bit Masks and Shifts */ #define TXGBE_RDB_RSS_TC_SHIFT_TC(_i) ((_i) * 4) #define TXGBE_RDB_RSS_TC_TC0_MASK (0x7 << 0) @@ -854,6 +1099,8 @@ enum { /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ #define TXGBE_RDB_5T_CTL1_SIZE_BP 0x00001000U /* Packet size bypass */ #define TXGBE_RDB_5T_CTL1_LLI 0x00100000U /* Enables low latency Int */ +#define TXGBE_RDB_5T_CTL1_RING_MASK 0x0FE00000U /* Rx queue index mask */ +#define TXGBE_RDB_5T_CTL1_RING_SHIFT 21 #define TXGBE_RDB_LLI_THRE_PRIORITY_MASK 0x00070000U /* VLAN priority mask */ #define TXGBE_RDB_LLI_THRE_PRIORITY_EN 0x00080000U /* VLAN priority enable */ #define TXGBE_RDB_LLI_THRE_CMN_EN 0x00100000U /* cmn packet receiveed */ @@ -887,7 +1134,7 @@ enum { #define TXGBE_RDB_PB_CTL_DISABLED 0x1 #define TXGBE_RDB_RA_CTL_RSS_EN 0x00000004U /* RSS Enable */ -//#define TXGBE_RDB_RA_CTL_MULTI_RSS 0x00000001U /* VF RSS Hash Rule Enable */ +#define TXGBE_RDB_RA_CTL_MULTI_RSS 0x00000001U /* VF RSS Hash Rule Enable */ #define TXGBE_RDB_RA_CTL_RSS_MASK 0xFFFF0000U #define TXGBE_RDB_RA_CTL_RSS_IPV4_TCP 0x00010000U #define TXGBE_RDB_RA_CTL_RSS_IPV4 0x00020000U @@ -976,6 +1223,8 @@ enum txgbe_fdir_pballoc_type { #define TXGBE_PSR_CTL 0x15000 #define TXGBE_PSR_VLAN_CTL 0x15088 #define TXGBE_PSR_VM_CTL 0x151B0 +#define TXGBE_PSR_PKT_CNT 0x151B8 +#define TXGBE_PSR_DBG_DRP_CNT 0x151C0 /* Header split receive */ #define TXGBE_PSR_CTL_SW_EN 0x00040000U #define TXGBE_PSR_CTL_RSC_DIS 0x00010000U @@ -1018,7 +1267,7 @@ enum txgbe_fdir_pballoc_type { /* etype switcher 1st stage */ #define TXGBE_PSR_ETYPE_SWC(_i) (0x15128 + ((_i) * 4)) /* EType Queue Filter */ /* ETYPE Queue Filter/Select Bit Masks */ -#define TXGBE_MAX_PSR_ETYPE_SWC_FILTERS 8 +#define TXGBE_MAX_PSR_ETYPE_SWC_FILTERS 2 /* now 
only support 2 custom filters */ #define TXGBE_PSR_ETYPE_SWC_FCOE 0x08000000U /* bit 27 */ #define TXGBE_PSR_ETYPE_SWC_TX_ANTISPOOF 0x20000000U /* bit 29 */ #define TXGBE_PSR_ETYPE_SWC_1588 0x40000000U /* bit 30 */ @@ -1234,6 +1483,9 @@ enum txgbe_fdir_pballoc_type { #define TXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ #define TXGBE_MAX_PB 8 +/* statistic */ +#define TXGBE_TDB_OUT_PKT_CNT 0x1CF00 + /****************************** TSEC *****************************************/ /* Security Control Registers */ #define TXGBE_TSC_CTL 0x1D000 @@ -1250,6 +1502,7 @@ enum txgbe_fdir_pballoc_type { #define TXGBE_TSC_ST_SECTX_RDY 0x00000001U #define TXGBE_TSC_ST_OFF_DIS 0x00000002U #define TXGBE_TSC_ST_ECC_TXERR 0x00000004U +#define TXGBE_TSC_MACTX_AFIFO_RD_WTRMRK 0x000f0000U /* LinkSec (MacSec) Registers */ #define TXGBE_TSC_LSEC_CAP 0x1D200 @@ -1290,10 +1543,44 @@ enum txgbe_fdir_pballoc_type { #define TXGBE_TSC_1588_ADJL 0x1D418 /* Time Adjustment Offset reg Low */ #define TXGBE_TSC_1588_ADJH 0x1D41C /* Time Adjustment Offset reg High*/ + /* 1588 fields */ #define TXGBE_TSC_1588_CTL_VALID 0x00000001U /* Tx timestamp valid */ #define TXGBE_TSC_1588_CTL_ENABLED 0x00000010U /* Tx timestamping enabled */ +#define TXGBE_TSEC_1588_AUX_CTL 0x1D428 +#define TXGBE_TSEC_1588_TRGT_L(i) (0x1D42C + ((i) * 8)) /* [0,1] */ +#define TXGBE_TSEC_1588_TRGT_H(i) (0x1D430 + ((i) * 8)) /* [0,1] */ +#define TXGBE_TSEC_1588_FREQ_CLK_L(i) (0x1D43C + ((i) * 8)) /* [0,1] */ +#define TXGBE_TSEC_1588_FREQ_CLK_H(i) (0x1D440 + ((i) * 8)) /* [0,1] */ +#define TXGBE_TSEC_1588_AUX_STMP_L(i) (0x1D44C + ((i) * 8)) /* [0,1] */ +#define TXGBE_TSEC_1588_AUX_STMP_H(i) (0x1D450 + ((i) * 8)) /* [0,1] */ +#define TXGBE_TSEC_1588_SDP(n) (0x1D45C + ((n) * 4)) /* [0,3] */ + +#define TXGBE_TSEC_1588_INT_ST 0x1D420 +#define TXGBE_TSEC_1588_INT_EN 0x1D424 + +#define TXGBE_TSEC_1588_INT_ST_TT0 0x10 +#define TXGBE_TSEC_1588_INT_ST_TT1 0x20 +#define TXGBE_TSEC_1588_INT_EN_TT0 0x10 +#define TXGBE_TSEC_1588_INT_EN_TT1 
0x20 + +#define TXGBE_TSEC_1588_AUX_CTL_EN_TT0 0x1 +#define TXGBE_TSEC_1588_AUX_CTL_PLSG 0x2 +#define TXGBE_TSEC_1588_AUX_CTL_EN_TT1 0x4 +#define TXGBE_TSEC_1588_AUX_CTL_EN_TS0 0x100 +#define TXGBE_TSEC_1588_AUX_CTL_EN_TS1 0x400 + +#define TXGBE_TSEC_1588_SDP_FUN_SEL_TT0 0x1 +#define TXGBE_TSEC_1588_SDP_FUN_SEL_TT1 0x2 +#define TXGBE_TSEC_1588_SDP_FUN_SEL_CL0 0x3 +#define TXGBE_TSEC_1588_SDP_FUN_SEL_CL1 0x4 +#define TXGBE_TSEC_1588_SDP_FUN_SEL_TS0 0x5 +#define TXGBE_TSEC_1588_SDP_FUN_SEL_TS1 0x6 +#define TXGBE_TSEC_1588_SDP_FUN_SEL_MASK 0x7 +#define TXGBE_TSEC_1588_SDP_OUT_LEVEL_LOW 0x10 +#define TXGBE_TSEC_1588_SDP_OUT_LEVEL_HIGH 0x0 + /********************************* RSEC **************************************/ /* general rsec */ @@ -1320,8 +1607,8 @@ enum txgbe_fdir_pballoc_type { #define TXGBE_RSC_LSEC_PKNUM1 0x1721C #define TXGBE_RSC_LSEC_KEY0(_n) 0x17220 #define TXGBE_RSC_LSEC_KEY1(_n) 0x17230 -#define TXGBE_RSC_LSEC_UNTAG_PKT 0x17240 -#define TXGBE_RSC_LSEC_DEC_OCTET 0x17244 +#define TXGBE_RSEC_LSEC_UNTAG_PKT 0x17240 +#define TXGBE_RSC_LSEC_DEC_OCTET 0x17244 #define TXGBE_RSC_LSEC_VLD_OCTET 0x17248 #define TXGBE_RSC_LSEC_BAD_PKT 0x1724C #define TXGBE_RSC_LSEC_NOSCI_PKT 0x17250 @@ -1370,6 +1657,14 @@ enum txgbe_fdir_pballoc_type { #define TXGBE_MNG_OS2BMC_CNT 0x1E094 #define TXGBE_MNG_BMC2OS_CNT 0x1E090 +/* amlite: swfw mailbox changes */ +#define TXGBE_AML_MNG_MBOX_CTL_SW2FW 0x1E0A0 +#define TXGBE_AML_MNG_MBOX_SW2FW 0x1E200 +#define TXGBE_AML_MNG_MBOX_CTL_FW2SW 0x1E0A4 +#define TXGBE_AML_MNG_MBOX_FW2SW 0x1E300 + +#define TXGBE_AML_MNG_MBOX_NOTIFY 0x80000000U + /* Firmware Semaphore Register */ #define TXGBE_MNG_FW_SM_MODE_MASK 0xE #define TXGBE_MNG_FW_SM_TS_ENABLED 0x1 @@ -1396,6 +1691,7 @@ enum txgbe_fdir_pballoc_type { #define TXGBE_MAC_RX_FLOW_CTRL 0x11090 #define TXGBE_MAC_ADDRESS0_HIGH 0x11300 #define TXGBE_MAC_ADDRESS0_LOW 0x11304 +#define TXGBE_MAC_MISC_CTL 0x11f00 #define TXGBE_MAC_TX_CFG_TE 0x00000001U #define TXGBE_MAC_TX_CFG_SPEED_MASK 
0x60000000U @@ -1407,6 +1703,35 @@ enum txgbe_fdir_pballoc_type { #define TXGBE_MAC_WDG_TIMEOUT_PWE 0x00000100U #define TXGBE_MAC_WDG_TIMEOUT_WTO_MASK 0x0000000FU #define TXGBE_MAC_WDG_TIMEOUT_WTO_DELTA 2 +#define TXGBE_MAC_MISC_LINK_STS_MOD 0x1 + +#define TXGBE_LINK_BOTH_PCS_MAC 0x1 + + +#define TXGBE_EPHY_STAT 0x13404 +#define TXGBE_EPHY_STAT_PPL_LOCK 0x3 + +/* amlite: new MAC_TX_CONFIG */ +/* +{SS_3, SS_2, SS} +SS_3 in bit27, SS_2 in bit30, SS in bits29~28 +■ 4'b0000 : 40-gigabit operation using XLGMII +■ 4'b0001 : 25-gigabit operation using XLGMII +■ 4'b0010 : 50-gigabit operation using XLGMII +■ 4'b0011 : 100-gigabit operation using XLGMII +■ 4'b0100 : 10-gigabit operation using XGMII +■ 4'b0101 : 5-gigabit operation using XGMII +■ 4'b0110 : 2.5-gigabit operation using GMII +■ 4'b0111 : 1-gigabit operation using GMII +■ 4'b1000 : 2.5-gigabit operation using XGMII +■ 4'b1001-4'b1111: Reserved +*/ +#define TXGBE_MAC_TX_CFG_AML_SPEED_MASK 0x78000000U +#define TXGBE_MAC_TX_CFG_AML_SPEED_50G 0x20000000U +#define TXGBE_MAC_TX_CFG_AML_SPEED_40G 0x00000000U +#define TXGBE_MAC_TX_CFG_AML_SPEED_25G 0x10000000U +#define TXGBE_MAC_TX_CFG_AML_SPEED_10G 0x40000000U +#define TXGBE_MAC_TX_CFG_AML_SPEED_1G 0x70000000U #define TXGBE_MAC_RX_FLOW_CTRL_RFE 0x00000001U /* receive fc enable */ #define TXGBE_MAC_RX_FLOW_CTRL_PFCE 0x00000100U /* pfc enable */ @@ -1428,6 +1753,9 @@ enum TXGBE_MSCA_CMD_value { #define TXGBE_MSCC_CR(v) ((0x8U & (v)) << 19) #define TXGBE_MSCC_BUSY ((0x1U) << 22) +#define TXGBE_MAC_MDIO_CLAUSE_22_PORT 0x11220 +#define TXGBE_MAC_MDIO_CLAUSE_ALL_PRTCL22 0xF + /* EEE registers */ /* statistic */ @@ -1488,14 +1816,19 @@ enum TXGBE_MSCA_CMD_value { #define TXGBE_PX_MISC_IC_ETH_AN 0x00080000U /* link auto-nego done */ #define TXGBE_PX_MISC_IC_INT_ERR 0x00100000U /* integrity error */ #define TXGBE_PX_MISC_IC_SPI 0x00200000U /* SPI interface */ +#define TXGBE_PX_MISC_IC_TXDESC 0x00400000U /* tx desc error */ #define TXGBE_PX_MISC_IC_VF_MBOX 0x00800000U /* VF-PF 
message box */ #define TXGBE_PX_MISC_IC_GPIO 0x04000000U /* GPIO interrupt */ #define TXGBE_PX_MISC_IC_PCIE_REQ_ERR 0x08000000U /* pcie request error int */ #define TXGBE_PX_MISC_IC_OVER_HEAT 0x10000000U /* overheat detection */ #define TXGBE_PX_MISC_IC_PROBE_MATCH 0x20000000U /* probe match */ -#define TXGBE_PX_MISC_IC_MNG_HOST_MBOX 0x40000000U /* mng mailbox */ +//#define TXGBE_PX_MISC_IC_MNG_HOST_MBOX 0x40000000U /* mng mailbox */ #define TXGBE_PX_MISC_IC_TIMER 0x80000000U /* tcp timer */ +#define TXGBE_PX_MISC_AML_ETH_LK_CHANGE 0x00000100U /* link change */ +#define TXGBE_PX_MISC_AML_ETH_PHY_EVENT 0x00040000U /* Eth phy event */ + + /* Extended Interrupt Cause Set */ #define TXGBE_PX_MISC_ICS_ETH_LKDN 0x00000100U #define TXGBE_PX_MISC_ICS_DEV_RST 0x00000400U @@ -1515,7 +1848,7 @@ enum TXGBE_MSCA_CMD_value { #define TXGBE_PX_MISC_ICS_PCIE_REQ_ERR 0x08000000U #define TXGBE_PX_MISC_ICS_OVER_HEAT 0x10000000U #define TXGBE_PX_MISC_ICS_PROBE_MATCH 0x20000000U -#define TXGBE_PX_MISC_ICS_MNG_HOST_MBOX 0x40000000U +//#define TXGBE_PX_MISC_ICS_MNG_HOST_MBOX 0x40000000U #define TXGBE_PX_MISC_ICS_TIMER 0x80000000U /* Extended Interrupt Enable Set */ @@ -1532,12 +1865,13 @@ enum TXGBE_MSCA_CMD_value { #define TXGBE_PX_MISC_IEN_ETH_AN 0x00080000U #define TXGBE_PX_MISC_IEN_INT_ERR 0x00100000U #define TXGBE_PX_MISC_IEN_SPI 0x00200000U +#define TXGBE_PX_MISC_IEN_TXDESC 0x00400000U #define TXGBE_PX_MISC_IEN_VF_MBOX 0x00800000U #define TXGBE_PX_MISC_IEN_GPIO 0x04000000U #define TXGBE_PX_MISC_IEN_PCIE_REQ_ERR 0x08000000U #define TXGBE_PX_MISC_IEN_OVER_HEAT 0x10000000U #define TXGBE_PX_MISC_IEN_PROBE_MATCH 0x20000000U -#define TXGBE_PX_MISC_IEN_MNG_HOST_MBOX 0x40000000U +//#define TXGBE_PX_MISC_IEN_MNG_HOST_MBOX 0x40000000U #define TXGBE_PX_MISC_IEN_TIMER 0x80000000U #define TXGBE_PX_MISC_IEN_MASK ( \ @@ -1549,7 +1883,6 @@ enum TXGBE_MSCA_CMD_value { TXGBE_PX_MISC_IEN_INT_ERR | \ TXGBE_PX_MISC_IEN_VF_MBOX | \ TXGBE_PX_MISC_IEN_GPIO | \ - TXGBE_PX_MISC_IEN_MNG_HOST_MBOX | \ 
TXGBE_PX_MISC_IEN_STALL | \ TXGBE_PX_MISC_IEN_PCIE_REQ_ERR | \ TXGBE_PX_MISC_IEN_TIMER) @@ -1567,6 +1900,7 @@ enum TXGBE_MSCA_CMD_value { #define TXGBE_MAX_INT_RATE 500000 #define TXGBE_MIN_INT_RATE 980 #define TXGBE_MAX_EITR 0x00000FF8U +#define TXGBE_AMLITE_MAX_EITR 0x00000FFFU #define TXGBE_MIN_EITR 8 #define TXGBE_PX_ITR_ITR_INT_MASK 0x00000FF8U #define TXGBE_PX_ITR_LLI_CREDIT 0x001f0000U @@ -1580,12 +1914,19 @@ enum TXGBE_MSCA_CMD_value { #define TXGBE_PX_TR_WP(_i) (0x03008 + ((_i) * 0x40)) #define TXGBE_PX_TR_RP(_i) (0x0300C + ((_i) * 0x40)) #define TXGBE_PX_TR_CFG(_i) (0x03010 + ((_i) * 0x40)) + +/* amlite: tx head wb */ +#define TXGBE_PX_TR_HEAD_ADDRL(_i) (0x03028 + ((_i) * 0x40)) +#define TXGBE_PX_TR_HEAD_ADDRH(_i) (0x0302C + ((_i) * 0x40)) + /* Transmit Config masks */ #define TXGBE_PX_TR_CFG_ENABLE (1) /* Ena specific Tx Queue */ #define TXGBE_PX_TR_CFG_TR_SIZE_SHIFT 1 /* tx desc number per ring */ #define TXGBE_PX_TR_CFG_SWFLSH (1 << 26) /* Tx Desc. wr-bk flushing */ #define TXGBE_PX_TR_CFG_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ #define TXGBE_PX_TR_CFG_THRE_SHIFT 8 +#define TXGBE_PX_TR_CFG_HEAD_WB (1 << 27) /* amlite head wb */ +#define TXGBE_PX_TR_CFG_HEAD_WB_64BYTE (1 << 28) /* amlite head wb 64byte */ #define TXGBE_PX_TR_RPn(q_per_pool, vf_number, vf_q_index) \ @@ -1599,6 +1940,10 @@ enum TXGBE_MSCA_CMD_value { #define TXGBE_PX_RR_WP(_i) (0x01008 + ((_i) * 0x40)) #define TXGBE_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40)) #define TXGBE_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40)) + +#define TXGBE_TDM_DESC_CHK(i) 0x0180B0 + (i) * 4 /*0-3*/ +#define TXGBE_TDM_DESC_NONFATAL(i) (0x0180C0 + (i) * 4) /*0-3*/ +#define TXGBE_TDM_DESC_FATAL(i) (0x0180D0 + (i) * 4) /*0-3*/ /* PX_RR_CFG bit definitions */ #define TXGBE_PX_RR_CFG_RR_SIZE_SHIFT 1 #define TXGBE_PX_RR_CFG_BSIZEPKT_SHIFT 2 /* so many KBs */ @@ -1625,6 +1970,9 @@ enum TXGBE_MSCA_CMD_value { #define TXGBE_PX_RR_CFG_RR_SZ 0x0000007EU #define TXGBE_PX_RR_CFG_RR_EN 0x00000001U +/* amlite: desc merge */ 
+#define TXGBE_PX_RR_CFG_DESC_MERGE 0x00080000U + /* statistic */ #define TXGBE_PX_MPRC(_i) (0x1020 + ((_i) * 64)) #define TXGBE_VX_GPRC(_i) (0x01014 + (0x40 * (_i))) @@ -1720,6 +2068,9 @@ enum TXGBE_MSCA_CMD_value { #define TXGBE_ALT_MAC_ADDR_PTR 0x37 #define TXGBE_FREE_SPACE_PTR 0x3E #define TXGBE_SW_REGION_PTR 0x1C +#define TXGBE_SHOWROM_I2C_PTR 0xB00 +#define TXGBE_SHOWROM_I2C_END 0xF00 + #define TXGBE_SAN_MAC_ADDR_PTR 0x18 #define TXGBE_DEVICE_CAPS 0x1C @@ -1893,6 +2244,7 @@ enum TXGBE_MSCA_CMD_value { #define TXGBE_RXD_STAT_FCSTAT_FCPRSP 0x00080000U /* 10: Recv. FCP_RSP */ #define TXGBE_RXD_STAT_FCSTAT_DDP 0x000C0000U /* 11: Ctxt w/ DDP */ +#define TXGBE_RXD_IPV6EX 0x00001000U /* IPv6EX */ #define TXGBE_RXD_ERR_MASK 0xfff00000U /* RDESC.ERRORS mask */ #define TXGBE_RXD_ERR_SHIFT 20 /* RDESC.ERRORS shift */ #define TXGBE_RXD_ERR_FCEOFE 0x80000000U /* FCEOFe/IPE */ @@ -2030,9 +2382,6 @@ enum txgbe_l2_ptypes { #define TXGBE_PTYPE_TYP(_pt) ((_pt) & 0x0F) #define TXGBE_PTYPE_TYPL4(_pt) ((_pt) & 0x07) -#define TXGBE_RXD_IPV6EX(_rxd) \ - ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 6) & 0x1) - /* Security Processing bit Indication */ #define TXGBE_RXD_LNKSEC_STATUS_SECP 0x00020000U #define TXGBE_RXD_LNKSEC_ERROR_NO_SA_MATCH 0x08000000U @@ -2242,6 +2591,26 @@ union txgbe_atr_hash_dword { __be32 dword; }; +struct txgbe_ethertype_filter { + u16 rule_idx; + u64 action; + u16 ethertype; + u32 etqf; + u32 etqs; +}; + +/* Structure to store ethertype filters' info. */ +struct txgbe_etype_filter_info { + int count; + u8 ethertype_mask; /* Bit mask for every used ethertype filter */ + /* store used ethertype filters */ + struct txgbe_ethertype_filter etype_filters[TXGBE_MAX_PSR_ETYPE_SWC_FILTERS]; +}; + +/* Structure to store 5-tuple filters' info. 
*/ +struct txgbe_5tuple_filter_info { + u32 fivetuple_mask[4]; /* Bit mask for max 128 filters */ +}; /****************** Manageablility Host Interface defines ********************/ #define TXGBE_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */ @@ -2283,10 +2652,16 @@ union txgbe_atr_hash_dword { #define FW_FLASH_UPGRADE_VERIFY_LEN 0x4 #define FW_DW_OPEN_NOTIFY 0xE9 #define FW_DW_CLOSE_NOTIFY 0xEA +#define FW_PPS_SET_CMD 0xF6 +#define FW_PPS_SET_LEN 0x14 +#define FW_AN_STA_CMD 0xF3 +#define FW_AN_STA_LEN 0x1 #define TXGBE_CHECKSUM_CAP_ST_PASS 0x80658383 #define TXGBE_CHECKSUM_CAP_ST_FAIL 0x70657376 +#define TXGBE_HIC_HDR_INDEX_MAX 255 + /* Host Interface Command Structures */ struct txgbe_hic_hdr { u8 cmd; @@ -2295,21 +2670,30 @@ struct txgbe_hic_hdr { u8 cmd_resv; u8 ret_status; } cmd_or_resp; - u8 checksum; + union { + u8 checksum; + u8 index; + } cksum_or_index; }; struct txgbe_hic_hdr2_req { u8 cmd; u8 buf_lenh; u8 buf_lenl; - u8 checksum; + union { + u8 checksum; + u8 index; + } cksum_or_index; }; struct txgbe_hic_hdr2_rsp { u8 cmd; u8 buf_lenl; u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ - u8 checksum; + union { + u8 checksum; + u8 index; + } cksum_or_index; }; union txgbe_hic_hdr2 { @@ -2408,6 +2792,27 @@ struct txgbe_hic_write_lldp{ u16 pad3; }; +struct txgbe_hic_set_pps { + struct txgbe_hic_hdr hdr; + u8 lan_id; + u8 enable; + u16 pad2; + u64 nsec; + u64 cycles; +}; + +struct txgbe_hic_write_autoneg { + struct txgbe_hic_hdr hdr; + u8 lan_id; + bool autoneg; + u16 pad; +}; + +struct txgbe_led_active_set { + struct txgbe_hic_hdr hdr; + u32 active_flag; +}; + /* Number of 100 microseconds we wait for PCI Express master disable */ #define TXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 @@ -2446,6 +2851,18 @@ typedef u32 txgbe_autoneg_advertised; TXGBE_LINK_SPEED_10GB_FULL | \ TXGBE_LINK_SPEED_10_FULL) +#define TXGBE_LINK_SPEED_25GB_FULL 0x10 +#define TXGBE_LINK_SPEED_40GB_FULL 0x20 +#define TXGBE_LINK_SPEED_50GB_FULL 0x40 + +#define 
TXGBE_LINK_SPEED_AMLITE_AUTONEG (TXGBE_LINK_SPEED_10GB_FULL | \ + TXGBE_LINK_SPEED_25GB_FULL) +/* Amlite eth mode */ +enum amlite_eth_mode { + ETH_RATE_10G = 0, + ETH_RATE_25G +}; + /* Physical layer type */ typedef u32 txgbe_physical_layer; #define TXGBE_PHYSICAL_LAYER_UNKNOWN 0 @@ -2614,6 +3031,20 @@ enum txgbe_sfp_type { txgbe_sfp_type_1g_lx_core1 = 14, txgbe_sfp_type_10g_cu_core0 = 15, /* add for qi'an'xin 10G fiber2copper sfp */ txgbe_sfp_type_10g_cu_core1 = 16, + txgbe_sfp_type_25g_sr_core0 = 17, + txgbe_sfp_type_25g_sr_core1 = 18, + txgbe_sfp_type_25g_lr_core0 = 19, + txgbe_sfp_type_25g_lr_core1 = 20, + txgbe_sfp_type_25g_aoc_core0 = 21, + txgbe_sfp_type_25g_aoc_core1 = 22, + txgbe_qsfp_type_40g_cu_core0 = 23, + txgbe_qsfp_type_40g_cu_core1 = 24, + txgbe_qsfp_type_40g_sr_core0 = 25, + txgbe_qsfp_type_40g_sr_core1 = 26, + txgbe_qsfp_type_40g_lr_core0 = 27, + txgbe_qsfp_type_40g_lr_core1 = 28, + txgbe_qsfp_type_40g_active_core0 = 29, + txgbe_qsfp_type_40g_active_core1 = 30, txgbe_sfp_type_not_present = 0xFFFE, txgbe_sfp_type_unknown = 0xFFFF }; @@ -2621,9 +3052,10 @@ enum txgbe_sfp_type { enum txgbe_media_type { txgbe_media_type_unknown = 0, txgbe_media_type_fiber, + txgbe_media_type_fiber_qsfp, txgbe_media_type_copper, txgbe_media_type_backplane, - txgbe_media_type_virtual + txgbe_media_type_virtual, }; /* Flow Control Settings */ @@ -2738,6 +3170,14 @@ struct txgbe_hw_stats { u64 gprc; u64 bprc; u64 mprc; + u64 rdpc; + u64 rddc; + u64 psrpc; + u64 psrdc; + u64 untag; + u64 tdmpc; + u64 tdmdc; + u64 tdbpc; u64 gptc; u64 gorc; u64 gotc; @@ -2829,6 +3269,7 @@ struct txgbe_mac_operations { s32 (*stop_adapter)(struct txgbe_hw *); s32 (*get_bus_info)(struct txgbe_hw *); void (*set_lan_id)(struct txgbe_hw *); + s32 (*setup_sfp)(struct txgbe_hw *); s32 (*enable_rx_dma)(struct txgbe_hw *, u32); s32 (*disable_sec_rx_path)(struct txgbe_hw *); s32 (*enable_sec_rx_path)(struct txgbe_hw *); @@ -2897,6 +3338,7 @@ struct txgbe_mac_operations { struct txgbe_phy_operations { 
s32 (*identify)(struct txgbe_hw *); s32 (*identify_sfp)(struct txgbe_hw *); + s32 (*setup_sfp)(struct txgbe_hw *); s32 (*init)(struct txgbe_hw *); s32 (*reset)(struct txgbe_hw *); s32 (*read_reg)(struct txgbe_hw *, u32, u32, u16 *); @@ -2911,6 +3353,7 @@ struct txgbe_phy_operations { s32 (*read_i2c_byte)(struct txgbe_hw *, u8, u8, u8 *); s32 (*write_i2c_byte)(struct txgbe_hw *, u8, u8, u8); s32 (*read_i2c_sff8472)(struct txgbe_hw *, u8, u8 *); + s32 (*read_i2c_sff8636)(struct txgbe_hw *, u8, u8, u8 *); s32 (*read_i2c_eeprom)(struct txgbe_hw *, u8, u8 *); s32 (*read_i2c_sfp_phy)(struct txgbe_hw *, u16, u16 *); s32 (*write_i2c_eeprom)(struct txgbe_hw *, u8, u8); @@ -2935,9 +3378,17 @@ struct txgbe_flash_info { u16 address_bits; }; +enum txgbe_mac_type { + txgbe_mac_unknown = 0, + txgbe_mac_sp, + txgbe_mac_aml, + txgbe_mac_aml40 +}; #define TXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 struct txgbe_mac_info { + enum txgbe_mac_type type; + struct txgbe_mac_operations ops; u8 addr[TXGBE_ETH_LENGTH_OF_ADDRESS]; u8 perm_addr[TXGBE_ETH_LENGTH_OF_ADDRESS]; @@ -2974,6 +3425,7 @@ struct txgbe_mac_info { bool thermal_sensor_enabled; struct txgbe_dmac_config dmac_config; bool set_lben; + bool autoneg; }; struct txgbe_phy_info { @@ -2982,6 +3434,7 @@ struct txgbe_phy_info { u32 addr; u32 id; enum txgbe_sfp_type sfp_type; + u32 fiber_suppport_speed; bool sfp_setup_needed; u32 revision; enum txgbe_media_type media_type; @@ -3008,6 +3461,13 @@ struct txgbe_mbx_operations { s32 (*check_for_rst)(struct txgbe_hw *, u16); }; +struct phytxeq { + u32 main; //TX EQ main (bit[5:0]) + u32 pre1; //TX EQ pre1 (bit[5:0]) + u32 pre2; //TX EQ pre2 (bit[5:0]) + u32 post; //TX EQ post (bit[5:0]) +}; + struct txgbe_mbx_stats { u32 msgs_tx; u32 msgs_rx; @@ -3067,6 +3527,9 @@ struct txgbe_hw { u16 oem_ssid; u16 oem_svid; bool f2c_mod_status; /* fiber to copper modules internal phy link status */ + bool dac_sfp; /* force dac sfp to kr mode */ + bool bypassCtle; /* DAC cable length */ + u32 q_tx_regs[512]; }; 
#define TCALL(hw, func, args...) (((hw)->func != NULL) \ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_xsk.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_xsk.c index d205ee23d15a4d3435e5d882117521ccdaf17f99..b33ed67c6460b5d52084fc23c67635535adb3534 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_xsk.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_xsk.c @@ -104,7 +104,7 @@ void txgbe_txrx_ring_disable(struct txgbe_adapter *adapter, int ring) txgbe_disable_txr(adapter, xdp_ring); txgbe_disable_rxr_hw(adapter, rx_ring); - if (ring_is_xdp(tx_ring)) + if (xdp_ring) synchronize_rcu(); /* Rx/Tx/XDP Tx share the same napi context. */ @@ -146,7 +146,7 @@ void txgbe_txrx_ring_enable(struct txgbe_adapter *adapter, int ring) txgbe_configure_rx_ring(adapter, rx_ring); clear_bit(__TXGBE_TX_DISABLED, &tx_ring->state); - if (ring_is_xdp(tx_ring)) + if (xdp_ring) clear_bit(__TXGBE_TX_DISABLED, &xdp_ring->state); } @@ -331,6 +331,9 @@ static int txgbe_xsk_umem_enable(struct txgbe_adapter *adapter, if (if_running) txgbe_txrx_ring_disable(adapter, qid); + /*to avoid xsk fd get issue in some kernel version*/ + msleep(400); + set_bit(qid, adapter->af_xdp_zc_qps); err = txgbe_add_xsk_umem(adapter, pool, qid); if (err) @@ -710,9 +713,14 @@ static struct sk_buff *txgbe_construct_skb_zc(struct txgbe_ring *rx_ring, struct sk_buff *skb; /* allocate a skb to store the frags */ +#ifdef NEED_NAPI_ALLOC_SKB_NO_MASK + skb = napi_alloc_skb(&rx_ring->q_vector->napi, + xdp_buffer->data_end - xdp_buffer->data_hard_start); +#else skb = __napi_alloc_skb(&rx_ring->q_vector->napi, xdp_buffer->data_end - xdp_buffer->data_hard_start, GFP_ATOMIC | __GFP_NOWARN); +#endif if (unlikely(!skb)) return NULL; @@ -946,6 +954,7 @@ void txgbe_xsk_clean_rx_ring(struct txgbe_ring *rx_ring) static bool txgbe_xmit_zc(struct txgbe_ring *xdp_ring, unsigned int budget) { + unsigned int sent_frames = 0, total_bytes = 0; union txgbe_tx_desc *tx_desc = NULL; u16 ntu = xdp_ring->next_to_use; struct txgbe_tx_buffer 
*tx_bi; @@ -1016,16 +1025,31 @@ static bool txgbe_xmit_zc(struct txgbe_ring *xdp_ring, unsigned int budget) #endif smp_wmb(); tx_bi->next_to_watch = tx_desc; +#ifdef TXGBE_TXHEAD_WB + tx_bi->next_eop = ntu; +#endif + xdp_ring->next_rs_idx = ntu; ntu++; if (ntu == xdp_ring->count) ntu = 0; xdp_ring->next_to_use = ntu; + sent_frames++; + total_bytes += tx_bi->bytecount; } if (tx_desc) { + cmd_type |= TXGBE_TXD_RS; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); wmb(); writel(xdp_ring->next_to_use, xdp_ring->tail); xsk_tx_release(xdp_ring->xsk_pool); + + u64_stats_update_begin(&xdp_ring->syncp); + xdp_ring->stats.bytes += total_bytes; + xdp_ring->stats.packets += sent_frames; + u64_stats_update_end(&xdp_ring->syncp); + xdp_ring->q_vector->tx.total_bytes += total_bytes; + xdp_ring->q_vector->tx.total_packets += sent_frames; } return (budget > 0) && work_done; @@ -1040,60 +1064,91 @@ static void txgbe_clean_xdp_tx_buffer(struct txgbe_ring *tx_ring, dma_unmap_addr(tx_bi, dma), dma_unmap_len(tx_bi, len), DMA_TO_DEVICE); dma_unmap_len_set(tx_bi, len, 0); + tx_bi->va = NULL; } bool txgbe_clean_xdp_tx_irq(struct txgbe_q_vector *q_vector, struct txgbe_ring *tx_ring) { - u32 ntu = tx_ring->next_to_use, ntc = tx_ring->next_to_clean; - union txgbe_tx_desc *tx_desc; + u32 next_rs_idx = tx_ring->next_rs_idx; + union txgbe_tx_desc *next_rs_desc; + u32 ntc = tx_ring->next_to_clean; struct txgbe_tx_buffer *tx_bi; - unsigned int total_packets = 0, total_bytes = 0; + u16 frames_ready = 0; u32 xsk_frames = 0; + u16 i; - tx_bi = &tx_ring->tx_buffer_info[ntc]; - tx_desc = TXGBE_TX_DESC(tx_ring, ntc); - while (ntc != ntu) { - if (!(tx_desc->wb.status & cpu_to_le32(TXGBE_TXD_STAT_DD))) - break; +#ifdef TXGBE_TXHEAD_WB + u32 head = 0; + u32 temp = tx_ring->next_to_clean; + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + head = *(tx_ring->headwb_mem); +#endif +#ifdef TXGBE_TXHEAD_WB + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + /* we 
have caught up to head, no work left to do */ + if (temp == head) { + goto out_xmit; + } else if (head > temp && !(next_rs_idx >= temp && (next_rs_idx < head))) { + goto out_xmit; + } else if (!(next_rs_idx >= temp || (next_rs_idx < head))) { + goto out_xmit; + } else { + if (next_rs_idx >= ntc) + frames_ready = next_rs_idx - ntc; + else + frames_ready = next_rs_idx + tx_ring->count - ntc; + } + } else { + next_rs_desc = TXGBE_TX_DESC(tx_ring, next_rs_idx); + if (next_rs_desc->wb.status & + cpu_to_le32(TXGBE_TXD_STAT_DD)) { + if (next_rs_idx >= ntc) + frames_ready = next_rs_idx - ntc; + else + frames_ready = next_rs_idx + tx_ring->count - ntc; + } + } +#else + next_rs_desc = TXGBE_TX_DESC(tx_ring, next_rs_idx); + if (next_rs_desc->wb.status & + cpu_to_le32(TXGBE_TXD_STAT_DD)) { + if (next_rs_idx >= ntc) + frames_ready = next_rs_idx - ntc; + else + frames_ready = next_rs_idx + tx_ring->count - ntc; + } +#endif + if (!frames_ready) + goto out_xmit; - total_bytes += tx_bi->bytecount; - total_packets += tx_bi->gso_segs; + if (likely(!tx_ring->xdp_tx_active)) { + xsk_frames = frames_ready; + } else { + for (i = 0; i < frames_ready; i++) { + tx_bi = &tx_ring->tx_buffer_info[ntc]; - if (tx_bi->xdpf) - txgbe_clean_xdp_tx_buffer(tx_ring, tx_bi); - else - xsk_frames++; + if (tx_bi->xdpf) + txgbe_clean_xdp_tx_buffer(tx_ring, tx_bi); + else + xsk_frames++; - tx_bi->xdpf = NULL; + tx_bi->xdpf = NULL; - tx_bi++; - tx_desc++; - ntc++; - if (unlikely(ntc == tx_ring->count)) { - ntc = 0; - tx_bi = tx_ring->tx_buffer_info; - tx_desc = TXGBE_TX_DESC(tx_ring, 0); + ++ntc; + if (ntc >= tx_ring->count) + ntc = 0; } - - /* issue prefetch for next Tx descriptor */ - prefetch(tx_desc); } - tx_ring->next_to_clean = ntc; + tx_ring->next_to_clean += frames_ready; if (unlikely(tx_ring->next_to_clean >= tx_ring->count)) tx_ring->next_to_clean -= tx_ring->count; - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->stats.bytes += total_bytes; - tx_ring->stats.packets += total_packets; - 
u64_stats_update_end(&tx_ring->syncp); - tx_ring->q_vector->tx.total_bytes += total_bytes; - tx_ring->q_vector->tx.total_packets += total_packets; - - if (xsk_frames) { + if (xsk_frames) xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); - } + +out_xmit: return txgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); } @@ -1128,6 +1183,7 @@ int txgbe_xsk_async_xmit(struct net_device *dev, u32 qid) void txgbe_xsk_clean_tx_ring(struct txgbe_ring *tx_ring) { + unsigned long size = sizeof(struct txgbe_tx_buffer) * tx_ring->count; u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use; struct txgbe_tx_buffer *tx_bi; u32 xsk_frames = 0; @@ -1149,5 +1205,10 @@ void txgbe_xsk_clean_tx_ring(struct txgbe_ring *tx_ring) if (xsk_frames) xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + + memset(tx_ring->tx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); } #endif /* HAVE_AF_XDP_ZC_SUPPORT */