linux/drivers/net/ixgbe/ixgbe_dcb_82598.c

/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82598.h"

/**
 * ixgbe_dcb_config_packet_buffers_82598 - Configure packet buffers
 * @hw: pointer to hardware structure
 * @rx_pba: method for Rx packet buffer allocation (pba_equal or pba_80_48)
 *
 * Configure packet buffers for DCB mode.
 */
static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, u8 rx_pba)
{
	s32 ret_val = 0;
	u32 value = IXGBE_RXPBSIZE_64KB;
	u8  i = 0;

	/* Setup Rx packet buffer sizes */
	switch (rx_pba) {
	case pba_80_48:
		/* Setup the first four at 80KB */
		value = IXGBE_RXPBSIZE_80KB;
		for (; i < 4; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
		/* Setup the last four at 48KB...don't re-init i */
		value = IXGBE_RXPBSIZE_48KB;
		/* Fall Through */
	case pba_equal:
	default:
		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);

		/* Setup Tx packet buffer sizes */
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i),
					IXGBE_TXPBSIZE_40KB);
		}
		break;
	}

	return ret_val;
}

/**
 * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @prio_type: priority type indexed by traffic class
 *
 * Configure Rx Data Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
				      u16 *refill,
				      u16 *max,
				      u8 *prio_type)
{
	u32 reg = 0;
	u32 credit_refill = 0;
	u32 credit_max = 0;
	u8  i = 0;

	reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA;
	IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
	/* Enable Arbiter */
	reg &= ~IXGBE_RMCS_ARBDIS;
	/* Enable Receive Recycle within the BWG */
	reg |= IXGBE_RMCS_RRM;
	/* Enable Deficit Fixed Priority arbitration */
	reg |= IXGBE_RMCS_DFP;
	IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		credit_refill = refill[i];
		credit_max    = max[i];

		reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);

		if (prio_type[i] == prio_link)
			reg |= IXGBE_RT2CR_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
	}

	reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	reg |= IXGBE_RDRXCTL_RDMTS_1_2;
	reg |= IXGBE_RDRXCTL_MPBEN;
	reg |= IXGBE_RDRXCTL_MCEN;
	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	/* Make sure there are enough descriptors before arbitration */
	reg &= ~IXGBE_RXCTRL_DMBYPS;
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg);

	return 0;
}

/**
 * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth group indexed by traffic class
 * @prio_type: priority type indexed by traffic class
 *
 * Configure Tx Descriptor Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
					   u16 *refill,
					   u16 *max,
					   u8 *bwg_id,
					   u8 *prio_type)
{
	u32 reg, max_credits;
	u8  i;

	reg = IXGBE_READ_REG(hw, IXGBE_DPMCS);

	/* Enable arbiter */
	reg &= ~IXGBE_DPMCS_ARBDIS;
	/* Enable DFP and Recycle mode */
	reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
	reg |= IXGBE_DPMCS_TSOEF;
	/* Configure Max TSO packet size 34KB including payload and headers */
	reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);

	IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		max_credits = max[i];
		reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
		reg |= refill[i];
		reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;

		if (prio_type[i] == prio_group)
			reg |= IXGBE_TDTQ2TCCR_GSP;

		if (prio_type[i] == prio_link)
			reg |= IXGBE_TDTQ2TCCR_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
	}

	return 0;
}

/**
 * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth group indexed by traffic class
 * @prio_type: priority type indexed by traffic class
 *
 * Configure Tx Data Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
					   u16 *refill,
					   u16 *max,
					   u8 *bwg_id,
					   u8 *prio_type)
{
	u32 reg;
	u8  i;

	reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
	/* Enable Data Plane Arbiter */
	reg &= ~IXGBE_PDPMCS_ARBDIS;
	/* Enable DFP and Transmit Recycle Mode */
	reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM);
	IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		reg = refill[i];
		reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
		reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;

		if (prio_type[i] == prio_group)
			reg |= IXGBE_TDPT2TCCR_GSP;

		if (prio_type[i] == prio_link)
			reg |= IXGBE_TDPT2TCCR_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
	}

	/* Enable Tx packet buffer division */
	reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
	reg |= IXGBE_DTXCTL_ENDBUBD;
	IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg);

	return 0;
}

/**
 * ixgbe_dcb_config_pfc_82598 - Config priority flow control
 * @hw: pointer to hardware structure
 * @pfc_en: bitmap of traffic classes with priority flow control enabled
 *
 * Configure Priority Flow Control for each traffic class.
 */
s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
{
	u32 reg, rx_pba_size;
	u8  i;

	if (pfc_en) {
		/* Enable Transmit Priority Flow Control */
		reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
		reg &= ~IXGBE_RMCS_TFCE_802_3X;
		/* correct the reporting of our flow control status */
		reg |= IXGBE_RMCS_TFCE_PRIORITY;
		IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);

		/* Enable Receive Priority Flow Control */
		reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
		reg &= ~IXGBE_FCTRL_RFCE;
		reg |= IXGBE_FCTRL_RPFCE;
		IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);

		/* Configure pause time */
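		/*
		 * Each FCTTV register carries two 16-bit pause timer fields,
		 * so MAX_TRAFFIC_CLASS >> 1 writes of 0x68006800 load a pause
		 * time of 0x6800 for every traffic class.
		 */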
		for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
			IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);

		/* Configure flow control refresh threshold value */
		IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
	}

	/*
	 * Configure flow control thresholds and enable priority flow control
	 * for each traffic class.
	 */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		int enabled = pfc_en & (1 << i);

		rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
		rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;

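		/*
		 * fc.low_water and fc.high_water hold the required head room
		 * in KB.  RXPBSIZE is in KB, while the FCRTL/FCRTH fields are
		 * in 32-byte units left-shifted by 5 into the register, so:
		 *   (((rx_pba_size - water_mark) * 1024) / 32) << 5
		 *     == (rx_pba_size - water_mark) << 10
		 */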
		reg = (rx_pba_size - hw->fc.low_water) << 10;

		if (enabled == pfc_enabled_tx ||
		    enabled == pfc_enabled_full)
			reg |= IXGBE_FCRTL_XONE;

		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);

		reg = (rx_pba_size - hw->fc.high_water) << 10;

		if (enabled == pfc_enabled_tx ||
		    enabled == pfc_enabled_full)
			reg |= IXGBE_FCRTH_FCEN;

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
	}

	return 0;
}

/**
 * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics
 * @hw: pointer to hardware structure
 *
 * Configure queue statistics registers; all queues belonging to the same
 * traffic class use a single set of queue statistics counters.
 */
static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
{
	u32 reg = 0;
	u8  i = 0;
	u8  j = 0;

	/* Receive Queues stats setting - 8 queues per statistics reg */
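	/*
	 * Multiplying 0x1010101 by j replicates the counter index j into each
	 * per-queue field of the register, so all queues mapped by this pair
	 * of RQSMR registers share statistics counter set j.
	 */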
	for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) {
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i));
		reg |= ((0x1010101) * j);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);

		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1));
		reg |= ((0x1010101) * j);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg);
	}

	/* Transmit Queues stats setting - 4 queues per statistics reg */
	for (i = 0; i < 8; i++) {
		reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i));
		reg |= ((0x1010101) * i);
		IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg);
	}

	return 0;
}

/**
 * ixgbe_dcb_hw_config_82598 - Config and enable DCB
 * @hw: pointer to hardware structure
 * @rx_pba: method for Rx packet buffer allocation
 * @pfc_en: bitmap of traffic classes with priority flow control enabled
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth group indexed by traffic class
 * @prio_type: priority type indexed by traffic class
 *
 * Configure dcb settings and enable dcb mode.
 */
s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
			      u8 rx_pba, u8 pfc_en, u16 *refill,
			      u16 *max, u8 *bwg_id, u8 *prio_type)
{
	ixgbe_dcb_config_packet_buffers_82598(hw, rx_pba);
	ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
	ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
					       bwg_id, prio_type);
	ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
					       bwg_id, prio_type);
	ixgbe_dcb_config_pfc_82598(hw, pfc_en);
	ixgbe_dcb_config_tc_stats_82598(hw);

	return 0;
}