Discussion:
[dpdk-dev] [PATCH 0/5] bnxt patchset
Ajit Khaparde
2018-01-22 06:20:41 UTC
Please consider applying this patchset.

Ajit Khaparde (4):
net/bnxt: fix size of tx ring in HW
net/bnxt: use driver specific dynamic log type
net/bnxt: register for more async events
net/bnxt: check if MAC address is all zeros

Somnath Kotur (1):
net/bnxt: Support for rx/tx_queue_start/stop ops

drivers/net/bnxt/bnxt.h | 8 +
drivers/net/bnxt/bnxt_cpr.c | 19 ++-
drivers/net/bnxt/bnxt_ethdev.c | 356 ++++++++++++++++++++++++----------------
drivers/net/bnxt/bnxt_filter.c | 44 ++---
drivers/net/bnxt/bnxt_filter.h | 1 +
drivers/net/bnxt/bnxt_hwrm.c | 151 ++++++++---------
drivers/net/bnxt/bnxt_hwrm.h | 11 ++
drivers/net/bnxt/bnxt_irq.c | 4 +-
drivers/net/bnxt/bnxt_ring.c | 12 +-
drivers/net/bnxt/bnxt_rxq.c | 22 +--
drivers/net/bnxt/bnxt_rxq.h | 2 +-
drivers/net/bnxt/bnxt_rxr.c | 23 +--
drivers/net/bnxt/bnxt_rxr.h | 3 +-
drivers/net/bnxt/bnxt_stats.c | 16 +-
drivers/net/bnxt/bnxt_txq.c | 10 +-
drivers/net/bnxt/bnxt_txq.h | 1 -
drivers/net/bnxt/bnxt_txr.c | 34 +++-
drivers/net/bnxt/bnxt_txr.h | 2 +
drivers/net/bnxt/bnxt_vnic.c | 14 +-
drivers/net/bnxt/rte_pmd_bnxt.c | 48 +++---
20 files changed, 464 insertions(+), 317 deletions(-)
--
2.14.3 (Apple Git-98)
Ajit Khaparde
2018-01-22 06:20:42 UTC
During Tx ring allocation, the actual ring size configured in the HW
ends up being twice the txd count specified to the driver.
The power-of-2 ring size calculation wrongly adds a +1 before sending
the ring create command to the FW, so a txd count that is already a
power of two gets rounded up to the next one.
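
A minimal standalone sketch of the arithmetic (illustration only, not driver
code; align32pow2() below just mirrors DPDK's rte_align32pow2(), which rounds
its argument up to the next power of two) shows how the extra +1 doubles an
already power-of-2 descriptor count:

#include <stdint.h>
#include <stdio.h>

/* Same rounding as rte_align32pow2(): next power of two >= x */
static uint32_t align32pow2(uint32_t x)
{
        x--;
        x |= x >> 1;
        x |= x >> 2;
        x |= x >> 4;
        x |= x >> 8;
        x |= x >> 16;
        return x + 1;
}

int main(void)
{
        uint32_t nb_tx_desc = 512;      /* typical power-of-2 txd request */

        printf("old: %u\n", align32pow2(nb_tx_desc + 1));  /* prints 1024 */
        printf("new: %u\n", align32pow2(nb_tx_desc));      /* prints 512  */
        return 0;
}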

Fixes: 6eb3cc2294fd ("net/bnxt: add initial Tx code")
Signed-off-by: Ajit Khaparde <***@broadcom.com>
---
drivers/net/bnxt/bnxt_txr.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index ac77434b7..2f2c87119 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -101,7 +101,7 @@ int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
if (ring == NULL)
return -ENOMEM;
txr->tx_ring_struct = ring;
- ring->ring_size = rte_align32pow2(txq->nb_tx_desc + 1);
+ ring->ring_size = rte_align32pow2(txq->nb_tx_desc);
ring->ring_mask = ring->ring_size - 1;
ring->bd = (void *)txr->tx_desc_ring;
ring->bd_dma = txr->tx_desc_mapping;
--
2.14.3 (Apple Git-98)
Ferruh Yigit
2018-01-22 12:22:56 UTC
Post by Ajit Khaparde
During Tx ring allocation, the actual ring size configured in the HW
ends up being twice the txd count specified to the driver.
The power-of-2 ring size calculation wrongly adds a +1 before sending
the ring create command to the FW, so a txd count that is already a
power of two gets rounded up to the next one.
Fixes: 6eb3cc2294fd ("net/bnxt: add initial Tx code")
If you request this fix to be backported to the stable trees, please add
the Cc: stable tag to the commit log.
Post by Ajit Khaparde
---
drivers/net/bnxt/bnxt_txr.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index ac77434b7..2f2c87119 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -101,7 +101,7 @@ int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
if (ring == NULL)
return -ENOMEM;
txr->tx_ring_struct = ring;
- ring->ring_size = rte_align32pow2(txq->nb_tx_desc + 1);
+ ring->ring_size = rte_align32pow2(txq->nb_tx_desc);
ring->ring_mask = ring->ring_size - 1;
ring->bd = (void *)txr->tx_desc_ring;
ring->bd_dma = txr->tx_desc_mapping;
Ajit Khaparde
2018-01-22 06:20:44 UTC
Register for async events from the FW.
The new events we register for include link speed config change,
PF driver unload and VF config change. Also log a message when an
async event arrives on the completion ring.
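
The async_event_fwd[] words in the HWRM driver-register request form a bitmap
indexed by the async event ID, which is why the new ASYNC_CMPL_EVENT_ID_*
macros below shift by the raw ID for word 0 and by (ID - 32) for word 1.
A generic sketch of that mapping (illustration only; set_async_event_bit()
and the word count used here are not part of the patch):

#include <stdint.h>

#define ASYNC_EVENT_FWD_WORDS   8    /* 32-bit words in the request bitmap */

static void set_async_event_bit(uint32_t *async_event_fwd, uint16_t event_id)
{
        if (event_id >= ASYNC_EVENT_FWD_WORDS * 32)
                return;                 /* ID outside the bitmap */

        /* word index = id / 32, bit within that word = id % 32 */
        async_event_fwd[event_id / 32] |= 1U << (event_id % 32);
}

The patch instead open-codes the two words it needs and converts each
assembled word with rte_cpu_to_le_32() before OR-ing it into the request.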

Signed-off-by: Ajit Khaparde <***@broadcom.com>
---
drivers/net/bnxt/bnxt_cpr.c | 11 ++++++++++-
drivers/net/bnxt/bnxt_hwrm.c | 9 +++++++--
drivers/net/bnxt/bnxt_hwrm.h | 11 +++++++++++
3 files changed, 28 insertions(+), 3 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 663a5223d..737bb060a 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -57,8 +57,17 @@ void bnxt_handle_async_event(struct bnxt *bp,
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
bnxt_link_update_op(bp->eth_dev, 1);
break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
+ PMD_DRV_LOG(INFO, "Async event: PF driver unloaded\n");
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
+ PMD_DRV_LOG(INFO, "Async event: VF config changed\n");
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
+ PMD_DRV_LOG(INFO, "Port conn async event\n");
+ break;
default:
- PMD_DRV_LOG(DEBUG, "handle_async_event id = 0x%x\n", event_id);
+ PMD_DRV_LOG(INFO, "handle_async_event id = 0x%x\n", event_id);
break;
}
}
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index fdca424a9..75e03ad5d 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -637,8 +637,13 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
sizeof(bp->pf.vf_req_fwd)));
}

- req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */
- //memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
+ req.async_event_fwd[0] |=
+ rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
+ ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
+ ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
+ req.async_event_fwd[1] |=
+ rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
+ ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 46f6f3208..108f8e81d 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -42,6 +42,17 @@ struct bnxt_filter_info;
struct bnxt_cp_ring_info;

#define HWRM_SEQ_ID_INVALID -1U
+/* Convert Bit field location to value */
+#define ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE \
+ (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE)
+#define ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED \
+ (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED)
+#define ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE \
+ (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE)
+#define ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD \
+ (1 << (HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD - 32))
+#define ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE \
+ (1 << (HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE - 32))

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,
struct bnxt_vnic_info *vnic);
--
2.14.3 (Apple Git-98)
Ajit Khaparde
2018-01-22 06:20:43 UTC
This patch implements a driver-specific dynamic log type, doing away
with the use of RTE_LOG() for logging.
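
The underlying mechanism is DPDK's dynamic logging API: register a named log
type once at init time with rte_log_register(), give it a default level with
rte_log_set_level(), and route every message through rte_log() with that type
so the level can be tuned per driver via the EAL log-level options. A minimal
sketch of the same pattern with hypothetical names (example_logtype,
EXAMPLE_LOG, "pmd.example.driver"), mirroring the bnxt_init_log() and
PMD_DRV_LOG() additions in the diff below:

#include <rte_eal.h>
#include <rte_log.h>

static int example_logtype;

#define EXAMPLE_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, example_logtype, "%s(): " fmt, \
                __func__, ## args)

static void
example_init_log(void)
{
        example_logtype = rte_log_register("pmd.example.driver");
        if (example_logtype >= 0)
                rte_log_set_level(example_logtype, RTE_LOG_NOTICE);
}

/* Constructor hookup, same RTE_INIT() usage as the patch */
RTE_INIT(example_init_log);

/* Usage: EXAMPLE_LOG(INFO, "port %u started\n", port_id); */
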
Signed-off-by: Ajit Khaparde <***@broadcom.com>
---
drivers/net/bnxt/bnxt.h | 8 ++
drivers/net/bnxt/bnxt_cpr.c | 10 +-
drivers/net/bnxt/bnxt_ethdev.c | 233 +++++++++++++++++++++-------------------
drivers/net/bnxt/bnxt_filter.c | 42 ++++----
drivers/net/bnxt/bnxt_hwrm.c | 142 ++++++++++++------------
drivers/net/bnxt/bnxt_irq.c | 4 +-
drivers/net/bnxt/bnxt_ring.c | 12 +--
drivers/net/bnxt/bnxt_rxq.c | 22 ++--
drivers/net/bnxt/bnxt_rxr.c | 19 ++--
drivers/net/bnxt/bnxt_stats.c | 16 +--
drivers/net/bnxt/bnxt_txq.c | 10 +-
drivers/net/bnxt/bnxt_vnic.c | 14 +--
drivers/net/bnxt/rte_pmd_bnxt.c | 48 ++++-----
13 files changed, 297 insertions(+), 283 deletions(-)

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index cf0b1d27c..6776c64a5 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -334,4 +334,12 @@ int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg);

bool is_bnxt_supported(struct rte_eth_dev *dev);
extern const struct rte_flow_ops bnxt_flow_ops;
+
+extern int bnxt_logtype_driver;
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, bnxt_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt, ## args)
#endif
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index cde8adc3b..663a5223d 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -58,7 +58,7 @@ void bnxt_handle_async_event(struct bnxt *bp,
bnxt_link_update_op(bp->eth_dev, 1);
break;
default:
- RTE_LOG(DEBUG, PMD, "handle_async_event id = 0x%x\n", event_id);
+ PMD_DRV_LOG(DEBUG, "handle_async_event id = 0x%x\n", event_id);
break;
}
}
@@ -74,7 +74,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
int rc;

if (bp->pf.active_vfs <= 0) {
- RTE_LOG(ERR, PMD, "Forwarded VF with no active VFs\n");
+ PMD_DRV_LOG(ERR, "Forwarded VF with no active VFs\n");
return;
}

@@ -93,7 +93,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)

if (fw_vf_id < bp->pf.first_vf_id ||
fw_vf_id >= (bp->pf.first_vf_id) + bp->pf.active_vfs) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"FWD req's source_id 0x%x out of range 0x%x - 0x%x (%d %d)\n",
fw_vf_id, bp->pf.first_vf_id,
(bp->pf.first_vf_id) + bp->pf.active_vfs - 1,
@@ -130,7 +130,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
/* Forward */
rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to send FWD req VF 0x%x, type 0x%x.\n",
fw_vf_id - bp->pf.first_vf_id,
rte_le_to_cpu_16(fwd_cmd->req_type));
@@ -141,7 +141,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
reject:
rc = bnxt_hwrm_reject_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to send REJECT req VF 0x%x, type 0x%x.\n",
fw_vf_id - bp->pf.first_vf_id,
rte_le_to_cpu_16(fwd_cmd->req_type));
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 057786a62..af4673dc2 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -58,6 +58,7 @@
#define DRV_MODULE_NAME "bnxt"
static const char bnxt_version[] =
"Broadcom Cumulus driver " DRV_MODULE_NAME "\n";
+int bnxt_logtype_driver;

#define PCI_VENDOR_ID_BROADCOM 0x14E4

@@ -223,25 +224,25 @@ static int bnxt_init_chip(struct bnxt *bp)

rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
goto err_out;
}

rc = bnxt_alloc_hwrm_rings(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
goto err_out;
}

rc = bnxt_alloc_all_hwrm_ring_grps(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc);
+ PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
goto err_out;
}

rc = bnxt_mq_rx_configure(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
goto err_out;
}

@@ -251,14 +252,14 @@ static int bnxt_init_chip(struct bnxt *bp)

rc = bnxt_hwrm_vnic_alloc(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM vnic %d alloc failure rc: %x\n",
+ PMD_DRV_LOG(ERR, "HWRM vnic %d alloc failure rc: %x\n",
i, rc);
goto err_out;
}

rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM vnic %d ctx alloc failure rc: %x\n",
i, rc);
goto err_out;
@@ -266,14 +267,14 @@ static int bnxt_init_chip(struct bnxt *bp)

rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM vnic %d cfg failure rc: %x\n",
+ PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
i, rc);
goto err_out;
}

rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM vnic %d filter failure rc: %x\n",
i, rc);
goto err_out;
@@ -294,7 +295,7 @@ static int bnxt_init_chip(struct bnxt *bp)
}
rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM vnic %d set RSS failure rc: %x\n",
i, rc);
goto err_out;
@@ -310,7 +311,7 @@ static int bnxt_init_chip(struct bnxt *bp)
}
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM cfa l2 rx mask failure rc: %x\n", rc);
goto err_out;
}
@@ -320,10 +321,10 @@ static int bnxt_init_chip(struct bnxt *bp)
!RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
intr_vector = bp->eth_dev->data->nb_rx_queues;
- RTE_LOG(INFO, PMD, "%s(): intr_vector = %d\n", __func__,
+ PMD_DRV_LOG(INFO, "%s(): intr_vector = %d\n", __func__,
intr_vector);
if (intr_vector > bp->rx_cp_nr_rings) {
- RTE_LOG(ERR, PMD, "At most %d intr queues supported",
+ PMD_DRV_LOG(ERR, "At most %d intr queues supported",
bp->rx_cp_nr_rings);
return -ENOTSUP;
}
@@ -337,11 +338,11 @@ static int bnxt_init_chip(struct bnxt *bp)
bp->eth_dev->data->nb_rx_queues *
sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
- RTE_LOG(ERR, PMD, "Failed to allocate %d rx_queues"
+ PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
" intr_vec", bp->eth_dev->data->nb_rx_queues);
return -ENOMEM;
}
- RTE_LOG(DEBUG, PMD, "%s(): intr_handle->intr_vec = %p "
+ PMD_DRV_LOG(DEBUG, "%s(): intr_handle->intr_vec = %p "
"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
__func__, intr_handle->intr_vec, intr_handle->nb_efd,
intr_handle->max_intr);
@@ -359,14 +360,14 @@ static int bnxt_init_chip(struct bnxt *bp)

rc = bnxt_get_hwrm_link_config(bp, &new);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
goto err_out;
}

if (!bp->link_info.link_up) {
rc = bnxt_set_hwrm_link_config(bp, true);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM link config failure rc: %x\n", rc);
goto err_out;
}
@@ -537,13 +538,13 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
bp->max_stat_ctx ||
(uint32_t)(eth_dev->data->nb_rx_queues + 1) > bp->max_ring_grps) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Insufficient resources to support requested config\n");
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Num Queues Requested: Tx %d, Rx %d\n",
eth_dev->data->nb_tx_queues,
eth_dev->data->nb_rx_queues);
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Res available: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d\n",
bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
bp->max_stat_ctx, bp->max_ring_grps);
@@ -567,13 +568,13 @@ static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
struct rte_eth_link *link = &eth_dev->data->dev_link;

if (link->link_status)
- RTE_LOG(INFO, PMD, "Port %d Link Up - speed %u Mbps - %s\n",
+ PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
eth_dev->data->port_id,
(uint32_t)link->link_speed,
(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- RTE_LOG(INFO, PMD, "Port %d Link Down\n",
+ PMD_DRV_LOG(INFO, "Port %d Link Down\n",
eth_dev->data->port_id);
}

@@ -590,7 +591,7 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
int rc;

if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
}
@@ -729,25 +730,25 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
struct bnxt_filter_info *filter;

if (BNXT_VF(bp)) {
- RTE_LOG(ERR, PMD, "Cannot add MAC address to a VF interface\n");
+ PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
return -ENOTSUP;
}

if (!vnic) {
- RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
+ PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
return -EINVAL;
}
/* Attach requested MAC address to the new l2_filter */
STAILQ_FOREACH(filter, &vnic->filter, next) {
if (filter->mac_index == index) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"MAC addr already existed for pool %d\n", pool);
return -EINVAL;
}
}
filter = bnxt_alloc_filter(bp);
if (!filter) {
- RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+ PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
return -ENODEV;
}
STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
@@ -770,7 +771,7 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
if (rc) {
new.link_speed = ETH_LINK_SPEED_100M;
new.link_duplex = ETH_LINK_FULL_DUPLEX;
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to retrieve link rc = 0x%x!\n", rc);
goto out;
}
@@ -861,7 +862,7 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
return -EINVAL;

if (reta_size != HW_HASH_INDEX_SIZE) {
- RTE_LOG(ERR, PMD, "The configured hash table lookup size "
+ PMD_DRV_LOG(ERR, "The configured hash table lookup size "
"(%d) must equal the size supported by the hardware "
"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
return -EINVAL;
@@ -893,7 +894,7 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
return -EINVAL;

if (reta_size != HW_HASH_INDEX_SIZE) {
- RTE_LOG(ERR, PMD, "The configured hash table lookup size "
+ PMD_DRV_LOG(ERR, "The configured hash table lookup size "
"(%d) must equal the size supported by the hardware "
"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
return -EINVAL;
@@ -924,7 +925,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
*/
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
if (!rss_conf->rss_hf)
- RTE_LOG(ERR, PMD, "Hash type NONE\n");
+ PMD_DRV_LOG(ERR, "Hash type NONE\n");
} else {
if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
return -EINVAL;
@@ -1013,7 +1014,7 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
}
if (hash_types) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unknwon RSS config from firmware (%08x), RSS disabled",
vnic->hash_type);
return -ENOTSUP;
@@ -1062,7 +1063,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
- RTE_LOG(ERR, PMD, "Flow Control Settings cannot be modified\n");
+ PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
return -ENOTSUP;
}

@@ -1122,10 +1123,10 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
switch (udp_tunnel->prot_type) {
case RTE_TUNNEL_TYPE_VXLAN:
if (bp->vxlan_port_cnt) {
- RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
+ PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
udp_tunnel->udp_port);
if (bp->vxlan_port != udp_tunnel->udp_port) {
- RTE_LOG(ERR, PMD, "Only one port allowed\n");
+ PMD_DRV_LOG(ERR, "Only one port allowed\n");
return -ENOSPC;
}
bp->vxlan_port_cnt++;
@@ -1137,10 +1138,10 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
break;
case RTE_TUNNEL_TYPE_GENEVE:
if (bp->geneve_port_cnt) {
- RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
+ PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
udp_tunnel->udp_port);
if (bp->geneve_port != udp_tunnel->udp_port) {
- RTE_LOG(ERR, PMD, "Only one port allowed\n");
+ PMD_DRV_LOG(ERR, "Only one port allowed\n");
return -ENOSPC;
}
bp->geneve_port_cnt++;
@@ -1151,7 +1152,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
bp->geneve_port_cnt++;
break;
default:
- RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
return -ENOTSUP;
}
rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
@@ -1171,11 +1172,11 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
switch (udp_tunnel->prot_type) {
case RTE_TUNNEL_TYPE_VXLAN:
if (!bp->vxlan_port_cnt) {
- RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
+ PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
return -EINVAL;
}
if (bp->vxlan_port != udp_tunnel->udp_port) {
- RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
+ PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
udp_tunnel->udp_port, bp->vxlan_port);
return -EINVAL;
}
@@ -1188,11 +1189,11 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
break;
case RTE_TUNNEL_TYPE_GENEVE:
if (!bp->geneve_port_cnt) {
- RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
+ PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
return -EINVAL;
}
if (bp->geneve_port != udp_tunnel->udp_port) {
- RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
+ PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
udp_tunnel->udp_port, bp->geneve_port);
return -EINVAL;
}
@@ -1204,7 +1205,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
port = bp->geneve_fw_dst_port_id;
break;
default:
- RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
return -ENOTSUP;
}

@@ -1261,7 +1262,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)

new_filter = bnxt_alloc_filter(bp);
if (!new_filter) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"MAC/VLAN filter alloc failed\n");
rc = -ENOMEM;
goto exit;
@@ -1279,7 +1280,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
new_filter);
if (rc)
goto exit;
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Del Vlan filter for %d\n",
vlan_id);
}
@@ -1334,7 +1335,7 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
}
new_filter = bnxt_alloc_filter(bp);
if (!new_filter) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"MAC/VLAN filter alloc failed\n");
rc = -ENOMEM;
goto exit;
@@ -1354,7 +1355,7 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
new_filter);
if (rc)
goto exit;
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Added Vlan filter for %d\n", vlan_id);
cont:
filter = temp_filter;
@@ -1389,7 +1390,7 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
for (i = 0; i < 4095; i++)
bnxt_del_vlan_filter(bp, i);
}
- RTE_LOG(INFO, PMD, "VLAN Filtering: %d\n",
+ PMD_DRV_LOG(INFO, "VLAN Filtering: %d\n",
dev->data->dev_conf.rxmode.hw_vlan_filter);
}

@@ -1403,12 +1404,12 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
vnic->vlan_strip = false;
bnxt_hwrm_vnic_cfg(bp, vnic);
}
- RTE_LOG(INFO, PMD, "VLAN Strip Offload: %d\n",
+ PMD_DRV_LOG(INFO, "VLAN Strip Offload: %d\n",
dev->data->dev_conf.rxmode.hw_vlan_strip);
}

if (mask & ETH_VLAN_EXTEND_MASK)
- RTE_LOG(ERR, PMD, "Extend VLAN Not supported\n");
+ PMD_DRV_LOG(ERR, "Extend VLAN Not supported\n");

return 0;
}
@@ -1444,7 +1445,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
if (rc)
break;
filter->mac_index = 0;
- RTE_LOG(DEBUG, PMD, "Set MAC addr\n");
+ PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
}
}

@@ -1547,7 +1548,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2;

if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) {
- RTE_LOG(ERR, PMD, "MTU requested must be within (%d, %d)\n",
+ PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n",
ETHER_MIN_MTU, max_dev_mtu);
return -EINVAL;
}
@@ -1565,7 +1566,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;

eth_dev->data->mtu = new_mtu;
- RTE_LOG(INFO, PMD, "New MTU is %d\n", eth_dev->data->mtu);
+ PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu);

for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
@@ -1592,7 +1593,7 @@ bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
int rc;

if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"PVID cannot be modified for this function\n");
return -ENOTSUP;
}
@@ -1753,13 +1754,13 @@ bnxt_match_and_validate_ether_filter(struct bnxt *bp,

if (efilter->ether_type == ETHER_TYPE_IPv4 ||
efilter->ether_type == ETHER_TYPE_IPv6) {
- RTE_LOG(ERR, PMD, "invalid ether_type(0x%04x) in"
+ PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in"
" ethertype filter.", efilter->ether_type);
*ret = -EINVAL;
goto exit;
}
if (efilter->queue >= bp->rx_nr_rings) {
- RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
*ret = -EINVAL;
goto exit;
}
@@ -1767,7 +1768,7 @@ bnxt_match_and_validate_ether_filter(struct bnxt *bp,
vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
if (vnic == NULL) {
- RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
*ret = -EINVAL;
goto exit;
}
@@ -1818,7 +1819,7 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,
return 0;

if (arg == NULL) {
- RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
filter_op);
return -EINVAL;
}
@@ -1835,7 +1836,7 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,

bfilter = bnxt_get_unused_filter(bp);
if (bfilter == NULL) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Not enough resources for a new filter.\n");
return -ENOMEM;
}
@@ -1879,11 +1880,11 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,
next);
bnxt_free_filter(bp, filter1);
} else if (ret == 0) {
- RTE_LOG(ERR, PMD, "No matching filter found\n");
+ PMD_DRV_LOG(ERR, "No matching filter found\n");
}
break;
default:
- RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
ret = -EINVAL;
goto error;
}
@@ -1902,7 +1903,7 @@ parse_ntuple_filter(struct bnxt *bp,
uint32_t en = 0;

if (nfilter->queue >= bp->rx_nr_rings) {
- RTE_LOG(ERR, PMD, "Invalid queue %d\n", nfilter->queue);
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue);
return -EINVAL;
}

@@ -1914,7 +1915,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
break;
default:
- RTE_LOG(ERR, PMD, "invalid dst_port mask.");
+ PMD_DRV_LOG(ERR, "invalid dst_port mask.");
return -EINVAL;
}

@@ -1932,7 +1933,7 @@ parse_ntuple_filter(struct bnxt *bp,
en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
break;
default:
- RTE_LOG(ERR, PMD, "invalid protocol mask.");
+ PMD_DRV_LOG(ERR, "invalid protocol mask.");
return -EINVAL;
}

@@ -1944,7 +1945,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
break;
default:
- RTE_LOG(ERR, PMD, "invalid dst_ip mask.");
+ PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
return -EINVAL;
}

@@ -1956,7 +1957,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
break;
default:
- RTE_LOG(ERR, PMD, "invalid src_ip mask.");
+ PMD_DRV_LOG(ERR, "invalid src_ip mask.");
return -EINVAL;
}

@@ -1968,7 +1969,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
break;
default:
- RTE_LOG(ERR, PMD, "invalid src_port mask.");
+ PMD_DRV_LOG(ERR, "invalid src_port mask.");
return -EINVAL;
}

@@ -2021,18 +2022,18 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp,
int ret;

if (nfilter->flags != RTE_5TUPLE_FLAGS) {
- RTE_LOG(ERR, PMD, "only 5tuple is supported.");
+ PMD_DRV_LOG(ERR, "only 5tuple is supported.");
return -EINVAL;
}

if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
- RTE_LOG(ERR, PMD, "Ntuple filter: TCP flags not supported\n");
+ PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n");
return -EINVAL;
}

bfilter = bnxt_get_unused_filter(bp);
if (bfilter == NULL) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Not enough resources for a new filter.\n");
return -ENOMEM;
}
@@ -2059,7 +2060,7 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp,

if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
bfilter->dst_id == mfilter->dst_id) {
- RTE_LOG(ERR, PMD, "filter exists.\n");
+ PMD_DRV_LOG(ERR, "filter exists.\n");
ret = -EEXIST;
goto free_filter;
} else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
@@ -2068,12 +2069,12 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp,
ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter);
STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next);
STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next);
- RTE_LOG(ERR, PMD, "filter with matching pattern exists.\n");
- RTE_LOG(ERR, PMD, " Updated it to the new destination queue\n");
+ PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n");
+ PMD_DRV_LOG(ERR, " Updated it to the new destination queue\n");
goto free_filter;
}
if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
- RTE_LOG(ERR, PMD, "filter doesn't exist.");
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
ret = -ENOENT;
goto free_filter;
}
@@ -2118,7 +2119,7 @@ bnxt_ntuple_filter(struct rte_eth_dev *dev,
return 0;

if (arg == NULL) {
- RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
filter_op);
return -EINVAL;
}
@@ -2135,7 +2136,7 @@ bnxt_ntuple_filter(struct rte_eth_dev *dev,
filter_op);
break;
default:
- RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
ret = -EINVAL;
break;
}
@@ -2337,7 +2338,7 @@ bnxt_parse_fdir_filter(struct bnxt *bp,
vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
if (vnic == NULL) {
- RTE_LOG(ERR, PMD, "Invalid queue %d\n", fdir->action.rx_queue);
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue);
return -EINVAL;
}

@@ -2441,7 +2442,7 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
/* FALLTHROUGH */
filter = bnxt_get_unused_filter(bp);
if (filter == NULL) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Not enough resources for a new flow.\n");
return -ENOMEM;
}
@@ -2453,12 +2454,12 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,

match = bnxt_match_fdir(bp, filter);
if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
- RTE_LOG(ERR, PMD, "Flow already exists.\n");
+ PMD_DRV_LOG(ERR, "Flow already exists.\n");
ret = -EEXIST;
goto free_filter;
}
if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
- RTE_LOG(ERR, PMD, "Flow does not exist.\n");
+ PMD_DRV_LOG(ERR, "Flow does not exist.\n");
ret = -ENOENT;
goto free_filter;
}
@@ -2505,10 +2506,10 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_STATS:
case RTE_ETH_FILTER_INFO:
/* FALLTHROUGH */
- RTE_LOG(ERR, PMD, "operation %u not implemented", filter_op);
+ PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
break;
default:
- RTE_LOG(ERR, PMD, "unknown operation %u", filter_op);
+ PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
ret = -EINVAL;
break;
}
@@ -2529,7 +2530,7 @@ bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,

switch (filter_type) {
case RTE_ETH_FILTER_TUNNEL:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"filter type: %d: To be implemented\n", filter_type);
break;
case RTE_ETH_FILTER_FDIR:
@@ -2547,7 +2548,7 @@ bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
*(const void **)arg = &bnxt_flow_ops;
break;
default:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Filter type (%d) not supported", filter_type);
ret = -EINVAL;
break;
@@ -2841,7 +2842,7 @@ bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
uint32_t dir_entries;
uint32_t entry_length;

- RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x\n",
+ PMD_DRV_LOG(INFO, "%s(): %04x:%02x:%02x:%02x\n",
__func__, bp->pdev->addr.domain, bp->pdev->addr.bus,
bp->pdev->addr.devid, bp->pdev->addr.function);

@@ -2860,7 +2861,7 @@ bnxt_get_eeprom_op(struct rte_eth_dev *dev,
uint32_t index;
uint32_t offset;

- RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
+ PMD_DRV_LOG(INFO, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
"len = %d\n", __func__, bp->pdev->addr.domain,
bp->pdev->addr.bus, bp->pdev->addr.devid,
bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
@@ -2929,13 +2930,13 @@ bnxt_set_eeprom_op(struct rte_eth_dev *dev,
uint8_t index, dir_op;
uint16_t type, ext, ordinal, attr;

- RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
+ PMD_DRV_LOG(INFO, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
"len = %d\n", __func__, bp->pdev->addr.domain,
bp->pdev->addr.bus, bp->pdev->addr.devid,
bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD, "NVM write not supported from a VF\n");
+ PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
return -EINVAL;
}

@@ -3056,7 +3057,7 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev)

/* enable device (incl. PCI PM wakeup), and bus-mastering */
if (!pci_dev->mem_resource[0].addr) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Cannot find PCI device base address, aborting\n");
rc = -ENODEV;
goto init_err_disable;
@@ -3067,7 +3068,7 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev)

bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
if (!bp->bar0) {
- RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
+ PMD_DRV_LOG(ERR, "Cannot map device registers, aborting\n");
rc = -ENOMEM;
goto init_err_release;
}
@@ -3103,7 +3104,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
int rc;

if (version_printed++ == 0)
- RTE_LOG(INFO, PMD, "%s\n", bnxt_version);
+ PMD_DRV_LOG(INFO, "%s\n", bnxt_version);

rte_eth_copy_pci_info(eth_dev, pci_dev);

@@ -3120,7 +3121,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)

rc = bnxt_init_board(eth_dev);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Board initialization failed rc: %x\n", rc);
goto error;
}
@@ -3151,13 +3152,13 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map address to physical memory\n");
return -ENOMEM;
}
@@ -3186,13 +3187,13 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map address to physical memory\n");
return -ENOMEM;
}
@@ -3207,7 +3208,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)

rc = bnxt_alloc_hwrm_resources(bp);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"hwrm resource allocation failure rc: %x\n", rc);
goto error_free;
}
@@ -3216,31 +3217,31 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
goto error_free;
rc = bnxt_hwrm_queue_qportcfg(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm queue qportcfg failed\n");
+ PMD_DRV_LOG(ERR, "hwrm queue qportcfg failed\n");
goto error_free;
}

rc = bnxt_hwrm_func_qcfg(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm func qcfg failed\n");
+ PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n");
goto error_free;
}

/* Get the MAX capabilities for this function */
rc = bnxt_hwrm_func_qcaps(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm query capability failure rc: %x\n", rc);
goto error_free;
}
if (bp->max_tx_rings == 0) {
- RTE_LOG(ERR, PMD, "No TX rings available!\n");
+ PMD_DRV_LOG(ERR, "No TX rings available!\n");
rc = -EBUSY;
goto error_free;
}
eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
if (eth_dev->data->mac_addrs == NULL) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to alloc %u bytes needed to store MAC addr tbl",
ETHER_ADDR_LEN * bp->max_l2_ctx);
rc = -ENOMEM;
@@ -3252,7 +3253,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)

if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
/* 1 ring is for default completion ring */
- RTE_LOG(ERR, PMD, "Insufficient resource: Ring Group\n");
+ PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
rc = -ENOSPC;
goto error_free;
}
@@ -3260,7 +3261,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
bp->grp_info = rte_zmalloc("bnxt_grp_info",
sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
if (!bp->grp_info) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to alloc %zu bytes to store group info table\n",
sizeof(*bp->grp_info) * bp->max_ring_grps);
rc = -ENOMEM;
@@ -3273,7 +3274,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
} else {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Firmware too old for VF mailbox functionality\n");
memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
}
@@ -3293,20 +3294,20 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
rc = bnxt_hwrm_func_driver_register(bp);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to register driver");
rc = -EBUSY;
goto error_free;
}

- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
pci_dev->mem_resource[0].phys_addr,
pci_dev->mem_resource[0].addr);

rc = bnxt_hwrm_func_reset(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm chip reset failure rc: %x\n", rc);
rc = -EIO;
goto error_free;
}
@@ -3318,13 +3319,13 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
if (bp->pdev->max_vfs) {
rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
if (rc) {
- RTE_LOG(ERR, PMD, "Failed to allocate VFs\n");
+ PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
goto error_free;
}
} else {
rc = bnxt_hwrm_allocate_pf_only(bp);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to allocate PF resources\n");
goto error_free;
}
@@ -3433,6 +3434,16 @@ bool is_bnxt_supported(struct rte_eth_dev *dev)
return is_device_supported(dev, &bnxt_rte_pmd);
}

+static void
+bnxt_init_log(void)
+{
+ bnxt_logtype_driver = rte_log_register("pmd.bnxt.driver");
+ if (bnxt_logtype_driver >= 0)
+ rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
+}
+
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(bnxt_init_log);
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index 22cfbd372..0716dd8fd 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -56,7 +56,7 @@ struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
/* Find the 1st unused filter from the free_filter_list pool*/
filter = STAILQ_FIRST(&bp->free_filter_list);
if (!filter) {
- RTE_LOG(ERR, PMD, "No more free filter resources\n");
+ PMD_DRV_LOG(ERR, "No more free filter resources\n");
return NULL;
}
STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
@@ -77,7 +77,7 @@ struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)

filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
if (!filter) {
- RTE_LOG(ERR, PMD, "Failed to alloc memory for VF %hu filters\n",
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
vf);
return NULL;
}
@@ -145,11 +145,11 @@ void bnxt_free_filter_mem(struct bnxt *bp)
for (i = 0; i < max_filters; i++) {
filter = &bp->filter_info[i];
if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
- RTE_LOG(ERR, PMD, "HWRM filter is not freed??\n");
+ PMD_DRV_LOG(ERR, "HWRM filter is not freed??\n");
/* Call HWRM to try to free filter again */
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
if (rc)
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM filter cannot be freed rc = %d\n",
rc);
}
@@ -172,7 +172,7 @@ int bnxt_alloc_filter_mem(struct bnxt *bp)
max_filters * sizeof(struct bnxt_filter_info),
0);
if (filter_mem == NULL) {
- RTE_LOG(ERR, PMD, "Failed to alloc memory for %d filters",
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters",
max_filters);
return -ENOMEM;
}
@@ -187,7 +187,7 @@ struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
/* Find the 1st unused filter from the free_filter_list pool*/
filter = STAILQ_FIRST(&bp->free_filter_list);
if (!filter) {
- RTE_LOG(ERR, PMD, "No more free filter resources\n");
+ PMD_DRV_LOG(ERR, "No more free filter resources\n");
return NULL;
}
STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
@@ -281,7 +281,7 @@ bnxt_filter_type_check(const struct rte_flow_item pattern[],
/* FALLTHROUGH */
/* need ntuple match, reset exact match */
if (!use_ntuple) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"VLAN flow cannot use NTUPLE filter\n");
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -292,7 +292,7 @@ bnxt_filter_type_check(const struct rte_flow_item pattern[],
use_ntuple |= 1;
break;
default:
- RTE_LOG(ERR, PMD, "Unknown Flow type");
+ PMD_DRV_LOG(ERR, "Unknown Flow type");
use_ntuple |= 1;
}
item++;
@@ -329,7 +329,7 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
int dflt_vnic;

use_ntuple = bnxt_filter_type_check(pattern, error);
- RTE_LOG(DEBUG, PMD, "Use NTUPLE %d\n", use_ntuple);
+ PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
if (use_ntuple < 0)
return use_ntuple;

@@ -791,7 +791,7 @@ bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
return f0;

//This flow needs DST MAC which is not same as port/l2
- RTE_LOG(DEBUG, PMD, "Create L2 filter for DST MAC\n");
+ PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
filter1 = bnxt_get_unused_filter(bp);
if (filter1 == NULL)
return NULL;
@@ -828,7 +828,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
int rc;

if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
- RTE_LOG(ERR, PMD, "Cannot create flow on RSS queues\n");
+ PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"Cannot create flow on RSS queues");
@@ -857,7 +857,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
rc = -rte_errno;
goto ret;
}
- RTE_LOG(DEBUG, PMD, "Queue index %d\n", act_q->index);
+ PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
@@ -875,7 +875,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
goto ret;
}
filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
- RTE_LOG(DEBUG, PMD, "VNIC found\n");
+ PMD_DRV_LOG(DEBUG, "VNIC found\n");
break;
case RTE_FLOW_ACTION_TYPE_DROP:
vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
@@ -990,7 +990,7 @@ bnxt_flow_validate(struct rte_eth_dev *dev,

filter = bnxt_get_unused_filter(bp);
if (filter == NULL) {
- RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
+ PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
return -ENOMEM;
}

@@ -1092,13 +1092,13 @@ bnxt_flow_create(struct rte_eth_dev *dev,

ret = bnxt_flow_agrs_validate(attr, pattern, actions, error);
if (ret != 0) {
- RTE_LOG(ERR, PMD, "Not a validate flow.\n");
+ PMD_DRV_LOG(ERR, "Not a validate flow.\n");
goto free_flow;
}

filter = bnxt_get_unused_filter(bp);
if (filter == NULL) {
- RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
+ PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
goto free_flow;
}

@@ -1109,15 +1109,15 @@ bnxt_flow_create(struct rte_eth_dev *dev,

ret = bnxt_match_filter(bp, filter);
if (ret == -EEXIST) {
- RTE_LOG(DEBUG, PMD, "Flow already exists.\n");
+ PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
/* Clear the filter that was created as part of
* validate_and_parse_flow() above
*/
bnxt_hwrm_clear_l2_filter(bp, filter);
goto free_filter;
} else if (ret == -EXDEV) {
- RTE_LOG(DEBUG, PMD, "Flow with same pattern exists");
- RTE_LOG(DEBUG, PMD, "Updating with different destination\n");
+ PMD_DRV_LOG(DEBUG, "Flow with same pattern exists");
+ PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
update_flow = true;
}

@@ -1145,7 +1145,7 @@ bnxt_flow_create(struct rte_eth_dev *dev,
ret = -EXDEV;
goto free_flow;
}
- RTE_LOG(ERR, PMD, "Successfully created flow.\n");
+ PMD_DRV_LOG(ERR, "Successfully created flow.\n");
STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
return flow;
}
@@ -1181,7 +1181,7 @@ bnxt_flow_destroy(struct rte_eth_dev *dev,

ret = bnxt_match_filter(bp, filter);
if (ret == 0)
- RTE_LOG(ERR, PMD, "Could not find matching flow\n");
+ PMD_DRV_LOG(ERR, "Could not find matching flow\n");
if (filter->filter_type == HWRM_CFA_EM_FILTER)
ret = bnxt_hwrm_clear_em_filter(bp, filter);
if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index d88061c9f..fdca424a9 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -79,7 +79,7 @@ static int page_getenum(size_t size)
return 22;
if (size <= 1 << 30)
return 30;
- RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
+ PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
return sizeof(void *) * 8 - 1;
}

@@ -161,7 +161,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
}

if (i >= HWRM_CMD_TIMEOUT) {
- RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
+ PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
req->req_type);
goto err_ret;
}
@@ -194,8 +194,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,

#define HWRM_CHECK_RESULT() do {\
if (rc) { \
- RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
- __func__, rc); \
+ PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
rte_spinlock_unlock(&bp->hwrm_lock); \
return rc; \
} \
@@ -204,18 +203,15 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
if (resp->resp_len >= 16) { \
struct hwrm_err_output *tmp_hwrm_err_op = \
(void *)resp; \
- RTE_LOG(ERR, PMD, \
- "%s error %d:%d:%08x:%04x\n", \
- __func__, \
+ PMD_DRV_LOG(ERR, \
+ "error %d:%d:%08x:%04x\n", \
rc, tmp_hwrm_err_op->cmd_err, \
rte_le_to_cpu_32(\
tmp_hwrm_err_op->opaque_0), \
rte_le_to_cpu_16(\
tmp_hwrm_err_op->opaque_1)); \
- } \
- else { \
- RTE_LOG(ERR, PMD, \
- "%s error %d\n", __func__, rc); \
+ } else { \
+ PMD_DRV_LOG(ERR, "error %d\n", rc); \
} \
rte_spinlock_unlock(&bp->hwrm_lock); \
return rc; \
@@ -369,7 +365,7 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
conf->pool_map[j].pools & (1UL << j)) {
- RTE_LOG(DEBUG, PMD,
+ PMD_DRV_LOG(DEBUG,
"Add vlan %u to vmdq pool %u\n",
conf->pool_map[j].vlan_id, j);

@@ -545,7 +541,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
getpagesize(),
getpagesize());
if (bp->pf.vf_info[i].vlan_table == NULL)
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Fail to alloc VLAN table for VF %d\n",
i);
else
@@ -556,7 +552,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
getpagesize(),
getpagesize());
if (bp->pf.vf_info[i].vlan_as_table == NULL)
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Alloc VLAN AS table for VF %d fail\n",
i);
else
@@ -588,7 +584,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
- RTE_LOG(INFO, PMD, "PTP SUPPORTED");
+ PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
HWRM_UNLOCK();
bnxt_hwrm_ptp_qcfg(bp);
}
@@ -676,13 +672,13 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)

HWRM_CHECK_RESULT();

- RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
+ PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
resp->hwrm_intf_maj, resp->hwrm_intf_min,
resp->hwrm_intf_upd,
resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
(resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
- RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
+ PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

my_version = HWRM_VERSION_MAJOR << 16;
@@ -694,28 +690,28 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
fw_version |= resp->hwrm_intf_upd;

if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
- RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
+ PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
rc = -EINVAL;
goto error;
}

if (my_version != fw_version) {
- RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
+ PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
if (my_version < fw_version) {
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Firmware API version is newer than driver.\n");
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"The driver may be missing features.\n");
} else {
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Firmware API version is older than driver.\n");
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Not all driver features may be functional.\n");
}
}

if (bp->max_req_len > resp->max_req_win_len) {
- RTE_LOG(ERR, PMD, "Unsupported request length\n");
+ PMD_DRV_LOG(ERR, "Unsupported request length\n");
rc = -EINVAL;
}
bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
@@ -738,7 +734,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
bp->hwrm_cmd_resp_dma_addr =
rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unable to map response buffer to physical memory.\n");
rc = -ENOMEM;
goto error;
@@ -750,7 +746,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
(dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED)) {
- RTE_LOG(DEBUG, PMD, "Short command supported\n");
+ PMD_DRV_LOG(DEBUG, "Short command supported\n");

rte_free(bp->hwrm_short_cmd_req_addr);

@@ -765,7 +761,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
if (bp->hwrm_short_cmd_req_dma_addr == 0) {
rte_free(bp->hwrm_short_cmd_req_addr);
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unable to map buffer to physical memory.\n");
rc = -ENOMEM;
goto error;
@@ -814,7 +810,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
if (bp->link_info.auto_mode && conf->link_speed) {
req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
- RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
+ PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
}

req.flags = rte_cpu_to_le_32(conf->phy_flags);
@@ -853,7 +849,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
} else {
req.flags =
rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
- RTE_LOG(INFO, PMD, "Force Link Down\n");
+ PMD_DRV_LOG(INFO, "Force Link Down\n");
}

rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -971,7 +967,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
break;
default:
- RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
+ PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
ring_type);
HWRM_UNLOCK();
return -1;
@@ -985,22 +981,22 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
rc = rte_le_to_cpu_16(resp->error_code);
switch (ring_type) {
case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"hwrm_ring_alloc cp failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"hwrm_ring_alloc rx failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"hwrm_ring_alloc tx failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
default:
- RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
}
@@ -1032,19 +1028,19 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,

switch (ring_type) {
case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
- RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
+ PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
rc);
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
- RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
+ PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
rc);
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
- RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
+ PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
rc);
return rc;
default:
- RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
return rc;
}
}
@@ -1168,7 +1164,7 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

/* map ring groups to this vnic */
- RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
+ PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
vnic->start_grp_id, vnic->end_grp_id);
for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
@@ -1188,7 +1184,7 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)

vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
HWRM_UNLOCK();
- RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
return rc;
}

@@ -1258,7 +1254,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct bnxt_plcmodes_cfg pmodes;

if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
- RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
return rc;
}

@@ -1323,7 +1319,7 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
- RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
return rc;
}
HWRM_PREP(req, VNIC_QCFG);
@@ -1375,7 +1371,7 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)

vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
HWRM_UNLOCK();
- RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
+ PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);

return rc;
}
@@ -1388,7 +1384,7 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
bp->hwrm_cmd_resp_addr;

if (vnic->rss_rule == 0xffff) {
- RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
+ PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
return rc;
}
HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
@@ -1412,7 +1408,7 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
- RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
return rc;
}

@@ -1854,7 +1850,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
bp->hwrm_cmd_resp_dma_addr =
rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@@ -1890,7 +1886,7 @@ bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)

STAILQ_FOREACH(flow, &vnic->flow_list, next) {
filter = flow->filter;
- RTE_LOG(ERR, PMD, "filter type %d\n", filter->filter_type);
+ PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
if (filter->filter_type == HWRM_CFA_EM_FILTER)
rc = bnxt_hwrm_clear_em_filter(bp, filter);
else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
@@ -2032,7 +2028,7 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
break;
default:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unsupported link speed %d; default to AUTO\n",
conf_link_speed);
break;
@@ -2056,20 +2052,20 @@ static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

if (one_speed & (one_speed - 1)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Invalid advertised speeds (%u) for port %u\n",
link_speed, port_id);
return -EINVAL;
}
if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unsupported advertised speed (%u) for port %u\n",
link_speed, port_id);
return -EINVAL;
}
} else {
if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unsupported advertised speeds (%u) for port %u\n",
link_speed, port_id);
return -EINVAL;
@@ -2141,7 +2137,7 @@ static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
default:
- RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
+ PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
hw_link_speed);
break;
}
@@ -2161,7 +2157,7 @@ static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
eth_link_duplex = ETH_LINK_HALF_DUPLEX;
break;
default:
- RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
+ PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
hw_link_duplex);
break;
}
@@ -2175,7 +2171,7 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)

rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Get link config failed with rc %d\n", rc);
goto exit;
}
@@ -2229,7 +2225,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
bp->link_info.media_type ==
HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
- RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
+ PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
return -EINVAL;
}

@@ -2243,7 +2239,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
port_phy_cfg:
rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Set link config failed with rc %d\n", rc);
}

@@ -2420,11 +2416,11 @@ static void reserve_resources_from_vf(struct bnxt *bp,
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

if (rc) {
- RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
copy_func_cfg_to_qcaps(cfg_req, resp);
} else if (resp->error_code) {
rc = rte_le_to_cpu_16(resp->error_code);
- RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
copy_func_cfg_to_qcaps(cfg_req, resp);
}

@@ -2455,11 +2451,11 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
return -1;
} else if (resp->error_code) {
rc = rte_le_to_cpu_16(resp->error_code);
- RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
return -1;
}
rc = rte_le_to_cpu_16(resp->vlan);
@@ -2495,7 +2491,7 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
int rc;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
+ PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
return -1;
}

@@ -2522,7 +2518,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
size_t req_buf_sz;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
+ PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
return -1;
}

@@ -2588,9 +2584,9 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);

if (rc || resp->error_code) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to initizlie VF %d\n", i);
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Not all VFs available. (%d, %d)\n",
rc, resp->error_code);
HWRM_UNLOCK();
@@ -2740,7 +2736,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
req.req_buf_page_addr[0] =
rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
if (req.req_buf_page_addr[0] == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map buffer address to physical memory\n");
return -ENOMEM;
}
@@ -3162,7 +3158,7 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
return -ENOMEM;
dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@@ -3198,7 +3194,7 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,

dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@@ -3259,7 +3255,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,

dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@@ -3316,19 +3312,19 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,

if (req.vnic_id_tbl_addr == 0) {
HWRM_UNLOCK();
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map VNIC ID table address to physical memory\n");
return -ENOMEM;
}
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
if (rc) {
HWRM_UNLOCK();
- RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
return -1;
} else if (resp->error_code) {
rc = rte_le_to_cpu_16(resp->error_code);
HWRM_UNLOCK();
- RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
return -1;
}
rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
@@ -3459,7 +3455,7 @@ int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
}
}
/* Could not find a default VNIC. */
- RTE_LOG(ERR, PMD, "No default VNIC\n");
+ PMD_DRV_LOG(ERR, "No default VNIC\n");
exit:
rte_free(vnic_ids);
return -1;
@@ -3549,7 +3545,7 @@ int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
if (filter->fw_em_filter_id == UINT64_MAX)
return 0;

- RTE_LOG(ERR, PMD, "Clear EM filter\n");
+ PMD_DRV_LOG(ERR, "Clear EM filter\n");
HWRM_PREP(req, CFA_EM_FLOW_FREE);

req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c
index 49436cfd9..8ab986936 100644
--- a/drivers/net/bnxt/bnxt_irq.c
+++ b/drivers/net/bnxt/bnxt_irq.c
@@ -84,7 +84,7 @@ static void bnxt_int_handler(void *param)
cpr->cp_ring_struct))
goto no_more;
}
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Ignoring %02x completion\n", CMP_TYPE(cmp));
break;
}
@@ -154,7 +154,7 @@ int bnxt_setup_int(struct bnxt *bp)
return 0;

setup_exit:
- RTE_LOG(ERR, PMD, "bnxt_irq_tbl setup failed\n");
+ PMD_DRV_LOG(ERR, "bnxt_irq_tbl setup failed\n");
return rc;
}

diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 59d1035fd..8fb897216 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -176,15 +176,15 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
for (sz = 0; sz < total_alloc_len; sz += getpagesize())
rte_mem_lock_page(((char *)mz->addr) + sz);
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map ring address to physical memory\n");
return -ENOMEM;
}
@@ -326,7 +326,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
ring = rxr->ag_ring_struct;
/* Agg ring */
if (ring == NULL) {
- RTE_LOG(ERR, PMD, "Alloc AGG Ring is NULL!\n");
+ PMD_DRV_LOG(ERR, "Alloc AGG Ring is NULL!\n");
goto err_out;
}

@@ -336,7 +336,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
cp_ring->fw_ring_id);
if (rc)
goto err_out;
- RTE_LOG(DEBUG, PMD, "Alloc AGG Done!\n");
+ PMD_DRV_LOG(DEBUG, "Alloc AGG Done!\n");
rxr->ag_prod = 0;
rxr->ag_doorbell =
(char *)pci_dev->mem_resource[2].addr +
@@ -347,7 +347,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +
ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
if (bnxt_init_one_rx_ring(rxq)) {
- RTE_LOG(ERR, PMD, "bnxt_init_one_rx_ring failed!\n");
+ PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
return -ENOMEM;
}
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index f7fbb2856..736936a55 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -75,7 +75,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
if (bp->rx_cp_nr_rings < 2) {
vnic = bnxt_alloc_vnic(bp);
if (!vnic) {
- RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
+ PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@@ -92,7 +92,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
vnic->end_grp_id = vnic->start_grp_id;
filter = bnxt_alloc_filter(bp);
if (!filter) {
- RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+ PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@@ -121,7 +121,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
pools = bp->rx_cp_nr_rings;
break;
default:
- RTE_LOG(ERR, PMD, "Unsupported mq_mod %d\n",
+ PMD_DRV_LOG(ERR, "Unsupported mq_mod %d\n",
dev_conf->rxmode.mq_mode);
rc = -EINVAL;
goto err_out;
@@ -135,7 +135,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
for (i = 0; i < pools; i++) {
vnic = bnxt_alloc_vnic(bp);
if (!vnic) {
- RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
+ PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@@ -166,7 +166,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
}
filter = bnxt_alloc_filter(bp);
if (!filter) {
- RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+ PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@@ -312,14 +312,14 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
int rc = 0;

if (queue_idx >= bp->max_rx_rings) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Cannot create Rx ring %d. Only %d rings available\n",
queue_idx, bp->max_rx_rings);
return -ENOSPC;
}

if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
- RTE_LOG(ERR, PMD, "nb_desc %d is invalid\n", nb_desc);
+ PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
rc = -EINVAL;
goto out;
}
@@ -332,7 +332,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq) {
- RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!\n");
+ PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
rc = -ENOMEM;
goto out;
}
@@ -341,8 +341,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;

- RTE_LOG(DEBUG, PMD, "RX Buf size is %d\n", rxq->rx_buf_use_size);
- RTE_LOG(DEBUG, PMD, "RX Buf MTU %d\n", eth_dev->data->mtu);
+ PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_use_size);
+ PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);

rc = bnxt_init_rx_ring_struct(rxq, socket_id);
if (rc)
@@ -357,7 +357,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
/* Allocate RX ring hardware descriptors */
if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring,
"rxr")) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"ring_dma_zone_reserve for rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
rc = -ENOMEM;
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 82c93d6dc..3f07c11b5 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -95,9 +95,9 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
}

if (rxbd == NULL)
- RTE_LOG(ERR, PMD, "Jumbo Frame. rxbd is NULL\n");
+ PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n");
if (rx_buf == NULL)
- RTE_LOG(ERR, PMD, "Jumbo Frame. rx_buf is NULL\n");
+ PMD_DRV_LOG(ERR, "Jumbo Frame. rx_buf is NULL\n");


rx_buf->mbuf = mbuf;
@@ -234,7 +234,7 @@ static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
/* TODO batch allocation for better performance */
while (rte_bitmap_get(rxr->ag_bitmap, next)) {
if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"agg mbuf alloc failed: prod=0x%x\n", next);
break;
}
@@ -512,7 +512,7 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
*/
prod = RING_NEXT(rxr->rx_ring_struct, prod);
if (bnxt_alloc_rx_data(rxq, rxr, prod)) {
- RTE_LOG(ERR, PMD, "mbuf alloc failed with prod=0x%x\n", prod);
+ PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n", prod);
rc = -ENOMEM;
goto rx;
}
@@ -601,7 +601,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxr->rx_prod = i;
B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
} else {
- RTE_LOG(ERR, PMD, "Alloc mbuf failed\n");
+ PMD_DRV_LOG(ERR, "Alloc mbuf failed\n");
break;
}
}
@@ -744,7 +744,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
prod = rxr->rx_prod;
for (i = 0; i < ring->ring_size; i++) {
if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"init'ed rx ring %d with %d/%d mbufs only\n",
rxq->queue_id, i, ring->ring_size);
break;
@@ -752,7 +752,6 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->rx_prod = prod;
prod = RING_NEXT(rxr->rx_ring_struct, prod);
}
- RTE_LOG(DEBUG, PMD, "%s\n", __func__);

ring = rxr->ag_ring_struct;
type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
@@ -761,7 +760,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)

for (i = 0; i < ring->ring_size; i++) {
if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"init'ed AG ring %d with %d/%d mbufs only\n",
rxq->queue_id, i, ring->ring_size);
break;
@@ -769,7 +768,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->ag_prod = prod;
prod = RING_NEXT(rxr->ag_ring_struct, prod);
}
- RTE_LOG(DEBUG, PMD, "%s AGG Done!\n", __func__);
+ PMD_DRV_LOG(DEBUG, "AGG Done!\n");

if (rxr->tpa_info) {
for (i = 0; i < BNXT_TPA_MAX; i++) {
@@ -781,7 +780,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
}
}
}
- RTE_LOG(DEBUG, PMD, "%s TPA alloc Done!\n", __func__);
+ PMD_DRV_LOG(DEBUG, "TPA alloc Done!\n");

return 0;
}
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index 470c6438d..bd93cc834 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -237,7 +237,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,

memset(bnxt_stats, 0, sizeof(*bnxt_stats));
if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
- RTE_LOG(ERR, PMD, "Device Initialization not complete!\n");
+ PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
return 0;
}

@@ -272,7 +272,7 @@ void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
- RTE_LOG(ERR, PMD, "Device Initialization not complete!\n");
+ PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
return;
}

@@ -289,7 +289,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
uint64_t tx_drop_pkts;

if (!(bp->flags & BNXT_FLAG_PORT_STATS)) {
- RTE_LOG(ERR, PMD, "xstats not supported for VF\n");
+ PMD_DRV_LOG(ERR, "xstats not supported for VF\n");
return 0;
}

@@ -371,11 +371,11 @@ void bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev)
bnxt_hwrm_port_clr_stats(bp);

if (BNXT_VF(bp))
- RTE_LOG(ERR, PMD, "Operation not supported on a VF device\n");
+ PMD_DRV_LOG(ERR, "Operation not supported on a VF device\n");
if (!BNXT_SINGLE_PF(bp))
- RTE_LOG(ERR, PMD, "Operation not supported on a MF device\n");
+ PMD_DRV_LOG(ERR, "Operation not supported on a MF device\n");
if (!(bp->flags & BNXT_FLAG_PORT_STATS))
- RTE_LOG(ERR, PMD, "Operation not supported\n");
+ PMD_DRV_LOG(ERR, "Operation not supported\n");
}

int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids,
@@ -394,7 +394,7 @@ int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids,
bnxt_dev_xstats_get_by_id_op(dev, NULL, values_copy, stat_cnt);
for (i = 0; i < limit; i++) {
if (ids[i] >= stat_cnt) {
- RTE_LOG(ERR, PMD, "id value isn't valid");
+ PMD_DRV_LOG(ERR, "id value isn't valid");
return -1;
}
values[i] = values_copy[ids[i]];
@@ -420,7 +420,7 @@ int bnxt_dev_xstats_get_names_by_id_op(struct rte_eth_dev *dev,

for (i = 0; i < limit; i++) {
if (ids[i] >= stat_cnt) {
- RTE_LOG(ERR, PMD, "id value isn't valid");
+ PMD_DRV_LOG(ERR, "id value isn't valid");
return -1;
}
strcpy(xstats_names[i].name,
diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c
index 25c33f5e4..53524346d 100644
--- a/drivers/net/bnxt/bnxt_txq.c
+++ b/drivers/net/bnxt/bnxt_txq.c
@@ -109,14 +109,14 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
int rc = 0;

if (queue_idx >= bp->max_tx_rings) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Cannot create Tx ring %d. Only %d rings available\n",
queue_idx, bp->max_tx_rings);
return -ENOSPC;
}

if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
- RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc);
+ PMD_DRV_LOG(ERR, "nb_desc %d is invalid", nb_desc);
rc = -EINVAL;
goto out;
}
@@ -131,7 +131,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
txq = rte_zmalloc_socket("bnxt_tx_queue", sizeof(struct bnxt_tx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!txq) {
- RTE_LOG(ERR, PMD, "bnxt_tx_queue allocation failed!");
+ PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!");
rc = -ENOMEM;
goto out;
}
@@ -149,14 +149,14 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
/* Allocate TX ring hardware descriptors */
if (bnxt_alloc_rings(bp, queue_idx, txq->tx_ring, NULL, txq->cp_ring,
"txr")) {
- RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for tx_ring failed!");
+ PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
bnxt_tx_queue_release_op(txq);
rc = -ENOMEM;
goto out;
}

if (bnxt_init_one_tx_ring(txq)) {
- RTE_LOG(ERR, PMD, "bnxt_init_one_tx_ring failed!");
+ PMD_DRV_LOG(ERR, "bnxt_init_one_tx_ring failed!");
bnxt_tx_queue_release_op(txq);
rc = -ENOMEM;
goto out;
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 5bac26053..d4aeb4ca8 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -107,7 +107,7 @@ int bnxt_free_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic,
}
temp = STAILQ_NEXT(temp, next);
}
- RTE_LOG(ERR, PMD, "VNIC %p is not found in pool[%d]\n", vnic, pool);
+ PMD_DRV_LOG(ERR, "VNIC %p is not found in pool[%d]\n", vnic, pool);
return -EINVAL;
}

@@ -118,7 +118,7 @@ struct bnxt_vnic_info *bnxt_alloc_vnic(struct bnxt *bp)
/* Find the 1st unused vnic from the free_vnic_list pool*/
vnic = STAILQ_FIRST(&bp->free_vnic_list);
if (!vnic) {
- RTE_LOG(ERR, PMD, "No more free VNIC resources\n");
+ PMD_DRV_LOG(ERR, "No more free VNIC resources\n");
return NULL;
}
STAILQ_REMOVE_HEAD(&bp->free_vnic_list, next);
@@ -194,13 +194,13 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
}
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map vnic address to physical memory\n");
return -ENOMEM;
}
@@ -241,7 +241,7 @@ void bnxt_free_vnic_mem(struct bnxt *bp)
for (i = 0; i < max_vnics; i++) {
vnic = &bp->vnic_info[i];
if (vnic->fw_vnic_id != (uint16_t)HWRM_NA_SIGNATURE) {
- RTE_LOG(ERR, PMD, "VNIC is not freed yet!\n");
+ PMD_DRV_LOG(ERR, "VNIC is not freed yet!\n");
/* TODO Call HWRM to free VNIC */
}
}
@@ -260,7 +260,7 @@ int bnxt_alloc_vnic_mem(struct bnxt *bp)
vnic_mem = rte_zmalloc("bnxt_vnic_info",
max_vnics * sizeof(struct bnxt_vnic_info), 0);
if (vnic_mem == NULL) {
- RTE_LOG(ERR, PMD, "Failed to alloc memory for %d VNICs",
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for %d VNICs",
max_vnics);
return -ENOMEM;
}
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c
index 595208997..cae95f8fa 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt.c
+++ b/drivers/net/bnxt/rte_pmd_bnxt.c
@@ -85,7 +85,7 @@ int rte_pmd_bnxt_set_tx_loopback(uint16_t port, uint8_t on)
bp = (struct bnxt *)eth_dev->data->dev_private;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set Tx loopback on non-PF port %d!\n",
port);
return -ENOTSUP;
@@ -127,7 +127,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
bp = (struct bnxt *)eth_dev->data->dev_private;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set all queues drop on non-PF port!\n");
return -ENOTSUP;
}
@@ -140,7 +140,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
bp->vnic_info[i].bd_stall = !on;
rc = bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[i]);
if (rc) {
- RTE_LOG(ERR, PMD, "Failed to update PF VNIC %d.\n", i);
+ PMD_DRV_LOG(ERR, "Failed to update PF VNIC %d.\n", i);
return rc;
}
}
@@ -151,7 +151,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
rte_pmd_bnxt_set_all_queues_drop_en_cb, &on,
bnxt_hwrm_vnic_cfg);
if (rc) {
- RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", i);
+ PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", i);
break;
}
}
@@ -180,7 +180,7 @@ int rte_pmd_bnxt_set_vf_mac_addr(uint16_t port, uint16_t vf,
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VF %d mac address on non-PF port %d!\n",
vf, port);
return -ENOTSUP;
@@ -224,7 +224,7 @@ int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf,

/* Requested BW can't be greater than link speed */
if (tot_rate > eth_dev->data->dev_link.link_speed) {
- RTE_LOG(ERR, PMD, "Rate > Link speed. Set to %d\n", tot_rate);
+ PMD_DRV_LOG(ERR, "Rate > Link speed. Set to %d\n", tot_rate);
return -EINVAL;
}

@@ -262,7 +262,7 @@ int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
bp = (struct bnxt *)dev->data->dev_private;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set mac spoof on non-PF port %d!\n", port);
return -EINVAL;
}
@@ -314,7 +314,7 @@ int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
bp = (struct bnxt *)dev->data->dev_private;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VLAN spoof on non-PF port %d!\n", port);
return -EINVAL;
}
@@ -333,7 +333,7 @@ int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
rc = -1;
}
} else {
- RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", vf);
+ PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", vf);
}

return rc;
@@ -367,7 +367,7 @@ rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VF %d stripq on non-PF port %d!\n",
vf, port);
return -ENOTSUP;
@@ -377,7 +377,7 @@ rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
rte_pmd_bnxt_set_vf_vlan_stripq_cb, &on,
bnxt_hwrm_vnic_cfg);
if (rc)
- RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", vf);
+ PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", vf);

return rc;
}
@@ -407,7 +407,7 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
return -EINVAL;

if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) {
- RTE_LOG(ERR, PMD, "Currently cannot toggle this setting\n");
+ PMD_DRV_LOG(ERR, "Currently cannot toggle this setting\n");
return -ENOTSUP;
}

@@ -430,7 +430,7 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
&bp->pf.vf_info[vf].l2_rx_mask,
bnxt_set_rx_mask_no_vlan);
if (rc)
- RTE_LOG(ERR, PMD, "bnxt_hwrm_func_vf_vnic_set_rxmask failed\n");
+ PMD_DRV_LOG(ERR, "bnxt_hwrm_func_vf_vnic_set_rxmask failed\n");

return rc;
}
@@ -442,7 +442,7 @@ static int bnxt_set_vf_table(struct bnxt *bp, uint16_t vf)
struct bnxt_vnic_info vnic;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VLAN table on non-PF port!\n");
return -EINVAL;
}
@@ -455,7 +455,7 @@ static int bnxt_set_vf_table(struct bnxt *bp, uint16_t vf)
/* This simply indicates there's no driver loaded.
* This is not an error.
*/
- RTE_LOG(ERR, PMD, "Unable to get default VNIC for VF %d\n", vf);
+ PMD_DRV_LOG(ERR, "Unable to get default VNIC for VF %d\n", vf);
} else {
memset(&vnic, 0, sizeof(vnic));
vnic.fw_vnic_id = dflt_vnic;
@@ -518,9 +518,9 @@ int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
/* Now check that there's space */
if (cnt == getpagesize() / sizeof(struct
bnxt_vlan_antispoof_table_entry)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"VLAN anti-spoof table is full\n");
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"VF %d cannot add VLAN %u\n",
i, vlan);
rc = -1;
@@ -585,7 +585,7 @@ int rte_pmd_bnxt_get_vf_stats(uint16_t port,
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to get VF %d stats on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -612,7 +612,7 @@ int rte_pmd_bnxt_reset_vf_stats(uint16_t port,
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to reset VF %d stats on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -638,7 +638,7 @@ int rte_pmd_bnxt_get_vf_rx_status(uint16_t port, uint16_t vf_id)
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to query VF %d RX stats on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -665,7 +665,7 @@ int rte_pmd_bnxt_get_vf_tx_drop_count(uint16_t port, uint16_t vf_id,
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to query VF %d TX drops on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -697,7 +697,7 @@ int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct ether_addr *addr,
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to config VF %d MAC on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -773,7 +773,7 @@ rte_pmd_bnxt_set_vf_vlan_insert(uint16_t port, uint16_t vf,
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VF %d vlan insert on non-PF port %d!\n",
vf, port);
return -ENOTSUP;
@@ -807,7 +807,7 @@ int rte_pmd_bnxt_set_vf_persist_stats(uint16_t port, uint16_t vf, uint8_t on)
bp = (struct bnxt *)dev->data->dev_private;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set persist stats on non-PF port %d!\n",
port);
return -EINVAL;
--
2.14.3 (Apple Git-98)
Ferruh Yigit
2018-01-22 12:23:49 UTC
Permalink
Post by Ajit Khaparde
This patch implements driver specific log type doing away with
usage of RTE_LOG() for logging.
<...>
Post by Ajit Khaparde
@@ -3433,6 +3434,16 @@ bool is_bnxt_supported(struct rte_eth_dev *dev)
return is_device_supported(dev, &bnxt_rte_pmd);
}
+static void
+bnxt_init_log(void)
+{
+ bnxt_logtype_driver = rte_log_register("pmd.bnxt.driver");
+ if (bnxt_logtype_driver >= 0)
+ rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
+}
+
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(bnxt_init_log);
The compiler gives a warning for this [1]; briefly, it asks that this line
be placed above the "bnxt_init_log" definition:

[1]
...dpdk/drivers/net/bnxt/bnxt_ethdev.c:3518:1: error: attribute declaration must
precede definition [-Werror,-Wignored-attributes]


RTE_INIT(bnxt_init_log);
^
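
For reference, the ordering the compiler asks for is declaration before
definition, e.g. (a sketch of the pattern, within bnxt_ethdev.c and assuming
its existing includes):

/* The RTE_INIT() constructor declaration precedes the definition. */
RTE_INIT(bnxt_init_log);
static void
bnxt_init_log(void)
{
	bnxt_logtype_driver = rte_log_register("pmd.bnxt.driver");
	if (bnxt_logtype_driver >= 0)
		rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
}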


<...>
Ajit Khaparde
2018-01-25 22:47:50 UTC
Permalink
Please apply this patchset.
I have incorporated most of the comments we received on v1.
The switch to SPDX tags is in the works and will be submitted separately.

Ajit Khaparde (6):
net/bnxt: fix size of tx ring in HW
net/bnxt: use driver specific dynamic log type
net/bnxt: register for more async events
net/bnxt: check if MAC address is all zeros
net/bnxt: add 100G speed detection
net/bnxt: fix number of pools for RSS

Somnath Kotur (1):
net/bnxt: support for rx/tx_queue_start/stop ops

drivers/net/bnxt/bnxt.h | 8 ++
drivers/net/bnxt/bnxt_cpr.c | 19 ++-
drivers/net/bnxt/bnxt_ethdev.c | 282 +++++++++++++++++++++-------------------
drivers/net/bnxt/bnxt_filter.c | 44 +++----
drivers/net/bnxt/bnxt_filter.h | 1 +
drivers/net/bnxt/bnxt_hwrm.c | 181 +++++++++++++++-----------
drivers/net/bnxt/bnxt_hwrm.h | 13 ++
drivers/net/bnxt/bnxt_irq.c | 4 +-
drivers/net/bnxt/bnxt_ring.c | 12 +-
drivers/net/bnxt/bnxt_rxq.c | 77 +++++++++--
drivers/net/bnxt/bnxt_rxq.h | 6 +-
drivers/net/bnxt/bnxt_rxr.c | 23 ++--
drivers/net/bnxt/bnxt_rxr.h | 3 +-
drivers/net/bnxt/bnxt_stats.c | 16 +--
drivers/net/bnxt/bnxt_txq.c | 10 +-
drivers/net/bnxt/bnxt_txq.h | 1 -
drivers/net/bnxt/bnxt_txr.c | 34 ++++-
drivers/net/bnxt/bnxt_txr.h | 2 +
drivers/net/bnxt/bnxt_vnic.c | 14 +-
drivers/net/bnxt/bnxt_vnic.h | 1 -
drivers/net/bnxt/rte_pmd_bnxt.c | 48 +++----
21 files changed, 481 insertions(+), 318 deletions(-)
--
2.14.3 (Apple Git-98)
Ajit Khaparde
2018-01-25 22:47:51 UTC
Permalink
During Tx ring allocation, the actual ring size configured in the HW
ends up being twice the number of descriptors (txd) specified to the driver.
The power-of-2 ring size calculation wrongly adds a +1 before sending the ring
create command to the FW.
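
For illustration (a minimal sketch, not part of the patch, with 256 as a
hypothetical txd value), the doubling can be reproduced directly from
rte_align32pow2():

#include <stdio.h>
#include <stdint.h>
#include <rte_common.h>	/* rte_align32pow2() */

int main(void)
{
	uint32_t nb_tx_desc = 256;

	/* Old code: aligning nb_tx_desc + 1 jumps to the next power of 2. */
	printf("old ring_size = %u\n", rte_align32pow2(nb_tx_desc + 1)); /* 512 */
	/* Fixed code: a power-of-2 txd value is kept as requested. */
	printf("new ring_size = %u\n", rte_align32pow2(nb_tx_desc));     /* 256 */
	return 0;
}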

Fixes: 6eb3cc2294fd ("net/bnxt: add initial Tx code")
Cc: ***@dpdk.org
Signed-off-by: Ajit Khaparde <***@broadcom.com>
---
drivers/net/bnxt/bnxt_txr.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index ac77434b7..2f2c87119 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -101,7 +101,7 @@ int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
if (ring == NULL)
return -ENOMEM;
txr->tx_ring_struct = ring;
- ring->ring_size = rte_align32pow2(txq->nb_tx_desc + 1);
+ ring->ring_size = rte_align32pow2(txq->nb_tx_desc);
ring->ring_mask = ring->ring_size - 1;
ring->bd = (void *)txr->tx_desc_ring;
ring->bd_dma = txr->tx_desc_mapping;
--
2.14.3 (Apple Git-98)
Ajit Khaparde
2018-01-25 22:47:52 UTC
Permalink
This patch implements a driver-specific dynamic log type, doing away with
the use of RTE_LOG() for logging.
Signed-off-by: Ajit Khaparde <***@broadcom.com>
--
v1 -> v2: address review comments
---
drivers/net/bnxt/bnxt.h | 8 ++
drivers/net/bnxt/bnxt_cpr.c | 10 +-
drivers/net/bnxt/bnxt_ethdev.c | 241 +++++++++++++++++++++-------------------
drivers/net/bnxt/bnxt_filter.c | 42 +++----
drivers/net/bnxt/bnxt_hwrm.c | 142 ++++++++++++-----------
drivers/net/bnxt/bnxt_irq.c | 4 +-
drivers/net/bnxt/bnxt_ring.c | 12 +-
drivers/net/bnxt/bnxt_rxq.c | 22 ++--
drivers/net/bnxt/bnxt_rxr.c | 19 ++--
drivers/net/bnxt/bnxt_stats.c | 16 +--
drivers/net/bnxt/bnxt_txq.c | 10 +-
drivers/net/bnxt/bnxt_vnic.c | 14 +--
drivers/net/bnxt/rte_pmd_bnxt.c | 48 ++++----
13 files changed, 300 insertions(+), 288 deletions(-)

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index cf0b1d27c..6776c64a5 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -334,4 +334,12 @@ int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg);

bool is_bnxt_supported(struct rte_eth_dev *dev);
extern const struct rte_flow_ops bnxt_flow_ops;
+
+extern int bnxt_logtype_driver;
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, bnxt_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt, ## args)
#endif
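
As an illustration of the new macros (not part of the patch), a converted call
site ends up tagged with the calling function name and routed through the
dynamic log type:

/* Illustration only: expansion of a converted call site.
 *
 *   PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
 *
 * becomes, after preprocessing:
 *
 *   rte_log(RTE_LOG_ERR, bnxt_logtype_driver,
 *           "%s(): HWRM ring alloc failure rc: %x\n", __func__, rc);
 */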
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index cde8adc3b..663a5223d 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -58,7 +58,7 @@ void bnxt_handle_async_event(struct bnxt *bp,
bnxt_link_update_op(bp->eth_dev, 1);
break;
default:
- RTE_LOG(DEBUG, PMD, "handle_async_event id = 0x%x\n", event_id);
+ PMD_DRV_LOG(DEBUG, "handle_async_event id = 0x%x\n", event_id);
break;
}
}
@@ -74,7 +74,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
int rc;

if (bp->pf.active_vfs <= 0) {
- RTE_LOG(ERR, PMD, "Forwarded VF with no active VFs\n");
+ PMD_DRV_LOG(ERR, "Forwarded VF with no active VFs\n");
return;
}

@@ -93,7 +93,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)

if (fw_vf_id < bp->pf.first_vf_id ||
fw_vf_id >= (bp->pf.first_vf_id) + bp->pf.active_vfs) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"FWD req's source_id 0x%x out of range 0x%x - 0x%x (%d %d)\n",
fw_vf_id, bp->pf.first_vf_id,
(bp->pf.first_vf_id) + bp->pf.active_vfs - 1,
@@ -130,7 +130,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
/* Forward */
rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to send FWD req VF 0x%x, type 0x%x.\n",
fw_vf_id - bp->pf.first_vf_id,
rte_le_to_cpu_16(fwd_cmd->req_type));
@@ -141,7 +141,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
reject:
rc = bnxt_hwrm_reject_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to send REJECT req VF 0x%x, type 0x%x.\n",
fw_vf_id - bp->pf.first_vf_id,
rte_le_to_cpu_16(fwd_cmd->req_type));
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 057786a62..daed1fc80 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -58,6 +58,7 @@
#define DRV_MODULE_NAME "bnxt"
static const char bnxt_version[] =
"Broadcom Cumulus driver " DRV_MODULE_NAME "\n";
+int bnxt_logtype_driver;

#define PCI_VENDOR_ID_BROADCOM 0x14E4

@@ -223,25 +224,25 @@ static int bnxt_init_chip(struct bnxt *bp)

rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
goto err_out;
}

rc = bnxt_alloc_hwrm_rings(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
goto err_out;
}

rc = bnxt_alloc_all_hwrm_ring_grps(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc);
+ PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
goto err_out;
}

rc = bnxt_mq_rx_configure(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
goto err_out;
}

@@ -251,14 +252,14 @@ static int bnxt_init_chip(struct bnxt *bp)

rc = bnxt_hwrm_vnic_alloc(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM vnic %d alloc failure rc: %x\n",
+ PMD_DRV_LOG(ERR, "HWRM vnic %d alloc failure rc: %x\n",
i, rc);
goto err_out;
}

rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM vnic %d ctx alloc failure rc: %x\n",
i, rc);
goto err_out;
@@ -266,14 +267,14 @@ static int bnxt_init_chip(struct bnxt *bp)

rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM vnic %d cfg failure rc: %x\n",
+ PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
i, rc);
goto err_out;
}

rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM vnic %d filter failure rc: %x\n",
i, rc);
goto err_out;
@@ -294,7 +295,7 @@ static int bnxt_init_chip(struct bnxt *bp)
}
rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM vnic %d set RSS failure rc: %x\n",
i, rc);
goto err_out;
@@ -310,7 +311,7 @@ static int bnxt_init_chip(struct bnxt *bp)
}
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM cfa l2 rx mask failure rc: %x\n", rc);
goto err_out;
}
@@ -320,10 +321,9 @@ static int bnxt_init_chip(struct bnxt *bp)
!RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
intr_vector = bp->eth_dev->data->nb_rx_queues;
- RTE_LOG(INFO, PMD, "%s(): intr_vector = %d\n", __func__,
- intr_vector);
+ PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
if (intr_vector > bp->rx_cp_nr_rings) {
- RTE_LOG(ERR, PMD, "At most %d intr queues supported",
+ PMD_DRV_LOG(ERR, "At most %d intr queues supported",
bp->rx_cp_nr_rings);
return -ENOTSUP;
}
@@ -337,13 +337,13 @@ static int bnxt_init_chip(struct bnxt *bp)
bp->eth_dev->data->nb_rx_queues *
sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
- RTE_LOG(ERR, PMD, "Failed to allocate %d rx_queues"
+ PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
" intr_vec", bp->eth_dev->data->nb_rx_queues);
return -ENOMEM;
}
- RTE_LOG(DEBUG, PMD, "%s(): intr_handle->intr_vec = %p "
+ PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
- __func__, intr_handle->intr_vec, intr_handle->nb_efd,
+ intr_handle->intr_vec, intr_handle->nb_efd,
intr_handle->max_intr);
}

@@ -359,14 +359,14 @@ static int bnxt_init_chip(struct bnxt *bp)

rc = bnxt_get_hwrm_link_config(bp, &new);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
goto err_out;
}

if (!bp->link_info.link_up) {
rc = bnxt_set_hwrm_link_config(bp, true);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM link config failure rc: %x\n", rc);
goto err_out;
}
@@ -537,13 +537,13 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
bp->max_stat_ctx ||
(uint32_t)(eth_dev->data->nb_rx_queues + 1) > bp->max_ring_grps) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Insufficient resources to support requested config\n");
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Num Queues Requested: Tx %d, Rx %d\n",
eth_dev->data->nb_tx_queues,
eth_dev->data->nb_rx_queues);
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Res available: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d\n",
bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
bp->max_stat_ctx, bp->max_ring_grps);
@@ -567,13 +567,13 @@ static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
struct rte_eth_link *link = &eth_dev->data->dev_link;

if (link->link_status)
- RTE_LOG(INFO, PMD, "Port %d Link Up - speed %u Mbps - %s\n",
+ PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
eth_dev->data->port_id,
(uint32_t)link->link_speed,
(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- RTE_LOG(INFO, PMD, "Port %d Link Down\n",
+ PMD_DRV_LOG(INFO, "Port %d Link Down\n",
eth_dev->data->port_id);
}

@@ -590,7 +590,7 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
int rc;

if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
}
@@ -729,25 +729,25 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
struct bnxt_filter_info *filter;

if (BNXT_VF(bp)) {
- RTE_LOG(ERR, PMD, "Cannot add MAC address to a VF interface\n");
+ PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
return -ENOTSUP;
}

if (!vnic) {
- RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
+ PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
return -EINVAL;
}
/* Attach requested MAC address to the new l2_filter */
STAILQ_FOREACH(filter, &vnic->filter, next) {
if (filter->mac_index == index) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"MAC addr already existed for pool %d\n", pool);
return -EINVAL;
}
}
filter = bnxt_alloc_filter(bp);
if (!filter) {
- RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+ PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
return -ENODEV;
}
STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
@@ -770,7 +770,7 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
if (rc) {
new.link_speed = ETH_LINK_SPEED_100M;
new.link_duplex = ETH_LINK_FULL_DUPLEX;
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to retrieve link rc = 0x%x!\n", rc);
goto out;
}
@@ -861,7 +861,7 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
return -EINVAL;

if (reta_size != HW_HASH_INDEX_SIZE) {
- RTE_LOG(ERR, PMD, "The configured hash table lookup size "
+ PMD_DRV_LOG(ERR, "The configured hash table lookup size "
"(%d) must equal the size supported by the hardware "
"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
return -EINVAL;
@@ -893,7 +893,7 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
return -EINVAL;

if (reta_size != HW_HASH_INDEX_SIZE) {
- RTE_LOG(ERR, PMD, "The configured hash table lookup size "
+ PMD_DRV_LOG(ERR, "The configured hash table lookup size "
"(%d) must equal the size supported by the hardware "
"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
return -EINVAL;
@@ -924,7 +924,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
*/
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
if (!rss_conf->rss_hf)
- RTE_LOG(ERR, PMD, "Hash type NONE\n");
+ PMD_DRV_LOG(ERR, "Hash type NONE\n");
} else {
if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
return -EINVAL;
@@ -1013,7 +1013,7 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
}
if (hash_types) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unknwon RSS config from firmware (%08x), RSS disabled",
vnic->hash_type);
return -ENOTSUP;
@@ -1062,7 +1062,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
- RTE_LOG(ERR, PMD, "Flow Control Settings cannot be modified\n");
+ PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
return -ENOTSUP;
}

@@ -1122,10 +1122,10 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
switch (udp_tunnel->prot_type) {
case RTE_TUNNEL_TYPE_VXLAN:
if (bp->vxlan_port_cnt) {
- RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
+ PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
udp_tunnel->udp_port);
if (bp->vxlan_port != udp_tunnel->udp_port) {
- RTE_LOG(ERR, PMD, "Only one port allowed\n");
+ PMD_DRV_LOG(ERR, "Only one port allowed\n");
return -ENOSPC;
}
bp->vxlan_port_cnt++;
@@ -1137,10 +1137,10 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
break;
case RTE_TUNNEL_TYPE_GENEVE:
if (bp->geneve_port_cnt) {
- RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
+ PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
udp_tunnel->udp_port);
if (bp->geneve_port != udp_tunnel->udp_port) {
- RTE_LOG(ERR, PMD, "Only one port allowed\n");
+ PMD_DRV_LOG(ERR, "Only one port allowed\n");
return -ENOSPC;
}
bp->geneve_port_cnt++;
@@ -1151,7 +1151,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
bp->geneve_port_cnt++;
break;
default:
- RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
return -ENOTSUP;
}
rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
@@ -1171,11 +1171,11 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
switch (udp_tunnel->prot_type) {
case RTE_TUNNEL_TYPE_VXLAN:
if (!bp->vxlan_port_cnt) {
- RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
+ PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
return -EINVAL;
}
if (bp->vxlan_port != udp_tunnel->udp_port) {
- RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
+ PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
udp_tunnel->udp_port, bp->vxlan_port);
return -EINVAL;
}
@@ -1188,11 +1188,11 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
break;
case RTE_TUNNEL_TYPE_GENEVE:
if (!bp->geneve_port_cnt) {
- RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
+ PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
return -EINVAL;
}
if (bp->geneve_port != udp_tunnel->udp_port) {
- RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
+ PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
udp_tunnel->udp_port, bp->geneve_port);
return -EINVAL;
}
@@ -1204,7 +1204,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
port = bp->geneve_fw_dst_port_id;
break;
default:
- RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
return -ENOTSUP;
}

@@ -1261,7 +1261,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)

new_filter = bnxt_alloc_filter(bp);
if (!new_filter) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"MAC/VLAN filter alloc failed\n");
rc = -ENOMEM;
goto exit;
@@ -1279,7 +1279,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
new_filter);
if (rc)
goto exit;
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Del Vlan filter for %d\n",
vlan_id);
}
@@ -1334,7 +1334,7 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
}
new_filter = bnxt_alloc_filter(bp);
if (!new_filter) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"MAC/VLAN filter alloc failed\n");
rc = -ENOMEM;
goto exit;
@@ -1354,7 +1354,7 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
new_filter);
if (rc)
goto exit;
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Added Vlan filter for %d\n", vlan_id);
cont:
filter = temp_filter;
@@ -1389,7 +1389,7 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
for (i = 0; i < 4095; i++)
bnxt_del_vlan_filter(bp, i);
}
- RTE_LOG(INFO, PMD, "VLAN Filtering: %d\n",
+ PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
dev->data->dev_conf.rxmode.hw_vlan_filter);
}

@@ -1403,12 +1403,12 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
vnic->vlan_strip = false;
bnxt_hwrm_vnic_cfg(bp, vnic);
}
- RTE_LOG(INFO, PMD, "VLAN Strip Offload: %d\n",
+ PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
dev->data->dev_conf.rxmode.hw_vlan_strip);
}

if (mask & ETH_VLAN_EXTEND_MASK)
- RTE_LOG(ERR, PMD, "Extend VLAN Not supported\n");
+ PMD_DRV_LOG(ERR, "Extend VLAN Not supported\n");

return 0;
}
@@ -1444,7 +1444,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
if (rc)
break;
filter->mac_index = 0;
- RTE_LOG(DEBUG, PMD, "Set MAC addr\n");
+ PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
}
}

@@ -1547,7 +1547,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2;

if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) {
- RTE_LOG(ERR, PMD, "MTU requested must be within (%d, %d)\n",
+ PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n",
ETHER_MIN_MTU, max_dev_mtu);
return -EINVAL;
}
@@ -1565,7 +1565,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;

eth_dev->data->mtu = new_mtu;
- RTE_LOG(INFO, PMD, "New MTU is %d\n", eth_dev->data->mtu);
+ PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu);

for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
@@ -1592,7 +1592,7 @@ bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
int rc;

if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"PVID cannot be modified for this function\n");
return -ENOTSUP;
}
@@ -1753,13 +1753,13 @@ bnxt_match_and_validate_ether_filter(struct bnxt *bp,

if (efilter->ether_type == ETHER_TYPE_IPv4 ||
efilter->ether_type == ETHER_TYPE_IPv6) {
- RTE_LOG(ERR, PMD, "invalid ether_type(0x%04x) in"
+ PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in"
" ethertype filter.", efilter->ether_type);
*ret = -EINVAL;
goto exit;
}
if (efilter->queue >= bp->rx_nr_rings) {
- RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
*ret = -EINVAL;
goto exit;
}
@@ -1767,7 +1767,7 @@ bnxt_match_and_validate_ether_filter(struct bnxt *bp,
vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
if (vnic == NULL) {
- RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
*ret = -EINVAL;
goto exit;
}
@@ -1818,7 +1818,7 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,
return 0;

if (arg == NULL) {
- RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
filter_op);
return -EINVAL;
}
@@ -1835,7 +1835,7 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,

bfilter = bnxt_get_unused_filter(bp);
if (bfilter == NULL) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Not enough resources for a new filter.\n");
return -ENOMEM;
}
@@ -1879,11 +1879,11 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,
next);
bnxt_free_filter(bp, filter1);
} else if (ret == 0) {
- RTE_LOG(ERR, PMD, "No matching filter found\n");
+ PMD_DRV_LOG(ERR, "No matching filter found\n");
}
break;
default:
- RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
ret = -EINVAL;
goto error;
}
@@ -1902,7 +1902,7 @@ parse_ntuple_filter(struct bnxt *bp,
uint32_t en = 0;

if (nfilter->queue >= bp->rx_nr_rings) {
- RTE_LOG(ERR, PMD, "Invalid queue %d\n", nfilter->queue);
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue);
return -EINVAL;
}

@@ -1914,7 +1914,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
break;
default:
- RTE_LOG(ERR, PMD, "invalid dst_port mask.");
+ PMD_DRV_LOG(ERR, "invalid dst_port mask.");
return -EINVAL;
}

@@ -1932,7 +1932,7 @@ parse_ntuple_filter(struct bnxt *bp,
en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
break;
default:
- RTE_LOG(ERR, PMD, "invalid protocol mask.");
+ PMD_DRV_LOG(ERR, "invalid protocol mask.");
return -EINVAL;
}

@@ -1944,7 +1944,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
break;
default:
- RTE_LOG(ERR, PMD, "invalid dst_ip mask.");
+ PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
return -EINVAL;
}

@@ -1956,7 +1956,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
break;
default:
- RTE_LOG(ERR, PMD, "invalid src_ip mask.");
+ PMD_DRV_LOG(ERR, "invalid src_ip mask.");
return -EINVAL;
}

@@ -1968,7 +1968,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
break;
default:
- RTE_LOG(ERR, PMD, "invalid src_port mask.");
+ PMD_DRV_LOG(ERR, "invalid src_port mask.");
return -EINVAL;
}

@@ -2021,18 +2021,18 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp,
int ret;

if (nfilter->flags != RTE_5TUPLE_FLAGS) {
- RTE_LOG(ERR, PMD, "only 5tuple is supported.");
+ PMD_DRV_LOG(ERR, "only 5tuple is supported.");
return -EINVAL;
}

if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
- RTE_LOG(ERR, PMD, "Ntuple filter: TCP flags not supported\n");
+ PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n");
return -EINVAL;
}

bfilter = bnxt_get_unused_filter(bp);
if (bfilter == NULL) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Not enough resources for a new filter.\n");
return -ENOMEM;
}
@@ -2059,7 +2059,7 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp,

if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
bfilter->dst_id == mfilter->dst_id) {
- RTE_LOG(ERR, PMD, "filter exists.\n");
+ PMD_DRV_LOG(ERR, "filter exists.\n");
ret = -EEXIST;
goto free_filter;
} else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
@@ -2068,12 +2068,12 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp,
ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter);
STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next);
STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next);
- RTE_LOG(ERR, PMD, "filter with matching pattern exists.\n");
- RTE_LOG(ERR, PMD, " Updated it to the new destination queue\n");
+ PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n");
+ PMD_DRV_LOG(ERR, " Updated it to the new destination queue\n");
goto free_filter;
}
if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
- RTE_LOG(ERR, PMD, "filter doesn't exist.");
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
ret = -ENOENT;
goto free_filter;
}
@@ -2118,7 +2118,7 @@ bnxt_ntuple_filter(struct rte_eth_dev *dev,
return 0;

if (arg == NULL) {
- RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
filter_op);
return -EINVAL;
}
@@ -2135,7 +2135,7 @@ bnxt_ntuple_filter(struct rte_eth_dev *dev,
filter_op);
break;
default:
- RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
ret = -EINVAL;
break;
}
@@ -2337,7 +2337,7 @@ bnxt_parse_fdir_filter(struct bnxt *bp,
vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
if (vnic == NULL) {
- RTE_LOG(ERR, PMD, "Invalid queue %d\n", fdir->action.rx_queue);
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue);
return -EINVAL;
}

@@ -2441,7 +2441,7 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
/* FALLTHROUGH */
filter = bnxt_get_unused_filter(bp);
if (filter == NULL) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Not enough resources for a new flow.\n");
return -ENOMEM;
}
@@ -2453,12 +2453,12 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,

match = bnxt_match_fdir(bp, filter);
if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
- RTE_LOG(ERR, PMD, "Flow already exists.\n");
+ PMD_DRV_LOG(ERR, "Flow already exists.\n");
ret = -EEXIST;
goto free_filter;
}
if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
- RTE_LOG(ERR, PMD, "Flow does not exist.\n");
+ PMD_DRV_LOG(ERR, "Flow does not exist.\n");
ret = -ENOENT;
goto free_filter;
}
@@ -2505,10 +2505,10 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_STATS:
case RTE_ETH_FILTER_INFO:
/* FALLTHROUGH */
- RTE_LOG(ERR, PMD, "operation %u not implemented", filter_op);
+ PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
break;
default:
- RTE_LOG(ERR, PMD, "unknown operation %u", filter_op);
+ PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
ret = -EINVAL;
break;
}
@@ -2529,7 +2529,7 @@ bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,

switch (filter_type) {
case RTE_ETH_FILTER_TUNNEL:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"filter type: %d: To be implemented\n", filter_type);
break;
case RTE_ETH_FILTER_FDIR:
@@ -2547,7 +2547,7 @@ bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
*(const void **)arg = &bnxt_flow_ops;
break;
default:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Filter type (%d) not supported", filter_type);
ret = -EINVAL;
break;
@@ -2841,8 +2841,8 @@ bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
uint32_t dir_entries;
uint32_t entry_length;

- RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x\n",
- __func__, bp->pdev->addr.domain, bp->pdev->addr.bus,
+ PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x\n",
+ bp->pdev->addr.domain, bp->pdev->addr.bus,
bp->pdev->addr.devid, bp->pdev->addr.function);

rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
@@ -2860,8 +2860,8 @@ bnxt_get_eeprom_op(struct rte_eth_dev *dev,
uint32_t index;
uint32_t offset;

- RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
- "len = %d\n", __func__, bp->pdev->addr.domain,
+ PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
+ "len = %d\n", bp->pdev->addr.domain,
bp->pdev->addr.bus, bp->pdev->addr.devid,
bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);

@@ -2929,13 +2929,13 @@ bnxt_set_eeprom_op(struct rte_eth_dev *dev,
uint8_t index, dir_op;
uint16_t type, ext, ordinal, attr;

- RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
- "len = %d\n", __func__, bp->pdev->addr.domain,
+ PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
+ "len = %d\n", bp->pdev->addr.domain,
bp->pdev->addr.bus, bp->pdev->addr.devid,
bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD, "NVM write not supported from a VF\n");
+ PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
return -EINVAL;
}

@@ -3056,7 +3056,7 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev)

/* enable device (incl. PCI PM wakeup), and bus-mastering */
if (!pci_dev->mem_resource[0].addr) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Cannot find PCI device base address, aborting\n");
rc = -ENODEV;
goto init_err_disable;
@@ -3067,7 +3067,7 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev)

bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
if (!bp->bar0) {
- RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
+ PMD_DRV_LOG(ERR, "Cannot map device registers, aborting\n");
rc = -ENOMEM;
goto init_err_release;
}
@@ -3103,7 +3103,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
int rc;

if (version_printed++ == 0)
- RTE_LOG(INFO, PMD, "%s\n", bnxt_version);
+ PMD_DRV_LOG(INFO, "%s\n", bnxt_version);

rte_eth_copy_pci_info(eth_dev, pci_dev);

@@ -3120,7 +3120,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)

rc = bnxt_init_board(eth_dev);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Board initialization failed rc: %x\n", rc);
goto error;
}
@@ -3151,13 +3151,13 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map address to physical memory\n");
return -ENOMEM;
}
@@ -3186,13 +3186,13 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map address to physical memory\n");
return -ENOMEM;
}
@@ -3207,7 +3207,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)

rc = bnxt_alloc_hwrm_resources(bp);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"hwrm resource allocation failure rc: %x\n", rc);
goto error_free;
}
@@ -3216,31 +3216,31 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
goto error_free;
rc = bnxt_hwrm_queue_qportcfg(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm queue qportcfg failed\n");
+ PMD_DRV_LOG(ERR, "hwrm queue qportcfg failed\n");
goto error_free;
}

rc = bnxt_hwrm_func_qcfg(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm func qcfg failed\n");
+ PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n");
goto error_free;
}

/* Get the MAX capabilities for this function */
rc = bnxt_hwrm_func_qcaps(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm query capability failure rc: %x\n", rc);
goto error_free;
}
if (bp->max_tx_rings == 0) {
- RTE_LOG(ERR, PMD, "No TX rings available!\n");
+ PMD_DRV_LOG(ERR, "No TX rings available!\n");
rc = -EBUSY;
goto error_free;
}
eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
if (eth_dev->data->mac_addrs == NULL) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to alloc %u bytes needed to store MAC addr tbl",
ETHER_ADDR_LEN * bp->max_l2_ctx);
rc = -ENOMEM;
@@ -3252,7 +3252,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)

if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
/* 1 ring is for default completion ring */
- RTE_LOG(ERR, PMD, "Insufficient resource: Ring Group\n");
+ PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
rc = -ENOSPC;
goto error_free;
}
@@ -3260,7 +3260,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
bp->grp_info = rte_zmalloc("bnxt_grp_info",
sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
if (!bp->grp_info) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to alloc %zu bytes to store group info table\n",
sizeof(*bp->grp_info) * bp->max_ring_grps);
rc = -ENOMEM;
@@ -3273,7 +3273,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
} else {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Firmware too old for VF mailbox functionality\n");
memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
}
@@ -3293,20 +3293,20 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
rc = bnxt_hwrm_func_driver_register(bp);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to register driver");
rc = -EBUSY;
goto error_free;
}

- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
pci_dev->mem_resource[0].phys_addr,
pci_dev->mem_resource[0].addr);

rc = bnxt_hwrm_func_reset(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm chip reset failure rc: %x\n", rc);
rc = -EIO;
goto error_free;
}
@@ -3318,13 +3318,13 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
if (bp->pdev->max_vfs) {
rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
if (rc) {
- RTE_LOG(ERR, PMD, "Failed to allocate VFs\n");
+ PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
goto error_free;
}
} else {
rc = bnxt_hwrm_allocate_pf_only(bp);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to allocate PF resources\n");
goto error_free;
}
@@ -3433,6 +3433,15 @@ bool is_bnxt_supported(struct rte_eth_dev *dev)
return is_device_supported(dev, &bnxt_rte_pmd);
}

+RTE_INIT(bnxt_init_log);
+static void
+bnxt_init_log(void)
+{
+ bnxt_logtype_driver = rte_log_register("pmd.bnxt.driver");
+ if (bnxt_logtype_driver >= 0)
+ rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
+}
+
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index 22cfbd372..0716dd8fd 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -56,7 +56,7 @@ struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
/* Find the 1st unused filter from the free_filter_list pool*/
filter = STAILQ_FIRST(&bp->free_filter_list);
if (!filter) {
- RTE_LOG(ERR, PMD, "No more free filter resources\n");
+ PMD_DRV_LOG(ERR, "No more free filter resources\n");
return NULL;
}
STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
@@ -77,7 +77,7 @@ struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)

filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
if (!filter) {
- RTE_LOG(ERR, PMD, "Failed to alloc memory for VF %hu filters\n",
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
vf);
return NULL;
}
@@ -145,11 +145,11 @@ void bnxt_free_filter_mem(struct bnxt *bp)
for (i = 0; i < max_filters; i++) {
filter = &bp->filter_info[i];
if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
- RTE_LOG(ERR, PMD, "HWRM filter is not freed??\n");
+ PMD_DRV_LOG(ERR, "HWRM filter is not freed??\n");
/* Call HWRM to try to free filter again */
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
if (rc)
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM filter cannot be freed rc = %d\n",
rc);
}
@@ -172,7 +172,7 @@ int bnxt_alloc_filter_mem(struct bnxt *bp)
max_filters * sizeof(struct bnxt_filter_info),
0);
if (filter_mem == NULL) {
- RTE_LOG(ERR, PMD, "Failed to alloc memory for %d filters",
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters",
max_filters);
return -ENOMEM;
}
@@ -187,7 +187,7 @@ struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
/* Find the 1st unused filter from the free_filter_list pool*/
filter = STAILQ_FIRST(&bp->free_filter_list);
if (!filter) {
- RTE_LOG(ERR, PMD, "No more free filter resources\n");
+ PMD_DRV_LOG(ERR, "No more free filter resources\n");
return NULL;
}
STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
@@ -281,7 +281,7 @@ bnxt_filter_type_check(const struct rte_flow_item pattern[],
/* FALLTHROUGH */
/* need ntuple match, reset exact match */
if (!use_ntuple) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"VLAN flow cannot use NTUPLE filter\n");
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -292,7 +292,7 @@ bnxt_filter_type_check(const struct rte_flow_item pattern[],
use_ntuple |= 1;
break;
default:
- RTE_LOG(ERR, PMD, "Unknown Flow type");
+ PMD_DRV_LOG(ERR, "Unknown Flow type");
use_ntuple |= 1;
}
item++;
@@ -329,7 +329,7 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
int dflt_vnic;

use_ntuple = bnxt_filter_type_check(pattern, error);
- RTE_LOG(DEBUG, PMD, "Use NTUPLE %d\n", use_ntuple);
+ PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
if (use_ntuple < 0)
return use_ntuple;

@@ -791,7 +791,7 @@ bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
return f0;

//This flow needs DST MAC which is not same as port/l2
- RTE_LOG(DEBUG, PMD, "Create L2 filter for DST MAC\n");
+ PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
filter1 = bnxt_get_unused_filter(bp);
if (filter1 == NULL)
return NULL;
@@ -828,7 +828,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
int rc;

if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
- RTE_LOG(ERR, PMD, "Cannot create flow on RSS queues\n");
+ PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"Cannot create flow on RSS queues");
@@ -857,7 +857,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
rc = -rte_errno;
goto ret;
}
- RTE_LOG(DEBUG, PMD, "Queue index %d\n", act_q->index);
+ PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
@@ -875,7 +875,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
goto ret;
}
filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
- RTE_LOG(DEBUG, PMD, "VNIC found\n");
+ PMD_DRV_LOG(DEBUG, "VNIC found\n");
break;
case RTE_FLOW_ACTION_TYPE_DROP:
vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
@@ -990,7 +990,7 @@ bnxt_flow_validate(struct rte_eth_dev *dev,

filter = bnxt_get_unused_filter(bp);
if (filter == NULL) {
- RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
+ PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
return -ENOMEM;
}

@@ -1092,13 +1092,13 @@ bnxt_flow_create(struct rte_eth_dev *dev,

ret = bnxt_flow_agrs_validate(attr, pattern, actions, error);
if (ret != 0) {
- RTE_LOG(ERR, PMD, "Not a validate flow.\n");
+ PMD_DRV_LOG(ERR, "Not a validate flow.\n");
goto free_flow;
}

filter = bnxt_get_unused_filter(bp);
if (filter == NULL) {
- RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
+ PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
goto free_flow;
}

@@ -1109,15 +1109,15 @@ bnxt_flow_create(struct rte_eth_dev *dev,

ret = bnxt_match_filter(bp, filter);
if (ret == -EEXIST) {
- RTE_LOG(DEBUG, PMD, "Flow already exists.\n");
+ PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
/* Clear the filter that was created as part of
* validate_and_parse_flow() above
*/
bnxt_hwrm_clear_l2_filter(bp, filter);
goto free_filter;
} else if (ret == -EXDEV) {
- RTE_LOG(DEBUG, PMD, "Flow with same pattern exists");
- RTE_LOG(DEBUG, PMD, "Updating with different destination\n");
+ PMD_DRV_LOG(DEBUG, "Flow with same pattern exists");
+ PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
update_flow = true;
}

@@ -1145,7 +1145,7 @@ bnxt_flow_create(struct rte_eth_dev *dev,
ret = -EXDEV;
goto free_flow;
}
- RTE_LOG(ERR, PMD, "Successfully created flow.\n");
+ PMD_DRV_LOG(ERR, "Successfully created flow.\n");
STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
return flow;
}
@@ -1181,7 +1181,7 @@ bnxt_flow_destroy(struct rte_eth_dev *dev,

ret = bnxt_match_filter(bp, filter);
if (ret == 0)
- RTE_LOG(ERR, PMD, "Could not find matching flow\n");
+ PMD_DRV_LOG(ERR, "Could not find matching flow\n");
if (filter->filter_type == HWRM_CFA_EM_FILTER)
ret = bnxt_hwrm_clear_em_filter(bp, filter);
if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index d88061c9f..fdca424a9 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -79,7 +79,7 @@ static int page_getenum(size_t size)
return 22;
if (size <= 1 << 30)
return 30;
- RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
+ PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
return sizeof(void *) * 8 - 1;
}

@@ -161,7 +161,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
}

if (i >= HWRM_CMD_TIMEOUT) {
- RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
+ PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
req->req_type);
goto err_ret;
}
@@ -194,8 +194,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,

#define HWRM_CHECK_RESULT() do {\
if (rc) { \
- RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
- __func__, rc); \
+ PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
rte_spinlock_unlock(&bp->hwrm_lock); \
return rc; \
} \
@@ -204,18 +203,15 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
if (resp->resp_len >= 16) { \
struct hwrm_err_output *tmp_hwrm_err_op = \
(void *)resp; \
- RTE_LOG(ERR, PMD, \
- "%s error %d:%d:%08x:%04x\n", \
- __func__, \
+ PMD_DRV_LOG(ERR, \
+ "error %d:%d:%08x:%04x\n", \
rc, tmp_hwrm_err_op->cmd_err, \
rte_le_to_cpu_32(\
tmp_hwrm_err_op->opaque_0), \
rte_le_to_cpu_16(\
tmp_hwrm_err_op->opaque_1)); \
- } \
- else { \
- RTE_LOG(ERR, PMD, \
- "%s error %d\n", __func__, rc); \
+ } else { \
+ PMD_DRV_LOG(ERR, "error %d\n", rc); \
} \
rte_spinlock_unlock(&bp->hwrm_lock); \
return rc; \
@@ -369,7 +365,7 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
conf->pool_map[j].pools & (1UL << j)) {
- RTE_LOG(DEBUG, PMD,
+ PMD_DRV_LOG(DEBUG,
"Add vlan %u to vmdq pool %u\n",
conf->pool_map[j].vlan_id, j);

@@ -545,7 +541,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
getpagesize(),
getpagesize());
if (bp->pf.vf_info[i].vlan_table == NULL)
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Fail to alloc VLAN table for VF %d\n",
i);
else
@@ -556,7 +552,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
getpagesize(),
getpagesize());
if (bp->pf.vf_info[i].vlan_as_table == NULL)
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Alloc VLAN AS table for VF %d fail\n",
i);
else
@@ -588,7 +584,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
- RTE_LOG(INFO, PMD, "PTP SUPPORTED");
+ PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
HWRM_UNLOCK();
bnxt_hwrm_ptp_qcfg(bp);
}
@@ -676,13 +672,13 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)

HWRM_CHECK_RESULT();

- RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
+ PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
resp->hwrm_intf_maj, resp->hwrm_intf_min,
resp->hwrm_intf_upd,
resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
(resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
- RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
+ PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

my_version = HWRM_VERSION_MAJOR << 16;
@@ -694,28 +690,28 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
fw_version |= resp->hwrm_intf_upd;

if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
- RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
+ PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
rc = -EINVAL;
goto error;
}

if (my_version != fw_version) {
- RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
+ PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
if (my_version < fw_version) {
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Firmware API version is newer than driver.\n");
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"The driver may be missing features.\n");
} else {
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Firmware API version is older than driver.\n");
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Not all driver features may be functional.\n");
}
}

if (bp->max_req_len > resp->max_req_win_len) {
- RTE_LOG(ERR, PMD, "Unsupported request length\n");
+ PMD_DRV_LOG(ERR, "Unsupported request length\n");
rc = -EINVAL;
}
bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
@@ -738,7 +734,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
bp->hwrm_cmd_resp_dma_addr =
rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unable to map response buffer to physical memory.\n");
rc = -ENOMEM;
goto error;
@@ -750,7 +746,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
(dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED)) {
- RTE_LOG(DEBUG, PMD, "Short command supported\n");
+ PMD_DRV_LOG(DEBUG, "Short command supported\n");

rte_free(bp->hwrm_short_cmd_req_addr);

@@ -765,7 +761,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
if (bp->hwrm_short_cmd_req_dma_addr == 0) {
rte_free(bp->hwrm_short_cmd_req_addr);
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unable to map buffer to physical memory.\n");
rc = -ENOMEM;
goto error;
@@ -814,7 +810,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
if (bp->link_info.auto_mode && conf->link_speed) {
req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
- RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
+ PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
}

req.flags = rte_cpu_to_le_32(conf->phy_flags);
@@ -853,7 +849,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
} else {
req.flags =
rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
- RTE_LOG(INFO, PMD, "Force Link Down\n");
+ PMD_DRV_LOG(INFO, "Force Link Down\n");
}

rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -971,7 +967,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
break;
default:
- RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
+ PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
ring_type);
HWRM_UNLOCK();
return -1;
@@ -985,22 +981,22 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
rc = rte_le_to_cpu_16(resp->error_code);
switch (ring_type) {
case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"hwrm_ring_alloc cp failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"hwrm_ring_alloc rx failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"hwrm_ring_alloc tx failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
default:
- RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
}
@@ -1032,19 +1028,19 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,

switch (ring_type) {
case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
- RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
+ PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
rc);
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
- RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
+ PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
rc);
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
- RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
+ PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
rc);
return rc;
default:
- RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
return rc;
}
}
@@ -1168,7 +1164,7 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

/* map ring groups to this vnic */
- RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
+ PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
vnic->start_grp_id, vnic->end_grp_id);
for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
@@ -1188,7 +1184,7 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)

vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
HWRM_UNLOCK();
- RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
return rc;
}

@@ -1258,7 +1254,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct bnxt_plcmodes_cfg pmodes;

if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
- RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
return rc;
}

@@ -1323,7 +1319,7 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
- RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
return rc;
}
HWRM_PREP(req, VNIC_QCFG);
@@ -1375,7 +1371,7 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)

vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
HWRM_UNLOCK();
- RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
+ PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);

return rc;
}
@@ -1388,7 +1384,7 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
bp->hwrm_cmd_resp_addr;

if (vnic->rss_rule == 0xffff) {
- RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
+ PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
return rc;
}
HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
@@ -1412,7 +1408,7 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
- RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
return rc;
}

@@ -1854,7 +1850,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
bp->hwrm_cmd_resp_dma_addr =
rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@@ -1890,7 +1886,7 @@ bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)

STAILQ_FOREACH(flow, &vnic->flow_list, next) {
filter = flow->filter;
- RTE_LOG(ERR, PMD, "filter type %d\n", filter->filter_type);
+ PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
if (filter->filter_type == HWRM_CFA_EM_FILTER)
rc = bnxt_hwrm_clear_em_filter(bp, filter);
else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
@@ -2032,7 +2028,7 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
break;
default:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unsupported link speed %d; default to AUTO\n",
conf_link_speed);
break;
@@ -2056,20 +2052,20 @@ static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

if (one_speed & (one_speed - 1)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Invalid advertised speeds (%u) for port %u\n",
link_speed, port_id);
return -EINVAL;
}
if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unsupported advertised speed (%u) for port %u\n",
link_speed, port_id);
return -EINVAL;
}
} else {
if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unsupported advertised speeds (%u) for port %u\n",
link_speed, port_id);
return -EINVAL;
@@ -2141,7 +2137,7 @@ static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
default:
- RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
+ PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
hw_link_speed);
break;
}
@@ -2161,7 +2157,7 @@ static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
eth_link_duplex = ETH_LINK_HALF_DUPLEX;
break;
default:
- RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
+ PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
hw_link_duplex);
break;
}
@@ -2175,7 +2171,7 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)

rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Get link config failed with rc %d\n", rc);
goto exit;
}
@@ -2229,7 +2225,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
bp->link_info.media_type ==
HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
- RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
+ PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
return -EINVAL;
}

@@ -2243,7 +2239,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
port_phy_cfg:
rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Set link config failed with rc %d\n", rc);
}

@@ -2420,11 +2416,11 @@ static void reserve_resources_from_vf(struct bnxt *bp,
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

if (rc) {
- RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
copy_func_cfg_to_qcaps(cfg_req, resp);
} else if (resp->error_code) {
rc = rte_le_to_cpu_16(resp->error_code);
- RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
copy_func_cfg_to_qcaps(cfg_req, resp);
}

@@ -2455,11 +2451,11 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
return -1;
} else if (resp->error_code) {
rc = rte_le_to_cpu_16(resp->error_code);
- RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
return -1;
}
rc = rte_le_to_cpu_16(resp->vlan);
@@ -2495,7 +2491,7 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
int rc;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
+ PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
return -1;
}

@@ -2522,7 +2518,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
size_t req_buf_sz;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
+ PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
return -1;
}

@@ -2588,9 +2584,9 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);

if (rc || resp->error_code) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to initizlie VF %d\n", i);
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Not all VFs available. (%d, %d)\n",
rc, resp->error_code);
HWRM_UNLOCK();
@@ -2740,7 +2736,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
req.req_buf_page_addr[0] =
rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
if (req.req_buf_page_addr[0] == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map buffer address to physical memory\n");
return -ENOMEM;
}
@@ -3162,7 +3158,7 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
return -ENOMEM;
dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@@ -3198,7 +3194,7 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,

dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@@ -3259,7 +3255,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,

dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@@ -3316,19 +3312,19 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,

if (req.vnic_id_tbl_addr == 0) {
HWRM_UNLOCK();
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map VNIC ID table address to physical memory\n");
return -ENOMEM;
}
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
if (rc) {
HWRM_UNLOCK();
- RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
return -1;
} else if (resp->error_code) {
rc = rte_le_to_cpu_16(resp->error_code);
HWRM_UNLOCK();
- RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
return -1;
}
rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
@@ -3459,7 +3455,7 @@ int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
}
}
/* Could not find a default VNIC. */
- RTE_LOG(ERR, PMD, "No default VNIC\n");
+ PMD_DRV_LOG(ERR, "No default VNIC\n");
exit:
rte_free(vnic_ids);
return -1;
@@ -3549,7 +3545,7 @@ int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
if (filter->fw_em_filter_id == UINT64_MAX)
return 0;

- RTE_LOG(ERR, PMD, "Clear EM filter\n");
+ PMD_DRV_LOG(ERR, "Clear EM filter\n");
HWRM_PREP(req, CFA_EM_FLOW_FREE);

req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c
index 49436cfd9..8ab986936 100644
--- a/drivers/net/bnxt/bnxt_irq.c
+++ b/drivers/net/bnxt/bnxt_irq.c
@@ -84,7 +84,7 @@ static void bnxt_int_handler(void *param)
cpr->cp_ring_struct))
goto no_more;
}
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Ignoring %02x completion\n", CMP_TYPE(cmp));
break;
}
@@ -154,7 +154,7 @@ int bnxt_setup_int(struct bnxt *bp)
return 0;

setup_exit:
- RTE_LOG(ERR, PMD, "bnxt_irq_tbl setup failed\n");
+ PMD_DRV_LOG(ERR, "bnxt_irq_tbl setup failed\n");
return rc;
}

diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 59d1035fd..8fb897216 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -176,15 +176,15 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
for (sz = 0; sz < total_alloc_len; sz += getpagesize())
rte_mem_lock_page(((char *)mz->addr) + sz);
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map ring address to physical memory\n");
return -ENOMEM;
}
@@ -326,7 +326,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
ring = rxr->ag_ring_struct;
/* Agg ring */
if (ring == NULL) {
- RTE_LOG(ERR, PMD, "Alloc AGG Ring is NULL!\n");
+ PMD_DRV_LOG(ERR, "Alloc AGG Ring is NULL!\n");
goto err_out;
}

@@ -336,7 +336,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
cp_ring->fw_ring_id);
if (rc)
goto err_out;
- RTE_LOG(DEBUG, PMD, "Alloc AGG Done!\n");
+ PMD_DRV_LOG(DEBUG, "Alloc AGG Done!\n");
rxr->ag_prod = 0;
rxr->ag_doorbell =
(char *)pci_dev->mem_resource[2].addr +
@@ -347,7 +347,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +
ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
if (bnxt_init_one_rx_ring(rxq)) {
- RTE_LOG(ERR, PMD, "bnxt_init_one_rx_ring failed!\n");
+ PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
return -ENOMEM;
}
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index f7fbb2856..736936a55 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -75,7 +75,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
if (bp->rx_cp_nr_rings < 2) {
vnic = bnxt_alloc_vnic(bp);
if (!vnic) {
- RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
+ PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@@ -92,7 +92,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
vnic->end_grp_id = vnic->start_grp_id;
filter = bnxt_alloc_filter(bp);
if (!filter) {
- RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+ PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@@ -121,7 +121,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
pools = bp->rx_cp_nr_rings;
break;
default:
- RTE_LOG(ERR, PMD, "Unsupported mq_mod %d\n",
+ PMD_DRV_LOG(ERR, "Unsupported mq_mod %d\n",
dev_conf->rxmode.mq_mode);
rc = -EINVAL;
goto err_out;
@@ -135,7 +135,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
for (i = 0; i < pools; i++) {
vnic = bnxt_alloc_vnic(bp);
if (!vnic) {
- RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
+ PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@@ -166,7 +166,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
}
filter = bnxt_alloc_filter(bp);
if (!filter) {
- RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+ PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@@ -312,14 +312,14 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
int rc = 0;

if (queue_idx >= bp->max_rx_rings) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Cannot create Rx ring %d. Only %d rings available\n",
queue_idx, bp->max_rx_rings);
return -ENOSPC;
}

if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
- RTE_LOG(ERR, PMD, "nb_desc %d is invalid\n", nb_desc);
+ PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
rc = -EINVAL;
goto out;
}
@@ -332,7 +332,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq) {
- RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!\n");
+ PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
rc = -ENOMEM;
goto out;
}
@@ -341,8 +341,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;

- RTE_LOG(DEBUG, PMD, "RX Buf size is %d\n", rxq->rx_buf_use_size);
- RTE_LOG(DEBUG, PMD, "RX Buf MTU %d\n", eth_dev->data->mtu);
+ PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_use_size);
+ PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);

rc = bnxt_init_rx_ring_struct(rxq, socket_id);
if (rc)
@@ -357,7 +357,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
/* Allocate RX ring hardware descriptors */
if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring,
"rxr")) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"ring_dma_zone_reserve for rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
rc = -ENOMEM;
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 82c93d6dc..3f07c11b5 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -95,9 +95,9 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
}

if (rxbd == NULL)
- RTE_LOG(ERR, PMD, "Jumbo Frame. rxbd is NULL\n");
+ PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n");
if (rx_buf == NULL)
- RTE_LOG(ERR, PMD, "Jumbo Frame. rx_buf is NULL\n");
+ PMD_DRV_LOG(ERR, "Jumbo Frame. rx_buf is NULL\n");


rx_buf->mbuf = mbuf;
@@ -234,7 +234,7 @@ static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
/* TODO batch allocation for better performance */
while (rte_bitmap_get(rxr->ag_bitmap, next)) {
if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"agg mbuf alloc failed: prod=0x%x\n", next);
break;
}
@@ -512,7 +512,7 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
*/
prod = RING_NEXT(rxr->rx_ring_struct, prod);
if (bnxt_alloc_rx_data(rxq, rxr, prod)) {
- RTE_LOG(ERR, PMD, "mbuf alloc failed with prod=0x%x\n", prod);
+ PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n", prod);
rc = -ENOMEM;
goto rx;
}
@@ -601,7 +601,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxr->rx_prod = i;
B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
} else {
- RTE_LOG(ERR, PMD, "Alloc mbuf failed\n");
+ PMD_DRV_LOG(ERR, "Alloc mbuf failed\n");
break;
}
}
@@ -744,7 +744,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
prod = rxr->rx_prod;
for (i = 0; i < ring->ring_size; i++) {
if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"init'ed rx ring %d with %d/%d mbufs only\n",
rxq->queue_id, i, ring->ring_size);
break;
@@ -752,7 +752,6 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->rx_prod = prod;
prod = RING_NEXT(rxr->rx_ring_struct, prod);
}
- RTE_LOG(DEBUG, PMD, "%s\n", __func__);

ring = rxr->ag_ring_struct;
type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
@@ -761,7 +760,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)

for (i = 0; i < ring->ring_size; i++) {
if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"init'ed AG ring %d with %d/%d mbufs only\n",
rxq->queue_id, i, ring->ring_size);
break;
@@ -769,7 +768,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->ag_prod = prod;
prod = RING_NEXT(rxr->ag_ring_struct, prod);
}
- RTE_LOG(DEBUG, PMD, "%s AGG Done!\n", __func__);
+ PMD_DRV_LOG(DEBUG, "AGG Done!\n");

if (rxr->tpa_info) {
for (i = 0; i < BNXT_TPA_MAX; i++) {
@@ -781,7 +780,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
}
}
}
- RTE_LOG(DEBUG, PMD, "%s TPA alloc Done!\n", __func__);
+ PMD_DRV_LOG(DEBUG, "TPA alloc Done!\n");

return 0;
}
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index 470c6438d..bd93cc834 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -237,7 +237,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,

memset(bnxt_stats, 0, sizeof(*bnxt_stats));
if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
- RTE_LOG(ERR, PMD, "Device Initialization not complete!\n");
+ PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
return 0;
}

@@ -272,7 +272,7 @@ void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
- RTE_LOG(ERR, PMD, "Device Initialization not complete!\n");
+ PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
return;
}

@@ -289,7 +289,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
uint64_t tx_drop_pkts;

if (!(bp->flags & BNXT_FLAG_PORT_STATS)) {
- RTE_LOG(ERR, PMD, "xstats not supported for VF\n");
+ PMD_DRV_LOG(ERR, "xstats not supported for VF\n");
return 0;
}

@@ -371,11 +371,11 @@ void bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev)
bnxt_hwrm_port_clr_stats(bp);

if (BNXT_VF(bp))
- RTE_LOG(ERR, PMD, "Operation not supported on a VF device\n");
+ PMD_DRV_LOG(ERR, "Operation not supported on a VF device\n");
if (!BNXT_SINGLE_PF(bp))
- RTE_LOG(ERR, PMD, "Operation not supported on a MF device\n");
+ PMD_DRV_LOG(ERR, "Operation not supported on a MF device\n");
if (!(bp->flags & BNXT_FLAG_PORT_STATS))
- RTE_LOG(ERR, PMD, "Operation not supported\n");
+ PMD_DRV_LOG(ERR, "Operation not supported\n");
}

int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids,
@@ -394,7 +394,7 @@ int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids,
bnxt_dev_xstats_get_by_id_op(dev, NULL, values_copy, stat_cnt);
for (i = 0; i < limit; i++) {
if (ids[i] >= stat_cnt) {
- RTE_LOG(ERR, PMD, "id value isn't valid");
+ PMD_DRV_LOG(ERR, "id value isn't valid");
return -1;
}
values[i] = values_copy[ids[i]];
@@ -420,7 +420,7 @@ int bnxt_dev_xstats_get_names_by_id_op(struct rte_eth_dev *dev,

for (i = 0; i < limit; i++) {
if (ids[i] >= stat_cnt) {
- RTE_LOG(ERR, PMD, "id value isn't valid");
+ PMD_DRV_LOG(ERR, "id value isn't valid");
return -1;
}
strcpy(xstats_names[i].name,
diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c
index 25c33f5e4..53524346d 100644
--- a/drivers/net/bnxt/bnxt_txq.c
+++ b/drivers/net/bnxt/bnxt_txq.c
@@ -109,14 +109,14 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
int rc = 0;

if (queue_idx >= bp->max_tx_rings) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Cannot create Tx ring %d. Only %d rings available\n",
queue_idx, bp->max_tx_rings);
return -ENOSPC;
}

if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
- RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc);
+ PMD_DRV_LOG(ERR, "nb_desc %d is invalid", nb_desc);
rc = -EINVAL;
goto out;
}
@@ -131,7 +131,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
txq = rte_zmalloc_socket("bnxt_tx_queue", sizeof(struct bnxt_tx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!txq) {
- RTE_LOG(ERR, PMD, "bnxt_tx_queue allocation failed!");
+ PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!");
rc = -ENOMEM;
goto out;
}
@@ -149,14 +149,14 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
/* Allocate TX ring hardware descriptors */
if (bnxt_alloc_rings(bp, queue_idx, txq->tx_ring, NULL, txq->cp_ring,
"txr")) {
- RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for tx_ring failed!");
+ PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
bnxt_tx_queue_release_op(txq);
rc = -ENOMEM;
goto out;
}

if (bnxt_init_one_tx_ring(txq)) {
- RTE_LOG(ERR, PMD, "bnxt_init_one_tx_ring failed!");
+ PMD_DRV_LOG(ERR, "bnxt_init_one_tx_ring failed!");
bnxt_tx_queue_release_op(txq);
rc = -ENOMEM;
goto out;
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 5bac26053..d4aeb4ca8 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -107,7 +107,7 @@ int bnxt_free_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic,
}
temp = STAILQ_NEXT(temp, next);
}
- RTE_LOG(ERR, PMD, "VNIC %p is not found in pool[%d]\n", vnic, pool);
+ PMD_DRV_LOG(ERR, "VNIC %p is not found in pool[%d]\n", vnic, pool);
return -EINVAL;
}

@@ -118,7 +118,7 @@ struct bnxt_vnic_info *bnxt_alloc_vnic(struct bnxt *bp)
/* Find the 1st unused vnic from the free_vnic_list pool*/
vnic = STAILQ_FIRST(&bp->free_vnic_list);
if (!vnic) {
- RTE_LOG(ERR, PMD, "No more free VNIC resources\n");
+ PMD_DRV_LOG(ERR, "No more free VNIC resources\n");
return NULL;
}
STAILQ_REMOVE_HEAD(&bp->free_vnic_list, next);
@@ -194,13 +194,13 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
}
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map vnic address to physical memory\n");
return -ENOMEM;
}
@@ -241,7 +241,7 @@ void bnxt_free_vnic_mem(struct bnxt *bp)
for (i = 0; i < max_vnics; i++) {
vnic = &bp->vnic_info[i];
if (vnic->fw_vnic_id != (uint16_t)HWRM_NA_SIGNATURE) {
- RTE_LOG(ERR, PMD, "VNIC is not freed yet!\n");
+ PMD_DRV_LOG(ERR, "VNIC is not freed yet!\n");
/* TODO Call HWRM to free VNIC */
}
}
@@ -260,7 +260,7 @@ int bnxt_alloc_vnic_mem(struct bnxt *bp)
vnic_mem = rte_zmalloc("bnxt_vnic_info",
max_vnics * sizeof(struct bnxt_vnic_info), 0);
if (vnic_mem == NULL) {
- RTE_LOG(ERR, PMD, "Failed to alloc memory for %d VNICs",
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for %d VNICs",
max_vnics);
return -ENOMEM;
}
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c
index 595208997..cae95f8fa 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt.c
+++ b/drivers/net/bnxt/rte_pmd_bnxt.c
@@ -85,7 +85,7 @@ int rte_pmd_bnxt_set_tx_loopback(uint16_t port, uint8_t on)
bp = (struct bnxt *)eth_dev->data->dev_private;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set Tx loopback on non-PF port %d!\n",
port);
return -ENOTSUP;
@@ -127,7 +127,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
bp = (struct bnxt *)eth_dev->data->dev_private;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set all queues drop on non-PF port!\n");
return -ENOTSUP;
}
@@ -140,7 +140,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
bp->vnic_info[i].bd_stall = !on;
rc = bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[i]);
if (rc) {
- RTE_LOG(ERR, PMD, "Failed to update PF VNIC %d.\n", i);
+ PMD_DRV_LOG(ERR, "Failed to update PF VNIC %d.\n", i);
return rc;
}
}
@@ -151,7 +151,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
rte_pmd_bnxt_set_all_queues_drop_en_cb, &on,
bnxt_hwrm_vnic_cfg);
if (rc) {
- RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", i);
+ PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", i);
break;
}
}
@@ -180,7 +180,7 @@ int rte_pmd_bnxt_set_vf_mac_addr(uint16_t port, uint16_t vf,
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VF %d mac address on non-PF port %d!\n",
vf, port);
return -ENOTSUP;
@@ -224,7 +224,7 @@ int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf,

/* Requested BW can't be greater than link speed */
if (tot_rate > eth_dev->data->dev_link.link_speed) {
- RTE_LOG(ERR, PMD, "Rate > Link speed. Set to %d\n", tot_rate);
+ PMD_DRV_LOG(ERR, "Rate > Link speed. Set to %d\n", tot_rate);
return -EINVAL;
}

@@ -262,7 +262,7 @@ int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
bp = (struct bnxt *)dev->data->dev_private;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set mac spoof on non-PF port %d!\n", port);
return -EINVAL;
}
@@ -314,7 +314,7 @@ int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
bp = (struct bnxt *)dev->data->dev_private;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VLAN spoof on non-PF port %d!\n", port);
return -EINVAL;
}
@@ -333,7 +333,7 @@ int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
rc = -1;
}
} else {
- RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", vf);
+ PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", vf);
}

return rc;
@@ -367,7 +367,7 @@ rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VF %d stripq on non-PF port %d!\n",
vf, port);
return -ENOTSUP;
@@ -377,7 +377,7 @@ rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
rte_pmd_bnxt_set_vf_vlan_stripq_cb, &on,
bnxt_hwrm_vnic_cfg);
if (rc)
- RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", vf);
+ PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", vf);

return rc;
}
@@ -407,7 +407,7 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
return -EINVAL;

if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) {
- RTE_LOG(ERR, PMD, "Currently cannot toggle this setting\n");
+ PMD_DRV_LOG(ERR, "Currently cannot toggle this setting\n");
return -ENOTSUP;
}

@@ -430,7 +430,7 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
&bp->pf.vf_info[vf].l2_rx_mask,
bnxt_set_rx_mask_no_vlan);
if (rc)
- RTE_LOG(ERR, PMD, "bnxt_hwrm_func_vf_vnic_set_rxmask failed\n");
+ PMD_DRV_LOG(ERR, "bnxt_hwrm_func_vf_vnic_set_rxmask failed\n");

return rc;
}
@@ -442,7 +442,7 @@ static int bnxt_set_vf_table(struct bnxt *bp, uint16_t vf)
struct bnxt_vnic_info vnic;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VLAN table on non-PF port!\n");
return -EINVAL;
}
@@ -455,7 +455,7 @@ static int bnxt_set_vf_table(struct bnxt *bp, uint16_t vf)
/* This simply indicates there's no driver loaded.
* This is not an error.
*/
- RTE_LOG(ERR, PMD, "Unable to get default VNIC for VF %d\n", vf);
+ PMD_DRV_LOG(ERR, "Unable to get default VNIC for VF %d\n", vf);
} else {
memset(&vnic, 0, sizeof(vnic));
vnic.fw_vnic_id = dflt_vnic;
@@ -518,9 +518,9 @@ int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
/* Now check that there's space */
if (cnt == getpagesize() / sizeof(struct
bnxt_vlan_antispoof_table_entry)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"VLAN anti-spoof table is full\n");
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"VF %d cannot add VLAN %u\n",
i, vlan);
rc = -1;
@@ -585,7 +585,7 @@ int rte_pmd_bnxt_get_vf_stats(uint16_t port,
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to get VF %d stats on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -612,7 +612,7 @@ int rte_pmd_bnxt_reset_vf_stats(uint16_t port,
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to reset VF %d stats on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -638,7 +638,7 @@ int rte_pmd_bnxt_get_vf_rx_status(uint16_t port, uint16_t vf_id)
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to query VF %d RX stats on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -665,7 +665,7 @@ int rte_pmd_bnxt_get_vf_tx_drop_count(uint16_t port, uint16_t vf_id,
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to query VF %d TX drops on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -697,7 +697,7 @@ int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct ether_addr *addr,
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to config VF %d MAC on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -773,7 +773,7 @@ rte_pmd_bnxt_set_vf_vlan_insert(uint16_t port, uint16_t vf,
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VF %d vlan insert on non-PF port %d!\n",
vf, port);
return -ENOTSUP;
@@ -807,7 +807,7 @@ int rte_pmd_bnxt_set_vf_persist_stats(uint16_t port, uint16_t vf, uint8_t on)
bp = (struct bnxt *)dev->data->dev_private;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set persist stats on non-PF port %d!\n",
port);
return -EINVAL;
--
2.14.3 (Apple Git-98)
Ajit Khaparde
2018-01-25 22:47:54 UTC
Permalink
In certain cases the MAC address of a port could be all zeros.
Catch it early, log a message, and fail the initialization.
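
For illustration, a minimal standalone sketch of the kind of all-zeros test
being added; the helper name below is hypothetical and is not the driver's
actual check_zero_bytes():

#include <stdint.h>
#include <stddef.h>

/*
 * Hypothetical standalone helper: return 1 if all 'len' bytes are zero,
 * 0 otherwise.  An all-zero 6-byte MAC address read from the adapter is
 * treated as invalid.
 */
static int bytes_all_zero(const uint8_t *bytes, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++)
                if (bytes[i] != 0)
                        return 0;
        return 1;
}

/*
 * Usage sketch: reject the default MAC during init,
 * roughly as the hunk below does.
 *
 *      if (bytes_all_zero(mac, 6))
 *              return -EINVAL;
 */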

Signed-off-by: Ajit Khaparde <***@broadcom.com>
---
drivers/net/bnxt/bnxt_ethdev.c | 10 ++++++++++
drivers/net/bnxt/bnxt_filter.c | 2 +-
drivers/net/bnxt/bnxt_filter.h | 1 +
3 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index daed1fc80..76fff711f 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -3246,6 +3246,16 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
rc = -ENOMEM;
goto error_free;
}
+
+ if (check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {
+ PMD_DRV_LOG(ERR,
+ "Invalid MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
+ bp->dflt_mac_addr[0], bp->dflt_mac_addr[1],
+ bp->dflt_mac_addr[2], bp->dflt_mac_addr[3],
+ bp->dflt_mac_addr[4], bp->dflt_mac_addr[5]);
+ rc = -EINVAL;
+ goto error_free;
+ }
/* Copy the permanent MAC from the qcap response address now. */
memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index 0716dd8fd..032e8eed0 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -250,7 +250,7 @@ nxt_non_void_action(const struct rte_flow_action *cur)
}
}

-static inline int check_zero_bytes(const uint8_t *bytes, int len)
+int check_zero_bytes(const uint8_t *bytes, int len)
{
int i;
for (i = 0; i < len; i++)
diff --git a/drivers/net/bnxt/bnxt_filter.h b/drivers/net/bnxt/bnxt_filter.h
index 2591a87e2..a3c702df6 100644
--- a/drivers/net/bnxt/bnxt_filter.h
+++ b/drivers/net/bnxt/bnxt_filter.h
@@ -97,6 +97,7 @@ struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp);
void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter);
struct bnxt_filter_info *bnxt_get_l2_filter(struct bnxt *bp,
struct bnxt_filter_info *nf, struct bnxt_vnic_info *vnic);
+int check_zero_bytes(const uint8_t *bytes, int len);

#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR \
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR
--
2.14.3 (Apple Git-98)
Ajit Khaparde
2018-01-25 22:47:55 UTC
Permalink
Currently this is implemented entirely in the PMD as there is no explicit
support in the HW. Re-program the RSS table without this queue on queue
stop, and add it back to the table on queue start.
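
A minimal standalone sketch of the table-rebuild idea; the names, the table
size and the INVALID_GROUP sentinel are illustrative, not the driver's
actual structures:

#include <stdint.h>

#define HASH_INDEX_SIZE 128     /* illustrative redirection table size */
#define INVALID_GROUP   0xFFFF  /* illustrative sentinel for a stopped queue */

/*
 * Fill an RSS redirection table from per-queue ring-group ids, skipping
 * entries that belong to stopped queues.  Returns 0 on success, -1 if
 * every queue is currently stopped.
 */
static int fill_rss_table(uint16_t *rss_table, const uint16_t *grp_ids,
                          unsigned int nr_queues)
{
        unsigned int rss_idx, fw_idx, tries;

        for (rss_idx = 0, fw_idx = 0; rss_idx < HASH_INDEX_SIZE;
             rss_idx++, fw_idx++) {
                /* Find the next ring group that is still started. */
                for (tries = 0; tries < nr_queues; tries++) {
                        fw_idx %= nr_queues;
                        if (grp_ids[fw_idx] != INVALID_GROUP)
                                break;
                        fw_idx++;
                }
                if (tries == nr_queues)
                        return -1;      /* no started queue left */
                rss_table[rss_idx] = grp_ids[fw_idx];
        }
        return 0;
}

On queue stop the corresponding grp_ids entry is set to the sentinel and the
table is rebuilt; on queue start the real group id is restored and the table
is rebuilt again, which is what bnxt_vnic_rss_configure() in the patch does.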

Signed-off-by: Somnath Kotur <***@broadcom.com>
Signed-off-by: Ajit Khaparde <***@broadcom.com>
--
v1->v2: address review comments
---
drivers/net/bnxt/bnxt_ethdev.c | 33 +++++++++-----------------
drivers/net/bnxt/bnxt_hwrm.c | 28 ++++++++++++++++++++++
drivers/net/bnxt/bnxt_hwrm.h | 2 ++
drivers/net/bnxt/bnxt_rxq.c | 53 ++++++++++++++++++++++++++++++++++++++++++
drivers/net/bnxt/bnxt_rxq.h | 6 ++++-
drivers/net/bnxt/bnxt_rxr.c | 4 ++++
drivers/net/bnxt/bnxt_rxr.h | 3 ++-
drivers/net/bnxt/bnxt_txq.h | 1 -
drivers/net/bnxt/bnxt_txr.c | 32 +++++++++++++++++++++++++
drivers/net/bnxt/bnxt_txr.h | 2 ++
drivers/net/bnxt/bnxt_vnic.h | 1 -
11 files changed, 139 insertions(+), 26 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 76fff711f..2268aba2a 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -202,7 +202,7 @@ static int bnxt_alloc_mem(struct bnxt *bp)

static int bnxt_init_chip(struct bnxt *bp)
{
- unsigned int i, rss_idx, fw_idx;
+ unsigned int i;
struct rte_eth_link new;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
@@ -279,27 +279,12 @@ static int bnxt_init_chip(struct bnxt *bp)
i, rc);
goto err_out;
}
- if (vnic->rss_table && vnic->hash_type) {
- /*
- * Fill the RSS hash & redirection table with
- * ring group ids for all VNICs
- */
- for (rss_idx = 0, fw_idx = 0;
- rss_idx < HW_HASH_INDEX_SIZE;
- rss_idx++, fw_idx++) {
- if (vnic->fw_grp_ids[fw_idx] ==
- INVALID_HW_RING_ID)
- fw_idx = 0;
- vnic->rss_table[rss_idx] =
- vnic->fw_grp_ids[fw_idx];
- }
- rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
- if (rc) {
- PMD_DRV_LOG(ERR,
- "HWRM vnic %d set RSS failure rc: %x\n",
- i, rc);
- goto err_out;
- }
+
+ rc = bnxt_vnic_rss_configure(bp, vnic);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "HWRM vnic set RSS failure rc: %x\n", rc);
+ goto err_out;
}

bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
@@ -3022,6 +3007,10 @@ static const struct eth_dev_ops bnxt_dev_ops = {
.rx_queue_count = bnxt_rx_queue_count_op,
.rx_descriptor_status = bnxt_rx_descriptor_status_op,
.tx_descriptor_status = bnxt_tx_descriptor_status_op,
+ .rx_queue_start = bnxt_rx_queue_start,
+ .rx_queue_stop = bnxt_rx_queue_stop,
+ .tx_queue_start = bnxt_tx_queue_start,
+ .tx_queue_stop = bnxt_tx_queue_stop,
.filter_ctrl = bnxt_filter_ctrl_op,
.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
.get_eeprom_length = bnxt_get_eeprom_length_op,
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 75e03ad5d..8fac05251 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -3676,3 +3676,31 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,

return 0;
}
+
+int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ unsigned int rss_idx, fw_idx, i;
+
+ if (vnic->rss_table && vnic->hash_type) {
+ /*
+ * Fill the RSS hash & redirection table with
+ * ring group ids for all VNICs
+ */
+ for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
+ rss_idx++, fw_idx++) {
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ fw_idx %= bp->rx_cp_nr_rings;
+ if (vnic->fw_grp_ids[fw_idx] !=
+ INVALID_HW_RING_ID)
+ break;
+ fw_idx++;
+ }
+ if (i == bp->rx_cp_nr_rings)
+ return 0;
+ vnic->rss_table[rss_idx] =
+ vnic->fw_grp_ids[fw_idx];
+ }
+ return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+ }
+ return 0;
+}
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 108f8e81d..f11e72a35 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -187,4 +187,6 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
uint16_t dir_attr, const uint8_t *data,
size_t data_len);
int bnxt_hwrm_ptp_cfg(struct bnxt *bp);
+int bnxt_vnic_rss_configure(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic);
#endif
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 736936a55..16304865d 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -405,3 +405,56 @@ bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
}
return rc;
}
+
+int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
+ struct bnxt_vnic_info *vnic = NULL;
+
+ if (rxq == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ rxq->rx_deferred_start = false;
+ PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ vnic = rxq->vnic;
+ if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
+ return 0;
+ PMD_DRV_LOG(DEBUG, "vnic = %p fw_grp_id = %d\n",
+ vnic, bp->grp_info[rx_queue_id + 1].fw_grp_id);
+ vnic->fw_grp_ids[rx_queue_id] =
+ bp->grp_info[rx_queue_id + 1].fw_grp_id;
+ return bnxt_vnic_rss_configure(bp, vnic);
+ }
+
+ return 0;
+}
+
+int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
+ struct bnxt_vnic_info *vnic = NULL;
+
+ if (rxq == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ rxq->rx_deferred_start = true;
+ PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
+
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ vnic = rxq->vnic;
+ vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
+ return bnxt_vnic_rss_configure(bp, vnic);
+ }
+ return 0;
+}
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index be190195a..c7acaa755 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -50,6 +50,7 @@ struct bnxt_rx_queue {
uint16_t reg_idx; /* RX queue register index */
uint16_t port_id; /* Device port identifier */
uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
+ uint8_t rx_deferred_start; /* not in global dev start */

struct bnxt *bp;
int index;
@@ -75,5 +76,8 @@ int bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev,
uint16_t queue_id);
int bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev,
uint16_t queue_id);
-
+int bnxt_rx_queue_start(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+int bnxt_rx_queue_stop(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
#endif
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 3f07c11b5..9e70c8604 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -545,6 +545,10 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t ag_prod = rxr->ag_prod;
int rc = 0;

+ /* If Rx Q was stopped return */
+ if (rxq->rx_deferred_start)
+ return 0;
+
/* Handle RX burst request */
while (1) {
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h
index a94373d19..f3ed49bd6 100644
--- a/drivers/net/bnxt/bnxt_rxr.h
+++ b/drivers/net/bnxt/bnxt_rxr.h
@@ -120,5 +120,6 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
void bnxt_free_rx_rings(struct bnxt *bp);
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
-
+int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
#endif
diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h
index f753c10f2..e27c34fa9 100644
--- a/drivers/net/bnxt/bnxt_txq.h
+++ b/drivers/net/bnxt/bnxt_txq.h
@@ -71,5 +71,4 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
uint16_t nb_desc,
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
-
#endif
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 2f2c87119..2c81a37c2 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -349,6 +349,11 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Handle TX completions */
bnxt_handle_tx_cp(txq);

+ /* Tx queue was stopped; wait for it to be restarted */
+ if (txq->tx_deferred_start) {
+ PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
+ return 0;
+ }
/* Handle TX burst request */
for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) {
if (bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq)) {
@@ -364,3 +369,30 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,

return nb_tx_pkts;
}
+
+int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
+
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ txq->tx_deferred_start = false;
+ PMD_DRV_LOG(DEBUG, "Tx queue started\n");
+
+ return 0;
+}
+
+int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
+
+ /* Handle TX completions */
+ bnxt_handle_tx_cp(txq);
+
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ txq->tx_deferred_start = true;
+ PMD_DRV_LOG(DEBUG, "Tx queue stopped\n");
+
+ return 0;
+}
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
index 2feac51db..d88b15ab8 100644
--- a/drivers/net/bnxt/bnxt_txr.h
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -68,6 +68,8 @@ int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq);
int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id);
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

#define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h
index 875dc3c1c..d8d35c7dc 100644
--- a/drivers/net/bnxt/bnxt_vnic.h
+++ b/drivers/net/bnxt/bnxt_vnic.h
@@ -93,5 +93,4 @@ void bnxt_free_vnic_attributes(struct bnxt *bp);
int bnxt_alloc_vnic_attributes(struct bnxt *bp);
void bnxt_free_vnic_mem(struct bnxt *bp);
int bnxt_alloc_vnic_mem(struct bnxt *bp);
-
#endif
--
2.14.3 (Apple Git-98)
Ajit Khaparde
2018-01-25 22:47:57 UTC
Permalink
While using RSS, the pool count should be 1.
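
For context, this path is hit when the application configures Rx in RSS mode; with RSS all Rx rings are presumably served by one VNIC whose hash table spreads traffic across them, so a single pool suffices. A minimal application-side sketch (illustrative only, not part of the patch):

	struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
			},
		},
	};
	/* rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf); */
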
Fixes: 8103a57ab432a ("net/bnxt: handle Rx multi queue creation properly")
Cc: ***@dpdk.org
Signed-off-by: Ajit Khaparde <***@broadcom.com>
---
drivers/net/bnxt/bnxt_rxq.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 16304865d..d49f35462 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -118,7 +118,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
pools = max_pools;
break;
case ETH_MQ_RX_RSS:
- pools = bp->rx_cp_nr_rings;
+ pools = 1;
break;
default:
PMD_DRV_LOG(ERR, "Unsupported mq_mod %d\n",
--
2.14.3 (Apple Git-98)
Ajit Khaparde
2018-01-25 22:47:56 UTC
Permalink
When the driver is loaded on a 100G NIC, the port speed is not
displayed correctly. Parse the 100G speed before displaying it.
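
With this change (and once the missing break flagged in the review below is added), a
100G link should be reported as ETH_SPEED_NUM_100G (100000 Mb/s) through the usual
ethdev link query; a minimal sketch, with port_id purely illustrative:

	struct rte_eth_link link;

	rte_eth_link_get(port_id, &link);
	/* on a 100G port, link.link_speed is expected to be ETH_SPEED_NUM_100G (100000) */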

Signed-off-by: Ajit Khaparde <***@broadcom.com>
---
drivers/net/bnxt/bnxt_hwrm.c | 2 ++
1 file changed, 2 insertions(+)

diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 8fac05251..d412e51fc 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -2139,6 +2139,8 @@ static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
eth_link_speed = ETH_SPEED_NUM_50G;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
+ eth_link_speed = ETH_SPEED_NUM_100G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
default:
--
2.14.3 (Apple Git-98)
Ferruh Yigit
2018-01-26 17:08:39 UTC
Permalink
Post by Ajit Khaparde
When the driver is loaded on a 100G NIC, the port speed is not
displayed correctly. Parse the 100G speed before displaying it.
---
drivers/net/bnxt/bnxt_hwrm.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 8fac05251..d412e51fc 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -2139,6 +2139,8 @@ static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
break;
eth_link_speed = ETH_SPEED_NUM_50G;
This case doesn't have a "break" statement, which looks unintentional, but
if it is intentional please add a /* Fallthrough */ comment to prevent the build
error [1].


[1]
...dpdk/drivers/net/bnxt/bnxt_hwrm.c: In function ‘bnxt_parse_hw_link_speed’:
...dpdk/drivers/net/bnxt/bnxt_hwrm.c:2141:18: error: this statement may fall
through [-Werror=implicit-fallthrough=]
eth_link_speed = ETH_SPEED_NUM_50G;
...dpdk/drivers/net/bnxt/bnxt_hwrm.c:2142:2: note: here
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
^~~~
Post by Ajit Khaparde
+ eth_link_speed = ETH_SPEED_NUM_100G;
break;
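
For reference, the corrected hunk would presumably read as follows once the missing
break is added (a sketch only; the resubmitted patch may differ):

	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
		eth_link_speed = ETH_SPEED_NUM_50G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
		eth_link_speed = ETH_SPEED_NUM_100G;
		break;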
Ajit Khaparde
2018-01-26 17:31:53 UTC
Permalink
Please apply this patchset.

Ajit Khaparde (6):
net/bnxt: fix size of tx ring in HW
net/bnxt: use driver specific dynamic log type
net/bnxt: register for more async events
net/bnxt: check if MAC address is all zeros
net/bnxt: add 100G speed detection
net/bnxt: fix number of pools for RSS

Somnath Kotur (1):
net/bnxt: support for rx/tx_queue_start/stop ops

drivers/net/bnxt/bnxt.h | 8 ++
drivers/net/bnxt/bnxt_cpr.c | 19 ++-
drivers/net/bnxt/bnxt_ethdev.c | 282 +++++++++++++++++++++-------------------
drivers/net/bnxt/bnxt_filter.c | 44 +++----
drivers/net/bnxt/bnxt_filter.h | 1 +
drivers/net/bnxt/bnxt_hwrm.c | 182 +++++++++++++++-----------
drivers/net/bnxt/bnxt_hwrm.h | 13 ++
drivers/net/bnxt/bnxt_irq.c | 4 +-
drivers/net/bnxt/bnxt_ring.c | 12 +-
drivers/net/bnxt/bnxt_rxq.c | 77 +++++++++--
drivers/net/bnxt/bnxt_rxq.h | 6 +-
drivers/net/bnxt/bnxt_rxr.c | 23 ++--
drivers/net/bnxt/bnxt_rxr.h | 3 +-
drivers/net/bnxt/bnxt_stats.c | 16 +--
drivers/net/bnxt/bnxt_txq.c | 10 +-
drivers/net/bnxt/bnxt_txq.h | 1 -
drivers/net/bnxt/bnxt_txr.c | 34 ++++-
drivers/net/bnxt/bnxt_txr.h | 2 +
drivers/net/bnxt/bnxt_vnic.c | 14 +-
drivers/net/bnxt/bnxt_vnic.h | 1 -
drivers/net/bnxt/rte_pmd_bnxt.c | 48 +++----
21 files changed, 482 insertions(+), 318 deletions(-)
--
2.14.3 (Apple Git-98)
Ajit Khaparde
2018-01-26 17:31:54 UTC
Permalink
During Tx ring allocation, the actual ring size configured in the HW
ends up being twice the number of Tx descriptors (txd) specified to the driver.
The power-of-2 ring size calculation wrongly adds a +1 while sending the ring
create command to the FW.
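
For example, if an application requests 256 Tx descriptors (already a power of two),
rte_align32pow2(256 + 1) rounds up to 512, so the FW-side ring ends up twice the
requested size, whereas rte_align32pow2(256) returns 256 as intended.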

Fixes: 6eb3cc2294fd ("net/bnxt: add initial Tx code")
Cc: ***@dpdk.org
Signed-off-by: Ajit Khaparde <***@broadcom.com>
---
drivers/net/bnxt/bnxt_txr.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index ac77434b7..2f2c87119 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -101,7 +101,7 @@ int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
if (ring == NULL)
return -ENOMEM;
txr->tx_ring_struct = ring;
- ring->ring_size = rte_align32pow2(txq->nb_tx_desc + 1);
+ ring->ring_size = rte_align32pow2(txq->nb_tx_desc);
ring->ring_mask = ring->ring_size - 1;
ring->bd = (void *)txr->tx_desc_ring;
ring->bd_dma = txr->tx_desc_mapping;
--
2.14.3 (Apple Git-98)
Ajit Khaparde
2018-01-26 17:31:55 UTC
Permalink
This patch implements a driver-specific log type, doing away with
the use of RTE_LOG() for logging.
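
For anyone who wants more verbose bnxt output, the level of a dynamic log type can be
raised at run time by name; a minimal sketch, assuming the "pmd.bnxt.driver" name
registered by this patch (rte_log_register() returns the existing id when the name is
already registered):

	#include <rte_log.h>

	/* call after rte_eal_init() to bump the bnxt driver log type to DEBUG */
	static void
	enable_bnxt_debug_logs(void)
	{
		int logtype = rte_log_register("pmd.bnxt.driver");

		if (logtype >= 0)
			rte_log_set_level(logtype, RTE_LOG_DEBUG);
	}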
Signed-off-by: Ajit Khaparde <***@broadcom.com>
--
v1 -> v2: address review comments
---
drivers/net/bnxt/bnxt.h | 8 ++
drivers/net/bnxt/bnxt_cpr.c | 10 +-
drivers/net/bnxt/bnxt_ethdev.c | 241 +++++++++++++++++++++-------------------
drivers/net/bnxt/bnxt_filter.c | 42 +++----
drivers/net/bnxt/bnxt_hwrm.c | 142 ++++++++++++-----------
drivers/net/bnxt/bnxt_irq.c | 4 +-
drivers/net/bnxt/bnxt_ring.c | 12 +-
drivers/net/bnxt/bnxt_rxq.c | 22 ++--
drivers/net/bnxt/bnxt_rxr.c | 19 ++--
drivers/net/bnxt/bnxt_stats.c | 16 +--
drivers/net/bnxt/bnxt_txq.c | 10 +-
drivers/net/bnxt/bnxt_vnic.c | 14 +--
drivers/net/bnxt/rte_pmd_bnxt.c | 48 ++++----
13 files changed, 300 insertions(+), 288 deletions(-)

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index cf0b1d27c..6776c64a5 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -334,4 +334,12 @@ int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg);

bool is_bnxt_supported(struct rte_eth_dev *dev);
extern const struct rte_flow_ops bnxt_flow_ops;
+
+extern int bnxt_logtype_driver;
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, bnxt_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt, ## args)
#endif
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index cde8adc3b..663a5223d 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -58,7 +58,7 @@ void bnxt_handle_async_event(struct bnxt *bp,
bnxt_link_update_op(bp->eth_dev, 1);
break;
default:
- RTE_LOG(DEBUG, PMD, "handle_async_event id = 0x%x\n", event_id);
+ PMD_DRV_LOG(DEBUG, "handle_async_event id = 0x%x\n", event_id);
break;
}
}
@@ -74,7 +74,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
int rc;

if (bp->pf.active_vfs <= 0) {
- RTE_LOG(ERR, PMD, "Forwarded VF with no active VFs\n");
+ PMD_DRV_LOG(ERR, "Forwarded VF with no active VFs\n");
return;
}

@@ -93,7 +93,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)

if (fw_vf_id < bp->pf.first_vf_id ||
fw_vf_id >= (bp->pf.first_vf_id) + bp->pf.active_vfs) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"FWD req's source_id 0x%x out of range 0x%x - 0x%x (%d %d)\n",
fw_vf_id, bp->pf.first_vf_id,
(bp->pf.first_vf_id) + bp->pf.active_vfs - 1,
@@ -130,7 +130,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
/* Forward */
rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to send FWD req VF 0x%x, type 0x%x.\n",
fw_vf_id - bp->pf.first_vf_id,
rte_le_to_cpu_16(fwd_cmd->req_type));
@@ -141,7 +141,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
reject:
rc = bnxt_hwrm_reject_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to send REJECT req VF 0x%x, type 0x%x.\n",
fw_vf_id - bp->pf.first_vf_id,
rte_le_to_cpu_16(fwd_cmd->req_type));
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 057786a62..daed1fc80 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -58,6 +58,7 @@
#define DRV_MODULE_NAME "bnxt"
static const char bnxt_version[] =
"Broadcom Cumulus driver " DRV_MODULE_NAME "\n";
+int bnxt_logtype_driver;

#define PCI_VENDOR_ID_BROADCOM 0x14E4

@@ -223,25 +224,25 @@ static int bnxt_init_chip(struct bnxt *bp)

rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
goto err_out;
}

rc = bnxt_alloc_hwrm_rings(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
goto err_out;
}

rc = bnxt_alloc_all_hwrm_ring_grps(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc);
+ PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
goto err_out;
}

rc = bnxt_mq_rx_configure(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
goto err_out;
}

@@ -251,14 +252,14 @@ static int bnxt_init_chip(struct bnxt *bp)

rc = bnxt_hwrm_vnic_alloc(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM vnic %d alloc failure rc: %x\n",
+ PMD_DRV_LOG(ERR, "HWRM vnic %d alloc failure rc: %x\n",
i, rc);
goto err_out;
}

rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM vnic %d ctx alloc failure rc: %x\n",
i, rc);
goto err_out;
@@ -266,14 +267,14 @@ static int bnxt_init_chip(struct bnxt *bp)

rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM vnic %d cfg failure rc: %x\n",
+ PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
i, rc);
goto err_out;
}

rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM vnic %d filter failure rc: %x\n",
i, rc);
goto err_out;
@@ -294,7 +295,7 @@ static int bnxt_init_chip(struct bnxt *bp)
}
rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM vnic %d set RSS failure rc: %x\n",
i, rc);
goto err_out;
@@ -310,7 +311,7 @@ static int bnxt_init_chip(struct bnxt *bp)
}
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM cfa l2 rx mask failure rc: %x\n", rc);
goto err_out;
}
@@ -320,10 +321,9 @@ static int bnxt_init_chip(struct bnxt *bp)
!RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
intr_vector = bp->eth_dev->data->nb_rx_queues;
- RTE_LOG(INFO, PMD, "%s(): intr_vector = %d\n", __func__,
- intr_vector);
+ PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
if (intr_vector > bp->rx_cp_nr_rings) {
- RTE_LOG(ERR, PMD, "At most %d intr queues supported",
+ PMD_DRV_LOG(ERR, "At most %d intr queues supported",
bp->rx_cp_nr_rings);
return -ENOTSUP;
}
@@ -337,13 +337,13 @@ static int bnxt_init_chip(struct bnxt *bp)
bp->eth_dev->data->nb_rx_queues *
sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
- RTE_LOG(ERR, PMD, "Failed to allocate %d rx_queues"
+ PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
" intr_vec", bp->eth_dev->data->nb_rx_queues);
return -ENOMEM;
}
- RTE_LOG(DEBUG, PMD, "%s(): intr_handle->intr_vec = %p "
+ PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
- __func__, intr_handle->intr_vec, intr_handle->nb_efd,
+ intr_handle->intr_vec, intr_handle->nb_efd,
intr_handle->max_intr);
}

@@ -359,14 +359,14 @@ static int bnxt_init_chip(struct bnxt *bp)

rc = bnxt_get_hwrm_link_config(bp, &new);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
goto err_out;
}

if (!bp->link_info.link_up) {
rc = bnxt_set_hwrm_link_config(bp, true);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM link config failure rc: %x\n", rc);
goto err_out;
}
@@ -537,13 +537,13 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
bp->max_stat_ctx ||
(uint32_t)(eth_dev->data->nb_rx_queues + 1) > bp->max_ring_grps) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Insufficient resources to support requested config\n");
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Num Queues Requested: Tx %d, Rx %d\n",
eth_dev->data->nb_tx_queues,
eth_dev->data->nb_rx_queues);
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Res available: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d\n",
bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
bp->max_stat_ctx, bp->max_ring_grps);
@@ -567,13 +567,13 @@ static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
struct rte_eth_link *link = &eth_dev->data->dev_link;

if (link->link_status)
- RTE_LOG(INFO, PMD, "Port %d Link Up - speed %u Mbps - %s\n",
+ PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
eth_dev->data->port_id,
(uint32_t)link->link_speed,
(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- RTE_LOG(INFO, PMD, "Port %d Link Down\n",
+ PMD_DRV_LOG(INFO, "Port %d Link Down\n",
eth_dev->data->port_id);
}

@@ -590,7 +590,7 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
int rc;

if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
}
@@ -729,25 +729,25 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
struct bnxt_filter_info *filter;

if (BNXT_VF(bp)) {
- RTE_LOG(ERR, PMD, "Cannot add MAC address to a VF interface\n");
+ PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
return -ENOTSUP;
}

if (!vnic) {
- RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
+ PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
return -EINVAL;
}
/* Attach requested MAC address to the new l2_filter */
STAILQ_FOREACH(filter, &vnic->filter, next) {
if (filter->mac_index == index) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"MAC addr already existed for pool %d\n", pool);
return -EINVAL;
}
}
filter = bnxt_alloc_filter(bp);
if (!filter) {
- RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+ PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
return -ENODEV;
}
STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
@@ -770,7 +770,7 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
if (rc) {
new.link_speed = ETH_LINK_SPEED_100M;
new.link_duplex = ETH_LINK_FULL_DUPLEX;
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to retrieve link rc = 0x%x!\n", rc);
goto out;
}
@@ -861,7 +861,7 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
return -EINVAL;

if (reta_size != HW_HASH_INDEX_SIZE) {
- RTE_LOG(ERR, PMD, "The configured hash table lookup size "
+ PMD_DRV_LOG(ERR, "The configured hash table lookup size "
"(%d) must equal the size supported by the hardware "
"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
return -EINVAL;
@@ -893,7 +893,7 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
return -EINVAL;

if (reta_size != HW_HASH_INDEX_SIZE) {
- RTE_LOG(ERR, PMD, "The configured hash table lookup size "
+ PMD_DRV_LOG(ERR, "The configured hash table lookup size "
"(%d) must equal the size supported by the hardware "
"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
return -EINVAL;
@@ -924,7 +924,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
*/
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
if (!rss_conf->rss_hf)
- RTE_LOG(ERR, PMD, "Hash type NONE\n");
+ PMD_DRV_LOG(ERR, "Hash type NONE\n");
} else {
if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
return -EINVAL;
@@ -1013,7 +1013,7 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
}
if (hash_types) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unknwon RSS config from firmware (%08x), RSS disabled",
vnic->hash_type);
return -ENOTSUP;
@@ -1062,7 +1062,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
- RTE_LOG(ERR, PMD, "Flow Control Settings cannot be modified\n");
+ PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
return -ENOTSUP;
}

@@ -1122,10 +1122,10 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
switch (udp_tunnel->prot_type) {
case RTE_TUNNEL_TYPE_VXLAN:
if (bp->vxlan_port_cnt) {
- RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
+ PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
udp_tunnel->udp_port);
if (bp->vxlan_port != udp_tunnel->udp_port) {
- RTE_LOG(ERR, PMD, "Only one port allowed\n");
+ PMD_DRV_LOG(ERR, "Only one port allowed\n");
return -ENOSPC;
}
bp->vxlan_port_cnt++;
@@ -1137,10 +1137,10 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
break;
case RTE_TUNNEL_TYPE_GENEVE:
if (bp->geneve_port_cnt) {
- RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
+ PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
udp_tunnel->udp_port);
if (bp->geneve_port != udp_tunnel->udp_port) {
- RTE_LOG(ERR, PMD, "Only one port allowed\n");
+ PMD_DRV_LOG(ERR, "Only one port allowed\n");
return -ENOSPC;
}
bp->geneve_port_cnt++;
@@ -1151,7 +1151,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
bp->geneve_port_cnt++;
break;
default:
- RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
return -ENOTSUP;
}
rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
@@ -1171,11 +1171,11 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
switch (udp_tunnel->prot_type) {
case RTE_TUNNEL_TYPE_VXLAN:
if (!bp->vxlan_port_cnt) {
- RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
+ PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
return -EINVAL;
}
if (bp->vxlan_port != udp_tunnel->udp_port) {
- RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
+ PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
udp_tunnel->udp_port, bp->vxlan_port);
return -EINVAL;
}
@@ -1188,11 +1188,11 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
break;
case RTE_TUNNEL_TYPE_GENEVE:
if (!bp->geneve_port_cnt) {
- RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
+ PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
return -EINVAL;
}
if (bp->geneve_port != udp_tunnel->udp_port) {
- RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
+ PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
udp_tunnel->udp_port, bp->geneve_port);
return -EINVAL;
}
@@ -1204,7 +1204,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
port = bp->geneve_fw_dst_port_id;
break;
default:
- RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
return -ENOTSUP;
}

@@ -1261,7 +1261,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)

new_filter = bnxt_alloc_filter(bp);
if (!new_filter) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"MAC/VLAN filter alloc failed\n");
rc = -ENOMEM;
goto exit;
@@ -1279,7 +1279,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
new_filter);
if (rc)
goto exit;
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Del Vlan filter for %d\n",
vlan_id);
}
@@ -1334,7 +1334,7 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
}
new_filter = bnxt_alloc_filter(bp);
if (!new_filter) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"MAC/VLAN filter alloc failed\n");
rc = -ENOMEM;
goto exit;
@@ -1354,7 +1354,7 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
new_filter);
if (rc)
goto exit;
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Added Vlan filter for %d\n", vlan_id);
cont:
filter = temp_filter;
@@ -1389,7 +1389,7 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
for (i = 0; i < 4095; i++)
bnxt_del_vlan_filter(bp, i);
}
- RTE_LOG(INFO, PMD, "VLAN Filtering: %d\n",
+ PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
dev->data->dev_conf.rxmode.hw_vlan_filter);
}

@@ -1403,12 +1403,12 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
vnic->vlan_strip = false;
bnxt_hwrm_vnic_cfg(bp, vnic);
}
- RTE_LOG(INFO, PMD, "VLAN Strip Offload: %d\n",
+ PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
dev->data->dev_conf.rxmode.hw_vlan_strip);
}

if (mask & ETH_VLAN_EXTEND_MASK)
- RTE_LOG(ERR, PMD, "Extend VLAN Not supported\n");
+ PMD_DRV_LOG(ERR, "Extend VLAN Not supported\n");

return 0;
}
@@ -1444,7 +1444,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
if (rc)
break;
filter->mac_index = 0;
- RTE_LOG(DEBUG, PMD, "Set MAC addr\n");
+ PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
}
}

@@ -1547,7 +1547,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2;

if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) {
- RTE_LOG(ERR, PMD, "MTU requested must be within (%d, %d)\n",
+ PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n",
ETHER_MIN_MTU, max_dev_mtu);
return -EINVAL;
}
@@ -1565,7 +1565,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;

eth_dev->data->mtu = new_mtu;
- RTE_LOG(INFO, PMD, "New MTU is %d\n", eth_dev->data->mtu);
+ PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu);

for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
@@ -1592,7 +1592,7 @@ bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
int rc;

if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"PVID cannot be modified for this function\n");
return -ENOTSUP;
}
@@ -1753,13 +1753,13 @@ bnxt_match_and_validate_ether_filter(struct bnxt *bp,

if (efilter->ether_type == ETHER_TYPE_IPv4 ||
efilter->ether_type == ETHER_TYPE_IPv6) {
- RTE_LOG(ERR, PMD, "invalid ether_type(0x%04x) in"
+ PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in"
" ethertype filter.", efilter->ether_type);
*ret = -EINVAL;
goto exit;
}
if (efilter->queue >= bp->rx_nr_rings) {
- RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
*ret = -EINVAL;
goto exit;
}
@@ -1767,7 +1767,7 @@ bnxt_match_and_validate_ether_filter(struct bnxt *bp,
vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
if (vnic == NULL) {
- RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
*ret = -EINVAL;
goto exit;
}
@@ -1818,7 +1818,7 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,
return 0;

if (arg == NULL) {
- RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
filter_op);
return -EINVAL;
}
@@ -1835,7 +1835,7 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,

bfilter = bnxt_get_unused_filter(bp);
if (bfilter == NULL) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Not enough resources for a new filter.\n");
return -ENOMEM;
}
@@ -1879,11 +1879,11 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,
next);
bnxt_free_filter(bp, filter1);
} else if (ret == 0) {
- RTE_LOG(ERR, PMD, "No matching filter found\n");
+ PMD_DRV_LOG(ERR, "No matching filter found\n");
}
break;
default:
- RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
ret = -EINVAL;
goto error;
}
@@ -1902,7 +1902,7 @@ parse_ntuple_filter(struct bnxt *bp,
uint32_t en = 0;

if (nfilter->queue >= bp->rx_nr_rings) {
- RTE_LOG(ERR, PMD, "Invalid queue %d\n", nfilter->queue);
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue);
return -EINVAL;
}

@@ -1914,7 +1914,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
break;
default:
- RTE_LOG(ERR, PMD, "invalid dst_port mask.");
+ PMD_DRV_LOG(ERR, "invalid dst_port mask.");
return -EINVAL;
}

@@ -1932,7 +1932,7 @@ parse_ntuple_filter(struct bnxt *bp,
en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
break;
default:
- RTE_LOG(ERR, PMD, "invalid protocol mask.");
+ PMD_DRV_LOG(ERR, "invalid protocol mask.");
return -EINVAL;
}

@@ -1944,7 +1944,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
break;
default:
- RTE_LOG(ERR, PMD, "invalid dst_ip mask.");
+ PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
return -EINVAL;
}

@@ -1956,7 +1956,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
break;
default:
- RTE_LOG(ERR, PMD, "invalid src_ip mask.");
+ PMD_DRV_LOG(ERR, "invalid src_ip mask.");
return -EINVAL;
}

@@ -1968,7 +1968,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
break;
default:
- RTE_LOG(ERR, PMD, "invalid src_port mask.");
+ PMD_DRV_LOG(ERR, "invalid src_port mask.");
return -EINVAL;
}

@@ -2021,18 +2021,18 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp,
int ret;

if (nfilter->flags != RTE_5TUPLE_FLAGS) {
- RTE_LOG(ERR, PMD, "only 5tuple is supported.");
+ PMD_DRV_LOG(ERR, "only 5tuple is supported.");
return -EINVAL;
}

if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
- RTE_LOG(ERR, PMD, "Ntuple filter: TCP flags not supported\n");
+ PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n");
return -EINVAL;
}

bfilter = bnxt_get_unused_filter(bp);
if (bfilter == NULL) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Not enough resources for a new filter.\n");
return -ENOMEM;
}
@@ -2059,7 +2059,7 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp,

if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
bfilter->dst_id == mfilter->dst_id) {
- RTE_LOG(ERR, PMD, "filter exists.\n");
+ PMD_DRV_LOG(ERR, "filter exists.\n");
ret = -EEXIST;
goto free_filter;
} else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
@@ -2068,12 +2068,12 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp,
ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter);
STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next);
STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next);
- RTE_LOG(ERR, PMD, "filter with matching pattern exists.\n");
- RTE_LOG(ERR, PMD, " Updated it to the new destination queue\n");
+ PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n");
+ PMD_DRV_LOG(ERR, " Updated it to the new destination queue\n");
goto free_filter;
}
if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
- RTE_LOG(ERR, PMD, "filter doesn't exist.");
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
ret = -ENOENT;
goto free_filter;
}
@@ -2118,7 +2118,7 @@ bnxt_ntuple_filter(struct rte_eth_dev *dev,
return 0;

if (arg == NULL) {
- RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
filter_op);
return -EINVAL;
}
@@ -2135,7 +2135,7 @@ bnxt_ntuple_filter(struct rte_eth_dev *dev,
filter_op);
break;
default:
- RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
ret = -EINVAL;
break;
}
@@ -2337,7 +2337,7 @@ bnxt_parse_fdir_filter(struct bnxt *bp,
vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
if (vnic == NULL) {
- RTE_LOG(ERR, PMD, "Invalid queue %d\n", fdir->action.rx_queue);
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue);
return -EINVAL;
}

@@ -2441,7 +2441,7 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
/* FALLTHROUGH */
filter = bnxt_get_unused_filter(bp);
if (filter == NULL) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Not enough resources for a new flow.\n");
return -ENOMEM;
}
@@ -2453,12 +2453,12 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,

match = bnxt_match_fdir(bp, filter);
if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
- RTE_LOG(ERR, PMD, "Flow already exists.\n");
+ PMD_DRV_LOG(ERR, "Flow already exists.\n");
ret = -EEXIST;
goto free_filter;
}
if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
- RTE_LOG(ERR, PMD, "Flow does not exist.\n");
+ PMD_DRV_LOG(ERR, "Flow does not exist.\n");
ret = -ENOENT;
goto free_filter;
}
@@ -2505,10 +2505,10 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_STATS:
case RTE_ETH_FILTER_INFO:
/* FALLTHROUGH */
- RTE_LOG(ERR, PMD, "operation %u not implemented", filter_op);
+ PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
break;
default:
- RTE_LOG(ERR, PMD, "unknown operation %u", filter_op);
+ PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
ret = -EINVAL;
break;
}
@@ -2529,7 +2529,7 @@ bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,

switch (filter_type) {
case RTE_ETH_FILTER_TUNNEL:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"filter type: %d: To be implemented\n", filter_type);
break;
case RTE_ETH_FILTER_FDIR:
@@ -2547,7 +2547,7 @@ bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
*(const void **)arg = &bnxt_flow_ops;
break;
default:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Filter type (%d) not supported", filter_type);
ret = -EINVAL;
break;
@@ -2841,8 +2841,8 @@ bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
uint32_t dir_entries;
uint32_t entry_length;

- RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x\n",
- __func__, bp->pdev->addr.domain, bp->pdev->addr.bus,
+ PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x\n",
+ bp->pdev->addr.domain, bp->pdev->addr.bus,
bp->pdev->addr.devid, bp->pdev->addr.function);

rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
@@ -2860,8 +2860,8 @@ bnxt_get_eeprom_op(struct rte_eth_dev *dev,
uint32_t index;
uint32_t offset;

- RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
- "len = %d\n", __func__, bp->pdev->addr.domain,
+ PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
+ "len = %d\n", bp->pdev->addr.domain,
bp->pdev->addr.bus, bp->pdev->addr.devid,
bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);

@@ -2929,13 +2929,13 @@ bnxt_set_eeprom_op(struct rte_eth_dev *dev,
uint8_t index, dir_op;
uint16_t type, ext, ordinal, attr;

- RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
- "len = %d\n", __func__, bp->pdev->addr.domain,
+ PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
+ "len = %d\n", bp->pdev->addr.domain,
bp->pdev->addr.bus, bp->pdev->addr.devid,
bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD, "NVM write not supported from a VF\n");
+ PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
return -EINVAL;
}

@@ -3056,7 +3056,7 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev)

/* enable device (incl. PCI PM wakeup), and bus-mastering */
if (!pci_dev->mem_resource[0].addr) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Cannot find PCI device base address, aborting\n");
rc = -ENODEV;
goto init_err_disable;
@@ -3067,7 +3067,7 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev)

bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
if (!bp->bar0) {
- RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
+ PMD_DRV_LOG(ERR, "Cannot map device registers, aborting\n");
rc = -ENOMEM;
goto init_err_release;
}
@@ -3103,7 +3103,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
int rc;

if (version_printed++ == 0)
- RTE_LOG(INFO, PMD, "%s\n", bnxt_version);
+ PMD_DRV_LOG(INFO, "%s\n", bnxt_version);

rte_eth_copy_pci_info(eth_dev, pci_dev);

@@ -3120,7 +3120,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)

rc = bnxt_init_board(eth_dev);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Board initialization failed rc: %x\n", rc);
goto error;
}
@@ -3151,13 +3151,13 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map address to physical memory\n");
return -ENOMEM;
}
@@ -3186,13 +3186,13 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map address to physical memory\n");
return -ENOMEM;
}
@@ -3207,7 +3207,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)

rc = bnxt_alloc_hwrm_resources(bp);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"hwrm resource allocation failure rc: %x\n", rc);
goto error_free;
}
@@ -3216,31 +3216,31 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
goto error_free;
rc = bnxt_hwrm_queue_qportcfg(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm queue qportcfg failed\n");
+ PMD_DRV_LOG(ERR, "hwrm queue qportcfg failed\n");
goto error_free;
}

rc = bnxt_hwrm_func_qcfg(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm func qcfg failed\n");
+ PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n");
goto error_free;
}

/* Get the MAX capabilities for this function */
rc = bnxt_hwrm_func_qcaps(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm query capability failure rc: %x\n", rc);
goto error_free;
}
if (bp->max_tx_rings == 0) {
- RTE_LOG(ERR, PMD, "No TX rings available!\n");
+ PMD_DRV_LOG(ERR, "No TX rings available!\n");
rc = -EBUSY;
goto error_free;
}
eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
if (eth_dev->data->mac_addrs == NULL) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to alloc %u bytes needed to store MAC addr tbl",
ETHER_ADDR_LEN * bp->max_l2_ctx);
rc = -ENOMEM;
@@ -3252,7 +3252,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)

if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
/* 1 ring is for default completion ring */
- RTE_LOG(ERR, PMD, "Insufficient resource: Ring Group\n");
+ PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
rc = -ENOSPC;
goto error_free;
}
@@ -3260,7 +3260,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
bp->grp_info = rte_zmalloc("bnxt_grp_info",
sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
if (!bp->grp_info) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to alloc %zu bytes to store group info table\n",
sizeof(*bp->grp_info) * bp->max_ring_grps);
rc = -ENOMEM;
@@ -3273,7 +3273,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
} else {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Firmware too old for VF mailbox functionality\n");
memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
}
@@ -3293,20 +3293,20 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
rc = bnxt_hwrm_func_driver_register(bp);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to register driver");
rc = -EBUSY;
goto error_free;
}

- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
pci_dev->mem_resource[0].phys_addr,
pci_dev->mem_resource[0].addr);

rc = bnxt_hwrm_func_reset(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm chip reset failure rc: %x\n", rc);
rc = -EIO;
goto error_free;
}
@@ -3318,13 +3318,13 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
if (bp->pdev->max_vfs) {
rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
if (rc) {
- RTE_LOG(ERR, PMD, "Failed to allocate VFs\n");
+ PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
goto error_free;
}
} else {
rc = bnxt_hwrm_allocate_pf_only(bp);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to allocate PF resources\n");
goto error_free;
}
@@ -3433,6 +3433,15 @@ bool is_bnxt_supported(struct rte_eth_dev *dev)
return is_device_supported(dev, &bnxt_rte_pmd);
}

+RTE_INIT(bnxt_init_log);
+static void
+bnxt_init_log(void)
+{
+ bnxt_logtype_driver = rte_log_register("pmd.bnxt.driver");
+ if (bnxt_logtype_driver >= 0)
+ rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
+}
+
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index 22cfbd372..0716dd8fd 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -56,7 +56,7 @@ struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
/* Find the 1st unused filter from the free_filter_list pool*/
filter = STAILQ_FIRST(&bp->free_filter_list);
if (!filter) {
- RTE_LOG(ERR, PMD, "No more free filter resources\n");
+ PMD_DRV_LOG(ERR, "No more free filter resources\n");
return NULL;
}
STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
@@ -77,7 +77,7 @@ struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)

filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
if (!filter) {
- RTE_LOG(ERR, PMD, "Failed to alloc memory for VF %hu filters\n",
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
vf);
return NULL;
}
@@ -145,11 +145,11 @@ void bnxt_free_filter_mem(struct bnxt *bp)
for (i = 0; i < max_filters; i++) {
filter = &bp->filter_info[i];
if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
- RTE_LOG(ERR, PMD, "HWRM filter is not freed??\n");
+ PMD_DRV_LOG(ERR, "HWRM filter is not freed??\n");
/* Call HWRM to try to free filter again */
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
if (rc)
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM filter cannot be freed rc = %d\n",
rc);
}
@@ -172,7 +172,7 @@ int bnxt_alloc_filter_mem(struct bnxt *bp)
max_filters * sizeof(struct bnxt_filter_info),
0);
if (filter_mem == NULL) {
- RTE_LOG(ERR, PMD, "Failed to alloc memory for %d filters",
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters",
max_filters);
return -ENOMEM;
}
@@ -187,7 +187,7 @@ struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
/* Find the 1st unused filter from the free_filter_list pool*/
filter = STAILQ_FIRST(&bp->free_filter_list);
if (!filter) {
- RTE_LOG(ERR, PMD, "No more free filter resources\n");
+ PMD_DRV_LOG(ERR, "No more free filter resources\n");
return NULL;
}
STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
@@ -281,7 +281,7 @@ bnxt_filter_type_check(const struct rte_flow_item pattern[],
/* FALLTHROUGH */
/* need ntuple match, reset exact match */
if (!use_ntuple) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"VLAN flow cannot use NTUPLE filter\n");
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -292,7 +292,7 @@ bnxt_filter_type_check(const struct rte_flow_item pattern[],
use_ntuple |= 1;
break;
default:
- RTE_LOG(ERR, PMD, "Unknown Flow type");
+ PMD_DRV_LOG(ERR, "Unknown Flow type");
use_ntuple |= 1;
}
item++;
@@ -329,7 +329,7 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
int dflt_vnic;

use_ntuple = bnxt_filter_type_check(pattern, error);
- RTE_LOG(DEBUG, PMD, "Use NTUPLE %d\n", use_ntuple);
+ PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
if (use_ntuple < 0)
return use_ntuple;

@@ -791,7 +791,7 @@ bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
return f0;

//This flow needs DST MAC which is not same as port/l2
- RTE_LOG(DEBUG, PMD, "Create L2 filter for DST MAC\n");
+ PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
filter1 = bnxt_get_unused_filter(bp);
if (filter1 == NULL)
return NULL;
@@ -828,7 +828,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
int rc;

if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
- RTE_LOG(ERR, PMD, "Cannot create flow on RSS queues\n");
+ PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"Cannot create flow on RSS queues");
@@ -857,7 +857,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
rc = -rte_errno;
goto ret;
}
- RTE_LOG(DEBUG, PMD, "Queue index %d\n", act_q->index);
+ PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
@@ -875,7 +875,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
goto ret;
}
filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
- RTE_LOG(DEBUG, PMD, "VNIC found\n");
+ PMD_DRV_LOG(DEBUG, "VNIC found\n");
break;
case RTE_FLOW_ACTION_TYPE_DROP:
vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
@@ -990,7 +990,7 @@ bnxt_flow_validate(struct rte_eth_dev *dev,

filter = bnxt_get_unused_filter(bp);
if (filter == NULL) {
- RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
+ PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
return -ENOMEM;
}

@@ -1092,13 +1092,13 @@ bnxt_flow_create(struct rte_eth_dev *dev,

ret = bnxt_flow_agrs_validate(attr, pattern, actions, error);
if (ret != 0) {
- RTE_LOG(ERR, PMD, "Not a validate flow.\n");
+ PMD_DRV_LOG(ERR, "Not a validate flow.\n");
goto free_flow;
}

filter = bnxt_get_unused_filter(bp);
if (filter == NULL) {
- RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
+ PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
goto free_flow;
}

@@ -1109,15 +1109,15 @@ bnxt_flow_create(struct rte_eth_dev *dev,

ret = bnxt_match_filter(bp, filter);
if (ret == -EEXIST) {
- RTE_LOG(DEBUG, PMD, "Flow already exists.\n");
+ PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
/* Clear the filter that was created as part of
* validate_and_parse_flow() above
*/
bnxt_hwrm_clear_l2_filter(bp, filter);
goto free_filter;
} else if (ret == -EXDEV) {
- RTE_LOG(DEBUG, PMD, "Flow with same pattern exists");
- RTE_LOG(DEBUG, PMD, "Updating with different destination\n");
+ PMD_DRV_LOG(DEBUG, "Flow with same pattern exists");
+ PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
update_flow = true;
}

@@ -1145,7 +1145,7 @@ bnxt_flow_create(struct rte_eth_dev *dev,
ret = -EXDEV;
goto free_flow;
}
- RTE_LOG(ERR, PMD, "Successfully created flow.\n");
+ PMD_DRV_LOG(ERR, "Successfully created flow.\n");
STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
return flow;
}
@@ -1181,7 +1181,7 @@ bnxt_flow_destroy(struct rte_eth_dev *dev,

ret = bnxt_match_filter(bp, filter);
if (ret == 0)
- RTE_LOG(ERR, PMD, "Could not find matching flow\n");
+ PMD_DRV_LOG(ERR, "Could not find matching flow\n");
if (filter->filter_type == HWRM_CFA_EM_FILTER)
ret = bnxt_hwrm_clear_em_filter(bp, filter);
if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index d88061c9f..fdca424a9 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -79,7 +79,7 @@ static int page_getenum(size_t size)
return 22;
if (size <= 1 << 30)
return 30;
- RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
+ PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
return sizeof(void *) * 8 - 1;
}

@@ -161,7 +161,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
}

if (i >= HWRM_CMD_TIMEOUT) {
- RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
+ PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
req->req_type);
goto err_ret;
}
@@ -194,8 +194,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,

#define HWRM_CHECK_RESULT() do {\
if (rc) { \
- RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
- __func__, rc); \
+ PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
rte_spinlock_unlock(&bp->hwrm_lock); \
return rc; \
} \
@@ -204,18 +203,15 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
if (resp->resp_len >= 16) { \
struct hwrm_err_output *tmp_hwrm_err_op = \
(void *)resp; \
- RTE_LOG(ERR, PMD, \
- "%s error %d:%d:%08x:%04x\n", \
- __func__, \
+ PMD_DRV_LOG(ERR, \
+ "error %d:%d:%08x:%04x\n", \
rc, tmp_hwrm_err_op->cmd_err, \
rte_le_to_cpu_32(\
tmp_hwrm_err_op->opaque_0), \
rte_le_to_cpu_16(\
tmp_hwrm_err_op->opaque_1)); \
- } \
- else { \
- RTE_LOG(ERR, PMD, \
- "%s error %d\n", __func__, rc); \
+ } else { \
+ PMD_DRV_LOG(ERR, "error %d\n", rc); \
} \
rte_spinlock_unlock(&bp->hwrm_lock); \
return rc; \
@@ -369,7 +365,7 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
conf->pool_map[j].pools & (1UL << j)) {
- RTE_LOG(DEBUG, PMD,
+ PMD_DRV_LOG(DEBUG,
"Add vlan %u to vmdq pool %u\n",
conf->pool_map[j].vlan_id, j);

@@ -545,7 +541,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
getpagesize(),
getpagesize());
if (bp->pf.vf_info[i].vlan_table == NULL)
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Fail to alloc VLAN table for VF %d\n",
i);
else
@@ -556,7 +552,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
getpagesize(),
getpagesize());
if (bp->pf.vf_info[i].vlan_as_table == NULL)
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Alloc VLAN AS table for VF %d fail\n",
i);
else
@@ -588,7 +584,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
- RTE_LOG(INFO, PMD, "PTP SUPPORTED");
+ PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
HWRM_UNLOCK();
bnxt_hwrm_ptp_qcfg(bp);
}
@@ -676,13 +672,13 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)

HWRM_CHECK_RESULT();

- RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
+ PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
resp->hwrm_intf_maj, resp->hwrm_intf_min,
resp->hwrm_intf_upd,
resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
(resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
- RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
+ PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

my_version = HWRM_VERSION_MAJOR << 16;
@@ -694,28 +690,28 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
fw_version |= resp->hwrm_intf_upd;

if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
- RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
+ PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
rc = -EINVAL;
goto error;
}

if (my_version != fw_version) {
- RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
+ PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
if (my_version < fw_version) {
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Firmware API version is newer than driver.\n");
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"The driver may be missing features.\n");
} else {
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Firmware API version is older than driver.\n");
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Not all driver features may be functional.\n");
}
}

if (bp->max_req_len > resp->max_req_win_len) {
- RTE_LOG(ERR, PMD, "Unsupported request length\n");
+ PMD_DRV_LOG(ERR, "Unsupported request length\n");
rc = -EINVAL;
}
bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
@@ -738,7 +734,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
bp->hwrm_cmd_resp_dma_addr =
rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unable to map response buffer to physical memory.\n");
rc = -ENOMEM;
goto error;
@@ -750,7 +746,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
(dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
- RTE_LOG(DEBUG, PMD, "Short command supported\n");
+ PMD_DRV_LOG(DEBUG, "Short command supported\n");

rte_free(bp->hwrm_short_cmd_req_addr);

@@ -765,7 +761,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
if (bp->hwrm_short_cmd_req_dma_addr == 0) {
rte_free(bp->hwrm_short_cmd_req_addr);
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unable to map buffer to physical memory.\n");
rc = -ENOMEM;
goto error;
@@ -814,7 +810,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
if (bp->link_info.auto_mode && conf->link_speed) {
req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
- RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
+ PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
}

req.flags = rte_cpu_to_le_32(conf->phy_flags);
@@ -853,7 +849,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
} else {
req.flags =
rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
- RTE_LOG(INFO, PMD, "Force Link Down\n");
+ PMD_DRV_LOG(INFO, "Force Link Down\n");
}

rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -971,7 +967,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
break;
default:
- RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
+ PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
ring_type);
HWRM_UNLOCK();
return -1;
@@ -985,22 +981,22 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
rc = rte_le_to_cpu_16(resp->error_code);
switch (ring_type) {
case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"hwrm_ring_alloc cp failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"hwrm_ring_alloc rx failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"hwrm_ring_alloc tx failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
default:
- RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
}
@@ -1032,19 +1028,19 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,

switch (ring_type) {
case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
- RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
+ PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
rc);
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
- RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
+ PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
rc);
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
- RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
+ PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
rc);
return rc;
default:
- RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
return rc;
}
}
@@ -1168,7 +1164,7 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

/* map ring groups to this vnic */
- RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
+ PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
vnic->start_grp_id, vnic->end_grp_id);
for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
@@ -1188,7 +1184,7 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)

vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
HWRM_UNLOCK();
- RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
return rc;
}

@@ -1258,7 +1254,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct bnxt_plcmodes_cfg pmodes;

if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
- RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
return rc;
}

@@ -1323,7 +1319,7 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
- RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
return rc;
}
HWRM_PREP(req, VNIC_QCFG);
@@ -1375,7 +1371,7 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)

vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
HWRM_UNLOCK();
- RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
+ PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);

return rc;
}
@@ -1388,7 +1384,7 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
bp->hwrm_cmd_resp_addr;

if (vnic->rss_rule == 0xffff) {
- RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
+ PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
return rc;
}
HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
@@ -1412,7 +1408,7 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
- RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
return rc;
}

@@ -1854,7 +1850,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
bp->hwrm_cmd_resp_dma_addr =
rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@@ -1890,7 +1886,7 @@ bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)

STAILQ_FOREACH(flow, &vnic->flow_list, next) {
filter = flow->filter;
- RTE_LOG(ERR, PMD, "filter type %d\n", filter->filter_type);
+ PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
if (filter->filter_type == HWRM_CFA_EM_FILTER)
rc = bnxt_hwrm_clear_em_filter(bp, filter);
else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
@@ -2032,7 +2028,7 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
break;
default:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unsupported link speed %d; default to AUTO\n",
conf_link_speed);
break;
@@ -2056,20 +2052,20 @@ static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

if (one_speed & (one_speed - 1)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Invalid advertised speeds (%u) for port %u\n",
link_speed, port_id);
return -EINVAL;
}
if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unsupported advertised speed (%u) for port %u\n",
link_speed, port_id);
return -EINVAL;
}
} else {
if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unsupported advertised speeds (%u) for port %u\n",
link_speed, port_id);
return -EINVAL;
@@ -2141,7 +2137,7 @@ static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
default:
- RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
+ PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
hw_link_speed);
break;
}
@@ -2161,7 +2157,7 @@ static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
eth_link_duplex = ETH_LINK_HALF_DUPLEX;
break;
default:
- RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
+ PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
hw_link_duplex);
break;
}
@@ -2175,7 +2171,7 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)

rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Get link config failed with rc %d\n", rc);
goto exit;
}
@@ -2229,7 +2225,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
bp->link_info.media_type ==
HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
- RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
+ PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
return -EINVAL;
}

@@ -2243,7 +2239,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
port_phy_cfg:
rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Set link config failed with rc %d\n", rc);
}

@@ -2420,11 +2416,11 @@ static void reserve_resources_from_vf(struct bnxt *bp,
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

if (rc) {
- RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
copy_func_cfg_to_qcaps(cfg_req, resp);
} else if (resp->error_code) {
rc = rte_le_to_cpu_16(resp->error_code);
- RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
copy_func_cfg_to_qcaps(cfg_req, resp);
}

@@ -2455,11 +2451,11 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
return -1;
} else if (resp->error_code) {
rc = rte_le_to_cpu_16(resp->error_code);
- RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
return -1;
}
rc = rte_le_to_cpu_16(resp->vlan);
@@ -2495,7 +2491,7 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
int rc;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
+ PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
return -1;
}

@@ -2522,7 +2518,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
size_t req_buf_sz;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
+ PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
return -1;
}

@@ -2588,9 +2584,9 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);

if (rc || resp->error_code) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to initizlie VF %d\n", i);
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Not all VFs available. (%d, %d)\n",
rc, resp->error_code);
HWRM_UNLOCK();
@@ -2740,7 +2736,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
req.req_buf_page_addr[0] =
rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
if (req.req_buf_page_addr[0] == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map buffer address to physical memory\n");
return -ENOMEM;
}
@@ -3162,7 +3158,7 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
return -ENOMEM;
dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@@ -3198,7 +3194,7 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,

dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@@ -3259,7 +3255,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,

dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@@ -3316,19 +3312,19 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,

if (req.vnic_id_tbl_addr == 0) {
HWRM_UNLOCK();
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map VNIC ID table address to physical memory\n");
return -ENOMEM;
}
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
if (rc) {
HWRM_UNLOCK();
- RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
return -1;
} else if (resp->error_code) {
rc = rte_le_to_cpu_16(resp->error_code);
HWRM_UNLOCK();
- RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
return -1;
}
rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
@@ -3459,7 +3455,7 @@ int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
}
}
/* Could not find a default VNIC. */
- RTE_LOG(ERR, PMD, "No default VNIC\n");
+ PMD_DRV_LOG(ERR, "No default VNIC\n");
exit:
rte_free(vnic_ids);
return -1;
@@ -3549,7 +3545,7 @@ int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
if (filter->fw_em_filter_id == UINT64_MAX)
return 0;

- RTE_LOG(ERR, PMD, "Clear EM filter\n");
+ PMD_DRV_LOG(ERR, "Clear EM filter\n");
HWRM_PREP(req, CFA_EM_FLOW_FREE);

req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c
index 49436cfd9..8ab986936 100644
--- a/drivers/net/bnxt/bnxt_irq.c
+++ b/drivers/net/bnxt/bnxt_irq.c
@@ -84,7 +84,7 @@ static void bnxt_int_handler(void *param)
cpr->cp_ring_struct))
goto no_more;
}
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Ignoring %02x completion\n", CMP_TYPE(cmp));
break;
}
@@ -154,7 +154,7 @@ int bnxt_setup_int(struct bnxt *bp)
return 0;

setup_exit:
- RTE_LOG(ERR, PMD, "bnxt_irq_tbl setup failed\n");
+ PMD_DRV_LOG(ERR, "bnxt_irq_tbl setup failed\n");
return rc;
}

diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 59d1035fd..8fb897216 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -176,15 +176,15 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
for (sz = 0; sz < total_alloc_len; sz += getpagesize())
rte_mem_lock_page(((char *)mz->addr) + sz);
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map ring address to physical memory\n");
return -ENOMEM;
}
@@ -326,7 +326,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
ring = rxr->ag_ring_struct;
/* Agg ring */
if (ring == NULL) {
- RTE_LOG(ERR, PMD, "Alloc AGG Ring is NULL!\n");
+ PMD_DRV_LOG(ERR, "Alloc AGG Ring is NULL!\n");
goto err_out;
}

@@ -336,7 +336,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
cp_ring->fw_ring_id);
if (rc)
goto err_out;
- RTE_LOG(DEBUG, PMD, "Alloc AGG Done!\n");
+ PMD_DRV_LOG(DEBUG, "Alloc AGG Done!\n");
rxr->ag_prod = 0;
rxr->ag_doorbell =
(char *)pci_dev->mem_resource[2].addr +
@@ -347,7 +347,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +
ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
if (bnxt_init_one_rx_ring(rxq)) {
- RTE_LOG(ERR, PMD, "bnxt_init_one_rx_ring failed!\n");
+ PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
return -ENOMEM;
}
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index f7fbb2856..736936a55 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -75,7 +75,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
if (bp->rx_cp_nr_rings < 2) {
vnic = bnxt_alloc_vnic(bp);
if (!vnic) {
- RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
+ PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@@ -92,7 +92,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
vnic->end_grp_id = vnic->start_grp_id;
filter = bnxt_alloc_filter(bp);
if (!filter) {
- RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+ PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@@ -121,7 +121,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
pools = bp->rx_cp_nr_rings;
break;
default:
- RTE_LOG(ERR, PMD, "Unsupported mq_mod %d\n",
+ PMD_DRV_LOG(ERR, "Unsupported mq_mod %d\n",
dev_conf->rxmode.mq_mode);
rc = -EINVAL;
goto err_out;
@@ -135,7 +135,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
for (i = 0; i < pools; i++) {
vnic = bnxt_alloc_vnic(bp);
if (!vnic) {
- RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
+ PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@@ -166,7 +166,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
}
filter = bnxt_alloc_filter(bp);
if (!filter) {
- RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+ PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@@ -312,14 +312,14 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
int rc = 0;

if (queue_idx >= bp->max_rx_rings) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Cannot create Rx ring %d. Only %d rings available\n",
queue_idx, bp->max_rx_rings);
return -ENOSPC;
}

if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
- RTE_LOG(ERR, PMD, "nb_desc %d is invalid\n", nb_desc);
+ PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
rc = -EINVAL;
goto out;
}
@@ -332,7 +332,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq) {
- RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!\n");
+ PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
rc = -ENOMEM;
goto out;
}
@@ -341,8 +341,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;

- RTE_LOG(DEBUG, PMD, "RX Buf size is %d\n", rxq->rx_buf_use_size);
- RTE_LOG(DEBUG, PMD, "RX Buf MTU %d\n", eth_dev->data->mtu);
+ PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_use_size);
+ PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);

rc = bnxt_init_rx_ring_struct(rxq, socket_id);
if (rc)
@@ -357,7 +357,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
/* Allocate RX ring hardware descriptors */
if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring,
"rxr")) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"ring_dma_zone_reserve for rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
rc = -ENOMEM;
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 82c93d6dc..3f07c11b5 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -95,9 +95,9 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
}

if (rxbd == NULL)
- RTE_LOG(ERR, PMD, "Jumbo Frame. rxbd is NULL\n");
+ PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n");
if (rx_buf == NULL)
- RTE_LOG(ERR, PMD, "Jumbo Frame. rx_buf is NULL\n");
+ PMD_DRV_LOG(ERR, "Jumbo Frame. rx_buf is NULL\n");


rx_buf->mbuf = mbuf;
@@ -234,7 +234,7 @@ static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
/* TODO batch allocation for better performance */
while (rte_bitmap_get(rxr->ag_bitmap, next)) {
if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"agg mbuf alloc failed: prod=0x%x\n", next);
break;
}
@@ -512,7 +512,7 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
*/
prod = RING_NEXT(rxr->rx_ring_struct, prod);
if (bnxt_alloc_rx_data(rxq, rxr, prod)) {
- RTE_LOG(ERR, PMD, "mbuf alloc failed with prod=0x%x\n", prod);
+ PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n", prod);
rc = -ENOMEM;
goto rx;
}
@@ -601,7 +601,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxr->rx_prod = i;
B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
} else {
- RTE_LOG(ERR, PMD, "Alloc mbuf failed\n");
+ PMD_DRV_LOG(ERR, "Alloc mbuf failed\n");
break;
}
}
@@ -744,7 +744,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
prod = rxr->rx_prod;
for (i = 0; i < ring->ring_size; i++) {
if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"init'ed rx ring %d with %d/%d mbufs only\n",
rxq->queue_id, i, ring->ring_size);
break;
@@ -752,7 +752,6 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->rx_prod = prod;
prod = RING_NEXT(rxr->rx_ring_struct, prod);
}
- RTE_LOG(DEBUG, PMD, "%s\n", __func__);

ring = rxr->ag_ring_struct;
type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
@@ -761,7 +760,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)

for (i = 0; i < ring->ring_size; i++) {
if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"init'ed AG ring %d with %d/%d mbufs only\n",
rxq->queue_id, i, ring->ring_size);
break;
@@ -769,7 +768,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->ag_prod = prod;
prod = RING_NEXT(rxr->ag_ring_struct, prod);
}
- RTE_LOG(DEBUG, PMD, "%s AGG Done!\n", __func__);
+ PMD_DRV_LOG(DEBUG, "AGG Done!\n");

if (rxr->tpa_info) {
for (i = 0; i < BNXT_TPA_MAX; i++) {
@@ -781,7 +780,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
}
}
}
- RTE_LOG(DEBUG, PMD, "%s TPA alloc Done!\n", __func__);
+ PMD_DRV_LOG(DEBUG, "TPA alloc Done!\n");

return 0;
}
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index 470c6438d..bd93cc834 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -237,7 +237,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,

memset(bnxt_stats, 0, sizeof(*bnxt_stats));
if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
- RTE_LOG(ERR, PMD, "Device Initialization not complete!\n");
+ PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
return 0;
}

@@ -272,7 +272,7 @@ void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
- RTE_LOG(ERR, PMD, "Device Initialization not complete!\n");
+ PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
return;
}

@@ -289,7 +289,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
uint64_t tx_drop_pkts;

if (!(bp->flags & BNXT_FLAG_PORT_STATS)) {
- RTE_LOG(ERR, PMD, "xstats not supported for VF\n");
+ PMD_DRV_LOG(ERR, "xstats not supported for VF\n");
return 0;
}

@@ -371,11 +371,11 @@ void bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev)
bnxt_hwrm_port_clr_stats(bp);

if (BNXT_VF(bp))
- RTE_LOG(ERR, PMD, "Operation not supported on a VF device\n");
+ PMD_DRV_LOG(ERR, "Operation not supported on a VF device\n");
if (!BNXT_SINGLE_PF(bp))
- RTE_LOG(ERR, PMD, "Operation not supported on a MF device\n");
+ PMD_DRV_LOG(ERR, "Operation not supported on a MF device\n");
if (!(bp->flags & BNXT_FLAG_PORT_STATS))
- RTE_LOG(ERR, PMD, "Operation not supported\n");
+ PMD_DRV_LOG(ERR, "Operation not supported\n");
}

int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids,
@@ -394,7 +394,7 @@ int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids,
bnxt_dev_xstats_get_by_id_op(dev, NULL, values_copy, stat_cnt);
for (i = 0; i < limit; i++) {
if (ids[i] >= stat_cnt) {
- RTE_LOG(ERR, PMD, "id value isn't valid");
+ PMD_DRV_LOG(ERR, "id value isn't valid");
return -1;
}
values[i] = values_copy[ids[i]];
@@ -420,7 +420,7 @@ int bnxt_dev_xstats_get_names_by_id_op(struct rte_eth_dev *dev,

for (i = 0; i < limit; i++) {
if (ids[i] >= stat_cnt) {
- RTE_LOG(ERR, PMD, "id value isn't valid");
+ PMD_DRV_LOG(ERR, "id value isn't valid");
return -1;
}
strcpy(xstats_names[i].name,
diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c
index 25c33f5e4..53524346d 100644
--- a/drivers/net/bnxt/bnxt_txq.c
+++ b/drivers/net/bnxt/bnxt_txq.c
@@ -109,14 +109,14 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
int rc = 0;

if (queue_idx >= bp->max_tx_rings) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Cannot create Tx ring %d. Only %d rings available\n",
queue_idx, bp->max_tx_rings);
return -ENOSPC;
}

if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
- RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc);
+ PMD_DRV_LOG(ERR, "nb_desc %d is invalid", nb_desc);
rc = -EINVAL;
goto out;
}
@@ -131,7 +131,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
txq = rte_zmalloc_socket("bnxt_tx_queue", sizeof(struct bnxt_tx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!txq) {
- RTE_LOG(ERR, PMD, "bnxt_tx_queue allocation failed!");
+ PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!");
rc = -ENOMEM;
goto out;
}
@@ -149,14 +149,14 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
/* Allocate TX ring hardware descriptors */
if (bnxt_alloc_rings(bp, queue_idx, txq->tx_ring, NULL, txq->cp_ring,
"txr")) {
- RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for tx_ring failed!");
+ PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
bnxt_tx_queue_release_op(txq);
rc = -ENOMEM;
goto out;
}

if (bnxt_init_one_tx_ring(txq)) {
- RTE_LOG(ERR, PMD, "bnxt_init_one_tx_ring failed!");
+ PMD_DRV_LOG(ERR, "bnxt_init_one_tx_ring failed!");
bnxt_tx_queue_release_op(txq);
rc = -ENOMEM;
goto out;
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 5bac26053..d4aeb4ca8 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -107,7 +107,7 @@ int bnxt_free_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic,
}
temp = STAILQ_NEXT(temp, next);
}
- RTE_LOG(ERR, PMD, "VNIC %p is not found in pool[%d]\n", vnic, pool);
+ PMD_DRV_LOG(ERR, "VNIC %p is not found in pool[%d]\n", vnic, pool);
return -EINVAL;
}

@@ -118,7 +118,7 @@ struct bnxt_vnic_info *bnxt_alloc_vnic(struct bnxt *bp)
/* Find the 1st unused vnic from the free_vnic_list pool*/
vnic = STAILQ_FIRST(&bp->free_vnic_list);
if (!vnic) {
- RTE_LOG(ERR, PMD, "No more free VNIC resources\n");
+ PMD_DRV_LOG(ERR, "No more free VNIC resources\n");
return NULL;
}
STAILQ_REMOVE_HEAD(&bp->free_vnic_list, next);
@@ -194,13 +194,13 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
}
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map vnic address to physical memory\n");
return -ENOMEM;
}
@@ -241,7 +241,7 @@ void bnxt_free_vnic_mem(struct bnxt *bp)
for (i = 0; i < max_vnics; i++) {
vnic = &bp->vnic_info[i];
if (vnic->fw_vnic_id != (uint16_t)HWRM_NA_SIGNATURE) {
- RTE_LOG(ERR, PMD, "VNIC is not freed yet!\n");
+ PMD_DRV_LOG(ERR, "VNIC is not freed yet!\n");
/* TODO Call HWRM to free VNIC */
}
}
@@ -260,7 +260,7 @@ int bnxt_alloc_vnic_mem(struct bnxt *bp)
vnic_mem = rte_zmalloc("bnxt_vnic_info",
max_vnics * sizeof(struct bnxt_vnic_info), 0);
if (vnic_mem == NULL) {
- RTE_LOG(ERR, PMD, "Failed to alloc memory for %d VNICs",
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for %d VNICs",
max_vnics);
return -ENOMEM;
}
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c
index 595208997..cae95f8fa 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt.c
+++ b/drivers/net/bnxt/rte_pmd_bnxt.c
@@ -85,7 +85,7 @@ int rte_pmd_bnxt_set_tx_loopback(uint16_t port, uint8_t on)
bp = (struct bnxt *)eth_dev->data->dev_private;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set Tx loopback on non-PF port %d!\n",
port);
return -ENOTSUP;
@@ -127,7 +127,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
bp = (struct bnxt *)eth_dev->data->dev_private;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set all queues drop on non-PF port!\n");
return -ENOTSUP;
}
@@ -140,7 +140,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
bp->vnic_info[i].bd_stall = !on;
rc = bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[i]);
if (rc) {
- RTE_LOG(ERR, PMD, "Failed to update PF VNIC %d.\n", i);
+ PMD_DRV_LOG(ERR, "Failed to update PF VNIC %d.\n", i);
return rc;
}
}
@@ -151,7 +151,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
rte_pmd_bnxt_set_all_queues_drop_en_cb, &on,
bnxt_hwrm_vnic_cfg);
if (rc) {
- RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", i);
+ PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", i);
break;
}
}
@@ -180,7 +180,7 @@ int rte_pmd_bnxt_set_vf_mac_addr(uint16_t port, uint16_t vf,
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VF %d mac address on non-PF port %d!\n",
vf, port);
return -ENOTSUP;
@@ -224,7 +224,7 @@ int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf,

/* Requested BW can't be greater than link speed */
if (tot_rate > eth_dev->data->dev_link.link_speed) {
- RTE_LOG(ERR, PMD, "Rate > Link speed. Set to %d\n", tot_rate);
+ PMD_DRV_LOG(ERR, "Rate > Link speed. Set to %d\n", tot_rate);
return -EINVAL;
}

@@ -262,7 +262,7 @@ int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
bp = (struct bnxt *)dev->data->dev_private;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set mac spoof on non-PF port %d!\n", port);
return -EINVAL;
}
@@ -314,7 +314,7 @@ int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
bp = (struct bnxt *)dev->data->dev_private;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VLAN spoof on non-PF port %d!\n", port);
return -EINVAL;
}
@@ -333,7 +333,7 @@ int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
rc = -1;
}
} else {
- RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", vf);
+ PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", vf);
}

return rc;
@@ -367,7 +367,7 @@ rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VF %d stripq on non-PF port %d!\n",
vf, port);
return -ENOTSUP;
@@ -377,7 +377,7 @@ rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
rte_pmd_bnxt_set_vf_vlan_stripq_cb, &on,
bnxt_hwrm_vnic_cfg);
if (rc)
- RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", vf);
+ PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", vf);

return rc;
}
@@ -407,7 +407,7 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
return -EINVAL;

if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) {
- RTE_LOG(ERR, PMD, "Currently cannot toggle this setting\n");
+ PMD_DRV_LOG(ERR, "Currently cannot toggle this setting\n");
return -ENOTSUP;
}

@@ -430,7 +430,7 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
&bp->pf.vf_info[vf].l2_rx_mask,
bnxt_set_rx_mask_no_vlan);
if (rc)
- RTE_LOG(ERR, PMD, "bnxt_hwrm_func_vf_vnic_set_rxmask failed\n");
+ PMD_DRV_LOG(ERR, "bnxt_hwrm_func_vf_vnic_set_rxmask failed\n");

return rc;
}
@@ -442,7 +442,7 @@ static int bnxt_set_vf_table(struct bnxt *bp, uint16_t vf)
struct bnxt_vnic_info vnic;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VLAN table on non-PF port!\n");
return -EINVAL;
}
@@ -455,7 +455,7 @@ static int bnxt_set_vf_table(struct bnxt *bp, uint16_t vf)
/* This simply indicates there's no driver loaded.
* This is not an error.
*/
- RTE_LOG(ERR, PMD, "Unable to get default VNIC for VF %d\n", vf);
+ PMD_DRV_LOG(ERR, "Unable to get default VNIC for VF %d\n", vf);
} else {
memset(&vnic, 0, sizeof(vnic));
vnic.fw_vnic_id = dflt_vnic;
@@ -518,9 +518,9 @@ int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
/* Now check that there's space */
if (cnt == getpagesize() / sizeof(struct
bnxt_vlan_antispoof_table_entry)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"VLAN anti-spoof table is full\n");
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"VF %d cannot add VLAN %u\n",
i, vlan);
rc = -1;
@@ -585,7 +585,7 @@ int rte_pmd_bnxt_get_vf_stats(uint16_t port,
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to get VF %d stats on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -612,7 +612,7 @@ int rte_pmd_bnxt_reset_vf_stats(uint16_t port,
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to reset VF %d stats on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -638,7 +638,7 @@ int rte_pmd_bnxt_get_vf_rx_status(uint16_t port, uint16_t vf_id)
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to query VF %d RX stats on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -665,7 +665,7 @@ int rte_pmd_bnxt_get_vf_tx_drop_count(uint16_t port, uint16_t vf_id,
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to query VF %d TX drops on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -697,7 +697,7 @@ int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct ether_addr *addr,
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to config VF %d MAC on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -773,7 +773,7 @@ rte_pmd_bnxt_set_vf_vlan_insert(uint16_t port, uint16_t vf,
return -EINVAL;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VF %d vlan insert on non-PF port %d!\n",
vf, port);
return -ENOTSUP;
@@ -807,7 +807,7 @@ int rte_pmd_bnxt_set_vf_persist_stats(uint16_t port, uint16_t vf, uint8_t on)
bp = (struct bnxt *)dev->data->dev_private;

if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set persist stats on non-PF port %d!\n",
port);
return -EINVAL;
--
2.14.3 (Apple Git-98)
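[Note on the conversion above: the PMD_DRV_LOG() calls rely on a driver-specific dynamic log type registered at load time. A minimal sketch of how such a type is typically wired up is below; the variable name bnxt_logtype_driver, the "pmd.net.bnxt.driver" string and the exact wrapper are assumptions following the usual DPDK pattern, not necessarily the patch's exact definitions.]

    #include <rte_log.h>

    /* Assumed name of the driver's dynamic log type variable. */
    int bnxt_logtype_driver;

    /* A plausible definition of the PMD_DRV_LOG() wrapper used above:
     * route messages through the dynamic type instead of the generic
     * RTE_LOGTYPE_PMD used by RTE_LOG(..., PMD, ...).
     */
    #define PMD_DRV_LOG(level, fmt, args...) \
    	rte_log(RTE_LOG_ ## level, bnxt_logtype_driver, "bnxt: " fmt, ## args)

    /* Register the type once at load time; "pmd.net.bnxt.driver" follows
     * the usual pmd.<class>.<name> naming and is an assumption here.
     */
    static void __attribute__((constructor))
    bnxt_init_log(void)
    {
    	bnxt_logtype_driver = rte_log_register("pmd.net.bnxt.driver");
    	if (bnxt_logtype_driver >= 0)
    		rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
    }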
Ajit Khaparde
2018-01-26 17:31:56 UTC
Permalink
Register for async events from the FW.
New events we are registering for include Link speed config changes,
PF driver unload and VF config change. Also log a message when the
async event arrives on the completion ring.

Signed-off-by: Ajit Khaparde <***@broadcom.com>
---
drivers/net/bnxt/bnxt_cpr.c | 11 ++++++++++-
drivers/net/bnxt/bnxt_hwrm.c | 9 +++++++--
drivers/net/bnxt/bnxt_hwrm.h | 11 +++++++++++
3 files changed, 28 insertions(+), 3 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 663a5223d..737bb060a 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -57,8 +57,17 @@ void bnxt_handle_async_event(struct bnxt *bp,
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
bnxt_link_update_op(bp->eth_dev, 1);
break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
+ PMD_DRV_LOG(INFO, "Async event: PF driver unloaded\n");
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
+ PMD_DRV_LOG(INFO, "Async event: VF config changed\n");
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
+ PMD_DRV_LOG(INFO, "Port conn async event\n");
+ break;
default:
- PMD_DRV_LOG(DEBUG, "handle_async_event id = 0x%x\n", event_id);
+ PMD_DRV_LOG(INFO, "handle_async_event id = 0x%x\n", event_id);
break;
}
}
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index fdca424a9..75e03ad5d 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -637,8 +637,13 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
sizeof(bp->pf.vf_req_fwd)));
}

- req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */
- //memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
+ req.async_event_fwd[0] |=
+ rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
+ ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
+ ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
+ req.async_event_fwd[1] |=
+ rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
+ ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 46f6f3208..108f8e81d 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -42,6 +42,17 @@ struct bnxt_filter_info;
struct bnxt_cp_ring_info;

#define HWRM_SEQ_ID_INVALID -1U
+/* Convert Bit field location to value */
+#define ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE \
+ (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE)
+#define ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED \
+ (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED)
+#define ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE \
+ (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE)
+#define ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD \
+ (1 << (HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD - 32))
+#define ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE \
+ (1 << (HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE - 32))

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,
struct bnxt_vnic_info *vnic);
--
2.14.3 (Apple Git-98)
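[Note on the ASYNC_CMPL_EVENT_ID_* macros in the patch above: event IDs 0-31 select a bit in async_event_fwd[0] and IDs 32-63 a bit in async_event_fwd[1], which is why PF_DRVR_UNLOAD and VF_CFG_CHANGE subtract 32. A small generic sketch of that mapping, not driver code:]

    #include <stdint.h>

    /* Set the forwarding bit for an async event ID in a bitmap made of
     * 32-bit words, such as req.async_event_fwd[].  Endianness conversion
     * (rte_cpu_to_le_32) would still be applied to the assembled word,
     * exactly as the patch does.
     */
    static inline void
    async_event_fwd_set(uint32_t *fwd, unsigned int event_id)
    {
    	fwd[event_id / 32] |= 1U << (event_id % 32);
    }

    /* Example: event ID 33 lands in word 1, bit 1 -- the same result the
     * macros compute with their explicit "- 32".
     */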
Ajit Khaparde
2018-01-26 17:31:57 UTC
Permalink
In certain cases the MAC address of a port could be all zeros.
Catch it early, log a message and fail the initialization.

Signed-off-by: Ajit Khaparde <***@broadcom.com>
---
drivers/net/bnxt/bnxt_ethdev.c | 10 ++++++++++
drivers/net/bnxt/bnxt_filter.c | 2 +-
drivers/net/bnxt/bnxt_filter.h | 1 +
3 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index daed1fc80..76fff711f 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -3246,6 +3246,16 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
rc = -ENOMEM;
goto error_free;
}
+
+ if (check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {
+ PMD_DRV_LOG(ERR,
+ "Invalid MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
+ bp->dflt_mac_addr[0], bp->dflt_mac_addr[1],
+ bp->dflt_mac_addr[2], bp->dflt_mac_addr[3],
+ bp->dflt_mac_addr[4], bp->dflt_mac_addr[5]);
+ rc = -EINVAL;
+ goto error_free;
+ }
/* Copy the permanent MAC from the qcap response address now. */
memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index 0716dd8fd..032e8eed0 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -250,7 +250,7 @@ nxt_non_void_action(const struct rte_flow_action *cur)
}
}

-static inline int check_zero_bytes(const uint8_t *bytes, int len)
+int check_zero_bytes(const uint8_t *bytes, int len)
{
int i;
for (i = 0; i < len; i++)
diff --git a/drivers/net/bnxt/bnxt_filter.h b/drivers/net/bnxt/bnxt_filter.h
index 2591a87e2..a3c702df6 100644
--- a/drivers/net/bnxt/bnxt_filter.h
+++ b/drivers/net/bnxt/bnxt_filter.h
@@ -97,6 +97,7 @@ struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp);
void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter);
struct bnxt_filter_info *bnxt_get_l2_filter(struct bnxt *bp,
struct bnxt_filter_info *nf, struct bnxt_vnic_info *vnic);
+int check_zero_bytes(const uint8_t *bytes, int len);

#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR \
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR
--
2.14.3 (Apple Git-98)
Ajit Khaparde
2018-01-26 17:31:58 UTC
Permalink
Currently this is implemented entirely in the PMD as there is no explicit
support in the HW. Re-program the RSS Table without this queue on stop
and add it back to the table on start.

Signed-off-by: Somnath Kotur <***@broadcom.com>
Signed-off-by: Ajit Khaparde <***@broadcom.com>
--
v1->v2: address review comments
---
drivers/net/bnxt/bnxt_ethdev.c | 33 +++++++++-----------------
drivers/net/bnxt/bnxt_hwrm.c | 28 ++++++++++++++++++++++
drivers/net/bnxt/bnxt_hwrm.h | 2 ++
drivers/net/bnxt/bnxt_rxq.c | 53 ++++++++++++++++++++++++++++++++++++++++++
drivers/net/bnxt/bnxt_rxq.h | 6 ++++-
drivers/net/bnxt/bnxt_rxr.c | 4 ++++
drivers/net/bnxt/bnxt_rxr.h | 3 ++-
drivers/net/bnxt/bnxt_txq.h | 1 -
drivers/net/bnxt/bnxt_txr.c | 32 +++++++++++++++++++++++++
drivers/net/bnxt/bnxt_txr.h | 2 ++
drivers/net/bnxt/bnxt_vnic.h | 1 -
11 files changed, 139 insertions(+), 26 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 76fff711f..2268aba2a 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -202,7 +202,7 @@ static int bnxt_alloc_mem(struct bnxt *bp)

static int bnxt_init_chip(struct bnxt *bp)
{
- unsigned int i, rss_idx, fw_idx;
+ unsigned int i;
struct rte_eth_link new;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
@@ -279,27 +279,12 @@ static int bnxt_init_chip(struct bnxt *bp)
i, rc);
goto err_out;
}
- if (vnic->rss_table && vnic->hash_type) {
- /*
- * Fill the RSS hash & redirection table with
- * ring group ids for all VNICs
- */
- for (rss_idx = 0, fw_idx = 0;
- rss_idx < HW_HASH_INDEX_SIZE;
- rss_idx++, fw_idx++) {
- if (vnic->fw_grp_ids[fw_idx] ==
- INVALID_HW_RING_ID)
- fw_idx = 0;
- vnic->rss_table[rss_idx] =
- vnic->fw_grp_ids[fw_idx];
- }
- rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
- if (rc) {
- PMD_DRV_LOG(ERR,
- "HWRM vnic %d set RSS failure rc: %x\n",
- i, rc);
- goto err_out;
- }
+
+ rc = bnxt_vnic_rss_configure(bp, vnic);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "HWRM vnic set RSS failure rc: %x\n", rc);
+ goto err_out;
}

bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
@@ -3022,6 +3007,10 @@ static const struct eth_dev_ops bnxt_dev_ops = {
.rx_queue_count = bnxt_rx_queue_count_op,
.rx_descriptor_status = bnxt_rx_descriptor_status_op,
.tx_descriptor_status = bnxt_tx_descriptor_status_op,
+ .rx_queue_start = bnxt_rx_queue_start,
+ .rx_queue_stop = bnxt_rx_queue_stop,
+ .tx_queue_start = bnxt_tx_queue_start,
+ .tx_queue_stop = bnxt_tx_queue_stop,
.filter_ctrl = bnxt_filter_ctrl_op,
.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
.get_eeprom_length = bnxt_get_eeprom_length_op,
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 75e03ad5d..8fac05251 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -3676,3 +3676,31 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,

return 0;
}
+
+int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ unsigned int rss_idx, fw_idx, i;
+
+ if (vnic->rss_table && vnic->hash_type) {
+ /*
+ * Fill the RSS hash & redirection table with
+ * ring group ids for all VNICs
+ */
+ for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
+ rss_idx++, fw_idx++) {
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ fw_idx %= bp->rx_cp_nr_rings;
+ if (vnic->fw_grp_ids[fw_idx] !=
+ INVALID_HW_RING_ID)
+ break;
+ fw_idx++;
+ }
+ if (i == bp->rx_cp_nr_rings)
+ return 0;
+ vnic->rss_table[rss_idx] =
+ vnic->fw_grp_ids[fw_idx];
+ }
+ return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+ }
+ return 0;
+}
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 108f8e81d..f11e72a35 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -187,4 +187,6 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
uint16_t dir_attr, const uint8_t *data,
size_t data_len);
int bnxt_hwrm_ptp_cfg(struct bnxt *bp);
+int bnxt_vnic_rss_configure(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic);
#endif
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 736936a55..16304865d 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -405,3 +405,56 @@ bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
}
return rc;
}
+
+int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
+ struct bnxt_vnic_info *vnic = NULL;
+
+ if (rxq == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ rxq->rx_deferred_start = false;
+ PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ vnic = rxq->vnic;
+ if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
+ return 0;
+ PMD_DRV_LOG(DEBUG, "vnic = %p fw_grp_id = %d\n",
+ vnic, bp->grp_info[rx_queue_id + 1].fw_grp_id);
+ vnic->fw_grp_ids[rx_queue_id] =
+ bp->grp_info[rx_queue_id + 1].fw_grp_id;
+ return bnxt_vnic_rss_configure(bp, vnic);
+ }
+
+ return 0;
+}
+
+int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
+ struct bnxt_vnic_info *vnic = NULL;
+
+ if (rxq == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ rxq->rx_deferred_start = true;
+ PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
+
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ vnic = rxq->vnic;
+ vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
+ return bnxt_vnic_rss_configure(bp, vnic);
+ }
+ return 0;
+}
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index be190195a..c7acaa755 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -50,6 +50,7 @@ struct bnxt_rx_queue {
uint16_t reg_idx; /* RX queue register index */
uint16_t port_id; /* Device port identifier */
uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
+ uint8_t rx_deferred_start; /* not in global dev start */

struct bnxt *bp;
int index;
@@ -75,5 +76,8 @@ int bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev,
uint16_t queue_id);
int bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev,
uint16_t queue_id);
-
+int bnxt_rx_queue_start(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+int bnxt_rx_queue_stop(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
#endif
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 3f07c11b5..9e70c8604 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -545,6 +545,10 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t ag_prod = rxr->ag_prod;
int rc = 0;

+ /* If Rx Q was stopped return */
+ if (rxq->rx_deferred_start)
+ return 0;
+
/* Handle RX burst request */
while (1) {
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h
index a94373d19..f3ed49bd6 100644
--- a/drivers/net/bnxt/bnxt_rxr.h
+++ b/drivers/net/bnxt/bnxt_rxr.h
@@ -120,5 +120,6 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
void bnxt_free_rx_rings(struct bnxt *bp);
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
-
+int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
#endif
diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h
index f753c10f2..e27c34fa9 100644
--- a/drivers/net/bnxt/bnxt_txq.h
+++ b/drivers/net/bnxt/bnxt_txq.h
@@ -71,5 +71,4 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
uint16_t nb_desc,
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
-
#endif
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 2f2c87119..2c81a37c2 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -349,6 +349,11 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Handle TX completions */
bnxt_handle_tx_cp(txq);

+ /* Tx queue was stopped; wait for it to be restarted */
+ if (txq->tx_deferred_start) {
+ PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
+ return 0;
+ }
/* Handle TX burst request */
for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) {
if (bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq)) {
@@ -364,3 +369,30 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,

return nb_tx_pkts;
}
+
+int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
+
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ txq->tx_deferred_start = false;
+ PMD_DRV_LOG(DEBUG, "Tx queue started\n");
+
+ return 0;
+}
+
+int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
+
+ /* Handle TX completions */
+ bnxt_handle_tx_cp(txq);
+
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ txq->tx_deferred_start = true;
+ PMD_DRV_LOG(DEBUG, "Tx queue stopped\n");
+
+ return 0;
+}
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
index 2feac51db..d88b15ab8 100644
--- a/drivers/net/bnxt/bnxt_txr.h
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -68,6 +68,8 @@ int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq);
int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id);
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

#define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h
index 875dc3c1c..d8d35c7dc 100644
--- a/drivers/net/bnxt/bnxt_vnic.h
+++ b/drivers/net/bnxt/bnxt_vnic.h
@@ -93,5 +93,4 @@ void bnxt_free_vnic_attributes(struct bnxt *bp);
int bnxt_alloc_vnic_attributes(struct bnxt *bp);
void bnxt_free_vnic_mem(struct bnxt *bp);
int bnxt_alloc_vnic_mem(struct bnxt *bp);
-
#endif
--
2.14.3 (Apple Git-98)
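[Note on the patch above: the heart of the start/stop support is the RSS re-programming in bnxt_vnic_rss_configure(). A stopped Rx queue has its ring group set to INVALID_HW_RING_ID, and the redirection table is refilled round-robin over the groups that remain valid before being sent to the firmware via bnxt_hwrm_vnic_rss_cfg(). A simplified, standalone model of the fill loop, using plain arrays instead of the driver structures (names here are made up):]

    #include <stdint.h>

    #define INVALID_GRP_ID	0xFFFF
    #define HASH_TABLE_SIZE	128	/* stands in for HW_HASH_INDEX_SIZE */

    /* Refill rss_table[] round-robin from grp_ids[], skipping entries
     * marked invalid (i.e. stopped Rx queues).  If every group is invalid
     * there is nothing to program, mirroring the driver's early return.
     */
    static void
    fill_rss_table(uint16_t *rss_table, const uint16_t *grp_ids,
    	       unsigned int nr_grps)
    {
    	unsigned int rss_idx, fw_idx = 0, probed;

    	for (rss_idx = 0; rss_idx < HASH_TABLE_SIZE; rss_idx++, fw_idx++) {
    		for (probed = 0; probed < nr_grps; probed++) {
    			fw_idx %= nr_grps;
    			if (grp_ids[fw_idx] != INVALID_GRP_ID)
    				break;
    			fw_idx++;
    		}
    		if (probed == nr_grps)
    			return;	/* all queues stopped: nothing to program */
    		rss_table[rss_idx] = grp_ids[fw_idx];
    	}
    }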
Ajit Khaparde
2018-01-26 17:31:59 UTC
Permalink
When the driver is loaded on a 100G NIC, the port speed is not
displayed correctly. Parse the 100G speed before displaying it.

Signed-off-by: Ajit Khaparde <***@broadcom.com>
--
v1->v2: add a missing break statement
---
drivers/net/bnxt/bnxt_hwrm.c | 3 +++
1 file changed, 3 insertions(+)

diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 8fac05251..4987cf0a9 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -2140,6 +2140,9 @@ static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
eth_link_speed = ETH_SPEED_NUM_50G;
break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
+ eth_link_speed = ETH_SPEED_NUM_100G;
+ break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
default:
PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
--
2.14.3 (Apple Git-98)
Ajit Khaparde
2018-01-26 17:32:00 UTC
Permalink
While using RSS, the pool count should be 1.
Fixes: 8103a57ab432a ("net/bnxt: handle Rx multi queue creation properly")
Cc: ***@dpdk.org
Signed-off-by: Ajit Khaparde <***@broadcom.com>
---
drivers/net/bnxt/bnxt_rxq.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 16304865d..d49f35462 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -118,7 +118,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
pools = max_pools;
break;
case ETH_MQ_RX_RSS:
- pools = bp->rx_cp_nr_rings;
+ pools = 1;
break;
default:
PMD_DRV_LOG(ERR, "Unsupported mq_mod %d\n",
--
2.14.3 (Apple Git-98)
Ferruh Yigit
2018-01-26 18:00:49 UTC
Permalink
Post by Ajit Khaparde
Please apply this patchset.
net/bnxt: fix size of tx ring in HW
net/bnxt: use driver specific dynamic log type
net/bnxt: register for more async events
net/bnxt: check if MAC address is all zeros
net/bnxt: add 100G speed detection
net/bnxt: fix number of pools for RSS
net/bnxt: support for rx/tx_queue_start/stop ops
Series applied to dpdk-next-net/master, thanks.

Ajit Khaparde
2018-01-26 17:33:45 UTC
Permalink
Post by Ajit Khaparde
Post by Ajit Khaparde
When the driver is loaded on a 100G NIC, the port speed is not
displayed correctly. Parse the 100G speed before displaying it.
---
drivers/net/bnxt/bnxt_hwrm.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 8fac05251..d412e51fc 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -2139,6 +2139,8 @@ static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
Post by Ajit Khaparde
break;
eth_link_speed = ETH_SPEED_NUM_50G;
This case doesn't have a "break" statement, which looks unintentional; but
if it is intentional, please put a /* Fallthrough */ comment to prevent the build error
[1].
No. It's unintentional. I copy-pasted those lines from the test setup and
missed it.

I am sending a fresh set. Thanks
Post by Ajit Khaparde
[1]
...dpdk/drivers/net/bnxt/bnxt_hwrm.c: In function
...dpdk/drivers/net/bnxt/bnxt_hwrm.c:2141:18: error: this statement may fall through [-Werror=implicit-fallthrough=]
eth_link_speed = ETH_SPEED_NUM_50G;
...dpdk/drivers/net/bnxt/bnxt_hwrm.c:2142:2: note: here
^~~~
Post by Ajit Khaparde
+ eth_link_speed = ETH_SPEED_NUM_100G;
break;
Ajit Khaparde
2018-01-25 22:47:53 UTC
Permalink
Register for async events from the FW.
New events we are registering for include Link speed config changes,
PF driver unload and VF config change. Also log a message when the
async event arrives on the completion ring.

Signed-off-by: Ajit Khaparde <***@broadcom.com>
---
drivers/net/bnxt/bnxt_cpr.c | 11 ++++++++++-
drivers/net/bnxt/bnxt_hwrm.c | 9 +++++++--
drivers/net/bnxt/bnxt_hwrm.h | 11 +++++++++++
3 files changed, 28 insertions(+), 3 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 663a5223d..737bb060a 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -57,8 +57,17 @@ void bnxt_handle_async_event(struct bnxt *bp,
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
bnxt_link_update_op(bp->eth_dev, 1);
break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
+ PMD_DRV_LOG(INFO, "Async event: PF driver unloaded\n");
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
+ PMD_DRV_LOG(INFO, "Async event: VF config changed\n");
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
+ PMD_DRV_LOG(INFO, "Port conn async event\n");
+ break;
default:
- PMD_DRV_LOG(DEBUG, "handle_async_event id = 0x%x\n", event_id);
+ PMD_DRV_LOG(INFO, "handle_async_event id = 0x%x\n", event_id);
break;
}
}
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index fdca424a9..75e03ad5d 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -637,8 +637,13 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
sizeof(bp->pf.vf_req_fwd)));
}

- req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */
- //memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
+ req.async_event_fwd[0] |=
+ rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
+ ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
+ ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
+ req.async_event_fwd[1] |=
+ rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
+ ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 46f6f3208..108f8e81d 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -42,6 +42,17 @@ struct bnxt_filter_info;
struct bnxt_cp_ring_info;

#define HWRM_SEQ_ID_INVALID -1U
+/* Convert Bit field location to value */
+#define ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE \
+ (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE)
+#define ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED \
+ (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED)
+#define ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE \
+ (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE)
+#define ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD \
+ (1 << (HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD - 32))
+#define ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE \
+ (1 << (HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE - 32))

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,
struct bnxt_vnic_info *vnic);
--
2.14.3 (Apple Git-98)
Ferruh Yigit
2018-01-26 17:35:15 UTC
Permalink
Post by Ajit Khaparde
Please apply this patchset.
I have incorporated most of the comments that we got in v1.
The switch to SPDX tags is in the works and will be submitted separately.
net/bnxt: fix size of tx ring in HW
net/bnxt: use driver specific dynamic log type
net/bnxt: register for more async events
net/bnxt: check if MAC address is all zeros
net/bnxt: add 100G speed detection
net/bnxt: fix number of pools for RSS
net/bnxt: support for rx/tx_queue_start/stop ops
Series applied to dpdk-next-net/master, thanks.

Except 6/7, which looks like it can be an independent patch. And since rc2 is close, I
didn't want that patch to block the set.
Ferruh Yigit
2018-01-26 17:37:18 UTC
Permalink
Post by Ferruh Yigit
Post by Ajit Khaparde
Please apply this patchset.
I have incorporated most of the comments that we got in v1.
The switch to SPDX tags is in the works and will be submitted separately.
net/bnxt: fix size of tx ring in HW
net/bnxt: use driver specific dynamic log type
net/bnxt: register for more async events
net/bnxt: check if MAC address is all zeros
net/bnxt: add 100G speed detection
net/bnxt: fix number of pools for RSS
net/bnxt: support for rx/tx_queue_start/stop ops
Series applied to dpdk-next-net/master, thanks.
Except 6/7, which looks like can be independent patch. And since rc2 is close, I
didn't want that patch to block the set.
Ahh, a new version arrived while I was writing this mail, so I will drop the existing
ones and apply the new version of the set. Thanks.
Ajit Khaparde
2018-01-22 06:20:45 UTC
Permalink
In certain cases the MAC address of a port could be all zeros.
Catch it early, log a message and fail the initialization.

Signed-off-by: Ajit Khaparde <***@broadcom.com>
---
drivers/net/bnxt/bnxt_ethdev.c | 10 ++++++++++
drivers/net/bnxt/bnxt_filter.c | 2 +-
drivers/net/bnxt/bnxt_filter.h | 1 +
3 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index af4673dc2..ebc2dfab2 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -3247,6 +3247,16 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
rc = -ENOMEM;
goto error_free;
}
+
+ if (check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {
+ PMD_DRV_LOG(ERR,
+ "Invalid MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
+ bp->dflt_mac_addr[0], bp->dflt_mac_addr[1],
+ bp->dflt_mac_addr[2], bp->dflt_mac_addr[3],
+ bp->dflt_mac_addr[4], bp->dflt_mac_addr[5]);
+ rc = -EINVAL;
+ goto error_free;
+ }
/* Copy the permanent MAC from the qcap response address now. */
memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index 0716dd8fd..032e8eed0 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -250,7 +250,7 @@ nxt_non_void_action(const struct rte_flow_action *cur)
}
}

-static inline int check_zero_bytes(const uint8_t *bytes, int len)
+int check_zero_bytes(const uint8_t *bytes, int len)
{
int i;
for (i = 0; i < len; i++)
diff --git a/drivers/net/bnxt/bnxt_filter.h b/drivers/net/bnxt/bnxt_filter.h
index 2591a87e2..a3c702df6 100644
--- a/drivers/net/bnxt/bnxt_filter.h
+++ b/drivers/net/bnxt/bnxt_filter.h
@@ -97,6 +97,7 @@ struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp);
void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter);
struct bnxt_filter_info *bnxt_get_l2_filter(struct bnxt *bp,
struct bnxt_filter_info *nf, struct bnxt_vnic_info *vnic);
+int check_zero_bytes(const uint8_t *bytes, int len);

#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR \
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR
--
2.14.3 (Apple Git-98)
Ferruh Yigit
2018-01-22 12:26:20 UTC
Permalink
Post by Ajit Khaparde
In certain cases the MAC address of a port could be all zeros.
Catch it early, log a message and fail the initiaization.
<...>
Post by Ajit Khaparde
@@ -3247,6 +3247,16 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
rc = -ENOMEM;
goto error_free;
}
+
+ if (check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {
There is already a function is_zero_ether_addr() which can be used here that
prevents exposing your check_zero_bytes() function, up to you which one to use.
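[Note on the suggestion above: is_zero_ether_addr() lives in rte_ether.h and operates on a struct ether_addr rather than a raw byte array. A small sketch of what the check could look like with it; validate_dflt_mac() is a hypothetical wrapper, and viewing the driver's plain uint8_t MAC buffer through struct ether_addr is an assumption, not the exact driver code:]

    #include <errno.h>
    #include <rte_ether.h>

    /* Validate a permanent MAC read from firmware before using it. */
    static int
    validate_dflt_mac(const uint8_t mac[ETHER_ADDR_LEN])
    {
    	const struct ether_addr *ea = (const struct ether_addr *)mac;

    	if (is_zero_ether_addr(ea))
    		return -EINVAL;	/* same failure path as the patch */
    	return 0;
    }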
Ajit Khaparde
2018-01-22 06:20:46 UTC
Permalink
Currently this is implemented entirely in the PMD as there is no explicit
support in the HW. Re-program the RSS Table without this queue on stop
and add it back to the table on start.

Signed-off-by: Somnath Kotur <***@broadcom.com>
Signed-off-by: Ajit Khaparde <***@broadcom.com>
---
drivers/net/bnxt/bnxt_ethdev.c | 125 ++++++++++++++++++++++++++++++-----------
drivers/net/bnxt/bnxt_rxq.h | 2 +-
drivers/net/bnxt/bnxt_rxr.c | 4 ++
drivers/net/bnxt/bnxt_rxr.h | 3 +-
drivers/net/bnxt/bnxt_txq.h | 1 -
drivers/net/bnxt/bnxt_txr.c | 32 +++++++++++
drivers/net/bnxt/bnxt_txr.h | 2 +
7 files changed, 133 insertions(+), 36 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index ebc2dfab2..82d2416ba 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -1,4 +1,4 @@
-/*-
+/*
* BSD LICENSE
*
* Copyright(c) Broadcom Limited.
@@ -200,9 +200,37 @@ static int bnxt_alloc_mem(struct bnxt *bp)
return rc;
}

+static int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ unsigned int rss_idx, fw_idx, i;
+
+ if (vnic->rss_table && vnic->hash_type) {
+ /*
+ * Fill the RSS hash & redirection table with
+ * ring group ids for all VNICs
+ */
+ for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
+ rss_idx++, fw_idx++) {
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ fw_idx %= bp->rx_cp_nr_rings;
+ if (vnic->fw_grp_ids[fw_idx] !=
+ INVALID_HW_RING_ID)
+ break;
+ fw_idx++;
+ }
+ if (i == bp->rx_cp_nr_rings)
+ return 0;
+ vnic->rss_table[rss_idx] =
+ vnic->fw_grp_ids[fw_idx];
+ }
+ return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+ }
+ return 0;
+}
+
static int bnxt_init_chip(struct bnxt *bp)
{
- unsigned int i, rss_idx, fw_idx;
+ unsigned int i;
struct rte_eth_link new;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
@@ -279,27 +307,12 @@ static int bnxt_init_chip(struct bnxt *bp)
i, rc);
goto err_out;
}
- if (vnic->rss_table && vnic->hash_type) {
- /*
- * Fill the RSS hash & redirection table with
- * ring group ids for all VNICs
- */
- for (rss_idx = 0, fw_idx = 0;
- rss_idx < HW_HASH_INDEX_SIZE;
- rss_idx++, fw_idx++) {
- if (vnic->fw_grp_ids[fw_idx] ==
- INVALID_HW_RING_ID)
- fw_idx = 0;
- vnic->rss_table[rss_idx] =
- vnic->fw_grp_ids[fw_idx];
- }
- rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
- if (rc) {
- PMD_DRV_LOG(ERR,
- "HWRM vnic %d set RSS failure rc: %x\n",
- i, rc);
- goto err_out;
- }
+
+ rc = bnxt_vnic_rss_configure(bp, vnic);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "HWRM vnic set RSS failure rc: %x\n", rc);
+ goto err_out;
}

bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
@@ -321,8 +334,7 @@ static int bnxt_init_chip(struct bnxt *bp)
!RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
intr_vector = bp->eth_dev->data->nb_rx_queues;
- PMD_DRV_LOG(INFO, "%s(): intr_vector = %d\n", __func__,
- intr_vector);
+ PMD_DRV_LOG(INFO, "intr_vector = %d\n", intr_vector);
if (intr_vector > bp->rx_cp_nr_rings) {
PMD_DRV_LOG(ERR, "At most %d intr queues supported",
bp->rx_cp_nr_rings);
@@ -342,9 +354,9 @@ static int bnxt_init_chip(struct bnxt *bp)
" intr_vec", bp->eth_dev->data->nb_rx_queues);
return -ENOMEM;
}
- PMD_DRV_LOG(DEBUG, "%s(): intr_handle->intr_vec = %p "
+ PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
- __func__, intr_handle->intr_vec, intr_handle->nb_efd,
+ intr_handle->intr_vec, intr_handle->nb_efd,
intr_handle->max_intr);
}

@@ -2842,8 +2854,8 @@ bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
uint32_t dir_entries;
uint32_t entry_length;

- PMD_DRV_LOG(INFO, "%s(): %04x:%02x:%02x:%02x\n",
- __func__, bp->pdev->addr.domain, bp->pdev->addr.bus,
+ PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x\n",
+ bp->pdev->addr.domain, bp->pdev->addr.bus,
bp->pdev->addr.devid, bp->pdev->addr.function);

rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
@@ -2861,8 +2873,8 @@ bnxt_get_eeprom_op(struct rte_eth_dev *dev,
uint32_t index;
uint32_t offset;

- PMD_DRV_LOG(INFO, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
- "len = %d\n", __func__, bp->pdev->addr.domain,
+ PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
+ "len = %d\n", bp->pdev->addr.domain,
bp->pdev->addr.bus, bp->pdev->addr.devid,
bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);

@@ -2930,8 +2942,8 @@ bnxt_set_eeprom_op(struct rte_eth_dev *dev,
uint8_t index, dir_op;
uint16_t type, ext, ordinal, attr;

- PMD_DRV_LOG(INFO, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
- "len = %d\n", __func__, bp->pdev->addr.domain,
+ PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
+ "len = %d\n", bp->pdev->addr.domain,
bp->pdev->addr.bus, bp->pdev->addr.devid,
bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);

@@ -2969,6 +2981,49 @@ bnxt_set_eeprom_op(struct rte_eth_dev *dev,
return 0;
}

+int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
+ struct bnxt_vnic_info *vnic = NULL;
+
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ rxq->rx_deferred_start = false;
+ PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ vnic = rxq->vnic;
+ if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
+ return 0;
+ PMD_DRV_LOG(DEBUG, "vnic = %p fw_grp_id = %d\n",
+ vnic, bp->grp_info[rx_queue_id + 1].fw_grp_id);
+ vnic->fw_grp_ids[rx_queue_id] =
+ bp->grp_info[rx_queue_id + 1].fw_grp_id;
+ return bnxt_vnic_rss_configure(bp, vnic);
+ }
+
+ return 0;
+}
+
+int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
+ struct bnxt_vnic_info *vnic = NULL;
+
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ rxq->rx_deferred_start = true;
+ PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
+
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ vnic = rxq->vnic;
+ vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
+ return bnxt_vnic_rss_configure(bp, vnic);
+ }
+ return 0;
+}
+
/*
* Initialization
*/
@@ -3023,6 +3078,10 @@ static const struct eth_dev_ops bnxt_dev_ops = {
.rx_queue_count = bnxt_rx_queue_count_op,
.rx_descriptor_status = bnxt_rx_descriptor_status_op,
.tx_descriptor_status = bnxt_tx_descriptor_status_op,
+ .rx_queue_start = bnxt_rx_queue_start,
+ .rx_queue_stop = bnxt_rx_queue_stop,
+ .tx_queue_start = bnxt_tx_queue_start,
+ .tx_queue_stop = bnxt_tx_queue_stop,
.filter_ctrl = bnxt_filter_ctrl_op,
.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
.get_eeprom_length = bnxt_get_eeprom_length_op,
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index be190195a..6bceee087 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -50,6 +50,7 @@ struct bnxt_rx_queue {
uint16_t reg_idx; /* RX queue register index */
uint16_t port_id; /* Device port identifier */
uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
+ uint8_t rx_deferred_start; /* not in global dev start */

struct bnxt *bp;
int index;
@@ -75,5 +76,4 @@ int bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev,
uint16_t queue_id);
int bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev,
uint16_t queue_id);
-
#endif
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 3f07c11b5..9e70c8604 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -545,6 +545,10 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t ag_prod = rxr->ag_prod;
int rc = 0;

+ /* If Rx Q was stopped return */
+ if (rxq->rx_deferred_start)
+ return 0;
+
/* Handle RX burst request */
while (1) {
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h
index a94373d19..f3ed49bd6 100644
--- a/drivers/net/bnxt/bnxt_rxr.h
+++ b/drivers/net/bnxt/bnxt_rxr.h
@@ -120,5 +120,6 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
void bnxt_free_rx_rings(struct bnxt *bp);
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
-
+int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
#endif
diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h
index f753c10f2..e27c34fa9 100644
--- a/drivers/net/bnxt/bnxt_txq.h
+++ b/drivers/net/bnxt/bnxt_txq.h
@@ -71,5 +71,4 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
uint16_t nb_desc,
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
-
#endif
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 2f2c87119..2c81a37c2 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -349,6 +349,11 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Handle TX completions */
bnxt_handle_tx_cp(txq);

+ /* Tx queue was stopped; wait for it to be restarted */
+ if (txq->tx_deferred_start) {
+ PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
+ return 0;
+ }
/* Handle TX burst request */
for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) {
if (bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq)) {
@@ -364,3 +369,30 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,

return nb_tx_pkts;
}
+
+int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
+
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ txq->tx_deferred_start = false;
+ PMD_DRV_LOG(DEBUG, "Tx queue started\n");
+
+ return 0;
+}
+
+int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
+
+ /* Handle TX completions */
+ bnxt_handle_tx_cp(txq);
+
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ txq->tx_deferred_start = true;
+ PMD_DRV_LOG(DEBUG, "Tx queue stopped\n");
+
+ return 0;
+}
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
index 2feac51db..d88b15ab8 100644
--- a/drivers/net/bnxt/bnxt_txr.h
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -68,6 +68,8 @@ int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq);
int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id);
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

#define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
--
2.14.3 (Apple Git-98)
Ferruh Yigit
2018-01-22 12:25:15 UTC
Permalink
Post by Ajit Khaparde
Currently this is implemented entirely in the PMD as there is no explicit
support in the HW. Re-program the RSS Table without this queue on stop
and add it back to the table on start.
<...>
Post by Ajit Khaparde
@@ -1,4 +1,4 @@
-/*-
+/*
* BSD LICENSE
*
* Copyright(c) Broadcom Limited.
Unrelated, but since I saw this: do you plan to switch to SPDX tags?

<...>
Post by Ajit Khaparde
@@ -321,8 +334,7 @@ static int bnxt_init_chip(struct bnxt *bp)
!RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
intr_vector = bp->eth_dev->data->nb_rx_queues;
- PMD_DRV_LOG(INFO, "%s(): intr_vector = %d\n", __func__,
- intr_vector);
+ PMD_DRV_LOG(INFO, "intr_vector = %d\n", intr_vector);
With the new logging macro, "__func__" is duplicated; why not fix them all in
patch 2/5, which introduces the new macro?
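(For reference, a hedged sketch of a log macro that already prepends the
function name — not necessarily the exact macro from patch 2/5 — which is
what makes the per-call "%s()" / __func__ arguments redundant:)

/* Sketch: a dynamic, driver-specific log type plus a macro that prepends
 * the calling function. bnxt_logtype_driver is assumed to be registered
 * elsewhere via rte_log_register().
 */
extern int bnxt_logtype_driver;

#define PMD_DRV_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, bnxt_logtype_driver, \
		"%s(): " fmt, __func__, ## args)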

<...>
Post by Ajit Khaparde
@@ -2969,6 +2981,49 @@ bnxt_set_eeprom_op(struct rte_eth_dev *dev,
return 0;
}
+int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
+ struct bnxt_vnic_info *vnic = NULL;
+
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ rxq->rx_deferred_start = false;
+ PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ vnic = rxq->vnic;
+ if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
+ return 0;
+ PMD_DRV_LOG(DEBUG, "vnic = %p fw_grp_id = %d\n",
+ vnic, bp->grp_info[rx_queue_id + 1].fw_grp_id);
+ vnic->fw_grp_ids[rx_queue_id] =
+ bp->grp_info[rx_queue_id + 1].fw_grp_id;
+ return bnxt_vnic_rss_configure(bp, vnic);
+ }
+
+ return 0;
+}
+
+int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
+ struct bnxt_vnic_info *vnic = NULL;
+
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ rxq->rx_deferred_start = true;
+ PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
+
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ vnic = rxq->vnic;
+ vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
+ return bnxt_vnic_rss_configure(bp, vnic);
+ }
+ return 0;
+}
There is already a source file, "bnxt_rxq.c", which seems to be for Rx queue
related functions; why not add the new functions there?

<...>
Post by Ajit Khaparde
@@ -364,3 +369,30 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx_pkts;
}
+
+int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
+
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ txq->tx_deferred_start = false;
+ PMD_DRV_LOG(DEBUG, "Tx queue started\n");
+
+ return 0;
+}
+
+int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
+
+ /* Handle TX completions */
+ bnxt_handle_tx_cp(txq);
+
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ txq->tx_deferred_start = true;
+ PMD_DRV_LOG(DEBUG, "Tx queue stopped\n");
+
+ return 0;
+}
Similar question for these functions: they seem to be implemented in the txr
(Tx ring?) source file; bnxt_txq.c seems a better fit, what do you think?
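
Stepping back from file placement for a moment: these ops back the generic
ethdev queue start/stop API, so an application can do something like the
sketch below (port/queue ids are placeholders, error handling trimmed).

#include <stdio.h>
#include <rte_ethdev.h>

/* Illustrative only: take Rx/Tx queue 0 of port 0 out of service at
 * runtime, then bring it back, using the generic ethdev calls that the
 * new bnxt dev_ops plug into.
 */
uint16_t port_id = 0, queue_id = 0;

if (rte_eth_dev_rx_queue_stop(port_id, queue_id) != 0)
	printf("Rx queue stop failed\n");
if (rte_eth_dev_tx_queue_stop(port_id, queue_id) != 0)
	printf("Tx queue stop failed\n");

/* ... later, bring the queue pair back into service ... */
if (rte_eth_dev_rx_queue_start(port_id, queue_id) != 0)
	printf("Rx queue start failed\n");
if (rte_eth_dev_tx_queue_start(port_id, queue_id) != 0)
	printf("Tx queue start failed\n");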

<...>