author    David S. Miller <davem@davemloft.net>  2020-09-11 14:31:54 -0700
committer David S. Miller <davem@davemloft.net>  2020-09-11 14:31:54 -0700
commit    5bc461fd565a487c53bac230af090c104535f43f (patch)
tree      6d0e593a6395c2067631528fc0df729787584695
parent    e7a08121e0f4954101605b30c2cf725ccdef7ec7 (diff)
parent    7e074d5a76caffac4da8acdd65e0d1144a347e45 (diff)
Merge branch 'Add-GVE-Features'
David Awogbemila says:

====================
Add GVE Features.

Note: Patch 4 in v3 was dropped.
Patch 4 (patch 5 in v3): Start/stop timer only when report stats is
enabled/disabled.
Patch 7 (patch 8 in v3): Use netdev_info, not dev_info, to log device
link status.
====================

Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
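For orientation: the new counters and knobs land in the standard ethtool interfaces wired up in gve_ethtool.c below, so once applied they should be reachable with stock tooling, e.g. "ethtool -S <iface>" for the extended statistics, "ethtool --set-priv-flags <iface> report-stats on" for the new private flag, and "ethtool --set-tunable <iface> rx-copybreak <bytes>" for the copybreak tunable (the interface name and byte value here are placeholders).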
Diffstat:
-rw-r--r--  drivers/net/ethernet/google/gve/gve.h          | 106
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.c   | 315
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.h   |  62
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ethtool.c  | 365
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c     | 301
-rw-r--r--  drivers/net/ethernet/google/gve/gve_register.h |   1
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx.c       |  37
7 files changed, 1023 insertions(+), 164 deletions(-)
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index ebc37e256922..f5c80229ea96 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -27,6 +27,17 @@
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3
+/* Number of gve tx/rx stats (per queue) in the stats report. */
+#define GVE_TX_STATS_REPORT_NUM 5
+#define GVE_RX_STATS_REPORT_NUM 2
+
+/* Interval to schedule a stats report update, 20000ms. */
+#define GVE_STATS_REPORT_TIMER_PERIOD 20000
+
+/* Number of NIC tx/rx stats (per queue) in the stats report. */
+#define NIC_TX_STATS_REPORT_NUM 0
+#define NIC_RX_STATS_REPORT_NUM 4
+
/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
struct gve_rx_desc *desc_ring; /* the descriptor ring */
@@ -71,6 +82,11 @@ struct gve_rx_ring {
u32 cnt; /* free-running total number of completed packets */
u32 fill_cnt; /* free-running total number of descs and buffs posted */
u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
+ u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
+ u64 rx_copied_pkt; /* free-running total number of copied packets */
+ u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
+ u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
+ u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
u32 q_num; /* queue index */
u32 ntfy_id; /* notification block index */
struct gve_queue_resources *q_resources; /* head and tail pointer idx */
@@ -202,24 +218,63 @@ struct gve_priv {
dma_addr_t adminq_bus_addr;
u32 adminq_mask; /* masks prod_cnt to adminq size */
u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
-
+ u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
+ u32 adminq_timeouts; /* free-running count of AQ cmd timeouts */
+ /* free-running counts of each AQ cmd executed */
+ u32 adminq_describe_device_cnt;
+ u32 adminq_cfg_device_resources_cnt;
+ u32 adminq_register_page_list_cnt;
+ u32 adminq_unregister_page_list_cnt;
+ u32 adminq_create_tx_queue_cnt;
+ u32 adminq_create_rx_queue_cnt;
+ u32 adminq_destroy_tx_queue_cnt;
+ u32 adminq_destroy_rx_queue_cnt;
+ u32 adminq_dcfg_device_resources_cnt;
+ u32 adminq_set_driver_parameter_cnt;
+ u32 adminq_report_stats_cnt;
+ u32 adminq_report_link_speed_cnt;
+
+ /* Global stats */
+ u32 interface_up_cnt; /* count of times interface turned up since last reset */
+ u32 interface_down_cnt; /* count of times interface turned down since last reset */
+ u32 reset_cnt; /* count of resets */
+ u32 page_alloc_fail; /* count of page alloc fails */
+ u32 dma_mapping_error; /* count of dma mapping errors */
+ u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
struct workqueue_struct *gve_wq;
struct work_struct service_task;
+ struct work_struct stats_report_task;
unsigned long service_task_flags;
unsigned long state_flags;
+
+ struct gve_stats_report *stats_report;
+ u64 stats_report_len;
+ dma_addr_t stats_report_bus; /* dma address for the stats report */
+ unsigned long ethtool_flags;
+
+ unsigned long stats_report_timer_period;
+ struct timer_list stats_report_timer;
+
+ /* Gvnic device link speed from hypervisor. */
+ u64 link_speed;
};
-enum gve_service_task_flags {
- GVE_PRIV_FLAGS_DO_RESET = BIT(1),
- GVE_PRIV_FLAGS_RESET_IN_PROGRESS = BIT(2),
- GVE_PRIV_FLAGS_PROBE_IN_PROGRESS = BIT(3),
+enum gve_service_task_flags_bit {
+ GVE_PRIV_FLAGS_DO_RESET = 1,
+ GVE_PRIV_FLAGS_RESET_IN_PROGRESS = 2,
+ GVE_PRIV_FLAGS_PROBE_IN_PROGRESS = 3,
+ GVE_PRIV_FLAGS_DO_REPORT_STATS = 4,
};
-enum gve_state_flags {
- GVE_PRIV_FLAGS_ADMIN_QUEUE_OK = BIT(1),
- GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK = BIT(2),
- GVE_PRIV_FLAGS_DEVICE_RINGS_OK = BIT(3),
- GVE_PRIV_FLAGS_NAPI_ENABLED = BIT(4),
+enum gve_state_flags_bit {
+ GVE_PRIV_FLAGS_ADMIN_QUEUE_OK = 1,
+ GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK = 2,
+ GVE_PRIV_FLAGS_DEVICE_RINGS_OK = 3,
+ GVE_PRIV_FLAGS_NAPI_ENABLED = 4,
+};
+
+enum gve_ethtool_flags_bit {
+ GVE_PRIV_FLAGS_REPORT_STATS = 0,
};
static inline bool gve_get_do_reset(struct gve_priv *priv)
@@ -269,6 +324,22 @@ static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}
+static inline bool gve_get_do_report_stats(struct gve_priv *priv)
+{
+ return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
+ &priv->service_task_flags);
+}
+
+static inline void gve_set_do_report_stats(struct gve_priv *priv)
+{
+ set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
+}
+
+static inline void gve_clear_do_report_stats(struct gve_priv *priv)
+{
+ clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
+}
+
static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
@@ -329,6 +400,16 @@ static inline void gve_clear_napi_enabled(struct gve_priv *priv)
clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}
+static inline bool gve_get_report_stats(struct gve_priv *priv)
+{
+ return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
+}
+
+static inline void gve_clear_report_stats(struct gve_priv *priv)
+{
+ clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
+}
+
/* Returns the address of the ntfy_blocks irq doorbell
*/
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
@@ -426,7 +507,8 @@ static inline bool gve_can_recycle_pages(struct net_device *dev)
}
/* buffers */
-int gve_alloc_page(struct device *dev, struct page **page, dma_addr_t *dma,
+int gve_alloc_page(struct gve_priv *priv, struct device *dev,
+ struct page **page, dma_addr_t *dma,
enum dma_data_direction);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
enum dma_data_direction);
@@ -450,6 +532,8 @@ int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
struct gve_queue_config new_rx_config,
struct gve_queue_config new_tx_config);
+/* report stats handling */
+void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
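A note on the enum rename in this header: test_bit()/set_bit()/clear_bit() address flags by bit position, not by mask, so the values are now plain indices instead of BIT() masks (the old BIT(n) values only worked because they were silently treated as larger positions). A minimal sketch of the pattern, with illustrative names rather than the driver's:

#include <linux/bitops.h>

enum sketch_flags_bit {
	SKETCH_FLAG_DO_RESET = 1,	/* bit position 1, i.e. mask BIT(1) */
};

static unsigned long sketch_flags;

static inline void sketch_set_do_reset(void)
{
	set_bit(SKETCH_FLAG_DO_RESET, &sketch_flags);
}

static inline bool sketch_get_do_reset(void)
{
	return test_bit(SKETCH_FLAG_DO_RESET, &sketch_flags);
}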
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index c3ba7baf0107..24ae6a28a806 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -23,6 +23,20 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
priv->adminq_mask = (PAGE_SIZE / sizeof(union gve_adminq_command)) - 1;
priv->adminq_prod_cnt = 0;
+ priv->adminq_cmd_fail = 0;
+ priv->adminq_timeouts = 0;
+ priv->adminq_describe_device_cnt = 0;
+ priv->adminq_cfg_device_resources_cnt = 0;
+ priv->adminq_register_page_list_cnt = 0;
+ priv->adminq_unregister_page_list_cnt = 0;
+ priv->adminq_create_tx_queue_cnt = 0;
+ priv->adminq_create_rx_queue_cnt = 0;
+ priv->adminq_destroy_tx_queue_cnt = 0;
+ priv->adminq_destroy_rx_queue_cnt = 0;
+ priv->adminq_dcfg_device_resources_cnt = 0;
+ priv->adminq_set_driver_parameter_cnt = 0;
+ priv->adminq_report_stats_cnt = 0;
+ priv->adminq_report_link_speed_cnt = 0;
/* Setup Admin queue with the device */
iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
@@ -81,17 +95,18 @@ static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)
return false;
}
-static int gve_adminq_parse_err(struct device *dev, u32 status)
+static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
{
if (status != GVE_ADMINQ_COMMAND_PASSED &&
- status != GVE_ADMINQ_COMMAND_UNSET)
- dev_err(dev, "AQ command failed with status %d\n", status);
-
+ status != GVE_ADMINQ_COMMAND_UNSET) {
+ dev_err(&priv->pdev->dev, "AQ command failed with status %d\n", status);
+ priv->adminq_cmd_fail++;
+ }
switch (status) {
case GVE_ADMINQ_COMMAND_PASSED:
return 0;
case GVE_ADMINQ_COMMAND_UNSET:
- dev_err(dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
+ dev_err(&priv->pdev->dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
return -EINVAL;
case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
@@ -116,36 +131,145 @@ static int gve_adminq_parse_err(struct device *dev, u32 status)
case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
return -ENOTSUPP;
default:
- dev_err(dev, "parse_aq_err: unknown status code %d\n", status);
+ dev_err(&priv->pdev->dev, "parse_aq_err: unknown status code %d\n", status);
return -EINVAL;
}
}
+/* Flushes all AQ commands currently queued and waits for them to complete.
+ * If there are failures, it will return the first error.
+ */
+static int gve_adminq_kick_and_wait(struct gve_priv *priv)
+{
+ u32 tail, head;
+ int i;
+
+ tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
+ head = priv->adminq_prod_cnt;
+
+ gve_adminq_kick_cmd(priv, head);
+ if (!gve_adminq_wait_for_cmd(priv, head)) {
+ dev_err(&priv->pdev->dev, "AQ commands timed out, need to reset AQ\n");
+ priv->adminq_timeouts++;
+ return -ENOTRECOVERABLE;
+ }
+
+ for (i = tail; i < head; i++) {
+ union gve_adminq_command *cmd;
+ u32 status, err;
+
+ cmd = &priv->adminq[i & priv->adminq_mask];
+ status = be32_to_cpu(READ_ONCE(cmd->status));
+ err = gve_adminq_parse_err(priv, status);
+ if (err)
+ // Return the first error if we failed.
+ return err;
+ }
+
+ return 0;
+}
+
/* This function is not threadsafe - the caller is responsible for any
* necessary locks.
*/
-int gve_adminq_execute_cmd(struct gve_priv *priv,
- union gve_adminq_command *cmd_orig)
+static int gve_adminq_issue_cmd(struct gve_priv *priv,
+ union gve_adminq_command *cmd_orig)
{
union gve_adminq_command *cmd;
- u32 status = 0;
- u32 prod_cnt;
+ u32 opcode;
+ u32 tail;
+
+ tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
+
+ // Check if next command will overflow the buffer.
+ if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == tail) {
+ int err;
+
+ // Flush existing commands to make room.
+ err = gve_adminq_kick_and_wait(priv);
+ if (err)
+ return err;
+
+ // Retry.
+ tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
+ if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == tail) {
+ // This should never happen. We just flushed the
+ // command queue so there should be enough space.
+ return -ENOMEM;
+ }
+ }
cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
priv->adminq_prod_cnt++;
- prod_cnt = priv->adminq_prod_cnt;
memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
-
- gve_adminq_kick_cmd(priv, prod_cnt);
- if (!gve_adminq_wait_for_cmd(priv, prod_cnt)) {
- dev_err(&priv->pdev->dev, "AQ command timed out, need to reset AQ\n");
- return -ENOTRECOVERABLE;
+ opcode = be32_to_cpu(READ_ONCE(cmd->opcode));
+
+ switch (opcode) {
+ case GVE_ADMINQ_DESCRIBE_DEVICE:
+ priv->adminq_describe_device_cnt++;
+ break;
+ case GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES:
+ priv->adminq_cfg_device_resources_cnt++;
+ break;
+ case GVE_ADMINQ_REGISTER_PAGE_LIST:
+ priv->adminq_register_page_list_cnt++;
+ break;
+ case GVE_ADMINQ_UNREGISTER_PAGE_LIST:
+ priv->adminq_unregister_page_list_cnt++;
+ break;
+ case GVE_ADMINQ_CREATE_TX_QUEUE:
+ priv->adminq_create_tx_queue_cnt++;
+ break;
+ case GVE_ADMINQ_CREATE_RX_QUEUE:
+ priv->adminq_create_rx_queue_cnt++;
+ break;
+ case GVE_ADMINQ_DESTROY_TX_QUEUE:
+ priv->adminq_destroy_tx_queue_cnt++;
+ break;
+ case GVE_ADMINQ_DESTROY_RX_QUEUE:
+ priv->adminq_destroy_rx_queue_cnt++;
+ break;
+ case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES:
+ priv->adminq_dcfg_device_resources_cnt++;
+ break;
+ case GVE_ADMINQ_SET_DRIVER_PARAMETER:
+ priv->adminq_set_driver_parameter_cnt++;
+ break;
+ case GVE_ADMINQ_REPORT_STATS:
+ priv->adminq_report_stats_cnt++;
+ break;
+ case GVE_ADMINQ_REPORT_LINK_SPEED:
+ priv->adminq_report_link_speed_cnt++;
+ break;
+ default:
+ dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
}
- memcpy(cmd_orig, cmd, sizeof(*cmd));
- status = be32_to_cpu(READ_ONCE(cmd->status));
- return gve_adminq_parse_err(&priv->pdev->dev, status);
+ return 0;
+}
+
+/* This function is not threadsafe - the caller is responsible for any
+ * necessary locks.
+ * The caller is also responsible for making sure there are no commands
+ * waiting to be executed.
+ */
+static int gve_adminq_execute_cmd(struct gve_priv *priv, union gve_adminq_command *cmd_orig)
+{
+ u32 tail, head;
+ int err;
+
+ tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
+ head = priv->adminq_prod_cnt;
+ if (tail != head)
+ // This is not a valid path
+ return -EINVAL;
+
+ err = gve_adminq_issue_cmd(priv, cmd_orig);
+ if (err)
+ return err;
+
+ return gve_adminq_kick_and_wait(priv);
}
/* The device specifies that the management vector can either be the first irq
@@ -190,29 +314,50 @@ int gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
return gve_adminq_execute_cmd(priv, &cmd);
}
-int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
+static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
{
struct gve_tx_ring *tx = &priv->tx[queue_index];
union gve_adminq_command cmd;
+ int err;
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
.queue_id = cpu_to_be32(queue_index),
.reserved = 0,
- .queue_resources_addr = cpu_to_be64(tx->q_resources_bus),
+ .queue_resources_addr =
+ cpu_to_be64(tx->q_resources_bus),
.tx_ring_addr = cpu_to_be64(tx->bus),
.queue_page_list_id = cpu_to_be32(tx->tx_fifo.qpl->id),
.ntfy_id = cpu_to_be32(tx->ntfy_id),
};
- return gve_adminq_execute_cmd(priv, &cmd);
+ err = gve_adminq_issue_cmd(priv, &cmd);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < num_queues; i++) {
+ err = gve_adminq_create_tx_queue(priv, i);
+ if (err)
+ return err;
+ }
+
+ return gve_adminq_kick_and_wait(priv);
}
-int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
+static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
{
struct gve_rx_ring *rx = &priv->rx[queue_index];
union gve_adminq_command cmd;
+ int err;
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
@@ -227,12 +372,31 @@ int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
.queue_page_list_id = cpu_to_be32(rx->data.qpl->id),
};
- return gve_adminq_execute_cmd(priv, &cmd);
+ err = gve_adminq_issue_cmd(priv, &cmd);
+ if (err)
+ return err;
+
+ return 0;
}
-int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
+int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < num_queues; i++) {
+ err = gve_adminq_create_rx_queue(priv, i);
+ if (err)
+ return err;
+ }
+
+ return gve_adminq_kick_and_wait(priv);
+}
+
+static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
{
union gve_adminq_command cmd;
+ int err;
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
@@ -240,12 +404,31 @@ int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
.queue_id = cpu_to_be32(queue_index),
};
- return gve_adminq_execute_cmd(priv, &cmd);
+ err = gve_adminq_issue_cmd(priv, &cmd);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 num_queues)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < num_queues; i++) {
+ err = gve_adminq_destroy_tx_queue(priv, i);
+ if (err)
+ return err;
+ }
+
+ return gve_adminq_kick_and_wait(priv);
}
-int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
+static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
{
union gve_adminq_command cmd;
+ int err;
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
@@ -253,7 +436,25 @@ int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
.queue_id = cpu_to_be32(queue_index),
};
- return gve_adminq_execute_cmd(priv, &cmd);
+ err = gve_adminq_issue_cmd(priv, &cmd);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < num_queues; i++) {
+ err = gve_adminq_destroy_rx_queue(priv, i);
+ if (err)
+ return err;
+ }
+
+ return gve_adminq_kick_and_wait(priv);
}
int gve_adminq_describe_device(struct gve_priv *priv)
@@ -283,8 +484,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
- netif_err(priv, drv, priv->dev, "Tx desc count %d too low\n",
- priv->tx_desc_cnt);
+ dev_err(&priv->pdev->dev, "Tx desc count %d too low\n", priv->tx_desc_cnt);
err = -EINVAL;
goto free_device_descriptor;
}
@@ -293,8 +493,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
< PAGE_SIZE ||
priv->rx_desc_cnt * sizeof(priv->rx->data.data_ring[0])
< PAGE_SIZE) {
- netif_err(priv, drv, priv->dev, "Rx desc count %d too low\n",
- priv->rx_desc_cnt);
+ dev_err(&priv->pdev->dev, "Rx desc count %d too low\n", priv->rx_desc_cnt);
err = -EINVAL;
goto free_device_descriptor;
}
@@ -302,8 +501,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
be64_to_cpu(descriptor->max_registered_pages);
mtu = be16_to_cpu(descriptor->mtu);
if (mtu < ETH_MIN_MTU) {
- netif_err(priv, drv, priv->dev, "MTU %d below minimum MTU\n",
- mtu);
+ dev_err(&priv->pdev->dev, "MTU %d below minimum MTU\n", mtu);
err = -EINVAL;
goto free_device_descriptor;
}
@@ -311,12 +509,12 @@ int gve_adminq_describe_device(struct gve_priv *priv)
priv->num_event_counters = be16_to_cpu(descriptor->counters);
ether_addr_copy(priv->dev->dev_addr, descriptor->mac);
mac = descriptor->mac;
- netif_info(priv, drv, priv->dev, "MAC addr: %pM\n", mac);
+ dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
priv->rx_pages_per_qpl = be16_to_cpu(descriptor->rx_pages_per_qpl);
if (priv->rx_pages_per_qpl < priv->rx_desc_cnt) {
- netif_err(priv, drv, priv->dev, "rx_pages_per_qpl cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
- priv->rx_pages_per_qpl);
+ dev_err(&priv->pdev->dev, "rx_pages_per_qpl cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
+ priv->rx_pages_per_qpl);
priv->rx_desc_cnt = priv->rx_pages_per_qpl;
}
priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
@@ -385,3 +583,46 @@ int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
return gve_adminq_execute_cmd(priv, &cmd);
}
+
+int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
+ dma_addr_t stats_report_addr, u64 interval)
+{
+ union gve_adminq_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_STATS);
+ cmd.report_stats = (struct gve_adminq_report_stats) {
+ .stats_report_len = cpu_to_be64(stats_report_len),
+ .stats_report_addr = cpu_to_be64(stats_report_addr),
+ .interval = cpu_to_be64(interval),
+ };
+
+ return gve_adminq_execute_cmd(priv, &cmd);
+}
+
+int gve_adminq_report_link_speed(struct gve_priv *priv)
+{
+ union gve_adminq_command gvnic_cmd;
+ dma_addr_t link_speed_region_bus;
+ __be64 *link_speed_region;
+ int err;
+
+ link_speed_region =
+ dma_alloc_coherent(&priv->pdev->dev, sizeof(*link_speed_region),
+ &link_speed_region_bus, GFP_KERNEL);
+
+ if (!link_speed_region)
+ return -ENOMEM;
+
+ memset(&gvnic_cmd, 0, sizeof(gvnic_cmd));
+ gvnic_cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_LINK_SPEED);
+ gvnic_cmd.report_link_speed.link_speed_address =
+ cpu_to_be64(link_speed_region_bus);
+
+ err = gve_adminq_execute_cmd(priv, &gvnic_cmd);
+
+ priv->link_speed = be64_to_cpu(*link_speed_region);
+ dma_free_coherent(&priv->pdev->dev, sizeof(*link_speed_region), link_speed_region,
+ link_speed_region_bus);
+ return err;
+}
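The heart of this file's rework is the split of the old execute path into gve_adminq_issue_cmd(), which only enqueues and counts a command, and gve_adminq_kick_and_wait(), which rings the doorbell and then parses the status of every slot the device consumed. A condensed sketch of the ring-full handling, assuming a power-of-two ring whose consumer position comes from a device event counter (types and helper names are simplified stand-ins, not the driver's):

/* 'prod_cnt' and the device event counter are free-running;
 * 'mask' is the ring size minus one.
 */
static int sketch_issue(struct sketch_ring *r, const union sketch_cmd *c)
{
	u32 tail = sketch_read_event_counter(r);

	/* The next slot would land on a command the device has not
	 * consumed yet: flush what is queued and wait before reusing it.
	 */
	if (((r->prod_cnt + 1) & r->mask) == (tail & r->mask)) {
		int err = sketch_kick_and_wait(r);

		if (err)
			return err;
	}
	r->cmds[r->prod_cnt & r->mask] = *c;
	r->prod_cnt++;
	return 0;
}

Callers that previously paid one doorbell-and-wait per queue (the create/destroy loops) now enqueue every queue's command and flush once at the end.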
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 4dfa06edc0f8..281de8326bc5 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -21,6 +21,8 @@ enum gve_adminq_opcodes {
GVE_ADMINQ_DESTROY_RX_QUEUE = 0x8,
GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES = 0x9,
GVE_ADMINQ_SET_DRIVER_PARAMETER = 0xB,
+ GVE_ADMINQ_REPORT_STATS = 0xC,
+ GVE_ADMINQ_REPORT_LINK_SPEED = 0xD
};
/* Admin queue status codes */
@@ -172,6 +174,51 @@ struct gve_adminq_set_driver_parameter {
static_assert(sizeof(struct gve_adminq_set_driver_parameter) == 16);
+struct gve_adminq_report_stats {
+ __be64 stats_report_len;
+ __be64 stats_report_addr;
+ __be64 interval;
+};
+
+static_assert(sizeof(struct gve_adminq_report_stats) == 24);
+
+struct gve_adminq_report_link_speed {
+ __be64 link_speed_address;
+};
+
+static_assert(sizeof(struct gve_adminq_report_link_speed) == 8);
+
+struct stats {
+ __be32 stat_name;
+ __be32 queue_id;
+ __be64 value;
+};
+
+static_assert(sizeof(struct stats) == 16);
+
+struct gve_stats_report {
+ __be64 written_count;
+ struct stats stats[0];
+};
+
+static_assert(sizeof(struct gve_stats_report) == 8);
+
+enum gve_stat_names {
+ // stats from gve
+ TX_WAKE_CNT = 1,
+ TX_STOP_CNT = 2,
+ TX_FRAMES_SENT = 3,
+ TX_BYTES_SENT = 4,
+ TX_LAST_COMPLETION_PROCESSED = 5,
+ RX_NEXT_EXPECTED_SEQUENCE = 6,
+ RX_BUFFERS_POSTED = 7,
+ // stats from NIC
+ RX_QUEUE_DROP_CNT = 65,
+ RX_NO_BUFFERS_POSTED = 66,
+ RX_DROPS_PACKET_OVER_MRU = 67,
+ RX_DROPS_INVALID_CHECKSUM = 68,
+};
+
union gve_adminq_command {
struct {
__be32 opcode;
@@ -187,6 +234,8 @@ union gve_adminq_command {
struct gve_adminq_register_page_list reg_page_list;
struct gve_adminq_unregister_page_list unreg_page_list;
struct gve_adminq_set_driver_parameter set_driver_param;
+ struct gve_adminq_report_stats report_stats;
+ struct gve_adminq_report_link_speed report_link_speed;
};
};
u8 reserved[64];
@@ -197,8 +246,6 @@ static_assert(sizeof(union gve_adminq_command) == 64);
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv);
void gve_adminq_free(struct device *dev, struct gve_priv *priv);
void gve_adminq_release(struct gve_priv *priv);
-int gve_adminq_execute_cmd(struct gve_priv *priv,
- union gve_adminq_command *cmd_orig);
int gve_adminq_describe_device(struct gve_priv *priv);
int gve_adminq_configure_device_resources(struct gve_priv *priv,
dma_addr_t counter_array_bus_addr,
@@ -206,12 +253,15 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
dma_addr_t db_array_bus_addr,
u32 num_ntfy_blks);
int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
-int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_id);
-int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_id);
-int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_id);
-int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_id);
+int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues);
+int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 num_queues);
+int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
+int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues);
int gve_adminq_register_page_list(struct gve_priv *priv,
struct gve_queue_page_list *qpl);
int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id);
int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu);
+int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
+ dma_addr_t stats_report_addr, u64 interval);
+int gve_adminq_report_link_speed(struct gve_priv *priv);
#endif /* _GVE_ADMINQ_H */
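The report described by these structs is a length-prefixed array: an 8-byte written_count followed by 16-byte {stat_name, queue_id, value} records, driver-written records first and NIC-written records after them. Plugging the gve.h constants into the sizing later done in gve_alloc_stats_report(), with 4 tx and 4 rx queues assumed purely for illustration:

	tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) * 4; /* (5 + 0) * 4 = 20 */
	rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) * 4; /* (2 + 4) * 4 = 24 */
	len = sizeof(struct gve_stats_report) +
	      (tx_stats_num + rx_stats_num) * sizeof(struct stats); /* 8 + 44 * 16 = 712 bytes */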
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index d8fa816f4473..7b44769bd87c 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -6,6 +6,7 @@
#include <linux/rtnetlink.h>
#include "gve.h"
+#include "gve_adminq.h"
static void gve_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
@@ -34,41 +35,84 @@ static u32 gve_get_msglevel(struct net_device *netdev)
static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
"rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
"rx_dropped", "tx_dropped", "tx_timeouts",
+ "rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
+ "interface_up_cnt", "interface_down_cnt", "reset_cnt",
+ "page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
+};
+
+static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
+ "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_bytes[%u]",
+ "rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
+ "rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
+ "rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
+};
+
+static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
+ "tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_bytes[%u]",
+ "tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
+};
+
+static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
+ "adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
+ "adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
+ "adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
+ "adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
+ "adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
+ "adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
+ "adminq_report_stats_cnt", "adminq_report_link_speed_cnt"
+};
+
+static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
+ "report-stats",
};
#define GVE_MAIN_STATS_LEN ARRAY_SIZE(gve_gstrings_main_stats)
-#define NUM_GVE_TX_CNTS 5
-#define NUM_GVE_RX_CNTS 2
+#define GVE_ADMINQ_STATS_LEN ARRAY_SIZE(gve_gstrings_adminq_stats)
+#define NUM_GVE_TX_CNTS ARRAY_SIZE(gve_gstrings_tx_stats)
+#define NUM_GVE_RX_CNTS ARRAY_SIZE(gve_gstrings_rx_stats)
+#define GVE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(gve_gstrings_priv_flags)
static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct gve_priv *priv = netdev_priv(netdev);
char *s = (char *)data;
- int i;
+ int i, j;
- if (stringset != ETH_SS_STATS)
- return;
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(s, *gve_gstrings_main_stats,
+ sizeof(gve_gstrings_main_stats));
+ s += sizeof(gve_gstrings_main_stats);
+
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ for (j = 0; j < NUM_GVE_RX_CNTS; j++) {
+ snprintf(s, ETH_GSTRING_LEN,
+ gve_gstrings_rx_stats[j], i);
+ s += ETH_GSTRING_LEN;
+ }
+ }
- memcpy(s, *gve_gstrings_main_stats,
- sizeof(gve_gstrings_main_stats));
- s += sizeof(gve_gstrings_main_stats);
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- snprintf(s, ETH_GSTRING_LEN, "rx_desc_cnt[%u]", i);
- s += ETH_GSTRING_LEN;
- snprintf(s, ETH_GSTRING_LEN, "rx_desc_fill_cnt[%u]", i);
- s += ETH_GSTRING_LEN;
- }
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
- snprintf(s, ETH_GSTRING_LEN, "tx_req[%u]", i);
- s += ETH_GSTRING_LEN;
- snprintf(s, ETH_GSTRING_LEN, "tx_done[%u]", i);
- s += ETH_GSTRING_LEN;
- snprintf(s, ETH_GSTRING_LEN, "tx_wake[%u]", i);
- s += ETH_GSTRING_LEN;
- snprintf(s, ETH_GSTRING_LEN, "tx_stop[%u]", i);
- s += ETH_GSTRING_LEN;
- snprintf(s, ETH_GSTRING_LEN, "tx_event_counter[%u]", i);
- s += ETH_GSTRING_LEN;
+ for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ for (j = 0; j < NUM_GVE_TX_CNTS; j++) {
+ snprintf(s, ETH_GSTRING_LEN,
+ gve_gstrings_tx_stats[j], i);
+ s += ETH_GSTRING_LEN;
+ }
+ }
+
+ memcpy(s, *gve_gstrings_adminq_stats,
+ sizeof(gve_gstrings_adminq_stats));
+ s += sizeof(gve_gstrings_adminq_stats);
+ break;
+
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(s, *gve_gstrings_priv_flags,
+ sizeof(gve_gstrings_priv_flags));
+ s += sizeof(gve_gstrings_priv_flags);
+ break;
+
+ default:
+ break;
}
}
@@ -78,9 +122,11 @@ static int gve_get_sset_count(struct net_device *netdev, int sset)
switch (sset) {
case ETH_SS_STATS:
- return GVE_MAIN_STATS_LEN +
+ return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
(priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
(priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS);
+ case ETH_SS_PRIV_FLAGS:
+ return GVE_PRIV_FLAGS_STR_LEN;
default:
return -EOPNOTSUPP;
}
@@ -90,24 +136,56 @@ static void
gve_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
- struct gve_priv *priv = netdev_priv(netdev);
- u64 rx_pkts, rx_bytes, tx_pkts, tx_bytes;
+ u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
+ tmp_rx_desc_err_dropped_pkt, tmp_tx_pkts, tmp_tx_bytes;
+ u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts,
+ rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes;
+ int stats_idx, base_stats_idx, max_stats_idx;
+ struct stats *report_stats;
+ int *rx_qid_to_stats_idx;
+ int *tx_qid_to_stats_idx;
+ struct gve_priv *priv;
+ bool skip_nic_stats;
unsigned int start;
int ring;
- int i;
+ int i, j;
ASSERT_RTNL();
- for (rx_pkts = 0, rx_bytes = 0, ring = 0;
+ priv = netdev_priv(netdev);
+ report_stats = priv->stats_report->stats;
+ rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues,
+ sizeof(int), GFP_KERNEL);
+ if (!rx_qid_to_stats_idx)
+ return;
+ tx_qid_to_stats_idx = kmalloc_array(priv->tx_cfg.num_queues,
+ sizeof(int), GFP_KERNEL);
+ if (!tx_qid_to_stats_idx) {
+ kfree(rx_qid_to_stats_idx);
+ return;
+ }
+ for (rx_pkts = 0, rx_bytes = 0, rx_skb_alloc_fail = 0,
+ rx_buf_alloc_fail = 0, rx_desc_err_dropped_pkt = 0, ring = 0;
ring < priv->rx_cfg.num_queues; ring++) {
if (priv->rx) {
do {
+ struct gve_rx_ring *rx = &priv->rx[ring];
+
start =
u64_stats_fetch_begin(&priv->rx[ring].statss);
- rx_pkts += priv->rx[ring].rpackets;
- rx_bytes += priv->rx[ring].rbytes;
+ tmp_rx_pkts = rx->rpackets;
+ tmp_rx_bytes = rx->rbytes;
+ tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
+ tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
+ tmp_rx_desc_err_dropped_pkt =
+ rx->rx_desc_err_dropped_pkt;
} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
start));
+ rx_pkts += tmp_rx_pkts;
+ rx_bytes += tmp_rx_bytes;
+ rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
+ rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
+ rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
}
}
for (tx_pkts = 0, tx_bytes = 0, ring = 0;
@@ -116,10 +194,12 @@ gve_get_ethtool_stats(struct net_device *netdev,
do {
start =
u64_stats_fetch_begin(&priv->tx[ring].statss);
- tx_pkts += priv->tx[ring].pkt_done;
- tx_bytes += priv->tx[ring].bytes_done;
+ tmp_tx_pkts = priv->tx[ring].pkt_done;
+ tmp_tx_bytes = priv->tx[ring].bytes_done;
} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
start));
+ tx_pkts += tmp_tx_pkts;
+ tx_bytes += tmp_tx_bytes;
}
}
@@ -128,22 +208,102 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = tx_pkts;
data[i++] = rx_bytes;
data[i++] = tx_bytes;
- /* Skip rx_dropped and tx_dropped */
- i += 2;
+ /* total rx dropped packets */
+ data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail +
+ rx_desc_err_dropped_pkt;
+ /* Skip tx_dropped */
+ i++;
+
data[i++] = priv->tx_timeo_cnt;
+ data[i++] = rx_skb_alloc_fail;
+ data[i++] = rx_buf_alloc_fail;
+ data[i++] = rx_desc_err_dropped_pkt;
+ data[i++] = priv->interface_up_cnt;
+ data[i++] = priv->interface_down_cnt;
+ data[i++] = priv->reset_cnt;
+ data[i++] = priv->page_alloc_fail;
+ data[i++] = priv->dma_mapping_error;
+ data[i++] = priv->stats_report_trigger_cnt;
i = GVE_MAIN_STATS_LEN;
+ /* For rx cross-reporting stats, start from nic rx stats in report */
+ base_stats_idx = GVE_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
+ GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
+ max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
+ base_stats_idx;
+ /* Preprocess the stats report for rx, map queue id to start index */
+ skip_nic_stats = false;
+ for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
+ stats_idx += NIC_RX_STATS_REPORT_NUM) {
+ u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
+ u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);
+
+ if (stat_name == 0) {
+ /* no stats written by NIC yet */
+ skip_nic_stats = true;
+ break;
+ }
+ rx_qid_to_stats_idx[queue_id] = stats_idx;
+ }
/* walk RX rings */
if (priv->rx) {
for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
struct gve_rx_ring *rx = &priv->rx[ring];
- data[i++] = rx->cnt;
data[i++] = rx->fill_cnt;
+ data[i++] = rx->cnt;
+ do {
+ start =
+ u64_stats_fetch_begin(&priv->rx[ring].statss);
+ tmp_rx_bytes = rx->rbytes;
+ tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
+ tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
+ tmp_rx_desc_err_dropped_pkt =
+ rx->rx_desc_err_dropped_pkt;
+ } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ start));
+ data[i++] = tmp_rx_bytes;
+ /* rx dropped packets */
+ data[i++] = tmp_rx_skb_alloc_fail +
+ tmp_rx_buf_alloc_fail +
+ tmp_rx_desc_err_dropped_pkt;
+ data[i++] = rx->rx_copybreak_pkt;
+ data[i++] = rx->rx_copied_pkt;
+ /* stats from NIC */
+ if (skip_nic_stats) {
+ /* skip NIC rx stats */
+ i += NIC_RX_STATS_REPORT_NUM;
+ continue;
+ }
+ for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
+ u64 value =
+ be64_to_cpu(report_stats[rx_qid_to_stats_idx[ring] + j].value);
+
+ data[i++] = value;
+ }
}
} else {
i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
}
+
+ /* For tx cross-reporting stats, start from nic tx stats in report */
+ base_stats_idx = max_stats_idx;
+ max_stats_idx = NIC_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
+ max_stats_idx;
+ /* Preprocess the stats report for tx, map queue id to start index */
+ skip_nic_stats = false;
+ for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
+ stats_idx += NIC_TX_STATS_REPORT_NUM) {
+ u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
+ u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);
+
+ if (stat_name == 0) {
+ /* no stats written by NIC yet */
+ skip_nic_stats = true;
+ break;
+ }
+ tx_qid_to_stats_idx[queue_id] = stats_idx;
+ }
/* walk TX rings */
if (priv->tx) {
for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
@@ -151,14 +311,51 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = tx->req;
data[i++] = tx->done;
+ do {
+ start =
+ u64_stats_fetch_begin(&priv->tx[ring].statss);
+ tmp_tx_bytes = tx->bytes_done;
+ } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ start));
+ data[i++] = tmp_tx_bytes;
data[i++] = tx->wake_queue;
data[i++] = tx->stop_queue;
data[i++] = be32_to_cpu(gve_tx_load_event_counter(priv,
tx));
+ /* stats from NIC */
+ if (skip_nic_stats) {
+ /* skip NIC tx stats */
+ i += NIC_TX_STATS_REPORT_NUM;
+ continue;
+ }
+ for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
+ u64 value =
+ be64_to_cpu(report_stats[tx_qid_to_stats_idx[ring] + j].value);
+ data[i++] = value;
+ }
}
} else {
i += priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS;
}
+
+ kfree(rx_qid_to_stats_idx);
+ kfree(tx_qid_to_stats_idx);
+ /* AQ Stats */
+ data[i++] = priv->adminq_prod_cnt;
+ data[i++] = priv->adminq_cmd_fail;
+ data[i++] = priv->adminq_timeouts;
+ data[i++] = priv->adminq_describe_device_cnt;
+ data[i++] = priv->adminq_cfg_device_resources_cnt;
+ data[i++] = priv->adminq_register_page_list_cnt;
+ data[i++] = priv->adminq_unregister_page_list_cnt;
+ data[i++] = priv->adminq_create_tx_queue_cnt;
+ data[i++] = priv->adminq_create_rx_queue_cnt;
+ data[i++] = priv->adminq_destroy_tx_queue_cnt;
+ data[i++] = priv->adminq_destroy_rx_queue_cnt;
+ data[i++] = priv->adminq_dcfg_device_resources_cnt;
+ data[i++] = priv->adminq_set_driver_parameter_cnt;
+ data[i++] = priv->adminq_report_stats_cnt;
+ data[i++] = priv->adminq_report_link_speed_cnt;
}
static void gve_get_channels(struct net_device *netdev,
@@ -230,6 +427,95 @@ static int gve_user_reset(struct net_device *netdev, u32 *flags)
return -EOPNOTSUPP;
}
+static int gve_get_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *etuna, void *value)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ switch (etuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)value = priv->rx_copybreak;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int gve_set_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *etuna,
+ const void *value)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ u32 len;
+
+ switch (etuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ len = *(u32 *)value;
+ if (len > PAGE_SIZE / 2)
+ return -EINVAL;
+ priv->rx_copybreak = len;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static u32 gve_get_priv_flags(struct net_device *netdev)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ u32 ret_flags = 0;
+
+ /* Only one flag exists currently: report-stats (BIT(0)), so set that flag. */
+ if (priv->ethtool_flags & BIT(0))
+ ret_flags |= BIT(0);
+ return ret_flags;
+}
+
+static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ u64 ori_flags, new_flags;
+
+ ori_flags = READ_ONCE(priv->ethtool_flags);
+ new_flags = ori_flags;
+
+ /* Only one priv flag exists: report-stats (BIT(0)) */
+ if (flags & BIT(0))
+ new_flags |= BIT(0);
+ else
+ new_flags &= ~(BIT(0));
+ priv->ethtool_flags = new_flags;
+ /* start report-stats timer when user turns report stats on. */
+ if (flags & BIT(0)) {
+ mod_timer(&priv->stats_report_timer,
+ round_jiffies(jiffies +
+ msecs_to_jiffies(priv->stats_report_timer_period)));
+ }
+ /* Zero off gve stats when report-stats turned off and
+ * delete report stats timer.
+ */
+ if (!(flags & BIT(0)) && (ori_flags & BIT(0))) {
+ int tx_stats_num = GVE_TX_STATS_REPORT_NUM *
+ priv->tx_cfg.num_queues;
+ int rx_stats_num = GVE_RX_STATS_REPORT_NUM *
+ priv->rx_cfg.num_queues;
+
+ memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) *
+ sizeof(struct stats));
+ del_timer_sync(&priv->stats_report_timer);
+ }
+ return 0;
+}
+
+static int gve_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ int err = gve_adminq_report_link_speed(priv);
+
+ cmd->base.speed = priv->link_speed;
+ return err;
+}
+
const struct ethtool_ops gve_ethtool_ops = {
.get_drvinfo = gve_get_drvinfo,
.get_strings = gve_get_strings,
@@ -242,4 +528,9 @@ const struct ethtool_ops gve_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_ringparam = gve_get_ringparam,
.reset = gve_user_reset,
+ .get_tunable = gve_get_tunable,
+ .set_tunable = gve_set_tunable,
+ .get_priv_flags = gve_get_priv_flags,
+ .set_priv_flags = gve_set_priv_flags,
+ .get_link_ksettings = gve_get_link_ksettings
};
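A subtlety in gve_get_ethtool_stats() above: the NIC may write its per-queue record blocks into the shared report in any queue order, so before walking the rings the driver scans those blocks and maps each queue_id to its starting index (rx_qid_to_stats_idx/tx_qid_to_stats_idx); a stat_name of 0 means the NIC has not written yet, and the NIC columns are skipped. The base offsets are plain arithmetic over the gve.h constants; assuming 4 tx and 4 rx queues for illustration:

	/* Driver-written records fill the front of the report. */
	base_stats_idx = GVE_TX_STATS_REPORT_NUM * 4 + GVE_RX_STATS_REPORT_NUM * 4; /* 5*4 + 2*4 = 28 */
	max_stats_idx = NIC_RX_STATS_REPORT_NUM * 4 + base_stats_idx; /* 4*4 + 28 = 44 */
	/* NIC rx records occupy indices 28..43, four per queue; NIC tx
	 * records would start at 44, but NIC_TX_STATS_REPORT_NUM is 0.
	 */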
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index e032563ceefd..48a433154ce0 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -78,6 +78,66 @@ static void gve_free_counter_array(struct gve_priv *priv)
priv->counter_array = NULL;
}
+/* NIC requests to report stats */
+static void gve_stats_report_task(struct work_struct *work)
+{
+ struct gve_priv *priv = container_of(work, struct gve_priv,
+ stats_report_task);
+ if (gve_get_do_report_stats(priv)) {
+ gve_handle_report_stats(priv);
+ gve_clear_do_report_stats(priv);
+ }
+}
+
+static void gve_stats_report_schedule(struct gve_priv *priv)
+{
+ if (!gve_get_probe_in_progress(priv) &&
+ !gve_get_reset_in_progress(priv)) {
+ gve_set_do_report_stats(priv);
+ queue_work(priv->gve_wq, &priv->stats_report_task);
+ }
+}
+
+static void gve_stats_report_timer(struct timer_list *t)
+{
+ struct gve_priv *priv = from_timer(priv, t, stats_report_timer);
+
+ mod_timer(&priv->stats_report_timer,
+ round_jiffies(jiffies +
+ msecs_to_jiffies(priv->stats_report_timer_period)));
+ gve_stats_report_schedule(priv);
+}
+
+static int gve_alloc_stats_report(struct gve_priv *priv)
+{
+ int tx_stats_num, rx_stats_num;
+
+ tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
+ priv->tx_cfg.num_queues;
+ rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
+ priv->rx_cfg.num_queues;
+ priv->stats_report_len = sizeof(struct gve_stats_report) +
+ (tx_stats_num + rx_stats_num) *
+ sizeof(struct stats);
+ priv->stats_report =
+ dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
+ &priv->stats_report_bus, GFP_KERNEL);
+ if (!priv->stats_report)
+ return -ENOMEM;
+ /* Set up timer for the report-stats task */
+ timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
+ priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
+ return 0;
+}
+
+static void gve_free_stats_report(struct gve_priv *priv)
+{
+ del_timer_sync(&priv->stats_report_timer);
+ dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
+ priv->stats_report, priv->stats_report_bus);
+ priv->stats_report = NULL;
+}
+
static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
{
struct gve_priv *priv = arg;
@@ -270,6 +330,9 @@ static int gve_setup_device_resources(struct gve_priv *priv)
err = gve_alloc_notify_blocks(priv);
if (err)
goto abort_with_counter;
+ err = gve_alloc_stats_report(priv);
+ if (err)
+ goto abort_with_ntfy_blocks;
err = gve_adminq_configure_device_resources(priv,
priv->counter_array_bus,
priv->num_event_counters,
@@ -279,10 +342,18 @@ static int gve_setup_device_resources(struct gve_priv *priv)
dev_err(&priv->pdev->dev,
"could not setup device_resources: err=%d\n", err);
err = -ENXIO;
- goto abort_with_ntfy_blocks;
+ goto abort_with_stats_report;
}
+ err = gve_adminq_report_stats(priv, priv->stats_report_len,
+ priv->stats_report_bus,
+ GVE_STATS_REPORT_TIMER_PERIOD);
+ if (err)
+ dev_err(&priv->pdev->dev,
+ "Failed to report stats: err=%d\n", err);
gve_set_device_resources_ok(priv);
return 0;
+abort_with_stats_report:
+ gve_free_stats_report(priv);
abort_with_ntfy_blocks:
gve_free_notify_blocks(priv);
abort_with_counter:
@@ -298,6 +369,13 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
/* Tell device its resources are being freed */
if (gve_get_device_resources_ok(priv)) {
+ /* detach the stats report */
+ err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
+ if (err) {
+ dev_err(&priv->pdev->dev,
+ "Failed to detach stats report: err=%d\n", err);
+ gve_trigger_reset(priv);
+ }
err = gve_adminq_deconfigure_device_resources(priv);
if (err) {
dev_err(&priv->pdev->dev,
@@ -308,6 +386,7 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
}
gve_free_counter_array(priv);
gve_free_notify_blocks(priv);
+ gve_free_stats_report(priv);
gve_clear_device_resources_ok(priv);
}
@@ -371,36 +450,37 @@ static int gve_create_rings(struct gve_priv *priv)
int err;
int i;
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
- err = gve_adminq_create_tx_queue(priv, i);
- if (err) {
- netif_err(priv, drv, priv->dev, "failed to create tx queue %d\n",
- i);
- /* This failure will trigger a reset - no need to clean
- * up
- */
- return err;
- }
- netif_dbg(priv, drv, priv->dev, "created tx queue %d\n", i);
+ err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
+ if (err) {
+ netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
+ priv->tx_cfg.num_queues);
+ /* This failure will trigger a reset - no need to clean
+ * up
+ */
+ return err;
}
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- err = gve_adminq_create_rx_queue(priv, i);
- if (err) {
- netif_err(priv, drv, priv->dev, "failed to create rx queue %d\n",
- i);
- /* This failure will trigger a reset - no need to clean
- * up
- */
- return err;
- }
- /* Rx data ring has been prefilled with packet buffers at
- * queue allocation time.
- * Write the doorbell to provide descriptor slots and packet
- * buffers to the NIC.
+ netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
+ priv->tx_cfg.num_queues);
+
+ err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
+ if (err) {
+ netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
+ priv->rx_cfg.num_queues);
+ /* This failure will trigger a reset - no need to clean
+ * up
*/
- gve_rx_write_doorbell(priv, &priv->rx[i]);
- netif_dbg(priv, drv, priv->dev, "created rx queue %d\n", i);
+ return err;
}
+ netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
+ priv->rx_cfg.num_queues);
+
+ /* Rx data ring has been prefilled with packet buffers at queue
+ * allocation time.
+ * Write the doorbell to provide descriptor slots and packet buffers
+ * to the NIC.
+ */
+ for (i = 0; i < priv->rx_cfg.num_queues; i++)
+ gve_rx_write_doorbell(priv, &priv->rx[i]);
return 0;
}
@@ -458,34 +538,23 @@ free_tx:
static int gve_destroy_rings(struct gve_priv *priv)
{
int err;
- int i;
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
- err = gve_adminq_destroy_tx_queue(priv, i);
- if (err) {
- netif_err(priv, drv, priv->dev,
- "failed to destroy tx queue %d\n",
- i);
- /* This failure will trigger a reset - no need to clean
- * up
- */
- return err;
- }
- netif_dbg(priv, drv, priv->dev, "destroyed tx queue %d\n", i);
+ err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "failed to destroy tx queues\n");
+ /* This failure will trigger a reset - no need to clean up */
+ return err;
}
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- err = gve_adminq_destroy_rx_queue(priv, i);
- if (err) {
- netif_err(priv, drv, priv->dev,
- "failed to destroy rx queue %d\n",
- i);
- /* This failure will trigger a reset - no need to clean
- * up
- */
- return err;
- }
- netif_dbg(priv, drv, priv->dev, "destroyed rx queue %d\n", i);
+ netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
+ err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "failed to destroy rx queues\n");
+ /* This failure will trigger a reset - no need to clean up */
+ return err;
}
+ netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
return 0;
}
@@ -514,14 +583,18 @@ static void gve_free_rings(struct gve_priv *priv)
}
}
-int gve_alloc_page(struct device *dev, struct page **page, dma_addr_t *dma,
+int gve_alloc_page(struct gve_priv *priv, struct device *dev,
+ struct page **page, dma_addr_t *dma,
enum dma_data_direction dir)
{
*page = alloc_page(GFP_KERNEL);
- if (!*page)
+ if (!*page) {
+ priv->page_alloc_fail++;
return -ENOMEM;
+ }
*dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
if (dma_mapping_error(dev, *dma)) {
+ priv->dma_mapping_error++;
put_page(*page);
return -ENOMEM;
}
@@ -556,7 +629,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
return -ENOMEM;
for (i = 0; i < pages; i++) {
- err = gve_alloc_page(&priv->pdev->dev, &qpl->pages[i],
+ err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
&qpl->page_buses[i],
gve_qpl_dma_dir(priv, id));
/* caller handles clean up */
@@ -695,8 +768,14 @@ static int gve_open(struct net_device *dev)
goto reset;
gve_set_device_rings_ok(priv);
+ if (gve_get_report_stats(priv))
+ mod_timer(&priv->stats_report_timer,
+ round_jiffies(jiffies +
+ msecs_to_jiffies(priv->stats_report_timer_period)));
+
gve_turnup(priv);
- netif_carrier_on(dev);
+ queue_work(priv->gve_wq, &priv->service_task);
+ priv->interface_up_cnt++;
return 0;
free_rings:
@@ -735,9 +814,11 @@ static int gve_close(struct net_device *dev)
goto err;
gve_clear_device_rings_ok(priv);
}
+ del_timer_sync(&priv->stats_report_timer);
gve_free_rings(priv);
gve_free_qpls(priv);
+ priv->interface_down_cnt++;
return 0;
err:
@@ -817,6 +898,7 @@ static void gve_turndown(struct gve_priv *priv)
netif_tx_disable(priv->dev);
gve_clear_napi_enabled(priv);
+ gve_clear_report_stats(priv);
}
static void gve_turnup(struct gve_priv *priv)
@@ -867,6 +949,10 @@ static void gve_handle_status(struct gve_priv *priv, u32 status)
dev_info(&priv->pdev->dev, "Device requested reset.\n");
gve_set_do_reset(priv);
}
+ if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
+ priv->stats_report_trigger_cnt++;
+ gve_set_do_report_stats(priv);
+ }
}
static void gve_handle_reset(struct gve_priv *priv)
@@ -885,16 +971,95 @@ static void gve_handle_reset(struct gve_priv *priv)
}
}
-/* Handle NIC status register changes and reset requests */
+void gve_handle_report_stats(struct gve_priv *priv)
+{
+ int idx, stats_idx = 0;
+ u64 tx_bytes;
+ unsigned int start = 0;
+ struct stats *stats = priv->stats_report->stats;
+
+ if (!gve_get_report_stats(priv))
+ return;
+
+ be64_add_cpu(&priv->stats_report->written_count, 1);
+ /* tx stats */
+ if (priv->tx) {
+ for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
+ do {
+ start = u64_stats_fetch_begin(&priv->tx[idx].statss);
+ tx_bytes = priv->tx[idx].bytes_done;
+ } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(TX_WAKE_CNT),
+ .value = cpu_to_be64(priv->tx[idx].wake_queue),
+ .queue_id = cpu_to_be32(idx),
+ };
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(TX_STOP_CNT),
+ .value = cpu_to_be64(priv->tx[idx].stop_queue),
+ .queue_id = cpu_to_be32(idx),
+ };
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(TX_FRAMES_SENT),
+ .value = cpu_to_be64(priv->tx[idx].req),
+ .queue_id = cpu_to_be32(idx),
+ };
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(TX_BYTES_SENT),
+ .value = cpu_to_be64(tx_bytes),
+ .queue_id = cpu_to_be32(idx),
+ };
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
+ .value = cpu_to_be64(priv->tx[idx].done),
+ .queue_id = cpu_to_be32(idx),
+ };
+ }
+ }
+ /* rx stats */
+ if (priv->rx) {
+ for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
+ .value = cpu_to_be64(priv->rx[idx].desc.seqno),
+ .queue_id = cpu_to_be32(idx),
+ };
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
+ .value = cpu_to_be64(priv->rx[idx].fill_cnt),
+ .queue_id = cpu_to_be32(idx),
+ };
+ }
+ }
+}
+
+static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
+{
+ if (!gve_get_napi_enabled(priv))
+ return;
+
+ if (link_status == netif_carrier_ok(priv->dev))
+ return;
+
+ if (link_status) {
+ netdev_info(priv->dev, "Device link is up.\n");
+ netif_carrier_on(priv->dev);
+ } else {
+ netdev_info(priv->dev, "Device link is down.\n");
+ netif_carrier_off(priv->dev);
+ }
+}
+
+/* Handle NIC status register changes, reset requests and report stats */
static void gve_service_task(struct work_struct *work)
{
struct gve_priv *priv = container_of(work, struct gve_priv,
service_task);
+ u32 status = ioread32be(&priv->reg_bar0->device_status);
- gve_handle_status(priv,
- ioread32be(&priv->reg_bar0->device_status));
+ gve_handle_status(priv, status);
gve_handle_reset(priv);
+ gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
}
static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
@@ -924,7 +1089,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
priv->dev->max_mtu = PAGE_SIZE;
err = gve_adminq_set_mtu(priv, priv->dev->mtu);
if (err) {
- netif_err(priv, drv, priv->dev, "Could not set mtu");
+ dev_err(&priv->pdev->dev, "Could not set mtu\n");
goto err;
}
}
@@ -964,10 +1129,10 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
priv->rx_cfg.num_queues);
}
- netif_info(priv, drv, priv->dev, "TX queues %d, RX queues %d\n",
- priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
- netif_info(priv, drv, priv->dev, "Max TX queues %d, Max RX queues %d\n",
- priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
+ dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
+ priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
+ dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
+ priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
setup_device:
err = gve_setup_device_resources(priv);
@@ -1047,6 +1212,10 @@ int gve_reset(struct gve_priv *priv, bool attempt_teardown)
/* Set it all back up */
err = gve_reset_recovery(priv, was_up);
gve_clear_reset_in_progress(priv);
+ priv->reset_cnt++;
+ priv->interface_up_cnt = 0;
+ priv->interface_down_cnt = 0;
+ priv->stats_report_trigger_cnt = 0;
return err;
}
@@ -1149,6 +1318,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->db_bar2 = db_bar;
priv->service_task_flags = 0x0;
priv->state_flags = 0x0;
+ priv->ethtool_flags = 0x0;
gve_set_probe_in_progress(priv);
priv->gve_wq = alloc_ordered_workqueue("gve", 0);
@@ -1158,6 +1328,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto abort_with_netdev;
}
INIT_WORK(&priv->service_task, gve_service_task);
+ INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
priv->tx_cfg.max_queues = max_tx_queues;
priv->rx_cfg.max_queues = max_rx_queues;
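The report-stats machinery added in this file follows the usual split between atomic and process context: the timer callback only re-arms itself and queues lightweight work, and the work item performs the (possibly sleeping) adminq traffic. A condensed sketch combining gve_stats_report_timer() and gve_stats_report_schedule() from above, for illustration only:

static void sketch_stats_report_timer(struct timer_list *t)
{
	struct gve_priv *priv = from_timer(priv, t, stats_report_timer);

	/* Re-arm on a round jiffy boundary, then defer the real work:
	 * adminq commands may sleep and must not run in timer context.
	 */
	mod_timer(&priv->stats_report_timer,
		  round_jiffies(jiffies +
				msecs_to_jiffies(priv->stats_report_timer_period)));
	if (!gve_get_probe_in_progress(priv) &&
	    !gve_get_reset_in_progress(priv)) {
		gve_set_do_report_stats(priv);
		queue_work(priv->gve_wq, &priv->stats_report_task);
	}
}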
diff --git a/drivers/net/ethernet/google/gve/gve_register.h b/drivers/net/ethernet/google/gve/gve_register.h
index 84ab8893aadd..fb655463c357 100644
--- a/drivers/net/ethernet/google/gve/gve_register.h
+++ b/drivers/net/ethernet/google/gve/gve_register.h
@@ -23,5 +23,6 @@ struct gve_registers {
enum gve_device_status_flags {
GVE_DEVICE_STATUS_RESET_MASK = BIT(1),
GVE_DEVICE_STATUS_LINK_STATUS_MASK = BIT(2),
+ GVE_DEVICE_STATUS_REPORT_STATS_MASK = BIT(3),
};
#endif /* _GVE_REGISTER_H_ */
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 9f52e72ff641..008fa897a3e6 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -225,7 +225,8 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
return PKT_HASH_TYPE_L2;
}
-static struct sk_buff *gve_rx_copy(struct net_device *dev,
+static struct sk_buff *gve_rx_copy(struct gve_rx_ring *rx,
+ struct net_device *dev,
struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info,
u16 len)
@@ -242,6 +243,11 @@ static struct sk_buff *gve_rx_copy(struct net_device *dev,
skb_copy_to_linear_data(skb, va, len);
skb->protocol = eth_type_trans(skb, dev);
+
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_copied_pkt++;
+ u64_stats_update_end(&rx->statss);
+
return skb;
}
@@ -284,8 +290,12 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
u16 len;
/* drop this packet */
- if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR))
+ if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR)) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_desc_err_dropped_pkt++;
+ u64_stats_update_end(&rx->statss);
return true;
+ }
len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
page_info = &rx->data.page_info[idx];
@@ -300,11 +310,14 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
if (PAGE_SIZE == 4096) {
if (len <= priv->rx_copybreak) {
/* Just copy small packets */
- skb = gve_rx_copy(dev, napi, page_info, len);
+ skb = gve_rx_copy(rx, dev, napi, page_info, len);
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_copybreak_pkt++;
+ u64_stats_update_end(&rx->statss);
goto have_skb;
}
if (unlikely(!gve_can_recycle_pages(dev))) {
- skb = gve_rx_copy(dev, napi, page_info, len);
+ skb = gve_rx_copy(rx, dev, napi, page_info, len);
goto have_skb;
}
pagecount = page_count(page_info->page);
@@ -314,8 +327,12 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
* stack.
*/
skb = gve_rx_add_frags(dev, napi, page_info, len);
- if (!skb)
+ if (!skb) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_skb_alloc_fail++;
+ u64_stats_update_end(&rx->statss);
return true;
+ }
/* Make sure the kernel stack can't release the page */
get_page(page_info->page);
/* "flip" to other packet buffer on this page */
@@ -324,21 +341,25 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
/* We have previously passed the other half of this
* page up the stack, but it has not yet been freed.
*/
- skb = gve_rx_copy(dev, napi, page_info, len);
+ skb = gve_rx_copy(rx, dev, napi, page_info, len);
} else {
WARN(pagecount < 1, "Pagecount should never be < 1");
return false;
}
} else {
- skb = gve_rx_copy(dev, napi, page_info, len);
+ skb = gve_rx_copy(rx, dev, napi, page_info, len);
}
have_skb:
/* We didn't manage to allocate an skb but we haven't had any
* reset worthy failures.
*/
- if (!skb)
+ if (!skb) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_skb_alloc_fail++;
+ u64_stats_update_end(&rx->statss);
return true;
+ }
if (likely(feat & NETIF_F_RXCSUM)) {
/* NIC passes up the partial sum */