virtio: add debug/performance stats to network driver

Bring the interface down with ifconfig, or remove the module, to trigger the statistics dump.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
 drivers/net/virtio_net.c |  108 ++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 97 insertions(+), 11 deletions(-)

diff -r f9e16244d300 drivers/net/virtio_net.c
--- a/drivers/net/virtio_net.c	Thu Apr 24 12:17:51 2008 +1000
+++ b/drivers/net/virtio_net.c	Thu Apr 24 12:22:24 2008 +1000
@@ -51,6 +51,33 @@ struct virtnet_info
 	/* Receive & send queues. */
 	struct sk_buff_head recv;
 	struct sk_buff_head send;
+
+	struct {
+		unsigned int kfree_xmit_skb;
+		unsigned int free_old_xmit_skbs;
+		unsigned int last_xmit_send;
+		unsigned int last_xmit_fail;
+		unsigned int sendq_kicks;
+		unsigned int sendq_packets;
+		unsigned int sendq_partial_csum;
+		unsigned int sendq_gso;
+		unsigned int sendq_sglen;
+		unsigned int sendq_full;
+		unsigned int sendq_disabled;
+		unsigned int sendq_enabled;
+		unsigned int sendq_enable_morework;
+		unsigned int sendq_cancelled;
+		unsigned int recvq_packets;
+		unsigned int recvq_refills;
+		unsigned int recvq_disabled;
+		unsigned int recvq_enabled;
+		unsigned int recvq_enable_morework;
+		unsigned int recvq_reschedule_failed;
+		unsigned int recvq_partial_csum;
+		unsigned int recvq_gso;
+		unsigned int recvq_kicks;
+		unsigned int poll;
+	} stats;
 };
 
 static inline struct virtio_net_hdr *skb_vnet_hdr(struct sk_buff *skb)
@@ -69,15 +96,19 @@ static void skb_xmit_done(struct virtque
 
 	/* Suppress further interrupts. */
 	svq->vq_ops->disable_cb(svq);
+	vi->stats.sendq_disabled++;
 	/* We were waiting for more output buffers. */
 	netif_wake_queue(vi->dev);
 }
 
-static void receive_skb(struct net_device *dev, struct sk_buff *skb,
+static void receive_skb(struct virtnet_info *vi, struct sk_buff *skb,
 			unsigned len)
 {
 	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
+	struct net_device *dev = vi->dev;
 	int err;
+
+	vi->stats.recvq_packets++;
 
 	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
 		pr_debug("%s: short packet %i\n", dev->name, len);
@@ -101,12 +132,14 @@ static void receive_skb(struct net_devic
 	dev->stats.rx_packets++;
 
 	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+		vi->stats.recvq_partial_csum++;
 		pr_debug("Needs csum!\n");
 		if (!skb_partial_csum_set(skb,hdr->csum_start,hdr->csum_offset))
 			goto frame_err;
 	}
 
 	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+		vi->stats.recvq_gso++;
 		pr_debug("GSO!\n");
 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
 		case VIRTIO_NET_HDR_GSO_TCPV4:
@@ -121,7 +154,7 @@ static void receive_skb(struct net_devic
 		default:
 			if (net_ratelimit())
 				printk(KERN_WARNING "%s: bad gso type %u.\n",
-				       dev->name, hdr->gso_type);
+				       vi->dev->name, hdr->gso_type);
 			goto frame_err;
 		}
 
@@ -132,7 +165,7 @@ static void receive_skb(struct net_devic
 		if (skb_shinfo(skb)->gso_size == 0) {
 			if (net_ratelimit())
 				printk(KERN_WARNING "%s: zero gso size.\n",
-				       dev->name);
+				       vi->dev->name);
 			goto frame_err;
 		}
 
@@ -145,7 +178,7 @@ static void receive_skb(struct net_devic
 	return;
 
 frame_err:
-	dev->stats.rx_frame_errors++;
+	vi->dev->stats.rx_frame_errors++;
 drop:
 	dev_kfree_skb(skb);
 }
@@ -195,6 +228,7 @@ static void try_fill_recv(struct virtnet
 	}
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
+	vi->stats.recvq_kicks++;
 	vi->rvq->vq_ops->kick(vi->rvq);
 }
 
@@ -204,6 +238,7 @@ static void skb_recv_done(struct virtque
 	/* Schedule NAPI, Suppress further interrupts if successful. */
 	if (netif_rx_schedule_prep(vi->dev, &vi->napi)) {
 		rvq->vq_ops->disable_cb(rvq);
+		vi->stats.recvq_disabled++;
 		__netif_rx_schedule(vi->dev, &vi->napi);
 	}
 }
@@ -214,28 +249,36 @@ static int virtnet_poll(struct napi_stru
 	struct sk_buff *skb = NULL;
 	unsigned int len, received = 0;
 
+	vi->stats.poll++;
 again:
 	while (received < budget &&
 	       (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
 		__skb_unlink(skb, &vi->recv);
-		receive_skb(vi->dev, skb, len);
+		receive_skb(vi, skb, len);
 		vi->num--;
 		received++;
 	}
 
 	/* FIXME: If we oom and completely run out of inbufs, we need
 	 * to start a timer trying to fill more. */
-	if (vi->num < vi->max / 2)
+	if (vi->num < vi->max / 2) {
+		vi->stats.recvq_refills++;
 		try_fill_recv(vi);
+	}
 
 	/* Out of packets? */
 	if (received < budget) {
 		netif_rx_complete(vi->dev, napi);
-		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
-		    && napi_schedule_prep(napi)) {
-			vi->rvq->vq_ops->disable_cb(vi->rvq);
-			__netif_rx_schedule(vi->dev, napi);
-			goto again;
+		vi->stats.recvq_enabled++;
+		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))) {
+			vi->stats.recvq_enable_morework++;
+			if (napi_schedule_prep(napi)) {
+				vi->rvq->vq_ops->disable_cb(vi->rvq);
+				vi->stats.recvq_disabled++;
+				__netif_rx_schedule(vi->dev, napi);
+				goto again;
+			}
+			vi->stats.recvq_reschedule_failed++;
 		}
 	}
 
@@ -247,12 +290,14 @@ static void free_old_xmit_skbs(struct vi
 	struct sk_buff *skb;
 	unsigned int len;
 
+	vi->stats.free_old_xmit_skbs++;
 	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
 		pr_debug("Sent skb %p\n", skb);
 		__skb_unlink(skb, &vi->send);
 		vi->dev->stats.tx_bytes += skb->len;
 		vi->dev->stats.tx_packets++;
 		kfree_skb(skb);
+		vi->stats.kfree_xmit_skb++;
 	}
 }
 
@@ -269,12 +314,14 @@ static int xmit_skb(struct virtnet_info 
 		 dest[0], dest[1], dest[2],
 		 dest[3], dest[4], dest[5]);
 
+	vi->stats.sendq_packets++;
 	/* Encode metadata header at front. */
 	hdr = skb_vnet_hdr(skb);
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
 		hdr->csum_start = skb->csum_start - skb_headroom(skb);
 		hdr->csum_offset = skb->csum_offset;
+		vi->stats.sendq_partial_csum++;
 	} else {
 		hdr->flags = 0;
 		hdr->csum_offset = hdr->csum_start = 0;
@@ -283,6 +330,7 @@ static int xmit_skb(struct virtnet_info 
 	if (skb_is_gso(skb)) {
 		hdr->hdr_len = skb_transport_header(skb) - skb->data;
 		hdr->gso_size = skb_shinfo(skb)->gso_size;
+		vi->stats.sendq_gso++;
 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
 			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
@@ -300,6 +348,7 @@ static int xmit_skb(struct virtnet_info 
 
 	vnet_hdr_to_sg(sg, skb);
 	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
+	vi->stats.sendq_sglen += num;
 
 	return vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
 }
@@ -314,8 +363,11 @@ again:
 
 	/* If we has a buffer left over from last time, send it now. */
 	if (vi->last_xmit_skb) {
+		vi->stats.last_xmit_send++;
 		if (xmit_skb(vi, vi->last_xmit_skb) != 0) {
 			/* Drop this skb: we only queue one. */
+			vi->stats.sendq_full++;
+			vi->stats.last_xmit_fail++;
 			kfree_skb(skb);
 			goto stop_queue;
 		}
@@ -329,6 +381,7 @@ again:
 		goto stop_queue;
 	}
 done:
+	vi->stats.sendq_kicks++;
 	vi->svq->vq_ops->kick(vi->svq);
 	return NETDEV_TX_OK;
 
@@ -338,8 +391,11 @@ stop_queue:
 
 	/* Activate callback for using skbs: if this returns false it
 	 * means some were used in the meantime. */
+	vi->stats.sendq_enabled++;
 	if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+		vi->stats.sendq_enable_morework++;
 		vi->svq->vq_ops->disable_cb(vi->svq);
+		vi->stats.sendq_disabled++;
 		netif_start_queue(dev);
 		goto again;
 	}
@@ -359,6 +415,8 @@ static int virtnet_open(struct net_devic
 {
 	struct virtnet_info *vi = netdev_priv(dev);
 
+	memset(&vi->stats, 0, sizeof(vi->stats));
+
 	napi_enable(&vi->napi);
 
 	/* If all buffers were filled by other side before we napi_enabled, we
@@ -367,6 +425,7 @@ static int virtnet_open(struct net_devic
 	 * We synchronize against interrupts via NAPI_STATE_SCHED */
 	if (netif_rx_schedule_prep(dev, &vi->napi)) {
 		vi->rvq->vq_ops->disable_cb(vi->rvq);
+		vi->stats.recvq_disabled++;
 		__netif_rx_schedule(dev, &vi->napi);
 	}
 	return 0;
@@ -377,6 +436,33 @@ static int virtnet_close(struct net_devi
 	struct virtnet_info *vi = netdev_priv(dev);
 
 	napi_disable(&vi->napi);
+
+	printk("Stats for %s\n", dev->name);
+	printk("kfree_xmit_skb = %u\n",  vi->stats.kfree_xmit_skb);
+	printk("free_old_xmit_skbs = %u\n", vi->stats.free_old_xmit_skbs);
+	printk("last_xmit_send = %u\n", vi->stats.last_xmit_send);
+	printk("last_xmit_fail = %u\n", vi->stats.last_xmit_fail);
+	printk("sendq_kicks = %u\n", vi->stats.sendq_kicks);
+	printk("sendq_packets = %u\n", vi->stats.sendq_packets);
+	printk("sendq_partial_csum = %u\n", vi->stats.sendq_partial_csum);
+	printk("sendq_gso = %u\n", vi->stats.sendq_gso);
+	printk("sendq_sglen = %u\n", vi->stats.sendq_sglen);
+	printk("sendq_full = %u\n", vi->stats.sendq_full);
+	printk("sendq_disabled = %u\n", vi->stats.sendq_disabled);
+	printk("sendq_enabled = %u\n", vi->stats.sendq_enabled);
+	printk("sendq_enable_morework = %u\n", vi->stats.sendq_enable_morework);
+	printk("sendq_cancelled = %u\n", vi->stats.sendq_cancelled);
+	printk("recvq_packets = %u\n", vi->stats.recvq_packets);
+	printk("recvq_refills = %u\n", vi->stats.recvq_refills);
+	printk("recvq_disabled = %u\n", vi->stats.recvq_disabled);
+	printk("recvq_enabled = %u\n", vi->stats.recvq_enabled);
+	printk("recvq_enable_morework = %u\n", vi->stats.recvq_enable_morework);
+	printk("recvq_reschedule_failed = %u\n",
+	       vi->stats.recvq_reschedule_failed);
+	printk("recvq_partial_csum = %u\n", vi->stats.recvq_partial_csum);
+	printk("recvq_gso = %u\n", vi->stats.recvq_gso);
+	printk("recvq_kicks = %u\n", vi->stats.recvq_kicks);
+	printk("poll = %u\n", vi->stats.poll);
 
 	return 0;
 }
