virtio: add debug/performance stats to network driver

These counters are for debugging and benchmarking only: they are zeroed in virtnet_open() and dumped to the kernel log from virtnet_close(). ifconfig the interface down or remove the module to get the statistics dump.
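
For example, on a guest where the virtio interface happens to be eth0 (the
name is only an illustration):

    ifconfig eth0 down    # calls virtnet_close(), which printk()s the counters
    dmesg | tail -n 23    # one header line plus 22 counters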
---
 drivers/net/virtio_net.c |  103 +++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 92 insertions(+), 11 deletions(-)

diff -r 75fdfd465949 drivers/net/virtio_net.c
--- a/drivers/net/virtio_net.c	Thu Apr 10 15:56:39 2008 +1000
+++ b/drivers/net/virtio_net.c	Thu Apr 10 15:58:13 2008 +1000
@@ -47,6 +47,31 @@ struct virtnet_info
 	/* Receive & send queues. */
 	struct sk_buff_head recv;
 	struct sk_buff_head send;
+
+	struct {
+		unsigned int kfree_xmit_skb;
+		unsigned int free_old_xmit_skbs;
+		unsigned int sendq_kicks;
+		unsigned int sendq_packets;
+		unsigned int sendq_partial_csum;
+		unsigned int sendq_gso;
+		unsigned int sendq_sglen;
+		unsigned int sendq_full;
+		unsigned int sendq_disabled;
+		unsigned int sendq_enabled;
+		unsigned int sendq_enable_morework;
+		unsigned int sendq_cancelled;
+		unsigned int recvq_packets;
+		unsigned int recvq_refills;
+		unsigned int recvq_disabled;
+		unsigned int recvq_enabled;
+		unsigned int recvq_enable_morework;
+		unsigned int recvq_reschedule_failed;
+		unsigned int recvq_partial_csum;
+		unsigned int recvq_gso;
+		unsigned int recvq_kicks;
+		unsigned int poll;
+	} stats;
 };
 
 static inline struct virtio_net_hdr *skb_vnet_hdr(struct sk_buff *skb)
@@ -65,14 +90,18 @@ static void skb_xmit_done(struct virtque
 
 	/* Suppress further interrupts. */
 	svq->vq_ops->disable_cb(svq);
+	vi->stats.sendq_disabled++;
 	/* We were waiting for more output buffers. */
 	netif_wake_queue(vi->dev);
 }
 
-static void receive_skb(struct net_device *dev, struct sk_buff *skb,
+static void receive_skb(struct virtnet_info *vi, struct sk_buff *skb,
 			unsigned len)
 {
 	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
+	struct net_device *dev = vi->dev;
+
+	vi->stats.recvq_packets++;
 
 	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
 		pr_debug("%s: short packet %i\n", dev->name, len);
@@ -90,12 +119,14 @@ static void receive_skb(struct net_devic
 	dev->stats.rx_packets++;
 
 	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+		vi->stats.recvq_partial_csum++;
 		pr_debug("Needs csum!\n");
 		if (!skb_partial_csum_set(skb,hdr->csum_start,hdr->csum_offset))
 			goto frame_err;
 	}
 
 	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+		vi->stats.recvq_gso++;
 		pr_debug("GSO!\n");
 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
 		case VIRTIO_NET_HDR_GSO_TCPV4:
@@ -110,7 +141,7 @@ static void receive_skb(struct net_devic
 		default:
 			if (net_ratelimit())
 				printk(KERN_WARNING "%s: bad gso type %u.\n",
-				       dev->name, hdr->gso_type);
+				       vi->dev->name, hdr->gso_type);
 			goto frame_err;
 		}
 
@@ -121,7 +152,7 @@ static void receive_skb(struct net_devic
 		if (skb_shinfo(skb)->gso_size == 0) {
 			if (net_ratelimit())
 				printk(KERN_WARNING "%s: zero gso size.\n",
-				       dev->name);
+				       vi->dev->name);
 			goto frame_err;
 		}
 
@@ -134,7 +165,7 @@ static void receive_skb(struct net_devic
 	return;
 
 frame_err:
-	dev->stats.rx_frame_errors++;
+	vi->dev->stats.rx_frame_errors++;
 drop:
 	dev_kfree_skb(skb);
 }
@@ -166,6 +197,7 @@ static void try_fill_recv(struct virtnet
 	}
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
+	vi->stats.recvq_kicks++;
 	vi->rvq->vq_ops->kick(vi->rvq);
 }
 
@@ -175,6 +207,7 @@ static void skb_recv_done(struct virtque
 	/* Schedule NAPI, Suppress further interrupts if successful. */
 	if (netif_rx_schedule_prep(vi->dev, &vi->napi)) {
 		rvq->vq_ops->disable_cb(rvq);
+		vi->stats.recvq_disabled++;
 		__netif_rx_schedule(vi->dev, &vi->napi);
 	}
 }
@@ -185,28 +218,36 @@ static int virtnet_poll(struct napi_stru
 	struct sk_buff *skb = NULL;
 	unsigned int len, received = 0;
 
+	vi->stats.poll++;
 again:
 	while (received < budget &&
 	       (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
 		__skb_unlink(skb, &vi->recv);
-		receive_skb(vi->dev, skb, len);
+		receive_skb(vi, skb, len);
 		vi->num--;
 		received++;
 	}
 
 	/* FIXME: If we oom and completely run out of inbufs, we need
 	 * to start a timer trying to fill more. */
-	if (vi->num < vi->max / 2)
+	if (vi->num < vi->max / 2) {
+		vi->stats.recvq_refills++;
 		try_fill_recv(vi);
+	}
 
 	/* Out of packets? */
 	if (received < budget) {
 		netif_rx_complete(vi->dev, napi);
-		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
-		    && napi_schedule_prep(napi)) {
-			vi->rvq->vq_ops->disable_cb(vi->rvq);
-			__netif_rx_schedule(vi->dev, napi);
-			goto again;
+		vi->stats.recvq_enabled++;
+		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))) {
+			vi->stats.recvq_enable_morework++;
+			if (napi_schedule_prep(napi)) {
+				vi->rvq->vq_ops->disable_cb(vi->rvq);
+				vi->stats.recvq_disabled++;
+				__netif_rx_schedule(vi->dev, napi);
+				goto again;
+			}
+			vi->stats.recvq_reschedule_failed++;
 		}
 	}
 
@@ -218,12 +259,14 @@ static void free_old_xmit_skbs(struct vi
 	struct sk_buff *skb;
 	unsigned int len;
 
+	vi->stats.free_old_xmit_skbs++;
 	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
 		pr_debug("Sent skb %p\n", skb);
 		__skb_unlink(skb, &vi->send);
 		vi->dev->stats.tx_bytes += len;
 		vi->dev->stats.tx_packets++;
 		kfree_skb(skb);
+		vi->stats.kfree_xmit_skb++;
 	}
 }
 
@@ -240,12 +283,14 @@ static int start_xmit(struct sk_buff *sk
 
 	pr_debug("%s: xmit %p %s\n", dev->name, skb, print_mac(mac, dest));
 
+	vi->stats.sendq_packets++;
 	/* Encode metadata header at front. */
 	hdr = skb_vnet_hdr(skb);
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
 		hdr->csum_start = skb->csum_start - skb_headroom(skb);
 		hdr->csum_offset = skb->csum_offset;
+		vi->stats.sendq_partial_csum++;
 	} else {
 		hdr->flags = 0;
 		hdr->csum_offset = hdr->csum_start = 0;
@@ -254,6 +299,7 @@ static int start_xmit(struct sk_buff *sk
 	if (skb_is_gso(skb)) {
 		hdr->hdr_len = skb_transport_header(skb) - skb->data;
 		hdr->gso_size = skb_shinfo(skb)->gso_size;
+		vi->stats.sendq_gso++;
 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
 			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
@@ -272,19 +318,25 @@ static int start_xmit(struct sk_buff *sk
 	vnet_hdr_to_sg(sg, skb);
 	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
 	__skb_queue_head(&vi->send, skb);
+	vi->stats.sendq_sglen += num;
 
 again:
 	/* Free up any pending old buffers before queueing new ones. */
 	free_old_xmit_skbs(vi);
 	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
 	if (err) {
+		vi->stats.sendq_full++;
+
 		pr_debug("%s: virtio not prepared to send\n", dev->name);
 		netif_stop_queue(dev);
 
 		/* Activate callback for using skbs: if this returns false it
 		 * means some were used in the meantime. */
+		vi->stats.sendq_enabled++;
 		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+			vi->stats.sendq_enable_morework++;
 			vi->svq->vq_ops->disable_cb(vi->svq);
+			vi->stats.sendq_disabled++;
 			netif_start_queue(dev);
 			goto again;
 		}
@@ -292,6 +344,7 @@ again:
 
 		return NETDEV_TX_BUSY;
 	}
+	vi->stats.sendq_kicks++;
 	vi->svq->vq_ops->kick(vi->svq);
 
 	return 0;
@@ -310,6 +363,8 @@ static int virtnet_open(struct net_devic
 {
 	struct virtnet_info *vi = netdev_priv(dev);
 
+	memset(&vi->stats, 0, sizeof(vi->stats));
+
 	napi_enable(&vi->napi);
 
 	/* If all buffers were filled by other side before we napi_enabled, we
@@ -318,6 +373,7 @@ static int virtnet_open(struct net_devic
 	 * We synchronize against interrupts via NAPI_STATE_SCHED */
 	if (netif_rx_schedule_prep(dev, &vi->napi)) {
 		vi->rvq->vq_ops->disable_cb(vi->rvq);
+		vi->stats.recvq_disabled++;
 		__netif_rx_schedule(dev, &vi->napi);
 	}
 	return 0;
@@ -328,6 +384,31 @@ static int virtnet_close(struct net_devi
 	struct virtnet_info *vi = netdev_priv(dev);
 
 	napi_disable(&vi->napi);
+
+	printk("Stats for %s\n", dev->name);
+	printk("kfree_xmit_skb = %u\n",  vi->stats.kfree_xmit_skb);
+	printk("free_old_xmit_skbs = %u\n", vi->stats.free_old_xmit_skbs);
+	printk("sendq_kicks = %u\n", vi->stats.sendq_kicks);
+	printk("sendq_packets = %u\n", vi->stats.sendq_packets);
+	printk("sendq_partial_csum = %u\n", vi->stats.sendq_partial_csum);
+	printk("sendq_gso = %u\n", vi->stats.sendq_gso);
+	printk("sendq_sglen = %u\n", vi->stats.sendq_sglen);
+	printk("sendq_full = %u\n", vi->stats.sendq_full);
+	printk("sendq_disabled = %u\n", vi->stats.sendq_disabled);
+	printk("sendq_enabled = %u\n", vi->stats.sendq_enabled);
+	printk("sendq_enable_morework = %u\n", vi->stats.sendq_enable_morework);
+	printk("sendq_cancelled = %u\n", vi->stats.sendq_cancelled);
+	printk("recvq_packets = %u\n", vi->stats.recvq_packets);
+	printk("recvq_refills = %u\n", vi->stats.recvq_refills);
+	printk("recvq_disabled = %u\n", vi->stats.recvq_disabled);
+	printk("recvq_enabled = %u\n", vi->stats.recvq_enabled);
+	printk("recvq_enable_morework = %u\n", vi->stats.recvq_enable_morework);
+	printk("recvq_reschedule_failed = %u\n",
+	       vi->stats.recvq_reschedule_failed);
+	printk("recvq_partial_csum = %u\n", vi->stats.recvq_partial_csum);
+	printk("recvq_gso = %u\n", vi->stats.recvq_gso);
+	printk("recvq_kicks = %u\n", vi->stats.recvq_kicks);
+	printk("poll = %u\n", vi->stats.poll);
 
 	return 0;
 }
