---
 include/linux/netdevice.h |    2 +
 include/linux/skbuff.h    |    3 ++
 net/core/skbuff.c         |   70 ++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 75 insertions(+)

diff -r f08d39abf24c include/linux/netdevice.h
--- a/include/linux/netdevice.h	Wed Jul 30 14:37:09 2008 +1000
+++ b/include/linux/netdevice.h	Thu Jul 31 11:05:54 2008 +1000
@@ -626,6 +626,8 @@ struct net_device
 	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
 
 	struct netdev_queue	rx_queue;
+
+	struct page		*pskb_pages; /* Page cache for alloc_pskb */
 
 	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
 
diff -r f08d39abf24c include/linux/skbuff.h
--- a/include/linux/skbuff.h	Wed Jul 30 14:37:09 2008 +1000
+++ b/include/linux/skbuff.h	Thu Jul 31 11:05:54 2008 +1000
@@ -364,6 +364,9 @@ static inline struct sk_buff *alloc_skb_
 {
 	return __alloc_skb(size, priority, 1, -1);
 }
+
+extern struct sk_buff *alloc_pskb(unsigned int linear, unsigned int nonlinear,
+				  gfp_t priority);
 
 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
 extern struct sk_buff *skb_clone(struct sk_buff *skb,
diff -r f08d39abf24c net/core/skbuff.c
--- a/net/core/skbuff.c	Wed Jul 30 14:37:09 2008 +1000
+++ b/net/core/skbuff.c	Thu Jul 31 11:05:54 2008 +1000
@@ -69,6 +69,9 @@
 
 static struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
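+/* Spare pages for alloc_pskb(), chained through page->private. */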
+static DEFINE_SPINLOCK(pskb_pages_lock);
+static struct page *pskb_pages;
 
 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
 				  struct pipe_buffer *buf)
@@ -235,6 +238,73 @@ nodata:
 	skb = NULL;
 	goto out;
 }
+
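+/* Return a page to the global cache; pages are chained via page->private. */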
+static void give_a_page(struct page *p)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pskb_pages_lock, flags);
+	set_page_private(p, (unsigned long)pskb_pages);
+	pskb_pages = p;
+	spin_unlock_irqrestore(&pskb_pages_lock, flags);
+}
+
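+/* Take a page from the cache, or fall back to alloc_page() if it is empty. */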
+static struct page *get_a_page(gfp_t priority)
+{
+	unsigned long flags;
+	struct page *p;
+
+	spin_lock_irqsave(&pskb_pages_lock, flags);
+	p = pskb_pages;
+	if (p)
+		pskb_pages = (struct page *)page_private(p);
+	spin_unlock_irqrestore(&pskb_pages_lock, flags);
+
+	if (!p)
+		p = alloc_page(priority);
+	return p;
+}
+
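+/*
+ * alloc_pskb - allocate an skb with @linear bytes of linear data plus
+ * pages from the cache covering @nonlinear bytes of frag space.
+ */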
+struct sk_buff *alloc_pskb(unsigned int linear, unsigned int nonlinear,
+			   gfp_t priority)
+{
+	struct sk_buff *skb = alloc_skb(linear, priority);
+	unsigned int i;
+
+	if (unlikely(!skb))
+		return NULL;
+
+	BUG_ON(nonlinear > MAX_SKB_FRAGS * PAGE_SIZE);
+
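+	/* Attach just enough pages to cover the nonlinear area. */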
+	for (i = 0; nonlinear; i++) {
+		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+		unsigned int size = min_t(unsigned int, nonlinear, PAGE_SIZE);
+
+		f->page = get_a_page(priority);
+		if (unlikely(!f->page)) {
+			kfree_skb(skb);
+			return NULL;
+		}
+
+		f->page_offset = 0;
+		f->size = size;
+
+		skb->data_len += size;
+		skb->len += size;
+		nonlinear -= size;
+
+		skb_shinfo(skb)->nr_frags++;
+	}
+	return skb;
+}
+EXPORT_SYMBOL(alloc_pskb);
 
 /**
  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
