commit 823c8f14d46d488294e0f1444350cbef41f052aa
Author: Michael Young <m.a.young@durham.ac.uk>
Date: Mon Feb 28 23:18:20 2011 +0000
get updated devel/next-2.6.38
kernel.spec | 3 +
xen.pcifront.next-2.6.38.patch |74926 ++++++++++++++++++++--------------------
2 files changed, 37556 insertions(+), 37373 deletions(-)
---
diff --git a/kernel.spec b/kernel.spec
index 7c72fc7..9d227bb 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -1971,6 +1971,9 @@ fi
# and build.
%changelog
+* Mon Feb 28 2011 Michael Young <m.a.young@durham.ac.uk>
+- get updated devel/next-2.6.38
+
* Fri Feb 25 2011 Chuck Ebbert <cebbert@redhat.com> 2.6.38-0.rc6.git6.1
- Linux 2.6.38-rc6-git6
- Build in virtio_pci driver so virtio_console will be built-in (#677713)
diff --git a/xen.pcifront.next-2.6.38.patch b/xen.pcifront.next-2.6.38.patch
index a0e7a79..c80ccc1 100644
--- a/xen.pcifront.next-2.6.38.patch
+++ b/xen.pcifront.next-2.6.38.patch
@@ -1,12591 +1,14558 @@
-From 1e13f505ecbc011465783283ebfa05a42f7ce18f Mon Sep 17 00:00:00 2001
-From: Ian Campbell <ijc@hellion.org.uk>
-Date: Thu, 3 Dec 2009 22:04:06 +0000
-Subject: [PATCH 001/244] xen: export xen_gsi_from_irq, it is required by modular pciback
+From 03c4949992e2b7e84b7cdeb156d803db3f848b6c Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell@citrix.com>
+Date: Mon, 9 Feb 2009 12:05:52 -0800
+Subject: [PATCH 001/203] xen: netback: Initial import of linux-2.6.18-xen.hg netback driver.
+
+This corresponds to 774:107e10e0e07c in that tree.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
-Cc: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
-Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
---
- drivers/xen/events.c | 1 +
- 1 files changed, 1 insertions(+), 0 deletions(-)
+ drivers/xen/Kconfig | 7 +
+ drivers/xen/Makefile | 1 +
+ drivers/xen/netback/Makefile | 3 +
+ drivers/xen/netback/common.h | 217 ++++++
+ drivers/xen/netback/interface.c | 336 ++++++++
+ drivers/xen/netback/netback.c | 1637 +++++++++++++++++++++++++++++++++++++++
+ drivers/xen/netback/xenbus.c | 454 +++++++++++
+ 7 files changed, 2655 insertions(+), 0 deletions(-)
+ create mode 100644 drivers/xen/netback/Makefile
+ create mode 100644 drivers/xen/netback/common.h
+ create mode 100644 drivers/xen/netback/interface.c
+ create mode 100644 drivers/xen/netback/netback.c
+ create mode 100644 drivers/xen/netback/xenbus.c
-diff --git a/drivers/xen/events.c b/drivers/xen/events.c
-index 97612f5..a04da4b 100644
---- a/drivers/xen/events.c
-+++ b/drivers/xen/events.c
-@@ -778,6 +778,7 @@ int xen_gsi_from_irq(unsigned irq)
- {
- return gsi_from_irq(irq);
- }
-+EXPORT_SYMBOL_GPL(xen_gsi_from_irq);
+diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
+index 5a48ce9..7e83d43 100644
+--- a/drivers/xen/Kconfig
++++ b/drivers/xen/Kconfig
+@@ -37,6 +37,13 @@ config XEN_BACKEND
+ Support for backend device drivers that provide I/O services
+ to other virtual machines.
- int xen_irq_from_pirq(unsigned pirq)
- {
---
-1.7.4
-
-
-From f0885b9401a859bc7bed849925a703c03d00119b Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Date: Mon, 8 Nov 2010 14:13:35 -0500
-Subject: [PATCH 002/244] xen/pci: Add xen_[find|register|unregister]_device_domain_owner functions.
-
-Xen PCI backend performs ownership (MSI/MSI-X) changes on behalf of
-the guest. This means we need some mechanism to find, set and unset
-the domain id of the guest.
-
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
----
- arch/x86/include/asm/xen/pci.h | 16 +++++++++
- arch/x86/pci/xen.c | 73 ++++++++++++++++++++++++++++++++++++++++
- 2 files changed, 89 insertions(+), 0 deletions(-)
-
-diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h
-index 2329b3e..8474b4b 100644
---- a/arch/x86/include/asm/xen/pci.h
-+++ b/arch/x86/include/asm/xen/pci.h
-@@ -15,10 +15,26 @@ static inline int pci_xen_hvm_init(void)
- #endif
- #if defined(CONFIG_XEN_DOM0)
- void __init xen_setup_pirqs(void);
-+int xen_find_device_domain_owner(struct pci_dev *dev);
-+int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
-+int xen_unregister_device_domain_owner(struct pci_dev *dev);
- #else
- static inline void __init xen_setup_pirqs(void)
- {
- }
-+static inline int xen_find_device_domain_owner(struct pci_dev *dev)
-+{
-+ return -1;
-+}
-+static inline int xen_register_device_domain_owner(struct pci_dev *dev,
-+ uint16_t domain)
++config XEN_NETDEV_BACKEND
++ bool "Xen backend network device"
++ depends on XEN_BACKEND && NET
++ help
++ Implement the network backend driver, which passes packets
++ from the guest domain's frontend drivers to the network.
++
+ config XENFS
+ tristate "Xen filesystem"
+ default y
+diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
+index 533a199..c0e0509 100644
+--- a/drivers/xen/Makefile
++++ b/drivers/xen/Makefile
+@@ -9,6 +9,7 @@ obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
+ obj-$(CONFIG_XEN_BALLOON) += balloon.o
+ obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o
+ obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o
++obj-$(CONFIG_XEN_NETDEV_BACKEND) += netback/
+ obj-$(CONFIG_XENFS) += xenfs/
+ obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
+ obj-$(CONFIG_XEN_PLATFORM_PCI) += platform-pci.o
+diff --git a/drivers/xen/netback/Makefile b/drivers/xen/netback/Makefile
+new file mode 100644
+index 0000000..f4a0c51
+--- /dev/null
++++ b/drivers/xen/netback/Makefile
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_XEN_NETDEV_BACKEND) := netbk.o
++
++netbk-y := netback.o xenbus.o interface.o
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+new file mode 100644
+index 0000000..9a54d57
+--- /dev/null
++++ b/drivers/xen/netback/common.h
+@@ -0,0 +1,217 @@
++/******************************************************************************
++ * arch/xen/drivers/netif/backend/common.h
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __NETIF__BACKEND__COMMON_H__
++#define __NETIF__BACKEND__COMMON_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/ip.h>
++#include <linux/in.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/wait.h>
++#include <xen/evtchn.h>
++#include <xen/interface/io/netif.h>
++#include <asm/io.h>
++#include <asm/pgalloc.h>
++#include <xen/interface/grant_table.h>
++#include <xen/gnttab.h>
++#include <xen/driver_util.h>
++#include <xen/xenbus.h>
++
++#define DPRINTK(_f, _a...) \
++ pr_debug("(file=%s, line=%d) " _f, \
++ __FILE__ , __LINE__ , ## _a )
++#define IPRINTK(fmt, args...) \
++ printk(KERN_INFO "xen_net: " fmt, ##args)
++#define WPRINTK(fmt, args...) \
++ printk(KERN_WARNING "xen_net: " fmt, ##args)
++
++typedef struct netif_st {
++ /* Unique identifier for this interface. */
++ domid_t domid;
++ unsigned int handle;
++
++ u8 fe_dev_addr[6];
++
++ /* Physical parameters of the comms window. */
++ grant_handle_t tx_shmem_handle;
++ grant_ref_t tx_shmem_ref;
++ grant_handle_t rx_shmem_handle;
++ grant_ref_t rx_shmem_ref;
++ unsigned int irq;
++
++ /* The shared rings and indexes. */
++ netif_tx_back_ring_t tx;
++ netif_rx_back_ring_t rx;
++ struct vm_struct *tx_comms_area;
++ struct vm_struct *rx_comms_area;
++
++ /* Set of features that can be turned on in dev->features. */
++ int features;
++
++ /* Internal feature information. */
++ u8 can_queue:1; /* can queue packets for receiver? */
++ u8 copying_receiver:1; /* copy packets to receiver? */
++
++ /* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
++ RING_IDX rx_req_cons_peek;
++
++ /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
++ unsigned long credit_bytes;
++ unsigned long credit_usec;
++ unsigned long remaining_credit;
++ struct timer_list credit_timeout;
++
++ /* Enforce draining of the transmit queue. */
++ struct timer_list tx_queue_timeout;
++
++ /* Miscellaneous private stuff. */
++ struct list_head list; /* scheduling list */
++ atomic_t refcnt;
++ struct net_device *dev;
++ struct net_device_stats stats;
++
++ unsigned int carrier;
++
++ wait_queue_head_t waiting_to_free;
++} netif_t;
++
++/*
++ * Implement our own carrier flag: the network stack's version causes delays
++ * when the carrier is re-enabled (in particular, dev_activate() may not
++ * immediately be called, which can cause packet loss; also the etherbridge
++ * can be rather lazy in activating its port).
++ */
++#define netback_carrier_on(netif) ((netif)->carrier = 1)
++#define netback_carrier_off(netif) ((netif)->carrier = 0)
++#define netback_carrier_ok(netif) ((netif)->carrier)
++
++enum {
++ NETBK_DONT_COPY_SKB,
++ NETBK_DELAYED_COPY_SKB,
++ NETBK_ALWAYS_COPY_SKB,
++};
++
++extern int netbk_copy_skb_mode;
++
++/* Function pointers into netback accelerator plugin modules */
++struct netback_accel_hooks {
++ struct module *owner;
++ int (*probe)(struct xenbus_device *dev);
++ int (*remove)(struct xenbus_device *dev);
++};
++
++/* Structure to track the state of a netback accelerator plugin */
++struct netback_accelerator {
++ struct list_head link;
++ int id;
++ char *eth_name;
++ atomic_t use_count;
++ struct netback_accel_hooks *hooks;
++};
++
++struct backend_info {
++ struct xenbus_device *dev;
++ netif_t *netif;
++ enum xenbus_state frontend_state;
++
++ /* State relating to the netback accelerator */
++ void *netback_accel_priv;
++ /* The accelerator that this backend is currently using */
++ struct netback_accelerator *accelerator;
++};
++
++#define NETBACK_ACCEL_VERSION 0x00010001
++
++/*
++ * Connect an accelerator plugin module to netback. Returns zero on
++ * success, < 0 on error, > 0 (with highest version number supported)
++ * if version mismatch.
++ */
++extern int netback_connect_accelerator(unsigned version,
++ int id, const char *eth_name,
++ struct netback_accel_hooks *hooks);
++/* Disconnect a previously connected accelerator plugin module */
++extern void netback_disconnect_accelerator(int id, const char *eth_name);
++
++
++extern
++void netback_probe_accelerators(struct backend_info *be,
++ struct xenbus_device *dev);
++extern
++void netback_remove_accelerators(struct backend_info *be,
++ struct xenbus_device *dev);
++extern
++void netif_accel_init(void);
++
++
++#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
++#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
++
++void netif_disconnect(netif_t *netif);
++
++netif_t *netif_alloc(domid_t domid, unsigned int handle);
++int netif_map(netif_t *netif, unsigned long tx_ring_ref,
++ unsigned long rx_ring_ref, unsigned int evtchn);
++
++#define netif_get(_b) (atomic_inc(&(_b)->refcnt))
++#define netif_put(_b) \
++ do { \
++ if ( atomic_dec_and_test(&(_b)->refcnt) ) \
++ wake_up(&(_b)->waiting_to_free); \
++ } while (0)
++
++void netif_xenbus_init(void);
++
++#define netif_schedulable(netif) \
++ (netif_running((netif)->dev) && netback_carrier_ok(netif))
++
++void netif_schedule_work(netif_t *netif);
++void netif_deschedule_work(netif_t *netif);
++
++int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
++struct net_device_stats *netif_be_get_stats(struct net_device *dev);
++irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++
++static inline int netbk_can_queue(struct net_device *dev)
+{
-+ return -1;
++ netif_t *netif = netdev_priv(dev);
++ return netif->can_queue;
+}
-+static inline int xen_unregister_device_domain_owner(struct pci_dev *dev)
++
++static inline int netbk_can_sg(struct net_device *dev)
+{
-+ return -1;
++ netif_t *netif = netdev_priv(dev);
++ return netif->features & NETIF_F_SG;
+}
- #endif
-
- #if defined(CONFIG_PCI_MSI)
-diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
-index 117f5b8..6d2a986 100644
---- a/arch/x86/pci/xen.c
-+++ b/arch/x86/pci/xen.c
-@@ -412,3 +412,76 @@ void __init xen_setup_pirqs(void)
- }
- }
- #endif
+
-+struct xen_device_domain_owner {
-+ domid_t domain;
-+ struct pci_dev *dev;
-+ struct list_head list;
-+};
++#endif /* __NETIF__BACKEND__COMMON_H__ */
+diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
+new file mode 100644
+index 0000000..7e67941
+--- /dev/null
++++ b/drivers/xen/netback/interface.c
+@@ -0,0 +1,336 @@
++/******************************************************************************
++ * arch/xen/drivers/netif/backend/interface.c
++ *
++ * Network-device interface management.
++ *
++ * Copyright (c) 2004-2005, Keir Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
+
-+static DEFINE_SPINLOCK(dev_domain_list_spinlock);
-+static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list);
++#include "common.h"
++#include <linux/ethtool.h>
++#include <linux/rtnetlink.h>
+
-+static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
++/*
++ * Module parameter 'queue_length':
++ *
++ * Enables queuing in the network stack when a client has run out of receive
++ * descriptors. Although this feature can improve receive bandwidth by avoiding
++ * packet loss, it can also result in packets sitting in the 'tx_queue' for
++ * unbounded time. This is bad if those packets hold onto foreign resources.
++ * For example, consider a packet that holds onto resources belonging to the
++ * guest for which it is queued (e.g., packet received on vif1.0, destined for
++ * vif1.1 which is not activated in the guest): in this situation the guest
++ * will never be destroyed, unless vif1.1 is taken down. To avoid this, we
++ * run a timer (tx_queue_timeout) to drain the queue when the interface is
++ * blocked.
++ */
++static unsigned long netbk_queue_length = 32;
++module_param_named(queue_length, netbk_queue_length, ulong, 0);
++
++static void __netif_up(netif_t *netif)
+{
-+ struct xen_device_domain_owner *owner;
++ enable_irq(netif->irq);
++ netif_schedule_work(netif);
++}
+
-+ list_for_each_entry(owner, &dev_domain_list, list) {
-+ if (owner->dev == dev)
-+ return owner;
-+ }
-+ return NULL;
++static void __netif_down(netif_t *netif)
++{
++ disable_irq(netif->irq);
++ netif_deschedule_work(netif);
+}
+
-+int xen_find_device_domain_owner(struct pci_dev *dev)
++static int net_open(struct net_device *dev)
+{
-+ struct xen_device_domain_owner *owner;
-+ int domain = -ENODEV;
++ netif_t *netif = netdev_priv(dev);
++ if (netback_carrier_ok(netif)) {
++ __netif_up(netif);
++ netif_start_queue(dev);
++ }
++ return 0;
++}
+
-+ spin_lock(&dev_domain_list_spinlock);
-+ owner = find_device(dev);
-+ if (owner)
-+ domain = owner->domain;
-+ spin_unlock(&dev_domain_list_spinlock);
-+ return domain;
++static int net_close(struct net_device *dev)
++{
++ netif_t *netif = netdev_priv(dev);
++ if (netback_carrier_ok(netif))
++ __netif_down(netif);
++ netif_stop_queue(dev);
++ return 0;
+}
-+EXPORT_SYMBOL(xen_find_device_domain_owner);
+
-+int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain)
++static int netbk_change_mtu(struct net_device *dev, int mtu)
+{
-+ struct xen_device_domain_owner *owner;
++ int max = netbk_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
+
-+ owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL);
-+ if (!owner)
-+ return -ENODEV;
++ if (mtu > max)
++ return -EINVAL;
++ dev->mtu = mtu;
++ return 0;
++}
+
-+ spin_lock(&dev_domain_list_spinlock);
-+ if (find_device(dev)) {
-+ spin_unlock(&dev_domain_list_spinlock);
-+ kfree(owner);
-+ return -EEXIST;
++static int netbk_set_sg(struct net_device *dev, u32 data)
++{
++ if (data) {
++ netif_t *netif = netdev_priv(dev);
++
++ if (!(netif->features & NETIF_F_SG))
++ return -ENOSYS;
+ }
-+ owner->domain = domain;
-+ owner->dev = dev;
-+ list_add_tail(&owner->list, &dev_domain_list);
-+ spin_unlock(&dev_domain_list_spinlock);
-+ return 0;
++
++ return ethtool_op_set_sg(dev, data);
+}
-+EXPORT_SYMBOL(xen_register_device_domain_owner);
+
-+int xen_unregister_device_domain_owner(struct pci_dev *dev)
++static int netbk_set_tso(struct net_device *dev, u32 data)
+{
-+ struct xen_device_domain_owner *owner;
++ if (data) {
++ netif_t *netif = netdev_priv(dev);
+
-+ spin_lock(&dev_domain_list_spinlock);
-+ owner = find_device(dev);
-+ if (!owner) {
-+ spin_unlock(&dev_domain_list_spinlock);
-+ return -ENODEV;
++ if (!(netif->features & NETIF_F_TSO))
++ return -ENOSYS;
+ }
-+ list_del(&owner->list);
-+ spin_unlock(&dev_domain_list_spinlock);
-+ kfree(owner);
-+ return 0;
++
++ return ethtool_op_set_tso(dev, data);
+}
-+EXPORT_SYMBOL(xen_unregister_device_domain_owner);
---
-1.7.4
-
-
-From da24916fdf04d7b4a32c5b9d2c09e47775496e1d Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Date: Mon, 8 Nov 2010 14:23:17 -0500
-Subject: [PATCH 003/244] xen: Check if the PCI device is owned by a domain different than DOMID_SELF.
-
-We check if there is a domain owner for the PCI device. In case of failure
-(meaning no domain has registered for this device) we make
-DOMID_SELF the owner.
-
-[v2: deal with rebasing on v2.6.37-1]
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
-Acked-by: Xiantao Zhang <xiantao.zhang@intel.com>
----
- drivers/xen/events.c | 16 +++++++++++++---
- 1 files changed, 13 insertions(+), 3 deletions(-)
-
-diff --git a/drivers/xen/events.c b/drivers/xen/events.c
-index a04da4b..96c93e7 100644
---- a/drivers/xen/events.c
-+++ b/drivers/xen/events.c
-@@ -40,6 +40,7 @@
- #include <asm/xen/pci.h>
- #include <asm/xen/hypercall.h>
- #include <asm/xen/hypervisor.h>
-+#include <asm/xen/pci.h>
-
- #include <xen/xen.h>
- #include <xen/hvm.h>
-@@ -97,6 +98,7 @@ struct irq_info
- unsigned short gsi;
- unsigned char vector;
- unsigned char flags;
-+ uint16_t domid;
- } pirq;
- } u;
- };
-@@ -158,7 +160,8 @@ static struct irq_info mk_pirq_info(unsigned short evtchn, unsigned short pirq,
- {
- return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
- .cpu = 0,
-- .u.pirq = { .pirq = pirq, .gsi = gsi, .vector = vector } };
-+ .u.pirq = { .pirq = pirq, .gsi = gsi,
-+ .vector = vector, .domid = DOMID_SELF } };
- }
-
- /*
-@@ -688,11 +691,16 @@ int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type)
- int irq = -1;
- struct physdev_map_pirq map_irq;
- int rc;
-+ domid_t domid;
- int pos;
- u32 table_offset, bir;
-
-+ domid = rc = xen_find_device_domain_owner(dev);
-+ if (rc < 0)
-+ domid = DOMID_SELF;
-+
- memset(&map_irq, 0, sizeof(map_irq));
-- map_irq.domid = DOMID_SELF;
-+ map_irq.domid = domid;
- map_irq.type = MAP_PIRQ_TYPE_MSI;
- map_irq.index = -1;
- map_irq.pirq = -1;
-@@ -727,6 +735,8 @@ int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type)
- goto out;
- }
- irq_info[irq] = mk_pirq_info(0, map_irq.pirq, 0, map_irq.index);
-+ if (domid)
-+ irq_info[irq].u.pirq.domid = domid;
-
- set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
- handle_level_irq,
-@@ -753,7 +763,7 @@ int xen_destroy_irq(int irq)
-
- if (xen_initial_domain()) {
- unmap_irq.pirq = info->u.pirq.pirq;
-- unmap_irq.domid = DOMID_SELF;
-+ unmap_irq.domid = info->u.pirq.domid;
- rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
- if (rc) {
- printk(KERN_WARNING "unmap irq failed %d\n", rc);
---
-1.7.4
-
-
-From 30fecb8166bdd163bdaab795b573cf988f60fbbe Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Date: Mon, 8 Nov 2010 14:26:36 -0500
-Subject: [PATCH 004/244] xen: Add support to check if IRQ line is shared with other domains.
-
-We do this via the PHYSDEVOP_irq_status_query hypervisor call.
-We will get a positive value if another domain has bound its
-PIRQ to the specified IRQ line.
-
-[v2: Deal with v2.6.37-rc1 rebase fallout]
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
----
- drivers/xen/events.c | 13 +++++++++++++
- include/xen/events.h | 3 +++
- 2 files changed, 16 insertions(+), 0 deletions(-)
-
-diff --git a/drivers/xen/events.c b/drivers/xen/events.c
-index 96c93e7..690dfad 100644
---- a/drivers/xen/events.c
-+++ b/drivers/xen/events.c
-@@ -1398,6 +1398,19 @@ void xen_poll_irq(int irq)
- xen_poll_irq_timeout(irq, 0 /* no timeout */);
- }
-
-+/* Check whether the IRQ line is shared with other guests. */
-+int xen_ignore_irq(int irq)
-+{
-+ struct irq_info *info = info_for_irq(irq);
-+ struct physdev_irq_status_query irq_status = { .irq =
-+ info->u.pirq.gsi };
-+
-+ if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
-+ return 0;
-+ return !(irq_status.flags & XENIRQSTAT_shared);
-+}
-+EXPORT_SYMBOL_GPL(xen_ignore_irq);
-+
- void xen_irq_resume(void)
- {
- unsigned int cpu, irq, evtchn;
-diff --git a/include/xen/events.h b/include/xen/events.h
-index 646dd17..553c664 100644
---- a/include/xen/events.h
-+++ b/include/xen/events.h
-@@ -89,4 +89,7 @@ int xen_vector_from_irq(unsigned pirq);
- /* Return irq from pirq */
- int xen_irq_from_pirq(unsigned pirq);
-
-+/* Determine whether to ignore this IRQ if passed to a guest. */
-+int xen_ignore_irq(int irq);
+
- #endif /* _XEN_EVENTS_H */
---
-1.7.4
-
-
-From 909e45104de4414897cefce2f6bbed07fc4de4b3 Mon Sep 17 00:00:00 2001
-From: Ian Campbell <ian.campbell@citrix.com>
-Date: Mon, 9 Feb 2009 12:05:50 -0800
-Subject: [PATCH 005/244] xen: implement bind_interdomain_evtchn_to_irqhandler for backend drivers
-
-Impact: new Xen-internal API
-
-Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
-Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
----
- drivers/xen/events.c | 38 ++++++++++++++++++++++++++++++++++++++
- include/xen/events.h | 6 ++++++
- 2 files changed, 44 insertions(+), 0 deletions(-)
-
-diff --git a/drivers/xen/events.c b/drivers/xen/events.c
-index 690dfad..95eea13 100644
---- a/drivers/xen/events.c
-+++ b/drivers/xen/events.c
-@@ -849,6 +849,21 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
- return irq;
- }
-
-+static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
-+ unsigned int remote_port)
++static struct ethtool_ops network_ethtool_ops =
+{
-+ struct evtchn_bind_interdomain bind_interdomain;
-+ int err;
-+
-+ bind_interdomain.remote_dom = remote_domain;
-+ bind_interdomain.remote_port = remote_port;
-+
-+ err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
-+ &bind_interdomain);
-+
-+ return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
-+}
++ .get_tx_csum = ethtool_op_get_tx_csum,
++ .set_tx_csum = ethtool_op_set_tx_csum,
++ .get_sg = ethtool_op_get_sg,
++ .set_sg = netbk_set_sg,
++ .get_tso = ethtool_op_get_tso,
++ .set_tso = netbk_set_tso,
++ .get_link = ethtool_op_get_link,
++};
+
-
- int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
- {
-@@ -944,6 +959,29 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
- }
- EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
-
-+int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
-+ unsigned int remote_port,
-+ irq_handler_t handler,
-+ unsigned long irqflags,
-+ const char *devname,
-+ void *dev_id)
++netif_t *netif_alloc(domid_t domid, unsigned int handle)
+{
-+ int irq, retval;
++ int err = 0;
++ struct net_device *dev;
++ netif_t *netif;
++ char name[IFNAMSIZ] = {};
+
-+ irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
-+ if (irq < 0)
-+ return irq;
++ snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
++ dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
++ if (dev == NULL) {
++ DPRINTK("Could not create netif: out of memory\n");
++ return ERR_PTR(-ENOMEM);
++ }
+
-+ retval = request_irq(irq, handler, irqflags, devname, dev_id);
-+ if (retval != 0) {
-+ unbind_from_irq(irq);
-+ return retval;
-+ }
++ netif = netdev_priv(dev);
++ memset(netif, 0, sizeof(*netif));
++ netif->domid = domid;
++ netif->handle = handle;
++ atomic_set(&netif->refcnt, 1);
++ init_waitqueue_head(&netif->waiting_to_free);
++ netif->dev = dev;
+
-+ return irq;
-+}
-+EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
++ netback_carrier_off(netif);
+
- int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
- irq_handler_t handler,
- unsigned long irqflags, const char *devname, void *dev_id)
-diff --git a/include/xen/events.h b/include/xen/events.h
-index 553c664..2fe1644 100644
---- a/include/xen/events.h
-+++ b/include/xen/events.h
-@@ -23,6 +23,12 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
- unsigned long irqflags,
- const char *devname,
- void *dev_id);
-+int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
-+ unsigned int remote_port,
-+ irq_handler_t handler,
-+ unsigned long irqflags,
-+ const char *devname,
-+ void *dev_id);
-
- /*
- * Common unbind function for all event sources. Takes IRQ to unbind from.
---
-1.7.4
-
-
-From b4f664c8de09ab8537e1cd194df29056f803062e Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Date: Mon, 8 Nov 2010 14:46:33 -0500
-Subject: [PATCH 006/244] pci/xen: Make xen_[find|register|unregister]_domain_owner be _GPL
-
-EXPORT_SYMBOL -> EXPORT_SYMBOL_GPL.
-
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
----
- arch/x86/pci/xen.c | 6 +++---
- 1 files changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
-index 6d2a986..0fa23c8 100644
---- a/arch/x86/pci/xen.c
-+++ b/arch/x86/pci/xen.c
-@@ -445,7 +445,7 @@ int xen_find_device_domain_owner(struct pci_dev *dev)
- spin_unlock(&dev_domain_list_spinlock);
- return domain;
- }
--EXPORT_SYMBOL(xen_find_device_domain_owner);
-+EXPORT_SYMBOL_GPL(xen_find_device_domain_owner);
-
- int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain)
- {
-@@ -467,7 +467,7 @@ int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain)
- spin_unlock(&dev_domain_list_spinlock);
- return 0;
- }
--EXPORT_SYMBOL(xen_register_device_domain_owner);
-+EXPORT_SYMBOL_GPL(xen_register_device_domain_owner);
-
- int xen_unregister_device_domain_owner(struct pci_dev *dev)
- {
-@@ -484,4 +484,4 @@ int xen_unregister_device_domain_owner(struct pci_dev *dev)
- kfree(owner);
- return 0;
- }
--EXPORT_SYMBOL(xen_unregister_device_domain_owner);
-+EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
---
-1.7.4
-
-
-From 443b2aafbdb509f218fcb8f4665f063e3a5e1a92 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Date: Tue, 13 Oct 2009 17:22:20 -0400
-Subject: [PATCH 007/244] xen-pciback: Initial copy from linux-2.6.18.hg off pciback driver.
-
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
----
- drivers/xen/pciback/Makefile | 17 +
- drivers/xen/pciback/conf_space.c | 435 ++++++++
- drivers/xen/pciback/conf_space.h | 126 +++
- drivers/xen/pciback/conf_space_capability.c | 69 ++
- drivers/xen/pciback/conf_space_capability.h | 23 +
- drivers/xen/pciback/conf_space_capability_msi.c | 79 ++
- drivers/xen/pciback/conf_space_capability_pm.c | 126 +++
- drivers/xen/pciback/conf_space_capability_vpd.c | 40 +
- drivers/xen/pciback/conf_space_header.c | 317 ++++++
- drivers/xen/pciback/conf_space_quirks.c | 138 +++
- drivers/xen/pciback/conf_space_quirks.h | 35 +
- drivers/xen/pciback/controller.c | 443 ++++++++
- drivers/xen/pciback/passthrough.c | 176 +++
- drivers/xen/pciback/pci_stub.c | 1316 +++++++++++++++++++++++
- drivers/xen/pciback/pciback.h | 126 +++
- drivers/xen/pciback/pciback_ops.c | 134 +++
- drivers/xen/pciback/slot.c | 187 ++++
- drivers/xen/pciback/vpci.c | 242 +++++
- drivers/xen/pciback/xenbus.c | 710 ++++++++++++
- 19 files changed, 4739 insertions(+), 0 deletions(-)
- create mode 100644 drivers/xen/pciback/Makefile
- create mode 100644 drivers/xen/pciback/conf_space.c
- create mode 100644 drivers/xen/pciback/conf_space.h
- create mode 100644 drivers/xen/pciback/conf_space_capability.c
- create mode 100644 drivers/xen/pciback/conf_space_capability.h
- create mode 100644 drivers/xen/pciback/conf_space_capability_msi.c
- create mode 100644 drivers/xen/pciback/conf_space_capability_pm.c
- create mode 100644 drivers/xen/pciback/conf_space_capability_vpd.c
- create mode 100644 drivers/xen/pciback/conf_space_header.c
- create mode 100644 drivers/xen/pciback/conf_space_quirks.c
- create mode 100644 drivers/xen/pciback/conf_space_quirks.h
- create mode 100644 drivers/xen/pciback/controller.c
- create mode 100644 drivers/xen/pciback/passthrough.c
- create mode 100644 drivers/xen/pciback/pci_stub.c
- create mode 100644 drivers/xen/pciback/pciback.h
- create mode 100644 drivers/xen/pciback/pciback_ops.c
- create mode 100644 drivers/xen/pciback/slot.c
- create mode 100644 drivers/xen/pciback/vpci.c
- create mode 100644 drivers/xen/pciback/xenbus.c
-
-diff --git a/drivers/xen/pciback/Makefile b/drivers/xen/pciback/Makefile
-new file mode 100644
-index 0000000..106dae7
---- /dev/null
-+++ b/drivers/xen/pciback/Makefile
-@@ -0,0 +1,17 @@
-+obj-$(CONFIG_XEN_PCIDEV_BACKEND) += pciback.o
++ netif->credit_bytes = netif->remaining_credit = ~0UL;
++ netif->credit_usec = 0UL;
++ init_timer(&netif->credit_timeout);
++ /* Initialize 'expires' now: it's used to track the credit window. */
++ netif->credit_timeout.expires = jiffies;
+
-+pciback-y := pci_stub.o pciback_ops.o xenbus.o
-+pciback-y += conf_space.o conf_space_header.o \
-+ conf_space_capability.o \
-+ conf_space_capability_vpd.o \
-+ conf_space_capability_pm.o \
-+ conf_space_quirks.o
-+pciback-$(CONFIG_PCI_MSI) += conf_space_capability_msi.o
-+pciback-$(CONFIG_XEN_PCIDEV_BACKEND_VPCI) += vpci.o
-+pciback-$(CONFIG_XEN_PCIDEV_BACKEND_SLOT) += slot.o
-+pciback-$(CONFIG_XEN_PCIDEV_BACKEND_PASS) += passthrough.o
-+pciback-$(CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER) += controller.o
++ init_timer(&netif->tx_queue_timeout);
+
-+ifeq ($(CONFIG_XEN_PCIDEV_BE_DEBUG),y)
-+EXTRA_CFLAGS += -DDEBUG
-+endif
-diff --git a/drivers/xen/pciback/conf_space.c b/drivers/xen/pciback/conf_space.c
-new file mode 100644
-index 0000000..0c76db1
---- /dev/null
-+++ b/drivers/xen/pciback/conf_space.c
-@@ -0,0 +1,435 @@
-+/*
-+ * PCI Backend - Functions for creating a virtual configuration space for
-+ * exported PCI Devices.
-+ * It's dangerous to allow PCI Driver Domains to change their
-+ * device's resources (memory, i/o ports, interrupts). We need to
-+ * restrict changes to certain PCI Configuration registers:
-+ * BARs, INTERRUPT_PIN, most registers in the header...
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
++ dev->hard_start_xmit = netif_be_start_xmit;
++ dev->get_stats = netif_be_get_stats;
++ dev->open = net_open;
++ dev->stop = net_close;
++ dev->change_mtu = netbk_change_mtu;
++ dev->features = NETIF_F_IP_CSUM;
+
-+#include <linux/kernel.h>
-+#include <linux/pci.h>
-+#include "pciback.h"
-+#include "conf_space.h"
-+#include "conf_space_quirks.h"
++ SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
+
-+static int permissive;
-+module_param(permissive, bool, 0644);
++ dev->tx_queue_len = netbk_queue_length;
+
-+#define DEFINE_PCI_CONFIG(op,size,type) \
-+int pciback_##op##_config_##size \
-+(struct pci_dev *dev, int offset, type value, void *data) \
-+{ \
-+ return pci_##op##_config_##size (dev, offset, value); \
-+}
++ /*
++ * Initialise a dummy MAC address. We choose the numerically
++ * largest non-broadcast address to prevent the address getting
++ * stolen by an Ethernet bridge for STP purposes.
++ * (FE:FF:FF:FF:FF:FF)
++ */
++ memset(dev->dev_addr, 0xFF, ETH_ALEN);
++ dev->dev_addr[0] &= ~0x01;
+
-+DEFINE_PCI_CONFIG(read, byte, u8 *)
-+DEFINE_PCI_CONFIG(read, word, u16 *)
-+DEFINE_PCI_CONFIG(read, dword, u32 *)
++ rtnl_lock();
++ err = register_netdevice(dev);
++ rtnl_unlock();
++ if (err) {
++ DPRINTK("Could not register new net device %s: err=%d\n",
++ dev->name, err);
++ free_netdev(dev);
++ return ERR_PTR(err);
++ }
+
-+DEFINE_PCI_CONFIG(write, byte, u8)
-+DEFINE_PCI_CONFIG(write, word, u16)
-+DEFINE_PCI_CONFIG(write, dword, u32)
++ DPRINTK("Successfully created netif\n");
++ return netif;
++}
+
-+static int conf_space_read(struct pci_dev *dev,
-+ const struct config_field_entry *entry,
-+ int offset, u32 *value)
++static int map_frontend_pages(
++ netif_t *netif, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref)
+{
-+ int ret = 0;
-+ const struct config_field *field = entry->field;
++ struct gnttab_map_grant_ref op;
+
-+ *value = 0;
++ gnttab_set_map_op(&op, (unsigned long)netif->tx_comms_area->addr,
++ GNTMAP_host_map, tx_ring_ref, netif->domid);
+
-+ switch (field->size) {
-+ case 1:
-+ if (field->u.b.read)
-+ ret = field->u.b.read(dev, offset, (u8 *) value,
-+ entry->data);
-+ break;
-+ case 2:
-+ if (field->u.w.read)
-+ ret = field->u.w.read(dev, offset, (u16 *) value,
-+ entry->data);
-+ break;
-+ case 4:
-+ if (field->u.dw.read)
-+ ret = field->u.dw.read(dev, offset, value, entry->data);
-+ break;
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status) {
++ DPRINTK(" Gnttab failure mapping tx_ring_ref!\n");
++ return op.status;
+ }
-+ return ret;
-+}
+
-+static int conf_space_write(struct pci_dev *dev,
-+ const struct config_field_entry *entry,
-+ int offset, u32 value)
-+{
-+ int ret = 0;
-+ const struct config_field *field = entry->field;
++ netif->tx_shmem_ref = tx_ring_ref;
++ netif->tx_shmem_handle = op.handle;
+
-+ switch (field->size) {
-+ case 1:
-+ if (field->u.b.write)
-+ ret = field->u.b.write(dev, offset, (u8) value,
-+ entry->data);
-+ break;
-+ case 2:
-+ if (field->u.w.write)
-+ ret = field->u.w.write(dev, offset, (u16) value,
-+ entry->data);
-+ break;
-+ case 4:
-+ if (field->u.dw.write)
-+ ret = field->u.dw.write(dev, offset, value,
-+ entry->data);
-+ break;
++ gnttab_set_map_op(&op, (unsigned long)netif->rx_comms_area->addr,
++ GNTMAP_host_map, rx_ring_ref, netif->domid);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status) {
++ DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
++ return op.status;
+ }
-+ return ret;
-+}
+
-+static inline u32 get_mask(int size)
-+{
-+ if (size == 1)
-+ return 0xff;
-+ else if (size == 2)
-+ return 0xffff;
-+ else
-+ return 0xffffffff;
-+}
++ netif->rx_shmem_ref = rx_ring_ref;
++ netif->rx_shmem_handle = op.handle;
+
-+static inline int valid_request(int offset, int size)
-+{
-+ /* Validate request (no un-aligned requests) */
-+ if ((size == 1 || size == 2 || size == 4) && (offset % size) == 0)
-+ return 1;
+ return 0;
+}
+
-+static inline u32 merge_value(u32 val, u32 new_val, u32 new_val_mask,
-+ int offset)
++static void unmap_frontend_pages(netif_t *netif)
+{
-+ if (offset >= 0) {
-+ new_val_mask <<= (offset * 8);
-+ new_val <<= (offset * 8);
-+ } else {
-+ new_val_mask >>= (offset * -8);
-+ new_val >>= (offset * -8);
-+ }
-+ val = (val & ~new_val_mask) | (new_val & new_val_mask);
++ struct gnttab_unmap_grant_ref op;
+
-+ return val;
-+}
++ gnttab_set_unmap_op(&op, (unsigned long)netif->tx_comms_area->addr,
++ GNTMAP_host_map, netif->tx_shmem_handle);
+
-+static int pcibios_err_to_errno(int err)
-+{
-+ switch (err) {
-+ case PCIBIOS_SUCCESSFUL:
-+ return XEN_PCI_ERR_success;
-+ case PCIBIOS_DEVICE_NOT_FOUND:
-+ return XEN_PCI_ERR_dev_not_found;
-+ case PCIBIOS_BAD_REGISTER_NUMBER:
-+ return XEN_PCI_ERR_invalid_offset;
-+ case PCIBIOS_FUNC_NOT_SUPPORTED:
-+ return XEN_PCI_ERR_not_implemented;
-+ case PCIBIOS_SET_FAILED:
-+ return XEN_PCI_ERR_access_denied;
-+ }
-+ return err;
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
++
++ gnttab_set_unmap_op(&op, (unsigned long)netif->rx_comms_area->addr,
++ GNTMAP_host_map, netif->rx_shmem_handle);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
+}
+
-+int pciback_config_read(struct pci_dev *dev, int offset, int size,
-+ u32 * ret_val)
++int netif_map(netif_t *netif, unsigned long tx_ring_ref,
++ unsigned long rx_ring_ref, unsigned int evtchn)
+{
-+ int err = 0;
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+ const struct config_field_entry *cfg_entry;
-+ const struct config_field *field;
-+ int req_start, req_end, field_start, field_end;
-+ /* if read fails for any reason, return 0 (as if device didn't respond) */
-+ u32 value = 0, tmp_val;
++ int err = -ENOMEM;
++ netif_tx_sring_t *txs;
++ netif_rx_sring_t *rxs;
+
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG "pciback: %s: read %d bytes at 0x%x\n",
-+ pci_name(dev), size, offset);
++ /* Already connected through? */
++ if (netif->irq)
++ return 0;
+
-+ if (!valid_request(offset, size)) {
-+ err = XEN_PCI_ERR_invalid_offset;
-+ goto out;
-+ }
++ netif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
++ if (netif->tx_comms_area == NULL)
++ return -ENOMEM;
++ netif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
++ if (netif->rx_comms_area == NULL)
++ goto err_rx;
+
-+ /* Get the real value first, then modify as appropriate */
-+ switch (size) {
-+ case 1:
-+ err = pci_read_config_byte(dev, offset, (u8 *) & value);
-+ break;
-+ case 2:
-+ err = pci_read_config_word(dev, offset, (u16 *) & value);
-+ break;
-+ case 4:
-+ err = pci_read_config_dword(dev, offset, &value);
-+ break;
-+ }
++ err = map_frontend_pages(netif, tx_ring_ref, rx_ring_ref);
++ if (err)
++ goto err_map;
+
-+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
-+ field = cfg_entry->field;
++ err = bind_interdomain_evtchn_to_irqhandler(
++ netif->domid, evtchn, netif_be_int, 0,
++ netif->dev->name, netif);
++ if (err < 0)
++ goto err_hypervisor;
++ netif->irq = err;
++ disable_irq(netif->irq);
+
-+ req_start = offset;
-+ req_end = offset + size;
-+ field_start = OFFSET(cfg_entry);
-+ field_end = OFFSET(cfg_entry) + field->size;
++ txs = (netif_tx_sring_t *)netif->tx_comms_area->addr;
++ BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);
+
-+ if ((req_start >= field_start && req_start < field_end)
-+ || (req_end > field_start && req_end <= field_end)) {
-+ err = conf_space_read(dev, cfg_entry, field_start,
-+ &tmp_val);
-+ if (err)
-+ goto out;
++ rxs = (netif_rx_sring_t *)
++ ((char *)netif->rx_comms_area->addr);
++ BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);
+
-+ value = merge_value(value, tmp_val,
-+ get_mask(field->size),
-+ field_start - req_start);
-+ }
-+ }
++ netif->rx_req_cons_peek = 0;
+
-+ out:
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG "pciback: %s: read %d bytes at 0x%x = %x\n",
-+ pci_name(dev), size, offset, value);
++ netif_get(netif);
+
-+ *ret_val = value;
-+ return pcibios_err_to_errno(err);
++ rtnl_lock();
++ netback_carrier_on(netif);
++ if (netif_running(netif->dev))
++ __netif_up(netif);
++ rtnl_unlock();
++
++ return 0;
++err_hypervisor:
++ unmap_frontend_pages(netif);
++err_map:
++ free_vm_area(netif->rx_comms_area);
++err_rx:
++ free_vm_area(netif->tx_comms_area);
++ return err;
+}
+
-+int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value)
++void netif_disconnect(netif_t *netif)
+{
-+ int err = 0, handled = 0;
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+ const struct config_field_entry *cfg_entry;
-+ const struct config_field *field;
-+ u32 tmp_val;
-+ int req_start, req_end, field_start, field_end;
++ if (netback_carrier_ok(netif)) {
++ rtnl_lock();
++ netback_carrier_off(netif);
++ netif_carrier_off(netif->dev); /* discard queued packets */
++ if (netif_running(netif->dev))
++ __netif_down(netif);
++ rtnl_unlock();
++ netif_put(netif);
++ }
+
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG
-+ "pciback: %s: write request %d bytes at 0x%x = %x\n",
-+ pci_name(dev), size, offset, value);
++ atomic_dec(&netif->refcnt);
++ wait_event(netif->waiting_to_free, atomic_read(&netif->refcnt) == 0);
+
-+ if (!valid_request(offset, size))
-+ return XEN_PCI_ERR_invalid_offset;
++ del_timer_sync(&netif->credit_timeout);
++ del_timer_sync(&netif->tx_queue_timeout);
+
-+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
-+ field = cfg_entry->field;
++ if (netif->irq)
++ unbind_from_irqhandler(netif->irq, netif);
+
-+ req_start = offset;
-+ req_end = offset + size;
-+ field_start = OFFSET(cfg_entry);
-+ field_end = OFFSET(cfg_entry) + field->size;
++ unregister_netdev(netif->dev);
+
-+ if ((req_start >= field_start && req_start < field_end)
-+ || (req_end > field_start && req_end <= field_end)) {
-+ tmp_val = 0;
++ if (netif->tx.sring) {
++ unmap_frontend_pages(netif);
++ free_vm_area(netif->tx_comms_area);
++ free_vm_area(netif->rx_comms_area);
++ }
+
-+ err = pciback_config_read(dev, field_start,
-+ field->size, &tmp_val);
-+ if (err)
-+ break;
++ free_netdev(netif->dev);
++}
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+new file mode 100644
+index 0000000..db629d4
+--- /dev/null
++++ b/drivers/xen/netback/netback.c
+@@ -0,0 +1,1637 @@
++/******************************************************************************
++ * drivers/xen/netback/netback.c
++ *
++ * Back-end of the driver for virtual network devices. This portion of the
++ * driver exports a 'unified' network-device interface that can be accessed
++ * by any operating system that implements a compatible front end. A
++ * reference front-end implementation can be found in:
++ * drivers/xen/netfront/netfront.c
++ *
++ * Copyright (c) 2002-2005, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
+
-+ tmp_val = merge_value(tmp_val, value, get_mask(size),
-+ req_start - field_start);
++#include "common.h"
++#include <xen/balloon.h>
++#include <xen/interface/memory.h>
+
-+ err = conf_space_write(dev, cfg_entry, field_start,
-+ tmp_val);
++/*define NETBE_DEBUG_INTERRUPT*/
+
-+ /* handled is set true here, but not every byte
-+ * may have been written! Properly detecting if
-+ * every byte is handled is unnecessary as the
-+ * flag is used to detect devices that need
-+ * special helpers to work correctly.
-+ */
-+ handled = 1;
-+ }
-+ }
++struct netbk_rx_meta {
++ skb_frag_t frag;
++ int id;
++ u8 copy:1;
++};
+
-+ if (!handled && !err) {
-+ /* By default, anything not specifically handled above is
-+ * read-only. The permissive flag changes this behavior so
-+ * that anything not specifically handled above is writable.
-+ * This means that some fields may still be read-only because
-+ * they have entries in the config_field list that intercept
-+ * the write and do nothing. */
-+ if (dev_data->permissive || permissive) {
-+ switch (size) {
-+ case 1:
-+ err = pci_write_config_byte(dev, offset,
-+ (u8) value);
-+ break;
-+ case 2:
-+ err = pci_write_config_word(dev, offset,
-+ (u16) value);
-+ break;
-+ case 4:
-+ err = pci_write_config_dword(dev, offset,
-+ (u32) value);
-+ break;
-+ }
-+ } else if (!dev_data->warned_on_write) {
-+ dev_data->warned_on_write = 1;
-+ dev_warn(&dev->dev, "Driver tried to write to a "
-+ "read-only configuration space field at offset "
-+ "0x%x, size %d. This may be harmless, but if "
-+ "you have problems with your device:\n"
-+ "1) see permissive attribute in sysfs\n"
-+ "2) report problems to the xen-devel "
-+ "mailing list along with details of your "
-+ "device obtained from lspci.\n", offset, size);
-+ }
-+ }
++struct netbk_tx_pending_inuse {
++ struct list_head list;
++ unsigned long alloc_time;
++};
+
-+ return pcibios_err_to_errno(err);
-+}
++static void netif_idx_release(u16 pending_idx);
++static void make_tx_response(netif_t *netif,
++ netif_tx_request_t *txp,
++ s8 st);
++static netif_rx_response_t *make_rx_response(netif_t *netif,
++ u16 id,
++ s8 st,
++ u16 offset,
++ u16 size,
++ u16 flags);
+
-+void pciback_config_free_dyn_fields(struct pci_dev *dev)
-+{
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+ struct config_field_entry *cfg_entry, *t;
-+ const struct config_field *field;
++static void net_tx_action(unsigned long unused);
++static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
+
-+ dev_dbg(&dev->dev,
-+ "free-ing dynamically allocated virtual configuration space fields\n");
-+ if (!dev_data)
-+ return;
++static void net_rx_action(unsigned long unused);
++static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
+
-+ list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
-+ field = cfg_entry->field;
++static struct timer_list net_timer;
++static struct timer_list netbk_tx_pending_timer;
+
-+ if (field->clean) {
-+ field->clean((struct config_field *)field);
++#define MAX_PENDING_REQS 256
+
-+ if (cfg_entry->data)
-+ kfree(cfg_entry->data);
++static struct sk_buff_head rx_queue;
+
-+ list_del(&cfg_entry->list);
-+ kfree(cfg_entry);
-+ }
++static struct page **mmap_pages;
++static inline unsigned long idx_to_pfn(unsigned int idx)
++{
++ return page_to_pfn(mmap_pages[idx]);
++}
+
-+ }
++static inline unsigned long idx_to_kaddr(unsigned int idx)
++{
++ return (unsigned long)pfn_to_kaddr(idx_to_pfn(idx));
+}
+
-+void pciback_config_reset_dev(struct pci_dev *dev)
++/* extra field used in struct page */
++static inline void netif_set_page_index(struct page *pg, unsigned int index)
+{
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+ const struct config_field_entry *cfg_entry;
-+ const struct config_field *field;
++ *(unsigned long *)&pg->mapping = index;
++}
+
-+ dev_dbg(&dev->dev, "resetting virtual configuration space\n");
-+ if (!dev_data)
-+ return;
++static inline int netif_page_index(struct page *pg)
++{
++ unsigned long idx = (unsigned long)pg->mapping;
+
-+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
-+ field = cfg_entry->field;
++ if (!PageForeign(pg))
++ return -1;
+
-+ if (field->reset)
-+ field->reset(dev, OFFSET(cfg_entry), cfg_entry->data);
-+ }
++ if ((idx >= MAX_PENDING_REQS) || (mmap_pages[idx] != pg))
++ return -1;
++
++ return idx;
+}
+
-+void pciback_config_free_dev(struct pci_dev *dev)
-+{
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+ struct config_field_entry *cfg_entry, *t;
-+ const struct config_field *field;
++#define PKT_PROT_LEN 64
+
-+ dev_dbg(&dev->dev, "free-ing virtual configuration space fields\n");
-+ if (!dev_data)
-+ return;
++static struct pending_tx_info {
++ netif_tx_request_t req;
++ netif_t *netif;
++} pending_tx_info[MAX_PENDING_REQS];
++static u16 pending_ring[MAX_PENDING_REQS];
++typedef unsigned int PEND_RING_IDX;
++#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
++static PEND_RING_IDX pending_prod, pending_cons;
++#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
+
-+ list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
-+ list_del(&cfg_entry->list);
++/* Freed TX SKBs get batched on this ring before return to pending_ring. */
++static u16 dealloc_ring[MAX_PENDING_REQS];
++static PEND_RING_IDX dealloc_prod, dealloc_cons;
+
-+ field = cfg_entry->field;
++/* Doubly-linked list of in-use pending entries. */
++static struct netbk_tx_pending_inuse pending_inuse[MAX_PENDING_REQS];
++static LIST_HEAD(pending_inuse_head);
+
-+ if (field->release)
-+ field->release(dev, OFFSET(cfg_entry), cfg_entry->data);
++static struct sk_buff_head tx_queue;
+
-+ kfree(cfg_entry);
-+ }
-+}
++static grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
++static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS];
++static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS];
+
-+int pciback_config_add_field_offset(struct pci_dev *dev,
-+ const struct config_field *field,
-+ unsigned int base_offset)
-+{
-+ int err = 0;
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+ struct config_field_entry *cfg_entry;
-+ void *tmp;
++static struct list_head net_schedule_list;
++static spinlock_t net_schedule_list_lock;
+
-+ cfg_entry = kmalloc(sizeof(*cfg_entry), GFP_KERNEL);
-+ if (!cfg_entry) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
++#define MAX_MFN_ALLOC 64
++static unsigned long mfn_list[MAX_MFN_ALLOC];
++static unsigned int alloc_index = 0;
+
-+ cfg_entry->data = NULL;
-+ cfg_entry->field = field;
-+ cfg_entry->base_offset = base_offset;
++/* Setting this allows the safe use of this driver without netloop. */
++static int MODPARM_copy_skb = 1;
++module_param_named(copy_skb, MODPARM_copy_skb, bool, 0);
++MODULE_PARM_DESC(copy_skb, "Copy data received from netfront without netloop");
+
-+ /* silently ignore duplicate fields */
-+ err = pciback_field_is_dup(dev,OFFSET(cfg_entry));
-+ if (err)
-+ goto out;
++int netbk_copy_skb_mode;
+
-+ if (field->init) {
-+ tmp = field->init(dev, OFFSET(cfg_entry));
++static inline unsigned long alloc_mfn(void)
++{
++ BUG_ON(alloc_index == 0);
++ return mfn_list[--alloc_index];
++}
+
-+ if (IS_ERR(tmp)) {
-+ err = PTR_ERR(tmp);
-+ goto out;
-+ }
++static int check_mfn(int nr)
++{
++ struct xen_memory_reservation reservation = {
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ };
++ int rc;
+
-+ cfg_entry->data = tmp;
-+ }
++ if (likely(alloc_index >= nr))
++ return 0;
+
-+ dev_dbg(&dev->dev, "added config field at offset 0x%02x\n",
-+ OFFSET(cfg_entry));
-+ list_add_tail(&cfg_entry->list, &dev_data->config_fields);
++ set_xen_guest_handle(reservation.extent_start, mfn_list + alloc_index);
++ reservation.nr_extents = MAX_MFN_ALLOC - alloc_index;
++ rc = HYPERVISOR_memory_op(XENMEM_increase_reservation, &reservation);
++ if (likely(rc > 0))
++ alloc_index += rc;
+
-+ out:
-+ if (err)
-+ kfree(cfg_entry);
++ return alloc_index >= nr ? 0 : -ENOMEM;
++}
+
-+ return err;
++static inline void maybe_schedule_tx_action(void)
++{
++ smp_mb();
++ if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
++ !list_empty(&net_schedule_list))
++ tasklet_schedule(&net_tx_tasklet);
+}
+
-+/* This sets up the device's virtual configuration space to keep track of
-+ * certain registers (like the base address registers (BARs) so that we can
-+ * keep the client from manipulating them directly.
-+ */
-+int pciback_config_init_dev(struct pci_dev *dev)
++static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
+{
-+ int err = 0;
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++ struct skb_shared_info *ninfo;
++ struct sk_buff *nskb;
++ unsigned long offset;
++ int ret;
++ int len;
++ int headlen;
+
-+ dev_dbg(&dev->dev, "initializing virtual configuration space\n");
++ BUG_ON(skb_shinfo(skb)->frag_list != NULL);
+
-+ INIT_LIST_HEAD(&dev_data->config_fields);
++ nskb = alloc_skb(SKB_MAX_HEAD(0), GFP_ATOMIC | __GFP_NOWARN);
++ if (unlikely(!nskb))
++ goto err;
+
-+ err = pciback_config_header_add_fields(dev);
-+ if (err)
-+ goto out;
++ skb_reserve(nskb, 16 + NET_IP_ALIGN);
++ headlen = nskb->end - nskb->data;
++ if (headlen > skb_headlen(skb))
++ headlen = skb_headlen(skb);
++ ret = skb_copy_bits(skb, 0, __skb_put(nskb, headlen), headlen);
++ BUG_ON(ret);
+
-+ err = pciback_config_capability_add_fields(dev);
-+ if (err)
-+ goto out;
++ ninfo = skb_shinfo(nskb);
++ ninfo->gso_size = skb_shinfo(skb)->gso_size;
++ ninfo->gso_type = skb_shinfo(skb)->gso_type;
+
-+ err = pciback_config_quirks_init(dev);
++ offset = headlen;
++ len = skb->len - headlen;
+
-+ out:
-+ return err;
-+}
++ nskb->len = skb->len;
++ nskb->data_len = len;
++ nskb->truesize += len;
+
-+int pciback_config_init(void)
-+{
-+ return pciback_config_capability_init();
-+}
-diff --git a/drivers/xen/pciback/conf_space.h b/drivers/xen/pciback/conf_space.h
-new file mode 100644
-index 0000000..fe746ef
---- /dev/null
-+++ b/drivers/xen/pciback/conf_space.h
-@@ -0,0 +1,126 @@
-+/*
-+ * PCI Backend - Common data structures for overriding the configuration space
-+ *
-+ * Author: Ryan Wilson <hap9(a)epoch.ncsc.mil>
-+ */
++ while (len) {
++ struct page *page;
++ int copy;
++ int zero;
+
-+#ifndef __XEN_PCIBACK_CONF_SPACE_H__
-+#define __XEN_PCIBACK_CONF_SPACE_H__
++ if (unlikely(ninfo->nr_frags >= MAX_SKB_FRAGS)) {
++ dump_stack();
++ goto err_free;
++ }
+
-+#include <linux/list.h>
-+#include <linux/err.h>
++ copy = len >= PAGE_SIZE ? PAGE_SIZE : len;
++ zero = len >= PAGE_SIZE ? 0 : __GFP_ZERO;
+
-+/* conf_field_init can return an errno in a ptr with ERR_PTR() */
-+typedef void *(*conf_field_init) (struct pci_dev * dev, int offset);
-+typedef void (*conf_field_reset) (struct pci_dev * dev, int offset, void *data);
-+typedef void (*conf_field_free) (struct pci_dev * dev, int offset, void *data);
++ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN | zero);
++ if (unlikely(!page))
++ goto err_free;
+
-+typedef int (*conf_dword_write) (struct pci_dev * dev, int offset, u32 value,
-+ void *data);
-+typedef int (*conf_word_write) (struct pci_dev * dev, int offset, u16 value,
-+ void *data);
-+typedef int (*conf_byte_write) (struct pci_dev * dev, int offset, u8 value,
-+ void *data);
-+typedef int (*conf_dword_read) (struct pci_dev * dev, int offset, u32 * value,
-+ void *data);
-+typedef int (*conf_word_read) (struct pci_dev * dev, int offset, u16 * value,
-+ void *data);
-+typedef int (*conf_byte_read) (struct pci_dev * dev, int offset, u8 * value,
-+ void *data);
++ ret = skb_copy_bits(skb, offset, page_address(page), copy);
++ BUG_ON(ret);
+
-+/* These are the fields within the configuration space which we
-+ * are interested in intercepting reads/writes to and changing their
-+ * values.
-+ */
-+struct config_field {
-+ unsigned int offset;
-+ unsigned int size;
-+ unsigned int mask;
-+ conf_field_init init;
-+ conf_field_reset reset;
-+ conf_field_free release;
-+ void (*clean) (struct config_field * field);
-+ union {
-+ struct {
-+ conf_dword_write write;
-+ conf_dword_read read;
-+ } dw;
-+ struct {
-+ conf_word_write write;
-+ conf_word_read read;
-+ } w;
-+ struct {
-+ conf_byte_write write;
-+ conf_byte_read read;
-+ } b;
-+ } u;
-+ struct list_head list;
-+};
++ ninfo->frags[ninfo->nr_frags].page = page;
++ ninfo->frags[ninfo->nr_frags].page_offset = 0;
++ ninfo->frags[ninfo->nr_frags].size = copy;
++ ninfo->nr_frags++;
+
-+struct config_field_entry {
-+ struct list_head list;
-+ const struct config_field *field;
-+ unsigned int base_offset;
-+ void *data;
-+};
++ offset += copy;
++ len -= copy;
++ }
+
-+#define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
++ offset = nskb->data - skb->data;
+
-+/* Add fields to a device - the add_fields macro expects to get a pointer to
-+ * the first entry in an array (of which the ending is marked by size==0)
-+ */
-+int pciback_config_add_field_offset(struct pci_dev *dev,
-+ const struct config_field *field,
-+ unsigned int offset);
++ nskb->h.raw = skb->h.raw + offset;
++ nskb->nh.raw = skb->nh.raw + offset;
++ nskb->mac.raw = skb->mac.raw + offset;
+
-+static inline int pciback_config_add_field(struct pci_dev *dev,
-+ const struct config_field *field)
-+{
-+ return pciback_config_add_field_offset(dev, field, 0);
-+}
++ return nskb;
+
-+static inline int pciback_config_add_fields(struct pci_dev *dev,
-+ const struct config_field *field)
-+{
-+ int i, err = 0;
-+ for (i = 0; field[i].size != 0; i++) {
-+ err = pciback_config_add_field(dev, &field[i]);
-+ if (err)
-+ break;
-+ }
-+ return err;
++ err_free:
++ kfree_skb(nskb);
++ err:
++ return NULL;
+}
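++
++/*
++ * In short, netbk_copy_skb() above flattens an arbitrary skb into a
++ * shape the rest of this path can handle: as much data as fits goes
++ * into the freshly allocated linear area, the remainder is copied page
++ * by page into new frags, and only the final partial page is
++ * __GFP_ZERO-allocated, so no stale kernel data can leak past the
++ * copied tail.
++ */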
+
-+static inline int pciback_config_add_fields_offset(struct pci_dev *dev,
-+ const struct config_field *field,
-+ unsigned int offset)
++static inline int netbk_max_required_rx_slots(netif_t *netif)
+{
-+ int i, err = 0;
-+ for (i = 0; field[i].size != 0; i++) {
-+ err = pciback_config_add_field_offset(dev, &field[i], offset);
-+ if (err)
-+ break;
-+ }
-+ return err;
++ if (netif->features & (NETIF_F_SG|NETIF_F_TSO))
++ return MAX_SKB_FRAGS + 2; /* header + extra_info + frags */
++ return 1; /* all in one */
+}
+
-+/* Read/Write the real configuration space */
-+int pciback_read_config_byte(struct pci_dev *dev, int offset, u8 * value,
-+ void *data);
-+int pciback_read_config_word(struct pci_dev *dev, int offset, u16 * value,
-+ void *data);
-+int pciback_read_config_dword(struct pci_dev *dev, int offset, u32 * value,
-+ void *data);
-+int pciback_write_config_byte(struct pci_dev *dev, int offset, u8 value,
-+ void *data);
-+int pciback_write_config_word(struct pci_dev *dev, int offset, u16 value,
-+ void *data);
-+int pciback_write_config_dword(struct pci_dev *dev, int offset, u32 value,
-+ void *data);
-+
-+int pciback_config_capability_init(void);
-+
-+int pciback_config_header_add_fields(struct pci_dev *dev);
-+int pciback_config_capability_add_fields(struct pci_dev *dev);
-+
-+#endif /* __XEN_PCIBACK_CONF_SPACE_H__ */
-diff --git a/drivers/xen/pciback/conf_space_capability.c b/drivers/xen/pciback/conf_space_capability.c
-new file mode 100644
-index 0000000..50efca4
---- /dev/null
-+++ b/drivers/xen/pciback/conf_space_capability.c
-@@ -0,0 +1,69 @@
-+/*
-+ * PCI Backend - Handles the virtual fields found on the capability lists
-+ * in the configuration space.
-+ *
-+ * Author: Ryan Wilson <hap9(a)epoch.ncsc.mil>
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/pci.h>
-+#include "pciback.h"
-+#include "conf_space.h"
-+#include "conf_space_capability.h"
-+
-+static LIST_HEAD(capabilities);
++static inline int netbk_queue_full(netif_t *netif)
++{
++ RING_IDX peek = netif->rx_req_cons_peek;
++ RING_IDX needed = netbk_max_required_rx_slots(netif);
+
-+static const struct config_field caplist_header[] = {
-+ {
-+ .offset = PCI_CAP_LIST_ID,
-+ .size = 2, /* encompass PCI_CAP_LIST_ID & PCI_CAP_LIST_NEXT */
-+ .u.w.read = pciback_read_config_word,
-+ .u.w.write = NULL,
-+ },
-+ {}
-+};
++ return ((netif->rx.sring->req_prod - peek) < needed) ||
++ ((netif->rx.rsp_prod_pvt + NET_RX_RING_SIZE - peek) < needed);
++}
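++
++/*
++ * Reading the two tests above: the first asks whether the frontend has
++ * posted enough not-yet-consumed rx requests beyond our peek point, the
++ * second whether the ring itself still has room for the responses; the
++ * queue counts as full as soon as either falls below
++ * netbk_max_required_rx_slots().
++ */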
+
-+static inline void register_capability(struct pciback_config_capability *cap)
++static void tx_queue_callback(unsigned long data)
+{
-+ list_add_tail(&cap->cap_list, &capabilities);
++ netif_t *netif = (netif_t *)data;
++ if (netif_schedulable(netif))
++ netif_wake_queue(netif->dev);
+}
+
-+int pciback_config_capability_add_fields(struct pci_dev *dev)
++int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
-+ int err = 0;
-+ struct pciback_config_capability *cap;
-+ int cap_offset;
++ netif_t *netif = netdev_priv(dev);
+
-+ list_for_each_entry(cap, &capabilities, cap_list) {
-+ cap_offset = pci_find_capability(dev, cap->capability);
-+ if (cap_offset) {
-+ dev_dbg(&dev->dev, "Found capability 0x%x at 0x%x\n",
-+ cap->capability, cap_offset);
++ BUG_ON(skb->dev != dev);
+
-+ err = pciback_config_add_fields_offset(dev,
-+ caplist_header,
-+ cap_offset);
-+ if (err)
-+ goto out;
-+ err = pciback_config_add_fields_offset(dev,
-+ cap->fields,
-+ cap_offset);
-+ if (err)
-+ goto out;
-+ }
++ /* Drop the packet if the target domain has no receive buffers. */
++ if (unlikely(!netif_schedulable(netif) || netbk_queue_full(netif)))
++ goto drop;
++
++ /*
++ * Copy the packet here if it's destined for a flipping interface
++ * but isn't flippable (e.g. extra references to data).
++ * XXX For now we also copy skbuffs whose head crosses a page
++ * boundary, because netbk_gop_skb can't handle them.
++ */
++ if (!netif->copying_receiver ||
++ ((skb_headlen(skb) + offset_in_page(skb->data)) >= PAGE_SIZE)) {
++ struct sk_buff *nskb = netbk_copy_skb(skb);
++		if (unlikely(nskb == NULL))
++ goto drop;
++ /* Copy only the header fields we use in this driver. */
++ nskb->dev = skb->dev;
++ nskb->ip_summed = skb->ip_summed;
++ nskb->proto_data_valid = skb->proto_data_valid;
++ dev_kfree_skb(skb);
++ skb = nskb;
+ }
+
-+ out:
-+ return err;
-+}
++ netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1 +
++ !!skb_shinfo(skb)->gso_size;
++ netif_get(netif);
+
-+extern struct pciback_config_capability pciback_config_capability_vpd;
-+extern struct pciback_config_capability pciback_config_capability_pm;
++ if (netbk_can_queue(dev) && netbk_queue_full(netif)) {
++ netif->rx.sring->req_event = netif->rx_req_cons_peek +
++ netbk_max_required_rx_slots(netif);
++ mb(); /* request notification /then/ check & stop the queue */
++ if (netbk_queue_full(netif)) {
++ netif_stop_queue(dev);
++ /*
++ * Schedule 500ms timeout to restart the queue, thus
++ * ensuring that an inactive queue will be drained.
++			 * Packets will immediately be dropped until more
++ * receive buffers become available (see
++ * netbk_queue_full() check above).
++ */
++ netif->tx_queue_timeout.data = (unsigned long)netif;
++ netif->tx_queue_timeout.function = tx_queue_callback;
++ __mod_timer(&netif->tx_queue_timeout, jiffies + HZ/2);
++ }
++ }
+
-+int pciback_config_capability_init(void)
-+{
-+ register_capability(&pciback_config_capability_vpd);
-+ register_capability(&pciback_config_capability_pm);
++ skb_queue_tail(&rx_queue, skb);
++ tasklet_schedule(&net_rx_tasklet);
++
++ return 0;
+
++ drop:
++ netif->stats.tx_dropped++;
++ dev_kfree_skb(skb);
+ return 0;
+}
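++
++/*
++ * The stop-queue sequence in netif_be_start_xmit() is the classic
++ * lost-wakeup guard: publish the event threshold, barrier, re-check.
++ * A minimal standalone sketch of the same idiom -- names here are
++ * hypothetical and not part of this driver:
++ */
++static int example_should_stop_queue(volatile unsigned int *req_event,
++				     unsigned int peek, unsigned int needed,
++				     unsigned int (*slots_available)(void))
++{
++	*req_event = peek + needed;	/* ask to be kicked at this point */
++	/* mb() in real code: make the store visible before the re-check */
++	return slots_available() < needed;	/* still short: stop queue */
++}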
-diff --git a/drivers/xen/pciback/conf_space_capability.h b/drivers/xen/pciback/conf_space_capability.h
-new file mode 100644
-index 0000000..823392e
---- /dev/null
-+++ b/drivers/xen/pciback/conf_space_capability.h
-@@ -0,0 +1,23 @@
++
++#if 0
++static void xen_network_done_notify(void)
++{
++ static struct net_device *eth0_dev = NULL;
++ if (unlikely(eth0_dev == NULL))
++ eth0_dev = __dev_get_by_name("eth0");
++ netif_rx_schedule(eth0_dev);
++}
+/*
-+ * PCI Backend - Data structures for special overlays for structures on
-+ * the capability list.
-+ *
-+ * Author: Ryan Wilson <hap9(a)epoch.ncsc.mil>
++ * Add following to poll() function in NAPI driver (Tigon3 is example):
++ * if ( xen_network_done() )
++ * tg3_enable_ints(tp);
+ */
++int xen_network_done(void)
++{
++ return skb_queue_empty(&rx_queue);
++}
++#endif
+
-+#ifndef __PCIBACK_CONFIG_CAPABILITY_H__
-+#define __PCIBACK_CONFIG_CAPABILITY_H__
++struct netrx_pending_operations {
++ unsigned trans_prod, trans_cons;
++ unsigned mmu_prod, mmu_mcl;
++ unsigned mcl_prod, mcl_cons;
++ unsigned copy_prod, copy_cons;
++ unsigned meta_prod, meta_cons;
++ mmu_update_t *mmu;
++ gnttab_transfer_t *trans;
++ gnttab_copy_t *copy;
++ multicall_entry_t *mcl;
++ struct netbk_rx_meta *meta;
++};
+
-+#include <linux/pci.h>
-+#include <linux/list.h>
++/* Set up the grant operations for this fragment. If it's a flipping
++ interface, we also set up the unmap request from here. */
++static u16 netbk_gop_frag(netif_t *netif, struct netbk_rx_meta *meta,
++ int i, struct netrx_pending_operations *npo,
++ struct page *page, unsigned long size,
++ unsigned long offset)
++{
++ mmu_update_t *mmu;
++ gnttab_transfer_t *gop;
++ gnttab_copy_t *copy_gop;
++ multicall_entry_t *mcl;
++ netif_rx_request_t *req;
++ unsigned long old_mfn, new_mfn;
++ int idx = netif_page_index(page);
+
-+struct pciback_config_capability {
-+ struct list_head cap_list;
++ old_mfn = virt_to_mfn(page_address(page));
+
-+ int capability;
++ req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons + i);
++ if (netif->copying_receiver) {
++ /* The fragment needs to be copied rather than
++ flipped. */
++ meta->copy = 1;
++ copy_gop = npo->copy + npo->copy_prod++;
++ copy_gop->flags = GNTCOPY_dest_gref;
++ if (idx > -1) {
++ struct pending_tx_info *src_pend = &pending_tx_info[idx];
++ copy_gop->source.domid = src_pend->netif->domid;
++ copy_gop->source.u.ref = src_pend->req.gref;
++ copy_gop->flags |= GNTCOPY_source_gref;
++ } else {
++ copy_gop->source.domid = DOMID_SELF;
++ copy_gop->source.u.gmfn = old_mfn;
++ }
++ copy_gop->source.offset = offset;
++ copy_gop->dest.domid = netif->domid;
++ copy_gop->dest.offset = 0;
++ copy_gop->dest.u.ref = req->gref;
++ copy_gop->len = size;
++ } else {
++ meta->copy = 0;
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ new_mfn = alloc_mfn();
+
-+ /* If the device has the capability found above, add these fields */
-+ const struct config_field *fields;
-+};
++ /*
++ * Set the new P2M table entry before
++ * reassigning the old data page. Heed the
++ * comment in pgtable-2level.h:pte_page(). :-)
++ */
++ set_phys_to_machine(page_to_pfn(page), new_mfn);
+
-+#endif
-diff --git a/drivers/xen/pciback/conf_space_capability_msi.c b/drivers/xen/pciback/conf_space_capability_msi.c
-new file mode 100644
-index 0000000..762e396
---- /dev/null
-+++ b/drivers/xen/pciback/conf_space_capability_msi.c
-@@ -0,0 +1,79 @@
-+/*
-+ * PCI Backend -- Configuration overlay for MSI capability
-+ */
-+#include <linux/pci.h>
-+#include <linux/slab.h>
-+#include "conf_space.h"
-+#include "conf_space_capability.h"
-+#include <xen/interface/io/pciif.h>
-+#include "pciback.h"
++ mcl = npo->mcl + npo->mcl_prod++;
++ MULTI_update_va_mapping(mcl,
++ (unsigned long)page_address(page),
++ pfn_pte_ma(new_mfn, PAGE_KERNEL),
++ 0);
+
-+int pciback_enable_msi(struct pciback_device *pdev,
-+ struct pci_dev *dev, struct xen_pci_op *op)
++ mmu = npo->mmu + npo->mmu_prod++;
++ mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
++ MMU_MACHPHYS_UPDATE;
++ mmu->val = page_to_pfn(page);
++ }
++
++ gop = npo->trans + npo->trans_prod++;
++ gop->mfn = old_mfn;
++ gop->domid = netif->domid;
++ gop->ref = req->gref;
++ }
++ return req->id;
++}
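++
++/*
++ * Two receive modes meet in netbk_gop_frag(): a copying receiver gets a
++ * GNTTABOP_copy into the grant it supplied (sourced from the original
++ * guest's tx grant when the page is a mapped foreign page, else from
++ * DOMID_SELF), while a flipping receiver is handed the backing page
++ * itself via GNTTABOP_transfer, with a fresh MFN mapped in its place
++ * and the P2M entry fixed up through the queued multicall.
++ */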
++
++static void netbk_gop_skb(struct sk_buff *skb,
++ struct netrx_pending_operations *npo)
+{
-+ int otherend = pdev->xdev->otherend_id;
-+ int status;
++ netif_t *netif = netdev_priv(skb->dev);
++ int nr_frags = skb_shinfo(skb)->nr_frags;
++ int i;
++ int extra;
++ struct netbk_rx_meta *head_meta, *meta;
+
-+ status = pci_enable_msi(dev);
++ head_meta = npo->meta + npo->meta_prod++;
++ head_meta->frag.page_offset = skb_shinfo(skb)->gso_type;
++ head_meta->frag.size = skb_shinfo(skb)->gso_size;
++ extra = !!head_meta->frag.size + 1;
+
-+ if (status) {
-+ printk("error enable msi for guest %x status %x\n", otherend, status);
-+ op->value = 0;
-+ return XEN_PCI_ERR_op_failed;
++ for (i = 0; i < nr_frags; i++) {
++ meta = npo->meta + npo->meta_prod++;
++ meta->frag = skb_shinfo(skb)->frags[i];
++ meta->id = netbk_gop_frag(netif, meta, i + extra, npo,
++ meta->frag.page,
++ meta->frag.size,
++ meta->frag.page_offset);
+ }
+
-+ op->value = dev->irq;
-+ return 0;
++ /*
++ * This must occur at the end to ensure that we don't trash skb_shinfo
++ * until we're done. We know that the head doesn't cross a page
++ * boundary because such packets get copied in netif_be_start_xmit.
++ */
++ head_meta->id = netbk_gop_frag(netif, head_meta, 0, npo,
++ virt_to_page(skb->data),
++ skb_headlen(skb),
++ offset_in_page(skb->data));
++
++ netif->rx.req_cons += nr_frags + extra;
+}
+
-+int pciback_disable_msi(struct pciback_device *pdev,
-+ struct pci_dev *dev, struct xen_pci_op *op)
++static inline void netbk_free_pages(int nr_frags, struct netbk_rx_meta *meta)
+{
-+ pci_disable_msi(dev);
++ int i;
+
-+ op->value = dev->irq;
-+ return 0;
++ for (i = 0; i < nr_frags; i++)
++ put_page(meta[i].frag.page);
+}
+
-+int pciback_enable_msix(struct pciback_device *pdev,
-+ struct pci_dev *dev, struct xen_pci_op *op)
++/* This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
++ used to set up the operations on the top of
++ netrx_pending_operations, which have since been done. Check that
++ they didn't give any errors and advance over them. */
++static int netbk_check_gop(int nr_frags, domid_t domid,
++ struct netrx_pending_operations *npo)
+{
-+ int i, result;
-+ struct msix_entry *entries;
-+
-+ if (op->value > SH_INFO_MAX_VEC)
-+ return -EINVAL;
++ multicall_entry_t *mcl;
++ gnttab_transfer_t *gop;
++ gnttab_copy_t *copy_op;
++ int status = NETIF_RSP_OKAY;
++ int i;
+
-+ entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
-+ if (entries == NULL)
-+ return -ENOMEM;
++ for (i = 0; i <= nr_frags; i++) {
++ if (npo->meta[npo->meta_cons + i].copy) {
++ copy_op = npo->copy + npo->copy_cons++;
++ if (copy_op->status != GNTST_okay) {
++ DPRINTK("Bad status %d from copy to DOM%d.\n",
++ copy_op->status, domid);
++ status = NETIF_RSP_ERROR;
++ }
++ } else {
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ mcl = npo->mcl + npo->mcl_cons++;
++ /* The update_va_mapping() must not fail. */
++ BUG_ON(mcl->result != 0);
++ }
+
-+ for (i = 0; i < op->value; i++) {
-+ entries[i].entry = op->msix_entries[i].entry;
-+ entries[i].vector = op->msix_entries[i].vector;
++ gop = npo->trans + npo->trans_cons++;
++ /* Check the reassignment error code. */
++ if (gop->status != 0) {
++ DPRINTK("Bad status %d from grant transfer to DOM%u\n",
++ gop->status, domid);
++ /*
++ * Page no longer belongs to us unless
++ * GNTST_bad_page, but that should be
++ * a fatal error anyway.
++ */
++ BUG_ON(gop->status == GNTST_bad_page);
++ status = NETIF_RSP_ERROR;
++ }
++ }
+ }
+
-+ result = pci_enable_msix(dev, entries, op->value);
-+
-+ for (i = 0; i < op->value; i++) {
-+ op->msix_entries[i].entry = entries[i].entry;
-+ op->msix_entries[i].vector = entries[i].vector;
-+ }
++ return status;
++}
+
-+ kfree(entries);
++static void netbk_add_frag_responses(netif_t *netif, int status,
++ struct netbk_rx_meta *meta, int nr_frags)
++{
++ int i;
++ unsigned long offset;
+
-+ op->value = result;
++ for (i = 0; i < nr_frags; i++) {
++ int id = meta[i].id;
++ int flags = (i == nr_frags - 1) ? 0 : NETRXF_more_data;
+
-+ return result;
++ if (meta[i].copy)
++ offset = 0;
++ else
++ offset = meta[i].frag.page_offset;
++ make_rx_response(netif, id, status, offset,
++ meta[i].frag.size, flags);
++ }
+}
+
-+int pciback_disable_msix(struct pciback_device *pdev,
-+ struct pci_dev *dev, struct xen_pci_op *op)
++static void net_rx_action(unsigned long unused)
+{
++ netif_t *netif = NULL;
++ s8 status;
++ u16 id, irq, flags;
++ netif_rx_response_t *resp;
++ multicall_entry_t *mcl;
++ struct sk_buff_head rxq;
++ struct sk_buff *skb;
++ int notify_nr = 0;
++ int ret;
++ int nr_frags;
++ int count;
++ unsigned long offset;
+
-+ pci_disable_msix(dev);
++ /*
++ * Putting hundreds of bytes on the stack is considered rude.
++ * Static works because a tasklet can only be on one CPU at any time.
++ */
++ static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+3];
++ static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
++ static gnttab_transfer_t grant_trans_op[NET_RX_RING_SIZE];
++ static gnttab_copy_t grant_copy_op[NET_RX_RING_SIZE];
++ static unsigned char rx_notify[NR_IRQS];
++ static u16 notify_list[NET_RX_RING_SIZE];
++ static struct netbk_rx_meta meta[NET_RX_RING_SIZE];
+
-+ op->value = dev->irq;
-+ return 0;
-+}
++ struct netrx_pending_operations npo = {
++		.mmu = rx_mmu,
++		.trans = grant_trans_op,
++		.copy = grant_copy_op,
++		.mcl = rx_mcl,
++		.meta = meta };
+
-diff --git a/drivers/xen/pciback/conf_space_capability_pm.c b/drivers/xen/pciback/conf_space_capability_pm.c
-new file mode 100644
-index 0000000..e2f99c7
---- /dev/null
-+++ b/drivers/xen/pciback/conf_space_capability_pm.c
-@@ -0,0 +1,126 @@
-+/*
-+ * PCI Backend - Configuration space overlay for power management
-+ *
-+ * Author: Ryan Wilson <hap9(a)epoch.ncsc.mil>
-+ */
++ skb_queue_head_init(&rxq);
+
-+#include <linux/pci.h>
-+#include "conf_space.h"
-+#include "conf_space_capability.h"
++ count = 0;
+
-+static int pm_caps_read(struct pci_dev *dev, int offset, u16 *value,
-+ void *data)
-+{
-+ int err;
-+ u16 real_value;
++ while ((skb = skb_dequeue(&rx_queue)) != NULL) {
++ nr_frags = skb_shinfo(skb)->nr_frags;
++ *(int *)skb->cb = nr_frags;
+
-+ err = pci_read_config_word(dev, offset, &real_value);
-+ if (err)
-+ goto out;
++ if (!xen_feature(XENFEAT_auto_translated_physmap) &&
++ !((netif_t *)netdev_priv(skb->dev))->copying_receiver &&
++ check_mfn(nr_frags + 1)) {
++ /* Memory squeeze? Back off for an arbitrary while. */
++			if (net_ratelimit())
++				WPRINTK("Memory squeeze in netback driver.\n");
++ mod_timer(&net_timer, jiffies + HZ);
++ skb_queue_head(&rx_queue, skb);
++ break;
++ }
+
-+ *value = real_value & ~PCI_PM_CAP_PME_MASK;
++ netbk_gop_skb(skb, &npo);
+
-+ out:
-+ return err;
-+}
++ count += nr_frags + 1;
+
-+/* PM_OK_BITS specifies the bits that the driver domain is allowed to change.
-+ * Can't allow driver domain to enable PMEs - they're shared */
-+#define PM_OK_BITS (PCI_PM_CTRL_PME_STATUS|PCI_PM_CTRL_DATA_SEL_MASK)
++ __skb_queue_tail(&rxq, skb);
+
-+static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
-+ void *data)
-+{
-+ int err;
-+ u16 old_value;
-+ pci_power_t new_state, old_state;
++ /* Filled the batch queue? */
++ if (count + MAX_SKB_FRAGS >= NET_RX_RING_SIZE)
++ break;
++ }
+
-+ err = pci_read_config_word(dev, offset, &old_value);
-+ if (err)
-+ goto out;
++ BUG_ON(npo.meta_prod > ARRAY_SIZE(meta));
+
-+ old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
-+ new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
++ npo.mmu_mcl = npo.mcl_prod;
++ if (npo.mcl_prod) {
++ BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
++ BUG_ON(npo.mmu_prod > ARRAY_SIZE(rx_mmu));
++ mcl = npo.mcl + npo.mcl_prod++;
+
-+ new_value &= PM_OK_BITS;
-+ if ((old_value & PM_OK_BITS) != new_value) {
-+ new_value = (old_value & ~PM_OK_BITS) | new_value;
-+ err = pci_write_config_word(dev, offset, new_value);
-+ if (err)
-+ goto out;
-+ }
++ BUG_ON(mcl[-1].op != __HYPERVISOR_update_va_mapping);
++ mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
+
-+ /* Let pci core handle the power management change */
-+ dev_dbg(&dev->dev, "set power state to %x\n", new_state);
-+ err = pci_set_power_state(dev, new_state);
-+ if (err) {
-+ err = PCIBIOS_SET_FAILED;
-+ goto out;
++ mcl->op = __HYPERVISOR_mmu_update;
++ mcl->args[0] = (unsigned long)rx_mmu;
++ mcl->args[1] = npo.mmu_prod;
++ mcl->args[2] = 0;
++ mcl->args[3] = DOMID_SELF;
+ }
+
-+ /*
-+ * Device may lose PCI config info on D3->D0 transition. This
-+ * is a problem for some guests which will not reset BARs. Even
-+ * those that have a go will be foiled by our BAR-write handler
-+ * which will discard the write! Since Linux won't re-init
-+ * the config space automatically in all cases, we do it here.
-+ * Future: Should we re-initialise all first 64 bytes of config space?
-+ */
-+ if (new_state == PCI_D0 &&
-+ (old_state == PCI_D3hot || old_state == PCI_D3cold) &&
-+ !(old_value & PCI_PM_CTRL_NO_SOFT_RESET))
-+ pci_restore_bars(dev);
++ if (npo.trans_prod) {
++ BUG_ON(npo.trans_prod > ARRAY_SIZE(grant_trans_op));
++ mcl = npo.mcl + npo.mcl_prod++;
++ mcl->op = __HYPERVISOR_grant_table_op;
++ mcl->args[0] = GNTTABOP_transfer;
++ mcl->args[1] = (unsigned long)grant_trans_op;
++ mcl->args[2] = npo.trans_prod;
++ }
+
-+ out:
-+ return err;
-+}
++ if (npo.copy_prod) {
++ BUG_ON(npo.copy_prod > ARRAY_SIZE(grant_copy_op));
++ mcl = npo.mcl + npo.mcl_prod++;
++ mcl->op = __HYPERVISOR_grant_table_op;
++ mcl->args[0] = GNTTABOP_copy;
++ mcl->args[1] = (unsigned long)grant_copy_op;
++ mcl->args[2] = npo.copy_prod;
++ }
+
-+/* Ensure PMEs are disabled */
-+static void *pm_ctrl_init(struct pci_dev *dev, int offset)
-+{
-+ int err;
-+ u16 value;
++ /* Nothing to do? */
++ if (!npo.mcl_prod)
++ return;
+
-+ err = pci_read_config_word(dev, offset, &value);
-+ if (err)
-+ goto out;
++ BUG_ON(npo.mcl_prod > ARRAY_SIZE(rx_mcl));
+
-+ if (value & PCI_PM_CTRL_PME_ENABLE) {
-+ value &= ~PCI_PM_CTRL_PME_ENABLE;
-+ err = pci_write_config_word(dev, offset, value);
-+ }
++ ret = HYPERVISOR_multicall(npo.mcl, npo.mcl_prod);
++ BUG_ON(ret != 0);
++ /* The mmu_machphys_update() must not fail. */
++ BUG_ON(npo.mmu_mcl && npo.mcl[npo.mmu_mcl].result != 0);
+
-+ out:
-+ return ERR_PTR(err);
-+}
++ while ((skb = __skb_dequeue(&rxq)) != NULL) {
++ nr_frags = *(int *)skb->cb;
+
-+static const struct config_field caplist_pm[] = {
-+ {
-+ .offset = PCI_PM_PMC,
-+ .size = 2,
-+ .u.w.read = pm_caps_read,
-+ },
-+ {
-+ .offset = PCI_PM_CTRL,
-+ .size = 2,
-+ .init = pm_ctrl_init,
-+ .u.w.read = pciback_read_config_word,
-+ .u.w.write = pm_ctrl_write,
-+ },
-+ {
-+ .offset = PCI_PM_PPB_EXTENSIONS,
-+ .size = 1,
-+ .u.b.read = pciback_read_config_byte,
-+ },
-+ {
-+ .offset = PCI_PM_DATA_REGISTER,
-+ .size = 1,
-+ .u.b.read = pciback_read_config_byte,
-+ },
-+ {}
-+};
++ netif = netdev_priv(skb->dev);
++ /* We can't rely on skb_release_data to release the
++ pages used by fragments for us, since it tries to
++ touch the pages in the fraglist. If we're in
++ flipping mode, that doesn't work. In copying mode,
++ we still have access to all of the pages, and so
++ it's safe to let release_data deal with it. */
++ /* (Freeing the fragments is safe since we copy
++ non-linear skbs destined for flipping interfaces) */
++ if (!netif->copying_receiver) {
++ atomic_set(&(skb_shinfo(skb)->dataref), 1);
++ skb_shinfo(skb)->frag_list = NULL;
++ skb_shinfo(skb)->nr_frags = 0;
++ netbk_free_pages(nr_frags, meta + npo.meta_cons + 1);
++ }
+
-+struct pciback_config_capability pciback_config_capability_pm = {
-+ .capability = PCI_CAP_ID_PM,
-+ .fields = caplist_pm,
-+};
-diff --git a/drivers/xen/pciback/conf_space_capability_vpd.c b/drivers/xen/pciback/conf_space_capability_vpd.c
-new file mode 100644
-index 0000000..920cb4a
---- /dev/null
-+++ b/drivers/xen/pciback/conf_space_capability_vpd.c
-@@ -0,0 +1,40 @@
-+/*
-+ * PCI Backend - Configuration space overlay for Vital Product Data
-+ *
-+ * Author: Ryan Wilson <hap9(a)epoch.ncsc.mil>
-+ */
++ netif->stats.tx_bytes += skb->len;
++ netif->stats.tx_packets++;
+
-+#include <linux/pci.h>
-+#include "conf_space.h"
-+#include "conf_space_capability.h"
++ status = netbk_check_gop(nr_frags, netif->domid, &npo);
+
-+static int vpd_address_write(struct pci_dev *dev, int offset, u16 value,
-+ void *data)
-+{
-+ /* Disallow writes to the vital product data */
-+ if (value & PCI_VPD_ADDR_F)
-+ return PCIBIOS_SET_FAILED;
-+ else
-+ return pci_write_config_word(dev, offset, value);
-+}
++ id = meta[npo.meta_cons].id;
++ flags = nr_frags ? NETRXF_more_data : 0;
+
-+static const struct config_field caplist_vpd[] = {
-+ {
-+ .offset = PCI_VPD_ADDR,
-+ .size = 2,
-+ .u.w.read = pciback_read_config_word,
-+ .u.w.write = vpd_address_write,
-+ },
-+ {
-+ .offset = PCI_VPD_DATA,
-+ .size = 4,
-+ .u.dw.read = pciback_read_config_dword,
-+ .u.dw.write = NULL,
-+ },
-+ {}
-+};
-+
-+struct pciback_config_capability pciback_config_capability_vpd = {
-+ .capability = PCI_CAP_ID_VPD,
-+ .fields = caplist_vpd,
-+};
-diff --git a/drivers/xen/pciback/conf_space_header.c b/drivers/xen/pciback/conf_space_header.c
-new file mode 100644
-index 0000000..f794e12
---- /dev/null
-+++ b/drivers/xen/pciback/conf_space_header.c
-@@ -0,0 +1,317 @@
-+/*
-+ * PCI Backend - Handles the virtual fields in the configuration space headers.
-+ *
-+ * Author: Ryan Wilson <hap9(a)epoch.ncsc.mil>
-+ */
++ if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
++ flags |= NETRXF_csum_blank | NETRXF_data_validated;
++ else if (skb->proto_data_valid) /* remote but checksummed? */
++ flags |= NETRXF_data_validated;
+
-+#include <linux/kernel.h>
-+#include <linux/pci.h>
-+#include "pciback.h"
-+#include "conf_space.h"
++ if (meta[npo.meta_cons].copy)
++ offset = 0;
++ else
++ offset = offset_in_page(skb->data);
++ resp = make_rx_response(netif, id, status, offset,
++ skb_headlen(skb), flags);
+
-+struct pci_bar_info {
-+ u32 val;
-+ u32 len_val;
-+ int which;
-+};
++ if (meta[npo.meta_cons].frag.size) {
++ struct netif_extra_info *gso =
++ (struct netif_extra_info *)
++ RING_GET_RESPONSE(&netif->rx,
++ netif->rx.rsp_prod_pvt++);
+
-+#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
-+#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
++ resp->flags |= NETRXF_extra_info;
+
-+static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
-+{
-+ int err;
++ gso->u.gso.size = meta[npo.meta_cons].frag.size;
++ gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
++ gso->u.gso.pad = 0;
++ gso->u.gso.features = 0;
+
-+ if (!dev->is_enabled && is_enable_cmd(value)) {
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG "pciback: %s: enable\n",
-+ pci_name(dev));
-+ err = pci_enable_device(dev);
-+ if (err)
-+ return err;
-+ } else if (dev->is_enabled && !is_enable_cmd(value)) {
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG "pciback: %s: disable\n",
-+ pci_name(dev));
-+ pci_disable_device(dev);
-+ }
++ gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
++ gso->flags = 0;
++ }
+
-+ if (!dev->is_busmaster && is_master_cmd(value)) {
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG "pciback: %s: set bus master\n",
-+ pci_name(dev));
-+ pci_set_master(dev);
-+ }
++ netbk_add_frag_responses(netif, status,
++ meta + npo.meta_cons + 1,
++ nr_frags);
+
-+ if (value & PCI_COMMAND_INVALIDATE) {
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG
-+ "pciback: %s: enable memory-write-invalidate\n",
-+ pci_name(dev));
-+ err = pci_set_mwi(dev);
-+ if (err) {
-+ printk(KERN_WARNING
-+ "pciback: %s: cannot enable memory-write-invalidate (%d)\n",
-+ pci_name(dev), err);
-+ value &= ~PCI_COMMAND_INVALIDATE;
++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, ret);
++ irq = netif->irq;
++ if (ret && !rx_notify[irq]) {
++ rx_notify[irq] = 1;
++ notify_list[notify_nr++] = irq;
+ }
-+ }
-+
-+ return pci_write_config_word(dev, offset, value);
-+}
+
-+static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
-+{
-+ struct pci_bar_info *bar = data;
++ if (netif_queue_stopped(netif->dev) &&
++ netif_schedulable(netif) &&
++ !netbk_queue_full(netif))
++ netif_wake_queue(netif->dev);
+
-+ if (unlikely(!bar)) {
-+ printk(KERN_WARNING "pciback: driver data not found for %s\n",
-+ pci_name(dev));
-+ return XEN_PCI_ERR_op_failed;
++ netif_put(netif);
++ dev_kfree_skb(skb);
++ npo.meta_cons += nr_frags + 1;
+ }
+
-+ /* A write to obtain the length must happen as a 32-bit write.
-+ * This does not (yet) support writing individual bytes
-+ */
-+ if (value == ~PCI_ROM_ADDRESS_ENABLE)
-+ bar->which = 1;
-+ else {
-+ u32 tmpval;
-+ pci_read_config_dword(dev, offset, &tmpval);
-+ if (tmpval != bar->val && value == bar->val) {
-+ /* Allow restoration of bar value. */
-+ pci_write_config_dword(dev, offset, bar->val);
-+ }
-+ bar->which = 0;
++ while (notify_nr != 0) {
++ irq = notify_list[--notify_nr];
++ rx_notify[irq] = 0;
++ notify_remote_via_irq(irq);
+ }
+
-+ /* Do we need to support enabling/disabling the rom address here? */
-+
-+ return 0;
++ /* More work to do? */
++ if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
++ tasklet_schedule(&net_rx_tasklet);
++#if 0
++ else
++ xen_network_done_notify();
++#endif
+}
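++
++/*
++ * Shape of net_rx_action() above: drain rx_queue into a bounded batch,
++ * build every grant copy/transfer and mmu update for the whole batch,
++ * flush them in a single HYPERVISOR_multicall, then walk the batch a
++ * second time turning per-operation status into ring responses, and
++ * finally fire each interface's event channel at most once via
++ * notify_list.
++ */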
+
-+/* For the BARs, only allow writes which write ~0 or
-+ * the correct resource information
-+ * (Needed for when the driver probes the resource usage)
-+ */
-+static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
++static void net_alarm(unsigned long unused)
+{
-+ struct pci_bar_info *bar = data;
-+
-+ if (unlikely(!bar)) {
-+ printk(KERN_WARNING "pciback: driver data not found for %s\n",
-+ pci_name(dev));
-+ return XEN_PCI_ERR_op_failed;
-+ }
++ tasklet_schedule(&net_rx_tasklet);
++}
+
-+ /* A write to obtain the length must happen as a 32-bit write.
-+ * This does not (yet) support writing individual bytes
-+ */
-+ if (value == ~0)
-+ bar->which = 1;
-+ else {
-+ u32 tmpval;
-+ pci_read_config_dword(dev, offset, &tmpval);
-+ if (tmpval != bar->val && value == bar->val) {
-+ /* Allow restoration of bar value. */
-+ pci_write_config_dword(dev, offset, bar->val);
-+ }
-+ bar->which = 0;
-+ }
++static void netbk_tx_pending_timeout(unsigned long unused)
++{
++ tasklet_schedule(&net_tx_tasklet);
++}
+
-+ return 0;
++struct net_device_stats *netif_be_get_stats(struct net_device *dev)
++{
++ netif_t *netif = netdev_priv(dev);
++ return &netif->stats;
+}
+
-+static int bar_read(struct pci_dev *dev, int offset, u32 * value, void *data)
++static int __on_net_schedule_list(netif_t *netif)
+{
-+ struct pci_bar_info *bar = data;
++ return netif->list.next != NULL;
++}
+
-+ if (unlikely(!bar)) {
-+ printk(KERN_WARNING "pciback: driver data not found for %s\n",
-+ pci_name(dev));
-+ return XEN_PCI_ERR_op_failed;
++static void remove_from_net_schedule_list(netif_t *netif)
++{
++ spin_lock_irq(&net_schedule_list_lock);
++ if (likely(__on_net_schedule_list(netif))) {
++ list_del(&netif->list);
++ netif->list.next = NULL;
++ netif_put(netif);
+ }
-+
-+ *value = bar->which ? bar->len_val : bar->val;
-+
-+ return 0;
++ spin_unlock_irq(&net_schedule_list_lock);
+}
+
-+static inline void read_dev_bar(struct pci_dev *dev,
-+ struct pci_bar_info *bar_info, int offset,
-+ u32 len_mask)
++static void add_to_net_schedule_list_tail(netif_t *netif)
+{
-+ pci_read_config_dword(dev, offset, &bar_info->val);
-+ pci_write_config_dword(dev, offset, len_mask);
-+ pci_read_config_dword(dev, offset, &bar_info->len_val);
-+ pci_write_config_dword(dev, offset, bar_info->val);
++ if (__on_net_schedule_list(netif))
++ return;
++
++ spin_lock_irq(&net_schedule_list_lock);
++ if (!__on_net_schedule_list(netif) &&
++ likely(netif_schedulable(netif))) {
++ list_add_tail(&netif->list, &net_schedule_list);
++ netif_get(netif);
++ }
++ spin_unlock_irq(&net_schedule_list_lock);
+}
+
-+static void *bar_init(struct pci_dev *dev, int offset)
++/*
++ * Note on CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER:
++ * If this driver is pipelining transmit requests then we can be very
++ * aggressive in avoiding new-packet notifications -- frontend only needs to
++ * send a notification if there are no outstanding unreceived responses.
++ * If we may buffer transmit requests for any reason then we must be rather
++ * more conservative and treat this as the final check for pending work.
++ */
++void netif_schedule_work(netif_t *netif)
+{
-+ struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
++ int more_to_do;
+
-+ if (!bar)
-+ return ERR_PTR(-ENOMEM);
++#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
++ more_to_do = RING_HAS_UNCONSUMED_REQUESTS(&netif->tx);
++#else
++ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
++#endif
+
-+ read_dev_bar(dev, bar, offset, ~0);
-+ bar->which = 0;
++ if (more_to_do) {
++ add_to_net_schedule_list_tail(netif);
++ maybe_schedule_tx_action();
++ }
++}
+
-+ return bar;
++void netif_deschedule_work(netif_t *netif)
++{
++ remove_from_net_schedule_list(netif);
+}
+
-+static void *rom_init(struct pci_dev *dev, int offset)
++
++static void tx_add_credit(netif_t *netif)
+{
-+ struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
++ unsigned long max_burst, max_credit;
+
-+ if (!bar)
-+ return ERR_PTR(-ENOMEM);
++ /*
++ * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
++ * Otherwise the interface can seize up due to insufficient credit.
++ */
++ max_burst = RING_GET_REQUEST(&netif->tx, netif->tx.req_cons)->size;
++ max_burst = min(max_burst, 131072UL);
++ max_burst = max(max_burst, netif->credit_bytes);
+
-+ read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
-+ bar->which = 0;
++ /* Take care that adding a new chunk of credit doesn't wrap to zero. */
++ max_credit = netif->remaining_credit + netif->credit_bytes;
++ if (max_credit < netif->remaining_credit)
++ max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
+
-+ return bar;
++ netif->remaining_credit = min(max_credit, max_burst);
+}
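++
++/*
++ * The wrap test in tx_add_credit() is a saturating add: unsigned
++ * overflow is well defined in C, so "sum < addend" detects the wrap.
++ * The same idiom as a standalone sketch (illustration only):
++ */
++static unsigned long example_saturating_add(unsigned long a, unsigned long b)
++{
++	unsigned long sum = a + b;
++	return (sum < a) ? ULONG_MAX : sum;	/* wrapped: clamp */
++}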
+
-+static void bar_reset(struct pci_dev *dev, int offset, void *data)
++static void tx_credit_callback(unsigned long data)
+{
-+ struct pci_bar_info *bar = data;
-+
-+ bar->which = 0;
++ netif_t *netif = (netif_t *)data;
++ tx_add_credit(netif);
++ netif_schedule_work(netif);
+}
+
-+static void bar_release(struct pci_dev *dev, int offset, void *data)
++static inline int copy_pending_req(PEND_RING_IDX pending_idx)
+{
-+ kfree(data);
++ return gnttab_copy_grant_page(grant_tx_handle[pending_idx],
++ &mmap_pages[pending_idx]);
+}
+
-+static int interrupt_read(struct pci_dev *dev, int offset, u8 * value,
-+ void *data)
+static inline void net_tx_action_dealloc(void)
+{
-+ *value = (u8) dev->irq;
++ struct netbk_tx_pending_inuse *inuse, *n;
++ gnttab_unmap_grant_ref_t *gop;
++ u16 pending_idx;
++ PEND_RING_IDX dc, dp;
++ netif_t *netif;
++ int ret;
++ LIST_HEAD(list);
+
-+ return 0;
-+}
++ dc = dealloc_cons;
++ gop = tx_unmap_ops;
+
-+static int bist_write(struct pci_dev *dev, int offset, u8 value, void *data)
-+{
-+ u8 cur_value;
-+ int err;
++ /*
++ * Free up any grants we have finished using
++ */
++ do {
++ dp = dealloc_prod;
+
-+ err = pci_read_config_byte(dev, offset, &cur_value);
-+ if (err)
-+ goto out;
++ /* Ensure we see all indices enqueued by netif_idx_release(). */
++ smp_rmb();
+
-+ if ((cur_value & ~PCI_BIST_START) == (value & ~PCI_BIST_START)
-+ || value == PCI_BIST_START)
-+ err = pci_write_config_byte(dev, offset, value);
++ while (dc != dp) {
++ unsigned long pfn;
+
-+ out:
-+ return err;
-+}
++ pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
++ list_move_tail(&pending_inuse[pending_idx].list, &list);
+
-+static const struct config_field header_common[] = {
-+ {
-+ .offset = PCI_COMMAND,
-+ .size = 2,
-+ .u.w.read = pciback_read_config_word,
-+ .u.w.write = command_write,
-+ },
-+ {
-+ .offset = PCI_INTERRUPT_LINE,
-+ .size = 1,
-+ .u.b.read = interrupt_read,
-+ },
-+ {
-+ .offset = PCI_INTERRUPT_PIN,
-+ .size = 1,
-+ .u.b.read = pciback_read_config_byte,
-+ },
-+ {
-+ /* Any side effects of letting driver domain control cache line? */
-+ .offset = PCI_CACHE_LINE_SIZE,
-+ .size = 1,
-+ .u.b.read = pciback_read_config_byte,
-+ .u.b.write = pciback_write_config_byte,
-+ },
-+ {
-+ .offset = PCI_LATENCY_TIMER,
-+ .size = 1,
-+ .u.b.read = pciback_read_config_byte,
-+ },
-+ {
-+ .offset = PCI_BIST,
-+ .size = 1,
-+ .u.b.read = pciback_read_config_byte,
-+ .u.b.write = bist_write,
-+ },
-+ {}
-+};
++ pfn = idx_to_pfn(pending_idx);
++ /* Already unmapped? */
++ if (!phys_to_machine_mapping_valid(pfn))
++ continue;
+
-+#define CFG_FIELD_BAR(reg_offset) \
-+ { \
-+ .offset = reg_offset, \
-+ .size = 4, \
-+ .init = bar_init, \
-+ .reset = bar_reset, \
-+ .release = bar_release, \
-+ .u.dw.read = bar_read, \
-+ .u.dw.write = bar_write, \
-+ }
++ gnttab_set_unmap_op(gop, idx_to_kaddr(pending_idx),
++ GNTMAP_host_map,
++ grant_tx_handle[pending_idx]);
++ gop++;
++ }
+
-+#define CFG_FIELD_ROM(reg_offset) \
-+ { \
-+ .offset = reg_offset, \
-+ .size = 4, \
-+ .init = rom_init, \
-+ .reset = bar_reset, \
-+ .release = bar_release, \
-+ .u.dw.read = bar_read, \
-+ .u.dw.write = rom_write, \
-+ }
++ if (netbk_copy_skb_mode != NETBK_DELAYED_COPY_SKB ||
++ list_empty(&pending_inuse_head))
++ break;
+
-+static const struct config_field header_0[] = {
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_2),
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_3),
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_4),
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_5),
-+ CFG_FIELD_ROM(PCI_ROM_ADDRESS),
-+ {}
-+};
++ /* Copy any entries that have been pending for too long. */
++ list_for_each_entry_safe(inuse, n, &pending_inuse_head, list) {
++ if (time_after(inuse->alloc_time + HZ / 2, jiffies))
++ break;
+
-+static const struct config_field header_1[] = {
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
-+ CFG_FIELD_ROM(PCI_ROM_ADDRESS1),
-+ {}
-+};
++ switch (copy_pending_req(inuse - pending_inuse)) {
++ case 0:
++ list_move_tail(&inuse->list, &list);
++ continue;
++ case -EBUSY:
++ list_del_init(&inuse->list);
++ continue;
++ case -ENOENT:
++ continue;
++ }
+
-+int pciback_config_header_add_fields(struct pci_dev *dev)
-+{
-+ int err;
++ break;
++ }
++ } while (dp != dealloc_prod);
+
-+ err = pciback_config_add_fields(dev, header_common);
-+ if (err)
-+ goto out;
++ dealloc_cons = dc;
+
-+ switch (dev->hdr_type) {
-+ case PCI_HEADER_TYPE_NORMAL:
-+ err = pciback_config_add_fields(dev, header_0);
-+ break;
++ ret = HYPERVISOR_grant_table_op(
++ GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops);
++ BUG_ON(ret);
+
-+ case PCI_HEADER_TYPE_BRIDGE:
-+ err = pciback_config_add_fields(dev, header_1);
-+ break;
++ list_for_each_entry_safe(inuse, n, &list, list) {
++ pending_idx = inuse - pending_inuse;
+
-+ default:
-+ err = -EINVAL;
-+ printk(KERN_ERR "pciback: %s: Unsupported header type %d!\n",
-+ pci_name(dev), dev->hdr_type);
-+ break;
-+ }
++ netif = pending_tx_info[pending_idx].netif;
+
-+ out:
-+ return err;
-+}
-diff --git a/drivers/xen/pciback/conf_space_quirks.c b/drivers/xen/pciback/conf_space_quirks.c
-new file mode 100644
-index 0000000..244a438
---- /dev/null
-+++ b/drivers/xen/pciback/conf_space_quirks.c
-@@ -0,0 +1,138 @@
-+/*
-+ * PCI Backend - Handle special overlays for broken devices.
-+ *
-+ * Author: Ryan Wilson <hap9(a)epoch.ncsc.mil>
-+ * Author: Chris Bookholt <hap10(a)epoch.ncsc.mil>
-+ */
++ make_tx_response(netif, &pending_tx_info[pending_idx].req,
++ NETIF_RSP_OKAY);
+
-+#include <linux/kernel.h>
-+#include <linux/pci.h>
-+#include "pciback.h"
-+#include "conf_space.h"
-+#include "conf_space_quirks.h"
++ /* Ready for next use. */
++ gnttab_reset_grant_page(mmap_pages[pending_idx]);
+
-+LIST_HEAD(pciback_quirks);
++ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+
-+static inline const struct pci_device_id *
-+match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
-+{
-+ if ((id->vendor == PCI_ANY_ID || id->vendor == dev->vendor) &&
-+ (id->device == PCI_ANY_ID || id->device == dev->device) &&
-+ (id->subvendor == PCI_ANY_ID || id->subvendor == dev->subsystem_vendor) &&
-+ (id->subdevice == PCI_ANY_ID || id->subdevice == dev->subsystem_device) &&
-+ !((id->class ^ dev->class) & id->class_mask))
-+ return id;
-+ return NULL;
++ netif_put(netif);
++
++ list_del_init(&inuse->list);
++ }
+}
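++
++/*
++ * dealloc_ring and pending_ring follow the usual free-running-index
++ * pattern: producer and consumer indices only ever increase and are
++ * reduced modulo the ring size on access (MASK_PEND_IDX plays that role
++ * here). A minimal sketch, assuming a power-of-two size; the name and
++ * size below are illustrative only:
++ */
++enum { EXAMPLE_RING_SIZE = 256 };	/* hypothetical power-of-two size */
++static inline unsigned int example_ring_slot(unsigned int idx)
++{
++	return idx & (EXAMPLE_RING_SIZE - 1);
++}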
+
-+struct pciback_config_quirk *pciback_find_quirk(struct pci_dev *dev)
++static void netbk_tx_err(netif_t *netif, netif_tx_request_t *txp, RING_IDX end)
+{
-+ struct pciback_config_quirk *tmp_quirk;
++ RING_IDX cons = netif->tx.req_cons;
+
-+ list_for_each_entry(tmp_quirk, &pciback_quirks, quirks_list)
-+ if (match_one_device(&tmp_quirk->devid, dev) != NULL)
-+ goto out;
-+ tmp_quirk = NULL;
-+ printk(KERN_DEBUG
-+ "quirk didn't match any device pciback knows about\n");
-+ out:
-+ return tmp_quirk;
++ do {
++ make_tx_response(netif, txp, NETIF_RSP_ERROR);
++ if (cons >= end)
++ break;
++ txp = RING_GET_REQUEST(&netif->tx, cons++);
++ } while (1);
++ netif->tx.req_cons = cons;
++ netif_schedule_work(netif);
++ netif_put(netif);
+}
+
-+static inline void register_quirk(struct pciback_config_quirk *quirk)
++static int netbk_count_requests(netif_t *netif, netif_tx_request_t *first,
++ netif_tx_request_t *txp, int work_to_do)
+{
-+ list_add_tail(&quirk->quirks_list, &pciback_quirks);
-+}
++ RING_IDX cons = netif->tx.req_cons;
++ int frags = 0;
+
-+int pciback_field_is_dup(struct pci_dev *dev, unsigned int reg)
-+{
-+ int ret = 0;
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+ struct config_field_entry *cfg_entry;
++ if (!(first->flags & NETTXF_more_data))
++ return 0;
+
-+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
-+ if ( OFFSET(cfg_entry) == reg) {
-+ ret = 1;
-+ break;
++ do {
++ if (frags >= work_to_do) {
++ DPRINTK("Need more frags\n");
++ return -frags;
+ }
-+ }
-+ return ret;
-+}
+
-+int pciback_config_quirks_add_field(struct pci_dev *dev, struct config_field
-+ *field)
-+{
-+ int err = 0;
++ if (unlikely(frags >= MAX_SKB_FRAGS)) {
++ DPRINTK("Too many frags\n");
++ return -frags;
++ }
+
-+ switch (field->size) {
-+ case 1:
-+ field->u.b.read = pciback_read_config_byte;
-+ field->u.b.write = pciback_write_config_byte;
-+ break;
-+ case 2:
-+ field->u.w.read = pciback_read_config_word;
-+ field->u.w.write = pciback_write_config_word;
-+ break;
-+ case 4:
-+ field->u.dw.read = pciback_read_config_dword;
-+ field->u.dw.write = pciback_write_config_dword;
-+ break;
-+ default:
-+ err = -EINVAL;
-+ goto out;
-+ }
++ memcpy(txp, RING_GET_REQUEST(&netif->tx, cons + frags),
++ sizeof(*txp));
++ if (txp->size > first->size) {
++ DPRINTK("Frags galore\n");
++ return -frags;
++ }
+
-+ pciback_config_add_field(dev, field);
++ first->size -= txp->size;
++ frags++;
+
-+ out:
-+ return err;
++ if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
++ DPRINTK("txp->offset: %x, size: %u\n",
++ txp->offset, txp->size);
++ return -frags;
++ }
++ } while ((txp++)->flags & NETTXF_more_data);
++
++ return frags;
+}
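++
++/*
++ * Error convention worth noting: on failure netbk_count_requests()
++ * returns minus the number of fragment requests already consumed, so
++ * the caller can acknowledge exactly those slots with error responses;
++ * see the netbk_tx_err(netif, &txreq, i - ret) call in net_tx_action().
++ */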
+
-+int pciback_config_quirks_init(struct pci_dev *dev)
++static gnttab_map_grant_ref_t *netbk_get_requests(netif_t *netif,
++ struct sk_buff *skb,
++ netif_tx_request_t *txp,
++ gnttab_map_grant_ref_t *mop)
+{
-+ struct pciback_config_quirk *quirk;
-+ int ret = 0;
++ struct skb_shared_info *shinfo = skb_shinfo(skb);
++ skb_frag_t *frags = shinfo->frags;
++ unsigned long pending_idx = *((u16 *)skb->data);
++ int i, start;
+
-+ quirk = kzalloc(sizeof(*quirk), GFP_ATOMIC);
-+ if (!quirk) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
++ /* Skip first skb fragment if it is on same page as header fragment. */
++ start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+
-+ quirk->devid.vendor = dev->vendor;
-+ quirk->devid.device = dev->device;
-+ quirk->devid.subvendor = dev->subsystem_vendor;
-+ quirk->devid.subdevice = dev->subsystem_device;
-+ quirk->devid.class = 0;
-+ quirk->devid.class_mask = 0;
-+ quirk->devid.driver_data = 0UL;
++ for (i = start; i < shinfo->nr_frags; i++, txp++) {
++ pending_idx = pending_ring[MASK_PEND_IDX(pending_cons++)];
+
-+ quirk->pdev = dev;
++ gnttab_set_map_op(mop++, idx_to_kaddr(pending_idx),
++ GNTMAP_host_map | GNTMAP_readonly,
++ txp->gref, netif->domid);
+
-+ register_quirk(quirk);
-+ out:
-+ return ret;
-+}
++ memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
++ netif_get(netif);
++ pending_tx_info[pending_idx].netif = netif;
++ frags[i].page = (void *)pending_idx;
++ }
+
-+void pciback_config_field_free(struct config_field *field)
-+{
-+ kfree(field);
++ return mop;
+}
+
-+int pciback_config_quirk_release(struct pci_dev *dev)
++static int netbk_tx_check_mop(struct sk_buff *skb,
++ gnttab_map_grant_ref_t **mopp)
+{
-+ struct pciback_config_quirk *quirk;
-+ int ret = 0;
++ gnttab_map_grant_ref_t *mop = *mopp;
++ int pending_idx = *((u16 *)skb->data);
++ netif_t *netif = pending_tx_info[pending_idx].netif;
++ netif_tx_request_t *txp;
++ struct skb_shared_info *shinfo = skb_shinfo(skb);
++ int nr_frags = shinfo->nr_frags;
++ int i, err, start;
+
-+ quirk = pciback_find_quirk(dev);
-+ if (!quirk) {
-+ ret = -ENXIO;
-+ goto out;
++ /* Check status of header. */
++ err = mop->status;
++ if (unlikely(err)) {
++ txp = &pending_tx_info[pending_idx].req;
++ make_tx_response(netif, txp, NETIF_RSP_ERROR);
++ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
++ netif_put(netif);
++ } else {
++ set_phys_to_machine(
++ __pa(idx_to_kaddr(pending_idx)) >> PAGE_SHIFT,
++ FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
++ grant_tx_handle[pending_idx] = mop->handle;
+ }
+
-+ list_del(&quirk->quirks_list);
-+ kfree(quirk);
++ /* Skip first skb fragment if it is on same page as header fragment. */
++ start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+
-+ out:
-+ return ret;
-+}
-diff --git a/drivers/xen/pciback/conf_space_quirks.h b/drivers/xen/pciback/conf_space_quirks.h
-new file mode 100644
-index 0000000..acd0e1a
---- /dev/null
-+++ b/drivers/xen/pciback/conf_space_quirks.h
-@@ -0,0 +1,35 @@
-+/*
-+ * PCI Backend - Data structures for special overlays for broken devices.
-+ *
-+ * Ryan Wilson <hap9(a)epoch.ncsc.mil>
-+ * Chris Bookholt <hap10(a)epoch.ncsc.mil>
-+ */
++ for (i = start; i < nr_frags; i++) {
++ int j, newerr;
+
-+#ifndef __XEN_PCIBACK_CONF_SPACE_QUIRKS_H__
-+#define __XEN_PCIBACK_CONF_SPACE_QUIRKS_H__
++ pending_idx = (unsigned long)shinfo->frags[i].page;
+
-+#include <linux/pci.h>
-+#include <linux/list.h>
++ /* Check error status: if okay then remember grant handle. */
++ newerr = (++mop)->status;
++ if (likely(!newerr)) {
++ set_phys_to_machine(
++ __pa(idx_to_kaddr(pending_idx))>>PAGE_SHIFT,
++ FOREIGN_FRAME(mop->dev_bus_addr>>PAGE_SHIFT));
++ grant_tx_handle[pending_idx] = mop->handle;
++ /* Had a previous error? Invalidate this fragment. */
++ if (unlikely(err))
++ netif_idx_release(pending_idx);
++ continue;
++ }
+
-+struct pciback_config_quirk {
-+ struct list_head quirks_list;
-+ struct pci_device_id devid;
-+ struct pci_dev *pdev;
-+};
++ /* Error on this fragment: respond to client with an error. */
++ txp = &pending_tx_info[pending_idx].req;
++ make_tx_response(netif, txp, NETIF_RSP_ERROR);
++ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
++ netif_put(netif);
+
-+struct pciback_config_quirk *pciback_find_quirk(struct pci_dev *dev);
++ /* Not the first error? Preceding frags already invalidated. */
++ if (err)
++ continue;
+
-+int pciback_config_quirks_add_field(struct pci_dev *dev, struct config_field
-+ *field);
++ /* First error: invalidate header and preceding fragments. */
++ pending_idx = *((u16 *)skb->data);
++ netif_idx_release(pending_idx);
++ for (j = start; j < i; j++) {
++			pending_idx = (unsigned long)shinfo->frags[j].page;
++ netif_idx_release(pending_idx);
++ }
+
-+int pciback_config_quirks_remove_field(struct pci_dev *dev, int reg);
++ /* Remember the error: invalidate all subsequent fragments. */
++ err = newerr;
++ }
+
-+int pciback_config_quirks_init(struct pci_dev *dev);
++ *mopp = mop + 1;
++ return err;
++}
+
-+void pciback_config_field_free(struct config_field *field);
++static void netbk_fill_frags(struct sk_buff *skb)
++{
++ struct skb_shared_info *shinfo = skb_shinfo(skb);
++ int nr_frags = shinfo->nr_frags;
++ int i;
+
-+int pciback_config_quirk_release(struct pci_dev *dev);
++ for (i = 0; i < nr_frags; i++) {
++ skb_frag_t *frag = shinfo->frags + i;
++ netif_tx_request_t *txp;
++ unsigned long pending_idx;
+
-+int pciback_field_is_dup(struct pci_dev *dev, unsigned int reg);
++ pending_idx = (unsigned long)frag->page;
+
-+#endif
-diff --git a/drivers/xen/pciback/controller.c b/drivers/xen/pciback/controller.c
-new file mode 100644
-index 0000000..294e48f
---- /dev/null
-+++ b/drivers/xen/pciback/controller.c
-@@ -0,0 +1,443 @@
-+/*
-+ * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
-+ * Alex Williamson <alex.williamson(a)hp.com>
-+ *
-+ * PCI "Controller" Backend - virtualize PCI bus topology based on PCI
-+ * controllers. Devices under the same PCI controller are exposed on the
-+ * same virtual domain:bus. Within a bus, device slots are virtualized
-+ * to compact the bus.
-+ *
-+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-+ */
++ pending_inuse[pending_idx].alloc_time = jiffies;
++ list_add_tail(&pending_inuse[pending_idx].list,
++ &pending_inuse_head);
+
-+#include <linux/acpi.h>
-+#include <linux/list.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include "pciback.h"
++ txp = &pending_tx_info[pending_idx].req;
++ frag->page = virt_to_page(idx_to_kaddr(pending_idx));
++ frag->size = txp->size;
++ frag->page_offset = txp->offset;
+
-+#define PCI_MAX_BUSSES 255
-+#define PCI_MAX_SLOTS 32
++ skb->len += txp->size;
++ skb->data_len += txp->size;
++ skb->truesize += txp->size;
++ }
++}
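++
++/*
++ * A subtlety shared by the tx helpers above: between
++ * netbk_get_requests() and netbk_fill_frags(), frags[i].page holds not
++ * a struct page pointer but the pending ring index smuggled through a
++ * cast, and netbk_fill_frags() is what finally replaces it with the
++ * real page backing the mapped grant.
++ */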
+
-+struct controller_dev_entry {
-+ struct list_head list;
-+ struct pci_dev *dev;
-+ unsigned int devfn;
-+};
++int netbk_get_extras(netif_t *netif, struct netif_extra_info *extras,
++ int work_to_do)
++{
++ struct netif_extra_info extra;
++ RING_IDX cons = netif->tx.req_cons;
+
-+struct controller_list_entry {
-+ struct list_head list;
-+ struct pci_controller *controller;
-+ unsigned int domain;
-+ unsigned int bus;
-+ unsigned int next_devfn;
-+ struct list_head dev_list;
-+};
++ do {
++ if (unlikely(work_to_do-- <= 0)) {
++ DPRINTK("Missing extra info\n");
++ return -EBADR;
++ }
+
-+struct controller_dev_data {
-+ struct list_head list;
-+ unsigned int next_domain;
-+ unsigned int next_bus;
-+ spinlock_t lock;
-+};
++ memcpy(&extra, RING_GET_REQUEST(&netif->tx, cons),
++ sizeof(extra));
++ if (unlikely(!extra.type ||
++ extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
++ netif->tx.req_cons = ++cons;
++ DPRINTK("Invalid extra type: %d\n", extra.type);
++ return -EINVAL;
++ }
+
-+struct walk_info {
-+ struct pciback_device *pdev;
-+ int resource_count;
-+ int root_num;
-+};
++ memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
++ netif->tx.req_cons = ++cons;
++ } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
+
-+struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
-+ unsigned int domain, unsigned int bus,
-+ unsigned int devfn)
-+{
-+ struct controller_dev_data *dev_data = pdev->pci_dev_data;
-+ struct controller_dev_entry *dev_entry;
-+ struct controller_list_entry *cntrl_entry;
-+ struct pci_dev *dev = NULL;
-+ unsigned long flags;
++ return work_to_do;
++}
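++
++/*
++ * Example of how callers consume the extras array: a slot of type
++ * XEN_NETIF_EXTRA_TYPE_GSO lands at index XEN_NETIF_EXTRA_TYPE_GSO - 1,
++ * and net_tx_action() below tests that entry's .type for non-zero to
++ * decide whether a GSO request accompanied the packet.
++ */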
+
-+ spin_lock_irqsave(&dev_data->lock, flags);
-+
-+ list_for_each_entry(cntrl_entry, &dev_data->list, list) {
-+ if (cntrl_entry->domain != domain ||
-+ cntrl_entry->bus != bus)
-+ continue;
-+
-+ list_for_each_entry(dev_entry, &cntrl_entry->dev_list, list) {
-+ if (devfn == dev_entry->devfn) {
-+ dev = dev_entry->dev;
-+ goto found;
-+ }
-+ }
-+ }
-+found:
-+ spin_unlock_irqrestore(&dev_data->lock, flags);
-+
-+ return dev;
-+}
-+
-+int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
-+ int devid, publish_pci_dev_cb publish_cb)
++static int netbk_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso)
+{
-+ struct controller_dev_data *dev_data = pdev->pci_dev_data;
-+ struct controller_dev_entry *dev_entry;
-+ struct controller_list_entry *cntrl_entry;
-+ struct pci_controller *dev_controller = PCI_CONTROLLER(dev);
-+ unsigned long flags;
-+ int ret = 0, found = 0;
-+
-+ spin_lock_irqsave(&dev_data->lock, flags);
++ if (!gso->u.gso.size) {
++ DPRINTK("GSO size must not be zero.\n");
++ return -EINVAL;
++ }
+
-+ /* Look to see if we already have a domain:bus for this controller */
-+ list_for_each_entry(cntrl_entry, &dev_data->list, list) {
-+ if (cntrl_entry->controller == dev_controller) {
-+ found = 1;
-+ break;
-+ }
++	/* Currently only TCPv4 segmentation offload is supported. */
++ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
++ DPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
++ return -EINVAL;
+ }
+
-+ if (!found) {
-+ cntrl_entry = kmalloc(sizeof(*cntrl_entry), GFP_ATOMIC);
-+ if (!cntrl_entry) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
++ skb_shinfo(skb)->gso_size = gso->u.gso.size;
++ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+
-+ cntrl_entry->controller = dev_controller;
-+ cntrl_entry->next_devfn = PCI_DEVFN(0, 0);
++ /* Header must be checked, and gso_segs computed. */
++ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
++ skb_shinfo(skb)->gso_segs = 0;
+
-+ cntrl_entry->domain = dev_data->next_domain;
-+ cntrl_entry->bus = dev_data->next_bus++;
-+ if (dev_data->next_bus > PCI_MAX_BUSSES) {
-+ dev_data->next_domain++;
-+ dev_data->next_bus = 0;
-+ }
++ return 0;
++}
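++
++/*
++ * Marking the skb SKB_GSO_DODGY above is deliberate: the GSO metadata
++ * came from an untrusted frontend, so the core stack is asked to
++ * re-validate the headers and compute gso_segs itself before the
++ * packet is segmented.
++ */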
+
-+ INIT_LIST_HEAD(&cntrl_entry->dev_list);
++/* Called after netfront has transmitted */
++static void net_tx_action(unsigned long unused)
++{
++ struct list_head *ent;
++ struct sk_buff *skb;
++ netif_t *netif;
++ netif_tx_request_t txreq;
++ netif_tx_request_t txfrags[MAX_SKB_FRAGS];
++ struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
++ u16 pending_idx;
++ RING_IDX i;
++ gnttab_map_grant_ref_t *mop;
++ unsigned int data_len;
++ int ret, work_to_do;
+
-+ list_add_tail(&cntrl_entry->list, &dev_data->list);
-+ }
++ if (dealloc_cons != dealloc_prod)
++ net_tx_action_dealloc();
+
-+ if (PCI_SLOT(cntrl_entry->next_devfn) > PCI_MAX_SLOTS) {
-+ /*
-+ * While it seems unlikely, this can actually happen if
-+ * a controller has P2P bridges under it.
-+ */
-+ xenbus_dev_fatal(pdev->xdev, -ENOSPC, "Virtual bus %04x:%02x "
-+ "is full, no room to export %04x:%02x:%02x.%x",
-+ cntrl_entry->domain, cntrl_entry->bus,
-+ pci_domain_nr(dev->bus), dev->bus->number,
-+ PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
-+ ret = -ENOSPC;
-+ goto out;
-+ }
++ mop = tx_map_ops;
++ while (((NR_PENDING_REQS + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
++ !list_empty(&net_schedule_list)) {
++ /* Get a netif from the list with work to do. */
++ ent = net_schedule_list.next;
++ netif = list_entry(ent, netif_t, list);
++ netif_get(netif);
++ remove_from_net_schedule_list(netif);
+
-+ dev_entry = kmalloc(sizeof(*dev_entry), GFP_ATOMIC);
-+ if (!dev_entry) {
-+ if (list_empty(&cntrl_entry->dev_list)) {
-+ list_del(&cntrl_entry->list);
-+ kfree(cntrl_entry);
++ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
++ if (!work_to_do) {
++ netif_put(netif);
++ continue;
+ }
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+
-+ dev_entry->dev = dev;
-+ dev_entry->devfn = cntrl_entry->next_devfn;
+
-+ list_add_tail(&dev_entry->list, &cntrl_entry->dev_list);
-+
-+ cntrl_entry->next_devfn += PCI_DEVFN(1, 0);
-+
-+out:
-+ spin_unlock_irqrestore(&dev_data->lock, flags);
++ i = netif->tx.req_cons;
++ rmb(); /* Ensure that we see the request before we copy it. */
++ memcpy(&txreq, RING_GET_REQUEST(&netif->tx, i), sizeof(txreq));
+
-+ /* TODO: Publish virtual domain:bus:slot.func here. */
++ /* Credit-based scheduling. */
++ if (txreq.size > netif->remaining_credit) {
++ unsigned long now = jiffies;
++ unsigned long next_credit =
++ netif->credit_timeout.expires +
++ msecs_to_jiffies(netif->credit_usec / 1000);
+
-+ return ret;
-+}
++ /* Timer could already be pending in rare cases. */
++ if (timer_pending(&netif->credit_timeout)) {
++ netif_put(netif);
++ continue;
++ }
+
-+void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
-+{
-+ struct controller_dev_data *dev_data = pdev->pci_dev_data;
-+ struct controller_list_entry *cntrl_entry;
-+ struct controller_dev_entry *dev_entry = NULL;
-+ struct pci_dev *found_dev = NULL;
-+ unsigned long flags;
++ /* Passed the point where we can replenish credit? */
++ if (time_after_eq(now, next_credit)) {
++ netif->credit_timeout.expires = now;
++ tx_add_credit(netif);
++ }
+
-+ spin_lock_irqsave(&dev_data->lock, flags);
++ /* Still too big to send right now? Set a callback. */
++ if (txreq.size > netif->remaining_credit) {
++ netif->credit_timeout.data =
++ (unsigned long)netif;
++ netif->credit_timeout.function =
++ tx_credit_callback;
++ __mod_timer(&netif->credit_timeout,
++ next_credit);
++ netif_put(netif);
++ continue;
++ }
++ }
++ netif->remaining_credit -= txreq.size;
+
-+ list_for_each_entry(cntrl_entry, &dev_data->list, list) {
-+ if (cntrl_entry->controller != PCI_CONTROLLER(dev))
-+ continue;
++ work_to_do--;
++ netif->tx.req_cons = ++i;
+
-+ list_for_each_entry(dev_entry, &cntrl_entry->dev_list, list) {
-+ if (dev_entry->dev == dev) {
-+ found_dev = dev_entry->dev;
-+ break;
++ memset(extras, 0, sizeof(extras));
++ if (txreq.flags & NETTXF_extra_info) {
++ work_to_do = netbk_get_extras(netif, extras,
++ work_to_do);
++ i = netif->tx.req_cons;
++ if (unlikely(work_to_do < 0)) {
++ netbk_tx_err(netif, &txreq, i);
++ continue;
+ }
+ }
-+ }
+
-+ if (!found_dev) {
-+ spin_unlock_irqrestore(&dev_data->lock, flags);
-+ return;
-+ }
++ ret = netbk_count_requests(netif, &txreq, txfrags, work_to_do);
++ if (unlikely(ret < 0)) {
++ netbk_tx_err(netif, &txreq, i - ret);
++ continue;
++ }
++ i += ret;
+
-+ list_del(&dev_entry->list);
-+ kfree(dev_entry);
++ if (unlikely(txreq.size < ETH_HLEN)) {
++ DPRINTK("Bad packet size: %d\n", txreq.size);
++ netbk_tx_err(netif, &txreq, i);
++ continue;
++ }
+
-+ if (list_empty(&cntrl_entry->dev_list)) {
-+ list_del(&cntrl_entry->list);
-+ kfree(cntrl_entry);
-+ }
++ /* The payload must not cross a page boundary, as it cannot fragment. */
++ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
++ DPRINTK("txreq.offset: %x, size: %u, end: %lu\n",
++ txreq.offset, txreq.size,
++ (txreq.offset &~PAGE_MASK) + txreq.size);
++ netbk_tx_err(netif, &txreq, i);
++ continue;
++ }
+
-+ spin_unlock_irqrestore(&dev_data->lock, flags);
-+ pcistub_put_pci_dev(found_dev);
-+}
++ pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+
-+int pciback_init_devices(struct pciback_device *pdev)
-+{
-+ struct controller_dev_data *dev_data;
++ data_len = (txreq.size > PKT_PROT_LEN &&
++ ret < MAX_SKB_FRAGS) ?
++ PKT_PROT_LEN : txreq.size;
+
-+ dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
-+ if (!dev_data)
-+ return -ENOMEM;
++ skb = alloc_skb(data_len + 16 + NET_IP_ALIGN,
++ GFP_ATOMIC | __GFP_NOWARN);
++ if (unlikely(skb == NULL)) {
++ DPRINTK("Can't allocate a skb in start_xmit.\n");
++ netbk_tx_err(netif, &txreq, i);
++ break;
++ }
+
-+ spin_lock_init(&dev_data->lock);
++ /* Packets passed to netif_rx() must have some headroom. */
++ skb_reserve(skb, 16 + NET_IP_ALIGN);
+
-+ INIT_LIST_HEAD(&dev_data->list);
++ if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
++ struct netif_extra_info *gso;
++ gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
+
-+ /* Starting domain:bus numbers */
-+ dev_data->next_domain = 0;
-+ dev_data->next_bus = 0;
++ if (netbk_set_skb_gso(skb, gso)) {
++ kfree_skb(skb);
++ netbk_tx_err(netif, &txreq, i);
++ continue;
++ }
++ }
+
-+ pdev->pci_dev_data = dev_data;
++ gnttab_set_map_op(mop, idx_to_kaddr(pending_idx),
++ GNTMAP_host_map | GNTMAP_readonly,
++ txreq.gref, netif->domid);
++ mop++;
+
-+ return 0;
-+}
++ memcpy(&pending_tx_info[pending_idx].req,
++ &txreq, sizeof(txreq));
++ pending_tx_info[pending_idx].netif = netif;
++ *((u16 *)skb->data) = pending_idx;
+
-+static acpi_status write_xenbus_resource(struct acpi_resource *res, void *data)
-+{
-+ struct walk_info *info = data;
-+ struct acpi_resource_address64 addr;
-+ acpi_status status;
-+ int i, len, err;
-+ char str[32], tmp[3];
-+ unsigned char *ptr, *buf;
++ __skb_put(skb, data_len);
+
-+ status = acpi_resource_to_address64(res, &addr);
++ skb_shinfo(skb)->nr_frags = ret;
++ if (data_len < txreq.size) {
++ skb_shinfo(skb)->nr_frags++;
++ skb_shinfo(skb)->frags[0].page =
++ (void *)(unsigned long)pending_idx;
++ } else {
++ /* Discriminate from any valid pending_idx value. */
++ skb_shinfo(skb)->frags[0].page = (void *)~0UL;
++ }
+
-+ /* Do we care about this range? Let's check. */
-+ if (!ACPI_SUCCESS(status) ||
-+ !(addr.resource_type == ACPI_MEMORY_RANGE ||
-+ addr.resource_type == ACPI_IO_RANGE) ||
-+ !addr.address_length || addr.producer_consumer != ACPI_PRODUCER)
-+ return AE_OK;
++ if (skb->data_len < skb_shinfo(skb)->gso_size) {
++ skb_shinfo(skb)->gso_size = 0;
++ skb_shinfo(skb)->gso_type = 0;
++ }
+
-+ /*
-+ * Furthermore, we really only care to tell the guest about
-+ * address ranges that require address translation of some sort.
-+ */
-+ if (!(addr.resource_type == ACPI_MEMORY_RANGE &&
-+ addr.info.mem.translation) &&
-+ !(addr.resource_type == ACPI_IO_RANGE &&
-+ addr.info.io.translation))
-+ return AE_OK;
-+
-+ /* Store the resource in xenbus for the guest */
-+ len = snprintf(str, sizeof(str), "root-%d-resource-%d",
-+ info->root_num, info->resource_count);
-+ if (unlikely(len >= (sizeof(str) - 1)))
-+ return AE_OK;
++ __skb_queue_tail(&tx_queue, skb);
+
-+ buf = kzalloc((sizeof(*res) * 2) + 1, GFP_KERNEL);
-+ if (!buf)
-+ return AE_OK;
++ pending_cons++;
+
-+ /* Clean out resource_source */
-+ res->data.address64.resource_source.index = 0xFF;
-+ res->data.address64.resource_source.string_length = 0;
-+ res->data.address64.resource_source.string_ptr = NULL;
++ mop = netbk_get_requests(netif, skb, txfrags, mop);
+
-+ ptr = (unsigned char *)res;
++ netif->tx.req_cons = i;
++ netif_schedule_work(netif);
+
-+ /* Turn the acpi_resource into an ASCII byte stream */
-+ for (i = 0; i < sizeof(*res); i++) {
-+ snprintf(tmp, sizeof(tmp), "%02x", ptr[i]);
-+ strncat(buf, tmp, 2);
++ if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
++ break;
+ }
+
-+ err = xenbus_printf(XBT_NIL, info->pdev->xdev->nodename,
-+ str, "%s", buf);
-+
-+ if (!err)
-+ info->resource_count++;
++ if (mop == tx_map_ops)
++ return;
+
-+ kfree(buf);
++ ret = HYPERVISOR_grant_table_op(
++ GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops);
++ BUG_ON(ret);
+
-+ return AE_OK;
-+}
++ mop = tx_map_ops;
++ while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
++ netif_tx_request_t *txp;
+
-+int pciback_publish_pci_roots(struct pciback_device *pdev,
-+ publish_pci_root_cb publish_root_cb)
-+{
-+ struct controller_dev_data *dev_data = pdev->pci_dev_data;
-+ struct controller_list_entry *cntrl_entry;
-+ int i, root_num, len, err = 0;
-+ unsigned int domain, bus;
-+ char str[64];
-+ struct walk_info info;
++ pending_idx = *((u16 *)skb->data);
++ netif = pending_tx_info[pending_idx].netif;
++ txp = &pending_tx_info[pending_idx].req;
+
-+ spin_lock(&dev_data->lock);
++ /* Check the remap error code. */
++ if (unlikely(netbk_tx_check_mop(skb, &mop))) {
++ DPRINTK("netback grant failed.\n");
++ skb_shinfo(skb)->nr_frags = 0;
++ kfree_skb(skb);
++ continue;
++ }
+
-+ list_for_each_entry(cntrl_entry, &dev_data->list, list) {
-+ /* First publish all the domain:bus info */
-+ err = publish_root_cb(pdev, cntrl_entry->domain,
-+ cntrl_entry->bus);
-+ if (err)
-+ goto out;
++ data_len = skb->len;
++ memcpy(skb->data,
++ (void *)(idx_to_kaddr(pending_idx)|txp->offset),
++ data_len);
++ if (data_len < txp->size) {
++ /* Append the packet payload as a fragment. */
++ txp->offset += data_len;
++ txp->size -= data_len;
++ } else {
++ /* Schedule a response immediately. */
++ netif_idx_release(pending_idx);
++ }
+
+ /*
-+ * Now figure out which root-%d this belongs to
-+ * so we can associate resources with it.
++ * Old frontends do not assert data_validated but we
++ * can infer it from csum_blank so test both flags.
+ */
-+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
-+ "root_num", "%d", &root_num);
-+
-+ if (err != 1)
-+ goto out;
-+
-+ for (i = 0; i < root_num; i++) {
-+ len = snprintf(str, sizeof(str), "root-%d", i);
-+ if (unlikely(len >= (sizeof(str) - 1))) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
-+ str, "%x:%x", &domain, &bus);
-+ if (err != 2)
-+ goto out;
-+
-+ /* Is this the one we just published? */
-+ if (domain == cntrl_entry->domain &&
-+ bus == cntrl_entry->bus)
-+ break;
++ if (txp->flags & (NETTXF_data_validated|NETTXF_csum_blank)) {
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ skb->proto_data_valid = 1;
++ } else {
++ skb->ip_summed = CHECKSUM_NONE;
++ skb->proto_data_valid = 0;
+ }
++ skb->proto_csum_blank = !!(txp->flags & NETTXF_csum_blank);
+
-+ if (i == root_num)
-+ goto out;
++ netbk_fill_frags(skb);
+
-+ info.pdev = pdev;
-+ info.resource_count = 0;
-+ info.root_num = i;
++ skb->dev = netif->dev;
++ skb->protocol = eth_type_trans(skb, skb->dev);
+
-+ /* Let ACPI do the heavy lifting on decoding resources */
-+ acpi_walk_resources(cntrl_entry->controller->acpi_handle,
-+ METHOD_NAME__CRS, write_xenbus_resource,
-+ &info);
++ netif->stats.rx_bytes += skb->len;
++ netif->stats.rx_packets++;
+
-+ /* No resources. OK. On to the next one. */
-+ if (!info.resource_count)
++ if (unlikely(netbk_copy_skb_mode == NETBK_ALWAYS_COPY_SKB) &&
++ unlikely(skb_linearize(skb))) {
++ DPRINTK("Can't linearize skb in net_tx_action.\n");
++ kfree_skb(skb);
+ continue;
-+
-+ /* Store the number of resources we wrote for this root-%d */
-+ len = snprintf(str, sizeof(str), "root-%d-resources", i);
-+ if (unlikely(len >= (sizeof(str) - 1))) {
-+ err = -ENOMEM;
-+ goto out;
+ }
+
-+ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
-+ "%d", info.resource_count);
-+ if (err)
-+ goto out;
++ netif_rx(skb);
++ netif->dev->last_rx = jiffies;
+ }
+
-+ /* Finally, write some magic to synchronize with the guest. */
-+ len = snprintf(str, sizeof(str), "root-resource-magic");
-+ if (unlikely(len >= (sizeof(str) - 1))) {
-+ err = -ENOMEM;
-+ goto out;
++ if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB &&
++ !list_empty(&pending_inuse_head)) {
++ struct netbk_tx_pending_inuse *oldest;
++
++ oldest = list_entry(pending_inuse_head.next,
++ struct netbk_tx_pending_inuse, list);
++ mod_timer(&netbk_tx_pending_timer, oldest->alloc_time + HZ);
+ }
++}
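
The credit scheduler in net_tx_action() replenishes netif->remaining_credit with credit_bytes once every credit_usec microseconds (tx_add_credit() and tx_credit_callback() live elsewhere in this file and are not shown in this hunk); a request goes out only if its size fits in the remaining credit, otherwise the timer is armed for the next replenish point. A self-contained sketch of the same arithmetic, with illustrative names:

    #include <stdbool.h>

    struct credit {
        unsigned long bytes;       /* replenished every period (credit_bytes) */
        unsigned long period;      /* replenish interval, in ticks */
        unsigned long remaining;   /* remaining_credit */
        unsigned long next_refill; /* absolute tick of the next replenish */
    };

    /* May a packet of 'size' bytes be sent at tick 'now'? */
    static bool credit_check(struct credit *c, unsigned long now,
                             unsigned long size)
    {
        if (now >= c->next_refill) {       /* time_after_eq() in the patch */
            c->remaining = c->bytes;       /* replenish, as tx_add_credit()
                                            * presumably does */
            c->next_refill = now + c->period;
        }
        if (size > c->remaining)
            return false;                  /* caller arms the credit timer */
        c->remaining -= size;
        return true;
    }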
+
-+ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
-+ "%lx", (sizeof(struct acpi_resource) * 2) + 1);
++static void netif_idx_release(u16 pending_idx)
++{
++ static DEFINE_SPINLOCK(_lock);
++ unsigned long flags;
+
-+out:
-+ spin_unlock(&dev_data->lock);
++ spin_lock_irqsave(&_lock, flags);
++ dealloc_ring[MASK_PEND_IDX(dealloc_prod)] = pending_idx;
++ /* Sync with net_tx_action_dealloc: insert idx /then/ incr producer. */
++ smp_wmb();
++ dealloc_prod++;
++ spin_unlock_irqrestore(&_lock, flags);
+
-+ return err;
++ tasklet_schedule(&net_tx_tasklet);
+}
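
netif_idx_release() publishes into dealloc_ring with the classic single-producer pattern: store the slot, smp_wmb(), then advance dealloc_prod, so the consumer in net_tx_action_dealloc() (not shown in this hunk) can never read an unwritten slot. The same idiom in a self-contained form, single producer and single consumer, with GCC atomic fences standing in for the kernel barriers and plain loads/stores assumed atomic for the sketch:

    #define RING_SIZE 256                       /* must be a power of two */
    #define MASK(i)   ((i) & (RING_SIZE - 1))

    static unsigned short ring[RING_SIZE];
    static unsigned int prod, cons;             /* free-running counters */

    static void publish(unsigned short idx)     /* producer side */
    {
        ring[MASK(prod)] = idx;
        __atomic_thread_fence(__ATOMIC_RELEASE);  /* smp_wmb() analogue */
        prod++;
    }

    static int consume(unsigned short *idx)     /* consumer side */
    {
        if (cons == prod)
            return 0;                             /* ring empty */
        __atomic_thread_fence(__ATOMIC_ACQUIRE);  /* pairs with the release */
        *idx = ring[MASK(cons)];
        cons++;
        return 1;
    }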
+
-+void pciback_release_devices(struct pciback_device *pdev)
++static void netif_page_release(struct page *page, unsigned int order)
+{
-+ struct controller_dev_data *dev_data = pdev->pci_dev_data;
-+ struct controller_list_entry *cntrl_entry, *c;
-+ struct controller_dev_entry *dev_entry, *d;
++ int idx = netif_page_index(page);
++ BUG_ON(order);
++ BUG_ON(idx < 0);
++ netif_idx_release(idx);
++}
+
-+ list_for_each_entry_safe(cntrl_entry, c, &dev_data->list, list) {
-+ list_for_each_entry_safe(dev_entry, d,
-+ &cntrl_entry->dev_list, list) {
-+ list_del(&dev_entry->list);
-+ pcistub_put_pci_dev(dev_entry->dev);
-+ kfree(dev_entry);
-+ }
-+ list_del(&cntrl_entry->list);
-+ kfree(cntrl_entry);
-+ }
++irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++{
++ netif_t *netif = dev_id;
+
-+ kfree(dev_data);
-+ pdev->pci_dev_data = NULL;
++ add_to_net_schedule_list_tail(netif);
++ maybe_schedule_tx_action();
++
++ if (netif_schedulable(netif) && !netbk_queue_full(netif))
++ netif_wake_queue(netif->dev);
++
++ return IRQ_HANDLED;
+}
+
-+int pciback_get_pcifront_dev(struct pci_dev *pcidev,
-+ struct pciback_device *pdev,
-+ unsigned int *domain, unsigned int *bus, unsigned int *devfn)
++static void make_tx_response(netif_t *netif,
++ netif_tx_request_t *txp,
++ s8 st)
+{
-+ struct controller_dev_data *dev_data = pdev->pci_dev_data;
-+ struct controller_dev_entry *dev_entry;
-+ struct controller_list_entry *cntrl_entry;
-+ unsigned long flags;
-+ int found = 0;
-+ spin_lock_irqsave(&dev_data->lock, flags);
-+
-+ list_for_each_entry(cntrl_entry, &dev_data->list, list) {
-+ list_for_each_entry(dev_entry, &cntrl_entry->dev_list, list) {
-+ if ( (dev_entry->dev->bus->number ==
-+ pcidev->bus->number) &&
-+ (dev_entry->dev->devfn ==
-+ pcidev->devfn) &&
-+ (pci_domain_nr(dev_entry->dev->bus) ==
-+ pci_domain_nr(pcidev->bus)))
-+ {
-+ found = 1;
-+ *domain = cntrl_entry->domain;
-+ *bus = cntrl_entry->bus;
-+ *devfn = dev_entry->devfn;
-+ goto out;
-+ }
-+ }
-+ }
-+out:
-+ spin_unlock_irqrestore(&dev_data->lock, flags);
-+ return found;
-+
-+}
-+
-diff --git a/drivers/xen/pciback/passthrough.c b/drivers/xen/pciback/passthrough.c
-new file mode 100644
-index 0000000..9e7a0c4
---- /dev/null
-+++ b/drivers/xen/pciback/passthrough.c
-@@ -0,0 +1,176 @@
-+/*
-+ * PCI Backend - Provides restricted access to the real PCI bus topology
-+ * to the frontend
-+ *
-+ * Author: Ryan Wilson <hap9(a)epoch.ncsc.mil>
-+ */
-+
-+#include <linux/list.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include "pciback.h"
++ RING_IDX i = netif->tx.rsp_prod_pvt;
++ netif_tx_response_t *resp;
++ int notify;
+
-+struct passthrough_dev_data {
-+ /* Access to dev_list must be protected by lock */
-+ struct list_head dev_list;
-+ spinlock_t lock;
-+};
++ resp = RING_GET_RESPONSE(&netif->tx, i);
++ resp->id = txp->id;
++ resp->status = st;
+
-+struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
-+ unsigned int domain, unsigned int bus,
-+ unsigned int devfn)
-+{
-+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
-+ struct pci_dev_entry *dev_entry;
-+ struct pci_dev *dev = NULL;
-+ unsigned long flags;
++ if (txp->flags & NETTXF_extra_info)
++ RING_GET_RESPONSE(&netif->tx, ++i)->status = NETIF_RSP_NULL;
+
-+ spin_lock_irqsave(&dev_data->lock, flags);
++ netif->tx.rsp_prod_pvt = ++i;
++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->tx, notify);
++ if (notify)
++ notify_remote_via_irq(netif->irq);
+
-+ list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
-+ if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
-+ && bus == (unsigned int)dev_entry->dev->bus->number
-+ && devfn == dev_entry->dev->devfn) {
-+ dev = dev_entry->dev;
-+ break;
-+ }
++#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
++ if (i == netif->tx.req_cons) {
++ int more_to_do;
++ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
++ if (more_to_do)
++ add_to_net_schedule_list_tail(netif);
+ }
-+
-+ spin_unlock_irqrestore(&dev_data->lock, flags);
-+
-+ return dev;
++#endif
+}
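
The notify decision in make_tx_response() is delegated to RING_PUSH_RESPONSES_AND_CHECK_NOTIFY from Xen's ring.h, which coalesces event-channel kicks: the frontend advertises in rsp_event the response index it wants to be woken for, and the backend notifies only if that index falls inside the range it just pushed. A sketch of that interval test, a simplification of the ring.h macro and not a drop-in replacement:

    /* True iff rsp_event lies in the half-open interval (old_prod, new_prod],
     * computed with wrap-safe unsigned arithmetic as in Xen's ring.h. */
    static int check_notify(unsigned int old_prod, unsigned int new_prod,
                            unsigned int rsp_event)
    {
        return (unsigned int)(new_prod - rsp_event) <
               (unsigned int)(new_prod - old_prod);
    }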
+
-+int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
-+ int devid, publish_pci_dev_cb publish_cb)
++static netif_rx_response_t *make_rx_response(netif_t *netif,
++ u16 id,
++ s8 st,
++ u16 offset,
++ u16 size,
++ u16 flags)
+{
-+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
-+ struct pci_dev_entry *dev_entry;
-+ unsigned long flags;
-+ unsigned int domain, bus, devfn;
-+ int err;
-+
-+ dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
-+ if (!dev_entry)
-+ return -ENOMEM;
-+ dev_entry->dev = dev;
++ RING_IDX i = netif->rx.rsp_prod_pvt;
++ netif_rx_response_t *resp;
+
-+ spin_lock_irqsave(&dev_data->lock, flags);
-+ list_add_tail(&dev_entry->list, &dev_data->dev_list);
-+ spin_unlock_irqrestore(&dev_data->lock, flags);
++ resp = RING_GET_RESPONSE(&netif->rx, i);
++ resp->offset = offset;
++ resp->flags = flags;
++ resp->id = id;
++ resp->status = (s16)size;
++ if (st < 0)
++ resp->status = (s16)st;
+
-+ /* Publish this device. */
-+ domain = (unsigned int)pci_domain_nr(dev->bus);
-+ bus = (unsigned int)dev->bus->number;
-+ devfn = dev->devfn;
-+ err = publish_cb(pdev, domain, bus, devfn, devid);
++ netif->rx.rsp_prod_pvt = ++i;
+
-+ return err;
++ return resp;
+}
+
-+void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
++#ifdef NETBE_DEBUG_INTERRUPT
++static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
+{
-+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
-+ struct pci_dev_entry *dev_entry, *t;
-+ struct pci_dev *found_dev = NULL;
-+ unsigned long flags;
++ struct list_head *ent;
++ netif_t *netif;
++ int i = 0;
+
-+ spin_lock_irqsave(&dev_data->lock, flags);
++ printk(KERN_ALERT "netif_schedule_list:\n");
++ spin_lock_irq(&net_schedule_list_lock);
+
-+ list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
-+ if (dev_entry->dev == dev) {
-+ list_del(&dev_entry->list);
-+ found_dev = dev_entry->dev;
-+ kfree(dev_entry);
-+ }
++ list_for_each (ent, &net_schedule_list) {
++ netif = list_entry(ent, netif_t, list);
++ printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
++ "rx_resp_prod=%08x\n",
++ i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
++ printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n",
++ netif->tx.req_cons, netif->tx.rsp_prod_pvt);
++ printk(KERN_ALERT " shared(rx_req_prod=%08x "
++ "rx_resp_prod=%08x\n",
++ netif->rx.sring->req_prod, netif->rx.sring->rsp_prod);
++ printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n",
++ netif->rx.sring->rsp_event, netif->tx.sring->req_prod);
++ printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n",
++ netif->tx.sring->rsp_prod, netif->tx.sring->rsp_event);
++ i++;
+ }
+
-+ spin_unlock_irqrestore(&dev_data->lock, flags);
++ spin_unlock_irq(&net_schedule_list_lock);
++ printk(KERN_ALERT " ** End of netif_schedule_list **\n");
+
-+ if (found_dev)
-+ pcistub_put_pci_dev(found_dev);
++ return IRQ_HANDLED;
+}
++#endif
+
-+int pciback_init_devices(struct pciback_device *pdev)
++static int __init netback_init(void)
+{
-+ struct passthrough_dev_data *dev_data;
++ int i;
++ struct page *page;
+
-+ dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
-+ if (!dev_data)
-+ return -ENOMEM;
++ if (!is_running_on_xen())
++ return -ENODEV;
+
-+ spin_lock_init(&dev_data->lock);
++ /* We can increase reservation by this much in net_rx_action(). */
++ balloon_update_driver_allowance(NET_RX_RING_SIZE);
+
-+ INIT_LIST_HEAD(&dev_data->dev_list);
++ skb_queue_head_init(&rx_queue);
++ skb_queue_head_init(&tx_queue);
+
-+ pdev->pci_dev_data = dev_data;
++ init_timer(&net_timer);
++ net_timer.data = 0;
++ net_timer.function = net_alarm;
+
-+ return 0;
-+}
++ init_timer(&netbk_tx_pending_timer);
++ netbk_tx_pending_timer.data = 0;
++ netbk_tx_pending_timer.function = netbk_tx_pending_timeout;
+
-+int pciback_publish_pci_roots(struct pciback_device *pdev,
-+ publish_pci_root_cb publish_root_cb)
-+{
-+ int err = 0;
-+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
-+ struct pci_dev_entry *dev_entry, *e;
-+ struct pci_dev *dev;
-+ int found;
-+ unsigned int domain, bus;
++ mmap_pages = alloc_empty_pages_and_pagevec(MAX_PENDING_REQS);
++ if (mmap_pages == NULL) {
++ printk("%s: out of memory\n", __FUNCTION__);
++ return -ENOMEM;
++ }
+
-+ spin_lock(&dev_data->lock);
++ for (i = 0; i < MAX_PENDING_REQS; i++) {
++ page = mmap_pages[i];
++ SetPageForeign(page, netif_page_release);
++ netif_set_page_index(page, i);
++ INIT_LIST_HEAD(&pending_inuse[i].list);
++ }
+
-+ list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
-+ /* Only publish this device as a root if none of its
-+ * parent bridges are exported
-+ */
-+ found = 0;
-+ dev = dev_entry->dev->bus->self;
-+ for (; !found && dev != NULL; dev = dev->bus->self) {
-+ list_for_each_entry(e, &dev_data->dev_list, list) {
-+ if (dev == e->dev) {
-+ found = 1;
-+ break;
-+ }
-+ }
-+ }
++ pending_cons = 0;
++ pending_prod = MAX_PENDING_REQS;
++ for (i = 0; i < MAX_PENDING_REQS; i++)
++ pending_ring[i] = i;
+
-+ domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
-+ bus = (unsigned int)dev_entry->dev->bus->number;
++ spin_lock_init(&net_schedule_list_lock);
++ INIT_LIST_HEAD(&net_schedule_list);
+
-+ if (!found) {
-+ err = publish_root_cb(pdev, domain, bus);
-+ if (err)
-+ break;
-+ }
++ netbk_copy_skb_mode = NETBK_DONT_COPY_SKB;
++ if (MODPARM_copy_skb) {
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace,
++ NULL, 0))
++ netbk_copy_skb_mode = NETBK_ALWAYS_COPY_SKB;
++ else
++ netbk_copy_skb_mode = NETBK_DELAYED_COPY_SKB;
+ }
+
-+ spin_unlock(&dev_data->lock);
-+
-+ return err;
-+}
++ netif_accel_init();
+
-+void pciback_release_devices(struct pciback_device *pdev)
-+{
-+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
-+ struct pci_dev_entry *dev_entry, *t;
++ netif_xenbus_init();
+
-+ list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
-+ list_del(&dev_entry->list);
-+ pcistub_put_pci_dev(dev_entry->dev);
-+ kfree(dev_entry);
-+ }
++#ifdef NETBE_DEBUG_INTERRUPT
++ (void)bind_virq_to_irqhandler(VIRQ_DEBUG,
++ 0,
++ netif_be_dbg,
++ SA_SHIRQ,
++ "net-be-dbg",
++ &netif_be_dbg);
++#endif
+
-+ kfree(dev_data);
-+ pdev->pci_dev_data = NULL;
++ return 0;
+}
+
-+int pciback_get_pcifront_dev(struct pci_dev *pcidev, struct pciback_device *pdev,
-+ unsigned int *domain, unsigned int *bus, unsigned int *devfn)
++module_init(netback_init);
+
-+{
-+ *domain = pci_domain_nr(pcidev->bus);
-+ *bus = pcidev->bus->number;
-+ *devfn = pcidev->devfn;
-+ return 1;
-+}
-diff --git a/drivers/xen/pciback/pci_stub.c b/drivers/xen/pciback/pci_stub.c
++MODULE_LICENSE("Dual BSD/GPL");
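
netback_init() above turns pending_ring[] into a ring-buffer free list: it seeds the array with the identity permutation and sets pending_prod = MAX_PENDING_REQS, so every slot starts out free; the tx path allocates from pending_cons and netif_idx_release() hands slots back at the producer end. A self-contained sketch of that free-list idiom, with illustrative names:

    #define MAX_SLOTS 64                       /* power of two */
    #define MASK(i)   ((i) & (MAX_SLOTS - 1))

    static unsigned short free_ring[MAX_SLOTS];
    static unsigned int free_prod, free_cons;

    static void freelist_init(void)
    {
        unsigned int i;
        for (i = 0; i < MAX_SLOTS; i++)
            free_ring[i] = i;                  /* every slot starts free */
        free_prod = MAX_SLOTS;                 /* ring is full of free slots */
        free_cons = 0;
    }

    static int slot_alloc(void)
    {
        if (free_cons == free_prod)
            return -1;                         /* no free slot */
        return free_ring[MASK(free_cons++)];
    }

    static void slot_free(unsigned short idx)
    {
        free_ring[MASK(free_prod++)] = idx;
    }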
+diff --git a/drivers/xen/netback/xenbus.c b/drivers/xen/netback/xenbus.c
new file mode 100644
-index 0000000..c481a73
+index 0000000..d7faeb6
--- /dev/null
-+++ b/drivers/xen/pciback/pci_stub.c
-@@ -0,0 +1,1316 @@
-+/*
-+ * PCI Stub Driver - Grabs devices in backend to be exported later
-+ *
-+ * Ryan Wilson <hap9(a)epoch.ncsc.mil>
-+ * Chris Bookholt <hap10(a)epoch.ncsc.mil>
-+ */
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/rwsem.h>
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <linux/kref.h>
-+#include <linux/pci.h>
-+#include <linux/wait.h>
-+#include <asm/atomic.h>
-+#include <xen/evtchn.h>
-+#include "pciback.h"
-+#include "conf_space.h"
-+#include "conf_space_quirks.h"
++++ b/drivers/xen/netback/xenbus.c
+@@ -0,0 +1,454 @@
++/* Xenbus code for netif backend
++ Copyright (C) 2005 Rusty Russell <rusty(a)rustcorp.com.au>
++ Copyright (C) 2005 XenSource Ltd
+
-+static char *pci_devs_to_hide = NULL;
-+wait_queue_head_t aer_wait_queue;
-+/* Add a semaphore to synchronize AER handling with pciback
-+ * remove/reconfigure ops: we want to avoid a pciback device being
-+ * removed in the middle of an AER operation.
-+ */
-+static DECLARE_RWSEM(pcistub_sem);
-+module_param_named(hide, pci_devs_to_hide, charp, 0444);
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
+
-+struct pcistub_device_id {
-+ struct list_head slot_list;
-+ int domain;
-+ unsigned char bus;
-+ unsigned int devfn;
-+};
-+static LIST_HEAD(pcistub_device_ids);
-+static DEFINE_SPINLOCK(device_ids_lock);
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
+
-+struct pcistub_device {
-+ struct kref kref;
-+ struct list_head dev_list;
-+ spinlock_t lock;
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++*/
+
-+ struct pci_dev *dev;
-+ struct pciback_device *pdev; /* non-NULL if struct pci_dev is in use */
-+};
++#include <stdarg.h>
++#include <linux/module.h>
++#include <xen/xenbus.h>
++#include "common.h"
+
-+/* Access to pcistub_devices & seized_devices lists and the initialize_devices
-+ * flag must be locked with pcistub_devices_lock
-+ */
-+static DEFINE_SPINLOCK(pcistub_devices_lock);
-+static LIST_HEAD(pcistub_devices);
++#if 0
++#undef DPRINTK
++#define DPRINTK(fmt, args...) \
++ printk("netback/xenbus (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
++#endif
+
-+/* wait for device_initcall before initializing our devices
-+ * (see pcistub_init_devices_late)
-+ */
-+static int initialize_devices = 0;
-+static LIST_HEAD(seized_devices);
+
-+static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
-+{
-+ struct pcistub_device *psdev;
++static int connect_rings(struct backend_info *);
++static void connect(struct backend_info *);
++static void backend_create_netif(struct backend_info *be);
+
-+ dev_dbg(&dev->dev, "pcistub_device_alloc\n");
++static int netback_remove(struct xenbus_device *dev)
++{
++ struct backend_info *be = dev->dev.driver_data;
+
-+ psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC);
-+ if (!psdev)
-+ return NULL;
++ netback_remove_accelerators(be, dev);
+
-+ psdev->dev = pci_dev_get(dev);
-+ if (!psdev->dev) {
-+ kfree(psdev);
-+ return NULL;
++ if (be->netif) {
++ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
++ netif_disconnect(be->netif);
++ be->netif = NULL;
+ }
-+
-+ kref_init(&psdev->kref);
-+ spin_lock_init(&psdev->lock);
-+
-+ return psdev;
++ kfree(be);
++ dev->dev.driver_data = NULL;
++ return 0;
+}
+
-+/* Don't call this directly as it's called by pcistub_device_put */
-+static void pcistub_device_release(struct kref *kref)
-+{
-+ struct pcistub_device *psdev;
+
-+ psdev = container_of(kref, struct pcistub_device, kref);
++/**
++ * Entry point to this code when a new device is created. Allocate the basic
++ * structures and switch to InitWait.
++ */
++static int netback_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ const char *message;
++ struct xenbus_transaction xbt;
++ int err;
++ int sg;
++ struct backend_info *be = kzalloc(sizeof(struct backend_info),
++ GFP_KERNEL);
++ if (!be) {
++ xenbus_dev_fatal(dev, -ENOMEM,
++ "allocating backend structure");
++ return -ENOMEM;
++ }
+
-+ dev_dbg(&psdev->dev->dev, "pcistub_device_release\n");
++ be->dev = dev;
++ dev->dev.driver_data = be;
+
-+ /* Clean-up the device */
-+ pciback_reset_device(psdev->dev);
-+ pciback_config_free_dyn_fields(psdev->dev);
-+ pciback_config_free_dev(psdev->dev);
-+ kfree(pci_get_drvdata(psdev->dev));
-+ pci_set_drvdata(psdev->dev, NULL);
++ sg = 1;
++ if (netbk_copy_skb_mode == NETBK_ALWAYS_COPY_SKB)
++ sg = 0;
+
-+ pci_dev_put(psdev->dev);
++ do {
++ err = xenbus_transaction_start(&xbt);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "starting transaction");
++ goto fail;
++ }
+
-+ kfree(psdev);
-+}
++ err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
++ if (err) {
++ message = "writing feature-sg";
++ goto abort_transaction;
++ }
+
-+static inline void pcistub_device_get(struct pcistub_device *psdev)
-+{
-+ kref_get(&psdev->kref);
-+}
++ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
++ "%d", sg);
++ if (err) {
++ message = "writing feature-gso-tcpv4";
++ goto abort_transaction;
++ }
+
-+static inline void pcistub_device_put(struct pcistub_device *psdev)
-+{
-+ kref_put(&psdev->kref, pcistub_device_release);
-+}
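
pcistub_device_get/put above wrap the standard kref lifecycle: every lookup that hands out a pointer takes a reference, and the release function runs only when the last reference drops. A compact sketch of the idiom, assuming <linux/kref.h>; the struct and names are illustrative:

    struct obj {
        struct kref kref;
        /* ... payload ... */
    };

    static void obj_release(struct kref *kref)
    {
        struct obj *o = container_of(kref, struct obj, kref);
        kfree(o);                        /* last reference gone */
    }

    static struct obj *obj_get(struct obj *o)
    {
        kref_get(&o->kref);              /* one get per lookup/user */
        return o;
    }

    static void obj_put(struct obj *o)
    {
        kref_put(&o->kref, obj_release); /* frees on the final put */
    }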
++ /* We support rx-copy path. */
++ err = xenbus_printf(xbt, dev->nodename,
++ "feature-rx-copy", "%d", 1);
++ if (err) {
++ message = "writing feature-rx-copy";
++ goto abort_transaction;
++ }
+
-+static struct pcistub_device *pcistub_device_find(int domain, int bus,
-+ int slot, int func)
-+{
-+ struct pcistub_device *psdev = NULL;
-+ unsigned long flags;
++ /*
++ * We don't support rx-flip path (except old guests who don't
++ * grok this feature flag).
++ */
++ err = xenbus_printf(xbt, dev->nodename,
++ "feature-rx-flip", "%d", 0);
++ if (err) {
++ message = "writing feature-rx-flip";
++ goto abort_transaction;
++ }
+
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
++ err = xenbus_transaction_end(xbt, 0);
++ } while (err == -EAGAIN);
+
-+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
-+ if (psdev->dev != NULL
-+ && domain == pci_domain_nr(psdev->dev->bus)
-+ && bus == psdev->dev->bus->number
-+ && PCI_DEVFN(slot, func) == psdev->dev->devfn) {
-+ pcistub_device_get(psdev);
-+ goto out;
-+ }
++ if (err) {
++ xenbus_dev_fatal(dev, err, "completing transaction");
++ goto fail;
+ }
+
-+ /* didn't find it */
-+ psdev = NULL;
++ netback_probe_accelerators(be, dev);
+
-+ out:
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+ return psdev;
-+}
++ err = xenbus_switch_state(dev, XenbusStateInitWait);
++ if (err)
++ goto fail;
+
-+static struct pci_dev *pcistub_device_get_pci_dev(struct pciback_device *pdev,
-+ struct pcistub_device *psdev)
-+{
-+ struct pci_dev *pci_dev = NULL;
-+ unsigned long flags;
-+
-+ pcistub_device_get(psdev);
-+
-+ spin_lock_irqsave(&psdev->lock, flags);
-+ if (!psdev->pdev) {
-+ psdev->pdev = pdev;
-+ pci_dev = psdev->dev;
-+ }
-+ spin_unlock_irqrestore(&psdev->lock, flags);
++ /* This kicks hotplug scripts, so do it immediately. */
++ backend_create_netif(be);
+
-+ if (!pci_dev)
-+ pcistub_device_put(psdev);
++ return 0;
+
-+ return pci_dev;
++abort_transaction:
++ xenbus_transaction_end(xbt, 1);
++ xenbus_dev_fatal(dev, err, "%s", message);
++fail:
++ DPRINTK("failed");
++ netback_remove(dev);
++ return err;
+}
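
netback_probe() writes its feature flags inside the standard xenbus transaction idiom: start, write, commit, and retry the whole body when xenbus_transaction_end() returns -EAGAIN because another writer raced. Reduced to its skeleton, assuming the xenbus API used in this patch ("feature-example" is a made-up node name):

    /* Sketch only; assumes the xenbus_* API used by this patch. */
    static int write_features(struct xenbus_device *dev)
    {
        struct xenbus_transaction xbt;
        int err;

        do {
            err = xenbus_transaction_start(&xbt);
            if (err)
                return err;

            err = xenbus_printf(xbt, dev->nodename,
                                "feature-example", "%d", 1);
            if (err) {
                xenbus_transaction_end(xbt, 1);  /* abort */
                return err;
            }

            err = xenbus_transaction_end(xbt, 0); /* commit */
        } while (err == -EAGAIN);                 /* lost a race: retry */

        return err;
    }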
+
-+struct pci_dev *pcistub_get_pci_dev_by_slot(struct pciback_device *pdev,
-+ int domain, int bus,
-+ int slot, int func)
++
++/**
++ * Handle the creation of the hotplug script environment. We add the script
++ * and vif variables to the environment, for the benefit of the vif-* hotplug
++ * scripts.
++ */
++static int netback_uevent(struct xenbus_device *xdev, char **envp,
++ int num_envp, char *buffer, int buffer_size)
+{
-+ struct pcistub_device *psdev;
-+ struct pci_dev *found_dev = NULL;
-+ unsigned long flags;
++ struct backend_info *be = xdev->dev.driver_data;
++ netif_t *netif = be->netif;
++ int i = 0, length = 0;
++ char *val;
+
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
++ DPRINTK("netback_uevent");
+
-+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
-+ if (psdev->dev != NULL
-+ && domain == pci_domain_nr(psdev->dev->bus)
-+ && bus == psdev->dev->bus->number
-+ && PCI_DEVFN(slot, func) == psdev->dev->devfn) {
-+ found_dev = pcistub_device_get_pci_dev(pdev, psdev);
-+ break;
-+ }
++ val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
++ if (IS_ERR(val)) {
++ int err = PTR_ERR(val);
++ xenbus_dev_fatal(xdev, err, "reading script");
++ return err;
++ }
++ else {
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
++ &length, "script=%s", val);
++ kfree(val);
+ }
+
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+ return found_dev;
-+}
-+
-+struct pci_dev *pcistub_get_pci_dev(struct pciback_device *pdev,
-+ struct pci_dev *dev)
-+{
-+ struct pcistub_device *psdev;
-+ struct pci_dev *found_dev = NULL;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++ "vif=%s", netif->dev->name);
+
-+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
-+ if (psdev->dev == dev) {
-+ found_dev = pcistub_device_get_pci_dev(pdev, psdev);
-+ break;
-+ }
-+ }
++ envp[i] = NULL;
+
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+ return found_dev;
++ return 0;
+}
+
-+void pcistub_put_pci_dev(struct pci_dev *dev)
++
++static void backend_create_netif(struct backend_info *be)
+{
-+ struct pcistub_device *psdev, *found_psdev = NULL;
-+ unsigned long flags;
++ int err;
++ long handle;
++ struct xenbus_device *dev = be->dev;
+
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
++ if (be->netif != NULL)
++ return;
+
-+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
-+ if (psdev->dev == dev) {
-+ found_psdev = psdev;
-+ break;
-+ }
++ err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
++ if (err != 1) {
++ xenbus_dev_fatal(dev, err, "reading handle");
++ return;
+ }
+
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+
-+ /* Hold this lock to avoid breaking the link between
-+ * pcistub and pciback while AER handling is in progress.
-+ */
-+ down_write(&pcistub_sem);
-+ /* Cleanup our device
-+ * (so it's ready for the next domain)
-+ */
-+ pciback_reset_device(found_psdev->dev);
-+ pciback_config_free_dyn_fields(found_psdev->dev);
-+ pciback_config_reset_dev(found_psdev->dev);
-+
-+ spin_lock_irqsave(&found_psdev->lock, flags);
-+ found_psdev->pdev = NULL;
-+ spin_unlock_irqrestore(&found_psdev->lock, flags);
++ be->netif = netif_alloc(dev->otherend_id, handle);
++ if (IS_ERR(be->netif)) {
++ err = PTR_ERR(be->netif);
++ be->netif = NULL;
++ xenbus_dev_fatal(dev, err, "creating interface");
++ return;
++ }
+
-+ pcistub_device_put(found_psdev);
-+ up_write(&pcistub_sem);
++ kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
+}
+
-+static int __devinit pcistub_match_one(struct pci_dev *dev,
-+ struct pcistub_device_id *pdev_id)
++
++/**
++ * Callback received when the frontend's state changes.
++ */
++static void frontend_changed(struct xenbus_device *dev,
++ enum xenbus_state frontend_state)
+{
-+ /* Match the specified device by domain, bus, slot, func and also if
-+ * any of the device's parent bridges match.
-+ */
-+ for (; dev != NULL; dev = dev->bus->self) {
-+ if (pci_domain_nr(dev->bus) == pdev_id->domain
-+ && dev->bus->number == pdev_id->bus
-+ && dev->devfn == pdev_id->devfn)
-+ return 1;
++ struct backend_info *be = dev->dev.driver_data;
+
-+ /* Sometimes topmost bridge links to itself. */
-+ if (dev == dev->bus->self)
-+ break;
-+ }
++ DPRINTK("%s", xenbus_strstate(frontend_state));
+
-+ return 0;
-+}
++ be->frontend_state = frontend_state;
+
-+static int __devinit pcistub_match(struct pci_dev *dev)
-+{
-+ struct pcistub_device_id *pdev_id;
-+ unsigned long flags;
-+ int found = 0;
++ switch (frontend_state) {
++ case XenbusStateInitialising:
++ if (dev->state == XenbusStateClosed) {
++ printk(KERN_INFO "%s: %s: prepare for reconnect\n",
++ __FUNCTION__, dev->nodename);
++ xenbus_switch_state(dev, XenbusStateInitWait);
++ }
++ break;
+
-+ spin_lock_irqsave(&device_ids_lock, flags);
-+ list_for_each_entry(pdev_id, &pcistub_device_ids, slot_list) {
-+ if (pcistub_match_one(dev, pdev_id)) {
-+ found = 1;
++ case XenbusStateInitialised:
++ break;
++
++ case XenbusStateConnected:
++ if (dev->state == XenbusStateConnected)
+ break;
++ backend_create_netif(be);
++ if (be->netif)
++ connect(be);
++ break;
++
++ case XenbusStateClosing:
++ if (be->netif) {
++ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
++ netif_disconnect(be->netif);
++ be->netif = NULL;
+ }
-+ }
-+ spin_unlock_irqrestore(&device_ids_lock, flags);
++ xenbus_switch_state(dev, XenbusStateClosing);
++ break;
+
-+ return found;
-+}
++ case XenbusStateClosed:
++ xenbus_switch_state(dev, XenbusStateClosed);
++ if (xenbus_dev_is_online(dev))
++ break;
++ /* fall through if not online */
++ case XenbusStateUnknown:
++ device_unregister(&dev->dev);
++ break;
+
-+static int __devinit pcistub_init_device(struct pci_dev *dev)
-+{
-+ struct pciback_dev_data *dev_data;
-+ int err = 0;
++ default:
++ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
++ frontend_state);
++ break;
++ }
++}
+
-+ dev_dbg(&dev->dev, "initializing...\n");
+
-+ /* The PCI backend is not intended to be a module (or to work with
-+ * removable PCI devices) yet. If it were, pciback_config_free()
-+ * would need to be called somewhere to free the memory allocated
-+ * here and then to call kfree(pci_get_drvdata(psdev->dev)).
-+ */
-+ dev_data = kzalloc(sizeof(*dev_data), GFP_ATOMIC);
-+ if (!dev_data) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+ pci_set_drvdata(dev, dev_data);
++static void xen_net_read_rate(struct xenbus_device *dev,
++ unsigned long *bytes, unsigned long *usec)
++{
++ char *s, *e;
++ unsigned long b, u;
++ char *ratestr;
+
-+ dev_dbg(&dev->dev, "initializing config\n");
++ /* Default to unlimited bandwidth. */
++ *bytes = ~0UL;
++ *usec = 0;
+
-+ init_waitqueue_head(&aer_wait_queue);
-+ err = pciback_config_init_dev(dev);
-+ if (err)
-+ goto out;
++ ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL);
++ if (IS_ERR(ratestr))
++ return;
+
-+ /* HACK: Force device (& ACPI) to determine what IRQ it's on - we
-+ * must do this here because pcibios_enable_device may specify
-+ * the pci device's true irq (and possibly its other resources)
-+ * if they differ from what's in the configuration space.
-+ * This makes the assumption that the device's resources won't
-+ * change after this point (otherwise this code may break!)
-+ */
-+ dev_dbg(&dev->dev, "enabling device\n");
-+ err = pci_enable_device(dev);
-+ if (err)
-+ goto config_release;
++ s = ratestr;
++ b = simple_strtoul(s, &e, 10);
++ if ((s == e) || (*e != ','))
++ goto fail;
+
-+ /* Now disable the device (this also ensures some private device
-+ * data is setup before we export)
-+ */
-+ dev_dbg(&dev->dev, "reset device\n");
-+ pciback_reset_device(dev);
++ s = e + 1;
++ u = simple_strtoul(s, &e, 10);
++ if ((s == e) || (*e != '\0'))
++ goto fail;
+
-+ return 0;
++ *bytes = b;
++ *usec = u;
+
-+ config_release:
-+ pciback_config_free_dev(dev);
++ kfree(ratestr);
++ return;
+
-+ out:
-+ pci_set_drvdata(dev, NULL);
-+ kfree(dev_data);
-+ return err;
++ fail:
++ WPRINTK("Failed to parse network rate limit. Traffic unlimited.\n");
++ kfree(ratestr);
+}
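
The "rate" node parsed above is a two-field string, "<bytes>,<usec>": for example, "10000000,50000" grants 10 MB of transmit credit every 50 ms. A self-contained userspace check of the same parse, illustrative only:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        char ratestr[] = "10000000,50000";   /* "<bytes>,<usec>" */
        char *s = ratestr, *e;
        unsigned long bytes, usec;

        bytes = strtoul(s, &e, 10);
        if (s == e || *e != ',')
            return 1;                        /* malformed */
        s = e + 1;
        usec = strtoul(s, &e, 10);
        if (s == e || *e != '\0')
            return 1;                        /* malformed */

        printf("%lu bytes of credit every %lu usec\n", bytes, usec);
        return 0;
    }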
+
-+/*
-+ * Because some initialization still happens on
-+ * devices during fs_initcall, we need to defer
-+ * full initialization of our devices until
-+ * device_initcall.
-+ */
-+static int __init pcistub_init_devices_late(void)
++static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
+{
-+ struct pcistub_device *psdev;
-+ unsigned long flags;
-+ int err = 0;
-+
-+ pr_debug("pciback: pcistub_init_devices_late\n");
-+
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
-+
-+ while (!list_empty(&seized_devices)) {
-+ psdev = container_of(seized_devices.next,
-+ struct pcistub_device, dev_list);
-+ list_del(&psdev->dev_list);
++ char *s, *e, *macstr;
++ int i;
+
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++ macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
++ if (IS_ERR(macstr))
++ return PTR_ERR(macstr);
+
-+ err = pcistub_init_device(psdev->dev);
-+ if (err) {
-+ dev_err(&psdev->dev->dev,
-+ "error %d initializing device\n", err);
-+ kfree(psdev);
-+ psdev = NULL;
++ for (i = 0; i < ETH_ALEN; i++) {
++ mac[i] = simple_strtoul(s, &e, 16);
++ if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
++ kfree(macstr);
++ return -ENOENT;
+ }
-+
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
-+
-+ if (psdev)
-+ list_add_tail(&psdev->dev_list, &pcistub_devices);
++ s = e+1;
+ }
+
-+ initialize_devices = 1;
-+
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+
++ kfree(macstr);
+ return 0;
+}
+
-+static int __devinit pcistub_seize(struct pci_dev *dev)
++static void connect(struct backend_info *be)
+{
-+ struct pcistub_device *psdev;
-+ unsigned long flags;
-+ int err = 0;
-+
-+ psdev = pcistub_device_alloc(dev);
-+ if (!psdev)
-+ return -ENOMEM;
-+
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
-+
-+ if (initialize_devices) {
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+
-+ /* don't want irqs disabled when calling pcistub_init_device */
-+ err = pcistub_init_device(psdev->dev);
++ int err;
++ struct xenbus_device *dev = be->dev;
+
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
++ err = connect_rings(be);
++ if (err)
++ return;
+
-+ if (!err)
-+ list_add(&psdev->dev_list, &pcistub_devices);
-+ } else {
-+ dev_dbg(&dev->dev, "deferring initialization\n");
-+ list_add(&psdev->dev_list, &seized_devices);
++ err = xen_net_read_mac(dev, be->netif->fe_dev_addr);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
++ return;
+ }
+
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++ xen_net_read_rate(dev, &be->netif->credit_bytes,
++ &be->netif->credit_usec);
++ be->netif->remaining_credit = be->netif->credit_bytes;
+
-+ if (err)
-+ pcistub_device_put(psdev);
++ xenbus_switch_state(dev, XenbusStateConnected);
+
-+ return err;
++ netif_wake_queue(be->netif->dev);
+}
+
-+static int __devinit pcistub_probe(struct pci_dev *dev,
-+ const struct pci_device_id *id)
++
++static int connect_rings(struct backend_info *be)
+{
-+ int err = 0;
++ struct xenbus_device *dev = be->dev;
++ unsigned long tx_ring_ref, rx_ring_ref;
++ unsigned int evtchn, rx_copy;
++ int err;
++ int val;
+
-+ dev_dbg(&dev->dev, "probing...\n");
++ DPRINTK("");
+
-+ if (pcistub_match(dev)) {
++ err = xenbus_gather(XBT_NIL, dev->otherend,
++ "tx-ring-ref", "%lu", &tx_ring_ref,
++ "rx-ring-ref", "%lu", &rx_ring_ref,
++ "event-channel", "%u", &evtchn, NULL);
++ if (err) {
++ xenbus_dev_fatal(dev, err,
++ "reading %s/ring-ref and event-channel",
++ dev->otherend);
++ return err;
++ }
+
-+ if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL
-+ && dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
-+ dev_err(&dev->dev, "can't export pci devices that "
-+ "don't have a normal (0) or bridge (1) "
-+ "header type!\n");
-+ err = -ENODEV;
-+ goto out;
-+ }
++ err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
++ &rx_copy);
++ if (err == -ENOENT) {
++ err = 0;
++ rx_copy = 0;
++ }
++ if (err < 0) {
++ xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
++ dev->otherend);
++ return err;
++ }
++ be->netif->copying_receiver = !!rx_copy;
+
-+ dev_info(&dev->dev, "seizing device\n");
-+ err = pcistub_seize(dev);
-+#ifdef CONFIG_PCI_GUESTDEV
-+ } else if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
-+ if (!pci_is_guestdev(dev)) {
-+ err = -ENODEV;
-+ goto out;
-+ }
++ if (be->netif->dev->tx_queue_len != 0) {
++ if (xenbus_scanf(XBT_NIL, dev->otherend,
++ "feature-rx-notify", "%d", &val) < 0)
++ val = 0;
++ if (val)
++ be->netif->can_queue = 1;
++ else
++ /* Must be non-zero for pfifo_fast to work. */
++ be->netif->dev->tx_queue_len = 1;
++ }
+
-+ dev_info(&dev->dev, "seizing device\n");
-+ err = pcistub_seize(dev);
-+#endif /* CONFIG_PCI_GUESTDEV */
-+ } else
-+ /* Didn't find the device */
-+ err = -ENODEV;
++ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", "%d", &val) < 0)
++ val = 0;
++ if (val) {
++ be->netif->features |= NETIF_F_SG;
++ be->netif->dev->features |= NETIF_F_SG;
++ }
+
-+ out:
-+ return err;
-+}
++ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", "%d",
++ &val) < 0)
++ val = 0;
++ if (val) {
++ be->netif->features |= NETIF_F_TSO;
++ be->netif->dev->features |= NETIF_F_TSO;
++ }
+
-+static void pcistub_remove(struct pci_dev *dev)
-+{
-+ struct pcistub_device *psdev, *found_psdev = NULL;
-+ unsigned long flags;
++ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
++ "%d", &val) < 0)
++ val = 0;
++ if (val) {
++ be->netif->features &= ~NETIF_F_IP_CSUM;
++ be->netif->dev->features &= ~NETIF_F_IP_CSUM;
++ }
+
-+ dev_dbg(&dev->dev, "removing\n");
++ /* Map the shared frame, irq etc. */
++ err = netif_map(be->netif, tx_ring_ref, rx_ring_ref, evtchn);
++ if (err) {
++ xenbus_dev_fatal(dev, err,
++ "mapping shared-frames %lu/%lu port %u",
++ tx_ring_ref, rx_ring_ref, evtchn);
++ return err;
++ }
++ return 0;
++}
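
connect_rings() reads each optional frontend flag with the same three-line pattern: xenbus_scanf() into val, defaulting to 0 when the node is absent or unreadable. Factored into a helper it would look like the sketch below; the helper is hypothetical, not part of this patch:

    /* Read an optional boolean node under 'otherend'; absent => defval. */
    static int xenbus_read_feature(struct xenbus_device *dev,
                                   const char *node, int defval)
    {
        int val;

        if (xenbus_scanf(XBT_NIL, dev->otherend, node, "%d", &val) < 0)
            val = defval;
        return val;
    }

    /* Usage, mirroring the code above:
     *     if (xenbus_read_feature(dev, "feature-sg", 0))
     *         be->netif->features |= NETIF_F_SG;
     */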
+
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+
-+ pciback_config_quirk_release(dev);
++/* ** Driver Registration ** */
+
-+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
-+ if (psdev->dev == dev) {
-+ found_psdev = psdev;
-+ break;
-+ }
-+ }
+
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++static const struct xenbus_device_id netback_ids[] = {
++ { "vif" },
++ { "" }
++};
+
-+ if (found_psdev) {
-+ dev_dbg(&dev->dev, "found device to remove - in use? %p\n",
-+ found_psdev->pdev);
+
-+ if (found_psdev->pdev) {
-+ printk(KERN_WARNING "pciback: ****** removing device "
-+ "%s while still in-use! ******\n",
-+ pci_name(found_psdev->dev));
-+ printk(KERN_WARNING "pciback: ****** driver domain may "
-+ "still access this device's i/o resources!\n");
-+ printk(KERN_WARNING "pciback: ****** shutdown driver "
-+ "domain before binding device\n");
-+ printk(KERN_WARNING "pciback: ****** to other drivers "
-+ "or domains\n");
++static struct xenbus_driver netback = {
++ .name = "vif",
++ .owner = THIS_MODULE,
++ .ids = netback_ids,
++ .probe = netback_probe,
++ .remove = netback_remove,
++ .uevent = netback_uevent,
++ .otherend_changed = frontend_changed,
++};
+
-+ pciback_release_pci_dev(found_psdev->pdev,
-+ found_psdev->dev);
-+ }
+
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
-+ list_del(&found_psdev->dev_list);
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+
-+ /* the final put for releasing from the list */
-+ pcistub_device_put(found_psdev);
-+ }
-+}
-+
-+static const struct pci_device_id pcistub_ids[] = {
-+ {
-+ .vendor = PCI_ANY_ID,
-+ .device = PCI_ANY_ID,
-+ .subvendor = PCI_ANY_ID,
-+ .subdevice = PCI_ANY_ID,
-+ },
-+ {0,},
-+};
-+
-+static void kill_domain_by_device(struct pcistub_device *psdev)
-+{
-+ struct xenbus_transaction xbt;
-+ int err;
-+ char nodename[1024];
-+
-+ if (!psdev) {
-+ /* Don't dereference a NULL psdev in the error path. */
-+ printk(KERN_ERR "pciback: NULL psdev in AER kill_domain\n");
-+ return;
-+ }
-+ snprintf(nodename, sizeof(nodename),
-+ "/local/domain/0/backend/pci/%d/0",
-+ psdev->pdev->xdev->otherend_id);
-+
-+again:
-+ err = xenbus_transaction_start(&xbt);
-+ if (err)
-+ {
-+ dev_err(&psdev->dev->dev,
-+ "error %d when start xenbus transaction\n", err);
-+ return;
-+ }
-+ /* PV AER handlers will set this flag */
-+ xenbus_printf(xbt, nodename, "aerState", "aerfail");
-+ err = xenbus_transaction_end(xbt, 0);
-+ if (err)
-+ {
-+ if (err == -EAGAIN)
-+ goto again;
-+ dev_err(&psdev->dev->dev,
-+ "error %d when end xenbus transaction\n", err);
-+ return;
-+ }
-+}
-+
-+/* For each AER recovery step (error_detected, mmio_enabled, etc.), the
-+ * frontend and backend need to cooperate. In pciback, those steps all do
-+ * a similar job: send a service request and wait for the frontend's
-+ * response.
-+ */
-+static pci_ers_result_t common_process(struct pcistub_device *psdev,
-+ pci_channel_state_t state, int aer_cmd, pci_ers_result_t result)
-+{
-+ pci_ers_result_t res = result;
-+ struct xen_pcie_aer_op *aer_op;
-+ int ret;
-+
-+ /*with PV AER drivers*/
-+ aer_op = &(psdev->pdev->sh_info->aer_op);
-+ aer_op->cmd = aer_cmd ;
-+ /*useful for error_detected callback*/
-+ aer_op->err = state;
-+ /*pcifront_end BDF*/
-+ ret = pciback_get_pcifront_dev(psdev->dev, psdev->pdev,
-+ &aer_op->domain, &aer_op->bus, &aer_op->devfn);
-+ if (!ret) {
-+ dev_err(&psdev->dev->dev,
-+ "pciback: failed to get pcifront device\n");
-+ return PCI_ERS_RESULT_NONE;
-+ }
-+ wmb();
-+
-+ dev_dbg(&psdev->dev->dev,
-+ "pciback: aer_op %x dom %x bus %x devfn %x\n",
-+ aer_cmd, aer_op->domain, aer_op->bus, aer_op->devfn);
-+ /* Local flag marking that an AER request is pending; the pciback
-+ * callback uses it to judge whether to check for the AER service
-+ * ack signal from pcifront.
-+ */
-+ set_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);
-+
-+ /* It is possible that a pcifront conf_read_write request invokes
-+ * the callback, causing a spurious wake_up. Yet that is harmless
-+ * and better than a spinlock here.
-+ */
-+ set_bit(_XEN_PCIB_active,
-+ (unsigned long *)&psdev->pdev->sh_info->flags);
-+ wmb();
-+ notify_remote_via_irq(psdev->pdev->evtchn_irq);
-+
-+ ret = wait_event_timeout(aer_wait_queue, !(test_bit(_XEN_PCIB_active,
-+ (unsigned long *)&psdev->pdev->sh_info->flags)), 300*HZ);
-+
-+ if (!ret) {
-+ if (test_bit(_XEN_PCIB_active,
-+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
-+ dev_err(&psdev->dev->dev,
-+ "pcifront aer process not responding!\n");
-+ clear_bit(_XEN_PCIB_active,
-+ (unsigned long *)&psdev->pdev->sh_info->flags);
-+ aer_op->err = PCI_ERS_RESULT_NONE;
-+ return res;
-+ }
-+ }
-+ clear_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);
-+
-+ if ( test_bit( _XEN_PCIF_active,
-+ (unsigned long*)&psdev->pdev->sh_info->flags)) {
-+ dev_dbg(&psdev->dev->dev,
-+ "schedule pci_conf service in pciback \n");
-+ test_and_schedule_op(psdev->pdev);
-+ }
-+
-+ res = (pci_ers_result_t)aer_op->err;
-+ return res;
-+}
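
common_process() above implements a request/ack handshake over the shared info page: publish the request fields, set the active bit, wmb(), ring the event channel, then sleep on aer_wait_queue until pcifront clears the bit or 300 seconds pass. The core of that handshake, sketched with the bitops and waitqueue calls used in the patch; function and parameter names are illustrative:

    /* Sketch: request/ack over a shared flags word, as in common_process().
     * 'sh_flags' lives in a page shared with the frontend. */
    static int request_and_wait(unsigned long *sh_flags, int active_bit,
                                wait_queue_head_t *wq, int irq)
    {
        set_bit(active_bit, sh_flags);   /* publish the request... */
        wmb();                           /* ...before ringing the doorbell */
        notify_remote_via_irq(irq);

        /* The frontend clears the bit once it has serviced the request. */
        if (!wait_event_timeout(*wq, !test_bit(active_bit, sh_flags),
                                300 * HZ)) {
            clear_bit(active_bit, sh_flags); /* give up: cancel the request */
            return -ETIMEDOUT;
        }
        return 0;
    }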
-+
-+/*
-+ * pciback_slot_reset: send the slot_reset request to pcifront, in case the
-+ * device driver can provide this service, and then wait for the pcifront
-+ * ack.
-+ * @dev: pointer to the PCI device
-+ * The return value is used by the aer_core do_recovery policy.
-+ */
-+static pci_ers_result_t pciback_slot_reset(struct pci_dev *dev)
++void netif_xenbus_init(void)
+{
-+ struct pcistub_device *psdev;
-+ pci_ers_result_t result;
-+
-+ result = PCI_ERS_RESULT_RECOVERED;
-+ dev_dbg(&dev->dev, "pciback_slot_reset(bus:%x,devfn:%x)\n",
-+ dev->bus->number, dev->devfn);
-+
-+ down_write(&pcistub_sem);
-+ psdev = pcistub_device_find(pci_domain_nr(dev->bus),
-+ dev->bus->number,
-+ PCI_SLOT(dev->devfn),
-+ PCI_FUNC(dev->devfn));
-+
-+ if ( !psdev || !psdev->pdev )
-+ {
-+ dev_err(&dev->dev,
-+ "pciback device is not found/assigned\n");
-+ goto end;
-+ }
-+
-+ if ( !psdev->pdev->sh_info )
-+ {
-+ dev_err(&dev->dev, "pciback device is not connected or owned"
-+ " by HVM, kill it\n");
-+ kill_domain_by_device(psdev);
-+ goto release;
-+ }
-+
-+ if ( !test_bit(_XEN_PCIB_AERHANDLER,
-+ (unsigned long *)&psdev->pdev->sh_info->flags) ) {
-+ dev_err(&dev->dev,
-+ "guest with no AER driver should have been killed\n");
-+ goto release;
-+ }
-+ result = common_process(psdev, 1, XEN_PCI_OP_aer_slotreset, result);
-+
-+ if (result == PCI_ERS_RESULT_NONE ||
-+ result == PCI_ERS_RESULT_DISCONNECT) {
-+ dev_dbg(&dev->dev,
-+ "No AER slot_reset service or disconnected!\n");
-+ kill_domain_by_device(psdev);
-+ }
-+release:
-+ pcistub_device_put(psdev);
-+end:
-+ up_write(&pcistub_sem);
-+ return result;
-+
++ xenbus_register_backend(&netback);
+}
+--
+1.7.4
+
+
+From 5b30803bf5f58ee980edd8d88a2d73dda995ee93 Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Mon, 9 Feb 2009 12:05:52 -0800
+Subject: [PATCH 002/203] xen: netback: first cut at porting to upstream and cleaning up
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+---
+ drivers/xen/Kconfig | 2 +-
+ drivers/xen/netback/Makefile | 2 +-
+ drivers/xen/netback/common.h | 33 +++---
+ drivers/xen/netback/interface.c | 37 +++---
+ drivers/xen/netback/netback.c | 248 ++++++++++++++++++++++++---------------
+ drivers/xen/netback/xenbus.c | 25 ++--
+ 6 files changed, 201 insertions(+), 146 deletions(-)
+
+diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
+index 7e83d43..30290a8 100644
+--- a/drivers/xen/Kconfig
++++ b/drivers/xen/Kconfig
+@@ -38,7 +38,7 @@ config XEN_BACKEND
+ to other virtual machines.
+
+ config XEN_NETDEV_BACKEND
+- bool "Xen backend network device"
++ tristate "Xen backend network device"
+ depends on XEN_BACKEND && NET
+ help
+ Implement the network backend driver, which passes packets
+diff --git a/drivers/xen/netback/Makefile b/drivers/xen/netback/Makefile
+index f4a0c51..a01a1a3 100644
+--- a/drivers/xen/netback/Makefile
++++ b/drivers/xen/netback/Makefile
+@@ -1,3 +1,3 @@
+ obj-$(CONFIG_XEN_NETDEV_BACKEND) := netbk.o
+
+-netbk-y := netback.o xenbus.o interface.o
++netbk-y := netback.o xenbus.o interface.o
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index 9a54d57..65b88f4 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -43,8 +43,7 @@
+ #include <asm/io.h>
+ #include <asm/pgalloc.h>
+ #include <xen/interface/grant_table.h>
+-#include <xen/gnttab.h>
+-#include <xen/driver_util.h>
++#include <xen/grant_table.h>
+ #include <xen/xenbus.h>
+
+ #define DPRINTK(_f, _a...) \
+@@ -55,7 +54,7 @@
+ #define WPRINTK(fmt, args...) \
+ printk(KERN_WARNING "xen_net: " fmt, ##args)
+
+-typedef struct netif_st {
++struct xen_netif {
+ /* Unique identifier for this interface. */
+ domid_t domid;
+ unsigned int handle;
+@@ -70,8 +69,8 @@ typedef struct netif_st {
+ unsigned int irq;
+
+ /* The shared rings and indexes. */
+- netif_tx_back_ring_t tx;
+- netif_rx_back_ring_t rx;
++ struct xen_netif_tx_back_ring tx;
++ struct xen_netif_rx_back_ring rx;
+ struct vm_struct *tx_comms_area;
+ struct vm_struct *rx_comms_area;
+
+@@ -103,7 +102,7 @@ typedef struct netif_st {
+ unsigned int carrier;
+
+ wait_queue_head_t waiting_to_free;
+-} netif_t;
++};
+
+ /*
+ * Implement our own carrier flag: the network stack's version causes delays
+@@ -141,7 +140,7 @@ struct netback_accelerator {
+
+ struct backend_info {
+ struct xenbus_device *dev;
+- netif_t *netif;
++ struct xen_netif *netif;
+ enum xenbus_state frontend_state;
+
+ /* State relating to the netback accelerator */
+@@ -174,13 +173,13 @@ extern
+ void netif_accel_init(void);
+
+
+-#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
+-#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
++#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
++#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
+
+-void netif_disconnect(netif_t *netif);
++void netif_disconnect(struct xen_netif *netif);
+
+-netif_t *netif_alloc(domid_t domid, unsigned int handle);
+-int netif_map(netif_t *netif, unsigned long tx_ring_ref,
++struct xen_netif *netif_alloc(domid_t domid, unsigned int handle);
++int netif_map(struct xen_netif *netif, unsigned long tx_ring_ref,
+ unsigned long rx_ring_ref, unsigned int evtchn);
+
+ #define netif_get(_b) (atomic_inc(&(_b)->refcnt))
+@@ -195,22 +194,22 @@ void netif_xenbus_init(void);
+ #define netif_schedulable(netif) \
+ (netif_running((netif)->dev) && netback_carrier_ok(netif))
+
+-void netif_schedule_work(netif_t *netif);
+-void netif_deschedule_work(netif_t *netif);
++void netif_schedule_work(struct xen_netif *netif);
++void netif_deschedule_work(struct xen_netif *netif);
+
+ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
+ struct net_device_stats *netif_be_get_stats(struct net_device *dev);
+-irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++irqreturn_t netif_be_int(int irq, void *dev_id);
+
+ static inline int netbk_can_queue(struct net_device *dev)
+ {
+- netif_t *netif = netdev_priv(dev);
++ struct xen_netif *netif = netdev_priv(dev);
+ return netif->can_queue;
+ }
+
+ static inline int netbk_can_sg(struct net_device *dev)
+ {
+- netif_t *netif = netdev_priv(dev);
++ struct xen_netif *netif = netdev_priv(dev);
+ return netif->features & NETIF_F_SG;
+ }
+
+diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
+index 7e67941..d184ad7 100644
+--- a/drivers/xen/netback/interface.c
++++ b/drivers/xen/netback/interface.c
+@@ -34,6 +34,9 @@
+ #include <linux/ethtool.h>
+ #include <linux/rtnetlink.h>
+
++#include <xen/events.h>
++#include <asm/xen/hypercall.h>
+
+ /*
+ * Module parameter 'queue_length':
+ *
+@@ -51,13 +54,13 @@
+ static unsigned long netbk_queue_length = 32;
+ module_param_named(queue_length, netbk_queue_length, ulong, 0);
+
+-static void __netif_up(netif_t *netif)
++static void __netif_up(struct xen_netif *netif)
+ {
+ enable_irq(netif->irq);
+ netif_schedule_work(netif);
+ }
+
+-static void __netif_down(netif_t *netif)
++static void __netif_down(struct xen_netif *netif)
+ {
+ disable_irq(netif->irq);
+ netif_deschedule_work(netif);
+@@ -65,7 +68,7 @@ static void __netif_down(netif_t *netif)
+
+ static int net_open(struct net_device *dev)
+ {
+- netif_t *netif = netdev_priv(dev);
++ struct xen_netif *netif = netdev_priv(dev);
+ if (netback_carrier_ok(netif)) {
+ __netif_up(netif);
+ netif_start_queue(dev);
+@@ -75,7 +78,7 @@ static int net_open(struct net_device *dev)
+
+ static int net_close(struct net_device *dev)
+ {
+- netif_t *netif = netdev_priv(dev);
++ struct xen_netif *netif = netdev_priv(dev);
+ if (netback_carrier_ok(netif))
+ __netif_down(netif);
+ netif_stop_queue(dev);
+@@ -95,7 +98,7 @@ static int netbk_change_mtu(struct net_device *dev, int mtu)
+ static int netbk_set_sg(struct net_device *dev, u32 data)
+ {
+ if (data) {
+- netif_t *netif = netdev_priv(dev);
++ struct xen_netif *netif = netdev_priv(dev);
+
+ if (!(netif->features & NETIF_F_SG))
+ return -ENOSYS;
+@@ -107,7 +110,7 @@ static int netbk_set_sg(struct net_device *dev, u32 data)
+ static int netbk_set_tso(struct net_device *dev, u32 data)
+ {
+ if (data) {
+- netif_t *netif = netdev_priv(dev);
++ struct xen_netif *netif = netdev_priv(dev);
+
+ if (!(netif->features & NETIF_F_TSO))
+ return -ENOSYS;
+@@ -127,15 +130,15 @@ static struct ethtool_ops network_ethtool_ops =
+ .get_link = ethtool_op_get_link,
+ };
+
+-netif_t *netif_alloc(domid_t domid, unsigned int handle)
++struct xen_netif *netif_alloc(domid_t domid, unsigned int handle)
+ {
+ int err = 0;
+ struct net_device *dev;
+- netif_t *netif;
++ struct xen_netif *netif;
+ char name[IFNAMSIZ] = {};
+
+ snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
+- dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
++ dev = alloc_netdev(sizeof(struct xen_netif), name, ether_setup);
+ if (dev == NULL) {
+ DPRINTK("Could not create netif: out of memory\n");
+ return ERR_PTR(-ENOMEM);
+@@ -194,7 +197,7 @@ netif_t *netif_alloc(domid_t domid, unsigned int handle)
+ }
+
+ static int map_frontend_pages(
+- netif_t *netif, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref)
++ struct xen_netif *netif, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref)
+ {
+ struct gnttab_map_grant_ref op;
+
+@@ -229,7 +232,7 @@ static int map_frontend_pages(
+ return 0;
+ }
+
+-static void unmap_frontend_pages(netif_t *netif)
++static void unmap_frontend_pages(struct xen_netif *netif)
+ {
+ struct gnttab_unmap_grant_ref op;
+
+@@ -246,12 +249,12 @@ static void unmap_frontend_pages(netif_t *netif)
+ BUG();
+ }
+
+-int netif_map(netif_t *netif, unsigned long tx_ring_ref,
++int netif_map(struct xen_netif *netif, unsigned long tx_ring_ref,
+ unsigned long rx_ring_ref, unsigned int evtchn)
+ {
+ int err = -ENOMEM;
+- netif_tx_sring_t *txs;
+- netif_rx_sring_t *rxs;
++ struct xen_netif_tx_sring *txs;
++ struct xen_netif_rx_sring *rxs;
+
+ /* Already connected through? */
+ if (netif->irq)
+@@ -276,10 +279,10 @@ int netif_map(netif_t *netif, unsigned long tx_ring_ref,
+ netif->irq = err;
+ disable_irq(netif->irq);
+
+- txs = (netif_tx_sring_t *)netif->tx_comms_area->addr;
++ txs = (struct xen_netif_tx_sring *)netif->tx_comms_area->addr;
+ BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);
+
+- rxs = (netif_rx_sring_t *)
++ rxs = (struct xen_netif_rx_sring *)
+ ((char *)netif->rx_comms_area->addr);
+ BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);
+
+@@ -303,7 +306,7 @@ err_rx:
+ return err;
+ }
+
+-void netif_disconnect(netif_t *netif)
++void netif_disconnect(struct xen_netif *netif)
+ {
+ if (netback_carrier_ok(netif)) {
+ rtnl_lock();
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index db629d4..c959075 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -35,9 +35,17 @@
+ */
+
+ #include "common.h"
+
-+/*pciback_mmio_enabled: it will send the mmio_enabled request to pcifront
-+* in case of the device driver could provide this service, and then wait
-+* for pcifront ack.
-+* @dev: pointer to PCI devices
-+* return value is used by aer_core do_recovery policy
-+*/
-+
-+static pci_ers_result_t pciback_mmio_enabled(struct pci_dev *dev)
-+{
-+ struct pcistub_device *psdev;
-+ pci_ers_result_t result;
-+
-+ result = PCI_ERS_RESULT_RECOVERED;
-+ dev_dbg(&dev->dev, "pciback_mmio_enabled(bus:%x,devfn:%x)\n",
-+ dev->bus->number, dev->devfn);
++#include <linux/tcp.h>
++#include <linux/udp.h>
+
-+ down_write(&pcistub_sem);
-+ psdev = pcistub_device_find(pci_domain_nr(dev->bus),
-+ dev->bus->number,
-+ PCI_SLOT(dev->devfn),
-+ PCI_FUNC(dev->devfn));
+ #include <xen/balloon.h>
++#include <xen/events.h>
+ #include <xen/interface/memory.h>
+
++#include <asm/xen/hypercall.h>
++#include <asm/xen/page.h>
+
-+ if ( !psdev || !psdev->pdev )
-+ {
-+ dev_err(&dev->dev,
-+ "pciback device is not found/assigned\n");
-+ goto end;
-+ }
+ /*define NETBE_DEBUG_INTERRUPT*/
+
+ struct netbk_rx_meta {
+@@ -51,11 +59,12 @@ struct netbk_tx_pending_inuse {
+ unsigned long alloc_time;
+ };
+
+
-+ if ( !psdev->pdev->sh_info )
-+ {
-+ dev_err(&dev->dev, "pciback device is not connected or owned"
-+ " by HVM, kill it\n");
-+ kill_domain_by_device(psdev);
-+ goto release;
-+ }
-+
-+ if ( !test_bit(_XEN_PCIB_AERHANDLER,
-+ (unsigned long *)&psdev->pdev->sh_info->flags) ) {
-+ dev_err(&dev->dev,
-+ "guest with no AER driver should have been killed\n");
-+ goto release;
-+ }
-+ result = common_process(psdev, 1, XEN_PCI_OP_aer_mmio, result);
-+
-+ if (result == PCI_ERS_RESULT_NONE ||
-+ result == PCI_ERS_RESULT_DISCONNECT) {
-+ dev_dbg(&dev->dev,
-+ "No AER mmio_enabled service or disconnected!\n");
-+ kill_domain_by_device(psdev);
-+ }
-+release:
-+ pcistub_device_put(psdev);
-+end:
-+ up_write(&pcistub_sem);
-+ return result;
-+}
-+
-+/*pciback_error_detected: it will send the error_detected request to pcifront
-+* in case of the device driver could provide this service, and then wait
-+* for pcifront ack.
-+* @dev: pointer to PCI devices
-+* @error: the current PCI connection state
-+* return value is used by aer_core do_recovery policy
-+*/
-+
-+static pci_ers_result_t pciback_error_detected(struct pci_dev *dev,
-+ pci_channel_state_t error)
+ static void netif_idx_release(u16 pending_idx);
+-static void make_tx_response(netif_t *netif,
+- netif_tx_request_t *txp,
++static void make_tx_response(struct xen_netif *netif,
++ struct xen_netif_tx_request *txp,
+ s8 st);
+-static netif_rx_response_t *make_rx_response(netif_t *netif,
++static struct xen_netif_rx_response *make_rx_response(struct xen_netif *netif,
+ u16 id,
+ s8 st,
+ u16 offset,
+@@ -108,8 +117,8 @@ static inline int netif_page_index(struct page *pg)
+ #define PKT_PROT_LEN 64
+
+ static struct pending_tx_info {
+- netif_tx_request_t req;
+- netif_t *netif;
++ struct xen_netif_tx_request req;
++ struct xen_netif *netif;
+ } pending_tx_info[MAX_PENDING_REQS];
+ static u16 pending_ring[MAX_PENDING_REQS];
+ typedef unsigned int PEND_RING_IDX;
+@@ -128,8 +137,8 @@ static LIST_HEAD(pending_inuse_head);
+ static struct sk_buff_head tx_queue;
+
+ static grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
+-static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS];
+-static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS];
++static struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
++static struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
+
+ static struct list_head net_schedule_list;
+ static spinlock_t net_schedule_list_lock;
+@@ -195,7 +204,7 @@ static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
+ goto err;
+
+ skb_reserve(nskb, 16 + NET_IP_ALIGN);
+- headlen = nskb->end - nskb->data;
++ headlen = skb_end_pointer(nskb) - nskb->data;
+ if (headlen > skb_headlen(skb))
+ headlen = skb_headlen(skb);
+ ret = skb_copy_bits(skb, 0, __skb_put(nskb, headlen), headlen);
+@@ -243,9 +252,9 @@ static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
+
+ offset = nskb->data - skb->data;
+
+- nskb->h.raw = skb->h.raw + offset;
+- nskb->nh.raw = skb->nh.raw + offset;
+- nskb->mac.raw = skb->mac.raw + offset;
++ nskb->transport_header = skb->transport_header + offset;
++ nskb->network_header = skb->network_header + offset;
++ nskb->mac_header = skb->mac_header + offset;
+
+ return nskb;
+
+@@ -255,14 +264,14 @@ static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
+ return NULL;
+ }
+
+-static inline int netbk_max_required_rx_slots(netif_t *netif)
++static inline int netbk_max_required_rx_slots(struct xen_netif *netif)
+ {
+ if (netif->features & (NETIF_F_SG|NETIF_F_TSO))
+ return MAX_SKB_FRAGS + 2; /* header + extra_info + frags */
+ return 1; /* all in one */
+ }
+
+-static inline int netbk_queue_full(netif_t *netif)
++static inline int netbk_queue_full(struct xen_netif *netif)
+ {
+ RING_IDX peek = netif->rx_req_cons_peek;
+ RING_IDX needed = netbk_max_required_rx_slots(netif);
+@@ -273,14 +282,14 @@ static inline int netbk_queue_full(netif_t *netif)
+
+ static void tx_queue_callback(unsigned long data)
+ {
+- netif_t *netif = (netif_t *)data;
++ struct xen_netif *netif = (struct xen_netif *)data;
+ if (netif_schedulable(netif))
+ netif_wake_queue(netif->dev);
+ }
+
+ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+- netif_t *netif = netdev_priv(dev);
++ struct xen_netif *netif = netdev_priv(dev);
+
+ BUG_ON(skb->dev != dev);
+
+@@ -302,7 +311,6 @@ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ /* Copy only the header fields we use in this driver. */
+ nskb->dev = skb->dev;
+ nskb->ip_summed = skb->ip_summed;
+- nskb->proto_data_valid = skb->proto_data_valid;
+ dev_kfree_skb(skb);
+ skb = nskb;
+ }
+@@ -366,25 +374,25 @@ struct netrx_pending_operations {
+ unsigned mcl_prod, mcl_cons;
+ unsigned copy_prod, copy_cons;
+ unsigned meta_prod, meta_cons;
+- mmu_update_t *mmu;
+- gnttab_transfer_t *trans;
+- gnttab_copy_t *copy;
+- multicall_entry_t *mcl;
++ struct mmu_update *mmu;
++ struct gnttab_transfer *trans;
++ struct gnttab_copy *copy;
++ struct multicall_entry *mcl;
+ struct netbk_rx_meta *meta;
+ };
+
+ /* Set up the grant operations for this fragment. If it's a flipping
+ interface, we also set up the unmap request from here. */
+-static u16 netbk_gop_frag(netif_t *netif, struct netbk_rx_meta *meta,
++static u16 netbk_gop_frag(struct xen_netif *netif, struct netbk_rx_meta *meta,
+ int i, struct netrx_pending_operations *npo,
+ struct page *page, unsigned long size,
+ unsigned long offset)
+ {
+- mmu_update_t *mmu;
+- gnttab_transfer_t *gop;
+- gnttab_copy_t *copy_gop;
+- multicall_entry_t *mcl;
+- netif_rx_request_t *req;
++ struct mmu_update *mmu;
++ struct gnttab_transfer *gop;
++ struct gnttab_copy *copy_gop;
++ struct multicall_entry *mcl;
++ struct xen_netif_rx_request *req;
+ unsigned long old_mfn, new_mfn;
+ int idx = netif_page_index(page);
+
+@@ -426,12 +434,12 @@ static u16 netbk_gop_frag(netif_t *netif, struct netbk_rx_meta *meta,
+ mcl = npo->mcl + npo->mcl_prod++;
+ MULTI_update_va_mapping(mcl,
+ (unsigned long)page_address(page),
+- pfn_pte_ma(new_mfn, PAGE_KERNEL),
++ mfn_pte(new_mfn, PAGE_KERNEL),
+ 0);
+
+ mmu = npo->mmu + npo->mmu_prod++;
+- mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
+- MMU_MACHPHYS_UPDATE;
++ mmu->ptr = ((phys_addr_t)new_mfn << PAGE_SHIFT) |
++ MMU_MACHPHYS_UPDATE;
+ mmu->val = page_to_pfn(page);
+ }
+
+@@ -446,7 +454,7 @@ static u16 netbk_gop_frag(netif_t *netif, struct netbk_rx_meta *meta,
+ static void netbk_gop_skb(struct sk_buff *skb,
+ struct netrx_pending_operations *npo)
+ {
+- netif_t *netif = netdev_priv(skb->dev);
++ struct xen_netif *netif = netdev_priv(skb->dev);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int i;
+ int extra;
+@@ -494,9 +502,9 @@ static inline void netbk_free_pages(int nr_frags, struct netbk_rx_meta *meta)
+ static int netbk_check_gop(int nr_frags, domid_t domid,
+ struct netrx_pending_operations *npo)
+ {
+- multicall_entry_t *mcl;
+- gnttab_transfer_t *gop;
+- gnttab_copy_t *copy_op;
++ struct multicall_entry *mcl;
++ struct gnttab_transfer *gop;
++ struct gnttab_copy *copy_op;
+ int status = NETIF_RSP_OKAY;
+ int i;
+
+@@ -534,7 +542,7 @@ static int netbk_check_gop(int nr_frags, domid_t domid,
+ return status;
+ }
+
+-static void netbk_add_frag_responses(netif_t *netif, int status,
++static void netbk_add_frag_responses(struct xen_netif *netif, int status,
+ struct netbk_rx_meta *meta, int nr_frags)
+ {
+ int i;
+@@ -555,11 +563,11 @@ static void netbk_add_frag_responses(netif_t *netif, int status,
+
+ static void net_rx_action(unsigned long unused)
+ {
+- netif_t *netif = NULL;
++ struct xen_netif *netif = NULL;
+ s8 status;
+ u16 id, irq, flags;
+- netif_rx_response_t *resp;
+- multicall_entry_t *mcl;
++ struct xen_netif_rx_response *resp;
++ struct multicall_entry *mcl;
+ struct sk_buff_head rxq;
+ struct sk_buff *skb;
+ int notify_nr = 0;
+@@ -572,10 +580,10 @@ static void net_rx_action(unsigned long unused)
+ * Putting hundreds of bytes on the stack is considered rude.
+ * Static works because a tasklet can only be on one CPU at any time.
+ */
+- static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+3];
+- static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
+- static gnttab_transfer_t grant_trans_op[NET_RX_RING_SIZE];
+- static gnttab_copy_t grant_copy_op[NET_RX_RING_SIZE];
++ static struct multicall_entry rx_mcl[NET_RX_RING_SIZE+3];
++ static struct mmu_update rx_mmu[NET_RX_RING_SIZE];
++ static struct gnttab_transfer grant_trans_op[NET_RX_RING_SIZE];
++ static struct gnttab_copy grant_copy_op[NET_RX_RING_SIZE];
+ static unsigned char rx_notify[NR_IRQS];
+ static u16 notify_list[NET_RX_RING_SIZE];
+ static struct netbk_rx_meta meta[NET_RX_RING_SIZE];
+@@ -596,7 +604,7 @@ static void net_rx_action(unsigned long unused)
+ *(int *)skb->cb = nr_frags;
+
+ if (!xen_feature(XENFEAT_auto_translated_physmap) &&
+- !((netif_t *)netdev_priv(skb->dev))->copying_receiver &&
++ !((struct xen_netif *)netdev_priv(skb->dev))->copying_receiver &&
+ check_mfn(nr_frags + 1)) {
+ /* Memory squeeze? Back off for an arbitrary while. */
+ if ( net_ratelimit() )
+@@ -692,9 +700,10 @@ static void net_rx_action(unsigned long unused)
+ id = meta[npo.meta_cons].id;
+ flags = nr_frags ? NETRXF_more_data : 0;
+
+- if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
++ if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
+ flags |= NETRXF_csum_blank | NETRXF_data_validated;
+- else if (skb->proto_data_valid) /* remote but checksummed? */
++ else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
++ /* remote but checksummed. */
+ flags |= NETRXF_data_validated;
+
+ if (meta[npo.meta_cons].copy)
+@@ -705,8 +714,8 @@ static void net_rx_action(unsigned long unused)
+ skb_headlen(skb), flags);
+
+ if (meta[npo.meta_cons].frag.size) {
+- struct netif_extra_info *gso =
+- (struct netif_extra_info *)
++ struct xen_netif_extra_info *gso =
++ (struct xen_netif_extra_info *)
+ RING_GET_RESPONSE(&netif->rx,
+ netif->rx.rsp_prod_pvt++);
+
+@@ -769,16 +778,16 @@ static void netbk_tx_pending_timeout(unsigned long unused)
+
+ struct net_device_stats *netif_be_get_stats(struct net_device *dev)
+ {
+- netif_t *netif = netdev_priv(dev);
++ struct xen_netif *netif = netdev_priv(dev);
+ return &netif->stats;
+ }
+
+-static int __on_net_schedule_list(netif_t *netif)
++static int __on_net_schedule_list(struct xen_netif *netif)
+ {
+ return netif->list.next != NULL;
+ }
+
+-static void remove_from_net_schedule_list(netif_t *netif)
++static void remove_from_net_schedule_list(struct xen_netif *netif)
+ {
+ spin_lock_irq(&net_schedule_list_lock);
+ if (likely(__on_net_schedule_list(netif))) {
+@@ -789,7 +798,7 @@ static void remove_from_net_schedule_list(netif_t *netif)
+ spin_unlock_irq(&net_schedule_list_lock);
+ }
+
+-static void add_to_net_schedule_list_tail(netif_t *netif)
++static void add_to_net_schedule_list_tail(struct xen_netif *netif)
+ {
+ if (__on_net_schedule_list(netif))
+ return;
+@@ -811,7 +820,7 @@ static void add_to_net_schedule_list_tail(netif_t *netif)
+ * If we may be buffer transmit buffers for any reason then we must be rather
+ * more conservative and treat this as the final check for pending work.
+ */
+-void netif_schedule_work(netif_t *netif)
++void netif_schedule_work(struct xen_netif *netif)
+ {
+ int more_to_do;
+
+@@ -827,13 +836,13 @@ void netif_schedule_work(netif_t *netif)
+ }
+ }
+
+-void netif_deschedule_work(netif_t *netif)
++void netif_deschedule_work(struct xen_netif *netif)
+ {
+ remove_from_net_schedule_list(netif);
+ }
+
+
+-static void tx_add_credit(netif_t *netif)
++static void tx_add_credit(struct xen_netif *netif)
+ {
+ unsigned long max_burst, max_credit;
+
+@@ -855,7 +864,7 @@ static void tx_add_credit(netif_t *netif)
+
+ static void tx_credit_callback(unsigned long data)
+ {
+- netif_t *netif = (netif_t *)data;
++ struct xen_netif *netif = (struct xen_netif *)data;
+ tx_add_credit(netif);
+ netif_schedule_work(netif);
+ }
+@@ -869,10 +878,10 @@ static inline int copy_pending_req(PEND_RING_IDX pending_idx)
+ inline static void net_tx_action_dealloc(void)
+ {
+ struct netbk_tx_pending_inuse *inuse, *n;
+- gnttab_unmap_grant_ref_t *gop;
++ struct gnttab_unmap_grant_ref *gop;
+ u16 pending_idx;
+ PEND_RING_IDX dc, dp;
+- netif_t *netif;
++ struct xen_netif *netif;
+ int ret;
+ LIST_HEAD(list);
+
+@@ -954,7 +963,7 @@ inline static void net_tx_action_dealloc(void)
+ }
+ }
+
+-static void netbk_tx_err(netif_t *netif, netif_tx_request_t *txp, RING_IDX end)
++static void netbk_tx_err(struct xen_netif *netif, struct xen_netif_tx_request *txp, RING_IDX end)
+ {
+ RING_IDX cons = netif->tx.req_cons;
+
+@@ -969,8 +978,8 @@ static void netbk_tx_err(netif_t *netif, netif_tx_request_t *txp, RING_IDX end)
+ netif_put(netif);
+ }
+
+-static int netbk_count_requests(netif_t *netif, netif_tx_request_t *first,
+- netif_tx_request_t *txp, int work_to_do)
++static int netbk_count_requests(struct xen_netif *netif, struct xen_netif_tx_request *first,
++ struct xen_netif_tx_request *txp, int work_to_do)
+ {
+ RING_IDX cons = netif->tx.req_cons;
+ int frags = 0;
+@@ -1009,10 +1018,10 @@ static int netbk_count_requests(netif_t *netif, netif_tx_request_t *first,
+ return frags;
+ }
+
+-static gnttab_map_grant_ref_t *netbk_get_requests(netif_t *netif,
++static struct gnttab_map_grant_ref *netbk_get_requests(struct xen_netif *netif,
+ struct sk_buff *skb,
+- netif_tx_request_t *txp,
+- gnttab_map_grant_ref_t *mop)
++ struct xen_netif_tx_request *txp,
++ struct gnttab_map_grant_ref *mop)
+ {
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ skb_frag_t *frags = shinfo->frags;
+@@ -1039,12 +1048,12 @@ static gnttab_map_grant_ref_t *netbk_get_requests(netif_t *netif,
+ }
+
+ static int netbk_tx_check_mop(struct sk_buff *skb,
+- gnttab_map_grant_ref_t **mopp)
++ struct gnttab_map_grant_ref **mopp)
+ {
+- gnttab_map_grant_ref_t *mop = *mopp;
++ struct gnttab_map_grant_ref *mop = *mopp;
+ int pending_idx = *((u16 *)skb->data);
+- netif_t *netif = pending_tx_info[pending_idx].netif;
+- netif_tx_request_t *txp;
++ struct xen_netif *netif = pending_tx_info[pending_idx].netif;
++ struct xen_netif_tx_request *txp;
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ int nr_frags = shinfo->nr_frags;
+ int i, err, start;
+@@ -1118,7 +1127,7 @@ static void netbk_fill_frags(struct sk_buff *skb)
+
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag_t *frag = shinfo->frags + i;
+- netif_tx_request_t *txp;
++ struct xen_netif_tx_request *txp;
+ unsigned long pending_idx;
+
+ pending_idx = (unsigned long)frag->page;
+@@ -1138,10 +1147,10 @@ static void netbk_fill_frags(struct sk_buff *skb)
+ }
+ }
+
+-int netbk_get_extras(netif_t *netif, struct netif_extra_info *extras,
++int netbk_get_extras(struct xen_netif *netif, struct xen_netif_extra_info *extras,
+ int work_to_do)
+ {
+- struct netif_extra_info extra;
++ struct xen_netif_extra_info extra;
+ RING_IDX cons = netif->tx.req_cons;
+
+ do {
+@@ -1166,7 +1175,7 @@ int netbk_get_extras(netif_t *netif, struct netif_extra_info *extras,
+ return work_to_do;
+ }
+
+-static int netbk_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso)
++static int netbk_set_skb_gso(struct sk_buff *skb, struct xen_netif_extra_info *gso)
+ {
+ if (!gso->u.gso.size) {
+ DPRINTK("GSO size must not be zero.\n");
+@@ -1189,18 +1198,57 @@ static int netbk_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso)
+ return 0;
+ }
+
++static int skb_checksum_setup(struct sk_buff *skb)
+{
-+ struct pcistub_device *psdev;
-+ pci_ers_result_t result;
++ struct iphdr *iph;
++ unsigned char *th;
++ int err = -EPROTO;
+
-+ result = PCI_ERS_RESULT_CAN_RECOVER;
-+ dev_dbg(&dev->dev, "pciback_error_detected(bus:%x,devfn:%x)\n",
-+ dev->bus->number, dev->devfn);
++ if (skb->protocol != htons(ETH_P_IP))
++ goto out;
+
-+ down_write(&pcistub_sem);
-+ psdev = pcistub_device_find(pci_domain_nr(dev->bus),
-+ dev->bus->number,
-+ PCI_SLOT(dev->devfn),
-+ PCI_FUNC(dev->devfn));
++ iph = (void *)skb->data;
++ th = skb->data + 4 * iph->ihl;
++ if (th >= skb_tail_pointer(skb))
++ goto out;
+
-+ if ( !psdev || !psdev->pdev )
-+ {
-+ dev_err(&dev->dev,
-+ "pciback device is not found/assigned\n");
-+ goto end;
++ skb->csum_start = th - skb->head;
++ switch (iph->protocol) {
++ case IPPROTO_TCP:
++ skb->csum_offset = offsetof(struct tcphdr, check);
++ break;
++ case IPPROTO_UDP:
++ skb->csum_offset = offsetof(struct udphdr, check);
++ break;
++ default:
++ if (net_ratelimit())
++ printk(KERN_ERR "Attempting to checksum a non-"
++ "TCP/UDP packet, dropping a protocol"
++ " %d packet", iph->protocol);
++ goto out;
+ }
+
-+ if ( !psdev->pdev->sh_info )
-+ {
-+ dev_err(&dev->dev, "pciback device is not connected or owned"
-+ " by HVM, kill it\n");
-+ kill_domain_by_device(psdev);
-+ goto release;
-+ }
++ if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
++ goto out;
+
-+ /*Guest owns the device yet no aer handler regiested, kill guest*/
-+ if ( !test_bit(_XEN_PCIB_AERHANDLER,
-+ (unsigned long *)&psdev->pdev->sh_info->flags) ) {
-+ dev_dbg(&dev->dev, "guest may have no aer driver, kill it\n");
-+ kill_domain_by_device(psdev);
-+ goto release;
-+ }
-+ result = common_process(psdev, error, XEN_PCI_OP_aer_detected, result);
++ err = 0;
+
-+ if (result == PCI_ERS_RESULT_NONE ||
-+ result == PCI_ERS_RESULT_DISCONNECT) {
-+ dev_dbg(&dev->dev,
-+ "No AER error_detected service or disconnected!\n");
-+ kill_domain_by_device(psdev);
-+ }
-+release:
-+ pcistub_device_put(psdev);
-+end:
-+ up_write(&pcistub_sem);
-+ return result;
++out:
++ return err;
+}
+
-+/*pciback_error_resume: it will send the error_resume request to pcifront
-+* in case of the device driver could provide this service, and then wait
-+* for pcifront ack.
-+* @dev: pointer to PCI devices
-+*/
-+
-+static void pciback_error_resume(struct pci_dev *dev)
-+{
-+ struct pcistub_device *psdev;
-+
-+ dev_dbg(&dev->dev, "pciback_error_resume(bus:%x,devfn:%x)\n",
-+ dev->bus->number, dev->devfn);
+ /* Called after netfront has transmitted */
+ static void net_tx_action(unsigned long unused)
+ {
+ struct list_head *ent;
+ struct sk_buff *skb;
+- netif_t *netif;
+- netif_tx_request_t txreq;
+- netif_tx_request_t txfrags[MAX_SKB_FRAGS];
+- struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
++ struct xen_netif *netif;
++ struct xen_netif_tx_request txreq;
++ struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
++ struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
+ u16 pending_idx;
+ RING_IDX i;
+- gnttab_map_grant_ref_t *mop;
++ struct gnttab_map_grant_ref *mop;
+ unsigned int data_len;
+ int ret, work_to_do;
+
+@@ -1212,7 +1260,7 @@ static void net_tx_action(unsigned long unused)
+ !list_empty(&net_schedule_list)) {
+ /* Get a netif from the list with work to do. */
+ ent = net_schedule_list.next;
+- netif = list_entry(ent, netif_t, list);
++ netif = list_entry(ent, struct xen_netif, list);
+ netif_get(netif);
+ remove_from_net_schedule_list(netif);
+
+@@ -1313,7 +1361,7 @@ static void net_tx_action(unsigned long unused)
+ skb_reserve(skb, 16 + NET_IP_ALIGN);
+
+ if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
+- struct netif_extra_info *gso;
++ struct xen_netif_extra_info *gso;
+ gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
+
+ if (netbk_set_skb_gso(skb, gso)) {
+@@ -1372,7 +1420,7 @@ static void net_tx_action(unsigned long unused)
+
+ mop = tx_map_ops;
+ while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
+- netif_tx_request_t *txp;
++ struct xen_netif_tx_request *txp;
+
+ pending_idx = *((u16 *)skb->data);
+ netif = pending_tx_info[pending_idx].netif;
+@@ -1403,14 +1451,10 @@ static void net_tx_action(unsigned long unused)
+ * Old frontends do not assert data_validated but we
+ * can infer it from csum_blank so test both flags.
+ */
+- if (txp->flags & (NETTXF_data_validated|NETTXF_csum_blank)) {
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+- skb->proto_data_valid = 1;
+- } else {
++ if (txp->flags & (NETTXF_data_validated|NETTXF_csum_blank))
++ skb->ip_summed = CHECKSUM_PARTIAL;
++ else
+ skb->ip_summed = CHECKSUM_NONE;
+- skb->proto_data_valid = 0;
+- }
+- skb->proto_csum_blank = !!(txp->flags & NETTXF_csum_blank);
+
+ netbk_fill_frags(skb);
+
+@@ -1420,6 +1464,14 @@ static void net_tx_action(unsigned long unused)
+ netif->stats.rx_bytes += skb->len;
+ netif->stats.rx_packets++;
+
++ if (skb->ip_summed == CHECKSUM_PARTIAL) {
++ if (skb_checksum_setup(skb)) {
++ DPRINTK("Can't setup checksum in net_tx_action\n");
++ kfree_skb(skb);
++ continue;
++ }
++ }
+
-+ down_write(&pcistub_sem);
-+ psdev = pcistub_device_find(pci_domain_nr(dev->bus),
-+ dev->bus->number,
-+ PCI_SLOT(dev->devfn),
-+ PCI_FUNC(dev->devfn));
+ if (unlikely(netbk_copy_skb_mode == NETBK_ALWAYS_COPY_SKB) &&
+ unlikely(skb_linearize(skb))) {
+ DPRINTK("Can't linearize skb in net_tx_action.\n");
+@@ -1464,9 +1516,9 @@ static void netif_page_release(struct page *page, unsigned int order)
+ netif_idx_release(idx);
+ }
+
+-irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++irqreturn_t netif_be_int(int irq, void *dev_id)
+ {
+- netif_t *netif = dev_id;
++ struct xen_netif *netif = dev_id;
+
+ add_to_net_schedule_list_tail(netif);
+ maybe_schedule_tx_action();
+@@ -1477,12 +1529,12 @@ irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
+ return IRQ_HANDLED;
+ }
+
+-static void make_tx_response(netif_t *netif,
+- netif_tx_request_t *txp,
++static void make_tx_response(struct xen_netif *netif,
++ struct xen_netif_tx_request *txp,
+ s8 st)
+ {
+ RING_IDX i = netif->tx.rsp_prod_pvt;
+- netif_tx_response_t *resp;
++ struct xen_netif_tx_response *resp;
+ int notify;
+
+ resp = RING_GET_RESPONSE(&netif->tx, i);
+@@ -1507,7 +1559,7 @@ static void make_tx_response(netif_t *netif,
+ #endif
+ }
+
+-static netif_rx_response_t *make_rx_response(netif_t *netif,
++static struct xen_netif_rx_response *make_rx_response(struct xen_netif *netif,
+ u16 id,
+ s8 st,
+ u16 offset,
+@@ -1515,7 +1567,7 @@ static netif_rx_response_t *make_rx_response(netif_t *netif,
+ u16 flags)
+ {
+ RING_IDX i = netif->rx.rsp_prod_pvt;
+- netif_rx_response_t *resp;
++ struct xen_netif_rx_response *resp;
+
+ resp = RING_GET_RESPONSE(&netif->rx, i);
+ resp->offset = offset;
+@@ -1534,14 +1586,14 @@ static netif_rx_response_t *make_rx_response(netif_t *netif,
+ static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
+ {
+ struct list_head *ent;
+- netif_t *netif;
++ struct xen_netif *netif;
+ int i = 0;
+
+ printk(KERN_ALERT "netif_schedule_list:\n");
+ spin_lock_irq(&net_schedule_list_lock);
+
+ list_for_each (ent, &net_schedule_list) {
+- netif = list_entry(ent, netif_t, list);
++ netif = list_entry(ent, struct xen_netif, list);
+ printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
+ "rx_resp_prod=%08x\n",
+ i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
+@@ -1569,11 +1621,13 @@ static int __init netback_init(void)
+ int i;
+ struct page *page;
+
+- if (!is_running_on_xen())
++ printk(KERN_CRIT "*** netif_init\n");
+
-+ if ( !psdev || !psdev->pdev )
-+ {
-+ dev_err(&dev->dev,
-+ "pciback device is not found/assigned\n");
-+ goto end;
-+ }
++ if (!xen_domain())
+ return -ENODEV;
+
+ /* We can increase reservation by this much in net_rx_action(). */
+- balloon_update_driver_allowance(NET_RX_RING_SIZE);
++// balloon_update_driver_allowance(NET_RX_RING_SIZE);
+
+ skb_queue_head_init(&rx_queue);
+ skb_queue_head_init(&tx_queue);
+@@ -1616,7 +1670,7 @@ static int __init netback_init(void)
+ netbk_copy_skb_mode = NETBK_DELAYED_COPY_SKB;
+ }
+
+- netif_accel_init();
++ //netif_accel_init();
+
+ netif_xenbus_init();
+
+diff --git a/drivers/xen/netback/xenbus.c b/drivers/xen/netback/xenbus.c
+index d7faeb6..ed7c006 100644
+--- a/drivers/xen/netback/xenbus.c
++++ b/drivers/xen/netback/xenbus.c
+@@ -37,7 +37,7 @@ static int netback_remove(struct xenbus_device *dev)
+ {
+ struct backend_info *be = dev->dev.driver_data;
+
+- netback_remove_accelerators(be, dev);
++ //netback_remove_accelerators(be, dev);
+
+ if (be->netif) {
+ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
+@@ -123,7 +123,7 @@ static int netback_probe(struct xenbus_device *dev,
+ goto fail;
+ }
+
+- netback_probe_accelerators(be, dev);
++ //netback_probe_accelerators(be, dev);
+
+ err = xenbus_switch_state(dev, XenbusStateInitWait);
+ if (err)
+@@ -149,12 +149,10 @@ fail:
+ * and vif variables to the environment, for the benefit of the vif-* hotplug
+ * scripts.
+ */
+-static int netback_uevent(struct xenbus_device *xdev, char **envp,
+- int num_envp, char *buffer, int buffer_size)
++static int netback_uevent(struct xenbus_device *xdev, struct kobj_uevent_env *env)
+ {
+ struct backend_info *be = xdev->dev.driver_data;
+- netif_t *netif = be->netif;
+- int i = 0, length = 0;
++ struct xen_netif *netif = be->netif;
+ char *val;
+
+ DPRINTK("netback_uevent");
+@@ -166,15 +164,15 @@ static int netback_uevent(struct xenbus_device *xdev, char **envp,
+ return err;
+ }
+ else {
+- add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
+- &length, "script=%s", val);
++ if (add_uevent_var(env, "script=%s", val)) {
++ kfree(val);
++ return -ENOMEM;
++ }
+ kfree(val);
+ }
+
+- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
+- "vif=%s", netif->dev->name);
+-
+- envp[i] = NULL;
++ if (add_uevent_var(env, "vif=%s", netif->dev->name))
++ return -ENOMEM;
+
+ return 0;
+ }
+@@ -450,5 +448,6 @@ static struct xenbus_driver netback = {
+
+ void netif_xenbus_init(void)
+ {
+- xenbus_register_backend(&netback);
++ printk(KERN_CRIT "registering netback\n");
++ (void)xenbus_register_backend(&netback);
+ }
+--
+1.7.4
+
+
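+[Editor's note on the checksum conversion in the import above: the driver drops the out-of-tree skb fields proto_data_valid and proto_csum_blank in favour of the mainline ip_summed states. A minimal sketch of the two mappings the hunks perform; the flag names are the Xen netif ones from the patch, the helper names are illustrative:
+
+    #include <linux/skbuff.h>
+    #include <xen/interface/io/netif.h>
+
+    /* TX path: a guest that left the checksum blank, or asserts the
+     * data is validated, hands the backend a partially checksummed
+     * packet. */
+    static int tx_flags_to_ip_summed(u16 flags)
+    {
+            if (flags & (NETTXF_data_validated | NETTXF_csum_blank))
+                    return CHECKSUM_PARTIAL;
+            return CHECKSUM_NONE;
+    }
+
+    /* RX path: translate ip_summed back into ring flags for the guest. */
+    static u16 ip_summed_to_rx_flags(const struct sk_buff *skb)
+    {
+            if (skb->ip_summed == CHECKSUM_PARTIAL)      /* local packet */
+                    return NETRXF_csum_blank | NETRXF_data_validated;
+            if (skb->ip_summed == CHECKSUM_UNNECESSARY)  /* remote, checksummed */
+                    return NETRXF_data_validated;
+            return 0;
+    }
+]
+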
+From a41a2ab9e1ac4ef8320f69f2719e973e25faff5c Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy(a)goop.org>
+Date: Mon, 9 Feb 2009 16:39:01 -0800
+Subject: [PATCH 003/203] xen: netback: don't include xen/evtchn.h
+
+It's a usermode header for users of /dev/evtchn
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy(a)goop.org>
+---
+ drivers/xen/netback/common.h | 1 -
+ 1 files changed, 0 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index 65b88f4..5665ed1 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -38,7 +38,6 @@
+ #include <linux/netdevice.h>
+ #include <linux/etherdevice.h>
+ #include <linux/wait.h>
+-#include <xen/evtchn.h>
+ #include <xen/interface/io/netif.h>
+ #include <asm/io.h>
+ #include <asm/pgalloc.h>
+--
+1.7.4
+
+
+From f28a7c6148bb979acf99c0cbe3b441d0fb0853d9 Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Date: Wed, 18 Feb 2009 15:55:18 -0800
+Subject: [PATCH 004/203] xen: netback: use mod_timer
+
+__mod_timer is no longer a public API.
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/netback.c | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index c959075..e920703 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -334,7 +334,7 @@ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ */
+ netif->tx_queue_timeout.data = (unsigned long)netif;
+ netif->tx_queue_timeout.function = tx_queue_callback;
+- __mod_timer(&netif->tx_queue_timeout, jiffies + HZ/2);
++ mod_timer(&netif->tx_queue_timeout, jiffies + HZ/2);
+ }
+ }
+
+@@ -1299,7 +1299,7 @@ static void net_tx_action(unsigned long unused)
+ (unsigned long)netif;
+ netif->credit_timeout.function =
+ tx_credit_callback;
+- __mod_timer(&netif->credit_timeout,
++ mod_timer(&netif->credit_timeout,
+ next_credit);
+ netif_put(netif);
+ continue;
+--
+1.7.4
+
+
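+[Editor's note for context on the two hunks above: mod_timer() sets the new expiry and (re)arms the timer in one call, so it is a drop-in replacement for the removed __mod_timer() at these call sites. A sketch against the driver's own names (tx_queue_timeout and tx_queue_callback as in netback.c):
+
+    #include <linux/timer.h>
+    #include <linux/jiffies.h>
+    #include "common.h"        /* struct xen_netif */
+
+    /* Re-arm the queue-drain timer to fire in half a second, as the
+     * first hunk does; tx_queue_callback is netback.c's callback. */
+    static void arm_tx_queue_timer(struct xen_netif *netif)
+    {
+            netif->tx_queue_timeout.data = (unsigned long)netif;
+            netif->tx_queue_timeout.function = tx_queue_callback;
+            mod_timer(&netif->tx_queue_timeout, jiffies + HZ / 2);
+    }
+]
+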
+From 52f97ad360f28762c785343ba5c9f8abb83536f3 Mon Sep 17 00:00:00 2001
+From: Jan Beulich <jbeulich(a)novell.com>
+Date: Fri, 6 Mar 2009 08:29:31 +0000
+Subject: [PATCH 005/203] xen: netback: unmap tx ring gref when mapping of rx ring gref failed
+
+[ijc-ported from linux-2.6.18-xen.hg 782:51decc39e5e7]
+Signed-off-by: Jan Beulich <jbeulich(a)novell.com>
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+---
+ drivers/xen/netback/interface.c | 6 ++++++
+ 1 files changed, 6 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
+index d184ad7..f3d9ea1 100644
+--- a/drivers/xen/netback/interface.c
++++ b/drivers/xen/netback/interface.c
+@@ -222,6 +222,12 @@ static int map_frontend_pages(
+ BUG();
+
+ if (op.status) {
++ struct gnttab_unmap_grant_ref unop;
+
-+ if ( !psdev->pdev->sh_info )
-+ {
-+ dev_err(&dev->dev, "pciback device is not connected or owned"
-+ " by HVM, kill it\n");
-+ kill_domain_by_device(psdev);
-+ goto release;
-+ }
++ gnttab_set_unmap_op(&unop,
++ (unsigned long)netif->tx_comms_area->addr,
++ GNTMAP_host_map, netif->tx_shmem_handle);
++ HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unop, 1);
+ DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
+ return op.status;
+ }
+--
+1.7.4
+
+
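+[Editor's note: the fix above is the standard grant-table unwind pattern: a grant that was successfully mapped must be unmapped explicitly on the error path, or the backend leaks the tx ring mapping. The same step in isolation (a sketch; the standalone helper and its parameters mirror the hunk's values):
+
+    #include <xen/grant_table.h>
+    #include <asm/xen/hypercall.h>
+
+    /* Undo an earlier GNTTABOP_map_grant_ref of the tx ring page after
+     * mapping the rx ring grant has failed. */
+    static void unmap_tx_ring(unsigned long vaddr, grant_handle_t handle)
+    {
+            struct gnttab_unmap_grant_ref unop;
+
+            gnttab_set_unmap_op(&unop, vaddr, GNTMAP_host_map, handle);
+            if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unop, 1))
+                    BUG();
+    }
+]
+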
+From f9b63790f1404eb03ac824147b2294a46e485643 Mon Sep 17 00:00:00 2001
+From: Ian Campbell <Ian.Campbell(a)citrix.com>
+Date: Fri, 6 Mar 2009 08:29:32 +0000
+Subject: [PATCH 006/203] xen: netback: add ethtool stat to track copied skbs.
+
+Copied skbs should be rare but we have no way of verifying that.
+
+[ijc-ported from linux-2.6.18-xen.hg 792:db9857bb0320]
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+---
+ drivers/xen/netback/common.h | 3 ++
+ drivers/xen/netback/interface.c | 47 +++++++++++++++++++++++++++++++++++++++
+ drivers/xen/netback/netback.c | 6 ++++-
+ 3 files changed, 55 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index 5665ed1..6ba804d 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -92,6 +92,9 @@ struct xen_netif {
+ /* Enforce draining of the transmit queue. */
+ struct timer_list tx_queue_timeout;
+
++ /* Statistics */
++ int nr_copied_skbs;
+
-+ if ( !test_bit(_XEN_PCIB_AERHANDLER,
-+ (unsigned long *)&psdev->pdev->sh_info->flags) ) {
-+ dev_err(&dev->dev,
-+ "guest with no AER driver should have been killed\n");
-+ kill_domain_by_device(psdev);
-+ goto release;
-+ }
-+ common_process(psdev, 1, XEN_PCI_OP_aer_resume, PCI_ERS_RESULT_RECOVERED);
-+release:
-+ pcistub_device_put(psdev);
-+end:
-+ up_write(&pcistub_sem);
-+ return;
+ /* Miscellaneous private stuff. */
+ struct list_head list; /* scheduling list */
+ atomic_t refcnt;
+diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
+index f3d9ea1..1a99c87 100644
+--- a/drivers/xen/netback/interface.c
++++ b/drivers/xen/netback/interface.c
+@@ -119,8 +119,51 @@ static int netbk_set_tso(struct net_device *dev, u32 data)
+ return ethtool_op_set_tso(dev, data);
+ }
+
++static void netbk_get_drvinfo(struct net_device *dev,
++ struct ethtool_drvinfo *info)
++{
++ strcpy(info->driver, "netbk");
+}
+
-+/*add pciback AER handling*/
-+static struct pci_error_handlers pciback_error_handler = {
-+ .error_detected = pciback_error_detected,
-+ .mmio_enabled = pciback_mmio_enabled,
-+ .slot_reset = pciback_slot_reset,
-+ .resume = pciback_error_resume,
-+};
-+
-+/*
-+ * Note: There is no MODULE_DEVICE_TABLE entry here because this isn't
-+ * for a normal device. I don't want it to be loaded automatically.
-+ */
-+
-+static struct pci_driver pciback_pci_driver = {
-+ .name = "pciback",
-+ .id_table = pcistub_ids,
-+ .probe = pcistub_probe,
-+ .remove = pcistub_remove,
-+ .err_handler = &pciback_error_handler,
++static const struct netif_stat {
++ char name[ETH_GSTRING_LEN];
++ u16 offset;
++} netbk_stats[] = {
++ { "copied_skbs", offsetof(struct xen_netif, nr_copied_skbs) },
+};
+
-+static inline int str_to_slot(const char *buf, int *domain, int *bus,
-+ int *slot, int *func)
++static int netbk_get_stats_count(struct net_device *dev)
+{
-+ int err;
-+
-+ err = sscanf(buf, " %x:%x:%x.%x", domain, bus, slot, func);
-+ if (err == 4)
-+ return 0;
-+ else if (err < 0)
-+ return -EINVAL;
-+
-+ /* try again without domain */
-+ *domain = 0;
-+ err = sscanf(buf, " %x:%x.%x", bus, slot, func);
-+ if (err == 3)
-+ return 0;
-+
-+ return -EINVAL;
++ return ARRAY_SIZE(netbk_stats);
+}
+
-+static inline int str_to_quirk(const char *buf, int *domain, int *bus, int
-+ *slot, int *func, int *reg, int *size, int *mask)
++static void netbk_get_ethtool_stats(struct net_device *dev,
++ struct ethtool_stats *stats, u64 * data)
+{
-+ int err;
++ void *netif = netdev_priv(dev);
++ int i;
+
-+ err =
-+ sscanf(buf, " %04x:%02x:%02x.%1x-%08x:%1x:%08x", domain, bus, slot,
-+ func, reg, size, mask);
-+ if (err == 7)
-+ return 0;
-+ return -EINVAL;
++ for (i = 0; i < ARRAY_SIZE(netbk_stats); i++)
++ data[i] = *(int *)(netif + netbk_stats[i].offset);
+}
+
-+static int pcistub_device_id_add(int domain, int bus, int slot, int func)
++static void netbk_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
-+ struct pcistub_device_id *pci_dev_id;
-+ unsigned long flags;
-+
-+ pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
-+ if (!pci_dev_id)
-+ return -ENOMEM;
-+
-+ pci_dev_id->domain = domain;
-+ pci_dev_id->bus = bus;
-+ pci_dev_id->devfn = PCI_DEVFN(slot, func);
-+
-+ pr_debug("pciback: wants to seize %04x:%02x:%02x.%01x\n",
-+ domain, bus, slot, func);
-+
-+ spin_lock_irqsave(&device_ids_lock, flags);
-+ list_add_tail(&pci_dev_id->slot_list, &pcistub_device_ids);
-+ spin_unlock_irqrestore(&device_ids_lock, flags);
++ int i;
+
-+ return 0;
++ switch (stringset) {
++ case ETH_SS_STATS:
++ for (i = 0; i < ARRAY_SIZE(netbk_stats); i++)
++ memcpy(data + i * ETH_GSTRING_LEN,
++ netbk_stats[i].name, ETH_GSTRING_LEN);
++ break;
++ }
+}
+
-+static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
-+{
-+ struct pcistub_device_id *pci_dev_id, *t;
-+ int devfn = PCI_DEVFN(slot, func);
-+ int err = -ENOENT;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&device_ids_lock, flags);
-+ list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids, slot_list) {
+ static struct ethtool_ops network_ethtool_ops =
+ {
++ .get_drvinfo = netbk_get_drvinfo,
+
-+ if (pci_dev_id->domain == domain
-+ && pci_dev_id->bus == bus && pci_dev_id->devfn == devfn) {
-+ /* Don't break; here because it's possible the same
-+ * slot could be in the list more than once
-+ */
-+ list_del(&pci_dev_id->slot_list);
-+ kfree(pci_dev_id);
-+
-+ err = 0;
-+
-+ pr_debug("pciback: removed %04x:%02x:%02x.%01x from "
-+ "seize list\n", domain, bus, slot, func);
-+ }
-+ }
-+ spin_unlock_irqrestore(&device_ids_lock, flags);
-+
-+ return err;
-+}
-+
-+static int pcistub_reg_add(int domain, int bus, int slot, int func, int reg,
-+ int size, int mask)
-+{
-+ int err = 0;
-+ struct pcistub_device *psdev;
-+ struct pci_dev *dev;
-+ struct config_field *field;
-+
-+ psdev = pcistub_device_find(domain, bus, slot, func);
-+ if (!psdev || !psdev->dev) {
-+ err = -ENODEV;
-+ goto out;
-+ }
-+ dev = psdev->dev;
-+
-+ field = kzalloc(sizeof(*field), GFP_ATOMIC);
-+ if (!field) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+@@ -128,6 +171,10 @@ static struct ethtool_ops network_ethtool_ops =
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = netbk_set_tso,
+ .get_link = ethtool_op_get_link,
+
-+ field->offset = reg;
-+ field->size = size;
-+ field->mask = mask;
-+ field->init = NULL;
-+ field->reset = NULL;
-+ field->release = NULL;
-+ field->clean = pciback_config_field_free;
++ .get_stats_count = netbk_get_stats_count,
++ .get_ethtool_stats = netbk_get_ethtool_stats,
++ .get_strings = netbk_get_strings,
+ };
+
+ struct xen_netif *netif_alloc(domid_t domid, unsigned int handle)
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index e920703..f59fadb 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -923,7 +923,11 @@ inline static void net_tx_action_dealloc(void)
+ if (time_after(inuse->alloc_time + HZ / 2, jiffies))
+ break;
+
+- switch (copy_pending_req(inuse - pending_inuse)) {
++ pending_idx = inuse - pending_inuse;
+
-+ err = pciback_config_quirks_add_field(dev, field);
-+ if (err)
-+ kfree(field);
-+ out:
-+ return err;
-+}
++ pending_tx_info[pending_idx].netif->nr_copied_skbs++;
+
-+static ssize_t pcistub_slot_add(struct device_driver *drv, const char *buf,
-+ size_t count)
-+{
-+ int domain, bus, slot, func;
-+ int err;
++ switch (copy_pending_req(pending_idx)) {
+ case 0:
+ list_move_tail(&inuse->list, &list);
+ continue;
+--
+1.7.4
+
+
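+[Editor's note: the stats hooks above use the usual offsetof() table idiom: each entry pairs a display string with the counter's offset inside struct xen_netif, and netbk_get_ethtool_stats() just walks the table. Growing it is one line per counter; a sketch (the commented-out second entry is hypothetical):
+
+    static const struct netif_stat netbk_stats[] = {
+            { "copied_skbs", offsetof(struct xen_netif, nr_copied_skbs) },
+            /* a further counter would be added the same way:
+             * { "queue_stops", offsetof(struct xen_netif, nr_queue_stops) },
+             */
+    };
+
+Once a vif is up, the counters are read the usual way, e.g. "ethtool -S vif1.0" prints each name/value pair.]
+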
+From c41d8da3d853d4e89ba38693b90c1fe512095704 Mon Sep 17 00:00:00 2001
+From: Ian Campbell <Ian.Campbell(a)citrix.com>
+Date: Fri, 6 Mar 2009 08:29:33 +0000
+Subject: [PATCH 007/203] xen: netback: make queue length parameter writeable in sysfs
+
+Any changes will only take effect for newly created VIFs.
+
+Also hook up the vif devices to their parent and publish bus info via
+ethtool.
+
+[ijc-ported from linux-2.6.18-xen.hg 793:3aa9b8a7876b]
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+---
+ drivers/xen/netback/interface.c | 3 ++-
+ drivers/xen/netback/xenbus.c | 1 +
+ 2 files changed, 3 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
+index 1a99c87..7706170 100644
+--- a/drivers/xen/netback/interface.c
++++ b/drivers/xen/netback/interface.c
+@@ -52,7 +52,7 @@
+ * blocked.
+ */
+ static unsigned long netbk_queue_length = 32;
+-module_param_named(queue_length, netbk_queue_length, ulong, 0);
++module_param_named(queue_length, netbk_queue_length, ulong, 0644);
+
+ static void __netif_up(struct xen_netif *netif)
+ {
+@@ -123,6 +123,7 @@ static void netbk_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+ {
+ strcpy(info->driver, "netbk");
++ strcpy(info->bus_info, dev->dev.parent->bus_id);
+ }
+
+ static const struct netif_stat {
+diff --git a/drivers/xen/netback/xenbus.c b/drivers/xen/netback/xenbus.c
+index ed7c006..dc7b367 100644
+--- a/drivers/xen/netback/xenbus.c
++++ b/drivers/xen/netback/xenbus.c
+@@ -200,6 +200,7 @@ static void backend_create_netif(struct backend_info *be)
+ xenbus_dev_fatal(dev, err, "creating interface");
+ return;
+ }
++ SET_NETDEV_DEV(be->netif->dev, &dev->dev);
+
+ kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
+ }
+--
+1.7.4
+
+
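+[Editor's note: the functional change above is only the permission bits. module_param_named() with mode 0 registers the parameter but keeps it out of sysfs, while 0644 exposes it read/write under /sys/module/<module>/parameters/. A sketch of the idiom with the consequence spelled out (the exact sysfs path depends on the module name the driver is built as):
+
+    #include <linux/module.h>
+    #include <linux/moduleparam.h>
+
+    static unsigned long netbk_queue_length = 32;
+
+    /* With 0644, root can adjust this at runtime, e.g.
+     *   echo 64 > /sys/module/<module>/parameters/queue_length
+     * As the commit message notes, only vifs created after the write
+     * pick up the new value. */
+    module_param_named(queue_length, netbk_queue_length, ulong, 0644);
+]
+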
+From f204d7567ab11ddb1ff3208ab5ed8921b575af5d Mon Sep 17 00:00:00 2001
+From: Ian Campbell <Ian.Campbell(a)citrix.com>
+Date: Mon, 16 Mar 2009 22:05:16 +0000
+Subject: [PATCH 008/203] xen: netback: parent sysfs device should be set before registering.
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+---
+ drivers/xen/netback/common.h | 2 +-
+ drivers/xen/netback/interface.c | 4 +++-
+ drivers/xen/netback/xenbus.c | 3 +--
+ 3 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index 6ba804d..123a169 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -180,7 +180,7 @@ void netif_accel_init(void);
+
+ void netif_disconnect(struct xen_netif *netif);
+
+-struct xen_netif *netif_alloc(domid_t domid, unsigned int handle);
++struct xen_netif *netif_alloc(struct device *parent, domid_t domid, unsigned int handle);
+ int netif_map(struct xen_netif *netif, unsigned long tx_ring_ref,
+ unsigned long rx_ring_ref, unsigned int evtchn);
+
+diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
+index 7706170..5e0d26d 100644
+--- a/drivers/xen/netback/interface.c
++++ b/drivers/xen/netback/interface.c
+@@ -178,7 +178,7 @@ static struct ethtool_ops network_ethtool_ops =
+ .get_strings = netbk_get_strings,
+ };
+
+-struct xen_netif *netif_alloc(domid_t domid, unsigned int handle)
++struct xen_netif *netif_alloc(struct device *parent, domid_t domid, unsigned int handle)
+ {
+ int err = 0;
+ struct net_device *dev;
+@@ -192,6 +192,8 @@ struct xen_netif *netif_alloc(domid_t domid, unsigned int handle)
+ return ERR_PTR(-ENOMEM);
+ }
+
++ SET_NETDEV_DEV(dev, parent);
+
-+ err = str_to_slot(buf, &domain, &bus, &slot, &func);
-+ if (err)
-+ goto out;
+ netif = netdev_priv(dev);
+ memset(netif, 0, sizeof(*netif));
+ netif->domid = domid;
+diff --git a/drivers/xen/netback/xenbus.c b/drivers/xen/netback/xenbus.c
+index dc7b367..749931e 100644
+--- a/drivers/xen/netback/xenbus.c
++++ b/drivers/xen/netback/xenbus.c
+@@ -193,14 +193,13 @@ static void backend_create_netif(struct backend_info *be)
+ return;
+ }
+
+- be->netif = netif_alloc(dev->otherend_id, handle);
++ be->netif = netif_alloc(&dev->dev, dev->otherend_id, handle);
+ if (IS_ERR(be->netif)) {
+ err = PTR_ERR(be->netif);
+ be->netif = NULL;
+ xenbus_dev_fatal(dev, err, "creating interface");
+ return;
+ }
+- SET_NETDEV_DEV(be->netif->dev, &dev->dev);
+
+ kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
+ }
+--
+1.7.4
+
+
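+[Editor's note: the point of the move above is ordering. SET_NETDEV_DEV() must run before the net device is registered so its sysfs node is created under the xenbus parent from the start (the ethtool bus_info added earlier also reads dev->dev.parent). A minimal sketch of the required order, assuming the driver's common.h for struct xen_netif:
+
+    #include <linux/netdevice.h>
+    #include <linux/etherdevice.h>
+    #include "common.h"        /* struct xen_netif */
+
+    static struct net_device *sketch_vif_create(struct device *parent)
+    {
+            struct net_device *dev;
+
+            dev = alloc_netdev(sizeof(struct xen_netif), "vif%d", ether_setup);
+            if (!dev)
+                    return NULL;
+
+            SET_NETDEV_DEV(dev, parent);    /* must precede registration */
+
+            if (register_netdev(dev)) {
+                    free_netdev(dev);
+                    return NULL;
+            }
+            return dev;
+    }
+]
+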
+From bb606178665ea78b505cb54864899478b6020584 Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Date: Thu, 19 Mar 2009 12:42:36 -0700
+Subject: [PATCH 009/203] xen: netback: use NET_SKB_PAD rather than "16"
+
+There's a constant for the default skb headroom.
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/netback.c | 6 +++---
+ 1 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index f59fadb..400f398 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -203,7 +203,7 @@ static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
+ if (unlikely(!nskb))
+ goto err;
+
+- skb_reserve(nskb, 16 + NET_IP_ALIGN);
++ skb_reserve(nskb, NET_SKB_PAD + NET_IP_ALIGN);
+ headlen = skb_end_pointer(nskb) - nskb->data;
+ if (headlen > skb_headlen(skb))
+ headlen = skb_headlen(skb);
+@@ -1353,7 +1353,7 @@ static void net_tx_action(unsigned long unused)
+ ret < MAX_SKB_FRAGS) ?
+ PKT_PROT_LEN : txreq.size;
+
+- skb = alloc_skb(data_len + 16 + NET_IP_ALIGN,
++ skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(skb == NULL)) {
+ DPRINTK("Can't allocate a skb in start_xmit.\n");
+@@ -1362,7 +1362,7 @@ static void net_tx_action(unsigned long unused)
+ }
+
+ /* Packets passed to netif_rx() must have some headroom. */
+- skb_reserve(skb, 16 + NET_IP_ALIGN);
++ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+ if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
+ struct xen_netif_extra_info *gso;
+--
+1.7.4
+
+
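+[Editor's note: NET_SKB_PAD is the stack's name for the default skb headroom that the literal 16 was approximating, so the change is behaviour-preserving and tracks the core default if it ever grows. The allocation pattern the hunks touch, in isolation (a sketch; the helper name is illustrative):
+
+    #include <linux/skbuff.h>
+
+    static struct sk_buff *alloc_rx_skb(unsigned int data_len)
+    {
+            struct sk_buff *skb;
+
+            skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
+                            GFP_ATOMIC | __GFP_NOWARN);
+            if (!skb)
+                    return NULL;
+
+            /* Packets later passed to netif_rx() need headroom; reserve
+             * the canonical amount rather than a magic 16. */
+            skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+            return skb;
+    }
+]
+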
+From fe41ab031dfa0c6f9821c2667ce821e7f4f635ed Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Date: Thu, 19 Mar 2009 13:31:26 -0700
+Subject: [PATCH 010/203] xen: netback: completely drop flip support
+
+Nobody uses it?
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/common.h | 1 -
+ drivers/xen/netback/netback.c | 245 ++++-------------------------------------
+ drivers/xen/netback/xenbus.c | 3 +-
+ 3 files changed, 22 insertions(+), 227 deletions(-)
+
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index 123a169..06f04c1 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -78,7 +78,6 @@ struct xen_netif {
+
+ /* Internal feature information. */
+ u8 can_queue:1; /* can queue packets for receiver? */
+- u8 copying_receiver:1; /* copy packets to receiver? */
+
+ /* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
+ RING_IDX rx_req_cons_peek;
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 400f398..36bea2b 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -51,7 +51,6 @@
+ struct netbk_rx_meta {
+ skb_frag_t frag;
+ int id;
+- u8 copy:1;
+ };
+
+ struct netbk_tx_pending_inuse {
+@@ -160,26 +159,6 @@ static inline unsigned long alloc_mfn(void)
+ return mfn_list[--alloc_index];
+ }
+
+-static int check_mfn(int nr)
+-{
+- struct xen_memory_reservation reservation = {
+- .extent_order = 0,
+- .domid = DOMID_SELF
+- };
+- int rc;
+-
+- if (likely(alloc_index >= nr))
+- return 0;
+-
+- set_xen_guest_handle(reservation.extent_start, mfn_list + alloc_index);
+- reservation.nr_extents = MAX_MFN_ALLOC - alloc_index;
+- rc = HYPERVISOR_memory_op(XENMEM_increase_reservation, &reservation);
+- if (likely(rc > 0))
+- alloc_index += rc;
+-
+- return alloc_index >= nr ? 0 : -ENOMEM;
+-}
+-
+ static inline void maybe_schedule_tx_action(void)
+ {
+ smp_mb();
+@@ -188,82 +167,6 @@ static inline void maybe_schedule_tx_action(void)
+ tasklet_schedule(&net_tx_tasklet);
+ }
+
+-static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
+-{
+- struct skb_shared_info *ninfo;
+- struct sk_buff *nskb;
+- unsigned long offset;
+- int ret;
+- int len;
+- int headlen;
+-
+- BUG_ON(skb_shinfo(skb)->frag_list != NULL);
+-
+- nskb = alloc_skb(SKB_MAX_HEAD(0), GFP_ATOMIC | __GFP_NOWARN);
+- if (unlikely(!nskb))
+- goto err;
+-
+- skb_reserve(nskb, NET_SKB_PAD + NET_IP_ALIGN);
+- headlen = skb_end_pointer(nskb) - nskb->data;
+- if (headlen > skb_headlen(skb))
+- headlen = skb_headlen(skb);
+- ret = skb_copy_bits(skb, 0, __skb_put(nskb, headlen), headlen);
+- BUG_ON(ret);
+-
+- ninfo = skb_shinfo(nskb);
+- ninfo->gso_size = skb_shinfo(skb)->gso_size;
+- ninfo->gso_type = skb_shinfo(skb)->gso_type;
+-
+- offset = headlen;
+- len = skb->len - headlen;
+-
+- nskb->len = skb->len;
+- nskb->data_len = len;
+- nskb->truesize += len;
+-
+- while (len) {
+- struct page *page;
+- int copy;
+- int zero;
+-
+- if (unlikely(ninfo->nr_frags >= MAX_SKB_FRAGS)) {
+- dump_stack();
+- goto err_free;
+- }
+-
+- copy = len >= PAGE_SIZE ? PAGE_SIZE : len;
+- zero = len >= PAGE_SIZE ? 0 : __GFP_ZERO;
+-
+- page = alloc_page(GFP_ATOMIC | __GFP_NOWARN | zero);
+- if (unlikely(!page))
+- goto err_free;
+-
+- ret = skb_copy_bits(skb, offset, page_address(page), copy);
+- BUG_ON(ret);
+-
+- ninfo->frags[ninfo->nr_frags].page = page;
+- ninfo->frags[ninfo->nr_frags].page_offset = 0;
+- ninfo->frags[ninfo->nr_frags].size = copy;
+- ninfo->nr_frags++;
+-
+- offset += copy;
+- len -= copy;
+- }
+-
+- offset = nskb->data - skb->data;
+-
+- nskb->transport_header = skb->transport_header + offset;
+- nskb->network_header = skb->network_header + offset;
+- nskb->mac_header = skb->mac_header + offset;
+-
+- return nskb;
+-
+- err_free:
+- kfree_skb(nskb);
+- err:
+- return NULL;
+-}
+-
+ static inline int netbk_max_required_rx_slots(struct xen_netif *netif)
+ {
+ if (netif->features & (NETIF_F_SG|NETIF_F_TSO))
+@@ -297,24 +200,6 @@ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ if (unlikely(!netif_schedulable(netif) || netbk_queue_full(netif)))
+ goto drop;
+
+- /*
+- * Copy the packet here if it's destined for a flipping interface
+- * but isn't flippable (e.g. extra references to data).
+- * XXX For now we also copy skbuffs whose head crosses a page
+- * boundary, because netbk_gop_skb can't handle them.
+- */
+- if (!netif->copying_receiver ||
+- ((skb_headlen(skb) + offset_in_page(skb->data)) >= PAGE_SIZE)) {
+- struct sk_buff *nskb = netbk_copy_skb(skb);
+- if ( unlikely(nskb == NULL) )
+- goto drop;
+- /* Copy only the header fields we use in this driver. */
+- nskb->dev = skb->dev;
+- nskb->ip_summed = skb->ip_summed;
+- dev_kfree_skb(skb);
+- skb = nskb;
+- }
+-
+ netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1 +
+ !!skb_shinfo(skb)->gso_size;
+ netif_get(netif);
+@@ -388,66 +273,32 @@ static u16 netbk_gop_frag(struct xen_netif *netif, struct netbk_rx_meta *meta,
+ struct page *page, unsigned long size,
+ unsigned long offset)
+ {
+- struct mmu_update *mmu;
+- struct gnttab_transfer *gop;
+ struct gnttab_copy *copy_gop;
+- struct multicall_entry *mcl;
+ struct xen_netif_rx_request *req;
+- unsigned long old_mfn, new_mfn;
++ unsigned long old_mfn;
+ int idx = netif_page_index(page);
+
+ old_mfn = virt_to_mfn(page_address(page));
+
+ req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons + i);
+- if (netif->copying_receiver) {
+- /* The fragment needs to be copied rather than
+- flipped. */
+- meta->copy = 1;
+- copy_gop = npo->copy + npo->copy_prod++;
+- copy_gop->flags = GNTCOPY_dest_gref;
+- if (idx > -1) {
+- struct pending_tx_info *src_pend = &pending_tx_info[idx];
+- copy_gop->source.domid = src_pend->netif->domid;
+- copy_gop->source.u.ref = src_pend->req.gref;
+- copy_gop->flags |= GNTCOPY_source_gref;
+- } else {
+- copy_gop->source.domid = DOMID_SELF;
+- copy_gop->source.u.gmfn = old_mfn;
+- }
+- copy_gop->source.offset = offset;
+- copy_gop->dest.domid = netif->domid;
+- copy_gop->dest.offset = 0;
+- copy_gop->dest.u.ref = req->gref;
+- copy_gop->len = size;
+- } else {
+- meta->copy = 0;
+- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+- new_mfn = alloc_mfn();
+-
+- /*
+- * Set the new P2M table entry before
+- * reassigning the old data page. Heed the
+- * comment in pgtable-2level.h:pte_page(). :-)
+- */
+- set_phys_to_machine(page_to_pfn(page), new_mfn);
+-
+- mcl = npo->mcl + npo->mcl_prod++;
+- MULTI_update_va_mapping(mcl,
+- (unsigned long)page_address(page),
+- mfn_pte(new_mfn, PAGE_KERNEL),
+- 0);
+-
+- mmu = npo->mmu + npo->mmu_prod++;
+- mmu->ptr = ((phys_addr_t)new_mfn << PAGE_SHIFT) |
+- MMU_MACHPHYS_UPDATE;
+- mmu->val = page_to_pfn(page);
+- }
+
+- gop = npo->trans + npo->trans_prod++;
+- gop->mfn = old_mfn;
+- gop->domid = netif->domid;
+- gop->ref = req->gref;
++ copy_gop = npo->copy + npo->copy_prod++;
++ copy_gop->flags = GNTCOPY_dest_gref;
++ if (idx > -1) {
++ struct pending_tx_info *src_pend = &pending_tx_info[idx];
++ copy_gop->source.domid = src_pend->netif->domid;
++ copy_gop->source.u.ref = src_pend->req.gref;
++ copy_gop->flags |= GNTCOPY_source_gref;
++ } else {
++ copy_gop->source.domid = DOMID_SELF;
++ copy_gop->source.u.gmfn = old_mfn;
+ }
++ copy_gop->source.offset = offset;
++ copy_gop->dest.domid = netif->domid;
++ copy_gop->dest.offset = 0;
++ copy_gop->dest.u.ref = req->gref;
++ copy_gop->len = size;
+
-+ err = pcistub_device_id_add(domain, bus, slot, func);
-+
-+ out:
-+ if (!err)
-+ err = count;
-+ return err;
-+}
-+
-+DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add);
-+
-+static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf,
-+ size_t count)
-+{
-+ int domain, bus, slot, func;
-+ int err;
-+
-+ err = str_to_slot(buf, &domain, &bus, &slot, &func);
-+ if (err)
-+ goto out;
-+
-+ err = pcistub_device_id_remove(domain, bus, slot, func);
-+
-+ out:
-+ if (!err)
-+ err = count;
-+ return err;
-+}
-+
-+DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove);
-+
-+static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
-+{
-+ struct pcistub_device_id *pci_dev_id;
-+ size_t count = 0;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&device_ids_lock, flags);
-+ list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
-+ if (count >= PAGE_SIZE)
-+ break;
-+
-+ count += scnprintf(buf + count, PAGE_SIZE - count,
-+ "%04x:%02x:%02x.%01x\n",
-+ pci_dev_id->domain, pci_dev_id->bus,
-+ PCI_SLOT(pci_dev_id->devfn),
-+ PCI_FUNC(pci_dev_id->devfn));
-+ }
-+ spin_unlock_irqrestore(&device_ids_lock, flags);
-+
-+ return count;
-+}
-+
-+DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);
-+
-+static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf,
-+ size_t count)
-+{
-+ int domain, bus, slot, func, reg, size, mask;
-+ int err;
-+
-+ err = str_to_quirk(buf, &domain, &bus, &slot, &func, ®, &size,
-+ &mask);
-+ if (err)
-+ goto out;
-+
-+ err = pcistub_reg_add(domain, bus, slot, func, reg, size, mask);
-+
-+ out:
-+ if (!err)
-+ err = count;
-+ return err;
-+}
-+
-+static ssize_t pcistub_quirk_show(struct device_driver *drv, char *buf)
-+{
-+ int count = 0;
-+ unsigned long flags;
-+ extern struct list_head pciback_quirks;
-+ struct pciback_config_quirk *quirk;
-+ struct pciback_dev_data *dev_data;
-+ const struct config_field *field;
-+ const struct config_field_entry *cfg_entry;
-+
-+ spin_lock_irqsave(&device_ids_lock, flags);
-+ list_for_each_entry(quirk, &pciback_quirks, quirks_list) {
-+ if (count >= PAGE_SIZE)
-+ goto out;
-+
-+ count += scnprintf(buf + count, PAGE_SIZE - count,
-+ "%02x:%02x.%01x\n\t%04x:%04x:%04x:%04x\n",
-+ quirk->pdev->bus->number,
-+ PCI_SLOT(quirk->pdev->devfn),
-+ PCI_FUNC(quirk->pdev->devfn),
-+ quirk->devid.vendor, quirk->devid.device,
-+ quirk->devid.subvendor,
-+ quirk->devid.subdevice);
-+
-+ dev_data = pci_get_drvdata(quirk->pdev);
-+
-+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
-+ field = cfg_entry->field;
-+ if (count >= PAGE_SIZE)
-+ goto out;
-+
-+ count += scnprintf(buf + count, PAGE_SIZE - count,
-+ "\t\t%08x:%01x:%08x\n",
-+ cfg_entry->base_offset + field->offset,
-+ field->size, field->mask);
-+ }
-+ }
-+
-+ out:
-+ spin_unlock_irqrestore(&device_ids_lock, flags);
-+
-+ return count;
-+}
-+
-+DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show, pcistub_quirk_add);
-+
-+static ssize_t permissive_add(struct device_driver *drv, const char *buf,
-+ size_t count)
-+{
-+ int domain, bus, slot, func;
-+ int err;
-+ struct pcistub_device *psdev;
-+ struct pciback_dev_data *dev_data;
-+ err = str_to_slot(buf, &domain, &bus, &slot, &func);
-+ if (err)
-+ goto out;
-+ psdev = pcistub_device_find(domain, bus, slot, func);
-+ if (!psdev) {
-+ err = -ENODEV;
-+ goto out;
-+ }
-+ if (!psdev->dev) {
-+ err = -ENODEV;
-+ goto release;
-+ }
-+ dev_data = pci_get_drvdata(psdev->dev);
-+ /* the driver data for a device should never be null at this point */
-+ if (!dev_data) {
-+ err = -ENXIO;
-+ goto release;
-+ }
-+ if (!dev_data->permissive) {
-+ dev_data->permissive = 1;
-+ /* Let user know that what they're doing could be unsafe */
-+ dev_warn(&psdev->dev->dev,
-+ "enabling permissive mode configuration space accesses!\n");
-+ dev_warn(&psdev->dev->dev,
-+ "permissive mode is potentially unsafe!\n");
-+ }
-+ release:
-+ pcistub_device_put(psdev);
-+ out:
-+ if (!err)
-+ err = count;
-+ return err;
-+}
+ return req->id;
+ }
+
+@@ -502,41 +353,17 @@ static inline void netbk_free_pages(int nr_frags, struct netbk_rx_meta *meta)
+ static int netbk_check_gop(int nr_frags, domid_t domid,
+ struct netrx_pending_operations *npo)
+ {
+- struct multicall_entry *mcl;
+- struct gnttab_transfer *gop;
+ struct gnttab_copy *copy_op;
+ int status = NETIF_RSP_OKAY;
+ int i;
+
+ for (i = 0; i <= nr_frags; i++) {
+- if (npo->meta[npo->meta_cons + i].copy) {
+ copy_op = npo->copy + npo->copy_cons++;
+ if (copy_op->status != GNTST_okay) {
+ DPRINTK("Bad status %d from copy to DOM%d.\n",
+ copy_op->status, domid);
+ status = NETIF_RSP_ERROR;
+ }
+- } else {
+- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+- mcl = npo->mcl + npo->mcl_cons++;
+- /* The update_va_mapping() must not fail. */
+- BUG_ON(mcl->result != 0);
+- }
+-
+- gop = npo->trans + npo->trans_cons++;
+- /* Check the reassignment error code. */
+- if (gop->status != 0) {
+- DPRINTK("Bad status %d from grant transfer to DOM%u\n",
+- gop->status, domid);
+- /*
+- * Page no longer belongs to us unless
+- * GNTST_bad_page, but that should be
+- * a fatal error anyway.
+- */
+- BUG_ON(gop->status == GNTST_bad_page);
+- status = NETIF_RSP_ERROR;
+- }
+- }
+ }
+
+ return status;
+@@ -551,11 +378,8 @@ static void netbk_add_frag_responses(struct xen_netif *netif, int status,
+ for (i = 0; i < nr_frags; i++) {
+ int id = meta[i].id;
+ int flags = (i == nr_frags - 1) ? 0 : NETRXF_more_data;
+-
+- if (meta[i].copy)
+- offset = 0;
+- else
+- offset = meta[i].frag.page_offset;
++
++ offset = 0;
+ make_rx_response(netif, id, status, offset,
+ meta[i].frag.size, flags);
+ }
+@@ -603,18 +427,6 @@ static void net_rx_action(unsigned long unused)
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ *(int *)skb->cb = nr_frags;
+
+- if (!xen_feature(XENFEAT_auto_translated_physmap) &&
+- !((struct xen_netif *)netdev_priv(skb->dev))->copying_receiver &&
+- check_mfn(nr_frags + 1)) {
+- /* Memory squeeze? Back off for an arbitrary while. */
+- if ( net_ratelimit() )
+- WPRINTK("Memory squeeze in netback "
+- "driver.\n");
+- mod_timer(&net_timer, jiffies + HZ);
+- skb_queue_head(&rx_queue, skb);
+- break;
+- }
+-
+ netbk_gop_skb(skb, &npo);
+
+ count += nr_frags + 1;
+@@ -677,20 +489,6 @@ static void net_rx_action(unsigned long unused)
+ nr_frags = *(int *)skb->cb;
+
+ netif = netdev_priv(skb->dev);
+- /* We can't rely on skb_release_data to release the
+- pages used by fragments for us, since it tries to
+- touch the pages in the fraglist. If we're in
+- flipping mode, that doesn't work. In copying mode,
+- we still have access to all of the pages, and so
+- it's safe to let release_data deal with it. */
+- /* (Freeing the fragments is safe since we copy
+- non-linear skbs destined for flipping interfaces) */
+- if (!netif->copying_receiver) {
+- atomic_set(&(skb_shinfo(skb)->dataref), 1);
+- skb_shinfo(skb)->frag_list = NULL;
+- skb_shinfo(skb)->nr_frags = 0;
+- netbk_free_pages(nr_frags, meta + npo.meta_cons + 1);
+- }
+
+ netif->stats.tx_bytes += skb->len;
+ netif->stats.tx_packets++;
+@@ -706,10 +504,7 @@ static void net_rx_action(unsigned long unused)
+ /* remote but checksummed. */
+ flags |= NETRXF_data_validated;
+
+- if (meta[npo.meta_cons].copy)
+- offset = 0;
+- else
+- offset = offset_in_page(skb->data);
++ offset = 0;
+ resp = make_rx_response(netif, id, status, offset,
+ skb_headlen(skb), flags);
+
+diff --git a/drivers/xen/netback/xenbus.c b/drivers/xen/netback/xenbus.c
+index 749931e..a492288 100644
+--- a/drivers/xen/netback/xenbus.c
++++ b/drivers/xen/netback/xenbus.c
+@@ -378,7 +378,8 @@ static int connect_rings(struct backend_info *be)
+ dev->otherend);
+ return err;
+ }
+- be->netif->copying_receiver = !!rx_copy;
++ if (!rx_copy)
++ return -EOPNOTSUPP;
+
+ if (be->netif->dev->tx_queue_len != 0) {
+ if (xenbus_scanf(XBT_NIL, dev->otherend,
+--
+1.7.4
+
+
+From 17d465234118873ab4f5a7992feb4ce7b5537cf7 Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Date: Thu, 19 Mar 2009 15:19:39 -0700
+Subject: [PATCH 011/203] xen: netback: demacro MASK_PEND_IDX
+
+Replace it with a more meaningful inline: pending_index().
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/netback.c | 21 +++++++++++++--------
+ 1 files changed, 13 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 36bea2b..4095622 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -121,7 +121,12 @@ static struct pending_tx_info {
+ } pending_tx_info[MAX_PENDING_REQS];
+ static u16 pending_ring[MAX_PENDING_REQS];
+ typedef unsigned int PEND_RING_IDX;
+-#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
+
-+static ssize_t permissive_show(struct device_driver *drv, char *buf)
++static inline PEND_RING_IDX pending_index(unsigned i)
+{
-+ struct pcistub_device *psdev;
-+ struct pciback_dev_data *dev_data;
-+ size_t count = 0;
-+ unsigned long flags;
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
-+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
-+ if (count >= PAGE_SIZE)
-+ break;
-+ if (!psdev->dev)
-+ continue;
-+ dev_data = pci_get_drvdata(psdev->dev);
-+ if (!dev_data || !dev_data->permissive)
-+ continue;
-+ count +=
-+ scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
-+ pci_name(psdev->dev));
-+ }
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+ return count;
++ return i & (MAX_PENDING_REQS-1);
+}
+
-+DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show, permissive_add);
-+
-+#ifdef CONFIG_PCI_MSI
-+
-+int pciback_get_owner(struct pci_dev *dev)
-+{
-+ struct pcistub_device *psdev;
-+
-+ psdev = pcistub_device_find(pci_domain_nr(dev->bus), dev->bus->number,
-+ PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
-+
-+ if (!psdev || !psdev->pdev)
-+ return -1;
-+
-+ return psdev->pdev->xdev->otherend_id;
-+}
-+#endif
+ static PEND_RING_IDX pending_prod, pending_cons;
+ #define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
+
+@@ -695,7 +700,7 @@ inline static void net_tx_action_dealloc(void)
+ while (dc != dp) {
+ unsigned long pfn;
+
+- pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
++ pending_idx = dealloc_ring[pending_index(dc++)];
+ list_move_tail(&pending_inuse[pending_idx].list, &list);
+
+ pfn = idx_to_pfn(pending_idx);
+@@ -754,7 +759,7 @@ inline static void net_tx_action_dealloc(void)
+ /* Ready for next use. */
+ gnttab_reset_grant_page(mmap_pages[pending_idx]);
+
+- pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
++ pending_ring[pending_index(pending_prod++)] = pending_idx;
+
+ netif_put(netif);
+
+@@ -831,7 +836,7 @@ static struct gnttab_map_grant_ref *netbk_get_requests(struct xen_netif *netif,
+ start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+
+ for (i = start; i < shinfo->nr_frags; i++, txp++) {
+- pending_idx = pending_ring[MASK_PEND_IDX(pending_cons++)];
++ pending_idx = pending_ring[pending_index(pending_cons++)];
+
+ gnttab_set_map_op(mop++, idx_to_kaddr(pending_idx),
+ GNTMAP_host_map | GNTMAP_readonly,
+@@ -862,7 +867,7 @@ static int netbk_tx_check_mop(struct sk_buff *skb,
+ if (unlikely(err)) {
+ txp = &pending_tx_info[pending_idx].req;
+ make_tx_response(netif, txp, NETIF_RSP_ERROR);
+- pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
++ pending_ring[pending_index(pending_prod++)] = pending_idx;
+ netif_put(netif);
+ } else {
+ set_phys_to_machine(
+@@ -895,7 +900,7 @@ static int netbk_tx_check_mop(struct sk_buff *skb,
+ /* Error on this fragment: respond to client with an error. */
+ txp = &pending_tx_info[pending_idx].req;
+ make_tx_response(netif, txp, NETIF_RSP_ERROR);
+- pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
++ pending_ring[pending_index(pending_prod++)] = pending_idx;
+ netif_put(netif);
+
+ /* Not the first error? Preceding frags already invalidated. */
+@@ -1142,7 +1147,7 @@ static void net_tx_action(unsigned long unused)
+ continue;
+ }
+
+- pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
++ pending_idx = pending_ring[pending_index(pending_cons)];
+
+ data_len = (txreq.size > PKT_PROT_LEN &&
+ ret < MAX_SKB_FRAGS) ?
+@@ -1298,7 +1303,7 @@ static void netif_idx_release(u16 pending_idx)
+ unsigned long flags;
+
+ spin_lock_irqsave(&_lock, flags);
+- dealloc_ring[MASK_PEND_IDX(dealloc_prod)] = pending_idx;
++ dealloc_ring[pending_index(dealloc_prod)] = pending_idx;
+ /* Sync with net_tx_action_dealloc: insert idx /then/ incr producer. */
+ smp_wmb();
+ dealloc_prod++;
+--
+1.7.4
+
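The pending_index() helper above relies on MAX_PENDING_REQS being a
power of two: masking an ever-growing counter with (size - 1) maps it
onto a ring slot. A minimal user-space sketch of the idiom, with
hypothetical names:

    #include <stdio.h>

    #define RING_SIZE 256u          /* must be a power of two */

    /* Counters only ever increment; the mask folds them into a slot. */
    static unsigned ring_index(unsigned i)
    {
            return i & (RING_SIZE - 1);
    }

    int main(void)
    {
            unsigned prod = 300, cons = 260;

            printf("prod slot %u, cons slot %u\n",
                   ring_index(prod), ring_index(cons));
            return 0;
    }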
+
+From d47af34f87b2d365c75aa3579ad512619ef3d579 Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Date: Thu, 19 Mar 2009 15:29:30 -0700
+Subject: [PATCH 012/203] xen: netback: convert PEND_RING_IDX into a proper typedef name
+
+Rename PEND_RING_IDX to pending_ring_idx_t. It's not used that much,
+so the extra typing won't kill anyone.
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/netback.c | 12 ++++++------
+ 1 files changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 4095622..8292e96 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -120,19 +120,19 @@ static struct pending_tx_info {
+ struct xen_netif *netif;
+ } pending_tx_info[MAX_PENDING_REQS];
+ static u16 pending_ring[MAX_PENDING_REQS];
+-typedef unsigned int PEND_RING_IDX;
++typedef unsigned int pending_ring_idx_t;
+
+-static inline PEND_RING_IDX pending_index(unsigned i)
++static inline pending_ring_idx_t pending_index(unsigned i)
+ {
+ return i & (MAX_PENDING_REQS-1);
+ }
+
+-static PEND_RING_IDX pending_prod, pending_cons;
++static pending_ring_idx_t pending_prod, pending_cons;
+ #define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
+
+ /* Freed TX SKBs get batched on this ring before return to pending_ring. */
+ static u16 dealloc_ring[MAX_PENDING_REQS];
+-static PEND_RING_IDX dealloc_prod, dealloc_cons;
++static pending_ring_idx_t dealloc_prod, dealloc_cons;
+
+ /* Doubly-linked list of in-use pending entries. */
+ static struct netbk_tx_pending_inuse pending_inuse[MAX_PENDING_REQS];
+@@ -669,7 +669,7 @@ static void tx_credit_callback(unsigned long data)
+ netif_schedule_work(netif);
+ }
+
+-static inline int copy_pending_req(PEND_RING_IDX pending_idx)
++static inline int copy_pending_req(pending_ring_idx_t pending_idx)
+ {
+ return gnttab_copy_grant_page(grant_tx_handle[pending_idx],
+ &mmap_pages[pending_idx]);
+@@ -680,7 +680,7 @@ inline static void net_tx_action_dealloc(void)
+ struct netbk_tx_pending_inuse *inuse, *n;
+ struct gnttab_unmap_grant_ref *gop;
+ u16 pending_idx;
+- PEND_RING_IDX dc, dp;
++ pending_ring_idx_t dc, dp;
+ struct xen_netif *netif;
+ int ret;
+ LIST_HEAD(list);
+--
+1.7.4
+
+
+From 56727a43f329d50c2a00fed0316ffd87d6c23ebd Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Date: Thu, 19 Mar 2009 15:31:32 -0700
+Subject: [PATCH 013/203] xen: netback: rename NR_PENDING_REQS to nr_pending_reqs()
+
+Use function syntax to show it's actually computing a value rather
+than reading a constant.
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/netback.c | 10 +++++++---
+ 1 files changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 8292e96..5410a68 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -128,7 +128,11 @@ static inline pending_ring_idx_t pending_index(unsigned i)
+ }
+
+ static pending_ring_idx_t pending_prod, pending_cons;
+-#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
+
-+static void pcistub_exit(void)
++static inline pending_ring_idx_t nr_pending_reqs(void)
+{
-+ driver_remove_file(&pciback_pci_driver.driver, &driver_attr_new_slot);
-+ driver_remove_file(&pciback_pci_driver.driver,
-+ &driver_attr_remove_slot);
-+ driver_remove_file(&pciback_pci_driver.driver, &driver_attr_slots);
-+ driver_remove_file(&pciback_pci_driver.driver, &driver_attr_quirks);
-+ driver_remove_file(&pciback_pci_driver.driver, &driver_attr_permissive);
-+
-+ pci_unregister_driver(&pciback_pci_driver);
-+ WARN_ON(unregister_msi_get_owner(pciback_get_owner));
++ return MAX_PENDING_REQS - pending_prod + pending_cons;
+}
-+
-+static int __init pcistub_init(void)
+
+ /* Freed TX SKBs get batched on this ring before return to pending_ring. */
+ static u16 dealloc_ring[MAX_PENDING_REQS];
+@@ -167,7 +171,7 @@ static inline unsigned long alloc_mfn(void)
+ static inline void maybe_schedule_tx_action(void)
+ {
+ smp_mb();
+- if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
++ if ((nr_pending_reqs() < (MAX_PENDING_REQS/2)) &&
+ !list_empty(&net_schedule_list))
+ tasklet_schedule(&net_tx_tasklet);
+ }
+@@ -1060,7 +1064,7 @@ static void net_tx_action(unsigned long unused)
+ net_tx_action_dealloc();
+
+ mop = tx_map_ops;
+- while (((NR_PENDING_REQS + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
++ while (((nr_pending_reqs() + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
+ !list_empty(&net_schedule_list)) {
+ /* Get a netif from the list with work to do. */
+ ent = net_schedule_list.next;
+--
+1.7.4
+
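Related to the helper above: because pending_prod and pending_cons are
unsigned and only ever increment, expressions such as
MAX_PENDING_REQS - pending_prod + pending_cons stay correct even after
the counters wrap. A small sketch of the wrap-safe arithmetic, with
hypothetical names:

    #include <stdio.h>

    /* Unsigned subtraction is modulo 2^N, so prod - cons counts the
     * in-flight entries correctly even after both counters wrap. */
    static unsigned in_flight(unsigned prod, unsigned cons)
    {
            return prod - cons;
    }

    int main(void)
    {
            unsigned cons = 0xfffffff0u;    /* about to wrap */
            unsigned prod = cons + 20;      /* has wrapped past zero */

            printf("in flight: %u\n", in_flight(prod, cons)); /* 20 */
            return 0;
    }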
+
+From 55b360614f1bd44d0b1395b4aabf41d8f1f13f17 Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Date: Thu, 19 Mar 2009 15:45:45 -0700
+Subject: [PATCH 014/203] xen: netback: pre-initialize list and spinlocks; use empty list to indicate not on list
+
+Statically pre-initialize net_schedule_list head and lock.
+
+Use an empty list to mark when a xen_netif is not on the schedule list,
+rather than NULL (which may upset list debugging).
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/interface.c | 1 +
+ drivers/xen/netback/netback.c | 12 ++++--------
+ 2 files changed, 5 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
+index 5e0d26d..dc4fb53 100644
+--- a/drivers/xen/netback/interface.c
++++ b/drivers/xen/netback/interface.c
+@@ -201,6 +201,7 @@ struct xen_netif *netif_alloc(struct device *parent, domid_t domid, unsigned int
+ atomic_set(&netif->refcnt, 1);
+ init_waitqueue_head(&netif->waiting_to_free);
+ netif->dev = dev;
++ INIT_LIST_HEAD(&netif->list);
+
+ netback_carrier_off(netif);
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 5410a68..cbd4b03 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -148,8 +148,8 @@ static grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
+ static struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
+ static struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
+
+-static struct list_head net_schedule_list;
+-static spinlock_t net_schedule_list_lock;
++static LIST_HEAD(net_schedule_list);
++static DEFINE_SPINLOCK(net_schedule_list_lock);
+
+ #define MAX_MFN_ALLOC 64
+ static unsigned long mfn_list[MAX_MFN_ALLOC];
+@@ -588,15 +588,14 @@ struct net_device_stats *netif_be_get_stats(struct net_device *dev)
+
+ static int __on_net_schedule_list(struct xen_netif *netif)
+ {
+- return netif->list.next != NULL;
++ return !list_empty(&netif->list);
+ }
+
+ static void remove_from_net_schedule_list(struct xen_netif *netif)
+ {
+ spin_lock_irq(&net_schedule_list_lock);
+ if (likely(__on_net_schedule_list(netif))) {
+- list_del(&netif->list);
+- netif->list.next = NULL;
++ list_del_init(&netif->list);
+ netif_put(netif);
+ }
+ spin_unlock_irq(&net_schedule_list_lock);
+@@ -1466,9 +1465,6 @@ static int __init netback_init(void)
+ for (i = 0; i < MAX_PENDING_REQS; i++)
+ pending_ring[i] = i;
+
+- spin_lock_init(&net_schedule_list_lock);
+- INIT_LIST_HEAD(&net_schedule_list);
+-
+ netbk_copy_skb_mode = NETBK_DONT_COPY_SKB;
+ if (MODPARM_copy_skb) {
+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace,
+--
+1.7.4
+
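The patch above leans on a property of the kernel's circular
doubly-linked lists: an initialized but unlinked entry points at
itself, so an emptiness test on the entry doubles as an "am I on a
list?" flag, provided removal always re-initializes the entry (as
list_del_init() does). A stand-alone, user-space rendition of the
idiom with hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>

    struct node { struct node *next, *prev; };

    static void node_init(struct node *n) { n->next = n->prev = n; }

    static bool node_on_list(const struct node *n) { return n->next != n; }

    static void node_add(struct node *head, struct node *n)
    {
            n->next = head->next;
            n->prev = head;
            head->next->prev = n;
            head->next = n;
    }

    static void node_del_init(struct node *n)
    {
            n->prev->next = n->next;
            n->next->prev = n->prev;
            node_init(n);       /* safe to test, and to delete again */
    }

    int main(void)
    {
            struct node head, a;

            node_init(&head);
            node_init(&a);
            node_add(&head, &a);
            printf("on list: %d\n", node_on_list(&a));  /* 1 */
            node_del_init(&a);
            printf("on list: %d\n", node_on_list(&a));  /* 0 */
            return 0;
    }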
+
+From e12cf57de7a6c20e4c8900ce7bf4e6924a12f49e Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Date: Thu, 19 Mar 2009 15:48:10 -0700
+Subject: [PATCH 015/203] xen: netback: remove CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
+
+Keir says:
+> > Does CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER need to be a config
+> > option? Could/should we always/never set it?
+> It doesn't work well with local delivery into dom0, nor even with IP
+> fragment reassembly. I don't think we would ever turn it on these days.
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/netback.c | 21 ---------------------
+ 1 files changed, 0 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index cbd4b03..f00e405 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -615,23 +615,11 @@ static void add_to_net_schedule_list_tail(struct xen_netif *netif)
+ spin_unlock_irq(&net_schedule_list_lock);
+ }
+
+-/*
+- * Note on CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER:
+- * If this driver is pipelining transmit requests then we can be very
+- * aggressive in avoiding new-packet notifications -- frontend only needs to
+- * send a notification if there are no outstanding unreceived responses.
+- * If we may be buffer transmit buffers for any reason then we must be rather
+- * more conservative and treat this as the final check for pending work.
+- */
+ void netif_schedule_work(struct xen_netif *netif)
+ {
+ int more_to_do;
+
+-#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
+- more_to_do = RING_HAS_UNCONSUMED_REQUESTS(&netif->tx);
+-#else
+ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
+-#endif
+
+ if (more_to_do) {
+ add_to_net_schedule_list_tail(netif);
+@@ -1355,15 +1343,6 @@ static void make_tx_response(struct xen_netif *netif,
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->tx, notify);
+ if (notify)
+ notify_remote_via_irq(netif->irq);
+-
+-#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
+- if (i == netif->tx.req_cons) {
+- int more_to_do;
+- RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
+- if (more_to_do)
+- add_to_net_schedule_list_tail(netif);
+- }
+-#endif
+ }
+
+ static struct xen_netif_rx_response *make_rx_response(struct xen_netif *netif,
+--
+1.7.4
+
+
+From adf542f9c714e3b7c76fcf9e44e0a89cae21a341 Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Date: Thu, 19 Mar 2009 22:28:52 -0700
+Subject: [PATCH 016/203] xen: netback: make netif_get/put inlines
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/common.h | 16 ++++++++++------
+ 1 files changed, 10 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index 06f04c1..9056be0 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -183,12 +183,16 @@ struct xen_netif *netif_alloc(struct device *parent, domid_t domid, unsigned int
+ int netif_map(struct xen_netif *netif, unsigned long tx_ring_ref,
+ unsigned long rx_ring_ref, unsigned int evtchn);
+
+-#define netif_get(_b) (atomic_inc(&(_b)->refcnt))
+-#define netif_put(_b) \
+- do { \
+- if ( atomic_dec_and_test(&(_b)->refcnt) ) \
+- wake_up(&(_b)->waiting_to_free); \
+- } while (0)
++static inline void netif_get(struct xen_netif *netif)
+{
-+ int pos = 0;
-+ int err = 0;
-+ int domain, bus, slot, func;
-+ int parsed;
-+
-+ if (pci_devs_to_hide && *pci_devs_to_hide) {
-+ do {
-+ parsed = 0;
-+
-+ err = sscanf(pci_devs_to_hide + pos,
-+ " (%x:%x:%x.%x) %n",
-+ &domain, &bus, &slot, &func, &parsed);
-+ if (err != 4) {
-+ domain = 0;
-+ err = sscanf(pci_devs_to_hide + pos,
-+ " (%x:%x.%x) %n",
-+ &bus, &slot, &func, &parsed);
-+ if (err != 3)
-+ goto parse_error;
-+ }
-+
-+ err = pcistub_device_id_add(domain, bus, slot, func);
-+ if (err)
-+ goto out;
-+
-+ /* if parsed<=0, we've reached the end of the string */
-+ pos += parsed;
-+ } while (parsed > 0 && pci_devs_to_hide[pos]);
-+ }
-+
-+ /* If we're the first PCI Device Driver to register, we're the
-+ * first one to get offered PCI devices as they become
-+ * available (and thus we can be the first to grab them)
-+ */
-+ err = pci_register_driver(&pciback_pci_driver);
-+ if (err < 0)
-+ goto out;
-+
-+ err = driver_create_file(&pciback_pci_driver.driver,
-+ &driver_attr_new_slot);
-+ if (!err)
-+ err = driver_create_file(&pciback_pci_driver.driver,
-+ &driver_attr_remove_slot);
-+ if (!err)
-+ err = driver_create_file(&pciback_pci_driver.driver,
-+ &driver_attr_slots);
-+ if (!err)
-+ err = driver_create_file(&pciback_pci_driver.driver,
-+ &driver_attr_quirks);
-+ if (!err)
-+ err = driver_create_file(&pciback_pci_driver.driver,
-+ &driver_attr_permissive);
-+
-+ if (!err)
-+ err = register_msi_get_owner(pciback_get_owner);
-+ if (err)
-+ pcistub_exit();
-+
-+ out:
-+ return err;
-+
-+ parse_error:
-+ printk(KERN_ERR "pciback: Error parsing pci_devs_to_hide at \"%s\"\n",
-+ pci_devs_to_hide + pos);
-+ return -EINVAL;
++ atomic_inc(&netif->refcnt);
+}
+
-+#ifndef MODULE
-+/*
-+ * fs_initcall happens before device_initcall
-+ * so pciback *should* get called first (b/c we
-+ * want to suck up any device before other drivers
-+ * get a chance by being the first pci device
-+ * driver to register)
-+ */
-+fs_initcall(pcistub_init);
-+#endif
-+
-+static int __init pciback_init(void)
++static inline void netif_put(struct xen_netif *netif)
+{
-+ int err;
++ if (atomic_dec_and_test(&netif->refcnt))
++ wake_up(&netif->waiting_to_free);
++}
+
+ void netif_xenbus_init(void);
+
+--
+1.7.4
+
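netif_put() is the usual dec-and-test pattern: only the caller that
drops the final reference performs the wakeup. A user-space analogue
using C11 atomics (the kernel pairs atomic_dec_and_test() with a
waitqueue rather than a printf; names here are hypothetical):

    #include <stdatomic.h>
    #include <stdio.h>

    struct obj { atomic_int refcnt; };

    static void obj_get(struct obj *o)
    {
            atomic_fetch_add(&o->refcnt, 1);
    }

    static void obj_put(struct obj *o)
    {
            /* fetch_sub returns the old value: 1 -> 0 is the last ref */
            if (atomic_fetch_sub(&o->refcnt, 1) == 1)
                    printf("last reference dropped; wake the freer\n");
    }

    int main(void)
    {
            struct obj o;

            atomic_init(&o.refcnt, 1);
            obj_get(&o);
            obj_put(&o);        /* 2 -> 1: nothing to do */
            obj_put(&o);        /* 1 -> 0: wakeup fires */
            return 0;
    }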
+
+From f06459a199f953a68f001f06e54dde54e1e16c87 Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Date: Thu, 19 Mar 2009 22:30:24 -0700
+Subject: [PATCH 017/203] xen: netback: move code around
+
+Split net_tx_action() into several functions; move variables into
+their innermost scopes; rename "i" to "idx".
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/netback.c | 158 ++++++++++++++++++++++++-----------------
+ 1 files changed, 94 insertions(+), 64 deletions(-)
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index f00e405..4d63ff3 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -773,7 +773,8 @@ static void netbk_tx_err(struct xen_netif *netif, struct xen_netif_tx_request *t
+ netif_put(netif);
+ }
+
+-static int netbk_count_requests(struct xen_netif *netif, struct xen_netif_tx_request *first,
++static int netbk_count_requests(struct xen_netif *netif,
++ struct xen_netif_tx_request *first,
+ struct xen_netif_tx_request *txp, int work_to_do)
+ {
+ RING_IDX cons = netif->tx.req_cons;
+@@ -1032,30 +1033,58 @@ out:
+ return err;
+ }
+
+-/* Called after netfront has transmitted */
+-static void net_tx_action(unsigned long unused)
++static bool tx_credit_exceeded(struct xen_netif *netif, unsigned size)
+ {
+- struct list_head *ent;
+- struct sk_buff *skb;
+- struct xen_netif *netif;
+- struct xen_netif_tx_request txreq;
+- struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
+- struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
+- u16 pending_idx;
+- RING_IDX i;
+- struct gnttab_map_grant_ref *mop;
+- unsigned int data_len;
+- int ret, work_to_do;
++ unsigned long now = jiffies;
++ unsigned long next_credit =
++ netif->credit_timeout.expires +
++ msecs_to_jiffies(netif->credit_usec / 1000);
+
-+ err = pciback_config_init();
-+ if (err)
-+ return err;
++ /* Timer could already be pending in rare cases. */
++ if (timer_pending(&netif->credit_timeout))
++ return true;
+
-+#ifdef MODULE
-+ err = pcistub_init();
-+ if (err < 0)
-+ return err;
-+#endif
++ /* Passed the point where we can replenish credit? */
++ if (time_after_eq(now, next_credit)) {
++ netif->credit_timeout.expires = now;
++ tx_add_credit(netif);
++ }
+
+- if (dealloc_cons != dealloc_prod)
+- net_tx_action_dealloc();
++ /* Still too big to send right now? Set a callback. */
++ if (size > netif->remaining_credit) {
++ netif->credit_timeout.data =
++ (unsigned long)netif;
++ netif->credit_timeout.function =
++ tx_credit_callback;
++ mod_timer(&netif->credit_timeout,
++ next_credit);
+
-+ pcistub_init_devices_late();
-+ err = pciback_xenbus_register();
-+ if (err)
-+ pcistub_exit();
++ return true;
++ }
+
-+ return err;
++ return false;
+}
+
-+static void __exit pciback_cleanup(void)
++static unsigned net_tx_build_mops(void)
+{
-+ pciback_xenbus_unregister();
-+ pcistub_exit();
-+}
-+
-+module_init(pciback_init);
-+module_exit(pciback_cleanup);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-diff --git a/drivers/xen/pciback/pciback.h b/drivers/xen/pciback/pciback.h
-new file mode 100644
-index 0000000..6744f45
---- /dev/null
-+++ b/drivers/xen/pciback/pciback.h
-@@ -0,0 +1,126 @@
-+/*
-+ * PCI Backend Common Data Structures & Function Declarations
-+ *
-+ * Author: Ryan Wilson <hap9(a)epoch.ncsc.mil>
-+ */
-+#ifndef __XEN_PCIBACK_H__
-+#define __XEN_PCIBACK_H__
-+
-+#include <linux/pci.h>
-+#include <linux/interrupt.h>
-+#include <xen/xenbus.h>
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <linux/workqueue.h>
-+#include <asm/atomic.h>
-+#include <xen/interface/io/pciif.h>
-+
-+struct pci_dev_entry {
-+ struct list_head list;
-+ struct pci_dev *dev;
-+};
-+
-+#define _PDEVF_op_active (0)
-+#define PDEVF_op_active (1<<(_PDEVF_op_active))
-+#define _PCIB_op_pending (1)
-+#define PCIB_op_pending (1<<(_PCIB_op_pending))
-+
-+struct pciback_device {
-+ void *pci_dev_data;
-+ spinlock_t dev_lock;
-+
-+ struct xenbus_device *xdev;
-+
-+ struct xenbus_watch be_watch;
-+ u8 be_watching;
-+
-+ int evtchn_irq;
-+
-+ struct vm_struct *sh_area;
-+ struct xen_pci_sharedinfo *sh_info;
-+
-+ unsigned long flags;
-+
-+ struct work_struct op_work;
-+};
-+
-+struct pciback_dev_data {
-+ struct list_head config_fields;
-+ int permissive;
-+ int warned_on_write;
-+};
-+
-+/* Get/Put PCI Devices that are hidden from the PCI Backend Domain */
-+struct pci_dev *pcistub_get_pci_dev_by_slot(struct pciback_device *pdev,
-+ int domain, int bus,
-+ int slot, int func);
-+struct pci_dev *pcistub_get_pci_dev(struct pciback_device *pdev,
-+ struct pci_dev *dev);
-+void pcistub_put_pci_dev(struct pci_dev *dev);
-+
-+/* Ensure a device is turned off or reset */
-+void pciback_reset_device(struct pci_dev *pdev);
++ struct gnttab_map_grant_ref *mop;
++ struct sk_buff *skb;
++ int ret;
+
+ mop = tx_map_ops;
+ while (((nr_pending_reqs() + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
+ !list_empty(&net_schedule_list)) {
++ struct xen_netif *netif;
++ struct xen_netif_tx_request txreq;
++ struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
++ struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
++ u16 pending_idx;
++ RING_IDX idx;
++ int work_to_do;
++ unsigned int data_len;
++
+ /* Get a netif from the list with work to do. */
+- ent = net_schedule_list.next;
+- netif = list_entry(ent, struct xen_netif, list);
++ netif = list_first_entry(&net_schedule_list, struct xen_netif, list);
+ netif_get(netif);
+ remove_from_net_schedule_list(netif);
+
+@@ -1065,67 +1094,43 @@ static void net_tx_action(unsigned long unused)
+ continue;
+ }
+
+- i = netif->tx.req_cons;
++ idx = netif->tx.req_cons;
+ rmb(); /* Ensure that we see the request before we copy it. */
+- memcpy(&txreq, RING_GET_REQUEST(&netif->tx, i), sizeof(txreq));
++ memcpy(&txreq, RING_GET_REQUEST(&netif->tx, idx), sizeof(txreq));
+
+ /* Credit-based scheduling. */
+- if (txreq.size > netif->remaining_credit) {
+- unsigned long now = jiffies;
+- unsigned long next_credit =
+- netif->credit_timeout.expires +
+- msecs_to_jiffies(netif->credit_usec / 1000);
+-
+- /* Timer could already be pending in rare cases. */
+- if (timer_pending(&netif->credit_timeout)) {
+- netif_put(netif);
+- continue;
+- }
+-
+- /* Passed the point where we can replenish credit? */
+- if (time_after_eq(now, next_credit)) {
+- netif->credit_timeout.expires = now;
+- tx_add_credit(netif);
+- }
+-
+- /* Still too big to send right now? Set a callback. */
+- if (txreq.size > netif->remaining_credit) {
+- netif->credit_timeout.data =
+- (unsigned long)netif;
+- netif->credit_timeout.function =
+- tx_credit_callback;
+- mod_timer(&netif->credit_timeout,
+- next_credit);
+- netif_put(netif);
+- continue;
+- }
++ if (txreq.size > netif->remaining_credit &&
++ tx_credit_exceeded(netif, txreq.size)) {
++ netif_put(netif);
++ continue;
+ }
+
-+/* Access a virtual configuration space for a PCI device */
-+int pciback_config_init(void);
-+int pciback_config_init_dev(struct pci_dev *dev);
-+void pciback_config_free_dyn_fields(struct pci_dev *dev);
-+void pciback_config_reset_dev(struct pci_dev *dev);
-+void pciback_config_free_dev(struct pci_dev *dev);
-+int pciback_config_read(struct pci_dev *dev, int offset, int size,
-+ u32 * ret_val);
-+int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value);
-+
-+/* Handle requests for specific devices from the frontend */
-+typedef int (*publish_pci_dev_cb) (struct pciback_device *pdev,
-+ unsigned int domain, unsigned int bus,
-+ unsigned int devfn, unsigned int devid);
-+typedef int (*publish_pci_root_cb) (struct pciback_device * pdev,
-+ unsigned int domain, unsigned int bus);
-+int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
-+ int devid, publish_pci_dev_cb publish_cb);
-+void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev);
-+struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
-+ unsigned int domain, unsigned int bus,
-+ unsigned int devfn);
-+
-+/**
-+* Add for domain0 PCIE-AER handling. Get guest domain/bus/devfn in pciback
-+* before sending aer request to pcifront, so that guest could identify
-+* device, coopearte with pciback to finish aer recovery job if device driver
-+* has the capability
-+*/
-+
-+int pciback_get_pcifront_dev(struct pci_dev *pcidev, struct pciback_device *pdev,
-+ unsigned int *domain, unsigned int *bus, unsigned int *devfn);
-+int pciback_init_devices(struct pciback_device *pdev);
-+int pciback_publish_pci_roots(struct pciback_device *pdev,
-+ publish_pci_root_cb cb);
-+void pciback_release_devices(struct pciback_device *pdev);
-+
-+/* Handles events from front-end */
-+irqreturn_t pciback_handle_event(int irq, void *dev_id, struct pt_regs *regs);
-+void pciback_do_op(void *data);
-+
-+int pciback_xenbus_register(void);
-+void pciback_xenbus_unregister(void);
-+
-+#ifdef CONFIG_PCI_MSI
-+int pciback_enable_msi(struct pciback_device *pdev,
-+ struct pci_dev *dev, struct xen_pci_op *op);
+ netif->remaining_credit -= txreq.size;
+
+ work_to_do--;
+- netif->tx.req_cons = ++i;
++ netif->tx.req_cons = ++idx;
+
+ memset(extras, 0, sizeof(extras));
+ if (txreq.flags & NETTXF_extra_info) {
+ work_to_do = netbk_get_extras(netif, extras,
+ work_to_do);
+- i = netif->tx.req_cons;
++ idx = netif->tx.req_cons;
+ if (unlikely(work_to_do < 0)) {
+- netbk_tx_err(netif, &txreq, i);
++ netbk_tx_err(netif, &txreq, idx);
+ continue;
+ }
+ }
+
+ ret = netbk_count_requests(netif, &txreq, txfrags, work_to_do);
+ if (unlikely(ret < 0)) {
+- netbk_tx_err(netif, &txreq, i - ret);
++ netbk_tx_err(netif, &txreq, idx - ret);
+ continue;
+ }
+- i += ret;
++ idx += ret;
+
+ if (unlikely(txreq.size < ETH_HLEN)) {
+ DPRINTK("Bad packet size: %d\n", txreq.size);
+- netbk_tx_err(netif, &txreq, i);
++ netbk_tx_err(netif, &txreq, idx);
+ continue;
+ }
+
+@@ -1134,7 +1139,7 @@ static void net_tx_action(unsigned long unused)
+ DPRINTK("txreq.offset: %x, size: %u, end: %lu\n",
+ txreq.offset, txreq.size,
+ (txreq.offset &~PAGE_MASK) + txreq.size);
+- netbk_tx_err(netif, &txreq, i);
++ netbk_tx_err(netif, &txreq, idx);
+ continue;
+ }
+
+@@ -1148,7 +1153,7 @@ static void net_tx_action(unsigned long unused)
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(skb == NULL)) {
+ DPRINTK("Can't allocate a skb in start_xmit.\n");
+- netbk_tx_err(netif, &txreq, i);
++ netbk_tx_err(netif, &txreq, idx);
+ break;
+ }
+
+@@ -1161,7 +1166,7 @@ static void net_tx_action(unsigned long unused)
+
+ if (netbk_set_skb_gso(skb, gso)) {
+ kfree_skb(skb);
+- netbk_tx_err(netif, &txreq, i);
++ netbk_tx_err(netif, &txreq, idx);
+ continue;
+ }
+ }
+@@ -1199,23 +1204,27 @@ static void net_tx_action(unsigned long unused)
+
+ mop = netbk_get_requests(netif, skb, txfrags, mop);
+
+- netif->tx.req_cons = i;
++ netif->tx.req_cons = idx;
+ netif_schedule_work(netif);
+
+ if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
+ break;
+ }
+
+- if (mop == tx_map_ops)
+- return;
++ return mop - tx_map_ops;
++}
+
+- ret = HYPERVISOR_grant_table_op(
+- GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops);
+- BUG_ON(ret);
++static void net_tx_submit(void)
++{
++ struct gnttab_map_grant_ref *mop;
++ struct sk_buff *skb;
+
+ mop = tx_map_ops;
+ while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
+ struct xen_netif_tx_request *txp;
++ struct xen_netif *netif;
++ u16 pending_idx;
++ unsigned data_len;
+
+ pending_idx = *((u16 *)skb->data);
+ netif = pending_tx_info[pending_idx].netif;
+@@ -1288,6 +1297,27 @@ static void net_tx_action(unsigned long unused)
+ }
+ }
+
++/* Called after netfront has transmitted */
++static void net_tx_action(unsigned long unused)
++{
++ unsigned nr_mops;
++ int ret;
+
-+int pciback_disable_msi(struct pciback_device *pdev,
-+ struct pci_dev *dev, struct xen_pci_op *op);
++ if (dealloc_cons != dealloc_prod)
++ net_tx_action_dealloc();
+
++ nr_mops = net_tx_build_mops();
+
-+int pciback_enable_msix(struct pciback_device *pdev,
-+ struct pci_dev *dev, struct xen_pci_op *op);
++ if (nr_mops == 0)
++ return;
+
-+int pciback_disable_msix(struct pciback_device *pdev,
-+ struct pci_dev *dev, struct xen_pci_op *op);
-+#endif
-+extern int verbose_request;
++ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
++ tx_map_ops, nr_mops);
++ BUG_ON(ret);
+
-+void test_and_schedule_op(struct pciback_device *pdev);
-+#endif
++ net_tx_submit();
++}
+
-diff --git a/drivers/xen/pciback/pciback_ops.c b/drivers/xen/pciback/pciback_ops.c
-new file mode 100644
-index 0000000..b85b2db
---- /dev/null
-+++ b/drivers/xen/pciback/pciback_ops.c
-@@ -0,0 +1,134 @@
+ static void netif_idx_release(u16 pending_idx)
+ {
+ static DEFINE_SPINLOCK(_lock);
+--
+1.7.4
+
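tx_credit_exceeded(), factored out above, is essentially a token-bucket
check: credit is replenished once the timeout has passed, and a request
larger than the remaining credit is deferred (the real driver also arms
a callback timer and decrements the credit on success, which this
sketch omits). A rough user-space approximation using seconds instead
of jiffies, with hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    struct vif {
            unsigned long remaining_credit;
            unsigned long credit_bytes;     /* refill quantum */
            time_t credit_expires;          /* next refill time */
    };

    static bool tx_credit_exceeded(struct vif *v, unsigned long size,
                                   time_t now)
    {
            if (now >= v->credit_expires) {         /* replenish */
                    v->remaining_credit = v->credit_bytes;
                    v->credit_expires = now + 1;
            }
            return size > v->remaining_credit;      /* defer if too big */
    }

    int main(void)
    {
            struct vif v = { .remaining_credit = 0,
                             .credit_bytes = 1500,
                             .credit_expires = 0 };
            time_t now = time(NULL);

            printf("1500 bytes deferred? %d\n",
                   tx_credit_exceeded(&v, 1500, now));  /* 0 */
            printf("9000 bytes deferred? %d\n",
                   tx_credit_exceeded(&v, 9000, now));  /* 1 */
            return 0;
    }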
+
+From cec84ff81d9f6ca882908572b984215529b4117b Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Date: Fri, 20 Mar 2009 23:18:12 -0700
+Subject: [PATCH 018/203] xen: netback: document PKT_PROT_LEN
+
+Document the rationale for the existence and value of PKT_PROT_LEN.
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/netback.c | 9 +++++++++
+ 1 files changed, 9 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 4d63ff3..80b424f 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -113,6 +113,15 @@ static inline int netif_page_index(struct page *pg)
+ return idx;
+ }
+
+/*
-+ * PCI Backend Operations - respond to PCI requests from Frontend
-+ *
-+ * Author: Ryan Wilson <hap9(a)epoch.ncsc.mil>
++ * This is the amount of packet we copy rather than map, so that the
++ * guest can't fiddle with the contents of the headers while we do
++ * packet processing on them (netfilter, routing, etc). This could
++ * probably do with being larger, since 1) 64-bytes isn't necessarily
++ * long enough to cover a full christmas-tree ip+tcp header, let alone
++ * packet contents, and 2) the data is probably in cache anyway
++ * (though perhaps some other cpu's cache).
+ */
-+#include <linux/module.h>
-+#include <linux/wait.h>
-+#include <asm/bitops.h>
-+#include <xen/evtchn.h>
-+#include "pciback.h"
-+
-+int verbose_request = 0;
-+module_param(verbose_request, int, 0644);
+ #define PKT_PROT_LEN 64
+
+ static struct pending_tx_info {
+--
+1.7.4
+
+
+From a9402ee935757e8facebc6e886f9912c2c523da7 Mon Sep 17 00:00:00 2001
+From: Christophe Saout <chtephan(a)leto.intern.saout.de>
+Date: Sun, 12 Apr 2009 13:40:27 +0200
+Subject: [PATCH 019/203] xen: netback: use dev_name() instead of removed ->bus_id.
+
+Signed-off-by: Christophe Saout <chtephan(a)leto.intern.saout.de>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/interface.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
+index dc4fb53..3bb5c20 100644
+--- a/drivers/xen/netback/interface.c
++++ b/drivers/xen/netback/interface.c
+@@ -123,7 +123,7 @@ static void netbk_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+ {
+ strcpy(info->driver, "netbk");
+- strcpy(info->bus_info, dev->dev.parent->bus_id);
++ strcpy(info->bus_info, dev_name(dev->dev.parent));
+ }
+
+ static const struct netif_stat {
+--
+1.7.4
+
+
+From 35de1701fca19d693e9722bffbe7609caf1d5ac6 Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Date: Mon, 29 Jun 2009 14:04:23 -0700
+Subject: [PATCH 020/203] xen: netback: convert to net_device_ops
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/interface.c | 17 +++++++++++------
+ 1 files changed, 11 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
+index 3bb5c20..21c1f95 100644
+--- a/drivers/xen/netback/interface.c
++++ b/drivers/xen/netback/interface.c
+@@ -178,6 +178,15 @@ static struct ethtool_ops network_ethtool_ops =
+ .get_strings = netbk_get_strings,
+ };
+
++static struct net_device_ops netback_ops =
++{
++ .ndo_start_xmit = netif_be_start_xmit,
++ .ndo_get_stats = netif_be_get_stats,
++ .ndo_open = net_open,
++ .ndo_stop = net_close,
++ .ndo_change_mtu = netbk_change_mtu,
++};
+
-+/* Ensure a device is "turned off" and ready to be exported.
-+ * (Also see pciback_config_reset to ensure virtual configuration space is
-+ * ready to be re-exported)
-+ */
-+void pciback_reset_device(struct pci_dev *dev)
+ struct xen_netif *netif_alloc(struct device *parent, domid_t domid, unsigned int handle)
+ {
+ int err = 0;
+@@ -213,12 +222,8 @@ struct xen_netif *netif_alloc(struct device *parent, domid_t domid, unsigned int
+
+ init_timer(&netif->tx_queue_timeout);
+
+- dev->hard_start_xmit = netif_be_start_xmit;
+- dev->get_stats = netif_be_get_stats;
+- dev->open = net_open;
+- dev->stop = net_close;
+- dev->change_mtu = netbk_change_mtu;
+- dev->features = NETIF_F_IP_CSUM;
++ dev->netdev_ops = &netback_ops;
++ dev->features = NETIF_F_IP_CSUM;
+
+ SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
+
+--
+1.7.4
+
+
+From c6f3885ef05e96489025e1c1c7299aac7cf43d87 Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Date: Fri, 4 Sep 2009 14:55:43 -0700
+Subject: [PATCH 021/203] xen: netback: reinstate missing code
+
+Change c3219dc868fe3e84070d6da2d0759a834b6f7251, "Completely drop flip
+support", was a bit too aggressive in removing code: it removed a chunk
+that was needed not only for flipping but also whenever a buffer crossed
+a page boundary. Reinstate that code.
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/netback.c | 91 +++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 91 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 80b424f..7c0f05b 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -185,6 +185,82 @@ static inline void maybe_schedule_tx_action(void)
+ tasklet_schedule(&net_tx_tasklet);
+ }
+
++static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
+{
-+ u16 cmd;
++ struct skb_shared_info *ninfo;
++ struct sk_buff *nskb;
++ unsigned long offset;
++ int ret;
++ int len;
++ int headlen;
+
-+ /* Disable devices (but not bridges) */
-+ if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
-+ pci_disable_device(dev);
++ BUG_ON(skb_shinfo(skb)->frag_list != NULL);
+
-+ pci_write_config_word(dev, PCI_COMMAND, 0);
++ nskb = alloc_skb(SKB_MAX_HEAD(0), GFP_ATOMIC | __GFP_NOWARN);
++ if (unlikely(!nskb))
++ goto err;
+
-+ dev->is_enabled = 0;
-+ dev->is_busmaster = 0;
-+ } else {
-+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
-+ if (cmd & (PCI_COMMAND_INVALIDATE)) {
-+ cmd &= ~(PCI_COMMAND_INVALIDATE);
-+ pci_write_config_word(dev, PCI_COMMAND, cmd);
++ skb_reserve(nskb, NET_SKB_PAD + NET_IP_ALIGN);
++ headlen = skb_end_pointer(nskb) - nskb->data;
++ if (headlen > skb_headlen(skb))
++ headlen = skb_headlen(skb);
++ ret = skb_copy_bits(skb, 0, __skb_put(nskb, headlen), headlen);
++ BUG_ON(ret);
+
-+ dev->is_busmaster = 0;
-+ }
-+ }
-+}
-+extern wait_queue_head_t aer_wait_queue;
-+extern struct workqueue_struct *pciback_wq;
-+/*
-+* Now the same evtchn is used for both pcifront conf_read_write request
-+* as well as pcie aer front end ack. We use a new work_queue to schedule
-+* pciback conf_read_write service for avoiding confict with aer_core
-+* do_recovery job which also use the system default work_queue
-+*/
-+void test_and_schedule_op(struct pciback_device *pdev)
-+{
-+ /* Check that frontend is requesting an operation and that we are not
-+ * already processing a request */
-+ if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
-+ && !test_and_set_bit(_PDEVF_op_active, &pdev->flags))
-+ {
-+ queue_work(pciback_wq, &pdev->op_work);
-+ }
-+ /*_XEN_PCIB_active should have been cleared by pcifront. And also make
-+ sure pciback is waiting for ack by checking _PCIB_op_pending*/
-+ if (!test_bit(_XEN_PCIB_active,(unsigned long *)&pdev->sh_info->flags)
-+ &&test_bit(_PCIB_op_pending, &pdev->flags)) {
-+ wake_up(&aer_wait_queue);
-+ }
-+}
++ ninfo = skb_shinfo(nskb);
++ ninfo->gso_size = skb_shinfo(skb)->gso_size;
++ ninfo->gso_type = skb_shinfo(skb)->gso_type;
+
-+/* Performing the configuration space reads/writes must not be done in atomic
-+ * context because some of the pci_* functions can sleep (mostly due to ACPI
-+ * use of semaphores). This function is intended to be called from a work
-+ * queue in process context taking a struct pciback_device as a parameter */
-+void pciback_do_op(void *data)
-+{
-+ struct pciback_device *pdev = data;
-+ struct pci_dev *dev;
-+ struct xen_pci_op *op = &pdev->sh_info->op;
++ offset = headlen;
++ len = skb->len - headlen;
+
-+ dev = pciback_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
++ nskb->len = skb->len;
++ nskb->data_len = len;
++ nskb->truesize += len;
+
-+ if (dev == NULL)
-+ op->err = XEN_PCI_ERR_dev_not_found;
-+ else
-+ {
-+ switch (op->cmd)
-+ {
-+ case XEN_PCI_OP_conf_read:
-+ op->err = pciback_config_read(dev,
-+ op->offset, op->size, &op->value);
-+ break;
-+ case XEN_PCI_OP_conf_write:
-+ op->err = pciback_config_write(dev,
-+ op->offset, op->size, op->value);
-+ break;
-+#ifdef CONFIG_PCI_MSI
-+ case XEN_PCI_OP_enable_msi:
-+ op->err = pciback_enable_msi(pdev, dev, op);
-+ break;
-+ case XEN_PCI_OP_disable_msi:
-+ op->err = pciback_disable_msi(pdev, dev, op);
-+ break;
-+ case XEN_PCI_OP_enable_msix:
-+ op->err = pciback_enable_msix(pdev, dev, op);
-+ break;
-+ case XEN_PCI_OP_disable_msix:
-+ op->err = pciback_disable_msix(pdev, dev, op);
-+ break;
-+#endif
-+ default:
-+ op->err = XEN_PCI_ERR_not_implemented;
-+ break;
++ while (len) {
++ struct page *page;
++ int copy;
++ int zero;
++
++ if (unlikely(ninfo->nr_frags >= MAX_SKB_FRAGS)) {
++ dump_stack();
++ goto err_free;
+ }
-+ }
-+ /* Tell the driver domain that we're done. */
-+ wmb();
-+ clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
-+ notify_remote_via_irq(pdev->evtchn_irq);
+
-+ /* Mark that we're done. */
-+ smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
-+ clear_bit(_PDEVF_op_active, &pdev->flags);
-+ smp_mb__after_clear_bit(); /* /before/ final check for work */
++ copy = len >= PAGE_SIZE ? PAGE_SIZE : len;
++ zero = len >= PAGE_SIZE ? 0 : __GFP_ZERO;
+
-+ /* Check to see if the driver domain tried to start another request in
-+ * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
-+ */
-+ test_and_schedule_op(pdev);
-+}
++ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN | zero);
++ if (unlikely(!page))
++ goto err_free;
+
-+irqreturn_t pciback_handle_event(int irq, void *dev_id, struct pt_regs *regs)
-+{
-+ struct pciback_device *pdev = dev_id;
++ ret = skb_copy_bits(skb, offset, page_address(page), copy);
++ BUG_ON(ret);
+
-+ test_and_schedule_op(pdev);
++ ninfo->frags[ninfo->nr_frags].page = page;
++ ninfo->frags[ninfo->nr_frags].page_offset = 0;
++ ninfo->frags[ninfo->nr_frags].size = copy;
++ ninfo->nr_frags++;
+
-+ return IRQ_HANDLED;
-+}
-diff --git a/drivers/xen/pciback/slot.c b/drivers/xen/pciback/slot.c
-new file mode 100644
-index 0000000..105a8b6
---- /dev/null
-+++ b/drivers/xen/pciback/slot.c
-@@ -0,0 +1,187 @@
-+/*
-+ * PCI Backend - Provides a Virtual PCI bus (with real devices)
-+ * to the frontend
-+ *
-+ * Author: Ryan Wilson <hap9(a)epoch.ncsc.mil> (vpci.c)
-+ * Author: Tristan Gingold <tristan.gingold(a)bull.net>, from vpci.c
-+ */
++ offset += copy;
++ len -= copy;
++ }
+
-+#include <linux/list.h>
-+#include <linux/slab.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include "pciback.h"
++ offset = nskb->data - skb->data;
+
-+/* There are at most 32 slots in a pci bus. */
-+#define PCI_SLOT_MAX 32
++ nskb->transport_header = skb->transport_header + offset;
++ nskb->network_header = skb->network_header + offset;
++ nskb->mac_header = skb->mac_header + offset;
+
-+#define PCI_BUS_NBR 2
++ return nskb;
+
-+struct slot_dev_data {
-+ /* Access to dev_list must be protected by lock */
-+ struct pci_dev *slots[PCI_BUS_NBR][PCI_SLOT_MAX];
-+ spinlock_t lock;
-+};
++ err_free:
++ kfree_skb(nskb);
++ err:
++ return NULL;
++}
+
-+struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
-+ unsigned int domain, unsigned int bus,
-+ unsigned int devfn)
-+{
-+ struct pci_dev *dev = NULL;
-+ struct slot_dev_data *slot_dev = pdev->pci_dev_data;
-+ unsigned long flags;
-+
-+ if (domain != 0 || PCI_FUNC(devfn) != 0)
-+ return NULL;
-+
-+ if (PCI_SLOT(devfn) >= PCI_SLOT_MAX || bus >= PCI_BUS_NBR)
-+ return NULL;
-+
-+ spin_lock_irqsave(&slot_dev->lock, flags);
-+ dev = slot_dev->slots[bus][PCI_SLOT(devfn)];
-+ spin_unlock_irqrestore(&slot_dev->lock, flags);
-+
-+ return dev;
-+}
-+
-+int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
-+ int devid, publish_pci_dev_cb publish_cb)
-+{
-+ int err = 0, slot, bus;
-+ struct slot_dev_data *slot_dev = pdev->pci_dev_data;
-+ unsigned long flags;
-+
-+ if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
-+ err = -EFAULT;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Can't export bridges on the virtual PCI bus");
-+ goto out;
+ static inline int netbk_max_required_rx_slots(struct xen_netif *netif)
+ {
+ if (netif->features & (NETIF_F_SG|NETIF_F_TSO))
+@@ -218,6 +294,21 @@ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ if (unlikely(!netif_schedulable(netif) || netbk_queue_full(netif)))
+ goto drop;
+
++ /*
++ * XXX For now we also copy skbuffs whose head crosses a page
++ * boundary, because netbk_gop_skb can't handle them.
++ */
++ if ((skb_headlen(skb) + offset_in_page(skb->data)) >= PAGE_SIZE) {
++ struct sk_buff *nskb = netbk_copy_skb(skb);
++ if ( unlikely(nskb == NULL) )
++ goto drop;
++ /* Copy only the header fields we use in this driver. */
++ nskb->dev = skb->dev;
++ nskb->ip_summed = skb->ip_summed;
++ dev_kfree_skb(skb);
++ skb = nskb;
+ }
+
-+ spin_lock_irqsave(&slot_dev->lock, flags);
-+
-+ /* Assign to a new slot on the virtual PCI bus */
-+ for (bus = 0; bus < PCI_BUS_NBR; bus++)
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ if (slot_dev->slots[bus][slot] == NULL) {
-+ printk(KERN_INFO
-+ "pciback: slot: %s: assign to virtual slot %d, bus %d\n",
-+ pci_name(dev), slot, bus);
-+ slot_dev->slots[bus][slot] = dev;
-+ goto unlock;
-+ }
-+ }
-+
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "No more space on root virtual PCI bus");
-+
-+ unlock:
-+ spin_unlock_irqrestore(&slot_dev->lock, flags);
-+
-+ /* Publish this device. */
-+ if(!err)
-+ err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, 0), devid);
-+
-+ out:
-+ return err;
-+}
-+
-+void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
-+{
-+ int slot, bus;
-+ struct slot_dev_data *slot_dev = pdev->pci_dev_data;
-+ struct pci_dev *found_dev = NULL;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&slot_dev->lock, flags);
-+
-+ for (bus = 0; bus < PCI_BUS_NBR; bus++)
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ if (slot_dev->slots[bus][slot] == dev) {
-+ slot_dev->slots[bus][slot] = NULL;
-+ found_dev = dev;
-+ goto out;
-+ }
-+ }
+ netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1 +
+ !!skb_shinfo(skb)->gso_size;
+ netif_get(netif);
+--
+1.7.4
+
+
+From 2e290d790877df4368691180f76206ad27a42505 Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Date: Wed, 9 Sep 2009 15:19:15 -0700
+Subject: [PATCH 022/203] xen: netback: remove debug noise
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/netback.c | 2 --
+ 1 files changed, 0 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 7c0f05b..d7d738e 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -1537,8 +1537,6 @@ static int __init netback_init(void)
+ int i;
+ struct page *page;
+
+- printk(KERN_CRIT "*** netif_init\n");
+-
+ if (!xen_domain())
+ return -ENODEV;
+
+--
+1.7.4
+
+
+From 3ba3bb7d563704c3050de6116aa0a761a5791428 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
+Date: Thu, 8 Oct 2009 13:23:09 -0400
+Subject: [PATCH 023/203] Fix compile warnings: ignoring return value of 'xenbus_register_backend' ..
+
+We neglected to check the return value of xenbus_register_backend
+and to take action when it fails. This patch fixes that and adds
+code to deal with that type of failure.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/common.h | 2 +-
+ drivers/xen/netback/netback.c | 12 +++++++++++-
+ drivers/xen/netback/xenbus.c | 4 ++--
+ 3 files changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index 9056be0..0675946 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -194,7 +194,7 @@ static inline void netif_put(struct xen_netif *netif)
+ wake_up(&netif->waiting_to_free);
+ }
+
+-void netif_xenbus_init(void);
++int netif_xenbus_init(void);
+
+ #define netif_schedulable(netif) \
+ (netif_running((netif)->dev) && netback_carrier_ok(netif))
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index d7d738e..860c61e 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -1536,6 +1536,7 @@ static int __init netback_init(void)
+ {
+ int i;
+ struct page *page;
++ int rc = 0;
+
+ if (!xen_domain())
+ return -ENODEV;
+@@ -1583,7 +1584,9 @@ static int __init netback_init(void)
+
+ //netif_accel_init();
+
+- netif_xenbus_init();
++ rc = netif_xenbus_init();
++ if (rc)
++ goto failed_init;
+
+ #ifdef NETBE_DEBUG_INTERRUPT
+ (void)bind_virq_to_irqhandler(VIRQ_DEBUG,
+@@ -1595,6 +1598,13 @@ static int __init netback_init(void)
+ #endif
+
+ return 0;
+
-+ out:
-+ spin_unlock_irqrestore(&slot_dev->lock, flags);
++failed_init:
++ free_empty_pages_and_pagevec(mmap_pages, MAX_PENDING_REQS);
++ del_timer(&netbk_tx_pending_timer);
++ del_timer(&net_timer);
++ return rc;
+
-+ if (found_dev)
-+ pcistub_put_pci_dev(found_dev);
-+}
+ }
+
+ module_init(netback_init);
+diff --git a/drivers/xen/netback/xenbus.c b/drivers/xen/netback/xenbus.c
+index a492288..c46b235 100644
+--- a/drivers/xen/netback/xenbus.c
++++ b/drivers/xen/netback/xenbus.c
+@@ -447,8 +447,8 @@ static struct xenbus_driver netback = {
+ };
+
+
+-void netif_xenbus_init(void)
++int netif_xenbus_init(void)
+ {
+ printk(KERN_CRIT "registering netback\n");
+- (void)xenbus_register_backend(&netback);
++ return xenbus_register_backend(&netback);
+ }
+--
+1.7.4
+
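The failed_init path added above follows the standard kernel unwind
idiom: a late failure releases everything acquired earlier before
propagating the error code. A self-contained sketch of the pattern
(simulated failure, hypothetical names):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static char *pool;

    static int register_backend(void)
    {
            return -ENODEV;     /* simulate a late failure */
    }

    static int demo_init(void)
    {
            int rc;

            pool = malloc(4096);        /* first resource */
            if (!pool)
                    return -ENOMEM;

            rc = register_backend();    /* later step fails */
            if (rc)
                    goto failed_init;

            return 0;

    failed_init:
            free(pool);         /* undo everything acquired so far */
            pool = NULL;
            return rc;
    }

    int main(void)
    {
            printf("demo_init: %d\n", demo_init());     /* -19 */
            return 0;
    }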
+
+From 4bc919e07d5dc48cb95b22cc6e90c6110c229343 Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Date: Tue, 27 Oct 2009 12:37:50 -0700
+Subject: [PATCH 024/203] xen: netback: don't screw around with packet gso state
+
+These lines were reverted from 2.6.18 netback as the network stack
+was modified to deal with packets shorter than the gso size, so there's
+no need to fiddle with the gso state in netback.
+
+Taken from linux-2.6.18-xen.hg change 8081d19dce89
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/netback.c | 5 -----
+ 1 files changed, 0 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 860c61e..9a14976 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -1293,11 +1293,6 @@ static unsigned net_tx_build_mops(void)
+ skb_shinfo(skb)->frags[0].page = (void *)~0UL;
+ }
+
+- if (skb->data_len < skb_shinfo(skb)->gso_size) {
+- skb_shinfo(skb)->gso_size = 0;
+- skb_shinfo(skb)->gso_type = 0;
+- }
+-
+ __skb_queue_tail(&tx_queue, skb);
+
+ pending_cons++;
+--
+1.7.4
+
+
+From f2b947783c47a721497e5d325c736234f71501e7 Mon Sep 17 00:00:00 2001
+From: Steven Smith <ssmith(a)weybridge.uk.xensource.com>
+Date: Fri, 30 Oct 2009 13:55:23 -0700
+Subject: [PATCH 025/203] xen: netback: make sure that pg->mapping is never NULL for a page mapped from a foreign domain.
+
+Otherwise, the foreign maps tracking infrastructure gets confused, and
+thinks that the foreign page is local. This means that you can't
+forward that packet to another foreign domain. This leads to a very
+high packet-drop rate, and hence very poor performance.
+
+Signed-off-by: Steven Smith <steven.smith(a)citrix.com>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/netback.c | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 9a14976..111fec7 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -97,12 +97,12 @@ static inline unsigned long idx_to_kaddr(unsigned int idx)
+ /* extra field used in struct page */
+ static inline void netif_set_page_index(struct page *pg, unsigned int index)
+ {
+- *(unsigned long *)&pg->mapping = index;
++ *(unsigned long *)&pg->mapping = index + 1;
+ }
+
+ static inline int netif_page_index(struct page *pg)
+ {
+- unsigned long idx = (unsigned long)pg->mapping;
++ unsigned long idx = (unsigned long)pg->mapping - 1;
+
+ if (!PageForeign(pg))
+ return -1;
+--
+1.7.4
+
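The "index + 1" encoding works because a page index of 0 would
otherwise leave pg->mapping looking like NULL, which is exactly the
sentinel the foreign-map tracking uses for "no mapping". A stand-alone
sketch of the offset-by-one trick, with hypothetical names:

    #include <stdio.h>

    /* Store index + 1 so that index 0 never collides with NULL. */
    static void set_index(void **slot, unsigned long index)
    {
            *slot = (void *)(index + 1);
    }

    static long get_index(void *slot)
    {
            return slot ? (long)slot - 1 : -1;  /* -1: not set */
    }

    int main(void)
    {
            void *slot = NULL;

            printf("before: %ld\n", get_index(slot));   /* -1 */
            set_index(&slot, 0);
            printf("after:  %ld\n", get_index(slot));   /* 0 */
            return 0;
    }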
+
+From df8b27ea0fb2695842104e06caaecb55780577a7 Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ijc(a)hellion.org.uk>
+Date: Thu, 3 Dec 2009 21:56:19 +0000
+Subject: [PATCH 026/203] xen: rename netbk module xen-netback.
+
+netbk is a rather generic name for a modular distro-style kernel.
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+Cc: Jeremy Fitzhardinge <jeremy(a)goop.org>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/Makefile | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/xen/netback/Makefile b/drivers/xen/netback/Makefile
+index a01a1a3..e346e81 100644
+--- a/drivers/xen/netback/Makefile
++++ b/drivers/xen/netback/Makefile
+@@ -1,3 +1,3 @@
+-obj-$(CONFIG_XEN_NETDEV_BACKEND) := netbk.o
++obj-$(CONFIG_XEN_NETDEV_BACKEND) := xen-netback.o
+
+-netbk-y := netback.o xenbus.o interface.o
++xen-netback-y := netback.o xenbus.o interface.o
+--
+1.7.4
+
+
+From 279097395ad64ae4df15e206a487cd5fd3be39a8 Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Date: Tue, 16 Feb 2010 14:40:37 -0800
+Subject: [PATCH 027/203] xen: netback: use dev_get/set_drvdata() interface
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/xenbus.c | 10 +++++-----
+ 1 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/xen/netback/xenbus.c b/drivers/xen/netback/xenbus.c
+index c46b235..79e6fb0 100644
+--- a/drivers/xen/netback/xenbus.c
++++ b/drivers/xen/netback/xenbus.c
+@@ -35,7 +35,7 @@ static void backend_create_netif(struct backend_info *be);
+
+ static int netback_remove(struct xenbus_device *dev)
+ {
+- struct backend_info *be = dev->dev.driver_data;
++ struct backend_info *be = dev_get_drvdata(&dev->dev);
+
+ //netback_remove_accelerators(be, dev);
+
+@@ -45,7 +45,7 @@ static int netback_remove(struct xenbus_device *dev)
+ be->netif = NULL;
+ }
+ kfree(be);
+- dev->dev.driver_data = NULL;
++ dev_set_drvdata(&dev->dev, NULL);
+ return 0;
+ }
+
+@@ -70,7 +70,7 @@ static int netback_probe(struct xenbus_device *dev,
+ }
+
+ be->dev = dev;
+- dev->dev.driver_data = be;
++ dev_set_drvdata(&dev->dev, be);
+
+ sg = 1;
+ if (netbk_copy_skb_mode == NETBK_ALWAYS_COPY_SKB)
+@@ -151,7 +151,7 @@ fail:
+ */
+ static int netback_uevent(struct xenbus_device *xdev, struct kobj_uevent_env *env)
+ {
+- struct backend_info *be = xdev->dev.driver_data;
++ struct backend_info *be = dev_get_drvdata(&xdev->dev);
+ struct xen_netif *netif = be->netif;
+ char *val;
+
+@@ -211,7 +211,7 @@ static void backend_create_netif(struct backend_info *be)
+ static void frontend_changed(struct xenbus_device *dev,
+ enum xenbus_state frontend_state)
+ {
+- struct backend_info *be = dev->dev.driver_data;
++ struct backend_info *be = dev_get_drvdata(&dev->dev);
+
+ DPRINTK("%s", xenbus_strstate(frontend_state));
+
+--
+1.7.4
+
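For context, the accessor pattern this patch switches to can be modelled in a few lines of plain C; the toy_* names are hypothetical, while the real kernel helpers are dev_set_drvdata() and dev_get_drvdata():

#include <assert.h>

/* Toy model: the driver-private pointer is reached through helpers
 * rather than by poking the field directly, insulating callers from
 * layout changes in struct device. */
struct toy_device { void *driver_data; };

static void toy_set_drvdata(struct toy_device *dev, void *data)
{
	dev->driver_data = data;
}

static void *toy_get_drvdata(const struct toy_device *dev)
{
	return dev->driver_data;
}

int main(void)
{
	struct toy_device dev;
	int backend_info = 42;

	toy_set_drvdata(&dev, &backend_info);
	assert(toy_get_drvdata(&dev) == &backend_info);
	return 0;
}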
+
+From 31d0b5f5763faf607e32f3b5a0f6b37a34bbbf09 Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Date: Tue, 16 Feb 2010 14:41:12 -0800
+Subject: [PATCH 028/203] xen: netback: include linux/sched.h for TASK_* definitions
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/common.h | 2 ++
+ 1 files changed, 2 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index 0675946..d8653d3 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -38,6 +38,8 @@
+ #include <linux/netdevice.h>
+ #include <linux/etherdevice.h>
+ #include <linux/wait.h>
++#include <linux/sched.h>
+
-+int pciback_init_devices(struct pciback_device *pdev)
+ #include <xen/interface/io/netif.h>
+ #include <asm/io.h>
+ #include <asm/pgalloc.h>
+--
+1.7.4
+
+
+From cdefc88924b3cdfcac64be737a00a4ec5593cfd5 Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Tue, 23 Feb 2010 11:52:27 +0000
+Subject: [PATCH 029/203] xen: netback: remove unused xen_network_done code
+
+It has been disabled effectively forever.
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+---
+ drivers/xen/netback/netback.c | 23 -----------------------
+ 1 files changed, 0 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 111fec7..4b24893 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -343,25 +343,6 @@ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ return 0;
+ }
+
+-#if 0
+-static void xen_network_done_notify(void)
+-{
+- static struct net_device *eth0_dev = NULL;
+- if (unlikely(eth0_dev == NULL))
+- eth0_dev = __dev_get_by_name("eth0");
+- netif_rx_schedule(eth0_dev);
+-}
+-/*
+- * Add following to poll() function in NAPI driver (Tigon3 is example):
+- * if ( xen_network_done() )
+- * tg3_enable_ints(tp);
+- */
+-int xen_network_done(void)
+-{
+- return skb_queue_empty(&rx_queue);
+-}
+-#endif
+-
+ struct netrx_pending_operations {
+ unsigned trans_prod, trans_cons;
+ unsigned mmu_prod, mmu_mcl;
+@@ -664,10 +645,6 @@ static void net_rx_action(unsigned long unused)
+ /* More work to do? */
+ if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
+ tasklet_schedule(&net_rx_tasklet);
+-#if 0
+- else
+- xen_network_done_notify();
+-#endif
+ }
+
+ static void net_alarm(unsigned long unused)
+--
+1.7.4
+
+
+From 994be068dd9947cedcee69a7185e54738cda33d4 Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Tue, 23 Feb 2010 11:58:26 +0000
+Subject: [PATCH 030/203] xen: netback: factor disconnect from backend into new function.
+
+Makes subsequent patches cleaner.
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+---
+ drivers/xen/netback/xenbus.c | 16 ++++++++++++----
+ 1 files changed, 12 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/xen/netback/xenbus.c b/drivers/xen/netback/xenbus.c
+index 79e6fb0..1f36b4d4 100644
+--- a/drivers/xen/netback/xenbus.c
++++ b/drivers/xen/netback/xenbus.c
+@@ -205,6 +205,16 @@ static void backend_create_netif(struct backend_info *be)
+ }
+
+
++static void disconnect_backend(struct xenbus_device *dev)
+{
-+ int slot, bus;
-+ struct slot_dev_data *slot_dev;
-+
-+ slot_dev = kmalloc(sizeof(*slot_dev), GFP_KERNEL);
-+ if (!slot_dev)
-+ return -ENOMEM;
-+
-+ spin_lock_init(&slot_dev->lock);
-+
-+ for (bus = 0; bus < PCI_BUS_NBR; bus++)
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++)
-+ slot_dev->slots[bus][slot] = NULL;
-+
-+ pdev->pci_dev_data = slot_dev;
-+
-+ return 0;
-+}
++ struct backend_info *be = dev_get_drvdata(&dev->dev);
+
-+int pciback_publish_pci_roots(struct pciback_device *pdev,
-+ publish_pci_root_cb publish_cb)
-+{
-+ /* The Virtual PCI bus has only one root */
-+ return publish_cb(pdev, 0, 0);
++ if (be->netif) {
++ netif_disconnect(be->netif);
++ be->netif = NULL;
++ }
+}
+
-+void pciback_release_devices(struct pciback_device *pdev)
-+{
-+ int slot, bus;
-+ struct slot_dev_data *slot_dev = pdev->pci_dev_data;
-+ struct pci_dev *dev;
-+
-+ for (bus = 0; bus < PCI_BUS_NBR; bus++)
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ dev = slot_dev->slots[bus][slot];
-+ if (dev != NULL)
-+ pcistub_put_pci_dev(dev);
-+ }
-+
-+ kfree(slot_dev);
-+ pdev->pci_dev_data = NULL;
+ /**
+ * Callback received when the frontend's state changes.
+ */
+@@ -238,11 +248,9 @@ static void frontend_changed(struct xenbus_device *dev,
+ break;
+
+ case XenbusStateClosing:
+- if (be->netif) {
++ if (be->netif)
+ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
+- netif_disconnect(be->netif);
+- be->netif = NULL;
+- }
++ disconnect_backend(dev);
+ xenbus_switch_state(dev, XenbusStateClosing);
+ break;
+
+--
+1.7.4
+
+
+From 9dcb4c18e5b29d8862cd7783d5b0040913010563 Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Tue, 23 Feb 2010 12:10:24 +0000
+Subject: [PATCH 031/203] xen: netback: wait for hotplug scripts to complete before signalling connected to frontend
+
+Avoid the situation where the frontend is already sending packets but
+the domain 0 bridging (or other network plumbing) is not yet configured
+because the hotplug scripts are too slow, so the packets get dropped.
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+Cc: Steven.Smith(a)citrix.com
+---
+ drivers/xen/netback/common.h | 2 +
+ drivers/xen/netback/xenbus.c | 45 +++++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 46 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index d8653d3..1983768 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -145,6 +145,8 @@ struct backend_info {
+ struct xenbus_device *dev;
+ struct xen_netif *netif;
+ enum xenbus_state frontend_state;
++ struct xenbus_watch hotplug_status_watch;
++ int have_hotplug_status_watch:1;
+
+ /* State relating to the netback accelerator */
+ void *netback_accel_priv;
+diff --git a/drivers/xen/netback/xenbus.c b/drivers/xen/netback/xenbus.c
+index 1f36b4d4..d2407cc 100644
+--- a/drivers/xen/netback/xenbus.c
++++ b/drivers/xen/netback/xenbus.c
+@@ -32,6 +32,7 @@
+ static int connect_rings(struct backend_info *);
+ static void connect(struct backend_info *);
+ static void backend_create_netif(struct backend_info *be);
++static void unregister_hotplug_status_watch(struct backend_info *be);
+
+ static int netback_remove(struct xenbus_device *dev)
+ {
+@@ -39,8 +40,10 @@ static int netback_remove(struct xenbus_device *dev)
+
+ //netback_remove_accelerators(be, dev);
+
++ unregister_hotplug_status_watch(be);
+ if (be->netif) {
+ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
++ xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
+ netif_disconnect(be->netif);
+ be->netif = NULL;
+ }
+@@ -210,6 +213,7 @@ static void disconnect_backend(struct xenbus_device *dev)
+ struct backend_info *be = dev_get_drvdata(&dev->dev);
+
+ if (be->netif) {
++ xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
+ netif_disconnect(be->netif);
+ be->netif = NULL;
+ }
+@@ -329,6 +333,36 @@ static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
+ return 0;
+ }
+
++static void unregister_hotplug_status_watch(struct backend_info *be)
++{
++ if (be->have_hotplug_status_watch) {
++ unregister_xenbus_watch(&be->hotplug_status_watch);
++ kfree(be->hotplug_status_watch.node);
++ }
++ be->have_hotplug_status_watch = 0;
+}
+
-+int pciback_get_pcifront_dev(struct pci_dev *pcidev, struct pciback_device *pdev,
-+ unsigned int *domain, unsigned int *bus, unsigned int *devfn)
++static void hotplug_status_changed(struct xenbus_watch *watch,
++ const char **vec,
++ unsigned int vec_size)
+{
-+ int slot, busnr;
-+ struct slot_dev_data *slot_dev = pdev->pci_dev_data;
-+ struct pci_dev *dev;
-+ int found = 0;
-+ unsigned long flags;
++ struct backend_info *be = container_of(watch,
++ struct backend_info,
++ hotplug_status_watch);
++ char *str;
++ unsigned int len;
+
-+ spin_lock_irqsave(&slot_dev->lock, flags);
++ str = xenbus_read(XBT_NIL, be->dev->nodename, "hotplug-status", &len);
++ if (IS_ERR(str))
++ return;
++ if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) {
++ xenbus_switch_state(be->dev, XenbusStateConnected);
++ /* Not interested in this watch anymore. */
++ unregister_hotplug_status_watch(be);
++ }
++ kfree(str);
++}
+
-+ for (busnr = 0; busnr < PCI_BUS_NBR; bus++)
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ dev = slot_dev->slots[busnr][slot];
-+ if (dev && dev->bus->number == pcidev->bus->number
-+ && dev->devfn == pcidev->devfn
-+ && pci_domain_nr(dev->bus) == pci_domain_nr(pcidev->bus)) {
-+ found = 1;
-+ *domain = 0;
-+ *bus = busnr;
-+ *devfn = PCI_DEVFN(slot,0);
-+ goto out;
-+ }
+ static void connect(struct backend_info *be)
+ {
+ int err;
+@@ -348,7 +382,16 @@ static void connect(struct backend_info *be)
+ &be->netif->credit_usec);
+ be->netif->remaining_credit = be->netif->credit_bytes;
+
+- xenbus_switch_state(dev, XenbusStateConnected);
++ unregister_hotplug_status_watch(be);
++ err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
++ hotplug_status_changed,
++ "%s/%s", dev->nodename, "hotplug-status");
++ if (err) {
++ /* Switch now, since we can't do a watch. */
++ xenbus_switch_state(dev, XenbusStateConnected);
++ } else {
++ be->have_hotplug_status_watch = 1;
++ }
+
+ netif_wake_queue(be->netif->dev);
+ }
+--
+1.7.4
+
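The handshake introduced above reduces to a small state machine; this user-space sketch uses hypothetical names and stands in for the xenbus watch on "hotplug-status" that the real code registers:

#include <stdio.h>
#include <string.h>

/* Toy model: the backend defers its Connected state until the hotplug
 * script has written "connected" to the store; before that, frontend
 * traffic would hit an unconfigured bridge and be dropped. */
enum be_state { BE_CLOSED, BE_CONNECTED };

static enum be_state on_hotplug_status(enum be_state cur, const char *status)
{
	if (cur != BE_CONNECTED && strcmp(status, "connected") == 0)
		return BE_CONNECTED;	/* scripts done: safe to connect */
	return cur;			/* keep waiting */
}

int main(void)
{
	enum be_state s = BE_CLOSED;

	s = on_hotplug_status(s, "");		/* script still running */
	printf("state=%d\n", s);		/* 0: still closed */
	s = on_hotplug_status(s, "connected");	/* script finished */
	printf("state=%d\n", s);		/* 1: connected */
	return 0;
}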
+
+From 509cc7f20f866277a8f5d5895bb266b5b68aac6d Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Tue, 23 Feb 2010 12:11:51 +0000
+Subject: [PATCH 032/203] xen: netback: Always pull through PKT_PROT_LEN bytes into the linear part of an skb.
+
+Previously PKT_PROT_LEN would only have an effect on the first fragment.
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+---
+ drivers/xen/netback/netback.c | 10 ++++++++++
+ 1 files changed, 10 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 4b24893..d4a7a56 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -1334,6 +1334,16 @@ static void net_tx_submit(void)
+
+ netbk_fill_frags(skb);
+
++ /*
++ * If the initial fragment was < PKT_PROT_LEN then
++ * pull through some bytes from the other fragments to
++ * increase the linear region to PKT_PROT_LEN bytes.
++ */
++ if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
++ int target = min_t(int, skb->len, PKT_PROT_LEN);
++ __pskb_pull_tail(skb, target - skb_headlen(skb));
+ }
-+out:
-+ spin_unlock_irqrestore(&slot_dev->lock, flags);
-+ return found;
+
-+}
-diff --git a/drivers/xen/pciback/vpci.c b/drivers/xen/pciback/vpci.c
-new file mode 100644
-index 0000000..a5b7ece
---- /dev/null
-+++ b/drivers/xen/pciback/vpci.c
-@@ -0,0 +1,242 @@
-+/*
-+ * PCI Backend - Provides a Virtual PCI bus (with real devices)
-+ * to the frontend
-+ *
-+ * Author: Ryan Wilson <hap9(a)epoch.ncsc.mil>
-+ */
+ skb->dev = netif->dev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+--
+1.7.4
+
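The pull-through amount follows directly from the target computed above; a small self-contained model (function name hypothetical, PKT_PROT_LEN being 64 at this point in the series and raised to 72 two patches later):

#include <stdio.h>

#define PKT_PROT_LEN 64

/* Toy model: how many bytes must be pulled out of the fragments so the
 * linear area holds min(total_len, PKT_PROT_LEN) bytes. */
static int bytes_to_pull(int headlen, int total_len)
{
	int target = total_len < PKT_PROT_LEN ? total_len : PKT_PROT_LEN;

	return headlen < target ? target - headlen : 0;
}

int main(void)
{
	printf("%d\n", bytes_to_pull(40, 1500));	/* 24: top up to 64 */
	printf("%d\n", bytes_to_pull(50, 60));	/* 10: short packet */
	printf("%d\n", bytes_to_pull(80, 1500));	/* 0: already linear */
	return 0;
}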
+
+From 673c82b5110cfffafe1e1978bc07d6d10d111d50 Mon Sep 17 00:00:00 2001
+From: Steven Smith <ssmith(a)xensource.com>
+Date: Tue, 23 Feb 2010 11:49:26 +0000
+Subject: [PATCH 033/203] xen: netback: try to pull a minimum of 72 bytes into the skb data area
+ when receiving a packet into netback.
+
+The previous number, 64, tended to place a fragment boundary in the middle of
+the TCP header options and led to unnecessary fragmentation in Windows <->
+Windows networking.
+
+Signed-off-by: Steven Smith <ssmith(a)xensource.com>
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+---
+ drivers/xen/netback/netback.c | 9 +++------
+ 1 files changed, 3 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index d4a7a56..44357d7 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -116,13 +116,10 @@ static inline int netif_page_index(struct page *pg)
+ /*
+ * This is the amount of packet we copy rather than map, so that the
+ * guest can't fiddle with the contents of the headers while we do
+- * packet processing on them (netfilter, routing, etc). This could
+- * probably do with being larger, since 1) 64-bytes isn't necessarily
+- * long enough to cover a full christmas-tree ip+tcp header, let alone
+- * packet contents, and 2) the data is probably in cache anyway
+- * (though perhaps some other cpu's cache).
++ * packet processing on them (netfilter, routing, etc). 72 is enough
++ * to cover TCP+IP headers including options.
+ */
+-#define PKT_PROT_LEN 64
++#define PKT_PROT_LEN 72
+
+ static struct pending_tx_info {
+ struct xen_netif_tx_request req;
+--
+1.7.4
+
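One plausible budget for the new constant, since the derivation is not spelled out above: 14 bytes of Ethernet header plus minimal 20-byte IPv4 and 20-byte TCP headers come to 54 bytes, so a 72-byte pull leaves 18 bytes for TCP/IP options, while the old 64-byte value left only 10 and could cut a longer option run in half.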
+
+From c83bd213efd3ebf700189249c30d987b1cb14d7e Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Tue, 23 Feb 2010 11:54:30 +0000
+Subject: [PATCH 034/203] xen: netback: Allow setting of large MTU before rings have connected.
+
+This allows a large MTU to be configured by the VIF hotplug
+script. Previously this would fail because, at the point the hotplug
+script runs, the VIF features have most likely not yet been negotiated
+with the frontend, and so SG has not yet been enabled. Invert this
+behaviour so that SG is assumed present until negotiation proves
+otherwise, and reduce the MTU at that point.
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+---
+ drivers/xen/netback/interface.c | 6 +++++-
+ drivers/xen/netback/xenbus.c | 8 +++++---
+ 2 files changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
+index 21c1f95..b23b14d 100644
+--- a/drivers/xen/netback/interface.c
++++ b/drivers/xen/netback/interface.c
+@@ -104,6 +104,9 @@ static int netbk_set_sg(struct net_device *dev, u32 data)
+ return -ENOSYS;
+ }
+
++ if (dev->mtu > ETH_DATA_LEN)
++ dev->mtu = ETH_DATA_LEN;
+
-+#include <linux/list.h>
-+#include <linux/slab.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include "pciback.h"
+ return ethtool_op_set_sg(dev, data);
+ }
+
+@@ -207,6 +210,7 @@ struct xen_netif *netif_alloc(struct device *parent, domid_t domid, unsigned int
+ memset(netif, 0, sizeof(*netif));
+ netif->domid = domid;
+ netif->handle = handle;
++ netif->features = NETIF_F_SG;
+ atomic_set(&netif->refcnt, 1);
+ init_waitqueue_head(&netif->waiting_to_free);
+ netif->dev = dev;
+@@ -223,7 +227,7 @@ struct xen_netif *netif_alloc(struct device *parent, domid_t domid, unsigned int
+ init_timer(&netif->tx_queue_timeout);
+
+ dev->netdev_ops = &netback_ops;
+- dev->features = NETIF_F_IP_CSUM;
++ dev->features = NETIF_F_IP_CSUM|NETIF_F_SG;
+
+ SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
+
+diff --git a/drivers/xen/netback/xenbus.c b/drivers/xen/netback/xenbus.c
+index d2407cc..fcd3c34 100644
+--- a/drivers/xen/netback/xenbus.c
++++ b/drivers/xen/netback/xenbus.c
+@@ -445,9 +445,11 @@ static int connect_rings(struct backend_info *be)
+
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", "%d", &val) < 0)
+ val = 0;
+- if (val) {
+- be->netif->features |= NETIF_F_SG;
+- be->netif->dev->features |= NETIF_F_SG;
++ if (!val) {
++ be->netif->features &= ~NETIF_F_SG;
++ be->netif->dev->features &= ~NETIF_F_SG;
++ if (be->netif->dev->mtu > ETH_DATA_LEN)
++ be->netif->dev->mtu = ETH_DATA_LEN;
+ }
+
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", "%d",
+--
+1.7.4
+
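The inverted negotiation can be sketched as follows; the struct and function names are hypothetical, with ETH_DATA_LEN being the standard 1500-byte Ethernet payload limit:

#include <stdio.h>

#define ETH_DATA_LEN 1500

/* Toy model: assume SG from the start so a jumbo MTU set by the hotplug
 * script sticks, and clamp back down only if the frontend later turns
 * out not to support SG. */
struct toy_vif { int sg; int mtu; };

static void frontend_negotiated(struct toy_vif *vif, int feature_sg)
{
	if (!feature_sg) {
		vif->sg = 0;
		if (vif->mtu > ETH_DATA_LEN)
			vif->mtu = ETH_DATA_LEN;	/* no SG: standard MTU */
	}
}

int main(void)
{
	struct toy_vif vif = { .sg = 1, .mtu = 9000 };	/* hotplug set jumbo */

	frontend_negotiated(&vif, 0);	/* frontend reports no SG */
	printf("sg=%d mtu=%d\n", vif.sg, vif.mtu);	/* sg=0 mtu=1500 */
	return 0;
}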
+
+From e5cd35b00cb63f3a3fa1651260a58d59bbc134b7 Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Date: Fri, 19 Mar 2010 13:09:16 -0700
+Subject: [PATCH 035/203] xen: netback: use get_sset_count rather than obsolete get_stats_count
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/interface.c | 11 ++++++++---
+ 1 files changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
+index b23b14d..086d939 100644
+--- a/drivers/xen/netback/interface.c
++++ b/drivers/xen/netback/interface.c
+@@ -136,9 +136,14 @@ static const struct netif_stat {
+ { "copied_skbs", offsetof(struct xen_netif, nr_copied_skbs) },
+ };
+
+-static int netbk_get_stats_count(struct net_device *dev)
++static int netbk_get_sset_count(struct net_device *dev, int string_set)
+ {
+- return ARRAY_SIZE(netbk_stats);
++ switch (string_set) {
++ case ETH_SS_STATS:
++ return ARRAY_SIZE(netbk_stats);
++ default:
++ return -EINVAL;
++ }
+ }
+
+ static void netbk_get_ethtool_stats(struct net_device *dev,
+@@ -176,7 +181,7 @@ static struct ethtool_ops network_ethtool_ops =
+ .set_tso = netbk_set_tso,
+ .get_link = ethtool_op_get_link,
+
+- .get_stats_count = netbk_get_stats_count,
++ .get_sset_count = netbk_get_sset_count,
+ .get_ethtool_stats = netbk_get_ethtool_stats,
+ .get_strings = netbk_get_strings,
+ };
+--
+1.7.4
+
+
+From 0c34835ee66ad641f01a8077a973b7ec1bfdcd86 Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Tue, 11 May 2010 09:33:42 +0100
+Subject: [PATCH 036/203] xen: netback: correctly set up skb->ip_summed on receive
+
+In 2.6.18 CHECKSUM_PARTIAL and CHECKSUM_UNNECESSARY were both synonyms for
+CHECKSUM_HW. This is no longer the case and we need to select the correct one.
+
+ data_validated csum_blank -> ip_summed
+ 0 0 CHECKSUM_NONE
+ 0 1 CHECKSUM_PARTIAL
+ 1 0 CHECKSUM_UNNECESSARY
+ 1 1 CHECKSUM_PARTIAL
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+Tested-by: Matej Zary <zary(a)cvtisr.sk>
+Tested-by: Michael D Labriola <mlabriol(a)gdeb.com>
+---
+ drivers/xen/netback/netback.c | 10 +++-------
+ 1 files changed, 3 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 44357d7..725da0f 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -1320,14 +1320,10 @@ static void net_tx_submit(void)
+ netif_idx_release(pending_idx);
+ }
+
+- /*
+- * Old frontends do not assert data_validated but we
+- * can infer it from csum_blank so test both flags.
+- */
+- if (txp->flags & (NETTXF_data_validated|NETTXF_csum_blank))
++ if (txp->flags & NETTXF_csum_blank)
+ skb->ip_summed = CHECKSUM_PARTIAL;
+- else
+- skb->ip_summed = CHECKSUM_NONE;
++ else if (txp->flags & NETTXF_data_validated)
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ netbk_fill_frags(skb);
+
+--
+1.7.4
+
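The flag table above maps onto a two-branch classifier; this stand-alone sketch uses hypothetical flag values but reproduces the stated mapping:

#include <stdio.h>

#define TOY_CSUM_BLANK		(1 << 0)	/* checksum still to be computed */
#define TOY_DATA_VALIDATED	(1 << 1)	/* payload already verified */

enum toy_summed { TOY_NONE, TOY_UNNECESSARY, TOY_PARTIAL };

static enum toy_summed classify(unsigned int flags)
{
	if (flags & TOY_CSUM_BLANK)	/* csum_blank wins either way */
		return TOY_PARTIAL;
	if (flags & TOY_DATA_VALIDATED)
		return TOY_UNNECESSARY;
	return TOY_NONE;
}

int main(void)
{
	/* Prints 0 2 1 2, matching the four table rows above. */
	printf("%d %d %d %d\n",
	       classify(0),
	       classify(TOY_CSUM_BLANK),
	       classify(TOY_DATA_VALIDATED),
	       classify(TOY_CSUM_BLANK | TOY_DATA_VALIDATED));
	return 0;
}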
+
+From 094944631cc5a9d6e623302c987f78117c0bf7ac Mon Sep 17 00:00:00 2001
+From: Dongxiao Xu <dongxiao.xu(a)intel.com>
+Date: Wed, 19 May 2010 16:58:56 -0700
+Subject: [PATCH 037/203] xen: netback: Move global/static variables into struct xen_netbk.
+
+Bundle a lot of discrete variables into a single structure.
+
+Signed-off-by: Dongxiao Xu <dongxiao.xu(a)intel.com>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/common.h | 59 +++++++
+ drivers/xen/netback/netback.c | 360 ++++++++++++++++++++---------------------
+ 2 files changed, 232 insertions(+), 187 deletions(-)
+
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index 1983768..00208f4 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -222,4 +222,63 @@ static inline int netbk_can_sg(struct net_device *dev)
+ return netif->features & NETIF_F_SG;
+ }
+
++struct pending_tx_info {
++ struct xen_netif_tx_request req;
++ struct xen_netif *netif;
++};
++typedef unsigned int pending_ring_idx_t;
+
-+#define PCI_SLOT_MAX 32
++struct netbk_rx_meta {
++ skb_frag_t frag;
++ int id;
++};
+
-+struct vpci_dev_data {
-+ /* Access to dev_list must be protected by lock */
-+ struct list_head dev_list[PCI_SLOT_MAX];
-+ spinlock_t lock;
++struct netbk_tx_pending_inuse {
++ struct list_head list;
++ unsigned long alloc_time;
+};
+
-+static inline struct list_head *list_first(struct list_head *head)
-+{
-+ return head->next;
-+}
++#define MAX_PENDING_REQS 256
+
-+struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
-+ unsigned int domain, unsigned int bus,
-+ unsigned int devfn)
-+{
-+ struct pci_dev_entry *entry;
-+ struct pci_dev *dev = NULL;
-+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
-+ unsigned long flags;
++struct xen_netbk {
++ struct tasklet_struct net_tx_tasklet;
++ struct tasklet_struct net_rx_tasklet;
+
-+ if (domain != 0 || bus != 0)
-+ return NULL;
++ struct sk_buff_head rx_queue;
++ struct sk_buff_head tx_queue;
+
-+ if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
-+ spin_lock_irqsave(&vpci_dev->lock, flags);
++ struct timer_list net_timer;
++ struct timer_list netbk_tx_pending_timer;
+
-+ list_for_each_entry(entry,
-+ &vpci_dev->dev_list[PCI_SLOT(devfn)],
-+ list) {
-+ if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
-+ dev = entry->dev;
-+ break;
-+ }
-+ }
++ struct page **mmap_pages;
+
-+ spin_unlock_irqrestore(&vpci_dev->lock, flags);
-+ }
-+ return dev;
-+}
++ pending_ring_idx_t pending_prod;
++ pending_ring_idx_t pending_cons;
++ pending_ring_idx_t dealloc_prod;
++ pending_ring_idx_t dealloc_cons;
+
-+static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
-+{
-+ if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus)
-+ && l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn))
-+ return 1;
++ struct list_head pending_inuse_head;
++ struct list_head net_schedule_list;
+
-+ return 0;
-+}
++ /* Protect the net_schedule_list in netif. */
++ spinlock_t net_schedule_list_lock;
+
-+int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
-+ int devid, publish_pci_dev_cb publish_cb)
-+{
-+ int err = 0, slot, func;
-+ struct pci_dev_entry *t, *dev_entry;
-+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
-+ unsigned long flags;
-+
-+ if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
-+ err = -EFAULT;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Can't export bridges on the virtual PCI bus");
-+ goto out;
-+ }
-+
-+ dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
-+ if (!dev_entry) {
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error adding entry to virtual PCI bus");
-+ goto out;
-+ }
++ struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
++ struct netbk_tx_pending_inuse pending_inuse[MAX_PENDING_REQS];
++ struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
++ struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
+
-+ dev_entry->dev = dev;
++ grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
++ u16 pending_ring[MAX_PENDING_REQS];
++ u16 dealloc_ring[MAX_PENDING_REQS];
+
-+ spin_lock_irqsave(&vpci_dev->lock, flags);
++ struct multicall_entry rx_mcl[NET_RX_RING_SIZE+3];
++ struct mmu_update rx_mmu[NET_RX_RING_SIZE];
++ struct gnttab_transfer grant_trans_op[NET_RX_RING_SIZE];
++ struct gnttab_copy grant_copy_op[NET_RX_RING_SIZE];
++ unsigned char rx_notify[NR_IRQS];
++ u16 notify_list[NET_RX_RING_SIZE];
++ struct netbk_rx_meta meta[NET_RX_RING_SIZE];
++};
+
-+ /* Keep multi-function devices together on the virtual PCI bus */
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ if (!list_empty(&vpci_dev->dev_list[slot])) {
-+ t = list_entry(list_first(&vpci_dev->dev_list[slot]),
-+ struct pci_dev_entry, list);
+ #endif /* __NETIF__BACKEND__COMMON_H__ */
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 725da0f..417f497 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -48,16 +48,7 @@
+
+ /*define NETBE_DEBUG_INTERRUPT*/
+
+-struct netbk_rx_meta {
+- skb_frag_t frag;
+- int id;
+-};
+-
+-struct netbk_tx_pending_inuse {
+- struct list_head list;
+- unsigned long alloc_time;
+-};
+-
++static struct xen_netbk *netbk;
+
+ static void netif_idx_release(u16 pending_idx);
+ static void make_tx_response(struct xen_netif *netif,
+@@ -71,22 +62,12 @@ static struct xen_netif_rx_response *make_rx_response(struct xen_netif *netif,
+ u16 flags);
+
+ static void net_tx_action(unsigned long unused);
+-static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
+
+ static void net_rx_action(unsigned long unused);
+-static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
+-
+-static struct timer_list net_timer;
+-static struct timer_list netbk_tx_pending_timer;
+
+-#define MAX_PENDING_REQS 256
+-
+-static struct sk_buff_head rx_queue;
+-
+-static struct page **mmap_pages;
+ static inline unsigned long idx_to_pfn(unsigned int idx)
+ {
+- return page_to_pfn(mmap_pages[idx]);
++ return page_to_pfn(netbk->mmap_pages[idx]);
+ }
+
+ static inline unsigned long idx_to_kaddr(unsigned int idx)
+@@ -107,7 +88,7 @@ static inline int netif_page_index(struct page *pg)
+ if (!PageForeign(pg))
+ return -1;
+
+- if ((idx >= MAX_PENDING_REQS) || (mmap_pages[idx] != pg))
++ if ((idx >= MAX_PENDING_REQS) || (netbk->mmap_pages[idx] != pg))
+ return -1;
+
+ return idx;
+@@ -121,46 +102,17 @@ static inline int netif_page_index(struct page *pg)
+ */
+ #define PKT_PROT_LEN 72
+
+-static struct pending_tx_info {
+- struct xen_netif_tx_request req;
+- struct xen_netif *netif;
+-} pending_tx_info[MAX_PENDING_REQS];
+-static u16 pending_ring[MAX_PENDING_REQS];
+-typedef unsigned int pending_ring_idx_t;
+-
+ static inline pending_ring_idx_t pending_index(unsigned i)
+ {
+ return i & (MAX_PENDING_REQS-1);
+ }
+
+-static pending_ring_idx_t pending_prod, pending_cons;
+-
+ static inline pending_ring_idx_t nr_pending_reqs(void)
+ {
+- return MAX_PENDING_REQS - pending_prod + pending_cons;
++ return MAX_PENDING_REQS -
++ netbk->pending_prod + netbk->pending_cons;
+ }
+
+-/* Freed TX SKBs get batched on this ring before return to pending_ring. */
+-static u16 dealloc_ring[MAX_PENDING_REQS];
+-static pending_ring_idx_t dealloc_prod, dealloc_cons;
+-
+-/* Doubly-linked list of in-use pending entries. */
+-static struct netbk_tx_pending_inuse pending_inuse[MAX_PENDING_REQS];
+-static LIST_HEAD(pending_inuse_head);
+-
+-static struct sk_buff_head tx_queue;
+-
+-static grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
+-static struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
+-static struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
+-
+-static LIST_HEAD(net_schedule_list);
+-static DEFINE_SPINLOCK(net_schedule_list_lock);
+-
+-#define MAX_MFN_ALLOC 64
+-static unsigned long mfn_list[MAX_MFN_ALLOC];
+-static unsigned int alloc_index = 0;
+-
+ /* Setting this allows the safe use of this driver without netloop. */
+ static int MODPARM_copy_skb = 1;
+ module_param_named(copy_skb, MODPARM_copy_skb, bool, 0);
+@@ -168,18 +120,12 @@ MODULE_PARM_DESC(copy_skb, "Copy data received from netfront without netloop");
+
+ int netbk_copy_skb_mode;
+
+-static inline unsigned long alloc_mfn(void)
+-{
+- BUG_ON(alloc_index == 0);
+- return mfn_list[--alloc_index];
+-}
+-
+ static inline void maybe_schedule_tx_action(void)
+ {
+ smp_mb();
+ if ((nr_pending_reqs() < (MAX_PENDING_REQS/2)) &&
+- !list_empty(&net_schedule_list))
+- tasklet_schedule(&net_tx_tasklet);
++ !list_empty(&netbk->net_schedule_list))
++ tasklet_schedule(&netbk->net_tx_tasklet);
+ }
+
+ static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
+@@ -328,9 +274,8 @@ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ mod_timer(&netif->tx_queue_timeout, jiffies + HZ/2);
+ }
+ }
+-
+- skb_queue_tail(&rx_queue, skb);
+- tasklet_schedule(&net_rx_tasklet);
++ skb_queue_tail(&netbk->rx_queue, skb);
++ tasklet_schedule(&netbk->net_rx_tasklet);
+
+ return 0;
+
+@@ -372,7 +317,7 @@ static u16 netbk_gop_frag(struct xen_netif *netif, struct netbk_rx_meta *meta,
+ copy_gop = npo->copy + npo->copy_prod++;
+ copy_gop->flags = GNTCOPY_dest_gref;
+ if (idx > -1) {
+- struct pending_tx_info *src_pend = &pending_tx_info[idx];
++ struct pending_tx_info *src_pend = &netbk->pending_tx_info[idx];
+ copy_gop->source.domid = src_pend->netif->domid;
+ copy_gop->source.u.ref = src_pend->req.gref;
+ copy_gop->flags |= GNTCOPY_source_gref;
+@@ -487,30 +432,19 @@ static void net_rx_action(unsigned long unused)
+ int count;
+ unsigned long offset;
+
+- /*
+- * Putting hundreds of bytes on the stack is considered rude.
+- * Static works because a tasklet can only be on one CPU at any time.
+- */
+- static struct multicall_entry rx_mcl[NET_RX_RING_SIZE+3];
+- static struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+- static struct gnttab_transfer grant_trans_op[NET_RX_RING_SIZE];
+- static struct gnttab_copy grant_copy_op[NET_RX_RING_SIZE];
+- static unsigned char rx_notify[NR_IRQS];
+- static u16 notify_list[NET_RX_RING_SIZE];
+- static struct netbk_rx_meta meta[NET_RX_RING_SIZE];
+-
+ struct netrx_pending_operations npo = {
+- mmu: rx_mmu,
+- trans: grant_trans_op,
+- copy: grant_copy_op,
+- mcl: rx_mcl,
+- meta: meta};
++ .mmu = netbk->rx_mmu,
++ .trans = netbk->grant_trans_op,
++ .copy = netbk->grant_copy_op,
++ .mcl = netbk->rx_mcl,
++ .meta = netbk->meta,
++ };
+
+ skb_queue_head_init(&rxq);
+
+ count = 0;
+
+- while ((skb = skb_dequeue(&rx_queue)) != NULL) {
++ while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ *(int *)skb->cb = nr_frags;
+
+@@ -525,39 +459,39 @@ static void net_rx_action(unsigned long unused)
+ break;
+ }
+
+- BUG_ON(npo.meta_prod > ARRAY_SIZE(meta));
++ BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
+
+ npo.mmu_mcl = npo.mcl_prod;
+ if (npo.mcl_prod) {
+ BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
+- BUG_ON(npo.mmu_prod > ARRAY_SIZE(rx_mmu));
++ BUG_ON(npo.mmu_prod > ARRAY_SIZE(netbk->rx_mmu));
+ mcl = npo.mcl + npo.mcl_prod++;
+
+ BUG_ON(mcl[-1].op != __HYPERVISOR_update_va_mapping);
+ mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
+
+ mcl->op = __HYPERVISOR_mmu_update;
+- mcl->args[0] = (unsigned long)rx_mmu;
++ mcl->args[0] = (unsigned long)netbk->rx_mmu;
+ mcl->args[1] = npo.mmu_prod;
+ mcl->args[2] = 0;
+ mcl->args[3] = DOMID_SELF;
+ }
+
+ if (npo.trans_prod) {
+- BUG_ON(npo.trans_prod > ARRAY_SIZE(grant_trans_op));
++ BUG_ON(npo.trans_prod > ARRAY_SIZE(netbk->grant_trans_op));
+ mcl = npo.mcl + npo.mcl_prod++;
+ mcl->op = __HYPERVISOR_grant_table_op;
+ mcl->args[0] = GNTTABOP_transfer;
+- mcl->args[1] = (unsigned long)grant_trans_op;
++ mcl->args[1] = (unsigned long)netbk->grant_trans_op;
+ mcl->args[2] = npo.trans_prod;
+ }
+
+ if (npo.copy_prod) {
+- BUG_ON(npo.copy_prod > ARRAY_SIZE(grant_copy_op));
++ BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
+ mcl = npo.mcl + npo.mcl_prod++;
+ mcl->op = __HYPERVISOR_grant_table_op;
+ mcl->args[0] = GNTTABOP_copy;
+- mcl->args[1] = (unsigned long)grant_copy_op;
++ mcl->args[1] = (unsigned long)netbk->grant_copy_op;
+ mcl->args[2] = npo.copy_prod;
+ }
+
+@@ -565,7 +499,7 @@ static void net_rx_action(unsigned long unused)
+ if (!npo.mcl_prod)
+ return;
+
+- BUG_ON(npo.mcl_prod > ARRAY_SIZE(rx_mcl));
++ BUG_ON(npo.mcl_prod > ARRAY_SIZE(netbk->rx_mcl));
+
+ ret = HYPERVISOR_multicall(npo.mcl, npo.mcl_prod);
+ BUG_ON(ret != 0);
+@@ -582,7 +516,7 @@ static void net_rx_action(unsigned long unused)
+
+ status = netbk_check_gop(nr_frags, netif->domid, &npo);
+
+- id = meta[npo.meta_cons].id;
++ id = netbk->meta[npo.meta_cons].id;
+ flags = nr_frags ? NETRXF_more_data : 0;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
+@@ -595,7 +529,7 @@ static void net_rx_action(unsigned long unused)
+ resp = make_rx_response(netif, id, status, offset,
+ skb_headlen(skb), flags);
+
+- if (meta[npo.meta_cons].frag.size) {
++ if (netbk->meta[npo.meta_cons].frag.size) {
+ struct xen_netif_extra_info *gso =
+ (struct xen_netif_extra_info *)
+ RING_GET_RESPONSE(&netif->rx,
+@@ -603,7 +537,7 @@ static void net_rx_action(unsigned long unused)
+
+ resp->flags |= NETRXF_extra_info;
+
+- gso->u.gso.size = meta[npo.meta_cons].frag.size;
++ gso->u.gso.size = netbk->meta[npo.meta_cons].frag.size;
+ gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
+ gso->u.gso.pad = 0;
+ gso->u.gso.features = 0;
+@@ -613,14 +547,14 @@ static void net_rx_action(unsigned long unused)
+ }
+
+ netbk_add_frag_responses(netif, status,
+- meta + npo.meta_cons + 1,
+- nr_frags);
++ netbk->meta + npo.meta_cons + 1,
++ nr_frags);
+
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, ret);
+ irq = netif->irq;
+- if (ret && !rx_notify[irq]) {
+- rx_notify[irq] = 1;
+- notify_list[notify_nr++] = irq;
++ if (ret && !netbk->rx_notify[irq]) {
++ netbk->rx_notify[irq] = 1;
++ netbk->notify_list[notify_nr++] = irq;
+ }
+
+ if (netif_queue_stopped(netif->dev) &&
+@@ -634,24 +568,25 @@ static void net_rx_action(unsigned long unused)
+ }
+
+ while (notify_nr != 0) {
+- irq = notify_list[--notify_nr];
+- rx_notify[irq] = 0;
++ irq = netbk->notify_list[--notify_nr];
++ netbk->rx_notify[irq] = 0;
+ notify_remote_via_irq(irq);
+ }
+
+ /* More work to do? */
+- if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
+- tasklet_schedule(&net_rx_tasklet);
++ if (!skb_queue_empty(&netbk->rx_queue) &&
++ !timer_pending(&netbk->net_timer))
++ tasklet_schedule(&netbk->net_rx_tasklet);
+ }
+
+ static void net_alarm(unsigned long unused)
+ {
+- tasklet_schedule(&net_rx_tasklet);
++ tasklet_schedule(&netbk->net_rx_tasklet);
+ }
+
+ static void netbk_tx_pending_timeout(unsigned long unused)
+ {
+- tasklet_schedule(&net_tx_tasklet);
++ tasklet_schedule(&netbk->net_tx_tasklet);
+ }
+
+ struct net_device_stats *netif_be_get_stats(struct net_device *dev)
+@@ -667,12 +602,12 @@ static int __on_net_schedule_list(struct xen_netif *netif)
+
+ static void remove_from_net_schedule_list(struct xen_netif *netif)
+ {
+- spin_lock_irq(&net_schedule_list_lock);
++ spin_lock_irq(&netbk->net_schedule_list_lock);
+ if (likely(__on_net_schedule_list(netif))) {
+ list_del_init(&netif->list);
+ netif_put(netif);
+ }
+- spin_unlock_irq(&net_schedule_list_lock);
++ spin_unlock_irq(&netbk->net_schedule_list_lock);
+ }
+
+ static void add_to_net_schedule_list_tail(struct xen_netif *netif)
+@@ -680,13 +615,13 @@ static void add_to_net_schedule_list_tail(struct xen_netif *netif)
+ if (__on_net_schedule_list(netif))
+ return;
+
+- spin_lock_irq(&net_schedule_list_lock);
++ spin_lock_irq(&netbk->net_schedule_list_lock);
+ if (!__on_net_schedule_list(netif) &&
+ likely(netif_schedulable(netif))) {
+- list_add_tail(&netif->list, &net_schedule_list);
++ list_add_tail(&netif->list, &netbk->net_schedule_list);
+ netif_get(netif);
+ }
+- spin_unlock_irq(&net_schedule_list_lock);
++ spin_unlock_irq(&netbk->net_schedule_list_lock);
+ }
+
+ void netif_schedule_work(struct xen_netif *netif)
+@@ -736,8 +671,9 @@ static void tx_credit_callback(unsigned long data)
+
+ static inline int copy_pending_req(pending_ring_idx_t pending_idx)
+ {
+- return gnttab_copy_grant_page(grant_tx_handle[pending_idx],
+- &mmap_pages[pending_idx]);
++ return gnttab_copy_grant_page(
++ netbk->grant_tx_handle[pending_idx],
++ &netbk->mmap_pages[pending_idx]);
+ }
+
+ inline static void net_tx_action_dealloc(void)
+@@ -750,22 +686,24 @@ inline static void net_tx_action_dealloc(void)
+ int ret;
+ LIST_HEAD(list);
+
+- dc = dealloc_cons;
+- gop = tx_unmap_ops;
++ dc = netbk->dealloc_cons;
++ gop = netbk->tx_unmap_ops;
+
+ /*
+ * Free up any grants we have finished using
+ */
+ do {
+- dp = dealloc_prod;
++ dp = netbk->dealloc_prod;
+
+ /* Ensure we see all indices enqueued by netif_idx_release(). */
+ smp_rmb();
+
+ while (dc != dp) {
+ unsigned long pfn;
++ struct netbk_tx_pending_inuse *pending_inuse =
++ netbk->pending_inuse;
+
+- pending_idx = dealloc_ring[pending_index(dc++)];
++ pending_idx = netbk->dealloc_ring[pending_index(dc++)];
+ list_move_tail(&pending_inuse[pending_idx].list, &list);
+
+ pfn = idx_to_pfn(pending_idx);
+@@ -773,22 +711,27 @@ inline static void net_tx_action_dealloc(void)
+ if (!phys_to_machine_mapping_valid(pfn))
+ continue;
+
+- gnttab_set_unmap_op(gop, idx_to_kaddr(pending_idx),
+- GNTMAP_host_map,
+- grant_tx_handle[pending_idx]);
++ gnttab_set_unmap_op(gop,
++ idx_to_kaddr(pending_idx),
++ GNTMAP_host_map,
++ netbk->grant_tx_handle[pending_idx]);
+ gop++;
+ }
+
+ if (netbk_copy_skb_mode != NETBK_DELAYED_COPY_SKB ||
+- list_empty(&pending_inuse_head))
++ list_empty(&netbk->pending_inuse_head))
+ break;
+
+ /* Copy any entries that have been pending for too long. */
+- list_for_each_entry_safe(inuse, n, &pending_inuse_head, list) {
++ list_for_each_entry_safe(inuse, n,
++ &netbk->pending_inuse_head, list) {
++ struct pending_tx_info *pending_tx_info;
++ pending_tx_info = netbk->pending_tx_info;
+
-+ if (match_slot(dev, t->dev)) {
-+ pr_info("pciback: vpci: %s: "
-+ "assign to virtual slot %d func %d\n",
-+ pci_name(dev), slot,
-+ PCI_FUNC(dev->devfn));
-+ list_add_tail(&dev_entry->list,
-+ &vpci_dev->dev_list[slot]);
-+ func = PCI_FUNC(dev->devfn);
-+ goto unlock;
-+ }
-+ }
-+ }
+ if (time_after(inuse->alloc_time + HZ / 2, jiffies))
+ break;
+
+- pending_idx = inuse - pending_inuse;
++ pending_idx = inuse - netbk->pending_inuse;
+
+ pending_tx_info[pending_idx].netif->nr_copied_skbs++;
+
+@@ -805,16 +748,21 @@ inline static void net_tx_action_dealloc(void)
+
+ break;
+ }
+- } while (dp != dealloc_prod);
++ } while (dp != netbk->dealloc_prod);
+
+- dealloc_cons = dc;
++ netbk->dealloc_cons = dc;
+
+ ret = HYPERVISOR_grant_table_op(
+- GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops);
++ GNTTABOP_unmap_grant_ref, netbk->tx_unmap_ops,
++ gop - netbk->tx_unmap_ops);
+ BUG_ON(ret);
+
+ list_for_each_entry_safe(inuse, n, &list, list) {
+- pending_idx = inuse - pending_inuse;
++ struct pending_tx_info *pending_tx_info;
++ pending_ring_idx_t index;
+
-+ /* Assign to a new slot on the virtual PCI bus */
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ if (list_empty(&vpci_dev->dev_list[slot])) {
-+ printk(KERN_INFO
-+ "pciback: vpci: %s: assign to virtual slot %d\n",
-+ pci_name(dev), slot);
-+ list_add_tail(&dev_entry->list,
-+ &vpci_dev->dev_list[slot]);
-+ func = PCI_FUNC(dev->devfn);
-+ goto unlock;
-+ }
-+ }
-+
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "No more space on root virtual PCI bus");
-+
-+ unlock:
-+ spin_unlock_irqrestore(&vpci_dev->lock, flags);
-+
-+ /* Publish this device. */
-+ if(!err)
-+ err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid);
-+
-+ out:
-+ return err;
-+}
-+
-+void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
-+{
-+ int slot;
-+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
-+ struct pci_dev *found_dev = NULL;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&vpci_dev->lock, flags);
++ pending_tx_info = netbk->pending_tx_info;
++ pending_idx = inuse - netbk->pending_inuse;
+
+ netif = pending_tx_info[pending_idx].netif;
+
+@@ -822,9 +770,10 @@ inline static void net_tx_action_dealloc(void)
+ NETIF_RSP_OKAY);
+
+ /* Ready for next use. */
+- gnttab_reset_grant_page(mmap_pages[pending_idx]);
++ gnttab_reset_grant_page(netbk->mmap_pages[pending_idx]);
+
+- pending_ring[pending_index(pending_prod++)] = pending_idx;
++ index = pending_index(netbk->pending_prod++);
++ netbk->pending_ring[index] = pending_idx;
+
+ netif_put(netif);
+
+@@ -832,7 +781,8 @@ inline static void net_tx_action_dealloc(void)
+ }
+ }
+
+-static void netbk_tx_err(struct xen_netif *netif, struct xen_netif_tx_request *txp, RING_IDX end)
++static void netbk_tx_err(struct xen_netif *netif,
++ struct xen_netif_tx_request *txp, RING_IDX end)
+ {
+ RING_IDX cons = netif->tx.req_cons;
+
+@@ -902,7 +852,12 @@ static struct gnttab_map_grant_ref *netbk_get_requests(struct xen_netif *netif,
+ start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+
+ for (i = start; i < shinfo->nr_frags; i++, txp++) {
+- pending_idx = pending_ring[pending_index(pending_cons++)];
++ pending_ring_idx_t index;
++ struct pending_tx_info *pending_tx_info =
++ netbk->pending_tx_info;
+
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ struct pci_dev_entry *e, *tmp;
-+ list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
-+ list) {
-+ if (e->dev == dev) {
-+ list_del(&e->list);
-+ found_dev = e->dev;
-+ kfree(e);
-+ goto out;
-+ }
-+ }
++ index = pending_index(netbk->pending_cons++);
++ pending_idx = netbk->pending_ring[index];
+
+ gnttab_set_map_op(mop++, idx_to_kaddr(pending_idx),
+ GNTMAP_host_map | GNTMAP_readonly,
+@@ -922,6 +877,7 @@ static int netbk_tx_check_mop(struct sk_buff *skb,
+ {
+ struct gnttab_map_grant_ref *mop = *mopp;
+ int pending_idx = *((u16 *)skb->data);
++ struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
+ struct xen_netif *netif = pending_tx_info[pending_idx].netif;
+ struct xen_netif_tx_request *txp;
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+@@ -931,15 +887,17 @@ static int netbk_tx_check_mop(struct sk_buff *skb,
+ /* Check status of header. */
+ err = mop->status;
+ if (unlikely(err)) {
++ pending_ring_idx_t index;
++ index = pending_index(netbk->pending_prod++);
+ txp = &pending_tx_info[pending_idx].req;
+ make_tx_response(netif, txp, NETIF_RSP_ERROR);
+- pending_ring[pending_index(pending_prod++)] = pending_idx;
++ netbk->pending_ring[index] = pending_idx;
+ netif_put(netif);
+ } else {
+ set_phys_to_machine(
+ __pa(idx_to_kaddr(pending_idx)) >> PAGE_SHIFT,
+ FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
+- grant_tx_handle[pending_idx] = mop->handle;
++ netbk->grant_tx_handle[pending_idx] = mop->handle;
+ }
+
+ /* Skip first skb fragment if it is on same page as header fragment. */
+@@ -947,16 +905,19 @@ static int netbk_tx_check_mop(struct sk_buff *skb,
+
+ for (i = start; i < nr_frags; i++) {
+ int j, newerr;
++ pending_ring_idx_t index;
+
+ pending_idx = (unsigned long)shinfo->frags[i].page;
+
+ /* Check error status: if okay then remember grant handle. */
+ newerr = (++mop)->status;
+ if (likely(!newerr)) {
++ unsigned long addr;
++ addr = idx_to_kaddr(pending_idx);
+ set_phys_to_machine(
+- __pa(idx_to_kaddr(pending_idx))>>PAGE_SHIFT,
++ __pa(addr)>>PAGE_SHIFT,
+ FOREIGN_FRAME(mop->dev_bus_addr>>PAGE_SHIFT));
+- grant_tx_handle[pending_idx] = mop->handle;
++ netbk->grant_tx_handle[pending_idx] = mop->handle;
+ /* Had a previous error? Invalidate this fragment. */
+ if (unlikely(err))
+ netif_idx_release(pending_idx);
+@@ -964,9 +925,10 @@ static int netbk_tx_check_mop(struct sk_buff *skb,
+ }
+
+ /* Error on this fragment: respond to client with an error. */
+- txp = &pending_tx_info[pending_idx].req;
++ txp = &netbk->pending_tx_info[pending_idx].req;
+ make_tx_response(netif, txp, NETIF_RSP_ERROR);
+- pending_ring[pending_index(pending_prod++)] = pending_idx;
++ index = pending_index(netbk->pending_prod++);
++ netbk->pending_ring[index] = pending_idx;
+ netif_put(netif);
+
+ /* Not the first error? Preceding frags already invalidated. */
+@@ -1002,11 +964,11 @@ static void netbk_fill_frags(struct sk_buff *skb)
+
+ pending_idx = (unsigned long)frag->page;
+
+- pending_inuse[pending_idx].alloc_time = jiffies;
+- list_add_tail(&pending_inuse[pending_idx].list,
+- &pending_inuse_head);
++ netbk->pending_inuse[pending_idx].alloc_time = jiffies;
++ list_add_tail(&netbk->pending_inuse[pending_idx].list,
++ &netbk->pending_inuse_head);
+
+- txp = &pending_tx_info[pending_idx].req;
++ txp = &netbk->pending_tx_info[pending_idx].req;
+ frag->page = virt_to_page(idx_to_kaddr(pending_idx));
+ frag->size = txp->size;
+ frag->page_offset = txp->offset;
+@@ -1145,9 +1107,9 @@ static unsigned net_tx_build_mops(void)
+ struct sk_buff *skb;
+ int ret;
+
+- mop = tx_map_ops;
++ mop = netbk->tx_map_ops;
+ while (((nr_pending_reqs() + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
+- !list_empty(&net_schedule_list)) {
++ !list_empty(&netbk->net_schedule_list)) {
+ struct xen_netif *netif;
+ struct xen_netif_tx_request txreq;
+ struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
+@@ -1156,9 +1118,11 @@ static unsigned net_tx_build_mops(void)
+ RING_IDX idx;
+ int work_to_do;
+ unsigned int data_len;
++ pending_ring_idx_t index;
+
+ /* Get a netif from the list with work to do. */
+- netif = list_first_entry(&net_schedule_list, struct xen_netif, list);
++ netif = list_first_entry(&netbk->net_schedule_list,
++ struct xen_netif, list);
+ netif_get(netif);
+ remove_from_net_schedule_list(netif);
+
+@@ -1217,7 +1181,8 @@ static unsigned net_tx_build_mops(void)
+ continue;
+ }
+
+- pending_idx = pending_ring[pending_index(pending_cons)];
++ index = pending_index(netbk->pending_cons);
++ pending_idx = netbk->pending_ring[index];
+
+ data_len = (txreq.size > PKT_PROT_LEN &&
+ ret < MAX_SKB_FRAGS) ?
+@@ -1250,9 +1215,9 @@ static unsigned net_tx_build_mops(void)
+ txreq.gref, netif->domid);
+ mop++;
+
+- memcpy(&pending_tx_info[pending_idx].req,
++ memcpy(&netbk->pending_tx_info[pending_idx].req,
+ &txreq, sizeof(txreq));
+- pending_tx_info[pending_idx].netif = netif;
++ netbk->pending_tx_info[pending_idx].netif = netif;
+ *((u16 *)skb->data) = pending_idx;
+
+ __skb_put(skb, data_len);
+@@ -1267,20 +1232,20 @@ static unsigned net_tx_build_mops(void)
+ skb_shinfo(skb)->frags[0].page = (void *)~0UL;
+ }
+
+- __skb_queue_tail(&tx_queue, skb);
++ __skb_queue_tail(&netbk->tx_queue, skb);
+
+- pending_cons++;
++ netbk->pending_cons++;
+
+ mop = netbk_get_requests(netif, skb, txfrags, mop);
+
+ netif->tx.req_cons = idx;
+ netif_schedule_work(netif);
+
+- if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
++ if ((mop - netbk->tx_map_ops) >= ARRAY_SIZE(netbk->tx_map_ops))
+ break;
+ }
+
+- return mop - tx_map_ops;
++ return mop - netbk->tx_map_ops;
+ }
+
+ static void net_tx_submit(void)
+@@ -1288,16 +1253,16 @@ static void net_tx_submit(void)
+ struct gnttab_map_grant_ref *mop;
+ struct sk_buff *skb;
+
+- mop = tx_map_ops;
+- while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
++ mop = netbk->tx_map_ops;
++ while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) {
+ struct xen_netif_tx_request *txp;
+ struct xen_netif *netif;
+ u16 pending_idx;
+ unsigned data_len;
+
+ pending_idx = *((u16 *)skb->data);
+- netif = pending_tx_info[pending_idx].netif;
+- txp = &pending_tx_info[pending_idx].req;
++ netif = netbk->pending_tx_info[pending_idx].netif;
++ txp = &netbk->pending_tx_info[pending_idx].req;
+
+ /* Check the remap error code. */
+ if (unlikely(netbk_tx_check_mop(skb, &mop))) {
+@@ -1363,12 +1328,13 @@ static void net_tx_submit(void)
+ }
+
+ if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB &&
+- !list_empty(&pending_inuse_head)) {
++ !list_empty(&netbk->pending_inuse_head)) {
+ struct netbk_tx_pending_inuse *oldest;
+
+- oldest = list_entry(pending_inuse_head.next,
++ oldest = list_entry(netbk->pending_inuse_head.next,
+ struct netbk_tx_pending_inuse, list);
+- mod_timer(&netbk_tx_pending_timer, oldest->alloc_time + HZ);
++ mod_timer(&netbk->netbk_tx_pending_timer,
++ oldest->alloc_time + HZ);
+ }
+ }
+
+@@ -1378,7 +1344,7 @@ static void net_tx_action(unsigned long unused)
+ unsigned nr_mops;
+ int ret;
+
+- if (dealloc_cons != dealloc_prod)
++ if (netbk->dealloc_cons != netbk->dealloc_prod)
+ net_tx_action_dealloc();
+
+ nr_mops = net_tx_build_mops();
+@@ -1387,7 +1353,7 @@ static void net_tx_action(unsigned long unused)
+ return;
+
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
+- tx_map_ops, nr_mops);
++ netbk->tx_map_ops, nr_mops);
+ BUG_ON(ret);
+
+ net_tx_submit();
+@@ -1397,15 +1363,17 @@ static void netif_idx_release(u16 pending_idx)
+ {
+ static DEFINE_SPINLOCK(_lock);
+ unsigned long flags;
++ pending_ring_idx_t index;
+
+ spin_lock_irqsave(&_lock, flags);
+- dealloc_ring[pending_index(dealloc_prod)] = pending_idx;
++ index = pending_index(netbk->dealloc_prod);
++ netbk->dealloc_ring[index] = pending_idx;
+ /* Sync with net_tx_action_dealloc: insert idx /then/ incr producer. */
+ smp_wmb();
+- dealloc_prod++;
++ netbk->dealloc_prod++;
+ spin_unlock_irqrestore(&_lock, flags);
+
+- tasklet_schedule(&net_tx_tasklet);
++ tasklet_schedule(&netbk->net_tx_tasklet);
+ }
+
+ static void netif_page_release(struct page *page, unsigned int order)
+@@ -1481,9 +1449,9 @@ static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
+ int i = 0;
+
+ printk(KERN_ALERT "netif_schedule_list:\n");
+- spin_lock_irq(&net_schedule_list_lock);
++ spin_lock_irq(&netbk->net_schedule_list_lock);
+
+- list_for_each (ent, &net_schedule_list) {
++ list_for_each(ent, &netbk->net_schedule_list) {
+ netif = list_entry(ent, struct xen_netif, list);
+ printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
+ "rx_resp_prod=%08x\n",
+@@ -1500,7 +1468,7 @@ static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
+ i++;
+ }
+
+- spin_unlock_irq(&net_schedule_list_lock);
++ spin_unlock_irq(&netbk->net_schedule_list_lock);
+ printk(KERN_ALERT " ** End of netif_schedule_list **\n");
+
+ return IRQ_HANDLED;
+@@ -1516,37 +1484,53 @@ static int __init netback_init(void)
+ if (!xen_domain())
+ return -ENODEV;
+
++ netbk = (struct xen_netbk *)vmalloc(sizeof(struct xen_netbk));
++ if (!netbk) {
++ printk(KERN_ALERT "%s: out of memory\n", __func__);
++ return -ENOMEM;
+ }
+
-+ out:
-+ spin_unlock_irqrestore(&vpci_dev->lock, flags);
-+
-+ if (found_dev)
-+ pcistub_put_pci_dev(found_dev);
-+}
-+
-+int pciback_init_devices(struct pciback_device *pdev)
-+{
-+ int slot;
-+ struct vpci_dev_data *vpci_dev;
+ /* We can increase reservation by this much in net_rx_action(). */
+ // balloon_update_driver_allowance(NET_RX_RING_SIZE);
+
+- skb_queue_head_init(&rx_queue);
+- skb_queue_head_init(&tx_queue);
++ skb_queue_head_init(&netbk->rx_queue);
++ skb_queue_head_init(&netbk->tx_queue);
+
+- init_timer(&net_timer);
+- net_timer.data = 0;
+- net_timer.function = net_alarm;
++ init_timer(&netbk->net_timer);
++ netbk->net_timer.data = 0;
++ netbk->net_timer.function = net_alarm;
+
+- init_timer(&netbk_tx_pending_timer);
+- netbk_tx_pending_timer.data = 0;
+- netbk_tx_pending_timer.function = netbk_tx_pending_timeout;
++ init_timer(&netbk->netbk_tx_pending_timer);
++ netbk->netbk_tx_pending_timer.data = 0;
++ netbk->netbk_tx_pending_timer.function = netbk_tx_pending_timeout;
+
+- mmap_pages = alloc_empty_pages_and_pagevec(MAX_PENDING_REQS);
+- if (mmap_pages == NULL) {
+- printk("%s: out of memory\n", __FUNCTION__);
+- return -ENOMEM;
++ netbk->mmap_pages =
++ alloc_empty_pages_and_pagevec(MAX_PENDING_REQS);
++ if (!netbk->mmap_pages) {
++ printk(KERN_ALERT "%s: out of memory\n", __func__);
++ rc = -ENOMEM;
++ goto failed_init2;
+ }
+
+ for (i = 0; i < MAX_PENDING_REQS; i++) {
+- page = mmap_pages[i];
++ page = netbk->mmap_pages[i];
+ SetPageForeign(page, netif_page_release);
+ netif_set_page_index(page, i);
+- INIT_LIST_HEAD(&pending_inuse[i].list);
++ INIT_LIST_HEAD(&netbk->pending_inuse[i].list);
+ }
+
+- pending_cons = 0;
+- pending_prod = MAX_PENDING_REQS;
++ netbk->pending_cons = 0;
++ netbk->pending_prod = MAX_PENDING_REQS;
+ for (i = 0; i < MAX_PENDING_REQS; i++)
+- pending_ring[i] = i;
++ netbk->pending_ring[i] = i;
+
-+ vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
-+ if (!vpci_dev)
-+ return -ENOMEM;
++ tasklet_init(&netbk->net_tx_tasklet, net_tx_action, 0);
++ tasklet_init(&netbk->net_rx_tasklet, net_rx_action, 0);
+
-+ spin_lock_init(&vpci_dev->lock);
++ INIT_LIST_HEAD(&netbk->pending_inuse_head);
++ INIT_LIST_HEAD(&netbk->net_schedule_list);
+
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);
-+ }
++ spin_lock_init(&netbk->net_schedule_list_lock);
+
+ netbk_copy_skb_mode = NETBK_DONT_COPY_SKB;
+ if (MODPARM_copy_skb) {
+@@ -1561,7 +1545,7 @@ static int __init netback_init(void)
+
+ rc = netif_xenbus_init();
+ if (rc)
+- goto failed_init;
++ goto failed_init1;
+
+ #ifdef NETBE_DEBUG_INTERRUPT
+ (void)bind_virq_to_irqhandler(VIRQ_DEBUG,
+@@ -1574,10 +1558,12 @@ static int __init netback_init(void)
+
+ return 0;
+
+-failed_init:
+- free_empty_pages_and_pagevec(mmap_pages, MAX_PENDING_REQS);
+- del_timer(&netbk_tx_pending_timer);
+- del_timer(&net_timer);
++failed_init1:
++ free_empty_pages_and_pagevec(netbk->mmap_pages, MAX_PENDING_REQS);
++failed_init2:
++ del_timer(&netbk->netbk_tx_pending_timer);
++ del_timer(&netbk->net_timer);
++ vfree(netbk);
+ return rc;
+
+ }
+--
+1.7.4
+
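The shape of this refactor, independent of the driver specifics, is a set of globals folded into one allocated state object; a minimal sketch under that reading (all names hypothetical):

#include <stdlib.h>

/* Toy model: formerly-global ring state now lives in a single
 * heap-allocated struct, reached through one pointer, which is the
 * precondition for later instantiating it per group or per CPU. */
#define TOY_MAX_PENDING_REQS 256

struct toy_netbk {
	unsigned int pending_prod, pending_cons;
	unsigned short pending_ring[TOY_MAX_PENDING_REQS];
};

static struct toy_netbk *toy_netbk;

static unsigned int toy_nr_pending_reqs(void)
{
	/* Same expression as before, now read through the struct. */
	return TOY_MAX_PENDING_REQS -
	       toy_netbk->pending_prod + toy_netbk->pending_cons;
}

int main(void)
{
	toy_netbk = calloc(1, sizeof(*toy_netbk));
	if (!toy_netbk)
		return 1;
	toy_netbk->pending_prod = TOY_MAX_PENDING_REQS;	/* ring fully free */
	(void)toy_nr_pending_reqs();	/* 0: nothing outstanding yet */
	free(toy_netbk);
	return 0;
}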
+
+From c099c22d8b1c12fc7d68998982eb4ccd4918e813 Mon Sep 17 00:00:00 2001
+From: Dongxiao Xu <dongxiao.xu(a)intel.com>
+Date: Wed, 19 May 2010 16:58:57 -0700
+Subject: [PATCH 038/203] xen: netback: Introduce a new struct type page_ext.
+
+struct page_ext stores the group and idx information by which a
+specific page can be identified.
+
+Signed-off-by: Dongxiao Xu <dongxiao.xu(a)intel.com>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/common.h | 15 +++++++++++++++
+ drivers/xen/netback/netback.c | 28 +++++++++++++++++-----------
+ 2 files changed, 32 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index 00208f4..5e0e467 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -240,6 +240,21 @@ struct netbk_tx_pending_inuse {
+
+ #define MAX_PENDING_REQS 256
+
++/* extra field used in struct page */
++union page_ext {
++ struct {
++#if BITS_PER_LONG < 64
++#define IDX_WIDTH 8
++#define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
++ unsigned int group:GROUP_WIDTH;
++ unsigned int idx:IDX_WIDTH;
++#else
++ unsigned int group, idx;
++#endif
++ } e;
++ void *mapping;
++};
+
-+ pdev->pci_dev_data = vpci_dev;
+ struct xen_netbk {
+ struct tasklet_struct net_tx_tasklet;
+ struct tasklet_struct net_rx_tasklet;
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 417f497..71ec999 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -76,22 +76,27 @@ static inline unsigned long idx_to_kaddr(unsigned int idx)
+ }
+
+ /* extra field used in struct page */
+-static inline void netif_set_page_index(struct page *pg, unsigned int index)
++static inline void netif_set_page_ext(struct page *pg, unsigned int group,
++ unsigned int idx)
+ {
+- *(unsigned long *)&pg->mapping = index + 1;
++ union page_ext ext = { .e = { .group = group + 1, .idx = idx } };
+
-+ return 0;
++ BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
++ pg->mapping = ext.mapping;
+ }
+
+-static inline int netif_page_index(struct page *pg)
++static inline unsigned int netif_page_group(const struct page *pg)
+ {
+- unsigned long idx = (unsigned long)pg->mapping - 1;
++ union page_ext ext = { .mapping = pg->mapping };
+
+- if (!PageForeign(pg))
+- return -1;
++ return ext.e.group - 1;
+}
-+
-+int pciback_publish_pci_roots(struct pciback_device *pdev,
-+ publish_pci_root_cb publish_cb)
-+{
-+ /* The Virtual PCI bus has only one root */
-+ return publish_cb(pdev, 0, 0);
-+}
-+
-+void pciback_release_devices(struct pciback_device *pdev)
-+{
-+ int slot;
-+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
-+
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ struct pci_dev_entry *e, *tmp;
-+ list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
-+ list) {
-+ list_del(&e->list);
-+ pcistub_put_pci_dev(e->dev);
-+ kfree(e);
-+ }
-+ }
-+
-+ kfree(vpci_dev);
-+ pdev->pci_dev_data = NULL;
-+}
-+
-+int pciback_get_pcifront_dev(struct pci_dev *pcidev, struct pciback_device *pdev,
-+ unsigned int *domain, unsigned int *bus, unsigned int *devfn)
-+{
-+ struct pci_dev_entry *entry;
-+ struct pci_dev *dev = NULL;
-+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
-+ unsigned long flags;
-+ int found = 0, slot;
-+
-+ spin_lock_irqsave(&vpci_dev->lock, flags);
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ list_for_each_entry(entry,
-+ &vpci_dev->dev_list[slot],
-+ list) {
-+ dev = entry->dev;
-+ if (dev && dev->bus->number == pcidev->bus->number
-+ && pci_domain_nr(dev->bus) == pci_domain_nr(pcidev->bus)
-+ && dev->devfn == pcidev->devfn)
-+ {
-+ found = 1;
-+ *domain = 0;
-+ *bus = 0;
-+ *devfn = PCI_DEVFN(slot, PCI_FUNC(pcidev->devfn));
-+ }
-+ }
-+ }
-+ spin_unlock_irqrestore(&vpci_dev->lock, flags);
-+ return found;
-+}
-diff --git a/drivers/xen/pciback/xenbus.c b/drivers/xen/pciback/xenbus.c
-new file mode 100644
-index 0000000..4d56c45
---- /dev/null
-+++ b/drivers/xen/pciback/xenbus.c
-@@ -0,0 +1,710 @@
-+/*
-+ * PCI Backend Xenbus Setup - handles setup with frontend and xend
-+ *
-+ * Author: Ryan Wilson <hap9(a)epoch.ncsc.mil>
-+ */
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/list.h>
-+#include <linux/vmalloc.h>
-+#include <xen/xenbus.h>
-+#include <xen/evtchn.h>
-+#include "pciback.h"
-+
-+#define INVALID_EVTCHN_IRQ (-1)
-+struct workqueue_struct *pciback_wq;
-+
-+static struct pciback_device *alloc_pdev(struct xenbus_device *xdev)
-+{
-+ struct pciback_device *pdev;
-+
-+ pdev = kzalloc(sizeof(struct pciback_device), GFP_KERNEL);
-+ if (pdev == NULL)
-+ goto out;
-+ dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev);
-+
-+ pdev->xdev = xdev;
-+ xdev->dev.driver_data = pdev;
-+
-+ spin_lock_init(&pdev->dev_lock);
-+
-+ pdev->sh_area = NULL;
-+ pdev->sh_info = NULL;
-+ pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
-+ pdev->be_watching = 0;
-+
-+ INIT_WORK(&pdev->op_work, pciback_do_op, pdev);
-+
-+ if (pciback_init_devices(pdev)) {
-+ kfree(pdev);
-+ pdev = NULL;
-+ }
-+ out:
-+ return pdev;
-+}
-+
-+static void pciback_disconnect(struct pciback_device *pdev)
-+{
-+ spin_lock(&pdev->dev_lock);
-+
-+ /* Ensure the guest can't trigger our handler before removing devices */
-+ if (pdev->evtchn_irq != INVALID_EVTCHN_IRQ) {
-+ unbind_from_irqhandler(pdev->evtchn_irq, pdev);
-+ pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
-+ }
-+
-+ /* If the driver domain started an op, make sure we complete it
-+ * before releasing the shared memory */
-+ flush_workqueue(pciback_wq);
-+
-+ if (pdev->sh_info != NULL) {
-+ xenbus_unmap_ring_vfree(pdev->xdev, pdev->sh_area);
-+ pdev->sh_info = NULL;
-+ }
-+
-+ spin_unlock(&pdev->dev_lock);
-+}
-+
-+static void free_pdev(struct pciback_device *pdev)
-+{
-+ if (pdev->be_watching)
-+ unregister_xenbus_watch(&pdev->be_watch);
-+
-+ pciback_disconnect(pdev);
-+
-+ pciback_release_devices(pdev);
-+
-+ pdev->xdev->dev.driver_data = NULL;
-+ pdev->xdev = NULL;
-+
-+ kfree(pdev);
-+}
-+
-+static int pciback_do_attach(struct pciback_device *pdev, int gnt_ref,
-+ int remote_evtchn)
-+{
-+ int err = 0;
-+ struct vm_struct *area;
-+
-+ dev_dbg(&pdev->xdev->dev,
-+ "Attaching to frontend resources - gnt_ref=%d evtchn=%d\n",
-+ gnt_ref, remote_evtchn);
-+
-+ area = xenbus_map_ring_valloc(pdev->xdev, gnt_ref);
-+ if (IS_ERR(area)) {
-+ err = PTR_ERR(area);
-+ goto out;
-+ }
-+ pdev->sh_area = area;
-+ pdev->sh_info = area->addr;
-+
-+ err = bind_interdomain_evtchn_to_irqhandler(
-+ pdev->xdev->otherend_id, remote_evtchn, pciback_handle_event,
-+ SA_SAMPLE_RANDOM, "pciback", pdev);
-+ if (err < 0) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error binding event channel to IRQ");
-+ goto out;
-+ }
-+ pdev->evtchn_irq = err;
-+ err = 0;
-+
-+ dev_dbg(&pdev->xdev->dev, "Attached!\n");
-+ out:
-+ return err;
-+}
-+
-+static int pciback_attach(struct pciback_device *pdev)
+
+- if ((idx >= MAX_PENDING_REQS) || (netbk->mmap_pages[idx] != pg))
+- return -1;
++static inline unsigned int netif_page_index(const struct page *pg)
+{
-+ int err = 0;
-+ int gnt_ref, remote_evtchn;
-+ char *magic = NULL;
-+
-+ spin_lock(&pdev->dev_lock);
-+
-+ /* Make sure we only do this setup once */
-+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
-+ XenbusStateInitialised)
-+ goto out;
-+
-+ /* Wait for frontend to state that it has published the configuration */
-+ if (xenbus_read_driver_state(pdev->xdev->otherend) !=
-+ XenbusStateInitialised)
-+ goto out;
-+
-+ dev_dbg(&pdev->xdev->dev, "Reading frontend config\n");
-+
-+ err = xenbus_gather(XBT_NIL, pdev->xdev->otherend,
-+ "pci-op-ref", "%u", &gnt_ref,
-+ "event-channel", "%u", &remote_evtchn,
-+ "magic", NULL, &magic, NULL);
-+ if (err) {
-+ /* If configuration didn't get read correctly, wait longer */
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error reading configuration from frontend");
-+ goto out;
-+ }
-+
-+ if (magic == NULL || strcmp(magic, XEN_PCI_MAGIC) != 0) {
-+ xenbus_dev_fatal(pdev->xdev, -EFAULT,
-+ "version mismatch (%s/%s) with pcifront - "
-+ "halting pciback",
-+ magic, XEN_PCI_MAGIC);
-+ goto out;
-+ }
-+
-+ err = pciback_do_attach(pdev, gnt_ref, remote_evtchn);
-+ if (err)
-+ goto out;
-+
-+ dev_dbg(&pdev->xdev->dev, "Connecting...\n");
-+
-+ err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);
-+ if (err)
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error switching to connected state!");
-+
-+ dev_dbg(&pdev->xdev->dev, "Connected? %d\n", err);
-+ out:
-+ spin_unlock(&pdev->dev_lock);
-+
-+ if (magic)
-+ kfree(magic);
++ union page_ext ext = { .mapping = pg->mapping };
+
+- return idx;
++ return ext.e.idx;
+ }
+
+ /*
+@@ -1380,7 +1385,8 @@ static void netif_page_release(struct page *page, unsigned int order)
+ {
+ int idx = netif_page_index(page);
+ BUG_ON(order);
+- BUG_ON(idx < 0);
++ BUG_ON(idx < 0 || idx >= MAX_PENDING_REQS);
++ BUG_ON(netbk->mmap_pages[idx] != page);
+ netif_idx_release(idx);
+ }
+
+@@ -1515,7 +1521,7 @@ static int __init netback_init(void)
+ for (i = 0; i < MAX_PENDING_REQS; i++) {
+ page = netbk->mmap_pages[i];
+ SetPageForeign(page, netif_page_release);
+- netif_set_page_index(page, i);
++ netif_set_page_ext(page, 0, i);
+ INIT_LIST_HEAD(&netbk->pending_inuse[i].list);
+ }
+
+--
+1.7.4
+
+
+From 9534985c5b9cc3f6238d6cb8bba7d376e82039d3 Mon Sep 17 00:00:00 2001
+From: Dongxiao Xu <dongxiao.xu(a)intel.com>
+Date: Wed, 19 May 2010 17:08:21 -0700
+Subject: [PATCH 039/203] xen: netback: Multiple tasklets support.
+
+Currently netback uses a single pair of tasklets for Tx/Rx data
+transfer. A tasklet can only run on one CPU at a time, and this
+single pair serves all the netfronts, so it has become a
+performance bottleneck. This patch replaces the single pair in
+dom0 with multiple tasklet pairs.
+
+Assuming that dom0 has CPUNR VCPUs, we define CPUNR tasklet pairs
+(CPUNR for Tx, and CPUNR for Rx). Each pair of tasklets serves a
+specific group of netfronts. The global and static variables are
+also duplicated per group to avoid spinlock contention.
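+
+As an illustrative sketch (standalone C, not part of the patch;
+group_load[] is a hypothetical stand-in for the per-group
+netfront_count), the least-loaded assignment policy amounts to:
+
+  /* Return the index of the group currently serving the fewest
+   * netfronts; a new interface is then attached to that group. */
+  static int pick_group(const int *group_load, int group_nr)
+  {
+          int i, min_group = 0;
+
+          for (i = 1; i < group_nr; i++)
+                  if (group_load[i] < group_load[min_group])
+                          min_group = i;
+          return min_group;
+  }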
+
+Signed-off-by: Dongxiao Xu <dongxiao.xu(a)intel.com>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/common.h | 6 +
+ drivers/xen/netback/interface.c | 27 ++++
+ drivers/xen/netback/netback.c | 270 ++++++++++++++++++++++++---------------
+ 3 files changed, 197 insertions(+), 106 deletions(-)
+
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index 5e0e467..847ba58 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -58,6 +58,7 @@
+ struct xen_netif {
+ /* Unique identifier for this interface. */
+ domid_t domid;
++ int group;
+ unsigned int handle;
+
+ u8 fe_dev_addr[6];
+@@ -278,6 +279,8 @@ struct xen_netbk {
+ /* Protect the net_schedule_list in netif. */
+ spinlock_t net_schedule_list_lock;
+
++ atomic_t netfront_count;
+
-+ return err;
-+}
+ struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
+ struct netbk_tx_pending_inuse pending_inuse[MAX_PENDING_REQS];
+ struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
+@@ -296,4 +299,7 @@ struct xen_netbk {
+ struct netbk_rx_meta meta[NET_RX_RING_SIZE];
+ };
+
++extern struct xen_netbk *xen_netbk;
++extern int xen_netbk_group_nr;
+
-+static int pciback_publish_pci_dev(struct pciback_device *pdev,
-+ unsigned int domain, unsigned int bus,
-+ unsigned int devfn, unsigned int devid)
+ #endif /* __NETIF__BACKEND__COMMON_H__ */
+diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
+index 086d939..172ef4c 100644
+--- a/drivers/xen/netback/interface.c
++++ b/drivers/xen/netback/interface.c
+@@ -54,8 +54,33 @@
+ static unsigned long netbk_queue_length = 32;
+ module_param_named(queue_length, netbk_queue_length, ulong, 0644);
+
++static void netbk_add_netif(struct xen_netbk *netbk, int group_nr,
++ struct xen_netif *netif)
+{
-+ int err;
-+ int len;
-+ char str[64];
-+
-+ len = snprintf(str, sizeof(str), "vdev-%d", devid);
-+ if (unlikely(len >= (sizeof(str) - 1))) {
-+ err = -ENOMEM;
-+ goto out;
++ int i;
++ int min_netfront_count;
++ int min_group = 0;
++ min_netfront_count = atomic_read(&netbk[0].netfront_count);
++ for (i = 0; i < group_nr; i++) {
++ int netfront_count = atomic_read(&netbk[i].netfront_count);
++ if (netfront_count < min_netfront_count) {
++ min_group = i;
++ min_netfront_count = netfront_count;
++ }
+ }
+
-+ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
-+ "%04x:%02x:%02x.%02x", domain, bus,
-+ PCI_SLOT(devfn), PCI_FUNC(devfn));
-+
-+ out:
-+ return err;
++ netif->group = min_group;
++ atomic_inc(&netbk[netif->group].netfront_count);
+}
+
-+static int pciback_export_device(struct pciback_device *pdev,
-+ int domain, int bus, int slot, int func,
-+ int devid)
++static void netbk_remove_netif(struct xen_netbk *netbk, struct xen_netif *netif)
+{
-+ struct pci_dev *dev;
-+ int err = 0;
-+
-+ dev_dbg(&pdev->xdev->dev, "exporting dom %x bus %x slot %x func %x\n",
-+ domain, bus, slot, func);
-+
-+ dev = pcistub_get_pci_dev_by_slot(pdev, domain, bus, slot, func);
-+ if (!dev) {
-+ err = -EINVAL;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Couldn't locate PCI device "
-+ "(%04x:%02x:%02x.%01x)! "
-+ "perhaps already in-use?",
-+ domain, bus, slot, func);
-+ goto out;
-+ }
-+
-+ err = pciback_add_pci_dev(pdev, dev, devid, pciback_publish_pci_dev);
-+ if (err)
-+ goto out;
-+
-+ /* TODO: It'd be nice to export a bridge and have all of its children
-+ * get exported with it. This may be best done in xend (which will
-+ * have to calculate resource usage anyway) but we probably want to
-+ * put something in here to ensure that if a bridge gets given to a
-+ * driver domain, that all devices under that bridge are not given
-+ * to other driver domains (as he who controls the bridge can disable
-+ * it and stop the other devices from working).
-+ */
-+ out:
-+ return err;
++ atomic_dec(&netbk[netif->group].netfront_count);
+}
+
-+static int pciback_remove_device(struct pciback_device *pdev,
-+ int domain, int bus, int slot, int func)
-+{
-+ int err = 0;
-+ struct pci_dev *dev;
-+
-+ dev_dbg(&pdev->xdev->dev, "removing dom %x bus %x slot %x func %x\n",
-+ domain, bus, slot, func);
-+
-+ dev = pciback_get_pci_dev(pdev, domain, bus, PCI_DEVFN(slot, func));
-+ if (!dev) {
-+ err = -EINVAL;
-+ dev_dbg(&pdev->xdev->dev, "Couldn't locate PCI device "
-+ "(%04x:%02x:%02x.%01x)! not owned by this domain\n",
-+ domain, bus, slot, func);
-+ goto out;
-+ }
+ static void __netif_up(struct xen_netif *netif)
+ {
++ netbk_add_netif(xen_netbk, xen_netbk_group_nr, netif);
+ enable_irq(netif->irq);
+ netif_schedule_work(netif);
+ }
+@@ -64,6 +89,7 @@ static void __netif_down(struct xen_netif *netif)
+ {
+ disable_irq(netif->irq);
+ netif_deschedule_work(netif);
++ netbk_remove_netif(xen_netbk, netif);
+ }
+
+ static int net_open(struct net_device *dev)
+@@ -214,6 +240,7 @@ struct xen_netif *netif_alloc(struct device *parent, domid_t domid, unsigned int
+ netif = netdev_priv(dev);
+ memset(netif, 0, sizeof(*netif));
+ netif->domid = domid;
++ netif->group = -1;
+ netif->handle = handle;
+ netif->features = NETIF_F_SG;
+ atomic_set(&netif->refcnt, 1);
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 71ec999..feefb14 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -48,9 +48,10 @@
+
+ /*define NETBE_DEBUG_INTERRUPT*/
+
+-static struct xen_netbk *netbk;
++struct xen_netbk *xen_netbk;
++int xen_netbk_group_nr;
+
+-static void netif_idx_release(u16 pending_idx);
++static void netif_idx_release(struct xen_netbk *netbk, u16 pending_idx);
+ static void make_tx_response(struct xen_netif *netif,
+ struct xen_netif_tx_request *txp,
+ s8 st);
+@@ -61,18 +62,20 @@ static struct xen_netif_rx_response *make_rx_response(struct xen_netif *netif,
+ u16 size,
+ u16 flags);
+
+-static void net_tx_action(unsigned long unused);
++static void net_tx_action(unsigned long data);
+
+-static void net_rx_action(unsigned long unused);
++static void net_rx_action(unsigned long data);
+
+-static inline unsigned long idx_to_pfn(unsigned int idx)
++static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
++ unsigned int idx)
+ {
+ return page_to_pfn(netbk->mmap_pages[idx]);
+ }
+
+-static inline unsigned long idx_to_kaddr(unsigned int idx)
++static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
++ unsigned int idx)
+ {
+- return (unsigned long)pfn_to_kaddr(idx_to_pfn(idx));
++ return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
+ }
+
+ /* extra field used in struct page */
+@@ -112,7 +115,7 @@ static inline pending_ring_idx_t pending_index(unsigned i)
+ return i & (MAX_PENDING_REQS-1);
+ }
+
+-static inline pending_ring_idx_t nr_pending_reqs(void)
++static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
+ {
+ return MAX_PENDING_REQS -
+ netbk->pending_prod + netbk->pending_cons;
+@@ -125,10 +128,10 @@ MODULE_PARM_DESC(copy_skb, "Copy data received from netfront without netloop");
+
+ int netbk_copy_skb_mode;
+
+-static inline void maybe_schedule_tx_action(void)
++static inline void maybe_schedule_tx_action(struct xen_netbk *netbk)
+ {
+ smp_mb();
+- if ((nr_pending_reqs() < (MAX_PENDING_REQS/2)) &&
++ if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
+ !list_empty(&netbk->net_schedule_list))
+ tasklet_schedule(&netbk->net_tx_tasklet);
+ }
+@@ -235,9 +238,15 @@ static void tx_queue_callback(unsigned long data)
+ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct xen_netif *netif = netdev_priv(dev);
++ struct xen_netbk *netbk;
+
+ BUG_ON(skb->dev != dev);
+
++ if (netif->group == -1)
++ goto drop;
+
-+ pciback_release_pci_dev(pdev, dev);
-+
-+ out:
-+ return err;
-+}
++ netbk = &xen_netbk[netif->group];
+
-+static int pciback_publish_pci_root(struct pciback_device *pdev,
-+ unsigned int domain, unsigned int bus)
-+{
-+ unsigned int d, b;
-+ int i, root_num, len, err;
-+ char str[64];
-+
-+ dev_dbg(&pdev->xdev->dev, "Publishing pci roots\n");
-+
-+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
-+ "root_num", "%d", &root_num);
-+ if (err == 0 || err == -ENOENT)
-+ root_num = 0;
-+ else if (err < 0)
-+ goto out;
-+
-+ /* Verify that we haven't already published this pci root */
-+ for (i = 0; i < root_num; i++) {
-+ len = snprintf(str, sizeof(str), "root-%d", i);
-+ if (unlikely(len >= (sizeof(str) - 1))) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
+ /* Drop the packet if the target domain has no receive buffers. */
+ if (unlikely(!netif_schedulable(netif) || netbk_queue_full(netif)))
+ goto drop;
+@@ -313,6 +322,7 @@ static u16 netbk_gop_frag(struct xen_netif *netif, struct netbk_rx_meta *meta,
+ struct gnttab_copy *copy_gop;
+ struct xen_netif_rx_request *req;
+ unsigned long old_mfn;
++ int group = netif_page_group(page);
+ int idx = netif_page_index(page);
+
+ old_mfn = virt_to_mfn(page_address(page));
+@@ -321,7 +331,8 @@ static u16 netbk_gop_frag(struct xen_netif *netif, struct netbk_rx_meta *meta,
+
+ copy_gop = npo->copy + npo->copy_prod++;
+ copy_gop->flags = GNTCOPY_dest_gref;
+- if (idx > -1) {
++ if (PageForeign(page)) {
++ struct xen_netbk *netbk = &xen_netbk[group];
+ struct pending_tx_info *src_pend = &netbk->pending_tx_info[idx];
+ copy_gop->source.domid = src_pend->netif->domid;
+ copy_gop->source.u.ref = src_pend->req.gref;
+@@ -422,9 +433,10 @@ static void netbk_add_frag_responses(struct xen_netif *netif, int status,
+ }
+ }
+
+-static void net_rx_action(unsigned long unused)
++static void net_rx_action(unsigned long data)
+ {
+ struct xen_netif *netif = NULL;
++ struct xen_netbk *netbk = (struct xen_netbk *)data;
+ s8 status;
+ u16 id, irq, flags;
+ struct xen_netif_rx_response *resp;
+@@ -584,13 +596,15 @@ static void net_rx_action(unsigned long unused)
+ tasklet_schedule(&netbk->net_rx_tasklet);
+ }
+
+-static void net_alarm(unsigned long unused)
++static void net_alarm(unsigned long data)
+ {
++ struct xen_netbk *netbk = (struct xen_netbk *)data;
+ tasklet_schedule(&netbk->net_rx_tasklet);
+ }
+
+-static void netbk_tx_pending_timeout(unsigned long unused)
++static void netbk_tx_pending_timeout(unsigned long data)
+ {
++ struct xen_netbk *netbk = (struct xen_netbk *)data;
+ tasklet_schedule(&netbk->net_tx_tasklet);
+ }
+
+@@ -607,6 +621,7 @@ static int __on_net_schedule_list(struct xen_netif *netif)
+
+ static void remove_from_net_schedule_list(struct xen_netif *netif)
+ {
++ struct xen_netbk *netbk = &xen_netbk[netif->group];
+ spin_lock_irq(&netbk->net_schedule_list_lock);
+ if (likely(__on_net_schedule_list(netif))) {
+ list_del_init(&netif->list);
+@@ -617,6 +632,7 @@ static void remove_from_net_schedule_list(struct xen_netif *netif)
+
+ static void add_to_net_schedule_list_tail(struct xen_netif *netif)
+ {
++ struct xen_netbk *netbk = &xen_netbk[netif->group];
+ if (__on_net_schedule_list(netif))
+ return;
+
+@@ -631,13 +647,14 @@ static void add_to_net_schedule_list_tail(struct xen_netif *netif)
+
+ void netif_schedule_work(struct xen_netif *netif)
+ {
++ struct xen_netbk *netbk = &xen_netbk[netif->group];
+ int more_to_do;
+
+ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
+
+ if (more_to_do) {
+ add_to_net_schedule_list_tail(netif);
+- maybe_schedule_tx_action();
++ maybe_schedule_tx_action(netbk);
+ }
+ }
+
+@@ -674,14 +691,15 @@ static void tx_credit_callback(unsigned long data)
+ netif_schedule_work(netif);
+ }
+
+-static inline int copy_pending_req(pending_ring_idx_t pending_idx)
++static inline int copy_pending_req(struct xen_netbk *netbk,
++ pending_ring_idx_t pending_idx)
+ {
+ return gnttab_copy_grant_page(
+ netbk->grant_tx_handle[pending_idx],
+ &netbk->mmap_pages[pending_idx]);
+ }
+
+-inline static void net_tx_action_dealloc(void)
++static inline void net_tx_action_dealloc(struct xen_netbk *netbk)
+ {
+ struct netbk_tx_pending_inuse *inuse, *n;
+ struct gnttab_unmap_grant_ref *gop;
+@@ -711,13 +729,13 @@ inline static void net_tx_action_dealloc(void)
+ pending_idx = netbk->dealloc_ring[pending_index(dc++)];
+ list_move_tail(&pending_inuse[pending_idx].list, &list);
+
+- pfn = idx_to_pfn(pending_idx);
++ pfn = idx_to_pfn(netbk, pending_idx);
+ /* Already unmapped? */
+ if (!phys_to_machine_mapping_valid(pfn))
+ continue;
+
+ gnttab_set_unmap_op(gop,
+- idx_to_kaddr(pending_idx),
++ idx_to_kaddr(netbk, pending_idx),
+ GNTMAP_host_map,
+ netbk->grant_tx_handle[pending_idx]);
+ gop++;
+@@ -740,7 +758,7 @@ inline static void net_tx_action_dealloc(void)
+
+ pending_tx_info[pending_idx].netif->nr_copied_skbs++;
+
+- switch (copy_pending_req(pending_idx)) {
++ switch (copy_pending_req(netbk, pending_idx)) {
+ case 0:
+ list_move_tail(&inuse->list, &list);
+ continue;
+@@ -843,7 +861,8 @@ static int netbk_count_requests(struct xen_netif *netif,
+ return frags;
+ }
+
+-static struct gnttab_map_grant_ref *netbk_get_requests(struct xen_netif *netif,
++static struct gnttab_map_grant_ref *netbk_get_requests(struct xen_netbk *netbk,
++ struct xen_netif *netif,
+ struct sk_buff *skb,
+ struct xen_netif_tx_request *txp,
+ struct gnttab_map_grant_ref *mop)
+@@ -864,7 +883,7 @@ static struct gnttab_map_grant_ref *netbk_get_requests(struct xen_netif *netif,
+ index = pending_index(netbk->pending_cons++);
+ pending_idx = netbk->pending_ring[index];
+
+- gnttab_set_map_op(mop++, idx_to_kaddr(pending_idx),
++ gnttab_set_map_op(mop++, idx_to_kaddr(netbk, pending_idx),
+ GNTMAP_host_map | GNTMAP_readonly,
+ txp->gref, netif->domid);
+
+@@ -877,8 +896,9 @@ static struct gnttab_map_grant_ref *netbk_get_requests(struct xen_netif *netif,
+ return mop;
+ }
+
+-static int netbk_tx_check_mop(struct sk_buff *skb,
+- struct gnttab_map_grant_ref **mopp)
++static int netbk_tx_check_mop(struct xen_netbk *netbk,
++ struct sk_buff *skb,
++ struct gnttab_map_grant_ref **mopp)
+ {
+ struct gnttab_map_grant_ref *mop = *mopp;
+ int pending_idx = *((u16 *)skb->data);
+@@ -900,7 +920,7 @@ static int netbk_tx_check_mop(struct sk_buff *skb,
+ netif_put(netif);
+ } else {
+ set_phys_to_machine(
+- __pa(idx_to_kaddr(pending_idx)) >> PAGE_SHIFT,
++ __pa(idx_to_kaddr(netbk, pending_idx)) >> PAGE_SHIFT,
+ FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
+ netbk->grant_tx_handle[pending_idx] = mop->handle;
+ }
+@@ -918,14 +938,14 @@ static int netbk_tx_check_mop(struct sk_buff *skb,
+ newerr = (++mop)->status;
+ if (likely(!newerr)) {
+ unsigned long addr;
+- addr = idx_to_kaddr(pending_idx);
++ addr = idx_to_kaddr(netbk, pending_idx);
+ set_phys_to_machine(
+ __pa(addr)>>PAGE_SHIFT,
+ FOREIGN_FRAME(mop->dev_bus_addr>>PAGE_SHIFT));
+ netbk->grant_tx_handle[pending_idx] = mop->handle;
+ /* Had a previous error? Invalidate this fragment. */
+ if (unlikely(err))
+- netif_idx_release(pending_idx);
++ netif_idx_release(netbk, pending_idx);
+ continue;
+ }
+
+@@ -942,10 +962,10 @@ static int netbk_tx_check_mop(struct sk_buff *skb,
+
+ /* First error: invalidate header and preceding fragments. */
+ pending_idx = *((u16 *)skb->data);
+- netif_idx_release(pending_idx);
++ netif_idx_release(netbk, pending_idx);
+ for (j = start; j < i; j++) {
+ pending_idx = (unsigned long)shinfo->frags[i].page;
+- netif_idx_release(pending_idx);
++ netif_idx_release(netbk, pending_idx);
+ }
+
+ /* Remember the error: invalidate all subsequent fragments. */
+@@ -956,7 +976,7 @@ static int netbk_tx_check_mop(struct sk_buff *skb,
+ return err;
+ }
+
+-static void netbk_fill_frags(struct sk_buff *skb)
++static void netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
+ {
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ int nr_frags = shinfo->nr_frags;
+@@ -974,7 +994,7 @@ static void netbk_fill_frags(struct sk_buff *skb)
+ &netbk->pending_inuse_head);
+
+ txp = &netbk->pending_tx_info[pending_idx].req;
+- frag->page = virt_to_page(idx_to_kaddr(pending_idx));
++ frag->page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
+ frag->size = txp->size;
+ frag->page_offset = txp->offset;
+
+@@ -1106,14 +1126,14 @@ static bool tx_credit_exceeded(struct xen_netif *netif, unsigned size)
+ return false;
+ }
+
+-static unsigned net_tx_build_mops(void)
++static unsigned net_tx_build_mops(struct xen_netbk *netbk)
+ {
+ struct gnttab_map_grant_ref *mop;
+ struct sk_buff *skb;
+ int ret;
+
+ mop = netbk->tx_map_ops;
+- while (((nr_pending_reqs() + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
++ while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
+ !list_empty(&netbk->net_schedule_list)) {
+ struct xen_netif *netif;
+ struct xen_netif_tx_request txreq;
+@@ -1215,7 +1235,7 @@ static unsigned net_tx_build_mops(void)
+ }
+ }
+
+- gnttab_set_map_op(mop, idx_to_kaddr(pending_idx),
++ gnttab_set_map_op(mop, idx_to_kaddr(netbk, pending_idx),
+ GNTMAP_host_map | GNTMAP_readonly,
+ txreq.gref, netif->domid);
+ mop++;
+@@ -1241,7 +1261,7 @@ static unsigned net_tx_build_mops(void)
+
+ netbk->pending_cons++;
+
+- mop = netbk_get_requests(netif, skb, txfrags, mop);
++ mop = netbk_get_requests(netbk, netif, skb, txfrags, mop);
+
+ netif->tx.req_cons = idx;
+ netif_schedule_work(netif);
+@@ -1253,7 +1273,7 @@ static unsigned net_tx_build_mops(void)
+ return mop - netbk->tx_map_ops;
+ }
+
+-static void net_tx_submit(void)
++static void net_tx_submit(struct xen_netbk *netbk)
+ {
+ struct gnttab_map_grant_ref *mop;
+ struct sk_buff *skb;
+@@ -1270,7 +1290,7 @@ static void net_tx_submit(void)
+ txp = &netbk->pending_tx_info[pending_idx].req;
+
+ /* Check the remap error code. */
+- if (unlikely(netbk_tx_check_mop(skb, &mop))) {
++ if (unlikely(netbk_tx_check_mop(netbk, skb, &mop))) {
+ DPRINTK("netback grant failed.\n");
+ skb_shinfo(skb)->nr_frags = 0;
+ kfree_skb(skb);
+@@ -1279,7 +1299,7 @@ static void net_tx_submit(void)
+
+ data_len = skb->len;
+ memcpy(skb->data,
+- (void *)(idx_to_kaddr(pending_idx)|txp->offset),
++ (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset),
+ data_len);
+ if (data_len < txp->size) {
+ /* Append the packet payload as a fragment. */
+@@ -1287,7 +1307,7 @@ static void net_tx_submit(void)
+ txp->size -= data_len;
+ } else {
+ /* Schedule a response immediately. */
+- netif_idx_release(pending_idx);
++ netif_idx_release(netbk, pending_idx);
+ }
+
+ if (txp->flags & NETTXF_csum_blank)
+@@ -1295,7 +1315,7 @@ static void net_tx_submit(void)
+ else if (txp->flags & NETTXF_data_validated)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+- netbk_fill_frags(skb);
++ netbk_fill_frags(netbk, skb);
+
+ /*
+ * If the initial fragment was < PKT_PROT_LEN then
+@@ -1344,15 +1364,16 @@ static void net_tx_submit(void)
+ }
+
+ /* Called after netfront has transmitted */
+-static void net_tx_action(unsigned long unused)
++static void net_tx_action(unsigned long data)
+ {
++ struct xen_netbk *netbk = (struct xen_netbk *)data;
+ unsigned nr_mops;
+ int ret;
+
+ if (netbk->dealloc_cons != netbk->dealloc_prod)
+- net_tx_action_dealloc();
++ net_tx_action_dealloc(netbk);
+
+- nr_mops = net_tx_build_mops();
++ nr_mops = net_tx_build_mops(netbk);
+
+ if (nr_mops == 0)
+ return;
+@@ -1361,10 +1382,10 @@ static void net_tx_action(unsigned long unused)
+ netbk->tx_map_ops, nr_mops);
+ BUG_ON(ret);
+
+- net_tx_submit();
++ net_tx_submit(netbk);
+ }
+
+-static void netif_idx_release(u16 pending_idx)
++static void netif_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+ {
+ static DEFINE_SPINLOCK(_lock);
+ unsigned long flags;
+@@ -1383,19 +1404,28 @@ static void netif_idx_release(u16 pending_idx)
+
+ static void netif_page_release(struct page *page, unsigned int order)
+ {
++ int group = netif_page_group(page);
+ int idx = netif_page_index(page);
++ struct xen_netbk *netbk = &xen_netbk[group];
+ BUG_ON(order);
++ BUG_ON(group < 0 || group >= xen_netbk_group_nr);
+ BUG_ON(idx < 0 || idx >= MAX_PENDING_REQS);
+ BUG_ON(netbk->mmap_pages[idx] != page);
+- netif_idx_release(idx);
++ netif_idx_release(netbk, idx);
+ }
+
+ irqreturn_t netif_be_int(int irq, void *dev_id)
+ {
+ struct xen_netif *netif = dev_id;
++ struct xen_netbk *netbk;
+
-+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
-+ str, "%x:%x", &d, &b);
-+ if (err < 0)
-+ goto out;
-+ if (err != 2) {
-+ err = -EINVAL;
-+ goto out;
-+ }
++ if (netif->group == -1)
++ return IRQ_NONE;
+
-+ if (d == domain && b == bus) {
-+ err = 0;
-+ goto out;
++ netbk = &xen_netbk[netif->group];
+
+ add_to_net_schedule_list_tail(netif);
+- maybe_schedule_tx_action();
++ maybe_schedule_tx_action(netbk);
+
+ if (netif_schedulable(netif) && !netbk_queue_full(netif))
+ netif_wake_queue(netif->dev);
+@@ -1453,28 +1483,40 @@ static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
+ struct list_head *ent;
+ struct xen_netif *netif;
+ int i = 0;
++ int group = 0;
+
+ printk(KERN_ALERT "netif_schedule_list:\n");
+- spin_lock_irq(&netbk->net_schedule_list_lock);
+
+- list_for_each(ent, &netbk->net_schedule_list) {
+- netif = list_entry(ent, struct xen_netif, list);
+- printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
+- "rx_resp_prod=%08x\n",
+- i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
+- printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n",
+- netif->tx.req_cons, netif->tx.rsp_prod_pvt);
+- printk(KERN_ALERT " shared(rx_req_prod=%08x "
+- "rx_resp_prod=%08x\n",
+- netif->rx.sring->req_prod, netif->rx.sring->rsp_prod);
+- printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n",
+- netif->rx.sring->rsp_event, netif->tx.sring->req_prod);
+- printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n",
+- netif->tx.sring->rsp_prod, netif->tx.sring->rsp_event);
+- i++;
++ for (group = 0; group < xen_netbk_group_nr; group++) {
++ struct xen_netbk *netbk = &xen_netbk[group];
++ spin_lock_irq(&netbk->net_schedule_list_lock);
++ printk(KERN_ALERT "xen_netback group number: %d\n", group);
++ list_for_each(ent, &netbk->net_schedule_list) {
++ netif = list_entry(ent, struct xen_netif, list);
++ printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
++ "rx_resp_prod=%08x\n",
++ i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
++ printk(KERN_ALERT
++ " tx_req_cons=%08x, tx_resp_prod=%08x)\n",
++ netif->tx.req_cons, netif->tx.rsp_prod_pvt);
++ printk(KERN_ALERT
++ " shared(rx_req_prod=%08x "
++ "rx_resp_prod=%08x\n",
++ netif->rx.sring->req_prod,
++ netif->rx.sring->rsp_prod);
++ printk(KERN_ALERT
++ " rx_event=%08x, tx_req_prod=%08x\n",
++ netif->rx.sring->rsp_event,
++ netif->tx.sring->req_prod);
++ printk(KERN_ALERT
++ " tx_resp_prod=%08x, tx_event=%08x)\n",
++ netif->tx.sring->rsp_prod,
++ netif->tx.sring->rsp_event);
++ i++;
+ }
-+ }
-+
-+ len = snprintf(str, sizeof(str), "root-%d", root_num);
-+ if (unlikely(len >= (sizeof(str) - 1))) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ dev_dbg(&pdev->xdev->dev, "writing root %d at %04x:%02x\n",
-+ root_num, domain, bus);
-+
-+ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
-+ "%04x:%02x", domain, bus);
-+ if (err)
-+ goto out;
++ spin_unlock_irq(&netbk->net_schedule_list_lock);
+ }
+
+- spin_unlock_irq(&netbk->net_schedule_list_lock);
+ printk(KERN_ALERT " ** End of netif_schedule_list **\n");
+
+ return IRQ_HANDLED;
+@@ -1486,12 +1528,15 @@ static int __init netback_init(void)
+ int i;
+ struct page *page;
+ int rc = 0;
++ int group;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+- netbk = (struct xen_netbk *)vmalloc(sizeof(struct xen_netbk));
+- if (!netbk) {
++ xen_netbk_group_nr = num_online_cpus();
++ xen_netbk = (struct xen_netbk *)vmalloc(sizeof(struct xen_netbk) *
++ xen_netbk_group_nr);
++ if (!xen_netbk) {
+ printk(KERN_ALERT "%s: out of memory\n", __func__);
+ return -ENOMEM;
+ }
+@@ -1499,44 +1544,54 @@ static int __init netback_init(void)
+ /* We can increase reservation by this much in net_rx_action(). */
+ // balloon_update_driver_allowance(NET_RX_RING_SIZE);
+
+- skb_queue_head_init(&netbk->rx_queue);
+- skb_queue_head_init(&netbk->tx_queue);
+-
+- init_timer(&netbk->net_timer);
+- netbk->net_timer.data = 0;
+- netbk->net_timer.function = net_alarm;
+-
+- init_timer(&netbk->netbk_tx_pending_timer);
+- netbk->netbk_tx_pending_timer.data = 0;
+- netbk->netbk_tx_pending_timer.function = netbk_tx_pending_timeout;
++ for (group = 0; group < xen_netbk_group_nr; group++) {
++ struct xen_netbk *netbk = &xen_netbk[group];
++ skb_queue_head_init(&netbk->rx_queue);
++ skb_queue_head_init(&netbk->tx_queue);
+
-+ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename,
-+ "root_num", "%d", (root_num + 1));
++ init_timer(&netbk->net_timer);
++ netbk->net_timer.data = (unsigned long)netbk;
++ netbk->net_timer.function = net_alarm;
+
-+ out:
-+ return err;
-+}
++ init_timer(&netbk->netbk_tx_pending_timer);
++ netbk->netbk_tx_pending_timer.data = (unsigned long)netbk;
++ netbk->netbk_tx_pending_timer.function =
++ netbk_tx_pending_timeout;
+
-+static int pciback_reconfigure(struct pciback_device *pdev)
-+{
-+ int err = 0;
-+ int num_devs;
-+ int domain, bus, slot, func;
-+ int substate;
-+ int i, len;
-+ char state_str[64];
-+ char dev_str[64];
-+
-+ spin_lock(&pdev->dev_lock);
-+
-+ dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n");
-+
-+ /* Make sure we only reconfigure once */
-+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
-+ XenbusStateReconfiguring)
-+ goto out;
-+
-+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
-+ &num_devs);
-+ if (err != 1) {
-+ if (err >= 0)
-+ err = -EINVAL;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error reading number of devices");
-+ goto out;
-+ }
-+
-+ for (i = 0; i < num_devs; i++) {
-+ len = snprintf(state_str, sizeof(state_str), "state-%d", i);
-+ if (unlikely(len >= (sizeof(state_str) - 1))) {
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "String overflow while reading "
-+ "configuration");
-+ goto out;
++ netbk->mmap_pages =
++ alloc_empty_pages_and_pagevec(MAX_PENDING_REQS);
++ if (!netbk->mmap_pages) {
++ printk(KERN_ALERT "%s: out of memory\n", __func__);
++ del_timer(&netbk->netbk_tx_pending_timer);
++ del_timer(&netbk->net_timer);
++ rc = -ENOMEM;
++ goto failed_init;
+ }
-+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, state_str,
-+ "%d", &substate);
-+ if (err != 1)
-+ substate = XenbusStateUnknown;
-+
-+ switch (substate) {
-+ case XenbusStateInitialising:
-+ dev_dbg(&pdev->xdev->dev, "Attaching dev-%d ...\n", i);
-+
-+ len = snprintf(dev_str, sizeof(dev_str), "dev-%d", i);
-+ if (unlikely(len >= (sizeof(dev_str) - 1))) {
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "String overflow while "
-+ "reading configuration");
-+ goto out;
-+ }
-+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
-+ dev_str, "%x:%x:%x.%x",
-+ &domain, &bus, &slot, &func);
-+ if (err < 0) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error reading device "
-+ "configuration");
-+ goto out;
-+ }
-+ if (err != 4) {
-+ err = -EINVAL;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error parsing pci device "
-+ "configuration");
-+ goto out;
-+ }
-+
-+ err = pciback_export_device(pdev, domain, bus, slot,
-+ func, i);
-+ if (err)
-+ goto out;
-+
-+ /* Publish pci roots. */
-+ err = pciback_publish_pci_roots(pdev, pciback_publish_pci_root);
-+ if (err) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error while publish PCI root"
-+ "buses for frontend");
-+ goto out;
-+ }
-+
-+ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename,
-+ state_str, "%d",
-+ XenbusStateInitialised);
-+ if (err) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error switching substate of "
-+ "dev-%d\n", i);
-+ goto out;
-+ }
-+ break;
-+
-+ case XenbusStateClosing:
-+ dev_dbg(&pdev->xdev->dev, "Detaching dev-%d ...\n", i);
-+
-+ len = snprintf(dev_str, sizeof(dev_str), "vdev-%d", i);
-+ if (unlikely(len >= (sizeof(dev_str) - 1))) {
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "String overflow while "
-+ "reading configuration");
-+ goto out;
-+ }
-+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
-+ dev_str, "%x:%x:%x.%x",
-+ &domain, &bus, &slot, &func);
-+ if (err < 0) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error reading device "
-+ "configuration");
-+ goto out;
-+ }
-+ if (err != 4) {
-+ err = -EINVAL;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error parsing pci device "
-+ "configuration");
-+ goto out;
-+ }
-+
-+ err = pciback_remove_device(pdev, domain, bus, slot,
-+ func);
-+ if(err)
-+ goto out;
-+
-+ /* TODO: If at some point we implement support for pci
-+ * root hot-remove on pcifront side, we'll need to
-+ * remove unnecessary xenstore nodes of pci roots here.
-+ */
-+
-+ break;
-+
-+ default:
-+ break;
+
+- netbk->mmap_pages =
+- alloc_empty_pages_and_pagevec(MAX_PENDING_REQS);
+- if (!netbk->mmap_pages) {
+- printk(KERN_ALERT "%s: out of memory\n", __func__);
+- rc = -ENOMEM;
+- goto failed_init2;
+- }
++ for (i = 0; i < MAX_PENDING_REQS; i++) {
++ page = netbk->mmap_pages[i];
++ SetPageForeign(page, netif_page_release);
++ netif_set_page_ext(page, group, i);
++ INIT_LIST_HEAD(&netbk->pending_inuse[i].list);
+ }
+
+- for (i = 0; i < MAX_PENDING_REQS; i++) {
+- page = netbk->mmap_pages[i];
+- SetPageForeign(page, netif_page_release);
+- netif_set_page_ext(page, 0, i);
+- INIT_LIST_HEAD(&netbk->pending_inuse[i].list);
+- }
++ netbk->pending_cons = 0;
++ netbk->pending_prod = MAX_PENDING_REQS;
++ for (i = 0; i < MAX_PENDING_REQS; i++)
++ netbk->pending_ring[i] = i;
+
+- netbk->pending_cons = 0;
+- netbk->pending_prod = MAX_PENDING_REQS;
+- for (i = 0; i < MAX_PENDING_REQS; i++)
+- netbk->pending_ring[i] = i;
++ tasklet_init(&netbk->net_tx_tasklet, net_tx_action,
++ (unsigned long)netbk);
++ tasklet_init(&netbk->net_rx_tasklet, net_rx_action,
++ (unsigned long)netbk);
+
+- tasklet_init(&netbk->net_tx_tasklet, net_tx_action, 0);
+- tasklet_init(&netbk->net_rx_tasklet, net_rx_action, 0);
++ INIT_LIST_HEAD(&netbk->pending_inuse_head);
++ INIT_LIST_HEAD(&netbk->net_schedule_list);
+
+- INIT_LIST_HEAD(&netbk->pending_inuse_head);
+- INIT_LIST_HEAD(&netbk->net_schedule_list);
++ spin_lock_init(&netbk->net_schedule_list_lock);
+
+- spin_lock_init(&netbk->net_schedule_list_lock);
++ atomic_set(&netbk->netfront_count, 0);
+ }
-+
-+ err = xenbus_switch_state(pdev->xdev, XenbusStateReconfigured);
-+ if (err) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error switching to reconfigured state!");
-+ goto out;
+
+ netbk_copy_skb_mode = NETBK_DONT_COPY_SKB;
+ if (MODPARM_copy_skb) {
+@@ -1551,25 +1606,28 @@ static int __init netback_init(void)
+
+ rc = netif_xenbus_init();
+ if (rc)
+- goto failed_init1;
++ goto failed_init;
+
+ #ifdef NETBE_DEBUG_INTERRUPT
+ (void)bind_virq_to_irqhandler(VIRQ_DEBUG,
+ 0,
+ netif_be_dbg,
+- SA_SHIRQ,
++ IRQF_SHARED,
+ "net-be-dbg",
+ &netif_be_dbg);
+ #endif
+
+ return 0;
+
+-failed_init1:
+- free_empty_pages_and_pagevec(netbk->mmap_pages, MAX_PENDING_REQS);
+-failed_init2:
+- del_timer(&netbk->netbk_tx_pending_timer);
+- del_timer(&netbk->net_timer);
+- vfree(netbk);
++failed_init:
++ for (i = 0; i < group; i++) {
++ struct xen_netbk *netbk = &xen_netbk[i];
++ free_empty_pages_and_pagevec(netbk->mmap_pages,
++ MAX_PENDING_REQS);
++ del_timer(&netbk->netbk_tx_pending_timer);
++ del_timer(&netbk->net_timer);
+ }
-+
-+ out:
-+ spin_unlock(&pdev->dev_lock);
++ vfree(xen_netbk);
+ return rc;
+
+ }
+--
+1.7.4
+
+
+From e7317b70c0436c109b605bb377939cb2eaff6a6f Mon Sep 17 00:00:00 2001
+From: Dongxiao Xu <dongxiao.xu(a)intel.com>
+Date: Wed, 19 May 2010 17:08:22 -0700
+Subject: [PATCH 040/203] xen: netback: Use Kernel thread to replace the tasklet.
+
+A kernel thread gives more control over QoS and can improve dom0's
+userspace responsiveness. This option is currently off by default.
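+
+For example (a hypothetical invocation, assuming the backend is
+built as a module named netback; not part of the patch), the
+thread mode would be selected at load time with:
+
+  modprobe netback netback_kthread=1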
+
+Signed-off-by: Dongxiao Xu <dongxiao.xu(a)intel.com>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/common.h | 13 ++++-
+ drivers/xen/netback/netback.c | 109 ++++++++++++++++++++++++++++++++++++----
+ 2 files changed, 109 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index 847ba58..36cb2b9 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -257,8 +257,17 @@ union page_ext {
+ };
+
+ struct xen_netbk {
+- struct tasklet_struct net_tx_tasklet;
+- struct tasklet_struct net_rx_tasklet;
++ union {
++ struct {
++ struct tasklet_struct net_tx_tasklet;
++ struct tasklet_struct net_rx_tasklet;
++ } tasklet;
+
-+ return 0;
-+}
++ struct {
++ wait_queue_head_t netbk_action_wq;
++ struct task_struct *task;
++ } kthread;
++ };
+
+ struct sk_buff_head rx_queue;
+ struct sk_buff_head tx_queue;
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index feefb14..547dcaa 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -38,6 +38,7 @@
+
+ #include <linux/tcp.h>
+ #include <linux/udp.h>
++#include <linux/kthread.h>
+
+ #include <xen/balloon.h>
+ #include <xen/events.h>
+@@ -128,12 +129,31 @@ MODULE_PARM_DESC(copy_skb, "Copy data received from netfront without netloop");
+
+ int netbk_copy_skb_mode;
+
++static int MODPARM_netback_kthread;
++module_param_named(netback_kthread, MODPARM_netback_kthread, bool, 0);
++MODULE_PARM_DESC(netback_kthread, "Use kernel thread to replace tasklet");
+
-+static void pciback_frontend_changed(struct xenbus_device *xdev,
-+ enum xenbus_state fe_state)
++/*
++ * Netback bottom half handler.
++ * dir indicates the data direction.
++ * rx: 1, tx: 0.
++ */
++static inline void xen_netbk_bh_handler(struct xen_netbk *netbk, int dir)
+{
-+ struct pciback_device *pdev = xdev->dev.driver_data;
-+
-+ dev_dbg(&xdev->dev, "fe state changed %d\n", fe_state);
-+
-+ switch (fe_state) {
-+ case XenbusStateInitialised:
-+ pciback_attach(pdev);
-+ break;
++ if (MODPARM_netback_kthread)
++ wake_up(&netbk->kthread.netbk_action_wq);
++ else if (dir)
++ tasklet_schedule(&netbk->tasklet.net_rx_tasklet);
++ else
++ tasklet_schedule(&netbk->tasklet.net_tx_tasklet);
++}
+
-+ case XenbusStateReconfiguring:
-+ pciback_reconfigure(pdev);
-+ break;
+ static inline void maybe_schedule_tx_action(struct xen_netbk *netbk)
+ {
+ smp_mb();
+ if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
+ !list_empty(&netbk->net_schedule_list))
+- tasklet_schedule(&netbk->net_tx_tasklet);
++ xen_netbk_bh_handler(netbk, 0);
+ }
+
+ static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
+@@ -289,7 +309,8 @@ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ }
+ }
+ skb_queue_tail(&netbk->rx_queue, skb);
+- tasklet_schedule(&netbk->net_rx_tasklet);
+
-+ case XenbusStateConnected:
-+ /* pcifront switched its state from reconfiguring to connected.
-+ * Then switch to connected state.
-+ */
-+ xenbus_switch_state(xdev, XenbusStateConnected);
-+ break;
++ xen_netbk_bh_handler(netbk, 1);
+
+ return 0;
+
+@@ -593,19 +614,19 @@ static void net_rx_action(unsigned long data)
+ /* More work to do? */
+ if (!skb_queue_empty(&netbk->rx_queue) &&
+ !timer_pending(&netbk->net_timer))
+- tasklet_schedule(&netbk->net_rx_tasklet);
++ xen_netbk_bh_handler(netbk, 1);
+ }
+
+ static void net_alarm(unsigned long data)
+ {
+ struct xen_netbk *netbk = (struct xen_netbk *)data;
+- tasklet_schedule(&netbk->net_rx_tasklet);
++ xen_netbk_bh_handler(netbk, 1);
+ }
+
+ static void netbk_tx_pending_timeout(unsigned long data)
+ {
+ struct xen_netbk *netbk = (struct xen_netbk *)data;
+- tasklet_schedule(&netbk->net_tx_tasklet);
++ xen_netbk_bh_handler(netbk, 0);
+ }
+
+ struct net_device_stats *netif_be_get_stats(struct net_device *dev)
+@@ -1348,7 +1369,7 @@ static void net_tx_submit(struct xen_netbk *netbk)
+ continue;
+ }
+
+- netif_rx(skb);
++ netif_rx_ni(skb);
+ netif->dev->last_rx = jiffies;
+ }
+
+@@ -1399,7 +1420,7 @@ static void netif_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+ netbk->dealloc_prod++;
+ spin_unlock_irqrestore(&_lock, flags);
+
+- tasklet_schedule(&netbk->net_tx_tasklet);
++ xen_netbk_bh_handler(netbk, 0);
+ }
+
+ static void netif_page_release(struct page *page, unsigned int order)
+@@ -1523,6 +1544,46 @@ static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
+ }
+ #endif
+
++static inline int rx_work_todo(struct xen_netbk *netbk)
++{
++ return !skb_queue_empty(&netbk->rx_queue);
++}
+
-+ case XenbusStateClosing:
-+ pciback_disconnect(pdev);
-+ xenbus_switch_state(xdev, XenbusStateClosing);
-+ break;
++static inline int tx_work_todo(struct xen_netbk *netbk)
++{
++ if (netbk->dealloc_cons != netbk->dealloc_prod)
++ return 1;
+
-+ case XenbusStateClosed:
-+ pciback_disconnect(pdev);
-+ xenbus_switch_state(xdev, XenbusStateClosed);
-+ if (xenbus_dev_is_online(xdev))
-+ break;
-+ /* fall through if not online */
-+ case XenbusStateUnknown:
-+ dev_dbg(&xdev->dev, "frontend is gone! unregister device\n");
-+ device_unregister(&xdev->dev);
-+ break;
++ if (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
++ !list_empty(&netbk->net_schedule_list))
++ return 1;
+
-+ default:
-+ break;
-+ }
++ return 0;
+}
+
-+static int pciback_setup_backend(struct pciback_device *pdev)
++static int netbk_action_thread(void *data)
+{
-+ /* Get configuration from xend (if available now) */
-+ int domain, bus, slot, func;
-+ int err = 0;
-+ int i, num_devs;
-+ char dev_str[64];
-+ char state_str[64];
-+
-+ spin_lock(&pdev->dev_lock);
++ struct xen_netbk *netbk = (struct xen_netbk *)data;
++ while (!kthread_should_stop()) {
++ wait_event_interruptible(netbk->kthread.netbk_action_wq,
++ rx_work_todo(netbk)
++ || tx_work_todo(netbk)
++ || kthread_should_stop());
++ cond_resched();
+
-+ /* It's possible we could get the call to setup twice, so make sure
-+ * we're not already connected.
-+ */
-+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
-+ XenbusStateInitWait)
-+ goto out;
++ if (kthread_should_stop())
++ break;
+
-+ dev_dbg(&pdev->xdev->dev, "getting be setup\n");
++ if (rx_work_todo(netbk))
++ net_rx_action((unsigned long)netbk);
+
-+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
-+ &num_devs);
-+ if (err != 1) {
-+ if (err >= 0)
-+ err = -EINVAL;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error reading number of devices");
-+ goto out;
++ if (tx_work_todo(netbk))
++ net_tx_action((unsigned long)netbk);
+ }
+
-+ for (i = 0; i < num_devs; i++) {
-+ int l = snprintf(dev_str, sizeof(dev_str), "dev-%d", i);
-+ if (unlikely(l >= (sizeof(dev_str) - 1))) {
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "String overflow while reading "
-+ "configuration");
-+ goto out;
-+ }
-+
-+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, dev_str,
-+ "%x:%x:%x.%x", &domain, &bus, &slot, &func);
-+ if (err < 0) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error reading device configuration");
-+ goto out;
-+ }
-+ if (err != 4) {
-+ err = -EINVAL;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error parsing pci device "
-+ "configuration");
-+ goto out;
-+ }
++ return 0;
++}
+
-+ err = pciback_export_device(pdev, domain, bus, slot, func, i);
-+ if (err)
-+ goto out;
+ static int __init netback_init(void)
+ {
+ int i;
+@@ -1580,10 +1641,34 @@ static int __init netback_init(void)
+ for (i = 0; i < MAX_PENDING_REQS; i++)
+ netbk->pending_ring[i] = i;
+
+- tasklet_init(&netbk->net_tx_tasklet, net_tx_action,
+- (unsigned long)netbk);
+- tasklet_init(&netbk->net_rx_tasklet, net_rx_action,
+- (unsigned long)netbk);
++ if (MODPARM_netback_kthread) {
++ init_waitqueue_head(&netbk->kthread.netbk_action_wq);
++ netbk->kthread.task =
++ kthread_create(netbk_action_thread,
++ (void *)netbk,
++ "netback/%u", group);
+
-+ /* Switch substate of this device. */
-+ l = snprintf(state_str, sizeof(state_str), "state-%d", i);
-+ if (unlikely(l >= (sizeof(state_str) - 1))) {
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "String overflow while reading "
-+ "configuration");
-+ goto out;
++ if (!IS_ERR(netbk->kthread.task)) {
++ kthread_bind(netbk->kthread.task, group);
++ wake_up_process(netbk->kthread.task);
++ } else {
++ printk(KERN_ALERT
++ "kthread_run() fails at netback\n");
++ free_empty_pages_and_pagevec(netbk->mmap_pages,
++ MAX_PENDING_REQS);
++ del_timer(&netbk->netbk_tx_pending_timer);
++ del_timer(&netbk->net_timer);
++ rc = PTR_ERR(netbk->kthread.task);
++ goto failed_init;
++ }
++ } else {
++ tasklet_init(&netbk->tasklet.net_tx_tasklet,
++ net_tx_action,
++ (unsigned long)netbk);
++ tasklet_init(&netbk->tasklet.net_rx_tasklet,
++ net_rx_action,
++ (unsigned long)netbk);
+ }
-+ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, state_str,
-+ "%d", XenbusStateInitialised);
-+ if (err) {
-+ xenbus_dev_fatal(pdev->xdev, err, "Error switching "
-+ "substate of dev-%d\n", i);
-+ goto out;
-+ }
-+ }
-+
-+ err = pciback_publish_pci_roots(pdev, pciback_publish_pci_root);
-+ if (err) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error while publish PCI root buses "
-+ "for frontend");
-+ goto out;
-+ }
-+
-+ err = xenbus_switch_state(pdev->xdev, XenbusStateInitialised);
-+ if (err)
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error switching to initialised state!");
-+
-+ out:
-+ spin_unlock(&pdev->dev_lock);
-+
-+ if (!err)
-+ /* see if pcifront is already configured (if not, we'll wait) */
-+ pciback_attach(pdev);
-+
-+ return err;
-+}
-+
-+static void pciback_be_watch(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len)
-+{
-+ struct pciback_device *pdev =
-+ container_of(watch, struct pciback_device, be_watch);
-+
-+ switch (xenbus_read_driver_state(pdev->xdev->nodename)) {
-+ case XenbusStateInitWait:
-+ pciback_setup_backend(pdev);
-+ break;
-+
-+ default:
-+ break;
-+ }
-+}
-+
-+static int pciback_xenbus_probe(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id)
-+{
-+ int err = 0;
-+ struct pciback_device *pdev = alloc_pdev(dev);
-+
-+ if (pdev == NULL) {
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(dev, err,
-+ "Error allocating pciback_device struct");
-+ goto out;
-+ }
-+
-+ /* wait for xend to configure us */
-+ err = xenbus_switch_state(dev, XenbusStateInitWait);
-+ if (err)
-+ goto out;
-+
-+ /* watch the backend node for backend configuration information */
-+ err = xenbus_watch_path(dev, dev->nodename, &pdev->be_watch,
-+ pciback_be_watch);
-+ if (err)
-+ goto out;
-+ pdev->be_watching = 1;
-+
-+ /* We need to force a call to our callback here in case
-+ * xend already configured us!
-+ */
-+ pciback_be_watch(&pdev->be_watch, NULL, 0);
-+
-+ out:
-+ return err;
-+}
-+
-+static int pciback_xenbus_remove(struct xenbus_device *dev)
-+{
-+ struct pciback_device *pdev = dev->dev.driver_data;
-+
-+ if (pdev != NULL)
-+ free_pdev(pdev);
-+
-+ return 0;
-+}
-+
-+static const struct xenbus_device_id xenpci_ids[] = {
-+ {"pci"},
-+ {{0}},
-+};
-+
-+static struct xenbus_driver xenbus_pciback_driver = {
-+ .name = "pciback",
-+ .owner = THIS_MODULE,
-+ .ids = xenpci_ids,
-+ .probe = pciback_xenbus_probe,
-+ .remove = pciback_xenbus_remove,
-+ .otherend_changed = pciback_frontend_changed,
-+};
-+
-+int __init pciback_xenbus_register(void)
-+{
-+ if (!is_running_on_xen())
-+ return -ENODEV;
-+ pciback_wq = create_workqueue("pciback_workqueue");
-+ if (!pciback_wq) {
-+ printk(KERN_ERR "pciback_xenbus_register: create"
-+ "pciback_workqueue failed\n");
-+ return -EFAULT;
-+ }
-+ return xenbus_register_backend(&xenbus_pciback_driver);
-+}
-+
-+void __exit pciback_xenbus_unregister(void)
-+{
-+ destroy_workqueue(pciback_wq);
-+ xenbus_unregister_driver(&xenbus_pciback_driver);
-+}
---
-1.7.4
-
-
-From cf2a64556286b762ce6a3a9b408ba7ecdcaea03a Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Tue, 13 Oct 2009 17:22:22 -0400
-Subject: [PATCH 008/244] xen-pciback: Fix include header name change (evtchn.h is now events.h)
-
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
----
- drivers/xen/pciback/pci_stub.c | 2 +-
- drivers/xen/pciback/pciback_ops.c | 2 +-
- drivers/xen/pciback/xenbus.c | 2 +-
- 3 files changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/drivers/xen/pciback/pci_stub.c b/drivers/xen/pciback/pci_stub.c
-index c481a73..c02f21f 100644
---- a/drivers/xen/pciback/pci_stub.c
-+++ b/drivers/xen/pciback/pci_stub.c
-@@ -13,7 +13,7 @@
- #include <linux/pci.h>
- #include <linux/wait.h>
- #include <asm/atomic.h>
--#include <xen/evtchn.h>
-+#include <xen/events.h>
- #include "pciback.h"
- #include "conf_space.h"
- #include "conf_space_quirks.h"
-diff --git a/drivers/xen/pciback/pciback_ops.c b/drivers/xen/pciback/pciback_ops.c
-index b85b2db..58d09eb 100644
---- a/drivers/xen/pciback/pciback_ops.c
-+++ b/drivers/xen/pciback/pciback_ops.c
-@@ -6,7 +6,7 @@
- #include <linux/module.h>
- #include <linux/wait.h>
- #include <asm/bitops.h>
--#include <xen/evtchn.h>
-+#include <xen/events.h>
- #include "pciback.h"
-
- int verbose_request = 0;
-diff --git a/drivers/xen/pciback/xenbus.c b/drivers/xen/pciback/xenbus.c
-index 4d56c45..bbca3fe 100644
---- a/drivers/xen/pciback/xenbus.c
-+++ b/drivers/xen/pciback/xenbus.c
-@@ -8,7 +8,7 @@
- #include <linux/list.h>
- #include <linux/vmalloc.h>
- #include <xen/xenbus.h>
--#include <xen/evtchn.h>
-+#include <xen/events.h>
- #include "pciback.h"
- #define INVALID_EVTCHN_IRQ (-1)
+ INIT_LIST_HEAD(&netbk->pending_inuse_head);
+ INIT_LIST_HEAD(&netbk->net_schedule_list);
+@@ -1626,6 +1711,8 @@ failed_init:
+ MAX_PENDING_REQS);
+ del_timer(&netbk->netbk_tx_pending_timer);
+ del_timer(&netbk->net_timer);
++ if (MODPARM_netback_kthread)
++ kthread_stop(netbk->kthread.task);
+ }
+ vfree(xen_netbk);
+ return rc;
--
1.7.4
-From f6222ae41f2fee3f67983f833ee8dcba2c7a1362 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Tue, 13 Oct 2009 17:22:24 -0400
-Subject: [PATCH 009/244] xen-pciback: Use pci_is_enabled() instead of is_enabled.
+From 6359d5939c5d1f59b794cd02e8cdbd36b9f3434d Mon Sep 17 00:00:00 2001
+From: James Harper <james.harper(a)bendigoit.com.au>
+Date: Fri, 28 May 2010 23:12:56 -0700
+Subject: [PATCH 041/203] xen: netback: avoid null-pointer access in netback_uevent
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
+Check if drvdata has been set up yet and return if it hasn't.
+
+Signed-off-by: James Harper <james.harper(a)bendigoit.com.au>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
---
- drivers/xen/pciback/conf_space_header.c | 4 ++--
- drivers/xen/pciback/pciback_ops.c | 1 -
- 2 files changed, 2 insertions(+), 3 deletions(-)
+ drivers/xen/netback/xenbus.c | 9 +++++++--
+ 1 files changed, 7 insertions(+), 2 deletions(-)
-diff --git a/drivers/xen/pciback/conf_space_header.c b/drivers/xen/pciback/conf_space_header.c
-index f794e12..5a9e028 100644
---- a/drivers/xen/pciback/conf_space_header.c
-+++ b/drivers/xen/pciback/conf_space_header.c
-@@ -22,14 +22,14 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
+diff --git a/drivers/xen/netback/xenbus.c b/drivers/xen/netback/xenbus.c
+index fcd3c34..e30b0c7 100644
+--- a/drivers/xen/netback/xenbus.c
++++ b/drivers/xen/netback/xenbus.c
+@@ -154,12 +154,17 @@ fail:
+ */
+ static int netback_uevent(struct xenbus_device *xdev, struct kobj_uevent_env *env)
{
- int err;
-
-- if (!dev->is_enabled && is_enable_cmd(value)) {
-+ if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
- if (unlikely(verbose_request))
- printk(KERN_DEBUG "pciback: %s: enable\n",
- pci_name(dev));
- err = pci_enable_device(dev);
- if (err)
- return err;
-- } else if (dev->is_enabled && !is_enable_cmd(value)) {
-+ } else if (pci_is_enabled(dev) && !is_enable_cmd(value)) {
- if (unlikely(verbose_request))
- printk(KERN_DEBUG "pciback: %s: disable\n",
- pci_name(dev));
-diff --git a/drivers/xen/pciback/pciback_ops.c b/drivers/xen/pciback/pciback_ops.c
-index 58d09eb..2d570e7 100644
---- a/drivers/xen/pciback/pciback_ops.c
-+++ b/drivers/xen/pciback/pciback_ops.c
-@@ -26,7 +26,6 @@ void pciback_reset_device(struct pci_dev *dev)
+- struct backend_info *be = dev_get_drvdata(&xdev->dev);
+- struct xen_netif *netif = be->netif;
++ struct backend_info *be;
++ struct xen_netif *netif;
+ char *val;
- pci_write_config_word(dev, PCI_COMMAND, 0);
+ DPRINTK("netback_uevent");
-- dev->is_enabled = 0;
- dev->is_busmaster = 0;
- } else {
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
++ be = dev_get_drvdata(&xdev->dev);
++ if (!be)
++ return 0;
++ netif = be->netif;
++
+ val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
+ if (IS_ERR(val)) {
+ int err = PTR_ERR(val);
--
1.7.4
-From 0d379d03a3284e4b4d890b7e1b8163d485cc72e6 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Tue, 13 Oct 2009 17:22:25 -0400
-Subject: [PATCH 010/244] xen-pciback: Fix usage of INIT_WORK.
+From 4a818daa044d9d499412e8f6e2e3086c0521e7b3 Mon Sep 17 00:00:00 2001
+From: Keir Fraser <keir.fraser(a)citrix.com>
+Date: Fri, 11 Jun 2010 11:48:30 +0100
+Subject: [PATCH 042/203] xen: netback: Fixes for delayed copy of tx network packets.
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+ - Should call net_tx_action_dealloc() even when the dealloc ring is
+ empty, as there may in any case be work to do on the
+ pending_inuse list.
+ - Should not exit directly from the middle of the tx_action tasklet,
+ as the tx_pending_timer should always be checked and updated at the
+ end of the tasklet.
+
+Signed-off-by: Keir Fraser <keir.fraser(a)citrix.com>
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+[picked from linux-2.6.18-xen.hg 959:1a97bd686258, ported across a43e2175 "xen/netback: move code around"]
---
- drivers/xen/pciback/pciback.h | 4 ++--
- drivers/xen/pciback/pciback_ops.c | 7 ++++---
- drivers/xen/pciback/xenbus.c | 3 ++-
- 3 files changed, 8 insertions(+), 6 deletions(-)
+ drivers/xen/netback/netback.c | 25 ++++++++++++-------------
+ 1 files changed, 12 insertions(+), 13 deletions(-)
-diff --git a/drivers/xen/pciback/pciback.h b/drivers/xen/pciback/pciback.h
-index 6744f45..4fb8c05 100644
---- a/drivers/xen/pciback/pciback.h
-+++ b/drivers/xen/pciback/pciback.h
-@@ -99,8 +99,8 @@ int pciback_publish_pci_roots(struct pciback_device *pdev,
- void pciback_release_devices(struct pciback_device *pdev);
-
- /* Handles events from front-end */
--irqreturn_t pciback_handle_event(int irq, void *dev_id, struct pt_regs *regs);
--void pciback_do_op(void *data);
-+irqreturn_t pciback_handle_event(int irq, void *dev_id);
-+void pciback_do_op(struct work_struct *data);
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 547dcaa..58dfbd2 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -1372,16 +1372,6 @@ static void net_tx_submit(struct xen_netbk *netbk)
+ netif_rx_ni(skb);
+ netif->dev->last_rx = jiffies;
+ }
+-
+- if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB &&
+- !list_empty(&netbk->pending_inuse_head)) {
+- struct netbk_tx_pending_inuse *oldest;
+-
+- oldest = list_entry(netbk->pending_inuse_head.next,
+- struct netbk_tx_pending_inuse, list);
+- mod_timer(&netbk->netbk_tx_pending_timer,
+- oldest->alloc_time + HZ);
+- }
+ }
- int pciback_xenbus_register(void);
- void pciback_xenbus_unregister(void);
-diff --git a/drivers/xen/pciback/pciback_ops.c b/drivers/xen/pciback/pciback_ops.c
-index 2d570e7..6624faf 100644
---- a/drivers/xen/pciback/pciback_ops.c
-+++ b/drivers/xen/pciback/pciback_ops.c
-@@ -66,9 +66,10 @@ void test_and_schedule_op(struct pciback_device *pdev)
- * context because some of the pci_* functions can sleep (mostly due to ACPI
- * use of semaphores). This function is intended to be called from a work
- * queue in process context taking a struct pciback_device as a parameter */
--void pciback_do_op(void *data)
-+
-+void pciback_do_op(struct work_struct *data)
- {
-- struct pciback_device *pdev = data;
-+ struct pciback_device *pdev = container_of(data, struct pciback_device, op_work);
- struct pci_dev *dev;
- struct xen_pci_op *op = &pdev->sh_info->op;
+ /* Called after netfront has transmitted */
+@@ -1391,19 +1381,28 @@ static void net_tx_action(unsigned long data)
+ unsigned nr_mops;
+ int ret;
-@@ -123,7 +124,7 @@ void pciback_do_op(void *data)
- test_and_schedule_op(pdev);
- }
+- if (netbk->dealloc_cons != netbk->dealloc_prod)
+- net_tx_action_dealloc(netbk);
++ net_tx_action_dealloc(netbk);
--irqreturn_t pciback_handle_event(int irq, void *dev_id, struct pt_regs *regs)
-+irqreturn_t pciback_handle_event(int irq, void *dev_id)
- {
- struct pciback_device *pdev = dev_id;
+ nr_mops = net_tx_build_mops(netbk);
-diff --git a/drivers/xen/pciback/xenbus.c b/drivers/xen/pciback/xenbus.c
-index bbca3fe..bd52289 100644
---- a/drivers/xen/pciback/xenbus.c
-+++ b/drivers/xen/pciback/xenbus.c
-@@ -9,6 +9,7 @@
- #include <linux/vmalloc.h>
- #include <xen/xenbus.h>
- #include <xen/events.h>
-+#include <linux/workqueue.h>
- #include "pciback.h"
+ if (nr_mops == 0)
+- return;
++ goto out;
- #define INVALID_EVTCHN_IRQ (-1)
-@@ -33,7 +34,7 @@ static struct pciback_device *alloc_pdev(struct xenbus_device *xdev)
- pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
- pdev->be_watching = 0;
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
+ netbk->tx_map_ops, nr_mops);
+ BUG_ON(ret);
-- INIT_WORK(&pdev->op_work, pciback_do_op, pdev);
-+ INIT_WORK(&pdev->op_work, pciback_do_op);
+ net_tx_submit(netbk);
++out:
++ if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB &&
++ !list_empty(&netbk->pending_inuse_head)) {
++ struct netbk_tx_pending_inuse *oldest;
++
++ oldest = list_entry(netbk->pending_inuse_head.next,
++ struct netbk_tx_pending_inuse, list);
++ mod_timer(&netbk->netbk_tx_pending_timer,
++ oldest->alloc_time + HZ);
++ }
+ }
- if (pciback_init_devices(pdev)) {
- kfree(pdev);
+ static void netif_idx_release(struct xen_netbk *netbk, u16 pending_idx)
--
1.7.4
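
Aside: the two hunks above amount to the control flow sketched below (a
paraphrase of the patched net_tx_action(), not new code). The dealloc pass
now runs unconditionally, and the early-exit path funnels through the same
timer update as the normal path:

    static void net_tx_action(unsigned long data)
    {
            struct xen_netbk *netbk = (struct xen_netbk *)data;
            unsigned nr_mops;
            int ret;

            /* always: the pending_inuse list may need work even when
             * the dealloc ring is empty */
            net_tx_action_dealloc(netbk);

            nr_mops = net_tx_build_mops(netbk);
            if (nr_mops == 0)
                    goto out;       /* do not skip the timer update */

            ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
                                            netbk->tx_map_ops, nr_mops);
            BUG_ON(ret);
            net_tx_submit(netbk);
    out:
            /* re-arm the delayed-copy timer on the oldest in-use entry */
            if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB &&
                !list_empty(&netbk->pending_inuse_head)) {
                    struct netbk_tx_pending_inuse *oldest =
                            list_entry(netbk->pending_inuse_head.next,
                                       struct netbk_tx_pending_inuse, list);
                    mod_timer(&netbk->netbk_tx_pending_timer,
                              oldest->alloc_time + HZ);
            }
    }
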
-From 57f6c49d0f428f96cca49147d68b0bb6156613a6 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Tue, 13 Oct 2009 17:22:26 -0400
-Subject: [PATCH 011/244] xen-pciback: Update the calling mechanism for xenbus_[map|unmap]_ring_valloc functions.
+From 48fa1af97e6c9d304c04f70a75de1340e7d79e18 Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Fri, 11 Jun 2010 10:51:01 +0100
+Subject: [PATCH 043/203] xen: netback: handle NET_SKBUFF_DATA_USES_OFFSET correctly
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+Cc: Jan Beulich <JBeulich(a)novell.com>
---
- drivers/xen/pciback/pciback.h | 1 -
- drivers/xen/pciback/xenbus.c | 18 +++++++++---------
- 2 files changed, 9 insertions(+), 10 deletions(-)
+ drivers/xen/netback/netback.c | 4 ++++
+ 1 files changed, 4 insertions(+), 0 deletions(-)
-diff --git a/drivers/xen/pciback/pciback.h b/drivers/xen/pciback/pciback.h
-index 4fb8c05..5e8e14e 100644
---- a/drivers/xen/pciback/pciback.h
-+++ b/drivers/xen/pciback/pciback.h
-@@ -36,7 +36,6 @@ struct pciback_device {
-
- int evtchn_irq;
-
-- struct vm_struct *sh_area;
- struct xen_pci_sharedinfo *sh_info;
-
- unsigned long flags;
-diff --git a/drivers/xen/pciback/xenbus.c b/drivers/xen/pciback/xenbus.c
-index bd52289..5be1350 100644
---- a/drivers/xen/pciback/xenbus.c
-+++ b/drivers/xen/pciback/xenbus.c
-@@ -7,6 +7,7 @@
- #include <linux/init.h>
- #include <linux/list.h>
- #include <linux/vmalloc.h>
-+#include <linux/workqueue.h>
- #include <xen/xenbus.h>
- #include <xen/events.h>
- #include <linux/workqueue.h>
-@@ -29,7 +30,6 @@ static struct pciback_device *alloc_pdev(struct xenbus_device *xdev)
-
- spin_lock_init(&pdev->dev_lock);
-
-- pdev->sh_area = NULL;
- pdev->sh_info = NULL;
- pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
- pdev->be_watching = 0;
-@@ -59,7 +59,7 @@ static void pciback_disconnect(struct pciback_device *pdev)
- flush_workqueue(pciback_wq);
-
- if (pdev->sh_info != NULL) {
-- xenbus_unmap_ring_vfree(pdev->xdev, pdev->sh_area);
-+ xenbus_unmap_ring_vfree(pdev->xdev, pdev->sh_info);
- pdev->sh_info = NULL;
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 58dfbd2..aa094af 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -218,7 +218,11 @@ static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
+ len -= copy;
}
-@@ -85,23 +85,23 @@ static int pciback_do_attach(struct pciback_device *pdev, int gnt_ref,
- int remote_evtchn)
- {
- int err = 0;
-- struct vm_struct *area;
-+ void *vaddr;
-
- dev_dbg(&pdev->xdev->dev,
- "Attaching to frontend resources - gnt_ref=%d evtchn=%d\n",
- gnt_ref, remote_evtchn);
-
-- area = xenbus_map_ring_valloc(pdev->xdev, gnt_ref);
-- if (IS_ERR(area)) {
-- err = PTR_ERR(area);
-+ err = xenbus_map_ring_valloc(pdev->xdev, gnt_ref, &vaddr);
-+ if (err < 0) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error mapping other domain page in ours.");
- goto out;
- }
-- pdev->sh_area = area;
-- pdev->sh_info = area->addr;
-+ pdev->sh_info = vaddr;
++#ifdef NET_SKBUFF_DATA_USES_OFFSET
++ offset = 0;
++#else
+ offset = nskb->data - skb->data;
++#endif
- err = bind_interdomain_evtchn_to_irqhandler(
- pdev->xdev->otherend_id, remote_evtchn, pciback_handle_event,
-- SA_SAMPLE_RANDOM, "pciback", pdev);
-+ 0, "pciback", pdev);
- if (err < 0) {
- xenbus_dev_fatal(pdev->xdev, err,
- "Error binding event channel to IRQ");
+ nskb->transport_header = skb->transport_header + offset;
+ nskb->network_header = skb->network_header + offset;
--
1.7.4
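
Aside: NET_SKBUFF_DATA_USES_OFFSET is defined when BITS_PER_LONG > 32, so
on 64-bit builds skb->transport_header and skb->network_header are offsets
from skb->head rather than raw pointers, and they carry over to a copied
skb unchanged. Annotated sketch of the fixed assignment in netbk_copy_skb():

    #ifdef NET_SKBUFF_DATA_USES_OFFSET
            /* header fields are offsets from skb->head: copy verbatim */
            offset = 0;
    #else
            /* header fields are pointers: rebase them into nskb's buffer */
            offset = nskb->data - skb->data;
    #endif
            nskb->transport_header = skb->transport_header + offset;
            nskb->network_header = skb->network_header + offset;
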
-From 6e86fcb926e41fb55f512972603e5aaf77e2efb8 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Tue, 13 Oct 2009 17:22:30 -0400
-Subject: [PATCH 012/244] xen-pciback: Add check to load only under privileged domain.
+From 7d3e6e42251f179e407fa5236f613e5500b3a3ea Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Fri, 11 Jun 2010 10:51:01 +0100
+Subject: [PATCH 044/203] xen: netback: drop frag member from struct netbk_rx_meta
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
----
- drivers/xen/pciback/pci_stub.c | 5 +++++
- drivers/xen/pciback/xenbus.c | 2 --
- 2 files changed, 5 insertions(+), 2 deletions(-)
+It has been unused since c3219dc "xen/netback: completely drop flip
+support", as has netbk_free_pages().
-diff --git a/drivers/xen/pciback/pci_stub.c b/drivers/xen/pciback/pci_stub.c
-index c02f21f..d97dac5 100644
---- a/drivers/xen/pciback/pci_stub.c
-+++ b/drivers/xen/pciback/pci_stub.c
-@@ -14,6 +14,8 @@
- #include <linux/wait.h>
- #include <asm/atomic.h>
- #include <xen/events.h>
-+#include <asm/xen/pci.h>
-+#include <asm/xen/hypervisor.h>
- #include "pciback.h"
- #include "conf_space.h"
- #include "conf_space_quirks.h"
-@@ -1286,6 +1288,9 @@ static int __init pciback_init(void)
- {
- int err;
-
-+ if (!xen_initial_domain())
-+ return -ENODEV;
-+
- err = pciback_config_init();
- if (err)
- return err;
-diff --git a/drivers/xen/pciback/xenbus.c b/drivers/xen/pciback/xenbus.c
-index 5be1350..a85c413 100644
---- a/drivers/xen/pciback/xenbus.c
-+++ b/drivers/xen/pciback/xenbus.c
-@@ -693,8 +693,6 @@ static struct xenbus_driver xenbus_pciback_driver = {
-
- int __init pciback_xenbus_register(void)
- {
-- if (!is_running_on_xen())
-- return -ENODEV;
- pciback_wq = create_workqueue("pciback_workqueue");
- if (!pciback_wq) {
- printk(KERN_ERR "pciback_xenbus_register: create"
---
-1.7.4
-
-
-From c1139f912c1336538e51966d56e5905954052cba Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Tue, 13 Oct 2009 17:22:31 -0400
-Subject: [PATCH 013/244] xen-pciback: Remove usage of pci_restore_bars() as Linux handles the power-up states correctly now.
-
-Originally this code was pulled from the upstream kernel, and stuck
-in the linux-2.6-sparse tree. At that point in time, the Linux tree (2.6.16?)
-did not know how to handle this. Nowadays the pci_set_power_state routine
-handles this case so we do not need this anymore.
+(Although it now has only a single member, struct netbk_rx_meta will
+gain other members in a subsequent patch, so there is no point in
+reworking to get rid of the struct.)
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
---
- drivers/xen/pciback/conf_space_capability_pm.c | 13 -------------
- 1 files changed, 0 insertions(+), 13 deletions(-)
+ drivers/xen/netback/common.h | 1 -
+ drivers/xen/netback/netback.c | 8 --------
+ 2 files changed, 0 insertions(+), 9 deletions(-)
-diff --git a/drivers/xen/pciback/conf_space_capability_pm.c b/drivers/xen/pciback/conf_space_capability_pm.c
-index e2f99c7..e1d3af4 100644
---- a/drivers/xen/pciback/conf_space_capability_pm.c
-+++ b/drivers/xen/pciback/conf_space_capability_pm.c
-@@ -58,19 +58,6 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
- goto out;
- }
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index 36cb2b9..be4fe91 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -230,7 +230,6 @@ struct pending_tx_info {
+ typedef unsigned int pending_ring_idx_t;
-- /*
-- * Device may lose PCI config info on D3->D0 transition. This
-- * is a problem for some guests which will not reset BARs. Even
-- * those that have a go will be foiled by our BAR-write handler
-- * which will discard the write! Since Linux won't re-init
-- * the config space automatically in all cases, we do it here.
-- * Future: Should we re-initialise all first 64 bytes of config space?
-- */
-- if (new_state == PCI_D0 &&
-- (old_state == PCI_D3hot || old_state == PCI_D3cold) &&
-- !(old_value & PCI_PM_CTRL_NO_SOFT_RESET))
-- pci_restore_bars(dev);
--
- out:
- return err;
+ struct netbk_rx_meta {
+- skb_frag_t frag;
+ int id;
+ };
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index aa094af..9f7e489 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -411,14 +411,6 @@ static void netbk_gop_skb(struct sk_buff *skb,
+ netif->rx.req_cons += nr_frags + extra;
}
+
+-static inline void netbk_free_pages(int nr_frags, struct netbk_rx_meta *meta)
+-{
+- int i;
+-
+- for (i = 0; i < nr_frags; i++)
+- put_page(meta[i].frag.page);
+-}
+-
+ /* This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
+ used to set up the operations on the top of
+ netrx_pending_operations, which have since been done. Check that
--
1.7.4
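
Aside: after this cleanup the struct is reduced to the sketch below; the
following patch in this series ("linearise SKBs...") then grows it again
with size and gso_size fields:

    struct netbk_rx_meta {
            int id;         /* sole member left once 'frag' is dropped */
    };
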
-From 721657d92623cfcf2f6f68c14abf97eb40fa6b20 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Tue, 13 Oct 2009 17:22:32 -0400
-Subject: [PATCH 014/244] xen-pciback: Enable Xen-PCI-back to be compiled.
+From 1ced27150d0092c40ebbbbb3896192003d433c0e Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Fri, 11 Jun 2010 10:51:01 +0100
+Subject: [PATCH 045/203] xen: netback: linearise SKBs as we copy them into guest memory on guest-RX.
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+There's no point in sending lots of little packets to a copying
+receiver if we can instead arrange to copy them all into a single RX
+buffer. We need to copy anyway, so there's no overhead here, and this
+is a little bit easier on the receiving domain's network stack.
+
+Based on a patch by Steven Smith. Fixed to not skip unnecessarily to
+the next buffer which could leave the head fragment of a received
+frame empty if the headlen of an SKB was large (which would crash
+netfront). Instead we only try and pack "small enough" fragments
+together but do not try to coalesce large or whole page fragments.
+
+In previous iterations of this patch we also tried to only include
+2048 bytes per frag because very old netfronts stored other
+information in the second half of the page. It has been determined
+that only frontends which support scatter-gather are going to come
+down this path and that any guest which supports scatter-gather is
+also new enough to allow us to use the full page size for each
+fragment (since this limitation was fixed as part of the SG
+implementation) so we do not need this restriction.
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+Cc: Steven Smith <Steven.Smith(a)eu.citrix.com>
---
- drivers/xen/Kconfig | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++
- drivers/xen/Makefile | 1 +
- 2 files changed, 66 insertions(+), 0 deletions(-)
+ drivers/xen/netback/common.h | 15 ++-
+ drivers/xen/netback/netback.c | 282 ++++++++++++++++++++++++++++++-----------
+ 2 files changed, 218 insertions(+), 79 deletions(-)
-diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
-index 6e6180c..d874453 100644
---- a/drivers/xen/Kconfig
-+++ b/drivers/xen/Kconfig
-@@ -29,6 +29,71 @@ config XEN_DEV_EVTCHN
- Support for backend device drivers that provide I/O services
- to other virtual machines.
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index be4fe91..9c0c048 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -82,7 +82,9 @@ struct xen_netif {
+ /* Internal feature information. */
+ u8 can_queue:1; /* can queue packets for receiver? */
-+config XEN_PCIDEV_BACKEND
-+ tristate "PCI-device backend driver"
-+ depends on PCI
-+ depends on XEN_BACKEND
-+ help
-+ The PCI device backend driver allows the kernel to export arbitrary
-+ PCI devices to other guests. If you select this to be a module, you
-+ will need to make sure no other driver has bound to the device(s)
-+ you want to make visible to other guests.
+- /* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
++ /* Allow netif_be_start_xmit() to peek ahead in the rx request
++ * ring. This is a prediction of what rx_req_cons will be once
++ * all queued skbs are put on the ring. */
+ RING_IDX rx_req_cons_peek;
+
+ /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
+@@ -231,6 +233,8 @@ typedef unsigned int pending_ring_idx_t;
+
+ struct netbk_rx_meta {
+ int id;
++ int size;
++ int gso_size;
+ };
+
+ struct netbk_tx_pending_inuse {
+@@ -240,6 +244,8 @@ struct netbk_tx_pending_inuse {
+
+ #define MAX_PENDING_REQS 256
+
++#define MAX_BUFFER_OFFSET PAGE_SIZE
+
-+choice
-+ prompt "PCI Backend Mode"
-+ depends on XEN_PCIDEV_BACKEND
-+ default XEN_PCIDEV_BACKEND_VPCI if !IA64
-+ default XEN_PCIDEV_BACKEND_CONTROLLER if IA64
+ /* extra field used in struct page */
+ union page_ext {
+ struct {
+@@ -301,7 +307,12 @@ struct xen_netbk {
+ struct multicall_entry rx_mcl[NET_RX_RING_SIZE+3];
+ struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+ struct gnttab_transfer grant_trans_op[NET_RX_RING_SIZE];
+- struct gnttab_copy grant_copy_op[NET_RX_RING_SIZE];
++ /*
++ * Each head or fragment can be up to 4096 bytes. Given
++ * MAX_BUFFER_OFFSET of 4096 the worst case is that each
++ * head/fragment uses 2 copy operations.
++ */
++ struct gnttab_copy grant_copy_op[2*NET_RX_RING_SIZE];
+ unsigned char rx_notify[NR_IRQS];
+ u16 notify_list[NET_RX_RING_SIZE];
+ struct netbk_rx_meta meta[NET_RX_RING_SIZE];
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 9f7e489..d53d88e 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -259,6 +259,48 @@ static void tx_queue_callback(unsigned long data)
+ netif_wake_queue(netif->dev);
+ }
+
++/* Figure out how many ring slots we're going to need to send @skb to
++ the guest. */
++static unsigned count_skb_slots(struct sk_buff *skb, struct xen_netif *netif)
++{
++ unsigned count;
++ unsigned copy_off;
++ unsigned i;
+
-+config XEN_PCIDEV_BACKEND_VPCI
-+ bool "Virtual PCI"
-+ ---help---
-+ This PCI Backend hides the true PCI topology and makes the frontend
-+ think there is a single PCI bus with only the exported devices on it.
-+ For example, a device at 03:05.0 will be re-assigned to 00:00.0. A
-+ second device at 02:1a.1 will be re-assigned to 00:01.1.
++ copy_off = 0;
++ count = 1;
+
-+config XEN_PCIDEV_BACKEND_PASS
-+ bool "Passthrough"
-+ ---help---
-+ This PCI Backend provides a real view of the PCI topology to the
-+ frontend (for example, a device at 06:01.b will still appear at
-+ 06:01.b to the frontend). This is similar to how Xen 2.0.x exposed
-+ PCI devices to its driver domains. This may be required for drivers
-+ which depend on finding their hardware in certain bus/slot
-+ locations.
++ BUG_ON(offset_in_page(skb->data) + skb_headlen(skb) > MAX_BUFFER_OFFSET);
+
-+config XEN_PCIDEV_BACKEND_SLOT
-+ bool "Slot"
-+ ---help---
-+ This PCI Backend hides the true PCI topology and makes the frontend
-+ think there is a single PCI bus with only the exported devices on it.
-+ Contrary to the virtual PCI backend, a function becomes a new slot.
-+ For example, a device at 03:05.2 will be re-assigned to 00:00.0. A
-+ second device at 02:1a.1 will be re-assigned to 00:01.0.
++ copy_off = skb_headlen(skb);
+
-+config XEN_PCIDEV_BACKEND_CONTROLLER
-+ bool "Controller"
-+ depends on IA64
-+ ---help---
-+ This PCI backend virtualizes the PCI bus topology by providing a
-+ virtual bus per PCI root device. Devices which are physically under
-+ the same root bus will appear on the same virtual bus. For systems
-+ with complex I/O addressing, this is the only backend which supports
-+ extended I/O port spaces and MMIO translation offsets. This backend
-+ also supports slot virtualization. For example, a device at
-+ 0000:01:02.1 will be re-assigned to 0000:00:00.0. A second device
-+ at 0000:02:05.0 (behind a P2P bridge on bus 0000:01) will be
-+ re-assigned to 0000:00:01.0. A third device at 0000:16:05.0 (under
-+ a different PCI root bus) will be re-assigned to 0000:01:00.0.
++ if (skb_shinfo(skb)->gso_size)
++ count++;
+
-+endchoice
++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++ unsigned long size = skb_shinfo(skb)->frags[i].size;
++ unsigned long bytes;
++ while (size > 0) {
++ BUG_ON(copy_off > MAX_BUFFER_OFFSET);
+
-+config XEN_PCIDEV_BE_DEBUG
-+ bool "PCI Backend Debugging"
-+ depends on XEN_PCIDEV_BACKEND
++ /* These checks are the same as in netbk_gop_frag_copy */
++ if (copy_off == MAX_BUFFER_OFFSET
++ || ((copy_off + size > MAX_BUFFER_OFFSET) && (size <= MAX_BUFFER_OFFSET) && copy_off)) {
++ count++;
++ copy_off = 0;
++ }
+
++ bytes = size;
++ if (copy_off + bytes > MAX_BUFFER_OFFSET)
++ bytes = MAX_BUFFER_OFFSET - copy_off;
+
- config XENFS
- tristate "Xen filesystem"
- default y
-diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
-index eb8a78d..3737dee 100644
---- a/drivers/xen/Makefile
-+++ b/drivers/xen/Makefile
-@@ -9,6 +9,7 @@ obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
- obj-$(CONFIG_XEN_BALLOON) += balloon.o
- obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o
- obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o
-+obj-$(CONFIG_XEN_PCIDEV_BACKEND) += pciback/
- obj-$(CONFIG_XENFS) += xenfs/
- obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
- obj-$(CONFIG_XEN_PLATFORM_PCI) += platform-pci.o
---
-1.7.4
-
-
-From c164cd8577017d1c4e001b475fadddc7d2ff5c78 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Thu, 5 Nov 2009 15:25:43 -0500
-Subject: [PATCH 015/244] xen-pciback: Return the physical IRQ number instead of the allocated IRQ number to pcifront.
-
-The allocation of IRQ numbers in Linux privileged domains is based
-on finding the first unbound IRQ number. After the allocation is done
-a HYPERCALL to Xen is done, which allocates a PIRQ globally.
-That PIRQ->IRQ binding is saved in data structures that are used
-during ISR executions.
-
-Before this patch, for non-privileged domains we would return the local
-IRQ number instead of the PIRQ. The non-privileged domains require the
-PIRQ so that they can attach the their own interrupt handler to it.
-Fortunatly there is a function, 'xen_gsi_from_irq' that returns
-that global IRQ number.
-
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
----
- drivers/xen/pciback/conf_space_capability_msi.c | 12 ++++++++----
- 1 files changed, 8 insertions(+), 4 deletions(-)
-
-diff --git a/drivers/xen/pciback/conf_space_capability_msi.c b/drivers/xen/pciback/conf_space_capability_msi.c
-index 762e396..7fb5371 100644
---- a/drivers/xen/pciback/conf_space_capability_msi.c
-+++ b/drivers/xen/pciback/conf_space_capability_msi.c
-@@ -6,6 +6,7 @@
- #include "conf_space.h"
- #include "conf_space_capability.h"
- #include <xen/interface/io/pciif.h>
-+#include <xen/events.h>
- #include "pciback.h"
-
- int pciback_enable_msi(struct pciback_device *pdev,
-@@ -22,7 +23,9 @@ int pciback_enable_msi(struct pciback_device *pdev,
- return XEN_PCI_ERR_op_failed;
++ copy_off += bytes;
++ size -= bytes;
++ }
++ }
++ return count;
++}
++
+ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct xen_netif *netif = netdev_priv(dev);
+@@ -290,8 +332,9 @@ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ skb = nskb;
}
-- op->value = dev->irq;
-+ /* The value the guest needs is actually the IDT vector, not
-+ * the local domain's IRQ number. */
-+ op->value = xen_gsi_from_irq(dev->irq);
- return 0;
- }
-
-@@ -31,7 +34,7 @@ int pciback_disable_msi(struct pciback_device *pdev,
- {
- pci_disable_msi(dev);
+- netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1 +
+- !!skb_shinfo(skb)->gso_size;
++ /* Reserve ring slots for the worst-case number of
++ * fragments. */
++ netif->rx_req_cons_peek += count_skb_slots(skb, netif);
+ netif_get(netif);
-- op->value = dev->irq;
-+ op->value = xen_gsi_from_irq(dev->irq);
- return 0;
- }
+ if (netbk_can_queue(dev) && netbk_queue_full(netif)) {
+@@ -335,96 +378,165 @@ struct netrx_pending_operations {
+ struct gnttab_copy *copy;
+ struct multicall_entry *mcl;
+ struct netbk_rx_meta *meta;
++ int copy_off;
++ grant_ref_t copy_gref;
+ };
-@@ -57,7 +60,8 @@ int pciback_enable_msix(struct pciback_device *pdev,
+ /* Set up the grant operations for this fragment. If it's a flipping
+ interface, we also set up the unmap request from here. */
+-static u16 netbk_gop_frag(struct xen_netif *netif, struct netbk_rx_meta *meta,
+- int i, struct netrx_pending_operations *npo,
+- struct page *page, unsigned long size,
+- unsigned long offset)
++
++static void netbk_gop_frag_copy(struct xen_netif *netif,
++ struct netrx_pending_operations *npo,
++ struct page *page, unsigned long size,
++ unsigned long offset, int head)
+ {
+ struct gnttab_copy *copy_gop;
+- struct xen_netif_rx_request *req;
+- unsigned long old_mfn;
++ struct netbk_rx_meta *meta;
+ int group = netif_page_group(page);
+ int idx = netif_page_index(page);
++ unsigned long bytes;
++
++ /* Data must not cross a page boundary. */
++ BUG_ON(size + offset > PAGE_SIZE);
- for (i = 0; i < op->value; i++) {
- op->msix_entries[i].entry = entries[i].entry;
-- op->msix_entries[i].vector = entries[i].vector;
-+ op->msix_entries[i].vector =
-+ xen_gsi_from_irq(entries[i].vector);
- }
+- old_mfn = virt_to_mfn(page_address(page));
++ meta = npo->meta + npo->meta_prod - 1;
- kfree(entries);
-@@ -73,7 +77,7 @@ int pciback_disable_msix(struct pciback_device *pdev,
+- req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons + i);
++ while (size > 0) {
++ BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
- pci_disable_msix(dev);
+- copy_gop = npo->copy + npo->copy_prod++;
+- copy_gop->flags = GNTCOPY_dest_gref;
+- if (PageForeign(page)) {
++ /*
++ * Move to a new receive buffer if:
++ *
++ * simple case: we have completely filled the current buffer.
++ *
++ * complex case: the current frag would overflow
++ * the current buffer but only if:
++ * (i) this frag would fit completely in the next buffer
++ * and (ii) there is already some data in the current buffer
++ * and (iii) this is not the head buffer.
++ *
++ * Where:
++ * - (i) stops us splitting a frag into two copies
++ * unless the frag is too large for a single buffer.
++ * - (ii) stops us from leaving a buffer pointlessly empty.
++ * - (iii) stops us leaving the first buffer
++ * empty. Strictly speaking this is already covered
++ * by (ii) but is explicitly checked because
++ * netfront relies on the first buffer being
++ * non-empty and can crash otherwise.
++ *
++ * This means we will effectively linearise small
++ * frags but do not needlessly split large buffers
++ * into multiple copies, and tend to give large frags
++ * their own buffers as before.
++ */
++ if (npo->copy_off == MAX_BUFFER_OFFSET
++ || ((npo->copy_off + size > MAX_BUFFER_OFFSET) && (size <= MAX_BUFFER_OFFSET) && npo->copy_off && !head)) {
++ struct xen_netif_rx_request *req;
++
++ BUG_ON(head); /* Netfront requires there to be some data in the head buffer. */
++ /* Overflowed this request, go to the next one */
++ req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons++);
++ meta = npo->meta + npo->meta_prod++;
++ meta->size = 0;
++ meta->id = req->id;
++ npo->copy_off = 0;
++ npo->copy_gref = req->gref;
++ }
++
++ bytes = size;
++ if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
++ bytes = MAX_BUFFER_OFFSET - npo->copy_off;
++
++ copy_gop = npo->copy + npo->copy_prod++;
++ copy_gop->flags = GNTCOPY_dest_gref;
++ if (PageForeign(page)) {
+ struct xen_netbk *netbk = &xen_netbk[group];
+ struct pending_tx_info *src_pend = &netbk->pending_tx_info[idx];
+ copy_gop->source.domid = src_pend->netif->domid;
+ copy_gop->source.u.ref = src_pend->req.gref;
+- copy_gop->flags |= GNTCOPY_source_gref;
+- } else {
+- copy_gop->source.domid = DOMID_SELF;
+- copy_gop->source.u.gmfn = old_mfn;
+- }
+- copy_gop->source.offset = offset;
+- copy_gop->dest.domid = netif->domid;
+- copy_gop->dest.offset = 0;
+- copy_gop->dest.u.ref = req->gref;
+- copy_gop->len = size;
++ copy_gop->flags |= GNTCOPY_source_gref;
++ } else {
++ copy_gop->source.domid = DOMID_SELF;
++ copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
++ }
++ copy_gop->source.offset = offset;
++ copy_gop->dest.domid = netif->domid;
-- op->value = dev->irq;
-+ op->value = xen_gsi_from_irq(dev->irq);
- return 0;
+- return req->id;
++ copy_gop->dest.offset = npo->copy_off;
++ copy_gop->dest.u.ref = npo->copy_gref;
++ copy_gop->len = bytes;
++
++ npo->copy_off += bytes;
++ meta->size += bytes;
++
++ offset += bytes;
++ size -= bytes;
++ head = 0; /* Must be something in this buffer now */
++ }
}
---
-1.7.4
-
-
-From b0b035f1de3282aa96a6dc28007d513e8fce793d Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Thu, 5 Nov 2009 15:25:44 -0500
-Subject: [PATCH 016/244] xen-pciback: Fix checkpatch warnings and errors for pciback/ directory.
-
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
----
- drivers/xen/pciback/conf_space.c | 34 +++---
- drivers/xen/pciback/conf_space.h | 30 ++--
- drivers/xen/pciback/conf_space_capability.c | 5 +-
- drivers/xen/pciback/conf_space_capability.h | 3 +
- drivers/xen/pciback/conf_space_capability_msi.c | 3 +-
- drivers/xen/pciback/conf_space_capability_pm.c | 4 +-
- drivers/xen/pciback/conf_space_capability_vpd.c | 2 +-
- drivers/xen/pciback/conf_space_header.c | 7 +-
- drivers/xen/pciback/conf_space_quirks.c | 16 ++-
- drivers/xen/pciback/controller.c | 15 +-
- drivers/xen/pciback/passthrough.c | 6 +-
- drivers/xen/pciback/pci_stub.c | 165 +++++++++++------------
- drivers/xen/pciback/pciback.h | 28 +++--
- drivers/xen/pciback/pciback_ops.c | 74 +++++------
- drivers/xen/pciback/slot.c | 22 ++--
- drivers/xen/pciback/vpci.c | 28 ++--
- drivers/xen/pciback/xenbus.c | 42 +++---
- 17 files changed, 245 insertions(+), 239 deletions(-)
-
-diff --git a/drivers/xen/pciback/conf_space.c b/drivers/xen/pciback/conf_space.c
-index 0c76db1..370c18e 100644
---- a/drivers/xen/pciback/conf_space.c
-+++ b/drivers/xen/pciback/conf_space.c
-@@ -18,11 +18,11 @@
- static int permissive;
- module_param(permissive, bool, 0644);
+-static void netbk_gop_skb(struct sk_buff *skb,
+- struct netrx_pending_operations *npo)
++/* Prepare an SKB to be transmitted to the frontend. This is
++ responsible for allocating grant operations, meta structures, etc.
++ It returns the number of meta structures consumed. The number of
++ ring slots used is always equal to the number of meta slots used
++ plus the number of GSO descriptors used. Currently, we use either
++ zero GSO descriptors (for non-GSO packets) or one descriptor (for
++ frontend-side LRO). */
++static int netbk_gop_skb(struct sk_buff *skb,
++ struct netrx_pending_operations *npo)
+ {
+ struct xen_netif *netif = netdev_priv(skb->dev);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int i;
+- int extra;
+- struct netbk_rx_meta *head_meta, *meta;
++ struct xen_netif_rx_request *req;
++ struct netbk_rx_meta *meta;
++ int old_meta_prod;
++
++ old_meta_prod = npo->meta_prod;
--#define DEFINE_PCI_CONFIG(op,size,type) \
-+#define DEFINE_PCI_CONFIG(op, size, type) \
- int pciback_##op##_config_##size \
- (struct pci_dev *dev, int offset, type value, void *data) \
- { \
-- return pci_##op##_config_##size (dev, offset, value); \
-+ return pci_##op##_config_##size(dev, offset, value); \
- }
+- head_meta = npo->meta + npo->meta_prod++;
+- head_meta->frag.page_offset = skb_shinfo(skb)->gso_type;
+- head_meta->frag.size = skb_shinfo(skb)->gso_size;
+- extra = !!head_meta->frag.size + 1;
++ req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons++);
++ meta = npo->meta + npo->meta_prod++;
++ meta->gso_size = skb_shinfo(skb)->gso_size;
++ meta->size = 0;
++ meta->id = req->id;
++ npo->copy_off = 0;
++ npo->copy_gref = req->gref;
++
++ netbk_gop_frag_copy(netif,
++ npo, virt_to_page(skb->data),
++ skb_headlen(skb),
++ offset_in_page(skb->data), 1);
++
++ /* Leave a gap for the GSO descriptor. */
++ if (skb_shinfo(skb)->gso_size)
++ netif->rx.req_cons++;
- DEFINE_PCI_CONFIG(read, byte, u8 *)
-@@ -139,14 +139,15 @@ static int pcibios_err_to_errno(int err)
+ for (i = 0; i < nr_frags; i++) {
+- meta = npo->meta + npo->meta_prod++;
+- meta->frag = skb_shinfo(skb)->frags[i];
+- meta->id = netbk_gop_frag(netif, meta, i + extra, npo,
+- meta->frag.page,
+- meta->frag.size,
+- meta->frag.page_offset);
++ netbk_gop_frag_copy(netif, npo,
++ skb_shinfo(skb)->frags[i].page,
++ skb_shinfo(skb)->frags[i].size,
++ skb_shinfo(skb)->frags[i].page_offset,
++ 0);
+ }
+
+- /*
+- * This must occur at the end to ensure that we don't trash skb_shinfo
+- * until we're done. We know that the head doesn't cross a page
+- * boundary because such packets get copied in netif_be_start_xmit.
+- */
+- head_meta->id = netbk_gop_frag(netif, head_meta, 0, npo,
+- virt_to_page(skb->data),
+- skb_headlen(skb),
+- offset_in_page(skb->data));
+-
+- netif->rx.req_cons += nr_frags + extra;
++ return npo->meta_prod - old_meta_prod;
}
- int pciback_config_read(struct pci_dev *dev, int offset, int size,
-- u32 * ret_val)
-+ u32 *ret_val)
+ /* This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
+ used to set up the operations on the top of
+ netrx_pending_operations, which have since been done. Check that
+ they didn't give any errors and advance over them. */
+-static int netbk_check_gop(int nr_frags, domid_t domid,
++static int netbk_check_gop(int nr_meta_slots, domid_t domid,
+ struct netrx_pending_operations *npo)
{
- int err = 0;
- struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
- const struct config_field_entry *cfg_entry;
- const struct config_field *field;
- int req_start, req_end, field_start, field_end;
-- /* if read fails for any reason, return 0 (as if device didn't respond) */
-+ /* if read fails for any reason, return 0
-+ * (as if device didn't respond) */
- u32 value = 0, tmp_val;
-
- if (unlikely(verbose_request))
-@@ -161,10 +162,10 @@ int pciback_config_read(struct pci_dev *dev, int offset, int size,
- /* Get the real value first, then modify as appropriate */
- switch (size) {
- case 1:
-- err = pci_read_config_byte(dev, offset, (u8 *) & value);
-+ err = pci_read_config_byte(dev, offset, (u8 *) &value);
- break;
- case 2:
-- err = pci_read_config_word(dev, offset, (u16 *) & value);
-+ err = pci_read_config_word(dev, offset, (u16 *) &value);
- break;
- case 4:
- err = pci_read_config_dword(dev, offset, &value);
-@@ -192,7 +193,7 @@ int pciback_config_read(struct pci_dev *dev, int offset, int size,
- }
- }
+ struct gnttab_copy *copy_op;
+ int status = NETIF_RSP_OKAY;
+ int i;
-- out:
-+out:
- if (unlikely(verbose_request))
- printk(KERN_DEBUG "pciback: %s: read %d bytes at 0x%x = %x\n",
- pci_name(dev), size, offset, value);
-@@ -276,8 +277,8 @@ int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value)
- } else if (!dev_data->warned_on_write) {
- dev_data->warned_on_write = 1;
- dev_warn(&dev->dev, "Driver tried to write to a "
-- "read-only configuration space field at offset "
-- "0x%x, size %d. This may be harmless, but if "
-+ "read-only configuration space field at offset"
-+ " 0x%x, size %d. This may be harmless, but if "
- "you have problems with your device:\n"
- "1) see permissive attribute in sysfs\n"
- "2) report problems to the xen-devel "
-@@ -295,8 +296,8 @@ void pciback_config_free_dyn_fields(struct pci_dev *dev)
- struct config_field_entry *cfg_entry, *t;
- const struct config_field *field;
+- for (i = 0; i <= nr_frags; i++) {
+- copy_op = npo->copy + npo->copy_cons++;
+- if (copy_op->status != GNTST_okay) {
++ for (i = 0; i < nr_meta_slots; i++) {
++ copy_op = npo->copy + npo->copy_cons++;
++ if (copy_op->status != GNTST_okay) {
+ DPRINTK("Bad status %d from copy to DOM%d.\n",
+ copy_op->status, domid);
+ status = NETIF_RSP_ERROR;
+@@ -435,27 +547,35 @@ static int netbk_check_gop(int nr_frags, domid_t domid,
+ }
-- dev_dbg(&dev->dev,
-- "free-ing dynamically allocated virtual configuration space fields\n");
-+ dev_dbg(&dev->dev, "free-ing dynamically allocated virtual "
-+ "configuration space fields\n");
- if (!dev_data)
- return;
+ static void netbk_add_frag_responses(struct xen_netif *netif, int status,
+- struct netbk_rx_meta *meta, int nr_frags)
++ struct netbk_rx_meta *meta,
++ int nr_meta_slots)
+ {
+ int i;
+ unsigned long offset;
-@@ -306,8 +307,7 @@ void pciback_config_free_dyn_fields(struct pci_dev *dev)
- if (field->clean) {
- field->clean((struct config_field *)field);
+- for (i = 0; i < nr_frags; i++) {
+- int id = meta[i].id;
+- int flags = (i == nr_frags - 1) ? 0 : NETRXF_more_data;
+-
++ for (i = 0; i < nr_meta_slots; i++) {
++ int flags;
++ if (i == nr_meta_slots - 1)
++ flags = 0;
++ else
++ flags = NETRXF_more_data;
++
+ offset = 0;
+- make_rx_response(netif, id, status, offset,
+- meta[i].frag.size, flags);
++ make_rx_response(netif, meta[i].id, status, offset,
++ meta[i].size, flags);
+ }
+ }
-- if (cfg_entry->data)
-- kfree(cfg_entry->data);
-+ kfree(cfg_entry->data);
++struct skb_cb_overlay {
++ int meta_slots_used;
++};
++
+ static void net_rx_action(unsigned long data)
+ {
+ struct xen_netif *netif = NULL;
+ struct xen_netbk *netbk = (struct xen_netbk *)data;
+ s8 status;
+- u16 id, irq, flags;
++ u16 irq, flags;
+ struct xen_netif_rx_response *resp;
+ struct multicall_entry *mcl;
+ struct sk_buff_head rxq;
+@@ -465,6 +585,7 @@ static void net_rx_action(unsigned long data)
+ int nr_frags;
+ int count;
+ unsigned long offset;
++ struct skb_cb_overlay *sco;
- list_del(&cfg_entry->list);
- kfree(cfg_entry);
-@@ -376,7 +376,7 @@ int pciback_config_add_field_offset(struct pci_dev *dev,
- cfg_entry->base_offset = base_offset;
+ struct netrx_pending_operations npo = {
+ .mmu = netbk->rx_mmu,
+@@ -479,10 +600,11 @@ static void net_rx_action(unsigned long data)
+ count = 0;
- /* silently ignore duplicate fields */
-- err = pciback_field_is_dup(dev,OFFSET(cfg_entry));
-+ err = pciback_field_is_dup(dev, OFFSET(cfg_entry));
- if (err)
- goto out;
+ while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
++ netif = netdev_priv(skb->dev);
+ nr_frags = skb_shinfo(skb)->nr_frags;
+- *(int *)skb->cb = nr_frags;
-@@ -395,14 +395,14 @@ int pciback_config_add_field_offset(struct pci_dev *dev,
- OFFSET(cfg_entry));
- list_add_tail(&cfg_entry->list, &dev_data->config_fields);
+- netbk_gop_skb(skb, &npo);
++ sco = (struct skb_cb_overlay *)skb->cb;
++ sco->meta_slots_used = netbk_gop_skb(skb, &npo);
-- out:
-+out:
- if (err)
- kfree(cfg_entry);
+ count += nr_frags + 1;
- return err;
- }
+@@ -541,18 +663,20 @@ static void net_rx_action(unsigned long data)
+ BUG_ON(npo.mmu_mcl && npo.mcl[npo.mmu_mcl].result != 0);
--/* This sets up the device's virtual configuration space to keep track of
-+/* This sets up the device's virtual configuration space to keep track of
- * certain registers (like the base address registers (BARs) so that we can
- * keep the client from manipulating them directly.
- */
-@@ -425,7 +425,7 @@ int pciback_config_init_dev(struct pci_dev *dev)
+ while ((skb = __skb_dequeue(&rxq)) != NULL) {
+- nr_frags = *(int *)skb->cb;
++ sco = (struct skb_cb_overlay *)skb->cb;
- err = pciback_config_quirks_init(dev);
+ netif = netdev_priv(skb->dev);
-- out:
-+out:
- return err;
- }
+ netif->stats.tx_bytes += skb->len;
+ netif->stats.tx_packets++;
-diff --git a/drivers/xen/pciback/conf_space.h b/drivers/xen/pciback/conf_space.h
-index fe746ef..50ebef2 100644
---- a/drivers/xen/pciback/conf_space.h
-+++ b/drivers/xen/pciback/conf_space.h
-@@ -11,21 +11,21 @@
- #include <linux/err.h>
+- status = netbk_check_gop(nr_frags, netif->domid, &npo);
+-
+- id = netbk->meta[npo.meta_cons].id;
+- flags = nr_frags ? NETRXF_more_data : 0;
++ status = netbk_check_gop(sco->meta_slots_used,
++ netif->domid, &npo);
- /* conf_field_init can return an errno in a ptr with ERR_PTR() */
--typedef void *(*conf_field_init) (struct pci_dev * dev, int offset);
--typedef void (*conf_field_reset) (struct pci_dev * dev, int offset, void *data);
--typedef void (*conf_field_free) (struct pci_dev * dev, int offset, void *data);
-+typedef void *(*conf_field_init) (struct pci_dev *dev, int offset);
-+typedef void (*conf_field_reset) (struct pci_dev *dev, int offset, void *data);
-+typedef void (*conf_field_free) (struct pci_dev *dev, int offset, void *data);
++ if (sco->meta_slots_used == 1)
++ flags = 0;
++ else
++ flags = NETRXF_more_data;
+ if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
+ flags |= NETRXF_csum_blank | NETRXF_data_validated;
+ else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+@@ -560,10 +684,12 @@ static void net_rx_action(unsigned long data)
+ flags |= NETRXF_data_validated;
--typedef int (*conf_dword_write) (struct pci_dev * dev, int offset, u32 value,
-+typedef int (*conf_dword_write) (struct pci_dev *dev, int offset, u32 value,
- void *data);
--typedef int (*conf_word_write) (struct pci_dev * dev, int offset, u16 value,
-+typedef int (*conf_word_write) (struct pci_dev *dev, int offset, u16 value,
- void *data);
--typedef int (*conf_byte_write) (struct pci_dev * dev, int offset, u8 value,
-+typedef int (*conf_byte_write) (struct pci_dev *dev, int offset, u8 value,
- void *data);
--typedef int (*conf_dword_read) (struct pci_dev * dev, int offset, u32 * value,
-+typedef int (*conf_dword_read) (struct pci_dev *dev, int offset, u32 *value,
- void *data);
--typedef int (*conf_word_read) (struct pci_dev * dev, int offset, u16 * value,
-+typedef int (*conf_word_read) (struct pci_dev *dev, int offset, u16 *value,
- void *data);
--typedef int (*conf_byte_read) (struct pci_dev * dev, int offset, u8 * value,
-+typedef int (*conf_byte_read) (struct pci_dev *dev, int offset, u8 *value,
- void *data);
+ offset = 0;
+- resp = make_rx_response(netif, id, status, offset,
+- skb_headlen(skb), flags);
++ resp = make_rx_response(netif, netbk->meta[npo.meta_cons].id,
++ status, offset,
++ netbk->meta[npo.meta_cons].size,
++ flags);
- /* These are the fields within the configuration space which we
-@@ -39,7 +39,7 @@ struct config_field {
- conf_field_init init;
- conf_field_reset reset;
- conf_field_free release;
-- void (*clean) (struct config_field * field);
-+ void (*clean) (struct config_field *field);
- union {
- struct {
- conf_dword_write write;
-@@ -92,8 +92,8 @@ static inline int pciback_config_add_fields(struct pci_dev *dev,
- }
+- if (netbk->meta[npo.meta_cons].frag.size) {
++ if (netbk->meta[npo.meta_cons].gso_size) {
+ struct xen_netif_extra_info *gso =
+ (struct xen_netif_extra_info *)
+ RING_GET_RESPONSE(&netif->rx,
+@@ -571,7 +697,7 @@ static void net_rx_action(unsigned long data)
- static inline int pciback_config_add_fields_offset(struct pci_dev *dev,
-- const struct config_field *field,
-- unsigned int offset)
-+ const struct config_field *field,
-+ unsigned int offset)
- {
- int i, err = 0;
- for (i = 0; field[i].size != 0; i++) {
-@@ -105,11 +105,11 @@ static inline int pciback_config_add_fields_offset(struct pci_dev *dev,
- }
+ resp->flags |= NETRXF_extra_info;
- /* Read/Write the real configuration space */
--int pciback_read_config_byte(struct pci_dev *dev, int offset, u8 * value,
-+int pciback_read_config_byte(struct pci_dev *dev, int offset, u8 *value,
- void *data);
--int pciback_read_config_word(struct pci_dev *dev, int offset, u16 * value,
-+int pciback_read_config_word(struct pci_dev *dev, int offset, u16 *value,
- void *data);
--int pciback_read_config_dword(struct pci_dev *dev, int offset, u32 * value,
-+int pciback_read_config_dword(struct pci_dev *dev, int offset, u32 *value,
- void *data);
- int pciback_write_config_byte(struct pci_dev *dev, int offset, u8 value,
- void *data);
-diff --git a/drivers/xen/pciback/conf_space_capability.c b/drivers/xen/pciback/conf_space_capability.c
-index 50efca4..0ea84d6 100644
---- a/drivers/xen/pciback/conf_space_capability.c
-+++ b/drivers/xen/pciback/conf_space_capability.c
-@@ -53,13 +53,10 @@ int pciback_config_capability_add_fields(struct pci_dev *dev)
+- gso->u.gso.size = netbk->meta[npo.meta_cons].frag.size;
++ gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
+ gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
+ gso->u.gso.pad = 0;
+ gso->u.gso.features = 0;
+@@ -580,9 +706,11 @@ static void net_rx_action(unsigned long data)
+ gso->flags = 0;
}
- }
-
-- out:
-+out:
- return err;
- }
--extern struct pciback_config_capability pciback_config_capability_vpd;
--extern struct pciback_config_capability pciback_config_capability_pm;
--
- int pciback_config_capability_init(void)
- {
- register_capability(&pciback_config_capability_vpd);
-diff --git a/drivers/xen/pciback/conf_space_capability.h b/drivers/xen/pciback/conf_space_capability.h
-index 823392e..8da3ac4 100644
---- a/drivers/xen/pciback/conf_space_capability.h
-+++ b/drivers/xen/pciback/conf_space_capability.h
-@@ -20,4 +20,7 @@ struct pciback_config_capability {
- const struct config_field *fields;
- };
+- netbk_add_frag_responses(netif, status,
+- netbk->meta + npo.meta_cons + 1,
+- nr_frags);
++ if (sco->meta_slots_used > 1) {
++ netbk_add_frag_responses(netif, status,
++ netbk->meta + npo.meta_cons + 1,
++ sco->meta_slots_used - 1);
++ }
-+extern struct pciback_config_capability pciback_config_capability_vpd;
-+extern struct pciback_config_capability pciback_config_capability_pm;
-+
- #endif
-diff --git a/drivers/xen/pciback/conf_space_capability_msi.c b/drivers/xen/pciback/conf_space_capability_msi.c
-index 7fb5371..b70ea8b 100644
---- a/drivers/xen/pciback/conf_space_capability_msi.c
-+++ b/drivers/xen/pciback/conf_space_capability_msi.c
-@@ -18,7 +18,8 @@ int pciback_enable_msi(struct pciback_device *pdev,
- status = pci_enable_msi(dev);
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, ret);
+ irq = netif->irq;
+@@ -597,8 +725,8 @@ static void net_rx_action(unsigned long data)
+ netif_wake_queue(netif->dev);
- if (status) {
-- printk("error enable msi for guest %x status %x\n", otherend, status);
-+ printk(KERN_ERR "error enable msi for guest %x status %x\n",
-+ otherend, status);
- op->value = 0;
- return XEN_PCI_ERR_op_failed;
+ netif_put(netif);
++ npo.meta_cons += sco->meta_slots_used;
+ dev_kfree_skb(skb);
+- npo.meta_cons += nr_frags + 1;
}
-diff --git a/drivers/xen/pciback/conf_space_capability_pm.c b/drivers/xen/pciback/conf_space_capability_pm.c
-index e1d3af4..0442616 100644
---- a/drivers/xen/pciback/conf_space_capability_pm.c
-+++ b/drivers/xen/pciback/conf_space_capability_pm.c
-@@ -20,7 +20,7 @@ static int pm_caps_read(struct pci_dev *dev, int offset, u16 *value,
-
- *value = real_value & ~PCI_PM_CAP_PME_MASK;
-
-- out:
-+out:
- return err;
- }
-@@ -77,7 +77,7 @@ static void *pm_ctrl_init(struct pci_dev *dev, int offset)
- err = pci_write_config_word(dev, offset, value);
+ while (notify_nr != 0) {
+--
+1.7.4
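
Aside: the buffer-packing rule shared by count_skb_slots() and
netbk_gop_frag_copy() can be exercised standalone. The userspace sketch
below uses hypothetical sizes, assumes the head fits in one buffer (the
patch BUG_ONs otherwise) and omits the extra GSO slot. A 200-byte head
plus 1000/3500/6000-byte frags packs as 1200 + 4096 + 4096 + 1308 bytes,
i.e. 4 slots: the small frag coalesces with the head, the 3500-byte frag
takes a fresh buffer which the large frag then tops up before spilling
into two more:

    #include <stdio.h>

    #define MAX_BUFFER_OFFSET 4096u

    static unsigned slots_needed(unsigned head, const unsigned *frags, int n)
    {
            unsigned count = 1, copy_off = head, bytes;
            int i;

            for (i = 0; i < n; i++) {
                    unsigned size = frags[i];
                    while (size > 0) {
                            /* start a new buffer if the current one is
                             * full, or if this frag would overflow it but
                             * could fit whole in an empty one */
                            if (copy_off == MAX_BUFFER_OFFSET ||
                                (copy_off + size > MAX_BUFFER_OFFSET &&
                                 size <= MAX_BUFFER_OFFSET && copy_off)) {
                                    count++;
                                    copy_off = 0;
                            }
                            bytes = size;
                            if (copy_off + bytes > MAX_BUFFER_OFFSET)
                                    bytes = MAX_BUFFER_OFFSET - copy_off;
                            copy_off += bytes;
                            size -= bytes;
                    }
            }
            return count;
    }

    int main(void)
    {
            unsigned frags[] = { 1000, 3500, 6000 };

            printf("%u slots\n", slots_needed(200, frags, 3)); /* 4 slots */
            return 0;
    }
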
+
+
+From 96069b28e612232fb739ef48d9c2c5178b19f562 Mon Sep 17 00:00:00 2001
+From: Dongxiao Xu <dongxiao.xu(a)intel.com>
+Date: Thu, 10 Jun 2010 19:03:15 +0800
+Subject: [PATCH 046/203] xen: netback: Set memory allocated from vmalloc to zero.
+
+This should fix the windows/linux pv driver issue.
+
+Signed-off-by: Dongxiao Xu <dongxiao.xu(a)intel.com>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/netback.c | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index d53d88e..c7024d4 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -1724,6 +1724,7 @@ static int __init netback_init(void)
+ printk(KERN_ALERT "%s: out of memory\n", __func__);
+ return -ENOMEM;
}
++ memset(xen_netbk, 0, sizeof(struct xen_netbk) * xen_netbk_group_nr);
-- out:
-+out:
- return ERR_PTR(err);
- }
+ /* We can increase reservation by this much in net_rx_action(). */
+ // balloon_update_driver_allowance(NET_RX_RING_SIZE);
+--
+1.7.4
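
Aside: vmalloc() does not zero the memory it returns, hence the explicit
memset() above. Kernels from 2.6.37 onward could express the same thing in
one call with vzalloc() (a sketch, not what the patch does):

    xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
    if (!xen_netbk) {
            printk(KERN_ALERT "%s: out of memory\n", __func__);
            return -ENOMEM;
    }
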
+
+
+From 109a748d1c11b7eeaaacedb08c48bc65640b0bb8 Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Date: Mon, 14 Jun 2010 13:23:33 +0100
+Subject: [PATCH 047/203] xen: netback: minor code formatting fixup
+
+Don't include redundant casts from allocation.
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/netback.c | 3 +--
+ 1 files changed, 1 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index c7024d4..58e920a 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -1718,8 +1718,7 @@ static int __init netback_init(void)
+ return -ENODEV;
-diff --git a/drivers/xen/pciback/conf_space_capability_vpd.c b/drivers/xen/pciback/conf_space_capability_vpd.c
-index 920cb4a..e7b4d66 100644
---- a/drivers/xen/pciback/conf_space_capability_vpd.c
-+++ b/drivers/xen/pciback/conf_space_capability_vpd.c
-@@ -33,7 +33,7 @@ static const struct config_field caplist_vpd[] = {
- },
- {}
- };
--
-+
- struct pciback_config_capability pciback_config_capability_vpd = {
- .capability = PCI_CAP_ID_VPD,
- .fields = caplist_vpd,
-diff --git a/drivers/xen/pciback/conf_space_header.c b/drivers/xen/pciback/conf_space_header.c
-index 5a9e028..3ae7da1 100644
---- a/drivers/xen/pciback/conf_space_header.c
-+++ b/drivers/xen/pciback/conf_space_header.c
-@@ -51,7 +51,8 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
- err = pci_set_mwi(dev);
- if (err) {
- printk(KERN_WARNING
-- "pciback: %s: cannot enable memory-write-invalidate (%d)\n",
-+ "pciback: %s: cannot enable "
-+ "memory-write-invalidate (%d)\n",
- pci_name(dev), err);
- value &= ~PCI_COMMAND_INVALIDATE;
- }
-@@ -206,7 +207,7 @@ static int bist_write(struct pci_dev *dev, int offset, u8 value, void *data)
- || value == PCI_BIST_START)
- err = pci_write_config_byte(dev, offset, value);
-
-- out:
-+out:
- return err;
- }
-
-@@ -312,6 +313,6 @@ int pciback_config_header_add_fields(struct pci_dev *dev)
- break;
- }
+ xen_netbk_group_nr = num_online_cpus();
+- xen_netbk = (struct xen_netbk *)vmalloc(sizeof(struct xen_netbk) *
+- xen_netbk_group_nr);
++ xen_netbk = vmalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
+ if (!xen_netbk) {
+ printk(KERN_ALERT "%s: out of memory\n", __func__);
+ return -ENOMEM;
+--
+1.7.4
+
+
+From 2424b59d68ee6ccdb7e52ab68bdba3a8b742513d Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Wed, 30 Jun 2010 10:12:49 +0100
+Subject: [PATCH 048/203] xen: netback: drop more relics of flipping mode
+
+The mmu_update and gnttab_transfer arrays were only used by flipping
+mode. With those gone the multicall now consists of a single call to
+GNTTABOP_copy so drop the multicall as well and just make the one
+hypercall.
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+Cc: Jeremy Fitzhardinge <jeremy(a)goop.org>
+Cc: Paul Durrant <paul.durrant(a)citrix.com>
+---
+ drivers/xen/netback/common.h | 3 --
+ drivers/xen/netback/netback.c | 55 +++--------------------------------------
+ 2 files changed, 4 insertions(+), 54 deletions(-)
+
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index 9c0c048..08e7a0e 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -304,9 +304,6 @@ struct xen_netbk {
+ u16 pending_ring[MAX_PENDING_REQS];
+ u16 dealloc_ring[MAX_PENDING_REQS];
-- out:
-+out:
- return err;
- }
-diff --git a/drivers/xen/pciback/conf_space_quirks.c b/drivers/xen/pciback/conf_space_quirks.c
-index 244a438..45c31fb 100644
---- a/drivers/xen/pciback/conf_space_quirks.c
-+++ b/drivers/xen/pciback/conf_space_quirks.c
-@@ -18,8 +18,10 @@ match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
- {
- if ((id->vendor == PCI_ANY_ID || id->vendor == dev->vendor) &&
- (id->device == PCI_ANY_ID || id->device == dev->device) &&
-- (id->subvendor == PCI_ANY_ID || id->subvendor == dev->subsystem_vendor) &&
-- (id->subdevice == PCI_ANY_ID || id->subdevice == dev->subsystem_device) &&
-+ (id->subvendor == PCI_ANY_ID ||
-+ id->subvendor == dev->subsystem_vendor) &&
-+ (id->subdevice == PCI_ANY_ID ||
-+ id->subdevice == dev->subsystem_device) &&
- !((id->class ^ dev->class) & id->class_mask))
- return id;
- return NULL;
-@@ -35,7 +37,7 @@ struct pciback_config_quirk *pciback_find_quirk(struct pci_dev *dev)
- tmp_quirk = NULL;
- printk(KERN_DEBUG
- "quirk didn't match any device pciback knows about\n");
-- out:
-+out:
- return tmp_quirk;
+- struct multicall_entry rx_mcl[NET_RX_RING_SIZE+3];
+- struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+- struct gnttab_transfer grant_trans_op[NET_RX_RING_SIZE];
+ /*
+ * Each head or fragment can be up to 4096 bytes. Given
+ * MAX_BUFFER_OFFSET of 4096 the worst case is that each
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 58e920a..ca65840 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -368,15 +368,9 @@ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
-@@ -51,7 +53,7 @@ int pciback_field_is_dup(struct pci_dev *dev, unsigned int reg)
- struct config_field_entry *cfg_entry;
-
- list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
-- if ( OFFSET(cfg_entry) == reg) {
-+ if (OFFSET(cfg_entry) == reg) {
- ret = 1;
- break;
- }
-@@ -84,7 +86,7 @@ int pciback_config_quirks_add_field(struct pci_dev *dev, struct config_field
+ struct netrx_pending_operations {
+- unsigned trans_prod, trans_cons;
+- unsigned mmu_prod, mmu_mcl;
+- unsigned mcl_prod, mcl_cons;
+ unsigned copy_prod, copy_cons;
+ unsigned meta_prod, meta_cons;
+- struct mmu_update *mmu;
+- struct gnttab_transfer *trans;
+ struct gnttab_copy *copy;
+- struct multicall_entry *mcl;
+ struct netbk_rx_meta *meta;
+ int copy_off;
+ grant_ref_t copy_gref;
+@@ -577,7 +571,6 @@ static void net_rx_action(unsigned long data)
+ s8 status;
+ u16 irq, flags;
+ struct xen_netif_rx_response *resp;
+- struct multicall_entry *mcl;
+ struct sk_buff_head rxq;
+ struct sk_buff *skb;
+ int notify_nr = 0;
+@@ -588,10 +581,7 @@ static void net_rx_action(unsigned long data)
+ struct skb_cb_overlay *sco;
- pciback_config_add_field(dev, field);
+ struct netrx_pending_operations npo = {
+- .mmu = netbk->rx_mmu,
+- .trans = netbk->grant_trans_op,
+ .copy = netbk->grant_copy_op,
+- .mcl = netbk->rx_mcl,
+ .meta = netbk->meta,
+ };
-- out:
-+out:
- return err;
- }
+@@ -617,50 +607,13 @@ static void net_rx_action(unsigned long data)
-@@ -110,7 +112,7 @@ int pciback_config_quirks_init(struct pci_dev *dev)
- quirk->pdev = dev;
+ BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
- register_quirk(quirk);
-- out:
-+out:
- return ret;
- }
+- npo.mmu_mcl = npo.mcl_prod;
+- if (npo.mcl_prod) {
+- BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
+- BUG_ON(npo.mmu_prod > ARRAY_SIZE(netbk->rx_mmu));
+- mcl = npo.mcl + npo.mcl_prod++;
+-
+- BUG_ON(mcl[-1].op != __HYPERVISOR_update_va_mapping);
+- mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
+-
+- mcl->op = __HYPERVISOR_mmu_update;
+- mcl->args[0] = (unsigned long)netbk->rx_mmu;
+- mcl->args[1] = npo.mmu_prod;
+- mcl->args[2] = 0;
+- mcl->args[3] = DOMID_SELF;
+- }
+-
+- if (npo.trans_prod) {
+- BUG_ON(npo.trans_prod > ARRAY_SIZE(netbk->grant_trans_op));
+- mcl = npo.mcl + npo.mcl_prod++;
+- mcl->op = __HYPERVISOR_grant_table_op;
+- mcl->args[0] = GNTTABOP_transfer;
+- mcl->args[1] = (unsigned long)netbk->grant_trans_op;
+- mcl->args[2] = npo.trans_prod;
+- }
+-
+- if (npo.copy_prod) {
+- BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
+- mcl = npo.mcl + npo.mcl_prod++;
+- mcl->op = __HYPERVISOR_grant_table_op;
+- mcl->args[0] = GNTTABOP_copy;
+- mcl->args[1] = (unsigned long)netbk->grant_copy_op;
+- mcl->args[2] = npo.copy_prod;
+- }
+-
+- /* Nothing to do? */
+- if (!npo.mcl_prod)
++ if (!npo.copy_prod)
+ return;
-@@ -133,6 +135,6 @@ int pciback_config_quirk_release(struct pci_dev *dev)
- list_del(&quirk->quirks_list);
- kfree(quirk);
+- BUG_ON(npo.mcl_prod > ARRAY_SIZE(netbk->rx_mcl));
+-
+- ret = HYPERVISOR_multicall(npo.mcl, npo.mcl_prod);
++ BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
++ ret = HYPERVISOR_grant_table_op(GNTTABOP_copy, &netbk->grant_copy_op,
++ npo.copy_prod);
+ BUG_ON(ret != 0);
+- /* The mmu_machphys_update() must not fail. */
+- BUG_ON(npo.mmu_mcl && npo.mcl[npo.mmu_mcl].result != 0);
-- out:
-+out:
- return ret;
- }
-diff --git a/drivers/xen/pciback/controller.c b/drivers/xen/pciback/controller.c
-index 294e48f..7f04f11 100644
---- a/drivers/xen/pciback/controller.c
-+++ b/drivers/xen/pciback/controller.c
-@@ -259,7 +259,7 @@ static acpi_status write_xenbus_resource(struct acpi_resource *res, void *data)
- !(addr.resource_type == ACPI_IO_RANGE &&
- addr.info.io.translation))
- return AE_OK;
--
+ while ((skb = __skb_dequeue(&rxq)) != NULL) {
+ sco = (struct skb_cb_overlay *)skb->cb;
+--
+1.7.4
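
Aside: with the mmu_update and GNTTABOP_transfer legs gone, flushing the
queued RX operations collapses from a HYPERVISOR_multicall to the single
grant-copy hypercall in the hunk above. In isolation, with a note on the
two failure layers:

    /* The hypercall itself only fails on bad arguments, hence the
     * BUG_ON(); per-operation results land in each gnttab_copy's
     * status field and are checked later by netbk_check_gop(). */
    if (npo.copy_prod) {
            BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
            ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
                                            &netbk->grant_copy_op,
                                            npo.copy_prod);
            BUG_ON(ret != 0);
    }
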
+
+
+From 673a19d9e2d78939c6dc9c49e7e35ee54b54c8c7 Mon Sep 17 00:00:00 2001
+From: Paul Durrant <paul.durrant(a)citrix.com>
+Date: Fri, 2 Jul 2010 10:28:11 +0100
+Subject: [PATCH 049/203] xen: netback: Fix basic indentation issue
+
+Signed-off-by: Paul Durrant <paul.durrant(a)citrix.com>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/netback.c | 11 +++++++----
+ 1 files changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index ca65840..848503e 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -445,10 +445,13 @@ static void netbk_gop_frag_copy(struct xen_netif *netif,
+ copy_gop = npo->copy + npo->copy_prod++;
+ copy_gop->flags = GNTCOPY_dest_gref;
+ if (PageForeign(page)) {
+- struct xen_netbk *netbk = &xen_netbk[group];
+- struct pending_tx_info *src_pend = &netbk->pending_tx_info[idx];
+- copy_gop->source.domid = src_pend->netif->domid;
+- copy_gop->source.u.ref = src_pend->req.gref;
++ struct xen_netbk *netbk = &xen_netbk[group];
++ struct pending_tx_info *src_pend;
+
- /* Store the resource in xenbus for the guest */
- len = snprintf(str, sizeof(str), "root-%d-resource-%d",
- info->root_num, info->resource_count);
-@@ -314,7 +314,7 @@ int pciback_publish_pci_roots(struct pciback_device *pdev,
- goto out;
-
- /*
-- * Now figure out which root-%d this belongs to
-+ * Now figure out which root-%d this belongs to
- * so we can associate resources with it.
- */
- err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
-@@ -407,8 +407,8 @@ void pciback_release_devices(struct pciback_device *pdev)
- pdev->pci_dev_data = NULL;
- }
-
--int pciback_get_pcifront_dev(struct pci_dev *pcidev,
-- struct pciback_device *pdev,
-+int pciback_get_pcifront_dev(struct pci_dev *pcidev,
-+ struct pciback_device *pdev,
- unsigned int *domain, unsigned int *bus, unsigned int *devfn)
- {
- struct controller_dev_data *dev_data = pdev->pci_dev_data;
-@@ -420,13 +420,12 @@ int pciback_get_pcifront_dev(struct pci_dev *pcidev,
-
- list_for_each_entry(cntrl_entry, &dev_data->list, list) {
- list_for_each_entry(dev_entry, &cntrl_entry->dev_list, list) {
-- if ( (dev_entry->dev->bus->number ==
-+ if ((dev_entry->dev->bus->number ==
- pcidev->bus->number) &&
-- (dev_entry->dev->devfn ==
-+ (dev_entry->dev->devfn ==
- pcidev->devfn) &&
- (pci_domain_nr(dev_entry->dev->bus) ==
-- pci_domain_nr(pcidev->bus)))
-- {
-+ pci_domain_nr(pcidev->bus))) {
- found = 1;
- *domain = cntrl_entry->domain;
- *bus = cntrl_entry->bus;
-diff --git a/drivers/xen/pciback/passthrough.c b/drivers/xen/pciback/passthrough.c
-index 9e7a0c4..5386bebf 100644
---- a/drivers/xen/pciback/passthrough.c
-+++ b/drivers/xen/pciback/passthrough.c
-@@ -165,8 +165,10 @@ void pciback_release_devices(struct pciback_device *pdev)
- pdev->pci_dev_data = NULL;
- }
++ src_pend = &netbk->pending_tx_info[idx];
++
++ copy_gop->source.domid = src_pend->netif->domid;
++ copy_gop->source.u.ref = src_pend->req.gref;
+ copy_gop->flags |= GNTCOPY_source_gref;
+ } else {
+ copy_gop->source.domid = DOMID_SELF;
+--
+1.7.4
+
+
+From d08b2d1f2ff4723b335d0fb5b91ffd6cb6a005d3 Mon Sep 17 00:00:00 2001
+From: Paul Durrant <paul.durrant(a)citrix.com>
+Date: Mon, 5 Jul 2010 11:45:29 +0100
+Subject: [PATCH 050/203] xen: netback: Add a new style of passing GSO packets to frontends.
+
+feature-gso-tcpv4-prefix precedes the packet data passed to
+the frontend with a ring entry that contains the necessary
+metadata. This style of GSO passing is required for Citrix
+Windows PV Drivers.
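+
+Seen from the frontend, the prefix scheme changes the rx ring layout
+for a GSO packet roughly as follows (an illustrative sketch; the
+identifiers match the netif ring definitions used below):
+
+  struct xen_netif_rx_response *resp;
+
+  /* Slot 0: prefix descriptor, carries no packet data. The offset
+   * field is reused to hold gso_size, and NETRXF_more_data marks
+   * that the data slots follow. */
+  resp = RING_GET_RESPONSE(&netif->rx, netif->rx.rsp_prod_pvt++);
+  resp->flags = NETRXF_gso_prefix | NETRXF_more_data;
+  resp->offset = skb_shinfo(skb)->gso_size;
+  /* Slots 1..n: the packet data itself, flagged as before. */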
+
+Signed-off-by: Paul Durrant <paul.durrant(a)citrix.com>
+Cc: Ian Campbell <ian.campbell(a)citrix.com>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/common.h | 3 ++-
+ drivers/xen/netback/netback.c | 37 ++++++++++++++++++++++++++++++++++---
+ drivers/xen/netback/xenbus.c | 15 ++++++++++++---
+ include/xen/interface/io/netif.h | 4 ++++
+ 4 files changed, 52 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index 08e7a0e..78451ab 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -80,7 +80,8 @@ struct xen_netif {
+ int features;
--int pciback_get_pcifront_dev(struct pci_dev *pcidev, struct pciback_device *pdev,
-- unsigned int *domain, unsigned int *bus, unsigned int *devfn)
-+int pciback_get_pcifront_dev(struct pci_dev *pcidev,
-+ struct pciback_device *pdev,
-+ unsigned int *domain, unsigned int *bus,
-+ unsigned int *devfn)
+ /* Internal feature information. */
+- u8 can_queue:1; /* can queue packets for receiver? */
++ u8 can_queue:1; /* can queue packets for receiver? */
++ u8 gso_prefix:1; /* use a prefix segment for GSO information */
- {
- *domain = pci_domain_nr(pcidev->bus);
-diff --git a/drivers/xen/pciback/pci_stub.c b/drivers/xen/pciback/pci_stub.c
-index d97dac5..28222ee 100644
---- a/drivers/xen/pciback/pci_stub.c
-+++ b/drivers/xen/pciback/pci_stub.c
-@@ -20,7 +20,7 @@
- #include "conf_space.h"
- #include "conf_space_quirks.h"
+ /* Allow netif_be_start_xmit() to peek ahead in the rx request
+ * ring. This is a prediction of what rx_req_cons will be once
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 848503e..e93a62e 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -432,6 +432,7 @@ static void netbk_gop_frag_copy(struct xen_netif *netif,
+ /* Overflowed this request, go to the next one */
+ req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons++);
+ meta = npo->meta + npo->meta_prod++;
++ meta->gso_size = 0;
+ meta->size = 0;
+ meta->id = req->id;
+ npo->copy_off = 0;
+@@ -492,9 +493,23 @@ static int netbk_gop_skb(struct sk_buff *skb,
--static char *pci_devs_to_hide = NULL;
-+static char *pci_devs_to_hide;
- wait_queue_head_t aer_wait_queue;
- /*Add sem for sync AER handling and pciback remove/reconfigue ops,
- * We want to avoid in middle of AER ops, pciback devices is being removed
-@@ -43,7 +43,7 @@ struct pcistub_device {
- spinlock_t lock;
+ old_meta_prod = npo->meta_prod;
- struct pci_dev *dev;
-- struct pciback_device *pdev; /* non-NULL if struct pci_dev is in use */
-+ struct pciback_device *pdev;/* non-NULL if struct pci_dev is in use */
- };
++ /* Set up a GSO prefix descriptor, if necessary */
++ if (skb_shinfo(skb)->gso_size && netif->gso_prefix) {
++ req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons++);
++ meta = npo->meta + npo->meta_prod++;
++ meta->gso_size = skb_shinfo(skb)->gso_size;
++ meta->size = 0;
++ meta->id = req->id;
++ }
++
+ req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons++);
+ meta = npo->meta + npo->meta_prod++;
+- meta->gso_size = skb_shinfo(skb)->gso_size;
++
++ if (!netif->gso_prefix)
++ meta->gso_size = skb_shinfo(skb)->gso_size;
++ else
++ meta->gso_size = 0;
++
+ meta->size = 0;
+ meta->id = req->id;
+ npo->copy_off = 0;
+@@ -506,7 +521,7 @@ static int netbk_gop_skb(struct sk_buff *skb,
+ offset_in_page(skb->data), 1);
- /* Access to pcistub_devices & seized_devices lists and the initialize_devices
-@@ -55,7 +55,7 @@ static LIST_HEAD(pcistub_devices);
- /* wait for device_initcall before initializing our devices
- * (see pcistub_init_devices_late)
- */
--static int initialize_devices = 0;
-+static int initialize_devices;
- static LIST_HEAD(seized_devices);
+ /* Leave a gap for the GSO descriptor. */
+- if (skb_shinfo(skb)->gso_size)
++ if (skb_shinfo(skb)->gso_size && !netif->gso_prefix)
+ netif->rx.req_cons++;
- static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
-@@ -132,7 +132,7 @@ static struct pcistub_device *pcistub_device_find(int domain, int bus,
- /* didn't find it */
- psdev = NULL;
+ for (i = 0; i < nr_frags; i++) {
+@@ -623,6 +638,21 @@ static void net_rx_action(unsigned long data)
-- out:
-+out:
- spin_unlock_irqrestore(&pcistub_devices_lock, flags);
- return psdev;
- }
-@@ -321,10 +321,10 @@ static int __devinit pcistub_init_device(struct pci_dev *dev)
+ netif = netdev_priv(skb->dev);
- return 0;
++ if (netbk->meta[npo.meta_cons].gso_size && netif->gso_prefix) {
++ resp = RING_GET_RESPONSE(&netif->rx,
++ netif->rx.rsp_prod_pvt++);
++
++ resp->flags = NETRXF_gso_prefix | NETRXF_more_data;
++
++ resp->offset = netbk->meta[npo.meta_cons].gso_size;
++ resp->id = netbk->meta[npo.meta_cons].id;
++ resp->status = sco->meta_slots_used;
++
++ npo.meta_cons++;
++ sco->meta_slots_used--;
++ }
++
++
+ netif->stats.tx_bytes += skb->len;
+ netif->stats.tx_packets++;
-- config_release:
-+config_release:
- pciback_config_free_dev(dev);
+@@ -633,6 +663,7 @@ static void net_rx_action(unsigned long data)
+ flags = 0;
+ else
+ flags = NETRXF_more_data;
++
+ if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
+ flags |= NETRXF_csum_blank | NETRXF_data_validated;
+ else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+@@ -645,7 +676,7 @@ static void net_rx_action(unsigned long data)
+ netbk->meta[npo.meta_cons].size,
+ flags);
-- out:
-+out:
- pci_set_drvdata(dev, NULL);
- kfree(dev_data);
- return err;
-@@ -443,7 +443,7 @@ static int __devinit pcistub_probe(struct pci_dev *dev,
- /* Didn't find the device */
- err = -ENODEV;
+- if (netbk->meta[npo.meta_cons].gso_size) {
++ if (netbk->meta[npo.meta_cons].gso_size && !netif->gso_prefix) {
+ struct xen_netif_extra_info *gso =
+ (struct xen_netif_extra_info *)
+ RING_GET_RESPONSE(&netif->rx,
+diff --git a/drivers/xen/netback/xenbus.c b/drivers/xen/netback/xenbus.c
+index e30b0c7..cda987f 100644
+--- a/drivers/xen/netback/xenbus.c
++++ b/drivers/xen/netback/xenbus.c
+@@ -457,16 +457,25 @@ static int connect_rings(struct backend_info *be)
+ be->netif->dev->mtu = ETH_DATA_LEN;
+ }
-- out:
-+out:
- return err;
- }
+- if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", "%d",
+- &val) < 0)
++ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
++ "%d", &val) < 0)
+ val = 0;
+ if (val) {
+ be->netif->features |= NETIF_F_TSO;
+ be->netif->dev->features |= NETIF_F_TSO;
+ }
-@@ -511,26 +511,24 @@ static void kill_domain_by_device(struct pcistub_device *psdev)
- int err;
- char nodename[1024];
++ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
++ "%d", &val) < 0)
++ val = 0;
++ if (val) {
++ be->netif->features |= NETIF_F_TSO;
++ be->netif->dev->features |= NETIF_F_TSO;
++ be->netif->gso_prefix = 1;
++ }
++
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
+- "%d", &val) < 0)
++ "%d", &val) < 0)
+ val = 0;
+ if (val) {
+ be->netif->features &= ~NETIF_F_IP_CSUM;
+diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
+index 518481c..8309344 100644
+--- a/include/xen/interface/io/netif.h
++++ b/include/xen/interface/io/netif.h
+@@ -131,6 +131,10 @@ struct xen_netif_rx_request {
+ #define _NETRXF_extra_info (3)
+ #define NETRXF_extra_info (1U<<_NETRXF_extra_info)
-- if (!psdev)
-+ if (!psdev)
- dev_err(&psdev->dev->dev,
- "device is NULL when do AER recovery/kill_domain\n");
-- sprintf(nodename, "/local/domain/0/backend/pci/%d/0",
-+ sprintf(nodename, "/local/domain/0/backend/pci/%d/0",
- psdev->pdev->xdev->otherend_id);
- nodename[strlen(nodename)] = '\0';
++/* GSO Prefix descriptor. */
++#define _NETRXF_gso_prefix (4)
++#define NETRXF_gso_prefix (1U<<_NETRXF_gso_prefix)
++
+ struct xen_netif_rx_response {
+ uint16_t id;
+ uint16_t offset; /* Offset in page of start of received packet */
+--
+1.7.4
+
+
+From bd910979612331d60a629c16a49ebeb5efa0f035 Mon Sep 17 00:00:00 2001
+From: Paul Durrant <paul.durrant(a)citrix.com>
+Date: Fri, 2 Jul 2010 10:28:13 +0100
+Subject: [PATCH 051/203] xen: netback: Make frontend features distinct from netback feature flags.
+
+Make sure that if a feature flag is disabled by ethtool on netback,
+we do not gratuitously re-enable it when we check the frontend
+features during ring connection.
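+
+The recomputed dev->features is just a mask over the frontend bits,
+roughly (a condensed sketch of the netif_set_features() added below,
+not a verbatim copy):
+
+  int features = dev->features;
+
+  if (netif->can_sg)
+  	features |= NETIF_F_SG;
+  if (netif->gso || netif->gso_prefix)
+  	features |= NETIF_F_TSO;
+  if (netif->csum)
+  	features |= NETIF_F_IP_CSUM;
+
+  /* bits turned off via ethtool are remembered and always win */
+  features &= ~(netif->features_disabled);
+  dev->features = features;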
+
+Signed-off-by: Paul Durrant <paul.durrant(a)citrix.com>
+Cc: Ian Campbell <ian.campbell(a)citrix.com>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/common.h | 14 ++++++--
+ drivers/xen/netback/interface.c | 68 ++++++++++++++++++++++++++++++--------
+ drivers/xen/netback/netback.c | 2 +-
+ drivers/xen/netback/xenbus.c | 44 ++++++++++---------------
+ 4 files changed, 81 insertions(+), 47 deletions(-)
+
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index 78451ab..a5f3759 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -76,12 +76,17 @@ struct xen_netif {
+ struct vm_struct *tx_comms_area;
+ struct vm_struct *rx_comms_area;
- again:
- err = xenbus_transaction_start(&xbt);
-- if (err)
-- {
-+ if (err) {
- dev_err(&psdev->dev->dev,
- "error %d when start xenbus transaction\n", err);
- return;
- }
- /*PV AER handlers will set this flag*/
-- xenbus_printf(xbt, nodename, "aerState" , "aerfail" );
-+ xenbus_printf(xbt, nodename, "aerState" , "aerfail");
- err = xenbus_transaction_end(xbt, 0);
-- if (err)
-- {
-+ if (err) {
- if (err == -EAGAIN)
- goto again;
- dev_err(&psdev->dev->dev,
-@@ -541,9 +539,9 @@ again:
+- /* Set of features that can be turned on in dev->features. */
+- int features;
++ /* Flags that must not be set in dev->features */
++ int features_disabled;
++
++ /* Frontend feature information. */
++ u8 can_sg:1;
++ u8 gso:1;
++ u8 gso_prefix:1;
++ u8 csum:1;
- /* For each aer recovery step error_detected, mmio_enabled, etc, front_end and
- * backend need to have cooperation. In pciback, those steps will do similar
-- * jobs: send service request and waiting for front_end response.
-+ * jobs: send service request and waiting for front_end response.
- */
--static pci_ers_result_t common_process(struct pcistub_device *psdev,
-+static pci_ers_result_t common_process(struct pcistub_device *psdev,
- pci_channel_state_t state, int aer_cmd, pci_ers_result_t result)
- {
- pci_ers_result_t res = result;
-@@ -561,12 +559,12 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
- if (!ret) {
- dev_err(&psdev->dev->dev,
- "pciback: failed to get pcifront device\n");
-- return PCI_ERS_RESULT_NONE;
-+ return PCI_ERS_RESULT_NONE;
- }
- wmb();
+ /* Internal feature information. */
+ u8 can_queue:1; /* can queue packets for receiver? */
+- u8 gso_prefix:1; /* use a prefix segment for GSO information */
-- dev_dbg(&psdev->dev->dev,
-- "pciback: aer_op %x dom %x bus %x devfn %x\n",
-+ dev_dbg(&psdev->dev->dev,
-+ "pciback: aer_op %x dom %x bus %x devfn %x\n",
- aer_cmd, aer_op->domain, aer_op->bus, aer_op->devfn);
- /*local flag to mark there's aer request, pciback callback will use this
- * flag to judge whether we need to check pci-front give aer service
-@@ -575,21 +573,21 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
- set_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);
+ /* Allow netif_be_start_xmit() to peek ahead in the rx request
+ * ring. This is a prediction of what rx_req_cons will be once
+@@ -187,6 +192,7 @@ void netif_accel_init(void);
- /*It is possible that a pcifront conf_read_write ops request invokes
-- * the callback which cause the spurious execution of wake_up.
-+ * the callback which cause the spurious execution of wake_up.
- * Yet it is harmless and better than a spinlock here
- */
-- set_bit(_XEN_PCIB_active,
-+ set_bit(_XEN_PCIB_active,
- (unsigned long *)&psdev->pdev->sh_info->flags);
- wmb();
- notify_remote_via_irq(psdev->pdev->evtchn_irq);
+ void netif_disconnect(struct xen_netif *netif);
- ret = wait_event_timeout(aer_wait_queue, !(test_bit(_XEN_PCIB_active,
-- (unsigned long *)&psdev->pdev->sh_info->flags)), 300*HZ);
-+ (unsigned long *)&psdev->pdev->sh_info->flags)), 300*HZ);
++void netif_set_features(struct xen_netif *netif);
+ struct xen_netif *netif_alloc(struct device *parent, domid_t domid, unsigned int handle);
+ int netif_map(struct xen_netif *netif, unsigned long tx_ring_ref,
+ unsigned long rx_ring_ref, unsigned int evtchn);
+@@ -223,7 +229,7 @@ static inline int netbk_can_queue(struct net_device *dev)
+ static inline int netbk_can_sg(struct net_device *dev)
+ {
+ struct xen_netif *netif = netdev_priv(dev);
+- return netif->features & NETIF_F_SG;
++ return netif->can_sg;
+ }
- if (!ret) {
-- if (test_bit(_XEN_PCIB_active,
-+ if (test_bit(_XEN_PCIB_active,
- (unsigned long *)&psdev->pdev->sh_info->flags)) {
-- dev_err(&psdev->dev->dev,
-+ dev_err(&psdev->dev->dev,
- "pcifront aer process not responding!\n");
- clear_bit(_XEN_PCIB_active,
- (unsigned long *)&psdev->pdev->sh_info->flags);
-@@ -599,16 +597,16 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
- }
- clear_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);
+ struct pending_tx_info {
+diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
+index 172ef4c..2e8508a 100644
+--- a/drivers/xen/netback/interface.c
++++ b/drivers/xen/netback/interface.c
+@@ -121,31 +121,69 @@ static int netbk_change_mtu(struct net_device *dev, int mtu)
+ return 0;
+ }
-- if ( test_bit( _XEN_PCIF_active,
-- (unsigned long*)&psdev->pdev->sh_info->flags)) {
-- dev_dbg(&psdev->dev->dev,
-+ if (test_bit(_XEN_PCIF_active,
-+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
-+ dev_dbg(&psdev->dev->dev,
- "schedule pci_conf service in pciback \n");
- test_and_schedule_op(psdev->pdev);
- }
+-static int netbk_set_sg(struct net_device *dev, u32 data)
++void netif_set_features(struct xen_netif *netif)
+ {
+- if (data) {
+- struct xen_netif *netif = netdev_priv(dev);
++ struct net_device *dev = netif->dev;
++ int features = dev->features;
++
++ if (netif->can_sg)
++ features |= NETIF_F_SG;
++ if (netif->gso || netif->gso_prefix)
++ features |= NETIF_F_TSO;
++ if (netif->csum)
++ features |= NETIF_F_IP_CSUM;
++
++ features &= ~(netif->features_disabled);
- res = (pci_ers_result_t)aer_op->err;
- return res;
--}
+- if (!(netif->features & NETIF_F_SG))
++ if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN)
++ dev->mtu = ETH_DATA_LEN;
++
++ dev->features = features;
+}
-
- /*
- * pciback_slot_reset: it will send the slot_reset request to pcifront in case
-@@ -632,24 +630,22 @@ static pci_ers_result_t pciback_slot_reset(struct pci_dev *dev)
- PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn));
-
-- if ( !psdev || !psdev->pdev )
-- {
-- dev_err(&dev->dev,
-+ if (!psdev || !psdev->pdev) {
-+ dev_err(&dev->dev,
- "pciback device is not found/assigned\n");
- goto end;
- }
-
-- if ( !psdev->pdev->sh_info )
-- {
-+ if (!psdev->pdev->sh_info) {
- dev_err(&dev->dev, "pciback device is not connected or owned"
- " by HVM, kill it\n");
- kill_domain_by_device(psdev);
- goto release;
++
++static int netbk_set_tx_csum(struct net_device *dev, u32 data)
++{
++ struct xen_netif *netif = netdev_priv(dev);
++ if (data) {
++ if (!netif->csum)
+ return -ENOSYS;
++ netif->features_disabled &= ~NETIF_F_IP_CSUM;
++ } else {
++ netif->features_disabled |= NETIF_F_IP_CSUM;
}
-- if ( !test_bit(_XEN_PCIB_AERHANDLER,
-- (unsigned long *)&psdev->pdev->sh_info->flags) ) {
-- dev_err(&dev->dev,
-+ if (!test_bit(_XEN_PCIB_AERHANDLER,
-+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
-+ dev_err(&dev->dev,
- "guest with no AER driver should have been killed\n");
- goto release;
- }
-@@ -657,7 +653,7 @@ static pci_ers_result_t pciback_slot_reset(struct pci_dev *dev)
+- if (dev->mtu > ETH_DATA_LEN)
+- dev->mtu = ETH_DATA_LEN;
++ netif_set_features(netif);
++ return 0;
++}
- if (result == PCI_ERS_RESULT_NONE ||
- result == PCI_ERS_RESULT_DISCONNECT) {
-- dev_dbg(&dev->dev,
-+ dev_dbg(&dev->dev,
- "No AER slot_reset service or disconnected!\n");
- kill_domain_by_device(psdev);
- }
-@@ -670,9 +666,9 @@ end:
+- return ethtool_op_set_sg(dev, data);
++static int netbk_set_sg(struct net_device *dev, u32 data)
++{
++ struct xen_netif *netif = netdev_priv(dev);
++ if (data) {
++ if (!netif->can_sg)
++ return -ENOSYS;
++ netif->features_disabled &= ~NETIF_F_SG;
++ } else {
++ netif->features_disabled |= NETIF_F_SG;
++ }
++
++ netif_set_features(netif);
++ return 0;
}
-
--/*pciback_mmio_enabled: it will send the mmio_enabled request to pcifront
--* in case of the device driver could provide this service, and then wait
--* for pcifront ack.
-+/*pciback_mmio_enabled: it will send the mmio_enabled request to pcifront
-+* in case of the device driver could provide this service, and then wait
-+* for pcifront ack
- * @dev: pointer to PCI devices
- * return value is used by aer_core do_recovery policy
- */
-@@ -692,24 +688,22 @@ static pci_ers_result_t pciback_mmio_enabled(struct pci_dev *dev)
- PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn));
-
-- if ( !psdev || !psdev->pdev )
-- {
-- dev_err(&dev->dev,
-+ if (!psdev || !psdev->pdev) {
-+ dev_err(&dev->dev,
- "pciback device is not found/assigned\n");
- goto end;
- }
-
-- if ( !psdev->pdev->sh_info )
-- {
-+ if (!psdev->pdev->sh_info) {
- dev_err(&dev->dev, "pciback device is not connected or owned"
- " by HVM, kill it\n");
- kill_domain_by_device(psdev);
- goto release;
- }
-
-- if ( !test_bit(_XEN_PCIB_AERHANDLER,
-- (unsigned long *)&psdev->pdev->sh_info->flags) ) {
-- dev_err(&dev->dev,
-+ if (!test_bit(_XEN_PCIB_AERHANDLER,
-+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
-+ dev_err(&dev->dev,
- "guest with no AER driver should have been killed\n");
- goto release;
+ static int netbk_set_tso(struct net_device *dev, u32 data)
+ {
++ struct xen_netif *netif = netdev_priv(dev);
+ if (data) {
+- struct xen_netif *netif = netdev_priv(dev);
+-
+- if (!(netif->features & NETIF_F_TSO))
++ if (!netif->gso && !netif->gso_prefix)
+ return -ENOSYS;
++ netif->features_disabled &= ~NETIF_F_TSO;
++ } else {
++ netif->features_disabled |= NETIF_F_TSO;
}
-@@ -717,7 +711,7 @@ static pci_ers_result_t pciback_mmio_enabled(struct pci_dev *dev)
- if (result == PCI_ERS_RESULT_NONE ||
- result == PCI_ERS_RESULT_DISCONNECT) {
-- dev_dbg(&dev->dev,
-+ dev_dbg(&dev->dev,
- "No AER mmio_enabled service or disconnected!\n");
- kill_domain_by_device(psdev);
- }
-@@ -728,8 +722,8 @@ end:
- return result;
+- return ethtool_op_set_tso(dev, data);
++ netif_set_features(netif);
++ return 0;
}
--/*pciback_error_detected: it will send the error_detected request to pcifront
--* in case of the device driver could provide this service, and then wait
-+/*pciback_error_detected: it will send the error_detected request to pcifront
-+* in case of the device driver could provide this service, and then wait
- * for pcifront ack.
- * @dev: pointer to PCI devices
- * @error: the current PCI connection state
-@@ -752,15 +746,13 @@ static pci_ers_result_t pciback_error_detected(struct pci_dev *dev,
- PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn));
+ static void netbk_get_drvinfo(struct net_device *dev,
+@@ -200,7 +238,7 @@ static struct ethtool_ops network_ethtool_ops =
+ .get_drvinfo = netbk_get_drvinfo,
-- if ( !psdev || !psdev->pdev )
-- {
-- dev_err(&dev->dev,
-+ if (!psdev || !psdev->pdev) {
-+ dev_err(&dev->dev,
- "pciback device is not found/assigned\n");
- goto end;
- }
+ .get_tx_csum = ethtool_op_get_tx_csum,
+- .set_tx_csum = ethtool_op_set_tx_csum,
++ .set_tx_csum = netbk_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = netbk_set_sg,
+ .get_tso = ethtool_op_get_tso,
+@@ -242,7 +280,8 @@ struct xen_netif *netif_alloc(struct device *parent, domid_t domid, unsigned int
+ netif->domid = domid;
+ netif->group = -1;
+ netif->handle = handle;
+- netif->features = NETIF_F_SG;
++ netif->can_sg = 1;
++ netif->csum = 1;
+ atomic_set(&netif->refcnt, 1);
+ init_waitqueue_head(&netif->waiting_to_free);
+ netif->dev = dev;
+@@ -259,8 +298,7 @@ struct xen_netif *netif_alloc(struct device *parent, domid_t domid, unsigned int
+ init_timer(&netif->tx_queue_timeout);
-- if ( !psdev->pdev->sh_info )
-- {
-+ if (!psdev->pdev->sh_info) {
- dev_err(&dev->dev, "pciback device is not connected or owned"
- " by HVM, kill it\n");
- kill_domain_by_device(psdev);
-@@ -768,8 +760,8 @@ static pci_ers_result_t pciback_error_detected(struct pci_dev *dev,
- }
+ dev->netdev_ops = &netback_ops;
+- dev->features = NETIF_F_IP_CSUM|NETIF_F_SG;
+-
++ netif_set_features(netif);
+ SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
- /*Guest owns the device yet no aer handler regiested, kill guest*/
-- if ( !test_bit(_XEN_PCIB_AERHANDLER,
-- (unsigned long *)&psdev->pdev->sh_info->flags) ) {
-+ if (!test_bit(_XEN_PCIB_AERHANDLER,
-+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
- dev_dbg(&dev->dev, "guest may have no aer driver, kill it\n");
- kill_domain_by_device(psdev);
- goto release;
-@@ -778,7 +770,7 @@ static pci_ers_result_t pciback_error_detected(struct pci_dev *dev,
+ dev->tx_queue_len = netbk_queue_length;
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index e93a62e..63a771e 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -238,7 +238,7 @@ static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
- if (result == PCI_ERS_RESULT_NONE ||
- result == PCI_ERS_RESULT_DISCONNECT) {
-- dev_dbg(&dev->dev,
-+ dev_dbg(&dev->dev,
- "No AER error_detected service or disconnected!\n");
- kill_domain_by_device(psdev);
- }
-@@ -789,8 +781,8 @@ end:
- return result;
+ static inline int netbk_max_required_rx_slots(struct xen_netif *netif)
+ {
+- if (netif->features & (NETIF_F_SG|NETIF_F_TSO))
++ if (netif->can_sg || netif->gso || netif->gso_prefix)
+ return MAX_SKB_FRAGS + 2; /* header + extra_info + frags */
+ return 1; /* all in one */
}
+diff --git a/drivers/xen/netback/xenbus.c b/drivers/xen/netback/xenbus.c
+index cda987f..17ff5cf 100644
+--- a/drivers/xen/netback/xenbus.c
++++ b/drivers/xen/netback/xenbus.c
+@@ -404,6 +404,7 @@ static void connect(struct backend_info *be)
--/*pciback_error_resume: it will send the error_resume request to pcifront
--* in case of the device driver could provide this service, and then wait
-+/*pciback_error_resume: it will send the error_resume request to pcifront
-+* in case of the device driver could provide this service, and then wait
- * for pcifront ack.
- * @dev: pointer to PCI devices
- */
-@@ -808,29 +800,28 @@ static void pciback_error_resume(struct pci_dev *dev)
- PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn));
-
-- if ( !psdev || !psdev->pdev )
-- {
-- dev_err(&dev->dev,
-+ if (!psdev || !psdev->pdev) {
-+ dev_err(&dev->dev,
- "pciback device is not found/assigned\n");
- goto end;
- }
-
-- if ( !psdev->pdev->sh_info )
-- {
-+ if (!psdev->pdev->sh_info) {
- dev_err(&dev->dev, "pciback device is not connected or owned"
- " by HVM, kill it\n");
- kill_domain_by_device(psdev);
- goto release;
- }
+ static int connect_rings(struct backend_info *be)
+ {
++ struct xen_netif *netif = be->netif;
+ struct xenbus_device *dev = be->dev;
+ unsigned long tx_ring_ref, rx_ring_ref;
+ unsigned int evtchn, rx_copy;
+@@ -437,53 +438,42 @@ static int connect_rings(struct backend_info *be)
+ if (!rx_copy)
+ return -EOPNOTSUPP;
-- if ( !test_bit(_XEN_PCIB_AERHANDLER,
-- (unsigned long *)&psdev->pdev->sh_info->flags) ) {
-- dev_err(&dev->dev,
-+ if (!test_bit(_XEN_PCIB_AERHANDLER,
-+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
-+ dev_err(&dev->dev,
- "guest with no AER driver should have been killed\n");
- kill_domain_by_device(psdev);
- goto release;
+- if (be->netif->dev->tx_queue_len != 0) {
++ if (netif->dev->tx_queue_len != 0) {
+ if (xenbus_scanf(XBT_NIL, dev->otherend,
+ "feature-rx-notify", "%d", &val) < 0)
+ val = 0;
+ if (val)
+- be->netif->can_queue = 1;
++ netif->can_queue = 1;
+ else
+ /* Must be non-zero for pfifo_fast to work. */
+- be->netif->dev->tx_queue_len = 1;
++ netif->dev->tx_queue_len = 1;
}
-- common_process(psdev, 1, XEN_PCI_OP_aer_resume, PCI_ERS_RESULT_RECOVERED);
-+ common_process(psdev, 1, XEN_PCI_OP_aer_resume,
-+ PCI_ERS_RESULT_RECOVERED);
- release:
- pcistub_device_put(psdev);
- end:
-@@ -923,8 +914,8 @@ static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
- unsigned long flags;
-
- spin_lock_irqsave(&device_ids_lock, flags);
-- list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids, slot_list) {
--
-+ list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids,
-+ slot_list) {
- if (pci_dev_id->domain == domain
- && pci_dev_id->bus == bus && pci_dev_id->devfn == devfn) {
- /* Don't break; here because it's possible the same
-@@ -976,7 +967,7 @@ static int pcistub_reg_add(int domain, int bus, int slot, int func, int reg,
- err = pciback_config_quirks_add_field(dev, field);
- if (err)
- kfree(field);
-- out:
-+out:
- return err;
- }
-@@ -992,7 +983,7 @@ static ssize_t pcistub_slot_add(struct device_driver *drv, const char *buf,
+- if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", "%d", &val) < 0)
++ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
++ "%d", &val) < 0)
+ val = 0;
+- if (!val) {
+- be->netif->features &= ~NETIF_F_SG;
+- be->netif->dev->features &= ~NETIF_F_SG;
+- if (be->netif->dev->mtu > ETH_DATA_LEN)
+- be->netif->dev->mtu = ETH_DATA_LEN;
+- }
++ netif->can_sg = !!val;
- err = pcistub_device_id_add(domain, bus, slot, func);
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
+- "%d", &val) < 0)
++ "%d", &val) < 0)
+ val = 0;
+- if (val) {
+- be->netif->features |= NETIF_F_TSO;
+- be->netif->dev->features |= NETIF_F_TSO;
+- }
++ netif->gso = !!val;
-- out:
-+out:
- if (!err)
- err = count;
- return err;
-@@ -1012,7 +1003,7 @@ static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf,
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
+- "%d", &val) < 0)
++ "%d", &val) < 0)
+ val = 0;
+- if (val) {
+- be->netif->features |= NETIF_F_TSO;
+- be->netif->dev->features |= NETIF_F_TSO;
+- be->netif->gso_prefix = 1;
+- }
++ netif->gso_prefix = !!val;
- err = pcistub_device_id_remove(domain, bus, slot, func);
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
+- "%d", &val) < 0)
++ "%d", &val) < 0)
+ val = 0;
+- if (val) {
+- be->netif->features &= ~NETIF_F_IP_CSUM;
+- be->netif->dev->features &= ~NETIF_F_IP_CSUM;
+- }
++ netif->csum = !val;
++
++ /* Set dev->features */
++ netif_set_features(netif);
-- out:
-+out:
- if (!err)
- err = count;
- return err;
-@@ -1057,7 +1048,7 @@ static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf,
+ /* Map the shared frame, irq etc. */
+- err = netif_map(be->netif, tx_ring_ref, rx_ring_ref, evtchn);
++ err = netif_map(netif, tx_ring_ref, rx_ring_ref, evtchn);
+ if (err) {
+ xenbus_dev_fatal(dev, err,
+ "mapping shared-frames %lu/%lu port %u",
+--
+1.7.4
+
+
+From cf8c20169427de5829e3ec723712b77de52e64ac Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Date: Thu, 15 Jul 2010 10:46:50 -0700
+Subject: [PATCH 052/203] xen: netback: only initialize for PV domains
+
+HVM domains don't support netback.
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/netback.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 63a771e..911c85b 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -1701,7 +1701,7 @@ static int __init netback_init(void)
+ int rc = 0;
+ int group;
- err = pcistub_reg_add(domain, bus, slot, func, reg, size, mask);
+- if (!xen_domain())
++ if (!xen_pv_domain())
+ return -ENODEV;
-- out:
-+out:
- if (!err)
- err = count;
- return err;
-@@ -1067,7 +1058,6 @@ static ssize_t pcistub_quirk_show(struct device_driver *drv, char *buf)
- {
- int count = 0;
- unsigned long flags;
-- extern struct list_head pciback_quirks;
- struct pciback_config_quirk *quirk;
- struct pciback_dev_data *dev_data;
- const struct config_field *field;
-@@ -1096,12 +1086,13 @@ static ssize_t pcistub_quirk_show(struct device_driver *drv, char *buf)
+ xen_netbk_group_nr = num_online_cpus();
+--
+1.7.4
+
+
+From dfa4906cfade8541573814c34be82ba02c348317 Mon Sep 17 00:00:00 2001
+From: Owen Smith <owen.smith(a)citrix.com>
+Date: Wed, 22 Dec 2010 15:05:00 +0000
+Subject: [PATCH 053/203] Union the blkif_request request specific fields
+
+Prepare for extending the block device ring to allow
+request-specific fields, by moving the request-specific fields for
+reads, writes and barrier requests into a union member.
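+
+With the union in place, read/write requests are filled in through
+u.rw; a minimal sketch of the blkfront side (locals as in
+blkif_queue_request below):
+
+  ring_req->id = id;
+  ring_req->handle = info->handle;
+  ring_req->operation = rq_data_dir(req) ?
+  	BLKIF_OP_WRITE : BLKIF_OP_READ;
+  ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
+  ring_req->u.rw.seg[i].gref = ref; /* per-segment grant reference */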
+
+Signed-off-by: Owen Smith <owen.smith(a)citrix.com>
+---
+ drivers/block/xen-blkfront.c | 8 ++++----
+ include/xen/interface/io/blkif.h | 16 +++++++++++-----
+ 2 files changed, 15 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index d7aa39e..cc4514c 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -281,7 +281,7 @@ static int blkif_queue_request(struct request *req)
+ info->shadow[id].request = req;
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "\t\t%08x:%01x:%08x\n",
-- cfg_entry->base_offset + field->offset,
-- field->size, field->mask);
-+ cfg_entry->base_offset +
-+ field->offset, field->size,
-+ field->mask);
- }
- }
+ ring_req->id = id;
+- ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
++ ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
+ ring_req->handle = info->handle;
-- out:
-+out:
- spin_unlock_irqrestore(&device_ids_lock, flags);
+ ring_req->operation = rq_data_dir(req) ?
+@@ -317,7 +317,7 @@ static int blkif_queue_request(struct request *req)
+ rq_data_dir(req) );
- return count;
-@@ -1137,14 +1128,14 @@ static ssize_t permissive_add(struct device_driver *drv, const char *buf,
- if (!dev_data->permissive) {
- dev_data->permissive = 1;
- /* Let user know that what they're doing could be unsafe */
-- dev_warn(&psdev->dev->dev,
-- "enabling permissive mode configuration space accesses!\n");
-+ dev_warn(&psdev->dev->dev, "enabling permissive mode "
-+ "configuration space accesses!\n");
- dev_warn(&psdev->dev->dev,
- "permissive mode is potentially unsafe!\n");
- }
-- release:
-+release:
- pcistub_device_put(psdev);
-- out:
-+out:
- if (!err)
- err = count;
- return err;
-@@ -1264,10 +1255,10 @@ static int __init pcistub_init(void)
- if (err)
- pcistub_exit();
+ info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
+- ring_req->seg[i] =
++ ring_req->u.rw.seg[i] =
+ (struct blkif_request_segment) {
+ .gref = ref,
+ .first_sect = fsect,
+@@ -615,7 +615,7 @@ static void blkif_completion(struct blk_shadow *s)
+ {
+ int i;
+ for (i = 0; i < s->req.nr_segments; i++)
+- gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
++ gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
+ }
-- out:
-+out:
- return err;
+ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
+@@ -932,7 +932,7 @@ static int blkif_recover(struct blkfront_info *info)
+ /* Rewrite any grant references invalidated by susp/resume. */
+ for (j = 0; j < req->nr_segments; j++)
+ gnttab_grant_foreign_access_ref(
+- req->seg[j].gref,
++ req->u.rw.seg[j].gref,
+ info->xbdev->otherend_id,
+ pfn_to_mfn(info->shadow[req->id].frame[j]),
+ rq_data_dir(info->shadow[req->id].request));
+diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
+index c2d1fa4..e4f743c 100644
+--- a/include/xen/interface/io/blkif.h
++++ b/include/xen/interface/io/blkif.h
+@@ -51,11 +51,7 @@ typedef uint64_t blkif_sector_t;
+ */
+ #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
-- parse_error:
-+parse_error:
- printk(KERN_ERR "pciback: Error parsing pci_devs_to_hide at \"%s\"\n",
- pci_devs_to_hide + pos);
- return -EINVAL;
-@@ -1276,7 +1267,7 @@ static int __init pcistub_init(void)
- #ifndef MODULE
- /*
- * fs_initcall happens before device_initcall
-- * so pciback *should* get called first (b/c we
-+ * so pciback *should* get called first (b/c we
- * want to suck up any device before other drivers
- * get a chance by being the first pci device
- * driver to register)
-diff --git a/drivers/xen/pciback/pciback.h b/drivers/xen/pciback/pciback.h
-index 5e8e14e..98e2912 100644
---- a/drivers/xen/pciback/pciback.h
-+++ b/drivers/xen/pciback/pciback.h
-@@ -49,6 +49,12 @@ struct pciback_dev_data {
- int warned_on_write;
+-struct blkif_request {
+- uint8_t operation; /* BLKIF_OP_??? */
+- uint8_t nr_segments; /* number of segments */
+- blkif_vdev_t handle; /* only for read/write requests */
+- uint64_t id; /* private guest value, echoed in resp */
++struct blkif_request_rw {
+ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
+ struct blkif_request_segment {
+ grant_ref_t gref; /* reference to I/O buffer frame */
+@@ -65,6 +61,16 @@ struct blkif_request {
+ } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
-+/* Used by XenBus and pciback_ops.c */
-+extern wait_queue_head_t aer_wait_queue;
-+extern struct workqueue_struct *pciback_wq;
-+/* Used by pcistub.c and conf_space_quirks.c */
-+extern struct list_head pciback_quirks;
++struct blkif_request {
++ uint8_t operation; /* BLKIF_OP_??? */
++ uint8_t nr_segments; /* number of segments */
++ blkif_vdev_t handle; /* only for read/write requests */
++ uint64_t id; /* private guest value, echoed in resp */
++ union {
++ struct blkif_request_rw rw;
++ } u;
++};
+
- /* Get/Put PCI Devices that are hidden from the PCI Backend Domain */
- struct pci_dev *pcistub_get_pci_dev_by_slot(struct pciback_device *pdev,
- int domain, int bus,
-@@ -67,14 +73,14 @@ void pciback_config_free_dyn_fields(struct pci_dev *dev);
- void pciback_config_reset_dev(struct pci_dev *dev);
- void pciback_config_free_dev(struct pci_dev *dev);
- int pciback_config_read(struct pci_dev *dev, int offset, int size,
-- u32 * ret_val);
-+ u32 *ret_val);
- int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value);
-
- /* Handle requests for specific devices from the frontend */
- typedef int (*publish_pci_dev_cb) (struct pciback_device *pdev,
- unsigned int domain, unsigned int bus,
- unsigned int devfn, unsigned int devid);
--typedef int (*publish_pci_root_cb) (struct pciback_device * pdev,
-+typedef int (*publish_pci_root_cb) (struct pciback_device *pdev,
- unsigned int domain, unsigned int bus);
- int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
- int devid, publish_pci_dev_cb publish_cb);
-@@ -83,15 +89,17 @@ struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
- unsigned int domain, unsigned int bus,
- unsigned int devfn);
+ struct blkif_response {
+ uint64_t id; /* copied from request */
+ uint8_t operation; /* copied from request */
+--
+1.7.4
+
+
+From e8bc588ab4c297e3f3d8f61f205b2d2db258907b Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
+Date: Tue, 18 Jan 2011 20:09:41 -0500
+Subject: [PATCH 054/203] xen: Mark all initial reserved pages for the balloon as INVALID_P2M_ENTRY.
+
+With this patch, we diligently set regions that will be used by the
+balloon driver to be INVALID_P2M_ENTRY and under the ownership
+of the balloon driver. We are OK using __set_phys_to_machine
+as we do not expect to be allocating any P2M middle or entry pages.
+set_phys_to_machine has the side effect of potentially allocating
+new pages and we do not want that at this stage.
+
+We can do this because xen_build_mfn_list_list will have already
+allocated all such pages up to xen_max_p2m_pfn.
+
+We also move the check for auto translated physmap down the
+stack so it is present in __set_phys_to_machine.
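+
+The resulting split is roughly (a sketch of the p2m.c logic after
+this patch, not a verbatim copy):
+
+  bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+  {
+  	/* __set_phys_to_machine() now performs the auto-translated
+  	 * check itself and never allocates; only this wrapper may. */
+  	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
+  		if (!alloc_p2m(pfn))
+  			return false;
+  		return __set_phys_to_machine(pfn, mfn);
+  	}
+  	return true;
+  }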
+
+[v2: Rebased with mmu->p2m code split]
+Reviewed-by: Ian Campbell <ian.campbell(a)citrix.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
+---
+ arch/x86/include/asm/xen/page.h | 1 +
+ arch/x86/xen/mmu.c | 2 +-
+ arch/x86/xen/p2m.c | 9 ++++-----
+ arch/x86/xen/setup.c | 7 ++++++-
+ drivers/xen/balloon.c | 2 +-
+ 5 files changed, 13 insertions(+), 8 deletions(-)
+
+diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
+index f25bdf2..8ea9772 100644
+--- a/arch/x86/include/asm/xen/page.h
++++ b/arch/x86/include/asm/xen/page.h
+@@ -41,6 +41,7 @@ extern unsigned int machine_to_phys_order;
--/**
-+/**
- * Add for domain0 PCIE-AER handling. Get guest domain/bus/devfn in pciback
--* before sending aer request to pcifront, so that guest could identify
-+* before sending aer request to pcifront, so that guest could identify
- * device, coopearte with pciback to finish aer recovery job if device driver
- * has the capability
- */
+ extern unsigned long get_phys_to_machine(unsigned long pfn);
+ extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
++extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
--int pciback_get_pcifront_dev(struct pci_dev *pcidev, struct pciback_device *pdev,
-- unsigned int *domain, unsigned int *bus, unsigned int *devfn);
-+int pciback_get_pcifront_dev(struct pci_dev *pcidev,
-+ struct pciback_device *pdev,
-+ unsigned int *domain, unsigned int *bus,
-+ unsigned int *devfn);
- int pciback_init_devices(struct pciback_device *pdev);
- int pciback_publish_pci_roots(struct pciback_device *pdev,
- publish_pci_root_cb cb);
-@@ -106,17 +114,17 @@ void pciback_xenbus_unregister(void);
+ extern int m2p_add_override(unsigned long mfn, struct page *page);
+ extern int m2p_remove_override(struct page *page);
+diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
+index 5e92b61..0180ae8 100644
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -2074,7 +2074,7 @@ static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
+ in_frames[i] = virt_to_mfn(vaddr);
- #ifdef CONFIG_PCI_MSI
- int pciback_enable_msi(struct pciback_device *pdev,
-- struct pci_dev *dev, struct xen_pci_op *op);
-+ struct pci_dev *dev, struct xen_pci_op *op);
+ MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
+- set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
++ __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
- int pciback_disable_msi(struct pciback_device *pdev,
-- struct pci_dev *dev, struct xen_pci_op *op);
-+ struct pci_dev *dev, struct xen_pci_op *op);
+ if (out_frames)
+ out_frames[i] = virt_to_pfn(vaddr);
+diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
+index ddc81a0..df4e367 100644
+--- a/arch/x86/xen/p2m.c
++++ b/arch/x86/xen/p2m.c
+@@ -365,6 +365,10 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+ {
+ unsigned topidx, mididx, idx;
++ if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
++ BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
++ return true;
++ }
+ if (unlikely(pfn >= MAX_P2M_PFN)) {
+ BUG_ON(mfn != INVALID_P2M_ENTRY);
+ return true;
+@@ -384,11 +388,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
- int pciback_enable_msix(struct pciback_device *pdev,
-- struct pci_dev *dev, struct xen_pci_op *op);
-+ struct pci_dev *dev, struct xen_pci_op *op);
+ bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+ {
+- if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
+- BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
+- return true;
+- }
+-
+ if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
+ if (!alloc_p2m(pfn))
+ return false;
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index b5a7f92..7201800 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -52,6 +52,8 @@ phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
- int pciback_disable_msix(struct pciback_device *pdev,
-- struct pci_dev *dev, struct xen_pci_op *op);
-+ struct pci_dev *dev, struct xen_pci_op *op);
- #endif
- extern int verbose_request;
+ static __init void xen_add_extra_mem(unsigned long pages)
+ {
++ unsigned long pfn;
++
+ u64 size = (u64)pages * PAGE_SIZE;
+ u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;
-diff --git a/drivers/xen/pciback/pciback_ops.c b/drivers/xen/pciback/pciback_ops.c
-index 6624faf..bf83dca 100644
---- a/drivers/xen/pciback/pciback_ops.c
-+++ b/drivers/xen/pciback/pciback_ops.c
-@@ -5,11 +5,11 @@
- */
- #include <linux/module.h>
- #include <linux/wait.h>
--#include <asm/bitops.h>
-+#include <linux/bitops.h>
- #include <xen/events.h>
- #include "pciback.h"
+@@ -66,6 +68,9 @@ static __init void xen_add_extra_mem(unsigned long pages)
+ xen_extra_mem_size += size;
--int verbose_request = 0;
-+int verbose_request;
- module_param(verbose_request, int, 0644);
+ xen_max_p2m_pfn = PFN_DOWN(extra_start + size);
++
++ for (pfn = PFN_DOWN(extra_start); pfn <= xen_max_p2m_pfn; pfn++)
++ __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+ }
- /* Ensure a device is "turned off" and ready to be exported.
-@@ -37,12 +37,10 @@ void pciback_reset_device(struct pci_dev *dev)
+ static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
+@@ -104,7 +109,7 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
+ WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n",
+ start, end, ret);
+ if (ret == 1) {
+- set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
++ __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+ len++;
}
}
- }
--extern wait_queue_head_t aer_wait_queue;
--extern struct workqueue_struct *pciback_wq;
- /*
- * Now the same evtchn is used for both pcifront conf_read_write request
- * as well as pcie aer front end ack. We use a new work_queue to schedule
--* pciback conf_read_write service for avoiding confict with aer_core
-+* pciback conf_read_write service for avoiding confict with aer_core
- * do_recovery job which also use the system default work_queue
- */
- void test_and_schedule_op(struct pciback_device *pdev)
-@@ -50,14 +48,13 @@ void test_and_schedule_op(struct pciback_device *pdev)
- /* Check that frontend is requesting an operation and that we are not
- * already processing a request */
- if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
-- && !test_and_set_bit(_PDEVF_op_active, &pdev->flags))
-- {
-+ && !test_and_set_bit(_PDEVF_op_active, &pdev->flags)) {
- queue_work(pciback_wq, &pdev->op_work);
- }
- /*_XEN_PCIB_active should have been cleared by pcifront. And also make
- sure pciback is waiting for ack by checking _PCIB_op_pending*/
-- if (!test_bit(_XEN_PCIB_active,(unsigned long *)&pdev->sh_info->flags)
-- &&test_bit(_PCIB_op_pending, &pdev->flags)) {
-+ if (!test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags)
-+ && test_bit(_PCIB_op_pending, &pdev->flags)) {
- wake_up(&aer_wait_queue);
+diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
+index 43f9f02..b1661cd 100644
+--- a/drivers/xen/balloon.c
++++ b/drivers/xen/balloon.c
+@@ -296,7 +296,7 @@ static int decrease_reservation(unsigned long nr_pages)
+ /* No more mappings: invalidate P2M and add to balloon. */
+ for (i = 0; i < nr_pages; i++) {
+ pfn = mfn_to_pfn(frame_list[i]);
+- set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
++ __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+ balloon_append(pfn_to_page(pfn));
}
- }
-@@ -69,7 +66,8 @@ void test_and_schedule_op(struct pciback_device *pdev)
-
- void pciback_do_op(struct work_struct *data)
- {
-- struct pciback_device *pdev = container_of(data, struct pciback_device, op_work);
-+ struct pciback_device *pdev =
-+ container_of(data, struct pciback_device, op_work);
- struct pci_dev *dev;
- struct xen_pci_op *op = &pdev->sh_info->op;
-
-@@ -77,38 +75,36 @@ void pciback_do_op(struct work_struct *data)
- if (dev == NULL)
- op->err = XEN_PCI_ERR_dev_not_found;
-- else
-- {
-- switch (op->cmd)
-- {
-- case XEN_PCI_OP_conf_read:
-- op->err = pciback_config_read(dev,
-- op->offset, op->size, &op->value);
-- break;
-- case XEN_PCI_OP_conf_write:
-- op->err = pciback_config_write(dev,
-- op->offset, op->size, op->value);
-- break;
-+ else {
-+ switch (op->cmd) {
-+ case XEN_PCI_OP_conf_read:
-+ op->err = pciback_config_read(dev,
-+ op->offset, op->size, &op->value);
-+ break;
-+ case XEN_PCI_OP_conf_write:
-+ op->err = pciback_config_write(dev,
-+ op->offset, op->size, op->value);
-+ break;
- #ifdef CONFIG_PCI_MSI
-- case XEN_PCI_OP_enable_msi:
-- op->err = pciback_enable_msi(pdev, dev, op);
-- break;
-- case XEN_PCI_OP_disable_msi:
-- op->err = pciback_disable_msi(pdev, dev, op);
-- break;
-- case XEN_PCI_OP_enable_msix:
-- op->err = pciback_enable_msix(pdev, dev, op);
-- break;
-- case XEN_PCI_OP_disable_msix:
-- op->err = pciback_disable_msix(pdev, dev, op);
-- break;
-+ case XEN_PCI_OP_enable_msi:
-+ op->err = pciback_enable_msi(pdev, dev, op);
-+ break;
-+ case XEN_PCI_OP_disable_msi:
-+ op->err = pciback_disable_msi(pdev, dev, op);
-+ break;
-+ case XEN_PCI_OP_enable_msix:
-+ op->err = pciback_enable_msix(pdev, dev, op);
-+ break;
-+ case XEN_PCI_OP_disable_msix:
-+ op->err = pciback_disable_msix(pdev, dev, op);
-+ break;
- #endif
-- default:
-- op->err = XEN_PCI_ERR_not_implemented;
-- break;
-+ default:
-+ op->err = XEN_PCI_ERR_not_implemented;
-+ break;
- }
- }
-- /* Tell the driver domain that we're done. */
-+ /* Tell the driver domain that we're done. */
- wmb();
- clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
- notify_remote_via_irq(pdev->evtchn_irq);
-@@ -119,7 +115,7 @@ void pciback_do_op(struct work_struct *data)
- smp_mb__after_clear_bit(); /* /before/ final check for work */
-
- /* Check to see if the driver domain tried to start another request in
-- * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
-+ * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
- */
- test_and_schedule_op(pdev);
- }
-diff --git a/drivers/xen/pciback/slot.c b/drivers/xen/pciback/slot.c
-index 105a8b6..efb922d 100644
---- a/drivers/xen/pciback/slot.c
-+++ b/drivers/xen/pciback/slot.c
-@@ -65,7 +65,8 @@ int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
- for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
- if (slot_dev->slots[bus][slot] == NULL) {
- printk(KERN_INFO
-- "pciback: slot: %s: assign to virtual slot %d, bus %d\n",
-+ "pciback: slot: %s: assign to virtual "
-+ "slot %d, bus %d\n",
- pci_name(dev), slot, bus);
- slot_dev->slots[bus][slot] = dev;
- goto unlock;
-@@ -76,14 +77,14 @@ int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
- xenbus_dev_fatal(pdev->xdev, err,
- "No more space on root virtual PCI bus");
-
-- unlock:
-+unlock:
- spin_unlock_irqrestore(&slot_dev->lock, flags);
-
- /* Publish this device. */
-- if(!err)
-+ if (!err)
- err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, 0), devid);
-
-- out:
-+out:
- return err;
- }
-
-@@ -105,7 +106,7 @@ void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
- }
- }
-
-- out:
-+out:
- spin_unlock_irqrestore(&slot_dev->lock, flags);
-
- if (found_dev)
-@@ -156,8 +157,10 @@ void pciback_release_devices(struct pciback_device *pdev)
- pdev->pci_dev_data = NULL;
- }
-
--int pciback_get_pcifront_dev(struct pci_dev *pcidev, struct pciback_device *pdev,
-- unsigned int *domain, unsigned int *bus, unsigned int *devfn)
-+int pciback_get_pcifront_dev(struct pci_dev *pcidev,
-+ struct pciback_device *pdev,
-+ unsigned int *domain, unsigned int *bus,
-+ unsigned int *devfn)
- {
- int slot, busnr;
- struct slot_dev_data *slot_dev = pdev->pci_dev_data;
-@@ -172,11 +175,12 @@ int pciback_get_pcifront_dev(struct pci_dev *pcidev, struct pciback_device *pdev
- dev = slot_dev->slots[busnr][slot];
- if (dev && dev->bus->number == pcidev->bus->number
- && dev->devfn == pcidev->devfn
-- && pci_domain_nr(dev->bus) == pci_domain_nr(pcidev->bus)) {
-+ && pci_domain_nr(dev->bus) ==
-+ pci_domain_nr(pcidev->bus)) {
- found = 1;
- *domain = 0;
- *bus = busnr;
-- *devfn = PCI_DEVFN(slot,0);
-+ *devfn = PCI_DEVFN(slot, 0);
- goto out;
- }
- }
-diff --git a/drivers/xen/pciback/vpci.c b/drivers/xen/pciback/vpci.c
-index a5b7ece..721b81b 100644
---- a/drivers/xen/pciback/vpci.c
-+++ b/drivers/xen/pciback/vpci.c
-@@ -125,14 +125,14 @@ int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
- xenbus_dev_fatal(pdev->xdev, err,
- "No more space on root virtual PCI bus");
-
-- unlock:
-+unlock:
- spin_unlock_irqrestore(&vpci_dev->lock, flags);
-
- /* Publish this device. */
-- if(!err)
-+ if (!err)
- err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid);
-
-- out:
-+out:
- return err;
- }
-
-@@ -158,7 +158,7 @@ void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
- }
- }
-
-- out:
-+out:
- spin_unlock_irqrestore(&vpci_dev->lock, flags);
-
- if (found_dev)
-@@ -176,9 +176,8 @@ int pciback_init_devices(struct pciback_device *pdev)
-
- spin_lock_init(&vpci_dev->lock);
-
-- for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++)
- INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);
-- }
-
- pdev->pci_dev_data = vpci_dev;
-
-@@ -211,8 +210,10 @@ void pciback_release_devices(struct pciback_device *pdev)
- pdev->pci_dev_data = NULL;
- }
-
--int pciback_get_pcifront_dev(struct pci_dev *pcidev, struct pciback_device *pdev,
-- unsigned int *domain, unsigned int *bus, unsigned int *devfn)
-+int pciback_get_pcifront_dev(struct pci_dev *pcidev,
-+ struct pciback_device *pdev,
-+ unsigned int *domain, unsigned int *bus,
-+ unsigned int *devfn)
- {
- struct pci_dev_entry *entry;
- struct pci_dev *dev = NULL;
-@@ -227,15 +228,16 @@ int pciback_get_pcifront_dev(struct pci_dev *pcidev, struct pciback_device *pdev
- list) {
- dev = entry->dev;
- if (dev && dev->bus->number == pcidev->bus->number
-- && pci_domain_nr(dev->bus) == pci_domain_nr(pcidev->bus)
-- && dev->devfn == pcidev->devfn)
-- {
-+ && pci_domain_nr(dev->bus) ==
-+ pci_domain_nr(pcidev->bus)
-+ && dev->devfn == pcidev->devfn) {
- found = 1;
- *domain = 0;
- *bus = 0;
-- *devfn = PCI_DEVFN(slot, PCI_FUNC(pcidev->devfn));
-+ *devfn = PCI_DEVFN(slot,
-+ PCI_FUNC(pcidev->devfn));
- }
-- }
-+ }
- }
- spin_unlock_irqrestore(&vpci_dev->lock, flags);
- return found;
-diff --git a/drivers/xen/pciback/xenbus.c b/drivers/xen/pciback/xenbus.c
-index a85c413..efec585 100644
---- a/drivers/xen/pciback/xenbus.c
-+++ b/drivers/xen/pciback/xenbus.c
-@@ -40,7 +40,7 @@ static struct pciback_device *alloc_pdev(struct xenbus_device *xdev)
- kfree(pdev);
- pdev = NULL;
- }
-- out:
-+out:
- return pdev;
- }
-
-@@ -111,7 +111,7 @@ static int pciback_do_attach(struct pciback_device *pdev, int gnt_ref,
- err = 0;
-
- dev_dbg(&pdev->xdev->dev, "Attached!\n");
-- out:
-+out:
- return err;
- }
-
-@@ -166,11 +166,10 @@ static int pciback_attach(struct pciback_device *pdev)
- "Error switching to connected state!");
-
- dev_dbg(&pdev->xdev->dev, "Connected? %d\n", err);
-- out:
-+out:
- spin_unlock(&pdev->dev_lock);
-
-- if (magic)
-- kfree(magic);
-+ kfree(magic);
-
- return err;
- }
-@@ -193,7 +192,7 @@ static int pciback_publish_pci_dev(struct pciback_device *pdev,
- "%04x:%02x:%02x.%02x", domain, bus,
- PCI_SLOT(devfn), PCI_FUNC(devfn));
-
-- out:
-+out:
- return err;
- }
-
-@@ -230,7 +229,7 @@ static int pciback_export_device(struct pciback_device *pdev,
- * to other driver domains (as he who controls the bridge can disable
- * it and stop the other devices from working).
- */
-- out:
-+out:
- return err;
- }
-
-@@ -253,8 +252,8 @@ static int pciback_remove_device(struct pciback_device *pdev,
- }
-
- pciback_release_pci_dev(pdev, dev);
--
-- out:
-+
-+out:
- return err;
- }
-
-@@ -314,7 +313,7 @@ static int pciback_publish_pci_root(struct pciback_device *pdev,
- err = xenbus_printf(XBT_NIL, pdev->xdev->nodename,
- "root_num", "%d", (root_num + 1));
-
-- out:
-+out:
- return err;
- }
-
-@@ -358,7 +357,7 @@ static int pciback_reconfigure(struct pciback_device *pdev)
- }
- err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, state_str,
- "%d", &substate);
-- if (err != 1)
-+ if (err != 1)
- substate = XenbusStateUnknown;
-
- switch (substate) {
-@@ -389,14 +388,15 @@ static int pciback_reconfigure(struct pciback_device *pdev)
- "configuration");
- goto out;
- }
--
-+
- err = pciback_export_device(pdev, domain, bus, slot,
- func, i);
- if (err)
- goto out;
-
- /* Publish pci roots. */
-- err = pciback_publish_pci_roots(pdev, pciback_publish_pci_root);
-+ err = pciback_publish_pci_roots(pdev,
-+ pciback_publish_pci_root);
- if (err) {
- xenbus_dev_fatal(pdev->xdev, err,
- "Error while publish PCI root"
-@@ -412,7 +412,7 @@ static int pciback_reconfigure(struct pciback_device *pdev)
- "Error switching substate of "
- "dev-%d\n", i);
- goto out;
-- }
-+ }
- break;
-
- case XenbusStateClosing:
-@@ -445,7 +445,7 @@ static int pciback_reconfigure(struct pciback_device *pdev)
-
- err = pciback_remove_device(pdev, domain, bus, slot,
- func);
-- if(err)
-+ if (err)
- goto out;
-
- /* TODO: If at some point we implement support for pci
-@@ -466,8 +466,8 @@ static int pciback_reconfigure(struct pciback_device *pdev)
- "Error switching to reconfigured state!");
- goto out;
- }
--
-- out:
-+
-+out:
- spin_unlock(&pdev->dev_lock);
-
- return 0;
-@@ -591,7 +591,7 @@ static int pciback_setup_backend(struct pciback_device *pdev)
- xenbus_dev_fatal(pdev->xdev, err, "Error switching "
- "substate of dev-%d\n", i);
- goto out;
-- }
-+ }
- }
-
- err = pciback_publish_pci_roots(pdev, pciback_publish_pci_root);
-@@ -607,7 +607,7 @@ static int pciback_setup_backend(struct pciback_device *pdev)
- xenbus_dev_fatal(pdev->xdev, err,
- "Error switching to initialised state!");
-
-- out:
-+out:
- spin_unlock(&pdev->dev_lock);
-
- if (!err)
-@@ -663,7 +663,7 @@ static int pciback_xenbus_probe(struct xenbus_device *dev,
- */
- pciback_be_watch(&pdev->be_watch, NULL, 0);
-
-- out:
-+out:
- return err;
- }
-
-@@ -679,7 +679,7 @@ static int pciback_xenbus_remove(struct xenbus_device *dev)
-
- static const struct xenbus_device_id xenpci_ids[] = {
- {"pci"},
-- {{0}},
-+ {""},
- };
-
- static struct xenbus_driver xenbus_pciback_driver = {
--
1.7.4
-From ca1ee0c25b425d9739b1a24cf911de2e041a2514 Mon Sep 17 00:00:00 2001
+From 7b2fc719094440d3eacacd95fa6a1f5ac495396b Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Mon, 8 Mar 2010 18:39:15 -0500
-Subject: [PATCH 017/244] xen-pciback: remove driver_data direct access to struct device
-
-The driver core is going to not allow direct access to the
-driver_data pointer in struct device. Instead, the functions
-dev_get_drvdata() and dev_set_drvdata() should be used.
+Date: Tue, 18 Jan 2011 20:15:21 -0500
+Subject: [PATCH 060/203] xen/mmu: Add the notion of identity (1-1) mapping.
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
----
- drivers/xen/pciback/xenbus.c | 8 ++++----
- 1 files changed, 4 insertions(+), 4 deletions(-)
+Our P2M tree structure is three levels deep. On the leaf nodes
+we set the Machine Frame Number (MFN) of the PFN. What this means
+is that when one does pfn_to_mfn(pfn), which is used when creating
+PTE entries, you get the real MFN of the hardware. When Xen sets
+up a guest it initially populates an array which has descending
+(or ascending) MFN values, as so:
-diff --git a/drivers/xen/pciback/xenbus.c b/drivers/xen/pciback/xenbus.c
-index efec585..af6c25a 100644
---- a/drivers/xen/pciback/xenbus.c
-+++ b/drivers/xen/pciback/xenbus.c
-@@ -26,7 +26,7 @@ static struct pciback_device *alloc_pdev(struct xenbus_device *xdev)
- dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev);
-
- pdev->xdev = xdev;
-- xdev->dev.driver_data = pdev;
-+ dev_set_drvdata(&xdev->dev, pdev);
-
- spin_lock_init(&pdev->dev_lock);
-
-@@ -75,7 +75,7 @@ static void free_pdev(struct pciback_device *pdev)
-
- pciback_release_devices(pdev);
-
-- pdev->xdev->dev.driver_data = NULL;
-+ dev_set_drvdata(&pdev->xdev->dev, NULL);
- pdev->xdev = NULL;
-
- kfree(pdev);
-@@ -476,7 +476,7 @@ out:
- static void pciback_frontend_changed(struct xenbus_device *xdev,
- enum xenbus_state fe_state)
- {
-- struct pciback_device *pdev = xdev->dev.driver_data;
-+ struct pciback_device *pdev = dev_get_drvdata(&xdev->dev);
-
- dev_dbg(&xdev->dev, "fe state changed %d\n", fe_state);
-
-@@ -669,7 +669,7 @@ out:
-
- static int pciback_xenbus_remove(struct xenbus_device *dev)
- {
-- struct pciback_device *pdev = dev->dev.driver_data;
-+ struct pciback_device *pdev = dev_get_drvdata(&dev->dev);
-
- if (pdev != NULL)
- free_pdev(pdev);
---
-1.7.4
+ idx: 0, 1, 2
+ [0x290F, 0x290E, 0x290D, ..]
+so pfn_to_mfn(2)==0x290D. If you start and restart many guests, that list
+starts looking quite random.
-From 585f088e6aec3e4514ac2563852961f71c74e47e Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Mon, 8 Mar 2010 18:47:55 -0500
-Subject: [PATCH 018/244] xen-pciback: Fix compile error: 'TASK_NORMAL' undeclared.
+We graft this structure onto our P2M tree and stick
+those MFNs in the leaves. But for all other leaf entries, or for the top
+root, or middle ones, for which there is a void entry, we assume it is
+"missing". So
+ pfn_to_mfn(0xc0000)=INVALID_P2M_ENTRY.
-Both files were missing the #include <linux/sched.h>
+We add the possibility of setting 1-1 mappings on certain regions, so
+that:
+ pfn_to_mfn(0xc0000)=0xc0000
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
----
- drivers/xen/pciback/pci_stub.c | 1 +
- drivers/xen/pciback/pciback_ops.c | 1 +
- 2 files changed, 2 insertions(+), 0 deletions(-)
+The benefit of this is that for non-RAM regions (think
+PCI BARs, or ACPI spaces) we can create mappings easily because the
+PFN value matches the MFN.
-diff --git a/drivers/xen/pciback/pci_stub.c b/drivers/xen/pciback/pci_stub.c
-index 28222ee..6fc0b6e 100644
---- a/drivers/xen/pciback/pci_stub.c
-+++ b/drivers/xen/pciback/pci_stub.c
-@@ -12,6 +12,7 @@
- #include <linux/kref.h>
- #include <linux/pci.h>
- #include <linux/wait.h>
-+#include <linux/sched.h>
- #include <asm/atomic.h>
- #include <xen/events.h>
- #include <asm/xen/pci.h>
-diff --git a/drivers/xen/pciback/pciback_ops.c b/drivers/xen/pciback/pciback_ops.c
-index bf83dca..2b9a93e 100644
---- a/drivers/xen/pciback/pciback_ops.c
-+++ b/drivers/xen/pciback/pciback_ops.c
-@@ -7,6 +7,7 @@
- #include <linux/wait.h>
- #include <linux/bitops.h>
- #include <xen/events.h>
-+#include <linux/sched.h>
- #include "pciback.h"
-
- int verbose_request;
---
-1.7.4
+For this to work efficiently we introduce one new page p2m_identity and
+allocate (via reserve_brk) any other pages we need to cover the sides
+(1GB or 4MB boundary violations). All entries in p2m_identity are set to
+INVALID_P2M_ENTRY type (the Xen toolstack only recognizes that and MFNs,
+no other fancy value).
+On lookup we spot that the entry points to p2m_identity and return the identity
+value instead of dereferencing and returning INVALID_P2M_ENTRY. If the entry
+points to an allocated page, we just proceed as before and return the PFN.
+If the PFN has IDENTITY_FRAME_BIT set we unmask that in appropriate functions
+(pfn_to_mfn).
-From 03dd111c81bad8e69cdb8b5d67381702adb24593 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Wed, 9 Dec 2009 17:43:16 -0500
-Subject: [PATCH 019/244] xen-pciback: Remove the vestiges of CONFIG_PCI_GUESTDEV.
+The reason for having the IDENTITY_FRAME_BIT instead of just returning the
+PFN is that we could find ourselves where pfn_to_mfn(pfn)==pfn for a
+non-identity pfn. To protect ourselves against that, we elect to set (and get)
+the IDENTITY_FRAME_BIT on all identity-mapped PFNs.
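+
+(To make the bit arithmetic concrete, here is a small standalone sketch of
+the frame-bit encoding; the macro definitions mirror the hunk for
+arch/x86/include/asm/xen/page.h below, while the demo PFN is made up:)
+
+	#define FOREIGN_FRAME_BIT  (1UL << (BITS_PER_LONG - 1))
+	#define IDENTITY_FRAME_BIT (1UL << (BITS_PER_LONG - 2))
+	#define IDENTITY_FRAME(m)  ((m) | IDENTITY_FRAME_BIT)
+
+	unsigned long mfn = IDENTITY_FRAME(0xc0000UL);    /* as stored in the p2m */
+	mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT); /* pfn_to_mfn() unmask */
+	/* mfn is 0xc0000 again, yet the stored value was distinguishable
+	 * from a real MFN that merely happens to equal its PFN. */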
-The same functionality for this (that used to be called
-pci_is_guestdev) is now via: "pci=resource_alignment="
-command line argument.
+This simplistic diagram is used to explain the more subtle piece of code.
+There is also a diagram of the P2M at the end that can help.
+Imagine your E820 looking as so:
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
----
- drivers/xen/pciback/pci_stub.c | 10 ----------
- 1 files changed, 0 insertions(+), 10 deletions(-)
+ 1GB 2GB
+/-------------------+---------\/----\ /----------\ /---+-----\
+| System RAM | Sys RAM ||ACPI| | reserved | | Sys RAM |
+\-------------------+---------/\----/ \----------/ \---+-----/
+ ^- 1029MB ^- 2001MB
-diff --git a/drivers/xen/pciback/pci_stub.c b/drivers/xen/pciback/pci_stub.c
-index 6fc0b6e..d30aa7c 100644
---- a/drivers/xen/pciback/pci_stub.c
-+++ b/drivers/xen/pciback/pci_stub.c
-@@ -430,16 +430,6 @@ static int __devinit pcistub_probe(struct pci_dev *dev,
-
- dev_info(&dev->dev, "seizing device\n");
- err = pcistub_seize(dev);
--#ifdef CONFIG_PCI_GUESTDEV
-- } else if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
-- if (!pci_is_guestdev(dev)) {
-- err = -ENODEV;
-- goto out;
-- }
--
-- dev_info(&dev->dev, "seizing device\n");
-- err = pcistub_seize(dev);
--#endif /* CONFIG_PCI_GUESTDEV */
- } else
- /* Didn't find the device */
- err = -ENODEV;
---
-1.7.4
+[1029MB = 263424 (0x40500), 2001MB = 512256 (0x7D100), 2048MB = 524288 (0x80000)]
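+[A quick sanity check of those numbers, assuming 4 KiB pages: one MB holds
+256 pages, so 1029 * 256 = 263424 and 2001 * 256 = 512256.]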
+And dom0_mem=max:3GB,1GB is passed in to the guest, meaning memory past 1GB
+is actually not present (would have to kick the balloon driver to put it in).
-From 30acb3491495a43b59a64612ad92a7a290c59e82 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Wed, 9 Dec 2009 17:43:17 -0500
-Subject: [PATCH 020/244] xen-pciback: Remove deprecated routine to find domain owner of PCI device.
+When we are told to set the PFNs for identity mapping (see patch: "xen/setup:
+Set identity mapping for non-RAM E820 and E820 gaps.") we pass in the start
+PFN and the end PFN (263424 and 512256 respectively). The first step is
+to reserve_brk a top leaf page if the p2m[1] is missing. The top leaf page
+covers 512^2 of page estate (1GB) and in case the start or end PFN is not
+aligned on 512^2*PAGE_SIZE (1GB) we loop on aligned 1GB PFNs from start pfn to
+end pfn. We reserve_brk top leaf pages if they are missing (meaning they point
+to p2m_mid_missing).
-In linux-2.6.18.hg tree the mechanism to find the domain owner was
-for the MSI driver (msi-xen.c) to call in this function to retrieve
-the domain number. This is not the way anymore.
+With the E820 example above, 263424 is not 1GB aligned so we allocate a
+reserve_brk page which will cover the PFN estate from 0x40000 to 0x80000.
+Each entry in the allocated page is "missing" (points to p2m_missing).
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+The next stage is to determine if we need to do a more granular boundary check
+on the 4MB (or 2MB, depending on architecture) boundaries off the start and end
+PFNs. We check if the start pfn and end pfn violate that boundary check, and if
+so reserve_brk a middle (p2m[x][y]) leaf page. This way we have a much finer
+granularity of setting which PFNs are missing and which ones are identity.
+In our example 263424 and 512256 both fail the check so we reserve_brk two
+pages. Populate them with INVALID_P2M_ENTRY (so they both have "missing" values)
+and assign them to p2m[1][2] and p2m[1][488] respectively.
+
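+As an aside (not part of the patch), the tree indices used in this
+walk-through split out of a PFN as follows, mirroring the p2m_top_index,
+p2m_mid_index and p2m_index helpers in arch/x86/xen/p2m.c; on 64-bit with
+4 KiB pages both P2M_PER_PAGE and P2M_MID_PER_PAGE are 512:
+
+	topidx = pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);  /* 1GB slot  */
+	mididx = (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;  /* 2MB slot  */
+	idx    = pfn % P2M_PER_PAGE;                       /* page slot */
+
+	/* e.g. pfn 263424 -> p2m[1][2][256], pfn 512256 -> p2m[1][488][256] */
+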
+At this point we would at minimum reserve_brk one page, but could be up to
+three. Each call to set_phys_range_identity has at maximum a three page
+cost. If we were to query the P2M at this stage, all those entries from
+start PFN through end PFN (so 1029MB -> 2001MB) would return INVALID_P2M_ENTRY
+("missing").
+
+The next step is to walk from the start pfn to the end pfn setting
+the IDENTITY_FRAME_BIT on each PFN. This is done in 'set_phys_range_identity'.
+If we find that the middle leaf is pointing to p2m_missing we can swap it over
+to p2m_identity - this way covering 4MB (or 2MB) PFN space. At this point we
+do not need to worry about boundary alignment (so no need to reserve_brk a middle
+page, figure out which PFNs are "missing" and which ones are identity), as that
+has been done earlier. If we find that the middle leaf is not occupied by
+p2m_identity or p2m_missing, we dereference that page (which covers
+512 PFNs) and set the appropriate PFN with IDENTITY_FRAME_BIT. In our example
+263424 and 512256 end up there, and we set from p2m[1][2][256->511] and
+p2m[1][488][0->256] with IDENTITY_FRAME_BIT set.
+
+All other regions that are void (or not filled) either point to p2m_missing
+(considered missing) or have the default value of INVALID_P2M_ENTRY (also
+considered missing). In our case, p2m[1][2][0->255] and p2m[1][488][257->511]
+contain the INVALID_P2M_ENTRY value and are considered "missing."
+
+This is what the p2m ends up looking (for the E820 above) with this
+fabulous drawing:
+
+ p2m /--------------\
+ /-----\ | &mfn_list[0],| /-----------------\
+ | 0 |------>| &mfn_list[1],| /---------------\ | ~0, ~0, .. |
+ |-----| | ..., ~0, ~0 | | ~0, ~0, [x]---+----->| IDENTITY [@256] |
+ | 1 |---\ \--------------/ | [p2m_identity]+\ | IDENTITY [@257] |
+ |-----| \ | [p2m_identity]+\\ | .... |
+ | 2 |--\ \-------------------->| ... | \\ \----------------/
+ |-----| \ \---------------/ \\
+ | 3 |\ \ \\ p2m_identity
+ |-----| \ \-------------------->/---------------\ /-----------------\
+ | .. +->+ | [p2m_identity]+-->| ~0, ~0, ~0, ... |
+ \-----/ / | [p2m_identity]+-->| ..., ~0 |
+ / /---------------\ | .... | \-----------------/
+ / | IDENTITY[@0] | /-+-[x], ~0, ~0.. |
+ / | IDENTITY[@256]|<----/ \---------------/
+ / | ~0, ~0, .... |
+ | \---------------/
+ |
+ p2m_missing p2m_missing
+/------------------\ /------------\
+| [p2m_mid_missing]+---->| ~0, ~0, ~0 |
+| [p2m_mid_missing]+---->| ..., ~0 |
+\------------------/ \------------/
+
+where ~0 is INVALID_P2M_ENTRY. IDENTITY is (PFN | IDENTITY_BIT)
+
+[v5: Changed code to use ranges, added ASCII art]
+[v6: Rebased on top of xen->p2m code split]
+[v4: Squished patches in just this one]
+[v7: Added RESERVE_BRK for potentially allocated pages]
+[v8: Fixed alignment problem]
+[v9: Changed 1<<3X to 1<<BITS_PER_LONG-X]
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
---
- drivers/xen/pciback/pci_stub.c | 19 -------------------
- 1 files changed, 0 insertions(+), 19 deletions(-)
+ arch/x86/include/asm/xen/page.h | 8 ++-
+ arch/x86/xen/p2m.c | 113 ++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 117 insertions(+), 4 deletions(-)
-diff --git a/drivers/xen/pciback/pci_stub.c b/drivers/xen/pciback/pci_stub.c
-index d30aa7c..30e7b59 100644
---- a/drivers/xen/pciback/pci_stub.c
-+++ b/drivers/xen/pciback/pci_stub.c
-@@ -1157,22 +1157,6 @@ static ssize_t permissive_show(struct device_driver *drv, char *buf)
+diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
+index 8ea9772..65fa4f2 100644
+--- a/arch/x86/include/asm/xen/page.h
++++ b/arch/x86/include/asm/xen/page.h
+@@ -29,8 +29,10 @@ typedef struct xpaddr {
- DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show, permissive_add);
+ /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
+ #define INVALID_P2M_ENTRY (~0UL)
+-#define FOREIGN_FRAME_BIT (1UL<<31)
++#define FOREIGN_FRAME_BIT (1UL<<(BITS_PER_LONG-1))
++#define IDENTITY_FRAME_BIT (1UL<<(BITS_PER_LONG-2))
+ #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
++#define IDENTITY_FRAME(m) ((m) | IDENTITY_FRAME_BIT)
--#ifdef CONFIG_PCI_MSI
--
--int pciback_get_owner(struct pci_dev *dev)
--{
-- struct pcistub_device *psdev;
--
-- psdev = pcistub_device_find(pci_domain_nr(dev->bus), dev->bus->number,
-- PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
--
-- if (!psdev || !psdev->pdev)
-- return -1;
--
-- return psdev->pdev->xdev->otherend_id;
--}
--#endif
--
- static void pcistub_exit(void)
+ /* Maximum amount of memory we can handle in a domain in pages */
+ #define MAX_DOMAIN_PAGES \
+@@ -42,6 +44,8 @@ extern unsigned int machine_to_phys_order;
+ extern unsigned long get_phys_to_machine(unsigned long pfn);
+ extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+ extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
++extern unsigned long set_phys_range_identity(unsigned long pfn_s,
++ unsigned long pfn_e);
+
+ extern int m2p_add_override(unsigned long mfn, struct page *page);
+ extern int m2p_remove_override(struct page *page);
+@@ -58,7 +62,7 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
+ mfn = get_phys_to_machine(pfn);
+
+ if (mfn != INVALID_P2M_ENTRY)
+- mfn &= ~FOREIGN_FRAME_BIT;
++ mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
+
+ return mfn;
+ }
+diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
+index df4e367..dd30ec8 100644
+--- a/arch/x86/xen/p2m.c
++++ b/arch/x86/xen/p2m.c
+@@ -59,9 +59,15 @@ static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
+ static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
+ static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE);
+
++static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE);
++
+ RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
+ RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
+
++/* We might hit two boundary violations at the start and end; at most each
++ * boundary violation will require three middle nodes. */
++RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3);
++
+ static inline unsigned p2m_top_index(unsigned long pfn)
{
- driver_remove_file(&pciback_pci_driver.driver, &driver_attr_new_slot);
-@@ -1183,7 +1167,6 @@ static void pcistub_exit(void)
- driver_remove_file(&pciback_pci_driver.driver, &driver_attr_permissive);
+ BUG_ON(pfn >= MAX_P2M_PFN);
+@@ -221,6 +227,9 @@ void __init xen_build_dynamic_phys_to_machine(void)
+ p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_top_init(p2m_top);
- pci_unregister_driver(&pciback_pci_driver);
-- WARN_ON(unregister_msi_get_owner(pciback_get_owner));
++ p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE);
++ p2m_init(p2m_identity);
++
+ /*
+ * The domain builder gives us a pre-constructed p2m array in
+ * mfn_list for all the pages initially given to us, so we just
+@@ -272,6 +281,14 @@ unsigned long get_phys_to_machine(unsigned long pfn)
+ mididx = p2m_mid_index(pfn);
+ idx = p2m_index(pfn);
+
++ /*
++ * The INVALID_P2M_ENTRY is filled in both p2m_*identity
++ * and in p2m_*missing, so returning the INVALID_P2M_ENTRY
++ * would be wrong.
++ */
++ if (p2m_top[topidx][mididx] == p2m_identity)
++ return IDENTITY_FRAME(pfn);
++
+ return p2m_top[topidx][mididx][idx];
}
+ EXPORT_SYMBOL_GPL(get_phys_to_machine);
+@@ -341,9 +358,11 @@ static bool alloc_p2m(unsigned long pfn)
+ p2m_top_mfn_p[topidx] = mid_mfn;
+ }
- static int __init pcistub_init(void)
-@@ -1241,8 +1224,6 @@ static int __init pcistub_init(void)
- err = driver_create_file(&pciback_pci_driver.driver,
- &driver_attr_permissive);
+- if (p2m_top[topidx][mididx] == p2m_missing) {
++ if (p2m_top[topidx][mididx] == p2m_identity ||
++ p2m_top[topidx][mididx] == p2m_missing) {
+ /* p2m leaf page is missing */
+ unsigned long *p2m;
++ unsigned long *p2m_orig = p2m_top[topidx][mididx];
-- if (!err)
-- err = register_msi_get_owner(pciback_get_owner);
- if (err)
- pcistub_exit();
+ p2m = alloc_p2m_page();
+ if (!p2m)
+@@ -351,7 +370,7 @@ static bool alloc_p2m(unsigned long pfn)
+
+ p2m_init(p2m);
+
+- if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing)
++ if (cmpxchg(&mid[mididx], p2m_orig, p2m) != p2m_orig)
+ free_p2m_page(p2m);
+ else
+ mid_mfn[mididx] = virt_to_mfn(p2m);
+@@ -360,6 +379,82 @@ static bool alloc_p2m(unsigned long pfn)
+ return true;
+ }
+
++bool __early_alloc_p2m(unsigned long pfn)
++{
++ unsigned topidx, mididx, idx;
++
++ topidx = p2m_top_index(pfn);
++ mididx = p2m_mid_index(pfn);
++ idx = p2m_index(pfn);
++
++	/* Pfff.. No boundary cross-over, let's get out. */
++ if (!idx)
++ return false;
++
++ WARN(p2m_top[topidx][mididx] == p2m_identity,
++ "P2M[%d][%d] == IDENTITY, should be MISSING (or alloced)!\n",
++ topidx, mididx);
++
++ /*
++ * Could be done by xen_build_dynamic_phys_to_machine..
++ */
++ if (p2m_top[topidx][mididx] != p2m_missing)
++ return false;
++
++ /* Boundary cross-over for the edges: */
++ if (idx) {
++ unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
++
++ p2m_init(p2m);
++
++ p2m_top[topidx][mididx] = p2m;
++
++ }
++ return idx != 0;
++}
++unsigned long set_phys_range_identity(unsigned long pfn_s,
++ unsigned long pfn_e)
++{
++ unsigned long pfn;
++
++ if (unlikely(pfn_s >= MAX_P2M_PFN || pfn_e >= MAX_P2M_PFN))
++ return 0;
++
++ if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
++ return pfn_e - pfn_s;
++
++ if (pfn_s > pfn_e)
++ return 0;
++
++ for (pfn = (pfn_s & ~(P2M_MID_PER_PAGE * P2M_PER_PAGE - 1));
++ pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE));
++ pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
++ {
++ unsigned topidx = p2m_top_index(pfn);
++ if (p2m_top[topidx] == p2m_mid_missing) {
++ unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
++
++ p2m_mid_init(mid);
++
++ p2m_top[topidx] = mid;
++ }
++ }
++
++ __early_alloc_p2m(pfn_s);
++ __early_alloc_p2m(pfn_e);
++
++ for (pfn = pfn_s; pfn < pfn_e; pfn++)
++ if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
++ break;
++
++ if (!WARN((pfn - pfn_s) != (pfn_e - pfn_s),
++ "Identity mapping failed. We are %ld short of 1-1 mappings!\n",
++ (pfn_e - pfn_s) - (pfn - pfn_s)))
++ printk(KERN_DEBUG "1-1 mapping on %lx->%lx\n", pfn_s, pfn);
++
++ return pfn - pfn_s;
++}
++
+ /* Try to install p2m mapping; fail if intermediate bits missing */
+ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+ {
+@@ -378,6 +473,20 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+ mididx = p2m_mid_index(pfn);
+ idx = p2m_index(pfn);
+
++	/* For sparse holes where the p2m leaf has a real PFN along with
++ * PCI holes, stick in the PFN as the MFN value.
++ */
++ if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT)) {
++ if (p2m_top[topidx][mididx] == p2m_identity)
++ return true;
++
++ /* Swap over from MISSING to IDENTITY if needed. */
++ if (p2m_top[topidx][mididx] == p2m_missing) {
++ p2m_top[topidx][mididx] = p2m_identity;
++ return true;
++ }
++ }
++
+ if (p2m_top[topidx][mididx] == p2m_missing)
+ return mfn == INVALID_P2M_ENTRY;
--
1.7.4
-From da36c7662d9738ce44c37b4f1f41c045c64d6914 Mon Sep 17 00:00:00 2001
+From 57fafbb798ffde486606244c3392ed6f63050222 Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Tue, 22 Dec 2009 13:53:41 -0500
-Subject: [PATCH 021/244] xen-pciback: Fix compiler warning in pci_stub.c.
+Date: Wed, 5 Jan 2011 15:46:31 -0500
+Subject: [PATCH 061/203] xen/mmu: Set _PAGE_IOMAP if PFN is an identity PFN.
-warning: the frame size of 1036 bytes is larger than 1024 bytes
+If we find that the PFN is within the P2M as an identity
+PFN, make sure to tack on the _PAGE_IOMAP flag.
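+
+(Aside, not from the patch: since INVALID_P2M_ENTRY is ~0UL,
+(INVALID_P2M_ENTRY & IDENTITY_FRAME_BIT) is always non-zero, which is why
+the hunk below tests for INVALID_P2M_ENTRY before looking at the identity
+bit.)
+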
+Reviewed-by: Ian Campbell <ian.campbell(a)citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
---
- drivers/xen/pciback/pci_stub.c | 5 +++--
- 1 files changed, 3 insertions(+), 2 deletions(-)
+ arch/x86/xen/mmu.c | 18 ++++++++++++++++--
+ 1 files changed, 16 insertions(+), 2 deletions(-)
-diff --git a/drivers/xen/pciback/pci_stub.c b/drivers/xen/pciback/pci_stub.c
-index 30e7b59..0b5a16b 100644
---- a/drivers/xen/pciback/pci_stub.c
-+++ b/drivers/xen/pciback/pci_stub.c
-@@ -496,16 +496,17 @@ static const struct pci_device_id pcistub_ids[] = {
- {0,},
- };
-
-+#define PCI_NODENAME_MAX 40
- static void kill_domain_by_device(struct pcistub_device *psdev)
- {
- struct xenbus_transaction xbt;
- int err;
-- char nodename[1024];
-+ char nodename[PCI_NODENAME_MAX];
+diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
+index 0180ae8..9c9e076 100644
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -416,8 +416,12 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
+ if (val & _PAGE_PRESENT) {
+ unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
+ pteval_t flags = val & PTE_FLAGS_MASK;
+- unsigned long mfn = pfn_to_mfn(pfn);
++ unsigned long mfn;
- if (!psdev)
- dev_err(&psdev->dev->dev,
- "device is NULL when do AER recovery/kill_domain\n");
-- sprintf(nodename, "/local/domain/0/backend/pci/%d/0",
-+ snprintf(nodename, PCI_NODENAME_MAX, "/local/domain/0/backend/pci/%d/0",
- psdev->pdev->xdev->otherend_id);
- nodename[strlen(nodename)] = '\0';
++ if (!xen_feature(XENFEAT_auto_translated_physmap))
++ mfn = get_phys_to_machine(pfn);
++ else
++ mfn = pfn;
+ /*
+ * If there's no mfn for the pfn, then just create an
+ * empty non-present pte. Unfortunately this loses
+@@ -427,8 +431,18 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
+ if (unlikely(mfn == INVALID_P2M_ENTRY)) {
+ mfn = 0;
+ flags = 0;
++ } else {
++ /*
++ * Paramount to do this test _after_ the
++ * INVALID_P2M_ENTRY as INVALID_P2M_ENTRY &
++ * IDENTITY_FRAME_BIT resolves to true.
++ */
++ mfn &= ~FOREIGN_FRAME_BIT;
++ if (mfn & IDENTITY_FRAME_BIT) {
++ mfn &= ~IDENTITY_FRAME_BIT;
++ flags |= _PAGE_IOMAP;
++ }
+ }
+-
+ val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
+ }
--
1.7.4
-From 83484f34b2cc42807c71514fbabbd40e281ec094 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Tue, 22 Dec 2009 13:53:42 -0500
-Subject: [PATCH 022/244] xen-pciback: Fix compile warning in vpci.c
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-warning: ‘func’ may be used uninitialized in this function
+From e6f1e7aabca9f9f99ef4fc1c752497b3efa7896c Mon Sep 17 00:00:00 2001
+From: Stefano Stabellini <stefano.stabellini(a)eu.citrix.com>
+Date: Mon, 31 Jan 2011 15:18:10 +0000
+Subject: [PATCH 062/203] x86/mm/init: respect memblock reserved regions when destroying mappings
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
----
- drivers/xen/pciback/vpci.c | 2 +-
- 1 files changed, 1 insertions(+), 1 deletions(-)
-diff --git a/drivers/xen/pciback/vpci.c b/drivers/xen/pciback/vpci.c
-index 721b81b..2857ab8 100644
---- a/drivers/xen/pciback/vpci.c
-+++ b/drivers/xen/pciback/vpci.c
-@@ -65,7 +65,7 @@ static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
- int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
- int devid, publish_pci_dev_cb publish_cb)
- {
-- int err = 0, slot, func;
-+ int err = 0, slot, func = -1;
- struct pci_dev_entry *t, *dev_entry;
- struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
- unsigned long flags;
---
-1.7.4
+In init_memory_mapping we are destroying all the mappings between
+_brk_end and _end, no matter if some memory areas in that range have
+been reserved using memblock_x86_reserve_range.
+Besides, if _end is not pmd-aligned we might destroy the
+mappings for valid memory between _end and the following pmd.
+In order to avoid this problem, before clearing any pmds we check if the
+corresponding memory area has been reserved and we only destroy the
+mapping if it hasn't.
-From 5612e6358835700c49d8be5671823614ace30c94 Mon Sep 17 00:00:00 2001
-From: Ian Campbell <ijc(a)hellion.org.uk>
-Date: Thu, 3 Dec 2009 21:56:20 +0000
-Subject: [PATCH 023/244] xen: rename pciback module to xen-pciback.
+We found this problem because under Xen we have a valid mapping at _end,
+and if _end is not pmd-aligned the current code destroys the initial
+part of it.
-pciback is rather generic for a modular distro style kernel.
+In practice this fix does not have any impact on native.
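+
+(Condensed, the new loop only clears a pmd when memblock reports the whole
+PMD_SIZE range at that address as unreserved -- a sketch of the hunk below:)
+
+	memblock_addr = memblock_x86_find_in_range_size(__pa(addr),
+							&size, PMD_SIZE);
+	if (memblock_addr == (u64) __pa(addr) && size >= PMD_SIZE)
+		pmd_clear(pmd);	/* nothing reserved here; safe to unmap */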
-Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
-Cc: Jeremy Fitzhardinge <jeremy(a)goop.org>
-Cc: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+Signed-off-by: Stefano Stabellini <stefano.stabellini(a)eu.citrix.com>
---
- drivers/xen/pciback/Makefile | 24 ++++++++++++------------
- 1 files changed, 12 insertions(+), 12 deletions(-)
+ arch/x86/mm/init.c | 13 +++++++++++--
+ 1 files changed, 11 insertions(+), 2 deletions(-)
-diff --git a/drivers/xen/pciback/Makefile b/drivers/xen/pciback/Makefile
-index 106dae7..38bc123 100644
---- a/drivers/xen/pciback/Makefile
-+++ b/drivers/xen/pciback/Makefile
-@@ -1,16 +1,16 @@
--obj-$(CONFIG_XEN_PCIDEV_BACKEND) += pciback.o
-+obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback.o
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 947f42a..66637bd 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -283,6 +283,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
+ if (!after_bootmem && !start) {
+ pud_t *pud;
+ pmd_t *pmd;
++ unsigned long addr;
++ u64 size, memblock_addr;
--pciback-y := pci_stub.o pciback_ops.o xenbus.o
--pciback-y += conf_space.o conf_space_header.o \
-- conf_space_capability.o \
-- conf_space_capability_vpd.o \
-- conf_space_capability_pm.o \
-- conf_space_quirks.o
--pciback-$(CONFIG_PCI_MSI) += conf_space_capability_msi.o
--pciback-$(CONFIG_XEN_PCIDEV_BACKEND_VPCI) += vpci.o
--pciback-$(CONFIG_XEN_PCIDEV_BACKEND_SLOT) += slot.o
--pciback-$(CONFIG_XEN_PCIDEV_BACKEND_PASS) += passthrough.o
--pciback-$(CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER) += controller.o
-+xen-pciback-y := pci_stub.o pciback_ops.o xenbus.o
-+xen-pciback-y += conf_space.o conf_space_header.o \
-+ conf_space_capability.o \
-+ conf_space_capability_vpd.o \
-+ conf_space_capability_pm.o \
-+ conf_space_quirks.o
-+xen-pciback-$(CONFIG_PCI_MSI) += conf_space_capability_msi.o
-+xen-pciback-$(CONFIG_XEN_PCIDEV_BACKEND_VPCI) += vpci.o
-+xen-pciback-$(CONFIG_XEN_PCIDEV_BACKEND_SLOT) += slot.o
-+xen-pciback-$(CONFIG_XEN_PCIDEV_BACKEND_PASS) += passthrough.o
-+xen-pciback-$(CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER) += controller.o
+ mmu_cr4_features = read_cr4();
- ifeq ($(CONFIG_XEN_PCIDEV_BE_DEBUG),y)
- EXTRA_CFLAGS += -DDEBUG
+@@ -291,11 +293,18 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
+ * located on different 2M pages. cleanup_highmap(), however,
+ * can only consider _end when it runs, so destroy any
+ * mappings beyond _brk_end here.
++ * Respect memblock reserved regions.
+ */
+ pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
+ pmd = pmd_offset(pud, _brk_end - 1);
+- while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
+- pmd_clear(pmd);
++ addr = (_brk_end + PMD_SIZE - 1) & PMD_MASK;
++ while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1)) {
++ memblock_addr = memblock_x86_find_in_range_size(__pa(addr),
++ &size, PMD_SIZE);
++ if (memblock_addr == (u64) __pa(addr) && size >= PMD_SIZE)
++ pmd_clear(pmd);
++ addr += PMD_SIZE;
++ }
+ }
+ #endif
+ __flush_tlb_all();
--
1.7.4
-From 245a9ec5ef1f9c8a6bc6b5c0ac1bb616c3c8c979 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Wed, 9 Dec 2009 17:43:15 -0500
-Subject: [PATCH 024/244] xen-pciback: Register the owner (domain) of the PCI device.
+From 203b40c0915fc78e9ca1524c887623e7dc86422d Mon Sep 17 00:00:00 2001
+From: Bastian Blank <waldi(a)debian.org>
+Date: Thu, 29 Jul 2010 17:30:18 +0200
+Subject: [PATCH 063/203] xen: netback: Fix null-pointer access in netback_uevent
-When the front-end and back-end start negotiating we register
-the domain that will use the PCI device. Furthermore during shutdown
-of guest or unbinding of the PCI device (and unloading of module)
-from pciback we unregister the domain owner.
+The uevent method of Xen netback does not check if the network
+device is already set up and tries to dereference a null pointer if not.
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
+Signed-off-by: Bastian Blank <waldi(a)debian.org>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
---
- drivers/xen/pciback/pci_stub.c | 2 ++
- drivers/xen/pciback/xenbus.c | 13 +++++++++++++
- 2 files changed, 15 insertions(+), 0 deletions(-)
+ drivers/xen/netback/xenbus.c | 10 ++--------
+ 1 files changed, 2 insertions(+), 8 deletions(-)
-diff --git a/drivers/xen/pciback/pci_stub.c b/drivers/xen/pciback/pci_stub.c
-index 0b5a16b..02178e2 100644
---- a/drivers/xen/pciback/pci_stub.c
-+++ b/drivers/xen/pciback/pci_stub.c
-@@ -90,6 +90,8 @@ static void pcistub_device_release(struct kref *kref)
-
- dev_dbg(&psdev->dev->dev, "pcistub_device_release\n");
-
-+ xen_unregister_device_domain_owner(psdev->dev);
-+
- /* Clean-up the device */
- pciback_reset_device(psdev->dev);
- pciback_config_free_dyn_fields(psdev->dev);
-diff --git a/drivers/xen/pciback/xenbus.c b/drivers/xen/pciback/xenbus.c
-index af6c25a..d448bf5 100644
---- a/drivers/xen/pciback/xenbus.c
-+++ b/drivers/xen/pciback/xenbus.c
-@@ -10,6 +10,7 @@
- #include <linux/workqueue.h>
- #include <xen/xenbus.h>
- #include <xen/events.h>
-+#include <asm/xen/pci.h>
- #include <linux/workqueue.h>
- #include "pciback.h"
+diff --git a/drivers/xen/netback/xenbus.c b/drivers/xen/netback/xenbus.c
+index 17ff5cf..1fec65a 100644
+--- a/drivers/xen/netback/xenbus.c
++++ b/drivers/xen/netback/xenbus.c
+@@ -154,17 +154,11 @@ fail:
+ */
+ static int netback_uevent(struct xenbus_device *xdev, struct kobj_uevent_env *env)
+ {
+- struct backend_info *be;
+- struct xen_netif *netif;
++ struct backend_info *be = dev_get_drvdata(&xdev->dev);
+ char *val;
-@@ -221,6 +222,15 @@ static int pciback_export_device(struct pciback_device *pdev,
- if (err)
- goto out;
+ DPRINTK("netback_uevent");
-+ dev_dbg(&dev->dev, "registering for %d\n", pdev->xdev->otherend_id);
-+ if (xen_register_device_domain_owner(dev,
-+ pdev->xdev->otherend_id) != 0) {
-+ dev_err(&dev->dev, "device has been assigned to another " \
-+ "domain! Over-writting the ownership, but beware.\n");
-+ xen_unregister_device_domain_owner(dev);
-+ xen_register_device_domain_owner(dev, pdev->xdev->otherend_id);
-+ }
-+
- /* TODO: It'd be nice to export a bridge and have all of its children
- * get exported with it. This may be best done in xend (which will
- * have to calculate resource usage anyway) but we probably want to
-@@ -251,6 +261,9 @@ static int pciback_remove_device(struct pciback_device *pdev,
- goto out;
+- be = dev_get_drvdata(&xdev->dev);
+- if (!be)
+- return 0;
+- netif = be->netif;
+-
+ val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
+ if (IS_ERR(val)) {
+ int err = PTR_ERR(val);
+@@ -179,7 +173,7 @@ static int netback_uevent(struct xenbus_device *xdev, struct kobj_uevent_env *en
+ kfree(val);
}
-+ dev_dbg(&dev->dev, "unregistering for %d\n", pdev->xdev->otherend_id);
-+ xen_unregister_device_domain_owner(dev);
-+
- pciback_release_pci_dev(pdev, dev);
+- if (add_uevent_var(env, "vif=%s", netif->dev->name))
++ if (be && be->netif && add_uevent_var(env, "vif=%s", be->netif->dev->name))
+ return -ENOMEM;
- out:
+ return 0;
--
1.7.4
-From cb6c976606d16119e8608c8bcc1ef9265881dd7f Mon Sep 17 00:00:00 2001
-From: Zhao, Yu <yu.zhao(a)intel.com>
-Date: Wed, 3 Mar 2010 13:27:55 -0500
-Subject: [PATCH 025/244] xen-pciback: guest SR-IOV support for PV guest
+From bee16269e5ba79706bdae554013cd14285b4f1ad Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Fri, 30 Jul 2010 15:16:47 +0100
+Subject: [PATCH 064/203] xen: netback: check if foreign pages are actually netback-created foreign pages.
-These changes are for PV guest to use Virtual Function. Because the VF's
-vendor, device registers in cfg space are 0xffff, which are invalid and
-ignored by PCI device scan. Values in 'struct pci_dev' are fixed up by
-SR-IOV code, and using these values will present correct VID and DID to
-PV guest kernel.
+020ba906 "xen/netback: Multiple tasklets support." changed
+netbk_gop_frag_copy to attempt to look up a pending_tx_info for any
+foreign page, regardless of whether the page was a netback-foreign
+page.
-And command registers in the cfg space are read only 0, which means we
-have to emulate MMIO enable bit (VF only uses MMIO resource) so PV
-kernel can work properly.
+In the case of non-netback pages this can lead to dereferencing a NULL
+src_pend->netif.
-Acked-by: jbeulich(a)novell.com
+Restore the behaviour of netif_page_index prior to a3031942
+"xen/netback: Introduce a new struct type page_ext" by performing
+tests to ensure that the page is a netback page, and extend the same
+checks to netif_page_group.
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
+Actually combine netif_page_{index,group} into a single function
+since they are always called together and it saves duplicating all the
+checks.
+
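+A minimal sketch of the intended call pattern (names as in the hunk below;
+the pending_tx_info lookup is illustrative only):
+
+	unsigned int group, idx;
+
+	if (netif_get_page_ext(page, &group, &idx)) {
+		/* A netback-created foreign page: group and idx have been
+		 * validated, so these lookups are safe. */
+		struct xen_netbk *netbk = &xen_netbk[group];
+		struct pending_tx_info *src_pend = &netbk->pending_tx_info[idx];
+	}
+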
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+Cc: Jeremy Fitzhardinge <jeremy(a)goop.org>
+Cc: Xu, Dongxiao <dongxiao.xu(a)intel.com>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
---
- drivers/xen/pciback/conf_space_header.c | 71 ++++++++++++++++++++++++++++--
- 1 files changed, 66 insertions(+), 5 deletions(-)
+ drivers/xen/netback/netback.c | 56 ++++++++++++++++++++++++++++------------
+ 1 files changed, 39 insertions(+), 17 deletions(-)
-diff --git a/drivers/xen/pciback/conf_space_header.c b/drivers/xen/pciback/conf_space_header.c
-index 3ae7da1..1f4f86e 100644
---- a/drivers/xen/pciback/conf_space_header.c
-+++ b/drivers/xen/pciback/conf_space_header.c
-@@ -18,6 +18,25 @@ struct pci_bar_info {
- #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
- #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 911c85b..95df223 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -89,18 +89,37 @@ static inline void netif_set_page_ext(struct page *pg, unsigned int group,
+ pg->mapping = ext.mapping;
+ }
-+static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
-+{
-+ int i;
-+ int ret;
+-static inline unsigned int netif_page_group(const struct page *pg)
++static inline int netif_get_page_ext(struct page *pg, unsigned int *_group, unsigned int *_idx)
+ {
+ union page_ext ext = { .mapping = pg->mapping };
++ struct xen_netbk *netbk;
++ unsigned int group, idx;
+
+- return ext.e.group - 1;
+-}
++ if (!PageForeign(pg))
++ return 0;
+
+-static inline unsigned int netif_page_index(const struct page *pg)
+-{
+- union page_ext ext = { .mapping = pg->mapping };
++ group = ext.e.group - 1;
+
-+ ret = pciback_read_config_word(dev, offset, value, data);
-+ if (!atomic_read(&dev->enable_cnt))
-+ return ret;
++ if (group < 0 || group >= xen_netbk_group_nr)
++ return 0;
+
-+ for (i = 0; i < PCI_ROM_RESOURCE; i++) {
-+ if (dev->resource[i].flags & IORESOURCE_IO)
-+ *value |= PCI_COMMAND_IO;
-+ if (dev->resource[i].flags & IORESOURCE_MEM)
-+ *value |= PCI_COMMAND_MEMORY;
-+ }
++ netbk = &xen_netbk[group];
+
-+ return ret;
-+}
++ if (netbk->mmap_pages == NULL)
++ return 0;
+
+- return ext.e.idx;
++ idx = ext.e.idx;
+
- static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
- {
- int err;
-@@ -142,10 +161,26 @@ static inline void read_dev_bar(struct pci_dev *dev,
- struct pci_bar_info *bar_info, int offset,
- u32 len_mask)
- {
-- pci_read_config_dword(dev, offset, &bar_info->val);
-- pci_write_config_dword(dev, offset, len_mask);
-- pci_read_config_dword(dev, offset, &bar_info->len_val);
-- pci_write_config_dword(dev, offset, bar_info->val);
-+ int pos;
-+ struct resource *res = dev->resource;
++ if ((idx < 0) || (idx >= MAX_PENDING_REQS))
++ return 0;
+
-+ if (offset == PCI_ROM_ADDRESS || offset == PCI_ROM_ADDRESS1)
-+ pos = PCI_ROM_RESOURCE;
-+ else {
-+ pos = (offset - PCI_BASE_ADDRESS_0) / 4;
-+ if (pos && ((res[pos - 1].flags & (PCI_BASE_ADDRESS_SPACE |
-+ PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
-+ (PCI_BASE_ADDRESS_SPACE_MEMORY |
-+ PCI_BASE_ADDRESS_MEM_TYPE_64))) {
-+ bar_info->val = res[pos - 1].start >> 32;
-+ bar_info->len_val = res[pos - 1].end >> 32;
-+ return;
-+ }
-+ }
++ if (netbk->mmap_pages[idx] != pg)
++ return 0;
+
-+ bar_info->val = res[pos].start |
-+ (res[pos].flags & PCI_REGION_FLAG_MASK);
-+ bar_info->len_val = res[pos].end - res[pos].start + 1;
++ *_group = group;
++ *_idx = idx;
++
++ return 1;
}
- static void *bar_init(struct pci_dev *dev, int offset)
-@@ -186,6 +221,22 @@ static void bar_release(struct pci_dev *dev, int offset, void *data)
- kfree(data);
- }
+ /*
+@@ -386,8 +405,12 @@ static void netbk_gop_frag_copy(struct xen_netif *netif,
+ {
+ struct gnttab_copy *copy_gop;
+ struct netbk_rx_meta *meta;
+- int group = netif_page_group(page);
+- int idx = netif_page_index(page);
++ /*
++	 * These variables are used iff netif_get_page_ext returns true,
++ * in which case they are guaranteed to be initialized.
++ */
++ unsigned int uninitialized_var(group), uninitialized_var(idx);
++ int foreign = netif_get_page_ext(page, &group, &idx);
+ unsigned long bytes;
-+static int pciback_read_vendor(struct pci_dev *dev, int offset,
-+ u16 *value, void *data)
-+{
-+ *value = dev->vendor;
-+
-+ return 0;
-+}
-+
-+static int pciback_read_device(struct pci_dev *dev, int offset,
-+ u16 *value, void *data)
-+{
-+ *value = dev->device;
+ /* Data must not cross a page boundary. */
+@@ -445,7 +468,7 @@ static void netbk_gop_frag_copy(struct xen_netif *netif,
+
+ copy_gop = npo->copy + npo->copy_prod++;
+ copy_gop->flags = GNTCOPY_dest_gref;
+- if (PageForeign(page)) {
++ if (foreign) {
+ struct xen_netbk *netbk = &xen_netbk[group];
+ struct pending_tx_info *src_pend;
+
+@@ -1535,14 +1558,13 @@ static void netif_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+
+ static void netif_page_release(struct page *page, unsigned int order)
+ {
+- int group = netif_page_group(page);
+- int idx = netif_page_index(page);
+- struct xen_netbk *netbk = &xen_netbk[group];
++ unsigned int group, idx;
++ int foreign = netif_get_page_ext(page, &group, &idx);
+
-+ return 0;
-+}
++ BUG_ON(!foreign);
+ BUG_ON(order);
+- BUG_ON(group < 0 || group >= xen_netbk_group_nr);
+- BUG_ON(idx < 0 || idx >= MAX_PENDING_REQS);
+- BUG_ON(netbk->mmap_pages[idx] != page);
+- netif_idx_release(netbk, idx);
+
- static int interrupt_read(struct pci_dev *dev, int offset, u8 * value,
- void *data)
- {
-@@ -213,9 +264,19 @@ out:
++ netif_idx_release(&xen_netbk[group], idx);
+ }
- static const struct config_field header_common[] = {
- {
-+ .offset = PCI_VENDOR_ID,
-+ .size = 2,
-+ .u.w.read = pciback_read_vendor,
-+ },
-+ {
-+ .offset = PCI_DEVICE_ID,
-+ .size = 2,
-+ .u.w.read = pciback_read_device,
-+ },
-+ {
- .offset = PCI_COMMAND,
- .size = 2,
-- .u.w.read = pciback_read_config_word,
-+ .u.w.read = command_read,
- .u.w.write = command_write,
- },
- {
+ irqreturn_t netif_be_int(int irq, void *dev_id)
--
1.7.4
-From 1d77305c7900f3b6ec5d403d9aba6f0034b0112e Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Wed, 3 Mar 2010 13:38:43 -0500
-Subject: [PATCH 026/244] xen-pciback: Disable MSI/MSI-X when reseting device
-
-In cases where the guest is abruptly killed and has not disabled
-MSI/MSI-X interrupts we want to do that.
+From 3f33055e8af9d74e35670c52438e190d54ac5f9e Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Fri, 30 Jul 2010 15:16:46 +0100
+Subject: [PATCH 065/203] xen: netback: do not unleash netback threads until initialisation is complete
-Otherwise when the guest is started up and enables MSI, we would
-get a WARN() that the device already had been enabled.
+Otherwise netbk_action_thread can reference &netbk->net_schedule_list
+(via tx_work_todo) before it is initialised. Until now it was zeroed,
+which is probably safe but not exactly robust.
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+Cc: Jeremy Fitzhardinge <jeremy(a)goop.org>
+Cc: Xu, Dongxiao <dongxiao.xu(a)intel.com>
+Cc: Paul Durrant <Paul.Durrant(a)citrix.com>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
---
- drivers/xen/pciback/pciback_ops.c | 8 ++++++++
- 1 files changed, 8 insertions(+), 0 deletions(-)
+ drivers/xen/netback/netback.c | 4 +++-
+ 1 files changed, 3 insertions(+), 1 deletions(-)
-diff --git a/drivers/xen/pciback/pciback_ops.c b/drivers/xen/pciback/pciback_ops.c
-index 2b9a93e..011db67 100644
---- a/drivers/xen/pciback/pciback_ops.c
-+++ b/drivers/xen/pciback/pciback_ops.c
-@@ -23,6 +23,14 @@ void pciback_reset_device(struct pci_dev *dev)
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 95df223..2646383 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -1782,7 +1782,6 @@ static int __init netback_init(void)
- /* Disable devices (but not bridges) */
- if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
-+#ifdef CONFIG_PCI_MSI
-+ /* The guest could have been abruptly killed without
-+ * disabling MSI/MSI-X interrupts.*/
-+ if (dev->msix_enabled)
-+ pci_disable_msix(dev);
-+ if (dev->msi_enabled)
-+ pci_disable_msi(dev);
-+#endif
- pci_disable_device(dev);
+ if (!IS_ERR(netbk->kthread.task)) {
+ kthread_bind(netbk->kthread.task, group);
+- wake_up_process(netbk->kthread.task);
+ } else {
+ printk(KERN_ALERT
+ "kthread_run() fails at netback\n");
+@@ -1808,6 +1807,9 @@ static int __init netback_init(void)
+ spin_lock_init(&netbk->net_schedule_list_lock);
- pci_write_config_word(dev, PCI_COMMAND, 0);
+ atomic_set(&netbk->netfront_count, 0);
++
++ if (MODPARM_netback_kthread)
++ wake_up_process(netbk->kthread.task);
+ }
+
+ netbk_copy_skb_mode = NETBK_DONT_COPY_SKB;
--
1.7.4
-From c89edb63b60166fe354493dd465cf5662b2c077d Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Mon, 12 Apr 2010 11:46:00 -0400
-Subject: [PATCH 027/244] xen-pciback: Allocate IRQ handler for device that is shared with guest.
+From 329a17f3dbb9c127d431d09df0ed63ec28b8c702 Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ijc(a)hellion.org.uk>
+Date: Thu, 2 Sep 2010 14:36:40 +0100
+Subject: [PATCH 066/203] xen: netback: save interrupt state in add_to_net_schedule_list_tail
-If the pciback module is loaded with fake_irq_handler=1 we install
-for all devices that are to be passed to the guest domain a IRQ handler.
-The IRQ handler will return IRQ_HANDLED or IRQ_NONE depending on
-on the ack_intr flag.
+add_to_net_schedule_list_tail is called from both hard interrupt context
+(netif_be_int) and soft interrupt/process context
+(netif_schedule_work), so use the interrupt-state-saving spinlock
+variants.
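+
+(For reference, the pattern the patch switches to -- a generic sketch:)
+
+	unsigned long flags;
+
+	spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
+	/* critical section, now safe to enter from any context */
+	spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);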
-The trigger to install this IRQ handler is when the enable_isr flag
-is set.
+Fixes:
+ ------------[ cut here ]------------
+ WARNING: at kernel/lockdep.c:2323 trace_hardirqs_on_caller+0xef/0x1a0()
+ Hardware name: PowerEdge 860
+ Modules linked in: rtc_cmos rtc_core rtc_lib
+ Pid: 16, comm: xenwatch Not tainted 2.6.32.18-x86_32p-xen0-00850-ge6b9b2c #98
+ Call Trace:
+ [<c103951c>] warn_slowpath_common+0x6c/0xc0
+ [<c1039585>] warn_slowpath_null+0x15/0x20
+ [<c105f60f>] trace_hardirqs_on_caller+0xef/0x1a0
+ [<c105f6cb>] trace_hardirqs_on+0xb/0x10
+ [<c136cc72>] _spin_unlock_irq+0x22/0x40
+ [<c11ab9ef>] add_to_net_schedule_list_tail+0x5f/0xb0
+ [<c11aba6b>] netif_be_int+0x2b/0x120
+ [<c106dd8e>] handle_IRQ_event+0x2e/0xe0
+ [<c106f98e>] handle_level_irq+0x6e/0xf0
+ [<c1197cdf>] __xen_evtchn_do_upcall+0x16f/0x190
+ [<c11981b8>] xen_evtchn_do_upcall+0x28/0x40
+ [<c100b487>] xen_do_upcall+0x7/0xc
+ [<c119bcf9>] xs_talkv+0x59/0x1a0
+ [<c119bf6a>] xs_single+0x3a/0x50
+ [<c119c6f9>] xenbus_read+0x39/0x60
+ [<c11adf77>] frontend_changed+0x3e7/0x6a0
+ [<c119d35a>] xenbus_otherend_changed+0x8a/0xa0
+ [<c119d572>] frontend_changed+0x12/0x20
+ [<c119b9dc>] xenwatch_thread+0x7c/0x140
+ [<c104ea74>] kthread+0x74/0x80
+ [<c100b433>] kernel_thread_helper+0x7/0x10
+ ---[ end trace 48d73949a8e0909a ]---
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
---
- drivers/xen/pciback/pci_stub.c | 13 ++++-
- drivers/xen/pciback/pciback.h | 12 ++++-
- drivers/xen/pciback/pciback_ops.c | 95 ++++++++++++++++++++++++++++++++++++-
- 3 files changed, 115 insertions(+), 5 deletions(-)
+ drivers/xen/netback/netback.c | 6 ++++--
+ 1 files changed, 4 insertions(+), 2 deletions(-)
-diff --git a/drivers/xen/pciback/pci_stub.c b/drivers/xen/pciback/pci_stub.c
-index 02178e2..45bbe99 100644
---- a/drivers/xen/pciback/pci_stub.c
-+++ b/drivers/xen/pciback/pci_stub.c
-@@ -21,6 +21,8 @@
- #include "conf_space.h"
- #include "conf_space_quirks.h"
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 2646383..1d080f6 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -787,17 +787,19 @@ static void remove_from_net_schedule_list(struct xen_netif *netif)
-+#define DRV_NAME "pciback"
+ static void add_to_net_schedule_list_tail(struct xen_netif *netif)
+ {
++ unsigned long flags;
+
- static char *pci_devs_to_hide;
- wait_queue_head_t aer_wait_queue;
- /*Add sem for sync AER handling and pciback remove/reconfigue ops,
-@@ -290,13 +292,20 @@ static int __devinit pcistub_init_device(struct pci_dev *dev)
- * would need to be called somewhere to free the memory allocated
- * here and then to call kfree(pci_get_drvdata(psdev->dev)).
- */
-- dev_data = kzalloc(sizeof(*dev_data), GFP_ATOMIC);
-+ dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]")
-+ + strlen(pci_name(dev)) + 1, GFP_ATOMIC);
- if (!dev_data) {
- err = -ENOMEM;
- goto out;
- }
- pci_set_drvdata(dev, dev_data);
+ struct xen_netbk *netbk = &xen_netbk[netif->group];
+ if (__on_net_schedule_list(netif))
+ return;
-+ /*
-+ * Setup name for fake IRQ handler. It will only be enabled
-+ * once the device is turned on by the guest.
-+ */
-+ sprintf(dev_data->irq_name, DRV_NAME "[%s]", pci_name(dev));
-+
- dev_dbg(&dev->dev, "initializing config\n");
+- spin_lock_irq(&netbk->net_schedule_list_lock);
++ spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
+ if (!__on_net_schedule_list(netif) &&
+ likely(netif_schedulable(netif))) {
+ list_add_tail(&netif->list, &netbk->net_schedule_list);
+ netif_get(netif);
+ }
+- spin_unlock_irq(&netbk->net_schedule_list_lock);
++ spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);
+ }
- init_waitqueue_head(&aer_wait_queue);
-@@ -837,7 +846,7 @@ static struct pci_error_handlers pciback_error_handler = {
- */
+ void netif_schedule_work(struct xen_netif *netif)
+--
+1.7.4
+
+
+From 7edc1ededaaa953877d2a052886db4ea2f69effe Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Fri, 8 Oct 2010 17:11:51 +0100
+Subject: [PATCH 067/203] xen: netback: increase size of rx_meta array.
+
+We can end up needing as many of these as we have grant copy operations, so
+increase the array size to match.
+
+Crash observed on XenServer:
+kernel: ------------[ cut here ]------------
+kernel: kernel BUG at drivers/xen/netback/netback.c:834!
+kernel: invalid opcode: 0000 [#1] SMP
+kernel: last sysfs file: /sys/devices/xen-backend/vbd-10-768/statistics/rd_usecs
+kernel: Process netback (pid: 1413, ti=ec8a4000 task=ed0a6b70 task.ti=ec8a4000)
+kernel: Stack: 00000000 00000612 00000001 00000000 00020000 00000000 ecfbe000 00000000
+kernel: ec8a5f80 ec8a5f98 ec8a5fac 00000000 c0537220 c0539220 00000000 c0534220
+kernel: cd7afaa0 cd7afaa0 0000000c 00000014 062de396 00000001 00000001 00000014
+kernel: Call Trace:
+kernel: [<c0285f10>] ? netbk_action_thread+0x0/0x1fe0
+kernel: [<c013daf2>] ? kthread+0x42/0x70
+kernel: [<c013dab0>] ? kthread+0x0/0x70
+kernel: [<c010569b>] ? kernel_thread_helper+0x7/0x10
+kernel: =======================
+kernel: Code: 00 00 c7 42 08 20 82 53 c0 8b 85 e4 fe ff ff c7 42 10 00 00 00 00 \
+ c7 42 14 f0 7f 00 00 89 42 0c 8b 8d ec fe ff ff e9 3e e9 ff ff <0f> \
+ 0b eb fe 0f 0b eb fe 0f 0b eb fe 0f 0b eb fe 31 c0 e8 bf 31
+kernel: EIP: [<c028790a>] netbk_action_thread+0x19fa/0x1fe0 SS:ESP 0069:ec8a5d98
+
+Corresponding to
+ BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
+---
+ drivers/xen/netback/common.h | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index a5f3759..ce0041a 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -319,7 +319,7 @@ struct xen_netbk {
+ struct gnttab_copy grant_copy_op[2*NET_RX_RING_SIZE];
+ unsigned char rx_notify[NR_IRQS];
+ u16 notify_list[NET_RX_RING_SIZE];
+- struct netbk_rx_meta meta[NET_RX_RING_SIZE];
++ struct netbk_rx_meta meta[2*NET_RX_RING_SIZE];
+ };
- static struct pci_driver pciback_pci_driver = {
-- .name = "pciback",
-+ .name = DRV_NAME,
- .id_table = pcistub_ids,
- .probe = pcistub_probe,
- .remove = pcistub_remove,
-diff --git a/drivers/xen/pciback/pciback.h b/drivers/xen/pciback/pciback.h
-index 98e2912..9d1b0a6 100644
---- a/drivers/xen/pciback/pciback.h
-+++ b/drivers/xen/pciback/pciback.h
-@@ -45,8 +45,13 @@ struct pciback_device {
-
- struct pciback_dev_data {
- struct list_head config_fields;
-- int permissive;
-- int warned_on_write;
-+ unsigned int permissive : 1;
-+ unsigned int warned_on_write : 1;
-+ unsigned int enable_intx : 1;
-+ unsigned int isr_on : 1; /* Whether the IRQ handler is installed. */
-+ unsigned int ack_intr : 1; /* .. and ACK-ing */
-+ unsigned int irq; /* Saved in case device transitions to MSI/MSI-X */
-+ char irq_name[0]; /* pciback[000:04:00.0] */
- };
-
- /* Used by XenBus and pciback_ops.c */
-@@ -131,3 +136,6 @@ extern int verbose_request;
- void test_and_schedule_op(struct pciback_device *pdev);
- #endif
-
-+/* Handles shared IRQs that can to device domain and control domain. */
-+void pciback_irq_handler(struct pci_dev *dev, int reset);
-+irqreturn_t pciback_guest_interrupt(int irq, void *dev_id);
-diff --git a/drivers/xen/pciback/pciback_ops.c b/drivers/xen/pciback/pciback_ops.c
-index 011db67..cb54893 100644
---- a/drivers/xen/pciback/pciback_ops.c
-+++ b/drivers/xen/pciback/pciback_ops.c
-@@ -13,6 +13,78 @@
- int verbose_request;
- module_param(verbose_request, int, 0644);
-
-+/* Ensure a device is has the fake IRQ handler "turned on/off" and is
-+ * ready to be exported. This MUST be run after pciback_reset_device
-+ * which does the actual PCI device enable/disable.
-+ */
-+void pciback_control_isr(struct pci_dev *dev, int reset)
-+{
-+ struct pciback_dev_data *dev_data;
-+ int rc;
-+ int enable = 0;
-+
-+ dev_data = pci_get_drvdata(dev);
-+ if (!dev_data)
-+ return;
-+
-+ /* We don't deal with bridges */
-+ if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
-+ return;
-+
-+ if (reset) {
-+ dev_data->enable_intx = 0;
-+ dev_data->ack_intr = 0;
-+ }
-+ enable = dev_data->enable_intx;
-+
-+ /* Asked to disable, but ISR isn't runnig */
-+ if (!enable && !dev_data->isr_on)
-+ return;
-+
-+ /* Squirrel away the IRQs in the dev_data. We need this
-+ * b/c when device transitions to MSI, the dev->irq is
-+ * overwritten with the MSI vector.
-+ */
-+ if (enable)
-+ dev_data->irq = dev->irq;
-+
-+ dev_dbg(&dev->dev, "%s: #%d %s %s%s %s-> %s\n",
-+ dev_data->irq_name,
-+ dev_data->irq,
-+ pci_is_enabled(dev) ? "on" : "off",
-+ dev->msi_enabled ? "MSI" : "",
-+ dev->msix_enabled ? "MSI/X" : "",
-+ dev_data->isr_on ? "enable" : "disable",
-+ enable ? "enable" : "disable");
-+
-+ if (enable) {
-+ rc = request_irq(dev_data->irq,
-+ pciback_guest_interrupt, IRQF_SHARED,
-+ dev_data->irq_name, dev);
-+ if (rc) {
-+ dev_err(&dev->dev, "%s: failed to install fake IRQ " \
-+ "handler for IRQ %d! (rc:%d)\n", dev_data->irq_name,
-+ dev_data->irq, rc);
-+ goto out;
-+ }
-+ }
-+ else {
-+ free_irq(dev_data->irq, dev);
-+ dev_data->irq = 0;
-+ }
-+ dev_data->isr_on = enable;
-+ dev_data->ack_intr = enable;
-+out:
-+ dev_dbg(&dev->dev, "%s: #%d %s %s%s %s\n",
-+ dev_data->irq_name,
-+ dev_data->irq,
-+ pci_is_enabled(dev) ? "on" : "off",
-+ dev->msi_enabled ? "MSI" : "",
-+ dev->msix_enabled ? "MSI/X" : "",
-+ enable ? (dev_data->isr_on ? "enabled" : "failed to enable") :
-+ (dev_data->isr_on ? "failed to disable" : "disabled"));
-+}
-+
- /* Ensure a device is "turned off" and ready to be exported.
- * (Also see pciback_config_reset to ensure virtual configuration space is
- * ready to be re-exported)
-@@ -21,6 +93,8 @@ void pciback_reset_device(struct pci_dev *dev)
- {
- u16 cmd;
-
-+ pciback_control_isr(dev, 1 /* reset device */);
-+
- /* Disable devices (but not bridges) */
- if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
- #ifdef CONFIG_PCI_MSI
-@@ -78,13 +152,18 @@ void pciback_do_op(struct work_struct *data)
- struct pciback_device *pdev =
- container_of(data, struct pciback_device, op_work);
- struct pci_dev *dev;
-+ struct pciback_dev_data *dev_data = NULL;
- struct xen_pci_op *op = &pdev->sh_info->op;
-+ int test_intx = 0;
-
- dev = pciback_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
-
- if (dev == NULL)
- op->err = XEN_PCI_ERR_dev_not_found;
- else {
-+ dev_data = pci_get_drvdata(dev);
-+ if (dev_data)
-+ test_intx = dev_data->enable_intx;
- switch (op->cmd) {
- case XEN_PCI_OP_conf_read:
- op->err = pciback_config_read(dev,
-@@ -109,10 +188,15 @@ void pciback_do_op(struct work_struct *data)
- break;
- #endif
- default:
-- op->err = XEN_PCI_ERR_not_implemented;
-+ op->err = XEN_PCI_ERR_not_implemented;
- break;
- }
- }
-+ if (!op->err && dev && dev_data) {
-+ /* Transition detected */
-+ if ((dev_data->enable_intx != test_intx))
-+ pciback_control_isr(dev, 0 /* no reset */);
-+ }
- /* Tell the driver domain that we're done. */
- wmb();
- clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
-@@ -137,3 +221,12 @@ irqreturn_t pciback_handle_event(int irq, void *dev_id)
-
- return IRQ_HANDLED;
- }
-+irqreturn_t pciback_guest_interrupt(int irq, void *dev_id)
-+{
-+ struct pci_dev *dev = (struct pci_dev *)dev_id;
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+
-+ if (dev_data->isr_on && dev_data->ack_intr)
-+ return IRQ_HANDLED;
-+ return IRQ_NONE;
-+}
+ extern struct xen_netbk *xen_netbk;
--
1.7.4
-From 29a451f41647deedc2fa535520e648c76755568c Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Mon, 12 Apr 2010 11:47:15 -0400
-Subject: [PATCH 028/244] xen-pciback: Add SysFS instrumentation for the fake IRQ handler.
+From 4fbb266d319fcb3349eff2f9c42cdee51c5fcb5f Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Fri, 15 Oct 2010 13:41:44 +0100
+Subject: [PATCH 068/203] xen: netback: take net_schedule_list_lock when removing entry from net_schedule_list
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
+There is a race in net_tx_build_mops between checking if
+net_schedule_list is empty and actually dequeuing the first entry on
+the list. If another thread dequeues the only entry on the list during
+this window we crash because list_first_entry expects a non-empty
+list, like so:
+
+[ 0.133127] BUG: unable to handle kernel NULL pointer dereference at 00000008
+[ 0.133132] IP: [<c12aae71>] net_tx_build_mops+0x91/0xa70
+[ 0.133142] *pdpt = 0000000000000000 *pde = 000000000000000f
+[ 0.133147] Oops: 0002 1 SMP
+[ 0.133150] last sysfs file:
+[ 0.133152] Modules linked in:
+[ 0.133154]
+[ 0.133156] Pid: 55, comm: netback/1 Not tainted (2.6.32.12-0.7.1 #1) Latitude E4310
+[ 0.133158] EIP: 0061:[<c12aae71>] EFLAGS: 00010202 CPU: 1
+[ 0.133161] EIP is at net_tx_build_mops+0x91/0xa70
+[ 0.133163] EAX: 00000012 EBX: 00000008 ECX: e112b734 EDX: e112b76c
+[ 0.133165] ESI: ffffff30 EDI: 00000000 EBP: e112b734 ESP: dfe85d98
+[ 0.133167] DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 0069
+[ 0.133169] Process netback/1 (pid: 55, ti=dfe84000 task=dfe83340 task.ti=dfe84000)
+[ 0.133170] Stack:
+[ 0.133172] 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+[ 0.133177] <0> 00000000 e112b734 e112ec08 e112b7f8 e112ec08 ffffff30 00000000 00000000
+[ 0.133186] <0> 00000000 00000000 00000000 e112b76c dfe85df4 00000001 00000000 aaaaaaaa
+[ 0.133193] Call Trace:
+[ 0.133202] [<c12abc7f>] net_tx_action+0x42f/0xac0
+[ 0.133206] [<c12ac37a>] netbk_action_thread+0x6a/0x1b0
+[ 0.133212] [<c1057444>] kthread+0x74/0x80
+[ 0.133218] [<c10049d7>] kernel_thread_helper+0x7/0x10
+[ 0.133220] Code: c4 00 00 00 89 74 24 58 39 74 24 2c 0f 84 c7 06 00 00 8b 74 24 \
+ 58 8b 5c 24 58 81 ee d0 00 00 00 83 c3 08 89 74 24 34 8b 7c 24 \
+ 58 <f0> ff 47 08 89 f0 e8 b4 f9 ff ff 8b 46 2c 8b 56 34 89 44 24 5c
+[ 0.133261] EIP: [<c12aae71>] net_tx_build_mops+0x91/0xa70 SS:ESP 0069:dfe85d98
+[ 0.133265] CR2: 0000000000000008
+[ 0.133274] --[ end trace e2c5c15f54bd9d93 ]--
+
+Therefore, after the initial lock-free check for an empty list, check
+again with the lock held before dequeuing the entry.
+
+Based on a patch by Tomasz Wroblewski.
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+Cc: Tomasz Wroblewski <tomasz.wroblewski(a)citrix.com>
---
- drivers/xen/pciback/pci_stub.c | 75 +++++++++++++++++++++++++++++++++++++++-
- 1 files changed, 74 insertions(+), 1 deletions(-)
+ drivers/xen/netback/netback.c | 35 ++++++++++++++++++++++++++++-------
+ 1 files changed, 28 insertions(+), 7 deletions(-)
-diff --git a/drivers/xen/pciback/pci_stub.c b/drivers/xen/pciback/pci_stub.c
-index 45bbe99..ee2cd68 100644
---- a/drivers/xen/pciback/pci_stub.c
-+++ b/drivers/xen/pciback/pci_stub.c
-@@ -1038,6 +1038,70 @@ static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
-
- DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 1d080f6..3b03435 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -774,15 +774,34 @@ static int __on_net_schedule_list(struct xen_netif *netif)
+ return !list_empty(&netif->list);
+ }
-+static ssize_t pcistub_irq_handler_show(struct device_driver *drv, char *buf)
-+{
-+ struct pcistub_device *psdev;
-+ struct pciback_dev_data *dev_data;
-+ size_t count = 0;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&pcistub_devices_lock, flags);
-+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
-+ if (count >= PAGE_SIZE)
-+ break;
-+ if (!psdev->dev)
-+ continue;
-+ dev_data = pci_get_drvdata(psdev->dev);
-+ if (!dev_data)
-+ continue;
-+ count +=
-+ scnprintf(buf + count, PAGE_SIZE - count, "%s:%s:%sing\n",
-+ pci_name(psdev->dev),
-+ dev_data->isr_on ? "on" : "off",
-+ dev_data->ack_intr ? "ack" : "not ack");
-+ }
-+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-+ return count;
++/* Must be called with net_schedule_list_lock held */
+ static void remove_from_net_schedule_list(struct xen_netif *netif)
+ {
+- struct xen_netbk *netbk = &xen_netbk[netif->group];
+- spin_lock_irq(&netbk->net_schedule_list_lock);
+ if (likely(__on_net_schedule_list(netif))) {
+ list_del_init(&netif->list);
+ netif_put(netif);
+ }
+}
+
-+DRIVER_ATTR(irq_handlers, S_IRUSR, pcistub_irq_handler_show, NULL);
-+
-+static ssize_t pcistub_irq_handler_switch(struct device_driver *drv,
-+ const char *buf,
-+ size_t count)
++static struct xen_netif *poll_net_schedule_list(struct xen_netbk *netbk)
+{
-+ struct pcistub_device *psdev;
-+ struct pciback_dev_data *dev_data;
-+ int domain, bus, slot, func;
-+ int err = -ENOENT;
-+
-+ err = str_to_slot(buf, &domain, &bus, &slot, &func);
-+ if (err)
-+ goto out;
-+
-+ psdev = pcistub_device_find(domain, bus, slot, func);
++ struct xen_netif *netif = NULL;
+
-+ if (!psdev)
++ spin_lock_irq(&netbk->net_schedule_list_lock);
++ if (list_empty(&netbk->net_schedule_list))
+ goto out;
+
-+ dev_data = pci_get_drvdata(psdev->dev);
-+ if (!dev_data)
++ netif = list_first_entry(&netbk->net_schedule_list,
++ struct xen_netif, list);
++ if (!netif)
+ goto out;
+
-+ dev_dbg(&psdev->dev->dev, "%s fake irq handler: %d->%d\n",
-+ dev_data->irq_name, dev_data->isr_on,
-+ !dev_data->isr_on);
++ netif_get(netif);
+
-+ dev_data->isr_on = !(dev_data->isr_on);
-+ if (dev_data->isr_on)
-+ dev_data->ack_intr = 1;
++ remove_from_net_schedule_list(netif);
+out:
-+ if (!err)
-+ err = count;
-+ return err;
-+}
-+DRIVER_ATTR(irq_handler_state, S_IWUSR, NULL, pcistub_irq_handler_switch);
-+
- static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf,
- size_t count)
+ spin_unlock_irq(&netbk->net_schedule_list_lock);
++ return netif;
+ }
+
+ static void add_to_net_schedule_list_tail(struct xen_netif *netif)
+@@ -817,7 +836,10 @@ void netif_schedule_work(struct xen_netif *netif)
+
+ void netif_deschedule_work(struct xen_netif *netif)
{
-@@ -1177,7 +1241,10 @@ static void pcistub_exit(void)
- driver_remove_file(&pciback_pci_driver.driver, &driver_attr_slots);
- driver_remove_file(&pciback_pci_driver.driver, &driver_attr_quirks);
- driver_remove_file(&pciback_pci_driver.driver, &driver_attr_permissive);
--
-+ driver_remove_file(&pciback_pci_driver.driver,
-+ &driver_attr_irq_handlers);
-+ driver_remove_file(&pciback_pci_driver.driver,
-+ &driver_attr_irq_handler_state);
- pci_unregister_driver(&pciback_pci_driver);
++ struct xen_netbk *netbk = &xen_netbk[netif->group];
++ spin_lock_irq(&netbk->net_schedule_list_lock);
+ remove_from_net_schedule_list(netif);
++ spin_unlock_irq(&netbk->net_schedule_list_lock);
}
-@@ -1236,6 +1303,12 @@ static int __init pcistub_init(void)
- err = driver_create_file(&pciback_pci_driver.driver,
- &driver_attr_permissive);
-+ if (!err)
-+ err = driver_create_file(&pciback_pci_driver.driver,
-+ &driver_attr_irq_handlers);
-+ if (!err)
-+ err = driver_create_file(&pciback_pci_driver.driver,
-+ &driver_attr_irq_handler_state);
- if (err)
- pcistub_exit();
+@@ -1301,12 +1323,11 @@ static unsigned net_tx_build_mops(struct xen_netbk *netbk)
+ int work_to_do;
+ unsigned int data_len;
+ pending_ring_idx_t index;
+-
++
+ /* Get a netif from the list with work to do. */
+- netif = list_first_entry(&netbk->net_schedule_list,
+- struct xen_netif, list);
+- netif_get(netif);
+- remove_from_net_schedule_list(netif);
++ netif = poll_net_schedule_list(netbk);
++ if (!netif)
++ continue;
+ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
+ if (!work_to_do) {
--
1.7.4
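
The locking pattern the patch above adopts -- an optimistic lock-free
emptiness check, repeated under the lock before dequeueing -- can be
sketched in isolation. This is an illustrative sketch reusing the driver's
names, not code from the patch itself:

	/* Dequeue safely in the presence of concurrent removals: the
	 * caller's unlocked list_empty() is only a hint, so the
	 * emptiness test must be repeated once the lock is held. */
	static struct xen_netif *poll_list_sketch(struct xen_netbk *netbk)
	{
		struct xen_netif *netif = NULL;

		spin_lock_irq(&netbk->net_schedule_list_lock);
		if (!list_empty(&netbk->net_schedule_list)) {
			netif = list_first_entry(&netbk->net_schedule_list,
						 struct xen_netif, list);
			netif_get(netif);
			/* lock-held variant, as redefined by this patch */
			remove_from_net_schedule_list(netif);
		}
		spin_unlock_irq(&netbk->net_schedule_list_lock);
		return netif;
	}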
-From 6c7c36d411eeab67192fe0ed96ac1e048b4a1755 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Mon, 19 Apr 2010 14:39:10 -0400
-Subject: [PATCH 029/244] xen-pciback: When device transitions to MSI/MSI-X stop ACK-ing on the
- legacy interrupt.
+From d58a964195f593879cc0160ac0c6f31bc95cdc6e Mon Sep 17 00:00:00 2001
+From: Paul Durrant <paul.durrant(a)citrix.com>
+Date: Wed, 15 Dec 2010 09:48:12 +0000
+Subject: [PATCH 069/203] xen: netback: Re-define PKT_PROT_LEN to be bigger.
-But don't remove the irq handler from the legacy interrupt. The device
-might still transition back to the legacy interrupts.
+Re-define PKT_PROT_LEN to be big enough to handle maximal IPv4 and TCP options, and phrase
+the definition so that it's reasonably obvious that's what it's for.
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
+Signed-off-by: Paul Durrant <paul.durrant(a)citrix.com>
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
---
- drivers/xen/pciback/conf_space_capability_msi.c | 17 ++++++++++++++++-
- 1 files changed, 16 insertions(+), 1 deletions(-)
+ drivers/xen/netback/netback.c | 14 +++++++++-----
+ 1 files changed, 9 insertions(+), 5 deletions(-)
-diff --git a/drivers/xen/pciback/conf_space_capability_msi.c b/drivers/xen/pciback/conf_space_capability_msi.c
-index b70ea8b..a236e2d 100644
---- a/drivers/xen/pciback/conf_space_capability_msi.c
-+++ b/drivers/xen/pciback/conf_space_capability_msi.c
-@@ -12,6 +12,7 @@
- int pciback_enable_msi(struct pciback_device *pdev,
- struct pci_dev *dev, struct xen_pci_op *op)
- {
-+ struct pciback_dev_data *dev_data;
- int otherend = pdev->xdev->otherend_id;
- int status;
-
-@@ -27,21 +28,29 @@ int pciback_enable_msi(struct pciback_device *pdev,
- /* The value the guest needs is actually the IDT vector, not the
- * the local domain's IRQ number. */
- op->value = xen_gsi_from_irq(dev->irq);
-+ dev_data = pci_get_drvdata(dev);
-+ if (dev_data)
-+ dev_data->ack_intr = 0;
- return 0;
- }
-
- int pciback_disable_msi(struct pciback_device *pdev,
- struct pci_dev *dev, struct xen_pci_op *op)
- {
-+ struct pciback_dev_data *dev_data;
- pci_disable_msi(dev);
-
- op->value = xen_gsi_from_irq(dev->irq);
-+ dev_data = pci_get_drvdata(dev);
-+ if (dev_data)
-+ dev_data->ack_intr = 1;
- return 0;
- }
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 3b03435..9bbd230 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -36,9 +36,11 @@
- int pciback_enable_msix(struct pciback_device *pdev,
- struct pci_dev *dev, struct xen_pci_op *op)
- {
-+ struct pciback_dev_data *dev_data;
- int i, result;
- struct msix_entry *entries;
+ #include "common.h"
-@@ -68,6 +77,9 @@ int pciback_enable_msix(struct pciback_device *pdev,
- kfree(entries);
+-#include <linux/tcp.h>
+-#include <linux/udp.h>
+ #include <linux/kthread.h>
++#include <linux/if_vlan.h>
++#include <linux/udp.h>
++
++#include <net/tcp.h>
- op->value = result;
-+ dev_data = pci_get_drvdata(dev);
-+ if (dev_data)
-+ dev_data->ack_intr = 0;
+ #include <xen/balloon.h>
+ #include <xen/events.h>
+@@ -125,10 +127,12 @@ static inline int netif_get_page_ext(struct page *pg, unsigned int *_group, unsi
+ /*
+ * This is the amount of packet we copy rather than map, so that the
+ * guest can't fiddle with the contents of the headers while we do
+- * packet processing on them (netfilter, routing, etc). 72 is enough
+- * to cover TCP+IP headers including options.
++ * packet processing on them (netfilter, routing, etc).
+ */
+-#define PKT_PROT_LEN 72
++#define PKT_PROT_LEN (ETH_HLEN + \
++ VLAN_HLEN + \
++ sizeof(struct iphdr) + MAX_IPOPTLEN + \
++ sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
- return result;
- }
-@@ -75,10 +87,13 @@ int pciback_enable_msix(struct pciback_device *pdev,
- int pciback_disable_msix(struct pciback_device *pdev,
- struct pci_dev *dev, struct xen_pci_op *op)
+ static inline pending_ring_idx_t pending_index(unsigned i)
{
--
-+ struct pciback_dev_data *dev_data;
- pci_disable_msix(dev);
-
- op->value = xen_gsi_from_irq(dev->irq);
-+ dev_data = pci_get_drvdata(dev);
-+ if (dev_data)
-+ dev_data->ack_intr = 1;
- return 0;
- }
-
--
1.7.4
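
With the usual values of these constants, the worst case covered by the new
definition works out as follows (a back-of-the-envelope check, not text from
the patch; it is presumably also why the patch pulls in <net/tcp.h>, where
MAX_TCP_OPTION_SPACE lives, and <linux/if_vlan.h> for VLAN_HLEN):

	ETH_HLEN (14) + VLAN_HLEN (4)
	  + sizeof(struct iphdr) (20) + MAX_IPOPTLEN (40)
	  + sizeof(struct tcphdr) (20) + MAX_TCP_OPTION_SPACE (40)
	  = 138 bytes, up from the old hard-coded 72.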
-From c1cc36c68f096f2b1e796ba84d9c583009939d91 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Mon, 19 Apr 2010 14:40:38 -0400
-Subject: [PATCH 030/244] xen-pciback: Enable interrupt handler when device is enabled.
+From 7120999e00f068383bc16d03cceae8d6d511c43a Mon Sep 17 00:00:00 2001
+From: Paul Durrant <paul.durrant(a)citrix.com>
+Date: Tue, 14 Dec 2010 20:35:19 +0000
+Subject: [PATCH 070/203] xen: netback: Don't count packets we don't actually receive.
-And also request it to be disabled when the device has been
-disabled.
+Make sure we only bump rx_packets when we're definitely going to call netif_rx_ni().
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
+Signed-off-by: Paul Durrant <paul.durrant(a)citrix.com>
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
---
- drivers/xen/pciback/conf_space_header.c | 6 ++++++
- 1 files changed, 6 insertions(+), 0 deletions(-)
+ drivers/xen/netback/netback.c | 6 +++---
+ 1 files changed, 3 insertions(+), 3 deletions(-)
-diff --git a/drivers/xen/pciback/conf_space_header.c b/drivers/xen/pciback/conf_space_header.c
-index 1f4f86e..cb450f4 100644
---- a/drivers/xen/pciback/conf_space_header.c
-+++ b/drivers/xen/pciback/conf_space_header.c
-@@ -39,8 +39,10 @@ static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 9bbd230..78d3509 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -1512,9 +1512,6 @@ static void net_tx_submit(struct xen_netbk *netbk)
+ skb->dev = netif->dev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
- static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
- {
-+ struct pciback_dev_data *dev_data;
- int err;
+- netif->stats.rx_bytes += skb->len;
+- netif->stats.rx_packets++;
+-
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb_checksum_setup(skb)) {
+ DPRINTK("Can't setup checksum in net_tx_action\n");
+@@ -1530,6 +1527,9 @@ static void net_tx_submit(struct xen_netbk *netbk)
+ continue;
+ }
-+ dev_data = pci_get_drvdata(dev);
- if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
- if (unlikely(verbose_request))
- printk(KERN_DEBUG "pciback: %s: enable\n",
-@@ -48,11 +50,15 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
- err = pci_enable_device(dev);
- if (err)
- return err;
-+ if (dev_data)
-+ dev_data->enable_intx = 1;
- } else if (pci_is_enabled(dev) && !is_enable_cmd(value)) {
- if (unlikely(verbose_request))
- printk(KERN_DEBUG "pciback: %s: disable\n",
- pci_name(dev));
- pci_disable_device(dev);
-+ if (dev_data)
-+ dev_data->enable_intx = 0;
++ netif->stats.rx_bytes += skb->len;
++ netif->stats.rx_packets++;
++
+ netif_rx_ni(skb);
+ netif->dev->last_rx = jiffies;
}
-
- if (!dev->is_busmaster && is_master_cmd(value)) {
--
1.7.4
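
The resulting ordering in net_tx_submit reduces to the following shape (a
simplified excerpt-style sketch, not the full loop body): every path that
frees the skb is taken before the counters are touched, so rx_packets and
rx_bytes only ever count frames actually handed to netif_rx_ni().

	/* Inside the per-skb submit loop, after this change: */
	if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_setup(skb)) {
		kfree_skb(skb);			/* dropped: not counted */
		continue;
	}

	netif->stats.rx_bytes += skb->len;	/* counted only now... */
	netif->stats.rx_packets++;
	netif_rx_ni(skb);			/* ...when delivery is certain */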
-From a732e3d6ed4831c460586bd7a16ef7f6b7d28936 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Mon, 19 Apr 2010 16:23:06 -0400
-Subject: [PATCH 031/244] xen-pciback: Probe the IRQ line to check if it is not shared.
-
-If it is not shared, we stop ACK-ing the IRQ line as there is
-no need for this irq handler to return IRQ_HANDLED.
+From 3c1f462cb05ad5eb5bb8e65cf6feb9a4e4363ebf Mon Sep 17 00:00:00 2001
+From: Paul Durrant <paul.durrant(a)citrix.com>
+Date: Tue, 14 Dec 2010 20:35:20 +0000
+Subject: [PATCH 071/203] xen: netback: Remove the 500ms timeout to restart the netif queue.
-We have to this check once much much later than the pciback
-and pcifront have started talking as guests doing the hypercall
-that would notify the other guest that the IRQ line is shared
-is done asynchronously.
+It is generally unhelpful: it results in a massive tail-drop should a guest become
+unresponsive for a relatively short period of time, and no back-pressure (other than
+that caused by a higher-layer protocol) is applied to the sender.
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
+Signed-off-by: Paul Durrant <paul.durrant(a)citrix.com>
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
---
- drivers/xen/pciback/pci_stub.c | 5 +++--
- drivers/xen/pciback/pciback.h | 1 +
- drivers/xen/pciback/pciback_ops.c | 12 +++++++++++-
- 3 files changed, 15 insertions(+), 3 deletions(-)
+ drivers/xen/netback/netback.c | 20 +-------------------
+ 1 files changed, 1 insertions(+), 19 deletions(-)
-diff --git a/drivers/xen/pciback/pci_stub.c b/drivers/xen/pciback/pci_stub.c
-index ee2cd68..88c7ca1 100644
---- a/drivers/xen/pciback/pci_stub.c
-+++ b/drivers/xen/pciback/pci_stub.c
-@@ -1055,10 +1055,11 @@ static ssize_t pcistub_irq_handler_show(struct device_driver *drv, char *buf)
- if (!dev_data)
- continue;
- count +=
-- scnprintf(buf + count, PAGE_SIZE - count, "%s:%s:%sing\n",
-+ scnprintf(buf + count, PAGE_SIZE - count, "%s:%s:%sing:%ld\n",
- pci_name(psdev->dev),
- dev_data->isr_on ? "on" : "off",
-- dev_data->ack_intr ? "ack" : "not ack");
-+ dev_data->ack_intr ? "ack" : "not ack",
-+ dev_data->handled);
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 78d3509..2caa5f8 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -275,13 +275,6 @@ static inline int netbk_queue_full(struct xen_netif *netif)
+ ((netif->rx.rsp_prod_pvt + NET_RX_RING_SIZE - peek) < needed);
+ }
+
+-static void tx_queue_callback(unsigned long data)
+-{
+- struct xen_netif *netif = (struct xen_netif *)data;
+- if (netif_schedulable(netif))
+- netif_wake_queue(netif->dev);
+-}
+-
+ /* Figure out how many ring slots we're going to need to send @skb to
+ the guest. */
+ static unsigned count_skb_slots(struct sk_buff *skb, struct xen_netif *netif)
+@@ -364,19 +357,8 @@ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ netif->rx.sring->req_event = netif->rx_req_cons_peek +
+ netbk_max_required_rx_slots(netif);
+ mb(); /* request notification /then/ check & stop the queue */
+- if (netbk_queue_full(netif)) {
++ if (netbk_queue_full(netif))
+ netif_stop_queue(dev);
+- /*
+- * Schedule 500ms timeout to restart the queue, thus
+- * ensuring that an inactive queue will be drained.
+- * Packets will be immediately be dropped until more
+- * receive buffers become available (see
+- * netbk_queue_full() check above).
+- */
+- netif->tx_queue_timeout.data = (unsigned long)netif;
+- netif->tx_queue_timeout.function = tx_queue_callback;
+- mod_timer(&netif->tx_queue_timeout, jiffies + HZ/2);
+- }
}
- spin_unlock_irqrestore(&pcistub_devices_lock, flags);
- return count;
-diff --git a/drivers/xen/pciback/pciback.h b/drivers/xen/pciback/pciback.h
-index 9d1b0a6..fc31052 100644
---- a/drivers/xen/pciback/pciback.h
-+++ b/drivers/xen/pciback/pciback.h
-@@ -50,6 +50,7 @@ struct pciback_dev_data {
- unsigned int enable_intx : 1;
- unsigned int isr_on : 1; /* Whether the IRQ handler is installed. */
- unsigned int ack_intr : 1; /* .. and ACK-ing */
-+ unsigned long handled;
- unsigned int irq; /* Saved in case device transitions to MSI/MSI-X */
- char irq_name[0]; /* pciback[000:04:00.0] */
- };
-diff --git a/drivers/xen/pciback/pciback_ops.c b/drivers/xen/pciback/pciback_ops.c
-index cb54893..5543881 100644
---- a/drivers/xen/pciback/pciback_ops.c
-+++ b/drivers/xen/pciback/pciback_ops.c
-@@ -226,7 +226,17 @@ irqreturn_t pciback_guest_interrupt(int irq, void *dev_id)
- struct pci_dev *dev = (struct pci_dev *)dev_id;
- struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
+ skb_queue_tail(&netbk->rx_queue, skb);
-- if (dev_data->isr_on && dev_data->ack_intr)
-+ if (dev_data->isr_on && dev_data->ack_intr) {
-+ dev_data->handled++;
-+ if ((dev_data->handled % 1000) == 0) {
-+ if (xen_ignore_irq(irq)) {
-+ printk(KERN_INFO "%s IRQ line is not shared "
-+ "with other domains. Turning ISR off\n",
-+ dev_data->irq_name);
-+ dev_data->ack_intr = 0;
-+ }
-+ }
- return IRQ_HANDLED;
-+ }
- return IRQ_NONE;
- }
--
1.7.4
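
What remains after this patch is the plain stop/wake flow-control pattern:
the queue is stopped while the shared ring cannot take a worst-case packet,
and is only woken once the frontend posts more receive buffers, so the
pressure lands on the sender's own queue rather than on a drop-after-500ms
timer. In outline (a simplified sketch, not code from the patch; the wake
side runs from the driver's notification path):

	/* transmit side */
	if (netbk_queue_full(netif))
		netif_stop_queue(dev);

	/* completion/notification side */
	if (netif_queue_stopped(netif->dev) && !netbk_queue_full(netif))
		netif_wake_queue(netif->dev);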
-From 3312c11c3f9c857b2457c293e6b6e15928a32f32 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Tue, 20 Apr 2010 20:22:40 -0400
-Subject: [PATCH 032/244] xen-pciback: Add debug statements for the MSI/MSI-X configuration module.
+From d70d4906c2736dadc5c287caa36c5880947f8688 Mon Sep 17 00:00:00 2001
+From: Paul Durrant <paul.durrant(a)citrix.com>
+Date: Tue, 14 Dec 2010 20:35:21 +0000
+Subject: [PATCH 072/203] xen: netback: Add a missing test to tx_work_todo.
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
+Add a test so that, when netback is using worker threads, net_tx_action()
+gets called in a timely manner when the pending_inuse list is populated.
+
+Signed-off-by: Paul Durrant <paul.durrant(a)citrix.com>
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
---
- drivers/xen/pciback/conf_space_capability_msi.c | 11 +++++++++++
- 1 files changed, 11 insertions(+), 0 deletions(-)
+ drivers/xen/netback/netback.c | 4 ++++
+ 1 files changed, 4 insertions(+), 0 deletions(-)
-diff --git a/drivers/xen/pciback/conf_space_capability_msi.c b/drivers/xen/pciback/conf_space_capability_msi.c
-index a236e2d..b15131e 100644
---- a/drivers/xen/pciback/conf_space_capability_msi.c
-+++ b/drivers/xen/pciback/conf_space_capability_msi.c
-@@ -16,6 +16,9 @@ int pciback_enable_msi(struct pciback_device *pdev,
- int otherend = pdev->xdev->otherend_id;
- int status;
-
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG "pciback: %s: enable MSI\n", pci_name(dev));
-+
- status = pci_enable_msi(dev);
-
- if (status) {
-@@ -31,6 +34,7 @@ int pciback_enable_msi(struct pciback_device *pdev,
- dev_data = pci_get_drvdata(dev);
- if (dev_data)
- dev_data->ack_intr = 0;
-+
- return 0;
- }
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 2caa5f8..dd52d01 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -1695,6 +1695,10 @@ static inline int tx_work_todo(struct xen_netbk *netbk)
+ if (netbk->dealloc_cons != netbk->dealloc_prod)
+ return 1;
-@@ -38,6 +42,9 @@ int pciback_disable_msi(struct pciback_device *pdev,
- struct pci_dev *dev, struct xen_pci_op *op)
- {
- struct pciback_dev_data *dev_data;
++ if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB &&
++ !list_empty(&netbk->pending_inuse_head))
++ return 1;
+
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG "pciback: %s: disable MSI\n", pci_name(dev));
- pci_disable_msi(dev);
-
- op->value = xen_gsi_from_irq(dev->irq);
-@@ -54,6 +61,8 @@ int pciback_enable_msix(struct pciback_device *pdev,
- int i, result;
- struct msix_entry *entries;
-
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG "pciback: %s: enable MSI-X\n", pci_name(dev));
- if (op->value > SH_INFO_MAX_VEC)
- return -EINVAL;
-
-@@ -88,6 +97,8 @@ int pciback_disable_msix(struct pciback_device *pdev,
- struct pci_dev *dev, struct xen_pci_op *op)
- {
- struct pciback_dev_data *dev_data;
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG "pciback: %s: disable MSI-X\n", pci_name(dev));
- pci_disable_msix(dev);
-
- op->value = xen_gsi_from_irq(dev->irq);
+ if (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
+ !list_empty(&netbk->net_schedule_list))
+ return 1;
--
1.7.4
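
With the new test in place, the worker's wake-up predicate covers all three
sources of work. Paraphrased from the function, with the driver's names (an
illustrative restatement, not a hunk from the patch):

	static inline int tx_work_todo(struct xen_netbk *netbk)
	{
		/* 1. Completed transmissions waiting to be deallocated. */
		if (netbk->dealloc_cons != netbk->dealloc_prod)
			return 1;

		/* 2. Delayed-copy skbs still holding foreign pages --
		 *    the case this patch adds. */
		if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB &&
		    !list_empty(&netbk->pending_inuse_head))
			return 1;

		/* 3. Scheduled interfaces with ring work, if slots allow. */
		if (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
		    !list_empty(&netbk->net_schedule_list))
			return 1;

		return 0;
	}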
-From 52257d7ad18bd91fd614df5ef960a88af3ed5200 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Fri, 23 Jul 2010 14:35:47 -0400
-Subject: [PATCH 033/244] xen-pciback: Redo spinlock usage.
+From 8811a0cf07ccd1b9f40eaae76de3aa2792c9fd28 Mon Sep 17 00:00:00 2001
+From: Paul Durrant <paul.durrant(a)citrix.com>
+Date: Tue, 14 Dec 2010 20:35:22 +0000
+Subject: [PATCH 073/203] xen: netback: Re-factor net_tx_action_dealloc() slightly.
-We were using coarse spinlocks that could end up with a deadlock.
-This patch fixes that and makes the spinlocks much more fine-grained.
+There is no need for processing of the pending_inuse list to be within the dealloc_prod/cons
+loop.
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
+Signed-off-by: Paul Durrant <paul.durrant(a)citrix.com>
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
---
- drivers/xen/pciback/xenbus.c | 34 +++++++++++++++++++++-------------
- 1 files changed, 21 insertions(+), 13 deletions(-)
+ drivers/xen/netback/netback.c | 26 ++++++++++++++------------
+ 1 files changed, 14 insertions(+), 12 deletions(-)
-diff --git a/drivers/xen/pciback/xenbus.c b/drivers/xen/pciback/xenbus.c
-index d448bf5..f0d5426 100644
---- a/drivers/xen/pciback/xenbus.c
-+++ b/drivers/xen/pciback/xenbus.c
-@@ -54,23 +54,31 @@ static void pciback_disconnect(struct pciback_device *pdev)
- unbind_from_irqhandler(pdev->evtchn_irq, pdev);
- pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
- }
-+ spin_unlock(&pdev->dev_lock);
-
- /* If the driver domain started an op, make sure we complete it
- * before releasing the shared memory */
-+
-+ /* Note, the workqueue does not use spinlocks at all.*/
- flush_workqueue(pciback_wq);
-
-+ spin_lock(&pdev->dev_lock);
- if (pdev->sh_info != NULL) {
- xenbus_unmap_ring_vfree(pdev->xdev, pdev->sh_info);
- pdev->sh_info = NULL;
- }
--
- spin_unlock(&pdev->dev_lock);
-+
- }
-
- static void free_pdev(struct pciback_device *pdev)
- {
-- if (pdev->be_watching)
-+ spin_lock(&pdev->dev_lock);
-+ if (pdev->be_watching) {
- unregister_xenbus_watch(&pdev->be_watch);
-+ pdev->be_watching = 0;
-+ }
-+ spin_unlock(&pdev->dev_lock);
-
- pciback_disconnect(pdev);
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index dd52d01..53b3a0e 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -906,11 +906,20 @@ static inline void net_tx_action_dealloc(struct xen_netbk *netbk)
+ gop++;
+ }
-@@ -98,7 +106,10 @@ static int pciback_do_attach(struct pciback_device *pdev, int gnt_ref,
- "Error mapping other domain page in ours.");
- goto out;
- }
+- if (netbk_copy_skb_mode != NETBK_DELAYED_COPY_SKB ||
+- list_empty(&netbk->pending_inuse_head))
+- break;
++ } while (dp != netbk->dealloc_prod);
+
-+ spin_lock(&pdev->dev_lock);
- pdev->sh_info = vaddr;
-+ spin_unlock(&pdev->dev_lock);
++ netbk->dealloc_cons = dc;
- err = bind_interdomain_evtchn_to_irqhandler(
- pdev->xdev->otherend_id, remote_evtchn, pciback_handle_event,
-@@ -108,7 +119,10 @@ static int pciback_do_attach(struct pciback_device *pdev, int gnt_ref,
- "Error binding event channel to IRQ");
- goto out;
- }
+- /* Copy any entries that have been pending for too long. */
++ ret = HYPERVISOR_grant_table_op(
++ GNTTABOP_unmap_grant_ref, netbk->tx_unmap_ops,
++ gop - netbk->tx_unmap_ops);
++ BUG_ON(ret);
+
-+ spin_lock(&pdev->dev_lock);
- pdev->evtchn_irq = err;
-+ spin_unlock(&pdev->dev_lock);
- err = 0;
-
- dev_dbg(&pdev->xdev->dev, "Attached!\n");
-@@ -122,7 +136,6 @@ static int pciback_attach(struct pciback_device *pdev)
- int gnt_ref, remote_evtchn;
- char *magic = NULL;
-
-- spin_lock(&pdev->dev_lock);
-
- /* Make sure we only do this setup once */
- if (xenbus_read_driver_state(pdev->xdev->nodename) !=
-@@ -168,7 +181,6 @@ static int pciback_attach(struct pciback_device *pdev)
-
- dev_dbg(&pdev->xdev->dev, "Connected? %d\n", err);
- out:
-- spin_unlock(&pdev->dev_lock);
-
- kfree(magic);
-
-@@ -340,7 +352,6 @@ static int pciback_reconfigure(struct pciback_device *pdev)
- char state_str[64];
- char dev_str[64];
-
-- spin_lock(&pdev->dev_lock);
-
- dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n");
-
-@@ -481,8 +492,6 @@ static int pciback_reconfigure(struct pciback_device *pdev)
- }
++ /*
++ * Copy any entries that have been pending for too long
++ */
++ if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB &&
++ !list_empty(&netbk->pending_inuse_head)) {
+ list_for_each_entry_safe(inuse, n,
+ &netbk->pending_inuse_head, list) {
+ struct pending_tx_info *pending_tx_info;
+@@ -936,14 +945,7 @@ static inline void net_tx_action_dealloc(struct xen_netbk *netbk)
- out:
-- spin_unlock(&pdev->dev_lock);
+ break;
+ }
+- } while (dp != netbk->dealloc_prod);
-
- return 0;
- }
-
-@@ -539,8 +548,6 @@ static int pciback_setup_backend(struct pciback_device *pdev)
- char dev_str[64];
- char state_str[64];
-
-- spin_lock(&pdev->dev_lock);
+- netbk->dealloc_cons = dc;
-
- /* It's possible we could get the call to setup twice, so make sure
- * we're not already connected.
- */
-@@ -621,8 +628,6 @@ static int pciback_setup_backend(struct pciback_device *pdev)
- "Error switching to initialised state!");
+- ret = HYPERVISOR_grant_table_op(
+- GNTTABOP_unmap_grant_ref, netbk->tx_unmap_ops,
+- gop - netbk->tx_unmap_ops);
+- BUG_ON(ret);
++ }
- out:
-- spin_unlock(&pdev->dev_lock);
--
- if (!err)
- /* see if pcifront is already configured (if not, we'll wait) */
- pciback_attach(pdev);
-@@ -669,7 +674,10 @@ static int pciback_xenbus_probe(struct xenbus_device *dev,
- pciback_be_watch);
- if (err)
- goto out;
-+
-+ spin_lock(&pdev->dev_lock);
- pdev->be_watching = 1;
-+ spin_unlock(&pdev->dev_lock);
+ list_for_each_entry_safe(inuse, n, &list, list) {
+ struct pending_tx_info *pending_tx_info;
+--
+1.7.4
+
+
+From 9481475c92f00d15100d0a5083ef338f1b528506 Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Wed, 5 Jan 2011 09:57:37 +0000
+Subject: [PATCH 074/203] xen: netback: Drop GSO SKBs which do not have csum_blank.
+
+The Linux network stack expects all GSO SKBs to have ip_summed ==
+CHECKSUM_PARTIAL (which implies that the frame contains a partial
+checksum) and the Xen network ring protocol similarly expects an SKB
+which has GSO set to also have NETRXF_csum_blank (which also implies a
+partial checksum). Therefore drop such frames on receive, otherwise
+they will trigger the warning in skb_gso_segment.
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+Cc: Jeremy Fitzhardinge <jeremy(a)goop.org>
+Cc: xen-devel(a)lists.xensource.com
+---
+ drivers/xen/netback/netback.c | 4 ++++
+ 1 files changed, 4 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 53b3a0e..2766b93 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -1502,6 +1502,10 @@ static void net_tx_submit(struct xen_netbk *netbk)
+ kfree_skb(skb);
+ continue;
+ }
++ } else if (skb_is_gso(skb)) {
++ DPRINTK("Dropping GSO but not CHECKSUM_PARTIAL skb\n");
++ kfree_skb(skb);
++ continue;
+ }
- /* We need to force a call to our callback here in case
- * xend already configured us!
-@@ -708,8 +716,8 @@ int __init pciback_xenbus_register(void)
- {
- pciback_wq = create_workqueue("pciback_workqueue");
- if (!pciback_wq) {
-- printk(KERN_ERR "pciback_xenbus_register: create"
-- "pciback_workqueue failed\n");
-+ printk(KERN_ERR "%s: create"
-+ "pciback_workqueue failed\n",__FUNCTION__);
- return -EFAULT;
- }
- return xenbus_register_backend(&xenbus_pciback_driver);
+ if (unlikely(netbk_copy_skb_mode == NETBK_ALWAYS_COPY_SKB) &&
--
1.7.4
-From a9e0cfab0577730e74787b701edc727756a52b11 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Wed, 28 Jul 2010 13:28:34 -0400
-Subject: [PATCH 034/244] xen-pciback: Remove spinlock for be->watching state.
+From a45f9a0889210fba5c73994ec2fa1c36f82a435f Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Wed, 19 Jan 2011 12:43:38 +0000
+Subject: [PATCH 075/203] xen: netback: completely remove tx_queue_timer
-There is no need to guard this with a spinlock. It
-is already guarded by the xenwatch_thread against multiple
-customers.
+"xen: netback: Remove the 500ms timeout to restart the netif queue." missed
+removing the timer initialisation.
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
+Also remove the related comment which has been obsolete since the default for
+MODPARM_copy_skb was switched to true some time ago.
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+Cc: Paul Durrant <Paul.Durrant(a)citrix.com>
---
- drivers/xen/pciback/xenbus.c | 4 ----
- 1 files changed, 0 insertions(+), 4 deletions(-)
+ drivers/xen/netback/common.h | 3 ---
+ drivers/xen/netback/interface.c | 13 +------------
+ 2 files changed, 1 insertions(+), 15 deletions(-)
-diff --git a/drivers/xen/pciback/xenbus.c b/drivers/xen/pciback/xenbus.c
-index f0d5426..993b659 100644
---- a/drivers/xen/pciback/xenbus.c
-+++ b/drivers/xen/pciback/xenbus.c
-@@ -73,12 +73,10 @@ static void pciback_disconnect(struct pciback_device *pdev)
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index ce0041a..7e03a46 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -99,9 +99,6 @@ struct xen_netif {
+ unsigned long remaining_credit;
+ struct timer_list credit_timeout;
- static void free_pdev(struct pciback_device *pdev)
- {
-- spin_lock(&pdev->dev_lock);
- if (pdev->be_watching) {
- unregister_xenbus_watch(&pdev->be_watch);
- pdev->be_watching = 0;
- }
-- spin_unlock(&pdev->dev_lock);
+- /* Enforce draining of the transmit queue. */
+- struct timer_list tx_queue_timeout;
+-
+ /* Statistics */
+ int nr_copied_skbs;
- pciback_disconnect(pdev);
+diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
+index 2e8508a..efdc21c 100644
+--- a/drivers/xen/netback/interface.c
++++ b/drivers/xen/netback/interface.c
+@@ -41,15 +41,7 @@
+ * Module parameter 'queue_length':
+ *
+ * Enables queuing in the network stack when a client has run out of receive
+- * descriptors. Although this feature can improve receive bandwidth by avoiding
+- * packet loss, it can also result in packets sitting in the 'tx_queue' for
+- * unbounded time. This is bad if those packets hold onto foreign resources.
+- * For example, consider a packet that holds onto resources belonging to the
+- * guest for which it is queued (e.g., packet received on vif1.0, destined for
+- * vif1.1 which is not activated in the guest): in this situation the guest
+- * will never be destroyed, unless vif1.1 is taken down. To avoid this, we
+- * run a timer (tx_queue_timeout) to drain the queue when the interface is
+- * blocked.
++ * descriptors.
+ */
+ static unsigned long netbk_queue_length = 32;
+ module_param_named(queue_length, netbk_queue_length, ulong, 0644);
+@@ -295,8 +287,6 @@ struct xen_netif *netif_alloc(struct device *parent, domid_t domid, unsigned int
+ /* Initialize 'expires' now: it's used to track the credit window. */
+ netif->credit_timeout.expires = jiffies;
-@@ -675,9 +673,7 @@ static int pciback_xenbus_probe(struct xenbus_device *dev,
- if (err)
- goto out;
+- init_timer(&netif->tx_queue_timeout);
+-
+ dev->netdev_ops = &netback_ops;
+ netif_set_features(netif);
+ SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
+@@ -458,7 +448,6 @@ void netif_disconnect(struct xen_netif *netif)
+ wait_event(netif->waiting_to_free, atomic_read(&netif->refcnt) == 0);
-- spin_lock(&pdev->dev_lock);
- pdev->be_watching = 1;
-- spin_unlock(&pdev->dev_lock);
+ del_timer_sync(&netif->credit_timeout);
+- del_timer_sync(&netif->tx_queue_timeout);
- /* We need to force a call to our callback here in case
- * xend already configured us!
+ if (netif->irq)
+ unbind_from_irqhandler(netif->irq, netif);
--
1.7.4
-From c0cae0b36c43e75d4d69c60f5319e6ba802b2233 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Mon, 13 Dec 2010 11:06:36 -0500
-Subject: [PATCH 035/244] xen/pciback: Fix checkpatch warnings and errors.
+From 2794a483aeafab1c0765a0478b760978b361bad9 Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Fri, 28 Jan 2011 13:11:45 +0000
+Subject: [PATCH 076/203] Revert "xen: netback: Drop GSO SKBs which do not have csum_blank."
-Checkpatch found some extra warnings and errors. This mega
-patch fixes all of them in one swoop.
+This reverts commit 082386b4a581b2ba5a125cc8944a57ceb33ff37c.
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
+Based on discussion surrounding the netfront equivalent fix
+(http://patchwork.ozlabs.org/patch/80389/) this issue will be fixed
+differently.
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
---
- arch/x86/include/asm/xen/pci.h | 2 +-
- drivers/xen/events.c | 38 ++++++++++----------
- drivers/xen/pciback/conf_space.c | 4 +-
- drivers/xen/pciback/conf_space_capability_msi.c | 11 +++---
- drivers/xen/pciback/conf_space_header.c | 42 +++++++++++-----------
- drivers/xen/pciback/controller.c | 2 +-
- drivers/xen/pciback/pci_stub.c | 7 ++--
- drivers/xen/pciback/pciback.h | 16 ++++----
- drivers/xen/pciback/pciback_ops.c | 9 ++---
- drivers/xen/pciback/xenbus.c | 14 ++++----
- 10 files changed, 73 insertions(+), 72 deletions(-)
+ drivers/xen/netback/netback.c | 4 ----
+ 1 files changed, 0 insertions(+), 4 deletions(-)
-diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h
-index 8474b4b..7e61d78 100644
---- a/arch/x86/include/asm/xen/pci.h
-+++ b/arch/x86/include/asm/xen/pci.h
-@@ -27,7 +27,7 @@ static inline int xen_find_device_domain_owner(struct pci_dev *dev)
- return -1;
- }
- static inline int xen_register_device_domain_owner(struct pci_dev *dev,
-- uint16_t domain)
-+ uint16_t domain)
- {
- return -1;
- }
-diff --git a/drivers/xen/events.c b/drivers/xen/events.c
-index 95eea13..3929c20 100644
---- a/drivers/xen/events.c
-+++ b/drivers/xen/events.c
-@@ -698,7 +698,7 @@ int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type)
- domid = rc = xen_find_device_domain_owner(dev);
- if (rc < 0)
- domid = DOMID_SELF;
--
-+
- memset(&map_irq, 0, sizeof(map_irq));
- map_irq.domid = domid;
- map_irq.type = MAP_PIRQ_TYPE_MSI;
-@@ -850,18 +850,18 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
- }
-
- static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
-- unsigned int remote_port)
-+ unsigned int remote_port)
- {
-- struct evtchn_bind_interdomain bind_interdomain;
-- int err;
-+ struct evtchn_bind_interdomain bind_interdomain;
-+ int err;
-
-- bind_interdomain.remote_dom = remote_domain;
-- bind_interdomain.remote_port = remote_port;
-+ bind_interdomain.remote_dom = remote_domain;
-+ bind_interdomain.remote_port = remote_port;
-
-- err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
-- &bind_interdomain);
-+ err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
-+ &bind_interdomain);
-
-- return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
-+ return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
- }
-
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 2766b93..53b3a0e 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -1502,10 +1502,6 @@ static void net_tx_submit(struct xen_netbk *netbk)
+ kfree_skb(skb);
+ continue;
+ }
+- } else if (skb_is_gso(skb)) {
+- DPRINTK("Dropping GSO but not CHECKSUM_PARTIAL skb\n");
+- kfree_skb(skb);
+- continue;
+ }
-@@ -966,19 +966,19 @@ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
- const char *devname,
- void *dev_id)
- {
-- int irq, retval;
-+ int irq, retval;
+ if (unlikely(netbk_copy_skb_mode == NETBK_ALWAYS_COPY_SKB) &&
+--
+1.7.4
+
+
+From 01cd16474ee82db4d958c9cf8e481be897fa4ca6 Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Thu, 27 Jan 2011 15:43:46 +0000
+Subject: [PATCH 077/203] xen: netback: handle incoming GSO SKBs which are not CHECKSUM_PARTIAL
+
+The Linux network stack expects all GSO SKBs to have ip_summed ==
+CHECKSUM_PARTIAL (which implies that the frame contains a partial
+checksum) and the Xen network ring protocol similarly expects an SKB
+which has GSO set to also have NETRXF_csum_blank (which also implies a
+partial checksum).
+
+However there have been cases of buggy guests which mark a frame as
+GSO but do not set csum_blank. If we detect that we are receiving such a
+frame (which manifests as ip_summed != PARTIAL && skb_is_gso) then
+force the SKB to partial and recalculate the checksum, since we cannot
+rely on the peer having done so if they have not set csum_blank.
+
+Add an ethtool stat to track occurrences of this event.
+
+A corresponding fix was made to netfront in e0ce4af920eb028f38bfd680b1d733f4c7a0b7cf.
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+Cc: Jeremy Fitzhardinge <jeremy(a)goop.org>
+Cc: xen-devel(a)lists.xensource.com
+---
+ drivers/xen/netback/common.h | 1 +
+ drivers/xen/netback/interface.c | 9 +++++++-
+ drivers/xen/netback/netback.c | 43 ++++++++++++++++++++++++++++++++------
+ 3 files changed, 45 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index 7e03a46..f660eb5 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -101,6 +101,7 @@ struct xen_netif {
-- irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
-- if (irq < 0)
-- return irq;
-+ irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
-+ if (irq < 0)
-+ return irq;
+ /* Statistics */
+ int nr_copied_skbs;
++ int rx_gso_checksum_fixup;
-- retval = request_irq(irq, handler, irqflags, devname, dev_id);
-- if (retval != 0) {
-- unbind_from_irq(irq);
-- return retval;
-- }
-+ retval = request_irq(irq, handler, irqflags, devname, dev_id);
-+ if (retval != 0) {
-+ unbind_from_irq(irq);
-+ return retval;
-+ }
+ /* Miscellaneous private stuff. */
+ struct list_head list; /* scheduling list */
+diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
+index efdc21c..d3af68e 100644
+--- a/drivers/xen/netback/interface.c
++++ b/drivers/xen/netback/interface.c
+@@ -189,7 +189,14 @@ static const struct netif_stat {
+ char name[ETH_GSTRING_LEN];
+ u16 offset;
+ } netbk_stats[] = {
+- { "copied_skbs", offsetof(struct xen_netif, nr_copied_skbs) },
++ {
++ "copied_skbs",
++ offsetof(struct xen_netif, nr_copied_skbs)
++ },
++ {
++ "rx_gso_checksum_fixup",
++ offsetof(struct xen_netif, rx_gso_checksum_fixup)
++ },
+ };
-- return irq;
-+ return irq;
+ static int netbk_get_sset_count(struct net_device *dev, int string_set)
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 53b3a0e..8189199 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -1222,11 +1222,28 @@ static int netbk_set_skb_gso(struct sk_buff *skb, struct xen_netif_extra_info *g
+ return 0;
}
- EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
-
-diff --git a/drivers/xen/pciback/conf_space.c b/drivers/xen/pciback/conf_space.c
-index 370c18e..eb6bba0 100644
---- a/drivers/xen/pciback/conf_space.c
-+++ b/drivers/xen/pciback/conf_space.c
-@@ -18,8 +18,8 @@
- static int permissive;
- module_param(permissive, bool, 0644);
-
--#define DEFINE_PCI_CONFIG(op, size, type) \
--int pciback_##op##_config_##size \
-+#define DEFINE_PCI_CONFIG(op, size, type) \
-+int pciback_##op##_config_##size \
- (struct pci_dev *dev, int offset, type value, void *data) \
- { \
- return pci_##op##_config_##size(dev, offset, value); \
-diff --git a/drivers/xen/pciback/conf_space_capability_msi.c b/drivers/xen/pciback/conf_space_capability_msi.c
-index b15131e..3acda69 100644
---- a/drivers/xen/pciback/conf_space_capability_msi.c
-+++ b/drivers/xen/pciback/conf_space_capability_msi.c
-@@ -16,7 +16,7 @@ int pciback_enable_msi(struct pciback_device *pdev,
- int otherend = pdev->xdev->otherend_id;
- int status;
-
-- if (unlikely(verbose_request))
-+ if (unlikely(verbose_request))
- printk(KERN_DEBUG "pciback: %s: enable MSI\n", pci_name(dev));
-
- status = pci_enable_msi(dev);
-@@ -43,7 +43,7 @@ int pciback_disable_msi(struct pciback_device *pdev,
- {
- struct pciback_dev_data *dev_data;
-
-- if (unlikely(verbose_request))
-+ if (unlikely(verbose_request))
- printk(KERN_DEBUG "pciback: %s: disable MSI\n", pci_name(dev));
- pci_disable_msi(dev);
-
-@@ -61,7 +61,7 @@ int pciback_enable_msix(struct pciback_device *pdev,
- int i, result;
- struct msix_entry *entries;
-- if (unlikely(verbose_request))
-+ if (unlikely(verbose_request))
- printk(KERN_DEBUG "pciback: %s: enable MSI-X\n", pci_name(dev));
- if (op->value > SH_INFO_MAX_VEC)
- return -EINVAL;
-@@ -97,8 +97,9 @@ int pciback_disable_msix(struct pciback_device *pdev,
- struct pci_dev *dev, struct xen_pci_op *op)
+-static int skb_checksum_setup(struct sk_buff *skb)
++static int checksum_setup(struct xen_netif *netif, struct sk_buff *skb)
{
- struct pciback_dev_data *dev_data;
-- if (unlikely(verbose_request))
-- printk(KERN_DEBUG "pciback: %s: disable MSI-X\n", pci_name(dev));
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG "pciback: %s: disable MSI-X\n",
-+ pci_name(dev));
- pci_disable_msix(dev);
-
- op->value = xen_gsi_from_irq(dev->irq);
-diff --git a/drivers/xen/pciback/conf_space_header.c b/drivers/xen/pciback/conf_space_header.c
-index cb450f4..22ad0f5 100644
---- a/drivers/xen/pciback/conf_space_header.c
-+++ b/drivers/xen/pciback/conf_space_header.c
-@@ -316,27 +316,27 @@ static const struct config_field header_common[] = {
- {}
- };
-
--#define CFG_FIELD_BAR(reg_offset) \
-- { \
-- .offset = reg_offset, \
-- .size = 4, \
-- .init = bar_init, \
-- .reset = bar_reset, \
-- .release = bar_release, \
-- .u.dw.read = bar_read, \
-- .u.dw.write = bar_write, \
-- }
--
--#define CFG_FIELD_ROM(reg_offset) \
-- { \
-- .offset = reg_offset, \
-- .size = 4, \
-- .init = rom_init, \
-- .reset = bar_reset, \
-- .release = bar_release, \
-- .u.dw.read = bar_read, \
-- .u.dw.write = rom_write, \
-- }
-+#define CFG_FIELD_BAR(reg_offset) \
-+ { \
-+ .offset = reg_offset, \
-+ .size = 4, \
-+ .init = bar_init, \
-+ .reset = bar_reset, \
-+ .release = bar_release, \
-+ .u.dw.read = bar_read, \
-+ .u.dw.write = bar_write, \
-+ }
+ struct iphdr *iph;
+ unsigned char *th;
+ int err = -EPROTO;
++ int recalculate_partial_csum = 0;
+
-+#define CFG_FIELD_ROM(reg_offset) \
-+ { \
-+ .offset = reg_offset, \
-+ .size = 4, \
-+ .init = rom_init, \
-+ .reset = bar_reset, \
-+ .release = bar_release, \
-+ .u.dw.read = bar_read, \
-+ .u.dw.write = rom_write, \
++ /*
++ * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
++ * peers can fail to set NETRXF_csum_blank when sending a GSO
++ * frame. In this case force the SKB to CHECKSUM_PARTIAL and
++ * recalculate the partial checksum.
++ */
++ if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
++ netif->rx_gso_checksum_fixup++;
++ skb->ip_summed = CHECKSUM_PARTIAL;
++ recalculate_partial_csum = 1;
+ }
++
++ /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
++ if (skb->ip_summed != CHECKSUM_PARTIAL)
++ return 0;
- static const struct config_field header_0[] = {
- CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
-diff --git a/drivers/xen/pciback/controller.c b/drivers/xen/pciback/controller.c
-index 7f04f11..5a7e4cc 100644
---- a/drivers/xen/pciback/controller.c
-+++ b/drivers/xen/pciback/controller.c
-@@ -378,7 +378,7 @@ int pciback_publish_pci_roots(struct pciback_device *pdev,
- }
-
- err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
-- "%lx", (sizeof(struct acpi_resource) * 2) + 1);
-+ "%lx", (sizeof(struct acpi_resource) *2) + 1);
-
- out:
- spin_unlock(&dev_data->lock);
-diff --git a/drivers/xen/pciback/pci_stub.c b/drivers/xen/pciback/pci_stub.c
-index 88c7ca1..c8f6f29 100644
---- a/drivers/xen/pciback/pci_stub.c
-+++ b/drivers/xen/pciback/pci_stub.c
-@@ -13,7 +13,7 @@
- #include <linux/pci.h>
- #include <linux/wait.h>
- #include <linux/sched.h>
--#include <asm/atomic.h>
-+#include <linux/atomic.h>
- #include <xen/events.h>
- #include <asm/xen/pci.h>
- #include <asm/xen/hypervisor.h>
-@@ -603,7 +603,7 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
- if (test_bit(_XEN_PCIF_active,
- (unsigned long *)&psdev->pdev->sh_info->flags)) {
- dev_dbg(&psdev->dev->dev,
-- "schedule pci_conf service in pciback \n");
-+ "schedule pci_conf service in pciback\n");
- test_and_schedule_op(psdev->pdev);
- }
-
-@@ -1055,7 +1055,8 @@ static ssize_t pcistub_irq_handler_show(struct device_driver *drv, char *buf)
- if (!dev_data)
- continue;
- count +=
-- scnprintf(buf + count, PAGE_SIZE - count, "%s:%s:%sing:%ld\n",
-+ scnprintf(buf + count, PAGE_SIZE - count,
-+ "%s:%s:%sing:%ld\n",
- pci_name(psdev->dev),
- dev_data->isr_on ? "on" : "off",
- dev_data->ack_intr ? "ack" : "not ack",
-diff --git a/drivers/xen/pciback/pciback.h b/drivers/xen/pciback/pciback.h
-index fc31052..5c14020 100644
---- a/drivers/xen/pciback/pciback.h
-+++ b/drivers/xen/pciback/pciback.h
-@@ -12,7 +12,7 @@
- #include <linux/list.h>
- #include <linux/spinlock.h>
- #include <linux/workqueue.h>
--#include <asm/atomic.h>
-+#include <linux/atomic.h>
- #include <xen/interface/io/pciif.h>
-
- struct pci_dev_entry {
-@@ -20,8 +20,8 @@ struct pci_dev_entry {
- struct pci_dev *dev;
- };
-
--#define _PDEVF_op_active (0)
--#define PDEVF_op_active (1<<(_PDEVF_op_active))
-+#define _PDEVF_op_active (0)
-+#define PDEVF_op_active (1<<(_PDEVF_op_active))
- #define _PCIB_op_pending (1)
- #define PCIB_op_pending (1<<(_PCIB_op_pending))
-
-@@ -45,11 +45,11 @@ struct pciback_device {
+ if (skb->protocol != htons(ETH_P_IP))
+ goto out;
+@@ -1240,9 +1257,23 @@ static int skb_checksum_setup(struct sk_buff *skb)
+ switch (iph->protocol) {
+ case IPPROTO_TCP:
+ skb->csum_offset = offsetof(struct tcphdr, check);
++
++ if (recalculate_partial_csum) {
++ struct tcphdr *tcph = (struct tcphdr *)th;
++ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
++ skb->len - iph->ihl*4,
++ IPPROTO_TCP, 0);
++ }
+ break;
+ case IPPROTO_UDP:
+ skb->csum_offset = offsetof(struct udphdr, check);
++
++ if (recalculate_partial_csum) {
++ struct udphdr *udph = (struct udphdr *)th;
++ udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
++ skb->len - iph->ihl*4,
++ IPPROTO_UDP, 0);
++ }
+ break;
+ default:
+ if (net_ratelimit())
+@@ -1496,12 +1527,10 @@ static void net_tx_submit(struct xen_netbk *netbk)
+ skb->dev = netif->dev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
- struct pciback_dev_data {
- struct list_head config_fields;
-- unsigned int permissive : 1;
-- unsigned int warned_on_write : 1;
-- unsigned int enable_intx : 1;
-- unsigned int isr_on : 1; /* Whether the IRQ handler is installed. */
-- unsigned int ack_intr : 1; /* .. and ACK-ing */
-+ unsigned int permissive:1;
-+ unsigned int warned_on_write:1;
-+ unsigned int enable_intx:1;
-+ unsigned int isr_on:1; /* Whether the IRQ handler is installed. */
-+ unsigned int ack_intr:1; /* .. and ACK-ing */
- unsigned long handled;
- unsigned int irq; /* Saved in case device transitions to MSI/MSI-X */
- char irq_name[0]; /* pciback[000:04:00.0] */
-diff --git a/drivers/xen/pciback/pciback_ops.c b/drivers/xen/pciback/pciback_ops.c
-index 5543881..9a465e9 100644
---- a/drivers/xen/pciback/pciback_ops.c
-+++ b/drivers/xen/pciback/pciback_ops.c
-@@ -63,12 +63,11 @@ void pciback_control_isr(struct pci_dev *dev, int reset)
- dev_data->irq_name, dev);
- if (rc) {
- dev_err(&dev->dev, "%s: failed to install fake IRQ " \
-- "handler for IRQ %d! (rc:%d)\n", dev_data->irq_name,
-- dev_data->irq, rc);
-+ "handler for IRQ %d! (rc:%d)\n",
-+ dev_data->irq_name, dev_data->irq, rc);
- goto out;
- }
-- }
-- else {
-+ } else {
- free_irq(dev_data->irq, dev);
- dev_data->irq = 0;
- }
-@@ -188,7 +187,7 @@ void pciback_do_op(struct work_struct *data)
- break;
- #endif
- default:
-- op->err = XEN_PCI_ERR_not_implemented;
-+ op->err = XEN_PCI_ERR_not_implemented;
- break;
+- if (skb->ip_summed == CHECKSUM_PARTIAL) {
+- if (skb_checksum_setup(skb)) {
+- DPRINTK("Can't setup checksum in net_tx_action\n");
+- kfree_skb(skb);
+- continue;
+- }
++ if (checksum_setup(netif, skb)) {
++ DPRINTK("Can't setup checksum in net_tx_action\n");
++ kfree_skb(skb);
++ continue;
}
- }
-diff --git a/drivers/xen/pciback/xenbus.c b/drivers/xen/pciback/xenbus.c
-index 993b659..70030c4 100644
---- a/drivers/xen/pciback/xenbus.c
-+++ b/drivers/xen/pciback/xenbus.c
-@@ -700,12 +700,12 @@ static const struct xenbus_device_id xenpci_ids[] = {
- };
-
- static struct xenbus_driver xenbus_pciback_driver = {
-- .name = "pciback",
-- .owner = THIS_MODULE,
-- .ids = xenpci_ids,
-- .probe = pciback_xenbus_probe,
-- .remove = pciback_xenbus_remove,
-- .otherend_changed = pciback_frontend_changed,
-+ .name = "pciback",
-+ .owner = THIS_MODULE,
-+ .ids = xenpci_ids,
-+ .probe = pciback_xenbus_probe,
-+ .remove = pciback_xenbus_remove,
-+ .otherend_changed = pciback_frontend_changed,
- };
- int __init pciback_xenbus_register(void)
-@@ -713,7 +713,7 @@ int __init pciback_xenbus_register(void)
- pciback_wq = create_workqueue("pciback_workqueue");
- if (!pciback_wq) {
- printk(KERN_ERR "%s: create"
-- "pciback_workqueue failed\n",__FUNCTION__);
-+ "pciback_workqueue failed\n", __func__);
- return -EFAULT;
- }
- return xenbus_register_backend(&xenbus_pciback_driver);
+ if (unlikely(netbk_copy_skb_mode == NETBK_ALWAYS_COPY_SKB) &&
--
1.7.4
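
The heart of the fixup in the patch above can be sketched on its own: when a
GSO frame arrives without CHECKSUM_PARTIAL, claim partial-checksum status
and reseed the transport checksum field with the pseudo-header sum, which is
what later consumers of a partial checksum expect to find there. A
simplified IPv4/TCP-only sketch using the patch's names (the patch itself
also handles UDP and validates the headers first):

	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		struct iphdr *iph = (struct iphdr *)skb->data;
		struct tcphdr *tcph = (struct tcphdr *)
					(skb->data + 4 * iph->ihl);

		netif->rx_gso_checksum_fixup++;	/* new ethtool stat */
		skb->ip_summed = CHECKSUM_PARTIAL;

		/* CHECKSUM_PARTIAL means: the checksum field holds the
		 * pseudo-header sum; the payload sum is filled in later. */
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						 skb->len - 4 * iph->ihl,
						 IPPROTO_TCP, 0);
	}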
-From 83d24d8dbd9e52a7ac94deae2d9fff6681ce8761 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
-Date: Mon, 13 Dec 2010 11:30:29 -0500
-Subject: [PATCH 036/244] xen/xen-pciback: Swap over to DEFINE_PCI_DEVICE_TABLE
+From 929c2cd7541a48c9ab64af1b25b3e53ed396e0c5 Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Tue, 18 Jan 2011 11:37:12 +0000
+Subject: [PATCH 078/203] xen: netback: rationalise types used in count_skb_slots
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
---
- drivers/xen/pciback/pci_stub.c | 2 +-
- 1 files changed, 1 insertions(+), 1 deletions(-)
+ drivers/xen/netback/netback.c | 10 +++-------
+ 1 files changed, 3 insertions(+), 7 deletions(-)
-diff --git a/drivers/xen/pciback/pci_stub.c b/drivers/xen/pciback/pci_stub.c
-index c8f6f29..09dd60c 100644
---- a/drivers/xen/pciback/pci_stub.c
-+++ b/drivers/xen/pciback/pci_stub.c
-@@ -497,7 +497,7 @@ static void pcistub_remove(struct pci_dev *dev)
- }
- }
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 8189199..22c1fa5 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -277,14 +277,10 @@ static inline int netbk_queue_full(struct xen_netif *netif)
+
+ /* Figure out how many ring slots we're going to need to send @skb to
+ the guest. */
+-static unsigned count_skb_slots(struct sk_buff *skb, struct xen_netif *netif)
++static unsigned int count_skb_slots(struct sk_buff *skb, struct xen_netif *netif)
+ {
+- unsigned count;
+- unsigned copy_off;
+- unsigned i;
+-
+- copy_off = 0;
+- count = 1;
++ unsigned int count = 1;
++ int i, copy_off = 0;
+
+ BUG_ON(offset_in_page(skb->data) + skb_headlen(skb) > MAX_BUFFER_OFFSET);
--static const struct pci_device_id pcistub_ids[] = {
-+DEFINE_PCI_DEVICE_TABLE(pcistub_ids) = {
- {
- .vendor = PCI_ANY_ID,
- .device = PCI_ANY_ID,
--
1.7.4
-From 03c4949992e2b7e84b7cdeb156d803db3f848b6c Mon Sep 17 00:00:00 2001
+From 0ded854cc2e58205d857df3d96ef6f9a108a4a66 Mon Sep 17 00:00:00 2001
From: Ian Campbell <ian.campbell(a)citrix.com>
-Date: Mon, 9 Feb 2009 12:05:52 -0800
-Subject: [PATCH 037/244] xen: netback: Initial import of linux-2.6.18-xen.hg netback driver.
-
-This corresponds to 774:107e10e0e07c in that tree.
+Date: Tue, 18 Jan 2011 11:21:35 +0000
+Subject: [PATCH 079/203] xen: netback: refactor logic for moving to a new receive buffer.
Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
---
- drivers/xen/Kconfig | 7 +
- drivers/xen/Makefile | 1 +
- drivers/xen/netback/Makefile | 3 +
- drivers/xen/netback/common.h | 217 ++++++
- drivers/xen/netback/interface.c | 336 ++++++++
- drivers/xen/netback/netback.c | 1637 +++++++++++++++++++++++++++++++++++++++
- drivers/xen/netback/xenbus.c | 454 +++++++++++
- 7 files changed, 2655 insertions(+), 0 deletions(-)
- create mode 100644 drivers/xen/netback/Makefile
- create mode 100644 drivers/xen/netback/common.h
- create mode 100644 drivers/xen/netback/interface.c
- create mode 100644 drivers/xen/netback/netback.c
- create mode 100644 drivers/xen/netback/xenbus.c
+ drivers/xen/netback/netback.c | 80 ++++++++++++++++++++++++-----------------
+ 1 files changed, 47 insertions(+), 33 deletions(-)
-diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
-index 5a48ce9..7e83d43 100644
---- a/drivers/xen/Kconfig
-+++ b/drivers/xen/Kconfig
-@@ -37,6 +37,13 @@ config XEN_BACKEND
- depends on XEN_PCIDEV_BACKEND
-
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 22c1fa5..909e0ef 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -275,8 +275,51 @@ static inline int netbk_queue_full(struct xen_netif *netif)
+ ((netif->rx.rsp_prod_pvt + NET_RX_RING_SIZE - peek) < needed);
+ }
-+config XEN_NETDEV_BACKEND
-+ bool "Xen backend network device"
-+ depends on XEN_BACKEND && NET
-+ help
-+ Implement the network backend driver, which passes packets
-+ from the guest domain's frontend drivers to the network.
-+
- config XENFS
- tristate "Xen filesystem"
- default y
-diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
-index 533a199..c0e0509 100644
---- a/drivers/xen/Makefile
-+++ b/drivers/xen/Makefile
-@@ -9,6 +9,7 @@ obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
- obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o
- obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o
- obj-$(CONFIG_XEN_PCIDEV_BACKEND) += pciback/
-+obj-$(CONFIG_XEN_NETDEV_BACKEND) += netback/
- obj-$(CONFIG_XENFS) += xenfs/
- obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
- obj-$(CONFIG_XEN_PLATFORM_PCI) += platform-pci.o
-diff --git a/drivers/xen/netback/Makefile b/drivers/xen/netback/Makefile
-new file mode 100644
-index 0000000..f4a0c51
---- /dev/null
-+++ b/drivers/xen/netback/Makefile
-@@ -0,0 +1,3 @@
-+obj-$(CONFIG_XEN_NETDEV_BACKEND) := netbk.o
-+
-+netbk-y := netback.o xenbus.o interface.o
-diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
-new file mode 100644
-index 0000000..9a54d57
---- /dev/null
-+++ b/drivers/xen/netback/common.h
-@@ -0,0 +1,217 @@
-+/******************************************************************************
-+ * arch/xen/drivers/netif/backend/common.h
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __NETIF__BACKEND__COMMON_H__
-+#define __NETIF__BACKEND__COMMON_H__
-+
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/slab.h>
-+#include <linux/ip.h>
-+#include <linux/in.h>
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/wait.h>
-+#include <xen/evtchn.h>
-+#include <xen/interface/io/netif.h>
-+#include <asm/io.h>
-+#include <asm/pgalloc.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/gnttab.h>
-+#include <xen/driver_util.h>
-+#include <xen/xenbus.h>
-+
-+#define DPRINTK(_f, _a...) \
-+ pr_debug("(file=%s, line=%d) " _f, \
-+ __FILE__ , __LINE__ , ## _a )
-+#define IPRINTK(fmt, args...) \
-+ printk(KERN_INFO "xen_net: " fmt, ##args)
-+#define WPRINTK(fmt, args...) \
-+ printk(KERN_WARNING "xen_net: " fmt, ##args)
-+
-+typedef struct netif_st {
-+ /* Unique identifier for this interface. */
-+ domid_t domid;
-+ unsigned int handle;
-+
-+ u8 fe_dev_addr[6];
-+
-+ /* Physical parameters of the comms window. */
-+ grant_handle_t tx_shmem_handle;
-+ grant_ref_t tx_shmem_ref;
-+ grant_handle_t rx_shmem_handle;
-+ grant_ref_t rx_shmem_ref;
-+ unsigned int irq;
-+
-+ /* The shared rings and indexes. */
-+ netif_tx_back_ring_t tx;
-+ netif_rx_back_ring_t rx;
-+ struct vm_struct *tx_comms_area;
-+ struct vm_struct *rx_comms_area;
-+
-+ /* Set of features that can be turned on in dev->features. */
-+ int features;
-+
-+ /* Internal feature information. */
-+ u8 can_queue:1; /* can queue packets for receiver? */
-+ u8 copying_receiver:1; /* copy packets to receiver? */
-+
-+ /* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
-+ RING_IDX rx_req_cons_peek;
-+
-+ /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
-+ unsigned long credit_bytes;
-+ unsigned long credit_usec;
-+ unsigned long remaining_credit;
-+ struct timer_list credit_timeout;
-+
-+ /* Enforce draining of the transmit queue. */
-+ struct timer_list tx_queue_timeout;
-+
-+ /* Miscellaneous private stuff. */
-+ struct list_head list; /* scheduling list */
-+ atomic_t refcnt;
-+ struct net_device *dev;
-+ struct net_device_stats stats;
-+
-+ unsigned int carrier;
-+
-+ wait_queue_head_t waiting_to_free;
-+} netif_t;
-+
-+/*
-+ * Implement our own carrier flag: the network stack's version causes delays
-+ * when the carrier is re-enabled (in particular, dev_activate() may not
-+ * immediately be called, which can cause packet loss; also the etherbridge
-+ * can be rather lazy in activating its port).
-+ */
-+#define netback_carrier_on(netif) ((netif)->carrier = 1)
-+#define netback_carrier_off(netif) ((netif)->carrier = 0)
-+#define netback_carrier_ok(netif) ((netif)->carrier)
-+
-+enum {
-+ NETBK_DONT_COPY_SKB,
-+ NETBK_DELAYED_COPY_SKB,
-+ NETBK_ALWAYS_COPY_SKB,
-+};
-+
-+extern int netbk_copy_skb_mode;
-+
-+/* Function pointers into netback accelerator plugin modules */
-+struct netback_accel_hooks {
-+ struct module *owner;
-+ int (*probe)(struct xenbus_device *dev);
-+ int (*remove)(struct xenbus_device *dev);
-+};
-+
-+/* Structure to track the state of a netback accelerator plugin */
-+struct netback_accelerator {
-+ struct list_head link;
-+ int id;
-+ char *eth_name;
-+ atomic_t use_count;
-+ struct netback_accel_hooks *hooks;
-+};
-+
-+struct backend_info {
-+ struct xenbus_device *dev;
-+ netif_t *netif;
-+ enum xenbus_state frontend_state;
-+
-+ /* State relating to the netback accelerator */
-+ void *netback_accel_priv;
-+ /* The accelerator that this backend is currently using */
-+ struct netback_accelerator *accelerator;
-+};
-+
-+#define NETBACK_ACCEL_VERSION 0x00010001
-+
+-/* Figure out how many ring slots we're going to need to send @skb to
+- the guest. */
+/*
-+ * Connect an accelerator plugin module to netback. Returns zero on
-+ * success, < 0 on error, > 0 (with highest version number supported)
-+ * if version mismatch.
++ * Returns true if we should start a new receive buffer instead of
++ * adding 'size' bytes to a buffer which currently contains 'offset'
++ * bytes.
+ */
-+extern int netback_connect_accelerator(unsigned version,
-+ int id, const char *eth_name,
-+ struct netback_accel_hooks *hooks);
-+/* Disconnect a previously connected accelerator plugin module */
-+extern void netback_disconnect_accelerator(int id, const char *eth_name);
-+
-+
-+extern
-+void netback_probe_accelerators(struct backend_info *be,
-+ struct xenbus_device *dev);
-+extern
-+void netback_remove_accelerators(struct backend_info *be,
-+ struct xenbus_device *dev);
-+extern
-+void netif_accel_init(void);
-+
-+
-+#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
-+#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
-+
-+void netif_disconnect(netif_t *netif);
-+
-+netif_t *netif_alloc(domid_t domid, unsigned int handle);
-+int netif_map(netif_t *netif, unsigned long tx_ring_ref,
-+ unsigned long rx_ring_ref, unsigned int evtchn);
-+
-+#define netif_get(_b) (atomic_inc(&(_b)->refcnt))
-+#define netif_put(_b) \
-+ do { \
-+ if ( atomic_dec_and_test(&(_b)->refcnt) ) \
-+ wake_up(&(_b)->waiting_to_free); \
-+ } while (0)
-+
-+void netif_xenbus_init(void);
-+
-+#define netif_schedulable(netif) \
-+ (netif_running((netif)->dev) && netback_carrier_ok(netif))
-+
-+void netif_schedule_work(netif_t *netif);
-+void netif_deschedule_work(netif_t *netif);
-+
-+int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
-+struct net_device_stats *netif_be_get_stats(struct net_device *dev);
-+irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
-+
-+static inline int netbk_can_queue(struct net_device *dev)
-+{
-+ netif_t *netif = netdev_priv(dev);
-+ return netif->can_queue;
-+}
-+
-+static inline int netbk_can_sg(struct net_device *dev)
++static bool start_new_rx_buffer(int offset, unsigned long size, int head)
+{
-+ netif_t *netif = netdev_priv(dev);
-+ return netif->features & NETIF_F_SG;
-+}
++ /* simple case: we have completely filled the current buffer. */
++ if (offset == MAX_BUFFER_OFFSET)
++ return true;
+
-+#endif /* __NETIF__BACKEND__COMMON_H__ */
-diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
-new file mode 100644
-index 0000000..7e67941
---- /dev/null
-+++ b/drivers/xen/netback/interface.c
-@@ -0,0 +1,336 @@
-+/******************************************************************************
-+ * arch/xen/drivers/netif/backend/interface.c
-+ *
-+ * Network-device interface management.
-+ *
-+ * Copyright (c) 2004-2005, Keir Fraser
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
++ /*
++ * complex case: start a fresh buffer if the current frag
++ * would overflow the current buffer, but only if:
++ * (i) this frag would fit completely in the next buffer
++ * and (ii) there is already some data in the current buffer
++ * and (iii) this is not the head buffer.
++ *
++ * Where:
++ * - (i) stops us splitting a frag into two copies
++ * unless the frag is too large for a single buffer.
++ * - (ii) stops us from leaving a buffer pointlessly empty.
++ * - (iii) stops us leaving the first buffer
++ * empty. Strictly speaking this is already covered
++ * by (ii) but is explicitly checked because
++ * netfront relies on the first buffer being
++ * non-empty and can crash otherwise.
++ *
++ * This means we will effectively linearise small
++ * frags but do not needlessly split large buffers
++ * into multiple copies, tending to give large frags
++ * their own buffers as before.
++ */
++ if ((offset + size > MAX_BUFFER_OFFSET) &&
++ (size <= MAX_BUFFER_OFFSET) && offset && !head)
++ return true;
+
-+#include "common.h"
-+#include <linux/ethtool.h>
-+#include <linux/rtnetlink.h>
++ return false;
++}
+
+/*
-+ * Module parameter 'queue_length':
-+ *
-+ * Enables queuing in the network stack when a client has run out of receive
-+ * descriptors. Although this feature can improve receive bandwidth by avoiding
-+ * packet loss, it can also result in packets sitting in the 'tx_queue' for
-+ * unbounded time. This is bad if those packets hold onto foreign resources.
-+ * For example, consider a packet that holds onto resources belonging to the
-+ * guest for which it is queued (e.g., packet received on vif1.0, destined for
-+ * vif1.1 which is not activated in the guest): in this situation the guest
-+ * will never be destroyed, unless vif1.1 is taken down. To avoid this, we
-+ * run a timer (tx_queue_timeout) to drain the queue when the interface is
-+ * blocked.
++ * Figure out how many ring slots we're going to need to send @skb to
++ * the guest. This function is essentially a dry run of
++ * netbk_gop_frag_copy.
+ */
-+static unsigned long netbk_queue_length = 32;
-+module_param_named(queue_length, netbk_queue_length, ulong, 0);
-+
-+static void __netif_up(netif_t *netif)
-+{
-+ enable_irq(netif->irq);
-+ netif_schedule_work(netif);
-+}
-+
-+static void __netif_down(netif_t *netif)
-+{
-+ disable_irq(netif->irq);
-+ netif_deschedule_work(netif);
-+}
-+
-+static int net_open(struct net_device *dev)
-+{
-+ netif_t *netif = netdev_priv(dev);
-+ if (netback_carrier_ok(netif)) {
-+ __netif_up(netif);
-+ netif_start_queue(dev);
-+ }
-+ return 0;
-+}
-+
-+static int net_close(struct net_device *dev)
-+{
-+ netif_t *netif = netdev_priv(dev);
-+ if (netback_carrier_ok(netif))
-+ __netif_down(netif);
-+ netif_stop_queue(dev);
-+ return 0;
-+}
-+
-+static int netbk_change_mtu(struct net_device *dev, int mtu)
+ static unsigned int count_skb_slots(struct sk_buff *skb, struct xen_netif *netif)
+ {
+ unsigned int count = 1;
+@@ -295,9 +338,7 @@ static unsigned int count_skb_slots(struct sk_buff *skb, struct xen_netif *netif
+ while (size > 0) {
+ BUG_ON(copy_off > MAX_BUFFER_OFFSET);
+
+- /* These checks are the same as in netbk_gop_frag_copy */
+- if (copy_off == MAX_BUFFER_OFFSET
+- || ((copy_off + size > MAX_BUFFER_OFFSET) && (size <= MAX_BUFFER_OFFSET) && copy_off)) {
++ if (start_new_rx_buffer(copy_off, size, 0)) {
+ count++;
+ copy_off = 0;
+ }
+@@ -403,34 +444,7 @@ static void netbk_gop_frag_copy(struct xen_netif *netif,
+ while (size > 0) {
+ BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
+
+- /*
+- * Move to a new receive buffer if:
+- *
+- * simple case: we have completely filled the current buffer.
+- *
+- * complex case: the current frag would overflow
+- * the current buffer but only if:
+- * (i) this frag would fit completely in the next buffer
+- * and (ii) there is already some data in the current buffer
+- * and (iii) this is not the head buffer.
+- *
+- * Where:
+- * - (i) stops us splitting a frag into two copies
+- * unless the frag is too large for a single buffer.
+- * - (ii) stops us from leaving a buffer pointlessly empty.
+- * - (iii) stops us leaving the first buffer
+- * empty. Strictly speaking this is already covered
+- * by (ii) but is explicitly checked because
+- * netfront relies on the first buffer being
+- * non-empty and can crash otherwise.
+- *
+- * This means we will effectively linearise small
+- * frags but do not needlessly split large buffers
+- * into multiple copies tend to give large frags their
+- * own buffers as before.
+- */
+- if (npo->copy_off == MAX_BUFFER_OFFSET
+- || ((npo->copy_off + size > MAX_BUFFER_OFFSET) && (size <= MAX_BUFFER_OFFSET) && npo->copy_off && !head)) {
++ if (start_new_rx_buffer(npo->copy_off, size, head)) {
+ struct xen_netif_rx_request *req;
+
+ BUG_ON(head); /* Netfront requires there to be some data in the head buffer. */
+--
+1.7.4
+
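The start_new_rx_buffer() predicate introduced by the patch above is pure
logic and can be exercised outside the kernel. A minimal userspace sketch
(MAX_BUFFER_OFFSET is PAGE_SIZE in the driver; 4096 is assumed here, and the
worked cases are illustrative, not taken from the patch):

#include <stdbool.h>
#include <stdio.h>

#define MAX_BUFFER_OFFSET 4096

static bool start_new_rx_buffer(int offset, unsigned long size, int head)
{
	/* simple case: the current buffer is completely full */
	if (offset == MAX_BUFFER_OFFSET)
		return true;

	/* complex case: the frag would overflow the current buffer, and
	 * (i) it fits completely in the next buffer, (ii) the current
	 * buffer already holds data, (iii) this is not the head buffer */
	if ((offset + size > MAX_BUFFER_OFFSET) &&
	    (size <= MAX_BUFFER_OFFSET) && offset && !head)
		return true;

	return false;
}

int main(void)
{
	/* 3000-byte frag, 2000 bytes already buffered: fits whole in the
	 * next buffer, so start a fresh one rather than split -> 1 */
	printf("%d\n", start_new_rx_buffer(2000, 3000, 0));

	/* a 5000-byte frag can never fit one buffer, so (i) fails and it
	 * is split across the current and following buffers -> 0 */
	printf("%d\n", start_new_rx_buffer(2000, 5000, 0));

	/* same sizes but in the head buffer: (iii) fails, so the frag is
	 * split instead -> 0 */
	printf("%d\n", start_new_rx_buffer(2000, 3000, 1));

	return 0;
}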
+
+From 9e79b27c9369d25ca86abf66611a700783ce1ef2 Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Wed, 2 Feb 2011 11:12:39 +0000
+Subject: [PATCH 080/203] xen: netback: refactor code to get next rx buffer into own function.
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+---
+ drivers/xen/netback/netback.c | 37 ++++++++++++++++++++++++++-----------
+ 1 files changed, 26 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index 909e0ef..a8ee1c2 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -418,6 +418,25 @@ struct netrx_pending_operations {
+ grant_ref_t copy_gref;
+ };
+
++static struct netbk_rx_meta *get_next_rx_buffer(struct xen_netif *netif,
++ struct netrx_pending_operations *npo)
+{
-+ int max = netbk_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
++ struct netbk_rx_meta *meta;
++ struct xen_netif_rx_request *req;
+
-+ if (mtu > max)
-+ return -EINVAL;
-+ dev->mtu = mtu;
-+ return 0;
-+}
++ req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons++);
+
-+static int netbk_set_sg(struct net_device *dev, u32 data)
-+{
-+ if (data) {
-+ netif_t *netif = netdev_priv(dev);
++ meta = npo->meta + npo->meta_prod++;
++ meta->gso_size = 0;
++ meta->size = 0;
++ meta->id = req->id;
+
-+ if (!(netif->features & NETIF_F_SG))
-+ return -ENOSYS;
-+ }
++ npo->copy_off = 0;
++ npo->copy_gref = req->gref;
+
-+ return ethtool_op_set_sg(dev, data);
++ return meta;
+}
+
-+static int netbk_set_tso(struct net_device *dev, u32 data)
-+{
-+ if (data) {
-+ netif_t *netif = netdev_priv(dev);
+ /* Set up the grant operations for this fragment. If it's a flipping
+ interface, we also set up the unmap request from here. */
+
+@@ -445,17 +464,13 @@ static void netbk_gop_frag_copy(struct xen_netif *netif,
+ BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
+
+ if (start_new_rx_buffer(npo->copy_off, size, head)) {
+- struct xen_netif_rx_request *req;
+-
+- BUG_ON(head); /* Netfront requires there to be some data in the head buffer. */
+- /* Overflowed this request, go to the next one */
+- req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons++);
+- meta = npo->meta + npo->meta_prod++;
+- meta->gso_size = 0;
+- meta->size = 0;
+- meta->id = req->id;
+- npo->copy_off = 0;
+- npo->copy_gref = req->gref;
++ /*
++ * Netfront requires there to be some data in the head
++ * buffer.
++ */
++ BUG_ON(head);
+
-+ if (!(netif->features & NETIF_F_TSO))
-+ return -ENOSYS;
-+ }
-+
-+ return ethtool_op_set_tso(dev, data);
-+}
-+
-+static struct ethtool_ops network_ethtool_ops =
-+{
-+ .get_tx_csum = ethtool_op_get_tx_csum,
-+ .set_tx_csum = ethtool_op_set_tx_csum,
-+ .get_sg = ethtool_op_get_sg,
-+ .set_sg = netbk_set_sg,
-+ .get_tso = ethtool_op_get_tso,
-+ .set_tso = netbk_set_tso,
-+ .get_link = ethtool_op_get_link,
-+};
-+
-+netif_t *netif_alloc(domid_t domid, unsigned int handle)
-+{
-+ int err = 0;
-+ struct net_device *dev;
-+ netif_t *netif;
-+ char name[IFNAMSIZ] = {};
-+
-+ snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
-+ dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
-+ if (dev == NULL) {
-+ DPRINTK("Could not create netif: out of memory\n");
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ netif = netdev_priv(dev);
-+ memset(netif, 0, sizeof(*netif));
-+ netif->domid = domid;
-+ netif->handle = handle;
-+ atomic_set(&netif->refcnt, 1);
-+ init_waitqueue_head(&netif->waiting_to_free);
-+ netif->dev = dev;
-+
-+ netback_carrier_off(netif);
-+
-+ netif->credit_bytes = netif->remaining_credit = ~0UL;
-+ netif->credit_usec = 0UL;
-+ init_timer(&netif->credit_timeout);
-+ /* Initialize 'expires' now: it's used to track the credit window. */
-+ netif->credit_timeout.expires = jiffies;
-+
-+ init_timer(&netif->tx_queue_timeout);
-+
-+ dev->hard_start_xmit = netif_be_start_xmit;
-+ dev->get_stats = netif_be_get_stats;
-+ dev->open = net_open;
-+ dev->stop = net_close;
-+ dev->change_mtu = netbk_change_mtu;
-+ dev->features = NETIF_F_IP_CSUM;
-+
-+ SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
-+
-+ dev->tx_queue_len = netbk_queue_length;
-+
-+ /*
-+ * Initialise a dummy MAC address. We choose the numerically
-+ * largest non-broadcast address to prevent the address getting
-+ * stolen by an Ethernet bridge for STP purposes.
-+ * (FE:FF:FF:FF:FF:FF)
-+ */
-+ memset(dev->dev_addr, 0xFF, ETH_ALEN);
-+ dev->dev_addr[0] &= ~0x01;
-+
-+ rtnl_lock();
-+ err = register_netdevice(dev);
-+ rtnl_unlock();
-+ if (err) {
-+ DPRINTK("Could not register new net device %s: err=%d\n",
-+ dev->name, err);
-+ free_netdev(dev);
-+ return ERR_PTR(err);
-+ }
-+
-+ DPRINTK("Successfully created netif\n");
-+ return netif;
-+}
-+
-+static int map_frontend_pages(
-+ netif_t *netif, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref)
-+{
-+ struct gnttab_map_grant_ref op;
-+
-+ gnttab_set_map_op(&op, (unsigned long)netif->tx_comms_area->addr,
-+ GNTMAP_host_map, tx_ring_ref, netif->domid);
-+
-+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-+ BUG();
-+
-+ if (op.status) {
-+ DPRINTK(" Gnttab failure mapping tx_ring_ref!\n");
-+ return op.status;
-+ }
-+
-+ netif->tx_shmem_ref = tx_ring_ref;
-+ netif->tx_shmem_handle = op.handle;
-+
-+ gnttab_set_map_op(&op, (unsigned long)netif->rx_comms_area->addr,
-+ GNTMAP_host_map, rx_ring_ref, netif->domid);
-+
-+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-+ BUG();
-+
-+ if (op.status) {
-+ DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
-+ return op.status;
-+ }
-+
-+ netif->rx_shmem_ref = rx_ring_ref;
-+ netif->rx_shmem_handle = op.handle;
-+
-+ return 0;
-+}
-+
-+static void unmap_frontend_pages(netif_t *netif)
-+{
-+ struct gnttab_unmap_grant_ref op;
-+
-+ gnttab_set_unmap_op(&op, (unsigned long)netif->tx_comms_area->addr,
-+ GNTMAP_host_map, netif->tx_shmem_handle);
-+
-+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-+ BUG();
-+
-+ gnttab_set_unmap_op(&op, (unsigned long)netif->rx_comms_area->addr,
-+ GNTMAP_host_map, netif->rx_shmem_handle);
-+
-+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-+ BUG();
-+}
-+
-+int netif_map(netif_t *netif, unsigned long tx_ring_ref,
-+ unsigned long rx_ring_ref, unsigned int evtchn)
-+{
-+ int err = -ENOMEM;
-+ netif_tx_sring_t *txs;
-+ netif_rx_sring_t *rxs;
-+
-+ /* Already connected through? */
-+ if (netif->irq)
-+ return 0;
-+
-+ netif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
-+ if (netif->tx_comms_area == NULL)
-+ return -ENOMEM;
-+ netif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
-+ if (netif->rx_comms_area == NULL)
-+ goto err_rx;
-+
-+ err = map_frontend_pages(netif, tx_ring_ref, rx_ring_ref);
-+ if (err)
-+ goto err_map;
-+
-+ err = bind_interdomain_evtchn_to_irqhandler(
-+ netif->domid, evtchn, netif_be_int, 0,
-+ netif->dev->name, netif);
-+ if (err < 0)
-+ goto err_hypervisor;
-+ netif->irq = err;
-+ disable_irq(netif->irq);
-+
-+ txs = (netif_tx_sring_t *)netif->tx_comms_area->addr;
-+ BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);
-+
-+ rxs = (netif_rx_sring_t *)
-+ ((char *)netif->rx_comms_area->addr);
-+ BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);
-+
-+ netif->rx_req_cons_peek = 0;
-+
-+ netif_get(netif);
-+
-+ rtnl_lock();
-+ netback_carrier_on(netif);
-+ if (netif_running(netif->dev))
-+ __netif_up(netif);
-+ rtnl_unlock();
-+
-+ return 0;
-+err_hypervisor:
-+ unmap_frontend_pages(netif);
-+err_map:
-+ free_vm_area(netif->rx_comms_area);
-+err_rx:
-+ free_vm_area(netif->tx_comms_area);
-+ return err;
-+}
-+
-+void netif_disconnect(netif_t *netif)
-+{
-+ if (netback_carrier_ok(netif)) {
-+ rtnl_lock();
-+ netback_carrier_off(netif);
-+ netif_carrier_off(netif->dev); /* discard queued packets */
-+ if (netif_running(netif->dev))
-+ __netif_down(netif);
-+ rtnl_unlock();
-+ netif_put(netif);
-+ }
-+
-+ atomic_dec(&netif->refcnt);
-+ wait_event(netif->waiting_to_free, atomic_read(&netif->refcnt) == 0);
-+
-+ del_timer_sync(&netif->credit_timeout);
-+ del_timer_sync(&netif->tx_queue_timeout);
-+
-+ if (netif->irq)
-+ unbind_from_irqhandler(netif->irq, netif);
-+
-+ unregister_netdev(netif->dev);
-+
-+ if (netif->tx.sring) {
-+ unmap_frontend_pages(netif);
-+ free_vm_area(netif->tx_comms_area);
-+ free_vm_area(netif->rx_comms_area);
-+ }
-+
-+ free_netdev(netif->dev);
-+}
++ meta = get_next_rx_buffer(netif, npo);
+ }
+
+ bytes = size;
+--
+1.7.4
+
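The helper factored out above captures one step of the rx ring walk: consume
the next guest-posted request, open an empty meta slot against it, and reset
the copy cursor. A reduced userspace model of that bookkeeping (type and
field names loosely mirror the driver; the ring is modelled as a plain
array, not the driver's RING_GET_REQUEST machinery):

#define RING_SIZE 256

struct rx_request { unsigned short id; unsigned int gref; };
struct rx_meta { int gso_size; int size; unsigned short id; };

struct pending_ops {
	struct rx_meta meta[RING_SIZE];
	unsigned int meta_prod;
	int copy_off;		/* bytes already placed in the current buffer */
	unsigned int copy_gref;	/* grant ref the next copy targets */
};

/* mirrors get_next_rx_buffer(): advance the consumer index and open an
 * empty buffer bound to the request's grant reference */
static struct rx_meta *next_rx_buffer(struct rx_request *ring,
				      unsigned int *req_cons,
				      struct pending_ops *npo)
{
	struct rx_request *req = &ring[(*req_cons)++ % RING_SIZE];
	struct rx_meta *meta = &npo->meta[npo->meta_prod++];

	meta->gso_size = 0;
	meta->size = 0;
	meta->id = req->id;

	npo->copy_off = 0;	/* the new buffer starts empty */
	npo->copy_gref = req->gref;
	return meta;
}

int main(void)
{
	static struct rx_request ring[RING_SIZE] = { { 7, 0xabcd } };
	struct pending_ops npo = { .copy_off = 123 };
	unsigned int req_cons = 0;
	struct rx_meta *meta = next_rx_buffer(ring, &req_cons, &npo);

	/* expect: meta->id == 7, npo.copy_off == 0, req_cons == 1 */
	return !(meta->id == 7 && npo.copy_off == 0 && req_cons == 1);
}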
+
+From e02c96928e165eb351ded4cc78cce4c615778be3 Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Wed, 2 Feb 2011 11:14:17 +0000
+Subject: [PATCH 081/203] xen: netback: simplify use of netbk_add_frag_responses
+
+Move all the logic into the function instead of having some in the caller.
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+---
+ drivers/xen/netback/netback.c | 14 +++++++++-----
+ 1 files changed, 9 insertions(+), 5 deletions(-)
+
diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
-new file mode 100644
-index 0000000..db629d4
---- /dev/null
+index a8ee1c2..cfe7931 100644
+--- a/drivers/xen/netback/netback.c
+++ b/drivers/xen/netback/netback.c
-@@ -0,0 +1,1637 @@
-+/******************************************************************************
-+ * drivers/xen/netback/netback.c
-+ *
-+ * Back-end of the driver for virtual network devices. This portion of the
-+ * driver exports a 'unified' network-device interface that can be accessed
-+ * by any operating system that implements a compatible front end. A
-+ * reference front-end implementation can be found in:
-+ * drivers/xen/netfront/netfront.c
-+ *
-+ * Copyright (c) 2002-2005, K A Fraser
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include "common.h"
-+#include <xen/balloon.h>
-+#include <xen/interface/memory.h>
-+
-+/*define NETBE_DEBUG_INTERRUPT*/
-+
-+struct netbk_rx_meta {
-+ skb_frag_t frag;
-+ int id;
-+ u8 copy:1;
-+};
-+
-+struct netbk_tx_pending_inuse {
-+ struct list_head list;
-+ unsigned long alloc_time;
-+};
-+
-+static void netif_idx_release(u16 pending_idx);
-+static void make_tx_response(netif_t *netif,
-+ netif_tx_request_t *txp,
-+ s8 st);
-+static netif_rx_response_t *make_rx_response(netif_t *netif,
-+ u16 id,
-+ s8 st,
-+ u16 offset,
-+ u16 size,
-+ u16 flags);
-+
-+static void net_tx_action(unsigned long unused);
-+static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
-+
-+static void net_rx_action(unsigned long unused);
-+static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
-+
-+static struct timer_list net_timer;
-+static struct timer_list netbk_tx_pending_timer;
-+
-+#define MAX_PENDING_REQS 256
-+
-+static struct sk_buff_head rx_queue;
-+
-+static struct page **mmap_pages;
-+static inline unsigned long idx_to_pfn(unsigned int idx)
-+{
-+ return page_to_pfn(mmap_pages[idx]);
-+}
-+
-+static inline unsigned long idx_to_kaddr(unsigned int idx)
-+{
-+ return (unsigned long)pfn_to_kaddr(idx_to_pfn(idx));
-+}
-+
-+/* extra field used in struct page */
-+static inline void netif_set_page_index(struct page *pg, unsigned int index)
-+{
-+ *(unsigned long *)&pg->mapping = index;
-+}
-+
-+static inline int netif_page_index(struct page *pg)
-+{
-+ unsigned long idx = (unsigned long)pg->mapping;
-+
-+ if (!PageForeign(pg))
-+ return -1;
-+
-+ if ((idx >= MAX_PENDING_REQS) || (mmap_pages[idx] != pg))
-+ return -1;
-+
-+ return idx;
-+}
-+
-+#define PKT_PROT_LEN 64
-+
-+static struct pending_tx_info {
-+ netif_tx_request_t req;
-+ netif_t *netif;
-+} pending_tx_info[MAX_PENDING_REQS];
-+static u16 pending_ring[MAX_PENDING_REQS];
-+typedef unsigned int PEND_RING_IDX;
-+#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
-+static PEND_RING_IDX pending_prod, pending_cons;
-+#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
-+
-+/* Freed TX SKBs get batched on this ring before return to pending_ring. */
-+static u16 dealloc_ring[MAX_PENDING_REQS];
-+static PEND_RING_IDX dealloc_prod, dealloc_cons;
-+
-+/* Doubly-linked list of in-use pending entries. */
-+static struct netbk_tx_pending_inuse pending_inuse[MAX_PENDING_REQS];
-+static LIST_HEAD(pending_inuse_head);
-+
-+static struct sk_buff_head tx_queue;
-+
-+static grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
-+static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS];
-+static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS];
-+
-+static struct list_head net_schedule_list;
-+static spinlock_t net_schedule_list_lock;
-+
-+#define MAX_MFN_ALLOC 64
-+static unsigned long mfn_list[MAX_MFN_ALLOC];
-+static unsigned int alloc_index = 0;
-+
-+/* Setting this allows the safe use of this driver without netloop. */
-+static int MODPARM_copy_skb = 1;
-+module_param_named(copy_skb, MODPARM_copy_skb, bool, 0);
-+MODULE_PARM_DESC(copy_skb, "Copy data received from netfront without netloop");
-+
-+int netbk_copy_skb_mode;
-+
-+static inline unsigned long alloc_mfn(void)
-+{
-+ BUG_ON(alloc_index == 0);
-+ return mfn_list[--alloc_index];
-+}
-+
-+static int check_mfn(int nr)
-+{
-+ struct xen_memory_reservation reservation = {
-+ .extent_order = 0,
-+ .domid = DOMID_SELF
-+ };
-+ int rc;
-+
-+ if (likely(alloc_index >= nr))
-+ return 0;
-+
-+ set_xen_guest_handle(reservation.extent_start, mfn_list + alloc_index);
-+ reservation.nr_extents = MAX_MFN_ALLOC - alloc_index;
-+ rc = HYPERVISOR_memory_op(XENMEM_increase_reservation, &reservation);
-+ if (likely(rc > 0))
-+ alloc_index += rc;
-+
-+ return alloc_index >= nr ? 0 : -ENOMEM;
-+}
-+
-+static inline void maybe_schedule_tx_action(void)
-+{
-+ smp_mb();
-+ if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
-+ !list_empty(&net_schedule_list))
-+ tasklet_schedule(&net_tx_tasklet);
-+}
-+
-+static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
-+{
-+ struct skb_shared_info *ninfo;
-+ struct sk_buff *nskb;
-+ unsigned long offset;
-+ int ret;
-+ int len;
-+ int headlen;
-+
-+ BUG_ON(skb_shinfo(skb)->frag_list != NULL);
-+
-+ nskb = alloc_skb(SKB_MAX_HEAD(0), GFP_ATOMIC | __GFP_NOWARN);
-+ if (unlikely(!nskb))
-+ goto err;
-+
-+ skb_reserve(nskb, 16 + NET_IP_ALIGN);
-+ headlen = nskb->end - nskb->data;
-+ if (headlen > skb_headlen(skb))
-+ headlen = skb_headlen(skb);
-+ ret = skb_copy_bits(skb, 0, __skb_put(nskb, headlen), headlen);
-+ BUG_ON(ret);
-+
-+ ninfo = skb_shinfo(nskb);
-+ ninfo->gso_size = skb_shinfo(skb)->gso_size;
-+ ninfo->gso_type = skb_shinfo(skb)->gso_type;
-+
-+ offset = headlen;
-+ len = skb->len - headlen;
-+
-+ nskb->len = skb->len;
-+ nskb->data_len = len;
-+ nskb->truesize += len;
-+
-+ while (len) {
-+ struct page *page;
-+ int copy;
-+ int zero;
-+
-+ if (unlikely(ninfo->nr_frags >= MAX_SKB_FRAGS)) {
-+ dump_stack();
-+ goto err_free;
-+ }
-+
-+ copy = len >= PAGE_SIZE ? PAGE_SIZE : len;
-+ zero = len >= PAGE_SIZE ? 0 : __GFP_ZERO;
-+
-+ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN | zero);
-+ if (unlikely(!page))
-+ goto err_free;
-+
-+ ret = skb_copy_bits(skb, offset, page_address(page), copy);
-+ BUG_ON(ret);
-+
-+ ninfo->frags[ninfo->nr_frags].page = page;
-+ ninfo->frags[ninfo->nr_frags].page_offset = 0;
-+ ninfo->frags[ninfo->nr_frags].size = copy;
-+ ninfo->nr_frags++;
-+
-+ offset += copy;
-+ len -= copy;
-+ }
-+
-+ offset = nskb->data - skb->data;
-+
-+ nskb->h.raw = skb->h.raw + offset;
-+ nskb->nh.raw = skb->nh.raw + offset;
-+ nskb->mac.raw = skb->mac.raw + offset;
-+
-+ return nskb;
+@@ -599,6 +599,12 @@ static void netbk_add_frag_responses(struct xen_netif *netif, int status,
+ int i;
+ unsigned long offset;
+
++ /* No fragments used */
++ if (nr_meta_slots <= 1)
++ return;
+
-+ err_free:
-+ kfree_skb(nskb);
-+ err:
-+ return NULL;
-+}
++ nr_meta_slots--;
+
-+static inline int netbk_max_required_rx_slots(netif_t *netif)
-+{
-+ if (netif->features & (NETIF_F_SG|NETIF_F_TSO))
-+ return MAX_SKB_FRAGS + 2; /* header + extra_info + frags */
-+ return 1; /* all in one */
-+}
-+
-+static inline int netbk_queue_full(netif_t *netif)
-+{
-+ RING_IDX peek = netif->rx_req_cons_peek;
-+ RING_IDX needed = netbk_max_required_rx_slots(netif);
-+
-+ return ((netif->rx.sring->req_prod - peek) < needed) ||
-+ ((netif->rx.rsp_prod_pvt + NET_RX_RING_SIZE - peek) < needed);
-+}
-+
-+static void tx_queue_callback(unsigned long data)
-+{
-+ netif_t *netif = (netif_t *)data;
-+ if (netif_schedulable(netif))
-+ netif_wake_queue(netif->dev);
-+}
-+
-+int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
-+{
-+ netif_t *netif = netdev_priv(dev);
-+
-+ BUG_ON(skb->dev != dev);
-+
-+ /* Drop the packet if the target domain has no receive buffers. */
-+ if (unlikely(!netif_schedulable(netif) || netbk_queue_full(netif)))
-+ goto drop;
-+
-+ /*
-+ * Copy the packet here if it's destined for a flipping interface
-+ * but isn't flippable (e.g. extra references to data).
-+ * XXX For now we also copy skbuffs whose head crosses a page
-+ * boundary, because netbk_gop_skb can't handle them.
-+ */
-+ if (!netif->copying_receiver ||
-+ ((skb_headlen(skb) + offset_in_page(skb->data)) >= PAGE_SIZE)) {
-+ struct sk_buff *nskb = netbk_copy_skb(skb);
-+ if ( unlikely(nskb == NULL) )
-+ goto drop;
-+ /* Copy only the header fields we use in this driver. */
-+ nskb->dev = skb->dev;
-+ nskb->ip_summed = skb->ip_summed;
-+ nskb->proto_data_valid = skb->proto_data_valid;
-+ dev_kfree_skb(skb);
-+ skb = nskb;
-+ }
-+
-+ netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1 +
-+ !!skb_shinfo(skb)->gso_size;
-+ netif_get(netif);
-+
-+ if (netbk_can_queue(dev) && netbk_queue_full(netif)) {
-+ netif->rx.sring->req_event = netif->rx_req_cons_peek +
-+ netbk_max_required_rx_slots(netif);
-+ mb(); /* request notification /then/ check & stop the queue */
-+ if (netbk_queue_full(netif)) {
-+ netif_stop_queue(dev);
-+ /*
-+ * Schedule 500ms timeout to restart the queue, thus
-+ * ensuring that an inactive queue will be drained.
-+ * Packets will be immediately be dropped until more
-+ * receive buffers become available (see
-+ * netbk_queue_full() check above).
-+ */
-+ netif->tx_queue_timeout.data = (unsigned long)netif;
-+ netif->tx_queue_timeout.function = tx_queue_callback;
-+ __mod_timer(&netif->tx_queue_timeout, jiffies + HZ/2);
-+ }
-+ }
-+
-+ skb_queue_tail(&rx_queue, skb);
-+ tasklet_schedule(&net_rx_tasklet);
-+
-+ return 0;
-+
-+ drop:
-+ netif->stats.tx_dropped++;
-+ dev_kfree_skb(skb);
-+ return 0;
-+}
-+
-+#if 0
-+static void xen_network_done_notify(void)
-+{
-+ static struct net_device *eth0_dev = NULL;
-+ if (unlikely(eth0_dev == NULL))
-+ eth0_dev = __dev_get_by_name("eth0");
-+ netif_rx_schedule(eth0_dev);
-+}
+ for (i = 0; i < nr_meta_slots; i++) {
+ int flags;
+ if (i == nr_meta_slots - 1)
+@@ -727,11 +733,9 @@ static void net_rx_action(unsigned long data)
+ gso->flags = 0;
+ }
+
+- if (sco->meta_slots_used > 1) {
+- netbk_add_frag_responses(netif, status,
+- netbk->meta + npo.meta_cons + 1,
+- sco->meta_slots_used - 1);
+- }
++ netbk_add_frag_responses(netif, status,
++ netbk->meta + npo.meta_cons + 1,
++ sco->meta_slots_used);
+
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, ret);
+ irq = netif->irq;
+--
+1.7.4
+
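The simplification above is behaviour-preserving: the early return and the
internal decrement in netbk_add_frag_responses() reproduce what the caller
used to do with its "more than one meta slot" guard and "slots - 1"
argument. A reduced model of the two conventions (names invented for
illustration):

#include <stdio.h>

static int frag_responses;

/* new convention: the callee owns the "first slot is the head" rule */
static void add_frag_responses(int nr_meta_slots)
{
	if (nr_meta_slots <= 1)	/* only the head buffer was used */
		return;
	nr_meta_slots--;	/* the first slot is the head, not a frag */
	frag_responses += nr_meta_slots;
}

int main(void)
{
	int used;

	for (used = 1; used <= 3; used++) {
		frag_responses = 0;
		/* the pre-patch caller did the equivalent of:
		 *	if (used > 1)
		 *		respond for (used - 1) frags;
		 * both conventions yield the same count */
		add_frag_responses(used);
		printf("meta_slots_used=%d -> frag responses=%d\n",
		       used, frag_responses);
	}
	return 0;
}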
+
+From 9304651128c7fdc2ffdf36e9fd589a13d5aca35d Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Fri, 24 Dec 2010 13:37:04 +0000
+Subject: [PATCH 082/203] xen: netback: cleanup coding style
+
+Fix checkpatch.pl errors plus issues found in a manual sweep.
+
+Including:
+- remove incorrect and unnecessary filenames from comment headers
+- do not include <linux/version.h>
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+---
+ drivers/xen/netback/common.h | 57 ++----------------------
+ drivers/xen/netback/interface.c | 18 +++----
+ drivers/xen/netback/netback.c | 93 ++++++++++++++++++++-------------------
+ drivers/xen/netback/xenbus.c | 65 ++++++++++++---------------
+ 4 files changed, 89 insertions(+), 144 deletions(-)
+
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index f660eb5..a2455a0 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -1,6 +1,4 @@
+-/******************************************************************************
+- * arch/xen/drivers/netif/backend/common.h
+- *
+/*
-+ * Add following to poll() function in NAPI driver (Tigon3 is example):
-+ * if ( xen_network_done() )
-+ * tg3_enable_ints(tp);
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+@@ -29,19 +27,18 @@
+ #ifndef __NETIF__BACKEND__COMMON_H__
+ #define __NETIF__BACKEND__COMMON_H__
+
+-#include <linux/version.h>
+ #include <linux/module.h>
+ #include <linux/interrupt.h>
+ #include <linux/slab.h>
+ #include <linux/ip.h>
+ #include <linux/in.h>
++#include <linux/io.h>
+ #include <linux/netdevice.h>
+ #include <linux/etherdevice.h>
+ #include <linux/wait.h>
+ #include <linux/sched.h>
+
+ #include <xen/interface/io/netif.h>
+-#include <asm/io.h>
+ #include <asm/pgalloc.h>
+ #include <xen/interface/grant_table.h>
+ #include <xen/grant_table.h>
+@@ -49,7 +46,7 @@
+
+ #define DPRINTK(_f, _a...) \
+ pr_debug("(file=%s, line=%d) " _f, \
+- __FILE__ , __LINE__ , ## _a )
++ __FILE__ , __LINE__ , ## _a)
+ #define IPRINTK(fmt, args...) \
+ printk(KERN_INFO "xen_net: " fmt, ##args)
+ #define WPRINTK(fmt, args...) \
+@@ -132,66 +129,22 @@ enum {
+
+ extern int netbk_copy_skb_mode;
+
+-/* Function pointers into netback accelerator plugin modules */
+-struct netback_accel_hooks {
+- struct module *owner;
+- int (*probe)(struct xenbus_device *dev);
+- int (*remove)(struct xenbus_device *dev);
+-};
+-
+-/* Structure to track the state of a netback accelerator plugin */
+-struct netback_accelerator {
+- struct list_head link;
+- int id;
+- char *eth_name;
+- atomic_t use_count;
+- struct netback_accel_hooks *hooks;
+-};
+-
+ struct backend_info {
+ struct xenbus_device *dev;
+ struct xen_netif *netif;
+ enum xenbus_state frontend_state;
+ struct xenbus_watch hotplug_status_watch;
+ int have_hotplug_status_watch:1;
+-
+- /* State relating to the netback accelerator */
+- void *netback_accel_priv;
+- /* The accelerator that this backend is currently using */
+- struct netback_accelerator *accelerator;
+ };
+
+-#define NETBACK_ACCEL_VERSION 0x00010001
+-
+-/*
+- * Connect an accelerator plugin module to netback. Returns zero on
+- * success, < 0 on error, > 0 (with highest version number supported)
+- * if version mismatch.
+- */
+-extern int netback_connect_accelerator(unsigned version,
+- int id, const char *eth_name,
+- struct netback_accel_hooks *hooks);
+-/* Disconnect a previously connected accelerator plugin module */
+-extern void netback_disconnect_accelerator(int id, const char *eth_name);
+-
+-
+-extern
+-void netback_probe_accelerators(struct backend_info *be,
+- struct xenbus_device *dev);
+-extern
+-void netback_remove_accelerators(struct backend_info *be,
+- struct xenbus_device *dev);
+-extern
+-void netif_accel_init(void);
+-
+-
+ #define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
+ #define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
+
+ void netif_disconnect(struct xen_netif *netif);
+
+ void netif_set_features(struct xen_netif *netif);
+-struct xen_netif *netif_alloc(struct device *parent, domid_t domid, unsigned int handle);
++struct xen_netif *netif_alloc(struct device *parent, domid_t domid,
++ unsigned int handle);
+ int netif_map(struct xen_netif *netif, unsigned long tx_ring_ref,
+ unsigned long rx_ring_ref, unsigned int evtchn);
+
+diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
+index d3af68e..4622653 100644
+--- a/drivers/xen/netback/interface.c
++++ b/drivers/xen/netback/interface.c
+@@ -1,6 +1,4 @@
+-/******************************************************************************
+- * arch/xen/drivers/netif/backend/interface.c
+- *
++/*
+ * Network-device interface management.
+ *
+ * Copyright (c) 2004-2005, Keir Fraser
+@@ -232,8 +230,7 @@ static void netbk_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+ }
+ }
+
+-static struct ethtool_ops network_ethtool_ops =
+-{
++static struct ethtool_ops network_ethtool_ops = {
+ .get_drvinfo = netbk_get_drvinfo,
+
+ .get_tx_csum = ethtool_op_get_tx_csum,
+@@ -249,8 +246,7 @@ static struct ethtool_ops network_ethtool_ops =
+ .get_strings = netbk_get_strings,
+ };
+
+-static struct net_device_ops netback_ops =
+-{
++static struct net_device_ops netback_ops = {
+ .ndo_start_xmit = netif_be_start_xmit,
+ .ndo_get_stats = netif_be_get_stats,
+ .ndo_open = net_open,
+@@ -258,7 +254,8 @@ static struct net_device_ops netback_ops =
+ .ndo_change_mtu = netbk_change_mtu,
+ };
+
+-struct xen_netif *netif_alloc(struct device *parent, domid_t domid, unsigned int handle)
++struct xen_netif *netif_alloc(struct device *parent, domid_t domid,
++ unsigned int handle)
+ {
+ int err = 0;
+ struct net_device *dev;
+@@ -323,8 +320,9 @@ struct xen_netif *netif_alloc(struct device *parent, domid_t domid, unsigned int
+ return netif;
+ }
+
+-static int map_frontend_pages(
+- struct xen_netif *netif, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref)
++static int map_frontend_pages(struct xen_netif *netif,
++ grant_ref_t tx_ring_ref,
++ grant_ref_t rx_ring_ref)
+ {
+ struct gnttab_map_grant_ref op;
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index cfe7931..d4aa8ac 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -1,11 +1,9 @@
+-/******************************************************************************
+- * drivers/xen/netback/netback.c
+- *
++/*
+ * Back-end of the driver for virtual network devices. This portion of the
+ * driver exports a 'unified' network-device interface that can be accessed
+ * by any operating system that implements a compatible front end. A
+ * reference front-end implementation can be found in:
+- * drivers/xen/netfront/netfront.c
++ * drivers/net/xen-netfront.c
+ *
+ * Copyright (c) 2002-2005, K A Fraser
+ *
+@@ -82,8 +80,8 @@ static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
+ }
+
+ /* extra field used in struct page */
+-static inline void netif_set_page_ext(struct page *pg, unsigned int group,
+- unsigned int idx)
++static inline void netif_set_page_ext(struct page *pg,
++ unsigned int group, unsigned int idx)
+ {
+ union page_ext ext = { .e = { .group = group + 1, .idx = idx } };
+
+@@ -91,7 +89,8 @@ static inline void netif_set_page_ext(struct page *pg, unsigned int group,
+ pg->mapping = ext.mapping;
+ }
+
+-static inline int netif_get_page_ext(struct page *pg, unsigned int *_group, unsigned int *_idx)
++static int netif_get_page_ext(struct page *pg,
++ unsigned int *_group, unsigned int *_idx)
+ {
+ union page_ext ext = { .mapping = pg->mapping };
+ struct xen_netbk *netbk;
+@@ -325,7 +324,7 @@ static unsigned int count_skb_slots(struct sk_buff *skb, struct xen_netif *netif
+ unsigned int count = 1;
+ int i, copy_off = 0;
+
+- BUG_ON(offset_in_page(skb->data) + skb_headlen(skb) > MAX_BUFFER_OFFSET);
++ BUG_ON(offset_in_page(skb->data)+skb_headlen(skb) > MAX_BUFFER_OFFSET);
+
+ copy_off = skb_headlen(skb);
+
+@@ -376,7 +375,7 @@ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ */
+ if ((skb_headlen(skb) + offset_in_page(skb->data)) >= PAGE_SIZE) {
+ struct sk_buff *nskb = netbk_copy_skb(skb);
+- if ( unlikely(nskb == NULL) )
++ if (unlikely(nskb == NULL))
+ goto drop;
+ /* Copy only the header fields we use in this driver. */
+ nskb->dev = skb->dev;
+@@ -385,8 +384,7 @@ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ skb = nskb;
+ }
+
+- /* Reserve ring slots for the worst-case number of
+- * fragments. */
++ /* Reserve ring slots for the worst-case number of fragments. */
+ netif->rx_req_cons_peek += count_skb_slots(skb, netif);
+ netif_get(netif);
+
+@@ -437,9 +435,10 @@ static struct netbk_rx_meta *get_next_rx_buffer(struct xen_netif *netif,
+ return meta;
+ }
+
+-/* Set up the grant operations for this fragment. If it's a flipping
+- interface, we also set up the unmap request from here. */
+-
++/*
++ * Set up the grant operations for this fragment. If it's a flipping
++ * interface, we also set up the unmap request from here.
+ */
-+int xen_network_done(void)
-+{
-+ return skb_queue_empty(&rx_queue);
-+}
-+#endif
-+
-+struct netrx_pending_operations {
-+ unsigned trans_prod, trans_cons;
-+ unsigned mmu_prod, mmu_mcl;
-+ unsigned mcl_prod, mcl_cons;
-+ unsigned copy_prod, copy_cons;
-+ unsigned meta_prod, meta_cons;
-+ mmu_update_t *mmu;
-+ gnttab_transfer_t *trans;
-+ gnttab_copy_t *copy;
-+ multicall_entry_t *mcl;
-+ struct netbk_rx_meta *meta;
-+};
-+
-+/* Set up the grant operations for this fragment. If it's a flipping
-+ interface, we also set up the unmap request from here. */
-+static u16 netbk_gop_frag(netif_t *netif, struct netbk_rx_meta *meta,
-+ int i, struct netrx_pending_operations *npo,
-+ struct page *page, unsigned long size,
-+ unsigned long offset)
-+{
-+ mmu_update_t *mmu;
-+ gnttab_transfer_t *gop;
-+ gnttab_copy_t *copy_gop;
-+ multicall_entry_t *mcl;
-+ netif_rx_request_t *req;
-+ unsigned long old_mfn, new_mfn;
-+ int idx = netif_page_index(page);
-+
-+ old_mfn = virt_to_mfn(page_address(page));
-+
-+ req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons + i);
-+ if (netif->copying_receiver) {
-+ /* The fragment needs to be copied rather than
-+ flipped. */
-+ meta->copy = 1;
-+ copy_gop = npo->copy + npo->copy_prod++;
-+ copy_gop->flags = GNTCOPY_dest_gref;
-+ if (idx > -1) {
-+ struct pending_tx_info *src_pend = &pending_tx_info[idx];
-+ copy_gop->source.domid = src_pend->netif->domid;
-+ copy_gop->source.u.ref = src_pend->req.gref;
-+ copy_gop->flags |= GNTCOPY_source_gref;
-+ } else {
-+ copy_gop->source.domid = DOMID_SELF;
-+ copy_gop->source.u.gmfn = old_mfn;
-+ }
-+ copy_gop->source.offset = offset;
-+ copy_gop->dest.domid = netif->domid;
-+ copy_gop->dest.offset = 0;
-+ copy_gop->dest.u.ref = req->gref;
-+ copy_gop->len = size;
-+ } else {
-+ meta->copy = 0;
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ new_mfn = alloc_mfn();
-+
-+ /*
-+ * Set the new P2M table entry before
-+ * reassigning the old data page. Heed the
-+ * comment in pgtable-2level.h:pte_page(). :-)
-+ */
-+ set_phys_to_machine(page_to_pfn(page), new_mfn);
-+
-+ mcl = npo->mcl + npo->mcl_prod++;
-+ MULTI_update_va_mapping(mcl,
-+ (unsigned long)page_address(page),
-+ pfn_pte_ma(new_mfn, PAGE_KERNEL),
-+ 0);
-+
-+ mmu = npo->mmu + npo->mmu_prod++;
-+ mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
-+ MMU_MACHPHYS_UPDATE;
-+ mmu->val = page_to_pfn(page);
-+ }
-+
-+ gop = npo->trans + npo->trans_prod++;
-+ gop->mfn = old_mfn;
-+ gop->domid = netif->domid;
-+ gop->ref = req->gref;
-+ }
-+ return req->id;
-+}
-+
-+static void netbk_gop_skb(struct sk_buff *skb,
-+ struct netrx_pending_operations *npo)
-+{
-+ netif_t *netif = netdev_priv(skb->dev);
-+ int nr_frags = skb_shinfo(skb)->nr_frags;
-+ int i;
-+ int extra;
-+ struct netbk_rx_meta *head_meta, *meta;
-+
-+ head_meta = npo->meta + npo->meta_prod++;
-+ head_meta->frag.page_offset = skb_shinfo(skb)->gso_type;
-+ head_meta->frag.size = skb_shinfo(skb)->gso_size;
-+ extra = !!head_meta->frag.size + 1;
-+
-+ for (i = 0; i < nr_frags; i++) {
-+ meta = npo->meta + npo->meta_prod++;
-+ meta->frag = skb_shinfo(skb)->frags[i];
-+ meta->id = netbk_gop_frag(netif, meta, i + extra, npo,
-+ meta->frag.page,
-+ meta->frag.size,
-+ meta->frag.page_offset);
-+ }
-+
-+ /*
-+ * This must occur at the end to ensure that we don't trash skb_shinfo
-+ * until we're done. We know that the head doesn't cross a page
-+ * boundary because such packets get copied in netif_be_start_xmit.
+ static void netbk_gop_frag_copy(struct xen_netif *netif,
+ struct netrx_pending_operations *npo,
+ struct page *page, unsigned long size,
+@@ -450,7 +449,7 @@ static void netbk_gop_frag_copy(struct xen_netif *netif,
+ /*
+ * These variables a used iff netif_get_page_ext returns true,
+ * in which case they are guaranteed to be initialized.
+- */
+ */
-+ head_meta->id = netbk_gop_frag(netif, head_meta, 0, npo,
-+ virt_to_page(skb->data),
-+ skb_headlen(skb),
-+ offset_in_page(skb->data));
-+
-+ netif->rx.req_cons += nr_frags + extra;
-+}
-+
-+static inline void netbk_free_pages(int nr_frags, struct netbk_rx_meta *meta)
-+{
-+ int i;
-+
-+ for (i = 0; i < nr_frags; i++)
-+ put_page(meta[i].frag.page);
-+}
-+
-+/* This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
-+ used to set up the operations on the top of
-+ netrx_pending_operations, which have since been done. Check that
-+ they didn't give any errors and advance over them. */
-+static int netbk_check_gop(int nr_frags, domid_t domid,
-+ struct netrx_pending_operations *npo)
-+{
-+ multicall_entry_t *mcl;
-+ gnttab_transfer_t *gop;
-+ gnttab_copy_t *copy_op;
-+ int status = NETIF_RSP_OKAY;
-+ int i;
-+
-+ for (i = 0; i <= nr_frags; i++) {
-+ if (npo->meta[npo->meta_cons + i].copy) {
-+ copy_op = npo->copy + npo->copy_cons++;
-+ if (copy_op->status != GNTST_okay) {
-+ DPRINTK("Bad status %d from copy to DOM%d.\n",
-+ copy_op->status, domid);
-+ status = NETIF_RSP_ERROR;
-+ }
-+ } else {
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ mcl = npo->mcl + npo->mcl_cons++;
-+ /* The update_va_mapping() must not fail. */
-+ BUG_ON(mcl->result != 0);
-+ }
-+
-+ gop = npo->trans + npo->trans_cons++;
-+ /* Check the reassignment error code. */
-+ if (gop->status != 0) {
-+ DPRINTK("Bad status %d from grant transfer to DOM%u\n",
-+ gop->status, domid);
-+ /*
-+ * Page no longer belongs to us unless
-+ * GNTST_bad_page, but that should be
-+ * a fatal error anyway.
-+ */
-+ BUG_ON(gop->status == GNTST_bad_page);
-+ status = NETIF_RSP_ERROR;
-+ }
-+ }
-+ }
-+
-+ return status;
-+}
-+
-+static void netbk_add_frag_responses(netif_t *netif, int status,
-+ struct netbk_rx_meta *meta, int nr_frags)
-+{
-+ int i;
-+ unsigned long offset;
-+
-+ for (i = 0; i < nr_frags; i++) {
-+ int id = meta[i].id;
-+ int flags = (i == nr_frags - 1) ? 0 : NETRXF_more_data;
-+
-+ if (meta[i].copy)
-+ offset = 0;
-+ else
-+ offset = meta[i].frag.page_offset;
-+ make_rx_response(netif, id, status, offset,
-+ meta[i].frag.size, flags);
-+ }
-+}
-+
-+static void net_rx_action(unsigned long unused)
-+{
-+ netif_t *netif = NULL;
-+ s8 status;
-+ u16 id, irq, flags;
-+ netif_rx_response_t *resp;
-+ multicall_entry_t *mcl;
-+ struct sk_buff_head rxq;
-+ struct sk_buff *skb;
-+ int notify_nr = 0;
-+ int ret;
-+ int nr_frags;
-+ int count;
-+ unsigned long offset;
-+
-+ /*
-+ * Putting hundreds of bytes on the stack is considered rude.
-+ * Static works because a tasklet can only be on one CPU at any time.
-+ */
-+ static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+3];
-+ static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
-+ static gnttab_transfer_t grant_trans_op[NET_RX_RING_SIZE];
-+ static gnttab_copy_t grant_copy_op[NET_RX_RING_SIZE];
-+ static unsigned char rx_notify[NR_IRQS];
-+ static u16 notify_list[NET_RX_RING_SIZE];
-+ static struct netbk_rx_meta meta[NET_RX_RING_SIZE];
-+
-+ struct netrx_pending_operations npo = {
-+ mmu: rx_mmu,
-+ trans: grant_trans_op,
-+ copy: grant_copy_op,
-+ mcl: rx_mcl,
-+ meta: meta};
-+
-+ skb_queue_head_init(&rxq);
-+
-+ count = 0;
-+
-+ while ((skb = skb_dequeue(&rx_queue)) != NULL) {
-+ nr_frags = skb_shinfo(skb)->nr_frags;
-+ *(int *)skb->cb = nr_frags;
-+
-+ if (!xen_feature(XENFEAT_auto_translated_physmap) &&
-+ !((netif_t *)netdev_priv(skb->dev))->copying_receiver &&
-+ check_mfn(nr_frags + 1)) {
-+ /* Memory squeeze? Back off for an arbitrary while. */
-+ if ( net_ratelimit() )
-+ WPRINTK("Memory squeeze in netback "
-+ "driver.\n");
-+ mod_timer(&net_timer, jiffies + HZ);
-+ skb_queue_head(&rx_queue, skb);
-+ break;
-+ }
-+
-+ netbk_gop_skb(skb, &npo);
-+
-+ count += nr_frags + 1;
-+
-+ __skb_queue_tail(&rxq, skb);
-+
-+ /* Filled the batch queue? */
-+ if (count + MAX_SKB_FRAGS >= NET_RX_RING_SIZE)
-+ break;
-+ }
-+
-+ BUG_ON(npo.meta_prod > ARRAY_SIZE(meta));
-+
-+ npo.mmu_mcl = npo.mcl_prod;
-+ if (npo.mcl_prod) {
-+ BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
-+ BUG_ON(npo.mmu_prod > ARRAY_SIZE(rx_mmu));
-+ mcl = npo.mcl + npo.mcl_prod++;
-+
-+ BUG_ON(mcl[-1].op != __HYPERVISOR_update_va_mapping);
-+ mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
-+
-+ mcl->op = __HYPERVISOR_mmu_update;
-+ mcl->args[0] = (unsigned long)rx_mmu;
-+ mcl->args[1] = npo.mmu_prod;
-+ mcl->args[2] = 0;
-+ mcl->args[3] = DOMID_SELF;
-+ }
-+
-+ if (npo.trans_prod) {
-+ BUG_ON(npo.trans_prod > ARRAY_SIZE(grant_trans_op));
-+ mcl = npo.mcl + npo.mcl_prod++;
-+ mcl->op = __HYPERVISOR_grant_table_op;
-+ mcl->args[0] = GNTTABOP_transfer;
-+ mcl->args[1] = (unsigned long)grant_trans_op;
-+ mcl->args[2] = npo.trans_prod;
-+ }
-+
-+ if (npo.copy_prod) {
-+ BUG_ON(npo.copy_prod > ARRAY_SIZE(grant_copy_op));
-+ mcl = npo.mcl + npo.mcl_prod++;
-+ mcl->op = __HYPERVISOR_grant_table_op;
-+ mcl->args[0] = GNTTABOP_copy;
-+ mcl->args[1] = (unsigned long)grant_copy_op;
-+ mcl->args[2] = npo.copy_prod;
-+ }
-+
-+ /* Nothing to do? */
-+ if (!npo.mcl_prod)
-+ return;
-+
-+ BUG_ON(npo.mcl_prod > ARRAY_SIZE(rx_mcl));
-+
-+ ret = HYPERVISOR_multicall(npo.mcl, npo.mcl_prod);
-+ BUG_ON(ret != 0);
-+ /* The mmu_machphys_update() must not fail. */
-+ BUG_ON(npo.mmu_mcl && npo.mcl[npo.mmu_mcl].result != 0);
-+
-+ while ((skb = __skb_dequeue(&rxq)) != NULL) {
-+ nr_frags = *(int *)skb->cb;
-+
-+ netif = netdev_priv(skb->dev);
-+ /* We can't rely on skb_release_data to release the
-+ pages used by fragments for us, since it tries to
-+ touch the pages in the fraglist. If we're in
-+ flipping mode, that doesn't work. In copying mode,
-+ we still have access to all of the pages, and so
-+ it's safe to let release_data deal with it. */
-+ /* (Freeing the fragments is safe since we copy
-+ non-linear skbs destined for flipping interfaces) */
-+ if (!netif->copying_receiver) {
-+ atomic_set(&(skb_shinfo(skb)->dataref), 1);
-+ skb_shinfo(skb)->frag_list = NULL;
-+ skb_shinfo(skb)->nr_frags = 0;
-+ netbk_free_pages(nr_frags, meta + npo.meta_cons + 1);
-+ }
-+
-+ netif->stats.tx_bytes += skb->len;
-+ netif->stats.tx_packets++;
-+
-+ status = netbk_check_gop(nr_frags, netif->domid, &npo);
-+
-+ id = meta[npo.meta_cons].id;
-+ flags = nr_frags ? NETRXF_more_data : 0;
-+
-+ if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
-+ flags |= NETRXF_csum_blank | NETRXF_data_validated;
-+ else if (skb->proto_data_valid) /* remote but checksummed? */
-+ flags |= NETRXF_data_validated;
-+
-+ if (meta[npo.meta_cons].copy)
-+ offset = 0;
-+ else
-+ offset = offset_in_page(skb->data);
-+ resp = make_rx_response(netif, id, status, offset,
-+ skb_headlen(skb), flags);
-+
-+ if (meta[npo.meta_cons].frag.size) {
-+ struct netif_extra_info *gso =
-+ (struct netif_extra_info *)
-+ RING_GET_RESPONSE(&netif->rx,
-+ netif->rx.rsp_prod_pvt++);
-+
-+ resp->flags |= NETRXF_extra_info;
-+
-+ gso->u.gso.size = meta[npo.meta_cons].frag.size;
-+ gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
-+ gso->u.gso.pad = 0;
-+ gso->u.gso.features = 0;
-+
-+ gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
-+ gso->flags = 0;
-+ }
-+
-+ netbk_add_frag_responses(netif, status,
-+ meta + npo.meta_cons + 1,
-+ nr_frags);
-+
-+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, ret);
-+ irq = netif->irq;
-+ if (ret && !rx_notify[irq]) {
-+ rx_notify[irq] = 1;
-+ notify_list[notify_nr++] = irq;
-+ }
-+
-+ if (netif_queue_stopped(netif->dev) &&
-+ netif_schedulable(netif) &&
-+ !netbk_queue_full(netif))
-+ netif_wake_queue(netif->dev);
-+
-+ netif_put(netif);
-+ dev_kfree_skb(skb);
-+ npo.meta_cons += nr_frags + 1;
-+ }
-+
-+ while (notify_nr != 0) {
-+ irq = notify_list[--notify_nr];
-+ rx_notify[irq] = 0;
-+ notify_remote_via_irq(irq);
-+ }
-+
-+ /* More work to do? */
-+ if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
-+ tasklet_schedule(&net_rx_tasklet);
-+#if 0
-+ else
-+ xen_network_done_notify();
-+#endif
-+}
-+
-+static void net_alarm(unsigned long unused)
-+{
-+ tasklet_schedule(&net_rx_tasklet);
-+}
-+
-+static void netbk_tx_pending_timeout(unsigned long unused)
-+{
-+ tasklet_schedule(&net_tx_tasklet);
-+}
-+
-+struct net_device_stats *netif_be_get_stats(struct net_device *dev)
-+{
-+ netif_t *netif = netdev_priv(dev);
-+ return &netif->stats;
-+}
-+
-+static int __on_net_schedule_list(netif_t *netif)
-+{
-+ return netif->list.next != NULL;
-+}
-+
-+static void remove_from_net_schedule_list(netif_t *netif)
-+{
-+ spin_lock_irq(&net_schedule_list_lock);
-+ if (likely(__on_net_schedule_list(netif))) {
-+ list_del(&netif->list);
-+ netif->list.next = NULL;
-+ netif_put(netif);
-+ }
-+ spin_unlock_irq(&net_schedule_list_lock);
-+}
-+
-+static void add_to_net_schedule_list_tail(netif_t *netif)
-+{
-+ if (__on_net_schedule_list(netif))
-+ return;
-+
-+ spin_lock_irq(&net_schedule_list_lock);
-+ if (!__on_net_schedule_list(netif) &&
-+ likely(netif_schedulable(netif))) {
-+ list_add_tail(&netif->list, &net_schedule_list);
-+ netif_get(netif);
-+ }
-+ spin_unlock_irq(&net_schedule_list_lock);
-+}
-+
+ unsigned int uninitialized_var(group), uninitialized_var(idx);
+ int foreign = netif_get_page_ext(page, &group, &idx);
+ unsigned long bytes;
+@@ -489,8 +488,9 @@ static void netbk_gop_frag_copy(struct xen_netif *netif,
+ copy_gop->source.u.ref = src_pend->req.gref;
+ copy_gop->flags |= GNTCOPY_source_gref;
+ } else {
++ void *vaddr = page_address(page);
+ copy_gop->source.domid = DOMID_SELF;
+- copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
++ copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
+ }
+ copy_gop->source.offset = offset;
+ copy_gop->dest.domid = netif->domid;
+@@ -504,17 +504,22 @@ static void netbk_gop_frag_copy(struct xen_netif *netif,
+
+ offset += bytes;
+ size -= bytes;
+- head = 0; /* Must be something in this buffer now */
++ head = 0; /* There must be something in this buffer now. */
+ }
+ }
+
+-/* Prepare an SKB to be transmitted to the frontend. This is
+- responsible for allocating grant operations, meta structures, etc.
+- It returns the number of meta structures consumed. The number of
+- ring slots used is always equal to the number of meta slots used
+- plus the number of GSO descriptors used. Currently, we use either
+- zero GSO descriptors (for non-GSO packets) or one descriptor (for
+- frontend-side LRO). */
+/*
-+ * Note on CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER:
-+ * If this driver is pipelining transmit requests then we can be very
-+ * aggressive in avoiding new-packet notifications -- frontend only needs to
-+ * send a notification if there are no outstanding unreceived responses.
-+ * If we may be buffer transmit buffers for any reason then we must be rather
-+ * more conservative and treat this as the final check for pending work.
++ * Prepare an SKB to be transmitted to the frontend.
++ *
++ * This function is responsible for allocating grant operations, meta
++ * structures, etc.
++ *
++ * It returns the number of meta structures consumed. The number of
++ * ring slots used is always equal to the number of meta slots used
++ * plus the number of GSO descriptors used. Currently, we use either
++ * zero GSO descriptors (for non-GSO packets) or one descriptor (for
++ * frontend-side LRO).
+ */
-+void netif_schedule_work(netif_t *netif)
-+{
-+ int more_to_do;
-+
-+#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
-+ more_to_do = RING_HAS_UNCONSUMED_REQUESTS(&netif->tx);
-+#else
-+ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
-+#endif
-+
-+ if (more_to_do) {
-+ add_to_net_schedule_list_tail(netif);
-+ maybe_schedule_tx_action();
-+ }
-+}
-+
-+void netif_deschedule_work(netif_t *netif)
-+{
-+ remove_from_net_schedule_list(netif);
-+}
-+
-+
-+static void tx_add_credit(netif_t *netif)
-+{
-+ unsigned long max_burst, max_credit;
-+
-+ /*
-+ * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
-+ * Otherwise the interface can seize up due to insufficient credit.
-+ */
-+ max_burst = RING_GET_REQUEST(&netif->tx, netif->tx.req_cons)->size;
-+ max_burst = min(max_burst, 131072UL);
-+ max_burst = max(max_burst, netif->credit_bytes);
-+
-+ /* Take care that adding a new chunk of credit doesn't wrap to zero. */
-+ max_credit = netif->remaining_credit + netif->credit_bytes;
-+ if (max_credit < netif->remaining_credit)
-+ max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
-+
-+ netif->remaining_credit = min(max_credit, max_burst);
-+}
-+
-+static void tx_credit_callback(unsigned long data)
-+{
-+ netif_t *netif = (netif_t *)data;
-+ tx_add_credit(netif);
-+ netif_schedule_work(netif);
-+}
-+
-+static inline int copy_pending_req(PEND_RING_IDX pending_idx)
-+{
-+ return gnttab_copy_grant_page(grant_tx_handle[pending_idx],
-+ &mmap_pages[pending_idx]);
-+}
-+
-+inline static void net_tx_action_dealloc(void)
-+{
-+ struct netbk_tx_pending_inuse *inuse, *n;
-+ gnttab_unmap_grant_ref_t *gop;
-+ u16 pending_idx;
-+ PEND_RING_IDX dc, dp;
-+ netif_t *netif;
-+ int ret;
-+ LIST_HEAD(list);
-+
-+ dc = dealloc_cons;
-+ gop = tx_unmap_ops;
-+
-+ /*
-+ * Free up any grants we have finished using
-+ */
-+ do {
-+ dp = dealloc_prod;
-+
-+ /* Ensure we see all indices enqueued by netif_idx_release(). */
-+ smp_rmb();
-+
-+ while (dc != dp) {
-+ unsigned long pfn;
-+
-+ pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
-+ list_move_tail(&pending_inuse[pending_idx].list, &list);
-+
-+ pfn = idx_to_pfn(pending_idx);
-+ /* Already unmapped? */
-+ if (!phys_to_machine_mapping_valid(pfn))
-+ continue;
-+
-+ gnttab_set_unmap_op(gop, idx_to_kaddr(pending_idx),
-+ GNTMAP_host_map,
-+ grant_tx_handle[pending_idx]);
-+ gop++;
-+ }
-+
-+ if (netbk_copy_skb_mode != NETBK_DELAYED_COPY_SKB ||
-+ list_empty(&pending_inuse_head))
-+ break;
-+
-+ /* Copy any entries that have been pending for too long. */
-+ list_for_each_entry_safe(inuse, n, &pending_inuse_head, list) {
-+ if (time_after(inuse->alloc_time + HZ / 2, jiffies))
-+ break;
-+
-+ switch (copy_pending_req(inuse - pending_inuse)) {
-+ case 0:
-+ list_move_tail(&inuse->list, &list);
-+ continue;
-+ case -EBUSY:
-+ list_del_init(&inuse->list);
-+ continue;
-+ case -ENOENT:
-+ continue;
-+ }
-+
-+ break;
-+ }
-+ } while (dp != dealloc_prod);
-+
-+ dealloc_cons = dc;
-+
-+ ret = HYPERVISOR_grant_table_op(
-+ GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops);
-+ BUG_ON(ret);
-+
-+ list_for_each_entry_safe(inuse, n, &list, list) {
-+ pending_idx = inuse - pending_inuse;
-+
-+ netif = pending_tx_info[pending_idx].netif;
-+
-+ make_tx_response(netif, &pending_tx_info[pending_idx].req,
-+ NETIF_RSP_OKAY);
-+
-+ /* Ready for next use. */
-+ gnttab_reset_grant_page(mmap_pages[pending_idx]);
-+
-+ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-+
-+ netif_put(netif);
-+
-+ list_del_init(&inuse->list);
-+ }
-+}
-+
-+static void netbk_tx_err(netif_t *netif, netif_tx_request_t *txp, RING_IDX end)
-+{
-+ RING_IDX cons = netif->tx.req_cons;
-+
-+ do {
-+ make_tx_response(netif, txp, NETIF_RSP_ERROR);
-+ if (cons >= end)
-+ break;
-+ txp = RING_GET_REQUEST(&netif->tx, cons++);
-+ } while (1);
-+ netif->tx.req_cons = cons;
-+ netif_schedule_work(netif);
-+ netif_put(netif);
-+}
-+
-+static int netbk_count_requests(netif_t *netif, netif_tx_request_t *first,
-+ netif_tx_request_t *txp, int work_to_do)
-+{
-+ RING_IDX cons = netif->tx.req_cons;
-+ int frags = 0;
-+
-+ if (!(first->flags & NETTXF_more_data))
-+ return 0;
-+
-+ do {
-+ if (frags >= work_to_do) {
-+ DPRINTK("Need more frags\n");
-+ return -frags;
-+ }
-+
-+ if (unlikely(frags >= MAX_SKB_FRAGS)) {
-+ DPRINTK("Too many frags\n");
-+ return -frags;
-+ }
-+
-+ memcpy(txp, RING_GET_REQUEST(&netif->tx, cons + frags),
-+ sizeof(*txp));
-+ if (txp->size > first->size) {
-+ DPRINTK("Frags galore\n");
-+ return -frags;
-+ }
-+
-+ first->size -= txp->size;
-+ frags++;
-+
-+ if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
-+ DPRINTK("txp->offset: %x, size: %u\n",
-+ txp->offset, txp->size);
-+ return -frags;
-+ }
-+ } while ((txp++)->flags & NETTXF_more_data);
-+
-+ return frags;
-+}
-+
-+static gnttab_map_grant_ref_t *netbk_get_requests(netif_t *netif,
-+ struct sk_buff *skb,
-+ netif_tx_request_t *txp,
-+ gnttab_map_grant_ref_t *mop)
-+{
-+ struct skb_shared_info *shinfo = skb_shinfo(skb);
-+ skb_frag_t *frags = shinfo->frags;
-+ unsigned long pending_idx = *((u16 *)skb->data);
-+ int i, start;
-+
-+ /* Skip first skb fragment if it is on same page as header fragment. */
-+ start = ((unsigned long)shinfo->frags[0].page == pending_idx);
-+
-+ for (i = start; i < shinfo->nr_frags; i++, txp++) {
-+ pending_idx = pending_ring[MASK_PEND_IDX(pending_cons++)];
-+
-+ gnttab_set_map_op(mop++, idx_to_kaddr(pending_idx),
-+ GNTMAP_host_map | GNTMAP_readonly,
-+ txp->gref, netif->domid);
-+
-+ memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
-+ netif_get(netif);
-+ pending_tx_info[pending_idx].netif = netif;
-+ frags[i].page = (void *)pending_idx;
-+ }
-+
-+ return mop;
-+}
-+
-+static int netbk_tx_check_mop(struct sk_buff *skb,
-+ gnttab_map_grant_ref_t **mopp)
-+{
-+ gnttab_map_grant_ref_t *mop = *mopp;
-+ int pending_idx = *((u16 *)skb->data);
-+ netif_t *netif = pending_tx_info[pending_idx].netif;
-+ netif_tx_request_t *txp;
-+ struct skb_shared_info *shinfo = skb_shinfo(skb);
-+ int nr_frags = shinfo->nr_frags;
-+ int i, err, start;
-+
-+ /* Check status of header. */
-+ err = mop->status;
-+ if (unlikely(err)) {
-+ txp = &pending_tx_info[pending_idx].req;
-+ make_tx_response(netif, txp, NETIF_RSP_ERROR);
-+ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-+ netif_put(netif);
+ static int netbk_gop_skb(struct sk_buff *skb,
+ struct netrx_pending_operations *npo)
+ {
+@@ -569,10 +574,12 @@ static int netbk_gop_skb(struct sk_buff *skb,
+ return npo->meta_prod - old_meta_prod;
+ }
+
+-/* This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
+- used to set up the operations on the top of
+- netrx_pending_operations, which have since been done. Check that
+- they didn't give any errors and advance over them. */
++/*
++ * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
++ * used to set up the operations on the top of
++ * netrx_pending_operations, which have since been done. Check that
++ * they didn't give any errors and advance over them.
++ */
+ static int netbk_check_gop(int nr_meta_slots, domid_t domid,
+ struct netrx_pending_operations *npo)
+ {
+@@ -906,9 +913,7 @@ static inline void net_tx_action_dealloc(struct xen_netbk *netbk)
+ dc = netbk->dealloc_cons;
+ gop = netbk->tx_unmap_ops;
+
+- /*
+- * Free up any grants we have finished using
+- */
++ /* Free up any grants we have finished using. */
+ do {
+ dp = netbk->dealloc_prod;
+
+@@ -1018,7 +1023,8 @@ static void netbk_tx_err(struct xen_netif *netif,
+
+ static int netbk_count_requests(struct xen_netif *netif,
+ struct xen_netif_tx_request *first,
+- struct xen_netif_tx_request *txp, int work_to_do)
++ struct xen_netif_tx_request *txp,
++ int work_to_do)
+ {
+ RING_IDX cons = netif->tx.req_cons;
+ int frags = 0;
+@@ -1058,10 +1064,10 @@ static int netbk_count_requests(struct xen_netif *netif,
+ }
+
+ static struct gnttab_map_grant_ref *netbk_get_requests(struct xen_netbk *netbk,
+- struct xen_netif *netif,
+- struct sk_buff *skb,
+- struct xen_netif_tx_request *txp,
+- struct gnttab_map_grant_ref *mop)
++ struct xen_netif *netif,
++ struct sk_buff *skb,
++ struct xen_netif_tx_request *txp,
++ struct gnttab_map_grant_ref *mop)
+ {
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ skb_frag_t *frags = shinfo->frags;
+@@ -1200,7 +1206,8 @@ static void netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
+ }
+ }
+
+-int netbk_get_extras(struct xen_netif *netif, struct xen_netif_extra_info *extras,
++int netbk_get_extras(struct xen_netif *netif,
++ struct xen_netif_extra_info *extras,
+ int work_to_do)
+ {
+ struct xen_netif_extra_info extra;
+@@ -1228,7 +1235,8 @@ int netbk_get_extras(struct xen_netif *netif, struct xen_netif_extra_info *extra
+ return work_to_do;
+ }
+
+-static int netbk_set_skb_gso(struct sk_buff *skb, struct xen_netif_extra_info *gso)
++static int netbk_set_skb_gso(struct sk_buff *skb,
++ struct xen_netif_extra_info *gso)
+ {
+ if (!gso->u.gso.size) {
+ DPRINTK("GSO size must not be zero.\n");
+@@ -1365,7 +1373,7 @@ static unsigned net_tx_build_mops(struct xen_netbk *netbk)
+ struct xen_netif *netif;
+ struct xen_netif_tx_request txreq;
+ struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
+- struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
++ struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
+ u16 pending_idx;
+ RING_IDX idx;
+ int work_to_do;
+@@ -1427,7 +1435,7 @@ static unsigned net_tx_build_mops(struct xen_netbk *netbk)
+ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
+ DPRINTK("txreq.offset: %x, size: %u, end: %lu\n",
+ txreq.offset, txreq.size,
+- (txreq.offset &~PAGE_MASK) + txreq.size);
++ (txreq.offset&~PAGE_MASK) + txreq.size);
+ netbk_tx_err(netif, &txreq, idx);
+ continue;
+ }
+@@ -1807,9 +1815,6 @@ static int __init netback_init(void)
+ }
+ memset(xen_netbk, 0, sizeof(struct xen_netbk) * xen_netbk_group_nr);
+
+- /* We can increase reservation by this much in net_rx_action(). */
+-// balloon_update_driver_allowance(NET_RX_RING_SIZE);
+-
+ for (group = 0; group < xen_netbk_group_nr; group++) {
+ struct xen_netbk *netbk = &xen_netbk[group];
+ skb_queue_head_init(&netbk->rx_queue);
+@@ -1894,8 +1899,6 @@ static int __init netback_init(void)
+ netbk_copy_skb_mode = NETBK_DELAYED_COPY_SKB;
+ }
+
+- //netif_accel_init();
+-
+ rc = netif_xenbus_init();
+ if (rc)
+ goto failed_init;
+diff --git a/drivers/xen/netback/xenbus.c b/drivers/xen/netback/xenbus.c
+index 1fec65a..dd44341 100644
+--- a/drivers/xen/netback/xenbus.c
++++ b/drivers/xen/netback/xenbus.c
+@@ -1,20 +1,22 @@
+-/* Xenbus code for netif backend
+- Copyright (C) 2005 Rusty Russell <rusty(a)rustcorp.com.au>
+- Copyright (C) 2005 XenSource Ltd
+-
+- This program is free software; you can redistribute it and/or modify
+- it under the terms of the GNU General Public License as published by
+- the Free Software Foundation; either version 2 of the License, or
+- (at your option) any later version.
+-
+- This program is distributed in the hope that it will be useful,
+- but WITHOUT ANY WARRANTY; without even the implied warranty of
+- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- GNU General Public License for more details.
+-
+- You should have received a copy of the GNU General Public License
+- along with this program; if not, write to the Free Software
+- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++/*
++ * Xenbus code for netif backend
++ *
++ * Copyright (C) 2005 Rusty Russell <rusty(a)rustcorp.com.au>
++ * Copyright (C) 2005 XenSource Ltd
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+ #include <stdarg.h>
+@@ -22,13 +24,6 @@
+ #include <xen/xenbus.h>
+ #include "common.h"
+
+-#if 0
+-#undef DPRINTK
+-#define DPRINTK(fmt, args...) \
+- printk("netback/xenbus (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
+-#endif
+-
+-
+ static int connect_rings(struct backend_info *);
+ static void connect(struct backend_info *);
+ static void backend_create_netif(struct backend_info *be);
+@@ -36,9 +31,7 @@ static void unregister_hotplug_status_watch(struct backend_info *be);
+
+ static int netback_remove(struct xenbus_device *dev)
+ {
+- struct backend_info *be = dev_get_drvdata(&dev->dev);
+-
+- //netback_remove_accelerators(be, dev);
++ struct backend_info *be = dev_get_drvdata(&dev->dev);
+
+ unregister_hotplug_status_watch(be);
+ if (be->netif) {
+@@ -126,8 +119,6 @@ static int netback_probe(struct xenbus_device *dev,
+ goto fail;
+ }
+
+- //netback_probe_accelerators(be, dev);
+-
+ err = xenbus_switch_state(dev, XenbusStateInitWait);
+ if (err)
+ goto fail;
+@@ -147,12 +138,13 @@ fail:
+ }
+
+
+-/**
++/*
+ * Handle the creation of the hotplug script environment. We add the script
+ * and vif variables to the environment, for the benefit of the vif-* hotplug
+ * scripts.
+ */
+-static int netback_uevent(struct xenbus_device *xdev, struct kobj_uevent_env *env)
++static int netback_uevent(struct xenbus_device *xdev,
++ struct kobj_uevent_env *env)
+ {
+ struct backend_info *be = dev_get_drvdata(&xdev->dev);
+ char *val;
+@@ -164,8 +156,7 @@ static int netback_uevent(struct xenbus_device *xdev, struct kobj_uevent_env *en
+ int err = PTR_ERR(val);
+ xenbus_dev_fatal(xdev, err, "reading script");
+ return err;
+- }
+- else {
+ } else {
-+ set_phys_to_machine(
-+ __pa(idx_to_kaddr(pending_idx)) >> PAGE_SHIFT,
-+ FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
-+ grant_tx_handle[pending_idx] = mop->handle;
-+ }
-+
-+ /* Skip first skb fragment if it is on same page as header fragment. */
-+ start = ((unsigned long)shinfo->frags[0].page == pending_idx);
-+
-+ for (i = start; i < nr_frags; i++) {
-+ int j, newerr;
-+
-+ pending_idx = (unsigned long)shinfo->frags[i].page;
-+
-+ /* Check error status: if okay then remember grant handle. */
-+ newerr = (++mop)->status;
-+ if (likely(!newerr)) {
-+ set_phys_to_machine(
-+ __pa(idx_to_kaddr(pending_idx))>>PAGE_SHIFT,
-+ FOREIGN_FRAME(mop->dev_bus_addr>>PAGE_SHIFT));
-+ grant_tx_handle[pending_idx] = mop->handle;
-+ /* Had a previous error? Invalidate this fragment. */
-+ if (unlikely(err))
-+ netif_idx_release(pending_idx);
-+ continue;
-+ }
-+
-+ /* Error on this fragment: respond to client with an error. */
-+ txp = &pending_tx_info[pending_idx].req;
-+ make_tx_response(netif, txp, NETIF_RSP_ERROR);
-+ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-+ netif_put(netif);
-+
-+ /* Not the first error? Preceding frags already invalidated. */
-+ if (err)
-+ continue;
-+
-+ /* First error: invalidate header and preceding fragments. */
-+ pending_idx = *((u16 *)skb->data);
-+ netif_idx_release(pending_idx);
-+ for (j = start; j < i; j++) {
-+ pending_idx = (unsigned long)shinfo->frags[i].page;
-+ netif_idx_release(pending_idx);
-+ }
-+
-+ /* Remember the error: invalidate all subsequent fragments. */
-+ err = newerr;
-+ }
-+
-+ *mopp = mop + 1;
-+ return err;
-+}
-+
-+static void netbk_fill_frags(struct sk_buff *skb)
-+{
-+ struct skb_shared_info *shinfo = skb_shinfo(skb);
-+ int nr_frags = shinfo->nr_frags;
-+ int i;
-+
-+ for (i = 0; i < nr_frags; i++) {
-+ skb_frag_t *frag = shinfo->frags + i;
-+ netif_tx_request_t *txp;
-+ unsigned long pending_idx;
-+
-+ pending_idx = (unsigned long)frag->page;
-+
-+ pending_inuse[pending_idx].alloc_time = jiffies;
-+ list_add_tail(&pending_inuse[pending_idx].list,
-+ &pending_inuse_head);
-+
-+ txp = &pending_tx_info[pending_idx].req;
-+ frag->page = virt_to_page(idx_to_kaddr(pending_idx));
-+ frag->size = txp->size;
-+ frag->page_offset = txp->offset;
-+
-+ skb->len += txp->size;
-+ skb->data_len += txp->size;
-+ skb->truesize += txp->size;
-+ }
-+}
-+
-+int netbk_get_extras(netif_t *netif, struct netif_extra_info *extras,
-+ int work_to_do)
-+{
-+ struct netif_extra_info extra;
-+ RING_IDX cons = netif->tx.req_cons;
-+
-+ do {
-+ if (unlikely(work_to_do-- <= 0)) {
-+ DPRINTK("Missing extra info\n");
-+ return -EBADR;
-+ }
-+
-+ memcpy(&extra, RING_GET_REQUEST(&netif->tx, cons),
-+ sizeof(extra));
-+ if (unlikely(!extra.type ||
-+ extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
-+ netif->tx.req_cons = ++cons;
-+ DPRINTK("Invalid extra type: %d\n", extra.type);
-+ return -EINVAL;
-+ }
-+
-+ memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
-+ netif->tx.req_cons = ++cons;
-+ } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
-+
-+ return work_to_do;
-+}
-+
-+static int netbk_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso)
-+{
-+ if (!gso->u.gso.size) {
-+ DPRINTK("GSO size must not be zero.\n");
-+ return -EINVAL;
-+ }
+ if (add_uevent_var(env, "script=%s", val)) {
+ kfree(val);
+ return -ENOMEM;
+@@ -173,10 +164,10 @@ static int netback_uevent(struct xenbus_device *xdev, struct kobj_uevent_env *en
+ kfree(val);
+ }
+
+- if (be && be->netif && add_uevent_var(env, "vif=%s", be->netif->dev->name))
+- return -ENOMEM;
++ if (!be || !be->netif)
++ return 0;
+
+- return 0;
++ return add_uevent_var(env, "vif=%s", be->netif->dev->name);
+ }
+
+
+@@ -234,7 +225,7 @@ static void frontend_changed(struct xenbus_device *dev,
+ case XenbusStateInitialising:
+ if (dev->state == XenbusStateClosed) {
+ printk(KERN_INFO "%s: %s: prepare for reconnect\n",
+- __FUNCTION__, dev->nodename);
++ __func__, dev->nodename);
+ xenbus_switch_state(dev, XenbusStateInitWait);
+ }
+ break;
+--
+1.7.4
+
+
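The xenbus.c hunk just above rewrites the tail of netback_uevent(): instead of collapsing any add_uevent_var() failure into -ENOMEM behind a compound condition, the new code returns 0 early when there is no backend or netif and otherwise propagates add_uevent_var()'s own return value. A minimal userspace sketch of that control-flow change follows; all demo_* names are hypothetical stand-ins, not driver symbols.

/* Sketch of the early-return refactor above; demo_* names are
 * hypothetical stand-ins, not netback symbols. */
#include <stdio.h>

struct demo_netif { const char *name; };
struct demo_backend { struct demo_netif *netif; };

/* Stand-in for add_uevent_var(): 0 on success, negative errno on failure. */
static int demo_add_var(const char *key, const char *val)
{
	return printf("%s=%s\n", key, val) < 0 ? -5 /* -EIO */ : 0;
}

static int demo_uevent(struct demo_backend *be)
{
	/* Old shape: if (be && be->netif && demo_add_var(...)) return -ENOMEM; */

	/* New shape: bail out early, then propagate the helper's own code. */
	if (!be || !be->netif)
		return 0;

	return demo_add_var("vif", be->netif->name);
}

int main(void)
{
	struct demo_netif vif = { "vif1.0" };
	struct demo_backend be = { &vif };

	return demo_uevent(&be) ? 1 : 0;
}

Besides being shorter, the new shape reports the real reason a uevent variable could not be added rather than a guessed -ENOMEM.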
+From 7ab35b88fd3b452d8b9aec972ab7d3e9de710894 Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Wed, 19 Jan 2011 10:51:45 +0000
+Subject: [PATCH 083/203] xen: netback: drop private ?PRINTK macros in favour of pr_*
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+---
+ drivers/xen/netback/common.h | 10 ++--------
+ drivers/xen/netback/interface.c | 13 +++++++------
+ drivers/xen/netback/netback.c | 38 +++++++++++++++++++-------------------
+ drivers/xen/netback/xenbus.c | 13 +++----------
+ 4 files changed, 31 insertions(+), 43 deletions(-)
+
+diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+index a2455a0..49dc4cf 100644
+--- a/drivers/xen/netback/common.h
++++ b/drivers/xen/netback/common.h
+@@ -27,6 +27,8 @@
+ #ifndef __NETIF__BACKEND__COMMON_H__
+ #define __NETIF__BACKEND__COMMON_H__
+
++#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
-+ /* Currently only TCPv4 S.O. is supported. */
-+ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
-+ DPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
-+ return -EINVAL;
-+ }
+ #include <linux/module.h>
+ #include <linux/interrupt.h>
+ #include <linux/slab.h>
+@@ -44,14 +46,6 @@
+ #include <xen/grant_table.h>
+ #include <xen/xenbus.h>
+
+-#define DPRINTK(_f, _a...) \
+- pr_debug("(file=%s, line=%d) " _f, \
+- __FILE__ , __LINE__ , ## _a)
+-#define IPRINTK(fmt, args...) \
+- printk(KERN_INFO "xen_net: " fmt, ##args)
+-#define WPRINTK(fmt, args...) \
+- printk(KERN_WARNING "xen_net: " fmt, ##args)
+-
+ struct xen_netif {
+ /* Unique identifier for this interface. */
+ domid_t domid;
+diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
+index 4622653..b429f8c 100644
+--- a/drivers/xen/netback/interface.c
++++ b/drivers/xen/netback/interface.c
+@@ -29,6 +29,7 @@
+ */
+
+ #include "common.h"
+
-+ skb_shinfo(skb)->gso_size = gso->u.gso.size;
-+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ #include <linux/ethtool.h>
+ #include <linux/rtnetlink.h>
+
+@@ -265,7 +266,7 @@ struct xen_netif *netif_alloc(struct device *parent, domid_t domid,
+ snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
+ dev = alloc_netdev(sizeof(struct xen_netif), name, ether_setup);
+ if (dev == NULL) {
+- DPRINTK("Could not create netif: out of memory\n");
++ pr_debug("Could not allocate netdev\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+@@ -310,13 +311,13 @@ struct xen_netif *netif_alloc(struct device *parent, domid_t domid,
+ err = register_netdevice(dev);
+ rtnl_unlock();
+ if (err) {
+- DPRINTK("Could not register new net device %s: err=%d\n",
+- dev->name, err);
++ pr_debug("Could not register new net device %s: err=%d\n",
++ dev->name, err);
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+
+- DPRINTK("Successfully created netif\n");
++ pr_debug("Successfully created netif\n");
+ return netif;
+ }
+
+@@ -333,7 +334,7 @@ static int map_frontend_pages(struct xen_netif *netif,
+ BUG();
+
+ if (op.status) {
+- DPRINTK(" Gnttab failure mapping tx_ring_ref!\n");
++ pr_debug("Gnttab failure mapping tx_ring_ref!\n");
+ return op.status;
+ }
+
+@@ -353,7 +354,7 @@ static int map_frontend_pages(struct xen_netif *netif,
+ (unsigned long)netif->tx_comms_area->addr,
+ GNTMAP_host_map, netif->tx_shmem_handle);
+ HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unop, 1);
+- DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
++ pr_debug("Gnttab failure mapping rx_ring_ref!\n");
+ return op.status;
+ }
+
+diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+index d4aa8ac..b290525 100644
+--- a/drivers/xen/netback/netback.c
++++ b/drivers/xen/netback/netback.c
+@@ -590,8 +590,8 @@ static int netbk_check_gop(int nr_meta_slots, domid_t domid,
+ for (i = 0; i < nr_meta_slots; i++) {
+ copy_op = npo->copy + npo->copy_cons++;
+ if (copy_op->status != GNTST_okay) {
+- DPRINTK("Bad status %d from copy to DOM%d.\n",
+- copy_op->status, domid);
++ pr_debug("Bad status %d from copy to DOM%d.\n",
++ copy_op->status, domid);
+ status = NETIF_RSP_ERROR;
+ }
+ }
+@@ -1034,19 +1034,19 @@ static int netbk_count_requests(struct xen_netif *netif,
+
+ do {
+ if (frags >= work_to_do) {
+- DPRINTK("Need more frags\n");
++ pr_debug("Need more frags\n");
+ return -frags;
+ }
+
+ if (unlikely(frags >= MAX_SKB_FRAGS)) {
+- DPRINTK("Too many frags\n");
++ pr_debug("Too many frags\n");
+ return -frags;
+ }
+
+ memcpy(txp, RING_GET_REQUEST(&netif->tx, cons + frags),
+ sizeof(*txp));
+ if (txp->size > first->size) {
+- DPRINTK("Frags galore\n");
++ pr_debug("Frags galore\n");
+ return -frags;
+ }
+
+@@ -1054,8 +1054,8 @@ static int netbk_count_requests(struct xen_netif *netif,
+ frags++;
+
+ if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
+- DPRINTK("txp->offset: %x, size: %u\n",
+- txp->offset, txp->size);
++ pr_debug("txp->offset: %x, size: %u\n",
++ txp->offset, txp->size);
+ return -frags;
+ }
+ } while ((txp++)->flags & NETTXF_more_data);
+@@ -1215,7 +1215,7 @@ int netbk_get_extras(struct xen_netif *netif,
+
+ do {
+ if (unlikely(work_to_do-- <= 0)) {
+- DPRINTK("Missing extra info\n");
++ pr_debug("Missing extra info\n");
+ return -EBADR;
+ }
+
+@@ -1224,7 +1224,7 @@ int netbk_get_extras(struct xen_netif *netif,
+ if (unlikely(!extra.type ||
+ extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+ netif->tx.req_cons = ++cons;
+- DPRINTK("Invalid extra type: %d\n", extra.type);
++ pr_debug("Invalid extra type: %d\n", extra.type);
+ return -EINVAL;
+ }
+
+@@ -1239,13 +1239,13 @@ static int netbk_set_skb_gso(struct sk_buff *skb,
+ struct xen_netif_extra_info *gso)
+ {
+ if (!gso->u.gso.size) {
+- DPRINTK("GSO size must not be zero.\n");
++ pr_debug("GSO size must not be zero.\n");
+ return -EINVAL;
+ }
+
+ /* Currently only TCPv4 S.O. is supported. */
+ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+- DPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
++ pr_debug("Bad GSO type %d.\n", gso->u.gso.type);
+ return -EINVAL;
+ }
+
+@@ -1426,16 +1426,16 @@ static unsigned net_tx_build_mops(struct xen_netbk *netbk)
+ idx += ret;
+
+ if (unlikely(txreq.size < ETH_HLEN)) {
+- DPRINTK("Bad packet size: %d\n", txreq.size);
++ pr_debug("Bad packet size: %d\n", txreq.size);
+ netbk_tx_err(netif, &txreq, idx);
+ continue;
+ }
+
+ /* No crossing a page as the payload mustn't fragment. */
+ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
+- DPRINTK("txreq.offset: %x, size: %u, end: %lu\n",
+- txreq.offset, txreq.size,
+- (txreq.offset&~PAGE_MASK) + txreq.size);
++ pr_debug("txreq.offset: %x, size: %u, end: %lu\n",
++ txreq.offset, txreq.size,
++ (txreq.offset&~PAGE_MASK) + txreq.size);
+ netbk_tx_err(netif, &txreq, idx);
+ continue;
+ }
+@@ -1450,7 +1450,7 @@ static unsigned net_tx_build_mops(struct xen_netbk *netbk)
+ skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(skb == NULL)) {
+- DPRINTK("Can't allocate a skb in start_xmit.\n");
++ pr_debug("Can't allocate a skb in start_xmit.\n");
+ netbk_tx_err(netif, &txreq, idx);
+ break;
+ }
+@@ -1525,7 +1525,7 @@ static void net_tx_submit(struct xen_netbk *netbk)
+
+ /* Check the remap error code. */
+ if (unlikely(netbk_tx_check_mop(netbk, skb, &mop))) {
+- DPRINTK("netback grant failed.\n");
++ pr_debug("netback grant failed.\n");
+ skb_shinfo(skb)->nr_frags = 0;
+ kfree_skb(skb);
+ continue;
+@@ -1565,14 +1565,14 @@ static void net_tx_submit(struct xen_netbk *netbk)
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (checksum_setup(netif, skb)) {
+- DPRINTK("Can't setup checksum in net_tx_action\n");
++ pr_debug("Can't setup checksum in net_tx_action\n");
+ kfree_skb(skb);
+ continue;
+ }
+
+ if (unlikely(netbk_copy_skb_mode == NETBK_ALWAYS_COPY_SKB) &&
+ unlikely(skb_linearize(skb))) {
+- DPRINTK("Can't linearize skb in net_tx_action.\n");
++ pr_debug("Can't linearize skb in net_tx_action.\n");
+ kfree_skb(skb);
+ continue;
+ }
+diff --git a/drivers/xen/netback/xenbus.c b/drivers/xen/netback/xenbus.c
+index dd44341..867dc25 100644
+--- a/drivers/xen/netback/xenbus.c
++++ b/drivers/xen/netback/xenbus.c
+@@ -19,9 +19,6 @@
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+-#include <stdarg.h>
+-#include <linux/module.h>
+-#include <xen/xenbus.h>
+ #include "common.h"
+
+ static int connect_rings(struct backend_info *);
+@@ -132,7 +129,7 @@ abort_transaction:
+ xenbus_transaction_end(xbt, 1);
+ xenbus_dev_fatal(dev, err, "%s", message);
+ fail:
+- DPRINTK("failed");
++ pr_debug("failed");
+ netback_remove(dev);
+ return err;
+ }
+@@ -149,8 +146,6 @@ static int netback_uevent(struct xenbus_device *xdev,
+ struct backend_info *be = dev_get_drvdata(&xdev->dev);
+ char *val;
+
+- DPRINTK("netback_uevent");
+-
+ val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
+ if (IS_ERR(val)) {
+ int err = PTR_ERR(val);
+@@ -217,7 +212,7 @@ static void frontend_changed(struct xenbus_device *dev,
+ {
+ struct backend_info *be = dev_get_drvdata(&dev->dev);
+
+- DPRINTK("%s", xenbus_strstate(frontend_state));
++ pr_debug("frontend state %s", xenbus_strstate(frontend_state));
+
+ be->frontend_state = frontend_state;
+
+@@ -297,7 +292,7 @@ static void xen_net_read_rate(struct xenbus_device *dev,
+ return;
+
+ fail:
+- WPRINTK("Failed to parse network rate limit. Traffic unlimited.\n");
++ pr_warn("Failed to parse network rate limit. Traffic unlimited.\n");
+ kfree(ratestr);
+ }
+
+@@ -396,8 +391,6 @@ static int connect_rings(struct backend_info *be)
+ int err;
+ int val;
+
+- DPRINTK("");
+-
+ err = xenbus_gather(XBT_NIL, dev->otherend,
+ "tx-ring-ref", "%lu", &tx_ring_ref,
+ "rx-ring-ref", "%lu", &rx_ring_ref,
+--
+1.7.4
+
+
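Patch 083 above swaps the driver's private ?PRINTK wrappers for the generic pr_debug()/pr_warn() helpers, deriving a uniform message prefix from pr_fmt(). The one subtlety is ordering: pr_fmt() must be defined before the first include that expands the pr_* macros, which is why the patch adds it at the very top of common.h. A toy module sketching the same idiom is below; it is not netback code, and only the pr_fmt line is taken verbatim from the patch.

/* Toy module illustrating the pr_fmt/pr_* idiom adopted above.
 * pr_fmt() must precede the includes so every pr_*() call picks it up;
 * KBUILD_MODNAME is supplied by kbuild at compile time. */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/init.h>
#include <linux/module.h>

static int __init prfmt_demo_init(void)
{
	/* Emitted only with DEBUG or dynamic debug enabled. */
	pr_debug("loaded\n");
	/* Always emitted, prefixed "<modname>:prfmt_demo_init: ..." */
	pr_warn("demo warning\n");
	return 0;
}

static void __exit prfmt_demo_exit(void)
{
	pr_debug("unloaded\n");
}

module_init(prfmt_demo_init);
module_exit(prfmt_demo_exit);
MODULE_LICENSE("GPL");

With dynamic debug, the converted pr_debug() call sites can then be toggled individually at runtime, which the private DPRINTK macro never allowed.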
+From 34db20ebe74e4ffacda2d42eb9cd92ae9b770970 Mon Sep 17 00:00:00 2001
+From: Ian Campbell <ian.campbell(a)citrix.com>
+Date: Tue, 18 Jan 2011 12:54:12 +0000
+Subject: [PATCH 084/203] xen: netback: move under drivers/net/xen-netback/
+
+From the kernel's PoV netback is just another network device driver.
+
+Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
+---
+ drivers/net/Kconfig | 7 +
+ drivers/net/Makefile | 1 +
+ drivers/net/xen-netback/Makefile | 3 +
+ drivers/net/xen-netback/common.h | 273 +++++
+ drivers/net/xen-netback/interface.c | 470 +++++++++
+ drivers/net/xen-netback/netback.c | 1934 +++++++++++++++++++++++++++++++++++
+ drivers/net/xen-netback/xenbus.c | 489 +++++++++
+ drivers/xen/Kconfig | 7 -
+ drivers/xen/Makefile | 1 -
+ drivers/xen/netback/Makefile | 3 -
+ drivers/xen/netback/common.h | 273 -----
+ drivers/xen/netback/interface.c | 470 ---------
+ drivers/xen/netback/netback.c | 1934 -----------------------------------
+ drivers/xen/netback/xenbus.c | 489 ---------
+ 14 files changed, 3177 insertions(+), 3177 deletions(-)
+ create mode 100644 drivers/net/xen-netback/Makefile
+ create mode 100644 drivers/net/xen-netback/common.h
+ create mode 100644 drivers/net/xen-netback/interface.c
+ create mode 100644 drivers/net/xen-netback/netback.c
+ create mode 100644 drivers/net/xen-netback/xenbus.c
+ delete mode 100644 drivers/xen/netback/Makefile
+ delete mode 100644 drivers/xen/netback/common.h
+ delete mode 100644 drivers/xen/netback/interface.c
+ delete mode 100644 drivers/xen/netback/netback.c
+ delete mode 100644 drivers/xen/netback/xenbus.c
+
+diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
+index cbf0635..5b088f5 100644
+--- a/drivers/net/Kconfig
++++ b/drivers/net/Kconfig
+@@ -2970,6 +2970,13 @@ config XEN_NETDEV_FRONTEND
+ if you are compiling a kernel for a Xen guest, you almost
+ certainly want to enable this.
+
++config XEN_NETDEV_BACKEND
++ tristate "Xen backend network device"
++ depends on XEN_BACKEND
++ help
++ Implement the network backend driver, which passes packets
++ from the guest domain's frontend drivers to the network.
+
-+ /* Header must be checked, and gso_segs computed. */
-+ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
-+ skb_shinfo(skb)->gso_segs = 0;
+ config ISERIES_VETH
+ tristate "iSeries Virtual Ethernet driver support"
+ depends on PPC_ISERIES
+diff --git a/drivers/net/Makefile b/drivers/net/Makefile
+index b90738d..145dfd7 100644
+--- a/drivers/net/Makefile
++++ b/drivers/net/Makefile
+@@ -171,6 +171,7 @@ obj-$(CONFIG_SLIP) += slip.o
+ obj-$(CONFIG_SLHC) += slhc.o
+
+ obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
++obj-$(CONFIG_XEN_NETDEV_BACKEND) += xen-netback/
+
+ obj-$(CONFIG_DUMMY) += dummy.o
+ obj-$(CONFIG_IFB) += ifb.o
+diff --git a/drivers/net/xen-netback/Makefile b/drivers/net/xen-netback/Makefile
+new file mode 100644
+index 0000000..e346e81
+--- /dev/null
++++ b/drivers/net/xen-netback/Makefile
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_XEN_NETDEV_BACKEND) := xen-netback.o
+
-+ return 0;
-+}
++xen-netback-y := netback.o xenbus.o interface.o
+diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
+new file mode 100644
+index 0000000..2d727a0
+--- /dev/null
++++ b/drivers/net/xen-netback/common.h
+@@ -0,0 +1,273 @@
++/*
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
+
-+/* Called after netfront has transmitted */
-+static void net_tx_action(unsigned long unused)
-+{
-+ struct list_head *ent;
-+ struct sk_buff *skb;
-+ netif_t *netif;
-+ netif_tx_request_t txreq;
-+ netif_tx_request_t txfrags[MAX_SKB_FRAGS];
-+ struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
-+ u16 pending_idx;
-+ RING_IDX i;
-+ gnttab_map_grant_ref_t *mop;
-+ unsigned int data_len;
-+ int ret, work_to_do;
++#ifndef __XEN_NETBACK__COMMON_H__
++#define __XEN_NETBACK__COMMON_H__
+
-+ if (dealloc_cons != dealloc_prod)
-+ net_tx_action_dealloc();
++#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
-+ mop = tx_map_ops;
-+ while (((NR_PENDING_REQS + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
-+ !list_empty(&net_schedule_list)) {
-+ /* Get a netif from the list with work to do. */
-+ ent = net_schedule_list.next;
-+ netif = list_entry(ent, netif_t, list);
-+ netif_get(netif);
-+ remove_from_net_schedule_list(netif);
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/ip.h>
++#include <linux/in.h>
++#include <linux/io.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/wait.h>
++#include <linux/sched.h>
+
-+ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
-+ if (!work_to_do) {
-+ netif_put(netif);
-+ continue;
-+ }
++#include <xen/interface/io/netif.h>
++#include <asm/pgalloc.h>
++#include <xen/interface/grant_table.h>
++#include <xen/grant_table.h>
++#include <xen/xenbus.h>
+
-+ i = netif->tx.req_cons;
-+ rmb(); /* Ensure that we see the request before we copy it. */
-+ memcpy(&txreq, RING_GET_REQUEST(&netif->tx, i), sizeof(txreq));
++struct xen_netif {
++ /* Unique identifier for this interface. */
++ domid_t domid;
++ int group;
++ unsigned int handle;
+
-+ /* Credit-based scheduling. */
-+ if (txreq.size > netif->remaining_credit) {
-+ unsigned long now = jiffies;
-+ unsigned long next_credit =
-+ netif->credit_timeout.expires +
-+ msecs_to_jiffies(netif->credit_usec / 1000);
++ u8 fe_dev_addr[6];
+
-+ /* Timer could already be pending in rare cases. */
-+ if (timer_pending(&netif->credit_timeout)) {
-+ netif_put(netif);
-+ continue;
-+ }
++ /* Physical parameters of the comms window. */
++ grant_handle_t tx_shmem_handle;
++ grant_ref_t tx_shmem_ref;
++ grant_handle_t rx_shmem_handle;
++ grant_ref_t rx_shmem_ref;
++ unsigned int irq;
+
-+ /* Passed the point where we can replenish credit? */
-+ if (time_after_eq(now, next_credit)) {
-+ netif->credit_timeout.expires = now;
-+ tx_add_credit(netif);
-+ }
++ /* The shared rings and indexes. */
++ struct xen_netif_tx_back_ring tx;
++ struct xen_netif_rx_back_ring rx;
++ struct vm_struct *tx_comms_area;
++ struct vm_struct *rx_comms_area;
+
-+ /* Still too big to send right now? Set a callback. */
-+ if (txreq.size > netif->remaining_credit) {
-+ netif->credit_timeout.data =
-+ (unsigned long)netif;
-+ netif->credit_timeout.function =
-+ tx_credit_callback;
-+ __mod_timer(&netif->credit_timeout,
-+ next_credit);
-+ netif_put(netif);
-+ continue;
-+ }
-+ }
-+ netif->remaining_credit -= txreq.size;
++ /* Flags that must not be set in dev->features */
++ int features_disabled;
+
-+ work_to_do--;
-+ netif->tx.req_cons = ++i;
++ /* Frontend feature information. */
++ u8 can_sg:1;
++ u8 gso:1;
++ u8 gso_prefix:1;
++ u8 csum:1;
+
-+ memset(extras, 0, sizeof(extras));
-+ if (txreq.flags & NETTXF_extra_info) {
-+ work_to_do = netbk_get_extras(netif, extras,
-+ work_to_do);
-+ i = netif->tx.req_cons;
-+ if (unlikely(work_to_do < 0)) {
-+ netbk_tx_err(netif, &txreq, i);
-+ continue;
-+ }
-+ }
++ /* Internal feature information. */
++ u8 can_queue:1; /* can queue packets for receiver? */
+
-+ ret = netbk_count_requests(netif, &txreq, txfrags, work_to_do);
-+ if (unlikely(ret < 0)) {
-+ netbk_tx_err(netif, &txreq, i - ret);
-+ continue;
-+ }
-+ i += ret;
++ /* Allow netif_be_start_xmit() to peek ahead in the rx request
++ * ring. This is a prediction of what rx_req_cons will be once
++ * all queued skbs are put on the ring. */
++ RING_IDX rx_req_cons_peek;
+
-+ if (unlikely(txreq.size < ETH_HLEN)) {
-+ DPRINTK("Bad packet size: %d\n", txreq.size);
-+ netbk_tx_err(netif, &txreq, i);
-+ continue;
-+ }
++ /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
++ unsigned long credit_bytes;
++ unsigned long credit_usec;
++ unsigned long remaining_credit;
++ struct timer_list credit_timeout;
+
-+ /* No crossing a page as the payload mustn't fragment. */
-+ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
-+ DPRINTK("txreq.offset: %x, size: %u, end: %lu\n",
-+ txreq.offset, txreq.size,
-+ (txreq.offset &~PAGE_MASK) + txreq.size);
-+ netbk_tx_err(netif, &txreq, i);
-+ continue;
-+ }
++ /* Statistics */
++ int nr_copied_skbs;
++ int rx_gso_checksum_fixup;
+
-+ pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
++ /* Miscellaneous private stuff. */
++ struct list_head list; /* scheduling list */
++ atomic_t refcnt;
++ struct net_device *dev;
++ struct net_device_stats stats;
+
-+ data_len = (txreq.size > PKT_PROT_LEN &&
-+ ret < MAX_SKB_FRAGS) ?
-+ PKT_PROT_LEN : txreq.size;
++ unsigned int carrier;
+
-+ skb = alloc_skb(data_len + 16 + NET_IP_ALIGN,
-+ GFP_ATOMIC | __GFP_NOWARN);
-+ if (unlikely(skb == NULL)) {
-+ DPRINTK("Can't allocate a skb in start_xmit.\n");
-+ netbk_tx_err(netif, &txreq, i);
-+ break;
-+ }
++ wait_queue_head_t waiting_to_free;
++};
+
-+ /* Packets passed to netif_rx() must have some headroom. */
-+ skb_reserve(skb, 16 + NET_IP_ALIGN);
++/*
++ * Implement our own carrier flag: the network stack's version causes delays
++ * when the carrier is re-enabled (in particular, dev_activate() may not
++ * immediately be called, which can cause packet loss; also the etherbridge
++ * can be rather lazy in activating its port).
++ */
++#define netback_carrier_on(netif) ((netif)->carrier = 1)
++#define netback_carrier_off(netif) ((netif)->carrier = 0)
++#define netback_carrier_ok(netif) ((netif)->carrier)
+
-+ if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
-+ struct netif_extra_info *gso;
-+ gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
++enum {
++ NETBK_DONT_COPY_SKB,
++ NETBK_DELAYED_COPY_SKB,
++ NETBK_ALWAYS_COPY_SKB,
++};
+
-+ if (netbk_set_skb_gso(skb, gso)) {
-+ kfree_skb(skb);
-+ netbk_tx_err(netif, &txreq, i);
-+ continue;
-+ }
-+ }
++extern int netbk_copy_skb_mode;
+
-+ gnttab_set_map_op(mop, idx_to_kaddr(pending_idx),
-+ GNTMAP_host_map | GNTMAP_readonly,
-+ txreq.gref, netif->domid);
-+ mop++;
++struct backend_info {
++ struct xenbus_device *dev;
++ struct xen_netif *netif;
++ enum xenbus_state frontend_state;
++ struct xenbus_watch hotplug_status_watch;
++ int have_hotplug_status_watch:1;
++};
+
-+ memcpy(&pending_tx_info[pending_idx].req,
-+ &txreq, sizeof(txreq));
-+ pending_tx_info[pending_idx].netif = netif;
-+ *((u16 *)skb->data) = pending_idx;
++#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
++#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
+
-+ __skb_put(skb, data_len);
++void netif_disconnect(struct xen_netif *netif);
+
-+ skb_shinfo(skb)->nr_frags = ret;
-+ if (data_len < txreq.size) {
-+ skb_shinfo(skb)->nr_frags++;
-+ skb_shinfo(skb)->frags[0].page =
-+ (void *)(unsigned long)pending_idx;
-+ } else {
-+ /* Discriminate from any valid pending_idx value. */
-+ skb_shinfo(skb)->frags[0].page = (void *)~0UL;
-+ }
++void netif_set_features(struct xen_netif *netif);
++struct xen_netif *netif_alloc(struct device *parent, domid_t domid,
++ unsigned int handle);
++int netif_map(struct xen_netif *netif, unsigned long tx_ring_ref,
++ unsigned long rx_ring_ref, unsigned int evtchn);
+
-+ if (skb->data_len < skb_shinfo(skb)->gso_size) {
-+ skb_shinfo(skb)->gso_size = 0;
-+ skb_shinfo(skb)->gso_type = 0;
-+ }
++static inline void netif_get(struct xen_netif *netif)
++{
++ atomic_inc(&netif->refcnt);
++}
+
-+ __skb_queue_tail(&tx_queue, skb);
++static inline void netif_put(struct xen_netif *netif)
++{
++ if (atomic_dec_and_test(&netif->refcnt))
++ wake_up(&netif->waiting_to_free);
++}
+
-+ pending_cons++;
++int netif_xenbus_init(void);
+
-+ mop = netbk_get_requests(netif, skb, txfrags, mop);
++#define netif_schedulable(netif) \
++ (netif_running((netif)->dev) && netback_carrier_ok(netif))
+
-+ netif->tx.req_cons = i;
-+ netif_schedule_work(netif);
++void netif_schedule_work(struct xen_netif *netif);
++void netif_deschedule_work(struct xen_netif *netif);
+
-+ if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
-+ break;
-+ }
++int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
++struct net_device_stats *netif_be_get_stats(struct net_device *dev);
++irqreturn_t netif_be_int(int irq, void *dev_id);
+
-+ if (mop == tx_map_ops)
-+ return;
++static inline int netbk_can_queue(struct net_device *dev)
++{
++ struct xen_netif *netif = netdev_priv(dev);
++ return netif->can_queue;
++}
+
-+ ret = HYPERVISOR_grant_table_op(
-+ GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops);
-+ BUG_ON(ret);
++static inline int netbk_can_sg(struct net_device *dev)
++{
++ struct xen_netif *netif = netdev_priv(dev);
++ return netif->can_sg;
++}
+
-+ mop = tx_map_ops;
-+ while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
-+ netif_tx_request_t *txp;
++struct pending_tx_info {
++ struct xen_netif_tx_request req;
++ struct xen_netif *netif;
++};
++typedef unsigned int pending_ring_idx_t;
+
-+ pending_idx = *((u16 *)skb->data);
-+ netif = pending_tx_info[pending_idx].netif;
-+ txp = &pending_tx_info[pending_idx].req;
++struct netbk_rx_meta {
++ int id;
++ int size;
++ int gso_size;
++};
+
-+ /* Check the remap error code. */
-+ if (unlikely(netbk_tx_check_mop(skb, &mop))) {
-+ DPRINTK("netback grant failed.\n");
-+ skb_shinfo(skb)->nr_frags = 0;
-+ kfree_skb(skb);
-+ continue;
-+ }
++struct netbk_tx_pending_inuse {
++ struct list_head list;
++ unsigned long alloc_time;
++};
+
-+ data_len = skb->len;
-+ memcpy(skb->data,
-+ (void *)(idx_to_kaddr(pending_idx)|txp->offset),
-+ data_len);
-+ if (data_len < txp->size) {
-+ /* Append the packet payload as a fragment. */
-+ txp->offset += data_len;
-+ txp->size -= data_len;
-+ } else {
-+ /* Schedule a response immediately. */
-+ netif_idx_release(pending_idx);
-+ }
++#define MAX_PENDING_REQS 256
+
-+ /*
-+ * Old frontends do not assert data_validated but we
-+ * can infer it from csum_blank so test both flags.
-+ */
-+ if (txp->flags & (NETTXF_data_validated|NETTXF_csum_blank)) {
-+ skb->ip_summed = CHECKSUM_UNNECESSARY;
-+ skb->proto_data_valid = 1;
-+ } else {
-+ skb->ip_summed = CHECKSUM_NONE;
-+ skb->proto_data_valid = 0;
-+ }
-+ skb->proto_csum_blank = !!(txp->flags & NETTXF_csum_blank);
++#define MAX_BUFFER_OFFSET PAGE_SIZE
+
-+ netbk_fill_frags(skb);
++/* extra field used in struct page */
++union page_ext {
++ struct {
++#if BITS_PER_LONG < 64
++#define IDX_WIDTH 8
++#define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
++ unsigned int group:GROUP_WIDTH;
++ unsigned int idx:IDX_WIDTH;
++#else
++ unsigned int group, idx;
++#endif
++ } e;
++ void *mapping;
++};
+
-+ skb->dev = netif->dev;
-+ skb->protocol = eth_type_trans(skb, skb->dev);
++struct xen_netbk {
++ union {
++ struct {
++ struct tasklet_struct net_tx_tasklet;
++ struct tasklet_struct net_rx_tasklet;
++ } tasklet;
+
-+ netif->stats.rx_bytes += skb->len;
-+ netif->stats.rx_packets++;
++ struct {
++ wait_queue_head_t netbk_action_wq;
++ struct task_struct *task;
++ } kthread;
++ };
+
-+ if (unlikely(netbk_copy_skb_mode == NETBK_ALWAYS_COPY_SKB) &&
-+ unlikely(skb_linearize(skb))) {
-+ DPRINTK("Can't linearize skb in net_tx_action.\n");
-+ kfree_skb(skb);
-+ continue;
-+ }
++ struct sk_buff_head rx_queue;
++ struct sk_buff_head tx_queue;
+
-+ netif_rx(skb);
-+ netif->dev->last_rx = jiffies;
-+ }
++ struct timer_list net_timer;
++ struct timer_list netbk_tx_pending_timer;
+
-+ if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB &&
-+ !list_empty(&pending_inuse_head)) {
-+ struct netbk_tx_pending_inuse *oldest;
++ struct page **mmap_pages;
+
-+ oldest = list_entry(pending_inuse_head.next,
-+ struct netbk_tx_pending_inuse, list);
-+ mod_timer(&netbk_tx_pending_timer, oldest->alloc_time + HZ);
-+ }
-+}
++ pending_ring_idx_t pending_prod;
++ pending_ring_idx_t pending_cons;
++ pending_ring_idx_t dealloc_prod;
++ pending_ring_idx_t dealloc_cons;
+
-+static void netif_idx_release(u16 pending_idx)
-+{
-+ static DEFINE_SPINLOCK(_lock);
-+ unsigned long flags;
++ struct list_head pending_inuse_head;
++ struct list_head net_schedule_list;
+
-+ spin_lock_irqsave(&_lock, flags);
-+ dealloc_ring[MASK_PEND_IDX(dealloc_prod)] = pending_idx;
-+ /* Sync with net_tx_action_dealloc: insert idx /then/ incr producer. */
-+ smp_wmb();
-+ dealloc_prod++;
-+ spin_unlock_irqrestore(&_lock, flags);
++ /* Protect the net_schedule_list in netif. */
++ spinlock_t net_schedule_list_lock;
+
-+ tasklet_schedule(&net_tx_tasklet);
-+}
++ atomic_t netfront_count;
+
-+static void netif_page_release(struct page *page, unsigned int order)
-+{
-+ int idx = netif_page_index(page);
-+ BUG_ON(order);
-+ BUG_ON(idx < 0);
-+ netif_idx_release(idx);
-+}
++ struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
++ struct netbk_tx_pending_inuse pending_inuse[MAX_PENDING_REQS];
++ struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
++ struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
+
-+irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
-+{
-+ netif_t *netif = dev_id;
++ grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
++ u16 pending_ring[MAX_PENDING_REQS];
++ u16 dealloc_ring[MAX_PENDING_REQS];
+
-+ add_to_net_schedule_list_tail(netif);
-+ maybe_schedule_tx_action();
++ /*
++ * Each head or fragment can be up to 4096 bytes. Given
++ * MAX_BUFFER_OFFSET of 4096 the worst case is that each
++	 * head/fragment uses 2 copy operations.
++ */
++ struct gnttab_copy grant_copy_op[2*NET_RX_RING_SIZE];
++ unsigned char rx_notify[NR_IRQS];
++ u16 notify_list[NET_RX_RING_SIZE];
++ struct netbk_rx_meta meta[2*NET_RX_RING_SIZE];
++};
+
-+ if (netif_schedulable(netif) && !netbk_queue_full(netif))
-+ netif_wake_queue(netif->dev);
++extern struct xen_netbk *xen_netbk;
++extern int xen_netbk_group_nr;
+
-+ return IRQ_HANDLED;
-+}
++#endif /* __XEN_NETBACK__COMMON_H__ */
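The common.h shown above multiplexes pages between netback groups by packing a (group, idx) pair into the pointer-sized page->mapping slot via union page_ext, squeezing idx into 8 bits when longs are 32-bit. Below is a userspace sketch of the same encode/decode, assuming only the layout the header shows; the function names are illustrative, not driver symbols.

/* Userspace sketch of the union page_ext packing from common.h above.
 * Widths mirror the header: with 32-bit longs, idx gets 8 bits and
 * group the remaining 24; with 64-bit longs both fit as plain ints.
 * In the driver the encoded value lives in page->mapping. */
#include <stdio.h>
#include <limits.h>

union page_ext {
	struct {
#if ULONG_MAX == 0xffffffffUL	/* stand-in for BITS_PER_LONG < 64 */
#define IDX_WIDTH 8
#define GROUP_WIDTH (32 - IDX_WIDTH)
		unsigned int group:GROUP_WIDTH;
		unsigned int idx:IDX_WIDTH;
#else
		unsigned int group, idx;
#endif
	} e;
	void *mapping;
};

/* Pack a (group, idx) pair into one pointer-sized value. */
static void *page_ext_encode(unsigned int group, unsigned int idx)
{
	union page_ext ext = { .e = { .group = group, .idx = idx } };

	return ext.mapping;
}

/* Recover the pair from the pointer-sized value. */
static void page_ext_decode(void *mapping, unsigned int *group,
			    unsigned int *idx)
{
	union page_ext ext = { .mapping = mapping };

	*group = ext.e.group;
	*idx = ext.e.idx;
}

int main(void)
{
	unsigned int group, idx;

	page_ext_decode(page_ext_encode(3, 42), &group, &idx);
	printf("group=%u idx=%u\n", group, idx);	/* group=3 idx=42 */
	return 0;
}

Overlaying the pair on page->mapping costs no extra storage per page, which matters here because every pending request slot in every group has a page behind it.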
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+new file mode 100644
+index 0000000..b429f8c
+--- /dev/null
++++ b/drivers/net/xen-netback/interface.c
+@@ -0,0 +1,470 @@
++/*
++ * Network-device interface management.
++ *
++ * Copyright (c) 2004-2005, Keir Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
+
-+static void make_tx_response(netif_t *netif,
-+ netif_tx_request_t *txp,
-+ s8 st)
-+{
-+ RING_IDX i = netif->tx.rsp_prod_pvt;
-+ netif_tx_response_t *resp;
-+ int notify;
++#include "common.h"
+
-+ resp = RING_GET_RESPONSE(&netif->tx, i);
-+ resp->id = txp->id;
-+ resp->status = st;
++#include <linux/ethtool.h>
++#include <linux/rtnetlink.h>
+
-+ if (txp->flags & NETTXF_extra_info)
-+ RING_GET_RESPONSE(&netif->tx, ++i)->status = NETIF_RSP_NULL;
++#include <xen/events.h>
++#include <asm/xen/hypercall.h>
+
-+ netif->tx.rsp_prod_pvt = ++i;
-+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->tx, notify);
-+ if (notify)
-+ notify_remote_via_irq(netif->irq);
++/*
++ * Module parameter 'queue_length':
++ *
++ * Enables queuing in the network stack when a client has run out of receive
++ * descriptors.
++ */
++static unsigned long netbk_queue_length = 32;
++module_param_named(queue_length, netbk_queue_length, ulong, 0644);
+
-+#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
-+ if (i == netif->tx.req_cons) {
-+ int more_to_do;
-+ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
-+ if (more_to_do)
-+ add_to_net_schedule_list_tail(netif);
++static void netbk_add_netif(struct xen_netbk *netbk, int group_nr,
++ struct xen_netif *netif)
++{
++ int i;
++ int min_netfront_count;
++ int min_group = 0;
++ min_netfront_count = atomic_read(&netbk[0].netfront_count);
++ for (i = 0; i < group_nr; i++) {
++ int netfront_count = atomic_read(&netbk[i].netfront_count);
++ if (netfront_count < min_netfront_count) {
++ min_group = i;
++ min_netfront_count = netfront_count;
++ }
+ }
-+#endif
++
++ netif->group = min_group;
++ atomic_inc(&netbk[netif->group].netfront_count);
+}
+
-+static netif_rx_response_t *make_rx_response(netif_t *netif,
-+ u16 id,
-+ s8 st,
-+ u16 offset,
-+ u16 size,
-+ u16 flags)
++static void netbk_remove_netif(struct xen_netbk *netbk, struct xen_netif *netif)
+{
-+ RING_IDX i = netif->rx.rsp_prod_pvt;
-+ netif_rx_response_t *resp;
-+
-+ resp = RING_GET_RESPONSE(&netif->rx, i);
-+ resp->offset = offset;
-+ resp->flags = flags;
-+ resp->id = id;
-+ resp->status = (s16)size;
-+ if (st < 0)
-+ resp->status = (s16)st;
-+
-+ netif->rx.rsp_prod_pvt = ++i;
-+
-+ return resp;
++ atomic_dec(&netbk[netif->group].netfront_count);
+}
+
-+#ifdef NETBE_DEBUG_INTERRUPT
-+static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
++static void __netif_up(struct xen_netif *netif)
+{
-+ struct list_head *ent;
-+ netif_t *netif;
-+ int i = 0;
++ netbk_add_netif(xen_netbk, xen_netbk_group_nr, netif);
++ enable_irq(netif->irq);
++ netif_schedule_work(netif);
++}
+
-+ printk(KERN_ALERT "netif_schedule_list:\n");
-+ spin_lock_irq(&net_schedule_list_lock);
++static void __netif_down(struct xen_netif *netif)
++{
++ disable_irq(netif->irq);
++ netif_deschedule_work(netif);
++ netbk_remove_netif(xen_netbk, netif);
++}
+
-+ list_for_each (ent, &net_schedule_list) {
-+ netif = list_entry(ent, netif_t, list);
-+ printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
-+ "rx_resp_prod=%08x\n",
-+ i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
-+ printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n",
-+ netif->tx.req_cons, netif->tx.rsp_prod_pvt);
-+ printk(KERN_ALERT " shared(rx_req_prod=%08x "
-+ "rx_resp_prod=%08x\n",
-+ netif->rx.sring->req_prod, netif->rx.sring->rsp_prod);
-+ printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n",
-+ netif->rx.sring->rsp_event, netif->tx.sring->req_prod);
-+ printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n",
-+ netif->tx.sring->rsp_prod, netif->tx.sring->rsp_event);
-+ i++;
++static int net_open(struct net_device *dev)
++{
++ struct xen_netif *netif = netdev_priv(dev);
++ if (netback_carrier_ok(netif)) {
++ __netif_up(netif);
++ netif_start_queue(dev);
+ }
++ return 0;
++}
+
-+ spin_unlock_irq(&net_schedule_list_lock);
-+ printk(KERN_ALERT " ** End of netif_schedule_list **\n");
-+
-+ return IRQ_HANDLED;
++static int net_close(struct net_device *dev)
++{
++ struct xen_netif *netif = netdev_priv(dev);
++ if (netback_carrier_ok(netif))
++ __netif_down(netif);
++ netif_stop_queue(dev);
++ return 0;
+}
-+#endif
+
-+static int __init netback_init(void)
++static int netbk_change_mtu(struct net_device *dev, int mtu)
+{
-+ int i;
-+ struct page *page;
++ int max = netbk_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
+
-+ if (!is_running_on_xen())
-+ return -ENODEV;
++ if (mtu > max)
++ return -EINVAL;
++ dev->mtu = mtu;
++ return 0;
++}
+
-+ /* We can increase reservation by this much in net_rx_action(). */
-+ balloon_update_driver_allowance(NET_RX_RING_SIZE);
++void netif_set_features(struct xen_netif *netif)
++{
++ struct net_device *dev = netif->dev;
++ int features = dev->features;
+
-+ skb_queue_head_init(&rx_queue);
-+ skb_queue_head_init(&tx_queue);
++ if (netif->can_sg)
++ features |= NETIF_F_SG;
++ if (netif->gso || netif->gso_prefix)
++ features |= NETIF_F_TSO;
++ if (netif->csum)
++ features |= NETIF_F_IP_CSUM;
+
-+ init_timer(&net_timer);
-+ net_timer.data = 0;
-+ net_timer.function = net_alarm;
++ features &= ~(netif->features_disabled);
+
-+ init_timer(&netbk_tx_pending_timer);
-+ netbk_tx_pending_timer.data = 0;
-+ netbk_tx_pending_timer.function = netbk_tx_pending_timeout;
++ if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN)
++ dev->mtu = ETH_DATA_LEN;
+
-+ mmap_pages = alloc_empty_pages_and_pagevec(MAX_PENDING_REQS);
-+ if (mmap_pages == NULL) {
-+ printk("%s: out of memory\n", __FUNCTION__);
-+ return -ENOMEM;
-+ }
++ dev->features = features;
++}
+
-+ for (i = 0; i < MAX_PENDING_REQS; i++) {
-+ page = mmap_pages[i];
-+ SetPageForeign(page, netif_page_release);
-+ netif_set_page_index(page, i);
-+ INIT_LIST_HEAD(&pending_inuse[i].list);
++static int netbk_set_tx_csum(struct net_device *dev, u32 data)
++{
++ struct xen_netif *netif = netdev_priv(dev);
++ if (data) {
++ if (!netif->csum)
++ return -ENOSYS;
++ netif->features_disabled &= ~NETIF_F_IP_CSUM;
++ } else {
++ netif->features_disabled |= NETIF_F_IP_CSUM;
+ }
+
-+ pending_cons = 0;
-+ pending_prod = MAX_PENDING_REQS;
-+ for (i = 0; i < MAX_PENDING_REQS; i++)
-+ pending_ring[i] = i;
-+
-+ spin_lock_init(&net_schedule_list_lock);
-+ INIT_LIST_HEAD(&net_schedule_list);
++ netif_set_features(netif);
++ return 0;
++}
+
-+ netbk_copy_skb_mode = NETBK_DONT_COPY_SKB;
-+ if (MODPARM_copy_skb) {
-+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace,
-+ NULL, 0))
-+ netbk_copy_skb_mode = NETBK_ALWAYS_COPY_SKB;
-+ else
-+ netbk_copy_skb_mode = NETBK_DELAYED_COPY_SKB;
++static int netbk_set_sg(struct net_device *dev, u32 data)
++{
++ struct xen_netif *netif = netdev_priv(dev);
++ if (data) {
++ if (!netif->can_sg)
++ return -ENOSYS;
++ netif->features_disabled &= ~NETIF_F_SG;
++ } else {
++ netif->features_disabled |= NETIF_F_SG;
+ }
+
-+ netif_accel_init();
-+
-+ netif_xenbus_init();
-+
-+#ifdef NETBE_DEBUG_INTERRUPT
-+ (void)bind_virq_to_irqhandler(VIRQ_DEBUG,
-+ 0,
-+ netif_be_dbg,
-+ SA_SHIRQ,
-+ "net-be-dbg",
-+ &netif_be_dbg);
-+#endif
-+
++ netif_set_features(netif);
+ return 0;
+}
+
-+module_init(netback_init);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-diff --git a/drivers/xen/netback/xenbus.c b/drivers/xen/netback/xenbus.c
-new file mode 100644
-index 0000000..d7faeb6
---- /dev/null
-+++ b/drivers/xen/netback/xenbus.c
-@@ -0,0 +1,454 @@
-+/* Xenbus code for netif backend
-+ Copyright (C) 2005 Rusty Russell <rusty(a)rustcorp.com.au>
-+ Copyright (C) 2005 XenSource Ltd
-+
-+ This program is free software; you can redistribute it and/or modify
-+ it under the terms of the GNU General Public License as published by
-+ the Free Software Foundation; either version 2 of the License, or
-+ (at your option) any later version.
++static int netbk_set_tso(struct net_device *dev, u32 data)
++{
++ struct xen_netif *netif = netdev_priv(dev);
++ if (data) {
++ if (!netif->gso && !netif->gso_prefix)
++ return -ENOSYS;
++ netif->features_disabled &= ~NETIF_F_TSO;
++ } else {
++ netif->features_disabled |= NETIF_F_TSO;
++ }
+
-+ This program is distributed in the hope that it will be useful,
-+ but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ GNU General Public License for more details.
++ netif_set_features(netif);
++ return 0;
++}
+
-+ You should have received a copy of the GNU General Public License
-+ along with this program; if not, write to the Free Software
-+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+*/
++static void netbk_get_drvinfo(struct net_device *dev,
++ struct ethtool_drvinfo *info)
++{
++ strcpy(info->driver, "netbk");
++ strcpy(info->bus_info, dev_name(dev->dev.parent));
++}
+
-+#include <stdarg.h>
-+#include <linux/module.h>
-+#include <xen/xenbus.h>
-+#include "common.h"
++static const struct netif_stat {
++ char name[ETH_GSTRING_LEN];
++ u16 offset;
++} netbk_stats[] = {
++ {
++ "copied_skbs",
++ offsetof(struct xen_netif, nr_copied_skbs)
++ },
++ {
++ "rx_gso_checksum_fixup",
++ offsetof(struct xen_netif, rx_gso_checksum_fixup)
++ },
++};
+
-+#if 0
-+#undef DPRINTK
-+#define DPRINTK(fmt, args...) \
-+ printk("netback/xenbus (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
-+#endif
++static int netbk_get_sset_count(struct net_device *dev, int string_set)
++{
++ switch (string_set) {
++ case ETH_SS_STATS:
++ return ARRAY_SIZE(netbk_stats);
++ default:
++ return -EINVAL;
++ }
++}
+
++static void netbk_get_ethtool_stats(struct net_device *dev,
++ struct ethtool_stats *stats, u64 * data)
++{
++ void *netif = netdev_priv(dev);
++ int i;
+
-+static int connect_rings(struct backend_info *);
-+static void connect(struct backend_info *);
-+static void backend_create_netif(struct backend_info *be);
++ for (i = 0; i < ARRAY_SIZE(netbk_stats); i++)
++ data[i] = *(int *)(netif + netbk_stats[i].offset);
++}
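++/*
++ * Resolution sketch for the table above: for netbk_stats[0]
++ * ("copied_skbs") the loop evaluates to
++ *
++ * data[0] = *(int *)((void *)netif +
++ * offsetof(struct xen_netif, nr_copied_skbs));
++ *
++ * so each u64 slot handed back to ethtool is filled from an
++ * int-sized counter at a fixed offset inside struct xen_netif.
++ */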
+
-+static int netback_remove(struct xenbus_device *dev)
++static void netbk_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
-+ struct backend_info *be = dev->dev.driver_data;
-+
-+ netback_remove_accelerators(be, dev);
++ int i;
+
-+ if (be->netif) {
-+ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
-+ netif_disconnect(be->netif);
-+ be->netif = NULL;
++ switch (stringset) {
++ case ETH_SS_STATS:
++ for (i = 0; i < ARRAY_SIZE(netbk_stats); i++)
++ memcpy(data + i * ETH_GSTRING_LEN,
++ netbk_stats[i].name, ETH_GSTRING_LEN);
++ break;
+ }
-+ kfree(be);
-+ dev->dev.driver_data = NULL;
-+ return 0;
+}
+
++static struct ethtool_ops network_ethtool_ops = {
++ .get_drvinfo = netbk_get_drvinfo,
+
-+/**
-+ * Entry point to this code when a new device is created. Allocate the basic
-+ * structures and switch to InitWait.
-+ */
-+static int netback_probe(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id)
++ .get_tx_csum = ethtool_op_get_tx_csum,
++ .set_tx_csum = netbk_set_tx_csum,
++ .get_sg = ethtool_op_get_sg,
++ .set_sg = netbk_set_sg,
++ .get_tso = ethtool_op_get_tso,
++ .set_tso = netbk_set_tso,
++ .get_link = ethtool_op_get_link,
++
++ .get_sset_count = netbk_get_sset_count,
++ .get_ethtool_stats = netbk_get_ethtool_stats,
++ .get_strings = netbk_get_strings,
++};
++
++static struct net_device_ops netback_ops = {
++ .ndo_start_xmit = netif_be_start_xmit,
++ .ndo_get_stats = netif_be_get_stats,
++ .ndo_open = net_open,
++ .ndo_stop = net_close,
++ .ndo_change_mtu = netbk_change_mtu,
++};
++
++struct xen_netif *netif_alloc(struct device *parent, domid_t domid,
++ unsigned int handle)
+{
-+ const char *message;
-+ struct xenbus_transaction xbt;
-+ int err;
-+ int sg;
-+ struct backend_info *be = kzalloc(sizeof(struct backend_info),
-+ GFP_KERNEL);
-+ if (!be) {
-+ xenbus_dev_fatal(dev, -ENOMEM,
-+ "allocating backend structure");
-+ return -ENOMEM;
-+ }
++ int err = 0;
++ struct net_device *dev;
++ struct xen_netif *netif;
++ char name[IFNAMSIZ] = {};
+
-+ be->dev = dev;
-+ dev->dev.driver_data = be;
++ snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
++ dev = alloc_netdev(sizeof(struct xen_netif), name, ether_setup);
++ if (dev == NULL) {
++ pr_debug("Could not allocate netdev\n");
++ return ERR_PTR(-ENOMEM);
++ }
+
-+ sg = 1;
-+ if (netbk_copy_skb_mode == NETBK_ALWAYS_COPY_SKB)
-+ sg = 0;
++ SET_NETDEV_DEV(dev, parent);
+
-+ do {
-+ err = xenbus_transaction_start(&xbt);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "starting transaction");
-+ goto fail;
-+ }
++ netif = netdev_priv(dev);
++ memset(netif, 0, sizeof(*netif));
++ netif->domid = domid;
++ netif->group = -1;
++ netif->handle = handle;
++ netif->can_sg = 1;
++ netif->csum = 1;
++ atomic_set(&netif->refcnt, 1);
++ init_waitqueue_head(&netif->waiting_to_free);
++ netif->dev = dev;
++ INIT_LIST_HEAD(&netif->list);
+
-+ err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
-+ if (err) {
-+ message = "writing feature-sg";
-+ goto abort_transaction;
-+ }
++ netback_carrier_off(netif);
+
-+ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
-+ "%d", sg);
-+ if (err) {
-+ message = "writing feature-gso-tcpv4";
-+ goto abort_transaction;
-+ }
++ netif->credit_bytes = netif->remaining_credit = ~0UL;
++ netif->credit_usec = 0UL;
++ init_timer(&netif->credit_timeout);
++ /* Initialize 'expires' now: it's used to track the credit window. */
++ netif->credit_timeout.expires = jiffies;
+
-+ /* We support rx-copy path. */
-+ err = xenbus_printf(xbt, dev->nodename,
-+ "feature-rx-copy", "%d", 1);
-+ if (err) {
-+ message = "writing feature-rx-copy";
-+ goto abort_transaction;
-+ }
++ dev->netdev_ops = &netback_ops;
++ netif_set_features(netif);
++ SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
+
-+ /*
-+ * We don't support rx-flip path (except old guests who don't
-+ * grok this feature flag).
-+ */
-+ err = xenbus_printf(xbt, dev->nodename,
-+ "feature-rx-flip", "%d", 0);
-+ if (err) {
-+ message = "writing feature-rx-flip";
-+ goto abort_transaction;
-+ }
++ dev->tx_queue_len = netbk_queue_length;
+
-+ err = xenbus_transaction_end(xbt, 0);
-+ } while (err == -EAGAIN);
++ /*
++ * Initialise a dummy MAC address. We choose the numerically
++ * largest non-broadcast address to prevent the address getting
++ * stolen by an Ethernet bridge for STP purposes.
++ * (FE:FF:FF:FF:FF:FF)
++ */
++ memset(dev->dev_addr, 0xFF, ETH_ALEN);
++ dev->dev_addr[0] &= ~0x01;
+
++ rtnl_lock();
++ err = register_netdevice(dev);
++ rtnl_unlock();
+ if (err) {
-+ xenbus_dev_fatal(dev, err, "completing transaction");
-+ goto fail;
++ pr_debug("Could not register new net device %s: err=%d\n",
++ dev->name, err);
++ free_netdev(dev);
++ return ERR_PTR(err);
+ }
+
-+ netback_probe_accelerators(be, dev);
++ pr_debug("Successfully created netif\n");
++ return netif;
++}
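++/*
++ * The MAC initialisation above works out as:
++ *
++ * memset(dev->dev_addr, 0xFF, ETH_ALEN); // FF:FF:FF:FF:FF:FF
++ * dev->dev_addr[0] &= ~0x01; // -> FE:FF:FF:FF:FF:FF
++ *
++ * Clearing bit 0 (the group/multicast bit) yields a valid unicast
++ * address that sorts above any real NIC's address, so a bridge
++ * electing the numerically smallest port MAC for STP will never
++ * pick the vif.
++ */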
+
-+ err = xenbus_switch_state(dev, XenbusStateInitWait);
-+ if (err)
-+ goto fail;
++static int map_frontend_pages(struct xen_netif *netif,
++ grant_ref_t tx_ring_ref,
++ grant_ref_t rx_ring_ref)
++{
++ struct gnttab_map_grant_ref op;
+
-+ /* This kicks hotplug scripts, so do it immediately. */
-+ backend_create_netif(be);
++ gnttab_set_map_op(&op, (unsigned long)netif->tx_comms_area->addr,
++ GNTMAP_host_map, tx_ring_ref, netif->domid);
+
-+ return 0;
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
+
-+abort_transaction:
-+ xenbus_transaction_end(xbt, 1);
-+ xenbus_dev_fatal(dev, err, "%s", message);
-+fail:
-+ DPRINTK("failed");
-+ netback_remove(dev);
-+ return err;
-+}
++ if (op.status) {
++ pr_debug("Gnttab failure mapping tx_ring_ref!\n");
++ return op.status;
++ }
+
++ netif->tx_shmem_ref = tx_ring_ref;
++ netif->tx_shmem_handle = op.handle;
+
-+/**
-+ * Handle the creation of the hotplug script environment. We add the script
-+ * and vif variables to the environment, for the benefit of the vif-* hotplug
-+ * scripts.
-+ */
-+static int netback_uevent(struct xenbus_device *xdev, char **envp,
-+ int num_envp, char *buffer, int buffer_size)
-+{
-+ struct backend_info *be = xdev->dev.driver_data;
-+ netif_t *netif = be->netif;
-+ int i = 0, length = 0;
-+ char *val;
++ gnttab_set_map_op(&op, (unsigned long)netif->rx_comms_area->addr,
++ GNTMAP_host_map, rx_ring_ref, netif->domid);
+
-+ DPRINTK("netback_uevent");
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
+
-+ val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
-+ if (IS_ERR(val)) {
-+ int err = PTR_ERR(val);
-+ xenbus_dev_fatal(xdev, err, "reading script");
-+ return err;
-+ }
-+ else {
-+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
-+ &length, "script=%s", val);
-+ kfree(val);
-+ }
++ if (op.status) {
++ struct gnttab_unmap_grant_ref unop;
+
-+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
-+ "vif=%s", netif->dev->name);
++ gnttab_set_unmap_op(&unop,
++ (unsigned long)netif->tx_comms_area->addr,
++ GNTMAP_host_map, netif->tx_shmem_handle);
++ HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unop, 1);
++ pr_debug("Gnttab failure mapping rx_ring_ref!\n");
++ return op.status;
++ }
+
-+ envp[i] = NULL;
++ netif->rx_shmem_ref = rx_ring_ref;
++ netif->rx_shmem_handle = op.handle;
+
+ return 0;
+}
+
-+
-+static void backend_create_netif(struct backend_info *be)
++static void unmap_frontend_pages(struct xen_netif *netif)
+{
-+ int err;
-+ long handle;
-+ struct xenbus_device *dev = be->dev;
++ struct gnttab_unmap_grant_ref op;
+
-+ if (be->netif != NULL)
-+ return;
++ gnttab_set_unmap_op(&op, (unsigned long)netif->tx_comms_area->addr,
++ GNTMAP_host_map, netif->tx_shmem_handle);
+
-+ err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
-+ if (err != 1) {
-+ xenbus_dev_fatal(dev, err, "reading handle");
-+ return;
-+ }
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
+
-+ be->netif = netif_alloc(dev->otherend_id, handle);
-+ if (IS_ERR(be->netif)) {
-+ err = PTR_ERR(be->netif);
-+ be->netif = NULL;
-+ xenbus_dev_fatal(dev, err, "creating interface");
-+ return;
-+ }
++ gnttab_set_unmap_op(&op, (unsigned long)netif->rx_comms_area->addr,
++ GNTMAP_host_map, netif->rx_shmem_handle);
+
-+ kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
+}
+
-+
-+/**
-+ * Callback received when the frontend's state changes.
-+ */
-+static void frontend_changed(struct xenbus_device *dev,
-+ enum xenbus_state frontend_state)
++int netif_map(struct xen_netif *netif, unsigned long tx_ring_ref,
++ unsigned long rx_ring_ref, unsigned int evtchn)
+{
-+ struct backend_info *be = dev->dev.driver_data;
++ int err = -ENOMEM;
++ struct xen_netif_tx_sring *txs;
++ struct xen_netif_rx_sring *rxs;
+
-+ DPRINTK("%s", xenbus_strstate(frontend_state));
++ /* Already connected through? */
++ if (netif->irq)
++ return 0;
+
-+ be->frontend_state = frontend_state;
++ netif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
++ if (netif->tx_comms_area == NULL)
++ return -ENOMEM;
++ netif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
++ if (netif->rx_comms_area == NULL)
++ goto err_rx;
+
-+ switch (frontend_state) {
-+ case XenbusStateInitialising:
-+ if (dev->state == XenbusStateClosed) {
-+ printk(KERN_INFO "%s: %s: prepare for reconnect\n",
-+ __FUNCTION__, dev->nodename);
-+ xenbus_switch_state(dev, XenbusStateInitWait);
-+ }
-+ break;
++ err = map_frontend_pages(netif, tx_ring_ref, rx_ring_ref);
++ if (err)
++ goto err_map;
+
-+ case XenbusStateInitialised:
-+ break;
++ err = bind_interdomain_evtchn_to_irqhandler(
++ netif->domid, evtchn, netif_be_int, 0,
++ netif->dev->name, netif);
++ if (err < 0)
++ goto err_hypervisor;
++ netif->irq = err;
++ disable_irq(netif->irq);
+
-+ case XenbusStateConnected:
-+ if (dev->state == XenbusStateConnected)
-+ break;
-+ backend_create_netif(be);
-+ if (be->netif)
-+ connect(be);
-+ break;
++ txs = (struct xen_netif_tx_sring *)netif->tx_comms_area->addr;
++ BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);
+
-+ case XenbusStateClosing:
-+ if (be->netif) {
-+ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
-+ netif_disconnect(be->netif);
-+ be->netif = NULL;
-+ }
-+ xenbus_switch_state(dev, XenbusStateClosing);
-+ break;
++ rxs = (struct xen_netif_rx_sring *)
++ ((char *)netif->rx_comms_area->addr);
++ BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);
+
-+ case XenbusStateClosed:
-+ xenbus_switch_state(dev, XenbusStateClosed);
-+ if (xenbus_dev_is_online(dev))
-+ break;
-+ /* fall through if not online */
-+ case XenbusStateUnknown:
-+ device_unregister(&dev->dev);
-+ break;
++ netif->rx_req_cons_peek = 0;
+
-+ default:
-+ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
-+ frontend_state);
-+ break;
-+ }
-+}
++ netif_get(netif);
++
++ rtnl_lock();
++ netback_carrier_on(netif);
++ if (netif_running(netif->dev))
++ __netif_up(netif);
++ rtnl_unlock();
+
++ return 0;
++err_hypervisor:
++ unmap_frontend_pages(netif);
++err_map:
++ free_vm_area(netif->rx_comms_area);
++err_rx:
++ free_vm_area(netif->tx_comms_area);
++ return err;
++}
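++/*
++ * A minimal sketch of what BACK_RING_INIT() establishes, following
++ * the macro in xen/interface/io/ring.h:
++ *
++ * netif->tx.rsp_prod_pvt = 0;
++ * netif->tx.req_cons = 0;
++ * netif->tx.nr_ents = __RING_SIZE(txs, PAGE_SIZE);
++ * netif->tx.sring = txs;
++ *
++ * i.e. a private response producer, a request consumer, and the
++ * entry count that fits in the one shared page mapped above.
++ */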
+
-+static void xen_net_read_rate(struct xenbus_device *dev,
-+ unsigned long *bytes, unsigned long *usec)
++void netif_disconnect(struct xen_netif *netif)
+{
-+ char *s, *e;
-+ unsigned long b, u;
-+ char *ratestr;
-+
-+ /* Default to unlimited bandwidth. */
-+ *bytes = ~0UL;
-+ *usec = 0;
++ if (netback_carrier_ok(netif)) {
++ rtnl_lock();
++ netback_carrier_off(netif);
++ netif_carrier_off(netif->dev); /* discard queued packets */
++ if (netif_running(netif->dev))
++ __netif_down(netif);
++ rtnl_unlock();
++ netif_put(netif);
++ }
+
-+ ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL);
-+ if (IS_ERR(ratestr))
-+ return;
++ atomic_dec(&netif->refcnt);
++ wait_event(netif->waiting_to_free, atomic_read(&netif->refcnt) == 0);
+
-+ s = ratestr;
-+ b = simple_strtoul(s, &e, 10);
-+ if ((s == e) || (*e != ','))
-+ goto fail;
++ del_timer_sync(&netif->credit_timeout);
+
-+ s = e + 1;
-+ u = simple_strtoul(s, &e, 10);
-+ if ((s == e) || (*e != '\0'))
-+ goto fail;
++ if (netif->irq)
++ unbind_from_irqhandler(netif->irq, netif);
+
-+ *bytes = b;
-+ *usec = u;
++ unregister_netdev(netif->dev);
+
-+ kfree(ratestr);
-+ return;
++ if (netif->tx.sring) {
++ unmap_frontend_pages(netif);
++ free_vm_area(netif->tx_comms_area);
++ free_vm_area(netif->rx_comms_area);
++ }
+
-+ fail:
-+ WPRINTK("Failed to parse network rate limit. Traffic unlimited.\n");
-+ kfree(ratestr);
++ free_netdev(netif->dev);
+}
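++/*
++ * Teardown ordering sketch: the reference taken in netif_alloc() is
++ * dropped with atomic_dec(), then wait_event() blocks until every
++ * transient netif_get() holder has done a netif_put() (which wakes
++ * waiting_to_free when the count hits zero, per common.h). Only then
++ * are the irq, the shared rings and finally the netdev released, so
++ * no packet processing can still reference them.
++ */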
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+new file mode 100644
+index 0000000..b290525
+--- /dev/null
++++ b/drivers/net/xen-netback/netback.c
+@@ -0,0 +1,1934 @@
++/*
++ * Back-end of the driver for virtual network devices. This portion of the
++ * driver exports a 'unified' network-device interface that can be accessed
++ * by any operating system that implements a compatible front end. A
++ * reference front-end implementation can be found in:
++ * drivers/net/xen-netfront.c
++ *
++ * Copyright (c) 2002-2005, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
+
-+static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
-+{
-+ char *s, *e, *macstr;
-+ int i;
-+
-+ macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
-+ if (IS_ERR(macstr))
-+ return PTR_ERR(macstr);
++#include "common.h"
+
-+ for (i = 0; i < ETH_ALEN; i++) {
-+ mac[i] = simple_strtoul(s, &e, 16);
-+ if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
-+ kfree(macstr);
-+ return -ENOENT;
-+ }
-+ s = e+1;
-+ }
++#include <linux/kthread.h>
++#include <linux/if_vlan.h>
++#include <linux/udp.h>
+
-+ kfree(macstr);
-+ return 0;
++#include <net/tcp.h>
++
++#include <xen/balloon.h>
++#include <xen/events.h>
++#include <xen/interface/memory.h>
++
++#include <asm/xen/hypercall.h>
++#include <asm/xen/page.h>
++
++/*define NETBE_DEBUG_INTERRUPT*/
++
++struct xen_netbk *xen_netbk;
++int xen_netbk_group_nr;
++
++static void netif_idx_release(struct xen_netbk *netbk, u16 pending_idx);
++static void make_tx_response(struct xen_netif *netif,
++ struct xen_netif_tx_request *txp,
++ s8 st);
++static struct xen_netif_rx_response *make_rx_response(struct xen_netif *netif,
++ u16 id,
++ s8 st,
++ u16 offset,
++ u16 size,
++ u16 flags);
++
++static void net_tx_action(unsigned long data);
++
++static void net_rx_action(unsigned long data);
++
++static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
++ unsigned int idx)
++{
++ return page_to_pfn(netbk->mmap_pages[idx]);
+}
+
-+static void connect(struct backend_info *be)
++static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
++ unsigned int idx)
+{
-+ int err;
-+ struct xenbus_device *dev = be->dev;
++ return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
++}
+
-+ err = connect_rings(be);
-+ if (err)
-+ return;
++/* extra field used in struct page */
++static inline void netif_set_page_ext(struct page *pg,
++ unsigned int group, unsigned int idx)
++{
++ union page_ext ext = { .e = { .group = group + 1, .idx = idx } };
+
-+ err = xen_net_read_mac(dev, be->netif->fe_dev_addr);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
-+ return;
-+ }
++ BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
++ pg->mapping = ext.mapping;
++}
+
-+ xen_net_read_rate(dev, &be->netif->credit_bytes,
-+ &be->netif->credit_usec);
-+ be->netif->remaining_credit = be->netif->credit_bytes;
++static int netif_get_page_ext(struct page *pg,
++ unsigned int *_group, unsigned int *_idx)
++{
++ union page_ext ext = { .mapping = pg->mapping };
++ struct xen_netbk *netbk;
++ unsigned int group, idx;
+
-+ xenbus_switch_state(dev, XenbusStateConnected);
++ if (!PageForeign(pg))
++ return 0;
+
-+ netif_wake_queue(be->netif->dev);
++ group = ext.e.group - 1;
++
++ if (group < 0 || group >= xen_netbk_group_nr)
++ return 0;
++
++ netbk = &xen_netbk[group];
++
++ if (netbk->mmap_pages == NULL)
++ return 0;
++
++ idx = ext.e.idx;
++
++ if ((idx < 0) || (idx >= MAX_PENDING_REQS))
++ return 0;
++
++ if (netbk->mmap_pages[idx] != pg)
++ return 0;
++
++ *_group = group;
++ *_idx = idx;
++
++ return 1;
+}
+
++/*
++ * This is the amount of each packet we copy rather than map, so that the
++ * guest can't fiddle with the contents of the headers while we do
++ * packet processing on them (netfilter, routing, etc).
++ */
++#define PKT_PROT_LEN (ETH_HLEN + \
++ VLAN_HLEN + \
++ sizeof(struct iphdr) + MAX_IPOPTLEN + \
++ sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
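++/*
++ * Worked out: 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 20 + 40 (IP header
++ * plus maximum IP options) + 20 + 40 (TCP header plus
++ * MAX_TCP_OPTION_SPACE) = 138 bytes of every packet are copied
++ * rather than mapped.
++ */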
+
-+static int connect_rings(struct backend_info *be)
++static inline pending_ring_idx_t pending_index(unsigned i)
+{
-+ struct xenbus_device *dev = be->dev;
-+ unsigned long tx_ring_ref, rx_ring_ref;
-+ unsigned int evtchn, rx_copy;
-+ int err;
-+ int val;
++ return i & (MAX_PENDING_REQS-1);
++}
+
-+ DPRINTK("");
++static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
++{
++ return MAX_PENDING_REQS -
++ netbk->pending_prod + netbk->pending_cons;
++}
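++/*
++ * Both helpers rely on MAX_PENDING_REQS being a power of two:
++ * pending_index() masks a free-running counter instead of taking a
++ * modulo, and the producer/consumer difference stays correct across
++ * wrap. E.g. assuming MAX_PENDING_REQS == 256 (hypothetical value,
++ * the real constant lives in common.h):
++ *
++ * pending_index(257) == 257 & 255 == 1
++ * nr_pending_reqs() == 256 - (pending_prod - pending_cons)
++ *
++ * i.e. total slots minus free slots gives the requests in flight.
++ */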
+
-+ err = xenbus_gather(XBT_NIL, dev->otherend,
-+ "tx-ring-ref", "%lu", &tx_ring_ref,
-+ "rx-ring-ref", "%lu", &rx_ring_ref,
-+ "event-channel", "%u", &evtchn, NULL);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err,
-+ "reading %s/ring-ref and event-channel",
-+ dev->otherend);
-+ return err;
-+ }
++/* Setting this allows the safe use of this driver without netloop. */
++static int MODPARM_copy_skb = 1;
++module_param_named(copy_skb, MODPARM_copy_skb, bool, 0);
++MODULE_PARM_DESC(copy_skb, "Copy data received from netfront without netloop");
+
-+ err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
-+ &rx_copy);
-+ if (err == -ENOENT) {
-+ err = 0;
-+ rx_copy = 0;
-+ }
-+ if (err < 0) {
-+ xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
-+ dev->otherend);
-+ return err;
-+ }
-+ be->netif->copying_receiver = !!rx_copy;
++int netbk_copy_skb_mode;
+
-+ if (be->netif->dev->tx_queue_len != 0) {
-+ if (xenbus_scanf(XBT_NIL, dev->otherend,
-+ "feature-rx-notify", "%d", &val) < 0)
-+ val = 0;
-+ if (val)
-+ be->netif->can_queue = 1;
-+ else
-+ /* Must be non-zero for pfifo_fast to work. */
-+ be->netif->dev->tx_queue_len = 1;
-+ }
++static int MODPARM_netback_kthread;
++module_param_named(netback_kthread, MODPARM_netback_kthread, bool, 0);
++MODULE_PARM_DESC(netback_kthread, "Use kernel thread to replace tasklet");
+
-+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", "%d", &val) < 0)
-+ val = 0;
-+ if (val) {
-+ be->netif->features |= NETIF_F_SG;
-+ be->netif->dev->features |= NETIF_F_SG;
++/*
++ * Netback bottom half handler.
++ * dir indicates the data direction.
++ * rx: 1, tx: 0.
++ */
++static inline void xen_netbk_bh_handler(struct xen_netbk *netbk, int dir)
++{
++ if (MODPARM_netback_kthread)
++ wake_up(&netbk->kthread.netbk_action_wq);
++ else if (dir)
++ tasklet_schedule(&netbk->tasklet.net_rx_tasklet);
++ else
++ tasklet_schedule(&netbk->tasklet.net_tx_tasklet);
++}
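++/*
++ * Dispatch example: loaded with netback_kthread=1, both directions
++ * funnel into the per-group kernel thread via its wait queue;
++ * otherwise xen_netbk_bh_handler(netbk, 1) schedules the rx tasklet
++ * and xen_netbk_bh_handler(netbk, 0) the tx tasklet.
++ */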
++
++static inline void maybe_schedule_tx_action(struct xen_netbk *netbk)
++{
++ smp_mb();
++ if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
++ !list_empty(&netbk->net_schedule_list))
++ xen_netbk_bh_handler(netbk, 0);
++}
++
++static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
++{
++ struct skb_shared_info *ninfo;
++ struct sk_buff *nskb;
++ unsigned long offset;
++ int ret;
++ int len;
++ int headlen;
++
++ BUG_ON(skb_shinfo(skb)->frag_list != NULL);
++
++ nskb = alloc_skb(SKB_MAX_HEAD(0), GFP_ATOMIC | __GFP_NOWARN);
++ if (unlikely(!nskb))
++ goto err;
++
++ skb_reserve(nskb, NET_SKB_PAD + NET_IP_ALIGN);
++ headlen = skb_end_pointer(nskb) - nskb->data;
++ if (headlen > skb_headlen(skb))
++ headlen = skb_headlen(skb);
++ ret = skb_copy_bits(skb, 0, __skb_put(nskb, headlen), headlen);
++ BUG_ON(ret);
++
++ ninfo = skb_shinfo(nskb);
++ ninfo->gso_size = skb_shinfo(skb)->gso_size;
++ ninfo->gso_type = skb_shinfo(skb)->gso_type;
++
++ offset = headlen;
++ len = skb->len - headlen;
++
++ nskb->len = skb->len;
++ nskb->data_len = len;
++ nskb->truesize += len;
++
++ while (len) {
++ struct page *page;
++ int copy;
++ int zero;
++
++ if (unlikely(ninfo->nr_frags >= MAX_SKB_FRAGS)) {
++ dump_stack();
++ goto err_free;
++ }
++
++ copy = len >= PAGE_SIZE ? PAGE_SIZE : len;
++ zero = len >= PAGE_SIZE ? 0 : __GFP_ZERO;
++
++ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN | zero);
++ if (unlikely(!page))
++ goto err_free;
++
++ ret = skb_copy_bits(skb, offset, page_address(page), copy);
++ BUG_ON(ret);
++
++ ninfo->frags[ninfo->nr_frags].page = page;
++ ninfo->frags[ninfo->nr_frags].page_offset = 0;
++ ninfo->frags[ninfo->nr_frags].size = copy;
++ ninfo->nr_frags++;
++
++ offset += copy;
++ len -= copy;
+ }
+
-+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", "%d",
-+ &val) < 0)
-+ val = 0;
-+ if (val) {
-+ be->netif->features |= NETIF_F_TSO;
-+ be->netif->dev->features |= NETIF_F_TSO;
++#ifdef NET_SKBUFF_DATA_USES_OFFSET
++ offset = 0;
++#else
++ offset = nskb->data - skb->data;
++#endif
++
++ nskb->transport_header = skb->transport_header + offset;
++ nskb->network_header = skb->network_header + offset;
++ nskb->mac_header = skb->mac_header + offset;
++
++ return nskb;
++
++ err_free:
++ kfree_skb(nskb);
++ err:
++ return NULL;
++}
++
++static inline int netbk_max_required_rx_slots(struct xen_netif *netif)
++{
++ if (netif->can_sg || netif->gso || netif->gso_prefix)
++ return MAX_SKB_FRAGS + 2; /* header + extra_info + frags */
++ return 1; /* all in one */
++}
++
++static inline int netbk_queue_full(struct xen_netif *netif)
++{
++ RING_IDX peek = netif->rx_req_cons_peek;
++ RING_IDX needed = netbk_max_required_rx_slots(netif);
++
++ return ((netif->rx.sring->req_prod - peek) < needed) ||
++ ((netif->rx.rsp_prod_pvt + NET_RX_RING_SIZE - peek) < needed);
++}
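++/*
++ * Example, assuming NET_RX_RING_SIZE == 256 and an SG-capable vif
++ * needing MAX_SKB_FRAGS + 2 == 20 slots: with req_prod == 300,
++ * rsp_prod_pvt == 80 and peek == 298, only 300 - 298 = 2 requests
++ * remain unconsumed, so the ring counts as full even though
++ * 80 + 256 - 298 = 38 response slots would still fit.
++ */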
++
++/*
++ * Returns true if we should start a new receive buffer instead of
++ * adding 'size' bytes to a buffer which currently contains 'offset'
++ * bytes.
++ */
++static bool start_new_rx_buffer(int offset, unsigned long size, int head)
++{
++ /* simple case: we have completely filled the current buffer. */
++ if (offset == MAX_BUFFER_OFFSET)
++ return true;
++
++ /*
++ * complex case: start a fresh buffer if the current frag
++ * would overflow the current buffer but only if:
++ * (i) this frag would fit completely in the next buffer
++ * and (ii) there is already some data in the current buffer
++ * and (iii) this is not the head buffer.
++ *
++ * Where:
++ * - (i) stops us splitting a frag into two copies
++ * unless the frag is too large for a single buffer.
++ * - (ii) stops us from leaving a buffer pointlessly empty.
++ * - (iii) stops us leaving the first buffer
++ * empty. Strictly speaking this is already covered
++ * by (ii) but is explicitly checked because
++ * netfront relies on the first buffer being
++ * non-empty and can crash otherwise.
++ *
++ * This means we will effectively linearise small
++ * frags but do not needlessly split large buffers
++ * into multiple copies, tending to give large frags
++ * their own buffers as before.
++ */
++ if ((offset + size > MAX_BUFFER_OFFSET) &&
++ (size <= MAX_BUFFER_OFFSET) && offset && !head)
++ return true;
++
++ return false;
++}
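++/*
++ * Worked example, assuming MAX_BUFFER_OFFSET == 4096: a 2000-byte
++ * frag arriving at offset 3000 would overflow (3000 + 2000 > 4096),
++ * fits whole in a fresh buffer (2000 <= 4096), the current buffer
++ * is non-empty and is not the head, so this returns true and the
++ * frag gets its own buffer. At offset 0 the same frag does not
++ * overflow at all, so this returns false and it is simply appended.
++ */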
++
++/*
++ * Figure out how many ring slots we're going to need to send @skb to
++ * the guest. This function is essentially a dry run of
++ * netbk_gop_frag_copy.
++ */
++static unsigned int count_skb_slots(struct sk_buff *skb, struct xen_netif *netif)
++{
++ unsigned int count = 1;
++ int i, copy_off = 0;
++
++ BUG_ON(offset_in_page(skb->data)+skb_headlen(skb) > MAX_BUFFER_OFFSET);
++
++ copy_off = skb_headlen(skb);
++
++ if (skb_shinfo(skb)->gso_size)
++ count++;
++
++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++ unsigned long size = skb_shinfo(skb)->frags[i].size;
++ unsigned long bytes;
++ while (size > 0) {
++ BUG_ON(copy_off > MAX_BUFFER_OFFSET);
++
++ if (start_new_rx_buffer(copy_off, size, 0)) {
++ count++;
++ copy_off = 0;
++ }
++
++ bytes = size;
++ if (copy_off + bytes > MAX_BUFFER_OFFSET)
++ bytes = MAX_BUFFER_OFFSET - copy_off;
++
++ copy_off += bytes;
++ size -= bytes;
++ }
+ }
++ return count;
++}
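++/*
++ * Example count, assuming MAX_BUFFER_OFFSET == 4096: a GSO skb with
++ * 200 bytes of linear data and two 4096-byte frags yields 4 slots,
++ * i.e. one for the head, one reserved for the GSO descriptor, and
++ * one per frag (each frag overflows the current buffer yet fits
++ * whole in a fresh one). This mirrors the slots that
++ * netbk_gop_frag_copy() will actually consume.
++ */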
+
-+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
-+ "%d", &val) < 0)
-+ val = 0;
-+ if (val) {
-+ be->netif->features &= ~NETIF_F_IP_CSUM;
-+ be->netif->dev->features &= ~NETIF_F_IP_CSUM;
++int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ struct xen_netif *netif = netdev_priv(dev);
++ struct xen_netbk *netbk;
++
++ BUG_ON(skb->dev != dev);
++
++ if (netif->group == -1)
++ goto drop;
++
++ netbk = &xen_netbk[netif->group];
++
++ /* Drop the packet if the target domain has no receive buffers. */
++ if (unlikely(!netif_schedulable(netif) || netbk_queue_full(netif)))
++ goto drop;
++
++ /*
++ * XXX For now we also copy skbuffs whose head crosses a page
++ * boundary, because netbk_gop_skb can't handle them.
++ */
++ if ((skb_headlen(skb) + offset_in_page(skb->data)) >= PAGE_SIZE) {
++ struct sk_buff *nskb = netbk_copy_skb(skb);
++ if (unlikely(nskb == NULL))
++ goto drop;
++ /* Copy only the header fields we use in this driver. */
++ nskb->dev = skb->dev;
++ nskb->ip_summed = skb->ip_summed;
++ dev_kfree_skb(skb);
++ skb = nskb;
+ }
+
-+ /* Map the shared frame, irq etc. */
-+ err = netif_map(be->netif, tx_ring_ref, rx_ring_ref, evtchn);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err,
-+ "mapping shared-frames %lu/%lu port %u",
-+ tx_ring_ref, rx_ring_ref, evtchn);
-+ return err;
++ /* Reserve ring slots for the worst-case number of fragments. */
++ netif->rx_req_cons_peek += count_skb_slots(skb, netif);
++ netif_get(netif);
++
++ if (netbk_can_queue(dev) && netbk_queue_full(netif)) {
++ netif->rx.sring->req_event = netif->rx_req_cons_peek +
++ netbk_max_required_rx_slots(netif);
++ mb(); /* request notification /then/ check & stop the queue */
++ if (netbk_queue_full(netif))
++ netif_stop_queue(dev);
+ }
-+ return 0;
-+}
++ skb_queue_tail(&netbk->rx_queue, skb);
+
++ xen_netbk_bh_handler(netbk, 1);
+
-+/* ** Driver Registration ** */
++ return 0;
+
++ drop:
++ netif->stats.tx_dropped++;
++ dev_kfree_skb(skb);
++ return 0;
++}
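++/*
++ * Flow-control sketch for the transmit path above: after reserving
++ * worst-case slots, the backend asks netfront for an event once
++ * req_prod advances far enough (rx.sring->req_event) and stops the
++ * Linux queue only if the ring is still full after the barrier; the
++ * matching netif_wake_queue() happens in net_rx_action() once
++ * responses have freed ring space.
++ */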
+
-+static const struct xenbus_device_id netback_ids[] = {
-+ { "vif" },
-+ { "" }
++struct netrx_pending_operations {
++ unsigned copy_prod, copy_cons;
++ unsigned meta_prod, meta_cons;
++ struct gnttab_copy *copy;
++ struct netbk_rx_meta *meta;
++ int copy_off;
++ grant_ref_t copy_gref;
+};
+
++static struct netbk_rx_meta *get_next_rx_buffer(struct xen_netif *netif,
++ struct netrx_pending_operations *npo)
++{
++ struct netbk_rx_meta *meta;
++ struct xen_netif_rx_request *req;
+
-+static struct xenbus_driver netback = {
-+ .name = "vif",
-+ .owner = THIS_MODULE,
-+ .ids = netback_ids,
-+ .probe = netback_probe,
-+ .remove = netback_remove,
-+ .uevent = netback_uevent,
-+ .otherend_changed = frontend_changed,
-+};
++ req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons++);
+
++ meta = npo->meta + npo->meta_prod++;
++ meta->gso_size = 0;
++ meta->size = 0;
++ meta->id = req->id;
+
-+void netif_xenbus_init(void)
-+{
-+ xenbus_register_backend(&netback);
++ npo->copy_off = 0;
++ npo->copy_gref = req->gref;
++
++ return meta;
+}
---
-1.7.4
-
-
-From 5b30803bf5f58ee980edd8d88a2d73dda995ee93 Mon Sep 17 00:00:00 2001
-From: Ian Campbell <ian.campbell(a)citrix.com>
-Date: Mon, 9 Feb 2009 12:05:52 -0800
-Subject: [PATCH 038/244] xen: netback: first cut at porting to upstream and cleaning up
-
-Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
----
- drivers/xen/Kconfig | 2 +-
- drivers/xen/netback/Makefile | 2 +-
- drivers/xen/netback/common.h | 33 +++---
- drivers/xen/netback/interface.c | 37 +++---
- drivers/xen/netback/netback.c | 248 ++++++++++++++++++++++++---------------
- drivers/xen/netback/xenbus.c | 25 ++--
- 6 files changed, 201 insertions(+), 146 deletions(-)
-
-diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
-index 7e83d43..30290a8 100644
---- a/drivers/xen/Kconfig
-+++ b/drivers/xen/Kconfig
-@@ -38,7 +38,7 @@ config XEN_BACKEND
- to other virtual machines.
-
- config XEN_NETDEV_BACKEND
-- bool "Xen backend network device"
-+ tristate "Xen backend network device"
- depends on XEN_BACKEND && NET
- help
- Implement the network backend driver, which passes packets
-diff --git a/drivers/xen/netback/Makefile b/drivers/xen/netback/Makefile
-index f4a0c51..a01a1a3 100644
---- a/drivers/xen/netback/Makefile
-+++ b/drivers/xen/netback/Makefile
-@@ -1,3 +1,3 @@
- obj-$(CONFIG_XEN_NETDEV_BACKEND) := netbk.o
-
--netbk-y := netback.o xenbus.o interface.o
-+netbk-y := netback.o xenbus.o interface.o
-diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
-index 9a54d57..65b88f4 100644
---- a/drivers/xen/netback/common.h
-+++ b/drivers/xen/netback/common.h
-@@ -43,8 +43,7 @@
- #include <asm/io.h>
- #include <asm/pgalloc.h>
- #include <xen/interface/grant_table.h>
--#include <xen/gnttab.h>
--#include <xen/driver_util.h>
-+#include <xen/grant_table.h>
- #include <xen/xenbus.h>
-
- #define DPRINTK(_f, _a...) \
-@@ -55,7 +54,7 @@
- #define WPRINTK(fmt, args...) \
- printk(KERN_WARNING "xen_net: " fmt, ##args)
-
--typedef struct netif_st {
-+struct xen_netif {
- /* Unique identifier for this interface. */
- domid_t domid;
- unsigned int handle;
-@@ -70,8 +69,8 @@ typedef struct netif_st {
- unsigned int irq;
-
- /* The shared rings and indexes. */
-- netif_tx_back_ring_t tx;
-- netif_rx_back_ring_t rx;
-+ struct xen_netif_tx_back_ring tx;
-+ struct xen_netif_rx_back_ring rx;
- struct vm_struct *tx_comms_area;
- struct vm_struct *rx_comms_area;
-
-@@ -103,7 +102,7 @@ typedef struct netif_st {
- unsigned int carrier;
-
- wait_queue_head_t waiting_to_free;
--} netif_t;
-+};
-
- /*
- * Implement our own carrier flag: the network stack's version causes delays
-@@ -141,7 +140,7 @@ struct netback_accelerator {
-
- struct backend_info {
- struct xenbus_device *dev;
-- netif_t *netif;
-+ struct xen_netif *netif;
- enum xenbus_state frontend_state;
-
- /* State relating to the netback accelerator */
-@@ -174,13 +173,13 @@ extern
- void netif_accel_init(void);
-
-
--#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
--#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
-+#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
-+#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
-
--void netif_disconnect(netif_t *netif);
-+void netif_disconnect(struct xen_netif *netif);
-
--netif_t *netif_alloc(domid_t domid, unsigned int handle);
--int netif_map(netif_t *netif, unsigned long tx_ring_ref,
-+struct xen_netif *netif_alloc(domid_t domid, unsigned int handle);
-+int netif_map(struct xen_netif *netif, unsigned long tx_ring_ref,
- unsigned long rx_ring_ref, unsigned int evtchn);
-
- #define netif_get(_b) (atomic_inc(&(_b)->refcnt))
-@@ -195,22 +194,22 @@ void netif_xenbus_init(void);
- #define netif_schedulable(netif) \
- (netif_running((netif)->dev) && netback_carrier_ok(netif))
-
--void netif_schedule_work(netif_t *netif);
--void netif_deschedule_work(netif_t *netif);
-+void netif_schedule_work(struct xen_netif *netif);
-+void netif_deschedule_work(struct xen_netif *netif);
-
- int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
- struct net_device_stats *netif_be_get_stats(struct net_device *dev);
--irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
-+irqreturn_t netif_be_int(int irq, void *dev_id);
-
- static inline int netbk_can_queue(struct net_device *dev)
- {
-- netif_t *netif = netdev_priv(dev);
-+ struct xen_netif *netif = netdev_priv(dev);
- return netif->can_queue;
- }
-
- static inline int netbk_can_sg(struct net_device *dev)
- {
-- netif_t *netif = netdev_priv(dev);
-+ struct xen_netif *netif = netdev_priv(dev);
- return netif->features & NETIF_F_SG;
- }
-
-diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
-index 7e67941..d184ad7 100644
---- a/drivers/xen/netback/interface.c
-+++ b/drivers/xen/netback/interface.c
-@@ -34,6 +34,9 @@
- #include <linux/ethtool.h>
- #include <linux/rtnetlink.h>
-
-+#include <xen/events.h>
-+#include <asm/xen/hypercall.h>
+
- /*
- * Module parameter 'queue_length':
- *
-@@ -51,13 +54,13 @@
- static unsigned long netbk_queue_length = 32;
- module_param_named(queue_length, netbk_queue_length, ulong, 0);
-
--static void __netif_up(netif_t *netif)
-+static void __netif_up(struct xen_netif *netif)
- {
- enable_irq(netif->irq);
- netif_schedule_work(netif);
- }
-
--static void __netif_down(netif_t *netif)
-+static void __netif_down(struct xen_netif *netif)
- {
- disable_irq(netif->irq);
- netif_deschedule_work(netif);
-@@ -65,7 +68,7 @@ static void __netif_down(netif_t *netif)
-
- static int net_open(struct net_device *dev)
- {
-- netif_t *netif = netdev_priv(dev);
-+ struct xen_netif *netif = netdev_priv(dev);
- if (netback_carrier_ok(netif)) {
- __netif_up(netif);
- netif_start_queue(dev);
-@@ -75,7 +78,7 @@ static int net_open(struct net_device *dev)
-
- static int net_close(struct net_device *dev)
- {
-- netif_t *netif = netdev_priv(dev);
-+ struct xen_netif *netif = netdev_priv(dev);
- if (netback_carrier_ok(netif))
- __netif_down(netif);
- netif_stop_queue(dev);
-@@ -95,7 +98,7 @@ static int netbk_change_mtu(struct net_device *dev, int mtu)
- static int netbk_set_sg(struct net_device *dev, u32 data)
- {
- if (data) {
-- netif_t *netif = netdev_priv(dev);
-+ struct xen_netif *netif = netdev_priv(dev);
-
- if (!(netif->features & NETIF_F_SG))
- return -ENOSYS;
-@@ -107,7 +110,7 @@ static int netbk_set_sg(struct net_device *dev, u32 data)
- static int netbk_set_tso(struct net_device *dev, u32 data)
- {
- if (data) {
-- netif_t *netif = netdev_priv(dev);
-+ struct xen_netif *netif = netdev_priv(dev);
-
- if (!(netif->features & NETIF_F_TSO))
- return -ENOSYS;
-@@ -127,15 +130,15 @@ static struct ethtool_ops network_ethtool_ops =
- .get_link = ethtool_op_get_link,
- };
-
--netif_t *netif_alloc(domid_t domid, unsigned int handle)
-+struct xen_netif *netif_alloc(domid_t domid, unsigned int handle)
- {
- int err = 0;
- struct net_device *dev;
-- netif_t *netif;
-+ struct xen_netif *netif;
- char name[IFNAMSIZ] = {};
-
- snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
-- dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
-+ dev = alloc_netdev(sizeof(struct xen_netif), name, ether_setup);
- if (dev == NULL) {
- DPRINTK("Could not create netif: out of memory\n");
- return ERR_PTR(-ENOMEM);
-@@ -194,7 +197,7 @@ netif_t *netif_alloc(domid_t domid, unsigned int handle)
- }
-
- static int map_frontend_pages(
-- netif_t *netif, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref)
-+ struct xen_netif *netif, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref)
- {
- struct gnttab_map_grant_ref op;
-
-@@ -229,7 +232,7 @@ static int map_frontend_pages(
- return 0;
- }
-
--static void unmap_frontend_pages(netif_t *netif)
-+static void unmap_frontend_pages(struct xen_netif *netif)
- {
- struct gnttab_unmap_grant_ref op;
-
-@@ -246,12 +249,12 @@ static void unmap_frontend_pages(netif_t *netif)
- BUG();
- }
-
--int netif_map(netif_t *netif, unsigned long tx_ring_ref,
-+int netif_map(struct xen_netif *netif, unsigned long tx_ring_ref,
- unsigned long rx_ring_ref, unsigned int evtchn)
- {
- int err = -ENOMEM;
-- netif_tx_sring_t *txs;
-- netif_rx_sring_t *rxs;
-+ struct xen_netif_tx_sring *txs;
-+ struct xen_netif_rx_sring *rxs;
-
- /* Already connected through? */
- if (netif->irq)
-@@ -276,10 +279,10 @@ int netif_map(netif_t *netif, unsigned long tx_ring_ref,
- netif->irq = err;
- disable_irq(netif->irq);
-
-- txs = (netif_tx_sring_t *)netif->tx_comms_area->addr;
-+ txs = (struct xen_netif_tx_sring *)netif->tx_comms_area->addr;
- BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);
-
-- rxs = (netif_rx_sring_t *)
-+ rxs = (struct xen_netif_rx_sring *)
- ((char *)netif->rx_comms_area->addr);
- BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);
-
-@@ -303,7 +306,7 @@ err_rx:
- return err;
- }
-
--void netif_disconnect(netif_t *netif)
-+void netif_disconnect(struct xen_netif *netif)
- {
- if (netback_carrier_ok(netif)) {
- rtnl_lock();
-diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
-index db629d4..c959075 100644
---- a/drivers/xen/netback/netback.c
-+++ b/drivers/xen/netback/netback.c
-@@ -35,9 +35,17 @@
- */
-
- #include "common.h"
++/*
++ * Set up the grant operations for this fragment. If it's a flipping
++ * interface, we also set up the unmap request from here.
++ */
++static void netbk_gop_frag_copy(struct xen_netif *netif,
++ struct netrx_pending_operations *npo,
++ struct page *page, unsigned long size,
++ unsigned long offset, int head)
++{
++ struct gnttab_copy *copy_gop;
++ struct netbk_rx_meta *meta;
++ /*
++ * These variables are used iff netif_get_page_ext returns true,
++ * in which case they are guaranteed to be initialized.
++ */
++ unsigned int uninitialized_var(group), uninitialized_var(idx);
++ int foreign = netif_get_page_ext(page, &group, &idx);
++ unsigned long bytes;
+
-+#include <linux/tcp.h>
-+#include <linux/udp.h>
++ /* Data must not cross a page boundary. */
++ BUG_ON(size + offset > PAGE_SIZE);
+
- #include <xen/balloon.h>
-+#include <xen/events.h>
- #include <xen/interface/memory.h>
-
-+#include <asm/xen/hypercall.h>
-+#include <asm/xen/page.h>
++ meta = npo->meta + npo->meta_prod - 1;
+
- /*define NETBE_DEBUG_INTERRUPT*/
-
- struct netbk_rx_meta {
-@@ -51,11 +59,12 @@ struct netbk_tx_pending_inuse {
- unsigned long alloc_time;
- };
-
++ while (size > 0) {
++ BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
+
- static void netif_idx_release(u16 pending_idx);
--static void make_tx_response(netif_t *netif,
-- netif_tx_request_t *txp,
-+static void make_tx_response(struct xen_netif *netif,
-+ struct xen_netif_tx_request *txp,
- s8 st);
--static netif_rx_response_t *make_rx_response(netif_t *netif,
-+static struct xen_netif_rx_response *make_rx_response(struct xen_netif *netif,
- u16 id,
- s8 st,
- u16 offset,
-@@ -108,8 +117,8 @@ static inline int netif_page_index(struct page *pg)
- #define PKT_PROT_LEN 64
-
- static struct pending_tx_info {
-- netif_tx_request_t req;
-- netif_t *netif;
-+ struct xen_netif_tx_request req;
-+ struct xen_netif *netif;
- } pending_tx_info[MAX_PENDING_REQS];
- static u16 pending_ring[MAX_PENDING_REQS];
- typedef unsigned int PEND_RING_IDX;
-@@ -128,8 +137,8 @@ static LIST_HEAD(pending_inuse_head);
- static struct sk_buff_head tx_queue;
-
- static grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
--static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS];
--static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS];
-+static struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
-+static struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
-
- static struct list_head net_schedule_list;
- static spinlock_t net_schedule_list_lock;
-@@ -195,7 +204,7 @@ static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
- goto err;
-
- skb_reserve(nskb, 16 + NET_IP_ALIGN);
-- headlen = nskb->end - nskb->data;
-+ headlen = skb_end_pointer(nskb) - nskb->data;
- if (headlen > skb_headlen(skb))
- headlen = skb_headlen(skb);
- ret = skb_copy_bits(skb, 0, __skb_put(nskb, headlen), headlen);
-@@ -243,9 +252,9 @@ static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
-
- offset = nskb->data - skb->data;
-
-- nskb->h.raw = skb->h.raw + offset;
-- nskb->nh.raw = skb->nh.raw + offset;
-- nskb->mac.raw = skb->mac.raw + offset;
-+ nskb->transport_header = skb->transport_header + offset;
-+ nskb->network_header = skb->network_header + offset;
-+ nskb->mac_header = skb->mac_header + offset;
-
- return nskb;
-
-@@ -255,14 +264,14 @@ static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
- return NULL;
- }
-
--static inline int netbk_max_required_rx_slots(netif_t *netif)
-+static inline int netbk_max_required_rx_slots(struct xen_netif *netif)
- {
- if (netif->features & (NETIF_F_SG|NETIF_F_TSO))
- return MAX_SKB_FRAGS + 2; /* header + extra_info + frags */
- return 1; /* all in one */
- }
-
--static inline int netbk_queue_full(netif_t *netif)
-+static inline int netbk_queue_full(struct xen_netif *netif)
- {
- RING_IDX peek = netif->rx_req_cons_peek;
- RING_IDX needed = netbk_max_required_rx_slots(netif);
-@@ -273,14 +282,14 @@ static inline int netbk_queue_full(netif_t *netif)
-
- static void tx_queue_callback(unsigned long data)
- {
-- netif_t *netif = (netif_t *)data;
-+ struct xen_netif *netif = (struct xen_netif *)data;
- if (netif_schedulable(netif))
- netif_wake_queue(netif->dev);
- }
-
- int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
- {
-- netif_t *netif = netdev_priv(dev);
-+ struct xen_netif *netif = netdev_priv(dev);
-
- BUG_ON(skb->dev != dev);
-
-@@ -302,7 +311,6 @@ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
- /* Copy only the header fields we use in this driver. */
- nskb->dev = skb->dev;
- nskb->ip_summed = skb->ip_summed;
-- nskb->proto_data_valid = skb->proto_data_valid;
- dev_kfree_skb(skb);
- skb = nskb;
- }
-@@ -366,25 +374,25 @@ struct netrx_pending_operations {
- unsigned mcl_prod, mcl_cons;
- unsigned copy_prod, copy_cons;
- unsigned meta_prod, meta_cons;
-- mmu_update_t *mmu;
-- gnttab_transfer_t *trans;
-- gnttab_copy_t *copy;
-- multicall_entry_t *mcl;
-+ struct mmu_update *mmu;
-+ struct gnttab_transfer *trans;
-+ struct gnttab_copy *copy;
-+ struct multicall_entry *mcl;
- struct netbk_rx_meta *meta;
- };
-
- /* Set up the grant operations for this fragment. If it's a flipping
- interface, we also set up the unmap request from here. */
--static u16 netbk_gop_frag(netif_t *netif, struct netbk_rx_meta *meta,
-+static u16 netbk_gop_frag(struct xen_netif *netif, struct netbk_rx_meta *meta,
- int i, struct netrx_pending_operations *npo,
- struct page *page, unsigned long size,
- unsigned long offset)
- {
-- mmu_update_t *mmu;
-- gnttab_transfer_t *gop;
-- gnttab_copy_t *copy_gop;
-- multicall_entry_t *mcl;
-- netif_rx_request_t *req;
-+ struct mmu_update *mmu;
-+ struct gnttab_transfer *gop;
-+ struct gnttab_copy *copy_gop;
-+ struct multicall_entry *mcl;
-+ struct xen_netif_rx_request *req;
- unsigned long old_mfn, new_mfn;
- int idx = netif_page_index(page);
-
-@@ -426,12 +434,12 @@ static u16 netbk_gop_frag(netif_t *netif, struct netbk_rx_meta *meta,
- mcl = npo->mcl + npo->mcl_prod++;
- MULTI_update_va_mapping(mcl,
- (unsigned long)page_address(page),
-- pfn_pte_ma(new_mfn, PAGE_KERNEL),
-+ mfn_pte(new_mfn, PAGE_KERNEL),
- 0);
-
- mmu = npo->mmu + npo->mmu_prod++;
-- mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
-- MMU_MACHPHYS_UPDATE;
-+ mmu->ptr = ((phys_addr_t)new_mfn << PAGE_SHIFT) |
-+ MMU_MACHPHYS_UPDATE;
- mmu->val = page_to_pfn(page);
- }
-
-@@ -446,7 +454,7 @@ static u16 netbk_gop_frag(netif_t *netif, struct netbk_rx_meta *meta,
- static void netbk_gop_skb(struct sk_buff *skb,
- struct netrx_pending_operations *npo)
- {
-- netif_t *netif = netdev_priv(skb->dev);
-+ struct xen_netif *netif = netdev_priv(skb->dev);
- int nr_frags = skb_shinfo(skb)->nr_frags;
- int i;
- int extra;
-@@ -494,9 +502,9 @@ static inline void netbk_free_pages(int nr_frags, struct netbk_rx_meta *meta)
- static int netbk_check_gop(int nr_frags, domid_t domid,
- struct netrx_pending_operations *npo)
- {
-- multicall_entry_t *mcl;
-- gnttab_transfer_t *gop;
-- gnttab_copy_t *copy_op;
-+ struct multicall_entry *mcl;
-+ struct gnttab_transfer *gop;
-+ struct gnttab_copy *copy_op;
- int status = NETIF_RSP_OKAY;
- int i;
-
-@@ -534,7 +542,7 @@ static int netbk_check_gop(int nr_frags, domid_t domid,
- return status;
- }
-
--static void netbk_add_frag_responses(netif_t *netif, int status,
-+static void netbk_add_frag_responses(struct xen_netif *netif, int status,
- struct netbk_rx_meta *meta, int nr_frags)
- {
- int i;
-@@ -555,11 +563,11 @@ static void netbk_add_frag_responses(netif_t *netif, int status,
-
- static void net_rx_action(unsigned long unused)
- {
-- netif_t *netif = NULL;
++ if (start_new_rx_buffer(npo->copy_off, size, head)) {
++ /*
++ * Netfront requires there to be some data in the head
++ * buffer.
++ */
++ BUG_ON(head);
++
++ meta = get_next_rx_buffer(netif, npo);
++ }
++
++ bytes = size;
++ if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
++ bytes = MAX_BUFFER_OFFSET - npo->copy_off;
++
++ copy_gop = npo->copy + npo->copy_prod++;
++ copy_gop->flags = GNTCOPY_dest_gref;
++ if (foreign) {
++ struct xen_netbk *netbk = &xen_netbk[group];
++ struct pending_tx_info *src_pend;
++
++ src_pend = &netbk->pending_tx_info[idx];
++
++ copy_gop->source.domid = src_pend->netif->domid;
++ copy_gop->source.u.ref = src_pend->req.gref;
++ copy_gop->flags |= GNTCOPY_source_gref;
++ } else {
++ void *vaddr = page_address(page);
++ copy_gop->source.domid = DOMID_SELF;
++ copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
++ }
++ copy_gop->source.offset = offset;
++ copy_gop->dest.domid = netif->domid;
++
++ copy_gop->dest.offset = npo->copy_off;
++ copy_gop->dest.u.ref = npo->copy_gref;
++ copy_gop->len = bytes;
++
++ npo->copy_off += bytes;
++ meta->size += bytes;
++
++ offset += bytes;
++ size -= bytes;
++ head = 0; /* There must be something in this buffer now. */
++ }
++}
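++/*
++ * Source selection recap for the copies set up above: when the page
++ * is recognised as foreign (netif_get_page_ext() succeeded) the
++ * hypervisor copies straight from the guest's original grant,
++ *
++ * copy_gop->source.domid = src_pend->netif->domid;
++ * copy_gop->source.u.ref = src_pend->req.gref;
++ *
++ * otherwise the page is local and is named by machine frame:
++ *
++ * copy_gop->source.domid = DOMID_SELF;
++ * copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
++ */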
++
++/*
++ * Prepare an SKB to be transmitted to the frontend.
++ *
++ * This function is responsible for allocating grant operations, meta
++ * structures, etc.
++ *
++ * It returns the number of meta structures consumed. The number of
++ * ring slots used is always equal to the number of meta slots used
++ * plus the number of GSO descriptors used. Currently, we use either
++ * zero GSO descriptors (for non-GSO packets) or one descriptor (for
++ * frontend-side LRO).
++ */
++static int netbk_gop_skb(struct sk_buff *skb,
++ struct netrx_pending_operations *npo)
++{
++ struct xen_netif *netif = netdev_priv(skb->dev);
++ int nr_frags = skb_shinfo(skb)->nr_frags;
++ int i;
++ struct xen_netif_rx_request *req;
++ struct netbk_rx_meta *meta;
++ int old_meta_prod;
++
++ old_meta_prod = npo->meta_prod;
++
++ /* Set up a GSO prefix descriptor, if necessary */
++ if (skb_shinfo(skb)->gso_size && netif->gso_prefix) {
++ req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons++);
++ meta = npo->meta + npo->meta_prod++;
++ meta->gso_size = skb_shinfo(skb)->gso_size;
++ meta->size = 0;
++ meta->id = req->id;
++ }
++
++ req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons++);
++ meta = npo->meta + npo->meta_prod++;
++
++ if (!netif->gso_prefix)
++ meta->gso_size = skb_shinfo(skb)->gso_size;
++ else
++ meta->gso_size = 0;
++
++ meta->size = 0;
++ meta->id = req->id;
++ npo->copy_off = 0;
++ npo->copy_gref = req->gref;
++
++ netbk_gop_frag_copy(netif,
++ npo, virt_to_page(skb->data),
++ skb_headlen(skb),
++ offset_in_page(skb->data), 1);
++
++ /* Leave a gap for the GSO descriptor. */
++ if (skb_shinfo(skb)->gso_size && !netif->gso_prefix)
++ netif->rx.req_cons++;
++
++ for (i = 0; i < nr_frags; i++) {
++ netbk_gop_frag_copy(netif, npo,
++ skb_shinfo(skb)->frags[i].page,
++ skb_shinfo(skb)->frags[i].size,
++ skb_shinfo(skb)->frags[i].page_offset,
++ 0);
++ }
++
++ return npo->meta_prod - old_meta_prod;
++}
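++/*
++ * Accounting sketch: on a gso_prefix interface a GSO skb consumes
++ * two meta slots (prefix descriptor plus packet); on a plain GSO
++ * interface it consumes one meta slot and leaves a ring-slot gap
++ * for the extra_info filled in later by net_rx_action(). Hence
++ * ring slots used == meta slots + GSO descriptors, as noted above.
++ */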
++
++/*
++ * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
++ * used to set up the operations on the top of
++ * netrx_pending_operations, which have since been done. Check that
++ * they didn't give any errors and advance over them.
++ */
++static int netbk_check_gop(int nr_meta_slots, domid_t domid,
++ struct netrx_pending_operations *npo)
++{
++ struct gnttab_copy *copy_op;
++ int status = NETIF_RSP_OKAY;
++ int i;
++
++ for (i = 0; i < nr_meta_slots; i++) {
++ copy_op = npo->copy + npo->copy_cons++;
++ if (copy_op->status != GNTST_okay) {
++ pr_debug("Bad status %d from copy to DOM%d.\n",
++ copy_op->status, domid);
++ status = NETIF_RSP_ERROR;
++ }
++ }
++
++ return status;
++}
++
++static void netbk_add_frag_responses(struct xen_netif *netif, int status,
++ struct netbk_rx_meta *meta,
++ int nr_meta_slots)
++{
++ int i;
++ unsigned long offset;
++
++ /* No fragments used */
++ if (nr_meta_slots <= 1)
++ return;
++
++ nr_meta_slots--;
++
++ for (i = 0; i < nr_meta_slots; i++) {
++ int flags;
++ if (i == nr_meta_slots - 1)
++ flags = 0;
++ else
++ flags = NETRXF_more_data;
++
++ offset = 0;
++ make_rx_response(netif, meta[i].id, status, offset,
++ meta[i].size, flags);
++ }
++}
++
++struct skb_cb_overlay {
++ int meta_slots_used;
++};
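++/*
++ * skb->cb is a 48-byte scratch area owned by the current holder of
++ * the skb, so the overlay is just cast in place where needed:
++ *
++ * struct skb_cb_overlay *sco = (struct skb_cb_overlay *)skb->cb;
++ * sco->meta_slots_used = netbk_gop_skb(skb, &npo);
++ *
++ * A BUILD_BUG_ON(sizeof(struct skb_cb_overlay) > sizeof(skb->cb))
++ * would be the natural guard here (hypothetical hardening).
++ */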
++
++static void net_rx_action(unsigned long data)
++{
+ struct xen_netif *netif = NULL;
- s8 status;
- u16 id, irq, flags;
-- netif_rx_response_t *resp;
-- multicall_entry_t *mcl;
++ struct xen_netbk *netbk = (struct xen_netbk *)data;
++ s8 status;
++ u16 irq, flags;
+ struct xen_netif_rx_response *resp;
-+ struct multicall_entry *mcl;
- struct sk_buff_head rxq;
- struct sk_buff *skb;
- int notify_nr = 0;
-@@ -572,10 +580,10 @@ static void net_rx_action(unsigned long unused)
- * Putting hundreds of bytes on the stack is considered rude.
- * Static works because a tasklet can only be on one CPU at any time.
- */
-- static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+3];
-- static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
-- static gnttab_transfer_t grant_trans_op[NET_RX_RING_SIZE];
-- static gnttab_copy_t grant_copy_op[NET_RX_RING_SIZE];
-+ static struct multicall_entry rx_mcl[NET_RX_RING_SIZE+3];
-+ static struct mmu_update rx_mmu[NET_RX_RING_SIZE];
-+ static struct gnttab_transfer grant_trans_op[NET_RX_RING_SIZE];
-+ static struct gnttab_copy grant_copy_op[NET_RX_RING_SIZE];
- static unsigned char rx_notify[NR_IRQS];
- static u16 notify_list[NET_RX_RING_SIZE];
- static struct netbk_rx_meta meta[NET_RX_RING_SIZE];
-@@ -596,7 +604,7 @@ static void net_rx_action(unsigned long unused)
- *(int *)skb->cb = nr_frags;
-
- if (!xen_feature(XENFEAT_auto_translated_physmap) &&
-- !((netif_t *)netdev_priv(skb->dev))->copying_receiver &&
-+ !((struct xen_netif *)netdev_priv(skb->dev))->copying_receiver &&
- check_mfn(nr_frags + 1)) {
- /* Memory squeeze? Back off for an arbitrary while. */
- if ( net_ratelimit() )
-@@ -692,9 +700,10 @@ static void net_rx_action(unsigned long unused)
- id = meta[npo.meta_cons].id;
- flags = nr_frags ? NETRXF_more_data : 0;
-
-- if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
++ struct sk_buff_head rxq;
++ struct sk_buff *skb;
++ int notify_nr = 0;
++ int ret;
++ int nr_frags;
++ int count;
++ unsigned long offset;
++ struct skb_cb_overlay *sco;
++
++ struct netrx_pending_operations npo = {
++ .copy = netbk->grant_copy_op,
++ .meta = netbk->meta,
++ };
++
++ skb_queue_head_init(&rxq);
++
++ count = 0;
++
++ while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
++ netif = netdev_priv(skb->dev);
++ nr_frags = skb_shinfo(skb)->nr_frags;
++
++ sco = (struct skb_cb_overlay *)skb->cb;
++ sco->meta_slots_used = netbk_gop_skb(skb, &npo);
++
++ count += nr_frags + 1;
++
++ __skb_queue_tail(&rxq, skb);
++
++ /* Filled the batch queue? */
++ if (count + MAX_SKB_FRAGS >= NET_RX_RING_SIZE)
++ break;
++ }
++
++ BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
++
++ if (!npo.copy_prod)
++ return;
++
++ BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
++ ret = HYPERVISOR_grant_table_op(GNTTABOP_copy, &netbk->grant_copy_op,
++ npo.copy_prod);
++ BUG_ON(ret != 0);
++
++ while ((skb = __skb_dequeue(&rxq)) != NULL) {
++ sco = (struct skb_cb_overlay *)skb->cb;
++
++ netif = netdev_priv(skb->dev);
++
++ if (netbk->meta[npo.meta_cons].gso_size && netif->gso_prefix) {
++ resp = RING_GET_RESPONSE(&netif->rx,
++ netif->rx.rsp_prod_pvt++);
++
++ resp->flags = NETRXF_gso_prefix | NETRXF_more_data;
++
++ resp->offset = netbk->meta[npo.meta_cons].gso_size;
++ resp->id = netbk->meta[npo.meta_cons].id;
++ resp->status = sco->meta_slots_used;
++
++ npo.meta_cons++;
++ sco->meta_slots_used--;
++ }
++
++
++ netif->stats.tx_bytes += skb->len;
++ netif->stats.tx_packets++;
++
++ status = netbk_check_gop(sco->meta_slots_used,
++ netif->domid, &npo);
++
++ if (sco->meta_slots_used == 1)
++ flags = 0;
++ else
++ flags = NETRXF_more_data;
++
+ if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
- flags |= NETRXF_csum_blank | NETRXF_data_validated;
-- else if (skb->proto_data_valid) /* remote but checksummed? */
++ flags |= NETRXF_csum_blank | NETRXF_data_validated;
+ else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+ /* remote but checksummed. */
- flags |= NETRXF_data_validated;
-
- if (meta[npo.meta_cons].copy)
-@@ -705,8 +714,8 @@ static void net_rx_action(unsigned long unused)
- skb_headlen(skb), flags);
-
- if (meta[npo.meta_cons].frag.size) {
-- struct netif_extra_info *gso =
-- (struct netif_extra_info *)
++ flags |= NETRXF_data_validated;
++
++ offset = 0;
++ resp = make_rx_response(netif, netbk->meta[npo.meta_cons].id,
++ status, offset,
++ netbk->meta[npo.meta_cons].size,
++ flags);
++
++ if (netbk->meta[npo.meta_cons].gso_size && !netif->gso_prefix) {
+ struct xen_netif_extra_info *gso =
+ (struct xen_netif_extra_info *)
- RING_GET_RESPONSE(&netif->rx,
- netif->rx.rsp_prod_pvt++);
-
-@@ -769,16 +778,16 @@ static void netbk_tx_pending_timeout(unsigned long unused)
-
- struct net_device_stats *netif_be_get_stats(struct net_device *dev)
- {
-- netif_t *netif = netdev_priv(dev);
++ RING_GET_RESPONSE(&netif->rx,
++ netif->rx.rsp_prod_pvt++);
++
++ resp->flags |= NETRXF_extra_info;
++
++ gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
++ gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
++ gso->u.gso.pad = 0;
++ gso->u.gso.features = 0;
++
++ gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
++ gso->flags = 0;
++ }
++
++ netbk_add_frag_responses(netif, status,
++ netbk->meta + npo.meta_cons + 1,
++ sco->meta_slots_used);
++
++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, ret);
++ irq = netif->irq;
++ if (ret && !netbk->rx_notify[irq]) {
++ netbk->rx_notify[irq] = 1;
++ netbk->notify_list[notify_nr++] = irq;
++ }
++
++ if (netif_queue_stopped(netif->dev) &&
++ netif_schedulable(netif) &&
++ !netbk_queue_full(netif))
++ netif_wake_queue(netif->dev);
++
++ netif_put(netif);
++ npo.meta_cons += sco->meta_slots_used;
++ dev_kfree_skb(skb);
++ }
++
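++	/* Fire each interrupt once; rx_notify[] de-duplicated the irqs above. */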
++ while (notify_nr != 0) {
++ irq = netbk->notify_list[--notify_nr];
++ netbk->rx_notify[irq] = 0;
++ notify_remote_via_irq(irq);
++ }
++
++ /* More work to do? */
++ if (!skb_queue_empty(&netbk->rx_queue) &&
++ !timer_pending(&netbk->net_timer))
++ xen_netbk_bh_handler(netbk, 1);
++}
++
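++/*
++ * The flag passed to xen_netbk_bh_handler() picks the bottom half to
++ * schedule: 1 for the rx action, 0 for the tx action.
++ */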
++static void net_alarm(unsigned long data)
++{
++ struct xen_netbk *netbk = (struct xen_netbk *)data;
++ xen_netbk_bh_handler(netbk, 1);
++}
++
++static void netbk_tx_pending_timeout(unsigned long data)
++{
++ struct xen_netbk *netbk = (struct xen_netbk *)data;
++ xen_netbk_bh_handler(netbk, 0);
++}
++
++struct net_device_stats *netif_be_get_stats(struct net_device *dev)
++{
+ struct xen_netif *netif = netdev_priv(dev);
- return &netif->stats;
- }
-
--static int __on_net_schedule_list(netif_t *netif)
++ return &netif->stats;
++}
++
+static int __on_net_schedule_list(struct xen_netif *netif)
- {
- return netif->list.next != NULL;
- }
-
--static void remove_from_net_schedule_list(netif_t *netif)
++{
++ return !list_empty(&netif->list);
++}
++
++/* Must be called with net_schedule_list_lock held */
+static void remove_from_net_schedule_list(struct xen_netif *netif)
- {
- spin_lock_irq(&net_schedule_list_lock);
- if (likely(__on_net_schedule_list(netif))) {
-@@ -789,7 +798,7 @@ static void remove_from_net_schedule_list(netif_t *netif)
- spin_unlock_irq(&net_schedule_list_lock);
- }
-
--static void add_to_net_schedule_list_tail(netif_t *netif)
++{
++ if (likely(__on_net_schedule_list(netif))) {
++ list_del_init(&netif->list);
++ netif_put(netif);
++ }
++}
++
++static struct xen_netif *poll_net_schedule_list(struct xen_netbk *netbk)
++{
++ struct xen_netif *netif = NULL;
++
++ spin_lock_irq(&netbk->net_schedule_list_lock);
++ if (list_empty(&netbk->net_schedule_list))
++ goto out;
++
++ netif = list_first_entry(&netbk->net_schedule_list,
++ struct xen_netif, list);
++ if (!netif)
++ goto out;
++
++ netif_get(netif);
++
++ remove_from_net_schedule_list(netif);
++out:
++ spin_unlock_irq(&netbk->net_schedule_list_lock);
++ return netif;
++}
++
+static void add_to_net_schedule_list_tail(struct xen_netif *netif)
- {
- if (__on_net_schedule_list(netif))
- return;
-@@ -811,7 +820,7 @@ static void add_to_net_schedule_list_tail(netif_t *netif)
- * If we may be buffering transmit buffers for any reason then we must be rather
- * more conservative and treat this as the final check for pending work.
- */
--void netif_schedule_work(netif_t *netif)
++{
++ unsigned long flags;
++
++ struct xen_netbk *netbk = &xen_netbk[netif->group];
++ if (__on_net_schedule_list(netif))
++ return;
++
++ spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
++ if (!__on_net_schedule_list(netif) &&
++ likely(netif_schedulable(netif))) {
++ list_add_tail(&netif->list, &netbk->net_schedule_list);
++ netif_get(netif);
++ }
++ spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);
++}
++
+void netif_schedule_work(struct xen_netif *netif)
- {
- int more_to_do;
-
-@@ -827,13 +836,13 @@ void netif_schedule_work(netif_t *netif)
- }
- }
-
--void netif_deschedule_work(netif_t *netif)
++{
++ struct xen_netbk *netbk = &xen_netbk[netif->group];
++ int more_to_do;
++
++ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
++
++ if (more_to_do) {
++ add_to_net_schedule_list_tail(netif);
++ maybe_schedule_tx_action(netbk);
++ }
++}
++
+void netif_deschedule_work(struct xen_netif *netif)
- {
- remove_from_net_schedule_list(netif);
- }
-
-
--static void tx_add_credit(netif_t *netif)
++{
++ struct xen_netbk *netbk = &xen_netbk[netif->group];
++ spin_lock_irq(&netbk->net_schedule_list_lock);
++ remove_from_net_schedule_list(netif);
++ spin_unlock_irq(&netbk->net_schedule_list_lock);
++}
++
++
+static void tx_add_credit(struct xen_netif *netif)
- {
- unsigned long max_burst, max_credit;
-
-@@ -855,7 +864,7 @@ static void tx_add_credit(netif_t *netif)
-
- static void tx_credit_callback(unsigned long data)
- {
-- netif_t *netif = (netif_t *)data;
-+ struct xen_netif *netif = (struct xen_netif *)data;
- tx_add_credit(netif);
- netif_schedule_work(netif);
- }
-@@ -869,10 +878,10 @@ static inline int copy_pending_req(PEND_RING_IDX pending_idx)
- inline static void net_tx_action_dealloc(void)
- {
- struct netbk_tx_pending_inuse *inuse, *n;
-- gnttab_unmap_grant_ref_t *gop;
-+ struct gnttab_unmap_grant_ref *gop;
- u16 pending_idx;
- PEND_RING_IDX dc, dp;
-- netif_t *netif;
-+ struct xen_netif *netif;
- int ret;
- LIST_HEAD(list);
-
-@@ -954,7 +963,7 @@ inline static void net_tx_action_dealloc(void)
- }
- }
-
--static void netbk_tx_err(netif_t *netif, netif_tx_request_t *txp, RING_IDX end)
-+static void netbk_tx_err(struct xen_netif *netif, struct xen_netif_tx_request *txp, RING_IDX end)
- {
- RING_IDX cons = netif->tx.req_cons;
-
-@@ -969,8 +978,8 @@ static void netbk_tx_err(netif_t *netif, netif_tx_request_t *txp, RING_IDX end)
- netif_put(netif);
- }
-
--static int netbk_count_requests(netif_t *netif, netif_tx_request_t *first,
-- netif_tx_request_t *txp, int work_to_do)
-+static int netbk_count_requests(struct xen_netif *netif, struct xen_netif_tx_request *first,
-+ struct xen_netif_tx_request *txp, int work_to_do)
- {
- RING_IDX cons = netif->tx.req_cons;
- int frags = 0;
-@@ -1009,10 +1018,10 @@ static int netbk_count_requests(netif_t *netif, netif_tx_request_t *first,
- return frags;
- }
-
--static gnttab_map_grant_ref_t *netbk_get_requests(netif_t *netif,
-+static struct gnttab_map_grant_ref *netbk_get_requests(struct xen_netif *netif,
- struct sk_buff *skb,
-- netif_tx_request_t *txp,
-- gnttab_map_grant_ref_t *mop)
-+ struct xen_netif_tx_request *txp,
-+ struct gnttab_map_grant_ref *mop)
- {
- struct skb_shared_info *shinfo = skb_shinfo(skb);
- skb_frag_t *frags = shinfo->frags;
-@@ -1039,12 +1048,12 @@ static gnttab_map_grant_ref_t *netbk_get_requests(netif_t *netif,
- }
-
- static int netbk_tx_check_mop(struct sk_buff *skb,
-- gnttab_map_grant_ref_t **mopp)
-+ struct gnttab_map_grant_ref **mopp)
- {
-- gnttab_map_grant_ref_t *mop = *mopp;
++{
++ unsigned long max_burst, max_credit;
++
++ /*
++ * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
++ * Otherwise the interface can seize up due to insufficient credit.
++ */
++ max_burst = RING_GET_REQUEST(&netif->tx, netif->tx.req_cons)->size;
++ max_burst = min(max_burst, 131072UL);
++ max_burst = max(max_burst, netif->credit_bytes);
++
++ /* Take care that adding a new chunk of credit doesn't wrap to zero. */
++ max_credit = netif->remaining_credit + netif->credit_bytes;
++ if (max_credit < netif->remaining_credit)
++ max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
++
++ netif->remaining_credit = min(max_credit, max_burst);
++}
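++
++/*
++ * Worked example (hypothetical numbers): with credit_bytes = 64KiB and a
++ * 128KiB GSO request at the head of the ring, max_burst becomes 128KiB,
++ * so successive top-ups can accumulate 64KiB -> 128KiB instead of being
++ * clamped at credit_bytes, which would leave the request stuck forever.
++ */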
++
++static void tx_credit_callback(unsigned long data)
++{
++ struct xen_netif *netif = (struct xen_netif *)data;
++ tx_add_credit(netif);
++ netif_schedule_work(netif);
++}
++
++static inline int copy_pending_req(struct xen_netbk *netbk,
++ pending_ring_idx_t pending_idx)
++{
++ return gnttab_copy_grant_page(
++ netbk->grant_tx_handle[pending_idx],
++ &netbk->mmap_pages[pending_idx]);
++}
++
++static inline void net_tx_action_dealloc(struct xen_netbk *netbk)
++{
++ struct netbk_tx_pending_inuse *inuse, *n;
++ struct gnttab_unmap_grant_ref *gop;
++ u16 pending_idx;
++ pending_ring_idx_t dc, dp;
++ struct xen_netif *netif;
++ int ret;
++ LIST_HEAD(list);
++
++ dc = netbk->dealloc_cons;
++ gop = netbk->tx_unmap_ops;
++
++ /* Free up any grants we have finished using. */
++ do {
++ dp = netbk->dealloc_prod;
++
++ /* Ensure we see all indices enqueued by netif_idx_release(). */
++ smp_rmb();
++
++ while (dc != dp) {
++ unsigned long pfn;
++ struct netbk_tx_pending_inuse *pending_inuse =
++ netbk->pending_inuse;
++
++ pending_idx = netbk->dealloc_ring[pending_index(dc++)];
++ list_move_tail(&pending_inuse[pending_idx].list, &list);
++
++ pfn = idx_to_pfn(netbk, pending_idx);
++ /* Already unmapped? */
++ if (!phys_to_machine_mapping_valid(pfn))
++ continue;
++
++ gnttab_set_unmap_op(gop,
++ idx_to_kaddr(netbk, pending_idx),
++ GNTMAP_host_map,
++ netbk->grant_tx_handle[pending_idx]);
++ gop++;
++ }
++
++ } while (dp != netbk->dealloc_prod);
++
++ netbk->dealloc_cons = dc;
++
++ ret = HYPERVISOR_grant_table_op(
++ GNTTABOP_unmap_grant_ref, netbk->tx_unmap_ops,
++ gop - netbk->tx_unmap_ops);
++ BUG_ON(ret);
++
++ /*
++ * Copy any entries that have been pending for too long
++ */
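++	/*
++	 * Copying breaks the dependency on the foreign grant: once the
++	 * data is in a local page the grant can be unmapped and returned
++	 * even though the skb may still be held elsewhere in the stack.
++	 */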
++ if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB &&
++ !list_empty(&netbk->pending_inuse_head)) {
++ list_for_each_entry_safe(inuse, n,
++ &netbk->pending_inuse_head, list) {
++ struct pending_tx_info *pending_tx_info;
++ pending_tx_info = netbk->pending_tx_info;
++
++ if (time_after(inuse->alloc_time + HZ / 2, jiffies))
++ break;
++
++ pending_idx = inuse - netbk->pending_inuse;
++
++ pending_tx_info[pending_idx].netif->nr_copied_skbs++;
++
++ switch (copy_pending_req(netbk, pending_idx)) {
++ case 0:
++ list_move_tail(&inuse->list, &list);
++ continue;
++ case -EBUSY:
++ list_del_init(&inuse->list);
++ continue;
++ case -ENOENT:
++ continue;
++ }
++
++ break;
++ }
++ }
++
++ list_for_each_entry_safe(inuse, n, &list, list) {
++ struct pending_tx_info *pending_tx_info;
++ pending_ring_idx_t index;
++
++ pending_tx_info = netbk->pending_tx_info;
++ pending_idx = inuse - netbk->pending_inuse;
++
++ netif = pending_tx_info[pending_idx].netif;
++
++ make_tx_response(netif, &pending_tx_info[pending_idx].req,
++ NETIF_RSP_OKAY);
++
++ /* Ready for next use. */
++ gnttab_reset_grant_page(netbk->mmap_pages[pending_idx]);
++
++ index = pending_index(netbk->pending_prod++);
++ netbk->pending_ring[index] = pending_idx;
++
++ netif_put(netif);
++
++ list_del_init(&inuse->list);
++ }
++}
++
++static void netbk_tx_err(struct xen_netif *netif,
++ struct xen_netif_tx_request *txp, RING_IDX end)
++{
++ RING_IDX cons = netif->tx.req_cons;
++
++ do {
++ make_tx_response(netif, txp, NETIF_RSP_ERROR);
++ if (cons >= end)
++ break;
++ txp = RING_GET_REQUEST(&netif->tx, cons++);
++ } while (1);
++ netif->tx.req_cons = cons;
++ netif_schedule_work(netif);
++ netif_put(netif);
++}
++
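++/*
++ * Returns the number of extra request slots consumed by the frame, or
++ * -frags on error so that the caller can advance req_cons past the bad
++ * slots (see the "idx - ret" in net_tx_build_mops()).
++ */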
++static int netbk_count_requests(struct xen_netif *netif,
++ struct xen_netif_tx_request *first,
++ struct xen_netif_tx_request *txp,
++ int work_to_do)
++{
++ RING_IDX cons = netif->tx.req_cons;
++ int frags = 0;
++
++ if (!(first->flags & NETTXF_more_data))
++ return 0;
++
++ do {
++ if (frags >= work_to_do) {
++ pr_debug("Need more frags\n");
++ return -frags;
++ }
++
++ if (unlikely(frags >= MAX_SKB_FRAGS)) {
++ pr_debug("Too many frags\n");
++ return -frags;
++ }
++
++ memcpy(txp, RING_GET_REQUEST(&netif->tx, cons + frags),
++ sizeof(*txp));
++ if (txp->size > first->size) {
++ pr_debug("Frags galore\n");
++ return -frags;
++ }
++
++ first->size -= txp->size;
++ frags++;
++
++ if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
++ pr_debug("txp->offset: %x, size: %u\n",
++ txp->offset, txp->size);
++ return -frags;
++ }
++ } while ((txp++)->flags & NETTXF_more_data);
++
++ return frags;
++}
++
++static struct gnttab_map_grant_ref *netbk_get_requests(struct xen_netbk *netbk,
++ struct xen_netif *netif,
++ struct sk_buff *skb,
++ struct xen_netif_tx_request *txp,
++ struct gnttab_map_grant_ref *mop)
++{
++ struct skb_shared_info *shinfo = skb_shinfo(skb);
++ skb_frag_t *frags = shinfo->frags;
++ unsigned long pending_idx = *((u16 *)skb->data);
++ int i, start;
++
++ /* Skip first skb fragment if it is on same page as header fragment. */
++ start = ((unsigned long)shinfo->frags[0].page == pending_idx);
++
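++	/*
++	 * frag->page temporarily carries the pending ring index; it is
++	 * swapped for the real struct page in netbk_fill_frags().
++	 */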
++ for (i = start; i < shinfo->nr_frags; i++, txp++) {
++ pending_ring_idx_t index;
++ struct pending_tx_info *pending_tx_info =
++ netbk->pending_tx_info;
++
++ index = pending_index(netbk->pending_cons++);
++ pending_idx = netbk->pending_ring[index];
++
++ gnttab_set_map_op(mop++, idx_to_kaddr(netbk, pending_idx),
++ GNTMAP_host_map | GNTMAP_readonly,
++ txp->gref, netif->domid);
++
++ memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
++ netif_get(netif);
++ pending_tx_info[pending_idx].netif = netif;
++ frags[i].page = (void *)pending_idx;
++ }
++
++ return mop;
++}
++
++static int netbk_tx_check_mop(struct xen_netbk *netbk,
++ struct sk_buff *skb,
++ struct gnttab_map_grant_ref **mopp)
++{
+ struct gnttab_map_grant_ref *mop = *mopp;
- int pending_idx = *((u16 *)skb->data);
-- netif_t *netif = pending_tx_info[pending_idx].netif;
-- netif_tx_request_t *txp;
++ int pending_idx = *((u16 *)skb->data);
++ struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
+ struct xen_netif *netif = pending_tx_info[pending_idx].netif;
+ struct xen_netif_tx_request *txp;
- struct skb_shared_info *shinfo = skb_shinfo(skb);
- int nr_frags = shinfo->nr_frags;
- int i, err, start;
-@@ -1118,7 +1127,7 @@ static void netbk_fill_frags(struct sk_buff *skb)
-
- for (i = 0; i < nr_frags; i++) {
- skb_frag_t *frag = shinfo->frags + i;
-- netif_tx_request_t *txp;
++ struct skb_shared_info *shinfo = skb_shinfo(skb);
++ int nr_frags = shinfo->nr_frags;
++ int i, err, start;
++
++ /* Check status of header. */
++ err = mop->status;
++ if (unlikely(err)) {
++ pending_ring_idx_t index;
++ index = pending_index(netbk->pending_prod++);
++ txp = &pending_tx_info[pending_idx].req;
++ make_tx_response(netif, txp, NETIF_RSP_ERROR);
++ netbk->pending_ring[index] = pending_idx;
++ netif_put(netif);
++ } else {
++ set_phys_to_machine(
++ __pa(idx_to_kaddr(netbk, pending_idx)) >> PAGE_SHIFT,
++ FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
++ netbk->grant_tx_handle[pending_idx] = mop->handle;
++ }
++
++ /* Skip first skb fragment if it is on same page as header fragment. */
++ start = ((unsigned long)shinfo->frags[0].page == pending_idx);
++
++ for (i = start; i < nr_frags; i++) {
++ int j, newerr;
++ pending_ring_idx_t index;
++
++ pending_idx = (unsigned long)shinfo->frags[i].page;
++
++ /* Check error status: if okay then remember grant handle. */
++ newerr = (++mop)->status;
++ if (likely(!newerr)) {
++ unsigned long addr;
++ addr = idx_to_kaddr(netbk, pending_idx);
++ set_phys_to_machine(
++ __pa(addr)>>PAGE_SHIFT,
++ FOREIGN_FRAME(mop->dev_bus_addr>>PAGE_SHIFT));
++ netbk->grant_tx_handle[pending_idx] = mop->handle;
++ /* Had a previous error? Invalidate this fragment. */
++ if (unlikely(err))
++ netif_idx_release(netbk, pending_idx);
++ continue;
++ }
++
++ /* Error on this fragment: respond to client with an error. */
++ txp = &netbk->pending_tx_info[pending_idx].req;
++ make_tx_response(netif, txp, NETIF_RSP_ERROR);
++ index = pending_index(netbk->pending_prod++);
++ netbk->pending_ring[index] = pending_idx;
++ netif_put(netif);
++
++ /* Not the first error? Preceding frags already invalidated. */
++ if (err)
++ continue;
++
++ /* First error: invalidate header and preceding fragments. */
++ pending_idx = *((u16 *)skb->data);
++ netif_idx_release(netbk, pending_idx);
++ for (j = start; j < i; j++) {
++			pending_idx = (unsigned long)shinfo->frags[j].page;
++ netif_idx_release(netbk, pending_idx);
++ }
++
++ /* Remember the error: invalidate all subsequent fragments. */
++ err = newerr;
++ }
++
++ *mopp = mop + 1;
++ return err;
++}
++
++static void netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
++{
++ struct skb_shared_info *shinfo = skb_shinfo(skb);
++ int nr_frags = shinfo->nr_frags;
++ int i;
++
++ for (i = 0; i < nr_frags; i++) {
++ skb_frag_t *frag = shinfo->frags + i;
+ struct xen_netif_tx_request *txp;
- unsigned long pending_idx;
-
- pending_idx = (unsigned long)frag->page;
-@@ -1138,10 +1147,10 @@ static void netbk_fill_frags(struct sk_buff *skb)
- }
- }
-
--int netbk_get_extras(netif_t *netif, struct netif_extra_info *extras,
-+int netbk_get_extras(struct xen_netif *netif, struct xen_netif_extra_info *extras,
- int work_to_do)
- {
-- struct netif_extra_info extra;
++ unsigned long pending_idx;
++
++ pending_idx = (unsigned long)frag->page;
++
++ netbk->pending_inuse[pending_idx].alloc_time = jiffies;
++ list_add_tail(&netbk->pending_inuse[pending_idx].list,
++ &netbk->pending_inuse_head);
++
++ txp = &netbk->pending_tx_info[pending_idx].req;
++ frag->page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
++ frag->size = txp->size;
++ frag->page_offset = txp->offset;
++
++ skb->len += txp->size;
++ skb->data_len += txp->size;
++ skb->truesize += txp->size;
++ }
++}
++
++int netbk_get_extras(struct xen_netif *netif,
++ struct xen_netif_extra_info *extras,
++ int work_to_do)
++{
+ struct xen_netif_extra_info extra;
- RING_IDX cons = netif->tx.req_cons;
-
- do {
-@@ -1166,7 +1175,7 @@ int netbk_get_extras(netif_t *netif, struct netif_extra_info *extras,
- return work_to_do;
- }
-
--static int netbk_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso)
-+static int netbk_set_skb_gso(struct sk_buff *skb, struct xen_netif_extra_info *gso)
- {
- if (!gso->u.gso.size) {
- DPRINTK("GSO size must not be zero.\n");
-@@ -1189,18 +1198,57 @@ static int netbk_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso)
- return 0;
- }
-
-+static int skb_checksum_setup(struct sk_buff *skb)
++ RING_IDX cons = netif->tx.req_cons;
++
++ do {
++ if (unlikely(work_to_do-- <= 0)) {
++ pr_debug("Missing extra info\n");
++ return -EBADR;
++ }
++
++ memcpy(&extra, RING_GET_REQUEST(&netif->tx, cons),
++ sizeof(extra));
++ if (unlikely(!extra.type ||
++ extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
++ netif->tx.req_cons = ++cons;
++ pr_debug("Invalid extra type: %d\n", extra.type);
++ return -EINVAL;
++ }
++
++ memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
++ netif->tx.req_cons = ++cons;
++ } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
++
++ return work_to_do;
++}
++
++static int netbk_set_skb_gso(struct sk_buff *skb,
++ struct xen_netif_extra_info *gso)
++{
++ if (!gso->u.gso.size) {
++ pr_debug("GSO size must not be zero.\n");
++ return -EINVAL;
++ }
++
++ /* Currently only TCPv4 S.O. is supported. */
++ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
++ pr_debug("Bad GSO type %d.\n", gso->u.gso.type);
++ return -EINVAL;
++ }
++
++ skb_shinfo(skb)->gso_size = gso->u.gso.size;
++ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
++
++ /* Header must be checked, and gso_segs computed. */
++ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
++ skb_shinfo(skb)->gso_segs = 0;
++
++ return 0;
++}
++
++static int checksum_setup(struct xen_netif *netif, struct sk_buff *skb)
+{
+ struct iphdr *iph;
+ unsigned char *th;
+ int err = -EPROTO;
++ int recalculate_partial_csum = 0;
++
++ /*
++ * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
++ * peers can fail to set NETRXF_csum_blank when sending a GSO
++ * frame. In this case force the SKB to CHECKSUM_PARTIAL and
++ * recalculate the partial checksum.
++ */
++ if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
++ netif->rx_gso_checksum_fixup++;
++ skb->ip_summed = CHECKSUM_PARTIAL;
++ recalculate_partial_csum = 1;
++ }
++
++ /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
++ if (skb->ip_summed != CHECKSUM_PARTIAL)
++ return 0;
+
+ if (skb->protocol != htons(ETH_P_IP))
+ goto out;
@@ -12599,9 +14566,23 @@ index db629d4..c959075 100644
+ switch (iph->protocol) {
+ case IPPROTO_TCP:
+ skb->csum_offset = offsetof(struct tcphdr, check);
++
++ if (recalculate_partial_csum) {
++ struct tcphdr *tcph = (struct tcphdr *)th;
++ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
++ skb->len - iph->ihl*4,
++ IPPROTO_TCP, 0);
++ }
+ break;
+ case IPPROTO_UDP:
+ skb->csum_offset = offsetof(struct udphdr, check);
++
++ if (recalculate_partial_csum) {
++ struct udphdr *udph = (struct udphdr *)th;
++ udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
++ skb->len - iph->ihl*4,
++ IPPROTO_UDP, 0);
++ }
+ break;
+ default:
+ if (net_ratelimit())
@@ -12620,2428 +14601,958 @@ index db629d4..c959075 100644
+ return err;
+}
+
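+/*
+ * A sketch of the priming done above: csum_tcpudp_magic() folds the IPv4
+ * pseudo-header, so
+ *	check = ~csum_tcpudp_magic(saddr, daddr, len, proto, 0);
+ * leaves the field holding exactly what a CHECKSUM_PARTIAL consumer
+ * expects to complete with the payload sum.
+ */
+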
- /* Called after netfront has transmitted */
- static void net_tx_action(unsigned long unused)
- {
- struct list_head *ent;
- struct sk_buff *skb;
-- netif_t *netif;
-- netif_tx_request_t txreq;
-- netif_tx_request_t txfrags[MAX_SKB_FRAGS];
-- struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
-+ struct xen_netif *netif;
-+ struct xen_netif_tx_request txreq;
-+ struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
-+ struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
- u16 pending_idx;
- RING_IDX i;
-- gnttab_map_grant_ref_t *mop;
-+ struct gnttab_map_grant_ref *mop;
- unsigned int data_len;
- int ret, work_to_do;
-
-@@ -1212,7 +1260,7 @@ static void net_tx_action(unsigned long unused)
- !list_empty(&net_schedule_list)) {
- /* Get a netif from the list with work to do. */
- ent = net_schedule_list.next;
-- netif = list_entry(ent, netif_t, list);
-+ netif = list_entry(ent, struct xen_netif, list);
- netif_get(netif);
- remove_from_net_schedule_list(netif);
-
-@@ -1313,7 +1361,7 @@ static void net_tx_action(unsigned long unused)
- skb_reserve(skb, 16 + NET_IP_ALIGN);
-
- if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
-- struct netif_extra_info *gso;
-+ struct xen_netif_extra_info *gso;
- gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
-
- if (netbk_set_skb_gso(skb, gso)) {
-@@ -1372,7 +1420,7 @@ static void net_tx_action(unsigned long unused)
-
- mop = tx_map_ops;
- while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
-- netif_tx_request_t *txp;
-+ struct xen_netif_tx_request *txp;
-
- pending_idx = *((u16 *)skb->data);
- netif = pending_tx_info[pending_idx].netif;
-@@ -1403,14 +1451,10 @@ static void net_tx_action(unsigned long unused)
- * Old frontends do not assert data_validated but we
- * can infer it from csum_blank so test both flags.
- */
-- if (txp->flags & (NETTXF_data_validated|NETTXF_csum_blank)) {
-- skb->ip_summed = CHECKSUM_UNNECESSARY;
-- skb->proto_data_valid = 1;
-- } else {
-+ if (txp->flags & (NETTXF_data_validated|NETTXF_csum_blank))
-+ skb->ip_summed = CHECKSUM_PARTIAL;
-+ else
- skb->ip_summed = CHECKSUM_NONE;
-- skb->proto_data_valid = 0;
-- }
-- skb->proto_csum_blank = !!(txp->flags & NETTXF_csum_blank);
-
- netbk_fill_frags(skb);
-
-@@ -1420,6 +1464,14 @@ static void net_tx_action(unsigned long unused)
- netif->stats.rx_bytes += skb->len;
- netif->stats.rx_packets++;
-
-+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
-+ if (skb_checksum_setup(skb)) {
-+ DPRINTK("Can't setup checksum in net_tx_action\n");
-+ kfree_skb(skb);
-+ continue;
-+ }
-+ }
-+
- if (unlikely(netbk_copy_skb_mode == NETBK_ALWAYS_COPY_SKB) &&
- unlikely(skb_linearize(skb))) {
- DPRINTK("Can't linearize skb in net_tx_action.\n");
-@@ -1464,9 +1516,9 @@ static void netif_page_release(struct page *page, unsigned int order)
- netif_idx_release(idx);
- }
-
--irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
-+irqreturn_t netif_be_int(int irq, void *dev_id)
- {
-- netif_t *netif = dev_id;
-+ struct xen_netif *netif = dev_id;
-
- add_to_net_schedule_list_tail(netif);
- maybe_schedule_tx_action();
-@@ -1477,12 +1529,12 @@ irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
- return IRQ_HANDLED;
- }
-
--static void make_tx_response(netif_t *netif,
-- netif_tx_request_t *txp,
-+static void make_tx_response(struct xen_netif *netif,
-+ struct xen_netif_tx_request *txp,
- s8 st)
- {
- RING_IDX i = netif->tx.rsp_prod_pvt;
-- netif_tx_response_t *resp;
-+ struct xen_netif_tx_response *resp;
- int notify;
-
- resp = RING_GET_RESPONSE(&netif->tx, i);
-@@ -1507,7 +1559,7 @@ static void make_tx_response(netif_t *netif,
- #endif
- }
-
--static netif_rx_response_t *make_rx_response(netif_t *netif,
-+static struct xen_netif_rx_response *make_rx_response(struct xen_netif *netif,
- u16 id,
- s8 st,
- u16 offset,
-@@ -1515,7 +1567,7 @@ static netif_rx_response_t *make_rx_response(netif_t *netif,
- u16 flags)
- {
- RING_IDX i = netif->rx.rsp_prod_pvt;
-- netif_rx_response_t *resp;
-+ struct xen_netif_rx_response *resp;
-
- resp = RING_GET_RESPONSE(&netif->rx, i);
- resp->offset = offset;
-@@ -1534,14 +1586,14 @@ static netif_rx_response_t *make_rx_response(netif_t *netif,
- static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
- {
- struct list_head *ent;
-- netif_t *netif;
-+ struct xen_netif *netif;
- int i = 0;
-
- printk(KERN_ALERT "netif_schedule_list:\n");
- spin_lock_irq(&net_schedule_list_lock);
-
- list_for_each (ent, &net_schedule_list) {
-- netif = list_entry(ent, netif_t, list);
-+ netif = list_entry(ent, struct xen_netif, list);
- printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
- "rx_resp_prod=%08x\n",
- i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
-@@ -1569,11 +1621,13 @@ static int __init netback_init(void)
- int i;
- struct page *page;
-
-- if (!is_running_on_xen())
-+ printk(KERN_CRIT "*** netif_init\n");
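++/*
++ * Credit-based scheduling: a vif may consume at most credit_bytes of tx
++ * allowance per credit_usec period; tx_add_credit() replenishes the
++ * allowance once the period expires.
++ */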
++static bool tx_credit_exceeded(struct xen_netif *netif, unsigned size)
++{
++ unsigned long now = jiffies;
++ unsigned long next_credit =
++ netif->credit_timeout.expires +
++ msecs_to_jiffies(netif->credit_usec / 1000);
+
-+ if (!xen_domain())
- return -ENODEV;
-
- /* We can increase reservation by this much in net_rx_action(). */
-- balloon_update_driver_allowance(NET_RX_RING_SIZE);
-+// balloon_update_driver_allowance(NET_RX_RING_SIZE);
-
- skb_queue_head_init(&rx_queue);
- skb_queue_head_init(&tx_queue);
-@@ -1616,7 +1670,7 @@ static int __init netback_init(void)
- netbk_copy_skb_mode = NETBK_DELAYED_COPY_SKB;
- }
-
-- netif_accel_init();
-+ //netif_accel_init();
-
- netif_xenbus_init();
-
-diff --git a/drivers/xen/netback/xenbus.c b/drivers/xen/netback/xenbus.c
-index d7faeb6..ed7c006 100644
---- a/drivers/xen/netback/xenbus.c
-+++ b/drivers/xen/netback/xenbus.c
-@@ -37,7 +37,7 @@ static int netback_remove(struct xenbus_device *dev)
- {
- struct backend_info *be = dev->dev.driver_data;
-
-- netback_remove_accelerators(be, dev);
-+ //netback_remove_accelerators(be, dev);
-
- if (be->netif) {
- kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
-@@ -123,7 +123,7 @@ static int netback_probe(struct xenbus_device *dev,
- goto fail;
- }
-
-- netback_probe_accelerators(be, dev);
-+ //netback_probe_accelerators(be, dev);
-
- err = xenbus_switch_state(dev, XenbusStateInitWait);
- if (err)
-@@ -149,12 +149,10 @@ fail:
- * and vif variables to the environment, for the benefit of the vif-* hotplug
- * scripts.
- */
--static int netback_uevent(struct xenbus_device *xdev, char **envp,
-- int num_envp, char *buffer, int buffer_size)
-+static int netback_uevent(struct xenbus_device *xdev, struct kobj_uevent_env *env)
- {
- struct backend_info *be = xdev->dev.driver_data;
-- netif_t *netif = be->netif;
-- int i = 0, length = 0;
-+ struct xen_netif *netif = be->netif;
- char *val;
-
- DPRINTK("netback_uevent");
-@@ -166,15 +164,15 @@ static int netback_uevent(struct xenbus_device *xdev, char **envp,
- return err;
- }
- else {
-- add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
-- &length, "script=%s", val);
-+ if (add_uevent_var(env, "script=%s", val)) {
-+ kfree(val);
-+ return -ENOMEM;
-+ }
- kfree(val);
- }
-
-- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
-- "vif=%s", netif->dev->name);
--
-- envp[i] = NULL;
-+ if (add_uevent_var(env, "vif=%s", netif->dev->name))
-+ return -ENOMEM;
-
- return 0;
- }
-@@ -450,5 +448,6 @@ static struct xenbus_driver netback = {
-
- void netif_xenbus_init(void)
- {
-- xenbus_register_backend(&netback);
-+ printk(KERN_CRIT "registering netback\n");
-+ (void)xenbus_register_backend(&netback);
- }
---
-1.7.4
-
-
-From a41a2ab9e1ac4ef8320f69f2719e973e25faff5c Mon Sep 17 00:00:00 2001
-From: Jeremy Fitzhardinge <jeremy(a)goop.org>
-Date: Mon, 9 Feb 2009 16:39:01 -0800
-Subject: [PATCH 039/244] xen: netback: don't include xen/evtchn.h
-
-It's a usermode header for users of /dev/evtchn
-
-Signed-off-by: Jeremy Fitzhardinge <jeremy(a)goop.org>
----
- drivers/xen/netback/common.h | 1 -
- 1 files changed, 0 insertions(+), 1 deletions(-)
-
-diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
-index 65b88f4..5665ed1 100644
---- a/drivers/xen/netback/common.h
-+++ b/drivers/xen/netback/common.h
-@@ -38,7 +38,6 @@
- #include <linux/netdevice.h>
- #include <linux/etherdevice.h>
- #include <linux/wait.h>
--#include <xen/evtchn.h>
- #include <xen/interface/io/netif.h>
- #include <asm/io.h>
- #include <asm/pgalloc.h>
---
-1.7.4
-
-
-From f28a7c6148bb979acf99c0cbe3b441d0fb0853d9 Mon Sep 17 00:00:00 2001
-From: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
-Date: Wed, 18 Feb 2009 15:55:18 -0800
-Subject: [PATCH 040/244] xen: netback: use mod_timer
-
-__mod_timer is no longer a public API.
-
-Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge(a)citrix.com>
----
- drivers/xen/netback/netback.c | 4 ++--
- 1 files changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
-index c959075..e920703 100644
---- a/drivers/xen/netback/netback.c
-+++ b/drivers/xen/netback/netback.c
-@@ -334,7 +334,7 @@ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
- */
- netif->tx_queue_timeout.data = (unsigned long)netif;
- netif->tx_queue_timeout.function = tx_queue_callback;
-- __mod_timer(&netif->tx_queue_timeout, jiffies + HZ/2);
-+ mod_timer(&netif->tx_queue_timeout, jiffies + HZ/2);
- }
- }
-
-@@ -1299,7 +1299,7 @@ static void net_tx_action(unsigned long unused)
- (unsigned long)netif;
- netif->credit_timeout.function =
- tx_credit_callback;
-- __mod_timer(&netif->credit_timeout,
-+ mod_timer(&netif->credit_timeout,
- next_credit);
- netif_put(netif);
- continue;
---
-1.7.4
-
-
-From 52f97ad360f28762c785343ba5c9f8abb83536f3 Mon Sep 17 00:00:00 2001
-From: Jan Beulich <jbeulich(a)novell.com>
-Date: Fri, 6 Mar 2009 08:29:31 +0000
-Subject: [PATCH 041/244] xen: netback: unmap tx ring gref when mapping of rx ring gref failed
-
-[ijc-ported from linux-2.6.18-xen.hg 782:51decc39e5e7]
-Signed-off-by: Jan Beulich <jbeulich(a)novell.com>
-Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
----
- drivers/xen/netback/interface.c | 6 ++++++
- 1 files changed, 6 insertions(+), 0 deletions(-)
-
-diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
-index d184ad7..f3d9ea1 100644
---- a/drivers/xen/netback/interface.c
-+++ b/drivers/xen/netback/interface.c
-@@ -222,6 +222,12 @@ static int map_frontend_pages(
- BUG();
-
- if (op.status) {
-+ struct gnttab_unmap_grant_ref unop;
++ /* Timer could already be pending in rare cases. */
++ if (timer_pending(&netif->credit_timeout))
++ return true;
+
-+ gnttab_set_unmap_op(&unop,
-+ (unsigned long)netif->tx_comms_area->addr,
-+ GNTMAP_host_map, netif->tx_shmem_handle);
-+ HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unop, 1);
- DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
- return op.status;
- }
---
-1.7.4
-
-
-From f9b63790f1404eb03ac824147b2294a46e485643 Mon Sep 17 00:00:00 2001
-From: Ian Campbell <Ian.Campbell(a)citrix.com>
-Date: Fri, 6 Mar 2009 08:29:32 +0000
-Subject: [PATCH 042/244] xen: netback: add ethtool stat to track copied skbs.
-
-Copied skbs should be rare but we have no way of verifying that.
-
-[ijc-ported from linux-2.6.18-xen.hg 792:db9857bb0320]
-Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
----
- drivers/xen/netback/common.h | 3 ++
- drivers/xen/netback/interface.c | 47 +++++++++++++++++++++++++++++++++++++++
- drivers/xen/netback/netback.c | 6 ++++-
- 3 files changed, 55 insertions(+), 1 deletions(-)
-
-diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
-index 5665ed1..6ba804d 100644
---- a/drivers/xen/netback/common.h
-+++ b/drivers/xen/netback/common.h
-@@ -92,6 +92,9 @@ struct xen_netif {
- /* Enforce draining of the transmit queue. */
- struct timer_list tx_queue_timeout;
-
-+ /* Statistics */
-+ int nr_copied_skbs;
++ /* Passed the point where we can replenish credit? */
++ if (time_after_eq(now, next_credit)) {
++ netif->credit_timeout.expires = now;
++ tx_add_credit(netif);
++ }
+
- /* Miscellaneous private stuff. */
- struct list_head list; /* scheduling list */
- atomic_t refcnt;
-diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
-index f3d9ea1..1a99c87 100644
---- a/drivers/xen/netback/interface.c
-+++ b/drivers/xen/netback/interface.c
-@@ -119,8 +119,51 @@ static int netbk_set_tso(struct net_device *dev, u32 data)
- return ethtool_op_set_tso(dev, data);
- }
-
-+static void netbk_get_drvinfo(struct net_device *dev,
-+ struct ethtool_drvinfo *info)
-+{
-+ strcpy(info->driver, "netbk");
-+}
++ /* Still too big to send right now? Set a callback. */
++ if (size > netif->remaining_credit) {
++ netif->credit_timeout.data =
++ (unsigned long)netif;
++ netif->credit_timeout.function =
++ tx_credit_callback;
++ mod_timer(&netif->credit_timeout,
++ next_credit);
+
-+static const struct netif_stat {
-+ char name[ETH_GSTRING_LEN];
-+ u16 offset;
-+} netbk_stats[] = {
-+ { "copied_skbs", offsetof(struct xen_netif, nr_copied_skbs) },
-+};
++ return true;
++ }
+
-+static int netbk_get_stats_count(struct net_device *dev)
-+{
-+ return ARRAY_SIZE(netbk_stats);
++ return false;
+}
+
-+static void netbk_get_ethtool_stats(struct net_device *dev,
-+ struct ethtool_stats *stats, u64 * data)
++static unsigned net_tx_build_mops(struct xen_netbk *netbk)
+{
-+ void *netif = netdev_priv(dev);
-+ int i;
++ struct gnttab_map_grant_ref *mop;
++ struct sk_buff *skb;
++ int ret;
+
-+ for (i = 0; i < ARRAY_SIZE(netbk_stats); i++)
-+ data[i] = *(int *)(netif + netbk_stats[i].offset);
-+}
++ mop = netbk->tx_map_ops;
++ while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
++ !list_empty(&netbk->net_schedule_list)) {
++ struct xen_netif *netif;
++ struct xen_netif_tx_request txreq;
++ struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
++ struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
++ u16 pending_idx;
++ RING_IDX idx;
++ int work_to_do;
++ unsigned int data_len;
++ pending_ring_idx_t index;
+
-+static void netbk_get_strings(struct net_device *dev, u32 stringset, u8 * data)
-+{
-+ int i;
++ /* Get a netif from the list with work to do. */
++ netif = poll_net_schedule_list(netbk);
++ if (!netif)
++ continue;
+
-+ switch (stringset) {
-+ case ETH_SS_STATS:
-+ for (i = 0; i < ARRAY_SIZE(netbk_stats); i++)
-+ memcpy(data + i * ETH_GSTRING_LEN,
-+ netbk_stats[i].name, ETH_GSTRING_LEN);
-+ break;
-+ }
-+}
++ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
++ if (!work_to_do) {
++ netif_put(netif);
++ continue;
++ }
+
- static struct ethtool_ops network_ethtool_ops =
- {
-+ .get_drvinfo = netbk_get_drvinfo,
++ idx = netif->tx.req_cons;
++ rmb(); /* Ensure that we see the request before we copy it. */
++ memcpy(&txreq, RING_GET_REQUEST(&netif->tx, idx), sizeof(txreq));
+
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_csum,
- .get_sg = ethtool_op_get_sg,
-@@ -128,6 +171,10 @@ static struct ethtool_ops network_ethtool_ops =
- .get_tso = ethtool_op_get_tso,
- .set_tso = netbk_set_tso,
- .get_link = ethtool_op_get_link,
++ /* Credit-based scheduling. */
++ if (txreq.size > netif->remaining_credit &&
++ tx_credit_exceeded(netif, txreq.size)) {
++ netif_put(netif);
++ continue;
++ }
+
-+ .get_stats_count = netbk_get_stats_count,
-+ .get_ethtool_stats = netbk_get_ethtool_stats,
-+ .get_strings = netbk_get_strings,
- };
-
- struct xen_netif *netif_alloc(domid_t domid, unsigned int handle)
-diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
-index e920703..f59fadb 100644
---- a/drivers/xen/netback/netback.c
-+++ b/drivers/xen/netback/netback.c
-@@ -923,7 +923,11 @@ inline static void net_tx_action_dealloc(void)
- if (time_after(inuse->alloc_time + HZ / 2, jiffies))
- break;
-
-- switch (copy_pending_req(inuse - pending_inuse)) {
-+ pending_idx = inuse - pending_inuse;
++ netif->remaining_credit -= txreq.size;
+
-+ pending_tx_info[pending_idx].netif->nr_copied_skbs++;
++ work_to_do--;
++ netif->tx.req_cons = ++idx;
+
-+ switch (copy_pending_req(pending_idx)) {
- case 0:
- list_move_tail(&inuse->list, &list);
- continue;
---
-1.7.4
-
-
-From c41d8da3d853d4e89ba38693b90c1fe512095704 Mon Sep 17 00:00:00 2001
-From: Ian Campbell <Ian.Campbell(a)citrix.com>
-Date: Fri, 6 Mar 2009 08:29:33 +0000
-Subject: [PATCH 043/244] xen: netback: make queue length parameter writeable in sysfs
-
-Any changes will only take effect for newly created VIFs.
-
-Also hook up the vif devices to their parent and publish bus info via
-ethtool.
-
-[ijc-ported from linux-2.6.18-xen.hg 793:3aa9b8a7876b]
-Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
----
- drivers/xen/netback/interface.c | 3 ++-
- drivers/xen/netback/xenbus.c | 1 +
- 2 files changed, 3 insertions(+), 1 deletions(-)
-
-diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
-index 1a99c87..7706170 100644
---- a/drivers/xen/netback/interface.c
-+++ b/drivers/xen/netback/interface.c
-@@ -52,7 +52,7 @@
- * blocked.
- */
- static unsigned long netbk_queue_length = 32;
--module_param_named(queue_length, netbk_queue_length, ulong, 0);
-+module_param_named(queue_length, netbk_queue_length, ulong, 0644);
-
- static void __netif_up(struct xen_netif *netif)
- {
-@@ -123,6 +123,7 @@ static void netbk_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
- {
- strcpy(info->driver, "netbk");
-+ strcpy(info->bus_info, dev->dev.parent->bus_id);
- }
-
- static const struct netif_stat {
-diff --git a/drivers/xen/netback/xenbus.c b/drivers/xen/netback/xenbus.c
-index ed7c006..dc7b367 100644
---- a/drivers/xen/netback/xenbus.c
-+++ b/drivers/xen/netback/xenbus.c
-@@ -200,6 +200,7 @@ static void backend_create_netif(struct backend_info *be)
- xenbus_dev_fatal(dev, err, "creating interface");
- return;
- }
-+ SET_NETDEV_DEV(be->netif->dev, &dev->dev);
-
- kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
- }
---
-1.7.4
-
-
-From f204d7567ab11ddb1ff3208ab5ed8921b575af5d Mon Sep 17 00:00:00 2001
-From: Ian Campbell <Ian.Campbell(a)citrix.com>
-Date: Mon, 16 Mar 2009 22:05:16 +0000
-Subject: [PATCH 044/244] xen: netback: parent sysfs device should be set before registering.
-
-Signed-off-by: Ian Campbell <ian.campbell(a)citrix.com>
----
- drivers/xen/netback/common.h | 2 +-
- drivers/xen/netback/interface.c | 4 +++-
- drivers/xen/netback/xenbus.c | 3 +--
- 3 files changed, 5 insertions(+), 4 deletions(-)
-
-diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
-index 6ba804d..123a169 100644
---- a/drivers/xen/netback/common.h
-+++ b/drivers/xen/netback/common.h
-@@ -180,7 +180,7 @@ void netif_accel_init(void);
-
- void netif_disconnect(struct xen_netif *netif);
-
--struct xen_netif *netif_alloc(domid_t domid, unsigned int handle);
-+struct xen_netif *netif_alloc(struct device *parent, domid_t domid, unsigned int handle);
- int netif_map(struct xen_netif *netif, unsigned long tx_ring_ref,
- unsigned long rx_ring_ref, unsigned int evtchn);
-
-diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
-index 7706170..5e0d26d 100644
---- a/drivers/xen/netback/interface.c
-+++ b/drivers/xen/netback/interface.c
-@@ -178,7 +178,7 @@ static struct ethtool_ops network_ethtool_ops =
- .get_strings = netbk_get_strings,
- };
-
--struct xen_netif *netif_alloc(domid_t domid, unsigned int handle)
-+struct xen_netif *netif_alloc(struct device *parent, domid_t domid, unsigned int handle)
- {
- int err = 0;
- struct net_device *dev;
-@@ -192,6 +192,8 @@ struct xen_netif *netif_alloc(domid_t domid, unsigned int handle)
- return ERR_PTR(-ENOMEM);
- }
-
-+ SET_NETDEV_DEV(dev, parent);
++ memset(extras, 0, sizeof(extras));
++ if (txreq.flags & NETTXF_extra_info) {
++ work_to_do = netbk_get_extras(netif, extras,
++ work_to_do);
++ idx = netif->tx.req_cons;
++ if (unlikely(work_to_do < 0)) {
++ netbk_tx_err(netif, &txreq, idx);
++ continue;
++ }
++ }
++
++ ret = netbk_count_requests(netif, &txreq, txfrags, work_to_do);
++ if (unlikely(ret < 0)) {
++ netbk_tx_err(netif, &txreq, idx - ret);
++ continue;
++ }
++ idx += ret;
++
++ if (unlikely(txreq.size < ETH_HLEN)) {
++ pr_debug("Bad packet size: %d\n", txreq.size);
++ netbk_tx_err(netif, &txreq, idx);
++ continue;
++ }
++
++ /* No crossing a page as the payload mustn't fragment. */
++ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
++ pr_debug("txreq.offset: %x, size: %u, end: %lu\n",
++ txreq.offset, txreq.size,
++ (txreq.offset&~PAGE_MASK) + txreq.size);
++ netbk_tx_err(netif, &txreq, idx);
++ continue;
++ }
++
++ index = pending_index(netbk->pending_cons);
++ pending_idx = netbk->pending_ring[index];
++
++ data_len = (txreq.size > PKT_PROT_LEN &&
++ ret < MAX_SKB_FRAGS) ?
++ PKT_PROT_LEN : txreq.size;
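++
++		/*
++		 * Only the protocol headers (at most PKT_PROT_LEN bytes) are
++		 * copied into the linear area; the rest stays in mapped frags.
++		 */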
+
- netif = netdev_priv(dev);
- memset(netif, 0, sizeof(*netif));
- netif->domid = domid;
-diff --git a/drivers/xen/netback/xenbus.c b/drivers/xen/netback/xenbus.c
-index dc7b367..749931e 100644
---- a/drivers/xen/netback/xenbus.c
-+++ b/drivers/xen/netback/xenbus.c
-@@ -193,14 +193,13 @@ static void backend_create_netif(struct backend_info *be)
- return;
- }
-
-- be->netif = netif_alloc(dev->otherend_id, handle);
-+ be->netif = netif_alloc(&dev->dev, dev->otherend_id, handle);
- if (IS_ERR(be->netif)) {
- err = PTR_ERR(be->netif);
- be->netif = NULL;
- xenbus_dev_fatal(dev, err, "creating interface");
- return;
- }
-- SET_NETDEV_DEV(be->netif->dev, &dev->dev);
-
- kobject_uevent(&dev->dev