[Xen-devel] [PATCH] netfront: Lockdep fixes

To: Christian Limpach <Christian.Limpach@xxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH] netfront: Lockdep fixes
From: Jeremy Fitzhardinge <jeremy@xxxxxxxxxxxxx>
Date: Thu, 12 Apr 2007 15:58:34 -0700
Cc: Andrei Petrov <andrei@xxxxxxxxxxxxx>, Xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxx>
Delivery-date: Fri, 13 Apr 2007 02:17:04 -0700
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Thunderbird 1.5.0.10 (X11/20070302)
netfront contains two locking problems found by lockdep:

1. rx_lock is a normal spinlock, while tx_lock is an irq-safe spinlock.
   In normal use, an interrupt routine may therefore take tx_lock while
   rx_lock is held, which establishes the lock ordering rx_lock -> tx_lock.
   However, netif_disconnect_backend takes them in the opposite order,
   tx_lock -> rx_lock, which could lead to a deadlock.  Reverse the order
   there (see the first sketch below).
2. rx_lock is also used in softirq context, so it should be taken and
   released with spin_lock_bh()/spin_unlock_bh() (see the second sketch,
   just before the diff).
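
For illustration only, not part of the patch: a minimal userspace C
analogue of problem 1.  pthread mutexes stand in for the kernel
spinlocks, and the function names and usleep() calls are invented to
widen the race window.  The point is just the AB-BA ordering: two paths
taking the same pair of locks in opposite orders can deadlock, and
making the teardown path match the datapath's order removes the cycle.

/* Userspace analogue of the rx_lock/tx_lock ordering bug (illustrative
 * only; pthread mutexes stand in for kernel spinlocks). */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t rx_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;

/* Normal use: the "interrupt" path ends up with order rx_lock -> tx_lock. */
static void *datapath(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&rx_lock);
	usleep(1000);			/* widen the race window */
	pthread_mutex_lock(&tx_lock);	/* order: rx -> tx */
	pthread_mutex_unlock(&tx_lock);
	pthread_mutex_unlock(&rx_lock);
	return NULL;
}

/* Buggy teardown: tx_lock -> rx_lock, the opposite order.  The fix is
 * the same as in the patch: take rx_lock first here as well. */
static void *teardown(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&tx_lock);
	usleep(1000);
	pthread_mutex_lock(&rx_lock);	/* order: tx -> rx -- deadlock */
	pthread_mutex_unlock(&rx_lock);
	pthread_mutex_unlock(&tx_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, datapath, NULL);
	pthread_create(&b, NULL, teardown, NULL);
	/* With the opposite orders above, the joins usually hang: each
	 * thread holds one lock and spins waiting for the other's. */
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	puts("completed without deadlocking");
	return 0;
}

Built with cc demo.c -lpthread, this usually hangs at the joins;
swapping teardown() to take rx_lock first makes it always complete.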

Signed-off-by: Jeremy Fitzhardinge <jeremy@xxxxxxxxxxxxx>
Cc: Chris Wright <chrisw@xxxxxxxxxxxx>
Cc: Christian Limpach <Christian.Limpach@xxxxxxxxxxxx>
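
Also for illustration, not part of the patch: the pattern problem 2
calls for, as a kernel-style fragment.  It only compiles inside a
kernel tree, and process_context_path() is an invented name.  Since
netif_poll() runs as a NAPI softirq, any process-context user of
rx_lock must disable bottom halves while holding it; otherwise a
softirq raised on the same CPU while the lock is held would spin on it
forever.

/* Illustrative kernel-style fragment (not buildable standalone). */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(rx_lock);

/* Process-context path touching state that netif_poll() (softirq) also
 * touches.  A plain spin_lock() here would let a softirq on this CPU
 * interrupt us while we hold rx_lock and then spin on it: a
 * self-deadlock.  spin_lock_bh() disables bottom halves for the
 * duration of the critical section. */
static void process_context_path(void)
{
	spin_lock_bh(&rx_lock);
	/* ... adjust RX ring / buffer state ... */
	spin_unlock_bh(&rx_lock);
}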

diff -r a69029562c74 linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Thu Apr 12 14:13:33 2007 -0700
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Thu Apr 12 14:19:42 2007 -0700
@@ -622,14 +622,14 @@ static int network_open(struct net_devic
 
        memset(&np->stats, 0, sizeof(np->stats));
 
-       spin_lock(&np->rx_lock);
+       spin_lock_bh(&np->rx_lock);
        if (netfront_carrier_ok(np)) {
                network_alloc_rx_buffers(dev);
                np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
                if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
                        netif_rx_schedule(dev);
        }
-       spin_unlock(&np->rx_lock);
+       spin_unlock_bh(&np->rx_lock);
 
        network_maybe_wake_tx(dev);
 
@@ -1307,10 +1307,10 @@ static int netif_poll(struct net_device 
        int pages_flipped = 0;
        int err;
 
-       spin_lock(&np->rx_lock);
+       spin_lock_bh(&np->rx_lock);
 
        if (unlikely(!netfront_carrier_ok(np))) {
-               spin_unlock(&np->rx_lock);
+               spin_unlock_bh(&np->rx_lock);
                return 0;
        }
 
@@ -1478,7 +1478,7 @@ err:
                local_irq_restore(flags);
        }
 
-       spin_unlock(&np->rx_lock);
+       spin_unlock_bh(&np->rx_lock);
 
        return more_to_do;
 }
@@ -1520,7 +1520,7 @@ static void netif_release_rx_bufs(struct
 
        skb_queue_head_init(&free_list);
 
-       spin_lock(&np->rx_lock);
+       spin_lock_bh(&np->rx_lock);
 
        for (id = 0; id < NET_RX_RING_SIZE; id++) {
                if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) {
@@ -1588,7 +1588,7 @@ static void netif_release_rx_bufs(struct
        while ((skb = __skb_dequeue(&free_list)) != NULL)
                dev_kfree_skb(skb);
 
-       spin_unlock(&np->rx_lock);
+       spin_unlock_bh(&np->rx_lock);
 }
 
 static int network_close(struct net_device *dev)
@@ -1708,8 +1708,8 @@ static int network_connect(struct net_de
        IPRINTK("device %s has %sing receive path.\n",
                dev->name, np->copying_receiver ? "copy" : "flipp");
 
+       spin_lock_bh(&np->rx_lock);
        spin_lock_irq(&np->tx_lock);
-       spin_lock(&np->rx_lock);
 
        /*
         * Recovery procedure:
@@ -1761,7 +1761,7 @@ static int network_connect(struct net_de
        network_tx_buf_gc(dev);
        network_alloc_rx_buffers(dev);
 
-       spin_unlock(&np->rx_lock);
+       spin_unlock_bh(&np->rx_lock);
        spin_unlock_irq(&np->tx_lock);
 
        return 0;
@@ -1818,7 +1818,7 @@ static ssize_t store_rxbuf_min(struct cl
        if (target > RX_MAX_TARGET)
                target = RX_MAX_TARGET;
 
-       spin_lock(&np->rx_lock);
+       spin_lock_bh(&np->rx_lock);
        if (target > np->rx_max_target)
                np->rx_max_target = target;
        np->rx_min_target = target;
@@ -1827,7 +1827,7 @@ static ssize_t store_rxbuf_min(struct cl
 
        network_alloc_rx_buffers(netdev);
 
-       spin_unlock(&np->rx_lock);
+       spin_unlock_bh(&np->rx_lock);
        return len;
 }
 
@@ -1861,7 +1861,7 @@ static ssize_t store_rxbuf_max(struct cl
        if (target > RX_MAX_TARGET)
                target = RX_MAX_TARGET;
 
-       spin_lock(&np->rx_lock);
+       spin_lock_bh(&np->rx_lock);
        if (target < np->rx_min_target)
                np->rx_min_target = target;
        np->rx_max_target = target;
@@ -1870,7 +1870,7 @@ static ssize_t store_rxbuf_max(struct cl
 
        network_alloc_rx_buffers(netdev);
 
-       spin_unlock(&np->rx_lock);
+       spin_unlock_bh(&np->rx_lock);
        return len;
 }
 
@@ -2033,10 +2033,10 @@ static void netif_disconnect_backend(str
 static void netif_disconnect_backend(struct netfront_info *info)
 {
        /* Stop old i/f to prevent errors whilst we rebuild the state. */
+       spin_lock_bh(&info->rx_lock);
        spin_lock_irq(&info->tx_lock);
-       spin_lock(&info->rx_lock);
        netfront_carrier_off(info);
-       spin_unlock(&info->rx_lock);
+       spin_unlock_bh(&info->rx_lock);
        spin_unlock_irq(&info->tx_lock);
 
        if (info->irq)


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
