author    Nick Piggin <npiggin@suse.de>
          Wed, 9 Dec 2009 05:08:37 +0000 (16:08 +1100)
committer Nick Piggin <npiggin@suse.de>
          Wed, 9 Dec 2009 05:08:37 +0000 (16:08 +1100)

- patches.fixes/ipc-ns-fix-memory-leak-idr.patch: ipc: ns fix
  memory leak (bnc#518767).
- patches.fixes/ipc-remove-unreachable-code-in-semc.patch:
  (bnc#518767).
- patches.fixes/ipc-semc-add-a-per-semaphore-pending-list.patch:
  (bnc#518767).
- patches.fixes/ipc-semc-optimize-if-semops-fail.patch:
  (bnc#518767).
- patches.fixes/ipc-semc-optimize-single-semop-operations.patch:
  (bnc#518767).
- patches.fixes/ipc-semc-optimize-single-sops-when-semval-is-zero.patch:
  (bnc#518767).
- patches.fixes/ipc-semc-sem-optimise-undo-list-search.patch:
  (bnc#518767).
- patches.fixes/ipc-semc-sem-preempt-improve.patch:  (bnc#518767).
- patches.fixes/ipc-semc-sem-use-list-operations.patch:
  (bnc#518767).

suse-commit: 99c2833f67aa0a6adf140e8e9fea2b9fd03ab84e
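
The first patch above plugs an idr leak on IPC namespace teardown: free_ipcs()
releases the remaining msg/sem/shm objects, but the idr layers cached inside
each ipc_ids stay allocated unless idr_destroy() is also called. A minimal
sketch of that pattern, assuming the kernel internals shown in the hunks below
(the helper name ipc_exit_ids() is hypothetical; the actual hunks open-code
the idr_destroy() call in msg_exit_ns(), sem_exit_ns() and shm_exit_ns()):

	#include <linux/idr.h>
	#include <linux/ipc_namespace.h>

	/*
	 * Tear down one ipc_ids instance on namespace exit: free every
	 * remaining object, then release the idr's cached layers so the
	 * namespace does not leak memory (bnc#518767).
	 */
	static void ipc_exit_ids(struct ipc_namespace *ns, struct ipc_ids *ids,
				 void (*free_fn)(struct ipc_namespace *,
						 struct kern_ipc_perm *))
	{
		free_ipcs(ns, ids, free_fn);	/* drop all remaining objects */
		idr_destroy(&ids->ipcs_idr);	/* drop cached idr layers */
	}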

include/linux/sem.h
ipc/msg.c
ipc/sem.c
ipc/shm.c

index b82f748..9302e7a 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -86,6 +86,7 @@ struct task_struct;
 struct sem {
        int     semval;         /* current value */
        int     sempid;         /* pid of last operation */
+       struct list_head sem_pending; /* pending single-sop operations */
 };
 
 /* One sem_array data structure for each set of semaphores in the system. */
@@ -96,11 +97,13 @@ struct sem_array {
        struct sem              *sem_base;      /* ptr to first semaphore in array */
        struct list_head        sem_pending;    /* pending operations to be processed */
        struct list_head        list_id;        /* undo requests on this array */
-       unsigned long           sem_nsems;      /* no. of semaphores in array */
+       int                     sem_nsems;      /* no. of semaphores in array */
+       int                     complex_count;  /* pending complex operations */
 };
 
 /* One queue for each sleeping process in the system. */
 struct sem_queue {
+       struct list_head        simple_list; /* pending single-sop operations */
        struct list_head        list;    /* queue of pending operations */
        struct task_struct      *sleeper; /* this process */
        struct sem_undo         *undo;   /* undo structure */
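
The struct changes above add a second, per-semaphore pending list so that
single-sop operations can be scanned without walking every waiter of the
array. A minimal illustration of how the two lists are threaded, assuming the
fields declared above (the helper enqueue_pending() is hypothetical; the real
code is open-coded in sys_semtimedop(), see the ipc/sem.c hunks below):

	#include <linux/list.h>
	#include <linux/sem.h>

	/*
	 * Illustration only: a single-sop waiter sits on the array-wide
	 * sem_pending list (via q->list) and on the per-semaphore list of
	 * the one semaphore it touches (via q->simple_list); a complex
	 * operation sits only on the array-wide list and is counted in
	 * sma->complex_count.
	 */
	static void enqueue_pending(struct sem_array *sma, struct sem_queue *q,
				    struct sembuf *sops, int nsops, int alter)
	{
		if (alter)
			list_add_tail(&q->list, &sma->sem_pending);
		else
			list_add(&q->list, &sma->sem_pending);

		if (nsops == 1) {
			struct sem *curr = &sma->sem_base[sops->sem_num];

			if (alter)
				list_add_tail(&q->simple_list, &curr->sem_pending);
			else
				list_add(&q->simple_list, &curr->sem_pending);
		} else {
			INIT_LIST_HEAD(&q->simple_list);
			sma->complex_count++;
		}
	}
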
index 0140a12..0920538 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -128,6 +128,7 @@ void msg_init_ns(struct ipc_namespace *ns)
 void msg_exit_ns(struct ipc_namespace *ns)
 {
        free_ipcs(ns, &msg_ids(ns), freeque);
+       idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
 }
 #endif
 
index af924cd..0c46f56 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -132,6 +132,7 @@ void sem_init_ns(struct ipc_namespace *ns)
 void sem_exit_ns(struct ipc_namespace *ns)
 {
        free_ipcs(ns, &sem_ids(ns), freeary);
+       idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
 }
 #endif
 
@@ -243,6 +244,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
        key_t key = params->key;
        int nsems = params->u.nsems;
        int semflg = params->flg;
+       int i;
 
        if (!nsems)
                return -EINVAL;
@@ -275,6 +277,11 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
        ns->used_sems += nsems;
 
        sma->sem_base = (struct sem *) &sma[1];
+
+       for (i = 0; i < nsems; i++)
+               INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+
+       sma->complex_count = 0;
        INIT_LIST_HEAD(&sma->sem_pending);
        INIT_LIST_HEAD(&sma->list_id);
        sma->sem_nsems = nsems;
@@ -403,63 +410,109 @@ undo:
        return result;
 }
 
-/* Go through the pending queue for the indicated semaphore
- * looking for tasks that can be completed.
+/*
+ * Wake up a process waiting on the sem queue with a given error.
+ * The queue is invalid (may not be accessed) after the function returns.
  */
-static void update_queue (struct sem_array * sma)
+static void wake_up_sem_queue(struct sem_queue *q, int error)
 {
-       int error;
-       struct sem_queue * q;
+       /*
+        * Hold preempt off so that we don't get preempted and have the
+        * wakee busy-wait until we're scheduled back on. We're holding
+        * locks here so it may not strictly be needed, however if the
+        * locks become preemptible then this prevents such a problem.
+        */
+       preempt_disable();
+       q->status = IN_WAKEUP;
+       wake_up_process(q->sleeper);
+       /* hands-off: q can disappear immediately after writing q->status. */
+       smp_wmb();
+       q->status = error;
+       preempt_enable();
+}
+
+static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
+{
+       list_del(&q->list);
+       if (q->nsops == 1)
+               list_del(&q->simple_list);
+       else
+               sma->complex_count--;
+}
+
+
+/**
+ * update_queue(sma, semnum): Look for tasks that can be completed.
+ * @sma: semaphore array.
+ * @semnum: semaphore that was modified.
+ *
+ * update_queue must be called after a semaphore in a semaphore array
+ * was modified. If multiple semaphores were modified, then @semnum
+ * must be set to -1.
+ */
+static void update_queue(struct sem_array *sma, int semnum)
+{
+       struct sem_queue *q;
+       struct list_head *walk;
+       struct list_head *pending_list;
+       int offset;
+
+       /* if there are complex operations around, then knowing the semaphore
+        * that was modified doesn't help us. Assume that multiple semaphores
+        * were modified.
+        */
+       if (sma->complex_count)
+               semnum = -1;
+
+       if (semnum == -1) {
+               pending_list = &sma->sem_pending;
+               offset = offsetof(struct sem_queue, list);
+       } else {
+               pending_list = &sma->sem_base[semnum].sem_pending;
+               offset = offsetof(struct sem_queue, simple_list);
+       }
+
+again:
+       walk = pending_list->next;
+       while (walk != pending_list) {
+               int error, alter;
+
+               q = (struct sem_queue *)((char *)walk - offset);
+               walk = walk->next;
+
+               /* If we are scanning the single sop, per-semaphore list of
+                * one semaphore and that semaphore is 0, then it is not
+                * necessary to scan the "alter" entries: simple increments
+                * that affect only one entry succeed immediately and cannot
+                * be in the per-semaphore pending queue, and decrements
+                * cannot be successful if the value is already 0.
+                */
+               if (semnum != -1 && sma->sem_base[semnum].semval == 0 &&
+                               q->alter)
+                       break;
 
-       q = list_entry(sma->sem_pending.next, struct sem_queue, list);
-       while (&q->list != &sma->sem_pending) {
                error = try_atomic_semop(sma, q->sops, q->nsops,
                                         q->undo, q->pid);
 
                /* Does q->sleeper still need to sleep? */
-               if (error <= 0) {
-                       struct sem_queue *n;
-
-                       /*
-                        * Continue scanning. The next operation
-                        * that must be checked depends on the type of the
-                        * completed operation:
-                        * - if the operation modified the array, then
-                        *   restart from the head of the queue and
-                        *   check for threads that might be waiting
-                        *   for semaphore values to become 0.
-                        * - if the operation didn't modify the array,
-                        *   then just continue.
-                        * The order of list_del() and reading ->next
-                        * is crucial: In the former case, the list_del()
-                        * must be done first [because we might be the
-                        * first entry in ->sem_pending], in the latter
-                        * case the list_del() must be done last
-                        * [because the list is invalid after the list_del()]
-                        */
-                       if (q->alter) {
-                               list_del(&q->list);
-                               n = list_entry(sma->sem_pending.next,
-                                               struct sem_queue, list);
-                       } else {
-                               n = list_entry(q->list.next, struct sem_queue,
-                                               list);
-                               list_del(&q->list);
-                       }
-
-                       /* wake up the waiting thread */
-                       q->status = IN_WAKEUP;
+               if (error > 0)
+                       continue;
 
-                       wake_up_process(q->sleeper);
-                       /* hands-off: q will disappear immediately after
-                        * writing q->status.
-                        */
-                       smp_wmb();
-                       q->status = error;
-                       q = n;
-               } else {
-                       q = list_entry(q->list.next, struct sem_queue, list);
-               }
+               unlink_queue(sma, q);
+
+               /*
+                * The next operation that must be checked depends on the type
+                * of the completed operation:
+                * - if the operation modified the array, then restart from the
+                *   head of the queue and check for threads that might be
+                *   waiting for the new semaphore values.
+                * - if the operation didn't modify the array, then just
+                *   continue.
+                */
+               alter = q->alter;
+               wake_up_sem_queue(q, error);
+               if (alter && !error)
+                       goto again;
        }
 }
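
The new update_queue() above walks either the array-wide list or one
per-semaphore list with a raw offsetof()-based cast so a single loop serves
both cases. For clarity, that pointer arithmetic is equivalent to list_entry()
on the matching member, as this illustrative helper shows (queue_of() is
hypothetical and not part of the patch):

	#include <linux/list.h>
	#include <linux/sem.h>

	/*
	 * Equivalent to the "(struct sem_queue *)((char *)walk - offset)"
	 * cast in update_queue(): recover the sem_queue from whichever
	 * list_head member the current scan is using.
	 */
	static struct sem_queue *queue_of(struct list_head *walk, int semnum)
	{
		if (semnum == -1)	/* scanning sma->sem_pending */
			return list_entry(walk, struct sem_queue, list);
		/* scanning sma->sem_base[semnum].sem_pending */
		return list_entry(walk, struct sem_queue, simple_list);
	}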
 
@@ -539,12 +592,8 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 
        /* Wake up all pending processes and let them fail with EIDRM. */
        list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
-               list_del(&q->list);
-
-               q->status = IN_WAKEUP;
-               wake_up_process(q->sleeper); /* doesn't sleep */
-               smp_wmb();
-               q->status = -EIDRM;     /* hands-off q */
+               unlink_queue(sma, q);
+               wake_up_sem_queue(q, -EIDRM);
        }
 
        /* Remove the semaphore set from the IDR */
@@ -581,7 +630,7 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in,
 static int semctl_nolock(struct ipc_namespace *ns, int semid,
                         int cmd, int version, union semun arg)
 {
-       int err = -EINVAL;
+       int err;
        struct sem_array *sma;
 
        switch(cmd) {
@@ -658,7 +707,6 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
        default:
                return -EINVAL;
        }
-       return err;
 out_unlock:
        sem_unlock(sma);
        return err;
@@ -765,7 +813,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                }
                sma->sem_ctime = get_seconds();
                /* maybe some queued-up processes were waiting for this */
-               update_queue(sma);
+               update_queue(sma, -1);
                err = 0;
                goto out_unlock;
        }
@@ -807,7 +855,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                curr->sempid = task_tgid_vnr(current);
                sma->sem_ctime = get_seconds();
                /* maybe some queued-up processes were waiting for this */
-               update_queue(sma);
+               update_queue(sma, semnum);
                err = 0;
                goto out_unlock;
        }
@@ -967,17 +1015,31 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
        return 0;
 }
 
-static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
+static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
 {
-       struct sem_undo *walk;
+       struct sem_undo *un;
 
-       list_for_each_entry_rcu(walk, &ulp->list_proc, list_proc) {
-               if (walk->semid == semid)
-                       return walk;
+       list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
+               if (un->semid == semid)
+                       return un;
        }
        return NULL;
 }
 
+static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
+{
+       struct sem_undo *un;
+
+       assert_spin_locked(&ulp->lock);
+
+       un = __lookup_undo(ulp, semid);
+       if (un) {
+               list_del_rcu(&un->list_proc);
+               list_add_rcu(&un->list_proc, &ulp->list_proc);
+       }
+       return un;
+}
+
 /**
  * find_alloc_undo - Lookup (and if not present create) undo array
  * @ns: namespace
@@ -1169,7 +1231,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
        error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
        if (error <= 0) {
                if (alter && error == 0)
-                       update_queue (sma);
+                       update_queue(sma, (nsops == 1) ? sops[0].sem_num : -1);
+
                goto out_unlock_free;
        }
 
@@ -1187,6 +1250,19 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
        else
                list_add(&queue.list, &sma->sem_pending);
 
+       if (nsops == 1) {
+               struct sem *curr;
+               curr = &sma->sem_base[sops->sem_num];
+
+               if (alter)
+                       list_add_tail(&queue.simple_list, &curr->sem_pending);
+               else
+                       list_add(&queue.simple_list, &curr->sem_pending);
+       } else {
+               INIT_LIST_HEAD(&queue.simple_list);
+               sma->complex_count++;
+       }
+
        queue.status = -EINTR;
        queue.sleeper = current;
        current->state = TASK_INTERRUPTIBLE;
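
The queue set up here is later woken through wake_up_sem_queue(), which
publishes the transient IN_WAKEUP status before storing the final result. The
sleeper therefore has to spin until IN_WAKEUP is gone before its on-stack
sem_queue may be reused or unlinked. A sketch of that wakee side, in the
context of ipc/sem.c where IN_WAKEUP is defined (the helper name is
hypothetical; it mirrors the busy-wait already present in sys_semtimedop()
after schedule_timeout(), outside the hunks shown here):

	/*
	 * Sketch of the sleeper side that pairs with wake_up_sem_queue():
	 * IN_WAKEUP means the waker has already called wake_up_process()
	 * but has not yet written the final status, so q must not be
	 * touched or unlinked until the real result shows up.
	 */
	static int wait_for_wakeup_status(struct sem_queue *q)
	{
		int error = q->status;

		while (unlikely(error == IN_WAKEUP)) {
			cpu_relax();
			error = q->status;
		}
		return error;
	}
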
@@ -1228,7 +1304,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
         */
        if (timeout && jiffies_left == 0)
                error = -EAGAIN;
-       list_del(&queue.list);
+       unlink_queue(sma, &queue);
 
 out_unlock_free:
        sem_unlock(sma);
@@ -1313,7 +1389,7 @@ void exit_sem(struct task_struct *tsk)
                if (IS_ERR(sma))
                        continue;
 
-               un = lookup_undo(ulp, semid);
+               un = __lookup_undo(ulp, semid);
                if (un == NULL) {
                        /* exit_sem raced with IPC_RMID+semget() that created
                         * exactly the same semid. Nothing to do.
@@ -1357,7 +1433,7 @@ void exit_sem(struct task_struct *tsk)
                }
                sma->sem_otime = get_seconds();
                /* maybe some queued-up processes were waiting for this */
-               update_queue(sma);
+               update_queue(sma, -1);
                sem_unlock(sma);
 
                call_rcu(&un->rcu, free_un);
@@ -1371,7 +1447,7 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
        struct sem_array *sma = it;
 
        return seq_printf(s,
-                         "%10d %10d  %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
+                         "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
                          sma->sem_perm.key,
                          sma->sem_perm.id,
                          sma->sem_perm.mode,
index da5b642..9d855ae 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -104,6 +104,7 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 void shm_exit_ns(struct ipc_namespace *ns)
 {
        free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
+       idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
 }
 #endif