SOURCES/0136-KCM-Fix-the-per-client-serialization-queue.patch
From b31f75f44a9e1dc0521ec73176f89e05db4973ba Mon Sep 17 00:00:00 2001
From: Jakub Hrozek <jhrozek@redhat.com>
Date: Thu, 11 May 2017 16:24:24 +0200
Subject: [PATCH 136/138] KCM: Fix the per-client serialization queue
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Resolves:
    https://pagure.io/SSSD/sssd/issue/3372

Fixes a race condition between one client request adding an operation to
the hash table value, which was previously a linked list of operations,
and another concurrent operation removing the last remaining linked list
element through its callback.

Instead, the hash table value is now a separate 'queue head' structure
which is only changed in a tevent request to make sure it is not
processed concurrently with adding to the queue (which is also a tevent
request).

Reviewed-by: Pavel Březina <pbrezina@redhat.com>
(cherry picked from commit fb51bb68e62de7bb8542f5d224994eb7143040a6)
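
The fix relies on tevent "immediate" events: instead of tearing down the
shared queue head inline from the entry destructor, its removal is scheduled
for a later event-loop tick, and the callback re-checks whether the queue is
still empty before freeing it. The standalone sketch below illustrates that
pattern using only libtevent and libtalloc; the toy_queue type and every
other name in it are invented for the example and are not SSSD or KCM APIs
(error checks are omitted for brevity).

/*
 * Standalone sketch (not SSSD code): free the per-UID queue on the next
 * event-loop tick, only if it did not pick up new work in the meantime.
 *
 * Build (assuming libtalloc and libtevent development files are installed):
 *   cc sketch.c $(pkg-config --cflags --libs talloc tevent)
 */
#include <stdio.h>
#include <talloc.h>
#include <tevent.h>

struct toy_queue {
    const char *name;
    int entries;              /* stand-in for the list of waiting requests */
};

/* Runs on a later tick: free the queue only if it is still empty. */
static void toy_queue_removal_cb(struct tevent_context *ev,
                                 struct tevent_immediate *imm,
                                 void *private_data)
{
    struct toy_queue *q = talloc_get_type(private_data, struct toy_queue);

    talloc_free(imm);

    if (q->entries > 0) {
        printf("queue %s became busy again, keeping it\n", q->name);
        return;
    }
    printf("queue %s is still empty, freeing it\n", q->name);
    talloc_free(q);
}

int main(void)
{
    TALLOC_CTX *mem_ctx = talloc_new(NULL);
    struct tevent_context *ev = tevent_context_init(mem_ctx);
    struct toy_queue *q = talloc_zero(mem_ctx, struct toy_queue);
    struct tevent_immediate *imm;

    q->name = "uid-1000";
    q->entries = 0;            /* the last queued request just finished */

    /* Schedule removal instead of freeing the queue right away. */
    imm = tevent_create_immediate(q);
    tevent_schedule_immediate(imm, ev, toy_queue_removal_cb, q);

    /* A new request for the same UID arrives before the removal tick runs. */
    q->entries = 1;

    tevent_loop_once(ev);      /* the callback sees the queue is busy again */

    talloc_free(mem_ctx);
    return 0;
}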
---
 src/responder/kcm/kcmsrv_op_queue.c | 182 ++++++++++++++++++++++++------------
 1 file changed, 122 insertions(+), 60 deletions(-)

diff --git a/src/responder/kcm/kcmsrv_op_queue.c b/src/responder/kcm/kcmsrv_op_queue.c
index f6c425dd5b64877c8b7401e488dd6565157fc9b5..55c8b65d94f70979fe56fcc4d8747547a9cc9d33 100644
--- a/src/responder/kcm/kcmsrv_op_queue.c
+++ b/src/responder/kcm/kcmsrv_op_queue.c
@@ -27,17 +27,23 @@
 
 struct kcm_ops_queue_entry {
     struct tevent_req *req;
-    uid_t uid;
 
-    hash_table_t *wait_queue_hash;
+    struct kcm_ops_queue *queue;
 
-    struct kcm_ops_queue_entry *head;
     struct kcm_ops_queue_entry *next;
     struct kcm_ops_queue_entry *prev;
 };
 
+struct kcm_ops_queue {
+    uid_t uid;
+    struct tevent_context *ev;
+    struct kcm_ops_queue_ctx *qctx;
+
+    struct kcm_ops_queue_entry *head;
+};
+
 struct kcm_ops_queue_ctx {
-    /* UID: dlist of kcm_ops_queue_entry */
+    /* UID:kcm_ops_queue */
     hash_table_t *wait_queue_hash;
 };
 
@@ -45,8 +51,9 @@ struct kcm_ops_queue_ctx {
  * Per-UID wait queue
  *
  * They key in the hash table is the UID of the peer. The value of each
- * hash table entry is a linked list of kcm_ops_queue_entry structures
- * which primarily hold the tevent request being queued.
+ * hash table entry is kcm_ops_queue structure which in turn contains a
+ * linked list of kcm_ops_queue_entry structures * which primarily hold the
+ * tevent request being queued.
  */
 struct kcm_ops_queue_ctx *kcm_ops_queue_create(TALLOC_CTX *mem_ctx)
 {
@@ -71,11 +78,45 @@ struct kcm_ops_queue_ctx *kcm_ops_queue_create(TALLOC_CTX *mem_ctx)
     return queue_ctx;
 }
 
-static int kcm_op_queue_entry_destructor(struct kcm_ops_queue_entry *entry)
+void queue_removal_cb(struct tevent_context *ctx,
+                      struct tevent_immediate *imm,
+                      void *private_data)
 {
+    struct kcm_ops_queue *kq = talloc_get_type(private_data,
+                                               struct kcm_ops_queue);
     int ret;
+    hash_key_t key;
+
+    talloc_free(imm);
+
+    if (kq->head != NULL) {
+        DEBUG(SSSDBG_TRACE_LIBS, "The queue is no longer empty\n");
+        return;
+    }
+
+    key.type = HASH_KEY_ULONG;
+    key.ul = kq->uid;
+
+    /* If this was the last entry, remove the key (the UID) from the
+     * hash table to signal the queue is empty
+     */
+    ret = hash_delete(kq->qctx->wait_queue_hash, &key);
+    if (ret != HASH_SUCCESS) {
+        DEBUG(SSSDBG_CRIT_FAILURE,
+              "Failed to remove wait queue for user %"SPRIuid"\n",
+              kq->uid);
+        return;
+    }
+
+    DEBUG(SSSDBG_FUNC_DATA,
+          "Removed queue for %"SPRIuid" \n", kq->uid);
+    talloc_free(kq);
+}
+
+static int kcm_op_queue_entry_destructor(struct kcm_ops_queue_entry *entry)
+{
     struct kcm_ops_queue_entry *next_entry;
-    hash_key_t key;
+    struct tevent_immediate *imm;
 
     if (entry == NULL) {
         return 1;
@@ -85,22 +126,19 @@ static int kcm_op_queue_entry_destructor(struct kcm_ops_queue_entry *entry)
     next_entry = entry->next;
 
     /* Remove the current entry from the queue */
-    DLIST_REMOVE(entry->head, entry);
+    DLIST_REMOVE(entry->queue->head, entry);
 
     if (next_entry == NULL) {
-        key.type = HASH_KEY_ULONG;
-        key.ul = entry->uid;
-
-        /* If this was the last entry, remove the key (the UID) from the
-         * hash table to signal the queue is empty
+        /* If there was no other entry, schedule removal of the queue. Do it
+         * in another tevent tick to avoid issues with callbacks invoking
+         * the descructor while another request is touching the queue
          */
-        ret = hash_delete(entry->wait_queue_hash, &key);
-        if (ret != HASH_SUCCESS) {
-            DEBUG(SSSDBG_CRIT_FAILURE,
-                  "Failed to remove wait queue for user %"SPRIuid"\n",
-                  entry->uid);
+        imm = tevent_create_immediate(entry->queue);
+        if (imm == NULL) {
             return 1;
         }
+
+        tevent_schedule_immediate(imm, entry->queue->ev, queue_removal_cb, entry->queue);
         return 0;
     }
 
@@ -109,41 +147,33 @@ static int kcm_op_queue_entry_destructor(struct kcm_ops_queue_entry *entry)
     return 0;
 }
 
-static errno_t kcm_op_queue_add(hash_table_t *wait_queue_hash,
-                                struct kcm_ops_queue_entry *entry,
-                                uid_t uid)
+static struct kcm_ops_queue *kcm_op_queue_get(struct kcm_ops_queue_ctx *qctx,
+                                              struct tevent_context *ev,
+                                              uid_t uid)
 {
     errno_t ret;
     hash_key_t key;
     hash_value_t value;
-    struct kcm_ops_queue_entry *head = NULL;
+    struct kcm_ops_queue *kq;
 
     key.type = HASH_KEY_ULONG;
     key.ul = uid;
 
-    ret = hash_lookup(wait_queue_hash, &key, &value);
+    ret = hash_lookup(qctx->wait_queue_hash, &key, &value);
     switch (ret) {
     case HASH_SUCCESS:
-        /* The key with this UID already exists. Its value is request queue
-         * for the UID, so let's just add the current request to the end
-         * of the queue and wait for the previous requests to finish
-         */
         if (value.type != HASH_VALUE_PTR) {
             DEBUG(SSSDBG_CRIT_FAILURE, "Unexpected hash value type.\n");
-            return EINVAL;
+            return NULL;
         }
 
-        head = talloc_get_type(value.ptr, struct kcm_ops_queue_entry);
-        if (head == NULL) {
+        kq = talloc_get_type(value.ptr, struct kcm_ops_queue);
+        if (kq == NULL) {
             DEBUG(SSSDBG_CRIT_FAILURE, "Invalid queue pointer\n");
-            return EINVAL;
+            return NULL;
         }
 
-        entry->head = head;
-        DLIST_ADD_END(head, entry, struct kcm_ops_queue_entry *);
-
-        DEBUG(SSSDBG_TRACE_LIBS, "Waiting in queue\n");
-        ret = EAGAIN;
+        DEBUG(SSSDBG_TRACE_LIBS, "Found existing queue for this ID\n");
         break;
 
     case HASH_ERROR_KEY_NOT_FOUND:
@@ -151,36 +181,41 @@ static errno_t kcm_op_queue_add(hash_table_t *wait_queue_hash,
          * another one comes in and return EOK to run the current request
          * immediatelly
          */
-        entry->head = entry;
+        DEBUG(SSSDBG_TRACE_LIBS, "No existing queue for this ID\n");
+
+        kq = talloc_zero(qctx->wait_queue_hash, struct kcm_ops_queue);
+        if (kq == NULL) {
+            return NULL;
+        }
+        kq->uid = uid;
+        kq->qctx = qctx;
+        kq->ev = ev;
 
         value.type = HASH_VALUE_PTR;
-        value.ptr = entry;
+        value.ptr = kq;
 
-        ret = hash_enter(wait_queue_hash, &key, &value);
+        ret = hash_enter(qctx->wait_queue_hash, &key, &value);
         if (ret != HASH_SUCCESS) {
             DEBUG(SSSDBG_CRIT_FAILURE, "hash_enter failed.\n");
-            return EIO;
+            return NULL;
         }
-
-        DEBUG(SSSDBG_TRACE_LIBS,
-              "Added a first request to the queue, running immediately\n");
-        ret = EOK;
         break;
 
     default:
         DEBUG(SSSDBG_CRIT_FAILURE, "hash_lookup failed.\n");
-        return EIO;
+        return NULL;
     }
 
-    talloc_steal(wait_queue_hash, entry);
-    talloc_set_destructor(entry, kcm_op_queue_entry_destructor);
-    return ret;
+    return kq;
 }
 
 struct kcm_op_queue_state {
     struct kcm_ops_queue_entry *entry;
 };
 
+static errno_t kcm_op_queue_add_req(struct kcm_ops_queue *kq,
+                                    struct tevent_req *req);
+
 /*
  * Enqueue a request.
  *
@@ -198,6 +233,7 @@ struct tevent_req *kcm_op_queue_send(TALLOC_CTX *mem_ctx,
 {
     errno_t ret;
     struct tevent_req *req;
+    struct kcm_ops_queue *kq;
     struct kcm_op_queue_state *state;
     uid_t uid;
 
@@ -208,22 +244,21 @@ struct tevent_req *kcm_op_queue_send(TALLOC_CTX *mem_ctx,
         return NULL;
     }
 
-    state->entry = talloc_zero(state, struct kcm_ops_queue_entry);
-    if (state->entry == NULL) {
-        ret = ENOMEM;
-        goto immediate;
-    }
-    state->entry->req = req;
-    state->entry->uid = uid;
-    state->entry->wait_queue_hash = qctx->wait_queue_hash;
-
     DEBUG(SSSDBG_FUNC_DATA,
           "Adding request by %"SPRIuid" to the wait queue\n", uid);
 
-    ret = kcm_op_queue_add(qctx->wait_queue_hash, state->entry, uid);
+    kq = kcm_op_queue_get(qctx, ev, uid);
+    if (kq == NULL) {
+        ret = EIO;
+        DEBUG(SSSDBG_OP_FAILURE,
+              "Cannot get queue [%d]: %s\n", ret, sss_strerror(ret));
+        goto immediate;
+    }
+
+    ret = kcm_op_queue_add_req(kq, req);
     if (ret == EOK) {
         DEBUG(SSSDBG_TRACE_LIBS,
-              "Wait queue was empty, running immediately\n");
+              "Queue was empty, running the request immediately\n");
         goto immediate;
     } else if (ret != EAGAIN) {
         DEBUG(SSSDBG_OP_FAILURE,
@@ -244,6 +279,33 @@ immediate:
     return req;
 }
 
+static errno_t kcm_op_queue_add_req(struct kcm_ops_queue *kq,
+                                    struct tevent_req *req)
+{
+    errno_t ret;
+    struct kcm_op_queue_state *state = tevent_req_data(req,
+                                                struct kcm_op_queue_state);
+
+    state->entry = talloc_zero(kq->qctx->wait_queue_hash, struct kcm_ops_queue_entry);
+    if (state->entry == NULL) {
+        return ENOMEM;
+    }
+    state->entry->req = req;
+    state->entry->queue = kq;
+    talloc_set_destructor(state->entry, kcm_op_queue_entry_destructor);
+
+    if (kq->head == NULL) {
+        /* First entry, will run callback at once */
+        ret = EOK;
+    } else {
+        /* Will wait for the previous callbacks to finish */
+        ret = EAGAIN;
+    }
+
+    DLIST_ADD_END(kq->head, state->entry, struct kcm_ops_queue_entry *);
+    return ret;
+}
+
 /*
  * The queue recv function is called when this request is 'activated'. The queue
  * entry should be allocated on the same memory context as the enqueued request
-- 
2.9.4
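
For completeness, the enqueue side of the change can be modelled without
talloc, tevent, or dhash at all: kcm_op_queue_add_req() returns EOK when the
new entry is the first one in a previously empty per-UID queue (the request
may run at once) and EAGAIN when it has to wait behind earlier entries. The
plain-C model below captures only that return-value contract; the toy_* names
are hypothetical and none of this is SSSD code.

#include <errno.h>
#include <stdio.h>

/* Toy stand-ins for the real structures; names are illustrative only. */
struct toy_entry {
    int id;
    struct toy_entry *next;
};

struct toy_queue {
    struct toy_entry *head;
    struct toy_entry *tail;
};

/* Mirrors the contract of kcm_op_queue_add_req(): 0 (EOK) means "run now",
 * EAGAIN means "wait until the entries queued before you have finished". */
static int toy_queue_add(struct toy_queue *q, struct toy_entry *e)
{
    int ret = (q->head == NULL) ? 0 : EAGAIN;

    e->next = NULL;
    if (q->tail != NULL) {
        q->tail->next = e;
    } else {
        q->head = e;
    }
    q->tail = e;
    return ret;
}

int main(void)
{
    struct toy_queue q = { NULL, NULL };
    struct toy_entry e1 = { 1, NULL }, e2 = { 2, NULL };

    printf("first add: %s\n", toy_queue_add(&q, &e1) == 0 ? "run now" : "wait");
    printf("second add: %s\n", toy_queue_add(&q, &e2) == 0 ? "run now" : "wait");
    return 0;
}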