SOURCES/0293-mm-Clarify-grub_real_malloc.patch

From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Daniel Axtens <dja@axtens.net>
Date: Thu, 25 Nov 2021 02:22:46 +1100
Subject: [PATCH] mm: Clarify grub_real_malloc()

When iterating through the singly linked list of free blocks,
grub_real_malloc() uses p and q for the current and previous blocks
respectively. This isn't super clear, so swap to using prev and cur.

This makes another quirk more obvious. The comment at the top of
grub_real_malloc() might lead you to believe that the function will
allocate from *first if there is space in that block.

It actually doesn't do that, and it can't do that with the current
data structures. If we used up all of *first, we would need to change
the ->next of the previous block to point to *first->next, but we
can't do that because it's a singly linked list and we don't have
access to *first's previous block.

What grub_real_malloc() actually does is set *first to the initial
previous block, and *first->next is the block we try to allocate
from. That allows us to keep all the data structures consistent.

Document that.

Signed-off-by: Daniel Axtens <dja@axtens.net>
Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
(cherry picked from commit 246ad6a44c281bb13486ddea0a26bb661db73106)
---
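Note (not part of the upstream commit): the paragraphs above describe why a
singly linked ring forces grub_real_malloc() to walk the free list as a
(prev, cur) pair and to try (*first)->next first and *first itself last. The
standalone sketch below illustrates only that constraint; struct node and
take_first_fit() are hypothetical simplifications, not GRUB's grub_mm_header
or its real allocator.

#include <stdio.h>
#include <stddef.h>

struct node                     /* hypothetical stand-in for a free-list header */
{
  struct node *next;            /* singly linked ring: no back pointer */
  size_t size;                  /* free units in this block */
};

/* Unlink and return the first block of at least SIZE units from the ring
   rooted at *FIRST, or return NULL after one full lap.  The unlink line is
   the point of the exercise: prev->next = cur->next needs PREV, which is
   why the loop cannot simply start at *first and consume it.  */
static struct node *
take_first_fit (struct node **first, size_t size)
{
  struct node *prev, *cur;

  for (prev = *first, cur = prev->next; ; prev = cur, cur = cur->next)
    {
      if (cur->size >= size)
        {
          prev->next = cur->next;   /* possible only because we kept prev */
          if (cur == *first)        /* keep *first pointing at a free block */
            *first = prev;
          return cur;
        }
      if (cur == *first)            /* completed a lap: nothing fits */
        return NULL;
    }
}

int
main (void)
{
  struct node a = { .size = 2 }, b = { .size = 8 }, c = { .size = 4 };
  struct node *first = &a;

  /* One-directional ring: a -> b -> c -> a, with the search starting at
     first->next (b), exactly as the new comment in mm.c documents.  */
  a.next = &b;
  b.next = &c;
  c.next = &a;

  struct node *got = take_first_fit (&first, 4);
  printf ("took a block of %zu units\n", got ? got->size : (size_t) 0);
  return 0;
}

The degenerate single-block ring (a block whose next points to itself) is
deliberately ignored here; grub_real_malloc() handles that situation via the
alloc-magic check on *first mentioned in the context lines of the diff.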
 grub-core/kern/mm.c | 76 +++++++++++++++++++++++++++++------------------------
 1 file changed, 41 insertions(+), 35 deletions(-)

diff --git a/grub-core/kern/mm.c b/grub-core/kern/mm.c
index d8c8377578..fb20e93acf 100644
--- a/grub-core/kern/mm.c
+++ b/grub-core/kern/mm.c
@@ -178,13 +178,20 @@ grub_mm_init_region (void *addr, grub_size_t size)
 }
 
 /* Allocate the number of units N with the alignment ALIGN from the ring
-   buffer starting from *FIRST.  ALIGN must be a power of two. Both N and
-   ALIGN are in units of GRUB_MM_ALIGN.  Return a non-NULL if successful,
-   otherwise return NULL.  */
+ * buffer given in *FIRST.  ALIGN must be a power of two. Both N and
+ * ALIGN are in units of GRUB_MM_ALIGN.  Return a non-NULL if successful,
+ * otherwise return NULL.
+ *
+ * Note: because in certain circumstances we need to adjust the ->next
+ * pointer of the previous block, we iterate over the singly linked
+ * list with the pair (prev, cur). *FIRST is our initial previous, and
+ * *FIRST->next is our initial current pointer. So we will actually
+ * allocate from *FIRST->next first and *FIRST itself last.
+ */
 static void *
 grub_real_malloc (grub_mm_header_t *first, grub_size_t n, grub_size_t align)
 {
-  grub_mm_header_t p, q;
+  grub_mm_header_t cur, prev;
 
   /* When everything is allocated side effect is that *first will have alloc
      magic marked, meaning that there is no room in this region.  */
@@ -192,24 +199,24 @@ grub_real_malloc (grub_mm_header_t *first, grub_size_t n, grub_size_t align)
     return 0;
 
   /* Try to search free slot for allocation in this memory region.  */
-  for (q = *first, p = q->next; ; q = p, p = p->next)
+  for (prev = *first, cur = prev->next; ; prev = cur, cur = cur->next)
     {
       grub_off_t extra;
 
-      extra = ((grub_addr_t) (p + 1) >> GRUB_MM_ALIGN_LOG2) & (align - 1);
+      extra = ((grub_addr_t) (cur + 1) >> GRUB_MM_ALIGN_LOG2) & (align - 1);
       if (extra)
 	extra = align - extra;
 
-      if (! p)
+      if (! cur)
 	grub_fatal ("null in the ring");
 
-      if (p->magic != GRUB_MM_FREE_MAGIC)
-	grub_fatal ("free magic is broken at %p: 0x%x", p, p->magic);
+      if (cur->magic != GRUB_MM_FREE_MAGIC)
+	grub_fatal ("free magic is broken at %p: 0x%x", cur, cur->magic);
 
-      if (p->size >= n + extra)
+      if (cur->size >= n + extra)
 	{
-	  extra += (p->size - extra - n) & (~(align - 1));
-	  if (extra == 0 && p->size == n)
+	  extra += (cur->size - extra - n) & (~(align - 1));
+	  if (extra == 0 && cur->size == n)
 	    {
 	      /* There is no special alignment requirement and memory block
 	         is complete match.
@@ -222,9 +229,9 @@ grub_real_malloc (grub_mm_header_t *first, grub_size_t n, grub_size_t align)
 	         | alloc, size=n |          |
 	         +---------------+          v
 	       */
-	      q->next = p->next;
+	      prev->next = cur->next;
 	    }
-	  else if (align == 1 || p->size == n + extra)
+	  else if (align == 1 || cur->size == n + extra)
 	    {
 	      /* There might be alignment requirement, when taking it into
 	         account memory block fits in.
@@ -241,23 +248,22 @@ grub_real_malloc (grub_mm_header_t *first, grub_size_t n, grub_size_t align)
 	         | alloc, size=n |        |
 	         +---------------+        v
 	       */
-
-	      p->size -= n;
-	      p += p->size;
+	      cur->size -= n;
+	      cur += cur->size;
 	    }
 	  else if (extra == 0)
 	    {
 	      grub_mm_header_t r;
 	      
-	      r = p + extra + n;
+	      r = cur + extra + n;
 	      r->magic = GRUB_MM_FREE_MAGIC;
-	      r->size = p->size - extra - n;
-	      r->next = p->next;
-	      q->next = r;
+	      r->size = cur->size - extra - n;
+	      r->next = cur->next;
+	      prev->next = r;
 
-	      if (q == p)
+	      if (prev == cur)
 		{
-		  q = r;
+		  prev = r;
 		  r->next = r;
 		}
 	    }
@@ -284,32 +290,32 @@ grub_real_malloc (grub_mm_header_t *first, grub_size_t n, grub_size_t align)
 	       */
 	      grub_mm_header_t r;
 
-	      r = p + extra + n;
+	      r = cur + extra + n;
 	      r->magic = GRUB_MM_FREE_MAGIC;
-	      r->size = p->size - extra - n;
-	      r->next = p;
+	      r->size = cur->size - extra - n;
+	      r->next = cur;
 
-	      p->size = extra;
-	      q->next = r;
-	      p += extra;
+	      cur->size = extra;
+	      prev->next = r;
+	      cur += extra;
 	    }
 
-	  p->magic = GRUB_MM_ALLOC_MAGIC;
-	  p->size = n;
+	  cur->magic = GRUB_MM_ALLOC_MAGIC;
+	  cur->size = n;
 
 	  /* Mark find as a start marker for next allocation to fasten it.
 	     This will have side effect of fragmenting memory as small
 	     pieces before this will be un-used.  */
 	  /* So do it only for chunks under 64K.  */
 	  if (n < (0x8000 >> GRUB_MM_ALIGN_LOG2)
-	      || *first == p)
-	    *first = q;
+	      || *first == cur)
+	    *first = prev;
 
-	  return p + 1;
+	  return cur + 1;
 	}
 
       /* Search was completed without result.  */
-      if (p == *first)
+      if (cur == *first)
 	break;
     }
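Note (not part of the upstream commit): the hunks above also show how the
allocator computes "extra", the padding in GRUB_MM_ALIGN-sized units that
makes the returned data pointer (cur + 1) honour the requested alignment and
then pushes the allocation toward the tail of the free block. The sketch
below reruns that arithmetic on plain integers; GRUB_MM_ALIGN_LOG2 and the
sample address and sizes are assumed illustration values, not taken from a
real GRUB build.

#include <stdio.h>
#include <stdint.h>

#define GRUB_MM_ALIGN_LOG2 5                    /* assumed cell size: 32 bytes */
#define GRUB_MM_ALIGN      (1UL << GRUB_MM_ALIGN_LOG2)

int
main (void)
{
  uintptr_t hdr = 0x10020;   /* hypothetical address of a free block header */
  uintptr_t size = 40;       /* free units in that block                    */
  uintptr_t n = 8;           /* requested units                             */
  uintptr_t align = 4;       /* requested alignment, in units               */

  /* Same computation as the patch: how far (in units) the data area, one
     header unit past hdr ("cur + 1"), sits from the next align boundary.  */
  uintptr_t extra = ((hdr + GRUB_MM_ALIGN) >> GRUB_MM_ALIGN_LOG2) & (align - 1);
  if (extra)
    extra = align - extra;
  printf ("padding needed for alignment: %lu units\n", (unsigned long) extra);

  if (size >= n + extra)
    {
      /* Push the allocation as far toward the end of the block as possible:
         moving it only by whole multiples of align keeps the data pointer
         aligned, and everything in front of it stays on the free list.  */
      extra += (size - extra - n) & ~(align - 1);
      printf ("allocation starts %lu units into the block, "
              "%lu free units remain behind it\n",
              (unsigned long) extra, (unsigned long) (size - extra - n));
    }
  return 0;
}

With these numbers the data area sits two units past a four-unit boundary, so
two units of padding bring it to the next one, and a further 28 units are
skipped so that the eight allocated units land near the end of the 40-unit
block, leaving two free units behind them.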