public inbox for glibc-cvs@sourceware.org
* [glibc/arm/morello/main] cheri: elf: Setup per module RX and RW capabilities
@ 2022-10-26 15:19 Szabolcs Nagy
From: Szabolcs Nagy @ 2022-10-26 15:19 UTC
  To: glibc-cvs

https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=48958cf812c955026011da8134f2b3a46cfb8457

commit 48958cf812c955026011da8134f2b3a46cfb8457
Author: Szabolcs Nagy <szabolcs.nagy@arm.com>
Date:   Thu Apr 7 08:43:00 2022 +0100

    cheri: elf: Setup per module RX and RW capabilities
    
    _dl_map_segments must use capabilities; this required changes beyond
    the obvious elfptr_t type changes:
    
    - Ensure map_end is derived from map_start, so the pointer used to
      unmap the tail of the reservation carries valid capability bounds.
    
    - Use strict mmap bounds with MAP_FIXED: c->mapend is aligned up to
      the page size, which may be out of bounds of l_map_start (the
      capability covers the load segments, but its bounds are not
      aligned up), so use c->dataend instead.
    
    Propagate the l_map_start and l_rw_start capabilities of ld.so and
    the exe that come from the auxv, and ensure they are not recomputed
    incorrectly by ld.so.
    
    The l_rw_range should exclude the relro region, but for libc.so and
    ld.so this does not work: symbols are accessed before relro is
    applied, and at that point the permission must still be writable.
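
    As an editorial illustration of the first point (a sketch, not part
    of the commit): in the over-aligned path, map_start_aligned comes
    from a second MAP_FIXED mmap whose capability spans only maplength
    bytes, so the end-of-reservation pointer must be rebuilt from the
    original map_start capability before the unused tail can be
    unmapped:

        /* Sketch, mirroring the hunk below.  The subtraction yields a
           plain offset, so map_end inherits map_start's capability
           bounds, which cover the whole PROT_NONE reservation.  */
        elfptr_t map_end = map_start + (map_start_aligned - map_start)
                           + maplength;
        map_end = ALIGN_UP (map_end, GLRO(dl_pagesize));
        /* Unmap the tail of the reservation past map_end.  */
        if (map_start + maplen - map_end != 0)
          __munmap ((void *) map_end, map_start + maplen - map_end);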

Diff:
---
 elf/dl-map-segments.h | 72 ++++++++++++++++++++++++++++++++++++++++++---------
 elf/rtld.c            | 51 ++++++++++++++++++++++++++++++++++++
 2 files changed, 111 insertions(+), 12 deletions(-)

diff --git a/elf/dl-map-segments.h b/elf/dl-map-segments.h
index 024175b2d5..ce51585ce4 100644
--- a/elf/dl-map-segments.h
+++ b/elf/dl-map-segments.h
@@ -18,15 +18,18 @@
    <https://www.gnu.org/licenses/>.  */
 
 #include <dl-load.h>
+#ifdef __CHERI_PURE_CAPABILITY__
+# include <cheri_perms.h>
+#endif
 
 /* Map a segment and align it properly.  */
 
-static __always_inline ElfW(Addr)
+static __always_inline elfptr_t
 _dl_map_segment (const struct loadcmd *c, ElfW(Addr) mappref,
 		 const size_t maplength, int fd)
 {
   if (__glibc_likely (c->mapalign <= GLRO(dl_pagesize)))
-    return (ElfW(Addr)) __mmap ((void *) mappref, maplength, c->prot,
+    return (elfptr_t) __mmap ((void *) mappref, maplength, c->prot,
 				MAP_COPY|MAP_FILE, fd, c->mapoff);
 
   /* If the segment alignment > the page size, allocate enough space to
@@ -34,15 +37,15 @@ _dl_map_segment (const struct loadcmd *c, ElfW(Addr) mappref,
   ElfW(Addr) maplen = (maplength >= c->mapalign
 		       ? (maplength + c->mapalign)
 		       : (2 * c->mapalign));
-  ElfW(Addr) map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplen,
+  elfptr_t map_start = (elfptr_t) __mmap ((void *) mappref, maplen,
 					      PROT_NONE,
 					      MAP_ANONYMOUS|MAP_PRIVATE,
 					      -1, 0);
   if (__glibc_unlikely ((void *) map_start == MAP_FAILED))
     return map_start;
 
-  ElfW(Addr) map_start_aligned = ALIGN_UP (map_start, c->mapalign);
-  map_start_aligned = (ElfW(Addr)) __mmap ((void *) map_start_aligned,
+  elfptr_t map_start_aligned = ALIGN_UP (map_start, c->mapalign);
+  map_start_aligned = (elfptr_t) __mmap ((void *) map_start_aligned,
 					   maplength, c->prot,
 					   MAP_COPY|MAP_FILE|MAP_FIXED,
 					   fd, c->mapoff);
@@ -54,7 +57,7 @@ _dl_map_segment (const struct loadcmd *c, ElfW(Addr) mappref,
       ElfW(Addr) delta = map_start_aligned - map_start;
       if (delta)
 	__munmap ((void *) map_start, delta);
-      ElfW(Addr) map_end = map_start_aligned + maplength;
+      elfptr_t map_end = map_start + (map_start_aligned - map_start) + maplength;
       map_end = ALIGN_UP (map_end, GLRO(dl_pagesize));
       delta = map_start + maplen - map_end;
       if (delta)
@@ -79,6 +82,10 @@ _dl_map_segments (struct link_map *l, int fd,
                   struct link_map *loader)
 {
   const struct loadcmd *c = loadcmds;
+#ifdef __CHERI_PURE_CAPABILITY__
+  ElfW(Addr) rw_start = -1;
+  ElfW(Addr) rw_end = 0;
+#endif
 
   if (__glibc_likely (type == ET_DYN))
     {
@@ -116,7 +123,7 @@ _dl_map_segments (struct link_map *l, int fd,
 				c->mapend))
 	    return N_("ELF load command address/offset not page-aligned");
           if (__glibc_unlikely
-              (__mprotect ((caddr_t) (l->l_addr + c->mapend),
+              (__mprotect ((caddr_t) dl_rx_ptr (l, c->mapend),
                            loadcmds[nloadcmds - 1].mapstart - c->mapend,
                            PROT_NONE) < 0))
             return DL_MAP_SEGMENTS_ERROR_MPROTECT;
@@ -126,6 +133,22 @@ _dl_map_segments (struct link_map *l, int fd,
 
       goto postmap;
     }
+#ifdef __CHERI_PURE_CAPABILITY__
+  else
+    {
+      /* Need a single capability to cover all load segments.  */
+      void *p = __mmap ((void *) c->mapstart, maplength, c->prot,
+                        MAP_FIXED|MAP_COPY|MAP_FILE,
+                        fd, c->mapoff);
+      if (p == MAP_FAILED)
+        return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
+      l->l_map_start = (elfptr_t) p;
+      l->l_map_end = l->l_map_start + maplength;
+      l->l_contiguous = !has_holes;
+
+      goto postmap;
+    }
+#endif
 
   /* Remember which part of the address space this object uses.  */
   l->l_map_start = c->mapstart + l->l_addr;
@@ -134,10 +157,10 @@ _dl_map_segments (struct link_map *l, int fd,
 
   while (c < &loadcmds[nloadcmds])
     {
-      if (c->mapend > c->mapstart
+      if (c->dataend > c->mapstart
           /* Map the segment contents from the file.  */
-          && (__mmap ((void *) (l->l_addr + c->mapstart),
-                      c->mapend - c->mapstart, c->prot,
+          && (__mmap ((void *) dl_rx_ptr (l, c->mapstart),
+                      c->dataend - c->mapstart, c->prot,
                       MAP_FIXED|MAP_COPY|MAP_FILE,
                       fd, c->mapoff)
               == MAP_FAILED))
@@ -146,13 +169,28 @@ _dl_map_segments (struct link_map *l, int fd,
     postmap:
       _dl_postprocess_loadcmd (l, header, c);
 
+#ifdef __CHERI_PURE_CAPABILITY__
+      if (c->prot & PROT_WRITE)
+	{
+          if (l->l_rw_count >= DL_MAX_RW_COUNT)
+	    return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT; // TODO: right error code
+	  if (c->mapstart < rw_start)
+	    rw_start = c->mapstart;
+	  if (c->allocend > rw_end)
+	    rw_end = c->allocend;
+	  l->l_rw_range[l->l_rw_count].start = l->l_addr + c->mapstart;
+	  l->l_rw_range[l->l_rw_count].end = l->l_addr + c->allocend;
+	  l->l_rw_count++;
+	}
+#endif
+
       if (c->allocend > c->dataend)
         {
           /* Extra zero pages should appear at the end of this segment,
              after the data mapped from the file.   */
-          ElfW(Addr) zero, zeroend, zeropage;
+	  elfptr_t zero, zeroend, zeropage;
 
-          zero = l->l_addr + c->dataend;
+          zero = dl_rx_ptr (l, c->dataend);
           zeroend = l->l_addr + c->allocend;
           zeropage = ((zero + GLRO(dl_pagesize) - 1)
                       & ~(GLRO(dl_pagesize) - 1));
@@ -194,6 +232,16 @@ _dl_map_segments (struct link_map *l, int fd,
       ++c;
     }
 
+#ifdef __CHERI_PURE_CAPABILITY__
+  if (l->l_rw_count > 0)
+    {
+      l->l_rw_start = __builtin_cheri_address_set (l->l_map_start, l->l_addr + rw_start);
+      l->l_rw_start = __builtin_cheri_bounds_set (l->l_rw_start, rw_end - rw_start);
+      l->l_rw_start = __builtin_cheri_perms_and (l->l_rw_start, CAP_PERM_MASK_RW);
+    }
+  l->l_map_start = __builtin_cheri_perms_and (l->l_map_start, CAP_PERM_MASK_RX);
+#endif
+
   /* Notify ELF_PREFERRED_ADDRESS that we have to load this one
      fixed.  */
   ELF_FIXED_ADDRESS (loader, c->mapstart);
diff --git a/elf/rtld.c b/elf/rtld.c
index 205df43bb2..26af99305e 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -474,10 +474,19 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
 	  sizeof GL(dl_rtld_map).l_info);
   GL(dl_rtld_map).l_mach = info->l.l_mach;
   GL(dl_rtld_map).l_relocated = 1;
+# ifdef __CHERI_PURE_CAPABILITY__
+  GL(dl_rtld_map).l_map_start = info->l.l_map_start;
+  GL(dl_rtld_map).l_rw_start = info->l.l_rw_start;
+  GL(dl_rtld_map).l_rw_count = info->l.l_rw_count;
+  for (int i = 0; i < info->l.l_rw_count; i++)
+    GL(dl_rtld_map).l_rw_range[i] = info->l.l_rw_range[i];
+# endif
 #endif
   _dl_setup_hash (&GL(dl_rtld_map));
   GL(dl_rtld_map).l_real = &GL(dl_rtld_map);
+#ifndef __CHERI_PURE_CAPABILITY__
   GL(dl_rtld_map).l_map_start = (ElfW(Addr)) &__ehdr_start;
+#endif
   GL(dl_rtld_map).l_map_end = (ElfW(Addr)) _end;
   GL(dl_rtld_map).l_text_end = (ElfW(Addr)) _etext;
   /* Copy the TLS related data if necessary.  */
@@ -542,11 +551,16 @@ _dl_start (void *arg)
 # endif
 #endif
 
+#ifdef __CHERI_PURE_CAPABILITY__
+  elf_machine_rtld_base_setup (&bootstrap_map, arg);
+  bootstrap_map.l_ld = elf_machine_runtime_dynamic ();
+#else
   /* Figure out the run-time load address of the dynamic linker itself.  */
   bootstrap_map.l_addr = elf_machine_load_address ();
 
   /* Read our own dynamic section and fill in the info array.  */
   bootstrap_map.l_ld = (void *) bootstrap_map.l_addr + elf_machine_dynamic ();
+#endif
   bootstrap_map.l_ld_readonly = DL_RO_DYN_SECTION;
   elf_get_dynamic_info (&bootstrap_map, true, false);
 
@@ -1125,8 +1139,13 @@ rtld_setup_main_map (struct link_map *main_map)
 
   main_map->l_map_end = 0;
   main_map->l_text_end = 0;
+#ifndef __CHERI_PURE_CAPABILITY__
   /* Perhaps the executable has no PT_LOAD header entries at all.  */
   main_map->l_map_start = ~0;
+#else
+  /* May be computed already when exe is loaded by ld.so.  */
+  main_map->l_rw_count = 0;
+#endif
   /* And it was opened directly.  */
   ++main_map->l_direct_opencount;
   main_map->l_contiguous = 1;
@@ -1205,8 +1224,10 @@ rtld_setup_main_map (struct link_map *main_map)
 	  /* Remember where the main program starts in memory.  */
 	  mapstart = (main_map->l_addr
 		      + (ph->p_vaddr & ~(GLRO(dl_pagesize) - 1)));
+#ifndef __CHERI_PURE_CAPABILITY__
 	  if (main_map->l_map_start > mapstart)
 	    main_map->l_map_start = mapstart;
+#endif
 
 	  if (main_map->l_contiguous && expected_load_address != 0
 	      && expected_load_address != mapstart)
@@ -1223,6 +1244,15 @@ rtld_setup_main_map (struct link_map *main_map)
 	     segment.  */
 	  expected_load_address = ((allocend + GLRO(dl_pagesize) - 1)
 				   & ~(GLRO(dl_pagesize) - 1));
+#ifdef __CHERI_PURE_CAPABILITY__
+	  if (ph->p_flags & PF_W)
+	    {
+	      assert (main_map->l_rw_count < DL_MAX_RW_COUNT);
+	      main_map->l_rw_range[main_map->l_rw_count].start = mapstart;
+	      main_map->l_rw_range[main_map->l_rw_count].end = allocend;
+	      main_map->l_rw_count++;
+	    }
+#endif
 	}
 	break;
 
@@ -1635,6 +1665,14 @@ dl_main (const ElfW(Phdr) *phdr,
 	  case AT_EXECFN:
 	    av->a_un.a_val = (uintptr_t) _dl_argv[0];
 	    break;
+# ifdef __CHERI_PURE_CAPABILITY__
+	  case AT_CHERI_EXEC_RX_CAP:
+	    av->a_un.a_val = main_map->l_map_start;
+	    break;
+	  case AT_CHERI_EXEC_RW_CAP:
+	    av->a_un.a_val = main_map->l_rw_start;
+	    break;
+# endif
 	  }
 #endif
 
@@ -1678,6 +1716,19 @@ dl_main (const ElfW(Phdr) *phdr,
 
       /* We delay initializing the path structure until we got the dynamic
 	 information for the program.  */
+
+#ifdef __CHERI_PURE_CAPABILITY__
+      for (ElfW(auxv_t) *av = auxv; av->a_type != AT_NULL; av++)
+	switch (av->a_type)
+	  {
+	  case AT_CHERI_EXEC_RX_CAP:
+	    main_map->l_map_start = av->a_un.a_val;
+	    break;
+	  case AT_CHERI_EXEC_RW_CAP:
+	    main_map->l_rw_start = av->a_un.a_val;
+	    break;
+	  }
+#endif
     }
 
   bool has_interp = rtld_setup_main_map (main_map);

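A hedged sketch of how the RW ranges recorded above might be consumed
(the helper below is hypothetical and not part of this commit; the
fields l_rw_start, l_rw_count and l_rw_range are those set up in
_dl_map_segments):

    /* Hypothetical helper: derive a writable pointer for an absolute
       address inside module l from its RW capability.  */
    static inline elfptr_t
    dl_rw_ptr_example (struct link_map *l, ElfW(Addr) addr)
    {
      for (int i = 0; i < l->l_rw_count; i++)
        if (addr >= l->l_rw_range[i].start && addr < l->l_rw_range[i].end)
          return __builtin_cheri_address_set (l->l_rw_start, addr);
      return 0;  /* Not within this module's writable segments.  */
    }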

* [glibc/arm/morello/main] cheri: elf: Setup per module RX and RW capabilities
@ 2022-11-23 14:47 Szabolcs Nagy
From: Szabolcs Nagy @ 2022-11-23 14:47 UTC
  To: glibc-cvs

https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=0b5f254b4daa0307a898b17eeb994a84e5e1a89f

commit 0b5f254b4daa0307a898b17eeb994a84e5e1a89f
Author: Szabolcs Nagy <szabolcs.nagy@arm.com>
Date:   Thu Apr 7 08:43:00 2022 +0100

    cheri: elf: Setup per module RX and RW capabilities

    (The commit message and diff are identical to those of the
    2022-10-26 15:19 message above.)

* [glibc/arm/morello/main] cheri: elf: Setup per module RX and RW capabilities
@ 2022-10-27 13:57 Szabolcs Nagy
From: Szabolcs Nagy @ 2022-10-27 13:57 UTC
  To: glibc-cvs

https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=5eedf66625842a56c0ed7e16a1f79fda4b52b425

commit 5eedf66625842a56c0ed7e16a1f79fda4b52b425
Author: Szabolcs Nagy <szabolcs.nagy@arm.com>
Date:   Thu Apr 7 08:43:00 2022 +0100

    cheri: elf: Setup per module RX and RW capabilities

    (The commit message and diff are identical to those of the
    2022-10-26 15:19 message above.)

* [glibc/arm/morello/main] cheri: elf: Setup per module RX and RW capabilities
@ 2022-10-12 14:17 Szabolcs Nagy
From: Szabolcs Nagy @ 2022-10-12 14:17 UTC
  To: glibc-cvs

https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=a66d563c9e33cffbf646e1327bdd73423a10ef76

commit a66d563c9e33cffbf646e1327bdd73423a10ef76
Author: Szabolcs Nagy <szabolcs.nagy@arm.com>
Date:   Thu Sep 1 09:45:30 2022 +0100

    cheri: elf: Setup per module RX and RW capabilities
    
    The l_map_start and l_rw_start of ld.so and the exe come from the
    auxv, since they are normally mapped by the kernel.  Some generic
    code had to be modified so that l_map_start is propagated and not
    overwritten when it is recomputed.
    
    The l_rw_range should exclude the relro region, but for libc.so and
    ld.so this does not work: symbols are accessed before relro is
    applied, and at that point the permission must still be writable.
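
    For orientation, a minimal editorial sketch (assuming the standard
    CHERI compiler builtins) of how the capabilities received via the
    auxv could be inspected; it is not part of the commit:

        /* The exe's RX capability arrives in AT_CHERI_EXEC_RX_CAP and
           is stored in l_map_start; its metadata can be examined with
           the generic CHERI builtins.  */
        elfptr_t rx = main_map->l_map_start;
        /* Base and length reflect the kernel's mapping of the exe.  */
        unsigned long rx_base = __builtin_cheri_base_get (rx);
        unsigned long rx_len  = __builtin_cheri_length_get (rx);
        /* l_rw_start is similarly taken from AT_CHERI_EXEC_RW_CAP and
           should be bounded to the writable segments only.  */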

Diff:
---
 elf/dl-map-segments.h | 44 ++++++++++++++++++++++++++++++++++++++++++++
 elf/rtld.c            | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 95 insertions(+)

diff --git a/elf/dl-map-segments.h b/elf/dl-map-segments.h
index 5439c20233..4ba1c71c73 100644
--- a/elf/dl-map-segments.h
+++ b/elf/dl-map-segments.h
@@ -18,6 +18,9 @@
    <https://www.gnu.org/licenses/>.  */
 
 #include <dl-load.h>
+#ifdef __CHERI_PURE_CAPABILITY__
+# include <cheri_perms.h>
+#endif
 
 /* Map a segment and align it properly.  */
 
@@ -79,6 +82,10 @@ _dl_map_segments (struct link_map *l, int fd,
                   struct link_map *loader)
 {
   const struct loadcmd *c = loadcmds;
+#ifdef __CHERI_PURE_CAPABILITY__
+  ElfW(Addr) rw_start = -1;
+  ElfW(Addr) rw_end = 0;
+#endif
 
   if (__glibc_likely (type == ET_DYN))
     {
@@ -129,6 +136,16 @@ _dl_map_segments (struct link_map *l, int fd,
 #ifdef __CHERI_PURE_CAPABILITY__
   else
     {
+      /* Need a single capability to cover all load segments.  */
+      void *p = __mmap ((void *) c->mapstart, maplength, c->prot,
+                        MAP_FIXED|MAP_COPY|MAP_FILE,
+                        fd, c->mapoff);
+      if (p == MAP_FAILED)
+        return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
+      l->l_map_start = (elfptr_t) p;
+      l->l_map_end = l->l_map_start + maplength;
+      l->l_contiguous = !has_holes;
+
       /* TODO: l_addr is 0 in an exe, but it should cover the load segments.  */
       uintptr_t l_addr = 0;
       unsigned long allocend = ALIGN_UP (loadcmds[nloadcmds - 1].allocend,
@@ -136,6 +153,8 @@ _dl_map_segments (struct link_map *l, int fd,
       asm volatile ("cvtd %0, %x0" : "+r"(l_addr));
       asm volatile ("scbnds %0, %0, %x1" : "+r"(l_addr) : "r"(allocend));
       l->l_addr = l_addr;
+
+      goto postmap;
     }
 #endif
 
@@ -158,6 +177,21 @@ _dl_map_segments (struct link_map *l, int fd,
     postmap:
       _dl_postprocess_loadcmd (l, header, c);
 
+#ifdef __CHERI_PURE_CAPABILITY__
+      if (c->prot & PROT_WRITE)
+	{
+          if (l->l_rw_count >= DL_MAX_RW_COUNT)
+	    return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT; // TODO: right error code
+	  if (c->mapstart < rw_start)
+	    rw_start = c->mapstart;
+	  if (c->allocend > rw_end)
+	    rw_end = c->allocend;
+	  l->l_rw_range[l->l_rw_count].start = l->l_addr + c->mapstart;
+	  l->l_rw_range[l->l_rw_count].end = l->l_addr + c->allocend;
+	  l->l_rw_count++;
+	}
+#endif
+
       if (c->allocend > c->dataend)
         {
           /* Extra zero pages should appear at the end of this segment,
@@ -206,6 +240,16 @@ _dl_map_segments (struct link_map *l, int fd,
       ++c;
     }
 
+#ifdef __CHERI_PURE_CAPABILITY__
+  if (l->l_rw_count > 0)
+    {
+      l->l_rw_start = __builtin_cheri_address_set (l->l_map_start, l->l_addr + rw_start);
+      l->l_rw_start = __builtin_cheri_bounds_set (l->l_rw_start, rw_end - rw_start);
+      l->l_rw_start = __builtin_cheri_perms_and (l->l_rw_start, CAP_PERM_MASK_RW);
+    }
+  l->l_map_start = __builtin_cheri_perms_and (l->l_map_start, CAP_PERM_MASK_RX);
+#endif
+
   /* Notify ELF_PREFERRED_ADDRESS that we have to load this one
      fixed.  */
   ELF_FIXED_ADDRESS (loader, c->mapstart);
diff --git a/elf/rtld.c b/elf/rtld.c
index 3a1f32ea7c..007e938b90 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -474,10 +474,19 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
 	  sizeof GL(dl_rtld_map).l_info);
   GL(dl_rtld_map).l_mach = info->l.l_mach;
   GL(dl_rtld_map).l_relocated = 1;
+# ifdef __CHERI_PURE_CAPABILITY__
+  GL(dl_rtld_map).l_map_start = info->l.l_map_start;
+  GL(dl_rtld_map).l_rw_start = info->l.l_rw_start;
+  GL(dl_rtld_map).l_rw_count = info->l.l_rw_count;
+  for (int i = 0; i < info->l.l_rw_count; i++)
+    GL(dl_rtld_map).l_rw_range[i] = info->l.l_rw_range[i];
+# endif
 #endif
   _dl_setup_hash (&GL(dl_rtld_map));
   GL(dl_rtld_map).l_real = &GL(dl_rtld_map);
+#ifndef __CHERI_PURE_CAPABILITY__
   GL(dl_rtld_map).l_map_start = (ElfW(Addr)) &__ehdr_start;
+#endif
   GL(dl_rtld_map).l_map_end = (ElfW(Addr)) _end;
   GL(dl_rtld_map).l_text_end = (ElfW(Addr)) _etext;
   /* Copy the TLS related data if necessary.  */
@@ -543,6 +552,7 @@ _dl_start (void *arg)
 #endif
 
 #ifdef __CHERI_PURE_CAPABILITY__
+  elf_machine_rtld_base_setup (&bootstrap_map, arg);
   bootstrap_map.l_addr = elf_machine_load_address_from_args (arg);
   bootstrap_map.l_ld = elf_machine_runtime_dynamic ();
 #else
@@ -1130,8 +1140,13 @@ rtld_setup_main_map (struct link_map *main_map)
 
   main_map->l_map_end = 0;
   main_map->l_text_end = 0;
+#ifndef __CHERI_PURE_CAPABILITY__
   /* Perhaps the executable has no PT_LOAD header entries at all.  */
   main_map->l_map_start = ~0;
+#else
+  /* May be computed already when exe is loaded by ld.so.  */
+  main_map->l_rw_count = 0;
+#endif
   /* And it was opened directly.  */
   ++main_map->l_direct_opencount;
   main_map->l_contiguous = 1;
@@ -1158,6 +1173,10 @@ rtld_setup_main_map (struct link_map *main_map)
       case PT_PHDR:
 	/* Find out the load address.  */
 	main_map->l_addr = (elfptr_t) phdr - ph->p_vaddr;
+#ifdef __CHERI_PURE_CAPABILITY__
+	// TODO: we still need laddr
+	asm volatile ("cvtd %0, %x0" : "+r"(main_map->l_addr));
+#endif
 	break;
       case PT_DYNAMIC:
 	/* This tells us where to find the dynamic section,
@@ -1210,8 +1229,10 @@ rtld_setup_main_map (struct link_map *main_map)
 	  /* Remember where the main program starts in memory.  */
 	  mapstart = (main_map->l_addr
 		      + (ph->p_vaddr & ~(GLRO(dl_pagesize) - 1)));
+#ifndef __CHERI_PURE_CAPABILITY__
 	  if (main_map->l_map_start > mapstart)
 	    main_map->l_map_start = mapstart;
+#endif
 
 	  if (main_map->l_contiguous && expected_load_address != 0
 	      && expected_load_address != mapstart)
@@ -1228,6 +1249,15 @@ rtld_setup_main_map (struct link_map *main_map)
 	     segment.  */
 	  expected_load_address = ((allocend + GLRO(dl_pagesize) - 1)
 				   & ~(GLRO(dl_pagesize) - 1));
+#ifdef __CHERI_PURE_CAPABILITY__
+	  if (ph->p_flags & PF_W)
+	    {
+	      assert (main_map->l_rw_count < DL_MAX_RW_COUNT);
+	      main_map->l_rw_range[main_map->l_rw_count].start = mapstart;
+	      main_map->l_rw_range[main_map->l_rw_count].end = allocend;
+	      main_map->l_rw_count++;
+	    }
+#endif
 	}
 	break;
 
@@ -1640,6 +1670,14 @@ dl_main (const ElfW(Phdr) *phdr,
 	  case AT_EXECFN:
 	    av->a_un.a_val = (uintptr_t) _dl_argv[0];
 	    break;
+# ifdef __CHERI_PURE_CAPABILITY__
+	  case AT_CHERI_EXEC_RX_CAP:
+	    av->a_un.a_val = main_map->l_map_start;
+	    break;
+	  case AT_CHERI_EXEC_RW_CAP:
+	    av->a_un.a_val = main_map->l_rw_start;
+	    break;
+# endif
 	  }
 #endif
 
@@ -1683,6 +1721,19 @@ dl_main (const ElfW(Phdr) *phdr,
 
       /* We delay initializing the path structure until we got the dynamic
 	 information for the program.  */
+
+#ifdef __CHERI_PURE_CAPABILITY__
+      for (ElfW(auxv_t) *av = auxv; av->a_type != AT_NULL; av++)
+	switch (av->a_type)
+	  {
+	  case AT_CHERI_EXEC_RX_CAP:
+	    main_map->l_map_start = av->a_un.a_val;
+	    break;
+	  case AT_CHERI_EXEC_RW_CAP:
+	    main_map->l_rw_start = av->a_un.a_val;
+	    break;
+	  }
+#endif
     }
 
   bool has_interp = rtld_setup_main_map (main_map);

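One editorial note on the inline asm in this version (instruction
semantics per the Arm Morello ISA, stated here as an assumption): cvtd
materialises a capability from the default data capability DDC for the
address held in l_addr, and scbnds then narrows its bounds, so l_addr
becomes a capability covering the exe's load segments:

    /* As in the hunk above (Morello-specific).  */
    asm volatile ("cvtd %0, %x0" : "+r"(l_addr));    /* cap from DDC  */
    asm volatile ("scbnds %0, %0, %x1"               /* narrow bounds */
                  : "+r"(l_addr) : "r"(allocend));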
