public inbox for glibc-cvs@sourceware.org
help / color / mirror / Atom feed
From: Szabolcs Nagy <nsz@sourceware.org>
To: glibc-cvs@sourceware.org
Subject: [glibc/arm/morello/main] cheri: elf: Setup per module RX and RW capabilities
Date: Wed, 12 Oct 2022 14:17:26 +0000 (GMT)	[thread overview]
Message-ID: <20221012141726.852613851157@sourceware.org> (raw)

https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=a66d563c9e33cffbf646e1327bdd73423a10ef76

commit a66d563c9e33cffbf646e1327bdd73423a10ef76
Author: Szabolcs Nagy <szabolcs.nagy@arm.com>
Date:   Thu Sep 1 09:45:30 2022 +0100

    cheri: elf: Setup per module RX and RW capabilities
    
    The l_map_start and l_rw_start of the ld.so and exe come from the auxv
    since they are normally mapped by the kernel.  Some generic code had to
    be modified so l_map_start is propagated and not overwritten when it is
    recomputed.
    
    The l_rw_range should exclude the relro region, but in libc.so and
    ld.so this does not work: symbols are accessed before relro protection
    is applied, so at that point the permission must still be writable.

Diff:
---
 elf/dl-map-segments.h | 44 ++++++++++++++++++++++++++++++++++++++++++++
 elf/rtld.c            | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 95 insertions(+)

diff --git a/elf/dl-map-segments.h b/elf/dl-map-segments.h
index 5439c20233..4ba1c71c73 100644
--- a/elf/dl-map-segments.h
+++ b/elf/dl-map-segments.h
@@ -18,6 +18,9 @@
    <https://www.gnu.org/licenses/>.  */
 
 #include <dl-load.h>
+#ifdef __CHERI_PURE_CAPABILITY__
+# include <cheri_perms.h>
+#endif
 
 /* Map a segment and align it properly.  */
 
@@ -79,6 +82,10 @@ _dl_map_segments (struct link_map *l, int fd,
                   struct link_map *loader)
 {
   const struct loadcmd *c = loadcmds;
+#ifdef __CHERI_PURE_CAPABILITY__
+  ElfW(Addr) rw_start = -1;
+  ElfW(Addr) rw_end = 0;
+#endif
 
   if (__glibc_likely (type == ET_DYN))
     {
@@ -129,6 +136,16 @@ _dl_map_segments (struct link_map *l, int fd,
 #ifdef __CHERI_PURE_CAPABILITY__
   else
     {
+      /* Need a single capability to cover all load segments.  */
+      void *p = __mmap ((void *) c->mapstart, maplength, c->prot,
+                        MAP_FIXED|MAP_COPY|MAP_FILE,
+                        fd, c->mapoff);
+      if (p == MAP_FAILED)
+        return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
+      l->l_map_start = (elfptr_t) p;
+      l->l_map_end = l->l_map_start + maplength;
+      l->l_contiguous = !has_holes;
+
       /* TODO: l_addr is 0 in an exe, but it should cover the load segments.  */
       uintptr_t l_addr = 0;
       unsigned long allocend = ALIGN_UP (loadcmds[nloadcmds - 1].allocend,
@@ -136,6 +153,8 @@ _dl_map_segments (struct link_map *l, int fd,
       asm volatile ("cvtd %0, %x0" : "+r"(l_addr));
       asm volatile ("scbnds %0, %0, %x1" : "+r"(l_addr) : "r"(allocend));
       l->l_addr = l_addr;
+
+      goto postmap;
     }
 #endif
 
@@ -158,6 +177,21 @@ _dl_map_segments (struct link_map *l, int fd,
     postmap:
       _dl_postprocess_loadcmd (l, header, c);
 
+#ifdef __CHERI_PURE_CAPABILITY__
+      if (c->prot & PROT_WRITE)
+	{
+          if (l->l_rw_count >= DL_MAX_RW_COUNT)
+	    return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT; // TODO: right error code
+	  if (c->mapstart < rw_start)
+	    rw_start = c->mapstart;
+	  if (c->allocend > rw_end)
+	    rw_end = c->allocend;
+	  l->l_rw_range[l->l_rw_count].start = l->l_addr + c->mapstart;
+	  l->l_rw_range[l->l_rw_count].end = l->l_addr + c->allocend;
+	  l->l_rw_count++;
+	}
+#endif
+
       if (c->allocend > c->dataend)
         {
           /* Extra zero pages should appear at the end of this segment,
@@ -206,6 +240,16 @@ _dl_map_segments (struct link_map *l, int fd,
       ++c;
     }
 
+#ifdef __CHERI_PURE_CAPABILITY__
+  if (l->l_rw_count > 0)
+    {
+      l->l_rw_start = __builtin_cheri_address_set (l->l_map_start, l->l_addr + rw_start);
+      l->l_rw_start = __builtin_cheri_bounds_set (l->l_rw_start, rw_end - rw_start);
+      l->l_rw_start = __builtin_cheri_perms_and (l->l_rw_start, CAP_PERM_MASK_RW);
+    }
+  l->l_map_start = __builtin_cheri_perms_and (l->l_map_start, CAP_PERM_MASK_RX);
+#endif
+
   /* Notify ELF_PREFERRED_ADDRESS that we have to load this one
      fixed.  */
   ELF_FIXED_ADDRESS (loader, c->mapstart);
diff --git a/elf/rtld.c b/elf/rtld.c
index 3a1f32ea7c..007e938b90 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -474,10 +474,19 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
 	  sizeof GL(dl_rtld_map).l_info);
   GL(dl_rtld_map).l_mach = info->l.l_mach;
   GL(dl_rtld_map).l_relocated = 1;
+# ifdef __CHERI_PURE_CAPABILITY__
+  GL(dl_rtld_map).l_map_start = info->l.l_map_start;
+  GL(dl_rtld_map).l_rw_start = info->l.l_rw_start;
+  GL(dl_rtld_map).l_rw_count = info->l.l_rw_count;
+  for (int i = 0; i < info->l.l_rw_count; i++)
+    GL(dl_rtld_map).l_rw_range[i] = info->l.l_rw_range[i];
+# endif
 #endif
   _dl_setup_hash (&GL(dl_rtld_map));
   GL(dl_rtld_map).l_real = &GL(dl_rtld_map);
+#ifndef __CHERI_PURE_CAPABILITY__
   GL(dl_rtld_map).l_map_start = (ElfW(Addr)) &__ehdr_start;
+#endif
   GL(dl_rtld_map).l_map_end = (ElfW(Addr)) _end;
   GL(dl_rtld_map).l_text_end = (ElfW(Addr)) _etext;
   /* Copy the TLS related data if necessary.  */
@@ -543,6 +552,7 @@ _dl_start (void *arg)
 #endif
 
 #ifdef __CHERI_PURE_CAPABILITY__
+  elf_machine_rtld_base_setup (&bootstrap_map, arg);
   bootstrap_map.l_addr = elf_machine_load_address_from_args (arg);
   bootstrap_map.l_ld = elf_machine_runtime_dynamic ();
 #else
@@ -1130,8 +1140,13 @@ rtld_setup_main_map (struct link_map *main_map)
 
   main_map->l_map_end = 0;
   main_map->l_text_end = 0;
+#ifndef __CHERI_PURE_CAPABILITY__
   /* Perhaps the executable has no PT_LOAD header entries at all.  */
   main_map->l_map_start = ~0;
+#else
+  /* May be computed already when exe is loaded by ld.so.  */
+  main_map->l_rw_count = 0;
+#endif
   /* And it was opened directly.  */
   ++main_map->l_direct_opencount;
   main_map->l_contiguous = 1;
@@ -1158,6 +1173,10 @@ rtld_setup_main_map (struct link_map *main_map)
       case PT_PHDR:
 	/* Find out the load address.  */
 	main_map->l_addr = (elfptr_t) phdr - ph->p_vaddr;
+#ifdef __CHERI_PURE_CAPABILITY__
+	// TODO: we still need laddr
+	asm volatile ("cvtd %0, %x0" : "+r"(main_map->l_addr));
+#endif
 	break;
       case PT_DYNAMIC:
 	/* This tells us where to find the dynamic section,
@@ -1210,8 +1229,10 @@ rtld_setup_main_map (struct link_map *main_map)
 	  /* Remember where the main program starts in memory.  */
 	  mapstart = (main_map->l_addr
 		      + (ph->p_vaddr & ~(GLRO(dl_pagesize) - 1)));
+#ifndef __CHERI_PURE_CAPABILITY__
 	  if (main_map->l_map_start > mapstart)
 	    main_map->l_map_start = mapstart;
+#endif
 
 	  if (main_map->l_contiguous && expected_load_address != 0
 	      && expected_load_address != mapstart)
@@ -1228,6 +1249,15 @@ rtld_setup_main_map (struct link_map *main_map)
 	     segment.  */
 	  expected_load_address = ((allocend + GLRO(dl_pagesize) - 1)
 				   & ~(GLRO(dl_pagesize) - 1));
+#ifdef __CHERI_PURE_CAPABILITY__
+	  if (ph->p_flags & PF_W)
+	    {
+	      assert (main_map->l_rw_count < DL_MAX_RW_COUNT);
+	      main_map->l_rw_range[main_map->l_rw_count].start = mapstart;
+	      main_map->l_rw_range[main_map->l_rw_count].end = allocend;
+	      main_map->l_rw_count++;
+	    }
+#endif
 	}
 	break;
 
@@ -1640,6 +1670,14 @@ dl_main (const ElfW(Phdr) *phdr,
 	  case AT_EXECFN:
 	    av->a_un.a_val = (uintptr_t) _dl_argv[0];
 	    break;
+# ifdef __CHERI_PURE_CAPABILITY__
+	  case AT_CHERI_EXEC_RX_CAP:
+	    av->a_un.a_val = main_map->l_map_start;
+	    break;
+	  case AT_CHERI_EXEC_RW_CAP:
+	    av->a_un.a_val = main_map->l_rw_start;
+	    break;
+# endif
 	  }
 #endif
 
@@ -1683,6 +1721,19 @@ dl_main (const ElfW(Phdr) *phdr,
 
       /* We delay initializing the path structure until we got the dynamic
 	 information for the program.  */
+
+#ifdef __CHERI_PURE_CAPABILITY__
+      for (ElfW(auxv_t) *av = auxv; av->a_type != AT_NULL; av++)
+	switch (av->a_type)
+	  {
+	  case AT_CHERI_EXEC_RX_CAP:
+	    main_map->l_map_start = av->a_un.a_val;
+	    break;
+	  case AT_CHERI_EXEC_RW_CAP:
+	    main_map->l_rw_start = av->a_un.a_val;
+	    break;
+	  }
+#endif
     }
 
   bool has_interp = rtld_setup_main_map (main_map);

             reply	other threads:[~2022-10-12 14:17 UTC|newest]

Thread overview: 4+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-10-12 14:17 Szabolcs Nagy [this message]
2022-10-26 15:19 Szabolcs Nagy
2022-10-27 13:57 Szabolcs Nagy
2022-11-23 14:47 Szabolcs Nagy

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20221012141726.852613851157@sourceware.org \
    --to=nsz@sourceware.org \
    --cc=glibc-cvs@sourceware.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).