public inbox for gcc-patches@gcc.gnu.org
* [PATCH] ASan on unaligned accesses
@ 2015-03-04  8:00 Marat Zakirov
  2015-03-04  8:07 ` Andrew Pinski
  2015-03-12 12:07 ` [PING][PATCH] " Marat Zakirov
  0 siblings, 2 replies; 16+ messages in thread
From: Marat Zakirov @ 2015-03-04  8:00 UTC (permalink / raw)
  To: gcc-patches
  Cc: Kostya Serebryany, dvyukov, Yury Gribov, 'Andrey Ryabinin'

[-- Attachment #1: Type: text/plain, Size: 379 bytes --]

Hi all!

Here is a patch that makes ASan catch memory accesses that lack
proper alignment.  It is useful because some programs, such as the
Linux kernel, often cheat with alignment, which may cause false
negatives.  Proper handling of unaligned accesses to global data and
the heap also needs libsanitizer support; that will be implemented in
a separate patch.
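
For illustration, the kind of access this is about looks roughly like
this (a sketch in the spirit of the new test in the attached patch,
not the test itself):

  long long a[2];
  long long *p = (long long *) ((char *) &a[1] + 1); /* not 8-byte aligned */
  *p = 1; /* spills one byte past 'a' into the redzone */

The default >= 8-byte fast path checks a single shadow byte for the
containing 8-byte granule, so an access that straddles two granules
like this one can go unreported without the new check.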


--Marat

[-- Attachment #2: mavdt-95_16.diff --]
[-- Type: text/x-patch, Size: 8057 bytes --]

gcc/ChangeLog:

2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>

	* asan.c (asan_emit_stack_protection): Support misaligned accesses.
	(asan_expand_check_ifn): Likewise.
	* params.def: New param asan-catch-misaligned.
	* params.h: New param ASAN_CATCH_MISALIGNED.

gcc/testsuite/ChangeLog:

2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>

	* c-c++-common/asan/misalign-catch.c: New test.


diff --git a/gcc/asan.c b/gcc/asan.c
index b7c2b11..49d0da4 100644
--- a/gcc/asan.c
+++ b/gcc/asan.c
@@ -1050,7 +1050,6 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   rtx_code_label *lab;
   rtx_insn *insns;
   char buf[30];
-  unsigned char shadow_bytes[4];
   HOST_WIDE_INT base_offset = offsets[length - 1];
   HOST_WIDE_INT base_align_bias = 0, offset, prev_offset;
   HOST_WIDE_INT asan_frame_size = offsets[0] - base_offset;
@@ -1059,6 +1058,7 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   unsigned char cur_shadow_byte = ASAN_STACK_MAGIC_LEFT;
   tree str_cst, decl, id;
   int use_after_return_class = -1;
+  bool misalign = (flag_sanitize & SANITIZE_KERNEL_ADDRESS) || ASAN_CATCH_MISALIGNED;
 
   if (shadow_ptr_types[0] == NULL_TREE)
     asan_init_shadow_ptr_types ();
@@ -1193,11 +1193,37 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
   prev_offset = base_offset;
+
+  vec<rtx> shadow_mems;
+  vec<unsigned char> shadow_bytes;
+
+  shadow_mems.create(0);
+  shadow_bytes.create(0);
+
   for (l = length; l; l -= 2)
     {
       if (l == 2)
 	cur_shadow_byte = ASAN_STACK_MAGIC_RIGHT;
       offset = offsets[l - 1];
+      if (l != length && misalign)
+	{
+	  HOST_WIDE_INT aoff
+	    = base_offset + ((offset - base_offset)
+			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
+	      - ASAN_RED_ZONE_SIZE;
+	  if (aoff > prev_offset)
+	    {
+	      shadow_mem = adjust_address (shadow_mem, VOIDmode,
+					   (aoff - prev_offset)
+					   >> ASAN_SHADOW_SHIFT);
+	      prev_offset = aoff;
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_mems.safe_push (shadow_mem);
+	    }
+	}
       if ((offset - base_offset) & (ASAN_RED_ZONE_SIZE - 1))
 	{
 	  int i;
@@ -1212,13 +1238,13 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 	    if (aoff < offset)
 	      {
 		if (aoff < offset - (1 << ASAN_SHADOW_SHIFT) + 1)
-		  shadow_bytes[i] = 0;
+		  shadow_bytes.safe_push (0);
 		else
-		  shadow_bytes[i] = offset - aoff;
+		  shadow_bytes.safe_push (offset - aoff);
 	      }
 	    else
-	      shadow_bytes[i] = ASAN_STACK_MAGIC_PARTIAL;
-	  emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
+	      shadow_bytes.safe_push (ASAN_STACK_MAGIC_PARTIAL);
+	  shadow_mems.safe_push(shadow_mem);
 	  offset = aoff;
 	}
       while (offset <= offsets[l - 2] - ASAN_RED_ZONE_SIZE)
@@ -1227,12 +1253,21 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 				       (offset - prev_offset)
 				       >> ASAN_SHADOW_SHIFT);
 	  prev_offset = offset;
-	  memset (shadow_bytes, cur_shadow_byte, 4);
-	  emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_mems.safe_push(shadow_mem);
 	  offset += ASAN_RED_ZONE_SIZE;
 	}
       cur_shadow_byte = ASAN_STACK_MAGIC_MIDDLE;
     }
+  for (unsigned i = 0; misalign && i < shadow_bytes.length () - 1; i++)
+    if (shadow_bytes[i] == 0 && shadow_bytes[i + 1] > 0)
+      shadow_bytes[i] = 8 + (shadow_bytes[i + 1] > 7 ? 0 : shadow_bytes[i + 1]);
+  for (unsigned i = 0; i < shadow_mems.length (); i++)
+    emit_move_insn (shadow_mems[i], asan_shadow_cst (&shadow_bytes[i * 4]));
+  
   do_pending_stack_adjust ();
 
   /* Construct epilogue sequence.  */
@@ -1285,34 +1320,8 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
 
-  prev_offset = base_offset;
-  last_offset = base_offset;
-  last_size = 0;
-  for (l = length; l; l -= 2)
-    {
-      offset = base_offset + ((offsets[l - 1] - base_offset)
-			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1));
-      if (last_offset + last_size != offset)
-	{
-	  shadow_mem = adjust_address (shadow_mem, VOIDmode,
-				       (last_offset - prev_offset)
-				       >> ASAN_SHADOW_SHIFT);
-	  prev_offset = last_offset;
-	  asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
-	  last_offset = offset;
-	  last_size = 0;
-	}
-      last_size += base_offset + ((offsets[l - 2] - base_offset)
-				  & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
-		   - offset;
-    }
-  if (last_size)
-    {
-      shadow_mem = adjust_address (shadow_mem, VOIDmode,
-				   (last_offset - prev_offset)
-				   >> ASAN_SHADOW_SHIFT);
-      asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
-    }
+  for (unsigned i = 0; i < shadow_mems.length (); i++)
+    asan_clear_shadow (shadow_mems[i], 4);
 
   do_pending_stack_adjust ();
   if (lab)
@@ -2544,6 +2553,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
   gimple g = gsi_stmt (*iter);
   location_t loc = gimple_location (g);
 
+  bool misalign = (flag_sanitize & SANITIZE_KERNEL_ADDRESS) || ASAN_CATCH_MISALIGNED;
   bool recover_p
     = (flag_sanitize & flag_sanitize_recover & SANITIZE_KERNEL_ADDRESS) != 0;
 
@@ -2641,7 +2651,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
   tree base_addr = gimple_assign_lhs (g);
 
   tree t = NULL_TREE;
-  if (real_size_in_bytes >= 8)
+  if (real_size_in_bytes >= 8 && !misalign)
     {
       tree shadow = build_shadow_mem_access (&gsi, loc, base_addr,
 					     shadow_ptr_type);
@@ -2660,7 +2670,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
       /* Aligned (>= 8 bytes) can test just
 	 (real_size_in_bytes - 1 >= shadow), as base_addr & 7 is known
 	 to be 0.  */
-      if (align < 8)
+      if (align < 8 || misalign)
 	{
 	  gimple_seq_add_stmt (&seq, build_assign (BIT_AND_EXPR,
 						   base_addr, 7));
diff --git a/gcc/params.def b/gcc/params.def
index 4d3b398..a22ed22 100644
--- a/gcc/params.def
+++ b/gcc/params.def
@@ -1134,6 +1134,11 @@ DEFPARAM (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD,
          "in function becomes greater or equal to this number",
          7000, 0, INT_MAX)
 
+DEFPARAM (PARAM_ASAN_CATCH_MISALIGNED,
+         "asan-catch-misaligned",
+         "catch unaligned access",
+         0, 1, 1)
+
 DEFPARAM (PARAM_UNINIT_CONTROL_DEP_ATTEMPTS,
 	  "uninit-control-dep-attempts",
 	  "Maximum number of nested calls to search for control dependencies "
diff --git a/gcc/params.h b/gcc/params.h
index 2e50ff4..e455327 100644
--- a/gcc/params.h
+++ b/gcc/params.h
@@ -238,5 +238,7 @@ extern void init_param_values (int *params);
   PARAM_VALUE (PARAM_ASAN_USE_AFTER_RETURN)
 #define ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD \
   PARAM_VALUE (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD)
+#define ASAN_CATCH_MISALIGNED \
+  PARAM_VALUE (PARAM_ASAN_CATCH_MISALIGNED)
 
 #endif /* ! GCC_PARAMS_H */
diff --git a/gcc/testsuite/c-c++-common/asan/misalign-catch.c b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
new file mode 100644
index 0000000..131cf65
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
@@ -0,0 +1,21 @@
+/* { dg-do run } */
+/* { dg-options "--param asan-catch-misaligned=1" } */
+/* { dg-shouldfail "asan" } */
+
+long long *ptr;
+
+__attribute__((noinline))
+void foo () {
+   ptr = ((long long int *)(((char *)ptr) + 1));
+   *ptr = 1;
+}
+
+int main()
+{
+   long long int local[9];
+   ptr = (long long *)&local[8];
+   foo ();
+   return 0;
+}
+
+/* { dg-output "ERROR: AddressSanitizer: stack-buffer-overflow.*(\n|\r\n|\r)" } */

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH] ASan on unaligned accesses
  2015-03-04  8:00 [PATCH] ASan on unaligned accesses Marat Zakirov
@ 2015-03-04  8:07 ` Andrew Pinski
  2015-03-19  6:01   ` [PINGv2][PATCH] " Marat Zakirov
  2015-03-12 12:07 ` [PING][PATCH] " Marat Zakirov
  1 sibling, 1 reply; 16+ messages in thread
From: Andrew Pinski @ 2015-03-04  8:07 UTC (permalink / raw)
  To: Marat Zakirov
  Cc: gcc-patches, Kostya Serebryany, Dmitry Vyukov, Yury Gribov,
	Andrey Ryabinin

On Wed, Mar 4, 2015 at 12:00 AM, Marat Zakirov <m.zakirov@samsung.com> wrote:
> Hi all!
>
> Here is the patch which forces ASan to work on memory access without proper
> alignment. it's useful because some programs like linux kernel often cheat
> with alignment which may cause false negatives. This patch needs additional
> support for proper work on unaligned accesses in global data and heap. It
> will be implemented in libsanitizer by separate patch.
>
>
> --Marat
>
> gcc/ChangeLog:
>
> 2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>
>
>         * asan.c (asan_emit_stack_protection): Support for misalign
> accesses.
>         (asan_expand_check_ifn): Likewise.
>         * params.def: New option asan-catch-misaligned.
>         * params.h: New param ASAN_CATCH_MISALIGNED.

Since this parameter can only be true or false, I think it should be a
normal option.  Also you did not add documentation of the param.
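
For reference, the two spellings would look roughly like this (the
first is what the new test already uses; the -f option name is only
the obvious candidate, not something this patch provides):

  gcc -fsanitize=address --param asan-catch-misaligned=1 test.c
  gcc -fsanitize=address -fasan-catch-misaligned test.c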

Thanks,
Andrew

>
> gcc/testsuite/ChangeLog:
>
> 2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>
>
>         * c-c++-common/asan/misalign-catch.c: New test.
>
>
> diff --git a/gcc/asan.c b/gcc/asan.c
> index b7c2b11..49d0da4 100644
> --- a/gcc/asan.c
> +++ b/gcc/asan.c
> @@ -1050,7 +1050,6 @@ asan_emit_stack_protection (rtx base, rtx pbase,
> unsigned int alignb,
>    rtx_code_label *lab;
>    rtx_insn *insns;
>    char buf[30];
> -  unsigned char shadow_bytes[4];
>    HOST_WIDE_INT base_offset = offsets[length - 1];
>    HOST_WIDE_INT base_align_bias = 0, offset, prev_offset;
>    HOST_WIDE_INT asan_frame_size = offsets[0] - base_offset;
> @@ -1059,6 +1058,7 @@ asan_emit_stack_protection (rtx base, rtx pbase,
> unsigned int alignb,
>    unsigned char cur_shadow_byte = ASAN_STACK_MAGIC_LEFT;
>    tree str_cst, decl, id;
>    int use_after_return_class = -1;
> +  bool misalign = (flag_sanitize & SANITIZE_KERNEL_ADDRESS) ||
> ASAN_CATCH_MISALIGNED;
>
>    if (shadow_ptr_types[0] == NULL_TREE)
>      asan_init_shadow_ptr_types ();
> @@ -1193,11 +1193,37 @@ asan_emit_stack_protection (rtx base, rtx pbase,
> unsigned int alignb,
>    if (STRICT_ALIGNMENT)
>      set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
>    prev_offset = base_offset;
> +
> +  vec<rtx> shadow_mems;
> +  vec<unsigned char> shadow_bytes;
> +
> +  shadow_mems.create(0);
> +  shadow_bytes.create(0);
> +
>    for (l = length; l; l -= 2)
>      {
>        if (l == 2)
>         cur_shadow_byte = ASAN_STACK_MAGIC_RIGHT;
>        offset = offsets[l - 1];
> +      if (l != length && misalign)
> +       {
> +         HOST_WIDE_INT aoff
> +           = base_offset + ((offset - base_offset)
> +                            & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
> +             - ASAN_RED_ZONE_SIZE;
> +         if (aoff > prev_offset)
> +           {
> +             shadow_mem = adjust_address (shadow_mem, VOIDmode,
> +                                          (aoff - prev_offset)
> +                                          >> ASAN_SHADOW_SHIFT);
> +             prev_offset = aoff;
> +             shadow_bytes.safe_push (0);
> +             shadow_bytes.safe_push (0);
> +             shadow_bytes.safe_push (0);
> +             shadow_bytes.safe_push (0);
> +             shadow_mems.safe_push (shadow_mem);
> +           }
> +       }
>        if ((offset - base_offset) & (ASAN_RED_ZONE_SIZE - 1))
>         {
>           int i;
> @@ -1212,13 +1238,13 @@ asan_emit_stack_protection (rtx base, rtx pbase,
> unsigned int alignb,
>             if (aoff < offset)
>               {
>                 if (aoff < offset - (1 << ASAN_SHADOW_SHIFT) + 1)
> -                 shadow_bytes[i] = 0;
> +                 shadow_bytes.safe_push (0);
>                 else
> -                 shadow_bytes[i] = offset - aoff;
> +                 shadow_bytes.safe_push (offset - aoff);
>               }
>             else
> -             shadow_bytes[i] = ASAN_STACK_MAGIC_PARTIAL;
> -         emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
> +             shadow_bytes.safe_push (ASAN_STACK_MAGIC_PARTIAL);
> +         shadow_mems.safe_push(shadow_mem);
>           offset = aoff;
>         }
>        while (offset <= offsets[l - 2] - ASAN_RED_ZONE_SIZE)
> @@ -1227,12 +1253,21 @@ asan_emit_stack_protection (rtx base, rtx pbase,
> unsigned int alignb,
>                                        (offset - prev_offset)
>                                        >> ASAN_SHADOW_SHIFT);
>           prev_offset = offset;
> -         memset (shadow_bytes, cur_shadow_byte, 4);
> -         emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
> +         shadow_bytes.safe_push (cur_shadow_byte);
> +         shadow_bytes.safe_push (cur_shadow_byte);
> +         shadow_bytes.safe_push (cur_shadow_byte);
> +         shadow_bytes.safe_push (cur_shadow_byte);
> +         shadow_mems.safe_push(shadow_mem);
>           offset += ASAN_RED_ZONE_SIZE;
>         }
>        cur_shadow_byte = ASAN_STACK_MAGIC_MIDDLE;
>      }
> +  for (unsigned i = 0; misalign && i < shadow_bytes.length () - 1; i++)
> +    if (shadow_bytes[i] == 0 && shadow_bytes[i + 1] > 0)
> +      shadow_bytes[i] = 8 + (shadow_bytes[i + 1] > 7 ? 0 : shadow_bytes[i +
> 1]);
> +  for (unsigned i = 0; i < shadow_mems.length (); i++)
> +    emit_move_insn (shadow_mems[i], asan_shadow_cst (&shadow_bytes[i *
> 4]));
> +
>    do_pending_stack_adjust ();
>
>    /* Construct epilogue sequence.  */
> @@ -1285,34 +1320,8 @@ asan_emit_stack_protection (rtx base, rtx pbase,
> unsigned int alignb,
>    if (STRICT_ALIGNMENT)
>      set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
>
> -  prev_offset = base_offset;
> -  last_offset = base_offset;
> -  last_size = 0;
> -  for (l = length; l; l -= 2)
> -    {
> -      offset = base_offset + ((offsets[l - 1] - base_offset)
> -                            & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1));
> -      if (last_offset + last_size != offset)
> -       {
> -         shadow_mem = adjust_address (shadow_mem, VOIDmode,
> -                                      (last_offset - prev_offset)
> -                                      >> ASAN_SHADOW_SHIFT);
> -         prev_offset = last_offset;
> -         asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
> -         last_offset = offset;
> -         last_size = 0;
> -       }
> -      last_size += base_offset + ((offsets[l - 2] - base_offset)
> -                                 & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
> -                  - offset;
> -    }
> -  if (last_size)
> -    {
> -      shadow_mem = adjust_address (shadow_mem, VOIDmode,
> -                                  (last_offset - prev_offset)
> -                                  >> ASAN_SHADOW_SHIFT);
> -      asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
> -    }
> +  for (unsigned i = 0; i < shadow_mems.length (); i++)
> +    asan_clear_shadow (shadow_mems[i], 4);
>
>    do_pending_stack_adjust ();
>    if (lab)
> @@ -2544,6 +2553,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter,
> bool use_calls)
>    gimple g = gsi_stmt (*iter);
>    location_t loc = gimple_location (g);
>
> +  bool misalign = (flag_sanitize & SANITIZE_KERNEL_ADDRESS) ||
> ASAN_CATCH_MISALIGNED;
>    bool recover_p
>      = (flag_sanitize & flag_sanitize_recover & SANITIZE_KERNEL_ADDRESS) !=
> 0;
>
> @@ -2641,7 +2651,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter,
> bool use_calls)
>    tree base_addr = gimple_assign_lhs (g);
>
>    tree t = NULL_TREE;
> -  if (real_size_in_bytes >= 8)
> +  if (real_size_in_bytes >= 8 && !misalign)
>      {
>        tree shadow = build_shadow_mem_access (&gsi, loc, base_addr,
>                                              shadow_ptr_type);
> @@ -2660,7 +2670,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter,
> bool use_calls)
>        /* Aligned (>= 8 bytes) can test just
>          (real_size_in_bytes - 1 >= shadow), as base_addr & 7 is known
>          to be 0.  */
> -      if (align < 8)
> +      if (align < 8 || misalign)
>         {
>           gimple_seq_add_stmt (&seq, build_assign (BIT_AND_EXPR,
>                                                    base_addr, 7));
> diff --git a/gcc/params.def b/gcc/params.def
> index 4d3b398..a22ed22 100644
> --- a/gcc/params.def
> +++ b/gcc/params.def
> @@ -1134,6 +1134,11 @@ DEFPARAM
> (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD,
>           "in function becomes greater or equal to this number",
>           7000, 0, INT_MAX)
>
> +DEFPARAM (PARAM_ASAN_CATCH_MISALIGNED,
> +         "asan-catch-misaligned",
> +         "catch unaligned access",
> +         0, 1, 1)
> +
>  DEFPARAM (PARAM_UNINIT_CONTROL_DEP_ATTEMPTS,
>           "uninit-control-dep-attempts",
>           "Maximum number of nested calls to search for control dependencies
> "
> diff --git a/gcc/params.h b/gcc/params.h
> index 2e50ff4..e455327 100644
> --- a/gcc/params.h
> +++ b/gcc/params.h
> @@ -238,5 +238,7 @@ extern void init_param_values (int *params);
>    PARAM_VALUE (PARAM_ASAN_USE_AFTER_RETURN)
>  #define ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD \
>    PARAM_VALUE (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD)
> +#define ASAN_CATCH_MISALIGNED \
> +  PARAM_VALUE (PARAM_ASAN_CATCH_MISALIGNED)
>
>  #endif /* ! GCC_PARAMS_H */
> diff --git a/gcc/testsuite/c-c++-common/asan/misalign-catch.c
> b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
> new file mode 100644
> index 0000000..131cf65
> --- /dev/null
> +++ b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
> @@ -0,0 +1,21 @@
> +/* { dg-do run } */
> +/* { dg-options "--param asan-catch-misaligned=1" } */
> +/* { dg-shouldfail "asan" } */
> +
> +long long *ptr;
> +
> +__attribute__((noinline))
> +void foo () {
> +   ptr = ((long long int *)(((char *)ptr) + 1));
> +   *ptr = 1;
> +}
> +
> +int main()
> +{
> +   long long int local[9];
> +   ptr = (long long *)&local[8];
> +   foo ();
> +   return 0;
> +}
> +
> +/* { dg-output "ERROR: AddressSanitizer:
> stack-buffer-overflow.*(\n|\r\n|\r)" } */
>

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PING][PATCH] ASan on unaligned accesses
  2015-03-04  8:00 [PATCH] ASan on unaligned accesses Marat Zakirov
  2015-03-04  8:07 ` Andrew Pinski
@ 2015-03-12 12:07 ` Marat Zakirov
  1 sibling, 0 replies; 16+ messages in thread
From: Marat Zakirov @ 2015-03-12 12:07 UTC (permalink / raw)
  To: gcc-patches
  Cc: Kostya Serebryany, dvyukov, Yury Gribov, 'Andrey Ryabinin'

[-- Attachment #1: Type: text/plain, Size: 447 bytes --]



On 03/04/2015 11:00 AM, Marat Zakirov wrote:
> Hi all!
>
> Here is the patch which forces ASan to work on memory access without 
> proper alignment. it's useful because some programs like linux kernel 
> often cheat with alignment which may cause false negatives. This patch 
> needs additional support for proper work on unaligned accesses in 
> global data and heap. It will be implemented in libsanitizer by 
> separate patch.
>
>
> --Marat


[-- Attachment #2: mavdt-95_16.diff --]
[-- Type: text/x-patch, Size: 8057 bytes --]

gcc/ChangeLog:

2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>

	* asan.c (asan_emit_stack_protection): Support misaligned accesses.
	(asan_expand_check_ifn): Likewise.
	* params.def: New param asan-catch-misaligned.
	* params.h: New param ASAN_CATCH_MISALIGNED.

gcc/testsuite/ChangeLog:

2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>

	* c-c++-common/asan/misalign-catch.c: New test.


diff --git a/gcc/asan.c b/gcc/asan.c
index b7c2b11..49d0da4 100644
--- a/gcc/asan.c
+++ b/gcc/asan.c
@@ -1050,7 +1050,6 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   rtx_code_label *lab;
   rtx_insn *insns;
   char buf[30];
-  unsigned char shadow_bytes[4];
   HOST_WIDE_INT base_offset = offsets[length - 1];
   HOST_WIDE_INT base_align_bias = 0, offset, prev_offset;
   HOST_WIDE_INT asan_frame_size = offsets[0] - base_offset;
@@ -1059,6 +1058,7 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   unsigned char cur_shadow_byte = ASAN_STACK_MAGIC_LEFT;
   tree str_cst, decl, id;
   int use_after_return_class = -1;
+  bool misalign = (flag_sanitize & SANITIZE_KERNEL_ADDRESS) || ASAN_CATCH_MISALIGNED;
 
   if (shadow_ptr_types[0] == NULL_TREE)
     asan_init_shadow_ptr_types ();
@@ -1193,11 +1193,37 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
   prev_offset = base_offset;
+
+  vec<rtx> shadow_mems;
+  vec<unsigned char> shadow_bytes;
+
+  shadow_mems.create(0);
+  shadow_bytes.create(0);
+
   for (l = length; l; l -= 2)
     {
       if (l == 2)
 	cur_shadow_byte = ASAN_STACK_MAGIC_RIGHT;
       offset = offsets[l - 1];
+      if (l != length && misalign)
+	{
+	  HOST_WIDE_INT aoff
+	    = base_offset + ((offset - base_offset)
+			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
+	      - ASAN_RED_ZONE_SIZE;
+	  if (aoff > prev_offset)
+	    {
+	      shadow_mem = adjust_address (shadow_mem, VOIDmode,
+					   (aoff - prev_offset)
+					   >> ASAN_SHADOW_SHIFT);
+	      prev_offset = aoff;
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_mems.safe_push (shadow_mem);
+	    }
+	}
       if ((offset - base_offset) & (ASAN_RED_ZONE_SIZE - 1))
 	{
 	  int i;
@@ -1212,13 +1238,13 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 	    if (aoff < offset)
 	      {
 		if (aoff < offset - (1 << ASAN_SHADOW_SHIFT) + 1)
-		  shadow_bytes[i] = 0;
+		  shadow_bytes.safe_push (0);
 		else
-		  shadow_bytes[i] = offset - aoff;
+		  shadow_bytes.safe_push (offset - aoff);
 	      }
 	    else
-	      shadow_bytes[i] = ASAN_STACK_MAGIC_PARTIAL;
-	  emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
+	      shadow_bytes.safe_push (ASAN_STACK_MAGIC_PARTIAL);
+	  shadow_mems.safe_push(shadow_mem);
 	  offset = aoff;
 	}
       while (offset <= offsets[l - 2] - ASAN_RED_ZONE_SIZE)
@@ -1227,12 +1253,21 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 				       (offset - prev_offset)
 				       >> ASAN_SHADOW_SHIFT);
 	  prev_offset = offset;
-	  memset (shadow_bytes, cur_shadow_byte, 4);
-	  emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_mems.safe_push(shadow_mem);
 	  offset += ASAN_RED_ZONE_SIZE;
 	}
       cur_shadow_byte = ASAN_STACK_MAGIC_MIDDLE;
     }
+  for (unsigned i = 0; misalign && i < shadow_bytes.length () - 1; i++)
+    if (shadow_bytes[i] == 0 && shadow_bytes[i + 1] > 0)
+      shadow_bytes[i] = 8 + (shadow_bytes[i + 1] > 7 ? 0 : shadow_bytes[i + 1]);
+  for (unsigned i = 0; i < shadow_mems.length (); i++)
+    emit_move_insn (shadow_mems[i], asan_shadow_cst (&shadow_bytes[i * 4]));
+  
   do_pending_stack_adjust ();
 
   /* Construct epilogue sequence.  */
@@ -1285,34 +1320,8 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
 
-  prev_offset = base_offset;
-  last_offset = base_offset;
-  last_size = 0;
-  for (l = length; l; l -= 2)
-    {
-      offset = base_offset + ((offsets[l - 1] - base_offset)
-			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1));
-      if (last_offset + last_size != offset)
-	{
-	  shadow_mem = adjust_address (shadow_mem, VOIDmode,
-				       (last_offset - prev_offset)
-				       >> ASAN_SHADOW_SHIFT);
-	  prev_offset = last_offset;
-	  asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
-	  last_offset = offset;
-	  last_size = 0;
-	}
-      last_size += base_offset + ((offsets[l - 2] - base_offset)
-				  & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
-		   - offset;
-    }
-  if (last_size)
-    {
-      shadow_mem = adjust_address (shadow_mem, VOIDmode,
-				   (last_offset - prev_offset)
-				   >> ASAN_SHADOW_SHIFT);
-      asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
-    }
+  for (unsigned i = 0; i < shadow_mems.length (); i++)
+    asan_clear_shadow (shadow_mems[i], 4);
 
   do_pending_stack_adjust ();
   if (lab)
@@ -2544,6 +2553,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
   gimple g = gsi_stmt (*iter);
   location_t loc = gimple_location (g);
 
+  bool misalign = (flag_sanitize & SANITIZE_KERNEL_ADDRESS) || ASAN_CATCH_MISALIGNED;
   bool recover_p
     = (flag_sanitize & flag_sanitize_recover & SANITIZE_KERNEL_ADDRESS) != 0;
 
@@ -2641,7 +2651,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
   tree base_addr = gimple_assign_lhs (g);
 
   tree t = NULL_TREE;
-  if (real_size_in_bytes >= 8)
+  if (real_size_in_bytes >= 8 && !misalign)
     {
       tree shadow = build_shadow_mem_access (&gsi, loc, base_addr,
 					     shadow_ptr_type);
@@ -2660,7 +2670,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
       /* Aligned (>= 8 bytes) can test just
 	 (real_size_in_bytes - 1 >= shadow), as base_addr & 7 is known
 	 to be 0.  */
-      if (align < 8)
+      if (align < 8 || misalign)
 	{
 	  gimple_seq_add_stmt (&seq, build_assign (BIT_AND_EXPR,
 						   base_addr, 7));
diff --git a/gcc/params.def b/gcc/params.def
index 4d3b398..a22ed22 100644
--- a/gcc/params.def
+++ b/gcc/params.def
@@ -1134,6 +1134,11 @@ DEFPARAM (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD,
          "in function becomes greater or equal to this number",
          7000, 0, INT_MAX)
 
+DEFPARAM (PARAM_ASAN_CATCH_MISALIGNED,
+         "asan-catch-misaligned",
+         "catch unaligned access",
+         0, 1, 1)
+
 DEFPARAM (PARAM_UNINIT_CONTROL_DEP_ATTEMPTS,
 	  "uninit-control-dep-attempts",
 	  "Maximum number of nested calls to search for control dependencies "
diff --git a/gcc/params.h b/gcc/params.h
index 2e50ff4..e455327 100644
--- a/gcc/params.h
+++ b/gcc/params.h
@@ -238,5 +238,7 @@ extern void init_param_values (int *params);
   PARAM_VALUE (PARAM_ASAN_USE_AFTER_RETURN)
 #define ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD \
   PARAM_VALUE (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD)
+#define ASAN_CATCH_MISALIGNED \
+  PARAM_VALUE (PARAM_ASAN_CATCH_MISALIGNED)
 
 #endif /* ! GCC_PARAMS_H */
diff --git a/gcc/testsuite/c-c++-common/asan/misalign-catch.c b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
new file mode 100644
index 0000000..131cf65
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
@@ -0,0 +1,21 @@
+/* { dg-do run } */
+/* { dg-options "--param asan-catch-misaligned=1" } */
+/* { dg-shouldfail "asan" } */
+
+long long *ptr;
+
+__attribute__((noinline))
+void foo () {
+   ptr = ((long long int *)(((char *)ptr) + 1));
+   *ptr = 1;
+}
+
+int main()
+{
+   long long int local[9];
+   ptr = (long long *)&local[8];
+   foo ();
+   return 0;
+}
+
+/* { dg-output "ERROR: AddressSanitizer: stack-buffer-overflow.*(\n|\r\n|\r)" } */

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PINGv2][PATCH] ASan on unaligned accesses
  2015-03-04  8:07 ` Andrew Pinski
@ 2015-03-19  6:01   ` Marat Zakirov
  2015-03-26  6:53     ` [PINGv3][PATCH] " Marat Zakirov
  0 siblings, 1 reply; 16+ messages in thread
From: Marat Zakirov @ 2015-03-19  6:01 UTC (permalink / raw)
  To: Andrew Pinski
  Cc: gcc-patches, Kostya Serebryany, Dmitry Vyukov, Yury Gribov,
	Andrey Ryabinin

[-- Attachment #1: Type: text/plain, Size: 1027 bytes --]


On 03/04/2015 11:07 AM, Andrew Pinski wrote:
> On Wed, Mar 4, 2015 at 12:00 AM, Marat Zakirov <m.zakirov@samsung.com> wrote:
>> Hi all!
>>
>> Here is the patch which forces ASan to work on memory access without proper
>> alignment. it's useful because some programs like linux kernel often cheat
>> with alignment which may cause false negatives. This patch needs additional
>> support for proper work on unaligned accesses in global data and heap. It
>> will be implemented in libsanitizer by separate patch.
>>
>>
>> --Marat
>>
>> gcc/ChangeLog:
>>
>> 2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>
>>
>>          * asan.c (asan_emit_stack_protection): Support for misalign
>> accesses.
>>          (asan_expand_check_ifn): Likewise.
>>          * params.def: New option asan-catch-misaligned.
>>          * params.h: New param ASAN_CATCH_MISALIGNED.
> Since this parameter can only be true or false, I think it should be a
> normal option.  Also you did not add documentation of the param.
>
> Thanks,
> Andrew
Fixed.


[-- Attachment #2: mavdt-95_17.diff --]
[-- Type: text/x-patch, Size: 8531 bytes --]

gcc/ChangeLog:

2015-03-12  Marat Zakirov  <m.zakirov@samsung.com>

	* asan.c (asan_emit_stack_protection): Support misaligned accesses.
	(asan_expand_check_ifn): Likewise.
	* common.opt: New flag -fasan-catch-misaligned.
	* doc/invoke.texi: New flag description.
	* opts.c (finish_options): Add check for new flag.
	(common_handle_option): Switch on flag if SANITIZE_KERNEL_ADDRESS.

gcc/testsuite/ChangeLog:

2015-03-12  Marat Zakirov  <m.zakirov@samsung.com>

	* c-c++-common/asan/misalign-catch.c: New test.


diff --git a/gcc/asan.c b/gcc/asan.c
index 9e4a629..80bf2e8 100644
--- a/gcc/asan.c
+++ b/gcc/asan.c
@@ -1050,7 +1050,6 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   rtx_code_label *lab;
   rtx_insn *insns;
   char buf[30];
-  unsigned char shadow_bytes[4];
   HOST_WIDE_INT base_offset = offsets[length - 1];
   HOST_WIDE_INT base_align_bias = 0, offset, prev_offset;
   HOST_WIDE_INT asan_frame_size = offsets[0] - base_offset;
@@ -1193,11 +1192,37 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
   prev_offset = base_offset;
+
+  vec<rtx> shadow_mems;
+  vec<unsigned char> shadow_bytes;
+
+  shadow_mems.create(0);
+  shadow_bytes.create(0);
+
   for (l = length; l; l -= 2)
     {
       if (l == 2)
 	cur_shadow_byte = ASAN_STACK_MAGIC_RIGHT;
       offset = offsets[l - 1];
+      if (l != length && flag_asan_catch_misaligned)
+	{
+	  HOST_WIDE_INT aoff
+	    = base_offset + ((offset - base_offset)
+			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
+	      - ASAN_RED_ZONE_SIZE;
+	  if (aoff > prev_offset)
+	    {
+	      shadow_mem = adjust_address (shadow_mem, VOIDmode,
+					   (aoff - prev_offset)
+					   >> ASAN_SHADOW_SHIFT);
+	      prev_offset = aoff;
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_mems.safe_push (shadow_mem);
+	    }
+	}
       if ((offset - base_offset) & (ASAN_RED_ZONE_SIZE - 1))
 	{
 	  int i;
@@ -1212,13 +1237,13 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 	    if (aoff < offset)
 	      {
 		if (aoff < offset - (1 << ASAN_SHADOW_SHIFT) + 1)
-		  shadow_bytes[i] = 0;
+		  shadow_bytes.safe_push (0);
 		else
-		  shadow_bytes[i] = offset - aoff;
+		  shadow_bytes.safe_push (offset - aoff);
 	      }
 	    else
-	      shadow_bytes[i] = ASAN_STACK_MAGIC_PARTIAL;
-	  emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
+	      shadow_bytes.safe_push (ASAN_STACK_MAGIC_PARTIAL);
+	  shadow_mems.safe_push(shadow_mem);
 	  offset = aoff;
 	}
       while (offset <= offsets[l - 2] - ASAN_RED_ZONE_SIZE)
@@ -1227,12 +1252,21 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 				       (offset - prev_offset)
 				       >> ASAN_SHADOW_SHIFT);
 	  prev_offset = offset;
-	  memset (shadow_bytes, cur_shadow_byte, 4);
-	  emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_mems.safe_push(shadow_mem);
 	  offset += ASAN_RED_ZONE_SIZE;
 	}
       cur_shadow_byte = ASAN_STACK_MAGIC_MIDDLE;
     }
+  for (unsigned i = 0; flag_asan_catch_misaligned && i < shadow_bytes.length () - 1; i++)
+    if (shadow_bytes[i] == 0 && shadow_bytes[i + 1] > 0)
+      shadow_bytes[i] = 8 + (shadow_bytes[i + 1] > 7 ? 0 : shadow_bytes[i + 1]);
+  for (unsigned i = 0; i < shadow_mems.length (); i++)
+    emit_move_insn (shadow_mems[i], asan_shadow_cst (&shadow_bytes[i * 4]));
+  
   do_pending_stack_adjust ();
 
   /* Construct epilogue sequence.  */
@@ -1285,34 +1319,8 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
 
-  prev_offset = base_offset;
-  last_offset = base_offset;
-  last_size = 0;
-  for (l = length; l; l -= 2)
-    {
-      offset = base_offset + ((offsets[l - 1] - base_offset)
-			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1));
-      if (last_offset + last_size != offset)
-	{
-	  shadow_mem = adjust_address (shadow_mem, VOIDmode,
-				       (last_offset - prev_offset)
-				       >> ASAN_SHADOW_SHIFT);
-	  prev_offset = last_offset;
-	  asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
-	  last_offset = offset;
-	  last_size = 0;
-	}
-      last_size += base_offset + ((offsets[l - 2] - base_offset)
-				  & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
-		   - offset;
-    }
-  if (last_size)
-    {
-      shadow_mem = adjust_address (shadow_mem, VOIDmode,
-				   (last_offset - prev_offset)
-				   >> ASAN_SHADOW_SHIFT);
-      asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
-    }
+  for (unsigned i = 0; i < shadow_mems.length (); i++)
+    asan_clear_shadow (shadow_mems[i], 4);
 
   do_pending_stack_adjust ();
   if (lab)
@@ -2643,7 +2651,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
   tree base_addr = gimple_assign_lhs (g);
 
   tree t = NULL_TREE;
-  if (real_size_in_bytes >= 8)
+  if (real_size_in_bytes >= 8 && !flag_asan_catch_misaligned)
     {
       tree shadow = build_shadow_mem_access (&gsi, loc, base_addr,
 					     shadow_ptr_type);
@@ -2662,7 +2670,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
       /* Aligned (>= 8 bytes) can test just
 	 (real_size_in_bytes - 1 >= shadow), as base_addr & 7 is known
 	 to be 0.  */
-      if (align < 8)
+      if (align < 8 || flag_asan_catch_misaligned)
 	{
 	  gimple_seq_add_stmt (&seq, build_assign (BIT_AND_EXPR,
 						   base_addr, 7));
diff --git a/gcc/common.opt b/gcc/common.opt
index b49ac46..a7af95e 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -1161,6 +1161,12 @@ Common Driver Var(flag_report_bug)
 Collect and dump debug information into temporary file if ICE in C/C++
 compiler occured.
 
+fasan-catch-misaligned
+Common Driver Var(flag_asan_catch_misaligned)
+Catch invalid unaligned memory accesses.
+This option is needed to prevent potential ASan false negatives due to
+memory accesses that are not aligned to the type size, e.g. in the Linux kernel.
+
 fdump-passes
 Common Var(flag_dump_passes) Init(0)
 Dump optimization passes
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index 1534ed9..c85aa38 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -6680,6 +6680,12 @@ text / bss / data / heap / stack / dso start locations.
 Collect and dump debug information into temporary file if ICE in C/C++
 compiler occured.
 
+@item -fasan-catch-misaligned
+@opindex fasan-catch-misaligned
+Catch invalid unaligned memory accesses.
+This option is needed to prevent potential ASan false negatives due to
+memory accesses that are not aligned to the type size, e.g. in the Linux kernel.
+
 @item -fdump-unnumbered
 @opindex fdump-unnumbered
 When doing debugging dumps, suppress instruction numbers and address output.
diff --git a/gcc/opts.c b/gcc/opts.c
index 39c190d..b238b90 100644
--- a/gcc/opts.c
+++ b/gcc/opts.c
@@ -925,6 +925,9 @@ finish_options (struct gcc_options *opts, struct gcc_options *opts_set,
       opts->x_flag_aggressive_loop_optimizations = 0;
       opts->x_flag_strict_overflow = 0;
     }
+  
+  if (flag_asan_catch_misaligned && !opts->x_flag_sanitize)
+    error_at (loc, "-fasan-catch-misaligned is valid only with -fsanitize option");
 }
 
 #define LEFT_COLUMN	27
@@ -1662,6 +1665,7 @@ common_handle_option (struct gcc_options *opts,
 	    maybe_set_param_value (PARAM_ASAN_USE_AFTER_RETURN, 0,
 				   opts->x_param_values,
 				   opts_set->x_param_values);
+            flag_asan_catch_misaligned = true;
 	  }
 
 	break;
diff --git a/gcc/testsuite/c-c++-common/asan/misalign-catch.c b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
new file mode 100644
index 0000000..158efd4
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
@@ -0,0 +1,21 @@
+/* { dg-do run } */
+/* { dg-options "-fasan-catch-misaligned" } */
+/* { dg-shouldfail "asan" } */
+
+long long *ptr;
+
+__attribute__((noinline))
+void foo () {
+   ptr = ((long long int *)(((char *)ptr) + 1));
+   *ptr = 1;
+}
+
+int main()
+{
+   long long int local[9];
+   ptr = (long long *)&local[8];
+   foo ();
+   return 0;
+}
+
+/* { dg-output "ERROR: AddressSanitizer: stack-buffer-overflow.*(\n|\r\n|\r)" } */

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PINGv3][PATCH] ASan on unaligned accesses
  2015-03-19  6:01   ` [PINGv2][PATCH] " Marat Zakirov
@ 2015-03-26  6:53     ` Marat Zakirov
  2015-03-26 11:50       ` Jakub Jelinek
  0 siblings, 1 reply; 16+ messages in thread
From: Marat Zakirov @ 2015-03-26  6:53 UTC (permalink / raw)
  To: gcc-patches
  Cc: Andrew Pinski, Kostya Serebryany, Dmitry Vyukov, Yury Gribov,
	Andrey Ryabinin

[-- Attachment #1: Type: text/plain, Size: 1131 bytes --]



On 03/19/2015 09:01 AM, Marat Zakirov wrote:
>
> On 03/04/2015 11:07 AM, Andrew Pinski wrote:
>> On Wed, Mar 4, 2015 at 12:00 AM, Marat Zakirov 
>> <m.zakirov@samsung.com> wrote:
>>> Hi all!
>>>
>>> Here is the patch which forces ASan to work on memory access without 
>>> proper
>>> alignment. it's useful because some programs like linux kernel often 
>>> cheat
>>> with alignment which may cause false negatives. This patch needs 
>>> additional
>>> support for proper work on unaligned accesses in global data and 
>>> heap. It
>>> will be implemented in libsanitizer by separate patch.
>>>
>>>
>>> --Marat
>>>
>>> gcc/ChangeLog:
>>>
>>> 2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>
>>>
>>>          * asan.c (asan_emit_stack_protection): Support for misalign
>>> accesses.
>>>          (asan_expand_check_ifn): Likewise.
>>>          * params.def: New option asan-catch-misaligned.
>>>          * params.h: New param ASAN_CATCH_MISALIGNED.
>> Since this parameter can only be true or false, I think it should be a
>> normal option.  Also you did not add documentation of the param.
>>
>> Thanks,
>> Andrew
> Fixed.
>


[-- Attachment #2: mavdt-95_17.diff --]
[-- Type: text/x-patch, Size: 8531 bytes --]

gcc/ChangeLog:

2015-03-12  Marat Zakirov  <m.zakirov@samsung.com>

	* asan.c (asan_emit_stack_protection): Support misaligned accesses.
	(asan_expand_check_ifn): Likewise.
	* common.opt: New flag -fasan-catch-misaligned.
	* doc/invoke.texi: New flag description.
	* opts.c (finish_options): Add check for new flag.
	(common_handle_option): Switch on flag if SANITIZE_KERNEL_ADDRESS.

gcc/testsuite/ChangeLog:

2015-03-12  Marat Zakirov  <m.zakirov@samsung.com>

	* c-c++-common/asan/misalign-catch.c: New test.


diff --git a/gcc/asan.c b/gcc/asan.c
index 9e4a629..80bf2e8 100644
--- a/gcc/asan.c
+++ b/gcc/asan.c
@@ -1050,7 +1050,6 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   rtx_code_label *lab;
   rtx_insn *insns;
   char buf[30];
-  unsigned char shadow_bytes[4];
   HOST_WIDE_INT base_offset = offsets[length - 1];
   HOST_WIDE_INT base_align_bias = 0, offset, prev_offset;
   HOST_WIDE_INT asan_frame_size = offsets[0] - base_offset;
@@ -1193,11 +1192,37 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
   prev_offset = base_offset;
+
+  vec<rtx> shadow_mems;
+  vec<unsigned char> shadow_bytes;
+
+  shadow_mems.create(0);
+  shadow_bytes.create(0);
+
   for (l = length; l; l -= 2)
     {
       if (l == 2)
 	cur_shadow_byte = ASAN_STACK_MAGIC_RIGHT;
       offset = offsets[l - 1];
+      if (l != length && flag_asan_catch_misaligned)
+	{
+	  HOST_WIDE_INT aoff
+	    = base_offset + ((offset - base_offset)
+			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
+	      - ASAN_RED_ZONE_SIZE;
+	  if (aoff > prev_offset)
+	    {
+	      shadow_mem = adjust_address (shadow_mem, VOIDmode,
+					   (aoff - prev_offset)
+					   >> ASAN_SHADOW_SHIFT);
+	      prev_offset = aoff;
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_mems.safe_push (shadow_mem);
+	    }
+	}
       if ((offset - base_offset) & (ASAN_RED_ZONE_SIZE - 1))
 	{
 	  int i;
@@ -1212,13 +1237,13 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 	    if (aoff < offset)
 	      {
 		if (aoff < offset - (1 << ASAN_SHADOW_SHIFT) + 1)
-		  shadow_bytes[i] = 0;
+		  shadow_bytes.safe_push (0);
 		else
-		  shadow_bytes[i] = offset - aoff;
+		  shadow_bytes.safe_push (offset - aoff);
 	      }
 	    else
-	      shadow_bytes[i] = ASAN_STACK_MAGIC_PARTIAL;
-	  emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
+	      shadow_bytes.safe_push (ASAN_STACK_MAGIC_PARTIAL);
+	  shadow_mems.safe_push(shadow_mem);
 	  offset = aoff;
 	}
       while (offset <= offsets[l - 2] - ASAN_RED_ZONE_SIZE)
@@ -1227,12 +1252,21 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 				       (offset - prev_offset)
 				       >> ASAN_SHADOW_SHIFT);
 	  prev_offset = offset;
-	  memset (shadow_bytes, cur_shadow_byte, 4);
-	  emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_mems.safe_push(shadow_mem);
 	  offset += ASAN_RED_ZONE_SIZE;
 	}
       cur_shadow_byte = ASAN_STACK_MAGIC_MIDDLE;
     }
+  for (unsigned i = 0; flag_asan_catch_misaligned && i < shadow_bytes.length () - 1; i++)
+    if (shadow_bytes[i] == 0 && shadow_bytes[i + 1] > 0)
+      shadow_bytes[i] = 8 + (shadow_bytes[i + 1] > 7 ? 0 : shadow_bytes[i + 1]);
+  for (unsigned i = 0; i < shadow_mems.length (); i++)
+    emit_move_insn (shadow_mems[i], asan_shadow_cst (&shadow_bytes[i * 4]));
+  
   do_pending_stack_adjust ();
 
   /* Construct epilogue sequence.  */
@@ -1285,34 +1319,8 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
 
-  prev_offset = base_offset;
-  last_offset = base_offset;
-  last_size = 0;
-  for (l = length; l; l -= 2)
-    {
-      offset = base_offset + ((offsets[l - 1] - base_offset)
-			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1));
-      if (last_offset + last_size != offset)
-	{
-	  shadow_mem = adjust_address (shadow_mem, VOIDmode,
-				       (last_offset - prev_offset)
-				       >> ASAN_SHADOW_SHIFT);
-	  prev_offset = last_offset;
-	  asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
-	  last_offset = offset;
-	  last_size = 0;
-	}
-      last_size += base_offset + ((offsets[l - 2] - base_offset)
-				  & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
-		   - offset;
-    }
-  if (last_size)
-    {
-      shadow_mem = adjust_address (shadow_mem, VOIDmode,
-				   (last_offset - prev_offset)
-				   >> ASAN_SHADOW_SHIFT);
-      asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
-    }
+  for (unsigned i = 0; i < shadow_mems.length (); i++)
+    asan_clear_shadow (shadow_mems[i], 4);
 
   do_pending_stack_adjust ();
   if (lab)
@@ -2643,7 +2651,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
   tree base_addr = gimple_assign_lhs (g);
 
   tree t = NULL_TREE;
-  if (real_size_in_bytes >= 8)
+  if (real_size_in_bytes >= 8 && !flag_asan_catch_misaligned)
     {
       tree shadow = build_shadow_mem_access (&gsi, loc, base_addr,
 					     shadow_ptr_type);
@@ -2662,7 +2670,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
       /* Aligned (>= 8 bytes) can test just
 	 (real_size_in_bytes - 1 >= shadow), as base_addr & 7 is known
 	 to be 0.  */
-      if (align < 8)
+      if (align < 8 || flag_asan_catch_misaligned)
 	{
 	  gimple_seq_add_stmt (&seq, build_assign (BIT_AND_EXPR,
 						   base_addr, 7));
diff --git a/gcc/common.opt b/gcc/common.opt
index b49ac46..a7af95e 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -1161,6 +1161,12 @@ Common Driver Var(flag_report_bug)
 Collect and dump debug information into temporary file if ICE in C/C++
 compiler occured.
 
+fasan-catch-misaligned
+Common Driver Var(flag_asan_catch_misaligned)
+Catch invalid unaligned memory accesses.
+This option is needed to prevent potential ASan false negatives due to
+memory accesses that are not aligned to the type size, e.g. in the Linux kernel.
+
 fdump-passes
 Common Var(flag_dump_passes) Init(0)
 Dump optimization passes
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index 1534ed9..c85aa38 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -6680,6 +6680,12 @@ text / bss / data / heap / stack / dso start locations.
 Collect and dump debug information into temporary file if ICE in C/C++
 compiler occured.
 
+@item -fasan-catch-misaligned
+@opindex fasan-catch-misaligned
+Catch invalid unaligned memory accesses.
+This option is needed to prevent potential ASan false negatives due to
+memory accesses that are not aligned to the type size, e.g. in the Linux kernel.
+
 @item -fdump-unnumbered
 @opindex fdump-unnumbered
 When doing debugging dumps, suppress instruction numbers and address output.
diff --git a/gcc/opts.c b/gcc/opts.c
index 39c190d..b238b90 100644
--- a/gcc/opts.c
+++ b/gcc/opts.c
@@ -925,6 +925,9 @@ finish_options (struct gcc_options *opts, struct gcc_options *opts_set,
       opts->x_flag_aggressive_loop_optimizations = 0;
       opts->x_flag_strict_overflow = 0;
     }
+  
+  if (flag_asan_catch_misaligned && !opts->x_flag_sanitize)
+    error_at (loc, "-fasan-catch-misaligned is valid only with -fsanitize option");
 }
 
 #define LEFT_COLUMN	27
@@ -1662,6 +1665,7 @@ common_handle_option (struct gcc_options *opts,
 	    maybe_set_param_value (PARAM_ASAN_USE_AFTER_RETURN, 0,
 				   opts->x_param_values,
 				   opts_set->x_param_values);
+            flag_asan_catch_misaligned = true;
 	  }
 
 	break;
diff --git a/gcc/testsuite/c-c++-common/asan/misalign-catch.c b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
new file mode 100644
index 0000000..158efd4
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
@@ -0,0 +1,21 @@
+/* { dg-do run } */
+/* { dg-options "-fasan-catch-misaligned" } */
+/* { dg-shouldfail "asan" } */
+
+long long *ptr;
+
+__attribute__((noinline))
+void foo () {
+   ptr = ((long long int *)(((char *)ptr) + 1));
+   *ptr = 1;
+}
+
+int main()
+{
+   long long int local[9];
+   ptr = (long long *)&local[8];
+   foo ();
+   return 0;
+}
+
+/* { dg-output "ERROR: AddressSanitizer: stack-buffer-overflow.*(\n|\r\n|\r)" } */

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PINGv3][PATCH] ASan on unaligned accesses
  2015-03-26  6:53     ` [PINGv3][PATCH] " Marat Zakirov
@ 2015-03-26 11:50       ` Jakub Jelinek
  2015-03-26 12:29         ` [PINGv4][PATCH] " Marat Zakirov
  0 siblings, 1 reply; 16+ messages in thread
From: Jakub Jelinek @ 2015-03-26 11:50 UTC (permalink / raw)
  To: Marat Zakirov
  Cc: gcc-patches, Andrew Pinski, Kostya Serebryany, Dmitry Vyukov,
	Yury Gribov, Andrey Ryabinin

On Thu, Mar 26, 2015 at 09:53:03AM +0300, Marat Zakirov wrote:
> gcc/ChangeLog:
> 
> 2015-03-12  Marat Zakirov  <m.zakirov@samsung.com>
> 
> 	* asan.c (asan_emit_stack_protection): Support for misalign accesses.
> 	(asan_expand_check_ifn): Likewise.
> 	* common.opt: New flag -fasan-catch-misaligned.
> 	* doc/invoke.texi: New flag description.
> 	* opts.c (finish_options): Add check for new flag.
> 	(common_handle_option): Switch on flag if SANITIZE_KERNEL_ADDRESS.

Well, as all the other asan options are done as params, handling one option
differently is just too inconsistent.
The reason we went with params was that it was expected most users wouldn't
really need to play with the knobs, and we didn't want to be stuck in
supporting those forever (params aren't supported from one version to
another, can be replaced, removed, added etc.).
So, I think this really should be a param at this point and perhaps for GCC
6 we can discuss if we want to change the params into normal options.

	Jakub

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PINGv4][PATCH] ASan on unaligned accesses
  2015-03-26 11:50       ` Jakub Jelinek
@ 2015-03-26 12:29         ` Marat Zakirov
  2015-03-30 17:43           ` Jakub Jelinek
  0 siblings, 1 reply; 16+ messages in thread
From: Marat Zakirov @ 2015-03-26 12:29 UTC (permalink / raw)
  To: gcc-patches
  Cc: Jakub Jelinek, Andrew Pinski, Kostya Serebryany, Dmitry Vyukov,
	Yury Gribov, Andrey Ryabinin

[-- Attachment #1: Type: text/plain, Size: 1086 bytes --]



On 03/26/2015 02:50 PM, Jakub Jelinek wrote:
> On Thu, Mar 26, 2015 at 09:53:03AM +0300, Marat Zakirov wrote:
>> gcc/ChangeLog:
>>
>> 2015-03-12  Marat Zakirov  <m.zakirov@samsung.com>
>>
>> 	* asan.c (asan_emit_stack_protection): Support for misalign accesses.
>> 	(asan_expand_check_ifn): Likewise.
>> 	* common.opt: New flag -fasan-catch-misaligned.
>> 	* doc/invoke.texi: New flag description.
>> 	* opts.c (finish_options): Add check for new flag.
>> 	(common_handle_option): Switch on flag if SANITIZE_KERNEL_ADDRESS.
> Well, as all the other asan options are done as params, handling one option
> differently is just too inconsistent.
> The reason we went with params was that it was expected most users wouldn't
> really need to play with the knobs, and we didn't want to be stuck in
> supporting those forever (params aren't supported from one version to
> another, can be replaced, removed, added etc.).
> So, I think this really should be a param at this point and perhaps for GCC
> 6 we can discuss if we want to change the params into normal options.
>
> 	Jakub
>
Fixed.

[-- Attachment #2: mavdt-95_18.diff --]
[-- Type: text/x-patch, Size: 8875 bytes --]

gcc/ChangeLog:

2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>

	* asan.c (asan_emit_stack_protection): Support misaligned accesses.
	(asan_expand_check_ifn): Likewise.
	* params.def: New param asan-catch-misaligned.
	* params.h: New param ASAN_CATCH_MISALIGNED.
	* doc/invoke.texi: New asan param description.

gcc/testsuite/ChangeLog:

2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>

	* c-c++-common/asan/misalign-catch.c: New test.


diff --git a/gcc/asan.c b/gcc/asan.c
index 9e4a629..0ac1a11 100644
--- a/gcc/asan.c
+++ b/gcc/asan.c
@@ -1050,7 +1050,6 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   rtx_code_label *lab;
   rtx_insn *insns;
   char buf[30];
-  unsigned char shadow_bytes[4];
   HOST_WIDE_INT base_offset = offsets[length - 1];
   HOST_WIDE_INT base_align_bias = 0, offset, prev_offset;
   HOST_WIDE_INT asan_frame_size = offsets[0] - base_offset;
@@ -1059,6 +1058,7 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   unsigned char cur_shadow_byte = ASAN_STACK_MAGIC_LEFT;
   tree str_cst, decl, id;
   int use_after_return_class = -1;
+  bool misalign = (flag_sanitize & SANITIZE_KERNEL_ADDRESS) || ASAN_CATCH_MISALIGNED;
 
   if (shadow_ptr_types[0] == NULL_TREE)
     asan_init_shadow_ptr_types ();
@@ -1193,11 +1193,37 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
   prev_offset = base_offset;
+
+  vec<rtx> shadow_mems;
+  vec<unsigned char> shadow_bytes;
+
+  shadow_mems.create(0);
+  shadow_bytes.create(0);
+
   for (l = length; l; l -= 2)
     {
       if (l == 2)
 	cur_shadow_byte = ASAN_STACK_MAGIC_RIGHT;
       offset = offsets[l - 1];
+      if (l != length && misalign)
+	{
+	  HOST_WIDE_INT aoff
+	    = base_offset + ((offset - base_offset)
+			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
+	      - ASAN_RED_ZONE_SIZE;
+	  if (aoff > prev_offset)
+	    {
+	      shadow_mem = adjust_address (shadow_mem, VOIDmode,
+					   (aoff - prev_offset)
+					   >> ASAN_SHADOW_SHIFT);
+	      prev_offset = aoff;
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_mems.safe_push (shadow_mem);
+	    }
+	}
       if ((offset - base_offset) & (ASAN_RED_ZONE_SIZE - 1))
 	{
 	  int i;
@@ -1212,13 +1238,13 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 	    if (aoff < offset)
 	      {
 		if (aoff < offset - (1 << ASAN_SHADOW_SHIFT) + 1)
-		  shadow_bytes[i] = 0;
+		  shadow_bytes.safe_push (0);
 		else
-		  shadow_bytes[i] = offset - aoff;
+		  shadow_bytes.safe_push (offset - aoff);
 	      }
 	    else
-	      shadow_bytes[i] = ASAN_STACK_MAGIC_PARTIAL;
-	  emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
+	      shadow_bytes.safe_push (ASAN_STACK_MAGIC_PARTIAL);
+	  shadow_mems.safe_push(shadow_mem);
 	  offset = aoff;
 	}
       while (offset <= offsets[l - 2] - ASAN_RED_ZONE_SIZE)
@@ -1227,12 +1253,21 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 				       (offset - prev_offset)
 				       >> ASAN_SHADOW_SHIFT);
 	  prev_offset = offset;
-	  memset (shadow_bytes, cur_shadow_byte, 4);
-	  emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_mems.safe_push(shadow_mem);
 	  offset += ASAN_RED_ZONE_SIZE;
 	}
       cur_shadow_byte = ASAN_STACK_MAGIC_MIDDLE;
     }
+  for (unsigned i = 0; misalign && i < shadow_bytes.length () - 1; i++)
+    if (shadow_bytes[i] == 0 && shadow_bytes[i + 1] > 0)
+      shadow_bytes[i] = 8 + (shadow_bytes[i + 1] > 7 ? 0 : shadow_bytes[i + 1]);
+  for (unsigned i = 0; i < shadow_mems.length (); i++)
+    emit_move_insn (shadow_mems[i], asan_shadow_cst (&shadow_bytes[i * 4]));
+  
   do_pending_stack_adjust ();
 
   /* Construct epilogue sequence.  */
@@ -1285,34 +1320,8 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
 
-  prev_offset = base_offset;
-  last_offset = base_offset;
-  last_size = 0;
-  for (l = length; l; l -= 2)
-    {
-      offset = base_offset + ((offsets[l - 1] - base_offset)
-			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1));
-      if (last_offset + last_size != offset)
-	{
-	  shadow_mem = adjust_address (shadow_mem, VOIDmode,
-				       (last_offset - prev_offset)
-				       >> ASAN_SHADOW_SHIFT);
-	  prev_offset = last_offset;
-	  asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
-	  last_offset = offset;
-	  last_size = 0;
-	}
-      last_size += base_offset + ((offsets[l - 2] - base_offset)
-				  & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
-		   - offset;
-    }
-  if (last_size)
-    {
-      shadow_mem = adjust_address (shadow_mem, VOIDmode,
-				   (last_offset - prev_offset)
-				   >> ASAN_SHADOW_SHIFT);
-      asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
-    }
+  for (unsigned i = 0; i < shadow_mems.length (); i++)
+    asan_clear_shadow (shadow_mems[i], 4);
 
   do_pending_stack_adjust ();
   if (lab)
@@ -2546,6 +2555,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
   gimple g = gsi_stmt (*iter);
   location_t loc = gimple_location (g);
 
+  bool misalign = (flag_sanitize & SANITIZE_KERNEL_ADDRESS) || ASAN_CATCH_MISALIGNED;
   bool recover_p
     = (flag_sanitize & flag_sanitize_recover & SANITIZE_KERNEL_ADDRESS) != 0;
 
@@ -2643,7 +2653,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
   tree base_addr = gimple_assign_lhs (g);
 
   tree t = NULL_TREE;
-  if (real_size_in_bytes >= 8)
+  if (real_size_in_bytes >= 8 && !misalign)
     {
       tree shadow = build_shadow_mem_access (&gsi, loc, base_addr,
 					     shadow_ptr_type);
@@ -2662,7 +2672,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
       /* Aligned (>= 8 bytes) can test just
 	 (real_size_in_bytes - 1 >= shadow), as base_addr & 7 is known
 	 to be 0.  */
-      if (align < 8)
+      if (align < 8 || misalign)
 	{
 	  gimple_seq_add_stmt (&seq, build_assign (BIT_AND_EXPR,
 						   base_addr, 7));
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index 9749727..ea7f6ed 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -10956,6 +10956,12 @@ is greater or equal to this number, use callbacks instead of inline checks.
 E.g. to disable inline code use
 @option{--param asan-instrumentation-with-call-threshold=0}.
 
+@item asan-catch-misaligned
+Catch invalid unaligned memory accesses.
+This option is needed to prevent ASan false negatives on memory accesses not
+aligned to their type size, as in applications like the Linux kernel.
+E.g. to disable these checks use @option{--param asan-catch-misaligned=0}.
+
 @item chkp-max-ctor-size
 Static constructors generated by Pointer Bounds Checker may become very
 large and significantly increase compile time at optimization level
diff --git a/gcc/params.def b/gcc/params.def
index f890cb0..a8d35b5 100644
--- a/gcc/params.def
+++ b/gcc/params.def
@@ -1139,6 +1139,11 @@ DEFPARAM (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD,
          "in function becomes greater or equal to this number",
          7000, 0, INT_MAX)
 
+DEFPARAM (PARAM_ASAN_CATCH_MISALIGNED,
+         "asan-catch-misaligned",
+         "catch unaligned access",
+         0, 1, 1)
+
 DEFPARAM (PARAM_UNINIT_CONTROL_DEP_ATTEMPTS,
 	  "uninit-control-dep-attempts",
 	  "Maximum number of nested calls to search for control dependencies "
diff --git a/gcc/params.h b/gcc/params.h
index 28d077f..c556ca6 100644
--- a/gcc/params.h
+++ b/gcc/params.h
@@ -240,5 +240,7 @@ extern void init_param_values (int *params);
   PARAM_VALUE (PARAM_ASAN_USE_AFTER_RETURN)
 #define ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD \
   PARAM_VALUE (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD)
+#define ASAN_CATCH_MISALIGNED \
+  PARAM_VALUE (PARAM_ASAN_CATCH_MISALIGNED)
 
 #endif /* ! GCC_PARAMS_H */
diff --git a/gcc/testsuite/c-c++-common/asan/misalign-catch.c b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
new file mode 100644
index 0000000..131cf65
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
@@ -0,0 +1,21 @@
+/* { dg-do run } */
+/* { dg-options "--param asan-catch-misaligned=1" } */
+/* { dg-shouldfail "asan" } */
+
+long long *ptr;
+
+__attribute__((noinline))
+void foo () {
+   ptr = ((long long int *)(((char *)ptr) + 1));
+   *ptr = 1;
+}
+
+int main()
+{
+   long long int local[9];
+   ptr = (long long *)&local[8];
+   foo ();
+   return 0;
+}
+
+/* { dg-output "ERROR: AddressSanitizer: stack-buffer-overflow.*(\n|\r\n|\r)" } */

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PINGv4][PATCH] ASan on unaligned accesses
  2015-03-26 12:29         ` [PINGv4][PATCH] " Marat Zakirov
@ 2015-03-30 17:43           ` Jakub Jelinek
  2015-04-07 10:16             ` [PINGv5][PATCH] " Marat Zakirov
  0 siblings, 1 reply; 16+ messages in thread
From: Jakub Jelinek @ 2015-03-30 17:43 UTC (permalink / raw)
  To: Marat Zakirov
  Cc: gcc-patches, Andrew Pinski, Kostya Serebryany, Dmitry Vyukov,
	Yury Gribov, Andrey Ryabinin

On Thu, Mar 26, 2015 at 03:28:53PM +0300, Marat Zakirov wrote:
> 2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>
> 
> 	* asan.c (asan_emit_stack_protection): Support for misalign accesses. 
> 	(asan_expand_check_ifn): Likewise. 
> 	* params.def: New option asan-catch-misaligned.
> 	* params.h: New param ASAN_CATCH_MISALIGNED.
> 	* doc/invoke.texi: New asan param description.

Can you please start by explaining the asan_emit_stack_protection changes?
What is the problem there, what do you want to achieve etc.?
Is it to support ABI-violating stack pointer alignment, or something
different?  If it is the former, the compiler knows (or should be made
aware of) what the stack alignment is.
The asan_expand_check_ifn change looks reasonable.

Also, the changes regress code quality without the parameter: say on the
testcase you've added at -O2 -fsanitize=address (no param used),
on x86_64 the patch causes the undesirable
 	movl	$0, 2147450880(%rbp)
-	movq	$0, 2147450892(%rbp)
+	movl	$0, 2147450892(%rbp)
+	movl	$0, 2147450896(%rbp)
change.

> --- a/gcc/asan.c
> +++ b/gcc/asan.c
> @@ -1050,7 +1050,6 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
>    rtx_code_label *lab;
>    rtx_insn *insns;
>    char buf[30];
> -  unsigned char shadow_bytes[4];

Do you really need to do that and why?

>    HOST_WIDE_INT base_offset = offsets[length - 1];
>    HOST_WIDE_INT base_align_bias = 0, offset, prev_offset;
>    HOST_WIDE_INT asan_frame_size = offsets[0] - base_offset;
> @@ -1059,6 +1058,7 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
>    unsigned char cur_shadow_byte = ASAN_STACK_MAGIC_LEFT;
>    tree str_cst, decl, id;
>    int use_after_return_class = -1;
> +  bool misalign = (flag_sanitize & SANITIZE_KERNEL_ADDRESS) || ASAN_CATCH_MISALIGNED;

Too long line.

> +  vec<rtx> shadow_mems;
> +  vec<unsigned char> shadow_bytes;
> +
> +  shadow_mems.create(0);
> +  shadow_bytes.create(0);

2x missing space before (.

> +	  shadow_mems.safe_push(shadow_mem);

Similarly.

> +	  shadow_bytes.safe_push (cur_shadow_byte);
> +	  shadow_bytes.safe_push (cur_shadow_byte);
> +	  shadow_bytes.safe_push (cur_shadow_byte);
> +	  shadow_mems.safe_push(shadow_mem);

Similarly.

>      }
> +  for (unsigned i = 0; misalign && i < shadow_bytes.length () - 1; i++)
> +    if (shadow_bytes[i] == 0 && shadow_bytes[i + 1] > 0)
> +      shadow_bytes[i] = 8 + (shadow_bytes[i + 1] > 7 ? 0 : shadow_bytes[i + 1]);

Too long line.

> -  prev_offset = base_offset;
> -  last_offset = base_offset;
> -  last_size = 0;
> -  for (l = length; l; l -= 2)
> -    {
> -      offset = base_offset + ((offsets[l - 1] - base_offset)
> -			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1));
> -      if (last_offset + last_size != offset)
> -	{
> -	  shadow_mem = adjust_address (shadow_mem, VOIDmode,
> -				       (last_offset - prev_offset)
> -				       >> ASAN_SHADOW_SHIFT);
> -	  prev_offset = last_offset;
> -	  asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
> -	  last_offset = offset;
> -	  last_size = 0;
> -	}
> -      last_size += base_offset + ((offsets[l - 2] - base_offset)
> -				  & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
> -		   - offset;
> -    }
> -  if (last_size)
> -    {
> -      shadow_mem = adjust_address (shadow_mem, VOIDmode,
> -				   (last_offset - prev_offset)
> -				   >> ASAN_SHADOW_SHIFT);
> -      asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
> -    }
> +  for (unsigned i = 0; i < shadow_mems.length (); i++)
> +    asan_clear_shadow (shadow_mems[i], 4);

I bet this change causes the regression I've mentioned above.

> @@ -2546,6 +2555,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
>    gimple g = gsi_stmt (*iter);
>    location_t loc = gimple_location (g);
>  
> +  bool misalign = (flag_sanitize & SANITIZE_KERNEL_ADDRESS) || ASAN_CATCH_MISALIGNED;

Too long line.

	Jakub

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PINGv5][PATCH] ASan on unaligned accesses
  2015-03-30 17:43           ` Jakub Jelinek
@ 2015-04-07 10:16             ` Marat Zakirov
  2015-04-07 12:22               ` Jakub Jelinek
  0 siblings, 1 reply; 16+ messages in thread
From: Marat Zakirov @ 2015-04-07 10:16 UTC (permalink / raw)
  To: Jakub Jelinek
  Cc: gcc-patches, Andrew Pinski, Kostya Serebryany, Dmitry Vyukov,
	Yury Gribov, Andrey Ryabinin

[-- Attachment #1: Type: text/plain, Size: 1367 bytes --]

Hi Jakub!

On 03/30/2015 08:42 PM, Jakub Jelinek wrote:
> Can you please start by explaining the asan_emit_stack_protection 
> changes? What is the problem there, what do you want to achieve etc.? 
> Is it to support ABI violating stack pointer alignment, or something 
> different? If so, the compiler knows (or should be aware) what the 
> stack alignment is. The asan_expand_check_ifn change looks reasonable. 
This patch is needed to support ASan on code (like the Linux kernel) which
does not care about compiler stack alignment - see the example from the
attached patch:

long long *ptr;

__attribute__((noinline))
void foo () {
    ptr = ((long long int *)(((char *)ptr) + 1));
    *ptr = 1;
}

int main ()
{
    long long int local[9];
    ptr = (long long *)&local[8];
    foo ();
    return 0;
}

In this example the current ASan won't find the invalid memory write.  The
attached patch (with the new --param asan-catch-misaligned=1) resolves this
problem.
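
To make this concrete, here is a rough C model of the misalignment-aware
check (a sketch only, with a made-up helper name; it is not the GIMPLE/RTL
the compiler emits).  Shadow value 0 means the granule is fully addressable,
1..7 means only the first N bytes are, and with this patch the last fully
addressable granule before a redzone is additionally marked 8, so an access
that starts inside it but spills past it is caught:

#include <stddef.h>
#include <stdint.h>

/* Sketch: decide whether an access of SIZE bytes at ADDR is invalid.  */
static int
access_is_bad (const int8_t *shadow_mem, uintptr_t addr, size_t size)
{
  int8_t s = shadow_mem[addr >> 3];  /* shadow byte of the first granule */
  if (s == 0)
    return 0;  /* fully addressable and, with the new encoding, not the
                  last granule before a redzone */
  /* Redzone magic values are negative, so any access reaching them is bad;
     positive values bound how far an access starting in this granule may
     reach.  */
  return (int) ((addr & 7) + size - 1) >= (int) s;
}

In the testcase (assuming local is 8-byte aligned) the write covers bytes
65..72 counted from the start of local, i.e. it spills one byte into the
right redzone.  The granule holding byte 65 is fully addressable, so the
current >= 8-byte check, which only looks at that one shadow byte, reports
nothing; with the new encoding that granule holds 8 and
(65 & 7) + 8 - 1 == 8 triggers the report.
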
> Also, the changes regress code quality without the parameter, say on 
> the testcase you've added at -O2 -fsanitize=address (no param used), 
> on x86_64 the patch causes undesirable movl $0, 2147450880(%rbp) - 
> movq $0, 2147450892(%rbp) + movl $0, 2147450892(%rbp) + movl $0, 
> 2147450896(%rbp) change. 
  Fixed in the new patch.  But I believe joining movs (two movl into one
movq) is an x86 RTL job - not ASan's.


--Marat



[-- Attachment #2: mavdt-95_19.diff --]
[-- Type: text/x-patch, Size: 8986 bytes --]

gcc/ChangeLog:

2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>

	* asan.c (asan_emit_stack_protection): Support for misalign accesses. 
	(asan_expand_check_ifn): Likewise. 
	* params.def: New option asan-catch-misaligned.
	* params.h: New param ASAN_CATCH_MISALIGNED.
	* doc/invoke.texi: New asan param description.

gcc/testsuite/ChangeLog:

2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>

	* c-c++-common/asan/misalign-catch.c: New test.


diff --git a/gcc/asan.c b/gcc/asan.c
index 9e4a629..7f60014 100644
--- a/gcc/asan.c
+++ b/gcc/asan.c
@@ -1050,7 +1050,6 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   rtx_code_label *lab;
   rtx_insn *insns;
   char buf[30];
-  unsigned char shadow_bytes[4];
   HOST_WIDE_INT base_offset = offsets[length - 1];
   HOST_WIDE_INT base_align_bias = 0, offset, prev_offset;
   HOST_WIDE_INT asan_frame_size = offsets[0] - base_offset;
@@ -1059,6 +1058,8 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   unsigned char cur_shadow_byte = ASAN_STACK_MAGIC_LEFT;
   tree str_cst, decl, id;
   int use_after_return_class = -1;
+  bool misalign = (flag_sanitize & SANITIZE_KERNEL_ADDRESS)
+		  || ASAN_CATCH_MISALIGNED;
 
   if (shadow_ptr_types[0] == NULL_TREE)
     asan_init_shadow_ptr_types ();
@@ -1193,11 +1194,37 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
   prev_offset = base_offset;
+
+  vec<rtx> shadow_mems;
+  vec<unsigned char> shadow_bytes;
+
+  shadow_mems.create (0);
+  shadow_bytes.create (0);
+
   for (l = length; l; l -= 2)
     {
       if (l == 2)
 	cur_shadow_byte = ASAN_STACK_MAGIC_RIGHT;
       offset = offsets[l - 1];
+      if (l != length && misalign)
+	{
+	  HOST_WIDE_INT aoff
+	    = base_offset + ((offset - base_offset)
+			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
+	      - ASAN_RED_ZONE_SIZE;
+	  if (aoff > prev_offset)
+	    {
+	      shadow_mem = adjust_address (shadow_mem, VOIDmode,
+					   (aoff - prev_offset)
+					   >> ASAN_SHADOW_SHIFT);
+	      prev_offset = aoff;
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_mems.safe_push (shadow_mem);
+	    }
+	}
       if ((offset - base_offset) & (ASAN_RED_ZONE_SIZE - 1))
 	{
 	  int i;
@@ -1212,13 +1239,13 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 	    if (aoff < offset)
 	      {
 		if (aoff < offset - (1 << ASAN_SHADOW_SHIFT) + 1)
-		  shadow_bytes[i] = 0;
+		  shadow_bytes.safe_push (0);
 		else
-		  shadow_bytes[i] = offset - aoff;
+		  shadow_bytes.safe_push (offset - aoff);
 	      }
 	    else
-	      shadow_bytes[i] = ASAN_STACK_MAGIC_PARTIAL;
-	  emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
+	      shadow_bytes.safe_push (ASAN_STACK_MAGIC_PARTIAL);
+	  shadow_mems.safe_push (shadow_mem);
 	  offset = aoff;
 	}
       while (offset <= offsets[l - 2] - ASAN_RED_ZONE_SIZE)
@@ -1227,12 +1254,21 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 				       (offset - prev_offset)
 				       >> ASAN_SHADOW_SHIFT);
 	  prev_offset = offset;
-	  memset (shadow_bytes, cur_shadow_byte, 4);
-	  emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_mems.safe_push (shadow_mem);
 	  offset += ASAN_RED_ZONE_SIZE;
 	}
       cur_shadow_byte = ASAN_STACK_MAGIC_MIDDLE;
     }
+  for (unsigned i = 0; misalign && i < shadow_bytes.length () - 1; i++)
+    if (shadow_bytes[i] == 0 && shadow_bytes[i + 1] > 0)
+      shadow_bytes[i] = 8 + (shadow_bytes[i + 1] > 7 ? 0 : shadow_bytes[i + 1]);
+  for (unsigned i = 0; i < shadow_mems.length (); i++)
+    emit_move_insn (shadow_mems[i], asan_shadow_cst (&shadow_bytes[i * 4]));
+
   do_pending_stack_adjust ();
 
   /* Construct epilogue sequence.  */
@@ -1285,33 +1321,14 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
 
-  prev_offset = base_offset;
-  last_offset = base_offset;
-  last_size = 0;
-  for (l = length; l; l -= 2)
-    {
-      offset = base_offset + ((offsets[l - 1] - base_offset)
-			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1));
-      if (last_offset + last_size != offset)
-	{
-	  shadow_mem = adjust_address (shadow_mem, VOIDmode,
-				       (last_offset - prev_offset)
-				       >> ASAN_SHADOW_SHIFT);
-	  prev_offset = last_offset;
-	  asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
-	  last_offset = offset;
-	  last_size = 0;
-	}
-      last_size += base_offset + ((offsets[l - 2] - base_offset)
-				  & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
-		   - offset;
-    }
-  if (last_size)
+  for (unsigned i = 0; i < shadow_mems.length (); i++)
     {
-      shadow_mem = adjust_address (shadow_mem, VOIDmode,
-				   (last_offset - prev_offset)
-				   >> ASAN_SHADOW_SHIFT);
-      asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
+      if (shadow_bytes[i*4+3] == ASAN_STACK_MAGIC_PARTIAL)
+      {
+	asan_clear_shadow (shadow_mems[i], 8);
+	i++;
+      } else
+	asan_clear_shadow (shadow_mems[i], 4);
     }
 
   do_pending_stack_adjust ();
@@ -2546,6 +2563,8 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
   gimple g = gsi_stmt (*iter);
   location_t loc = gimple_location (g);
 
+  bool misalign = (flag_sanitize & SANITIZE_KERNEL_ADDRESS)
+		  || ASAN_CATCH_MISALIGNED;
   bool recover_p
     = (flag_sanitize & flag_sanitize_recover & SANITIZE_KERNEL_ADDRESS) != 0;
 
@@ -2643,7 +2662,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
   tree base_addr = gimple_assign_lhs (g);
 
   tree t = NULL_TREE;
-  if (real_size_in_bytes >= 8)
+  if (real_size_in_bytes >= 8 && !misalign)
     {
       tree shadow = build_shadow_mem_access (&gsi, loc, base_addr,
 					     shadow_ptr_type);
@@ -2662,7 +2681,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
       /* Aligned (>= 8 bytes) can test just
 	 (real_size_in_bytes - 1 >= shadow), as base_addr & 7 is known
 	 to be 0.  */
-      if (align < 8)
+      if (align < 8 || misalign)
 	{
 	  gimple_seq_add_stmt (&seq, build_assign (BIT_AND_EXPR,
 						   base_addr, 7));
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index bf8afad..ee67e45 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -10965,6 +10965,12 @@ is greater or equal to this number, use callbacks instead of inline checks.
 E.g. to disable inline code use
 @option{--param asan-instrumentation-with-call-threshold=0}.
 
+@item asan-catch-misaligned
+Catch invalid unaligned memory accesses.
+This option is needed to prevent ASan false negatives on memory accesses not
+aligned to their type size, as in applications like the Linux kernel.
+E.g. to disable these checks use @option{--param asan-catch-misaligned=0}.
+
 @item chkp-max-ctor-size
 Static constructors generated by Pointer Bounds Checker may become very
 large and significantly increase compile time at optimization level
diff --git a/gcc/params.def b/gcc/params.def
index 5e2c769..d96db71 100644
--- a/gcc/params.def
+++ b/gcc/params.def
@@ -1151,6 +1151,11 @@ DEFPARAM (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD,
          "in function becomes greater or equal to this number",
          7000, 0, INT_MAX)
 
+DEFPARAM (PARAM_ASAN_CATCH_MISALIGNED,
+	  "asan-catch-misaligned",
+	  "catch unaligned access",
+	  0, 1, 1)
+
 DEFPARAM (PARAM_UNINIT_CONTROL_DEP_ATTEMPTS,
 	  "uninit-control-dep-attempts",
 	  "Maximum number of nested calls to search for control dependencies "
diff --git a/gcc/params.h b/gcc/params.h
index 28d077f..c556ca6 100644
--- a/gcc/params.h
+++ b/gcc/params.h
@@ -240,5 +240,7 @@ extern void init_param_values (int *params);
   PARAM_VALUE (PARAM_ASAN_USE_AFTER_RETURN)
 #define ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD \
   PARAM_VALUE (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD)
+#define ASAN_CATCH_MISALIGNED \
+  PARAM_VALUE (PARAM_ASAN_CATCH_MISALIGNED)
 
 #endif /* ! GCC_PARAMS_H */
diff --git a/gcc/testsuite/c-c++-common/asan/misalign-catch.c b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
new file mode 100644
index 0000000..ae5dfc4
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
@@ -0,0 +1,21 @@
+/* { dg-do run } */
+/* { dg-options "--param asan-catch-misaligned=1" } */
+/* { dg-shouldfail "asan" } */
+
+long long *ptr;
+
+__attribute__((noinline))
+void foo () {
+   ptr = ((long long int *)(((char *)ptr) + 1));
+   *ptr = 1;
+}
+
+int main ()
+{
+   long long int local[9];
+   ptr = (long long *)&local[8];
+   foo ();
+   return 0;
+}
+
+/* { dg-output "ERROR: AddressSanitizer: stack-buffer-overflow.*(\n|\r\n|\r)" } */

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PINGv5][PATCH] ASan on unaligned accesses
  2015-04-07 10:16             ` [PINGv5][PATCH] " Marat Zakirov
@ 2015-04-07 12:22               ` Jakub Jelinek
  2015-05-12 11:23                 ` [PINGv6][PATCH] " Marat Zakirov
  0 siblings, 1 reply; 16+ messages in thread
From: Jakub Jelinek @ 2015-04-07 12:22 UTC (permalink / raw)
  To: Marat Zakirov
  Cc: gcc-patches, Andrew Pinski, Kostya Serebryany, Dmitry Vyukov,
	Yury Gribov, Andrey Ryabinin

On Tue, Apr 07, 2015 at 01:16:10PM +0300, Marat Zakirov wrote:
> On 03/30/2015 08:42 PM, Jakub Jelinek wrote:
> >Can you please start by explaining the asan_emit_stack_protection changes?
> >What is the problem there, what do you want to achieve etc.? Is it to
> >support ABI violating stack pointer alignment, or something different? If
> >so, the compiler knows (or should be aware) what the stack alignment is.
> >The asan_expand_check_ifn change looks reasonable.
> This patch is needed to support ASan on codes (like Linux kernel) which do
> not care about compiler stack alignment - see example from the attached
> patch:

That is not a sufficient description for me.

> long long *ptr;
> 
> __attribute__((noinline))
> void foo () {
>    ptr = ((long long int *)(((char *)ptr) + 1));
>    *ptr = 1;
> }
> 
> int main ()
> {
>    long long int local[9];
>    ptr = (long long *)&local[8];
>    foo ();
>    return 0;
> }

This testcase has, at least when compiled with say -O2 -fsanitize=address,
the local array aligned, so I don't understand why you would need any special
changes in the prologue and/or epilogue of functions for that; the
asan_expand_check_ifn change of course makes sense.  How are the automatic
misaligned variables different from say heap allocated ones, or global vars
etc.?
So can you explain the rationale for the prologue/epilogue changes and what
you are trying to do with them?  Is the kernel using some non-standard
option like -mincoming-stack-boundary= etc.?
If so, perhaps you should make the changes dependent on that?

>  Fixed in new patch.  But I believe joining movs (2 movl to one movq) is a
> x86 RTL job - not ASan.

Well, an RTL solution is what I've tried at http://gcc.gnu.org/PR22141, but
it gave mixed results, so either it needs more cost tuning for when it is
desirable and when it is not, or perhaps it is better to do that on GIMPLE
instead, together with trying to optimize bitfield accesses and other cases
of adjacent location accesses.  But if we handle that on GIMPLE, it won't
really affect what the asan RTL emitting code produces.
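
For illustration, the kind of adjacent accesses such a pass would try to
merge (a made-up example, not taken from PR22141):

void
clear_pair (int *p)
{
  p[0] = 0;  /* movl $0, (%rdi)  */
  p[1] = 0;  /* movl $0, 4(%rdi); the pair could become a single
                movq $0, (%rdi) when the alignment allows it */
}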

	Jakub

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PINGv6][PATCH] ASan on unaligned accesses
  2015-04-07 12:22               ` Jakub Jelinek
@ 2015-05-12 11:23                 ` Marat Zakirov
  2015-05-12 11:45                   ` Yury Gribov
  2015-05-20  7:19                   ` [PINGv7][PATCH] " Marat Zakirov
  0 siblings, 2 replies; 16+ messages in thread
From: Marat Zakirov @ 2015-05-12 11:23 UTC (permalink / raw)
  To: Jakub Jelinek
  Cc: gcc-patches, Andrew Pinski, Kostya Serebryany, Dmitry Vyukov,
	Yury Gribov, Andrey Ryabinin

[-- Attachment #1: Type: text/plain, Size: 971 bytes --]

On 04/07/2015 03:22 PM, Jakub Jelinek wrote:
> How are the automatic misaligned variables different from say heap 
> allocated ones, or global vars etc.? 
No difference, you are right Jakub.  Shadow memory initialization for heap
values and globals of course also needs to be changed, but that is a task
for libsanitizer, not for the ASan part this patch covers.  The fix for
libsanitizer to support unaligned heap and globals will be committed as a
separate patch.
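
Just to illustrate what the libsanitizer side would have to do, here is a
hypothetical sketch (the function name and interface are made up, this is
not the actual runtime code) of poisoning the shadow for one object
followed by a redzone, with the same boundary marking the stack patch uses:

#include <stddef.h>
#include <stdint.h>

static void
poison_object_shadow (int8_t *shadow, size_t object_size,
                      int8_t redzone_magic, int misalign)
{
  size_t g = 0;
  for (; (g + 1) * 8 <= object_size; g++)
    shadow[g] = 0;  /* fully addressable granules */
  size_t partial = object_size % 8;
  if (misalign && g > 0)
    /* 8 when the next granule is the redzone, 8 + N when it still has
       N addressable bytes, mirroring the stack encoding.  */
    shadow[g - 1] = 8 + partial;
  if (partial)
    shadow[g++] = partial;  /* partially addressable granule */
  shadow[g] = redzone_magic;  /* first redzone granule */
}
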
> Well, a RTL solution I've tried at http://gcc.gnu.org/PR22141, but it gave
> mixed results, so either it needs more cost tuning when it is desirable and
> when it is not, or perhaps better do that still on GIMPLE instead, together
> with trying to optimize bitfield accesses and other cases of adjacent
> location accesses.  But if we handle that on GIMPLE, it won't really affect
> what asan RTL emitting code produces.
>
> 	Jakub
>
I fixed the issue with 'movq' you mentioned in a previous mail.

--Marat


[-- Attachment #2: mavdt-95_20.diff --]
[-- Type: text/x-patch, Size: 8979 bytes --]

gcc/ChangeLog:

2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>

	* asan.c (asan_emit_stack_protection): Support for misalign accesses. 
	(asan_expand_check_ifn): Likewise. 
	* params.def: New option asan-catch-misaligned.
	* params.h: New param ASAN_CATCH_MISALIGNED.
	* doc/invoke.texi: New asan param description.

gcc/testsuite/ChangeLog:

2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>

	* c-c++-common/asan/misalign-catch.c: New test.


diff --git a/gcc/asan.c b/gcc/asan.c
index 9e4a629..f9d052f 100644
--- a/gcc/asan.c
+++ b/gcc/asan.c
@@ -1050,7 +1050,6 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   rtx_code_label *lab;
   rtx_insn *insns;
   char buf[30];
-  unsigned char shadow_bytes[4];
   HOST_WIDE_INT base_offset = offsets[length - 1];
   HOST_WIDE_INT base_align_bias = 0, offset, prev_offset;
   HOST_WIDE_INT asan_frame_size = offsets[0] - base_offset;
@@ -1059,6 +1058,8 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   unsigned char cur_shadow_byte = ASAN_STACK_MAGIC_LEFT;
   tree str_cst, decl, id;
   int use_after_return_class = -1;
+  bool misalign = (flag_sanitize & SANITIZE_KERNEL_ADDRESS)
+		  || ASAN_CATCH_MISALIGNED;
 
   if (shadow_ptr_types[0] == NULL_TREE)
     asan_init_shadow_ptr_types ();
@@ -1193,11 +1194,37 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
   prev_offset = base_offset;
+
+  vec<rtx> shadow_mems;
+  vec<unsigned char> shadow_bytes;
+
+  shadow_mems.create (0);
+  shadow_bytes.create (0);
+
   for (l = length; l; l -= 2)
     {
       if (l == 2)
 	cur_shadow_byte = ASAN_STACK_MAGIC_RIGHT;
       offset = offsets[l - 1];
+      if (l != length && misalign)
+	{
+	  HOST_WIDE_INT aoff
+	    = base_offset + ((offset - base_offset)
+			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
+	      - ASAN_RED_ZONE_SIZE;
+	  if (aoff > prev_offset)
+	    {
+	      shadow_mem = adjust_address (shadow_mem, VOIDmode,
+					   (aoff - prev_offset)
+					   >> ASAN_SHADOW_SHIFT);
+	      prev_offset = aoff;
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_mems.safe_push (shadow_mem);
+	    }
+	}
       if ((offset - base_offset) & (ASAN_RED_ZONE_SIZE - 1))
 	{
 	  int i;
@@ -1212,13 +1239,13 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 	    if (aoff < offset)
 	      {
 		if (aoff < offset - (1 << ASAN_SHADOW_SHIFT) + 1)
-		  shadow_bytes[i] = 0;
+		  shadow_bytes.safe_push (0);
 		else
-		  shadow_bytes[i] = offset - aoff;
+		  shadow_bytes.safe_push (offset - aoff);
 	      }
 	    else
-	      shadow_bytes[i] = ASAN_STACK_MAGIC_PARTIAL;
-	  emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
+	      shadow_bytes.safe_push (ASAN_STACK_MAGIC_PARTIAL);
+	  shadow_mems.safe_push (shadow_mem);
 	  offset = aoff;
 	}
       while (offset <= offsets[l - 2] - ASAN_RED_ZONE_SIZE)
@@ -1227,12 +1254,21 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 				       (offset - prev_offset)
 				       >> ASAN_SHADOW_SHIFT);
 	  prev_offset = offset;
-	  memset (shadow_bytes, cur_shadow_byte, 4);
-	  emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_mems.safe_push (shadow_mem);
 	  offset += ASAN_RED_ZONE_SIZE;
 	}
       cur_shadow_byte = ASAN_STACK_MAGIC_MIDDLE;
     }
+  for (unsigned i = 0; misalign && i < shadow_bytes.length () - 1; i++)
+    if (shadow_bytes[i] == 0 && shadow_bytes[i + 1] > 0)
+      shadow_bytes[i] = 8 + (shadow_bytes[i + 1] > 7 ? 0 : shadow_bytes[i + 1]);
+  for (unsigned i = 0; i < shadow_mems.length (); i++)
+    emit_move_insn (shadow_mems[i], asan_shadow_cst (&shadow_bytes[i * 4]));
+
   do_pending_stack_adjust ();
 
   /* Construct epilogue sequence.  */
@@ -1285,33 +1321,15 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
 
-  prev_offset = base_offset;
-  last_offset = base_offset;
-  last_size = 0;
-  for (l = length; l; l -= 2)
+  for (unsigned i = 0; i < shadow_mems.length (); i++)
     {
-      offset = base_offset + ((offsets[l - 1] - base_offset)
-			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1));
-      if (last_offset + last_size != offset)
+      if (shadow_bytes[i*4+3] == ASAN_STACK_MAGIC_PARTIAL)
 	{
-	  shadow_mem = adjust_address (shadow_mem, VOIDmode,
-				       (last_offset - prev_offset)
-				       >> ASAN_SHADOW_SHIFT);
-	  prev_offset = last_offset;
-	  asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
-	  last_offset = offset;
-	  last_size = 0;
+	  asan_clear_shadow (shadow_mems[i], 8);
+	  i++;
 	}
-      last_size += base_offset + ((offsets[l - 2] - base_offset)
-				  & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
-		   - offset;
-    }
-  if (last_size)
-    {
-      shadow_mem = adjust_address (shadow_mem, VOIDmode,
-				   (last_offset - prev_offset)
-				   >> ASAN_SHADOW_SHIFT);
-      asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
+      else
+	asan_clear_shadow (shadow_mems[i], 4);
     }
 
   do_pending_stack_adjust ();
@@ -2546,6 +2564,8 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
   gimple g = gsi_stmt (*iter);
   location_t loc = gimple_location (g);
 
+  bool misalign = (flag_sanitize & SANITIZE_KERNEL_ADDRESS)
+		  || ASAN_CATCH_MISALIGNED;
   bool recover_p
     = (flag_sanitize & flag_sanitize_recover & SANITIZE_KERNEL_ADDRESS) != 0;
 
@@ -2643,7 +2663,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
   tree base_addr = gimple_assign_lhs (g);
 
   tree t = NULL_TREE;
-  if (real_size_in_bytes >= 8)
+  if (real_size_in_bytes >= 8 && !misalign)
     {
       tree shadow = build_shadow_mem_access (&gsi, loc, base_addr,
 					     shadow_ptr_type);
@@ -2662,7 +2682,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
       /* Aligned (>= 8 bytes) can test just
 	 (real_size_in_bytes - 1 >= shadow), as base_addr & 7 is known
 	 to be 0.  */
-      if (align < 8)
+      if (align < 8 || misalign)
 	{
 	  gimple_seq_add_stmt (&seq, build_assign (BIT_AND_EXPR,
 						   base_addr, 7));
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index bf8afad..ee67e45 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -10965,6 +10965,12 @@ is greater or equal to this number, use callbacks instead of inline checks.
 E.g. to disable inline code use
 @option{--param asan-instrumentation-with-call-threshold=0}.
 
+@item asan-catch-misaligned
+Catch invalid unaligned memory accesses.
+This option is needed to prevent ASan false negatives on memory accesses not
+aligned to their type size, as in applications like the Linux kernel.
+E.g. to disable these checks use @option{--param asan-catch-misaligned=0}.
+
 @item chkp-max-ctor-size
 Static constructors generated by Pointer Bounds Checker may become very
 large and significantly increase compile time at optimization level
diff --git a/gcc/params.def b/gcc/params.def
index 5e2c769..d96db71 100644
--- a/gcc/params.def
+++ b/gcc/params.def
@@ -1151,6 +1151,11 @@ DEFPARAM (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD,
          "in function becomes greater or equal to this number",
          7000, 0, INT_MAX)
 
+DEFPARAM (PARAM_ASAN_CATCH_MISALIGNED,
+	  "asan-catch-misaligned",
+	  "catch unaligned access",
+	  0, 1, 1)
+
 DEFPARAM (PARAM_UNINIT_CONTROL_DEP_ATTEMPTS,
 	  "uninit-control-dep-attempts",
 	  "Maximum number of nested calls to search for control dependencies "
diff --git a/gcc/params.h b/gcc/params.h
index 28d077f..c556ca6 100644
--- a/gcc/params.h
+++ b/gcc/params.h
@@ -240,5 +240,7 @@ extern void init_param_values (int *params);
   PARAM_VALUE (PARAM_ASAN_USE_AFTER_RETURN)
 #define ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD \
   PARAM_VALUE (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD)
+#define ASAN_CATCH_MISALIGNED \
+  PARAM_VALUE (PARAM_ASAN_CATCH_MISALIGNED)
 
 #endif /* ! GCC_PARAMS_H */
diff --git a/gcc/testsuite/c-c++-common/asan/misalign-catch.c b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
new file mode 100644
index 0000000..ae5dfc4
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
@@ -0,0 +1,21 @@
+/* { dg-do run } */
+/* { dg-options "--param asan-catch-misaligned=1" } */
+/* { dg-shouldfail "asan" } */
+
+long long *ptr;
+
+__attribute__((noinline))
+void foo () {
+   ptr = ((long long int *)(((char *)ptr) + 1));
+   *ptr = 1;
+}
+
+int main ()
+{
+   long long int local[9];
+   ptr = (long long *)&local[8];
+   foo ();
+   return 0;
+}
+
+/* { dg-output "ERROR: AddressSanitizer: stack-buffer-overflow.*(\n|\r\n|\r)" } */

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PINGv6][PATCH] ASan on unaligned accesses
  2015-05-12 11:23                 ` [PINGv6][PATCH] " Marat Zakirov
@ 2015-05-12 11:45                   ` Yury Gribov
  2015-05-20  7:19                   ` [PINGv7][PATCH] " Marat Zakirov
  1 sibling, 0 replies; 16+ messages in thread
From: Yury Gribov @ 2015-05-12 11:45 UTC (permalink / raw)
  To: Marat Zakirov, Jakub Jelinek
  Cc: gcc-patches, Andrew Pinski, Kostya Serebryany, Dmitry Vyukov,
	Andrey Ryabinin

On 05/12/2015 02:16 PM, Marat Zakirov wrote:
> On 04/07/2015 03:22 PM, Jakub Jelinek wrote:
>> How are the automatic misaligned variables different from say heap
>> allocated ones, or global vars etc.?
> No difference you are right Jakub. Shadow memory initialization for heap
> values and globals of course also should be changed but it is a task for
> libsanitizer not ASan for which I am sending patch. Fix for libsanitizer
> to support unaligned heaps and globals will be committed by a separate
> patch.

AFAIK folks only wanted this feature in kernel ASan for now.  Runtime
support for heap and globals will be done inside the kernel.

-Y

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PINGv7][PATCH] ASan on unaligned accesses
  2015-05-12 11:23                 ` [PINGv6][PATCH] " Marat Zakirov
  2015-05-12 11:45                   ` Yury Gribov
@ 2015-05-20  7:19                   ` Marat Zakirov
  2015-05-26 14:12                     ` [PINGv8][PATCH] " Marat Zakirov
  1 sibling, 1 reply; 16+ messages in thread
From: Marat Zakirov @ 2015-05-20  7:19 UTC (permalink / raw)
  To: Jakub Jelinek
  Cc: gcc-patches, Andrew Pinski, Kostya Serebryany, Dmitry Vyukov,
	Yury Gribov, Andrey Ryabinin

[-- Attachment #1: Type: text/plain, Size: 1067 bytes --]



On 05/12/2015 02:16 PM, Marat Zakirov wrote:
> On 04/07/2015 03:22 PM, Jakub Jelinek wrote:
>> How are the automatic misaligned variables different from say heap 
>> allocated ones, or global vars etc.? 
> No difference you are right Jakub. Shadow memory initialization for 
> heap values and globals of course also should be changed but it is a 
> task for libsanitizer not ASan for which I am sending patch. Fix for 
> libsanitizer to support unaligned heaps and globals will be committed 
> by a separate patch.
>> Well, a RTL solution I've tried at http://gcc.gnu.org/PR22141, but it 
>> gave
>> mixed results, so either it needs more cost tuning when it is 
>> desirable and
>> when it is not, or perhaps better do that still on GIMPLE instead, 
>> together
>> with trying to optimize bitfield accesses and other cases of adjacent
>> location accesses.  But if we handle that on GIMPLE, it won't really 
>> affect
>> what asan RTL emitting code produces.
>>
>>     Jakub
>>
> I fixed the issue with 'movq' you were mentioned in a previous mail.
>
> --Marat
>


[-- Attachment #2: mavdt-95_20.diff --]
[-- Type: text/x-patch, Size: 8979 bytes --]

gcc/ChangeLog:

2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>

	* asan.c (asan_emit_stack_protection): Support for misalign accesses. 
	(asan_expand_check_ifn): Likewise. 
	* params.def: New option asan-catch-misaligned.
	* params.h: New param ASAN_CATCH_MISALIGNED.
	* doc/invoke.texi: New asan param description.

gcc/testsuite/ChangeLog:

2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>

	* c-c++-common/asan/misalign-catch.c: New test.


diff --git a/gcc/asan.c b/gcc/asan.c
index 9e4a629..f9d052f 100644
--- a/gcc/asan.c
+++ b/gcc/asan.c
@@ -1050,7 +1050,6 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   rtx_code_label *lab;
   rtx_insn *insns;
   char buf[30];
-  unsigned char shadow_bytes[4];
   HOST_WIDE_INT base_offset = offsets[length - 1];
   HOST_WIDE_INT base_align_bias = 0, offset, prev_offset;
   HOST_WIDE_INT asan_frame_size = offsets[0] - base_offset;
@@ -1059,6 +1058,8 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   unsigned char cur_shadow_byte = ASAN_STACK_MAGIC_LEFT;
   tree str_cst, decl, id;
   int use_after_return_class = -1;
+  bool misalign = (flag_sanitize & SANITIZE_KERNEL_ADDRESS)
+		  || ASAN_CATCH_MISALIGNED;
 
   if (shadow_ptr_types[0] == NULL_TREE)
     asan_init_shadow_ptr_types ();
@@ -1193,11 +1194,37 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
   prev_offset = base_offset;
+
+  vec<rtx> shadow_mems;
+  vec<unsigned char> shadow_bytes;
+
+  shadow_mems.create (0);
+  shadow_bytes.create (0);
+
   for (l = length; l; l -= 2)
     {
       if (l == 2)
 	cur_shadow_byte = ASAN_STACK_MAGIC_RIGHT;
       offset = offsets[l - 1];
+      if (l != length && misalign)
+	{
+	  HOST_WIDE_INT aoff
+	    = base_offset + ((offset - base_offset)
+			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
+	      - ASAN_RED_ZONE_SIZE;
+	  if (aoff > prev_offset)
+	    {
+	      shadow_mem = adjust_address (shadow_mem, VOIDmode,
+					   (aoff - prev_offset)
+					   >> ASAN_SHADOW_SHIFT);
+	      prev_offset = aoff;
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_mems.safe_push (shadow_mem);
+	    }
+	}
       if ((offset - base_offset) & (ASAN_RED_ZONE_SIZE - 1))
 	{
 	  int i;
@@ -1212,13 +1239,13 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 	    if (aoff < offset)
 	      {
 		if (aoff < offset - (1 << ASAN_SHADOW_SHIFT) + 1)
-		  shadow_bytes[i] = 0;
+		  shadow_bytes.safe_push (0);
 		else
-		  shadow_bytes[i] = offset - aoff;
+		  shadow_bytes.safe_push (offset - aoff);
 	      }
 	    else
-	      shadow_bytes[i] = ASAN_STACK_MAGIC_PARTIAL;
-	  emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
+	      shadow_bytes.safe_push (ASAN_STACK_MAGIC_PARTIAL);
+	  shadow_mems.safe_push (shadow_mem);
 	  offset = aoff;
 	}
       while (offset <= offsets[l - 2] - ASAN_RED_ZONE_SIZE)
@@ -1227,12 +1254,21 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 				       (offset - prev_offset)
 				       >> ASAN_SHADOW_SHIFT);
 	  prev_offset = offset;
-	  memset (shadow_bytes, cur_shadow_byte, 4);
-	  emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_mems.safe_push (shadow_mem);
 	  offset += ASAN_RED_ZONE_SIZE;
 	}
       cur_shadow_byte = ASAN_STACK_MAGIC_MIDDLE;
     }
+  for (unsigned i = 0; misalign && i < shadow_bytes.length () - 1; i++)
+    if (shadow_bytes[i] == 0 && shadow_bytes[i + 1] > 0)
+      shadow_bytes[i] = 8 + (shadow_bytes[i + 1] > 7 ? 0 : shadow_bytes[i + 1]);
+  for (unsigned i = 0; i < shadow_mems.length (); i++)
+    emit_move_insn (shadow_mems[i], asan_shadow_cst (&shadow_bytes[i * 4]));
+
   do_pending_stack_adjust ();
 
   /* Construct epilogue sequence.  */
@@ -1285,33 +1321,15 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
 
-  prev_offset = base_offset;
-  last_offset = base_offset;
-  last_size = 0;
-  for (l = length; l; l -= 2)
+  for (unsigned i = 0; i < shadow_mems.length (); i++)
     {
-      offset = base_offset + ((offsets[l - 1] - base_offset)
-			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1));
-      if (last_offset + last_size != offset)
+      if (shadow_bytes[i*4+3] == ASAN_STACK_MAGIC_PARTIAL)
 	{
-	  shadow_mem = adjust_address (shadow_mem, VOIDmode,
-				       (last_offset - prev_offset)
-				       >> ASAN_SHADOW_SHIFT);
-	  prev_offset = last_offset;
-	  asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
-	  last_offset = offset;
-	  last_size = 0;
+	  asan_clear_shadow (shadow_mems[i], 8);
+	  i++;
 	}
-      last_size += base_offset + ((offsets[l - 2] - base_offset)
-				  & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
-		   - offset;
-    }
-  if (last_size)
-    {
-      shadow_mem = adjust_address (shadow_mem, VOIDmode,
-				   (last_offset - prev_offset)
-				   >> ASAN_SHADOW_SHIFT);
-      asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
+      else
+	asan_clear_shadow (shadow_mems[i], 4);
     }
 
   do_pending_stack_adjust ();
@@ -2546,6 +2564,8 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
   gimple g = gsi_stmt (*iter);
   location_t loc = gimple_location (g);
 
+  bool misalign = (flag_sanitize & SANITIZE_KERNEL_ADDRESS)
+		  || ASAN_CATCH_MISALIGNED;
   bool recover_p
     = (flag_sanitize & flag_sanitize_recover & SANITIZE_KERNEL_ADDRESS) != 0;
 
@@ -2643,7 +2663,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
   tree base_addr = gimple_assign_lhs (g);
 
   tree t = NULL_TREE;
-  if (real_size_in_bytes >= 8)
+  if (real_size_in_bytes >= 8 && !misalign)
     {
       tree shadow = build_shadow_mem_access (&gsi, loc, base_addr,
 					     shadow_ptr_type);
@@ -2662,7 +2682,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
       /* Aligned (>= 8 bytes) can test just
 	 (real_size_in_bytes - 1 >= shadow), as base_addr & 7 is known
 	 to be 0.  */
-      if (align < 8)
+      if (align < 8 || misalign)
 	{
 	  gimple_seq_add_stmt (&seq, build_assign (BIT_AND_EXPR,
 						   base_addr, 7));
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index bf8afad..ee67e45 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -10965,6 +10965,12 @@ is greater or equal to this number, use callbacks instead of inline checks.
 E.g. to disable inline code use
 @option{--param asan-instrumentation-with-call-threshold=0}.
 
+@item asan-catch-misaligned
+Catch invalid unaligned memory accesses.
+This option is needed to prevent ASan false negatives on memory accesses not
+aligned to their type size, as in applications like the Linux kernel.
+E.g. to disable these checks use @option{--param asan-catch-misaligned=0}.
+
 @item chkp-max-ctor-size
 Static constructors generated by Pointer Bounds Checker may become very
 large and significantly increase compile time at optimization level
diff --git a/gcc/params.def b/gcc/params.def
index 5e2c769..d96db71 100644
--- a/gcc/params.def
+++ b/gcc/params.def
@@ -1151,6 +1151,11 @@ DEFPARAM (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD,
          "in function becomes greater or equal to this number",
          7000, 0, INT_MAX)
 
+DEFPARAM (PARAM_ASAN_CATCH_MISALIGNED,
+	  "asan-catch-misaligned",
+	  "catch unaligned access",
+	  0, 1, 1)
+
 DEFPARAM (PARAM_UNINIT_CONTROL_DEP_ATTEMPTS,
 	  "uninit-control-dep-attempts",
 	  "Maximum number of nested calls to search for control dependencies "
diff --git a/gcc/params.h b/gcc/params.h
index 28d077f..c556ca6 100644
--- a/gcc/params.h
+++ b/gcc/params.h
@@ -240,5 +240,7 @@ extern void init_param_values (int *params);
   PARAM_VALUE (PARAM_ASAN_USE_AFTER_RETURN)
 #define ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD \
   PARAM_VALUE (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD)
+#define ASAN_CATCH_MISALIGNED \
+  PARAM_VALUE (PARAM_ASAN_CATCH_MISALIGNED)
 
 #endif /* ! GCC_PARAMS_H */
diff --git a/gcc/testsuite/c-c++-common/asan/misalign-catch.c b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
new file mode 100644
index 0000000..ae5dfc4
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
@@ -0,0 +1,21 @@
+/* { dg-do run } */
+/* { dg-options "--param asan-catch-misaligned=1" } */
+/* { dg-shouldfail "asan" } */
+
+long long *ptr;
+
+__attribute__((noinline))
+void foo () {
+   ptr = ((long long int *)(((char *)ptr) + 1));
+   *ptr = 1;
+}
+
+int main ()
+{
+   long long int local[9];
+   ptr = (long long *)&local[8];
+   foo ();
+   return 0;
+}
+
+/* { dg-output "ERROR: AddressSanitizer: stack-buffer-overflow.*(\n|\r\n|\r)" } */

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PINGv8][PATCH] ASan on unaligned accesses
  2015-05-20  7:19                   ` [PINGv7][PATCH] " Marat Zakirov
@ 2015-05-26 14:12                     ` Marat Zakirov
  2015-06-02 13:15                       ` [PINGv9][PATCH] " Marat Zakirov
  0 siblings, 1 reply; 16+ messages in thread
From: Marat Zakirov @ 2015-05-26 14:12 UTC (permalink / raw)
  To: Jakub Jelinek
  Cc: gcc-patches, Andrew Pinski, Kostya Serebryany, Dmitry Vyukov,
	Yury Gribov, Andrey Ryabinin

[-- Attachment #1: Type: text/plain, Size: 1145 bytes --]



On 05/20/2015 10:01 AM, Marat Zakirov wrote:
>
>
> On 05/12/2015 02:16 PM, Marat Zakirov wrote:
>> On 04/07/2015 03:22 PM, Jakub Jelinek wrote:
>>> How are the automatic misaligned variables different from say heap 
>>> allocated ones, or global vars etc.? 
>> No difference you are right Jakub. Shadow memory initialization for 
>> heap values and globals of course also should be changed but it is a 
>> task for libsanitizer not ASan for which I am sending patch. Fix for 
>> libsanitizer to support unaligned heaps and globals will be committed 
>> by a separate patch.
>>> Well, a RTL solution I've tried at http://gcc.gnu.org/PR22141, but 
>>> it gave
>>> mixed results, so either it needs more cost tuning when it is 
>>> desirable and
>>> when it is not, or perhaps better do that still on GIMPLE instead, 
>>> together
>>> with trying to optimize bitfield accesses and other cases of adjacent
>>> location accesses.  But if we handle that on GIMPLE, it won't really 
>>> affect
>>> what asan RTL emitting code produces.
>>>
>>>     Jakub
>>>
>> I fixed the issue with 'movq' you were mentioned in a previous mail.
>>
>> --Marat
>>
>


[-- Attachment #2: mavdt-95_20.diff --]
[-- Type: text/x-patch, Size: 8979 bytes --]

gcc/ChangeLog:

2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>

	* asan.c (asan_emit_stack_protection): Support for misalign accesses. 
	(asan_expand_check_ifn): Likewise. 
	* params.def: New option asan-catch-misaligned.
	* params.h: New param ASAN_CATCH_MISALIGNED.
	* doc/invoke.texi: New asan param description.

gcc/testsuite/ChangeLog:

2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>

	* c-c++-common/asan/misalign-catch.c: New test.


diff --git a/gcc/asan.c b/gcc/asan.c
index 9e4a629..f9d052f 100644
--- a/gcc/asan.c
+++ b/gcc/asan.c
@@ -1050,7 +1050,6 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   rtx_code_label *lab;
   rtx_insn *insns;
   char buf[30];
-  unsigned char shadow_bytes[4];
   HOST_WIDE_INT base_offset = offsets[length - 1];
   HOST_WIDE_INT base_align_bias = 0, offset, prev_offset;
   HOST_WIDE_INT asan_frame_size = offsets[0] - base_offset;
@@ -1059,6 +1058,8 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   unsigned char cur_shadow_byte = ASAN_STACK_MAGIC_LEFT;
   tree str_cst, decl, id;
   int use_after_return_class = -1;
+  bool misalign = (flag_sanitize & SANITIZE_KERNEL_ADDRESS)
+		  || ASAN_CATCH_MISALIGNED;
 
   if (shadow_ptr_types[0] == NULL_TREE)
     asan_init_shadow_ptr_types ();
@@ -1193,11 +1194,37 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
   prev_offset = base_offset;
+
+  vec<rtx> shadow_mems;
+  vec<unsigned char> shadow_bytes;
+
+  shadow_mems.create (0);
+  shadow_bytes.create (0);
+
   for (l = length; l; l -= 2)
     {
       if (l == 2)
 	cur_shadow_byte = ASAN_STACK_MAGIC_RIGHT;
       offset = offsets[l - 1];
+      if (l != length && misalign)
+	{
+	  HOST_WIDE_INT aoff
+	    = base_offset + ((offset - base_offset)
+			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
+	      - ASAN_RED_ZONE_SIZE;
+	  if (aoff > prev_offset)
+	    {
+	      shadow_mem = adjust_address (shadow_mem, VOIDmode,
+					   (aoff - prev_offset)
+					   >> ASAN_SHADOW_SHIFT);
+	      prev_offset = aoff;
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_mems.safe_push (shadow_mem);
+	    }
+	}
       if ((offset - base_offset) & (ASAN_RED_ZONE_SIZE - 1))
 	{
 	  int i;
@@ -1212,13 +1239,13 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 	    if (aoff < offset)
 	      {
 		if (aoff < offset - (1 << ASAN_SHADOW_SHIFT) + 1)
-		  shadow_bytes[i] = 0;
+		  shadow_bytes.safe_push (0);
 		else
-		  shadow_bytes[i] = offset - aoff;
+		  shadow_bytes.safe_push (offset - aoff);
 	      }
 	    else
-	      shadow_bytes[i] = ASAN_STACK_MAGIC_PARTIAL;
-	  emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
+	      shadow_bytes.safe_push (ASAN_STACK_MAGIC_PARTIAL);
+	  shadow_mems.safe_push (shadow_mem);
 	  offset = aoff;
 	}
       while (offset <= offsets[l - 2] - ASAN_RED_ZONE_SIZE)
@@ -1227,12 +1254,21 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 				       (offset - prev_offset)
 				       >> ASAN_SHADOW_SHIFT);
 	  prev_offset = offset;
-	  memset (shadow_bytes, cur_shadow_byte, 4);
-	  emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_mems.safe_push (shadow_mem);
 	  offset += ASAN_RED_ZONE_SIZE;
 	}
       cur_shadow_byte = ASAN_STACK_MAGIC_MIDDLE;
     }
+  for (unsigned i = 0; misalign && i < shadow_bytes.length () - 1; i++)
+    if (shadow_bytes[i] == 0 && shadow_bytes[i + 1] > 0)
+      shadow_bytes[i] = 8 + (shadow_bytes[i + 1] > 7 ? 0 : shadow_bytes[i + 1]);
+  for (unsigned i = 0; i < shadow_mems.length (); i++)
+    emit_move_insn (shadow_mems[i], asan_shadow_cst (&shadow_bytes[i * 4]));
+
   do_pending_stack_adjust ();
 
   /* Construct epilogue sequence.  */
@@ -1285,33 +1321,15 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
 
-  prev_offset = base_offset;
-  last_offset = base_offset;
-  last_size = 0;
-  for (l = length; l; l -= 2)
+  for (unsigned i = 0; i < shadow_mems.length (); i++)
     {
-      offset = base_offset + ((offsets[l - 1] - base_offset)
-			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1));
-      if (last_offset + last_size != offset)
+      if (shadow_bytes[i*4+3] == ASAN_STACK_MAGIC_PARTIAL)
 	{
-	  shadow_mem = adjust_address (shadow_mem, VOIDmode,
-				       (last_offset - prev_offset)
-				       >> ASAN_SHADOW_SHIFT);
-	  prev_offset = last_offset;
-	  asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
-	  last_offset = offset;
-	  last_size = 0;
+	  asan_clear_shadow (shadow_mems[i], 8);
+	  i++;
 	}
-      last_size += base_offset + ((offsets[l - 2] - base_offset)
-				  & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
-		   - offset;
-    }
-  if (last_size)
-    {
-      shadow_mem = adjust_address (shadow_mem, VOIDmode,
-				   (last_offset - prev_offset)
-				   >> ASAN_SHADOW_SHIFT);
-      asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
+      else
+	asan_clear_shadow (shadow_mems[i], 4);
     }
 
   do_pending_stack_adjust ();
@@ -2546,6 +2564,8 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
   gimple g = gsi_stmt (*iter);
   location_t loc = gimple_location (g);
 
+  bool misalign = (flag_sanitize & SANITIZE_KERNEL_ADDRESS)
+		  || ASAN_CATCH_MISALIGNED;
   bool recover_p
     = (flag_sanitize & flag_sanitize_recover & SANITIZE_KERNEL_ADDRESS) != 0;
 
@@ -2643,7 +2663,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
   tree base_addr = gimple_assign_lhs (g);
 
   tree t = NULL_TREE;
-  if (real_size_in_bytes >= 8)
+  if (real_size_in_bytes >= 8 && !misalign)
     {
       tree shadow = build_shadow_mem_access (&gsi, loc, base_addr,
 					     shadow_ptr_type);
@@ -2662,7 +2682,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
       /* Aligned (>= 8 bytes) can test just
 	 (real_size_in_bytes - 1 >= shadow), as base_addr & 7 is known
 	 to be 0.  */
-      if (align < 8)
+      if (align < 8 || misalign)
 	{
 	  gimple_seq_add_stmt (&seq, build_assign (BIT_AND_EXPR,
 						   base_addr, 7));
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index bf8afad..ee67e45 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -10965,6 +10965,12 @@ is greater or equal to this number, use callbacks instead of inline checks.
 E.g. to disable inline code use
 @option{--param asan-instrumentation-with-call-threshold=0}.
 
+@item asan-catch-misaligned
+Catch invalid unaligned memory accesses.
+This option is needed to prevent ASan false negatives on memory accesses not
+aligned to their type size, as in applications like the Linux kernel.
+E.g. to disable these checks use @option{--param asan-catch-misaligned=0}.
+
 @item chkp-max-ctor-size
 Static constructors generated by Pointer Bounds Checker may become very
 large and significantly increase compile time at optimization level
diff --git a/gcc/params.def b/gcc/params.def
index 5e2c769..d96db71 100644
--- a/gcc/params.def
+++ b/gcc/params.def
@@ -1151,6 +1151,11 @@ DEFPARAM (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD,
          "in function becomes greater or equal to this number",
          7000, 0, INT_MAX)
 
+DEFPARAM (PARAM_ASAN_CATCH_MISALIGNED,
+	  "asan-catch-misaligned",
+	  "catch unaligned access",
+	  0, 1, 1)
+
 DEFPARAM (PARAM_UNINIT_CONTROL_DEP_ATTEMPTS,
 	  "uninit-control-dep-attempts",
 	  "Maximum number of nested calls to search for control dependencies "
diff --git a/gcc/params.h b/gcc/params.h
index 28d077f..c556ca6 100644
--- a/gcc/params.h
+++ b/gcc/params.h
@@ -240,5 +240,7 @@ extern void init_param_values (int *params);
   PARAM_VALUE (PARAM_ASAN_USE_AFTER_RETURN)
 #define ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD \
   PARAM_VALUE (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD)
+#define ASAN_CATCH_MISALIGNED \
+  PARAM_VALUE (PARAM_ASAN_CATCH_MISALIGNED)
 
 #endif /* ! GCC_PARAMS_H */
diff --git a/gcc/testsuite/c-c++-common/asan/misalign-catch.c b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
new file mode 100644
index 0000000..ae5dfc4
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
@@ -0,0 +1,21 @@
+/* { dg-do run } */
+/* { dg-options "--param asan-catch-misaligned=1" } */
+/* { dg-shouldfail "asan" } */
+
+long long *ptr;
+
+__attribute__((noinline))
+void foo () {
+   ptr = ((long long int *)(((char *)ptr) + 1));
+   *ptr = 1;
+}
+
+int main ()
+{
+   long long int local[9];
+   ptr = (long long *)&local[8];
+   foo ();
+   return 0;
+}
+
+/* { dg-output "ERROR: AddressSanitizer: stack-buffer-overflow.*(\n|\r\n|\r)" } */

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PINGv9][PATCH] ASan on unaligned accesses
  2015-05-26 14:12                     ` [PINGv8][PATCH] " Marat Zakirov
@ 2015-06-02 13:15                       ` Marat Zakirov
  2015-06-11 11:04                         ` [PINGv10][PATCH] " Marat Zakirov
  0 siblings, 1 reply; 16+ messages in thread
From: Marat Zakirov @ 2015-06-02 13:15 UTC (permalink / raw)
  To: Jakub Jelinek
  Cc: gcc-patches, Andrew Pinski, Kostya Serebryany, Dmitry Vyukov,
	Yury Gribov, Andrey Ryabinin

[-- Attachment #1: Type: text/plain, Size: 1225 bytes --]

On 05/26/2015 05:03 PM, Marat Zakirov wrote:
>
>
> On 05/20/2015 10:01 AM, Marat Zakirov wrote:
>>
>>
>> On 05/12/2015 02:16 PM, Marat Zakirov wrote:
>>> On 04/07/2015 03:22 PM, Jakub Jelinek wrote:
>>>> How are the automatic misaligned variables different from say heap 
>>>> allocated ones, or global vars etc.? 
>>> No difference you are right Jakub. Shadow memory initialization for 
>>> heap values and globals of course also should be changed but it is a 
>>> task for libsanitizer not ASan for which I am sending patch. Fix for 
>>> libsanitizer to support unaligned heaps and globals will be 
>>> committed by a separate patch.
>>>> Well, a RTL solution I've tried at http://gcc.gnu.org/PR22141, but 
>>>> it gave
>>>> mixed results, so either it needs more cost tuning when it is 
>>>> desirable and
>>>> when it is not, or perhaps better do that still on GIMPLE instead, 
>>>> together
>>>> with trying to optimize bitfield accesses and other cases of adjacent
>>>> location accesses.  But if we handle that on GIMPLE, it won't 
>>>> really affect
>>>> what asan RTL emitting code produces.
>>>>
>>>>     Jakub
>>>>
>>> I fixed the issue with 'movq' you mentioned in a previous mail.
>>>
>>> --Marat
>>>
>>
>
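
To make the heap case concrete, here is a purely illustrative example (not
part of the patch or its testsuite) of an unaligned 8-byte store whose last
byte lands in the heap redzone.  The inline check inspects only the shadow
of the granule containing the start address, which is fully addressable
here, so the overflow stays unreported until libsanitizer also poisons heap
and global redzones with the misalignment-aware encoding:

#include <stdlib.h>

long long *ptr;

int
main (void)
{
  char *p = (char *) malloc (8);
  ptr = (long long *) (p + 1);
  *ptr = 1;		/* writes p[1]..p[8]; p[8] is already redzone */
  free (p);
  return 0;
}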


[-- Attachment #2: mavdt-95_20.diff --]
[-- Type: text/x-patch, Size: 8979 bytes --]

gcc/ChangeLog:

2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>

	* asan.c (asan_emit_stack_protection): Support for misaligned accesses.
	(asan_expand_check_ifn): Likewise. 
	* params.def: New option asan-catch-misaligned.
	* params.h: New param ASAN_CATCH_MISALIGNED.
	* doc/invoke.texi: New asan param description.

gcc/testsuite/ChangeLog:

2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>

	* c-c++-common/asan/misalign-catch.c: New test.


diff --git a/gcc/asan.c b/gcc/asan.c
index 9e4a629..f9d052f 100644
--- a/gcc/asan.c
+++ b/gcc/asan.c
@@ -1050,7 +1050,6 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   rtx_code_label *lab;
   rtx_insn *insns;
   char buf[30];
-  unsigned char shadow_bytes[4];
   HOST_WIDE_INT base_offset = offsets[length - 1];
   HOST_WIDE_INT base_align_bias = 0, offset, prev_offset;
   HOST_WIDE_INT asan_frame_size = offsets[0] - base_offset;
@@ -1059,6 +1058,8 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   unsigned char cur_shadow_byte = ASAN_STACK_MAGIC_LEFT;
   tree str_cst, decl, id;
   int use_after_return_class = -1;
+  bool misalign = (flag_sanitize & SANITIZE_KERNEL_ADDRESS)
+		  || ASAN_CATCH_MISALIGNED;
 
   if (shadow_ptr_types[0] == NULL_TREE)
     asan_init_shadow_ptr_types ();
@@ -1193,11 +1194,37 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
   prev_offset = base_offset;
+
+  vec<rtx> shadow_mems;
+  vec<unsigned char> shadow_bytes;
+
+  shadow_mems.create (0);
+  shadow_bytes.create (0);
+
   for (l = length; l; l -= 2)
     {
       if (l == 2)
 	cur_shadow_byte = ASAN_STACK_MAGIC_RIGHT;
       offset = offsets[l - 1];
+      if (l != length && misalign)
+	{
+	  HOST_WIDE_INT aoff
+	    = base_offset + ((offset - base_offset)
+			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
+	      - ASAN_RED_ZONE_SIZE;
+	  if (aoff > prev_offset)
+	    {
+	      shadow_mem = adjust_address (shadow_mem, VOIDmode,
+					   (aoff - prev_offset)
+					   >> ASAN_SHADOW_SHIFT);
+	      prev_offset = aoff;
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_mems.safe_push (shadow_mem);
+	    }
+	}
       if ((offset - base_offset) & (ASAN_RED_ZONE_SIZE - 1))
 	{
 	  int i;
@@ -1212,13 +1239,13 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 	    if (aoff < offset)
 	      {
 		if (aoff < offset - (1 << ASAN_SHADOW_SHIFT) + 1)
-		  shadow_bytes[i] = 0;
+		  shadow_bytes.safe_push (0);
 		else
-		  shadow_bytes[i] = offset - aoff;
+		  shadow_bytes.safe_push (offset - aoff);
 	      }
 	    else
-	      shadow_bytes[i] = ASAN_STACK_MAGIC_PARTIAL;
-	  emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
+	      shadow_bytes.safe_push (ASAN_STACK_MAGIC_PARTIAL);
+	  shadow_mems.safe_push (shadow_mem);
 	  offset = aoff;
 	}
       while (offset <= offsets[l - 2] - ASAN_RED_ZONE_SIZE)
@@ -1227,12 +1254,21 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 				       (offset - prev_offset)
 				       >> ASAN_SHADOW_SHIFT);
 	  prev_offset = offset;
-	  memset (shadow_bytes, cur_shadow_byte, 4);
-	  emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_mems.safe_push (shadow_mem);
 	  offset += ASAN_RED_ZONE_SIZE;
 	}
       cur_shadow_byte = ASAN_STACK_MAGIC_MIDDLE;
     }
+  for (unsigned i = 0; misalign && i < shadow_bytes.length () - 1; i++)
+    if (shadow_bytes[i] == 0 && shadow_bytes[i + 1] > 0)
+      shadow_bytes[i] = 8 + (shadow_bytes[i + 1] > 7 ? 0 : shadow_bytes[i + 1]);
+  for (unsigned i = 0; i < shadow_mems.length (); i++)
+    emit_move_insn (shadow_mems[i], asan_shadow_cst (&shadow_bytes[i * 4]));
+
   do_pending_stack_adjust ();
 
   /* Construct epilogue sequence.  */
@@ -1285,33 +1321,15 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
 
-  prev_offset = base_offset;
-  last_offset = base_offset;
-  last_size = 0;
-  for (l = length; l; l -= 2)
+  for (unsigned i = 0; i < shadow_mems.length (); i++)
     {
-      offset = base_offset + ((offsets[l - 1] - base_offset)
-			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1));
-      if (last_offset + last_size != offset)
+      if (shadow_bytes[i*4+3] == ASAN_STACK_MAGIC_PARTIAL)
 	{
-	  shadow_mem = adjust_address (shadow_mem, VOIDmode,
-				       (last_offset - prev_offset)
-				       >> ASAN_SHADOW_SHIFT);
-	  prev_offset = last_offset;
-	  asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
-	  last_offset = offset;
-	  last_size = 0;
+	  asan_clear_shadow (shadow_mems[i], 8);
+	  i++;
 	}
-      last_size += base_offset + ((offsets[l - 2] - base_offset)
-				  & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
-		   - offset;
-    }
-  if (last_size)
-    {
-      shadow_mem = adjust_address (shadow_mem, VOIDmode,
-				   (last_offset - prev_offset)
-				   >> ASAN_SHADOW_SHIFT);
-      asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
+      else
+	asan_clear_shadow (shadow_mems[i], 4);
     }
 
   do_pending_stack_adjust ();
@@ -2546,6 +2564,8 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
   gimple g = gsi_stmt (*iter);
   location_t loc = gimple_location (g);
 
+  bool misalign = (flag_sanitize & SANITIZE_KERNEL_ADDRESS)
+		  || ASAN_CATCH_MISALIGNED;
   bool recover_p
     = (flag_sanitize & flag_sanitize_recover & SANITIZE_KERNEL_ADDRESS) != 0;
 
@@ -2643,7 +2663,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
   tree base_addr = gimple_assign_lhs (g);
 
   tree t = NULL_TREE;
-  if (real_size_in_bytes >= 8)
+  if (real_size_in_bytes >= 8 && !misalign)
     {
       tree shadow = build_shadow_mem_access (&gsi, loc, base_addr,
 					     shadow_ptr_type);
@@ -2662,7 +2682,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
       /* Aligned (>= 8 bytes) can test just
 	 (real_size_in_bytes - 1 >= shadow), as base_addr & 7 is known
 	 to be 0.  */
-      if (align < 8)
+      if (align < 8 || misalign)
 	{
 	  gimple_seq_add_stmt (&seq, build_assign (BIT_AND_EXPR,
 						   base_addr, 7));
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index bf8afad..ee67e45 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -10965,6 +10965,12 @@ is greater or equal to this number, use callbacks instead of inline checks.
 E.g. to disable inline code use
 @option{--param asan-instrumentation-with-call-threshold=0}.
 
+@item asan-catch-misaligned
+Catch invalid unaligned memory accesses.
+This option helps to prevent potential ASan false negatives caused by memory
+accesses that are not aligned to the access size, as in applications like the
+Linux kernel.  E.g. to enable it use @option{--param asan-catch-misaligned=1}.
+
 @item chkp-max-ctor-size
 Static constructors generated by Pointer Bounds Checker may become very
 large and significantly increase compile time at optimization level
diff --git a/gcc/params.def b/gcc/params.def
index 5e2c769..d96db71 100644
--- a/gcc/params.def
+++ b/gcc/params.def
@@ -1151,6 +1151,11 @@ DEFPARAM (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD,
          "in function becomes greater or equal to this number",
          7000, 0, INT_MAX)
 
+DEFPARAM (PARAM_ASAN_CATCH_MISALIGNED,
+	  "asan-catch-misaligned",
+	  "catch unaligned access",
+	  0, 1, 1)
+
 DEFPARAM (PARAM_UNINIT_CONTROL_DEP_ATTEMPTS,
 	  "uninit-control-dep-attempts",
 	  "Maximum number of nested calls to search for control dependencies "
diff --git a/gcc/params.h b/gcc/params.h
index 28d077f..c556ca6 100644
--- a/gcc/params.h
+++ b/gcc/params.h
@@ -240,5 +240,7 @@ extern void init_param_values (int *params);
   PARAM_VALUE (PARAM_ASAN_USE_AFTER_RETURN)
 #define ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD \
   PARAM_VALUE (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD)
+#define ASAN_CATCH_MISALIGNED \
+  PARAM_VALUE (PARAM_ASAN_CATCH_MISALIGNED)
 
 #endif /* ! GCC_PARAMS_H */
diff --git a/gcc/testsuite/c-c++-common/asan/misalign-catch.c b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
new file mode 100644
index 0000000..ae5dfc4
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
@@ -0,0 +1,21 @@
+/* { dg-do run } */
+/* { dg-options "--param asan-catch-misaligned=1" } */
+/* { dg-shouldfail "asan" } */
+
+long long *ptr;
+
+__attribute__((noinline))
+void foo () {
+   ptr = ((long long int *)(((char *)ptr) + 1));
+   *ptr = 1;
+}
+
+int main ()
+{
+   long long int local[9];
+   ptr = (long long *)&local[8];
+   foo ();
+   return 0;
+}
+
+/* { dg-output "ERROR: AddressSanitizer: stack-buffer-overflow.*(\n|\r\n|\r)" } */

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PINGv10][PATCH] ASan on unaligned accesses
  2015-06-02 13:15                       ` [PINGv9][PATCH] " Marat Zakirov
@ 2015-06-11 11:04                         ` Marat Zakirov
  0 siblings, 0 replies; 16+ messages in thread
From: Marat Zakirov @ 2015-06-11 11:04 UTC (permalink / raw)
  To: Jakub Jelinek
  Cc: gcc-patches, Andrew Pinski, Kostya Serebryany, Dmitry Vyukov,
	Yury Gribov, Andrey Ryabinin

[-- Attachment #1: Type: text/plain, Size: 1309 bytes --]



On 06/02/2015 04:11 PM, Marat Zakirov wrote:
> On 05/26/2015 05:03 PM, Marat Zakirov wrote:
>>
>>
>> On 05/20/2015 10:01 AM, Marat Zakirov wrote:
>>>
>>>
>>> On 05/12/2015 02:16 PM, Marat Zakirov wrote:
>>>> On 04/07/2015 03:22 PM, Jakub Jelinek wrote:
>>>>> How are the automatic misaligned variables different from say heap 
>>>>> allocated ones, or global vars etc.? 
>>>> No difference, you are right, Jakub. Shadow memory initialization for
>>>> heap values and globals should of course also be changed, but that is
>>>> a task for libsanitizer, not for the ASan pass this patch touches. A
>>>> fix for libsanitizer to support unaligned accesses to heap and globals
>>>> will be committed as a separate patch.
>>>>> Well, a RTL solution I've tried at http://gcc.gnu.org/PR22141, but 
>>>>> it gave
>>>>> mixed results, so either it needs more cost tuning when it is 
>>>>> desirable and
>>>>> when it is not, or perhaps better do that still on GIMPLE instead, 
>>>>> together
>>>>> with trying to optimize bitfield accesses and other cases of adjacent
>>>>> location accesses.  But if we handle that on GIMPLE, it won't 
>>>>> really affect
>>>>> what asan RTL emitting code produces.
>>>>>
>>>>>     Jakub
>>>>>
>>>> I fixed the issue with 'movq' you mentioned in a previous mail.
>>>>
>>>> --Marat
>>>>
>>>
>>
>


[-- Attachment #2: mavdt-95_20.diff --]
[-- Type: text/x-patch, Size: 8979 bytes --]

gcc/ChangeLog:

2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>

	* asan.c (asan_emit_stack_protection): Support for misaligned accesses.
	(asan_expand_check_ifn): Likewise. 
	* params.def: New option asan-catch-misaligned.
	* params.h: New param ASAN_CATCH_MISALIGNED.
	* doc/invoke.texi: New asan param description.

gcc/testsuite/ChangeLog:

2015-02-25  Marat Zakirov  <m.zakirov@samsung.com>

	* c-c++-common/asan/misalign-catch.c: New test.


diff --git a/gcc/asan.c b/gcc/asan.c
index 9e4a629..f9d052f 100644
--- a/gcc/asan.c
+++ b/gcc/asan.c
@@ -1050,7 +1050,6 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   rtx_code_label *lab;
   rtx_insn *insns;
   char buf[30];
-  unsigned char shadow_bytes[4];
   HOST_WIDE_INT base_offset = offsets[length - 1];
   HOST_WIDE_INT base_align_bias = 0, offset, prev_offset;
   HOST_WIDE_INT asan_frame_size = offsets[0] - base_offset;
@@ -1059,6 +1058,8 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   unsigned char cur_shadow_byte = ASAN_STACK_MAGIC_LEFT;
   tree str_cst, decl, id;
   int use_after_return_class = -1;
+  bool misalign = (flag_sanitize & SANITIZE_KERNEL_ADDRESS)
+		  || ASAN_CATCH_MISALIGNED;
 
   if (shadow_ptr_types[0] == NULL_TREE)
     asan_init_shadow_ptr_types ();
@@ -1193,11 +1194,37 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
   prev_offset = base_offset;
+
+  vec<rtx> shadow_mems;
+  vec<unsigned char> shadow_bytes;
+
+  shadow_mems.create (0);
+  shadow_bytes.create (0);
+
   for (l = length; l; l -= 2)
     {
       if (l == 2)
 	cur_shadow_byte = ASAN_STACK_MAGIC_RIGHT;
       offset = offsets[l - 1];
+      if (l != length && misalign)
+	{
+	  HOST_WIDE_INT aoff
+	    = base_offset + ((offset - base_offset)
+			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
+	      - ASAN_RED_ZONE_SIZE;
+	  if (aoff > prev_offset)
+	    {
+	      shadow_mem = adjust_address (shadow_mem, VOIDmode,
+					   (aoff - prev_offset)
+					   >> ASAN_SHADOW_SHIFT);
+	      prev_offset = aoff;
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_bytes.safe_push (0);
+	      shadow_mems.safe_push (shadow_mem);
+	    }
+	}
       if ((offset - base_offset) & (ASAN_RED_ZONE_SIZE - 1))
 	{
 	  int i;
@@ -1212,13 +1239,13 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 	    if (aoff < offset)
 	      {
 		if (aoff < offset - (1 << ASAN_SHADOW_SHIFT) + 1)
-		  shadow_bytes[i] = 0;
+		  shadow_bytes.safe_push (0);
 		else
-		  shadow_bytes[i] = offset - aoff;
+		  shadow_bytes.safe_push (offset - aoff);
 	      }
 	    else
-	      shadow_bytes[i] = ASAN_STACK_MAGIC_PARTIAL;
-	  emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
+	      shadow_bytes.safe_push (ASAN_STACK_MAGIC_PARTIAL);
+	  shadow_mems.safe_push (shadow_mem);
 	  offset = aoff;
 	}
       while (offset <= offsets[l - 2] - ASAN_RED_ZONE_SIZE)
@@ -1227,12 +1254,21 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 				       (offset - prev_offset)
 				       >> ASAN_SHADOW_SHIFT);
 	  prev_offset = offset;
-	  memset (shadow_bytes, cur_shadow_byte, 4);
-	  emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_bytes.safe_push (cur_shadow_byte);
+	  shadow_mems.safe_push (shadow_mem);
 	  offset += ASAN_RED_ZONE_SIZE;
 	}
       cur_shadow_byte = ASAN_STACK_MAGIC_MIDDLE;
     }
+  for (unsigned i = 0; misalign && i < shadow_bytes.length () - 1; i++)
+    if (shadow_bytes[i] == 0 && shadow_bytes[i + 1] > 0)
+      shadow_bytes[i] = 8 + (shadow_bytes[i + 1] > 7 ? 0 : shadow_bytes[i + 1]);
+  for (unsigned i = 0; i < shadow_mems.length (); i++)
+    emit_move_insn (shadow_mems[i], asan_shadow_cst (&shadow_bytes[i * 4]));
+
   do_pending_stack_adjust ();
 
   /* Construct epilogue sequence.  */
@@ -1285,33 +1321,15 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
   if (STRICT_ALIGNMENT)
     set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
 
-  prev_offset = base_offset;
-  last_offset = base_offset;
-  last_size = 0;
-  for (l = length; l; l -= 2)
+  for (unsigned i = 0; i < shadow_mems.length (); i++)
     {
-      offset = base_offset + ((offsets[l - 1] - base_offset)
-			     & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1));
-      if (last_offset + last_size != offset)
+      if (shadow_bytes[i*4+3] == ASAN_STACK_MAGIC_PARTIAL)
 	{
-	  shadow_mem = adjust_address (shadow_mem, VOIDmode,
-				       (last_offset - prev_offset)
-				       >> ASAN_SHADOW_SHIFT);
-	  prev_offset = last_offset;
-	  asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
-	  last_offset = offset;
-	  last_size = 0;
+	  asan_clear_shadow (shadow_mems[i], 8);
+	  i++;
 	}
-      last_size += base_offset + ((offsets[l - 2] - base_offset)
-				  & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
-		   - offset;
-    }
-  if (last_size)
-    {
-      shadow_mem = adjust_address (shadow_mem, VOIDmode,
-				   (last_offset - prev_offset)
-				   >> ASAN_SHADOW_SHIFT);
-      asan_clear_shadow (shadow_mem, last_size >> ASAN_SHADOW_SHIFT);
+      else
+	asan_clear_shadow (shadow_mems[i], 4);
     }
 
   do_pending_stack_adjust ();
@@ -2546,6 +2564,8 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
   gimple g = gsi_stmt (*iter);
   location_t loc = gimple_location (g);
 
+  bool misalign = (flag_sanitize & SANITIZE_KERNEL_ADDRESS)
+		  || ASAN_CATCH_MISALIGNED;
   bool recover_p
     = (flag_sanitize & flag_sanitize_recover & SANITIZE_KERNEL_ADDRESS) != 0;
 
@@ -2643,7 +2663,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
   tree base_addr = gimple_assign_lhs (g);
 
   tree t = NULL_TREE;
-  if (real_size_in_bytes >= 8)
+  if (real_size_in_bytes >= 8 && !misalign)
     {
       tree shadow = build_shadow_mem_access (&gsi, loc, base_addr,
 					     shadow_ptr_type);
@@ -2662,7 +2682,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
       /* Aligned (>= 8 bytes) can test just
 	 (real_size_in_bytes - 1 >= shadow), as base_addr & 7 is known
 	 to be 0.  */
-      if (align < 8)
+      if (align < 8 || misalign)
 	{
 	  gimple_seq_add_stmt (&seq, build_assign (BIT_AND_EXPR,
 						   base_addr, 7));
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index bf8afad..ee67e45 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -10965,6 +10965,12 @@ is greater or equal to this number, use callbacks instead of inline checks.
 E.g. to disable inline code use
 @option{--param asan-instrumentation-with-call-threshold=0}.
 
+@item asan-catch-misaligned
+Catch invalid unaligned memory accesses.
+This option helps to prevent potential ASan false negatives caused by memory
+accesses that are not aligned to the access size, as in applications like the
+Linux kernel.  E.g. to enable it use @option{--param asan-catch-misaligned=1}.
+
 @item chkp-max-ctor-size
 Static constructors generated by Pointer Bounds Checker may become very
 large and significantly increase compile time at optimization level
diff --git a/gcc/params.def b/gcc/params.def
index 5e2c769..d96db71 100644
--- a/gcc/params.def
+++ b/gcc/params.def
@@ -1151,6 +1151,11 @@ DEFPARAM (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD,
          "in function becomes greater or equal to this number",
          7000, 0, INT_MAX)
 
+DEFPARAM (PARAM_ASAN_CATCH_MISALIGNED,
+	  "asan-catch-misaligned",
+	  "catch unaligned access",
+	  0, 1, 1)
+
 DEFPARAM (PARAM_UNINIT_CONTROL_DEP_ATTEMPTS,
 	  "uninit-control-dep-attempts",
 	  "Maximum number of nested calls to search for control dependencies "
diff --git a/gcc/params.h b/gcc/params.h
index 28d077f..c556ca6 100644
--- a/gcc/params.h
+++ b/gcc/params.h
@@ -240,5 +240,7 @@ extern void init_param_values (int *params);
   PARAM_VALUE (PARAM_ASAN_USE_AFTER_RETURN)
 #define ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD \
   PARAM_VALUE (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD)
+#define ASAN_CATCH_MISALIGNED \
+  PARAM_VALUE (PARAM_ASAN_CATCH_MISALIGNED)
 
 #endif /* ! GCC_PARAMS_H */
diff --git a/gcc/testsuite/c-c++-common/asan/misalign-catch.c b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
new file mode 100644
index 0000000..ae5dfc4
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/misalign-catch.c
@@ -0,0 +1,21 @@
+/* { dg-do run } */
+/* { dg-options "--param asan-catch-misaligned=1" } */
+/* { dg-shouldfail "asan" } */
+
+long long *ptr;
+
+__attribute__((noinline))
+void foo () {
+   ptr = ((long long int *)(((char *)ptr) + 1));
+   *ptr = 1;
+}
+
+int main ()
+{
+   long long int local[9];
+   ptr = (long long *)&local[8];
+   foo ();
+   return 0;
+}
+
+/* { dg-output "ERROR: AddressSanitizer: stack-buffer-overflow.*(\n|\r\n|\r)" } */

^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2015-06-11 11:00 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-03-04  8:00 [PATCH] ASan on unaligned accesses Marat Zakirov
2015-03-04  8:07 ` Andrew Pinski
2015-03-19  6:01   ` [PINGv2][PATCH] " Marat Zakirov
2015-03-26  6:53     ` [PINGv3][PATCH] " Marat Zakirov
2015-03-26 11:50       ` Jakub Jelinek
2015-03-26 12:29         ` [PINGv4][PATCH] " Marat Zakirov
2015-03-30 17:43           ` Jakub Jelinek
2015-04-07 10:16             ` [PINGv5][PATCH] " Marat Zakirov
2015-04-07 12:22               ` Jakub Jelinek
2015-05-12 11:23                 ` [PINGv6][PATCH] " Marat Zakirov
2015-05-12 11:45                   ` Yury Gribov
2015-05-20  7:19                   ` [PINGv7][PATCH] " Marat Zakirov
2015-05-26 14:12                     ` [PINGv8][PATCH] " Marat Zakirov
2015-06-02 13:15                       ` [PINGv9][PATCH] " Marat Zakirov
2015-06-11 11:04                         ` [PINGv10][PATCH] " Marat Zakirov
2015-03-12 12:07 ` [PING][PATCH] " Marat Zakirov
