public inbox for binutils@sourceware.org
* [PATCH v2] x86: Add -munaligned-vector-move to assembler
@ 2021-10-21 17:50 H.J. Lu
  2021-10-22  8:06 ` Jan Beulich
  0 siblings, 1 reply; 4+ messages in thread
From: H.J. Lu @ 2021-10-21 17:50 UTC (permalink / raw)
  To: binutils

Unaligned load/store instructions on aligned memory or registers are as
fast as aligned load/store instructions on modern Intel processors.  Add
a command-line option, -munaligned-vector-move, to the x86 assembler to
encode aligned vector load/store instructions as unaligned vector
load/store instructions.
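
For example, with this option the aligned register moves

	movaps %xmm1, %xmm2		# 0F 28 D1
	movdqa %xmm1, %xmm2		# 66 0F 6F D1

are encoded as their unaligned counterparts

	movups %xmm1, %xmm2		# 0F 10 D1
	movdqu %xmm1, %xmm2		# F3 0F 6F D1

as exercised by the new tests.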

	* NEWS: Mention -munaligned-vector-move.
	* config/tc-i386.c (unaligned_vector_move): New.
	(optimize_encoding): Encode aligned vector move as unaligned
	vector move if asked.
	(md_assemble): Call optimize_encoding for -munaligned-vector-move.
	(OPTION_MUNALIGNED_VECTOR_MOVE): New.
	(md_longopts): Add -munaligned-vector-move.
	(md_parse_option): Handle -munaligned-vector-move.
	(md_show_usage): Add -munaligned-vector-move.
	* doc/c-i386.texi: Document -munaligned-vector-move.
	* testsuite/gas/i386/i386.exp: Run unaligned-vector-move and
	x86-64-unaligned-vector-move.
	* testsuite/gas/i386/unaligned-vector-move.d: New file.
	* testsuite/gas/i386/unaligned-vector-move.s: Likewise.
	* testsuite/gas/i386/x86-64-unaligned-vector-move.d: Likewise.
---
 gas/NEWS                                      |  3 ++
 gas/config/tc-i386.c                          | 34 ++++++++++++++++++-
 gas/doc/c-i386.texi                           |  6 ++++
 gas/testsuite/gas/i386/i386.exp               |  2 ++
 .../gas/i386/unaligned-vector-move.d          | 22 ++++++++++++
 .../gas/i386/unaligned-vector-move.s          | 15 ++++++++
 .../gas/i386/x86-64-unaligned-vector-move.d   | 23 +++++++++++++
 7 files changed, 104 insertions(+), 1 deletion(-)
 create mode 100644 gas/testsuite/gas/i386/unaligned-vector-move.d
 create mode 100644 gas/testsuite/gas/i386/unaligned-vector-move.s
 create mode 100644 gas/testsuite/gas/i386/x86-64-unaligned-vector-move.d

diff --git a/gas/NEWS b/gas/NEWS
index 5de205ecd55..d07d4c15ca8 100644
--- a/gas/NEWS
+++ b/gas/NEWS
@@ -1,5 +1,8 @@
 -*- text -*-
 
+* Add a command-line option, -munaligned-vector-move, for the x86 target
+  to encode aligned vector move as unaligned vector move.
+
 * Add support for Cortex-R52+ for Arm.
 
 * Add support for Cortex-A510, Cortex-A710, Cortex-X2 for AArch64.
diff --git a/gas/config/tc-i386.c b/gas/config/tc-i386.c
index 339f9694948..57ca2b6a25d 100644
--- a/gas/config/tc-i386.c
+++ b/gas/config/tc-i386.c
@@ -800,6 +800,9 @@ static unsigned int no_cond_jump_promotion = 0;
 /* Encode SSE instructions with VEX prefix.  */
 static unsigned int sse2avx;
 
+/* Encode aligned vector move as unaligned vector move.  */
+static unsigned int unaligned_vector_move;
+
 /* Encode scalar AVX instructions with specific vector length.  */
 static enum
   {
@@ -4080,6 +4083,26 @@ optimize_encoding (void)
 {
   unsigned int j;
 
+  /* Encode aligned vector move as unaligned vector move if asked.  */
+  if (unaligned_vector_move)
+    switch (i.tm.base_opcode)
+      {
+      case 0x28:
+	/* movaps/movapd/vmovaps/vmovapd.  */
+	if (i.tm.opcode_modifier.opcodespace == 1
+	    && i.tm.opcode_modifier.opcodeprefix <= 1)
+	  i.tm.base_opcode = 0x10;
+	break;
+      case 0x6f:
+	/* movdqa/vmovdqa/vmovdqa64/vmovdqa32 */
+	if (i.tm.opcode_modifier.opcodespace == 1
+	    && i.tm.opcode_modifier.opcodeprefix == 1)
+	  i.tm.opcode_modifier.opcodeprefix = 2;
+	break;
+      default:
+	break;
+      }
+
   if (i.tm.opcode_modifier.opcodespace == SPACE_BASE
       && i.tm.base_opcode == 0x8d)
     {
@@ -5053,7 +5076,8 @@ md_assemble (char *line)
       i.disp_operands = 0;
     }
 
-  if (optimize && !i.no_optimize && i.tm.opcode_modifier.optimize)
+  if (unaligned_vector_move
+      || (optimize && !i.no_optimize && i.tm.opcode_modifier.optimize))
     optimize_encoding ();
 
   if (!process_suffix ())
@@ -13060,6 +13084,7 @@ const char *md_shortopts = "qnO::";
 #define OPTION_MLFENCE_AFTER_LOAD (OPTION_MD_BASE + 31)
 #define OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH (OPTION_MD_BASE + 32)
 #define OPTION_MLFENCE_BEFORE_RET (OPTION_MD_BASE + 33)
+#define OPTION_MUNALIGNED_VECTOR_MOVE (OPTION_MD_BASE + 34)
 
 struct option md_longopts[] =
 {
@@ -13081,6 +13106,7 @@ struct option md_longopts[] =
   {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
   {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
   {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
+  {"munaligned-vector-move", no_argument, NULL, OPTION_MUNALIGNED_VECTOR_MOVE},
   {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
   {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
   {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
@@ -13381,6 +13407,10 @@ md_parse_option (int c, const char *arg)
       sse2avx = 1;
       break;
 
+    case OPTION_MUNALIGNED_VECTOR_MOVE:
+      unaligned_vector_move = 1;
+      break;
+
     case OPTION_MSSE_CHECK:
       if (strcasecmp (arg, "error") == 0)
 	sse_check = check_error;
@@ -13796,6 +13826,8 @@ md_show_usage (FILE *stream)
   fprintf (stream, _("\
   -msse2avx               encode SSE instructions with VEX prefix\n"));
   fprintf (stream, _("\
+  -munaligned-vector-move encode aligned vector move as unaligned vector move\n"));
+  fprintf (stream, _("\
   -msse-check=[none|error|warning] (default: warning)\n\
                           check SSE instructions\n"));
   fprintf (stream, _("\
diff --git a/gas/doc/c-i386.texi b/gas/doc/c-i386.texi
index 99576ef2953..caf8fe0a0a7 100644
--- a/gas/doc/c-i386.texi
+++ b/gas/doc/c-i386.texi
@@ -316,6 +316,12 @@ Valid @var{CPU} values are identical to the processor list of
 This option specifies that the assembler should encode SSE instructions
 with VEX prefix.
 
+@cindex @samp{-munaligned-vector-move} option, i386
+@cindex @samp{-munaligned-vector-move} option, x86-64
+@item -munaligned-vector-move
+This option specifies that the assembler should encode aligned
+vector move as unaligned vector move.
+
 @cindex @samp{-msse-check=} option, i386
 @cindex @samp{-msse-check=} option, x86-64
 @item -msse-check=@var{none}
diff --git a/gas/testsuite/gas/i386/i386.exp b/gas/testsuite/gas/i386/i386.exp
index 680259b1c4e..378e32b39cb 100644
--- a/gas/testsuite/gas/i386/i386.exp
+++ b/gas/testsuite/gas/i386/i386.exp
@@ -272,6 +272,7 @@ if [gas_32_check] then {
     run_dump_test "evex-wig1-intel"
     run_dump_test "evex-no-scale-32"
     run_dump_test "sse2avx"
+    run_dump_test "unaligned-vector-move"
     run_list_test "inval-avx" "-al"
     run_list_test "inval-avx512f" "-al"
     run_list_test "inval-avx512vl" "-al"
@@ -948,6 +949,7 @@ if [gas_64_check] then {
     run_dump_test "x86-64-evex-wig2"
     run_dump_test "evex-no-scale-64"
     run_dump_test "x86-64-sse2avx"
+    run_dump_test "x86-64-unaligned-vector-move"
     run_list_test "x86-64-inval-avx" "-al"
     run_list_test "x86-64-inval-avx512f" "-al"
     run_list_test "x86-64-inval-avx512vl" "-al"
diff --git a/gas/testsuite/gas/i386/unaligned-vector-move.d b/gas/testsuite/gas/i386/unaligned-vector-move.d
new file mode 100644
index 00000000000..be0d96fd8b2
--- /dev/null
+++ b/gas/testsuite/gas/i386/unaligned-vector-move.d
@@ -0,0 +1,22 @@
+#as: -munaligned-vector-move
+#objdump: -dw
+#name: i386 (Encode aligned vector move as unaligned vector move)
+
+.*: +file format .*
+
+
+Disassembly of section .text:
+
+0+ <_start>:
+ +[a-f0-9]+:	0f 10 d1             	movups %xmm1,%xmm2
+ +[a-f0-9]+:	66 0f 10 d1          	movupd %xmm1,%xmm2
+ +[a-f0-9]+:	f3 0f 6f d1          	movdqu %xmm1,%xmm2
+ +[a-f0-9]+:	c5 f8 10 d1          	vmovups %xmm1,%xmm2
+ +[a-f0-9]+:	c5 f9 10 d1          	vmovupd %xmm1,%xmm2
+ +[a-f0-9]+:	c5 fa 6f d1          	vmovdqu %xmm1,%xmm2
+ +[a-f0-9]+:	c5 f8 10 d1          	vmovups %xmm1,%xmm2
+ +[a-f0-9]+:	62 f1 fd 09 10 d1    	vmovupd %xmm1,%xmm2\{%k1\}
+ +[a-f0-9]+:	62 f1 7c 09 10 d1    	vmovups %xmm1,%xmm2\{%k1\}
+ +[a-f0-9]+:	62 f1 7e 09 6f d1    	vmovdqu32 %xmm1,%xmm2\{%k1\}
+ +[a-f0-9]+:	62 f1 fe 09 6f d1    	vmovdqu64 %xmm1,%xmm2\{%k1\}
+#pass
diff --git a/gas/testsuite/gas/i386/unaligned-vector-move.s b/gas/testsuite/gas/i386/unaligned-vector-move.s
new file mode 100644
index 00000000000..b88ae232a38
--- /dev/null
+++ b/gas/testsuite/gas/i386/unaligned-vector-move.s
@@ -0,0 +1,15 @@
+# Encode aligned vector move as unaligned vector move.
+
+	.text
+_start:
+	movaps %xmm1, %xmm2
+	movapd %xmm1, %xmm2
+	movdqa %xmm1, %xmm2
+	vmovaps %xmm1, %xmm2
+	vmovapd %xmm1, %xmm2
+	vmovdqa %xmm1, %xmm2
+	vmovaps %xmm1, %xmm2
+	vmovapd %xmm1, %xmm2{%k1}
+	vmovaps %xmm1, %xmm2{%k1}
+	vmovdqa32 %xmm1, %xmm2{%k1}
+	vmovdqa64 %xmm1, %xmm2{%k1}
diff --git a/gas/testsuite/gas/i386/x86-64-unaligned-vector-move.d b/gas/testsuite/gas/i386/x86-64-unaligned-vector-move.d
new file mode 100644
index 00000000000..410d9478dad
--- /dev/null
+++ b/gas/testsuite/gas/i386/x86-64-unaligned-vector-move.d
@@ -0,0 +1,23 @@
+#source: unaligned-vector-move.s
+#as: -munaligned-vector-move
+#objdump: -dw
+#name: x86-64 (Encode aligned vector move as unaligned vector move)
+
+.*: +file format .*
+
+
+Disassembly of section .text:
+
+0+ <_start>:
+ +[a-f0-9]+:	0f 10 d1             	movups %xmm1,%xmm2
+ +[a-f0-9]+:	66 0f 10 d1          	movupd %xmm1,%xmm2
+ +[a-f0-9]+:	f3 0f 6f d1          	movdqu %xmm1,%xmm2
+ +[a-f0-9]+:	c5 f8 10 d1          	vmovups %xmm1,%xmm2
+ +[a-f0-9]+:	c5 f9 10 d1          	vmovupd %xmm1,%xmm2
+ +[a-f0-9]+:	c5 fa 6f d1          	vmovdqu %xmm1,%xmm2
+ +[a-f0-9]+:	c5 f8 10 d1          	vmovups %xmm1,%xmm2
+ +[a-f0-9]+:	62 f1 fd 09 10 d1    	vmovupd %xmm1,%xmm2\{%k1\}
+ +[a-f0-9]+:	62 f1 7c 09 10 d1    	vmovups %xmm1,%xmm2\{%k1\}
+ +[a-f0-9]+:	62 f1 7e 09 6f d1    	vmovdqu32 %xmm1,%xmm2\{%k1\}
+ +[a-f0-9]+:	62 f1 fe 09 6f d1    	vmovdqu64 %xmm1,%xmm2\{%k1\}
+#pass
-- 
2.32.0


* Re: [PATCH v2] x86: Add -munaligned-vector-move to assembler
  2021-10-21 17:50 [PATCH v2] x86: Add -munaligned-vector-move to assembler H.J. Lu
@ 2021-10-22  8:06 ` Jan Beulich
  2021-10-22 12:09   ` H.J. Lu
  0 siblings, 1 reply; 4+ messages in thread
From: Jan Beulich @ 2021-10-22  8:06 UTC (permalink / raw)
  To: H.J. Lu; +Cc: binutils

On 21.10.2021 19:50, H.J. Lu wrote:
> Unaligned load/store instructions on aligned memory or registers are as
> fast as aligned load/store instructions on modern Intel processors.  Add
> a command-line option, -munaligned-vector-move, to the x86 assembler to
> encode aligned vector load/store instructions as unaligned vector
> load/store instructions.

As said, I'm still missing a spelled-out motivation for the change
here.  The resulting code, afaict, isn't going to be any better than
what we have now, yet there's the price of the extra new code that
you introduce.

> @@ -4080,6 +4083,26 @@ optimize_encoding (void)
>  {
>    unsigned int j;
>  
> +  /* Encode aligned vector move as unaligned vector move if asked.  */
> +  if (unaligned_vector_move)
> +    switch (i.tm.base_opcode)
> +      {
> +      case 0x28:
> +	/* movaps/movapd/vmovaps/vmovapd.  */
> +	if (i.tm.opcode_modifier.opcodespace == 1
> +	    && i.tm.opcode_modifier.opcodeprefix <= 1)

I don't think the prefix needs checking here? F3 and F2 encodings
don't exist, so maybe at most gas_assert() this?
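
I.e. at most something like

	gas_assert (i.tm.opcode_modifier.opcodeprefix <= 1);

in place of the conditional.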

> +	  i.tm.base_opcode = 0x10;
> +	break;
> +      case 0x6f:
> +	/* movdqa/vmovdqa/vmovdqa64/vmovdqa32 */
> +	if (i.tm.opcode_modifier.opcodespace == 1
> +	    && i.tm.opcode_modifier.opcodeprefix == 1)
> +	  i.tm.opcode_modifier.opcodeprefix = 2;

Please can you avoid using literal numbers here? This not only makes it
needlessly harder to potentially change the SPACE_* and PREFIX_* values
(despite me hoping / assuming that we would never have to do so), but
also makes the code not sufficiently self-documenting.

> @@ -5053,7 +5076,8 @@ md_assemble (char *line)
>        i.disp_operands = 0;
>      }
>  
> -  if (optimize && !i.no_optimize && i.tm.opcode_modifier.optimize)
> +  if (unaligned_vector_move
> +      || (optimize && !i.no_optimize && i.tm.opcode_modifier.optimize))
>      optimize_encoding ();

Isn't this fragile? You now depend on optimize_encoding() to do nothing
optimization-wise if unaligned_vector_move is set. I was rather hoping
you would introduce a separate helper.

Jan


* Re: [PATCH v2] x86: Add -munaligned-vector-move to assembler
  2021-10-22  8:06 ` Jan Beulich
@ 2021-10-22 12:09   ` H.J. Lu
  2021-10-25 15:54     ` Michael Matz
  0 siblings, 1 reply; 4+ messages in thread
From: H.J. Lu @ 2021-10-22 12:09 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Binutils

On Fri, Oct 22, 2021 at 1:06 AM Jan Beulich <jbeulich@suse.com> wrote:
>
> On 21.10.2021 19:50, H.J. Lu wrote:
> > Unaligned load/store instructions on aligned memory or registers are as
> > fast as aligned load/store instructions on modern Intel processors.  Add
> > a command-line option, -munaligned-vector-move, to the x86 assembler to
> > encode aligned vector load/store instructions as unaligned vector
> > load/store instructions.
>
> As said, I'm still missing a spelled-out motivation for the change
> here.  The resulting code, afaict, isn't going to be any better than
> what we have now, yet there's the price of the extra new code that
> you introduce.

We'd like to have such an option just in case.

> > @@ -4080,6 +4083,26 @@ optimize_encoding (void)
> >  {
> >    unsigned int j;
> >
> > +  /* Encode aligned vector move as unaligned vector move if asked.  */
> > +  if (unaligned_vector_move)
> > +    switch (i.tm.base_opcode)
> > +      {
> > +      case 0x28:
> > +     /* movaps/movapd/vmovaps/vmovapd.  */
> > +     if (i.tm.opcode_modifier.opcodespace == 1
> > +         && i.tm.opcode_modifier.opcodeprefix <= 1)
>
> I don't think the prefix needs checking here? F3 and F2 encodings
> don't exist, so maybe at most gas_assert() this?

I changed it to

    if (i.tm.opcode_modifier.opcodespace == SPACE_0F
        && i.tm.opcode_modifier.opcodeprefix <= PREFIX_0X66)
      i.tm.base_opcode = 0x10;

in case F3 and F2 are used in the future.

> > +       i.tm.base_opcode = 0x10;
> > +     break;
> > +      case 0x6f:
> > +     /* movdqa/vmovdqa/vmovdqa64/vmovdqa32 */
> > +     if (i.tm.opcode_modifier.opcodespace == 1
> > +         && i.tm.opcode_modifier.opcodeprefix == 1)
> > +       i.tm.opcode_modifier.opcodeprefix = 2;
>
> Please can you avoid using literal numbers here? This not only makes it
> needlessly harder to potentially change the SPACE_* and PREFIX_* values
> (despite me hoping / assuming that we would never have to do so), but
> also makes the code not sufficiently self-documenting.

I changed it to

    if (i.tm.opcode_modifier.opcodespace == SPACE_0F
        && i.tm.opcode_modifier.opcodeprefix == PREFIX_0X66)
      i.tm.opcode_modifier.opcodeprefix = PREFIX_0XF3;

> > @@ -5053,7 +5076,8 @@ md_assemble (char *line)
> >        i.disp_operands = 0;
> >      }
> >
> > -  if (optimize && !i.no_optimize && i.tm.opcode_modifier.optimize)
> > +  if (unaligned_vector_move
> > +      || (optimize && !i.no_optimize && i.tm.opcode_modifier.optimize))
> >      optimize_encoding ();
>
> Isn't this fragile? You now depend on optimize_encoding() to do nothing
> optimization-wise if unaligned_vector_move is set. I was rather hoping
> you would introduce a separate helper.
>

I added encode_with_unaligned_vector_move and renamed the option
to -muse-unaligned-vector-move.
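
I.e., combining the two fragments above into a separate helper
(a sketch; the actual v3 patch may differ in detail):

    /* Encode aligned vector move as unaligned vector move if asked.  */
    static void
    encode_with_unaligned_vector_move (void)
    {
      switch (i.tm.base_opcode)
        {
        case 0x28:
          /* movaps/movapd/vmovaps/vmovapd.  */
          if (i.tm.opcode_modifier.opcodespace == SPACE_0F
              && i.tm.opcode_modifier.opcodeprefix <= PREFIX_0X66)
            i.tm.base_opcode = 0x10;
          break;
        case 0x6f:
          /* movdqa/vmovdqa/vmovdqa32/vmovdqa64.  */
          if (i.tm.opcode_modifier.opcodespace == SPACE_0F
              && i.tm.opcode_modifier.opcodeprefix == PREFIX_0X66)
            i.tm.opcode_modifier.opcodeprefix = PREFIX_0XF3;
          break;
        default:
          break;
        }
    }

called from md_assemble () when the option is given, independent of
the -O optimization path.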

Thanks.

-- 
H.J.

* Re: [PATCH v2] x86: Add -munaligned-vector-move to assembler
  2021-10-22 12:09   ` H.J. Lu
@ 2021-10-25 15:54     ` Michael Matz
  0 siblings, 0 replies; 4+ messages in thread
From: Michael Matz @ 2021-10-25 15:54 UTC (permalink / raw)
  To: H.J. Lu; +Cc: Jan Beulich, Binutils

Hello,

On Fri, 22 Oct 2021, H.J. Lu via Binutils wrote:

> On Fri, Oct 22, 2021 at 1:06 AM Jan Beulich <jbeulich@suse.com> wrote:
> >
> > On 21.10.2021 19:50, H.J. Lu wrote:
> > > Unaligned load/store instructions on aligned memory or registers are as
> > > fast as aligned load/store instructions on modern Intel processors.  Add
> > > a command-line option, -munaligned-vector-move, to the x86 assembler to
> > > encode aligned vector load/store instructions as unaligned vector
> > > load/store instructions.
> >
> > As said, I'm still missing a spelled-out motivation for the change
> > here.  The resulting code, afaict, isn't going to be any better than
> > what we have now, yet there's the price of the extra new code that
> > you introduce.
> 
> We'd like to have such an option just in case.

As long as it's never going to be the default...

I would be quite opposed to making it the default.  Linker and
assembler relaxation are one thing (i.e. rewriting certain instruction
combinations into others), but silently changing an explicitly written
opcode mnemonic into some other opcode entirely is something else: the
mnemonic has no other documented forms, and there is no existing custom
of the assembler choosing among multiple alternatives; and that holds
no matter whether the two currently have the same semantics for the
situation at hand.

Imagine the aligned variants acquiring slightly different behaviour
again in the future (or on other processors).  Authors of assembly
snippets who then claim it's a bug that they requested the aligned
variant and the assembler silently gave them the unaligned one would be
correct: it would be a bug in the assembler.

This is a change that should be done in the compiler.  If you want an
instruction that can expand to aligned or unaligned variants, then you
could also create a new mnemonic (and document that it may expand to
either variant).

Of course, this all being optional on a flag: yeah, well, maybe 
acceptable.  But IMHO even that feels wrong.


Ciao,
Michael.

> 
> > > @@ -4080,6 +4083,26 @@ optimize_encoding (void)
> > >  {
> > >    unsigned int j;
> > >
> > > +  /* Encode aligned vector move as unaligned vector move if asked.  */
> > > +  if (unaligned_vector_move)
> > > +    switch (i.tm.base_opcode)
> > > +      {
> > > +      case 0x28:
> > > +     /* movaps/movapd/vmovaps/vmovapd.  */
> > > +     if (i.tm.opcode_modifier.opcodespace == 1
> > > +         && i.tm.opcode_modifier.opcodeprefix <= 1)
> >
> > I don't think the prefix needs checking here? F3 and F2 encodings
> > don't exist, so maybe at most gas_assert() this?
> 
> I changed it to
> 
>     if (i.tm.opcode_modifier.opcodespace == SPACE_0F
>         && i.tm.opcode_modifier.opcodeprefix <= PREFIX_0X66)
>       i.tm.base_opcode = 0x10;
> 
> in case F3 and F2 are used in the future.
> 
> > > +       i.tm.base_opcode = 0x10;
> > > +     break;
> > > +      case 0x6f:
> > > +     /* movdqa/vmovdqa/vmovdqa64/vmovdqa32 */
> > > +     if (i.tm.opcode_modifier.opcodespace == 1
> > > +         && i.tm.opcode_modifier.opcodeprefix == 1)
> > > +       i.tm.opcode_modifier.opcodeprefix = 2;
> >
> > Please can you avoid using literal numbers here? This not only makes it
> > needlessly harder to potentially change the SPACE_* and PREFIX_* values
> > (despite me hoping / assuming that we would never have to do so), but
> > also makes the code not sufficiently self-documenting.
> 
> I changed it to
> 
>     if (i.tm.opcode_modifier.opcodespace == SPACE_0F
>         && i.tm.opcode_modifier.opcodeprefix == PREFIX_0X66)
>       i.tm.opcode_modifier.opcodeprefix = PREFIX_0XF3;
> 
> > > @@ -5053,7 +5076,8 @@ md_assemble (char *line)
> > >        i.disp_operands = 0;
> > >      }
> > >
> > > -  if (optimize && !i.no_optimize && i.tm.opcode_modifier.optimize)
> > > +  if (unaligned_vector_move
> > > +      || (optimize && !i.no_optimize && i.tm.opcode_modifier.optimize))
> > >      optimize_encoding ();
> >
> > Isn't this fragile? You now depend on optimize_encoding() to do nothing
> > optimization-wise if unaligned_vector_move is set. I was rather hoping
> > you would introduce a separate helper.
> >
> 
> I added encode_with_unaligned_vector_move and renamed the option
> to -muse-unaligned-vector-move.
> 
> Thanks.
> 
> 
