public inbox for binutils@sourceware.org
* [PATCH 05/16] [binutils][aarch64] New SVE_Zm3_11_INDEX operand.
From: Matthew Malcomson @ 2019-05-01 14:44 UTC (permalink / raw)
  To: binutils; +Cc: nd, Matthew Malcomson

Introduce the new operand SVE_Zm3_11_INDEX, which denotes a register in the
range z0-z7 stored in bits 18-16, together with an index stored in bits 20-19
and bit 11.
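
As a rough illustration (not part of the patch), the three fields combine
along these lines, assuming the bit layout described above:

  /* Illustrative sketch only: pull the SVE_Zm3_11_INDEX register number and
     index out of a 32-bit instruction word.  */
  #include <stdint.h>
  #include <stdio.h>

  static void
  decode_zm3_11_index (uint32_t insn)
  {
    unsigned int regno = (insn >> 16) & 0x7;    /* Zm register, bits [18,16].  */
    unsigned int idx_hi = (insn >> 19) & 0x3;   /* index high bits, bits [20,19].  */
    unsigned int idx_lo = (insn >> 11) & 0x1;   /* index low bit, bit 11.  */
    printf ("z%u[%u]\n", regno, (idx_hi << 1) | idx_lo);
  }

  int
  main (void)
  {
    decode_zm3_11_index (0x00180800);  /* arbitrary bit pattern, not a real insn */
    return 0;
  }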

gas/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* config/tc-aarch64.c (parse_operands): Handle new SVE_Zm3_11_INDEX operand.

include/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* opcode/aarch64.h (enum aarch64_opnd): New SVE_Zm3_11_INDEX operand.

opcodes/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* aarch64-asm-2.c: Regenerated.
	* aarch64-dis-2.c: Regenerated.
	* aarch64-opc-2.c: Regenerated.
	* aarch64-opc.c (operand_general_constraint_met_p): Constraint checking
	for SVE_Zm3_11_INDEX.
	(aarch64_print_operand): Add printing for SVE_Zm3_11_INDEX.
	(fields): Handle SVE_i3l and SVE_i3h2 fields.
	* aarch64-opc.h (enum aarch64_field_kind): New SVE_i3l and SVE_i3h2
	fields.
	* aarch64-tbl.h (AARCH64_OPERANDS): Use new SVE_Zm3_11_INDEX operand.
---
 gas/config/tc-aarch64.c  |  1 +
 include/opcode/aarch64.h |  1 +
 opcodes/aarch64-asm-2.c  | 13 +++++++------
 opcodes/aarch64-dis-2.c  | 13 +++++++------
 opcodes/aarch64-opc-2.c  |  1 +
 opcodes/aarch64-opc.c    |  4 ++++
 opcodes/aarch64-opc.h    |  2 ++
 opcodes/aarch64-tbl.h    |  3 +++
 8 files changed, 26 insertions(+), 12 deletions(-)

diff --git a/gas/config/tc-aarch64.c b/gas/config/tc-aarch64.c
index 407ffee..612febd 100644
--- a/gas/config/tc-aarch64.c
+++ b/gas/config/tc-aarch64.c
@@ -5635,6 +5635,7 @@ parse_operands (char *str, const aarch64_opcode *opcode)
 
 	case AARCH64_OPND_SVE_Zm3_INDEX:
 	case AARCH64_OPND_SVE_Zm3_22_INDEX:
+	case AARCH64_OPND_SVE_Zm3_11_INDEX:
 	case AARCH64_OPND_SVE_Zm4_INDEX:
 	case AARCH64_OPND_SVE_Zn_INDEX:
 	  reg_type = REG_TYPE_ZN;
diff --git a/include/opcode/aarch64.h b/include/opcode/aarch64.h
index 10541d8..1c3f126 100644
--- a/include/opcode/aarch64.h
+++ b/include/opcode/aarch64.h
@@ -409,6 +409,7 @@ enum aarch64_opnd
   AARCH64_OPND_SVE_Zm_16,	/* SVE vector register in Zm, bits [20,16].  */
   AARCH64_OPND_SVE_Zm3_INDEX,	/* z0-z7[0-3] in Zm, bits [20,16].  */
   AARCH64_OPND_SVE_Zm3_22_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 22.  */
+  AARCH64_OPND_SVE_Zm3_11_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 11.  */
   AARCH64_OPND_SVE_Zm4_INDEX,	/* z0-z15[0-1] in Zm, bits [20,16].  */
   AARCH64_OPND_SVE_Zn,		/* SVE vector register in Zn.  */
   AARCH64_OPND_SVE_Zn_INDEX,	/* Indexed SVE vector register, for DUP.  */
diff --git a/opcodes/aarch64-asm-2.c b/opcodes/aarch64-asm-2.c
index b89d677..0b67ceb 100644
--- a/opcodes/aarch64-asm-2.c
+++ b/opcodes/aarch64-asm-2.c
@@ -646,8 +646,8 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 188:
     case 189:
     case 190:
-    case 194:
-    case 197:
+    case 195:
+    case 198:
       return aarch64_ins_regno (self, info, code, inst, errors);
     case 13:
       return aarch64_ins_reg_extended (self, info, code, inst, errors);
@@ -659,7 +659,7 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 31:
     case 32:
     case 33:
-    case 199:
+    case 200:
       return aarch64_ins_reglane (self, info, code, inst, errors);
     case 34:
       return aarch64_ins_reglist (self, info, code, inst, errors);
@@ -846,11 +846,12 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 191:
     case 192:
     case 193:
+    case 194:
       return aarch64_ins_sve_quad_index (self, info, code, inst, errors);
-    case 195:
-      return aarch64_ins_sve_index (self, info, code, inst, errors);
     case 196:
-    case 198:
+      return aarch64_ins_sve_index (self, info, code, inst, errors);
+    case 197:
+    case 199:
       return aarch64_ins_sve_reglist (self, info, code, inst, errors);
     default: assert (0); abort ();
     }
diff --git a/opcodes/aarch64-dis-2.c b/opcodes/aarch64-dis-2.c
index f7dddef..630ef20 100644
--- a/opcodes/aarch64-dis-2.c
+++ b/opcodes/aarch64-dis-2.c
@@ -20041,8 +20041,8 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 188:
     case 189:
     case 190:
-    case 194:
-    case 197:
+    case 195:
+    case 198:
       return aarch64_ext_regno (self, info, code, inst, errors);
     case 8:
       return aarch64_ext_regrt_sysins (self, info, code, inst, errors);
@@ -20058,7 +20058,7 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 31:
     case 32:
     case 33:
-    case 199:
+    case 200:
       return aarch64_ext_reglane (self, info, code, inst, errors);
     case 34:
       return aarch64_ext_reglist (self, info, code, inst, errors);
@@ -20248,11 +20248,12 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 191:
     case 192:
     case 193:
+    case 194:
       return aarch64_ext_sve_quad_index (self, info, code, inst, errors);
-    case 195:
-      return aarch64_ext_sve_index (self, info, code, inst, errors);
     case 196:
-    case 198:
+      return aarch64_ext_sve_index (self, info, code, inst, errors);
+    case 197:
+    case 199:
       return aarch64_ext_sve_reglist (self, info, code, inst, errors);
     default: assert (0); abort ();
     }
diff --git a/opcodes/aarch64-opc-2.c b/opcodes/aarch64-opc-2.c
index 90e9654..db2fc37 100644
--- a/opcodes/aarch64-opc-2.c
+++ b/opcodes/aarch64-opc-2.c
@@ -217,6 +217,7 @@ const struct aarch64_operand aarch64_operands[] =
   {AARCH64_OPND_CLASS_SVE_REG, "SVE_Zm_16", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zm_16}, "an SVE vector register"},
   {AARCH64_OPND_CLASS_SVE_REG, "SVE_Zm3_INDEX", 3 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zm_16}, "an indexed SVE vector register"},
   {AARCH64_OPND_CLASS_SVE_REG, "SVE_Zm3_22_INDEX", 3 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_i3h, FLD_SVE_Zm_16}, "an indexed SVE vector register"},
+  {AARCH64_OPND_CLASS_SVE_REG, "SVE_Zm3_11_INDEX", 3 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_i3h2, FLD_SVE_i3l, FLD_SVE_imm3}, "an indexed SVE vector register"},
   {AARCH64_OPND_CLASS_SVE_REG, "SVE_Zm4_INDEX", 4 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zm_16}, "an indexed SVE vector register"},
   {AARCH64_OPND_CLASS_SVE_REG, "SVE_Zn", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zn}, "an SVE vector register"},
   {AARCH64_OPND_CLASS_SVE_REG, "SVE_Zn_INDEX", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zn}, "an indexed SVE vector register"},
diff --git a/opcodes/aarch64-opc.c b/opcodes/aarch64-opc.c
index c133c1d..695146f 100644
--- a/opcodes/aarch64-opc.c
+++ b/opcodes/aarch64-opc.c
@@ -294,6 +294,8 @@ const aarch64_field fields[] =
     {  0,  5 }, /* SVE_Zt: SVE vector register, bits [4,0].  */
     {  5,  1 }, /* SVE_i1: single-bit immediate.  */
     { 22,  1 }, /* SVE_i3h: high bit of 3-bit immediate.  */
+    { 11,  1 }, /* SVE_i3l: low bit of 3-bit immediate.  */
+    { 19,  2 }, /* SVE_i3h2: two high bits of 3bit immediate, bits [20,19].  */
     { 16,  3 }, /* SVE_imm3: 3-bit immediate field.  */
     { 16,  4 }, /* SVE_imm4: 4-bit immediate field.  */
     {  5,  5 }, /* SVE_imm5: 5-bit immediate field.  */
@@ -1515,6 +1517,7 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
 	{
 	case AARCH64_OPND_SVE_Zm3_INDEX:
 	case AARCH64_OPND_SVE_Zm3_22_INDEX:
+	case AARCH64_OPND_SVE_Zm3_11_INDEX:
 	case AARCH64_OPND_SVE_Zm4_INDEX:
 	  size = get_operand_fields_width (get_operand_from_code (type));
 	  shift = get_operand_specific_data (&aarch64_operands[type]);
@@ -3301,6 +3304,7 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
 
     case AARCH64_OPND_SVE_Zm3_INDEX:
     case AARCH64_OPND_SVE_Zm3_22_INDEX:
+    case AARCH64_OPND_SVE_Zm3_11_INDEX:
     case AARCH64_OPND_SVE_Zm4_INDEX:
     case AARCH64_OPND_SVE_Zn_INDEX:
       snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
diff --git a/opcodes/aarch64-opc.h b/opcodes/aarch64-opc.h
index 942fa58..8803bca 100644
--- a/opcodes/aarch64-opc.h
+++ b/opcodes/aarch64-opc.h
@@ -121,6 +121,8 @@ enum aarch64_field_kind
   FLD_SVE_Zt,
   FLD_SVE_i1,
   FLD_SVE_i3h,
+  FLD_SVE_i3l,
+  FLD_SVE_i3h2,
   FLD_SVE_imm3,
   FLD_SVE_imm4,
   FLD_SVE_imm5,
diff --git a/opcodes/aarch64-tbl.h b/opcodes/aarch64-tbl.h
index 980d8fa..826b4df 100644
--- a/opcodes/aarch64-tbl.h
+++ b/opcodes/aarch64-tbl.h
@@ -4939,6 +4939,9 @@ struct aarch64_opcode aarch64_opcode_table[] =
     Y(SVE_REG, sve_quad_index, "SVE_Zm3_22_INDEX", 			\
       3 << OPD_F_OD_LSB, F(FLD_SVE_i3h, FLD_SVE_Zm_16),			\
       "an indexed SVE vector register")					\
+    Y(SVE_REG, sve_quad_index, "SVE_Zm3_11_INDEX", 			\
+      3 << OPD_F_OD_LSB, F(FLD_SVE_i3h2, FLD_SVE_i3l, FLD_SVE_imm3),    \
+      "an indexed SVE vector register")					\
     Y(SVE_REG, sve_quad_index, "SVE_Zm4_INDEX", 			\
       4 << OPD_F_OD_LSB, F(FLD_SVE_Zm_16),				\
       "an indexed SVE vector register")					\
-- 
2.7.4



* [PATCH 08/16] [binutils][aarch64] New sve_size_bh iclass.
From: Matthew Malcomson @ 2019-05-01 14:44 UTC (permalink / raw)
  To: binutils; +Cc: nd, Matthew Malcomson

Add a new iclass, sve_size_bh, to handle instructions that have two variants
encoded in the SVE_sz field.
This iclass behaves the same as the sve_size_sd iclass, but gives a clearer
name for instructions that choose between their variants using the "B" and
"H" size qualifiers.

include/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* opcode/aarch64.h (enum aarch64_insn_class): Add sve_size_bh iclass.

opcodes/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* aarch64-asm.c (aarch64_encode_variant_using_iclass): Handle
	sve_size_bh iclass encode.
	* aarch64-dis.c (aarch64_decode_variant_using_iclass): Handle
	sve_size_bh iclass decode.
---
 include/opcode/aarch64.h | 1 +
 opcodes/aarch64-asm.c    | 1 +
 opcodes/aarch64-dis.c    | 1 +
 3 files changed, 3 insertions(+)

diff --git a/include/opcode/aarch64.h b/include/opcode/aarch64.h
index d23a6e7..d1d4a23 100644
--- a/include/opcode/aarch64.h
+++ b/include/opcode/aarch64.h
@@ -590,6 +590,7 @@ enum aarch64_insn_class
   sve_size_hsd,
   sve_size_hsd2,
   sve_size_sd,
+  sve_size_bh,
   sve_size_sd2,
   testbranch,
   cryptosm3,
diff --git a/opcodes/aarch64-asm.c b/opcodes/aarch64-asm.c
index 6627b54..674eba5 100644
--- a/opcodes/aarch64-asm.c
+++ b/opcodes/aarch64-asm.c
@@ -1655,6 +1655,7 @@ aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
       insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) + 1, 0);
       break;
 
+    case sve_size_bh:
     case sve_size_sd:
       insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
       break;
diff --git a/opcodes/aarch64-dis.c b/opcodes/aarch64-dis.c
index 35576b3..bfc47b4 100644
--- a/opcodes/aarch64-dis.c
+++ b/opcodes/aarch64-dis.c
@@ -2806,6 +2806,7 @@ aarch64_decode_variant_using_iclass (aarch64_inst *inst)
       variant = i - 1;
       break;
 
+    case sve_size_bh:
     case sve_size_sd:
       variant = extract_field (FLD_SVE_sz, inst->value, 0);
       break;
-- 
2.7.4



* [PATCH 01/16] [binutils][aarch64] SVE2 feature extension flags.
From: Matthew Malcomson @ 2019-05-01 14:44 UTC (permalink / raw)
  To: binutils; +Cc: nd, Matthew Malcomson

Add all of the new SVE2 feature flag macros.

The "sve2" extension enables the core SVE2 instructions.
It also enables the "sve" extension, since SVE is a prerequisite of SVE2.

The optional SVE2 features are the bitperm, sm4, aes, and sha3 extensions.
These are given the feature flags "bitperm", "sve2-sm4", "sve2-aes", and
"sve2-sha3" respectively.
The sm4, aes, and sha3 flags are explicitly marked as SVE2 extensions to
distinguish them from the corresponding NEON extensions.

Rather than continuing to extend the current run of feature flag values, I
used some bit positions that had been skipped.
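
As a rough sketch of how the dependency composes (the SVE2 bit value below is
taken from this patch; the SVE bit is a placeholder, not the real macro):

  /* Illustrative only: enabling "sve2" on the command line ORs in its own
     feature bit plus the bits it requires, here just SVE.  */
  #include <stdint.h>
  #include <stdio.h>

  #define F_SVE2  0x000000010ULL   /* AARCH64_FEATURE_SVE2 from this patch.  */
  #define F_SVE   (1ULL << 60)     /* placeholder for AARCH64_FEATURE_SVE.  */

  int
  main (void)
  {
    uint64_t enabled = 0;
    enabled |= F_SVE2 | F_SVE;    /* "+sve2" implies SVE as well.  */
    printf ("effective feature mask: %#llx\n", (unsigned long long) enabled);
    return 0;
  }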

gas/ChangeLog:

2019-04-02  Matthew Malcomson  <matthew.malcomson@arm.com>

	* config/tc-aarch64.c: Add command line architecture feature flags
	"sve2", "sve2-sm4", "sve2-aes", "sve2-sha3", "bitperm".
	* doc/c-aarch64.texi: Document new architecture feature flags.

include/ChangeLog:

2019-04-02  Matthew Malcomson  <matthew.malcomson@arm.com>

	* opcode/aarch64.h (AARCH64_FEATURE_SVE2
	AARCH64_FEATURE_SVE2_AES, AARCH64_FEATURE_SVE2_BITPERM,
	AARCH64_FEATURE_SVE2_SM4, AARCH64_FEATURE_SVE2_SHA3): New
	feature macros.

opcodes/ChangeLog:

2019-04-02  Matthew Malcomson  <matthew.malcomson@arm.com>

	* aarch64-tbl.h
	(aarch64_feature_sve2, aarch64_feature_sve2aes,
	aarch64_feature_sve2sha3, aarch64_feature_sve2sm4,
	aarch64_feature_sve2bitperm): New feature sets.
	(SVE2, SVE2_AES, SVE2_SHA3, SVE2_SM4, SVE2_BITPERM): New macros
	for feature set addresses.
	(SVE2_INSN, SVE2_INSNC, SVE2AES_INSN, SVE2SHA3_INSN,
	SVE2SM4_INSN, SVE2SM4_INSNC, SVE2BITPERM_INSN): New macros.
---
 gas/config/tc-aarch64.c  | 13 +++++++++++++
 gas/doc/c-aarch64.texi   | 10 ++++++++++
 include/opcode/aarch64.h |  6 ++++++
 opcodes/aarch64-tbl.h    | 36 ++++++++++++++++++++++++++++++++++++
 4 files changed, 65 insertions(+)

diff --git a/gas/config/tc-aarch64.c b/gas/config/tc-aarch64.c
index d04e9a1..f8d5b56 100644
--- a/gas/config/tc-aarch64.c
+++ b/gas/config/tc-aarch64.c
@@ -8866,6 +8866,19 @@ static const struct aarch64_option_cpu_value_table aarch64_features[] = {
 			AARCH64_ARCH_NONE},
   {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
 			AARCH64_ARCH_NONE},
+  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
+			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
+  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
+			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
+					 | AARCH64_FEATURE_SM4, 0)},
+  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
+			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
+					 | AARCH64_FEATURE_AES, 0)},
+  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
+			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
+					 | AARCH64_FEATURE_SHA3, 0)},
+  {"bitperm",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
+			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
   {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
 };
 
diff --git a/gas/doc/c-aarch64.texi b/gas/doc/c-aarch64.texi
index 445fb2f..0f80852 100644
--- a/gas/doc/c-aarch64.texi
+++ b/gas/doc/c-aarch64.texi
@@ -194,6 +194,16 @@ automatically cause those extensions to be disabled.
  @tab Enable Speculative Store Bypassing Safe state read and write.
 @item @code{memtag} @tab ARMv8.5-A @tab No
  @tab Enable ARMv8.5-A Memory Tagging Extensions.
+@item @code{sve2} @tab ARMv8-A @tab No
+ @tab Enable the SVE2 Extension.
+@item @code{bitperm} @tab ARMv8-A @tab No
+ @tab Enable SVE2 BITPERM Extension.
+@item @code{sve2-sm4} @tab ARMv8-A @tab No
+ @tab Enable SVE2 SM4 Extension.
+@item @code{sve2-aes} @tab ARMv8-A @tab No
+ @tab Enable SVE2 AES Extension.
+@item @code{sve2-sha3} @tab ARMv8-A @tab No
+ @tab Enable SVE2 SHA3 Extension.
 @end multitable
 
 @node AArch64 Syntax
diff --git a/include/opcode/aarch64.h b/include/opcode/aarch64.h
index 0c0234a..ecd57cb 100644
--- a/include/opcode/aarch64.h
+++ b/include/opcode/aarch64.h
@@ -87,6 +87,12 @@ typedef uint32_t aarch64_insn;
 /* Memory Tagging Extension.  */
 #define AARCH64_FEATURE_MEMTAG		0x1000000000000ULL
 
+/* SVE2 instructions.  */
+#define AARCH64_FEATURE_SVE2		0x000000010
+#define AARCH64_FEATURE_SVE2_AES		0x000000080
+#define AARCH64_FEATURE_SVE2_BITPERM	0x000000100
+#define AARCH64_FEATURE_SVE2_SM4		0x000000200
+#define AARCH64_FEATURE_SVE2_SHA3	0x000000400
 
 /* Architectures are the sum of the base and extensions.  */
 #define AARCH64_ARCH_V8		AARCH64_FEATURE (AARCH64_FEATURE_V8, \
diff --git a/opcodes/aarch64-tbl.h b/opcodes/aarch64-tbl.h
index e0c3903..07b6d62 100644
--- a/opcodes/aarch64-tbl.h
+++ b/opcodes/aarch64-tbl.h
@@ -2197,6 +2197,16 @@ static const aarch64_feature_set aarch64_feature_bti =
   AARCH64_FEATURE (AARCH64_FEATURE_BTI, 0);
 static const aarch64_feature_set aarch64_feature_memtag =
   AARCH64_FEATURE (AARCH64_FEATURE_V8_5 | AARCH64_FEATURE_MEMTAG, 0);
+static const aarch64_feature_set aarch64_feature_sve2 =
+  AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0);
+static const aarch64_feature_set aarch64_feature_sve2aes =
+  AARCH64_FEATURE (AARCH64_FEATURE_SVE2 | AARCH64_FEATURE_SVE2_AES, 0);
+static const aarch64_feature_set aarch64_feature_sve2sha3 =
+  AARCH64_FEATURE (AARCH64_FEATURE_SVE2 | AARCH64_FEATURE_SVE2_SHA3, 0);
+static const aarch64_feature_set aarch64_feature_sve2sm4 =
+  AARCH64_FEATURE (AARCH64_FEATURE_SVE2 | AARCH64_FEATURE_SVE2_SM4, 0);
+static const aarch64_feature_set aarch64_feature_sve2bitperm =
+  AARCH64_FEATURE (AARCH64_FEATURE_SVE2 | AARCH64_FEATURE_SVE2_BITPERM, 0);
 
 
 #define CORE		&aarch64_feature_v8
@@ -2232,6 +2242,11 @@ static const aarch64_feature_set aarch64_feature_memtag =
 #define PREDRES		&aarch64_feature_predres
 #define BTI		&aarch64_feature_bti
 #define MEMTAG		&aarch64_feature_memtag
+#define SVE2		&aarch64_feature_sve2
+#define SVE2_AES		&aarch64_feature_sve2aes
+#define SVE2_SHA3	&aarch64_feature_sve2sha3
+#define SVE2_SM4		&aarch64_feature_sve2sm4
+#define SVE2_BITPERM	&aarch64_feature_sve2bitperm
 
 #define CORE_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS) \
   { NAME, OPCODE, MASK, CLASS, OP, CORE, OPS, QUALS, FLAGS, 0, 0, NULL }
@@ -2299,6 +2314,27 @@ static const aarch64_feature_set aarch64_feature_memtag =
   { NAME, OPCODE, MASK, CLASS, 0, BTI, OPS, QUALS, FLAGS, 0, 0, NULL }
 #define MEMTAG_INSN(NAME,OPCODE,MASK,CLASS,OPS,QUALS,FLAGS) \
   { NAME, OPCODE, MASK, CLASS, 0, MEMTAG, OPS, QUALS, FLAGS, 0, 0, NULL }
+#define SVE2_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
+  { NAME, OPCODE, MASK, CLASS, OP, SVE2, OPS, QUALS, \
+    FLAGS | F_STRICT, 0, TIED, NULL }
+#define SVE2_INSNC(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,CONSTRAINTS,TIED) \
+  { NAME, OPCODE, MASK, CLASS, OP, SVE2, OPS, QUALS, \
+    FLAGS | F_STRICT, CONSTRAINTS, TIED, NULL }
+#define SVE2AES_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
+  { NAME, OPCODE, MASK, CLASS, OP, SVE2_AES, OPS, QUALS, \
+    FLAGS | F_STRICT, 0, TIED, NULL }
+#define SVE2SHA3_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
+  { NAME, OPCODE, MASK, CLASS, OP, SVE2_SHA3, OPS, QUALS, \
+    FLAGS | F_STRICT, 0, TIED, NULL }
+#define SVE2SM4_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
+  { NAME, OPCODE, MASK, CLASS, OP, SVE2_SM4, OPS, QUALS, \
+    FLAGS | F_STRICT, 0, TIED, NULL }
+#define SVE2SM4_INSNC(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,CONSTRAINTS,TIED) \
+  { NAME, OPCODE, MASK, CLASS, OP, SVE2_SM4, OPS, QUALS, \
+    FLAGS | F_STRICT, CONSTRAINTS, TIED, NULL }
+#define SVE2BITPERM_INSN(NAME,OPCODE,MASK,CLASS,OP,OPS,QUALS,FLAGS,TIED) \
+  { NAME, OPCODE, MASK, CLASS, OP, SVE2_BITPERM, OPS, QUALS, \
+    FLAGS | F_STRICT, 0, TIED, NULL }
 
 struct aarch64_opcode aarch64_opcode_table[] =
 {
-- 
2.7.4



* [Patch 00/16] [gas][aarch64] SVE2 binutils instructions for aarch64
From: Matthew Malcomson @ 2019-05-01 14:44 UTC (permalink / raw)
  To: binutils; +Cc: nd

This series of patches adds support for the "Future Architecture Technologies"
Scalable Vector Extension V2 (SVE2) announced at Linaro Connect.
https://connect.linaro.org/resources/bkk19/new-technologies-in-the-arm-architecture/

Tested by comparing the output against an independent implementation using
the test file provided, and by running the testsuite with an
aarch64-linux-gnu cross-assembler.

The instructions and testcases are added in the final two patches to keep the
number of patches that need to be zipped to a minimum.

The instructions are published at the link below.
https://developer.arm.com/docs/ddi0602/latest/a64-sve-instructions-alphabetic-order

Thanks,
Matthew




* [PATCH 04/16] [binutils][aarch64] New iclass sve_size_hsd2.
From: Matthew Malcomson @ 2019-05-01 14:44 UTC (permalink / raw)
  To: binutils; +Cc: nd, Matthew Malcomson

Add "sve_size_hsd2" iclass decode that uses the new FLD_SVE_size field
value to determine the variant of an instruction.
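
Informally (not code from the patch), the mapping between the 2-bit
FLD_SVE_size value in bits 18-17 and the variant looks like the sketch below;
a field value of 0 has no corresponding variant and is rejected on decode.
Variants 0/1/2 are assumed here to be the H/S/D qualifiers in that order:

  /* Illustrative sketch of the sve_size_hsd2 size-field/variant mapping.  */
  #include <stdio.h>

  int
  main (void)
  {
    for (unsigned int field = 0; field < 4; field++)
      {
        if (field < 1)
          printf ("size field %u: invalid encoding\n", field);
        else
          printf ("size field %u: variant %u\n", field, field - 1);
      }
    return 0;
  }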

include/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* opcode/aarch64.h (enum aarch64_insn_class): Add sve_size_hsd2 iclass.

opcodes/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* aarch64-asm.c (aarch64_encode_variant_using_iclass): Handle
	sve_size_hsd2 iclass encode.
	* aarch64-dis.c (aarch64_decode_variant_using_iclass): Handle
	sve_size_hsd2 iclass decode.
	* aarch64-opc.c (fields): Handle SVE_size field.
	* aarch64-opc.h (enum aarch64_field_kind): New SVE_size field.
---
 include/opcode/aarch64.h | 1 +
 opcodes/aarch64-asm.c    | 5 +++++
 opcodes/aarch64-dis.c    | 7 +++++++
 opcodes/aarch64-opc.c    | 1 +
 opcodes/aarch64-opc.h    | 1 +
 5 files changed, 15 insertions(+)

diff --git a/include/opcode/aarch64.h b/include/opcode/aarch64.h
index 8f629b9..10541d8 100644
--- a/include/opcode/aarch64.h
+++ b/include/opcode/aarch64.h
@@ -586,6 +586,7 @@ enum aarch64_insn_class
   sve_size_bhs,
   sve_size_bhsd,
   sve_size_hsd,
+  sve_size_hsd2,
   sve_size_sd,
   testbranch,
   cryptosm3,
diff --git a/opcodes/aarch64-asm.c b/opcodes/aarch64-asm.c
index 2424b66..d4f498f 100644
--- a/opcodes/aarch64-asm.c
+++ b/opcodes/aarch64-asm.c
@@ -1659,6 +1659,11 @@ aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
       insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
       break;
 
+    case sve_size_hsd2:
+      insert_field (FLD_SVE_size, &inst->value,
+		    aarch64_get_variant (inst) + 1, 0);
+      break;
+
     default:
       break;
     }
diff --git a/opcodes/aarch64-dis.c b/opcodes/aarch64-dis.c
index eea649f..844c6ab 100644
--- a/opcodes/aarch64-dis.c
+++ b/opcodes/aarch64-dis.c
@@ -2810,6 +2810,13 @@ aarch64_decode_variant_using_iclass (aarch64_inst *inst)
       variant = extract_field (FLD_SVE_sz, inst->value, 0);
       break;
 
+    case sve_size_hsd2:
+      i = extract_field (FLD_SVE_size, inst->value, 0);
+      if (i < 1)
+	return FALSE;
+      variant = i - 1;
+      break;
+
     default:
       /* No mapping between instruction class and qualifiers.  */
       return TRUE;
diff --git a/opcodes/aarch64-opc.c b/opcodes/aarch64-opc.c
index 5a381d6..c133c1d 100644
--- a/opcodes/aarch64-opc.c
+++ b/opcodes/aarch64-opc.c
@@ -311,6 +311,7 @@ const aarch64_field fields[] =
     { 10,  2 }, /* SVE_rot2: 2-bit rotation amount.  */
     { 10,  1 }, /* SVE_rot3: 1-bit rotation amount at bit 10.  */
     { 22,  1 }, /* SVE_sz: 1-bit element size select.  */
+    { 17,  2 }, /* SVE_size: 2-bit element size, bits [18,17].  */
     { 16,  4 }, /* SVE_tsz: triangular size select.  */
     { 22,  2 }, /* SVE_tszh: triangular size select high, bits [23,22].  */
     {  8,  2 }, /* SVE_tszl_8: triangular size select low, bits [9,8].  */
diff --git a/opcodes/aarch64-opc.h b/opcodes/aarch64-opc.h
index b1060d4..942fa58 100644
--- a/opcodes/aarch64-opc.h
+++ b/opcodes/aarch64-opc.h
@@ -138,6 +138,7 @@ enum aarch64_field_kind
   FLD_SVE_rot2,
   FLD_SVE_rot3,
   FLD_SVE_sz,
+  FLD_SVE_size,
   FLD_SVE_tsz,
   FLD_SVE_tszh,
   FLD_SVE_tszl_8,
-- 
2.7.4



* [PATCH 03/16] [binutils][aarch64] Introduce SVE_IMM_ROT3 operand.
From: Matthew Malcomson @ 2019-05-01 14:44 UTC (permalink / raw)
  To: binutils; +Cc: nd, Matthew Malcomson

The new operand AARCH64_OPND_SVE_IMM_ROT3 handles a single-bit rotation
immediate encoded at bit position 10.
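
As a sketch (not part of the patch), the operand only ever takes the values
90 and 270, so it fits in the single FLD_SVE_rot3 bit; the encoding of 90 as
0 and 270 as 1 is an assumption here, mirroring the existing 1-bit rotate
operands:

  /* Illustrative only: validate and encode an SVE_IMM_ROT3 rotation.  */
  #include <stdint.h>

  int
  encode_rot3 (int64_t rot, uint32_t *insn)
  {
    if (rot != 90 && rot != 270)
      return 0;                               /* must be 90 or 270 */
    *insn |= (uint32_t) (rot == 270) << 10;   /* 1-bit field at bit 10 */
    return 1;
  }

  int
  main (void)
  {
    uint32_t insn = 0;
    return encode_rot3 (270, &insn) ? 0 : 1;
  }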

gas/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* config/tc-aarch64.c (parse_operands): Handle new SVE_IMM_ROT3 operand.

include/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* opcode/aarch64.h (enum aarch64_opnd): New SVE_IMM_ROT3 operand.

opcodes/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* aarch64-asm-2.c: Regenerated.
	* aarch64-dis-2.c: Regenerated.
	* aarch64-opc-2.c: Regenerated.
	* aarch64-opc.c (operand_general_constraint_met_p): Constraint checking
	for SVE_IMM_ROT3.
	(aarch64_print_operand): Add printing for SVE_IMM_ROT3.
	(fields): Handle SVE_rot3 field.
	* aarch64-opc.h (enum aarch64_field_kind): New SVE_rot3 field.
	* aarch64-tbl.h (AARCH64_OPERANDS): Use new SVE_IMM_ROT3 operand.
---
 gas/config/tc-aarch64.c  |  1 +
 include/opcode/aarch64.h |  1 +
 opcodes/aarch64-asm-2.c  | 37 +++++++++++++++++++------------------
 opcodes/aarch64-dis-2.c  | 37 +++++++++++++++++++------------------
 opcodes/aarch64-opc-2.c  |  1 +
 opcodes/aarch64-opc.c    |  3 +++
 opcodes/aarch64-opc.h    |  1 +
 opcodes/aarch64-tbl.h    |  2 ++
 8 files changed, 47 insertions(+), 36 deletions(-)

diff --git a/gas/config/tc-aarch64.c b/gas/config/tc-aarch64.c
index f8d5b56..407ffee 100644
--- a/gas/config/tc-aarch64.c
+++ b/gas/config/tc-aarch64.c
@@ -5779,6 +5779,7 @@ parse_operands (char *str, const aarch64_opcode *opcode)
 	case AARCH64_OPND_IMM_ROT3:
 	case AARCH64_OPND_SVE_IMM_ROT1:
 	case AARCH64_OPND_SVE_IMM_ROT2:
+	case AARCH64_OPND_SVE_IMM_ROT3:
 	  po_imm_nc_or_fail ();
 	  info->imm.value = val;
 	  break;
diff --git a/include/opcode/aarch64.h b/include/opcode/aarch64.h
index ecd57cb..8f629b9 100644
--- a/include/opcode/aarch64.h
+++ b/include/opcode/aarch64.h
@@ -369,6 +369,7 @@ enum aarch64_opnd
   AARCH64_OPND_SVE_I1_ZERO_ONE,	/* SVE choice between 0.0 and 1.0.  */
   AARCH64_OPND_SVE_IMM_ROT1,	/* SVE 1-bit rotate operand (90 or 270).  */
   AARCH64_OPND_SVE_IMM_ROT2,	/* SVE 2-bit rotate operand (N*90).  */
+  AARCH64_OPND_SVE_IMM_ROT3,	/* SVE cadd 1-bit rotate (90 or 270).  */
   AARCH64_OPND_SVE_INV_LIMM,	/* SVE inverted logical immediate.  */
   AARCH64_OPND_SVE_LIMM,	/* SVE logical immediate.  */
   AARCH64_OPND_SVE_LIMM_MOV,	/* SVE logical immediate for MOV.  */
diff --git a/opcodes/aarch64-asm-2.c b/opcodes/aarch64-asm-2.c
index 2587093..b89d677 100644
--- a/opcodes/aarch64-asm-2.c
+++ b/opcodes/aarch64-asm-2.c
@@ -627,7 +627,6 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 27:
     case 28:
     case 29:
-    case 159:
     case 160:
     case 161:
     case 162:
@@ -637,7 +636,7 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 166:
     case 167:
     case 168:
-    case 181:
+    case 169:
     case 182:
     case 183:
     case 184:
@@ -646,8 +645,9 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 187:
     case 188:
     case 189:
-    case 193:
-    case 196:
+    case 190:
+    case 194:
+    case 197:
       return aarch64_ins_regno (self, info, code, inst, errors);
     case 13:
       return aarch64_ins_reg_extended (self, info, code, inst, errors);
@@ -659,7 +659,7 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 31:
     case 32:
     case 33:
-    case 198:
+    case 199:
       return aarch64_ins_reglane (self, info, code, inst, errors);
     case 34:
       return aarch64_ins_reglist (self, info, code, inst, errors);
@@ -693,9 +693,8 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 79:
     case 80:
     case 81:
-    case 156:
-    case 158:
-    case 173:
+    case 157:
+    case 159:
     case 174:
     case 175:
     case 176:
@@ -703,6 +702,7 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 178:
     case 179:
     case 180:
+    case 181:
       return aarch64_ins_imm (self, info, code, inst, errors);
     case 42:
     case 43:
@@ -715,7 +715,7 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 147:
       return aarch64_ins_fpimm (self, info, code, inst, errors);
     case 67:
-    case 154:
+    case 155:
       return aarch64_ins_limm (self, info, code, inst, errors);
     case 68:
       return aarch64_ins_aimm (self, info, code, inst, errors);
@@ -729,6 +729,7 @@ aarch64_insert_operand (const aarch64_operand *self,
       return aarch64_ins_imm_rotate2 (self, info, code, inst, errors);
     case 74:
     case 151:
+    case 153:
       return aarch64_ins_imm_rotate1 (self, info, code, inst, errors);
     case 75:
     case 76:
@@ -830,26 +831,26 @@ aarch64_insert_operand (const aarch64_operand *self,
       return aarch64_ins_sve_float_half_two (self, info, code, inst, errors);
     case 150:
       return aarch64_ins_sve_float_zero_one (self, info, code, inst, errors);
-    case 153:
+    case 154:
       return aarch64_ins_inv_limm (self, info, code, inst, errors);
-    case 155:
+    case 156:
       return aarch64_ins_sve_limm_mov (self, info, code, inst, errors);
-    case 157:
+    case 158:
       return aarch64_ins_sve_scale (self, info, code, inst, errors);
-    case 169:
     case 170:
-      return aarch64_ins_sve_shlimm (self, info, code, inst, errors);
     case 171:
+      return aarch64_ins_sve_shlimm (self, info, code, inst, errors);
     case 172:
+    case 173:
       return aarch64_ins_sve_shrimm (self, info, code, inst, errors);
-    case 190:
     case 191:
     case 192:
+    case 193:
       return aarch64_ins_sve_quad_index (self, info, code, inst, errors);
-    case 194:
-      return aarch64_ins_sve_index (self, info, code, inst, errors);
     case 195:
-    case 197:
+      return aarch64_ins_sve_index (self, info, code, inst, errors);
+    case 196:
+    case 198:
       return aarch64_ins_sve_reglist (self, info, code, inst, errors);
     default: assert (0); abort ();
     }
diff --git a/opcodes/aarch64-dis-2.c b/opcodes/aarch64-dis-2.c
index a5b7f45..f7dddef 100644
--- a/opcodes/aarch64-dis-2.c
+++ b/opcodes/aarch64-dis-2.c
@@ -20022,7 +20022,6 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 27:
     case 28:
     case 29:
-    case 159:
     case 160:
     case 161:
     case 162:
@@ -20032,7 +20031,7 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 166:
     case 167:
     case 168:
-    case 181:
+    case 169:
     case 182:
     case 183:
     case 184:
@@ -20041,8 +20040,9 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 187:
     case 188:
     case 189:
-    case 193:
-    case 196:
+    case 190:
+    case 194:
+    case 197:
       return aarch64_ext_regno (self, info, code, inst, errors);
     case 8:
       return aarch64_ext_regrt_sysins (self, info, code, inst, errors);
@@ -20058,7 +20058,7 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 31:
     case 32:
     case 33:
-    case 198:
+    case 199:
       return aarch64_ext_reglane (self, info, code, inst, errors);
     case 34:
       return aarch64_ext_reglist (self, info, code, inst, errors);
@@ -20093,9 +20093,8 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 79:
     case 80:
     case 81:
-    case 156:
-    case 158:
-    case 173:
+    case 157:
+    case 159:
     case 174:
     case 175:
     case 176:
@@ -20103,6 +20102,7 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 178:
     case 179:
     case 180:
+    case 181:
       return aarch64_ext_imm (self, info, code, inst, errors);
     case 42:
     case 43:
@@ -20117,7 +20117,7 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 147:
       return aarch64_ext_fpimm (self, info, code, inst, errors);
     case 67:
-    case 154:
+    case 155:
       return aarch64_ext_limm (self, info, code, inst, errors);
     case 68:
       return aarch64_ext_aimm (self, info, code, inst, errors);
@@ -20131,6 +20131,7 @@ aarch64_extract_operand (const aarch64_operand *self,
       return aarch64_ext_imm_rotate2 (self, info, code, inst, errors);
     case 74:
     case 151:
+    case 153:
       return aarch64_ext_imm_rotate1 (self, info, code, inst, errors);
     case 75:
     case 76:
@@ -20232,26 +20233,26 @@ aarch64_extract_operand (const aarch64_operand *self,
       return aarch64_ext_sve_float_half_two (self, info, code, inst, errors);
     case 150:
       return aarch64_ext_sve_float_zero_one (self, info, code, inst, errors);
-    case 153:
+    case 154:
       return aarch64_ext_inv_limm (self, info, code, inst, errors);
-    case 155:
+    case 156:
       return aarch64_ext_sve_limm_mov (self, info, code, inst, errors);
-    case 157:
+    case 158:
       return aarch64_ext_sve_scale (self, info, code, inst, errors);
-    case 169:
     case 170:
-      return aarch64_ext_sve_shlimm (self, info, code, inst, errors);
     case 171:
+      return aarch64_ext_sve_shlimm (self, info, code, inst, errors);
     case 172:
+    case 173:
       return aarch64_ext_sve_shrimm (self, info, code, inst, errors);
-    case 190:
     case 191:
     case 192:
+    case 193:
       return aarch64_ext_sve_quad_index (self, info, code, inst, errors);
-    case 194:
-      return aarch64_ext_sve_index (self, info, code, inst, errors);
     case 195:
-    case 197:
+      return aarch64_ext_sve_index (self, info, code, inst, errors);
+    case 196:
+    case 198:
       return aarch64_ext_sve_reglist (self, info, code, inst, errors);
     default: assert (0); abort ();
     }
diff --git a/opcodes/aarch64-opc-2.c b/opcodes/aarch64-opc-2.c
index 15f2b9f..90e9654 100644
--- a/opcodes/aarch64-opc-2.c
+++ b/opcodes/aarch64-opc-2.c
@@ -177,6 +177,7 @@ const struct aarch64_operand aarch64_operands[] =
   {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_I1_ZERO_ONE", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_i1}, "either 0.0 or 1.0"},
   {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_IMM_ROT1", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_rot1}, "a 1-bit rotation specifier for complex arithmetic operations"},
   {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_IMM_ROT2", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_rot2}, "a 2-bit rotation specifier for complex arithmetic operations"},
+  {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_IMM_ROT3", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_rot3}, "a 1-bit rotation specifier for complex arithmetic operations"},
   {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_INV_LIMM", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_N,FLD_SVE_immr,FLD_SVE_imms}, "an inverted 13-bit logical immediate"},
   {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_LIMM", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_N,FLD_SVE_immr,FLD_SVE_imms}, "a 13-bit logical immediate"},
   {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_LIMM_MOV", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_N,FLD_SVE_immr,FLD_SVE_imms}, "a 13-bit logical move immediate"},
diff --git a/opcodes/aarch64-opc.c b/opcodes/aarch64-opc.c
index 33e4af6..5a381d6 100644
--- a/opcodes/aarch64-opc.c
+++ b/opcodes/aarch64-opc.c
@@ -309,6 +309,7 @@ const aarch64_field fields[] =
     {  0,  4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
     { 16,  1 }, /* SVE_rot1: 1-bit rotation amount.  */
     { 10,  2 }, /* SVE_rot2: 2-bit rotation amount.  */
+    { 10,  1 }, /* SVE_rot3: 1-bit rotation amount at bit 10.  */
     { 22,  1 }, /* SVE_sz: 1-bit element size select.  */
     { 16,  4 }, /* SVE_tsz: triangular size select.  */
     { 22,  2 }, /* SVE_tszh: triangular size select high, bits [23,22].  */
@@ -2230,6 +2231,7 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
 
 	case AARCH64_OPND_IMM_ROT3:
 	case AARCH64_OPND_SVE_IMM_ROT1:
+	case AARCH64_OPND_SVE_IMM_ROT3:
 	  if (opnd->imm.value != 90 && opnd->imm.value != 270)
 	    {
 	      set_other_error (mismatch_detail, idx,
@@ -3343,6 +3345,7 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
     case AARCH64_OPND_IMM_ROT3:
     case AARCH64_OPND_SVE_IMM_ROT1:
     case AARCH64_OPND_SVE_IMM_ROT2:
+    case AARCH64_OPND_SVE_IMM_ROT3:
       snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
       break;
 
diff --git a/opcodes/aarch64-opc.h b/opcodes/aarch64-opc.h
index f6c506d..b1060d4 100644
--- a/opcodes/aarch64-opc.h
+++ b/opcodes/aarch64-opc.h
@@ -136,6 +136,7 @@ enum aarch64_field_kind
   FLD_SVE_prfop,
   FLD_SVE_rot1,
   FLD_SVE_rot2,
+  FLD_SVE_rot3,
   FLD_SVE_sz,
   FLD_SVE_tsz,
   FLD_SVE_tszh,
diff --git a/opcodes/aarch64-tbl.h b/opcodes/aarch64-tbl.h
index 07b6d62..980d8fa 100644
--- a/opcodes/aarch64-tbl.h
+++ b/opcodes/aarch64-tbl.h
@@ -4858,6 +4858,8 @@ struct aarch64_opcode aarch64_opcode_table[] =
       "a 1-bit rotation specifier for complex arithmetic operations")	\
     Y(IMMEDIATE, imm_rotate2, "SVE_IMM_ROT2", 0, F(FLD_SVE_rot2),	\
       "a 2-bit rotation specifier for complex arithmetic operations")	\
+    Y(IMMEDIATE, imm_rotate1, "SVE_IMM_ROT3", 0, F(FLD_SVE_rot3),	\
+      "a 1-bit rotation specifier for complex arithmetic operations")	\
     Y(IMMEDIATE, inv_limm, "SVE_INV_LIMM", 0,				\
       F(FLD_SVE_N,FLD_SVE_immr,FLD_SVE_imms),				\
       "an inverted 13-bit logical immediate")				\
-- 
2.7.4



* [PATCH 09/16] [binutils][aarch64] New sve_size_013 iclass.
From: Matthew Malcomson @ 2019-05-01 14:44 UTC (permalink / raw)
  To: binutils; +Cc: nd, Matthew Malcomson

Add the sve_size_013 instruction class.

This new iclass handles instructions, such as pmullb, whose size specifier
can only be encoded as 0, 1, or 3.
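
Informally (not taken from the patch), the encode/decode mapping between the
size field and the variant index is a small permutation with one invalid
encoding:

  /* Illustrative sketch: field 0 -> variant 0, 1 -> 1, 3 -> 2; a field
     value of 2 is rejected on decode.  */
  #include <stdio.h>

  int
  main (void)
  {
    for (unsigned int field = 0; field < 4; field++)
      {
        if (field == 2)
          printf ("size field %u: invalid encoding\n", field);
        else
          printf ("size field %u: variant %u\n", field, field == 3 ? 2u : field);
      }
    return 0;
  }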

include/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* opcode/aarch64.h (enum aarch64_insn_class): Add sve_size_013 iclass.

opcodes/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* aarch64-asm.c (aarch64_encode_variant_using_iclass): Handle
	sve_size_013 iclass encode.
	* aarch64-dis.c (aarch64_decode_variant_using_iclass): Handle
	sve_size_013 iclass decode.
---
 include/opcode/aarch64.h |  1 +
 opcodes/aarch64-asm.c    |  8 ++++++++
 opcodes/aarch64-dis.c    | 10 ++++++++++
 3 files changed, 19 insertions(+)

diff --git a/include/opcode/aarch64.h b/include/opcode/aarch64.h
index d1d4a23..f46e378 100644
--- a/include/opcode/aarch64.h
+++ b/include/opcode/aarch64.h
@@ -592,6 +592,7 @@ enum aarch64_insn_class
   sve_size_sd,
   sve_size_bh,
   sve_size_sd2,
+  sve_size_013,
   testbranch,
   cryptosm3,
   cryptosm4,
diff --git a/opcodes/aarch64-asm.c b/opcodes/aarch64-asm.c
index 674eba5..0ec27b2 100644
--- a/opcodes/aarch64-asm.c
+++ b/opcodes/aarch64-asm.c
@@ -1613,6 +1613,7 @@ do_special_encoding (struct aarch64_inst *inst)
 static void
 aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
 {
+  int variant = 0;
   switch (inst->opcode->iclass)
     {
     case sve_cpy:
@@ -1669,6 +1670,13 @@ aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
 		    aarch64_get_variant (inst) + 1, 0);
       break;
 
+    case sve_size_013:
+      variant = aarch64_get_variant (inst);
+      if (variant == 2)
+	  variant = 3;
+      insert_field (FLD_size, &inst->value, variant, 0);
+      break;
+
     default:
       break;
     }
diff --git a/opcodes/aarch64-dis.c b/opcodes/aarch64-dis.c
index bfc47b4..1a727a4 100644
--- a/opcodes/aarch64-dis.c
+++ b/opcodes/aarch64-dis.c
@@ -2822,6 +2822,16 @@ aarch64_decode_variant_using_iclass (aarch64_inst *inst)
       variant = i - 1;
       break;
 
+    case sve_size_013:
+      i = extract_field (FLD_size, inst->value, 0);
+      if (i == 2)
+	return FALSE;
+      if (i == 3)
+	variant = 2;
+      else
+	variant = i;
+      break;
+
     default:
       /* No mapping between instruction class and qualifiers.  */
       return TRUE;
-- 
2.7.4



* [PATCH 02/16] [binutils][aarch64] Allow movprfx for SVE2 instructions.
From: Matthew Malcomson @ 2019-05-01 14:44 UTC (permalink / raw)
  To: binutils; +Cc: nd, Matthew Malcomson

SVE2 introduces a number of new instructions that work with the movprfx
instruction.  This commit ensures that the constraint check for an SVE
instruction following movprfx also accepts SVE2 instructions.
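
Restated as a standalone predicate (just a paraphrase of the condition in the
diff below, not new behaviour; the feature masks are passed in rather than
using the real macros):

  /* Illustrative only: an instruction following MOVPRFX must advertise
     either the SVE or the SVE2 feature.  */
  #include <stdbool.h>
  #include <stdint.h>

  bool
  ok_after_movprfx (const uint64_t *avariant,
                    uint64_t feature_sve, uint64_t feature_sve2)
  {
    return avariant != NULL
           && (*avariant & (feature_sve | feature_sve2)) != 0;
  }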

opcodes/ChangeLog:

2019-04-02  Matthew Malcomson  <matthew.malcomson@arm.com>

	* aarch64-opc.c (verify_constraints): Check for movprfx for sve2
	instructions.
---
 opcodes/aarch64-opc.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/opcodes/aarch64-opc.c b/opcodes/aarch64-opc.c
index a174116..33e4af6 100644
--- a/opcodes/aarch64-opc.c
+++ b/opcodes/aarch64-opc.c
@@ -4864,7 +4864,9 @@ verify_constraints (const struct aarch64_inst *inst,
 	{
 	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
 	     instruction for better error messages.  */
-	  if (!opcode->avariant || !(*opcode->avariant & AARCH64_FEATURE_SVE))
+	  if (!opcode->avariant
+	      || !(*opcode->avariant &
+		   (AARCH64_FEATURE_SVE | AARCH64_FEATURE_SVE2)))
 	    {
 	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
 	      mismatch_detail->error = _("SVE instruction expected after "
-- 
2.7.4



* [PATCH 07/16] [binutils][aarch64] New sve_size_sd2 iclass.
From: Matthew Malcomson @ 2019-05-01 14:44 UTC (permalink / raw)
  To: binutils; +Cc: nd, Matthew Malcomson

Define a new sve_size_sd2 iclass to distinguish between the two variants of
ldnt1sb and ldnt1sh.
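
As a sketch (not from the patch), the variant selection is a single bit, the
new SVE_sz2 field at bit 30; variant 0 is assumed here to be the .S form and
variant 1 the .D form:

  /* Illustrative only: decode the sve_size_sd2 variant from bit 30.  */
  #include <stdint.h>

  unsigned int
  decode_sve_size_sd2 (uint32_t insn)
  {
    return (insn >> 30) & 1;   /* assumed: 0 -> S variant, 1 -> D variant */
  }

  int
  main (void)
  {
    return decode_sve_size_sd2 (1u << 30) == 1 ? 0 : 1;
  }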

include/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* opcode/aarch64.h (enum aarch64_insn_class): Add sve_size_sd2 iclass.

opcodes/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* aarch64-asm.c (aarch64_encode_variant_using_iclass): Handle
	sve_size_sd2 iclass encode.
	* aarch64-dis.c (aarch64_decode_variant_using_iclass): Handle
	sve_size_sd2 iclass decode.
	* aarch64-opc.c (fields): Handle SVE_sz2 field.
	* aarch64-opc.h (enum aarch64_field_kind): New SVE_sz2 field.
---
 include/opcode/aarch64.h | 1 +
 opcodes/aarch64-asm.c    | 4 ++++
 opcodes/aarch64-dis.c    | 4 ++++
 opcodes/aarch64-opc.c    | 1 +
 opcodes/aarch64-opc.h    | 1 +
 5 files changed, 11 insertions(+)

diff --git a/include/opcode/aarch64.h b/include/opcode/aarch64.h
index 5e8f6dd..d23a6e7 100644
--- a/include/opcode/aarch64.h
+++ b/include/opcode/aarch64.h
@@ -590,6 +590,7 @@ enum aarch64_insn_class
   sve_size_hsd,
   sve_size_hsd2,
   sve_size_sd,
+  sve_size_sd2,
   testbranch,
   cryptosm3,
   cryptosm4,
diff --git a/opcodes/aarch64-asm.c b/opcodes/aarch64-asm.c
index d4f498f..6627b54 100644
--- a/opcodes/aarch64-asm.c
+++ b/opcodes/aarch64-asm.c
@@ -1659,6 +1659,10 @@ aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
       insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
       break;
 
+    case sve_size_sd2:
+      insert_field (FLD_SVE_sz2, &inst->value, aarch64_get_variant (inst), 0);
+      break;
+
     case sve_size_hsd2:
       insert_field (FLD_SVE_size, &inst->value,
 		    aarch64_get_variant (inst) + 1, 0);
diff --git a/opcodes/aarch64-dis.c b/opcodes/aarch64-dis.c
index 844c6ab..35576b3 100644
--- a/opcodes/aarch64-dis.c
+++ b/opcodes/aarch64-dis.c
@@ -2810,6 +2810,10 @@ aarch64_decode_variant_using_iclass (aarch64_inst *inst)
       variant = extract_field (FLD_SVE_sz, inst->value, 0);
       break;
 
+    case sve_size_sd2:
+      variant = extract_field (FLD_SVE_sz2, inst->value, 0);
+      break;
+
     case sve_size_hsd2:
       i = extract_field (FLD_SVE_size, inst->value, 0);
       if (i < 1)
diff --git a/opcodes/aarch64-opc.c b/opcodes/aarch64-opc.c
index f76715f..187ad12 100644
--- a/opcodes/aarch64-opc.c
+++ b/opcodes/aarch64-opc.c
@@ -314,6 +314,7 @@ const aarch64_field fields[] =
     { 10,  1 }, /* SVE_rot3: 1-bit rotation amount at bit 10.  */
     { 22,  1 }, /* SVE_sz: 1-bit element size select.  */
     { 17,  2 }, /* SVE_size: 2-bit element size, bits [18,17].  */
+    { 30,  1 }, /* SVE_sz2: 1-bit element size select.  */
     { 16,  4 }, /* SVE_tsz: triangular size select.  */
     { 22,  2 }, /* SVE_tszh: triangular size select high, bits [23,22].  */
     {  8,  2 }, /* SVE_tszl_8: triangular size select low, bits [9,8].  */
diff --git a/opcodes/aarch64-opc.h b/opcodes/aarch64-opc.h
index 8803bca..8d18175 100644
--- a/opcodes/aarch64-opc.h
+++ b/opcodes/aarch64-opc.h
@@ -141,6 +141,7 @@ enum aarch64_field_kind
   FLD_SVE_rot3,
   FLD_SVE_sz,
   FLD_SVE_size,
+  FLD_SVE_sz2,
   FLD_SVE_tsz,
   FLD_SVE_tszh,
   FLD_SVE_tszl_8,
-- 
2.7.4



* [PATCH 06/16] [binutils][aarch64] New SVE_ADDR_ZX operand.
From: Matthew Malcomson @ 2019-05-01 14:45 UTC (permalink / raw)
  To: binutils; +Cc: nd, Matthew Malcomson

Add the AARCH64_OPND_SVE_ADDR_ZX operand, which takes a vector of addresses
in a Zn register offset by an Xm register.
This is used by the SVE2 scatter/gather instructions.
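
As a rough sketch (not the gas code), the constraints on a parsed
[Zn.<T>{, <Xm>}] address mirror the checks added to parse_operands below:
the base must be an SVE vector with .S or .D elements, the optional offset
must be an X register, and a missing offset is treated as XZR:

  #include <stdbool.h>

  /* Hypothetical qualifier tags for illustration only.  */
  enum qualifier { QLF_S_S, QLF_S_D, QLF_X, QLF_OTHER };

  bool
  valid_sve_addr_zx (enum qualifier base, bool have_offset, enum qualifier offset)
  {
    if (base != QLF_S_S && base != QLF_S_D)
      return false;                /* Zn must use .S or .D elements.  */
    if (!have_offset)
      return true;                 /* bare [Zn.<T>] means [Zn.<T>, xzr].  */
    return offset == QLF_X;        /* the offset register must be an Xm.  */
  }

  int
  main (void)
  {
    return valid_sve_addr_zx (QLF_S_S, false, QLF_OTHER) ? 0 : 1;
  }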

gas/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* config/tc-aarch64.c (REG_ZR): Macro specifying zero register.
	(parse_address_main): Account for new addressing mode [Zn.S, Xm].
	(parse_operands): Handle new SVE_ADDR_ZX operand.

include/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* opcode/aarch64.h (enum aarch64_opnd): New SVE_ADDR_ZX operand.

opcodes/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* aarch64-asm-2.c: Regenerated.
	* aarch64-dis-2.c: Regenerated.
	* aarch64-opc-2.c: Regenerated.
	* aarch64-opc.c (operand_general_constraint_met_p): Constraint checking
	for SVE_ADDR_ZX.
	(aarch64_print_operand): Add printing for SVE_ADDR_ZX.
	* aarch64-tbl.h (AARCH64_OPERANDS): Use new SVE_ADDR_ZX operand.
---
 gas/config/tc-aarch64.c  | 52 +++++++++++++++++++++++++++++++++++---
 include/opcode/aarch64.h |  1 +
 opcodes/aarch64-asm-2.c  | 65 ++++++++++++++++++++++++------------------------
 opcodes/aarch64-dis-2.c  | 65 ++++++++++++++++++++++++------------------------
 opcodes/aarch64-opc-2.c  |  1 +
 opcodes/aarch64-opc.c    | 18 ++++++++++++++
 opcodes/aarch64-tbl.h    |  3 +++
 7 files changed, 137 insertions(+), 68 deletions(-)

diff --git a/gas/config/tc-aarch64.c b/gas/config/tc-aarch64.c
index 612febd..f30b8df 100644
--- a/gas/config/tc-aarch64.c
+++ b/gas/config/tc-aarch64.c
@@ -449,6 +449,7 @@ get_reg_expected_msg (aarch64_reg_type reg_type)
 
 /* Some well known registers that we refer to directly elsewhere.  */
 #define REG_SP	31
+#define REG_ZR	31
 
 /* Instructions take 4 bytes in the object file.  */
 #define INSN_SIZE	4
@@ -3393,6 +3394,7 @@ parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
      [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
      [Zn.S,#imm]
      [Zn.D,#imm]
+     [Zn.S{, Xm}]
      [Zn.S,Zm.S{,LSL #imm}]      // in ADR
      [Zn.D,Zm.D{,LSL #imm}]      // in ADR
      [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
@@ -3558,6 +3560,7 @@ parse_address_main (char **str, aarch64_opnd_info *operand,
 		return FALSE;
 	    }
 	  /* We only accept:
+	     [base,Xm]  # For vector plus scalar SVE2 indexing.
 	     [base,Xm{,LSL #imm}]
 	     [base,Xm,SXTX {#imm}]
 	     [base,Wm,(S|U)XTW {#imm}]  */
@@ -3571,7 +3574,10 @@ parse_address_main (char **str, aarch64_opnd_info *operand,
 		  return FALSE;
 		}
 	      if (aarch64_get_qualifier_esize (*base_qualifier)
-		  != aarch64_get_qualifier_esize (*offset_qualifier))
+		  != aarch64_get_qualifier_esize (*offset_qualifier)
+		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
+		      || *base_qualifier != AARCH64_OPND_QLF_S_S
+		      || *offset_qualifier != AARCH64_OPND_QLF_X))
 		{
 		  set_syntax_error (_("offset has different size from base"));
 		  return FALSE;
@@ -3689,7 +3695,9 @@ parse_address_main (char **str, aarch64_opnd_info *operand,
     }
 
   /* If at this point neither .preind nor .postind is set, we have a
-     bare [Rn]{!}; reject [Rn]! accept [Rn] as a shorthand for [Rn,#0].  */
+     bare [Rn]{!}; reject [Rn]! accept [Rn] as a shorthand for [Rn,#0].
+     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
+     [Zn.<T>, xzr].  */
   if (operand->addr.preind == 0 && operand->addr.postind == 0)
     {
       if (operand->addr.writeback)
@@ -3700,8 +3708,17 @@ parse_address_main (char **str, aarch64_opnd_info *operand,
 	}
 
       operand->addr.preind = 1;
-      inst.reloc.exp.X_op = O_constant;
-      inst.reloc.exp.X_add_number = 0;
+      if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
+	{
+	  operand->addr.offset.is_reg = 1;
+	  operand->addr.offset.regno = REG_ZR;
+	  *offset_qualifier = AARCH64_OPND_QLF_X;
+	}
+      else
+	{
+	  inst.reloc.exp.X_op = O_constant;
+	  inst.reloc.exp.X_add_number = 0;
+	}
     }
 
   *str = p;
@@ -6416,6 +6433,33 @@ parse_operands (char *str, const aarch64_opcode *opcode)
 	  info->qualifier = offset_qualifier;
 	  goto regoff_addr;
 
+	case AARCH64_OPND_SVE_ADDR_ZX:
+	  /* [Zn.<T>{, <Xm>}].  */
+	  po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
+					      &offset_qualifier));
+	  /* Things to check:
+	      base_qualifier either S_S or S_D
+	      offset_qualifier must be X
+	      */
+	  if ((base_qualifier != AARCH64_OPND_QLF_S_S
+	       && base_qualifier != AARCH64_OPND_QLF_S_D)
+	      || offset_qualifier != AARCH64_OPND_QLF_X)
+	    {
+	      set_syntax_error (_("invalid addressing mode"));
+	      goto failure;
+	    }
+	  info->qualifier = base_qualifier;
+	  if (!info->addr.offset.is_reg || info->addr.pcrel
+	      || !info->addr.preind || info->addr.writeback
+	      || info->shifter.operator_present != 0)
+	    {
+	      set_syntax_error (_("invalid addressing mode"));
+	      goto failure;
+	    }
+	  info->shifter.kind = AARCH64_MOD_LSL;
+	  break;
+
+
 	case AARCH64_OPND_SVE_ADDR_ZI_U5:
 	case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
 	case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
diff --git a/include/opcode/aarch64.h b/include/opcode/aarch64.h
index 1c3f126..5e8f6dd 100644
--- a/include/opcode/aarch64.h
+++ b/include/opcode/aarch64.h
@@ -334,6 +334,7 @@ enum aarch64_opnd
   AARCH64_OPND_SVE_ADDR_RX_LSL1,    /* SVE [<Xn|SP>, <Xm>, LSL #1].  */
   AARCH64_OPND_SVE_ADDR_RX_LSL2,    /* SVE [<Xn|SP>, <Xm>, LSL #2].  */
   AARCH64_OPND_SVE_ADDR_RX_LSL3,    /* SVE [<Xn|SP>, <Xm>, LSL #3].  */
+  AARCH64_OPND_SVE_ADDR_ZX,	    /* SVE [Zn.<T>{, <Xm>}].  */
   AARCH64_OPND_SVE_ADDR_RZ,	    /* SVE [<Xn|SP>, Zm.D].  */
   AARCH64_OPND_SVE_ADDR_RZ_LSL1,    /* SVE [<Xn|SP>, Zm.D, LSL #1].  */
   AARCH64_OPND_SVE_ADDR_RZ_LSL2,    /* SVE [<Xn|SP>, Zm.D, LSL #2].  */
diff --git a/opcodes/aarch64-asm-2.c b/opcodes/aarch64-asm-2.c
index 0b67ceb..0931c3f 100644
--- a/opcodes/aarch64-asm-2.c
+++ b/opcodes/aarch64-asm-2.c
@@ -627,7 +627,6 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 27:
     case 28:
     case 29:
-    case 160:
     case 161:
     case 162:
     case 163:
@@ -637,7 +636,7 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 167:
     case 168:
     case 169:
-    case 182:
+    case 170:
     case 183:
     case 184:
     case 185:
@@ -646,8 +645,9 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 188:
     case 189:
     case 190:
-    case 195:
-    case 198:
+    case 191:
+    case 196:
+    case 199:
       return aarch64_ins_regno (self, info, code, inst, errors);
     case 13:
       return aarch64_ins_reg_extended (self, info, code, inst, errors);
@@ -659,7 +659,7 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 31:
     case 32:
     case 33:
-    case 200:
+    case 201:
       return aarch64_ins_reglane (self, info, code, inst, errors);
     case 34:
       return aarch64_ins_reglist (self, info, code, inst, errors);
@@ -693,9 +693,8 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 79:
     case 80:
     case 81:
-    case 157:
-    case 159:
-    case 174:
+    case 158:
+    case 160:
     case 175:
     case 176:
     case 177:
@@ -703,6 +702,7 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 179:
     case 180:
     case 181:
+    case 182:
       return aarch64_ins_imm (self, info, code, inst, errors);
     case 42:
     case 43:
@@ -712,10 +712,10 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 46:
       return aarch64_ins_advsimd_imm_modified (self, info, code, inst, errors);
     case 50:
-    case 147:
+    case 148:
       return aarch64_ins_fpimm (self, info, code, inst, errors);
     case 67:
-    case 155:
+    case 156:
       return aarch64_ins_limm (self, info, code, inst, errors);
     case 68:
       return aarch64_ins_aimm (self, info, code, inst, errors);
@@ -725,11 +725,11 @@ aarch64_insert_operand (const aarch64_operand *self,
       return aarch64_ins_fbits (self, info, code, inst, errors);
     case 72:
     case 73:
-    case 152:
+    case 153:
       return aarch64_ins_imm_rotate2 (self, info, code, inst, errors);
     case 74:
-    case 151:
-    case 153:
+    case 152:
+    case 154:
       return aarch64_ins_imm_rotate1 (self, info, code, inst, errors);
     case 75:
     case 76:
@@ -800,8 +800,8 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 127:
     case 128:
     case 129:
-      return aarch64_ins_sve_addr_rr_lsl (self, info, code, inst, errors);
     case 130:
+      return aarch64_ins_sve_addr_rr_lsl (self, info, code, inst, errors);
     case 131:
     case 132:
     case 133:
@@ -809,49 +809,50 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 135:
     case 136:
     case 137:
-      return aarch64_ins_sve_addr_rz_xtw (self, info, code, inst, errors);
     case 138:
+      return aarch64_ins_sve_addr_rz_xtw (self, info, code, inst, errors);
     case 139:
     case 140:
     case 141:
-      return aarch64_ins_sve_addr_zi_u5 (self, info, code, inst, errors);
     case 142:
-      return aarch64_ins_sve_addr_zz_lsl (self, info, code, inst, errors);
+      return aarch64_ins_sve_addr_zi_u5 (self, info, code, inst, errors);
     case 143:
-      return aarch64_ins_sve_addr_zz_sxtw (self, info, code, inst, errors);
+      return aarch64_ins_sve_addr_zz_lsl (self, info, code, inst, errors);
     case 144:
-      return aarch64_ins_sve_addr_zz_uxtw (self, info, code, inst, errors);
+      return aarch64_ins_sve_addr_zz_sxtw (self, info, code, inst, errors);
     case 145:
-      return aarch64_ins_sve_aimm (self, info, code, inst, errors);
+      return aarch64_ins_sve_addr_zz_uxtw (self, info, code, inst, errors);
     case 146:
+      return aarch64_ins_sve_aimm (self, info, code, inst, errors);
+    case 147:
       return aarch64_ins_sve_asimm (self, info, code, inst, errors);
-    case 148:
-      return aarch64_ins_sve_float_half_one (self, info, code, inst, errors);
     case 149:
-      return aarch64_ins_sve_float_half_two (self, info, code, inst, errors);
+      return aarch64_ins_sve_float_half_one (self, info, code, inst, errors);
     case 150:
+      return aarch64_ins_sve_float_half_two (self, info, code, inst, errors);
+    case 151:
       return aarch64_ins_sve_float_zero_one (self, info, code, inst, errors);
-    case 154:
+    case 155:
       return aarch64_ins_inv_limm (self, info, code, inst, errors);
-    case 156:
+    case 157:
       return aarch64_ins_sve_limm_mov (self, info, code, inst, errors);
-    case 158:
+    case 159:
       return aarch64_ins_sve_scale (self, info, code, inst, errors);
-    case 170:
     case 171:
-      return aarch64_ins_sve_shlimm (self, info, code, inst, errors);
     case 172:
+      return aarch64_ins_sve_shlimm (self, info, code, inst, errors);
     case 173:
+    case 174:
       return aarch64_ins_sve_shrimm (self, info, code, inst, errors);
-    case 191:
     case 192:
     case 193:
     case 194:
+    case 195:
       return aarch64_ins_sve_quad_index (self, info, code, inst, errors);
-    case 196:
-      return aarch64_ins_sve_index (self, info, code, inst, errors);
     case 197:
-    case 199:
+      return aarch64_ins_sve_index (self, info, code, inst, errors);
+    case 198:
+    case 200:
       return aarch64_ins_sve_reglist (self, info, code, inst, errors);
     default: assert (0); abort ();
     }
diff --git a/opcodes/aarch64-dis-2.c b/opcodes/aarch64-dis-2.c
index 630ef20..62fd65a 100644
--- a/opcodes/aarch64-dis-2.c
+++ b/opcodes/aarch64-dis-2.c
@@ -20022,7 +20022,6 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 27:
     case 28:
     case 29:
-    case 160:
     case 161:
     case 162:
     case 163:
@@ -20032,7 +20031,7 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 167:
     case 168:
     case 169:
-    case 182:
+    case 170:
     case 183:
     case 184:
     case 185:
@@ -20041,8 +20040,9 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 188:
     case 189:
     case 190:
-    case 195:
-    case 198:
+    case 191:
+    case 196:
+    case 199:
       return aarch64_ext_regno (self, info, code, inst, errors);
     case 8:
       return aarch64_ext_regrt_sysins (self, info, code, inst, errors);
@@ -20058,7 +20058,7 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 31:
     case 32:
     case 33:
-    case 200:
+    case 201:
       return aarch64_ext_reglane (self, info, code, inst, errors);
     case 34:
       return aarch64_ext_reglist (self, info, code, inst, errors);
@@ -20093,9 +20093,8 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 79:
     case 80:
     case 81:
-    case 157:
-    case 159:
-    case 174:
+    case 158:
+    case 160:
     case 175:
     case 176:
     case 177:
@@ -20103,6 +20102,7 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 179:
     case 180:
     case 181:
+    case 182:
       return aarch64_ext_imm (self, info, code, inst, errors);
     case 42:
     case 43:
@@ -20114,10 +20114,10 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 47:
       return aarch64_ext_shll_imm (self, info, code, inst, errors);
     case 50:
-    case 147:
+    case 148:
       return aarch64_ext_fpimm (self, info, code, inst, errors);
     case 67:
-    case 155:
+    case 156:
       return aarch64_ext_limm (self, info, code, inst, errors);
     case 68:
       return aarch64_ext_aimm (self, info, code, inst, errors);
@@ -20127,11 +20127,11 @@ aarch64_extract_operand (const aarch64_operand *self,
       return aarch64_ext_fbits (self, info, code, inst, errors);
     case 72:
     case 73:
-    case 152:
+    case 153:
       return aarch64_ext_imm_rotate2 (self, info, code, inst, errors);
     case 74:
-    case 151:
-    case 153:
+    case 152:
+    case 154:
       return aarch64_ext_imm_rotate1 (self, info, code, inst, errors);
     case 75:
     case 76:
@@ -20202,8 +20202,8 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 127:
     case 128:
     case 129:
-      return aarch64_ext_sve_addr_rr_lsl (self, info, code, inst, errors);
     case 130:
+      return aarch64_ext_sve_addr_rr_lsl (self, info, code, inst, errors);
     case 131:
     case 132:
     case 133:
@@ -20211,49 +20211,50 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 135:
     case 136:
     case 137:
-      return aarch64_ext_sve_addr_rz_xtw (self, info, code, inst, errors);
     case 138:
+      return aarch64_ext_sve_addr_rz_xtw (self, info, code, inst, errors);
     case 139:
     case 140:
     case 141:
-      return aarch64_ext_sve_addr_zi_u5 (self, info, code, inst, errors);
     case 142:
-      return aarch64_ext_sve_addr_zz_lsl (self, info, code, inst, errors);
+      return aarch64_ext_sve_addr_zi_u5 (self, info, code, inst, errors);
     case 143:
-      return aarch64_ext_sve_addr_zz_sxtw (self, info, code, inst, errors);
+      return aarch64_ext_sve_addr_zz_lsl (self, info, code, inst, errors);
     case 144:
-      return aarch64_ext_sve_addr_zz_uxtw (self, info, code, inst, errors);
+      return aarch64_ext_sve_addr_zz_sxtw (self, info, code, inst, errors);
     case 145:
-      return aarch64_ext_sve_aimm (self, info, code, inst, errors);
+      return aarch64_ext_sve_addr_zz_uxtw (self, info, code, inst, errors);
     case 146:
+      return aarch64_ext_sve_aimm (self, info, code, inst, errors);
+    case 147:
       return aarch64_ext_sve_asimm (self, info, code, inst, errors);
-    case 148:
-      return aarch64_ext_sve_float_half_one (self, info, code, inst, errors);
     case 149:
-      return aarch64_ext_sve_float_half_two (self, info, code, inst, errors);
+      return aarch64_ext_sve_float_half_one (self, info, code, inst, errors);
     case 150:
+      return aarch64_ext_sve_float_half_two (self, info, code, inst, errors);
+    case 151:
       return aarch64_ext_sve_float_zero_one (self, info, code, inst, errors);
-    case 154:
+    case 155:
       return aarch64_ext_inv_limm (self, info, code, inst, errors);
-    case 156:
+    case 157:
       return aarch64_ext_sve_limm_mov (self, info, code, inst, errors);
-    case 158:
+    case 159:
       return aarch64_ext_sve_scale (self, info, code, inst, errors);
-    case 170:
     case 171:
-      return aarch64_ext_sve_shlimm (self, info, code, inst, errors);
     case 172:
+      return aarch64_ext_sve_shlimm (self, info, code, inst, errors);
     case 173:
+    case 174:
       return aarch64_ext_sve_shrimm (self, info, code, inst, errors);
-    case 191:
     case 192:
     case 193:
     case 194:
+    case 195:
       return aarch64_ext_sve_quad_index (self, info, code, inst, errors);
-    case 196:
-      return aarch64_ext_sve_index (self, info, code, inst, errors);
     case 197:
-    case 199:
+      return aarch64_ext_sve_index (self, info, code, inst, errors);
+    case 198:
+    case 200:
       return aarch64_ext_sve_reglist (self, info, code, inst, errors);
     default: assert (0); abort ();
     }
diff --git a/opcodes/aarch64-opc-2.c b/opcodes/aarch64-opc-2.c
index db2fc37..56a77d6 100644
--- a/opcodes/aarch64-opc-2.c
+++ b/opcodes/aarch64-opc-2.c
@@ -150,6 +150,7 @@ const struct aarch64_operand aarch64_operands[] =
   {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RX_LSL1", (1 << OPD_F_OD_LSB) | OPD_F_NO_ZR | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_Rm}, "an address with a scalar register offset"},
   {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RX_LSL2", (2 << OPD_F_OD_LSB) | OPD_F_NO_ZR | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_Rm}, "an address with a scalar register offset"},
   {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RX_LSL3", (3 << OPD_F_OD_LSB) | OPD_F_NO_ZR | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_Rm}, "an address with a scalar register offset"},
+  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_ZX", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zn,FLD_Rm}, "vector of address with a scalar register offset"},
   {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_SVE_Zm_16}, "an address with a vector register offset"},
   {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_LSL1", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_SVE_Zm_16}, "an address with a vector register offset"},
   {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_LSL2", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_SVE_Zm_16}, "an address with a vector register offset"},
diff --git a/opcodes/aarch64-opc.c b/opcodes/aarch64-opc.c
index 695146f..f76715f 100644
--- a/opcodes/aarch64-opc.c
+++ b/opcodes/aarch64-opc.c
@@ -1899,6 +1899,17 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
 	  max_value = 7;
 	  goto sve_imm_offset;
 
+	case AARCH64_OPND_SVE_ADDR_ZX:
+	  /* Everything is already ensured by parse_operands or
+	     aarch64_ext_sve_addr_rr_lsl (because this is a very specific
+	     argument type).  */
+	  assert (opnd->addr.offset.is_reg);
+	  assert (opnd->addr.preind);
+	  assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
+	  assert (opnd->shifter.kind == AARCH64_MOD_LSL);
+	  assert (opnd->shifter.operator_present == 0);
+	  break;
+
 	case AARCH64_OPND_SVE_ADDR_R:
 	case AARCH64_OPND_SVE_ADDR_RR:
 	case AARCH64_OPND_SVE_ADDR_RR_LSL1:
@@ -3580,6 +3591,13 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
 	 get_offset_int_reg_name (opnd));
       break;
 
+    case AARCH64_OPND_SVE_ADDR_ZX:
+      print_register_offset_address
+	(buf, size, opnd,
+	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
+	 get_64bit_int_reg_name (opnd->addr.offset.regno, 0));
+      break;
+
     case AARCH64_OPND_SVE_ADDR_RZ:
     case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
     case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
diff --git a/opcodes/aarch64-tbl.h b/opcodes/aarch64-tbl.h
index 826b4df..0e20b66 100644
--- a/opcodes/aarch64-tbl.h
+++ b/opcodes/aarch64-tbl.h
@@ -4786,6 +4786,9 @@ struct aarch64_opcode aarch64_opcode_table[] =
     Y(ADDRESS, sve_addr_rr_lsl, "SVE_ADDR_RX_LSL3",			\
       (3 << OPD_F_OD_LSB) | OPD_F_NO_ZR, F(FLD_Rn,FLD_Rm),		\
       "an address with a scalar register offset")			\
+    Y(ADDRESS, sve_addr_rr_lsl, "SVE_ADDR_ZX",				\
+      0 << OPD_F_OD_LSB , F(FLD_SVE_Zn,FLD_Rm),				\
+      "vector of address with a scalar register offset")		\
     Y(ADDRESS, sve_addr_rr_lsl, "SVE_ADDR_RZ", 0 << OPD_F_OD_LSB,	\
       F(FLD_Rn,FLD_SVE_Zm_16),						\
       "an address with a vector register offset")			\
-- 
2.7.4


^ permalink raw reply	[flat|nested] 24+ messages in thread

* [PATCH 10/16] [binutils][aarch64] New SVE_SHRIMM_UNPRED_22 operand.
  2019-05-01 14:44 [Patch 00/16] [gas][aarch64] SVE2 binutils instructions for aarch64 Matthew Malcomson
                   ` (8 preceding siblings ...)
  2019-05-01 14:45 ` [PATCH 11/16] [binutils][aarch64] New sve_shift_tsz_bhsd iclass Matthew Malcomson
@ 2019-05-01 14:45 ` Matthew Malcomson
  2019-05-01 14:45 ` [PATCH 06/16] [binutils][aarch64] New SVE_ADDR_ZX operand Matthew Malcomson
                   ` (7 subsequent siblings)
  17 siblings, 0 replies; 24+ messages in thread
From: Matthew Malcomson @ 2019-05-01 14:45 UTC (permalink / raw)
  To: binutils; +Cc: nd, Matthew Malcomson

Also introduce a new iclass that extracts the variant from the most
significant three bits of this operand.

Instructions such as rshrnb take a constant shift amount as an operand,
and the most significant three bits of that operand determine the
element size the instruction operates on.

The new SVE_SHRIMM_UNPRED_22 operand denotes this constant, encoded in
bits 22:20-19:18-16, while the new sve_shift_tsz_hsd iclass denotes that
the SVE qualifier is encoded in bits 22:20-19.
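
(Illustration only, not part of the patch: a standalone sketch of how
the variant can be recovered from the sz<22>:tszl<20-19> bits,
mirroring the sve_shift_tsz_hsd decode loop added below.  The bits
below the leading one belong to the shift immediate.)

static int
tsz_hsd_variant (unsigned int sz_tszl)  /* Bits 22:20-19 as a 3-bit value.  */
{
  int variant = 0;

  if (sz_tszl == 0)
    return -1;  /* No leading one: not a valid encoding.  */
  while (sz_tszl != 1)
    {
      sz_tszl >>= 1;
      variant += 1;
    }
  /* 0b001 -> variant 0 (.h), 0b01x -> 1 (.s), 0b1xx -> 2 (.d).  */
  return variant;
}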

gas/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* config/tc-aarch64.c (parse_operands): Handle new SVE_SHRIMM_UNPRED_22
	operand.

include/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* opcode/aarch64.h (enum aarch64_opnd): New SVE_SHRIMM_UNPRED_22
	operand.
	(enum aarch64_insn_class): Add sve_shift_tsz_hsd iclass.

opcodes/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* aarch64-asm-2.c: Regenerated.
	* aarch64-dis-2.c: Regenerated.
	* aarch64-opc-2.c: Regenerated.
	* aarch64-asm.c (aarch64_ins_sve_shrimm): Use operand-specific
	data to find the operand that supplies the element size.
	(aarch64_encode_variant_using_iclass): Handle
	sve_shift_tsz_hsd iclass encode.
	* aarch64-dis.c (aarch64_decode_variant_using_iclass): Handle
	sve_shift_tsz_hsd iclass decode.
	* aarch64-opc.c (operand_general_constraint_met_p): Constraint checking
	for SVE_SHRIMM_UNPRED_22.
	(aarch64_print_operand): Add printing for SVE_SHRIMM_UNPRED_22.
	* aarch64-tbl.h (AARCH64_OPERANDS): Use new SVE_SHRIMM_UNPRED_22
	operand.
---
 gas/config/tc-aarch64.c  |  1 +
 include/opcode/aarch64.h |  2 ++
 opcodes/aarch64-asm-2.c  | 19 ++++++++++---------
 opcodes/aarch64-asm.c    |  6 ++++--
 opcodes/aarch64-dis-2.c  | 19 ++++++++++---------
 opcodes/aarch64-dis.c    | 11 +++++++++++
 opcodes/aarch64-opc-2.c  |  5 +++--
 opcodes/aarch64-opc.c    | 18 ++++++++++++------
 opcodes/aarch64-tbl.h    |  7 +++++--
 9 files changed, 58 insertions(+), 30 deletions(-)

diff --git a/gas/config/tc-aarch64.c b/gas/config/tc-aarch64.c
index f30b8df..d7fc372 100644
--- a/gas/config/tc-aarch64.c
+++ b/gas/config/tc-aarch64.c
@@ -5784,6 +5784,7 @@ parse_operands (char *str, const aarch64_opcode *opcode)
 	case AARCH64_OPND_SVE_SHLIMM_UNPRED:
 	case AARCH64_OPND_SVE_SHRIMM_PRED:
 	case AARCH64_OPND_SVE_SHRIMM_UNPRED:
+	case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
 	case AARCH64_OPND_SVE_SIMM5:
 	case AARCH64_OPND_SVE_SIMM5B:
 	case AARCH64_OPND_SVE_SIMM6:
diff --git a/include/opcode/aarch64.h b/include/opcode/aarch64.h
index f46e378..ce9955d 100644
--- a/include/opcode/aarch64.h
+++ b/include/opcode/aarch64.h
@@ -391,6 +391,7 @@ enum aarch64_opnd
   AARCH64_OPND_SVE_SHLIMM_UNPRED, /* SVE shift left amount (unpredicated).  */
   AARCH64_OPND_SVE_SHRIMM_PRED,	  /* SVE shift right amount (predicated).  */
   AARCH64_OPND_SVE_SHRIMM_UNPRED, /* SVE shift right amount (unpredicated).  */
+  AARCH64_OPND_SVE_SHRIMM_UNPRED_22,	/* SVE 3 bit shift right unpred.  */
   AARCH64_OPND_SVE_SIMM5,	/* SVE signed 5-bit immediate.  */
   AARCH64_OPND_SVE_SIMM5B,	/* SVE secondary signed 5-bit immediate.  */
   AARCH64_OPND_SVE_SIMM6,	/* SVE signed 6-bit immediate.  */
@@ -593,6 +594,7 @@ enum aarch64_insn_class
   sve_size_bh,
   sve_size_sd2,
   sve_size_013,
+  sve_shift_tsz_hsd,
   testbranch,
   cryptosm3,
   cryptosm4,
diff --git a/opcodes/aarch64-asm-2.c b/opcodes/aarch64-asm-2.c
index 0931c3f..e6ddce7 100644
--- a/opcodes/aarch64-asm-2.c
+++ b/opcodes/aarch64-asm-2.c
@@ -637,7 +637,6 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 168:
     case 169:
     case 170:
-    case 183:
     case 184:
     case 185:
     case 186:
@@ -646,8 +645,9 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 189:
     case 190:
     case 191:
-    case 196:
-    case 199:
+    case 192:
+    case 197:
+    case 200:
       return aarch64_ins_regno (self, info, code, inst, errors);
     case 13:
       return aarch64_ins_reg_extended (self, info, code, inst, errors);
@@ -659,7 +659,7 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 31:
     case 32:
     case 33:
-    case 201:
+    case 202:
       return aarch64_ins_reglane (self, info, code, inst, errors);
     case 34:
       return aarch64_ins_reglist (self, info, code, inst, errors);
@@ -695,7 +695,6 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 81:
     case 158:
     case 160:
-    case 175:
     case 176:
     case 177:
     case 178:
@@ -703,6 +702,7 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 180:
     case 181:
     case 182:
+    case 183:
       return aarch64_ins_imm (self, info, code, inst, errors);
     case 42:
     case 43:
@@ -843,16 +843,17 @@ aarch64_insert_operand (const aarch64_operand *self,
       return aarch64_ins_sve_shlimm (self, info, code, inst, errors);
     case 173:
     case 174:
+    case 175:
       return aarch64_ins_sve_shrimm (self, info, code, inst, errors);
-    case 192:
     case 193:
     case 194:
     case 195:
+    case 196:
       return aarch64_ins_sve_quad_index (self, info, code, inst, errors);
-    case 197:
-      return aarch64_ins_sve_index (self, info, code, inst, errors);
     case 198:
-    case 200:
+      return aarch64_ins_sve_index (self, info, code, inst, errors);
+    case 199:
+    case 201:
       return aarch64_ins_sve_reglist (self, info, code, inst, errors);
     default: assert (0); abort ();
     }
diff --git a/opcodes/aarch64-asm.c b/opcodes/aarch64-asm.c
index 0ec27b2..6be17f9 100644
--- a/opcodes/aarch64-asm.c
+++ b/opcodes/aarch64-asm.c
@@ -1241,8 +1241,9 @@ aarch64_ins_sve_shrimm (const aarch64_operand *self,
   const aarch64_opnd_info *prev_operand;
   unsigned int esize;
 
-  assert (info->idx > 0);
-  prev_operand = &inst->operands[info->idx - 1];
+  unsigned int opnd_backshift = get_operand_specific_data (self);
+  assert (info->idx >= (int)opnd_backshift);
+  prev_operand = &inst->operands[info->idx - opnd_backshift];
   esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
   insert_all_fields (self, code, 16 * esize - info->imm.value);
   return TRUE;
@@ -1624,6 +1625,7 @@ aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
     case sve_index:
     case sve_shift_pred:
     case sve_shift_unpred:
+    case sve_shift_tsz_hsd:
       /* For indices and shift amounts, the variant is encoded as
 	 part of the immediate.  */
       break;
diff --git a/opcodes/aarch64-dis-2.c b/opcodes/aarch64-dis-2.c
index 62fd65a..6a802b4 100644
--- a/opcodes/aarch64-dis-2.c
+++ b/opcodes/aarch64-dis-2.c
@@ -20032,7 +20032,6 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 168:
     case 169:
     case 170:
-    case 183:
     case 184:
     case 185:
     case 186:
@@ -20041,8 +20040,9 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 189:
     case 190:
     case 191:
-    case 196:
-    case 199:
+    case 192:
+    case 197:
+    case 200:
       return aarch64_ext_regno (self, info, code, inst, errors);
     case 8:
       return aarch64_ext_regrt_sysins (self, info, code, inst, errors);
@@ -20058,7 +20058,7 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 31:
     case 32:
     case 33:
-    case 201:
+    case 202:
       return aarch64_ext_reglane (self, info, code, inst, errors);
     case 34:
       return aarch64_ext_reglist (self, info, code, inst, errors);
@@ -20095,7 +20095,6 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 81:
     case 158:
     case 160:
-    case 175:
     case 176:
     case 177:
     case 178:
@@ -20103,6 +20102,7 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 180:
     case 181:
     case 182:
+    case 183:
       return aarch64_ext_imm (self, info, code, inst, errors);
     case 42:
     case 43:
@@ -20245,16 +20245,17 @@ aarch64_extract_operand (const aarch64_operand *self,
       return aarch64_ext_sve_shlimm (self, info, code, inst, errors);
     case 173:
     case 174:
+    case 175:
       return aarch64_ext_sve_shrimm (self, info, code, inst, errors);
-    case 192:
     case 193:
     case 194:
     case 195:
+    case 196:
       return aarch64_ext_sve_quad_index (self, info, code, inst, errors);
-    case 197:
-      return aarch64_ext_sve_index (self, info, code, inst, errors);
     case 198:
-    case 200:
+      return aarch64_ext_sve_index (self, info, code, inst, errors);
+    case 199:
+    case 201:
       return aarch64_ext_sve_reglist (self, info, code, inst, errors);
     default: assert (0); abort ();
     }
diff --git a/opcodes/aarch64-dis.c b/opcodes/aarch64-dis.c
index 1a727a4..5571ab6 100644
--- a/opcodes/aarch64-dis.c
+++ b/opcodes/aarch64-dis.c
@@ -2832,6 +2832,17 @@ aarch64_decode_variant_using_iclass (aarch64_inst *inst)
 	variant = i;
       break;
 
+    case sve_shift_tsz_hsd:
+      i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
+      if (i == 0)
+	return FALSE;
+      while (i != 1)
+	{
+	  i >>= 1;
+	  variant += 1;
+	}
+      break;
+
     default:
       /* No mapping between instruction class and qualifiers.  */
       return TRUE;
diff --git a/opcodes/aarch64-opc-2.c b/opcodes/aarch64-opc-2.c
index 56a77d6..256e696 100644
--- a/opcodes/aarch64-opc-2.c
+++ b/opcodes/aarch64-opc-2.c
@@ -197,8 +197,9 @@ const struct aarch64_operand aarch64_operands[] =
   {AARCH64_OPND_CLASS_INT_REG, "SVE_Rn_SP", OPD_F_MAYBE_SP | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Rn}, "an integer register or SP"},
   {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_SHLIMM_PRED", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_tszh,FLD_SVE_imm5}, "a shift-left immediate operand"},
   {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_SHLIMM_UNPRED", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_tszh,FLD_imm5}, "a shift-left immediate operand"},
-  {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_SHRIMM_PRED", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_tszh,FLD_SVE_imm5}, "a shift-right immediate operand"},
-  {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_SHRIMM_UNPRED", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_tszh,FLD_imm5}, "a shift-right immediate operand"},
+  {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_SHRIMM_PRED", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_tszh,FLD_SVE_imm5}, "a shift-right immediate operand"},
+  {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_SHRIMM_UNPRED", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_tszh,FLD_imm5}, "a shift-right immediate operand"},
+  {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_SHRIMM_UNPRED_22", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_sz, FLD_SVE_tszl_19, FLD_SVE_imm3}, "a shift-right immediate operand"},
   {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_SIMM5", OPD_F_SEXT | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_imm5}, "a 5-bit signed immediate"},
   {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_SIMM5B", OPD_F_SEXT | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_imm5b}, "a 5-bit signed immediate"},
   {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_SIMM6", OPD_F_SEXT | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_imms}, "a 6-bit signed immediate"},
diff --git a/opcodes/aarch64-opc.c b/opcodes/aarch64-opc.c
index 187ad12..5ee9e20 100644
--- a/opcodes/aarch64-opc.c
+++ b/opcodes/aarch64-opc.c
@@ -2539,13 +2539,18 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
 
 	case AARCH64_OPND_SVE_SHRIMM_PRED:
 	case AARCH64_OPND_SVE_SHRIMM_UNPRED:
-	  size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
-	  if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
+	case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
 	    {
-	      set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
-	      return 0;
-	    }
-	  break;
+	      unsigned int index =
+		(type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
+	      size = aarch64_get_qualifier_esize (opnds[idx - index].qualifier);
+	      if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
+		{
+		  set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
+		  return 0;
+		}
+	      break;
+	    }
 
 	default:
 	  break;
@@ -3349,6 +3354,7 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
     case AARCH64_OPND_SVE_SHLIMM_UNPRED:
     case AARCH64_OPND_SVE_SHRIMM_PRED:
     case AARCH64_OPND_SVE_SHRIMM_UNPRED:
+    case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
     case AARCH64_OPND_SVE_SIMM5:
     case AARCH64_OPND_SVE_SIMM5B:
     case AARCH64_OPND_SVE_SIMM6:
diff --git a/opcodes/aarch64-tbl.h b/opcodes/aarch64-tbl.h
index 0e20b66..c8ad71f 100644
--- a/opcodes/aarch64-tbl.h
+++ b/opcodes/aarch64-tbl.h
@@ -4902,10 +4902,13 @@ struct aarch64_opcode aarch64_opcode_table[] =
       F(FLD_SVE_tszh,FLD_SVE_imm5), "a shift-left immediate operand")	\
     Y(IMMEDIATE, sve_shlimm, "SVE_SHLIMM_UNPRED", 0,			\
       F(FLD_SVE_tszh,FLD_imm5), "a shift-left immediate operand")	\
-    Y(IMMEDIATE, sve_shrimm, "SVE_SHRIMM_PRED", 0,			\
+    Y(IMMEDIATE, sve_shrimm, "SVE_SHRIMM_PRED", 1 << OPD_F_OD_LSB,	\
       F(FLD_SVE_tszh,FLD_SVE_imm5), "a shift-right immediate operand")	\
-    Y(IMMEDIATE, sve_shrimm, "SVE_SHRIMM_UNPRED", 0,			\
+    Y(IMMEDIATE, sve_shrimm, "SVE_SHRIMM_UNPRED", 1 << OPD_F_OD_LSB,	\
       F(FLD_SVE_tszh,FLD_imm5), "a shift-right immediate operand")	\
+    Y(IMMEDIATE, sve_shrimm, "SVE_SHRIMM_UNPRED_22", 2 << OPD_F_OD_LSB,	\
+      F(FLD_SVE_sz, FLD_SVE_tszl_19, FLD_SVE_imm3),			\
+      "a shift-right immediate operand")				\
     Y(IMMEDIATE, imm, "SVE_SIMM5", OPD_F_SEXT, F(FLD_SVE_imm5),		\
       "a 5-bit signed immediate")					\
     Y(IMMEDIATE, imm, "SVE_SIMM5B", OPD_F_SEXT, F(FLD_SVE_imm5b),	\
-- 
2.7.4


^ permalink raw reply	[flat|nested] 24+ messages in thread

* [PATCH 11/16] [binutils][aarch64] New sve_shift_tsz_bhsd iclass.
  2019-05-01 14:44 [Patch 00/16] [gas][aarch64] SVE2 binutils instructions for aarch64 Matthew Malcomson
                   ` (7 preceding siblings ...)
  2019-05-01 14:44 ` [PATCH 05/16] [binutils][aarch64] New SVE_Zm3_11_INDEX operand Matthew Malcomson
@ 2019-05-01 14:45 ` Matthew Malcomson
  2019-05-01 14:45 ` [PATCH 10/16] [binutils][aarch64] New SVE_SHRIMM_UNPRED_22 operand Matthew Malcomson
                   ` (8 subsequent siblings)
  17 siblings, 0 replies; 24+ messages in thread
From: Matthew Malcomson @ 2019-05-01 14:45 UTC (permalink / raw)
  To: binutils; +Cc: nd, Matthew Malcomson

This new iclass encodes the variant according to which of bits
23-22:20-19 is the most significant bit set, where those bits are
usually part of a constant operand.
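
(Illustration only, not part of the patch: enumerating the 4-bit
tszh<23-22>:tszl<20-19> values shows how the position of the most
significant set bit picks the .b/.h/.s/.d variant, leaving the lower
bits free for the immediate.)

#include <stdio.h>

int
main (void)
{
  static const char *const suffix[] = { ".b", ".h", ".s", ".d" };
  unsigned int i;

  for (i = 0; i < 16; i++)
    {
      int variant = -1;   /* Index of the most significant set bit.  */
      unsigned int tmp;

      for (tmp = i; tmp != 0; tmp >>= 1)
        variant++;
      printf ("tszh:tszl %u%u%u%u -> %s\n",
              (i >> 3) & 1, (i >> 2) & 1, (i >> 1) & 1, i & 1,
              variant < 0 ? "no variant (invalid)" : suffix[variant]);
    }
  return 0;
}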

include/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* opcode/aarch64.h (enum aarch64_insn_class): Add sve_shift_tsz_bhsd
	iclass.

opcodes/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* aarch64-asm.c (aarch64_encode_variant_using_iclass): Handle
	sve_shift_tsz_bhsd iclass encode.
	* aarch64-dis.c (aarch64_decode_variant_using_iclass): Handle
	sve_shift_tsz_bhsd iclass decode.
---
 include/opcode/aarch64.h |  1 +
 opcodes/aarch64-asm.c    |  1 +
 opcodes/aarch64-dis.c    | 11 +++++++++++
 3 files changed, 13 insertions(+)

diff --git a/include/opcode/aarch64.h b/include/opcode/aarch64.h
index ce9955d..285af27 100644
--- a/include/opcode/aarch64.h
+++ b/include/opcode/aarch64.h
@@ -595,6 +595,7 @@ enum aarch64_insn_class
   sve_size_sd2,
   sve_size_013,
   sve_shift_tsz_hsd,
+  sve_shift_tsz_bhsd,
   testbranch,
   cryptosm3,
   cryptosm4,
diff --git a/opcodes/aarch64-asm.c b/opcodes/aarch64-asm.c
index 6be17f9..ad50598 100644
--- a/opcodes/aarch64-asm.c
+++ b/opcodes/aarch64-asm.c
@@ -1626,6 +1626,7 @@ aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
     case sve_shift_pred:
     case sve_shift_unpred:
     case sve_shift_tsz_hsd:
+    case sve_shift_tsz_bhsd:
       /* For indices and shift amounts, the variant is encoded as
 	 part of the immediate.  */
       break;
diff --git a/opcodes/aarch64-dis.c b/opcodes/aarch64-dis.c
index 5571ab6..b42e4d5 100644
--- a/opcodes/aarch64-dis.c
+++ b/opcodes/aarch64-dis.c
@@ -2832,6 +2832,17 @@ aarch64_decode_variant_using_iclass (aarch64_inst *inst)
 	variant = i;
       break;
 
+    case sve_shift_tsz_bhsd:
+      i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
+      if (i == 0)
+	return FALSE;
+      while (i != 1)
+	{
+	  i >>= 1;
+	  variant += 1;
+	}
+      break;
+
     case sve_shift_tsz_hsd:
       i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
       if (i == 0)
-- 
2.7.4


^ permalink raw reply	[flat|nested] 24+ messages in thread

* [PATCH 14/16] [binutils][aarch64] New SVE_SHLIMM_UNPRED_22 operand.
  2019-05-01 14:44 [Patch 00/16] [gas][aarch64] SVE2 binutils instructions for aarch64 Matthew Malcomson
                   ` (11 preceding siblings ...)
  2019-05-01 14:45 ` [PATCH 12/16] [binutils][aarch64] New SVE_Zm4_11_INDEX operand Matthew Malcomson
@ 2019-05-01 14:45 ` Matthew Malcomson
  2019-05-01 14:45 ` [PATCH 13/16] [binutils][aarch64] New sve_size_tsz_bhs iclass Matthew Malcomson
                   ` (4 subsequent siblings)
  17 siblings, 0 replies; 24+ messages in thread
From: Matthew Malcomson @ 2019-05-01 14:45 UTC (permalink / raw)
  To: binutils; +Cc: nd, Matthew Malcomson

The new operand describes a shift-left immediate encoded in bits
22:20-19:18-16, where the shift amount satisfies
UInt(bits) - esize == shift.
This operand is used by instructions such as sshllb.
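
(Illustration only, an assumption based on the description above rather
than code from the patch: packing a shift-left amount into bits
22:20-19:18-16 with esize taken in bits, so that
UInt (tsz:imm3) == esize + shift.)

static unsigned int
shlimm_unpred_22_bits (unsigned int esize_bits, unsigned int shift)
{
  unsigned int value = esize_bits + shift;  /* 6-bit tsz:imm3 value.  */
  unsigned int imm3 = value & 0x7;          /* Bits 18-16.  */
  unsigned int tszl = (value >> 3) & 0x3;   /* Bits 20-19.  */
  unsigned int sz = (value >> 5) & 0x1;     /* Bit 22.  */

  return (sz << 22) | (tszl << 19) | (imm3 << 16);
}

/* E.g. sshllb z0.h, z1.b, #3: esize_bits == 8 and shift == 3, giving
   tsz:imm3 == 0b001011.  */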

gas/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* config/tc-aarch64.c (parse_operands): Handle new SVE_SHLIMM_UNPRED_22 operand.

include/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* opcode/aarch64.h (enum aarch64_opnd): New SVE_SHLIMM_UNPRED_22 operand.

opcodes/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* aarch64-asm-2.c: Regenerated.
	* aarch64-dis-2.c: Regenerated.
	* aarch64-opc-2.c: Regenerated.
	* aarch64-opc.c (operand_general_constraint_met_p): Constraint checking
	for SVE_SHLIMM_UNPRED_22.
	(aarch64_print_operand): Add printing for SVE_SHLIMM_UNPRED_22.
	* aarch64-tbl.h (AARCH64_OPERANDS): Use new SVE_SHLIMM_UNPRED_22 operand.
---
 gas/config/tc-aarch64.c  |  1 +
 include/opcode/aarch64.h |  1 +
 opcodes/aarch64-asm-2.c  | 21 +++++++++++----------
 opcodes/aarch64-dis-2.c  | 21 +++++++++++----------
 opcodes/aarch64-opc-2.c  |  1 +
 opcodes/aarch64-opc.c    |  2 ++
 opcodes/aarch64-tbl.h    |  3 +++
 7 files changed, 30 insertions(+), 20 deletions(-)

diff --git a/gas/config/tc-aarch64.c b/gas/config/tc-aarch64.c
index fbe2dcf..9b09cbf 100644
--- a/gas/config/tc-aarch64.c
+++ b/gas/config/tc-aarch64.c
@@ -5783,6 +5783,7 @@ parse_operands (char *str, const aarch64_opcode *opcode)
 	case AARCH64_OPND_SVE_LIMM_MOV:
 	case AARCH64_OPND_SVE_SHLIMM_PRED:
 	case AARCH64_OPND_SVE_SHLIMM_UNPRED:
+	case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
 	case AARCH64_OPND_SVE_SHRIMM_PRED:
 	case AARCH64_OPND_SVE_SHRIMM_UNPRED:
 	case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
diff --git a/include/opcode/aarch64.h b/include/opcode/aarch64.h
index 2b78e97..ac484ca 100644
--- a/include/opcode/aarch64.h
+++ b/include/opcode/aarch64.h
@@ -389,6 +389,7 @@ enum aarch64_opnd
   AARCH64_OPND_SVE_Rn_SP,	/* Integer Rn or SP, alt. SVE position.  */
   AARCH64_OPND_SVE_SHLIMM_PRED,	  /* SVE shift left amount (predicated).  */
   AARCH64_OPND_SVE_SHLIMM_UNPRED, /* SVE shift left amount (unpredicated).  */
+  AARCH64_OPND_SVE_SHLIMM_UNPRED_22,	/* SVE 3 bit shift left unpred.  */
   AARCH64_OPND_SVE_SHRIMM_PRED,	  /* SVE shift right amount (predicated).  */
   AARCH64_OPND_SVE_SHRIMM_UNPRED, /* SVE shift right amount (unpredicated).  */
   AARCH64_OPND_SVE_SHRIMM_UNPRED_22,	/* SVE 3 bit shift right unpred.  */
diff --git a/opcodes/aarch64-asm-2.c b/opcodes/aarch64-asm-2.c
index 3735fbc..c661c20 100644
--- a/opcodes/aarch64-asm-2.c
+++ b/opcodes/aarch64-asm-2.c
@@ -637,7 +637,6 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 168:
     case 169:
     case 170:
-    case 184:
     case 185:
     case 186:
     case 187:
@@ -646,8 +645,9 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 190:
     case 191:
     case 192:
-    case 198:
-    case 201:
+    case 193:
+    case 199:
+    case 202:
       return aarch64_ins_regno (self, info, code, inst, errors);
     case 13:
       return aarch64_ins_reg_extended (self, info, code, inst, errors);
@@ -659,7 +659,7 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 31:
     case 32:
     case 33:
-    case 203:
+    case 204:
       return aarch64_ins_reglane (self, info, code, inst, errors);
     case 34:
       return aarch64_ins_reglist (self, info, code, inst, errors);
@@ -695,7 +695,6 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 81:
     case 158:
     case 160:
-    case 176:
     case 177:
     case 178:
     case 179:
@@ -703,6 +702,7 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 181:
     case 182:
     case 183:
+    case 184:
       return aarch64_ins_imm (self, info, code, inst, errors);
     case 42:
     case 43:
@@ -840,21 +840,22 @@ aarch64_insert_operand (const aarch64_operand *self,
       return aarch64_ins_sve_scale (self, info, code, inst, errors);
     case 171:
     case 172:
-      return aarch64_ins_sve_shlimm (self, info, code, inst, errors);
     case 173:
+      return aarch64_ins_sve_shlimm (self, info, code, inst, errors);
     case 174:
     case 175:
+    case 176:
       return aarch64_ins_sve_shrimm (self, info, code, inst, errors);
-    case 193:
     case 194:
     case 195:
     case 196:
     case 197:
+    case 198:
       return aarch64_ins_sve_quad_index (self, info, code, inst, errors);
-    case 199:
-      return aarch64_ins_sve_index (self, info, code, inst, errors);
     case 200:
-    case 202:
+      return aarch64_ins_sve_index (self, info, code, inst, errors);
+    case 201:
+    case 203:
       return aarch64_ins_sve_reglist (self, info, code, inst, errors);
     default: assert (0); abort ();
     }
diff --git a/opcodes/aarch64-dis-2.c b/opcodes/aarch64-dis-2.c
index a7281a9..d3f51ef 100644
--- a/opcodes/aarch64-dis-2.c
+++ b/opcodes/aarch64-dis-2.c
@@ -20032,7 +20032,6 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 168:
     case 169:
     case 170:
-    case 184:
     case 185:
     case 186:
     case 187:
@@ -20041,8 +20040,9 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 190:
     case 191:
     case 192:
-    case 198:
-    case 201:
+    case 193:
+    case 199:
+    case 202:
       return aarch64_ext_regno (self, info, code, inst, errors);
     case 8:
       return aarch64_ext_regrt_sysins (self, info, code, inst, errors);
@@ -20058,7 +20058,7 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 31:
     case 32:
     case 33:
-    case 203:
+    case 204:
       return aarch64_ext_reglane (self, info, code, inst, errors);
     case 34:
       return aarch64_ext_reglist (self, info, code, inst, errors);
@@ -20095,7 +20095,6 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 81:
     case 158:
     case 160:
-    case 176:
     case 177:
     case 178:
     case 179:
@@ -20103,6 +20102,7 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 181:
     case 182:
     case 183:
+    case 184:
       return aarch64_ext_imm (self, info, code, inst, errors);
     case 42:
     case 43:
@@ -20242,21 +20242,22 @@ aarch64_extract_operand (const aarch64_operand *self,
       return aarch64_ext_sve_scale (self, info, code, inst, errors);
     case 171:
     case 172:
-      return aarch64_ext_sve_shlimm (self, info, code, inst, errors);
     case 173:
+      return aarch64_ext_sve_shlimm (self, info, code, inst, errors);
     case 174:
     case 175:
+    case 176:
       return aarch64_ext_sve_shrimm (self, info, code, inst, errors);
-    case 193:
     case 194:
     case 195:
     case 196:
     case 197:
+    case 198:
       return aarch64_ext_sve_quad_index (self, info, code, inst, errors);
-    case 199:
-      return aarch64_ext_sve_index (self, info, code, inst, errors);
     case 200:
-    case 202:
+      return aarch64_ext_sve_index (self, info, code, inst, errors);
+    case 201:
+    case 203:
       return aarch64_ext_sve_reglist (self, info, code, inst, errors);
     default: assert (0); abort ();
     }
diff --git a/opcodes/aarch64-opc-2.c b/opcodes/aarch64-opc-2.c
index 43d59ec..7de9ca7 100644
--- a/opcodes/aarch64-opc-2.c
+++ b/opcodes/aarch64-opc-2.c
@@ -197,6 +197,7 @@ const struct aarch64_operand aarch64_operands[] =
   {AARCH64_OPND_CLASS_INT_REG, "SVE_Rn_SP", OPD_F_MAYBE_SP | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Rn}, "an integer register or SP"},
   {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_SHLIMM_PRED", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_tszh,FLD_SVE_imm5}, "a shift-left immediate operand"},
   {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_SHLIMM_UNPRED", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_tszh,FLD_imm5}, "a shift-left immediate operand"},
+  {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_SHLIMM_UNPRED_22", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_sz, FLD_SVE_tszl_19, FLD_SVE_imm3}, "a shift-left immediate operand"},
   {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_SHRIMM_PRED", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_tszh,FLD_SVE_imm5}, "a shift-right immediate operand"},
   {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_SHRIMM_UNPRED", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_tszh,FLD_imm5}, "a shift-right immediate operand"},
   {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_SHRIMM_UNPRED_22", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_sz, FLD_SVE_tszl_19, FLD_SVE_imm3}, "a shift-right immediate operand"},
diff --git a/opcodes/aarch64-opc.c b/opcodes/aarch64-opc.c
index 292de34..6b43651 100644
--- a/opcodes/aarch64-opc.c
+++ b/opcodes/aarch64-opc.c
@@ -2530,6 +2530,7 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
 
 	case AARCH64_OPND_SVE_SHLIMM_PRED:
 	case AARCH64_OPND_SVE_SHLIMM_UNPRED:
+	case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
 	  size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
 	  if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
 	    {
@@ -3355,6 +3356,7 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
     case AARCH64_OPND_SIMM5:
     case AARCH64_OPND_SVE_SHLIMM_PRED:
     case AARCH64_OPND_SVE_SHLIMM_UNPRED:
+    case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
     case AARCH64_OPND_SVE_SHRIMM_PRED:
     case AARCH64_OPND_SVE_SHRIMM_UNPRED:
     case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
diff --git a/opcodes/aarch64-tbl.h b/opcodes/aarch64-tbl.h
index f6ab6ad..d7043a2 100644
--- a/opcodes/aarch64-tbl.h
+++ b/opcodes/aarch64-tbl.h
@@ -4902,6 +4902,9 @@ struct aarch64_opcode aarch64_opcode_table[] =
       F(FLD_SVE_tszh,FLD_SVE_imm5), "a shift-left immediate operand")	\
     Y(IMMEDIATE, sve_shlimm, "SVE_SHLIMM_UNPRED", 0,			\
       F(FLD_SVE_tszh,FLD_imm5), "a shift-left immediate operand")	\
+    Y(IMMEDIATE, sve_shlimm, "SVE_SHLIMM_UNPRED_22", 0,			\
+      F(FLD_SVE_sz, FLD_SVE_tszl_19, FLD_SVE_imm3),			\
+      "a shift-left immediate operand")					\
     Y(IMMEDIATE, sve_shrimm, "SVE_SHRIMM_PRED", 1 << OPD_F_OD_LSB,	\
       F(FLD_SVE_tszh,FLD_SVE_imm5), "a shift-right immediate operand")	\
     Y(IMMEDIATE, sve_shrimm, "SVE_SHRIMM_UNPRED", 1 << OPD_F_OD_LSB,	\
-- 
2.7.4


^ permalink raw reply	[flat|nested] 24+ messages in thread

* [PATCH 12/16] [binutils][aarch64] New SVE_Zm4_11_INDEX operand.
  2019-05-01 14:44 [Patch 00/16] [gas][aarch64] SVE2 binutils instructions for aarch64 Matthew Malcomson
                   ` (10 preceding siblings ...)
  2019-05-01 14:45 ` [PATCH 06/16] [binutils][aarch64] New SVE_ADDR_ZX operand Matthew Malcomson
@ 2019-05-01 14:45 ` Matthew Malcomson
  2019-05-01 14:45 ` [PATCH 14/16] [binutils][aarch64] New SVE_SHLIMM_UNPRED_22 operand Matthew Malcomson
                   ` (5 subsequent siblings)
  17 siblings, 0 replies; 24+ messages in thread
From: Matthew Malcomson @ 2019-05-01 14:45 UTC (permalink / raw)
  To: binutils; +Cc: nd, Matthew Malcomson

This includes defining a new single-bit field, SVE_i2h, at position 20.
SVE_Zm4_11_INDEX handles indexed Zm registers where the index is encoded
in bits 20:11 and the register is chosen from the range z0-z15 in bits
19-16.
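
(Illustration only, a sketch derived from the field layout above rather
than code from the patch: extracting the register number and index of
an SVE_Zm4_11_INDEX operand from an instruction word.)

static void
zm4_11_index (unsigned int insn, unsigned int *regno, unsigned int *index)
{
  *regno = (insn >> 16) & 0xf;          /* Bits 19-16: z0-z15.  */
  *index = (((insn >> 20) & 1) << 1)    /* SVE_i2h, bit 20 (high bit).  */
           | ((insn >> 11) & 1);        /* SVE_i3l, bit 11 (low bit).  */
}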

gas/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* config/tc-aarch64.c (parse_operands): Handle new SVE_Zm4_11_INDEX operand.

include/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* opcode/aarch64.h (enum aarch64_opnd): New SVE_Zm4_11_INDEX operand.

opcodes/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* aarch64-asm-2.c: Regenerated.
	* aarch64-dis-2.c: Regenerated.
	* aarch64-opc-2.c: Regenerated.
	* aarch64-opc.c (operand_general_constraint_met_p): Constraint checking
	for SVE_Zm4_11_INDEX.
	(aarch64_print_operand): Add printing for SVE_Zm4_11_INDEX.
	(fields): Handle SVE_i2h field.
	* aarch64-opc.h (enum aarch64_field_kind): New SVE_i2h field.
	* aarch64-tbl.h (AARCH64_OPERANDS): Use new SVE_Zm4_11_INDEX operand.
---
 gas/config/tc-aarch64.c  |  1 +
 include/opcode/aarch64.h |  1 +
 opcodes/aarch64-asm-2.c  | 13 +++++++------
 opcodes/aarch64-dis-2.c  | 13 +++++++------
 opcodes/aarch64-opc-2.c  |  1 +
 opcodes/aarch64-opc.c    |  3 +++
 opcodes/aarch64-opc.h    |  1 +
 opcodes/aarch64-tbl.h    |  3 +++
 8 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/gas/config/tc-aarch64.c b/gas/config/tc-aarch64.c
index d7fc372..fbe2dcf 100644
--- a/gas/config/tc-aarch64.c
+++ b/gas/config/tc-aarch64.c
@@ -5653,6 +5653,7 @@ parse_operands (char *str, const aarch64_opcode *opcode)
 	case AARCH64_OPND_SVE_Zm3_INDEX:
 	case AARCH64_OPND_SVE_Zm3_22_INDEX:
 	case AARCH64_OPND_SVE_Zm3_11_INDEX:
+	case AARCH64_OPND_SVE_Zm4_11_INDEX:
 	case AARCH64_OPND_SVE_Zm4_INDEX:
 	case AARCH64_OPND_SVE_Zn_INDEX:
 	  reg_type = REG_TYPE_ZN;
diff --git a/include/opcode/aarch64.h b/include/opcode/aarch64.h
index 285af27..99da95c 100644
--- a/include/opcode/aarch64.h
+++ b/include/opcode/aarch64.h
@@ -412,6 +412,7 @@ enum aarch64_opnd
   AARCH64_OPND_SVE_Zm3_INDEX,	/* z0-z7[0-3] in Zm, bits [20,16].  */
   AARCH64_OPND_SVE_Zm3_22_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 22.  */
   AARCH64_OPND_SVE_Zm3_11_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 11.  */
+  AARCH64_OPND_SVE_Zm4_11_INDEX, /* z0-z15[0-3] in Zm plus bit 11.  */
   AARCH64_OPND_SVE_Zm4_INDEX,	/* z0-z15[0-1] in Zm, bits [20,16].  */
   AARCH64_OPND_SVE_Zn,		/* SVE vector register in Zn.  */
   AARCH64_OPND_SVE_Zn_INDEX,	/* Indexed SVE vector register, for DUP.  */
diff --git a/opcodes/aarch64-asm-2.c b/opcodes/aarch64-asm-2.c
index e6ddce7..3735fbc 100644
--- a/opcodes/aarch64-asm-2.c
+++ b/opcodes/aarch64-asm-2.c
@@ -646,8 +646,8 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 190:
     case 191:
     case 192:
-    case 197:
-    case 200:
+    case 198:
+    case 201:
       return aarch64_ins_regno (self, info, code, inst, errors);
     case 13:
       return aarch64_ins_reg_extended (self, info, code, inst, errors);
@@ -659,7 +659,7 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 31:
     case 32:
     case 33:
-    case 202:
+    case 203:
       return aarch64_ins_reglane (self, info, code, inst, errors);
     case 34:
       return aarch64_ins_reglist (self, info, code, inst, errors);
@@ -849,11 +849,12 @@ aarch64_insert_operand (const aarch64_operand *self,
     case 194:
     case 195:
     case 196:
+    case 197:
       return aarch64_ins_sve_quad_index (self, info, code, inst, errors);
-    case 198:
-      return aarch64_ins_sve_index (self, info, code, inst, errors);
     case 199:
-    case 201:
+      return aarch64_ins_sve_index (self, info, code, inst, errors);
+    case 200:
+    case 202:
       return aarch64_ins_sve_reglist (self, info, code, inst, errors);
     default: assert (0); abort ();
     }
diff --git a/opcodes/aarch64-dis-2.c b/opcodes/aarch64-dis-2.c
index 6a802b4..a7281a9 100644
--- a/opcodes/aarch64-dis-2.c
+++ b/opcodes/aarch64-dis-2.c
@@ -20041,8 +20041,8 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 190:
     case 191:
     case 192:
-    case 197:
-    case 200:
+    case 198:
+    case 201:
       return aarch64_ext_regno (self, info, code, inst, errors);
     case 8:
       return aarch64_ext_regrt_sysins (self, info, code, inst, errors);
@@ -20058,7 +20058,7 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 31:
     case 32:
     case 33:
-    case 202:
+    case 203:
       return aarch64_ext_reglane (self, info, code, inst, errors);
     case 34:
       return aarch64_ext_reglist (self, info, code, inst, errors);
@@ -20251,11 +20251,12 @@ aarch64_extract_operand (const aarch64_operand *self,
     case 194:
     case 195:
     case 196:
+    case 197:
       return aarch64_ext_sve_quad_index (self, info, code, inst, errors);
-    case 198:
-      return aarch64_ext_sve_index (self, info, code, inst, errors);
     case 199:
-    case 201:
+      return aarch64_ext_sve_index (self, info, code, inst, errors);
+    case 200:
+    case 202:
       return aarch64_ext_sve_reglist (self, info, code, inst, errors);
     default: assert (0); abort ();
     }
diff --git a/opcodes/aarch64-opc-2.c b/opcodes/aarch64-opc-2.c
index 256e696..43d59ec 100644
--- a/opcodes/aarch64-opc-2.c
+++ b/opcodes/aarch64-opc-2.c
@@ -220,6 +220,7 @@ const struct aarch64_operand aarch64_operands[] =
   {AARCH64_OPND_CLASS_SVE_REG, "SVE_Zm3_INDEX", 3 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zm_16}, "an indexed SVE vector register"},
   {AARCH64_OPND_CLASS_SVE_REG, "SVE_Zm3_22_INDEX", 3 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_i3h, FLD_SVE_Zm_16}, "an indexed SVE vector register"},
   {AARCH64_OPND_CLASS_SVE_REG, "SVE_Zm3_11_INDEX", 3 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_i3h2, FLD_SVE_i3l, FLD_SVE_imm3}, "an indexed SVE vector register"},
+  {AARCH64_OPND_CLASS_SVE_REG, "SVE_Zm4_11_INDEX", 4 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_i2h, FLD_SVE_i3l, FLD_SVE_imm4}, "an indexed SVE vector register"},
   {AARCH64_OPND_CLASS_SVE_REG, "SVE_Zm4_INDEX", 4 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zm_16}, "an indexed SVE vector register"},
   {AARCH64_OPND_CLASS_SVE_REG, "SVE_Zn", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zn}, "an SVE vector register"},
   {AARCH64_OPND_CLASS_SVE_REG, "SVE_Zn_INDEX", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zn}, "an indexed SVE vector register"},
diff --git a/opcodes/aarch64-opc.c b/opcodes/aarch64-opc.c
index 5ee9e20..292de34 100644
--- a/opcodes/aarch64-opc.c
+++ b/opcodes/aarch64-opc.c
@@ -296,6 +296,7 @@ const aarch64_field fields[] =
     { 22,  1 }, /* SVE_i3h: high bit of 3-bit immediate.  */
     { 11,  1 }, /* SVE_i3l: low bit of 3-bit immediate.  */
     { 19,  2 }, /* SVE_i3h2: two high bits of 3bit immediate, bits [20,19].  */
+    { 20,  1 }, /* SVE_i2h: high bit of 2bit immediate, bit 20.  */
     { 16,  3 }, /* SVE_imm3: 3-bit immediate field.  */
     { 16,  4 }, /* SVE_imm4: 4-bit immediate field.  */
     {  5,  5 }, /* SVE_imm5: 5-bit immediate field.  */
@@ -1519,6 +1520,7 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
 	case AARCH64_OPND_SVE_Zm3_INDEX:
 	case AARCH64_OPND_SVE_Zm3_22_INDEX:
 	case AARCH64_OPND_SVE_Zm3_11_INDEX:
+	case AARCH64_OPND_SVE_Zm4_11_INDEX:
 	case AARCH64_OPND_SVE_Zm4_INDEX:
 	  size = get_operand_fields_width (get_operand_from_code (type));
 	  shift = get_operand_specific_data (&aarch64_operands[type]);
@@ -3322,6 +3324,7 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
     case AARCH64_OPND_SVE_Zm3_INDEX:
     case AARCH64_OPND_SVE_Zm3_22_INDEX:
     case AARCH64_OPND_SVE_Zm3_11_INDEX:
+    case AARCH64_OPND_SVE_Zm4_11_INDEX:
     case AARCH64_OPND_SVE_Zm4_INDEX:
     case AARCH64_OPND_SVE_Zn_INDEX:
       snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
diff --git a/opcodes/aarch64-opc.h b/opcodes/aarch64-opc.h
index 8d18175..bb0a508 100644
--- a/opcodes/aarch64-opc.h
+++ b/opcodes/aarch64-opc.h
@@ -123,6 +123,7 @@ enum aarch64_field_kind
   FLD_SVE_i3h,
   FLD_SVE_i3l,
   FLD_SVE_i3h2,
+  FLD_SVE_i2h,
   FLD_SVE_imm3,
   FLD_SVE_imm4,
   FLD_SVE_imm5,
diff --git a/opcodes/aarch64-tbl.h b/opcodes/aarch64-tbl.h
index c8ad71f..f6ab6ad 100644
--- a/opcodes/aarch64-tbl.h
+++ b/opcodes/aarch64-tbl.h
@@ -4948,6 +4948,9 @@ struct aarch64_opcode aarch64_opcode_table[] =
     Y(SVE_REG, sve_quad_index, "SVE_Zm3_11_INDEX", 			\
       3 << OPD_F_OD_LSB, F(FLD_SVE_i3h2, FLD_SVE_i3l, FLD_SVE_imm3),    \
       "an indexed SVE vector register")					\
+    Y(SVE_REG, sve_quad_index, "SVE_Zm4_11_INDEX", 			\
+      4 << OPD_F_OD_LSB, F(FLD_SVE_i2h, FLD_SVE_i3l, FLD_SVE_imm4),     \
+      "an indexed SVE vector register")					\
     Y(SVE_REG, sve_quad_index, "SVE_Zm4_INDEX", 			\
       4 << OPD_F_OD_LSB, F(FLD_SVE_Zm_16),				\
       "an indexed SVE vector register")					\
-- 
2.7.4


^ permalink raw reply	[flat|nested] 24+ messages in thread

* [PATCH 13/16] [binutils][aarch64] New sve_size_tsz_bhs iclass.
  2019-05-01 14:44 [Patch 00/16] [gas][aarch64] SVE2 binutils instructions for aarch64 Matthew Malcomson
                   ` (12 preceding siblings ...)
  2019-05-01 14:45 ` [PATCH 14/16] [binutils][aarch64] New SVE_SHLIMM_UNPRED_22 operand Matthew Malcomson
@ 2019-05-01 14:45 ` Matthew Malcomson
  2019-05-01 14:55 ` [Patch 15/16] [binutils][aarch64] Add SVE2 instructions Matthew Malcomson
                   ` (3 subsequent siblings)
  17 siblings, 0 replies; 24+ messages in thread
From: Matthew Malcomson @ 2019-05-01 14:45 UTC (permalink / raw)
  To: binutils; +Cc: nd, Matthew Malcomson

Add the sve_size_tsz_bhs iclass, needed for sqxtnb and similar
instructions.  This iclass encodes one of three variants as a one-hot
3-bit value: exactly one bit may be set, and its position selects the
variant.
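
(Illustration only, not part of the patch: a standalone sketch of the
sve_size_tsz_bhs rule, with a guard against an all-zero field.  The
3-bit sz<22>:tszl<20-19> value must be one-hot: 0b001 selects .b,
0b010 selects .h and 0b100 selects .s.)

static int
size_tsz_bhs_variant (unsigned int sz_tszl)
{
  int variant = 0;

  if (sz_tszl == 0)
    return -1;  /* No bit set: invalid encoding.  */
  while (sz_tszl != 1)
    {
      if (sz_tszl & 1)
        return -1;  /* Not one-hot: invalid encoding.  */
      sz_tszl >>= 1;
      variant += 1;
    }
  return variant;  /* 0 = .b, 1 = .h, 2 = .s.  */
}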

include/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* opcode/aarch64.h (enum aarch64_insn_class): Add sve_size_tsz_bhs
	iclass.

opcodes/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* aarch64-asm.c (aarch64_encode_variant_using_iclass): Handle
	sve_size_tsz_bhs iclass encode.
	* aarch64-dis.c (aarch64_decode_variant_using_iclass): Handle
	sve_size_tsz_bhs iclass decode.
---
 include/opcode/aarch64.h |  1 +
 opcodes/aarch64-asm.c    |  6 ++++++
 opcodes/aarch64-dis.c    | 11 +++++++++++
 3 files changed, 18 insertions(+)

diff --git a/include/opcode/aarch64.h b/include/opcode/aarch64.h
index 99da95c..2b78e97 100644
--- a/include/opcode/aarch64.h
+++ b/include/opcode/aarch64.h
@@ -597,6 +597,7 @@ enum aarch64_insn_class
   sve_size_013,
   sve_shift_tsz_hsd,
   sve_shift_tsz_bhsd,
+  sve_size_tsz_bhs,
   testbranch,
   cryptosm3,
   cryptosm4,
diff --git a/opcodes/aarch64-asm.c b/opcodes/aarch64-asm.c
index ad50598..afb0e5b 100644
--- a/opcodes/aarch64-asm.c
+++ b/opcodes/aarch64-asm.c
@@ -1673,6 +1673,12 @@ aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
 		    aarch64_get_variant (inst) + 1, 0);
       break;
 
+    case sve_size_tsz_bhs:
+      insert_fields (&inst->value,
+		     (1 << aarch64_get_variant (inst)),
+		     0, 2, FLD_SVE_tszl_19, FLD_SVE_sz);
+      break;
+
     case sve_size_013:
       variant = aarch64_get_variant (inst);
       if (variant == 2)
diff --git a/opcodes/aarch64-dis.c b/opcodes/aarch64-dis.c
index b42e4d5..6b53a2c 100644
--- a/opcodes/aarch64-dis.c
+++ b/opcodes/aarch64-dis.c
@@ -2843,6 +2843,17 @@ aarch64_decode_variant_using_iclass (aarch64_inst *inst)
 	}
       break;
 
+    case sve_size_tsz_bhs:
+      i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
+      while (i != 1)
+	{
+	  if (i & 1)
+	    return FALSE;
+	  i >>= 1;
+	  variant += 1;
+	}
+      break;
+
     case sve_shift_tsz_hsd:
       i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
       if (i == 0)
-- 
2.7.4


^ permalink raw reply	[flat|nested] 24+ messages in thread

* [Patch 15/16] [binutils][aarch64] Add SVE2 instructions.
  2019-05-01 14:44 [Patch 00/16] [gas][aarch64] SVE2 binutils instructions for aarch64 Matthew Malcomson
                   ` (13 preceding siblings ...)
  2019-05-01 14:45 ` [PATCH 13/16] [binutils][aarch64] New sve_size_tsz_bhs iclass Matthew Malcomson
@ 2019-05-01 14:55 ` Matthew Malcomson
  2019-05-01 15:02 ` [Patch 16/16] [binutils][aarch64] Add SVE2 tests Matthew Malcomson
                   ` (2 subsequent siblings)
  17 siblings, 0 replies; 24+ messages in thread
From: Matthew Malcomson @ 2019-05-01 14:55 UTC (permalink / raw)
  To: binutils; +Cc: nd

This patch adds all the SVE2 instructions and their associated qualifier
sets.

I've removed the generated file from this patch to make things easier to
review.

Ok for trunk?

opcodes/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* aarch64-dis-2.c: Regenerate.
	* aarch64-tbl.h (OP_SVE_BBU): New variant set.
	(OP_SVE_BBB): New variant set.
	(OP_SVE_DDDD): New variant set.
	(OP_SVE_HHH): New variant set.
	(OP_SVE_HHHU): New variant set.
	(OP_SVE_SSS): New variant set.
	(OP_SVE_SSSU): New variant set.
	(OP_SVE_SHH): New variant set.
	(OP_SVE_SBBU): New variant set.
	(OP_SVE_DSS): New variant set.
	(OP_SVE_DHHU): New variant set.
	(OP_SVE_VMV_HSD_BHS): New variant set.
	(OP_SVE_VVU_HSD_BHS): New variant set.
	(OP_SVE_VVVU_SD_BH): New variant set.
	(OP_SVE_VVVU_BHSD): New variant set.
	(OP_SVE_VVV_QHD_DBS): New variant set.
	(OP_SVE_VVV_HSD_BHS): New variant set.
	(OP_SVE_VVV_HSD_BHS2): New variant set.
	(OP_SVE_VVV_BHS_HSD): New variant set.
	(OP_SVE_VV_BHS_HSD): New variant set.
	(OP_SVE_VVV_SD): New variant set.
	(OP_SVE_VVU_BHS_HSD): New variant set.
	(OP_SVE_VZVV_SD): New variant set.
	(OP_SVE_VZVV_BH): New variant set.
	(OP_SVE_VZV_SD): New variant set.
	(aarch64_opcode_table): Add sve2 instructions.
---
 opcodes/aarch64-tbl.h | 419 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 419 insertions(+)

diff --git a/opcodes/aarch64-tbl.h b/opcodes/aarch64-tbl.h
index d7043a2..093e6f7 100644
--- a/opcodes/aarch64-tbl.h
+++ b/opcodes/aarch64-tbl.h
@@ -1455,6 +1455,14 @@
 {                                                       \
   QLF2(S_B,S_B),                                        \
 }
+#define OP_SVE_BBU                                      \
+{                                                       \
+  QLF3(S_B,S_B,NIL),                                \
+}
+#define OP_SVE_BBB                                      \
+{                                                       \
+  QLF3(S_B,S_B,S_B),                                \
+}
 #define OP_SVE_BBBU                                     \
 {                                                       \
   QLF4(S_B,S_B,S_B,NIL),                                \
@@ -1504,6 +1512,10 @@
 {                                                       \
   QLF3(S_D,S_D,S_D),                                    \
 }
+#define OP_SVE_DDDD                                     \
+{                                                       \
+  QLF4(S_D,S_D,S_D,S_D),                                \
+}
 #define OP_SVE_DMD                                      \
 {                                                       \
   QLF3(S_D,P_M,S_D),                                    \
@@ -1553,6 +1565,14 @@
 {                                                       \
   QLF2(S_H,S_B),                                        \
 }
+#define OP_SVE_HHH                                      \
+{                                                       \
+  QLF3(S_H,S_H,S_H),                                    \
+}
+#define OP_SVE_HHHU                                     \
+{                                                       \
+  QLF4(S_H,S_H,S_H,NIL),                                \
+}
 #define OP_SVE_HMH                                      \
 {                                                       \
   QLF3(S_H,P_M,S_H),                                    \
@@ -1600,10 +1620,22 @@
 {                                                       \
   QLF3(S_S,P_M,S_D),                                    \
 }
+#define OP_SVE_SSS                                      \
+{                                                       \
+  QLF3(S_S,S_S,S_S),                                    \
+}
+#define OP_SVE_SSSU                                     \
+{                                                       \
+  QLF4(S_S,S_S,S_S,NIL),                                \
+}
 #define OP_SVE_SMH                                      \
 {                                                       \
   QLF3(S_S,P_M,S_H),                                    \
 }
+#define OP_SVE_SHH                                      \
+{                                                       \
+  QLF3(S_S,S_H,S_H),                                    \
+}
 #define OP_SVE_SMS                                      \
 {                                                       \
   QLF3(S_S,P_M,S_S),                                    \
@@ -1624,6 +1656,18 @@
 {                                                       \
   QLF3(S_S,P_Z,S_S),                                    \
 }
+#define OP_SVE_SBBU                                     \
+{                                                       \
+  QLF4(S_S,S_B,S_B,NIL),                                \
+}
+#define OP_SVE_DSS                                      \
+{                                                       \
+  QLF3(S_D,S_S,S_S),                                    \
+}
+#define OP_SVE_DHHU                                     \
+{                                                       \
+  QLF4(S_D,S_H,S_H,NIL),                                \
+}
 #define OP_SVE_SZU                                      \
 {                                                       \
   QLF3(S_S,P_Z,NIL),                                    \
@@ -1709,6 +1753,18 @@
   QLF3(S_S,P_M,S_S),                                    \
   QLF3(S_D,P_M,S_D),                                    \
 }
+#define OP_SVE_VMV_HSD_BHS                              \
+{                                                       \
+  QLF3(S_H,P_M,S_B),                                    \
+  QLF3(S_S,P_M,S_H),                                    \
+  QLF3(S_D,P_M,S_S),                                    \
+}
+#define OP_SVE_VVU_HSD_BHS                              \
+{                                                       \
+  QLF3(S_H,S_B,NIL),                                    \
+  QLF3(S_S,S_H,NIL),                                    \
+  QLF3(S_D,S_S,NIL),                                    \
+}
 #define OP_SVE_VMV_SD                                   \
 {                                                       \
   QLF3(S_S,P_M,S_S),                                    \
@@ -1848,12 +1904,24 @@
 {                                                       \
   QLF4(S_S,S_S,S_S,NIL),                                \
 }
+#define OP_SVE_VVVU_SD_BH                               \
+{                                                       \
+  QLF4(S_S,S_B,S_B,NIL),                                \
+  QLF4(S_D,S_H,S_H,NIL),                                \
+}
 #define OP_SVE_VVVU_HSD                                 \
 {                                                       \
   QLF4(S_H,S_H,S_H,NIL),                                \
   QLF4(S_S,S_S,S_S,NIL),                                \
   QLF4(S_D,S_D,S_D,NIL),                                \
 }
+#define OP_SVE_VVVU_BHSD                                \
+{                                                       \
+  QLF4(S_B,S_B,S_B,NIL),                                \
+  QLF4(S_H,S_H,S_H,NIL),                                \
+  QLF4(S_S,S_S,S_S,NIL),                                \
+  QLF4(S_D,S_D,S_D,NIL),                                \
+}
 #define OP_SVE_VVV_BHSD                                 \
 {                                                       \
   QLF3(S_B,S_B,S_B),                                    \
@@ -1887,11 +1955,46 @@
 {                                                       \
   QLF3(S_S,S_B,S_B),                                    \
 }
+#define OP_SVE_VVV_QHD_DBS                              \
+{                                                       \
+  QLF3(S_Q,S_D,S_D),                                    \
+  QLF3(S_H,S_B,S_B),                                    \
+  QLF3(S_D,S_S,S_S),                                    \
+}
+#define OP_SVE_VVV_HSD_BHS                              \
+{                                                       \
+  QLF3(S_H,S_B,S_B),                                    \
+  QLF3(S_S,S_H,S_H),                                    \
+  QLF3(S_D,S_S,S_S),                                    \
+}
+#define OP_SVE_VVV_HSD_BHS2                             \
+{                                                       \
+  QLF3(S_H,S_H,S_B),                                    \
+  QLF3(S_S,S_S,S_H),                                    \
+  QLF3(S_D,S_D,S_S),                                    \
+}
+#define OP_SVE_VVV_BHS_HSD                              \
+{                                                       \
+  QLF3(S_B,S_H,S_H),                                    \
+  QLF3(S_H,S_S,S_S),                                    \
+  QLF3(S_S,S_D,S_D),                                    \
+}
+#define OP_SVE_VV_BHS_HSD                               \
+{                                                       \
+  QLF2(S_B,S_H),                                        \
+  QLF2(S_H,S_S),                                        \
+  QLF2(S_S,S_D),                                        \
+}
 #define OP_SVE_VVV_SD_BH                                \
 {                                                       \
   QLF3(S_S,S_B,S_B),                                    \
   QLF3(S_D,S_H,S_H),                                    \
 }
+#define OP_SVE_VVV_SD                                   \
+{                                                       \
+  QLF3(S_S,S_S,S_S),                                    \
+  QLF3(S_D,S_D,S_D),                                    \
+}
 #define OP_SVE_VV_BHSD                                  \
 {                                                       \
   QLF2(S_B,S_B),                                        \
@@ -1913,6 +2016,12 @@
   QLF2(S_S,S_S),                                        \
   QLF2(S_D,S_D),                                        \
 }
+#define OP_SVE_VVU_BHS_HSD                               \
+{                                                       \
+  QLF3(S_B,S_H,NIL),                                        \
+  QLF3(S_H,S_S,NIL),                                        \
+  QLF3(S_S,S_D,NIL),                                        \
+}
 #define OP_SVE_VV_HSD_BHS                               \
 {                                                       \
   QLF2(S_H,S_B),                                        \
@@ -1964,6 +2073,21 @@
   QLF4(S_S,P_Z,S_S,S_S),                                \
   QLF4(S_D,P_Z,S_D,S_D),                                \
 }
+#define OP_SVE_VZVV_SD                                 \
+{                                                       \
+  QLF4(S_S,P_Z,S_S,S_S),                                \
+  QLF4(S_D,P_Z,S_D,S_D),                                \
+}
+#define OP_SVE_VZVV_BH                                 \
+{                                                       \
+  QLF4(S_B,P_Z,S_B,S_B),                                \
+  QLF4(S_H,P_Z,S_H,S_H),                                \
+}
+#define OP_SVE_VZV_SD                                   \
+{                                                       \
+  QLF3(S_S,P_Z,S_S),                                    \
+  QLF3(S_D,P_Z,S_D),                                    \
+}
 #define OP_SVE_VZV_HSD                                  \
 {                                                       \
   QLF3(S_H,P_Z,S_H),                                    \
@@ -4463,6 +4587,301 @@ struct aarch64_opcode aarch64_opcode_table[] =
   _SVE_INSNC ("fmov", 0x05104000, 0xff30ffe0, sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Pg4_16, FPIMM0), OP_SVE_VM_HSD, F_ALIAS | F_PSEUDO, C_SCAN_MOVPRFX, 0),
   _SVE_INSNC ("orn", 0x05000000, 0xfffc0000, sve_limm, 0, OP3 (SVE_Zd, SVE_Zd, SVE_INV_LIMM), OP_SVE_VVU_BHSD, F_ALIAS | F_PSEUDO, C_SCAN_MOVPRFX, 1),
 
+  /* SVE2 instructions.  */
+  SVE2_INSNC ("adclb", 0x4500d000, 0xffa0fc00, sve_size_sd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_SD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("adclt", 0x4500d400, 0xffa0fc00, sve_size_sd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_SD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSN ("addhnb", 0x45206000, 0xff20fc00, sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHS_HSD, 0, 0),
+  SVE2_INSN ("addhnt", 0x45206400, 0xff20fc00, sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHS_HSD, 0, 0),
+  SVE2_INSNC ("addp", 0x4411a000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("bcax", 0x04603800, 0xffe0fc00, sve_misc, 0, OP4 (SVE_Zd, SVE_Zd, SVE_Zm_16, SVE_Zn), OP_SVE_DDDD, 0, C_SCAN_MOVPRFX, 1),
+  SVE2_INSNC ("bsl", 0x04203c00, 0xffe0fc00, sve_misc, 0, OP4 (SVE_Zd, SVE_Zd, SVE_Zm_16, SVE_Zn), OP_SVE_DDDD, 0, C_SCAN_MOVPRFX, 1),
+  SVE2_INSNC ("bsl1n", 0x04603c00, 0xffe0fc00, sve_misc, 0, OP4 (SVE_Zd, SVE_Zd, SVE_Zm_16, SVE_Zn), OP_SVE_DDDD, 0, C_SCAN_MOVPRFX, 1),
+  SVE2_INSNC ("bsl2n", 0x04a03c00, 0xffe0fc00, sve_misc, 0, OP4 (SVE_Zd, SVE_Zd, SVE_Zm_16, SVE_Zn), OP_SVE_DDDD, 0, C_SCAN_MOVPRFX, 1),
+  SVE2_INSNC ("cadd", 0x4500d800, 0xff3ff800, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Zd, SVE_Zn, SVE_IMM_ROT3), OP_SVE_VVVU_BHSD, 0, C_SCAN_MOVPRFX, 1),
+  SVE2_INSNC ("cdot", 0x44801000, 0xffa0f000, sve_size_sd, 0, OP4 (SVE_Zd, SVE_Zn, SVE_Zm_16, SVE_IMM_ROT2), OP_SVE_VVVU_SD_BH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("cdot", 0x44e04000, 0xffe0f000, sve_misc, 0, OP4 (SVE_Zd, SVE_Zn, SVE_Zm4_INDEX, SVE_IMM_ROT2), OP_SVE_DHHU, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("cdot", 0x44a04000, 0xffe0f000, sve_misc, 0, OP4 (SVE_Zd, SVE_Zn, SVE_Zm3_INDEX, SVE_IMM_ROT2), OP_SVE_SBBU, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("cmla", 0x44002000, 0xff20f000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Zn, SVE_Zm_16, SVE_IMM_ROT2), OP_SVE_VVVU_BHSD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("cmla", 0x44a06000, 0xffe0f000, sve_misc, 0, OP4 (SVE_Zd, SVE_Zn, SVE_Zm3_INDEX, SVE_IMM_ROT2), OP_SVE_VVVU_H, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("cmla", 0x44e06000, 0xffe0f000, sve_misc, 0, OP4 (SVE_Zd, SVE_Zn, SVE_Zm4_INDEX, SVE_IMM_ROT2), OP_SVE_VVVU_S, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("eor3", 0x04203800, 0xffe0fc00, sve_misc, 0, OP4 (SVE_Zd, SVE_Zd, SVE_Zm_16, SVE_Zn), OP_SVE_DDDD, 0, C_SCAN_MOVPRFX, 1),
+  SVE2_INSNC ("eorbt", 0x45009000, 0xff20fc00, sve_size_bhsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHSD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("eortb", 0x45009400, 0xff20fc00, sve_size_bhsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHSD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSN ("ext", 0x05600000, 0xffe0e000, sve_misc, 0, OP3 (SVE_Zd, SVE_ZnxN, SVE_UIMM8_53), OP_SVE_BBU, F_OD(2), 0),
+  SVE2_INSNC ("faddp", 0x64108000, 0xff3fe000, sve_size_hsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_HSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSN ("fcvtlt", 0x6489a000, 0xffffe000, sve_misc, 0, OP3 (SVE_Zd, SVE_Pg3, SVE_Zn), OP_SVE_SMH, 0, 0),
+  SVE2_INSN ("fcvtlt", 0x64cba000, 0xffffe000, sve_misc, 0, OP3 (SVE_Zd, SVE_Pg3, SVE_Zn), OP_SVE_DMS, 0, 0),
+  SVE2_INSN ("fcvtnt", 0x6488a000, 0xffffe000, sve_misc, 0, OP3 (SVE_Zd, SVE_Pg3, SVE_Zn), OP_SVE_HMS, 0, 0),
+  SVE2_INSN ("fcvtnt", 0x64caa000, 0xffffe000, sve_misc, 0, OP3 (SVE_Zd, SVE_Pg3, SVE_Zn), OP_SVE_SMD, 0, 0),
+  SVE2_INSNC ("fcvtx", 0x650aa000, 0xffffe000, sve_misc, 0, OP3 (SVE_Zd, SVE_Pg3, SVE_Zn), OP_SVE_SMD, 0, C_SCAN_MOVPRFX | C_MAX_ELEM, 0),
+  SVE2_INSN ("fcvtxnt", 0x640aa000, 0xffffe000, sve_misc, 0, OP3 (SVE_Zd, SVE_Pg3, SVE_Zn), OP_SVE_SMD, 0, 0),
+  SVE2_INSNC ("flogb", 0x6518a000, 0xfff9e000, sve_size_hsd2, 0, OP3 (SVE_Zd, SVE_Pg3, SVE_Zn), OP_SVE_VMV_HSD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("fmaxnmp", 0x64148000, 0xff3fe000, sve_size_hsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_HSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("fmaxp", 0x64168000, 0xff3fe000, sve_size_hsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_HSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("fminnmp", 0x64158000, 0xff3fe000, sve_size_hsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_HSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("fminp", 0x64178000, 0xff3fe000, sve_size_hsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_HSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("fmlalb", 0x64a04000, 0xffe0f400, sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("fmlalb", 0x64a08000, 0xffe0fc00, sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_SHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("fmlalt", 0x64a04400, 0xffe0f400, sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("fmlalt", 0x64a08400, 0xffe0fc00, sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_SHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("fmlslb", 0x64a06000, 0xffe0f400, sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("fmlslb", 0x64a0a000, 0xffe0fc00, sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_SHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("fmlslt", 0x64a06400, 0xffe0f400, sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("fmlslt", 0x64a0a400, 0xffe0fc00, sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_SHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSN ("histcnt", 0x45a0c000, 0xffa0e000, sve_size_sd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zn, SVE_Zm_16), OP_SVE_VZVV_SD, 0, 0),
+  SVE2_INSN ("histseg", 0x4520a000, 0xffe0fc00, sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_BBB, 0, 0),
+  SVE2_INSN ("ldnt1b", 0x8400a000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_ZX), OP_SVE_SZS, F_OD(1), 0),
+  SVE2_INSN ("ldnt1b", 0xc400c000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_ZX), OP_SVE_DZD, F_OD(1), 0),
+  SVE2_INSN ("ldnt1d", 0xc580c000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_ZX), OP_SVE_DZD, F_OD(1), 0),
+  SVE2_INSN ("ldnt1h", 0x8480a000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_ZX), OP_SVE_SZS, F_OD(1), 0),
+  SVE2_INSN ("ldnt1h", 0xc480c000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_ZX), OP_SVE_DZD, F_OD(1), 0),
+  SVE2_INSN ("ldnt1sb", 0x84008000, 0xbfe0e000, sve_size_sd2, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_ZX), OP_SVE_VZV_SD, F_OD(1), 0),
+  SVE2_INSN ("ldnt1sh", 0x84808000, 0xbfe0e000, sve_size_sd2, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_ZX), OP_SVE_VZV_SD, F_OD(1), 0),
+  SVE2_INSN ("ldnt1sw", 0xc5008000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_ZX), OP_SVE_DZD, F_OD(1), 0),
+  SVE2_INSN ("ldnt1w", 0x8500a000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_ZX), OP_SVE_SZS, F_OD(1), 0),
+  SVE2_INSN ("ldnt1w", 0xc500c000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_ZX), OP_SVE_DZD, F_OD(1), 0),
+  SVE2_INSN ("match", 0x45208000, 0xffa0e010, sve_size_bh, 0, OP4 (SVE_Pd, SVE_Pg3, SVE_Zn, SVE_Zm_16), OP_SVE_VZVV_BH, 0, 0),
+  SVE2_INSNC ("mla", 0x44200800, 0xffa0fc00, sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_22_INDEX), OP_SVE_HHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("mla", 0x44a00800, 0xffe0fc00, sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_INDEX), OP_SVE_SSS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("mla", 0x44e00800, 0xffe0fc00, sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_INDEX), OP_SVE_DDD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("mls", 0x44200c00, 0xffa0fc00, sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_22_INDEX), OP_SVE_HHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("mls", 0x44a00c00, 0xffe0fc00, sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_INDEX), OP_SVE_SSS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("mls", 0x44e00c00, 0xffe0fc00, sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_INDEX), OP_SVE_DDD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSN ("mul", 0x4420f800, 0xffa0fc00, sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_22_INDEX), OP_SVE_HHH, 0, 0),
+  SVE2_INSN ("mul", 0x44a0f800, 0xffe0fc00,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_INDEX), OP_SVE_SSS, 0, 0),
+  SVE2_INSN ("mul", 0x44e0f800, 0xffe0fc00,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_INDEX), OP_SVE_DDD, 0, 0),
+  SVE2_INSN ("mul", 0x04206000, 0xff20fc00,  sve_size_bhsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHSD, 0, 0),
+  SVE2_INSNC ("nbsl", 0x04e03c00, 0xffe0fc00, sve_misc, 0, OP4 (SVE_Zd, SVE_Zd, SVE_Zm_16, SVE_Zn), OP_SVE_DDDD, 0, C_SCAN_MOVPRFX, 1),
+  SVE2_INSN ("nmatch", 0x45208010, 0xffa0e010,  sve_size_bh, 0, OP4 (SVE_Pd, SVE_Pg3, SVE_Zn, SVE_Zm_16), OP_SVE_VZVV_BH, 0, 0),
+  SVE2_INSN ("pmul", 0x04206400, 0xffe0fc00,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_BBB, 0, 0),
+  SVE2_INSN ("raddhnb", 0x45206800, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHS_HSD, 0, 0),
+  SVE2_INSN ("raddhnt", 0x45206c00, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHS_HSD, 0, 0),
+  SVE2_INSN ("rshrnb", 0x45201800, 0xffa0fc00,  sve_shift_tsz_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHRIMM_UNPRED_22), OP_SVE_VVU_BHS_HSD, 0, 0),
+  SVE2_INSN ("rshrnt", 0x45201c00, 0xffa0fc00,  sve_shift_tsz_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHRIMM_UNPRED_22), OP_SVE_VVU_BHS_HSD, 0, 0),
+  SVE2_INSN ("rsubhnb", 0x45207800, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHS_HSD, 0, 0),
+  SVE2_INSN ("rsubhnt", 0x45207c00, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHS_HSD, 0, 0),
+  SVE2_INSNC ("saba", 0x4500f800, 0xff20fc00,  sve_size_bhsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHSD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sabalb", 0x4500c000, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sabalt", 0x4500c400, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSN ("sabdlb", 0x45003000, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, 0),
+  SVE2_INSN ("sabdlt", 0x45003400, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, 0),
+  SVE2_INSNC ("sadalp", 0x4404a000, 0xff3fe000,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Pg3, SVE_Zn), OP_SVE_VMV_HSD_BHS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSN ("saddlb", 0x45000000, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, 0),
+  SVE2_INSN ("saddlbt", 0x45008000, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, 0),
+  SVE2_INSN ("saddlt", 0x45000400, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, 0),
+  SVE2_INSN ("saddwb", 0x45004000, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS2, 0, 0),
+  SVE2_INSN ("saddwt", 0x45004400, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS2, 0, 0),
+  SVE2_INSNC ("sbclb", 0x4580d000, 0xffa0fc00,  sve_size_sd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_SD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sbclt", 0x4580d400, 0xffa0fc00,  sve_size_sd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_SD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("shadd", 0x44108000, 0xff3fe000,  sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSN ("shrnb", 0x45201000, 0xffa0fc00,  sve_shift_tsz_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHRIMM_UNPRED_22), OP_SVE_VVU_BHS_HSD, 0, 0),
+  SVE2_INSN ("shrnt", 0x45201400, 0xffa0fc00,  sve_shift_tsz_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHRIMM_UNPRED_22), OP_SVE_VVU_BHS_HSD, 0, 0),
+  SVE2_INSNC ("shsub", 0x44128000, 0xff3fe000,  sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("shsubr", 0x44168000, 0xff3fe000,  sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSN ("sli", 0x4500f400, 0xff20fc00,  sve_shift_tsz_bhsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHLIMM_UNPRED), OP_SVE_VVU_BHSD, 0, 0),
+  SVE2_INSNC ("smaxp", 0x4414a000, 0xff3fe000,  sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sminp", 0x4416a000, 0xff3fe000,  sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("smlalb", 0x44a08000, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("smlalb", 0x44e08000, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_11_INDEX), OP_SVE_DSS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("smlalb", 0x44004000, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("smlalt", 0x44a08400, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("smlalt", 0x44e08400, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_11_INDEX), OP_SVE_DSS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("smlalt", 0x44004400, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("smlslb", 0x44a0a000, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("smlslb", 0x44e0a000, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_11_INDEX), OP_SVE_DSS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("smlslb", 0x44005000, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("smlslt", 0x44a0a400, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("smlslt", 0x44e0a400, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_11_INDEX), OP_SVE_DSS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("smlslt", 0x44005400, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSN ("smulh", 0x04206800, 0xff20fc00,  sve_size_bhsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHSD, 0, 0),
+  SVE2_INSN ("smullb", 0x44a0c000, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, 0),
+  SVE2_INSN ("smullb", 0x44e0c000, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_11_INDEX), OP_SVE_DSS, 0, 0),
+  SVE2_INSN ("smullb", 0x45007000, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, 0),
+  SVE2_INSN ("smullt", 0x44a0c400, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, 0),
+  SVE2_INSN ("smullt", 0x44e0c400, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_11_INDEX), OP_SVE_DSS, 0, 0),
+  SVE2_INSN ("smullt", 0x45007400, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, 0),
+  SVE2_INSN ("splice", 0x052d8000, 0xff3fe000, sve_size_bhsd, 0, OP3 (SVE_Zd, SVE_Pg3, SVE_ZnxN), OP_SVE_VUV_BHSD, F_OD(2), 0),
+  SVE2_INSNC ("sqabs", 0x4408a000, 0xff3fe000,  sve_size_bhsd, 0, OP3 (SVE_Zd, SVE_Pg3, SVE_Zn), OP_SVE_VMV_BHSD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqadd", 0x44188000, 0xff3fe000,  sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("sqcadd", 0x4501d800, 0xff3ff800,  sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Zd, SVE_Zn, SVE_IMM_ROT3), OP_SVE_VVVU_BHSD, 0, C_SCAN_MOVPRFX, 1),
+  SVE2_INSNC ("sqdmlalb", 0x44a02000, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqdmlalb", 0x44e02000, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_11_INDEX), OP_SVE_DSS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqdmlalb", 0x44006000, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqdmlalbt", 0x44000800, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqdmlalt", 0x44a02400, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqdmlalt", 0x44e02400, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_11_INDEX), OP_SVE_DSS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqdmlalt", 0x44006400, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqdmlslb", 0x44a03000, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqdmlslb", 0x44e03000, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_11_INDEX), OP_SVE_DSS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqdmlslb", 0x44006800, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqdmlslbt", 0x44000c00, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqdmlslt", 0x44a03400, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqdmlslt", 0x44e03400, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_11_INDEX), OP_SVE_DSS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqdmlslt", 0x44006c00, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSN ("sqdmulh", 0x4420f000, 0xffa0fc00,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_22_INDEX), OP_SVE_HHH, 0, 0),
+  SVE2_INSN ("sqdmulh", 0x44a0f000, 0xffe0fc00,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_INDEX), OP_SVE_SSS, 0, 0),
+  SVE2_INSN ("sqdmulh", 0x44e0f000, 0xffe0fc00,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_INDEX), OP_SVE_DDD, 0, 0),
+  SVE2_INSN ("sqdmulh", 0x04207000, 0xff20fc00,  sve_size_bhsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHSD, 0, 0),
+  SVE2_INSN ("sqdmullb", 0x44a0e000, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, 0),
+  SVE2_INSN ("sqdmullb", 0x44e0e000, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_11_INDEX), OP_SVE_DSS, 0, 0),
+  SVE2_INSN ("sqdmullb", 0x45006000, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, 0),
+  SVE2_INSN ("sqdmullt", 0x44a0e400, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, 0),
+  SVE2_INSN ("sqdmullt", 0x44e0e400, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_11_INDEX), OP_SVE_DSS, 0, 0),
+  SVE2_INSN ("sqdmullt", 0x45006400, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, 0),
+  SVE2_INSNC ("sqneg", 0x4409a000, 0xff3fe000,  sve_size_bhsd, 0, OP3 (SVE_Zd, SVE_Pg3, SVE_Zn), OP_SVE_VMV_BHSD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqrdcmlah", 0x44a07000, 0xffe0f000,  sve_misc, 0, OP4 (SVE_Zd, SVE_Zn, SVE_Zm3_INDEX, SVE_IMM_ROT2), OP_SVE_HHHU, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqrdcmlah", 0x44e07000, 0xffe0f000,  sve_misc, 0, OP4 (SVE_Zd, SVE_Zn, SVE_Zm4_INDEX, SVE_IMM_ROT2), OP_SVE_SSSU, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqrdcmlah", 0x44003000, 0xff20f000,  sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Zn, SVE_Zm_16, SVE_IMM_ROT2), OP_SVE_VVVU_BHSD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqrdmlah", 0x44201000, 0xffa0fc00,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_22_INDEX), OP_SVE_HHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqrdmlah", 0x44a01000, 0xffe0fc00,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_INDEX), OP_SVE_SSS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqrdmlah", 0x44e01000, 0xffe0fc00,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_INDEX), OP_SVE_DDD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqrdmlah", 0x44007000, 0xff20fc00,  sve_size_bhsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHSD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqrdmlsh", 0x44201400, 0xffa0fc00,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_22_INDEX), OP_SVE_HHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqrdmlsh", 0x44a01400, 0xffe0fc00,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_INDEX), OP_SVE_SSS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqrdmlsh", 0x44e01400, 0xffe0fc00,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_INDEX), OP_SVE_DDD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("sqrdmlsh", 0x44007400, 0xff20fc00,  sve_size_bhsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHSD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSN ("sqrdmulh", 0x4420f400, 0xffa0fc00,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_22_INDEX), OP_SVE_HHH, 0, 0),
+  SVE2_INSN ("sqrdmulh", 0x44a0f400, 0xffe0fc00,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_INDEX), OP_SVE_SSS, 0, 0),
+  SVE2_INSN ("sqrdmulh", 0x44e0f400, 0xffe0fc00,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_INDEX), OP_SVE_DDD, 0, 0),
+  SVE2_INSN ("sqrdmulh", 0x04207400, 0xff20fc00,  sve_size_bhsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHSD, 0, 0),
+  SVE2_INSNC ("sqrshl", 0x440a8000, 0xff3fe000,  sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("sqrshlr", 0x440e8000, 0xff3fe000,  sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSN ("sqrshrnb", 0x45202800, 0xffa0fc00,  sve_shift_tsz_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHRIMM_UNPRED_22), OP_SVE_VVU_BHS_HSD, 0, 0),
+  SVE2_INSN ("sqrshrnt", 0x45202c00, 0xffa0fc00,  sve_shift_tsz_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHRIMM_UNPRED_22), OP_SVE_VVU_BHS_HSD, 0, 0),
+  SVE2_INSN ("sqrshrunb", 0x45200800, 0xffa0fc00,  sve_shift_tsz_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHRIMM_UNPRED_22), OP_SVE_VVU_BHS_HSD, 0, 0),
+  SVE2_INSN ("sqrshrunt", 0x45200c00, 0xffa0fc00,  sve_shift_tsz_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHRIMM_UNPRED_22), OP_SVE_VVU_BHS_HSD, 0, 0),
+  SVE2_INSNC ("sqshl", 0x04068000, 0xff3fe000,  sve_shift_pred, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_SHLIMM_PRED), OP_SVE_VMVU_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("sqshl", 0x44088000, 0xff3fe000,  sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("sqshlr", 0x440c8000, 0xff3fe000,  sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("sqshlu", 0x040f8000, 0xff3fe000,  sve_shift_pred, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_SHLIMM_PRED), OP_SVE_VMVU_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSN ("sqshrnb", 0x45202000, 0xffa0fc00,  sve_shift_tsz_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHRIMM_UNPRED_22), OP_SVE_VVU_BHS_HSD, 0, 0),
+  SVE2_INSN ("sqshrnt", 0x45202400, 0xffa0fc00,  sve_shift_tsz_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHRIMM_UNPRED_22), OP_SVE_VVU_BHS_HSD, 0, 0),
+  SVE2_INSN ("sqshrunb", 0x45200000, 0xffa0fc00,  sve_shift_tsz_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHRIMM_UNPRED_22), OP_SVE_VVU_BHS_HSD, 0, 0),
+  SVE2_INSN ("sqshrunt", 0x45200400, 0xffa0fc00,  sve_shift_tsz_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHRIMM_UNPRED_22), OP_SVE_VVU_BHS_HSD, 0, 0),
+  SVE2_INSNC ("sqsub", 0x441a8000, 0xff3fe000,  sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("sqsubr", 0x441e8000, 0xff3fe000,  sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSN ("sqxtnb", 0x45204000, 0xffa7fc00,  sve_size_tsz_bhs, 0, OP2 (SVE_Zd, SVE_Zn), OP_SVE_VV_BHS_HSD, 0, 0),
+  SVE2_INSN ("sqxtnt", 0x45204400, 0xffa7fc00,  sve_size_tsz_bhs, 0, OP2 (SVE_Zd, SVE_Zn), OP_SVE_VV_BHS_HSD, 0, 0),
+  SVE2_INSN ("sqxtunb", 0x45205000, 0xffa7fc00,  sve_size_tsz_bhs, 0, OP2 (SVE_Zd, SVE_Zn), OP_SVE_VV_BHS_HSD, 0, 0),
+  SVE2_INSN ("sqxtunt", 0x45205400, 0xffa7fc00,  sve_size_tsz_bhs, 0, OP2 (SVE_Zd, SVE_Zn), OP_SVE_VV_BHS_HSD, 0, 0),
+  SVE2_INSNC ("srhadd", 0x44148000, 0xff3fe000,  sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSN ("sri", 0x4500f000, 0xff20fc00,  sve_shift_tsz_bhsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHRIMM_UNPRED), OP_SVE_VVU_BHSD, 0, 0),
+  SVE2_INSNC ("srshl", 0x44028000, 0xff3fe000,  sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("srshlr", 0x44068000, 0xff3fe000,  sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("srshr", 0x040c8000, 0xff3fe000,  sve_shift_pred, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_SHRIMM_PRED), OP_SVE_VMVU_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("srsra", 0x4500e800, 0xff20fc00,  sve_shift_tsz_bhsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHRIMM_UNPRED), OP_SVE_VVU_BHSD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSN ("sshllb", 0x4500a000, 0xffa0fc00,  sve_shift_tsz_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHLIMM_UNPRED_22), OP_SVE_VVU_HSD_BHS, 0, 0),
+  SVE2_INSN ("sshllt", 0x4500a400, 0xffa0fc00,  sve_shift_tsz_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHLIMM_UNPRED_22), OP_SVE_VVU_HSD_BHS, 0, 0),
+  SVE2_INSNC ("ssra", 0x4500e000, 0xff20fc00,  sve_shift_tsz_bhsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHRIMM_UNPRED), OP_SVE_VVU_BHSD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSN ("ssublb", 0x45001000, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, 0),
+  SVE2_INSN ("ssublbt", 0x45008800, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, 0),
+  SVE2_INSN ("ssublt", 0x45001400, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, 0),
+  SVE2_INSN ("ssubltb", 0x45008c00, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, 0),
+  SVE2_INSN ("ssubwb", 0x45005000, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS2, 0, 0),
+  SVE2_INSN ("ssubwt", 0x45005400, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS2, 0, 0),
+  SVE2_INSN ("stnt1b", 0xe4402000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_ZX), OP_SVE_SUS, F_OD(1), 0),
+  SVE2_INSN ("stnt1b", 0xe4002000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_ZX), OP_SVE_DUD, F_OD(1), 0),
+  SVE2_INSN ("stnt1d", 0xe5802000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_ZX), OP_SVE_DUD, F_OD(1), 0),
+  SVE2_INSN ("stnt1h", 0xe4c02000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_ZX), OP_SVE_SUS, F_OD(1), 0),
+  SVE2_INSN ("stnt1h", 0xe4802000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_ZX), OP_SVE_DUD, F_OD(1), 0),
+  SVE2_INSN ("stnt1w", 0xe5402000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_ZX), OP_SVE_SUS, F_OD(1), 0),
+  SVE2_INSN ("stnt1w", 0xe5002000, 0xffe0e000, sve_misc, 0, OP3 (SVE_ZtxN, SVE_Pg3, SVE_ADDR_ZX), OP_SVE_DUD, F_OD(1), 0),
+  SVE2_INSN ("subhnb", 0x45207000, 0xff20fc00, sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHS_HSD, 0, 0),
+  SVE2_INSN ("subhnt", 0x45207400, 0xff20fc00, sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHS_HSD, 0, 0),
+  SVE2_INSNC ("suqadd", 0x441c8000, 0xff3fe000,  sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSN ("tbl", 0x05202800, 0xff20fc00, sve_size_bhsd, 0, OP3 (SVE_Zd, SVE_ZnxN, SVE_Zm_16), OP_SVE_VVV_BHSD, F_OD(2), 0),
+  SVE2_INSN ("tbx", 0x05202c00, 0xff20fc00, sve_size_bhsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHSD, 0, 0),
+  SVE2_INSNC ("uaba", 0x4500fc00, 0xff20fc00, sve_size_bhsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHSD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("uabalb", 0x4500c800, 0xff20fc00, sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("uabalt", 0x4500cc00, 0xff20fc00, sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSN ("uabdlb", 0x45003800, 0xff20fc00, sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, 0),
+  SVE2_INSN ("uabdlt", 0x45003c00, 0xff20fc00, sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, 0),
+  SVE2_INSNC ("uadalp", 0x4405a000, 0xff3fe000, sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Pg3, SVE_Zn), OP_SVE_VMV_HSD_BHS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSN ("uaddlb", 0x45000800, 0xff20fc00, sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, 0),
+  SVE2_INSN ("uaddlt", 0x45000c00, 0xff20fc00, sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, 0),
+  SVE2_INSN ("uaddwb", 0x45004800, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS2, 0, 0),
+  SVE2_INSN ("uaddwt", 0x45004c00, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS2, 0, 0),
+  SVE2_INSNC ("uhadd", 0x44118000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("uhsub", 0x44138000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("uhsubr", 0x44178000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("umaxp", 0x4415a000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("uminp", 0x4417a000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("umlalb", 0x44a09000, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("umlalb", 0x44e09000, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_11_INDEX), OP_SVE_DSS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("umlalb", 0x44004800, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("umlalt", 0x44a09400, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("umlalt", 0x44e09400, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_11_INDEX), OP_SVE_DSS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("umlalt", 0x44004c00, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("umlslb", 0x44a0b000, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("umlslb", 0x44e0b000, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_11_INDEX), OP_SVE_DSS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("umlslb", 0x44005800, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("umlslt", 0x44a0b400, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("umlslt", 0x44e0b400, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_11_INDEX), OP_SVE_DSS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("umlslt", 0x44005c00, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSN ("umulh", 0x04206c00, 0xff20fc00, sve_size_bhsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHSD, 0, 0),
+  SVE2_INSN ("umullb", 0x44a0d000, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, 0),
+  SVE2_INSN ("umullb", 0x44e0d000, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_11_INDEX), OP_SVE_DSS, 0, 0),
+  SVE2_INSN ("umullb", 0x45007800, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, 0),
+  SVE2_INSN ("umullt", 0x44a0d400, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm3_11_INDEX), OP_SVE_SHH, 0, 0),
+  SVE2_INSN ("umullt", 0x44e0d400, 0xffe0f400,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm4_11_INDEX), OP_SVE_DSS, 0, 0),
+  SVE2_INSN ("umullt", 0x45007c00, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, 0),
+  SVE2_INSNC ("uqadd", 0x44198000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("uqrshl", 0x440b8000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("uqrshlr", 0x440f8000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSN ("uqrshrnb", 0x45203800, 0xffa0fc00,  sve_shift_tsz_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHRIMM_UNPRED_22), OP_SVE_VVU_BHS_HSD, 0, 0),
+  SVE2_INSN ("uqrshrnt", 0x45203c00, 0xffa0fc00,  sve_shift_tsz_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHRIMM_UNPRED_22), OP_SVE_VVU_BHS_HSD, 0, 0),
+  SVE2_INSNC ("uqshl", 0x04078000, 0xff3fe000,  sve_shift_pred, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_SHLIMM_PRED), OP_SVE_VMVU_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("uqshl", 0x44098000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("uqshlr", 0x440d8000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSN ("uqshrnb", 0x45203000, 0xffa0fc00,  sve_shift_tsz_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHRIMM_UNPRED_22), OP_SVE_VVU_BHS_HSD, 0, 0),
+  SVE2_INSN ("uqshrnt", 0x45203400, 0xffa0fc00,  sve_shift_tsz_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHRIMM_UNPRED_22), OP_SVE_VVU_BHS_HSD, 0, 0),
+  SVE2_INSNC ("uqsub", 0x441b8000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("uqsubr", 0x441f8000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSN ("uqxtnb", 0x45204800, 0xffa7fc00,  sve_size_tsz_bhs, 0, OP2 (SVE_Zd, SVE_Zn), OP_SVE_VV_BHS_HSD, 0, 0),
+  SVE2_INSN ("uqxtnt", 0x45204c00, 0xffa7fc00,  sve_size_tsz_bhs, 0, OP2 (SVE_Zd, SVE_Zn), OP_SVE_VV_BHS_HSD, 0, 0),
+  SVE2_INSNC ("urecpe", 0x4480a000, 0xffffe000, sve_misc, 0, OP3 (SVE_Zd, SVE_Pg3, SVE_Zn), OP_SVE_SMS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("urhadd", 0x44158000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("urshl", 0x44038000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("urshlr", 0x44078000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("urshr", 0x040d8000, 0xff3fe000,  sve_shift_pred, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_SHRIMM_PRED), OP_SVE_VMVU_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("ursqrte", 0x4481a000, 0xffffe000, sve_misc, 0, OP3 (SVE_Zd, SVE_Pg3, SVE_Zn), OP_SVE_SMS, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSNC ("ursra", 0x4500ec00, 0xff20fc00,  sve_shift_tsz_bhsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHRIMM_UNPRED), OP_SVE_VVU_BHSD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSN ("ushllb", 0x4500a800, 0xffa0fc00,  sve_shift_tsz_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHLIMM_UNPRED_22), OP_SVE_VVU_HSD_BHS, 0, 0),
+  SVE2_INSN ("ushllt", 0x4500ac00, 0xffa0fc00,  sve_shift_tsz_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHLIMM_UNPRED_22), OP_SVE_VVU_HSD_BHS, 0, 0),
+  SVE2_INSNC ("usqadd", 0x441d8000, 0xff3fe000, sve_size_bhsd, 0, OP4 (SVE_Zd, SVE_Pg3, SVE_Zd, SVE_Zn), OP_SVE_VMVV_BHSD, 0, C_SCAN_MOVPRFX, 2),
+  SVE2_INSNC ("usra", 0x4500e400, 0xff20fc00,  sve_shift_tsz_bhsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_SHRIMM_UNPRED), OP_SVE_VVU_BHSD, 0, C_SCAN_MOVPRFX, 0),
+  SVE2_INSN ("usublb", 0x45001800, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, 0),
+  SVE2_INSN ("usublt", 0x45001c00, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS, 0, 0),
+  SVE2_INSN ("usubwb", 0x45005800, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS2, 0, 0),
+  SVE2_INSN ("usubwt", 0x45005c00, 0xff20fc00,  sve_size_hsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_HSD_BHS2, 0, 0),
+  SVE2_INSN ("whilege", 0x25200000, 0xff20fc10, sve_size_bhsd, 0, OP3 (SVE_Pd, Rn, Rm), OP_SVE_VWW_BHSD, 0, 0),
+  SVE2_INSN ("whilege", 0x25201000, 0xff20fc10, sve_size_bhsd, 0, OP3 (SVE_Pd, Rn, Rm), OP_SVE_VXX_BHSD, 0, 0),
+  SVE2_INSN ("whilegt", 0x25200010, 0xff20fc10, sve_size_bhsd, 0, OP3 (SVE_Pd, Rn, Rm), OP_SVE_VWW_BHSD, 0, 0),
+  SVE2_INSN ("whilegt", 0x25201010, 0xff20fc10, sve_size_bhsd, 0, OP3 (SVE_Pd, Rn, Rm), OP_SVE_VXX_BHSD, 0, 0),
+  SVE2_INSN ("whilehi", 0x25200810, 0xff20fc10, sve_size_bhsd, 0, OP3 (SVE_Pd, Rn, Rm), OP_SVE_VWW_BHSD, 0, 0),
+  SVE2_INSN ("whilehi", 0x25201810, 0xff20fc10, sve_size_bhsd, 0, OP3 (SVE_Pd, Rn, Rm), OP_SVE_VXX_BHSD, 0, 0),
+  SVE2_INSN ("whilehs", 0x25200800, 0xff20fc10, sve_size_bhsd, 0, OP3 (SVE_Pd, Rn, Rm), OP_SVE_VWW_BHSD, 0, 0),
+  SVE2_INSN ("whilehs", 0x25201800, 0xff20fc10, sve_size_bhsd, 0, OP3 (SVE_Pd, Rn, Rm), OP_SVE_VXX_BHSD, 0, 0),
+  SVE2_INSN ("whilerw", 0x25203010, 0xff20fc10, sve_size_bhsd, 0, OP3 (SVE_Pd, Rn, Rm), OP_SVE_VXX_BHSD, 0, 0),
+  SVE2_INSN ("whilewr", 0x25203000, 0xff20fc10, sve_size_bhsd, 0, OP3 (SVE_Pd, Rn, Rm), OP_SVE_VXX_BHSD, 0, 0),
+  SVE2_INSNC ("xar", 0x04203400, 0xff20fc00, sve_shift_tsz_bhsd, 0, OP4 (SVE_Zd, SVE_Zd, SVE_Zn, SVE_SHRIMM_UNPRED), OP_SVE_VVVU_BHSD, 0, C_SCAN_MOVPRFX, 1),
+  /* SVE2_SM4 instructions.  */
+  SVE2SM4_INSN ("sm4e", 0x4523e000, 0xfffffc00,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zd, SVE_Zn), OP_SVE_SSS, 0, 1),
+  SVE2SM4_INSN ("sm4ekey", 0x4520f000, 0xffe0fc00,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_SSS, 0, 0),
+  /* SVE2_AES instructions.  */
+  SVE2AES_INSN ("aesd", 0x4522e400, 0xfffffc00, sve_misc, 0, OP3 (SVE_Zd, SVE_Zd, SVE_Zn), OP_SVE_BBB, 0, 1),
+  SVE2AES_INSN ("aese", 0x4522e000, 0xfffffc00, sve_misc, 0, OP3 (SVE_Zd, SVE_Zd, SVE_Zn), OP_SVE_BBB, 0, 1),
+  SVE2AES_INSN ("aesimc", 0x4520e400, 0xffffffe0, sve_misc, 0, OP2 (SVE_Zd, SVE_Zd), OP_SVE_BB, 0, 1),
+  SVE2AES_INSN ("aesmc", 0x4520e000, 0xffffffe0, sve_misc, 0, OP2 (SVE_Zd, SVE_Zd), OP_SVE_BB, 0, 1),
+  SVE2AES_INSN ("pmullb", 0x45006800, 0xff20fc00,  sve_size_013, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_QHD_DBS, 0, 0),
+  SVE2AES_INSN ("pmullt", 0x45006c00, 0xff20fc00,  sve_size_013, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_QHD_DBS, 0, 0),
+  /* SVE2_SHA3 instructions.  */
+  SVE2SHA3_INSN ("rax1", 0x4520f400, 0xffe0fc00,  sve_misc, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_DDD, 0, 0),
+  /* SVE2_BITPERM instructions. */
+  SVE2BITPERM_INSN ("bdep", 0x4500b400, 0xff20fc00, sve_size_bhsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHSD, 0, 0),
+  SVE2BITPERM_INSN ("bext", 0x4500b000, 0xff20fc00, sve_size_bhsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHSD, 0, 0),
+  SVE2BITPERM_INSN ("bgrp", 0x4500b800, 0xff20fc00, sve_size_bhsd, 0, OP3 (SVE_Zd, SVE_Zn, SVE_Zm_16), OP_SVE_VVV_BHSD, 0, 0),
+
   /* SIMD Dot Product (optional in v8.2-A).  */
   DOT_INSN ("udot", 0x2e009400, 0xbf20fc00, dotproduct, OP3 (Vd, Vn, Vm), QL_V3DOT, F_SIZEQ),
   DOT_INSN ("sdot", 0xe009400,  0xbf20fc00, dotproduct, OP3 (Vd, Vn, Vm), QL_V3DOT, F_SIZEQ),
-- 
2.7.4


* [Patch 16/16] [binutils][aarch64] Add SVE2 tests
  2019-05-01 14:44 [Patch 00/16] [gas][aarch64] SVE2 binutils instructions for aarch64 Matthew Malcomson
                   ` (14 preceding siblings ...)
  2019-05-01 14:55 ` [Patch 15/16] [binutils][aarch64] Add SVE2 instructions Matthew Malcomson
@ 2019-05-01 15:02 ` Matthew Malcomson
  2019-05-02 15:25 ` [Patch 00/16] [gas][aarch64] SVE2 binutils instructions for aarch64 Joseph Myers
  2019-05-03 11:21 ` Nick Clifton
  17 siblings, 0 replies; 24+ messages in thread
From: Matthew Malcomson @ 2019-05-01 15:02 UTC (permalink / raw)
  To: binutils; +Cc: nd

[-- Attachment #1: Type: text/plain, Size: 1259 bytes --]

Add tests that SVE2 instructions are encoded as expected, and tests
that invalid instructions are diagnosed.

Also check that each SVE2 cryptographic extension must be enabled in
order to use its corresponding instructions.

Finally, test that SVE2 instructions whose mnemonics already exist in
SVE still require the SVE2 feature to be enabled.

gzipped patch is attached.
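
As a rough sketch of their shape (hand-written here -- the actual
testcase contents are in the attachment), the encoding tests pair a
source file with a dump-test driver along these lines:

	sve2.s:
		adclb	z0.s, z1.s, z2.s	// base SVE2
		smlalb	z0.s, z1.h, z2.h[0]	// indexed form
		aese	z0.b, z0.b, z1.b	// requires +sve2-aes

	sve2.d:
		#as: -march=armv8-a+sve2+sve2-aes+sve2-sm4+sve2-sha3+bitperm
		#objdump: -dr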

gas/ChangeLog:

2019-04-04  Matthew Malcomson  <matthew.malcomson@arm.com>

	* testsuite/gas/aarch64/illegal-sve2-aes.d: New test.
	* testsuite/gas/aarch64/illegal-sve2-bitperm.d: New test.
	* testsuite/gas/aarch64/illegal-sve2-sha3.d: Test new instructions.
	* testsuite/gas/aarch64/illegal-sve2-sm4.d: Test new instructions.
	* testsuite/gas/aarch64/illegal-sve2-sve1ext.d: Test new instructions.
	* testsuite/gas/aarch64/illegal-sve2-sve1ext.l: Test new instructions.
	* testsuite/gas/aarch64/illegal-sve2.d: Test new instructions.
	* testsuite/gas/aarch64/illegal-sve2.l: Test new instructions.
	* testsuite/gas/aarch64/illegal-sve2.s: Test new instructions.
	* testsuite/gas/aarch64/sve1-extended-sve2.s: New test.
	* testsuite/gas/aarch64/sve2.d: Test new instructions.
	* testsuite/gas/aarch64/sve2.s: Test new instructions.


[-- Attachment #2: sve2-testsuite.patch.gz --]
[-- Type: application/gzip, Size: 36785 bytes --]


* Re: [Patch 00/16] [gas][aarch64] SVE2 binutils instructions for aarch64
  2019-05-01 14:44 [Patch 00/16] [gas][aarch64] SVE2 binutils instructions for aarch64 Matthew Malcomson
                   ` (15 preceding siblings ...)
  2019-05-01 15:02 ` [Patch 16/16] [binutils][aarch64] Add SVE2 tests Matthew Malcomson
@ 2019-05-02 15:25 ` Joseph Myers
  2019-05-02 15:34   ` Matthew Malcomson
  2019-05-03 11:21 ` Nick Clifton
  17 siblings, 1 reply; 24+ messages in thread
From: Joseph Myers @ 2019-05-02 15:25 UTC (permalink / raw)
  To: Matthew Malcomson; +Cc: binutils, nd

On Wed, 1 May 2019, Matthew Malcomson wrote:

> This series of patches adds support for the "Future Architecture Technologies"
> Scalable Vector Extension V2 (SVE2) announced at Linaro Connect.
> https://connect.linaro.org/resources/bkk19/new-technologies-in-the-arm-architecture/

I get a 404 for that URL.

-- 
Joseph S. Myers
joseph@codesourcery.com


* Re: [Patch 00/16] [gas][aarch64] SVE2 binutils instructions for aarch64
  2019-05-02 15:25 ` [Patch 00/16] [gas][aarch64] SVE2 binutils instructions for aarch64 Joseph Myers
@ 2019-05-02 15:34   ` Matthew Malcomson
  0 siblings, 0 replies; 24+ messages in thread
From: Matthew Malcomson @ 2019-05-02 15:34 UTC (permalink / raw)
  To: Joseph Myers; +Cc: binutils, nd

On 02/05/19 16:25, Joseph Myers wrote:
> On Wed, 1 May 2019, Matthew Malcomson wrote:
> 
>> This series of patches adds support for the "Future Architecture Technologies"
>> Scalable Vector Extension V2 (SVE2) announced at Linaro Connect.
>> https://connect.linaro.org/resources/bkk19/new-technologies-in-the-arm-architecture/
> 
> I get a 404 for that URL.
> 

Apologies, I guess the page must have moved since I copied it down.

I can now find the page at this url
https://connect.linaro.org/resources/bkk19/bkk19-202/


* Re: [Patch 00/16] [gas][aarch64] SVE2 binutils instructions for aarch64
  2019-05-01 14:44 [Patch 00/16] [gas][aarch64] SVE2 binutils instructions for aarch64 Matthew Malcomson
                   ` (16 preceding siblings ...)
  2019-05-02 15:25 ` [Patch 00/16] [gas][aarch64] SVE2 binutils instructions for aarch64 Joseph Myers
@ 2019-05-03 11:21 ` Nick Clifton
  2019-05-03 14:56   ` Matthew Malcomson
  17 siblings, 1 reply; 24+ messages in thread
From: Nick Clifton @ 2019-05-03 11:21 UTC (permalink / raw)
  To: Matthew Malcomson, binutils; +Cc: nd

Hi Matthew,

> This series of patches adds support for the "Future Architecture Technologies"
> Scalable Vector Extension V2 (SVE2) announced at Linaro Connect.
> https://connect.linaro.org/resources/bkk19/new-technologies-in-the-arm-architecture/

Sorry to be a pain, but would you mind resending these patches ?

There are two problems:

   1. They do not apply cleanly to the current mainline sources
      now that the TME patches have gone in.

   2. The patch format in your emails is causing problems for my
      email reader (Thunderbird).  I am not sure what format you 
      used, but the patches are showing up as text at the end of
      each email, rather than as attachments.  But if I save the
      email to a file and then try to examine the patch there, it
      is encoded with some kind of MIME encoding.

I do not mind if you combine all of the patches into just one email,
if that makes it easier for you, but it really would help if they
could be updated to match the latest sources, and provided as an
attachment, rather than inline.

Cheers
  Nick


* Re: [Patch 00/16] [gas][aarch64] SVE2 binutils instructions for aarch64
  2019-05-03 11:21 ` Nick Clifton
@ 2019-05-03 14:56   ` Matthew Malcomson
  2019-05-08 10:08     ` Nick Clifton
  0 siblings, 1 reply; 24+ messages in thread
From: Matthew Malcomson @ 2019-05-03 14:56 UTC (permalink / raw)
  To: nickc, binutils; +Cc: nd

[-- Attachment #1: Type: text/plain, Size: 1424 bytes --]

On 03/05/19 12:21, Nick Clifton wrote:
> Hi Matthew,
> 
>> This series of patches adds support for the "Future Architecture Technologies"
>> Scalable Vector Extension V2 (SVE2) announced at Linaro Connect.
>> https://connect.linaro.org/resources/bkk19/new-technologies-in-the-arm-architecture/
> 
> Sorry to be a pain, but would you mind resending these patches ?
> 
> There are two problems:
> 
>     1. They do not apply cleanly to the current mainline sources
>        now that the TME patches have gone in.
> 
>     2. The patch format in your emails is causing problems for my
>        email reader (Thunderbird).  I am not sure what format you
>        used, but the patches are showing up as text at the end of
>        each email, rather than as attachments.  But if I save the
>        email to a file and then try to examine the patch there, it
>        is encoded with some kind of MIME encoding.
> 
> I do not mind if you combine all of the patches into just one email,
> if that makes it easier for you, but it really would help if they
> could be updated to match the latest sources, and provided as an
> attachment, rather than inline.
> 
> Cheers
>    Nick
> 

Hi Nick,

Sure -- I can do that (I should have noticed the recent updates ;-]).

Attached is a tar file of the patch series from "git format-patch" and a 
zipped diff from the entire series.

Cheers,
Matthew

[-- Attachment #2: sve2-entire-diff.patch.gz --]
[-- Type: application/gzip, Size: 84518 bytes --]

[-- Attachment #3: sve2-patch-series.gz --]
[-- Type: application/gzip, Size: 135444 bytes --]


* Re: [Patch 00/16] [gas][aarch64] SVE2 binutils instructions for aarch64
  2019-05-03 14:56   ` Matthew Malcomson
@ 2019-05-08 10:08     ` Nick Clifton
  2019-05-09 13:30       ` Christophe Lyon
  0 siblings, 1 reply; 24+ messages in thread
From: Nick Clifton @ 2019-05-08 10:08 UTC (permalink / raw)
  To: Matthew Malcomson, binutils; +Cc: nd

Hi Matthew,

> Attached is a tar file of the patch series from "git format-patch" and a 
> zipped diff from the entire series.

Thanks - that was exactly what I needed.

Patch series approved - please apply.

Cheers
  Nick


* Re: [Patch 00/16] [gas][aarch64] SVE2 binutils instructions for aarch64
  2019-05-08 10:08     ` Nick Clifton
@ 2019-05-09 13:30       ` Christophe Lyon
  2019-05-09 14:04         ` Matthew Malcomson
  0 siblings, 1 reply; 24+ messages in thread
From: Christophe Lyon @ 2019-05-09 13:30 UTC (permalink / raw)
  To: Nick Clifton; +Cc: Matthew Malcomson, binutils, nd

On Wed, 8 May 2019 at 12:08, Nick Clifton <nickc@redhat.com> wrote:
>
> Hi Matthew,
>
> > Attached is a tar file of the patch series from "git format-patch" and a
> > zipped diff from the entire series.
>
> Thanks - that was exactly what I needed.
>
> Patch series approved - please apply.
>

Hi Matthew,

After your commit, I noticed a new failure on aarch64_be:
gas/aarch64/sve2

Christophe

> Cheers
>   Nick
>

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [Patch 00/16] [gas][aarch64] SVE2 binutils instructions for aarch64
  2019-05-09 13:30       ` Christophe Lyon
@ 2019-05-09 14:04         ` Matthew Malcomson
  0 siblings, 0 replies; 24+ messages in thread
From: Matthew Malcomson @ 2019-05-09 14:04 UTC (permalink / raw)
  To: Christophe Lyon, nickc; +Cc: binutils, nd

[-- Attachment #1: Type: text/plain, Size: 1129 bytes --]

On 09/05/19 14:30, Christophe Lyon wrote:
> On Wed, 8 May 2019 at 12:08, Nick Clifton <nickc@redhat.com> wrote:
>>
>> Hi Matthew,
>>
>>> Attached is a tar file of the patch series from "git format-patch" and a
>>> zipped diff from the entire series.
>>
>> Thanks - that was exactly what I needed.
>>
>> Patch series approved - please apply.
>>
> 
> Hi Matthew,
> 
> After your commit, I noticed a new failure on aarch64_be:
> gas/aarch64/sve2
> 
> Christophe
> 
>> Cheers
>>    Nick
>>
Thanks Christophe -- I'd left some of the objdump output in that test,
so it was accidentally requiring a little-endian target.

Fixed as obvious with the change below (entire patch attached for
reference); a short illustration of the relaxed match follows the diff.


diff --git a/gas/testsuite/gas/aarch64/sve2.d b/gas/testsuite/gas/aarch64/sve2.d
index d7c56c0..efa9b27 100644
--- a/gas/testsuite/gas/aarch64/sve2.d
+++ b/gas/testsuite/gas/aarch64/sve2.d
@@ -1,7 +1,7 @@
 #as: -march=armv8-a+sve2+sve2-aes+sve2-sm4+sve2-sha3+bitperm
 #objdump: -dr
 
-[^:]+:     file format elf64-littleaarch64
+[^:]+:     file format .*
 
 
 Disassembly of section \.text:
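
For reference, the objdump header line that this pattern matches differs
only in the BFD target name between the two builds, so ".*" keeps the test
endian-agnostic.  A minimal sketch -- the object file name is illustrative
and the big-endian target name is assumed (presumably elf64-bigaarch64),
neither is taken from this thread:

   tmpdir/sve2.o:     file format elf64-littleaarch64   (little-endian build; object name illustrative)
   tmpdir/sve2.o:     file format elf64-bigaarch64      (big-endian build; target name assumed)
   [^:]+:     file format .*                            (pattern now in sve2.d; matches either)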


[-- Attachment #2: remove-file-format-restriction.patch --]
[-- Type: text/x-patch; name="remove-file-format-restriction.patch", Size: 1646 bytes --]

commit 8de09632ff6683a48b8acfb05d3b35b4ab4507fe
Author: Matthew Malcomson <matthew.malcomson@arm.com>
Date:   Thu May 9 14:52:45 2019 +0100

    [gas][testsuite] Don't specify arch in testsuite output
    
    My testcase matched against a file format of elf64-littleaarch64 in the
    objdump output.  This was unnecessarily restrictive and causes testcase
    failures on aarch64_be.
    
    Here we remove that restriction.
    Committed as obvious.
    
    Testing done on aarch64_be-none-elf gas to see the failure goes away.
    
    gas/ChangeLog:
    
    2019-05-09  Matthew Malcomson  <matthew.malcomson@arm.com>
    
    	* testsuite/gas/aarch64/sve2.d: Remove file format restriction.

diff --git a/gas/ChangeLog b/gas/ChangeLog
index fa81e97..5307d23 100644
--- a/gas/ChangeLog
+++ b/gas/ChangeLog
@@ -1,5 +1,9 @@
 2019-05-09  Matthew Malcomson  <matthew.malcomson@arm.com>
 
+	* testsuite/gas/aarch64/sve2.d: Remove file format restriction.
+
+2019-05-09  Matthew Malcomson  <matthew.malcomson@arm.com>
+
 	* testsuite/gas/aarch64/illegal-sve2-aes.d: New test.
 	* testsuite/gas/aarch64/illegal-sve2-bitperm.d: New test.
 	* testsuite/gas/aarch64/illegal-sve2-sha3.d: Test new instructions.
diff --git a/gas/testsuite/gas/aarch64/sve2.d b/gas/testsuite/gas/aarch64/sve2.d
index d7c56c0..efa9b27 100644
--- a/gas/testsuite/gas/aarch64/sve2.d
+++ b/gas/testsuite/gas/aarch64/sve2.d
@@ -1,7 +1,7 @@
 #as: -march=armv8-a+sve2+sve2-aes+sve2-sm4+sve2-sha3+bitperm
 #objdump: -dr
 
-[^:]+:     file format elf64-littleaarch64
+[^:]+:     file format .*
 
 
 Disassembly of section \.text:

^ permalink raw reply	[flat|nested] 24+ messages in thread

end of thread, other threads:[~2019-05-09 14:04 UTC | newest]

Thread overview: 24+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-05-01 14:44 [Patch 00/16] [gas][aarch64] SVE2 binutils instructions for aarch64 Matthew Malcomson
2019-05-01 14:44 ` [PATCH 07/16] [binutils][aarch64] New sve_size_sd2 iclass Matthew Malcomson
2019-05-01 14:44 ` [PATCH 09/16] [binutils][aarch64] New sve_size_013 iclass Matthew Malcomson
2019-05-01 14:44 ` [PATCH 04/16] [binutils][aarch64] New iclass sve_size_hsd2 Matthew Malcomson
2019-05-01 14:44 ` [PATCH 02/16] [binutils][aarch64] Allow movprfx for SVE2 instructions Matthew Malcomson
2019-05-01 14:44 ` [PATCH 08/16] [binutils][aarch64] New sve_size_bh iclass Matthew Malcomson
2019-05-01 14:44 ` [PATCH 01/16] [binutils][aarch64] SVE2 feature extension flags Matthew Malcomson
2019-05-01 14:44 ` [PATCH 03/16] [binutils][aarch64] Introduce SVE_IMM_ROT3 operand Matthew Malcomson
2019-05-01 14:44 ` [PATCH 05/16] [binutils][aarch64] New SVE_Zm3_11_INDEX operand Matthew Malcomson
2019-05-01 14:45 ` [PATCH 11/16] [binutils][aarch64] New sve_shift_tsz_bhsd iclass Matthew Malcomson
2019-05-01 14:45 ` [PATCH 10/16] [binutils][aarch64] New SVE_SHRIMM_UNPRED_22 operand Matthew Malcomson
2019-05-01 14:45 ` [PATCH 06/16] [binutils][aarch64] New SVE_ADDR_ZX operand Matthew Malcomson
2019-05-01 14:45 ` [PATCH 12/16] [binutils][aarch64] New SVE_Zm4_11_INDEX operand Matthew Malcomson
2019-05-01 14:45 ` [PATCH 14/16] [binutils][aarch64] New SVE_SHLIMM_UNPRED_22 operand Matthew Malcomson
2019-05-01 14:45 ` [PATCH 13/16] [binutils][aarch64] New sve_size_tsz_bhs iclass Matthew Malcomson
2019-05-01 14:55 ` [Patch 15/16] [binutils][aarch64] Add SVE2 instructions Matthew Malcomson
2019-05-01 15:02 ` [Patch 16/16] [binutils][aarch64] Add SVE2 tests Matthew Malcomson
2019-05-02 15:25 ` [Patch 00/16] [gas][aarch64] SVE2 binutils instructions for aarch64 Joseph Myers
2019-05-02 15:34   ` Matthew Malcomson
2019-05-03 11:21 ` Nick Clifton
2019-05-03 14:56   ` Matthew Malcomson
2019-05-08 10:08     ` Nick Clifton
2019-05-09 13:30       ` Christophe Lyon
2019-05-09 14:04         ` Matthew Malcomson
