diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md
index 5d3f21b91c4..f66ed7da012 100644
--- a/gcc/config/arm/arm.md
+++ b/gcc/config/arm/arm.md
@@ -12617,6 +12617,40 @@ (define_expand "copysigndf3"
   }"
 )
 
+;; movmisalign for DImode
+(define_expand "movmisaligndi"
+  [(match_operand:DI 0 "general_operand")
+   (match_operand:DI 1 "general_operand")]
+  "unaligned_access"
+{
+  /* Avoid breaking up an aligned load or store, this avoids problems if
+     that operand might be volatile.  */
+  if (MEM_P (operands[0])
+      && MEM_ALIGN (operands[0]) >= GET_MODE_ALIGNMENT (DImode))
+    {
+      rtx tmp = gen_reg_rtx (DImode);
+      emit_insn (gen_movmisaligndi (tmp, operands[1]));
+      emit_insn (gen_movdi (operands[0], tmp));
+      DONE;
+    }
+  else if (MEM_P (operands[1])
+	   && MEM_ALIGN (operands[1]) >= GET_MODE_ALIGNMENT (DImode))
+    {
+      rtx tmp = gen_reg_rtx (DImode);
+      emit_insn (gen_movdi (tmp, operands[1]));
+      operands[1] = tmp;
+    }
+
+  rtx lo_op0 = gen_lowpart (SImode, operands[0]);
+  rtx lo_op1 = gen_lowpart (SImode, operands[1]);
+  rtx hi_op0 = gen_highpart_mode (SImode, DImode, operands[0]);
+  rtx hi_op1 = gen_highpart_mode (SImode, DImode, operands[1]);
+
+  emit_insn (gen_movmisalignsi (lo_op0, lo_op1));
+  emit_insn (gen_movmisalignsi (hi_op0, hi_op1));
+  DONE;
+})
+
 ;; movmisalign patterns for HImode and SImode.
 (define_expand "movmisalign<mode>"
   [(match_operand:HSI 0 "general_operand")
diff --git a/gcc/config/arm/vec-common.md b/gcc/config/arm/vec-common.md
index 68de4f0f943..e71d9b3811f 100644
--- a/gcc/config/arm/vec-common.md
+++ b/gcc/config/arm/vec-common.md
@@ -281,8 +281,8 @@ (define_expand "cml<conj_op><mode>4"
 })
 
 (define_expand "movmisalign<mode>"
-  [(set (match_operand:VDQX 0 "neon_perm_struct_or_reg_operand")
-	(unspec:VDQX [(match_operand:VDQX 1 "neon_perm_struct_or_reg_operand")]
+  [(set (match_operand:VDQ 0 "neon_perm_struct_or_reg_operand")
+	(unspec:VDQ [(match_operand:VDQ 1 "neon_perm_struct_or_reg_operand")]
 	 UNSPEC_MISALIGNED_ACCESS))]
   "ARM_HAVE_<MODE>_LDST && !BYTES_BIG_ENDIAN
    && unaligned_access && !TARGET_REALLY_IWMMXT"
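
For reference, a minimal sketch (not part of the patch) of the kind of source that should reach the new movmisaligndi expander when compiled with -O2 -munaligned-access for a 32-bit ARM target: a 64-bit field at a byte-aligned address is moved as two SImode misaligned accesses instead of a single DImode access.  The struct and function names below are hypothetical.

/* Hypothetical example: 'd' is only byte-aligned inside the packed struct,
   so reading it goes through the movmisaligndi expander, which splits the
   move into two SImode misaligned loads rather than one 64-bit load.  */
struct __attribute__ ((packed)) rec
{
  char tag;
  unsigned long long d;
};

unsigned long long
read_rec (const struct rec *r)
{
  return r->d;
}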