From: Jan Beulich <jbeulich@suse.com>
To: "Cui, Lili" <lili.cui@intel.com>, "Hu, Lin1" <lin1.hu@intel.com>
Cc: hjl.tools@gmail.com, binutils@sourceware.org
Subject: Re: [PATCH] Support {evex} pseudo prefix for decode evex promoted insns without egpr32.
Date: Fri, 8 Mar 2024 11:37:54 +0100
Message-ID: <1b29c006-7ea6-40d4-ac7c-1c0b40c6243c@suse.com>
In-Reply-To: <20240306095820.4117372-1-lili.cui@intel.com>

On 06.03.2024 10:58, Cui, Lili wrote:
> --- /dev/null
> +++ b/gas/testsuite/gas/i386/noreg64-evex.s

This separate test, not directly fitting with the patch title, wants
mentioning in the patch description.

> @@ -0,0 +1,67 @@
> +# Check 64-bit insns not sizeable through register operands with evex
> +	.text
> +	{evex} adc	$1, (%rax)
> +	{evex} adc	$0x89, (%rax)
> +	{evex} adc	$0x1234, (%rax)
> +	{evex} adc	$0x12345678, (%rax)
> +	{evex} add	$1, (%rax)
> +	{evex} add	$0x89, (%rax)
> +	{evex} add	$0x1234, (%rax)
> +	{evex} add	$0x12345678, (%rax)
> +	{evex} and	$1, (%rax)
> +	{evex} and	$0x89, (%rax)
> +	{evex} and	$0x1234, (%rax)
> +	{evex} and	$0x12345678, (%rax)
> +	{evex} crc32	(%rax), %eax

noreg64.s tests %rax as a destination separately, for a reason.
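
I.e. presumably something along these lines wants adding as well (purely
illustrative):

	{evex} crc32	(%rax), %rax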

> +	{evex} dec	(%rax)
> +	{evex} div	(%rax)
> +	{evex} idiv	(%rax)
> +	{evex} imul	(%rax)
> +	{evex} inc	(%rax)
> +	{evex} mul	(%rax)
> +	{evex} neg	(%rax)
> +	{evex} not	(%rax)
> +	{evex} or 	$1, (%rax)
> +	{evex} or 	$0x89, (%rax)
> +	{evex} or 	$0x1234, (%rax)
> +	{evex} or 	$0x12345678, (%rax)
> +	{evex} rcl	$1, (%rax)
> +	{evex} rcl	$2, (%rax)
> +	{evex} rcl	%cl, (%rax)
> +	{evex} rcl	(%rax)
> +	{evex} rcr	$1, (%rax)
> +	{evex} rcr	$2, (%rax)
> +	{evex} rcr	%cl, (%rax)
> +	{evex} rcr	(%rax)
> +	{evex} rol	$1, (%rax)
> +	{evex} rol	$2, (%rax)
> +	{evex} rol	%cl, (%rax)
> +	{evex} rol	(%rax)
> +	{evex} ror	$1, (%rax)
> +	{evex} ror	$2, (%rax)
> +	{evex} ror	%cl, (%rax)
> +	{evex} ror	(%rax)
> +	{evex} sbb	$1, (%rax)
> +	{evex} sbb	$0x89, (%rax)
> +	{evex} sbb	$0x1234, (%rax)
> +	{evex} sbb	$0x12345678, (%rax)
> +	{evex} sar	$1, (%rax)

Like noreg64.s, please have "sal" tests here, too.
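
For instance, mirroring the rotate pattern used above (purely illustrative):

	{evex} sal	$1, (%rax)
	{evex} sal	$2, (%rax)
	{evex} sal	%cl, (%rax)
	{evex} sal	(%rax)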

> --- a/gas/testsuite/gas/i386/x86-64-apx-ndd-optimize.d
> +++ b/gas/testsuite/gas/i386/x86-64-apx-ndd-optimize.d
> @@ -118,7 +118,7 @@ Disassembly of section .text:
>  \s*[a-f0-9]+:\s*67 0f 4d 90 90 90 90 90 	cmovge -0x6f6f6f70\(%eax\),%edx
>  \s*[a-f0-9]+:\s*67 0f 4e 90 90 90 90 90 	cmovle -0x6f6f6f70\(%eax\),%edx
>  \s*[a-f0-9]+:\s*67 0f 4f 90 90 90 90 90 	cmovg  -0x6f6f6f70\(%eax\),%edx
> -\s*[a-f0-9]+:\s*62 f4 7d 08 60 c0    	movbe  %ax,%ax
> +\s*[a-f0-9]+:\s*62 f4 7d 08 60 c0    	\{evex\} movbe %ax,%ax

This is wrong: An {evex} prefix should appear on MOVBE only when there's a
memory operand (and no use of an eGPR).
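
To illustrate with made-up operands (not meant as exact test expectations): the
register form of MOVBE exists only as an EVEX encoding, so no marker is wanted
there; the plain memory form also has a legacy encoding, so the marker is needed
to disambiguate; and an eGPR makes EVEX mandatory again:

	movbe	%ax,%ax			# reg-reg form is EVEX-only, no {evex}
	{evex} movbe	0x123(%rax),%dx	# legacy encoding exists, {evex} disambiguates
	movbe	0x123(%r16),%dx		# eGPR forces EVEX anyway, no {evex}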

> --- /dev/null
> +++ b/gas/testsuite/gas/i386/x86-64-apx_f-evex-intel.d
> @@ -0,0 +1,1197 @@
> +#as:
> +#objdump: -dw -Mintel
> +#name: x86_64 APX_F insns with evex pseudo prefix (Intel disassembly)

Is an Intel disassembly variant of this test really worth it? It's not
exactly small, after all. If you want to keep it, ...

> --- /dev/null
> +++ b/gas/testsuite/gas/i386/x86-64-apx_f-evex.d
> @@ -0,0 +1,1197 @@
> +#as:
> +#objdump: -dw
> +#name: x86_64 APX_F insns with evex pseudo prefix
> +#source: x86-64-apx_f-evex.s
> +
> +.*: +file format .*
> +
> +Disassembly of section \.text:
> +
> +0+ <_start>:
> +\s*[a-f0-9]+:\s*62 54 fc 08 fc bc 80 23 01 00 00\s+\{evex\} aadd\s+%r15,0x123\(%r8,%rax,4\)

... please bring its expectations in line with this (just a single blank between
{evex} and the insn mnemonic).

> --- /dev/null
> +++ b/gas/testsuite/gas/i386/x86-64-apx_f-evex.s
> @@ -0,0 +1,1192 @@
> +# Check 64bit APX_F instructions with evex pseudo prefix
> +
> +	.text
> +_start:
> +	{evex}	aadd	%r15,0x123(%r8,%rax,4)
> +	{evex}	aadd	%r15d,0x123(%r8,%rax,4)
> +	{evex}	aand	%r15,0x123(%r8,%rax,4)
> +	{evex}	aand	%r15d,0x123(%r8,%rax,4)
> +	{evex}	adc	$0x7b,%r15
> +	{evex}	adc	$0x7b,%r15d
> +	{evex}	adc	$0x7b,%r15w
> +	{evex}	adc	$0x7b,%r8b
> +	{evex}	adcb	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	adcw	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	adcl	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	adcq	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	adc	%r15,%r15
> +	{evex}	adc	%r15,0x123(%r8,%rax,4)
> +	{evex}	adc	%r15d,%edx
> +	{evex}	adc	%r15d,0x123(%r8,%rax,4)
> +	{evex}	adc	%r15w,%ax
> +	{evex}	adc	%r15w,0x123(%r8,%rax,4)
> +	{evex}	adc	%r8b,%dl
> +	{evex}	adc	%r8b,0x123(%r8,%rax,4)
> +	{evex}	adc	0x123(%r8,%rax,4),%r15
> +	{evex}	adc	0x123(%r8,%rax,4),%r15d
> +	{evex}	adc	0x123(%r8,%rax,4),%r15w
> +	{evex}	adc	0x123(%r8,%rax,4),%r8b
> +	{evex}	adcx	%r15,%r15
> +	{evex}	adcx	%r15d,%edx
> +	{evex}	adcx	0x123(%r8,%rax,4),%r15
> +	{evex}	adcx	0x123(%r8,%rax,4),%r15d
> +	{evex}	add	$0x7b,%r15
> +	{evex}	add	$0x7b,%r15d
> +	{evex}	add	$0x7b,%r15w
> +	{evex}	add	$0x7b,%r8b
> +	{evex}	addb	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	addw	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	addl	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	addq	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	add	%r15,%r15
> +	{evex}	add	%r15,0x123(%r8,%rax,4)
> +	{evex}	add	%r15d,%edx
> +	{evex}	add	%r15d,0x123(%r8,%rax,4)
> +	{evex}	add	%r15w,%ax
> +	{evex}	add	%r15w,0x123(%r8,%rax,4)
> +	{evex}	add	%r8b,%dl
> +	{evex}	add	%r8b,0x123(%r8,%rax,4)
> +	{evex}	add	0x123(%r8,%rax,4),%r15
> +	{evex}	add	0x123(%r8,%rax,4),%r15d
> +	{evex}	add	0x123(%r8,%rax,4),%r15w
> +	{evex}	add	0x123(%r8,%rax,4),%r8b
> +	{evex}	adox	%r15,%r15
> +	{evex}	adox	%r15d,%edx
> +	{evex}	adox	0x123(%r8,%rax,4),%r15
> +	{evex}	adox	0x123(%r8,%rax,4),%r15d
> +	{evex}	aesdec128kl	0x123(%r8,%rax,4),%xmm12
> +	{evex}	aesdec256kl	0x123(%r8,%rax,4),%xmm12
> +	{evex}	aesdecwide128kl	0x123(%r8,%rax,4)
> +	{evex}	aesdecwide256kl	0x123(%r8,%rax,4)
> +	{evex}	aesenc128kl	0x123(%r8,%rax,4),%xmm12
> +	{evex}	aesenc256kl	0x123(%r8,%rax,4),%xmm12
> +	{evex}	aesencwide128kl	0x123(%r8,%rax,4)
> +	{evex}	aesencwide256kl	0x123(%r8,%rax,4)
> +	{evex}	and	$0x7b,%r15
> +	{evex}	and	$0x7b,%r15d
> +	{evex}	and	$0x7b,%r15w
> +	{evex}	and	$0x7b,%r8b
> +	{evex}	andb	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	andw	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	andl	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	andq	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	and	%r15,%r15
> +	{evex}	and	%r15,0x123(%r8,%rax,4)
> +	{evex}	and	%r15d,%edx
> +	{evex}	and	%r15d,0x123(%r8,%rax,4)
> +	{evex}	and	%r15w,%ax
> +	{evex}	and	%r15w,0x123(%r8,%rax,4)
> +	{evex}	and	%r8b,%dl
> +	{evex}	and	%r8b,0x123(%r8,%rax,4)
> +	{evex}	and	0x123(%r8,%rax,4),%r15
> +	{evex}	and	0x123(%r8,%rax,4),%r15d
> +	{evex}	and	0x123(%r8,%rax,4),%r15w
> +	{evex}	and	0x123(%r8,%rax,4),%r8b
> +	{evex}	andn	%r15,%r15,%r11
> +	{evex}	andn	%r15d,%edx,%r10d
> +	{evex}	andn	0x123(%r8,%rax,4),%r15,%r15
> +	{evex}	andn	0x123(%r8,%rax,4),%r15d,%edx
> +	{evex}	aor	%r15,0x123(%r8,%rax,4)
> +	{evex}	aor	%r15d,0x123(%r8,%rax,4)
> +	{evex}	axor	%r15,0x123(%r8,%rax,4)
> +	{evex}	axor	%r15d,0x123(%r8,%rax,4)
> +	{evex}	bextr	%r15,%r15,%r11
> +	{evex}	bextr	%r15,0x123(%r8,%rax,4),%r15
> +	{evex}	bextr	%r15d,%edx,%r10d
> +	{evex}	bextr	%r15d,0x123(%r8,%rax,4),%edx
> +	{evex}	blsi	%r15,%r15
> +	{evex}	blsi	%r15d,%edx
> +	{evex}	blsi	0x123(%r8,%rax,4),%r15
> +	{evex}	blsi	0x123(%r8,%rax,4),%r15d
> +	{evex}	blsmsk	%r15,%r15
> +	{evex}	blsmsk	%r15d,%edx
> +	{evex}	blsmsk	0x123(%r8,%rax,4),%r15
> +	{evex}	blsmsk	0x123(%r8,%rax,4),%r15d
> +	{evex}	blsr	%r15,%r15
> +	{evex}	blsr	%r15d,%edx
> +	{evex}	blsr	0x123(%r8,%rax,4),%r15
> +	{evex}	blsr	0x123(%r8,%rax,4),%r15d
> +	{evex}	bzhi	%r15,%r15,%r11
> +	{evex}	bzhi	%r15,0x123(%r8,%rax,4),%r15
> +	{evex}	bzhi	%r15d,%edx,%r10d
> +	{evex}	bzhi	%r15d,0x123(%r8,%rax,4),%edx
> +	{evex}	cmpbexadd	%r15,%r15,0x123(%r8,%rax,4)
> +	{evex}	cmpbexadd	%r15d,%edx,0x123(%r8,%rax,4)
> +	{evex}	cmpbxadd	%r15,%r15,0x123(%r8,%rax,4)
> +	{evex}	cmpbxadd	%r15d,%edx,0x123(%r8,%rax,4)
> +	{evex}	cmplexadd	%r15,%r15,0x123(%r8,%rax,4)
> +	{evex}	cmplexadd	%r15d,%edx,0x123(%r8,%rax,4)
> +	{evex}	cmplxadd	%r15,%r15,0x123(%r8,%rax,4)
> +	{evex}	cmplxadd	%r15d,%edx,0x123(%r8,%rax,4)
> +	{evex}	cmpnbexadd	%r15,%r15,0x123(%r8,%rax,4)
> +	{evex}	cmpnbexadd	%r15d,%edx,0x123(%r8,%rax,4)
> +	{evex}	cmpnbxadd	%r15,%r15,0x123(%r8,%rax,4)
> +	{evex}	cmpnbxadd	%r15d,%edx,0x123(%r8,%rax,4)
> +	{evex}	cmpnlexadd	%r15,%r15,0x123(%r8,%rax,4)
> +	{evex}	cmpnlexadd	%r15d,%edx,0x123(%r8,%rax,4)
> +	{evex}	cmpnlxadd	%r15,%r15,0x123(%r8,%rax,4)
> +	{evex}	cmpnlxadd	%r15d,%edx,0x123(%r8,%rax,4)
> +	{evex}	cmpnoxadd	%r15,%r15,0x123(%r8,%rax,4)
> +	{evex}	cmpnoxadd	%r15d,%edx,0x123(%r8,%rax,4)
> +	{evex}	cmpnpxadd	%r15,%r15,0x123(%r8,%rax,4)
> +	{evex}	cmpnpxadd	%r15d,%edx,0x123(%r8,%rax,4)
> +	{evex}	cmpnsxadd	%r15,%r15,0x123(%r8,%rax,4)
> +	{evex}	cmpnsxadd	%r15d,%edx,0x123(%r8,%rax,4)
> +	{evex}	cmpnzxadd	%r15,%r15,0x123(%r8,%rax,4)
> +	{evex}	cmpnzxadd	%r15d,%edx,0x123(%r8,%rax,4)
> +	{evex}	cmpoxadd	%r15,%r15,0x123(%r8,%rax,4)
> +	{evex}	cmpoxadd	%r15d,%edx,0x123(%r8,%rax,4)
> +	{evex}	cmppxadd	%r15,%r15,0x123(%r8,%rax,4)
> +	{evex}	cmppxadd	%r15d,%edx,0x123(%r8,%rax,4)
> +	{evex}	cmpsxadd	%r15,%r15,0x123(%r8,%rax,4)
> +	{evex}	cmpsxadd	%r15d,%edx,0x123(%r8,%rax,4)
> +	{evex}	cmpzxadd	%r15,%r15,0x123(%r8,%rax,4)
> +	{evex}	cmpzxadd	%r15d,%edx,0x123(%r8,%rax,4)
> +	{evex}	dec	%r15
> +	{evex}	dec	%r15d
> +	{evex}	dec	%r15w
> +	{evex}	dec	%r8b
> +	{evex}	decb	0x123(%r8,%rax,4)
> +	{evex}	decw	0x123(%r8,%rax,4)
> +	{evex}	decl	0x123(%r8,%rax,4)
> +	{evex}	decq	0x123(%r8,%rax,4)
> +	{evex}	div	%r15
> +	{evex}	div	%r15d
> +	{evex}	div	%r15w
> +	{evex}	div	%r8b
> +	{evex}	divb	0x123(%r8,%rax,4)
> +	{evex}	divw	0x123(%r8,%rax,4)
> +	{evex}	divl	0x123(%r8,%rax,4)
> +	{evex}	divq	0x123(%r8,%rax,4)
> +	{evex}	encodekey128	%r15d,%edx
> +	{evex}	encodekey256	%r15d,%edx
> +	{evex}	enqcmd	0x123(%r8,%rax,4),%r15
> +	{evex}	enqcmd	0x123(%r8d,%eax,4),%r15d
> +	{evex}	enqcmds	0x123(%r8,%rax,4),%r15
> +	{evex}	enqcmds	0x123(%r8d,%eax,4),%r15d
> +	{evex}	idiv	%r15
> +	{evex}	idiv	%r15d
> +	{evex}	idiv	%r15w
> +	{evex}	idiv	%r8b
> +	{evex}	idivb	0x123(%r8,%rax,4)
> +	{evex}	idivw	0x123(%r8,%rax,4)
> +	{evex}	idivl	0x123(%r8,%rax,4)
> +	{evex}	idivq	0x123(%r8,%rax,4)
> +	{evex}	imul	%r15
> +	{evex}	imul	%r15,%r15
> +	{evex}	imul	%r15d
> +	{evex}	imul	%r15d,%edx
> +	{evex}	imul	%r15w
> +	{evex}	imul	%r15w,%ax
> +	{evex}	imul	%r8b
> +	{evex}	imulb	0x123(%r8,%rax,4)
> +	{evex}	imulw	0x123(%r8,%rax,4)
> +	{evex}	imull	0x123(%r8,%rax,4)
> +	{evex}	imul	0x123(%r8,%rax,4),%r15
> +	{evex}	imul	0x123(%r8,%rax,4),%r15d
> +	{evex}	imul	0x123(%r8,%rax,4),%r15w
> +	{evex}	imulq	0x123(%r8,%rax,4)
> +	{evex}	imul	$0x7b, %dx, %ax
> +	{evex}	imul	$0x7b, %ecx, %edx
> +	{evex}	imul	$0x7b, %r9, %r15
> +	{evex}	imul	$0x7b, 291(%r8, %rax, 4), %dx
> +	{evex}	imul	$0x7b, 291(%r8, %rax, 4), %ecx
> +	{evex}	imul	$0x7b, 291(%r8, %rax, 4), %r9
> +	{evex}	imul	$0xff90, %dx, %ax
> +	{evex}	imul	$0xff90, %ecx, %edx
> +	{evex}	imul	$0xff90, %r9, %r15
> +	{evex}	imul	$0xff90, 291(%r8, %rax, 4), %dx
> +	{evex}	imul	$0xff90, 291(%r8, %rax, 4), %ecx
> +	{evex}	imul	$0xff90, 291(%r8, %rax, 4), %r9

Also covering the 2-operand forms of these (srcreg == dstreg) would
seem desirable.
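
I.e. presumably forms like these (purely illustrative):

	{evex}	imul	$0x7b, %dx
	{evex}	imul	$0x7b, %ecx
	{evex}	imul	$0x7b, %r9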

> +	{evex}	inc	%r15
> +	{evex}	inc	%r15d
> +	{evex}	inc	%r15w
> +	{evex}	inc	%r8b
> +	{evex}	incb	0x123(%r8,%rax,4)
> +	{evex}	incw	0x123(%r8,%rax,4)
> +	{evex}	incl	0x123(%r8,%rax,4)
> +	{evex}	incq	0x123(%r8,%rax,4)
> +	{evex}	invept	0x123(%r8,%rax,4),%r15
> +	{evex}	invpcid	0x123(%r8,%rax,4),%r15
> +	{evex}	invvpid	0x123(%r8,%rax,4),%r15
> +	{evex}	kmovb	%k3,%k5
> +	{evex}	kmovb	%k5,%r15d
> +	{evex}	kmovb	%k5,0x123(%r8,%rax,4)
> +	{evex}	kmovb	%r15d,%k5
> +	{evex}	kmovb	0x123(%r8,%rax,4),%k5
> +	{evex}	kmovd	%k3,%k5
> +	{evex}	kmovd	%k5,%r15d
> +	{evex}	kmovd	%k5,0x123(%r8,%rax,4)
> +	{evex}	kmovd	%r15d,%k5
> +	{evex}	kmovd	0x123(%r8,%rax,4),%k5
> +	{evex}	kmovq	%k3,%k5
> +	{evex}	kmovq	%k5,%r15
> +	{evex}	kmovq	%k5,0x123(%r8,%rax,4)
> +	{evex}	kmovq	%r15,%k5
> +	{evex}	kmovq	0x123(%r8,%rax,4),%k5
> +	{evex}	kmovw	%k3,%k5
> +	{evex}	kmovw	%k5,%r15d
> +	{evex}	kmovw	%k5,0x123(%r8,%rax,4)
> +	{evex}	kmovw	%r15d,%k5
> +	{evex}	kmovw	0x123(%r8,%rax,4),%k5
> +	{evex}	lzcnt	%r15,%r15
> +	{evex}	lzcnt	%r15d,%edx
> +	{evex}	lzcnt	%r15w,%ax
> +	{evex}	lzcnt	0x123(%r8,%rax,4),%r15
> +	{evex}	lzcnt	0x123(%r8,%rax,4),%r15d
> +	{evex}	lzcnt	0x123(%r8,%rax,4),%r15w
> +	{evex}	movbe	%r15,%r15

No {evex} needed here, ...

> +	{evex}	movbe	%r15,0x123(%r8,%rax,4)
> +	{evex}	movbe	%r15d,%edx

... here, or ...

> +	{evex}	movbe	%r15d,0x123(%r8,%rax,4)
> +	{evex}	movbe	%r15w,%ax

... here (iow these three can be omitted).

> +	{evex}	movbe	%r15w,0x123(%r8,%rax,4)
> +	{evex}	movbe	0x123(%r8,%rax,4),%r15
> +	{evex}	movbe	0x123(%r8,%rax,4),%r15d
> +	{evex}	movbe	0x123(%r8,%rax,4),%r15w
> +	{evex}	movdir64b	0x123(%r8,%rax,4),%r15
> +	{evex}	movdir64b	0x123(%r8d,%eax,4),%r15d
> +	{evex}	movdiri	%r15,0x123(%r8,%rax,4)
> +	{evex}	movdiri	%r15d,0x123(%r8,%rax,4)
> +	{evex}	mul	%r15
> +	{evex}	mul	%r15d
> +	{evex}	mul	%r15w
> +	{evex}	mul	%r8b
> +	{evex}	mulb	0x123(%r8,%rax,4)
> +	{evex}	mulw	0x123(%r8,%rax,4)
> +	{evex}	mull	0x123(%r8,%rax,4)
> +	{evex}	mulq	0x123(%r8,%rax,4)
> +	{evex}	mulx	%r15,%r15,%r11
> +	{evex}	mulx	%r15d,%edx,%r10d
> +	{evex}	mulx	0x123(%r8,%rax,4),%r15,%r15
> +	{evex}	mulx	0x123(%r8,%rax,4),%r15d,%edx
> +	{evex}	neg	%r15
> +	{evex}	neg	%r15d
> +	{evex}	neg	%r15w
> +	{evex}	neg	%r8b
> +	{evex}	negb	0x123(%r8,%rax,4)
> +	{evex}	negw	0x123(%r8,%rax,4)
> +	{evex}	negl	0x123(%r8,%rax,4)
> +	{evex}	negq	0x123(%r8,%rax,4)
> +	{evex}	not	%r15
> +	{evex}	not	%r15d
> +	{evex}	not	%r15w
> +	{evex}	not	%r8b
> +	{evex}	notb	0x123(%r8,%rax,4)
> +	{evex}	notw	0x123(%r8,%rax,4)
> +	{evex}	notl	0x123(%r8,%rax,4)
> +	{evex}	notq	0x123(%r8,%rax,4)
> +	{evex}	or	$0x7b,%r15
> +	{evex}	or	$0x7b,%r15d
> +	{evex}	or	$0x7b,%r15w
> +	{evex}	or	$0x7b,%r8b
> +	{evex}	orb	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	orw	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	orl	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	orq	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	or	%r15,%r15
> +	{evex}	or	%r15,0x123(%r8,%rax,4)
> +	{evex}	or	%r15d,%edx
> +	{evex}	or	%r15d,0x123(%r8,%rax,4)
> +	{evex}	or	%r15w,%ax
> +	{evex}	or	%r15w,0x123(%r8,%rax,4)
> +	{evex}	or	%r8b,%dl
> +	{evex}	or	%r8b,0x123(%r8,%rax,4)
> +	{evex}	or	0x123(%r8,%rax,4),%r15
> +	{evex}	or	0x123(%r8,%rax,4),%r15d
> +	{evex}	or	0x123(%r8,%rax,4),%r15w
> +	{evex}	or	0x123(%r8,%rax,4),%r8b
> +	{evex}	pdep	%r15,%r15,%r11
> +	{evex}	pdep	%r15d,%edx,%r10d
> +	{evex}	pdep	0x123(%r8,%rax,4),%r15,%r15
> +	{evex}	pdep	0x123(%r8,%rax,4),%r15d,%edx
> +	{evex}	pext	%r15,%r15,%r11
> +	{evex}	pext	%r15d,%edx,%r10d
> +	{evex}	pext	0x123(%r8,%rax,4),%r15,%r15
> +	{evex}	pext	0x123(%r8,%rax,4),%r15d,%edx
> +	{evex}	popcnt	%r15,%r15
> +	{evex}	popcnt	%r15d,%edx
> +	{evex}	popcnt	%r15w,%ax
> +	{evex}	popcnt	0x123(%r8,%rax,4),%r15
> +	{evex}	popcnt	0x123(%r8,%rax,4),%r15d
> +	{evex}	popcnt	0x123(%r8,%rax,4),%r15w
> +	{evex}	rcl	$0x7b,%r15
> +	{evex}	rcl	$0x7b,%r15d
> +	{evex}	rcl	$0x7b,%r15w
> +	{evex}	rcl	$0x7b,%r8b
> +	{evex}	rclb	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	rclw	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	rcll	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	rclq	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	rcl	$1,%r15
> +	{evex}	rcl	$1,%r15d
> +	{evex}	rcl	$1,%r15w
> +	{evex}	rcl	$1,%r8b
> +	{evex}	rclb	$1,0x123(%r8,%rax,4)
> +	{evex}	rclw	$1,0x123(%r8,%rax,4)
> +	{evex}	rcll	$1,0x123(%r8,%rax,4)
> +	{evex}	rclq	$1,0x123(%r8,%rax,4)
> +	{evex}	rcl	%cl,%r15
> +	{evex}	rcl	%cl,%r15d
> +	{evex}	rcl	%cl,%r15w
> +	{evex}	rcl	%cl,%r8b
> +	{evex}	rclb	%cl,0x123(%r8,%rax,4)
> +	{evex}	rclw	%cl,0x123(%r8,%rax,4)
> +	{evex}	rcll	%cl,0x123(%r8,%rax,4)
> +	{evex}	rclq	%cl,0x123(%r8,%rax,4)
> +	{evex}	rcr	$0x7b,%r15
> +	{evex}	rcr	$0x7b,%r15d
> +	{evex}	rcr	$0x7b,%r15w
> +	{evex}	rcr	$0x7b,%r8b
> +	{evex}	rcrb	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	rcrw	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	rcrl	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	rcrq	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	rcr	$1,%r15
> +	{evex}	rcr	$1,%r15d
> +	{evex}	rcr	$1,%r15w
> +	{evex}	rcr	$1,%r8b
> +	{evex}	rcrb	$1,0x123(%r8,%rax,4)
> +	{evex}	rcrw	$1,0x123(%r8,%rax,4)
> +	{evex}	rcrl	$1,0x123(%r8,%rax,4)
> +	{evex}	rcrq	$1,0x123(%r8,%rax,4)
> +	{evex}	rcr	%cl,%r15
> +	{evex}	rcr	%cl,%r15d
> +	{evex}	rcr	%cl,%r15w
> +	{evex}	rcr	%cl,%r8b
> +	{evex}	rcrb	%cl,0x123(%r8,%rax,4)
> +	{evex}	rcrw	%cl,0x123(%r8,%rax,4)
> +	{evex}	rcrl	%cl,0x123(%r8,%rax,4)
> +	{evex}	rcrq	%cl,0x123(%r8,%rax,4)
> +	{evex}	rol	$0x7b,%r15
> +	{evex}	rol	$0x7b,%r15d
> +	{evex}	rol	$0x7b,%r15w
> +	{evex}	rol	$0x7b,%r8b
> +	{evex}	rolb	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	rolw	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	roll	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	rolq	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	rol	$1,%r15
> +	{evex}	rol	$1,%r15d
> +	{evex}	rol	$1,%r15w
> +	{evex}	rol	$1,%r8b
> +	{evex}	rolb	$1,0x123(%r8,%rax,4)
> +	{evex}	rolw	$1,0x123(%r8,%rax,4)
> +	{evex}	roll	$1,0x123(%r8,%rax,4)
> +	{evex}	rolq	$1,0x123(%r8,%rax,4)
> +	{evex}	rol	%cl,%r15
> +	{evex}	rol	%cl,%r15d
> +	{evex}	rol	%cl,%r15w
> +	{evex}	rol	%cl,%r8b
> +	{evex}	rolb	%cl,0x123(%r8,%rax,4)
> +	{evex}	rolw	%cl,0x123(%r8,%rax,4)
> +	{evex}	roll	%cl,0x123(%r8,%rax,4)
> +	{evex}	rolq	%cl,0x123(%r8,%rax,4)
> +	{evex}	ror	$0x7b,%r15
> +	{evex}	ror	$0x7b,%r15d
> +	{evex}	ror	$0x7b,%r15w
> +	{evex}	ror	$0x7b,%r8b
> +	{evex}	rorb	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	rorw	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	rorl	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	rorq	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	ror	$1,%r15
> +	{evex}	ror	$1,%r15d
> +	{evex}	ror	$1,%r15w
> +	{evex}	ror	$1,%r8b
> +	{evex}	rorb	$1,0x123(%r8,%rax,4)
> +	{evex}	rorw	$1,0x123(%r8,%rax,4)
> +	{evex}	rorl	$1,0x123(%r8,%rax,4)
> +	{evex}	rorq	$1,0x123(%r8,%rax,4)
> +	{evex}	ror	%cl,%r15
> +	{evex}	ror	%cl,%r15d
> +	{evex}	ror	%cl,%r15w
> +	{evex}	ror	%cl,%r8b
> +	{evex}	rorb	%cl,0x123(%r8,%rax,4)
> +	{evex}	rorw	%cl,0x123(%r8,%rax,4)
> +	{evex}	rorl	%cl,0x123(%r8,%rax,4)
> +	{evex}	rorq	%cl,0x123(%r8,%rax,4)
> +	{evex}	rorx	$0x7b,%r15,%r15
> +	{evex}	rorx	$0x7b,%r15d,%edx
> +	{evex}	rorx	$0x7b,0x123(%r8,%rax,4),%r15
> +	{evex}	rorx	$0x7b,0x123(%r8,%rax,4),%r15d
> +	{evex}	sar	$0x7b,%r15

Please also again cover "sal".
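
E.g., mirroring the "sar" block above (purely illustrative):

	{evex}	sal	$0x7b,%r15
	{evex}	salb	$0x7b,0x123(%r8,%rax,4)
	{evex}	sal	$1,%r15
	{evex}	sal	%cl,%r15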

> +	{evex}	sar	$0x7b,%r15d
> +	{evex}	sar	$0x7b,%r15w
> +	{evex}	sar	$0x7b,%r8b
> +	{evex}	sarb	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	sarw	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	sarl	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	sarq	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	sar	$1,%r15
> +	{evex}	sar	$1,%r15d
> +	{evex}	sar	$1,%r15w
> +	{evex}	sar	$1,%r8b
> +	{evex}	sarb	$1,0x123(%r8,%rax,4)
> +	{evex}	sarw	$1,0x123(%r8,%rax,4)
> +	{evex}	sarl	$1,0x123(%r8,%rax,4)
> +	{evex}	sarq	$1,0x123(%r8,%rax,4)
> +	{evex}	sar	%cl,%r15
> +	{evex}	sar	%cl,%r15d
> +	{evex}	sar	%cl,%r15w
> +	{evex}	sar	%cl,%r8b
> +	{evex}	sarb	%cl,0x123(%r8,%rax,4)
> +	{evex}	sarw	%cl,0x123(%r8,%rax,4)
> +	{evex}	sarl	%cl,0x123(%r8,%rax,4)
> +	{evex}	sarq	%cl,0x123(%r8,%rax,4)
> +	{evex}	sarx	%r15,%r15,%r11
> +	{evex}	sarx	%r15,0x123(%r8,%rax,4),%r15
> +	{evex}	sarx	%r15d,%edx,%r10d
> +	{evex}	sarx	%r15d,0x123(%r8,%rax,4),%edx
> +	{evex}	sbb	$0x7b,%r15
> +	{evex}	sbb	$0x7b,%r15d
> +	{evex}	sbb	$0x7b,%r15w
> +	{evex}	sbb	$0x7b,%r8b
> +	{evex}	sbbb	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	sbbw	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	sbbl	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	sbbq	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	sbb	%r15,%r15
> +	{evex}	sbb	%r15,0x123(%r8,%rax,4)
> +	{evex}	sbb	%r15d,%edx
> +	{evex}	sbb	%r15d,0x123(%r8,%rax,4)
> +	{evex}	sbb	%r15w,%ax
> +	{evex}	sbb	%r15w,0x123(%r8,%rax,4)
> +	{evex}	sbb	%r8b,%dl
> +	{evex}	sbb	%r8b,0x123(%r8,%rax,4)
> +	{evex}	sbb	0x123(%r8,%rax,4),%r15
> +	{evex}	sbb	0x123(%r8,%rax,4),%r15d
> +	{evex}	sbb	0x123(%r8,%rax,4),%r15w
> +	{evex}	sbb	0x123(%r8,%rax,4),%r8b
> +	{evex}	sha1msg1	%xmm13,%xmm12
> +	{evex}	sha1msg1	0x123(%r8,%rax,4),%xmm12
> +	{evex}	sha1msg2	%xmm13,%xmm12
> +	{evex}	sha1msg2	0x123(%r8,%rax,4),%xmm12
> +	{evex}	sha1nexte	%xmm13,%xmm12
> +	{evex}	sha1nexte	0x123(%r8,%rax,4),%xmm12
> +	{evex}	sha1rnds4	$0x7b,%xmm13,%xmm12
> +	{evex}	sha1rnds4	$0x7b,0x123(%r8,%rax,4),%xmm12
> +	{evex}	sha256msg1	%xmm13,%xmm12
> +	{evex}	sha256msg1	0x123(%r8,%rax,4),%xmm12
> +	{evex}	sha256msg2	%xmm13,%xmm12
> +	{evex}	sha256msg2	0x123(%r8,%rax,4),%xmm12
> +	{evex}	sha256rnds2	0x123(%r8,%rax,4),%xmm12
> +	{evex}	shl	$0x7b,%r15
> +	{evex}	shl	$0x7b,%r15d
> +	{evex}	shl	$0x7b,%r15w
> +	{evex}	shl	$0x7b,%r8b
> +	{evex}	shlb	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	shlw	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	shll	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	shlq	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	shl	$1,%r15
> +	{evex}	shl	$1,%r15d
> +	{evex}	shl	$1,%r15w
> +	{evex}	shl	$1,%r8b
> +	{evex}	shlb	$1,0x123(%r8,%rax,4)
> +	{evex}	shlw	$1,0x123(%r8,%rax,4)
> +	{evex}	shll	$1,0x123(%r8,%rax,4)
> +	{evex}	shlq	$1,0x123(%r8,%rax,4)
> +	{evex}	shl	%cl,%r15
> +	{evex}	shl	%cl,%r15d
> +	{evex}	shl	%cl,%r15w
> +	{evex}	shl	%cl,%r8b
> +	{evex}	shlb	%cl,0x123(%r8,%rax,4)
> +	{evex}	shlw	%cl,0x123(%r8,%rax,4)
> +	{evex}	shll	%cl,0x123(%r8,%rax,4)
> +	{evex}	shlq	%cl,0x123(%r8,%rax,4)
> +	{evex}	shld	$0x7b,%r15,%r15
> +	{evex}	shld	$0x7b,%r15,0x123(%r8,%rax,4)
> +	{evex}	shld	$0x7b,%r15d,%edx
> +	{evex}	shld	$0x7b,%r15d,0x123(%r8,%rax,4)
> +	{evex}	shld	$0x7b,%r15w,%ax
> +	{evex}	shld	$0x7b,%r15w,0x123(%r8,%rax,4)
> +	{evex}	shld	%cl,%r15,%r15
> +	{evex}	shld	%cl,%r15,0x123(%r8,%rax,4)
> +	{evex}	shld	%cl,%r15d,%edx
> +	{evex}	shld	%cl,%r15d,0x123(%r8,%rax,4)
> +	{evex}	shld	%cl,%r15w,%ax
> +	{evex}	shld	%cl,%r15w,0x123(%r8,%rax,4)
> +	{evex}	shlx	%r15,%r15,%r11
> +	{evex}	shlx	%r15,0x123(%r8,%rax,4),%r15
> +	{evex}	shlx	%r15d,%edx,%r10d
> +	{evex}	shlx	%r15d,0x123(%r8,%rax,4),%edx
> +	{evex}	shr	$0x7b,%r15
> +	{evex}	shr	$0x7b,%r15d
> +	{evex}	shr	$0x7b,%r15w
> +	{evex}	shr	$0x7b,%r8b
> +	{evex}	shrb	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	shrw	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	shrl	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	shrq	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	shr	$1,%r15
> +	{evex}	shr	$1,%r15d
> +	{evex}	shr	$1,%r15w
> +	{evex}	shr	$1,%r8b
> +	{evex}	shrb	$1,0x123(%r8,%rax,4)
> +	{evex}	shrw	$1,0x123(%r8,%rax,4)
> +	{evex}	shrl	$1,0x123(%r8,%rax,4)
> +	{evex}	shrq	$1,0x123(%r8,%rax,4)
> +	{evex}	shr	%cl,%r15
> +	{evex}	shr	%cl,%r15d
> +	{evex}	shr	%cl,%r15w
> +	{evex}	shr	%cl,%r8b
> +	{evex}	shrb	%cl,0x123(%r8,%rax,4)
> +	{evex}	shrw	%cl,0x123(%r8,%rax,4)
> +	{evex}	shrl	%cl,0x123(%r8,%rax,4)
> +	{evex}	shrq	%cl,0x123(%r8,%rax,4)
> +	{evex}	shrd	$0x7b,%r15,%r15
> +	{evex}	shrd	$0x7b,%r15,0x123(%r8,%rax,4)
> +	{evex}	shrd	$0x7b,%r15d,%edx
> +	{evex}	shrd	$0x7b,%r15d,0x123(%r8,%rax,4)
> +	{evex}	shrd	$0x7b,%r15w,%ax
> +	{evex}	shrd	$0x7b,%r15w,0x123(%r8,%rax,4)
> +	{evex}	shrd	%cl,%r15,%r15
> +	{evex}	shrd	%cl,%r15,0x123(%r8,%rax,4)
> +	{evex}	shrd	%cl,%r15d,%edx
> +	{evex}	shrd	%cl,%r15d,0x123(%r8,%rax,4)
> +	{evex}	shrd	%cl,%r15w,%ax
> +	{evex}	shrd	%cl,%r15w,0x123(%r8,%rax,4)
> +	{evex}	shrx	%r15,%r15,%r11
> +	{evex}	shrx	%r15,0x123(%r8,%rax,4),%r15
> +	{evex}	shrx	%r15d,%edx,%r10d
> +	{evex}	shrx	%r15d,0x123(%r8,%rax,4),%edx
> +	{evex}	sub	$0x7b,%r15
> +	{evex}	sub	$0x7b,%r15d
> +	{evex}	sub	$0x7b,%r15w
> +	{evex}	sub	$0x7b,%r8b
> +	{evex}	subb	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	subw	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	subl	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	subq	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	sub	%r15,%r15
> +	{evex}	sub	%r15,0x123(%r8,%rax,4)
> +	{evex}	sub	%r15d,%edx
> +	{evex}	sub	%r15d,0x123(%r8,%rax,4)
> +	{evex}	sub	%r15w,%ax
> +	{evex}	sub	%r15w,0x123(%r8,%rax,4)
> +	{evex}	sub	%r8b,%dl
> +	{evex}	sub	%r8b,0x123(%r8,%rax,4)
> +	{evex}	sub	0x123(%r8,%rax,4),%r15
> +	{evex}	sub	0x123(%r8,%rax,4),%r15d
> +	{evex}	sub	0x123(%r8,%rax,4),%r15w
> +	{evex}	sub	0x123(%r8,%rax,4),%r8b
> +	{evex}	tzcnt	%r15,%r15
> +	{evex}	tzcnt	%r15d,%edx
> +	{evex}	tzcnt	%r15w,%ax
> +	{evex}	tzcnt	0x123(%r8,%rax,4),%r15
> +	{evex}	tzcnt	0x123(%r8,%rax,4),%r15d
> +	{evex}	tzcnt	0x123(%r8,%rax,4),%r15w
> +	{evex}	wrssd	%r15d,0x123(%r8,%rax,4)
> +	{evex}	wrssq	%r15,0x123(%r8,%rax,4)
> +	{evex}	wrussd	%r15d,0x123(%r8,%rax,4)
> +	{evex}	wrussq	%r15,0x123(%r8,%rax,4)
> +	{evex}	xor	$0x7b,%r15
> +	{evex}	xor	$0x7b,%r15d
> +	{evex}	xor	$0x7b,%r15w
> +	{evex}	xor	$0x7b,%r8b
> +	{evex}	xorb	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	xorw	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	xorl	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	xorq	$0x7b,0x123(%r8,%rax,4)
> +	{evex}	xor	%r15,%r15
> +	{evex}	xor	%r15,0x123(%r8,%rax,4)
> +	{evex}	xor	%r15d,%edx
> +	{evex}	xor	%r15d,0x123(%r8,%rax,4)
> +	{evex}	xor	%r15w,%ax
> +	{evex}	xor	%r15w,0x123(%r8,%rax,4)
> +	{evex}	xor	%r8b,%dl
> +	{evex}	xor	%r8b,0x123(%r8,%rax,4)
> +	{evex}	xor	0x123(%r8,%rax,4),%r15
> +	{evex}	xor	0x123(%r8,%rax,4),%r15d
> +	{evex}	xor	0x123(%r8,%rax,4),%r15w
> +	{evex}	xor	0x123(%r8,%rax,4),%r8b
> +
> +	.intel_syntax noprefix

Along the lines of the earlier remark, I wonder whether this repetition
of everything is really useful to have.

> --- a/opcodes/i386-dis-evex-mod.h
> +++ b/opcodes/i386-dis-evex-mod.h
> @@ -1,10 +1,10 @@
>    /* MOD_EVEX_MAP4_F8_P1 */
>    {
> -    { "enqcmds",	{ Gva, M }, 0 },
> -    { "uwrmsr",		{ Gq, Eq }, 0 },
> +    { "%XEenqcmds",		{ Gva, M }, 0 },
> +    { "%XEuwrmsr",		{ Gq, Eq }, 0 },
>    },
>    /* MOD_EVEX_MAP4_F8_P3 */
>    {
> -    { "enqcmd",		{ Gva, M }, 0 },
> -    { "urdmsr",		{ Eq, Gq }, 0 },
> +    { "%XEenqcmd",		{ Gva, M }, 0 },
> +    { "%XEurdmsr",		{ Eq, Gq }, 0 },
>    },

Hmm. Once new encodings appear in Map4, I can see that we will need such a
distinction. But right now can't we get away without touching all of them
again, by simply taking the fact that it is Map4 as the indication? The only
place where it may be helpful to add all of these right away is MOVBE, whose
register form would then not have %XE added.

> --- a/opcodes/i386-dis-evex-reg.h
> +++ b/opcodes/i386-dis-evex-reg.h
> @@ -53,8 +53,8 @@
>    {
>      { "%NFaddA",	{ VexGb, Eb, Ib }, NO_PREFIX },
>      { "%NForA",	{ VexGb, Eb, Ib }, NO_PREFIX },
> -    { "adcA",	{ VexGb, Eb, Ib }, NO_PREFIX },
> -    { "sbbA",	{ VexGb, Eb, Ib }, NO_PREFIX },
> +    { "%XEadcA",	{ VexGb, Eb, Ib }, NO_PREFIX },
> +    { "%XEsbbA",	{ VexGb, Eb, Ib }, NO_PREFIX },
>      { "%NFandA",	{ VexGb, Eb, Ib }, NO_PREFIX },
>      { "%NFsubA",	{ VexGb, Eb, Ib }, NO_PREFIX },
>      { "%NFxorA",	{ VexGb, Eb, Ib }, NO_PREFIX },

IOW this patch goes on top of the NF one, without that being said anywhere?
Except not quite, as ...

> --- a/opcodes/i386-dis.c
> +++ b/opcodes/i386-dis.c
> @@ -2625,8 +2625,8 @@ static const struct dis386 reg_table[][8] = {
>    {
>      { "%NFrolA",	{ VexGb, Eb, Ib }, NO_PREFIX },
>      { "%NFrorA",	{ VexGb, Eb, Ib }, NO_PREFIX },
> -    { "rclA",	{ VexGb, Eb, Ib }, NO_PREFIX },
> -    { "rcrA",	{ VexGb, Eb, Ib }, NO_PREFIX },
> +    { "%XErclA",	{ VexGb, Eb, Ib }, NO_PREFIX },
> +    { "%XErcrA",	{ VexGb, Eb, Ib }, NO_PREFIX },

... that patch wrongly added %NF here. So it looks like that patch was already
partly fixed in this regard (the NOT adjustment there will then also need
reflecting here). But please - anything like this needs to be made entirely
transparent to the reader.

> @@ -10595,7 +10595,9 @@ putop (instr_info *ins, const char *in_template, int sizeflag)
>  		  *ins->obufp++ = '}';
>  		  *ins->obufp++ = ' ';
>  		}
> -	      else if (ins->evex_type == evex_from_legacy && !ins->vex.b)
> +	      else if ((ins->evex_type == evex_from_legacy && !ins->vex.b)
> +		       || (ins->evex_type == evex_from_vex
> +			   && !((ins->rex2 & 7) || !ins->vex.v)))

This double negation is hard to follow; by De Morgan the same condition can be
written without it:

	      else if ((ins->evex_type == evex_from_legacy && !ins->vex.b)
		       || (ins->evex_type == evex_from_vex
			   && !(ins->rex2 & 7) && ins->vex.v))

Jan
