Commit b6f8c7c4 authored by H.J. Lu

x86: Add -O[2|s] assembler command-line options

On x86, some instructions have alternate shorter encodings:

1. When the upper 32 bits of destination registers of

andq $imm31, %r64
testq $imm31, %r64
xorq %r64, %r64
subq %r64, %r64

are known to be zero, we can encode them without the REX_W bit:

andl $imm31, %r32
testl $imm31, %r32
xorl %r32, %r32
subl %r32, %r32

This optimization is enabled with -O, -O2 and -Os (byte-level sketches
of all four optimizations follow this list).
2. Since a 0xb0 mov with a 32-bit destination register zero-extends the
32-bit immediate into the 64-bit destination register, we can use it to
encode a 64-bit mov with a 32-bit immediate.  This optimization is
enabled with -O, -O2 and -Os.
3. Since VEX128 and EVEX128 instructions zero-extend their result into
the upper bits of the destination register, an AVX256 or AVX512
instruction that sets all bits of its destination register to zero can
be encoded as VEX128 or EVEX128 instead.  When the 2 source registers
are identical, the AVX256 and AVX512 andn and xor instructions:

VOP %reg, %reg, %dest_reg

can be encoded with

VOP128 %reg, %reg, %dest_reg

This optimization is enabled with -O2 and -Os.
4. A 16-bit, 32-bit or 64-bit register test with an immediate that fits
in 7 bits may be encoded as an 8-bit register test with immediate.
This optimization is enabled with -Os.
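
For concreteness, a minimal sketch of what each rule does to the
emitted bytes (encodings taken from the test expectations below; the
operands are illustrative):

	xorq   %rax, %rax          # -O:  emitted as xorl %eax,%eax
	                           #      31 c0 (2 bytes instead of 3)
	movq   $1, %rax            # -O:  emitted as movl $1,%eax
	                           #      b8 01 00 00 00 (5 bytes instead of 7)
	vpxord %zmm0, %zmm0, %zmm0 # -O2: emitted as vpxor %xmm0,%xmm0,%xmm0
	                           #      c5 f9 ef c0 (4 bytes instead of 6)
	testl  $0x7f, %ebx         # -Os: emitted as testb $0x7f,%bl
	                           #      f6 c3 7f (3 bytes instead of 6)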

This patch does:

1. Add the {nooptimize} pseudo prefix to disable instruction size
optimization (see the example after this list).
2. Add optimize to i386_opcode_modifier to tell the assembler that the
encoding of an instruction may be optimized.
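
For example, a minimal sketch of the pseudo prefix (syntax as in the
new tests below):

	testl $0x7f, %eax              # -Os: encoded as testb $0x7f,%al (a8 7f)
	{nooptimize} testl $0x7f, %eax # kept as written (a9 7f 00 00 00)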

gas/

	PR gas/22871
	* NEWS: Mention -O[2|s].
	* config/tc-i386.c (_i386_insn): Add no_optimize.
	(optimize): New.
	(optimize_for_space): Likewise.
	(fits_in_imm7): New function.
	(fits_in_imm31): Likewise.
	(optimize_encoding): Likewise.
	(md_assemble): Call optimize_encoding to optimize encoding.
	(parse_insn): Handle {nooptimize}.
	(md_shortopts): Append "O::".
	(md_parse_option): Handle -On.
	* doc/c-i386.texi: Document -O0, -O, -O1, -O2 and -Os as well
	as {nooptimize}.
	* testsuite/gas/cfi/cfi-x86_64.d: Pass -O0 to assembler.
	* testsuite/gas/i386/ilp32/cfi/cfi-x86_64.d: Likewise.
	* testsuite/gas/i386/i386.exp: Run optimize-1, optimize-2,
	optimize-3, x86-64-optimize-1, x86-64-optimize-2,
	x86-64-optimize-3 and x86-64-optimize-4.
	* testsuite/gas/i386/optimize-1.d: New file.
	* testsuite/gas/i386/optimize-1.s: Likewise.
	* testsuite/gas/i386/optimize-2.d: Likewise.
	* testsuite/gas/i386/optimize-2.s: Likewise.
	* testsuite/gas/i386/optimize-3.d: Likewise.
	* testsuite/gas/i386/optimize-3.s: Likewise.
	* testsuite/gas/i386/x86-64-optimize-1.s: Likewise.
	* testsuite/gas/i386/x86-64-optimize-1.d: Likewise.
	* testsuite/gas/i386/x86-64-optimize-2.d: Likewise.
	* testsuite/gas/i386/x86-64-optimize-2.s: Likewise.
	* testsuite/gas/i386/x86-64-optimize-3.d: Likewise.
	* testsuite/gas/i386/x86-64-optimize-3.s: Likewise.
	* testsuite/gas/i386/x86-64-optimize-4.d: Likewise.
	* testsuite/gas/i386/x86-64-optimize-4.s: Likewise.

opcodes/

	PR gas/22871
	* i386-gen.c (opcode_modifiers): Add Optimize.
	* i386-opc.h (Optimize): New enum.
	(i386_opcode_modifier): Add optimize.
	* i386-opc.tbl: Add "Optimize" to "mov $imm, reg",
	"sub reg, reg/mem", "test $imm, acc", "test $imm, reg/mem",
	"and $imm, acc", "and $imm, reg/mem", "xor reg, reg/mem",
	"movq $imm, reg" and AVX256 and AVX512 versions of vandnps,
	vandnpd, vpandn, vpandnd, vpandnq, vxorps, vxorpd, vpxor,
	vpxord and vpxorq.
	* i386-tbl.h: Regenerated.
......
-*- text -*-
* Add -O[2|s] command-line options to x86 assembler to enable alternate
shorter instruction encoding.
* Add support for .nop directive. It is currently supported only for
x86 targets.
......
@@ -372,6 +372,9 @@ struct _i386_insn
/* Prefer the REX byte in encoding. */
bfd_boolean rex_encoding;
/* Disable instruction size optimization. */
bfd_boolean no_optimize;
/* How to encode vector instructions. */
enum
{
@@ -600,6 +603,22 @@ static enum check_kind
}
sse_check, operand_check = check_warning;
/* Optimization:
1. Clear the REX_W bit with register operand if possible.
2. Above plus use 128bit vector instruction to clear the full vector
register.
*/
static int optimize = 0;
/* Optimization:
1. Clear the REX_W bit with register operand if possible.
2. Above plus use 128bit vector instruction to clear the full vector
register.
3. Above plus optimize "test{q,l,w} $imm8,%r{64,32,16}" to
"testb $imm7,%r8".
*/
static int optimize_for_space = 0;
/* Register prefix used for error message. */
static const char *register_prefix = "%";
@@ -2185,6 +2204,18 @@ fits_in_imm4 (offsetT num)
return (num & 0xf) == num;
}
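/* Return non-zero if NUM fits in 7 bits, i.e. 0 <= NUM <= 0x7f.  */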
static INLINE int
fits_in_imm7 (offsetT num)
{
return (num & 0x7f) == num;
}
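/* Return non-zero if NUM fits in 31 bits, i.e. 0 <= NUM <= 0x7fffffff,
   so the value is the same whether zero- or sign-extended to 64 bits.  */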
static INLINE int
fits_in_imm31 (offsetT num)
{
return (num & 0x7fffffff) == num;
}
static i386_operand_type
smallest_imm_type (offsetT num)
{
@@ -3712,6 +3743,179 @@ check_hle (void)
}
}
/* Try the shortest encoding by shortening operand size. */
static void
optimize_encoding (void)
{
int j;
if (optimize_for_space
&& i.reg_operands == 1
&& i.imm_operands == 1
&& !i.types[1].bitfield.byte
&& i.op[0].imms->X_op == O_constant
&& fits_in_imm7 (i.op[0].imms->X_add_number)
&& ((i.tm.base_opcode == 0xa8
&& i.tm.extension_opcode == None)
|| (i.tm.base_opcode == 0xf6
&& i.tm.extension_opcode == 0x0)))
{
/* Optimize: -Os:
test $imm7, %r64/%r32/%r16 -> test $imm7, %r8
*/
unsigned int base_regnum = i.op[1].regs->reg_num;
if (flag_code == CODE_64BIT || base_regnum < 4)
{
i.types[1].bitfield.byte = 1;
/* Ignore the suffix. */
i.suffix = 0;
if (base_regnum >= 4
&& !(i.op[1].regs->reg_flags & RegRex))
{
/* Handle SP, BP, SI and DI registers. */
if (i.types[1].bitfield.word)
j = 16;
else if (i.types[1].bitfield.dword)
j = 32;
else
j = 48;
i.op[1].regs -= j;
}
}
}
else if (flag_code == CODE_64BIT
&& ((i.reg_operands == 1
&& i.imm_operands == 1
&& i.op[0].imms->X_op == O_constant
&& ((i.tm.base_opcode == 0xb0
&& i.tm.extension_opcode == None
&& fits_in_unsigned_long (i.op[0].imms->X_add_number))
|| (fits_in_imm31 (i.op[0].imms->X_add_number)
&& (((i.tm.base_opcode == 0x24
|| i.tm.base_opcode == 0xa8)
&& i.tm.extension_opcode == None)
|| (i.tm.base_opcode == 0x80
&& i.tm.extension_opcode == 0x4)
|| ((i.tm.base_opcode == 0xf6
|| i.tm.base_opcode == 0xc6)
&& i.tm.extension_opcode == 0x0)))))
|| (i.reg_operands == 2
&& i.op[0].regs == i.op[1].regs
&& ((i.tm.base_opcode == 0x30
|| i.tm.base_opcode == 0x28)
&& i.tm.extension_opcode == None)))
&& i.types[1].bitfield.qword)
{
/* Optimize: -O:
andq $imm31, %r64 -> andl $imm31, %r32
testq $imm31, %r64 -> testl $imm31, %r32
xorq %r64, %r64 -> xorl %r32, %r32
subq %r64, %r64 -> subl %r32, %r32
movq $imm31, %r64 -> movl $imm31, %r32
movq $imm32, %r64 -> movl $imm32, %r32
*/
i.tm.opcode_modifier.norex64 = 1;
if (i.tm.base_opcode == 0xb0 || i.tm.base_opcode == 0xc6)
{
/* Handle
movq $imm31, %r64 -> movl $imm31, %r32
movq $imm32, %r64 -> movl $imm32, %r32
*/
i.tm.operand_types[0].bitfield.imm32 = 1;
i.tm.operand_types[0].bitfield.imm32s = 0;
i.tm.operand_types[0].bitfield.imm64 = 0;
i.types[0].bitfield.imm32 = 1;
i.types[0].bitfield.imm32s = 0;
i.types[0].bitfield.imm64 = 0;
i.types[1].bitfield.dword = 1;
i.types[1].bitfield.qword = 0;
if (i.tm.base_opcode == 0xc6)
{
/* Handle
movq $imm31, %r64 -> movl $imm31, %r32
*/
i.tm.base_opcode = 0xb0;
i.tm.extension_opcode = None;
i.tm.opcode_modifier.shortform = 1;
i.tm.opcode_modifier.modrm = 0;
}
}
}
else if (optimize > 1
&& i.reg_operands == 3
&& i.op[0].regs == i.op[1].regs
&& !i.types[2].bitfield.xmmword
&& (i.tm.opcode_modifier.vex
|| (!i.mask
&& !i.rounding
&& i.tm.opcode_modifier.evex
&& cpu_arch_flags.bitfield.cpuavx512vl))
&& ((i.tm.base_opcode == 0x55
|| i.tm.base_opcode == 0x6655
|| i.tm.base_opcode == 0x66df
|| i.tm.base_opcode == 0x57
|| i.tm.base_opcode == 0x6657
|| i.tm.base_opcode == 0x66ef)
&& i.tm.extension_opcode == None))
{
/* Optimize: -O2:
VOP, one of vandnps, vandnpd, vxorps and vxorpd:
EVEX VOP %zmmM, %zmmM, %zmmN
-> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
-> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16)
EVEX VOP %ymmM, %ymmM, %ymmN
-> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
-> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16)
VEX VOP %ymmM, %ymmM, %ymmN
-> VEX VOP %xmmM, %xmmM, %xmmN
VOP, one of vpandn and vpxor:
VEX VOP %ymmM, %ymmM, %ymmN
-> VEX VOP %xmmM, %xmmM, %xmmN
VOP, one of vpandnd and vpandnq:
EVEX VOP %zmmM, %zmmM, %zmmN
-> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
-> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16)
EVEX VOP %ymmM, %ymmM, %ymmN
-> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
-> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16)
VOP, one of vpxord and vpxorq:
EVEX VOP %zmmM, %zmmM, %zmmN
-> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
-> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16)
EVEX VOP %ymmM, %ymmM, %ymmN
-> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
-> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16)
*/
if (i.tm.opcode_modifier.evex)
{
/* If only lower 16 vector registers are used, we can use
VEX encoding. */
for (j = 0; j < 3; j++)
if (register_number (i.op[j].regs) > 15)
break;
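/* Getting here with j < 3 means some operand uses register 16 or
   higher, which VEX cannot encode; keep EVEX, but switch to the
   128-bit vector length.  */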
if (j < 3)
i.tm.opcode_modifier.evex = EVEX128;
else
{
i.tm.opcode_modifier.vex = VEX128;
i.tm.opcode_modifier.vexw = VEXW0;
i.tm.opcode_modifier.evex = 0;
}
}
else
i.tm.opcode_modifier.vex = VEX128;
if (i.tm.opcode_modifier.vex)
for (j = 0; j < 3; j++)
{
i.types[j].bitfield.xmmword = 1;
i.types[j].bitfield.ymmword = 0;
}
}
}
/* This is the guts of the machine-dependent assembler. LINE points to a
machine dependent instruction. This function is supposed to emit
the frags/bytes it assembles to. */
@@ -3877,6 +4081,9 @@ md_assemble (char *line)
i.disp_operands = 0;
}
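/* Shorten the encoding only when -O is in effect, the opcode template
   is marked Optimize, and the user did not give the {nooptimize}
   pseudo prefix.  */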
if (optimize && !i.no_optimize && i.tm.opcode_modifier.optimize)
optimize_encoding ();
if (!process_suffix ())
return;
@@ -4131,6 +4338,10 @@ parse_insn (char *line, char *mnemonic)
/* {rex} */
i.rex_encoding = TRUE;
break;
case 0x8:
/* {nooptimize} */
i.no_optimize = TRUE;
break;
default:
abort ();
}
@@ -10074,9 +10285,9 @@ md_operand (expressionS *e)
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
-const char *md_shortopts = "kVQ:sqn";
+const char *md_shortopts = "kVQ:sqnO::";
#else
-const char *md_shortopts = "qn";
+const char *md_shortopts = "qnO::";
#endif
#define OPTION_32 (OPTION_MD_BASE + 0)
@@ -10513,6 +10724,27 @@ md_parse_option (int c, const char *arg)
intel64 = 1;
break;
case 'O':
if (arg == NULL)
{
optimize = 1;
/* Turn off -Os. */
optimize_for_space = 0;
}
else if (*arg == 's')
{
optimize_for_space = 1;
/* Turn on all encoding optimizations. */
optimize = -1;
}
else
{
optimize = atoi (arg);
/* Turn off -Os. */
optimize_for_space = 0;
}
break;
default:
return 0;
}
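
As a quick sanity check of the mapping above (hypothetical
invocations; the file names are illustrative):

	as -O  x.s -o x.o   # optimize = 1, optimize_for_space = 0
	as -O2 x.s -o x.o   # optimize = 2 (via atoi), optimize_for_space = 0
	as -Os x.s -o x.o   # optimize = -1 (all optimizations), optimize_for_space = 1
	as -O0 x.s -o x.o   # optimize = 0, size optimization disabled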
......
@@ -411,6 +411,28 @@ with 01, 10 and 11 RC bits, respectively.
This option specifies that the assembler should accept only AMD64 or
Intel64 ISA in 64-bit mode. The default is to accept both.
@cindex @samp{-O0} option, i386
@cindex @samp{-O0} option, x86-64
@cindex @samp{-O} option, i386
@cindex @samp{-O} option, x86-64
@cindex @samp{-O1} option, i386
@cindex @samp{-O1} option, x86-64
@cindex @samp{-O2} option, i386
@cindex @samp{-O2} option, x86-64
@cindex @samp{-Os} option, i386
@cindex @samp{-Os} option, x86-64
@item -O0 | -O | -O1 | -O2 | -Os
Optimize instruction encoding for smaller instruction size.  @samp{-O}
and @samp{-O1} encode 64-bit register load instructions with a 64-bit
immediate as 32-bit register load instructions with 31-bit or 32-bit
immediates and encode 64-bit register clearing instructions with 32-bit
register clearing instructions. @samp{-O2} includes @samp{-O1}
optimization plus encodes 256-bit and 512-bit vector register clearing
instructions with 128-bit vector register clearing instructions.
@samp{-Os} includes @samp{-O2} optimization plus encodes 16-bit, 32-bit
and 64-bit register tests with immediate as 8-bit register test with
immediate. @samp{-O0} turns off this optimization.
@end table
@c man end
@@ -647,6 +669,9 @@ Different encoding options can be specified via pseudo prefixes:
@samp{@{rex@}} -- prefer REX prefix for integer and legacy vector
instructions (x86-64 only). Note that this differs from the @samp{rex}
prefix which generates REX prefix unconditionally.
@item
@samp{@{nooptimize@}} -- disable instruction size optimization.
@end itemize
@cindex conversion instructions, i386
......
#as: -O0
#objdump: -Wf
#name: CFI on x86-64
#...
......
@@ -433,6 +433,9 @@ if [expr ([istarget "i*86-*-*"] || [istarget "x86_64-*-*"]) && [gas_32_check]]
run_list_test "inval-pseudo" "-al"
run_dump_test "nop-1"
run_dump_test "nop-2"
run_dump_test "optimize-1"
run_dump_test "optimize-2"
run_dump_test "optimize-3"
# These tests require support for 8 and 16 bit relocs,
# so we only run them for ELF and COFF targets.
@@ -913,6 +916,10 @@ if [expr ([istarget "i*86-*-*"] || [istarget "x86_64-*-*"]) && [gas_64_check]] then
run_dump_test "x86-64-movd-intel"
run_dump_test "x86-64-nop-1"
run_dump_test "x86-64-nop-2"
run_dump_test "x86-64-optimize-1"
run_dump_test "x86-64-optimize-2"
run_dump_test "x86-64-optimize-3"
run_dump_test "x86-64-optimize-4"
if { ![istarget "*-*-aix*"]
&& ![istarget "*-*-beos*"]
......
#source: ../../../cfi/cfi-x86_64.s
#as: -O0
#readelf: -wf
#name: CFI on x86-64
Contents of the .eh_frame section:
......
#as: -O2
#objdump: -drw
#name: optimized encoding 1 with -O2
.*: +file format .*
Disassembly of section .text:
0+ <_start>:
+[a-f0-9]+: 62 f1 f5 4f 55 e9 vandnpd %zmm1,%zmm1,%zmm5\{%k7\}
+[a-f0-9]+: 62 f1 f5 af 55 e9 vandnpd %ymm1,%ymm1,%ymm5\{%k7\}\{z\}
+[a-f0-9]+: c5 f1 55 e9 vandnpd %xmm1,%xmm1,%xmm5
+[a-f0-9]+: c5 f1 55 e9 vandnpd %xmm1,%xmm1,%xmm5
+[a-f0-9]+: 62 f1 74 4f 55 e9 vandnps %zmm1,%zmm1,%zmm5\{%k7\}
+[a-f0-9]+: 62 f1 74 af 55 e9 vandnps %ymm1,%ymm1,%ymm5\{%k7\}\{z\}
+[a-f0-9]+: c5 f0 55 e9 vandnps %xmm1,%xmm1,%xmm5
+[a-f0-9]+: c5 f0 55 e9 vandnps %xmm1,%xmm1,%xmm5
+[a-f0-9]+: c5 f1 df e9 vpandn %xmm1,%xmm1,%xmm5
+[a-f0-9]+: 62 f1 75 4f df e9 vpandnd %zmm1,%zmm1,%zmm5\{%k7\}
+[a-f0-9]+: 62 f1 75 af df e9 vpandnd %ymm1,%ymm1,%ymm5\{%k7\}\{z\}
+[a-f0-9]+: c5 f1 df e9 vpandn %xmm1,%xmm1,%xmm5
+[a-f0-9]+: c5 f1 df e9 vpandn %xmm1,%xmm1,%xmm5
+[a-f0-9]+: 62 f1 f5 4f df e9 vpandnq %zmm1,%zmm1,%zmm5\{%k7\}
+[a-f0-9]+: 62 f1 f5 af df e9 vpandnq %ymm1,%ymm1,%ymm5\{%k7\}\{z\}
+[a-f0-9]+: c5 f1 df e9 vpandn %xmm1,%xmm1,%xmm5
+[a-f0-9]+: c5 f1 df e9 vpandn %xmm1,%xmm1,%xmm5
+[a-f0-9]+: 62 f1 f5 4f 57 e9 vxorpd %zmm1,%zmm1,%zmm5\{%k7\}
+[a-f0-9]+: 62 f1 f5 af 57 e9 vxorpd %ymm1,%ymm1,%ymm5\{%k7\}\{z\}
+[a-f0-9]+: c5 f1 57 e9 vxorpd %xmm1,%xmm1,%xmm5
+[a-f0-9]+: c5 f1 57 e9 vxorpd %xmm1,%xmm1,%xmm5
+[a-f0-9]+: 62 f1 74 4f 57 e9 vxorps %zmm1,%zmm1,%zmm5\{%k7\}
+[a-f0-9]+: 62 f1 74 af 57 e9 vxorps %ymm1,%ymm1,%ymm5\{%k7\}\{z\}
+[a-f0-9]+: c5 f0 57 e9 vxorps %xmm1,%xmm1,%xmm5
+[a-f0-9]+: c5 f0 57 e9 vxorps %xmm1,%xmm1,%xmm5
+[a-f0-9]+: c5 f1 ef e9 vpxor %xmm1,%xmm1,%xmm5
+[a-f0-9]+: 62 f1 75 4f ef e9 vpxord %zmm1,%zmm1,%zmm5\{%k7\}
+[a-f0-9]+: 62 f1 75 af ef e9 vpxord %ymm1,%ymm1,%ymm5\{%k7\}\{z\}
+[a-f0-9]+: c5 f1 ef e9 vpxor %xmm1,%xmm1,%xmm5
+[a-f0-9]+: c5 f1 ef e9 vpxor %xmm1,%xmm1,%xmm5
+[a-f0-9]+: 62 f1 f5 4f ef e9 vpxorq %zmm1,%zmm1,%zmm5\{%k7\}
+[a-f0-9]+: 62 f1 f5 af ef e9 vpxorq %ymm1,%ymm1,%ymm5\{%k7\}\{z\}
+[a-f0-9]+: c5 f1 ef e9 vpxor %xmm1,%xmm1,%xmm5
+[a-f0-9]+: c5 f1 ef e9 vpxor %xmm1,%xmm1,%xmm5
#pass
# Check instructions with optimized encoding
.allow_index_reg
.text
_start:
vandnpd %zmm1, %zmm1, %zmm5{%k7}
vandnpd %ymm1, %ymm1, %ymm5{z}{%k7}
vandnpd %zmm1, %zmm1, %zmm5
vandnpd %ymm1, %ymm1, %ymm5
vandnps %zmm1, %zmm1, %zmm5{%k7}
vandnps %ymm1, %ymm1, %ymm5{z}{%k7}
vandnps %zmm1, %zmm1, %zmm5
vandnps %ymm1, %ymm1, %ymm5
vpandn %ymm1, %ymm1, %ymm5
vpandnd %zmm1, %zmm1, %zmm5{%k7}
vpandnd %ymm1, %ymm1, %ymm5{z}{%k7}
vpandnd %zmm1, %zmm1, %zmm5
vpandnd %ymm1, %ymm1, %ymm5
vpandnq %zmm1, %zmm1, %zmm5{%k7}
vpandnq %ymm1, %ymm1, %ymm5{z}{%k7}
vpandnq %zmm1, %zmm1, %zmm5
vpandnq %ymm1, %ymm1, %ymm5
vxorpd %zmm1, %zmm1, %zmm5{%k7}
vxorpd %ymm1, %ymm1, %ymm5{z}{%k7}
vxorpd %zmm1, %zmm1, %zmm5
vxorpd %ymm1, %ymm1, %ymm5
vxorps %zmm1, %zmm1, %zmm5{%k7}
vxorps %ymm1, %ymm1, %ymm5{z}{%k7}
vxorps %zmm1, %zmm1, %zmm5
vxorps %ymm1, %ymm1, %ymm5
vpxor %ymm1, %ymm1, %ymm5
vpxord %zmm1, %zmm1, %zmm5{%k7}
vpxord %ymm1, %ymm1, %ymm5{z}{%k7}
vpxord %zmm1, %zmm1, %zmm5
vpxord %ymm1, %ymm1, %ymm5
vpxorq %zmm1, %zmm1, %zmm5{%k7}
vpxorq %ymm1, %ymm1, %ymm5{z}{%k7}
vpxorq %zmm1, %zmm1, %zmm5
vpxorq %ymm1, %ymm1, %ymm5
#as: -Os
#objdump: -drw
#name: optimized encoding 2 with -Os
.*: +file format .*
Disassembly of section .text:
0+ <_start>:
+[a-f0-9]+: a8 7f test \$0x7f,%al
+[a-f0-9]+: a8 7f test \$0x7f,%al
+[a-f0-9]+: a8 7f test \$0x7f,%al
+[a-f0-9]+: f6 c3 7f test \$0x7f,%bl
+[a-f0-9]+: f6 c3 7f test \$0x7f,%bl
+[a-f0-9]+: f6 c3 7f test \$0x7f,%bl
+[a-f0-9]+: f7 c7 7f 00 00 00 test \$0x7f,%edi
+[a-f0-9]+: 66 f7 c7 7f 00 test \$0x7f,%di
#pass
# Check instructions with optimized encoding
.allow_index_reg
.text
_start:
testl $0x7f, %eax
testw $0x7f, %ax
testb $0x7f, %al
test $0x7f, %ebx
test $0x7f, %bx
test $0x7f, %bl
test $0x7f, %edi
test $0x7f, %di
#as: -Os
#objdump: -drw
#name: optimized encoding 3 with -Os
.*: +file format .*
Disassembly of section .text:
0+ <_start>:
+[a-f0-9]+: a9 7f 00 00 00 test \$0x7f,%eax
#pass
# Check instructions with optimized encoding
.allow_index_reg
.text
_start:
{nooptimize} testl $0x7f, %eax
#as: -O
#objdump: -drw
#name: x86-64 optimized encoding 1 with -O
.*: +file format .*
Disassembly of section .text:
0+ <_start>:
+[a-f0-9]+: 48 25 00 00 00 00 and \$0x0,%rax 2: R_X86_64_32S foo
+[a-f0-9]+: 25 ff ff ff 7f and \$0x7fffffff,%eax
+[a-f0-9]+: 81 e3 ff ff ff 7f and \$0x7fffffff,%ebx
+[a-f0-9]+: 41 81 e6 ff ff ff 7f and \$0x7fffffff,%r14d
+[a-f0-9]+: 48 25 00 00 00 80 and \$0xffffffff80000000,%rax
+[a-f0-9]+: 48 81 e3 00 00 00 80 and \$0xffffffff80000000,%rbx
+[a-f0-9]+: 49 81 e6 00 00 00 80 and \$0xffffffff80000000,%r14
+[a-f0-9]+: a9 ff ff ff 7f test \$0x7fffffff,%eax
+[a-f0-9]+: f7 c3 ff ff ff 7f test \$0x7fffffff,%ebx
+[a-f0-9]+: 41 f7 c6 ff ff ff 7f test \$0x7fffffff,%r14d
+[a-f0-9]+: 48 a9 00 00 00 80 test \$0xffffffff80000000,%rax
+[a-f0-9]+: 48 f7 c3 00 00 00 80 test \$0xffffffff80000000,%rbx
+[a-f0-9]+: 49 f7 c6 00 00 00 80 test \$0xffffffff80000000,%r14
+[a-f0-9]+: 48 33 06 xor \(%rsi\),%rax
+[a-f0-9]+: 31 c0 xor %eax,%eax
+[a-f0-9]+: 31 db xor %ebx,%ebx
+[a-f0-9]+: 45 31 f6 xor %r14d,%r14d
+[a-f0-9]+: 48 31 d0 xor %rdx,%rax
+[a-f0-9]+: 48 31 d3 xor %rdx,%rbx
+[a-f0-9]+: 49 31 d6 xor %rdx,%r14
+[a-f0-9]+: 29 c0 sub %eax,%eax
+[a-f0-9]+: 29 db sub %ebx,%ebx
+[a-f0-9]+: 45 29 f6 sub %r14d,%r14d
+[a-f0-9]+: 48 29 d0 sub %rdx,%rax
+[a-f0-9]+: 48 29 d3 sub %rdx,%rbx
+[a-f0-9]+: 49 29 d6 sub %rdx,%r14
+[a-f0-9]+: 48 81 20 ff ff ff 7f andq \$0x7fffffff,\(%rax\)
+[a-f0-9]+: 48 81 20 00 00 00 80 andq \$0xffffffff80000000,\(%rax\)
+[a-f0-9]+: 48 f7 00 ff ff ff 7f testq \$0x7fffffff,\(%rax\)
+[a-f0-9]+: 48 f7 00 00 00 00 80 testq \$0xffffffff80000000,\(%rax\)
+[a-f0-9]+: b8 ff ff ff 7f mov \$0x7fffffff,%eax
+[a-f0-9]+: b8 ff ff ff 7f mov \$0x7fffffff,%eax
+[a-f0-9]+: 41 b8 ff ff ff 7f mov \$0x7fffffff,%r8d
+[a-f0-9]+: 41 b8 ff ff ff 7f mov \$0x7fffffff,%r8d
+[a-f0-9]+: b8 ff ff ff ff mov \$0xffffffff,%eax
+[a-f0-9]+: b8 ff ff ff ff mov \$0xffffffff,%eax
+[a-f0-9]+: 41 b8 ff ff ff ff mov \$0xffffffff,%r8d
+[a-f0-9]+: 41 b8 ff ff ff ff mov \$0xffffffff,%r8d
+[a-f0-9]+: b8 ff 03 00 00 mov \$0x3ff,%eax
+[a-f0-9]+: b8 ff 03 00 00 mov \$0x3ff,%eax
+[a-f0-9]+: 48 b8 00 00 00 00 01 00 00 00 movabs \$0x100000000,%rax
+[a-f0-9]+: 48 b8 00 00 00 00 01 00 00 00 movabs \$0x100000000,%rax
#pass
# Check 64bit instructions with optimized encoding
.allow_index_reg
.text
_start:
andq $foo, %rax
andq $((1<<31) - 1), %rax
andq $((1<<31) - 1), %rbx
andq $((1<<31) - 1), %r14
andq $-((1<<31)), %rax
andq $-((1<<31)), %rbx
andq