This is the mail archive of the
binutils@sources.redhat.com
mailing list for the binutils project.
Thumb32 assembler (59/69)
- From: Zack Weinberg <zack at codesourcery dot com>
- To: binutils <binutils at sourceware dot org>
- Date: Tue, 26 Apr 2005 03:01:27 -0700
- Subject: Thumb32 assembler (59/69)
It is now possible to collapse the tinsns[] table into the insns[]
table. This does mean that the Thumb encoders have to enforce their
additional operand constraints by hand. To be sure it all works, I
wrote a new test for all those constraints.
zw
* config/tc-arm.c (struct asm_opcode): Move operands immediately after
template. Change value to avalue, variant to avariant, encode to
aencode; add tvalue, tvariant, tencode fields.
(T_OPCODE_ROR_R): New #define.
(arm_tops_hsh, OP_RL, OP_RLw, OP_RR_EX, OP_RL_EXi, OP_oRL): Delete.
(parse_operands): Delete processing of these codes.
(do_t_add_sub, do_t_arit, do_t_arit3, do_t_bkpt, do_t_ldmstm)
(do_t_lds, do_t_ldst, do_t_mul, do_t_shift): Enforce the additional
restrictions on operands that Thumb encoding has, over and above
the restrictions to the corresponding ARM instructions.
(do_t_branch9, do_t_branch12): Collapse into one function, do_t_branch.
(do_t_cpsi, do_t_nop): New functions.
(md_assemble): Look up all instructions in arm_ops_hsh. Use the
'a' or 't' fields from the opcode structure depending on whether
we are in Thumb mode.
(TCE, TCC, TCM, TUE, TUF): New opcode-table notational macros.
(CE, CM, UE, UF): Define in terms of them.
(TI): Delete.
(tinsns): Fold into insns.
(md_begin): Don't initialize arm_tops_hsh.
* testsuite/gas/arm/t16-bad.s, testsuite/gas/arm/t16-bad.l:
New test for error cases.
* testsuite/gas/arm/arm.exp: Run it.
===================================================================
Index: gas/config/tc-arm.c
--- gas/config/tc-arm.c (revision 61)
+++ gas/config/tc-arm.c (revision 62)
@@ -346,17 +346,24 @@
/* Basic string to match. */
const char *template;
+ /* Parameters to instruction. */
+ unsigned char operands[8];
+
/* Basic instruction code. */
- unsigned long value;
+ unsigned long avalue;
+ /* Thumb-format instruction code. */
+ unsigned long tvalue;
+
/* Which architecture variant provides this instruction. */
- unsigned long variant;
+ unsigned long avariant;
+ unsigned long tvariant;
- /* Parameters to instruction. */
- unsigned char operands[8];
+ /* Function to call to encode instruction in ARM format. */
+ void (* aencode) (void);
- /* Function to call to encode instruction. */
- void (* encode) (void);
+ /* Function to call to encode instruction in Thumb format. */
+ void (* tencode) (void);
};
/* Defines for various bits that we will want to toggle. */
@@ -414,6 +421,7 @@
#define T_OPCODE_ASR_R 0x4100
#define T_OPCODE_LSL_R 0x4080
#define T_OPCODE_LSR_R 0x40c0
+#define T_OPCODE_ROR_R 0x41c0
#define T_OPCODE_ASR_I 0x1000
#define T_OPCODE_LSL_I 0x0000
#define T_OPCODE_LSR_I 0x0800
@@ -457,7 +465,6 @@
#define BAD_HIREG _("lo register required")
static struct hash_control *arm_ops_hsh;
-static struct hash_control *arm_tops_hsh;
static struct hash_control *arm_cond_hsh;
static struct hash_control *arm_shift_hsh;
static struct hash_control *arm_psr_hsh;
@@ -3742,8 +3749,6 @@
OP_RRnpc, /* ARM register, not r15 */
OP_RRnpcb, /* ARM register, not r15, in square brackets */
OP_RRw, /* ARM register, not r15, optional trailing ! */
- OP_RL, /* Thumb low register */
- OP_RLw, /* Thumb low register, optional trailing ! */
OP_RCP, /* Coprocessor number */
OP_RCN, /* Coprocessor register */
OP_RF, /* FPA register */
@@ -3790,8 +3795,6 @@
OP_ENDI, /* Endianness specifier */
OP_PSR, /* CPSR/SPSR mask for msr */
- OP_RR_EX, /* ARM register or expression */
- OP_RL_EXi, /* Thumb low register or expression with imm prefix */
OP_RRnpc_I0, /* ARM register or literal 0 */
OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
OP_RR_EXi, /* ARM register or expression with imm prefix */
@@ -3801,13 +3804,10 @@
/* Optional operands. */
OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
OP_oI31b, /* 0 .. 31 */
- OP_oI255b, /* 0 .. 255 */
OP_oIffffb, /* 0 .. 65535 */
OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
- OP_oRL, /* Thumb low register */
OP_oRR, /* ARM register */
-
OP_oSHll, /* LSL immediate */
OP_oSHar, /* ASR immediate */
OP_oSHllar, /* LSL or ASR immediate */
@@ -3884,8 +3884,6 @@
{
/* Registers */
case OP_RRnpc:
- case OP_oRL:
- case OP_RL:
case OP_oRR:
case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
@@ -3911,7 +3909,6 @@
po_char_or_fail (']');
break;
- case OP_RLw:
case OP_RRw:
po_reg_or_fail (REG_TYPE_RN);
if (skip_past_char (&str, '!') == SUCCESS)
@@ -3934,7 +3931,6 @@
case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
case OP_oI31b:
case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
- case OP_oI255b: po_imm_or_fail ( 0, 255, TRUE); break;
case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
/* Immediate variants */
@@ -3968,7 +3964,7 @@
GE_OPT_PREFIX));
break;
- case OP_EXP: EXP:
+ case OP_EXP:
po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
GE_NO_PREFIX));
break;
@@ -3993,9 +3989,7 @@
break;
/* Register or expression */
- case OP_RR_EX: po_reg_or_goto (REG_TYPE_RN, EXP); break;
case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
- case OP_RL_EXi:
case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
/* Register or immediate */
@@ -4092,14 +4086,6 @@
inst.error = BAD_PC;
break;
- case OP_oRL:
- case OP_RL:
- case OP_RLw:
- case OP_RL_EXi:
- if (inst.operands[i].isreg && inst.operands[i].reg > 7)
- inst.error = BAD_HIREG;
- break;
-
case OP_CPSF:
case OP_ENDI:
case OP_oROR:
@@ -5027,19 +5013,23 @@
if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
{
+ constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
+ || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
+ BAD_HIREG);
inst.instruction |= (Rd << 4) | Rs;
inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
return;
}
Rn = inst.operands[2].reg;
+ constraint (inst.operands[2].shifted, _("unshifted register required"));
/* We now have Rd, Rs, and Rn set to registers. */
if (Rd > 7 || Rs > 7 || Rn > 7)
{
- constraint (Rs != Rd, _("dest and source1 must be the same register"));
/* Can't do this for SUB. */
- constraint (subtract, _("subtract valid only on lo regs"));
+ constraint (subtract, BAD_HIREG);
+ constraint (Rs != Rd, _("dest and source1 must be the same register"));
inst.instruction = T_OPCODE_ADD_HI;
inst.instruction |= (Rd & 8) << 4;
inst.instruction |= (Rd & 7);
@@ -5069,6 +5059,9 @@
static void
do_t_arit (void)
{
+ constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
+ _("unshifted register required"));
+ constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7, BAD_HIREG);
inst.instruction |= inst.operands[0].reg;
inst.instruction |= inst.operands[1].reg << 3;
}
@@ -5078,6 +5071,11 @@
static void
do_t_arit3 (void)
{
+ constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
+ _("unshifted register required"));
+ constraint (inst.operands[0].reg > 7
+ || inst.operands[1].reg > 7
+ || inst.operands[2].reg > 7, BAD_HIREG);
constraint (inst.operands[1].present
&& inst.operands[0].reg != inst.operands[1].reg,
_("dest and source1 must be the same register"));
@@ -5090,7 +5088,7 @@
BLX <target_addr> which is BLX(1)
BLX <Rm> which is BLX(2)
Unfortunately, there are two different opcodes for this mnemonic.
- So, the tinsns[].value is not used, and the code here zaps values
+ So, the insns[].value is not used, and the code here zaps values
into inst.instruction. */
static void
@@ -5110,17 +5108,23 @@
}
static void
-do_t_branch9 (void)
+do_t_branch (void)
{
- inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
+ inst.reloc.type = (inst.instruction == T_OPCODE_BRANCH
+ ? BFD_RELOC_THUMB_PCREL_BRANCH12
+ : BFD_RELOC_THUMB_PCREL_BRANCH9);
inst.reloc.pc_rel = 1;
}
static void
-do_t_branch12 (void)
+do_t_bkpt (void)
{
- inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
- inst.reloc.pc_rel = 1;
+ if (inst.operands[0].present)
+ {
+ constraint (inst.operands[0].imm > 255,
+ _("immediate value out of range"));
+ inst.instruction |= inst.operands[0].imm;
+ }
}
static void
@@ -5150,6 +5154,15 @@
because BX PC only works if the instruction is word aligned. */
}
+static void
+do_t_cpsi (void)
+{
+ constraint (inst.operands[1].present,
+ _("Thumb does not support the 2-argument "
+ "form of this instruction"));
+ inst.instruction |= inst.operands[0].imm;
+}
+
/* THUMB CPY instruction (argument parse). */
static void
@@ -5166,12 +5179,28 @@
/* This really doesn't seem worth it. */
constraint (inst.reloc.type != BFD_RELOC_UNUSED,
_("expression too complex"));
- constraint (inst.operands[1].imm & ~0xff,
- _("only lo-regs valid in load/store multiple"));
+ constraint (inst.operands[0].reg > 7
+ || (inst.operands[1].imm & ~0xff), BAD_HIREG);
constraint (inst.operands[1].writeback,
_("Thumb load/store multiple does not support {reglist}^"));
- if (!inst.operands[0].writeback)
- as_warn (_("Thumb load/store multiple always writes back base register"));
+ if (!(inst.instruction & THUMB_LOAD_BIT))
+ {
+ if (!inst.operands[0].writeback)
+ as_warn (_("this instruction will write back the base register"));
+ if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
+ && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
+ as_warn (_("value stored for r%d is UNPREDICTABLE"),
+ inst.operands[0].reg);
+ }
+ else
+ {
+ if (!inst.operands[0].writeback
+ && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
+ as_warn (_("this instruction will write back the base register"));
+ else if (inst.operands[0].writeback
+ && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
+ as_warn (_("this instruction will not write back the base register"));
+ }
inst.instruction |= inst.operands[0].reg << 8;
inst.instruction |= inst.operands[1].imm;
@@ -5184,8 +5213,10 @@
constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
|| inst.operands[1].postind || inst.operands[1].shifted
|| inst.operands[1].negative,
- _("invalid addressing mode"));
- constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
+ _("Thumb does not support this addressing mode"));
+ constraint (inst.operands[0].reg > 7
+ || inst.operands[1].reg > 7
+ || inst.operands[1].imm > 7, BAD_HIREG);
inst.instruction |= inst.operands[0].reg;
inst.instruction |= inst.operands[1].reg << 3;
@@ -5195,11 +5226,15 @@
static void
do_t_ldst (void)
{
+ constraint (inst.operands[0].reg > 7, BAD_HIREG);
+
if (!inst.operands[1].isreg)
if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
return;
- constraint (!inst.operands[1].preind || inst.operands[1].shifted,
+ constraint (!inst.operands[1].preind
+ || inst.operands[1].shifted
+ || inst.operands[1].writeback,
_("Thumb does not support this addressing mode"));
if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
{
@@ -5225,8 +5260,7 @@
return;
}
- constraint (inst.operands[0].reg > 7, BAD_HIREG);
-
+ constraint (inst.operands[1].reg > 7, BAD_HIREG);
if (!inst.operands[1].immisreg)
{
/* Immediate offset. */
@@ -5248,9 +5282,10 @@
else
{
/* Register offset. Opcode is already correct. */
- constraint (inst.operands[1].reg > 7, BAD_HIREG);
+ constraint (inst.operands[1].reg > 7
+ || inst.operands[1].imm > 7, BAD_HIREG);
constraint (inst.operands[1].negative,
- _("Thumb does not support [Rn,-Rm] addressing"));
+ _("Thumb does not support this addressing mode"));
inst.instruction |= inst.operands[0].reg;
inst.instruction |= inst.operands[1].reg << 3;
inst.instruction |= inst.operands[1].imm << 6;
@@ -5294,6 +5329,7 @@
static void
do_t_mul (void)
{
+ constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7, BAD_HIREG);
constraint (inst.operands[2].present
&& inst.operands[0].reg != inst.operands[2].reg,
_("dest and source2 must be the same register"));
@@ -5305,6 +5341,13 @@
}
static void
+do_t_nop (void)
+{
+ constraint (inst.operands[0].present,
+ _("Thumb does not support NOP with hints"));
+}
+
+static void
do_t_setend (void)
{
if (inst.operands[0].imm)
@@ -5318,8 +5361,11 @@
? inst.operands[1].reg
: inst.operands[0].reg);
+ constraint (inst.operands[0].reg > 7 || Rs > 7, BAD_HIREG);
+
if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
{
+ constraint (inst.operands[2].reg > 7, BAD_HIREG);
constraint (inst.operands[0].reg != Rs,
_("source1 and dest must be same register"));
inst.instruction |= inst.operands[0].reg;
@@ -5334,6 +5380,8 @@
case T_OPCODE_ASR_R: inst.instruction = T_OPCODE_ASR_I; break;
case T_OPCODE_LSL_R: inst.instruction = T_OPCODE_LSL_I; break;
case T_OPCODE_LSR_R: inst.instruction = T_OPCODE_LSR_I; break;
+ case T_OPCODE_ROR_R: inst.error = _("ror #imm not supported"); return;
+ default: abort ();
}
inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
inst.instruction |= inst.operands[0].reg | (Rs << 3);
@@ -5870,46 +5918,47 @@
return;
}
- if (thumb_mode)
+ opcode = hash_find_n (arm_ops_hsh, str, p - str);
+ if (opcode)
{
- opcode = hash_find_n (arm_tops_hsh, str, p - str);
-
- if (opcode)
+ if (thumb_mode)
{
/* Check that this instruction is supported for this CPU. */
- if (thumb_mode == 1 && (opcode->variant & cpu_variant) == 0)
+ if (thumb_mode == 1 && (opcode->tvariant & cpu_variant) == 0)
{
as_bad (_("selected processor does not support `%s'"), str);
return;
}
-
mapping_state (MAP_THUMB);
- inst.instruction = opcode->value;
- inst.size = (opcode->value > 0xffff ? 4 : 2);
+ inst.instruction = opcode->tvalue;
+ inst.size = (opcode->tvalue > 0xffff ? 4 : 2);
if (!parse_operands (p, opcode->operands))
- opcode->encode ();
- output_inst (str);
- return;
+ opcode->tencode ();
}
- }
- else
- {
- opcode = hash_find_n (arm_ops_hsh, str, p - str);
-
- if (opcode)
+ else
{
/* Check that this instruction is supported for this CPU. */
- if ((opcode->variant & cpu_variant) == 0)
+ if ((opcode->avariant & cpu_variant) == 0)
{
as_bad (_("selected processor does not support `%s'"), str);
return;
}
mapping_state (MAP_ARM);
- inst.instruction = opcode->value;
+ inst.instruction = opcode->avalue;
inst.size = INSN_SIZE;
if (!parse_operands (p, opcode->operands))
- opcode->encode ();
+ opcode->aencode ();
+ }
+ output_inst (str);
+ return;
+ }
+ else
+ {
+ opcode = hash_find_n (arm_ops_hsh, str, p - str);
+
+ if (opcode)
+ {
output_inst (str);
return;
}
@@ -6253,161 +6302,193 @@
/* These macros assemble the conditional variants of each instruction
from its bare form. */
-#define CE(mnem, opc, nops, ops, aenc) \
- { #mnem, 0xe ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #mnem "eq", 0x0 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #mnem "ne", 0x1 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #mnem "cs", 0x2 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #mnem "hs", 0x2 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #mnem "cc", 0x3 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #mnem "ul", 0x3 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #mnem "lo", 0x3 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #mnem "mi", 0x4 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #mnem "pl", 0x5 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #mnem "vs", 0x6 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #mnem "vc", 0x7 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #mnem "hi", 0x8 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #mnem "ls", 0x9 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #mnem "ge", 0xa ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #mnem "lt", 0xb ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #mnem "gt", 0xc ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #mnem "le", 0xd ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #mnem "al", 0xe ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }
+#define TCE(mnem, op, top, nops, ops, ae, te) \
+ { #mnem, OPS##nops ops, 0xe##op, 0x##top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }, \
+ { #mnem "eq", OPS##nops ops, 0x0##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #mnem "ne", OPS##nops ops, 0x1##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #mnem "cs", OPS##nops ops, 0x2##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #mnem "hs", OPS##nops ops, 0x2##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #mnem "cc", OPS##nops ops, 0x3##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #mnem "ul", OPS##nops ops, 0x3##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #mnem "lo", OPS##nops ops, 0x3##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #mnem "mi", OPS##nops ops, 0x4##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #mnem "pl", OPS##nops ops, 0x5##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #mnem "vs", OPS##nops ops, 0x6##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #mnem "vc", OPS##nops ops, 0x7##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #mnem "hi", OPS##nops ops, 0x8##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #mnem "ls", OPS##nops ops, 0x9##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #mnem "ge", OPS##nops ops, 0xa##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #mnem "lt", OPS##nops ops, 0xb##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #mnem "gt", OPS##nops ops, 0xc##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #mnem "le", OPS##nops ops, 0xd##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #mnem "al", OPS##nops ops, 0xe##op, 0, ARM_VARIANT, 0, do_##ae, 0 }
-#define CM(m1, m2, opc, nops, ops, aenc) \
- { #m1 #m2, 0xe ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #m1 "eq" #m2, 0x0 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #m1 "ne" #m2, 0x1 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #m1 "cs" #m2, 0x2 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #m1 "hs" #m2, 0x2 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #m1 "cc" #m2, 0x3 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #m1 "ul" #m2, 0x3 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #m1 "lo" #m2, 0x3 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #m1 "mi" #m2, 0x4 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #m1 "pl" #m2, 0x5 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #m1 "vs" #m2, 0x6 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #m1 "vc" #m2, 0x7 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #m1 "hi" #m2, 0x8 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #m1 "ls" #m2, 0x9 ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #m1 "ge" #m2, 0xa ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #m1 "lt" #m2, 0xb ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #m1 "gt" #m2, 0xc ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #m1 "le" #m2, 0xd ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }, \
- { #m1 "al" #m2, 0xe ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }
+#define TCC(mnem, op, tu, t1, t2, nops, ops, ae, te) \
+ { #mnem, OPS##nops ops,0xe##op,0x##tu, ARM_VARIANT,THUMB_VARIANT,do_##ae,do_##te }, \
+ { #mnem "eq", OPS##nops ops,0x0##op,0x##t1##0##t2,ARM_VARIANT,THUMB_VARIANT,do_##ae,do_##te }, \
+ { #mnem "ne", OPS##nops ops,0x1##op,0x##t1##1##t2,ARM_VARIANT,THUMB_VARIANT,do_##ae,do_##te }, \
+ { #mnem "cs", OPS##nops ops,0x2##op,0x##t1##2##t2,ARM_VARIANT,THUMB_VARIANT,do_##ae,do_##te }, \
+ { #mnem "hs", OPS##nops ops,0x2##op,0x##t1##2##t2,ARM_VARIANT,THUMB_VARIANT,do_##ae,do_##te }, \
+ { #mnem "cc", OPS##nops ops,0x3##op,0x##t1##3##t2,ARM_VARIANT,THUMB_VARIANT,do_##ae,do_##te }, \
+ { #mnem "ul", OPS##nops ops,0x3##op,0x##t1##3##t2,ARM_VARIANT,THUMB_VARIANT,do_##ae,do_##te }, \
+ { #mnem "lo", OPS##nops ops,0x3##op,0x##t1##3##t2,ARM_VARIANT,THUMB_VARIANT,do_##ae,do_##te }, \
+ { #mnem "mi", OPS##nops ops,0x4##op,0x##t1##4##t2,ARM_VARIANT,THUMB_VARIANT,do_##ae,do_##te }, \
+ { #mnem "pl", OPS##nops ops,0x5##op,0x##t1##5##t2,ARM_VARIANT,THUMB_VARIANT,do_##ae,do_##te }, \
+ { #mnem "vs", OPS##nops ops,0x6##op,0x##t1##6##t2,ARM_VARIANT,THUMB_VARIANT,do_##ae,do_##te }, \
+ { #mnem "vc", OPS##nops ops,0x7##op,0x##t1##7##t2,ARM_VARIANT,THUMB_VARIANT,do_##ae,do_##te }, \
+ { #mnem "hi", OPS##nops ops,0x8##op,0x##t1##8##t2,ARM_VARIANT,THUMB_VARIANT,do_##ae,do_##te }, \
+ { #mnem "ls", OPS##nops ops,0x9##op,0x##t1##9##t2,ARM_VARIANT,THUMB_VARIANT,do_##ae,do_##te }, \
+ { #mnem "ge", OPS##nops ops,0xa##op,0x##t1##a##t2,ARM_VARIANT,THUMB_VARIANT,do_##ae,do_##te }, \
+ { #mnem "lt", OPS##nops ops,0xb##op,0x##t1##b##t2,ARM_VARIANT,THUMB_VARIANT,do_##ae,do_##te }, \
+ { #mnem "gt", OPS##nops ops,0xc##op,0x##t1##c##t2,ARM_VARIANT,THUMB_VARIANT,do_##ae,do_##te }, \
+ { #mnem "le", OPS##nops ops,0xd##op,0x##t1##d##t2,ARM_VARIANT,THUMB_VARIANT,do_##ae,do_##te }, \
+ { #mnem "al", OPS##nops ops,0xe##op,0x##t1##e##t2,ARM_VARIANT,THUMB_VARIANT,do_##ae,do_##te }
-#define UE(mnem, opc, nops, ops, aenc) \
- { #mnem, 0xe ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }
+#define TCM(m1, m2, op, top, nops, ops, ae, te) \
+ { #m1 #m2, OPS##nops ops, 0xe##op, 0x##top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }, \
+ { #m1 "eq" #m2, OPS##nops ops, 0x0##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #m1 "ne" #m2, OPS##nops ops, 0x1##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #m1 "cs" #m2, OPS##nops ops, 0x2##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #m1 "hs" #m2, OPS##nops ops, 0x2##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #m1 "cc" #m2, OPS##nops ops, 0x3##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #m1 "ul" #m2, OPS##nops ops, 0x3##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #m1 "lo" #m2, OPS##nops ops, 0x3##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #m1 "mi" #m2, OPS##nops ops, 0x4##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #m1 "pl" #m2, OPS##nops ops, 0x5##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #m1 "vs" #m2, OPS##nops ops, 0x6##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #m1 "vc" #m2, OPS##nops ops, 0x7##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #m1 "hi" #m2, OPS##nops ops, 0x8##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #m1 "ls" #m2, OPS##nops ops, 0x9##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #m1 "ge" #m2, OPS##nops ops, 0xa##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #m1 "lt" #m2, OPS##nops ops, 0xb##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #m1 "gt" #m2, OPS##nops ops, 0xc##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #m1 "le" #m2, OPS##nops ops, 0xd##op, 0, ARM_VARIANT, 0, do_##ae, 0 }, \
+ { #m1 "al" #m2, OPS##nops ops, 0xe##op, 0, ARM_VARIANT, 0, do_##ae, 0 }
-#define UF(mnem, opc, nops, ops, aenc) \
- { #mnem, 0xf ## opc, ARM_VARIANT, OPS##nops ops, do_ ## aenc }
+#define TUE(mnem, op, top, nops, ops, ae, te) \
+ { #mnem, OPS##nops ops, 0xe##op, 0x##top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
+#define TUF(mnem, op, top, nops, ops, ae, te) \
+ { #mnem, OPS##nops ops, 0xf##op, 0x##top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
+
+#define CE(mnem, op, nops, ops, ae) TCE(mnem, op, 0, nops, ops, ae, 0)
+#define CM(m1,m2, op, nops, ops, ae) TCM(m1,m2, op, 0, nops, ops, ae, 0)
+#define UE(mnem, op, nops, ops, ae) TUE(mnem, op, 0, nops, ops, ae, 0)
+#define UF(mnem, op, nops, ops, ae) TUF(mnem, op, 0, nops, ops, ae, 0)
+#define do_0 0
+
static const struct asm_opcode insns[] =
{
#define ARM_VARIANT ARM_EXT_V1 /* Core ARM Instructions. */
- CE(and, 0000000, 3, (RR, oRR, SH), arit),
- CM(and,s, 0100000, 3, (RR, oRR, SH), arit),
- CE(eor, 0200000, 3, (RR, oRR, SH), arit),
- CM(eor,s, 0300000, 3, (RR, oRR, SH), arit),
- CE(sub, 0400000, 3, (RR, oRR, SH), arit),
- CM(sub,s, 0500000, 3, (RR, oRR, SH), arit),
- CE(rsb, 0600000, 3, (RR, oRR, SH), arit),
- CM(rsb,s, 0700000, 3, (RR, oRR, SH), arit),
- CE(add, 0800000, 3, (RR, oRR, SH), arit),
- CM(add,s, 0900000, 3, (RR, oRR, SH), arit),
- CE(adc, 0a00000, 3, (RR, oRR, SH), arit),
- CM(adc,s, 0b00000, 3, (RR, oRR, SH), arit),
- CE(sbc, 0c00000, 3, (RR, oRR, SH), arit),
- CM(sbc,s, 0d00000, 3, (RR, oRR, SH), arit),
- CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
- CM(rsc,s, 0f00000, 3, (RR, oRR, SH), arit),
- CE(orr, 1800000, 3, (RR, oRR, SH), arit),
- CM(orr,s, 1900000, 3, (RR, oRR, SH), arit),
- CE(bic, 1c00000, 3, (RR, oRR, SH), arit),
- CM(bic,s, 1d00000, 3, (RR, oRR, SH), arit),
+#define THUMB_VARIANT ARM_EXT_V4T
+ TCE(and, 0000000, 4000, 3, (RR, oRR, SH), arit, t_arit3),
+ CM(and,s, 0100000, 3, (RR, oRR, SH), arit),
+ TCE(eor, 0200000, 4040, 3, (RR, oRR, SH), arit, t_arit3),
+ CM(eor,s, 0300000, 3, (RR, oRR, SH), arit),
+ TCE(sub, 0400000, 8000, 3, (RR, oRR, SH), arit, t_add_sub),
+ CM(sub,s, 0500000, 3, (RR, oRR, SH), arit),
+ CE(rsb, 0600000, 3, (RR, oRR, SH), arit),
+ CM(rsb,s, 0700000, 3, (RR, oRR, SH), arit),
+ TCE(add, 0800000, 0000, 3, (RR, oRR, SH), arit, t_add_sub),
+ CM(add,s, 0900000, 3, (RR, oRR, SH), arit),
+ TCE(adc, 0a00000, 4140, 3, (RR, oRR, SH), arit, t_arit3),
+ CM(adc,s, 0b00000, 3, (RR, oRR, SH), arit),
+ TCE(sbc, 0c00000, 4180, 3, (RR, oRR, SH), arit, t_arit3),
+ CM(sbc,s, 0d00000, 3, (RR, oRR, SH), arit),
+ CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
+ CM(rsc,s, 0f00000, 3, (RR, oRR, SH), arit),
+ TCE(orr, 1800000, 4300, 3, (RR, oRR, SH), arit, t_arit3),
+ CM(orr,s, 1900000, 3, (RR, oRR, SH), arit),
+ TCE(bic, 1c00000, 4380, 3, (RR, oRR, SH), arit, t_arit3),
+ CM(bic,s, 1d00000, 3, (RR, oRR, SH), arit),
- CE(tst, 1100000, 2, (RR, SH), cmp),
- CM(tst,s, 1100000, 2, (RR, SH), cmp),
- CM(tst,p, 110f000, 2, (RR, SH), cmp),
- CE(teq, 1300000, 2, (RR, SH), cmp),
- CM(teq,s, 1300000, 2, (RR, SH), cmp),
- CM(teq,p, 130f000, 2, (RR, SH), cmp),
- CE(cmp, 1500000, 2, (RR, SH), cmp),
- CM(cmp,s, 1500000, 2, (RR, SH), cmp),
- CM(cmp,p, 150f000, 2, (RR, SH), cmp),
- CE(cmn, 1700000, 2, (RR, SH), cmp),
- CM(cmn,s, 1700000, 2, (RR, SH), cmp),
- CM(cmn,p, 170f000, 2, (RR, SH), cmp),
+ TCE(tst, 1100000, 4200, 2, (RR, SH), cmp, t_arit),
+ CM(tst,s, 1100000, 2, (RR, SH), cmp),
+ CM(tst,p, 110f000, 2, (RR, SH), cmp),
+ CE(teq, 1300000, 2, (RR, SH), cmp),
+ CM(teq,s, 1300000, 2, (RR, SH), cmp),
+ CM(teq,p, 130f000, 2, (RR, SH), cmp),
+ TCE(cmp, 1500000, 4280, 2, (RR, SH), cmp, t_mov_cmp),
+ CM(cmp,s, 1500000, 2, (RR, SH), cmp),
+ CM(cmp,p, 150f000, 2, (RR, SH), cmp),
+ TCE(cmn, 1700000, 42c0, 2, (RR, SH), cmp, t_arit),
+ CM(cmn,s, 1700000, 2, (RR, SH), cmp),
+ CM(cmn,p, 170f000, 2, (RR, SH), cmp),
- CE(mov, 1a00000, 2, (RR, SH), mov),
- CM(mov,s, 1b00000, 2, (RR, SH), mov),
- CE(mvn, 1e00000, 2, (RR, SH), mov),
- CM(mvn,s, 1f00000, 2, (RR, SH), mov),
+ TCE(mov, 1a00000, 4600, 2, (RR, SH), mov, t_mov_cmp),
+ CM(mov,s, 1b00000, 2, (RR, SH), mov),
+ TCE(mvn, 1e00000, 43c0, 2, (RR, SH), mov, t_arit),
+ CM(mvn,s, 1f00000, 2, (RR, SH), mov),
- CE(ldr, 4100000, 2, (RR, ADDR), ldst),
- CM(ldr,b, 4500000, 2, (RR, ADDR), ldst),
- CM(ldr,t, 4300000, 2, (RR, ADDR), ldstt),
- CM(ldr,bt, 4700000, 2, (RR, ADDR), ldstt),
- CE(str, 4000000, 2, (RR, ADDR), ldst),
- CM(str,b, 4400000, 2, (RR, ADDR), ldst),
- CM(str,t, 4200000, 2, (RR, ADDR), ldstt),
- CM(str,bt, 4600000, 2, (RR, ADDR), ldstt),
+ TCE(ldr, 4100000, 5800, 2, (RR, ADDR), ldst, t_ldst),
+ TCM(ldr,b, 4500000, 5c00, 2, (RR, ADDR), ldst, t_ldst),
+ CM(ldr,t, 4300000, 2, (RR, ADDR), ldstt),
+ CM(ldr,bt, 4700000, 2, (RR, ADDR), ldstt),
+ TCE(str, 4000000, 5000, 2, (RR, ADDR), ldst, t_ldst),
+ TCM(str,b, 4400000, 5400, 2, (RR, ADDR), ldst, t_ldst),
+ CM(str,t, 4200000, 2, (RR, ADDR), ldstt),
+ CM(str,bt, 4600000, 2, (RR, ADDR), ldstt),
- CM(stm,ia, 8800000, 2, (RRw, REGLST), ldmstm),
- CM(stm,ib, 9800000, 2, (RRw, REGLST), ldmstm),
- CM(stm,da, 8000000, 2, (RRw, REGLST), ldmstm),
- CM(stm,db, 9000000, 2, (RRw, REGLST), ldmstm),
- CM(stm,fd, 9000000, 2, (RRw, REGLST), ldmstm),
- CM(stm,fa, 9800000, 2, (RRw, REGLST), ldmstm),
- CM(stm,ea, 8800000, 2, (RRw, REGLST), ldmstm),
- CM(stm,ed, 8000000, 2, (RRw, REGLST), ldmstm),
+ TCM(stm,ia, 8800000, c000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
+ CM(stm,ib, 9800000, 2, (RRw, REGLST), ldmstm),
+ CM(stm,da, 8000000, 2, (RRw, REGLST), ldmstm),
+ CM(stm,db, 9000000, 2, (RRw, REGLST), ldmstm),
+ CM(stm,fd, 9000000, 2, (RRw, REGLST), ldmstm),
+ CM(stm,fa, 9800000, 2, (RRw, REGLST), ldmstm),
+ CM(stm,ea, 8800000, 2, (RRw, REGLST), ldmstm),
+ CM(stm,ed, 8000000, 2, (RRw, REGLST), ldmstm),
- CM(ldm,ia, 8900000, 2, (RRw, REGLST), ldmstm),
- CM(ldm,ib, 9900000, 2, (RRw, REGLST), ldmstm),
- CM(ldm,da, 8100000, 2, (RRw, REGLST), ldmstm),
- CM(ldm,db, 9100000, 2, (RRw, REGLST), ldmstm),
- CM(ldm,fd, 8900000, 2, (RRw, REGLST), ldmstm),
- CM(ldm,fa, 8100000, 2, (RRw, REGLST), ldmstm),
- CM(ldm,ea, 9100000, 2, (RRw, REGLST), ldmstm),
- CM(ldm,ed, 9900000, 2, (RRw, REGLST), ldmstm),
+ TCM(ldm,ia, 8900000, c800, 2, (RRw, REGLST), ldmstm, t_ldmstm),
+ CM(ldm,ib, 9900000, 2, (RRw, REGLST), ldmstm),
+ CM(ldm,da, 8100000, 2, (RRw, REGLST), ldmstm),
+ CM(ldm,db, 9100000, 2, (RRw, REGLST), ldmstm),
+ CM(ldm,fd, 8900000, 2, (RRw, REGLST), ldmstm),
+ CM(ldm,fa, 8100000, 2, (RRw, REGLST), ldmstm),
+ CM(ldm,ea, 9100000, 2, (RRw, REGLST), ldmstm),
+ CM(ldm,ed, 9900000, 2, (RRw, REGLST), ldmstm),
- CE(swi, f000000, 1, (EXPi), swi),
+ TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
#ifdef TE_WINCE
/* XXX This is the wrong place to do this. Think multi-arch. */
- CE(b, a000000, 1, (EXPr), branch),
- CE(bl, b000000, 1, (EXPr), branch),
+ TCC(b, a000000, e7fe, d,fe, 1, (EXPr), branch, t_branch),
+ TCE(bl, b000000, f7fffffe, 1, (EXPr), branch, t_branch23),
#else
- CE(b, afffffe, 1, (EXPr), branch),
- CE(bl, bfffffe, 1, (EXPr), branch),
+ TCC(b, afffffe, e7fe, d,fe, 1, (EXPr), branch, t_branch),
+ TCE(bl, bfffffe, f7fffffe, 1, (EXPr), branch, t_branch23),
#endif
/* Pseudo ops. */
- CE(adr, 28f0000, 2, (RR, EXP), adr),
- CM(adr,l, 28f0000, 2, (RR, EXP), adrl),
- CE(nop, 1a00000, 1, (oI255c), nop),
+ TCE(adr, 28f0000, 000f, 2, (RR, EXP), adr, t_adr),
+ CM(adr,l, 28f0000, 2, (RR, EXP), adrl),
+ TCE(nop, 1a00000, 46c0, 1, (oI255c), nop, t_nop),
/* Thumb-compatibility pseudo ops. Note that the 16-bit Thumb
instruction is equivalent to the s-form of the ARM instruction
in all cases where that form exists. */
- CE(cpy, 1a00000, 2, (RR, RR), rd_rm), /* mov */
- CE(lsl, 1a00000, 3, (RR, oRR, RR_EXi), shift),
- CM(lsl,s, 1b00000, 3, (RR, oRR, RR_EXi), shift),
- CE(lsr, 1a00020, 3, (RR, oRR, RR_EXi), shift),
- CM(lsr,s, 1b00020, 3, (RR, oRR, RR_EXi), shift),
- CE(asr, 1a00040, 3, (RR, oRR, RR_EXi), shift),
- CM(asr,s, 1b00040, 3, (RR, oRR, RR_EXi), shift),
- CE(ror, 1a00060, 3, (RR, oRR, RR_EXi), shift),
- CM(ror,s, 1b00060, 3, (RR, oRR, RR_EXi), shift),
- CE(neg, 2600000, 2, (RR, RR), rd_rn), /* rsbs */
- CM(neg,s, 2700000, 2, (RR, RR), rd_rn),
- CE(push, 92d0000, 1, (REGLST), push_pop), /* stmfd */
- CE(pop, 8bd0000, 1, (REGLST), push_pop), /* ldmfd */
+ TCE(lsl, 1a00000, 4080, 3, (RR, oRR, RR_EXi), shift, t_shift),
+ CM(lsl,s, 1b00000, 3, (RR, oRR, RR_EXi), shift),
+ TCE(lsr, 1a00020, 40c0, 3, (RR, oRR, RR_EXi), shift, t_shift),
+ CM(lsr,s, 1b00020, 3, (RR, oRR, RR_EXi), shift),
+ TCE(asr, 1a00040, 4100, 3, (RR, oRR, RR_EXi), shift, t_shift),
+ CM(asr,s, 1b00040, 3, (RR, oRR, RR_EXi), shift),
+ TCE(ror, 1a00060, 41c0, 3, (RR, oRR, RR_EXi), shift, t_shift),
+ CM(ror,s, 1b00060, 3, (RR, oRR, RR_EXi), shift),
+ TCE(neg, 2600000, 4240, 2, (RR, RR), rd_rn, t_arit),
+ CM(neg,s, 2700000, 2, (RR, RR), rd_rn),
+ TCE(push, 92d0000, b400, 1, (REGLST), push_pop, t_push_pop),
+ TCE(pop, 8bd0000, bc00, 1, (REGLST), push_pop, t_push_pop),
+#undef THUMB_VARIANT
+#define THUMB_VARIANT ARM_EXT_V6
+ TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
#undef ARM_VARIANT
#define ARM_VARIANT ARM_EXT_V2 /* ARM 2 - multiplies. */
- CE(mul, 0000090, 3, (RRnpc, RRnpc, oRR), mul),
- CM(mul,s, 0100090, 3, (RRnpc, RRnpc, oRR), mul),
- CE(mla, 0200090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
- CM(mla,s, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
+#undef THUMB_VARIANT
+#define THUMB_VARIANT ARM_EXT_V4T
+ TCE(mul, 0000090, 4340, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
+ CM(mul,s, 0100090, 3, (RRnpc, RRnpc, oRR), mul),
+ CE(mla, 0200090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
+ CM(mla,s, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
/* Generic coprocessor instructions. */
CE(cdp, e000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp),
@@ -6441,32 +6522,36 @@
#undef ARM_VARIANT
#define ARM_VARIANT ARM_EXT_V4 /* ARM Architecture 4. */
- CM(ldr,h, 01000b0, 2, (RR, ADDR), ldstv4),
- CM(ldr,sh, 01000f0, 2, (RR, ADDR), ldstv4),
- CM(ldr,sb, 01000d0, 2, (RR, ADDR), ldstv4),
- CM(str,h, 00000b0, 2, (RR, ADDR), ldstv4),
+ TCM(ldr,h, 01000b0, 5a00, 2, (RR, ADDR), ldstv4, t_ldst),
+ TCM(str,h, 00000b0, 5200, 2, (RR, ADDR), ldstv4, t_ldst),
+ TCM(ldr,sh, 01000f0, 5e00, 2, (RR, ADDR), ldstv4, t_lds),
+ TCM(ldr,sb, 01000d0, 5600, 2, (RR, ADDR), ldstv4, t_lds),
+ TCM(ld,sh, 01000f0, 5e00, 2, (RR, ADDR), ldstv4, t_lds),
+ TCM(ld,sb, 01000d0, 5600, 2, (RR, ADDR), ldstv4, t_lds),
#undef ARM_VARIANT
#define ARM_VARIANT ARM_EXT_V4T|ARM_EXT_V5
/* ARM Architecture 4T. */
/* Note: bx (and blx) are required on V5, even if the processor does
not support Thumb. */
- CE(bx, 12fff10, 1, (RR), bx),
+ TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
#undef ARM_VARIANT
#define ARM_VARIANT ARM_EXT_V5 /* ARM Architecture 5T. */
+#undef THUMB_VARIANT
+#define THUMB_VARIANT ARM_EXT_V5T
/* Note: blx has 2 variants; the .value coded here is for
BLX(2). Only this variant has conditional execution. */
- CE(blx, 12fff30, 1, (RR_EXr), blx),
- CE(clz, 16f0f10, 2, (RRnpc, RRnpc), rd_rm),
- UE(bkpt, 1200070, 1, (oIffffb), bkpt),
- UF(ldc2, c100000, 3, (RCP, RCN, ADDR), lstc),
- UF(ldc2l, c500000, 3, (RCP, RCN, ADDR), lstc),
- UF(stc2, c000000, 3, (RCP, RCN, ADDR), lstc),
- UF(stc2l, c400000, 3, (RCP, RCN, ADDR), lstc),
- UF(cdp2, e000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp),
- UF(mcr2, e000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg),
- UF(mrc2, e100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg),
+ TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
+ CE(clz, 16f0f10, 2, (RRnpc, RRnpc), rd_rm),
+ TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
+ UF(ldc2, c100000, 3, (RCP, RCN, ADDR), lstc),
+ UF(ldc2l, c500000, 3, (RCP, RCN, ADDR), lstc),
+ UF(stc2, c000000, 3, (RCP, RCN, ADDR), lstc),
+ UF(stc2l, c400000, 3, (RCP, RCN, ADDR), lstc),
+ UF(cdp2, e000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp),
+ UF(mcr2, e000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg),
+ UF(mrc2, e100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg),
#undef ARM_VARIANT
#define ARM_VARIANT ARM_EXT_V5ExP /* ARM Architecture 5TExP. */
@@ -6511,119 +6596,123 @@
#undef ARM_VARIANT
#define ARM_VARIANT ARM_EXT_V6 /* ARM V6. */
- UF(cps, 1020000, 1, (I31b), imm0),
- UF(cpsie, 1080000, 2, (CPSF, oI31b), cpsi),
- UF(cpsid, 10c0000, 2, (CPSF, oI31b), cpsi),
- CE(ldrex, 1900f9f, 2, (RRnpc, RRnpcb), rd_rn),
- UF(mcrr2, c400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c),
- UF(mrrc2, c500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c),
- CE(pkhbt, 6800010, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt),
- CE(pkhtb, 6800050, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb),
- CE(qadd16, 6200f10, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(qadd8, 6200f90, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(qaddsubx, 6200f30, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(qsub16, 6200f70, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(qsub8, 6200ff0, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(qsubaddx, 6200f50, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(sadd16, 6100f10, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(sadd8, 6100f90, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(saddsubx, 6100f30, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(shadd16, 6300f10, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(shadd8, 6300f90, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(shaddsubx, 6300f30, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(shsub16, 6300f70, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(shsub8, 6300ff0, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(shsubaddx, 6300f50, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(ssub16, 6100f70, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(ssub8, 6100ff0, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(ssubaddx, 6100f50, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(uadd16, 6500f10, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(uadd8, 6500f90, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(uaddsubx, 6500f30, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(uhadd16, 6700f10, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(uhadd8, 6700f90, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(uhaddsubx, 6700f30, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(uhsub16, 6700f70, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(uhsub8, 6700ff0, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(uhsubaddx, 6700f50, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(uqadd16, 6600f10, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(uqadd8, 6600f90, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(uqaddsubx, 6600f30, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(uqsub16, 6600f70, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(uqsub8, 6600ff0, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(uqsubaddx, 6600f50, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(usub16, 6500f70, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(usub8, 6500ff0, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(usubaddx, 6500f50, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- CE(rev, 6bf0f30, 2, (RRnpc, RRnpc), rd_rm),
- CE(rev16, 6bf0fb0, 2, (RRnpc, RRnpc), rd_rm),
- CE(revsh, 6ff0fb0, 2, (RRnpc, RRnpc), rd_rm),
- UF(rfeia, 8900a00, 1, (RRw), rfe),
- UF(rfeib, 9900a00, 1, (RRw), rfe),
- UF(rfeda, 8100a00, 1, (RRw), rfe),
- UF(rfedb, 9100a00, 1, (RRw), rfe),
- UF(rfefd, 8900a00, 1, (RRw), rfe),
- UF(rfefa, 9900a00, 1, (RRw), rfe),
- UF(rfeea, 8100a00, 1, (RRw), rfe),
- UF(rfeed, 9100a00, 1, (RRw), rfe),
- CE(sxtah, 6b00070, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah),
- CE(sxtab16, 6800070, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah),
- CE(sxtab, 6a00070, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah),
- CE(sxth, 6bf0070, 3, (RRnpc, RRnpc, oROR), sxth),
- CE(sxtb16, 68f0070, 3, (RRnpc, RRnpc, oROR), sxth),
- CE(sxtb, 6af0070, 3, (RRnpc, RRnpc, oROR), sxth),
- CE(uxtah, 6f00070, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah),
- CE(uxtab16, 6c00070, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah),
- CE(uxtab, 6e00070, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah),
- CE(uxth, 6ff0070, 3, (RRnpc, RRnpc, oROR), sxth),
- CE(uxtb16, 6cf0070, 3, (RRnpc, RRnpc, oROR), sxth),
- CE(uxtb, 6ef0070, 3, (RRnpc, RRnpc, oROR), sxth),
- CE(sel, 68000b0, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
- UF(setend, 1010000, 1, (ENDI), setend),
- CE(smlad, 7000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla),
- CE(smladx, 7000030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla),
- CE(smlald, 7400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal),
- CE(smlaldx, 7400030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal),
- CE(smlsd, 7000050, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla),
- CE(smlsdx, 7000070, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla),
- CE(smlsld, 7400050, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal),
- CE(smlsldx, 7400070, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal),
- CE(smmla, 7500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla),
- CE(smmlar, 7500030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla),
- CE(smmls, 75000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla),
- CE(smmlsr, 75000f0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla),
- CE(smmul, 750f010, 3, (RRnpc, RRnpc, RRnpc), smul),
- CE(smmulr, 750f030, 3, (RRnpc, RRnpc, RRnpc), smul),
- CE(smuad, 700f010, 3, (RRnpc, RRnpc, RRnpc), smul),
- CE(smuadx, 700f030, 3, (RRnpc, RRnpc, RRnpc), smul),
- CE(smusd, 700f050, 3, (RRnpc, RRnpc, RRnpc), smul),
- CE(smusdx, 700f070, 3, (RRnpc, RRnpc, RRnpc), smul),
- UF(srsia, 8cd0500, 1, (I31w), srs),
- UF(srsib, 9cd0500, 1, (I31w), srs),
- UF(srsda, 84d0500, 1, (I31w), srs),
- UF(srsdb, 94d0500, 1, (I31w), srs),
- CE(ssat, 6a00010, 4, (RRnpc, I32, RRnpc, oSHllar), ssat),
- CE(ssat16, 6a00f30, 3, (RRnpc, I16, RRnpc), ssat16),
- CE(strex, 1800f90, 3, (RRnpc, RRnpc, RRnpcb), strex),
- CE(umaal, 0400090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal),
- CE(usad8, 780f010, 3, (RRnpc, RRnpc, RRnpc), smul),
- CE(usada8, 7800010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla),
- CE(usat, 6e00010, 4, (RRnpc, I31, RRnpc, oSHllar), usat),
- CE(usat16, 6e00f30, 3, (RRnpc, I15, RRnpc), usat16),
+#undef THUMB_VARIANT
+#define THUMB_VARIANT ARM_EXT_V6
+ UF(cps, 1020000, 1, (I31b), imm0),
+ TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
+ TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
+ CE(ldrex, 1900f9f, 2, (RRnpc, RRnpcb), rd_rn),
+ UF(mcrr2, c400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c),
+ UF(mrrc2, c500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c),
+ CE(pkhbt, 6800010, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt),
+ CE(pkhtb, 6800050, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb),
+ CE(qadd16, 6200f10, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(qadd8, 6200f90, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(qaddsubx, 6200f30, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(qsub16, 6200f70, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(qsub8, 6200ff0, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(qsubaddx, 6200f50, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(sadd16, 6100f10, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(sadd8, 6100f90, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(saddsubx, 6100f30, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(shadd16, 6300f10, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(shadd8, 6300f90, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(shaddsubx, 6300f30, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(shsub16, 6300f70, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(shsub8, 6300ff0, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(shsubaddx, 6300f50, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(ssub16, 6100f70, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(ssub8, 6100ff0, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(ssubaddx, 6100f50, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(uadd16, 6500f10, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(uadd8, 6500f90, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(uaddsubx, 6500f30, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(uhadd16, 6700f10, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(uhadd8, 6700f90, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(uhaddsubx, 6700f30, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(uhsub16, 6700f70, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(uhsub8, 6700ff0, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(uhsubaddx, 6700f50, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(uqadd16, 6600f10, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(uqadd8, 6600f90, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(uqaddsubx, 6600f30, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(uqsub16, 6600f70, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(uqsub8, 6600ff0, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(uqsubaddx, 6600f50, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(usub16, 6500f70, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(usub8, 6500ff0, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ CE(usubaddx, 6500f50, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ TCE(rev, 6bf0f30, ba00, 2, (RRnpc, RRnpc), rd_rm, t_arit),
+ TCE(rev16, 6bf0fb0, ba40, 2, (RRnpc, RRnpc), rd_rm, t_arit),
+ TCE(revsh, 6ff0fb0, bac0, 2, (RRnpc, RRnpc), rd_rm, t_arit),
+ UF(rfeia, 8900a00, 1, (RRw), rfe),
+ UF(rfeib, 9900a00, 1, (RRw), rfe),
+ UF(rfeda, 8100a00, 1, (RRw), rfe),
+ UF(rfedb, 9100a00, 1, (RRw), rfe),
+ UF(rfefd, 8900a00, 1, (RRw), rfe),
+ UF(rfefa, 9900a00, 1, (RRw), rfe),
+ UF(rfeea, 8100a00, 1, (RRw), rfe),
+ UF(rfeed, 9100a00, 1, (RRw), rfe),
+ CE(sxtah, 6b00070, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah),
+ CE(sxtab16, 6800070, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah),
+ CE(sxtab, 6a00070, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah),
+ TCE(sxth, 6bf0070, b200, 3, (RRnpc, RRnpc, oROR), sxth, t_arit),
+ CE(sxtb16, 68f0070, 3, (RRnpc, RRnpc, oROR), sxth),
+ TCE(sxtb, 6af0070, b240, 3, (RRnpc, RRnpc, oROR), sxth, t_arit),
+ CE(uxtah, 6f00070, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah),
+ CE(uxtab16, 6c00070, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah),
+ CE(uxtab, 6e00070, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah),
+ TCE(uxth, 6ff0070, b280, 3, (RRnpc, RRnpc, oROR), sxth, t_arit),
+ CE(uxtb16, 6cf0070, 3, (RRnpc, RRnpc, oROR), sxth),
+ TCE(uxtb, 6ef0070, b2c0, 3, (RRnpc, RRnpc, oROR), sxth, t_arit),
+ CE(sel, 68000b0, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm),
+ TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
+ CE(smlad, 7000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla),
+ CE(smladx, 7000030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla),
+ CE(smlald, 7400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal),
+ CE(smlaldx, 7400030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal),
+ CE(smlsd, 7000050, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla),
+ CE(smlsdx, 7000070, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla),
+ CE(smlsld, 7400050, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal),
+ CE(smlsldx, 7400070, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal),
+ CE(smmla, 7500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla),
+ CE(smmlar, 7500030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla),
+ CE(smmls, 75000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla),
+ CE(smmlsr, 75000f0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla),
+ CE(smmul, 750f010, 3, (RRnpc, RRnpc, RRnpc), smul),
+ CE(smmulr, 750f030, 3, (RRnpc, RRnpc, RRnpc), smul),
+ CE(smuad, 700f010, 3, (RRnpc, RRnpc, RRnpc), smul),
+ CE(smuadx, 700f030, 3, (RRnpc, RRnpc, RRnpc), smul),
+ CE(smusd, 700f050, 3, (RRnpc, RRnpc, RRnpc), smul),
+ CE(smusdx, 700f070, 3, (RRnpc, RRnpc, RRnpc), smul),
+ UF(srsia, 8cd0500, 1, (I31w), srs),
+ UF(srsib, 9cd0500, 1, (I31w), srs),
+ UF(srsda, 84d0500, 1, (I31w), srs),
+ UF(srsdb, 94d0500, 1, (I31w), srs),
+ CE(ssat, 6a00010, 4, (RRnpc, I32, RRnpc, oSHllar), ssat),
+ CE(ssat16, 6a00f30, 3, (RRnpc, I16, RRnpc), ssat16),
+ CE(strex, 1800f90, 3, (RRnpc, RRnpc, RRnpcb), strex),
+ CE(umaal, 0400090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal),
+ CE(usad8, 780f010, 3, (RRnpc, RRnpc, RRnpc), smul),
+ CE(usada8, 7800010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla),
+ CE(usat, 6e00010, 4, (RRnpc, I31, RRnpc, oSHllar), usat),
+ CE(usat16, 6e00f30, 3, (RRnpc, I15, RRnpc), usat16),
#undef ARM_VARIANT
#define ARM_VARIANT ARM_EXT_V6K
- CE(ldrexb, 1d00f9f, 2, (RRnpc, RRnpcb), rd_rn),
- CE(ldrexd, 1b00f9f, 2, (RRnpc, RRnpcb), rd_rn),
- CE(ldrexh, 1f00f9f, 2, (RRnpc, RRnpcb), rd_rn),
- CE(strexb, 1c00f90, 3, (RRnpc, RRnpc, RRnpcb), strex),
- CE(strexd, 1a00f90, 3, (RRnpc, RRnpc, RRnpcb), strex),
- CE(strexh, 1e00f90, 3, (RRnpc, RRnpc, RRnpcb), strex),
- UF(clrex, 57ff01f, 0, (), noargs),
- CE(wfe, 320f002, 0, (), noargs),
- CE(wfi, 320f003, 0, (), noargs),
- CE(yield, 320f001, 0, (), noargs),
- CE(sev, 320f004, 0, (), noargs),
+#undef THUMB_VARIANT
+#define THUMB_VARIANT ARM_EXT_V6K
+ CE(ldrexb, 1d00f9f, 2, (RRnpc, RRnpcb), rd_rn),
+ CE(ldrexd, 1b00f9f, 2, (RRnpc, RRnpcb), rd_rn),
+ CE(ldrexh, 1f00f9f, 2, (RRnpc, RRnpcb), rd_rn),
+ CE(strexb, 1c00f90, 3, (RRnpc, RRnpc, RRnpcb), strex),
+ CE(strexd, 1a00f90, 3, (RRnpc, RRnpc, RRnpcb), strex),
+ CE(strexh, 1e00f90, 3, (RRnpc, RRnpc, RRnpcb), strex),
+ UF(clrex, 57ff01f, 0, (), noargs),
+ TCE(yield, 320f001, bf10, 0, (), noargs, noargs),
+ TCE(wfe, 320f002, bf20, 0, (), noargs, noargs),
+ TCE(wfi, 320f003, bf30, 0, (), noargs, noargs),
+ TCE(sev, 320f004, bf40, 0, (), noargs, noargs),
#undef ARM_VARIANT
#define ARM_VARIANT ARM_EXT_V6Z
@@ -7464,106 +7553,16 @@
CE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
};
#undef ARM_VARIANT
+#undef THUMB_VARIANT
+#undef TCE
+#undef TCM
+#undef TUE
+#undef TUF
+#undef TCC
#undef CE
#undef CM
#undef UE
#undef UF
-
-/* Thumb instructions are substantially simpler, there being no conditional
- suffix or infix. */
-#define TI(mnem, opc, nops, ops, tenc) \
- { #mnem, 0x ## opc, THUMB_VARIANT, OPS##nops ops, do_ ## tenc }
-
-static const struct asm_opcode tinsns[] =
-{
-#define THUMB_VARIANT ARM_EXT_V4T /* Thumb v1 (ARMv4T). */
- TI(adc, 4140, 3, (RL, oRL, RL), t_arit3),
- TI(add, 0000, 3, (RR, oRR, RR_EXi), t_add_sub),
- TI(and, 4000, 3, (RL, oRL, RL), t_arit3),
- TI(asr, 4100, 3, (RL, oRL, RL_EXi), t_shift),
- TI(b, e7fe, 1, (EXP), t_branch12),
- TI(beq, d0fe, 1, (EXP), t_branch9),
- TI(bne, d1fe, 1, (EXP), t_branch9),
- TI(bcs, d2fe, 1, (EXP), t_branch9),
- TI(bhs, d2fe, 1, (EXP), t_branch9),
- TI(bcc, d3fe, 1, (EXP), t_branch9),
- TI(bul, d3fe, 1, (EXP), t_branch9),
- TI(blo, d3fe, 1, (EXP), t_branch9),
- TI(bmi, d4fe, 1, (EXP), t_branch9),
- TI(bpl, d5fe, 1, (EXP), t_branch9),
- TI(bvs, d6fe, 1, (EXP), t_branch9),
- TI(bvc, d7fe, 1, (EXP), t_branch9),
- TI(bhi, d8fe, 1, (EXP), t_branch9),
- TI(bls, d9fe, 1, (EXP), t_branch9),
- TI(bge, dafe, 1, (EXP), t_branch9),
- TI(blt, dbfe, 1, (EXP), t_branch9),
- TI(bgt, dcfe, 1, (EXP), t_branch9),
- TI(ble, ddfe, 1, (EXP), t_branch9),
- TI(bal, defe, 1, (EXP), t_branch9),
- TI(bic, 4380, 3, (RL, oRL, RL), t_arit3),
- TI(bl, f7fffffe, 1, (EXP), t_branch23),
- TI(bx, 4700, 1, (RR), t_bx),
- TI(cmn, 42c0, 2, (RL, RL), t_arit),
- TI(cmp, 4280, 2, (RR, RR_EXi), t_mov_cmp),
- TI(eor, 4040, 3, (RL, oRL, RL), t_arit3),
- TI(ldmia, c800, 2, (RLw, REGLST), t_ldmstm),
- TI(ldr, 5800, 2, (RL, ADDR), t_ldst),
- TI(ldrb, 5c00, 2, (RL, ADDR), t_ldst),
- TI(ldrh, 5a00, 2, (RL, ADDR), t_ldst),
- TI(ldrsb, 5600, 2, (RL, ADDR), t_lds),
- TI(ldrsh, 5e00, 2, (RL, ADDR), t_lds),
- TI(ldsb, 5600, 2, (RL, ADDR), t_lds),
- TI(ldsh, 5e00, 2, (RL, ADDR), t_lds),
- TI(lsl, 4080, 3, (RL, oRL, RL_EXi), t_shift),
- TI(lsr, 40c0, 3, (RL, oRL, RL_EXi), t_shift),
- TI(mov, 4600, 2, (RR, RR_EXi), t_mov_cmp),
- TI(mul, 4340, 3, (RL, RL, oRL), t_mul),
- TI(mvn, 43c0, 2, (RL, RL), t_arit),
- TI(neg, 4240, 2, (RL, RL), t_arit),
- TI(orr, 4300, 3, (RL, oRL, RL), t_arit3),
- TI(pop, bc00, 1, (REGLST), t_push_pop),
- TI(push, b400, 1, (REGLST), t_push_pop),
- TI(ror, 41c0, 2, (RL, RL), t_arit),
- TI(sbc, 4180, 3, (RL, oRL, RL), t_arit3),
- TI(stmia, c000, 2, (RLw, REGLST), t_ldmstm),
- TI(str, 5000, 2, (RL, ADDR), t_ldst),
- TI(strb, 5400, 2, (RL, ADDR), t_ldst),
- TI(strh, 5200, 2, (RL, ADDR), t_ldst),
- TI(swi, df00, 1, (EXP), t_swi),
- TI(sub, 8000, 3, (RR, oRR, RR_EXi), t_add_sub),
- TI(tst, 4200, 2, (RL, RL), t_arit),
- /* Pseudo ops: */
- TI(adr, 000f, 2, (RL, EXP), t_adr),
- TI(nop, 46c0, 0, (), noargs), /* mov r8,r8 */
-
-#undef THUMB_VARIANT
-#define THUMB_VARIANT ARM_EXT_V5T /* Thumb v2 (ARMv5T). */
- TI(blx, 4780, 1, (RR_EX), t_blx),
- TI(bkpt, be00, 1, (oI255b), imm0),
-
-#undef THUMB_VARIANT
-#define THUMB_VARIANT ARM_EXT_V6
- TI(cpsie, b660, 1, (CPSF), imm0),
- TI(cpsid, b670, 1, (CPSF), imm0),
- TI(cpy, 4600, 2, (RR, RR), t_cpy),
- TI(rev, ba00, 2, (RL, RL), t_arit),
- TI(rev16, ba40, 2, (RL, RL), t_arit),
- TI(revsh, bac0, 2, (RL, RL), t_arit),
- TI(setend, b650, 1, (ENDI), t_setend),
- TI(sxth, b200, 2, (RL, RL), t_arit),
- TI(sxtb, b240, 2, (RL, RL), t_arit),
- TI(uxth, b280, 2, (RL, RL), t_arit),
- TI(uxtb, b2c0, 2, (RL, RL), t_arit),
-
-#undef THUMB_VARIANT
-#define THUMB_VARIANT ARM_EXT_V6K
- TI(sev, bf40, 0, (), noargs),
- TI(wfe, bf20, 0, (), noargs),
- TI(wfi, bf30, 0, (), noargs),
- TI(yield, bf10, 0, (), noargs),
-};
-#undef THUMB_VARIANT
-#undef TI
#undef OPS0
#undef OPS1
#undef OPS2
@@ -7571,6 +7570,7 @@
#undef OPS4
#undef OPS5
#undef OPS6
+#undef do_0
/* MD interface: bits in the object file. */
@@ -9515,7 +9515,6 @@
unsigned int i;
if ( (arm_ops_hsh = hash_new ()) == NULL
- || (arm_tops_hsh = hash_new ()) == NULL
|| (arm_cond_hsh = hash_new ()) == NULL
|| (arm_shift_hsh = hash_new ()) == NULL
|| (arm_psr_hsh = hash_new ()) == NULL
@@ -9525,8 +9524,6 @@
for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i));
- for (i = 0; i < sizeof (tinsns) / sizeof (struct asm_opcode); i++)
- hash_insert (arm_tops_hsh, tinsns[i].template, (PTR) (tinsns + i));
for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i));
for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
===================================================================
Index: gas/testsuite/gas/arm/arm.exp
--- gas/testsuite/gas/arm/arm.exp (revision 61)
+++ gas/testsuite/gas/arm/arm.exp (revision 62)
@@ -60,6 +60,7 @@
run_errors_test "armv1-bad" "-mcpu=arm7m" "ARM v1 errors"
run_errors_test "r15-bad" "" "Invalid use of r15 errors"
run_errors_test "archv6t2-bad" "-march=armv6t2" "Invalid V6T2 instructions"
+ run_errors_test "t16-bad" "-march=armv6k" "Valid ARM, invalid Thumb"
if {[istarget *-*-*coff] || [istarget *-*-pe] || [istarget *-*-wince] ||
[istarget *-*-*aout*] || [istarget *-*-netbsd] || [istarget *-*-riscix*]} then {
===================================================================
Index: gas/testsuite/gas/arm/t16-bad.l
--- gas/testsuite/gas/arm/t16-bad.l (revision 0)
+++ gas/testsuite/gas/arm/t16-bad.l (revision 62)
@@ -0,0 +1,181 @@
+[^:]*: Assembler messages:
+[^:]*:36: Error: lo register required -- `tst r8,r0'
+[^:]*:36: Error: lo register required -- `tst r0,r8'
+[^:]*:36: Error: unshifted register required -- `tst r0,#12'
+[^:]*:36: Error: unshifted register required -- `tst r0,r1,lsl#2'
+[^:]*:36: Error: unshifted register required -- `tst r0,r1,lsl r3'
+[^:]*:37: Error: lo register required -- `cmn r8,r0'
+[^:]*:37: Error: lo register required -- `cmn r0,r8'
+[^:]*:37: Error: unshifted register required -- `cmn r0,#12'
+[^:]*:37: Error: unshifted register required -- `cmn r0,r1,lsl#2'
+[^:]*:37: Error: unshifted register required -- `cmn r0,r1,lsl r3'
+[^:]*:38: Error: lo register required -- `mvn r8,r0'
+[^:]*:38: Error: lo register required -- `mvn r0,r8'
+[^:]*:38: Error: unshifted register required -- `mvn r0,#12'
+[^:]*:38: Error: unshifted register required -- `mvn r0,r1,lsl#2'
+[^:]*:38: Error: unshifted register required -- `mvn r0,r1,lsl r3'
+[^:]*:39: Error: lo register required -- `neg r8,r0'
+[^:]*:39: Error: lo register required -- `neg r0,r8'
+[^:]*:40: Error: lo register required -- `rev r8,r0'
+[^:]*:40: Error: lo register required -- `rev r0,r8'
+[^:]*:41: Error: lo register required -- `rev16 r8,r0'
+[^:]*:41: Error: lo register required -- `rev16 r0,r8'
+[^:]*:42: Error: lo register required -- `revsh r8,r0'
+[^:]*:42: Error: lo register required -- `revsh r0,r8'
+[^:]*:43: Error: lo register required -- `sxtb r8,r0'
+[^:]*:43: Error: lo register required -- `sxtb r0,r8'
+[^:]*:44: Error: lo register required -- `sxth r8,r0'
+[^:]*:44: Error: lo register required -- `sxth r0,r8'
+[^:]*:45: Error: lo register required -- `uxtb r8,r0'
+[^:]*:45: Error: lo register required -- `uxtb r0,r8'
+[^:]*:46: Error: lo register required -- `uxth r8,r0'
+[^:]*:46: Error: lo register required -- `uxth r0,r8'
+[^:]*:48: Error: dest and source1 must be the same register -- `adc r1,r2,r3'
+[^:]*:48: Error: lo register required -- `adc r8,r0'
+[^:]*:48: Error: lo register required -- `adc r0,r8'
+[^:]*:48: Error: unshifted register required -- `adc r0,#12'
+[^:]*:48: Error: unshifted register required -- `adc r0,r1,lsl#2'
+[^:]*:48: Error: unshifted register required -- `adc r0,r1,lsl r3'
+[^:]*:49: Error: dest and source1 must be the same register -- `and r1,r2,r3'
+[^:]*:49: Error: lo register required -- `and r8,r0'
+[^:]*:49: Error: lo register required -- `and r0,r8'
+[^:]*:49: Error: unshifted register required -- `and r0,#12'
+[^:]*:49: Error: unshifted register required -- `and r0,r1,lsl#2'
+[^:]*:49: Error: unshifted register required -- `and r0,r1,lsl r3'
+[^:]*:50: Error: dest and source1 must be the same register -- `bic r1,r2,r3'
+[^:]*:50: Error: lo register required -- `bic r8,r0'
+[^:]*:50: Error: lo register required -- `bic r0,r8'
+[^:]*:50: Error: unshifted register required -- `bic r0,#12'
+[^:]*:50: Error: unshifted register required -- `bic r0,r1,lsl#2'
+[^:]*:50: Error: unshifted register required -- `bic r0,r1,lsl r3'
+[^:]*:51: Error: dest and source1 must be the same register -- `eor r1,r2,r3'
+[^:]*:51: Error: lo register required -- `eor r8,r0'
+[^:]*:51: Error: lo register required -- `eor r0,r8'
+[^:]*:51: Error: unshifted register required -- `eor r0,#12'
+[^:]*:51: Error: unshifted register required -- `eor r0,r1,lsl#2'
+[^:]*:51: Error: unshifted register required -- `eor r0,r1,lsl r3'
+[^:]*:52: Error: dest and source1 must be the same register -- `orr r1,r2,r3'
+[^:]*:52: Error: lo register required -- `orr r8,r0'
+[^:]*:52: Error: lo register required -- `orr r0,r8'
+[^:]*:52: Error: unshifted register required -- `orr r0,#12'
+[^:]*:52: Error: unshifted register required -- `orr r0,r1,lsl#2'
+[^:]*:52: Error: unshifted register required -- `orr r0,r1,lsl r3'
+[^:]*:53: Error: dest and source1 must be the same register -- `sbc r1,r2,r3'
+[^:]*:53: Error: lo register required -- `sbc r8,r0'
+[^:]*:53: Error: lo register required -- `sbc r0,r8'
+[^:]*:53: Error: unshifted register required -- `sbc r0,#12'
+[^:]*:53: Error: unshifted register required -- `sbc r0,r1,lsl#2'
+[^:]*:53: Error: unshifted register required -- `sbc r0,r1,lsl r3'
+[^:]*:54: Error: dest and source2 must be the same register -- `mul r1,r2,r3'
+[^:]*:54: Error: lo register required -- `mul r8,r0'
+[^:]*:54: Error: lo register required -- `mul r0,r8'
+[^:]*:62: Error: lo register required -- `asr r8,r0,#12'
+[^:]*:62: Error: lo register required -- `asr r0,r8,#12'
+[^:]*:62: Error: lo register required -- `asr r8,r0'
+[^:]*:62: Error: lo register required -- `asr r0,r8'
+[^:]*:63: Error: lo register required -- `lsl r8,r0,#12'
+[^:]*:63: Error: lo register required -- `lsl r0,r8,#12'
+[^:]*:63: Error: lo register required -- `lsl r8,r0'
+[^:]*:63: Error: lo register required -- `lsl r0,r8'
+[^:]*:64: Error: lo register required -- `lsr r8,r0,#12'
+[^:]*:64: Error: lo register required -- `lsr r0,r8,#12'
+[^:]*:64: Error: lo register required -- `lsr r8,r0'
+[^:]*:64: Error: lo register required -- `lsr r0,r8'
+[^:]*:65: Error: lo register required -- `ror r8,r0,#12'
+[^:]*:65: Error: lo register required -- `ror r0,r8,#12'
+[^:]*:65: Error: lo register required -- `ror r8,r0'
+[^:]*:65: Error: lo register required -- `ror r0,r8'
+[^:]*:66: Error: ror #imm not supported -- `ror r0,r1,#12'
+[^:]*:69: Error: unshifted register required -- `add r0,r1,lsl#2'
+[^:]*:70: Error: unshifted register required -- `add r0,r1,lsl r3'
+[^:]*:71: Error: lo register required -- `add r8,r0,#1'
+[^:]*:72: Error: lo register required -- `add r0,r8,#1'
+[^:]*:73: Error: lo register required -- `add r8,#10'
+[^:]*:74: Error: dest and source1 must be the same register -- `add r8,r1,r2'
+[^:]*:75: Error: dest and source1 must be the same register -- `add r1,r8,r2'
+[^:]*:76: Error: dest and source1 must be the same register -- `add r1,r2,r8'
+[^:]*:77: Error: lo register required -- `add r8,pc,#4'
+[^:]*:78: Error: lo register required -- `add r8,sp,#4'
+[^:]*:80: Error: lo register required -- `sub r8,r0'
+[^:]*:80: Error: lo register required -- `sub r0,r8'
+[^:]*:80: Error: unshifted register required -- `sub r0,r1,lsl#2'
+[^:]*:80: Error: unshifted register required -- `sub r0,r1,lsl r3'
+[^:]*:81: Error: lo register required -- `sub r8,r0,#1'
+[^:]*:82: Error: lo register required -- `sub r0,r8,#1'
+[^:]*:83: Error: lo register required -- `sub r8,#10'
+[^:]*:84: Error: lo register required -- `sub r8,r1,r2'
+[^:]*:85: Error: lo register required -- `sub r1,r8,r2'
+[^:]*:86: Error: lo register required -- `sub r1,r2,r8'
+[^:]*:90: Error: only lo regs allowed with immediate -- `cmp r8,#255'
+[^:]*:94: Error: only lo regs allowed with immediate -- `mov r8,#255'
+[^:]*:106: Error: lo register required -- `ldr r8,\[r0\]'
+[^:]*:106: Error: lo register required -- `ldr r0,\[r8\]'
+[^:]*:106: Error: lo register required -- `ldr r0,\[r0,r8\]'
+[^:]*:106: Error: Thumb does not support this addressing mode -- `ldr r0,\[r1,#4\]!'
+[^:]*:106: Error: Thumb does not support this addressing mode -- `ldr r0,\[r1\],#4'
+[^:]*:106: Error: Thumb does not support this addressing mode -- `ldr r0,\[r1,-r2\]'
+[^:]*:106: Error: Thumb does not support this addressing mode -- `ldr r0,\[r1\],r2'
+[^:]*:107: Error: lo register required -- `ldrb r8,\[r0\]'
+[^:]*:107: Error: lo register required -- `ldrb r0,\[r8\]'
+[^:]*:107: Error: lo register required -- `ldrb r0,\[r0,r8\]'
+[^:]*:107: Error: Thumb does not support this addressing mode -- `ldrb r0,\[r1,#4\]!'
+[^:]*:107: Error: Thumb does not support this addressing mode -- `ldrb r0,\[r1\],#4'
+[^:]*:107: Error: Thumb does not support this addressing mode -- `ldrb r0,\[r1,-r2\]'
+[^:]*:107: Error: Thumb does not support this addressing mode -- `ldrb r0,\[r1\],r2'
+[^:]*:108: Error: lo register required -- `ldrh r8,\[r0\]'
+[^:]*:108: Error: lo register required -- `ldrh r0,\[r8\]'
+[^:]*:108: Error: lo register required -- `ldrh r0,\[r0,r8\]'
+[^:]*:108: Error: Thumb does not support this addressing mode -- `ldrh r0,\[r1,#4\]!'
+[^:]*:108: Error: Thumb does not support this addressing mode -- `ldrh r0,\[r1\],#4'
+[^:]*:108: Error: Thumb does not support this addressing mode -- `ldrh r0,\[r1,-r2\]'
+[^:]*:108: Error: Thumb does not support this addressing mode -- `ldrh r0,\[r1\],r2'
+[^:]*:109: Error: Thumb does not support this addressing mode -- `ldrsb r8,\[r0\]'
+[^:]*:109: Error: Thumb does not support this addressing mode -- `ldrsb r0,\[r8\]'
+[^:]*:109: Error: lo register required -- `ldrsb r0,\[r0,r8\]'
+[^:]*:109: Error: Thumb does not support this addressing mode -- `ldrsb r0,\[r1,#4\]!'
+[^:]*:109: Error: Thumb does not support this addressing mode -- `ldrsb r0,\[r1\],#4'
+[^:]*:109: Error: Thumb does not support this addressing mode -- `ldrsb r0,\[r1,-r2\]'
+[^:]*:109: Error: Thumb does not support this addressing mode -- `ldrsb r0,\[r1\],r2'
+[^:]*:110: Error: Thumb does not support this addressing mode -- `ldrsh r8,\[r0\]'
+[^:]*:110: Error: Thumb does not support this addressing mode -- `ldrsh r0,\[r8\]'
+[^:]*:110: Error: lo register required -- `ldrsh r0,\[r0,r8\]'
+[^:]*:110: Error: Thumb does not support this addressing mode -- `ldrsh r0,\[r1,#4\]!'
+[^:]*:110: Error: Thumb does not support this addressing mode -- `ldrsh r0,\[r1\],#4'
+[^:]*:110: Error: Thumb does not support this addressing mode -- `ldrsh r0,\[r1,-r2\]'
+[^:]*:110: Error: Thumb does not support this addressing mode -- `ldrsh r0,\[r1\],r2'
+[^:]*:111: Error: lo register required -- `str r8,\[r0\]'
+[^:]*:111: Error: lo register required -- `str r0,\[r8\]'
+[^:]*:111: Error: lo register required -- `str r0,\[r0,r8\]'
+[^:]*:111: Error: Thumb does not support this addressing mode -- `str r0,\[r1,#4\]!'
+[^:]*:111: Error: Thumb does not support this addressing mode -- `str r0,\[r1\],#4'
+[^:]*:111: Error: Thumb does not support this addressing mode -- `str r0,\[r1,-r2\]'
+[^:]*:111: Error: Thumb does not support this addressing mode -- `str r0,\[r1\],r2'
+[^:]*:112: Error: lo register required -- `strb r8,\[r0\]'
+[^:]*:112: Error: lo register required -- `strb r0,\[r8\]'
+[^:]*:112: Error: lo register required -- `strb r0,\[r0,r8\]'
+[^:]*:112: Error: Thumb does not support this addressing mode -- `strb r0,\[r1,#4\]!'
+[^:]*:112: Error: Thumb does not support this addressing mode -- `strb r0,\[r1\],#4'
+[^:]*:112: Error: Thumb does not support this addressing mode -- `strb r0,\[r1,-r2\]'
+[^:]*:112: Error: Thumb does not support this addressing mode -- `strb r0,\[r1\],r2'
+[^:]*:113: Error: lo register required -- `strh r8,\[r0\]'
+[^:]*:113: Error: lo register required -- `strh r0,\[r8\]'
+[^:]*:113: Error: lo register required -- `strh r0,\[r0,r8\]'
+[^:]*:113: Error: Thumb does not support this addressing mode -- `strh r0,\[r1,#4\]!'
+[^:]*:113: Error: Thumb does not support this addressing mode -- `strh r0,\[r1\],#4'
+[^:]*:113: Error: Thumb does not support this addressing mode -- `strh r0,\[r1,-r2\]'
+[^:]*:113: Error: Thumb does not support this addressing mode -- `strh r0,\[r1\],r2'
+[^:]*:115: Error: Thumb does not support this addressing mode -- `ldr r0,\[r1,r2,lsl#1\]'
+[^:]*:116: Error: Thumb does not support this addressing mode -- `str r0,\[r1,r2,lsl#1\]'
+[^:]*:119: Error: lo register required -- `ldmia r8!,{r1,r2}'
+[^:]*:120: Error: lo register required -- `ldmia r7!,{r8}'
+[^:]*:121: Warning: this instruction will write back the base register
+[^:]*:122: Warning: this instruction will not write back the base register
+[^:]*:124: Error: lo register required -- `stmia r8!,{r1,r2}'
+[^:]*:125: Error: lo register required -- `stmia r7!,{r8}'
+[^:]*:126: Warning: this instruction will write back the base register
+[^:]*:127: Warning: value stored for r7 is UNPREDICTABLE
+[^:]*:129: Error: invalid register list to push/pop instruction -- `push {r8,r9}'
+[^:]*:130: Error: invalid register list to push/pop instruction -- `pop {r8,r9}'
+[^:]*:133: Error: immediate value out of range -- `bkpt #257'
+[^:]*:134: Error: Thumb does not support the 2-argument form of this instruction -- `cpsie ai,#5'
+[^:]*:135: Error: Thumb does not support the 2-argument form of this instruction -- `cpsid ai,#5'
===================================================================
Index: gas/testsuite/gas/arm/t16-bad.s
--- gas/testsuite/gas/arm/t16-bad.s (revision 0)
+++ gas/testsuite/gas/arm/t16-bad.s (revision 62)
@@ -0,0 +1,135 @@
+ @ Things you can't do with 16-bit Thumb instructions, but you can
+ @ do with the equivalent ARM instruction. Does not include errors
+ @ caught by fixup processing (e.g. out-of-range immediates).
+
+ .text
+ .code 16
+ .thumb_func
+l:
+ @ Arithmetic instruction templates
+ .macro ar2 opc
+ \opc r8,r0
+ \opc r0,r8
+ .endm
+ .macro ar2sh opc
+ ar2 \opc
+ \opc r0,#12
+ \opc r0,r1,lsl #2
+ \opc r0,r1,lsl r3
+ .endm
+ .macro ar2r opc
+ ar2 \opc
+ \opc r0,r1,ror #8
+ .endm
+ .macro ar3 opc
+ \opc r1,r2,r3
+ \opc r8,r0
+ \opc r0,r8
+ .endm
+ .macro ar3sh opc
+ ar3 \opc
+ \opc r0,#12
+ \opc r0,r1,lsl #2
+ \opc r0,r1,lsl r3
+ .endm
+
+ ar2sh tst
+ ar2sh cmn
+ ar2sh mvn
+ ar2 neg
+ ar2 rev
+ ar2 rev16
+ ar2 revsh
+ ar2r sxtb
+ ar2r sxth
+ ar2r uxtb
+ ar2r uxth
+
+ ar3sh adc
+ ar3sh and
+ ar3sh bic
+ ar3sh eor
+ ar3sh orr
+ ar3sh sbc
+ ar3 mul
+
+ @ Shift instruction template
+ .macro shift opc
+ \opc r8,r0,#12 @ form 1
+ \opc r0,r8,#12
+ ar2 \opc @ form 2
+ .endm
+ shift asr
+ shift lsl
+ shift lsr
+ shift ror
+ ror r0,r1,#12
+
+ @ add/sub/mov/cmp are idiosyncratic
+ add r0,r1,lsl #2
+ add r0,r1,lsl r3
+ add r8,r0,#1 @ form 1
+ add r0,r8,#1
+ add r8,#10 @ form 2
+ add r8,r1,r2 @ form 3
+ add r1,r8,r2
+ add r1,r2,r8
+ add r8,pc,#4 @ form 5
+ add r8,sp,#4 @ form 6
+
+ ar3sh sub
+ sub r8,r0,#1 @ form 1
+ sub r0,r8,#1
+ sub r8,#10 @ form 2
+ sub r8,r1,r2 @ form 3
+ sub r1,r8,r2
+ sub r1,r2,r8
+
+ cmp r0,r1,lsl #2
+ cmp r0,r1,lsl r3
+ cmp r8,#255
+
+ mov r0,r1,lsl #2
+ mov r0,r1,lsl r3
+ mov r8,#255
+
+ @ Load/store template
+ .macro ldst opc
+ \opc r8,[r0]
+ \opc r0,[r8]
+ \opc r0,[r0,r8]
+ \opc r0,[r1,#4]!
+ \opc r0,[r1],#4
+ \opc r0,[r1,-r2]
+ \opc r0,[r1],r2
+ .endm
+ ldst ldr
+ ldst ldrb
+ ldst ldrh
+ ldst ldrsb
+ ldst ldrsh
+ ldst str
+ ldst strb
+ ldst strh
+
+ ldr r0,[r1,r2,lsl #1]
+ str r0,[r1,r2,lsl #1]
+
+ @ Load/store multiple
+ ldmia r8!,{r1,r2}
+ ldmia r7!,{r8}
+ ldmia r7,{r1,r2}
+ ldmia r7!,{r1,r7}
+
+ stmia r8!,{r1,r2}
+ stmia r7!,{r8}
+ stmia r7,{r1,r2}
+ stmia r7!,{r1,r7}
+
+ push {r8,r9}
+ pop {r8,r9}
+
+ @ Miscellaneous
+ bkpt #257
+ cpsie ai,#5
+ cpsid ai,#5