commit af2a38458197e07cbd03ea9446714008488826d8
Author: Victor Campos <victor.campos@arm.com>
Date:   Sat Feb 8 11:59:46 2020 +0000

Revert "[ARM] Improve codegen of volatile load/store of i64"

This reverts commit 60e0120c913dd1d4bfe33769e1f000a076249a42.
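
For context: the change being reverted made instruction selection emit a single
LDRD/STRD for volatile 64-bit loads and stores on ARMv5TE and later targets
outside Thumb1 mode (via the LowerLOAD/LowerSTORE hooks and the
LOADDUAL/STOREDUAL pseudo-instructions removed below). A minimal reproducer,
distilled from the regression test deleted at the end of this diff, with the
before/after codegen noted in comments:

; RUN: llc -mtriple=armv5e-arm-none-eabi %s -o -

@x = common dso_local global i64 0, align 8
@y = common dso_local global i64 0, align 8

define void @test() {
entry:
  ; Before this revert: one ldrd and one strd of a register pair.
  ; After this revert: two 32-bit ldr/str pairs, as on armv4t.
  %0 = load volatile i64, i64* @x, align 8
  store volatile i64 %0, i64* @y, align 8
  ret void
}
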
diff --git a/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index 2c3ac816219..de4377ec5a4 100644
--- a/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -1952,24 +1952,6 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
    MI.eraseFromParent();
    return true;
  }
-  case ARM::LOADDUAL:
-  case ARM::STOREDUAL: {
-    Register PairReg = MI.getOperand(0).getReg();
-
-    MachineInstrBuilder MIB =
-        BuildMI(MBB, MBBI, MI.getDebugLoc(),
-                TII->get(Opcode == ARM::LOADDUAL ? ARM::LDRD : ARM::STRD))
-            .addReg(TRI->getSubReg(PairReg, ARM::gsub_0),
-                    Opcode == ARM::LOADDUAL ? RegState::Define : 0)
-            .addReg(TRI->getSubReg(PairReg, ARM::gsub_1),
-                    Opcode == ARM::LOADDUAL ? RegState::Define : 0);
-    for (unsigned i = 1; i < MI.getNumOperands(); i++)
-      MIB.add(MI.getOperand(i));
-    MIB.add(predOps(ARMCC::AL));
-    MIB.cloneMemRefs(MI);
-    MI.eraseFromParent();
-    return true;
-  }
  }
}
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 696d938e39c..da7d3d9f2a4 100644
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -145,8 +145,6 @@ public:
  // Thumb 2 Addressing Modes:
  bool SelectT2AddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
-  template <unsigned Shift>
-  bool SelectT2AddrModeImm8(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectT2AddrModeImm8(SDValue N, SDValue &Base,
                            SDValue &OffImm);
  bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
@@ -1301,33 +1299,6 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
  return true;
}

-template <unsigned Shift>
-bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N, SDValue &Base,
-                                           SDValue &OffImm) {
-  if (N.getOpcode() == ISD::SUB || CurDAG->isBaseWithConstantOffset(N)) {
-    int RHSC;
-    if (isScaledConstantInRange(N.getOperand(1), 1 << Shift, -255, 256, RHSC)) {
-      Base = N.getOperand(0);
-      if (Base.getOpcode() == ISD::FrameIndex) {
-        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
-        Base = CurDAG->getTargetFrameIndex(
-            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
-      }
-
-      if (N.getOpcode() == ISD::SUB)
-        RHSC = -RHSC;
-      OffImm =
-          CurDAG->getTargetConstant(RHSC * (1 << Shift), SDLoc(N), MVT::i32);
-      return true;
-    }
-  }
-
-  // Base only.
-  Base = N;
-  OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
-  return true;
-}
-
bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
                                           SDValue &Base, SDValue &OffImm) {
  // Match simple R - imm8 operands.
@@ -3589,26 +3560,6 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
    CurDAG->RemoveDeadNode(N);
    return;
  }
-  case ARMISD::LDRD: {
-    if (Subtarget->isThumb2())
-      break; // TableGen handles isel in this case.
-    SDValue Base, RegOffset, ImmOffset;
-    const SDValue &Chain = N->getOperand(0);
-    const SDValue &Addr = N->getOperand(1);
-    SelectAddrMode3(Addr, Base, RegOffset, ImmOffset);
-    SDValue Ops[] = {Base, RegOffset, ImmOffset, Chain};
-    SDNode *New = CurDAG->getMachineNode(ARM::LOADDUAL, dl,
-                                         {MVT::Untyped, MVT::Other}, Ops);
-    SDValue Lo = CurDAG->getTargetExtractSubreg(ARM::gsub_0, dl, MVT::i32,
-                                                SDValue(New, 0));
-    SDValue Hi = CurDAG->getTargetExtractSubreg(ARM::gsub_1, dl, MVT::i32,
-                                                SDValue(New, 0));
-    ReplaceUses(SDValue(N, 0), Lo);
-    ReplaceUses(SDValue(N, 1), Hi);
-    ReplaceUses(SDValue(N, 2), SDValue(New, 1));
-    CurDAG->RemoveDeadNode(N);
-    return;
-  }
  case ARMISD::LOOP_DEC: {
    SDValue Ops[] = { N->getOperand(1),
                      N->getOperand(2),
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index a94d9b8ab3f..e4ff0845c5c 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -1073,8 +1073,6 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
  setOperationAction(ISD::SRA, MVT::i64, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
-  setOperationAction(ISD::LOAD, MVT::i64, Custom);
-  setOperationAction(ISD::STORE, MVT::i64, Custom);

  // MVE lowers 64 bit shifts to lsll and lsrl
  // assuming that ISD::SRL and SRA of i64 are already marked custom
@@ -1608,9 +1606,6 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  case ARMISD::PRELOAD: return "ARMISD::PRELOAD";

-  case ARMISD::LDRD: return "ARMISD::LDRD";
-  case ARMISD::STRD: return "ARMISD::STRD";
-
  case ARMISD::WIN__CHKSTK: return "ARMISD::WIN__CHKSTK";
  case ARMISD::WIN__DBZCHK: return "ARMISD::WIN__DBZCHK";
@@ -9105,24 +9100,6 @@ static SDValue LowerPredicateLoad(SDValue Op, SelectionDAG &DAG) {
  return DAG.getMergeValues({Pred, Load.getValue(1)}, dl);
}

-void ARMTargetLowering::LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results,
-                                  SelectionDAG &DAG) const {
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  EVT MemVT = LD->getMemoryVT();
-  assert(LD->isUnindexed() && "Loads should be unindexed at this point.");
-
-  if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() &&
-      !Subtarget->isThumb1Only() && LD->isVolatile()) {
-    SDLoc dl(N);
-    SDValue Result = DAG.getMemIntrinsicNode(
-        ARMISD::LDRD, dl, DAG.getVTList({MVT::i32, MVT::i32, MVT::Other}),
-        {LD->getChain(), LD->getBasePtr()}, MemVT, LD->getMemOperand());
-    SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64,
-                               Result.getValue(0), Result.getValue(1));
-    Results.append({Pair, Result.getValue(2)});
-  }
-}
-
static SDValue LowerPredicateStore(SDValue Op, SelectionDAG &DAG) {
  StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
  EVT MemVT = ST->getMemoryVT();
@@ -9152,34 +9129,6 @@ static SDValue LowerPredicateStore(SDValue Op, SelectionDAG &DAG) {
                           ST->getMemOperand());
}

-static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG,
-                          const ARMSubtarget *Subtarget) {
-  StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
-  EVT MemVT = ST->getMemoryVT();
-  assert(ST->isUnindexed() && "Stores should be unindexed at this point.");
-
-  if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() &&
-      !Subtarget->isThumb1Only() && ST->isVolatile()) {
-    SDNode *N = Op.getNode();
-    SDLoc dl(N);
-
-    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, ST->getValue(),
-                             DAG.getTargetConstant(0, dl, MVT::i32));
-    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, ST->getValue(),
-                             DAG.getTargetConstant(1, dl, MVT::i32));
-
-    return DAG.getMemIntrinsicNode(ARMISD::STRD, dl, DAG.getVTList(MVT::Other),
-                                   {ST->getChain(), Lo, Hi, ST->getBasePtr()},
-                                   MemVT, ST->getMemOperand());
-  } else if (Subtarget->hasMVEIntegerOps() &&
-             ((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 ||
-               MemVT == MVT::v16i1))) {
-    return LowerPredicateStore(Op, DAG);
-  }
-
-  return SDValue();
-}
-
static bool isZeroVector(SDValue N) {
  return (ISD::isBuildVectorAllZeros(N.getNode()) ||
          (N->getOpcode() == ARMISD::VMOVIMM &&
@@ -9412,7 +9361,7 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  case ISD::LOAD:
    return LowerPredicateLoad(Op, DAG);
  case ISD::STORE:
-    return LowerSTORE(Op, DAG, Subtarget);
+    return LowerPredicateStore(Op, DAG);
  case ISD::MLOAD:
    return LowerMLOAD(Op, DAG);
  case ISD::ATOMIC_LOAD:
@@ -9516,9 +9465,7 @@ void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
  case ISD::ABS:
    lowerABS(N, Results, DAG);
    return ;
-  case ISD::LOAD:
-    LowerLOAD(N, Results, DAG);
-    break;
+
  }
  if (Res.getNode())
    Results.push_back(Res);
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 3b5a3793ade..fb17d081aac 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -281,11 +281,7 @@ class VectorType;
    VST4_UPD,
    VST2LN_UPD,
    VST3LN_UPD,
-    VST4LN_UPD,
-
-    // Load/Store of dual registers
-    LDRD,
-    STRD
+    VST4LN_UPD
  };

} // end namespace ARMISD
@@ -736,8 +732,6 @@ class VectorType;
    SDValue LowerFSETCC(SDValue Op, SelectionDAG &DAG) const;
    void lowerABS(SDNode *N, SmallVectorImpl<SDValue> &Results,
                  SelectionDAG &DAG) const;
-    void LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results,
-                   SelectionDAG &DAG) const;

    Register getRegisterByName(const char* RegName, LLT VT,
                               const MachineFunction &MF) const override;
diff --git a/llvm/lib/Target/ARM/ARMInstrInfo.td b/llvm/lib/Target/ARM/ARMInstrInfo.td
index c9fc8333da8..672dfcab98e 100644
--- a/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -243,12 +243,6 @@ def ARMqsub8b : SDNode<"ARMISD::QSUB8b", SDT_ARMAnd, []>;
def ARMqadd16b : SDNode<"ARMISD::QADD16b", SDT_ARMAnd, []>;
def ARMqsub16b : SDNode<"ARMISD::QSUB16b", SDT_ARMAnd, []>;
-def SDT_ARMldrd : SDTypeProfile<2, 1, [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
-def ARMldrd : SDNode<"ARMISD::LDRD", SDT_ARMldrd, [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
-
-def SDT_ARMstrd : SDTypeProfile<0, 3, [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
-def ARMstrd : SDNode<"ARMISD::STRD", SDT_ARMstrd, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
-
// Vector operations shared between NEON and MVE
def ARMvdup : SDNode<"ARMISD::VDUP", SDTypeProfile<1, 1, [SDTCisVec<0>]>>;
@@ -2705,14 +2699,6 @@ let mayLoad = 1, hasSideEffects = 0, hasExtraDefRegAllocReq = 1 in {
             Requires<[IsARM, HasV5TE]>;
}

-let mayLoad = 1, hasSideEffects = 0, hasNoSchedulingInfo = 1 in {
-def LOADDUAL : ARMPseudoInst<(outs GPRPairOp:$Rt), (ins addrmode3:$addr),
-                             64, IIC_iLoad_d_r, []>,
-               Requires<[IsARM, HasV5TE]> {
-  let AM = AddrMode3;
-}
-}
-
def LDA : AIldracq<0b00, (outs GPR:$Rt), (ins addr_offset_none:$addr),
                   NoItinerary, "lda", "\t$Rt, $addr", []>;
def LDAB : AIldracq<0b10, (outs GPR:$Rt), (ins addr_offset_none:$addr),
def LDAB : AIldracq<0b10, (outs GPR:$Rt), (ins addr_offset_none:$addr),
@@ -2988,19 +2974,6 @@ let mayStore = 1, hasSideEffects = 0, hasExtraSrcRegAllocReq = 1 in {
  }
}

-let mayStore = 1, hasSideEffects = 0, hasNoSchedulingInfo = 1 in {
-def STOREDUAL : ARMPseudoInst<(outs), (ins GPRPairOp:$Rt, addrmode3:$addr),
-                              64, IIC_iStore_d_r, []>,
-                Requires<[IsARM, HasV5TE]> {
-  let AM = AddrMode3;
-}
-}
-
-let Predicates = [IsARM, HasV5TE] in {
-def : Pat<(ARMstrd GPR:$Rt, GPR:$Rt2, addrmode3:$addr),
-          (STOREDUAL (REG_SEQUENCE GPRPair, GPR:$Rt, gsub_0, GPR:$Rt2, gsub_1), addrmode3:$addr)>;
-}
-
// Indexed stores
multiclass AI2_stridx<bit isByte, string opc,
                      InstrItinClass iii, InstrItinClass iir> {
diff --git a/llvm/lib/Target/ARM/ARMInstrThumb2.td b/llvm/lib/Target/ARM/ARMInstrThumb2.td
index 1c5764e20d2..1a9237b5afc 100644
--- a/llvm/lib/Target/ARM/ARMInstrThumb2.td
+++ b/llvm/lib/Target/ARM/ARMInstrThumb2.td
@@ -270,8 +270,7 @@ def t2am_imm8_offset : MemOperand,
// t2addrmode_imm8s4 := reg +/- (imm8 << 2)
def MemImm8s4OffsetAsmOperand : AsmOperandClass {let Name = "MemImm8s4Offset";}
-class T2AddrMode_Imm8s4 : MemOperand,
-                          ComplexPattern<i32, 2, "SelectT2AddrModeImm8<2>", []> {
+class T2AddrMode_Imm8s4 : MemOperand {
  let EncoderMethod = "getT2AddrModeImm8s4OpValue";
  let DecoderMethod = "DecodeT2AddrModeImm8s4";
  let ParserMatchClass = MemImm8s4OffsetAsmOperand;
@@ -1449,8 +1448,7 @@ let mayLoad = 1, hasSideEffects = 0, hasExtraDefRegAllocReq = 1 in {
// Load doubleword
def t2LDRDi8 : T2Ii8s4<1, 0, 1, (outs rGPR:$Rt, rGPR:$Rt2),
                       (ins t2addrmode_imm8s4:$addr),
-                      IIC_iLoad_d_i, "ldrd", "\t$Rt, $Rt2, $addr", "",
-                      [(set rGPR:$Rt, rGPR:$Rt2, (ARMldrd t2addrmode_imm8s4:$addr))]>,
+                      IIC_iLoad_d_i, "ldrd", "\t$Rt, $Rt2, $addr", "", []>,
               Sched<[WriteLd]>;
} // mayLoad = 1, hasSideEffects = 0, hasExtraDefRegAllocReq = 1
@@ -1631,8 +1629,7 @@ defm t2STRH:T2I_st<0b01,"strh", IIC_iStore_bh_i, IIC_iStore_bh_si,
let mayStore = 1, hasSideEffects = 0, hasExtraSrcRegAllocReq = 1 in
def t2STRDi8 : T2Ii8s4<1, 0, 0, (outs),
                       (ins rGPR:$Rt, rGPR:$Rt2, t2addrmode_imm8s4:$addr),
-                      IIC_iStore_d_r, "strd", "\t$Rt, $Rt2, $addr", "",
-                      [(ARMstrd rGPR:$Rt, rGPR:$Rt2, t2addrmode_imm8s4:$addr)]>,
+                      IIC_iStore_d_r, "strd", "\t$Rt, $Rt2, $addr", "", []>,
               Sched<[WriteST]>;
// Indexed stores
diff --git a/llvm/test/CodeGen/ARM/i64_volatile_load_store.ll b/llvm/test/CodeGen/ARM/i64_volatile_load_store.ll
deleted file mode 100644
index a6244458402..00000000000
--- a/llvm/test/CodeGen/ARM/i64_volatile_load_store.ll
+++ /dev/null
@@ -1,180 +0,0 @@
-; RUN: llc -mtriple=armv5e-arm-none-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-ARMV5TE,CHECK
-; RUN: llc -mtriple=thumbv6t2-arm-none-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-T2,CHECK
-; RUN: llc -mtriple=armv4t-arm-none-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-ARMV4T,CHECK
-
-@x = common dso_local global i64 0, align 8
-@y = common dso_local global i64 0, align 8
-
-define void @test() {
-entry:
-; CHECK-LABEL: test:
-; CHECK-ARMV5TE: ldr [[ADDR0:r[0-9]+]]
-; CHECK-ARMV5TE-NEXT: ldr [[ADDR1:r[0-9]+]]
-; CHECK-ARMV5TE-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]]]
-; CHECK-ARMV5TE-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]]]
-; CHECK-T2: movw [[ADDR0:r[0-9]+]], :lower16:x
-; CHECK-T2-NEXT: movw [[ADDR1:r[0-9]+]], :lower16:y
-; CHECK-T2-NEXT: movt [[ADDR0]], :upper16:x
-; CHECK-T2-NEXT: movt [[ADDR1]], :upper16:y
-; CHECK-T2-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]]]
-; CHECK-T2-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]]]
-; CHECK-ARMV4T: ldr [[ADDR0:r[0-9]+]]
-; CHECK-ARMV4T-NEXT: ldr [[ADDR1:r[0-9]+]]
-; CHECK-ARMV4T-NEXT: ldr [[R1:r[0-9]+]], {{\[}}[[ADDR0]]]
-; CHECK-ARMV4T-NEXT: ldr [[R0:r[0-9]+]], {{\[}}[[ADDR0]], #4]
-; CHECK-ARMV4T-NEXT: str [[R0]], {{\[}}[[ADDR1]], #4]
-; CHECK-ARMV4T-NEXT: str [[R1]], {{\[}}[[ADDR1]]]
-  %0 = load volatile i64, i64* @x, align 8
-  store volatile i64 %0, i64* @y, align 8
-  ret void
-}
-
-define void @test_offset() {
-entry:
-; CHECK-LABEL: test_offset:
-; CHECK-ARMV5TE: ldr [[ADDR0:r[0-9]+]]
-; CHECK-ARMV5TE-NEXT: ldr [[ADDR1:r[0-9]+]]
-; CHECK-ARMV5TE-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]], #-4]
-; CHECK-ARMV5TE-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]], #-4]
-; CHECK-T2: movw [[ADDR0:r[0-9]+]], :lower16:x
-; CHECK-T2-NEXT: movw [[ADDR1:r[0-9]+]], :lower16:y
-; CHECK-T2-NEXT: movt [[ADDR0]], :upper16:x
-; CHECK-T2-NEXT: movt [[ADDR1]], :upper16:y
-; CHECK-T2-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]], #-4]
-; CHECK-T2-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]], #-4]
-; CHECK-ARMV4T: ldr [[ADDR0:r[0-9]+]]
-; CHECK-ARMV4T-NEXT: ldr [[ADDR1:r[0-9]+]]
-; CHECK-ARMV4T-NEXT: ldr [[R0:r[0-9]+]], {{\[}}[[ADDR0]], #-4]
-; CHECK-ARMV4T-NEXT: ldr [[R1:r[0-9]+]], {{\[}}[[ADDR0]]]
-; CHECK-ARMV4T-NEXT: str [[R1]], {{\[}}[[ADDR1]]]
-; CHECK-ARMV4T-NEXT: str [[R0]], {{\[}}[[ADDR1]], #-4]
-  %0 = load volatile i64, i64* bitcast (i8* getelementptr (i8, i8* bitcast (i64* @x to i8*), i32 -4) to i64*), align 8
-  store volatile i64 %0, i64* bitcast (i8* getelementptr (i8, i8* bitcast (i64* @y to i8*), i32 -4) to i64*), align 8
-  ret void
-}
-
-define void @test_offset_1() {
-; CHECK-LABEL: test_offset_1:
-; CHECK-ARMV5TE: ldr [[ADDR0:r[0-9]+]]
-; CHECK-ARMV5TE-NEXT: ldr [[ADDR1:r[0-9]+]]
-; CHECK-ARMV5TE-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]], #255]
-; CHECK-ARMV5TE-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]], #255]
-; CHECK-T2: adds [[ADDR0:r[0-9]+]], #255
-; CHECK-T2-NEXT: adds [[ADDR1:r[0-9]+]], #255
-; CHECK-T2-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]]]
-; CHECK-T2-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]]]
-; CHECK-ARMV4T: ldr [[ADDR0:r[0-9]+]]
-; CHECK-ARMV4T-NEXT: ldr [[ADDR1:r[0-9]+]]
-; CHECK-ARMV4T-NEXT: ldr [[R0:r[0-9]+]], {{\[}}[[ADDR0]], #255]
-; CHECK-ARMV4T-NEXT: ldr [[R1:r[0-9]+]], {{\[}}[[ADDR0]], #259]
-; CHECK-ARMV4T-NEXT: str [[R1]], {{\[}}[[ADDR1]], #259]
-; CHECK-ARMV4T-NEXT: str [[R0]], {{\[}}[[ADDR1]], #255]
-entry:
-  %0 = load volatile i64, i64* bitcast (i8* getelementptr (i8, i8* bitcast (i64* @x to i8*), i32 255) to i64*), align 8
-  store volatile i64 %0, i64* bitcast (i8* getelementptr (i8, i8* bitcast (i64* @y to i8*), i32 255) to i64*), align 8
-  ret void
-}
-
-define void @test_offset_2() {
-; CHECK-LABEL: test_offset_2:
-; CHECK-ARMV5TE: ldr [[ADDR0:r[0-9]+]]
-; CHECK-ARMV5TE-NEXT: mov [[OFFSET0:r[0-9]+]], #256
-; CHECK-ARMV5TE-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]], [[OFFSET0]]]
-; CHECK-ARMV5TE-NEXT: ldr [[ADDR1:r[0-9]+]]
-; CHECK-ARMV5TE-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]], [[OFFSET0]]]
-; CHECK-T2: movw [[ADDR0:r[0-9]+]], :lower16:x
-; CHECK-T2-NEXT: movw [[ADDR1:r[0-9]+]], :lower16:y
-; CHECK-T2-NEXT: movt [[ADDR0]], :upper16:x
-; CHECK-T2-NEXT: movt [[ADDR1]], :upper16:y
-; CHECK-T2-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]], #256]
-; CHECK-T2-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]], #256]
-; CHECK-ARMV4T: ldr [[ADDR0:r[0-9]+]]
-; CHECK-ARMV4T-NEXT: ldr [[ADDR1:r[0-9]+]]
-; CHECK-ARMV4T-NEXT: ldr [[R0:r[0-9]+]], {{\[}}[[ADDR0]], #256]
-; CHECK-ARMV4T-NEXT: ldr [[R1:r[0-9]+]], {{\[}}[[ADDR0]], #260]
-; CHECK-ARMV4T-NEXT: str [[R1]], {{\[}}[[ADDR1]], #260]
-; CHECK-ARMV4T-NEXT: str [[R0]], {{\[}}[[ADDR1]], #256]
-entry:
-  %0 = load volatile i64, i64* bitcast (i8* getelementptr (i8, i8* bitcast (i64* @x to i8*), i32 256) to i64*), align 8
-  store volatile i64 %0, i64* bitcast (i8* getelementptr (i8, i8* bitcast (i64* @y to i8*), i32 256) to i64*), align 8
-  ret void
-}
-
-define void @test_offset_3() {
-; CHECK-LABEL: test_offset_3:
-; CHECK-ARMV5TE: ldr [[ADDR0:r[0-9]+]]
-; CHECK-ARMV5TE-NEXT: mov [[OFFSET0:r[0-9]+]], #1020
-; CHECK-ARMV5TE-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]], [[OFFSET0]]]
-; CHECK-ARMV5TE-NEXT: ldr [[ADDR1:r[0-9]+]]
-; CHECK-ARMV5TE-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]], [[OFFSET0]]]
-; CHECK-T2: movw [[ADDR0:r[0-9]+]], :lower16:x
-; CHECK-T2-NEXT: movw [[ADDR1:r[0-9]+]], :lower16:y
-; CHECK-T2-NEXT: movt [[ADDR0]], :upper16:x
-; CHECK-T2-NEXT: movt [[ADDR1]], :upper16:y
-; CHECK-T2-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]], #1020]
-; CHECK-T2-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]], #1020]
-; CHECK-ARMV4T: ldr [[ADDR0:r[0-9]+]]
-; CHECK-ARMV4T-NEXT: ldr [[ADDR1:r[0-9]+]]
-; CHECK-ARMV4T-NEXT: ldr [[R0:r[0-9]+]], {{\[}}[[ADDR0]], #1020]
-; CHECK-ARMV4T-NEXT: ldr [[R1:r[0-9]+]], {{\[}}[[ADDR0]], #1024]
-; CHECK-ARMV4T-NEXT: str [[R1]], {{\[}}[[ADDR1]], #1024]
-; CHECK-ARMV4T-NEXT: str [[R0]], {{\[}}[[ADDR1]], #1020]
-entry:
-  %0 = load volatile i64, i64* bitcast (i8* getelementptr (i8, i8* bitcast (i64* @x to i8*), i32 1020) to i64*), align 8
-  store volatile i64 %0, i64* bitcast (i8* getelementptr (i8, i8* bitcast (i64* @y to i8*), i32 1020) to i64*), align 8
-  ret void
-}
-
-define void @test_offset_4() {
-; CHECK-LABEL: test_offset_4:
-; CHECK-ARMV5TE: ldr [[ADDR0:r[0-9]+]]
-; CHECK-ARMV5TE-NEXT: mov [[OFFSET0:r[0-9]+]], #1024
-; CHECK-ARMV5TE-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]], [[OFFSET0]]]
-; CHECK-ARMV5TE: ldr [[ADDR1:r[0-9]+]]
-; CHECK-ARMV5TE-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]], [[OFFSET0]]]
-; CHECK-T2: movw [[ADDR1:r[0-9]+]], :lower16:y
-; CHECK-T2-NEXT: movw [[ADDR0:r[0-9]+]], :lower16:x
-; CHECK-T2-NEXT: movt [[ADDR1]], :upper16:y
-; CHECK-T2-NEXT: movt [[ADDR0]], :upper16:x
-; CHECK-T2-NEXT: add.w [[ADDR0]], [[ADDR0]], #1024
-; CHECK-T2-NEXT: add.w [[ADDR1]], [[ADDR1]], #1024
-; CHECK-T2-NEXT: ldrd [[R0:r[0-9]+]], [[R1:r[0-9]+]], {{\[}}[[ADDR0]]]
-; CHECK-T2-NEXT: strd [[R0]], [[R1]], {{\[}}[[ADDR1]]]
-; CHECK-ARMV4T: ldr [[ADDR0:r[0-9]+]]
-; CHECK-ARMV4T-NEXT: ldr [[ADDR1:r[0-9]+]]
-; CHECK-ARMV4T-NEXT: ldr [[R0:r[0-9]+]], {{\[}}[[ADDR0]], #1024]
-; CHECK-ARMV4T-NEXT: ldr [[R1:r[0-9]+]], {{\[}}[[ADDR0]], #1028]
-; CHECK-ARMV4T-NEXT: str [[R1]], {{\[}}[[ADDR1]], #1028]
-; CHECK-ARMV4T-NEXT: str [[R0]], {{\[}}[[ADDR1]], #1024]
-entry:
-  %0 = load volatile i64, i64* bitcast (i8* getelementptr (i8, i8* bitcast (i64* @x to i8*), i32 1024) to i64*), align 8
-  store volatile i64 %0, i64* bitcast (i8* getelementptr (i8, i8* bitcast (i64* @y to i8*), i32 1024) to i64*), align 8
-  ret void
-}
-
-define void @test_stack() {
-; CHECK-LABEL: test_stack:
-; CHECK-ARMV5TE: sub sp, sp, #8
-; CHECK-ARMV5TE-NEXT: mov r1, #0
-; CHECK-ARMV5TE-NEXT: mov r0, #5
-; CHECK-ARMV5TE-NEXT: strd r0, r1, [sp]
-; CHECK-ARMV5TE-NEXT: ldrd r0, r1, [sp]
-; CHECK-T2: sub sp, #8
-; CHECK-T2-NEXT: mov r0, sp
-; CHECK-T2-NEXT: movs r1, #0
-; CHECK-T2-NEXT: movs r2, #5
-; CHECK-T2-NEXT: strd r2, r1, [r0]
-; CHECK-T2-NEXT: ldrd r0, r1, [r0]
-; CHECK-ARMV4T: sub sp, sp, #8
-; CHECK-ARMV4T-NEXT: mov r0, #0
-; CHECK-ARMV4T-NEXT: str r0, [sp, #4]
-; CHECK-ARMV4T-NEXT: mov r0, #5
-; CHECK-ARMV4T-NEXT: str r0, [sp]
-; CHECK-ARMV4T-NEXT: ldr r0, [sp]
-; CHECK-ARMV4T-NEXT: ldr r0, [sp, #4]
-entry:
-  %0 = alloca i64
-  store volatile i64 5, i64* %0
-  %1 = load volatile i64, i64* %0
-  ret void
-}