#include "PPCRegisterInfo.h"
#include "PPCFrameLowering.h"
#include "PPCInstrBuilder.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cstdlib>
using namespace llvm;

#define DEBUG_TYPE "reginfo"

// Pull in the TableGen-generated target register description.
#define GET_REGINFO_TARGET_DESC
#include "PPCGenRegisterInfo.inc"

// Counters for how often getLargestLegalSuperClass is asked about a
// gprc/g8rc register (see getLargestLegalSuperClass below).
STATISTIC(InflateGPRC, "Number of gprc inputs for getLargestLegalClass");
STATISTIC(InflateGP8RC, "Number of g8rc inputs for getLargestLegalClass");

// Allow a base pointer (distinct from the frame pointer) for frames that
// need realignment; consulted by hasBasePointer.
static cl::opt<bool>
EnableBasePointer("ppc-use-base-pointer", cl::Hidden, cl::init(true),
         cl::desc("Enable use of a base pointer for complex stack frames"));

// Debugging aid: reserve a base pointer even when not strictly required.
static cl::opt<bool>
AlwaysBasePointer("ppc-always-use-base-pointer", cl::Hidden, cl::init(false),
         cl::desc("Force the use of a base pointer in every function"));

// When set, GPR spills may target VSRs instead of stack slots (see
// getLargestLegalSuperClass).
static cl::opt<bool>
EnableGPRToVecSpills("ppc-enable-gpr-to-vsr-spills", cl::Hidden, cl::init(false),
cl::desc("Enable spills from gpr to vsr rather than stack"));

// Treat R1 as caller-preserved so reloads keyed off it can be hoisted
// (see isCallerPreservedPhysReg).
static cl::opt<bool>
StackPtrConst("ppc-stack-ptr-caller-preserved",
                cl::desc("Consider R1 caller preserved so stack saves of "
                         "caller preserved registers can be LICM candidates"),
                cl::init(true), cl::Hidden);

// Cap on how far lowerCRBitSpilling scans backwards for the CR bit's
// defining instruction.
static cl::opt<unsigned>
MaxCRBitSpillDist("ppc-max-crbit-spill-dist",
                  cl::desc("Maximum search distance for definition of CR bit "
                           "spill on ppc"),
                  cl::Hidden, cl::init(100));

// Debug-build-only knob used by the ACC/UACC spill/copy reporting helpers.
#ifndef NDEBUG
static cl::opt<bool>
ReportAccMoves("ppc-report-acc-moves",
               cl::desc("Emit information about accumulator register spills "
                        "and copies"),
               cl::Hidden, cl::init(false));
#endif

// Defined elsewhere in the target; when set, paired vector stores are
// expanded into individual STXVs (see lowerOctWordSpilling/lowerACCSpilling).
extern cl::opt<bool> DisableAutoPairedVecSt;

// Forward declaration; defined later in this file.
static unsigned offsetMinAlignForOpcode(unsigned OpC);
/// Construct the PPC register info, seeding the immediate-form to
/// indexed-form opcode map used during frame-index elimination.
PPCRegisterInfo::PPCRegisterInfo(const PPCTargetMachine &TM)
    : PPCGenRegisterInfo(TM.isPPC64() ? PPC::LR8 : PPC::LR,
                         TM.isPPC64() ? 0 : 1,
                         TM.isPPC64() ? 0 : 1),
      TM(TM) {
  // Each entry maps a D-form (immediate displacement) memory opcode to the
  // X-form (register + register) opcode used when a frame offset does not
  // fit the immediate field.  Prefixed (P*) instructions map to the plain
  // X-forms as well.
  struct ImmToIdxEntry {
    unsigned ImmOpc;
    unsigned IdxOpc;
  };
  static const ImmToIdxEntry Mappings[] = {
      {PPC::LD, PPC::LDX},           {PPC::STD, PPC::STDX},
      {PPC::LBZ, PPC::LBZX},         {PPC::STB, PPC::STBX},
      {PPC::LHZ, PPC::LHZX},         {PPC::LHA, PPC::LHAX},
      {PPC::LWZ, PPC::LWZX},         {PPC::LWA, PPC::LWAX},
      {PPC::LFS, PPC::LFSX},         {PPC::LFD, PPC::LFDX},
      {PPC::STH, PPC::STHX},         {PPC::STW, PPC::STWX},
      {PPC::STFS, PPC::STFSX},       {PPC::STFD, PPC::STFDX},
      {PPC::ADDI, PPC::ADD4},
      {PPC::LWA_32, PPC::LWAX_32},
      {PPC::LHA8, PPC::LHAX8},       {PPC::LBZ8, PPC::LBZX8},
      {PPC::LHZ8, PPC::LHZX8},       {PPC::LWZ8, PPC::LWZX8},
      {PPC::STB8, PPC::STBX8},       {PPC::STH8, PPC::STHX8},
      {PPC::STW8, PPC::STWX8},       {PPC::STDU, PPC::STDUX},
      {PPC::ADDI8, PPC::ADD8},
      {PPC::LQ, PPC::LQX_PSEUDO},
      {PPC::STQ, PPC::STQX_PSEUDO},
      // VSX loads/stores of scalars.
      {PPC::DFLOADf32, PPC::LXSSPX},
      {PPC::DFLOADf64, PPC::LXSDX},
      {PPC::SPILLTOVSR_LD, PPC::SPILLTOVSR_LDX},
      {PPC::SPILLTOVSR_ST, PPC::SPILLTOVSR_STX},
      {PPC::DFSTOREf32, PPC::STXSSPX},
      {PPC::DFSTOREf64, PPC::STXSDX},
      {PPC::LXV, PPC::LXVX},
      {PPC::LXSD, PPC::LXSDX},
      {PPC::LXSSP, PPC::LXSSPX},
      {PPC::STXV, PPC::STXVX},
      {PPC::STXSD, PPC::STXSDX},
      {PPC::STXSSP, PPC::STXSSPX},
      // SPE loads/stores.
      {PPC::EVLDD, PPC::EVLDDX},
      {PPC::EVSTDD, PPC::EVSTDDX},
      {PPC::SPESTW, PPC::SPESTWX},
      {PPC::SPELWZ, PPC::SPELWZX},
      // Power10 prefixed instructions map to non-prefixed X-forms.
      {PPC::PLBZ, PPC::LBZX},        {PPC::PLBZ8, PPC::LBZX8},
      {PPC::PLHZ, PPC::LHZX},        {PPC::PLHZ8, PPC::LHZX8},
      {PPC::PLHA, PPC::LHAX},        {PPC::PLHA8, PPC::LHAX8},
      {PPC::PLWZ, PPC::LWZX},        {PPC::PLWZ8, PPC::LWZX8},
      // Both PLWA forms use the 64-bit LWAX X-form.
      {PPC::PLWA, PPC::LWAX},        {PPC::PLWA8, PPC::LWAX},
      {PPC::PLD, PPC::LDX},          {PPC::PSTD, PPC::STDX},
      {PPC::PSTB, PPC::STBX},        {PPC::PSTB8, PPC::STBX8},
      {PPC::PSTH, PPC::STHX},        {PPC::PSTH8, PPC::STHX8},
      {PPC::PSTW, PPC::STWX},        {PPC::PSTW8, PPC::STWX8},
      {PPC::PLFS, PPC::LFSX},        {PPC::PSTFS, PPC::STFSX},
      {PPC::PLFD, PPC::LFDX},        {PPC::PSTFD, PPC::STFDX},
      {PPC::PLXSSP, PPC::LXSSPX},    {PPC::PSTXSSP, PPC::STXSSPX},
      {PPC::PLXSD, PPC::LXSDX},      {PPC::PSTXSD, PPC::STXSDX},
      {PPC::PLXV, PPC::LXVX},        {PPC::PSTXV, PPC::STXVX},
      // Paired vector loads/stores.
      {PPC::LXVP, PPC::LXVPX},
      {PPC::STXVP, PPC::STXVPX},
      {PPC::PLXVP, PPC::LXVPX},
      {PPC::PSTXVP, PPC::STXVPX},
  };
  for (const ImmToIdxEntry &E : Mappings)
    ImmToIdxMap[E.ImmOpc] = E.IdxOpc;
}
/// Return the register class to use for pointer values.  Kind 1 requests a
/// class excluding the register that reads as zero in the RA position
/// (X0/R0); any other Kind gets the full GPR class for the pointer width.
const TargetRegisterClass *
PPCRegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
    const {
  const bool Is64Bit = TM.isPPC64();
  if (Kind == 1)
    return Is64Bit ? &PPC::G8RC_NOX0RegClass : &PPC::GPRC_NOR0RegClass;
  return Is64Bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
}
// Return the null-terminated list of callee-saved registers for *MF.  The
// list is selected by calling convention (AnyReg, Cold, or default), ABI
// (32/64-bit SVR4 vs. AIX) and subtarget features (VSX, Altivec, paired
// vector memops, SPE).
const MCPhysReg*
PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const PPCSubtarget &Subtarget = MF->getSubtarget<PPCSubtarget>();
  // anyregcc: essentially every register is treated as callee-saved.
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg) {
    if (!TM.isPPC64() && Subtarget.isAIXABI())
      report_fatal_error("AnyReg unimplemented on 32-bit AIX.");
    if (Subtarget.hasVSX()) {
      if (Subtarget.pairedVectorMemops())
        return CSR_64_AllRegs_VSRP_SaveList;
      // AIX with the default (non-extended) Altivec ABI uses a reduced list.
      if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
        return CSR_64_AllRegs_AIX_Dflt_VSX_SaveList;
      return CSR_64_AllRegs_VSX_SaveList;
    }
    if (Subtarget.hasAltivec()) {
      if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
        return CSR_64_AllRegs_AIX_Dflt_Altivec_SaveList;
      return CSR_64_AllRegs_Altivec_SaveList;
    }
    return CSR_64_AllRegs_SaveList;
  }
  // On 64-bit, X2 (TOC pointer) is additionally saved when it is
  // allocatable and PC-relative addressing is not in use.
  bool SaveR2 = MF->getRegInfo().isAllocatable(PPC::X2) &&
                !Subtarget.isUsingPCRelativeCalls();
  // coldcc: the callee preserves nearly everything to keep call sites cheap.
  if (MF->getFunction().getCallingConv() == CallingConv::Cold) {
    if (Subtarget.isAIXABI())
      report_fatal_error("Cold calling unimplemented on AIX.");
    if (TM.isPPC64()) {
      if (Subtarget.pairedVectorMemops())
        return SaveR2 ? CSR_SVR64_ColdCC_R2_VSRP_SaveList
                      : CSR_SVR64_ColdCC_VSRP_SaveList;
      if (Subtarget.hasAltivec())
        return SaveR2 ? CSR_SVR64_ColdCC_R2_Altivec_SaveList
                      : CSR_SVR64_ColdCC_Altivec_SaveList;
      return SaveR2 ? CSR_SVR64_ColdCC_R2_SaveList
                    : CSR_SVR64_ColdCC_SaveList;
    }
    // 32-bit cold calling convention.
    if (Subtarget.pairedVectorMemops())
      return CSR_SVR32_ColdCC_VSRP_SaveList;
    else if (Subtarget.hasAltivec())
      return CSR_SVR32_ColdCC_Altivec_SaveList;
    else if (Subtarget.hasSPE())
      return CSR_SVR32_ColdCC_SPE_SaveList;
    return CSR_SVR32_ColdCC_SaveList;
  }
  // Default calling convention, 64-bit.
  if (TM.isPPC64()) {
    if (Subtarget.pairedVectorMemops())
      return SaveR2 ? CSR_SVR464_R2_VSRP_SaveList : CSR_SVR464_VSRP_SaveList;
    // Altivec save list applies on AIX only under the extended Altivec ABI.
    if (Subtarget.hasAltivec() &&
        (!Subtarget.isAIXABI() || TM.getAIXExtendedAltivecABI())) {
      return SaveR2 ? CSR_PPC64_R2_Altivec_SaveList
                    : CSR_PPC64_Altivec_SaveList;
    }
    return SaveR2 ? CSR_PPC64_R2_SaveList : CSR_PPC64_SaveList;
  }
  // Default calling convention, 32-bit AIX.
  if (Subtarget.isAIXABI()) {
    if (Subtarget.hasAltivec())
      return TM.getAIXExtendedAltivecABI() ? CSR_AIX32_Altivec_SaveList
                                           : CSR_AIX32_SaveList;
    return CSR_AIX32_SaveList;
  }
  // Default calling convention, 32-bit SVR4.
  if (Subtarget.pairedVectorMemops())
    return CSR_SVR432_VSRP_SaveList;
  if (Subtarget.hasAltivec())
    return CSR_SVR432_Altivec_SaveList;
  else if (Subtarget.hasSPE())
    return CSR_SVR432_SPE_SaveList;
  return CSR_SVR432_SaveList;
}
// Return the register mask describing which registers a call with calling
// convention CC preserves.  Mirrors the save-list selection logic of
// getCalleeSavedRegs above, but produces bit masks rather than lists.
const uint32_t *
PPCRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                      CallingConv::ID CC) const {
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  // anyregcc: everything is preserved across the call.
  if (CC == CallingConv::AnyReg) {
    if (Subtarget.hasVSX()) {
      if (Subtarget.pairedVectorMemops())
        return CSR_64_AllRegs_VSRP_RegMask;
      // AIX default (non-extended) Altivec ABI uses a reduced mask.
      if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
        return CSR_64_AllRegs_AIX_Dflt_VSX_RegMask;
      return CSR_64_AllRegs_VSX_RegMask;
    }
    if (Subtarget.hasAltivec()) {
      if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
        return CSR_64_AllRegs_AIX_Dflt_Altivec_RegMask;
      return CSR_64_AllRegs_Altivec_RegMask;
    }
    return CSR_64_AllRegs_RegMask;
  }
  // AIX selects purely on word size and the extended-Altivec ABI flag.
  if (Subtarget.isAIXABI()) {
    return TM.isPPC64()
               ? ((Subtarget.hasAltivec() && TM.getAIXExtendedAltivecABI())
                      ? CSR_PPC64_Altivec_RegMask
                      : CSR_PPC64_RegMask)
               : ((Subtarget.hasAltivec() && TM.getAIXExtendedAltivecABI())
                      ? CSR_AIX32_Altivec_RegMask
                      : CSR_AIX32_RegMask);
  }
  // coldcc masks, keyed on word size then vector feature.
  if (CC == CallingConv::Cold) {
    if (TM.isPPC64())
      return Subtarget.pairedVectorMemops()
                 ? CSR_SVR64_ColdCC_VSRP_RegMask
                 : (Subtarget.hasAltivec() ? CSR_SVR64_ColdCC_Altivec_RegMask
                                           : CSR_SVR64_ColdCC_RegMask);
    else
      return Subtarget.pairedVectorMemops()
                 ? CSR_SVR32_ColdCC_VSRP_RegMask
                 : (Subtarget.hasAltivec()
                        ? CSR_SVR32_ColdCC_Altivec_RegMask
                        : (Subtarget.hasSPE() ? CSR_SVR32_ColdCC_SPE_RegMask
                                              : CSR_SVR32_ColdCC_RegMask));
  }
  // Default calling convention (ELF), keyed on word size then features.
  if (TM.isPPC64())
    return Subtarget.pairedVectorMemops()
               ? CSR_SVR464_VSRP_RegMask
               : (Subtarget.hasAltivec() ? CSR_PPC64_Altivec_RegMask
                                         : CSR_PPC64_RegMask);
  else
    return Subtarget.pairedVectorMemops()
               ? CSR_SVR432_VSRP_RegMask
               : (Subtarget.hasAltivec()
                      ? CSR_SVR432_Altivec_RegMask
                      : (Subtarget.hasSPE() ? CSR_SVR432_SPE_RegMask
                                            : CSR_SVR432_RegMask));
}
/// Return the mask stating that no registers are preserved across a call
/// (used e.g. by patchpoints that declare no callee-saved contract).
const uint32_t *PPCRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}
/// Remove pseudo-registers from a stackmap live-out mask: ZERO/ZERO8 (the
/// reads-as-zero GPR encodings) and RM (the FP rounding-mode pseudo) never
/// carry live values that a stackmap consumer could observe.
void PPCRegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
  const unsigned PseudoRegs[] = {PPC::ZERO, PPC::ZERO8, PPC::RM};
  for (unsigned Reg : PseudoRegs) {
    const unsigned Word = Reg / 32;
    const uint32_t Bit = 1u << (Reg % 32);
    Mask[Word] &= ~Bit;
  }
}
// Compute the set of registers the allocator must never use in MF.
BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const PPCFrameLowering *TFI = getFrameLowering(MF);
  // Registers with fixed roles: the reads-as-zero pseudo, the FP/BP
  // pseudos, count registers, the stack pointer (R1), link registers,
  // the rounding-mode pseudo and VRSAVE.
  markSuperRegs(Reserved, PPC::ZERO);
  markSuperRegs(Reserved, PPC::FP);
  markSuperRegs(Reserved, PPC::BP);
  markSuperRegs(Reserved, PPC::CTR);
  markSuperRegs(Reserved, PPC::CTR8);
  markSuperRegs(Reserved, PPC::R1);
  markSuperRegs(Reserved, PPC::LR);
  markSuperRegs(Reserved, PPC::LR8);
  markSuperRegs(Reserved, PPC::RM);
  markSuperRegs(Reserved, PPC::VRSAVE);
  if (Subtarget.isSVR4ABI()) {
    const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
    // R2 (TOC pointer) is reserved on 64-bit SVR4 only when the TOC base
    // is actually used or inline asm might touch it; on 32-bit it is
    // always reserved.  R13 is reserved unconditionally under SVR4 — note
    // the second call is NOT governed by the inner 'if' (no braces).
    if (!TM.isPPC64() || FuncInfo->usesTOCBasePtr() || MF.hasInlineAsm())
      markSuperRegs(Reserved, PPC::R2);
    markSuperRegs(Reserved, PPC::R13);
  }
  // AIX reserves R2 (TOC pointer) unconditionally.
  if (Subtarget.isAIXABI())
    markSuperRegs(Reserved, PPC::R2);
  // R13 is the thread pointer on 64-bit targets.
  if (TM.isPPC64())
    markSuperRegs(Reserved, PPC::R13);
  // R31 becomes the frame pointer when one is needed.
  if (TFI->needsFP(MF))
    markSuperRegs(Reserved, PPC::R31);
  bool IsPositionIndependent = TM.isPositionIndependent();
  // The base pointer uses R29 when R30 is the PIC base (32-bit ELF PIC),
  // otherwise R30.
  if (hasBasePointer(MF)) {
    if (Subtarget.is32BitELFABI() && IsPositionIndependent)
      markSuperRegs(Reserved, PPC::R29);
    else
      markSuperRegs(Reserved, PPC::R30);
  }
  // 32-bit ELF PIC reserves R30 as the PIC base register.
  if (Subtarget.is32BitELFABI() && IsPositionIndependent)
    markSuperRegs(Reserved, PPC::R30);
  // Without Altivec, all vector registers are unusable.
  if (!Subtarget.hasAltivec())
    for (TargetRegisterClass::iterator I = PPC::VRRCRegClass.begin(),
         IE = PPC::VRRCRegClass.end(); I != IE; ++I)
      markSuperRegs(Reserved, *I);
  // Under the AIX default (non-extended) Altivec ABI, the nominally
  // callee-saved vector registers are not managed, so reserve them and
  // every register aliasing them.
  if (Subtarget.isAIXABI() && Subtarget.hasAltivec() &&
      !TM.getAIXExtendedAltivecABI()) {
    for (auto Reg : CSR_Altivec_SaveList) {
      if (Reg == 0)
        break;
      markSuperRegs(Reserved, Reg);
      for (MCRegAliasIterator AS(Reg, this, true); AS.isValid(); ++AS) {
        Reserved.set(*AS);
      }
    }
  }
  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}
/// Inline asm may clobber any physical register except the stack pointer
/// (R1 on 32-bit, X1 on 64-bit).
bool PPCRegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                       MCRegister PhysReg) const {
  if (PhysReg == PPC::R1 || PhysReg == PPC::X1)
    return false;
  return true;
}
// Decide whether frame-index elimination may need a scavenged register.
// Conservatively returns true when callee-saved info is not yet valid,
// when the frame is too large for a 16-bit D-form displacement, or when
// any callee-saved spill uses an opcode that cannot take a simple
// immediate offset (alignment-constrained forms, X-forms, or quadword
// spill/restore pseudos).
bool PPCRegisterInfo::requiresFrameIndexScavenging(const MachineFunction &MF) const {
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const PPCInstrInfo *InstrInfo = Subtarget.getInstrInfo();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const std::vector<CalleeSavedInfo> &Info = MFI.getCalleeSavedInfo();
  LLVM_DEBUG(dbgs() << "requiresFrameIndexScavenging for " << MF.getName()
                    << ".\n");
  if (!MFI.isCalleeSavedInfoValid()) {
    LLVM_DEBUG(dbgs() << "TRUE - Invalid callee saved info.\n");
    return true;
  }
  // Any frame size that does not fit a 15-bit displacement needs an index
  // register, hence scavenging.
  unsigned FrameSize = MFI.getStackSize();
  if (FrameSize & ~0x7FFF) {
    LLVM_DEBUG(dbgs() << "TRUE - Frame size is too large for D-Form.\n");
    return true;
  }
  for (unsigned i = 0; i < Info.size(); i++) {
    // Registers spilled to another register need no frame slot.
    if (Info[i].isSpilledToReg())
      continue;
    int FrIdx = Info[i].getFrameIdx();
    Register Reg = Info[i].getReg();
    const TargetRegisterClass *RC = getMinimalPhysRegClass(Reg);
    unsigned Opcode = InstrInfo->getStoreOpcodeForSpill(RC);
    if (!MFI.isFixedObjectIndex(FrIdx)) {
      // Non-fixed objects stored with opcodes whose offset must obey an
      // alignment constraint (e.g. DS/DQ forms) may need scavenging.
      if (offsetMinAlignForOpcode(Opcode) > 1) {
        LLVM_DEBUG(dbgs() << "Memory Operand: " << InstrInfo->getName(Opcode)
                          << " for register " << printReg(Reg, this) << ".\n");
        LLVM_DEBUG(dbgs() << "TRUE - Not fixed frame object that requires "
                          << "alignment.\n");
        return true;
      }
    }
    // X-form memory ops take a register offset, which must be scavenged.
    if (InstrInfo->isXFormMemOp(Opcode)) {
      LLVM_DEBUG(dbgs() << "Memory Operand: " << InstrInfo->getName(Opcode)
                        << " for register " << printReg(Reg, this) << ".\n");
      LLVM_DEBUG(dbgs() << "TRUE - Memory operand is X-Form.\n");
      return true;
    }
    // Quadword spill/restore pseudos also expand to forms needing a
    // scratch register.
    if ((Opcode == PPC::RESTORE_QUADWORD) || (Opcode == PPC::SPILL_QUADWORD)) {
      LLVM_DEBUG(dbgs() << "Memory Operand: " << InstrInfo->getName(Opcode)
                        << " for register " << printReg(Reg, this) << ".\n");
      LLVM_DEBUG(dbgs() << "TRUE - Memory operand is a quadword.\n");
      return true;
    }
  }
  LLVM_DEBUG(dbgs() << "FALSE - Scavenging is not required.\n");
  return false;
}
bool PPCRegisterInfo::requiresVirtualBaseRegisters(
const MachineFunction &MF) const {
const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
return !Subtarget.hasROPProtect();
}
// Return true if PhysReg holds the same value before and after any call in
// MF, allowing reloads of its stack save to be hoisted (e.g. by LICM).
bool PPCRegisterInfo::isCallerPreservedPhysReg(MCRegister PhysReg,
                                               const MachineFunction &MF) const {
  assert(Register::isPhysicalRegister(PhysReg));
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  // Only modeled for 64-bit ELF and AIX.
  if (!Subtarget.is64BitELFABI() && !Subtarget.isAIXABI())
    return false;
  // The TOC pointer is call-invariant only while it is reserved.
  if (PhysReg == Subtarget.getTOCPointerRegister())
    return (getReservedRegs(MF).test(PhysReg));
  // The stack pointer is constant across calls unless the frame can move
  // (variable-sized objects or opaque SP adjustments); gated by the
  // -ppc-stack-ptr-caller-preserved option.
  if (StackPtrConst && PhysReg == Subtarget.getStackPointerRegister() &&
      !MFI.hasVarSizedObjects() && !MFI.hasOpaqueSPAdjustment())
    return true;
  return false;
}
// Provide allocation hints for VirtReg on top of the target-independent
// ones.  Scans VirtReg's uses: a COPY into a UACC-class register hints the
// matching VSRp subregister of the already-assigned UACC; a BUILD_UACC
// from an assigned ACC register hints the corresponding UACC.
bool PPCRegisterInfo::getRegAllocationHints(Register VirtReg,
                                            ArrayRef<MCPhysReg> Order,
                                            SmallVectorImpl<MCPhysReg> &Hints,
                                            const MachineFunction &MF,
                                            const VirtRegMap *VRM,
                                            const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  // Collect the default hints first; our hints are appended after them.
  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);
  for (MachineInstr &Use : MRI->reg_nodbg_instructions(VirtReg)) {
    const MachineOperand *ResultOp = nullptr;
    Register ResultReg;
    switch (Use.getOpcode()) {
    case TargetOpcode::COPY: {
      ResultOp = &Use.getOperand(0);
      ResultReg = ResultOp->getReg();
      // If the copy target is a UACC-class vreg that already has a
      // physical assignment, hint the VSRp subregister it maps to.
      if (Register::isVirtualRegister(ResultReg) &&
          MRI->getRegClass(ResultReg)->contains(PPC::UACC0) &&
          VRM->hasPhys(ResultReg)) {
        Register UACCPhys = VRM->getPhys(ResultReg);
        Register HintReg = getSubReg(UACCPhys, ResultOp->getSubReg());
        // Only hint real VSRp registers (getSubReg may yield no-register).
        if (HintReg >= PPC::VSRp0 && HintReg <= PPC::VSRp31)
          Hints.push_back(HintReg);
      }
      break;
    }
    case PPC::BUILD_UACC: {
      ResultOp = &Use.getOperand(0);
      ResultReg = ResultOp->getReg();
      // If the BUILD_UACC result is an ACC-class vreg with an assignment,
      // hint the UACC register with the same index.
      if (MRI->getRegClass(ResultReg)->contains(PPC::ACC0) &&
          VRM->hasPhys(ResultReg)) {
        Register ACCPhys = VRM->getPhys(ResultReg);
        assert((ACCPhys >= PPC::ACC0 && ACCPhys <= PPC::ACC7) &&
               "Expecting an ACC register for BUILD_UACC.");
        Register HintReg = PPC::UACC0 + (ACCPhys - PPC::ACC0);
        Hints.push_back(HintReg);
      }
      break;
    }
    }
  }
  return BaseImplRetVal;
}
// Return the register-pressure limit (usable register count) for RC in MF.
// A safety margin of one register is always subtracted, plus the frame
// pointer for the GPR classes when one is in use.
unsigned PPCRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                              MachineFunction &MF) const {
  const PPCFrameLowering *TFI = getFrameLowering(MF);
  const unsigned DefaultSafety = 1;
  switch (RC->getID()) {
  default:
    return 0;
  case PPC::G8RC_NOX0RegClassID:
  case PPC::GPRC_NOR0RegClassID:
  case PPC::SPERCRegClassID:
  case PPC::G8RCRegClassID:
  case PPC::GPRCRegClassID: {
    // 32 GPRs, minus the frame pointer (if used) and the safety margin.
    unsigned FP = TFI->hasFP(MF) ? 1 : 0;
    return 32 - FP - DefaultSafety;
  }
  case PPC::F4RCRegClassID:
  case PPC::F8RCRegClassID:
  case PPC::VSLRCRegClassID:
    return 32 - DefaultSafety;
  case PPC::VFRCRegClassID:
  case PPC::VRRCRegClassID: {
    const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
    // Under the AIX default (non-extended) Altivec ABI the callee-saved
    // vector registers are reserved (see getReservedRegs), leaving fewer
    // allocatable registers.
    if (!TM.getAIXExtendedAltivecABI() && Subtarget.isAIXABI())
      return 20 - DefaultSafety;
  }
    return 32 - DefaultSafety;
  case PPC::VSFRCRegClassID:
  case PPC::VSSRCRegClassID:
  case PPC::VSRCRegClassID: {
    const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
    // Same AIX default-ABI reduction applied to the 64-register VSX file.
    if (!TM.getAIXExtendedAltivecABI() && Subtarget.isAIXABI())
      return 52 - DefaultSafety;
  }
    return 64 - DefaultSafety;
  case PPC::CRRCRegClassID:
    return 8 - DefaultSafety;
  }
}
// Return the largest super-class of RC that is legal for this subtarget.
// With VSX available, same-sized super-classes are accepted when the
// corresponding feature (P8 vector, paired memops, MMA) is present; GPR
// classes may be widened to SPILLTOVSRRC when GPR-to-VSR spills are
// enabled on P9 (counting requests via the InflateGPRC/InflateGP8RC
// statistics).
const TargetRegisterClass *
PPCRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                           const MachineFunction &MF) const {
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const auto *DefaultSuperclass =
      TargetRegisterInfo::getLargestLegalSuperClass(RC, MF);
  if (Subtarget.hasVSX()) {
    // GPR-to-VSR spilling is only wired up for ELFv2 and AIX.
    if (TM.isELFv2ABI() || Subtarget.isAIXABI()) {
      if (Subtarget.hasP9Vector() && EnableGPRToVecSpills &&
          RC == &PPC::G8RCRegClass) {
        InflateGP8RC++;
        return &PPC::SPILLTOVSRRCRegClass;
      }
      // 32-bit GPRs are only counted, not widened.
      if (RC == &PPC::GPRCRegClass && EnableGPRToVecSpills)
        InflateGPRC++;
    }
    for (const auto *I = RC->getSuperClasses(); *I; ++I) {
      // Only consider super-classes of the same register size.
      if (getRegSizeInBits(**I) != getRegSizeInBits(*RC))
        continue;
      switch ((*I)->getID()) {
      case PPC::VSSRCRegClassID:
        return Subtarget.hasP8Vector() ? *I : DefaultSuperclass;
      case PPC::VSFRCRegClassID:
      case PPC::VSRCRegClassID:
        return *I;
      case PPC::VSRpRCRegClassID:
        return Subtarget.pairedVectorMemops() ? *I : DefaultSuperclass;
      case PPC::ACCRCRegClassID:
      case PPC::UACCRCRegClassID:
        return Subtarget.hasMMA() ? *I : DefaultSuperclass;
      }
    }
  }
  return DefaultSuperclass;
}
// Lower the DYNALLOC pseudo: extend the stack by the (negated) requested
// size with a store-with-update of the back-chain, then materialize the
// address of the new dynamic area (old SP + max call frame size) into the
// pseudo's result register.
void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II) const {
  // Get the instruction and its basic block / function context.
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  bool LP64 = TM.isPPC64();
  DebugLoc dl = MI.getDebugLoc();
  unsigned maxCallFrameSize = MFI.getMaxCallFrameSize();
  Align MaxAlign = MFI.getMaxAlign();
  assert(isAligned(MaxAlign, maxCallFrameSize) &&
         "Maximum call-frame size not sufficiently aligned");
  (void)MaxAlign;
  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
  // Reg receives the saved back-chain / frame pointer value computed by
  // prepareDynamicAlloca; NegSizeReg may be rewritten (realignment).
  Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
  bool KillNegSizeReg = MI.getOperand(1).isKill();
  Register NegSizeReg = MI.getOperand(1).getReg();
  prepareDynamicAlloca(II, NegSizeReg, KillNegSizeReg, Reg);
  // Grow the stack (STDUX/STWUX writes the back-chain and updates R1),
  // then compute the new area address as R1 + maxCallFrameSize.
  if (LP64) {
    BuildMI(MBB, II, dl, TII.get(PPC::STDUX), PPC::X1)
        .addReg(Reg, RegState::Kill)
        .addReg(PPC::X1)
        .addReg(NegSizeReg, getKillRegState(KillNegSizeReg));
    BuildMI(MBB, II, dl, TII.get(PPC::ADDI8), MI.getOperand(0).getReg())
        .addReg(PPC::X1)
        .addImm(maxCallFrameSize);
  } else {
    BuildMI(MBB, II, dl, TII.get(PPC::STWUX), PPC::R1)
        .addReg(Reg, RegState::Kill)
        .addReg(PPC::R1)
        .addReg(NegSizeReg, getKillRegState(KillNegSizeReg));
    BuildMI(MBB, II, dl, TII.get(PPC::ADDI), MI.getOperand(0).getReg())
        .addReg(PPC::R1)
        .addImm(maxCallFrameSize);
  }
  // Remove the pseudo instruction.
  MBB.erase(II);
}
// Shared preparation for dynamic stack allocation: materialize the value
// to store as the new back-chain into FramePointer, and, when the frame
// needs extra realignment, round NegSizeReg down to the required
// alignment (rewriting NegSizeReg/KillNegSizeReg for the caller).
void PPCRegisterInfo::prepareDynamicAlloca(MachineBasicBlock::iterator II,
                                           Register &NegSizeReg,
                                           bool &KillNegSizeReg,
                                           Register &FramePointer) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  bool LP64 = TM.isPPC64();
  DebugLoc dl = MI.getDebugLoc();
  unsigned FrameSize = MFI.getStackSize();
  const PPCFrameLowering *TFI = getFrameLowering(MF);
  Align TargetAlign = TFI->getStackAlign();
  Align MaxAlign = MFI.getMaxAlign();
  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
  // If no extra realignment is needed and the frame size fits a 16-bit
  // immediate, recompute the back-chain as R31 + FrameSize; otherwise
  // reload it from the current stack slot at 0(R1).
  if (MaxAlign < TargetAlign && isInt<16>(FrameSize)) {
    if (LP64)
      BuildMI(MBB, II, dl, TII.get(PPC::ADDI8), FramePointer)
          .addReg(PPC::X31)
          .addImm(FrameSize);
    else
      BuildMI(MBB, II, dl, TII.get(PPC::ADDI), FramePointer)
          .addReg(PPC::R31)
          .addImm(FrameSize);
  } else if (LP64) {
    BuildMI(MBB, II, dl, TII.get(PPC::LD), FramePointer)
        .addImm(0)
        .addReg(PPC::X1);
  } else {
    BuildMI(MBB, II, dl, TII.get(PPC::LWZ), FramePointer)
        .addImm(0)
        .addReg(PPC::R1);
  }
  // When the dynamic area needs stronger alignment than the stack
  // guarantees, AND the negated size with ~(MaxAlign - 1) so the SP
  // adjustment is rounded to the required alignment.
  if (LP64) {
    if (MaxAlign > TargetAlign) {
      unsigned UnalNegSizeReg = NegSizeReg;
      NegSizeReg = MF.getRegInfo().createVirtualRegister(G8RC);
      // Load the alignment mask; LI8 sign-extends, giving the full mask.
      BuildMI(MBB, II, dl, TII.get(PPC::LI8), NegSizeReg)
          .addImm(~(MaxAlign.value() - 1));
      unsigned NegSizeReg1 = NegSizeReg;
      NegSizeReg = MF.getRegInfo().createVirtualRegister(G8RC);
      BuildMI(MBB, II, dl, TII.get(PPC::AND8), NegSizeReg)
          .addReg(UnalNegSizeReg, getKillRegState(KillNegSizeReg))
          .addReg(NegSizeReg1, RegState::Kill);
      // The rewritten NegSizeReg is a fresh vreg; its last use kills it.
      KillNegSizeReg = true;
    }
  } else {
    if (MaxAlign > TargetAlign) {
      // 32-bit variant of the same masking sequence.
      unsigned UnalNegSizeReg = NegSizeReg;
      NegSizeReg = MF.getRegInfo().createVirtualRegister(GPRC);
      BuildMI(MBB, II, dl, TII.get(PPC::LI), NegSizeReg)
          .addImm(~(MaxAlign.value() - 1));
      unsigned NegSizeReg1 = NegSizeReg;
      NegSizeReg = MF.getRegInfo().createVirtualRegister(GPRC);
      BuildMI(MBB, II, dl, TII.get(PPC::AND), NegSizeReg)
          .addReg(UnalNegSizeReg, getKillRegState(KillNegSizeReg))
          .addReg(NegSizeReg1, RegState::Kill);
      KillNegSizeReg = true;
    }
  }
}
// Lower the PREPARE_PROBED_ALLOCA pseudo: run the shared dynamic-alloca
// preparation and make sure the (possibly realigned) negative size ends
// up in the pseudo's ActualNegSizeReg result.
void PPCRegisterInfo::lowerPrepareProbedAlloca(
    MachineBasicBlock::iterator II) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  bool LP64 = TM.isPPC64();
  DebugLoc dl = MI.getDebugLoc();
  Register FramePointer = MI.getOperand(0).getReg();
  const Register ActualNegSizeReg = MI.getOperand(1).getReg();
  bool KillNegSizeReg = MI.getOperand(2).isKill();
  Register NegSizeReg = MI.getOperand(2).getReg();
  // Register-to-register copy is done with OR rx, ry, ry.
  const MCInstrDesc &CopyInst = TII.get(LP64 ? PPC::OR8 : PPC::OR);
  // prepareDynamicAlloca will overwrite FramePointer; if it aliases the
  // size input, copy the size out to ActualNegSizeReg first.
  if (FramePointer == NegSizeReg) {
    assert(KillNegSizeReg && "FramePointer is a def and NegSizeReg is an use, "
                             "NegSizeReg should be killed");
    BuildMI(MBB, II, dl, CopyInst, ActualNegSizeReg)
        .addReg(NegSizeReg)
        .addReg(NegSizeReg);
    NegSizeReg = ActualNegSizeReg;
    KillNegSizeReg = false;
  }
  prepareDynamicAlloca(II, NegSizeReg, KillNegSizeReg, FramePointer);
  // If realignment rewrote NegSizeReg (or it never was ActualNegSizeReg),
  // copy the final value into the expected result register.
  if (NegSizeReg != ActualNegSizeReg)
    BuildMI(MBB, II, dl, CopyInst, ActualNegSizeReg)
        .addReg(NegSizeReg)
        .addReg(NegSizeReg);
  MBB.erase(II);
}
/// Lower the DYNAREAOFFSET pseudo: its result is simply the maximum
/// call-frame size, i.e. the offset from the stack pointer to the start of
/// the dynamic allocation area, materialized with a load-immediate.
void PPCRegisterInfo::lowerDynamicAreaOffset(
    MachineBasicBlock::iterator II) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();

  const unsigned MaxCallFrameSize = MF.getFrameInfo().getMaxCallFrameSize();
  const unsigned LoadImmOpc = TM.isPPC64() ? PPC::LI8 : PPC::LI;
  DebugLoc dl = MI.getDebugLoc();

  BuildMI(MBB, II, dl, TII.get(LoadImmOpc), MI.getOperand(0).getReg())
      .addImm(MaxCallFrameSize);
  // The pseudo has been fully expanded; delete it.
  MBB.erase(II);
}
// Lower the SPILL_CR pseudo: move the CR field into a GPR with MFOCRF,
// rotate the field's 4 bits into the low nibble when it is not CR0, and
// store the word to the frame slot.
void PPCRegisterInfo::lowerCRSpilling(MachineBasicBlock::iterator II,
                                      unsigned FrameIndex) const {
  // Get the instruction and its context.
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  bool LP64 = TM.isPPC64();
  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
  // Scratch GPR that receives the CR field contents.
  Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
  Register SrcReg = MI.getOperand(0).getReg();
  BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), Reg)
      .addReg(SrcReg, getKillRegState(MI.getOperand(0).isKill()));
  // MFOCRF leaves the field in its home bit position; rotate it to the
  // top nibble unless the source is CR0 (already there).
  if (SrcReg != PPC::CR0) {
    Register Reg1 = Reg;
    Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
    BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
        .addReg(Reg1, RegState::Kill)
        .addImm(getEncodingValue(SrcReg) * 4)
        .addImm(0)
        .addImm(31);
  }
  // Store the GPR word into the spill slot.
  addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::STW8 : PPC::STW))
                        .addReg(Reg, RegState::Kill),
                    FrameIndex);
  // Remove the pseudo instruction.
  MBB.erase(II);
}
// Lower the RESTORE_CR pseudo: load the spilled word from the frame slot,
// rotate the field bits back to their home position when the destination
// is not CR0, and move them into the CR field with MTOCRF.
void PPCRegisterInfo::lowerCRRestore(MachineBasicBlock::iterator II,
                                     unsigned FrameIndex) const {
  // Get the instruction and its context.
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  bool LP64 = TM.isPPC64();
  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
  // Scratch GPR holding the reloaded word.
  Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
  Register DestReg = MI.getOperand(0).getReg();
  assert(MI.definesRegister(DestReg) &&
         "RESTORE_CR does not define its destination");
  addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LWZ8 : PPC::LWZ),
                            Reg), FrameIndex);
  // Rotate the 4 field bits from the top nibble back to the field's home
  // position (inverse of the rotate done when spilling).
  if (DestReg != PPC::CR0) {
    Register Reg1 = Reg;
    Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
    unsigned ShiftBits = getEncodingValue(DestReg)*4;
    BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
        .addReg(Reg1, RegState::Kill).addImm(32-ShiftBits).addImm(0)
        .addImm(31);
  }
  BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTOCRF8 : PPC::MTOCRF), DestReg)
      .addReg(Reg, RegState::Kill);
  // Remove the pseudo instruction.
  MBB.erase(II);
}
// Lower the SPILL_CRBIT pseudo.  To spill a single CR bit we scan a
// bounded window backwards for its defining instruction: if the bit was
// set by CRSET/CRUNSET we can spill a known constant (and possibly delete
// the now-dead def); otherwise we materialize the bit with SETNBC (ISA
// 3.1), SETB (ISA 3.0, LT bits only) or MFOCRF+RLWINM, and store the word.
void PPCRegisterInfo::lowerCRBitSpilling(MachineBasicBlock::iterator II,
                                         unsigned FrameIndex) const {
  // Get the instruction and its context.
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  const TargetRegisterInfo* TRI = Subtarget.getRegisterInfo();
  DebugLoc dl = MI.getDebugLoc();
  bool LP64 = TM.isPPC64();
  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
  // Scratch GPR that receives the spilled value.
  Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
  Register SrcReg = MI.getOperand(0).getReg();
  // Walk backwards (up to MaxCRBitSpillDist non-debug instructions)
  // looking for the instruction that defines the CR bit, remembering
  // whether the bit is read in between.
  MachineBasicBlock::reverse_iterator Ins = MI;
  MachineBasicBlock::reverse_iterator Rend = MBB.rend();
  ++Ins;
  unsigned CRBitSpillDistance = 0;
  bool SeenUse = false;
  for (; Ins != Rend; ++Ins) {
    // Definition found.
    if (Ins->modifiesRegister(SrcReg, TRI))
      break;
    // Unable to find CR bit definition instruction.
    if (Ins->readsRegister(SrcReg, TRI))
      SeenUse = true;
    // Skip the search if we've exceeded the search bound; fall back to
    // the general (value-unknown) spill sequence below.
    if (CRBitSpillDistance == MaxCRBitSpillDist) {
      Ins = MI;
      break;
    }
    // Debug instructions don't count toward the search distance.
    if (!Ins->isDebugInstr())
      CRBitSpillDistance++;
  }
  // If the definition was not found within the block, treat the value as
  // unknown.
  if (Ins == MBB.rend())
    Ins = MI;
  bool SpillsKnownBit = false;
  // There is no need to extract the CR bit if its value is already known.
  switch (Ins->getOpcode()) {
  case PPC::CRUNSET:
    // Bit is known zero: spill the constant 0.
    BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LI8 : PPC::LI), Reg)
      .addImm(0);
    SpillsKnownBit = true;
    break;
  case PPC::CRSET:
    // Bit is known one: spill a word with the MSB set (matches the
    // RLWINM-based extraction's bit position below).
    BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LIS8 : PPC::LIS), Reg)
      .addImm(-32768);
    SpillsKnownBit = true;
    break;
  default:
    // ISA 3.1: SETNBC materializes -1/0 directly from a CR bit.
    if (Subtarget.isISA3_1()) {
      BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::SETNBC8 : PPC::SETNBC), Reg)
          .addReg(SrcReg, RegState::Undef);
      break;
    }
    // ISA 3.0: SETB can be used, but only for the LT bit of a field.
    if (Subtarget.isISA3_0()) {
      if (SrcReg == PPC::CR0LT || SrcReg == PPC::CR1LT ||
          SrcReg == PPC::CR2LT || SrcReg == PPC::CR3LT ||
          SrcReg == PPC::CR4LT || SrcReg == PPC::CR5LT ||
          SrcReg == PPC::CR6LT || SrcReg == PPC::CR7LT) {
        BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::SETB8 : PPC::SETB), Reg)
            .addReg(getCRFromCRBit(SrcReg), RegState::Undef);
        break;
      }
    }
    // General case: copy out the whole CR field, then rotate the wanted
    // bit into the MSB position.
    BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), Reg)
        .addReg(getCRFromCRBit(SrcReg), RegState::Undef)
        .addReg(SrcReg,
                RegState::Implicit | getKillRegState(MI.getOperand(0).isKill()));
    Register Reg1 = Reg;
    Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
    BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
        .addReg(Reg1, RegState::Kill)
        .addImm(getEncodingValue(SrcReg))
        .addImm(0).addImm(0);
  }
  // Store the GPR to the spill slot.
  addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::STW8 : PPC::STW))
                        .addReg(Reg, RegState::Kill),
                    FrameIndex);
  bool KillsCRBit = MI.killsRegister(SrcReg, TRI);
  // Remove the pseudo instruction.
  MBB.erase(II);
  // If the spilled value was a known constant, the CR bit dies here and
  // was never otherwise read, the defining CRSET/CRUNSET is dead: turn it
  // into a NOP.
  if (SpillsKnownBit && KillsCRBit && !SeenUse) {
    Ins->setDesc(TII.get(PPC::UNENCODED_NOP));
    Ins->removeOperand(0);
  }
}
// Lower the RESTORE_CRBIT pseudo: reload the spilled word, read the
// destination's CR field, insert the restored bit into the correct bit
// position with RLWIMI, and write the field back with MTOCRF.
void PPCRegisterInfo::lowerCRBitRestore(MachineBasicBlock::iterator II,
                                        unsigned FrameIndex) const {
  // Get the instruction and its context.
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  bool LP64 = TM.isPPC64();
  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
  Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
  Register DestReg = MI.getOperand(0).getReg();
  assert(MI.definesRegister(DestReg) &&
         "RESTORE_CRBIT does not define its destination");
  // Reload the spilled word (bit was stored in the MSB).
  addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LWZ8 : PPC::LWZ),
                            Reg), FrameIndex);
  // Mark DestReg defined so the verifier is satisfied before the
  // read-modify-write of its CR field below.
  BuildMI(MBB, II, dl, TII.get(TargetOpcode::IMPLICIT_DEF), DestReg);
  // Read the current contents of the destination's CR field.
  Register RegO = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
  BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), RegO)
      .addReg(getCRFromCRBit(DestReg));
  // Insert the restored bit into its position within the field word.
  unsigned ShiftBits = getEncodingValue(DestReg);
  BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWIMI8 : PPC::RLWIMI), RegO)
      .addReg(RegO, RegState::Kill)
      .addReg(Reg, RegState::Kill)
      .addImm(ShiftBits ? 32 - ShiftBits : 0)
      .addImm(ShiftBits)
      .addImm(ShiftBits);
  // Write the updated field back; the implicit use keeps the other bits
  // of the field live.
  BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTOCRF8 : PPC::MTOCRF),
          getCRFromCRBit(DestReg))
      .addReg(RegO, RegState::Kill)
      .addReg(getCRFromCRBit(DestReg), RegState::Implicit);
  // Remove the pseudo instruction.
  MBB.erase(II);
}
// Debug-build-only helper: when -ppc-report-acc-moves is set, print which
// kind of accumulator copy (acc/uacc -> acc/uacc) is being emitted and
// dump the containing block.  No-op in release builds.
void PPCRegisterInfo::emitAccCopyInfo(MachineBasicBlock &MBB,
                                      MCRegister DestReg, MCRegister SrcReg) {
#ifdef NDEBUG
  return;
#else
  if (ReportAccMoves) {
    std::string Dest = PPC::ACCRCRegClass.contains(DestReg) ? "acc" : "uacc";
    std::string Src = PPC::ACCRCRegClass.contains(SrcReg) ? "acc" : "uacc";
    dbgs() << "Emitting copy from " << Src << " to " << Dest << ":\n";
    MBB.dump();
  }
#endif
}
/// Debug-build-only helper: when -ppc-report-acc-moves is set, report an
/// accumulator spill or restore and dump the block.  No-op in release
/// builds.
static void emitAccSpillRestoreInfo(MachineBasicBlock &MBB, bool IsPrimed,
                                    bool IsRestore) {
#ifdef NDEBUG
  return;
#else
  if (!ReportAccMoves)
    return;
  const char *RegKind = IsPrimed ? "acc" : "uacc";
  const char *Action = IsRestore ? "restore" : "spill";
  dbgs() << "Emitting " << RegKind << " register " << Action << ":\n";
  MBB.dump();
#endif
}
// Spill a VSRp pair (or, with TwoPairs, two consecutive pairs of an
// accumulator) as individual STXV stores.  On little-endian targets the
// registers are stored in reverse order (descending offsets) so the
// in-memory layout matches the paired-store form.
static void spillRegPairs(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator II, DebugLoc DL,
                          const TargetInstrInfo &TII, Register SrcReg,
                          unsigned FrameIndex, bool IsLittleEndian,
                          bool IsKilled, bool TwoPairs) {
  // Starting offset: on LE we begin at the top of the region and walk
  // down; on BE we start at 0 and walk up.
  unsigned Offset = 0;
  if (TwoPairs)
    Offset = IsLittleEndian ? 48 : 0;
  else
    Offset = IsLittleEndian ? 16 : 0;
  // Map the VSRp pair register to its first underlying 128-bit register:
  // VSRp0-15 overlay VSL0-31, VSRp16-31 overlay V0-31 (two per pair).
  Register Reg = (SrcReg > PPC::VSRp15) ? PPC::V0 + (SrcReg - PPC::VSRp16) * 2
                                        : PPC::VSL0 + (SrcReg - PPC::VSRp0) * 2;
  addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STXV))
                        .addReg(Reg, getKillRegState(IsKilled)),
                    FrameIndex, Offset);
  Offset += IsLittleEndian ? -16 : 16;
  addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STXV))
                        .addReg(Reg + 1, getKillRegState(IsKilled)),
                    FrameIndex, Offset);
  // For an accumulator spill, store the second pair as well.
  if (TwoPairs) {
    Offset += IsLittleEndian ? -16 : 16;
    addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STXV))
                          .addReg(Reg + 2, getKillRegState(IsKilled)),
                      FrameIndex, Offset);
    Offset += IsLittleEndian ? -16 : 16;
    addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STXV))
                          .addReg(Reg + 3, getKillRegState(IsKilled)),
                      FrameIndex, Offset);
  }
}
/// Lower a paired-vector (octword) store pseudo into two single STXVs.
/// Only used when automatic paired vector stores are disabled.
void PPCRegisterInfo::lowerOctWordSpilling(MachineBasicBlock::iterator II,
                                           unsigned FrameIndex) const {
  assert(DisableAutoPairedVecSt &&
         "Expecting to do this only if paired vector stores are disabled.");
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  const PPCSubtarget &Subtarget =
      MBB.getParent()->getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  const MachineOperand &SrcMO = MI.getOperand(0);
  spillRegPairs(MBB, II, MI.getDebugLoc(), TII, SrcMO.getReg(), FrameIndex,
                Subtarget.isLittleEndian(), SrcMO.isKill(),
                /*TwoPairs=*/false);
  // The pseudo has been fully expanded; remove it.
  MBB.erase(II);
}
/// Lower SPILL_ACC/SPILL_UACC: store an accumulator's two underlying
/// VSR pairs to the stack slot at FrameIndex.
void PPCRegisterInfo::lowerACCSpilling(MachineBasicBlock::iterator II,
                                       unsigned FrameIndex) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();
  Register SrcReg = MI.getOperand(0).getReg();
  bool IsKilled = MI.getOperand(0).isKill();
  bool IsLittleEndian = Subtarget.isLittleEndian();

  // A primed accumulator (ACC) must be de-primed with XXMFACC before its
  // underlying VSR pairs can be stored; an unprimed one (UACC) is stored
  // directly.
  bool IsPrimed = PPC::ACCRCRegClass.contains(SrcReg);
  Register FirstPair =
      PPC::VSRp0 + (SrcReg - (IsPrimed ? PPC::ACC0 : PPC::UACC0)) * 2;

  emitAccSpillRestoreInfo(MBB, IsPrimed, /*IsRestore=*/false);
  if (IsPrimed)
    BuildMI(MBB, II, DL, TII.get(PPC::XXMFACC), SrcReg).addReg(SrcReg);

  if (DisableAutoPairedVecSt) {
    // Paired vector stores are disabled: emit four single STXVs instead.
    spillRegPairs(MBB, II, DL, TII, FirstPair, FrameIndex, IsLittleEndian,
                  IsKilled, /*TwoPairs=*/true);
  } else {
    // Store both VSR pairs with STXVP; on little-endian targets the
    // low-numbered pair goes to the higher offset.
    addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STXVP))
                          .addReg(FirstPair, getKillRegState(IsKilled)),
                      FrameIndex, IsLittleEndian ? 32 : 0);
    addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STXVP))
                          .addReg(FirstPair + 1, getKillRegState(IsKilled)),
                      FrameIndex, IsLittleEndian ? 0 : 32);
  }

  // If the accumulator is still live after the spill, re-prime it so
  // subsequent uses see the primed form again.
  if (IsPrimed && !IsKilled)
    BuildMI(MBB, II, DL, TII.get(PPC::XXMTACC), SrcReg).addReg(SrcReg);
  MBB.erase(II);
}
/// Lower RESTORE_ACC/RESTORE_UACC: reload an accumulator's two VSR pairs
/// from the stack slot and re-prime the register if it is an ACC.
void PPCRegisterInfo::lowerACCRestore(MachineBasicBlock::iterator II,
                                      unsigned FrameIndex) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register DestReg = MI.getOperand(0).getReg();
  assert(MI.definesRegister(DestReg) &&
         "RESTORE_ACC does not define its destination");

  bool IsPrimed = PPC::ACCRCRegClass.contains(DestReg);
  Register FirstPair =
      PPC::VSRp0 + (DestReg - (IsPrimed ? PPC::ACC0 : PPC::UACC0)) * 2;
  bool IsLittleEndian = Subtarget.isLittleEndian();

  emitAccSpillRestoreInfo(MBB, IsPrimed, /*IsRestore=*/true);
  // Reload both VSR pairs; the offsets mirror the spill layout.
  addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::LXVP), FirstPair),
                    FrameIndex, IsLittleEndian ? 32 : 0);
  addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::LXVP), FirstPair + 1),
                    FrameIndex, IsLittleEndian ? 0 : 32);
  // Prime the accumulator when restoring into an ACC register.
  if (IsPrimed)
    BuildMI(MBB, II, DL, TII.get(PPC::XXMTACC), DestReg).addReg(DestReg);
  MBB.erase(II);
}
/// Lower SPILL_QUADWORD: store the two 64-bit halves of a GPR pair with
/// individual STDs.
void PPCRegisterInfo::lowerQuadwordSpilling(MachineBasicBlock::iterator II,
                                            unsigned FrameIndex) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  const PPCSubtarget &Subtarget =
      MBB.getParent()->getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();
  Register SrcReg = MI.getOperand(0).getReg();
  bool IsKilled = MI.getOperand(0).isKill();
  // Map the G8p pair register onto its even/odd X registers. On
  // little-endian targets the even register is stored at offset 8.
  Register EvenReg = PPC::X0 + (SrcReg - PPC::G8p0) * 2;
  bool IsLittleEndian = Subtarget.isLittleEndian();
  for (unsigned Idx = 0; Idx < 2; ++Idx) {
    unsigned Offset = (IsLittleEndian ? 1 - Idx : Idx) * 8;
    addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STD))
                          .addReg(EvenReg + Idx, getKillRegState(IsKilled)),
                      FrameIndex, Offset);
  }
  MBB.erase(II);
}
/// Lower RESTORE_QUADWORD: reload the two 64-bit halves of a GPR pair
/// with individual LDs, mirroring the spill layout.
void PPCRegisterInfo::lowerQuadwordRestore(MachineBasicBlock::iterator II,
                                           unsigned FrameIndex) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  const PPCSubtarget &Subtarget =
      MBB.getParent()->getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();
  Register DestReg = MI.getOperand(0).getReg();
  assert(MI.definesRegister(DestReg) &&
         "RESTORE_QUADWORD does not define its destination");
  Register EvenReg = PPC::X0 + (DestReg - PPC::G8p0) * 2;
  bool IsLittleEndian = Subtarget.isLittleEndian();
  for (unsigned Idx = 0; Idx < 2; ++Idx) {
    unsigned Offset = (IsLittleEndian ? 1 - Idx : Idx) * 8;
    addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::LD), EvenReg + Idx),
                      FrameIndex, Offset);
  }
  MBB.erase(II);
}
/// CR2-CR4 share a single reserved spill slot; report it for those
/// registers and decline for everything else.
bool PPCRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           Register Reg, int &FrameIdx) const {
  if (PPC::CR2 > Reg || Reg > PPC::CR4)
    return false;
  FrameIdx = MF.getInfo<PPCFunctionInfo>()->getCRSpillFrameIndex();
  return true;
}
/// Return the minimum alignment (in bytes) that the immediate displacement
/// of the given memory opcode must satisfy.
static unsigned offsetMinAlignForOpcode(unsigned OpC) {
  switch (OpC) {
  // DS-form-style encodings: displacement must be a multiple of 4.
  case PPC::LWA:
  case PPC::LWA_32:
  case PPC::LD:
  case PPC::LDU:
  case PPC::STD:
  case PPC::STDU:
  case PPC::DFLOADf32:
  case PPC::DFLOADf64:
  case PPC::DFSTOREf32:
  case PPC::DFSTOREf64:
  case PPC::LXSD:
  case PPC::LXSSP:
  case PPC::STXSD:
  case PPC::STXSSP:
  case PPC::STQ:
    return 4;
  // SPE doubleword load/store: 8-byte-aligned displacement.
  case PPC::EVLDD:
  case PPC::EVSTDD:
    return 8;
  // DQ-form-style encodings: displacement must be a multiple of 16.
  case PPC::LXV:
  case PPC::STXV:
  case PPC::LQ:
  case PPC::LXVP:
  case PPC::STXVP:
    return 16;
  // Everything else has no extra displacement-alignment requirement.
  default:
    return 1;
  }
}
/// Convenience wrapper: minimum displacement alignment for an instruction.
static unsigned offsetMinAlign(const MachineInstr &MI) {
  return offsetMinAlignForOpcode(MI.getOpcode());
}
/// Given the operand number of the frame index, return the operand number
/// of the companion offset operand.
static unsigned getOffsetONFromFION(const MachineInstr &MI,
                                    unsigned FIOperandNum) {
  // Inline asm keeps the offset immediately before the frame index.
  if (MI.isInlineAsm())
    return FIOperandNum - 1;
  // STACKMAP/PATCHPOINT keep it immediately after.
  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT)
    return FIOperandNum + 1;
  // Ordinary loads/stores: the offset and base operands occupy slots 1
  // and 2 in either order, so pick the one the frame index doesn't use.
  return (FIOperandNum == 2) ? 1 : 2;
}
void
PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenging *RS) const {
  // Rewrite the frame-index operand of *II into a real base register plus
  // offset, diverting PPC-specific spill/restore pseudos to their dedicated
  // lowering routines first.
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  DebugLoc dl = MI.getDebugLoc();

  unsigned OffsetOperandNo = getOffsetONFromFION(MI, FIOperandNum);
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  // Frame-pointer-save index; dynamic-alloca pseudos reference this slot.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int FPSI = FI->getFramePointerSaveIndex();
  unsigned OpC = MI.getOpcode();

  // Dynamic-area-offset pseudos get their own lowering regardless of index.
  if ((OpC == PPC::DYNAREAOFFSET || OpC == PPC::DYNAREAOFFSET8)) {
    lowerDynamicAreaOffset(II);
    return;
  }
  // Dynamic alloca referencing the FP-save slot.
  if (FPSI && FrameIndex == FPSI &&
      (OpC == PPC::DYNALLOC || OpC == PPC::DYNALLOC8)) {
    lowerDynamicAlloc(II);
    return;
  }
  // Probed alloca preparation referencing the FP-save slot.
  if (FPSI && FrameIndex == FPSI &&
      (OpC == PPC::PREPARE_PROBED_ALLOCA_64 ||
       OpC == PPC::PREPARE_PROBED_ALLOCA_32 ||
       OpC == PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64 ||
       OpC == PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32)) {
    lowerPrepareProbedAlloca(II);
    return;
  }
  // Spill/restore pseudos for CR, CR bits, accumulators, paired vectors
  // and GPR-pair quadwords are fully expanded by their own routines.
  if (OpC == PPC::SPILL_CR) {
    lowerCRSpilling(II, FrameIndex);
    return;
  } else if (OpC == PPC::RESTORE_CR) {
    lowerCRRestore(II, FrameIndex);
    return;
  } else if (OpC == PPC::SPILL_CRBIT) {
    lowerCRBitSpilling(II, FrameIndex);
    return;
  } else if (OpC == PPC::RESTORE_CRBIT) {
    lowerCRBitRestore(II, FrameIndex);
    return;
  } else if (OpC == PPC::SPILL_ACC || OpC == PPC::SPILL_UACC) {
    lowerACCSpilling(II, FrameIndex);
    return;
  } else if (OpC == PPC::RESTORE_ACC || OpC == PPC::RESTORE_UACC) {
    lowerACCRestore(II, FrameIndex);
    return;
  } else if (OpC == PPC::STXVP && DisableAutoPairedVecSt) {
    lowerOctWordSpilling(II, FrameIndex);
    return;
  } else if (OpC == PPC::SPILL_QUADWORD) {
    lowerQuadwordSpilling(II, FrameIndex);
    return;
  } else if (OpC == PPC::RESTORE_QUADWORD) {
    lowerQuadwordRestore(II, FrameIndex);
    return;
  }

  // Replace the frame index with the base register (for fixed objects with
  // a base pointer) or the frame register.
  MI.getOperand(FIOperandNum).ChangeToRegister(
      FrameIndex < 0 ? getBaseRegister(MF) : getFrameRegister(MF), false);

  // If the opcode has no immediate-offset form, we must use an indexed
  // (reg+reg) form below.
  bool noImmForm = !MI.isInlineAsm() && OpC != TargetOpcode::STACKMAP &&
                   OpC != TargetOpcode::PATCHPOINT && !ImmToIdxMap.count(OpC);

  // Combine the object offset with any offset already encoded on MI.
  int64_t Offset = MFI.getObjectOffset(FrameIndex);
  Offset += MI.getOperand(OffsetOperandNo).getImm();

  // Object offsets are SP-relative before the stack size is subtracted, so
  // add the stack size back in — except when addressing through the base
  // pointer for a fixed object, or in naked functions (whose recorded
  // stack size is not meaningful).
  if (!MF.getFunction().hasFnAttribute(Attribute::Naked)) {
    if (!(hasBasePointer(MF) && FrameIndex < 0))
      Offset += MFI.getStackSize();
  }

  // LXVP/STXVP with an offset that doesn't fit the D-form constraints can
  // be switched to the prefixed variant when the subtarget has one.
  if ((OpC == PPC::LXVP || OpC == PPC::STXVP) &&
      (!isInt<16>(Offset) || (Offset % offsetMinAlign(MI)) != 0) &&
      Subtarget.hasPrefixInstrs()) {
    unsigned NewOpc = OpC == PPC::LXVP ? PPC::PLXVP : PPC::PSTXVP;
    MI.setDesc(TII.get(NewOpc));
    OpC = NewOpc;
  }

  assert(OpC != PPC::DBG_VALUE &&
         "This should be handled in a target-independent way");
  // EVLDD/EVSTDD take an unsigned 8-bit offset; other immediate forms take
  // a signed 16-bit one, and prefixed instructions a signed 34-bit one.
  bool OffsetFitsMnemonic = (OpC == PPC::EVSTDD || OpC == PPC::EVLDD) ?
                            isUInt<8>(Offset) :
                            isInt<16>(Offset);
  if (TII.isPrefixed(MI.getOpcode()))
    OffsetFitsMnemonic = isInt<34>(Offset);
  // If the offset fits (and is properly aligned), or the instruction takes
  // an arbitrary immediate (STACKMAP/PATCHPOINT), encode it directly.
  if (!noImmForm && ((OffsetFitsMnemonic &&
                      ((Offset % offsetMinAlign(MI)) == 0)) ||
                     OpC == TargetOpcode::STACKMAP ||
                     OpC == TargetOpcode::PATCHPOINT)) {
    MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);
    return;
  }

  // Otherwise materialize the offset into a scratch register.
  bool is64Bit = TM.isPPC64();
  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
  const TargetRegisterClass *RC = is64Bit ? G8RC : GPRC;
  Register SRegHi = MF.getRegInfo().createVirtualRegister(RC),
           SReg = MF.getRegInfo().createVirtualRegister(RC);
  unsigned NewOpcode = 0u;

  // LI for 16-bit offsets, LIS+ORI for 32-bit, full sequence otherwise.
  if (isInt<16>(Offset))
    BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LI8 : PPC::LI), SReg)
        .addImm(Offset);
  else if (isInt<32>(Offset)) {
    BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LIS8 : PPC::LIS), SRegHi)
        .addImm(Offset >> 16);
    BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::ORI8 : PPC::ORI), SReg)
        .addReg(SRegHi, RegState::Kill)
        .addImm(Offset);
  } else {
    assert(is64Bit && "Huge stack is only supported on PPC64");
    TII.materializeImmPostRA(MBB, II, dl, SReg, Offset);
  }

  // Convert the instruction to its indexed form and plug in the base and
  // scratch registers. For inline asm the operand positions are reused.
  unsigned OperandBase;
  if (noImmForm)
    OperandBase = 1;
  else if (OpC != TargetOpcode::INLINEASM &&
           OpC != TargetOpcode::INLINEASM_BR) {
    assert(ImmToIdxMap.count(OpC) &&
           "No indexed form of load or store available!");
    NewOpcode = ImmToIdxMap.find(OpC)->second;
    MI.setDesc(TII.get(NewOpcode));
    OperandBase = 1;
  } else {
    OperandBase = OffsetOperandNo;
  }

  Register StackReg = MI.getOperand(FIOperandNum).getReg();
  MI.getOperand(OperandBase).ChangeToRegister(StackReg, false);
  MI.getOperand(OperandBase + 1).ChangeToRegister(SReg, false, false, true);

  // LQX_PSEUDO/STQX_PSEUDO have no real indexed form: compute base+offset
  // into a fresh GPR and fall back to the immediate LQ/STQ with offset 0.
  if (NewOpcode == PPC::LQX_PSEUDO || NewOpcode == PPC::STQX_PSEUDO) {
    assert(is64Bit && "Quadword loads/stores only supported in 64-bit mode");
    Register NewReg = MF.getRegInfo().createVirtualRegister(&PPC::G8RCRegClass);
    BuildMI(MBB, II, dl, TII.get(PPC::ADD8), NewReg)
        .addReg(SReg, RegState::Kill)
        .addReg(StackReg);
    MI.setDesc(TII.get(NewOpcode == PPC::LQX_PSEUDO ? PPC::LQ : PPC::STQ));
    MI.getOperand(OperandBase + 1).ChangeToRegister(NewReg, false);
    MI.getOperand(OperandBase).ChangeToImmediate(0);
  }
}
/// Return the register used for frame addressing: the frame pointer
/// (R31/X31) when one is established, otherwise the stack pointer (R1/X1).
Register PPCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const PPCFrameLowering *TFI = getFrameLowering(MF);
  bool HasFP = TFI->hasFP(MF);
  if (TM.isPPC64())
    return HasFP ? PPC::X31 : PPC::X1;
  return HasFP ? PPC::R31 : PPC::R1;
}
/// Return the base-pointer register, falling back to the frame register
/// when no base pointer is in use.
Register PPCRegisterInfo::getBaseRegister(const MachineFunction &MF) const {
  if (!hasBasePointer(MF))
    return getFrameRegister(MF);
  if (TM.isPPC64())
    return PPC::X30;
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  // 32-bit SVR4 PIC uses R29 as the base pointer; otherwise R30.
  return (Subtarget.isSVR4ABI() && TM.isPositionIndependent()) ? PPC::R29
                                                               : PPC::R30;
}
/// Whether this function uses a dedicated base pointer. The disable flag
/// dominates the force flag; otherwise stack realignment decides.
bool PPCRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  if (!EnableBasePointer)
    return false;
  return AlwaysBasePointer || hasStackRealignment(MF);
}
/// Decide whether a local-object access needs a materialized frame base
/// register (because the estimated final offset won't fit the instruction).
bool PPCRegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                        int64_t Offset) const {
  assert(Offset < 0 && "Local offset must be negative");
  unsigned Opc = MI->getOpcode();
  // Only instructions with an indexed (reg+reg) counterpart can be
  // rewritten to use a virtual base register.
  if (!ImmToIdxMap.count(Opc))
    return false;
  // An ADDI with a zero immediate is just the frame address itself.
  if ((Opc == PPC::ADDI || Opc == PPC::ADDI8) &&
      MI->getOperand(2).getImm() == 0)
    return false;
  MachineFunction &MF = *MI->getParent()->getParent();
  // Estimate the eventual frame size; with no stack there is no problem.
  unsigned StackEst = getFrameLowering(MF)->determineFrameLayout(MF, true);
  if (!StackEst)
    return false;
  // A base register is needed exactly when the adjusted offset is illegal.
  return !isFrameOffsetLegal(MI, getBaseRegister(MF), Offset + StackEst);
}
/// Create a virtual base register and insert "ADDI(8) Base, FrameIdx,
/// Offset" at the top of MBB; returns the new register.
Register PPCRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                       int FrameIdx,
                                                       int64_t Offset) const {
  MachineBasicBlock::iterator InsertPt = MBB->begin();
  DebugLoc DL;
  if (InsertPt != MBB->end())
    DL = InsertPt->getDebugLoc();

  const MachineFunction &MF = *MBB->getParent();
  const TargetInstrInfo &TII =
      *MF.getSubtarget<PPCSubtarget>().getInstrInfo();
  const MCInstrDesc &Desc = TII.get(TM.isPPC64() ? PPC::ADDI8 : PPC::ADDI);

  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register BaseReg = MRI.createVirtualRegister(getPointerRegClass(MF));
  // Constrain to whatever class the ADDI definition operand requires.
  MRI.constrainRegClass(BaseReg, TII.getRegClass(Desc, 0, this, MF));

  BuildMI(*MBB, InsertPt, DL, Desc, BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset);
  return BaseReg;
}
/// Rewrite MI's frame-index operand to use BaseReg plus the combined
/// displacement.
void PPCRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                        int64_t Offset) const {
  // Locate the frame-index operand.
  unsigned FIOperandNum = 0;
  while (!MI.getOperand(FIOperandNum).isFI()) {
    ++FIOperandNum;
    assert(FIOperandNum < MI.getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");
  }

  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
  unsigned OffsetOpNo = getOffsetONFromFION(MI, FIOperandNum);
  MI.getOperand(OffsetOpNo).ChangeToImmediate(
      Offset + MI.getOperand(OffsetOpNo).getImm());

  // Keep BaseReg's register class compatible with the slot it now fills.
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetInstrInfo &TII =
      *MF.getSubtarget<PPCSubtarget>().getInstrInfo();
  MF.getRegInfo().constrainRegClass(
      BaseReg, TII.getRegClass(MI.getDesc(), FIOperandNum, this, MF));
}
/// Check whether MI can directly encode the given frame offset (combined
/// with any offset already present on the instruction).
bool PPCRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                         Register BaseReg,
                                         int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI->getOperand(FIOperandNum).isFI()) {
    ++FIOperandNum;
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");
  }

  Offset += MI->getOperand(getOffsetONFromFION(*MI, FIOperandNum)).getImm();

  // DBG_VALUE, STACKMAP and PATCHPOINT accept any 64-bit offset.
  unsigned Opc = MI->getOpcode();
  if (Opc == PPC::DBG_VALUE || Opc == TargetOpcode::STACKMAP ||
      Opc == TargetOpcode::PATCHPOINT)
    return true;

  // Otherwise the offset must fit a signed 16-bit field and satisfy the
  // opcode's displacement-alignment requirement.
  return isInt<16>(Offset) && (Offset % offsetMinAlign(*MI)) == 0;
}