#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionXCOFF.h"
#include "llvm/MC/MCSymbolXCOFF.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <utility>
#include <vector>
using namespace llvm;
#define DEBUG_TYPE "ppc-lowering"
static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);
static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);
static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);
static cl::opt<bool> DisableSCO("disable-ppc-sco",
cl::desc("disable sibling call optimization on ppc"), cl::Hidden);
static cl::opt<bool> DisableInnermostLoopAlign32("disable-ppc-innermost-loop-align32",
cl::desc("don't always align innermost loop to 32 bytes on ppc"), cl::Hidden);
static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables",
cl::desc("use absolute jump tables on ppc"), cl::Hidden);
static cl::opt<bool> EnableQuadwordAtomics(
"ppc-quadword-atomics",
cl::desc("enable quadword lock-free atomic operations"), cl::init(false),
cl::Hidden);
static cl::opt<bool>
DisablePerfectShuffle("ppc-disable-perfect-shuffle",
cl::desc("disable vector permute decomposition"),
cl::init(true), cl::Hidden);
cl::opt<bool> DisableAutoPairedVecSt(
"disable-auto-paired-vec-st",
cl::desc("disable automatically generated 32byte paired vector stores"),
cl::init(true), cl::Hidden);
STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");
STATISTIC(ShufflesHandledWithVPERM, "Number of shuffles lowered to a VPERM");
STATISTIC(NumDynamicAllocaProbed, "Number of dynamic stack allocations probed");
static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);
static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);
static const char AIXSSPCanaryWordName[] = "__ssp_canary_word";
extern cl::opt<bool> ANDIGlueBug;
PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
const PPCSubtarget &STI)
: TargetLowering(TM), Subtarget(STI) {
initializeAddrModeMap();
bool isPPC64 = Subtarget.isPPC64();
setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));
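// Set up the register classes.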
addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
if (!useSoftFloat()) {
if (hasSPE()) {
addRegisterClass(MVT::f32, &PPC::GPRCRegClass);
if (!Subtarget.hasEFPU2())
addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
} else {
addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
}
}
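// Match BITREVERSE to a customized fast code sequence in the td file.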
setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
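// Sub-word ATOMIC_CMP_SWAP needs to ensure that the input is zero-extended.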
setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
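// Custom lower inline assembly to check for special registers.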
setOperationAction(ISD::INLINEASM, MVT::Other, Custom);
setOperationAction(ISD::INLINEASM_BR, MVT::Other, Custom);
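// PowerPC has an i16 but no i8 (or i1) SEXTLOAD.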
for (MVT VT : MVT::integer_valuetypes()) {
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
}
if (Subtarget.isISA3_0()) {
setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
setTruncStoreAction(MVT::f64, MVT::f16, Legal);
setTruncStoreAction(MVT::f32, MVT::f16, Legal);
} else {
setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
setTruncStoreAction(MVT::f64, MVT::f16, Expand);
setTruncStoreAction(MVT::f32, MVT::f16, Expand);
}
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
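// PowerPC has pre-increment load and store.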
setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
if (!Subtarget.hasSPE()) {
setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
}
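// PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.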
const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
for (MVT VT : ScalarIntVTs) {
setOperationAction(ISD::ADDC, VT, Legal);
setOperationAction(ISD::ADDE, VT, Legal);
setOperationAction(ISD::SUBC, VT, Legal);
setOperationAction(ISD::SUBE, VT, Legal);
}
if (Subtarget.useCRBits()) {
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
if (isPPC64 || Subtarget.hasFPCVT()) {
setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Promote);
AddPromotedToType(ISD::STRICT_SINT_TO_FP, MVT::i1,
isPPC64 ? MVT::i64 : MVT::i32);
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Promote);
AddPromotedToType(ISD::STRICT_UINT_TO_FP, MVT::i1,
isPPC64 ? MVT::i64 : MVT::i32);
setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
isPPC64 ? MVT::i64 : MVT::i32);
setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
isPPC64 ? MVT::i64 : MVT::i32);
setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i1, Promote);
AddPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::i1,
isPPC64 ? MVT::i64 : MVT::i32);
setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i1, Promote);
AddPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::i1,
isPPC64 ? MVT::i64 : MVT::i32);
setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
AddPromotedToType(ISD::FP_TO_SINT, MVT::i1,
isPPC64 ? MVT::i64 : MVT::i32);
setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
AddPromotedToType(ISD::FP_TO_UINT, MVT::i1,
isPPC64 ? MVT::i64 : MVT::i32);
} else {
setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Custom);
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Custom);
setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
}
setOperationAction(ISD::LOAD, MVT::i1, Custom);
setOperationAction(ISD::STORE, MVT::i1, Custom);
if (ANDIGlueBug)
setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
for (MVT VT : MVT::integer_valuetypes()) {
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
setTruncStoreAction(VT, MVT::i1, Expand);
}
addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
}
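// ppcf128-to-int conversions are custom lowered; the remaining ppcf128 operations are expanded to library calls.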
setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);
setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::ppcf128, Custom);
setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::ppcf128, Custom);
setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
setOperationAction(ISD::FREM, MVT::ppcf128, Expand);
if (Subtarget.isISA3_0()) {
setOperationAction(ISD::SREM, MVT::i32, Legal);
setOperationAction(ISD::UREM, MVT::i32, Legal);
setOperationAction(ISD::SREM, MVT::i64, Legal);
setOperationAction(ISD::UREM, MVT::i64, Legal);
} else {
setOperationAction(ISD::SREM, MVT::i32, Expand);
setOperationAction(ISD::UREM, MVT::i32, Expand);
setOperationAction(ISD::SREM, MVT::i64, Expand);
setOperationAction(ISD::UREM, MVT::i64, Expand);
}
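// Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.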
setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
if (!Subtarget.hasSPE()) {
setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
}
if (Subtarget.hasVSX()) {
setOperationAction(ISD::STRICT_FRINT, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FRINT, MVT::f64, Legal);
}
if (Subtarget.hasFSQRT()) {
setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
}
if (Subtarget.hasFPRND()) {
setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FCEIL, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal);
setOperationAction(ISD::STRICT_FCEIL, MVT::f64, Legal);
setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal);
setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal);
}
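// PowerPC has no native sin/cos/fmod/pow; expand them to library calls.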
setOperationAction(ISD::FSIN , MVT::f64, Expand);
setOperationAction(ISD::FCOS , MVT::f64, Expand);
setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
setOperationAction(ISD::FREM , MVT::f64, Expand);
setOperationAction(ISD::FPOW , MVT::f64, Expand);
setOperationAction(ISD::FSIN , MVT::f32, Expand);
setOperationAction(ISD::FCOS , MVT::f32, Expand);
setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
setOperationAction(ISD::FREM , MVT::f32, Expand);
setOperationAction(ISD::FPOW , MVT::f32, Expand);
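// At -O3, custom lower these math ops so they can be turned into faster MASS library calls when fast-math flags allow it.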
if (TM.getOptLevel() == CodeGenOpt::Aggressive) {
setOperationAction(ISD::FSIN , MVT::f64, Custom);
setOperationAction(ISD::FCOS , MVT::f64, Custom);
setOperationAction(ISD::FPOW , MVT::f64, Custom);
setOperationAction(ISD::FLOG, MVT::f64, Custom);
setOperationAction(ISD::FLOG10, MVT::f64, Custom);
setOperationAction(ISD::FEXP, MVT::f64, Custom);
setOperationAction(ISD::FSIN , MVT::f32, Custom);
setOperationAction(ISD::FCOS , MVT::f32, Custom);
setOperationAction(ISD::FPOW , MVT::f32, Custom);
setOperationAction(ISD::FLOG, MVT::f32, Custom);
setOperationAction(ISD::FLOG10, MVT::f32, Custom);
setOperationAction(ISD::FEXP, MVT::f32, Custom);
}
if (Subtarget.hasSPE()) {
setOperationAction(ISD::FMA , MVT::f64, Expand);
setOperationAction(ISD::FMA , MVT::f32, Expand);
} else {
setOperationAction(ISD::FMA , MVT::f64, Legal);
setOperationAction(ISD::FMA , MVT::f32, Legal);
}
if (Subtarget.hasSPE())
setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
if (!Subtarget.hasFSQRT() &&
!(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
Subtarget.hasFRE()))
setOperationAction(ISD::FSQRT, MVT::f64, Expand);
if (!Subtarget.hasFSQRT() &&
!(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
Subtarget.hasFRES()))
setOperationAction(ISD::FSQRT, MVT::f32, Expand);
if (Subtarget.hasFCPSGN()) {
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
} else {
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
}
if (Subtarget.hasFPRND()) {
setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
setOperationAction(ISD::FCEIL, MVT::f64, Legal);
setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
setOperationAction(ISD::FROUND, MVT::f64, Legal);
setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
setOperationAction(ISD::FCEIL, MVT::f32, Legal);
setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
setOperationAction(ISD::FROUND, MVT::f32, Legal);
}
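// There is no i32 byte-swap instruction; i64 BSWAP is custom lowered when the Power9 vector byte-reverse is available, and expanded otherwise.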
setOperationAction(ISD::BSWAP, MVT::i32 , Expand);
if (Subtarget.hasP9Vector() && Subtarget.isPPC64())
setOperationAction(ISD::BSWAP, MVT::i64 , Custom);
else
setOperationAction(ISD::BSWAP, MVT::i64 , Expand);
if (Subtarget.isISA3_0()) {
setOperationAction(ISD::CTTZ , MVT::i32 , Legal);
setOperationAction(ISD::CTTZ , MVT::i64 , Legal);
} else {
setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
}
if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
setOperationAction(ISD::CTPOP, MVT::i32 , Legal);
setOperationAction(ISD::CTPOP, MVT::i64 , Legal);
} else {
setOperationAction(ISD::CTPOP, MVT::i32 , Expand);
setOperationAction(ISD::CTPOP, MVT::i64 , Expand);
}
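// PowerPC does not have ROTR.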
setOperationAction(ISD::ROTR, MVT::i32 , Expand);
setOperationAction(ISD::ROTR, MVT::i64 , Expand);
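// Without CR-bit registers, SELECT is expanded (into SELECT_CC).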
if (!Subtarget.useCRBits()) {
setOperationAction(ISD::SELECT, MVT::i32, Expand);
setOperationAction(ISD::SELECT, MVT::i64, Expand);
setOperationAction(ISD::SELECT, MVT::f32, Expand);
setOperationAction(ISD::SELECT, MVT::f64, Expand);
}
setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
if (!Subtarget.useCRBits())
setOperationAction(ISD::SETCC, MVT::i32, Custom);
if (Subtarget.hasFPU()) {
setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Legal);
setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Legal);
}
if (!Subtarget.useCRBits())
setOperationAction(ISD::BRCOND, MVT::Other, Expand);
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
if (Subtarget.hasSPE()) {
setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal);
setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal);
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal);
setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
} else {
setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Expand);
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Expand);
setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
}
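// Direct moves between GPRs and FP/VSX registers make scalar bitcasts free.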
if (Subtarget.hasDirectMove() && isPPC64) {
setOperationAction(ISD::BITCAST, MVT::f32, Legal);
setOperationAction(ISD::BITCAST, MVT::i32, Legal);
setOperationAction(ISD::BITCAST, MVT::i64, Legal);
setOperationAction(ISD::BITCAST, MVT::f64, Legal);
if (TM.Options.UnsafeFPMath) {
setOperationAction(ISD::LRINT, MVT::f64, Legal);
setOperationAction(ISD::LRINT, MVT::f32, Legal);
setOperationAction(ISD::LLRINT, MVT::f64, Legal);
setOperationAction(ISD::LLRINT, MVT::f32, Legal);
setOperationAction(ISD::LROUND, MVT::f64, Legal);
setOperationAction(ISD::LROUND, MVT::f32, Legal);
setOperationAction(ISD::LLROUND, MVT::f64, Legal);
setOperationAction(ISD::LLROUND, MVT::f32, Legal);
}
} else {
setOperationAction(ISD::BITCAST, MVT::f32, Expand);
setOperationAction(ISD::BITCAST, MVT::i32, Expand);
setOperationAction(ISD::BITCAST, MVT::i64, Expand);
setOperationAction(ISD::BITCAST, MVT::f64, Expand);
}
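// We cannot sextinreg(i1); expand to shifts.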
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
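// Legalize GlobalAddress, ConstantPool and friends into the instruction sequences that materialize the address.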
setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
setOperationAction(ISD::JumpTable, MVT::i32, Custom);
setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
setOperationAction(ISD::JumpTable, MVT::i64, Custom);
setOperationAction(ISD::TRAP, MVT::Other, Legal);
setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
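// VASTART needs to be custom lowered to use the VarArgsFrameIndex.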
setOperationAction(ISD::VASTART , MVT::Other, Custom);
if (Subtarget.is64BitELFABI()) {
setOperationAction(ISD::VAARG, MVT::i1, Promote);
AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
setOperationAction(ISD::VAARG, MVT::i8, Promote);
AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
setOperationAction(ISD::VAARG, MVT::i16, Promote);
AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
setOperationAction(ISD::VAARG, MVT::i32, Promote);
AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
setOperationAction(ISD::VAARG, MVT::Other, Expand);
} else if (Subtarget.is32BitELFABI()) {
setOperationAction(ISD::VAARG, MVT::Other, Custom);
setOperationAction(ISD::VAARG, MVT::i64, Custom);
} else
setOperationAction(ISD::VAARG, MVT::Other, Expand);
if (Subtarget.is32BitELFABI())
setOperationAction(ISD::VACOPY , MVT::Other, Custom);
else
setOperationAction(ISD::VACOPY , MVT::Other, Expand);
setOperationAction(ISD::VAEND , MVT::Other, Expand);
setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom);
setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);
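// We want to custom lower some of our intrinsics.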
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f64, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::ppcf128, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f64, Custom);
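// To handle counter-based loop conditions.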
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);
setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
if (Subtarget.hasSPE()) {
setCondCodeAction(ISD::SETO, MVT::f32, Expand);
setCondCodeAction(ISD::SETO, MVT::f64, Expand);
setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
}
setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
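// PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.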
if (Subtarget.has64BitSupport()) {
setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Expand);
setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand);
setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) {
setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
}
} else {
if (Subtarget.hasSPE()) {
setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
} else {
setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Expand);
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
}
}
if (Subtarget.hasFPCVT()) {
if (Subtarget.has64BitSupport()) {
setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
}
setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
}
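// 64-bit PowerPC implementations can support i64 types directly.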
if (Subtarget.use64BitRegs()) {
addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
} else {
setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
}
if (Subtarget.has64BitSupport()) {
setOperationAction(ISD::FSHL, MVT::i64, Custom);
setOperationAction(ISD::FSHR, MVT::i64, Custom);
}
setOperationAction(ISD::FSHL, MVT::i32, Custom);
setOperationAction(ISD::FSHR, MVT::i32, Custom);
if (Subtarget.hasVSX()) {
setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
}
if (Subtarget.hasAltivec()) {
for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
setOperationAction(ISD::SADDSAT, VT, Legal);
setOperationAction(ISD::SSUBSAT, VT, Legal);
setOperationAction(ISD::UADDSAT, VT, Legal);
setOperationAction(ISD::USUBSAT, VT, Legal);
}
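// First set actions for all fixed-length vector types to Expand, then selectively enable what Altivec/VSX can actually do.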
for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
setOperationAction(ISD::ADD, VT, Legal);
setOperationAction(ISD::SUB, VT, Legal);
if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
setOperationAction(ISD::SMAX, VT, Legal);
setOperationAction(ISD::SMIN, VT, Legal);
setOperationAction(ISD::UMAX, VT, Legal);
setOperationAction(ISD::UMIN, VT, Legal);
}
else {
setOperationAction(ISD::SMAX, VT, Expand);
setOperationAction(ISD::SMIN, VT, Expand);
setOperationAction(ISD::UMAX, VT, Expand);
setOperationAction(ISD::UMIN, VT, Expand);
}
if (Subtarget.hasVSX()) {
setOperationAction(ISD::FMAXNUM, VT, Legal);
setOperationAction(ISD::FMINNUM, VT, Legal);
}
if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
setOperationAction(ISD::CTPOP, VT, Legal);
setOperationAction(ISD::CTLZ, VT, Legal);
}
else {
setOperationAction(ISD::CTPOP, VT, Expand);
setOperationAction(ISD::CTLZ, VT, Expand);
}
if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
setOperationAction(ISD::CTTZ, VT, Legal);
else
setOperationAction(ISD::CTTZ, VT, Expand);
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);
setOperationAction(ISD::AND , VT, Promote);
AddPromotedToType (ISD::AND , VT, MVT::v4i32);
setOperationAction(ISD::OR , VT, Promote);
AddPromotedToType (ISD::OR , VT, MVT::v4i32);
setOperationAction(ISD::XOR , VT, Promote);
AddPromotedToType (ISD::XOR , VT, MVT::v4i32);
setOperationAction(ISD::LOAD , VT, Promote);
AddPromotedToType (ISD::LOAD , VT, MVT::v4i32);
setOperationAction(ISD::SELECT, VT, Promote);
AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
setOperationAction(ISD::VSELECT, VT, Legal);
setOperationAction(ISD::SELECT_CC, VT, Promote);
AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
setOperationAction(ISD::STORE, VT, Promote);
AddPromotedToType (ISD::STORE, VT, MVT::v4i32);
setOperationAction(ISD::MUL , VT, Expand);
setOperationAction(ISD::SDIV, VT, Expand);
setOperationAction(ISD::SREM, VT, Expand);
setOperationAction(ISD::UDIV, VT, Expand);
setOperationAction(ISD::UREM, VT, Expand);
setOperationAction(ISD::FDIV, VT, Expand);
setOperationAction(ISD::FREM, VT, Expand);
setOperationAction(ISD::FNEG, VT, Expand);
setOperationAction(ISD::FSQRT, VT, Expand);
setOperationAction(ISD::FLOG, VT, Expand);
setOperationAction(ISD::FLOG10, VT, Expand);
setOperationAction(ISD::FLOG2, VT, Expand);
setOperationAction(ISD::FEXP, VT, Expand);
setOperationAction(ISD::FEXP2, VT, Expand);
setOperationAction(ISD::FSIN, VT, Expand);
setOperationAction(ISD::FCOS, VT, Expand);
setOperationAction(ISD::FABS, VT, Expand);
setOperationAction(ISD::FFLOOR, VT, Expand);
setOperationAction(ISD::FCEIL, VT, Expand);
setOperationAction(ISD::FTRUNC, VT, Expand);
setOperationAction(ISD::FRINT, VT, Expand);
setOperationAction(ISD::FNEARBYINT, VT, Expand);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
setOperationAction(ISD::MULHU, VT, Expand);
setOperationAction(ISD::MULHS, VT, Expand);
setOperationAction(ISD::UMUL_LOHI, VT, Expand);
setOperationAction(ISD::SMUL_LOHI, VT, Expand);
setOperationAction(ISD::UDIVREM, VT, Expand);
setOperationAction(ISD::SDIVREM, VT, Expand);
setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
setOperationAction(ISD::FPOW, VT, Expand);
setOperationAction(ISD::BSWAP, VT, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
setOperationAction(ISD::ROTL, VT, Expand);
setOperationAction(ISD::ROTR, VT, Expand);
for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
setTruncStoreAction(VT, InnerVT, Expand);
setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
}
}
setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand);
if (!Subtarget.hasP8Vector()) {
setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
}
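// We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle with merges, splats, etc.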
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
setOperationAction(ISD::AND , MVT::v4i32, Legal);
setOperationAction(ISD::OR , MVT::v4i32, Legal);
setOperationAction(ISD::XOR , MVT::v4i32, Legal);
setOperationAction(ISD::LOAD , MVT::v4i32, Legal);
setOperationAction(ISD::SELECT, MVT::v4i32,
Subtarget.useCRBits() ? Legal : Expand);
setOperationAction(ISD::STORE , MVT::v4i32, Legal);
setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal);
setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal);
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal);
setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
setOperationAction(ISD::ROTL, MVT::v1i128, Custom);
if (Subtarget.hasAltivec())
for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
setOperationAction(ISD::ROTL, VT, Legal);
if (Subtarget.hasP8Altivec())
setOperationAction(ISD::ROTL, MVT::v2i64, Legal);
addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);
setOperationAction(ISD::MUL, MVT::v4f32, Legal);
setOperationAction(ISD::FMA, MVT::v4f32, Legal);
if (Subtarget.hasVSX()) {
setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
}
if (Subtarget.hasP8Altivec())
setOperationAction(ISD::MUL, MVT::v4i32, Legal);
else
setOperationAction(ISD::MUL, MVT::v4i32, Custom);
if (Subtarget.isISA3_1()) {
setOperationAction(ISD::MUL, MVT::v2i64, Legal);
setOperationAction(ISD::MULHS, MVT::v2i64, Legal);
setOperationAction(ISD::MULHU, MVT::v2i64, Legal);
setOperationAction(ISD::MULHS, MVT::v4i32, Legal);
setOperationAction(ISD::MULHU, MVT::v4i32, Legal);
setOperationAction(ISD::UDIV, MVT::v2i64, Legal);
setOperationAction(ISD::SDIV, MVT::v2i64, Legal);
setOperationAction(ISD::UDIV, MVT::v4i32, Legal);
setOperationAction(ISD::SDIV, MVT::v4i32, Legal);
setOperationAction(ISD::UREM, MVT::v2i64, Legal);
setOperationAction(ISD::SREM, MVT::v2i64, Legal);
setOperationAction(ISD::UREM, MVT::v4i32, Legal);
setOperationAction(ISD::SREM, MVT::v4i32, Legal);
setOperationAction(ISD::UREM, MVT::v1i128, Legal);
setOperationAction(ISD::SREM, MVT::v1i128, Legal);
setOperationAction(ISD::UDIV, MVT::v1i128, Legal);
setOperationAction(ISD::SDIV, MVT::v1i128, Legal);
setOperationAction(ISD::ROTL, MVT::v1i128, Legal);
}
setOperationAction(ISD::MUL, MVT::v8i16, Legal);
setOperationAction(ISD::MUL, MVT::v16i8, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);
if (Subtarget.hasVSX()) {
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
if (Subtarget.hasP8Vector()) {
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
}
if (Subtarget.hasDirectMove() && isPPC64) {
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
}
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
if (TM.Options.UnsafeFPMath) {
setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
}
setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
setOperationAction(ISD::FROUND, MVT::f64, Legal);
setOperationAction(ISD::FRINT, MVT::f64, Legal);
setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
setOperationAction(ISD::FROUND, MVT::f32, Legal);
setOperationAction(ISD::FRINT, MVT::f32, Legal);
setOperationAction(ISD::MUL, MVT::v2f64, Legal);
setOperationAction(ISD::FMA, MVT::v2f64, Legal);
setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);
setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
setOperationAction(ISD::STORE, MVT::v2f64, Legal);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
if (Subtarget.hasP8Vector())
addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);
addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);
addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);
if (Subtarget.hasP8Altivec()) {
setOperationAction(ISD::SHL, MVT::v2i64, Legal);
setOperationAction(ISD::SRA, MVT::v2i64, Legal);
setOperationAction(ISD::SRL, MVT::v2i64, Legal);
setOperationAction(ISD::SHL, MVT::v1i128, Expand);
setOperationAction(ISD::SRL, MVT::v1i128, Expand);
setOperationAction(ISD::SRA, MVT::v1i128, Expand);
setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
}
else {
setOperationAction(ISD::SHL, MVT::v2i64, Expand);
setOperationAction(ISD::SRA, MVT::v2i64, Expand);
setOperationAction(ISD::SRL, MVT::v2i64, Expand);
setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
setOperationAction(ISD::ADD, MVT::v2i64, Expand);
setOperationAction(ISD::SUB, MVT::v2i64, Expand);
}
if (Subtarget.isISA3_1())
setOperationAction(ISD::SETCC, MVT::v1i128, Legal);
else
setOperationAction(ISD::SETCC, MVT::v1i128, Expand);
setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
setOperationAction(ISD::STORE, MVT::v2i64, Promote);
AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal);
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal);
setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal);
setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal);
setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i8, Custom);
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i8, Custom);
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i16, Custom);
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i16, Custom);
setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i8, Custom);
setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i8, Custom);
setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i16, Custom);
setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i16, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
setOperationAction(ISD::FABS, MVT::v4f32, Legal);
setOperationAction(ISD::FABS, MVT::v2f64, Legal);
setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);
setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
setOperationAction(ISD::STRICT_FMAXNUM, MVT::v4f32, Legal);
setOperationAction(ISD::STRICT_FMINNUM, MVT::v4f32, Legal);
setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal);
setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
setOperationAction(ISD::STRICT_FCEIL, MVT::v4f32, Legal);
setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);
setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
setOperationAction(ISD::STRICT_FMAXNUM, MVT::v2f64, Legal);
setOperationAction(ISD::STRICT_FMINNUM, MVT::v2f64, Legal);
setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal);
setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
setOperationAction(ISD::STRICT_FCEIL, MVT::v2f64, Legal);
setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);
addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
for (MVT FPT : MVT::fp_valuetypes())
setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
setOperationAction(ISD::SELECT, MVT::f128, Expand);
setTruncStoreAction(MVT::f128, MVT::f64, Expand);
setTruncStoreAction(MVT::f128, MVT::f32, Expand);
setOperationAction(ISD::FSIN, MVT::f128, Expand);
setOperationAction(ISD::FCOS, MVT::f128, Expand);
setOperationAction(ISD::FPOW, MVT::f128, Expand);
setOperationAction(ISD::FPOWI, MVT::f128, Expand);
setOperationAction(ISD::FREM, MVT::f128, Expand);
}
if (Subtarget.hasP8Altivec()) {
addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
}
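// Power9 adds hardware support for IEEE quad-precision (f128) arithmetic in VSX registers.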
if (Subtarget.hasP9Vector()) {
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
setOperationAction(ISD::SHL, MVT::v1i128, Legal);
setOperationAction(ISD::SRL, MVT::v1i128, Legal);
setOperationAction(ISD::SRA, MVT::v1i128, Expand);
setOperationAction(ISD::FADD, MVT::f128, Legal);
setOperationAction(ISD::FSUB, MVT::f128, Legal);
setOperationAction(ISD::FDIV, MVT::f128, Legal);
setOperationAction(ISD::FMUL, MVT::f128, Legal);
setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
setOperationAction(ISD::FMA, MVT::f128, Legal);
setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
setCondCodeAction(ISD::SETONE, MVT::f128, Expand);
setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
setOperationAction(ISD::FRINT, MVT::f128, Legal);
setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
setOperationAction(ISD::FCEIL, MVT::f128, Legal);
setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
setOperationAction(ISD::FROUND, MVT::f128, Legal);
setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
setOperationAction(ISD::BITCAST, MVT::i128, Custom);
setOperationAction(ISD::STRICT_FADD, MVT::f128, Legal);
setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal);
setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal);
setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal);
setOperationAction(ISD::STRICT_FMA, MVT::f128, Legal);
setOperationAction(ISD::STRICT_FSQRT, MVT::f128, Legal);
setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Legal);
setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FRINT, MVT::f128, Legal);
setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f128, Legal);
setOperationAction(ISD::STRICT_FFLOOR, MVT::f128, Legal);
setOperationAction(ISD::STRICT_FCEIL, MVT::f128, Legal);
setOperationAction(ISD::STRICT_FTRUNC, MVT::f128, Legal);
setOperationAction(ISD::STRICT_FROUND, MVT::f128, Legal);
setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
} else if (Subtarget.hasVSX()) {
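// Pre-Power9 VSX: f128 values live in VSX registers, but nearly all f128 operations are lowered to library calls.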
setOperationAction(ISD::LOAD, MVT::f128, Promote);
setOperationAction(ISD::STORE, MVT::f128, Promote);
AddPromotedToType(ISD::LOAD, MVT::f128, MVT::v4i32);
AddPromotedToType(ISD::STORE, MVT::f128, MVT::v4i32);
setOperationAction(ISD::FADD, MVT::f128, LibCall);
setOperationAction(ISD::FSUB, MVT::f128, LibCall);
setOperationAction(ISD::FMUL, MVT::f128, Expand);
setOperationAction(ISD::FDIV, MVT::f128, Expand);
setOperationAction(ISD::FNEG, MVT::f128, Expand);
setOperationAction(ISD::FABS, MVT::f128, Expand);
setOperationAction(ISD::FSQRT, MVT::f128, Expand);
setOperationAction(ISD::FMA, MVT::f128, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand);
setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Expand);
for (MVT VT : {MVT::f32, MVT::f64}) {
setOperationAction(ISD::FP_ROUND, VT, Custom);
setOperationAction(ISD::STRICT_FP_ROUND, VT, Custom);
}
setOperationAction(ISD::SETCC, MVT::f128, Custom);
setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Custom);
setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Custom);
setOperationAction(ISD::BR_CC, MVT::f128, Expand);
setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::i64, isPPC64 ? Custom : Expand);
}
if (Subtarget.hasP9Altivec()) {
if (Subtarget.isISA3_1()) {
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Legal);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Legal);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Legal);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Legal);
} else {
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
}
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
}
if (Subtarget.hasP10Vector()) {
setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
}
}
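// Power10 paired vector memory operations use a dedicated 256-bit register class.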
if (Subtarget.pairedVectorMemops()) {
addRegisterClass(MVT::v256i1, &PPC::VSRpRCRegClass);
setOperationAction(ISD::LOAD, MVT::v256i1, Custom);
setOperationAction(ISD::STORE, MVT::v256i1, Custom);
}
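// MMA accumulators are modeled as 512-bit registers.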
if (Subtarget.hasMMA()) {
addRegisterClass(MVT::v512i1, &PPC::UACCRCRegClass);
setOperationAction(ISD::LOAD, MVT::v512i1, Custom);
setOperationAction(ISD::STORE, MVT::v512i1, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v512i1, Custom);
}
if (Subtarget.has64BitSupport())
setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
if (Subtarget.isISA3_1())
setOperationAction(ISD::SRA, MVT::v1i128, Legal);
setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);
if (!isPPC64) {
setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
}
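// Inline 128-bit atomics via quadword load/store-conditional when the subtarget supports them.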
if (shouldInlineQuadwordAtomics()) {
setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom);
setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);
setOperationAction(ISD::INTRINSIC_VOID, MVT::i128, Custom);
}
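// Scalar booleans are 0 or 1; Altivec vector compares produce all-zeros or all-ones per element.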
setBooleanContents(ZeroOrOneBooleanContent);
if (Subtarget.hasAltivec()) {
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
}
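// There is no libcall for 128-bit multiply-with-overflow.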
setLibcallName(RTLIB::MULO_I128, nullptr);
if (!isPPC64) {
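// These libcalls are not available in 32-bit.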
setLibcallName(RTLIB::SHL_I128, nullptr);
setLibcallName(RTLIB::SRL_I128, nullptr);
setLibcallName(RTLIB::SRA_I128, nullptr);
setLibcallName(RTLIB::MUL_I128, nullptr);
setLibcallName(RTLIB::MULO_I64, nullptr);
}
if (!isPPC64)
setMaxAtomicSizeInBitsSupported(32);
else if (shouldInlineQuadwordAtomics())
setMaxAtomicSizeInBitsSupported(128);
else
setMaxAtomicSizeInBitsSupported(64);
setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
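// We have target-specific DAG combine patterns for the following nodes: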
setTargetDAGCombine({ISD::ADD, ISD::SHL, ISD::SRA, ISD::SRL, ISD::MUL,
ISD::FMA, ISD::SINT_TO_FP, ISD::BUILD_VECTOR});
if (Subtarget.hasFPCVT())
setTargetDAGCombine(ISD::UINT_TO_FP);
setTargetDAGCombine({ISD::LOAD, ISD::STORE, ISD::BR_CC});
if (Subtarget.useCRBits())
setTargetDAGCombine(ISD::BRCOND);
setTargetDAGCombine({ISD::BSWAP, ISD::INTRINSIC_WO_CHAIN,
ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID});
setTargetDAGCombine({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, ISD::ANY_EXTEND});
setTargetDAGCombine({ISD::TRUNCATE, ISD::VECTOR_SHUFFLE});
if (Subtarget.useCRBits()) {
setTargetDAGCombine({ISD::TRUNCATE, ISD::SETCC, ISD::SELECT_CC});
}
if (Subtarget.hasP9Altivec()) {
setTargetDAGCombine({ISD::ABS, ISD::VSELECT});
}
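// Map the fp128 runtime calls to the IEEE-quad libm entry points (the "f128" suffix).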
setLibcallName(RTLIB::LOG_F128, "logf128");
setLibcallName(RTLIB::LOG2_F128, "log2f128");
setLibcallName(RTLIB::LOG10_F128, "log10f128");
setLibcallName(RTLIB::EXP_F128, "expf128");
setLibcallName(RTLIB::EXP2_F128, "exp2f128");
setLibcallName(RTLIB::SIN_F128, "sinf128");
setLibcallName(RTLIB::COS_F128, "cosf128");
setLibcallName(RTLIB::POW_F128, "powf128");
setLibcallName(RTLIB::FMIN_F128, "fminf128");
setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
setLibcallName(RTLIB::REM_F128, "fmodf128");
setLibcallName(RTLIB::SQRT_F128, "sqrtf128");
setLibcallName(RTLIB::CEIL_F128, "ceilf128");
setLibcallName(RTLIB::FLOOR_F128, "floorf128");
setLibcallName(RTLIB::TRUNC_F128, "truncf128");
setLibcallName(RTLIB::ROUND_F128, "roundf128");
setLibcallName(RTLIB::LROUND_F128, "lroundf128");
setLibcallName(RTLIB::LLROUND_F128, "llroundf128");
setLibcallName(RTLIB::RINT_F128, "rintf128");
setLibcallName(RTLIB::LRINT_F128, "lrintf128");
setLibcallName(RTLIB::LLRINT_F128, "llrintf128");
setLibcallName(RTLIB::NEARBYINT_F128, "nearbyintf128");
setLibcallName(RTLIB::FMA_F128, "fmaf128");
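// With 32 condition bits, we don't need to sink (and duplicate) compares aggressively in CodeGenPrep.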
if (Subtarget.useCRBits()) {
setHasMultipleConditionRegisters();
setJumpIsExpensive();
}
setMinFunctionAlignment(Align(4));
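// Pick preferred loop and function alignment based on the CPU directive.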
switch (Subtarget.getCPUDirective()) {
default: break;
case PPC::DIR_970:
case PPC::DIR_A2:
case PPC::DIR_E500:
case PPC::DIR_E500mc:
case PPC::DIR_E5500:
case PPC::DIR_PWR4:
case PPC::DIR_PWR5:
case PPC::DIR_PWR5X:
case PPC::DIR_PWR6:
case PPC::DIR_PWR6X:
case PPC::DIR_PWR7:
case PPC::DIR_PWR8:
case PPC::DIR_PWR9:
case PPC::DIR_PWR10:
case PPC::DIR_PWR_FUTURE:
setPrefLoopAlignment(Align(16));
setPrefFunctionAlignment(Align(16));
break;
}
if (Subtarget.enableMachineScheduler())
setSchedulingPreference(Sched::Source);
else
setSchedulingPreference(Sched::Hybrid);
computeRegisterProperties(STI.getRegisterInfo());
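// The Freescale cores do better with aggressive inlining of memcpy and friends; GCC uses the same threshold of 128 bytes (= 32 word stores).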
if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
Subtarget.getCPUDirective() == PPC::DIR_E5500) {
MaxStoresPerMemset = 32;
MaxStoresPerMemsetOptSize = 16;
MaxStoresPerMemcpy = 32;
MaxStoresPerMemcpyOptSize = 8;
MaxStoresPerMemmove = 32;
MaxStoresPerMemmoveOptSize = 8;
} else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
MaxStoresPerMemset = 128;
MaxStoresPerMemcpy = 128;
MaxStoresPerMemmove = 128;
MaxLoadsPerMemcmp = 128;
} else {
MaxLoadsPerMemcmp = 8;
MaxLoadsPerMemcmpOptSize = 4;
}
IsStrictFPEnabled = true;
PredictableSelectIsExpensive = Subtarget.isPredictableSelectIsExpensive();
}
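// Relate the computed sets of memory-operation flags to the PPC addressing modes that can satisfy them; used to pick the optimal addressing mode for loads and stores.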
void PPCTargetLowering::initializeAddrModeMap() {
AddrModesMap[PPC::AM_DForm] = {
PPC::MOF_ZExt | PPC::MOF_RPlusSImm16 | PPC::MOF_WordInt,
PPC::MOF_ZExt | PPC::MOF_RPlusLo | PPC::MOF_WordInt,
PPC::MOF_ZExt | PPC::MOF_NotAddNorCst | PPC::MOF_WordInt,
PPC::MOF_ZExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_WordInt,
PPC::MOF_ZExt | PPC::MOF_RPlusSImm16 | PPC::MOF_SubWordInt,
PPC::MOF_ZExt | PPC::MOF_RPlusLo | PPC::MOF_SubWordInt,
PPC::MOF_ZExt | PPC::MOF_NotAddNorCst | PPC::MOF_SubWordInt,
PPC::MOF_ZExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_SubWordInt,
PPC::MOF_SExt | PPC::MOF_RPlusSImm16 | PPC::MOF_SubWordInt,
PPC::MOF_SExt | PPC::MOF_RPlusLo | PPC::MOF_SubWordInt,
PPC::MOF_SExt | PPC::MOF_NotAddNorCst | PPC::MOF_SubWordInt,
PPC::MOF_SExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_SubWordInt,
PPC::MOF_RPlusSImm16 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9,
PPC::MOF_RPlusLo | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9,
PPC::MOF_NotAddNorCst | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9,
PPC::MOF_AddrIsSImm32 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9,
};
AddrModesMap[PPC::AM_DSForm] = {
PPC::MOF_SExt | PPC::MOF_RPlusSImm16Mult4 | PPC::MOF_WordInt,
PPC::MOF_SExt | PPC::MOF_NotAddNorCst | PPC::MOF_WordInt,
PPC::MOF_SExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_WordInt,
PPC::MOF_RPlusSImm16Mult4 | PPC::MOF_DoubleWordInt,
PPC::MOF_NotAddNorCst | PPC::MOF_DoubleWordInt,
PPC::MOF_AddrIsSImm32 | PPC::MOF_DoubleWordInt,
PPC::MOF_RPlusSImm16Mult4 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetP9,
PPC::MOF_NotAddNorCst | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetP9,
PPC::MOF_AddrIsSImm32 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetP9,
};
AddrModesMap[PPC::AM_DQForm] = {
PPC::MOF_RPlusSImm16Mult16 | PPC::MOF_Vector | PPC::MOF_SubtargetP9,
PPC::MOF_NotAddNorCst | PPC::MOF_Vector | PPC::MOF_SubtargetP9,
PPC::MOF_AddrIsSImm32 | PPC::MOF_Vector | PPC::MOF_SubtargetP9,
};
AddrModesMap[PPC::AM_PrefixDForm] = {PPC::MOF_RPlusSImm34 |
PPC::MOF_SubtargetP10};
}
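/// Helper for getByValTypeAlignment to determine the desired alignment of a by-value aggregate, capped at MaxMaxAlign.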
static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) {
if (MaxAlign == MaxMaxAlign)
return;
if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
if (MaxMaxAlign >= 32 &&
VTy->getPrimitiveSizeInBits().getFixedSize() >= 256)
MaxAlign = Align(32);
else if (VTy->getPrimitiveSizeInBits().getFixedSize() >= 128 &&
MaxAlign < 16)
MaxAlign = Align(16);
} else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Align EltAlign;
getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
if (EltAlign > MaxAlign)
MaxAlign = EltAlign;
} else if (StructType *STy = dyn_cast<StructType>(Ty)) {
for (auto *EltTy : STy->elements()) {
Align EltAlign;
getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
if (EltAlign > MaxAlign)
MaxAlign = EltAlign;
if (MaxAlign == MaxMaxAlign)
break;
}
}
}
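/// Return the desired alignment for ByVal aggregate function arguments in the caller parameter area.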
uint64_t PPCTargetLowering::getByValTypeAlignment(Type *Ty,
const DataLayout &DL) const {
Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4);
if (Subtarget.hasAltivec())
getMaxByValAlign(Ty, Alignment, Align(16));
return Alignment.value();
}
bool PPCTargetLowering::useSoftFloat() const {
return Subtarget.useSoftFloat();
}
bool PPCTargetLowering::hasSPE() const {
return Subtarget.hasSPE();
}
bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
return VT.isScalarInteger();
}
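/// Return the textual name of a PPC-specific DAG node for debug output.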
const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch ((PPCISD::NodeType)Opcode) {
case PPCISD::FIRST_NUMBER: break;
case PPCISD::FSEL: return "PPCISD::FSEL";
case PPCISD::XSMAXC: return "PPCISD::XSMAXC";
case PPCISD::XSMINC: return "PPCISD::XSMINC";
case PPCISD::FCFID: return "PPCISD::FCFID";
case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
case PPCISD::FP_TO_UINT_IN_VSR:
return "PPCISD::FP_TO_UINT_IN_VSR,";
case PPCISD::FP_TO_SINT_IN_VSR:
return "PPCISD::FP_TO_SINT_IN_VSR";
case PPCISD::FRE: return "PPCISD::FRE";
case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
case PPCISD::FTSQRT:
return "PPCISD::FTSQRT";
case PPCISD::FSQRT:
return "PPCISD::FSQRT";
case PPCISD::STFIWX: return "PPCISD::STFIWX";
case PPCISD::VPERM: return "PPCISD::VPERM";
case PPCISD::XXSPLT: return "PPCISD::XXSPLT";
case PPCISD::XXSPLTI_SP_TO_DP:
return "PPCISD::XXSPLTI_SP_TO_DP";
case PPCISD::XXSPLTI32DX:
return "PPCISD::XXSPLTI32DX";
case PPCISD::VECINSERT: return "PPCISD::VECINSERT";
case PPCISD::XXPERMDI: return "PPCISD::XXPERMDI";
case PPCISD::VECSHL: return "PPCISD::VECSHL";
case PPCISD::CMPB: return "PPCISD::CMPB";
case PPCISD::Hi: return "PPCISD::Hi";
case PPCISD::Lo: return "PPCISD::Lo";
case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET";
case PPCISD::PROBED_ALLOCA: return "PPCISD::PROBED_ALLOCA";
case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
case PPCISD::SRL: return "PPCISD::SRL";
case PPCISD::SRA: return "PPCISD::SRA";
case PPCISD::SHL: return "PPCISD::SHL";
case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE";
case PPCISD::CALL: return "PPCISD::CALL";
case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
case PPCISD::CALL_NOTOC: return "PPCISD::CALL_NOTOC";
case PPCISD::CALL_RM:
return "PPCISD::CALL_RM";
case PPCISD::CALL_NOP_RM:
return "PPCISD::CALL_NOP_RM";
case PPCISD::CALL_NOTOC_RM:
return "PPCISD::CALL_NOTOC_RM";
case PPCISD::MTCTR: return "PPCISD::MTCTR";
case PPCISD::BCTRL: return "PPCISD::BCTRL";
case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC";
case PPCISD::BCTRL_RM:
return "PPCISD::BCTRL_RM";
case PPCISD::BCTRL_LOAD_TOC_RM:
return "PPCISD::BCTRL_LOAD_TOC_RM";
case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE";
case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
case PPCISD::MFVSR: return "PPCISD::MFVSR";
case PPCISD::MTVSRA: return "PPCISD::MTVSRA";
case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ";
case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP";
case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP";
case PPCISD::SCALAR_TO_VECTOR_PERMUTED:
return "PPCISD::SCALAR_TO_VECTOR_PERMUTED";
case PPCISD::ANDI_rec_1_EQ_BIT:
return "PPCISD::ANDI_rec_1_EQ_BIT";
case PPCISD::ANDI_rec_1_GT_BIT:
return "PPCISD::ANDI_rec_1_GT_BIT";
case PPCISD::VCMP: return "PPCISD::VCMP";
case PPCISD::VCMP_rec: return "PPCISD::VCMP_rec";
case PPCISD::LBRX: return "PPCISD::LBRX";
case PPCISD::STBRX: return "PPCISD::STBRX";
case PPCISD::LFIWAX: return "PPCISD::LFIWAX";
case PPCISD::LFIWZX: return "PPCISD::LFIWZX";
case PPCISD::LXSIZX: return "PPCISD::LXSIZX";
case PPCISD::STXSIX: return "PPCISD::STXSIX";
case PPCISD::VEXTS: return "PPCISD::VEXTS";
case PPCISD::LXVD2X: return "PPCISD::LXVD2X";
case PPCISD::STXVD2X: return "PPCISD::STXVD2X";
case PPCISD::LOAD_VEC_BE: return "PPCISD::LOAD_VEC_BE";
case PPCISD::STORE_VEC_BE: return "PPCISD::STORE_VEC_BE";
case PPCISD::ST_VSR_SCAL_INT:
return "PPCISD::ST_VSR_SCAL_INT";
case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
case PPCISD::BDNZ: return "PPCISD::BDNZ";
case PPCISD::BDZ: return "PPCISD::BDZ";
case PPCISD::MFFS: return "PPCISD::MFFS";
case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
case PPCISD::CR6SET: return "PPCISD::CR6SET";
case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET";
case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT";
case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT";
case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L";
case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS";
case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L";
case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR";
case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
case PPCISD::TLSGD_AIX: return "PPCISD::TLSGD_AIX";
case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA";
case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L";
case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR";
case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L";
case PPCISD::PADDI_DTPREL:
return "PPCISD::PADDI_DTPREL";
case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT";
case PPCISD::SC: return "PPCISD::SC";
case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB";
case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE";
case PPCISD::RFEBB: return "PPCISD::RFEBB";
case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD";
case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN";
case PPCISD::VABSD: return "PPCISD::VABSD";
case PPCISD::BUILD_FP128: return "PPCISD::BUILD_FP128";
case PPCISD::BUILD_SPE64: return "PPCISD::BUILD_SPE64";
case PPCISD::EXTRACT_SPE: return "PPCISD::EXTRACT_SPE";
case PPCISD::EXTSWSLI: return "PPCISD::EXTSWSLI";
case PPCISD::LD_VSX_LH: return "PPCISD::LD_VSX_LH";
case PPCISD::FP_EXTEND_HALF: return "PPCISD::FP_EXTEND_HALF";
case PPCISD::MAT_PCREL_ADDR: return "PPCISD::MAT_PCREL_ADDR";
case PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR:
return "PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR";
case PPCISD::TLS_LOCAL_EXEC_MAT_ADDR:
return "PPCISD::TLS_LOCAL_EXEC_MAT_ADDR";
case PPCISD::ACC_BUILD: return "PPCISD::ACC_BUILD";
case PPCISD::PAIR_BUILD: return "PPCISD::PAIR_BUILD";
case PPCISD::EXTRACT_VSX_REG: return "PPCISD::EXTRACT_VSX_REG";
case PPCISD::XXMFACC: return "PPCISD::XXMFACC";
case PPCISD::LD_SPLAT: return "PPCISD::LD_SPLAT";
case PPCISD::ZEXT_LD_SPLAT: return "PPCISD::ZEXT_LD_SPLAT";
case PPCISD::SEXT_LD_SPLAT: return "PPCISD::SEXT_LD_SPLAT";
case PPCISD::FNMSUB: return "PPCISD::FNMSUB";
case PPCISD::STRICT_FADDRTZ:
return "PPCISD::STRICT_FADDRTZ";
case PPCISD::STRICT_FCTIDZ:
return "PPCISD::STRICT_FCTIDZ";
case PPCISD::STRICT_FCTIWZ:
return "PPCISD::STRICT_FCTIWZ";
case PPCISD::STRICT_FCTIDUZ:
return "PPCISD::STRICT_FCTIDUZ";
case PPCISD::STRICT_FCTIWUZ:
return "PPCISD::STRICT_FCTIWUZ";
case PPCISD::STRICT_FCFID:
return "PPCISD::STRICT_FCFID";
case PPCISD::STRICT_FCFIDU:
return "PPCISD::STRICT_FCFIDU";
case PPCISD::STRICT_FCFIDS:
return "PPCISD::STRICT_FCFIDS";
case PPCISD::STRICT_FCFIDUS:
return "PPCISD::STRICT_FCFIDUS";
case PPCISD::LXVRZX: return "PPCISD::LXVRZX";
}
return nullptr;
}
EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
EVT VT) const {
if (!VT.isVector())
return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
return VT.changeVectorElementTypeToInteger();
}
bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
return true;
}
static bool isFloatingPointZero(SDValue Op) {
if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
return CFP->getValueAPF().isZero();
else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
return CFP->getValueAPF().isZero();
}
return false;
}
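// Return true if the shuffle mask element Op is undef (negative) or matches
// the expected value Val.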
static bool isConstantOrUndef(int Op, int Val) {
return Op < 0 || Op == Val;
}
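// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
// VPKUHUM instruction. ShuffleKind distinguishes big-endian shuffles of two
// different inputs (0), either-endian shuffles of two identical inputs (1),
// and little-endian shuffles of two different inputs (2), for which the
// operands are expected to be swapped by the caller.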
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
SelectionDAG &DAG) {
bool IsLE = DAG.getDataLayout().isLittleEndian();
if (ShuffleKind == 0) {
if (IsLE)
return false;
for (unsigned i = 0; i != 16; ++i)
if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
return false;
} else if (ShuffleKind == 2) {
if (!IsLE)
return false;
for (unsigned i = 0; i != 16; ++i)
if (!isConstantOrUndef(N->getMaskElt(i), i*2))
return false;
} else if (ShuffleKind == 1) {
unsigned j = IsLE ? 0 : 1;
for (unsigned i = 0; i != 8; ++i)
if (!isConstantOrUndef(N->getMaskElt(i), i*2+j) ||
!isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
return false;
}
return true;
}
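// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
// VPKUWUM instruction; ShuffleKind has the same meaning as for VPKUHUM above.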
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
SelectionDAG &DAG) {
bool IsLE = DAG.getDataLayout().isLittleEndian();
if (ShuffleKind == 0) {
if (IsLE)
return false;
for (unsigned i = 0; i != 16; i += 2)
if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) ||
!isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
return false;
} else if (ShuffleKind == 2) {
if (!IsLE)
return false;
for (unsigned i = 0; i != 16; i += 2)
if (!isConstantOrUndef(N->getMaskElt(i ), i*2) ||
!isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
return false;
} else if (ShuffleKind == 1) {
unsigned j = IsLE ? 0 : 2;
for (unsigned i = 0; i != 8; i += 2)
if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) ||
!isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
!isConstantOrUndef(N->getMaskElt(i+8), i*2+j) ||
!isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
return false;
}
return true;
}
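// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
// VPKUDUM instruction, which requires Power8 vector support; ShuffleKind has
// the same meaning as for VPKUHUM above.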
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
SelectionDAG &DAG) {
const PPCSubtarget &Subtarget = DAG.getSubtarget<PPCSubtarget>();
if (!Subtarget.hasP8Vector())
return false;
bool IsLE = DAG.getDataLayout().isLittleEndian();
if (ShuffleKind == 0) {
if (IsLE)
return false;
for (unsigned i = 0; i != 16; i += 4)
if (!isConstantOrUndef(N->getMaskElt(i ), i*2+4) ||
!isConstantOrUndef(N->getMaskElt(i+1), i*2+5) ||
!isConstantOrUndef(N->getMaskElt(i+2), i*2+6) ||
!isConstantOrUndef(N->getMaskElt(i+3), i*2+7))
return false;
} else if (ShuffleKind == 2) {
if (!IsLE)
return false;
for (unsigned i = 0; i != 16; i += 4)
if (!isConstantOrUndef(N->getMaskElt(i ), i*2) ||
!isConstantOrUndef(N->getMaskElt(i+1), i*2+1) ||
!isConstantOrUndef(N->getMaskElt(i+2), i*2+2) ||
!isConstantOrUndef(N->getMaskElt(i+3), i*2+3))
return false;
} else if (ShuffleKind == 1) {
unsigned j = IsLE ? 0 : 4;
for (unsigned i = 0; i != 8; i += 4)
if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) ||
!isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
!isConstantOrUndef(N->getMaskElt(i+2), i*2+j+2) ||
!isConstantOrUndef(N->getMaskElt(i+3), i*2+j+3) ||
!isConstantOrUndef(N->getMaskElt(i+8), i*2+j) ||
!isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1) ||
!isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
!isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
return false;
}
return true;
}
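// isVMerge - Common helper for the VMRGL*/VMRGH* checks: verify that the
// mask interleaves UnitSize-byte runs starting at byte LHSStart with runs
// starting at byte RHSStart (mask indices 16-31 refer to the second operand).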
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
unsigned LHSStart, unsigned RHSStart) {
if (N->getValueType(0) != MVT::v16i8)
return false;
assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
"Unsupported merge size!");
for (unsigned i = 0; i != 8/UnitSize; ++i)
for (unsigned j = 0; j != UnitSize; ++j) {
if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
LHSStart+j+i*UnitSize) ||
!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
RHSStart+j+i*UnitSize))
return false;
}
return true;
}
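// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for a
// VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
// ShuffleKind: 0 = big-endian merge of two inputs, 1 = unary merge of
// identical inputs, 2 = little-endian merge with swapped operands.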
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
unsigned ShuffleKind, SelectionDAG &DAG) {
if (DAG.getDataLayout().isLittleEndian()) {
if (ShuffleKind == 1) return isVMerge(N, UnitSize, 0, 0);
else if (ShuffleKind == 2) return isVMerge(N, UnitSize, 0, 16);
else
return false;
} else {
if (ShuffleKind == 1) return isVMerge(N, UnitSize, 8, 8);
else if (ShuffleKind == 0) return isVMerge(N, UnitSize, 8, 24);
else
return false;
}
}
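// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for a
// VMRGH* instruction with the specified unit size; ShuffleKind is as above.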
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
unsigned ShuffleKind, SelectionDAG &DAG) {
if (DAG.getDataLayout().isLittleEndian()) {
if (ShuffleKind == 1) return isVMerge(N, UnitSize, 8, 8);
else if (ShuffleKind == 2) return isVMerge(N, UnitSize, 8, 24);
else
return false;
} else {
if (ShuffleKind == 1) return isVMerge(N, UnitSize, 0, 0);
else if (ShuffleKind == 0) return isVMerge(N, UnitSize, 0, 16);
else
return false;
}
}
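// Helper for the VMRGEW/VMRGOW checks: IndexOffset selects even or odd words
// within each doubleword, and RHSStartValue gives the mask base for elements
// taken from the second input (16) or, for a unary shuffle, from the first
// input again (0).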
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
unsigned RHSStartValue) {
if (N->getValueType(0) != MVT::v16i8)
return false;
for (unsigned i = 0; i < 2; ++i)
for (unsigned j = 0; j < 4; ++j)
if (!isConstantOrUndef(N->getMaskElt(i*4+j),
i*RHSStartValue+j+IndexOffset) ||
!isConstantOrUndef(N->getMaskElt(i*4+j+8),
i*RHSStartValue+j+IndexOffset+8))
return false;
return true;
}
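// isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for a
// VMRGEW (CheckEven) or VMRGOW instruction; ShuffleKind is as for the other
// merge checks.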
bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
unsigned ShuffleKind, SelectionDAG &DAG) {
if (DAG.getDataLayout().isLittleEndian()) {
unsigned indexOffset = CheckEven ? 4 : 0;
if (ShuffleKind == 1) return isVMerge(N, indexOffset, 0);
else if (ShuffleKind == 2) return isVMerge(N, indexOffset, 16);
else
return false;
}
else {
unsigned indexOffset = CheckEven ? 0 : 4;
if (ShuffleKind == 1) return isVMerge(N, indexOffset, 0);
else if (ShuffleKind == 0) return isVMerge(N, indexOffset, 16);
else
return false;
}
return false;
}
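// isVSLDOIShuffleMask - If this is a VSLDOI shuffle mask, return the shift
// amount in bytes, otherwise return -1; ShuffleKind is as for the pack and
// merge checks above.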
int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
SelectionDAG &DAG) {
if (N->getValueType(0) != MVT::v16i8)
return -1;
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
unsigned i;
for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
;
if (i == 16) return -1;
unsigned ShiftAmt = SVOp->getMaskElt(i);
if (ShiftAmt < i) return -1;
ShiftAmt -= i;
bool isLE = DAG.getDataLayout().isLittleEndian();
if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
for (++i; i != 16; ++i)
if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
return -1;
} else if (ShuffleKind == 1) {
for (++i; i != 16; ++i)
if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
return -1;
} else
return -1;
if (isLE)
ShiftAmt = 16 - ShiftAmt;
return ShiftAmt;
}
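// isSplatShuffleMask - Return true if the shuffle mask splats a single
// element of EltSize bytes across the whole vector, as required by the
// VSPLT*/XXSPLT* instructions.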
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
EVT VT = N->getValueType(0);
if (VT == MVT::v2i64 || VT == MVT::v2f64)
return EltSize == 8 && N->getMaskElt(0) == N->getMaskElt(1);
assert(VT == MVT::v16i8 && isPowerOf2_32(EltSize) &&
EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");
if (N->getMaskElt(0) % EltSize != 0)
return false;
unsigned ElementBase = N->getMaskElt(0);
if (ElementBase >= 16)
return false;
for (unsigned i = 1; i != EltSize; ++i)
if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
return false;
for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
if (N->getMaskElt(i) < 0) continue;
for (unsigned j = 0; j != EltSize; ++j)
if (N->getMaskElt(i+j) != N->getMaskElt(j))
return false;
}
return true;
}
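// Check that the mask shuffles whole Width-byte elements: within each
// element the byte indices must be consecutive, ascending for StepLen 1 or
// descending for StepLen -1.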
static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
int StepLen) {
assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
"Unexpected element width.");
assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");
unsigned NumOfElem = 16 / Width;
unsigned MaskVal[16];
for (unsigned i = 0; i < NumOfElem; ++i) {
MaskVal[0] = N->getMaskElt(i * Width);
if ((StepLen == 1) && (MaskVal[0] % Width)) {
return false;
} else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
return false;
}
for (unsigned int j = 1; j < Width; ++j) {
MaskVal[j] = N->getMaskElt(i * Width + j);
if (MaskVal[j] != MaskVal[j-1] + StepLen) {
return false;
}
}
}
return true;
}
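// isXXINSERTWMask - Return true if this shuffle can be implemented as an
// XXINSERTW, i.e. one aligned word of one operand inserted into the other.
// On success, ShiftElts is the word rotation to apply to the source,
// InsertAtByte is the insertion offset, and Swap indicates that the operands
// must be exchanged.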
bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
unsigned &InsertAtByte, bool &Swap, bool IsLE) {
if (!isNByteElemShuffleMask(N, 4, 1))
return false;
unsigned M0 = N->getMaskElt(0) / 4;
unsigned M1 = N->getMaskElt(4) / 4;
unsigned M2 = N->getMaskElt(8) / 4;
unsigned M3 = N->getMaskElt(12) / 4;
unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
(M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
InsertAtByte = IsLE ? 12 : 0;
Swap = M0 < 4;
return true;
}
if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
(M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
InsertAtByte = IsLE ? 8 : 4;
Swap = M1 < 4;
return true;
}
if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
(M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
InsertAtByte = IsLE ? 4 : 8;
Swap = M2 < 4;
return true;
}
if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
(M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
InsertAtByte = IsLE ? 0 : 12;
Swap = M3 < 4;
return true;
}
if (N->getOperand(1).isUndef()) {
ShiftElts = 0;
Swap = true;
unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
InsertAtByte = IsLE ? 12 : 0;
return true;
}
if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
InsertAtByte = IsLE ? 8 : 4;
return true;
}
if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
InsertAtByte = IsLE ? 4 : 8;
return true;
}
if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
InsertAtByte = IsLE ? 0 : 12;
return true;
}
}
return false;
}
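// isXXSLDWIShuffleMask - Return true if this shuffle is a rotation by whole
// words that XXSLDWI can perform; ShiftElts receives the shift count in
// words and Swap whether the operands must be exchanged.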
bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
bool &Swap, bool IsLE) {
assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
if (!isNByteElemShuffleMask(N, 4, 1))
return false;
unsigned M0 = N->getMaskElt(0) / 4;
unsigned M1 = N->getMaskElt(4) / 4;
unsigned M2 = N->getMaskElt(8) / 4;
unsigned M3 = N->getMaskElt(12) / 4;
if (N->getOperand(1).isUndef()) {
assert(M0 < 4 && "Indexing into an undef vector?");
if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
return false;
ShiftElts = IsLE ? (4 - M0) % 4 : M0;
Swap = false;
return true;
}
if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
return false;
if (IsLE) {
if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
Swap = false;
ShiftElts = (8 - M0) % 8;
} else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
Swap = true;
ShiftElts = (4 - M0) % 4;
}
return true;
} else {
if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
Swap = false;
ShiftElts = M0;
} else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
Swap = true;
ShiftElts = M0 - 4;
}
return true;
}
}
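// Return true if the mask reverses the bytes within each Width-byte
// element, matching the XXBRH/XXBRW/XXBRD/XXBRQ byte-reverse instructions.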
static bool isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
if (!isNByteElemShuffleMask(N, Width, -1))
return false;
for (int i = 0; i < 16; i += Width)
if (N->getMaskElt(i) != i + Width - 1)
return false;
return true;
}
bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
return isXXBRShuffleMaskHelper(N, 2);
}
bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
return isXXBRShuffleMaskHelper(N, 4);
}
bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
return isXXBRShuffleMaskHelper(N, 8);
}
bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
return isXXBRShuffleMaskHelper(N, 16);
}
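// isXXPERMDIShuffleMask - Return true if this shuffle selects one doubleword
// from each operand as XXPERMDI does; DM receives the two-bit doubleword
// selector immediate and Swap whether the operands must be exchanged.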
bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
bool &Swap, bool IsLE) {
assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
if (!isNByteElemShuffleMask(N, 8, 1))
return false;
unsigned M0 = N->getMaskElt(0) / 8;
unsigned M1 = N->getMaskElt(8) / 8;
assert(((M0 | M1) < 4) && "A mask element out of bounds?");
if (N->getOperand(1).isUndef()) {
if ((M0 | M1) < 2) {
DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
Swap = false;
return true;
} else
return false;
}
if (IsLE) {
if (M0 > 1 && M1 < 2) {
Swap = false;
} else if (M0 < 2 && M1 > 1) {
M0 = (M0 + 2) % 4;
M1 = (M1 + 2) % 4;
Swap = true;
} else
return false;
DM = (((~M1) & 1) << 1) + ((~M0) & 1);
return true;
} else {
if (M0 < 2 && M1 > 1) {
Swap = false;
} else if (M0 > 1 && M1 < 2) {
M0 = (M0 + 2) % 4;
M1 = (M1 + 2) % 4;
Swap = true;
} else
return false;
DM = (M0 << 1) + (M1 & 1);
return true;
}
}
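// getSplatIdxForPPCMnemonics - Return the splat index as expected by the PPC
// splat mnemonics, which number elements from the big-endian end of the
// register.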
unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
SelectionDAG &DAG) {
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
assert(isSplatShuffleMask(SVOp, EltSize));
EVT VT = SVOp->getValueType(0);
if (VT == MVT::v2i64 || VT == MVT::v2f64)
return DAG.getDataLayout().isLittleEndian() ? 1 - SVOp->getMaskElt(0)
: SVOp->getMaskElt(0);
if (DAG.getDataLayout().isLittleEndian())
return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
else
return SVOp->getMaskElt(0) / EltSize;
}
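// get_VSPLTI_elt - If this BUILD_VECTOR is a constant splat that can be
// materialized with a vspltis[bhw] of the given element size (ByteSize of 1,
// 2 or 4), return the value to splat as a target constant; otherwise return
// an empty SDValue.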
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
SDValue OpVal;
unsigned EltSize = 16/N->getNumOperands();
if (EltSize < ByteSize) {
unsigned Multiple = ByteSize/EltSize;
SDValue UniquedVals[4];
assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
if (N->getOperand(i).isUndef()) continue;
if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
if (!UniquedVals[i&(Multiple-1)].getNode())
UniquedVals[i&(Multiple-1)] = N->getOperand(i);
else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
return SDValue();
}
bool LeadingZero = true;
bool LeadingOnes = true;
for (unsigned i = 0; i != Multiple-1; ++i) {
if (!UniquedVals[i].getNode()) continue;
LeadingZero &= isNullConstant(UniquedVals[i]);
LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
}
if (LeadingZero) {
if (!UniquedVals[Multiple-1].getNode())
return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);
int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
if (Val < 16) return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
}
if (LeadingOnes) {
if (!UniquedVals[Multiple-1].getNode())
return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32);
int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
if (Val >= -16) return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
}
return SDValue();
}
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
if (N->getOperand(i).isUndef()) continue;
if (!OpVal.getNode())
OpVal = N->getOperand(i);
else if (OpVal != N->getOperand(i))
return SDValue();
}
if (!OpVal.getNode()) return SDValue();
unsigned ValSizeInBytes = EltSize;
uint64_t Value = 0;
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
Value = CN->getZExtValue();
} else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
Value = FloatToBits(CN->getValueAPF().convertToFloat());
}
if (ValSizeInBytes < ByteSize) return SDValue();
if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
return SDValue();
int MaskVal = SignExtend32(Value, ByteSize * 8);
if (MaskVal == 0) return SDValue();
if (SignExtend32<5>(MaskVal) == MaskVal)
return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
return SDValue();
}
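// isIntS16Immediate - Test whether the node is a 32- or 64-bit constant that
// can be represented exactly as a sign-extended 16-bit value; if so, return
// true and the immediate in Imm.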
bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
if (!isa<ConstantSDNode>(N))
return false;
Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
if (N->getValueType(0) == MVT::i32)
return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
else
return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
return isIntS16Immediate(Op.getNode(), Imm);
}
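// Return true if this OR can be treated as an ADD because the set bits of
// its operands are provably disjoint.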
static bool provablyDisjointOr(SelectionDAG &DAG, const SDValue &N) {
if (N.getOpcode() != ISD::OR)
return false;
KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
if (!LHSKnown.Zero.getBoolValue())
return false;
KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
return (~(LHSKnown.Zero | RHSKnown.Zero) == 0);
}
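// SelectAddressEVXRegReg - Check whether this address feeds an SPE
// double-precision memory access, which must use the indexed [r+r] EVX form;
// if so, return its components.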
bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
SDValue &Index,
SelectionDAG &DAG) const {
for (SDNode *U : N->uses()) {
if (MemSDNode *Memop = dyn_cast<MemSDNode>(U)) {
if (Memop->getMemoryVT() == MVT::f64) {
Base = N.getOperand(0);
Index = N.getOperand(1);
return true;
}
}
}
return false;
}
bool llvm::isIntS34Immediate(SDNode *N, int64_t &Imm) {
if (!isa<ConstantSDNode>(N))
return false;
Imm = (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
return isInt<34>(Imm);
}
bool llvm::isIntS34Immediate(SDValue Op, int64_t &Imm) {
return isIntS34Immediate(Op.getNode(), Imm);
}
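// SelectAddressRegReg - Check whether the address can be represented as an
// indexed [r+r] operation; returns false when it is better expressed as
// [r+imm] (a displacement fitting a signed 16-bit field with the required
// encoding alignment) or as a PC-relative access.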
bool PPCTargetLowering::SelectAddressRegReg(
SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG,
MaybeAlign EncodingAlignment) const {
if (SelectAddressPCRel(N, Base))
return false;
int16_t Imm = 0;
if (N.getOpcode() == ISD::ADD) {
if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
return true;
if (isIntS16Immediate(N.getOperand(1), Imm) &&
(!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
return false;
if (N.getOperand(1).getOpcode() == PPCISD::Lo)
return false;
Base = N.getOperand(0);
Index = N.getOperand(1);
return true;
} else if (N.getOpcode() == ISD::OR) {
if (isIntS16Immediate(N.getOperand(1), Imm) &&
(!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
return false;
KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
if (LHSKnown.Zero.getBoolValue()) {
KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
Base = N.getOperand(0);
Index = N.getOperand(1);
return true;
}
}
}
return false;
}
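// An i64 load or store into a stack slot aligned to less than 4 bytes may
// force frame-index elimination to use an indexed instruction, since the
// offset need not be the multiple of 4 that DS-form operations require;
// record that this function performs such non-RI spills.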
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
if (VT != MVT::i64)
return;
if (FrameIdx < 0)
return;
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
if (MFI.getObjectAlign(FrameIdx) >= Align(4))
return;
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
FuncInfo->setHasNonRISpills();
}
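// SelectAddressRegImm - Return true and the components if the address can be
// represented as a base register plus a signed 16-bit displacement [r+imm].
// A non-empty EncodingAlignment requires the displacement to be a multiple
// of that alignment, as needed by DS/DQ-form instructions.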
bool PPCTargetLowering::SelectAddressRegImm(
SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG,
MaybeAlign EncodingAlignment) const {
SDLoc dl(N);
if (SelectAddressPCRel(N, Base))
return false;
if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
return false;
if (N.getOpcode() == ISD::ADD) {
int16_t imm = 0;
if (isIntS16Immediate(N.getOperand(1), imm) &&
(!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
} else {
Base = N.getOperand(0);
}
return true;
} else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
&& "Cannot handle constant offsets yet!");
Disp = N.getOperand(1).getOperand(0);
assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
Disp.getOpcode() == ISD::TargetConstantPool ||
Disp.getOpcode() == ISD::TargetJumpTable);
Base = N.getOperand(0);
return true;
}
} else if (N.getOpcode() == ISD::OR) {
int16_t imm = 0;
if (isIntS16Immediate(N.getOperand(1), imm) &&
(!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
if (FrameIndexSDNode *FI =
dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
} else {
Base = N.getOperand(0);
}
Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
return true;
}
}
} else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
int16_t Imm;
if (isIntS16Immediate(CN, Imm) &&
(!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {
Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
CN->getValueType(0));
return true;
}
if ((CN->getValueType(0) == MVT::i32 ||
(int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
(!EncodingAlignment ||
isAligned(*EncodingAlignment, CN->getZExtValue()))) {
int Addr = (int)CN->getZExtValue();
Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
MVT::i32);
unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
return true;
}
}
Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
} else
Base = N;
return true;
}
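// SelectAddressRegImm34 - Similar to SelectAddressRegImm, but for the 34-bit
// displacements used by prefixed instructions.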
bool PPCTargetLowering::SelectAddressRegImm34(SDValue N, SDValue &Disp,
SDValue &Base,
SelectionDAG &DAG) const {
if (N.getValueType() != MVT::i64)
return false;
SDLoc dl(N);
int64_t Imm = 0;
if (N.getOpcode() == ISD::ADD) {
if (!isIntS34Immediate(N.getOperand(1), Imm))
return false;
Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
else
Base = N.getOperand(0);
return true;
}
if (N.getOpcode() == ISD::OR) {
if (!isIntS34Immediate(N.getOperand(1), Imm))
return false;
KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
if ((LHSKnown.Zero.getZExtValue() | ~(uint64_t)Imm) != ~0ULL)
return false;
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
else
Base = N.getOperand(0);
Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
return true;
}
if (isIntS34Immediate(N, Imm)) {
Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
Base = DAG.getRegister(PPC::ZERO8, N.getValueType());
return true;
}
return false;
}
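// SelectAddressRegRegOnly - Force the address into an indexed [r+r] form,
// using the zero register as the base when nothing better is available.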
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
SDValue &Index,
SelectionDAG &DAG) const {
if (SelectAddressRegReg(N, Base, Index, DAG))
return true;
int16_t imm = 0;
if (N.getOpcode() == ISD::ADD &&
(!isIntS16Immediate(N.getOperand(1), imm) ||
!N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
Base = N.getOperand(0);
Index = N.getOperand(1);
return true;
}
Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
N.getValueType());
Index = N;
return true;
}
template <typename Ty> static bool isValidPCRelNode(SDValue N) {
Ty *PCRelCand = dyn_cast<Ty>(N);
return PCRelCand && (PCRelCand->getTargetFlags() & PPCII::MO_PCREL_FLAG);
}
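// SelectAddressPCRel - Return true if the address is (or will be
// materialized as) a PC-relative reference.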
bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const {
Base = N;
if (N.getOpcode() == PPCISD::MAT_PCREL_ADDR)
return true;
if (isValidPCRelNode<ConstantPoolSDNode>(N) ||
isValidPCRelNode<GlobalAddressSDNode>(N) ||
isValidPCRelNode<JumpTableSDNode>(N) ||
isValidPCRelNode<BlockAddressSDNode>(N))
return true;
return false;
}
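// Return true if this load's only user is a (possibly permuted)
// scalar_to_vector, in which case it is better served by a direct
// load-to-vector instruction than by pre-increment formation.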
static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {
LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
if (!LD)
return false;
EVT MemVT = LD->getMemoryVT();
if (!MemVT.isSimple())
return false;
switch(MemVT.getSimpleVT().SimpleTy) {
case MVT::i64:
break;
case MVT::i32:
if (!ST.hasP8Vector())
return false;
break;
case MVT::i16:
case MVT::i8:
if (!ST.hasP9Vector())
return false;
break;
default:
return false;
}
SDValue LoadedVal(N, 0);
if (!LoadedVal.hasOneUse())
return false;
for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
UI != UE; ++UI)
if (UI.getUse().get().getResNo() == 0 &&
UI->getOpcode() != ISD::SCALAR_TO_VECTOR &&
UI->getOpcode() != PPCISD::SCALAR_TO_VECTOR_PERMUTED)
return false;
return true;
}
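// getPreIndexedAddressParts - Return true, with the base, offset and
// addressing mode, if this node's address can legally use pre-increment
// (update-form) loads and stores.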
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
SDValue &Offset,
ISD::MemIndexedMode &AM,
SelectionDAG &DAG) const {
if (DisablePPCPreinc) return false;
bool isLoad = true;
SDValue Ptr;
EVT VT;
Align Alignment;
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
Ptr = LD->getBasePtr();
VT = LD->getMemoryVT();
Alignment = LD->getAlign();
} else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
Ptr = ST->getBasePtr();
VT = ST->getMemoryVT();
Alignment = ST->getAlign();
isLoad = false;
} else
return false;
if (isLoad && usePartialVectorLoads(N, Subtarget)) {
return false;
}
if (VT.isVector())
return false;
if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
bool Swap = false;
if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
Swap = true;
else if (!isLoad) {
SDValue Val = cast<StoreSDNode>(N)->getValue();
if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
Swap = true;
}
if (Swap)
std::swap(Base, Offset);
AM = ISD::PRE_INC;
return true;
}
if (VT != MVT::i64) {
if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, None))
return false;
} else {
if (Alignment < Align(4))
return false;
if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4)))
return false;
}
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
LD->getExtensionType() == ISD::SEXTLOAD &&
isa<ConstantSDNode>(Offset))
return false;
}
AM = ISD::PRE_INC;
return true;
}
static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
unsigned &HiOpFlags, unsigned &LoOpFlags,
const GlobalValue *GV = nullptr) {
HiOpFlags = PPCII::MO_HA;
LoOpFlags = PPCII::MO_LO;
if (IsPIC) {
HiOpFlags |= PPCII::MO_PIC_FLAG;
LoOpFlags |= PPCII::MO_PIC_FLAG;
}
}
static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
SelectionDAG &DAG) {
SDLoc DL(HiPart);
EVT PtrVT = HiPart.getValueType();
SDValue Zero = DAG.getConstant(0, DL, PtrVT);
SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
if (isPIC)
Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
}
static void setUsesTOCBasePtr(MachineFunction &MF) {
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
FuncInfo->setUsesTOCBasePtr();
}
static void setUsesTOCBasePtr(SelectionDAG &DAG) {
setUsesTOCBasePtr(DAG.getMachineFunction());
}
SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
SDValue GA) const {
const bool Is64Bit = Subtarget.isPPC64();
EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
: Subtarget.isAIXABI()
? DAG.getRegister(PPC::R2, VT)
: DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
SDValue Ops[] = { GA, Reg };
return DAG.getMemIntrinsicNode(
PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
MachinePointerInfo::getGOT(DAG.getMachineFunction()), None,
MachineMemOperand::MOLoad);
}
SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
SelectionDAG &DAG) const {
EVT PtrVT = Op.getValueType();
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
const Constant *C = CP->getConstVal();
if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
if (Subtarget.isUsingPCRelativeCalls()) {
SDLoc DL(CP);
EVT Ty = getPointerTy(DAG.getDataLayout());
SDValue ConstPool = DAG.getTargetConstantPool(
C, Ty, CP->getAlign(), CP->getOffset(), PPCII::MO_PCREL_FLAG);
return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, ConstPool);
}
setUsesTOCBasePtr(DAG);
SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0);
return getTOCEntry(DAG, SDLoc(CP), GA);
}
unsigned MOHiFlag, MOLoFlag;
bool IsPIC = isPositionIndependent();
getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
if (IsPIC && Subtarget.isSVR4ABI()) {
SDValue GA =
DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), PPCII::MO_PIC_FLAG);
return getTOCEntry(DAG, SDLoc(CP), GA);
}
SDValue CPIHi =
DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOHiFlag);
SDValue CPILo =
DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOLoFlag);
return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
}
unsigned PPCTargetLowering::getJumpTableEncoding() const {
if (isJumpTableRelative())
return MachineJumpTableInfo::EK_LabelDifference32;
return TargetLowering::getJumpTableEncoding();
}
bool PPCTargetLowering::isJumpTableRelative() const {
if (UseAbsoluteJumpTables)
return false;
if (Subtarget.isPPC64() || Subtarget.isAIXABI())
return true;
return TargetLowering::isJumpTableRelative();
}
SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
SelectionDAG &DAG) const {
if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
switch (getTargetMachine().getCodeModel()) {
case CodeModel::Small:
case CodeModel::Medium:
return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
default:
return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
getPointerTy(DAG.getDataLayout()));
}
}
const MCExpr *
PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
unsigned JTI,
MCContext &Ctx) const {
if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
switch (getTargetMachine().getCodeModel()) {
case CodeModel::Small:
case CodeModel::Medium:
return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
default:
return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
}
}
SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
EVT PtrVT = Op.getValueType();
JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
if (Subtarget.isUsingPCRelativeCalls()) {
SDLoc DL(JT);
EVT Ty = getPointerTy(DAG.getDataLayout());
SDValue GA =
DAG.getTargetJumpTable(JT->getIndex(), Ty, PPCII::MO_PCREL_FLAG);
SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
return MatAddr;
}
if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
setUsesTOCBasePtr(DAG);
SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
return getTOCEntry(DAG, SDLoc(JT), GA);
}
unsigned MOHiFlag, MOLoFlag;
bool IsPIC = isPositionIndependent();
getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
if (IsPIC && Subtarget.isSVR4ABI()) {
SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
PPCII::MO_PIC_FLAG);
return getTOCEntry(DAG, SDLoc(GA), GA);
}
SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
}
SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
SelectionDAG &DAG) const {
EVT PtrVT = Op.getValueType();
BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
const BlockAddress *BA = BASDN->getBlockAddress();
if (Subtarget.isUsingPCRelativeCalls()) {
SDLoc DL(BASDN);
EVT Ty = getPointerTy(DAG.getDataLayout());
SDValue GA = DAG.getTargetBlockAddress(BA, Ty, BASDN->getOffset(),
PPCII::MO_PCREL_FLAG);
SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
return MatAddr;
}
if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
setUsesTOCBasePtr(DAG);
SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
return getTOCEntry(DAG, SDLoc(BASDN), GA);
}
if (Subtarget.is32BitELFABI() && isPositionIndependent())
return getTOCEntry(
DAG, SDLoc(BASDN),
DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()));
unsigned MOHiFlag, MOLoFlag;
bool IsPIC = isPositionIndependent();
getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
}
SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
SelectionDAG &DAG) const {
if (Subtarget.isAIXABI())
return LowerGlobalTLSAddressAIX(Op, DAG);
return LowerGlobalTLSAddressLinux(Op, DAG);
}
SDValue PPCTargetLowering::LowerGlobalTLSAddressAIX(SDValue Op,
SelectionDAG &DAG) const {
GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
if (DAG.getTarget().useEmulatedTLS())
report_fatal_error("Emulated TLS is not yet supported on AIX");
SDLoc dl(GA);
const GlobalValue *GV = GA->getGlobal();
EVT PtrVT = getPointerTy(DAG.getDataLayout());
SDValue VariableOffsetTGA =
DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, PPCII::MO_TLSGD_FLAG);
SDValue RegionHandleTGA =
DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, PPCII::MO_TLSGDM_FLAG);
SDValue VariableOffset = getTOCEntry(DAG, dl, VariableOffsetTGA);
SDValue RegionHandle = getTOCEntry(DAG, dl, RegionHandleTGA);
return DAG.getNode(PPCISD::TLSGD_AIX, dl, PtrVT, VariableOffset,
RegionHandle);
}
SDValue PPCTargetLowering::LowerGlobalTLSAddressLinux(SDValue Op,
SelectionDAG &DAG) const {
GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
if (DAG.getTarget().useEmulatedTLS())
return LowerToTLSEmulatedModel(GA, DAG);
SDLoc dl(GA);
const GlobalValue *GV = GA->getGlobal();
EVT PtrVT = getPointerTy(DAG.getDataLayout());
bool is64bit = Subtarget.isPPC64();
const Module *M = DAG.getMachineFunction().getFunction().getParent();
PICLevel::Level picLevel = M->getPICLevel();
const TargetMachine &TM = getTargetMachine();
TLSModel::Model Model = TM.getTLSModel(GV);
if (Model == TLSModel::LocalExec) {
if (Subtarget.isUsingPCRelativeCalls()) {
SDValue TLSReg = DAG.getRegister(PPC::X13, MVT::i64);
SDValue TGA = DAG.getTargetGlobalAddress(
GV, dl, PtrVT, 0, (PPCII::MO_PCREL_FLAG | PPCII::MO_TPREL_FLAG));
SDValue MatAddr =
DAG.getNode(PPCISD::TLS_LOCAL_EXEC_MAT_ADDR, dl, PtrVT, TGA);
return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TLSReg, MatAddr);
}
SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
PPCII::MO_TPREL_HA);
SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
PPCII::MO_TPREL_LO);
SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
: DAG.getRegister(PPC::R2, MVT::i32);
SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
}
if (Model == TLSModel::InitialExec) {
bool IsPCRel = Subtarget.isUsingPCRelativeCalls();
SDValue TGA = DAG.getTargetGlobalAddress(
GV, dl, PtrVT, 0, IsPCRel ? PPCII::MO_GOT_TPREL_PCREL_FLAG : 0);
SDValue TGATLS = DAG.getTargetGlobalAddress(
GV, dl, PtrVT, 0,
IsPCRel ? (PPCII::MO_TLS | PPCII::MO_PCREL_FLAG) : PPCII::MO_TLS);
SDValue TPOffset;
if (IsPCRel) {
SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, dl, PtrVT, TGA);
TPOffset = DAG.getLoad(MVT::i64, dl, DAG.getEntryNode(), MatPCRel,
MachinePointerInfo());
} else {
SDValue GOTPtr;
if (is64bit) {
setUsesTOCBasePtr(DAG);
SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
GOTPtr =
DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, PtrVT, GOTReg, TGA);
} else {
if (!TM.isPositionIndependent())
GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
else if (picLevel == PICLevel::SmallPIC)
GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
else
GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
}
TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, PtrVT, TGA, GOTPtr);
}
return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
}
if (Model == TLSModel::GeneralDynamic) {
if (Subtarget.isUsingPCRelativeCalls()) {
SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
PPCII::MO_GOT_TLSGD_PCREL_FLAG);
return DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA);
}
SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
SDValue GOTPtr;
if (is64bit) {
setUsesTOCBasePtr(DAG);
SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
GOTReg, TGA);
} else {
if (picLevel == PICLevel::SmallPIC)
GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
else
GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
}
return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
GOTPtr, TGA, TGA);
}
if (Model == TLSModel::LocalDynamic) {
if (Subtarget.isUsingPCRelativeCalls()) {
SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
PPCII::MO_GOT_TLSLD_PCREL_FLAG);
SDValue MatPCRel =
DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA);
return DAG.getNode(PPCISD::PADDI_DTPREL, dl, PtrVT, MatPCRel, TGA);
}
SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
SDValue GOTPtr;
if (is64bit) {
setUsesTOCBasePtr(DAG);
SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
GOTReg, TGA);
} else {
if (picLevel == PICLevel::SmallPIC)
GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
else
GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
}
SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
PtrVT, GOTPtr, TGA, TGA);
SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
PtrVT, TLSAddr, TGA);
return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
}
llvm_unreachable("Unknown TLS model!");
}
SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) const {
EVT PtrVT = Op.getValueType();
GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
SDLoc DL(GSDN);
const GlobalValue *GV = GSDN->getGlobal();
if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
if (Subtarget.isUsingPCRelativeCalls()) {
EVT Ty = getPointerTy(DAG.getDataLayout());
if (isAccessedAsGotIndirect(Op)) {
SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
PPCII::MO_PCREL_FLAG |
PPCII::MO_GOT_FLAG);
SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
SDValue Load = DAG.getLoad(MVT::i64, DL, DAG.getEntryNode(), MatPCRel,
MachinePointerInfo());
return Load;
} else {
SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
PPCII::MO_PCREL_FLAG);
return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
}
}
setUsesTOCBasePtr(DAG);
SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
return getTOCEntry(DAG, DL, GA);
}
unsigned MOHiFlag, MOLoFlag;
bool IsPIC = isPositionIndependent();
getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
if (IsPIC && Subtarget.isSVR4ABI()) {
SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
GSDN->getOffset(),
PPCII::MO_PIC_FLAG);
return getTOCEntry(DAG, DL, GA);
}
SDValue GAHi =
DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
SDValue GALo =
DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
return LowerLabelRef(GAHi, GALo, IsPIC, DAG);
}
SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
bool IsStrict = Op->isStrictFPOpcode();
ISD::CondCode CC =
cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get();
SDValue LHS = Op.getOperand(IsStrict ? 1 : 0);
SDValue RHS = Op.getOperand(IsStrict ? 2 : 1);
SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
EVT LHSVT = LHS.getValueType();
SDLoc dl(Op);
if (LHSVT == MVT::f128) {
assert(!Subtarget.hasP9Vector() &&
"SETCC for f128 is already legal under Power9!");
softenSetCCOperands(DAG, LHSVT, LHS, RHS, CC, dl, LHS, RHS, Chain,
Op->getOpcode() == ISD::STRICT_FSETCCS);
if (RHS.getNode())
LHS = DAG.getNode(ISD::SETCC, dl, Op.getValueType(), LHS, RHS,
DAG.getCondCode(CC));
if (IsStrict)
return DAG.getMergeValues({LHS, Chain}, dl);
return LHS;
}
assert(!IsStrict && "Don't know how to handle STRICT_FSETCC!");
if (Op.getValueType() == MVT::v2i64) {
if (LHS.getValueType() == MVT::v2i64) {
if (CC != ISD::SETEQ && CC != ISD::SETNE)
return SDValue();
SDValue SetCC32 = DAG.getSetCC(
dl, MVT::v4i32, DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, LHS),
DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, RHS), CC);
int ShuffV[] = {1, 0, 3, 2};
SDValue Shuff =
DAG.getVectorShuffle(MVT::v4i32, dl, SetCC32, SetCC32, ShuffV);
return DAG.getBitcast(MVT::v2i64,
DAG.getNode(CC == ISD::SETEQ ? ISD::AND : ISD::OR,
dl, MVT::v4i32, Shuff, SetCC32));
}
return Op;
}
if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
return V;
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
if (C->isAllOnes() || C->isZero())
return SDValue();
}
if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
EVT VT = Op.getValueType();
SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, LHS, RHS);
return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
}
return SDValue();
}
SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
SDNode *Node = Op.getNode();
EVT VT = Node->getValueType(0);
EVT PtrVT = getPointerTy(DAG.getDataLayout());
SDValue InChain = Node->getOperand(0);
SDValue VAListPtr = Node->getOperand(1);
const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
SDLoc dl(Node);
assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
VAListPtr, MachinePointerInfo(SV), MVT::i8);
InChain = GprIndex.getValue(1);
if (VT == MVT::i64) {
SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
DAG.getConstant(1, dl, MVT::i32));
SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
DAG.getConstant(1, dl, MVT::i32));
GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
GprIndex);
}
SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
DAG.getConstant(1, dl, MVT::i32));
SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
FprPtr, MachinePointerInfo(SV), MVT::i8);
InChain = FprIndex.getValue(1);
SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
DAG.getConstant(8, dl, MVT::i32));
SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
DAG.getConstant(4, dl, MVT::i32));
SDValue OverflowArea =
DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
InChain = OverflowArea.getValue(1);
SDValue RegSaveArea =
DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
InChain = RegSaveArea.getValue(1);
SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
VT.isInteger() ? GprIndex : FprIndex,
DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
MVT::i32));
SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
RegConstant);
if (VT.isFloatingPoint())
OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
DAG.getConstant(32, dl, MVT::i32));
SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
VT.isInteger() ? GprIndex : FprIndex,
DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
MVT::i32));
InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
VT.isInteger() ? VAListPtr : FprPtr,
MachinePointerInfo(SV), MVT::i8);
SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
DAG.getConstant(VT.isInteger() ? 4 : 8,
dl, MVT::i32));
OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
OverflowAreaPlusN);
InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
MachinePointerInfo(), MVT::i32);
return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
}
SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2),
DAG.getConstant(12, SDLoc(Op), MVT::i32), Align(8),
false, true, false, MachinePointerInfo(),
MachinePointerInfo());
}
SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
SelectionDAG &DAG) const {
if (Subtarget.isAIXABI())
report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");
return Op.getOperand(0);
}
SDValue PPCTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
PPCFunctionInfo &MFI = *MF.getInfo<PPCFunctionInfo>();
assert((Op.getOpcode() == ISD::INLINEASM ||
Op.getOpcode() == ISD::INLINEASM_BR) &&
"Expecting Inline ASM node.");
if (MFI.isLRStoreRequired())
return Op;
unsigned NumOps = Op.getNumOperands();
if (Op.getOperand(NumOps - 1).getValueType() == MVT::Glue)
--NumOps;
for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
unsigned Flags = cast<ConstantSDNode>(Op.getOperand(i))->getZExtValue();
unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
++i;
switch (InlineAsm::getKind(Flags)) {
default:
llvm_unreachable("Bad flags!");
case InlineAsm::Kind_RegUse:
case InlineAsm::Kind_Imm:
case InlineAsm::Kind_Mem:
i += NumVals;
break;
case InlineAsm::Kind_Clobber:
case InlineAsm::Kind_RegDef:
case InlineAsm::Kind_RegDefEarlyClobber: {
for (; NumVals; --NumVals, ++i) {
Register Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg();
if (Reg != PPC::LR && Reg != PPC::LR8)
continue;
MFI.setLRStoreRequired();
return Op;
}
break;
}
}
}
return Op;
}
SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
SelectionDAG &DAG) const {
if (Subtarget.isAIXABI())
report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX.");
SDValue Chain = Op.getOperand(0);
SDValue Trmp = Op.getOperand(1); // trampoline
SDValue FPtr = Op.getOperand(2); // entry point
SDValue Nest = Op.getOperand(3); // 'nest' parameter value
SDLoc dl(Op);
EVT PtrVT = getPointerTy(DAG.getDataLayout());
bool isPPC64 = (PtrVT == MVT::i64);
Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Ty = IntPtrTy;
Entry.Node = Trmp; Args.push_back(Entry);
Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
isPPC64 ? MVT::i64 : MVT::i32);
Args.push_back(Entry);
Entry.Node = FPtr; Args.push_back(Entry);
Entry.Node = Nest; Args.push_back(Entry);
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
CallingConv::C, Type::getVoidTy(*DAG.getContext()),
DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
return CallResult.second;
}
SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
EVT PtrVT = getPointerTy(MF.getDataLayout());
SDLoc dl(Op);
if (Subtarget.isPPC64() || Subtarget.isAIXABI()) {
SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
MachinePointerInfo(SV));
}
SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
PtrVT);
SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
PtrVT);
uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
uint64_t FPROffset = 1;
SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
SDValue firstStore =
DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
MachinePointerInfo(SV), MVT::i8);
uint64_t nextOffset = FPROffset;
SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
ConstFPROffset);
SDValue secondStore =
DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
MachinePointerInfo(SV, nextOffset), MVT::i8);
nextOffset += StackOffset;
nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
MachinePointerInfo(SV, nextOffset));
nextOffset += FrameOffset;
nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
return DAG.getStore(thirdStore, dl, FR, nextPtr,
MachinePointerInfo(SV, nextOffset));
}
static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5,
PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10,
PPC::F11, PPC::F12, PPC::F13};
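// CalculateStackSlotSize - Compute the number of bytes this argument
// occupies in the parameter save area.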
static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
unsigned PtrByteSize) {
unsigned ArgSize = ArgVT.getStoreSize();
if (Flags.isByVal())
ArgSize = Flags.getByValSize();
if (!Flags.isInConsecutiveRegs())
ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
return ArgSize;
}
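// CalculateStackSlotAlignment - Compute the alignment this argument requires
// in the parameter save area.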
static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
ISD::ArgFlagsTy Flags,
unsigned PtrByteSize) {
Align Alignment(PtrByteSize);
if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
Alignment = Align(16);
if (Flags.isByVal()) {
auto BVAlign = Flags.getNonZeroByValAlign();
if (BVAlign > PtrByteSize) {
if (BVAlign.value() % PtrByteSize != 0)
llvm_unreachable(
"ByVal alignment is not a multiple of the pointer size");
Alignment = BVAlign;
}
}
if (Flags.isInConsecutiveRegs()) {
if (Flags.isSplit() && OrigVT != MVT::ppcf128)
Alignment = Align(OrigVT.getStoreSize());
else
Alignment = Align(ArgVT.getStoreSize());
}
return Alignment;
}
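// CalculateStackSlotUsed - Return whether this argument consumes parameter
// save area memory rather than only registers; ArgOffset, AvailableFPRs and
// AvailableVRs track the current allocation state and are updated.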
static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags,
unsigned PtrByteSize, unsigned LinkageSize,
unsigned ParamAreaSize, unsigned &ArgOffset,
unsigned &AvailableFPRs,
unsigned &AvailableVRs) {
bool UseMemory = false;
Align Alignment =
CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
ArgOffset = alignTo(ArgOffset, Alignment);
if (ArgOffset >= LinkageSize + ParamAreaSize)
UseMemory = true;
ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
if (Flags.isInConsecutiveRegsLast())
ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
if (ArgOffset > LinkageSize + ParamAreaSize)
UseMemory = true;
if (!Flags.isByVal()) {
if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
if (AvailableFPRs > 0) {
--AvailableFPRs;
return false;
}
if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
if (AvailableVRs > 0) {
--AvailableVRs;
return false;
}
}
return UseMemory;
}
static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
unsigned NumBytes) {
return alignTo(NumBytes, Lowering->getStackAlign());
}
SDValue PPCTargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
if (Subtarget.isAIXABI())
return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
InVals);
if (Subtarget.is64BitELFABI())
return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
InVals);
assert(Subtarget.is32BitELFABI());
return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
InVals);
}
SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
EVT PtrVT = getPointerTy(MF.getDataLayout());
bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
(CallConv == CallingConv::Fast));
const Align PtrAlign(4);
SmallVector<CCValAssign, 16> ArgLocs;
PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
*DAG.getContext());
unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
CCInfo.AllocateStack(LinkageSize, PtrAlign);
if (useSoftFloat())
CCInfo.PreAnalyzeFormalArguments(Ins);
CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
CCInfo.clearWasPPCF128();
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
if (VA.isRegLoc()) {
const TargetRegisterClass *RC;
EVT ValVT = VA.getValVT();
switch (ValVT.getSimpleVT().SimpleTy) {
default:
llvm_unreachable("ValVT not supported by formal arguments Lowering");
case MVT::i1:
case MVT::i32:
RC = &PPC::GPRCRegClass;
break;
case MVT::f32:
if (Subtarget.hasP8Vector())
RC = &PPC::VSSRCRegClass;
else if (Subtarget.hasSPE())
RC = &PPC::GPRCRegClass;
else
RC = &PPC::F4RCRegClass;
break;
case MVT::f64:
if (Subtarget.hasVSX())
RC = &PPC::VSFRCRegClass;
else if (Subtarget.hasSPE())
RC = &PPC::GPRCRegClass;
else
RC = &PPC::F8RCRegClass;
break;
case MVT::v16i8:
case MVT::v8i16:
case MVT::v4i32:
RC = &PPC::VRRCRegClass;
break;
case MVT::v4f32:
RC = &PPC::VRRCRegClass;
break;
case MVT::v2f64:
case MVT::v2i64:
RC = &PPC::VRRCRegClass;
break;
}
SDValue ArgValue;
if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
assert(i + 1 < e && "No second half of double precision argument");
Register RegLo = MF.addLiveIn(VA.getLocReg(), RC);
Register RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
if (!Subtarget.isLittleEndian())
std::swap(ArgValueLo, ArgValueHi);
ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
ArgValueHi);
} else {
Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
ValVT == MVT::i1 ? MVT::i32 : ValVT);
if (ValVT == MVT::i1)
ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
}
InVals.push_back(ArgValue);
} else {
assert(VA.isMemLoc());
unsigned ArgSize = VA.getLocVT().getStoreSize();
unsigned ObjSize = VA.getValVT().getStoreSize();
unsigned ArgOffset = VA.getLocMemOffset();
ArgOffset += ArgSize - ObjSize;
int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
InVals.push_back(
DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
}
}
SmallVector<CCValAssign, 16> ByValArgLocs;
CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
ByValArgLocs, *DAG.getContext());
CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
MinReservedArea = std::max(MinReservedArea, LinkageSize);
MinReservedArea =
EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
FuncInfo->setMinReservedArea(MinReservedArea);
SmallVector<SDValue, 8> MemOps;
if (isVarArg) {
static const MCPhysReg GPArgRegs[] = {
PPC::R3, PPC::R4, PPC::R5, PPC::R6,
PPC::R7, PPC::R8, PPC::R9, PPC::R10,
};
const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
static const MCPhysReg FPArgRegs[] = {
PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
PPC::F8
};
unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
if (useSoftFloat() || hasSPE())
NumFPArgRegs = 0;
FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
FuncInfo->setVarArgsStackOffset(
MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
CCInfo.getNextStackOffset(), true));
FuncInfo->setVarArgsFrameIndex(
MFI.CreateStackObject(Depth, Align(8), false));
SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
Register VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
if (!VReg)
VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
SDValue Store =
DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
MemOps.push_back(Store);
SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
}
for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
Register VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
if (!VReg)
VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
SDValue Store =
DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
MemOps.push_back(Store);
SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
PtrVT);
FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
}
}
if (!MemOps.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
return Chain;
}
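// PPC64 passes i8, i16, and i32 values in i64 registers. Promote
// value to MVT::i64 and then truncate to the correct register size.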
SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
EVT ObjectVT, SelectionDAG &DAG,
SDValue ArgVal,
const SDLoc &dl) const {
if (Flags.isSExt())
ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
DAG.getValueType(ObjectVT));
else if (Flags.isZExt())
ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
DAG.getValueType(ObjectVT));
return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
}
SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
bool isELFv2ABI = Subtarget.isELFv2ABI();
bool isLittleEndian = Subtarget.isLittleEndian();
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
assert(!(CallConv == CallingConv::Fast && isVarArg) &&
"fastcc not supported on varargs functions");
EVT PtrVT = getPointerTy(MF.getDataLayout());
bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
(CallConv == CallingConv::Fast));
unsigned PtrByteSize = 8;
unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
static const MCPhysReg GPR[] = {
PPC::X3, PPC::X4, PPC::X5, PPC::X6,
PPC::X7, PPC::X8, PPC::X9, PPC::X10,
};
static const MCPhysReg VR[] = {
PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
};
const unsigned Num_GPR_Regs = array_lengthof(GPR);
const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
const unsigned Num_VR_Regs = array_lengthof(VR);
bool HasParameterArea = !isELFv2ABI || isVarArg;
unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
unsigned NumBytes = LinkageSize;
unsigned AvailableFPRs = Num_FPR_Regs;
unsigned AvailableVRs = Num_VR_Regs;
for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
if (Ins[i].Flags.isNest())
continue;
if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
PtrByteSize, LinkageSize, ParamAreaSize,
NumBytes, AvailableFPRs, AvailableVRs))
HasParameterArea = true;
}
unsigned ArgOffset = LinkageSize;
unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
SmallVector<SDValue, 8> MemOps;
Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
unsigned CurArgIdx = 0;
for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
SDValue ArgVal;
bool needsLoad = false;
EVT ObjectVT = Ins[ArgNo].VT;
EVT OrigVT = Ins[ArgNo].ArgVT;
unsigned ObjSize = ObjectVT.getStoreSize();
unsigned ArgSize = ObjSize;
ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
if (Ins[ArgNo].isOrigArg()) {
std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
CurArgIdx = Ins[ArgNo].getOrigArgIndex();
}
unsigned CurArgOffset;
Align Alignment;
auto ComputeArgOffset = [&]() {
Alignment =
CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
ArgOffset = alignTo(ArgOffset, Alignment);
CurArgOffset = ArgOffset;
};
if (CallConv != CallingConv::Fast) {
ComputeArgOffset();
GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
}
if (Flags.isByVal()) {
assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
if (CallConv == CallingConv::Fast)
ComputeArgOffset();
ObjSize = Flags.getByValSize();
ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
if (!ObjSize) {
int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
InVals.push_back(FIN);
continue;
}
int FI;
if (HasParameterArea ||
ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
else
FI = MFI.CreateStackObject(ArgSize, Alignment, false);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
if (ObjSize < PtrByteSize) {
SDValue Arg = FIN;
if (!isLittleEndian) {
SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
}
InVals.push_back(Arg);
if (GPR_idx != Num_GPR_Regs) {
Register VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
FuncInfo->addLiveInAttr(VReg, Flags);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
EVT ObjType = EVT::getIntegerVT(*DAG.getContext(), ObjSize * 8);
SDValue Store =
DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
MachinePointerInfo(&*FuncArg), ObjType);
MemOps.push_back(Store);
}
ArgOffset += PtrByteSize;
continue;
}
InVals.push_back(FIN);
for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
if (GPR_idx == Num_GPR_Regs)
break;
Register VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
FuncInfo->addLiveInAttr(VReg, Flags);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
SDValue Addr = FIN;
if (j) {
SDValue Off = DAG.getConstant(j, dl, PtrVT);
Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
}
unsigned StoreSizeInBits = std::min(PtrByteSize, (ObjSize - j)) * 8;
EVT ObjType = EVT::getIntegerVT(*DAG.getContext(), StoreSizeInBits);
SDValue Store =
DAG.getTruncStore(Val.getValue(1), dl, Val, Addr,
MachinePointerInfo(&*FuncArg, j), ObjType);
MemOps.push_back(Store);
++GPR_idx;
}
ArgOffset += ArgSize;
continue;
}
switch (ObjectVT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unhandled argument type!");
case MVT::i1:
case MVT::i32:
case MVT::i64:
if (Flags.isNest()) {
Register VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
break;
}
if (GPR_idx != Num_GPR_Regs) {
Register VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
FuncInfo->addLiveInAttr(VReg, Flags);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
} else {
if (CallConv == CallingConv::Fast)
ComputeArgOffset();
needsLoad = true;
ArgSize = PtrByteSize;
}
if (CallConv != CallingConv::Fast || needsLoad)
ArgOffset += 8;
break;
case MVT::f32:
case MVT::f64:
if (FPR_idx != Num_FPR_Regs) {
unsigned VReg;
if (ObjectVT == MVT::f32)
VReg = MF.addLiveIn(FPR[FPR_idx],
Subtarget.hasP8Vector()
? &PPC::VSSRCRegClass
: &PPC::F4RCRegClass);
else
VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
? &PPC::VSFRCRegClass
: &PPC::F8RCRegClass);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
++FPR_idx;
} else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
Register VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
FuncInfo->addLiveInAttr(VReg, Flags);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
if (ObjectVT == MVT::f32) {
if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
DAG.getConstant(32, dl, MVT::i32));
ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
}
ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
} else {
if (CallConv == CallingConv::Fast)
ComputeArgOffset();
needsLoad = true;
}
if (CallConv != CallingConv::Fast || needsLoad) {
ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
ArgOffset += ArgSize;
if (Flags.isInConsecutiveRegsLast())
ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
}
break;
case MVT::v4f32:
case MVT::v4i32:
case MVT::v8i16:
case MVT::v16i8:
case MVT::v2f64:
case MVT::v2i64:
case MVT::v1i128:
case MVT::f128:
if (VR_idx != Num_VR_Regs) {
Register VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
++VR_idx;
} else {
if (CallConv == CallingConv::Fast)
ComputeArgOffset();
needsLoad = true;
}
if (CallConv != CallingConv::Fast || needsLoad)
ArgOffset += 16;
break;
}
if (needsLoad) {
if (ObjSize < ArgSize && !isLittleEndian)
CurArgOffset += ArgSize - ObjSize;
int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
}
InVals.push_back(ArgVal);
}
unsigned MinReservedArea;
if (HasParameterArea)
MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
else
MinReservedArea = LinkageSize;
MinReservedArea =
EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
FuncInfo->setMinReservedArea(MinReservedArea);
if (isVarArg && MFI.hasVAStart()) {
int Depth = ArgOffset;
FuncInfo->setVarArgsFrameIndex(
MFI.CreateFixedObject(PtrByteSize, Depth, true));
SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
GPR_idx < Num_GPR_Regs; ++GPR_idx) {
Register VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
SDValue Store =
DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
MemOps.push_back(Store);
SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
}
}
if (!MemOps.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
return Chain;
}
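/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
/// adjusted to accommodate the arguments for the tailcall.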
static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
unsigned ParamSize) {
if (!isTailCall) return 0;
PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
unsigned CallerMinReservedArea = FI->getMinReservedArea();
int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
if (SPDiff < FI->getTailCallSPDelta())
FI->setTailCallSPDelta(SPDiff);
return SPDiff;
}
static bool isFunctionGlobalAddress(SDValue Callee);
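// Returns true if the callee can be proven to share the same TOC base as the
// caller, in which case the TOC restore after the call (and the nop slot
// reserved for it) is unnecessary.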
static bool callsShareTOCBase(const Function *Caller, SDValue Callee,
const TargetMachine &TM) {
#ifndef NDEBUG
const PPCSubtarget *STICaller = &TM.getSubtarget<PPCSubtarget>(*Caller);
assert(!STICaller->isUsingPCRelativeCalls() &&
"PC Relative callers do not have a TOC and cannot share a TOC Base");
#endif
GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
if (!G)
return false;
const GlobalValue *GV = G->getGlobal();
if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
return false;
const Function *F = dyn_cast<Function>(GV);
const GlobalAlias *Alias = dyn_cast<GlobalAlias>(GV);
if (Alias) {
const GlobalObject *GlobalObj = Alias->getAliaseeObject();
F = dyn_cast<Function>(GlobalObj);
}
if (!F)
return false;
const PPCSubtarget *STICallee = &TM.getSubtarget<PPCSubtarget>(*F);
if (STICallee->isUsingPCRelativeCalls())
return false;
if (!GV->isStrongDefinitionForLinker())
return false;
if (CodeModel::Medium == TM.getCodeModel() ||
CodeModel::Large == TM.getCodeModel())
return true;
if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
GV->getSection() != Caller->getSection())
return false;
if (const auto *F = dyn_cast<Function>(GV)) {
if (F->getSectionPrefix() != Caller->getSectionPrefix())
return false;
}
return true;
}
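// Returns true if any of the outgoing arguments of a 64-bit ELF call would
// have to be placed in the parameter save area instead of being passed
// entirely in registers.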
static bool
needStackSlotPassParameters(const PPCSubtarget &Subtarget,
const SmallVectorImpl<ISD::OutputArg> &Outs) {
assert(Subtarget.is64BitELFABI());
const unsigned PtrByteSize = 8;
const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
static const MCPhysReg GPR[] = {
PPC::X3, PPC::X4, PPC::X5, PPC::X6,
PPC::X7, PPC::X8, PPC::X9, PPC::X10,
};
static const MCPhysReg VR[] = {
PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
};
const unsigned NumGPRs = array_lengthof(GPR);
const unsigned NumFPRs = 13;
const unsigned NumVRs = array_lengthof(VR);
const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
unsigned NumBytes = LinkageSize;
unsigned AvailableFPRs = NumFPRs;
unsigned AvailableVRs = NumVRs;
for (const ISD::OutputArg& Param : Outs) {
if (Param.Flags.isNest()) continue;
if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags, PtrByteSize,
LinkageSize, ParamAreaSize, NumBytes,
AvailableFPRs, AvailableVRs))
return true;
}
return false;
}
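// Returns true if each actual argument at the call site is either the
// caller's corresponding formal argument or an undef value of the same type,
// i.e. the callee is invoked with the caller's own argument list.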
static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) {
if (CB.arg_size() != CallerFn->arg_size())
return false;
auto CalleeArgIter = CB.arg_begin();
auto CalleeArgEnd = CB.arg_end();
Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
const Value* CalleeArg = *CalleeArgIter;
const Value* CallerArg = &(*CallerArgIter);
if (CalleeArg == CallerArg)
continue;
if (CalleeArg->getType() == CallerArg->getType() &&
isa<UndefValue>(CalleeArg))
continue;
return false;
}
return true;
}
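// Returns true if TCO is possible between the callers and callees
// calling conventions.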
static bool
areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
CallingConv::ID CalleeCC) {
auto isTailCallableCC = [] (CallingConv::ID CC){
return CC == CallingConv::C || CC == CallingConv::Fast;
};
if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
return false;
return CallerCC == CallingConv::C || CallerCC == CalleeCC;
}
bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
SDValue Callee, CallingConv::ID CalleeCC, const CallBase *CB, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
if (DisableSCO && !TailCallOpt) return false;
if (isVarArg) return false;
auto &Caller = DAG.getMachineFunction().getFunction();
if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
return false;
if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
return false;
if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
return false;
if (Caller.getCallingConv() != CalleeCC &&
needStackSlotPassParameters(Subtarget, Outs))
return false;
if (!Subtarget.isUsingPCRelativeCalls() &&
!isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee))
return false;
if (!Subtarget.isUsingPCRelativeCalls() &&
!callsShareTOCBase(&Caller, Callee, getTargetMachine()))
return false;
if (CalleeCC == CallingConv::Fast && TailCallOpt)
return true;
if (DisableSCO) return false;
if (CB && !hasSameArgumentList(&Caller, *CB) &&
needStackSlotPassParameters(Subtarget, Outs))
return false;
else if (!CB && needStackSlotPassParameters(Subtarget, Outs))
return false;
return true;
}
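/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.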
bool
PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
CallingConv::ID CalleeCC,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const {
if (!getTargetMachine().Options.GuaranteedTailCallOpt)
return false;
if (isVarArg)
return false;
MachineFunction &MF = DAG.getMachineFunction();
CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
for (unsigned i = 0; i != Ins.size(); i++) {
ISD::ArgFlagsTy Flags = Ins[i].Flags;
if (Flags.isByVal()) return false;
}
if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
return true;
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
return G->getGlobal()->hasHiddenVisibility()
|| G->getGlobal()->hasProtectedVisibility();
}
return false;
}
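/// isBLACompatibleAddress - Return the immediate to use if the specified
/// 32-bit value is representable in the immediate field of a BxA instruction.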
static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
if (!C) return nullptr;
int Addr = C->getZExtValue();
if ((Addr & 3) != 0 || SignExtend32<26>(Addr) != Addr)
return nullptr;
return DAG
.getConstant(
(int)C->getZExtValue() >> 2, SDLoc(Op),
DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
.getNode();
}
namespace {
struct TailCallArgumentInfo {
SDValue Arg;
SDValue FrameIdxOp;
int FrameIdx = 0;
TailCallArgumentInfo() = default;
};
} // end anonymous namespace
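/// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.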
static void StoreTailCallArgumentsToStackSlot(
SelectionDAG &DAG, SDValue Chain,
const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
SDValue Arg = TailCallArgs[i].Arg;
SDValue FIN = TailCallArgs[i].FrameIdxOp;
int FI = TailCallArgs[i].FrameIdx;
MemOpChains.push_back(DAG.getStore(
Chain, dl, Arg, FIN,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
}
}
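/// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
/// the appropriate stack slot for the tail call optimized function call.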
static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
SDValue OldRetAddr, SDValue OldFP,
int SPDiff, const SDLoc &dl) {
if (SPDiff) {
MachineFunction &MF = DAG.getMachineFunction();
const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
const PPCFrameLowering *FL = Subtarget.getFrameLowering();
bool isPPC64 = Subtarget.isPPC64();
int SlotSize = isPPC64 ? 8 : 4;
int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
NewRetAddrLoc, true);
EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
MachinePointerInfo::getFixedStack(MF, NewRetAddr));
}
return Chain;
}
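/// CalculateTailCallArgDest - Remember Argument for later processing. Calculate
/// the position of the argument.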
static void
CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
SDValue Arg, int SPDiff, unsigned ArgOffset,
SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
int Offset = ArgOffset + SPDiff;
uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
SDValue FIN = DAG.getFrameIndex(FI, VT);
TailCallArgumentInfo Info;
Info.Arg = Arg;
Info.FrameIdxOp = FIN;
Info.FrameIdx = FI;
TailCallArguments.push_back(Info);
}
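/// EmitTailCallLoadFPAndRetAddr - Load the return address from its incoming
/// stack slot so it can be re-stored at its new location once the stack has
/// been adjusted for the tail call. Returns the updated chain.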
SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
SDValue &FPOpOut, const SDLoc &dl) const {
if (SPDiff) {
EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
LROpOut = getReturnAddrFrameIndex(DAG);
LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
Chain = SDValue(LROpOut.getNode(), 1);
}
return Chain;
}
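/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size".  Alignment information is
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter.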
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
SDValue Chain, ISD::ArgFlagsTy Flags,
SelectionDAG &DAG, const SDLoc &dl) {
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
Flags.getNonZeroByValAlign(), false, false, false,
MachinePointerInfo(), MachinePointerInfo());
}
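/// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
/// tail calls.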
static void LowerMemOpCallTo(
SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
if (!isTailCall) {
if (isVector) {
SDValue StackPtr;
if (isPPC64)
StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
else
StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
DAG.getConstant(ArgOffset, dl, PtrVT));
}
MemOpChains.push_back(
DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
} else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
TailCallArguments);
}
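// PrepareTailCall - Store tail-call arguments into their final stack slots,
// save the updated return address, and close the call sequence.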
static void
PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
SDValue FPOp,
SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
SmallVector<SDValue, 8> MemOpChains2;
InFlag = SDValue();
StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
MemOpChains2, dl);
if (!MemOpChains2.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
InFlag = Chain.getValue(1);
}
static bool isFunctionGlobalAddress(SDValue Callee) {
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
return false;
return G->getGlobal()->getValueType()->isFunctionTy();
}
return false;
}
SDValue PPCTargetLowering::LowerCallResult(
SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
*DAG.getContext());
CCRetInfo.AnalyzeCallResult(
Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
? RetCC_PPC_Cold
: RetCC_PPC);
for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
SDValue Val;
if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
InFlag);
Chain = Lo.getValue(1);
InFlag = Lo.getValue(2);
VA = RVLocs[++i]; // Skip ahead to the next location.
SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
InFlag);
Chain = Hi.getValue(1);
InFlag = Hi.getValue(2);
if (!Subtarget.isLittleEndian())
std::swap(Lo, Hi);
Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
} else {
Val = DAG.getCopyFromReg(Chain, dl,
VA.getLocReg(), VA.getLocVT(), InFlag);
Chain = Val.getValue(1);
InFlag = Val.getValue(2);
}
switch (VA.getLocInfo()) {
default: llvm_unreachable("Unknown loc info!");
case CCValAssign::Full: break;
case CCValAssign::AExt:
Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
break;
case CCValAssign::ZExt:
Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
DAG.getValueType(VA.getValVT()));
Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
break;
case CCValAssign::SExt:
Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
DAG.getValueType(VA.getValVT()));
Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
break;
}
InVals.push_back(Val);
}
return Chain;
}
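// Returns true if the call must be lowered as an indirect call (through the
// count register): the callee is neither a known global or external symbol
// nor, where a BLA is usable, an absolute address.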
static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
const PPCSubtarget &Subtarget, bool isPatchPoint) {
if (isPatchPoint)
return false;
if (isFunctionGlobalAddress(Callee) || isa<ExternalSymbolSDNode>(Callee))
return false;
if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
isBLACompatibleAddress(Callee, DAG))
return false;
return true;
}
static inline bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget) {
return Subtarget.isAIXABI() ||
(Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls());
}
static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
const Function &Caller, const SDValue &Callee,
const PPCSubtarget &Subtarget,
const TargetMachine &TM,
bool IsStrictFPCall = false) {
if (CFlags.IsTailCall)
return PPCISD::TC_RETURN;
unsigned RetOpc = 0;
if (CFlags.IsIndirect) {
RetOpc = isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC
: PPCISD::BCTRL;
} else if (Subtarget.isUsingPCRelativeCalls()) {
assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI.");
RetOpc = PPCISD::CALL_NOTOC;
} else if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
RetOpc = callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
: PPCISD::CALL_NOP;
else
RetOpc = PPCISD::CALL;
if (IsStrictFPCall) {
switch (RetOpc) {
default:
llvm_unreachable("Unknown call opcode");
case PPCISD::BCTRL_LOAD_TOC:
RetOpc = PPCISD::BCTRL_LOAD_TOC_RM;
break;
case PPCISD::BCTRL:
RetOpc = PPCISD::BCTRL_RM;
break;
case PPCISD::CALL_NOTOC:
RetOpc = PPCISD::CALL_NOTOC_RM;
break;
case PPCISD::CALL:
RetOpc = PPCISD::CALL_RM;
break;
case PPCISD::CALL_NOP:
RetOpc = PPCISD::CALL_NOP_RM;
break;
}
}
return RetOpc;
}
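// Rewrite the callee into the form expected by the call node: an absolute
// address usable by BLA, a target global address or external symbol (with a
// PLT flag when required), or, on AIX, the function entry-point symbol.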
static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
const SDLoc &dl, const PPCSubtarget &Subtarget) {
if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
return SDValue(Dest, 0);
auto isLocalCallee = [&]() {
const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
const GlobalValue *GV = G ? G->getGlobal() : nullptr;
return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
!isa_and_nonnull<GlobalIFunc>(GV);
};
bool UsePlt =
Subtarget.is32BitELFABI() && !isLocalCallee() &&
Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;
const auto getAIXFuncEntryPointSymbolSDNode = [&](const GlobalValue *GV) {
const TargetMachine &TM = Subtarget.getTargetMachine();
const TargetLoweringObjectFile *TLOF = TM.getObjFileLowering();
MCSymbolXCOFF *S =
cast<MCSymbolXCOFF>(TLOF->getFunctionEntryPointSymbol(GV, TM));
MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
return DAG.getMCSymbol(S, PtrVT);
};
if (isFunctionGlobalAddress(Callee)) {
const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
if (Subtarget.isAIXABI()) {
assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
return getAIXFuncEntryPointSymbolSDNode(GV);
}
return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
UsePlt ? PPCII::MO_PLT : 0);
}
if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
const char *SymName = S->getSymbol();
if (Subtarget.isAIXABI()) {
const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
if (const Function *F =
dyn_cast_or_null<Function>(Mod->getNamedValue(SymName)))
return getAIXFuncEntryPointSymbolSDNode(F);
const auto getExternalFunctionEntryPointSymbol = [&](StringRef SymName) {
auto &Context = DAG.getMachineFunction().getMMI().getContext();
MCSectionXCOFF *Sec = Context.getXCOFFSection(
(Twine(".") + Twine(SymName)).str(), SectionKind::getMetadata(),
XCOFF::CsectProperties(XCOFF::XMC_PR, XCOFF::XTY_ER));
return Sec->getQualNameSymbol();
};
SymName = getExternalFunctionEntryPointSymbol(SymName)->getName().data();
}
return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
UsePlt ? PPCII::MO_PLT : 0);
}
assert(Callee.getNode() && "What, no callee?");
return Callee;
}
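// Retrieve the output chain of a CALLSEQ_START node, skipping over the glue
// result when present.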
static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
"Expected a CALLSEQ_START SDNode.");
SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
if (LastValue.getValueType() != MVT::Glue)
return LastValue;
return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
}
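// Move the callee address into the count register by emitting an MTCTR node,
// updating the chain and glue for the subsequent BCTRL.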
static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
SDValue &Glue, SDValue &Chain,
const SDLoc &dl) {
SDValue MTCTROps[] = {Chain, Callee, Glue};
EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
Glue = Chain.getValue(1);
}
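// Function pointers in the 64-bit SVR4 ABI (and on AIX) point to a function
// descriptor rather than the entry point: the descriptor holds the entry
// point, the TOC anchor, and the environment pointer. Load all three, set up
// the TOC and environment-pointer registers, and branch through the CTR.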
static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
SDValue &Glue, SDValue &Chain,
SDValue CallSeqStart,
const CallBase *CB, const SDLoc &dl,
bool hasNest,
const PPCSubtarget &Subtarget) {
SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
? (MachineMemOperand::MODereferenceable |
MachineMemOperand::MOInvariant)
: MachineMemOperand::MONone;
MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr);
const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
const MCRegister TOCReg = Subtarget.getTOCPointerRegister();
const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();
const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;
SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
Alignment, MMOFlags);
SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl);
SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff);
SDValue TOCPtr =
DAG.getLoad(RegVT, dl, LDChain, AddTOC,
MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags);
SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl);
SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff);
SDValue LoadEnvPtr =
DAG.getLoad(RegVT, dl, LDChain, AddPtr,
MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags);
SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue);
Chain = TOCVal.getValue(0);
Glue = TOCVal.getValue(1);
assert((!hasNest || !Subtarget.isAIXABI()) &&
"Nest parameter is not supported on AIX.");
if (!hasNest) {
SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue);
Chain = EnvVal.getValue(0);
Glue = EnvVal.getValue(1);
}
prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl);
}
static void
buildCallOperands(SmallVectorImpl<SDValue> &Ops,
PPCTargetLowering::CallFlags CFlags, const SDLoc &dl,
SelectionDAG &DAG,
SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
const PPCSubtarget &Subtarget) {
const bool IsPPC64 = Subtarget.isPPC64();
const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
Ops.push_back(Chain);
if (!CFlags.IsIndirect)
Ops.push_back(Callee);
else {
assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect.");
if (isTOCSaveRestoreRequired(Subtarget)) {
const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT);
unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff);
Ops.push_back(AddTOC);
}
if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest)
Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(),
RegVT));
if (CFlags.IsTailCall)
Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
}
if (CFlags.IsTailCall)
Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
Ops.push_back(DAG.getRegister(RegsToPass[i].first,
RegsToPass[i].second.getValueType()));
if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) &&
!CFlags.IsPatchPoint && !Subtarget.isUsingPCRelativeCalls())
Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT));
if (CFlags.IsVarArg && Subtarget.is32BitELFABI())
Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
const uint32_t *Mask =
TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
Ops.push_back(DAG.getRegisterMask(Mask));
if (Glue.getNode())
Ops.push_back(Glue);
}
SDValue PPCTargetLowering::FinishCall(
CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const {
if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) ||
Subtarget.isAIXABI())
setUsesTOCBasePtr(DAG);
unsigned CallOpc =
getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee,
Subtarget, DAG.getTarget(), CB ? CB->isStrictFP() : false);
if (!CFlags.IsIndirect)
Callee = transformCallee(Callee, DAG, dl, Subtarget);
else if (Subtarget.usesFunctionDescriptors())
prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CB,
dl, CFlags.HasNest, Subtarget);
else
prepareIndirectCall(DAG, Callee, Glue, Chain, dl);
SmallVector<SDValue, 8> Ops;
buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee,
SPDiff, Subtarget);
if (CFlags.IsTailCall) {
assert(((Callee.getOpcode() == ISD::Register &&
cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
Callee.getOpcode() == ISD::TargetExternalSymbol ||
Callee.getOpcode() == ISD::TargetGlobalAddress ||
isa<ConstantSDNode>(Callee) ||
(CFlags.IsIndirect && Subtarget.isUsingPCRelativeCalls())) &&
"Expecting a global address, external symbol, absolute value, "
"register or an indirect tail call when PC Relative calls are "
"used.");
assert(CallOpc == PPCISD::TC_RETURN &&
"Unexpected call opcode for a tail call.");
DAG.getMachineFunction().getFrameInfo().setHasTailCall();
return DAG.getNode(CallOpc, dl, MVT::Other, Ops);
}
std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
DAG.addNoMergeSiteInfo(Chain.getNode(), CFlags.NoMerge);
Glue = Chain.getValue(1);
int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast &&
getTargetMachine().Options.GuaranteedTailCallOpt)
? NumBytes
: 0;
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
DAG.getIntPtrConstant(BytesCalleePops, dl, true),
Glue, dl);
Glue = Chain.getValue(1);
return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl,
DAG, InVals);
}
SDValue
PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
SDLoc &dl = CLI.DL;
SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &isTailCall = CLI.IsTailCall;
CallingConv::ID CallConv = CLI.CallConv;
bool isVarArg = CLI.IsVarArg;
bool isPatchPoint = CLI.IsPatchPoint;
const CallBase *CB = CLI.CB;
if (isTailCall) {
if (Subtarget.useLongCalls() && !(CB && CB->isMustTailCall()))
isTailCall = false;
else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
isTailCall = IsEligibleForTailCallOptimization_64SVR4(
Callee, CallConv, CB, isVarArg, Outs, Ins, DAG);
else
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
Ins, DAG);
if (isTailCall) {
++NumTailCalls;
if (!getTargetMachine().Options.GuaranteedTailCallOpt)
++NumSiblingCalls;
assert((Subtarget.isUsingPCRelativeCalls() ||
isa<GlobalAddressSDNode>(Callee)) &&
"Callee should be an llvm::Function object.");
LLVM_DEBUG(dbgs() << "TCO caller: " << DAG.getMachineFunction().getName()
<< "\nTCO callee: ");
LLVM_DEBUG(Callee.dump());
}
}
if (!isTailCall && CB && CB->isMustTailCall())
report_fatal_error("failed to perform tail call elimination on a call "
"site marked musttail");
if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
!isTailCall)
Callee = LowerGlobalAddress(Callee, DAG);
CallFlags CFlags(
CallConv, isTailCall, isVarArg, isPatchPoint,
isIndirectCall(Callee, DAG, Subtarget, isPatchPoint),
Subtarget.is64BitELFABI() &&
any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }),
CLI.NoMerge);
if (Subtarget.isAIXABI())
return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
InVals, CB);
assert(Subtarget.isSVR4ABI());
if (Subtarget.isPPC64())
return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
InVals, CB);
return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
InVals, CB);
}
SDValue PPCTargetLowering::LowerCall_32SVR4(
SDValue Chain, SDValue Callee, CallFlags CFlags,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
const CallBase *CB) const {
const CallingConv::ID CallConv = CFlags.CallConv;
const bool IsVarArg = CFlags.IsVarArg;
const bool IsTailCall = CFlags.IsTailCall;
assert((CallConv == CallingConv::C ||
CallConv == CallingConv::Cold ||
CallConv == CallingConv::Fast) && "Unknown calling convention!");
const Align PtrAlign(4);
MachineFunction &MF = DAG.getMachineFunction();
if (getTargetMachine().Options.GuaranteedTailCallOpt &&
CallConv == CallingConv::Fast)
MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
SmallVector<CCValAssign, 16> ArgLocs;
PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
PtrAlign);
if (useSoftFloat())
CCInfo.PreAnalyzeCallOperands(Outs);
if (IsVarArg) {
unsigned NumArgs = Outs.size();
for (unsigned i = 0; i != NumArgs; ++i) {
MVT ArgVT = Outs[i].VT;
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
bool Result;
if (Outs[i].IsFixed) {
Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
CCInfo);
} else {
Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
ArgFlags, CCInfo);
}
if (Result) {
#ifndef NDEBUG
errs() << "Call operand #" << i << " has unhandled type "
<< EVT(ArgVT).getEVTString() << "\n";
#endif
llvm_unreachable(nullptr);
}
}
} else {
CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
}
CCInfo.clearWasPPCF128();
SmallVector<CCValAssign, 16> ByValArgLocs;
CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext());
CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
unsigned NumBytes = CCByValInfo.getNextStackOffset();
int SPDiff = CalculateTailCallSPDiff(DAG, IsTailCall, NumBytes);
Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
SDValue CallSeqStart = Chain;
SDValue LROp, FPOp;
Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
SmallVector<SDValue, 8> MemOpChains;
bool seenFloatArg = false;
for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
i != e;
++i, ++RealArgIdx) {
CCValAssign &VA = ArgLocs[i];
SDValue Arg = OutVals[RealArgIdx];
ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;
if (Flags.isByVal()) {
assert((j < ByValArgLocs.size()) && "Index out of bounds!");
CCValAssign &ByValVA = ByValArgLocs[j++];
assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
unsigned LocMemOffset = ByValVA.getLocMemOffset();
SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
StackPtr, PtrOff);
SDValue MemcpyCall =
CreateCopyOfByValArgument(Arg, PtrOff,
CallSeqStart.getNode()->getOperand(0),
Flags, DAG, dl);
SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
SDLoc(MemcpyCall));
DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
NewCallSeqStart.getNode());
Chain = CallSeqStart = NewCallSeqStart;
Arg = PtrOff;
}
if (Arg.getValueType() == MVT::i1)
Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
dl, MVT::i32, Arg);
if (VA.isRegLoc()) {
seenFloatArg |= VA.getLocVT().isFloatingPoint();
if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
bool IsLE = Subtarget.isLittleEndian();
SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
SVal.getValue(0)));
} else
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
} else {
assert(VA.isMemLoc());
unsigned LocMemOffset = VA.getLocMemOffset();
if (!IsTailCall) {
SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
StackPtr, PtrOff);
MemOpChains.push_back(
DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
} else {
CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
TailCallArguments);
}
}
}
if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
SDValue InFlag;
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
RegsToPass[i].second, InFlag);
InFlag = Chain.getValue(1);
}
if (IsVarArg) {
SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, InFlag };
Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
InFlag = Chain.getValue(1);
}
if (IsTailCall)
PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
TailCallArguments);
return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
Callee, SPDiff, NumBytes, Ins, InVals, CB);
}
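// Copy an argument into memory, being careful to do this outside the
// call sequence for the call to which the argument belongs.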
SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
SelectionDAG &DAG, const SDLoc &dl) const {
SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
CallSeqStart.getNode()->getOperand(0),
Flags, DAG, dl);
int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
SDLoc(MemcpyCall));
DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
NewCallSeqStart.getNode());
return NewCallSeqStart;
}
SDValue PPCTargetLowering::LowerCall_64SVR4(
SDValue Chain, SDValue Callee, CallFlags CFlags,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
const CallBase *CB) const {
bool isELFv2ABI = Subtarget.isELFv2ABI();
bool isLittleEndian = Subtarget.isLittleEndian();
unsigned NumOps = Outs.size();
bool IsSibCall = false;
bool IsFastCall = CFlags.CallConv == CallingConv::Fast;
EVT PtrVT = getPointerTy(DAG.getDataLayout());
unsigned PtrByteSize = 8;
MachineFunction &MF = DAG.getMachineFunction();
if (CFlags.IsTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
IsSibCall = true;
if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
assert(!(IsFastCall && CFlags.IsVarArg) &&
"fastcc not supported on varargs functions");
unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
unsigned NumBytes = LinkageSize;
unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
static const MCPhysReg GPR[] = {
PPC::X3, PPC::X4, PPC::X5, PPC::X6,
PPC::X7, PPC::X8, PPC::X9, PPC::X10,
};
static const MCPhysReg VR[] = {
PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
};
const unsigned NumGPRs = array_lengthof(GPR);
const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
const unsigned NumVRs = array_lengthof(VR);
bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
if (!HasParameterArea) {
unsigned ParamAreaSize = NumGPRs * PtrByteSize;
unsigned AvailableFPRs = NumFPRs;
unsigned AvailableVRs = NumVRs;
unsigned NumBytesTmp = NumBytes;
for (unsigned i = 0; i != NumOps; ++i) {
if (Outs[i].Flags.isNest()) continue;
if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
PtrByteSize, LinkageSize, ParamAreaSize,
NumBytesTmp, AvailableFPRs, AvailableVRs))
HasParameterArea = true;
}
}
unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
if (IsFastCall)
HasParameterArea = false;
for (unsigned i = 0; i != NumOps; ++i) {
ISD::ArgFlagsTy Flags = Outs[i].Flags;
EVT ArgVT = Outs[i].VT;
EVT OrigVT = Outs[i].ArgVT;
if (Flags.isNest())
continue;
if (IsFastCall) {
if (Flags.isByVal()) {
NumGPRsUsed += (Flags.getByValSize()+7)/8;
if (NumGPRsUsed > NumGPRs)
HasParameterArea = true;
} else {
switch (ArgVT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unexpected ValueType for argument!");
case MVT::i1:
case MVT::i32:
case MVT::i64:
if (++NumGPRsUsed <= NumGPRs)
continue;
break;
case MVT::v4i32:
case MVT::v8i16:
case MVT::v16i8:
case MVT::v2f64:
case MVT::v2i64:
case MVT::v1i128:
case MVT::f128:
if (++NumVRsUsed <= NumVRs)
continue;
break;
case MVT::v4f32:
if (++NumVRsUsed <= NumVRs)
continue;
break;
case MVT::f32:
case MVT::f64:
if (++NumFPRsUsed <= NumFPRs)
continue;
break;
}
HasParameterArea = true;
}
}
auto Alignment =
CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
NumBytes = alignTo(NumBytes, Alignment);
NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
if (Flags.isInConsecutiveRegsLast())
NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
}
unsigned NumBytesActuallyUsed = NumBytes;
if (HasParameterArea)
NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
else
NumBytes = LinkageSize;
if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
int SPDiff = 0;
if (!IsSibCall)
SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
if (CFlags.IsTailCall)
Chain = DAG.getStackArgumentTokenFactor(Chain);
if (!IsSibCall)
Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
SDValue CallSeqStart = Chain;
SDValue LROp, FPOp;
Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
unsigned ArgOffset = LinkageSize;
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
SmallVector<SDValue, 8> MemOpChains;
for (unsigned i = 0; i != NumOps; ++i) {
SDValue Arg = OutVals[i];
ISD::ArgFlagsTy Flags = Outs[i].Flags;
EVT ArgVT = Outs[i].VT;
EVT OrigVT = Outs[i].ArgVT;
SDValue PtrOff;
auto ComputePtrOff = [&]() {
auto Alignment =
CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
ArgOffset = alignTo(ArgOffset, Alignment);
PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
};
if (!IsFastCall) {
ComputePtrOff();
GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
GPR_idx = std::min(GPR_idx, NumGPRs);
}
if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
}
if (Flags.isByVal()) {
unsigned Size = Flags.getByValSize();
if (Size == 0)
continue;
if (IsFastCall)
ComputePtrOff();
if (Size == 1 || Size == 2 || Size == 4) {
EVT VT = (Size == 1) ? MVT::i8 : ((Size == 2) ? MVT::i16 : MVT::i32);
if (GPR_idx != NumGPRs) {
SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
MachinePointerInfo(), VT);
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
ArgOffset += PtrByteSize;
continue;
}
}
if (GPR_idx == NumGPRs && Size < 8) {
SDValue AddPtr = PtrOff;
if (!isLittleEndian) {
SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
PtrOff.getValueType());
AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
}
Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
CallSeqStart,
Flags, DAG, dl);
ArgOffset += PtrByteSize;
continue;
}
if ((NumGPRs - GPR_idx) * PtrByteSize < Size)
Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
CallSeqStart,
Flags, DAG, dl);
if (Size < 8 && GPR_idx != NumGPRs) {
SDValue AddPtr = PtrOff;
if (!isLittleEndian) {
SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
}
Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
CallSeqStart,
Flags, DAG, dl);
SDValue Load =
DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
ArgOffset += PtrByteSize;
continue;
}
for (unsigned j=0; j<Size; j+=PtrByteSize) {
SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
if (GPR_idx != NumGPRs) {
unsigned LoadSizeInBits = std::min(PtrByteSize, (Size - j)) * 8;
EVT ObjType = EVT::getIntegerVT(*DAG.getContext(), LoadSizeInBits);
SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, AddArg,
MachinePointerInfo(), ObjType);
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
ArgOffset += PtrByteSize;
} else {
ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
break;
}
}
continue;
}
switch (Arg.getSimpleValueType().SimpleTy) {
default: llvm_unreachable("Unexpected ValueType for argument!");
case MVT::i1:
case MVT::i32:
case MVT::i64:
if (Flags.isNest()) {
RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
break;
}
if (GPR_idx != NumGPRs) {
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
} else {
if (IsFastCall)
ComputePtrOff();
assert(HasParameterArea &&
"Parameter area must exist to pass an argument in memory.");
LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
true, CFlags.IsTailCall, false, MemOpChains,
TailCallArguments, dl);
if (IsFastCall)
ArgOffset += PtrByteSize;
}
if (!IsFastCall)
ArgOffset += PtrByteSize;
break;
case MVT::f32:
case MVT::f64: {
bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs;
bool NeededLoad = false;
if (FPR_idx != NumFPRs)
RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
if (!NeedGPROrStack)
; // The argument was fully handled by the FPR copy above.
else if (GPR_idx != NumGPRs && !IsFastCall) {
SDValue ArgVal;
if (Arg.getValueType() != MVT::f32) {
ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
} else if (!Flags.isInConsecutiveRegs()) {
ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
} else if (ArgOffset % PtrByteSize != 0) {
SDValue Lo, Hi;
Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
if (!isLittleEndian)
std::swap(Lo, Hi);
ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
} else if (Flags.isInConsecutiveRegsLast()) {
ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
if (!isLittleEndian)
ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
DAG.getConstant(32, dl, MVT::i32));
} else
ArgVal = SDValue();
if (ArgVal.getNode())
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
} else {
if (IsFastCall)
ComputePtrOff();
if (Arg.getValueType() == MVT::f32 &&
!isLittleEndian && !Flags.isInConsecutiveRegs()) {
SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
}
assert(HasParameterArea &&
"Parameter area must exist to pass an argument in memory.");
LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
true, CFlags.IsTailCall, false, MemOpChains,
TailCallArguments, dl);
NeededLoad = true;
}
if (!IsFastCall || NeededLoad) {
ArgOffset += (Arg.getValueType() == MVT::f32 &&
Flags.isInConsecutiveRegs()) ? 4 : 8;
if (Flags.isInConsecutiveRegsLast())
ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
}
break;
}
case MVT::v4f32:
case MVT::v4i32:
case MVT::v8i16:
case MVT::v16i8:
case MVT::v2f64:
case MVT::v2i64:
case MVT::v1i128:
case MVT::f128:
if (CFlags.IsVarArg) {
assert(HasParameterArea &&
"Parameter area must exist if we have a varargs call.");
SDValue Store =
DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
MemOpChains.push_back(Store);
if (VR_idx != NumVRs) {
SDValue Load =
DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
}
ArgOffset += 16;
for (unsigned i=0; i<16; i+=PtrByteSize) {
if (GPR_idx == NumGPRs)
break;
SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
DAG.getConstant(i, dl, PtrVT));
SDValue Load =
DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
}
break;
}
if (VR_idx != NumVRs) {
RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
} else {
if (IsFastCall)
ComputePtrOff();
assert(HasParameterArea &&
"Parameter area must exist to pass an argument in memory.");
LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
true, CFlags.IsTailCall, true, MemOpChains,
TailCallArguments, dl);
if (IsFastCall)
ArgOffset += 16;
}
if (!IsFastCall)
ArgOffset += 16;
break;
}
}
assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
"mismatch in size of parameter area");
(void)NumBytesActuallyUsed;
if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
if (CFlags.IsIndirect) {
if (isTOCSaveRestoreRequired(Subtarget)) {
assert(!CFlags.IsTailCall && "Indirect tails calls not supported");
setUsesTOCBasePtr(DAG);
SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
MachinePointerInfo::getStack(
DAG.getMachineFunction(), TOCSaveOffset));
}
if (isELFv2ABI && !CFlags.IsPatchPoint)
RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
}
SDValue InFlag;
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
RegsToPass[i].second, InFlag);
InFlag = Chain.getValue(1);
}
if (CFlags.IsTailCall && !IsSibCall)
PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
TailCallArguments);
return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
Callee, SPDiff, NumBytes, Ins, InVals, CB);
}
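// Return whether the parameter-save-area slot shadowed by the given
// argument GPR is aligned to at least RequiredAlign. Each argument GPR
// shadows one pointer-sized PSA slot, so the slot's alignment follows
// from the register's position in the argument sequence.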
static bool isGPRShadowAligned(MCPhysReg Reg, Align RequiredAlign) {
assert(RequiredAlign.value() <= 16 &&
"Required alignment greater than stack alignment.");
switch (Reg) {
default:
report_fatal_error("called on invalid register.");
case PPC::R5:
case PPC::R9:
case PPC::X3:
case PPC::X5:
case PPC::X7:
case PPC::X9:
return true;
case PPC::R3:
case PPC::R7:
case PPC::X4:
case PPC::X6:
case PPC::X8:
case PPC::X10:
return RequiredAlign <= 8;
case PPC::R4:
case PPC::R6:
case PPC::R8:
case PPC::R10:
return RequiredAlign <= 4;
}
}
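// CC_AIX - Calling-convention analysis for AIX. Assigns each argument to
// registers and/or parameter-save-area (PSA) slots per the AIX ABI:
// argument GPRs shadow PSA slots, FP arguments reserve both an FPR and
// the shadowing GPRs, and vector arguments to varargs functions are
// mirrored into GPRs or memory so va_arg can find them.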
static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
CCState &S) {
AIXCCState &State = static_cast<AIXCCState &>(S);
const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
State.getMachineFunction().getSubtarget());
const bool IsPPC64 = Subtarget.isPPC64();
const Align PtrAlign = IsPPC64 ? Align(8) : Align(4);
const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
if (ValVT == MVT::f128)
report_fatal_error("f128 is unimplemented on AIX.");
if (ArgFlags.isNest())
report_fatal_error("Nest arguments are unimplemented.");
static const MCPhysReg GPR_32[] = { PPC::R3, PPC::R4, PPC::R5, PPC::R6,
PPC::R7, PPC::R8, PPC::R9, PPC::R10};
static const MCPhysReg GPR_64[] = { PPC::X3, PPC::X4, PPC::X5, PPC::X6,
PPC::X7, PPC::X8, PPC::X9, PPC::X10};
static const MCPhysReg VR[] = { PPC::V2, PPC::V3, PPC::V4, PPC::V5,
PPC::V6, PPC::V7, PPC::V8, PPC::V9,
PPC::V10, PPC::V11, PPC::V12, PPC::V13};
if (ArgFlags.isByVal()) {
if (ArgFlags.getNonZeroByValAlign() > PtrAlign)
report_fatal_error("Pass-by-value arguments with alignment greater than "
"register width are not supported.");
const unsigned ByValSize = ArgFlags.getByValSize();
if (ByValSize == 0) {
State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
State.getNextStackOffset(), RegVT,
LocInfo));
return false;
}
const unsigned StackSize = alignTo(ByValSize, PtrAlign);
unsigned Offset = State.AllocateStack(StackSize, PtrAlign);
for (const unsigned E = Offset + StackSize; Offset < E;
Offset += PtrAlign.value()) {
if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
else {
State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
Offset, MVT::INVALID_SIMPLE_VALUE_TYPE,
LocInfo));
break;
}
}
return false;
}
switch (ValVT.SimpleTy) {
default:
report_fatal_error("Unhandled value type for argument.");
case MVT::i64:
assert(IsPPC64 && "PPC32 should have split i64 values.");
LLVM_FALLTHROUGH;
case MVT::i1:
case MVT::i32: {
const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign);
if (ValVT.getFixedSizeInBits() < RegVT.getFixedSizeInBits())
LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
: CCValAssign::LocInfo::ZExt;
if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
else
State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, RegVT, LocInfo));
return false;
}
case MVT::f32:
case MVT::f64: {
const unsigned StoreSize = LocVT.getStoreSize();
const unsigned Offset =
State.AllocateStack(IsPPC64 ? 8 : StoreSize, Align(4));
unsigned FReg = State.AllocateReg(FPR);
if (FReg)
State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo));
for (unsigned I = 0; I < StoreSize; I += PtrAlign.value()) {
if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
assert(FReg && "An FPR should be available when a GPR is reserved.");
if (State.isVarArg()) {
State.addLoc(
CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
}
} else {
State.addLoc(
FReg ? CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT,
LocInfo)
: CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
break;
}
}
return false;
}
case MVT::v4f32:
case MVT::v4i32:
case MVT::v8i16:
case MVT::v16i8:
case MVT::v2i64:
case MVT::v2f64:
case MVT::v1i128: {
const unsigned VecSize = 16;
const Align VecAlign(VecSize);
if (!State.isVarArg()) {
if (unsigned VReg = State.AllocateReg(VR)) {
State.addLoc(CCValAssign::getReg(ValNo, ValVT, VReg, LocVT, LocInfo));
return false;
}
const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
return false;
}
const unsigned PtrSize = IsPPC64 ? 8 : 4;
ArrayRef<MCPhysReg> GPRs = IsPPC64 ? GPR_64 : GPR_32;
unsigned NextRegIndex = State.getFirstUnallocated(GPRs);
while (NextRegIndex != GPRs.size() &&
!isGPRShadowAligned(GPRs[NextRegIndex], VecAlign)) {
unsigned Reg = State.AllocateReg(GPRs);
State.AllocateStack(PtrSize, PtrAlign);
assert(Reg && "Allocating register unexpectedly failed.");
(void)Reg;
NextRegIndex = State.getFirstUnallocated(GPRs);
}
if (State.isFixed(ValNo)) {
if (unsigned VReg = State.AllocateReg(VR)) {
State.addLoc(CCValAssign::getReg(ValNo, ValVT, VReg, LocVT, LocInfo));
for (unsigned I = 0; I != VecSize; I += PtrSize)
State.AllocateReg(GPRs);
State.AllocateStack(VecSize, VecAlign);
return false;
}
const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
return false;
}
if (NextRegIndex == GPRs.size()) {
const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
return false;
}
if (GPRs[NextRegIndex] == PPC::R9) {
const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
State.addLoc(
CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));
const unsigned FirstReg = State.AllocateReg(PPC::R9);
const unsigned SecondReg = State.AllocateReg(PPC::R10);
assert(FirstReg && SecondReg &&
"Allocating R9 or R10 unexpectedly failed.");
State.addLoc(
CCValAssign::getCustomReg(ValNo, ValVT, FirstReg, RegVT, LocInfo));
State.addLoc(
CCValAssign::getCustomReg(ValNo, ValVT, SecondReg, RegVT, LocInfo));
return false;
}
const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
State.addLoc(
CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));
for (unsigned I = 0; I != VecSize; I += PtrSize) {
const unsigned Reg = State.AllocateReg(GPRs);
assert(Reg && "Failed to allocated register for vararg vector argument");
State.addLoc(
CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
}
return false;
}
}
return true;
}
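// Return the register class used to pass or return a value of the given
// simple type, based on the subtarget's available register files.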
static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
bool IsPPC64,
bool HasP8Vector,
bool HasVSX) {
assert((IsPPC64 || SVT != MVT::i64) &&
"i64 should have been split for 32-bit codegen.");
switch (SVT) {
default:
report_fatal_error("Unexpected value type for formal argument");
case MVT::i1:
case MVT::i32:
case MVT::i64:
return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
case MVT::f32:
return HasP8Vector ? &PPC::VSSRCRegClass : &PPC::F4RCRegClass;
case MVT::f64:
return HasVSX ? &PPC::VSFRCRegClass : &PPC::F8RCRegClass;
case MVT::v4f32:
case MVT::v4i32:
case MVT::v8i16:
case MVT::v16i8:
case MVT::v2i64:
case MVT::v2f64:
case MVT::v1i128:
return &PPC::VRRCRegClass;
}
}
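// Truncate a register-width argument value back to its declared type,
// first inserting AssertSext/AssertZext so later passes know the state
// of the high bits.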
static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT,
SelectionDAG &DAG, SDValue ArgValue,
MVT LocVT, const SDLoc &dl) {
assert(ValVT.isScalarInteger() && LocVT.isScalarInteger());
assert(ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits());
if (Flags.isSExt())
ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
DAG.getValueType(ValVT));
else if (Flags.isZExt())
ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
DAG.getValueType(ValVT));
return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
}
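// Return the parameter-save-area offset shadowed by the given argument
// GPR: the linkage area size plus the register's index times its width.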
static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL) {
const unsigned LASize = FL->getLinkageSize();
if (PPC::GPRCRegClass.contains(Reg)) {
assert(Reg >= PPC::R3 && Reg <= PPC::R10 &&
"Reg must be a valid argument register!");
return LASize + 4 * (Reg - PPC::R3);
}
if (PPC::G8RCRegClass.contains(Reg)) {
assert(Reg >= PPC::X3 && Reg <= PPC::X10 &&
"Reg must be a valid argument register!");
return LASize + 8 * (Reg - PPC::X3);
}
llvm_unreachable("Only general purpose registers expected.");
}
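// LowerFormalArguments_AIX - Lower incoming arguments under the AIX ABI:
// copy register arguments out of their physical registers, materialize
// memory arguments from fixed stack objects, and, for varargs functions,
// spill the remaining argument GPRs to the parameter save area.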
SDValue PPCTargetLowering::LowerFormalArguments_AIX(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||
CallConv == CallingConv::Fast) &&
"Unexpected calling convention!");
if (getTargetMachine().Options.GuaranteedTailCallOpt)
report_fatal_error("Tail call support is unimplemented on AIX.");
if (useSoftFloat())
report_fatal_error("Soft float support is unimplemented on AIX.");
const PPCSubtarget &Subtarget = DAG.getSubtarget<PPCSubtarget>();
const bool IsPPC64 = Subtarget.isPPC64();
const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
SmallVector<CCValAssign, 16> ArgLocs;
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
AIXCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
const EVT PtrVT = getPointerTy(MF.getDataLayout());
const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);
SmallVector<SDValue, 8> MemOps;
for (size_t I = 0, End = ArgLocs.size(); I != End; ) {
CCValAssign &VA = ArgLocs[I++];
MVT LocVT = VA.getLocVT();
MVT ValVT = VA.getValVT();
ISD::ArgFlagsTy Flags = Ins[VA.getValNo()].Flags;
if (VA.isMemLoc() && VA.needsCustom() && ValVT.isFloatingPoint())
continue;
auto HandleMemLoc = [&]() {
const unsigned LocSize = LocVT.getStoreSize();
const unsigned ValSize = ValVT.getStoreSize();
assert((ValSize <= LocSize) &&
"Object size is larger than size of MemLoc");
int CurArgOffset = VA.getLocMemOffset();
if (LocSize > ValSize)
CurArgOffset += LocSize - ValSize;
const bool IsImmutable =
!(getTargetMachine().Options.GuaranteedTailCallOpt &&
(CallConv == CallingConv::Fast));
int FI = MFI.CreateFixedObject(ValSize, CurArgOffset, IsImmutable);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
SDValue ArgValue =
DAG.getLoad(ValVT, dl, Chain, FIN, MachinePointerInfo());
InVals.push_back(ArgValue);
};
if (VA.isMemLoc() && VA.needsCustom()) {
assert(ValVT.isVector() && "Unexpected Custom MemLoc type.");
assert(isVarArg && "Only use custom memloc for vararg.");
const unsigned OriginalValNo = VA.getValNo();
(void)OriginalValNo;
auto HandleCustomVecRegLoc = [&]() {
assert(I != End && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
"Missing custom RegLoc.");
VA = ArgLocs[I++];
assert(VA.getValVT().isVector() &&
"Unexpected Val type for custom RegLoc.");
assert(VA.getValNo() == OriginalValNo &&
"ValNo mismatch between custom MemLoc and RegLoc.");
MVT::SimpleValueType SVT = VA.getLocVT().SimpleTy;
MF.addLiveIn(VA.getLocReg(),
getRegClassForSVT(SVT, IsPPC64, Subtarget.hasP8Vector(),
Subtarget.hasVSX()));
};
HandleMemLoc();
HandleCustomVecRegLoc();
HandleCustomVecRegLoc();
if (I != End && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom()) {
assert(!IsPPC64 &&
"Only 2 custom RegLocs expected for 64-bit codegen.");
HandleCustomVecRegLoc();
HandleCustomVecRegLoc();
}
continue;
}
if (VA.isRegLoc()) {
if (VA.getValVT().isScalarInteger())
FuncInfo->appendParameterType(PPCFunctionInfo::FixedType);
else if (VA.getValVT().isFloatingPoint() && !VA.getValVT().isVector()) {
switch (VA.getValVT().SimpleTy) {
default:
report_fatal_error("Unhandled value type for argument.");
case MVT::f32:
FuncInfo->appendParameterType(PPCFunctionInfo::ShortFloatingPoint);
break;
case MVT::f64:
FuncInfo->appendParameterType(PPCFunctionInfo::LongFloatingPoint);
break;
}
} else if (VA.getValVT().isVector()) {
switch (VA.getValVT().SimpleTy) {
default:
report_fatal_error("Unhandled value type for argument.");
case MVT::v16i8:
FuncInfo->appendParameterType(PPCFunctionInfo::VectorChar);
break;
case MVT::v8i16:
FuncInfo->appendParameterType(PPCFunctionInfo::VectorShort);
break;
case MVT::v4i32:
case MVT::v2i64:
case MVT::v1i128:
FuncInfo->appendParameterType(PPCFunctionInfo::VectorInt);
break;
case MVT::v4f32:
case MVT::v2f64:
FuncInfo->appendParameterType(PPCFunctionInfo::VectorFloat);
break;
}
}
}
if (Flags.isByVal() && VA.isMemLoc()) {
const unsigned Size =
alignTo(Flags.getByValSize() ? Flags.getByValSize() : PtrByteSize,
PtrByteSize);
const int FI = MF.getFrameInfo().CreateFixedObject(
Size, VA.getLocMemOffset(), false,
true);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
InVals.push_back(FIN);
continue;
}
if (Flags.isByVal()) {
assert(VA.isRegLoc() && "MemLocs should already be handled.");
const MCPhysReg ArgReg = VA.getLocReg();
const PPCFrameLowering *FL = Subtarget.getFrameLowering();
if (Flags.getNonZeroByValAlign() > PtrByteSize)
report_fatal_error("Over aligned byvals not supported yet.");
const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);
const int FI = MF.getFrameInfo().CreateFixedObject(
StackSize, mapArgRegToOffsetAIX(ArgReg, FL), false,
true);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
InVals.push_back(FIN);
const TargetRegisterClass *RegClass =
IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
unsigned Offset) {
const Register VReg = MF.addLiveIn(PhysReg, RegClass);
SDValue CopyFrom = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
SDValue Store = DAG.getStore(
CopyFrom.getValue(1), dl, CopyFrom,
DAG.getObjectPtrOffset(dl, FIN, TypeSize::Fixed(Offset)),
MachinePointerInfo::getFixedStack(MF, FI, Offset));
MemOps.push_back(Store);
};
unsigned Offset = 0;
HandleRegLoc(VA.getLocReg(), Offset);
Offset += PtrByteSize;
for (; Offset != StackSize && ArgLocs[I].isRegLoc();
Offset += PtrByteSize) {
assert(ArgLocs[I].getValNo() == VA.getValNo() &&
"RegLocs should be for ByVal argument.");
const CCValAssign RL = ArgLocs[I++];
HandleRegLoc(RL.getLocReg(), Offset);
FuncInfo->appendParameterType(PPCFunctionInfo::FixedType);
}
if (Offset != StackSize) {
assert(ArgLocs[I].getValNo() == VA.getValNo() &&
"Expected MemLoc for remaining bytes.");
assert(ArgLocs[I].isMemLoc() && "Expected MemLoc for remaining bytes.");
++I;
}
continue;
}
if (VA.isRegLoc() && !VA.needsCustom()) {
MVT::SimpleValueType SVT = ValVT.SimpleTy;
Register VReg =
MF.addLiveIn(VA.getLocReg(),
getRegClassForSVT(SVT, IsPPC64, Subtarget.hasP8Vector(),
Subtarget.hasVSX()));
SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
if (ValVT.isScalarInteger() &&
(ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits())) {
ArgValue =
truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
}
InVals.push_back(ArgValue);
continue;
}
if (VA.isMemLoc()) {
HandleMemLoc();
continue;
}
}
const unsigned MinParameterSaveArea = 8 * PtrByteSize;
unsigned CallerReservedArea =
std::max(CCInfo.getNextStackOffset(), LinkageSize + MinParameterSaveArea);
CallerReservedArea =
EnsureStackAlignment(Subtarget.getFrameLowering(), CallerReservedArea);
FuncInfo->setMinReservedArea(CallerReservedArea);
if (isVarArg) {
FuncInfo->setVarArgsFrameIndex(
MFI.CreateFixedObject(PtrByteSize, CCInfo.getNextStackOffset(), true));
SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6,
PPC::R7, PPC::R8, PPC::R9, PPC::R10};
static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6,
PPC::X7, PPC::X8, PPC::X9, PPC::X10};
const unsigned NumGPArgRegs = array_lengthof(IsPPC64 ? GPR_64 : GPR_32);
for (unsigned GPRIndex =
(CCInfo.getNextStackOffset() - LinkageSize) / PtrByteSize;
GPRIndex < NumGPArgRegs; ++GPRIndex) {
const Register VReg =
IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass)
: MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
SDValue Store =
DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
MemOps.push_back(Store);
SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
}
}
if (!MemOps.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
return Chain;
}
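// LowerCall_AIX - Lower an outgoing call under the AIX ABI: place
// arguments in registers and the parameter save area as assigned by
// CC_AIX, save the TOC pointer to its stack slot for indirect calls, and
// emit the call sequence.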
SDValue PPCTargetLowering::LowerCall_AIX(
SDValue Chain, SDValue Callee, CallFlags CFlags,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
const CallBase *CB) const {
assert((CFlags.CallConv == CallingConv::C ||
CFlags.CallConv == CallingConv::Cold ||
CFlags.CallConv == CallingConv::Fast) &&
"Unexpected calling convention!");
if (CFlags.IsPatchPoint)
report_fatal_error("This call type is unimplemented on AIX.");
const PPCSubtarget &Subtarget = DAG.getSubtarget<PPCSubtarget>();
MachineFunction &MF = DAG.getMachineFunction();
SmallVector<CCValAssign, 16> ArgLocs;
AIXCCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,
*DAG.getContext());
const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
const bool IsPPC64 = Subtarget.isPPC64();
const EVT PtrVT = getPointerTy(DAG.getDataLayout());
const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
CCInfo.AnalyzeCallOperands(Outs, CC_AIX);
const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
const unsigned NumBytes = std::max(LinkageSize + MinParameterSaveAreaSize,
CCInfo.getNextStackOffset());
Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
SDValue CallSeqStart = Chain;
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
SmallVector<SDValue, 8> MemOpChains;
const SDValue StackPtr = IsPPC64 ? DAG.getRegister(PPC::X1, MVT::i64)
: DAG.getRegister(PPC::R1, MVT::i32);
for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
const unsigned ValNo = ArgLocs[I].getValNo();
SDValue Arg = OutVals[ValNo];
ISD::ArgFlagsTy Flags = Outs[ValNo].Flags;
if (Flags.isByVal()) {
const unsigned ByValSize = Flags.getByValSize();
if (!ByValSize) {
++I;
continue;
}
auto GetLoad = [&](EVT VT, unsigned LoadOffset) {
return DAG.getExtLoad(
ISD::ZEXTLOAD, dl, PtrVT, Chain,
(LoadOffset != 0)
? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
: Arg,
MachinePointerInfo(), VT);
};
unsigned LoadOffset = 0;
while (LoadOffset + PtrByteSize <= ByValSize && ArgLocs[I].isRegLoc()) {
SDValue Load = GetLoad(PtrVT, LoadOffset);
MemOpChains.push_back(Load.getValue(1));
LoadOffset += PtrByteSize;
const CCValAssign &ByValVA = ArgLocs[I++];
assert(ByValVA.getValNo() == ValNo &&
"Unexpected location for pass-by-value argument.");
RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), Load));
}
if (LoadOffset == ByValSize)
continue;
assert(ArgLocs[I].getValNo() == ValNo &&
"Expected additional location for by-value argument.");
if (ArgLocs[I].isMemLoc()) {
assert(LoadOffset < ByValSize && "Unexpected memloc for by-val arg.");
const CCValAssign &ByValVA = ArgLocs[I++];
ISD::ArgFlagsTy MemcpyFlags = Flags;
MemcpyFlags.setByValSize(ByValSize - LoadOffset);
Chain = CallSeqStart = createMemcpyOutsideCallSeq(
(LoadOffset != 0)
? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
: Arg,
DAG.getObjectPtrOffset(dl, StackPtr,
TypeSize::Fixed(ByValVA.getLocMemOffset())),
CallSeqStart, MemcpyFlags, DAG, dl);
continue;
}
const unsigned ResidueBytes = ByValSize % PtrByteSize;
assert(ResidueBytes != 0 && LoadOffset + PtrByteSize > ByValSize &&
"Unexpected register residue for by-value argument.");
SDValue ResidueVal;
for (unsigned Bytes = 0; Bytes != ResidueBytes;) {
const unsigned N = PowerOf2Floor(ResidueBytes - Bytes);
const MVT VT =
N == 1 ? MVT::i8
: ((N == 2) ? MVT::i16 : (N == 4 ? MVT::i32 : MVT::i64));
SDValue Load = GetLoad(VT, LoadOffset);
MemOpChains.push_back(Load.getValue(1));
LoadOffset += N;
Bytes += N;
assert(PtrVT.getSimpleVT().getSizeInBits() > (Bytes * 8) &&
"Unexpected load emitted during handling of pass-by-value "
"argument.");
unsigned NumSHLBits = PtrVT.getSimpleVT().getSizeInBits() - (Bytes * 8);
EVT ShiftAmountTy =
getShiftAmountTy(Load->getValueType(0), DAG.getDataLayout());
SDValue SHLAmt = DAG.getConstant(NumSHLBits, dl, ShiftAmountTy);
SDValue ShiftedLoad =
DAG.getNode(ISD::SHL, dl, Load.getValueType(), Load, SHLAmt);
ResidueVal = ResidueVal ? DAG.getNode(ISD::OR, dl, PtrVT, ResidueVal,
ShiftedLoad)
: ShiftedLoad;
}
const CCValAssign &ByValVA = ArgLocs[I++];
RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), ResidueVal));
continue;
}
CCValAssign &VA = ArgLocs[I++];
const MVT LocVT = VA.getLocVT();
const MVT ValVT = VA.getValVT();
switch (VA.getLocInfo()) {
default:
report_fatal_error("Unexpected argument extension type.");
case CCValAssign::Full:
break;
case CCValAssign::ZExt:
Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
break;
case CCValAssign::SExt:
Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
break;
}
if (VA.isRegLoc() && !VA.needsCustom()) {
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
continue;
}
if (VA.isMemLoc() && VA.needsCustom() && ValVT.isVector()) {
assert(CFlags.IsVarArg && "Custom MemLocs only used for Vector args.");
SDValue PtrOff =
DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
SDValue Store =
DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
MemOpChains.push_back(Store);
const unsigned OriginalValNo = VA.getValNo();
unsigned LoadOffset = 0;
auto HandleCustomVecRegLoc = [&]() {
assert(I != E && "Unexpected end of CCvalAssigns.");
assert(ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
"Expected custom RegLoc.");
CCValAssign RegVA = ArgLocs[I++];
assert(RegVA.getValNo() == OriginalValNo &&
"Custom MemLoc ValNo and custom RegLoc ValNo must match.");
SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
DAG.getConstant(LoadOffset, dl, PtrVT));
SDValue Load = DAG.getLoad(PtrVT, dl, Store, Add, MachinePointerInfo());
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(RegVA.getLocReg(), Load));
LoadOffset += PtrByteSize;
};
HandleCustomVecRegLoc();
HandleCustomVecRegLoc();
if (I != E && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
ArgLocs[I].getValNo() == OriginalValNo) {
assert(!IsPPC64 &&
"Only 2 custom RegLocs expected for 64-bit codegen.");
HandleCustomVecRegLoc();
HandleCustomVecRegLoc();
}
continue;
}
if (VA.isMemLoc()) {
SDValue PtrOff =
DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
MemOpChains.push_back(
DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
continue;
}
if (!ValVT.isFloatingPoint())
report_fatal_error(
"Unexpected register handling for calling convention.");
assert(VA.isRegLoc() && VA.needsCustom() && CFlags.IsVarArg &&
LocVT.isInteger() &&
"Custom register handling only expected for VarArg.");
SDValue ArgAsInt =
DAG.getBitcast(MVT::getIntegerVT(ValVT.getSizeInBits()), Arg);
if (Arg.getValueType().getStoreSize() == LocVT.getStoreSize())
RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
else if (Arg.getValueType().getFixedSizeInBits() <
LocVT.getFixedSizeInBits())
RegsToPass.push_back(std::make_pair(
VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, LocVT)));
else {
assert(Arg.getValueType() == MVT::f64 && CFlags.IsVarArg && !IsPPC64 &&
"Unexpected custom register for argument!");
CCValAssign &GPR1 = VA;
SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
DAG.getConstant(32, dl, MVT::i8));
RegsToPass.push_back(std::make_pair(
GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));
if (I != E) {
// If a second custom GPR was allocated for this argument, it carries
// the low 32 bits of the f64; otherwise those bits were assigned to
// memory above.
CCValAssign &PeekArg = ArgLocs[I];
if (PeekArg.isRegLoc() && PeekArg.getValNo() == ValNo) {
assert(PeekArg.needsCustom() && "A second custom GPR is expected.");
CCValAssign &GPR2 = ArgLocs[I++];
RegsToPass.push_back(std::make_pair(
GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
}
}
}
}
if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
if (CFlags.IsIndirect) {
assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
const unsigned TOCSaveOffset =
Subtarget.getFrameLowering()->getTOCSaveOffset();
setUsesTOCBasePtr(DAG);
SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT);
SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT);
SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
Chain = DAG.getStore(
Val.getValue(1), dl, Val, AddPtr,
MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
}
SDValue InFlag;
for (auto Reg : RegsToPass) {
Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag);
InFlag = Chain.getValue(1);
}
const int SPDiff = 0;
return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
Callee, SPDiff, NumBytes, Ins, InVals, CB);
}
bool
PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
MachineFunction &MF, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
return CCInfo.CheckReturn(
Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
? RetCC_PPC_Cold
: RetCC_PPC);
}
SDValue
PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SDLoc &dl, SelectionDAG &DAG) const {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
*DAG.getContext());
CCInfo.AnalyzeReturn(Outs,
(Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
? RetCC_PPC_Cold
: RetCC_PPC);
SDValue Flag;
SmallVector<SDValue, 4> RetOps(1, Chain);
for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
SDValue Arg = OutVals[RealResIdx];
switch (VA.getLocInfo()) {
default: llvm_unreachable("Unknown loc info!");
case CCValAssign::Full: break;
case CCValAssign::AExt:
Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
break;
case CCValAssign::ZExt:
Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
break;
case CCValAssign::SExt:
Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
break;
}
if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
bool isLittleEndian = Subtarget.isLittleEndian();
SDValue SVal =
DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl));
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
Flag = Chain.getValue(1);
VA = RVLocs[++i]; // skip ahead to next loc
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
} else
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
Flag = Chain.getValue(1);
RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
}
RetOps[0] = Chain;
if (Flag.getNode())
RetOps.push_back(Flag);
return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
}
SDValue
PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
EVT IntVT = Op.getValueType();
SDValue Chain = Op.getOperand(0);
SDValue FPSIdx = getFramePointerFrameIndex(DAG);
SDValue Ops[2] = {Chain, FPSIdx};
SDVTList VTs = DAG.getVTList(IntVT);
return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
}
SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
EVT PtrVT = getPointerTy(DAG.getDataLayout());
bool isPPC64 = Subtarget.isPPC64();
unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
SDValue StackPtr = DAG.getRegister(SP, PtrVT);
SDValue Chain = Op.getOperand(0);
SDValue SaveSP = Op.getOperand(1);
SDValue LoadLinkSP =
DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
}
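// Return (creating it on first use) the frame index of the slot in which
// the return address is saved, at the ABI-defined offset from the stack
// pointer.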
SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
bool isPPC64 = Subtarget.isPPC64();
EVT PtrVT = getPointerTy(MF.getDataLayout());
PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
int RASI = FI->getReturnAddrSaveIndex();
if (!RASI) {
int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
FI->setReturnAddrSaveIndex(RASI);
}
return DAG.getFrameIndex(RASI, PtrVT);
}
SDValue
PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
bool isPPC64 = Subtarget.isPPC64();
EVT PtrVT = getPointerTy(MF.getDataLayout());
PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
int FPSI = FI->getFramePointerSaveIndex();
if (!FPSI) {
int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
FI->setFramePointerSaveIndex(FPSI);
}
return DAG.getFrameIndex(FPSI, PtrVT);
}
SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
SDValue Chain = Op.getOperand(0);
SDValue Size = Op.getOperand(1);
SDLoc dl(Op);
EVT PtrVT = getPointerTy(DAG.getDataLayout());
SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
DAG.getConstant(0, dl, PtrVT), Size);
SDValue FPSIdx = getFramePointerFrameIndex(DAG);
SDValue Ops[3] = { Chain, NegSize, FPSIdx };
SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
if (hasInlineStackProbe(MF))
return DAG.getNode(PPCISD::PROBED_ALLOCA, dl, VTs, Ops);
return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
}
SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
bool isPPC64 = Subtarget.isPPC64();
EVT PtrVT = getPointerTy(DAG.getDataLayout());
int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
return DAG.getFrameIndex(FI, PtrVT);
}
SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
DAG.getVTList(MVT::i32, MVT::Other),
Op.getOperand(0), Op.getOperand(1));
}
SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
Op.getOperand(0), Op.getOperand(1));
}
SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
if (Op.getValueType().isVector())
return LowerVectorLoad(Op, DAG);
assert(Op.getValueType() == MVT::i1 &&
"Custom lowering only for i1 loads");
SDLoc dl(Op);
LoadSDNode *LD = cast<LoadSDNode>(Op);
SDValue Chain = LD->getChain();
SDValue BasePtr = LD->getBasePtr();
MachineMemOperand *MMO = LD->getMemOperand();
SDValue NewLD =
DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
BasePtr, MVT::i8, MMO);
SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
return DAG.getMergeValues(Ops, dl);
}
SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
if (Op.getOperand(1).getValueType().isVector())
return LowerVectorStore(Op, DAG);
assert(Op.getOperand(1).getValueType() == MVT::i1 &&
"Custom lowering only for i1 stores");
SDLoc dl(Op);
StoreSDNode *ST = cast<StoreSDNode>(Op);
SDValue Chain = ST->getChain();
SDValue BasePtr = ST->getBasePtr();
SDValue Value = ST->getValue();
MachineMemOperand *MMO = ST->getMemOperand();
Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
Value);
return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
}
SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
assert(Op.getValueType() == MVT::i1 &&
"Custom lowering only for i1 results");
SDLoc DL(Op);
return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0));
}
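// Lower a vector truncate whose result fits in a vector register to a
// single shuffle that gathers the narrowed elements. Only power-of-two
// element counts and sizes with a source of at most 256 bits are
// handled; anything else falls back to default legalization.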
SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
SelectionDAG &DAG) const {
EVT TrgVT = Op.getValueType();
assert(TrgVT.isVector() && "Vector type expected.");
unsigned TrgNumElts = TrgVT.getVectorNumElements();
EVT EltVT = TrgVT.getVectorElementType();
if (!isOperationCustom(Op.getOpcode(), TrgVT) ||
TrgVT.getSizeInBits() > 128 || !isPowerOf2_32(TrgNumElts) ||
!isPowerOf2_32(EltVT.getSizeInBits()))
return SDValue();
SDValue N1 = Op.getOperand(0);
EVT SrcVT = N1.getValueType();
unsigned SrcSize = SrcVT.getSizeInBits();
if (SrcSize > 256 ||
!isPowerOf2_32(SrcVT.getVectorNumElements()) ||
!isPowerOf2_32(SrcVT.getVectorElementType().getSizeInBits()))
return SDValue();
if (SrcSize == 256 && SrcVT.getVectorNumElements() < 2)
return SDValue();
unsigned WideNumElts = 128 / EltVT.getSizeInBits();
EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
SDLoc DL(Op);
SDValue Op1, Op2;
if (SrcSize == 256) {
EVT VecIdxTy = getVectorIdxTy(DAG.getDataLayout());
EVT SplitVT =
N1.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
unsigned SplitNumElts = SplitVT.getVectorNumElements();
Op1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
DAG.getConstant(0, DL, VecIdxTy));
Op2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
DAG.getConstant(SplitNumElts, DL, VecIdxTy));
} else {
Op1 = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
Op2 = DAG.getUNDEF(WideVT);
}
unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
SmallVector<int, 16> ShuffV;
if (Subtarget.isLittleEndian())
for (unsigned i = 0; i < TrgNumElts; ++i)
ShuffV.push_back(i * SizeMult);
else
for (unsigned i = 1; i <= TrgNumElts; ++i)
ShuffV.push_back(i * SizeMult - 1);
for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
ShuffV.push_back(WideNumElts + 1);
Op1 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op1);
Op2 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op2);
return DAG.getVectorShuffle(WideVT, DL, Op1, Op2, ShuffV);
}
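// Lower SELECT_CC. Floating-point selects can use the fsel instruction,
// which selects on the sign of its first operand, so the comparison is
// rewritten as a subtraction (or the LHS is used directly when comparing
// against +0.0). This is only done when NaNs and infinities can be
// ignored, as fsel does not respect IEEE comparison semantics for them.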
SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
EVT ResVT = Op.getValueType();
EVT CmpVT = Op.getOperand(0).getValueType();
SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
SDLoc dl(Op);
if (!Subtarget.hasP9Vector() && CmpVT == MVT::f128) {
SDValue Z = DAG.getSetCC(
dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT),
LHS, RHS, CC);
SDValue Zero = DAG.getConstant(0, dl, Z.getValueType());
return DAG.getSelectCC(dl, Z, Zero, TV, FV, ISD::SETNE);
}
if (!CmpVT.isFloatingPoint() || !TV.getValueType().isFloatingPoint() ||
Subtarget.hasSPE())
return Op;
SDNodeFlags Flags = Op.getNode()->getFlags();
if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
switch (CC) {
default:
break;
case ISD::SETOGT:
case ISD::SETGT:
return DAG.getNode(PPCISD::XSMAXC, dl, Op.getValueType(), LHS, RHS);
case ISD::SETOLT:
case ISD::SETLT:
return DAG.getNode(PPCISD::XSMINC, dl, Op.getValueType(), LHS, RHS);
}
}
if ((!DAG.getTarget().Options.NoInfsFPMath && !Flags.hasNoInfs()) ||
(!DAG.getTarget().Options.NoNaNsFPMath && !Flags.hasNoNaNs()))
return Op;
SDValue Sel1;
// If the RHS of the comparison is a 0.0, we don't need to do the
// subtraction at all.
if (isFloatingPointZero(RHS))
switch (CC) {
default: break; // SETUO etc. aren't handled by fsel.
case ISD::SETNE:
std::swap(TV, FV);
LLVM_FALLTHROUGH;
case ISD::SETEQ:
if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
return DAG.getNode(PPCISD::FSEL, dl, ResVT,
DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
case ISD::SETULT:
case ISD::SETLT:
std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
LLVM_FALLTHROUGH;
case ISD::SETOGE:
case ISD::SETGE:
if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
case ISD::SETUGT:
case ISD::SETGT:
std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
LLVM_FALLTHROUGH;
case ISD::SETOLE:
case ISD::SETLE:
if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
return DAG.getNode(PPCISD::FSEL, dl, ResVT,
DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
}
SDValue Cmp;
switch (CC) {
default: break; // SETUO etc. aren't handled by fsel.
case ISD::SETNE:
std::swap(TV, FV);
LLVM_FALLTHROUGH;
case ISD::SETEQ:
Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
return DAG.getNode(PPCISD::FSEL, dl, ResVT,
DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
case ISD::SETULT:
case ISD::SETLT:
Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
case ISD::SETOGE:
case ISD::SETGE:
Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
case ISD::SETUGT:
case ISD::SETGT:
Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
case ISD::SETOLE:
case ISD::SETLE:
Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
}
return Op;
}
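// Map a PPCISD FP-to-int or int-to-FP conversion opcode to its strict
// (FP-exception-preserving) counterpart.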
static unsigned getPPCStrictOpcode(unsigned Opc) {
switch (Opc) {
default:
llvm_unreachable("No strict version of this opcode!");
case PPCISD::FCTIDZ:
return PPCISD::STRICT_FCTIDZ;
case PPCISD::FCTIWZ:
return PPCISD::STRICT_FCTIWZ;
case PPCISD::FCTIDUZ:
return PPCISD::STRICT_FCTIDUZ;
case PPCISD::FCTIWUZ:
return PPCISD::STRICT_FCTIWUZ;
case PPCISD::FCFID:
return PPCISD::STRICT_FCFID;
case PPCISD::FCFIDU:
return PPCISD::STRICT_FCFIDU;
case PPCISD::FCFIDS:
return PPCISD::STRICT_FCFIDS;
case PPCISD::FCFIDUS:
return PPCISD::STRICT_FCFIDUS;
}
}
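// Emit the PPCISD::FCTI* node that converts an FP value to an integer
// held in an FP register, extending f32 sources to f64 first and
// threading the chain through for strict FP operations.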
static SDValue convertFPToInt(SDValue Op, SelectionDAG &DAG,
const PPCSubtarget &Subtarget) {
SDLoc dl(Op);
bool IsStrict = Op->isStrictFPOpcode();
bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
SDNodeFlags Flags;
Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
assert(Src.getValueType().isFloatingPoint());
if (Src.getValueType() == MVT::f32) {
if (IsStrict) {
Src =
DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
DAG.getVTList(MVT::f64, MVT::Other), {Chain, Src}, Flags);
Chain = Src.getValue(1);
} else
Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
}
SDValue Conv;
unsigned Opc = ISD::DELETED_NODE;
switch (Op.getSimpleValueType().SimpleTy) {
default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
case MVT::i32:
Opc = IsSigned ? PPCISD::FCTIWZ
: (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ);
break;
case MVT::i64:
assert((IsSigned || Subtarget.hasFPCVT()) &&
"i64 FP_TO_UINT is supported only with FPCVT");
Opc = IsSigned ? PPCISD::FCTIDZ : PPCISD::FCTIDUZ;
}
if (IsStrict) {
Opc = getPPCStrictOpcode(Opc);
Conv = DAG.getNode(Opc, dl, DAG.getVTList(MVT::f64, MVT::Other),
{Chain, Src}, Flags);
} else {
Conv = DAG.getNode(Opc, dl, MVT::f64, Src);
}
return Conv;
}
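// Convert an FP value to an integer and store the result to a stack
// slot, recording the slot address, pointer info, chain and alignment in
// RLI so a later integer load can be folded against this store.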
void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
SelectionDAG &DAG,
const SDLoc &dl) const {
SDValue Tmp = convertFPToInt(Op, DAG, Subtarget);
bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
bool IsStrict = Op->isStrictFPOpcode();
bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
(IsSigned || Subtarget.hasFPCVT());
SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
MachinePointerInfo MPI =
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
SDValue Chain = IsStrict ? Tmp.getValue(1) : DAG.getEntryNode();
Align Alignment(DAG.getEVTAlign(Tmp.getValueType()));
if (i32Stack) {
MachineFunction &MF = DAG.getMachineFunction();
Alignment = Align(4);
MachineMemOperand *MMO =
MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment);
SDValue Ops[] = { Chain, Tmp, FIPtr };
Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
} else
Chain = DAG.getStore(Chain, dl, Tmp, FIPtr, MPI, Alignment);
if (Op.getValueType() == MVT::i32 && !i32Stack) {
FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
DAG.getConstant(4, dl, FIPtr.getValueType()));
MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
}
RLI.Chain = Chain;
RLI.Ptr = FIPtr;
RLI.MPI = MPI;
RLI.Alignment = Alignment;
}
SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
SelectionDAG &DAG,
const SDLoc &dl) const {
SDValue Conv = convertFPToInt(Op, DAG, Subtarget);
SDValue Mov = DAG.getNode(PPCISD::MFVSR, dl, Op.getValueType(), Conv);
if (Op->isStrictFPOpcode())
return DAG.getMergeValues({Mov, Conv.getValue(1)}, dl);
else
return Mov;
}
SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
const SDLoc &dl) const {
bool IsStrict = Op->isStrictFPOpcode();
bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
EVT SrcVT = Src.getValueType();
EVT DstVT = Op.getValueType();
if (SrcVT == MVT::f128)
return Subtarget.hasP9Vector() ? Op : SDValue();
if (SrcVT == MVT::ppcf128) {
if (DstVT == MVT::i32) {
SDNodeFlags Flags;
Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
if (IsSigned) {
SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
DAG.getIntPtrConstant(0, dl));
SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
DAG.getIntPtrConstant(1, dl));
if (IsStrict) {
SDValue Res = DAG.getNode(PPCISD::STRICT_FADDRTZ, dl,
DAG.getVTList(MVT::f64, MVT::Other),
{Op.getOperand(0), Lo, Hi}, Flags);
return DAG.getNode(ISD::STRICT_FP_TO_SINT, dl,
DAG.getVTList(MVT::i32, MVT::Other),
{Res.getValue(1), Res}, Flags);
} else {
SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
}
} else {
const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT);
SDValue SignMask = DAG.getConstant(0x80000000, dl, DstVT);
if (IsStrict) {
SDValue Chain = Op.getOperand(0);
EVT SetCCVT =
getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);
EVT DstSetCCVT =
getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT);
SDValue Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT,
Chain, true);
Chain = Sel.getValue(1);
SDValue FltOfs = DAG.getSelect(
dl, SrcVT, Sel, DAG.getConstantFP(0.0, dl, SrcVT), Cst);
Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT);
SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl,
DAG.getVTList(SrcVT, MVT::Other),
{Chain, Src, FltOfs}, Flags);
Chain = Val.getValue(1);
SDValue SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl,
DAG.getVTList(DstVT, MVT::Other),
{Chain, Val}, Flags);
Chain = SInt.getValue(1);
SDValue IntOfs = DAG.getSelect(
dl, DstVT, Sel, DAG.getConstant(0, dl, DstVT), SignMask);
SDValue Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs);
return DAG.getMergeValues({Result, Chain}, dl);
} else {
SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128, Src, Cst);
True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
True = DAG.getNode(ISD::ADD, dl, MVT::i32, True, SignMask);
SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
return DAG.getSelectCC(dl, Src, Cst, True, False, ISD::SETGE);
}
}
}
return SDValue();
}
if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
return LowerFP_TO_INTDirectMove(Op, DAG, dl);
ReuseLoadInfo RLI;
LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
}
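// canReuseLoadAddress - Return true if the bits of Op can be fetched by
// a floating-point load, either because Op is itself a suitable load or
// because it is an FP_TO_INT that can be spilled to a stack slot; fill
// RLI with the information needed to build that load.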
bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
ReuseLoadInfo &RLI,
SelectionDAG &DAG,
ISD::LoadExtType ET) const {
if (Op->isStrictFPOpcode())
return false;
SDLoc dl(Op);
bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
(Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
if (ET == ISD::NON_EXTLOAD &&
(ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
isOperationLegalOrCustom(Op.getOpcode(),
Op.getOperand(0).getValueType())) {
LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
return true;
}
LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
LD->isNonTemporal())
return false;
if (LD->getMemoryVT() != MemVT)
return false;
if (!isTypeLegal(LD->getValueType(0)))
return false;
RLI.Ptr = LD->getBasePtr();
if (LD->isIndexed() && !LD->getOffset().isUndef()) {
assert(LD->getAddressingMode() == ISD::PRE_INC &&
"Non-pre-inc AM on PPC?");
RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
LD->getOffset());
}
RLI.Chain = LD->getChain();
RLI.MPI = LD->getPointerInfo();
RLI.IsDereferenceable = LD->isDereferenceable();
RLI.IsInvariant = LD->isInvariant();
RLI.Alignment = LD->getAlign();
RLI.AAInfo = LD->getAAInfo();
RLI.Ranges = LD->getRanges();
RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
return true;
}
void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
SDValue NewResChain,
SelectionDAG &DAG) const {
if (!ResChain)
return;
SDLoc dl(NewResChain);
SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
NewResChain, DAG.getUNDEF(MVT::Other));
assert(TF.getNode() != NewResChain.getNode() &&
"A new TF really is required here");
DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
}
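// A direct move from a GPR to an FP/vector register is profitable unless
// the source value comes from a load whose only users are int-to-FP
// conversions; in that case the value is better loaded into the FP
// register directly.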
bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
SDNode *Origin = Op.getOperand(0).getNode();
if (Origin->getOpcode() != ISD::LOAD)
return true;
MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
return true;
for (SDNode::use_iterator UI = Origin->use_begin(),
UE = Origin->use_end();
UI != UE; ++UI) {
if (UI.getUse().get().getResNo() != 0)
continue;
if (UI->getOpcode() != ISD::SINT_TO_FP &&
UI->getOpcode() != ISD::UINT_TO_FP &&
UI->getOpcode() != ISD::STRICT_SINT_TO_FP &&
UI->getOpcode() != ISD::STRICT_UINT_TO_FP)
return true;
}
return false;
}
static SDValue convertIntToFP(SDValue Op, SDValue Src, SelectionDAG &DAG,
const PPCSubtarget &Subtarget,
SDValue Chain = SDValue()) {
bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
SDLoc dl(Op);
SDNodeFlags Flags;
Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
bool IsSingle = Op.getValueType() == MVT::f32 && Subtarget.hasFPCVT();
unsigned ConvOpc = IsSingle ? (IsSigned ? PPCISD::FCFIDS : PPCISD::FCFIDUS)
: (IsSigned ? PPCISD::FCFID : PPCISD::FCFIDU);
EVT ConvTy = IsSingle ? MVT::f32 : MVT::f64;
if (Op->isStrictFPOpcode()) {
if (!Chain)
Chain = Op.getOperand(0);
return DAG.getNode(getPPCStrictOpcode(ConvOpc), dl,
DAG.getVTList(ConvTy, MVT::Other), {Chain, Src}, Flags);
} else
return DAG.getNode(ConvOpc, dl, ConvTy, Src);
}
SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
SelectionDAG &DAG,
const SDLoc &dl) const {
assert((Op.getValueType() == MVT::f32 ||
Op.getValueType() == MVT::f64) &&
"Invalid floating point type as target of conversion");
assert(Subtarget.hasFPCVT() &&
"Int to FP conversions with direct moves require FPCVT");
SDValue Src = Op.getOperand(Op->isStrictFPOpcode() ? 1 : 0);
bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
bool Signed = Op.getOpcode() == ISD::SINT_TO_FP ||
Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
unsigned MovOpc = (WordInt && !Signed) ? PPCISD::MTVSRZ : PPCISD::MTVSRA;
SDValue Mov = DAG.getNode(MovOpc, dl, MVT::f64, Src);
return convertIntToFP(Op, Mov, DAG, Subtarget);
}
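// Widen a sub-128-bit vector to a full 128-bit vector register by
// concatenating it with enough undef vectors of the same type.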
static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {
EVT VecVT = Vec.getValueType();
assert(VecVT.isVector() && "Expected a vector type.");
assert(VecVT.getSizeInBits() < 128 && "Vector is already full width.");
EVT EltVT = VecVT.getVectorElementType();
unsigned WideNumElts = 128 / EltVT.getSizeInBits();
EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements();
SmallVector<SDValue, 16> Ops(NumConcat);
Ops[0] = Vec;
SDValue UndefVec = DAG.getUNDEF(VecVT);
for (unsigned i = 1; i < NumConcat; ++i)
Ops[i] = UndefVec;
return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
}
SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
const SDLoc &dl) const {
bool IsStrict = Op->isStrictFPOpcode();
unsigned Opc = Op.getOpcode();
SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP ||
Opc == ISD::STRICT_UINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP) &&
"Unexpected conversion type");
assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
"Supports conversions to v2f64/v4f32 only.");
SDNodeFlags Flags;
Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
bool SignedConv = Opc == ISD::SINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP;
bool FourEltRes = Op.getValueType() == MVT::v4f32;
SDValue Wide = widenVec(DAG, Src, dl);
EVT WideVT = Wide.getValueType();
unsigned WideNumElts = WideVT.getVectorNumElements();
MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;
SmallVector<int, 16> ShuffV;
for (unsigned i = 0; i < WideNumElts; ++i)
ShuffV.push_back(i + WideNumElts);
int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
int SaveElts = FourEltRes ? 4 : 2;
if (Subtarget.isLittleEndian())
for (int i = 0; i < SaveElts; i++)
ShuffV[i * Stride] = i;
else
for (int i = 1; i <= SaveElts; i++)
ShuffV[i * Stride - 1] = i - 1;
SDValue ShuffleSrc2 =
SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);
SDValue Extend;
if (SignedConv) {
Arrange = DAG.getBitcast(IntermediateVT, Arrange);
EVT ExtVT = Src.getValueType();
if (Subtarget.hasP9Altivec())
ExtVT = EVT::getVectorVT(*DAG.getContext(), WideVT.getVectorElementType(),
IntermediateVT.getVectorNumElements());
Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
DAG.getValueType(ExtVT));
} else
Extend = DAG.getNode(ISD::BITCAST, dl, IntermediateVT, Arrange);
if (IsStrict)
return DAG.getNode(Opc, dl, DAG.getVTList(Op.getValueType(), MVT::Other),
{Op.getOperand(0), Extend}, Flags);
return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
}
SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
bool IsStrict = Op->isStrictFPOpcode();
SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
SDNodeFlags Flags;
Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
EVT InVT = Src.getValueType();
EVT OutVT = Op.getValueType();
if (OutVT.isVector() && OutVT.isFloatingPoint() &&
isOperationCustom(Op.getOpcode(), InVT))
return LowerINT_TO_FPVector(Op, DAG, dl);
if (Op.getValueType() == MVT::f128)
return Subtarget.hasP9Vector() ? Op : SDValue();
if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
return SDValue();
if (Src.getValueType() == MVT::i1) {
SDValue Sel = DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Src,
DAG.getConstantFP(1.0, dl, Op.getValueType()),
DAG.getConstantFP(0.0, dl, Op.getValueType()));
if (IsStrict)
return DAG.getMergeValues({Sel, Chain}, dl);
else
return Sel;
}
if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
Subtarget.isPPC64() && Subtarget.hasFPCVT())
return LowerINT_TO_FPDirectMove(Op, DAG, dl);
assert((IsSigned || Subtarget.hasFPCVT()) &&
"UINT_TO_FP is supported only with FPCVT");
if (Src.getValueType() == MVT::i64) {
SDValue SINT = Src;
if (Op.getValueType() == MVT::f32 &&
!Subtarget.hasFPCVT() &&
!DAG.getTarget().Options.UnsafeFPMath) {
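// Without FCFIDS, an i64->f32 conversion goes i64->f64->f32 and is
// rounded twice, which can differ from a single correctly-rounded
// i64->f32 conversion. If the value needs more than the 53 bits an f64
// holds exactly (tested by the arithmetic shift by 53 below), round it
// to odd at bit 11: clear the low 11 bits and OR their sticky value into
// bit 11. The i64->f64 step then becomes exact, leaving only the final
// f64->f32 rounding.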
SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
SINT, DAG.getConstant(2047, dl, MVT::i64));
Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
Round, DAG.getConstant(2047, dl, MVT::i64));
Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
Round = DAG.getNode(ISD::AND, dl, MVT::i64,
Round, DAG.getConstant(-2048, dl, MVT::i64));
SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
SINT, DAG.getConstant(53, dl, MVT::i32));
Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
Cond, DAG.getConstant(1, dl, MVT::i64));
Cond = DAG.getSetCC(
dl,
getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);
SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
}
ReuseLoadInfo RLI;
SDValue Bits;
MachineFunction &MF = DAG.getMachineFunction();
if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
} else if (Subtarget.hasLFIWAX() &&
canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
MachineMemOperand *MMO =
MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
RLI.Alignment, RLI.AAInfo, RLI.Ranges);
SDValue Ops[] = { RLI.Chain, RLI.Ptr };
Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
DAG.getVTList(MVT::f64, MVT::Other),
Ops, MVT::i32, MMO);
spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
} else if (Subtarget.hasFPCVT() &&
canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
MachineMemOperand *MMO =
MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
RLI.Alignment, RLI.AAInfo, RLI.Ranges);
SDValue Ops[] = { RLI.Chain, RLI.Ptr };
Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
DAG.getVTList(MVT::f64, MVT::Other),
Ops, MVT::i32, MMO);
spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
} else if (((Subtarget.hasLFIWAX() &&
SINT.getOpcode() == ISD::SIGN_EXTEND) ||
(Subtarget.hasFPCVT() &&
SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
SINT.getOperand(0).getValueType() == MVT::i32) {
MachineFrameInfo &MFI = MF.getFrameInfo();
EVT PtrVT = getPointerTy(DAG.getDataLayout());
int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
SDValue Store = DAG.getStore(Chain, dl, SINT.getOperand(0), FIdx,
MachinePointerInfo::getFixedStack(
DAG.getMachineFunction(), FrameIdx));
Chain = Store;
assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
"Expected an i32 store");
RLI.Ptr = FIdx;
RLI.Chain = Chain;
RLI.MPI =
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
RLI.Alignment = Align(4);
MachineMemOperand *MMO =
MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
RLI.Alignment, RLI.AAInfo, RLI.Ranges);
SDValue Ops[] = { RLI.Chain, RLI.Ptr };
Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
PPCISD::LFIWZX : PPCISD::LFIWAX,
dl, DAG.getVTList(MVT::f64, MVT::Other),
Ops, MVT::i32, MMO);
Chain = Bits.getValue(1);
} else
Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
SDValue FP = convertIntToFP(Op, Bits, DAG, Subtarget, Chain);
if (IsStrict)
Chain = FP.getValue(1);
if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
if (IsStrict)
FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl,
DAG.getVTList(MVT::f32, MVT::Other),
{Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
else
FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
DAG.getIntPtrConstant(0, dl));
}
return FP;
}
assert(Src.getValueType() == MVT::i32 &&
"Unhandled INT_TO_FP type in custom expander!");
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
EVT PtrVT = getPointerTy(MF.getDataLayout());
SDValue Ld;
if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
ReuseLoadInfo RLI;
bool ReusingLoad;
if (!(ReusingLoad = canReuseLoadAddress(Src, MVT::i32, RLI, DAG))) {
int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
SDValue Store = DAG.getStore(Chain, dl, Src, FIdx,
MachinePointerInfo::getFixedStack(
DAG.getMachineFunction(), FrameIdx));
Chain = Store;
assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
"Expected an i32 store");
RLI.Ptr = FIdx;
RLI.Chain = Chain;
RLI.MPI =
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
RLI.Alignment = Align(4);
}
MachineMemOperand *MMO =
MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
RLI.Alignment, RLI.AAInfo, RLI.Ranges);
SDValue Ops[] = { RLI.Chain, RLI.Ptr };
Ld = DAG.getMemIntrinsicNode(IsSigned ? PPCISD::LFIWAX : PPCISD::LFIWZX, dl,
DAG.getVTList(MVT::f64, MVT::Other), Ops,
MVT::i32, MMO);
Chain = Ld.getValue(1);
if (ReusingLoad)
spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
} else {
assert(Subtarget.isPPC64() &&
"i32->FP without LFIWAX supported only on PPC64");
int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, Src);
SDValue Store = DAG.getStore(
Chain, dl, Ext64, FIdx,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
Chain = Store;
Ld = DAG.getLoad(
MVT::f64, dl, Chain, FIdx,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
Chain = Ld.getValue(1);
}
SDValue FP = convertIntToFP(Op, Ld, DAG, Subtarget, Chain);
if (IsStrict)
Chain = FP.getValue(1);
if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
if (IsStrict)
FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl,
DAG.getVTList(MVT::f32, MVT::Other),
{Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
else
FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
DAG.getIntPtrConstant(0, dl));
}
return FP;
}
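/*
 The rounding mode is in bits 30:31 of FPSCR, and has the following
 settings:
   00 Round to nearest
   01 Round to 0
   10 Round to +inf
   11 Round to -inf

FLT_ROUNDS, on the other hand, expects the following:
  -1 Undefined
   0 Round to 0
   1 Round to nearest
   2 Round to +inf
   3 Round to -inf

To perform the conversion, we do:
  ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
*/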
SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
MachineFunction &MF = DAG.getMachineFunction();
EVT VT = Op.getValueType();
EVT PtrVT = getPointerTy(MF.getDataLayout());
SDValue Chain = Op.getOperand(0);
SDValue MFFS = DAG.getNode(PPCISD::MFFS, dl, {MVT::f64, MVT::Other}, Chain);
Chain = MFFS.getValue(1);
SDValue CWD;
if (isTypeLegal(MVT::i64)) {
CWD = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
DAG.getNode(ISD::BITCAST, dl, MVT::i64, MFFS));
} else {
int SSFI = MF.getFrameInfo().CreateStackObject(8, Align(8), false);
SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
Chain = DAG.getStore(Chain, dl, MFFS, StackSlot, MachinePointerInfo());
assert(hasBigEndianPartOrdering(MVT::i64, MF.getDataLayout()) &&
"Stack slot adjustment is valid only on big endian subtargets!");
SDValue Four = DAG.getConstant(4, dl, PtrVT);
SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
CWD = DAG.getLoad(MVT::i32, dl, Chain, Addr, MachinePointerInfo());
Chain = CWD.getValue(1);
}
SDValue CWD1 =
DAG.getNode(ISD::AND, dl, MVT::i32,
CWD, DAG.getConstant(3, dl, MVT::i32));
SDValue CWD2 =
DAG.getNode(ISD::SRL, dl, MVT::i32,
DAG.getNode(ISD::AND, dl, MVT::i32,
DAG.getNode(ISD::XOR, dl, MVT::i32,
CWD, DAG.getConstant(3, dl, MVT::i32)),
DAG.getConstant(3, dl, MVT::i32)),
DAG.getConstant(1, dl, MVT::i32));
SDValue RetVal =
DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
RetVal =
DAG.getNode((VT.getSizeInBits() < 16 ? ISD::TRUNCATE : ISD::ZERO_EXTEND),
dl, VT, RetVal);
return DAG.getMergeValues({RetVal, Chain}, dl);
}
SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
unsigned BitWidth = VT.getSizeInBits();
SDLoc dl(Op);
assert(Op.getNumOperands() == 3 &&
VT == Op.getOperand(1).getValueType() &&
"Unexpected SHL!");
SDValue Lo = Op.getOperand(0);
SDValue Hi = Op.getOperand(1);
SDValue Amt = Op.getOperand(2);
EVT AmtVT = Amt.getValueType();
SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
DAG.getConstant(BitWidth, dl, AmtVT), Amt);
SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
DAG.getConstant(-BitWidth, dl, AmtVT));
SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
SDValue OutOps[] = { OutLo, OutHi };
return DAG.getMergeValues(OutOps, dl);
}
SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
SDLoc dl(Op);
unsigned BitWidth = VT.getSizeInBits();
assert(Op.getNumOperands() == 3 &&
VT == Op.getOperand(1).getValueType() &&
"Unexpected SRL!");
SDValue Lo = Op.getOperand(0);
SDValue Hi = Op.getOperand(1);
SDValue Amt = Op.getOperand(2);
EVT AmtVT = Amt.getValueType();
SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
DAG.getConstant(BitWidth, dl, AmtVT), Amt);
SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
DAG.getConstant(-BitWidth, dl, AmtVT));
SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
SDValue OutOps[] = { OutLo, OutHi };
return DAG.getMergeValues(OutOps, dl);
}
SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
SDLoc dl(Op);
EVT VT = Op.getValueType();
unsigned BitWidth = VT.getSizeInBits();
assert(Op.getNumOperands() == 3 &&
VT == Op.getOperand(1).getValueType() &&
"Unexpected SRA!");
SDValue Lo = Op.getOperand(0);
SDValue Hi = Op.getOperand(1);
SDValue Amt = Op.getOperand(2);
EVT AmtVT = Amt.getValueType();
SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
DAG.getConstant(BitWidth, dl, AmtVT), Amt);
SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
DAG.getConstant(-BitWidth, dl, AmtVT));
SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
Tmp4, Tmp6, ISD::SETLE);
SDValue OutOps[] = { OutLo, OutHi };
return DAG.getMergeValues(OutOps, dl);
}
SDValue PPCTargetLowering::LowerFunnelShift(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
EVT VT = Op.getValueType();
unsigned BitWidth = VT.getSizeInBits();
bool IsFSHL = Op.getOpcode() == ISD::FSHL;
SDValue X = Op.getOperand(0);
SDValue Y = Op.getOperand(1);
SDValue Z = Op.getOperand(2);
EVT AmtVT = Z.getValueType();
Z = DAG.getNode(ISD::AND, dl, AmtVT, Z,
DAG.getConstant(BitWidth - 1, dl, AmtVT));
SDValue SubZ =
DAG.getNode(ISD::SUB, dl, AmtVT, DAG.getConstant(BitWidth, dl, AmtVT), Z);
X = DAG.getNode(PPCISD::SHL, dl, VT, X, IsFSHL ? Z : SubZ);
Y = DAG.getNode(PPCISD::SRL, dl, VT, Y, IsFSHL ? SubZ : Z);
return DAG.getNode(ISD::OR, dl, VT, X, Y);
}
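/// getCanonicalConstSplat - Build a canonical splat immediate of Val with an
/// element size of SplatSize.  Cast the result to VT.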
static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT,
SelectionDAG &DAG, const SDLoc &dl) {
static const MVT VTys[] = { // canonical VT to use for each size.
MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
};
EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
if (Val == ((1LLU << (SplatSize * 8)) - 1)) {
SplatSize = 1;
Val = 0xFF;
}
EVT CanonicalVT = VTys[SplatSize-1];
return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
}
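/// BuildIntrinsicOp - Return a unary operator intrinsic node with the
/// specified intrinsic ID.  The overloads below build the binary and
/// ternary forms.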
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
const SDLoc &dl, EVT DestVT = MVT::Other) {
if (DestVT == MVT::Other) DestVT = Op.getValueType();
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
DAG.getConstant(IID, dl, MVT::i32), Op);
}
static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
SelectionDAG &DAG, const SDLoc &dl,
EVT DestVT = MVT::Other) {
if (DestVT == MVT::Other) DestVT = LHS.getValueType();
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
}
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
EVT DestVT = MVT::Other) {
if (DestVT == MVT::Other) DestVT = Op0.getValueType();
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
}
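/// BuildVSLDOI - Return a VECTOR_SHUFFLE that performs a vsldoi of the
/// specified amount.  The result has the specified value type.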
static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
SelectionDAG &DAG, const SDLoc &dl) {
LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
int Ops[16];
for (unsigned i = 0; i != 16; ++i)
Ops[i] = i + Amt;
SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
return DAG.getNode(ISD::BITCAST, dl, VT, T);
}
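/// Do we have an efficient pattern in a .td file for this node?  Returns
/// true when the build_vector has a profitable type for the subtarget,
/// contains no undef elements, and is not a splat of a single load (the
/// load-and-splat case is handled more profitably elsewhere).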
static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
bool HasDirectMove,
bool HasP8Vector) {
EVT VecVT = V->getValueType(0);
bool RightType = VecVT == MVT::v2f64 ||
(HasP8Vector && VecVT == MVT::v4f32) ||
(HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
if (!RightType)
return false;
bool IsSplat = true;
bool IsLoad = false;
SDValue Op0 = V->getOperand(0);
if (V->isConstant())
return false;
for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
if (V->getOperand(i).isUndef())
return false;
if (V->getOperand(i).getOpcode() == ISD::LOAD ||
(V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
(V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
(V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
IsLoad = true;
if (V->getOperand(i) != Op0 ||
(!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
IsSplat = false;
}
return !(IsSplat && IsLoad);
}
SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
SDLoc dl(Op);
SDValue Op0 = Op->getOperand(0);
if ((Op.getValueType() != MVT::f128) ||
(Op0.getOpcode() != ISD::BUILD_PAIR) ||
(Op0.getOperand(0).getValueType() != MVT::i64) ||
(Op0.getOperand(1).getValueType() != MVT::i64))
return SDValue();
return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
Op0.getOperand(1));
}
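// Look through bitcasts (and any scalar_to_vector wrapper) to find a normal,
// non-extending load feeding Op.  IsPermuted reports whether the value came
// through a SCALAR_TO_VECTOR_PERMUTED node.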
static const SDValue *getNormalLoadInput(const SDValue &Op, bool &IsPermuted) {
const SDValue *InputLoad = &Op;
while (InputLoad->getOpcode() == ISD::BITCAST)
InputLoad = &InputLoad->getOperand(0);
if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR ||
InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED) {
IsPermuted = InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED;
InputLoad = &InputLoad->getOperand(0);
}
if (InputLoad->getOpcode() != ISD::LOAD)
return nullptr;
LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
return ISD::isNormalLoad(LD) ? InputLoad : nullptr;
}
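// Convert the argument APFloat to single precision if doing so loses no
// information and the result is not a denormal; on success the argument is
// replaced with the converted value.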
bool llvm::convertToNonDenormSingle(APFloat &ArgAPFloat) {
APFloat APFloatToConvert = ArgAPFloat;
bool LosesInfo = true;
APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
&LosesInfo);
bool Success = (!LosesInfo && !APFloatToConvert.isDenormal());
if (Success)
ArgAPFloat = APFloatToConvert;
return Success;
}
bool llvm::convertToNonDenormSingle(APInt &ArgAPInt) {
double DpValue = ArgAPInt.bitsToDouble();
APFloat APFloatDp(DpValue);
bool Success = convertToNonDenormSingle(APFloatDp);
if (Success)
ArgAPInt = APFloatDp.bitcastToAPInt();
return Success;
}
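// Nondestructive check for convertToNonDenormSingle.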
bool llvm::checkConvertToNonDenormSingle(APFloat &ArgAPFloat) {
APFloat APFloatToConvert = ArgAPFloat;
bool LosesInfo = true;
APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
&LosesInfo);
return (!LosesInfo && !APFloatToConvert.isDenormal());
}
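// Check whether the build_vector's source operand is a load that a
// load-and-splat node can consume; selects ZEXT_/SEXT_LD_SPLAT for v2i64
// splats of extending i32 loads, otherwise leaves Opcode untouched.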
static bool isValidSplatLoad(const PPCSubtarget &Subtarget, const SDValue &Op,
unsigned &Opcode) {
LoadSDNode *InputNode = dyn_cast<LoadSDNode>(Op.getOperand(0));
if (!InputNode || !Subtarget.hasVSX() || !ISD::isUNINDEXEDLoad(InputNode))
return false;
EVT Ty = Op->getValueType(0);
if ((Ty == MVT::v2f64 || Ty == MVT::v4f32 || Ty == MVT::v4i32) &&
ISD::isNON_EXTLoad(InputNode))
return true;
EVT MemVT = InputNode->getMemoryVT();
if ((Ty == MVT::v8i16 || Ty == MVT::v16i8) && ISD::isEXTLoad(InputNode) &&
(MemVT == Ty.getVectorElementType()))
return true;
if (Ty == MVT::v2i64) {
if (MemVT == MVT::i32) {
if (ISD::isZEXTLoad(InputNode))
Opcode = PPCISD::ZEXT_LD_SPLAT;
if (ISD::isSEXTLoad(InputNode))
Opcode = PPCISD::SEXT_LD_SPLAT;
}
return true;
}
return false;
}
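// If this is a case we can't handle, return null and let the default
// expansion code take care of it.  If we CAN select this case, and if it
// selects to a single instruction, return Op.  Otherwise, if we can codegen
// this case more efficiently than a constant pool load, lower it to the
// sequence of ops that should be used.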
SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
APInt APSplatBits, APSplatUndef;
unsigned SplatBitSize;
bool HasAnyUndefs;
bool BVNIsConstantSplat =
BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
HasAnyUndefs, 0, !Subtarget.isLittleEndian());
if (BVNIsConstantSplat && (SplatBitSize == 64) &&
Subtarget.hasPrefixInstrs()) {
if ((Op->getValueType(0) == MVT::v2f64) &&
convertToNonDenormSingle(APSplatBits)) {
SDValue SplatNode = DAG.getNode(
PPCISD::XXSPLTI_SP_TO_DP, dl, MVT::v2f64,
DAG.getTargetConstant(APSplatBits.getZExtValue(), dl, MVT::i32));
return DAG.getBitcast(Op.getValueType(), SplatNode);
} else {
uint32_t Hi =
(uint32_t)((APSplatBits.getZExtValue() & 0xFFFFFFFF00000000LL) >> 32);
uint32_t Lo =
(uint32_t)(APSplatBits.getZExtValue() & 0xFFFFFFFF);
SDValue SplatNode = DAG.getUNDEF(MVT::v2i64);
if (!Hi || !Lo)
SplatNode = DAG.getTargetConstant(0, dl, MVT::v2i64);
if (Hi)
SplatNode = DAG.getNode(
PPCISD::XXSPLTI32DX, dl, MVT::v2i64, SplatNode,
DAG.getTargetConstant(0, dl, MVT::i32),
DAG.getTargetConstant(Hi, dl, MVT::i32));
if (Lo)
SplatNode =
DAG.getNode(PPCISD::XXSPLTI32DX, dl, MVT::v2i64, SplatNode,
DAG.getTargetConstant(1, dl, MVT::i32),
DAG.getTargetConstant(Lo, dl, MVT::i32));
return DAG.getBitcast(Op.getValueType(), SplatNode);
}
}
if (!BVNIsConstantSplat || SplatBitSize > 32) {
unsigned NewOpcode = PPCISD::LD_SPLAT;
if (DAG.isSplatValue(Op, true) &&
isValidSplatLoad(Subtarget, Op, NewOpcode)) {
const SDValue *InputLoad = &Op.getOperand(0);
LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
unsigned MemorySize = LD->getMemoryVT().getScalarSizeInBits();
unsigned ElementSize =
MemorySize * ((NewOpcode == PPCISD::LD_SPLAT) ? 1 : 2);
assert(((ElementSize == 2 * MemorySize)
? (NewOpcode == PPCISD::ZEXT_LD_SPLAT ||
NewOpcode == PPCISD::SEXT_LD_SPLAT)
: (NewOpcode == PPCISD::LD_SPLAT)) &&
"Unmatched element size and opcode!\n");
unsigned NumUsesOfInputLD = 128 / ElementSize;
for (SDValue BVInOp : Op->ops())
if (BVInOp.isUndef())
NumUsesOfInputLD--;
if (NumUsesOfInputLD == 1 &&
(Op->getValueType(0) == MVT::v2i64 && NewOpcode != PPCISD::LD_SPLAT &&
!Subtarget.isLittleEndian() && Subtarget.hasVSX() &&
Subtarget.hasLFIWAX()))
return SDValue();
if (NumUsesOfInputLD == 1 && Subtarget.isLittleEndian() &&
Subtarget.isISA3_1() && ElementSize <= 16)
return SDValue();
assert(NumUsesOfInputLD > 0 && "No uses of input LD of a build_vector?");
if (InputLoad->getNode()->hasNUsesOfValue(NumUsesOfInputLD, 0) &&
Subtarget.hasVSX()) {
SDValue Ops[] = {
LD->getChain(), LD->getBasePtr(), DAG.getValueType(Op.getValueType()) };
SDValue LdSplt = DAG.getMemIntrinsicNode(
NewOpcode, dl, DAG.getVTList(Op.getValueType(), MVT::Other), Ops,
LD->getMemoryVT(), LD->getMemOperand());
DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1),
LdSplt.getValue(1));
return LdSplt;
}
}
if (Subtarget.hasVSX() && Subtarget.isPPC64() &&
haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
Subtarget.hasP8Vector()))
return Op;
return SDValue();
}
uint64_t SplatBits = APSplatBits.getZExtValue();
uint64_t SplatUndef = APSplatUndef.getZExtValue();
unsigned SplatSize = SplatBitSize / 8;
if (SplatBits == 0) {
if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
}
return Op;
}
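// We have XXSPLTIW for constant splats four bytes wide.
// Given that the vector length is a multiple of 4, 2-byte splats can be
// replaced with 4-byte splats that share the same byte pattern.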
if (Subtarget.hasPrefixInstrs() && SplatSize == 2)
return getCanonicalConstSplat(SplatBits | (SplatBits << 16), SplatSize * 2,
Op.getValueType(), DAG, dl);
if (Subtarget.hasPrefixInstrs() && SplatSize == 4)
return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
dl);
if (Subtarget.hasP9Vector() && SplatSize == 1)
return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
dl);
int32_t SextVal = (int32_t(SplatBits << (32 - SplatBitSize)) >>
(32 - SplatBitSize));
if (SextVal >= -16 && SextVal <= 15)
return getCanonicalConstSplat(SextVal, SplatSize, Op.getValueType(), DAG,
dl);
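// Splats in [-32,31] that don't fit a single VSPLTIS* immediate are
// represented as PPCISD::VADD_SPLAT and expanded to a two-instruction
// sequence during instruction selection.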
if (SextVal >= -32 && SextVal <= 31) {
SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
EVT VT = (SplatSize == 1 ? MVT::v16i8 :
(SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
if (VT == Op.getValueType())
return RetVal;
else
return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
}
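// If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
// 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
// for fneg/fabs.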
if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
SDValue OnesV = getCanonicalConstSplat(-1, 4, MVT::v4i32, DAG, dl);
SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
OnesV, DAG, dl);
Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
}
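// Check to see if this is a wide variety of vsplti*, binop self cases.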
static const signed char SplatCsts[] = {
-1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
-8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
};
for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
int i = SplatCsts[idx];
unsigned TypeShiftAmt = i & (SplatBitSize-1);
if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
static const unsigned IIDs[] = { // Intrinsic to use for each size.
Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
Intrinsic::ppc_altivec_vslw
};
Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
}
if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
static const unsigned IIDs[] = { // Intrinsic to use for each size.
Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
Intrinsic::ppc_altivec_vsrw
};
Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
}
if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
static const unsigned IIDs[] = { // Intrinsic to use for each size.
Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
Intrinsic::ppc_altivec_vrlw
};
Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
}
if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
}
if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
}
if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
}
}
return SDValue();
}
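/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.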
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
SDValue RHS, SelectionDAG &DAG,
const SDLoc &dl) {
unsigned OpNum = (PFEntry >> 26) & 0x0F;
unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
enum {
OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
OP_VMRGHW,
OP_VMRGLW,
OP_VSPLTISW0,
OP_VSPLTISW1,
OP_VSPLTISW2,
OP_VSPLTISW3,
OP_VSLDOI4,
OP_VSLDOI8,
OP_VSLDOI12
};
if (OpNum == OP_COPY) {
if (LHSID == (1*9+2)*9+3) return LHS;
assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
return RHS;
}
SDValue OpLHS, OpRHS;
OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
int ShufIdxs[16];
switch (OpNum) {
default: llvm_unreachable("Unknown i32 permute!");
case OP_VMRGHW:
ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3;
ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7;
ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
break;
case OP_VMRGLW:
ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
break;
case OP_VSPLTISW0:
for (unsigned i = 0; i != 16; ++i)
ShufIdxs[i] = (i&3)+0;
break;
case OP_VSPLTISW1:
for (unsigned i = 0; i != 16; ++i)
ShufIdxs[i] = (i&3)+4;
break;
case OP_VSPLTISW2:
for (unsigned i = 0; i != 16; ++i)
ShufIdxs[i] = (i&3)+8;
break;
case OP_VSPLTISW3:
for (unsigned i = 0; i != 16; ++i)
ShufIdxs[i] = (i&3)+12;
break;
case OP_VSLDOI4:
return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
case OP_VSLDOI8:
return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
case OP_VSLDOI12:
return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
}
EVT VT = OpLHS.getValueType();
OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
return DAG.getNode(ISD::BITCAST, dl, VT, T);
}
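/// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
/// by the VINSERTB instruction introduced in ISA 3.0, else just return a
/// default SDValue.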
SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
SelectionDAG &DAG) const {
const unsigned BytesInVector = 16;
bool IsLE = Subtarget.isLittleEndian();
SDLoc dl(N);
SDValue V1 = N->getOperand(0);
SDValue V2 = N->getOperand(1);
unsigned ShiftElts = 0, InsertAtByte = 0;
bool Swap = false;
unsigned LittleEndianShifts[] = {8, 7, 6, 5, 4, 3, 2, 1,
0, 15, 14, 13, 12, 11, 10, 9};
unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
1, 2, 3, 4, 5, 6, 7, 8};
ArrayRef<int> Mask = N->getMask();
int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
bool FoundCandidate = false;
unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
for (unsigned i = 0; i < BytesInVector; ++i) {
unsigned CurrentElement = Mask[i];
if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
continue;
bool OtherElementsInOrder = true;
for (unsigned j = 0; j < BytesInVector; ++j) {
if (j == i)
continue;
int MaskOffset =
(!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
if (Mask[j] != OriginalOrder[j] + MaskOffset) {
OtherElementsInOrder = false;
break;
}
}
if (OtherElementsInOrder) {
if (V2.isUndef()) {
ShiftElts = 0;
Swap = false;
} else {
ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
: BigEndianShifts[CurrentElement & 0xF];
Swap = CurrentElement < BytesInVector;
}
InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
FoundCandidate = true;
break;
}
}
if (!FoundCandidate)
return SDValue();
if (Swap)
std::swap(V1, V2);
if (V2.isUndef())
V2 = V1;
if (ShiftElts) {
SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
DAG.getConstant(ShiftElts, dl, MVT::i32));
return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
DAG.getConstant(InsertAtByte, dl, MVT::i32));
}
return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
DAG.getConstant(InsertAtByte, dl, MVT::i32));
}
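/// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
/// by the VINSERTH instruction introduced in ISA 3.0, else just return a
/// default SDValue.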
SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
SelectionDAG &DAG) const {
const unsigned NumHalfWords = 8;
const unsigned BytesInVector = NumHalfWords * 2;
if (!isNByteElemShuffleMask(N, 2, 1))
return SDValue();
bool IsLE = Subtarget.isLittleEndian();
SDLoc dl(N);
SDValue V1 = N->getOperand(0);
SDValue V2 = N->getOperand(1);
unsigned ShiftElts = 0, InsertAtByte = 0;
bool Swap = false;
unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
uint32_t Mask = 0;
uint32_t OriginalOrderLow = 0x1234567;
uint32_t OriginalOrderHigh = 0x89ABCDEF;
for (unsigned i = 0; i < NumHalfWords; ++i) {
unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
}
bool FoundCandidate = false;
for (unsigned i = 0; i < NumHalfWords; ++i) {
unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
uint32_t MaskOtherElts = ~(0xF << MaskShift);
uint32_t TargetOrder = 0x0;
if (V2.isUndef()) {
ShiftElts = 0;
unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
TargetOrder = OriginalOrderLow;
Swap = false;
if (MaskOneElt == VINSERTHSrcElem &&
(Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
FoundCandidate = true;
break;
}
} else {
TargetOrder =
(MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
: BigEndianShifts[MaskOneElt & 0x7];
InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
Swap = MaskOneElt < NumHalfWords;
FoundCandidate = true;
break;
}
}
}
if (!FoundCandidate)
return SDValue();
if (Swap)
std::swap(V1, V2);
if (V2.isUndef())
V2 = V1;
SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
if (ShiftElts) {
SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
DAG.getConstant(InsertAtByte, dl, MVT::i32));
return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
}
SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
DAG.getConstant(InsertAtByte, dl, MVT::i32));
return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
}
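/// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
/// handled by the XXSPLTI32DX instruction introduced in ISA 3.1, otherwise
/// return a default SDValue.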
SDValue PPCTargetLowering::lowerToXXSPLTI32DX(ShuffleVectorSDNode *SVN,
SelectionDAG &DAG) const {
SDValue LHS = peekThroughBitcasts(SVN->getOperand(0));
SDValue RHS = peekThroughBitcasts(SVN->getOperand(1));
auto ShuffleMask = SVN->getMask();
SDValue VecShuffle(SVN, 0);
SDLoc DL(SVN);
if (!isNByteElemShuffleMask(SVN, 4, 1))
return SDValue();
if (RHS->getOpcode() != ISD::BUILD_VECTOR) {
std::swap(LHS, RHS);
VecShuffle = DAG.getCommutedVectorShuffle(*SVN);
ShuffleMask = cast<ShuffleVectorSDNode>(VecShuffle)->getMask();
}
BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
if (!BVN)
return SDValue();
APInt APSplatValue, APSplatUndef;
unsigned SplatBitSize;
bool HasAnyUndefs;
if (!BVN->isConstantSplat(APSplatValue, APSplatUndef, SplatBitSize,
HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
SplatBitSize > 32)
return SDValue();
SDValue Index;
bool IsLE = Subtarget.isLittleEndian();
if ((ShuffleMask[0] == 0 && ShuffleMask[8] == 8) &&
(ShuffleMask[4] % 4 == 0 && ShuffleMask[12] % 4 == 0 &&
ShuffleMask[4] > 15 && ShuffleMask[12] > 15))
Index = DAG.getTargetConstant(IsLE ? 0 : 1, DL, MVT::i32);
else if ((ShuffleMask[4] == 4 && ShuffleMask[12] == 12) &&
(ShuffleMask[0] % 4 == 0 && ShuffleMask[8] % 4 == 0 &&
ShuffleMask[0] > 15 && ShuffleMask[8] > 15))
Index = DAG.getTargetConstant(IsLE ? 1 : 0, DL, MVT::i32);
else
return SDValue();
unsigned SplatVal = APSplatValue.getZExtValue();
for (; SplatBitSize < 32; SplatBitSize <<= 1)
SplatVal |= (SplatVal << SplatBitSize);
SDValue SplatNode = DAG.getNode(
PPCISD::XXSPLTI32DX, DL, MVT::v2i64, DAG.getBitcast(MVT::v2i64, LHS),
Index, DAG.getTargetConstant(SplatVal, DL, MVT::i32));
return DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, SplatNode);
}
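// Lower ROTL of v1i128.  A byte-aligned rotate amount becomes a byte
// shuffle; otherwise expand to (or (shl x, amt), (srl x, 128 - amt)) on the
// underlying i128.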
SDValue PPCTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
assert(Op.getOpcode() == ISD::ROTL && "Should only be called for ISD::ROTL");
assert(Op.getValueType() == MVT::v1i128 &&
"Only set v1i128 as custom, other type shouldn't reach here!");
SDLoc dl(Op);
SDValue N0 = peekThroughBitcasts(Op.getOperand(0));
SDValue N1 = peekThroughBitcasts(Op.getOperand(1));
unsigned SHLAmt = N1.getConstantOperandVal(0);
if (SHLAmt % 8 == 0) {
std::array<int, 16> Mask;
std::iota(Mask.begin(), Mask.end(), 0);
std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end());
if (SDValue Shuffle =
DAG.getVectorShuffle(MVT::v16i8, dl, DAG.getBitcast(MVT::v16i8, N0),
DAG.getUNDEF(MVT::v16i8), Mask))
return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, Shuffle);
}
SDValue ArgVal = DAG.getBitcast(MVT::i128, N0);
SDValue SHLOp = DAG.getNode(ISD::SHL, dl, MVT::i128, ArgVal,
DAG.getConstant(SHLAmt, dl, MVT::i32));
SDValue SRLOp = DAG.getNode(ISD::SRL, dl, MVT::i128, ArgVal,
DAG.getConstant(128 - SHLAmt, dl, MVT::i32));
SDValue OROp = DAG.getNode(ISD::OR, dl, MVT::i128, SHLOp, SRLOp);
return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, OROp);
}
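/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If
/// this is a shuffle we can handle in a single instruction, return it.
/// Otherwise, lower it into a vperm node.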
SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
if (SDValue NewShuffle = combineVectorShuffle(SVOp, DAG)) {
if (!isa<ShuffleVectorSDNode>(NewShuffle))
return NewShuffle;
Op = NewShuffle;
SVOp = cast<ShuffleVectorSDNode>(Op);
V1 = Op.getOperand(0);
V2 = Op.getOperand(1);
}
EVT VT = Op.getValueType();
bool isLittleEndian = Subtarget.isLittleEndian();
unsigned ShiftElts, InsertAtByte;
bool Swap = false;
bool IsPermutedLoad = false;
const SDValue *InputLoad = getNormalLoadInput(V1, IsPermutedLoad);
if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
(PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
InputLoad->hasOneUse()) {
bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
int SplatIdx =
PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG);
if (IsPermutedLoad) {
assert((isLittleEndian || IsFourByte) &&
"Unexpected size for permuted load on big endian target");
SplatIdx += IsFourByte ? 2 : 1;
assert((SplatIdx < (IsFourByte ? 4 : 2)) &&
"Splat of a value outside of the loaded memory");
}
LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
uint64_t Offset = 0;
if (IsFourByte)
Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
else
Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;
if (LD->getValueType(0).getSizeInBits() == (IsFourByte ? 32 : 64))
Offset = 0;
SDValue BasePtr = LD->getBasePtr();
if (Offset != 0)
BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
BasePtr, DAG.getIntPtrConstant(Offset, dl));
SDValue Ops[] = {
LD->getChain(), BasePtr, DAG.getValueType(Op.getValueType()) };
SDVTList VTL =
DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other);
SDValue LdSplt =
DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL,
Ops, LD->getMemoryVT(), LD->getMemOperand());
DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1), LdSplt.getValue(1));
if (LdSplt.getValueType() != SVOp->getValueType(0))
LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt);
return LdSplt;
}
}
if (VT == MVT::v2i64 || VT == MVT::v2f64)
return Op;
if (Subtarget.hasP9Vector() &&
PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
isLittleEndian)) {
if (Swap)
std::swap(V1, V2);
SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
if (ShiftElts) {
SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
DAG.getConstant(ShiftElts, dl, MVT::i32));
SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
DAG.getConstant(InsertAtByte, dl, MVT::i32));
return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
}
SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
DAG.getConstant(InsertAtByte, dl, MVT::i32));
return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
}
if (Subtarget.hasPrefixInstrs()) {
SDValue SplatInsertNode;
if ((SplatInsertNode = lowerToXXSPLTI32DX(SVOp, DAG)))
return SplatInsertNode;
}
if (Subtarget.hasP9Altivec()) {
SDValue NewISDNode;
if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
return NewISDNode;
if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
return NewISDNode;
}
if (Subtarget.hasVSX() &&
PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
if (Swap)
std::swap(V1, V2);
SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
SDValue Conv2 =
DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
DAG.getConstant(ShiftElts, dl, MVT::i32));
return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
}
if (Subtarget.hasVSX() &&
PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
if (Swap)
std::swap(V1, V2);
SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
SDValue Conv2 =
DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
DAG.getConstant(ShiftElts, dl, MVT::i32));
return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
}
if (Subtarget.hasP9Vector()) {
if (PPC::isXXBRHShuffleMask(SVOp)) {
SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv);
return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
} else if (PPC::isXXBRWShuffleMask(SVOp)) {
SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv);
return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
} else if (PPC::isXXBRDShuffleMask(SVOp)) {
SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv);
return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
} else if (PPC::isXXBRQShuffleMask(SVOp)) {
SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv);
return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
}
}
if (Subtarget.hasVSX()) {
if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG);
SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
DAG.getConstant(SplatIdx, dl, MVT::i32));
return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
}
if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
}
}
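// Cases that are handled by instructions that take permute immediates
// (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
// selected by the instruction selector.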
if (V2.isUndef()) {
if (PPC::isSplatShuffleMask(SVOp, 1) ||
PPC::isSplatShuffleMask(SVOp, 2) ||
PPC::isSplatShuffleMask(SVOp, 4) ||
PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
(Subtarget.hasP8Altivec() && (
PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
return Op;
}
}
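// Altivec has a variety of "shuffle immediates" that take two vector inputs
// and produce a permuted output.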
unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
(Subtarget.hasP8Altivec() && (
PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
return Op;
ArrayRef<int> PermMask = SVOp->getMask();
if (!DisablePerfectShuffle && !isLittleEndian) {
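// Check to see if this is a shuffle of 4-byte values.  If so, we can use our
// perfect shuffle table to emit an optimal permutation.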
unsigned PFIndexes[4];
bool isFourElementShuffle = true;
for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number.
unsigned EltNo = 8; // Start out undef.
for (unsigned j = 0; j != 4; ++j) { // Intra-element byte.
if (PermMask[i * 4 + j] < 0)
continue;
unsigned ByteSource = PermMask[i * 4 + j];
if ((ByteSource & 3) != j) {
isFourElementShuffle = false;
break;
}
if (EltNo == 8) {
EltNo = ByteSource / 4;
} else if (EltNo != ByteSource / 4) {
isFourElementShuffle = false;
break;
}
}
PFIndexes[i] = EltNo;
}
if (isFourElementShuffle) {
unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 +
PFIndexes[2] * 9 + PFIndexes[3];
unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
unsigned Cost = (PFEntry >> 30);
if (Cost < 3)
return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
}
}
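// Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
// vector that will get spilled to the constant pool.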
if (V2.isUndef()) V2 = V1;
EVT EltVT = V1.getValueType().getVectorElementType();
unsigned BytesPerElement = EltVT.getSizeInBits()/8;
SmallVector<SDValue, 16> ResultMask;
for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
for (unsigned j = 0; j != BytesPerElement; ++j)
if (isLittleEndian)
ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
dl, MVT::i32));
else
ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
MVT::i32));
}
ShufflesHandledWithVPERM++;
SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
LLVM_DEBUG(dbgs() << "Emitting a VPERM for the following shuffle:\n");
LLVM_DEBUG(SVOp->dump());
LLVM_DEBUG(dbgs() << "With the following permute control vector:\n");
LLVM_DEBUG(VPermMask.dump());
if (isLittleEndian)
return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
V2, V1, VPermMask);
else
return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
V1, V2, VPermMask);
}
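/// getVectorCompareInfo - Given an intrinsic, return false if it is not a
/// vector comparison.  If it is, return true and fill in CompareOpc/isDot
/// with information about the intrinsic.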
static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
bool &isDot, const PPCSubtarget &Subtarget) {
unsigned IntrinsicID =
cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
CompareOpc = -1;
isDot = false;
switch (IntrinsicID) {
default:
return false;
case Intrinsic::ppc_altivec_vcmpbfp_p:
CompareOpc = 966;
isDot = true;
break;
case Intrinsic::ppc_altivec_vcmpeqfp_p:
CompareOpc = 198;
isDot = true;
break;
case Intrinsic::ppc_altivec_vcmpequb_p:
CompareOpc = 6;
isDot = true;
break;
case Intrinsic::ppc_altivec_vcmpequh_p:
CompareOpc = 70;
isDot = true;
break;
case Intrinsic::ppc_altivec_vcmpequw_p:
CompareOpc = 134;
isDot = true;
break;
case Intrinsic::ppc_altivec_vcmpequd_p:
if (Subtarget.hasVSX() || Subtarget.hasP8Altivec()) {
CompareOpc = 199;
isDot = true;
} else
return false;
break;
case Intrinsic::ppc_altivec_vcmpneb_p:
case Intrinsic::ppc_altivec_vcmpneh_p:
case Intrinsic::ppc_altivec_vcmpnew_p:
case Intrinsic::ppc_altivec_vcmpnezb_p:
case Intrinsic::ppc_altivec_vcmpnezh_p:
case Intrinsic::ppc_altivec_vcmpnezw_p:
if (Subtarget.hasP9Altivec()) {
switch (IntrinsicID) {
default:
llvm_unreachable("Unknown comparison intrinsic.");
case Intrinsic::ppc_altivec_vcmpneb_p:
CompareOpc = 7;
break;
case Intrinsic::ppc_altivec_vcmpneh_p:
CompareOpc = 71;
break;
case Intrinsic::ppc_altivec_vcmpnew_p:
CompareOpc = 135;
break;
case Intrinsic::ppc_altivec_vcmpnezb_p:
CompareOpc = 263;
break;
case Intrinsic::ppc_altivec_vcmpnezh_p:
CompareOpc = 327;
break;
case Intrinsic::ppc_altivec_vcmpnezw_p:
CompareOpc = 391;
break;
}
isDot = true;
} else
return false;
break;
case Intrinsic::ppc_altivec_vcmpgefp_p:
CompareOpc = 454;
isDot = true;
break;
case Intrinsic::ppc_altivec_vcmpgtfp_p:
CompareOpc = 710;
isDot = true;
break;
case Intrinsic::ppc_altivec_vcmpgtsb_p:
CompareOpc = 774;
isDot = true;
break;
case Intrinsic::ppc_altivec_vcmpgtsh_p:
CompareOpc = 838;
isDot = true;
break;
case Intrinsic::ppc_altivec_vcmpgtsw_p:
CompareOpc = 902;
isDot = true;
break;
case Intrinsic::ppc_altivec_vcmpgtsd_p:
if (Subtarget.hasVSX() || Subtarget.hasP8Altivec()) {
CompareOpc = 967;
isDot = true;
} else
return false;
break;
case Intrinsic::ppc_altivec_vcmpgtub_p:
CompareOpc = 518;
isDot = true;
break;
case Intrinsic::ppc_altivec_vcmpgtuh_p:
CompareOpc = 582;
isDot = true;
break;
case Intrinsic::ppc_altivec_vcmpgtuw_p:
CompareOpc = 646;
isDot = true;
break;
case Intrinsic::ppc_altivec_vcmpgtud_p:
if (Subtarget.hasVSX() || Subtarget.hasP8Altivec()) {
CompareOpc = 711;
isDot = true;
} else
return false;
break;
case Intrinsic::ppc_altivec_vcmpequq:
case Intrinsic::ppc_altivec_vcmpgtsq:
case Intrinsic::ppc_altivec_vcmpgtuq:
if (!Subtarget.isISA3_1())
return false;
switch (IntrinsicID) {
default:
llvm_unreachable("Unknown comparison intrinsic.");
case Intrinsic::ppc_altivec_vcmpequq:
CompareOpc = 455;
break;
case Intrinsic::ppc_altivec_vcmpgtsq:
CompareOpc = 903;
break;
case Intrinsic::ppc_altivec_vcmpgtuq:
CompareOpc = 647;
break;
}
break;
case Intrinsic::ppc_vsx_xvcmpeqdp_p:
case Intrinsic::ppc_vsx_xvcmpgedp_p:
case Intrinsic::ppc_vsx_xvcmpgtdp_p:
case Intrinsic::ppc_vsx_xvcmpeqsp_p:
case Intrinsic::ppc_vsx_xvcmpgesp_p:
case Intrinsic::ppc_vsx_xvcmpgtsp_p:
if (Subtarget.hasVSX()) {
switch (IntrinsicID) {
case Intrinsic::ppc_vsx_xvcmpeqdp_p:
CompareOpc = 99;
break;
case Intrinsic::ppc_vsx_xvcmpgedp_p:
CompareOpc = 115;
break;
case Intrinsic::ppc_vsx_xvcmpgtdp_p:
CompareOpc = 107;
break;
case Intrinsic::ppc_vsx_xvcmpeqsp_p:
CompareOpc = 67;
break;
case Intrinsic::ppc_vsx_xvcmpgesp_p:
CompareOpc = 83;
break;
case Intrinsic::ppc_vsx_xvcmpgtsp_p:
CompareOpc = 75;
break;
}
isDot = true;
} else
return false;
break;
case Intrinsic::ppc_altivec_vcmpbfp:
CompareOpc = 966;
break;
case Intrinsic::ppc_altivec_vcmpeqfp:
CompareOpc = 198;
break;
case Intrinsic::ppc_altivec_vcmpequb:
CompareOpc = 6;
break;
case Intrinsic::ppc_altivec_vcmpequh:
CompareOpc = 70;
break;
case Intrinsic::ppc_altivec_vcmpequw:
CompareOpc = 134;
break;
case Intrinsic::ppc_altivec_vcmpequd:
if (Subtarget.hasP8Altivec())
CompareOpc = 199;
else
return false;
break;
case Intrinsic::ppc_altivec_vcmpneb:
case Intrinsic::ppc_altivec_vcmpneh:
case Intrinsic::ppc_altivec_vcmpnew:
case Intrinsic::ppc_altivec_vcmpnezb:
case Intrinsic::ppc_altivec_vcmpnezh:
case Intrinsic::ppc_altivec_vcmpnezw:
if (Subtarget.hasP9Altivec())
switch (IntrinsicID) {
default:
llvm_unreachable("Unknown comparison intrinsic.");
case Intrinsic::ppc_altivec_vcmpneb:
CompareOpc = 7;
break;
case Intrinsic::ppc_altivec_vcmpneh:
CompareOpc = 71;
break;
case Intrinsic::ppc_altivec_vcmpnew:
CompareOpc = 135;
break;
case Intrinsic::ppc_altivec_vcmpnezb:
CompareOpc = 263;
break;
case Intrinsic::ppc_altivec_vcmpnezh:
CompareOpc = 327;
break;
case Intrinsic::ppc_altivec_vcmpnezw:
CompareOpc = 391;
break;
}
else
return false;
break;
case Intrinsic::ppc_altivec_vcmpgefp:
CompareOpc = 454;
break;
case Intrinsic::ppc_altivec_vcmpgtfp:
CompareOpc = 710;
break;
case Intrinsic::ppc_altivec_vcmpgtsb:
CompareOpc = 774;
break;
case Intrinsic::ppc_altivec_vcmpgtsh:
CompareOpc = 838;
break;
case Intrinsic::ppc_altivec_vcmpgtsw:
CompareOpc = 902;
break;
case Intrinsic::ppc_altivec_vcmpgtsd:
if (Subtarget.hasP8Altivec())
CompareOpc = 967;
else
return false;
break;
case Intrinsic::ppc_altivec_vcmpgtub:
CompareOpc = 518;
break;
case Intrinsic::ppc_altivec_vcmpgtuh:
CompareOpc = 582;
break;
case Intrinsic::ppc_altivec_vcmpgtuw:
CompareOpc = 646;
break;
case Intrinsic::ppc_altivec_vcmpgtud:
if (Subtarget.hasP8Altivec())
CompareOpc = 711;
else
return false;
break;
case Intrinsic::ppc_altivec_vcmpequq_p:
case Intrinsic::ppc_altivec_vcmpgtsq_p:
case Intrinsic::ppc_altivec_vcmpgtuq_p:
if (!Subtarget.isISA3_1())
return false;
switch (IntrinsicID) {
default:
llvm_unreachable("Unknown comparison intrinsic.");
case Intrinsic::ppc_altivec_vcmpequq_p:
CompareOpc = 455;
break;
case Intrinsic::ppc_altivec_vcmpgtsq_p:
CompareOpc = 903;
break;
case Intrinsic::ppc_altivec_vcmpgtuq_p:
CompareOpc = 647;
break;
}
isDot = true;
break;
}
return true;
}
SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
unsigned IntrinsicID =
cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
SDLoc dl(Op);
switch (IntrinsicID) {
case Intrinsic::thread_pointer:
if (Subtarget.isPPC64())
return DAG.getRegister(PPC::X13, MVT::i64);
return DAG.getRegister(PPC::R2, MVT::i32);
case Intrinsic::ppc_mma_disassemble_acc:
case Intrinsic::ppc_vsx_disassemble_pair: {
int NumVecs = 2;
SDValue WideVec = Op.getOperand(1);
if (IntrinsicID == Intrinsic::ppc_mma_disassemble_acc) {
NumVecs = 4;
WideVec = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, WideVec);
}
SmallVector<SDValue, 4> RetOps;
for (int VecNo = 0; VecNo < NumVecs; VecNo++) {
SDValue Extract = DAG.getNode(
PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, WideVec,
DAG.getConstant(Subtarget.isLittleEndian() ? NumVecs - 1 - VecNo
: VecNo,
dl, getPointerTy(DAG.getDataLayout())));
RetOps.push_back(Extract);
}
return DAG.getMergeValues(RetOps, dl);
}
case Intrinsic::ppc_unpack_longdouble: {
auto *Idx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
assert(Idx && (Idx->getSExtValue() == 0 || Idx->getSExtValue() == 1) &&
"Argument of long double unpack must be 0 or 1!");
return DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Op.getOperand(1),
DAG.getConstant(!!(Idx->getSExtValue()), dl,
Idx->getValueType(0)));
}
case Intrinsic::ppc_compare_exp_lt:
case Intrinsic::ppc_compare_exp_gt:
case Intrinsic::ppc_compare_exp_eq:
case Intrinsic::ppc_compare_exp_uo: {
unsigned Pred;
switch (IntrinsicID) {
case Intrinsic::ppc_compare_exp_lt:
Pred = PPC::PRED_LT;
break;
case Intrinsic::ppc_compare_exp_gt:
Pred = PPC::PRED_GT;
break;
case Intrinsic::ppc_compare_exp_eq:
Pred = PPC::PRED_EQ;
break;
case Intrinsic::ppc_compare_exp_uo:
Pred = PPC::PRED_UN;
break;
}
return SDValue(
DAG.getMachineNode(
PPC::SELECT_CC_I4, dl, MVT::i32,
{SDValue(DAG.getMachineNode(PPC::XSCMPEXPDP, dl, MVT::i32,
Op.getOperand(1), Op.getOperand(2)),
0),
DAG.getConstant(1, dl, MVT::i32), DAG.getConstant(0, dl, MVT::i32),
DAG.getTargetConstant(Pred, dl, MVT::i32)}),
0);
}
case Intrinsic::ppc_test_data_class_d:
case Intrinsic::ppc_test_data_class_f: {
unsigned CmprOpc = PPC::XSTSTDCDP;
if (IntrinsicID == Intrinsic::ppc_test_data_class_f)
CmprOpc = PPC::XSTSTDCSP;
return SDValue(
DAG.getMachineNode(
PPC::SELECT_CC_I4, dl, MVT::i32,
{SDValue(DAG.getMachineNode(CmprOpc, dl, MVT::i32, Op.getOperand(2),
Op.getOperand(1)),
0),
DAG.getConstant(1, dl, MVT::i32), DAG.getConstant(0, dl, MVT::i32),
DAG.getTargetConstant(PPC::PRED_EQ, dl, MVT::i32)}),
0);
}
case Intrinsic::ppc_fnmsub: {
EVT VT = Op.getOperand(1).getValueType();
if (!Subtarget.hasVSX() || (!Subtarget.hasFloat128() && VT == MVT::f128))
return DAG.getNode(
ISD::FNEG, dl, VT,
DAG.getNode(ISD::FMA, dl, VT, Op.getOperand(1), Op.getOperand(2),
DAG.getNode(ISD::FNEG, dl, VT, Op.getOperand(3))));
return DAG.getNode(PPCISD::FNMSUB, dl, VT, Op.getOperand(1),
Op.getOperand(2), Op.getOperand(3));
}
case Intrinsic::ppc_convert_f128_to_ppcf128:
case Intrinsic::ppc_convert_ppcf128_to_f128: {
RTLIB::Libcall LC = IntrinsicID == Intrinsic::ppc_convert_ppcf128_to_f128
? RTLIB::CONVERT_PPCF128_F128
: RTLIB::CONVERT_F128_PPCF128;
MakeLibCallOptions CallOptions;
std::pair<SDValue, SDValue> Result =
makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(1), CallOptions,
dl, SDValue());
return Result.first;
}
case Intrinsic::ppc_maxfe:
case Intrinsic::ppc_maxfl:
case Intrinsic::ppc_maxfs:
case Intrinsic::ppc_minfe:
case Intrinsic::ppc_minfl:
case Intrinsic::ppc_minfs: {
EVT VT = Op.getValueType();
assert(
all_of(Op->ops().drop_front(4),
[VT](const SDUse &Use) { return Use.getValueType() == VT; }) &&
"ppc_[max|min]f[e|l|s] must have uniform type arguments");
(void)VT;
ISD::CondCode CC = ISD::SETGT;
if (IntrinsicID == Intrinsic::ppc_minfe ||
IntrinsicID == Intrinsic::ppc_minfl ||
IntrinsicID == Intrinsic::ppc_minfs)
CC = ISD::SETLT;
unsigned I = Op.getNumOperands() - 2, Cnt = I;
SDValue Res = Op.getOperand(I);
for (--I; Cnt != 0; --Cnt, I = (--I == 0 ? (Op.getNumOperands() - 1) : I)) {
Res =
DAG.getSelectCC(dl, Res, Op.getOperand(I), Res, Op.getOperand(I), CC);
}
return Res;
}
}
int CompareOpc;
bool isDot;
if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
return SDValue();
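// If this is a non-dot comparison, make the VCMP node and we are done.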
if (!isDot) {
SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
Op.getOperand(1), Op.getOperand(2),
DAG.getConstant(CompareOpc, dl, MVT::i32));
return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
}
// Create the PPCISD altivec 'dot' comparison node.
SDValue Ops[] = {
Op.getOperand(2), // LHS
Op.getOperand(3), // RHS
DAG.getConstant(CompareOpc, dl, MVT::i32)
};
EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops);
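// Now that we have the comparison, emit a copy from the CR to a GPR.
// This is flagged to the above dot comparison.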
SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
DAG.getRegister(PPC::CR6, MVT::i32),
CompNode.getValue(1));
// Unpack the result based on how the target uses it.
unsigned BitNo; // Bit # of CR6.
bool InvertBit; // Invert result?
switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
default: // Can't happen, don't crash on invalid number though.
case 0: // Return the value of the EQ bit of CR6.
BitNo = 0; InvertBit = false;
break;
case 1: // Return the inverted value of the EQ bit of CR6.
BitNo = 0; InvertBit = true;
break;
case 2: // Return the value of the LT bit of CR6.
BitNo = 2; InvertBit = false;
break;
case 3: // Return the inverted value of the LT bit of CR6.
BitNo = 2; InvertBit = true;
break;
}
Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
DAG.getConstant(1, dl, MVT::i32));
if (InvertBit)
Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
DAG.getConstant(1, dl, MVT::i32));
return Flags;
}
SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
SelectionDAG &DAG) const {
int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
SDLoc DL(Op);
switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
case Intrinsic::ppc_cfence: {
assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
SDValue Val = Op.getOperand(ArgStart + 1);
EVT Ty = Val.getValueType();
if (Ty == MVT::i128) {
Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64, Val);
}
return SDValue(
DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Val),
Op.getOperand(0)),
0);
}
default:
break;
}
return SDValue();
}
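// Lower scalar BSWAP64 to xxbrd: splat the value into a vector (MTVSRDD),
// byte-swap it (XXBRD), and move the swapped doubleword back out (MFVSRD).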
SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
SDLoc dl(Op);
if (!Subtarget.isPPC64())
return Op;
Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
Op.getOperand(0));
Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op);
int VectorIndex = 0;
if (Subtarget.isLittleEndian())
VectorIndex = 1;
Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
return Op;
}
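// ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
// compared to a value that is atomically loaded (atomic loads zero-extend).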
SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
SelectionDAG &DAG) const {
assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
"Expecting an atomic compare-and-swap here.");
SDLoc dl(Op);
auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
EVT MemVT = AtomicNode->getMemoryVT();
if (MemVT.getSizeInBits() >= 32)
return Op;
SDValue CmpOp = Op.getOperand(2);
auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
if (DAG.MaskedValueIsZero(CmpOp, HighBits))
return Op;
unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
SDValue NewCmpOp =
DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
DAG.getConstant(MaskVal, dl, MVT::i32));
SmallVector<SDValue, 4> Ops;
for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
Ops.push_back(AtomicNode->getOperand(i));
Ops[2] = NewCmpOp;
MachineMemOperand *MMO = AtomicNode->getMemOperand();
SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
auto NodeTy =
(MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
}
SDValue PPCTargetLowering::LowerATOMIC_LOAD_STORE(SDValue Op,
SelectionDAG &DAG) const {
AtomicSDNode *N = cast<AtomicSDNode>(Op.getNode());
EVT MemVT = N->getMemoryVT();
assert(MemVT.getSimpleVT() == MVT::i128 &&
"Expect quadword atomic operations");
SDLoc dl(N);
unsigned Opc = N->getOpcode();
switch (Opc) {
case ISD::ATOMIC_LOAD: {
SDVTList Tys = DAG.getVTList(MVT::i64, MVT::i64, MVT::Other);
SmallVector<SDValue, 4> Ops{
N->getOperand(0),
DAG.getConstant(Intrinsic::ppc_atomic_load_i128, dl, MVT::i32)};
for (int I = 1, E = N->getNumOperands(); I < E; ++I)
Ops.push_back(N->getOperand(I));
SDValue LoadedVal = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, Tys,
Ops, MemVT, N->getMemOperand());
SDValue ValLo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i128, LoadedVal);
SDValue ValHi =
DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i128, LoadedVal.getValue(1));
ValHi = DAG.getNode(ISD::SHL, dl, MVT::i128, ValHi,
DAG.getConstant(64, dl, MVT::i32));
SDValue Val =
DAG.getNode(ISD::OR, dl, {MVT::i128, MVT::Other}, {ValLo, ValHi});
return DAG.getNode(ISD::MERGE_VALUES, dl, {MVT::i128, MVT::Other},
{Val, LoadedVal.getValue(2)});
}
case ISD::ATOMIC_STORE: {
SDVTList Tys = DAG.getVTList(MVT::Other);
SmallVector<SDValue, 4> Ops{
N->getOperand(0),
DAG.getConstant(Intrinsic::ppc_atomic_store_i128, dl, MVT::i32)};
SDValue Val = N->getOperand(2);
SDValue ValLo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i64, Val);
SDValue ValHi = DAG.getNode(ISD::SRL, dl, MVT::i128, Val,
DAG.getConstant(64, dl, MVT::i32));
ValHi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i64, ValHi);
Ops.push_back(ValLo);
Ops.push_back(ValHi);
Ops.push_back(N->getOperand(1));
return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, dl, Tys, Ops, MemVT,
N->getMemOperand());
}
default:
llvm_unreachable("Unexpected atomic opcode");
}
}
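// Lower SCALAR_TO_VECTOR by storing the scalar to a 16-byte-aligned stack
// slot and loading the whole vector back from it.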
SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
EVT PtrVT = getPointerTy(DAG.getDataLayout());
SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
MachinePointerInfo());
return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
}
SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
SelectionDAG &DAG) const {
assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
"Should only be called for ISD::INSERT_VECTOR_ELT");
ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
EVT VT = Op.getValueType();
SDLoc dl(Op);
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
if (VT == MVT::v2f64 && C)
return Op;
if (Subtarget.hasP9Vector()) {
if ((VT == MVT::v4f32) && (V2.getValueType() == MVT::f32) &&
(isa<LoadSDNode>(V2))) {
SDValue BitcastVector = DAG.getBitcast(MVT::v4i32, V1);
SDValue BitcastLoad = DAG.getBitcast(MVT::i32, V2);
SDValue InsVecElt =
DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v4i32, BitcastVector,
BitcastLoad, Op.getOperand(2));
return DAG.getBitcast(MVT::v4f32, InsVecElt);
}
}
if (Subtarget.isISA3_1()) {
if ((VT == MVT::v2i64 || VT == MVT::v2f64) && !Subtarget.isPPC64())
return SDValue();
if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64)
return Op;
}
if (!C)
return SDValue();
if (VT == MVT::v8i16 || VT == MVT::v16i8) {
SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
unsigned InsertAtElement = C->getZExtValue();
unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
if (Subtarget.isLittleEndian()) {
InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
}
return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
DAG.getConstant(InsertAtByte, dl, MVT::i32));
}
return Op;
}
SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
SDValue LoadChain = LN->getChain();
SDValue BasePtr = LN->getBasePtr();
EVT VT = Op.getValueType();
if (VT != MVT::v256i1 && VT != MVT::v512i1)
return Op;
assert((VT != MVT::v512i1 || Subtarget.hasMMA()) &&
"Type unsupported without MMA");
assert((VT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
"Type unsupported without paired vector support");
Align Alignment = LN->getAlign();
SmallVector<SDValue, 4> Loads;
SmallVector<SDValue, 4> LoadChains;
unsigned NumVecs = VT.getSizeInBits() / 128;
for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
SDValue Load =
DAG.getLoad(MVT::v16i8, dl, LoadChain, BasePtr,
LN->getPointerInfo().getWithOffset(Idx * 16),
commonAlignment(Alignment, Idx * 16),
LN->getMemOperand()->getFlags(), LN->getAAInfo());
BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
DAG.getConstant(16, dl, BasePtr.getValueType()));
Loads.push_back(Load);
LoadChains.push_back(Load.getValue(1));
}
if (Subtarget.isLittleEndian()) {
std::reverse(Loads.begin(), Loads.end());
std::reverse(LoadChains.begin(), LoadChains.end());
}
SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
SDValue Value =
DAG.getNode(VT == MVT::v512i1 ? PPCISD::ACC_BUILD : PPCISD::PAIR_BUILD,
dl, VT, Loads);
SDValue RetOps[] = {Value, TF};
return DAG.getMergeValues(RetOps, dl);
}
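// Lower stores of v256i1/v512i1 symmetrically: an accumulator is first
// moved out to vector registers with XXMFACC, then each 16-byte piece is
// extracted with EXTRACT_VSX_REG and stored, again reversing the order on
// little-endian subtargets.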
SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
SDValue StoreChain = SN->getChain();
SDValue BasePtr = SN->getBasePtr();
SDValue Value = SN->getValue();
EVT StoreVT = Value.getValueType();
if (StoreVT != MVT::v256i1 && StoreVT != MVT::v512i1)
return Op;
assert((StoreVT != MVT::v512i1 || Subtarget.hasMMA()) &&
"Type unsupported without MMA");
assert((StoreVT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
"Type unsupported without paired vector support");
Align Alignment = SN->getAlign();
SmallVector<SDValue, 4> Stores;
unsigned NumVecs = 2;
if (StoreVT == MVT::v512i1) {
Value = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, Value);
NumVecs = 4;
}
for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
unsigned VecNum = Subtarget.isLittleEndian() ? NumVecs - 1 - Idx : Idx;
SDValue Elt = DAG.getNode(PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, Value,
DAG.getConstant(VecNum, dl, getPointerTy(DAG.getDataLayout())));
SDValue Store =
DAG.getStore(StoreChain, dl, Elt, BasePtr,
SN->getPointerInfo().getWithOffset(Idx * 16),
commonAlignment(Alignment, Idx * 16),
SN->getMemOperand()->getFlags(), SN->getAAInfo());
BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
DAG.getConstant(16, dl, BasePtr.getValueType()));
Stores.push_back(Store);
}
SDValue TF = DAG.getTokenFactor(dl, Stores);
return TF;
}
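// Custom lowering for vector multiplies that lack a single instruction:
// v4i32 is built from 16-bit Altivec multiply/multiply-sum operations,
// v16i8 from even/odd byte multiplies plus an interleaving shuffle.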
SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
SDLoc dl(Op);
if (Op.getValueType() == MVT::v4i32) {
SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
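// Build each 32-bit product from halfword multiplies: vmulouh gives the
// products of one pair of 16-bit halves, vmsumuhm against a copy of RHS
// rotated by 16 sums the two cross products, and the final result is
// LoProd + (HiProd << 16).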
SDValue Zero = getCanonicalConstSplat(0, 1, MVT::v4i32, DAG, dl);
SDValue Neg16 = getCanonicalConstSplat(-16, 4, MVT::v4i32, DAG, dl);
SDValue RHSSwap = BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
LHS, RHS, DAG, dl, MVT::v4i32);
SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
Neg16, DAG, dl);
return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
} else if (Op.getValueType() == MVT::v16i8) {
SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
bool isLittleEndian = Subtarget.isLittleEndian();
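// vmuleub/vmuloub produce 16-bit products of the even and odd byte lanes;
// the shuffle below interleaves the low-order bytes of those products back
// into a single v16i8 result, swapping operand order on little-endian.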
SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
LHS, RHS, DAG, dl, MVT::v8i16);
EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
LHS, RHS, DAG, dl, MVT::v8i16);
OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
int Ops[16];
for (unsigned i = 0; i != 8; ++i) {
if (isLittleEndian) {
Ops[i*2 ] = 2*i;
Ops[i*2+1] = 2*i+16;
} else {
Ops[i*2 ] = 2*i+1;
Ops[i*2+1] = 2*i+1+16;
}
}
if (isLittleEndian)
return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
else
return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
} else {
llvm_unreachable("Unknown mul to lower!");
}
}
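// FP_ROUND from f128 is only directly supported with Power9 vector
// instructions; without them, return SDValue() so the legalizer emits a
// libcall. All other cases are left unchanged.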
SDValue PPCTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
bool IsStrict = Op->isStrictFPOpcode();
if (Op.getOperand(IsStrict ? 1 : 0).getValueType() == MVT::f128 &&
!Subtarget.hasP9Vector())
return SDValue();
return Op;
}
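// Custom-lower v2f32 -> v2f64 FP_EXTEND by matching the source against
// shapes (extract_subvector of v4f32, a single load, or an FADD/FMUL/FSUB
// of loads) that can feed the FP_EXTEND_HALF node directly.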
SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
assert(Op.getOpcode() == ISD::FP_EXTEND &&
"Should only be called for ISD::FP_EXTEND");
if (Op.getValueType() != MVT::v2f64 ||
Op.getOperand(0).getValueType() != MVT::v2f32)
return SDValue();
SDLoc dl(Op);
SDValue Op0 = Op.getOperand(0);
switch (Op0.getOpcode()) {
default:
return SDValue();
case ISD::EXTRACT_SUBVECTOR: {
assert(Op0.getNumOperands() == 2 &&
isa<ConstantSDNode>(Op0->getOperand(1)) &&
"Node should have 2 operands with second one being a constant!");
if (Op0.getOperand(0).getValueType() != MVT::v4f32)
return SDValue();
int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
if (Idx % 2 != 0)
return SDValue();
int DWord = Idx >> 1;
if (Subtarget.isLittleEndian())
DWord ^= 0x1;
return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
}
case ISD::FADD:
case ISD::FMUL:
case ISD::FSUB: {
SDValue NewLoad[2];
for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
SDValue LdOp = Op0.getOperand(i);
if (LdOp.getOpcode() != ISD::LOAD)
return SDValue();
LoadSDNode *LD = cast<LoadSDNode>(LdOp);
SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
NewLoad[i] = DAG.getMemIntrinsicNode(
PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
LD->getMemoryVT(), LD->getMemOperand());
}
SDValue NewOp =
DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
NewLoad[1], Op0.getNode()->getFlags());
return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
DAG.getConstant(0, dl, MVT::i32));
}
case ISD::LOAD: {
LoadSDNode *LD = cast<LoadSDNode>(Op0);
SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
SDValue NewLd = DAG.getMemIntrinsicNode(
PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
LD->getMemoryVT(), LD->getMemOperand());
return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
DAG.getConstant(0, dl, MVT::i32));
}
}
llvm_unreachable("ERROR:Should return for all cases within swtich.");
}
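/// LowerOperation - Provide custom lowering hooks for some operations.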
SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
default: llvm_unreachable("Wasn't expecting to be able to lower this!");
case ISD::FPOW: return lowerPow(Op, DAG);
case ISD::FSIN: return lowerSin(Op, DAG);
case ISD::FCOS: return lowerCos(Op, DAG);
case ISD::FLOG: return lowerLog(Op, DAG);
case ISD::FLOG10: return lowerLog10(Op, DAG);
case ISD::FEXP: return lowerExp(Op, DAG);
case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
case ISD::JumpTable: return LowerJumpTable(Op, DAG);
case ISD::STRICT_FSETCC:
case ISD::STRICT_FSETCCS:
case ISD::SETCC: return LowerSETCC(Op, DAG);
case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
case ISD::INLINEASM:
case ISD::INLINEASM_BR: return LowerINLINEASM(Op, DAG);
case ISD::VASTART: return LowerVASTART(Op, DAG);
case ISD::VAARG: return LowerVAARG(Op, DAG);
case ISD::VACOPY: return LowerVACOPY(Op, DAG);
case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG);
case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
case ISD::GET_DYNAMIC_AREA_OFFSET:
return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
case ISD::EH_DWARF_CFA: return LowerEH_DWARF_CFA(Op, DAG);
case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
case ISD::LOAD: return LowerLOAD(Op, DAG);
case ISD::STORE: return LowerSTORE(Op, DAG);
case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
case ISD::STRICT_FP_TO_UINT:
case ISD::STRICT_FP_TO_SINT:
case ISD::FP_TO_UINT:
case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
case ISD::STRICT_UINT_TO_FP:
case ISD::STRICT_SINT_TO_FP:
case ISD::UINT_TO_FP:
case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG);
case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG);
case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG);
case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG);
case ISD::FSHL: return LowerFunnelShift(Op, DAG);
case ISD::FSHR: return LowerFunnelShift(Op, DAG);
case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
case ISD::MUL: return LowerMUL(Op, DAG);
case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
case ISD::STRICT_FP_ROUND:
case ISD::FP_ROUND:
return LowerFP_ROUND(Op, DAG);
case ISD::ROTL: return LowerROTL(Op, DAG);
case ISD::INTRINSIC_W_CHAIN: return SDValue();
case ISD::BITCAST: return LowerBITCAST(Op, DAG);
case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
case ISD::INTRINSIC_VOID:
return LowerINTRINSIC_VOID(Op, DAG);
case ISD::BSWAP:
return LowerBSWAP(Op, DAG);
case ISD::ATOMIC_CMP_SWAP:
return LowerATOMIC_CMP_SWAP(Op, DAG);
case ISD::ATOMIC_STORE:
return LowerATOMIC_LOAD_STORE(Op, DAG);
}
}
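/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.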
void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const {
SDLoc dl(N);
switch (N->getOpcode()) {
default:
llvm_unreachable("Do not know how to custom type legalize this operation!");
case ISD::ATOMIC_LOAD: {
SDValue Res = LowerATOMIC_LOAD_STORE(SDValue(N, 0), DAG);
Results.push_back(Res);
Results.push_back(Res.getValue(1));
break;
}
case ISD::READCYCLECOUNTER: {
SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
Results.push_back(
DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, RTB, RTB.getValue(1)));
Results.push_back(RTB.getValue(2));
break;
}
case ISD::INTRINSIC_W_CHAIN: {
if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
Intrinsic::loop_decrement)
break;
assert(N->getValueType(0) == MVT::i1 &&
"Unexpected result type for CTR decrement intrinsic");
EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
N->getValueType(0));
SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
N->getOperand(1));
Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
Results.push_back(NewInt.getValue(1));
break;
}
case ISD::INTRINSIC_WO_CHAIN: {
switch (cast<ConstantSDNode>(N->getOperand(0))->getZExtValue()) {
case Intrinsic::ppc_pack_longdouble:
Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128,
N->getOperand(2), N->getOperand(1)));
break;
case Intrinsic::ppc_maxfe:
case Intrinsic::ppc_minfe:
case Intrinsic::ppc_fnmsub:
case Intrinsic::ppc_convert_f128_to_ppcf128:
Results.push_back(LowerINTRINSIC_WO_CHAIN(SDValue(N, 0), DAG));
break;
}
break;
}
case ISD::VAARG: {
if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
return;
EVT VT = N->getValueType(0);
if (VT == MVT::i64) {
SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
Results.push_back(NewNode);
Results.push_back(NewNode.getValue(1));
}
return;
}
case ISD::STRICT_FP_TO_SINT:
case ISD::STRICT_FP_TO_UINT:
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT: {
if (N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType() ==
MVT::ppcf128)
return;
SDValue LoweredValue = LowerFP_TO_INT(SDValue(N, 0), DAG, dl);
Results.push_back(LoweredValue);
if (N->isStrictFPOpcode())
Results.push_back(LoweredValue.getValue(1));
return;
}
case ISD::TRUNCATE: {
if (!N->getValueType(0).isVector())
return;
SDValue Lowered = LowerTRUNCATEVector(SDValue(N, 0), DAG);
if (Lowered)
Results.push_back(Lowered);
return;
}
case ISD::FSHL:
case ISD::FSHR:
return;
case ISD::BITCAST:
return;
case ISD::FP_EXTEND:
SDValue Lowered = LowerFP_EXTEND(SDValue(N, 0), DAG);
if (Lowered)
Results.push_back(Lowered);
return;
}
}
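// Helper to emit a call to a no-argument PPC intrinsic, such as the sync
// family of fences used below.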
static Instruction *callIntrinsic(IRBuilderBase &Builder, Intrinsic::ID Id) {
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
Function *Func = Intrinsic::getDeclaration(M, Id);
return Builder.CreateCall(Func, {});
}
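// Fence selection follows the usual C++11 atomics mappings (see
// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html): a full sync for
// sequentially consistent operations, an lwsync for release-or-stronger,
// and nothing otherwise.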
Instruction *PPCTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
Instruction *Inst,
AtomicOrdering Ord) const {
if (Ord == AtomicOrdering::SequentiallyConsistent)
return callIntrinsic(Builder, Intrinsic::ppc_sync);
if (isReleaseOrStronger(Ord))
return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
return nullptr;
}
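// Trailing fence: an acquire-or-stronger operation containing an atomic
// load normally gets an lwsync; 64-bit atomic loads instead use the lighter
// ppc_cfence, which orders through a dependency on the loaded value.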
Instruction *PPCTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
Instruction *Inst,
AtomicOrdering Ord) const {
if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
return Builder.CreateCall(
Intrinsic::getDeclaration(
Builder.GetInsertBlock()->getParent()->getParent(),
Intrinsic::ppc_cfence, {Inst->getType()}),
{Inst});
return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
}
return nullptr;
}
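// EmitAtomicBinary - Expand an atomic read-modify-write pseudo into a
// load-reserve / store-conditional loop, roughly:
//   loopMBB:
//     l[bhwd]arx dest, ptrA, ptrB
//     <binop>    tmp, incr, dest            (if BinOpcode)
//     cmp incr, dest; bcc<CmpPred> exitMBB  (if CmpOpcode, i.e. min/max)
//   loop2MBB:                               (present only with CmpOpcode)
//     st[bhwd]cx. tmp, ptrA, ptrB
//     bne- loopMBB
//   exitMBB:
// For a plain swap (no BinOpcode), incr itself is stored.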
MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
unsigned AtomicSize,
unsigned BinOpcode,
unsigned CmpOpcode,
unsigned CmpPred) const {
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
auto LoadMnemonic = PPC::LDARX;
auto StoreMnemonic = PPC::STDCX;
switch (AtomicSize) {
default:
llvm_unreachable("Unexpected size of atomic entity");
case 1:
LoadMnemonic = PPC::LBARX;
StoreMnemonic = PPC::STBCX;
assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
break;
case 2:
LoadMnemonic = PPC::LHARX;
StoreMnemonic = PPC::STHCX;
assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
break;
case 4:
LoadMnemonic = PPC::LWARX;
StoreMnemonic = PPC::STWCX;
break;
case 8:
LoadMnemonic = PPC::LDARX;
StoreMnemonic = PPC::STDCX;
break;
}
const BasicBlock *LLVM_BB = BB->getBasicBlock();
MachineFunction *F = BB->getParent();
MachineFunction::iterator It = ++BB->getIterator();
Register dest = MI.getOperand(0).getReg();
Register ptrA = MI.getOperand(1).getReg();
Register ptrB = MI.getOperand(2).getReg();
Register incr = MI.getOperand(3).getReg();
DebugLoc dl = MI.getDebugLoc();
MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *loop2MBB =
CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
F->insert(It, loopMBB);
if (CmpOpcode)
F->insert(It, loop2MBB);
F->insert(It, exitMBB);
exitMBB->splice(exitMBB->begin(), BB,
std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
MachineRegisterInfo &RegInfo = F->getRegInfo();
Register TmpReg = (!BinOpcode) ? incr :
RegInfo.createVirtualRegister(AtomicSize == 8 ? &PPC::G8RCRegClass
: &PPC::GPRCRegClass);
BB->addSuccessor(loopMBB);
BB = loopMBB;
BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
.addReg(ptrA).addReg(ptrB);
if (BinOpcode)
BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
if (CmpOpcode) {
if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
ExtReg).addReg(dest);
BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
.addReg(incr).addReg(ExtReg);
} else
BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
.addReg(incr).addReg(dest);
BuildMI(BB, dl, TII->get(PPC::BCC))
.addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
BB->addSuccessor(loop2MBB);
BB->addSuccessor(exitMBB);
BB = loop2MBB;
}
BuildMI(BB, dl, TII->get(StoreMnemonic))
.addReg(TmpReg).addReg(ptrA).addReg(ptrB);
BuildMI(BB, dl, TII->get(PPC::BCC))
.addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
BB->addSuccessor(loopMBB);
BB->addSuccessor(exitMBB);
BB = exitMBB;
return BB;
}
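// Return true if MI is known to produce a sign-extended result: a COPY
// that the instruction info can prove is sign-extended, a sign-extending
// load (lha/lwa families), or an explicit extsb/extsh/extsw or algebraic
// right-shift.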
static bool isSignExtended(MachineInstr &MI, const PPCInstrInfo *TII) {
switch(MI.getOpcode()) {
default:
return false;
case PPC::COPY:
return TII->isSignExtended(MI);
case PPC::LHA:
case PPC::LHA8:
case PPC::LHAU:
case PPC::LHAU8:
case PPC::LHAUX:
case PPC::LHAUX8:
case PPC::LHAX:
case PPC::LHAX8:
case PPC::LWA:
case PPC::LWAUX:
case PPC::LWAX:
case PPC::LWAX_32:
case PPC::LWA_32:
case PPC::PLHA:
case PPC::PLHA8:
case PPC::PLHA8pc:
case PPC::PLHApc:
case PPC::PLWA:
case PPC::PLWA8:
case PPC::PLWA8pc:
case PPC::PLWApc:
case PPC::EXTSB:
case PPC::EXTSB8:
case PPC::EXTSB8_32_64:
case PPC::EXTSB8_rec:
case PPC::EXTSB_rec:
case PPC::EXTSH:
case PPC::EXTSH8:
case PPC::EXTSH8_32_64:
case PPC::EXTSH8_rec:
case PPC::EXTSH_rec:
case PPC::EXTSW:
case PPC::EXTSWSLI:
case PPC::EXTSWSLI_32_64:
case PPC::EXTSWSLI_32_64_rec:
case PPC::EXTSWSLI_rec:
case PPC::EXTSW_32:
case PPC::EXTSW_32_64:
case PPC::EXTSW_32_64_rec:
case PPC::EXTSW_rec:
case PPC::SRAW:
case PPC::SRAWI:
case PPC::SRAWI_rec:
case PPC::SRAW_rec:
return true;
}
return false;
}
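// EmitPartwordAtomicBinary - Expand an 8/16-bit atomic RMW when the
// subtarget lacks native partword atomics: operate on the aligned word
// containing the partword with an lwarx/stwcx. loop. The bookkeeping below
// computes the byte's shift amount within the word, builds a mask, applies
// the operation to the shifted operand, and merges the result back so the
// neighbouring bytes are preserved; the final SRW/RLWINM pair extracts the
// old partword value as the result.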
MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
MachineInstr &MI, MachineBasicBlock *BB,
bool is8bit, unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
const PPCInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc dl = MI.getDebugLoc();
MachineFunction *F = BB->getParent();
MachineRegisterInfo &RegInfo = F->getRegInfo();
Register incr = MI.getOperand(3).getReg();
bool IsSignExtended = Register::isVirtualRegister(incr) &&
isSignExtended(*RegInfo.getVRegDef(incr), TII);
if (CmpOpcode == PPC::CMPW && !IsSignExtended) {
Register ValueReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
BuildMI(*BB, MI, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueReg)
.addReg(MI.getOperand(3).getReg());
MI.getOperand(3).setReg(ValueReg);
}
if (Subtarget.hasPartwordAtomics())
return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
CmpPred);
bool is64bit = Subtarget.isPPC64();
bool isLittleEndian = Subtarget.isLittleEndian();
unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
const BasicBlock *LLVM_BB = BB->getBasicBlock();
MachineFunction::iterator It = ++BB->getIterator();
Register dest = MI.getOperand(0).getReg();
Register ptrA = MI.getOperand(1).getReg();
Register ptrB = MI.getOperand(2).getReg();
MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *loop2MBB =
CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
F->insert(It, loopMBB);
if (CmpOpcode)
F->insert(It, loop2MBB);
F->insert(It, exitMBB);
exitMBB->splice(exitMBB->begin(), BB,
std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
const TargetRegisterClass *RC =
is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
Register PtrReg = RegInfo.createVirtualRegister(RC);
Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
Register ShiftReg =
isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
Register MaskReg = RegInfo.createVirtualRegister(GPRC);
Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
Register SrwDestReg = RegInfo.createVirtualRegister(GPRC);
Register Ptr1Reg;
Register TmpReg =
(!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
BB->addSuccessor(loopMBB);
if (ptrA != ZeroReg) {
Ptr1Reg = RegInfo.createVirtualRegister(RC);
BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
.addReg(ptrA)
.addReg(ptrB);
} else {
Ptr1Reg = ptrB;
}
BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
.addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
.addImm(3)
.addImm(27)
.addImm(is8bit ? 28 : 27);
if (!isLittleEndian)
BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
.addReg(Shift1Reg)
.addImm(is8bit ? 24 : 16);
if (is64bit)
BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
.addReg(Ptr1Reg)
.addImm(0)
.addImm(61);
else
BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
.addReg(Ptr1Reg)
.addImm(0)
.addImm(0)
.addImm(29);
BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
if (is8bit)
BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
else {
BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
.addReg(Mask3Reg)
.addImm(65535);
}
BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
.addReg(Mask2Reg)
.addReg(ShiftReg);
BB = loopMBB;
BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
.addReg(ZeroReg)
.addReg(PtrReg);
if (BinOpcode)
BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
.addReg(Incr2Reg)
.addReg(TmpDestReg);
BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
.addReg(TmpDestReg)
.addReg(MaskReg);
BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
if (CmpOpcode) {
Register SReg = RegInfo.createVirtualRegister(GPRC);
BuildMI(BB, dl, TII->get(PPC::AND), SReg)
.addReg(TmpDestReg)
.addReg(MaskReg);
unsigned ValueReg = SReg;
unsigned CmpReg = Incr2Reg;
if (CmpOpcode == PPC::CMPW) {
ValueReg = RegInfo.createVirtualRegister(GPRC);
BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
.addReg(SReg)
.addReg(ShiftReg);
Register ValueSReg = RegInfo.createVirtualRegister(GPRC);
BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
.addReg(ValueReg);
ValueReg = ValueSReg;
CmpReg = incr;
}
BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
.addReg(CmpReg)
.addReg(ValueReg);
BuildMI(BB, dl, TII->get(PPC::BCC))
.addImm(CmpPred)
.addReg(PPC::CR0)
.addMBB(exitMBB);
BB->addSuccessor(loop2MBB);
BB->addSuccessor(exitMBB);
BB = loop2MBB;
}
BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
BuildMI(BB, dl, TII->get(PPC::STWCX))
.addReg(Tmp4Reg)
.addReg(ZeroReg)
.addReg(PtrReg);
BuildMI(BB, dl, TII->get(PPC::BCC))
.addImm(PPC::PRED_NE)
.addReg(PPC::CR0)
.addMBB(loopMBB);
BB->addSuccessor(loopMBB);
BB->addSuccessor(exitMBB);
BB = exitMBB;
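// Both instructions below are inserted at the head of exitMBB, so they end
// up in reverse order: the SRW (shift the loaded word down) executes first,
// then the RLWINM masks the result to the partword width.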
BuildMI(*BB, BB->begin(), dl, TII->get(PPC::RLWINM), dest)
.addReg(SrwDestReg)
.addImm(0)
.addImm(is8bit ? 24 : 16)
.addImm(31);
BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), SrwDestReg)
.addReg(TmpDestReg)
.addReg(ShiftReg);
return BB;
}
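// Emit the machine-level expansion of EH_SjLj_SetJmp{32,64}: store the TOC
// pointer (64-bit ELF) and the base pointer into the setjmp buffer, capture
// the return label with a branch-and-link, and split control flow so the
// fall-through path yields 0 while the longjmp re-entry path yields 1.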
llvm::MachineBasicBlock *
PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
MachineBasicBlock *MBB) const {
DebugLoc DL = MI.getDebugLoc();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
MachineFunction *MF = MBB->getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
const BasicBlock *BB = MBB->getBasicBlock();
MachineFunction::iterator I = ++MBB->getIterator();
Register DstReg = MI.getOperand(0).getReg();
const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
Register mainDstReg = MRI.createVirtualRegister(RC);
Register restoreDstReg = MRI.createVirtualRegister(RC);
MVT PVT = getPointerTy(MF->getDataLayout());
assert((PVT == MVT::i64 || PVT == MVT::i32) &&
"Invalid Pointer Size!");
MachineBasicBlock *thisMBB = MBB;
MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
MF->insert(I, mainMBB);
MF->insert(I, sinkMBB);
MachineInstrBuilder MIB;
sinkMBB->splice(sinkMBB->begin(), MBB,
std::next(MachineBasicBlock::iterator(MI)), MBB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
const int64_t LabelOffset = 1 * PVT.getStoreSize();
const int64_t TOCOffset = 3 * PVT.getStoreSize();
const int64_t BPOffset = 4 * PVT.getStoreSize();
const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
Register LabelReg = MRI.createVirtualRegister(PtrRC);
Register BufReg = MI.getOperand(1).getReg();
if (Subtarget.is64BitELFABI()) {
setUsesTOCBasePtr(*MBB->getParent());
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
.addReg(PPC::X2)
.addImm(TOCOffset)
.addReg(BufReg)
.cloneMemRefs(MI);
}
unsigned BaseReg;
if (MF->getFunction().hasFnAttribute(Attribute::Naked))
BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
else
BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
MIB = BuildMI(*thisMBB, MI, DL,
TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
.addReg(BaseReg)
.addImm(BPOffset)
.addReg(BufReg)
.cloneMemRefs(MI);
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
MIB.addRegMask(TRI->getNoPreservedMask());
BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
.addMBB(mainMBB);
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
MIB =
BuildMI(mainMBB, DL,
TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
if (Subtarget.isPPC64()) {
MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
.addReg(LabelReg)
.addImm(LabelOffset)
.addReg(BufReg);
} else {
MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
.addReg(LabelReg)
.addImm(LabelOffset)
.addReg(BufReg);
}
MIB.cloneMemRefs(MI);
BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
mainMBB->addSuccessor(sinkMBB);
BuildMI(*sinkMBB, sinkMBB->begin(), DL,
TII->get(PPC::PHI), DstReg)
.addReg(mainDstReg).addMBB(mainMBB)
.addReg(restoreDstReg).addMBB(thisMBB);
MI.eraseFromParent();
return sinkMBB;
}
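// Emit the expansion of EH_SjLj_LongJmp{32,64}: reload the frame pointer,
// the saved return label, SP, BP, and (on 64-bit SVR4) the TOC pointer
// from the jump buffer, then branch to the label through the count
// register.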
MachineBasicBlock *
PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
MachineBasicBlock *MBB) const {
DebugLoc DL = MI.getDebugLoc();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineFunction *MF = MBB->getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
MVT PVT = getPointerTy(MF->getDataLayout());
assert((PVT == MVT::i64 || PVT == MVT::i32) &&
"Invalid Pointer Size!");
const TargetRegisterClass *RC =
(PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
Register Tmp = MRI.createVirtualRegister(RC);
unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
unsigned BP =
(PVT == MVT::i64)
? PPC::X30
: (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
: PPC::R30);
MachineInstrBuilder MIB;
const int64_t LabelOffset = 1 * PVT.getStoreSize();
const int64_t SPOffset = 2 * PVT.getStoreSize();
const int64_t TOCOffset = 3 * PVT.getStoreSize();
const int64_t BPOffset = 4 * PVT.getStoreSize();
Register BufReg = MI.getOperand(0).getReg();
if (PVT == MVT::i64) {
MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
.addImm(0)
.addReg(BufReg);
} else {
MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
.addImm(0)
.addReg(BufReg);
}
MIB.cloneMemRefs(MI);
if (PVT == MVT::i64) {
MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
.addImm(LabelOffset)
.addReg(BufReg);
} else {
MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
.addImm(LabelOffset)
.addReg(BufReg);
}
MIB.cloneMemRefs(MI);
if (PVT == MVT::i64) {
MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
.addImm(SPOffset)
.addReg(BufReg);
} else {
MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
.addImm(SPOffset)
.addReg(BufReg);
}
MIB.cloneMemRefs(MI);
if (PVT == MVT::i64) {
MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
.addImm(BPOffset)
.addReg(BufReg);
} else {
MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
.addImm(BPOffset)
.addReg(BufReg);
}
MIB.cloneMemRefs(MI);
if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
setUsesTOCBasePtr(*MBB->getParent());
MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
.addImm(TOCOffset)
.addReg(BufReg)
.cloneMemRefs(MI);
}
BuildMI(*MBB, MI, DL,
TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
MI.eraseFromParent();
return MBB;
}
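// Inline stack probing is enabled by the "probe-stack"="inline-asm"
// function attribute.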
bool PPCTargetLowering::hasInlineStackProbe(MachineFunction &MF) const {
if (MF.getFunction().hasFnAttribute("probe-stack"))
return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
"inline-asm";
return false;
}
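// The probe size defaults to 4096 bytes, may be overridden with the
// "stack-probe-size" function attribute, and is rounded down to the stack
// alignment (falling back to the alignment itself if that rounds to zero).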
unsigned PPCTargetLowering::getStackProbeSize(MachineFunction &MF) const {
const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
unsigned StackAlign = TFI->getStackAlignment();
assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) &&
"Unexpected stack alignment");
unsigned StackProbeSize = 4096;
const Function &Fn = MF.getFunction();
if (Fn.hasFnAttribute("stack-probe-size"))
Fn.getFnAttribute("stack-probe-size")
.getValueAsString()
.getAsInteger(0, StackProbeSize);
StackProbeSize &= ~(StackAlign - 1);
return StackProbeSize ? StackProbeSize : StackAlign;
}
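// emitProbedAlloca - Expand PROBED_ALLOCA_{32,64} into a probing loop so a
// dynamic allocation never skips the stack guard page. A single st[wd]ux
// first handles the residual (NegSize % ProbeSize), then:
//   TestMBB:  cmp sp, finalsp; beq TailMBB
//   BlockMBB: st[wd]ux fp, sp, -ProbeSize; b TestMBB
//   TailMBB:  dst = sp + maxcallframesize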
MachineBasicBlock *
PPCTargetLowering::emitProbedAlloca(MachineInstr &MI,
MachineBasicBlock *MBB) const {
const bool isPPC64 = Subtarget.isPPC64();
MachineFunction *MF = MBB->getParent();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
const unsigned ProbeSize = getStackProbeSize(*MF);
const BasicBlock *ProbedBB = MBB->getBasicBlock();
MachineRegisterInfo &MRI = MF->getRegInfo();
MachineBasicBlock *TestMBB = MF->CreateMachineBasicBlock(ProbedBB);
MachineBasicBlock *TailMBB = MF->CreateMachineBasicBlock(ProbedBB);
MachineBasicBlock *BlockMBB = MF->CreateMachineBasicBlock(ProbedBB);
MachineFunction::iterator MBBIter = ++MBB->getIterator();
MF->insert(MBBIter, TestMBB);
MF->insert(MBBIter, BlockMBB);
MF->insert(MBBIter, TailMBB);
const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
Register DstReg = MI.getOperand(0).getReg();
Register NegSizeReg = MI.getOperand(1).getReg();
Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
Register FinalStackPtr = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
Register FramePointer = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
Register ActualNegSizeReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
unsigned ProbeOpc;
if (!MRI.hasOneNonDBGUse(NegSizeReg))
ProbeOpc =
isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_64 : PPC::PREPARE_PROBED_ALLOCA_32;
else
ProbeOpc = isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64
: PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32;
BuildMI(*MBB, {MI}, DL, TII->get(ProbeOpc), FramePointer)
.addDef(ActualNegSizeReg)
.addReg(NegSizeReg)
.add(MI.getOperand(2))
.add(MI.getOperand(3));
BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4),
FinalStackPtr)
.addReg(SPReg)
.addReg(ActualNegSizeReg);
int64_t NegProbeSize = -(int64_t)ProbeSize;
assert(isInt<32>(NegProbeSize) && "Unhandled probe size!");
Register ScratchReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
if (!isInt<16>(NegProbeSize)) {
Register TempReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg)
.addImm(NegProbeSize >> 16);
BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ORI8 : PPC::ORI),
ScratchReg)
.addReg(TempReg)
.addImm(NegProbeSize & 0xFFFF);
} else
BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LI8 : PPC::LI), ScratchReg)
.addImm(NegProbeSize);
{
Register Div = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::DIVD : PPC::DIVW), Div)
.addReg(ActualNegSizeReg)
.addReg(ScratchReg);
Register Mul = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::MULLD : PPC::MULLW), Mul)
.addReg(Div)
.addReg(ScratchReg);
Register NegMod = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), NegMod)
.addReg(Mul)
.addReg(ActualNegSizeReg);
BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
.addReg(FramePointer)
.addReg(SPReg)
.addReg(NegMod);
}
{
Register CmpResult = MRI.createVirtualRegister(&PPC::CRRCRegClass);
BuildMI(TestMBB, DL, TII->get(isPPC64 ? PPC::CMPD : PPC::CMPW), CmpResult)
.addReg(SPReg)
.addReg(FinalStackPtr);
BuildMI(TestMBB, DL, TII->get(PPC::BCC))
.addImm(PPC::PRED_EQ)
.addReg(CmpResult)
.addMBB(TailMBB);
TestMBB->addSuccessor(BlockMBB);
TestMBB->addSuccessor(TailMBB);
}
{
BuildMI(BlockMBB, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
.addReg(FramePointer)
.addReg(SPReg)
.addReg(ScratchReg);
BuildMI(BlockMBB, DL, TII->get(PPC::B)).addMBB(TestMBB);
BlockMBB->addSuccessor(TestMBB);
}
Register MaxCallFrameSizeReg =
MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
BuildMI(TailMBB, DL,
TII->get(isPPC64 ? PPC::DYNAREAOFFSET8 : PPC::DYNAREAOFFSET),
MaxCallFrameSizeReg)
.add(MI.getOperand(2))
.add(MI.getOperand(3));
BuildMI(TailMBB, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4), DstReg)
.addReg(SPReg)
.addReg(MaxCallFrameSizeReg);
TailMBB->splice(TailMBB->end(), MBB,
std::next(MachineBasicBlock::iterator(MI)), MBB->end());
TailMBB->transferSuccessorsAndUpdatePHIs(MBB);
MBB->addSuccessor(TestMBB);
MI.eraseFromParent();
++NumDynamicAllocaProbed;
return TailMBB;
}
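// Expand pseudo-instructions that need custom control flow: patchpoints
// and stackmaps, SjLj EH setjmp/longjmp, the SELECT_* family, ReadTB, the
// atomic RMW and cmpxchg pseudos, rounding-mode manipulation (FADDrtz,
// SETRNDi, SETRND, SETFLM), probed allocas, and quadword splits and
// loads/stores, among others.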
MachineBasicBlock *
PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *BB) const {
if (MI.getOpcode() == TargetOpcode::STACKMAP ||
MI.getOpcode() == TargetOpcode::PATCHPOINT) {
if (Subtarget.is64BitELFABI() &&
MI.getOpcode() == TargetOpcode::PATCHPOINT &&
!Subtarget.isUsingPCRelativeCalls()) {
MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
}
return emitPatchPoint(MI, BB);
}
if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
return emitEHSjLjSetJmp(MI, BB);
} else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
return emitEHSjLjLongJmp(MI, BB);
}
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
MachineFunction::iterator It = ++BB->getIterator();
MachineFunction *F = BB->getParent();
MachineRegisterInfo &MRI = F->getRegInfo();
if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
MI.getOpcode() == PPC::SELECT_I8) {
SmallVector<MachineOperand, 2> Cond;
if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
MI.getOpcode() == PPC::SELECT_CC_I8)
Cond.push_back(MI.getOperand(4));
else
Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
Cond.push_back(MI.getOperand(1));
DebugLoc dl = MI.getDebugLoc();
TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
} else if (MI.getOpcode() == PPC::SELECT_CC_F4 ||
MI.getOpcode() == PPC::SELECT_CC_F8 ||
MI.getOpcode() == PPC::SELECT_CC_F16 ||
MI.getOpcode() == PPC::SELECT_CC_VRRC ||
MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
MI.getOpcode() == PPC::SELECT_CC_VSRC ||
MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
MI.getOpcode() == PPC::SELECT_CC_SPE ||
MI.getOpcode() == PPC::SELECT_F4 ||
MI.getOpcode() == PPC::SELECT_F8 ||
MI.getOpcode() == PPC::SELECT_F16 ||
MI.getOpcode() == PPC::SELECT_SPE ||
MI.getOpcode() == PPC::SELECT_SPE4 ||
MI.getOpcode() == PPC::SELECT_VRRC ||
MI.getOpcode() == PPC::SELECT_VSFRC ||
MI.getOpcode() == PPC::SELECT_VSSRC ||
MI.getOpcode() == PPC::SELECT_VSRC) {
MachineBasicBlock *thisMBB = BB;
MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
DebugLoc dl = MI.getDebugLoc();
F->insert(It, copy0MBB);
F->insert(It, sinkMBB);
sinkMBB->splice(sinkMBB->begin(), BB,
std::next(MachineBasicBlock::iterator(MI)), BB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
BB->addSuccessor(copy0MBB);
BB->addSuccessor(sinkMBB);
if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
MI.getOpcode() == PPC::SELECT_F16 ||
MI.getOpcode() == PPC::SELECT_SPE4 ||
MI.getOpcode() == PPC::SELECT_SPE ||
MI.getOpcode() == PPC::SELECT_VRRC ||
MI.getOpcode() == PPC::SELECT_VSFRC ||
MI.getOpcode() == PPC::SELECT_VSSRC ||
MI.getOpcode() == PPC::SELECT_VSRC) {
BuildMI(BB, dl, TII->get(PPC::BC))
.addReg(MI.getOperand(1).getReg())
.addMBB(sinkMBB);
} else {
unsigned SelectPred = MI.getOperand(4).getImm();
BuildMI(BB, dl, TII->get(PPC::BCC))
.addImm(SelectPred)
.addReg(MI.getOperand(1).getReg())
.addMBB(sinkMBB);
}
BB = copy0MBB;
BB->addSuccessor(sinkMBB);
BB = sinkMBB;
BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
.addReg(MI.getOperand(3).getReg())
.addMBB(copy0MBB)
.addReg(MI.getOperand(2).getReg())
.addMBB(thisMBB);
} else if (MI.getOpcode() == PPC::ReadTB) {
MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
DebugLoc dl = MI.getDebugLoc();
F->insert(It, readMBB);
F->insert(It, sinkMBB);
sinkMBB->splice(sinkMBB->begin(), BB,
std::next(MachineBasicBlock::iterator(MI)), BB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
BB->addSuccessor(readMBB);
BB = readMBB;
MachineRegisterInfo &RegInfo = F->getRegInfo();
Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
Register LoReg = MI.getOperand(0).getReg();
Register HiReg = MI.getOperand(1).getReg();
BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
.addReg(HiReg)
.addReg(ReadAgainReg);
BuildMI(BB, dl, TII->get(PPC::BCC))
.addImm(PPC::PRED_NE)
.addReg(CmpReg)
.addMBB(readMBB);
BB->addSuccessor(readMBB);
BB->addSuccessor(sinkMBB);
} else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
BB = EmitAtomicBinary(MI, BB, 4, 0);
else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
BB = EmitAtomicBinary(MI, BB, 8, 0);
else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
(Subtarget.hasPartwordAtomics() &&
MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
(Subtarget.hasPartwordAtomics() &&
MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
auto LoadMnemonic = PPC::LDARX;
auto StoreMnemonic = PPC::STDCX;
switch (MI.getOpcode()) {
default:
llvm_unreachable("Compare and swap of unknown size");
case PPC::ATOMIC_CMP_SWAP_I8:
LoadMnemonic = PPC::LBARX;
StoreMnemonic = PPC::STBCX;
assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
break;
case PPC::ATOMIC_CMP_SWAP_I16:
LoadMnemonic = PPC::LHARX;
StoreMnemonic = PPC::STHCX;
assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
break;
case PPC::ATOMIC_CMP_SWAP_I32:
LoadMnemonic = PPC::LWARX;
StoreMnemonic = PPC::STWCX;
break;
case PPC::ATOMIC_CMP_SWAP_I64:
LoadMnemonic = PPC::LDARX;
StoreMnemonic = PPC::STDCX;
break;
}
Register dest = MI.getOperand(0).getReg();
Register ptrA = MI.getOperand(1).getReg();
Register ptrB = MI.getOperand(2).getReg();
Register oldval = MI.getOperand(3).getReg();
Register newval = MI.getOperand(4).getReg();
DebugLoc dl = MI.getDebugLoc();
MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
F->insert(It, loop1MBB);
F->insert(It, loop2MBB);
F->insert(It, midMBB);
F->insert(It, exitMBB);
exitMBB->splice(exitMBB->begin(), BB,
std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
BB->addSuccessor(loop1MBB);
BB = loop1MBB;
BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
.addReg(oldval)
.addReg(dest);
BuildMI(BB, dl, TII->get(PPC::BCC))
.addImm(PPC::PRED_NE)
.addReg(PPC::CR0)
.addMBB(midMBB);
BB->addSuccessor(loop2MBB);
BB->addSuccessor(midMBB);
BB = loop2MBB;
BuildMI(BB, dl, TII->get(StoreMnemonic))
.addReg(newval)
.addReg(ptrA)
.addReg(ptrB);
BuildMI(BB, dl, TII->get(PPC::BCC))
.addImm(PPC::PRED_NE)
.addReg(PPC::CR0)
.addMBB(loop1MBB);
BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
BB->addSuccessor(loop1MBB);
BB->addSuccessor(exitMBB);
BB = midMBB;
BuildMI(BB, dl, TII->get(StoreMnemonic))
.addReg(dest)
.addReg(ptrA)
.addReg(ptrB);
BB->addSuccessor(exitMBB);
BB = exitMBB;
} else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
bool is64bit = Subtarget.isPPC64();
bool isLittleEndian = Subtarget.isLittleEndian();
bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
Register dest = MI.getOperand(0).getReg();
Register ptrA = MI.getOperand(1).getReg();
Register ptrB = MI.getOperand(2).getReg();
Register oldval = MI.getOperand(3).getReg();
Register newval = MI.getOperand(4).getReg();
DebugLoc dl = MI.getDebugLoc();
MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
F->insert(It, loop1MBB);
F->insert(It, loop2MBB);
F->insert(It, midMBB);
F->insert(It, exitMBB);
exitMBB->splice(exitMBB->begin(), BB,
std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
MachineRegisterInfo &RegInfo = F->getRegInfo();
const TargetRegisterClass *RC =
is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
Register PtrReg = RegInfo.createVirtualRegister(RC);
Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
Register ShiftReg =
isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
Register MaskReg = RegInfo.createVirtualRegister(GPRC);
Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
Register Ptr1Reg;
Register TmpReg = RegInfo.createVirtualRegister(GPRC);
Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
BB->addSuccessor(loop1MBB);
if (ptrA != ZeroReg) {
Ptr1Reg = RegInfo.createVirtualRegister(RC);
BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
.addReg(ptrA)
.addReg(ptrB);
} else {
Ptr1Reg = ptrB;
}
BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
.addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
.addImm(3)
.addImm(27)
.addImm(is8bit ? 28 : 27);
if (!isLittleEndian)
BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
.addReg(Shift1Reg)
.addImm(is8bit ? 24 : 16);
if (is64bit)
BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
.addReg(Ptr1Reg)
.addImm(0)
.addImm(61);
else
BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
.addReg(Ptr1Reg)
.addImm(0)
.addImm(0)
.addImm(29);
BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
.addReg(newval)
.addReg(ShiftReg);
BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
.addReg(oldval)
.addReg(ShiftReg);
if (is8bit)
BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
else {
BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
.addReg(Mask3Reg)
.addImm(65535);
}
BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
.addReg(Mask2Reg)
.addReg(ShiftReg);
BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
.addReg(NewVal2Reg)
.addReg(MaskReg);
BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
.addReg(OldVal2Reg)
.addReg(MaskReg);
BB = loop1MBB;
BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
.addReg(ZeroReg)
.addReg(PtrReg);
BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
.addReg(TmpDestReg)
.addReg(MaskReg);
BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
.addReg(TmpReg)
.addReg(OldVal3Reg);
BuildMI(BB, dl, TII->get(PPC::BCC))
.addImm(PPC::PRED_NE)
.addReg(PPC::CR0)
.addMBB(midMBB);
BB->addSuccessor(loop2MBB);
BB->addSuccessor(midMBB);
BB = loop2MBB;
BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
.addReg(TmpDestReg)
.addReg(MaskReg);
BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
.addReg(Tmp2Reg)
.addReg(NewVal3Reg);
BuildMI(BB, dl, TII->get(PPC::STWCX))
.addReg(Tmp4Reg)
.addReg(ZeroReg)
.addReg(PtrReg);
BuildMI(BB, dl, TII->get(PPC::BCC))
.addImm(PPC::PRED_NE)
.addReg(PPC::CR0)
.addMBB(loop1MBB);
BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
BB->addSuccessor(loop1MBB);
BB->addSuccessor(exitMBB);
BB = midMBB;
BuildMI(BB, dl, TII->get(PPC::STWCX))
.addReg(TmpDestReg)
.addReg(ZeroReg)
.addReg(PtrReg);
BB->addSuccessor(exitMBB);
BB = exitMBB;
BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
.addReg(TmpReg)
.addReg(ShiftReg);
} else if (MI.getOpcode() == PPC::FADDrtz) {
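// FADDrtz: save the FPSCR, force the rounding mode to round-toward-zero
// (mtfsb1 31 / mtfsb0 30), perform the addition, then restore the FPSCR.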
Register Dest = MI.getOperand(0).getReg();
Register Src1 = MI.getOperand(1).getReg();
Register Src2 = MI.getOperand(2).getReg();
DebugLoc dl = MI.getDebugLoc();
MachineRegisterInfo &RegInfo = F->getRegInfo();
Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1))
.addImm(31)
.addReg(PPC::RM, RegState::ImplicitDefine);
BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0))
.addImm(30)
.addReg(PPC::RM, RegState::ImplicitDefine);
auto MIB = BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest)
.addReg(Src1)
.addReg(Src2);
if (MI.getFlag(MachineInstr::NoFPExcept))
MIB.setMIFlag(MachineInstr::NoFPExcept);
BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
} else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
? PPC::ANDI8_rec
: PPC::ANDI_rec;
bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);
MachineRegisterInfo &RegInfo = F->getRegInfo();
Register Dest = RegInfo.createVirtualRegister(
Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
DebugLoc Dl = MI.getDebugLoc();
BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
.addReg(MI.getOperand(1).getReg())
.addImm(1);
BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
MI.getOperand(0).getReg())
.addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
} else if (MI.getOpcode() == PPC::TCHECK_RET) {
DebugLoc Dl = MI.getDebugLoc();
MachineRegisterInfo &RegInfo = F->getRegInfo();
Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
MI.getOperand(0).getReg())
.addReg(CRReg);
} else if (MI.getOpcode() == PPC::TBEGIN_RET) {
DebugLoc Dl = MI.getDebugLoc();
unsigned Imm = MI.getOperand(1).getImm();
BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
MI.getOperand(0).getReg())
.addReg(PPC::CR0EQ);
} else if (MI.getOpcode() == PPC::SETRNDi) {
DebugLoc dl = MI.getDebugLoc();
Register OldFPSCRReg = MI.getOperand(0).getReg();
if (MRI.use_empty(OldFPSCRReg))
BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), OldFPSCRReg);
else
BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
unsigned Mode = MI.getOperand(1).getImm();
BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
.addImm(31)
.addReg(PPC::RM, RegState::ImplicitDefine);
BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
.addImm(30)
.addReg(PPC::RM, RegState::ImplicitDefine);
} else if (MI.getOpcode() == PPC::SETRND) {
DebugLoc dl = MI.getDebugLoc();
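// Helper: copy between a 64-bit GPR (G8RC) and an FPR (F8RC). With direct
// moves this is a plain COPY; otherwise the value bounces through an
// 8-byte stack slot with a store/load pair.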
auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
if (Subtarget.hasDirectMove()) {
BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
.addReg(SrcReg);
} else {
unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
MachineRegisterInfo &RegInfo = F->getRegInfo();
const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
if (RC == &PPC::F8RCRegClass) {
assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
"Unsupported RegClass.");
StoreOp = PPC::STFD;
LoadOp = PPC::LD;
} else {
assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
(RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
"Unsupported RegClass.");
}
MachineFrameInfo &MFI = F->getFrameInfo();
int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
MachineMemOperand *MMOStore = F->getMachineMemOperand(
MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
MFI.getObjectAlign(FrameIdx));
BuildMI(*BB, MI, dl, TII->get(StoreOp))
.addReg(SrcReg)
.addImm(0)
.addFrameIndex(FrameIdx)
.addMemOperand(MMOStore);
MachineMemOperand *MMOLoad = F->getMachineMemOperand(
MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
MFI.getObjectAlign(FrameIdx));
BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
.addImm(0)
.addFrameIndex(FrameIdx)
.addMemOperand(MMOLoad);
}
};
Register OldFPSCRReg = MI.getOperand(0).getReg();
BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
MachineOperand SrcOp = MI.getOperand(1);
MachineRegisterInfo &RegInfo = F->getRegInfo();
Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
.addReg(ImDefReg)
.add(SrcOp)
.addImm(1);
Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
.addReg(OldFPSCRTmpReg)
.addReg(ExtSrcReg)
.addImm(0)
.addImm(62);
Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
.addImm(255)
.addReg(NewFPSCRReg)
.addImm(0)
.addImm(0);
} else if (MI.getOpcode() == PPC::SETFLM) {
DebugLoc Dl = MI.getDebugLoc();
Register OldFPSCRReg = MI.getOperand(0).getReg();
if (MRI.use_empty(OldFPSCRReg))
BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::IMPLICIT_DEF), OldFPSCRReg);
else
BuildMI(*BB, MI, Dl, TII->get(PPC::MFFS), OldFPSCRReg);
Register NewFPSCRReg = MI.getOperand(1).getReg();
BuildMI(*BB, MI, Dl, TII->get(PPC::MTFSF))
.addImm(255)
.addReg(NewFPSCRReg)
.addImm(0)
.addImm(0);
} else if (MI.getOpcode() == PPC::PROBED_ALLOCA_32 ||
MI.getOpcode() == PPC::PROBED_ALLOCA_64) {
return emitProbedAlloca(MI, BB);
} else if (MI.getOpcode() == PPC::SPLIT_QUADWORD) {
DebugLoc DL = MI.getDebugLoc();
Register Src = MI.getOperand(2).getReg();
Register Lo = MI.getOperand(0).getReg();
Register Hi = MI.getOperand(1).getReg();
BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY))
.addDef(Lo)
.addUse(Src, 0, PPC::sub_gp8_x1);
BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY))
.addDef(Hi)
.addUse(Src, 0, PPC::sub_gp8_x0);
} else if (MI.getOpcode() == PPC::LQX_PSEUDO ||
MI.getOpcode() == PPC::STQX_PSEUDO) {
DebugLoc DL = MI.getDebugLoc();
Register Ptr =
F->getRegInfo().createVirtualRegister(&PPC::G8RC_and_G8RC_NOX0RegClass);
Register Val = MI.getOperand(0).getReg();
Register RA = MI.getOperand(1).getReg();
Register RB = MI.getOperand(2).getReg();
BuildMI(*BB, MI, DL, TII->get(PPC::ADD8), Ptr).addReg(RA).addReg(RB);
BuildMI(*BB, MI, DL,
MI.getOpcode() == PPC::LQX_PSEUDO ? TII->get(PPC::LQ)
: TII->get(PPC::STQ))
.addReg(Val, MI.getOpcode() == PPC::LQX_PSEUDO ? RegState::Define : 0)
.addImm(0)
.addReg(Ptr);
} else {
llvm_unreachable("Unexpected instr type to insert");
}
MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
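// Number of Newton-Raphson refinement iterations for the estimate-based
// sqrt/reciprocal expansions below: one step when the estimates have
// reciprocal precision, three otherwise, plus one extra step for f64.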
static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
if (VT.getScalarType() == MVT::f64)
RefinementSteps++;
return RefinementSteps;
}
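// Emit an FTSQRT-based test that the input is safe for the software sqrt
// estimate; the EQ bit of the CR result is extracted as an i1. Types FTSQRT
// cannot handle fall back to the generic implementation.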
SDValue PPCTargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
const DenormalMode &Mode) const {
EVT VT = Op.getValueType();
if (!isTypeLegal(MVT::i1) ||
(VT != MVT::f64 &&
((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX())))
return TargetLowering::getSqrtInputTest(Op, DAG, Mode);
SDLoc DL(Op);
SDValue FTSQRT = DAG.getNode(PPCISD::FTSQRT, DL, MVT::i32, Op);
SDValue SRIdxVal = DAG.getTargetConstant(PPC::sub_eq, DL, MVT::i32);
return SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, MVT::i1,
FTSQRT, SRIdxVal),
0);
}
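// For types with hardware square-root support, compute the denormal-input
// result with a full-precision fsqrt rather than the generic fallback.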
SDValue
PPCTargetLowering::getSqrtResultForDenormInput(SDValue Op,
SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
if (VT != MVT::f64 &&
((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX()))
return TargetLowering::getSqrtResultForDenormInput(Op, DAG);
return DAG.getNode(PPCISD::FSQRT, SDLoc(Op), VT, Op);
}
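// Return a reciprocal-square-root estimate (frsqrte/frsqrtes or the vector
// forms) when the subtarget provides one, filling in the refinement-step
// count if the caller left it unspecified.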
SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
int Enabled, int &RefinementSteps,
bool &UseOneConstNR,
bool Reciprocal) const {
EVT VT = Operand.getValueType();
if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
(VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
(VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
(VT == MVT::v2f64 && Subtarget.hasVSX())) {
if (RefinementSteps == ReciprocalEstimate::Unspecified)
RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
UseOneConstNR = !Subtarget.needsTwoConstNR();
return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
}
return SDValue();
}
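// Likewise for reciprocal estimates (fre/fres or the vector forms).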
SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
int Enabled,
int &RefinementSteps) const {
EVT VT = Operand.getValueType();
if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
(VT == MVT::f64 && Subtarget.hasFRE()) ||
(VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
(VT == MVT::v2f64 && Subtarget.hasVSX())) {
if (RefinementSteps == ReciprocalEstimate::Unspecified)
RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
}
return SDValue();
}
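// Minimum number of divisions by the same divisor before the combiner
// rewrites them as multiplications by one shared reciprocal, e.g. x/z and
// y/z becoming x*(1/z) and y*(1/z); the cores listed below take the rewrite
// one repeated division earlier than the default.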
unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
switch (Subtarget.getCPUDirective()) {
default:
return 3;
case PPC::DIR_440:
case PPC::DIR_A2:
case PPC::DIR_E500:
case PPC::DIR_E500mc:
case PPC::DIR_E5500:
return 2;
}
}
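// Peel nested (base + constant) additions off Loc, accumulating the total
// constant displacement into Offset and leaving the innermost base in Base.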
static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
int64_t &Offset, SelectionDAG &DAG) {
if (DAG.isBaseWithConstantOffset(Loc)) {
Base = Loc.getOperand(0);
Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
}
}
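// Return true if Loc addresses memory exactly Dist accesses of Bytes bytes
// away from the base pointer of Base. Handles frame indices, base+offset
// chains, and global addresses with offsets.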
static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
unsigned Bytes, int Dist,
SelectionDAG &DAG) {
if (VT.getSizeInBits() / 8 != Bytes)
return false;
SDValue BaseLoc = Base->getBasePtr();
if (Loc.getOpcode() == ISD::FrameIndex) {
if (BaseLoc.getOpcode() != ISD::FrameIndex)
return false;
const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
int FS = MFI.getObjectSize(FI);
int BFS = MFI.getObjectSize(BFI);
if (FS != BFS || FS != (int)Bytes) return false;
return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
}
SDValue Base1 = Loc, Base2 = BaseLoc;
int64_t Offset1 = 0, Offset2 = 0;
getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
return true;
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
const GlobalValue *GV1 = nullptr;
const GlobalValue *GV2 = nullptr;
Offset1 = 0;
Offset2 = 0;
bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
if (isGA1 && isGA2 && GV1 == GV2)
return Offset1 == (Offset2 + Dist*Bytes);
return false;
}
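// As above, but dispatch on the node kind: ordinary loads/stores, and the
// Altivec/VSX load and store intrinsics together with the memory type each
// one implies.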
static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
unsigned Bytes, int Dist,
SelectionDAG &DAG) {
if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
EVT VT = LS->getMemoryVT();
SDValue Loc = LS->getBasePtr();
return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
}
if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
EVT VT;
switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
default: return false;
case Intrinsic::ppc_altivec_lvx:
case Intrinsic::ppc_altivec_lvxl:
case Intrinsic::ppc_vsx_lxvw4x:
case Intrinsic::ppc_vsx_lxvw4x_be:
VT = MVT::v4i32;
break;
case Intrinsic::ppc_vsx_lxvd2x:
case Intrinsic::ppc_vsx_lxvd2x_be:
VT = MVT::v2f64;
break;
case Intrinsic::ppc_altivec_lvebx:
VT = MVT::i8;
break;
case Intrinsic::ppc_altivec_lvehx:
VT = MVT::i16;
break;
case Intrinsic::ppc_altivec_lvewx:
VT = MVT::i32;
break;
}
return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
}
if (N->getOpcode() == ISD::INTRINSIC_VOID) {
EVT VT;
switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
default: return false;
case Intrinsic::ppc_altivec_stvx:
case Intrinsic::ppc_altivec_stvxl:
case Intrinsic::ppc_vsx_stxvw4x:
VT = MVT::v4i32;
break;
case Intrinsic::ppc_vsx_stxvd2x:
VT = MVT::v2f64;
break;
case Intrinsic::ppc_vsx_stxvw4x_be:
VT = MVT::v4i32;
break;
case Intrinsic::ppc_vsx_stxvd2x_be:
VT = MVT::v2f64;
break;
case Intrinsic::ppc_altivec_stvebx:
VT = MVT::i8;
break;
case Intrinsic::ppc_altivec_stvehx:
VT = MVT::i16;
break;
case Intrinsic::ppc_altivec_stvewx:
VT = MVT::i32;
break;
}
return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
}
return false;
}
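// Search the chain (backward through token factors, then forward from the
// collected load roots) for another memory access adjacent to LD. Used by
// the unaligned Altivec load expansion below to decide whether the second
// chunk is already covered by a neighboring access.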
static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
SDValue Chain = LD->getChain();
EVT VT = LD->getMemoryVT();
SmallSet<SDNode *, 16> LoadRoots;
SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
SmallSet<SDNode *, 16> Visited;
while (!Queue.empty()) {
SDNode *ChainNext = Queue.pop_back_val();
if (!Visited.insert(ChainNext).second)
continue;
if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
return true;
if (!Visited.count(ChainLD->getChain().getNode()))
Queue.push_back(ChainLD->getChain().getNode());
} else if (ChainNext->getOpcode() == ISD::TokenFactor) {
for (const SDUse &O : ChainNext->ops())
if (!Visited.count(O.getNode()))
Queue.push_back(O.getNode());
} else
LoadRoots.insert(ChainNext);
}
Visited.clear();
Queue.clear();
for (SDNode *Root : LoadRoots) {
Queue.push_back(Root);
while (!Queue.empty()) {
SDNode *LoadRoot = Queue.pop_back_val();
if (!Visited.insert(LoadRoot).second)
continue;
if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
return true;
for (SDNode *U : LoadRoot->uses())
if (((isa<MemSDNode>(U) &&
cast<MemSDNode>(U)->getChain().getNode() == LoadRoot) ||
U->getOpcode() == ISD::TokenFactor) &&
!Visited.count(U))
Queue.push_back(U);
}
}
return false;
}
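// Materialize an unsigned comparison as the sign bit of a 64-bit
// subtraction: zero-extend both operands, subtract (optionally swapped),
// shift the sign bit down, and optionally complement it; e.g. for SETULT
// the result is (x - y) >> 63.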
static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
bool Swap, SDLoc &DL, SelectionDAG &DAG) {
assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
DAG.getConstant(Size, DL, MVT::i32));
auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
DAG.getConstant(Size, DL, MVT::i32));
if (Swap)
std::swap(Op0, Op1);
auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
DAG.getConstant(Size - 1, DL, MVT::i32));
auto Final = Shifted;
if (Complement)
Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
DAG.getConstant(1, DL, MVT::i64));
return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
}
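// After legalization, rewrite a zero-extended unsigned SETCC into the
// subtraction sequence above so that no CR bit needs to be materialized.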
SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
DAGCombinerInfo &DCI) const {
assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
SelectionDAG &DAG = DCI.DAG;
SDLoc DL(N);
if (!DCI.isAfterLegalizeDAG())
return SDValue();
for (const SDNode *U : N->uses())
if (U->getOpcode() != ISD::ZERO_EXTEND)
return SDValue();
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
auto OpSize = N->getOperand(0).getValueSizeInBits();
unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
if (OpSize < Size) {
switch (CC) {
default: break;
case ISD::SETULT:
return generateEquivalentSub(N, Size, false, false, DL, DAG);
case ISD::SETULE:
return generateEquivalentSub(N, Size, true, true, DL, DAG);
case ISD::SETUGT:
return generateEquivalentSub(N, Size, false, true, DL, DAG);
case ISD::SETUGE:
return generateEquivalentSub(N, Size, true, false, DL, DAG);
}
}
return SDValue();
}
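// When CR bits are tracked, a truncate (or setcc/select_cc) fed by logical
// and select operations over values extended from i1 can be computed
// directly in i1: verify every leaf input is an extension from i1 or a
// constant, then rewrite the whole operation tree at i1 and drop the
// truncation.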
SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
SDLoc dl(N);
assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
if (N->getOpcode() == ISD::TRUNCATE &&
N->getValueType(0) != MVT::i1)
return SDValue();
if (N->getOperand(0).getValueType() != MVT::i32 &&
N->getOperand(0).getValueType() != MVT::i64)
return SDValue();
if (N->getOpcode() == ISD::SETCC ||
N->getOpcode() == ISD::SELECT_CC) {
ISD::CondCode CC =
cast<CondCodeSDNode>(N->getOperand(
N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
unsigned OpBits = N->getOperand(0).getValueSizeInBits();
if (ISD::isSignedIntSetCC(CC)) {
if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
return SDValue();
} else if (ISD::isUnsignedIntSetCC(CC)) {
if (!DAG.MaskedValueIsZero(N->getOperand(0),
APInt::getHighBitsSet(OpBits, OpBits-1)) ||
!DAG.MaskedValueIsZero(N->getOperand(1),
APInt::getHighBitsSet(OpBits, OpBits-1)))
return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
: SDValue());
} else {
KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));
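// Bit 0 does not affect the i1 result; force it to known-zero on both sides
// so the remaining known bits can be compared as constants.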
Op1Known.Zero.setBit(0); Op1Known.One.clearBit(0);
Op2Known.Zero.setBit(0); Op2Known.One.clearBit(0);
if (!Op1Known.isConstant() || !Op2Known.isConstant() ||
Op1Known.getConstant() != Op2Known.getConstant())
return SDValue();
}
}
if (N->getOperand(0).getOpcode() != ISD::AND &&
N->getOperand(0).getOpcode() != ISD::OR &&
N->getOperand(0).getOpcode() != ISD::XOR &&
N->getOperand(0).getOpcode() != ISD::SELECT &&
N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
return SDValue();
if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
N->getOperand(1).getOpcode() != ISD::AND &&
N->getOperand(1).getOpcode() != ISD::OR &&
N->getOperand(1).getOpcode() != ISD::XOR &&
N->getOperand(1).getOpcode() != ISD::SELECT &&
N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
return SDValue();
SmallVector<SDValue, 4> Inputs;
SmallVector<SDValue, 8> BinOps, PromOps;
SmallPtrSet<SDNode *, 16> Visited;
for (unsigned i = 0; i < 2; ++i) {
if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
isa<ConstantSDNode>(N->getOperand(i)))
Inputs.push_back(N->getOperand(i));
else
BinOps.push_back(N->getOperand(i));
if (N->getOpcode() == ISD::TRUNCATE)
break;
}
while (!BinOps.empty()) {
SDValue BinOp = BinOps.pop_back_val();
if (!Visited.insert(BinOp.getNode()).second)
continue;
PromOps.push_back(BinOp);
for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
if (BinOp.getOpcode() == ISD::SELECT && i == 0)
continue;
if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
continue;
if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
isa<ConstantSDNode>(BinOp.getOperand(i))) {
Inputs.push_back(BinOp.getOperand(i));
} else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
BinOp.getOperand(i).getOpcode() == ISD::OR ||
BinOp.getOperand(i).getOpcode() == ISD::XOR ||
BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
BinOps.push_back(BinOp.getOperand(i));
} else {
return SDValue();
}
}
}
for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
if (isa<ConstantSDNode>(Inputs[i]))
continue;
for (const SDNode *User : Inputs[i].getNode()->uses()) {
if (User != N && !Visited.count(User))
return SDValue();
if (User->getOpcode() == ISD::SELECT) {
if (User->getOperand(0) == Inputs[i])
return SDValue();
} else if (User->getOpcode() == ISD::SELECT_CC) {
if (User->getOperand(0) == Inputs[i] ||
User->getOperand(1) == Inputs[i])
return SDValue();
}
}
}
for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
for (const SDNode *User : PromOps[i].getNode()->uses()) {
if (User != N && !Visited.count(User))
return SDValue();
if (User->getOpcode() == ISD::SELECT) {
if (User->getOperand(0) == PromOps[i])
return SDValue();
} else if (User->getOpcode() == ISD::SELECT_CC) {
if (User->getOperand(0) == PromOps[i] ||
User->getOperand(1) == PromOps[i])
return SDValue();
}
}
}
for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
if (isa<ConstantSDNode>(Inputs[i]))
continue;
else
DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
}
std::list<HandleSDNode> PromOpHandles;
for (auto &PromOp : PromOps)
PromOpHandles.emplace_back(PromOp);
while (!PromOpHandles.empty()) {
SDValue PromOp = PromOpHandles.back().getValue();
PromOpHandles.pop_back();
if (PromOp.getOpcode() == ISD::TRUNCATE ||
PromOp.getOpcode() == ISD::SIGN_EXTEND ||
PromOp.getOpcode() == ISD::ZERO_EXTEND ||
PromOp.getOpcode() == ISD::ANY_EXTEND) {
if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
PromOp.getOperand(0).getValueType() != MVT::i1) {
PromOpHandles.emplace_front(PromOp);
continue;
}
SDValue RepValue = PromOp.getOperand(0);
if (isa<ConstantSDNode>(RepValue))
RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
continue;
}
unsigned C;
switch (PromOp.getOpcode()) {
default: C = 0; break;
case ISD::SELECT: C = 1; break;
case ISD::SELECT_CC: C = 2; break;
}
if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
PromOp.getOperand(C).getValueType() != MVT::i1) ||
(!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
PromOpHandles.emplace_front(PromOp);
continue;
}
SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
PromOp.getNode()->op_end());
for (unsigned i = 0; i < 2; ++i)
if (isa<ConstantSDNode>(Ops[C+i]))
Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
DAG.ReplaceAllUsesOfValueWith(PromOp,
DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
}
if (N->getOpcode() == ISD::TRUNCATE)
return N->getOperand(0);
return SDValue(N, 0);
}
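// The opposite direction: for sign/zero/any-extensions of i1 (with CR bits)
// or of i32 (on 64-bit targets) fed by logical and select operations over
// truncated values, promote the feeding operations to the wide type so the
// intervening truncations and this extension disappear. If the inputs do not
// provably carry the needed high bits, mask (zext) or shift-pair (sext) the
// result instead.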
SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
SDLoc dl(N);
if (N->getValueType(0) != MVT::i32 &&
N->getValueType(0) != MVT::i64)
return SDValue();
if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
(N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
return SDValue();
if (N->getOperand(0).getOpcode() != ISD::AND &&
N->getOperand(0).getOpcode() != ISD::OR &&
N->getOperand(0).getOpcode() != ISD::XOR &&
N->getOperand(0).getOpcode() != ISD::SELECT &&
N->getOperand(0).getOpcode() != ISD::SELECT_CC)
return SDValue();
SmallVector<SDValue, 4> Inputs;
SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
SmallPtrSet<SDNode *, 16> Visited;
while (!BinOps.empty()) {
SDValue BinOp = BinOps.pop_back_val();
if (!Visited.insert(BinOp.getNode()).second)
continue;
PromOps.push_back(BinOp);
for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
if (BinOp.getOpcode() == ISD::SELECT && i == 0)
continue;
if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
continue;
if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
isa<ConstantSDNode>(BinOp.getOperand(i))) {
Inputs.push_back(BinOp.getOperand(i));
} else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
BinOp.getOperand(i).getOpcode() == ISD::OR ||
BinOp.getOperand(i).getOpcode() == ISD::XOR ||
BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
BinOps.push_back(BinOp.getOperand(i));
} else {
return SDValue();
}
}
}
DenseMap<SDNode *, EVT> SelectTruncOp[2];
for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
if (isa<ConstantSDNode>(Inputs[i]))
continue;
for (SDNode *User : Inputs[i].getNode()->uses()) {
if (User != N && !Visited.count(User))
return SDValue();
if (User->getOpcode() == ISD::SELECT) {
if (User->getOperand(0) == Inputs[i])
SelectTruncOp[0].insert(std::make_pair(User,
User->getOperand(0).getValueType()));
} else if (User->getOpcode() == ISD::SELECT_CC) {
if (User->getOperand(0) == Inputs[i])
SelectTruncOp[0].insert(std::make_pair(User,
User->getOperand(0).getValueType()));
if (User->getOperand(1) == Inputs[i])
SelectTruncOp[1].insert(std::make_pair(User,
User->getOperand(1).getValueType()));
}
}
}
for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
for (SDNode *User : PromOps[i].getNode()->uses()) {
if (User != N && !Visited.count(User))
return SDValue();
if (User->getOpcode() == ISD::SELECT) {
if (User->getOperand(0) == PromOps[i])
SelectTruncOp[0].insert(std::make_pair(User,
User->getOperand(0).getValueType()));
} else if (User->getOpcode() == ISD::SELECT_CC) {
if (User->getOperand(0) == PromOps[i])
SelectTruncOp[0].insert(std::make_pair(User,
User->getOperand(0).getValueType()));
if (User->getOperand(1) == PromOps[i])
SelectTruncOp[1].insert(std::make_pair(User,
User->getOperand(1).getValueType()));
}
}
}
unsigned PromBits = N->getOperand(0).getValueSizeInBits();
bool ReallyNeedsExt = false;
if (N->getOpcode() != ISD::ANY_EXTEND) {
for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
if (isa<ConstantSDNode>(Inputs[i]))
continue;
unsigned OpBits =
Inputs[i].getOperand(0).getValueSizeInBits();
assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
if ((N->getOpcode() == ISD::ZERO_EXTEND &&
!DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
APInt::getHighBitsSet(OpBits,
OpBits-PromBits))) ||
(N->getOpcode() == ISD::SIGN_EXTEND &&
DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
(OpBits-(PromBits-1)))) {
ReallyNeedsExt = true;
break;
}
}
}
for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
if (isa<ConstantSDNode>(Inputs[i]))
continue;
SDValue InSrc = Inputs[i].getOperand(0);
if (Inputs[i].getValueType() == N->getValueType(0))
DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
else if (N->getOpcode() == ISD::SIGN_EXTEND)
DAG.ReplaceAllUsesOfValueWith(Inputs[i],
DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
else if (N->getOpcode() == ISD::ZERO_EXTEND)
DAG.ReplaceAllUsesOfValueWith(Inputs[i],
DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
else
DAG.ReplaceAllUsesOfValueWith(Inputs[i],
DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
}
std::list<HandleSDNode> PromOpHandles;
for (auto &PromOp : PromOps)
PromOpHandles.emplace_back(PromOp);
while (!PromOpHandles.empty()) {
SDValue PromOp = PromOpHandles.back().getValue();
PromOpHandles.pop_back();
unsigned C;
switch (PromOp.getOpcode()) {
default: C = 0; break;
case ISD::SELECT: C = 1; break;
case ISD::SELECT_CC: C = 2; break;
}
if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
(!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
PromOpHandles.emplace_front(PromOp);
continue;
}
if (PromOp.getOpcode() == ISD::SELECT ||
PromOp.getOpcode() == ISD::SELECT_CC) {
if ((SelectTruncOp[0].count(PromOp.getNode()) &&
PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
(SelectTruncOp[1].count(PromOp.getNode()) &&
PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
PromOpHandles.emplace_front(PromOp);
continue;
}
}
SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
PromOp.getNode()->op_end());
for (unsigned i = 0; i < 2; ++i) {
if (!isa<ConstantSDNode>(Ops[C+i]))
continue;
if (Ops[C+i].getValueType() == N->getValueType(0))
continue;
if (N->getOpcode() == ISD::SIGN_EXTEND)
Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
else if (N->getOpcode() == ISD::ZERO_EXTEND)
Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
else
Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
}
if (PromOp.getOpcode() == ISD::SELECT ||
PromOp.getOpcode() == ISD::SELECT_CC) {
auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
if (SI0 != SelectTruncOp[0].end())
Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
if (SI1 != SelectTruncOp[1].end())
Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
}
DAG.ReplaceAllUsesOfValueWith(PromOp,
DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
}
if (!ReallyNeedsExt)
return N->getOperand(0);
if (N->getOpcode() == ISD::ZERO_EXTEND)
return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
DAG.getConstant(APInt::getLowBitsSet(
N->getValueSizeInBits(0), PromBits),
dl, N->getValueType(0)));
assert(N->getOpcode() == ISD::SIGN_EXTEND &&
"Invalid extension type");
EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
SDValue ShiftCst =
DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
return DAG.getNode(
ISD::SRA, dl, N->getValueType(0),
DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
ShiftCst);
}
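// Fold setcc (X, (sub 0, Y)), eq/ne --> setcc ((add X, Y), 0), eq/ne, which
// avoids materializing the negation, then try the CR-bit combine above.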
SDValue PPCTargetLowering::combineSetCC(SDNode *N,
DAGCombinerInfo &DCI) const {
assert(N->getOpcode() == ISD::SETCC &&
"Should be called with a SETCC node");
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
if (CC == ISD::SETNE || CC == ISD::SETEQ) {
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
LHS.hasOneUse())
std::swap(LHS, RHS);
if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
RHS.hasOneUse()) {
SDLoc DL(N);
SelectionDAG &DAG = DCI.DAG;
EVT VT = N->getValueType(0);
EVT OpVT = LHS.getValueType();
SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
}
}
return DAGCombineTruncBoolExt(N, DCI);
}
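// True if Op is an f64 produced by an extending load (an f32 load widened
// by the load itself).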
static bool isFPExtLoad(SDValue Op) {
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
return LD->getExtensionType() == ISD::EXTLOAD &&
Op.getValueType() == MVT::f64;
return false;
}
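// Reduce the number of scalar fp-to-int conversions feeding a BUILD_VECTOR:
// if every element is mfvsr(fctiXz(...)) of the same flavor, build a vector
// of the fp inputs first and convert it with a single vector fp-to-int node.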
SDValue PPCTargetLowering::
combineElementTruncationToVectorTruncation(SDNode *N,
DAGCombinerInfo &DCI) const {
assert(N->getOpcode() == ISD::BUILD_VECTOR &&
"Should be called with a BUILD_VECTOR node");
SelectionDAG &DAG = DCI.DAG;
SDLoc dl(N);
SDValue FirstInput = N->getOperand(0);
assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
"The input operand must be an fp-to-int conversion.");
unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
if (FirstConversion == PPCISD::FCTIDZ ||
FirstConversion == PPCISD::FCTIDUZ ||
FirstConversion == PPCISD::FCTIWZ ||
FirstConversion == PPCISD::FCTIWUZ) {
bool IsSplat = true;
bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
FirstConversion == PPCISD::FCTIWUZ;
EVT SrcVT = FirstInput.getOperand(0).getValueType();
SmallVector<SDValue, 4> Ops;
EVT TargetVT = N->getValueType(0);
for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
SDValue NextOp = N->getOperand(i);
if (NextOp.getOpcode() != PPCISD::MFVSR)
return SDValue();
unsigned NextConversion = NextOp.getOperand(0).getOpcode();
if (NextConversion != FirstConversion)
return SDValue();
if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
return SDValue();
if (N->getOperand(i) != FirstInput)
IsSplat = false;
}
if (IsSplat)
return SDValue();
for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
SDValue In = N->getOperand(i).getOperand(0);
if (Is32Bit) {
if (In.isUndef())
Ops.push_back(DAG.getUNDEF(SrcVT));
else {
SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
MVT::f32, In.getOperand(0),
DAG.getIntPtrConstant(1, dl));
Ops.push_back(Trunc);
}
} else
Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
}
unsigned Opcode;
if (FirstConversion == PPCISD::FCTIDZ ||
FirstConversion == PPCISD::FCTIWZ)
Opcode = ISD::FP_TO_SINT;
else
Opcode = ISD::FP_TO_UINT;
EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
return DAG.getNode(Opcode, dl, TargetVT, BV);
}
return SDValue();
}
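// If every BUILD_VECTOR input is a (possibly fp_round-ed) load and the loads
// are consecutive in memory, replace the whole node with one wide load; if
// they are consecutive in reverse order, use a wide load from the last
// element's address plus an element-reversing shuffle.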
static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
assert(N->getOpcode() == ISD::BUILD_VECTOR &&
"Should be called with a BUILD_VECTOR node");
SDLoc dl(N);
if (!N->getValueType(0).getVectorElementType().isByteSized())
return SDValue();
bool InputsAreConsecutiveLoads = true;
bool InputsAreReverseConsecutive = true;
unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
SDValue FirstInput = N->getOperand(0);
bool IsRoundOfExtLoad = false;
if (FirstInput.getOpcode() == ISD::FP_ROUND &&
FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
}
if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
N->getNumOperands() == 1)
return SDValue();
for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
return SDValue();
SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
N->getOperand(i);
if (NextInput.getOpcode() != ISD::LOAD)
return SDValue();
SDValue PreviousInput =
IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);
if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
return SDValue();
if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
InputsAreConsecutiveLoads = false;
if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
InputsAreReverseConsecutive = false;
if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
return SDValue();
}
assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
"The loads cannot be both consecutive and reverse consecutive.");
SDValue FirstLoadOp =
IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
SDValue LastLoadOp =
IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
N->getOperand(N->getNumOperands()-1);
LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
if (InputsAreConsecutiveLoads) {
assert(LD1 && "Input needs to be a LoadSDNode.");
return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
LD1->getBasePtr(), LD1->getPointerInfo(),
LD1->getAlign());
}
if (InputsAreReverseConsecutive) {
assert(LDL && "Input needs to be a LoadSDNode.");
SDValue Load =
DAG.getLoad(N->getValueType(0), dl, LDL->getChain(), LDL->getBasePtr(),
LDL->getPointerInfo(), LDL->getAlign());
SmallVector<int, 16> Ops;
for (int i = N->getNumOperands() - 1; i >= 0; i--)
Ops.push_back(i);
return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
DAG.getUNDEF(N->getValueType(0)), Ops);
}
return SDValue();
}
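// The extracted elements are not in the lanes a vector sign-extension reads,
// so shuffle them into place (per the CorrectElems pattern) before emitting
// the SIGN_EXTEND_INREG.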
static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
SDValue Input, uint64_t Elems,
uint64_t CorrectElems) {
SDLoc dl(N);
unsigned NumElems = Input.getValueType().getVectorNumElements();
SmallVector<int, 16> ShuffleMask(NumElems, -1);
for (unsigned i = 0; i < N->getNumOperands(); i++) {
if (DAG.getDataLayout().isLittleEndian())
ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
else
ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
CorrectElems = CorrectElems >> 8;
Elems = Elems >> 8;
}
SDValue Shuffle =
DAG.getVectorShuffle(Input.getValueType(), dl, Input,
DAG.getUNDEF(Input.getValueType()), ShuffleMask);
EVT VT = N->getValueType(0);
SDValue Conv = DAG.getBitcast(VT, Shuffle);
EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
Input.getValueType().getVectorElementType(),
VT.getVectorNumElements());
return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Conv,
DAG.getValueType(ExtVT));
}
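// Recognize a BUILD_VECTOR of sign-extended extracts from one source vector
// and turn it into a vector sign-extension. Each nibble of the TargetElems
// entry (selected by the input/output element widths) names the source lane
// that must feed the corresponding result lane.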
static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
uint64_t TargetElems[] = {
0x3074B8FC, 0x000070F8, 0x10325476, 0x00003074, 0x00001032, };
uint64_t Elems = 0;
int Index;
SDValue Input;
auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
if (!Op)
return false;
if (Op.getOpcode() != ISD::SIGN_EXTEND &&
Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
return false;
SDValue Extract = Op.getOperand(0);
if (Extract.getOpcode() == ISD::ANY_EXTEND)
Extract = Extract.getOperand(0);
if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
return false;
ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
if (!ExtOp)
return false;
Index = ExtOp->getZExtValue();
if (Input && Input != Extract.getOperand(0))
return false;
if (!Input)
Input = Extract.getOperand(0);
Elems = Elems << 8;
Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
Elems |= Index;
return true;
};
for (unsigned i = 0; i < N->getNumOperands(); i++) {
if (!isSExtOfVecExtract(N->getOperand(i))) {
return SDValue();
}
}
int TgtElemArrayIdx;
int InputSize = Input.getValueType().getScalarSizeInBits();
int OutputSize = N->getValueType(0).getScalarSizeInBits();
if (InputSize + OutputSize == 40)
TgtElemArrayIdx = 0;
else if (InputSize + OutputSize == 72)
TgtElemArrayIdx = 1;
else if (InputSize + OutputSize == 48)
TgtElemArrayIdx = 2;
else if (InputSize + OutputSize == 80)
TgtElemArrayIdx = 3;
else if (InputSize + OutputSize == 96)
TgtElemArrayIdx = 4;
else
return SDValue();
uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
CorrectElems = DAG.getDataLayout().isLittleEndian()
? CorrectElems & 0x0F0F0F0F0F0F0F0F
: CorrectElems & 0xF0F0F0F0F0F0F0F0;
if (Elems != CorrectElems) {
return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
}
return SDValue();
}
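// On ISA 3.1, a v1i128 built from a single zero- or any-extending scalar
// load can be loaded directly into the vector register (PPCISD::LXVRZX).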
static SDValue combineBVZEXTLOAD(SDNode *N, SelectionDAG &DAG) {
SDLoc DL(N);
if (N->getValueType(0) != MVT::v1i128)
return SDValue();
SDValue Operand = N->getOperand(0);
if (Operand.getOpcode() != ISD::LOAD)
return SDValue();
auto *LD = cast<LoadSDNode>(Operand);
EVT MemoryType = LD->getMemoryVT();
bool ValidLDType = MemoryType == MVT::i8 || MemoryType == MVT::i16 ||
MemoryType == MVT::i32 || MemoryType == MVT::i64;
if (!ValidLDType ||
(LD->getExtensionType() != ISD::ZEXTLOAD &&
LD->getExtensionType() != ISD::EXTLOAD))
return SDValue();
SDValue LoadOps[] = {
LD->getChain(), LD->getBasePtr(),
DAG.getIntPtrConstant(MemoryType.getScalarSizeInBits(), DL)};
return DAG.getMemIntrinsicNode(PPCISD::LXVRZX, DL,
DAG.getVTList(MVT::v1i128, MVT::Other),
LoadOps, MemoryType, LD->getMemOperand());
}
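// BUILD_VECTOR combines: try, in turn, the element-truncation,
// consecutive-load, vector-sign-extend, and zero-extending-load patterns
// above; finally, handle v2f64 built from two int-to-fp conversions of
// adjacent elements of one v4i32 source via {S,U}INT_VEC_TO_FP.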
SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
DAGCombinerInfo &DCI) const {
assert(N->getOpcode() == ISD::BUILD_VECTOR &&
"Should be called with a BUILD_VECTOR node");
SelectionDAG &DAG = DCI.DAG;
SDLoc dl(N);
if (!Subtarget.hasVSX())
return SDValue();
SDValue FirstInput = N->getOperand(0);
if (FirstInput.getOpcode() == PPCISD::MFVSR) {
SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
if (Reduced)
return Reduced;
}
SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
if (Reduced)
return Reduced;
if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
Reduced = combineBVOfVecSExt(N, DAG);
if (Reduced)
return Reduced;
}
if (Subtarget.isISA3_1()) {
SDValue BVOfZLoad = combineBVZEXTLOAD(N, DAG);
if (BVOfZLoad)
return BVOfZLoad;
}
if (N->getValueType(0) != MVT::v2f64)
return SDValue();
if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
FirstInput.getOpcode() != ISD::UINT_TO_FP)
return SDValue();
if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
return SDValue();
if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
return SDValue();
SDValue Ext1 = FirstInput.getOperand(0);
SDValue Ext2 = N->getOperand(1).getOperand(0);
if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
return SDValue();
ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
if (!Ext1Op || !Ext2Op)
return SDValue();
if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
Ext1.getOperand(0) != Ext2.getOperand(0))
return SDValue();
int FirstElem = Ext1Op->getZExtValue();
int SecondElem = Ext2Op->getZExtValue();
int SubvecIdx;
if (FirstElem == 0 && SecondElem == 1)
SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
else if (FirstElem == 2 && SecondElem == 3)
SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
else
return SDValue();
SDValue SrcVec = Ext1.getOperand(0);
auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
return DAG.getNode(NodeType, dl, MVT::v2f64,
SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
}
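// Avoid the GPR round trip for int-to-fp: feed fp-to-int results back
// through the fcfid family directly, and on Power9 use lxsizx (plus vexts
// for the signed case) to feed i8/i16 loads straight into the conversion.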
SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
DAGCombinerInfo &DCI) const {
assert((N->getOpcode() == ISD::SINT_TO_FP ||
N->getOpcode() == ISD::UINT_TO_FP) &&
"Need an int -> FP conversion node here");
if (useSoftFloat() || !Subtarget.has64BitSupport())
return SDValue();
SelectionDAG &DAG = DCI.DAG;
SDLoc dl(N);
SDValue Op(N, 0);
if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
return SDValue();
if (!Op.getOperand(0).getValueType().isSimple())
return SDValue();
if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
return SDValue();
SDValue FirstOperand(Op.getOperand(0));
bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
(FirstOperand.getValueType() == MVT::i8 ||
FirstOperand.getValueType() == MVT::i16);
if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
bool DstDouble = Op.getValueType() == MVT::f64;
unsigned ConvOp = Signed ?
(DstDouble ? PPCISD::FCFID : PPCISD::FCFIDS) :
(DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
SDValue WidthConst =
DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
dl, false);
LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
DAG.getVTList(MVT::f64, MVT::Other),
Ops, MVT::i8, LDN->getMemOperand());
if (Signed) {
SDValue ExtOps[] = { Ld, WidthConst };
SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
} else
return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
}
if (Op.getOperand(0).getValueType() == MVT::i32)
return SDValue();
assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
"UINT_TO_FP is supported only with FPCVT");
unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
: PPCISD::FCFIDS)
: (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
: PPCISD::FCFID);
MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
? MVT::f32
: MVT::f64;
if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
Subtarget.hasFPCVT()) ||
(Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
SDValue Src = Op.getOperand(0).getOperand(0);
if (Src.getValueType() == MVT::f32) {
Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
DCI.AddToWorklist(Src.getNode());
} else if (Src.getValueType() != MVT::f64) {
return SDValue();
}
unsigned FCTOp =
Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
PPCISD::FCTIDUZ;
SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
FP = DAG.getNode(ISD::FP_ROUND, dl,
MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
DCI.AddToWorklist(FP.getNode());
}
return FP;
}
return SDValue();
}
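// On little-endian subtargets that need swaps, expand a VSX vector load into
// lxvd2x followed by xxswapd; the swap restores the expected element order.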
SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
DAGCombinerInfo &DCI) const {
if (DCI.isBeforeLegalizeOps())
return SDValue();
SelectionDAG &DAG = DCI.DAG;
SDLoc dl(N);
SDValue Chain;
SDValue Base;
MachineMemOperand *MMO;
switch (N->getOpcode()) {
default:
llvm_unreachable("Unexpected opcode for little endian VSX load");
case ISD::LOAD: {
LoadSDNode *LD = cast<LoadSDNode>(N);
Chain = LD->getChain();
Base = LD->getBasePtr();
MMO = LD->getMemOperand();
if (MMO->getSize() < 16)
return SDValue();
break;
}
case ISD::INTRINSIC_W_CHAIN: {
MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
Chain = Intrin->getChain();
Base = Intrin->getOperand(2);
MMO = Intrin->getMemOperand();
break;
}
}
MVT VecTy = N->getValueType(0).getSimpleVT();
SDValue LoadOps[] = { Chain, Base };
SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
DAG.getVTList(MVT::v2f64, MVT::Other),
LoadOps, MVT::v2f64, MMO);
DCI.AddToWorklist(Load.getNode());
Chain = Load.getValue(1);
SDValue Swap = DAG.getNode(
PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
DCI.AddToWorklist(Swap.getNode());
if (VecTy != MVT::v2f64) {
SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
DCI.AddToWorklist(N.getNode());
return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
N, Swap.getValue(1));
}
return Swap;
}
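// The store counterpart: xxswapd the source, then store it with stxvd2x.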
SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
DAGCombinerInfo &DCI) const {
if (DCI.isBeforeLegalizeOps())
return SDValue();
SelectionDAG &DAG = DCI.DAG;
SDLoc dl(N);
SDValue Chain;
SDValue Base;
unsigned SrcOpnd;
MachineMemOperand *MMO;
switch (N->getOpcode()) {
default:
llvm_unreachable("Unexpected opcode for little endian VSX store");
case ISD::STORE: {
StoreSDNode *ST = cast<StoreSDNode>(N);
Chain = ST->getChain();
Base = ST->getBasePtr();
MMO = ST->getMemOperand();
SrcOpnd = 1;
if (MMO->getSize() < 16)
return SDValue();
break;
}
case ISD::INTRINSIC_VOID: {
MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
Chain = Intrin->getChain();
Base = Intrin->getOperand(3);
MMO = Intrin->getMemOperand();
SrcOpnd = 2;
break;
}
}
SDValue Src = N->getOperand(SrcOpnd);
MVT VecTy = Src.getValueType().getSimpleVT();
if (VecTy != MVT::v2f64) {
Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
DCI.AddToWorklist(Src.getNode());
}
SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
DCI.AddToWorklist(Swap.getNode());
Chain = Swap.getValue(1);
SDValue StoreOps[] = { Chain, Swap, Base };
SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
DAG.getVTList(MVT::Other),
StoreOps, VecTy, MMO);
DCI.AddToWorklist(Store.getNode());
return Store;
}
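// Store an fp-to-int result directly from the VSR (ST_VSR_SCAL_INT) instead
// of moving it through a GPR first, for the type combinations the subtarget
// supports.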
SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
SDLoc dl(N);
unsigned Opcode = N->getOperand(1).getOpcode();
assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
&& "Not a FP_TO_INT Instruction!");
SDValue Val = N->getOperand(1).getOperand(0);
EVT Op1VT = N->getOperand(1).getValueType();
EVT ResVT = Val.getValueType();
if (!isTypeLegal(ResVT))
return SDValue();
bool ValidTypeForStoreFltAsInt =
(Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
(Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
if (ResVT == MVT::f128 && !Subtarget.hasP9Vector())
return SDValue();
if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
return SDValue();
if (ResVT.getScalarSizeInBits() == 32) {
Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
DCI.AddToWorklist(Val.getNode());
}
unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
PPCISD::FP_TO_SINT_IN_VSR :
PPCISD::FP_TO_UINT_IN_VSR;
Val = DAG.getNode(ConvOpcode,
dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
DCI.AddToWorklist(Val.getNode());
unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
DAG.getIntPtrConstant(ByteSize, dl, false),
DAG.getValueType(Op1VT) };
Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
DAG.getVTList(MVT::Other), Ops,
cast<StoreSDNode>(N)->getMemoryVT(),
cast<StoreSDNode>(N)->getMemOperand());
DCI.AddToWorklist(Val.getNode());
return Val;
}
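// True if the mask strictly alternates between elements of the two source
// vectors (starting with either one).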
static bool isAlternatingShuffMask(ArrayRef<int> Mask, int NumElts) {
bool PrevElemFromFirstVec = Mask[0] < NumElts;
for (int i = 1, e = Mask.size(); i < e; i++) {
if (PrevElemFromFirstVec && Mask[i] < NumElts)
return false;
if (!PrevElemFromFirstVec && Mask[i] >= NumElts)
return false;
PrevElemFromFirstVec = !PrevElemFromFirstVec;
}
return true;
}
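// True if Op is a BUILD_VECTOR whose defined operands are all the same value
// (undefs permitted).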
static bool isSplatBV(SDValue Op) {
if (Op.getOpcode() != ISD::BUILD_VECTOR)
return false;
SDValue FirstOp;
for (int i = 0, e = Op.getNumOperands(); i < e; i++) {
FirstOp = Op.getOperand(i);
if (!FirstOp.isUndef())
break;
}
for (int i = 1, e = Op.getNumOperands(); i < e; i++)
if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())
return false;
return true;
}
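// Return the SCALAR_TO_VECTOR behind Op, looking through at most one
// bitcast; otherwise return a null SDValue.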
static SDValue isScalarToVec(SDValue Op) {
if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
return Op;
if (Op.getOpcode() != ISD::BITCAST)
return SDValue();
Op = Op.getOperand(0);
if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
return Op;
return SDValue();
}
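// After a scalar_to_vector is replaced by its permuted form, the element
// sits half a vector away from lane zero; shift the mask entries that
// referred to it accordingly (adjusting for the valid lane width on
// big-endian).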
static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl<int> &ShuffV,
int LHSMaxIdx, int RHSMinIdx,
int RHSMaxIdx, int HalfVec,
unsigned ValidLaneWidth,
const PPCSubtarget &Subtarget) {
for (int i = 0, e = ShuffV.size(); i < e; i++) {
int Idx = ShuffV[i];
if ((Idx >= 0 && Idx < LHSMaxIdx) || (Idx >= RHSMinIdx && Idx < RHSMaxIdx))
ShuffV[i] +=
Subtarget.isLittleEndian() ? HalfVec : HalfVec - ValidLaneWidth;
}
}
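// Produce the permuted form of a scalar_to_vector: if the scalar was itself
// extracted from a vector of the same type, a single shuffle moves it into
// the lane a direct move would land it in; otherwise emit
// PPCISD::SCALAR_TO_VECTOR_PERMUTED.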
static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG,
const PPCSubtarget &Subtarget) {
SDLoc dl(OrigSToV);
EVT VT = OrigSToV.getValueType();
assert(OrigSToV.getOpcode() == ISD::SCALAR_TO_VECTOR &&
"Expecting a SCALAR_TO_VECTOR here");
SDValue Input = OrigSToV.getOperand(0);
if (Input.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(Input.getOperand(1));
SDValue OrigVector = Input.getOperand(0);
if (Idx && VT == OrigVector.getValueType()) {
unsigned NumElts = VT.getVectorNumElements();
assert(
NumElts > 1 &&
"Cannot produce a permuted scalar_to_vector for one element vector");
SmallVector<int, 16> NewMask(NumElts, -1);
unsigned ResultInElt = NumElts / 2;
ResultInElt -= Subtarget.isLittleEndian() ? 0 : 1;
NewMask[ResultInElt] = Idx->getZExtValue();
return DAG.getVectorShuffle(VT, dl, OrigVector, OrigVector, NewMask);
}
}
return DAG.getNode(PPCISD::SCALAR_TO_VECTOR_PERMUTED, dl, VT,
OrigSToV.getOperand(0));
}
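// Two shuffle combines. First, rewrite shuffles of scalar_to_vector nodes to
// the permuted form (cheaper on subtargets with direct moves), fixing the
// mask to match. Second, when one operand is a splat BUILD_VECTOR and the
// mask alternates between the operands, canonicalize the mask lanes taken
// from the splat and rebuild it with getSplatBuildVector so it can be
// matched cheaply.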
SDValue PPCTargetLowering::combineVectorShuffle(ShuffleVectorSDNode *SVN,
SelectionDAG &DAG) const {
SDValue LHS = SVN->getOperand(0);
SDValue RHS = SVN->getOperand(1);
auto Mask = SVN->getMask();
int NumElts = LHS.getValueType().getVectorNumElements();
SDValue Res(SVN, 0);
SDLoc dl(SVN);
bool IsLittleEndian = Subtarget.isLittleEndian();
if (!Subtarget.hasDirectMove())
return Res;
if (Mask[0] >= NumElts && LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
RHS.getOpcode() != ISD::VECTOR_SHUFFLE) {
std::swap(LHS, RHS);
Res = DAG.getCommutedVectorShuffle(*SVN);
Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
}
SmallVector<int, 16> ShuffV(Mask.begin(), Mask.end());
SDValue SToVLHS = isScalarToVec(LHS);
SDValue SToVRHS = isScalarToVec(RHS);
if (SToVLHS || SToVRHS) {
if (SToVLHS && SToVRHS &&
(SToVLHS.getValueType().getScalarSizeInBits() !=
SToVRHS.getValueType().getScalarSizeInBits()))
return Res;
int NumEltsIn = SToVLHS ? SToVLHS.getValueType().getVectorNumElements()
: SToVRHS.getValueType().getVectorNumElements();
int NumEltsOut = ShuffV.size();
unsigned ValidLaneWidth =
SToVLHS ? SToVLHS.getValueType().getScalarSizeInBits() /
LHS.getValueType().getScalarSizeInBits()
: SToVRHS.getValueType().getScalarSizeInBits() /
RHS.getValueType().getScalarSizeInBits();
int LHSMaxIdx = -1;
int RHSMinIdx = -1;
int RHSMaxIdx = -1;
int HalfVec = LHS.getValueType().getVectorNumElements() / 2;
if (SToVLHS) {
if (!IsLittleEndian && SToVLHS.getValueType().getScalarSizeInBits() >= 64)
return Res;
LHSMaxIdx = NumEltsOut / NumEltsIn;
SToVLHS = getSToVPermuted(SToVLHS, DAG, Subtarget);
if (SToVLHS.getValueType() != LHS.getValueType())
SToVLHS = DAG.getBitcast(LHS.getValueType(), SToVLHS);
LHS = SToVLHS;
}
if (SToVRHS) {
if (!IsLittleEndian && SToVRHS.getValueType().getScalarSizeInBits() >= 64)
return Res;
RHSMinIdx = NumEltsOut;
RHSMaxIdx = NumEltsOut / NumEltsIn + RHSMinIdx;
SToVRHS = getSToVPermuted(SToVRHS, DAG, Subtarget);
if (SToVRHS.getValueType() != RHS.getValueType())
SToVRHS = DAG.getBitcast(RHS.getValueType(), SToVRHS);
RHS = SToVRHS;
}
fixupShuffleMaskForPermutedSToV(ShuffV, LHSMaxIdx, RHSMinIdx, RHSMaxIdx,
HalfVec, ValidLaneWidth, Subtarget);
Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
if (!isa<ShuffleVectorSDNode>(Res))
return Res;
Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
}
SDValue TheSplat = IsLittleEndian ? RHS : LHS;
if (!isSplatBV(TheSplat))
return Res;
if (!isAlternatingShuffMask(Mask, NumElts))
return Res;
if (IsLittleEndian) {
if (Mask[0] < NumElts)
for (int i = 1, e = Mask.size(); i < e; i += 2) {
if (ShuffV[i] < 0)
continue;
ShuffV[i] = (ShuffV[i - 1] + NumElts);
}
else
for (int i = 0, e = Mask.size(); i < e; i += 2) {
if (ShuffV[i] < 0)
continue;
ShuffV[i] = (ShuffV[i + 1] + NumElts);
}
} else {
if (Mask[0] < NumElts)
for (int i = 0, e = Mask.size(); i < e; i += 2) {
if (ShuffV[i] < 0)
continue;
ShuffV[i] = ShuffV[i + 1] - NumElts;
}
else
for (int i = 1, e = Mask.size(); i < e; i += 2) {
if (ShuffV[i] < 0)
continue;
ShuffV[i] = ShuffV[i - 1] - NumElts;
}
}
SDValue SplatVal =
cast<BuildVectorSDNode>(TheSplat.getNode())->getSplatValue();
TheSplat = DAG.getSplatBuildVector(TheSplat.getValueType(), dl, SplatVal);
if (IsLittleEndian)
RHS = TheSplat;
else
LHS = TheSplat;
return DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
}
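// On little-endian Power9, fold an element-reversing shuffle of a normal
// vector load (or one feeding a normal vector store) into LOAD_VEC_BE /
// STORE_VEC_BE, which perform the reversal as part of the memory access.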
SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
LSBaseSDNode *LSBase,
DAGCombinerInfo &DCI) const {
assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
"Not a reverse memop pattern!");
auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
auto Mask = SVN->getMask();
int i = 0;
auto I = Mask.rbegin();
auto E = Mask.rend();
for (; I != E; ++I) {
if (*I != i)
return false;
i++;
}
return true;
};
SelectionDAG &DAG = DCI.DAG;
EVT VT = SVN->getValueType(0);
if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
return SDValue();
if (!Subtarget.hasP9Vector())
return SDValue();
if (!IsElementReverse(SVN))
return SDValue();
if (LSBase->getOpcode() == ISD::LOAD) {
for (SDNode::use_iterator UI = LSBase->use_begin(), UE = LSBase->use_end();
UI != UE; ++UI)
if (UI.getUse().getResNo() == 0 && UI->getOpcode() != ISD::VECTOR_SHUFFLE)
return SDValue();
SDLoc dl(LSBase);
SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
return DAG.getMemIntrinsicNode(
PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
LSBase->getMemoryVT(), LSBase->getMemOperand());
}
if (LSBase->getOpcode() == ISD::STORE) {
if (!SVN->hasOneUse())
return SDValue();
SDLoc dl(LSBase);
SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
LSBase->getBasePtr()};
return DAG.getMemIntrinsicNode(
PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
LSBase->getMemoryVT(), LSBase->getMemOperand());
}
llvm_unreachable("Expected a load or store node here");
}
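// Target-specific DAG combines, dispatched on the node's opcode.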
SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
SDLoc dl(N);
switch (N->getOpcode()) {
default: break;
case ISD::ADD:
return combineADD(N, DCI);
case ISD::SHL:
return combineSHL(N, DCI);
case ISD::SRA:
return combineSRA(N, DCI);
case ISD::SRL:
return combineSRL(N, DCI);
case ISD::MUL:
return combineMUL(N, DCI);
case ISD::FMA:
case PPCISD::FNMSUB:
return combineFMALike(N, DCI);
case PPCISD::SHL:
if (isNullConstant(N->getOperand(0))) return N->getOperand(0);
break;
case PPCISD::SRL:
if (isNullConstant(N->getOperand(0))) return N->getOperand(0);
break;
case PPCISD::SRA:
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
if (C->isZero() || C->isAllOnes()) return N->getOperand(0);
}
break;
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND:
return DAGCombineExtBoolTrunc(N, DCI);
case ISD::TRUNCATE:
return combineTRUNCATE(N, DCI);
case ISD::SETCC:
if (SDValue CSCC = combineSetCC(N, DCI))
return CSCC;
LLVM_FALLTHROUGH;
case ISD::SELECT_CC:
return DAGCombineTruncBoolExt(N, DCI);
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
return combineFPToIntToFP(N, DCI);
case ISD::VECTOR_SHUFFLE:
if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
LSBaseSDNode *LSBase = cast<LSBaseSDNode>(N->getOperand(0));
return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
}
return combineVectorShuffle(cast<ShuffleVectorSDNode>(N), DCI.DAG);
case ISD::STORE: {
EVT Op1VT = N->getOperand(1).getValueType();
unsigned Opcode = N->getOperand(1).getOpcode();
if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
SDValue Val = combineStoreFPToInt(N, DCI);
if (Val)
return Val;
}
if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
SDValue Val = combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
if (Val)
return Val;
}
if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
N->getOperand(1).getNode()->hasOneUse() &&
(Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
(Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
if (mVT.isExtended() || mVT.getSizeInBits() < 16)
break;
SDValue BSwapOp = N->getOperand(1).getOperand(0);
if (BSwapOp.getValueType() == MVT::i16)
BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
if (Op1VT.bitsGT(mVT)) {
int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
DAG.getConstant(Shift, dl, MVT::i32));
if (Op1VT == MVT::i64)
BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
}
SDValue Ops[] = {
N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
};
return
DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
Ops, cast<StoreSDNode>(N)->getMemoryVT(),
cast<StoreSDNode>(N)->getMemOperand());
}
if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
MemVT.getSizeInBits());
SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
N->getOperand(3));
cast<StoreSDNode>(N)->setTruncatingStore(true);
return SDValue(N, 0);
}
if (Op1VT.isSimple()) {
MVT StoreVT = Op1VT.getSimpleVT();
if (Subtarget.needsSwapsForVSXMemOps() &&
(StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
return expandVSXStoreForLE(N, DCI);
}
break;
}
case ISD::LOAD: {
LoadSDNode *LD = cast<LoadSDNode>(N);
EVT VT = LD->getValueType(0);
if (VT.isSimple()) {
MVT LoadVT = VT.getSimpleVT();
if (Subtarget.needsSwapsForVSXMemOps() &&
(LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
return expandVSXLoadForLE(N, DCI);
}
auto ReplaceTwoFloatLoad = [&]() {
if (VT != MVT::i64)
return false;
if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
LD->isVolatile())
return false;
if (!LD->hasNUsesOfValue(2, 0))
return false;
auto UI = LD->use_begin();
while (UI.getUse().getResNo() != 0) ++UI;
SDNode *Trunc = *UI++;
while (UI.getUse().getResNo() != 0) ++UI;
SDNode *RightShift = *UI;
if (Trunc->getOpcode() != ISD::TRUNCATE)
std::swap(Trunc, RightShift);
if (Trunc->getOpcode() != ISD::TRUNCATE ||
Trunc->getValueType(0) != MVT::i32 ||
!Trunc->hasOneUse())
return false;
if (RightShift->getOpcode() != ISD::SRL ||
!isa<ConstantSDNode>(RightShift->getOperand(1)) ||
RightShift->getConstantOperandVal(1) != 32 ||
!RightShift->hasOneUse())
return false;
SDNode *Trunc2 = *RightShift->use_begin();
if (Trunc2->getOpcode() != ISD::TRUNCATE ||
Trunc2->getValueType(0) != MVT::i32 ||
!Trunc2->hasOneUse())
return false;
SDNode *Bitcast = *Trunc->use_begin();
SDNode *Bitcast2 = *Trunc2->use_begin();
if (Bitcast->getOpcode() != ISD::BITCAST ||
Bitcast->getValueType(0) != MVT::f32)
return false;
if (Bitcast2->getOpcode() != ISD::BITCAST ||
Bitcast2->getValueType(0) != MVT::f32)
return false;
if (Subtarget.isLittleEndian())
std::swap(Bitcast, Bitcast2);
SDValue BasePtr = LD->getBasePtr();
if (LD->isIndexed()) {
assert(LD->getAddressingMode() == ISD::PRE_INC &&
"Non-pre-inc AM on PPC?");
BasePtr =
DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
LD->getOffset());
}
auto MMOFlags =
LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
LD->getPointerInfo(), LD->getAlign(),
MMOFlags, LD->getAAInfo());
SDValue AddPtr =
DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
BasePtr, DAG.getIntPtrConstant(4, dl));
SDValue FloatLoad2 = DAG.getLoad(
MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
LD->getPointerInfo().getWithOffset(4),
commonAlignment(LD->getAlign(), 4), MMOFlags, LD->getAAInfo());
if (LD->isIndexed()) {
DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
}
DCI.CombineTo(Bitcast2, FloatLoad);
DCI.CombineTo(Bitcast, FloatLoad2);
DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
SDValue(FloatLoad2.getNode(), 1));
return true;
};
if (ReplaceTwoFloatLoad())
return SDValue(N, 0);
EVT MemVT = LD->getMemoryVT();
Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty);
if (LD->isUnindexed() && VT.isVector() &&
((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
!Subtarget.hasP8Vector() &&
(VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
VT == MVT::v4f32))) &&
LD->getAlign() < ABIAlignment) {
SDValue Chain = LD->getChain();
SDValue Ptr = LD->getBasePtr();
bool isLittleEndian = Subtarget.isLittleEndian();
Intrinsic::ID Intr, IntrLD, IntrPerm;
MVT PermCntlTy, PermTy, LDTy;
Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr
: Intrinsic::ppc_altivec_lvsl;
IntrLD = Intrinsic::ppc_altivec_lvx;
IntrPerm = Intrinsic::ppc_altivec_vperm;
PermCntlTy = MVT::v16i8;
PermTy = MVT::v4i32;
LDTy = MVT::v4i32;
SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
MachineFunction &MF = DAG.getMachineFunction();
MachineMemOperand *BaseMMO =
MF.getMachineMemOperand(LD->getMemOperand(),
-(long)MemVT.getStoreSize()+1,
2*MemVT.getStoreSize()-1);
SDValue LDXIntID =
DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
SDValue BaseLoad =
DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
DAG.getVTList(PermTy, MVT::Other),
BaseLoadOps, LDTy, BaseMMO);
int IncOffset = VT.getSizeInBits() / 8;
int IncValue = IncOffset;
if (!findConsecutiveLoad(LD, DAG))
--IncValue;
SDValue Increment =
DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
MachineMemOperand *ExtraMMO =
MF.getMachineMemOperand(LD->getMemOperand(),
1, 2*MemVT.getStoreSize()-1);
SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
SDValue ExtraLoad =
DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
DAG.getVTList(PermTy, MVT::Other),
ExtraLoadOps, LDTy, ExtraMMO);
SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
BaseLoad.getValue(1), ExtraLoad.getValue(1));
SDValue Perm;
if (isLittleEndian)
Perm = BuildIntrinsicOp(IntrPerm,
ExtraLoad, BaseLoad, PermCntl, DAG, dl);
else
Perm = BuildIntrinsicOp(IntrPerm,
BaseLoad, ExtraLoad, PermCntl, DAG, dl);
if (VT != PermTy)
Perm = Subtarget.hasAltivec()
? DAG.getNode(ISD::BITCAST, dl, VT, Perm)
: DAG.getNode(ISD::FP_ROUND, dl, VT, Perm,
DAG.getTargetConstant(1, dl, MVT::i64));
DCI.CombineTo(N, Perm, TF);
return SDValue(N, 0);
}
}
break;
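// lvsl/lvsr only depend on the low-order bits of the address. If another
// permute-control intrinsic already exists for a base address that differs
// only by a multiple of 16 bytes, reuse its result instead.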
case ISD::INTRINSIC_WO_CHAIN: {
bool isLittleEndian = Subtarget.isLittleEndian();
unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
: Intrinsic::ppc_altivec_lvsl);
if (IID == Intr && N->getOperand(1)->getOpcode() == ISD::ADD) {
SDValue Add = N->getOperand(1);
int Bits = 4 /* 16 byte alignment */;
if (DAG.MaskedValueIsZero(Add->getOperand(1),
                          APInt::getAllOnes(Bits)
                              .zext(Add.getScalarValueSizeInBits()))) {
SDNode *BasePtr = Add->getOperand(0).getNode();
for (SDNode *U : BasePtr->uses()) {
if (U->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
cast<ConstantSDNode>(U->getOperand(0))->getZExtValue() == IID) {
return SDValue(U, 0);
}
}
}
if (isa<ConstantSDNode>(Add->getOperand(1))) {
SDNode *BasePtr = Add->getOperand(0).getNode();
for (SDNode *U : BasePtr->uses()) {
if (U->getOpcode() == ISD::ADD &&
isa<ConstantSDNode>(U->getOperand(1)) &&
(cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
cast<ConstantSDNode>(U->getOperand(1))->getZExtValue()) %
(1ULL << Bits) ==
0) {
SDNode *OtherAdd = U;
for (SDNode *V : OtherAdd->uses()) {
if (V->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
cast<ConstantSDNode>(V->getOperand(0))->getZExtValue() ==
IID) {
return SDValue(V, 0);
}
}
}
}
}
}
if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
(IID == Intrinsic::ppc_altivec_vmaxsw ||
IID == Intrinsic::ppc_altivec_vmaxsh ||
IID == Intrinsic::ppc_altivec_vmaxsb)) {
SDValue V1 = N->getOperand(1);
SDValue V2 = N->getOperand(2);
if ((V1.getSimpleValueType() == MVT::v4i32 ||
V1.getSimpleValueType() == MVT::v8i16 ||
V1.getSimpleValueType() == MVT::v16i8) &&
V1.getSimpleValueType() == V2.getSimpleValueType()) {
if (V1.getOpcode() == ISD::SUB &&
ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
V1.getOperand(1) == V2) {
return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
}
if (V2.getOpcode() == ISD::SUB &&
ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
V2.getOperand(1) == V1) {
return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
}
if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
V1.getOperand(0) == V2.getOperand(1) &&
V1.getOperand(1) == V2.getOperand(0)) {
return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
}
}
}
}
break;
case ISD::INTRINSIC_W_CHAIN:
if (Subtarget.needsSwapsForVSXMemOps()) {
switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
default:
break;
case Intrinsic::ppc_vsx_lxvw4x:
case Intrinsic::ppc_vsx_lxvd2x:
return expandVSXLoadForLE(N, DCI);
}
}
break;
case ISD::INTRINSIC_VOID:
if (Subtarget.needsSwapsForVSXMemOps()) {
switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
default:
break;
case Intrinsic::ppc_vsx_stxvw4x:
case Intrinsic::ppc_vsx_stxvd2x:
return expandVSXStoreForLE(N, DCI);
}
}
break;
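// Turn BSWAP (LOAD) -> lhbrx/lwbrx/ldbrx.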
case ISD::BSWAP: {
bool Is64BitBswapOn64BitTgt =
Subtarget.isPPC64() && N->getValueType(0) == MVT::i64;
bool IsSingleUseNormalLd = ISD::isNormalLoad(N->getOperand(0).getNode()) &&
N->getOperand(0).hasOneUse();
if (IsSingleUseNormalLd &&
(N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
(Subtarget.hasLDBRX() && Is64BitBswapOn64BitTgt))) {
SDValue Load = N->getOperand(0);
LoadSDNode *LD = cast<LoadSDNode>(Load);
SDValue Ops[] = {
LD->getChain(), LD->getBasePtr(), DAG.getValueType(N->getValueType(0)) };
SDValue BSLoad =
DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
DAG.getVTList(N->getValueType(0) == MVT::i64 ?
MVT::i64 : MVT::i32, MVT::Other),
Ops, LD->getMemoryVT(), LD->getMemOperand());
SDValue ResVal = BSLoad;
if (N->getValueType(0) == MVT::i16)
ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
DCI.CombineTo(N, ResVal);
DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
return SDValue(N, 0);
}
if (!DCI.isBeforeLegalize() || !Is64BitBswapOn64BitTgt ||
!IsSingleUseNormalLd)
return SDValue();
LoadSDNode *LD = cast<LoadSDNode>(N->getOperand(0));
if (!LD->isSimple())
return SDValue();
SDValue BasePtr = LD->getBasePtr();
SDValue Lo = DAG.getLoad(MVT::i32, dl, LD->getChain(), BasePtr,
LD->getPointerInfo(), LD->getAlign());
Lo = DAG.getNode(ISD::BSWAP, dl, MVT::i32, Lo);
BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
DAG.getIntPtrConstant(4, dl));
MachineMemOperand *NewMMO = DAG.getMachineFunction().getMachineMemOperand(
LD->getMemOperand(), 4, 4);
SDValue Hi = DAG.getLoad(MVT::i32, dl, LD->getChain(), BasePtr, NewMMO);
Hi = DAG.getNode(ISD::BSWAP, dl, MVT::i32, Hi);
SDValue Res;
if (Subtarget.isLittleEndian())
Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Hi, Lo);
else
Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
SDValue TF =
DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
Hi.getOperand(0).getValue(1), Lo.getOperand(0).getValue(1));
DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), TF);
return Res;
}
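// If a VCMP_rec node already exists with exactly the same operands as this
// node, use its result instead of this node (VCMP_rec computes both a CR6
// and a normal output).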
case PPCISD::VCMP:
if (!N->getOperand(0).hasOneUse() &&
!N->getOperand(1).hasOneUse() &&
!N->getOperand(2).hasOneUse()) {
SDNode *VCMPrecNode = nullptr;
SDNode *LHSN = N->getOperand(0).getNode();
for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
UI != E; ++UI)
if (UI->getOpcode() == PPCISD::VCMP_rec &&
UI->getOperand(1) == N->getOperand(1) &&
UI->getOperand(2) == N->getOperand(2) &&
UI->getOperand(0) == N->getOperand(0)) {
VCMPrecNode = *UI;
break;
}
if (!VCMPrecNode || VCMPrecNode->hasNUsesOfValue(0, 1))
break;
SDNode *FlagUser = nullptr;
for (SDNode::use_iterator UI = VCMPrecNode->use_begin();
FlagUser == nullptr; ++UI) {
assert(UI != VCMPrecNode->use_end() && "Didn't find user!");
SDNode *User = *UI;
for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
if (User->getOperand(i) == SDValue(VCMPrecNode, 1)) {
FlagUser = User;
break;
}
}
}
if (FlagUser->getOpcode() == PPCISD::MFOCRF)
return SDValue(VCMPrecNode, 0);
}
break;
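// Turn a branch on the loop_decrement intrinsic into the CTR-decrementing
// BDNZ branch.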
case ISD::BRCOND: {
SDValue Cond = N->getOperand(1);
SDValue Target = N->getOperand(2);
if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
Intrinsic::loop_decrement) {
DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
assert(Cond.getNode()->hasOneUse() &&
"Counter decrement has more than one use");
return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
N->getOperand(0), Target);
}
}
break;
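// Lower BR_CC on the loop_decrement intrinsic to BDNZ/BDZ, and branches on
// dot-form Altivec predicate compares to COND_BRANCH on CR6.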
case ISD::BR_CC: {
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
if (LHS.getOpcode() == ISD::AND &&
LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
Intrinsic::loop_decrement &&
isa<ConstantSDNode>(LHS.getOperand(1)) &&
!isNullConstant(LHS.getOperand(1)))
LHS = LHS.getOperand(0);
if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
Intrinsic::loop_decrement &&
isa<ConstantSDNode>(RHS)) {
assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
"Counter decrement comparison is not EQ or NE");
unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
bool isBDNZ = (CC == ISD::SETEQ && Val) ||
(CC == ISD::SETNE && !Val);
DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
assert(LHS.getNode()->hasOneUse() &&
"Counter decrement has more than one use");
return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
N->getOperand(0), N->getOperand(4));
}
int CompareOpc;
bool isDot;
if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
assert(isDot && "Can't compare against a vector result!");
unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
if (Val != 0 && Val != 1) {
if (CC == ISD::SETEQ) return N->getOperand(0);
return DAG.getNode(ISD::BR, dl, MVT::Other,
N->getOperand(0), N->getOperand(4));
}
bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
SDValue Ops[] = {
LHS.getOperand(2), LHS.getOperand(3), DAG.getConstant(CompareOpc, dl, MVT::i32)
};
EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops);
PPC::Predicate CompOpc;
switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
default: // Can't happen, don't crash on invalid number though.
case 0:  // Branch on the value of the EQ bit of CR6.
  CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
  break;
case 1:  // Branch on the inverted value of the EQ bit of CR6.
  CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
  break;
case 2:  // Branch on the value of the LT bit of CR6.
  CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
  break;
case 3:  // Branch on the inverted value of the LT bit of CR6.
  CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
  break;
}
return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
DAG.getConstant(CompOpc, dl, MVT::i32),
DAG.getRegister(PPC::CR6, MVT::i32),
N->getOperand(4), CompNode.getValue(1));
}
break;
}
case ISD::BUILD_VECTOR:
return DAGCombineBuildVector(N, DCI);
case ISD::ABS:
return combineABS(N, DCI);
case ISD::VSELECT:
return combineVSelect(N, DCI);
}
return SDValue();
}
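// Lower sdiv by a (possibly negated) power of two to a shift-right-algebraic
// plus add-with-carry sequence (PPCISD::SRA_ADDZE), negating the result when
// the divisor is negative.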
SDValue
PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
SelectionDAG &DAG,
SmallVectorImpl<SDNode *> &Created) const {
EVT VT = N->getValueType(0);
if (VT == MVT::i64 && !Subtarget.isPPC64())
return SDValue();
if ((VT != MVT::i32 && VT != MVT::i64) ||
!(Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()))
return SDValue();
SDLoc DL(N);
SDValue N0 = N->getOperand(0);
bool IsNegPow2 = Divisor.isNegatedPowerOf2();
unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
Created.push_back(Op.getNode());
if (IsNegPow2) {
Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
Created.push_back(Op.getNode());
}
return Op;
}
void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
KnownBits &Known,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth) const {
Known.resetAll();
switch (Op.getOpcode()) {
default: break;
case PPCISD::LBRX: {
if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
Known.Zero = 0xFFFF0000;
break;
}
case ISD::INTRINSIC_WO_CHAIN: {
switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
default: break;
case Intrinsic::ppc_altivec_vcmpbfp_p:
case Intrinsic::ppc_altivec_vcmpeqfp_p:
case Intrinsic::ppc_altivec_vcmpequb_p:
case Intrinsic::ppc_altivec_vcmpequh_p:
case Intrinsic::ppc_altivec_vcmpequw_p:
case Intrinsic::ppc_altivec_vcmpequd_p:
case Intrinsic::ppc_altivec_vcmpequq_p:
case Intrinsic::ppc_altivec_vcmpgefp_p:
case Intrinsic::ppc_altivec_vcmpgtfp_p:
case Intrinsic::ppc_altivec_vcmpgtsb_p:
case Intrinsic::ppc_altivec_vcmpgtsh_p:
case Intrinsic::ppc_altivec_vcmpgtsw_p:
case Intrinsic::ppc_altivec_vcmpgtsd_p:
case Intrinsic::ppc_altivec_vcmpgtsq_p:
case Intrinsic::ppc_altivec_vcmpgtub_p:
case Intrinsic::ppc_altivec_vcmpgtuh_p:
case Intrinsic::ppc_altivec_vcmpgtuw_p:
case Intrinsic::ppc_altivec_vcmpgtud_p:
case Intrinsic::ppc_altivec_vcmpgtuq_p:
Known.Zero = ~1U; break;
}
break;
}
case ISD::INTRINSIC_W_CHAIN: {
switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
default:
break;
case Intrinsic::ppc_load2r:
Known.Zero = 0xFFFF0000;
break;
}
break;
}
}
}
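// On modern Power cores, prefer 32-byte alignment for innermost loops and
// for loops whose body is between 17 and 32 bytes.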
Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
switch (Subtarget.getCPUDirective()) {
default: break;
case PPC::DIR_970:
case PPC::DIR_PWR4:
case PPC::DIR_PWR5:
case PPC::DIR_PWR5X:
case PPC::DIR_PWR6:
case PPC::DIR_PWR6X:
case PPC::DIR_PWR7:
case PPC::DIR_PWR8:
case PPC::DIR_PWR9:
case PPC::DIR_PWR10:
case PPC::DIR_PWR_FUTURE: {
if (!ML)
break;
if (!DisableInnermostLoopAlign32) {
if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
return Align(32);
}
const PPCInstrInfo *TII = Subtarget.getInstrInfo();
uint64_t LoopSize = 0;
for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
LoopSize += TII->getInstSizeInBytes(*J);
if (LoopSize > 32)
break;
}
if (LoopSize > 16 && LoopSize <= 32)
return Align(32);
break;
}
}
return TargetLowering::getPrefLoopAlignment(ML);
}
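//===----------------------------------------------------------------------===//
//  Inline Assembly Support
//===----------------------------------------------------------------------===//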
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(StringRef Constraint) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
default: break;
case 'b':
case 'r':
case 'f':
case 'd':
case 'v':
case 'y':
return C_RegisterClass;
case 'Z':
return C_Memory;
}
} else if (Constraint == "wc") { // Individual CR bits.
  return C_RegisterClass;
} else if (Constraint == "wa" || Constraint == "wd" ||
           Constraint == "wf" || Constraint == "ws" ||
           Constraint == "wi" || Constraint == "ww") {
  return C_RegisterClass; // VSX registers.
}
return TargetLowering::getConstraintType(Constraint);
}
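/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.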
TargetLowering::ConstraintWeight
PPCTargetLowering::getSingleConstraintMatchWeight(
AsmOperandInfo &info, const char *constraint) const {
ConstraintWeight weight = CW_Invalid;
Value *CallOperandVal = info.CallOperandVal;
if (!CallOperandVal)
return CW_Default;
Type *type = CallOperandVal->getType();
if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
  return CW_Register;
else if ((StringRef(constraint) == "wa" ||
          StringRef(constraint) == "wd" ||
          StringRef(constraint) == "wf") &&
         type->isVectorTy())
  return CW_Register;
else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
  return CW_Register;
else if (StringRef(constraint) == "ws" && type->isDoubleTy())
  return CW_Register;
else if (StringRef(constraint) == "ww" && type->isFloatTy())
  return CW_Register;
switch (*constraint) {
default:
weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
break;
case 'b':
if (type->isIntegerTy())
weight = CW_Register;
break;
case 'f':
if (type->isFloatTy())
weight = CW_Register;
break;
case 'd':
if (type->isDoubleTy())
weight = CW_Register;
break;
case 'v':
if (type->isVectorTy())
weight = CW_Register;
break;
case 'y':
weight = CW_Register;
break;
case 'Z':
weight = CW_Memory;
break;
}
return weight;
}
std::pair<unsigned, const TargetRegisterClass *>
PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
StringRef Constraint,
MVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
case 'b': // R1-R31
  if (VT == MVT::i64 && Subtarget.isPPC64())
    return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
  return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
case 'r': // R0-R31
  if (VT == MVT::i64 && Subtarget.isPPC64())
    return std::make_pair(0U, &PPC::G8RCRegClass);
  return std::make_pair(0U, &PPC::GPRCRegClass);
case 'd':
case 'f':
if (Subtarget.hasSPE()) {
if (VT == MVT::f32 || VT == MVT::i32)
return std::make_pair(0U, &PPC::GPRCRegClass);
if (VT == MVT::f64 || VT == MVT::i64)
return std::make_pair(0U, &PPC::SPERCRegClass);
} else {
if (VT == MVT::f32 || VT == MVT::i32)
return std::make_pair(0U, &PPC::F4RCRegClass);
if (VT == MVT::f64 || VT == MVT::i64)
return std::make_pair(0U, &PPC::F8RCRegClass);
}
break;
case 'v':
if (Subtarget.hasAltivec() && VT.isVector())
return std::make_pair(0U, &PPC::VRRCRegClass);
else if (Subtarget.hasVSX())
return std::make_pair(0U, &PPC::VFRCRegClass);
break;
case 'y': // crrc
  return std::make_pair(0U, &PPC::CRRCRegClass);
}
} else if (Constraint == "wc" && Subtarget.useCRBits()) {
return std::make_pair(0U, &PPC::CRBITRCRegClass);
} else if ((Constraint == "wa" || Constraint == "wd" ||
Constraint == "wf" || Constraint == "wi") &&
Subtarget.hasVSX()) {
if (VT.isVector())
return std::make_pair(0U, &PPC::VSRCRegClass);
if (VT == MVT::f32 && Subtarget.hasP8Vector())
return std::make_pair(0U, &PPC::VSSRCRegClass);
return std::make_pair(0U, &PPC::VSFRCRegClass);
} else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
if (VT == MVT::f32 && Subtarget.hasP8Vector())
return std::make_pair(0U, &PPC::VSSRCRegClass);
else
return std::make_pair(0U, &PPC::VSFRCRegClass);
} else if (Constraint == "lr") {
if (VT == MVT::i64)
return std::make_pair(0U, &PPC::LR8RCRegClass);
else
return std::make_pair(0U, &PPC::LRRCRegClass);
}
if (Constraint[0] == '{' && Constraint[Constraint.size() - 1] == '}') {
if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
int VSNum = atoi(Constraint.data() + 3);
assert(VSNum >= 0 && VSNum <= 63 &&
"Attempted to access a vsr out of range");
if (VSNum < 32)
return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
}
if (Constraint.size() > 3 && Constraint[1] == 'f') {
int RegNum = atoi(Constraint.data() + 2);
if (RegNum > 31 || RegNum < 0)
report_fatal_error("Invalid floating point register number");
if (VT == MVT::f32 || VT == MVT::i32)
return Subtarget.hasSPE()
? std::make_pair(PPC::R0 + RegNum, &PPC::GPRCRegClass)
: std::make_pair(PPC::F0 + RegNum, &PPC::F4RCRegClass);
if (VT == MVT::f64 || VT == MVT::i64)
return Subtarget.hasSPE()
? std::make_pair(PPC::S0 + RegNum, &PPC::SPERCRegClass)
: std::make_pair(PPC::F0 + RegNum, &PPC::F8RCRegClass);
}
}
std::pair<unsigned, const TargetRegisterClass *> R =
TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
PPC::GPRCRegClass.contains(R.first))
return std::make_pair(TRI->getMatchingSuperReg(R.first,
PPC::sub_32, &PPC::G8RCRegClass),
&PPC::G8RCRegClass);
if (!R.second && StringRef("{cc}").equals_insensitive(Constraint)) {
R.first = PPC::CR0;
R.second = &PPC::CRRCRegClass;
}
const auto &TM = getTargetMachine();
if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI()) {
if (((R.first >= PPC::V20 && R.first <= PPC::V31) ||
(R.first >= PPC::VF20 && R.first <= PPC::VF31)) &&
(R.second == &PPC::VSRCRegClass || R.second == &PPC::VSFRCRegClass))
errs() << "warning: vector registers 20 to 31 are reserved in the "
          "default AIX AltiVec ABI and cannot be used\n";
}
return R;
}
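/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.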
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
std::string &Constraint,
std::vector<SDValue>&Ops,
SelectionDAG &DAG) const {
SDValue Result;
if (Constraint.length() > 1) return;
char Letter = Constraint[0];
switch (Letter) {
default: break;
case 'I':
case 'J':
case 'K':
case 'L':
case 'M':
case 'N':
case 'O':
case 'P': {
ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
if (!CST)
  return; // Must be an immediate to match.
SDLoc dl(Op);
int64_t Value = CST->getSExtValue();
EVT TCVT = MVT::i64;
switch (Letter) {
default:
  llvm_unreachable("Unknown constraint letter!");
case 'I': // "I" is a signed 16-bit constant.
  if (isInt<16>(Value))
    Result = DAG.getTargetConstant(Value, dl, TCVT);
  break;
case 'J': // "J" is a constant with only the high-order 16 bits nonzero.
  if (isShiftedUInt<16, 16>(Value))
    Result = DAG.getTargetConstant(Value, dl, TCVT);
  break;
case 'L': // "L" is a signed 16-bit constant shifted left 16 bits.
  if (isShiftedInt<16, 16>(Value))
    Result = DAG.getTargetConstant(Value, dl, TCVT);
  break;
case 'K': // "K" is a constant with only the low-order 16 bits nonzero.
  if (isUInt<16>(Value))
    Result = DAG.getTargetConstant(Value, dl, TCVT);
  break;
case 'M': // "M" is a constant that is greater than 31.
  if (Value > 31)
    Result = DAG.getTargetConstant(Value, dl, TCVT);
  break;
case 'N': // "N" is a positive constant that is an exact power of two.
  if (Value > 0 && isPowerOf2_64(Value))
    Result = DAG.getTargetConstant(Value, dl, TCVT);
  break;
case 'O': // "O" is the constant zero.
  if (Value == 0)
    Result = DAG.getTargetConstant(Value, dl, TCVT);
  break;
case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
  if (isInt<16>(-Value))
    Result = DAG.getTargetConstant(Value, dl, TCVT);
  break;
}
break;
}
}
if (Result.getNode()) {
Ops.push_back(Result);
return;
}
TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
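// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.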
bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
const AddrMode &AM, Type *Ty,
unsigned AS,
Instruction *I) const {
if (Ty->isVectorTy() && AM.BaseOffs != 0 && !Subtarget.hasP9Vector())
return false;
if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
return false;
if (AM.BaseGV)
return false;
switch (AM.Scale) {
case 0: break;
case 1:
if (AM.HasBaseReg && AM.BaseOffs) return false;
break;
case 2:
if (AM.HasBaseReg || AM.BaseOffs) return false;
break;
default:
return false;
}
return true;
}
SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
MFI.setReturnAddressIsTaken(true);
if (verifyReturnAddressArgumentIsConstant(Op, DAG))
return SDValue();
SDLoc dl(Op);
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
FuncInfo->setLRStoreRequired();
bool isPPC64 = Subtarget.isPPC64();
auto PtrVT = getPointerTy(MF.getDataLayout());
if (Depth > 0) {
SDValue FrameAddr =
DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
LowerFRAMEADDR(Op, DAG), MachinePointerInfo());
SDValue Offset =
DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
isPPC64 ? MVT::i64 : MVT::i32);
return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
MachinePointerInfo());
}
SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
MachinePointerInfo());
}
SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
MFI.setFrameAddressIsTaken(true);
EVT PtrVT = getPointerTy(MF.getDataLayout());
bool isPPC64 = PtrVT == MVT::i64;
unsigned FrameReg;
if (MF.getFunction().hasFnAttribute(Attribute::Naked))
FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
else
FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
PtrVT);
while (Depth--)
FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
FrameAddr, MachinePointerInfo());
return FrameAddr;
}
Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT,
const MachineFunction &MF) const {
bool isPPC64 = Subtarget.isPPC64();
bool is64Bit = isPPC64 && VT == LLT::scalar(64);
if (!is64Bit && VT != LLT::scalar(32))
report_fatal_error("Invalid register global variable type");
Register Reg = StringSwitch<Register>(RegName)
.Case("r1", is64Bit ? PPC::X1 : PPC::R1)
.Case("r2", isPPC64 ? Register() : PPC::R2)
.Case("r13", (is64Bit ? PPC::X13 : PPC::R13))
.Default(Register());
if (Reg)
return Reg;
report_fatal_error("Invalid register name global variable");
}
bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
if (Subtarget.is32BitELFABI())
return true;
if (Subtarget.isAIXABI())
return true;
CodeModel::Model CModel = getTargetMachine().getCodeModel();
if (CModel == CodeModel::Small || CModel == CodeModel::Large)
return true;
if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
return true;
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
return Subtarget.isGVIndirectSymbol(G->getGlobal());
return false;
}
bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
return false;
}
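// Describe the memory behavior of the target memory intrinsics (address
// operand, memory VT, alignment, and access flags) so generic analyses can
// reason about them.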
bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
const CallInst &I,
MachineFunction &MF,
unsigned Intrinsic) const {
switch (Intrinsic) {
case Intrinsic::ppc_atomicrmw_xchg_i128:
case Intrinsic::ppc_atomicrmw_add_i128:
case Intrinsic::ppc_atomicrmw_sub_i128:
case Intrinsic::ppc_atomicrmw_nand_i128:
case Intrinsic::ppc_atomicrmw_and_i128:
case Intrinsic::ppc_atomicrmw_or_i128:
case Intrinsic::ppc_atomicrmw_xor_i128:
case Intrinsic::ppc_cmpxchg_i128:
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::i128;
Info.ptrVal = I.getArgOperand(0);
Info.offset = 0;
Info.align = Align(16);
Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
MachineMemOperand::MOVolatile;
return true;
case Intrinsic::ppc_atomic_load_i128:
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::i128;
Info.ptrVal = I.getArgOperand(0);
Info.offset = 0;
Info.align = Align(16);
Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
return true;
case Intrinsic::ppc_atomic_store_i128:
Info.opc = ISD::INTRINSIC_VOID;
Info.memVT = MVT::i128;
Info.ptrVal = I.getArgOperand(2);
Info.offset = 0;
Info.align = Align(16);
Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
return true;
case Intrinsic::ppc_altivec_lvx:
case Intrinsic::ppc_altivec_lvxl:
case Intrinsic::ppc_altivec_lvebx:
case Intrinsic::ppc_altivec_lvehx:
case Intrinsic::ppc_altivec_lvewx:
case Intrinsic::ppc_vsx_lxvd2x:
case Intrinsic::ppc_vsx_lxvw4x:
case Intrinsic::ppc_vsx_lxvd2x_be:
case Intrinsic::ppc_vsx_lxvw4x_be:
case Intrinsic::ppc_vsx_lxvl:
case Intrinsic::ppc_vsx_lxvll: {
EVT VT;
switch (Intrinsic) {
case Intrinsic::ppc_altivec_lvebx:
VT = MVT::i8;
break;
case Intrinsic::ppc_altivec_lvehx:
VT = MVT::i16;
break;
case Intrinsic::ppc_altivec_lvewx:
VT = MVT::i32;
break;
case Intrinsic::ppc_vsx_lxvd2x:
case Intrinsic::ppc_vsx_lxvd2x_be:
VT = MVT::v2f64;
break;
default:
VT = MVT::v4i32;
break;
}
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = VT;
Info.ptrVal = I.getArgOperand(0);
Info.offset = -VT.getStoreSize()+1;
Info.size = 2*VT.getStoreSize()-1;
Info.align = Align(1);
Info.flags = MachineMemOperand::MOLoad;
return true;
}
case Intrinsic::ppc_altivec_stvx:
case Intrinsic::ppc_altivec_stvxl:
case Intrinsic::ppc_altivec_stvebx:
case Intrinsic::ppc_altivec_stvehx:
case Intrinsic::ppc_altivec_stvewx:
case Intrinsic::ppc_vsx_stxvd2x:
case Intrinsic::ppc_vsx_stxvw4x:
case Intrinsic::ppc_vsx_stxvd2x_be:
case Intrinsic::ppc_vsx_stxvw4x_be:
case Intrinsic::ppc_vsx_stxvl:
case Intrinsic::ppc_vsx_stxvll: {
EVT VT;
switch (Intrinsic) {
case Intrinsic::ppc_altivec_stvebx:
VT = MVT::i8;
break;
case Intrinsic::ppc_altivec_stvehx:
VT = MVT::i16;
break;
case Intrinsic::ppc_altivec_stvewx:
VT = MVT::i32;
break;
case Intrinsic::ppc_vsx_stxvd2x:
case Intrinsic::ppc_vsx_stxvd2x_be:
VT = MVT::v2f64;
break;
default:
VT = MVT::v4i32;
break;
}
Info.opc = ISD::INTRINSIC_VOID;
Info.memVT = VT;
Info.ptrVal = I.getArgOperand(1);
Info.offset = -VT.getStoreSize()+1;
Info.size = 2*VT.getStoreSize()-1;
Info.align = Align(1);
Info.flags = MachineMemOperand::MOStore;
return true;
}
default:
break;
}
return false;
}
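// Return the preferred type for memcpy/memset expansion: a 16-byte vector
// type when Altivec/VSX can be used, otherwise the native GPR width.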
EVT PPCTargetLowering::getOptimalMemOpType(
const MemOp &Op, const AttributeList &FuncAttributes) const {
if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
if (Subtarget.hasAltivec() && Op.size() >= 16 &&
(Op.isAligned(Align(16)) ||
((Op.isMemset() && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
return MVT::v4i32;
}
if (Subtarget.isPPC64()) {
return MVT::i64;
}
return MVT::i32;
}
bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
Type *Ty) const {
assert(Ty->isIntegerTy());
unsigned BitSize = Ty->getPrimitiveSizeInBits();
return !(BitSize == 0 || BitSize > 64);
}
bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
return false;
unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
return NumBits1 == 64 && NumBits2 == 32;
}
bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
if (!VT1.isInteger() || !VT2.isInteger())
return false;
unsigned NumBits1 = VT1.getSizeInBits();
unsigned NumBits2 = VT2.getSizeInBits();
return NumBits1 == 64 && NumBits2 == 32;
}
bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
EVT MemVT = LD->getMemoryVT();
if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
(Subtarget.isPPC64() && MemVT == MVT::i32)) &&
(LD->getExtensionType() == ISD::NON_EXTLOAD ||
LD->getExtensionType() == ISD::ZEXTLOAD))
return true;
}
return TargetLowering::isZExtFree(Val, VT2);
}
bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
"invalid fpext types");
if (DestVT == MVT::f128)
return false;
return true;
}
bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
return isInt<16>(Imm) || isUInt<16>(Imm);
}
bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
return isInt<16>(Imm) || isUInt<16>(Imm);
}
bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned, Align,
MachineMemOperand::Flags,
bool *Fast) const {
if (DisablePPCUnaligned)
return false;
if (!VT.isSimple())
return false;
if (VT.isFloatingPoint() && !VT.isVector() &&
!Subtarget.allowsUnalignedFPAccess())
return false;
if (VT.getSimpleVT().isVector()) {
if (Subtarget.hasVSX()) {
if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
VT != MVT::v4f32 && VT != MVT::v4i32)
return false;
} else {
return false;
}
}
if (VT == MVT::ppcf128)
return false;
if (Fast)
*Fast = true;
return true;
}
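// Allow the DAG combiner to decompose a multiply by a constant that is one
// away from a power of two (in either direction) into a shift and an
// add/sub, when the constant does not already fit a signed 16-bit immediate.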
bool PPCTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
SDValue C) const {
if (!VT.isScalarInteger())
return false;
if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
if (!ConstNode->getAPIntValue().isSignedIntN(64))
return false;
int64_t Imm = ConstNode->getSExtValue();
unsigned Shift = countTrailingZeros<uint64_t>(Imm);
Imm >>= Shift;
if (isInt<16>(Imm))
return false;
uint64_t UImm = static_cast<uint64_t>(Imm);
if (isPowerOf2_64(UImm + 1) || isPowerOf2_64(UImm - 1) ||
isPowerOf2_64(1 - UImm) || isPowerOf2_64(-1 - UImm))
return true;
}
return false;
}
bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
EVT VT) const {
return isFMAFasterThanFMulAndFAdd(
MF.getFunction(), VT.getTypeForEVT(MF.getFunction().getContext()));
}
bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
Type *Ty) const {
switch (Ty->getScalarType()->getTypeID()) {
case Type::FloatTyID:
case Type::DoubleTyID:
return true;
case Type::FP128TyID:
return Subtarget.hasP9Vector();
default:
return false;
}
}
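// Keep an FMul next to its single FAdd/FSub user when the pair can be fused
// into an FMA, and keep a simple float load next to its single store user.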
bool PPCTargetLowering::isProfitableToHoist(Instruction *I) const {
if (!I->hasOneUse())
return true;
Instruction *User = I->user_back();
assert(User && "A single use instruction with no uses.");
switch (I->getOpcode()) {
case Instruction::FMul: {
if (User->getOpcode() != Instruction::FSub &&
User->getOpcode() != Instruction::FAdd)
return true;
const TargetOptions &Options = getTargetMachine().Options;
const Function *F = I->getFunction();
const DataLayout &DL = F->getParent()->getDataLayout();
Type *Ty = User->getOperand(0)->getType();
return !(
isFMAFasterThanFMulAndFAdd(*F, Ty) &&
isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
(Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath));
}
case Instruction::Load: {
LoadInst *LI = cast<LoadInst>(I);
if (!LI->isUnordered())
return true;
if (User->getOpcode() != Instruction::Store)
return true;
if (I->getType()->getTypeID() != Type::FloatTyID)
return true;
return false;
}
default:
return true;
}
return true;
}
const MCPhysReg *
PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
static const MCPhysReg ScratchRegs[] = {
PPC::X12, PPC::LR8, PPC::CTR8, 0
};
return ScratchRegs;
}
Register PPCTargetLowering::getExceptionPointerRegister(
const Constant *PersonalityFn) const {
return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
}
Register PPCTargetLowering::getExceptionSelectorRegister(
const Constant *PersonalityFn) const {
return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
}
bool
PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
EVT VT, unsigned DefinedValues) const {
if (VT == MVT::v2i64)
return Subtarget.hasDirectMove();
if (Subtarget.hasVSX())
return true;
return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
}
Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
if (DisableILPPref || Subtarget.enableMachineScheduler())
return TargetLowering::getSchedulingPreference(N);
return Sched::ILP;
}
FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
const TargetLibraryInfo *LibInfo) const {
return PPC::createFastISel(FuncInfo, LibInfo);
}
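// Swap between the FMA and FNMSUB opcodes; used when a negation is folded
// into an FMA-like node.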
static unsigned invertFMAOpcode(unsigned Opc) {
switch (Opc) {
default:
llvm_unreachable("Invalid FMA opcode for PowerPC!");
case ISD::FMA:
return PPCISD::FNMSUB;
case PPCISD::FNMSUB:
return ISD::FMA;
}
}
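// Override the default negation folding so negations can be pulled into and
// out of PPCISD::FNMSUB.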
SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
bool LegalOps, bool OptForSize,
NegatibleCost &Cost,
unsigned Depth) const {
if (Depth > SelectionDAG::MaxRecursionDepth)
return SDValue();
unsigned Opc = Op.getOpcode();
EVT VT = Op.getValueType();
SDNodeFlags Flags = Op.getNode()->getFlags();
switch (Opc) {
case PPCISD::FNMSUB:
if (!Op.hasOneUse() || !isTypeLegal(VT))
break;
const TargetOptions &Options = getTargetMachine().Options;
SDValue N0 = Op.getOperand(0);
SDValue N1 = Op.getOperand(1);
SDValue N2 = Op.getOperand(2);
SDLoc Loc(Op);
NegatibleCost N2Cost = NegatibleCost::Expensive;
SDValue NegN2 =
getNegatedExpression(N2, DAG, LegalOps, OptForSize, N2Cost, Depth + 1);
if (!NegN2)
return SDValue();
if (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) {
NegatibleCost N0Cost = NegatibleCost::Expensive;
SDValue NegN0 = getNegatedExpression(N0, DAG, LegalOps, OptForSize,
N0Cost, Depth + 1);
NegatibleCost N1Cost = NegatibleCost::Expensive;
SDValue NegN1 = getNegatedExpression(N1, DAG, LegalOps, OptForSize,
N1Cost, Depth + 1);
if (NegN0 && N0Cost <= N1Cost) {
Cost = std::min(N0Cost, N2Cost);
return DAG.getNode(Opc, Loc, VT, NegN0, N1, NegN2, Flags);
} else if (NegN1) {
Cost = std::min(N1Cost, N2Cost);
return DAG.getNode(Opc, Loc, VT, N0, NegN1, NegN2, Flags);
}
}
if (isOperationLegal(ISD::FMA, VT)) {
Cost = N2Cost;
return DAG.getNode(ISD::FMA, Loc, VT, N0, N1, NegN2, Flags);
}
break;
}
return TargetLowering::getNegatedExpression(Op, DAG, LegalOps, OptForSize,
Cost, Depth);
}
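// Override to enable LOAD_STACK_GUARD lowering on Linux.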
bool PPCTargetLowering::useLoadStackGuardNode() const {
if (!Subtarget.isTargetLinux())
return TargetLowering::useLoadStackGuardNode();
return true;
}
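// Override to disable global variable loading on Linux and insert AIX canary
// word declaration.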
void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
if (Subtarget.isAIXABI()) {
M.getOrInsertGlobal(AIXSSPCanaryWordName,
Type::getInt8PtrTy(M.getContext()));
return;
}
if (!Subtarget.isTargetLinux())
return TargetLowering::insertSSPDeclarations(M);
}
Value *PPCTargetLowering::getSDagStackGuard(const Module &M) const {
if (Subtarget.isAIXABI())
return M.getGlobalVariable(AIXSSPCanaryWordName);
return TargetLowering::getSDagStackGuard(M);
}
bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
bool ForCodeSize) const {
if (!VT.isSimple() || !Subtarget.hasVSX())
return false;
switch(VT.getSimpleVT().SimpleTy) {
default:
return false;
case MVT::f32:
case MVT::f64:
if (Subtarget.hasPrefixInstrs()) {
return true;
}
LLVM_FALLTHROUGH;
case MVT::ppcf128:
return Imm.isPosZero();
}
}
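// PPC vector shifts take the shift amount modulo the element width, so an
// explicit AND of the shift amount with (element width - 1) is redundant and
// can be stripped before matching the target shift nodes.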
static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
SelectionDAG &DAG) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
unsigned OpSizeInBits = VT.getScalarSizeInBits();
unsigned Opcode = N->getOpcode();
unsigned TargetOpcode;
switch (Opcode) {
default:
llvm_unreachable("Unexpected shift operation");
case ISD::SHL:
TargetOpcode = PPCISD::SHL;
break;
case ISD::SRL:
TargetOpcode = PPCISD::SRL;
break;
case ISD::SRA:
TargetOpcode = PPCISD::SRA;
break;
}
if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
N1->getOpcode() == ISD::AND)
if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
if (Mask->getZExtValue() == OpSizeInBits - 1)
return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));
return SDValue();
}
SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
return Value;
SDValue N0 = N->getOperand(0);
ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (!Subtarget.isISA3_0() || !Subtarget.isPPC64() ||
N0.getOpcode() != ISD::SIGN_EXTEND ||
N0.getOperand(0).getValueType() != MVT::i32 || CN1 == nullptr ||
N->getValueType(0) != MVT::i64)
return SDValue();
SDValue ExtsSrc = N0.getOperand(0);
if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
return SDValue();
SDLoc DL(N0);
SDValue ShiftBy = SDValue(CN1, 0);
if (ShiftBy.getValueType() == MVT::i64)
ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);
return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
ShiftBy);
}
SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
return Value;
return SDValue();
}
SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
return Value;
return SDValue();
}
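// Transform (add X, (zext (setne Z, C))) -> (addze X, (addic  (addi Z, -C), -1))
// and       (add X, (zext (seteq Z, C))) -> (addze X, (subfic (addi Z, -C),  0))
// where -C fits a signed 16-bit immediate (when C is zero, the inner addi
// is elided).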
static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
const PPCSubtarget &Subtarget) {
if (!Subtarget.isPPC64())
return SDValue();
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
auto isZextOfCompareWithConstant = [](SDValue Op) {
if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
Op.getValueType() != MVT::i64)
return false;
SDValue Cmp = Op.getOperand(0);
if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
Cmp.getOperand(0).getValueType() != MVT::i64)
return false;
if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
int64_t NegConstant = 0 - Constant->getSExtValue();
return isInt<16>(NegConstant);
}
return false;
};
bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
bool RHSHasPattern = isZextOfCompareWithConstant(RHS);
if (LHSHasPattern && !RHSHasPattern)
std::swap(LHS, RHS);
else if (!LHSHasPattern && !RHSHasPattern)
return SDValue();
SDLoc DL(N);
SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
SDValue Cmp = RHS.getOperand(0);
SDValue Z = Cmp.getOperand(0);
auto *Constant = cast<ConstantSDNode>(Cmp.getOperand(1));
int64_t NegConstant = 0 - Constant->getSExtValue();
switch(cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
default: break;
case ISD::SETNE: {
SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
DAG.getConstant(NegConstant, DL, MVT::i64));
SDValue AddOrZ = NegConstant != 0 ? Add : Z;
SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
SDValue(Addc.getNode(), 1));
}
case ISD::SETEQ: {
SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
DAG.getConstant(NegConstant, DL, MVT::i64));
SDValue AddOrZ = NegConstant != 0 ? Add : Z;
SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
DAG.getConstant(0, DL, MVT::i64), AddOrZ);
return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
SDValue(Subc.getNode(), 1));
}
}
return SDValue();
}
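// Transform (add C1, (MAT_PCREL_ADDR GlobalAddr + C2)) to
// (MAT_PCREL_ADDR GlobalAddr + (C1 + C2)), provided the combined offset
// still fits in 34 bits.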
static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG,
const PPCSubtarget &Subtarget) {
if (!Subtarget.isUsingPCRelativeCalls())
return SDValue();
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
std::swap(LHS, RHS);
if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
return SDValue();
GlobalAddressSDNode *GSDN = dyn_cast<GlobalAddressSDNode>(LHS.getOperand(0));
ConstantSDNode* ConstNode = dyn_cast<ConstantSDNode>(RHS);
if (!GSDN || !ConstNode)
return SDValue();
int64_t NewOffset = GSDN->getOffset() + ConstNode->getSExtValue();
SDLoc DL(GSDN);
if (!isInt<34>(NewOffset))
return SDValue();
SDValue GA =
DAG.getTargetGlobalAddress(GSDN->getGlobal(), DL, GSDN->getValueType(0),
NewOffset, GSDN->getTargetFlags());
SDValue MatPCRel =
DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, GSDN->getValueType(0), GA);
return MatPCRel;
}
SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
return Value;
if (auto Value = combineADDToMAT_PCREL_ADDR(N, DCI.DAG, Subtarget))
return Value;
return SDValue();
}
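// Combine a truncate of abs(sub(zext, zext)) into VABSD on P9, and turn a
// truncation to i64 of an i128 bitcast of an f128 value (possibly shifted
// right by 64) into a single vector element extract.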
SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
DAGCombinerInfo &DCI) const {
if (Subtarget.useCRBits()) {
if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
return CRTruncValue;
}
SDLoc dl(N);
SDValue Op0 = N->getOperand(0);
if (Subtarget.hasP9Altivec() && Op0.getOpcode() == ISD::ABS) {
EVT VT = N->getValueType(0);
if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
return SDValue();
SDValue Sub = Op0.getOperand(0);
if (Sub.getOpcode() == ISD::SUB) {
SDValue SubOp0 = Sub.getOperand(0);
SDValue SubOp1 = Sub.getOperand(1);
if ((SubOp0.getOpcode() == ISD::ZERO_EXTEND) &&
(SubOp1.getOpcode() == ISD::ZERO_EXTEND)) {
return DCI.DAG.getNode(PPCISD::VABSD, dl, VT, SubOp0.getOperand(0),
SubOp1.getOperand(0),
DCI.DAG.getTargetConstant(0, dl, MVT::i32));
}
}
}
if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
return SDValue();
int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;
if (Op0.getOpcode() == ISD::SRL) {
ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
if (!ConstNode || ConstNode->getZExtValue() != 64)
return SDValue();
EltToExtract = EltToExtract ? 0 : 1;
Op0 = Op0.getOperand(0);
}
if (Op0.getOpcode() == ISD::BITCAST &&
Op0.getValueType() == MVT::i128 &&
Op0.getOperand(0).getValueType() == MVT::f128) {
SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
return DCI.DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
}
return SDValue();
}
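// Transform (mul X, C) where C is one away from a power of two into a shift
// and an add/sub:
//   (mul x, 2^N + 1)    => (add (shl x, N), x)
//   (mul x, -(2^N + 1)) => -(add (shl x, N), x)
//   (mul x, 2^N - 1)    => (sub (shl x, N), x)
//   (mul x, -(2^N - 1)) => (sub x, (shl x, N))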
SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
if (!ConstOpOrElement)
return SDValue();
if (DAG.getMachineFunction().getFunction().hasMinSize() &&
isOperationLegal(ISD::MUL, N->getValueType(0)))
return SDValue();
auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
switch (this->Subtarget.getCPUDirective()) {
default:
return false;
case PPC::DIR_PWR8:
return true;
case PPC::DIR_PWR9:
case PPC::DIR_PWR10:
case PPC::DIR_PWR_FUTURE:
return IsAddOne && IsNeg ? VT.isVector() : true;
}
};
EVT VT = N->getValueType(0);
SDLoc DL(N);
const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
bool IsNeg = MulAmt.isNegative();
APInt MulAmtAbs = MulAmt.abs();
if ((MulAmtAbs - 1).isPowerOf2()) {
if (!IsProfitable(IsNeg, true, VT))
return SDValue();
SDValue Op0 = N->getOperand(0);
SDValue Op1 =
DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
if (!IsNeg)
return Res;
return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
} else if ((MulAmtAbs + 1).isPowerOf2()) {
if (!IsProfitable(IsNeg, false, VT))
return SDValue();
SDValue Op0 = N->getOperand(0);
SDValue Op1 =
DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));
if (!IsNeg)
return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
else
return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
} else {
return SDValue();
}
}
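// Combine an FMA-like node (FMA or PPCISD::FNMSUB) with negated operands by
// flipping to the inverse opcode; this must check SD flags and subtarget
// features, so it is done here rather than in the generic combines.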
SDValue PPCTargetLowering::combineFMALike(SDNode *N,
DAGCombinerInfo &DCI) const {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue N2 = N->getOperand(2);
SDNodeFlags Flags = N->getFlags();
EVT VT = N->getValueType(0);
SelectionDAG &DAG = DCI.DAG;
const TargetOptions &Options = getTargetMachine().Options;
unsigned Opc = N->getOpcode();
bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
bool LegalOps = !DCI.isBeforeLegalizeOps();
SDLoc Loc(N);
if (!isOperationLegal(ISD::FMA, VT))
return SDValue();
if (!Flags.hasNoSignedZeros() && !Options.NoSignedZerosFPMath)
return SDValue();
if (SDValue NegN0 = getCheaperNegatedExpression(N0, DAG, LegalOps, CodeSize))
return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, NegN0, N1, N2, Flags);
if (SDValue NegN1 = getCheaperNegatedExpression(N1, DAG, LegalOps, CodeSize))
return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, N0, NegN1, N2, Flags);
return SDValue();
}
bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
if (!Subtarget.is64BitELFABI())
return false;
if (!CI->isTailCall())
return false;
auto &TM = getTargetMachine();
if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
return false;
const Function *Callee = CI->getCalledFunction();
if (!Callee || Callee->isVarArg())
return false;
const Function *Caller = CI->getParent()->getParent();
if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
CI->getCallingConv()))
return false;
return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
}
bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
if (!Subtarget.hasVSX())
return false;
if (Subtarget.hasP9Vector() && VT == MVT::f128)
return true;
return VT == MVT::f32 || VT == MVT::f64 ||
VT == MVT::v4f32 || VT == MVT::v2f64;
}
bool PPCTargetLowering::
isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
const Value *Mask = AndI.getOperand(1);
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
if (CI->getBitWidth() > 64)
return false;
int64_t ConstVal = CI->getZExtValue();
return isUInt<16>(ConstVal) ||
(isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
}
return true;
}
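// Combine (abs (sub ...)) into PPCISD::VABSD: directly when both operands of
// the SUB are zero-extended, and for plain v4i32 subtraction with the last
// VABSD operand set to 1 to request the signed-operand adjustment.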
SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
assert(Subtarget.hasP9Altivec() &&
"Only combine this when P9 altivec supported!");
EVT VT = N->getValueType(0);
if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
return SDValue();
SelectionDAG &DAG = DCI.DAG;
SDLoc dl(N);
if (N->getOperand(0).getOpcode() == ISD::SUB) {
unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
if ((SubOpcd0 == ISD::ZERO_EXTEND ||
SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
(SubOpcd1 == ISD::ZERO_EXTEND ||
SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
N->getOperand(0)->getOperand(0),
N->getOperand(0)->getOperand(1),
DAG.getTargetConstant(0, dl, MVT::i32));
}
if (N->getOperand(0).getValueType() == MVT::v4i32 &&
N->getOperand(0).hasOneUse()) {
return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
N->getOperand(0)->getOperand(0),
N->getOperand(0)->getOperand(1),
DAG.getTargetConstant(1, dl, MVT::i32));
}
}
return SDValue();
}
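// For v4i32/v8i16/v16i8, transform
//   (vselect (setugt a, b), (sub a, b), (sub b, a)) -> (vabsd a, b)
// and likewise for the other unsigned comparison orderings.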
SDValue PPCTargetLowering::combineVSelect(SDNode *N,
DAGCombinerInfo &DCI) const {
assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
assert(Subtarget.hasP9Altivec() &&
"Only combine this when P9 altivec supported!");
SelectionDAG &DAG = DCI.DAG;
SDLoc dl(N);
SDValue Cond = N->getOperand(0);
SDValue TrueOpnd = N->getOperand(1);
SDValue FalseOpnd = N->getOperand(2);
EVT VT = N->getOperand(1).getValueType();
if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
FalseOpnd.getOpcode() != ISD::SUB)
return SDValue();
if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
return SDValue();
if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
return SDValue();
ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
switch (CC) {
default:
return SDValue();
case ISD::SETUGT:
case ISD::SETUGE:
break;
case ISD::SETULT:
case ISD::SETULE:
std::swap(TrueOpnd, FalseOpnd);
break;
}
SDValue CmpOpnd1 = Cond.getOperand(0);
SDValue CmpOpnd2 = Cond.getOperand(1);
if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
TrueOpnd.getOperand(1) == CmpOpnd2 &&
FalseOpnd.getOperand(0) == CmpOpnd2 &&
FalseOpnd.getOperand(1) == CmpOpnd1) {
return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
CmpOpnd1, CmpOpnd2,
DAG.getTargetConstant(0, dl, MVT::i32));
}
return SDValue();
}
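/// getAddrModeForFlags - Based on the set of address flags, select the most
/// optimal instruction format to match by.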
PPC::AddrMode PPCTargetLowering::getAddrModeForFlags(unsigned Flags) const {
if (Flags == PPC::MOF_None)
return PPC::AM_None;
for (auto FlagSet : AddrModesMap.at(PPC::AM_DForm))
if ((Flags & FlagSet) == FlagSet)
return PPC::AM_DForm;
for (auto FlagSet : AddrModesMap.at(PPC::AM_DSForm))
if ((Flags & FlagSet) == FlagSet)
return PPC::AM_DSForm;
for (auto FlagSet : AddrModesMap.at(PPC::AM_DQForm))
if ((Flags & FlagSet) == FlagSet)
return PPC::AM_DQForm;
for (auto FlagSet : AddrModesMap.at(PPC::AM_PrefixDForm))
if ((Flags & FlagSet) == FlagSet)
return PPC::AM_PrefixDForm;
return PPC::AM_XForm;
}
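/// Set alignment flags based on whether or not the Frame Index is aligned.
/// Utilized when computing flags for address computation when selecting
/// load and store instructions.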
static void setAlignFlagsForFI(SDValue N, unsigned &FlagSet,
SelectionDAG &DAG) {
bool IsAdd = ((N.getOpcode() == ISD::ADD) || (N.getOpcode() == ISD::OR));
FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(IsAdd ? N.getOperand(0) : N);
if (!FI)
return;
const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
unsigned FrameIndexAlign = MFI.getObjectAlign(FI->getIndex()).value();
if ((FrameIndexAlign % 4) != 0)
FlagSet &= ~PPC::MOF_RPlusSImm16Mult4;
if ((FrameIndexAlign % 16) != 0)
FlagSet &= ~PPC::MOF_RPlusSImm16Mult16;
if (!IsAdd) {
if ((FrameIndexAlign % 4) == 0)
FlagSet |= PPC::MOF_RPlusSImm16Mult4;
if ((FrameIndexAlign % 16) == 0)
FlagSet |= PPC::MOF_RPlusSImm16Mult16;
}
}
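/// Given a node, compute flags that are used for address computation when
/// selecting load and store instructions.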
static void computeFlagsForAddressComputation(SDValue N, unsigned &FlagSet,
SelectionDAG &DAG) {
auto SetAlignFlagsForImm = [&](uint64_t Imm) {
if ((Imm & 0x3) == 0)
FlagSet |= PPC::MOF_RPlusSImm16Mult4;
if ((Imm & 0xf) == 0)
FlagSet |= PPC::MOF_RPlusSImm16Mult16;
};
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { // [Immediate].
  const APInt &ConstImm = CN->getAPIntValue();
  if (ConstImm.isSignedIntN(32)) { // Flag to set for 32-bit signed constants.
    FlagSet |= PPC::MOF_AddrIsSImm32;
    SetAlignFlagsForImm(ConstImm.getZExtValue());
    setAlignFlagsForFI(N, FlagSet, DAG);
  }
  if (ConstImm.isSignedIntN(34)) // Flag to set for 34-bit signed constants.
    FlagSet |= PPC::MOF_RPlusSImm34;
  else // Let constant materialization handle large constants.
    FlagSet |= PPC::MOF_NotAddNorCst;
} else if (N.getOpcode() == ISD::ADD || provablyDisjointOr(DAG, N)) {
  // The address can be computed as the addition of a base register and an
  // immediate, another register, or a PPCISD::Lo node.
  SDValue RHS = N.getOperand(1);
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(RHS)) { // [Reg + Imm].
    const APInt &ConstImm = CN->getAPIntValue();
    if (ConstImm.isSignedIntN(16)) {
      FlagSet |= PPC::MOF_RPlusSImm16;
      SetAlignFlagsForImm(ConstImm.getZExtValue());
      setAlignFlagsForFI(N, FlagSet, DAG);
    }
    if (ConstImm.isSignedIntN(34))
      FlagSet |= PPC::MOF_RPlusSImm34;
    else
      FlagSet |= PPC::MOF_RPlusR;
  } else if (RHS.getOpcode() == PPCISD::Lo &&
             !cast<ConstantSDNode>(RHS.getOperand(1))->getZExtValue())
    FlagSet |= PPC::MOF_RPlusLo; // [Reg + PPCISD::Lo].
  else
    FlagSet |= PPC::MOF_RPlusR; // [Reg + Reg].
} else { // The address computation is not a constant or an addition.
  setAlignFlagsForFI(N, FlagSet, DAG);
  FlagSet |= PPC::MOF_NotAddNorCst;
}
}
static bool isPCRelNode(SDValue N) {
return (N.getOpcode() == PPCISD::MAT_PCREL_ADDR ||
isValidPCRelNode<ConstantPoolSDNode>(N) ||
isValidPCRelNode<GlobalAddressSDNode>(N) ||
isValidPCRelNode<JumpTableSDNode>(N) ||
isValidPCRelNode<BlockAddressSDNode>(N));
}
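/// computeMOFlags - Given a node N and its Parent (a MemSDNode), compute
/// the address flags of the load/store instruction that is to be matched.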
unsigned PPCTargetLowering::computeMOFlags(const SDNode *Parent, SDValue N,
SelectionDAG &DAG) const {
unsigned FlagSet = PPC::MOF_None;
if (!Subtarget.hasP9Vector())
FlagSet |= PPC::MOF_SubtargetBeforeP9;
else {
FlagSet |= PPC::MOF_SubtargetP9;
if (Subtarget.hasPrefixInstrs())
FlagSet |= PPC::MOF_SubtargetP10;
}
if (Subtarget.hasSPE())
FlagSet |= PPC::MOF_SubtargetSPE;
if ((FlagSet & PPC::MOF_SubtargetP10) && isPCRelNode(N))
return FlagSet;
unsigned ParentOp = Parent->getOpcode();
if (Subtarget.isISA3_1() && ((ParentOp == ISD::INTRINSIC_W_CHAIN) ||
(ParentOp == ISD::INTRINSIC_VOID))) {
unsigned ID = cast<ConstantSDNode>(Parent->getOperand(1))->getZExtValue();
if ((ID == Intrinsic::ppc_vsx_lxvp) || (ID == Intrinsic::ppc_vsx_stxvp)) {
SDValue IntrinOp = (ID == Intrinsic::ppc_vsx_lxvp)
? Parent->getOperand(2)
: Parent->getOperand(3);
computeFlagsForAddressComputation(IntrinOp, FlagSet, DAG);
FlagSet |= PPC::MOF_Vector;
return FlagSet;
}
}
if (const LSBaseSDNode *LSB = dyn_cast<LSBaseSDNode>(Parent))
if (LSB->isIndexed())
return PPC::MOF_None;
const MemSDNode *MN = dyn_cast<MemSDNode>(Parent);
assert(MN && "Parent should be a MemSDNode!");
EVT MemVT = MN->getMemoryVT();
unsigned Size = MemVT.getSizeInBits();
if (MemVT.isScalarInteger()) {
assert(Size <= 128 &&
"Not expecting scalar integers larger than 16 bytes!");
if (Size < 32)
FlagSet |= PPC::MOF_SubWordInt;
else if (Size == 32)
FlagSet |= PPC::MOF_WordInt;
else
FlagSet |= PPC::MOF_DoubleWordInt;
} else if (MemVT.isVector() && !MemVT.isFloatingPoint()) { // Integer vectors.
  if (Size == 128)
    FlagSet |= PPC::MOF_Vector;
  else if (Size == 256) {
    assert(Subtarget.pairedVectorMemops() &&
           "256-bit vectors are only available when paired vector memops is "
           "enabled!");
    FlagSet |= PPC::MOF_Vector;
  } else
    llvm_unreachable("Not expecting illegal vectors!");
} else { // Floating-point types: scalar floats, f128 and float vectors.
  if (Size == 32 || Size == 64)
    FlagSet |= PPC::MOF_ScalarFloat;
  else if (MemVT == MVT::f128 || MemVT.isVector())
    FlagSet |= PPC::MOF_Vector;
  else
    llvm_unreachable("Not expecting illegal scalar floats!");
}
computeFlagsForAddressComputation(N, FlagSet, DAG);
if (const LoadSDNode *LN = dyn_cast<LoadSDNode>(Parent)) {
switch (LN->getExtensionType()) {
case ISD::SEXTLOAD:
FlagSet |= PPC::MOF_SExt;
break;
case ISD::EXTLOAD:
case ISD::ZEXTLOAD:
FlagSet |= PPC::MOF_ZExt;
break;
case ISD::NON_EXTLOAD:
FlagSet |= PPC::MOF_NoExt;
break;
}
} else
FlagSet |= PPC::MOF_NoExt;
if (MemVT.isScalarInteger() && (FlagSet & PPC::MOF_NoExt)) {
FlagSet |= PPC::MOF_ZExt;
FlagSet &= ~PPC::MOF_NoExt;
}
bool IsNonP1034BitConst =
((PPC::MOF_RPlusSImm34 | PPC::MOF_AddrIsSImm32 | PPC::MOF_SubtargetP10) &
FlagSet) == PPC::MOF_RPlusSImm34;
if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::OR &&
IsNonP1034BitConst)
FlagSet |= PPC::MOF_NotAddNorCst;
return FlagSet;
}
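/// SelectForceXFormMode - Given the specified address, force it to be
/// represented as an indexed [r+r] operation (an XForm instruction).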
PPC::AddrMode PPCTargetLowering::SelectForceXFormMode(SDValue N, SDValue &Disp,
SDValue &Base,
SelectionDAG &DAG) const {
PPC::AddrMode Mode = PPC::AM_XForm;
int16_t ForceXFormImm = 0;
if (provablyDisjointOr(DAG, N) &&
!isIntS16Immediate(N.getOperand(1), ForceXFormImm)) {
Disp = N.getOperand(0);
Base = N.getOperand(1);
return Mode;
}
if (N.getOpcode() == ISD::ADD &&
(!isIntS16Immediate(N.getOperand(1), ForceXFormImm) ||
!N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
Disp = N.getOperand(0);
Base = N.getOperand(1);
return Mode;
}
Disp = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
N.getValueType());
Base = N;
return Mode;
}
bool PPCTargetLowering::splitValueIntoRegisterParts(
SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
EVT ValVT = Val.getValueType();
if (PartVT == MVT::f64 &&
(ValVT == MVT::i32 || ValVT == MVT::i16 || ValVT == MVT::i8)) {
Val = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Val);
Val = DAG.getNode(ISD::BITCAST, DL, MVT::f64, Val);
Parts[0] = Val;
return true;
}
return false;
}
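// Lower an operation to a call into an external math library; used below for
// the __xl_* MASS entry points. Operands are forwarded as call arguments and
// tail-call opportunities are honored.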
SDValue PPCTargetLowering::lowerToLibCall(const char *LibCallName, SDValue Op,
SelectionDAG &DAG) const {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
TargetLowering::CallLoweringInfo CLI(DAG);
EVT RetVT = Op.getValueType();
Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
SDValue Callee =
DAG.getExternalSymbol(LibCallName, TLI.getPointerTy(DAG.getDataLayout()));
bool SignExtend = TLI.shouldSignExtendTypeInLibCall(RetVT, false);
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
for (const SDValue &N : Op->op_values()) {
EVT ArgVT = N.getValueType();
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = N;
Entry.Ty = ArgTy;
Entry.IsSExt = TLI.shouldSignExtendTypeInLibCall(ArgVT, SignExtend);
Entry.IsZExt = !Entry.IsSExt;
Args.push_back(Entry);
}
SDValue InChain = DAG.getEntryNode();
SDValue TCChain = InChain;
const Function &F = DAG.getMachineFunction().getFunction();
bool isTailCall =
TLI.isInTailCallPosition(DAG, Op.getNode(), TCChain) &&
(RetTy == F.getReturnType() || F.getReturnType()->isVoidTy());
if (isTailCall)
InChain = TCChain;
CLI.setDebugLoc(SDLoc(Op))
.setChain(InChain)
.setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args))
.setTailCall(isTailCall)
.setSExtResult(SignExtend)
.setZExtResult(!SignExtend)
.setIsPostTypeLegalization(true);
return TLI.LowerCallTo(CLI).first;
}
SDValue PPCTargetLowering::lowerLibCallBasedOnType(
const char *LibCallFloatName, const char *LibCallDoubleName, SDValue Op,
SelectionDAG &DAG) const {
if (Op.getValueType() == MVT::f32)
return lowerToLibCall(LibCallFloatName, Op, DAG);
if (Op.getValueType() == MVT::f64)
return lowerToLibCall(LibCallDoubleName, Op, DAG);
return SDValue();
}
bool PPCTargetLowering::isLowringToMASSFiniteSafe(SDValue Op) const {
SDNodeFlags Flags = Op.getNode()->getFlags();
return isLowringToMASSSafe(Op) && Flags.hasNoSignedZeros() &&
Flags.hasNoNaNs() && Flags.hasNoInfs();
}
bool PPCTargetLowering::isLowringToMASSSafe(SDValue Op) const {
return Op.getNode()->getFlags().hasApproximateFuncs();
}
bool PPCTargetLowering::isScalarMASSConversionEnabled() const {
return getTargetMachine().Options.PPCGenScalarMASSEntries;
}
SDValue PPCTargetLowering::lowerLibCallBase(const char *LibCallDoubleName,
const char *LibCallFloatName,
const char *LibCallDoubleNameFinite,
const char *LibCallFloatNameFinite,
SDValue Op,
SelectionDAG &DAG) const {
if (!isScalarMASSConversionEnabled() || !isLowringToMASSSafe(Op))
return SDValue();
if (!isLowringToMASSFiniteSafe(Op))
return lowerLibCallBasedOnType(LibCallFloatName, LibCallDoubleName, Op,
DAG);
return lowerLibCallBasedOnType(LibCallFloatNameFinite,
LibCallDoubleNameFinite, Op, DAG);
}
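// Lower pow/sin/cos/log/log10/exp to the corresponding __xl_* scalar MASS
// entries (or their _finite variants when the flags permit).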
SDValue PPCTargetLowering::lowerPow(SDValue Op, SelectionDAG &DAG) const {
return lowerLibCallBase("__xl_pow", "__xl_powf", "__xl_pow_finite",
"__xl_powf_finite", Op, DAG);
}
SDValue PPCTargetLowering::lowerSin(SDValue Op, SelectionDAG &DAG) const {
return lowerLibCallBase("__xl_sin", "__xl_sinf", "__xl_sin_finite",
"__xl_sinf_finite", Op, DAG);
}
SDValue PPCTargetLowering::lowerCos(SDValue Op, SelectionDAG &DAG) const {
return lowerLibCallBase("__xl_cos", "__xl_cosf", "__xl_cos_finite",
"__xl_cosf_finite", Op, DAG);
}
SDValue PPCTargetLowering::lowerLog(SDValue Op, SelectionDAG &DAG) const {
return lowerLibCallBase("__xl_log", "__xl_logf", "__xl_log_finite",
"__xl_logf_finite", Op, DAG);
}
SDValue PPCTargetLowering::lowerLog10(SDValue Op, SelectionDAG &DAG) const {
return lowerLibCallBase("__xl_log10", "__xl_log10f", "__xl_log10_finite",
"__xl_log10f_finite", Op, DAG);
}
SDValue PPCTargetLowering::lowerExp(SDValue Op, SelectionDAG &DAG) const {
return lowerLibCallBase("__xl_exp", "__xl_expf", "__xl_exp_finite",
"__xl_expf_finite", Op, DAG);
}
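// A FrameIndex displacement is not known to be suitably aligned until frame
// finalization, so if a DS-Form (multiple of 4) or DQ-Form (multiple of 16)
// mode was chosen without the matching alignment flag, fall back to X-Form.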
static void setXFormForUnalignedFI(SDValue N, unsigned Flags,
PPC::AddrMode &Mode) {
if (!isa<FrameIndexSDNode>(N))
return;
if ((Mode == PPC::AM_DSForm && !(Flags & PPC::MOF_RPlusSImm16Mult4)) ||
(Mode == PPC::AM_DQForm && !(Flags & PPC::MOF_RPlusSImm16Mult16)))
Mode = PPC::AM_XForm;
}
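// Select the optimal addressing mode for the memory access rooted at Parent:
// compute the address flags for N, map them to an addressing mode, and set
// Disp and Base to the operands that mode expects.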
PPC::AddrMode PPCTargetLowering::SelectOptimalAddrMode(const SDNode *Parent,
SDValue N, SDValue &Disp,
SDValue &Base,
SelectionDAG &DAG,
MaybeAlign Align) const {
SDLoc DL(Parent);
unsigned Flags = computeMOFlags(Parent, N, DAG);
PPC::AddrMode Mode = getAddrModeForFlags(Flags);
setXFormForUnalignedFI(N, Flags, Mode);
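  // A node that computes a PC-relative address initially maps to X-Form;
  // promote it to the PC-Relative mode, which needs no base register.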
if ((Mode == PPC::AM_XForm) && isPCRelNode(N)) {
assert(Subtarget.isUsingPCRelativeCalls() &&
"Must be using PC-Relative calls when a valid PC-Relative node is "
"present!");
Mode = PPC::AM_PCRel;
}
switch (Mode) {
case PPC::AM_DForm:
case PPC::AM_DSForm:
case PPC::AM_DQForm: {
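    // Register plus a signed 16-bit immediate: the immediate becomes the
    // displacement as long as it satisfies the form's alignment requirement.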
if (Flags & PPC::MOF_RPlusSImm16) {
SDValue Op0 = N.getOperand(0);
SDValue Op1 = N.getOperand(1);
int16_t Imm = cast<ConstantSDNode>(Op1)->getAPIntValue().getZExtValue();
if (!Align || isAligned(*Align, Imm)) {
Disp = DAG.getTargetConstant(Imm, DL, N.getValueType());
Base = Op0;
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op0)) {
Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
}
break;
}
}
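    // Register plus the @lo part of an address: the target global (or
    // constant pool / jump table) entry itself becomes the displacement.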
else if (Flags & PPC::MOF_RPlusLo) {
    Disp = N.getOperand(1).getOperand(0); // The global address.
    assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
           Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
           Disp.getOpcode() == ISD::TargetConstantPool ||
           Disp.getOpcode() == ISD::TargetJumpTable);
Base = N.getOperand(0);
break;
}
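    // A constant address of at most 32 bits: either fold it entirely into the
    // displacement (if it fits in 16 bits) or materialize the high part with
    // LIS and keep the sign-extended low 16 bits as the displacement.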
else if (Flags & PPC::MOF_AddrIsSImm32) {
auto *CN = cast<ConstantSDNode>(N);
EVT CNType = CN->getValueType(0);
uint64_t CNImm = CN->getZExtValue();
int16_t Imm;
if (isIntS16Immediate(CN, Imm) && (!Align || isAligned(*Align, Imm))) {
Disp = DAG.getTargetConstant(Imm, DL, CNType);
Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
CNType);
break;
}
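    // Illustrative example: for the 32-bit address 0x12348000 the low 16 bits
    // sign-extend to -0x8000, so Base becomes LIS(0x1235) == 0x12350000 and
    // Disp becomes -0x8000, since 0x12350000 - 0x8000 == 0x12348000.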
if ((CNType == MVT::i32 || isInt<32>(CNImm)) &&
(!Align || isAligned(*Align, CNImm))) {
int32_t Addr = (int32_t)CNImm;
Disp = DAG.getTargetConstant((int16_t)Addr, DL, MVT::i32);
Base =
DAG.getTargetConstant((Addr - (int16_t)Addr) >> 16, DL, MVT::i32);
uint32_t LIS = CNType == MVT::i32 ? PPC::LIS : PPC::LIS8;
Base = SDValue(DAG.getMachineNode(LIS, DL, CNType, Base), 0);
break;
}
}
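    // Otherwise the access is not foldable: use a zero displacement off the
    // (possibly frame-index) base.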
Disp = DAG.getTargetConstant(0, DL, getPointerTy(DAG.getDataLayout()));
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
} else
Base = N;
break;
}
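  // Prefixed D-Form (ISA 3.1): the displacement may be any signed 34-bit
  // immediate, either added to a register (or frame index) or used as an
  // absolute address off the zero register.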
case PPC::AM_PrefixDForm: {
int64_t Imm34 = 0;
unsigned Opcode = N.getOpcode();
if (((Opcode == ISD::ADD) || (Opcode == ISD::OR)) &&
(isIntS34Immediate(N.getOperand(1), Imm34))) {
Disp = DAG.getTargetConstant(Imm34, DL, N.getValueType());
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
else
Base = N.getOperand(0);
} else if (isIntS34Immediate(N, Imm34)) {
Disp = DAG.getTargetConstant(Imm34, DL, N.getValueType());
Base = DAG.getRegister(PPC::ZERO8, N.getValueType());
}
break;
}
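  // PC-Relative: the node itself carries the displacement and no base
  // register is needed.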
case PPC::AM_PCRel: {
Disp = N;
break;
}
case PPC::AM_None:
break;
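  // Everything else is selected as X-Form: two register operands. A lone
  // frame index is paired with the zero register; otherwise the two operands
  // of the add supply the registers.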
  default: {
    FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N);
Base = FI ? N : N.getOperand(1);
Disp = FI ? DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
N.getValueType())
: N.getOperand(0);
break;
}
}
return Mode;
}
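// Returns for the cold calling convention use RetCC_PPC_Cold; all other
// assignments fall through to CC_PPC64_ELF_FIS.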
CCAssignFn *PPCTargetLowering::ccAssignFnForCall(CallingConv::ID CC,
bool Return,
bool IsVarArg) const {
switch (CC) {
case CallingConv::Cold:
return (Return ? RetCC_PPC_Cold : CC_PPC64_ELF_FIS);
default:
return CC_PPC64_ELF_FIS;
}
}
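// Inline (lock-free) quadword atomics require a 64-bit subtarget with
// quadword-atomic support; on AIX they are only inlined when explicitly
// enabled via EnableQuadwordAtomics.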
bool PPCTargetLowering::shouldInlineQuadwordAtomics() const {
return Subtarget.isPPC64() &&
(EnableQuadwordAtomics || !Subtarget.getTargetTriple().isOSAIX()) &&
Subtarget.hasQuadwordAtomics();
}
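// 128-bit atomic operations that we can inline are expanded through the
// masked-intrinsic path so that the IR-level expansion emits the PPC i128
// intrinsics below; everything else uses the generic expansion.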
TargetLowering::AtomicExpansionKind
PPCTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
unsigned Size = AI->getType()->getPrimitiveSizeInBits();
if (shouldInlineQuadwordAtomics() && Size == 128)
return AtomicExpansionKind::MaskedIntrinsic;
return TargetLowering::shouldExpandAtomicRMWInIR(AI);
}
TargetLowering::AtomicExpansionKind
PPCTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
unsigned Size = AI->getNewValOperand()->getType()->getPrimitiveSizeInBits();
if (shouldInlineQuadwordAtomics() && Size == 128)
return AtomicExpansionKind::MaskedIntrinsic;
return TargetLowering::shouldExpandAtomicCmpXchgInIR(AI);
}
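// Map an AtomicRMW binary opcode to the matching PPC quadword lock-free
// intrinsic.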
static Intrinsic::ID
getIntrinsicForAtomicRMWBinOp128(AtomicRMWInst::BinOp BinOp) {
switch (BinOp) {
default:
llvm_unreachable("Unexpected AtomicRMW BinOp");
case AtomicRMWInst::Xchg:
return Intrinsic::ppc_atomicrmw_xchg_i128;
case AtomicRMWInst::Add:
return Intrinsic::ppc_atomicrmw_add_i128;
case AtomicRMWInst::Sub:
return Intrinsic::ppc_atomicrmw_sub_i128;
case AtomicRMWInst::And:
return Intrinsic::ppc_atomicrmw_and_i128;
case AtomicRMWInst::Or:
return Intrinsic::ppc_atomicrmw_or_i128;
case AtomicRMWInst::Xor:
return Intrinsic::ppc_atomicrmw_xor_i128;
case AtomicRMWInst::Nand:
return Intrinsic::ppc_atomicrmw_nand_i128;
}
}
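// Emit a call to the quadword atomicrmw intrinsic. The i128 operand is split
// into two i64 halves:
//   IncrLo = trunc(Incr)        -- bits 0..63
//   IncrHi = trunc(Incr >> 64)  -- bits 64..127
// and the {lo, hi} result pair is reassembled as zext(Lo) | (zext(Hi) << 64).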
Value *PPCTargetLowering::emitMaskedAtomicRMWIntrinsic(
IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
assert(shouldInlineQuadwordAtomics() && "Only support quadword now");
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
Type *ValTy = Incr->getType();
assert(ValTy->getPrimitiveSizeInBits() == 128);
Function *RMW = Intrinsic::getDeclaration(
M, getIntrinsicForAtomicRMWBinOp128(AI->getOperation()));
Type *Int64Ty = Type::getInt64Ty(M->getContext());
Value *IncrLo = Builder.CreateTrunc(Incr, Int64Ty, "incr_lo");
Value *IncrHi =
Builder.CreateTrunc(Builder.CreateLShr(Incr, 64), Int64Ty, "incr_hi");
Value *Addr =
Builder.CreateBitCast(AlignedAddr, Type::getInt8PtrTy(M->getContext()));
Value *LoHi = Builder.CreateCall(RMW, {Addr, IncrLo, IncrHi});
Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
return Builder.CreateOr(
Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 64)), "val64");
}
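// Emit a call to the quadword cmpxchg intrinsic. Both the expected and the
// new value are split into i64 halves like the atomicrmw case above, and the
// call is bracketed by the leading and trailing fences that the requested
// memory ordering requires.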
Value *PPCTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
assert(shouldInlineQuadwordAtomics() && "Only support quadword now");
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
Type *ValTy = CmpVal->getType();
assert(ValTy->getPrimitiveSizeInBits() == 128);
Function *IntCmpXchg =
Intrinsic::getDeclaration(M, Intrinsic::ppc_cmpxchg_i128);
Type *Int64Ty = Type::getInt64Ty(M->getContext());
Value *CmpLo = Builder.CreateTrunc(CmpVal, Int64Ty, "cmp_lo");
Value *CmpHi =
Builder.CreateTrunc(Builder.CreateLShr(CmpVal, 64), Int64Ty, "cmp_hi");
Value *NewLo = Builder.CreateTrunc(NewVal, Int64Ty, "new_lo");
Value *NewHi =
Builder.CreateTrunc(Builder.CreateLShr(NewVal, 64), Int64Ty, "new_hi");
Value *Addr =
Builder.CreateBitCast(AlignedAddr, Type::getInt8PtrTy(M->getContext()));
emitLeadingFence(Builder, CI, Ord);
Value *LoHi =
Builder.CreateCall(IntCmpXchg, {Addr, CmpLo, CmpHi, NewLo, NewHi});
emitTrailingFence(Builder, CI, Ord);
Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
return Builder.CreateOr(
Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 64)), "val64");
}