diff lib/Target/ARM/ARMISelLowering.h @ 148:63bd29f05246
merged
| author   | Shinji KONO <kono@ie.u-ryukyu.ac.jp> |
|----------|--------------------------------------|
| date     | Wed, 14 Aug 2019 19:46:37 +0900      |
| parents  | c2174574ed3a                         |
| children |                                      |
```diff
--- a/lib/Target/ARM/ARMISelLowering.h  Sun Dec 23 19:23:36 2018 +0900
+++ b/lib/Target/ARM/ARMISelLowering.h  Wed Aug 14 19:46:37 2019 +0900
@@ -1,9 +1,8 @@
 //===- ARMISelLowering.h - ARM DAG Lowering Interface -----------*- C++ -*-===//
 //
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
 //
@@ -21,7 +20,6 @@
 #include "llvm/CodeGen/CallingConvLower.h"
 #include "llvm/CodeGen/ISDOpcodes.h"
 #include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineValueType.h"
 #include "llvm/CodeGen/SelectionDAGNodes.h"
 #include "llvm/CodeGen/TargetLowering.h"
 #include "llvm/CodeGen/ValueTypes.h"
@@ -31,6 +29,7 @@
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/InlineAsm.h"
 #include "llvm/Support/CodeGen.h"
+#include "llvm/Support/MachineValueType.h"
 #include <utility>
 
 namespace llvm {
@@ -77,6 +76,10 @@
 
     PIC_ADD,      // Add with a PC operand and a PIC label.
 
+    ASRL,         // MVE long arithmetic shift right.
+    LSRL,         // MVE long shift right.
+    LSLL,         // MVE long shift left.
+
     CMP,          // ARM compare instructions.
     CMN,          // ARM CMN instructions.
     CMPZ,         // ARM compare that sets only Z flag.
@@ -85,6 +88,7 @@
     FMSTAT,       // ARM fmstat instruction.
 
     CMOV,         // ARM conditional move instructions.
+    SUBS,         // Flag-setting subtraction.
 
     SSAT,         // Signed saturation
     USAT,         // Unsigned saturation
@@ -99,9 +103,11 @@
     ADDE,         // Add using carry
     SUBC,         // Sub with carry
     SUBE,         // Sub using carry
+    LSLS,         // Shift left producing carry
 
     VMOVRRD,      // double to two gprs.
     VMOVDRR,      // Two gprs to double.
+    VMOVSR,       // move gpr to single, used for f32 literal constructed in a gpr
 
     EH_SJLJ_SETJMP,  // SjLj exception handling setjmp.
     EH_SJLJ_LONGJMP, // SjLj exception handling longjmp.
@@ -120,44 +126,46 @@
     WIN__CHKSTK,  // Windows' __chkstk call to do stack probing.
     WIN__DBZCHK,  // Windows' divide by zero check
 
-    VCEQ,         // Vector compare equal.
-    VCEQZ,        // Vector compare equal to zero.
-    VCGE,         // Vector compare greater than or equal.
-    VCGEZ,        // Vector compare greater than or equal to zero.
-    VCLEZ,        // Vector compare less than or equal to zero.
-    VCGEU,        // Vector compare unsigned greater than or equal.
-    VCGT,         // Vector compare greater than.
-    VCGTZ,        // Vector compare greater than zero.
-    VCLTZ,        // Vector compare less than zero.
-    VCGTU,        // Vector compare unsigned greater than.
+    WLS,          // Low-overhead loops, While Loop Start
+    LOOP_DEC,     // Really a part of LE, performs the sub
+    LE,           // Low-overhead loops, Loop End
+
+    PREDICATE_CAST, // Predicate cast for MVE i1 types
+
+    VCMP,         // Vector compare.
+    VCMPZ,        // Vector compare to zero.
 
     VTST,         // Vector test bits.
+    // Vector shift by vector
+    VSHLs,        // ...left/right by signed
+    VSHLu,        // ...left/right by unsigned
+
     // Vector shift by immediate:
-    VSHL,         // ...left
-    VSHRs,        // ...right (signed)
-    VSHRu,        // ...right (unsigned)
+    VSHLIMM,      // ...left
+    VSHRsIMM,     // ...right (signed)
+    VSHRuIMM,     // ...right (unsigned)
 
     // Vector rounding shift by immediate:
-    VRSHRs,       // ...right (signed)
-    VRSHRu,       // ...right (unsigned)
-    VRSHRN,       // ...right narrow
+    VRSHRsIMM,    // ...right (signed)
+    VRSHRuIMM,    // ...right (unsigned)
+    VRSHRNIMM,    // ...right narrow
 
     // Vector saturating shift by immediate:
-    VQSHLs,       // ...left (signed)
-    VQSHLu,       // ...left (unsigned)
-    VQSHLsu,      // ...left (signed to unsigned)
-    VQSHRNs,      // ...right narrow (signed)
-    VQSHRNu,      // ...right narrow (unsigned)
-    VQSHRNsu,     // ...right narrow (signed to unsigned)
+    VQSHLsIMM,    // ...left (signed)
+    VQSHLuIMM,    // ...left (unsigned)
+    VQSHLsuIMM,   // ...left (signed to unsigned)
+    VQSHRNsIMM,   // ...right narrow (signed)
+    VQSHRNuIMM,   // ...right narrow (unsigned)
+    VQSHRNsuIMM,  // ...right narrow (signed to unsigned)
 
     // Vector saturating rounding shift by immediate:
-    VQRSHRNs,     // ...right narrow (signed)
-    VQRSHRNu,     // ...right narrow (unsigned)
-    VQRSHRNsu,    // ...right narrow (signed to unsigned)
+    VQRSHRNsIMM,  // ...right narrow (signed)
+    VQRSHRNuIMM,  // ...right narrow (unsigned)
+    VQRSHRNsuIMM, // ...right narrow (signed to unsigned)
 
     // Vector shift and insert:
-    VSLI,         // ...left
-    VSRI,         // ...right
+    VSLIIMM,      // ...left
+    VSRIIMM,      // ...right
 
     // Vector get lane (VMOV scalar to ARM core register)
     // (These are used for 8- and 16-bit element types only.)
@@ -320,17 +328,21 @@
   /// is "fast" by reference in the second argument.
   bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
                                       unsigned Align,
+                                      MachineMemOperand::Flags Flags,
                                       bool *Fast) const override;
 
   EVT getOptimalMemOpType(uint64_t Size,
                           unsigned DstAlign, unsigned SrcAlign,
                           bool IsMemset, bool ZeroMemset,
                           bool MemcpyStrSrc,
-                          MachineFunction &MF) const override;
+                          const AttributeList &FuncAttributes) const override;
 
   bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
   bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
   bool isZExtFree(SDValue Val, EVT VT2) const override;
 
+  bool shouldSinkOperands(Instruction *I,
+                          SmallVectorImpl<Use *> &Ops) const override;
+
   bool isFNegFree(EVT VT) const override;
 
   bool isVectorLoadExtDesirable(SDValue ExtVal) const override;
 
@@ -353,7 +365,7 @@
   bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;
 
-  /// \brief Returns true if the addresing mode representing by AM is legal
+  /// Returns true if the addresing mode representing by AM is legal
   /// for the Thumb1 target, for a load/store of the specified type.
   bool isLegalT1ScaledAddressingMode(const AddrMode &AM, EVT VT) const;
 
@@ -388,6 +400,9 @@
                                     const SelectionDAG &DAG,
                                     unsigned Depth) const override;
 
+  bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
+                                    TargetLoweringOpt &TLO) const override;
+
   bool ExpandInlineAsm(CallInst *CI) const override;
 
@@ -449,7 +464,8 @@
   /// getRegClassFor - Return the register class that should be used for the
   /// specified value type.
-  const TargetRegisterClass *getRegClassFor(MVT VT) const override;
+  const TargetRegisterClass *
+  getRegClassFor(MVT VT, bool isDivergent = false) const override;
 
   /// Returns true if a cast between SrcAS and DestAS is a noop.
   bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
@@ -474,14 +490,15 @@
   /// isFPImmLegal - Returns true if the target can instruction select the
   /// specified FP immediate natively. If false, the legalizer will
   /// materialize the FP immediate as a load from a constant pool.
-  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
+  bool isFPImmLegal(const APFloat &Imm, EVT VT,
+                    bool ForCodeSize = false) const override;
 
   bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                           const CallInst &I,
                           MachineFunction &MF,
                           unsigned Intrinsic) const override;
 
-  /// \brief Returns true if it is beneficial to convert a load of a constant
+  /// Returns true if it is beneficial to convert a load of a constant
   /// to just the constant itself.
   bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                          Type *Ty) const override;
 
@@ -491,7 +508,7 @@
   bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                unsigned Index) const override;
 
-  /// \brief Returns true if an argument of type Ty needs to be passed in a
+  /// Returns true if an argument of type Ty needs to be passed in a
   /// contiguous block of registers in calling convention CallConv.
   bool functionArgumentNeedsConsecutiveRegisters(
       Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override;
 
@@ -534,10 +551,15 @@
   bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
   TargetLoweringBase::AtomicExpansionKind
   shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
-  bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
+  TargetLoweringBase::AtomicExpansionKind
+  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
 
   bool useLoadStackGuardNode() const override;
 
+  void insertSSPDeclarations(Module &M) const override;
+  Value *getSDagStackGuard(const Module &M) const override;
+  Function *getSSPStackGuardCheck(const Module &M) const override;
+
   bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                  unsigned &Cost) const override;
 
@@ -562,6 +584,8 @@
     return HasStandaloneRem;
   }
 
+  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;
+
   CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const;
   CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool isVarArg) const;
 
@@ -571,6 +595,8 @@
   bool isLegalInterleavedAccessType(VectorType *VecTy,
                                     const DataLayout &DL) const;
 
+  bool alignLoopsWithOptSize() const override;
+
   /// Returns the number of interleaved accesses that will be generated when
   /// lowering accesses of the given type.
   unsigned getNumInterleavedAccesses(VectorType *VecTy,
@@ -578,6 +604,18 @@
 
   void finalizeLowering(MachineFunction &MF) const override;
 
+  /// Return the correct alignment for the current calling convention.
+  unsigned getABIAlignmentForCallingConv(Type *ArgTy,
+                                         DataLayout DL) const override;
+
+  bool isDesirableToCommuteWithShift(const SDNode *N,
+                                     CombineLevel Level) const override;
+
+  bool shouldFoldConstantShiftPairToMask(const SDNode *N,
+                                         CombineLevel Level) const override;
+
+  bool preferIncOfAddToSubOfNot(EVT VT) const override;
+
 protected:
   std::pair<const TargetRegisterClass *, uint8_t>
   findRepresentativeClass(const TargetRegisterInfo *TRI,
@@ -663,6 +701,7 @@
                              const ARMSubtarget *ST) const;
   SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                             const ARMSubtarget *ST) const;
+  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed) const;
@@ -676,10 +715,15 @@
   SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
+  void lowerABS(SDNode *N, SmallVectorImpl<SDValue> &Results,
+                SelectionDAG &DAG) const;
 
   unsigned getRegisterByName(const char* RegName, EVT VT,
                              SelectionDAG &DAG) const override;
 
+  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
+                        SmallVectorImpl<SDNode *> &Created) const override;
+
   /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
   /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
   /// expanded to FMAs when this method returns true, otherwise fmuladd is
@@ -735,15 +779,13 @@
   /// IsEligibleForTailCallOptimization - Check whether the call is eligible
   /// for tail call optimization. Targets which want to do tail call
   /// optimization should implement this function.
-  bool IsEligibleForTailCallOptimization(SDValue Callee,
-                                         CallingConv::ID CalleeCC,
-                                         bool isVarArg,
-                                         bool isCalleeStructRet,
-                                         bool isCallerStructRet,
-                                         const SmallVectorImpl<ISD::OutputArg> &Outs,
-                                         const SmallVectorImpl<SDValue> &OutVals,
-                                         const SmallVectorImpl<ISD::InputArg> &Ins,
-                                         SelectionDAG& DAG) const;
+  bool IsEligibleForTailCallOptimization(
+      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
+      bool isCalleeStructRet, bool isCallerStructRet,
+      const SmallVectorImpl<ISD::OutputArg> &Outs,
+      const SmallVectorImpl<SDValue> &OutVals,
+      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG,
+      const bool isIndirect) const;
 
   bool CanLowerReturn(CallingConv::ID CallConv,
                       MachineFunction &MF, bool isVarArg,
@@ -759,6 +801,10 @@
 
   bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
 
+  bool shouldConsiderGEPOffsetSplit() const override { return true; }
+
+  bool isUnsupportedFloatingType(EVT VT) const;
+
   SDValue getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
                   SDValue ARMcc, SDValue CCR, SDValue Cmp,
                   SelectionDAG &DAG) const;
 
@@ -784,11 +830,15 @@
                                        MachineBasicBlock *MBB) const;
   MachineBasicBlock *EmitLowered__dbzchk(MachineInstr &MI,
                                          MachineBasicBlock *MBB) const;
+    void addMVEVectorTypes(bool HasMVEFP);
+    void addAllExtLoads(const MVT From, const MVT To, LegalizeAction Action);
+    void setAllExpand(MVT VT);
   };
 
-  enum NEONModImmType {
+  enum VMOVModImmType {
     VMOVModImm,
     VMVNModImm,
+    MVEVMVNModImm,
     OtherModImm
   };
```
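The new ASRL, LSRL and LSLL opcodes near the top of the enum model MVE's "long" shifts, which treat a pair of 32-bit GPRs as a single 64-bit value. A minimal C++ sketch of the intended semantics, as described by the opcode comments above (reference behaviour only, not the lowering code; the helper names are made up here):

```cpp
#include <cstdint>

// Reference semantics for the MVE long shifts. The 64-bit value is held in
// two 32-bit halves (lo, hi), as in the GPR pair these nodes operate on.
// Assumes 0 <= amount < 64.
static uint64_t lsll(uint32_t lo, uint32_t hi, unsigned amount) {
  uint64_t v = (static_cast<uint64_t>(hi) << 32) | lo;
  return v << amount; // LSLL: long logical shift left
}

static uint64_t lsrl(uint32_t lo, uint32_t hi, unsigned amount) {
  uint64_t v = (static_cast<uint64_t>(hi) << 32) | lo;
  return v >> amount; // LSRL: long logical shift right
}

static int64_t asrl(uint32_t lo, uint32_t hi, unsigned amount) {
  // ASRL: long arithmetic shift right; the sign bit is replicated.
  // Right-shifting a negative value is implementation-defined before
  // C++20; a two's-complement arithmetic shift is assumed here.
  int64_t v = static_cast<int64_t>((static_cast<uint64_t>(hi) << 32) | lo);
  return v >> amount;
}
```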
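Likewise, WLS, LOOP_DEC and LE describe Armv8.1-M low-overhead loops. A rough C++ analogue of the control-flow shape these nodes model (the function itself is a made-up example, not code from this patch):

```cpp
// WLS tests the trip count and branches past the loop when it is zero;
// LOOP_DEC performs the decrement and LE branches back while non-zero.
void fill(char *dst, char value, unsigned count) {
  if (count == 0)         // WLS: skip the loop body entirely for count == 0
    return;
  do {
    *dst++ = value;       // loop body
  } while (--count != 0); // LOOP_DEC + LE: decrement, branch back if non-zero
}
```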
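Several of the newly declared hooks, such as shouldExpandShift() and alignLoopsWithOptSize(), are size/speed policy queries. A minimal sketch of how a target might implement shouldExpandShift(); the body below is illustrative only and is not taken from this patch, which only shows the header declaration:

```cpp
// In ARMISelLowering.cpp (sketch).
bool ARMTargetLowering::shouldExpandShift(SelectionDAG &DAG, SDNode *N) const {
  // Plausible policy: expand wide shifts inline when optimizing for speed,
  // and keep the more compact lowering when the function favors size.
  return !DAG.getMachineFunction().getFunction().hasOptSize();
}
```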