comparison of lib/Target/ARM/ARMISelLowering.h, 146:3fc4d5c3e21e -> 148:63bd29f05246

description: merged
author:      Shinji KONO <kono@ie.u-ryukyu.ac.jp>
date:        Wed, 14 Aug 2019 19:46:37 +0900
parents:     c2174574ed3a
@@ -1,11 +1,10 @@
 //===- ARMISelLowering.h - ARM DAG Lowering Interface -----------*- C++ -*-===//
 //
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
 //
 // This file defines the interfaces that ARM uses to lower LLVM code into a
 // selection DAG.
@@ -19,20 +18,20 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/CodeGen/CallingConvLower.h"
 #include "llvm/CodeGen/ISDOpcodes.h"
 #include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineValueType.h"
 #include "llvm/CodeGen/SelectionDAGNodes.h"
 #include "llvm/CodeGen/TargetLowering.h"
 #include "llvm/CodeGen/ValueTypes.h"
 #include "llvm/IR/Attributes.h"
 #include "llvm/IR/CallingConv.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/InlineAsm.h"
 #include "llvm/Support/CodeGen.h"
+#include "llvm/Support/MachineValueType.h"
 #include <utility>

 namespace llvm {

 class ARMSubtarget;
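
Note on the include change above: MachineValueType.h moved from llvm/CodeGen/ to llvm/Support/, so out-of-tree code naming the old path needs the same one-line fix. A minimal sketch of the migration; the variable only shows that MVT users are otherwise unaffected:

    // Old location, removed upstream:
    //   #include "llvm/CodeGen/MachineValueType.h"
    // New location:
    #include "llvm/Support/MachineValueType.h"

    // MVT itself is unchanged, so existing uses keep compiling:
    llvm::MVT QuadWordTy = llvm::MVT::v4i32;
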
@@ -75,18 +74,23 @@
       RET_FLAG,     // Return with a flag operand.
       INTRET_FLAG,  // Interrupt return with an LR-offset and a flag operand.

       PIC_ADD,      // Add with a PC operand and a PIC label.

+      ASRL,         // MVE long arithmetic shift right.
+      LSRL,         // MVE long shift right.
+      LSLL,         // MVE long shift left.
+
       CMP,          // ARM compare instructions.
       CMN,          // ARM CMN instructions.
       CMPZ,         // ARM compare that sets only Z flag.
       CMPFP,        // ARM VFP compare instruction, sets FPSCR.
       CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
       FMSTAT,       // ARM fmstat instruction.

       CMOV,         // ARM conditional move instructions.
+      SUBS,         // Flag-setting subtraction.

       SSAT,         // Signed saturation
       USAT,         // Unsigned saturation

       BCC_i64,
@@ -97,13 +101,15 @@

       ADDC,         // Add with carry
       ADDE,         // Add using carry
       SUBC,         // Sub with carry
       SUBE,         // Sub using carry
+      LSLS,         // Shift left producing carry

       VMOVRRD,      // double to two gprs.
       VMOVDRR,      // Two gprs to double.
+      VMOVSR,       // move gpr to single, used for f32 literal constructed in a gpr

       EH_SJLJ_SETJMP,         // SjLj exception handling setjmp.
       EH_SJLJ_LONGJMP,        // SjLj exception handling longjmp.
       EH_SJLJ_SETUP_DISPATCH, // SjLj exception handling setup_dispatch.

@@ -118,48 +124,50 @@
       PRELOAD,      // Preload

       WIN__CHKSTK,  // Windows' __chkstk call to do stack probing.
       WIN__DBZCHK,  // Windows' divide by zero check

-      VCEQ,         // Vector compare equal.
-      VCEQZ,        // Vector compare equal to zero.
-      VCGE,         // Vector compare greater than or equal.
-      VCGEZ,        // Vector compare greater than or equal to zero.
-      VCLEZ,        // Vector compare less than or equal to zero.
-      VCGEU,        // Vector compare unsigned greater than or equal.
-      VCGT,         // Vector compare greater than.
-      VCGTZ,        // Vector compare greater than zero.
-      VCLTZ,        // Vector compare less than zero.
-      VCGTU,        // Vector compare unsigned greater than.
+      WLS,          // Low-overhead loops, While Loop Start
+      LOOP_DEC,     // Really a part of LE, performs the sub
+      LE,           // Low-overhead loops, Loop End
+
+      PREDICATE_CAST, // Predicate cast for MVE i1 types
+
+      VCMP,         // Vector compare.
+      VCMPZ,        // Vector compare to zero.
       VTST,         // Vector test bits.

+      // Vector shift by vector
+      VSHLs,        // ...left/right by signed
+      VSHLu,        // ...left/right by unsigned
+
       // Vector shift by immediate:
-      VSHL,         // ...left
-      VSHRs,        // ...right (signed)
-      VSHRu,        // ...right (unsigned)
+      VSHLIMM,      // ...left
+      VSHRsIMM,     // ...right (signed)
+      VSHRuIMM,     // ...right (unsigned)

       // Vector rounding shift by immediate:
-      VRSHRs,       // ...right (signed)
-      VRSHRu,       // ...right (unsigned)
-      VRSHRN,       // ...right narrow
+      VRSHRsIMM,    // ...right (signed)
+      VRSHRuIMM,    // ...right (unsigned)
+      VRSHRNIMM,    // ...right narrow

       // Vector saturating shift by immediate:
-      VQSHLs,       // ...left (signed)
-      VQSHLu,       // ...left (unsigned)
-      VQSHLsu,      // ...left (signed to unsigned)
-      VQSHRNs,      // ...right narrow (signed)
-      VQSHRNu,      // ...right narrow (unsigned)
-      VQSHRNsu,     // ...right narrow (signed to unsigned)
+      VQSHLsIMM,    // ...left (signed)
+      VQSHLuIMM,    // ...left (unsigned)
+      VQSHLsuIMM,   // ...left (signed to unsigned)
+      VQSHRNsIMM,   // ...right narrow (signed)
+      VQSHRNuIMM,   // ...right narrow (unsigned)
+      VQSHRNsuIMM,  // ...right narrow (signed to unsigned)

       // Vector saturating rounding shift by immediate:
-      VQRSHRNs,     // ...right narrow (signed)
-      VQRSHRNu,     // ...right narrow (unsigned)
-      VQRSHRNsu,    // ...right narrow (signed to unsigned)
+      VQRSHRNsIMM,  // ...right narrow (signed)
+      VQRSHRNuIMM,  // ...right narrow (unsigned)
+      VQRSHRNsuIMM, // ...right narrow (signed to unsigned)

       // Vector shift and insert:
-      VSLI,         // ...left
-      VSRI,         // ...right
+      VSLIIMM,      // ...left
+      VSRIIMM,      // ...right

       // Vector get lane (VMOV scalar to ARM core register)
       // (These are used for 8- and 16-bit element types only.)
       VGETLANEu,    // zero-extend vector extract element
       VGETLANEs,    // sign-extend vector extract element
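
The hunk above folds the per-condition NEON compare opcodes into VCMP/VCMPZ (the condition now travels as an operand) and renames the shift-by-immediate opcodes with an IMM suffix so they cannot be confused with the new shift-by-vector nodes VSHLs/VSHLu. A hedged sketch of how lowering code builds one of the renamed nodes; the helper name is hypothetical, but the getNode/getConstant idiom is the standard SelectionDAG one:

    // Hypothetical helper: wrap a validated shift amount into the renamed
    // shift-left-by-immediate target node.
    SDValue emitVShlImm(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
                        SDValue Vec, unsigned ShAmt) {
      return DAG.getNode(ARMISD::VSHLIMM, dl, VT, Vec,
                         DAG.getConstant(ShAmt, dl, MVT::i32));
    }
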
@@ -318,21 +326,25 @@
     /// allowsMisalignedMemoryAccesses - Returns true if the target allows
     /// unaligned memory accesses of the specified type. Returns whether it
     /// is "fast" by reference in the second argument.
     bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
                                         unsigned Align,
+                                        MachineMemOperand::Flags Flags,
                                         bool *Fast) const override;

     EVT getOptimalMemOpType(uint64_t Size,
                             unsigned DstAlign, unsigned SrcAlign,
                             bool IsMemset, bool ZeroMemset,
                             bool MemcpyStrSrc,
-                            MachineFunction &MF) const override;
+                            const AttributeList &FuncAttributes) const override;

     bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
     bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
     bool isZExtFree(SDValue Val, EVT VT2) const override;
+    bool shouldSinkOperands(Instruction *I,
+                            SmallVectorImpl<Use *> &Ops) const override;
+
     bool isFNegFree(EVT VT) const override;

     bool isVectorLoadExtDesirable(SDValue ExtVal) const override;

     bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
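
allowsMisalignedMemoryAccesses now receives the memory operand flags, so an implementation can answer differently for loads, stores, or volatile accesses. A sketch of an updated override for a hypothetical downstream target (MyTargetLowering and the policy are illustrative):

    bool MyTargetLowering::allowsMisalignedMemoryAccesses(
        EVT VT, unsigned AddrSpace, unsigned Align,
        MachineMemOperand::Flags Flags, bool *Fast) const {
      // Illustrative policy: permit misaligned 32-bit loads only.
      if (VT != MVT::i32 || !(Flags & MachineMemOperand::MOLoad))
        return false;
      if (Fast)
        *Fast = true; // assume the hardware handles it without a penalty
      return true;
    }
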
@@ -351,11 +363,11 @@
     int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                              unsigned AS) const override;

     bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

-    /// \brief Returns true if the addressing mode represented by AM is legal
+    /// Returns true if the addressing mode represented by AM is legal
     /// for the Thumb1 target, for a load/store of the specified type.
     bool isLegalT1ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

     /// isLegalICmpImmediate - Return true if the specified immediate is legal
     /// icmp immediate, that is the target has icmp instructions which can
@@ -385,10 +397,13 @@

     void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                        const APInt &DemandedElts,
                                        const SelectionDAG &DAG,
                                        unsigned Depth) const override;
+
+    bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
+                                      TargetLoweringOpt &TLO) const override;


     bool ExpandInlineAsm(CallInst *CI) const override;

     ConstraintType getConstraintType(StringRef Constraint) const override;
@@ -447,11 +462,12 @@
       return Subtarget;
     }

     /// getRegClassFor - Return the register class that should be used for the
     /// specified value type.
-    const TargetRegisterClass *getRegClassFor(MVT VT) const override;
+    const TargetRegisterClass *
+    getRegClassFor(MVT VT, bool isDivergent = false) const override;

     /// Returns true if a cast between SrcAS and DestAS is a noop.
     bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
       // Addrspacecasts are always noops.
       return true;
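
getRegClassFor gains an isDivergent flag, added for GPU-style targets that bank divergent values into a separate register file; targets without a divergence model can ignore it. A sketch under that assumption (MyTargetLowering is hypothetical):

    const TargetRegisterClass *
    MyTargetLowering::getRegClassFor(MVT VT, bool isDivergent) const {
      // No divergent register file on this target; the flag is irrelevant.
      (void)isDivergent;
      return TargetLowering::getRegClassFor(VT);
    }
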
@@ -472,28 +488,29 @@
     bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

     /// isFPImmLegal - Returns true if the target can instruction select the
     /// specified FP immediate natively. If false, the legalizer will
     /// materialize the FP immediate as a load from a constant pool.
-    bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
+    bool isFPImmLegal(const APFloat &Imm, EVT VT,
+                      bool ForCodeSize = false) const override;

     bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                             const CallInst &I,
                             MachineFunction &MF,
                             unsigned Intrinsic) const override;

-    /// \brief Returns true if it is beneficial to convert a load of a constant
+    /// Returns true if it is beneficial to convert a load of a constant
     /// to just the constant itself.
     bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                            Type *Ty) const override;

     /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
     /// with this index.
     bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                  unsigned Index) const override;

-    /// \brief Returns true if an argument of type Ty needs to be passed in a
+    /// Returns true if an argument of type Ty needs to be passed in a
     /// contiguous block of registers in calling convention CallConv.
     bool functionArgumentNeedsConsecutiveRegisters(
         Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override;

     /// If a physical register, this returns the register that receives the
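
isFPImmLegal now carries a ForCodeSize hint so a target can veto FP immediates whose materialization sequence is long when compiling for size. A sketch of the shape such an override can take; both encodability helpers are hypothetical:

    bool MyTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                        bool ForCodeSize) const {
      if (encodableInOneVMOV(Imm, VT))   // hypothetical helper
        return true;                     // always fine, one instruction
      if (encodableInTwoInsts(Imm, VT))  // hypothetical helper
        return !ForCodeSize;             // too long when minimizing size
      return false;  // let the legalizer use a constant-pool load
    }
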
@@ -532,13 +549,18 @@
     TargetLoweringBase::AtomicExpansionKind
     shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
     bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
     TargetLoweringBase::AtomicExpansionKind
     shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
-    bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
+    TargetLoweringBase::AtomicExpansionKind
+    shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

     bool useLoadStackGuardNode() const override;
+
+    void insertSSPDeclarations(Module &M) const override;
+    Value *getSDagStackGuard(const Module &M) const override;
+    Function *getSSPStackGuardCheck(const Module &M) const override;

     bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                    unsigned &Cost) const override;

     bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
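
shouldExpandAtomicCmpXchgInIR changes from bool to AtomicExpansionKind, so a target now states how a cmpxchg should be expanded rather than merely whether. The old `return true` becomes an explicit kind; a sketch, with hasLLSC() standing in for a real subtarget predicate:

    TargetLoweringBase::AtomicExpansionKind
    MyTargetLowering::shouldExpandAtomicCmpXchgInIR(
        AtomicCmpXchgInst *AI) const {
      // Expand to a load-linked/store-conditional loop when the subtarget
      // has exclusive accesses; otherwise keep the generic lowering.
      return hasLLSC() ? AtomicExpansionKind::LLSC
                       : AtomicExpansionKind::None;
    }
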
@@ -560,25 +582,41 @@

     bool hasStandaloneRem(EVT VT) const override {
       return HasStandaloneRem;
     }

+    bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;
+
     CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const;
     CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool isVarArg) const;

     /// Returns true if \p VecTy is a legal interleaved access type. This
     /// function checks the vector element type and the overall width of the
     /// vector.
     bool isLegalInterleavedAccessType(VectorType *VecTy,
                                       const DataLayout &DL) const;

+    bool alignLoopsWithOptSize() const override;
+
     /// Returns the number of interleaved accesses that will be generated when
     /// lowering accesses of the given type.
     unsigned getNumInterleavedAccesses(VectorType *VecTy,
                                        const DataLayout &DL) const;

     void finalizeLowering(MachineFunction &MF) const override;
+
+    /// Return the correct alignment for the current calling convention.
+    unsigned getABIAlignmentForCallingConv(Type *ArgTy,
+                                           DataLayout DL) const override;
+
+    bool isDesirableToCommuteWithShift(const SDNode *N,
+                                       CombineLevel Level) const override;
+
+    bool shouldFoldConstantShiftPairToMask(const SDNode *N,
+                                           CombineLevel Level) const override;
+
+    bool preferIncOfAddToSubOfNot(EVT VT) const override;

   protected:
     std::pair<const TargetRegisterClass *, uint8_t>
     findRepresentativeClass(const TargetRegisterInfo *TRI,
                             MVT VT) const override;
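
Among the hooks added above, shouldExpandShift decides whether a wide shift is expanded inline into shift-parts or left to a (smaller) runtime library call. A sketch of the usual size-driven answer; the exact ARM heuristic lives in ARMISelLowering.cpp:

    bool MyTargetLowering::shouldExpandShift(SelectionDAG &DAG,
                                             SDNode *N) const {
      // Inline expansion is faster; a libcall is smaller. Prefer the
      // libcall only when the caller is minimizing size.
      return !DAG.getMachineFunction().getFunction().hasMinSize();
    }
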
@@ -661,10 +699,11 @@
     SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                             const ARMSubtarget *ST) const;
     SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                               const ARMSubtarget *ST) const;
+    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed) const;
     void ExpandDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed,
                            SmallVectorImpl<SDValue> &Results) const;
@@ -674,13 +713,18 @@
     SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
+    void lowerABS(SDNode *N, SmallVectorImpl<SDValue> &Results,
+                  SelectionDAG &DAG) const;

     unsigned getRegisterByName(const char* RegName, EVT VT,
                                SelectionDAG &DAG) const override;
+
+    SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
+                          SmallVectorImpl<SDNode *> &Created) const override;

     /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
     /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
     /// expanded to FMAs when this method returns true, otherwise fmuladd is
     /// expanded to fmul + fadd.
@@ -733,19 +777,17 @@
     void HandleByVal(CCState *, unsigned &, unsigned) const override;

     /// IsEligibleForTailCallOptimization - Check whether the call is eligible
     /// for tail call optimization. Targets which want to do tail call
     /// optimization should implement this function.
-    bool IsEligibleForTailCallOptimization(SDValue Callee,
-                                           CallingConv::ID CalleeCC,
-                                           bool isVarArg,
-                                           bool isCalleeStructRet,
-                                           bool isCallerStructRet,
-                                           const SmallVectorImpl<ISD::OutputArg> &Outs,
-                                           const SmallVectorImpl<SDValue> &OutVals,
-                                           const SmallVectorImpl<ISD::InputArg> &Ins,
-                                           SelectionDAG& DAG) const;
+    bool IsEligibleForTailCallOptimization(
+        SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
+        bool isCalleeStructRet, bool isCallerStructRet,
+        const SmallVectorImpl<ISD::OutputArg> &Outs,
+        const SmallVectorImpl<SDValue> &OutVals,
+        const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG,
+        const bool isIndirect) const;

     bool CanLowerReturn(CallingConv::ID CallConv,
                         MachineFunction &MF, bool isVarArg,
                         const SmallVectorImpl<ISD::OutputArg> &Outs,
                         LLVMContext &Context) const override;
@@ -756,10 +798,14 @@
                 const SDLoc &dl, SelectionDAG &DAG) const override;

     bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;

     bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
+
+    bool shouldConsiderGEPOffsetSplit() const override { return true; }
+
+    bool isUnsupportedFloatingType(EVT VT) const;

     SDValue getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
                     SDValue ARMcc, SDValue CCR, SDValue Cmp,
                     SelectionDAG &DAG) const;
     SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
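
isUnsupportedFloatingType gives lowering a single place to ask whether an FP type must be softened to library calls. A sketch of the kind of check it performs; both subtarget predicates are hypothetical stand-ins, not the ARM implementation:

    bool MyTargetLowering::isUnsupportedFloatingType(EVT VT) const {
      // Soften f32/f64 when the corresponding hardware FP is absent.
      if (VT == MVT::f32 && !hasHardwareFP())  // hypothetical predicate
        return true;
      return VT == MVT::f64 && !hasFP64();     // hypothetical predicate
    }
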
@@ -782,15 +828,19 @@

     MachineBasicBlock *EmitLowered__chkstk(MachineInstr &MI,
                                            MachineBasicBlock *MBB) const;
     MachineBasicBlock *EmitLowered__dbzchk(MachineInstr &MI,
                                            MachineBasicBlock *MBB) const;
+    void addMVEVectorTypes(bool HasMVEFP);
+    void addAllExtLoads(const MVT From, const MVT To, LegalizeAction Action);
+    void setAllExpand(MVT VT);
   };

-  enum NEONModImmType {
+  enum VMOVModImmType {
     VMOVModImm,
     VMVNModImm,
+    MVEVMVNModImm,
     OtherModImm
   };

   namespace ARM {
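
The renamed VMOVModImmType reflects that the VMOV/VMVN modified-immediate encodings are no longer NEON-only; MVE adds its own VMVN variant. A sketch of a consumer dispatching on the enum; the opcode constants are hypothetical placeholders:

    static unsigned selectModImmOpcode(VMOVModImmType Kind) {
      switch (Kind) {
      case VMOVModImm:    return OPC_VMOV_IMM;  // hypothetical opcode
      case VMVNModImm:    return OPC_VMVN_IMM;  // hypothetical opcode
      case MVEVMVNModImm: return OPC_MVE_VMVN;  // hypothetical opcode
      case OtherModImm:   break;
      }
      llvm_unreachable("unexpected modified-immediate kind");
    }
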