diff lib/Target/X86/X86FrameLowering.cpp @ 120:1172e4bd9c6f
update 4.0.0
author:   mir3636
date:     Fri, 25 Nov 2016 19:14:25 +0900
parents:  7d135dc70f03
children: 803732b1fca8
--- a/lib/Target/X86/X86FrameLowering.cpp	Tue Jan 26 22:56:36 2016 +0900
+++ b/lib/Target/X86/X86FrameLowering.cpp	Fri Nov 25 19:14:25 2016 +0900
@@ -50,7 +50,7 @@
 }
 
 bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
-  return !MF.getFrameInfo()->hasVarSizedObjects() &&
+  return !MF.getFrameInfo().hasVarSizedObjects() &&
          !MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
 }
@@ -74,7 +74,7 @@
 // when there are no stack objects.
 bool
 X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {
-  return MF.getFrameInfo()->hasStackObjects() ||
+  return MF.getFrameInfo().hasStackObjects() ||
          MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
 }
@@ -82,17 +82,17 @@
 /// pointer register.  This is true if the function has variable sized allocas
 /// or if frame pointer elimination is disabled.
 bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
-  const MachineFrameInfo *MFI = MF.getFrameInfo();
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
   const MachineModuleInfo &MMI = MF.getMMI();
 
   return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
           TRI->needsStackRealignment(MF) ||
-          MFI->hasVarSizedObjects() ||
-          MFI->isFrameAddressTaken() || MFI->hasOpaqueSPAdjustment() ||
+          MFI.hasVarSizedObjects() ||
+          MFI.isFrameAddressTaken() || MFI.hasOpaqueSPAdjustment() ||
           MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
           MMI.callsUnwindInit() || MMI.hasEHFunclets() || MMI.callsEHReturn() ||
-          MFI->hasStackMap() || MFI->hasPatchPoint() ||
-          MFI->hasCopyImplyingStackAdjustment());
+          MFI.hasStackMap() || MFI.hasPatchPoint() ||
+          MFI.hasCopyImplyingStackAdjustment());
 }
 
 static unsigned getSUBriOpcode(unsigned IsLP64, int64_t Imm) {
@@ -156,9 +156,13 @@
   const TargetRegisterClass &AvailableRegs = *TRI->getGPRsForTailCall(*MF);
 
-  unsigned Opc = MBBI->getOpcode();
-  switch (Opc) {
+  if (MBBI == MBB.end())
+    return 0;
+
+  switch (MBBI->getOpcode()) {
   default: return 0;
+  case TargetOpcode::PATCHABLE_RET:
+  case X86::RET:
   case X86::RETL:
   case X86::RETQ:
   case X86::RETIL:
@@ -192,10 +196,9 @@
   return 0;
 }
 
-static bool isEAXLiveIn(MachineFunction &MF) {
-  for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
-       EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
-    unsigned Reg = II->first;
+static bool isEAXLiveIn(MachineBasicBlock &MBB) {
+  for (MachineBasicBlock::RegisterMaskPair RegMask : MBB.liveins()) {
+    unsigned Reg = RegMask.PhysReg;
 
     if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
         Reg == X86::AH || Reg == X86::AL)
@@ -261,7 +264,7 @@
     // load the offset into a register and do one sub/add
     unsigned Reg = 0;
 
-    if (isSub && !isEAXLiveIn(*MBB.getParent()))
+    if (isSub && !isEAXLiveIn(MBB))
       Reg = (unsigned)(Is64Bit ? X86::RAX : X86::EAX);
     else
       Reg = findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
@@ -315,8 +318,8 @@
 }
 
 MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
-    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc DL,
-    int64_t Offset, bool InEpilogue) const {
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+    const DebugLoc &DL, int64_t Offset, bool InEpilogue) const {
   assert(Offset != 0 && "zero offset stack adjustment requested");
 
   // On Atom, using LEA to adjust SP is preferred, but using it in the epilogue
@@ -375,16 +378,33 @@
   unsigned Opc = PI->getOpcode();
   int Offset = 0;
 
+  if (!doMergeWithPrevious && NI != MBB.end() &&
+      NI->getOpcode() == TargetOpcode::CFI_INSTRUCTION) {
+    // Don't merge with the next instruction if it has CFI.
+    return Offset;
+  }
+
   if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
-       Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
-       Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
+       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
       PI->getOperand(0).getReg() == StackPtr){
+    assert(PI->getOperand(1).getReg() == StackPtr);
     Offset += PI->getOperand(2).getImm();
     MBB.erase(PI);
     if (!doMergeWithPrevious) MBBI = NI;
+  } else if ((Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
+             PI->getOperand(0).getReg() == StackPtr &&
+             PI->getOperand(1).getReg() == StackPtr &&
+             PI->getOperand(2).getImm() == 1 &&
+             PI->getOperand(3).getReg() == X86::NoRegister &&
+             PI->getOperand(5).getReg() == X86::NoRegister) {
+    // For LEAs we have: def = lea SP, FI, noreg, Offset, noreg.
+    Offset += PI->getOperand(4).getImm();
+    MBB.erase(PI);
+    if (!doMergeWithPrevious) MBBI = NI;
   } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
               Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
              PI->getOperand(0).getReg() == StackPtr) {
+    assert(PI->getOperand(1).getReg() == StackPtr);
     Offset -= PI->getOperand(2).getImm();
     MBB.erase(PI);
     if (!doMergeWithPrevious) MBBI = NI;
@@ -394,31 +414,31 @@
 }
 
 void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
-                                MachineBasicBlock::iterator MBBI, DebugLoc DL,
-                                MCCFIInstruction CFIInst) const {
+                                MachineBasicBlock::iterator MBBI,
+                                const DebugLoc &DL,
+                                const MCCFIInstruction &CFIInst) const {
   MachineFunction &MF = *MBB.getParent();
   unsigned CFIIndex = MF.getMMI().addFrameInst(CFIInst);
   BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
       .addCFIIndex(CFIIndex);
 }
 
-void
-X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
-                                            MachineBasicBlock::iterator MBBI,
-                                            DebugLoc DL) const {
+void X86FrameLowering::emitCalleeSavedFrameMoves(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+    const DebugLoc &DL) const {
   MachineFunction &MF = *MBB.getParent();
-  MachineFrameInfo *MFI = MF.getFrameInfo();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
   MachineModuleInfo &MMI = MF.getMMI();
   const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
 
   // Add callee saved registers to move list.
-  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
+  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
   if (CSI.empty()) return;
 
   // Calculate offsets.
   for (std::vector<CalleeSavedInfo>::const_iterator
          I = CSI.begin(), E = CSI.end(); I != E; ++I) {
-    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
+    int64_t Offset = MFI.getObjectOffset(I->getFrameIdx());
     unsigned Reg = I->getReg();
 
     unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
@@ -427,20 +447,19 @@
   }
 }
 
-MachineInstr *X86FrameLowering::emitStackProbe(MachineFunction &MF,
-                                               MachineBasicBlock &MBB,
-                                               MachineBasicBlock::iterator MBBI,
-                                               DebugLoc DL,
-                                               bool InProlog) const {
+void X86FrameLowering::emitStackProbe(MachineFunction &MF,
+                                      MachineBasicBlock &MBB,
+                                      MachineBasicBlock::iterator MBBI,
+                                      const DebugLoc &DL, bool InProlog) const {
   const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
   if (STI.isTargetWindowsCoreCLR()) {
     if (InProlog) {
-      return emitStackProbeInlineStub(MF, MBB, MBBI, DL, true);
+      emitStackProbeInlineStub(MF, MBB, MBBI, DL, true);
     } else {
-      return emitStackProbeInline(MF, MBB, MBBI, DL, false);
+      emitStackProbeInline(MF, MBB, MBBI, DL, false);
     }
   } else {
-    return emitStackProbeCall(MF, MBB, MBBI, DL, InProlog);
+    emitStackProbeCall(MF, MBB, MBBI, DL, InProlog);
   }
 }
@@ -458,18 +477,22 @@
   }
 
   if (ChkStkStub != nullptr) {
+    assert(!ChkStkStub->isBundled() &&
+           "Not expecting bundled instructions here");
     MachineBasicBlock::iterator MBBI = std::next(ChkStkStub->getIterator());
-    assert(std::prev(MBBI).operator==(ChkStkStub) &&
-      "MBBI expected after __chkstk_stub.");
+    assert(std::prev(MBBI) == ChkStkStub &&
+           "MBBI expected after __chkstk_stub.");
     DebugLoc DL = PrologMBB.findDebugLoc(MBBI);
     emitStackProbeInline(MF, PrologMBB, MBBI, DL, true);
     ChkStkStub->eraseFromParent();
   }
 }
 
-MachineInstr *X86FrameLowering::emitStackProbeInline(
-    MachineFunction &MF, MachineBasicBlock &MBB,
-    MachineBasicBlock::iterator MBBI, DebugLoc DL, bool InProlog) const {
+void X86FrameLowering::emitStackProbeInline(MachineFunction &MF,
+                                            MachineBasicBlock &MBB,
+                                            MachineBasicBlock::iterator MBBI,
+                                            const DebugLoc &DL,
+                                            bool InProlog) const {
   const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
   assert(STI.is64Bit() && "different expansion needed for 32 bit");
   assert(STI.isTargetWindowsCoreCLR() && "custom expansion expects CoreCLR");
@@ -592,7 +615,7 @@
   // lowest touched page on the stack, not the point at which the OS
   // will cause an overflow exception, so this is just an optimization
   // to avoid unnecessarily touching pages that are below the current
-  // SP but already commited to the stack by the OS.
+  // SP but already committed to the stack by the OS.
   BuildMI(&MBB, DL, TII.get(X86::MOV64rm), LimitReg)
       .addReg(0)
       .addImm(1)
@@ -679,13 +702,13 @@
   }
 
   // Possible TODO: physreg liveness for InProlog case.
-
-  return ContinueMBBI;
 }
 
-MachineInstr *X86FrameLowering::emitStackProbeCall(
-    MachineFunction &MF, MachineBasicBlock &MBB,
-    MachineBasicBlock::iterator MBBI, DebugLoc DL, bool InProlog) const {
+void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
+                                          MachineBasicBlock &MBB,
+                                          MachineBasicBlock::iterator MBBI,
+                                          const DebugLoc &DL,
+                                          bool InProlog) const {
   bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;
 
   unsigned CallOp;
@@ -743,20 +766,16 @@
     for (++ExpansionMBBI; ExpansionMBBI != MBBI; ++ExpansionMBBI)
       ExpansionMBBI->setFlag(MachineInstr::FrameSetup);
   }
-
-  return MBBI;
 }
 
-MachineInstr *X86FrameLowering::emitStackProbeInlineStub(
+void X86FrameLowering::emitStackProbeInlineStub(
     MachineFunction &MF, MachineBasicBlock &MBB,
-    MachineBasicBlock::iterator MBBI, DebugLoc DL, bool InProlog) const {
+    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
 
   assert(InProlog && "ChkStkStub called outside prolog!");
 
   BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
       .addExternalSymbol("__chkstk_stub");
-
-  return MBBI;
 }
 
 static unsigned calculateSetFPREG(uint64_t SPAdjust) {
@@ -773,11 +792,11 @@
 // have a call out.  Otherwise just make sure we have some alignment - we'll
 // go with the minimum SlotSize.
 uint64_t X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
-  const MachineFrameInfo *MFI = MF.getFrameInfo();
-  uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+  uint64_t MaxAlign = MFI.getMaxAlignment(); // Desired stack alignment.
   unsigned StackAlign = getStackAlignment();
   if (MF.getFunction()->hasFnAttribute("stackrealign")) {
-    if (MFI->hasCalls())
+    if (MFI.hasCalls())
       MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
     else if (MaxAlign < SlotSize)
       MaxAlign = SlotSize;
@@ -787,7 +806,7 @@
 
 void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator MBBI,
-                                          DebugLoc DL, unsigned Reg,
+                                          const DebugLoc &DL, unsigned Reg,
                                           uint64_t MaxAlign) const {
   uint64_t Val = -MaxAlign;
   unsigned AndOp = getANDriOpcode(Uses64BitFramePtr, Val);
@@ -889,12 +908,12 @@
   assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
          "MF used frame lowering for wrong subtarget");
   MachineBasicBlock::iterator MBBI = MBB.begin();
-  MachineFrameInfo *MFI = MF.getFrameInfo();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
   const Function *Fn = MF.getFunction();
   MachineModuleInfo &MMI = MF.getMMI();
   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
   uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
-  uint64_t StackSize = MFI->getStackSize();    // Number of bytes to allocate.
+  uint64_t StackSize = MFI.getStackSize();    // Number of bytes to allocate.
   bool IsFunclet = MBB.isEHFuncletEntry();
   EHPersonality Personality = EHPersonality::Unknown;
   if (Fn->hasPersonalityFn())
@@ -913,6 +932,7 @@
       STI.isTarget64BitILP32()
           ? getX86SubSuperRegister(FramePtr, 64) : FramePtr;
   unsigned BasePtr = TRI->getBaseRegister();
+  bool HasWinCFI = false;
 
   // Debug location must be unknown since the first debug location is used
   // to determine the end of the prologue.
@@ -944,15 +964,16 @@
   // push and pop from the stack.
   if (Is64Bit && !Fn->hasFnAttribute(Attribute::NoRedZone) &&
       !TRI->needsStackRealignment(MF) &&
-      !MFI->hasVarSizedObjects() &&             // No dynamic alloca.
-      !MFI->adjustsStack() &&                   // No calls.
-      !IsWin64CC &&                             // Win64 has no Red Zone
-      !MFI->hasCopyImplyingStackAdjustment() && // Don't push and pop.
-      !MF.shouldSplitStack()) {                 // Regular stack
+      !MFI.hasVarSizedObjects() &&              // No dynamic alloca.
+      !MFI.adjustsStack() &&                    // No calls.
+      !IsWin64CC &&                             // Win64 has no Red Zone
+      !MFI.hasCopyImplyingStackAdjustment() &&  // Don't push and pop.
+      !MF.shouldSplitStack()) {                 // Regular stack
     uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
     if (HasFP) MinSize += SlotSize;
+    X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0);
     StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
-    MFI->setStackSize(StackSize);
+    MFI.setStackSize(StackSize);
   }
 
   // Insert stack pointer adjustment for later moving of return addr.  Only
@@ -1016,9 +1037,9 @@
   // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
   // Update the frame offset adjustment.
   if (!IsFunclet)
-    MFI->setOffsetAdjustment(-NumBytes);
+    MFI.setOffsetAdjustment(-NumBytes);
   else
-    assert(MFI->getOffsetAdjustment() == -(int)NumBytes &&
+    assert(MFI.getOffsetAdjustment() == -(int)NumBytes &&
            "should calculate same local variable offset for funclets");
 
   // Save EBP/RBP into the appropriate stack slot.
@@ -1040,6 +1061,7 @@
     }
 
     if (NeedsWinCFI) {
+      HasWinCFI = true;
       BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
           .addImm(FramePtr)
           .setMIFlag(MachineInstr::FrameSetup);
@@ -1101,6 +1123,7 @@
     }
 
     if (NeedsWinCFI) {
+      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg)).addImm(Reg).setMIFlag(
           MachineInstr::FrameSetup);
     }
@@ -1133,8 +1156,8 @@
   if (IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF))
     AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);
   if (AlignedNumBytes >= StackProbeSize && UseStackProbe) {
-    // Check whether EAX is livein for this function.
-    bool isEAXAlive = isEAXLiveIn(MF);
+    // Check whether EAX is livein for this block.
+    bool isEAXAlive = isEAXLiveIn(MBB);
 
     if (isEAXAlive) {
       // Sanity check that EAX is not livein for this function.
@@ -1186,10 +1209,12 @@
     emitSPUpdate(MBB, MBBI, -(int64_t)NumBytes, /*InEpilogue=*/false);
   }
 
-  if (NeedsWinCFI && NumBytes)
+  if (NeedsWinCFI && NumBytes) {
+    HasWinCFI = true;
     BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
         .addImm(NumBytes)
         .setMIFlag(MachineInstr::FrameSetup);
+  }
 
   int SEHFrameOffset = 0;
   unsigned SPOrEstablisher;
@@ -1236,6 +1261,7 @@
 
     // If this is not a funclet, emit the CFI describing our frame pointer.
     if (NeedsWinCFI && !IsFunclet) {
+      HasWinCFI = true;
       BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
           .addImm(FramePtr)
           .addImm(SEHFrameOffset)
@@ -1261,7 +1287,7 @@
   }
 
   while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) {
-    const MachineInstr *FrameInstr = &*MBBI;
+    const MachineInstr &FrameInstr = *MBBI;
     ++MBBI;
 
     if (NeedsWinCFI) {
@@ -1272,6 +1298,7 @@
           int Offset = getFrameIndexReference(MF, FI, IgnoredFrameReg);
           Offset += SEHFrameOffset;
 
+          HasWinCFI = true;
           BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))
               .addImm(Reg)
               .addImm(Offset)
@@ -1281,7 +1308,7 @@
     }
   }
 
-  if (NeedsWinCFI)
+  if (NeedsWinCFI && HasWinCFI)
     BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))
         .setMIFlag(MachineInstr::FrameSetup);
 
@@ -1361,21 +1388,36 @@
     if (PushedRegs)
       emitCalleeSavedFrameMoves(MBB, MBBI, DL);
   }
+
+  // X86 Interrupt handling function cannot assume anything about the direction
+  // flag (DF in EFLAGS register). Clear this flag by creating "cld" instruction
+  // in each prologue of interrupt handler function.
+  //
+  // FIXME: Create "cld" instruction only in these cases:
+  // 1. The interrupt handling function uses any of the "rep" instructions.
+  // 2. Interrupt handling function calls another function.
+  //
+  if (Fn->getCallingConv() == CallingConv::X86_INTR)
+    BuildMI(MBB, MBBI, DL, TII.get(X86::CLD))
+        .setMIFlag(MachineInstr::FrameSetup);
+
+  // At this point we know if the function has WinCFI or not.
+  MF.setHasWinCFI(HasWinCFI);
 }
 
 bool X86FrameLowering::canUseLEAForSPInEpilogue(
     const MachineFunction &MF) const {
-  // We can't use LEA instructions for adjusting the stack pointer if this is a
-  // leaf function in the Win64 ABI.  Only ADD instructions may be used to
-  // deallocate the stack.
+  // We can't use LEA instructions for adjusting the stack pointer if we don't
+  // have a frame pointer in the Win64 ABI.  Only ADD instructions may be used
+  // to deallocate the stack.
   // This means that we can use LEA for SP in two situations:
   // 1. We *aren't* using the Win64 ABI which means we are free to use LEA.
   // 2. We *have* a frame pointer which means we are permitted to use LEA.
   return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF);
 }
 
-static bool isFuncletReturnInstr(MachineInstr *MI) {
-  switch (MI->getOpcode()) {
+static bool isFuncletReturnInstr(MachineInstr &MI) {
+  switch (MI.getOpcode()) {
   case X86::CATCHRET:
   case X86::CLEANUPRET:
     return true;
@@ -1401,11 +1443,10 @@
 unsigned
 X86FrameLowering::getPSPSlotOffsetFromSP(const MachineFunction &MF) const {
   const WinEHFuncInfo &Info = *MF.getWinEHFuncInfo();
-  // getFrameIndexReferenceFromSP has an out ref parameter for the stack
-  // pointer register; pass a dummy that we ignore
   unsigned SPReg;
-  int Offset = getFrameIndexReferenceFromSP(MF, Info.PSPSymFrameIdx, SPReg);
-  assert(Offset >= 0);
+  int Offset = getFrameIndexReferencePreferSP(MF, Info.PSPSymFrameIdx, SPReg,
+                                              /*IgnoreSPUpdates*/ true);
+  assert(Offset >= 0 && SPReg == TRI->getStackRegister());
   return static_cast<unsigned>(Offset);
 }
@@ -1425,7 +1466,7 @@
     UsedSize = getPSPSlotOffsetFromSP(MF) + SlotSize;
   } else {
     // Other funclets just need enough stack for outgoing call arguments.
-    UsedSize = MF.getFrameInfo()->getMaxCallFrameSize();
+    UsedSize = MF.getFrameInfo().getMaxCallFrameSize();
   }
   // RBP is not included in the callee saved register block. After pushing RBP,
   // everything is 16 byte aligned. Everything we allocate before an outgoing
@@ -1436,11 +1477,21 @@
   return FrameSizeMinusRBP - CSSize;
 }
 
+static bool isTailCallOpcode(unsigned Opc) {
+    return Opc == X86::TCRETURNri || Opc == X86::TCRETURNdi ||
+        Opc == X86::TCRETURNmi ||
+        Opc == X86::TCRETURNri64 || Opc == X86::TCRETURNdi64 ||
+        Opc == X86::TCRETURNmi64;
+}
+
 void X86FrameLowering::emitEpilogue(MachineFunction &MF,
                                     MachineBasicBlock &MBB) const {
-  const MachineFrameInfo *MFI = MF.getFrameInfo();
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
   MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
+  Optional<unsigned> RetOpcode;
+  if (MBBI != MBB.end())
+    RetOpcode = MBBI->getOpcode();
   DebugLoc DL;
   if (MBBI != MBB.end())
     DL = MBBI->getDebugLoc();
@@ -1453,16 +1504,16 @@
   bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
   bool NeedsWinCFI =
       IsWin64Prologue && MF.getFunction()->needsUnwindTableEntry();
-  bool IsFunclet = isFuncletReturnInstr(MBBI);
+  bool IsFunclet = MBBI == MBB.end() ? false : isFuncletReturnInstr(*MBBI);
   MachineBasicBlock *TargetMBB = nullptr;
 
   // Get the number of bytes to allocate from the FrameInfo.
-  uint64_t StackSize = MFI->getStackSize();
+  uint64_t StackSize = MFI.getStackSize();
   uint64_t MaxAlign = calculateMaxStackAlign(MF);
   unsigned CSSize = X86FI->getCalleeSavedFrameSize();
   uint64_t NumBytes = 0;
 
-  if (MBBI->getOpcode() == X86::CATCHRET) {
+  if (RetOpcode && *RetOpcode == X86::CATCHRET) {
     // SEH shouldn't use catchret.
     assert(!isAsynchronousEHPersonality(
                classifyEHPersonality(MF.getFunction()->getPersonalityFn())) &&
@@ -1476,7 +1527,7 @@
     BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
             MachineFramePtr)
         .setMIFlag(MachineInstr::FrameDestroy);
-  } else if (MBBI->getOpcode() == X86::CLEANUPRET) {
+  } else if (RetOpcode && *RetOpcode == X86::CLEANUPRET) {
     NumBytes = getWinEHFuncletFrameSize(MF);
     assert(hasFP(MF) && "EH funclets without FP not yet implemented");
     BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
@@ -1541,14 +1592,14 @@
 
   // If there is an ADD32ri or SUB32ri of ESP immediately before this
   // instruction, merge the two instructions.
-  if (NumBytes || MFI->hasVarSizedObjects())
+  if (NumBytes || MFI.hasVarSizedObjects())
     NumBytes += mergeSPUpdates(MBB, MBBI, true);
 
   // If dynamic alloca is used, then reset esp to point to the last callee-saved
   // slot before popping them off! Same applies for the case, when stack was
   // realigned. Don't do this if this was a funclet epilogue, since the funclets
   // will not do realignment or dynamic stack allocation.
-  if ((TRI->needsStackRealignment(MF) || MFI->hasVarSizedObjects()) &&
+  if ((TRI->needsStackRealignment(MF) || MFI.hasVarSizedObjects()) &&
       !IsFunclet) {
     if (TRI->needsStackRealignment(MF))
       MBBI = FirstCSPop;
@@ -1586,18 +1637,20 @@
   // into the epilogue.  To cope with that, we insert an epilogue marker here,
   // then replace it with a 'nop' if it ends up immediately after a CALL in the
   // final emitted code.
-  if (NeedsWinCFI)
+  if (NeedsWinCFI && MF.hasWinCFI())
     BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));
 
-  // Add the return addr area delta back since we are not tail calling.
-  int Offset = -1 * X86FI->getTCReturnAddrDelta();
-  assert(Offset >= 0 && "TCDelta should never be positive");
-  if (Offset) {
-    MBBI = MBB.getFirstTerminator();
-
-    // Check for possible merge with preceding ADD instruction.
-    Offset += mergeSPUpdates(MBB, MBBI, true);
-    emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
+  if (!RetOpcode || !isTailCallOpcode(*RetOpcode)) {
+    // Add the return addr area delta back since we are not tail calling.
+    int Offset = -1 * X86FI->getTCReturnAddrDelta();
+    assert(Offset >= 0 && "TCDelta should never be positive");
+    if (Offset) {
+      MBBI = MBB.getFirstTerminator();
+
+      // Check for possible merge with preceding ADD instruction.
+      Offset += mergeSPUpdates(MBB, MBBI, true);
+      emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
+    }
   }
 }
@@ -1607,7 +1660,7 @@
 // (probably?) it should be moved into here.
 int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                              unsigned &FrameReg) const {
-  const MachineFrameInfo *MFI = MF.getFrameInfo();
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
 
   // We can't calculate offset from frame pointer if the stack is realigned,
   // so enforce usage of stack/base pointer.  The base pointer is used when we
@@ -1623,16 +1676,16 @@
   // object.
   // We need to factor in additional offsets applied during the prologue to the
   // frame, base, and stack pointer depending on which is used.
-  int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
+  int Offset = MFI.getObjectOffset(FI) - getOffsetOfLocalArea();
   const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
   unsigned CSSize = X86FI->getCalleeSavedFrameSize();
-  uint64_t StackSize = MFI->getStackSize();
+  uint64_t StackSize = MFI.getStackSize();
   bool HasFP = hasFP(MF);
   bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
   int64_t FPDelta = 0;
 
   if (IsWin64Prologue) {
-    assert(!MFI->hasCalls() || (StackSize % 16) == 8);
+    assert(!MFI.hasCalls() || (StackSize % 16) == 8);
 
     // Calculate required stack adjustment.
     uint64_t FrameSize = StackSize - SlotSize;
@@ -1650,7 +1703,7 @@
     // restricted Win64 prologue.
     // Add FPDelta to all offsets below that go through the frame pointer.
     FPDelta = FrameSize - SEHFrameOffset;
-    assert((!MFI->hasCalls() || (FPDelta % 16) == 0) &&
+    assert((!MFI.hasCalls() || (FPDelta % 16) == 0) &&
            "FPDelta isn't aligned per the Win64 ABI!");
   }
@@ -1661,7 +1714,7 @@
       // Skip the saved EBP.
       return Offset + SlotSize + FPDelta;
     } else {
-      assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
+      assert((-(Offset + StackSize)) % MFI.getObjectAlignment(FI) == 0);
       return Offset + StackSize;
     }
   } else if (TRI->needsStackRealignment(MF)) {
@@ -1669,7 +1722,7 @@
       // Skip the saved EBP.
       return Offset + SlotSize + FPDelta;
     } else {
-      assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
+      assert((-(Offset + StackSize)) % MFI.getObjectAlignment(FI) == 0);
       return Offset + StackSize;
     }
     // FIXME: Support tail calls
@@ -1689,58 +1742,61 @@
   return Offset + FPDelta;
 }
 
-// Simplified from getFrameIndexReference keeping only StackPointer cases
-int X86FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
-                                                   int FI,
-                                                   unsigned &FrameReg) const {
-  const MachineFrameInfo *MFI = MF.getFrameInfo();
+int
+X86FrameLowering::getFrameIndexReferencePreferSP(const MachineFunction &MF,
                                                 int FI, unsigned &FrameReg,
+                                                 bool IgnoreSPUpdates) const {
+
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
   // Does not include any dynamic realign.
-  const uint64_t StackSize = MFI->getStackSize();
-  {
-#ifndef NDEBUG
-    // LLVM arranges the stack as follows:
-    //   ...
-    //   ARG2
-    //   ARG1
-    //   RETADDR
-    //   PUSH RBP   <-- RBP points here
-    //   PUSH CSRs
-    //   ~~~~~~~    <-- possible stack realignment (non-win64)
-    //   ...
-    //   STACK OBJECTS
-    //   ...        <-- RSP after prologue points here
-    //   ~~~~~~~    <-- possible stack realignment (win64)
-    //
-    // if (hasVarSizedObjects()):
-    //   ...        <-- "base pointer" (ESI/RBX) points here
-    //   DYNAMIC ALLOCAS
-    //   ...        <-- RSP points here
-    //
-    // Case 1: In the simple case of no stack realignment and no dynamic
-    // allocas, both "fixed" stack objects (arguments and CSRs) are addressable
-    // with fixed offsets from RSP.
-    //
-    // Case 2: In the case of stack realignment with no dynamic allocas, fixed
-    // stack objects are addressed with RBP and regular stack objects with RSP.
-    //
-    // Case 3: In the case of dynamic allocas and stack realignment, RSP is used
-    // to address stack arguments for outgoing calls and nothing else. The "base
-    // pointer" points to local variables, and RBP points to fixed objects.
-    //
-    // In cases 2 and 3, we can only answer for non-fixed stack objects, and the
-    // answer we give is relative to the SP after the prologue, and not the
-    // SP in the middle of the function.
-
-    assert((!MFI->isFixedObjectIndex(FI) || !TRI->needsStackRealignment(MF) ||
-            STI.isTargetWin64()) &&
-           "offset from fixed object to SP is not static");
-
-    // We don't handle tail calls, and shouldn't be seeing them either.
-    int TailCallReturnAddrDelta =
-        MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta();
-    assert(!(TailCallReturnAddrDelta < 0) && "we don't handle this case!");
-#endif
-  }
+  const uint64_t StackSize = MFI.getStackSize();
+  // LLVM arranges the stack as follows:
+  //   ...
+  //   ARG2
+  //   ARG1
+  //   RETADDR
+  //   PUSH RBP   <-- RBP points here
+  //   PUSH CSRs
+  //   ~~~~~~~    <-- possible stack realignment (non-win64)
+  //   ...
+  //   STACK OBJECTS
+  //   ...        <-- RSP after prologue points here
+  //   ~~~~~~~    <-- possible stack realignment (win64)
+  //
+  // if (hasVarSizedObjects()):
+  //   ...        <-- "base pointer" (ESI/RBX) points here
+  //   DYNAMIC ALLOCAS
+  //   ...        <-- RSP points here
+  //
+  // Case 1: In the simple case of no stack realignment and no dynamic
+  // allocas, both "fixed" stack objects (arguments and CSRs) are addressable
+  // with fixed offsets from RSP.
+  //
+  // Case 2: In the case of stack realignment with no dynamic allocas, fixed
+  // stack objects are addressed with RBP and regular stack objects with RSP.
+  //
+  // Case 3: In the case of dynamic allocas and stack realignment, RSP is used
+  // to address stack arguments for outgoing calls and nothing else. The "base
+  // pointer" points to local variables, and RBP points to fixed objects.
+  //
+  // In cases 2 and 3, we can only answer for non-fixed stack objects, and the
+  // answer we give is relative to the SP after the prologue, and not the
+  // SP in the middle of the function.
+
+  if (MFI.isFixedObjectIndex(FI) && TRI->needsStackRealignment(MF) &&
+      !STI.isTargetWin64())
+    return getFrameIndexReference(MF, FI, FrameReg);
+
+  // If !hasReservedCallFrame the function might have SP adjustement in the
+  // body.  So, even though the offset is statically known, it depends on where
+  // we are in the function.
+  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
+  if (!IgnoreSPUpdates && !TFI->hasReservedCallFrame(MF))
+    return getFrameIndexReference(MF, FI, FrameReg);
+
+  // We don't handle tail calls, and shouldn't be seeing them either.
+  assert(MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta() >= 0 &&
+         "we don't handle this case!");
 
   // Fill in FrameReg output argument.
   FrameReg = TRI->getStackRegister();
@@ -1759,7 +1815,7 @@
   //
   // A is the incoming stack pointer.
   // (B - A) is the local area offset (-8 for x86-64) [1]
-  // (C - A) is the Offset returned by MFI->getObjectOffset for Obj0 [2]
+  // (C - A) is the Offset returned by MFI.getObjectOffset for Obj0 [2]
   //
   // |(E - B)| is the StackSize (absolute value, positive).  For a
   // stack that grown down, this works out to be (B - E). [3]
@@ -1772,7 +1828,7 @@
   //
 
   // Get the Offset from the StackPointer
-  int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
+  int Offset = MFI.getObjectOffset(FI) - getOffsetOfLocalArea();
 
   return Offset + StackSize;
 }
@@ -1780,7 +1836,7 @@
 bool X86FrameLowering::assignCalleeSavedSpillSlots(
     MachineFunction &MF, const TargetRegisterInfo *TRI,
     std::vector<CalleeSavedInfo> &CSI) const {
-  MachineFrameInfo *MFI = MF.getFrameInfo();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
 
   unsigned CalleeSavedFrameSize = 0;
@@ -1789,7 +1845,7 @@
   if (hasFP(MF)) {
     // emitPrologue always spills frame register the first thing.
     SpillSlotOffset -= SlotSize;
-    MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
+    MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
 
     // Since emitPrologue and emitEpilogue will handle spilling and restoring of
     // the frame register, we can delete it from CSI list and not have to worry
@@ -1813,7 +1869,7 @@
     SpillSlotOffset -= SlotSize;
     CalleeSavedFrameSize += SlotSize;
 
-    int SlotIndex = MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
+    int SlotIndex = MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
     CSI[i - 1].setFrameIdx(SlotIndex);
   }
@@ -1831,9 +1887,9 @@
     // spill into slot
     SpillSlotOffset -= RC->getSize();
     int SlotIndex =
-        MFI->CreateFixedSpillStackObject(RC->getSize(), SpillSlotOffset);
+        MFI.CreateFixedSpillStackObject(RC->getSize(), SpillSlotOffset);
     CSI[i - 1].setFrameIdx(SlotIndex);
-    MFI->ensureMaxAlignment(RC->getAlignment());
+    MFI.ensureMaxAlignment(RC->getAlignment());
   }
 
   return true;
@@ -1851,16 +1907,37 @@
     return true;
 
   // Push GPRs. It increases frame size.
+  const MachineFunction &MF = *MBB.getParent();
   unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
   for (unsigned i = CSI.size(); i != 0; --i) {
     unsigned Reg = CSI[i - 1].getReg();
 
     if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
       continue;
-    // Add the callee-saved register as live-in. It's killed at the spill.
-    MBB.addLiveIn(Reg);
-
-    BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
+
+    const MachineRegisterInfo &MRI = MF.getRegInfo();
+    bool isLiveIn = MRI.isLiveIn(Reg);
+    if (!isLiveIn)
+      MBB.addLiveIn(Reg);
+
+    // Decide whether we can add a kill flag to the use.
+    bool CanKill = !isLiveIn;
+    // Check if any subregister is live-in
+    if (CanKill) {
+      for (MCRegAliasIterator AReg(Reg, TRI, false); AReg.isValid(); ++AReg) {
+        if (MRI.isLiveIn(*AReg)) {
+          CanKill = false;
+          break;
+        }
+      }
+    }
+
+    // Do not set a kill flag on values that are also marked as live-in. This
+    // happens with the @llvm-returnaddress intrinsic and with arguments
+    // passed in callee saved registers.
+    // Omitting the kill flags is conservatively correct even if the live-in
+    // is not used after all.
+    BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, getKillRegState(CanKill))
         .setMIFlag(MachineInstr::FrameSetup);
   }
@@ -1891,7 +1968,7 @@
   if (CSI.empty())
     return false;
 
-  if (isFuncletReturnInstr(MI) && STI.isOSWindows()) {
+  if (MI != MBB.end() && isFuncletReturnInstr(*MI) && STI.isOSWindows()) {
     // Don't restore CSRs in 32-bit EH funclets. Matches
     // spillCalleeSavedRegisters.
     if (STI.is32Bit())
@@ -1939,7 +2016,7 @@
                                              RegScavenger *RS) const {
   TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
 
-  MachineFrameInfo *MFI = MF.getFrameInfo();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
   int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
@@ -1954,7 +2031,7 @@
     //   ...
     // }
     // [EBP]
-    MFI->CreateFixedObject(-TailCallReturnAddrDelta,
+    MFI.CreateFixedObject(-TailCallReturnAddrDelta,
                            TailCallReturnAddrDelta - SlotSize, true);
   }
@@ -1964,7 +2041,7 @@
 
   // Allocate a spill slot for EBP if we have a base pointer and EH funclets.
   if (MF.getMMI().hasEHFunclets()) {
-    int FI = MFI->CreateSpillStackObject(SlotSize, SlotSize);
+    int FI = MFI.CreateSpillStackObject(SlotSize, SlotSize);
     X86FI->setHasSEHFramePtrSave(true);
     X86FI->setSEHFramePtrSaveIndex(FI);
   }
@@ -2025,7 +2102,7 @@
 
 void X86FrameLowering::adjustForSegmentedStacks(
     MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
-  MachineFrameInfo *MFI = MF.getFrameInfo();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
   uint64_t StackSize;
   unsigned TlsReg, TlsOffset;
   DebugLoc DL;
@@ -2048,7 +2125,7 @@
   // Eventually StackSize will be calculated by a link-time pass; which will
   // also decide whether checking code needs to be injected into this particular
   // prologue.
-  StackSize = MFI->getStackSize();
+  StackSize = MFI.getStackSize();
 
   // Do not generate a prologue for functions with a stack of size zero
   if (StackSize == 0)
@@ -2250,11 +2327,33 @@
   checkMBB->addSuccessor(allocMBB);
   checkMBB->addSuccessor(&PrologueMBB);
 
-#ifdef XDEBUG
+#ifdef EXPENSIVE_CHECKS
   MF.verify();
 #endif
 }
 
+/// Lookup an ERTS parameter in the !hipe.literals named metadata node.
+/// HiPE provides Erlang Runtime System-internal parameters, such as PCB offsets
+/// to fields it needs, through a named metadata node "hipe.literals" containing
+/// name-value pairs.
+static unsigned getHiPELiteral(
+    NamedMDNode *HiPELiteralsMD, const StringRef LiteralName) {
+  for (int i = 0, e = HiPELiteralsMD->getNumOperands(); i != e; ++i) {
+    MDNode *Node = HiPELiteralsMD->getOperand(i);
+    if (Node->getNumOperands() != 2) continue;
+    MDString *NodeName = dyn_cast<MDString>(Node->getOperand(0));
+    ValueAsMetadata *NodeVal = dyn_cast<ValueAsMetadata>(Node->getOperand(1));
+    if (!NodeName || !NodeVal) continue;
+    ConstantInt *ValConst = dyn_cast_or_null<ConstantInt>(NodeVal->getValue());
+    if (ValConst && NodeName->getString() == LiteralName) {
+      return ValConst->getZExtValue();
+    }
+  }
+
+  report_fatal_error("HiPE literal " + LiteralName
+                     + " required but not provided");
+}
+
 /// Erlang programs may need a special prologue to handle the stack size they
 /// might need at runtime. That is because Erlang/OTP does not implement a C
 /// stack but uses a custom implementation of hybrid stack/heap architecture.
@@ -2272,7 +2371,7 @@
 ///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
 void X86FrameLowering::adjustForHiPEPrologue(
     MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
-  MachineFrameInfo *MFI = MF.getFrameInfo();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
   DebugLoc DL;
 
   // To support shrink-wrapping we would need to insert the new blocks
@@ -2280,12 +2379,19 @@
   assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
 
   // HiPE-specific values
-  const unsigned HipeLeafWords = 24;
+  NamedMDNode *HiPELiteralsMD = MF.getMMI().getModule()
+    ->getNamedMetadata("hipe.literals");
+  if (!HiPELiteralsMD)
+    report_fatal_error(
+        "Can't generate HiPE prologue without runtime parameters");
+  const unsigned HipeLeafWords
+    = getHiPELiteral(HiPELiteralsMD,
+                     Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS");
   const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
   const unsigned Guaranteed = HipeLeafWords * SlotSize;
   unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?
                              MF.getFunction()->arg_size() - CCRegisteredArgs : 0;
-  unsigned MaxStack = MFI->getStackSize() + CallerStkArity*SlotSize + SlotSize;
+  unsigned MaxStack = MFI.getStackSize() + CallerStkArity*SlotSize + SlotSize;
 
   assert(STI.isTargetLinux() &&
          "HiPE prologue is only supported on Linux operating systems.");
@@ -2297,18 +2403,16 @@
   //    b) outgoing on-stack parameter areas, and
   //    c) the minimum stack space this function needs to make available for the
   //       functions it calls (a tunable ABI property).
-  if (MFI->hasCalls()) {
+  if (MFI.hasCalls()) {
     unsigned MoreStackForCalls = 0;
-    for (MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();
-         MBBI != MBBE; ++MBBI)
-      for (MachineBasicBlock::iterator MI = MBBI->begin(), ME = MBBI->end();
-           MI != ME; ++MI) {
-        if (!MI->isCall())
+    for (auto &MBB : MF) {
+      for (auto &MI : MBB) {
+        if (!MI.isCall())
           continue;
 
         // Get callee operand.
-        const MachineOperand &MO = MI->getOperand(0);
+        const MachineOperand &MO = MI.getOperand(0);
 
         // Only take account of global function calls (no closures etc.).
         if (!MO.isGlobal())
@@ -2334,6 +2438,7 @@
         MoreStackForCalls = std::max(MoreStackForCalls,
           (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
       }
+    }
     MaxStack += MoreStackForCalls;
   }
@@ -2353,20 +2458,19 @@
     unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
     unsigned LEAop, CMPop, CALLop;
+    SPLimitOffset = getHiPELiteral(HiPELiteralsMD, "P_NSP_LIMIT");
     if (Is64Bit) {
       SPReg = X86::RSP;
       PReg  = X86::RBP;
       LEAop = X86::LEA64r;
       CMPop = X86::CMP64rm;
       CALLop = X86::CALL64pcrel32;
-      SPLimitOffset = 0x90;
     } else {
       SPReg = X86::ESP;
       PReg  = X86::EBP;
       LEAop = X86::LEA32r;
       CMPop = X86::CMP32rm;
       CALLop = X86::CALLpcrel32;
-      SPLimitOffset = 0x4c;
     }
 
     ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
@@ -2395,13 +2499,15 @@
     incStackMBB->addSuccessor(&PrologueMBB, {99, 100});
     incStackMBB->addSuccessor(incStackMBB, {1, 100});
   }
-#ifdef XDEBUG
+#ifdef EXPENSIVE_CHECKS
   MF.verify();
 #endif
 }
 
 bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
-    MachineBasicBlock::iterator MBBI, DebugLoc DL, int Offset) const {
+                                           MachineBasicBlock::iterator MBBI,
+                                           const DebugLoc &DL,
+                                           int Offset) const {
 
   if (Offset <= 0)
     return false;
@@ -2440,7 +2546,8 @@
     bool IsDef = false;
     for (const MachineOperand &MO : Prev->implicit_operands()) {
-      if (MO.isReg() && MO.isDef() && MO.getReg() == Candidate) {
+      if (MO.isReg() && MO.isDef() &&
+          TRI->isSuperOrSubRegisterEq(MO.getReg(), Candidate)) {
         IsDef = true;
         break;
       }
@@ -2468,7 +2575,7 @@
   return true;
 }
 
-void X86FrameLowering::
+MachineBasicBlock::iterator X86FrameLowering::
 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I) const {
   bool reserveCallFrame = hasReservedCallFrame(MF);
@@ -2512,7 +2619,7 @@
           MCCFIInstruction::createGnuArgsSize(nullptr, Amount));
 
     if (Amount == 0)
-      return;
+      return I;
 
     // Factor out the amount that gets handled inside the sequence
     // (Pushes of argument for frame setup, callee pops for frame destroy)
@@ -2525,13 +2632,23 @@
       BuildCFI(MBB, I, DL,
                MCCFIInstruction::createAdjustCfaOffset(nullptr, -InternalAmt));
 
-    if (Amount) {
-      // Add Amount to SP to destroy a frame, and subtract to setup.
-      int Offset = isDestroy ? Amount : -Amount;
-
-      if (!(Fn->optForMinSize() &&
-            adjustStackWithPops(MBB, I, DL, Offset)))
-        BuildStackAdjustment(MBB, I, DL, Offset, /*InEpilogue=*/false);
+    // Add Amount to SP to destroy a frame, or subtract to setup.
+    int64_t StackAdjustment = isDestroy ? Amount : -Amount;
+    int64_t CfaAdjustment = -StackAdjustment;
+
+    if (StackAdjustment) {
+      // Merge with any previous or following adjustment instruction. Note: the
+      // instructions merged with here do not have CFI, so their stack
+      // adjustments do not feed into CfaAdjustment.
+      StackAdjustment += mergeSPUpdates(MBB, I, true);
+      StackAdjustment += mergeSPUpdates(MBB, I, false);
+
+      if (StackAdjustment) {
+        if (!(Fn->optForMinSize() &&
+              adjustStackWithPops(MBB, I, DL, StackAdjustment)))
+          BuildStackAdjustment(MBB, I, DL, StackAdjustment,
+                               /*InEpilogue=*/false);
+      }
     }
 
     if (DwarfCFI && !hasFP(MF)) {
@@ -2541,18 +2658,16 @@
       // CFI only for EH purposes or for debugging. EH only requires the CFA
      // offset to be correct at each call site, while for debugging we want
       // it to be more precise.
-      int CFAOffset = Amount;
+
       // TODO: When not using precise CFA, we also need to adjust for the
       // InternalAmt here.
-
-      if (CFAOffset) {
-        CFAOffset = isDestroy ? -CFAOffset : CFAOffset;
-        BuildCFI(MBB, I, DL,
-                 MCCFIInstruction::createAdjustCfaOffset(nullptr, CFAOffset));
+      if (CfaAdjustment) {
+        BuildCFI(MBB, I, DL, MCCFIInstruction::createAdjustCfaOffset(
+                                 nullptr, CfaAdjustment));
       }
     }
 
-    return;
+    return I;
   }
 
   if (isDestroy && InternalAmt) {
@@ -2562,11 +2677,20 @@
     // We are not tracking the stack pointer adjustment by the callee, so make
     // sure we restore the stack pointer immediately after the call, there may
     // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
+    MachineBasicBlock::iterator CI = I;
     MachineBasicBlock::iterator B = MBB.begin();
-    while (I != B && !std::prev(I)->isCall())
-      --I;
-    BuildStackAdjustment(MBB, I, DL, -InternalAmt, /*InEpilogue=*/false);
+    while (CI != B && !std::prev(CI)->isCall())
+      --CI;
+    BuildStackAdjustment(MBB, CI, DL, -InternalAmt, /*InEpilogue=*/false);
   }
+
+  return I;
+}
+
+bool X86FrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
+  assert(MBB.getParent() && "Block is not attached to a function!");
+  const MachineFunction &MF = *MBB.getParent();
+  return !TRI->needsStackRealignment(MF) || !MBB.isLiveIn(X86::EFLAGS);
 }
 
 bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
@@ -2604,7 +2728,7 @@
 
 MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
-    DebugLoc DL, bool RestoreSP) const {
+    const DebugLoc &DL, bool RestoreSP) const {
   assert(STI.isTargetWindowsMSVC() && "funclets only supported in MSVC env");
   assert(STI.isTargetWin32() && "EBP/ESI restoration only required on win32");
   assert(STI.is32Bit() && !Uses64BitFramePtr &&
@@ -2615,12 +2739,12 @@
   unsigned BasePtr = TRI->getBaseRegister();
   WinEHFuncInfo &FuncInfo = *MF.getWinEHFuncInfo();
   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
-  MachineFrameInfo *MFI = MF.getFrameInfo();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
 
   // FIXME: Don't set FrameSetup flag in catchret case.
 
   int FI = FuncInfo.EHRegNodeFrameIndex;
-  int EHRegSize = MFI->getObjectSize(FI);
+  int EHRegSize = MFI.getObjectSize(FI);
 
   if (RestoreSP) {
     // MOV32rm -EHRegSize(%ebp), %esp
@@ -2664,6 +2788,150 @@
   return MBBI;
 }
 
+namespace {
+// Struct used by orderFrameObjects to help sort the stack objects.
+struct X86FrameSortingObject {
+  bool IsValid = false;         // true if we care about this Object.
+  unsigned ObjectIndex = 0;     // Index of Object into MFI list.
+  unsigned ObjectSize = 0;      // Size of Object in bytes.
+  unsigned ObjectAlignment = 1; // Alignment of Object in bytes.
+  unsigned ObjectNumUses = 0;   // Object static number of uses.
+};
+
+// The comparison function we use for std::sort to order our local
+// stack symbols. The current algorithm is to use an estimated
+// "density". This takes into consideration the size and number of
+// uses each object has in order to roughly minimize code size.
+// So, for example, an object of size 16B that is referenced 5 times
+// will get higher priority than 4 4B objects referenced 1 time each.
+// It's not perfect and we may be able to squeeze a few more bytes out of
+// it (for example : 0(esp) requires fewer bytes, symbols allocated at the
+// fringe end can have special consideration, given their size is less
+// important, etc.), but the algorithmic complexity grows too much to be
+// worth the extra gains we get. This gets us pretty close.
+// The final order leaves us with objects with highest priority going
+// at the end of our list.
+struct X86FrameSortingComparator {
+  inline bool operator()(const X86FrameSortingObject &A,
+                         const X86FrameSortingObject &B) {
+    uint64_t DensityAScaled, DensityBScaled;
+
+    // For consistency in our comparison, all invalid objects are placed
+    // at the end. This also allows us to stop walking when we hit the
+    // first invalid item after it's all sorted.
+    if (!A.IsValid)
+      return false;
+    if (!B.IsValid)
+      return true;
+
+    // The density is calculated by doing :
+    //     (double)DensityA = A.ObjectNumUses / A.ObjectSize
+    //     (double)DensityB = B.ObjectNumUses / B.ObjectSize
+    // Since this approach may cause inconsistencies in
+    // the floating point <, >, == comparisons, depending on the floating
+    // point model with which the compiler was built, we're going
+    // to scale both sides by multiplying with
+    // A.ObjectSize * B.ObjectSize. This ends up factoring away
+    // the division and, with it, the need for any floating point
+    // arithmetic.
+    DensityAScaled = static_cast<uint64_t>(A.ObjectNumUses) *
+      static_cast<uint64_t>(B.ObjectSize);
+    DensityBScaled = static_cast<uint64_t>(B.ObjectNumUses) *
+      static_cast<uint64_t>(A.ObjectSize);
+
+    // If the two densities are equal, prioritize highest alignment
+    // objects. This allows for similar alignment objects
+    // to be packed together (given the same density).
+    // There's room for improvement here, also, since we can pack
+    // similar alignment (different density) objects next to each
+    // other to save padding. This will also require further
+    // complexity/iterations, and the overall gain isn't worth it,
+    // in general. Something to keep in mind, though.
+    if (DensityAScaled == DensityBScaled)
+      return A.ObjectAlignment < B.ObjectAlignment;
+
+    return DensityAScaled < DensityBScaled;
+  }
+};
+} // namespace
+
+// Order the symbols in the local stack.
+// We want to place the local stack objects in some sort of sensible order.
+// The heuristic we use is to try and pack them according to static number
+// of uses and size of object in order to minimize code size.
+void X86FrameLowering::orderFrameObjects(
+    const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+
+  // Don't waste time if there's nothing to do.
+  if (ObjectsToAllocate.empty())
+    return;
+
+  // Create an array of all MFI objects. We won't need all of these
+  // objects, but we're going to create a full array of them to make
+  // it easier to index into when we're counting "uses" down below.
+  // We want to be able to easily/cheaply access an object by simply
+  // indexing into it, instead of having to search for it every time.
+  std::vector<X86FrameSortingObject> SortingObjects(MFI.getObjectIndexEnd());
+
+  // Walk the objects we care about and mark them as such in our working
+  // struct.
+  for (auto &Obj : ObjectsToAllocate) {
+    SortingObjects[Obj].IsValid = true;
+    SortingObjects[Obj].ObjectIndex = Obj;
+    SortingObjects[Obj].ObjectAlignment = MFI.getObjectAlignment(Obj);
+    // Set the size.
+    int ObjectSize = MFI.getObjectSize(Obj);
+    if (ObjectSize == 0)
+      // Variable size. Just use 4.
+      SortingObjects[Obj].ObjectSize = 4;
+    else
+      SortingObjects[Obj].ObjectSize = ObjectSize;
+  }
+
+  // Count the number of uses for each object.
+  for (auto &MBB : MF) {
+    for (auto &MI : MBB) {
+      if (MI.isDebugValue())
+        continue;
+      for (const MachineOperand &MO : MI.operands()) {
+        // Check to see if it's a local stack symbol.
+        if (!MO.isFI())
+          continue;
+        int Index = MO.getIndex();
+        // Check to see if it falls within our range, and is tagged
+        // to require ordering.
+        if (Index >= 0 && Index < MFI.getObjectIndexEnd() &&
+            SortingObjects[Index].IsValid)
+          SortingObjects[Index].ObjectNumUses++;
+      }
+    }
+  }
+
+  // Sort the objects using X86FrameSortingAlgorithm (see its comment for
+  // info).
+  std::stable_sort(SortingObjects.begin(), SortingObjects.end(),
+                   X86FrameSortingComparator());
+
+  // Now modify the original list to represent the final order that
+  // we want. The order will depend on whether we're going to access them
+  // from the stack pointer or the frame pointer. For SP, the list should
+  // end up with the END containing objects that we want with smaller offsets.
+  // For FP, it should be flipped.
+  int i = 0;
+  for (auto &Obj : SortingObjects) {
+    // All invalid items are sorted at the end, so it's safe to stop.
+    if (!Obj.IsValid)
+      break;
+    ObjectsToAllocate[i++] = Obj.ObjectIndex;
+  }
+
+  // Flip it if we're accessing off of the FP.
+  if (!TRI->needsStackRealignment(MF) && hasFP(MF))
+    std::reverse(ObjectsToAllocate.begin(), ObjectsToAllocate.end());
+}
+
 unsigned X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF) const {
   // RDX, the parent frame pointer, is homed into 16(%rsp) in the prologue.
   unsigned Offset = 16;
@@ -2690,15 +2958,31 @@
   // object, so that we can allocate a slot immediately following it. If there
   // were no fixed objects, use offset -SlotSize, which is immediately after the
   // return address. Fixed objects have negative frame indices.
-  MachineFrameInfo *MFI = MF.getFrameInfo();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo();
   int64_t MinFixedObjOffset = -SlotSize;
-  for (int I = MFI->getObjectIndexBegin(); I < 0; ++I)
-    MinFixedObjOffset = std::min(MinFixedObjOffset, MFI->getObjectOffset(I));
-
+  for (int I = MFI.getObjectIndexBegin(); I < 0; ++I)
+    MinFixedObjOffset = std::min(MinFixedObjOffset, MFI.getObjectOffset(I));
+
+  for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
+    for (WinEHHandlerType &H : TBME.HandlerArray) {
+      int FrameIndex = H.CatchObj.FrameIndex;
+      if (FrameIndex != INT_MAX) {
+        // Ensure alignment.
+        unsigned Align = MFI.getObjectAlignment(FrameIndex);
+        MinFixedObjOffset -= std::abs(MinFixedObjOffset) % Align;
+        MinFixedObjOffset -= MFI.getObjectSize(FrameIndex);
+        MFI.setObjectOffset(FrameIndex, MinFixedObjOffset);
+      }
    }
  }
 
+  // Ensure alignment.
+  MinFixedObjOffset -= std::abs(MinFixedObjOffset) % 8;
   int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize;
   int UnwindHelpFI =
-      MFI->CreateFixedObject(SlotSize, UnwindHelpOffset, /*Immutable=*/false);
-  MF.getWinEHFuncInfo()->UnwindHelpFrameIdx = UnwindHelpFI;
+      MFI.CreateFixedObject(SlotSize, UnwindHelpOffset, /*Immutable=*/false);
+  EHInfo.UnwindHelpFrameIdx = UnwindHelpFI;
 
   // Store -2 into UnwindHelp on function entry. We have to scan forwards past
   // other frame setup instructions.
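
Notes on the hunks above, with short sketches. Most of the mechanical churn in this update tracks the LLVM 4.0 API change in which MachineFunction::getFrameInfo() returns a MachineFrameInfo reference rather than a pointer, so every MFI-> becomes MFI. and the locals change from pointer to reference. A minimal sketch of the call-site rewrite (hasDynamicAllocas() is a hypothetical example, not part of the patch):

    #include "llvm/CodeGen/MachineFrameInfo.h"
    #include "llvm/CodeGen/MachineFunction.h"
    using namespace llvm;

    // Hypothetical call site, shown only to illustrate the '->' to '.' rewrite.
    static bool hasDynamicAllocas(const MachineFunction &MF) {
      // Before: const MachineFrameInfo *MFI = MF.getFrameInfo();
      //         return MFI->hasVarSizedObjects();
      const MachineFrameInfo &MFI = MF.getFrameInfo();
      return MFI.hasVarSizedObjects();
    }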
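The new LEA case in mergeSPUpdates matches the five memory operands of an x86 LEA (base, scale, index, displacement, segment) that follow the destination register; the hunk's own comment spells this out as "def = lea SP, FI, noreg, Offset, noreg". A hedged standalone sketch of the same predicate (isLEAofSP and the X86InstrInfo.h include path are assumptions, not part of the patch):

    #include "llvm/CodeGen/MachineInstr.h"
    #include "X86InstrInfo.h" // assumed in-tree include for X86:: opcodes
    using namespace llvm;

    // Sketch: true iff MI is "SP = lea Offset(SP)" with no index or segment.
    // Operand layout: 0 = def, 1 = base, 2 = scale, 3 = index, 4 = disp, 5 = segment.
    static bool isLEAofSP(const MachineInstr &MI, unsigned StackPtr, int64_t &Disp) {
      unsigned Opc = MI.getOpcode();
      if (Opc != X86::LEA32r && Opc != X86::LEA64_32r)
        return false;
      if (MI.getOperand(0).getReg() != StackPtr ||
          MI.getOperand(1).getReg() != StackPtr ||
          MI.getOperand(2).getImm() != 1 ||
          MI.getOperand(3).getReg() != X86::NoRegister ||
          MI.getOperand(5).getReg() != X86::NoRegister)
        return false;
      Disp = MI.getOperand(4).getImm();
      return true;
    }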
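The red-zone path in emitPrologue shrinks the allocation by up to 128 bytes (the x86-64 red zone below RSP) but never below the callee-saved area. A standalone mirror of the clamp with illustrative numbers:

    #include <algorithm>
    #include <cstdint>

    // Mirror of the red-zone clamp in emitPrologue (illustrative only).
    static uint64_t redZoneStackSize(uint64_t StackSize, uint64_t MinSize) {
      return std::max(MinSize, StackSize > 128 ? StackSize - 128 : UINT64_C(0));
    }
    // redZoneStackSize(200, 8) == 72 : the first 128 bytes live in the red zone.
    // redZoneStackSize(96, 8)  == 8  : the locals fit entirely in the red zone.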
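emitEpilogue now tolerates blocks whose terminator is absent (MBBI == MBB.end()), capturing the return opcode in an Optional before any use. The guard pattern, sketched against the LLVM 4.0-era ADT (firstTerminatorOpcode is a hypothetical helper):

    #include "llvm/ADT/Optional.h"
    #include "llvm/CodeGen/MachineBasicBlock.h"
    using namespace llvm;

    // Sketch of the guarded-opcode pattern used in emitEpilogue.
    static Optional<unsigned> firstTerminatorOpcode(MachineBasicBlock &MBB) {
      MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
      Optional<unsigned> RetOpcode;
      if (MBBI != MBB.end())
        RetOpcode = MBBI->getOpcode();
      return RetOpcode; // callers test: if (RetOpcode && *RetOpcode == ...)
    }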
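The arithmetic at the end of getFrameIndexReferencePreferSP follows the A/B/C/E points of its comment block: A is the incoming SP, B the start of the local area, C the object, E the post-prologue SP. A sketch with made-up numbers (offsetFromSP is a hypothetical standalone mirror, not the LLVM API):

    #include <cstdint>

    // Return value = (C - E): distance of the object above the post-prologue SP.
    static int64_t offsetFromSP(int64_t ObjectOffset, int64_t LocalAreaOffset,
                                int64_t StackSize) {
      int64_t Offset = ObjectOffset - LocalAreaOffset; // (C - A) - (B - A)
      return Offset + StackSize;                       // (C - B) + (B - E)
    }
    // offsetFromSP(-24, -8, 40) == 24, i.e. the object sits 24 bytes above SP,
    // using getOffsetOfLocalArea() == -8 as on x86-64 and illustrative values.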
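The spillCalleeSavedRegisters hunk stops unconditionally killing pushed CSRs: a push may only carry a kill flag if neither the register nor any overlapping register is a function live-in. A sketch of that decision factored into a helper (canKillOnSpill is hypothetical; the iterator and queries are the real LLVM APIs):

    #include "llvm/CodeGen/MachineRegisterInfo.h"
    #include "llvm/MC/MCRegisterInfo.h"
    #include "llvm/Target/TargetRegisterInfo.h"
    using namespace llvm;

    // A push may only kill Reg if neither Reg nor any alias is live-in.
    static bool canKillOnSpill(unsigned Reg, const MachineRegisterInfo &MRI,
                               const TargetRegisterInfo *TRI) {
      if (MRI.isLiveIn(Reg))
        return false;
      for (MCRegAliasIterator AReg(Reg, TRI, /*IncludeSelf=*/false);
           AReg.isValid(); ++AReg)
        if (MRI.isLiveIn(*AReg))
          return false;
      return true;
    }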
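The HiPE hunks replace the hard-coded constants (24 leaf words, SP limit offsets 0x90/0x4c) with values read from the module's !hipe.literals named metadata, where each operand is a (name, integer) pair. A sketch of the lookup as the patch uses it, assuming getHiPELiteral from the hunk is visible in the same file (leafWords is a hypothetical wrapper):

    #include "llvm/IR/Module.h"
    #include "llvm/Support/ErrorHandling.h"
    using namespace llvm;

    // The frontend is expected to emit metadata shaped like (IR, in comments):
    //   !hipe.literals = !{!0, !1, !2}
    //   !0 = !{!"P_NSP_LIMIT", i32 ...}   etc.
    static unsigned leafWords(Module &M, bool Is64Bit) {
      NamedMDNode *MD = M.getNamedMetadata("hipe.literals");
      if (!MD)
        report_fatal_error("Can't generate HiPE prologue without runtime parameters");
      return getHiPELiteral(MD, Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS");
    }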
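In the reworked eliminateCallFramePseudoInstr, the SP delta and the CFA delta always move in opposite directions: growing the stack for outgoing arguments subtracts from SP, so the CFA offset grows by the same amount. A small mirror of the sign convention (callFrameAdjustments is illustrative only):

    #include <cstdint>

    struct Adjustments { int64_t Stack, Cfa; };
    // Sign conventions from the hunk: setup subtracts from SP, destroy adds.
    static Adjustments callFrameAdjustments(int64_t Amount, bool IsDestroy) {
      int64_t StackAdjustment = IsDestroy ? Amount : -Amount;
      return { StackAdjustment, -StackAdjustment };
    }
    // callFrameAdjustments(16, /*IsDestroy=*/false) == { -16, +16 }:
    // SP drops by 16 bytes for outgoing arguments, the CFA offset grows by 16.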
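X86FrameSortingComparator compares use/size "densities" by cross-multiplying instead of dividing, avoiding floating point entirely. A sketch of the core comparison with the worked numbers from its own comment (lessDense is a hypothetical reduction of the comparator):

    #include <cstdint>

    // A.uses/A.size < B.uses/B.size, compared without floating point.
    static bool lessDense(uint64_t UsesA, uint64_t SizeA,
                          uint64_t UsesB, uint64_t SizeB) {
      return UsesA * SizeB < UsesB * SizeA;
    }
    // One 16-byte object used 5 times vs. a 4-byte object used once:
    // 5*4 = 20 > 1*16 = 16, so the 16-byte object is denser and sorts later
    // (higher priority objects go at the end of the sorted list).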
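The final hunk slides MinFixedObjOffset down to an 8-byte boundary before carving out the UnwindHelp slot; because fixed-object offsets are negative, it subtracts the remainder of the absolute value. A mirror of that step (alignDown is illustrative only):

    #include <cstdint>
    #include <cstdlib>

    // Round a negative frame offset down to a multiple of Align.
    static int64_t alignDown(int64_t MinFixedObjOffset, int64_t Align) {
      return MinFixedObjOffset - std::abs(MinFixedObjOffset) % Align;
    }
    // alignDown(-44, 8) == -48; alignDown(-48, 8) == -48 (already aligned).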