diff lib/Target/X86/X86LegalizerInfo.cpp @ 134:3a76565eade5 LLVM5.0.1
update 5.0.1
| author   | mir3636                         |
|----------|---------------------------------|
| date     | Sat, 17 Feb 2018 09:57:20 +0900 |
| parents  | 803732b1fca8                    |
| children | c2174574ed3a                    |
```diff
--- a/lib/Target/X86/X86LegalizerInfo.cpp	Fri Feb 16 19:10:49 2018 +0900
+++ b/lib/Target/X86/X86LegalizerInfo.cpp	Sat Feb 17 09:57:20 2018 +0900
@@ -14,13 +14,46 @@
 #include "X86LegalizerInfo.h"
 #include "X86Subtarget.h"
 #include "X86TargetMachine.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
 #include "llvm/CodeGen/ValueTypes.h"
 #include "llvm/IR/DerivedTypes.h"
 #include "llvm/IR/Type.h"
-#include "llvm/Target/TargetOpcodes.h"
 
 using namespace llvm;
 using namespace TargetOpcode;
+using namespace LegalizeActions;
+
+/// FIXME: The following static functions are SizeChangeStrategy functions
+/// that are meant to temporarily mimic the behaviour of the old legalization
+/// based on doubling/halving non-legal types as closely as possible. This is
+/// not entirly possible as only legalizing the types that are exactly a power
+/// of 2 times the size of the legal types would require specifying all those
+/// sizes explicitly.
+/// In practice, not specifying those isn't a problem, and the below functions
+/// should disappear quickly as we add support for legalizing non-power-of-2
+/// sized types further.
+static void
+addAndInterleaveWithUnsupported(LegalizerInfo::SizeAndActionsVec &result,
+                                const LegalizerInfo::SizeAndActionsVec &v) {
+  for (unsigned i = 0; i < v.size(); ++i) {
+    result.push_back(v[i]);
+    if (i + 1 < v[i].first && i + 1 < v.size() &&
+        v[i + 1].first != v[i].first + 1)
+      result.push_back({v[i].first + 1, Unsupported});
+  }
+}
+
+static LegalizerInfo::SizeAndActionsVec
+widen_1(const LegalizerInfo::SizeAndActionsVec &v) {
+  assert(v.size() >= 1);
+  assert(v[0].first > 1);
+  LegalizerInfo::SizeAndActionsVec result = {{1, WidenScalar},
+                                             {2, Unsupported}};
+  addAndInterleaveWithUnsupported(result, v);
+  auto Largest = result.back().first;
+  result.push_back({Largest + 1, Unsupported});
+  return result;
+}
 
 X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
                                    const X86TargetMachine &TM)
@@ -37,6 +70,17 @@
   setLegalizerInfoAVX512DQ();
   setLegalizerInfoAVX512BW();
 
+  setLegalizeScalarToDifferentSizeStrategy(G_PHI, 0, widen_1);
+  for (unsigned BinOp : {G_SUB, G_MUL, G_AND, G_OR, G_XOR})
+    setLegalizeScalarToDifferentSizeStrategy(BinOp, 0, widen_1);
+  for (unsigned MemOp : {G_LOAD, G_STORE})
+    setLegalizeScalarToDifferentSizeStrategy(MemOp, 0,
+                                             narrowToSmallerAndWidenToSmallest);
+  setLegalizeScalarToDifferentSizeStrategy(
+      G_GEP, 1, widenToLargerTypesUnsupportedOtherwise);
+  setLegalizeScalarToDifferentSizeStrategy(
+      G_CONSTANT, 0, widenToLargerTypesAndNarrowToLargest);
+
   computeTables();
 }
 
@@ -48,6 +92,7 @@
   const LLT s16 = LLT::scalar(16);
   const LLT s32 = LLT::scalar(32);
   const LLT s64 = LLT::scalar(64);
+  const LLT s128 = LLT::scalar(128);
 
   for (auto Ty : {p0, s1, s8, s16, s32})
     setAction({G_IMPLICIT_DEF, Ty}, Legal);
@@ -55,15 +100,10 @@
   for (auto Ty : {s8, s16, s32, p0})
     setAction({G_PHI, Ty}, Legal);
 
-  setAction({G_PHI, s1}, WidenScalar);
-
-  for (unsigned BinOp : {G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR}) {
+  for (unsigned BinOp : {G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
     for (auto Ty : {s8, s16, s32})
       setAction({BinOp, Ty}, Legal);
 
-    setAction({BinOp, s1}, WidenScalar);
-  }
-
   for (unsigned Op : {G_UADDE}) {
     setAction({Op, s32}, Legal);
     setAction({Op, 1, s1}, Legal);
@@ -73,7 +113,6 @@
     for (auto Ty : {s8, s16, s32, p0})
       setAction({MemOp, Ty}, Legal);
 
-    setAction({MemOp, s1}, WidenScalar);
     // And everything's fine in addrspace 0.
     setAction({MemOp, 1, p0}, Legal);
   }
@@ -85,9 +124,6 @@
   setAction({G_GEP, p0}, Legal);
   setAction({G_GEP, 1, s32}, Legal);
 
-  for (auto Ty : {s1, s8, s16})
-    setAction({G_GEP, 1, Ty}, WidenScalar);
-
   // Control-flow
   setAction({G_BRCOND, s1}, Legal);
 
@@ -95,27 +131,29 @@
   for (auto Ty : {s8, s16, s32, p0})
     setAction({TargetOpcode::G_CONSTANT, Ty}, Legal);
 
-  setAction({TargetOpcode::G_CONSTANT, s1}, WidenScalar);
-  setAction({TargetOpcode::G_CONSTANT, s64}, NarrowScalar);
-
   // Extensions
   for (auto Ty : {s8, s16, s32}) {
     setAction({G_ZEXT, Ty}, Legal);
     setAction({G_SEXT, Ty}, Legal);
     setAction({G_ANYEXT, Ty}, Legal);
   }
-
-  for (auto Ty : {s1, s8, s16}) {
-    setAction({G_ZEXT, 1, Ty}, Legal);
-    setAction({G_SEXT, 1, Ty}, Legal);
-    setAction({G_ANYEXT, 1, Ty}, Legal);
-  }
+  setAction({G_ANYEXT, s128}, Legal);
 
   // Comparison
   setAction({G_ICMP, s1}, Legal);
 
   for (auto Ty : {s8, s16, s32, p0})
     setAction({G_ICMP, 1, Ty}, Legal);
+
+  // Merge/Unmerge
+  for (const auto &Ty : {s16, s32, s64}) {
+    setAction({G_MERGE_VALUES, Ty}, Legal);
+    setAction({G_UNMERGE_VALUES, 1, Ty}, Legal);
+  }
+  for (const auto &Ty : {s8, s16, s32}) {
+    setAction({G_MERGE_VALUES, 1, Ty}, Legal);
+    setAction({G_UNMERGE_VALUES, Ty}, Legal);
+  }
 }
 
 void X86LegalizerInfo::setLegalizerInfo64bit() {
@@ -123,10 +161,13 @@
   if (!Subtarget.is64Bit())
     return;
 
-  const LLT s32 = LLT::scalar(32);
   const LLT s64 = LLT::scalar(64);
+  const LLT s128 = LLT::scalar(128);
 
   setAction({G_IMPLICIT_DEF, s64}, Legal);
+  // Need to have that, as tryFoldImplicitDef will create this pattern:
+  // s128 = EXTEND (G_IMPLICIT_DEF s32/s64) -> s128 = G_IMPLICIT_DEF
+  setAction({G_IMPLICIT_DEF, s128}, Legal);
 
   setAction({G_PHI, s64}, Legal);
 
@@ -145,11 +186,16 @@
   // Extensions
   for (unsigned extOp : {G_ZEXT, G_SEXT, G_ANYEXT}) {
     setAction({extOp, s64}, Legal);
-    setAction({extOp, 1, s32}, Legal);
   }
 
   // Comparison
   setAction({G_ICMP, 1, s64}, Legal);
+
+  // Merge/Unmerge
+  setAction({G_MERGE_VALUES, s128}, Legal);
+  setAction({G_UNMERGE_VALUES, 1, s128}, Legal);
+  setAction({G_MERGE_VALUES, 1, s128}, Legal);
+  setAction({G_UNMERGE_VALUES, s128}, Legal);
 }
 
 void X86LegalizerInfo::setLegalizerInfoSSE1() {
@@ -157,6 +203,7 @@
     return;
 
   const LLT s32 = LLT::scalar(32);
+  const LLT s64 = LLT::scalar(64);
   const LLT v4s32 = LLT::vector(4, 32);
   const LLT v2s64 = LLT::vector(2, 64);
 
@@ -170,6 +217,14 @@
 
   // Constants
   setAction({TargetOpcode::G_FCONSTANT, s32}, Legal);
+
+  // Merge/Unmerge
+  for (const auto &Ty : {v4s32, v2s64}) {
+    setAction({G_MERGE_VALUES, Ty}, Legal);
+    setAction({G_UNMERGE_VALUES, 1, Ty}, Legal);
+  }
+  setAction({G_MERGE_VALUES, 1, s64}, Legal);
+  setAction({G_UNMERGE_VALUES, s64}, Legal);
 }
 
 void X86LegalizerInfo::setLegalizerInfoSSE2() {
@@ -183,6 +238,11 @@
   const LLT v4s32 = LLT::vector(4, 32);
   const LLT v2s64 = LLT::vector(2, 64);
 
+  const LLT v32s8 = LLT::vector(32, 8);
+  const LLT v16s16 = LLT::vector(16, 16);
+  const LLT v8s32 = LLT::vector(8, 32);
+  const LLT v4s64 = LLT::vector(4, 64);
+
   for (unsigned BinOp : {G_FADD, G_FSUB, G_FMUL, G_FDIV})
     for (auto Ty : {s64, v2s64})
       setAction({BinOp, Ty}, Legal);
@@ -198,6 +258,17 @@
 
   // Constants
   setAction({TargetOpcode::G_FCONSTANT, s64}, Legal);
+
+  // Merge/Unmerge
+  for (const auto &Ty :
+       {v16s8, v32s8, v8s16, v16s16, v4s32, v8s32, v2s64, v4s64}) {
+    setAction({G_MERGE_VALUES, Ty}, Legal);
+    setAction({G_UNMERGE_VALUES, 1, Ty}, Legal);
+  }
+  for (const auto &Ty : {v16s8, v8s16, v4s32, v2s64}) {
+    setAction({G_MERGE_VALUES, 1, Ty}, Legal);
+    setAction({G_UNMERGE_VALUES, Ty}, Legal);
+  }
 }
 
 void X86LegalizerInfo::setLegalizerInfoSSE41() {
@@ -219,9 +290,13 @@
   const LLT v2s64 = LLT::vector(2, 64);
 
   const LLT v32s8 = LLT::vector(32, 8);
+  const LLT v64s8 = LLT::vector(64, 8);
   const LLT v16s16 = LLT::vector(16, 16);
+  const LLT v32s16 = LLT::vector(32, 16);
   const LLT v8s32 = LLT::vector(8, 32);
+  const LLT v16s32 = LLT::vector(16, 32);
   const LLT v4s64 = LLT::vector(4, 64);
+  const LLT v8s64 = LLT::vector(8, 64);
 
   for (unsigned MemOp : {G_LOAD, G_STORE})
     for (auto Ty : {v8s32, v4s64})
@@ -235,6 +310,17 @@
     setAction({G_INSERT, 1, Ty}, Legal);
     setAction({G_EXTRACT, Ty}, Legal);
   }
+  // Merge/Unmerge
+  for (const auto &Ty :
+       {v32s8, v64s8, v16s16, v32s16, v8s32, v16s32, v4s64, v8s64}) {
+    setAction({G_MERGE_VALUES, Ty}, Legal);
+    setAction({G_UNMERGE_VALUES, 1, Ty}, Legal);
+  }
+  for (const auto &Ty :
+       {v16s8, v32s8, v8s16, v16s16, v4s32, v8s32, v2s64, v4s64}) {
+    setAction({G_MERGE_VALUES, 1, Ty}, Legal);
+    setAction({G_UNMERGE_VALUES, Ty}, Legal);
+  }
 }
 
 void X86LegalizerInfo::setLegalizerInfoAVX2() {
@@ -246,12 +332,27 @@
   const LLT v8s32 = LLT::vector(8, 32);
   const LLT v4s64 = LLT::vector(4, 64);
 
+  const LLT v64s8 = LLT::vector(64, 8);
+  const LLT v32s16 = LLT::vector(32, 16);
+  const LLT v16s32 = LLT::vector(16, 32);
+  const LLT v8s64 = LLT::vector(8, 64);
+
   for (unsigned BinOp : {G_ADD, G_SUB})
     for (auto Ty : {v32s8, v16s16, v8s32, v4s64})
      setAction({BinOp, Ty}, Legal);
 
   for (auto Ty : {v16s16, v8s32})
     setAction({G_MUL, Ty}, Legal);
+
+  // Merge/Unmerge
+  for (const auto &Ty : {v64s8, v32s16, v16s32, v8s64}) {
+    setAction({G_MERGE_VALUES, Ty}, Legal);
+    setAction({G_UNMERGE_VALUES, 1, Ty}, Legal);
+  }
+  for (const auto &Ty : {v32s8, v16s16, v8s32, v4s64}) {
+    setAction({G_MERGE_VALUES, 1, Ty}, Legal);
+    setAction({G_UNMERGE_VALUES, Ty}, Legal);
+  }
 }
 
 void X86LegalizerInfo::setLegalizerInfoAVX512() {
```
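The effect of the `widen_1` strategy registered above is easiest to see on a concrete size/action table. The sketch below is a minimal standalone approximation, not the LLVM implementation: it re-creates the two static helpers from this diff against plain `std::pair`/`std::vector` stand-ins for `LegalizerInfo::SizeAndActionsVec` and the `LegalizeActions` enum, and it assumes the input table {s8, s16, s32} that the 32-bit binary-op rules above would declare Legal; the `Action` enum, type aliases, and `main` driver are illustrative names only.

```cpp
// Standalone sketch (no LLVM headers): std-library stand-ins for
// LegalizerInfo::SizeAndActionsVec and the LegalizeActions enum.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

enum Action { Legal, WidenScalar, Unsupported };

using SizeAndAction = std::pair<uint16_t, Action>;
using SizeAndActionsVec = std::vector<SizeAndAction>;

// Copy each (size, action) entry; whenever the next specified size is not
// size + 1, insert an explicit (size + 1, Unsupported) marker in between.
static void addAndInterleaveWithUnsupported(SizeAndActionsVec &result,
                                            const SizeAndActionsVec &v) {
  for (unsigned i = 0; i < v.size(); ++i) {
    result.push_back(v[i]);
    if (i + 1 < v[i].first && i + 1 < v.size() &&
        v[i + 1].first != v[i].first + 1)
      result.push_back({v[i].first + 1, Unsupported});
  }
}

// widen_1: bit width 1 gets widened, everything from 2 up to the first
// specified size is unsupported, and so is anything past the largest size.
static SizeAndActionsVec widen_1(const SizeAndActionsVec &v) {
  assert(v.size() >= 1);
  assert(v[0].first > 1);
  SizeAndActionsVec result = {{1, WidenScalar}, {2, Unsupported}};
  addAndInterleaveWithUnsupported(result, v);
  auto Largest = result.back().first;
  result.push_back({Largest + 1, Unsupported});
  return result;
}

int main() {
  // Assumed input: the scalar sizes declared Legal for the 32-bit binary ops.
  SizeAndActionsVec LegalSizes = {{8, Legal}, {16, Legal}, {32, Legal}};
  for (const auto &Entry : widen_1(LegalSizes))
    std::printf("%3u -> %s\n", unsigned(Entry.first),
                Entry.second == Legal         ? "Legal"
                : Entry.second == WidenScalar ? "WidenScalar"
                                              : "Unsupported");
  // Resulting table:
  //   1 -> WidenScalar,  2 -> Unsupported,  8 -> Legal,  9 -> Unsupported,
  //  16 -> Legal, 17 -> Unsupported, 32 -> Legal, 33 -> Unsupported
  return 0;
}
```

Registering `widen_1` for `G_PHI` and the scalar binary ops should therefore reproduce what the deleted per-opcode `setAction({..., s1}, WidenScalar)` lines asked for: an s1 operand is widened to the next size the table specifies (s8), while sizes the table never mentions stay unsupported.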