llvm/1000-add-loongarch-support-upstream-modified.patch
herengui 728f6ba45b Add LoongArch architecture support
Signed-off-by: herengui <herengui@kylinsec.com.cn>
(cherry picked from commit fcac4183f7b7c9e2c85fdb04ead9c02442b8cd0c)
2023-09-08 16:15:26 +08:00

From 256422d6f40897e703eb6e73a287aac337f808c2 Mon Sep 17 00:00:00 2001
From: herengui <herengui@kylinsec.com.cn>
Date: Wed, 30 Aug 2023 17:04:06 +0800
Subject: [PATCH 1000/1001] Add LoongArch support (modified from upstream)
Signed-off-by: herengui <herengui@kylinsec.com.cn>
---
CMakeLists.txt | 1 +
cmake/config-ix.cmake | 2 +
cmake/config.guess | 3 +
include/llvm/ADT/Triple.h | 18 ++
include/llvm/BinaryFormat/ELF.h | 24 ++
include/llvm/Demangle/ItaniumDemangle.h | 2 +-
.../llvm/ExecutionEngine/Orc/OrcABISupport.h | 37 +++
.../Orc/OrcRemoteTargetClient.h | 4 +-
include/llvm/IR/CMakeLists.txt | 1 +
include/llvm/IR/InlineAsm.h | 1 +
include/llvm/IR/Intrinsics.td | 1 +
include/llvm/Object/ELFObjectFile.h | 13 +
include/llvm/Support/Base64.h | 1 +
include/llvm/Support/Signals.h | 1 +
include/llvm/module.modulemap | 1 +
lib/CodeGen/TargetLoweringObjectFileImpl.cpp | 8 +
lib/CodeGen/XRayInstrumentation.cpp | 2 +
lib/ExecutionEngine/Orc/IndirectionUtils.cpp | 5 +
lib/ExecutionEngine/Orc/LazyReexports.cpp | 4 +
lib/ExecutionEngine/Orc/OrcABISupport.cpp | 201 ++++++++++++
.../RuntimeDyld/RuntimeDyldELF.cpp | 296 ++++++++++++++++++
.../RuntimeDyld/RuntimeDyldELF.h | 8 +
lib/IR/Function.cpp | 1 +
lib/Object/ELF.cpp | 7 +
lib/Object/RelocationResolver.cpp | 26 ++
lib/ObjectYAML/ELFYAML.cpp | 11 +
lib/Support/Host.cpp | 69 ++++
lib/Support/Triple.cpp | 23 ++
.../Instrumentation/AddressSanitizer.cpp | 8 +-
.../Instrumentation/DataFlowSanitizer.cpp | 38 ++-
.../Instrumentation/MemorySanitizer.cpp | 130 ++++++++
lib/XRay/InstrumentationMap.cpp | 1 +
.../X86/MachO_GOTAndStubsOptimization.s | 1 +
.../MCJIT/2003-01-04-ArgumentBug.ll | 2 +
test/ExecutionEngine/MCJIT/lit.local.cfg | 5 +-
test/ExecutionEngine/MCJIT/pr13727.ll | 2 +
.../remote/test-common-symbols-remote.ll | 1 +
.../MCJIT/test-common-symbols.ll | 2 +
test/ExecutionEngine/frem.ll | 2 +
.../DataFlowSanitizer/callback.ll | 15 +-
tools/llvm-readobj/ELFDumper.cpp | 17 +
tools/sancov/sancov.cpp | 2 +-
utils/UpdateTestChecks/asm.py | 17 +
utils/benchmark/src/cycleclock.h | 4 +
utils/gn/secondary/clang/lib/Basic/BUILD.gn | 1 +
utils/gn/secondary/clang/lib/Driver/BUILD.gn | 1 +
.../secondary/llvm/include/llvm/IR/BUILD.gn | 11 +
.../gn/secondary/llvm/lib/Target/targets.gni | 4 +
48 files changed, 1018 insertions(+), 17 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 28ccef34..4b958a38 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -296,6 +296,7 @@ set(LLVM_ALL_TARGETS
BPF
Hexagon
Lanai
+ LoongArch
Mips
MSP430
NVPTX
diff --git a/cmake/config-ix.cmake b/cmake/config-ix.cmake
index 818fafbc..023612f3 100644
--- a/cmake/config-ix.cmake
+++ b/cmake/config-ix.cmake
@@ -449,6 +449,8 @@ elseif (LLVM_NATIVE_ARCH MATCHES "riscv32")
set(LLVM_NATIVE_ARCH RISCV)
elseif (LLVM_NATIVE_ARCH MATCHES "riscv64")
set(LLVM_NATIVE_ARCH RISCV)
+elseif (LLVM_NATIVE_ARCH MATCHES "loongarch")
+ set(LLVM_NATIVE_ARCH LoongArch)
else ()
message(FATAL_ERROR "Unknown architecture ${LLVM_NATIVE_ARCH}")
endif ()
diff --git a/cmake/config.guess b/cmake/config.guess
index 60d3f588..255257d4 100644
--- a/cmake/config.guess
+++ b/cmake/config.guess
@@ -1021,6 +1021,9 @@ EOF
x86_64:Linux:*:*)
echo x86_64-unknown-linux-gnu
exit ;;
+ loongarch64:Linux:*:*)
+ echo loongarch64-unknown-linux-gnu
+ exit ;;
xtensa*:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
diff --git a/include/llvm/ADT/Triple.h b/include/llvm/ADT/Triple.h
index eed315c9..35bc8ef4 100644
--- a/include/llvm/ADT/Triple.h
+++ b/include/llvm/ADT/Triple.h
@@ -58,6 +58,8 @@ public:
bpfeb, // eBPF or extended BPF or 64-bit BPF (big endian)
csky, // CSKY: csky
hexagon, // Hexagon: hexagon
+ loongarch32, // LoongArch (32-bit): loongarch32
+ loongarch64, // LoongArch (64-bit): loongarch64
mips, // MIPS: mips, mipsallegrex, mipsr6
mipsel, // MIPSEL: mipsel, mipsallegrexe, mipsr6el
mips64, // MIPS64: mips64, mips64r6, mipsn32, mipsn32r6
@@ -211,6 +213,7 @@ public:
GNUX32,
GNUILP32,
CODE16,
+ GNUABILPX32,
EABI,
EABIHF,
Android,
@@ -750,6 +753,21 @@ public:
return isMIPS32() || isMIPS64();
}
+ /// Tests whether the target is LoongArch 32-bit.
+ bool isLoongArch32() const {
+ return getArch() == Triple::loongarch32;
+ }
+
+ /// Tests whether the target is LoongArch 64-bit.
+ bool isLoongArch64() const {
+ return getArch() == Triple::loongarch64;
+ }
+
+ /// Tests whether the target is LoongArch (32- or 64-bit).
+ bool isLoongArch() const {
+ return isLoongArch32() || isLoongArch64();
+ }
+
/// Tests whether the target is PowerPC (32- or 64-bit LE or BE).
bool isPPC() const {
return getArch() == Triple::ppc || getArch() == Triple::ppc64 ||
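
The new predicates mirror the existing isMIPS*/isPPC* helpers. A minimal, standalone sketch of how a caller might use them (the classify helper and the triple strings are illustrative placeholders):

#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"

// Classify a target triple string with the predicates added above.
static void classify(llvm::StringRef TT) {
  llvm::Triple T(TT);
  if (T.isLoongArch())
    llvm::outs() << TT << ": LoongArch, " << (T.isLoongArch64() ? 64 : 32)
                 << "-bit\n";
  else
    llvm::outs() << TT << ": not LoongArch\n";
}

// classify("loongarch64-unknown-linux-gnu") -> "LoongArch, 64-bit"
// classify("x86_64-unknown-linux-gnu")      -> "not LoongArch"
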
diff --git a/include/llvm/BinaryFormat/ELF.h b/include/llvm/BinaryFormat/ELF.h
index 1552303b..e27c48bb 100644
--- a/include/llvm/BinaryFormat/ELF.h
+++ b/include/llvm/BinaryFormat/ELF.h
@@ -317,6 +317,7 @@ enum {
EM_BPF = 247, // Linux kernel bpf virtual machine
EM_VE = 251, // NEC SX-Aurora VE
EM_CSKY = 252, // C-SKY 32-bit processor
+ EM_LOONGARCH = 258, // LoongArch processor
};
// Object file classes.
@@ -649,6 +650,29 @@ enum {
#include "ELFRelocs/RISCV.def"
};
+// LoongArch Specific e_flags
+enum : unsigned {
+ // Definitions from LoongArch ELF psABI v2.01.
+ // Reference: https://github.com/loongson/LoongArch-Documentation
+ // (commit hash 296de4def055c871809068e0816325a4ac04eb12)
+
+ // Base ABI Modifiers
+ EF_LOONGARCH_ABI_SOFT_FLOAT = 0x1,
+ EF_LOONGARCH_ABI_SINGLE_FLOAT = 0x2,
+ EF_LOONGARCH_ABI_DOUBLE_FLOAT = 0x3,
+ EF_LOONGARCH_ABI_MODIFIER_MASK = 0x7,
+
+ // Object file ABI versions
+ EF_LOONGARCH_OBJABI_V0 = 0x0,
+ EF_LOONGARCH_OBJABI_V1 = 0x40,
+ EF_LOONGARCH_OBJABI_MASK = 0xC0,
+};
+
+// ELF Relocation types for LoongArch
+enum {
+#include "ELFRelocs/LoongArch.def"
+};
+
// ELF Relocation types for S390/zSeries
enum {
#include "ELFRelocs/SystemZ.def"
diff --git a/include/llvm/Demangle/ItaniumDemangle.h b/include/llvm/Demangle/ItaniumDemangle.h
index e5fca98f..afdfca0f 100644
--- a/include/llvm/Demangle/ItaniumDemangle.h
+++ b/include/llvm/Demangle/ItaniumDemangle.h
@@ -5322,7 +5322,7 @@ template <>
struct FloatData<long double>
{
#if defined(__mips__) && defined(__mips_n64) || defined(__aarch64__) || \
- defined(__wasm__)
+ defined(__wasm__) || defined(__loongarch__)
static const size_t mangled_size = 32;
#elif defined(__arm__) || defined(__mips__) || defined(__hexagon__)
static const size_t mangled_size = 16;
diff --git a/include/llvm/ExecutionEngine/Orc/OrcABISupport.h b/include/llvm/ExecutionEngine/Orc/OrcABISupport.h
index 5061c15c..b6914a07 100644
--- a/include/llvm/ExecutionEngine/Orc/OrcABISupport.h
+++ b/include/llvm/ExecutionEngine/Orc/OrcABISupport.h
@@ -331,6 +331,43 @@ public:
JITTargetAddress PointersBlockTargetAddress, unsigned NumStubs);
};
+/// LoongArch64 support.
+class OrcLoongArch64 {
+public:
+ static constexpr unsigned PointerSize = 8;
+ static constexpr unsigned TrampolineSize = 40;
+ static constexpr unsigned StubSize = 32;
+ static constexpr unsigned StubToPointerMaxDisplacement = 1U << 31;
+ static constexpr unsigned ResolverCodeSize = 0x120;
+
+ /// Write the resolver code into the given memory. The user is
+ /// responsible for allocating the memory and setting permissions.
+ ///
+ /// ReentryFnAddr should be the address of a function whose signature matches
+ /// void* (*)(void *TrampolineAddr, void *ReentryCtxAddr). The ReentryCtxAddr
+ /// argument of writeResolverCode will be passed as the second argument to
+ /// the function at ReentryFnAddr.
+ static void writeResolverCode(char *ResolverWorkingMem,
+ JITTargetAddress ResolverTargetAddress,
+ JITTargetAddress ReentryFnAddr,
+ JITTargetAddress ReentryCtxAddr);
+
+ /// Write the requested number of trampolines into the given memory,
+ /// which must be big enough to hold 1 pointer, plus NumTrampolines
+ /// trampolines.
+ static void writeTrampolines(char *TrampolineBlockWorkingMem,
+ JITTargetAddress TrampolineBlockTargetAddress,
+ JITTargetAddress ResolverFnAddr,
+ unsigned NumTrampolines);
+ /// Write NumStubs indirect stubs to working memory at StubsBlockWorkingMem.
+ /// Stubs will be written as if linked at StubsBlockTargetAddress, with the
+ /// Nth stub using the Nth pointer in memory starting at
+ /// PointersBlockTargetAddress.
+ static void writeIndirectStubsBlock(
+ char *StubsBlockWorkingMem, JITTargetAddress StubsBlockTargetAddress,
+ JITTargetAddress PointersBlockTargetAddress, unsigned NumStubs);
+};
+
} // end namespace orc
} // end namespace llvm
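
OrcLoongArch64 plugs into the generic Orc lazy-compilation machinery the same way as OrcMips64: the JIT allocates writable working memory, asks the ABI class to emit resolver/trampoline/stub code into it, and then maps that memory executable at the stated target address. A rough usage sketch for the resolver (the helper and its memory handling are placeholders, not a definitive implementation):

#include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
#include <vector>

// Emit the LoongArch64 resolver into host working memory. In a real JIT the
// buffer would come from a JIT memory manager and be remapped read/execute
// at ResolverTargetAddr before use.
static std::vector<char>
emitResolver(llvm::JITTargetAddress ResolverTargetAddr,
             llvm::JITTargetAddress ReentryFn,
             llvm::JITTargetAddress ReentryCtx) {
  std::vector<char> WorkingMem(llvm::orc::OrcLoongArch64::ResolverCodeSize);
  llvm::orc::OrcLoongArch64::writeResolverCode(
      WorkingMem.data(), ResolverTargetAddr, ReentryFn, ReentryCtx);
  return WorkingMem;
}
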
diff --git a/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h b/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h
index 3d139740..17d55a9a 100644
--- a/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h
+++ b/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h
@@ -289,8 +289,8 @@ public:
bool copyAndProtect(const std::vector<Alloc> &Allocs,
JITTargetAddress RemoteSegmentAddr,
unsigned Permissions) {
- if (RemoteSegmentAddr) {
- assert(!Allocs.empty() && "No sections in allocated segment");
+ if (RemoteSegmentAddr && !Allocs.empty()) {
+ // assert(!Allocs.empty() && "No sections in allocated segment");
for (auto &Alloc : Allocs) {
LLVM_DEBUG(dbgs() << " copying section: "
diff --git a/include/llvm/IR/CMakeLists.txt b/include/llvm/IR/CMakeLists.txt
index 0498fc26..b675a45d 100644
--- a/include/llvm/IR/CMakeLists.txt
+++ b/include/llvm/IR/CMakeLists.txt
@@ -9,6 +9,7 @@ tablegen(LLVM IntrinsicsAMDGPU.h -gen-intrinsic-enums -intrinsic-prefix=amdgcn)
tablegen(LLVM IntrinsicsARM.h -gen-intrinsic-enums -intrinsic-prefix=arm)
tablegen(LLVM IntrinsicsBPF.h -gen-intrinsic-enums -intrinsic-prefix=bpf)
tablegen(LLVM IntrinsicsHexagon.h -gen-intrinsic-enums -intrinsic-prefix=hexagon)
+tablegen(LLVM IntrinsicsLoongArch.h -gen-intrinsic-enums -intrinsic-prefix=loongarch)
tablegen(LLVM IntrinsicsMips.h -gen-intrinsic-enums -intrinsic-prefix=mips)
tablegen(LLVM IntrinsicsNVPTX.h -gen-intrinsic-enums -intrinsic-prefix=nvvm)
tablegen(LLVM IntrinsicsPowerPC.h -gen-intrinsic-enums -intrinsic-prefix=ppc)
diff --git a/include/llvm/IR/InlineAsm.h b/include/llvm/IR/InlineAsm.h
index b6f37709..427cb3ed 100644
--- a/include/llvm/IR/InlineAsm.h
+++ b/include/llvm/IR/InlineAsm.h
@@ -259,6 +259,7 @@ public:
Constraint_Uy,
Constraint_X,
Constraint_Z,
+ Constraint_ZB,
Constraint_ZC,
Constraint_Zy,
Constraints_Max = Constraint_Zy,
diff --git a/include/llvm/IR/Intrinsics.td b/include/llvm/IR/Intrinsics.td
index 21307ed1..320e6c78 100644
--- a/include/llvm/IR/Intrinsics.td
+++ b/include/llvm/IR/Intrinsics.td
@@ -1664,3 +1664,4 @@ include "llvm/IR/IntrinsicsSystemZ.td"
include "llvm/IR/IntrinsicsWebAssembly.td"
include "llvm/IR/IntrinsicsRISCV.td"
include "llvm/IR/IntrinsicsVE.td"
+include "llvm/IR/IntrinsicsLoongArch.td"
diff --git a/include/llvm/Object/ELFObjectFile.h b/include/llvm/Object/ELFObjectFile.h
index fed53eef..dd3f6abc 100644
--- a/include/llvm/Object/ELFObjectFile.h
+++ b/include/llvm/Object/ELFObjectFile.h
@@ -1165,6 +1165,8 @@ StringRef ELFObjectFile<ELFT>::getFileFormatName() const {
return "elf32-littleriscv";
case ELF::EM_CSKY:
return "elf32-csky";
+ case ELF::EM_LOONGARCH:
+ return "elf32-loongarch";
case ELF::EM_SPARC:
case ELF::EM_SPARC32PLUS:
return "elf32-sparc";
@@ -1189,6 +1191,8 @@ StringRef ELFObjectFile<ELFT>::getFileFormatName() const {
return "elf64-s390";
case ELF::EM_SPARCV9:
return "elf64-sparc";
+ case ELF::EM_LOONGARCH:
+ return "elf64-loongarch";
case ELF::EM_MIPS:
return "elf64-mips";
case ELF::EM_AMDGPU:
@@ -1248,6 +1252,15 @@ template <class ELFT> Triple::ArchType ELFObjectFile<ELFT>::getArch() const {
default:
report_fatal_error("Invalid ELFCLASS!");
}
+ case ELF::EM_LOONGARCH:
+ switch (EF.getHeader().e_ident[ELF::EI_CLASS]) {
+ case ELF::ELFCLASS32:
+ return Triple::loongarch32;
+ case ELF::ELFCLASS64:
+ return Triple::loongarch64;
+ default:
+ report_fatal_error("Invalid ELFCLASS!");
+ }
case ELF::EM_S390:
return Triple::systemz;
diff --git a/include/llvm/Support/Base64.h b/include/llvm/Support/Base64.h
index 62064a35..da4ae168 100644
--- a/include/llvm/Support/Base64.h
+++ b/include/llvm/Support/Base64.h
@@ -13,6 +13,7 @@
#ifndef LLVM_SUPPORT_BASE64_H
#define LLVM_SUPPORT_BASE64_H
+#include <cstdint>
#include <string>
namespace llvm {
diff --git a/include/llvm/Support/Signals.h b/include/llvm/Support/Signals.h
index 44f5a750..937e0572 100644
--- a/include/llvm/Support/Signals.h
+++ b/include/llvm/Support/Signals.h
@@ -14,6 +14,7 @@
#ifndef LLVM_SUPPORT_SIGNALS_H
#define LLVM_SUPPORT_SIGNALS_H
+#include <cstdint>
#include <string>
namespace llvm {
diff --git a/include/llvm/module.modulemap b/include/llvm/module.modulemap
index a199f7f2..6b788404 100644
--- a/include/llvm/module.modulemap
+++ b/include/llvm/module.modulemap
@@ -70,6 +70,7 @@ module LLVM_BinaryFormat {
textual header "BinaryFormat/ELFRelocs/Hexagon.def"
textual header "BinaryFormat/ELFRelocs/i386.def"
textual header "BinaryFormat/ELFRelocs/Lanai.def"
+ textual header "BinaryFormat/ELFRelocs/LoongArch.def"
textual header "BinaryFormat/ELFRelocs/Mips.def"
textual header "BinaryFormat/ELFRelocs/MSP430.def"
textual header "BinaryFormat/ELFRelocs/PowerPC64.def"
diff --git a/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
index fe64b38c..4bd3a8e6 100644
--- a/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
+++ b/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
@@ -206,6 +206,14 @@ void TargetLoweringObjectFileELF::Initialize(MCContext &Ctx,
PersonalityEncoding = dwarf::DW_EH_PE_absptr;
TTypeEncoding = dwarf::DW_EH_PE_absptr;
break;
+ case Triple::loongarch32:
+ case Triple::loongarch64:
+ LSDAEncoding = dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4;
+ PersonalityEncoding = dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |
+ dwarf::DW_EH_PE_sdata4;
+ TTypeEncoding = dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |
+ dwarf::DW_EH_PE_sdata4;
+ break;
case Triple::mips:
case Triple::mipsel:
case Triple::mips64:
diff --git a/lib/CodeGen/XRayInstrumentation.cpp b/lib/CodeGen/XRayInstrumentation.cpp
index 11d1b309..1655cac5 100644
--- a/lib/CodeGen/XRayInstrumentation.cpp
+++ b/lib/CodeGen/XRayInstrumentation.cpp
@@ -226,6 +226,8 @@ bool XRayInstrumentation::runOnMachineFunction(MachineFunction &MF) {
case Triple::ArchType::arm:
case Triple::ArchType::thumb:
case Triple::ArchType::aarch64:
+ case Triple::ArchType::loongarch32:
+ case Triple::ArchType::loongarch64:
case Triple::ArchType::mips:
case Triple::ArchType::mipsel:
case Triple::ArchType::mips64:
diff --git a/lib/ExecutionEngine/Orc/IndirectionUtils.cpp b/lib/ExecutionEngine/Orc/IndirectionUtils.cpp
index 1cfcf8ae..c3166162 100644
--- a/lib/ExecutionEngine/Orc/IndirectionUtils.cpp
+++ b/lib/ExecutionEngine/Orc/IndirectionUtils.cpp
@@ -147,6 +147,11 @@ createLocalCompileCallbackManager(const Triple &T, ExecutionSession &ES,
return CCMgrT::Create(ES, ErrorHandlerAddress);
}
+ case Triple::loongarch64: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcLoongArch64> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+
case Triple::x86_64: {
if (T.getOS() == Triple::OSType::Win32) {
typedef orc::LocalJITCompileCallbackManager<orc::OrcX86_64_Win32> CCMgrT;
diff --git a/lib/ExecutionEngine/Orc/LazyReexports.cpp b/lib/ExecutionEngine/Orc/LazyReexports.cpp
index e1f49441..4dba00f1 100644
--- a/lib/ExecutionEngine/Orc/LazyReexports.cpp
+++ b/lib/ExecutionEngine/Orc/LazyReexports.cpp
@@ -131,6 +131,10 @@ createLocalLazyCallThroughManager(const Triple &T, ExecutionSession &ES,
case Triple::mips64el:
return LocalLazyCallThroughManager::Create<OrcMips64>(ES, ErrorHandlerAddr);
+ case Triple::loongarch64:
+ return LocalLazyCallThroughManager::Create<OrcLoongArch64>(
+ ES, ErrorHandlerAddr);
+
case Triple::x86_64:
if (T.getOS() == Triple::OSType::Win32)
return LocalLazyCallThroughManager::Create<OrcX86_64_Win32>(
diff --git a/lib/ExecutionEngine/Orc/OrcABISupport.cpp b/lib/ExecutionEngine/Orc/OrcABISupport.cpp
index 18b3c5e1..440831d7 100644
--- a/lib/ExecutionEngine/Orc/OrcABISupport.cpp
+++ b/lib/ExecutionEngine/Orc/OrcABISupport.cpp
@@ -906,5 +906,206 @@ void OrcMips64::writeIndirectStubsBlock(
Stub[8 * I + 7] = 0x00000000; // nop
}
}
+
+void OrcLoongArch64::writeResolverCode(char *ResolverWorkingMem,
+ JITTargetAddress ResolverTargetAddress,
+ JITTargetAddress ReentryFnAddr,
+ JITTargetAddress ReentryCtxAddr) {
+
+ const uint32_t ResolverCode[] = {
+ // resolver_entry:
+ 0x02fc8063, // 0x0: addi.d $r3,$r3,-224(0xf20)
+ 0x29c00064, // 0x4: st.d $r4,$r3,0
+ 0x29c02065, // 0x8: st.d $r5,$r3,8(0x8)
+ 0x29c04066, // 0xc: st.d $r6,$r3,16(0x10)
+ 0x29c06067, // 0x10: st.d $r7,$r3,24(0x18)
+ 0x29c08068, // 0x14: st.d $r8,$r3,32(0x20)
+ 0x29c0a069, // 0x18: st.d $r9,$r3,40(0x28)
+ 0x29c0c06a, // 0x1c: st.d $r10,$r3,48(0x30)
+ 0x29c0e06b, // 0x20: st.d $r11,$r3,56(0x38)
+ 0x29c1006c, // 0x24: st.d $r12,$r3,64(0x40)
+ 0x29c1206d, // 0x28: st.d $r13,$r3,72(0x48)
+ 0x29c1406e, // 0x2c: st.d $r14,$r3,80(0x50)
+ 0x29c1606f, // 0x30: st.d $r15,$r3,88(0x58)
+ 0x29c18070, // 0x34: st.d $r16,$r3,96(0x60)
+ 0x29c1a071, // 0x38: st.d $r17,$r3,104(0x68)
+ 0x29c1c072, // 0x3c: st.d $r18,$r3,112(0x70)
+ 0x29c1e073, // 0x40: st.d $r19,$r3,120(0x78)
+ 0x29c20074, // 0x44: st.d $r20,$r3,128(0x80)
+ 0x29c22076, // 0x48: st.d $r22,$r3,136(0x88)
+ 0x29c24077, // 0x4c: st.d $r23,$r3,144(0x90)
+ 0x29c26078, // 0x50: st.d $r24,$r3,152(0x98)
+ 0x29c28079, // 0x54: st.d $r25,$r3,160(0xa0)
+ 0x29c2a07a, // 0x58: st.d $r26,$r3,168(0xa8)
+ 0x29c2c07b, // 0x5c: st.d $r27,$r3,176(0xb0)
+ 0x29c2e07c, // 0x60: st.d $r28,$r3,184(0xb8)
+ 0x29c3007d, // 0x64: st.d $r29,$r3,192(0xc0)
+ 0x29c3207e, // 0x68: st.d $r30,$r3,200(0xc8)
+ 0x29c3407f, // 0x6c: st.d $r31,$r3,208(0xd0)
+ 0x29c36061, // 0x70: st.d $r1,$r3,216(0xd8)
+ // JIT re-entry ctx addr.
+ 0x00000000, // 0x74: lu12i.w $a0,hi(ctx)
+ 0x00000000, // 0x78: ori $a0,$a0,lo(ctx)
+ 0x00000000, // 0x7c: lu32i.d $a0,higher(ctx)
+ 0x00000000, // 0x80: lu52i.d $a0,$a0,highest(ctx)
+
+ 0x00150025, // 0x84: move $r5,$r1
+ 0x02ffa0a5, // 0x88: addi.d $r5,$r5,-24(0xfe8)
+
+ // JIT re-entry fn addr:
+ 0x00000000, // 0x8c: lu12i.w $r21,hi(reentry)
+ 0x00000000, // 0x90: ori $r21,$r21,lo(reentry)
+ 0x00000000, // 0x94: lu32i.d $r21,higher(reentry)
+ 0x00000000, // 0x98: lu52i.d $r21,$r21,highest(reentry)
+ 0x4c0002a1, // 0x9c: jirl $r1,$r21,0
+ 0x00150095, // 0xa0: move $r21,$r4
+ 0x28c36061, // 0xa4: ld.d $r1,$r3,216(0xd8)
+ 0x28c3407f, // 0xa8: ld.d $r31,$r3,208(0xd0)
+ 0x28c3207e, // 0xac: ld.d $r30,$r3,200(0xc8)
+ 0x28c3007d, // 0xb0: ld.d $r29,$r3,192(0xc0)
+ 0x28c2e07c, // 0xb4: ld.d $r28,$r3,184(0xb8)
+ 0x28c2c07b, // 0xb8: ld.d $r27,$r3,176(0xb0)
+ 0x28c2a07a, // 0xbc: ld.d $r26,$r3,168(0xa8)
+ 0x28c28079, // 0xc0: ld.d $r25,$r3,160(0xa0)
+ 0x28c26078, // 0xc4: ld.d $r24,$r3,152(0x98)
+ 0x28c24077, // 0xc8: ld.d $r23,$r3,144(0x90)
+ 0x28c22076, // 0xcc: ld.d $r22,$r3,136(0x88)
+ 0x28c20074, // 0xd0: ld.d $r20,$r3,128(0x80)
+ 0x28c1e073, // 0xd4: ld.d $r19,$r3,120(0x78)
+ 0x28c1c072, // 0xd8: ld.d $r18,$r3,112(0x70)
+ 0x28c1a071, // 0xdc: ld.d $r17,$r3,104(0x68)
+ 0x28c18070, // 0xe0: ld.d $r16,$r3,96(0x60)
+ 0x28c1606f, // 0xe4: ld.d $r15,$r3,88(0x58)
+ 0x28c1406e, // 0xe8: ld.d $r14,$r3,80(0x50)
+ 0x28c1206d, // 0xec: ld.d $r13,$r3,72(0x48)
+ 0x28c1006c, // 0xf0: ld.d $r12,$r3,64(0x40)
+ 0x28c0e06b, // 0xf4: ld.d $r11,$r3,56(0x38)
+ 0x28c0c06a, // 0xf8: ld.d $r10,$r3,48(0x30)
+ 0x28c0a069, // 0xfc: ld.d $r9,$r3,40(0x28)
+ 0x28c08068, // 0x100: ld.d $r8,$r3,32(0x20)
+ 0x28c06067, // 0x104: ld.d $r7,$r3,24(0x18)
+ 0x28c04066, // 0x108: ld.d $r6,$r3,16(0x10)
+ 0x28c02065, // 0x10c: ld.d $r5,$r3,8(0x8)
+ 0x28c00064, // 0x110: ld.d $r4,$r3,0
+ 0x02c38063, // 0x114: addi.d $r3,$r3,224(0xe0)
+ 0x00150281, // 0x118: move $r1,$r20
+ 0x4c0002a0, // 0x11c: jirl $r0,$r21,0
+ };
+
+ const unsigned ReentryFnAddrOffset = 0x8c; // JIT re-entry fn addr lu12i.w
+ const unsigned ReentryCtxAddrOffset = 0x74; // JIT re-entry ctx addr lu12i.w
+
+ memcpy(ResolverWorkingMem, ResolverCode, sizeof(ResolverCode));
+
+ uint32_t ReentryCtxLU12i = 0x14000004 | ((ReentryCtxAddr << 32 >> 44) << 5);
+ uint32_t ReentryCtxORi = 0x03800084 | ((ReentryCtxAddr & 0xFFF) << 10);
+ uint32_t ReentryCtxLU32i = 0x16000004 | ((ReentryCtxAddr << 12 >> 44) << 5);
+ uint32_t ReentryCtxLU52i = 0x03000084 | ((ReentryCtxAddr >> 52) << 10);
+
+ memcpy(ResolverWorkingMem + ReentryCtxAddrOffset, &ReentryCtxLU12i,
+ sizeof(ReentryCtxLU12i));
+ memcpy(ResolverWorkingMem + (ReentryCtxAddrOffset + 4), &ReentryCtxORi,
+ sizeof(ReentryCtxORi));
+ memcpy(ResolverWorkingMem + (ReentryCtxAddrOffset + 8), &ReentryCtxLU32i,
+ sizeof(ReentryCtxLU32i));
+ memcpy(ResolverWorkingMem + (ReentryCtxAddrOffset + 12), &ReentryCtxLU52i,
+ sizeof(ReentryCtxLU52i));
+
+ uint32_t ReentryLU12i = 0x14000015 | ((ReentryFnAddr << 32 >> 44) << 5);
+ uint32_t ReentryORi = 0x038002b5 | ((ReentryFnAddr & 0xFFF) << 10);
+ uint32_t ReentryLU32i = 0x16000015 | ((ReentryFnAddr << 12 >> 44) << 5);
+ uint32_t ReentryLU52i = 0x030002b5 | ((ReentryFnAddr >> 52) << 10);
+
+ memcpy(ResolverWorkingMem + ReentryFnAddrOffset, &ReentryLU12i,
+ sizeof(ReentryLU12i));
+ memcpy(ResolverWorkingMem + (ReentryFnAddrOffset + 4), &ReentryORi,
+ sizeof(ReentryORi));
+ memcpy(ResolverWorkingMem + (ReentryFnAddrOffset + 8), &ReentryLU32i,
+ sizeof(ReentryLU32i));
+ memcpy(ResolverWorkingMem + (ReentryFnAddrOffset + 12), &ReentryLU52i,
+ sizeof(ReentryLU52i));
+}
+
+void OrcLoongArch64::writeTrampolines(
+ char *TrampolineBlockWorkingMem,
+ JITTargetAddress TrampolineBlockTargetAddress,
+ JITTargetAddress ResolverFnAddr, unsigned NumTrampolines) {
+
+ uint32_t *Trampolines =
+ reinterpret_cast<uint32_t *>(TrampolineBlockWorkingMem);
+
+ uint64_t HiBits = ((ResolverFnAddr << 32 >> 44) << 5);
+ uint64_t LoBits = ((ResolverFnAddr & 0xFFF) << 10);
+ uint64_t HigherBits = ((ResolverFnAddr << 12 >> 44) << 5);
+ uint64_t HighestBits = ((ResolverFnAddr >> 52) << 10);
+
+ for (unsigned I = 0; I < NumTrampolines; ++I) {
+ Trampolines[10 * I + 0] = 0x00150034; // move $t8,$ra
+ Trampolines[10 * I + 1] =
+ 0x14000015 | HiBits; // lu12i.w $r21,hi(ResolveAddr)
+ Trampolines[10 * I + 2] =
+ 0x038002b5 | LoBits; // ori $r21,$r21,lo(ResolveAddr)
+ Trampolines[10 * I + 3] =
+ 0x16000015 | HigherBits; // lu32i $r21,higher(ResolveAddr)
+ Trampolines[10 * I + 4] =
+ 0x030002b5 | HighestBits; // lu52i $r21,$r21,highest(ResolveAddr)
+ Trampolines[10 * I + 5] = 0x4c0002a1; // jirl $ra, $r21, 0
+ }
+}
+
+void OrcLoongArch64::writeIndirectStubsBlock(
+ char *StubsBlockWorkingMem, JITTargetAddress StubsBlockTargetAddress,
+ JITTargetAddress PointersBlockTargetAddress, unsigned NumStubs) {
+ // Stub format is:
+ //
+ // .section __orc_stubs
+ // stub1:
+ // lu12i.w $r21, %abs(ptr1)<<32>>44
+ // ori $r21, $r21, %abs(ptr1)&0xfff
+ // lu32i.d $r21, %abs(ptr1)<<12>>44
+ // lu52i.d $r21, $r21, %abs(ptr1)>>52
+ // ld.d $r21, $r21, 0
+ // jirl $r0, $r21, 0
+ // stub2:
+ // lu12i.w $r21, %abs(ptr2)<<32>>44
+ // ori $r21, $r21, %abs(ptr2)&0xfff
+ // lu32i.d $r21, %abs(ptr2)<<12>>44
+ // lu52i.d $r21, $r21, %abs(ptr2)>>52
+ // ld.d $r21, $r21, 0
+ // jirl $r0, $r21, 0
+ //
+ // ...
+ //
+ // .section __orc_ptrs
+ // ptr1:
+ // .dword 0x0
+ // ptr2:
+ // .dword 0x0
+ //
+ // ...
+
+ assert(stubAndPointerRangesOk<OrcLoongArch64>(
+ StubsBlockTargetAddress, PointersBlockTargetAddress, NumStubs) &&
+ "PointersBlock is out of range");
+
+ // Populate the stubs page stubs and mark it executable.
+ uint32_t *Stub = reinterpret_cast<uint32_t *>(StubsBlockWorkingMem);
+ uint64_t PtrAddr = PointersBlockTargetAddress;
+
+ for (unsigned I = 0; I < NumStubs; ++I, PtrAddr += 8) {
+ uint64_t HiBits = ((PtrAddr << 32 >> 44) << 5);
+ uint64_t LoBits = ((PtrAddr & 0xFFF) << 10);
+ uint64_t HigherBits = ((PtrAddr << 12 >> 44) << 5);
+ uint64_t HighestBits = ((PtrAddr >> 52) << 10);
+ Stub[8 * I + 0] = 0x14000015 | HiBits; // lu12i.w $r21, hi(PtrAddr)
+ Stub[8 * I + 1] = 0x038002b5 | LoBits; // ori $r21, $r21, lo(PtrAddr)
+ Stub[8 * I + 2] = 0x16000015 | HigherBits; // lu32i.d $r21, higher(PtrAddr)
+ Stub[8 * I + 3] =
+ 0x030002b5 | HighestBits; // lu52i.d $r21, $r21, highest(PtrAddr)
+ Stub[8 * I + 4] = 0x28c002b5; // ld.d $r21, $r21, 0
+ Stub[8 * I + 5] = 0x4c0002a0; // jirl $r0, $r21, 0
+ }
+}
+
} // End namespace orc.
} // End namespace llvm.
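
The resolver, trampolines, and stubs above all materialize a 64-bit absolute address with the same lu12i.w / ori / lu32i.d / lu52i.d sequence; the shift expressions carve the address into four fields and then slide each field into the immediate slot of its instruction (the extra << 5 or << 10 in the encodings positions the field above the register operands). A standalone sketch of the address decomposition (struct and function names are illustrative):

#include <cstdint>

// Split a 64-bit address into the immediates consumed by the
// lu12i.w / ori / lu32i.d / lu52i.d sequence emitted above.
struct LA64AddrParts {
  uint32_t Hi20;      // bits [31:12] -> lu12i.w
  uint32_t Lo12;      // bits [11:0]  -> ori
  uint32_t Higher20;  // bits [51:32] -> lu32i.d
  uint32_t Highest12; // bits [63:52] -> lu52i.d
};

static LA64AddrParts splitLA64Address(uint64_t Addr) {
  LA64AddrParts P;
  P.Hi20 = static_cast<uint32_t>((Addr << 32) >> 44);     // == (Addr >> 12) & 0xfffff
  P.Lo12 = static_cast<uint32_t>(Addr & 0xfff);
  P.Higher20 = static_cast<uint32_t>((Addr << 12) >> 44); // == (Addr >> 32) & 0xfffff
  P.Highest12 = static_cast<uint32_t>(Addr >> 52);
  return P;
}
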
diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
index 28e1faab..397a10e7 100644
--- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
+++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
@@ -541,6 +541,266 @@ void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
}
}
+void RuntimeDyldELF::resolveLoongArch64Relocation(const SectionEntry &Section,
+ uint64_t Offset,
+ uint64_t Value, uint32_t Type,
+ int64_t Addend) {
+ uint32_t *TargetPtr =
+ reinterpret_cast<uint32_t *>(Section.getAddressWithOffset(Offset));
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ uint64_t tmp1, tmp2, tmp3;
+
+ LLVM_DEBUG(dbgs() << "resolveLoongArch64Relocation, LocalAddress: 0x"
+ << format("%llx", Section.getAddressWithOffset(Offset))
+ << " FinalAddress: 0x" << format("%llx", FinalAddress)
+ << " Value: 0x" << format("%llx", Value) << " Type: 0x"
+ << format("%x", Type) << " Addend: 0x"
+ << format("%llx", Addend) << "\n");
+
+ switch (Type) {
+ case ELF::R_LARCH_SOP_PUSH_GPREL:
+ case ELF::R_LARCH_SOP_PUSH_TLS_TPREL:
+ case ELF::R_LARCH_SOP_PUSH_TLS_GOT:
+ case ELF::R_LARCH_SOP_PUSH_TLS_GD:
+ default:
+ llvm_unreachable("Relocation type not implemented yet!");
+ break;
+ case ELF::R_LARCH_MARK_LA:
+ // Marks a la.abs (load absolute address) macro expansion.
+ MarkLA = true;
+ break;
+ case ELF::R_LARCH_SOP_PUSH_ABSOLUTE:
+ if (MarkLA && !Addend)
+ // push(value)
+ ValuesStack.push_back(Value);
+ else
+ // push(addend)
+ ValuesStack.push_back(Addend);
+ break;
+ case ELF::R_LARCH_SOP_PUSH_PLT_PCREL:
+ case ELF::R_LARCH_SOP_PUSH_PCREL:
+ MarkLA = false;
+ // push(value -pc + addend)
+ ValuesStack.push_back(Value - FinalAddress + Addend);
+ break;
+ case ELF::R_LARCH_SOP_NOT:
+ // pop(tmp1)
+ // push(!tmp1)
+ tmp1 = ValuesStack.pop_back_val();
+ ValuesStack.push_back(!tmp1);
+ break;
+ case ELF::R_LARCH_SOP_AND:
+ // pop(tmp2)
+ // pop(tmp1)
+ // push(tmp1 & tmp2)
+ tmp2 = ValuesStack.pop_back_val();
+ tmp1 = ValuesStack.pop_back_val();
+ ValuesStack.push_back(tmp1 & tmp2);
+ break;
+ case ELF::R_LARCH_SOP_IF_ELSE:
+ // pop(tmp3)
+ // pop(tmp2)
+ // pop(tmp1)
+ // push(tmp1 ? tmp2 : tmp3)
+ tmp3 = ValuesStack.pop_back_val();
+ tmp2 = ValuesStack.pop_back_val();
+ tmp1 = ValuesStack.pop_back_val();
+ ValuesStack.push_back(tmp1 ? tmp2 : tmp3);
+ break;
+ case ELF::R_LARCH_SOP_ADD:
+ // pop(tmp2)
+ // pop(tmp1)
+ // push(tmp1 + tmp2)
+ tmp2 = ValuesStack.pop_back_val();
+ tmp1 = ValuesStack.pop_back_val();
+ ValuesStack.push_back(tmp1 + tmp2);
+ break;
+ case ELF::R_LARCH_SOP_SUB:
+ // pop(tmp2)
+ // pop(tmp1)
+ // push(tmp1 - tmp2)
+ tmp2 = ValuesStack.pop_back_val();
+ tmp1 = ValuesStack.pop_back_val();
+ ValuesStack.push_back(tmp1 - tmp2);
+ break;
+ case ELF::R_LARCH_SOP_SR:
+ // pop(tmp2)
+ // pop(tmp1)
+ // push(tmp1 >> tmp2)
+ tmp2 = ValuesStack.pop_back_val();
+ tmp1 = ValuesStack.pop_back_val();
+ ValuesStack.push_back(tmp1 >> tmp2);
+ break;
+ case ELF::R_LARCH_SOP_SL:
+ // pop(tmp2)
+ // pop(tmp1)
+ // push(tmp1 << tmp2)
+ tmp2 = ValuesStack.pop_back_val();
+ tmp1 = ValuesStack.pop_back_val();
+ ValuesStack.push_back(tmp1 << tmp2);
+ break;
+ case ELF::R_LARCH_32:
+ support::ulittle32_t::ref{TargetPtr} =
+ static_cast<uint32_t>(Value + Addend);
+ break;
+ case ELF::R_LARCH_64:
+ support::ulittle64_t::ref{TargetPtr} = Value + Addend;
+ break;
+ case ELF::R_LARCH_SOP_POP_32_U_10_12:
+ case ELF::R_LARCH_SOP_POP_32_S_10_12:
+ // pop(tmp1)
+ // get(inst)
+ // inst=(inst & 0xffc003ff)|((tmp1 & 0xfff) << 10)
+ // write(inst)
+ tmp1 = ValuesStack.pop_back_val();
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & 0xffc003ff) |
+ static_cast<uint32_t>((tmp1 & 0xfff) << 10);
+ break;
+ case ELF::R_LARCH_SOP_POP_32_S_5_20:
+ // pop(tmp1)
+ // get(inst)
+ // inst=(inst & 0xfe00001f)|((tmp1 & 0xfffff) << 5)
+ // write(inst)
+ tmp1 = ValuesStack.pop_back_val();
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & 0xfe00001f) |
+ static_cast<uint32_t>((tmp1 & 0xfffff) << 5);
+ break;
+ case ELF::R_LARCH_SOP_POP_32_S_10_16_S2:
+ // pop(tmp1)
+ // tmp1 >>=2
+ // get(inst)
+ // inst=(inst & 0xfc0003ff)|((tmp1 & 0xffff) << 10)
+ // write(inst)
+ tmp1 = ValuesStack.pop_back_val();
+ tmp1 >>= 2;
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & 0xfc0003ff) |
+ static_cast<uint32_t>((tmp1 & 0xffff) << 10);
+ break;
+ case ELF::R_LARCH_SOP_POP_32_S_0_5_10_16_S2:
+ // pop(tmp1)
+ // tmp1 >>= 2
+ // get(inst)
+ // inst=(inst & 0xfc0003e0)|((tmp1 & 0xffff) << 10)|((tmp1 & 0x1f0000) >>
+ // 16) write(inst)
+ tmp1 = ValuesStack.pop_back_val();
+ tmp1 >>= 2;
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & 0xfc0003e0) |
+ static_cast<uint32_t>((tmp1 & 0xffff) << 10) |
+ static_cast<uint32_t>((tmp1 & 0x1f0000) >> 16);
+ break;
+ case ELF::R_LARCH_SOP_POP_32_S_0_10_10_16_S2:
+ // pop(tmp1)
+ // tmp1 >>= 2
+ // get(inst)
+ // inst=(inst & 0xfc000000)|((tmp1 & 0xffff) << 10)|((tmp1 & 0x3ff0000) >>
+ // 16) write(inst)
+ tmp1 = ValuesStack.pop_back_val();
+ tmp1 >>= 2;
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & 0xfc000000) |
+ static_cast<uint32_t>((tmp1 & 0xffff) << 10) |
+ static_cast<uint32_t>((tmp1 & 0x3ff0000) >> 16);
+ break;
+ case ELF::R_LARCH_ADD32:
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} +
+ static_cast<uint32_t>(Value + Addend));
+ break;
+ case ELF::R_LARCH_SUB32:
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} -
+ static_cast<uint32_t>(Value + Addend));
+ break;
+ case ELF::R_LARCH_ADD64:
+ support::ulittle64_t::ref{TargetPtr} =
+ (support::ulittle64_t::ref{TargetPtr} + Value + Addend);
+ break;
+ case ELF::R_LARCH_SUB64:
+ support::ulittle64_t::ref{TargetPtr} =
+ (support::ulittle64_t::ref{TargetPtr} - Value - Addend);
+ break;
+ case ELF::R_LARCH_GOT_PC_HI20:
+ FinalAddress &= (~0xfff);
+ // Addend is G(offset in got)
+ Value += Addend;
+ if ((Value & 0xfff) > 0x7ff)
+ Value += 0x1000;
+ Value &= (~0xfff);
+ tmp1 = Value - FinalAddress;
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & (~0x1ffffe0)) |
+ static_cast<uint32_t>(((tmp1 >> 12) & 0xfffff) << 5);
+ break;
+ case ELF::R_LARCH_GOT_PC_LO12:
+ Value += Addend;
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & (~0x3ffc00)) |
+ static_cast<uint32_t>((Value & 0xfff) << 10);
+ break;
+ case ELF::R_LARCH_PCALA_HI20:
+ Value += Addend;
+ tmp1 = Value & 0xfff;
+ tmp2 = FinalAddress & (~0xfff);
+ if (tmp1 > 0x7ff)
+ Value += 0x1000;
+ Value &= ~(0xfff);
+ Value -= tmp2;
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & (~0x1ffffe0)) |
+ static_cast<uint32_t>(((Value >> 12) & 0xfffff) << 5);
+ break;
+ case ELF::R_LARCH_PCALA_LO12:
+ Value += Addend;
+ tmp1 = Value & 0xfff;
+ Value = (tmp1 ^ 0x800) - 0x800;
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & (~0x3ffc00)) |
+ static_cast<uint32_t>((Value & 0xfff) << 10);
+ break;
+ case ELF::R_LARCH_B26:
+ tmp1 = Value - FinalAddress + Addend;
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & (~0x03ffffff)) |
+ static_cast<uint32_t>((tmp1 >> 18) & 0x03ff) |
+ static_cast<uint32_t>(((tmp1 >> 2) & 0xffff) << 10);
+ break;
+ case ELF::R_LARCH_ABS_HI20:
+ Value += Addend;
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & (~0x1ffffe0)) |
+ static_cast<uint32_t>(((Value >> 12) & 0xfffff) << 5);
+ break;
+ case ELF::R_LARCH_ABS_LO12:
+ Value += Addend;
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & (~0x3ffc00)) |
+ static_cast<uint32_t>((Value & 0xfff) << 10);
+ break;
+ case ELF::R_LARCH_ABS64_HI12:
+ Value += Addend;
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & (~0x3ffc00)) |
+ static_cast<uint32_t>(((Value >> 52) & 0xfff) << 10);
+ break;
+ case ELF::R_LARCH_ABS64_LO20:
+ Value += Addend;
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & (~0x1ffffe0)) |
+ static_cast<uint32_t>(((Value >> 32) & 0xfffff) << 5);
+ break;
+ case ELF::R_LARCH_32_PCREL:
+ tmp1 = Value - FinalAddress + Addend;
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & (~0xffffffff)) |
+ static_cast<uint32_t>(tmp1 & 0xffffffff);
+ break;
+ }
+}
+
void RuntimeDyldELF::setMipsABI(const ObjectFile &Obj) {
if (Arch == Triple::UnknownArch ||
!StringRef(Triple::getArchTypePrefix(Arch)).equals("mips")) {
@@ -954,6 +1214,9 @@ void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section,
resolveARMRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
(uint32_t)(Addend & 0xffffffffL));
break;
+ case Triple::loongarch64:
+ resolveLoongArch64Relocation(Section, Offset, Value, Type, Addend);
+ break;
case Triple::ppc: // Fall through.
case Triple::ppcle:
resolvePPC32Relocation(Section, Offset, Value, Type, Addend);
@@ -1266,6 +1529,34 @@ RuntimeDyldELF::processRelocationRef(
}
processSimpleRelocation(SectionID, Offset, RelType, Value);
}
+ } else if (Arch == Triple::loongarch64) {
+ if (RelType == ELF::R_LARCH_GOT_PC_HI20) {
+ uint64_t GOTOffset = allocateGOTEntries(1);
+
+ // Fill in the value of the symbol we're targeting into the GOT
+ RelocationEntry RE =
+ computeGOTOffsetRE(GOTOffset, Value.Offset, ELF::R_LARCH_64);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
+ ELF::R_LARCH_GOT_PC_HI20);
+ } else if (RelType == ELF::R_LARCH_GOT_PC_LO12) {
+ uint64_t GOTOffset = allocateGOTEntries(1);
+
+ // Fill in the value of the symbol we're targeting into the GOT
+ RelocationEntry RE =
+ computeGOTOffsetRE(GOTOffset, Value.Offset, ELF::R_LARCH_64);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
+ ELF::R_LARCH_GOT_PC_LO12);
+ } else {
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
} else if (IsMipsO32ABI) {
uint8_t *Placeholder = reinterpret_cast<uint8_t *>(
computePlaceholderAddress(SectionID, Offset));
@@ -1779,6 +2070,7 @@ size_t RuntimeDyldELF::getGOTEntrySize() {
case Triple::x86_64:
case Triple::aarch64:
case Triple::aarch64_be:
+ case Triple::loongarch64:
case Triple::ppc64:
case Triple::ppc64le:
case Triple::systemz:
@@ -1929,6 +2221,10 @@ bool RuntimeDyldELF::relocationNeedsGot(const RelocationRef &R) const {
return RelTy == ELF::R_AARCH64_ADR_GOT_PAGE ||
RelTy == ELF::R_AARCH64_LD64_GOT_LO12_NC;
+ if (Arch == Triple::loongarch64)
+ return RelTy == ELF::R_LARCH_GOT_PC_HI20 ||
+ RelTy == ELF::R_LARCH_GOT_PC_LO12;
+
if (Arch == Triple::x86_64)
return RelTy == ELF::R_X86_64_GOTPCREL ||
RelTy == ELF::R_X86_64_GOTPCRELX ||
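
Two relocation styles coexist above: the legacy R_LARCH_SOP_* codes drive a small stack machine (operands are pushed, combined, and finally popped into an instruction field), while the newer HI20/LO12 pairs use the usual page split, where LO12 is the sign-extended low 12 bits and HI20 carries the page delta, bumped by one page whenever the low part is negative so that page + (hi20 << 12) + lo12 lands back on the target. A hedged arithmetic sketch of the PCALA pair (the helper name is illustrative):

#include <cstdint>

// Split a pc-relative reference into the %pc_hi20 / %pc_lo12 pair handled by
// R_LARCH_PCALA_HI20 / R_LARCH_PCALA_LO12 above (a pcalau12i + addi.d style
// sequence on the instruction side).
static void pcalaParts(uint64_t Dest, uint64_t PC, int64_t &Hi20,
                       int64_t &Lo12) {
  // Sign-extended low 12 bits of the destination.
  Lo12 = static_cast<int64_t>(Dest << 52) >> 52;
  // Destination page, rounded up by one page when Lo12 is negative.
  uint64_t DestPage =
      (Dest + (((Dest & 0xfff) > 0x7ff) ? 0x1000 : 0)) & ~uint64_t(0xfff);
  Hi20 = static_cast<int64_t>(DestPage - (PC & ~uint64_t(0xfff))) >> 12;
  // Invariant: (PC & ~0xfff) + (Hi20 << 12) + Lo12 == Dest.
}
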
diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
index 31892b74..5295853e 100644
--- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
+++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
@@ -48,6 +48,10 @@ class RuntimeDyldELF : public RuntimeDyldImpl {
void resolveARMRelocation(const SectionEntry &Section, uint64_t Offset,
uint32_t Value, uint32_t Type, int32_t Addend);
+ void resolveLoongArch64Relocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend);
+
void resolvePPC32Relocation(const SectionEntry &Section, uint64_t Offset,
uint64_t Value, uint32_t Type, int64_t Addend);
@@ -155,6 +159,10 @@ private:
// EH frame sections with the memory manager.
SmallVector<SID, 2> UnregisteredEHFrameSections;
+ // State for evaluating LoongArch R_LARCH_SOP_* stack-machine relocations.
+ SmallVector<uint64_t, 8> ValuesStack;
+ bool MarkLA;
+
// Map between GOT relocation value and corresponding GOT offset
std::map<RelocationValueRef, uint64_t> GOTOffsetMap;
diff --git a/lib/IR/Function.cpp b/lib/IR/Function.cpp
index 17247123..e74c6b6a 100644
--- a/lib/IR/Function.cpp
+++ b/lib/IR/Function.cpp
@@ -37,6 +37,7 @@
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsBPF.h"
#include "llvm/IR/IntrinsicsHexagon.h"
+#include "llvm/IR/IntrinsicsLoongArch.h"
#include "llvm/IR/IntrinsicsMips.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
diff --git a/lib/Object/ELF.cpp b/lib/Object/ELF.cpp
index 264f115d..21c820fd 100644
--- a/lib/Object/ELF.cpp
+++ b/lib/Object/ELF.cpp
@@ -87,6 +87,13 @@ StringRef llvm::object::getELFRelocationTypeName(uint32_t Machine,
break;
}
break;
+ case ELF::EM_LOONGARCH:
+ switch (Type) {
+#include "llvm/BinaryFormat/ELFRelocs/LoongArch.def"
+ default:
+ break;
+ }
+ break;
case ELF::EM_PPC:
switch (Type) {
#include "llvm/BinaryFormat/ELFRelocs/PowerPC.def"
diff --git a/lib/Object/RelocationResolver.cpp b/lib/Object/RelocationResolver.cpp
index 204577af..5496db46 100644
--- a/lib/Object/RelocationResolver.cpp
+++ b/lib/Object/RelocationResolver.cpp
@@ -463,6 +463,28 @@ static uint64_t resolveRISCV(uint64_t Type, uint64_t Offset, uint64_t S,
}
}
+static bool supportsLoongArch(uint64_t Type) {
+ switch (Type) {
+ case ELF::R_LARCH_32:
+ case ELF::R_LARCH_64:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static uint64_t resolveLoongArch(uint64_t Type, uint64_t Offset, uint64_t S,
+ uint64_t LocData, int64_t Addend) {
+ switch (Type) {
+ case ELF::R_LARCH_32:
+ return (S + Addend) & 0xFFFFFFFF;
+ case ELF::R_LARCH_64:
+ return S + Addend;
+ default:
+ llvm_unreachable("Invalid relocation type");
+ }
+}
+
static bool supportsCOFFX86(uint64_t Type) {
switch (Type) {
case COFF::IMAGE_REL_I386_SECREL:
@@ -675,6 +697,8 @@ getRelocationResolver(const ObjectFile &Obj) {
return {supportsAmdgpu, resolveAmdgpu};
case Triple::riscv64:
return {supportsRISCV, resolveRISCV};
+ case Triple::loongarch64:
+ return {supportsLoongArch, resolveLoongArch};
default:
return {nullptr, nullptr};
}
@@ -708,6 +732,8 @@ getRelocationResolver(const ObjectFile &Obj) {
return {supportsHexagon, resolveHexagon};
case Triple::riscv32:
return {supportsRISCV, resolveRISCV};
+ case Triple::loongarch32:
+ return {supportsLoongArch, resolveLoongArch};
default:
return {nullptr, nullptr};
}
diff --git a/lib/ObjectYAML/ELFYAML.cpp b/lib/ObjectYAML/ELFYAML.cpp
index 05d30577..dac60766 100644
--- a/lib/ObjectYAML/ELFYAML.cpp
+++ b/lib/ObjectYAML/ELFYAML.cpp
@@ -234,6 +234,7 @@ void ScalarEnumerationTraits<ELFYAML::ELF_EM>::enumeration(
ECase(EM_BPF);
ECase(EM_VE);
ECase(EM_CSKY);
+ ECase(EM_LOONGARCH);
#undef ECase
IO.enumFallback<Hex16>(Value);
}
@@ -452,6 +453,13 @@ void ScalarBitSetTraits<ELFYAML::ELF_EF>::bitset(IO &IO,
BCase(EF_AMDGPU_XNACK);
BCase(EF_AMDGPU_SRAM_ECC);
break;
+ case ELF::EM_LOONGARCH:
+ BCaseMask(EF_LOONGARCH_ABI_SOFT_FLOAT, EF_LOONGARCH_ABI_MODIFIER_MASK);
+ BCaseMask(EF_LOONGARCH_ABI_SINGLE_FLOAT, EF_LOONGARCH_ABI_MODIFIER_MASK);
+ BCaseMask(EF_LOONGARCH_ABI_DOUBLE_FLOAT, EF_LOONGARCH_ABI_MODIFIER_MASK);
+ BCaseMask(EF_LOONGARCH_OBJABI_V0, EF_LOONGARCH_OBJABI_MASK);
+ BCaseMask(EF_LOONGARCH_OBJABI_V1, EF_LOONGARCH_OBJABI_MASK);
+ break;
default:
break;
}
@@ -691,6 +699,9 @@ void ScalarEnumerationTraits<ELFYAML::ELF_REL>::enumeration(
case ELF::EM_PPC64:
#include "llvm/BinaryFormat/ELFRelocs/PowerPC64.def"
break;
+ case ELF::EM_LOONGARCH:
+#include "llvm/BinaryFormat/ELFRelocs/LoongArch.def"
+ break;
default:
// Nothing to do.
break;
diff --git a/lib/Support/Host.cpp b/lib/Support/Host.cpp
index 09146c47..64246c65 100644
--- a/lib/Support/Host.cpp
+++ b/lib/Support/Host.cpp
@@ -1219,6 +1219,45 @@ StringRef sys::getHostCPUName() {
StringRef Content = P ? P->getBuffer() : "";
return detail::getHostCPUNameForS390x(Content);
}
+#elif defined(__linux__) && defined(__loongarch__)
+// LoongArch PRID register layout:
+// +----------------+----------------+----------------+----------------+
+// | Company Options| Company ID | Processor ID | Revision |
+// +----------------+----------------+----------------+----------------+
+// 31 24 23 16 15 8 7 0
+
+#define PRID_OPT_MASK 0xff000000
+#define PRID_COMP_MASK 0xff0000
+#define PRID_COMP_LOONGSON 0x140000
+#define PRID_IMP_MASK 0xff00
+
+#define PRID_IMP_LOONGSON_32 0x4200 /* Loongson 32bit */
+#define PRID_IMP_LOONGSON_64R 0x6100 /* Reduced Loongson 64bit */
+#define PRID_IMP_LOONGSON_64C 0x6300 /* Classic Loongson 64bit */
+#define PRID_IMP_LOONGSON_64G 0xc000 /* Generic Loongson 64bit */
+
+StringRef sys::getHostCPUName() {
+ // Read the PRID via cpucfg word 0 to pick the CPU name.
+ unsigned CPUCFG_NUM = 0; // prid
+ unsigned prid;
+
+ __asm__("cpucfg %[prid], %[CPUCFG_NUM]\n\t"
+ :[prid]"=r"(prid)
+ :[CPUCFG_NUM]"r"(CPUCFG_NUM));
+
+ if ((prid & PRID_COMP_MASK) == PRID_COMP_LOONGSON) {// for Loongson
+ switch (prid & PRID_IMP_MASK) {
+ case PRID_IMP_LOONGSON_32: // not supported
+ return "generic-la32";
+ case PRID_IMP_LOONGSON_64R:
+ case PRID_IMP_LOONGSON_64C:
+ case PRID_IMP_LOONGSON_64G:
+ return "la464";
+ }
+ }
+
+ return "generic";
+}
#elif defined(__APPLE__) && defined(__aarch64__)
StringRef sys::getHostCPUName() {
return "cyclone";
@@ -1629,6 +1668,36 @@ bool sys::getHostCPUFeatures(StringMap<bool> &Features) {
return true;
}
+#elif defined(__linux__) && defined(__loongarch__)
+bool sys::getHostCPUFeatures(StringMap<bool> &Features) {
+ std::unique_ptr<llvm::MemoryBuffer> P = getProcCpuinfoContent();
+ if (!P)
+ return false;
+
+ SmallVector<StringRef, 32> Lines;
+ P->getBuffer().split(Lines, "\n");
+
+ SmallVector<StringRef, 32> CPUFeatures;
+
+ // Look for the CPU features.
+ for (unsigned I = 0, E = Lines.size(); I != E; ++I)
+ if (Lines[I].startswith("features")) {
+ Lines[I].split(CPUFeatures, ' ');
+ break;
+ }
+
+ for (unsigned I = 0, E = CPUFeatures.size(); I != E; ++I) {
+ StringRef LLVMFeatureStr = StringSwitch<StringRef>(CPUFeatures[I])
+ .Case("lsx", "lsx")
+ .Case("lasx", "lasx")
+ .Default("");
+
+ if (LLVMFeatureStr != "")
+ Features[LLVMFeatureStr] = true;
+ }
+
+ return true;
+}
#else
bool sys::getHostCPUFeatures(StringMap<bool> &Features) { return false; }
#endif
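
getHostCPUName() above reads cpucfg word 0, which holds the PRID, and then masks out the company and implementation fields shown in the register diagram. The same decoding on a plain integer, as a small sketch (the sample value is hypothetical):

#include <cstdint>

// Decode a PRID word the same way getHostCPUName() does above.
static const char *cpuNameFromPRID(uint32_t Prid) {
  if ((Prid & 0xff0000) != 0x140000) // PRID_COMP_LOONGSON
    return "generic";
  switch (Prid & 0xff00) {            // PRID_IMP_MASK
  case 0x4200: return "generic-la32"; // 32-bit Loongson
  case 0x6100:                        // reduced 64-bit
  case 0x6300:                        // classic 64-bit
  case 0xc000: return "la464";        // generic 64-bit
  default:     return "generic";
  }
}

// Example with a hypothetical PRID value:
//   cpuNameFromPRID(0x0014c012) == "la464"
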
diff --git a/lib/Support/Triple.cpp b/lib/Support/Triple.cpp
index 4f483c96..de0ae535 100644
--- a/lib/Support/Triple.cpp
+++ b/lib/Support/Triple.cpp
@@ -44,6 +44,8 @@ StringRef Triple::getArchTypeName(ArchType Kind) {
case lanai: return "lanai";
case le32: return "le32";
case le64: return "le64";
+ case loongarch32: return "loongarch32";
+ case loongarch64: return "loongarch64";
case mips64: return "mips64";
case mips64el: return "mips64el";
case mips: return "mips";
@@ -155,6 +157,9 @@ StringRef Triple::getArchTypePrefix(ArchType Kind) {
case ve: return "ve";
case csky: return "csky";
+
+ case loongarch32:
+ case loongarch64: return "loongarch";
}
}
@@ -241,6 +246,7 @@ StringRef Triple::getEnvironmentTypeName(EnvironmentType Kind) {
case GNUEABIHF: return "gnueabihf";
case GNUX32: return "gnux32";
case GNUILP32: return "gnu_ilp32";
+ case GNUABILPX32: return "gnuabilpx32";
case Itanium: return "itanium";
case MSVC: return "msvc";
case MacABI: return "macabi";
@@ -327,6 +333,8 @@ Triple::ArchType Triple::getArchTypeForLLVMName(StringRef Name) {
.Case("renderscript64", renderscript64)
.Case("ve", ve)
.Case("csky", csky)
+ .Case("loongarch32", loongarch32)
+ .Case("loongarch64", loongarch64)
.Default(UnknownArch);
}
@@ -459,6 +467,8 @@ static Triple::ArchType parseArch(StringRef ArchName) {
.Case("wasm32", Triple::wasm32)
.Case("wasm64", Triple::wasm64)
.Case("csky", Triple::csky)
+ .Case("loongarch32", Triple::loongarch32)
+ .Case("loongarch64", Triple::loongarch64)
.Default(Triple::UnknownArch);
// Some architectures require special parsing logic just to compute the
@@ -703,6 +713,8 @@ static Triple::ObjectFormatType getDefaultFormat(const Triple &T) {
case Triple::lanai:
case Triple::le32:
case Triple::le64:
+ case Triple::loongarch32:
+ case Triple::loongarch64:
case Triple::mips64:
case Triple::mips64el:
case Triple::mips:
@@ -779,6 +791,7 @@ Triple::Triple(const Twine &Str)
.StartsWith("mipsisa64", Triple::GNUABI64)
.StartsWith("mipsisa32", Triple::GNU)
.Cases("mips", "mipsel", "mipsr6", "mipsr6el", Triple::GNU)
+ .Cases("loongarch32", "loongarch64", Triple::GNU)
.Default(UnknownEnvironment);
}
}
@@ -1276,6 +1289,7 @@ static unsigned getArchPointerBitWidth(llvm::Triple::ArchType Arch) {
case llvm::Triple::kalimba:
case llvm::Triple::lanai:
case llvm::Triple::le32:
+ case llvm::Triple::loongarch32:
case llvm::Triple::mips:
case llvm::Triple::mipsel:
case llvm::Triple::nvptx:
@@ -1305,6 +1319,7 @@ static unsigned getArchPointerBitWidth(llvm::Triple::ArchType Arch) {
case llvm::Triple::bpfel:
case llvm::Triple::hsail64:
case llvm::Triple::le64:
+ case llvm::Triple::loongarch64:
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
case llvm::Triple::nvptx64:
@@ -1360,6 +1375,7 @@ Triple Triple::get32BitArchVariant() const {
case Triple::kalimba:
case Triple::lanai:
case Triple::le32:
+ case Triple::loongarch32:
case Triple::mips:
case Triple::mipsel:
case Triple::nvptx:
@@ -1387,6 +1403,7 @@ Triple Triple::get32BitArchVariant() const {
case Triple::amdil64: T.setArch(Triple::amdil); break;
case Triple::hsail64: T.setArch(Triple::hsail); break;
case Triple::le64: T.setArch(Triple::le32); break;
+ case Triple::loongarch64: T.setArch(Triple::loongarch32); break;
case Triple::mips64: T.setArch(Triple::mips); break;
case Triple::mips64el: T.setArch(Triple::mipsel); break;
case Triple::nvptx64: T.setArch(Triple::nvptx); break;
@@ -1430,6 +1447,7 @@ Triple Triple::get64BitArchVariant() const {
case Triple::bpfel:
case Triple::hsail64:
case Triple::le64:
+ case Triple::loongarch64:
case Triple::mips64:
case Triple::mips64el:
case Triple::nvptx64:
@@ -1452,6 +1470,7 @@ Triple Triple::get64BitArchVariant() const {
case Triple::armeb: T.setArch(Triple::aarch64_be); break;
case Triple::hsail: T.setArch(Triple::hsail64); break;
case Triple::le32: T.setArch(Triple::le64); break;
+ case Triple::loongarch32: T.setArch(Triple::loongarch64); break;
case Triple::mips: T.setArch(Triple::mips64); break;
case Triple::mipsel: T.setArch(Triple::mips64el); break;
case Triple::nvptx: T.setArch(Triple::nvptx64); break;
@@ -1486,6 +1505,8 @@ Triple Triple::getBigEndianArchVariant() const {
case Triple::kalimba:
case Triple::le32:
case Triple::le64:
+ case Triple::loongarch32:
+ case Triple::loongarch64:
case Triple::msp430:
case Triple::nvptx64:
case Triple::nvptx:
@@ -1575,6 +1596,8 @@ bool Triple::isLittleEndian() const {
case Triple::kalimba:
case Triple::le32:
case Triple::le64:
+ case Triple::loongarch32:
+ case Triple::loongarch64:
case Triple::mips64el:
case Triple::mipsel:
case Triple::msp430:
diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index f4e47170..5d24fa29 100644
--- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -104,6 +104,7 @@ static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44;
static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
+static const uint64_t kLoongArch64_ShadowOffset64 = 1ULL << 46;
static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kRISCV64_ShadowOffset64 = 0x20000000;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
@@ -438,6 +439,7 @@ static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize,
bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
bool IsMIPS32 = TargetTriple.isMIPS32();
bool IsMIPS64 = TargetTriple.isMIPS64();
+ bool IsLoongArch64 = TargetTriple.isLoongArch64();
bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb();
bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64;
bool IsRISCV64 = TargetTriple.getArch() == Triple::riscv64;
@@ -503,7 +505,9 @@ static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize,
Mapping.Offset = kWindowsShadowOffset64;
} else if (IsMIPS64)
Mapping.Offset = kMIPS64_ShadowOffset64;
- else if (IsIOS)
+ else if (IsLoongArch64) {
+ Mapping.Offset = kLoongArch64_ShadowOffset64;
+ } else if (IsIOS)
Mapping.Offset = kDynamicShadowSentinel;
else if (IsMacOS && IsAArch64)
Mapping.Offset = kDynamicShadowSentinel;
@@ -529,7 +533,7 @@ static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize,
// we could OR the constant in a single instruction, but it's more
// efficient to load it once and use indexed addressing.
Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS4CPU &&
- !IsRISCV64 &&
+ !IsRISCV64 && !IsLoongArch64 &&
!(Mapping.Offset & (Mapping.Offset - 1)) &&
Mapping.Offset != kDynamicShadowSentinel;
bool IsAndroidWithIfuncSupport =
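
With this constant, loongarch64 uses the standard ASan mapping Shadow = (Addr >> Scale) + Offset, and because IsLoongArch64 is excluded from OrShadowOffset above, the offset is always added rather than OR'ed in. A minimal sketch, assuming the default shadow scale of 3 (8 application bytes per shadow byte):

#include <cstdint>

// ASan shadow mapping implied by kLoongArch64_ShadowOffset64 above.
static uint64_t la64AsanShadowFor(uint64_t Addr) {
  const uint64_t kShadowOffset = 1ULL << 46; // kLoongArch64_ShadowOffset64
  const unsigned kShadowScale = 3;           // default 8-to-1 mapping (assumed)
  return (Addr >> kShadowScale) + kShadowOffset;
}
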
diff --git a/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 1b14b8d5..97ebd2db 100644
--- a/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -805,6 +805,7 @@ bool DataFlowSanitizer::init(Module &M) {
bool IsMIPS64 = TargetTriple.isMIPS64();
bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64 ||
TargetTriple.getArch() == Triple::aarch64_be;
+ bool IsLoongArch64 = TargetTriple.getArch() == Triple::loongarch64;
const DataLayout &DL = M.getDataLayout();
@@ -823,6 +824,8 @@ bool DataFlowSanitizer::init(Module &M) {
// AArch64 supports multiple VMAs and the shadow mask is set at runtime.
else if (IsAArch64)
DFSanRuntimeShadowMask = true;
+ else if (IsLoongArch64)
+ ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0x7ffff0000000LL);
else
report_fatal_error("unsupported triple");
@@ -1040,14 +1043,21 @@ void DataFlowSanitizer::initializeRuntimeFunctions(Module &M) {
// Initializes event callback functions and declare them in the module
void DataFlowSanitizer::initializeCallbackFunctions(Module &M) {
- DFSanLoadCallbackFn = Mod->getOrInsertFunction("__dfsan_load_callback",
- DFSanLoadStoreCallbackFnTy);
- DFSanStoreCallbackFn = Mod->getOrInsertFunction("__dfsan_store_callback",
- DFSanLoadStoreCallbackFnTy);
+ Triple TargetTriple(M.getTargetTriple());
+ bool IsLoongArch64 = TargetTriple.isLoongArch64();
+ AttributeList AL;
+ AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
+ DFSanLoadCallbackFn = Mod->getOrInsertFunction(
+ "__dfsan_load_callback", DFSanLoadStoreCallbackFnTy,
+ IsLoongArch64 ? AL : AttributeList());
+ DFSanStoreCallbackFn = Mod->getOrInsertFunction(
+ "__dfsan_store_callback", DFSanLoadStoreCallbackFnTy,
+ IsLoongArch64 ? AL : AttributeList());
DFSanMemTransferCallbackFn = Mod->getOrInsertFunction(
"__dfsan_mem_transfer_callback", DFSanMemTransferCallbackFnTy);
DFSanCmpCallbackFn =
- Mod->getOrInsertFunction("__dfsan_cmp_callback", DFSanCmpCallbackFnTy);
+ Mod->getOrInsertFunction("__dfsan_cmp_callback", DFSanCmpCallbackFnTy,
+ IsLoongArch64 ? AL : AttributeList());
}
bool DataFlowSanitizer::runImpl(Module &M) {
@@ -1686,7 +1696,11 @@ void DFSanVisitor::visitLoadInst(LoadInst &LI) {
if (ClEventCallbacks) {
IRBuilder<> IRB(&LI);
Value *Addr8 = IRB.CreateBitCast(LI.getPointerOperand(), DFSF.DFS.Int8Ptr);
- IRB.CreateCall(DFSF.DFS.DFSanLoadCallbackFn, {PrimitiveShadow, Addr8});
+ CallInst *CallI =
+ IRB.CreateCall(DFSF.DFS.DFSanLoadCallbackFn, {PrimitiveShadow, Addr8});
+ Triple TargetTriple(LI.getModule()->getTargetTriple());
+ if (TargetTriple.getArch() == Triple::loongarch64)
+ CallI->addParamAttr(0, Attribute::ZExt);
}
}
@@ -1768,7 +1782,11 @@ void DFSanVisitor::visitStoreInst(StoreInst &SI) {
if (ClEventCallbacks) {
IRBuilder<> IRB(&SI);
Value *Addr8 = IRB.CreateBitCast(SI.getPointerOperand(), DFSF.DFS.Int8Ptr);
- IRB.CreateCall(DFSF.DFS.DFSanStoreCallbackFn, {PrimitiveShadow, Addr8});
+ CallInst *CallI =
+ IRB.CreateCall(DFSF.DFS.DFSanStoreCallbackFn, {PrimitiveShadow, Addr8});
+ Triple TargetTriple(SI.getModule()->getTargetTriple());
+ if (TargetTriple.getArch() == Triple::loongarch64)
+ CallI->addParamAttr(0, Attribute::ZExt);
}
}
@@ -1786,7 +1804,11 @@ void DFSanVisitor::visitCmpInst(CmpInst &CI) {
Value *CombinedShadow = visitOperandShadowInst(CI);
if (ClEventCallbacks) {
IRBuilder<> IRB(&CI);
- IRB.CreateCall(DFSF.DFS.DFSanCmpCallbackFn, CombinedShadow);
+ CallInst *CallI =
+ IRB.CreateCall(DFSF.DFS.DFSanCmpCallbackFn, CombinedShadow);
+ Triple TargetTriple(CI.getModule()->getTargetTriple());
+ if (TargetTriple.getArch() == Triple::loongarch64)
+ CallI->addParamAttr(0, Attribute::ZExt);
}
}
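
The ShadowPtrMask chosen for loongarch64 (~0x7ffff0000000) clears the bits that distinguish the application region, so DFSan can derive a shadow address by masking and scaling. A rough sketch of that transform, assuming the 2-byte (16-bit) shadow labels used by this version of DFSan:

#include <cstdint>

// Approximate DFSan shadow address computation for loongarch64: mask the
// application address, then scale by the shadow label width.
static uint64_t la64DfsanShadowFor(uint64_t AppAddr) {
  const uint64_t kShadowPtrMask = ~0x7ffff0000000ULL; // from the patch above
  const uint64_t kShadowWidthBytes = 2;               // 16-bit labels (assumed)
  return (AppAddr & kShadowPtrMask) * kShadowWidthBytes;
}
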
diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 7a687458..75f2e66e 100644
--- a/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -384,6 +384,14 @@ static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
#endif
};
+// loongarch64 Linux
+static const MemoryMapParams Linux_LOONGARCH64_MemoryMapParams = {
+ 0, // AndMask (not used)
+ 0x500000000000, // XorMask
+ 0, // ShadowBase (not used)
+ 0x100000000000, // OriginBase
+};
+
// mips64 Linux
static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
0, // AndMask (not used)
@@ -445,6 +453,11 @@ static const PlatformMemoryMapParams Linux_X86_MemoryMapParams = {
&Linux_X86_64_MemoryMapParams,
};
+static const PlatformMemoryMapParams Linux_LOONGARCH_MemoryMapParams = {
+ nullptr,
+ &Linux_LOONGARCH64_MemoryMapParams,
+};
+
static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
nullptr,
&Linux_MIPS64_MemoryMapParams,
@@ -502,6 +515,7 @@ public:
private:
friend struct MemorySanitizerVisitor;
friend struct VarArgAMD64Helper;
+ friend struct VarArgLoongArch64Helper;
friend struct VarArgMIPS64Helper;
friend struct VarArgAArch64Helper;
friend struct VarArgPowerPC64Helper;
@@ -944,6 +958,9 @@ void MemorySanitizer::initializeModule(Module &M) {
case Triple::x86:
MapParams = Linux_X86_MemoryMapParams.bits32;
break;
+ case Triple::loongarch64:
+ MapParams = Linux_LOONGARCH_MemoryMapParams.bits64;
+ break;
case Triple::mips64:
case Triple::mips64el:
MapParams = Linux_MIPS_MemoryMapParams.bits64;
@@ -4397,6 +4414,117 @@ struct VarArgAMD64Helper : public VarArgHelper {
}
};
+/// LoongArch64-specific implementation of VarArgHelper.
+struct VarArgLoongArch64Helper : public VarArgHelper {
+ Function &F;
+ MemorySanitizer &MS;
+ MemorySanitizerVisitor &MSV;
+ Value *VAArgTLSCopy = nullptr;
+ Value *VAArgSize = nullptr;
+
+ SmallVector<CallInst*, 16> VAStartInstrumentationList;
+
+ VarArgLoongArch64Helper(Function &F, MemorySanitizer &MS,
+ MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
+
+ void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
+ unsigned VAArgOffset = 0;
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ for (auto ArgIt = CB.arg_begin() + CB.getFunctionType()->getNumParams(),
+ End = CB.arg_end();
+ ArgIt != End; ++ArgIt) {
+ Triple TargetTriple(F.getParent()->getTargetTriple());
+ Value *A = *ArgIt;
+ Value *Base;
+ uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
+ Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset, ArgSize);
+ VAArgOffset += ArgSize;
+ VAArgOffset = alignTo(VAArgOffset, 8);
+ if (!Base)
+ continue;
+ IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
+ }
+
+ Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset);
+ // Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
+ // a new class member i.e. it is the total size of all VarArgs.
+ IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
+ }
+
+ /// Compute the shadow address for a given va_arg.
+ Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
+ unsigned ArgOffset, unsigned ArgSize) {
+ // Make sure we don't overflow __msan_va_arg_tls.
+ if (ArgOffset + ArgSize > kParamTLSSize)
+ return nullptr;
+ Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
+ Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
+ return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
+ "_msarg");
+ }
+
+ void visitVAStartInst(VAStartInst &I) override {
+ IRBuilder<> IRB(&I);
+ VAStartInstrumentationList.push_back(&I);
+ Value *VAListTag = I.getArgOperand(0);
+ Value *ShadowPtr, *OriginPtr;
+ const Align Alignment = Align(8);
+ std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
+ VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
+ IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
+ /* size */ 8, Alignment, false);
+ }
+
+ void visitVACopyInst(VACopyInst &I) override {
+ IRBuilder<> IRB(&I);
+ VAStartInstrumentationList.push_back(&I);
+ Value *VAListTag = I.getArgOperand(0);
+ Value *ShadowPtr, *OriginPtr;
+ const Align Alignment = Align(8);
+ std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
+ VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
+ IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
+ /* size */ 8, Alignment, false);
+ }
+
+ void finalizeInstrumentation() override {
+ assert(!VAArgSize && !VAArgTLSCopy &&
+ "finalizeInstrumentation called twice");
+ IRBuilder<> IRB(MSV.FnPrologueEnd);
+ VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
+ Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
+ VAArgSize);
+
+ if (!VAStartInstrumentationList.empty()) {
+ // If there is a va_start in this function, make a backup copy of
+ // va_arg_tls somewhere in the function entry block.
+ VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
+ IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
+ }
+
+ // Instrument va_start.
+ // Copy va_list shadow from the backup copy of the TLS contents.
+ for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
+ CallInst *OrigInst = VAStartInstrumentationList[i];
+ IRBuilder<> IRB(OrigInst->getNextNode());
+ Value *VAListTag = OrigInst->getArgOperand(0);
+ Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
+ Value *RegSaveAreaPtrPtr =
+ IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
+ PointerType::get(Type::getInt64PtrTy(*MS.C), 0));
+ Value *RegSaveAreaPtr =
+ IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
+ Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
+ const Align Alignment = Align(8);
+ std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
+ MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
+ Alignment, /*isStore*/ true);
+ IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
+ CopySize);
+ }
+ }
+};
+
/// MIPS64-specific implementation of VarArgHelper.
struct VarArgMIPS64Helper : public VarArgHelper {
Function &F;
@@ -5296,6 +5424,8 @@ static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
return new VarArgPowerPC64Helper(Func, Msan, Visitor);
else if (TargetTriple.getArch() == Triple::systemz)
return new VarArgSystemZHelper(Func, Msan, Visitor);
+ else if (TargetTriple.getArch() == Triple::loongarch64)
+ return new VarArgLoongArch64Helper(Func, Msan, Visitor);
else
return new VarArgNoOpHelper(Func, Msan, Visitor);
}
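VarArgLoongArch64Helper above follows the existing MIPS64 helper: the shadow of every variadic argument is written into __msan_va_arg_tls at the current offset, the offset then advances by the argument size rounded up to 8, and the running total is stored through MS.VAArgOverflowSizeTLS so finalizeInstrumentation can copy the right amount at each va_start. A small standalone sketch of that offset computation follows, assuming a call such as printf("%d %f %d", i, d, j); alignTo8 and the argument table are illustrative only.

// Sketch only: the slot layout visitCallBase computes for variadic arguments.
#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

static uint64_t alignTo8(uint64_t X) { return (X + 7) & ~uint64_t(7); }

int main() {
  // Promoted IR types of the variadic part of printf("%d %f %d", i, d, j).
  std::vector<std::pair<const char *, uint64_t>> Args = {
      {"i32", 4}, {"double", 8}, {"i32", 4}};

  uint64_t Offset = 0; // mirrors VAArgOffset in the helper
  for (const auto &A : Args) {
    std::printf("%-6s -> __msan_va_arg_tls + %llu\n", A.first,
                (unsigned long long)Offset);
    Offset = alignTo8(Offset + A.second);
  }
  // This total is what the helper writes to __msan_va_arg_overflow_size_tls.
  std::printf("total va_arg shadow size: %llu bytes\n",
              (unsigned long long)Offset);
  return 0;
}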
diff --git a/lib/XRay/InstrumentationMap.cpp b/lib/XRay/InstrumentationMap.cpp
index e6534e5a..66c07f9d 100644
--- a/lib/XRay/InstrumentationMap.cpp
+++ b/lib/XRay/InstrumentationMap.cpp
@@ -61,6 +61,7 @@ loadObj(StringRef Filename, object::OwningBinary<object::ObjectFile> &ObjFile,
if ((!ObjFile.getBinary()->isELF() && !ObjFile.getBinary()->isMachO()) ||
!(ObjFile.getBinary()->getArch() == Triple::x86_64 ||
ObjFile.getBinary()->getArch() == Triple::ppc64le ||
+ ObjFile.getBinary()->getArch() == Triple::loongarch64 ||
ObjFile.getBinary()->getArch() == Triple::arm ||
ObjFile.getBinary()->getArch() == Triple::aarch64))
return make_error<StringError>(
diff --git a/test/ExecutionEngine/JITLink/X86/MachO_GOTAndStubsOptimization.s b/test/ExecutionEngine/JITLink/X86/MachO_GOTAndStubsOptimization.s
index 98df053c..45c078c1 100644
--- a/test/ExecutionEngine/JITLink/X86/MachO_GOTAndStubsOptimization.s
+++ b/test/ExecutionEngine/JITLink/X86/MachO_GOTAndStubsOptimization.s
@@ -1,3 +1,4 @@
+# UNSUPPORTED: loongarch64
# RUN: rm -rf %t && mkdir -p %t
# RUN: llvm-mc -triple=x86_64-apple-macos10.9 -filetype=obj \
# RUN: -o %t/helper.o %S/Inputs/MachO_GOTAndStubsOptimizationHelper.s
diff --git a/test/ExecutionEngine/MCJIT/2003-01-04-ArgumentBug.ll b/test/ExecutionEngine/MCJIT/2003-01-04-ArgumentBug.ll
index 68fdefef..f7bb02b2 100644
--- a/test/ExecutionEngine/MCJIT/2003-01-04-ArgumentBug.ll
+++ b/test/ExecutionEngine/MCJIT/2003-01-04-ArgumentBug.ll
@@ -1,4 +1,6 @@
; RUN: %lli %s > /dev/null
+; This test would need -mattr='+d' added to its RUN line to pass on loongarch64; mark it XFAIL for now.
+; XFAIL: loongarch64
 
define i32 @foo(i32 %X, i32 %Y, double %A) {
%cond212 = fcmp une double %A, 1.000000e+00 ; <i1> [#uses=1]
diff --git a/test/ExecutionEngine/MCJIT/lit.local.cfg b/test/ExecutionEngine/MCJIT/lit.local.cfg
index e2535ef1..09f1a2ab 100644
--- a/test/ExecutionEngine/MCJIT/lit.local.cfg
+++ b/test/ExecutionEngine/MCJIT/lit.local.cfg
@@ -1,7 +1,8 @@
root = config.root
targets = root.targets
if ('X86' in targets) | ('AArch64' in targets) | ('ARM' in targets) | \
- ('Mips' in targets) | ('PowerPC' in targets) | ('SystemZ' in targets):
+ ('Mips' in targets) | ('PowerPC' in targets) | ('SystemZ' in targets) | \
+ ('LoongArch' in targets) :
config.unsupported = False
else:
config.unsupported = True
@@ -9,7 +10,7 @@ else:
# FIXME: autoconf and cmake produce different arch names. We should normalize
# them before getting here.
if root.host_arch not in ['i386', 'x86', 'x86_64', 'AMD64',
- 'AArch64', 'ARM', 'Mips',
+ 'AArch64', 'ARM', 'Mips', 'loongarch64',
'PowerPC', 'ppc64', 'ppc64le', 'SystemZ']:
config.unsupported = True
 
diff --git a/test/ExecutionEngine/MCJIT/pr13727.ll b/test/ExecutionEngine/MCJIT/pr13727.ll
index 79dd9b4c..6438756b 100644
--- a/test/ExecutionEngine/MCJIT/pr13727.ll
+++ b/test/ExecutionEngine/MCJIT/pr13727.ll
@@ -1,4 +1,6 @@
; RUN: %lli -O0 -disable-lazy-compilation=false %s
+; This test would need -mattr='+d' added to its RUN line to pass on loongarch64; mark it XFAIL for now.
+; XFAIL: loongarch64
 
; The intention of this test is to verify that symbols mapped to COMMON in ELF
; work as expected.
diff --git a/test/ExecutionEngine/MCJIT/remote/test-common-symbols-remote.ll b/test/ExecutionEngine/MCJIT/remote/test-common-symbols-remote.ll
index eda2c8e8..f9fc0e22 100644
--- a/test/ExecutionEngine/MCJIT/remote/test-common-symbols-remote.ll
+++ b/test/ExecutionEngine/MCJIT/remote/test-common-symbols-remote.ll
@@ -1,5 +1,6 @@
; RUN: %lli -remote-mcjit -O0 -disable-lazy-compilation=false -mcjit-remote-process=lli-child-target%exeext %s
; XFAIL: windows-gnu,windows-msvc
+; XFAIL: loongarch64
; UNSUPPORTED: powerpc64-unknown-linux-gnu
; Remove UNSUPPORTED for powerpc64-unknown-linux-gnu if problem caused by r266663 is fixed
 
diff --git a/test/ExecutionEngine/MCJIT/test-common-symbols.ll b/test/ExecutionEngine/MCJIT/test-common-symbols.ll
index b63c2fea..86c09883 100644
--- a/test/ExecutionEngine/MCJIT/test-common-symbols.ll
+++ b/test/ExecutionEngine/MCJIT/test-common-symbols.ll
@@ -1,4 +1,6 @@
; RUN: %lli -O0 -disable-lazy-compilation=false %s
+; This test would need -mattr='+d' added to its RUN line to pass on loongarch64; mark it XFAIL for now.
+; XFAIL: loongarch64
 
; The intention of this test is to verify that symbols mapped to COMMON in ELF
; work as expected.
diff --git a/test/ExecutionEngine/frem.ll b/test/ExecutionEngine/frem.ll
index aedaae38..c6709be0 100644
--- a/test/ExecutionEngine/frem.ll
+++ b/test/ExecutionEngine/frem.ll
@@ -3,6 +3,8 @@
; This unit test guards against the failure.
;
; RUN: %lli %s | FileCheck %s
+; This test would need -mattr='+d' added to its RUN line to pass on loongarch64; mark it XFAIL for now.
+; XFAIL: loongarch64
 
@flt = internal global float 12.0e+0
@str = internal constant [18 x i8] c"Double value: %f\0A\00"
diff --git a/test/Instrumentation/DataFlowSanitizer/callback.ll b/test/Instrumentation/DataFlowSanitizer/callback.ll
index 7194535b..8d9cf66a 100644
--- a/test/Instrumentation/DataFlowSanitizer/callback.ll
+++ b/test/Instrumentation/DataFlowSanitizer/callback.ll
@@ -1,10 +1,13 @@
; RUN: opt < %s -dfsan -dfsan-event-callbacks=1 -S | FileCheck %s
+; RUN: opt --mtriple=loongarch64-linux-gnu < %s -dfsan -dfsan-event-callbacks=1 -S | FileCheck %s --check-prefix=LA64
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
 
define i8 @load8(i8* %p) {
; CHECK: call void @__dfsan_load_callback(i16 %{{.*}}, i8* %p)
; CHECK: %a = load i8, i8* %p
+ ; LA64: call void @__dfsan_load_callback(i16 zeroext %{{.*}}, i8* %p)
+ ; LA64: %a = load i8, i8* %p
 
%a = load i8, i8* %p
ret i8 %a
@@ -14,6 +17,9 @@ define void @store8(i8* %p, i8 %a) {
; CHECK: store i16 %[[l:.*]], i16* %{{.*}}
; CHECK: call void @__dfsan_store_callback(i16 %[[l]], i8* %p)
; CHECK: store i8 %a, i8* %p
+ ; LA64: store i16 %[[l:.*]], i16* %{{.*}}
+ ; LA64: call void @__dfsan_store_callback(i16 zeroext %[[l]], i8* %p)
+ ; LA64: store i8 %a, i8* %p
 
store i8 %a, i8* %p
ret void
@@ -23,7 +29,14 @@ define i1 @cmp(i8 %a, i8 %b) {
; CHECK: call void @__dfsan_cmp_callback(i16 %[[l:.*]])
; CHECK: %c = icmp ne i8 %a, %b
; CHECK: store i16 %[[l]], i16* bitcast ({{.*}}* @__dfsan_retval_tls to i16*)
+ ; LA64: call void @__dfsan_cmp_callback(i16 zeroext %[[l:.*]])
+ ; LA64: %c = icmp ne i8 %a, %b
+ ; LA64: store i16 %[[l]], i16* bitcast ({{.*}}* @__dfsan_retval_tls to i16*)
 
%c = icmp ne i8 %a, %b
ret i1 %c
-}
\ No newline at end of file
+}
+
+; LA64: declare void @__dfsan_load_callback(i16 zeroext, i8*)
+; LA64: declare void @__dfsan_store_callback(i16 zeroext, i8*)
+; LA64: declare void @__dfsan_cmp_callback(i16 zeroext)
diff --git a/tools/llvm-readobj/ELFDumper.cpp b/tools/llvm-readobj/ELFDumper.cpp
index 0f508f8d..17fd17da 100644
--- a/tools/llvm-readobj/ELFDumper.cpp
+++ b/tools/llvm-readobj/ELFDumper.cpp
@@ -1164,6 +1164,7 @@ static const EnumEntry<unsigned> ElfMachineType[] = {
ENUM_ENT(EM_LANAI, "EM_LANAI"),
ENUM_ENT(EM_BPF, "EM_BPF"),
ENUM_ENT(EM_VE, "NEC SX-Aurora Vector Engine"),
+ ENUM_ENT(EM_LOONGARCH, "LoongArch"),
};
 
static const EnumEntry<unsigned> ElfSymbolBindings[] = {
@@ -1485,6 +1486,14 @@ static const EnumEntry<unsigned> ElfHeaderRISCVFlags[] = {
ENUM_ENT(EF_RISCV_RVE, "RVE")
};
 
+static const EnumEntry<unsigned> ElfHeaderLoongArchFlags[] = {
+ ENUM_ENT(EF_LOONGARCH_ABI_SOFT_FLOAT, "SOFT-FLOAT"),
+ ENUM_ENT(EF_LOONGARCH_ABI_SINGLE_FLOAT, "SINGLE-FLOAT"),
+ ENUM_ENT(EF_LOONGARCH_ABI_DOUBLE_FLOAT, "DOUBLE-FLOAT"),
+ ENUM_ENT(EF_LOONGARCH_OBJABI_V0, "OBJ-v0"),
+ ENUM_ENT(EF_LOONGARCH_OBJABI_V1, "OBJ-v1"),
+};
+
static const EnumEntry<unsigned> ElfSymOtherFlags[] = {
LLVM_READOBJ_ENUM_ENT(ELF, STV_INTERNAL),
LLVM_READOBJ_ENUM_ENT(ELF, STV_HIDDEN),
@@ -3170,6 +3179,10 @@ template <class ELFT> void GNUELFDumper<ELFT>::printFileHeaders() {
unsigned(ELF::EF_MIPS_MACH));
else if (e.e_machine == EM_RISCV)
ElfFlags = printFlags(e.e_flags, makeArrayRef(ElfHeaderRISCVFlags));
+ else if (e.e_machine == EM_LOONGARCH)
+ ElfFlags = printFlags(e.e_flags, makeArrayRef(ElfHeaderLoongArchFlags),
+ unsigned(ELF::EF_LOONGARCH_ABI_MODIFIER_MASK),
+ unsigned(ELF::EF_LOONGARCH_OBJABI_MASK));
Str = "0x" + to_hexString(e.e_flags);
if (!ElfFlags.empty())
Str = Str + ", " + ElfFlags;
@@ -5982,6 +5995,10 @@ template <class ELFT> void LLVMELFDumper<ELFT>::printFileHeaders() {
unsigned(ELF::EF_AMDGPU_MACH));
else if (E.e_machine == EM_RISCV)
W.printFlags("Flags", E.e_flags, makeArrayRef(ElfHeaderRISCVFlags));
+ else if (E.e_machine == EM_LOONGARCH)
+ W.printFlags("Flags", E.e_flags, makeArrayRef(ElfHeaderLoongArchFlags),
+ unsigned(ELF::EF_LOONGARCH_ABI_MODIFIER_MASK),
+ unsigned(ELF::EF_LOONGARCH_OBJABI_MASK));
else
W.printFlags("Flags", E.e_flags);
W.printNumber("HeaderSize", E.e_ehsize);
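For context on the masked printFlags calls above: the LoongArch e_flags word carries a base-ABI-modifier field and an object-file-ABI-version field, and the dumper prints one name per field by masking before comparing. A rough standalone sketch of that decoding follows; the numeric values are assumptions taken from the LoongArch psABI, and the authoritative definitions are the EF_LOONGARCH_* enumerators in llvm/BinaryFormat/ELF.h.

// Sketch only: field-wise decoding of a LoongArch e_flags value.
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t EFlags = 0x43; // e.g. an LA64 object built as OBJ-v1, double-float ABI

  switch (EFlags & 0x7) {  // assumed value of EF_LOONGARCH_ABI_MODIFIER_MASK
  case 0x1: std::puts("SOFT-FLOAT"); break;
  case 0x2: std::puts("SINGLE-FLOAT"); break;
  case 0x3: std::puts("DOUBLE-FLOAT"); break;
  }
  switch (EFlags & 0xC0) { // assumed value of EF_LOONGARCH_OBJABI_MASK
  case 0x00: std::puts("OBJ-v0"); break;
  case 0x40: std::puts("OBJ-v1"); break;
  }
  return 0;
}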
diff --git a/tools/sancov/sancov.cpp b/tools/sancov/sancov.cpp
index f1d756f2..136ad502 100644
--- a/tools/sancov/sancov.cpp
+++ b/tools/sancov/sancov.cpp
@@ -691,7 +691,7 @@ static uint64_t getPreviousInstructionPc(uint64_t PC,
Triple TheTriple) {
if (TheTriple.isARM()) {
return (PC - 3) & (~1);
- } else if (TheTriple.isAArch64()) {
+ } else if (TheTriple.isAArch64() || TheTriple.isLoongArch64()) {
return PC - 4;
} else if (TheTriple.isMIPS()) {
return PC - 8;
diff --git a/utils/UpdateTestChecks/asm.py b/utils/UpdateTestChecks/asm.py
index 6390ace4..3b0385cd 100644
--- a/utils/UpdateTestChecks/asm.py
+++ b/utils/UpdateTestChecks/asm.py
@@ -73,6 +73,12 @@ ASM_FUNCTION_AVR_RE = re.compile(
r'.Lfunc_end[0-9]+:\n',
flags=(re.M | re.S))
 
+ASM_FUNCTION_LOONGARCH_RE = re.compile(
+ r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n[^:]*?' # f: (name of func)
+ r'(?P<body>^##?[ \t]+[^:]+:.*?)\s*' # (body of the function)
+ r'.Lfunc_end[0-9]+:\n', # .Lfunc_end[0-9]:
+ flags=(re.M | re.S))
+
ASM_FUNCTION_PPC_RE = re.compile(
r'#[ \-\t]*Begin function (?P<func>[^.:]+)\n'
r'.*?'
@@ -277,6 +283,16 @@ def scrub_asm_avr(asm, args):
asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
return asm
 
+def scrub_asm_loongarch(asm, args):
+ # Scrub runs of whitespace out of the assembly, but leave the leading
+ # whitespace in place.
+ asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
+ # Expand the tabs used for indentation.
+ asm = string.expandtabs(asm, 2)
+ # Strip trailing whitespace.
+ asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
+ return asm
+
def scrub_asm_riscv(asm, args):
# Scrub runs of whitespace out of the assembly, but leave the leading
# whitespace in place.
@@ -366,6 +382,7 @@ def get_run_handler(triple):
'avr': (scrub_asm_avr, ASM_FUNCTION_AVR_RE),
'ppc32': (scrub_asm_powerpc, ASM_FUNCTION_PPC_RE),
'powerpc': (scrub_asm_powerpc, ASM_FUNCTION_PPC_RE),
+ 'loongarch64': (scrub_asm_loongarch, ASM_FUNCTION_LOONGARCH_RE),
'riscv32': (scrub_asm_riscv, ASM_FUNCTION_RISCV_RE),
'riscv64': (scrub_asm_riscv, ASM_FUNCTION_RISCV_RE),
'lanai': (scrub_asm_lanai, ASM_FUNCTION_LANAI_RE),
diff --git a/utils/benchmark/src/cycleclock.h b/utils/benchmark/src/cycleclock.h
index 040ec22c..cc7c0595 100644
--- a/utils/benchmark/src/cycleclock.h
+++ b/utils/benchmark/src/cycleclock.h
@@ -167,6 +167,10 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
+#elif defined(__loongarch__)
+ struct timeval tv;
+ gettimeofday(&tv, nullptr);
+ return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__s390__) // Covers both s390 and s390x.
// Return the CPU clock.
uint64_t tsc;
diff --git a/utils/gn/secondary/clang/lib/Basic/BUILD.gn b/utils/gn/secondary/clang/lib/Basic/BUILD.gn
index d6e322f7..60eff129 100644
--- a/utils/gn/secondary/clang/lib/Basic/BUILD.gn
+++ b/utils/gn/secondary/clang/lib/Basic/BUILD.gn
@@ -83,6 +83,7 @@ static_library("Basic") {
"Targets/Hexagon.cpp",
"Targets/Lanai.cpp",
"Targets/Le64.cpp",
+ "Targets/LoongArch.cpp",
"Targets/MSP430.cpp",
"Targets/Mips.cpp",
"Targets/NVPTX.cpp",
diff --git a/utils/gn/secondary/clang/lib/Driver/BUILD.gn b/utils/gn/secondary/clang/lib/Driver/BUILD.gn
index ab29e6be..3111f03a 100644
--- a/utils/gn/secondary/clang/lib/Driver/BUILD.gn
+++ b/utils/gn/secondary/clang/lib/Driver/BUILD.gn
@@ -47,6 +47,7 @@ static_library("Driver") {
"ToolChains/Ananas.cpp",
"ToolChains/Arch/AArch64.cpp",
"ToolChains/Arch/ARM.cpp",
+ "ToolChains/Arch/LoongArch.cpp",
"ToolChains/Arch/Mips.cpp",
"ToolChains/Arch/PPC.cpp",
"ToolChains/Arch/RISCV.cpp",
diff --git a/utils/gn/secondary/llvm/include/llvm/IR/BUILD.gn b/utils/gn/secondary/llvm/include/llvm/IR/BUILD.gn
index f12d39ad..1280c748 100644
--- a/utils/gn/secondary/llvm/include/llvm/IR/BUILD.gn
+++ b/utils/gn/secondary/llvm/include/llvm/IR/BUILD.gn
@@ -67,6 +67,16 @@ tablegen("IntrinsicsHexagon") {
td_file = "Intrinsics.td"
}
 
+tablegen("IntrinsicsLoongArch") {
+ visibility = [ ":public_tablegen" ]
+ output_name = "IntrinsicsLoongArch.h"
+ args = [
+ "-gen-intrinsic-enums",
+ "-intrinsic-prefix=loongarch",
+ ]
+ td_file = "Intrinsics.td"
+}
+
tablegen("IntrinsicsMips") {
visibility = [ ":public_tablegen" ]
output_name = "IntrinsicsMips.h"
@@ -186,6 +196,7 @@ group("public_tablegen") {
":IntrinsicsARM",
":IntrinsicsBPF",
":IntrinsicsHexagon",
+ ":IntrinsicsLoongArch",
":IntrinsicsMips",
":IntrinsicsNVPTX",
":IntrinsicsPowerPC",
diff --git a/utils/gn/secondary/llvm/lib/Target/targets.gni b/utils/gn/secondary/llvm/lib/Target/targets.gni
index 102040c2..062526be 100644
--- a/utils/gn/secondary/llvm/lib/Target/targets.gni
+++ b/utils/gn/secondary/llvm/lib/Target/targets.gni
@@ -14,6 +14,7 @@ llvm_all_targets = [
"BPF",
"Hexagon",
"Lanai",
+ "LoongArch",
"Mips",
"NVPTX",
"PowerPC",
@@ -47,6 +48,7 @@ llvm_build_AArch64 = false
llvm_build_AMDGPU = false
llvm_build_ARM = false
llvm_build_BPF = false
+llvm_build_LoongArch = false
llvm_build_Mips = false
llvm_build_PowerPC = false
llvm_build_WebAssembly = false
@@ -60,6 +62,8 @@ foreach(target, llvm_targets_to_build) {
llvm_build_ARM = true
} else if (target == "BPF") {
llvm_build_BPF = true
+ } else if (target == "LoongArch") {
+ llvm_build_LoongArch = true
} else if (target == "Mips") {
llvm_build_Mips = true
} else if (target == "PowerPC") {
--
2.41.0