papi/add-loongarch-support.patch
yangchenguang 6b2652f6a6 Add loongarch64 and sw_64 support
Signed-off-by: yangchenguang <yangchenguang@kylinsec.com.cn>
(cherry picked from commit 6119b0e0db6b0edb77bbfc4fa2050e6e314d1a1e)
2023-09-21 19:21:51 +08:00

91 lines
3.0 KiB
Diff

diff -Nur a/src/linux-context.h b/src/linux-context.h
--- a/src/linux-context.h 2023-02-13 17:13:46.695350258 +0800
+++ b/src/linux-context.h 2023-02-13 17:12:54.094497176 +0800
@@ -33,6 +33,8 @@
#define OVERFLOW_ADDRESS(ctx) ctx.ucontext->uc_mcontext.arm_pc
#elif defined(__aarch64__)
#define OVERFLOW_ADDRESS(ctx) ctx.ucontext->uc_mcontext.pc
+#elif defined(__loongarch__)
+#define OVERFLOW_ADDRESS(ctx) ctx.ucontext->uc_mcontext.__pc
#elif defined(__mips__)
#define OVERFLOW_ADDRESS(ctx) ctx.ucontext->uc_mcontext.pc
#elif defined(__hppa__)
diff -Nur a/src/linux-lock.h b/src/linux-lock.h
--- a/src/linux-lock.h 2019-03-05 03:56:23.000000000 +0800
+++ b/src/linux-lock.h 2023-02-13 17:12:54.094497176 +0800
@@ -223,6 +223,37 @@
}
#define _papi_hwd_lock(lck) __raw_spin_lock(&_papi_hwd_lock_data[lck]);
#define _papi_hwd_unlock(lck) __raw_spin_unlock(&_papi_hwd_lock_data[lck])
+#elif defined(__loongarch__)
+static inline void __raw_spin_lock(volatile unsigned int *lock)
+{
+ unsigned int tmp;
+ __asm__ __volatile__(
+ "1: ll.w %1, %2 \n"
+ " bnez %1, 1b \n"
+ " li.w %1, 1 \n"
+ " sc.w %1, %0 \n"
+ " beqz %1, 1b \n"
+ " nop \n"
+ : "=m" (*lock), "=&r" (tmp)
+ : "m" (*lock)
+ : "memory");
+}
+
+static inline void __raw_spin_unlock(volatile unsigned int *lock)
+{
+ unsigned int tmp;
+ __asm__ __volatile__(
+ " nop \n"
+ " li.w %1, 0 \n"
+ " st.w %1, %0 \n"
+ : "=m" (*lock), "=&r" (tmp)
+ : "m" (*lock)
+ : "memory");
+}
+#define _papi_hwd_lock(lck) { rmb(); __raw_spin_lock(&_papi_hwd_lock_data[lck]); rmb(); }
+#define _papi_hwd_unlock(lck) { rmb(); __raw_spin_unlock(&_papi_hwd_lock_data[lck]); rmb(); }
+
+

#else
#error "_papi_hwd_lock/unlock undefined!"
diff -Nur a/src/linux-timer.c b/src/linux-timer.c
--- a/src/linux-timer.c 2023-02-13 17:13:46.695350258 +0800
+++ b/src/linux-timer.c 2023-02-13 17:12:54.094497176 +0800
@@ -245,6 +245,20 @@
return ret;
}

+/****************************/
+/* loongarch64 get_cycles() */
+/****************************/
+#elif defined(__loongarch__)
+static inline long long
+get_cycles(void)
+{
+ register unsigned long ret = 0;
+ int rID = 0;
+ __asm__ __volatile__ ("ibar 0" ::: "memory");
+ __asm__ __volatile__ ("rdtime.d %0, %1" :"=r"(ret),"=r"(rID));
+ return ret;
+}
+
/************************/
/* POWER get_cycles() */
/************************/
diff -Nur a/src/mb.h b/src/mb.h
--- a/src/mb.h 2023-02-13 17:13:46.695350258 +0800
+++ b/src/mb.h 2023-02-13 17:12:54.098497240 +0800
@@ -39,6 +39,9 @@
#elif defined(__aarch64__)
#define rmb() asm volatile("dmb ld" ::: "memory")
+#elif defined(__loongarch__)
+#define rmb() asm volatile("dbar 0" ::: "memory")
+
#elif defined(__riscv)
#define rmb() asm volatile("fence ir, ir" ::: "memory")