forked from pool/python-greenlet
a1e4200c4c
- Add python-greenlet-aarch64-support.diff, for, well, aarch64 support.

OBS-URL: https://build.opensuse.org/request/show/175017
OBS-URL: https://build.opensuse.org/package/show/devel:languages:python/python-greenlet?expand=0&rev=9
--- ./slp_platformselect.h.mm	2012-05-12 22:56:32.000000000 +0000
+++ ./slp_platformselect.h	2013-04-08 11:28:47.000000000 +0000
@@ -32,4 +32,6 @@
 #include "platform/switch_arm32_gcc.h" /* gcc using arm32 */
 #elif defined(__GNUC__) && defined(__mips__) && defined(__linux__)
 #include "platform/switch_mips_unix.h" /* Linux/MIPS */
+#elif defined(__GNUC__) && defined(__aarch64__)
+#include "platform/switch_aarch64_gcc.h" /* Aarch64 ABI */
 #endif
--- ./platform/switch_aarch64_gcc.h.mm	2013-04-13 21:40:23.000000000 +0000
+++ ./platform/switch_aarch64_gcc.h	2013-04-13 21:41:39.000000000 +0000
@@ -0,0 +1,76 @@
+/*
+ * this is the internal transfer function.
+ *
+ * HISTORY
+ * 13-Apr-13  Add support for strange GCC caller-save decisions
+ * 08-Apr-13  File creation. Michael Matz
+ *
+ * NOTES
+ *
+ * Simply save all callee saved registers
+ *
+ */
+
+#define STACK_REFPLUS 1
+
+#ifdef SLP_EVAL
+#define STACK_MAGIC 0
+#define REGS_TO_SAVE "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", \
+                     "r27", "r28", "r30" /* aka lr */, \
+                     "v8", "v9", "v10", "v11", \
+                     "v12", "v13", "v14", "v15"
+
+/* See below for the purpose of this function. */
+__attribute__((noinline, noclone)) int fancy_return_zero(void);
+__attribute__((noinline, noclone)) int
+fancy_return_zero(void)
+{
+    return 0;
+}
+
+static int
+slp_switch(void)
+{
+    int err = 0;
+    void *fp;
+    register long *stackref, stsizediff;
+    __asm__ volatile ("" : : : REGS_TO_SAVE);
+    __asm__ volatile ("str x29, %0" : "=m"(fp) : : );
+    __asm__ ("mov %0, sp" : "=r" (stackref));
+    {
+        SLP_SAVE_STATE(stackref, stsizediff);
+        __asm__ volatile (
+            "add sp,sp,%0\n"
+            "add x29,x29,%0\n"
+            :
+            : "r" (stsizediff)
+            );
+        SLP_RESTORE_STATE();
+        /* SLP_SAVE_STATE macro contains some return statements
+           (of -1 and 1).  It falls through only when
+           the return value of slp_save_state() is zero, which
+           is placed in x0.
+           In that case we (slp_switch) also want to return zero
+           (also in x0 of course).
+           Now, some GCC versions (seen with 4.8) think it's a
+           good idea to save/restore x0 around the call to
+           slp_restore_state(), instead of simply zeroing it
+           at the return below.  But slp_restore_state
+           writes random values to the stack slot used for this
+           save/restore (from when it once was saved above in
+           SLP_SAVE_STATE, when it was still uninitialized), so
+           "restoring" that precious zero actually makes us
+           return random values.  There are some ways to make
+           GCC not use that zero value in the normal return path
+           (e.g. making err volatile, but that costs a little
+           stack space), and the simplest is to call a function
+           that returns an unknown value (which happens to be zero),
+           so the saved/restored value is unused. */
+        err = fancy_return_zero();
+    }
+    __asm__ volatile ("ldr x29, %0" : : "m" (fp) :);
+    __asm__ volatile ("" : : : REGS_TO_SAVE);
+    return err;
+}
+
+#endif
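
The two empty __asm__ volatile ("" : : : REGS_TO_SAVE) statements in slp_switch() are the heart of the "simply save all callee saved registers" approach: naming a register in the clobber list forces GCC to spill any live value it keeps there before the statement and reload it afterwards. Below is a minimal standalone sketch of that clobber-list effect, not part of the patch; the function and variable names and the reduced register list are illustrative, and an aarch64 target with GCC or clang is assumed.

    #include <stdio.h>

    static long counter;

    static void bump(void)              /* hypothetical demo function */
    {
        long local = counter + 1;       /* may be held in a callee-saved register */

        /* Empty asm with a clobber list: the compiler must assume x19-x21 are
           modified here, so it cannot keep 'local' in them across this point
           without spilling and reloading it. */
        __asm__ volatile ("" : : : "x19", "x20", "x21", "memory");

        counter = local;                /* 'local' is still intact afterwards */
    }

    int main(void)
    {
        bump();
        printf("counter = %ld\n", counter);   /* prints: counter = 1 */
        return 0;
    }

In the real header the clobber list is the full REGS_TO_SAVE set, so GCC saves every callee-saved register to slp_switch()'s own frame before the stack pointer is moved and restores them after, which is what keeps each greenlet's register state on its own stack.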