# HG changeset patch
# User Wolfgang Rosenauer
# Parent  f805a250257be9c3ea570b34557150450e16dfec

diff --git a/js/src/jit/GenerateAtomicOperations.py b/js/src/jit/GenerateAtomicOperations.py
--- a/js/src/jit/GenerateAtomicOperations.py
+++ b/js/src/jit/GenerateAtomicOperations.py
@@ -5,40 +5,41 @@
 # This script generates jit/AtomicOperationsGenerated.h
 #
 # See the big comment in jit/AtomicOperations.h for an explanation.
 
 import buildconfig
 
 is_64bit = "JS_64BIT" in buildconfig.defines
 cpu_arch = buildconfig.substs["CPU_ARCH"]
+is_gcc = buildconfig.substs["CC_TYPE"] == "gcc"
 
 
 def fmt_insn(s):
     return '"' + s + '\\n\\t"\n'
 
 
 def gen_seqcst(fun_name):
     if cpu_arch in ("x86", "x86_64"):
         return r"""
-    inline void %(fun_name)s() {
+    INLINE_ATTR void %(fun_name)s() {
         asm volatile ("mfence\n\t" ::: "memory");
     }""" % {
             "fun_name": fun_name,
         }
     if cpu_arch == "aarch64":
         return r"""
-    inline void %(fun_name)s() {
+    INLINE_ATTR void %(fun_name)s() {
         asm volatile ("dmb ish\n\t" ::: "memory");
     }""" % {
             "fun_name": fun_name,
         }
     if cpu_arch == "arm":
         return r"""
-    inline void %(fun_name)s() {
+    INLINE_ATTR void %(fun_name)s() {
         asm volatile ("dmb sy\n\t" ::: "memory");
     }""" % {
             "fun_name": fun_name,
         }
     raise Exception("Unexpected arch")
 
 
 def gen_load(fun_name, cpp_type, size, barrier):
@@ -58,17 +59,17 @@ def gen_load(fun_name, cpp_type, size, b
         elif size == 32:
             insns += fmt_insn("movl (%[arg]), %[res]")
         else:
             assert size == 64
             insns += fmt_insn("movq (%[arg]), %[res]")
         if barrier:
             insns += fmt_insn("mfence")
         return """
-    inline %(cpp_type)s %(fun_name)s(const %(cpp_type)s* arg) {
+    INLINE_ATTR %(cpp_type)s %(fun_name)s(const %(cpp_type)s* arg) {
         %(cpp_type)s res;
         asm volatile (%(insns)s
         : [res] "=r" (res)
         : [arg] "r" (arg)
         : "memory");
         return res;
     }""" % {
             "cpp_type": cpp_type,
@@ -86,17 +87,17 @@ def gen_load(fun_name, cpp_type, size, b
         elif size == 32:
             insns += fmt_insn("ldr %w[res], [%x[arg]]")
         else:
             assert size == 64
             insns += fmt_insn("ldr %x[res], [%x[arg]]")
         if barrier:
             insns += fmt_insn("dmb ish")
         return """
-    inline %(cpp_type)s %(fun_name)s(const %(cpp_type)s* arg) {
+    INLINE_ATTR %(cpp_type)s %(fun_name)s(const %(cpp_type)s* arg) {
         %(cpp_type)s res;
         asm volatile (%(insns)s
         : [res] "=r" (res)
         : [arg] "r" (arg)
         : "memory");
         return res;
     }""" % {
             "cpp_type": cpp_type,
@@ -112,17 +113,17 @@ def gen_load(fun_name, cpp_type, size, b
         elif size == 16:
             insns += fmt_insn("ldrh %[res], [%[arg]]")
         else:
             assert size == 32
             insns += fmt_insn("ldr %[res], [%[arg]]")
         if barrier:
             insns += fmt_insn("dmb sy")
         return """
-    inline %(cpp_type)s %(fun_name)s(const %(cpp_type)s* arg) {
+    INLINE_ATTR %(cpp_type)s %(fun_name)s(const %(cpp_type)s* arg) {
         %(cpp_type)s res;
         asm volatile (%(insns)s
         : [res] "=r" (res)
         : [arg] "r" (arg)
         : "memory");
         return res;
     }""" % {
             "cpp_type": cpp_type,
@@ -149,17 +150,17 @@ def gen_store(fun_name, cpp_type, size,
         elif size == 32:
             insns += fmt_insn("movl %[val], (%[addr])")
         else:
             assert size == 64
             insns += fmt_insn("movq %[val], (%[addr])")
         if barrier:
             insns += fmt_insn("mfence")
         return """
-    inline void %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
+    INLINE_ATTR void %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
         asm volatile (%(insns)s
         :
         : [addr] "r" (addr), [val] "r"(val)
         : "memory");
     }""" % {
             "cpp_type": cpp_type,
             "fun_name": fun_name,
             "insns": insns,
@@ -175,17 +176,17 @@ def gen_store(fun_name, cpp_type, size,
         elif size == 32:
             insns += fmt_insn("str %w[val], [%x[addr]]")
         else:
             assert size == 64
             insns += fmt_insn("str %x[val], [%x[addr]]")
         if barrier:
             insns += fmt_insn("dmb ish")
         return """
-    inline void %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
+    INLINE_ATTR void %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
         asm volatile (%(insns)s
         :
         : [addr] "r" (addr), [val] "r"(val)
         : "memory");
     }""" % {
             "cpp_type": cpp_type,
             "fun_name": fun_name,
             "insns": insns,
@@ -199,17 +200,17 @@ def gen_store(fun_name, cpp_type, size,
         elif size == 16:
             insns += fmt_insn("strh %[val], [%[addr]]")
         else:
             assert size == 32
             insns += fmt_insn("str %[val], [%[addr]]")
         if barrier:
             insns += fmt_insn("dmb sy")
         return """
-    inline void %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
+    INLINE_ATTR void %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
         asm volatile (%(insns)s
         :
         : [addr] "r" (addr), [val] "r"(val)
         : "memory");
     }""" % {
             "cpp_type": cpp_type,
             "fun_name": fun_name,
             "insns": insns,
@@ -230,17 +231,17 @@ def gen_exchange(fun_name, cpp_type, siz
         elif size == 16:
             insns += fmt_insn("xchgw %[val], (%[addr])")
         elif size == 32:
             insns += fmt_insn("xchgl %[val], (%[addr])")
         else:
             assert size == 64
             insns += fmt_insn("xchgq %[val], (%[addr])")
         return """
-    inline %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
+    INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
         asm volatile (%(insns)s
         : [val] "+r" (val)
         : [addr] "r" (addr)
         : "memory");
         return val;
     }""" % {
             "cpp_type": cpp_type,
             "fun_name": fun_name,
@@ -261,17 +262,17 @@ def gen_exchange(fun_name, cpp_type, siz
             insns += fmt_insn("stxr %w[scratch], %w[val], [%x[addr]]")
         else:
             assert size == 64
             insns += fmt_insn("ldxr %x[res], [%x[addr]]")
             insns += fmt_insn("stxr %w[scratch], %x[val], [%x[addr]]")
         insns += fmt_insn("cbnz %w[scratch], 0b")
         insns += fmt_insn("dmb ish")
         return """
-    inline %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
+    INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
         %(cpp_type)s res;
         uint32_t scratch;
         asm volatile (%(insns)s
         : [res] "=&r"(res), [scratch] "=&r"(scratch)
         : [addr] "r" (addr), [val] "r"(val)
         : "memory", "cc");
         return res;
     }""" % {
@@ -292,17 +293,17 @@ def gen_exchange(fun_name, cpp_type, siz
         else:
             assert size == 32
             insns += fmt_insn("ldrex %[res], [%[addr]]")
             insns += fmt_insn("strex %[scratch], %[val], [%[addr]]")
         insns += fmt_insn("cmp %[scratch], #1")
         insns += fmt_insn("beq 0b")
         insns += fmt_insn("dmb sy")
         return """
-    inline %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
+    INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
         %(cpp_type)s res;
         uint32_t scratch;
         asm volatile (%(insns)s
         : [res] "=&r"(res), [scratch] "=&r"(scratch)
         : [addr] "r" (addr), [val] "r"(val)
         : "memory", "cc");
         return res;
     }""" % {
@@ -316,33 +317,33 @@ def gen_exchange(fun_name, cpp_type, siz
 def gen_cmpxchg(fun_name, cpp_type, size):
     # NOTE: the assembly code must match the generated code in:
     # - MacroAssembler::compareExchange
     # - MacroAssembler::compareExchange64
     if cpu_arch == "x86" and size == 64:
         # Use a +A constraint to load `oldval` into EDX:EAX as input/output.
         # `newval` is loaded into ECX:EBX.
return r""" - inline %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, + INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s oldval, %(cpp_type)s newval) { asm volatile ("lock; cmpxchg8b (%%[addr])\n\t" : "+A" (oldval) : [addr] "r" (addr), "b" (uint32_t(newval & 0xffff'ffff)), "c" (uint32_t(newval >> 32)) : "memory", "cc"); return oldval; }""" % { "cpp_type": cpp_type, "fun_name": fun_name, } if cpu_arch == "arm" and size == 64: return r""" - inline %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, + INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s oldval, %(cpp_type)s newval) { uint32_t oldval0 = oldval & 0xffff'ffff; uint32_t oldval1 = oldval >> 32; uint32_t newval0 = newval & 0xffff'ffff; uint32_t newval1 = newval >> 32; asm volatile ( "dmb sy\n\t" @@ -375,17 +376,17 @@ def gen_cmpxchg(fun_name, cpp_type, size elif size == 16: insns += fmt_insn("lock; cmpxchgw %[newval], (%[addr])") elif size == 32: insns += fmt_insn("lock; cmpxchgl %[newval], (%[addr])") else: assert size == 64 insns += fmt_insn("lock; cmpxchgq %[newval], (%[addr])") return """ - inline %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, + INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s oldval, %(cpp_type)s newval) { asm volatile (%(insns)s : [oldval] "+a" (oldval) : [addr] "r" (addr), [newval] "r" (newval) : "memory", "cc"); return oldval; }""" % { @@ -420,17 +421,17 @@ def gen_cmpxchg(fun_name, cpp_type, size insns += fmt_insn("mov %x[scratch], %x[oldval]") insns += fmt_insn("ldxr %x[res], [%x[addr]]") insns += fmt_insn("cmp %x[res], %x[scratch]") insns += fmt_insn("b.ne 1f") insns += fmt_insn("stxr %w[scratch], %x[newval], [%x[addr]]") insns += fmt_insn("cbnz %w[scratch], 0b") insns += fmt_insn("1: dmb ish") return """ - inline %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, + INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s oldval, %(cpp_type)s newval) { %(cpp_type)s res, scratch; asm volatile (%(insns)s : [res] "=&r" (res), [scratch] "=&r" (scratch) : [addr] "r" (addr), [oldval] "r"(oldval), [newval] "r" (newval) : "memory", "cc"); return res; @@ -461,17 +462,17 @@ def gen_cmpxchg(fun_name, cpp_type, size insns += fmt_insn("ldrex %[res], [%[addr]]") insns += fmt_insn("cmp %[res], %[scratch]") insns += fmt_insn("bne 1f") insns += fmt_insn("strex %[scratch], %[newval], [%[addr]]") insns += fmt_insn("cmp %[scratch], #1") insns += fmt_insn("beq 0b") insns += fmt_insn("1: dmb sy") return """ - inline %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, + INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s oldval, %(cpp_type)s newval) { %(cpp_type)s res, scratch; asm volatile (%(insns)s : [res] "=&r" (res), [scratch] "=&r" (scratch) : [addr] "r" (addr), [oldval] "r"(oldval), [newval] "r" (newval) : "memory", "cc"); return res; @@ -496,17 +497,17 @@ def gen_fetchop(fun_name, cpp_type, size elif size == 16: insns += fmt_insn("lock; xaddw %[val], (%[addr])") elif size == 32: insns += fmt_insn("lock; xaddl %[val], (%[addr])") else: assert size == 64 insns += fmt_insn("lock; xaddq %[val], (%[addr])") return """ - inline %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) { + INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) { asm volatile (%(insns)s : [val] "+&r" (val) : [addr] "r" (addr) : "memory", "cc"); return val; }""" % { "cpp_type": cpp_type, "fun_name": fun_name, @@ -534,17 +535,17 @@ def gen_fetchop(fun_name, cpp_type, size assert size == 64 insns += fmt_insn("movq (%[addr]), %[res]") insns += 
fmt_insn("0: movq %[res], %[scratch]") insns += fmt_insn("OPq %[val], %[scratch]") insns += fmt_insn("lock; cmpxchgq %[scratch], (%[addr])") insns = insns.replace("OP", op) insns += fmt_insn("jnz 0b") return """ - inline %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) { + INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) { %(cpp_type)s res, scratch; asm volatile (%(insns)s : [res] "=&a" (res), [scratch] "=&r" (scratch) : [addr] "r" (addr), [val] "r"(val) : "memory", "cc"); return res; }""" % { "cpp_type": cpp_type, @@ -576,17 +577,17 @@ def gen_fetchop(fun_name, cpp_type, size if cpu_op == "or": cpu_op = "orr" if cpu_op == "xor": cpu_op = "eor" insns = insns.replace("OP", cpu_op) insns += fmt_insn("cbnz %w[scratch2], 0b") insns += fmt_insn("dmb ish") return """ - inline %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) { + INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) { %(cpp_type)s res; uintptr_t scratch1, scratch2; asm volatile (%(insns)s : [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2) : [addr] "r" (addr), [val] "r"(val) : "memory", "cc"); return res; }""" % { @@ -616,17 +617,17 @@ def gen_fetchop(fun_name, cpp_type, size cpu_op = "orr" if cpu_op == "xor": cpu_op = "eor" insns = insns.replace("OP", cpu_op) insns += fmt_insn("cmp %[scratch2], #1") insns += fmt_insn("beq 0b") insns += fmt_insn("dmb sy") return """ - inline %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) { + INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) { %(cpp_type)s res; uintptr_t scratch1, scratch2; asm volatile (%(insns)s : [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2) : [addr] "r" (addr), [val] "r"(val) : "memory", "cc"); return res; }""" % { @@ -660,33 +661,33 @@ def gen_copy(fun_name, cpp_type, size, u insns += fmt_insn("ldrb %w[scratch], [%x[src], OFFSET]") insns += fmt_insn("strb %w[scratch], [%x[dst], OFFSET]") else: assert size == 8 insns += fmt_insn("ldr %x[scratch], [%x[src], OFFSET]") insns += fmt_insn("str %x[scratch], [%x[dst], OFFSET]") elif cpu_arch == "arm": if size == 1: - insns += fmt_insn("ldrb %[scratch], [%[src], OFFSET]") - insns += fmt_insn("strb %[scratch], [%[dst], OFFSET]") + insns += fmt_insn("ldrb %[scratch], [%[src], #OFFSET]") + insns += fmt_insn("strb %[scratch], [%[dst], #OFFSET]") else: assert size == 4 - insns += fmt_insn("ldr %[scratch], [%[src], OFFSET]") - insns += fmt_insn("str %[scratch], [%[dst], OFFSET]") + insns += fmt_insn("ldr %[scratch], [%[src], #OFFSET]") + insns += fmt_insn("str %[scratch], [%[dst], #OFFSET]") else: raise Exception("Unexpected arch") insns = insns.replace("OFFSET", str(offset * size)) if direction == "down": offset += 1 else: offset -= 1 return """ - inline void %(fun_name)s(uint8_t* dst, const uint8_t* src) { + INLINE_ATTR void %(fun_name)s(uint8_t* dst, const uint8_t* src) { %(cpp_type)s* dst_ = reinterpret_cast<%(cpp_type)s*>(dst); const %(cpp_type)s* src_ = reinterpret_cast(src); %(cpp_type)s scratch; asm volatile (%(insns)s : [scratch] "=&r" (scratch) : [dst] "r" (dst_), [src] "r"(src_) : "memory"); }""" % { @@ -848,14 +849,21 @@ def generate_atomics_header(c_out): "constexpr size_t JS_GENERATED_ATOMICS_BLOCKSIZE = " + str(blocksize) + ";\n" ) contents += ( "constexpr size_t JS_GENERATED_ATOMICS_WORDSIZE = " + str(wordsize) + ";\n" ) + # Work around a GCC issue on 32-bit x86 by adding MOZ_NEVER_INLINE. + # See bug 1756347. 
+    if is_gcc and cpu_arch == "x86":
+        contents = contents.replace("INLINE_ATTR", "MOZ_NEVER_INLINE inline")
+    else:
+        contents = contents.replace("INLINE_ATTR", "inline")
+
     c_out.write(
         HEADER_TEMPLATE
         % {
             "contents": contents,
         }
     )
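
For reference (not part of the patch): a minimal, self-contained Python sketch of the substitution the final hunk performs. The generator now emits the INLINE_ATTR placeholder instead of a hard-coded `inline` and rewrites it once at the end, depending on the toolchain. The `template` string below is a hypothetical, abbreviated stand-in for the script's generated contents, not its real output.

    # Standalone sketch of the INLINE_ATTR expansion; `template` is a made-up
    # stand-in for the generated header contents.
    template = """
        INLINE_ATTR void AtomicFenceSeqCst() {
            asm volatile ("mfence\\n\\t" ::: "memory");
        }"""


    def expand_inline_attr(contents, is_gcc, cpu_arch):
        # Bug 1756347: work around a GCC issue on 32-bit x86 by forcing the
        # generated functions out of line; everywhere else, plain `inline`.
        if is_gcc and cpu_arch == "x86":
            return contents.replace("INLINE_ATTR", "MOZ_NEVER_INLINE inline")
        return contents.replace("INLINE_ATTR", "inline")


    print(expand_inline_attr(template, is_gcc=True, cpu_arch="x86"))
    print(expand_inline_attr(template, is_gcc=False, cpu_arch="x86_64"))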