diff --git a/gdb.changes b/gdb.changes index f9950ce..fd23840 100644 --- a/gdb.changes +++ b/gdb.changes @@ -1,3 +1,15 @@ +------------------------------------------------------------------- +Mon Jan 20 14:22:22 UTC 2025 - Andreas Schwab + +- riscv-lrsc.patch: Fix stepping over atomic sequences + +------------------------------------------------------------------- +Tue Dec 17 16:11:55 UTC 2024 - Tom de Vries + +- Maintenance script qa.sh: + * Add PR29770 xfail (glibc). + * Add PR31229 kfail. + ------------------------------------------------------------------- Wed Dec 11 18:13:33 UTC 2024 - Tom de Vries diff --git a/gdb.spec b/gdb.spec index 692c2f6..abca7c0 100644 --- a/gdb.spec +++ b/gdb.spec @@ -1,7 +1,7 @@ # -# spec file +# spec file for package gdb # -# Copyright (c) 2024 SUSE LLC +# Copyright (c) 2025 SUSE LLC # Copyright (c) 2012 RedHat # # All modifications and additions to the file contributed by third parties @@ -341,6 +341,7 @@ Patch2075: gdb-python-make-gdb.unwindinfo.add_saved_register-mo-fixup.patch Patch2076: gdb-exp-redo-cast-handling-for-indirection.patch Patch2077: s390-provide-ibm-z16-arch14-instruction-descriptions.patch Patch2078: gdb-s390-add-arch14-record-replay-support.patch +Patch2079: riscv-lrsc.patch # Backports from master, not yet available in next release (GDB 16). @@ -830,6 +831,7 @@ find -name "*.info*"|xargs rm -f %patch -P 2076 -p1 %patch -P 2077 -p1 %patch -P 2078 -p1 +%patch -P 2079 -p1 %patch -P 2090 -p1 %patch -P 2091 -p1 diff --git a/qa.sh b/qa.sh index 14b2145..1e95056 100644 --- a/qa.sh +++ b/qa.sh @@ -644,8 +644,12 @@ kfail_powerpc64le=( # https://sourceware.org/bugzilla/show_bug.cgi?id=31827 "FAIL: gdb.base/gnu_vector.exp: call add_structvecs" - # Failures on opensuse Leap 15.6. To be reproduced and investigated. + # GLIBC problem with prctl on ppc64le. Fixed in 2.40. + # https://sourceware.org/bugzilla/show_bug.cgi?id=29770 + # https://bugzilla.suse.com/show_bug.cgi?id=1234665 "FAIL: gdb.ada/tasks.exp: info threads" + + # https://sourceware.org/bugzilla/show_bug.cgi?id=31229 "FAIL: gdb.linespec/explicit.exp: complete after -qualified -source: cmd complete .b -qualified -source thr." "FAIL: gdb.linespec/explicit.exp: complete after -qualified -source: tab complete .b -qualified -source thr." "FAIL: gdb.linespec/explicit.exp: complete after -source: cmd complete .b -source thr." diff --git a/riscv-lrsc.patch b/riscv-lrsc.patch new file mode 100644 index 0000000..83ea87c --- /dev/null +++ b/riscv-lrsc.patch @@ -0,0 +1,380 @@ +From b273287f4e7f111c1bb667267c6bde4fdd8418c7 Mon Sep 17 00:00:00 2001 +From: Yang Liu +Date: Sun, 14 Jan 2024 01:20:59 +0800 +Subject: [PATCH] gdb: RISC-V: Refine lr/sc sequence support + +Per RISC-V spec, the lr/sc sequence can consist of up to 16 instructions, and we +cannot insert breakpoints in the middle of this sequence. Before this, we only +detected a specific pattern (the most common one). This patch improves this part +and now supports more complex pattern detection. + +Signed-off-by: Yang Liu +Approved-By: Andrew Burgess +Reviewed-by: Palmer Dabbelt +--- + gdb/riscv-tdep.c | 290 ++++++++++++++++++++++++++++++++++++++++------- + 1 file changed, 251 insertions(+), 39 deletions(-) + +diff --git a/gdb/riscv-tdep.c b/gdb/riscv-tdep.c +index 55d21cc4ac5..9a7cfa35afd 100644 +--- a/gdb/riscv-tdep.c ++++ b/gdb/riscv-tdep.c +@@ -1578,8 +1578,34 @@ class riscv_insn + BLTU, + BGEU, + /* These are needed for stepping over atomic sequences. 
*/ +- LR, +- SC, ++ SLTI, ++ SLTIU, ++ XORI, ++ ORI, ++ ANDI, ++ SLLI, ++ SLLIW, ++ SRLI, ++ SRLIW, ++ SRAI, ++ SRAIW, ++ SUB, ++ SUBW, ++ SLL, ++ SLLW, ++ SLT, ++ SLTU, ++ XOR, ++ SRL, ++ SRLW, ++ SRA, ++ SRAW, ++ OR, ++ AND, ++ LR_W, ++ LR_D, ++ SC_W, ++ SC_D, + /* This instruction is used to do a syscall. */ + ECALL, + +@@ -1768,6 +1794,13 @@ class riscv_insn + m_imm.s = EXTRACT_CBTYPE_IMM (ival); + } + ++ void decode_ca_type_insn (enum opcode opcode, ULONGEST ival) ++ { ++ m_opcode = opcode; ++ m_rs1 = decode_register_index_short (ival, OP_SH_CRS1S); ++ m_rs2 = decode_register_index_short (ival, OP_SH_CRS2S); ++ } ++ + /* Fetch instruction from target memory at ADDR, return the content of + the instruction, and update LEN with the instruction length. */ + static ULONGEST fetch_instruction (struct gdbarch *gdbarch, +@@ -1882,14 +1915,62 @@ riscv_insn::decode (struct gdbarch *gdbarch, CORE_ADDR pc) + decode_b_type_insn (BLTU, ival); + else if (is_bgeu_insn (ival)) + decode_b_type_insn (BGEU, ival); ++ else if (is_slti_insn(ival)) ++ decode_i_type_insn (SLTI, ival); ++ else if (is_sltiu_insn(ival)) ++ decode_i_type_insn (SLTIU, ival); ++ else if (is_xori_insn(ival)) ++ decode_i_type_insn (XORI, ival); ++ else if (is_ori_insn(ival)) ++ decode_i_type_insn (ORI, ival); ++ else if (is_andi_insn(ival)) ++ decode_i_type_insn (ANDI, ival); ++ else if (is_slli_insn(ival)) ++ decode_i_type_insn (SLLI, ival); ++ else if (is_slliw_insn(ival)) ++ decode_i_type_insn (SLLIW, ival); ++ else if (is_srli_insn(ival)) ++ decode_i_type_insn (SRLI, ival); ++ else if (is_srliw_insn(ival)) ++ decode_i_type_insn (SRLIW, ival); ++ else if (is_srai_insn(ival)) ++ decode_i_type_insn (SRAI, ival); ++ else if (is_sraiw_insn(ival)) ++ decode_i_type_insn (SRAIW, ival); ++ else if (is_sub_insn(ival)) ++ decode_r_type_insn (SUB, ival); ++ else if (is_subw_insn(ival)) ++ decode_r_type_insn (SUBW, ival); ++ else if (is_sll_insn(ival)) ++ decode_r_type_insn (SLL, ival); ++ else if (is_sllw_insn(ival)) ++ decode_r_type_insn (SLLW, ival); ++ else if (is_slt_insn(ival)) ++ decode_r_type_insn (SLT, ival); ++ else if (is_sltu_insn(ival)) ++ decode_r_type_insn (SLTU, ival); ++ else if (is_xor_insn(ival)) ++ decode_r_type_insn (XOR, ival); ++ else if (is_srl_insn(ival)) ++ decode_r_type_insn (SRL, ival); ++ else if (is_srlw_insn(ival)) ++ decode_r_type_insn (SRLW, ival); ++ else if (is_sra_insn(ival)) ++ decode_r_type_insn (SRA, ival); ++ else if (is_sraw_insn(ival)) ++ decode_r_type_insn (SRAW, ival); ++ else if (is_or_insn(ival)) ++ decode_r_type_insn (OR, ival); ++ else if (is_and_insn(ival)) ++ decode_r_type_insn (AND, ival); + else if (is_lr_w_insn (ival)) +- decode_r_type_insn (LR, ival); ++ decode_r_type_insn (LR_W, ival); + else if (is_lr_d_insn (ival)) +- decode_r_type_insn (LR, ival); ++ decode_r_type_insn (LR_D, ival); + else if (is_sc_w_insn (ival)) +- decode_r_type_insn (SC, ival); ++ decode_r_type_insn (SC_W, ival); + else if (is_sc_d_insn (ival)) +- decode_r_type_insn (SC, ival); ++ decode_r_type_insn (SC_D, ival); + else if (is_ecall_insn (ival)) + decode_i_type_insn (ECALL, ival); + else if (is_ld_insn (ival)) +@@ -1944,6 +2025,24 @@ riscv_insn::decode (struct gdbarch *gdbarch, CORE_ADDR pc) + m_rd = decode_register_index (ival, OP_SH_CRS1S); + m_imm.s = EXTRACT_CITYPE_LUI_IMM (ival); + } ++ else if (is_c_srli_insn (ival)) ++ decode_cb_type_insn (SRLI, ival); ++ else if (is_c_srai_insn (ival)) ++ decode_cb_type_insn (SRAI, ival); ++ else if (is_c_andi_insn (ival)) ++ decode_cb_type_insn (ANDI, ival); ++ else if 
(is_c_sub_insn (ival)) ++ decode_ca_type_insn (SUB, ival); ++ else if (is_c_xor_insn (ival)) ++ decode_ca_type_insn (XOR, ival); ++ else if (is_c_or_insn (ival)) ++ decode_ca_type_insn (OR, ival); ++ else if (is_c_and_insn (ival)) ++ decode_ca_type_insn (AND, ival); ++ else if (is_c_subw_insn (ival)) ++ decode_ca_type_insn (SUBW, ival); ++ else if (is_c_addw_insn (ival)) ++ decode_ca_type_insn (ADDW, ival); + else if (is_c_li_insn (ival)) + decode_ci_type_insn (LI, ival); + /* C_SD and C_FSW have the same opcode. C_SD is RV64 and RV128 only, +@@ -4405,51 +4504,164 @@ riscv_next_pc (struct regcache *regcache, CORE_ADDR pc) + return next_pc; + } + ++/* Return true if INSN is not a control transfer instruction and is allowed to ++ appear in the middle of the lr/sc sequence. */ ++ ++static bool ++riscv_insn_is_non_cti_and_allowed_in_atomic_sequence ++ (const struct riscv_insn &insn) ++{ ++ switch (insn.opcode ()) ++ { ++ case riscv_insn::LUI: ++ case riscv_insn::AUIPC: ++ case riscv_insn::ADDI: ++ case riscv_insn::ADDIW: ++ case riscv_insn::SLTI: ++ case riscv_insn::SLTIU: ++ case riscv_insn::XORI: ++ case riscv_insn::ORI: ++ case riscv_insn::ANDI: ++ case riscv_insn::SLLI: ++ case riscv_insn::SLLIW: ++ case riscv_insn::SRLI: ++ case riscv_insn::SRLIW: ++ case riscv_insn::SRAI: ++ case riscv_insn::ADD: ++ case riscv_insn::ADDW: ++ case riscv_insn::SRAIW: ++ case riscv_insn::SUB: ++ case riscv_insn::SUBW: ++ case riscv_insn::SLL: ++ case riscv_insn::SLLW: ++ case riscv_insn::SLT: ++ case riscv_insn::SLTU: ++ case riscv_insn::XOR: ++ case riscv_insn::SRL: ++ case riscv_insn::SRLW: ++ case riscv_insn::SRA: ++ case riscv_insn::SRAW: ++ case riscv_insn::OR: ++ case riscv_insn::AND: ++ return true; ++ } ++ ++ return false; ++} ++ ++/* Return true if INSN is a direct branch instruction. */ ++ ++static bool ++riscv_insn_is_direct_branch (const struct riscv_insn &insn) ++{ ++ switch (insn.opcode ()) ++ { ++ case riscv_insn::BEQ: ++ case riscv_insn::BNE: ++ case riscv_insn::BLT: ++ case riscv_insn::BGE: ++ case riscv_insn::BLTU: ++ case riscv_insn::BGEU: ++ case riscv_insn::JAL: ++ return true; ++ } ++ ++ return false; ++} ++ + /* We can't put a breakpoint in the middle of a lr/sc atomic sequence, so look + for the end of the sequence and put the breakpoint there. */ + +-static bool +-riscv_next_pc_atomic_sequence (struct regcache *regcache, CORE_ADDR pc, +- CORE_ADDR *next_pc) ++static std::vector ++riscv_deal_with_atomic_sequence (struct regcache *regcache, CORE_ADDR pc) + { + struct gdbarch *gdbarch = regcache->arch (); + struct riscv_insn insn; +- CORE_ADDR cur_step_pc = pc; +- CORE_ADDR last_addr = 0; ++ CORE_ADDR cur_step_pc = pc, next_pc; ++ std::vector next_pcs; ++ bool found_valid_atomic_sequence = false; ++ enum riscv_insn::opcode lr_opcode; + + /* First instruction has to be a load reserved. */ + insn.decode (gdbarch, cur_step_pc); +- if (insn.opcode () != riscv_insn::LR) +- return false; +- cur_step_pc = cur_step_pc + insn.length (); ++ lr_opcode = insn.opcode (); ++ if (lr_opcode != riscv_insn::LR_D && lr_opcode != riscv_insn::LR_W) ++ return {}; ++ ++ /* The loop comprises only an LR/SC sequence and code to retry the sequence in ++ the case of failure, and must comprise at most 16 instructions placed ++ sequentially in memory. While our code tries to follow these restrictions, ++ it has the following limitations: ++ ++ (a) We expect the loop to start with an LR and end with a BNE. ++ Apparently this does not cover all cases for a valid sequence. 
++ (b) The atomic limitations only apply to the code that is actually ++ executed, so here again it's overly restrictive. ++ (c) The lr/sc are required to be for the same target address, but this ++ information is only known at runtime. Same as (b), in order to check ++ this we will end up needing to simulate the sequence, which is more ++ complicated than what we're doing right now. ++ ++ Also note that we only expect a maximum of (16-2) instructions in the for ++ loop as we have assumed the presence of LR and BNE at the beginning and end ++ respectively. */ ++ for (int insn_count = 0; insn_count < 16 - 2; ++insn_count) ++ { ++ cur_step_pc += insn.length (); ++ insn.decode (gdbarch, cur_step_pc); + +- /* Next instruction should be branch to exit. */ +- insn.decode (gdbarch, cur_step_pc); +- if (insn.opcode () != riscv_insn::BNE) +- return false; +- last_addr = cur_step_pc + insn.imm_signed (); +- cur_step_pc = cur_step_pc + insn.length (); ++ /* The dynamic code executed between lr/sc can only contain instructions ++ from the base I instruction set, excluding loads, stores, backward ++ jumps, taken backward branches, JALR, FENCE, FENCE.I, and SYSTEM ++ instructions. If the C extension is supported, then compressed forms ++ of the aforementioned I instructions are also permitted. */ + +- /* Next instruction should be store conditional. */ +- insn.decode (gdbarch, cur_step_pc); +- if (insn.opcode () != riscv_insn::SC) +- return false; +- cur_step_pc = cur_step_pc + insn.length (); ++ if (riscv_insn_is_non_cti_and_allowed_in_atomic_sequence (insn)) ++ continue; ++ /* Look for a conditional branch instruction, check if it's taken forward ++ or not. */ ++ else if (riscv_insn_is_direct_branch (insn)) ++ { ++ if (insn.imm_signed () > 0) ++ { ++ next_pc = cur_step_pc + insn.imm_signed (); ++ next_pcs.push_back (next_pc); ++ } ++ else ++ break; ++ } ++ /* Look for a paired SC instruction which closes the atomic sequence. */ ++ else if ((insn.opcode () == riscv_insn::SC_D ++ && lr_opcode == riscv_insn::LR_D) ++ || (insn.opcode () == riscv_insn::SC_W ++ && lr_opcode == riscv_insn::LR_W)) ++ found_valid_atomic_sequence = true; ++ else ++ break; ++ } ++ ++ if (!found_valid_atomic_sequence) ++ return {}; + + /* Next instruction should be branch to start. */ + insn.decode (gdbarch, cur_step_pc); + if (insn.opcode () != riscv_insn::BNE) +- return false; ++ return {}; + if (pc != (cur_step_pc + insn.imm_signed ())) +- return false; +- cur_step_pc = cur_step_pc + insn.length (); ++ return {}; ++ cur_step_pc += insn.length (); + +- /* We should now be at the end of the sequence. */ +- if (cur_step_pc != last_addr) +- return false; ++ /* Remove all PCs that jump within the sequence. 
*/ ++ auto matcher = [cur_step_pc] (const CORE_ADDR addr) -> bool ++ { ++ return addr < cur_step_pc; ++ }; ++ auto it = std::remove_if (next_pcs.begin (), next_pcs.end (), matcher); ++ next_pcs.erase (it, next_pcs.end ()); + +- *next_pc = cur_step_pc; +- return true; ++ next_pc = cur_step_pc; ++ next_pcs.push_back (next_pc); ++ return next_pcs; + } + + /* This is called just before we want to resume the inferior, if we want to +@@ -4459,14 +4671,14 @@ riscv_next_pc_atomic_sequence (struct regcache *regcache, CORE_ADDR pc, + std::vector + riscv_software_single_step (struct regcache *regcache) + { +- CORE_ADDR pc, next_pc; +- +- pc = regcache_read_pc (regcache); ++ CORE_ADDR cur_pc = regcache_read_pc (regcache), next_pc; ++ std::vector next_pcs ++ = riscv_deal_with_atomic_sequence (regcache, cur_pc); + +- if (riscv_next_pc_atomic_sequence (regcache, pc, &next_pc)) +- return {next_pc}; ++ if (!next_pcs.empty ()) ++ return next_pcs; + +- next_pc = riscv_next_pc (regcache, pc); ++ next_pc = riscv_next_pc (regcache, cur_pc); + + return {next_pc}; + } +-- +2.48.0 +
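
Background note (not part of the diff above): an lr/sc retry loop is what ordinary C++ atomics compile down to on RISC-V, and a software single-step breakpoint planted between the load-reserved and the store-conditional makes the reservation fail on every retry, so the inferior never leaves the loop. The sketch below is a hypothetical, standalone illustration; the function and variable names (bounded_add, counter) and the register assignments in the assembly comment are invented for this example and appear nowhere in the patch. On RV64GC, compilers typically lower the compare_exchange into the canonical lr.d/bne/sc.d/bnez sequence shown in the comment, which is the four-instruction pattern the old riscv_next_pc_atomic_sequence recognised; the new riscv_deal_with_atomic_sequence generalises it by also admitting plain ALU instructions inside the loop and by collecting every forward-branch exit as an additional breakpoint location.

    // Hypothetical example, not part of the patch: a bounded atomic add
    // implemented with a compare-exchange retry loop.
    #include <atomic>
    #include <cstdint>

    std::atomic<int64_t> counter{0};

    // Add DELTA to COUNTER unless the current value has reached LIMIT.
    bool bounded_add (int64_t delta, int64_t limit)
    {
      int64_t old = counter.load (std::memory_order_relaxed);
      do
        {
          if (old >= limit)
            return false;
          /* A typical RV64 lowering of the compare_exchange below is

               1: lr.d   a5, (a0)      # load-reserved opens the sequence
                  bne    a5, a1, 2f    # forward branch out if value changed
                  sc.d   a4, a2, (a0)  # store-conditional closes it
                  bnez   a4, 1b        # backward branch: retry on SC failure
               2:

             GDB must not place a single-step breakpoint between the lr.d
             and the sc.d; instead it breaks at label 2 and just past the
             closing bnez, which is what the patched code computes.  */
        }
      while (!counter.compare_exchange_weak (old, old + delta,
                                             std::memory_order_relaxed));
      return true;
    }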