about summary refs log tree commit diff
path: root/lib/std/debug.zig
diff options
context:
space:
mode:
author: Andrew Kelley <andrew@ziglang.org> 2025-08-22 18:30:21 -0700
committer: Andrew Kelley <andrew@ziglang.org> 2025-08-23 13:48:52 -0700
commit: d00cc100867bbb7a5e4305a5597cb45982c4593f (patch)
tree: 6c04d8c2d13a72d3a2117d6723494311d53018ed /lib/std/debug.zig
parent: 77c09d16f9d992b19bc7cb2d2497f8009e0250ba (diff)
download: zig-d00cc100867bbb7a5e4305a5597cb45982c4593f.tar.gz
zig-d00cc100867bbb7a5e4305a5597cb45982c4593f.zip
std.debug: delete MemoryAccessor
This API is based around the unsound idea that a process can perform checked virtual memory loads to prevent crashing. This depends on OS-specific APIs that may be unavailable, disabled, or impossible due to virtualization. It also makes collecting stack traces ridiculously slow, which is a problem for users of DebugAllocator - in other words, everybody, all the time. It also makes strace go from being superbly clean to being awful.
Diffstat (limited to 'lib/std/debug.zig')
-rw-r--r--  lib/std/debug.zig  13
1 file changed, 3 insertions(+), 10 deletions(-)
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index ebbcad627b..dd307a0254 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -14,7 +14,6 @@ const native_os = builtin.os.tag;
const native_endian = native_arch.endian();
const Writer = std.io.Writer;
-pub const MemoryAccessor = @import("debug/MemoryAccessor.zig");
pub const FixedBufferReader = @import("debug/FixedBufferReader.zig");
pub const Dwarf = @import("debug/Dwarf.zig");
pub const Pdb = @import("debug/Pdb.zig");
@@ -773,7 +772,6 @@ pub const StackIterator = struct {
first_address: ?usize,
// Last known value of the frame pointer register.
fp: usize,
- ma: MemoryAccessor = MemoryAccessor.init,
// When SelfInfo and a register context is available, this iterator can unwind
// stacks with frames that don't use a frame pointer (ie. -fomit-frame-pointer),
@@ -795,7 +793,7 @@ pub const StackIterator = struct {
::: .{ .memory = true });
}
- return StackIterator{
+ return .{
.first_address = first_address,
// TODO: this is a workaround for #16876
//.fp = fp orelse @frameAddress(),
@@ -825,7 +823,6 @@ pub const StackIterator = struct {
}
pub fn deinit(it: *StackIterator) void {
- it.ma.deinit();
if (have_ucontext and it.unwind_state != null) it.unwind_state.?.dwarf_context.deinit();
}
@@ -896,7 +893,6 @@ pub const StackIterator = struct {
unwind_state.debug_info.allocator,
module.base_address,
&unwind_state.dwarf_context,
- &it.ma,
unwind_info,
module.eh_frame,
)) |return_address| {
@@ -915,7 +911,6 @@ pub const StackIterator = struct {
di,
module.base_address,
&unwind_state.dwarf_context,
- &it.ma,
null,
);
} else return error.MissingDebugInfo;
@@ -951,7 +946,7 @@ pub const StackIterator = struct {
// Sanity check.
if (fp == 0 or !mem.isAligned(fp, @alignOf(usize))) return null;
- const new_fp = math.add(usize, it.ma.load(usize, fp) orelse return null, fp_bias) catch
+ const new_fp = math.add(usize, @as(*usize, @ptrFromInt(fp)).*, fp_bias) catch
return null;
// Sanity check: the stack grows down thus all the parent frames must be
@@ -959,8 +954,7 @@ pub const StackIterator = struct {
// A zero frame pointer often signals this is the last frame, that case
// is gracefully handled by the next call to next_internal.
if (new_fp != 0 and new_fp < it.fp) return null;
- const new_pc = it.ma.load(usize, math.add(usize, fp, pc_offset) catch return null) orelse
- return null;
+ const new_pc = @as(*usize, @ptrFromInt(math.add(usize, fp, pc_offset) catch return null)).*;
it.fp = new_fp;
@@ -1774,7 +1768,6 @@ pub inline fn inValgrind() bool {
test {
_ = &Dwarf;
- _ = &MemoryAccessor;
_ = &FixedBufferReader;
_ = &Pdb;
_ = &SelfInfo;