author    Matthew Lugg <mlugg@mlugg.co.uk>  2023-11-19 16:19:06 +0000
committer GitHub <noreply@github.com>       2023-11-19 16:19:06 +0000
commit    6b1a823b2b30d9318c9877dbdbd3d02fa939fba0 (patch)
tree      6e5afdad2397ac7224119811583d19107b6e517a
parent    325e0f5f0e8a9ce2540ec3ec5b7cbbecac15257a (diff)
parent    9cf6c1ad11bb5f0247ff3458cba5f3bd156d1fb9 (diff)
Merge pull request #18017 from mlugg/var-never-mutated
compiler: add error for unnecessary use of 'var'
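
This merge makes the compiler reject a local declared with 'var' that is never
mutated. A minimal sketch of the kind of code the change flags; the exact
diagnostic wording is an assumption based on the 0.12.0-dev compiler, not taken
from this page:

    fn one() u32 {
        var x: u32 = 1; // never written after initialization
        return x;       // assumed diagnostic: "local variable is never mutated",
                        // with a note suggesting 'const'
    }

The fix throughout src/Package/Fetch/git.zig is the mechanical one: declare
such locals with 'const'.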
Diffstat (limited to 'src/Package/Fetch')
-rw-r--r--  src/Package/Fetch/git.zig | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/src/Package/Fetch/git.zig b/src/Package/Fetch/git.zig
index 1fdf5152d6..827b608cc6 100644
--- a/src/Package/Fetch/git.zig
+++ b/src/Package/Fetch/git.zig
@@ -83,7 +83,7 @@ pub const Repository = struct {
) !void {
try repository.odb.seekOid(commit_oid);
const tree_oid = tree_oid: {
- var commit_object = try repository.odb.readObject();
+ const commit_object = try repository.odb.readObject();
if (commit_object.type != .commit) return error.NotACommit;
break :tree_oid try getCommitTree(commit_object.data);
};
@@ -122,14 +122,14 @@ pub const Repository = struct {
var file = try dir.createFile(entry.name, .{});
defer file.close();
try repository.odb.seekOid(entry.oid);
- var file_object = try repository.odb.readObject();
+ const file_object = try repository.odb.readObject();
if (file_object.type != .blob) return error.InvalidFile;
try file.writeAll(file_object.data);
try file.sync();
},
.symlink => {
try repository.odb.seekOid(entry.oid);
- var symlink_object = try repository.odb.readObject();
+ const symlink_object = try repository.odb.readObject();
if (symlink_object.type != .blob) return error.InvalidFile;
const link_name = symlink_object.data;
dir.symLink(link_name, entry.name, .{}) catch |e| {
@@ -1230,7 +1230,7 @@ fn resolveDeltaChain(
const delta_offset = delta_offsets[i];
try pack.seekTo(delta_offset);
const delta_header = try EntryHeader.read(pack.reader());
- var delta_data = try readObjectRaw(allocator, pack.reader(), delta_header.uncompressedLength());
+ const delta_data = try readObjectRaw(allocator, pack.reader(), delta_header.uncompressedLength());
defer allocator.free(delta_data);
var delta_stream = std.io.fixedBufferStream(delta_data);
const delta_reader = delta_stream.reader();
@@ -1238,7 +1238,7 @@ fn resolveDeltaChain(
const expanded_size = try readSizeVarInt(delta_reader);
const expanded_alloc_size = std.math.cast(usize, expanded_size) orelse return error.ObjectTooLarge;
- var expanded_data = try allocator.alloc(u8, expanded_alloc_size);
+ const expanded_data = try allocator.alloc(u8, expanded_alloc_size);
errdefer allocator.free(expanded_data);
var expanded_delta_stream = std.io.fixedBufferStream(expanded_data);
var base_stream = std.io.fixedBufferStream(base_data);
@@ -1259,7 +1259,7 @@ fn readObjectRaw(allocator: Allocator, reader: anytype, size: u64) ![]u8 {
var buffered_reader = std.io.bufferedReader(reader);
var decompress_stream = try std.compress.zlib.decompressStream(allocator, buffered_reader.reader());
defer decompress_stream.deinit();
- var data = try allocator.alloc(u8, alloc_size);
+ const data = try allocator.alloc(u8, alloc_size);
errdefer allocator.free(data);
try decompress_stream.reader().readNoEof(data);
_ = decompress_stream.reader().readByte() catch |e| switch (e) {
@@ -1290,14 +1290,14 @@ fn expandDelta(base_object: anytype, delta_reader: anytype, writer: anytype) !vo
size2: bool,
size3: bool,
} = @bitCast(inst.value);
- var offset_parts: packed struct { offset1: u8, offset2: u8, offset3: u8, offset4: u8 } = .{
+ const offset_parts: packed struct { offset1: u8, offset2: u8, offset3: u8, offset4: u8 } = .{
.offset1 = if (available.offset1) try delta_reader.readByte() else 0,
.offset2 = if (available.offset2) try delta_reader.readByte() else 0,
.offset3 = if (available.offset3) try delta_reader.readByte() else 0,
.offset4 = if (available.offset4) try delta_reader.readByte() else 0,
};
const offset: u32 = @bitCast(offset_parts);
- var size_parts: packed struct { size1: u8, size2: u8, size3: u8 } = .{
+ const size_parts: packed struct { size1: u8, size2: u8, size3: u8 } = .{
.size1 = if (available.size1) try delta_reader.readByte() else 0,
.size2 = if (available.size2) try delta_reader.readByte() else 0,
.size3 = if (available.size3) try delta_reader.readByte() else 0,
@@ -1414,7 +1414,7 @@ test "packfile indexing and checkout" {
defer walker.deinit();
while (try walker.next()) |entry| {
if (entry.kind != .file) continue;
- var path = try testing.allocator.dupe(u8, entry.path);
+ const path = try testing.allocator.dupe(u8, entry.path);
errdefer testing.allocator.free(path);
mem.replaceScalar(u8, path, std.fs.path.sep, '/');
try actual_files.append(testing.allocator, path);
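
The last hunk is worth a note: 'path' can become 'const' even though
mem.replaceScalar mutates it on the following line, because 'const' freezes
only the slice binding, while the '[]u8' bytes it points at stay mutable. A
hypothetical snippet (not from this commit) illustrating the distinction:

    const std = @import("std");

    test "const slice binding, mutable bytes" {
        const buf = try std.testing.allocator.dupe(u8, "a\\b");
        defer std.testing.allocator.free(buf);
        // `buf` itself can never be reassigned, but its elements can be rewritten:
        std.mem.replaceScalar(u8, buf, '\\', '/');
        try std.testing.expectEqualStrings("a/b", buf);
    }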