author    Andrew Kelley <superjoe30@gmail.com>  2016-12-18 19:40:26 -0500
committer Andrew Kelley <superjoe30@gmail.com>  2016-12-18 19:40:26 -0500
commit    a71fbe49cbbf068e00300533d5f3874efadb8c18 (patch)
tree      57c6c4150b910efce96251b14003d46f00cf6afa /std
parent    f12fbce0f51d58b429afd8a359aeb8a3b27a4eb0 (diff)
IR: add FnProto instruction
Diffstat (limited to 'std')
-rw-r--r--  std/debug.zig     |  36
-rw-r--r--  std/elf.zig       |  16
-rw-r--r--  std/hash_map.zig  | 375
-rw-r--r--  std/list.zig      |  70
-rw-r--r--  std/mem.zig       |   4
-rw-r--r--  std/net.zig       |   8
-rw-r--r--  std/rand.zig      |  96
7 files changed, 307 insertions, 298 deletions
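The std changes in this commit track the container-declaration syntax migration that accompanies the compiler work: top-level struct Foo { ... } and enum Foo { ... } declarations become const Foo = struct { ... }; and const Foo = enum { ... };, and parameterized containers such as struct List(T: type) become functions that take inline parameters and return a type. A minimal sketch of the two forms in the same 2016-era syntax used by this diff (the Point and Pair names are illustrative, not from the commit):

    // Old form, as removed throughout std by this commit:
    struct Point {
        x: i32,
        y: i32,
    }

    pub struct Pair(T: type) {
        first: T,
        second: T,
    }

    // New form, as added by this commit: a named type is a const bound to a
    // struct expression, and a generic container is a function returning a type.
    const Point = struct {
        x: i32,
        y: i32,
    };

    pub fn Pair(inline T: type) -> type {
        struct {
            first: T,
            second: T,
        }
    }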
diff --git a/std/debug.zig b/std/debug.zig
index 0001bc11ac..2f78a6c7ce 100644
--- a/std/debug.zig
+++ b/std/debug.zig
@@ -69,7 +69,7 @@ pub fn writeStackTrace(out_stream: &io.OutStream) -> %void {
}
}
-struct ElfStackTrace {
+const ElfStackTrace = struct {
self_exe_stream: io.InStream,
elf: elf.Elf,
debug_info: &elf.SectionHeader,
@@ -77,36 +77,36 @@ struct ElfStackTrace {
debug_str: &elf.SectionHeader,
abbrev_table_list: List(AbbrevTableHeader),
compile_unit_list: List(CompileUnit),
-}
+};
-struct CompileUnit {
+const CompileUnit = struct {
is_64: bool,
die: &Die,
pc_start: u64,
pc_end: u64,
-}
+};
const AbbrevTable = List(AbbrevTableEntry);
-struct AbbrevTableHeader {
+const AbbrevTableHeader = struct {
// offset from .debug_abbrev
offset: u64,
table: AbbrevTable,
-}
+};
-struct AbbrevTableEntry {
+const AbbrevTableEntry = struct {
has_children: bool,
abbrev_code: u64,
tag_id: u64,
attrs: List(AbbrevAttr),
-}
+};
-struct AbbrevAttr {
+const AbbrevAttr = struct {
attr_id: u64,
form_id: u64,
-}
+};
-enum FormValue {
+const FormValue = enum {
Address: u64,
Block: []u8,
Const: Constant,
@@ -118,9 +118,9 @@ enum FormValue {
RefSig8: u64,
String: []u8,
StrPtr: u64,
-}
+};
-struct Constant {
+const Constant = struct {
payload: []u8,
signed: bool,
@@ -131,17 +131,17 @@ struct Constant {
return error.InvalidDebugInfo;
return mem.sliceAsInt(self.payload, false, u64);
}
-}
+};
-struct Die {
+const Die = struct {
tag_id: u64,
has_children: bool,
attrs: List(Attr),
- struct Attr {
+ const Attr = struct {
id: u64,
value: FormValue,
- }
+ };
fn getAttr(self: &const Die, id: u64) -> ?&const FormValue {
for (self.attrs.toSlice()) |*attr| {
@@ -175,7 +175,7 @@ struct Die {
else => error.InvalidDebugInfo,
}
}
-}
+};
fn readString(in_stream: &io.InStream) -> %[]u8 {
var buf = List(u8).init(&global_allocator);
diff --git a/std/elf.zig b/std/elf.zig
index cf20cf3b63..5ca6242ef4 100644
--- a/std/elf.zig
+++ b/std/elf.zig
@@ -30,14 +30,14 @@ pub const SHT_HIPROC = 0x7fffffff;
pub const SHT_LOUSER = 0x80000000;
pub const SHT_HIUSER = 0xffffffff;
-pub enum FileType {
+pub const FileType = enum {
Relocatable,
Executable,
Shared,
Core,
-}
+};
-pub enum Arch {
+pub const Arch = enum {
Sparc,
x86,
Mips,
@@ -47,9 +47,9 @@ pub enum Arch {
IA_64,
x86_64,
AArch64,
-}
+};
-pub struct SectionHeader {
+pub const SectionHeader = struct {
name: u32,
sh_type: u32,
flags: u64,
@@ -60,9 +60,9 @@ pub struct SectionHeader {
info: u32,
addr_align: u64,
ent_size: u64,
-}
+};
-pub struct Elf {
+pub const Elf = struct {
in_stream: &io.InStream,
auto_close_stream: bool,
is_64: bool,
@@ -258,4 +258,4 @@ pub struct Elf {
pub fn seekToSection(elf: &Elf, section: &SectionHeader) -> %void {
%return elf.in_stream.seekTo(section.offset);
}
-}
+};
diff --git a/std/hash_map.zig b/std/hash_map.zig
index 1428954220..35ede91981 100644
--- a/std/hash_map.zig
+++ b/std/hash_map.zig
@@ -13,220 +13,225 @@ pub fn HashMap(inline K: type, inline V: type, inline hash: fn(key: K)->u32,
SmallHashMap(K, V, hash, eql, @sizeOf(usize))
}
-pub struct SmallHashMap(K: type, V: type, hash: fn(key: K)->u32, eql: fn(a: K, b: K)->bool, static_size: usize) {
- entries: []Entry,
- size: usize,
- max_distance_from_start_index: usize,
- allocator: &Allocator,
- // if the hash map is small enough, we use linear search through these
- // entries instead of allocating memory
- prealloc_entries: [static_size]Entry,
- // this is used to detect bugs where a hashtable is edited while an iterator is running.
- modification_count: debug_u32,
-
- const Self = this;
-
- pub struct Entry {
- used: bool,
- distance_from_start_index: usize,
- key: K,
- value: V,
- }
-
- pub struct Iterator {
- hm: &Self,
- // how many items have we returned
- count: usize,
- // iterator through the entry array
- index: usize,
- // used to detect concurrent modification
- initial_modification_count: debug_u32,
+pub fn SmallHashMap(inline K: type, inline V: type,
+ inline hash: fn(key: K)->u32, inline eql: fn(a: K, b: K)->bool,
+ inline static_size: usize) -> type
+{
+ struct {
+ entries: []Entry,
+ size: usize,
+ max_distance_from_start_index: usize,
+ allocator: &Allocator,
+ // if the hash map is small enough, we use linear search through these
+ // entries instead of allocating memory
+ prealloc_entries: [static_size]Entry,
+ // this is used to detect bugs where a hashtable is edited while an iterator is running.
+ modification_count: debug_u32,
+
+ const Self = this;
+
+ pub const Entry = struct {
+ used: bool,
+ distance_from_start_index: usize,
+ key: K,
+ value: V,
+ };
- pub fn next(it: &Iterator) -> ?&Entry {
- if (want_modification_safety) {
- assert(it.initial_modification_count == it.hm.modification_count); // concurrent modification
- }
- if (it.count >= it.hm.size) return null;
- while (it.index < it.hm.entries.len; it.index += 1) {
- const entry = &it.hm.entries[it.index];
- if (entry.used) {
- it.index += 1;
- it.count += 1;
- return entry;
+ pub const Iterator = struct {
+ hm: &Self,
+ // how many items have we returned
+ count: usize,
+ // iterator through the entry array
+ index: usize,
+ // used to detect concurrent modification
+ initial_modification_count: debug_u32,
+
+ pub fn next(it: &Iterator) -> ?&Entry {
+ if (want_modification_safety) {
+ assert(it.initial_modification_count == it.hm.modification_count); // concurrent modification
}
+ if (it.count >= it.hm.size) return null;
+ while (it.index < it.hm.entries.len; it.index += 1) {
+ const entry = &it.hm.entries[it.index];
+ if (entry.used) {
+ it.index += 1;
+ it.count += 1;
+ return entry;
+ }
+ }
+ @unreachable() // no next item
}
- @unreachable() // no next item
- }
- }
-
- pub fn init(hm: &Self, allocator: &Allocator) {
- hm.entries = hm.prealloc_entries[0...];
- hm.allocator = allocator;
- hm.size = 0;
- hm.max_distance_from_start_index = 0;
- hm.prealloc_entries = zeroes; // sets used to false for all entries
- hm.modification_count = zeroes;
- }
+ };
- pub fn deinit(hm: &Self) {
- if (hm.entries.ptr != &hm.prealloc_entries[0]) {
- hm.allocator.free(Entry, hm.entries);
+ pub fn init(hm: &Self, allocator: &Allocator) {
+ hm.entries = hm.prealloc_entries[0...];
+ hm.allocator = allocator;
+ hm.size = 0;
+ hm.max_distance_from_start_index = 0;
+ hm.prealloc_entries = zeroes; // sets used to false for all entries
+ hm.modification_count = zeroes;
}
- }
- pub fn clear(hm: &Self) {
- for (hm.entries) |*entry| {
- entry.used = false;
+ pub fn deinit(hm: &Self) {
+ if (hm.entries.ptr != &hm.prealloc_entries[0]) {
+ hm.allocator.free(Entry, hm.entries);
+ }
}
- hm.size = 0;
- hm.max_distance_from_start_index = 0;
- hm.incrementModificationCount();
- }
-
- pub fn put(hm: &Self, key: K, value: V) -> %void {
- hm.incrementModificationCount();
- const resize = if (hm.entries.ptr == &hm.prealloc_entries[0]) {
- // preallocated entries table is full
- hm.size == hm.entries.len
- } else {
- // if we get too full (60%), double the capacity
- hm.size * 5 >= hm.entries.len * 3
- };
- if (resize) {
- const old_entries = hm.entries;
- %return hm.initCapacity(hm.entries.len * 2);
- // dump all of the old elements into the new table
- for (old_entries) |*old_entry| {
- if (old_entry.used) {
- hm.internalPut(old_entry.key, old_entry.value);
- }
- }
- if (old_entries.ptr != &hm.prealloc_entries[0]) {
- hm.allocator.free(Entry, old_entries);
+ pub fn clear(hm: &Self) {
+ for (hm.entries) |*entry| {
+ entry.used = false;
}
+ hm.size = 0;
+ hm.max_distance_from_start_index = 0;
+ hm.incrementModificationCount();
}
- hm.internalPut(key, value);
- }
-
- pub fn get(hm: &Self, key: K) -> ?&Entry {
- return hm.internalGet(key);
- }
+ pub fn put(hm: &Self, key: K, value: V) -> %void {
+ hm.incrementModificationCount();
- pub fn remove(hm: &Self, key: K) {
- hm.incrementModificationCount();
- const start_index = hm.keyToIndex(key);
- {var roll_over: usize = 0; while (roll_over <= hm.max_distance_from_start_index; roll_over += 1) {
- const index = (start_index + roll_over) % hm.entries.len;
- var entry = &hm.entries[index];
+ const resize = if (hm.entries.ptr == &hm.prealloc_entries[0]) {
+ // preallocated entries table is full
+ hm.size == hm.entries.len
+ } else {
+ // if we get too full (60%), double the capacity
+ hm.size * 5 >= hm.entries.len * 3
+ };
+ if (resize) {
+ const old_entries = hm.entries;
+ %return hm.initCapacity(hm.entries.len * 2);
+ // dump all of the old elements into the new table
+ for (old_entries) |*old_entry| {
+ if (old_entry.used) {
+ hm.internalPut(old_entry.key, old_entry.value);
+ }
+ }
+ if (old_entries.ptr != &hm.prealloc_entries[0]) {
+ hm.allocator.free(Entry, old_entries);
+ }
+ }
- assert(entry.used); // key not found
+ hm.internalPut(key, value);
+ }
- if (!eql(entry.key, key)) continue;
+ pub fn get(hm: &Self, key: K) -> ?&Entry {
+ return hm.internalGet(key);
+ }
- while (roll_over < hm.entries.len; roll_over += 1) {
- const next_index = (start_index + roll_over + 1) % hm.entries.len;
- const next_entry = &hm.entries[next_index];
- if (!next_entry.used || next_entry.distance_from_start_index == 0) {
- entry.used = false;
- hm.size -= 1;
- return;
+ pub fn remove(hm: &Self, key: K) {
+ hm.incrementModificationCount();
+ const start_index = hm.keyToIndex(key);
+ {var roll_over: usize = 0; while (roll_over <= hm.max_distance_from_start_index; roll_over += 1) {
+ const index = (start_index + roll_over) % hm.entries.len;
+ var entry = &hm.entries[index];
+
+ assert(entry.used); // key not found
+
+ if (!eql(entry.key, key)) continue;
+
+ while (roll_over < hm.entries.len; roll_over += 1) {
+ const next_index = (start_index + roll_over + 1) % hm.entries.len;
+ const next_entry = &hm.entries[next_index];
+ if (!next_entry.used || next_entry.distance_from_start_index == 0) {
+ entry.used = false;
+ hm.size -= 1;
+ return;
+ }
+ *entry = *next_entry;
+ entry.distance_from_start_index -= 1;
+ entry = next_entry;
}
- *entry = *next_entry;
- entry.distance_from_start_index -= 1;
- entry = next_entry;
- }
- @unreachable() // shifting everything in the table
- }}
- @unreachable() // key not found
- }
+ @unreachable() // shifting everything in the table
+ }}
+ @unreachable() // key not found
+ }
- pub fn entryIterator(hm: &Self) -> Iterator {
- return Iterator {
- .hm = hm,
- .count = 0,
- .index = 0,
- .initial_modification_count = hm.modification_count,
- };
- }
+ pub fn entryIterator(hm: &Self) -> Iterator {
+ return Iterator {
+ .hm = hm,
+ .count = 0,
+ .index = 0,
+ .initial_modification_count = hm.modification_count,
+ };
+ }
- fn initCapacity(hm: &Self, capacity: usize) -> %void {
- hm.entries = %return hm.allocator.alloc(Entry, capacity);
- hm.size = 0;
- hm.max_distance_from_start_index = 0;
- for (hm.entries) |*entry| {
- entry.used = false;
+ fn initCapacity(hm: &Self, capacity: usize) -> %void {
+ hm.entries = %return hm.allocator.alloc(Entry, capacity);
+ hm.size = 0;
+ hm.max_distance_from_start_index = 0;
+ for (hm.entries) |*entry| {
+ entry.used = false;
+ }
}
- }
- fn incrementModificationCount(hm: &Self) {
- if (want_modification_safety) {
- hm.modification_count +%= 1;
+ fn incrementModificationCount(hm: &Self) {
+ if (want_modification_safety) {
+ hm.modification_count +%= 1;
+ }
}
- }
- fn internalPut(hm: &Self, orig_key: K, orig_value: V) {
- var key = orig_key;
- var value = orig_value;
- const start_index = hm.keyToIndex(key);
- var roll_over: usize = 0;
- var distance_from_start_index: usize = 0;
- while (roll_over < hm.entries.len; {roll_over += 1; distance_from_start_index += 1}) {
- const index = (start_index + roll_over) % hm.entries.len;
- const entry = &hm.entries[index];
-
- if (entry.used && !eql(entry.key, key)) {
- if (entry.distance_from_start_index < distance_from_start_index) {
- // robin hood to the rescue
- const tmp = *entry;
- hm.max_distance_from_start_index = math.max(hm.max_distance_from_start_index,
- distance_from_start_index);
- *entry = Entry {
- .used = true,
- .distance_from_start_index = distance_from_start_index,
- .key = key,
- .value = value,
- };
- key = tmp.key;
- value = tmp.value;
- distance_from_start_index = tmp.distance_from_start_index;
+ fn internalPut(hm: &Self, orig_key: K, orig_value: V) {
+ var key = orig_key;
+ var value = orig_value;
+ const start_index = hm.keyToIndex(key);
+ var roll_over: usize = 0;
+ var distance_from_start_index: usize = 0;
+ while (roll_over < hm.entries.len; {roll_over += 1; distance_from_start_index += 1}) {
+ const index = (start_index + roll_over) % hm.entries.len;
+ const entry = &hm.entries[index];
+
+ if (entry.used && !eql(entry.key, key)) {
+ if (entry.distance_from_start_index < distance_from_start_index) {
+ // robin hood to the rescue
+ const tmp = *entry;
+ hm.max_distance_from_start_index = math.max(hm.max_distance_from_start_index,
+ distance_from_start_index);
+ *entry = Entry {
+ .used = true,
+ .distance_from_start_index = distance_from_start_index,
+ .key = key,
+ .value = value,
+ };
+ key = tmp.key;
+ value = tmp.value;
+ distance_from_start_index = tmp.distance_from_start_index;
+ }
+ continue;
}
- continue;
- }
- if (!entry.used) {
- // adding an entry. otherwise overwriting old value with
- // same key
- hm.size += 1;
- }
+ if (!entry.used) {
+ // adding an entry. otherwise overwriting old value with
+ // same key
+ hm.size += 1;
+ }
- hm.max_distance_from_start_index = math.max(distance_from_start_index, hm.max_distance_from_start_index);
- *entry = Entry {
- .used = true,
- .distance_from_start_index = distance_from_start_index,
- .key = key,
- .value = value,
- };
- return;
+ hm.max_distance_from_start_index = math.max(distance_from_start_index, hm.max_distance_from_start_index);
+ *entry = Entry {
+ .used = true,
+ .distance_from_start_index = distance_from_start_index,
+ .key = key,
+ .value = value,
+ };
+ return;
+ }
+ @unreachable() // put into a full map
}
- @unreachable() // put into a full map
- }
- fn internalGet(hm: &Self, key: K) -> ?&Entry {
- const start_index = hm.keyToIndex(key);
- {var roll_over: usize = 0; while (roll_over <= hm.max_distance_from_start_index; roll_over += 1) {
- const index = (start_index + roll_over) % hm.entries.len;
- const entry = &hm.entries[index];
+ fn internalGet(hm: &Self, key: K) -> ?&Entry {
+ const start_index = hm.keyToIndex(key);
+ {var roll_over: usize = 0; while (roll_over <= hm.max_distance_from_start_index; roll_over += 1) {
+ const index = (start_index + roll_over) % hm.entries.len;
+ const entry = &hm.entries[index];
- if (!entry.used) return null;
- if (eql(entry.key, key)) return entry;
- }}
- return null;
- }
+ if (!entry.used) return null;
+ if (eql(entry.key, key)) return entry;
+ }}
+ return null;
+ }
- fn keyToIndex(hm: &Self, key: K) -> usize {
- return usize(hash(key)) % hm.entries.len;
+ fn keyToIndex(hm: &Self, key: K) -> usize {
+ return usize(hash(key)) % hm.entries.len;
+ }
}
}
diff --git a/std/list.zig b/std/list.zig
index 0c5bf92da6..4269f7b1e0 100644
--- a/std/list.zig
+++ b/std/list.zig
@@ -3,49 +3,51 @@ const assert = debug.assert;
const mem = @import("mem.zig");
const Allocator = mem.Allocator;
-pub struct List(T: type) {
- const Self = this;
+pub fn List(inline T: type) -> type{
+ struct {
+ const Self = this;
- items: []T,
- len: usize,
- allocator: &Allocator,
+ items: []T,
+ len: usize,
+ allocator: &Allocator,
- pub fn init(allocator: &Allocator) -> Self {
- Self {
- .items = zeroes,
- .len = 0,
- .allocator = allocator,
+ pub fn init(allocator: &Allocator) -> Self {
+ Self {
+ .items = zeroes,
+ .len = 0,
+ .allocator = allocator,
+ }
}
- }
- pub fn deinit(l: &Self) {
- l.allocator.free(T, l.items);
- }
+ pub fn deinit(l: &Self) {
+ l.allocator.free(T, l.items);
+ }
- pub fn toSlice(l: &Self) -> []T {
- return l.items[0...l.len];
- }
+ pub fn toSlice(l: &Self) -> []T {
+ return l.items[0...l.len];
+ }
- pub fn append(l: &Self, item: T) -> %void {
- const new_length = l.len + 1;
- %return l.ensureCapacity(new_length);
- l.items[l.len] = item;
- l.len = new_length;
- }
+ pub fn append(l: &Self, item: T) -> %void {
+ const new_length = l.len + 1;
+ %return l.ensureCapacity(new_length);
+ l.items[l.len] = item;
+ l.len = new_length;
+ }
- pub fn resize(l: &Self, new_len: usize) -> %void {
- %return l.ensureCapacity(new_len);
- l.len = new_len;
- }
+ pub fn resize(l: &Self, new_len: usize) -> %void {
+ %return l.ensureCapacity(new_len);
+ l.len = new_len;
+ }
- pub fn ensureCapacity(l: &Self, new_capacity: usize) -> %void {
- var better_capacity = l.items.len;
- if (better_capacity >= new_capacity) return;
- while (true) {
- better_capacity += better_capacity / 2 + 8;
- if (better_capacity >= new_capacity) break;
+ pub fn ensureCapacity(l: &Self, new_capacity: usize) -> %void {
+ var better_capacity = l.items.len;
+ if (better_capacity >= new_capacity) return;
+ while (true) {
+ better_capacity += better_capacity / 2 + 8;
+ if (better_capacity >= new_capacity) break;
+ }
+ l.items = %return l.allocator.realloc(T, l.items, better_capacity);
}
- l.items = %return l.allocator.realloc(T, l.items, better_capacity);
}
}
diff --git a/std/mem.zig b/std/mem.zig
index 9f2cefd62e..03d131c95d 100644
--- a/std/mem.zig
+++ b/std/mem.zig
@@ -8,7 +8,7 @@ pub const Cmp = math.Cmp;
pub error NoMem;
pub type Context = u8;
-pub struct Allocator {
+pub const Allocator = struct {
allocFn: fn (self: &Allocator, n: usize) -> %[]u8,
reallocFn: fn (self: &Allocator, old_mem: []u8, new_size: usize) -> %[]u8,
freeFn: fn (self: &Allocator, mem: []u8),
@@ -39,7 +39,7 @@ pub struct Allocator {
fn free(self: &Allocator, inline T: type, mem: []T) {
self.freeFn(self, ([]u8)(mem));
}
-}
+};
/// Copy all of source into dest at position 0.
/// dest.len must be >= source.len.
diff --git a/std/net.zig b/std/net.zig
index 2b52e8bf97..d6ff2b7cff 100644
--- a/std/net.zig
+++ b/std/net.zig
@@ -13,7 +13,7 @@ pub error NoMem;
pub error NotSocket;
pub error BadFd;
-struct Connection {
+const Connection = struct {
socket_fd: i32,
pub fn send(c: Connection, buf: []const u8) -> %usize {
@@ -56,14 +56,14 @@ struct Connection {
else => return error.Unexpected,
}
}
-}
+};
-struct Address {
+const Address = struct {
family: u16,
scope_id: u32,
addr: [16]u8,
sort_key: i32,
-}
+};
pub fn lookup(hostname: []const u8, out_addrs: []Address) -> %[]Address {
if (hostname.len == 0) {
diff --git a/std/rand.zig b/std/rand.zig
index 16af7c490d..15d0c52a36 100644
--- a/std/rand.zig
+++ b/std/rand.zig
@@ -18,7 +18,7 @@ pub const MT19937_64 = MersenneTwister(
43, 6364136223846793005);
/// Use `init` to initialize this state.
-pub struct Rand {
+pub const Rand = struct {
const Rng = if (@sizeOf(usize) >= 8) MT19937_64 else MT19937_32;
rng: Rng,
@@ -91,65 +91,67 @@ pub struct Rand {
};
return T(r.rangeUnsigned(int_type, 0, precision)) / T(precision);
}
-}
-
-struct MersenneTwister(
- int: type, n: usize, m: usize, r: int,
- a: int,
- u: int, d: int,
- s: int, b: int,
- t: int, c: int,
- l: int, f: int)
+};
+
+fn MersenneTwister(
+ inline int: type, inline n: usize, inline m: usize, inline r: int,
+ inline a: int,
+ inline u: int, inline d: int,
+ inline s: int, inline b: int,
+ inline t: int, inline c: int,
+ inline l: int, inline f: int) -> type
{
- const Self = this;
+ struct {
+ const Self = this;
- array: [n]int,
- index: usize,
+ array: [n]int,
+ index: usize,
- pub fn init(mt: &Self, seed: int) {
- mt.index = n;
+ pub fn init(mt: &Self, seed: int) {
+ mt.index = n;
- var prev_value = seed;
- mt.array[0] = prev_value;
- {var i: usize = 1; while (i < n; i += 1) {
- prev_value = int(i) +% f *% (prev_value ^ (prev_value >> (int.bit_count - 2)));
- mt.array[i] = prev_value;
- }};
- }
+ var prev_value = seed;
+ mt.array[0] = prev_value;
+ {var i: usize = 1; while (i < n; i += 1) {
+ prev_value = int(i) +% f *% (prev_value ^ (prev_value >> (int.bit_count - 2)));
+ mt.array[i] = prev_value;
+ }};
+ }
- pub fn get(mt: &Self) -> int {
- const mag01 = []int{0, a};
- const LM: int = (1 << r) - 1;
- const UM = ~LM;
+ pub fn get(mt: &Self) -> int {
+ const mag01 = []int{0, a};
+ const LM: int = (1 << r) - 1;
+ const UM = ~LM;
- if (mt.index >= n) {
- var i: usize = 0;
+ if (mt.index >= n) {
+ var i: usize = 0;
- while (i < n - m; i += 1) {
- const x = (mt.array[i] & UM) | (mt.array[i + 1] & LM);
- mt.array[i] = mt.array[i + m] ^ (x >> 1) ^ mag01[x & 0x1];
- }
+ while (i < n - m; i += 1) {
+ const x = (mt.array[i] & UM) | (mt.array[i + 1] & LM);
+ mt.array[i] = mt.array[i + m] ^ (x >> 1) ^ mag01[x & 0x1];
+ }
- while (i < n - 1; i += 1) {
- const x = (mt.array[i] & UM) | (mt.array[i + 1] & LM);
- mt.array[i] = mt.array[i + m - n] ^ (x >> 1) ^ mag01[x & 0x1];
+ while (i < n - 1; i += 1) {
+ const x = (mt.array[i] & UM) | (mt.array[i + 1] & LM);
+ mt.array[i] = mt.array[i + m - n] ^ (x >> 1) ^ mag01[x & 0x1];
- }
- const x = (mt.array[i] & UM) | (mt.array[0] & LM);
- mt.array[i] = mt.array[m - 1] ^ (x >> 1) ^ mag01[x & 0x1];
+ }
+ const x = (mt.array[i] & UM) | (mt.array[0] & LM);
+ mt.array[i] = mt.array[m - 1] ^ (x >> 1) ^ mag01[x & 0x1];
- mt.index = 0;
- }
+ mt.index = 0;
+ }
- var x = mt.array[mt.index];
- mt.index += 1;
+ var x = mt.array[mt.index];
+ mt.index += 1;
- x ^= ((x >> u) & d);
- x ^= ((x <<% s) & b);
- x ^= ((x <<% t) & c);
- x ^= (x >> l);
+ x ^= ((x >> u) & d);
+ x ^= ((x <<% s) & b);
+ x ^= ((x <<% t) & c);
+ x ^= (x >> l);
- return x;
+ return x;
+ }
}
}
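Instantiation of these generic containers is now an ordinary compile-time function call, so call sites keep the same shape; the unchanged context line var buf = List(u8).init(&global_allocator); in std/debug.zig above is one example. A hypothetical caller for the new List, again in the era's syntax (the import lines and the collect helper are illustrative assumptions, not part of the commit):

    const mem = @import("mem.zig");
    const Allocator = mem.Allocator;
    const List = @import("list.zig").List;

    fn collect(allocator: &Allocator) -> %void {
        // List(i32) is a compile-time function call that yields the container type.
        var numbers = List(i32).init(allocator);
        %return numbers.append(42);
        %return numbers.append(43);
        numbers.deinit();
    }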