Fixed a dumb memory bug with arrays
Parent: 3ae9a418ec
Commit: 737e8596f3
@@ -93,7 +93,7 @@ startup :: proc( prof : ^SpallProfiler, persistent_mem, frame_mem, transient_mem
push( policy_ptr, SlabSizeClass { 2 * Megabyte, 1 * Megabyte, alignment })
push( policy_ptr, SlabSizeClass { 2 * Megabyte, 2 * Megabyte, alignment })
push( policy_ptr, SlabSizeClass { 4 * Megabyte, 4 * Megabyte, alignment })
// push( policy_ptr, SlabSizeClass { 8 * Megabyte, 8 * Megabyte, alignment })
push( policy_ptr, SlabSizeClass { 8 * Megabyte, 8 * Megabyte, alignment })
// push( policy_ptr, SlabSizeClass { 16 * Megabyte, 16 * Megabyte, alignment })
// push( policy_ptr, SlabSizeClass { 32 * Megabyte, 32 * Megabyte, alignment })
// push( policy_ptr, SlabSizeClass { 64 * Megabyte, 64 * Megabyte, alignment })
@@ -103,10 +103,10 @@ startup :: proc( prof : ^SpallProfiler, persistent_mem, frame_mem, transient_mem
}

alloc_error : AllocatorError
persistent_slab, alloc_error = slab_init( policy_ptr, allocator = persistent_allocator() )
persistent_slab, alloc_error = slab_init( policy_ptr, allocator = persistent_allocator(), dbg_name = "persistent slab" )
verify( alloc_error == .None, "Failed to allocate the persistent slab" )

transient_slab, alloc_error = slab_init( & default_slab_policy, allocator = transient_allocator() )
transient_slab, alloc_error = slab_init( & default_slab_policy, allocator = transient_allocator(), dbg_name = "transient slab" )
verify( alloc_error == .None, "Failed to allocate transient slab" )

transient_clear_time = 120 // Seconds, 2 Minutes
@@ -213,7 +213,7 @@ startup :: proc( prof : ^SpallProfiler, persistent_mem, frame_mem, transient_mem
ui_startup( & workspace.ui, cache_allocator = persistent_slab_allocator() )
}

debug.path_lorem = str_fmt_alloc("C:/projects/SectrPrototype/examples/Lorem Ipsum.txt", allocator = persistent_allocator())
debug.path_lorem = str_fmt_alloc("C:/projects/SectrPrototype/examples/Lorem Ipsum.txt", allocator = persistent_slab_allocator())

alloc_error : AllocatorError; success : bool
debug.lorem_content, success = os.read_entire_file( debug.path_lorem, persistent_slab_allocator() )
@@ -309,7 +309,7 @@ tick :: proc( host_delta_time : f64, host_delta_ns : Duration ) -> b32
// Setup Frame Slab
{
alloc_error : AllocatorError
frame_slab, alloc_error = slab_init( & default_slab_policy, bucket_reserve_num = 0, allocator = frame_allocator() )
frame_slab, alloc_error = slab_init( & default_slab_policy, bucket_reserve_num = 0, allocator = frame_allocator(), dbg_name = "frame slab" )
verify( alloc_error == .None, "Failed to allocate frame slab" )
}

@@ -389,7 +389,7 @@ clean_frame :: proc()
free_all( transient_allocator() )

alloc_error : AllocatorError
transient_slab, alloc_error = slab_init( & default_slab_policy, allocator = transient_allocator() )
transient_slab, alloc_error = slab_init( & default_slab_policy, allocator = transient_allocator(), dbg_name = "transient slab" )
verify( alloc_error == .None, "Failed to allocate transient slab" )
}
}
@@ -1,7 +1,38 @@
package sectr

import "base:runtime"
import "core:io"
import "core:os"
import "core:text/table"

dump_stacktrace :: proc( allocator := context.temp_allocator ) -> string
{
trace_result := stacktrace()
lines, error := stacktrace_lines( trace_result )

padding := " "

log_table := table.init( & table.Table{}, context.temp_allocator, context.temp_allocator )
for line in lines {
table.row( log_table, padding, line.symbol, " - ", line.location )
}
table.build(log_table)

// writer_builder_backing : [Kilobyte * 16] u8
// writer_builder := from_bytes( writer_builder_backing[:] )
writer_builder : StringBuilder
str_builder_init( & writer_builder, allocator = allocator )

writer := to_writer( & writer_builder )
for row in 2 ..< log_table.nr_rows {
for col in 0 ..< log_table.nr_cols {
table.write_table_cell( writer, log_table, row, col )
}
io.write_byte( writer, '\n' )
}

return to_string( writer_builder )
}

ensure :: proc( condition : b32, msg : string, location := #caller_location )
{
@@ -33,6 +33,8 @@ import fmt_io "core:fmt"
str_tmp_from_any :: fmt_io.tprint
import "core:mem"
align_forward_int :: mem.align_forward_int
align_forward_uint :: mem.align_forward_uint
align_forward_uintptr :: mem.align_forward_uintptr
Allocator :: mem.Allocator
AllocatorError :: mem.Allocator_Error
AllocatorMode :: mem.Allocator_Mode
@@ -71,6 +73,10 @@ import "core:os"
import "core:path/filepath"
file_name_from_path :: filepath.short_stem
import str "core:strings"
StringBuilder :: str.Builder
str_builder_from_bytes :: str.builder_from_bytes
str_builder_init :: str.builder_init
str_builder_to_writer :: str.to_writer
str_builder_to_string :: str.to_string
import "core:time"
Duration :: time.Duration
@@ -83,6 +89,10 @@ import "core:unicode/utf8"
str_rune_count :: utf8.rune_count_in_string
runes_to_string :: utf8.runes_to_string
// string_to_runes :: utf8.string_to_runes
import "thirdparty:backtrace"
StackTraceData :: backtrace.Trace_Const
stacktrace :: backtrace.trace
stacktrace_lines :: backtrace.lines

OS_Type :: type_of(ODIN_OS)

@@ -108,9 +118,8 @@ draw_text :: proc {
draw_text_string_cached,
}

mov_avg_exp :: proc {
mov_avg_exp_f32,
mov_avg_exp_f64,
from_bytes :: proc {
str_builder_from_bytes,
}

get_bounds :: proc {
@@ -122,6 +131,11 @@ is_power_of_two :: proc {
is_power_of_two_uintptr,
}

mov_avg_exp :: proc {
mov_avg_exp_f32,
mov_avg_exp_f64,
}

pixels_to_cm :: proc {
f32_pixels_to_cm,
vec2_pixels_to_cm,
@@ -164,6 +178,10 @@ to_string :: proc {
str_builder_to_string,
}

to_writer :: proc {
str_builder_to_writer,
}

ui_set_layout :: proc {
ui_style_set_layout,
ui_style_theme_set_layout,
@@ -55,10 +55,12 @@ array_init :: proc( $ Type : typeid, allocator : Allocator ) -> ( Array(Type), A
array_init_reserve :: proc
( $ Type : typeid, allocator : Allocator, capacity : u64 ) -> ( result : Array(Type), alloc_error : AllocatorError )
{
header_size :: size_of(ArrayHeader)
header_size := size_of(ArrayHeader(Type))
array_size := header_size + int(capacity) * size_of(Type)

raw_mem : rawptr
raw_mem, alloc_error = alloc( header_size + int(capacity) * size_of(Type), allocator = allocator )
raw_mem, alloc_error = alloc( array_size, allocator = allocator )
log( str_fmt_tmp("array reserved: %d", header_size + int(capacity) * size_of(Type) ))
if alloc_error != AllocatorError.None do return

result.header = cast( ^ArrayHeader(Type)) raw_mem;
@@ -266,8 +268,8 @@ array_set_capacity :: proc( self : ^Array( $ Type ), new_capacity : u64 ) -> All

header_size :: size_of(ArrayHeader(Type))

new_size := header_size + cast(int) new_capacity * size_of(Type)
old_size := header_size + cast(int) self.capacity * size_of(Type)
new_size := header_size + (cast(int) new_capacity ) * size_of(Type)
old_size := header_size + (cast(int) self.capacity) * size_of(Type)

// new_mem, result_code := resize( self.header, old_size, new_size, allocator = self.backing )
new_mem, result_code := resize_non_zeroed( self.header, old_size, new_size, mem.DEFAULT_ALIGNMENT, allocator = self.backing )
@@ -281,9 +283,9 @@ array_set_capacity :: proc( self : ^Array( $ Type ), new_capacity : u64 ) -> All
return result_code
}

self.header = cast( ^ArrayHeader(Type)) raw_data(new_mem);
self.data = cast( [^]Type ) (cast( [^]ArrayHeader(Type)) self.header)[ 1:]
self.capacity = new_capacity
self.num = self.num
self.header = cast( ^ArrayHeader(Type)) raw_data(new_mem);
self.header.data = cast( [^]Type ) (cast( [^]ArrayHeader(Type)) self.header)[ 1:]
self.header.capacity = new_capacity
self.header.num = self.num
return result_code
}
@@ -116,7 +116,7 @@ zpl_hmap_rehash :: proc( ht : ^ HMapZPL( $ Type ), new_num : u64 ) -> AllocatorE
{
profile(#procedure)
// For now the prototype should never allow this to happen.
// ensure( false, "ZPL HMAP IS REHASHING" )
ensure( false, "ZPL HMAP IS REHASHING" )
last_added_index : i64

new_ht, init_result := zpl_hmap_init_reserve( Type, ht.hashes.backing, new_num )
@@ -263,6 +263,8 @@ zpl_hmap_find :: proc( using self : ^ HMapZPL( $ Type), key : u64 ) -> HMapZPL_F
}

zpl_hmap_full :: proc( using self : ^ HMapZPL( $ Type) ) -> b32 {
result : b32 = entries.num > u64(HMapZPL_CritialLoadScale * cast(f64) hashes.num)
critical_load := u64(HMapZPL_CritialLoadScale * cast(f64) hashes.num)
result : b32 = entries.num > critical_load
ensure( !result, "HASHTABLE IS FULL" )
return result
}
@@ -60,7 +60,7 @@ pool_init :: proc (

pool.header = cast( ^PoolHeader) raw_mem
pool.backing = allocator
pool.block_size = block_size
pool.block_size = align_forward_uint(block_size, alignment)
pool.bucket_capacity = bucket_capacity
pool.alignment = alignment

@@ -96,7 +96,8 @@ pool_allocate_buckets :: proc( using self : Pool, num_buckets : uint ) -> Alloca
return .Invalid_Argument
}
header_size := cast(uint) align_forward_int( size_of(PoolBucket), int(alignment))
to_allocate := cast(int) (header_size + bucket_capacity * num_buckets)
bucket_size := header_size + bucket_capacity
to_allocate := cast(int) (bucket_size * num_buckets)

bucket_memory, alloc_error := alloc_bytes_non_zeroed( to_allocate, int(alignment), backing )
if alloc_error != .None {
@@ -109,7 +110,7 @@ pool_allocate_buckets :: proc( using self : Pool, num_buckets : uint ) -> Alloca
bucket := cast( ^PoolBucket) next_bucket_ptr
bucket.blocks = memory_after_header(bucket)
bucket.next_block = 0
// log( str_fmt_tmp("Pool allocated block: %p capacity: %d", raw_data(bucket_memory), bucket_capacity))
log( str_fmt_tmp("Pool (%d) allocated bucket: %p capacity: %d", self.block_size, raw_data(bucket_memory), bucket_capacity / self.block_size))


if self.bucket_list.first == nil {
@@ -121,19 +122,20 @@ pool_allocate_buckets :: proc( using self : Pool, num_buckets : uint ) -> Alloca
}
// log( str_fmt_tmp("Bucket List First: %p", self.bucket_list.first))

next_bucket_ptr = next_bucket_ptr[ bucket_capacity: ]
next_bucket_ptr = next_bucket_ptr[ bucket_size: ]
}
return alloc_error
}

pool_grab :: proc( using pool : Pool, zero_memory := false ) -> ( block : []byte, alloc_error : AllocatorError )
pool_grab :: proc( pool : Pool, zero_memory := false ) -> ( block : []byte, alloc_error : AllocatorError )
{
pool := pool
// profile(#procedure)
alloc_error = .None

// Check the free-list first for a block
if free_list_head != nil
// if pool.free_list_head != nil && false
if false
{
head := & pool.free_list_head

@@ -144,7 +146,10 @@ pool_grab :: proc( using pool : Pool, zero_memory := false ) -> ( block : []byte
pool.free_list_head = pool.free_list_head.next

block = byte_slice( cast([^]byte) last_free, int(pool.block_size) )
// log( str_fmt_tmp("Returning free block: %p %d", raw_data(block), pool.block_size))
log( str_fmt_tmp("Returning free block: %p %d", raw_data(block), pool.block_size))
if zero_memory {
slice.zero(block)
}
return
}

@@ -157,7 +162,8 @@ pool_grab :: proc( using pool : Pool, zero_memory := false ) -> ( block : []byte
ensure(false, "Failed to allocate bucket")
return
}
pool.current_bucket = bucket_list.first
pool.current_bucket = pool.bucket_list.first
log( "First bucket allocation")
}

// Compiler Bug ? (Won't work without "pool."")
@@ -173,12 +179,12 @@ pool_grab :: proc( using pool : Pool, zero_memory := false ) -> ( block : []byte
// if current_bucket.next != nil {
if pool.current_bucket.next != nil {
// current_bucket = current_bucket.next
// log( str_fmt_tmp("Bucket %p exhausted using %p", pool.current_bucket, pool.current_bucket.next))
log( str_fmt_tmp("Bucket %p exhausted using %p", pool.current_bucket, pool.current_bucket.next))
pool.current_bucket = pool.current_bucket.next
}
else
{
// log( "All previous buckets exhausted, allocating new bucket")
log( "All previous buckets exhausted, allocating new bucket")
alloc_error := pool_allocate_buckets( pool, 1 )
if alloc_error != .None {
ensure(false, "Failed to allocate bucket")
@@ -191,8 +197,14 @@ pool_grab :: proc( using pool : Pool, zero_memory := false ) -> ( block : []byte
// Compiler Bug
// block = slice_ptr( current_bucket.blocks[ current_bucket.next_block:], int(block_size) )
// self.current_bucket.next_block += block_size
block = byte_slice( pool.current_bucket.blocks[ pool.current_bucket.next_block:], int(block_size) )
pool.current_bucket.next_block += block_size

block_ptr := cast(rawptr) (uintptr(pool.current_bucket.blocks) + uintptr(pool.current_bucket.next_block))

block = byte_slice( block_ptr, int(pool.block_size) )
pool.current_bucket.next_block += pool.block_size

next = uintptr(pool.current_bucket.blocks) + uintptr(pool.current_bucket.next_block)
log( str_fmt_tmp("grabbing block: %p blocks left: %d", raw_data(block), (end - next) / uintptr(pool.block_size) ))

if zero_memory {
slice.zero(block)
@@ -220,6 +232,7 @@ pool_release :: proc( self : Pool, block : []byte, loc := #caller_location )
self.free_list_head = new_free_block

new_free_block = new_free_block
log( str_fmt_tmp("Released block: %p %d", new_free_block, self.block_size))
}

pool_reset :: proc( using pool : Pool )
@@ -42,8 +42,9 @@ Slab_Max_Size_Classes :: 64
SlabPolicy :: StackFixed(SlabSizeClass, Slab_Max_Size_Classes)

SlabHeader :: struct {
backing : Allocator,
pools : StackFixed(Pool, Slab_Max_Size_Classes),
dbg_name : string,
backing : Allocator,
pools : StackFixed(Pool, Slab_Max_Size_Classes),
}

Slab :: struct {
@@ -56,7 +57,7 @@ slab_allocator :: proc( slab : Slab ) -> ( allocator : Allocator ) {
return
}

slab_init :: proc( policy : ^SlabPolicy, bucket_reserve_num : uint = 0, allocator : Allocator ) -> ( slab : Slab, alloc_error : AllocatorError )
slab_init :: proc( policy : ^SlabPolicy, bucket_reserve_num : uint = 0, allocator : Allocator, dbg_name : string = "" ) -> ( slab : Slab, alloc_error : AllocatorError )
{
header_size :: size_of( SlabHeader )

@@ -64,9 +65,10 @@ slab_init :: proc( policy : ^SlabPolicy, bucket_reserve_num : uint = 0, allocato
raw_mem, alloc_error = alloc( header_size, mem.DEFAULT_ALIGNMENT, allocator )
if alloc_error != .None do return

slab.header = cast( ^SlabHeader) raw_mem
slab.backing = allocator
alloc_error = slab_init_pools( slab, policy, bucket_reserve_num )
slab.header = cast( ^SlabHeader) raw_mem
slab.backing = allocator
slab.dbg_name = dbg_name
alloc_error = slab_init_pools( slab, policy, bucket_reserve_num )
return
}

@@ -129,11 +131,7 @@ slab_alloc :: proc( using self : Slab,
ensure(false, "Bad block from pool")
return nil, alloc_error
}
// log( str_fmt_tmp("Retrieved block: %p %d", raw_data(block), len(block) ))

// if zero_memory {
// slice.zero(block)
// }
log( str_fmt_tmp("%v: Retrieved block: %p %d", dbg_name, raw_data(block), len(block) ))

data = byte_slice(raw_data(block), size)
if zero_memory {
@@ -187,16 +185,17 @@ slab_resize :: proc( using self : Slab,
verify( pool_resize.header != nil, "Requested resize not supported by the slab allocator", location = loc )

// Resize will keep block in the same size_class, just give it more of its already allocated block
if pool_old == pool_resize
if pool_old.block_size == pool_resize.block_size
{
new_data_ptr := memory_after(data)
new_data = byte_slice( raw_data(data), new_size )
// log( str_fmt_tmp("Resize via expanding block space allocation %p %d", new_data_ptr, int(new_size - old_size)))
// log( dump_stacktrace() )
log( str_fmt_tmp("%v: Resize via expanding block space allocation %p %d", dbg_name, new_data_ptr, int(new_size - old_size)))

if zero_memory && new_size > old_size {
to_zero := byte_slice( memory_after(data), int(new_size - old_size) )
to_zero := byte_slice( new_data_ptr, int(new_size - old_size) )
slice.zero( to_zero )
// log( str_fmt_tmp("Zeroed memory - Range(%p to %p)", new_data_ptr, int(new_size - old_size)))
log( str_fmt_tmp("Zeroed memory - Range(%p to %p)", new_data_ptr, cast(rawptr) (uintptr(new_data_ptr) + uintptr(new_size - old_size))))
}
return
}
@@ -217,6 +216,7 @@ slab_resize :: proc( using self : Slab,
// log( str_fmt_tmp("Resize via new block: %p %d (old : %p $d )", raw_data(new_block), len(new_block), raw_data(data), old_size ))

if raw_data(data) != raw_data(new_block) {
log( str_fmt_tmp("%v: Resize view new block, copying from old data block to new block: (%p %d), (%p %d)", dbg_name, raw_data(data), len(data), raw_data(new_block), len(new_block)))
copy_non_overlapping( raw_data(new_block), raw_data(data), int(old_size) )
pool_release( pool_old, data )
}
@@ -24,7 +24,6 @@ StringCached :: struct {
runes : []rune,
}


StringCache :: struct {
slab : Slab,
table : HMapZPL(StringCached),
@@ -54,7 +53,7 @@ str_cache_init :: proc( /*allocator : Allocator*/ ) -> ( cache : StringCache ) {
header_size :: size_of( Slab )

alloc_error : AllocatorError
cache.slab, alloc_error = slab_init( & policy, allocator = persistent_allocator() )
cache.slab, alloc_error = slab_init( & policy, allocator = persistent_allocator(), dbg_name = "StringCache slab" )
verify(alloc_error == .None, "Failed to initialize the string cache" )

cache.table, alloc_error = zpl_hmap_init_reserve( StringCached, persistent_slab_allocator(), 64 * Kilobyte )
@@ -88,9 +88,10 @@ PWS_ParseError :: struct {
msg : string,
}

PWS_ParseError_Max :: 32
PWS_NodeArray_ReserveSize :: Kilobyte * 4
PWS_LineArray_RserveSize :: Kilobyte
PWS_ParseError_Max :: 32
PWS_TokenArray_ReserveSize :: Kilobyte * 64
PWS_NodeArray_ReserveSize :: Kilobyte * 64
PWS_LineArray_ReserveSize :: Kilobyte * 64

// TODO(Ed) : The ast arrays should be handled by a slab allocator dedicated to PWS_ASTs
// This can grow in undeterministic ways, persistent will get very polluted otherwise.
@@ -164,7 +165,7 @@ pws_parser_lex :: proc ( text : string, allocator : Allocator ) -> ( PWS_LexResu
}

alloc_error : AllocatorError
tokens, alloc_error = array_init_reserve( PWS_Token, allocator, 8 )
tokens, alloc_error = array_init_reserve( PWS_Token, allocator, Kilobyte * 4 )
if alloc_error != AllocatorError.None {
ensure(false, "Failed to allocate token's array")
return result, alloc_error
@@ -239,7 +240,6 @@ PWS_ParseData :: struct {
pws_parser_parse :: proc( text : string, allocator : Allocator ) -> ( PWS_ParseResult, AllocatorError )
{
bytes := transmute([]byte) text
log( str_fmt_tmp( "parsing: %v ...", (len(text) > 30 ? transmute(string) bytes[ :30] : text) ))

profile(#procedure)
using parser : PWS_ParseData
@@ -255,10 +255,12 @@ pws_parser_parse :: proc( text : string, allocator : Allocator ) -> ( PWS_ParseR

tokens = lex.tokens

nodes, alloc_error = array_init_reserve( PWS_AST, allocator, 8 )
log( str_fmt_tmp( "parsing: %v ...", (len(text) > 30 ? transmute(string) bytes[ :30] : text) ))

nodes, alloc_error = array_init_reserve( PWS_AST, allocator, PWS_NodeArray_ReserveSize )
verify( alloc_error == nil, "Allocation failure creating nodes array")

parser.lines, alloc_error = array_init_reserve( ^PWS_AST, allocator, 8 )
parser.lines, alloc_error = array_init_reserve( ^PWS_AST, allocator, PWS_LineArray_ReserveSize )
verify( alloc_error == nil, "Allocation failure creating line array")

//region Helper procs
@@ -350,7 +350,8 @@ update :: proc( delta_time : f64 ) -> b32

// index := 0
widgets : Array(UI_Widget)
widgets, alloc_error = array_init_reserve( UI_Widget, frame_slab_allocator(), 8 )
widgets, alloc_error = array_init_reserve( UI_Widget, frame_slab_allocator(), Kilobyte * 64 )
// widgets, alloc_error = array_init_reserve( UI_Widget, frame_slab_allocator(), Kilobyte * 8 )
widgets_ptr := & widgets

label_id := 0
@@ -274,7 +274,7 @@ UI_Box :: struct {
UI_Layout_Stack_Size :: 512
UI_Style_Stack_Size :: 512
UI_Parent_Stack_Size :: 512
UI_Built_Boxes_Array_Size :: 8
UI_Built_Boxes_Array_Size :: 512

UI_State :: struct {
// TODO(Ed) : Use these