General codebase refactor & cleanup
Renamed HashTable to HMapZPL, with its procs using the zpl_ namespace prefix (I want to eventually move away from it). Started making better use of the grime pattern for library aliasing.
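A minimal usage sketch of the renamed container, assuming the grime aliases (crc32, AllocatorError, persistent_allocator) and the existing FontDef type; the names font_cache, name, and key below are illustrative only, not part of this commit:

	font_cache, make_error := zpl_hmap_init_reserve( FontDef, persistent_allocator(), 8 )
	ensure( make_error == AllocatorError.None, "Failed to allocate font cache" )

	name := "Sectr_Default"
	key  := u64( crc32( transmute([]byte) name ) )

	// zpl_hmap_set returns a pointer to the stored value plus any allocator error from growth.
	_, set_error := zpl_hmap_set( & font_cache, key, FontDef {} )
	ensure( set_error == AllocatorError.None, "Failed to insert into font cache" )

	// Lookup returns nil when the key is absent.
	if zpl_hmap_get( & font_cache, key ) != nil {
		// Entry is resident. Removal (zpl_hmap_remove) stays the expensive path, since it triggers a rehash pass.
	}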
| @@ -17,7 +17,7 @@ UI_State :: struct { | ||||
| 	// TODO(ED) : Put this in its own struct? | ||||
| 	first_free_box : UI_Box, | ||||
| 	box_table_size : u64, | ||||
| 	box_table      : ^ UI_BoxHashSlot,  // TODO(Ed) : Can the cache use HashTable? | ||||
| 	box_table      : ^ UI_BoxHashSlot,  // TODO(Ed) : Can the cache use HMapZPL? | ||||
|  | ||||
| 	// Note(rjf) : Build phase output | ||||
| 	// TODO(ED) : Put this in its own struct? | ||||
|   | ||||
| @@ -3,7 +3,6 @@ package sectr | ||||
| import    "base:runtime" | ||||
| import  c "core:c/libc" | ||||
| import    "core:dynlib" | ||||
| import    "core:fmt" | ||||
| import    "core:mem" | ||||
| import    "core:mem/virtual" | ||||
| import    "core:os" | ||||
| @@ -16,7 +15,7 @@ Path_Input_Replay :: "scratch.sectr_replay" | ||||
|  | ||||
| ModuleAPI :: struct { | ||||
| 	lib         : dynlib.Library, | ||||
| 	write_time  : os.File_Time, | ||||
| 	write_time  : FileTime, | ||||
| 	lib_version : i32, | ||||
|  | ||||
| 	startup    : type_of( startup ), | ||||
| @@ -29,23 +28,23 @@ ModuleAPI :: struct { | ||||
| @export | ||||
| startup :: proc( live_mem : virtual.Arena, snapshot_mem : []u8, host_logger : ^ Logger ) | ||||
| { | ||||
| 	init( & memory.logger, "Sectr", host_logger.file_path, host_logger.file ) | ||||
| 	context.logger = to_odin_logger( & memory.logger ) | ||||
| 	logger_init( & Memory_App.logger, "Sectr", host_logger.file_path, host_logger.file ) | ||||
| 	context.logger = to_odin_logger( & Memory_App.logger ) | ||||
|  | ||||
| 	// Setup memory for the first time | ||||
| 	{ | ||||
| 		arena_size     :: size_of( mem.Arena) | ||||
| 		arena_size     :: size_of( Arena) | ||||
| 		internals_size :: 4 * Megabyte | ||||
|  | ||||
| 		using memory; | ||||
| 		using Memory_App; | ||||
| 		block := live_mem.curr_block | ||||
|  | ||||
| 		live     = live_mem | ||||
| 		snapshot = snapshot_mem | ||||
|  | ||||
| 		persistent_slice := slice_ptr( block.base, memory_persistent_size ) | ||||
| 		transient_slice  := slice_ptr( memory_after( persistent_slice), memory_trans_temp_size ) | ||||
| 		temp_slice       := slice_ptr( memory_after( transient_slice),  memory_trans_temp_size ) | ||||
| 		persistent_slice := slice_ptr( block.base, Memory_Persistent_Size ) | ||||
| 		transient_slice  := slice_ptr( memory_after( persistent_slice), Memory_Trans_Temp_Szie ) | ||||
| 		temp_slice       := slice_ptr( memory_after( transient_slice),  Memory_Trans_Temp_Szie ) | ||||
|  | ||||
| 		when Use_TrackingAllocator { | ||||
| 			// We assign the beginning of the block to be the host's persistent memory's arena. | ||||
| @@ -95,7 +94,7 @@ startup :: proc( live_mem : virtual.Arena, snapshot_mem : []u8, host_logger : ^ | ||||
| 	monitor_id         = rl.GetCurrentMonitor() | ||||
| 	monitor_refresh_hz = rl.GetMonitorRefreshRate( monitor_id ) | ||||
| 	rl.SetTargetFPS( monitor_refresh_hz ) | ||||
| 	log( fmt.tprintf( "Set target FPS to: %v", monitor_refresh_hz ) ) | ||||
| 	log( str_fmt_tmp( "Set target FPS to: %v", monitor_refresh_hz ) ) | ||||
|  | ||||
| 	// Basic Font Setup | ||||
| 	{ | ||||
| @@ -153,14 +152,14 @@ startup :: proc( live_mem : virtual.Arena, snapshot_mem : []u8, host_logger : ^ | ||||
| @export | ||||
| sectr_shutdown :: proc() | ||||
| { | ||||
| 	if memory.persistent == nil { | ||||
| 	if Memory_App.persistent == nil { | ||||
| 		return | ||||
| 	} | ||||
| 	state := get_state() | ||||
|  | ||||
| 	// Replay | ||||
| 	{ | ||||
| 		os.close( memory.replay.active_file ) | ||||
| 		os.close( Memory_App.replay.active_file ) | ||||
| 	} | ||||
|  | ||||
| 	font_provider_shutdown() | ||||
| @@ -171,27 +170,30 @@ sectr_shutdown :: proc() | ||||
| @export | ||||
| reload :: proc( live_mem : virtual.Arena, snapshot_mem : []u8, host_logger : ^ Logger ) | ||||
| { | ||||
| 	using memory; | ||||
| 	using Memory_App; | ||||
| 	block := live_mem.curr_block | ||||
|  | ||||
| 	live     = live_mem | ||||
| 	snapshot = snapshot_mem | ||||
|  | ||||
| 	persistent_slice := slice_ptr( block.base, memory_persistent_size ) | ||||
| 	transient_slice  := slice_ptr( memory_after( persistent_slice), memory_trans_temp_size ) | ||||
| 	temp_slice       := slice_ptr( memory_after( transient_slice),  memory_trans_temp_size ) | ||||
| 	// This is no longer necessary as we have proper base address setting | ||||
| 	when false | ||||
| 	{ | ||||
| 		persistent_slice := slice_ptr( block.base, Memory_Persistent_Size ) | ||||
| 		transient_slice  := slice_ptr( memory_after( persistent_slice), Memory_Trans_Temp_Szie ) | ||||
| 		temp_slice       := slice_ptr( memory_after( transient_slice),  Memory_Trans_Temp_Szie ) | ||||
|  | ||||
| 	when Use_TrackingAllocator { | ||||
| 		persistent = cast( ^ TrackedAllocator ) & persistent_slice[0] | ||||
| 		transient  = cast( ^ TrackedAllocator ) & transient_slice[0] | ||||
| 		temp       = cast( ^ TrackedAllocator ) & temp_slice[0] | ||||
| 		when Use_TrackingAllocator { | ||||
| 			persistent = cast( ^ TrackedAllocator ) & persistent_slice[0] | ||||
| 			transient  = cast( ^ TrackedAllocator ) & transient_slice[0] | ||||
| 			temp       = cast( ^ TrackedAllocator ) & temp_slice[0] | ||||
| 		} | ||||
| 		else { | ||||
| 			persistent = cast( ^ Arena ) & persistent_slice[0] | ||||
| 			transient  = cast( ^ Arena ) & transient_slice[0] | ||||
| 			temp       = cast( ^ Arena ) & temp_slice[0] | ||||
| 		} | ||||
| 	} | ||||
| 	else { | ||||
| 		persistent = cast( ^ Arena ) & persistent_slice[0] | ||||
| 		transient  = cast( ^ Arena ) & transient_slice[0] | ||||
| 		temp       = cast( ^ Arena ) & temp_slice[0] | ||||
| 	} | ||||
|  | ||||
| 	context.allocator      = transient_allocator() | ||||
| 	context.temp_allocator = temp_allocator() | ||||
|  | ||||
| @@ -203,7 +205,6 @@ reload :: proc( live_mem : virtual.Arena, snapshot_mem : []u8, host_logger : ^ L | ||||
| 	// font_provider_data := & get_state().font_provider_data | ||||
| 	// font_provider_data.font_cache.allocator = arena_allocator( & font_provider_data.font_arena ) | ||||
|  | ||||
| 	// Have to reload allocators for all dynamic allocating data-structures. | ||||
| 	ui_reload( & get_state().project.workspace.ui, persistent_allocator() ) | ||||
|  | ||||
| 	log("Module reloaded") | ||||
| @@ -228,7 +229,7 @@ tick :: proc( delta_time : f64 ) -> b32 | ||||
| @export | ||||
| clean_temp :: proc() { | ||||
| 	when Use_TrackingAllocator { | ||||
| 		mem.tracking_allocator_clear( & memory.temp.tracker ) | ||||
| 		mem.tracking_allocator_clear( & Memory_App.temp.tracker ) | ||||
| 	} | ||||
| 	else { | ||||
| 		free_all( temp_allocator() ) | ||||
|   | ||||
| @@ -1,4 +1,4 @@ | ||||
| package sectr  | ||||
| package sectr | ||||
|  | ||||
| import "base:runtime" | ||||
| import "core:os" | ||||
|   | ||||
| @@ -2,15 +2,14 @@ package sectr | ||||
|  | ||||
| import rl "vendor:raylib" | ||||
|  | ||||
| Color :: rl.Color | ||||
|  | ||||
| Color       :: rl.Color | ||||
| Color_White :: rl.WHITE | ||||
|  | ||||
| Color_Transparent  :: Color {   0,   0,   0,   0 } | ||||
| Color_BG           :: Color {  41,  41,  45, 255 } | ||||
| Color_BG_TextBox   :: Color {  32,  32,  32, 255 } | ||||
| Color_BG_TextBox_Green   :: Color {  102,  102,  110, 255 } | ||||
| Color_Frame_Hover  :: Color { 122, 122, 125, 255 } | ||||
| Color_Frame_Select :: Color { 188, 188, 188, 255 } | ||||
| Color_GreyRed      :: Color { 220, 100, 100, 125 } | ||||
| Color_White_A125   :: Color { 255, 255, 255, 125 } | ||||
| Color_Transparent      :: Color {   0,   0,   0,   0 } | ||||
| Color_BG               :: Color {  41,  41,  45, 255 } | ||||
| Color_BG_TextBox       :: Color {  32,  32,  32, 255 } | ||||
| Color_BG_TextBox_Green :: Color { 102, 102, 110, 255 } | ||||
| Color_Frame_Hover      :: Color { 122, 122, 125, 255 } | ||||
| Color_Frame_Select     :: Color { 188, 188, 188, 255 } | ||||
| Color_GreyRed          :: Color { 220, 100, 100, 125 } | ||||
| Color_White_A125       :: Color { 255, 255, 255, 125 } | ||||
|   | ||||
| @@ -8,11 +8,12 @@ import "core:os" | ||||
|  | ||||
| import rl "vendor:raylib" | ||||
|  | ||||
| memory : Memory | ||||
| Memory_App : Memory | ||||
|  | ||||
| memory_chunk_size      :: 2 * Gigabyte | ||||
| memory_persistent_size :: 256 * Megabyte | ||||
| memory_trans_temp_size :: (memory_chunk_size - memory_persistent_size ) / 2 | ||||
| Memory_Base_Address    :: Terabyte * 1 | ||||
| Memory_Chunk_Size      :: 2 * Gigabyte | ||||
| Memory_Persistent_Size :: 256 * Megabyte | ||||
| Memory_Trans_Temp_Szie :: (Memory_Chunk_Size - Memory_Persistent_Size ) / 2 | ||||
|  | ||||
| // TODO(Ed): There is an issue with mutex locks on the tracking allocator.. | ||||
| Use_TrackingAllocator :: false | ||||
| @@ -48,39 +49,39 @@ else | ||||
|  | ||||
| persistent_allocator :: proc() -> Allocator { | ||||
| 	when Use_TrackingAllocator { | ||||
| 		return tracked_allocator( memory.persistent ) | ||||
| 		return tracked_allocator( Memory_App.persistent ) | ||||
| 	} | ||||
| 	else { | ||||
| 		return arena_allocator( memory.persistent ) | ||||
| 		return arena_allocator( Memory_App.persistent ) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| transient_allocator :: proc() -> Allocator { | ||||
| 	when Use_TrackingAllocator { | ||||
| 		return tracked_allocator( memory.transient ) | ||||
| 		return tracked_allocator( Memory_App.transient ) | ||||
| 	} | ||||
| 	else { | ||||
| 		return arena_allocator( memory.transient ) | ||||
| 		return arena_allocator( Memory_App.transient ) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| temp_allocator :: proc() -> Allocator { | ||||
| 	when Use_TrackingAllocator { | ||||
| 		return tracked_allocator( memory.temp ) | ||||
| 		return tracked_allocator( Memory_App.temp ) | ||||
| 	} | ||||
| 	else { | ||||
| 		return arena_allocator( memory.temp ) | ||||
| 		return arena_allocator( Memory_App.temp ) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| save_snapshot :: proc( snapshot : [^]u8 ) { | ||||
| 	live_ptr := cast( ^ rawptr ) memory.live.curr_block.base | ||||
| 	mem.copy_non_overlapping( & snapshot[0], live_ptr, memory_chunk_size ) | ||||
| 	live_ptr := cast( ^ rawptr ) Memory_App.live.curr_block.base | ||||
| 	mem.copy_non_overlapping( & snapshot[0], live_ptr, Memory_Chunk_Size ) | ||||
| } | ||||
|  | ||||
| load_snapshot :: proc( snapshot : [^]u8 ) { | ||||
| 	live_ptr := cast( ^ rawptr ) memory.live.curr_block.base | ||||
| 	mem.copy_non_overlapping( live_ptr, snapshot, memory_chunk_size ) | ||||
| 	live_ptr := cast( ^ rawptr ) Memory_App.live.curr_block.base | ||||
| 	mem.copy_non_overlapping( live_ptr, snapshot, Memory_Chunk_Size ) | ||||
| } | ||||
|  | ||||
| AppConfig :: struct { | ||||
| @@ -124,10 +125,10 @@ State :: struct { | ||||
|  | ||||
| get_state :: proc "contextless" () -> ^ State { | ||||
| 	when Use_TrackingAllocator { | ||||
| 		return cast( ^ State ) raw_data( memory.persistent.backing.data ) | ||||
| 		return cast( ^ State ) raw_data( Memory_App.persistent.backing.data ) | ||||
| 	} | ||||
| 	else { | ||||
| 		return cast( ^ State ) raw_data( memory.persistent. data ) | ||||
| 		return cast( ^ State ) raw_data( Memory_App.persistent. data ) | ||||
| 	} | ||||
| } | ||||
|  | ||||
|   | ||||
| @@ -56,7 +56,7 @@ FontProviderData :: struct { | ||||
|  | ||||
| 	//TODO(Ed) : There is an issue with hot-reload and map allocations that I can't figure out right now.. | ||||
| 	// font_cache : ^ map [FontID](FontDef), | ||||
| 	// font_cache : HashTable(FontDef), | ||||
| 	// font_cache : HMapZPL(FontDef), | ||||
| 	font_cache : [10] FontDef, | ||||
| 	open_id    : i32 | ||||
| } | ||||
| @@ -101,7 +101,7 @@ font_load :: proc( path_file : string, | ||||
| 	font_provider_data := & get_state().font_provider_data; using font_provider_data | ||||
|  | ||||
| 	font_data, read_succeded : = os.read_entire_file( path_file  ) | ||||
| 	verify( b32(read_succeded), fmt.tprintf("Failed to read font file for: %v", path_file) ) | ||||
| 	verify( b32(read_succeded), str_fmt_tmp("Failed to read font file for: %v", path_file) ) | ||||
| 	font_data_size := cast(i32) len(font_data) | ||||
|  | ||||
| 	desired_id := desired_id | ||||
| @@ -141,7 +141,7 @@ font_load :: proc( path_file : string, | ||||
| 			codepoints     = nil, | ||||
| 			codepointCount = count, | ||||
| 			type = rl.FontType.DEFAULT ) | ||||
| 		verify( glyphs != nil, fmt.tprintf("Failed to load glyphs for font: %v at desired size: %v", desired_id, size ) ) | ||||
| 		verify( glyphs != nil, str_fmt_tmp("Failed to load glyphs for font: %v at desired size: %v", desired_id, size ) ) | ||||
|  | ||||
| 		atlas  := rl.GenImageFontAtlas( glyphs, & recs, count, size, padding, i32(Font_Atlas_Packing_Method.Raylib_Basic) ) | ||||
| 		texture = rl.LoadTextureFromImage( atlas ) | ||||
|   | ||||
							
								
								
									
code/girme_stack.odin (new file, 26 lines)
| @@ -0,0 +1,26 @@ | ||||
| package sectr | ||||
|  | ||||
| Stack :: struct ( $ Type : typeid, $ Size : i32 ) { | ||||
| 	idx   : i32, | ||||
| 	items : [ Size ] Type, | ||||
| } | ||||
|  | ||||
| stack_push :: proc( using stack : ^ Stack( $ Type, $ Size ), value : Type ) { | ||||
| 	verify( idx < len( items ), "Attempted to push on a full stack" ) | ||||
|  | ||||
| 	items[ idx ] = value | ||||
| 	idx += 1 | ||||
| } | ||||
|  | ||||
| stack_pop :: proc( using stack : ^ Stack( $ Type, $ Size ) ) { | ||||
| 	verify( idx > 0, "Attempted to pop an empty stack" ) | ||||
|  | ||||
| 	idx -= 1 | ||||
| 	if idx == 0 { | ||||
| 		items[idx] = {} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| stack_peek :: proc( using stack : ^ Stack( $ Type, $ Size ) ) -> ^ Type { | ||||
| 	return & items[idx] | ||||
| } | ||||
							
								
								
									
code/grime.odin (221 changed lines)
| @@ -1,92 +1,88 @@ | ||||
|  | ||||
| package sectr | ||||
| // At least its less than C/C++ ... | ||||
|  | ||||
| import "base:builtin" | ||||
| 	copy :: builtin.copy | ||||
| import "base:runtime" | ||||
| 	Byte     :: runtime.Byte | ||||
| 	Kilobyte :: runtime.Kilobyte | ||||
| 	Megabyte :: runtime.Megabyte | ||||
| 	Gigabyte :: runtime.Gigabyte | ||||
| 	Terabyte :: runtime.Terabyte | ||||
| 	Petabyte :: runtime.Petabyte | ||||
| 	Exabyte  :: runtime.Exabyte | ||||
| import c "core:c/libc" | ||||
| import "core:dynlib" | ||||
| import "core:hash" | ||||
| 	crc32 :: hash.crc32 | ||||
| import fmt_io "core:fmt" | ||||
| 	str_fmt          :: fmt_io.printf | ||||
| 	str_fmt_tmp      :: fmt_io.tprintf | ||||
| 	str_fmt_builder  :: fmt_io.sbprintf | ||||
| 	str_fmt_buffer   :: fmt_io.bprintf | ||||
| 	str_to_file_ln   :: fmt_io.fprintln | ||||
| 	str_tmp_from_any :: fmt_io.tprint | ||||
| import "core:mem" | ||||
| 	Allocator               :: mem.Allocator | ||||
| 	AllocatorError          :: mem.Allocator_Error | ||||
| 	alloc                   :: mem.alloc | ||||
| 	alloc_bytes             :: mem.alloc_bytes | ||||
| 	Arena                   :: mem.Arena | ||||
| 	arena_allocator         :: mem.arena_allocator | ||||
| 	arena_init              :: mem.arena_init | ||||
| 	free                    :: mem.free | ||||
| 	ptr_offset              :: mem.ptr_offset | ||||
| 	slice_ptr               :: mem.slice_ptr | ||||
| 	TrackingAllocator       :: mem.Tracking_Allocator | ||||
| 	tracking_allocator      :: mem.tracking_allocator | ||||
| 	tracking_allocator_init :: mem.tracking_allocator_init | ||||
| import "core:mem/virtual" | ||||
| import "core:os" | ||||
| 	FileFlag_Create    :: os.O_CREATE | ||||
| 	FileFlag_ReadWrite :: os.O_RDWR | ||||
| 	FileTime           :: os.File_Time | ||||
| 	file_close         :: os.close | ||||
| 	file_open          :: os.open | ||||
| 	file_read          :: os.read | ||||
| 	file_remove        :: os.remove | ||||
| 	file_seek          :: os.seek | ||||
| 	file_status        :: os.stat | ||||
| 	file_write         :: os.write | ||||
| import "core:path/filepath" | ||||
| 	file_name_from_path :: filepath.short_stem | ||||
| import str "core:strings" | ||||
| 	str_builder_to_string  :: str.to_string | ||||
| import "core:unicode/utf8" | ||||
|  | ||||
| import c "core:c/libc" | ||||
| to_runes :: proc { | ||||
| 	utf8.string_to_runes, | ||||
| } | ||||
|  | ||||
| Byte     :: 1 | ||||
| Kilobyte :: 1024 * Byte | ||||
| Megabyte :: 1024 * Kilobyte | ||||
| Gigabyte :: 1024 * Megabyte | ||||
| Terabyte :: 1024 * Gigabyte | ||||
| Petabyte :: 1024 * Terabyte | ||||
| Exabyte  :: 1024 * Petabyte | ||||
| to_string :: proc { | ||||
| 	str_builder_to_string, | ||||
| } | ||||
|  | ||||
| kilobytes :: proc( kb : $ integer_type ) -> integer_type { | ||||
| OS_Type :: type_of(ODIN_OS) | ||||
|  | ||||
| kilobytes :: #force_inline proc "contextless" ( kb : $ integer_type ) -> integer_type { | ||||
| 	return kb * Kilobyte | ||||
| } | ||||
| megabytes :: proc( mb : $ integer_type ) -> integer_type { | ||||
| megabytes :: #force_inline proc "contextless" ( mb : $ integer_type ) -> integer_type { | ||||
| 	return mb * Megabyte | ||||
| } | ||||
| gigabyte  :: proc( gb : $ integer_type ) -> integer_type { | ||||
| gigabytes  :: #force_inline proc "contextless" ( gb : $ integer_type ) -> integer_type { | ||||
| 	return gb * Gigabyte | ||||
| } | ||||
| terabyte  :: proc( tb : $ integer_type ) -> integer_type { | ||||
| terabytes  :: #force_inline proc "contextless" ( tb : $ integer_type ) -> integer_type { | ||||
| 	return tb * Terabyte | ||||
| } | ||||
|  | ||||
| copy                    :: builtin.copy | ||||
| crc32                   :: hash.crc32 | ||||
| Allocator               :: mem.Allocator | ||||
| AllocatorError          :: mem.Allocator_Error | ||||
| alloc                   :: mem.alloc | ||||
| alloc_bytes             :: mem.alloc_bytes | ||||
| Arena                   :: mem.Arena | ||||
| arena_allocator         :: mem.arena_allocator | ||||
| arena_init              :: mem.arena_init | ||||
| free                    :: mem.free | ||||
| ptr_offset              :: mem.ptr_offset | ||||
| slice_ptr               :: mem.slice_ptr | ||||
| Tracking_Allocator      :: mem.Tracking_Allocator | ||||
| tracking_allocator      :: mem.tracking_allocator | ||||
| tracking_allocator_init :: mem.tracking_allocator_init | ||||
| file_name_from_path     :: filepath.short_stem | ||||
| OS_Type                 :: type_of(ODIN_OS) | ||||
|  | ||||
| get_bounds :: proc { | ||||
| 	box_get_bounds, | ||||
| 	view_get_bounds, | ||||
| } | ||||
|  | ||||
| //region Stack - Basic fixed-size stack container | ||||
| Stack :: struct ( $ Type : typeid, $ Size : i32 ) { | ||||
| 	idx   : i32, | ||||
| 	items : [ Size ] Type, | ||||
| } | ||||
|  | ||||
| stack_push :: proc( stack : ^ $ StackType / Stack( $ Type, $ Size ), value : Type ) { | ||||
| 	using stack | ||||
| 	verify( idx < len( items ), "Attempted to push on a full stack" ) | ||||
|  | ||||
| 	items[ idx ] = value | ||||
| 	idx += 1 | ||||
| } | ||||
|  | ||||
| stack_pop :: proc( stack : ^ $ StackType / Stack( $ Type, $ Size ) ) { | ||||
| 	using stack | ||||
| 	verify( idx > 0, "Attempted to pop an empty stack" ) | ||||
|  | ||||
| 	idx -= 1 | ||||
| 	if idx == 0 { | ||||
| 		items[idx] = {} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| stack_peek :: proc( stack : ^ Stack( $ Type, $ Size ) ) -> ^ Type { | ||||
| 	using stack | ||||
| 	return & items[idx] | ||||
| } | ||||
| //endregion Stack | ||||
|  | ||||
|  | ||||
| // TODO(Ed): Review | ||||
| //region Doubly Linked List generic procs (verbose) | ||||
|  | ||||
| dbl_linked_list_push_back :: proc(first: ^(^ $ Type), last: ^(^ Type), new_node: ^ Type) | ||||
| @@ -109,104 +105,3 @@ dbl_linked_list_push_back :: proc(first: ^(^ $ Type), last: ^(^ Type), new_node: | ||||
| } | ||||
|  | ||||
| //endregion | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
| // TODO(Ed) : This is extremely jank, Raylib requires a 'heap' allocator with the way it works. | ||||
| // We do not have persistent segmented in such a way for this. Eventually we might just want to segment vmem and just shove a heap allocator on a segment of it. | ||||
|  | ||||
| when false { | ||||
| RL_MALLOC :: proc "c" ( size : c.size_t ) -> rawptr | ||||
| { | ||||
| 	allocator : Allocator | ||||
| 	when Use_TrackingAllocator { | ||||
| 		allocator = Allocator { | ||||
| 			data      = & memory.persistent.tracker, | ||||
| 			procedure = mem.tracking_allocator_proc, | ||||
| 		} | ||||
| 	} | ||||
| 	else { | ||||
| 		allocator = Allocator { | ||||
| 			data      = & memory.persistent, | ||||
| 			procedure = mem.arena_allocator_proc, | ||||
| 		} | ||||
| 	} | ||||
| 	result, error_code := allocator.procedure( allocator.data, mem.Allocator_Mode.Alloc_Non_Zeroed, cast(int) size, mem.DEFAULT_ALIGNMENT, nil, 0, auto_cast {} ) | ||||
| 	if error_code != AllocatorError.None { | ||||
| 		runtime.debug_trap() | ||||
| 		os.exit( -1 ) | ||||
| 	} | ||||
| 	return raw_data(result) | ||||
| } | ||||
|  | ||||
| RL_CALLOC :: proc "c" ( count : c.size_t, size : c.size_t ) -> rawptr | ||||
| { | ||||
| 	allocator : Allocator | ||||
| 	when Use_TrackingAllocator { | ||||
| 		allocator = Allocator { | ||||
| 			data      = & memory.persistent.tracker, | ||||
| 			procedure = mem.tracking_allocator_proc, | ||||
| 		} | ||||
| 	} | ||||
| 	else { | ||||
| 		allocator = Allocator { | ||||
| 			data      = & memory.persistent, | ||||
| 			procedure = mem.arena_allocator_proc, | ||||
| 		} | ||||
| 	} | ||||
| 	result, error_code := allocator.procedure( allocator.data, mem.Allocator_Mode.Alloc, cast(int) size, mem.DEFAULT_ALIGNMENT, nil, 0, auto_cast {} ) | ||||
| 	if error_code != AllocatorError.None { | ||||
| 		runtime.debug_trap() | ||||
| 		os.exit( -1 ) | ||||
| 	} | ||||
| 	return raw_data(result) | ||||
| } | ||||
|  | ||||
| RL_REALLOC :: proc "c" ( block : rawptr, size : c.size_t ) -> rawptr | ||||
| { | ||||
| 	allocator : Allocator | ||||
| 	when Use_TrackingAllocator { | ||||
| 		allocator = Allocator { | ||||
| 			data      = & memory.persistent.tracker, | ||||
| 			procedure = mem.tracking_allocator_proc, | ||||
| 		} | ||||
| 	} | ||||
| 	else { | ||||
| 		allocator = Allocator { | ||||
| 			data      = & memory.persistent, | ||||
| 			procedure = mem.arena_allocator_proc, | ||||
| 		} | ||||
| 	} | ||||
| 	result, error_code := allocator.procedure( allocator.data, mem.Allocator_Mode.Resize_Non_Zeroed, cast(int) size, mem.DEFAULT_ALIGNMENT, block, 0, auto_cast {} ) | ||||
| 	if error_code != AllocatorError.None { | ||||
| 		runtime.debug_trap() | ||||
| 		os.exit( -1 ) | ||||
| 	} | ||||
| 	return raw_data(result) | ||||
| } | ||||
|  | ||||
| RL_FREE :: proc "c" ( block : rawptr ) | ||||
| { | ||||
| 	allocator : Allocator | ||||
| 	when Use_TrackingAllocator { | ||||
| 		allocator = Allocator { | ||||
| 			data      = & memory.persistent.tracker, | ||||
| 			procedure = mem.tracking_allocator_proc, | ||||
| 		} | ||||
| 	} | ||||
| 	else { | ||||
| 		allocator = Allocator { | ||||
| 			data      = & memory.persistent, | ||||
| 			procedure = mem.arena_allocator_proc, | ||||
| 		} | ||||
| 	} | ||||
| 	result, error_code := allocator.procedure( allocator.data, mem.Allocator_Mode.Free, 0, 0, block, 0, auto_cast {} ) | ||||
| 	if error_code != AllocatorError.None { | ||||
| 		runtime.debug_trap() | ||||
| 		os.exit( -1 ) | ||||
| 	} | ||||
| } | ||||
| } | ||||
| @@ -1,6 +1,6 @@ | ||||
| // Based on gencpp's and thus zpl's Array implementation | ||||
| // Made because of the map issue with fonts during hot-reload. | ||||
| // I didn't want to make the HashTable impl with the [dynamic] array for now to isolate | ||||
| // I didn't want to make the HMapZPL impl with the [dynamic] array for now to isolate | ||||
| // what in the world is going on with the memory... | ||||
| package sectr | ||||
|  | ||||
| @@ -15,8 +15,11 @@ Array :: struct ( $ Type : typeid ) { | ||||
| 	data      : [^]Type, | ||||
| } | ||||
|  | ||||
| array_to_slice :: proc( arr : Array( $ Type) ) -> []Type { | ||||
| 	using arr; return slice_ptr( data, num ) | ||||
| // @(private=file) | ||||
| // Array :: Array_ZPL | ||||
|  | ||||
| array_to_slice :: proc( using self : Array( $ Type) ) -> []Type { | ||||
| 	return slice_ptr( data, num ) | ||||
| } | ||||
|  | ||||
| array_grow_formula :: proc( value : u64 ) -> u64 { | ||||
| @@ -30,19 +33,18 @@ array_init :: proc( $ Type : typeid, allocator : Allocator ) -> ( Array(Type), A | ||||
| array_init_reserve :: proc( $ Type : typeid, allocator : Allocator, capacity : u64 ) -> ( Array(Type), AllocatorError ) | ||||
| { | ||||
| 	raw_data, result_code := alloc( int(capacity) * size_of(Type), allocator = allocator ) | ||||
| 	result : Array( Type) | ||||
| 	result : Array( Type); | ||||
| 	result.data      = cast( [^] Type ) raw_data | ||||
| 	result.allocator = allocator | ||||
| 	result.capacity  = capacity | ||||
| 	return result, result_code | ||||
| } | ||||
|  | ||||
| array_append :: proc( array : ^ Array( $ Type), value : Type ) -> AllocatorError | ||||
| array_append :: proc( using self : ^ Array( $ Type), value : Type ) -> AllocatorError | ||||
| { | ||||
| 	using array | ||||
| 	if num == capacity | ||||
| 	{ | ||||
| 		grow_result := array_grow( array, capacity ) | ||||
| 		grow_result := array_grow( self, capacity ) | ||||
| 		if grow_result != AllocatorError.None { | ||||
| 			return grow_result | ||||
| 		} | ||||
| @@ -53,12 +55,11 @@ array_append :: proc( array : ^ Array( $ Type), value : Type ) -> AllocatorError | ||||
| 	return AllocatorError.None | ||||
| } | ||||
|  | ||||
| array_append_slice :: proc( array : ^ Array( $ Type ), items : []Type ) -> AllocatorError | ||||
| array_append_slice :: proc( using self : ^ Array( $ Type ), items : []Type ) -> AllocatorError | ||||
| { | ||||
| 	using array | ||||
| 	if num + len(items) > capacity | ||||
| 	{ | ||||
| 		grow_result := array_grow( array, capacity ) | ||||
| 		grow_result := array_grow( self, capacity ) | ||||
| 		if grow_result != AllocatorError.None { | ||||
| 			return grow_result | ||||
| 		} | ||||
| @@ -75,11 +76,9 @@ array_append_slice :: proc( array : ^ Array( $ Type ), items : []Type ) -> Alloc | ||||
| 	return AllocatorError.None | ||||
| } | ||||
|  | ||||
| array_append_at :: proc( array : ^ Array( $ Type ), item : Type, id : u64 ) -> AllocatorError | ||||
| array_append_at :: proc( using self : ^ Array( $ Type ), item : Type, id : u64 ) -> AllocatorError | ||||
| { | ||||
| 	id := id | ||||
| 	using array | ||||
|  | ||||
| 	if id >= num { | ||||
| 		id = num - 1 | ||||
| 	} | ||||
| @@ -89,7 +88,7 @@ array_append_at :: proc( array : ^ Array( $ Type ), item : Type, id : u64 ) -> A | ||||
|  | ||||
| 	if capacity < num + 1 | ||||
| 	{ | ||||
| 		grow_result := array_grow( array, capacity ) | ||||
| 		grow_result := array_grow( self, capacity ) | ||||
| 		if grow_result != AllocatorError.None { | ||||
| 			return grow_result | ||||
| 		} | ||||
| @@ -109,17 +108,15 @@ array_append_at :: proc( array : ^ Array( $ Type ), item : Type, id : u64 ) -> A | ||||
| 	return AllocatorError.None | ||||
| } | ||||
|  | ||||
| array_append_at_slice :: proc( array : ^ Array( $ Type ), items : []Type, id : u64 ) -> AllocatorError | ||||
| array_append_at_slice :: proc( using self : ^ Array( $ Type ), items : []Type, id : u64 ) -> AllocatorError | ||||
| { | ||||
| 	id := id | ||||
| 	using array | ||||
|  | ||||
| 	if id >= num { | ||||
| 		return array_append_slice( items ) | ||||
| 	} | ||||
| 	if len(items) > capacity | ||||
| 	{ | ||||
| 		grow_result := array_grow( array, capacity ) | ||||
| 		grow_result := array_grow( self, capacity ) | ||||
| 		if grow_result != AllocatorError.None { | ||||
| 			return grow_result | ||||
| 		} | ||||
| @@ -142,18 +139,19 @@ array_append_at_slice :: proc( array : ^ Array( $ Type ), items : []Type, id : u | ||||
| 	return AllocatorError.None | ||||
| } | ||||
|  | ||||
| array_back :: proc( array : ^ Array( $ Type ) ) -> ^ Type { | ||||
| 	using array; return & data[ num - 1 ] | ||||
| array_back :: proc( using self : ^ Array( $ Type ) ) -> ^ Type { | ||||
| 	return & data[ num - 1 ] | ||||
| } | ||||
|  | ||||
| array_clear :: proc( array : ^ Array( $ Type ) ) { | ||||
| 	array.num = 0 | ||||
| array_clear :: proc( using self : ^ Array( $ Type ), zero_data : b32 ) { | ||||
| 	if zero_data { | ||||
| 		mem.set( raw_data( data ), 0, num ) | ||||
| 	} | ||||
| 	num = 0 | ||||
| } | ||||
|  | ||||
| array_fill :: proc( array : ^ Array( $ Type ), begin, end : u64, value : Type ) -> b32 | ||||
| array_fill :: proc( using self : ^ Array( $ Type ), begin, end : u64, value : Type ) -> b32 | ||||
| { | ||||
| 	using array | ||||
|  | ||||
| 	if begin < 0 || end >= num { | ||||
| 		return false | ||||
| 	} | ||||
| @@ -168,32 +166,28 @@ array_fill :: proc( array : ^ Array( $ Type ), begin, end : u64, value : Type ) | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| array_free :: proc( array : ^ Array( $ Type ) ) { | ||||
| 	using array | ||||
| array_free :: proc( using self : ^ Array( $ Type ) ) { | ||||
| 	free( data, allocator ) | ||||
| 	data = nil | ||||
| } | ||||
|  | ||||
| array_grow :: proc( array : ^ Array( $ Type ), min_capacity : u64 ) -> AllocatorError | ||||
| array_grow :: proc( using self : ^ Array( $ Type ), min_capacity : u64 ) -> AllocatorError | ||||
| { | ||||
| 	using array | ||||
| 	new_capacity := array_grow_formula( capacity ) | ||||
|  | ||||
| 	if new_capacity < min_capacity { | ||||
| 		new_capacity = min_capacity | ||||
| 	} | ||||
|  | ||||
| 	return array_set_capacity( array, new_capacity ) | ||||
| 	return array_set_capacity( self, new_capacity ) | ||||
| } | ||||
|  | ||||
| array_pop :: proc( array : ^ Array( $ Type ) ) { | ||||
| 	verify( array.num != 0, "Attempted to pop an array with no elements" ) | ||||
| 	array.num -= 1 | ||||
| array_pop :: proc( using self : ^ Array( $ Type ) ) { | ||||
| 	verify( num != 0, "Attempted to pop an array with no elements" ) | ||||
| 	num -= 1 | ||||
| } | ||||
|  | ||||
| array_remove_at :: proc( array : ^ Array( $ Type ), id : u64 ) | ||||
| array_remove_at :: proc( using self : ^ Array( $ Type ), id : u64 ) | ||||
| { | ||||
| 	using array | ||||
| 	verify( id >= num, "Attempted to remove from an index larger than the array" ) | ||||
|  | ||||
| 	left  = slice_ptr( data, id ) | ||||
| @@ -203,11 +197,10 @@ array_remove_at :: proc( array : ^ Array( $ Type ), id : u64 ) | ||||
| 	num -= 1 | ||||
| } | ||||
|  | ||||
| array_reserve :: proc( array : ^ Array( $ Type ), new_capacity : u64 ) -> AllocatorError | ||||
| array_reserve :: proc( using self : ^ Array( $ Type ), new_capacity : u64 ) -> AllocatorError | ||||
| { | ||||
| 	using array | ||||
| 	if capacity < new_capacity { | ||||
| 		return array_set_capacity( array, new_capacity ) | ||||
| 		return array_set_capacity( self, new_capacity ) | ||||
| 	} | ||||
| 	return AllocatorError.None | ||||
| } | ||||
| @@ -226,9 +219,8 @@ array_resize :: proc( array : ^ Array( $ Type ), num : u64 ) -> AllocatorError | ||||
| 	return AllocatorError.None | ||||
| } | ||||
|  | ||||
| array_set_capacity :: proc( array : ^ Array( $ Type ), new_capacity : u64 ) -> AllocatorError | ||||
| array_set_capacity :: proc( using self : ^ Array( $ Type ), new_capacity : u64 ) -> AllocatorError | ||||
| { | ||||
| 	using array | ||||
| 	if new_capacity == capacity { | ||||
| 		return AllocatorError.None | ||||
| 	} | ||||
|   | ||||
| @@ -5,11 +5,11 @@ import "core:fmt" | ||||
| import "core:os" | ||||
| import "core:runtime" | ||||
|  | ||||
| copy_file_sync :: proc( path_src, path_dst: string ) -> b32 | ||||
| file_copy_sync :: proc( path_src, path_dst: string ) -> b32 | ||||
| { | ||||
|   file_size : i64 | ||||
| 	{ | ||||
| 		path_info, result := os.stat( path_src, context.temp_allocator ) | ||||
| 		path_info, result := file_status( path_src, context.temp_allocator ) | ||||
| 		if result != os.ERROR_NONE { | ||||
| 			logf("Could not get file info: %v", result, LogLevel.Error ) | ||||
| 			return false | ||||
| @@ -34,32 +34,32 @@ copy_file_sync :: proc( path_src, path_dst: string ) -> b32 | ||||
| } | ||||
|  | ||||
| file_exists :: proc( file_path : string ) -> b32 { | ||||
| 	path_info, result := os.stat( file_path, context.temp_allocator ) | ||||
| 	path_info, result := file_status( file_path, context.temp_allocator ) | ||||
| 	if result != os.ERROR_NONE { | ||||
| 		return false | ||||
| 	} | ||||
| 	return true; | ||||
| } | ||||
|  | ||||
| is_file_locked :: proc( file_path : string ) -> b32 { | ||||
| 	handle, err := os.open(file_path, os.O_RDONLY) | ||||
| file_is_locked :: proc( file_path : string ) -> b32 { | ||||
| 	handle, err := file_open(file_path, os.O_RDONLY) | ||||
| 	if err != os.ERROR_NONE { | ||||
| 			// If the error indicates the file is in use, return true. | ||||
| 			return true | ||||
| 	} | ||||
|  | ||||
| 	// If the file opens successfully, close it and return false. | ||||
| 	os.close(handle) | ||||
| 	file_close(handle) | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| rewind :: proc( file : os.Handle ) { | ||||
| 	os.seek( file, 0, 0 ) | ||||
| file_rewind :: proc( file : os.Handle ) { | ||||
| 	file_seek( file, 0, 0 ) | ||||
| } | ||||
|  | ||||
| read_looped :: proc( file : os.Handle, data : []byte ) { | ||||
| 	total_read, result_code := os.read( file, data ) | ||||
| file_read_looped :: proc( file : os.Handle, data : []byte ) { | ||||
| 	total_read, result_code := file_read( file, data ) | ||||
| 	if result_code == os.ERROR_HANDLE_EOF { | ||||
| 		rewind( file ) | ||||
| 		file_rewind( file ) | ||||
| 	} | ||||
| } | ||||
|   | ||||
							
								
								
									
code/grime_hashmap.odin (new file, 4 lines)
| @@ -0,0 +1,4 @@ | ||||
| // TODO(Ed) : Make your own hashmap implementation (open-addressing, round-robin possibly) | ||||
| package sectr | ||||
|  | ||||
|  | ||||
| @@ -1,6 +1,10 @@ | ||||
| // This is an alternative to Odin's default map type. | ||||
| // The only reason I may need this is due to issues with allocator callbacks or something else going on | ||||
| // with hot-reloads... | ||||
|  | ||||
| // This implementation uses two ZPL-Based Arrays to hold entries and the actual hash table. | ||||
| // Its algorithm isn't that great; removal of elements is very expensive. | ||||
| // To the point where, if that's done quite a bit, another implementation should be looked at. | ||||
| package sectr | ||||
|  | ||||
| import "core:slice" | ||||
| @@ -9,33 +13,35 @@ import "core:slice" | ||||
| // Note(Ed) : See core:hash for hashing procs. | ||||
|  | ||||
| // This might be problematic... | ||||
| HT_MapProc    :: #type proc( $ Type : typeid, key : u64, value :   Type ) | ||||
| HT_MapMutProc :: #type proc( $ Type : typeid, key : u64, value : ^ Type ) | ||||
| HMapZPL_MapProc    :: #type proc( $ Type : typeid, key : u64, value :   Type ) | ||||
| HMapZPL_MapMutProc :: #type proc( $ Type : typeid, key : u64, value : ^ Type ) | ||||
|  | ||||
| HT_FindResult :: struct { | ||||
| HMapZPL_CritialLoadScale :: 0.70 | ||||
|  | ||||
| HMapZPL_FindResult :: struct { | ||||
| 	hash_index  : i64, | ||||
| 	prev_index  : i64, | ||||
| 	entry_index : i64, | ||||
| } | ||||
|  | ||||
| HashTable_Entry :: struct ( $ Type : typeid) { | ||||
| HMapZPL_Entry :: struct ( $ Type : typeid) { | ||||
| 	key   : u64, | ||||
| 	next  : i64, | ||||
| 	value : Type, | ||||
| } | ||||
|  | ||||
| HashTable :: struct ( $ Type : typeid ) { | ||||
| HMapZPL :: struct ( $ Type : typeid ) { | ||||
| 	hashes  : Array( i64 ), | ||||
| 	entries : Array( HashTable_Entry(Type) ), | ||||
| 	entries : Array( HMapZPL_Entry(Type) ), | ||||
| } | ||||
|  | ||||
| hashtable_init :: proc( $ Type : typeid, allocator : Allocator ) -> ( HashTable( Type), AllocatorError ) { | ||||
| 	return hashtable_init_reserve( Type, allocator ) | ||||
| zpl_hmap_init :: proc( $ Type : typeid, allocator : Allocator ) -> ( HMapZPL( Type), AllocatorError ) { | ||||
| 	return zpl_hmap_init_reserve( Type, allocator ) | ||||
| } | ||||
|  | ||||
| hashtable_init_reserve :: proc( $ Type : typeid, allocator : Allocator, num : u64 ) -> ( HashTable( Type), AllocatorError ) | ||||
| zpl_hmap_init_reserve :: proc( $ Type : typeid, allocator : Allocator, num : u64 ) -> ( HMapZPL( Type), AllocatorError ) | ||||
| { | ||||
| 	result                        : HashTable(Type) | ||||
| 	result                        : HMapZPL(Type) | ||||
| 	hashes_result, entries_result : AllocatorError | ||||
|  | ||||
| 	result.hashes, hashes_result = array_init_reserve( i64, allocator, num ) | ||||
| @@ -46,7 +52,7 @@ hashtable_init_reserve :: proc( $ Type : typeid, allocator : Allocator, num : u6 | ||||
| 	array_resize( & result.hashes, num ) | ||||
| 	slice.fill( slice_ptr( result.hashes.data, cast(int) result.hashes.num), -1 ) | ||||
|  | ||||
| 	result.entries, entries_result = array_init_reserve( HashTable_Entry(Type), allocator, num ) | ||||
| 	result.entries, entries_result = array_init_reserve( HMapZPL_Entry(Type), allocator, num ) | ||||
| 	if entries_result != AllocatorError.None { | ||||
| 		ensure( false, "Failed to allocate entries array" ) | ||||
| 		return result, entries_result | ||||
| @@ -54,8 +60,7 @@ hashtable_init_reserve :: proc( $ Type : typeid, allocator : Allocator, num : u6 | ||||
| 	return result, AllocatorError.None | ||||
| } | ||||
|  | ||||
| hashtable_clear :: proc( ht : ^ HashTable( $ Type ) ) { | ||||
| 	using ht | ||||
| zpl_hmap_clear :: proc( using self : ^ HMapZPL( $ Type ) ) { | ||||
| 	for id := 0; id < hashes.num; id += 1 { | ||||
| 		hashes[id] = -1 | ||||
| 	} | ||||
| @@ -64,18 +69,16 @@ hashtable_clear :: proc( ht : ^ HashTable( $ Type ) ) { | ||||
| 	array_clear( entries ) | ||||
| } | ||||
|  | ||||
| hashtable_destroy :: proc( using ht : ^ HashTable( $ Type ) ) { | ||||
| zpl_hmap_destroy :: proc( using self : ^ HMapZPL( $ Type ) ) { | ||||
| 	if hashes.data != nil && hashes.capacity > 0 { | ||||
| 		array_free( & hashes ) | ||||
| 		array_free( & entries ) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| hashtable_get :: proc( ht : ^ HashTable( $ Type ), key : u64 ) -> ^ Type | ||||
| zpl_hmap_get :: proc( using self : ^ HMapZPL( $ Type ), key : u64 ) -> ^ Type | ||||
| { | ||||
| 	using ht | ||||
|  | ||||
| 	id := hashtable_find( ht, key ).entry_index | ||||
| 	id := zpl_hmap_find( self, key ).entry_index | ||||
| 	if id >= 0 { | ||||
| 		return & entries.data[id].value | ||||
| 	} | ||||
| @@ -83,44 +86,41 @@ hashtable_get :: proc( ht : ^ HashTable( $ Type ), key : u64 ) -> ^ Type | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| hashtable_map :: proc( ht : ^ HashTable( $ Type), map_proc : HT_MapProc ) { | ||||
| 	using ht | ||||
| zpl_hmap_map :: proc( using self : ^ HMapZPL( $ Type), map_proc : HMapZPL_MapProc ) { | ||||
| 	ensure( map_proc != nil, "Mapping procedure must not be null" ) | ||||
| 	for id := 0; id < entries.num; id += 1 { | ||||
| 		map_proc( Type, entries[id].key, entries[id].value ) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| hashtable_map_mut :: proc( ht : ^ HashTable( $ Type), map_proc : HT_MapMutProc ) { | ||||
| 	using ht | ||||
| zpl_hmap_map_mut :: proc( using self : ^ HMapZPL( $ Type), map_proc : HMapZPL_MapMutProc ) { | ||||
| 	ensure( map_proc != nil, "Mapping procedure must not be null" ) | ||||
| 	for id := 0; id < entries.num; id += 1 { | ||||
| 		map_proc( Type, entries[id].key, & entries[id].value ) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| hashtable_grow :: proc( ht : ^ HashTable( $ Type ) ) -> AllocatorError { | ||||
| 	using ht | ||||
| zpl_hmap_grow :: proc( using self : ^ HMapZPL( $ Type ) ) -> AllocatorError { | ||||
| 	new_num := array_grow_formula( entries.num ) | ||||
| 	return hashtable_rehash( ht, new_num ) | ||||
| 	return zpl_hmap_rehash( self, new_num ) | ||||
| } | ||||
|  | ||||
| hashtable_rehash :: proc( ht : ^ HashTable( $ Type ), new_num : u64 ) -> AllocatorError | ||||
| zpl_hmap_rehash :: proc( ht : ^ HMapZPL( $ Type ), new_num : u64 ) -> AllocatorError | ||||
| { | ||||
| 	last_added_index : i64 | ||||
|  | ||||
| 	new_ht, init_result := hashtable_init_reserve( Type, ht.hashes.allocator, new_num ) | ||||
| 	new_ht, init_result := zpl_hmap_init_reserve( Type, ht.hashes.allocator, new_num ) | ||||
| 	if init_result != AllocatorError.None { | ||||
| 		ensure( false, "New hashtable failed to allocate" ) | ||||
| 		ensure( false, "New zpl_hmap failed to allocate" ) | ||||
| 		return init_result | ||||
| 	} | ||||
|  | ||||
| 	for id : u64 = 0; id < ht.entries.num; id += 1 { | ||||
| 		find_result : HT_FindResult | ||||
| 		find_result : HMapZPL_FindResult | ||||
|  | ||||
| 		entry           := & ht.entries.data[id] | ||||
| 		find_result      = hashtable_find( & new_ht, entry.key ) | ||||
| 		last_added_index = hashtable_add_entry( & new_ht, entry.key ) | ||||
| 		find_result      = zpl_hmap_find( & new_ht, entry.key ) | ||||
| 		last_added_index = zpl_hmap_add_entry( & new_ht, entry.key ) | ||||
|  | ||||
| 		if find_result.prev_index < 0 { | ||||
| 			new_ht.hashes.data[ find_result.hash_index ] = last_added_index | ||||
| @@ -133,15 +133,14 @@ hashtable_rehash :: proc( ht : ^ HashTable( $ Type ), new_num : u64 ) -> Allocat | ||||
| 		new_ht.entries.data[ last_added_index ].value = entry.value | ||||
| 	} | ||||
|  | ||||
| 	hashtable_destroy( ht ) | ||||
| 	zpl_hmap_destroy( ht ) | ||||
|  | ||||
| 	(ht ^) = new_ht | ||||
| 	return AllocatorError.None | ||||
| } | ||||
|  | ||||
| hashtable_rehash_fast :: proc( ht : ^ HashTable( $ Type ) ) | ||||
| zpl_hmap_rehash_fast :: proc( using self : ^ HMapZPL( $ Type ) ) | ||||
| { | ||||
| 	using ht | ||||
| 	for id := 0; id < entries.num; id += 1 { | ||||
| 		entries[id].Next = -1; | ||||
| 	} | ||||
| @@ -150,7 +149,7 @@ hashtable_rehash_fast :: proc( ht : ^ HashTable( $ Type ) ) | ||||
| 	} | ||||
| 	for id := 0; id < entries.num; id += 1 { | ||||
| 		entry       := & entries[id] | ||||
| 		find_result := hashtable_find( entry.key ) | ||||
| 		find_result := zpl_hmap_find( entry.key ) | ||||
|  | ||||
| 		if find_result.prev_index < 0 { | ||||
| 			hashes[ find_result.hash_index ] = id | ||||
| @@ -161,42 +160,39 @@ hashtable_rehash_fast :: proc( ht : ^ HashTable( $ Type ) ) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| hashtable_remove :: proc( ht : ^ HashTable( $ Type ), key : u64 ) { | ||||
| 	using ht | ||||
| 	find_result := hashtable_find( key ) | ||||
| zpl_hmap_remove :: proc( self : ^ HMapZPL( $ Type ), key : u64 ) { | ||||
| 	find_result := zpl_hmap_find( key ) | ||||
|  | ||||
| 	if find_result.entry_index >= 0 { | ||||
| 		array_remove_at( & ht.entries, find_result.entry_index ) | ||||
| 		hashtable_rehash_fast( ht ) | ||||
| 		array_remove_at( & entries, find_result.entry_index ) | ||||
| 		zpl_hmap_rehash_fast( self ) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| hashtable_remove_entry :: proc( ht : ^ HashTable( $ Type ), id : i64 ) { | ||||
| 	array_remove_at( & ht.entries, id ) | ||||
| zpl_hmap_remove_entry :: proc( using self : ^ HMapZPL( $ Type ), id : i64 ) { | ||||
| 	array_remove_at( & entries, id ) | ||||
| } | ||||
|  | ||||
| hashtable_set :: proc( ht : ^ HashTable( $ Type), key : u64, value : Type ) -> (^ Type, AllocatorError) | ||||
| zpl_hmap_set :: proc( using self : ^ HMapZPL( $ Type), key : u64, value : Type ) -> (^ Type, AllocatorError) | ||||
| { | ||||
| 	using ht | ||||
|  | ||||
| 	id          : i64 = 0 | ||||
| 	find_result : HT_FindResult | ||||
| 	find_result : HMapZPL_FindResult | ||||
|  | ||||
| 	if hashtable_full( ht ) | ||||
| 	if zpl_hmap_full( self ) | ||||
| 	{ | ||||
| 		grow_result := hashtable_grow(ht) | ||||
| 		grow_result := zpl_hmap_grow( self ) | ||||
| 		if grow_result != AllocatorError.None { | ||||
| 				return nil, grow_result | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	find_result = hashtable_find( ht, key ) | ||||
| 	find_result = zpl_hmap_find( self, key ) | ||||
| 	if find_result.entry_index >= 0 { | ||||
| 		id = find_result.entry_index | ||||
| 	} | ||||
| 	else | ||||
| 	{ | ||||
| 		id = hashtable_add_entry( ht, key ) | ||||
| 		id = zpl_hmap_add_entry( self, key ) | ||||
| 		if find_result.prev_index >= 0 { | ||||
| 			entries.data[ find_result.prev_index ].next = id | ||||
| 		} | ||||
| @@ -207,15 +203,14 @@ hashtable_set :: proc( ht : ^ HashTable( $ Type), key : u64, value : Type ) -> ( | ||||
|  | ||||
| 	entries.data[id].value = value | ||||
|  | ||||
| 	if hashtable_full( ht ) { | ||||
| 		return & entries.data[id].value, hashtable_grow( ht ) | ||||
| 	if zpl_hmap_full( self ) { | ||||
| 		return & entries.data[id].value, zpl_hmap_grow( self ) | ||||
| 	} | ||||
|  | ||||
| 	return & entries.data[id].value, AllocatorError.None | ||||
| } | ||||
|  | ||||
| hashtable_slot :: proc( ht : ^ HashTable( $ Type), key : u64 ) -> i64 { | ||||
| 	using ht | ||||
| zpl_hmap_slot :: proc( using self : ^ HMapZPL( $ Type), key : u64 ) -> i64 { | ||||
| 	for id : i64 = 0; id < hashes.num; id += 1 { | ||||
| 		if hashes.data[id] == key                { | ||||
| 			return id | ||||
| @@ -224,36 +219,35 @@ hashtable_slot :: proc( ht : ^ HashTable( $ Type), key : u64 ) -> i64 { | ||||
| 	return -1 | ||||
| } | ||||
|  | ||||
| hashtable_add_entry :: proc( ht : ^ HashTable( $ Type), key : u64 ) -> i64 { | ||||
| 	using ht | ||||
| 	entry : HashTable_Entry(Type) = { key, -1, {} } | ||||
| zpl_hmap_add_entry :: proc( using self : ^ HMapZPL( $ Type), key : u64 ) -> i64 { | ||||
| 	entry : HMapZPL_Entry(Type) = { key, -1, {} } | ||||
| 	id    := cast(i64) entries.num | ||||
| 	array_append( & entries, entry ) | ||||
| 	return id | ||||
| } | ||||
|  | ||||
| hashtable_find :: proc( ht : ^ HashTable( $ Type), key : u64 ) -> HT_FindResult | ||||
| zpl_hmap_find :: proc( using self : ^ HMapZPL( $ Type), key : u64 ) -> HMapZPL_FindResult | ||||
| { | ||||
| 	using ht | ||||
| 	result : HT_FindResult = { -1, -1, -1 } | ||||
| 	result : HMapZPL_FindResult = { -1, -1, -1 } | ||||
|  | ||||
| 	if hashes.num > 0 { | ||||
| 		result.hash_index  = cast(i64)( key % hashes.num ) | ||||
| 		result.entry_index = hashes.data[ result.hash_index ] | ||||
|  | ||||
| 		for ; result.entry_index >= 0;                     { | ||||
| 			if entries.data[ result.entry_index ].key == key { | ||||
| 			entry := & entries.data[ result.entry_index ] | ||||
| 			if entry.key == key { | ||||
| 				break | ||||
| 			} | ||||
|  | ||||
| 			result.prev_index  = result.entry_index | ||||
| 			result.entry_index = entries.data[ result.entry_index ].next | ||||
| 			result.entry_index = entry.next | ||||
| 		} | ||||
| 	} | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| hashtable_full :: proc( using ht : ^ HashTable( $ Type) ) -> b32 { | ||||
| 	result : b32 = entries.num > u64(0.75 * cast(f64) hashes.num) | ||||
| zpl_hmap_full :: proc( using self : ^ HMapZPL( $ Type) ) -> b32 { | ||||
| 	result : b32 = entries.num > u64(HMapZPL_CritialLoadScale * cast(f64) hashes.num) | ||||
| 	return result | ||||
| } | ||||
| @@ -27,7 +27,7 @@ memory_after :: proc( slice : []byte ) -> ( ^ byte) { | ||||
| TrackedAllocator :: struct { | ||||
| 	backing   : Arena, | ||||
| 	internals : Arena, | ||||
| 	tracker   : Tracking_Allocator, | ||||
| 	tracker   : TrackingAllocator, | ||||
| } | ||||
|  | ||||
| tracked_allocator :: proc( self : ^ TrackedAllocator ) -> Allocator { | ||||
| @@ -45,7 +45,7 @@ tracked_allocator_init :: proc( size, internals_size : int, allocator := context | ||||
| 	raw_size       := backing_size + internals_size | ||||
|  | ||||
| 	raw_mem, raw_mem_code := alloc( raw_size, mem.DEFAULT_ALIGNMENT, allocator ) | ||||
| 	verify( raw_mem_code == mem.Allocator_Error.None, "Failed to allocate memory for the TrackingAllocator" ) | ||||
| 	verify( raw_mem_code == AllocatorError.None, "Failed to allocate memory for the TrackingAllocator" ) | ||||
|  | ||||
| 	backing_slice   := slice_ptr( cast( ^ byte) raw_mem,        backing_size ) | ||||
| 	internals_slice := slice_ptr( memory_after( backing_slice), internals_size ) | ||||
| @@ -67,7 +67,7 @@ tracked_allocator_init :: proc( size, internals_size : int, allocator := context | ||||
| tracked_allocator_init_vmem :: proc( vmem : [] byte, internals_size : int ) -> ^ TrackedAllocator | ||||
| { | ||||
| 	arena_size              :: size_of( Arena) | ||||
| 	tracking_allocator_size :: size_of( Tracking_Allocator ) | ||||
| 	tracking_allocator_size :: size_of( TrackingAllocator ) | ||||
| 	backing_size            := len(vmem)    - internals_size | ||||
| 	raw_size                := backing_size + internals_size | ||||
|  | ||||
|   | ||||
| @@ -1,42 +1,84 @@ | ||||
| package host | ||||
|  | ||||
| import       "base:runtime" | ||||
| import      "core:dynlib" | ||||
| import      "core:io" | ||||
| import      "core:fmt" | ||||
| import      "core:log" | ||||
| import      "core:mem" | ||||
| import      "core:mem/virtual" | ||||
| 	Byte     :: 1 | ||||
| 	Kilobyte :: 1024 * Byte | ||||
| 	Megabyte :: 1024 * Kilobyte | ||||
| 	Gigabyte :: 1024 * Megabyte | ||||
| 	Terabyte :: 1024 * Gigabyte | ||||
| 	Petabyte :: 1024 * Terabyte | ||||
| 	Exabyte  :: 1024 * Petabyte | ||||
| import       "core:os" | ||||
| 	file_resize :: os.ftruncate | ||||
| import       "core:strings" | ||||
| import       "core:time" | ||||
| //region Grime & Dependencies | ||||
| import "base:runtime" | ||||
| 	Byte     :: runtime.Byte | ||||
| 	Kilobyte :: runtime.Kilobyte | ||||
| 	Megabyte :: runtime.Megabyte | ||||
| 	Gigabyte :: runtime.Gigabyte | ||||
| 	Terabyte :: runtime.Terabyte | ||||
| 	Petabyte :: runtime.Petabyte | ||||
| 	Exabyte  :: runtime.Exabyte | ||||
| import "core:dynlib" | ||||
| 	os_lib_load     :: dynlib.load_library | ||||
| 	os_lib_unload   :: dynlib.unload_library | ||||
| 	os_lib_get_proc :: dynlib.symbol_address | ||||
| import "core:io" | ||||
| import fmt_io "core:fmt" | ||||
| 	str_fmt         :: fmt_io.printf | ||||
| 	str_fmt_tmp     :: fmt_io.tprintf | ||||
| 	str_fmt_builder :: fmt_io.sbprintf | ||||
| import "core:log" | ||||
| import "core:mem" | ||||
| 	Allocator         :: mem.Allocator | ||||
| 	TrackingAllocator :: mem.Tracking_Allocator | ||||
| import "core:mem/virtual" | ||||
| 	Arena        :: virtual.Arena | ||||
| 	MapFileError :: virtual.Map_File_Error | ||||
| 	MapFileFlag  :: virtual.Map_File_Flag | ||||
| 	MapFileFlags :: virtual.Map_File_Flags | ||||
| import "core:os" | ||||
| 	FileFlag_Create        :: os.O_CREATE | ||||
| 	FileFlag_ReadWrite     :: os.O_RDWR | ||||
| 	file_open              :: os.open | ||||
| 	file_close             :: os.close | ||||
| 	file_rename            :: os.rename | ||||
| 	file_remove            :: os.remove | ||||
| 	file_resize            :: os.ftruncate | ||||
| 	file_status_via_handle :: os.fstat | ||||
| 	file_status_via_path   :: os.stat | ||||
| import "core:strings" | ||||
| 	builder_to_string      :: strings.to_string | ||||
| 	str_clone              :: strings.clone | ||||
| 	str_builder_from_bytes :: strings.builder_from_bytes | ||||
| import "core:time" | ||||
| 	Millisecond      :: time.Millisecond | ||||
| 	Second           :: time.Second | ||||
| 	Duration         :: time.Duration | ||||
| 	duration_seconds :: time.duration_seconds | ||||
| 	thread_sleep     :: time.sleep | ||||
| import rl    "vendor:raylib" | ||||
| import sectr "../." | ||||
| 	fatal                  :: sectr.fatal | ||||
| 	file_is_locked         :: sectr.file_is_locked | ||||
| 	file_copy_sync         :: sectr.file_copy_sync | ||||
| 	Logger                 :: sectr.Logger | ||||
| 	logger_init            :: sectr.logger_init | ||||
| 	LogLevel               :: sectr.LogLevel | ||||
| 	log                    :: sectr.log | ||||
| 	to_odin_logger         :: sectr.to_odin_logger | ||||
| 	TrackedAllocator       :: sectr.TrackedAllocator | ||||
| 	tracked_allocator      :: sectr.tracked_allocator | ||||
| 	tracked_allocator_init :: sectr.tracked_allocator_init | ||||
| 	verify                 :: sectr.verify | ||||
|  | ||||
| TrackedAllocator       :: sectr.TrackedAllocator | ||||
| tracked_allocator      :: sectr.tracked_allocator | ||||
| tracked_allocator_init :: sectr.tracked_allocator_init | ||||
| file_status :: proc { | ||||
| 	file_status_via_handle, | ||||
| 	file_status_via_path, | ||||
| } | ||||
|  | ||||
| LogLevel :: sectr.LogLevel | ||||
| log      :: sectr.log | ||||
| fatal    :: sectr.fatal | ||||
| verify   :: sectr.verify | ||||
| to_str :: proc { | ||||
| 	builder_to_string, | ||||
| } | ||||
| //endregion Grime & Dependencies | ||||
|  | ||||
| path_snapshot :: "VMemChunk_1.snapshot" | ||||
| Path_Snapshot :: "VMemChunk_1.snapshot" | ||||
| Path_Logs     :: "../logs" | ||||
| when ODIN_OS == runtime.Odin_OS_Type.Windows | ||||
| { | ||||
| 	path_logs                :: "../logs" | ||||
| 	path_sectr_module        :: "sectr.dll" | ||||
| 	path_sectr_live_module   :: "sectr_live.dll" | ||||
| 	path_sectr_debug_symbols :: "sectr.pdb" | ||||
| 	Path_Sectr_Module        :: "sectr.dll" | ||||
| 	Path_Sectr_Live_Module   :: "sectr_live.dll" | ||||
| 	Path_Sectr_Debug_Symbols :: "sectr.pdb" | ||||
| } | ||||
|  | ||||
| RuntimeState :: struct { | ||||
| @@ -46,53 +88,48 @@ RuntimeState :: struct { | ||||
| } | ||||
|  | ||||
| VMemChunk :: struct { | ||||
| 	og_allocator            : mem.Allocator, | ||||
| 	og_temp_allocator       : mem.Allocator, | ||||
| 	og_allocator            : Allocator, | ||||
| 	og_temp_allocator       : Allocator, | ||||
| 	host_persistent         : TrackedAllocator, | ||||
| 	host_transient          : TrackedAllocator, | ||||
| 	sectr_live              : virtual.Arena, | ||||
| 	sectr_live              : Arena, | ||||
| 	sectr_snapshot          : []u8 | ||||
| } | ||||
|  | ||||
| setup_memory :: proc() -> VMemChunk | ||||
| { | ||||
| 	Arena              :: mem.Arena | ||||
| 	Tracking_Allocator :: mem.Tracking_Allocator | ||||
| 	memory : VMemChunk; using memory | ||||
|  | ||||
| 	host_persistent_size :: 32 * Megabyte | ||||
| 	host_transient_size  :: 96 * Megabyte | ||||
| 	internals_size       :: 4  * Megabyte | ||||
| 	Host_Persistent_Size :: 32 * Megabyte | ||||
| 	Host_Transient_Size  :: 96 * Megabyte | ||||
| 	Internals_Size       :: 4  * Megabyte | ||||
|  | ||||
| 	host_persistent = tracked_allocator_init( host_persistent_size, internals_size ) | ||||
| 	host_transient  = tracked_allocator_init( host_transient_size,  internals_size ) | ||||
| 	host_persistent = tracked_allocator_init( Host_Persistent_Size, Internals_Size ) | ||||
| 	host_transient  = tracked_allocator_init( Host_Transient_Size,  Internals_Size ) | ||||
|  | ||||
| 	// Setup the static arena for the entire application | ||||
| 	{ | ||||
| 		base_address : rawptr = transmute( rawptr) u64(Terabyte * 1) | ||||
|  | ||||
| 		result := arena_init_static( & sectr_live, base_address, sectr.memory_chunk_size, sectr.memory_chunk_size ) | ||||
| 		base_address : rawptr = transmute( rawptr) u64(sectr.Memory_Base_Address) | ||||
| 		result       := arena_init_static( & sectr_live, base_address, sectr.Memory_Chunk_Size, sectr.Memory_Chunk_Size ) | ||||
| 		verify( result == runtime.Allocator_Error.None, "Failed to allocate live memory for the sectr module" ) | ||||
| 	} | ||||
|  | ||||
| 	// Setup memory mapped io for snapshots | ||||
| 	{ | ||||
| 		snapshot_file, open_error := os.open( path_snapshot, os.O_RDWR | os.O_CREATE ) | ||||
| 		snapshot_file, open_error := file_open( Path_Snapshot, FileFlag_ReadWrite | FileFlag_Create ) | ||||
| 		verify( open_error == os.ERROR_NONE, "Failed to open snapshot file for the sectr module" ) | ||||
|  | ||||
| 		file_info, stat_code := os.stat( path_snapshot ) | ||||
| 		file_info, stat_code := file_status( snapshot_file ) | ||||
| 		{ | ||||
| 			if file_info.size != sectr.memory_chunk_size { | ||||
| 				file_resize( snapshot_file, sectr.memory_chunk_size ) | ||||
| 			if file_info.size != sectr.Memory_Chunk_Size { | ||||
| 				file_resize( snapshot_file, sectr.Memory_Chunk_Size ) | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		map_error                : virtual.Map_File_Error | ||||
| 		map_flags                : virtual.Map_File_Flags = { virtual.Map_File_Flag.Read, virtual.Map_File_Flag.Write } | ||||
| 		map_error                : MapFileError | ||||
| 		map_flags                : MapFileFlags = { MapFileFlag.Read, MapFileFlag.Write } | ||||
| 		sectr_snapshot, map_error = virtual.map_file_from_file_descriptor( uintptr(snapshot_file), map_flags ) | ||||
| 		verify( map_error == virtual.Map_File_Error.None, "Failed to allocate snapshot memory for the sectr module" ) | ||||
|  | ||||
| 		os.close(snapshot_file) | ||||
| 		verify( map_error == MapFileError.None, "Failed to allocate snapshot memory for the sectr module" ) | ||||
| 		file_close(snapshot_file) | ||||
| 	} | ||||
|  | ||||
| 	// Reassign default allocators for host | ||||
| @@ -113,28 +150,28 @@ load_sectr_api :: proc( version_id : i32 ) -> sectr.ModuleAPI | ||||
| 		return {} | ||||
| 	} | ||||
|  | ||||
| 	live_file := path_sectr_live_module | ||||
| 	sectr.copy_file_sync( path_sectr_module, live_file ) | ||||
| 	live_file := Path_Sectr_Live_Module | ||||
| 	file_copy_sync( Path_Sectr_Module, live_file ) | ||||
|  | ||||
| 	lib, load_result := dynlib.load_library( live_file ) | ||||
| 	lib, load_result := os_lib_load( live_file ) | ||||
| 	if ! load_result { | ||||
| 		log( "Failed to load the sectr module.", LogLevel.Warning ) | ||||
| 		runtime.debug_trap() | ||||
| 		return {} | ||||
| 	} | ||||
|  | ||||
| 	startup    := cast( type_of( sectr.startup        )) dynlib.symbol_address( lib, "startup" ) | ||||
| 	shutdown   := cast( type_of( sectr.sectr_shutdown )) dynlib.symbol_address( lib, "sectr_shutdown" ) | ||||
| 	reload     := cast( type_of( sectr.reload         )) dynlib.symbol_address( lib, "reload" ) | ||||
| 	tick       := cast( type_of( sectr.tick           )) dynlib.symbol_address( lib, "tick" ) | ||||
| 	clean_temp := cast( type_of( sectr.clean_temp     )) dynlib.symbol_address( lib, "clean_temp" ) | ||||
| 	startup    := cast( type_of( sectr.startup        )) os_lib_get_proc( lib, "startup" ) | ||||
| 	shutdown   := cast( type_of( sectr.sectr_shutdown )) os_lib_get_proc( lib, "sectr_shutdown" ) | ||||
| 	reload     := cast( type_of( sectr.reload         )) os_lib_get_proc( lib, "reload" ) | ||||
| 	tick       := cast( type_of( sectr.tick           )) os_lib_get_proc( lib, "tick" ) | ||||
| 	clean_temp := cast( type_of( sectr.clean_temp     )) os_lib_get_proc( lib, "clean_temp" ) | ||||
|  | ||||
| 	missing_symbol : b32 = false | ||||
| 	if startup    == nil do fmt.println("Failed to load sectr.startup symbol") | ||||
| 	if shutdown   == nil do fmt.println("Failed to load sectr.shutdown symbol") | ||||
| 	if reload     == nil do fmt.println("Failed to load sectr.reload symbol") | ||||
| 	if tick       == nil do fmt.println("Failed to load sectr.tick symbol") | ||||
| 	if clean_temp == nil do fmt.println("Failed to load sector.clean_temp symbol") | ||||
| 	if startup    == nil do log("Failed to load sectr.startup symbol", LogLevel.Warning ) | ||||
| 	if shutdown   == nil do log("Failed to load sectr.shutdown symbol", LogLevel.Warning ) | ||||
| 	if reload     == nil do log("Failed to load sectr.reload symbol", LogLevel.Warning ) | ||||
| 	if tick       == nil do log("Failed to load sectr.tick symbol", LogLevel.Warning ) | ||||
| 	if clean_temp == nil do log("Failed to load sectr.clean_temp symbol", LogLevel.Warning ) | ||||
| 	if missing_symbol { | ||||
| 		runtime.debug_trap() | ||||
| 		return {} | ||||
| @@ -157,23 +194,23 @@ load_sectr_api :: proc( version_id : i32 ) -> sectr.ModuleAPI | ||||
|  | ||||
| unload_sectr_api :: proc( module : ^ sectr.ModuleAPI ) | ||||
| { | ||||
| 	dynlib.unload_library( module.lib ) | ||||
| 	os.remove( path_sectr_live_module ) | ||||
| 	os_lib_unload( module.lib ) | ||||
| 	file_remove( Path_Sectr_Live_Module ) | ||||
| 	module^ = {} | ||||
| 	log("Unloaded sectr API") | ||||
| } | ||||
|  | ||||
| sync_sectr_api :: proc( sectr_api : ^ sectr.ModuleAPI, memory : ^ VMemChunk, logger : ^ sectr.Logger ) | ||||
| sync_sectr_api :: proc( sectr_api : ^ sectr.ModuleAPI, memory : ^ VMemChunk, logger : ^ Logger ) | ||||
| { | ||||
| 	if write_time, result := os.last_write_time_by_name( path_sectr_module ); | ||||
| 	if write_time, result := os.last_write_time_by_name( Path_Sectr_Module ); | ||||
| 	result == os.ERROR_NONE && sectr_api.write_time != write_time | ||||
| 	{ | ||||
| 		version_id := sectr_api.lib_version + 1 | ||||
| 		unload_sectr_api( sectr_api ) | ||||
|  | ||||
| 		// Wait for pdb to unlock (linker may still be writing) | ||||
| 		for ; sectr.is_file_locked( path_sectr_debug_symbols ) && sectr.is_file_locked( path_sectr_live_module ); {} | ||||
| 		time.sleep( time.Millisecond ) | ||||
| 		for ; file_is_locked( Path_Sectr_Debug_Symbols ) && file_is_locked( Path_Sectr_Live_Module ); {} | ||||
| 		thread_sleep( Millisecond * 50 ) | ||||
|  | ||||
| 		sectr_api ^ = load_sectr_api( version_id ) | ||||
| 		verify( sectr_api.lib_version != 0, "Failed to hot-reload the sectr module" ) | ||||
| @@ -187,30 +224,32 @@ main :: proc() | ||||
| 	state : RuntimeState | ||||
| 	using state | ||||
|  | ||||
| 	// Generate the logger's finalized name; it will be used when the app is shutting down. | ||||
| 	path_logger_finalized : string | ||||
| 	{ | ||||
| 		startup_time     := time.now() | ||||
| 		year, month, day := time.date( startup_time) | ||||
| 		hour, min, sec   := time.clock_from_time( startup_time) | ||||
|  | ||||
| 		if ! os.is_dir( path_logs ) { | ||||
| 			os.make_directory( path_logs ) | ||||
| 		if ! os.is_dir( Path_Logs ) { | ||||
| 			os.make_directory( Path_Logs ) | ||||
| 		} | ||||
|  | ||||
| 		timestamp            := fmt.tprintf("%04d-%02d-%02d_%02d-%02d-%02d", year, month, day, hour, min, sec) | ||||
| 		path_logger_finalized = strings.clone( fmt.tprintf( "%s/sectr_%v.log", path_logs, timestamp) ) | ||||
| 		timestamp            := str_fmt_tmp("%04d-%02d-%02d_%02d-%02d-%02d", year, month, day, hour, min, sec) | ||||
| 		path_logger_finalized = str_clone( str_fmt_tmp( "%s/sectr_%v.log", Path_Logs, timestamp) ) | ||||
| 	} | ||||
|  | ||||
| 	logger :  sectr.Logger | ||||
| 	sectr.init( & logger, "Sectr Host", fmt.tprintf( "%s/sectr.log", path_logs ) ) | ||||
| 	context.logger = sectr.to_odin_logger( & logger ) | ||||
| 	logger_init( & logger, "Sectr Host", str_fmt_tmp( "%s/sectr.log", Path_Logs ) ) | ||||
| 	context.logger = to_odin_logger( & logger ) | ||||
| 	{ | ||||
| 		// Log System Context | ||||
| 		backing_builder : [16 * Kilobyte] u8 | ||||
| 		builder         := strings.builder_from_bytes( backing_builder[:] ) | ||||
| 		fmt.sbprintf( & builder, "Core Count: %v, ", os.processor_core_count() ) | ||||
| 		fmt.sbprintf( & builder, "Page Size: %v", os.get_page_size() ) | ||||
| 		builder         := str_builder_from_bytes( backing_builder[:] ) | ||||
| 		str_fmt_builder( & builder, "Core Count: %v, ", os.processor_core_count() ) | ||||
| 		str_fmt_builder( & builder, "Page Size: %v",    os.get_page_size() ) | ||||
|  | ||||
| 		sectr.log( strings.to_string(builder) ) | ||||
| 		log( to_str(builder) ) | ||||
| 	} | ||||
|  | ||||
| 	// Basic Giant VMem Block | ||||
| @@ -232,12 +271,11 @@ main :: proc() | ||||
| 		verify( sectr_api.lib_version != 0, "Failed to initially load the sectr module" ) | ||||
| 	} | ||||
|  | ||||
| 	running            = true; | ||||
| 	memory             = memory | ||||
| 	sectr_api          = sectr_api | ||||
| 	running   = true; | ||||
| 	sectr_api = sectr_api | ||||
| 	sectr_api.startup( memory.sectr_live, memory.sectr_snapshot, & logger ) | ||||
|  | ||||
| 	delta_ns : time.Duration | ||||
| 	delta_ns : Duration | ||||
|  | ||||
| 	// TODO(Ed) : This should have an end status so that we know the reason the engine stopped. | ||||
| 	for ; running ; | ||||
| @@ -247,7 +285,7 @@ main :: proc() | ||||
| 		// Hot-Reload | ||||
| 		sync_sectr_api( & sectr_api, & memory, & logger ) | ||||
|  | ||||
| 		running = sectr_api.tick( time.duration_seconds( delta_ns ) ) | ||||
| 		running = sectr_api.tick( duration_seconds( delta_ns ) ) | ||||
| 		sectr_api.clean_temp() | ||||
|  | ||||
| 		delta_ns = time.tick_lap_time( & start_tick ) | ||||
| @@ -263,6 +301,6 @@ main :: proc() | ||||
| 	unload_sectr_api( & sectr_api ) | ||||
|  | ||||
| 	log("Successfully closed") | ||||
| 	os.close( logger.file ) | ||||
| 	os.rename( logger.file_path, path_logger_finalized ) | ||||
| 	file_close( logger.file ) | ||||
| 	file_rename( logger.file_path, path_logger_finalized ) | ||||
| } | ||||
|   | ||||
| @@ -23,7 +23,7 @@ align_formula :: #force_inline proc "contextless" (size, align: uint) -> uint { | ||||
| } | ||||
|  | ||||
| @(private="file") | ||||
| win32_reserve :: proc "contextless" (base_address : rawptr, size: uint) -> (data: []byte, err: virtual.Allocator_Error) { | ||||
| win32_reserve_with_base_address :: proc "contextless" (base_address : rawptr, size: uint) -> (data: []byte, err: virtual.Allocator_Error) { | ||||
| 	result := win32.VirtualAlloc(base_address, size, win32.MEM_RESERVE, win32.PAGE_READWRITE) | ||||
| 	if result == nil { | ||||
| 		err = .Out_Of_Memory | ||||
| @@ -43,7 +43,7 @@ platform_memory_alloc :: proc "contextless" (to_commit, to_reserve: uint, base_a | ||||
| 	total_to_reserved := max(to_reserve, size_of( virtual_Platform_Memory_Block)) | ||||
| 	to_commit = clamp(to_commit, size_of( virtual_Platform_Memory_Block), total_to_reserved) | ||||
|  | ||||
| 	data := win32_reserve(base_address, total_to_reserved) or_return | ||||
| 	data := win32_reserve_with_base_address(base_address, total_to_reserved) or_return | ||||
| 	virtual.commit(raw_data(data), to_commit) | ||||
|  | ||||
| 	block = (^virtual_Platform_Memory_Block)(raw_data(data)) | ||||
| @@ -126,13 +126,10 @@ arena_init_static :: proc(arena: ^virtual.Arena, base_address : rawptr, | ||||
| 	arena.total_reserved = arena.curr_block.reserved | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // END WINDOWS CHECK WRAP | ||||
| } | ||||
| /* END OF: when ODIN_OS == runtime.Odin_OS_Type.Windows */ } | ||||
| else | ||||
| { | ||||
| 	// Fallback to regular init_static impl for other platforms for now. | ||||
|  | ||||
| 	arena_init_static :: proc(arena: ^virtual.Arena, base_address : rawptr, | ||||
| 		reserved    : uint = virtual.DEFAULT_ARENA_STATIC_RESERVE_SIZE, | ||||
| 		commit_size : uint = virtual.DEFAULT_ARENA_STATIC_COMMIT_SIZE | ||||
|   | ||||
code/hot_reload.odin (new file, 21 lines)
							| @@ -0,0 +1,21 @@ | ||||
| package sectr | ||||
|  | ||||
| // TODO(Ed): Do this if we get over 25 bindings we're dealing with in persistent. | ||||
| // The purpose of this is to manage anything we need to help mitigate the difficulties with hot-reloading. | ||||
|  | ||||
| // ProcAddressEntry :: struct { | ||||
| // 	addresses : [2] rawptr | ||||
| // } | ||||
|  | ||||
| // ProcAddressTracker :: struct { | ||||
| // 	table   : HMapZPL( ProcAddressEntry ), | ||||
| // 	current :  | ||||
| // } | ||||
|  | ||||
| // reload_report_callback :: proc( binding : ^Type ) { | ||||
|  | ||||
| // } | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
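The commented-out stubs above only hint at the idea. Purely as an illustration of where this could go, here is a rough sketch of a proc-address tracker built on the new HMapZPL container; ProcAddressEntry, ProcAddressTracker, and proc_address_register are hypothetical names, not part of this commit:

	// Hypothetical sketch: track exported proc addresses across module generations.
	ProcAddressEntry :: struct {
		addresses : [2] rawptr,  // [0] previous module, [1] currently loaded module
	}

	ProcAddressTracker :: struct {
		table   : HMapZPL( ProcAddressEntry ),
		current : i32,  // index into addresses for the live module
	}

	// Record (or refresh) the address a binding resolves to in the live module.
	proc_address_register :: proc( tracker : ^ ProcAddressTracker, key : u64, address : rawptr )
	{
		entry := zpl_hmap_get( & tracker.table, key )
		if entry == nil {
			blank     : ProcAddressEntry
			set_error : AllocatorError
			entry, set_error = zpl_hmap_set( & tracker.table, key, blank )
			verify( set_error == AllocatorError.None, "Failed to set the proc address table" )
		}
		entry.addresses[ tracker.current ] = address
	}

On reload, the table could then be walked to re-resolve each key against the freshly loaded module before any persistent state dereferences the stale addresses.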
| @@ -346,15 +346,15 @@ poll_input :: proc( old, new : ^ InputState ) | ||||
|  | ||||
| record_input :: proc( replay_file : os.Handle, input : ^ InputState ) { | ||||
| 	raw_data := slice_ptr( transmute(^ byte) input, size_of(InputState) ) | ||||
| 	os.write( replay_file, raw_data ) | ||||
| 	file_write( replay_file, raw_data ) | ||||
| } | ||||
|  | ||||
| play_input :: proc( replay_file : os.Handle, input : ^ InputState ) { | ||||
| 	raw_data := slice_ptr( transmute(^ byte) input, size_of(InputState) ) | ||||
| 	total_read, result_code := os.read( replay_file, raw_data ) | ||||
| 	total_read, result_code := file_read( replay_file, raw_data ) | ||||
| 	if result_code == os.ERROR_HANDLE_EOF { | ||||
| 		rewind( replay_file ) | ||||
| 		load_snapshot( & memory.snapshot[0] ) | ||||
| 		file_rewind( replay_file ) | ||||
| 		load_snapshot( & Memory_App.snapshot[0] ) | ||||
| 	} | ||||
| } | ||||
|  | ||||
|   | ||||
| @@ -4,7 +4,7 @@ import "base:runtime" | ||||
| import "core:fmt" | ||||
| import "core:mem" | ||||
| import "core:os" | ||||
| import "core:strings" | ||||
| import str "core:strings" | ||||
| import "core:time" | ||||
| import core_log "core:log" | ||||
|  | ||||
| @@ -22,15 +22,15 @@ to_odin_logger :: proc( logger : ^ Logger ) -> core_log.Logger { | ||||
| 	return { logger_interface, logger, core_log.Level.Debug, core_log.Default_File_Logger_Opts } | ||||
| } | ||||
|  | ||||
| init :: proc( logger : ^ Logger,  id : string, file_path : string, file := os.INVALID_HANDLE ) | ||||
| logger_init :: proc( logger : ^ Logger,  id : string, file_path : string, file := os.INVALID_HANDLE ) | ||||
| { | ||||
| 	if file == os.INVALID_HANDLE | ||||
| 	{ | ||||
| 		logger_file, result_code := os.open( file_path, os.O_RDWR | os.O_CREATE ) | ||||
| 		logger_file, result_code := file_open( file_path, os.O_RDWR | os.O_CREATE ) | ||||
| 		if result_code != os.ERROR_NONE { | ||||
| 				// Log failures are fatal and must never occur at runtime (there is no logging) | ||||
| 				runtime.debug_trap() | ||||
| 				os.     exit( -1 ) | ||||
| 				os.exit( -1 ) | ||||
| 				// TODO(Ed) : Figure out the error code enums.. | ||||
| 		} | ||||
| 		logger.file = logger_file | ||||
| @@ -60,37 +60,37 @@ logger_interface :: proc( | ||||
| 	@static builder_backing : [16 * Kilobyte] byte; { | ||||
| 		mem.set( raw_data( builder_backing[:] ), 0, len(builder_backing) ) | ||||
| 	} | ||||
| 	builder := strings.builder_from_bytes( builder_backing[:] ) | ||||
| 	builder := str.builder_from_bytes( builder_backing[:] ) | ||||
|  | ||||
| 	first_line_length := len(text) > Max_Logger_Message_Width ? Max_Logger_Message_Width : len(text) | ||||
| 	first_line        := transmute(string) text[ 0 : first_line_length ] | ||||
| 	fmt.sbprintf( & builder, "%-*s ", Max_Logger_Message_Width, first_line ) | ||||
| 	str_fmt_builder( & builder, "%-*s ", Max_Logger_Message_Width, first_line ) | ||||
|  | ||||
| 	// Signature | ||||
| 	{ | ||||
| 		when time.IS_SUPPORTED | ||||
| 		{ | ||||
| 			if core_log.Full_Timestamp_Opts & options != nil { | ||||
| 				fmt.sbprint( & builder, "[") | ||||
| 				str_fmt_builder( & builder, "[") | ||||
|  | ||||
| 				t := time.now() | ||||
| 				y, m,   d := time.date(t) | ||||
| 				h, min, s := time.clock(t) | ||||
| 				year, month,  day    := time.date(t) | ||||
| 				hour, minute, second := time.clock(t) | ||||
|  | ||||
| 				if .Date in options { | ||||
| 					fmt.sbprintf( & builder, "%d-%02d-%02d ", y, m, d ) | ||||
| 					str_fmt_builder( & builder, "%d-%02d-%02d ", year, month, day ) | ||||
| 				} | ||||
| 				if .Time in options { | ||||
| 					fmt.sbprintf( & builder, "%02d:%02d:%02d", h, min, s) | ||||
| 					str_fmt_builder( & builder, "%02d:%02d:%02d", hour, minute, second) | ||||
| 				} | ||||
|  | ||||
| 				fmt.sbprint( & builder, "] ") | ||||
| 				str_fmt_builder( & builder, "] ") | ||||
| 			} | ||||
| 		} | ||||
| 		core_log.do_level_header( options, level, & builder ) | ||||
|  | ||||
| 		if logger.id != "" { | ||||
| 			fmt.sbprintf( & builder, "[%s] ", logger.id ) | ||||
| 			str_fmt_builder( & builder, "[%s] ", logger.id ) | ||||
| 		} | ||||
| 		core_log.do_location_header( options, & builder, location  ) | ||||
| 	} | ||||
| @@ -99,21 +99,21 @@ logger_interface :: proc( | ||||
| 	if len(text) > Max_Logger_Message_Width | ||||
| 	{ | ||||
| 		offset := Max_Logger_Message_Width | ||||
| 		bytes  := transmute([]u8) text | ||||
| 		bytes  := transmute( []u8 ) text | ||||
| 		for left := len(bytes) - Max_Logger_Message_Width; left > 0; left -= Max_Logger_Message_Width | ||||
| 		{ | ||||
| 			fmt.sbprintf( & builder, "\n" ) | ||||
| 			str_fmt_builder( & builder, "\n" ) | ||||
| 			subset_length := len(text) - offset | ||||
| 			if subset_length > Max_Logger_Message_Width { | ||||
| 				subset_length = Max_Logger_Message_Width | ||||
| 			} | ||||
| 			subset := slice_ptr( ptr_offset( raw_data(bytes), offset), subset_length ) | ||||
| 			fmt.sbprintf( & builder, "%s", transmute(string)subset ) | ||||
| 			str_fmt_builder( & builder, "%s", transmute(string) subset ) | ||||
| 			offset += Max_Logger_Message_Width | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	fmt.fprintln( logger.file, strings.to_string(builder) ) | ||||
| 	str_to_file_ln( logger.file, to_string(builder) ) | ||||
| } | ||||
|  | ||||
| log :: proc( msg : string, level := LogLevel.Info, loc := #caller_location ) { | ||||
|   | ||||
| @@ -17,46 +17,46 @@ ReplayState :: struct { | ||||
| replay_recording_begin :: proc( path : string ) | ||||
| { | ||||
| 	if file_exists( path ) { | ||||
| 		result := os.remove( path ) | ||||
| 		result := file_remove( path ) | ||||
| 		verify( result != os.ERROR_NONE, "Failed to delete replay file before beginning a new one" ) | ||||
| 	} | ||||
|  | ||||
| 	replay_file, open_error := os.open( path, os.O_RDWR | os.O_CREATE ) | ||||
| 	replay_file, open_error := file_open( path, FileFlag_ReadWrite | FileFlag_Create ) | ||||
| 	verify( open_error != os.ERROR_NONE, "Failed to create or open the replay file" ) | ||||
|  | ||||
| 	os.seek( replay_file, 0, 0 ) | ||||
| 	file_seek( replay_file, 0, 0 ) | ||||
|  | ||||
| 	replay := & memory.replay | ||||
| 	replay := & Memory_App.replay | ||||
| 	replay.active_file = replay_file | ||||
| 	replay.mode        = ReplayMode.Record | ||||
| } | ||||
|  | ||||
| replay_recording_end :: proc() { | ||||
| 	replay := & memory.replay | ||||
| 	replay := & Memory_App.replay | ||||
| 	replay.mode = ReplayMode.Off | ||||
|  | ||||
| 	os.seek( replay.active_file, 0, 0 ) | ||||
| 	os.close( replay.active_file ) | ||||
| 	file_seek( replay.active_file, 0, 0 ) | ||||
| 	file_close( replay.active_file ) | ||||
| } | ||||
|  | ||||
| replay_playback_begin :: proc( path : string ) | ||||
| { | ||||
| 	verify( ! file_exists( path ), "Failed to find replay file" ) | ||||
|  | ||||
| 	replay_file, open_error := os.open( path, os.O_RDWR | os.O_CREATE ) | ||||
| 	replay_file, open_error := file_open( path, FileFlag_ReadWrite | FileFlag_Create ) | ||||
| 	verify( open_error != os.ERROR_NONE, "Failed to create or open the replay file" ) | ||||
|  | ||||
| 	os.seek( replay_file, 0, 0 ) | ||||
| 	file_seek( replay_file, 0, 0 ) | ||||
|  | ||||
| 	replay := & memory.replay | ||||
| 	replay := & Memory_App.replay | ||||
| 	replay.active_file = replay_file | ||||
| 	replay.mode        = ReplayMode.Playback | ||||
| } | ||||
|  | ||||
| replay_playback_end :: proc() { | ||||
| 	input  := get_state().input | ||||
| 	replay := & memory.replay | ||||
| 	replay := & Memory_App.replay | ||||
| 	replay.mode = ReplayMode.Off | ||||
| 	os.seek( replay.active_file, 0, 0 ) | ||||
| 	os.close( replay.active_file ) | ||||
| 	file_seek( replay.active_file, 0, 0 ) | ||||
| 	file_close( replay.active_file ) | ||||
| } | ||||
|   | ||||
| @@ -168,7 +168,7 @@ project_save :: proc( project : ^ Project, archive : ^ ArchiveData = nil ) | ||||
| 		verify( cast(b32) os.is_dir( project.path ), "Failed to create project path for saving" ) | ||||
| 	} | ||||
|  | ||||
| 	os.write_entire_file( fmt.tprint( project.path, project.name, ".sectr_proj", sep = ""), archive.data ) | ||||
| 	os.write_entire_file( str_tmp_from_any( project.path, project.name, ".sectr_proj", sep = ""), archive.data ) | ||||
| } | ||||
|  | ||||
| project_load :: proc( path : string, project : ^ Project, archive : ^ ArchiveData = nil ) | ||||
|   | ||||
code/string_format.odin (new file, 11 lines)
							| @@ -0,0 +1,11 @@ | ||||
| // This provides a string generator that uses a token-replacement approach instead of parsing a %<id> verb syntax. | ||||
| // This was done purely out of preference; I personally don't like the C printf-like syntax. | ||||
| package sectr | ||||
|  | ||||
|  | ||||
|  | ||||
| // str_format :: proc ( format : string, tokens : ..args ) { | ||||
|  | ||||
| // } | ||||
|  | ||||
|  | ||||
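str_format is still a stub. As a sketch of the token-replacement idea only (the proc name, the map-based argument pairing, and the package name are assumptions; only core:strings calls are used):

	package sectr_format_sketch

	import str "core:strings"

	// Replace each "<key>" token in the format string with its paired value.
	str_format_tokens :: proc( format : string, tokens : map[string]string, allocator := context.temp_allocator ) -> string
	{
		result := format
		for key, value in tokens {
			pattern  := str.concatenate( { "<", key, ">" }, allocator )
			result, _ = str.replace_all( result, pattern, value, allocator )
		}
		return result
	}

A call site would then pass named token/value pairs instead of matching positional arguments to %-verbs.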
| @@ -1,7 +1,7 @@ | ||||
| package sectr | ||||
|  | ||||
| import "core:math" | ||||
| import    "core:unicode/utf8" | ||||
| import "core:unicode/utf8" | ||||
| import rl "vendor:raylib" | ||||
|  | ||||
| debug_draw_text :: proc( content : string, pos : Vec2, size : f32, color : rl.Color = rl.WHITE, font : FontID = Font_Default ) | ||||
| @@ -11,7 +11,7 @@ debug_draw_text :: proc( content : string, pos : Vec2, size : f32, color : rl.Co | ||||
| 	if len( content ) == 0 { | ||||
| 		return | ||||
| 	} | ||||
| 	runes := utf8.string_to_runes( content, context.temp_allocator ) | ||||
| 	runes := to_runes( content, context.temp_allocator ) | ||||
|  | ||||
| 	font := font | ||||
| 	if font == 0 { | ||||
| @@ -38,7 +38,7 @@ debug_draw_text_world :: proc( content : string, pos : Vec2, size : f32, color : | ||||
| 	if len( content ) == 0 { | ||||
| 		return | ||||
| 	} | ||||
| 	runes := utf8.string_to_runes( content, context.temp_allocator ) | ||||
| 	runes := to_runes( content, context.temp_allocator ) | ||||
|  | ||||
| 	font := font | ||||
| 	if  font == 0 { | ||||
|   | ||||
| @@ -7,7 +7,7 @@ import rl "vendor:raylib" | ||||
| render :: proc() | ||||
| { | ||||
| 	state  := get_state(); using state | ||||
| 	replay := & memory.replay | ||||
| 	replay := & Memory_App.replay | ||||
| 	cam    := & project.workspace.cam | ||||
| 	win_extent := state.app_window.extent | ||||
|  | ||||
| @@ -21,7 +21,7 @@ render :: proc() | ||||
| 	render_mode_2d() | ||||
| 	//region Render Screenspace | ||||
| 	{ | ||||
| 		fps_msg       := fmt.tprint( "FPS:", rl.GetFPS() ) | ||||
| 		fps_msg       := str_fmt_tmp( "FPS:", rl.GetFPS() ) | ||||
| 		fps_msg_width := measure_text_size( fps_msg, default_font, 16.0, 0.0 ).x | ||||
| 		fps_msg_pos   := screen_get_corners().top_right - { fps_msg_width, 0 } | ||||
| 		debug_draw_text( fps_msg, fps_msg_pos, 16.0, color = rl.GREEN ) | ||||
| @@ -42,7 +42,7 @@ render :: proc() | ||||
| 			position.x -= 200 | ||||
| 			position.y += debug.draw_debug_text_y | ||||
|  | ||||
| 			content := fmt.bprintf( draw_text_scratch[:], format, ..args ) | ||||
| 			content := str_fmt_buffer( draw_text_scratch[:], format, ..args ) | ||||
| 			debug_draw_text( content, position, 16.0 ) | ||||
|  | ||||
| 			debug.draw_debug_text_y += 16 | ||||
| @@ -50,8 +50,8 @@ render :: proc() | ||||
|  | ||||
| 		// Debug Text | ||||
| 		{ | ||||
| 			// debug_text( "Screen Width : %v", rl.GetScreenWidth () ) | ||||
| 			// debug_text( "Screen Height: %v", rl.GetScreenHeight() ) | ||||
| 			debug_text( "Screen Width : %v", rl.GetScreenWidth () ) | ||||
| 			debug_text( "Screen Height: %v", rl.GetScreenHeight() ) | ||||
| 			if replay.mode == ReplayMode.Record { | ||||
| 				debug_text( "Recording Input") | ||||
| 			} | ||||
|   | ||||
| @@ -2,7 +2,6 @@ package sectr | ||||
|  | ||||
| import "base:runtime" | ||||
| import "core:math" | ||||
| import "core:fmt" | ||||
|  | ||||
| import rl "vendor:raylib" | ||||
|  | ||||
| @@ -60,7 +59,7 @@ poll_debug_actions :: proc( actions : ^ DebugActions, input : ^ InputState ) | ||||
| update :: proc( delta_time : f64 ) -> b32 | ||||
| { | ||||
| 	state  := get_state(); using state | ||||
| 	replay := & memory.replay | ||||
| 	replay := & Memory_App.replay | ||||
|  | ||||
| 	if rl.IsWindowResized() { | ||||
| 		window := & state.app_window | ||||
| @@ -82,7 +81,7 @@ update :: proc( delta_time : f64 ) -> b32 | ||||
| 			project_save( & project ) | ||||
| 		} | ||||
| 		if debug_actions.load_project { | ||||
| 			project_load( fmt.tprint( project.path, project.name, ".sectr_proj", sep = "" ), & project ) | ||||
| 			project_load( str_tmp_from_any( project.path, project.name, ".sectr_proj", sep = "" ), & project ) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| @@ -91,7 +90,7 @@ update :: proc( delta_time : f64 ) -> b32 | ||||
| 		if debug_actions.record_replay { #partial switch replay.mode | ||||
| 		{ | ||||
| 			case ReplayMode.Off : { | ||||
| 				save_snapshot( & memory.snapshot[0] ) | ||||
| 				save_snapshot( & Memory_App.snapshot[0] ) | ||||
| 				replay_recording_begin( Path_Input_Replay ) | ||||
| 			} | ||||
| 			case ReplayMode.Record : { | ||||
| @@ -103,21 +102,21 @@ update :: proc( delta_time : f64 ) -> b32 | ||||
| 		{ | ||||
| 			case ReplayMode.Off : { | ||||
| 				if ! file_exists( Path_Input_Replay ) { | ||||
| 					save_snapshot( & memory.snapshot[0] ) | ||||
| 					save_snapshot( & Memory_App.snapshot[0] ) | ||||
| 					replay_recording_begin( Path_Input_Replay ) | ||||
| 				} | ||||
| 				else { | ||||
| 					load_snapshot( & memory.snapshot[0] ) | ||||
| 					load_snapshot( & Memory_App.snapshot[0] ) | ||||
| 					replay_playback_begin( Path_Input_Replay ) | ||||
| 				} | ||||
| 			} | ||||
| 			case ReplayMode.Playback : { | ||||
| 				replay_playback_end() | ||||
| 				load_snapshot( & memory.snapshot[0] ) | ||||
| 				load_snapshot( & Memory_App.snapshot[0] ) | ||||
| 			} | ||||
| 			case ReplayMode.Record : { | ||||
| 				replay_recording_end() | ||||
| 				load_snapshot( & memory.snapshot[0] ) | ||||
| 				load_snapshot( & Memory_App.snapshot[0] ) | ||||
| 				replay_playback_begin( Path_Input_Replay ) | ||||
| 			} | ||||
| 		}} | ||||
| @@ -175,15 +174,16 @@ update :: proc( delta_time : f64 ) -> b32 | ||||
| 	//endregion | ||||
|  | ||||
| 	//region Imgui Tick | ||||
|  | ||||
| 	{ | ||||
| 		ui_context := & state.project.workspace.ui | ||||
| 		// Creates the root box node and sets it as the first parent. | ||||
| 		ui_graph_build( & state.project.workspace.ui ) | ||||
|  | ||||
| 		// Build Graph (Determines if layout is dirty) | ||||
| 		ui_graph_build_begin( ui_context ) | ||||
| 		ui_style({ bg_color = Color_BG_TextBox }) | ||||
| 		ui_set_layout({ size = { 200, 200 }}) | ||||
|  | ||||
| 		 | ||||
|  | ||||
| 		// Regenerate the computed layout if dirty. | ||||
| 		first_flags : UI_BoxFlags = { .Mouse_Clickable, .Focusable, .Click_To_Focus  } | ||||
| 		ui_box_make( first_flags, "FIRST BOX BOIS" ) | ||||
| 	} | ||||
| 	// endregion | ||||
|  | ||||
|   | ||||
code/ui.odin (281 lines changed)
							| @@ -138,30 +138,34 @@ UI_Layout :: struct { | ||||
|  | ||||
| 	corner_radii : [Corner.Count]f32, | ||||
|  | ||||
| 	// TODO(Ed) I problably won't use this as I can determine | ||||
| 	// the size of content manually when needed and make helper procs... | ||||
| 	// size_to_content : b32, | ||||
| 	// TODO(Ed) : Add support for this | ||||
| 	size_to_content : b32, | ||||
| 	size            : Vec2, | ||||
| } | ||||
|  | ||||
| 	size : Vec2, | ||||
| UI_BoxState :: enum { | ||||
| 	Disabled, | ||||
| 	Default, | ||||
| 	Hovered, | ||||
| 	Focused, | ||||
| } | ||||
|  | ||||
| UI_Style :: struct { | ||||
| 	bg_color      : Color, | ||||
| 	overlay_color : Color, | ||||
| 	border_color  : Color, | ||||
| 	bg_color     : Color, | ||||
| 	border_color : Color, | ||||
|  | ||||
| 	// blur_size : f32, | ||||
| 	blur_size : f32, | ||||
|  | ||||
| 	font                    : FontID, | ||||
| 	font_size               : f32, | ||||
| 	text_color              : Color, | ||||
| 	text_alignment          : UI_TextAlign, | ||||
| 	// text_wrap_width_pixels  : f32, | ||||
| 	// text_wrap_width_percent : f32, | ||||
| 	font           : FontID, | ||||
| 	font_size      : f32, | ||||
| 	text_color     : Color, | ||||
| 	text_alignment : UI_TextAlign, | ||||
|  | ||||
| 	// cursors       : [CursorKind.Count]UI_Cursor, | ||||
| 	// active_cursor : ^UI_Cursor, | ||||
| 	// hover_cursor : ^ UI_Cursor, | ||||
| 	cursor : UI_Cursor, | ||||
|  | ||||
| 	layout : UI_Layout, | ||||
|  | ||||
| 	transition_time : f32, | ||||
| } | ||||
|  | ||||
| UI_TextAlign :: enum u32 { | ||||
| @@ -171,124 +175,239 @@ UI_TextAlign :: enum u32 { | ||||
| 	Count | ||||
| } | ||||
|  | ||||
| UI_Box :: struct { | ||||
| 	first, last, prev, next : ^ UI_Box, | ||||
| 	num_children : i32, | ||||
|  | ||||
| 	flags : UI_BoxFlags, | ||||
|  | ||||
| 	key   : UI_Key, | ||||
| 	label : string, | ||||
|  | ||||
| 	computed : UI_Computed, | ||||
|  | ||||
| 	layout : UI_Layout, | ||||
| 	style  : UI_Style, | ||||
|  | ||||
| 	// Persistent Data | ||||
| UI_InteractState :: struct { | ||||
| 	hot_time      : f32, | ||||
| 	active_time   : f32, | ||||
| 	disabled_time : f32, | ||||
| } | ||||
|  | ||||
| Layout_Stack_Size :: 512 | ||||
| Style_Stack_Size  :: 512 | ||||
| UI_Box :: struct { | ||||
| 	// Cache ID | ||||
| 	key   : UI_Key, | ||||
| 	label : string, | ||||
|  | ||||
| 	// Regenerated per frame. | ||||
| 	first, last, prev, next, parent : ^ UI_Box, | ||||
| 	num_children : i32, | ||||
|  | ||||
| 	flags    : UI_BoxFlags, | ||||
| 	computed : UI_Computed, | ||||
| 	style    : UI_Style, | ||||
|  | ||||
| 	// Persistent Data | ||||
| 	// prev_computed : UI_Computed, | ||||
| 	// prev_style    : UI_Style, | ||||
| 	mouse         : UI_InteractState, | ||||
| 	keyboard      : UI_InteractState, | ||||
| } | ||||
|  | ||||
| UI_Layout_Stack_Size      :: 512 | ||||
| UI_Style_Stack_Size       :: 512 | ||||
| UI_Parent_Stack_Size      :: 1024 | ||||
| UI_Built_Boxes_Array_Size :: Kilobyte * 8 | ||||
|  | ||||
| UI_FramePassKind :: enum { | ||||
| 	Generate, | ||||
| 	Compute, | ||||
| 	Logical, | ||||
| } | ||||
|  | ||||
| UI_State :: struct { | ||||
| 	box_cache : HashTable( UI_Box ), | ||||
| 	// TODO(Ed) : Use these | ||||
| 	build_arenas : [2]Arena, | ||||
| 	build_arena  : ^ Arena, | ||||
|  | ||||
| 	built_box_count : i32, | ||||
|  | ||||
| 	caches     : [2] HMapZPL( UI_Box ), | ||||
| 	prev_cache : ^ HMapZPL( UI_Box ), | ||||
| 	curr_cache : ^ HMapZPL( UI_Box ), | ||||
|  | ||||
| 	root : ^ UI_Box, | ||||
|  | ||||
| 	// Do we need to recompute the layout? | ||||
| 	layout_dirty  : b32, | ||||
| 	layout_stack  : Stack( UI_Layout, Layout_Stack_Size ), | ||||
| 	style_stack   : Stack( UI_Style, Style_Stack_Size ), | ||||
|  | ||||
| 	hot                : UI_Key, | ||||
| 	active             : UI_Key, | ||||
| 	clipboard_copy_key : UI_Key, | ||||
| 	// TODO(Ed) : Look into using a build arena like Ryan does for these possibly (and thus have a linked-list stack) | ||||
| 	style_stack   : Stack( UI_Style,  UI_Style_Stack_Size ), | ||||
| 	parent_stack  : Stack( ^ UI_Box,  UI_Parent_Stack_Size ), | ||||
|  | ||||
| 	hot            : UI_Key, | ||||
| 	active         : UI_Key, | ||||
| 	clipboard_copy : UI_Key, | ||||
| 	last_clicked   : UI_Key, | ||||
|  | ||||
| 	drag_start_mouse : Vec2, | ||||
| 	// drag_state_arena : ^ Arena, | ||||
| 	// drag_state data  : string, | ||||
|  | ||||
| } | ||||
|  | ||||
| ui_key_from_string :: proc ( value : string ) -> UI_Key { | ||||
| ui_key_from_string :: proc( value : string ) -> UI_Key { | ||||
| 	key := cast(UI_Key) crc32( transmute([]byte) value ) | ||||
| 	return key | ||||
| } | ||||
|  | ||||
| ui_box_equal :: proc ( a, b : ^ UI_Box ) -> b32 { | ||||
| ui_box_equal :: proc( a, b : ^ UI_Box ) -> b32 { | ||||
| 	BoxSize :: size_of(UI_Box) | ||||
| 	hash_a := crc32( transmute([]u8) slice_ptr( a, BoxSize ) ) | ||||
| 	hash_b := crc32( transmute([]u8) slice_ptr( b, BoxSize ) ) | ||||
| 	result : b32 = hash_a == hash_b | ||||
|  | ||||
| 	result : b32 = true | ||||
| 	result &= a.key   == b.key   // We assume for now the label is the same as the key, if not something is terribly wrong. | ||||
| 	result &= a.flags == b.flags | ||||
|  | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| ui_startup :: proc ( ui : ^ UI_State, cache_allocator : Allocator ) { | ||||
| ui_startup :: proc( ui : ^ UI_State, cache_allocator : Allocator ) | ||||
| { | ||||
| 	ui := ui | ||||
| 	ui^ = {} | ||||
|  | ||||
| 	box_cache, allocation_error := hashtable_init_reserve( UI_Box, cache_allocator, Kilobyte * 8 ) | ||||
| 	verify( allocation_error != AllocatorError.None, "Failed to allocate box cache" ) | ||||
| 	ui.box_cache = box_cache | ||||
| 	for cache in (& ui.caches) { | ||||
| 		box_cache, allocation_error := zpl_hmap_init_reserve( UI_Box, cache_allocator, UI_Built_Boxes_Array_Size ) | ||||
| 		verify( allocation_error == AllocatorError.None, "Failed to allocate box cache" ) | ||||
| 		cache = box_cache | ||||
| 	} | ||||
|  | ||||
| 	ui.curr_cache = & ui.caches[1] | ||||
| 	ui.prev_cache = & ui.caches[0] | ||||
| } | ||||
|  | ||||
| ui_reload :: proc ( ui : ^ UI_State, cache_allocator : Allocator ) { | ||||
| ui_reload :: proc( ui : ^ UI_State, cache_allocator : Allocator ) | ||||
| { | ||||
| 	// We need to repopulate Allocator references | ||||
| 	ui.box_cache.entries.allocator = cache_allocator | ||||
| 	ui.box_cache.hashes.allocator  = cache_allocator | ||||
| 	for cache in & ui.caches { | ||||
| 		cache.entries.allocator = cache_allocator | ||||
| 		cache.hashes.allocator  = cache_allocator | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // TODO(Ed) : Is this even needed? | ||||
| ui_shutdown :: proc () { | ||||
| ui_shutdown :: proc() { | ||||
| } | ||||
|  | ||||
| ui_graph_build_begin :: proc ( ui : ^ UI_State ) | ||||
| ui_graph_build_begin :: proc( ui : ^ UI_State, bounds : Vec2 = {} ) | ||||
| { | ||||
| 	ui_context := & get_state().ui_context | ||||
| 	ui_context = ui | ||||
| 	using ui_context | ||||
| 	get_state().ui_context = ui | ||||
| 	using get_state().ui_context | ||||
|  | ||||
| 	box : UI_Box = {} | ||||
| 	box.label  = "root#001" | ||||
| 	box.key    = ui_key_from_string( box.label ) | ||||
| 	box.layout = stack_peek( & layout_stack ) ^ | ||||
| 	box.style  = stack_peek( & style_stack )  ^ | ||||
| 	swap( & curr_cache, & prev_cache ) | ||||
|  | ||||
| 	cached_box := hashtable_get( & box_cache, cast(u64) box.key ) | ||||
| 	root = ui_box_make( {}, "root#001" ) | ||||
| 	ui_parent_push(root) | ||||
|  | ||||
| 	if cached_box != nil { | ||||
| 		layout_dirty &= ! ui_box_equal( & box, cached_box ) | ||||
| 	log("BUILD GRAPH BEGIN") | ||||
| } | ||||
|  | ||||
| // TODO(Ed) :: Is this even needed? | ||||
| ui_graph_build_end :: proc() | ||||
| { | ||||
| 	ui_parent_pop() | ||||
|  | ||||
| 	// Regenerate the computed layout if dirty | ||||
| 	// ui_compute_layout() | ||||
|  | ||||
| 	get_state().ui_context = nil | ||||
| 	log("BUILD GRAPH END") | ||||
| } | ||||
|  | ||||
| @(deferred_none = ui_graph_build_end) | ||||
| ui_graph_build :: proc( ui : ^ UI_State ) { | ||||
| 	ui_graph_build_begin( ui ) | ||||
| } | ||||
|  | ||||
| ui_parent_push :: proc( ui : ^ UI_Box ) { | ||||
| 	stack := & get_state().ui_context.parent_stack | ||||
| 	stack_push( & get_state().ui_context.parent_stack, ui ) | ||||
| } | ||||
|  | ||||
| ui_parent_pop :: proc() { | ||||
| 	// If size_to_content is set, we need to compute the layout now. | ||||
|  | ||||
| 	// Check to make sure that the parent's children are the same for this frame, | ||||
| 	// if they're not, we need to mark the layout as dirty. | ||||
|  | ||||
| 	stack_pop( & get_state().ui_context.parent_stack ) | ||||
| } | ||||
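Neither of those checks exists yet. A hedged sketch of what the child-comparison pass could look like against the previous frame's cache (the proc name is hypothetical, and it assumes num_children is maintained wherever children get linked):

	// Hypothetical helper, not part of this commit: compare this frame's children
	// against the previous frame's cached copy of the same parent box.
	ui_parent_children_dirty_check :: proc( parent : ^ UI_Box )
	{
		using get_state().ui_context

		prev_parent := zpl_hmap_get( prev_cache, cast(u64) parent.key )
		if prev_parent == nil || prev_parent.num_children != parent.num_children {
			layout_dirty = true
			return
		}

		// Walk both child lists in lock-step; any key mismatch means the graph changed.
		curr_child := parent.first
		prev_child := prev_parent.first
		for ; curr_child != nil && prev_child != nil; {
			if curr_child.key != prev_child.key {
				layout_dirty = true
				return
			}
			curr_child = curr_child.next
			prev_child = prev_child.next
		}
	}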
|  | ||||
| @(deferred_none = ui_parent_pop) | ||||
| ui_parent :: proc( ui : ^ UI_Box) { | ||||
| 	ui_parent_push( ui ) | ||||
| } | ||||
|  | ||||
| ui_box_make :: proc( flags : UI_BoxFlags, label : string ) -> (^ UI_Box) | ||||
| { | ||||
| 	using get_state().ui_context | ||||
|  | ||||
| 	key := ui_key_from_string( label ) | ||||
|  | ||||
| 	curr_box : (^ UI_Box) | ||||
| 	prev_box := zpl_hmap_get( prev_cache, cast(u64) key ) | ||||
| 	{ | ||||
| 		set_result : ^ UI_Box | ||||
| 		set_error  : AllocatorError | ||||
| 		if prev_box != nil { | ||||
| 			// Previous history was found, copy over previous state. | ||||
| 			set_result, set_error = zpl_hmap_set( curr_cache, cast(u64) key, (prev_box ^) ) | ||||
| 		} | ||||
| 		else { | ||||
| 			box : UI_Box | ||||
| 			box.key    = key | ||||
| 			box.label  = label | ||||
| 			set_result, set_error = zpl_hmap_set( curr_cache, cast(u64) key, box ) | ||||
| 		} | ||||
|  | ||||
| 		verify( set_error == AllocatorError.None, "Failed to set zpl_hmap due to allocator error" ) | ||||
| 		curr_box = set_result | ||||
| 	} | ||||
|  | ||||
| 	if prev_box != nil { | ||||
| 		layout_dirty &= ! ui_box_equal( curr_box, prev_box ) | ||||
| 	} | ||||
| 	else { | ||||
| 		layout_dirty = true | ||||
| 	} | ||||
|  | ||||
| 	set_result, set_error: = hashtable_set( & box_cache, cast(u64) box.key, box ) | ||||
| 	verify( set_error != AllocatorError.None, "Failed to set hashtable due to allocator error" ) | ||||
| 	root = set_result | ||||
| 	curr_box.flags  = flags | ||||
| 	curr_box.style  = ( stack_peek( & style_stack )  ^ ) | ||||
| 	curr_box.parent = ( stack_peek( & parent_stack ) ^ ) | ||||
|  | ||||
| 	// If there is a parent, setup the relevant references | ||||
| 	if curr_box.parent != nil | ||||
| 	{ | ||||
| 		// dbl_linked_list_push_back( box.parent, nil, box ) | ||||
| 		curr_box.parent.last = curr_box | ||||
|  | ||||
| 		if curr_box.parent.first == nil { | ||||
| 			curr_box.parent.first = curr_box | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return curr_box | ||||
| } | ||||
|  | ||||
| ui_box_make :: proc( flags : UI_BoxFlags, label : string ) -> (^ UI_Box) | ||||
| { | ||||
|  | ||||
|  | ||||
| 	return nil | ||||
| ui_set_layout :: proc ( layout : UI_Layout ) { | ||||
| 	log("LAYOUT SET") | ||||
| } | ||||
|  | ||||
| ui_layout_push :: proc ( preset : UI_Layout ) { | ||||
|  | ||||
| ui_compute_layout :: proc() { | ||||
| 	// TODO(Ed) : This generates the bounds for each box. | ||||
| } | ||||
|  | ||||
| ui_layout_pop :: proc () { | ||||
|  | ||||
| ui_layout_set_size :: proc( size : Vec2 ) { | ||||
| } | ||||
|  | ||||
| ui_layout_push_size :: proc( size : Vec2 ) { | ||||
|  | ||||
| ui_style_push :: proc( preset : UI_Style ) { | ||||
| 	log("STYLE PUSH") | ||||
| 	stack_push( & get_state().ui_context.style_stack, preset ) | ||||
| } | ||||
|  | ||||
| ui_style_push :: proc ( preset : UI_Style ) { | ||||
| 	 | ||||
| ui_style_pop :: proc() { | ||||
| 	log("STYLE POP") | ||||
| 	stack_pop( & get_state().ui_context.style_stack ) | ||||
| } | ||||
|  | ||||
| @(deferred_none = ui_style_pop) | ||||
| ui_style :: proc( preset : UI_Style ) { | ||||
| 	ui_style_push( preset ) | ||||
| } | ||||
|   | ||||