Minor cleanup for formatting, reverse verify condition usage
The verify/ensure condition should only fire when the condition is false, not the other way around.
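
A minimal sketch of the corrected pattern, mirroring the verify proc and one call site from the diff below (the fail path is assumed to mirror fatal: log, trap, exit):

verify :: proc( condition : b32, msg : string, exit_code : int = -1, location := #caller_location )
{
	// Early-out when the condition holds; the assertion only fires when it is false.
	if condition {
		return
	}
	// Fail path (assumed to mirror fatal in the diff): log, debug trap, then exit.
	log( msg, LogLevel.Fatal, location )
	runtime.debug_trap()
	os.exit( exit_code )
}

// Call sites now pass the condition expected to be true:
// verify( alloc_result == AllocatorError.None, "Failed to allocate memory for font_arena from persistent" )
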
		| @@ -268,7 +268,7 @@ ui_button :: proc( label : string ) -> UI_Signal { | ||||
| 	return signal | ||||
| } | ||||
|  | ||||
| ui_spacer :: proc ( label : string = UI_NullLabel ) -> UI_Signal { | ||||
| ui_spacer :: proc( label : string = UI_NullLabel ) -> UI_Signal { | ||||
| 	box    := ui_box_make( UI_BoxFlags_Null, label ) | ||||
| 	signal := ui_signal_from_box( box ) | ||||
| 	return signal | ||||
|   | ||||
| @@ -110,7 +110,7 @@ startup :: proc( live_mem : virtual.Arena, snapshot_mem : []u8, host_logger : ^ | ||||
| 		font_firacode  = font_load( path_firacode, 24.0, "FiraCode" ) | ||||
|  | ||||
| 		// font_data, read_succeded : = os.read_entire_file( path_rec_mono_semicasual_reg  ) | ||||
| 		// verify( ! read_succeded, fmt.tprintf("Failed to read font file for: %v", path_rec_mono_semicasual_reg) ) | ||||
| 		// verify( read_succeded, fmt.tprintf("Failed to read font file for: %v", path_rec_mono_semicasual_reg) ) | ||||
|  | ||||
| 		// cstr                         := strings.clone_to_cstring( path_rec_mono_semicasual_reg ) | ||||
| 		// font_rec_mono_semicasual_reg  = rl.LoadFontEx( cstr, cast(i32) points_to_pixels(24.0), nil, 0 ) | ||||
| @@ -215,7 +215,7 @@ swap :: proc( a, b : ^ $Type ) -> ( ^ Type, ^ Type ) { | ||||
| } | ||||
|  | ||||
| @export | ||||
| tick :: proc ( delta_time : f64 ) -> b32 | ||||
| tick :: proc( delta_time : f64 ) -> b32 | ||||
| { | ||||
| 	context.allocator      = transient_allocator() | ||||
| 	context.temp_allocator = temp_allocator() | ||||
|   | ||||
| @@ -5,7 +5,7 @@ import "core:os" | ||||
|  | ||||
| ensure :: proc( condition : b32, msg : string, location := #caller_location ) | ||||
| { | ||||
| 	if ! condition { | ||||
| 	if condition { | ||||
| 		return | ||||
| 	} | ||||
| 	log( msg, LogLevel.Warning, location ) | ||||
| @@ -13,16 +13,16 @@ ensure :: proc( condition : b32, msg : string, location := #caller_location ) | ||||
| } | ||||
|  | ||||
| // TODO(Ed) : Setup exit codes! | ||||
| fatal :: proc ( msg : string, exit_code : int = -1, location := #caller_location ) | ||||
| fatal :: proc( msg : string, exit_code : int = -1, location := #caller_location ) | ||||
| { | ||||
| 	log( msg, LogLevel.Fatal, location ) | ||||
| 	runtime.debug_trap() | ||||
| 	os.exit( exit_code ) | ||||
| } | ||||
|  | ||||
| verify :: proc ( condition : b32, msg : string, exit_code : int = -1, location := #caller_location ) | ||||
| verify :: proc( condition : b32, msg : string, exit_code : int = -1, location := #caller_location ) | ||||
| { | ||||
| 	if ! condition { | ||||
| 	if condition { | ||||
| 		return | ||||
| 	} | ||||
| 	log( msg, LogLevel.Fatal, location ) | ||||
|   | ||||
| @@ -3,7 +3,7 @@ package sectr | ||||
| import "core:math/linalg" | ||||
|  | ||||
|  | ||||
| box_is_within :: proc ( box : ^ Box2, pos : Vec2 ) -> b32 { | ||||
| box_is_within :: proc( box : ^ Box2, pos : Vec2 ) -> b32 { | ||||
| 	bounds := box_get_bounds( box ) | ||||
| 	within_x_bounds : b32 = pos.x >= bounds.top_left.x     && pos.x <= bounds.bottom_right.x | ||||
| 	within_y_bounds : b32 = pos.y >= bounds.bottom_right.y && pos.y <= bounds.top_left.y | ||||
|   | ||||
| @@ -30,7 +30,7 @@ box_set_size :: proc( box : ^ Box2, size : AreaSize ) { | ||||
| } | ||||
|  | ||||
| // TODO(Ed) : Fix this up? | ||||
| get_rl_rect :: proc ( box : ^ Box2 ) -> rl.Rectangle { | ||||
| get_rl_rect :: proc( box : ^ Box2 ) -> rl.Rectangle { | ||||
| 	rect : rl.Rectangle = { | ||||
| 		x = box.position.x - box.extent.x, | ||||
| 		y = box.position.y - box.extent.y, | ||||
|   | ||||
| @@ -46,7 +46,7 @@ else | ||||
| 	} | ||||
| } | ||||
|  | ||||
| persistent_allocator :: proc () -> Allocator { | ||||
| persistent_allocator :: proc() -> Allocator { | ||||
| 	when Use_TrackingAllocator { | ||||
| 		return tracked_allocator( memory.persistent ) | ||||
| 	} | ||||
| @@ -55,7 +55,7 @@ persistent_allocator :: proc () -> Allocator { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| transient_allocator :: proc () -> Allocator { | ||||
| transient_allocator :: proc() -> Allocator { | ||||
| 	when Use_TrackingAllocator { | ||||
| 		return tracked_allocator( memory.transient ) | ||||
| 	} | ||||
| @@ -64,7 +64,7 @@ transient_allocator :: proc () -> Allocator { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| temp_allocator :: proc () -> Allocator { | ||||
| temp_allocator :: proc() -> Allocator { | ||||
| 	when Use_TrackingAllocator { | ||||
| 		return tracked_allocator( memory.temp ) | ||||
| 	} | ||||
| @@ -119,7 +119,7 @@ State :: struct { | ||||
| 	// There are two potential UI contextes for this prototype so far, | ||||
| 	// the screen-space UI and the current workspace UI. | ||||
| 	// This is used so that the ui api doesn't need to have the user pass the context every single time. | ||||
| 	ui_context : UI_State, | ||||
| 	ui_context : ^ UI_State, | ||||
| } | ||||
|  | ||||
| get_state :: proc "contextless" () -> ^ State { | ||||
| @@ -161,8 +161,6 @@ Workspace :: struct { | ||||
| 	name : string, | ||||
|  | ||||
| 	cam     : Camera, | ||||
| 	frame_1 : Box2, | ||||
| 	frame_2 : Box2, | ||||
|  | ||||
| 	// TODO(Ed) : The workspace is mainly a 'UI' conceptually... | ||||
| 	ui : UI_State, | ||||
|   | ||||
| @@ -66,7 +66,7 @@ font_provider_startup :: proc() | ||||
| 	font_provider_data := & get_state().font_provider_data; using font_provider_data | ||||
|  | ||||
| 	data, alloc_result := alloc_bytes( Font_Arena_Size, allocator = persistent_allocator() ) | ||||
| 	verify( alloc_result != AllocatorError.None, "Failed to allocate memory for font_arena from persistent" ) | ||||
| 	verify( alloc_result == AllocatorError.None, "Failed to allocate memory for font_arena from persistent" ) | ||||
| 	log("font_arena allocated from persistent memory") | ||||
|  | ||||
| 	arena_init( & font_arena, data ) | ||||
| @@ -93,7 +93,7 @@ font_provider_shutdown :: proc() | ||||
| 	} | ||||
| } | ||||
|  | ||||
| font_load :: proc ( path_file : string, | ||||
| font_load :: proc( path_file : string, | ||||
| 	default_size : f32    = Font_Load_Use_Default_Size, | ||||
| 	desired_id   : string = Font_Load_Gen_ID | ||||
| ) -> FontID | ||||
| @@ -101,7 +101,7 @@ font_load :: proc ( path_file : string, | ||||
| 	font_provider_data := & get_state().font_provider_data; using font_provider_data | ||||
|  | ||||
| 	font_data, read_succeded : = os.read_entire_file( path_file  ) | ||||
| 	verify( ! read_succeded, fmt.tprintf("Failed to read font file for: %v", path_file) ) | ||||
| 	verify( b32(read_succeded), fmt.tprintf("Failed to read font file for: %v", path_file) ) | ||||
| 	font_data_size := cast(i32) len(font_data) | ||||
|  | ||||
| 	desired_id := desired_id | ||||
| @@ -141,7 +141,7 @@ font_load :: proc ( path_file : string, | ||||
| 			codepoints     = nil, | ||||
| 			codepointCount = count, | ||||
| 			type = rl.FontType.DEFAULT ) | ||||
| 		verify( glyphs == nil, fmt.tprintf("Failed to load glyphs for font: %v at desired size: %v", desired_id, size ) ) | ||||
| 		verify( glyphs != nil, fmt.tprintf("Failed to load glyphs for font: %v at desired size: %v", desired_id, size ) ) | ||||
|  | ||||
| 		atlas  := rl.GenImageFontAtlas( glyphs, & recs, count, size, padding, i32(Font_Atlas_Packing_Method.Raylib_Basic) ) | ||||
| 		texture = rl.LoadTextureFromImage( atlas ) | ||||
| @@ -166,7 +166,7 @@ font_load :: proc ( path_file : string, | ||||
|  | ||||
| Font_Use_Default_Size :: f32(0.0) | ||||
|  | ||||
| to_rl_Font :: proc ( id : FontID, size := Font_Use_Default_Size ) -> rl.Font { | ||||
| to_rl_Font :: proc( id : FontID, size := Font_Use_Default_Size ) -> rl.Font { | ||||
| 	font_provider_data := & get_state().font_provider_data; using font_provider_data | ||||
|  | ||||
| 	even_size := math.round(size * 0.5) * 2.0 | ||||
|   | ||||
| @@ -20,16 +20,16 @@ Terabyte :: 1024 * Gigabyte | ||||
| Petabyte :: 1024 * Terabyte | ||||
| Exabyte  :: 1024 * Petabyte | ||||
|  | ||||
| kilobytes :: proc ( kb : $ integer_type ) -> integer_type { | ||||
| kilobytes :: proc( kb : $ integer_type ) -> integer_type { | ||||
| 	return kb * Kilobyte | ||||
| } | ||||
| megabytes :: proc ( mb : $ integer_type ) -> integer_type { | ||||
| megabytes :: proc( mb : $ integer_type ) -> integer_type { | ||||
| 	return mb * Megabyte | ||||
| } | ||||
| gigabyte  :: proc ( gb : $ integer_type ) -> integer_type { | ||||
| gigabyte  :: proc( gb : $ integer_type ) -> integer_type { | ||||
| 	return gb * Gigabyte | ||||
| } | ||||
| terabyte  :: proc ( tb : $ integer_type ) -> integer_type { | ||||
| terabyte  :: proc( tb : $ integer_type ) -> integer_type { | ||||
| 	return tb * Terabyte | ||||
| } | ||||
|  | ||||
| @@ -75,16 +75,40 @@ stack_pop :: proc( stack : ^ $ StackType / Stack( $ Type, $ Size ) ) { | ||||
| 	verify( idx > 0, "Attempted to pop an empty stack" ) | ||||
|  | ||||
| 	idx -= 1 | ||||
| 	if idx == 0 { | ||||
| 		items[idx] = {} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| stack_peek :: proc ( stack : ^ Stack( $ Type, $ Size ) ) -> ^ Type { | ||||
| stack_peek :: proc( stack : ^ Stack( $ Type, $ Size ) ) -> ^ Type { | ||||
| 	using stack | ||||
| 	return & items[idx] | ||||
| } | ||||
| //endregion Stack | ||||
|  | ||||
|  | ||||
| //region Doubly Linked List generic procs (verbose) | ||||
|  | ||||
| dbl_linked_list_push_back :: proc(first: ^(^ $ Type), last: ^(^ Type), new_node: ^ Type) | ||||
| { | ||||
| 	if first == nil || first^ == nil { | ||||
| 			// List is empty, set first and last to the new node | ||||
| 			(first ^) = new_node | ||||
| 			(last  ^) = new_node | ||||
| 			new_node.next = nil | ||||
| 			new_node.prev = nil | ||||
| 	} | ||||
| 	else | ||||
| 	{ | ||||
| 			// List is not empty, add new node to the end | ||||
| 			(last^).next = new_node | ||||
| 			new_node.prev = last^ | ||||
| 			(last ^) = new_node | ||||
| 			new_node.next = nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| //endregion | ||||
|  | ||||
|  | ||||
|  | ||||
|   | ||||
| @@ -15,7 +15,7 @@ Array :: struct ( $ Type : typeid ) { | ||||
| 	data      : [^]Type, | ||||
| } | ||||
|  | ||||
| array_to_slice :: proc ( arr : Array( $ Type) ) -> []Type { | ||||
| array_to_slice :: proc( arr : Array( $ Type) ) -> []Type { | ||||
| 	using arr; return slice_ptr( data, num ) | ||||
| } | ||||
|  | ||||
| @@ -109,7 +109,7 @@ array_append_at :: proc( array : ^ Array( $ Type ), item : Type, id : u64 ) -> A | ||||
| 	return AllocatorError.None | ||||
| } | ||||
|  | ||||
| array_append_at_slice :: proc ( array : ^ Array( $ Type ), items : []Type, id : u64 ) -> AllocatorError | ||||
| array_append_at_slice :: proc( array : ^ Array( $ Type ), items : []Type, id : u64 ) -> AllocatorError | ||||
| { | ||||
| 	id := id | ||||
| 	using array | ||||
| @@ -146,11 +146,11 @@ array_back :: proc( array : ^ Array( $ Type ) ) -> ^ Type { | ||||
| 	using array; return & data[ num - 1 ] | ||||
| } | ||||
|  | ||||
| array_clear :: proc ( array : ^ Array( $ Type ) ) { | ||||
| array_clear :: proc( array : ^ Array( $ Type ) ) { | ||||
| 	array.num = 0 | ||||
| } | ||||
|  | ||||
| array_fill :: proc ( array : ^ Array( $ Type ), begin, end : u64, value : Type ) -> b32 | ||||
| array_fill :: proc( array : ^ Array( $ Type ), begin, end : u64, value : Type ) -> b32 | ||||
| { | ||||
| 	using array | ||||
|  | ||||
| @@ -187,7 +187,7 @@ array_grow :: proc( array : ^ Array( $ Type ), min_capacity : u64 ) -> Allocator | ||||
| } | ||||
|  | ||||
| array_pop :: proc( array : ^ Array( $ Type ) ) { | ||||
| 	verify( array.num == 0, "Attempted to pop an array with no elements" ) | ||||
| 	verify( array.num != 0, "Attempted to pop an array with no elements" ) | ||||
| 	array.num -= 1 | ||||
| } | ||||
|  | ||||
| @@ -212,7 +212,7 @@ array_reserve :: proc( array : ^ Array( $ Type ), new_capacity : u64 ) -> Alloca | ||||
| 	return AllocatorError.None | ||||
| } | ||||
|  | ||||
| array_resize :: proc ( array : ^ Array( $ Type ), num : u64 ) -> AllocatorError | ||||
| array_resize :: proc( array : ^ Array( $ Type ), num : u64 ) -> AllocatorError | ||||
| { | ||||
| 	if array.capacity < num | ||||
| 	{ | ||||
| @@ -238,6 +238,7 @@ array_set_capacity :: proc( array : ^ Array( $ Type ), new_capacity : u64 ) -> A | ||||
| 	} | ||||
|  | ||||
| 	raw_data, result_code := alloc( cast(int) new_capacity * size_of(Type), allocator = allocator ) | ||||
| 	ensure( result_code == AllocatorError.None, "Failed to allocate for new array capacity" ) | ||||
| 	data     = cast( [^] Type ) raw_data | ||||
| 	capacity = new_capacity | ||||
| 	return result_code | ||||
|   | ||||
| @@ -33,7 +33,7 @@ copy_file_sync :: proc( path_src, path_dst: string ) -> b32 | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| file_exists :: proc ( file_path : string ) -> b32 { | ||||
| file_exists :: proc( file_path : string ) -> b32 { | ||||
| 	path_info, result := os.stat( file_path, context.temp_allocator ) | ||||
| 	if result != os.ERROR_NONE { | ||||
| 		return false | ||||
| @@ -53,11 +53,11 @@ is_file_locked :: proc( file_path : string ) -> b32 { | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| rewind :: proc ( file : os.Handle ) { | ||||
| rewind :: proc( file : os.Handle ) { | ||||
| 	os.seek( file, 0, 0 ) | ||||
| } | ||||
|  | ||||
| read_looped :: proc ( file : os.Handle, data : []byte ) { | ||||
| read_looped :: proc( file : os.Handle, data : []byte ) { | ||||
| 	total_read, result_code := os.read( file, data ) | ||||
| 	if result_code == os.ERROR_HANDLE_EOF { | ||||
| 		rewind( file ) | ||||
|   | ||||
| @@ -33,7 +33,7 @@ hashtable_init :: proc( $ Type : typeid, allocator : Allocator ) -> ( HashTable( | ||||
| 	return hashtable_init_reserve( Type, allocator ) | ||||
| } | ||||
|  | ||||
| hashtable_init_reserve :: proc ( $ Type : typeid, allocator : Allocator, num : u64 ) -> ( HashTable( Type), AllocatorError ) | ||||
| hashtable_init_reserve :: proc( $ Type : typeid, allocator : Allocator, num : u64 ) -> ( HashTable( Type), AllocatorError ) | ||||
| { | ||||
| 	result                        : HashTable(Type) | ||||
| 	hashes_result, entries_result : AllocatorError | ||||
| @@ -45,7 +45,6 @@ hashtable_init_reserve :: proc ( $ Type : typeid, allocator : Allocator, num : u | ||||
| 	} | ||||
| 	array_resize( & result.hashes, num ) | ||||
| 	slice.fill( slice_ptr( result.hashes.data, cast(int) result.hashes.num), -1 ) | ||||
| 	// array_fill( result.hashes, 0, num - 1, -1 ) | ||||
|  | ||||
| 	result.entries, entries_result = array_init_reserve( HashTable_Entry(Type), allocator, num ) | ||||
| 	if entries_result != AllocatorError.None { | ||||
| @@ -106,7 +105,7 @@ hashtable_grow :: proc( ht : ^ HashTable( $ Type ) ) -> AllocatorError { | ||||
| 	return hashtable_rehash( ht, new_num ) | ||||
| } | ||||
|  | ||||
| hashtable_rehash :: proc ( ht : ^ HashTable( $ Type ), new_num : u64 ) -> AllocatorError | ||||
| hashtable_rehash :: proc( ht : ^ HashTable( $ Type ), new_num : u64 ) -> AllocatorError | ||||
| { | ||||
| 	last_added_index : i64 | ||||
|  | ||||
| @@ -116,11 +115,6 @@ hashtable_rehash :: proc ( ht : ^ HashTable( $ Type ), new_num : u64 ) -> Alloca | ||||
| 		return init_result | ||||
| 	} | ||||
|  | ||||
| 	// for id : u64 = 0; id < new_ht.hashes.num; id += 1 { | ||||
| 	// 	new_ht.hashes.data[id] = -1 | ||||
| 	// } | ||||
| 	slice.fill( slice_ptr( new_ht.hashes.data, cast(int) new_ht.hashes.num ), -1 ) | ||||
|  | ||||
| 	for id : u64 = 0; id < ht.entries.num; id += 1 { | ||||
| 		find_result : HT_FindResult | ||||
|  | ||||
| @@ -145,7 +139,7 @@ hashtable_rehash :: proc ( ht : ^ HashTable( $ Type ), new_num : u64 ) -> Alloca | ||||
| 	return AllocatorError.None | ||||
| } | ||||
|  | ||||
| hashtable_rehash_fast :: proc ( ht : ^ HashTable( $ Type ) ) | ||||
| hashtable_rehash_fast :: proc( ht : ^ HashTable( $ Type ) ) | ||||
| { | ||||
| 	using ht | ||||
| 	for id := 0; id < entries.num; id += 1 { | ||||
| @@ -167,7 +161,7 @@ hashtable_rehash_fast :: proc ( ht : ^ HashTable( $ Type ) ) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| hashtable_remove :: proc ( ht : ^ HashTable( $ Type ), key : u64 ) { | ||||
| hashtable_remove :: proc( ht : ^ HashTable( $ Type ), key : u64 ) { | ||||
| 	using ht | ||||
| 	find_result := hashtable_find( key ) | ||||
|  | ||||
|   | ||||
| @@ -30,7 +30,7 @@ TrackedAllocator :: struct { | ||||
| 	tracker   : Tracking_Allocator, | ||||
| } | ||||
|  | ||||
| tracked_allocator :: proc ( self : ^ TrackedAllocator ) -> Allocator { | ||||
| tracked_allocator :: proc( self : ^ TrackedAllocator ) -> Allocator { | ||||
| 	return tracking_allocator( & self.tracker ) | ||||
| } | ||||
|  | ||||
| @@ -45,7 +45,7 @@ tracked_allocator_init :: proc( size, internals_size : int, allocator := context | ||||
| 	raw_size       := backing_size + internals_size | ||||
|  | ||||
| 	raw_mem, raw_mem_code := alloc( raw_size, mem.DEFAULT_ALIGNMENT, allocator ) | ||||
| 	verify( raw_mem_code != mem.Allocator_Error.None, "Failed to allocate memory for the TrackingAllocator" ) | ||||
| 	verify( raw_mem_code == mem.Allocator_Error.None, "Failed to allocate memory for the TrackingAllocator" ) | ||||
|  | ||||
| 	backing_slice   := slice_ptr( cast( ^ byte) raw_mem,        backing_size ) | ||||
| 	internals_slice := slice_ptr( memory_after( backing_slice), internals_size ) | ||||
| @@ -59,7 +59,7 @@ tracked_allocator_init :: proc( size, internals_size : int, allocator := context | ||||
| 	{ | ||||
| 		tracker_arena := cast(^Arena) result.tracker.backing.data | ||||
| 		arena_len     := len( tracker_arena.data ) | ||||
| 		verify( arena_len != len(result.backing.data), "BAD SIZE ON TRACKER'S ARENA" ) | ||||
| 		verify( arena_len == len(result.backing.data), "BAD SIZE ON TRACKER'S ARENA" ) | ||||
| 	} | ||||
| 	return result | ||||
| } | ||||
| @@ -71,7 +71,7 @@ tracked_allocator_init_vmem :: proc( vmem : [] byte, internals_size : int ) -> ^ | ||||
| 	backing_size            := len(vmem)    - internals_size | ||||
| 	raw_size                := backing_size + internals_size | ||||
|  | ||||
| 	verify( backing_size < 0 || len(vmem) < raw_size, "Provided virtual memory slice is not large enough to hold the TrackedAllocator" ) | ||||
| 	verify( backing_size >= 0 && len(vmem) >= raw_size, "Provided virtual memory slice is not large enough to hold the TrackedAllocator" ) | ||||
|  | ||||
| 	result       := cast( ^ TrackedAllocator) & vmem[0] | ||||
| 	result_slice := slice_ptr( & vmem[0], tracking_allocator_size ) | ||||
| @@ -87,7 +87,7 @@ tracked_allocator_init_vmem :: proc( vmem : [] byte, internals_size : int ) -> ^ | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| arena_allocator_init_vmem :: proc ( vmem : [] byte ) -> ^ Arena | ||||
| arena_allocator_init_vmem :: proc( vmem : [] byte ) -> ^ Arena | ||||
| { | ||||
| 	arena_size   :: size_of( Arena) | ||||
| 	backing_size := len(vmem) | ||||
|   | ||||
| @@ -54,7 +54,7 @@ VMemChunk :: struct { | ||||
| 	sectr_snapshot          : []u8 | ||||
| } | ||||
|  | ||||
| setup_memory :: proc () -> VMemChunk | ||||
| setup_memory :: proc() -> VMemChunk | ||||
| { | ||||
| 	Arena              :: mem.Arena | ||||
| 	Tracking_Allocator :: mem.Tracking_Allocator | ||||
| @@ -72,13 +72,13 @@ setup_memory :: proc () -> VMemChunk | ||||
| 		base_address : rawptr = transmute( rawptr) u64(Terabyte * 1) | ||||
|  | ||||
| 		result := arena_init_static( & sectr_live, base_address, sectr.memory_chunk_size, sectr.memory_chunk_size ) | ||||
| 		verify( result != runtime.Allocator_Error.None, "Failed to allocate live memory for the sectr module" ) | ||||
| 		verify( result == runtime.Allocator_Error.None, "Failed to allocate live memory for the sectr module" ) | ||||
| 	} | ||||
|  | ||||
| 	// Setup memory mapped io for snapshots | ||||
| 	{ | ||||
| 		snapshot_file, open_error := os.open( path_snapshot, os.O_RDWR | os.O_CREATE ) | ||||
| 		verify( open_error != os.ERROR_NONE, "Failed to open snapshot file for the sectr module" ) | ||||
| 		verify( open_error == os.ERROR_NONE, "Failed to open snapshot file for the sectr module" ) | ||||
|  | ||||
| 		file_info, stat_code := os.stat( path_snapshot ) | ||||
| 		{ | ||||
| @@ -90,7 +90,7 @@ setup_memory :: proc () -> VMemChunk | ||||
| 		map_error                : virtual.Map_File_Error | ||||
| 		map_flags                : virtual.Map_File_Flags = { virtual.Map_File_Flag.Read, virtual.Map_File_Flag.Write } | ||||
| 		sectr_snapshot, map_error = virtual.map_file_from_file_descriptor( uintptr(snapshot_file), map_flags ) | ||||
| 		verify( map_error != virtual.Map_File_Error.None, "Failed to allocate snapshot memory for the sectr module" ) | ||||
| 		verify( map_error == virtual.Map_File_Error.None, "Failed to allocate snapshot memory for the sectr module" ) | ||||
|  | ||||
| 		os.close(snapshot_file) | ||||
| 	} | ||||
| @@ -102,7 +102,7 @@ setup_memory :: proc () -> VMemChunk | ||||
| 	return memory; | ||||
| } | ||||
|  | ||||
| load_sectr_api :: proc ( version_id : i32 ) -> sectr.ModuleAPI | ||||
| load_sectr_api :: proc( version_id : i32 ) -> sectr.ModuleAPI | ||||
| { | ||||
| 	loaded_module : sectr.ModuleAPI | ||||
|  | ||||
| @@ -155,7 +155,7 @@ load_sectr_api :: proc ( version_id : i32 ) -> sectr.ModuleAPI | ||||
| 	return loaded_module | ||||
| } | ||||
|  | ||||
| unload_sectr_api :: proc ( module : ^ sectr.ModuleAPI ) | ||||
| unload_sectr_api :: proc( module : ^ sectr.ModuleAPI ) | ||||
| { | ||||
| 	dynlib.unload_library( module.lib ) | ||||
| 	os.remove( path_sectr_live_module ) | ||||
| @@ -163,7 +163,7 @@ unload_sectr_api :: proc ( module : ^ sectr.ModuleAPI ) | ||||
| 	log("Unloaded sectr API") | ||||
| } | ||||
|  | ||||
| sync_sectr_api :: proc ( sectr_api : ^ sectr.ModuleAPI, memory : ^ VMemChunk, logger : ^ sectr.Logger ) | ||||
| sync_sectr_api :: proc( sectr_api : ^ sectr.ModuleAPI, memory : ^ VMemChunk, logger : ^ sectr.Logger ) | ||||
| { | ||||
| 	if write_time, result := os.last_write_time_by_name( path_sectr_module ); | ||||
| 	result == os.ERROR_NONE && sectr_api.write_time != write_time | ||||
| @@ -176,7 +176,7 @@ sync_sectr_api :: proc ( sectr_api : ^ sectr.ModuleAPI, memory : ^ VMemChunk, lo | ||||
| 		time.sleep( time.Millisecond ) | ||||
|  | ||||
| 		sectr_api ^ = load_sectr_api( version_id ) | ||||
| 		verify( sectr_api.lib_version == 0, "Failed to hot-reload the sectr module" ) | ||||
| 		verify( sectr_api.lib_version != 0, "Failed to hot-reload the sectr module" ) | ||||
|  | ||||
| 		sectr_api.reload( memory.sectr_live, memory.sectr_snapshot, logger ) | ||||
| 	} | ||||
| @@ -229,7 +229,7 @@ main :: proc() | ||||
| 	// Load the Enviornment API for the first-time | ||||
| 	{ | ||||
| 		sectr_api = load_sectr_api( 1 ) | ||||
| 		verify( sectr_api.lib_version == 0, "Failed to initially load the sectr module" ) | ||||
| 		verify( sectr_api.lib_version != 0, "Failed to initially load the sectr module" ) | ||||
| 	} | ||||
|  | ||||
| 	running            = true; | ||||
|   | ||||
| @@ -284,7 +284,7 @@ import rl "vendor:raylib" | ||||
|  | ||||
| poll_input :: proc( old, new : ^ InputState ) | ||||
| { | ||||
| 	input_process_digital_btn :: proc ( old_state, new_state : ^ DigitalBtn, is_down : b32 ) | ||||
| 	input_process_digital_btn :: proc( old_state, new_state : ^ DigitalBtn, is_down : b32 ) | ||||
| 	{ | ||||
| 		new_state.ended_down = is_down | ||||
| 		had_transition := old_state.ended_down != new_state.ended_down | ||||
| @@ -358,7 +358,7 @@ play_input :: proc( replay_file : os.Handle, input : ^ InputState ) { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| to_raylib_key :: proc ( key : i32 ) -> rl.KeyboardKey { | ||||
| to_raylib_key :: proc( key : i32 ) -> rl.KeyboardKey { | ||||
| 	@static raylib_key_lookup_table := [?] rl.KeyboardKey { | ||||
| 		rl.KeyboardKey.KEY_NULL, | ||||
| 		rl.KeyboardKey.ENTER, | ||||
| @@ -477,7 +477,7 @@ to_raylib_key :: proc ( key : i32 ) -> rl.KeyboardKey { | ||||
| 	return raylib_key_lookup_table[ key ] | ||||
| } | ||||
|  | ||||
| to_raylib_mouse_btn :: proc ( btn : i32 ) -> rl.MouseButton { | ||||
| to_raylib_mouse_btn :: proc( btn : i32 ) -> rl.MouseButton { | ||||
| 	@static raylib_mouse_btn_lookup_table := [?] rl.MouseButton { | ||||
| 		rl.MouseButton.LEFT, | ||||
| 		rl.MouseButton.MIDDLE, | ||||
|   | ||||
| @@ -18,11 +18,11 @@ Logger :: struct { | ||||
| 	id        : string, | ||||
| } | ||||
|  | ||||
| to_odin_logger :: proc ( logger : ^ Logger ) -> core_log.Logger { | ||||
| to_odin_logger :: proc( logger : ^ Logger ) -> core_log.Logger { | ||||
| 	return { logger_interface, logger, core_log.Level.Debug, core_log.Default_File_Logger_Opts } | ||||
| } | ||||
|  | ||||
| init :: proc ( logger : ^ Logger,  id : string, file_path : string, file := os.INVALID_HANDLE ) | ||||
| init :: proc( logger : ^ Logger,  id : string, file_path : string, file := os.INVALID_HANDLE ) | ||||
| { | ||||
| 	if file == os.INVALID_HANDLE | ||||
| 	{ | ||||
| @@ -48,7 +48,7 @@ init :: proc ( logger : ^ Logger,  id : string, file_path : string, file := os.I | ||||
| 	} | ||||
| } | ||||
|  | ||||
| logger_interface :: proc ( | ||||
| logger_interface :: proc( | ||||
| 	logger_data :  rawptr, | ||||
| 	level       :  core_log.Level, | ||||
| 	text        :  string, | ||||
| @@ -116,10 +116,10 @@ logger_interface :: proc ( | ||||
| 	fmt.fprintln( logger.file, strings.to_string(builder) ) | ||||
| } | ||||
|  | ||||
| log :: proc ( msg : string, level := LogLevel.Info, loc := #caller_location ) { | ||||
| log :: proc( msg : string, level := LogLevel.Info, loc := #caller_location ) { | ||||
| 	core_log.log( level, msg, location = loc ) | ||||
| } | ||||
|  | ||||
| logf :: proc ( fmt : string, args : ..any,  level := LogLevel.Info, loc := #caller_location  ) { | ||||
| logf :: proc( fmt : string, args : ..any,  level := LogLevel.Info, loc := #caller_location  ) { | ||||
| 	core_log.logf( level, fmt, args, location = loc ) | ||||
| } | ||||
|   | ||||
| @@ -19,7 +19,7 @@ Vec2_f32 :: struct #raw_union { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // make_vec2 :: proc ( x, y : f32 ) { | ||||
| // make_vec2 :: proc( x, y : f32 ) { | ||||
|  | ||||
| // } | ||||
|  | ||||
|   | ||||
| @@ -52,7 +52,7 @@ unmarshal_from_object :: proc( $Type: typeid, object : json.Object ) -> Type | ||||
| 	type_info := type_info_of(Type) | ||||
| 	#partial switch type in type_info.variant { | ||||
| 		case runtime.Type_Info_Union: | ||||
| 			ensure( true, "This proc doesn't support raw unions" ) | ||||
| 			ensure( false, "This proc doesn't support raw unions" ) | ||||
| 	} | ||||
|  | ||||
| 	base_ptr := uintptr( & result ) | ||||
| @@ -81,17 +81,17 @@ ArchiveData :: struct { | ||||
| 	version     : i32, | ||||
| } | ||||
|  | ||||
| archive_init_temp :: proc () -> ^ ArchiveData { | ||||
| archive_init_temp :: proc() -> ^ ArchiveData { | ||||
| 	archive := new( ArchiveData, context.temp_allocator ) | ||||
| 	archive.version = Serializer_Version | ||||
| 	return archive | ||||
| } | ||||
|  | ||||
| state_serialize :: proc ( archive : ^ ArchiveData = nil ) { | ||||
| state_serialize :: proc( archive : ^ ArchiveData = nil ) { | ||||
| 	// TODO(Ed): We'll need this for a better save/load snapshot setup. | ||||
| } | ||||
|  | ||||
| project_serialize :: proc ( project : ^ Project, archive : ^ ArchiveData, is_writting : b32 = true ) | ||||
| project_serialize :: proc( project : ^ Project, archive : ^ ArchiveData, is_writting : b32 = true ) | ||||
| { | ||||
| 	options : json.Marshal_Options | ||||
| 	options.spec        = json.Specification.MJSON | ||||
| @@ -112,18 +112,18 @@ project_serialize :: proc ( project : ^ Project, archive : ^ ArchiveData, is_wri | ||||
| 		// TODO(Ed): In the future this will be more complicated, as serialization of workspaces and the code database won't be trivial | ||||
|  | ||||
| 		json_data, marshal_code := json.marshal( marshal_archive, options, allocator = context.temp_allocator ) | ||||
| 		verify( marshal_code != json.Marshal_Data_Error.None, "Failed to marshal the project to JSON" ) | ||||
| 		verify( marshal_code == json.Marshal_Data_Error.None, "Failed to marshal the project to JSON" ) | ||||
|  | ||||
| 		archive.data = json_data | ||||
| 	} | ||||
| 	else | ||||
| 	{ | ||||
| 		parsed_json, parse_code := json.parse( archive.data, json.Specification.MJSON, allocator = context.temp_allocator ) | ||||
| 		verify( parse_code != json.Error.None, "Failed to parse project JSON") | ||||
| 		verify( parse_code == json.Error.None, "Failed to parse project JSON") | ||||
|  | ||||
| 		archive_json := parsed_json.(json.Object) | ||||
| 		archive_version : i32 = cast(i32) archive_json["version"].(json.Float) | ||||
| 		verify( Serializer_Version != archive_version, "Version mismatch on archive!" ) | ||||
| 		verify( Serializer_Version == archive_version, "Version mismatch on archive!" ) | ||||
|  | ||||
| 		// Note(Ed) : This works fine for now, but eventually it will most likely break with pointers... | ||||
| 		// We'll most likely set things up so that all refs in the project & workspace are handles. | ||||
| @@ -155,7 +155,7 @@ project_serialize :: proc ( project : ^ Project, archive : ^ ArchiveData, is_wri | ||||
| 	} | ||||
| } | ||||
|  | ||||
| project_save :: proc ( project : ^ Project, archive : ^ ArchiveData = nil ) | ||||
| project_save :: proc( project : ^ Project, archive : ^ ArchiveData = nil ) | ||||
| { | ||||
| 	archive := archive | ||||
| 	if archive == nil { | ||||
| @@ -165,13 +165,13 @@ project_save :: proc ( project : ^ Project, archive : ^ ArchiveData = nil ) | ||||
|  | ||||
| 	if ! os.is_dir( project.path ) { | ||||
| 		os.make_directory( project.path ) | ||||
| 		verify( ! os.is_dir( project.path ), "Failed to create project path for saving" ) | ||||
| 		verify( cast(b32) os.is_dir( project.path ), "Failed to create project path for saving" ) | ||||
| 	} | ||||
|  | ||||
| 	os.write_entire_file( fmt.tprint( project.path, project.name, ".sectr_proj", sep = ""), archive.data ) | ||||
| } | ||||
|  | ||||
| project_load :: proc ( path : string, project : ^ Project, archive : ^ ArchiveData = nil ) | ||||
| project_load :: proc( path : string, project : ^ Project, archive : ^ ArchiveData = nil ) | ||||
| { | ||||
| 	archive := archive | ||||
| 	if archive == nil { | ||||
| @@ -179,7 +179,7 @@ project_load :: proc ( path : string, project : ^ Project, archive : ^ ArchiveDa | ||||
| 	} | ||||
|  | ||||
| 	data, read_code := os.read_entire_file( path, context.temp_allocator ) | ||||
| 	verify( ! read_code, "Failed to read from project file" ) | ||||
| 	verify( b32(read_code), "Failed to read from project file" ) | ||||
|  | ||||
| 	archive.data = data | ||||
| 	project_serialize( project, archive, Serializer_Loading ) | ||||
|   | ||||
| @@ -5,7 +5,7 @@ import "core:reflect" | ||||
|  | ||||
| // TODO(Ed) : Generic Unmarshling of json objects (There should be a way I believe todo this generically but the reflect library is not well documented) | ||||
|  | ||||
| vec2_json_unmarshal :: proc ( value : ^ json.Value ) -> Vec2 { | ||||
| vec2_json_unmarshal :: proc( value : ^ json.Value ) -> Vec2 { | ||||
| 	json_v := value.(json.Array) | ||||
| 	return { | ||||
| 		f32(json_v[0].(json.Float)), | ||||
| @@ -13,7 +13,7 @@ vec2_json_unmarshal :: proc ( value : ^ json.Value ) -> Vec2 { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| color_json_unmarshal :: proc ( value : ^ json.Value ) -> Color { | ||||
| color_json_unmarshal :: proc( value : ^ json.Value ) -> Color { | ||||
| 	json_color := value.(json.Array) | ||||
| 	r := u8(json_color[0].(json.Float)) | ||||
| 	g := u8(json_color[1].(json.Float)) | ||||
| @@ -22,7 +22,7 @@ color_json_unmarshal :: proc ( value : ^ json.Value ) -> Color { | ||||
| 	return { r, g, b, a } | ||||
| } | ||||
|  | ||||
| box_json_unmarshal :: proc ( value : ^ json.Value ) -> Box2 { | ||||
| box_json_unmarshal :: proc( value : ^ json.Value ) -> Box2 { | ||||
| 	object     := value.(json.Object) | ||||
| 	json_pos   := object["position"].(json.Array) | ||||
|  | ||||
|   | ||||
| @@ -38,7 +38,7 @@ points_to_pixels :: proc { | ||||
|  | ||||
| //region Unit Conversion Impl | ||||
|  | ||||
| // cm_to_points :: proc ( cm : f32 ) -> f32 { | ||||
| // cm_to_points :: proc( cm : f32 ) -> f32 { | ||||
|  | ||||
| // } | ||||
|  | ||||
| @@ -90,7 +90,7 @@ vec2_points_to_pixels :: proc(vpoints: Vec2) -> Vec2 { | ||||
| } | ||||
|  | ||||
|  | ||||
| // vec2_points_to_cm :: proc ( vpoints : Vec2 ) -> Vec2 { | ||||
| // vec2_points_to_cm :: proc( vpoints : Vec2 ) -> Vec2 { | ||||
|  | ||||
| // } | ||||
|  | ||||
|   | ||||
| @@ -61,7 +61,7 @@ debug_draw_text_world :: proc( content : string, pos : Vec2, size : f32, color : | ||||
|  | ||||
| // Raylib's equivalent doesn't take a length for the string (making it a pain in the ass) | ||||
| // So this is a 1:1 copy except it takes Odin strings | ||||
| measure_text_size :: proc ( text : string, font : FontID, font_size := Font_Use_Default_Size, spacing : f32 ) -> AreaSize | ||||
| measure_text_size :: proc( text : string, font : FontID, font_size := Font_Use_Default_Size, spacing : f32 ) -> AreaSize | ||||
| { | ||||
| 	px_size := math.round( points_to_pixels( font_size ) ) | ||||
| 	rl_font := to_rl_Font( font, font_size ) | ||||
|   | ||||
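
The change also adds a generic doubly linked list push-back (dbl_linked_list_push_back). A minimal usage sketch, assuming an illustrative node type; Node, value, and dll_example below are hypothetical names, not part of the codebase:

Node :: struct {
	value : int,
	prev  : ^ Node,
	next  : ^ Node,
}

dll_example :: proc()
{
	// Hypothetical usage: an empty list starts with nil head and tail.
	first : ^ Node
	last  : ^ Node

	node_a := new( Node ); node_a.value = 1
	node_b := new( Node ); node_b.value = 2

	dbl_linked_list_push_back( & first, & last, node_a )
	dbl_linked_list_push_back( & first, & last, node_b )
	// first -> node_a, last -> node_b, node_a.next == node_b, node_b.prev == node_a
}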