diff --git a/Readme.md b/Readme.md index c73acf9..056702d 100644 --- a/Readme.md +++ b/Readme.md @@ -17,7 +17,7 @@ Features: * Robust quality of life features: * Tracks text layers! * Push and pop stack for font, font_size, colour, view, position, scale and zoom! - * Enforce even only font-sizing (useful for linear-zoom) [TODO] + * Enforce even only font-sizing (useful for linear-zoom) * Snap-positining to view for better hinting * Basic or advanced text shaping via Harfbuzz * All rendering is real-time, triangulation done on the CPU, vertex rendering and texture blitting on the gpu. @@ -31,8 +31,9 @@ Features: Upcoming: -* Support for ear-clipping triangulation +* Support for ear-clipping triangulation, or just better triangulation.. * Support for which triangulation method used on a by font basis? + * https://www.microsoft.com/en-us/research/wp-content/uploads/2005/01/p1000-loop.pdf * Multi-threading supported job queue. * Lift heavy-lifting portion of the library's context into a thread context. * Synchronize threads by merging their generated layered draw list into a finished draw-list for processing on the user's render thread. 
diff --git a/examples/sokol_demo/sokol_demo.odin b/examples/sokol_demo/sokol_demo.odin index 222b578..4c8b372 100644 --- a/examples/sokol_demo/sokol_demo.odin +++ b/examples/sokol_demo/sokol_demo.odin @@ -224,16 +224,16 @@ init :: proc "c" () } glyph_draw_opts := ve.Init_Glyph_Draw_Params_Default - glyph_draw_opts.snap_glyph_height = false + glyph_draw_opts.snap_glyph_height = true shaper_opts := ve.Init_Shaper_Params_Default - shaper_opts.snap_glyph_position = false + shaper_opts.snap_glyph_position = true ve.startup( & demo_ctx.ve_ctx, .STB_TrueType, allocator = context.allocator, glyph_draw_params = glyph_draw_opts, shaper_params = shaper_opts, - px_scalar = 1.89, - alpha_sharpen = 0.1, + px_scalar = 1.25, + alpha_sharpen = 0.0, ) ve_sokol.setup_gfx_objects( & demo_ctx.render_ctx, & demo_ctx.ve_ctx, vert_cap = 256 * 1024, index_cap = 512 * 1024 ) @@ -388,10 +388,10 @@ Glyphs are first rendered to an intermediate 2k x 512px R8 texture. This allows 4 x 4 = 16x supersampling, and 8 Region C glyphs similarly. 
A simple 16-tap box downsample shader is then used to blit from this intermediate texture to the final atlas location.` - draw_text("How it works", demo_ctx.font_title, { 0.2, current_scroll - (section_start + 0.06) }) - draw_text(how_it_works, demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.1) }) - draw_text(caching_strategy, demo_ctx.font_mono, { 0.28, current_scroll - (section_start + 0.32) }) - draw_text(how_it_works2, demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.82) }) + draw_text("How it works", demo_ctx.font_title, { 0.2, current_scroll - (section_start + 0.06) }, size = 92) + draw_text(how_it_works, demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.1) }, size = 19) + draw_text(caching_strategy, demo_ctx.font_mono, { 0.28, current_scroll - (section_start + 0.32) }, size = 21) + draw_text(how_it_works2, demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.82) }, size = 19) } // Showcase section @@ -408,43 +408,43 @@ etiam dignissim diam quis enim. 
Convallis convallis tellus id interdum.` draw_text("Sans serif", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.28) }, size = 19) draw_text(font_family_test, demo_ctx.font_demo_sans, { 0.3, current_scroll - (section_start + 0.28) }, size = 18) - draw_text("Serif", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.36) }) - draw_text(font_family_test, demo_ctx.font_demo_serif, { 0.3, current_scroll - (section_start + 0.36) }) + draw_text("Serif", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.36) }, size = 19) + draw_text(font_family_test, demo_ctx.font_demo_serif, { 0.3, current_scroll - (section_start + 0.36) }, size = 18) - draw_text("Script", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.44) }) - draw_text(font_family_test, demo_ctx.font_demo_script, { 0.3, current_scroll - (section_start + 0.44) }) + draw_text("Script", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.44) }, size = 19) + draw_text(font_family_test, demo_ctx.font_demo_script, { 0.3, current_scroll - (section_start + 0.44) }, size = 22) - draw_text("Monospace", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.52) }) - draw_text(font_family_test, demo_ctx.font_demo_mono, { 0.3, current_scroll - (section_start + 0.52) }) + draw_text("Monospace", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.52) }, size = 19) + draw_text(font_family_test, demo_ctx.font_demo_mono, { 0.3, current_scroll - (section_start + 0.52) }, size = 22) - draw_text("Small", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.60) }) - draw_text(font_family_test, demo_ctx.font_small, { 0.3, current_scroll - (section_start + 0.60) }) + draw_text("Small", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.60) }, size = 19) + draw_text(font_family_test, demo_ctx.font_small, { 0.3, current_scroll - (section_start + 0.60) }, size = 10) - draw_text("Greek", demo_ctx.font_print, { 0.2, current_scroll - 
(section_start + 0.72) }) - draw_text("Ήταν απλώς θέμα χρόνου.", demo_ctx.font_demo_sans, { 0.3, current_scroll - (section_start + 0.72) }) + draw_text("Greek", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.72) }, size = 19) + draw_text("Ήταν απλώς θέμα χρόνου.", demo_ctx.font_demo_sans, { 0.3, current_scroll - (section_start + 0.72) }, size = 18) - draw_text("Vietnamese", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.76) }) - draw_text("Bầu trời trong xanh thăm thẳm, không một gợn mây.", demo_ctx.font_demo_sans, { 0.3, current_scroll - (section_start + 0.76) }) + draw_text("Vietnamese", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.76) }, size = 19) + draw_text("Bầu trời trong xanh thăm thẳm, không một gợn mây.", demo_ctx.font_demo_sans, { 0.3, current_scroll - (section_start + 0.76) }, size = 18) - draw_text("Thai", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.80) }) - draw_text("การเดินทางขากลับคงจะเหงา", demo_ctx.font_demo_thai, { 0.3, current_scroll - (section_start + 0.80) }) + draw_text("Thai", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.80) }, size = 19) + draw_text("การเดินทางขากลับคงจะเหงา", demo_ctx.font_demo_thai, { 0.3, current_scroll - (section_start + 0.80) }, size = 24) - draw_text("Chinese", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.84) }) - draw_text("床前明月光 疑是地上霜 举头望明月 低头思故乡", demo_ctx.font_demo_chinese, {0.3, current_scroll - (section_start + 0.84) }) + draw_text("Chinese", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.84) }, size = 19) + draw_text("床前明月光 疑是地上霜 举头望明月 低头思故乡", demo_ctx.font_demo_chinese, {0.3, current_scroll - (section_start + 0.84) }, size = 24) - draw_text("Japanese", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.88) }) - draw_text("ぎょしょうとナレズシの研究 モンスーン・アジアの食事文化", demo_ctx.font_demo_japanese, { 0.3, current_scroll - (section_start + 0.88) }) + draw_text("Japanese", 
demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.88) }, size = 19) + draw_text("ぎょしょうとナレズシの研究 モンスーン・アジアの食事文化", demo_ctx.font_demo_japanese, { 0.3, current_scroll - (section_start + 0.88) }, size = 24) - draw_text("Korean", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.92) }) - draw_text("그들의 장비와 기구는 모두 살아 있다.", demo_ctx.font_demo_korean, { 0.3, current_scroll - (section_start + 0.92) }) + draw_text("Korean", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.92) }, size = 19) + draw_text("그들의 장비와 기구는 모두 살아 있다.", demo_ctx.font_demo_korean, { 0.3, current_scroll - (section_start + 0.92) }, size = 36) - draw_text("Needs harfbuzz to work:", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.96)}) + draw_text("Needs harfbuzz to work:", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 0.96)}, size = 14) - draw_text("Arabic", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 1.00) }) - draw_text("حب السماء لا تمطر غير الأحلام.", demo_ctx.font_demo_arabic, { 0.3, current_scroll - (section_start + 1.00) }) + draw_text("Arabic", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 1.00) }, size = 19) + draw_text("حب السماء لا تمطر غير الأحلام.", demo_ctx.font_demo_arabic, { 0.3, current_scroll - (section_start + 1.00) }, size = 24) - draw_text("Hebrew", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 1.04) }) - draw_text("אז הגיע הלילה של כוכב השביט הראשון.", demo_ctx.font_demo_hebrew, { 0.3, current_scroll - (section_start + 1.04) }) + draw_text("Hebrew", demo_ctx.font_print, { 0.2, current_scroll - (section_start + 1.04) }, size = 19) + draw_text("אז הגיע הלילה של כוכב השביט הראשון.", demo_ctx.font_demo_hebrew, { 0.3, current_scroll - (section_start + 1.04) }, size = 22) } // Zoom Test @@ -486,13 +486,10 @@ etiam dignissim diam quis enim. 
Convallis convallis tellus id interdum.` zoomed_text_base_size : f32 = 12.0 zoom_adjust_size := zoomed_text_base_size * current_zoom - // ve_id, resolved_size := font_resolve_draw_id( font_firacode, zoom_adjust_size * OVER_SAMPLE_ZOOM ) - resolved_size := zoom_adjust_size + resolved_size, _ := ve.resolve_zoom_size_scale(current_zoom, zoomed_text_base_size, 1.0, 2, 2, 999.0, demo_ctx.screen_size) current_zoom_text := fmt.tprintf("Current Zoom : %.2f x\nCurrent Resolved Size: %v px", current_zoom, resolved_size ) draw_text(current_zoom_text, demo_ctx.font_firacode, { 0.2, zoom_info_y }) - // ve.configure_snap( & demo_ctx.ve_ctx, u32(0), u32(0) ) - size := measure_text_size( zoom_text, demo_ctx.font_firacode, zoomed_text_base_size, 0 ) * current_zoom x_offset := (size.x / demo_ctx.screen_size.x) * 0.5 zoomed_text_pos := Vec2 { 0.5 - x_offset, zoomed_text_y } @@ -567,8 +564,6 @@ etiam dignissim diam quis enim. Convallis convallis tellus id interdum.` draw_text(codes[grid[y * GRID_W + x]], demo_ctx.font_demo_raincode, { pos_x, pos_y }, size = 20, color = code_colour) } - - // ve.set_colour(&ve_ctx, {1.0, 1.0, 1.0, 1.0}) } // Cache pressure test diff --git a/scripts/build_sokol_demo.ps1 b/scripts/build_sokol_demo.ps1 index 5dad272..c600cf0 100644 --- a/scripts/build_sokol_demo.ps1 +++ b/scripts/build_sokol_demo.ps1 @@ -67,6 +67,15 @@ push-location $path_thirdparty } pop-location +$path_stb_truetype = join-path $path_thirdparty 'stb\src' + +push-location $path_stb_truetype + $devshell = Join-Path $PSScriptRoot 'helpers/devshell.ps1' + . $devshell -arch amd64 + + & .\build.bat +pop-location + $odin_compiler_defs = join-path $PSScriptRoot 'helpers/odin_compiler_defs.ps1' . 
$odin_compiler_defs diff --git a/thirdparty/stb/lib/stb_truetype.lib b/thirdparty/stb/lib/stb_truetype.lib index 16ecf94..4f1ea6c 100644 Binary files a/thirdparty/stb/lib/stb_truetype.lib and b/thirdparty/stb/lib/stb_truetype.lib differ diff --git a/thirdparty/stb/src/gb/gb.h b/thirdparty/stb/src/gb/gb.h new file mode 100644 index 0000000..adeb554 --- /dev/null +++ b/thirdparty/stb/src/gb/gb.h @@ -0,0 +1,10824 @@ +/* gb.h - v0.33 - Ginger Bill's C Helper Library - public domain + - no warranty implied; use at your own risk + + This is a single header file with a bunch of useful stuff + to replace the C/C++ standard library + +=========================================================================== + YOU MUST + + #define GB_IMPLEMENTATION + + in EXACTLY _one_ C or C++ file that includes this header, BEFORE the + include like this: + + #define GB_IMPLEMENTATION + #include "gb.h" + + All other files should just #include "gb.h" without #define + + + If you want the platform layer, YOU MUST + + #define GB_PLATFORM + + BEFORE the include like this: + + #define GB_PLATFORM + #include "gb.h" + +=========================================================================== + +LICENSE + This software is dual-licensed to the public domain and under the following + license: you are granted a perpetual, irrevocable license to copy, modify, + publish, and distribute this file as you see fit. + +WARNING + - This library is _slightly_ experimental and features may not work as expected. + - This also means that many functions are not documented. + +CREDITS + Written by Ginger Bill + +TODOS + - Remove CRT dependency for people who want that + - But do I really? + - Or make it only depend on the really needed stuff? + - Older compiler support? + - How old do you wanna go? + - Only support C90+extension and C99 not pure C89. + - File handling + - All files to be UTF-8 (even on windows) + - Better Virtual Memory handling + - Generic Heap Allocator (tcmalloc/dlmalloc/?) 
+ - Fixed Heap Allocator + - Better UTF support and conversion + - Free List, best fit rather than first fit + - More date & time functions + +VERSION HISTORY + 0.33 - Minor fixes + 0.32 - Minor fixes + 0.31 - Add gb_file_remove + 0.30 - Changes to gbThread (and gbMutex on Windows) + 0.29 - Add extras for gbString + 0.28 - Handle UCS2 correctly in Win32 part + 0.27 - OSX fixes and Linux gbAffinity + 0.26d - Minor changes to how gbFile works + 0.26c - gb_str_to_f* fix + 0.26b - Minor fixes + 0.26a - gbString Fix + 0.26 - Default allocator flags and generic hash table + 0.25a - Fix UTF-8 stuff + 0.25 - OS X gbPlatform Support (missing some things) + 0.24b - Compile on OSX (excluding platform part) + 0.24a - Minor additions + 0.24 - Enum convention change + 0.23 - Optional Windows.h removal (because I'm crazy) + 0.22a - Remove gbVideoMode from gb_platform_init_* + 0.22 - gbAffinity - (Missing Linux version) + 0.21 - Platform Layer Restructuring + 0.20 - Improve file io + 0.19 - Clipboard Text + 0.18a - Controller vibration + 0.18 - Raw keyboard and mouse input for WIN32 + 0.17d - Fixed printf bug for strings + 0.17c - Compile as 32 bit + 0.17b - Change formating style because why not? + 0.17a - Dropped C90 Support (For numerous reasons) + 0.17 - Instantiated Hash Table + 0.16a - Minor code layout changes + 0.16 - New file API and improved platform layer + 0.15d - Linux Experimental Support (DON'T USE IT PLEASE) + 0.15c - Linux Experimental Support (DON'T USE IT) + 0.15b - C90 Support + 0.15a - gb_atomic(32|64)_spin_(lock|unlock) + 0.15 - Recursive "Mutex"; Key States; gbRandom + 0.14 - Better File Handling and better printf (WIN32 Only) + 0.13 - Highly experimental platform layer (WIN32 Only) + 0.12b - Fix minor file bugs + 0.12a - Compile as C++ + 0.12 - New File Handing System! No stdio or stdlib! 
(WIN32 Only) + 0.11a - Add string precision and width (experimental) + 0.11 - Started making stdio & stdlib optional (Not tested much) + 0.10c - Fix gb_endian_swap32() + 0.10b - Probable timing bug for gb_time_now() + 0.10a - Work on multiple compilers + 0.10 - Scratch Memory Allocator + 0.09a - Faster Mutex and the Free List is slightly improved + 0.09 - Basic Virtual Memory System and Dreadful Free List allocator + 0.08a - Fix *_appendv bug + 0.08 - Huge Overhaul! + 0.07a - Fix alignment in gb_heap_allocator_proc + 0.07 - Hash Table and Hashing Functions + 0.06c - Better Documentation + 0.06b - OS X Support + 0.06a - Linux Support + 0.06 - Windows GCC Support and MSVC x86 Support + 0.05b - Formatting + 0.05a - Minor function name changes + 0.05 - Radix Sort for unsigned integers (TODO: Other primitives) + 0.04 - Better UTF support and search/sort procs + 0.03 - Completely change procedure naming convention + 0.02a - Bug fixes + 0.02 - Change naming convention and gbArray(Type) + 0.01 - Initial Version +*/ + + +#ifndef GB_INCLUDE_GB_H +#define GB_INCLUDE_GB_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#if defined(__cplusplus) + #define GB_EXTERN extern "C" +#else + #define GB_EXTERN extern +#endif + +#if defined(_WIN32) + #define GB_DLL_EXPORT GB_EXTERN __declspec(dllexport) + #define GB_DLL_IMPORT GB_EXTERN __declspec(dllimport) +#else + #define GB_DLL_EXPORT GB_EXTERN __attribute__((visibility("default"))) + #define GB_DLL_IMPORT GB_EXTERN +#endif + +// NOTE(bill): Redefine for DLL, etc. 
+#ifndef GB_DEF + #ifdef GB_STATIC + #define GB_DEF static + #else + #define GB_DEF extern + #endif +#endif + +#if defined(_WIN64) || defined(__x86_64__) || defined(_M_X64) || defined(__64BIT__) || defined(__powerpc64__) || defined(__ppc64__) + #ifndef GB_ARCH_64_BIT + #define GB_ARCH_64_BIT 1 + #endif +#else + // NOTE(bill): I'm only supporting 32 bit and 64 bit systems + #ifndef GB_ARCH_32_BIT + #define GB_ARCH_32_BIT 1 + #endif +#endif + + +#ifndef GB_ENDIAN_ORDER +#define GB_ENDIAN_ORDER + // TODO(bill): Is the a good way or is it better to test for certain compilers and macros? + #define GB_IS_BIG_ENDIAN (!*(u8*)&(u16){1}) + #define GB_IS_LITTLE_ENDIAN (!GB_IS_BIG_ENDIAN) +#endif + +#if defined(_WIN32) || defined(_WIN64) + #ifndef GB_SYSTEM_WINDOWS + #define GB_SYSTEM_WINDOWS 1 + #endif +#elif defined(__APPLE__) && defined(__MACH__) + #ifndef GB_SYSTEM_OSX + #define GB_SYSTEM_OSX 1 + #endif +#elif defined(__unix__) + #ifndef GB_SYSTEM_UNIX + #define GB_SYSTEM_UNIX 1 + #endif + + #if defined(__linux__) + #ifndef GB_SYSTEM_LINUX + #define GB_SYSTEM_LINUX 1 + #endif + #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) + #ifndef GB_SYSTEM_FREEBSD + #define GB_SYSTEM_FREEBSD 1 + #endif + #else + #error This UNIX operating system is not supported + #endif +#else + #error This operating system is not supported +#endif + +#if defined(_MSC_VER) + #define GB_COMPILER_MSVC 1 +#elif defined(__GNUC__) + #define GB_COMPILER_GCC 1 +#elif defined(__clang__) + #define GB_COMPILER_CLANG 1 +#else + #error Unknown compiler +#endif + +#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || defined(__x86_64__) + #ifndef GB_CPU_X86 + #define GB_CPU_X86 1 + #endif + #ifndef GB_CACHE_LINE_SIZE + #define GB_CACHE_LINE_SIZE 64 + #endif + +#elif defined(_M_PPC) || defined(__powerpc__) || defined(__powerpc64__) + #ifndef GB_CPU_PPC + #define GB_CPU_PPC 1 + #endif + #ifndef GB_CACHE_LINE_SIZE + #define GB_CACHE_LINE_SIZE 128 + #endif + +#elif defined(__arm__) + #ifndef 
GB_CPU_ARM + #define GB_CPU_ARM 1 + #endif + #ifndef GB_CACHE_LINE_SIZE + #define GB_CACHE_LINE_SIZE 64 + #endif + +#elif defined(__MIPSEL__) || defined(__mips_isa_rev) + #ifndef GB_CPU_MIPS + #define GB_CPU_MIPS 1 + #endif + #ifndef GB_CACHE_LINE_SIZE + #define GB_CACHE_LINE_SIZE 64 + #endif + +#else + #error Unknown CPU Type +#endif + + + +#ifndef GB_STATIC_ASSERT + #define GB_STATIC_ASSERT3(cond, msg) typedef char static_assertion_##msg[(!!(cond))*2-1] + // NOTE(bill): Token pasting madness!! + #define GB_STATIC_ASSERT2(cond, line) GB_STATIC_ASSERT3(cond, static_assertion_at_line_##line) + #define GB_STATIC_ASSERT1(cond, line) GB_STATIC_ASSERT2(cond, line) + #define GB_STATIC_ASSERT(cond) GB_STATIC_ASSERT1(cond, __LINE__) +#endif + + +//////////////////////////////////////////////////////////////// +// +// Headers +// +// + +#if defined(_WIN32) && !defined(__MINGW32__) + #ifndef _CRT_SECURE_NO_WARNINGS + #define _CRT_SECURE_NO_WARNINGS + #endif +#endif + +#if defined(GB_SYSTEM_UNIX) + #define _GNU_SOURCE + #define _LARGEFILE64_SOURCE +#endif + + +// TODO(bill): How many of these headers do I really need? 
+// #include +#if !defined(GB_SYSTEM_WINDOWS) + #include + #include +#endif + + + +#if defined(GB_SYSTEM_WINDOWS) + #if !defined(GB_NO_WINDOWS_H) + #define NOMINMAX 1 + #define WIN32_LEAN_AND_MEAN 1 + #define WIN32_MEAN_AND_LEAN 1 + #define VC_EXTRALEAN 1 + #include + #undef NOMINMAX + #undef WIN32_LEAN_AND_MEAN + #undef WIN32_MEAN_AND_LEAN + #undef VC_EXTRALEAN + #endif + + #include // NOTE(bill): _aligned_*() + #include +#else + #include + #include + #include + #include + #ifndef _IOSC11_SOURCE + #define _IOSC11_SOURCE + #endif + #include // NOTE(bill): malloc on linux + #include + #if !defined(GB_SYSTEM_OSX) + #include + #endif + #include + #include + #include + #include + #include + + #if defined(GB_CPU_X86) + #include + #endif +#endif + +#if defined(GB_SYSTEM_OSX) + #include + #include + #include + #include + #include + #include + #include + #include +#endif + +#if defined(GB_SYSTEM_UNIX) + #include +#endif + + +//////////////////////////////////////////////////////////////// +// +// Base Types +// +// + +#if defined(GB_COMPILER_MSVC) + #if _MSC_VER < 1300 + typedef unsigned char u8; + typedef signed char i8; + typedef unsigned short u16; + typedef signed short i16; + typedef unsigned int u32; + typedef signed int i32; + #else + typedef unsigned __int8 u8; + typedef signed __int8 i8; + typedef unsigned __int16 u16; + typedef signed __int16 i16; + typedef unsigned __int32 u32; + typedef signed __int32 i32; + #endif + typedef unsigned __int64 u64; + typedef signed __int64 i64; +#else + #include + typedef uint8_t u8; + typedef int8_t i8; + typedef uint16_t u16; + typedef int16_t i16; + typedef uint32_t u32; + typedef int32_t i32; + typedef uint64_t u64; + typedef int64_t i64; +#endif + +GB_STATIC_ASSERT(sizeof(u8) == sizeof(i8)); +GB_STATIC_ASSERT(sizeof(u16) == sizeof(i16)); +GB_STATIC_ASSERT(sizeof(u32) == sizeof(i32)); +GB_STATIC_ASSERT(sizeof(u64) == sizeof(i64)); + +GB_STATIC_ASSERT(sizeof(u8) == 1); +GB_STATIC_ASSERT(sizeof(u16) == 2); 
+GB_STATIC_ASSERT(sizeof(u32) == 4); +GB_STATIC_ASSERT(sizeof(u64) == 8); + +typedef size_t usize; +typedef ptrdiff_t isize; + +GB_STATIC_ASSERT(sizeof(usize) == sizeof(isize)); + +// NOTE(bill): (u)intptr is only here for semantic reasons really as this library will only support 32/64 bit OSes. +// NOTE(bill): Are there any modern OSes (not 16 bit) where intptr != isize ? +#if defined(_WIN64) + typedef signed __int64 intptr; + typedef unsigned __int64 uintptr; +#elif defined(_WIN32) + // NOTE(bill); To mark types changing their size, e.g. intptr + #ifndef _W64 + #if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 + #define _W64 __w64 + #else + #define _W64 + #endif + #endif + + typedef _W64 signed int intptr; + typedef _W64 unsigned int uintptr; +#else + typedef uintptr_t uintptr; + typedef intptr_t intptr; +#endif + +GB_STATIC_ASSERT(sizeof(uintptr) == sizeof(intptr)); + +typedef float f32; +typedef double f64; + +GB_STATIC_ASSERT(sizeof(f32) == 4); +GB_STATIC_ASSERT(sizeof(f64) == 8); + +typedef i32 Rune; // NOTE(bill): Unicode codepoint +#define GB_RUNE_INVALID cast(Rune)(0xfffd) +#define GB_RUNE_MAX cast(Rune)(0x0010ffff) +#define GB_RUNE_BOM cast(Rune)(0xfeff) +#define GB_RUNE_EOF cast(Rune)(-1) + + +typedef i8 b8; +typedef i16 b16; +typedef i32 b32; // NOTE(bill): Prefer this!!! + +// NOTE(bill): Get true and false +#if !defined(__cplusplus) + #if (defined(_MSC_VER) && _MSC_VER < 1800) || (!defined(_MSC_VER) && !defined(__STDC_VERSION__)) + #ifndef true + #define true (0 == 0) + #endif + #ifndef false + #define false (0 != 0) + #endif + typedef b8 bool; + #else + #include + #endif +#endif + +// NOTE(bill): These do are not prefixed with gb because the types are not. 
+#ifndef U8_MIN +#define U8_MIN 0u +#define U8_MAX 0xffu +#define I8_MIN (-0x7f - 1) +#define I8_MAX 0x7f + +#define U16_MIN 0u +#define U16_MAX 0xffffu +#define I16_MIN (-0x7fff - 1) +#define I16_MAX 0x7fff + +#define U32_MIN 0u +#define U32_MAX 0xffffffffu +#define I32_MIN (-0x7fffffff - 1) +#define I32_MAX 0x7fffffff + +#define U64_MIN 0ull +#define U64_MAX 0xffffffffffffffffull +#define I64_MIN (-0x7fffffffffffffffll - 1) +#define I64_MAX 0x7fffffffffffffffll + +#if defined(GB_ARCH_32_BIT) + #define USIZE_MIX U32_MIN + #define USIZE_MAX U32_MAX + + #define ISIZE_MIX S32_MIN + #define ISIZE_MAX S32_MAX +#elif defined(GB_ARCH_64_BIT) + #define USIZE_MIX U64_MIN + #define USIZE_MAX U64_MAX + + #define ISIZE_MIX I64_MIN + #define ISIZE_MAX I64_MAX +#else + #error Unknown architecture size. This library only supports 32 bit and 64 bit architectures. +#endif + +#define F32_MIN 1.17549435e-38f +#define F32_MAX 3.40282347e+38f + +#define F64_MIN 2.2250738585072014e-308 +#define F64_MAX 1.7976931348623157e+308 + +#endif + +#ifndef NULL + #if defined(__cplusplus) + #if __cplusplus >= 201103L + #define NULL nullptr + #else + #define NULL 0 + #endif + #else + #define NULL ((void *)0) + #endif +#endif + +// TODO(bill): Is this enough to get inline working? +#if !defined(__cplusplus) + #if defined(_MSC_VER) && _MSC_VER <= 1800 + #define inline __inline + #elif !defined(__STDC_VERSION__) + #define inline __inline__ + #else + #define inline + #endif +#endif + +#if !defined(gb_restrict) + #if defined(_MSC_VER) + #define gb_restrict __restrict + #elif defined(__STDC_VERSION__) + #define gb_restrict restrict + #else + #define gb_restrict + #endif +#endif + +// TODO(bill): Should force inline be a separate keyword and gb_inline be inline? 
+#if !defined(gb_inline) + #if defined(_MSC_VER) + #if _MSC_VER < 1300 + #define gb_inline + #else + #define gb_inline __forceinline + #endif + #else + #define gb_inline __attribute__ ((__always_inline__)) + #endif +#endif + +#if !defined(gb_no_inline) + #if defined(_MSC_VER) + #define gb_no_inline __declspec(noinline) + #else + #define gb_no_inline __attribute__ ((noinline)) + #endif +#endif + + +#if !defined(gb_thread_local) + #if defined(_MSC_VER) && _MSC_VER >= 1300 + #define gb_thread_local __declspec(thread) + #elif defined(__GNUC__) + #define gb_thread_local __thread + #else + #define gb_thread_local thread_local + #endif +#endif + + +// NOTE(bill): Easy to grep +// NOTE(bill): Not needed in macros +#ifndef cast +#define cast(Type) (Type) +#endif + +// NOTE(bill): Because a signed sizeof is more useful +#ifndef gb_size_of +#define gb_size_of(x) (isize)(sizeof(x)) +#endif + +#ifndef gb_count_of +#define gb_count_of(x) ((gb_size_of(x)/gb_size_of(0[x])) / ((isize)(!(gb_size_of(x) % gb_size_of(0[x]))))) +#endif + +#ifndef gb_offset_of +#define gb_offset_of(Type, element) ((isize)&(((Type *)0)->element)) +#endif + +#if defined(__cplusplus) +#ifndef gb_align_of + #if __cplusplus >= 201103L + #define gb_align_of(Type) (isize)alignof(Type) + #else +extern "C++" { + // NOTE(bill): Fucking Templates! + template struct gbAlignment_Trick { char c; T member; }; + #define gb_align_of(Type) gb_offset_of(gbAlignment_Trick, member) +} + #endif +#endif +#else + #ifndef gb_align_of + #define gb_align_of(Type) gb_offset_of(struct { char c; Type member; }, member) + #endif +#endif + +// NOTE(bill): I do wish I had a type_of that was portable +#ifndef gb_swap +#define gb_swap(Type, a, b) do { Type tmp = (a); (a) = (b); (b) = tmp; } while (0) +#endif + +// NOTE(bill): Because static means 3/4 different things in C/C++. Great design (!) 
+#ifndef gb_global +#define gb_global static // Global variables +#define gb_internal static // Internal linkage +#define gb_local_persist static // Local Persisting variables +#endif + + +#ifndef gb_unused + #if defined(_MSC_VER) + #define gb_unused(x) (__pragma(warning(suppress:4100))(x)) + #elif defined (__GCC__) + #define gb_unused(x) __attribute__((__unused__))(x) + #else + #define gb_unused(x) ((void)(gb_size_of(x))) + #endif +#endif + + + + +//////////////////////////////////////////////////////////////// +// +// Defer statement +// Akin to D's SCOPE_EXIT or +// similar to Go's defer but scope-based +// +// NOTE: C++11 (and above) only! +// +#if !defined(GB_NO_DEFER) && defined(__cplusplus) && ((defined(_MSC_VER) && _MSC_VER >= 1400) || (__cplusplus >= 201103L)) +extern "C++" { + // NOTE(bill): Stupid fucking templates + template struct gbRemoveReference { typedef T Type; }; + template struct gbRemoveReference { typedef T Type; }; + template struct gbRemoveReference { typedef T Type; }; + + /// NOTE(bill): "Move" semantics - invented because the C++ committee are idiots (as a collective not as indiviuals (well a least some aren't)) + template inline T &&gb_forward(typename gbRemoveReference::Type &t) { return static_cast(t); } + template inline T &&gb_forward(typename gbRemoveReference::Type &&t) { return static_cast(t); } + template inline T &&gb_move (T &&t) { return static_cast::Type &&>(t); } + template + struct gbprivDefer { + F f; + gbprivDefer(F &&f) : f(gb_forward(f)) {} + ~gbprivDefer() { f(); } + }; + template gbprivDefer gb__defer_func(F &&f) { return gbprivDefer(gb_forward(f)); } + + #define GB_DEFER_1(x, y) x##y + #define GB_DEFER_2(x, y) GB_DEFER_1(x, y) + #define GB_DEFER_3(x) GB_DEFER_2(x, __COUNTER__) + #define defer(code) auto GB_DEFER_3(_defer_) = gb__defer_func([&]()->void{code;}) +} + +// Example +#if 0 + gbMutex m; + gb_mutex_init(&m); + { + gb_mutex_lock(&m); + defer (gb_mutex_unlock(&m)); + + ... 
+ } +#endif + +#endif + + +//////////////////////////////////////////////////////////////// +// +// Macro Fun! +// +// + +#ifndef GB_JOIN_MACROS +#define GB_JOIN_MACROS + #define GB_JOIN2_IND(a, b) a##b + + #define GB_JOIN2(a, b) GB_JOIN2_IND(a, b) + #define GB_JOIN3(a, b, c) GB_JOIN2(GB_JOIN2(a, b), c) + #define GB_JOIN4(a, b, c, d) GB_JOIN2(GB_JOIN2(GB_JOIN2(a, b), c), d) +#endif + + +#ifndef GB_BIT +#define GB_BIT(x) (1<<(x)) +#endif + +#ifndef gb_min +#define gb_min(a, b) ((a) < (b) ? (a) : (b)) +#endif + +#ifndef gb_max +#define gb_max(a, b) ((a) > (b) ? (a) : (b)) +#endif + +#ifndef gb_min3 +#define gb_min3(a, b, c) gb_min(gb_min(a, b), c) +#endif + +#ifndef gb_max3 +#define gb_max3(a, b, c) gb_max(gb_max(a, b), c) +#endif + +#ifndef gb_clamp +#define gb_clamp(x, lower, upper) gb_min(gb_max((x), (lower)), (upper)) +#endif + +#ifndef gb_clamp01 +#define gb_clamp01(x) gb_clamp((x), 0, 1) +#endif + +#ifndef gb_is_between +#define gb_is_between(x, lower, upper) (((lower) <= (x)) && ((x) <= (upper))) +#endif + +#ifndef gb_abs +#define gb_abs(x) ((x) < 0 ? -(x) : (x)) +#endif + +/* NOTE(bill): Very useful bit setting */ +#ifndef GB_MASK_SET +#define GB_MASK_SET(var, set, mask) do { \ + if (set) (var) |= (mask); \ + else (var) &= ~(mask); \ +} while (0) +#endif + + +// NOTE(bill): Some compilers support applying printf-style warnings to user functions. +#if defined(__clang__) || defined(__GNUC__) +#define GB_PRINTF_ARGS(FMT) __attribute__((format(printf, FMT, (FMT+1)))) +#else +#define GB_PRINTF_ARGS(FMT) +#endif + +//////////////////////////////////////////////////////////////// +// +// Debug +// +// + + +#ifndef GB_DEBUG_TRAP + #if defined(_MSC_VER) + #if _MSC_VER < 1300 + #define GB_DEBUG_TRAP() __asm int 3 /* Trap to debugger! */ + #else + #define GB_DEBUG_TRAP() __debugbreak() + #endif + #else + #define GB_DEBUG_TRAP() __builtin_trap() + #endif +#endif + +#ifndef GB_ASSERT_MSG +#define GB_ASSERT_MSG(cond, msg, ...) 
do { \ + if (!(cond)) { \ + gb_assert_handler("Assertion Failure", #cond, __FILE__, cast(i64)__LINE__, msg, ##__VA_ARGS__); \ + GB_DEBUG_TRAP(); \ + } \ +} while (0) +#endif + +#ifndef GB_ASSERT +#define GB_ASSERT(cond) GB_ASSERT_MSG(cond, NULL) +#endif + +#ifndef GB_ASSERT_NOT_NULL +#define GB_ASSERT_NOT_NULL(ptr) GB_ASSERT_MSG((ptr) != NULL, #ptr " must not be NULL") +#endif + +// NOTE(bill): Things that shouldn't happen with a message! +#ifndef GB_PANIC +#define GB_PANIC(msg, ...) do { \ + gb_assert_handler("Panic", NULL, __FILE__, cast(i64)__LINE__, msg, ##__VA_ARGS__); \ + GB_DEBUG_TRAP(); \ +} while (0) +#endif + +GB_DEF void gb_assert_handler(char const *prefix, char const *condition, char const *file, i32 line, char const *msg, ...); + + + +//////////////////////////////////////////////////////////////// +// +// Memory +// +// + + +GB_DEF b32 gb_is_power_of_two(isize x); + +GB_DEF void * gb_align_forward(void *ptr, isize alignment); + +GB_DEF void * gb_pointer_add (void *ptr, isize bytes); +GB_DEF void * gb_pointer_sub (void *ptr, isize bytes); +GB_DEF void const *gb_pointer_add_const(void const *ptr, isize bytes); +GB_DEF void const *gb_pointer_sub_const(void const *ptr, isize bytes); +GB_DEF isize gb_pointer_diff (void const *begin, void const *end); + + +GB_DEF void gb_zero_size(void *ptr, isize size); +#ifndef gb_zero_item +#define gb_zero_item(t) gb_zero_size((t), gb_size_of(*(t))) // NOTE(bill): Pass pointer of struct +#define gb_zero_array(a, count) gb_zero_size((a), gb_size_of(*(a))*count) +#endif + +GB_DEF void * gb_memcopy (void *dest, void const *source, isize size); +GB_DEF void * gb_memmove (void *dest, void const *source, isize size); +GB_DEF void * gb_memset (void *data, u8 byte_value, isize size); +GB_DEF i32 gb_memcompare(void const *s1, void const *s2, isize size); +GB_DEF void gb_memswap (void *i, void *j, isize size); +GB_DEF void const *gb_memchr (void const *data, u8 byte_value, isize size); +GB_DEF void const *gb_memrchr (void const 
*data, u8 byte_value, isize size); + + +#ifndef gb_memcopy_array +#define gb_memcopy_array(dst, src, count) gb_memcopy((dst), (src), gb_size_of(*(dst))*(count)) +#endif + +#ifndef gb_memmove_array +#define gb_memmove_array(dst, src, count) gb_memmove((dst), (src), gb_size_of(*(dst))*(count)) +#endif + +// NOTE(bill): Very similar to doing `*cast(T *)(&u)` +#ifndef GB_BIT_CAST +#define GB_BIT_CAST(dest, source) do { \ + GB_STATIC_ASSERT(gb_size_of(*(dest)) <= gb_size_of(source)); \ + gb_memcopy((dest), &(source), gb_size_of(*dest)); \ +} while (0) +#endif + + + + +#ifndef gb_kilobytes +#define gb_kilobytes(x) ( (x) * (i64)(1024)) +#define gb_megabytes(x) (gb_kilobytes(x) * (i64)(1024)) +#define gb_gigabytes(x) (gb_megabytes(x) * (i64)(1024)) +#define gb_terabytes(x) (gb_gigabytes(x) * (i64)(1024)) +#endif + + + + +// Atomics + +// TODO(bill): Be specific with memory order? +// e.g. relaxed, acquire, release, acquire_release + +#if defined(GB_COMPILER_MSVC) +typedef struct gbAtomic32 { i32 volatile value; } gbAtomic32; +typedef struct gbAtomic64 { i64 volatile value; } gbAtomic64; +typedef struct gbAtomicPtr { void *volatile value; } gbAtomicPtr; +#else + #if defined(GB_ARCH_32_BIT) + #define GB_ATOMIC_PTR_ALIGNMENT 4 + #elif defined(GB_ARCH_64_BIT) + #define GB_ATOMIC_PTR_ALIGNMENT 8 + #else + #error Unknown architecture + #endif + +typedef struct gbAtomic32 { i32 volatile value; } __attribute__ ((aligned(4))) gbAtomic32; +typedef struct gbAtomic64 { i64 volatile value; } __attribute__ ((aligned(8))) gbAtomic64; +typedef struct gbAtomicPtr { void *volatile value; } __attribute__ ((aligned(GB_ATOMIC_PTR_ALIGNMENT))) gbAtomicPtr; +#endif + +GB_DEF i32 gb_atomic32_load (gbAtomic32 const volatile *a); +GB_DEF void gb_atomic32_store (gbAtomic32 volatile *a, i32 value); +GB_DEF i32 gb_atomic32_compare_exchange(gbAtomic32 volatile *a, i32 expected, i32 desired); +GB_DEF i32 gb_atomic32_exchanged (gbAtomic32 volatile *a, i32 desired); +GB_DEF i32 gb_atomic32_fetch_add 
(gbAtomic32 volatile *a, i32 operand); +GB_DEF i32 gb_atomic32_fetch_and (gbAtomic32 volatile *a, i32 operand); +GB_DEF i32 gb_atomic32_fetch_or (gbAtomic32 volatile *a, i32 operand); +GB_DEF b32 gb_atomic32_spin_lock (gbAtomic32 volatile *a, isize time_out); // NOTE(bill): time_out = -1 as default +GB_DEF void gb_atomic32_spin_unlock (gbAtomic32 volatile *a); +GB_DEF b32 gb_atomic32_try_acquire_lock(gbAtomic32 volatile *a); + + +GB_DEF i64 gb_atomic64_load (gbAtomic64 const volatile *a); +GB_DEF void gb_atomic64_store (gbAtomic64 volatile *a, i64 value); +GB_DEF i64 gb_atomic64_compare_exchange(gbAtomic64 volatile *a, i64 expected, i64 desired); +GB_DEF i64 gb_atomic64_exchanged (gbAtomic64 volatile *a, i64 desired); +GB_DEF i64 gb_atomic64_fetch_add (gbAtomic64 volatile *a, i64 operand); +GB_DEF i64 gb_atomic64_fetch_and (gbAtomic64 volatile *a, i64 operand); +GB_DEF i64 gb_atomic64_fetch_or (gbAtomic64 volatile *a, i64 operand); +GB_DEF b32 gb_atomic64_spin_lock (gbAtomic64 volatile *a, isize time_out); // NOTE(bill): time_out = -1 as default +GB_DEF void gb_atomic64_spin_unlock (gbAtomic64 volatile *a); +GB_DEF b32 gb_atomic64_try_acquire_lock(gbAtomic64 volatile *a); + + +GB_DEF void *gb_atomic_ptr_load (gbAtomicPtr const volatile *a); +GB_DEF void gb_atomic_ptr_store (gbAtomicPtr volatile *a, void *value); +GB_DEF void *gb_atomic_ptr_compare_exchange(gbAtomicPtr volatile *a, void *expected, void *desired); +GB_DEF void *gb_atomic_ptr_exchanged (gbAtomicPtr volatile *a, void *desired); +GB_DEF void *gb_atomic_ptr_fetch_add (gbAtomicPtr volatile *a, void *operand); +GB_DEF void *gb_atomic_ptr_fetch_and (gbAtomicPtr volatile *a, void *operand); +GB_DEF void *gb_atomic_ptr_fetch_or (gbAtomicPtr volatile *a, void *operand); +GB_DEF b32 gb_atomic_ptr_spin_lock (gbAtomicPtr volatile *a, isize time_out); // NOTE(bill): time_out = -1 as default +GB_DEF void gb_atomic_ptr_spin_unlock (gbAtomicPtr volatile *a); +GB_DEF b32 gb_atomic_ptr_try_acquire_lock(gbAtomicPtr 
volatile *a); + + +// Fences +GB_DEF void gb_yield_thread(void); +GB_DEF void gb_mfence (void); +GB_DEF void gb_sfence (void); +GB_DEF void gb_lfence (void); + + +#if defined(GB_SYSTEM_WINDOWS) +typedef struct gbSemaphore { void *win32_handle; } gbSemaphore; +#elif defined(GB_SYSTEM_OSX) +typedef struct gbSemaphore { semaphore_t osx_handle; } gbSemaphore; +#elif defined(GB_SYSTEM_UNIX) +typedef struct gbSemaphore { sem_t unix_handle; } gbSemaphore; +#else +#error +#endif + +GB_DEF void gb_semaphore_init (gbSemaphore *s); +GB_DEF void gb_semaphore_destroy(gbSemaphore *s); +GB_DEF void gb_semaphore_post (gbSemaphore *s, i32 count); +GB_DEF void gb_semaphore_release(gbSemaphore *s); // NOTE(bill): gb_semaphore_post(s, 1) +GB_DEF void gb_semaphore_wait (gbSemaphore *s); + + +// Mutex +typedef struct gbMutex { +#if defined(GB_SYSTEM_WINDOWS) + CRITICAL_SECTION win32_critical_section; +#else + pthread_mutex_t pthread_mutex; + pthread_mutexattr_t pthread_mutexattr; +#endif +} gbMutex; + +GB_DEF void gb_mutex_init (gbMutex *m); +GB_DEF void gb_mutex_destroy (gbMutex *m); +GB_DEF void gb_mutex_lock (gbMutex *m); +GB_DEF b32 gb_mutex_try_lock(gbMutex *m); +GB_DEF void gb_mutex_unlock (gbMutex *m); + +// NOTE(bill): If you wanted a Scoped Mutex in C++, why not use the defer() construct? +// No need for a silly wrapper class and it's clear! +#if 0 +gbMutex m = {0}; +gb_mutex_init(&m); +{ + gb_mutex_lock(&m); + defer (gb_mutex_unlock(&m)); + + // Do whatever as the mutex is now scoped based! 
+} +#endif + + + +#define GB_THREAD_PROC(name) isize name(struct gbThread *thread) +typedef GB_THREAD_PROC(gbThreadProc); + +typedef struct gbThread { +#if defined(GB_SYSTEM_WINDOWS) + void * win32_handle; +#else + pthread_t posix_handle; +#endif + + gbThreadProc *proc; + void * user_data; + isize user_index; + isize return_value; + + gbSemaphore semaphore; + isize stack_size; + b32 volatile is_running; +} gbThread; + +GB_DEF void gb_thread_init (gbThread *t); +GB_DEF void gb_thread_destroy (gbThread *t); +GB_DEF void gb_thread_start (gbThread *t, gbThreadProc *proc, void *data); +GB_DEF void gb_thread_start_with_stack(gbThread *t, gbThreadProc *proc, void *data, isize stack_size); +GB_DEF void gb_thread_join (gbThread *t); +GB_DEF b32 gb_thread_is_running (gbThread const *t); +GB_DEF u32 gb_thread_current_id (void); +GB_DEF void gb_thread_set_name (gbThread *t, char const *name); + + +// NOTE(bill): Thread Merge Operation +// Based on Sean Barrett's stb_sync +typedef struct gbSync { + i32 target; // Target Number of threads + i32 current; // Threads to hit + i32 waiting; // Threads waiting + + gbMutex start; + gbMutex mutex; + gbSemaphore release; +} gbSync; + +GB_DEF void gb_sync_init (gbSync *s); +GB_DEF void gb_sync_destroy (gbSync *s); +GB_DEF void gb_sync_set_target (gbSync *s, i32 count); +GB_DEF void gb_sync_release (gbSync *s); +GB_DEF i32 gb_sync_reach (gbSync *s); +GB_DEF void gb_sync_reach_and_wait(gbSync *s); + + + +#if defined(GB_SYSTEM_WINDOWS) + +typedef struct gbAffinity { + b32 is_accurate; + isize core_count; + isize thread_count; + #define GB_WIN32_MAX_THREADS (8 * gb_size_of(usize)) + usize core_masks[GB_WIN32_MAX_THREADS]; + +} gbAffinity; + +#elif defined(GB_SYSTEM_OSX) +typedef struct gbAffinity { + b32 is_accurate; + isize core_count; + isize thread_count; + isize threads_per_core; +} gbAffinity; + +#elif defined(GB_SYSTEM_LINUX) +typedef struct gbAffinity { + b32 is_accurate; + isize core_count; + isize thread_count; + isize 
threads_per_core; +} gbAffinity; +#else +#error TODO(bill): Unknown system +#endif + +GB_DEF void gb_affinity_init (gbAffinity *a); +GB_DEF void gb_affinity_destroy(gbAffinity *a); +GB_DEF b32 gb_affinity_set (gbAffinity *a, isize core, isize thread); +GB_DEF isize gb_affinity_thread_count_for_core(gbAffinity *a, isize core); + + + + +//////////////////////////////////////////////////////////////// +// +// Virtual Memory +// +// + +typedef struct gbVirtualMemory { + void *data; + isize size; +} gbVirtualMemory; + +GB_DEF gbVirtualMemory gb_virtual_memory(void *data, isize size); +GB_DEF gbVirtualMemory gb_vm_alloc (void *addr, isize size); +GB_DEF b32 gb_vm_free (gbVirtualMemory vm); +GB_DEF gbVirtualMemory gb_vm_trim (gbVirtualMemory vm, isize lead_size, isize size); +GB_DEF b32 gb_vm_purge (gbVirtualMemory vm); +GB_DEF isize gb_virtual_memory_page_size(isize *alignment_out); + + + + +//////////////////////////////////////////////////////////////// +// +// Custom Allocation +// +// + +typedef enum gbAllocationType { + gbAllocation_Alloc, + gbAllocation_Free, + gbAllocation_FreeAll, + gbAllocation_Resize, +} gbAllocationType; + +// NOTE(bill): This is useful so you can define an allocator of the same type and parameters +#define GB_ALLOCATOR_PROC(name) \ +void *name(void *allocator_data, gbAllocationType type, \ + isize size, isize alignment, \ + void *old_memory, isize old_size, \ + u64 flags) +typedef GB_ALLOCATOR_PROC(gbAllocatorProc); + +typedef struct gbAllocator { + gbAllocatorProc *proc; + void * data; +} gbAllocator; + +typedef enum gbAllocatorFlag { + gbAllocatorFlag_ClearToZero = GB_BIT(0), +} gbAllocatorFlag; + +// TODO(bill): Is this a decent default alignment? 
+#ifndef GB_DEFAULT_MEMORY_ALIGNMENT +#define GB_DEFAULT_MEMORY_ALIGNMENT (2 * gb_size_of(void *)) +#endif + +#ifndef GB_DEFAULT_ALLOCATOR_FLAGS +#define GB_DEFAULT_ALLOCATOR_FLAGS (gbAllocatorFlag_ClearToZero) +#endif + +GB_DEF void *gb_alloc_align (gbAllocator a, isize size, isize alignment); +GB_DEF void *gb_alloc (gbAllocator a, isize size); +GB_DEF void gb_free (gbAllocator a, void *ptr); +GB_DEF void gb_free_all (gbAllocator a); +GB_DEF void *gb_resize (gbAllocator a, void *ptr, isize old_size, isize new_size); +GB_DEF void *gb_resize_align(gbAllocator a, void *ptr, isize old_size, isize new_size, isize alignment); +// TODO(bill): For gb_resize, should the use need to pass the old_size or only the new_size? + +GB_DEF void *gb_alloc_copy (gbAllocator a, void const *src, isize size); +GB_DEF void *gb_alloc_copy_align(gbAllocator a, void const *src, isize size, isize alignment); +GB_DEF char *gb_alloc_str (gbAllocator a, char const *str); +GB_DEF char *gb_alloc_str_len (gbAllocator a, char const *str, isize len); + + +// NOTE(bill): These are very useful and the type cast has saved me from numerous bugs +#ifndef gb_alloc_item +#define gb_alloc_item(allocator_, Type) (Type *)gb_alloc(allocator_, gb_size_of(Type)) +#define gb_alloc_array(allocator_, Type, count) (Type *)gb_alloc(allocator_, gb_size_of(Type) * (count)) +#endif + +// NOTE(bill): Use this if you don't need a "fancy" resize allocation +GB_DEF void *gb_default_resize_align(gbAllocator a, void *ptr, isize old_size, isize new_size, isize alignment); + + + +// TODO(bill): Probably use a custom heap allocator system that doesn't depend on malloc/free +// Base it off TCMalloc or something else? Or something entirely custom? +GB_DEF gbAllocator gb_heap_allocator(void); +GB_DEF GB_ALLOCATOR_PROC(gb_heap_allocator_proc); + +// NOTE(bill): Yep, I use my own allocator system! 
+#ifndef gb_malloc +#define gb_malloc(sz) gb_alloc(gb_heap_allocator(), sz) +#define gb_mfree(ptr) gb_free(gb_heap_allocator(), ptr) +#endif + + + +// +// Arena Allocator +// +typedef struct gbArena { + gbAllocator backing; + void * physical_start; + isize total_size; + isize total_allocated; + isize temp_count; +} gbArena; + +GB_DEF void gb_arena_init_from_memory (gbArena *arena, void *start, isize size); +GB_DEF void gb_arena_init_from_allocator(gbArena *arena, gbAllocator backing, isize size); +GB_DEF void gb_arena_init_sub (gbArena *arena, gbArena *parent_arena, isize size); +GB_DEF void gb_arena_free (gbArena *arena); + +GB_DEF isize gb_arena_alignment_of (gbArena *arena, isize alignment); +GB_DEF isize gb_arena_size_remaining(gbArena *arena, isize alignment); +GB_DEF void gb_arena_check (gbArena *arena); + + +// Allocation Types: alloc, free_all, resize +GB_DEF gbAllocator gb_arena_allocator(gbArena *arena); +GB_DEF GB_ALLOCATOR_PROC(gb_arena_allocator_proc); + + + +typedef struct gbTempArenaMemory { + gbArena *arena; + isize original_count; +} gbTempArenaMemory; + +GB_DEF gbTempArenaMemory gb_temp_arena_memory_begin(gbArena *arena); +GB_DEF void gb_temp_arena_memory_end (gbTempArenaMemory tmp_mem); + + + + + + + +// +// Pool Allocator +// + + +typedef struct gbPool { + gbAllocator backing; + void * physical_start; + void * free_list; + isize block_size; + isize block_align; + isize total_size; +} gbPool; + +GB_DEF void gb_pool_init (gbPool *pool, gbAllocator backing, isize num_blocks, isize block_size); +GB_DEF void gb_pool_init_align(gbPool *pool, gbAllocator backing, isize num_blocks, isize block_size, isize block_align); +GB_DEF void gb_pool_free (gbPool *pool); + +// Allocation Types: alloc, free +GB_DEF gbAllocator gb_pool_allocator(gbPool *pool); +GB_DEF GB_ALLOCATOR_PROC(gb_pool_allocator_proc); + + + +// NOTE(bill): Used for allocators to keep track of sizes +typedef struct gbAllocationHeader { + isize size; +} gbAllocationHeader; + +GB_DEF 
gbAllocationHeader *gb_allocation_header (void *data); +GB_DEF void gb_allocation_header_fill(gbAllocationHeader *header, void *data, isize size); + +// TODO(bill): Find better way of doing this without #if #elif etc. +#if defined(GB_ARCH_32_BIT) +#define GB_ISIZE_HIGH_BIT 0x80000000 +#elif defined(GB_ARCH_64_BIT) +#define GB_ISIZE_HIGH_BIT 0x8000000000000000ll +#else +#error +#endif + +// +// Free List Allocator +// + +// IMPORTANT TODO(bill): Thoroughly test the free list allocator! +// NOTE(bill): This is a very shitty free list as it just picks the first free block not the best size +// as I am just being lazy. Also, I will probably remove it later; it's only here because why not?! +// +// NOTE(bill): I may also complete remove this if I completely implement a fixed heap allocator + +typedef struct gbFreeListBlock gbFreeListBlock; +struct gbFreeListBlock { + gbFreeListBlock *next; + isize size; +}; + +typedef struct gbFreeList { + void * physical_start; + isize total_size; + + gbFreeListBlock *curr_block; + + isize total_allocated; + isize allocation_count; +} gbFreeList; + +GB_DEF void gb_free_list_init (gbFreeList *fl, void *start, isize size); +GB_DEF void gb_free_list_init_from_allocator(gbFreeList *fl, gbAllocator backing, isize size); + +// Allocation Types: alloc, free, free_all, resize +GB_DEF gbAllocator gb_free_list_allocator(gbFreeList *fl); +GB_DEF GB_ALLOCATOR_PROC(gb_free_list_allocator_proc); + + + +// +// Scratch Memory Allocator - Ring Buffer Based Arena +// + +typedef struct gbScratchMemory { + void *physical_start; + isize total_size; + void *alloc_point; + void *free_point; +} gbScratchMemory; + +GB_DEF void gb_scratch_memory_init (gbScratchMemory *s, void *start, isize size); +GB_DEF b32 gb_scratch_memory_is_in_use(gbScratchMemory *s, void *ptr); + + +// Allocation Types: alloc, free, free_all, resize +GB_DEF gbAllocator gb_scratch_allocator(gbScratchMemory *s); +GB_DEF GB_ALLOCATOR_PROC(gb_scratch_allocator_proc); + +// TODO(bill): Stack 
allocator +// TODO(bill): Fixed heap allocator +// TODO(bill): General heap allocator. Maybe a TCMalloc like clone? + + +//////////////////////////////////////////////////////////////// +// +// Sort & Search +// +// + +#define GB_COMPARE_PROC(name) int name(void const *a, void const *b) +typedef GB_COMPARE_PROC(gbCompareProc); + +#define GB_COMPARE_PROC_PTR(def) GB_COMPARE_PROC((*def)) + +// Producure pointers +// NOTE(bill): The offset parameter specifies the offset in the structure +// e.g. gb_i32_cmp(gb_offset_of(Thing, value)) +// Use 0 if it's just the type instead. + +GB_DEF GB_COMPARE_PROC_PTR(gb_i16_cmp (isize offset)); +GB_DEF GB_COMPARE_PROC_PTR(gb_i32_cmp (isize offset)); +GB_DEF GB_COMPARE_PROC_PTR(gb_i64_cmp (isize offset)); +GB_DEF GB_COMPARE_PROC_PTR(gb_isize_cmp(isize offset)); +GB_DEF GB_COMPARE_PROC_PTR(gb_str_cmp (isize offset)); +GB_DEF GB_COMPARE_PROC_PTR(gb_f32_cmp (isize offset)); +GB_DEF GB_COMPARE_PROC_PTR(gb_f64_cmp (isize offset)); +GB_DEF GB_COMPARE_PROC_PTR(gb_char_cmp (isize offset)); + +// TODO(bill): Better sorting algorithms +// NOTE(bill): Uses quick sort for large arrays but insertion sort for small +#define gb_sort_array(array, count, compare_proc) gb_sort(array, count, gb_size_of(*(array)), compare_proc) +GB_DEF void gb_sort(void *base, isize count, isize size, gbCompareProc compare_proc); + +// NOTE(bill): the count of temp == count of items +#define gb_radix_sort(Type) gb_radix_sort_##Type +#define GB_RADIX_SORT_PROC(Type) void gb_radix_sort(Type)(Type *items, Type *temp, isize count) + +GB_DEF GB_RADIX_SORT_PROC(u8); +GB_DEF GB_RADIX_SORT_PROC(u16); +GB_DEF GB_RADIX_SORT_PROC(u32); +GB_DEF GB_RADIX_SORT_PROC(u64); + + +// NOTE(bill): Returns index or -1 if not found +#define gb_binary_search_array(array, count, key, compare_proc) gb_binary_search(array, count, gb_size_of(*(array)), key, compare_proc) +GB_DEF isize gb_binary_search(void const *base, isize count, isize size, void const *key, gbCompareProc compare_proc); + 
+#define gb_shuffle_array(array, count) gb_shuffle(array, count, gb_size_of(*(array))) +GB_DEF void gb_shuffle(void *base, isize count, isize size); + +#define gb_reverse_array(array, count) gb_reverse(array, count, gb_size_of(*(array))) +GB_DEF void gb_reverse(void *base, isize count, isize size); + +//////////////////////////////////////////////////////////////// +// +// Char Functions +// +// + +GB_DEF char gb_char_to_lower (char c); +GB_DEF char gb_char_to_upper (char c); +GB_DEF b32 gb_char_is_space (char c); +GB_DEF b32 gb_char_is_digit (char c); +GB_DEF b32 gb_char_is_hex_digit (char c); +GB_DEF b32 gb_char_is_alpha (char c); +GB_DEF b32 gb_char_is_alphanumeric(char c); +GB_DEF i32 gb_digit_to_int (char c); +GB_DEF i32 gb_hex_digit_to_int (char c); + +// NOTE(bill): ASCII only +GB_DEF void gb_str_to_lower(char *str); +GB_DEF void gb_str_to_upper(char *str); + +GB_DEF isize gb_strlen (char const *str); +GB_DEF isize gb_strnlen(char const *str, isize max_len); +GB_DEF i32 gb_strcmp (char const *s1, char const *s2); +GB_DEF i32 gb_strncmp(char const *s1, char const *s2, isize len); +GB_DEF char *gb_strcpy (char *dest, char const *source); +GB_DEF char *gb_strncpy(char *dest, char const *source, isize len); +GB_DEF isize gb_strlcpy(char *dest, char const *source, isize len); +GB_DEF char *gb_strrev (char *str); // NOTE(bill): ASCII only + +// NOTE(bill): A less fucking crazy strtok! 
+GB_DEF char const *gb_strtok(char *output, char const *src, char const *delimit); + +GB_DEF b32 gb_str_has_prefix(char const *str, char const *prefix); +GB_DEF b32 gb_str_has_suffix(char const *str, char const *suffix); + +GB_DEF char const *gb_char_first_occurence(char const *str, char c); +GB_DEF char const *gb_char_last_occurence (char const *str, char c); + +GB_DEF void gb_str_concat(char *dest, isize dest_len, + char const *src_a, isize src_a_len, + char const *src_b, isize src_b_len); + +GB_DEF u64 gb_str_to_u64(char const *str, char **end_ptr, i32 base); // TODO(bill): Support more than just decimal and hexadecimal +GB_DEF i64 gb_str_to_i64(char const *str, char **end_ptr, i32 base); // TODO(bill): Support more than just decimal and hexadecimal +GB_DEF f32 gb_str_to_f32(char const *str, char **end_ptr); +GB_DEF f64 gb_str_to_f64(char const *str, char **end_ptr); +GB_DEF void gb_i64_to_str(i64 value, char *string, i32 base); +GB_DEF void gb_u64_to_str(u64 value, char *string, i32 base); + + +//////////////////////////////////////////////////////////////// +// +// UTF-8 Handling +// +// + +// NOTE(bill): Does not check if utf-8 string is valid +GB_DEF isize gb_utf8_strlen (u8 const *str); +GB_DEF isize gb_utf8_strnlen(u8 const *str, isize max_len); + +// NOTE(bill): Windows doesn't handle 8 bit filenames well ('cause Micro$hit) +GB_DEF u16 *gb_utf8_to_ucs2 (u16 *buffer, isize len, u8 const *str); +GB_DEF u8 * gb_ucs2_to_utf8 (u8 *buffer, isize len, u16 const *str); +GB_DEF u16 *gb_utf8_to_ucs2_buf(u8 const *str); // NOTE(bill): Uses locally persisting buffer +GB_DEF u8 * gb_ucs2_to_utf8_buf(u16 const *str); // NOTE(bill): Uses locally persisting buffer + +// NOTE(bill): Returns size of codepoint in bytes +GB_DEF isize gb_utf8_decode (u8 const *str, isize str_len, Rune *codepoint); +GB_DEF isize gb_utf8_codepoint_size(u8 const *str, isize str_len); +GB_DEF isize gb_utf8_encode_rune (u8 buf[4], Rune r); + 
+//////////////////////////////////////////////////////////////// +// +// gbString - C Read-Only-Compatible +// +// +/* +Reasoning: + + By default, strings in C are null terminated which means you have to count + the number of character up to the null character to calculate the length. + Many "better" C string libraries will create a struct for a string. + i.e. + + struct String { + Allocator allocator; + size_t length; + size_t capacity; + char * cstring; + }; + + This library tries to augment normal C strings in a better way that is still + compatible with C-style strings. + + +--------+-----------------------+-----------------+ + | Header | Binary C-style String | Null Terminator | + +--------+-----------------------+-----------------+ + | + +-> Pointer returned by functions + + Due to the meta-data being stored before the string pointer and every gb string + having an implicit null terminator, gb strings are full compatible with c-style + strings and read-only functions. + +Advantages: + + * gb strings can be passed to C-style string functions without accessing a struct + member of calling a function, i.e. + + gb_printf("%s\n", gb_str); + + Many other libraries do either of these: + + gb_printf("%s\n", string->cstr); + gb_printf("%s\n", get_cstring(string)); + + * You can access each character just like a C-style string: + + gb_printf("%c %c\n", str[0], str[13]); + + * gb strings are singularly allocated. The meta-data is next to the character + array which is better for the cache. + +Disadvantages: + + * In the C version of these functions, many return the new string. i.e. + str = gb_string_appendc(str, "another string"); + This could be changed to gb_string_appendc(&str, "another string"); but I'm still not sure. 
+ + * This is incompatible with "gb_string.h" strings +*/ + +#if 0 +#define GB_IMPLEMENTATION +#include "gb.h" +int main(int argc, char **argv) { + gbString str = gb_string_make("Hello"); + gbString other_str = gb_string_make_length(", ", 2); + str = gb_string_append(str, other_str); + str = gb_string_appendc(str, "world!"); + + gb_printf("%s\n", str); // Hello, world! + + gb_printf("str length = %d\n", gb_string_length(str)); + + str = gb_string_set(str, "Potato soup"); + gb_printf("%s\n", str); // Potato soup + + str = gb_string_set(str, "Hello"); + other_str = gb_string_set(other_str, "Pizza"); + if (gb_strings_are_equal(str, other_str)) + gb_printf("Not called\n"); + else + gb_printf("Called\n"); + + str = gb_string_set(str, "Ab.;!...AHello World ??"); + str = gb_string_trim(str, "Ab.;!. ?"); + gb_printf("%s\n", str); // "Hello World" + + gb_string_free(str); + gb_string_free(other_str); + + return 0; +} +#endif + +// TODO(bill): Should this be a wrapper to gbArray(char) or this extra type safety better? +typedef char *gbString; + +// NOTE(bill): If you only need a small string, just use a standard c string or change the size from isize to u16, etc. 
+typedef struct gbStringHeader { + gbAllocator allocator; + isize length; + isize capacity; +} gbStringHeader; + +#define GB_STRING_HEADER(str) (cast(gbStringHeader *)(str) - 1) + +GB_DEF gbString gb_string_make_reserve (gbAllocator a, isize capacity); +GB_DEF gbString gb_string_make (gbAllocator a, char const *str); +GB_DEF gbString gb_string_make_length (gbAllocator a, void const *str, isize num_bytes); +GB_DEF void gb_string_free (gbString str); +GB_DEF gbString gb_string_duplicate (gbAllocator a, gbString const str); +GB_DEF isize gb_string_length (gbString const str); +GB_DEF isize gb_string_capacity (gbString const str); +GB_DEF isize gb_string_available_space(gbString const str); +GB_DEF void gb_string_clear (gbString str); +GB_DEF gbString gb_string_append (gbString str, gbString const other); +GB_DEF gbString gb_string_append_length (gbString str, void const *other, isize num_bytes); +GB_DEF gbString gb_string_appendc (gbString str, char const *other); +GB_DEF gbString gb_string_append_rune (gbString str, Rune r); +GB_DEF gbString gb_string_append_fmt (gbString str, char const *fmt, ...); +GB_DEF gbString gb_string_set (gbString str, char const *cstr); +GB_DEF gbString gb_string_make_space_for (gbString str, isize add_len); +GB_DEF isize gb_string_allocation_size(gbString const str); +GB_DEF b32 gb_string_are_equal (gbString const lhs, gbString const rhs); +GB_DEF gbString gb_string_trim (gbString str, char const *cut_set); +GB_DEF gbString gb_string_trim_space (gbString str); // Whitespace ` \t\r\n\v\f` + + + +//////////////////////////////////////////////////////////////// +// +// Fixed Capacity Buffer (POD Types) +// +// +// gbBuffer(Type) works like gbString or gbArray where the actual type is just a pointer to the first +// element. 
+// + +typedef struct gbBufferHeader { + isize count; + isize capacity; +} gbBufferHeader; + +#define gbBuffer(Type) Type * + +#define GB_BUFFER_HEADER(x) (cast(gbBufferHeader *)(x) - 1) +#define gb_buffer_count(x) (GB_BUFFER_HEADER(x)->count) +#define gb_buffer_capacity(x) (GB_BUFFER_HEADER(x)->capacity) + +#define gb_buffer_init(x, allocator, cap) do { \ + void **nx = cast(void **)&(x); \ + gbBufferHeader *gb__bh = cast(gbBufferHeader *)gb_alloc((allocator), (cap)*gb_size_of(*(x))); \ + gb__bh->count = 0; \ + gb__bh->capacity = cap; \ + *nx = cast(void *)(gb__bh+1); \ +} while (0) + + +#define gb_buffer_free(x, allocator) (gb_free(allocator, GB_BUFFER_HEADER(x))) + +#define gb_buffer_append(x, item) do { (x)[gb_buffer_count(x)++] = (item); } while (0) + +#define gb_buffer_appendv(x, items, item_count) do { \ + GB_ASSERT(gb_size_of(*(items)) == gb_size_of(*(x))); \ + GB_ASSERT(gb_buffer_count(x)+item_count <= gb_buffer_capacity(x)); \ + gb_memcopy(&(x)[gb_buffer_count(x)], (items), gb_size_of(*(x))*(item_count)); \ + gb_buffer_count(x) += (item_count); \ +} while (0) + +#define gb_buffer_pop(x) do { GB_ASSERT(gb_buffer_count(x) > 0); gb_buffer_count(x)--; } while (0) +#define gb_buffer_clear(x) do { gb_buffer_count(x) = 0; } while (0) + + + +//////////////////////////////////////////////////////////////// +// +// Dynamic Array (POD Types) +// +// NOTE(bill): I know this is a macro hell but C is an old (and shit) language with no proper arrays +// Also why the fuck not?! It fucking works! And it has custom allocation, which is already better than C++! +// +// gbArray(Type) works like gbString or gbBuffer where the actual type is just a pointer to the first +// element. 
+// + + + +// Available Procedures for gbArray(Type) +// gb_array_init +// gb_array_free +// gb_array_set_capacity +// gb_array_grow +// gb_array_append +// gb_array_appendv +// gb_array_pop +// gb_array_clear +// gb_array_resize +// gb_array_reserve +// + +#if 0 // Example +void foo(void) { + isize i; + int test_values[] = {4, 2, 1, 7}; + gbAllocator a = gb_heap_allocator(); + gbArray(int) items; + + gb_array_init(items, a); + + gb_array_append(items, 1); + gb_array_append(items, 4); + gb_array_append(items, 9); + gb_array_append(items, 16); + + items[1] = 3; // Manually set value + // NOTE: No array bounds checking + + for (i = 0; i < items.count; i++) + gb_printf("%d\n", items[i]); + // 1 + // 3 + // 9 + // 16 + + gb_array_clear(items); + + gb_array_appendv(items, test_values, gb_count_of(test_values)); + for (i = 0; i < items.count; i++) + gb_printf("%d\n", items[i]); + // 4 + // 2 + // 1 + // 7 + + gb_array_free(items); +} +#endif + +typedef struct gbArrayHeader { + gbAllocator allocator; + isize count; + isize capacity; +} gbArrayHeader; + +// NOTE(bill): This thing is magic! +#define gbArray(Type) Type * + +#ifndef GB_ARRAY_GROW_FORMULA +#define GB_ARRAY_GROW_FORMULA(x) (2*(x) + 8) +#endif + +GB_STATIC_ASSERT(GB_ARRAY_GROW_FORMULA(0) > 0); + +#define GB_ARRAY_HEADER(x) (cast(gbArrayHeader *)(x) - 1) +#define gb_array_allocator(x) (GB_ARRAY_HEADER(x)->allocator) +#define gb_array_count(x) (GB_ARRAY_HEADER(x)->count) +#define gb_array_capacity(x) (GB_ARRAY_HEADER(x)->capacity) + +// TODO(bill): Have proper alignment! 
+#define gb_array_init_reserve(x, allocator_, cap) do { \ + void **gb__array_ = cast(void **)&(x); \ + gbArrayHeader *gb__ah = cast(gbArrayHeader *)gb_alloc(allocator_, gb_size_of(gbArrayHeader)+gb_size_of(*(x))*(cap)); \ + gb__ah->allocator = allocator_; \ + gb__ah->count = 0; \ + gb__ah->capacity = cap; \ + *gb__array_ = cast(void *)(gb__ah+1); \ +} while (0) + +// NOTE(bill): Give it an initial default capacity +#define gb_array_init(x, allocator) gb_array_init_reserve(x, allocator, GB_ARRAY_GROW_FORMULA(0)) + +#define gb_array_free(x) do { \ + gbArrayHeader *gb__ah = GB_ARRAY_HEADER(x); \ + gb_free(gb__ah->allocator, gb__ah); \ +} while (0) + +#define gb_array_set_capacity(x, capacity) do { \ + if (x) { \ + void **gb__array_ = cast(void **)&(x); \ + *gb__array_ = gb__array_set_capacity((x), (capacity), gb_size_of(*(x))); \ + } \ +} while (0) + +// NOTE(bill): Do not use the thing below directly, use the macro +GB_DEF void *gb__array_set_capacity(void *array, isize capacity, isize element_size); + + +// TODO(bill): Decide on a decent growing formula for gbArray +#define gb_array_grow(x, min_capacity) do { \ + isize new_capacity = GB_ARRAY_GROW_FORMULA(gb_array_capacity(x)); \ + if (new_capacity < (min_capacity)) \ + new_capacity = (min_capacity); \ + gb_array_set_capacity(x, new_capacity); \ +} while (0) + + +#define gb_array_append(x, item) do { \ + if (gb_array_capacity(x) < gb_array_count(x)+1) \ + gb_array_grow(x, 0); \ + (x)[gb_array_count(x)++] = (item); \ +} while (0) + +#define gb_array_appendv(x, items, item_count) do { \ + gbArrayHeader *gb__ah = GB_ARRAY_HEADER(x); \ + GB_ASSERT(gb_size_of((items)[0]) == gb_size_of((x)[0])); \ + if (gb__ah->capacity < gb__ah->count+(item_count)) \ + gb_array_grow(x, gb__ah->count+(item_count)); \ + gb_memcopy(&(x)[gb__ah->count], (items), gb_size_of((x)[0])*(item_count));\ + gb__ah->count += (item_count); \ +} while (0) + + + +#define gb_array_pop(x) do { GB_ASSERT(GB_ARRAY_HEADER(x)->count > 0); 
GB_ARRAY_HEADER(x)->count--; } while (0) +#define gb_array_clear(x) do { GB_ARRAY_HEADER(x)->count = 0; } while (0) + +#define gb_array_resize(x, new_count) do { \ + if (GB_ARRAY_HEADER(x)->capacity < (new_count)) \ + gb_array_grow(x, (new_count)); \ + GB_ARRAY_HEADER(x)->count = (new_count); \ +} while (0) + + +#define gb_array_reserve(x, new_capacity) do { \ + if (GB_ARRAY_HEADER(x)->capacity < (new_capacity)) \ + gb_array_set_capacity(x, new_capacity); \ +} while (0) + + + + + +//////////////////////////////////////////////////////////////// +// +// Hashing and Checksum Functions +// +// + +GB_EXTERN u32 gb_adler32(void const *data, isize len); + +GB_EXTERN u32 gb_crc32(void const *data, isize len); +GB_EXTERN u64 gb_crc64(void const *data, isize len); + +GB_EXTERN u32 gb_fnv32 (void const *data, isize len); +GB_EXTERN u64 gb_fnv64 (void const *data, isize len); +GB_EXTERN u32 gb_fnv32a(void const *data, isize len); +GB_EXTERN u64 gb_fnv64a(void const *data, isize len); + +// NOTE(bill): Default seed of 0x9747b28c +// NOTE(bill): I prefer using murmur64 for most hashes +GB_EXTERN u32 gb_murmur32(void const *data, isize len); +GB_EXTERN u64 gb_murmur64(void const *data, isize len); + +GB_EXTERN u32 gb_murmur32_seed(void const *data, isize len, u32 seed); +GB_EXTERN u64 gb_murmur64_seed(void const *data, isize len, u64 seed); + + +//////////////////////////////////////////////////////////////// +// +// Instantiated Hash Table +// +// This is an attempt to implement a templated hash table +// NOTE(bill): The key is aways a u64 for simplicity and you will _probably_ _never_ need anything bigger. +// +// Hash table type and function declaration, call: GB_TABLE_DECLARE(PREFIX, NAME, N, VALUE) +// Hash table function definitions, call: GB_TABLE_DEFINE(NAME, N, VALUE) +// +// PREFIX - a prefix for function prototypes e.g. extern, static, etc. 
+// NAME - Name of the Hash Table +// FUNC - the name will prefix function names +// VALUE - the type of the value to be stored +// +// NOTE(bill): I really wish C had decent metaprogramming capabilities (and no I don't mean C++'s templates either) +// + +typedef struct gbHashTableFindResult { + isize hash_index; + isize entry_prev; + isize entry_index; +} gbHashTableFindResult; + +#define GB_TABLE(PREFIX, NAME, FUNC, VALUE) \ + GB_TABLE_DECLARE(PREFIX, NAME, FUNC, VALUE); \ + GB_TABLE_DEFINE(NAME, FUNC, VALUE); + +#define GB_TABLE_DECLARE(PREFIX, NAME, FUNC, VALUE) \ +typedef struct GB_JOIN2(NAME,Entry) { \ + u64 key; \ + isize next; \ + VALUE value; \ +} GB_JOIN2(NAME,Entry); \ +\ +typedef struct NAME { \ + gbArray(isize) hashes; \ + gbArray(GB_JOIN2(NAME,Entry)) entries; \ +} NAME; \ +\ +PREFIX void GB_JOIN2(FUNC,init) (NAME *h, gbAllocator a); \ +PREFIX void GB_JOIN2(FUNC,destroy) (NAME *h); \ +PREFIX VALUE * GB_JOIN2(FUNC,get) (NAME *h, u64 key); \ +PREFIX void GB_JOIN2(FUNC,set) (NAME *h, u64 key, VALUE value); \ +PREFIX void GB_JOIN2(FUNC,grow) (NAME *h); \ +PREFIX void GB_JOIN2(FUNC,rehash) (NAME *h, isize new_count); \ + + + + + +#define GB_TABLE_DEFINE(NAME, FUNC, VALUE) \ +void GB_JOIN2(FUNC,init)(NAME *h, gbAllocator a) { \ + gb_array_init(h->hashes, a); \ + gb_array_init(h->entries, a); \ +} \ +\ +void GB_JOIN2(FUNC,destroy)(NAME *h) { \ + if (h->entries) gb_array_free(h->entries); \ + if (h->hashes) gb_array_free(h->hashes); \ +} \ +\ +gb_internal isize GB_JOIN2(FUNC,_add_entry)(NAME *h, u64 key) { \ + isize index; \ + GB_JOIN2(NAME,Entry) e = {0}; \ + e.key = key; \ + e.next = -1; \ + index = gb_array_count(h->entries); \ + gb_array_append(h->entries, e); \ + return index; \ +} \ +\ +gb_internal gbHashTableFindResult GB_JOIN2(FUNC,_find)(NAME *h, u64 key) { \ + gbHashTableFindResult r = {-1, -1, -1}; \ + if (gb_array_count(h->hashes) > 0) { \ + r.hash_index = key % gb_array_count(h->hashes); \ + r.entry_index = h->hashes[r.hash_index]; \ + while 
(r.entry_index >= 0) { \ + if (h->entries[r.entry_index].key == key) \ + return r; \ + r.entry_prev = r.entry_index; \ + r.entry_index = h->entries[r.entry_index].next; \ + } \ + } \ + return r; \ +} \ +\ +gb_internal b32 GB_JOIN2(FUNC,_full)(NAME *h) { \ + return 0.75f * gb_array_count(h->hashes) < gb_array_count(h->entries); \ +} \ +\ +void GB_JOIN2(FUNC,grow)(NAME *h) { \ + isize new_count = GB_ARRAY_GROW_FORMULA(gb_array_count(h->entries)); \ + GB_JOIN2(FUNC,rehash)(h, new_count); \ +} \ +\ +void GB_JOIN2(FUNC,rehash)(NAME *h, isize new_count) { \ + isize i, j; \ + NAME nh = {0}; \ + GB_JOIN2(FUNC,init)(&nh, gb_array_allocator(h->hashes)); \ + gb_array_resize(nh.hashes, new_count); \ + gb_array_reserve(nh.entries, gb_array_count(h->entries)); \ + for (i = 0; i < new_count; i++) \ + nh.hashes[i] = -1; \ + for (i = 0; i < gb_array_count(h->entries); i++) { \ + GB_JOIN2(NAME,Entry) *e; \ + gbHashTableFindResult fr; \ + if (gb_array_count(nh.hashes) == 0) \ + GB_JOIN2(FUNC,grow)(&nh); \ + e = &h->entries[i]; \ + fr = GB_JOIN2(FUNC,_find)(&nh, e->key); \ + j = GB_JOIN2(FUNC,_add_entry)(&nh, e->key); \ + if (fr.entry_prev < 0) \ + nh.hashes[fr.hash_index] = j; \ + else \ + nh.entries[fr.entry_prev].next = j; \ + nh.entries[j].next = fr.entry_index; \ + nh.entries[j].value = e->value; \ + if (GB_JOIN2(FUNC,_full)(&nh)) \ + GB_JOIN2(FUNC,grow)(&nh); \ + } \ + GB_JOIN2(FUNC,destroy)(h); \ + h->hashes = nh.hashes; \ + h->entries = nh.entries; \ +} \ +\ +VALUE *GB_JOIN2(FUNC,get)(NAME *h, u64 key) { \ + isize index = GB_JOIN2(FUNC,_find)(h, key).entry_index; \ + if (index >= 0) \ + return &h->entries[index].value; \ + return NULL; \ +} \ +\ +void GB_JOIN2(FUNC,set)(NAME *h, u64 key, VALUE value) { \ + isize index; \ + gbHashTableFindResult fr; \ + if (gb_array_count(h->hashes) == 0) \ + GB_JOIN2(FUNC,grow)(h); \ + fr = GB_JOIN2(FUNC,_find)(h, key); \ + if (fr.entry_index >= 0) { \ + index = fr.entry_index; \ + } else { \ + index = GB_JOIN2(FUNC,_add_entry)(h, key); \ + if 
(fr.entry_prev >= 0) { \ + h->entries[fr.entry_prev].next = index; \ + } else { \ + h->hashes[fr.hash_index] = index; \ + } \ + } \ + h->entries[index].value = value; \ + if (GB_JOIN2(FUNC,_full)(h)) \ + GB_JOIN2(FUNC,grow)(h); \ +} \ + + + + +//////////////////////////////////////////////////////////////// +// +// File Handling +// + + +typedef u32 gbFileMode; +typedef enum gbFileModeFlag { + gbFileMode_Read = GB_BIT(0), + gbFileMode_Write = GB_BIT(1), + gbFileMode_Append = GB_BIT(2), + gbFileMode_Rw = GB_BIT(3), + + gbFileMode_Modes = gbFileMode_Read | gbFileMode_Write | gbFileMode_Append | gbFileMode_Rw, +} gbFileModeFlag; + +// NOTE(bill): Only used internally and for the file operations +typedef enum gbSeekWhenceType { + gbSeekWhence_Begin = 0, + gbSeekWhence_Current = 1, + gbSeekWhence_End = 2, +} gbSeekWhenceType; + +typedef enum gbFileError { + gbFileError_None, + gbFileError_Invalid, + gbFileError_InvalidFilename, + gbFileError_Exists, + gbFileError_NotExists, + gbFileError_Permission, + gbFileError_TruncationFailure, +} gbFileError; + +typedef union gbFileDescriptor { + void * p; + intptr i; + uintptr u; +} gbFileDescriptor; + +typedef struct gbFileOperations gbFileOperations; + +#define GB_FILE_OPEN_PROC(name) gbFileError name(gbFileDescriptor *fd, gbFileOperations *ops, gbFileMode mode, char const *filename) +#define GB_FILE_READ_AT_PROC(name) b32 name(gbFileDescriptor fd, void *buffer, isize size, i64 offset, isize *bytes_read) +#define GB_FILE_WRITE_AT_PROC(name) b32 name(gbFileDescriptor fd, void const *buffer, isize size, i64 offset, isize *bytes_written) +#define GB_FILE_SEEK_PROC(name) b32 name(gbFileDescriptor fd, i64 offset, gbSeekWhenceType whence, i64 *new_offset) +#define GB_FILE_CLOSE_PROC(name) void name(gbFileDescriptor fd) +typedef GB_FILE_OPEN_PROC(gbFileOpenProc); +typedef GB_FILE_READ_AT_PROC(gbFileReadProc); +typedef GB_FILE_WRITE_AT_PROC(gbFileWriteProc); +typedef GB_FILE_SEEK_PROC(gbFileSeekProc); +typedef 
GB_FILE_CLOSE_PROC(gbFileCloseProc); + +struct gbFileOperations { + gbFileReadProc *read_at; + gbFileWriteProc *write_at; + gbFileSeekProc *seek; + gbFileCloseProc *close; +}; + +extern gbFileOperations const gbDefaultFileOperations; + + +// typedef struct gbDirInfo { +// u8 *buf; +// isize buf_count; +// isize buf_pos; +// } gbDirInfo; + +typedef u64 gbFileTime; + +typedef struct gbFile { + gbFileOperations ops; + gbFileDescriptor fd; + char const * filename; + gbFileTime last_write_time; + // gbDirInfo * dir_info; // TODO(bill): Get directory info +} gbFile; + +// TODO(bill): gbAsyncFile + +typedef enum gbFileStandardType { + gbFileStandard_Input, + gbFileStandard_Output, + gbFileStandard_Error, + + gbFileStandard_Count, +} gbFileStandardType; + +GB_DEF gbFile *const gb_file_get_standard(gbFileStandardType std); + +GB_DEF gbFileError gb_file_create (gbFile *file, char const *filename); +GB_DEF gbFileError gb_file_open (gbFile *file, char const *filename); +GB_DEF gbFileError gb_file_open_mode (gbFile *file, gbFileMode mode, char const *filename); +GB_DEF gbFileError gb_file_new (gbFile *file, gbFileDescriptor fd, gbFileOperations ops, char const *filename); +GB_DEF b32 gb_file_read_at_check (gbFile *file, void *buffer, isize size, i64 offset, isize *bytes_read); +GB_DEF b32 gb_file_write_at_check(gbFile *file, void const *buffer, isize size, i64 offset, isize *bytes_written); +GB_DEF b32 gb_file_read_at (gbFile *file, void *buffer, isize size, i64 offset); +GB_DEF b32 gb_file_write_at (gbFile *file, void const *buffer, isize size, i64 offset); +GB_DEF i64 gb_file_seek (gbFile *file, i64 offset); +GB_DEF i64 gb_file_seek_to_end (gbFile *file); +GB_DEF i64 gb_file_skip (gbFile *file, i64 bytes); // NOTE(bill): Skips a certain amount of bytes +GB_DEF i64 gb_file_tell (gbFile *file); +GB_DEF gbFileError gb_file_close (gbFile *file); +GB_DEF b32 gb_file_read (gbFile *file, void *buffer, isize size); +GB_DEF b32 gb_file_write (gbFile *file, void const *buffer, isize 
size); +GB_DEF i64 gb_file_size (gbFile *file); +GB_DEF char const *gb_file_name (gbFile *file); +GB_DEF gbFileError gb_file_truncate (gbFile *file, i64 size); +GB_DEF b32 gb_file_has_changed (gbFile *file); // NOTE(bill): Changed since lasted checked +// TODO(bill): +// gbFileError gb_file_temp(gbFile *file); +// + +typedef struct gbFileContents { + gbAllocator allocator; + void * data; + isize size; +} gbFileContents; + + +GB_DEF gbFileContents gb_file_read_contents(gbAllocator a, b32 zero_terminate, char const *filepath); +GB_DEF void gb_file_free_contents(gbFileContents *fc); + + +// TODO(bill): Should these have different na,es as they do not take in a gbFile * ??? +GB_DEF b32 gb_file_exists (char const *filepath); +GB_DEF gbFileTime gb_file_last_write_time(char const *filepath); +GB_DEF b32 gb_file_copy (char const *existing_filename, char const *new_filename, b32 fail_if_exists); +GB_DEF b32 gb_file_move (char const *existing_filename, char const *new_filename); +GB_DEF b32 gb_file_remove (char const *filename); + + +#ifndef GB_PATH_SEPARATOR + #if defined(GB_SYSTEM_WINDOWS) + #define GB_PATH_SEPARATOR '\\' + #else + #define GB_PATH_SEPARATOR '/' + #endif +#endif + +GB_DEF b32 gb_path_is_absolute (char const *path); +GB_DEF b32 gb_path_is_relative (char const *path); +GB_DEF b32 gb_path_is_root (char const *path); +GB_DEF char const *gb_path_base_name (char const *path); +GB_DEF char const *gb_path_extension (char const *path); +GB_DEF char * gb_path_get_full_name(gbAllocator a, char const *path); + + +//////////////////////////////////////////////////////////////// +// +// Printing +// +// + +GB_DEF isize gb_printf (char const *fmt, ...) GB_PRINTF_ARGS(1); +GB_DEF isize gb_printf_va (char const *fmt, va_list va); +GB_DEF isize gb_printf_err (char const *fmt, ...) GB_PRINTF_ARGS(1); +GB_DEF isize gb_printf_err_va (char const *fmt, va_list va); +GB_DEF isize gb_fprintf (gbFile *f, char const *fmt, ...) 
GB_PRINTF_ARGS(2); +GB_DEF isize gb_fprintf_va (gbFile *f, char const *fmt, va_list va); + +GB_DEF char *gb_bprintf (char const *fmt, ...) GB_PRINTF_ARGS(1); // NOTE(bill): A locally persisting buffer is used internally +GB_DEF char *gb_bprintf_va (char const *fmt, va_list va); // NOTE(bill): A locally persisting buffer is used internally +GB_DEF isize gb_snprintf (char *str, isize n, char const *fmt, ...) GB_PRINTF_ARGS(3); +GB_DEF isize gb_snprintf_va(char *str, isize n, char const *fmt, va_list va); + +//////////////////////////////////////////////////////////////// +// +// DLL Handling +// +// + +typedef void *gbDllHandle; +typedef void (*gbDllProc)(void); + +GB_DEF gbDllHandle gb_dll_load (char const *filepath); +GB_DEF void gb_dll_unload (gbDllHandle dll); +GB_DEF gbDllProc gb_dll_proc_address(gbDllHandle dll, char const *proc_name); + + +//////////////////////////////////////////////////////////////// +// +// Time +// +// + +GB_DEF u64 gb_rdtsc (void); +GB_DEF f64 gb_time_now (void); // NOTE(bill): This is only for relative time e.g. 
game loops +GB_DEF u64 gb_utc_time_now(void); // NOTE(bill): Number of microseconds since 1601-01-01 UTC +GB_DEF void gb_sleep_ms (u32 ms); + + +//////////////////////////////////////////////////////////////// +// +// Miscellany +// +// + +typedef struct gbRandom { + u32 offsets[8]; + u32 value; +} gbRandom; + +// NOTE(bill): Generates from numerous sources to produce a decent pseudo-random seed +GB_DEF void gb_random_init (gbRandom *r); +GB_DEF u32 gb_random_gen_u32 (gbRandom *r); +GB_DEF u32 gb_random_gen_u32_unique(gbRandom *r); +GB_DEF u64 gb_random_gen_u64 (gbRandom *r); // NOTE(bill): (gb_random_gen_u32() << 32) | gb_random_gen_u32() +GB_DEF isize gb_random_gen_isize (gbRandom *r); +GB_DEF i64 gb_random_range_i64 (gbRandom *r, i64 lower_inc, i64 higher_inc); +GB_DEF isize gb_random_range_isize (gbRandom *r, isize lower_inc, isize higher_inc); +GB_DEF f64 gb_random_range_f64 (gbRandom *r, f64 lower_inc, f64 higher_inc); + + + + +GB_DEF void gb_exit (u32 code); +GB_DEF void gb_yield (void); +GB_DEF void gb_set_env (char const *name, char const *value); +GB_DEF void gb_unset_env(char const *name); + +GB_DEF u16 gb_endian_swap16(u16 i); +GB_DEF u32 gb_endian_swap32(u32 i); +GB_DEF u64 gb_endian_swap64(u64 i); + +GB_DEF isize gb_count_set_bits(u64 mask); + +//////////////////////////////////////////////////////////////// +// +// Platform Stuff +// +// + +#if defined(GB_PLATFORM) + +// NOTE(bill): +// Coordiate system - +ve x - left to right +// - +ve y - bottom to top +// - Relative to window + +// TODO(bill): Proper documentation for this with code examples + +// Window Support - Complete +// OS X Support - Missing: +// * Sofware framebuffer +// * (show|hide) window +// * show_cursor +// * toggle (fullscreen|borderless) +// * set window position +// * Clipboard +// * GameControllers +// Linux Support - None +// Other OS Support - None + +#ifndef GB_MAX_GAME_CONTROLLER_COUNT +#define GB_MAX_GAME_CONTROLLER_COUNT 4 +#endif + +typedef enum gbKeyType { + 
gbKey_Unknown = 0, // Unhandled key + + // NOTE(bill): Allow the basic printable keys to be aliased with their chars + gbKey_0 = '0', + gbKey_1, + gbKey_2, + gbKey_3, + gbKey_4, + gbKey_5, + gbKey_6, + gbKey_7, + gbKey_8, + gbKey_9, + + gbKey_A = 'A', + gbKey_B, + gbKey_C, + gbKey_D, + gbKey_E, + gbKey_F, + gbKey_G, + gbKey_H, + gbKey_I, + gbKey_J, + gbKey_K, + gbKey_L, + gbKey_M, + gbKey_N, + gbKey_O, + gbKey_P, + gbKey_Q, + gbKey_R, + gbKey_S, + gbKey_T, + gbKey_U, + gbKey_V, + gbKey_W, + gbKey_X, + gbKey_Y, + gbKey_Z, + + gbKey_Lbracket = '[', + gbKey_Rbracket = ']', + gbKey_Semicolon = ';', + gbKey_Comma = ',', + gbKey_Period = '.', + gbKey_Quote = '\'', + gbKey_Slash = '/', + gbKey_Backslash = '\\', + gbKey_Grave = '`', + gbKey_Equals = '=', + gbKey_Minus = '-', + gbKey_Space = ' ', + + gbKey__Pad = 128, // NOTE(bill): make sure ASCII is reserved + + gbKey_Escape, // Escape + gbKey_Lcontrol, // Left Control + gbKey_Lshift, // Left Shift + gbKey_Lalt, // Left Alt + gbKey_Lsystem, // Left OS specific: window (Windows and Linux), apple/cmd (MacOS X), ... + gbKey_Rcontrol, // Right Control + gbKey_Rshift, // Right Shift + gbKey_Ralt, // Right Alt + gbKey_Rsystem, // Right OS specific: window (Windows and Linux), apple/cmd (MacOS X), ... 
+ gbKey_Menu, // Menu + gbKey_Return, // Return + gbKey_Backspace, // Backspace + gbKey_Tab, // Tabulation + gbKey_Pageup, // Page up + gbKey_Pagedown, // Page down + gbKey_End, // End + gbKey_Home, // Home + gbKey_Insert, // Insert + gbKey_Delete, // Delete + gbKey_Plus, // + + gbKey_Subtract, // - + gbKey_Multiply, // * + gbKey_Divide, // / + gbKey_Left, // Left arrow + gbKey_Right, // Right arrow + gbKey_Up, // Up arrow + gbKey_Down, // Down arrow + gbKey_Numpad0, // Numpad 0 + gbKey_Numpad1, // Numpad 1 + gbKey_Numpad2, // Numpad 2 + gbKey_Numpad3, // Numpad 3 + gbKey_Numpad4, // Numpad 4 + gbKey_Numpad5, // Numpad 5 + gbKey_Numpad6, // Numpad 6 + gbKey_Numpad7, // Numpad 7 + gbKey_Numpad8, // Numpad 8 + gbKey_Numpad9, // Numpad 9 + gbKey_NumpadDot, // Numpad . + gbKey_NumpadEnter, // Numpad Enter + gbKey_F1, // F1 + gbKey_F2, // F2 + gbKey_F3, // F3 + gbKey_F4, // F4 + gbKey_F5, // F5 + gbKey_F6, // F6 + gbKey_F7, // F7 + gbKey_F8, // F8 + gbKey_F9, // F8 + gbKey_F10, // F10 + gbKey_F11, // F11 + gbKey_F12, // F12 + gbKey_F13, // F13 + gbKey_F14, // F14 + gbKey_F15, // F15 + gbKey_Pause, // Pause + + gbKey_Count, +} gbKeyType; + +/* TODO(bill): Change name? 
*/ +typedef u8 gbKeyState; +typedef enum gbKeyStateFlag { + gbKeyState_Down = GB_BIT(0), + gbKeyState_Pressed = GB_BIT(1), + gbKeyState_Released = GB_BIT(2) +} gbKeyStateFlag; + +GB_DEF void gb_key_state_update(gbKeyState *s, b32 is_down); + +typedef enum gbMouseButtonType { + gbMouseButton_Left, + gbMouseButton_Middle, + gbMouseButton_Right, + gbMouseButton_X1, + gbMouseButton_X2, + + gbMouseButton_Count +} gbMouseButtonType; + +typedef enum gbControllerAxisType { + gbControllerAxis_LeftX, + gbControllerAxis_LeftY, + gbControllerAxis_RightX, + gbControllerAxis_RightY, + gbControllerAxis_LeftTrigger, + gbControllerAxis_RightTrigger, + + gbControllerAxis_Count +} gbControllerAxisType; + +typedef enum gbControllerButtonType { + gbControllerButton_Up, + gbControllerButton_Down, + gbControllerButton_Left, + gbControllerButton_Right, + gbControllerButton_A, + gbControllerButton_B, + gbControllerButton_X, + gbControllerButton_Y, + gbControllerButton_LeftShoulder, + gbControllerButton_RightShoulder, + gbControllerButton_Back, + gbControllerButton_Start, + gbControllerButton_LeftThumb, + gbControllerButton_RightThumb, + + gbControllerButton_Count +} gbControllerButtonType; + +typedef struct gbGameController { + b16 is_connected, is_analog; + + f32 axes[gbControllerAxis_Count]; + gbKeyState buttons[gbControllerButton_Count]; +} gbGameController; + +#if defined(GB_SYSTEM_WINDOWS) + typedef struct _XINPUT_GAMEPAD XINPUT_GAMEPAD; + typedef struct _XINPUT_STATE XINPUT_STATE; + typedef struct _XINPUT_VIBRATION XINPUT_VIBRATION; + + #define GB_XINPUT_GET_STATE(name) unsigned long __stdcall name(unsigned long dwUserIndex, XINPUT_STATE *pState) + typedef GB_XINPUT_GET_STATE(gbXInputGetStateProc); + + #define GB_XINPUT_SET_STATE(name) unsigned long __stdcall name(unsigned long dwUserIndex, XINPUT_VIBRATION *pVibration) + typedef GB_XINPUT_SET_STATE(gbXInputSetStateProc); +#endif + + +typedef enum gbWindowFlag { + gbWindow_Fullscreen = GB_BIT(0), + gbWindow_Hidden = GB_BIT(1), + 
gbWindow_Borderless = GB_BIT(2), + gbWindow_Resizable = GB_BIT(3), + gbWindow_Minimized = GB_BIT(4), + gbWindow_Maximized = GB_BIT(5), + gbWindow_FullscreenDesktop = gbWindow_Fullscreen | gbWindow_Borderless, +} gbWindowFlag; + +typedef enum gbRendererType { + gbRenderer_Opengl, + gbRenderer_Software, + + gbRenderer_Count, +} gbRendererType; + + + +#if defined(GB_SYSTEM_WINDOWS) && !defined(_WINDOWS_) +typedef struct tagBITMAPINFOHEADER { + unsigned long biSize; + long biWidth; + long biHeight; + u16 biPlanes; + u16 biBitCount; + unsigned long biCompression; + unsigned long biSizeImage; + long biXPelsPerMeter; + long biYPelsPerMeter; + unsigned long biClrUsed; + unsigned long biClrImportant; +} BITMAPINFOHEADER, *PBITMAPINFOHEADER; +typedef struct tagRGBQUAD { + u8 rgbBlue; + u8 rgbGreen; + u8 rgbRed; + u8 rgbReserved; +} RGBQUAD; +typedef struct tagBITMAPINFO { + BITMAPINFOHEADER bmiHeader; + RGBQUAD bmiColors[1]; +} BITMAPINFO, *PBITMAPINFO; +#endif + +typedef struct gbPlatform { + b32 is_initialized; + + void *window_handle; + i32 window_x, window_y; + i32 window_width, window_height; + u32 window_flags; + b16 window_is_closed, window_has_focus; + +#if defined(GB_SYSTEM_WINDOWS) + void *win32_dc; +#elif defined(GB_SYSTEM_OSX) + void *osx_autorelease_pool; // TODO(bill): Is this really needed? 
+#endif + + gbRendererType renderer_type; + union { + struct { + void * context; + i32 major; + i32 minor; + b16 core, compatible; + gbDllHandle dll_handle; + } opengl; + + // NOTE(bill): Software rendering + struct { +#if defined(GB_SYSTEM_WINDOWS) + BITMAPINFO win32_bmi; +#endif + void * memory; + isize memory_size; + i32 pitch; + i32 bits_per_pixel; + } sw_framebuffer; + }; + + gbKeyState keys[gbKey_Count]; + struct { + gbKeyState control; + gbKeyState alt; + gbKeyState shift; + } key_modifiers; + + Rune char_buffer[256]; + isize char_buffer_count; + + b32 mouse_clip; + i32 mouse_x, mouse_y; + i32 mouse_dx, mouse_dy; // NOTE(bill): Not raw mouse movement + i32 mouse_raw_dx, mouse_raw_dy; // NOTE(bill): Raw mouse movement + f32 mouse_wheel_delta; + gbKeyState mouse_buttons[gbMouseButton_Count]; + + gbGameController game_controllers[GB_MAX_GAME_CONTROLLER_COUNT]; + + f64 curr_time; + f64 dt_for_frame; + b32 quit_requested; + +#if defined(GB_SYSTEM_WINDOWS) + struct { + gbXInputGetStateProc *get_state; + gbXInputSetStateProc *set_state; + } xinput; +#endif +} gbPlatform; + + +typedef struct gbVideoMode { + i32 width, height; + i32 bits_per_pixel; +} gbVideoMode; + +GB_DEF gbVideoMode gb_video_mode (i32 width, i32 height, i32 bits_per_pixel); +GB_DEF b32 gb_video_mode_is_valid (gbVideoMode mode); +GB_DEF gbVideoMode gb_video_mode_get_desktop (void); +GB_DEF isize gb_video_mode_get_fullscreen_modes(gbVideoMode *modes, isize max_mode_count); // NOTE(bill): returns mode count +GB_DEF GB_COMPARE_PROC(gb_video_mode_cmp); // NOTE(bill): Sort smallest to largest (Ascending) +GB_DEF GB_COMPARE_PROC(gb_video_mode_dsc_cmp); // NOTE(bill): Sort largest to smallest (Descending) + + +// NOTE(bill): Software rendering +GB_DEF b32 gb_platform_init_with_software (gbPlatform *p, char const *window_title, i32 width, i32 height, u32 window_flags); +// NOTE(bill): OpenGL Rendering +GB_DEF b32 gb_platform_init_with_opengl (gbPlatform *p, char const *window_title, i32 width, i32 height, 
u32 window_flags, i32 major, i32 minor, b32 core, b32 compatible); +GB_DEF void gb_platform_update (gbPlatform *p); +GB_DEF void gb_platform_display (gbPlatform *p); +GB_DEF void gb_platform_destroy (gbPlatform *p); +GB_DEF void gb_platform_show_cursor (gbPlatform *p, b32 show); +GB_DEF void gb_platform_set_mouse_position (gbPlatform *p, i32 x, i32 y); +GB_DEF void gb_platform_set_controller_vibration (gbPlatform *p, isize index, f32 left_motor, f32 right_motor); +GB_DEF b32 gb_platform_has_clipboard_text (gbPlatform *p); +GB_DEF void gb_platform_set_clipboard_text (gbPlatform *p, char const *str); +GB_DEF char *gb_platform_get_clipboard_text (gbPlatform *p, gbAllocator a); +GB_DEF void gb_platform_set_window_position (gbPlatform *p, i32 x, i32 y); +GB_DEF void gb_platform_set_window_title (gbPlatform *p, char const *title, ...) GB_PRINTF_ARGS(2); +GB_DEF void gb_platform_toggle_fullscreen (gbPlatform *p, b32 fullscreen_desktop); +GB_DEF void gb_platform_toggle_borderless (gbPlatform *p); +GB_DEF void gb_platform_make_opengl_context_current(gbPlatform *p); +GB_DEF void gb_platform_show_window (gbPlatform *p); +GB_DEF void gb_platform_hide_window (gbPlatform *p); + + +#endif // GB_PLATFORM + +#if defined(__cplusplus) +} +#endif + +#endif // GB_INCLUDE_GB_H + + + + + + +//////////////////////////////////////////////////////////////// +// +// +// +// +// +// +// +// +// +// +// +// +// +// +// +// +// +// +// +// +// +// +// Implementation +// +// +// +// +// +// +// +// +// +// +// +// +// +// +// +// +// +// +// +// +// +// It's turtles all the way down! 
+//////////////////////////////////////////////////////////////// +#if defined(GB_IMPLEMENTATION) && !defined(GB_IMPLEMENTATION_DONE) +#define GB_IMPLEMENTATION_DONE + +#if defined(__cplusplus) +extern "C" { +#endif + + +#if defined(GB_COMPILER_MSVC) && !defined(_WINDOWS_) + //////////////////////////////////////////////////////////////// + // + // Bill's Mini Windows.h + // + // + + #define WINAPI __stdcall + #define WINAPIV __cdecl + #define CALLBACK __stdcall + #define MAX_PATH 260 + #define CCHDEVICENAME 32 + #define CCHFORMNAME 32 + + typedef unsigned long DWORD; + typedef int WINBOOL; + #ifndef XFree86Server + #ifndef __OBJC__ + typedef WINBOOL BOOL; + #else + #define BOOL WINBOOL + #endif + typedef unsigned char BYTE; + #endif + typedef unsigned short WORD; + typedef float FLOAT; + typedef int INT; + typedef unsigned int UINT; + typedef short SHORT; + typedef long LONG; + typedef long long LONGLONG; + typedef unsigned short USHORT; + typedef unsigned long ULONG; + typedef unsigned long long ULONGLONG; + + typedef UINT WPARAM; + typedef LONG LPARAM; + typedef LONG LRESULT; + #ifndef _HRESULT_DEFINED + typedef LONG HRESULT; + #define _HRESULT_DEFINED + #endif + #ifndef XFree86Server + typedef WORD ATOM; + #endif /* XFree86Server */ + typedef void *HANDLE; + typedef HANDLE HGLOBAL; + typedef HANDLE HLOCAL; + typedef HANDLE GLOBALHANDLE; + typedef HANDLE LOCALHANDLE; + typedef void *HGDIOBJ; + + #define DECLARE_HANDLE(name) typedef HANDLE name + DECLARE_HANDLE(HACCEL); + DECLARE_HANDLE(HBITMAP); + DECLARE_HANDLE(HBRUSH); + DECLARE_HANDLE(HCOLORSPACE); + DECLARE_HANDLE(HDC); + DECLARE_HANDLE(HGLRC); + DECLARE_HANDLE(HDESK); + DECLARE_HANDLE(HENHMETAFILE); + DECLARE_HANDLE(HFONT); + DECLARE_HANDLE(HICON); + DECLARE_HANDLE(HKEY); + typedef HKEY *PHKEY; + DECLARE_HANDLE(HMENU); + DECLARE_HANDLE(HMETAFILE); + DECLARE_HANDLE(HINSTANCE); + typedef HINSTANCE HMODULE; + DECLARE_HANDLE(HPALETTE); + DECLARE_HANDLE(HPEN); + DECLARE_HANDLE(HRGN); + DECLARE_HANDLE(HRSRC); + 
DECLARE_HANDLE(HSTR); + DECLARE_HANDLE(HTASK); + DECLARE_HANDLE(HWND); + DECLARE_HANDLE(HWINSTA); + DECLARE_HANDLE(HKL); + DECLARE_HANDLE(HRAWINPUT); + DECLARE_HANDLE(HMONITOR); + #undef DECLARE_HANDLE + + typedef int HFILE; + typedef HICON HCURSOR; + typedef DWORD COLORREF; + typedef int (WINAPI *FARPROC)(); + typedef int (WINAPI *NEARPROC)(); + typedef int (WINAPI *PROC)(); + typedef LRESULT (CALLBACK *WNDPROC)(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam); + + #if defined(_WIN64) + typedef unsigned __int64 ULONG_PTR; + typedef signed __int64 LONG_PTR; + #else + typedef unsigned long ULONG_PTR; + typedef signed long LONG_PTR; + #endif + typedef ULONG_PTR DWORD_PTR; + + typedef struct tagRECT { + LONG left; + LONG top; + LONG right; + LONG bottom; + } RECT; + typedef struct tagRECTL { + LONG left; + LONG top; + LONG right; + LONG bottom; + } RECTL; + typedef struct tagPOINT { + LONG x; + LONG y; + } POINT; + typedef struct tagSIZE { + LONG cx; + LONG cy; + } SIZE; + typedef struct tagPOINTS { + SHORT x; + SHORT y; + } POINTS; + typedef struct _SECURITY_ATTRIBUTES { + DWORD nLength; + HANDLE lpSecurityDescriptor; + BOOL bInheritHandle; + } SECURITY_ATTRIBUTES; + typedef enum _LOGICAL_PROCESSOR_RELATIONSHIP { + RelationProcessorCore, + RelationNumaNode, + RelationCache, + RelationProcessorPackage, + RelationGroup, + RelationAll = 0xffff + } LOGICAL_PROCESSOR_RELATIONSHIP; + typedef enum _PROCESSOR_CACHE_TYPE { + CacheUnified, + CacheInstruction, + CacheData, + CacheTrace + } PROCESSOR_CACHE_TYPE; + typedef struct _CACHE_DESCRIPTOR { + BYTE Level; + BYTE Associativity; + WORD LineSize; + DWORD Size; + PROCESSOR_CACHE_TYPE Type; + } CACHE_DESCRIPTOR; + typedef struct _SYSTEM_LOGICAL_PROCESSOR_INFORMATION { + ULONG_PTR ProcessorMask; + LOGICAL_PROCESSOR_RELATIONSHIP Relationship; + union { + struct { + BYTE Flags; + } ProcessorCore; + struct { + DWORD NodeNumber; + } NumaNode; + CACHE_DESCRIPTOR Cache; + ULONGLONG Reserved[2]; + }; + } 
SYSTEM_LOGICAL_PROCESSOR_INFORMATION; + typedef struct _MEMORY_BASIC_INFORMATION { + void *BaseAddress; + void *AllocationBase; + DWORD AllocationProtect; + usize RegionSize; + DWORD State; + DWORD Protect; + DWORD Type; + } MEMORY_BASIC_INFORMATION; + typedef struct _SYSTEM_INFO { + union { + DWORD dwOemId; + struct { + WORD wProcessorArchitecture; + WORD wReserved; + }; + }; + DWORD dwPageSize; + void * lpMinimumApplicationAddress; + void * lpMaximumApplicationAddress; + DWORD_PTR dwActiveProcessorMask; + DWORD dwNumberOfProcessors; + DWORD dwProcessorType; + DWORD dwAllocationGranularity; + WORD wProcessorLevel; + WORD wProcessorRevision; + } SYSTEM_INFO; + typedef union _LARGE_INTEGER { + struct { + DWORD LowPart; + LONG HighPart; + }; + struct { + DWORD LowPart; + LONG HighPart; + } u; + LONGLONG QuadPart; + } LARGE_INTEGER; + typedef union _ULARGE_INTEGER { + struct { + DWORD LowPart; + DWORD HighPart; + }; + struct { + DWORD LowPart; + DWORD HighPart; + } u; + ULONGLONG QuadPart; + } ULARGE_INTEGER; + + typedef struct _OVERLAPPED { + ULONG_PTR Internal; + ULONG_PTR InternalHigh; + union { + struct { + DWORD Offset; + DWORD OffsetHigh; + }; + void *Pointer; + }; + HANDLE hEvent; + } OVERLAPPED; + typedef struct _FILETIME { + DWORD dwLowDateTime; + DWORD dwHighDateTime; + } FILETIME; + typedef struct _WIN32_FIND_DATAW { + DWORD dwFileAttributes; + FILETIME ftCreationTime; + FILETIME ftLastAccessTime; + FILETIME ftLastWriteTime; + DWORD nFileSizeHigh; + DWORD nFileSizeLow; + DWORD dwReserved0; + DWORD dwReserved1; + wchar_t cFileName[MAX_PATH]; + wchar_t cAlternateFileName[14]; + } WIN32_FIND_DATAW; + typedef struct _WIN32_FILE_ATTRIBUTE_DATA { + DWORD dwFileAttributes; + FILETIME ftCreationTime; + FILETIME ftLastAccessTime; + FILETIME ftLastWriteTime; + DWORD nFileSizeHigh; + DWORD nFileSizeLow; + } WIN32_FILE_ATTRIBUTE_DATA; + typedef enum _GET_FILEEX_INFO_LEVELS { + GetFileExInfoStandard, + GetFileExMaxInfoLevel + } GET_FILEEX_INFO_LEVELS; + typedef struct 
tagRAWINPUTHEADER { + DWORD dwType; + DWORD dwSize; + HANDLE hDevice; + WPARAM wParam; + } RAWINPUTHEADER; + typedef struct tagRAWINPUTDEVICE { + USHORT usUsagePage; + USHORT usUsage; + DWORD dwFlags; + HWND hwndTarget; + } RAWINPUTDEVICE; + typedef struct tagRAWMOUSE { + WORD usFlags; + union { + ULONG ulButtons; + struct { + WORD usButtonFlags; + WORD usButtonData; + }; + }; + ULONG ulRawButtons; + LONG lLastX; + LONG lLastY; + ULONG ulExtraInformation; + } RAWMOUSE; + typedef struct tagRAWKEYBOARD { + WORD MakeCode; + WORD Flags; + WORD Reserved; + WORD VKey; + UINT Message; + ULONG ExtraInformation; + } RAWKEYBOARD; + typedef struct tagRAWHID { + DWORD dwSizeHid; + DWORD dwCount; + BYTE bRawData[1]; + } RAWHID; + typedef struct tagRAWINPUT { + RAWINPUTHEADER header; + union { + RAWMOUSE mouse; + RAWKEYBOARD keyboard; + RAWHID hid; + } data; + } RAWINPUT; + typedef struct tagWNDCLASSEXW { + UINT cbSize; + UINT style; + WNDPROC lpfnWndProc; + INT cbClsExtra; + INT cbWndExtra; + HINSTANCE hInstance; + HICON hIcon; + HCURSOR hCursor; + HANDLE hbrBackground; + wchar_t const *lpszMenuName; + wchar_t const *lpszClassName; + HICON hIconSm; + } WNDCLASSEXW; + typedef struct _POINTL { + LONG x; + LONG y; + } POINTL; + typedef struct _devicemodew { + wchar_t dmDeviceName[CCHDEVICENAME]; + WORD dmSpecVersion; + WORD dmDriverVersion; + WORD dmSize; + WORD dmDriverExtra; + DWORD dmFields; + union { + struct { + short dmOrientation; + short dmPaperSize; + short dmPaperLength; + short dmPaperWidth; + short dmScale; + short dmCopies; + short dmDefaultSource; + short dmPrintQuality; + }; + struct { + POINTL dmPosition; + DWORD dmDisplayOrientation; + DWORD dmDisplayFixedOutput; + }; + }; + short dmColor; + short dmDuplex; + short dmYResolution; + short dmTTOption; + short dmCollate; + wchar_t dmFormName[CCHFORMNAME]; + WORD dmLogPixels; + DWORD dmBitsPerPel; + DWORD dmPelsWidth; + DWORD dmPelsHeight; + union { + DWORD dmDisplayFlags; + DWORD dmNup; + }; + DWORD 
dmDisplayFrequency; + #if (WINVER >= 0x0400) + DWORD dmICMMethod; + DWORD dmICMIntent; + DWORD dmMediaType; + DWORD dmDitherType; + DWORD dmReserved1; + DWORD dmReserved2; + #if (WINVER >= 0x0500) || (_WIN32_WINNT >= 0x0400) + DWORD dmPanningWidth; + DWORD dmPanningHeight; + #endif + #endif + } DEVMODEW; + typedef struct tagPIXELFORMATDESCRIPTOR { + WORD nSize; + WORD nVersion; + DWORD dwFlags; + BYTE iPixelType; + BYTE cColorBits; + BYTE cRedBits; + BYTE cRedShift; + BYTE cGreenBits; + BYTE cGreenShift; + BYTE cBlueBits; + BYTE cBlueShift; + BYTE cAlphaBits; + BYTE cAlphaShift; + BYTE cAccumBits; + BYTE cAccumRedBits; + BYTE cAccumGreenBits; + BYTE cAccumBlueBits; + BYTE cAccumAlphaBits; + BYTE cDepthBits; + BYTE cStencilBits; + BYTE cAuxBuffers; + BYTE iLayerType; + BYTE bReserved; + DWORD dwLayerMask; + DWORD dwVisibleMask; + DWORD dwDamageMask; + } PIXELFORMATDESCRIPTOR; + typedef struct tagMSG { // msg + HWND hwnd; + UINT message; + WPARAM wParam; + LPARAM lParam; + DWORD time; + POINT pt; + } MSG; + typedef struct tagWINDOWPLACEMENT { + UINT length; + UINT flags; + UINT showCmd; + POINT ptMinPosition; + POINT ptMaxPosition; + RECT rcNormalPosition; + } WINDOWPLACEMENT; + typedef struct tagMONITORINFO { + DWORD cbSize; + RECT rcMonitor; + RECT rcWork; + DWORD dwFlags; + } MONITORINFO; + + #define INFINITE 0xffffffffl + #define INVALID_HANDLE_VALUE ((void *)(intptr)(-1)) + + + typedef DWORD WINAPI THREAD_START_ROUTINE(void *parameter); + + GB_DLL_IMPORT DWORD WINAPI GetLastError (void); + GB_DLL_IMPORT BOOL WINAPI CloseHandle (HANDLE object); + GB_DLL_IMPORT HANDLE WINAPI CreateSemaphoreA (SECURITY_ATTRIBUTES *semaphore_attributes, LONG initial_count, + LONG maximum_count, char const *name); + GB_DLL_IMPORT BOOL WINAPI ReleaseSemaphore (HANDLE semaphore, LONG release_count, LONG *previous_count); + GB_DLL_IMPORT DWORD WINAPI WaitForSingleObject(HANDLE handle, DWORD milliseconds); + GB_DLL_IMPORT HANDLE WINAPI CreateThread (SECURITY_ATTRIBUTES 
*semaphore_attributes, usize stack_size, + THREAD_START_ROUTINE *start_address, void *parameter, + DWORD creation_flags, DWORD *thread_id); + GB_DLL_IMPORT DWORD WINAPI GetThreadId (HANDLE handle); + GB_DLL_IMPORT void WINAPI RaiseException (DWORD, DWORD, DWORD, ULONG_PTR const *); + + + GB_DLL_IMPORT BOOL WINAPI GetLogicalProcessorInformation(SYSTEM_LOGICAL_PROCESSOR_INFORMATION *buffer, DWORD *return_length); + GB_DLL_IMPORT DWORD_PTR WINAPI SetThreadAffinityMask(HANDLE thread, DWORD_PTR check_mask); + GB_DLL_IMPORT HANDLE WINAPI GetCurrentThread(void); + + #define PAGE_NOACCESS 0x01 + #define PAGE_READONLY 0x02 + #define PAGE_READWRITE 0x04 + #define PAGE_WRITECOPY 0x08 + #define PAGE_EXECUTE 0x10 + #define PAGE_EXECUTE_READ 0x20 + #define PAGE_EXECUTE_READWRITE 0x40 + #define PAGE_EXECUTE_WRITECOPY 0x80 + #define PAGE_GUARD 0x100 + #define PAGE_NOCACHE 0x200 + #define PAGE_WRITECOMBINE 0x400 + + #define MEM_COMMIT 0x1000 + #define MEM_RESERVE 0x2000 + #define MEM_DECOMMIT 0x4000 + #define MEM_RELEASE 0x8000 + #define MEM_FREE 0x10000 + #define MEM_PRIVATE 0x20000 + #define MEM_MAPPED 0x40000 + #define MEM_RESET 0x80000 + #define MEM_TOP_DOWN 0x100000 + #define MEM_LARGE_PAGES 0x20000000 + #define MEM_4MB_PAGES 0x80000000 + + + + + GB_DLL_IMPORT void * WINAPI VirtualAlloc (void *addr, usize size, DWORD allocation_type, DWORD protect); + GB_DLL_IMPORT usize WINAPI VirtualQuery (void const *address, MEMORY_BASIC_INFORMATION *buffer, usize length); + GB_DLL_IMPORT BOOL WINAPI VirtualFree (void *address, usize size, DWORD free_type); + GB_DLL_IMPORT void WINAPI GetSystemInfo(SYSTEM_INFO *system_info); + + + #ifndef VK_UNKNOWN + #define VK_UNKNOWN 0 + #define VK_LBUTTON 0x01 + #define VK_RBUTTON 0x02 + #define VK_CANCEL 0x03 + #define VK_MBUTTON 0x04 + #define VK_XBUTTON1 0x05 + #define VK_XBUTTON2 0x06 + #define VK_BACK 0x08 + #define VK_TAB 0x09 + #define VK_CLEAR 0x0C + #define VK_RETURN 0x0D + #define VK_SHIFT 0x10 + #define VK_CONTROL 0x11 // CTRL key + #define 
VK_MENU 0x12 // ALT key + #define VK_PAUSE 0x13 // PAUSE key + #define VK_CAPITAL 0x14 // CAPS LOCK key + #define VK_KANA 0x15 // Input Method Editor (IME) Kana mode + #define VK_HANGUL 0x15 // IME Hangul mode + #define VK_JUNJA 0x17 // IME Junja mode + #define VK_FINAL 0x18 // IME final mode + #define VK_HANJA 0x19 // IME Hanja mode + #define VK_KANJI 0x19 // IME Kanji mode + #define VK_ESCAPE 0x1B // ESC key + #define VK_CONVERT 0x1C // IME convert + #define VK_NONCONVERT 0x1D // IME nonconvert + #define VK_ACCEPT 0x1E // IME accept + #define VK_MODECHANGE 0x1F // IME mode change request + #define VK_SPACE 0x20 // SPACE key + #define VK_PRIOR 0x21 // PAGE UP key + #define VK_NEXT 0x22 // PAGE DOWN key + #define VK_END 0x23 // END key + #define VK_HOME 0x24 // HOME key + #define VK_LEFT 0x25 // LEFT ARROW key + #define VK_UP 0x26 // UP ARROW key + #define VK_RIGHT 0x27 // RIGHT ARROW key + #define VK_DOWN 0x28 // DOWN ARROW key + #define VK_SELECT 0x29 // SELECT key + #define VK_PRINT 0x2A // PRINT key + #define VK_EXECUTE 0x2B // EXECUTE key + #define VK_SNAPSHOT 0x2C // PRINT SCREEN key + #define VK_INSERT 0x2D // INS key + #define VK_DELETE 0x2E // DEL key + #define VK_HELP 0x2F // HELP key + #define VK_0 0x30 + #define VK_1 0x31 + #define VK_2 0x32 + #define VK_3 0x33 + #define VK_4 0x34 + #define VK_5 0x35 + #define VK_6 0x36 + #define VK_7 0x37 + #define VK_8 0x38 + #define VK_9 0x39 + #define VK_A 0x41 + #define VK_B 0x42 + #define VK_C 0x43 + #define VK_D 0x44 + #define VK_E 0x45 + #define VK_F 0x46 + #define VK_G 0x47 + #define VK_H 0x48 + #define VK_I 0x49 + #define VK_J 0x4A + #define VK_K 0x4B + #define VK_L 0x4C + #define VK_M 0x4D + #define VK_N 0x4E + #define VK_O 0x4F + #define VK_P 0x50 + #define VK_Q 0x51 + #define VK_R 0x52 + #define VK_S 0x53 + #define VK_T 0x54 + #define VK_U 0x55 + #define VK_V 0x56 + #define VK_W 0x57 + #define VK_X 0x58 + #define VK_Y 0x59 + #define VK_Z 0x5A + #define VK_LWIN 0x5B // Left Windows key (Microsoft Natural 
keyboard) + #define VK_RWIN 0x5C // Right Windows key (Natural keyboard) + #define VK_APPS 0x5D // Applications key (Natural keyboard) + #define VK_SLEEP 0x5F // Computer Sleep key + // Num pad keys + #define VK_NUMPAD0 0x60 + #define VK_NUMPAD1 0x61 + #define VK_NUMPAD2 0x62 + #define VK_NUMPAD3 0x63 + #define VK_NUMPAD4 0x64 + #define VK_NUMPAD5 0x65 + #define VK_NUMPAD6 0x66 + #define VK_NUMPAD7 0x67 + #define VK_NUMPAD8 0x68 + #define VK_NUMPAD9 0x69 + #define VK_MULTIPLY 0x6A + #define VK_ADD 0x6B + #define VK_SEPARATOR 0x6C + #define VK_SUBTRACT 0x6D + #define VK_DECIMAL 0x6E + #define VK_DIVIDE 0x6F + #define VK_F1 0x70 + #define VK_F2 0x71 + #define VK_F3 0x72 + #define VK_F4 0x73 + #define VK_F5 0x74 + #define VK_F6 0x75 + #define VK_F7 0x76 + #define VK_F8 0x77 + #define VK_F9 0x78 + #define VK_F10 0x79 + #define VK_F11 0x7A + #define VK_F12 0x7B + #define VK_F13 0x7C + #define VK_F14 0x7D + #define VK_F15 0x7E + #define VK_F16 0x7F + #define VK_F17 0x80 + #define VK_F18 0x81 + #define VK_F19 0x82 + #define VK_F20 0x83 + #define VK_F21 0x84 + #define VK_F22 0x85 + #define VK_F23 0x86 + #define VK_F24 0x87 + #define VK_NUMLOCK 0x90 + #define VK_SCROLL 0x91 + #define VK_LSHIFT 0xA0 + #define VK_RSHIFT 0xA1 + #define VK_LCONTROL 0xA2 + #define VK_RCONTROL 0xA3 + #define VK_LMENU 0xA4 + #define VK_RMENU 0xA5 + #define VK_BROWSER_BACK 0xA6 // Windows 2000/XP: Browser Back key + #define VK_BROWSER_FORWARD 0xA7 // Windows 2000/XP: Browser Forward key + #define VK_BROWSER_REFRESH 0xA8 // Windows 2000/XP: Browser Refresh key + #define VK_BROWSER_STOP 0xA9 // Windows 2000/XP: Browser Stop key + #define VK_BROWSER_SEARCH 0xAA // Windows 2000/XP: Browser Search key + #define VK_BROWSER_FAVORITES 0xAB // Windows 2000/XP: Browser Favorites key + #define VK_BROWSER_HOME 0xAC // Windows 2000/XP: Browser Start and Home key + #define VK_VOLUME_MUTE 0xAD // Windows 2000/XP: Volume Mute key + #define VK_VOLUME_DOWN 0xAE // Windows 2000/XP: Volume Down key + #define 
VK_VOLUME_UP 0xAF // Windows 2000/XP: Volume Up key + #define VK_MEDIA_NEXT_TRACK 0xB0 // Windows 2000/XP: Next Track key + #define VK_MEDIA_PREV_TRACK 0xB1 // Windows 2000/XP: Previous Track key + #define VK_MEDIA_STOP 0xB2 // Windows 2000/XP: Stop Media key + #define VK_MEDIA_PLAY_PAUSE 0xB3 // Windows 2000/XP: Play/Pause Media key + #define VK_MEDIA_LAUNCH_MAIL 0xB4 // Windows 2000/XP: Start Mail key + #define VK_MEDIA_LAUNCH_MEDIA_SELECT 0xB5 // Windows 2000/XP: Select Media key + #define VK_MEDIA_LAUNCH_APP1 0xB6 // VK_LAUNCH_APP1 (B6) Windows 2000/XP: Start Application 1 key + #define VK_MEDIA_LAUNCH_APP2 0xB7 // VK_LAUNCH_APP2 (B7) Windows 2000/XP: Start Application 2 key + #define VK_OEM_1 0xBA + #define VK_OEM_PLUS 0xBB + #define VK_OEM_COMMA 0xBC + #define VK_OEM_MINUS 0xBD + #define VK_OEM_PERIOD 0xBE + #define VK_OEM_2 0xBF + #define VK_OEM_3 0xC0 + #define VK_OEM_4 0xDB + #define VK_OEM_5 0xDC + #define VK_OEM_6 0xDD + #define VK_OEM_7 0xDE + #define VK_OEM_8 0xDF + #define VK_OEM_102 0xE2 + #define VK_PROCESSKEY 0xE5 + #define VK_PACKET 0xE7 + #define VK_ATTN 0xF6 // Attn key + #define VK_CRSEL 0xF7 // CrSel key + #define VK_EXSEL 0xF8 // ExSel key + #define VK_EREOF 0xF9 // Erase EOF key + #define VK_PLAY 0xFA // Play key + #define VK_ZOOM 0xFB // Zoom key + #define VK_NONAME 0xFC // Reserved for future use + #define VK_PA1 0xFD // VK_PA1 (FD) PA1 key + #define VK_OEM_CLEAR 0xFE // Clear key + #endif // VK_UNKNOWN + + + + #define GENERIC_READ 0x80000000 + #define GENERIC_WRITE 0x40000000 + #define GENERIC_EXECUTE 0x20000000 + #define GENERIC_ALL 0x10000000 + #define FILE_SHARE_READ 0x00000001 + #define FILE_SHARE_WRITE 0x00000002 + #define FILE_SHARE_DELETE 0x00000004 + #define CREATE_NEW 1 + #define CREATE_ALWAYS 2 + #define OPEN_EXISTING 3 + #define OPEN_ALWAYS 4 + #define TRUNCATE_EXISTING 5 + #define FILE_ATTRIBUTE_READONLY 0x00000001 + #define FILE_ATTRIBUTE_NORMAL 0x00000080 + #define FILE_ATTRIBUTE_TEMPORARY 0x00000100 + #define 
ERROR_FILE_NOT_FOUND 2l + #define ERROR_ACCESS_DENIED 5L + #define ERROR_NO_MORE_FILES 18l + #define ERROR_FILE_EXISTS 80l + #define ERROR_ALREADY_EXISTS 183l + #define STD_INPUT_HANDLE ((DWORD)-10) + #define STD_OUTPUT_HANDLE ((DWORD)-11) + #define STD_ERROR_HANDLE ((DWORD)-12) + + GB_DLL_IMPORT int MultiByteToWideChar(UINT code_page, DWORD flags, char const * multi_byte_str, int multi_byte_len, wchar_t const *wide_char_str, int wide_char_len); + GB_DLL_IMPORT int WideCharToMultiByte(UINT code_page, DWORD flags, wchar_t const *wide_char_str, int wide_char_len, char const * multi_byte_str, int multi_byte_len); + GB_DLL_IMPORT BOOL WINAPI SetFilePointerEx(HANDLE file, LARGE_INTEGER distance_to_move, + LARGE_INTEGER *new_file_pointer, DWORD move_method); + GB_DLL_IMPORT BOOL WINAPI ReadFile (HANDLE file, void *buffer, DWORD bytes_to_read, DWORD *bytes_read, OVERLAPPED *overlapped); + GB_DLL_IMPORT BOOL WINAPI WriteFile (HANDLE file, void const *buffer, DWORD bytes_to_write, DWORD *bytes_written, OVERLAPPED *overlapped); + GB_DLL_IMPORT HANDLE WINAPI CreateFileW (wchar_t const *path, DWORD desired_access, DWORD share_mode, + SECURITY_ATTRIBUTES *, DWORD creation_disposition, + DWORD flags_and_attributes, HANDLE template_file); + GB_DLL_IMPORT HANDLE WINAPI GetStdHandle (DWORD std_handle); + GB_DLL_IMPORT BOOL WINAPI GetFileSizeEx (HANDLE file, LARGE_INTEGER *size); + GB_DLL_IMPORT BOOL WINAPI SetEndOfFile (HANDLE file); + GB_DLL_IMPORT HANDLE WINAPI FindFirstFileW (wchar_t const *path, WIN32_FIND_DATAW *data); + GB_DLL_IMPORT BOOL WINAPI FindClose (HANDLE find_file); + GB_DLL_IMPORT BOOL WINAPI GetFileAttributesExW(wchar_t const *path, GET_FILEEX_INFO_LEVELS info_level_id, WIN32_FILE_ATTRIBUTE_DATA *data); + GB_DLL_IMPORT BOOL WINAPI CopyFileW(wchar_t const *old_f, wchar_t const *new_f, BOOL fail_if_exists); + GB_DLL_IMPORT BOOL WINAPI MoveFileW(wchar_t const *old_f, wchar_t const *new_f); + + GB_DLL_IMPORT HMODULE WINAPI LoadLibraryA (char const *filename); + 
GB_DLL_IMPORT BOOL WINAPI FreeLibrary (HMODULE module); + GB_DLL_IMPORT FARPROC WINAPI GetProcAddress(HMODULE module, char const *name); + + GB_DLL_IMPORT BOOL WINAPI QueryPerformanceFrequency(LARGE_INTEGER *frequency); + GB_DLL_IMPORT BOOL WINAPI QueryPerformanceCounter (LARGE_INTEGER *counter); + GB_DLL_IMPORT void WINAPI GetSystemTimeAsFileTime (FILETIME *system_time_as_file_time); + GB_DLL_IMPORT void WINAPI Sleep(DWORD milliseconds); + GB_DLL_IMPORT void WINAPI ExitProcess(UINT exit_code); + + GB_DLL_IMPORT BOOL WINAPI SetEnvironmentVariableA(char const *name, char const *value); + + + #define WM_NULL 0x0000 + #define WM_CREATE 0x0001 + #define WM_DESTROY 0x0002 + #define WM_MOVE 0x0003 + #define WM_SIZE 0x0005 + #define WM_ACTIVATE 0x0006 + #define WM_SETFOCUS 0x0007 + #define WM_KILLFOCUS 0x0008 + #define WM_ENABLE 0x000A + #define WM_SETREDRAW 0x000B + #define WM_SETTEXT 0x000C + #define WM_GETTEXT 0x000D + #define WM_GETTEXTLENGTH 0x000E + #define WM_PAINT 0x000F + #define WM_CLOSE 0x0010 + #define WM_QUERYENDSESSION 0x0011 + #define WM_QUERYOPEN 0x0013 + #define WM_ENDSESSION 0x0016 + #define WM_QUIT 0x0012 + #define WM_ERASEBKGND 0x0014 + #define WM_SYSCOLORCHANGE 0x0015 + #define WM_SHOWWINDOW 0x0018 + #define WM_WININICHANGE 0x001A + #define WM_SETTINGCHANGE WM_WININICHANGE + #define WM_DEVMODECHANGE 0x001B + #define WM_ACTIVATEAPP 0x001C + #define WM_FONTCHANGE 0x001D + #define WM_TIMECHANGE 0x001E + #define WM_CANCELMODE 0x001F + #define WM_SETCURSOR 0x0020 + #define WM_MOUSEACTIVATE 0x0021 + #define WM_CHILDACTIVATE 0x0022 + #define WM_QUEUESYNC 0x0023 + #define WM_GETMINMAXINFO 0x0024 + #define WM_PAINTICON 0x0026 + #define WM_ICONERASEBKGND 0x0027 + #define WM_NEXTDLGCTL 0x0028 + #define WM_SPOOLERSTATUS 0x002A + #define WM_DRAWITEM 0x002B + #define WM_MEASUREITEM 0x002C + #define WM_DELETEITEM 0x002D + #define WM_VKEYTOITEM 0x002E + #define WM_CHARTOITEM 0x002F + #define WM_SETFONT 0x0030 + #define WM_GETFONT 0x0031 + #define WM_SETHOTKEY 0x0032 
+ #define WM_GETHOTKEY 0x0033 + #define WM_QUERYDRAGICON 0x0037 + #define WM_COMPAREITEM 0x0039 + #define WM_GETOBJECT 0x003D + #define WM_COMPACTING 0x0041 + #define WM_COMMNOTIFY 0x0044 /* no longer suported */ + #define WM_WINDOWPOSCHANGING 0x0046 + #define WM_WINDOWPOSCHANGED 0x0047 + #define WM_POWER 0x0048 + #define WM_COPYDATA 0x004A + #define WM_CANCELJOURNAL 0x004B + #define WM_NOTIFY 0x004E + #define WM_INPUTLANGCHANGEREQUEST 0x0050 + #define WM_INPUTLANGCHANGE 0x0051 + #define WM_TCARD 0x0052 + #define WM_HELP 0x0053 + #define WM_USERCHANGED 0x0054 + #define WM_NOTIFYFORMAT 0x0055 + #define WM_CONTEXTMENU 0x007B + #define WM_STYLECHANGING 0x007C + #define WM_STYLECHANGED 0x007D + #define WM_DISPLAYCHANGE 0x007E + #define WM_GETICON 0x007F + #define WM_SETICON 0x0080 + #define WM_INPUT 0x00FF + #define WM_KEYFIRST 0x0100 + #define WM_KEYDOWN 0x0100 + #define WM_KEYUP 0x0101 + #define WM_CHAR 0x0102 + #define WM_DEADCHAR 0x0103 + #define WM_SYSKEYDOWN 0x0104 + #define WM_SYSKEYUP 0x0105 + #define WM_SYSCHAR 0x0106 + #define WM_SYSDEADCHAR 0x0107 + #define WM_UNICHAR 0x0109 + #define WM_KEYLAST 0x0109 + #define WM_APP 0x8000 + + + #define RID_INPUT 0x10000003 + + #define RIM_TYPEMOUSE 0x00000000 + #define RIM_TYPEKEYBOARD 0x00000001 + #define RIM_TYPEHID 0x00000002 + + #define RI_KEY_MAKE 0x0000 + #define RI_KEY_BREAK 0x0001 + #define RI_KEY_E0 0x0002 + #define RI_KEY_E1 0x0004 + #define RI_MOUSE_WHEEL 0x0400 + + #define RIDEV_NOLEGACY 0x00000030 + + #define MAPVK_VK_TO_VSC 0 + #define MAPVK_VSC_TO_VK 1 + #define MAPVK_VK_TO_CHAR 2 + #define MAPVK_VSC_TO_VK_EX 3 + + GB_DLL_IMPORT BOOL WINAPI RegisterRawInputDevices(RAWINPUTDEVICE const *raw_input_devices, UINT num_devices, UINT size); + GB_DLL_IMPORT UINT WINAPI GetRawInputData(HRAWINPUT raw_input, UINT ui_command, void *data, UINT *size, UINT size_header); + GB_DLL_IMPORT UINT WINAPI MapVirtualKeyW(UINT code, UINT map_type); + + + #define CS_DBLCLKS 0x0008 + #define CS_VREDRAW 0x0001 + #define CS_HREDRAW 
0x0002 + + #define MB_OK 0x0000l + #define MB_ICONSTOP 0x0010l + #define MB_YESNO 0x0004l + #define MB_HELP 0x4000l + #define MB_ICONEXCLAMATION 0x0030l + + GB_DLL_IMPORT LRESULT WINAPI DefWindowProcW(HWND wnd, UINT msg, WPARAM wParam, LPARAM lParam); + GB_DLL_IMPORT HGDIOBJ WINAPI GetStockObject(int object); + GB_DLL_IMPORT HMODULE WINAPI GetModuleHandleW(wchar_t const *); + GB_DLL_IMPORT ATOM WINAPI RegisterClassExW(WNDCLASSEXW const *wcx); // u16 == ATOM + GB_DLL_IMPORT int WINAPI MessageBoxW(void *wnd, wchar_t const *text, wchar_t const *caption, unsigned int type); + + + #define DM_BITSPERPEL 0x00040000l + #define DM_PELSWIDTH 0x00080000l + #define DM_PELSHEIGHT 0x00100000l + + #define CDS_FULLSCREEN 0x4 + #define DISP_CHANGE_SUCCESSFUL 0 + #define IDYES 6 + + #define WS_VISIBLE 0x10000000 + #define WS_THICKFRAME 0x00040000 + #define WS_MAXIMIZE 0x01000000 + #define WS_MAXIMIZEBOX 0x00010000 + #define WS_MINIMIZE 0x20000000 + #define WS_MINIMIZEBOX 0x00020000 + #define WS_POPUP 0x80000000 + #define WS_OVERLAPPED 0 + #define WS_OVERLAPPEDWINDOW 0xcf0000 + #define CW_USEDEFAULT 0x80000000 + #define WS_BORDER 0x800000 + #define WS_CAPTION 0xc00000 + #define WS_SYSMENU 0x80000 + + #define HWND_NOTOPMOST (HWND)(-2) + #define HWND_TOPMOST (HWND)(-1) + #define HWND_TOP (HWND)(+0) + #define HWND_BOTTOM (HWND)(+1) + #define SWP_NOSIZE 0x0001 + #define SWP_NOMOVE 0x0002 + #define SWP_NOZORDER 0x0004 + #define SWP_NOREDRAW 0x0008 + #define SWP_NOACTIVATE 0x0010 + #define SWP_FRAMECHANGED 0x0020 + #define SWP_SHOWWINDOW 0x0040 + #define SWP_HIDEWINDOW 0x0080 + #define SWP_NOCOPYBITS 0x0100 + #define SWP_NOOWNERZORDER 0x0200 + #define SWP_NOSENDCHANGING 0x0400 + + #define SW_HIDE 0 + #define SW_SHOWNORMAL 1 + #define SW_NORMAL 1 + #define SW_SHOWMINIMIZED 2 + #define SW_SHOWMAXIMIZED 3 + #define SW_MAXIMIZE 3 + #define SW_SHOWNOACTIVATE 4 + #define SW_SHOW 5 + #define SW_MINIMIZE 6 + #define SW_SHOWMINNOACTIVE 7 + #define SW_SHOWNA 8 + #define SW_RESTORE 9 + #define 
SW_SHOWDEFAULT 10 + #define SW_FORCEMINIMIZE 11 + #define SW_MAX 11 + + #define ENUM_CURRENT_SETTINGS cast(DWORD)-1 + #define ENUM_REGISTRY_SETTINGS cast(DWORD)-2 + + GB_DLL_IMPORT LONG WINAPI ChangeDisplaySettingsW(DEVMODEW *dev_mode, DWORD flags); + GB_DLL_IMPORT BOOL WINAPI AdjustWindowRect(RECT *rect, DWORD style, BOOL enu); + GB_DLL_IMPORT HWND WINAPI CreateWindowExW(DWORD ex_style, wchar_t const *class_name, wchar_t const *window_name, + DWORD style, int x, int y, int width, int height, HWND wnd_parent, + HMENU menu, HINSTANCE instance, void *param); + GB_DLL_IMPORT HMODULE WINAPI GetModuleHandleW(wchar_t const *); + GB_DLL_IMPORT HDC GetDC(HANDLE); + GB_DLL_IMPORT BOOL WINAPI GetWindowPlacement(HWND hWnd, WINDOWPLACEMENT *lpwndpl); + GB_DLL_IMPORT BOOL GetMonitorInfoW(HMONITOR hMonitor, MONITORINFO *lpmi); + GB_DLL_IMPORT HMONITOR MonitorFromWindow(HWND hwnd, DWORD dwFlags); + GB_DLL_IMPORT LONG WINAPI SetWindowLongW(HWND hWnd, int nIndex, LONG dwNewLong); + GB_DLL_IMPORT BOOL WINAPI SetWindowPos(HWND hWnd, HWND hWndInsertAfter, int X, int Y, int cx, int cy, UINT uFlags); + GB_DLL_IMPORT BOOL WINAPI SetWindowPlacement(HWND hWnd, WINDOWPLACEMENT const *lpwndpl); + GB_DLL_IMPORT BOOL WINAPI ShowWindow(HWND hWnd, int nCmdShow); + GB_DLL_IMPORT LONG_PTR WINAPI GetWindowLongPtrW(HWND wnd, int index); + + GB_DLL_IMPORT BOOL EnumDisplaySettingsW(wchar_t const *lpszDeviceName, DWORD iModeNum, DEVMODEW *lpDevMode); + GB_DLL_IMPORT void * WINAPI GlobalLock(HGLOBAL hMem); + GB_DLL_IMPORT BOOL WINAPI GlobalUnlock(HGLOBAL hMem); + GB_DLL_IMPORT HGLOBAL WINAPI GlobalAlloc(UINT uFlags, usize dwBytes); + GB_DLL_IMPORT HANDLE WINAPI GetClipboardData(UINT uFormat); + GB_DLL_IMPORT BOOL WINAPI IsClipboardFormatAvailable(UINT format); + GB_DLL_IMPORT BOOL WINAPI OpenClipboard(HWND hWndNewOwner); + GB_DLL_IMPORT BOOL WINAPI EmptyClipboard(void); + GB_DLL_IMPORT BOOL WINAPI CloseClipboard(void); + GB_DLL_IMPORT HANDLE WINAPI SetClipboardData(UINT uFormat, HANDLE hMem); + + 
#define PFD_TYPE_RGBA 0 + #define PFD_TYPE_COLORINDEX 1 + #define PFD_MAIN_PLANE 0 + #define PFD_OVERLAY_PLANE 1 + #define PFD_UNDERLAY_PLANE (-1) + #define PFD_DOUBLEBUFFER 1 + #define PFD_STEREO 2 + #define PFD_DRAW_TO_WINDOW 4 + #define PFD_DRAW_TO_BITMAP 8 + #define PFD_SUPPORT_GDI 16 + #define PFD_SUPPORT_OPENGL 32 + #define PFD_GENERIC_FORMAT 64 + #define PFD_NEED_PALETTE 128 + #define PFD_NEED_SYSTEM_PALETTE 0x00000100 + #define PFD_SWAP_EXCHANGE 0x00000200 + #define PFD_SWAP_COPY 0x00000400 + #define PFD_SWAP_LAYER_BUFFERS 0x00000800 + #define PFD_GENERIC_ACCELERATED 0x00001000 + #define PFD_DEPTH_DONTCARE 0x20000000 + #define PFD_DOUBLEBUFFER_DONTCARE 0x40000000 + #define PFD_STEREO_DONTCARE 0x80000000 + + #define GWLP_USERDATA -21 + + #define GWL_ID -12 + #define GWL_STYLE -16 + + GB_DLL_IMPORT BOOL WINAPI SetPixelFormat (HDC hdc, int pixel_format, PIXELFORMATDESCRIPTOR const *pfd); + GB_DLL_IMPORT int WINAPI ChoosePixelFormat(HDC hdc, PIXELFORMATDESCRIPTOR const *pfd); + GB_DLL_IMPORT HGLRC WINAPI wglCreateContext (HDC hdc); + GB_DLL_IMPORT BOOL WINAPI wglMakeCurrent (HDC hdc, HGLRC hglrc); + GB_DLL_IMPORT PROC WINAPI wglGetProcAddress(char const *str); + GB_DLL_IMPORT BOOL WINAPI wglDeleteContext (HGLRC hglrc); + + GB_DLL_IMPORT BOOL WINAPI SetForegroundWindow(HWND hWnd); + GB_DLL_IMPORT HWND WINAPI SetFocus(HWND hWnd); + GB_DLL_IMPORT LONG_PTR WINAPI SetWindowLongPtrW(HWND hWnd, int nIndex, LONG_PTR dwNewLong); + GB_DLL_IMPORT BOOL WINAPI GetClientRect(HWND hWnd, RECT *lpRect); + GB_DLL_IMPORT BOOL WINAPI IsIconic(HWND hWnd); + GB_DLL_IMPORT HWND WINAPI GetFocus(void); + GB_DLL_IMPORT int WINAPI ShowCursor(BOOL bShow); + GB_DLL_IMPORT SHORT WINAPI GetAsyncKeyState(int key); + GB_DLL_IMPORT BOOL WINAPI GetCursorPos(POINT *lpPoint); + GB_DLL_IMPORT BOOL WINAPI SetCursorPos(int x, int y); + GB_DLL_IMPORT BOOL ScreenToClient(HWND hWnd, POINT *lpPoint); + GB_DLL_IMPORT BOOL ClientToScreen(HWND hWnd, POINT *lpPoint); + GB_DLL_IMPORT BOOL WINAPI 
MoveWindow(HWND hWnd, int X, int Y, int nWidth, int nHeight, BOOL bRepaint); + GB_DLL_IMPORT BOOL WINAPI SetWindowTextW(HWND hWnd, wchar_t const *lpString); + GB_DLL_IMPORT DWORD WINAPI GetWindowLongW(HWND hWnd, int nIndex); + + + + + #define PM_NOREMOVE 0 + #define PM_REMOVE 1 + + GB_DLL_IMPORT BOOL WINAPI PeekMessageW(MSG *lpMsg, HWND hWnd, UINT wMsgFilterMin, UINT wMsgFilterMax, UINT wRemoveMsg); + GB_DLL_IMPORT BOOL WINAPI TranslateMessage(MSG const *lpMsg); + GB_DLL_IMPORT LRESULT WINAPI DispatchMessageW(MSG const *lpMsg); + + typedef enum + { + DIB_RGB_COLORS = 0x00, + DIB_PAL_COLORS = 0x01, + DIB_PAL_INDICES = 0x02 + } DIBColors; + + #define SRCCOPY (u32)0x00CC0020 + #define SRCPAINT (u32)0x00EE0086 + #define SRCAND (u32)0x008800C6 + #define SRCINVERT (u32)0x00660046 + #define SRCERASE (u32)0x00440328 + #define NOTSRCCOPY (u32)0x00330008 + #define NOTSRCERASE (u32)0x001100A6 + #define MERGECOPY (u32)0x00C000CA + #define MERGEPAINT (u32)0x00BB0226 + #define PATCOPY (u32)0x00F00021 + #define PATPAINT (u32)0x00FB0A09 + #define PATINVERT (u32)0x005A0049 + #define DSTINVERT (u32)0x00550009 + #define BLACKNESS (u32)0x00000042 + #define WHITENESS (u32)0x00FF0062 + + GB_DLL_IMPORT BOOL WINAPI SwapBuffers(HDC hdc); + GB_DLL_IMPORT BOOL WINAPI DestroyWindow(HWND hWnd); + GB_DLL_IMPORT int StretchDIBits(HDC hdc, int XDest, int YDest, int nDestWidth, int nDestHeight, + int XSrc, int YSrc, int nSrcWidth, int nSrcHeight, + void const *lpBits, /*BITMAPINFO*/void const *lpBitsInfo, UINT iUsage, DWORD dwRop); + // IMPORTANT TODO(bill): FIX THIS!!!! 
+#endif // Bill's Mini Windows.h + + + +#if defined(__GCC__) || defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wattributes" +#pragma GCC diagnostic ignored "-Wmissing-braces" +#endif + +#if defined(_MSC_VER) +#pragma warning(push) +#pragma warning(disable:4201) +#pragma warning(disable:4127) // Conditional expression is constant +#endif + +void gb_assert_handler(char const *prefix, char const *condition, char const *file, i32 line, char const *msg, ...) { + gb_printf_err("%s(%d): %s: ", file, line, prefix); + if (condition) + gb_printf_err( "`%s` ", condition); + if (msg) { + va_list va; + va_start(va, msg); + gb_printf_err_va(msg, va); + va_end(va); + } + gb_printf_err("\n"); +} + +b32 gb_is_power_of_two(isize x) { + if (x <= 0) + return false; + return !(x & (x-1)); +} + +gb_inline void *gb_align_forward(void *ptr, isize alignment) { + uintptr p; + + GB_ASSERT(gb_is_power_of_two(alignment)); + + p = cast(uintptr)ptr; + return cast(void *)((p + (alignment-1)) &~ (alignment-1)); +} + + + +gb_inline void * gb_pointer_add (void *ptr, isize bytes) { return cast(void *)(cast(u8 *)ptr + bytes); } +gb_inline void * gb_pointer_sub (void *ptr, isize bytes) { return cast(void *)(cast(u8 *)ptr - bytes); } +gb_inline void const *gb_pointer_add_const(void const *ptr, isize bytes) { return cast(void const *)(cast(u8 const *)ptr + bytes); } +gb_inline void const *gb_pointer_sub_const(void const *ptr, isize bytes) { return cast(void const *)(cast(u8 const *)ptr - bytes); } +gb_inline isize gb_pointer_diff (void const *begin, void const *end) { return cast(isize)(cast(u8 const *)end - cast(u8 const *)begin); } + +gb_inline void gb_zero_size(void *ptr, isize size) { gb_memset(ptr, 0, size); } + + +#if defined(_MSC_VER) +#pragma intrinsic(__movsb) +#endif + +gb_inline void *gb_memcopy(void *dest, void const *source, isize n) { +#if defined(_MSC_VER) + if (dest == NULL) { + return NULL; + } + // TODO(bill): Is this good enough? 
+ __movsb(cast(u8 *)dest, cast(u8 *)source, n); +// #elif defined(GB_SYSTEM_OSX) || defined(GB_SYSTEM_UNIX) + // NOTE(zangent): I assume there's a reason this isn't being used elsewhere, + // but casting pointers as arguments to an __asm__ call is considered an + // error on MacOS and (I think) Linux + // TODO(zangent): Figure out how to refactor the asm code so it works on MacOS, + // since this is probably not the way the author intended this to work. + // memcpy(dest, source, n); +#elif defined(GB_CPU_X86) + if (dest == NULL) { + return NULL; + } + + void *dest_copy = dest; + __asm__ __volatile__("rep movsb" : "+D"(dest_copy), "+S"(source), "+c"(n) : : "memory"); +#else + u8 *d = cast(u8 *)dest; + u8 const *s = cast(u8 const *)source; + u32 w, x; + + if (dest == NULL) { + return NULL; + } + + for (; cast(uintptr)s % 4 && n; n--) { + *d++ = *s++; + } + + if (cast(uintptr)d % 4 == 0) { + for (; n >= 16; + s += 16, d += 16, n -= 16) { + *cast(u32 *)(d+ 0) = *cast(u32 *)(s+ 0); + *cast(u32 *)(d+ 4) = *cast(u32 *)(s+ 4); + *cast(u32 *)(d+ 8) = *cast(u32 *)(s+ 8); + *cast(u32 *)(d+12) = *cast(u32 *)(s+12); + } + if (n & 8) { + *cast(u32 *)(d+0) = *cast(u32 *)(s+0); + *cast(u32 *)(d+4) = *cast(u32 *)(s+4); + d += 8; + s += 8; + } + if (n&4) { + *cast(u32 *)(d+0) = *cast(u32 *)(s+0); + d += 4; + s += 4; + } + if (n&2) { + *d++ = *s++; *d++ = *s++; + } + if (n&1) { + *d = *s; + } + return dest; + } + + if (n >= 32) { + #if __BYTE_ORDER == __BIG_ENDIAN + #define LS << + #define RS >> + #else + #define LS >> + #define RS << + #endif + switch (cast(uintptr)d % 4) { + case 1: { + w = *cast(u32 *)s; + *d++ = *s++; + *d++ = *s++; + *d++ = *s++; + n -= 3; + while (n > 16) { + x = *cast(u32 *)(s+1); + *cast(u32 *)(d+0) = (w LS 24) | (x RS 8); + w = *cast(u32 *)(s+5); + *cast(u32 *)(d+4) = (x LS 24) | (w RS 8); + x = *cast(u32 *)(s+9); + *cast(u32 *)(d+8) = (w LS 24) | (x RS 8); + w = *cast(u32 *)(s+13); + *cast(u32 *)(d+12) = (x LS 24) | (w RS 8); + + s += 16; + d += 16; + n -= 
16; + } + } break; + case 2: { + w = *cast(u32 *)s; + *d++ = *s++; + *d++ = *s++; + n -= 2; + while (n > 17) { + x = *cast(u32 *)(s+2); + *cast(u32 *)(d+0) = (w LS 16) | (x RS 16); + w = *cast(u32 *)(s+6); + *cast(u32 *)(d+4) = (x LS 16) | (w RS 16); + x = *cast(u32 *)(s+10); + *cast(u32 *)(d+8) = (w LS 16) | (x RS 16); + w = *cast(u32 *)(s+14); + *cast(u32 *)(d+12) = (x LS 16) | (w RS 16); + + s += 16; + d += 16; + n -= 16; + } + } break; + case 3: { + w = *cast(u32 *)s; + *d++ = *s++; + n -= 1; + while (n > 18) { + x = *cast(u32 *)(s+3); + *cast(u32 *)(d+0) = (w LS 8) | (x RS 24); + w = *cast(u32 *)(s+7); + *cast(u32 *)(d+4) = (x LS 8) | (w RS 24); + x = *cast(u32 *)(s+11); + *cast(u32 *)(d+8) = (w LS 8) | (x RS 24); + w = *cast(u32 *)(s+15); + *cast(u32 *)(d+12) = (x LS 8) | (w RS 24); + + s += 16; + d += 16; + n -= 16; + } + } break; + default: break; // NOTE(bill): Do nowt! + } + #undef LS + #undef RS + if (n & 16) { + *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++; + *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++; + *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++; + *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++; + } + if (n & 8) { + *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++; + *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++; + } + if (n & 4) { + *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++; + } + if (n & 2) { + *d++ = *s++; *d++ = *s++; + } + if (n & 1) { + *d = *s; + } + } + +#endif + return dest; +} + +gb_inline void *gb_memmove(void *dest, void const *source, isize n) { + u8 *d = cast(u8 *)dest; + u8 const *s = cast(u8 const *)source; + + if (dest == NULL) { + return NULL; + } + + if (d == s) { + return d; + } + if (s+n <= d || d+n <= s) { // NOTE(bill): Non-overlapping + return gb_memcopy(d, s, n); + } + + if (d < s) { + if (cast(uintptr)s % gb_size_of(isize) == cast(uintptr)d % gb_size_of(isize)) { + while (cast(uintptr)d % gb_size_of(isize)) { + if (!n--) return dest; + *d++ = *s++; + } + while (n>=gb_size_of(isize)) { 
+ *cast(isize *)d = *cast(isize *)s; + n -= gb_size_of(isize); + d += gb_size_of(isize); + s += gb_size_of(isize); + } + } + for (; n; n--) *d++ = *s++; + } else { + if ((cast(uintptr)s % gb_size_of(isize)) == (cast(uintptr)d % gb_size_of(isize))) { + while (cast(uintptr)(d+n) % gb_size_of(isize)) { + if (!n--) + return dest; + d[n] = s[n]; + } + while (n >= gb_size_of(isize)) { + n -= gb_size_of(isize); + *cast(isize *)(d+n) = *cast(isize *)(s+n); + } + } + while (n) n--, d[n] = s[n]; + } + + return dest; +} + +gb_inline void *gb_memset(void *dest, u8 c, isize n) { + u8 *s = cast(u8 *)dest; + isize k; + u32 c32 = ((u32)-1)/255 * c; + + if (dest == NULL) { + return NULL; + } + + if (n == 0) + return dest; + s[0] = s[n-1] = c; + if (n < 3) + return dest; + s[1] = s[n-2] = c; + s[2] = s[n-3] = c; + if (n < 7) + return dest; + s[3] = s[n-4] = c; + if (n < 9) + return dest; + + k = -cast(intptr)s & 3; + s += k; + n -= k; + n &= -4; + + *cast(u32 *)(s+0) = c32; + *cast(u32 *)(s+n-4) = c32; + if (n < 9) { + return dest; + } + *cast(u32 *)(s + 4) = c32; + *cast(u32 *)(s + 8) = c32; + *cast(u32 *)(s+n-12) = c32; + *cast(u32 *)(s+n- 8) = c32; + if (n < 25) { + return dest; + } + *cast(u32 *)(s + 12) = c32; + *cast(u32 *)(s + 16) = c32; + *cast(u32 *)(s + 20) = c32; + *cast(u32 *)(s + 24) = c32; + *cast(u32 *)(s+n-28) = c32; + *cast(u32 *)(s+n-24) = c32; + *cast(u32 *)(s+n-20) = c32; + *cast(u32 *)(s+n-16) = c32; + + k = 24 + (cast(uintptr)s & 4); + s += k; + n -= k; + + + { + u64 c64 = (cast(u64)c32 << 32) | c32; + while (n > 31) { + *cast(u64 *)(s+0) = c64; + *cast(u64 *)(s+8) = c64; + *cast(u64 *)(s+16) = c64; + *cast(u64 *)(s+24) = c64; + + n -= 32; + s += 32; + } + } + + return dest; +} + +gb_inline i32 gb_memcompare(void const *s1, void const *s2, isize size) { + // TODO(bill): Heavily optimize + u8 const *s1p8 = cast(u8 const *)s1; + u8 const *s2p8 = cast(u8 const *)s2; + + if (s1 == NULL || s2 == NULL) { + return 0; + } + + while (size--) { + if (*s1p8 != *s2p8) { + 
return (*s1p8 - *s2p8); + } + s1p8++, s2p8++; + } + return 0; +} + +void gb_memswap(void *i, void *j, isize size) { + if (i == j) return; + + if (size == 4) { + gb_swap(u32, *cast(u32 *)i, *cast(u32 *)j); + } else if (size == 8) { + gb_swap(u64, *cast(u64 *)i, *cast(u64 *)j); + } else if (size < 8) { + u8 *a = cast(u8 *)i; + u8 *b = cast(u8 *)j; + if (a != b) { + while (size--) { + gb_swap(u8, *a, *b); + a++, b++; + } + } + } else { + char buffer[256]; + + // TODO(bill): Is the recursion ever a problem? + while (size > gb_size_of(buffer)) { + gb_memswap(i, j, gb_size_of(buffer)); + i = gb_pointer_add(i, gb_size_of(buffer)); + j = gb_pointer_add(j, gb_size_of(buffer)); + size -= gb_size_of(buffer); + } + + gb_memcopy(buffer, i, size); + gb_memcopy(i, j, size); + gb_memcopy(j, buffer, size); + } +} + +#define GB__ONES (cast(usize)-1/U8_MAX) +#define GB__HIGHS (GB__ONES * (U8_MAX/2+1)) +#define GB__HAS_ZERO(x) ((x)-GB__ONES & ~(x) & GB__HIGHS) + + +void const *gb_memchr(void const *data, u8 c, isize n) { + u8 const *s = cast(u8 const *)data; + while ((cast(uintptr)s & (sizeof(usize)-1)) && + n && *s != c) { + s++; + n--; + } + if (n && *s != c) { + isize const *w; + isize k = GB__ONES * c; + w = cast(isize const *)s; + while (n >= gb_size_of(isize) && !GB__HAS_ZERO(*w ^ k)) { + w++; + n -= gb_size_of(isize); + } + s = cast(u8 const *)w; + while (n && *s != c) { + s++; + n--; + } + } + + return n ? 
cast(void const *)s : NULL; +} + + +void const *gb_memrchr(void const *data, u8 c, isize n) { + u8 const *s = cast(u8 const *)data; + while (n--) { + if (s[n] == c) + return cast(void const *)(s + n); + } + return NULL; +} + + + +gb_inline void *gb_alloc_align (gbAllocator a, isize size, isize alignment) { return a.proc(a.data, gbAllocation_Alloc, size, alignment, NULL, 0, GB_DEFAULT_ALLOCATOR_FLAGS); } +gb_inline void *gb_alloc (gbAllocator a, isize size) { return gb_alloc_align(a, size, GB_DEFAULT_MEMORY_ALIGNMENT); } +gb_inline void gb_free (gbAllocator a, void *ptr) { if (ptr != NULL) a.proc(a.data, gbAllocation_Free, 0, 0, ptr, 0, GB_DEFAULT_ALLOCATOR_FLAGS); } +gb_inline void gb_free_all (gbAllocator a) { a.proc(a.data, gbAllocation_FreeAll, 0, 0, NULL, 0, GB_DEFAULT_ALLOCATOR_FLAGS); } +gb_inline void *gb_resize (gbAllocator a, void *ptr, isize old_size, isize new_size) { return gb_resize_align(a, ptr, old_size, new_size, GB_DEFAULT_MEMORY_ALIGNMENT); } +gb_inline void *gb_resize_align(gbAllocator a, void *ptr, isize old_size, isize new_size, isize alignment) { return a.proc(a.data, gbAllocation_Resize, new_size, alignment, ptr, old_size, GB_DEFAULT_ALLOCATOR_FLAGS); } + +gb_inline void *gb_alloc_copy (gbAllocator a, void const *src, isize size) { + return gb_memcopy(gb_alloc(a, size), src, size); +} +gb_inline void *gb_alloc_copy_align(gbAllocator a, void const *src, isize size, isize alignment) { + return gb_memcopy(gb_alloc_align(a, size, alignment), src, size); +} + +gb_inline char *gb_alloc_str(gbAllocator a, char const *str) { + return gb_alloc_str_len(a, str, gb_strlen(str)); +} + +gb_inline char *gb_alloc_str_len(gbAllocator a, char const *str, isize len) { + char *result; + result = cast(char *)gb_alloc_copy(a, str, len+1); + result[len] = '\0'; + return result; +} + + +gb_inline void *gb_default_resize_align(gbAllocator a, void *old_memory, isize old_size, isize new_size, isize alignment) { + if (!old_memory) return gb_alloc_align(a, new_size, 
alignment); + + if (new_size == 0) { + gb_free(a, old_memory); + return NULL; + } + + if (new_size < old_size) + new_size = old_size; + + if (old_size == new_size) { + return old_memory; + } else { + void *new_memory = gb_alloc_align(a, new_size, alignment); + if (!new_memory) return NULL; + gb_memmove(new_memory, old_memory, gb_min(new_size, old_size)); + gb_free(a, old_memory); + return new_memory; + } +} + + + + +//////////////////////////////////////////////////////////////// +// +// Concurrency +// +// +// IMPORTANT TODO(bill): Use compiler intrinsics for the atomics + +#if defined(GB_COMPILER_MSVC) && !defined(GB_COMPILER_CLANG) +gb_inline i32 gb_atomic32_load (gbAtomic32 const volatile *a) { return a->value; } +gb_inline void gb_atomic32_store(gbAtomic32 volatile *a, i32 value) { a->value = value; } + +gb_inline i32 gb_atomic32_compare_exchange(gbAtomic32 volatile *a, i32 expected, i32 desired) { + return _InterlockedCompareExchange(cast(long volatile *)a, desired, expected); +} +gb_inline i32 gb_atomic32_exchanged(gbAtomic32 volatile *a, i32 desired) { + return _InterlockedExchange(cast(long volatile *)a, desired); +} +gb_inline i32 gb_atomic32_fetch_add(gbAtomic32 volatile *a, i32 operand) { + return _InterlockedExchangeAdd(cast(long volatile *)a, operand); +} +gb_inline i32 gb_atomic32_fetch_and(gbAtomic32 volatile *a, i32 operand) { + return _InterlockedAnd(cast(long volatile *)a, operand); +} +gb_inline i32 gb_atomic32_fetch_or(gbAtomic32 volatile *a, i32 operand) { + return _InterlockedOr(cast(long volatile *)a, operand); +} + +gb_inline i64 gb_atomic64_load(gbAtomic64 const volatile *a) { +#if defined(GB_ARCH_64_BIT) + return a->value; +#elif GB_CPU_X86 + // NOTE(bill): The most compatible way to get an atomic 64-bit load on x86 is with cmpxchg8b + i64 result; + __asm { + mov esi, a; + mov ebx, eax; + mov ecx, edx; + lock cmpxchg8b [esi]; + mov dword ptr result, eax; + mov dword ptr result[4], edx; + } + return result; +#else +#error TODO(bill): 
atomics for this CPU +#endif +} + +gb_inline void gb_atomic64_store(gbAtomic64 volatile *a, i64 value) { +#if defined(GB_ARCH_64_BIT) + a->value = value; +#elif GB_CPU_X86 + // NOTE(bill): The most compatible way to get an atomic 64-bit store on x86 is with cmpxchg8b + __asm { + mov esi, a; + mov ebx, dword ptr value; + mov ecx, dword ptr value[4]; + retry: + cmpxchg8b [esi]; + jne retry; + } +#else +#error TODO(bill): atomics for this CPU +#endif +} + +gb_inline i64 gb_atomic64_compare_exchange(gbAtomic64 volatile *a, i64 expected, i64 desired) { + return _InterlockedCompareExchange64(cast(i64 volatile *)a, desired, expected); +} + +gb_inline i64 gb_atomic64_exchanged(gbAtomic64 volatile *a, i64 desired) { +#if defined(GB_ARCH_64_BIT) + return _InterlockedExchange64(cast(i64 volatile *)a, desired); +#elif GB_CPU_X86 + i64 expected = a->value; + for (;;) { + i64 original = _InterlockedCompareExchange64(cast(i64 volatile *)a, desired, expected); + if (original == expected) + return original; + expected = original; + } +#else +#error TODO(bill): atomics for this CPU +#endif +} + +gb_inline i64 gb_atomic64_fetch_add(gbAtomic64 volatile *a, i64 operand) { +#if defined(GB_ARCH_64_BIT) + return _InterlockedExchangeAdd64(cast(i64 volatile *)a, operand); +#elif GB_CPU_X86 + i64 expected = a->value; + for (;;) { + i64 original = _InterlockedCompareExchange64(cast(i64 volatile *)a, expected + operand, expected); + if (original == expected) + return original; + expected = original; + } +#else +#error TODO(bill): atomics for this CPU +#endif +} + +gb_inline i64 gb_atomic64_fetch_and(gbAtomic64 volatile *a, i64 operand) { +#if defined(GB_ARCH_64_BIT) + return _InterlockedAnd64(cast(i64 volatile *)a, operand); +#elif GB_CPU_X86 + i64 expected = a->value; + for (;;) { + i64 original = _InterlockedCompareExchange64(cast(i64 volatile *)a, expected & operand, expected); + if (original == expected) + return original; + expected = original; + } +#else +#error TODO(bill): atomics for 
this CPU +#endif +} + +gb_inline i64 gb_atomic64_fetch_or(gbAtomic64 volatile *a, i64 operand) { +#if defined(GB_ARCH_64_BIT) + return _InterlockedOr64(cast(i64 volatile *)a, operand); +#elif GB_CPU_X86 + i64 expected = a->value; + for (;;) { + i64 original = _InterlockedCompareExchange64(cast(i64 volatile *)a, expected | operand, expected); + if (original == expected) + return original; + expected = original; + } +#else +#error TODO(bill): atomics for this CPU +#endif +} + + + +#elif defined(GB_CPU_X86) + +gb_inline i32 gb_atomic32_load (gbAtomic32 const volatile *a) { return a->value; } +gb_inline void gb_atomic32_store(gbAtomic32 volatile *a, i32 value) { a->value = value; } + +gb_inline i32 gb_atomic32_compare_exchange(gbAtomic32 volatile *a, i32 expected, i32 desired) { + i32 original; + __asm__ volatile( + "lock; cmpxchgl %2, %1" + : "=a"(original), "+m"(a->value) + : "q"(desired), "0"(expected) + ); + return original; +} + +gb_inline i32 gb_atomic32_exchanged(gbAtomic32 volatile *a, i32 desired) { + // NOTE(bill): No lock prefix is necessary for xchgl + i32 original; + __asm__ volatile( + "xchgl %0, %1" + : "=r"(original), "+m"(a->value) + : "0"(desired) + ); + return original; +} + +gb_inline i32 gb_atomic32_fetch_add(gbAtomic32 volatile *a, i32 operand) { + i32 original; + __asm__ volatile( + "lock; xaddl %0, %1" + : "=r"(original), "+m"(a->value) + : "0"(operand) + ); + return original; +} + +gb_inline i32 gb_atomic32_fetch_and(gbAtomic32 volatile *a, i32 operand) { + i32 original; + i32 tmp; + __asm__ volatile( + "1: movl %1, %0\n" + " movl %0, %2\n" + " andl %3, %2\n" + " lock; cmpxchgl %2, %1\n" + " jne 1b" + : "=&a"(original), "+m"(a->value), "=&r"(tmp) + : "r"(operand) + ); + return original; +} + +gb_inline i32 gb_atomic32_fetch_or(gbAtomic32 volatile *a, i32 operand) { + i32 original; + i32 temp; + __asm__ volatile( + "1: movl %1, %0\n" + " movl %0, %2\n" + " orl %3, %2\n" + " lock; cmpxchgl %2, %1\n" + " jne 1b" + : "=&a"(original), 
"+m"(a->value), "=&r"(temp) + : "r"(operand) + ); + return original; +} + + +gb_inline i64 gb_atomic64_load(gbAtomic64 const volatile *a) { +#if defined(GB_ARCH_64_BIT) + return a->value; +#else + i64 original; + __asm__ volatile( + "movl %%ebx, %%eax\n" + "movl %%ecx, %%edx\n" + "lock; cmpxchg8b %1" + : "=&A"(original) + : "m"(a->value) + ); + return original; +#endif +} + +gb_inline void gb_atomic64_store(gbAtomic64 volatile *a, i64 value) { +#if defined(GB_ARCH_64_BIT) + a->value = value; +#else + i64 expected = a->value; + __asm__ volatile( + "1: cmpxchg8b %0\n" + " jne 1b" + : "=m"(a->value) + : "b"((i32)value), "c"((i32)(value >> 32)), "A"(expected) + ); +#endif +} + +gb_inline i64 gb_atomic64_compare_exchange(gbAtomic64 volatile *a, i64 expected, i64 desired) { +#if defined(GB_ARCH_64_BIT) + i64 original; + __asm__ volatile( + "lock; cmpxchgq %2, %1" + : "=a"(original), "+m"(a->value) + : "q"(desired), "0"(expected) + ); + return original; +#else + i64 original; + __asm__ volatile( + "lock; cmpxchg8b %1" + : "=A"(original), "+m"(a->value) + : "b"((i32)desired), "c"((i32)(desired >> 32)), "0"(expected) + ); + return original; +#endif +} + +gb_inline i64 gb_atomic64_exchanged(gbAtomic64 volatile *a, i64 desired) { +#if defined(GB_ARCH_64_BIT) + i64 original; + __asm__ volatile( + "xchgq %0, %1" + : "=r"(original), "+m"(a->value) + : "0"(desired) + ); + return original; +#else + i64 original = a->value; + for (;;) { + i64 previous = gb_atomic64_compare_exchange(a, original, desired); + if (original == previous) + return original; + original = previous; + } +#endif +} + +gb_inline i64 gb_atomic64_fetch_add(gbAtomic64 volatile *a, i64 operand) { +#if defined(GB_ARCH_64_BIT) + i64 original; + __asm__ volatile( + "lock; xaddq %0, %1" + : "=r"(original), "+m"(a->value) + : "0"(operand) + ); + return original; +#else + for (;;) { + i64 original = a->value; + if (gb_atomic64_compare_exchange(a, original, original + operand) == original) + return original; + } +#endif 
+} + +gb_inline i64 gb_atomic64_fetch_and(gbAtomic64 volatile *a, i64 operand) { +#if defined(GB_ARCH_64_BIT) + i64 original; + i64 tmp; + __asm__ volatile( + "1: movq %1, %0\n" + " movq %0, %2\n" + " andq %3, %2\n" + " lock; cmpxchgq %2, %1\n" + " jne 1b" + : "=&a"(original), "+m"(a->value), "=&r"(tmp) + : "r"(operand) + ); + return original; +#else + for (;;) { + i64 original = a->value; + if (gb_atomic64_compare_exchange(a, original, original & operand) == original) + return original; + } +#endif +} + +gb_inline i64 gb_atomic64_fetch_or(gbAtomic64 volatile *a, i64 operand) { +#if defined(GB_ARCH_64_BIT) + i64 original; + i64 temp; + __asm__ volatile( + "1: movq %1, %0\n" + " movq %0, %2\n" + " orq %3, %2\n" + " lock; cmpxchgq %2, %1\n" + " jne 1b" + : "=&a"(original), "+m"(a->value), "=&r"(temp) + : "r"(operand) + ); + return original; +#else + for (;;) { + i64 original = a->value; + if (gb_atomic64_compare_exchange(a, original, original | operand) == original) + return original; + } +#endif +} + +#else +#error TODO(bill): Implement Atomics for this CPU +#endif + +gb_inline b32 gb_atomic32_spin_lock(gbAtomic32 volatile *a, isize time_out) { + i32 old_value = gb_atomic32_compare_exchange(a, 1, 0); + i32 counter = 0; + while (old_value != 0 && (time_out < 0 || counter++ < time_out)) { + gb_yield_thread(); + old_value = gb_atomic32_compare_exchange(a, 1, 0); + gb_mfence(); + } + return old_value == 0; +} +gb_inline void gb_atomic32_spin_unlock(gbAtomic32 volatile *a) { + gb_atomic32_store(a, 0); + gb_mfence(); +} + +gb_inline b32 gb_atomic64_spin_lock(gbAtomic64 volatile *a, isize time_out) { + i64 old_value = gb_atomic64_compare_exchange(a, 1, 0); + i64 counter = 0; + while (old_value != 0 && (time_out < 0 || counter++ < time_out)) { + gb_yield_thread(); + old_value = gb_atomic64_compare_exchange(a, 1, 0); + gb_mfence(); + } + return old_value == 0; +} + +gb_inline void gb_atomic64_spin_unlock(gbAtomic64 volatile *a) { + gb_atomic64_store(a, 0); + gb_mfence(); +} 
+ +gb_inline b32 gb_atomic32_try_acquire_lock(gbAtomic32 volatile *a) { + i32 old_value; + gb_yield_thread(); + old_value = gb_atomic32_compare_exchange(a, 1, 0); + gb_mfence(); + return old_value == 0; +} + +gb_inline b32 gb_atomic64_try_acquire_lock(gbAtomic64 volatile *a) { + i64 old_value; + gb_yield_thread(); + old_value = gb_atomic64_compare_exchange(a, 1, 0); + gb_mfence(); + return old_value == 0; +} + + + +#if defined(GB_ARCH_32_BIT) + +gb_inline void *gb_atomic_ptr_load(gbAtomicPtr const volatile *a) { + return cast(void *)cast(intptr)gb_atomic32_load(cast(gbAtomic32 const volatile *)a); +} +gb_inline void gb_atomic_ptr_store(gbAtomicPtr volatile *a, void *value) { + gb_atomic32_store(cast(gbAtomic32 volatile *)a, cast(i32)cast(intptr)value); +} +gb_inline void *gb_atomic_ptr_compare_exchange(gbAtomicPtr volatile *a, void *expected, void *desired) { + return cast(void *)cast(intptr)gb_atomic32_compare_exchange(cast(gbAtomic32 volatile *)a, cast(i32)cast(intptr)expected, cast(i32)cast(intptr)desired); +} +gb_inline void *gb_atomic_ptr_exchanged(gbAtomicPtr volatile *a, void *desired) { + return cast(void *)cast(intptr)gb_atomic32_exchanged(cast(gbAtomic32 volatile *)a, cast(i32)cast(intptr)desired); +} +gb_inline void *gb_atomic_ptr_fetch_add(gbAtomicPtr volatile *a, void *operand) { + return cast(void *)cast(intptr)gb_atomic32_fetch_add(cast(gbAtomic32 volatile *)a, cast(i32)cast(intptr)operand); +} +gb_inline void *gb_atomic_ptr_fetch_and(gbAtomicPtr volatile *a, void *operand) { + return cast(void *)cast(intptr)gb_atomic32_fetch_and(cast(gbAtomic32 volatile *)a, cast(i32)cast(intptr)operand); +} +gb_inline void *gb_atomic_ptr_fetch_or(gbAtomicPtr volatile *a, void *operand) { + return cast(void *)cast(intptr)gb_atomic32_fetch_or(cast(gbAtomic32 volatile *)a, cast(i32)cast(intptr)operand); +} +gb_inline b32 gb_atomic_ptr_spin_lock(gbAtomicPtr volatile *a, isize time_out) { + return gb_atomic32_spin_lock(cast(gbAtomic32 volatile *)a, time_out); +} 
+gb_inline void gb_atomic_ptr_spin_unlock(gbAtomicPtr volatile *a) { + gb_atomic32_spin_unlock(cast(gbAtomic32 volatile *)a); +} +gb_inline b32 gb_atomic_ptr_try_acquire_lock(gbAtomicPtr volatile *a) { + return gb_atomic32_try_acquire_lock(cast(gbAtomic32 volatile *)a); +} + +#elif defined(GB_ARCH_64_BIT) + +gb_inline void *gb_atomic_ptr_load(gbAtomicPtr const volatile *a) { + return cast(void *)cast(intptr)gb_atomic64_load(cast(gbAtomic64 const volatile *)a); +} +gb_inline void gb_atomic_ptr_store(gbAtomicPtr volatile *a, void *value) { + gb_atomic64_store(cast(gbAtomic64 volatile *)a, cast(i64)cast(intptr)value); +} +gb_inline void *gb_atomic_ptr_compare_exchange(gbAtomicPtr volatile *a, void *expected, void *desired) { + return cast(void *)cast(intptr)gb_atomic64_compare_exchange(cast(gbAtomic64 volatile *)a, cast(i64)cast(intptr)expected, cast(i64)cast(intptr)desired); +} +gb_inline void *gb_atomic_ptr_exchanged(gbAtomicPtr volatile *a, void *desired) { + return cast(void *)cast(intptr)gb_atomic64_exchanged(cast(gbAtomic64 volatile *)a, cast(i64)cast(intptr)desired); +} +gb_inline void *gb_atomic_ptr_fetch_add(gbAtomicPtr volatile *a, void *operand) { + return cast(void *)cast(intptr)gb_atomic64_fetch_add(cast(gbAtomic64 volatile *)a, cast(i64)cast(intptr)operand); +} +gb_inline void *gb_atomic_ptr_fetch_and(gbAtomicPtr volatile *a, void *operand) { + return cast(void *)cast(intptr)gb_atomic64_fetch_and(cast(gbAtomic64 volatile *)a, cast(i64)cast(intptr)operand); +} +gb_inline void *gb_atomic_ptr_fetch_or(gbAtomicPtr volatile *a, void *operand) { + return cast(void *)cast(intptr)gb_atomic64_fetch_or(cast(gbAtomic64 volatile *)a, cast(i64)cast(intptr)operand); +} +gb_inline b32 gb_atomic_ptr_spin_lock(gbAtomicPtr volatile *a, isize time_out) { + return gb_atomic64_spin_lock(cast(gbAtomic64 volatile *)a, time_out); +} +gb_inline void gb_atomic_ptr_spin_unlock(gbAtomicPtr volatile *a) { + gb_atomic64_spin_unlock(cast(gbAtomic64 volatile *)a); +} +gb_inline b32 
gb_atomic_ptr_try_acquire_lock(gbAtomicPtr volatile *a) { + return gb_atomic64_try_acquire_lock(cast(gbAtomic64 volatile *)a); +} +#endif + + +gb_inline void gb_yield_thread(void) { +#if defined(GB_SYSTEM_WINDOWS) + _mm_pause(); +#elif defined(GB_SYSTEM_OSX) + __asm__ volatile ("" : : : "memory"); +#elif defined(GB_CPU_X86) + _mm_pause(); +#else +#error Unknown architecture +#endif +} + +gb_inline void gb_mfence(void) { +#if defined(GB_SYSTEM_WINDOWS) + _ReadWriteBarrier(); +#elif defined(GB_SYSTEM_OSX) + __sync_synchronize(); +#elif defined(GB_CPU_X86) + _mm_mfence(); +#else +#error Unknown architecture +#endif +} + +gb_inline void gb_sfence(void) { +#if defined(GB_SYSTEM_WINDOWS) + _WriteBarrier(); +#elif defined(GB_SYSTEM_OSX) + __asm__ volatile ("" : : : "memory"); +#elif defined(GB_CPU_X86) + _mm_sfence(); +#else +#error Unknown architecture +#endif +} + +gb_inline void gb_lfence(void) { +#if defined(GB_SYSTEM_WINDOWS) + _ReadBarrier(); +#elif defined(GB_SYSTEM_OSX) + __asm__ volatile ("" : : : "memory"); +#elif defined(GB_CPU_X86) + _mm_lfence(); +#else +#error Unknown architecture +#endif +} + + +gb_inline void gb_semaphore_release(gbSemaphore *s) { gb_semaphore_post(s, 1); } + +#if defined(GB_SYSTEM_WINDOWS) + gb_inline void gb_semaphore_init (gbSemaphore *s) { s->win32_handle = CreateSemaphoreA(NULL, 0, I32_MAX, NULL); } + gb_inline void gb_semaphore_destroy(gbSemaphore *s) { CloseHandle(s->win32_handle); } + gb_inline void gb_semaphore_post (gbSemaphore *s, i32 count) { ReleaseSemaphore(s->win32_handle, count, NULL); } + gb_inline void gb_semaphore_wait (gbSemaphore *s) { WaitForSingleObject(s->win32_handle, INFINITE); } + +#elif defined(GB_SYSTEM_OSX) + gb_inline void gb_semaphore_init (gbSemaphore *s) { semaphore_create(mach_task_self(), &s->osx_handle, SYNC_POLICY_FIFO, 0); } + gb_inline void gb_semaphore_destroy(gbSemaphore *s) { semaphore_destroy(mach_task_self(), s->osx_handle); } + gb_inline void gb_semaphore_post (gbSemaphore *s, i32 count) { 
while (count --> 0) semaphore_signal(s->osx_handle); } + gb_inline void gb_semaphore_wait (gbSemaphore *s) { semaphore_wait(s->osx_handle); } + +#elif defined(GB_SYSTEM_UNIX) + gb_inline void gb_semaphore_init (gbSemaphore *s) { sem_init(&s->unix_handle, 0, 0); } + gb_inline void gb_semaphore_destroy(gbSemaphore *s) { sem_destroy(&s->unix_handle); } + gb_inline void gb_semaphore_post (gbSemaphore *s, i32 count) { while (count --> 0) sem_post(&s->unix_handle); } + gb_inline void gb_semaphore_wait (gbSemaphore *s) { int i; do { i = sem_wait(&s->unix_handle); } while (i == -1 && errno == EINTR); } + +#else +#error +#endif + +gb_inline void gb_mutex_init(gbMutex *m) { +#if defined(GB_SYSTEM_WINDOWS) + InitializeCriticalSection(&m->win32_critical_section); +#else + pthread_mutexattr_init(&m->pthread_mutexattr); + pthread_mutexattr_settype(&m->pthread_mutexattr, PTHREAD_MUTEX_RECURSIVE); + pthread_mutex_init(&m->pthread_mutex, &m->pthread_mutexattr); +#endif +} + +gb_inline void gb_mutex_destroy(gbMutex *m) { +#if defined(GB_SYSTEM_WINDOWS) + DeleteCriticalSection(&m->win32_critical_section); +#else + pthread_mutex_destroy(&m->pthread_mutex); +#endif +} + +gb_inline void gb_mutex_lock(gbMutex *m) { +#if defined(GB_SYSTEM_WINDOWS) + EnterCriticalSection(&m->win32_critical_section); +#else + pthread_mutex_lock(&m->pthread_mutex); +#endif +} + +gb_inline b32 gb_mutex_try_lock(gbMutex *m) { +#if defined(GB_SYSTEM_WINDOWS) + return TryEnterCriticalSection(&m->win32_critical_section) != 0; +#else + return pthread_mutex_trylock(&m->pthread_mutex) == 0; +#endif +} + +gb_inline void gb_mutex_unlock(gbMutex *m) { +#if defined(GB_SYSTEM_WINDOWS) + LeaveCriticalSection(&m->win32_critical_section); +#else + pthread_mutex_unlock(&m->pthread_mutex); +#endif +} + + + + + + + +void gb_thread_init(gbThread *t) { + gb_zero_item(t); +#if defined(GB_SYSTEM_WINDOWS) + t->win32_handle = INVALID_HANDLE_VALUE; +#else + t->posix_handle = 0; +#endif + gb_semaphore_init(&t->semaphore); +} + +void 
gb_thread_destroy(gbThread *t) { + if (t->is_running) gb_thread_join(t); + gb_semaphore_destroy(&t->semaphore); +} + + +gb_inline void gb__thread_run(gbThread *t) { + gb_semaphore_release(&t->semaphore); + t->return_value = t->proc(t); +} + +#if defined(GB_SYSTEM_WINDOWS) + gb_inline DWORD __stdcall gb__thread_proc(void *arg) { + gbThread *t = cast(gbThread *)arg; + gb__thread_run(t); + t->is_running = false; + return 0; + } +#else + gb_inline void * gb__thread_proc(void *arg) { + gbThread *t = cast(gbThread *)arg; + gb__thread_run(t); + t->is_running = false; + return NULL; + } +#endif + +gb_inline void gb_thread_start(gbThread *t, gbThreadProc *proc, void *user_data) { gb_thread_start_with_stack(t, proc, user_data, 0); } + +gb_inline void gb_thread_start_with_stack(gbThread *t, gbThreadProc *proc, void *user_data, isize stack_size) { + GB_ASSERT(!t->is_running); + GB_ASSERT(proc != NULL); + t->proc = proc; + t->user_data = user_data; + t->stack_size = stack_size; + t->is_running = true; + +#if defined(GB_SYSTEM_WINDOWS) + t->win32_handle = CreateThread(NULL, stack_size, gb__thread_proc, t, 0, NULL); + GB_ASSERT_MSG(t->win32_handle != NULL, "CreateThread: GetLastError"); +#else + { + pthread_attr_t attr; + pthread_attr_init(&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); + if (stack_size != 0) { + pthread_attr_setstacksize(&attr, stack_size); + } + pthread_create(&t->posix_handle, &attr, gb__thread_proc, t); + pthread_attr_destroy(&attr); + } +#endif + + gb_semaphore_wait(&t->semaphore); +} + +gb_inline void gb_thread_join(gbThread *t) { + if (!t->is_running) return; + +#if defined(GB_SYSTEM_WINDOWS) + WaitForSingleObject(t->win32_handle, INFINITE); + CloseHandle(t->win32_handle); + t->win32_handle = INVALID_HANDLE_VALUE; +#else + pthread_join(t->posix_handle, NULL); + t->posix_handle = 0; +#endif + t->is_running = false; +} + +gb_inline b32 gb_thread_is_running(gbThread const *t) { return t->is_running != 0; } + +gb_inline u32 
gb_thread_current_id(void) { + u32 thread_id; +#if defined(GB_SYSTEM_WINDOWS) + #if defined(GB_ARCH_32_BIT) && defined(GB_CPU_X86) + thread_id = (cast(u32 *)__readfsdword(24))[9]; + #elif defined(GB_ARCH_64_BIT) && defined(GB_CPU_X86) + thread_id = (cast(u32 *)__readgsqword(48))[18]; + #else + thread_id = GetCurrentThreadId(); + #endif + +#elif defined(GB_SYSTEM_OSX) && defined(GB_ARCH_64_BIT) + thread_id = pthread_mach_thread_np(pthread_self()); +#elif defined(GB_ARCH_32_BIT) && defined(GB_CPU_X86) + __asm__("mov %%gs:0x08,%0" : "=r"(thread_id)); +#elif defined(GB_ARCH_64_BIT) && defined(GB_CPU_X86) + __asm__("mov %%fs:0x10,%0" : "=r"(thread_id)); +#else + #error Unsupported architecture for gb_thread_current_id() +#endif + + return thread_id; +} + + + +void gb_thread_set_name(gbThread *t, char const *name) { +#if defined(GB_COMPILER_MSVC) + #pragma pack(push, 8) + typedef struct { + DWORD type; + char const *name; + DWORD id; + DWORD flags; + } gbprivThreadName; + #pragma pack(pop) + gbprivThreadName tn; + tn.type = 0x1000; + tn.name = name; + tn.id = GetThreadId(cast(HANDLE)t->win32_handle); + tn.flags = 0; + + __try { + RaiseException(0x406d1388, 0, gb_size_of(tn)/4, cast(ULONG_PTR *)&tn); + } __except(1 /*EXCEPTION_EXECUTE_HANDLER*/) { + } + +#elif defined(GB_SYSTEM_WINDOWS) && !defined(GB_COMPILER_MSVC) + // IMPORTANT TODO(bill): Set thread name for GCC/Clang on windows + return; +#elif defined(GB_SYSTEM_OSX) + // TODO(bill): Test if this works + pthread_setname_np(name); +#else + // TODO(bill): Test if this works + pthread_setname_np(t->posix_handle, name); +#endif +} + + + + +void gb_sync_init(gbSync *s) { + gb_zero_item(s); + gb_mutex_init(&s->mutex); + gb_mutex_init(&s->start); + gb_semaphore_init(&s->release); +} + +void gb_sync_destroy(gbSync *s) { + if (s->waiting) + GB_PANIC("Cannot destroy while threads are waiting!"); + + gb_mutex_destroy(&s->mutex); + gb_mutex_destroy(&s->start); + gb_semaphore_destroy(&s->release); +} + +void 
gb_sync_set_target(gbSync *s, i32 count) { + gb_mutex_lock(&s->start); + + gb_mutex_lock(&s->mutex); + GB_ASSERT(s->target == 0); + s->target = count; + s->current = 0; + s->waiting = 0; + gb_mutex_unlock(&s->mutex); +} + +void gb_sync_release(gbSync *s) { + if (s->waiting) { + gb_semaphore_release(&s->release); + } else { + s->target = 0; + gb_mutex_unlock(&s->start); + } +} + +i32 gb_sync_reach(gbSync *s) { + i32 n; + gb_mutex_lock(&s->mutex); + GB_ASSERT(s->current < s->target); + n = ++s->current; // NOTE(bill): Record this value to avoid possible race if `return s->current` was done + if (s->current == s->target) + gb_sync_release(s); + gb_mutex_unlock(&s->mutex); + return n; +} + +void gb_sync_reach_and_wait(gbSync *s) { + gb_mutex_lock(&s->mutex); + GB_ASSERT(s->current < s->target); + s->current++; + if (s->current == s->target) { + gb_sync_release(s); + gb_mutex_unlock(&s->mutex); + } else { + s->waiting++; // NOTE(bill): Waiting, so one more waiter + gb_mutex_unlock(&s->mutex); // NOTE(bill): Release the mutex to other threads + + gb_semaphore_wait(&s->release); // NOTE(bill): Wait for merge completion + + gb_mutex_lock(&s->mutex); // NOTE(bill): On merge completion, lock mutex + s->waiting--; // NOTE(bill): Done waiting + gb_sync_release(s); // NOTE(bill): Restart the next waiter + gb_mutex_unlock(&s->mutex); + } +} + + + + + + + + +gb_inline gbAllocator gb_heap_allocator(void) { + gbAllocator a; + a.proc = gb_heap_allocator_proc; + a.data = NULL; + return a; +} + +GB_ALLOCATOR_PROC(gb_heap_allocator_proc) { + void *ptr = NULL; + gb_unused(allocator_data); + gb_unused(old_size); +// TODO(bill): Throughly test! 
+ switch (type) { +#if defined(GB_COMPILER_MSVC) + case gbAllocation_Alloc: + ptr = _aligned_malloc(size, alignment); + if (flags & gbAllocatorFlag_ClearToZero) + gb_zero_size(ptr, size); + break; + case gbAllocation_Free: + _aligned_free(old_memory); + break; + case gbAllocation_Resize: + ptr = _aligned_realloc(old_memory, size, alignment); + break; + +#elif defined(GB_SYSTEM_LINUX) + // TODO(bill): *nix version that's decent + case gbAllocation_Alloc: { + ptr = aligned_alloc(alignment, size); + // ptr = malloc(size+alignment); + + if (flags & gbAllocatorFlag_ClearToZero) { + gb_zero_size(ptr, size); + } + } break; + + case gbAllocation_Free: { + free(old_memory); + } break; + + case gbAllocation_Resize: { + // ptr = realloc(old_memory, size); + ptr = gb_default_resize_align(gb_heap_allocator(), old_memory, old_size, size, alignment); + } break; +#else + // TODO(bill): *nix version that's decent + case gbAllocation_Alloc: { + posix_memalign(&ptr, alignment, size); + + if (flags & gbAllocatorFlag_ClearToZero) { + gb_zero_size(ptr, size); + } + } break; + + case gbAllocation_Free: { + free(old_memory); + } break; + + case gbAllocation_Resize: { + ptr = gb_default_resize_align(gb_heap_allocator(), old_memory, old_size, size, alignment); + } break; +#endif + + case gbAllocation_FreeAll: + break; + } + + return ptr; +} + + +#if defined(GB_SYSTEM_WINDOWS) +void gb_affinity_init(gbAffinity *a) { + SYSTEM_LOGICAL_PROCESSOR_INFORMATION *start_processor_info = NULL; + DWORD length = 0; + b32 result = GetLogicalProcessorInformation(NULL, &length); + + gb_zero_item(a); + + if (!result && GetLastError() == 122l /*ERROR_INSUFFICIENT_BUFFER*/ && length > 0) { + start_processor_info = cast(SYSTEM_LOGICAL_PROCESSOR_INFORMATION *)gb_alloc(gb_heap_allocator(), length); + result = GetLogicalProcessorInformation(start_processor_info, &length); + if (result) { + SYSTEM_LOGICAL_PROCESSOR_INFORMATION *end_processor_info, *processor_info; + + a->is_accurate = true; + a->core_count = 0; + 
a->thread_count = 0; + end_processor_info = cast(SYSTEM_LOGICAL_PROCESSOR_INFORMATION *)gb_pointer_add(start_processor_info, length); + + for (processor_info = start_processor_info; + processor_info < end_processor_info; + processor_info++) { + if (processor_info->Relationship == RelationProcessorCore) { + isize thread = gb_count_set_bits(processor_info->ProcessorMask); + if (thread == 0) { + a->is_accurate = false; + } else if (a->thread_count + thread > GB_WIN32_MAX_THREADS) { + a->is_accurate = false; + } else { + GB_ASSERT(a->core_count <= a->thread_count && + a->thread_count < GB_WIN32_MAX_THREADS); + a->core_masks[a->core_count++] = processor_info->ProcessorMask; + a->thread_count += thread; + } + } + } + } + + gb_free(gb_heap_allocator(), start_processor_info); + } + + GB_ASSERT(a->core_count <= a->thread_count); + if (a->thread_count == 0) { + a->is_accurate = false; + a->core_count = 1; + a->thread_count = 1; + a->core_masks[0] = 1; + } + +} +void gb_affinity_destroy(gbAffinity *a) { + gb_unused(a); +} + + +b32 gb_affinity_set(gbAffinity *a, isize core, isize thread) { + usize available_mask, check_mask = 1; + GB_ASSERT(thread < gb_affinity_thread_count_for_core(a, core)); + + available_mask = a->core_masks[core]; + for (;;) { + if ((available_mask & check_mask) != 0) { + if (thread-- == 0) { + usize result = SetThreadAffinityMask(GetCurrentThread(), check_mask); + return result != 0; + } + } + check_mask <<= 1; // NOTE(bill): Onto the next bit + } +} + +isize gb_affinity_thread_count_for_core(gbAffinity *a, isize core) { + GB_ASSERT(core >= 0 && core < a->core_count); + return gb_count_set_bits(a->core_masks[core]); +} + +#elif defined(GB_SYSTEM_OSX) +void gb_affinity_init(gbAffinity *a) { + usize count = 0; + usize count_size = sizeof(count); + + a->is_accurate = false; + a->thread_count = 1; + a->core_count = 1; + a->threads_per_core = 1; + + if (sysctlbyname("hw.logicalcpu", &count, &count_size, NULL, 0) == 0) { + if (count > 0) { + a->thread_count = 
count; + // Get # of physical cores + if (sysctlbyname("hw.physicalcpu", &count, &count_size, NULL, 0) == 0) { + if (count > 0) { + a->core_count = count; + a->threads_per_core = a->thread_count / count; + if (a->threads_per_core < 1) + a->threads_per_core = 1; + else + a->is_accurate = true; + } + } + } + } + +} + +void gb_affinity_destroy(gbAffinity *a) { + gb_unused(a); +} + +b32 gb_affinity_set(gbAffinity *a, isize core, isize thread_index) { + isize index; + thread_t thread; + thread_affinity_policy_data_t info; + kern_return_t result; + + GB_ASSERT(core < a->core_count); + GB_ASSERT(thread_index < a->threads_per_core); + + index = core * a->threads_per_core + thread_index; + thread = mach_thread_self(); + info.affinity_tag = cast(integer_t)index; + result = thread_policy_set(thread, THREAD_AFFINITY_POLICY, cast(thread_policy_t)&info, THREAD_AFFINITY_POLICY_COUNT); + return result == KERN_SUCCESS; +} + +isize gb_affinity_thread_count_for_core(gbAffinity *a, isize core) { + GB_ASSERT(core >= 0 && core < a->core_count); + return a->threads_per_core; +} + +#elif defined(GB_SYSTEM_LINUX) +// IMPORTANT TODO(bill): This gbAffinity stuff for linux needs be improved a lot! +// NOTE(zangent): I have to read /proc/cpuinfo to get the number of threads per core. +#include + +void gb_affinity_init(gbAffinity *a) { + b32 accurate = true; + isize threads = 0; + + a->thread_count = 1; + a->core_count = sysconf(_SC_NPROCESSORS_ONLN); + a->threads_per_core = 1; + + + if(a->core_count <= 0) { + a->core_count = 1; + accurate = false; + } + + // Parsing /proc/cpuinfo to get the number of threads per core. + // NOTE(zangent): This calls the CPU's threads "cores", although the wording + // is kind of weird. This should be right, though. + + FILE* cpu_info = fopen("/proc/cpuinfo", "r"); + + if (cpu_info != NULL) { + for (;;) { + // The 'temporary char'. Everything goes into this char, + // so that we can check against EOF at the end of this loop. 
+ char c; + +#define AF__CHECK(letter) ((c = getc(cpu_info)) == letter) + if (AF__CHECK('c') && AF__CHECK('p') && AF__CHECK('u') && AF__CHECK(' ') && + AF__CHECK('c') && AF__CHECK('o') && AF__CHECK('r') && AF__CHECK('e') && AF__CHECK('s')) { + // We're on a CPU info line. + while (!AF__CHECK(EOF)) { + if (c == '\n') { + break; + } else if (c < '0' || '9' > c) { + continue; + } + threads = threads * 10 + (c - '0'); + } + break; + } else { + while (!AF__CHECK('\n')) { + if (c==EOF) { + break; + } + } + } + if (c == EOF) { + break; + } +#undef AF__CHECK + } + + fclose(cpu_info); + } + + if (threads == 0) { + threads = 1; + accurate = false; + } + + a->threads_per_core = threads; + a->thread_count = a->threads_per_core * a->core_count; + a->is_accurate = accurate; + +} + +void gb_affinity_destroy(gbAffinity *a) { + gb_unused(a); +} + +b32 gb_affinity_set(gbAffinity *a, isize core, isize thread_index) { + return true; +} + +isize gb_affinity_thread_count_for_core(gbAffinity *a, isize core) { + GB_ASSERT(0 <= core && core < a->core_count); + return a->threads_per_core; +} +#else +#error TODO(bill): Unknown system +#endif + + + + + + + + + +//////////////////////////////////////////////////////////////// +// +// Virtual Memory +// +// + +gbVirtualMemory gb_virtual_memory(void *data, isize size) { + gbVirtualMemory vm; + vm.data = data; + vm.size = size; + return vm; +} + + +#if defined(GB_SYSTEM_WINDOWS) +gb_inline gbVirtualMemory gb_vm_alloc(void *addr, isize size) { + gbVirtualMemory vm; + GB_ASSERT(size > 0); + vm.data = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); + vm.size = size; + return vm; +} + +gb_inline b32 gb_vm_free(gbVirtualMemory vm) { + MEMORY_BASIC_INFORMATION info; + while (vm.size > 0) { + if (VirtualQuery(vm.data, &info, gb_size_of(info)) == 0) + return false; + if (info.BaseAddress != vm.data || + info.AllocationBase != vm.data || + info.State != MEM_COMMIT || info.RegionSize > cast(usize)vm.size) { + return false; + } + if 
(VirtualFree(vm.data, 0, MEM_RELEASE) == 0) + return false; + vm.data = gb_pointer_add(vm.data, info.RegionSize); + vm.size -= info.RegionSize; + } + return true; +} + +gb_inline gbVirtualMemory gb_vm_trim(gbVirtualMemory vm, isize lead_size, isize size) { + gbVirtualMemory new_vm = {0}; + void *ptr; + GB_ASSERT(vm.size >= lead_size + size); + + ptr = gb_pointer_add(vm.data, lead_size); + + gb_vm_free(vm); + new_vm = gb_vm_alloc(ptr, size); + if (new_vm.data == ptr) + return new_vm; + if (new_vm.data) + gb_vm_free(new_vm); + return new_vm; +} + +gb_inline b32 gb_vm_purge(gbVirtualMemory vm) { + VirtualAlloc(vm.data, vm.size, MEM_RESET, PAGE_READWRITE); + // NOTE(bill): Can this really fail? + return true; +} + +isize gb_virtual_memory_page_size(isize *alignment_out) { + SYSTEM_INFO info; + GetSystemInfo(&info); + if (alignment_out) *alignment_out = info.dwAllocationGranularity; + return info.dwPageSize; +} + +#else + +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif + +gb_inline gbVirtualMemory gb_vm_alloc(void *addr, isize size) { + gbVirtualMemory vm; + GB_ASSERT(size > 0); + vm.data = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + vm.size = size; + return vm; +} + +gb_inline b32 gb_vm_free(gbVirtualMemory vm) { + munmap(vm.data, vm.size); + return true; +} + +gb_inline gbVirtualMemory gb_vm_trim(gbVirtualMemory vm, isize lead_size, isize size) { + void *ptr; + isize trail_size; + GB_ASSERT(vm.size >= lead_size + size); + + ptr = gb_pointer_add(vm.data, lead_size); + trail_size = vm.size - lead_size - size; + + if (lead_size != 0) + gb_vm_free(gb_virtual_memory(vm.data, lead_size)); + if (trail_size != 0) + gb_vm_free(gb_virtual_memory(ptr, trail_size)); + return gb_virtual_memory(ptr, size); + +} + +gb_inline b32 gb_vm_purge(gbVirtualMemory vm) { + int err = madvise(vm.data, vm.size, MADV_DONTNEED); + return err != 0; +} + +isize gb_virtual_memory_page_size(isize *alignment_out) { + // TODO(bill): Is this always 
true? + isize result = cast(isize)sysconf(_SC_PAGE_SIZE); + if (alignment_out) *alignment_out = result; + return result; +} + +#endif + + + + +//////////////////////////////////////////////////////////////// +// +// Custom Allocation +// +// + + +// +// Arena Allocator +// + +gb_inline void gb_arena_init_from_memory(gbArena *arena, void *start, isize size) { + arena->backing.proc = NULL; + arena->backing.data = NULL; + arena->physical_start = start; + arena->total_size = size; + arena->total_allocated = 0; + arena->temp_count = 0; +} + +gb_inline void gb_arena_init_from_allocator(gbArena *arena, gbAllocator backing, isize size) { + arena->backing = backing; + arena->physical_start = gb_alloc(backing, size); // NOTE(bill): Uses default alignment + arena->total_size = size; + arena->total_allocated = 0; + arena->temp_count = 0; +} + +gb_inline void gb_arena_init_sub(gbArena *arena, gbArena *parent_arena, isize size) { gb_arena_init_from_allocator(arena, gb_arena_allocator(parent_arena), size); } + + +gb_inline void gb_arena_free(gbArena *arena) { + if (arena->backing.proc) { + gb_free(arena->backing, arena->physical_start); + arena->physical_start = NULL; + } +} + + +gb_inline isize gb_arena_alignment_of(gbArena *arena, isize alignment) { + isize alignment_offset, result_pointer, mask; + GB_ASSERT(gb_is_power_of_two(alignment)); + + alignment_offset = 0; + result_pointer = cast(isize)arena->physical_start + arena->total_allocated; + mask = alignment - 1; + if (result_pointer & mask) + alignment_offset = alignment - (result_pointer & mask); + + return alignment_offset; +} + +gb_inline isize gb_arena_size_remaining(gbArena *arena, isize alignment) { + isize result = arena->total_size - (arena->total_allocated + gb_arena_alignment_of(arena, alignment)); + return result; +} + +gb_inline void gb_arena_check(gbArena *arena) { GB_ASSERT(arena->temp_count == 0); } + + + + + + +gb_inline gbAllocator gb_arena_allocator(gbArena *arena) { + gbAllocator allocator; + 
allocator.proc = gb_arena_allocator_proc; + allocator.data = arena; + return allocator; +} + +GB_ALLOCATOR_PROC(gb_arena_allocator_proc) { + gbArena *arena = cast(gbArena *)allocator_data; + void *ptr = NULL; + + gb_unused(old_size); + + switch (type) { + case gbAllocation_Alloc: { + void *end = gb_pointer_add(arena->physical_start, arena->total_allocated); + isize total_size = size + alignment; + + // NOTE(bill): Out of memory + if (arena->total_allocated + total_size > cast(isize)arena->total_size) { + gb_printf_err("Arena out of memory\n"); + return NULL; + } + + ptr = gb_align_forward(end, alignment); + arena->total_allocated += total_size; + if (flags & gbAllocatorFlag_ClearToZero) + gb_zero_size(ptr, size); + } break; + + case gbAllocation_Free: + // NOTE(bill): Free all at once + // Use Temp_Arena_Memory if you want to free a block + break; + + case gbAllocation_FreeAll: + arena->total_allocated = 0; + break; + + case gbAllocation_Resize: { + // TODO(bill): Check if ptr is on top of stack and just extend + gbAllocator a = gb_arena_allocator(arena); + ptr = gb_default_resize_align(a, old_memory, old_size, size, alignment); + } break; + } + return ptr; +} + + +gb_inline gbTempArenaMemory gb_temp_arena_memory_begin(gbArena *arena) { + gbTempArenaMemory tmp; + tmp.arena = arena; + tmp.original_count = arena->total_allocated; + arena->temp_count++; + return tmp; +} + +gb_inline void gb_temp_arena_memory_end(gbTempArenaMemory tmp) { + GB_ASSERT_MSG(tmp.arena->total_allocated >= tmp.original_count, + "%td >= %td", tmp.arena->total_allocated, tmp.original_count); + GB_ASSERT(tmp.arena->temp_count > 0); + tmp.arena->total_allocated = tmp.original_count; + tmp.arena->temp_count--; +} + + + + +// +// Pool Allocator +// + + +gb_inline void gb_pool_init(gbPool *pool, gbAllocator backing, isize num_blocks, isize block_size) { + gb_pool_init_align(pool, backing, num_blocks, block_size, GB_DEFAULT_MEMORY_ALIGNMENT); +} + +void gb_pool_init_align(gbPool *pool, gbAllocator 
backing, isize num_blocks, isize block_size, isize block_align) { + isize actual_block_size, pool_size, block_index; + void *data, *curr; + uintptr *end; + + gb_zero_item(pool); + + pool->backing = backing; + pool->block_size = block_size; + pool->block_align = block_align; + + actual_block_size = block_size + block_align; + pool_size = num_blocks * actual_block_size; + + data = gb_alloc_align(backing, pool_size, block_align); + + // NOTE(bill): Init intrusive freelist + curr = data; + for (block_index = 0; block_index < num_blocks-1; block_index++) { + uintptr *next = cast(uintptr *)curr; + *next = cast(uintptr)curr + actual_block_size; + curr = gb_pointer_add(curr, actual_block_size); + } + + end = cast(uintptr *)curr; + *end = cast(uintptr)NULL; + + pool->physical_start = data; + pool->free_list = data; +} + +gb_inline void gb_pool_free(gbPool *pool) { + if (pool->backing.proc) { + gb_free(pool->backing, pool->physical_start); + } +} + + +gb_inline gbAllocator gb_pool_allocator(gbPool *pool) { + gbAllocator allocator; + allocator.proc = gb_pool_allocator_proc; + allocator.data = pool; + return allocator; +} +GB_ALLOCATOR_PROC(gb_pool_allocator_proc) { + gbPool *pool = cast(gbPool *)allocator_data; + void *ptr = NULL; + + gb_unused(old_size); + + switch (type) { + case gbAllocation_Alloc: { + uintptr next_free; + GB_ASSERT(size == pool->block_size); + GB_ASSERT(alignment == pool->block_align); + GB_ASSERT(pool->free_list != NULL); + + next_free = *cast(uintptr *)pool->free_list; + ptr = pool->free_list; + pool->free_list = cast(void *)next_free; + pool->total_size += pool->block_size; + if (flags & gbAllocatorFlag_ClearToZero) + gb_zero_size(ptr, size); + } break; + + case gbAllocation_Free: { + uintptr *next; + if (old_memory == NULL) return NULL; + + next = cast(uintptr *)old_memory; + *next = cast(uintptr)pool->free_list; + pool->free_list = old_memory; + pool->total_size -= pool->block_size; + } break; + + case gbAllocation_FreeAll: + // TODO(bill): + break; 
+ + case gbAllocation_Resize: + // NOTE(bill): Cannot resize + GB_PANIC("You cannot resize something allocated by with a pool."); + break; + } + + return ptr; +} + + + + + +gb_inline gbAllocationHeader *gb_allocation_header(void *data) { + isize *p = cast(isize *)data; + while (p[-1] == cast(isize)(-1)) { + p--; + } + return cast(gbAllocationHeader *)p - 1; +} + +gb_inline void gb_allocation_header_fill(gbAllocationHeader *header, void *data, isize size) { + isize *ptr; + header->size = size; + ptr = cast(isize *)(header + 1); + while (cast(void *)ptr < data) { + *ptr++ = cast(isize)(-1); + } +} + + + +// +// Free List Allocator +// + +gb_inline void gb_free_list_init(gbFreeList *fl, void *start, isize size) { + GB_ASSERT(size > gb_size_of(gbFreeListBlock)); + + fl->physical_start = start; + fl->total_size = size; + fl->curr_block = cast(gbFreeListBlock *)start; + fl->curr_block->size = size; + fl->curr_block->next = NULL; +} + + +gb_inline void gb_free_list_init_from_allocator(gbFreeList *fl, gbAllocator backing, isize size) { + void *start = gb_alloc(backing, size); + gb_free_list_init(fl, start, size); +} + + + +gb_inline gbAllocator gb_free_list_allocator(gbFreeList *fl) { + gbAllocator a; + a.proc = gb_free_list_allocator_proc; + a.data = fl; + return a; +} + +GB_ALLOCATOR_PROC(gb_free_list_allocator_proc) { + gbFreeList *fl = cast(gbFreeList *)allocator_data; + void *ptr = NULL; + + GB_ASSERT_NOT_NULL(fl); + + switch (type) { + case gbAllocation_Alloc: { + gbFreeListBlock *prev_block = NULL; + gbFreeListBlock *curr_block = fl->curr_block; + + while (curr_block) { + isize total_size; + gbAllocationHeader *header; + + total_size = size + alignment + gb_size_of(gbAllocationHeader); + + if (curr_block->size < total_size) { + prev_block = curr_block; + curr_block = curr_block->next; + continue; + } + + if (curr_block->size - total_size <= gb_size_of(gbAllocationHeader)) { + total_size = curr_block->size; + + if (prev_block) + prev_block->next = curr_block->next; + 
else + fl->curr_block = curr_block->next; + } else { + // NOTE(bill): Create a new block for the remaining memory + gbFreeListBlock *next_block; + next_block = cast(gbFreeListBlock *)gb_pointer_add(curr_block, total_size); + + GB_ASSERT(cast(void *)next_block < gb_pointer_add(fl->physical_start, fl->total_size)); + + next_block->size = curr_block->size - total_size; + next_block->next = curr_block->next; + + if (prev_block) + prev_block->next = next_block; + else + fl->curr_block = next_block; + } + + + // TODO(bill): Set Header Info + header = cast(gbAllocationHeader *)curr_block; + ptr = gb_align_forward(header+1, alignment); + gb_allocation_header_fill(header, ptr, size); + + fl->total_allocated += total_size; + fl->allocation_count++; + + + if (flags & gbAllocatorFlag_ClearToZero) + gb_zero_size(ptr, size); + return ptr; + } + // NOTE(bill): if ptr == NULL, ran out of free list memory! FUCK! + return NULL; + } break; + + case gbAllocation_Free: { + gbAllocationHeader *header = gb_allocation_header(old_memory); + isize block_size = header->size; + uintptr block_start, block_end; + gbFreeListBlock *prev_block = NULL; + gbFreeListBlock *curr_block = fl->curr_block; + + block_start = cast(uintptr)header; + block_end = cast(uintptr)block_start + block_size; + + while (curr_block) { + if (cast(uintptr)curr_block >= block_end) + break; + prev_block = curr_block; + curr_block = curr_block->next; + } + + if (prev_block == NULL) { + prev_block = cast(gbFreeListBlock *)block_start; + prev_block->size = block_size; + prev_block->next = fl->curr_block; + + fl->curr_block = prev_block; + } else if ((cast(uintptr)prev_block + prev_block->size) == block_start) { + prev_block->size += block_size; + } else { + gbFreeListBlock *tmp = cast(gbFreeListBlock *)block_start; + tmp->size = block_size; + tmp->next = prev_block->next; + prev_block->next = tmp; + + prev_block = tmp; + } + + if (curr_block && (cast(uintptr)curr_block == block_end)) { + prev_block->size += curr_block->size; 
+ prev_block->next = curr_block->next; + } + + fl->allocation_count--; + fl->total_allocated -= block_size; + } break; + + case gbAllocation_FreeAll: + gb_free_list_init(fl, fl->physical_start, fl->total_size); + break; + + case gbAllocation_Resize: + ptr = gb_default_resize_align(gb_free_list_allocator(fl), old_memory, old_size, size, alignment); + break; + } + + return ptr; +} + + + +void gb_scratch_memory_init(gbScratchMemory *s, void *start, isize size) { + s->physical_start = start; + s->total_size = size; + s->alloc_point = start; + s->free_point = start; +} + + +b32 gb_scratch_memory_is_in_use(gbScratchMemory *s, void *ptr) { + if (s->free_point == s->alloc_point) return false; + if (s->alloc_point > s->free_point) + return ptr >= s->free_point && ptr < s->alloc_point; + return ptr >= s->free_point || ptr < s->alloc_point; +} + + +gbAllocator gb_scratch_allocator(gbScratchMemory *s) { + gbAllocator a; + a.proc = gb_scratch_allocator_proc; + a.data = s; + return a; +} + +GB_ALLOCATOR_PROC(gb_scratch_allocator_proc) { + gbScratchMemory *s = cast(gbScratchMemory *)allocator_data; + void *ptr = NULL; + GB_ASSERT_NOT_NULL(s); + + switch (type) { + case gbAllocation_Alloc: { + void *pt = s->alloc_point; + gbAllocationHeader *header = cast(gbAllocationHeader *)pt; + void *data = gb_align_forward(header+1, alignment); + void *end = gb_pointer_add(s->physical_start, s->total_size); + + GB_ASSERT(alignment % 4 == 0); + size = ((size + 3)/4)*4; + pt = gb_pointer_add(pt, size); + + // NOTE(bill): Wrap around + if (pt > end) { + header->size = gb_pointer_diff(header, end) | GB_ISIZE_HIGH_BIT; + pt = s->physical_start; + header = cast(gbAllocationHeader *)pt; + data = gb_align_forward(header+1, alignment); + pt = gb_pointer_add(pt, size); + } + + if (!gb_scratch_memory_is_in_use(s, pt)) { + gb_allocation_header_fill(header, pt, gb_pointer_diff(header, pt)); + s->alloc_point = cast(u8 *)pt; + ptr = data; + } + + if (flags & gbAllocatorFlag_ClearToZero) + gb_zero_size(ptr, 
size); + } break; + + case gbAllocation_Free: { + if (old_memory) { + void *end = gb_pointer_add(s->physical_start, s->total_size); + if (old_memory < s->physical_start || old_memory >= end) { + GB_ASSERT(false); + } else { + // NOTE(bill): Mark as free + gbAllocationHeader *h = gb_allocation_header(old_memory); + GB_ASSERT((h->size & GB_ISIZE_HIGH_BIT) == 0); + h->size = h->size | GB_ISIZE_HIGH_BIT; + + while (s->free_point != s->alloc_point) { + gbAllocationHeader *header = cast(gbAllocationHeader *)s->free_point; + if ((header->size & GB_ISIZE_HIGH_BIT) == 0) + break; + + s->free_point = gb_pointer_add(s->free_point, h->size & (~GB_ISIZE_HIGH_BIT)); + if (s->free_point == end) + s->free_point = s->physical_start; + } + } + } + } break; + + case gbAllocation_FreeAll: + s->alloc_point = s->physical_start; + s->free_point = s->physical_start; + break; + + case gbAllocation_Resize: + ptr = gb_default_resize_align(gb_scratch_allocator(s), old_memory, old_size, size, alignment); + break; + } + + return ptr; +} + + + + + + +//////////////////////////////////////////////////////////////// +// +// Sorting +// +// + +// TODO(bill): Should I make all the macros local? + +#define GB__COMPARE_PROC(Type) \ +gb_global isize gb__##Type##_cmp_offset; GB_COMPARE_PROC(gb__##Type##_cmp) { \ + Type const p = *cast(Type const *)gb_pointer_add_const(a, gb__##Type##_cmp_offset); \ + Type const q = *cast(Type const *)gb_pointer_add_const(b, gb__##Type##_cmp_offset); \ + return p < q ? 
-1 : p > q; \ +} \ +GB_COMPARE_PROC_PTR(gb_##Type##_cmp(isize offset)) { \ + gb__##Type##_cmp_offset = offset; \ + return &gb__##Type##_cmp; \ +} + + +GB__COMPARE_PROC(i16); +GB__COMPARE_PROC(i32); +GB__COMPARE_PROC(i64); +GB__COMPARE_PROC(isize); +GB__COMPARE_PROC(f32); +GB__COMPARE_PROC(f64); +GB__COMPARE_PROC(char); + +// NOTE(bill): str_cmp is special as it requires a funny type and funny comparison +gb_global isize gb__str_cmp_offset; GB_COMPARE_PROC(gb__str_cmp) { + char const *p = *cast(char const **)gb_pointer_add_const(a, gb__str_cmp_offset); + char const *q = *cast(char const **)gb_pointer_add_const(b, gb__str_cmp_offset); + return gb_strcmp(p, q); +} +GB_COMPARE_PROC_PTR(gb_str_cmp(isize offset)) { + gb__str_cmp_offset = offset; + return &gb__str_cmp; +} + +#undef GB__COMPARE_PROC + + + + +// TODO(bill): Make user definable? +#define GB__SORT_STACK_SIZE 64 +#define GB__SORT_INSERT_SORT_THRESHOLD 8 + +#define GB__SORT_PUSH(_base, _limit) do { \ + stack_ptr[0] = (_base); \ + stack_ptr[1] = (_limit); \ + stack_ptr += 2; \ +} while (0) + + +#define GB__SORT_POP(_base, _limit) do { \ + stack_ptr -= 2; \ + (_base) = stack_ptr[0]; \ + (_limit) = stack_ptr[1]; \ +} while (0) + + + +void gb_sort(void *base_, isize count, isize size, gbCompareProc cmp) { + u8 *i, *j; + u8 *base = cast(u8 *)base_; + u8 *limit = base + count*size; + isize threshold = GB__SORT_INSERT_SORT_THRESHOLD * size; + + // NOTE(bill): Prepare the stack + u8 *stack[GB__SORT_STACK_SIZE] = {0}; + u8 **stack_ptr = stack; + + for (;;) { + if ((limit-base) > threshold) { + // NOTE(bill): Quick sort + i = base + size; + j = limit - size; + + gb_memswap(((limit-base)/size/2) * size + base, base, size); + if (cmp(i, j) > 0) gb_memswap(i, j, size); + if (cmp(base, j) > 0) gb_memswap(base, j, size); + if (cmp(i, base) > 0) gb_memswap(i, base, size); + + for (;;) { + do i += size; while (cmp(i, base) < 0); + do j -= size; while (cmp(j, base) > 0); + if (i > j) break; + gb_memswap(i, j, size); + } + + 
gb_memswap(base, j, size); + + if (j - base > limit - i) { + GB__SORT_PUSH(base, j); + base = i; + } else { + GB__SORT_PUSH(i, limit); + limit = j; + } + } else { + // NOTE(bill): Insertion sort + for (j = base, i = j+size; + i < limit; + j = i, i += size) { + for (; cmp(j, j+size) > 0; j -= size) { + gb_memswap(j, j+size, size); + if (j == base) break; + } + } + + if (stack_ptr == stack) break; // NOTE(bill): Sorting is done! + GB__SORT_POP(base, limit); + } + } +} + +#undef GB__SORT_PUSH +#undef GB__SORT_POP + + +#define GB_RADIX_SORT_PROC_GEN(Type) GB_RADIX_SORT_PROC(Type) { \ + Type *source = items; \ + Type *dest = temp; \ + isize byte_index, i, byte_max = 8*gb_size_of(Type); \ + for (byte_index = 0; byte_index < byte_max; byte_index += 8) { \ + isize offsets[256] = {0}; \ + isize total = 0; \ + /* NOTE(bill): First pass - count how many of each key */ \ + for (i = 0; i < count; i++) { \ + Type radix_value = source[i]; \ + Type radix_piece = (radix_value >> byte_index) & 0xff; \ + offsets[radix_piece]++; \ + } \ + /* NOTE(bill): Change counts to offsets */ \ + for (i = 0; i < gb_count_of(offsets); i++) { \ + isize skcount = offsets[i]; \ + offsets[i] = total; \ + total += skcount; \ + } \ + /* NOTE(bill): Second pass - place elements into the right location */ \ + for (i = 0; i < count; i++) { \ + Type radix_value = source[i]; \ + Type radix_piece = (radix_value >> byte_index) & 0xff; \ + dest[offsets[radix_piece]++] = source[i]; \ + } \ + gb_swap(Type *, source, dest); \ + } \ +} + +GB_RADIX_SORT_PROC_GEN(u8); +GB_RADIX_SORT_PROC_GEN(u16); +GB_RADIX_SORT_PROC_GEN(u32); +GB_RADIX_SORT_PROC_GEN(u64); + +gb_inline isize gb_binary_search(void const *base, isize count, isize size, void const *key, gbCompareProc compare_proc) { + isize start = 0; + isize end = count; + + while (start < end) { + isize mid = start + (end-start)/2; + isize result = compare_proc(key, cast(u8 *)base + mid*size); + if (result < 0) + end = mid; + else if (result > 0) + start = mid+1; + 
else + return mid; + } + + return -1; +} + +void gb_shuffle(void *base, isize count, isize size) { + u8 *a; + isize i, j; + gbRandom random; gb_random_init(&random); + + a = cast(u8 *)base + (count-1) * size; + for (i = count; i > 1; i--) { + j = gb_random_gen_isize(&random) % i; + gb_memswap(a, cast(u8 *)base + j*size, size); + a -= size; + } +} + +void gb_reverse(void *base, isize count, isize size) { + isize i, j = count-1; + for (i = 0; i < j; i++, j--) { /* FIX: was (i++, j++) — both indices advanced together, so i < j never became false: infinite loop and out-of-bounds swaps for count > 1 */ + gb_memswap(cast(u8 *)base + i*size, cast(u8 *)base + j*size, size); + } +} + + + +//////////////////////////////////////////////////////////////// +// +// Char things +// +// + + + + +gb_inline char gb_char_to_lower(char c) { + if (c >= 'A' && c <= 'Z') + return 'a' + (c - 'A'); + return c; +} + +gb_inline char gb_char_to_upper(char c) { + if (c >= 'a' && c <= 'z') + return 'A' + (c - 'a'); + return c; +} + +gb_inline b32 gb_char_is_space(char c) { + if (c == ' ' || + c == '\t' || + c == '\n' || + c == '\r' || + c == '\f' || + c == '\v') + return true; + return false; +} + +gb_inline b32 gb_char_is_digit(char c) { + if (c >= '0' && c <= '9') + return true; + return false; +} + +gb_inline b32 gb_char_is_hex_digit(char c) { + if (gb_char_is_digit(c) || + (c >= 'a' && c <= 'f') || + (c >= 'A' && c <= 'F')) + return true; + return false; +} + +gb_inline b32 gb_char_is_alpha(char c) { + if ((c >= 'A' && c <= 'Z') || + (c >= 'a' && c <= 'z')) + return true; + return false; +} + +gb_inline b32 gb_char_is_alphanumeric(char c) { + return gb_char_is_alpha(c) || gb_char_is_digit(c); +} + +gb_inline i32 gb_digit_to_int(char c) { + return gb_char_is_digit(c) ? 
c - '0' : c - 'W'; +} + +gb_inline i32 gb_hex_digit_to_int(char c) { + if (gb_char_is_digit(c)) + return gb_digit_to_int(c); + else if (gb_is_between(c, 'a', 'f')) + return c - 'a' + 10; + else if (gb_is_between(c, 'A', 'F')) + return c - 'A' + 10; + return -1; +} + + + + +gb_inline void gb_str_to_lower(char *str) { + if (!str) return; + while (*str) { + *str = gb_char_to_lower(*str); + str++; + } +} + +gb_inline void gb_str_to_upper(char *str) { + if (!str) return; + while (*str) { + *str = gb_char_to_upper(*str); + str++; + } +} + + +gb_inline isize gb_strlen(char const *str) { + char const *begin = str; + isize const *w; + if (str == NULL) { + return 0; + } + while (cast(uintptr)str % sizeof(usize)) { + if (!*str) + return str - begin; + str++; + } + w = cast(isize const *)str; + while (!GB__HAS_ZERO(*w)) { + w++; + } + str = cast(char const *)w; + while (*str) { + str++; + } + return str - begin; +} + +gb_inline isize gb_strnlen(char const *str, isize max_len) { + char const *end = cast(char const *)gb_memchr(str, 0, max_len); + if (end) { + return end - str; + } + return max_len; +} + +gb_inline isize gb_utf8_strlen(u8 const *str) { + isize count = 0; + for (; *str; count++) { + u8 c = *str; + isize inc = 0; + if (c < 0x80) inc = 1; + else if ((c & 0xe0) == 0xc0) inc = 2; + else if ((c & 0xf0) == 0xe0) inc = 3; + else if ((c & 0xf8) == 0xf0) inc = 4; + else return -1; + + str += inc; + } + return count; +} + +gb_inline isize gb_utf8_strnlen(u8 const *str, isize max_len) { + isize count = 0; + for (; *str && max_len > 0; count++) { + u8 c = *str; + isize inc = 0; + if (c < 0x80) inc = 1; + else if ((c & 0xe0) == 0xc0) inc = 2; + else if ((c & 0xf0) == 0xe0) inc = 3; + else if ((c & 0xf8) == 0xf0) inc = 4; + else return -1; + + str += inc; + max_len -= inc; + } + return count; +} + + +gb_inline i32 gb_strcmp(char const *s1, char const *s2) { + while (*s1 && (*s1 == *s2)) { + s1++, s2++; + } + return *(u8 *)s1 - *(u8 *)s2; +} + +gb_inline char *gb_strcpy(char 
*dest, char const *source) { + GB_ASSERT_NOT_NULL(dest); + if (source) { + char *str = dest; + while (*source) *str++ = *source++; + } + return dest; +} + + +gb_inline char *gb_strncpy(char *dest, char const *source, isize len) { + GB_ASSERT_NOT_NULL(dest); + if (source) { + char *str = dest; + while (len > 0 && *source) { + *str++ = *source++; + len--; + } + while (len > 0) { + *str++ = '\0'; + len--; + } + } + return dest; +} + +gb_inline isize gb_strlcpy(char *dest, char const *source, isize len) { + isize result = 0; + GB_ASSERT_NOT_NULL(dest); + if (source) { + char const *source_start = source; + char *str = dest; + while (len > 0 && *source) { + *str++ = *source++; + len--; + } + while (len > 0) { + *str++ = '\0'; + len--; + } + + result = source - source_start; + } + return result; +} + +gb_inline char *gb_strrev(char *str) { + isize len = gb_strlen(str); + char *a = str + 0; + char *b = str + len-1; + len /= 2; + while (len--) { + gb_swap(char, *a, *b); + a++, b--; + } + return str; +} + + + + +gb_inline i32 gb_strncmp(char const *s1, char const *s2, isize len) { + for (; len > 0; + s1++, s2++, len--) { + if (*s1 != *s2) { + return ((s1 < s2) ? -1 : +1); + } else if (*s1 == '\0') { + return 0; + } + } + return 0; +} + + +gb_inline char const *gb_strtok(char *output, char const *src, char const *delimit) { + while (*src && gb_char_first_occurence(delimit, *src) != NULL) { + *output++ = *src++; + } + + *output = 0; + return *src ? 
src+1 : src; +} + +gb_inline b32 gb_str_has_prefix(char const *str, char const *prefix) { + while (*prefix) { + if (*str++ != *prefix++) { + return false; + } + } + return true; +} + +gb_inline b32 gb_str_has_suffix(char const *str, char const *suffix) { + isize i = gb_strlen(str); + isize j = gb_strlen(suffix); + if (j <= i) { + return gb_strcmp(str+i-j, suffix) == 0; + } + return false; +} + + + + +gb_inline char const *gb_char_first_occurence(char const *s, char c) { + char ch = c; + for (; *s != ch; s++) { + if (*s == '\0') { + return NULL; + } + } + return s; +} + + +gb_inline char const *gb_char_last_occurence(char const *s, char c) { + char const *result = NULL; + do { + if (*s == c) { + result = s; + } + } while (*s++); + + return result; +} + + + +gb_inline void gb_str_concat(char *dest, isize dest_len, + char const *src_a, isize src_a_len, + char const *src_b, isize src_b_len) { + GB_ASSERT(dest_len >= src_a_len+src_b_len+1); + if (dest) { + gb_memcopy(dest, src_a, src_a_len); + gb_memcopy(dest+src_a_len, src_b, src_b_len); + dest[src_a_len+src_b_len] = '\0'; + } +} + + +gb_internal isize gb__scan_i64(char const *text, i32 base, i64 *value) { + char const *text_begin = text; + i64 result = 0; + b32 negative = false; + + if (*text == '-') { + negative = true; + text++; + } + + if (base == 16 && gb_strncmp(text, "0x", 2) == 0) { + text += 2; + } + + for (;;) { + i64 v; + if (gb_char_is_digit(*text)) { + v = *text - '0'; + } else if (base == 16 && gb_char_is_hex_digit(*text)) { + v = gb_hex_digit_to_int(*text); + } else { + break; + } + + result *= base; + result += v; + text++; + } + + if (value) { + if (negative) result = -result; + *value = result; + } + + return (text - text_begin); +} + +gb_internal isize gb__scan_u64(char const *text, i32 base, u64 *value) { + char const *text_begin = text; + u64 result = 0; + + if (base == 16 && gb_strncmp(text, "0x", 2) == 0) { + text += 2; + } + + for (;;) { + u64 v; + if (gb_char_is_digit(*text)) { + v = *text - 
'0'; + } else if (base == 16 && gb_char_is_hex_digit(*text)) { + v = gb_hex_digit_to_int(*text); + } else { + break; + } + + result *= base; + result += v; + text++; + } + + if (value) *value = result; + return (text - text_begin); +} + + +// TODO(bill): Make better +u64 gb_str_to_u64(char const *str, char **end_ptr, i32 base) { + isize len; + u64 value = 0; + + if (!base) { + if ((gb_strlen(str) > 2) && (gb_strncmp(str, "0x", 2) == 0)) { + base = 16; + } else { + base = 10; + } + } + + len = gb__scan_u64(str, base, &value); + if (end_ptr) *end_ptr = (char *)str + len; + return value; +} + +i64 gb_str_to_i64(char const *str, char **end_ptr, i32 base) { + isize len; + i64 value; + + if (!base) { + if ((gb_strlen(str) > 2) && (gb_strncmp(str, "0x", 2) == 0)) { + base = 16; + } else { + base = 10; + } + } + + len = gb__scan_i64(str, base, &value); + if (end_ptr) *end_ptr = (char *)str + len; + return value; +} + +// TODO(bill): Are these good enough for characters? +gb_global char const gb__num_to_char_table[] = + "0123456789" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz" + "@$"; + +gb_inline void gb_i64_to_str(i64 value, char *string, i32 base) { + char *buf = string; + b32 negative = false; + u64 v; + if (value < 0) { + negative = true; + value = -value; + } + v = cast(u64)value; + if (v != 0) { + while (v > 0) { + *buf++ = gb__num_to_char_table[v % base]; + v /= base; + } + } else { + *buf++ = '0'; + } + if (negative) { + *buf++ = '-'; + } + *buf = '\0'; + gb_strrev(string); +} + + + +gb_inline void gb_u64_to_str(u64 value, char *string, i32 base) { + char *buf = string; + + if (value) { + while (value > 0) { + *buf++ = gb__num_to_char_table[value % base]; + value /= base; + } + } else { + *buf++ = '0'; + } + *buf = '\0'; + + gb_strrev(string); +} + +gb_inline f32 gb_str_to_f32(char const *str, char **end_ptr) { + f64 f = gb_str_to_f64(str, end_ptr); + f32 r = cast(f32)f; + return r; +} + +gb_inline f64 gb_str_to_f64(char const *str, char **end_ptr) 
{ + f64 result, value, sign, scale; + i32 frac; + + while (gb_char_is_space(*str)) { + str++; + } + + sign = 1.0; + if (*str == '-') { + sign = -1.0; + str++; + } else if (*str == '+') { + str++; + } + + for (value = 0.0; gb_char_is_digit(*str); str++) { + value = value * 10.0 + (*str-'0'); + } + + if (*str == '.') { + f64 pow10 = 10.0; + str++; + while (gb_char_is_digit(*str)) { + value += (*str-'0') / pow10; + pow10 *= 10.0; + str++; + } + } + + frac = 0; + scale = 1.0; + if ((*str == 'e') || (*str == 'E')) { + u32 exp; + + str++; + if (*str == '-') { + frac = 1; + str++; + } else if (*str == '+') { + str++; + } + + for (exp = 0; gb_char_is_digit(*str); str++) { + exp = exp * 10 + (*str-'0'); + } + if (exp > 308) exp = 308; + + while (exp >= 50) { scale *= 1e50; exp -= 50; } + while (exp >= 8) { scale *= 1e8; exp -= 8; } + while (exp > 0) { scale *= 10.0; exp -= 1; } + } + + result = sign * (frac ? (value / scale) : (value * scale)); + + if (end_ptr) *end_ptr = cast(char *)str; + + return result; +} + + + + + + + +gb_inline void gb__set_string_length (gbString str, isize len) { GB_STRING_HEADER(str)->length = len; } +gb_inline void gb__set_string_capacity(gbString str, isize cap) { GB_STRING_HEADER(str)->capacity = cap; } + + +gbString gb_string_make_reserve(gbAllocator a, isize capacity) { + isize header_size = gb_size_of(gbStringHeader); + void *ptr = gb_alloc(a, header_size + capacity + 1); + + gbString str; + gbStringHeader *header; + + if (ptr == NULL) return NULL; + gb_zero_size(ptr, header_size + capacity + 1); + + str = cast(char *)ptr + header_size; + header = GB_STRING_HEADER(str); + header->allocator = a; + header->length = 0; + header->capacity = capacity; + str[capacity] = '\0'; + + return str; +} + + +gb_inline gbString gb_string_make(gbAllocator a, char const *str) { + isize len = str ? 
gb_strlen(str) : 0; + return gb_string_make_length(a, str, len); +} + +gbString gb_string_make_length(gbAllocator a, void const *init_str, isize num_bytes) { + isize header_size = gb_size_of(gbStringHeader); + void *ptr = gb_alloc(a, header_size + num_bytes + 1); + + gbString str; + gbStringHeader *header; + + if (ptr == NULL) return NULL; + if (!init_str) gb_zero_size(ptr, header_size + num_bytes + 1); + + str = cast(char *)ptr + header_size; + header = GB_STRING_HEADER(str); + header->allocator = a; + header->length = num_bytes; + header->capacity = num_bytes; + if (num_bytes && init_str) { + gb_memcopy(str, init_str, num_bytes); + } + str[num_bytes] = '\0'; + + return str; +} + +gb_inline void gb_string_free(gbString str) { + if (str) { + gbStringHeader *header = GB_STRING_HEADER(str); + gb_free(header->allocator, header); + } + +} + +gb_inline gbString gb_string_duplicate(gbAllocator a, gbString const str) { return gb_string_make_length(a, str, gb_string_length(str)); } + +gb_inline isize gb_string_length (gbString const str) { return GB_STRING_HEADER(str)->length; } +gb_inline isize gb_string_capacity(gbString const str) { return GB_STRING_HEADER(str)->capacity; } + +gb_inline isize gb_string_available_space(gbString const str) { + gbStringHeader *h = GB_STRING_HEADER(str); + if (h->capacity > h->length) { + return h->capacity - h->length; + } + return 0; +} + + +gb_inline void gb_string_clear(gbString str) { gb__set_string_length(str, 0); str[0] = '\0'; } + +gb_inline gbString gb_string_append(gbString str, gbString const other) { return gb_string_append_length(str, other, gb_string_length(other)); } + +gbString gb_string_append_length(gbString str, void const *other, isize other_len) { + if (other_len > 0) { + isize curr_len = gb_string_length(str); + + str = gb_string_make_space_for(str, other_len); + if (str == NULL) { + return NULL; + } + + gb_memcopy(str + curr_len, other, other_len); + str[curr_len + other_len] = '\0'; + gb__set_string_length(str, 
curr_len + other_len); + } + return str; +} + +gb_inline gbString gb_string_appendc(gbString str, char const *other) { + return gb_string_append_length(str, other, gb_strlen(other)); +} + +gbString gb_string_append_rune(gbString str, Rune r) { + if (r >= 0) { + u8 buf[8] = {0}; + isize len = gb_utf8_encode_rune(buf, r); + return gb_string_append_length(str, buf, len); + } + return str; +} + +gbString gb_string_append_fmt(gbString str, char const *fmt, ...) { + isize res; + char buf[4096] = {0}; + va_list va; + va_start(va, fmt); + res = gb_snprintf_va(buf, gb_count_of(buf)-1, fmt, va)-1; + va_end(va); + return gb_string_append_length(str, buf, res); +} + + + +gbString gb_string_set(gbString str, char const *cstr) { + isize len = gb_strlen(cstr); + if (gb_string_capacity(str) < len) { + str = gb_string_make_space_for(str, len - gb_string_length(str)); + if (str == NULL) { + return NULL; + } + } + + gb_memcopy(str, cstr, len); + str[len] = '\0'; + gb__set_string_length(str, len); + + return str; +} + + + +gbString gb_string_make_space_for(gbString str, isize add_len) { + isize available = gb_string_available_space(str); + + // NOTE(bill): Return if there is enough space left + if (available >= add_len) { + return str; + } else { + isize new_len, old_size, new_size; + void *ptr, *new_ptr; + gbAllocator a = GB_STRING_HEADER(str)->allocator; + gbStringHeader *header; + + new_len = gb_string_length(str) + add_len; + ptr = GB_STRING_HEADER(str); + old_size = gb_size_of(gbStringHeader) + gb_string_length(str) + 1; + new_size = gb_size_of(gbStringHeader) + new_len + 1; + + new_ptr = gb_resize(a, ptr, old_size, new_size); + if (new_ptr == NULL) return NULL; + + header = cast(gbStringHeader *)new_ptr; + header->allocator = a; + + str = cast(gbString)(header+1); + gb__set_string_capacity(str, new_len); + + return str; + } +} + +gb_inline isize gb_string_allocation_size(gbString const str) { + isize cap = gb_string_capacity(str); + return gb_size_of(gbStringHeader) + cap; +} + 
+ +gb_inline b32 gb_string_are_equal(gbString const lhs, gbString const rhs) { + isize lhs_len, rhs_len, i; + lhs_len = gb_string_length(lhs); + rhs_len = gb_string_length(rhs); + if (lhs_len != rhs_len) { + return false; + } + + for (i = 0; i < lhs_len; i++) { + if (lhs[i] != rhs[i]) { + return false; + } + } + + return true; +} + + +gbString gb_string_trim(gbString str, char const *cut_set) { + char *start, *end, *start_pos, *end_pos; + isize len; + + start_pos = start = str; + end_pos = end = str + gb_string_length(str) - 1; + + while (start_pos <= end && gb_char_first_occurence(cut_set, *start_pos)) { + start_pos++; + } + while (end_pos > start_pos && gb_char_first_occurence(cut_set, *end_pos)) { + end_pos--; + } + + len = cast(isize)((start_pos > end_pos) ? 0 : ((end_pos - start_pos)+1)); + + if (str != start_pos) + gb_memmove(str, start_pos, len); + str[len] = '\0'; + + gb__set_string_length(str, len); + + return str; +} + +gb_inline gbString gb_string_trim_space(gbString str) { return gb_string_trim(str, " \t\r\n\v\f"); } + + + + +//////////////////////////////////////////////////////////////// +// +// Windows UTF-8 Handling +// +// + + +u16 *gb_utf8_to_ucs2(u16 *buffer, isize len, u8 const *str) { + Rune c; + isize i = 0; + len--; + while (*str) { + if (i >= len) + return NULL; + if (!(*str & 0x80)) { + buffer[i++] = *str++; + } else if ((*str & 0xe0) == 0xc0) { + if (*str < 0xc2) + return NULL; + c = (*str++ & 0x1f) << 6; + if ((*str & 0xc0) != 0x80) + return NULL; + buffer[i++] = cast(u16)(c + (*str++ & 0x3f)); + } else if ((*str & 0xf0) == 0xe0) { + if (*str == 0xe0 && + (str[1] < 0xa0 || str[1] > 0xbf)) + return NULL; + if (*str == 0xed && str[1] > 0x9f) // str[1] < 0x80 is checked below + return NULL; + c = (*str++ & 0x0f) << 12; + if ((*str & 0xc0) != 0x80) + return NULL; + c += (*str++ & 0x3f) << 6; + if ((*str & 0xc0) != 0x80) + return NULL; + buffer[i++] = cast(u16)(c + (*str++ & 0x3f)); + } else if ((*str & 0xf8) == 0xf0) { + if (*str > 0xf4) + 
return NULL; + if (*str == 0xf0 && (str[1] < 0x90 || str[1] > 0xbf)) + return NULL; + if (*str == 0xf4 && str[1] > 0x8f) // str[1] < 0x80 is checked below + return NULL; + c = (*str++ & 0x07) << 18; + if ((*str & 0xc0) != 0x80) + return NULL; + c += (*str++ & 0x3f) << 12; + if ((*str & 0xc0) != 0x80) + return NULL; + c += (*str++ & 0x3f) << 6; + if ((*str & 0xc0) != 0x80) + return NULL; + c += (*str++ & 0x3f); + // UTF-8 encodings of values used in surrogate pairs are invalid + if ((c & 0xfffff800) == 0xd800) + return NULL; + if (c >= 0x10000) { + c -= 0x10000; + if (i+2 > len) + return NULL; + buffer[i++] = 0xd800 | (0x3ff & (c>>10)); + buffer[i++] = 0xdc00 | (0x3ff & (c )); + } + } else { + return NULL; + } + } + buffer[i] = 0; + return buffer; +} + +u8 *gb_ucs2_to_utf8(u8 *buffer, isize len, u16 const *str) { + isize i = 0; + len--; + while (*str) { + if (*str < 0x80) { + if (i+1 > len) + return NULL; + buffer[i++] = (char) *str++; + } else if (*str < 0x800) { + if (i+2 > len) + return NULL; + buffer[i++] = cast(char)(0xc0 + (*str >> 6)); + buffer[i++] = cast(char)(0x80 + (*str & 0x3f)); + str += 1; + } else if (*str >= 0xd800 && *str < 0xdc00) { + Rune c; + if (i+4 > len) + return NULL; + c = ((str[0] - 0xd800) << 10) + ((str[1]) - 0xdc00) + 0x10000; + buffer[i++] = cast(char)(0xf0 + (c >> 18)); + buffer[i++] = cast(char)(0x80 + ((c >> 12) & 0x3f)); + buffer[i++] = cast(char)(0x80 + ((c >> 6) & 0x3f)); + buffer[i++] = cast(char)(0x80 + ((c ) & 0x3f)); + str += 2; + } else if (*str >= 0xdc00 && *str < 0xe000) { + return NULL; + } else { + if (i+3 > len) + return NULL; + buffer[i++] = 0xe0 + (*str >> 12); + buffer[i++] = 0x80 + ((*str >> 6) & 0x3f); + buffer[i++] = 0x80 + ((*str ) & 0x3f); + str += 1; + } + } + buffer[i] = 0; + return buffer; +} + +u16 *gb_utf8_to_ucs2_buf(u8 const *str) { // NOTE(bill): Uses locally persisting buffer + gb_local_persist u16 buf[4096]; + return gb_utf8_to_ucs2(buf, gb_count_of(buf), str); +} + +u8 *gb_ucs2_to_utf8_buf(u16 const 
*str) { // NOTE(bill): Uses locally persisting buffer + gb_local_persist u8 buf[4096]; + return gb_ucs2_to_utf8(buf, gb_count_of(buf), str); +} + + + +gb_global u8 const gb__utf8_first[256] = { + 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, // 0x00-0x0F + 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, // 0x10-0x1F + 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, // 0x20-0x2F + 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, // 0x30-0x3F + 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, // 0x40-0x4F + 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, // 0x50-0x5F + 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, // 0x60-0x6F + 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, // 0x70-0x7F + 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, // 0x80-0x8F + 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, // 0x90-0x9F + 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, // 0xA0-0xAF + 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, // 0xB0-0xBF + 0xf1, 0xf1, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, // 0xC0-0xCF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, // 0xD0-0xDF + 0x13, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x23, 0x03, 0x03, // 0xE0-0xEF + 0x34, 0x04, 0x04, 0x04, 0x44, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, // 0xF0-0xFF +}; + + +typedef struct 
gbUtf8AcceptRange { + u8 lo, hi; +} gbUtf8AcceptRange; + +gb_global gbUtf8AcceptRange const gb__utf8_accept_ranges[] = { + {0x80, 0xbf}, + {0xa0, 0xbf}, + {0x80, 0x9f}, + {0x90, 0xbf}, + {0x80, 0x8f}, +}; + + +isize gb_utf8_decode(u8 const *str, isize str_len, Rune *codepoint_out) { + isize width = 0; + Rune codepoint = GB_RUNE_INVALID; + + if (str_len > 0) { + u8 s0 = str[0]; + u8 x = gb__utf8_first[s0], sz; + u8 b1, b2, b3; + gbUtf8AcceptRange accept; + if (x >= 0xf0) { + Rune mask = (cast(Rune)x << 31) >> 31; + codepoint = (cast(Rune)s0 & (~mask)) | (GB_RUNE_INVALID & mask); + width = 1; + goto end; + } + if (s0 < 0x80) { + codepoint = s0; + width = 1; + goto end; + } + + sz = x&7; + accept = gb__utf8_accept_ranges[x>>4]; + if (str_len < gb_size_of(sz)) + goto invalid_codepoint; + + b1 = str[1]; + if (b1 < accept.lo || accept.hi < b1) + goto invalid_codepoint; + + if (sz == 2) { + codepoint = (cast(Rune)s0&0x1f)<<6 | (cast(Rune)b1&0x3f); + width = 2; + goto end; + } + + b2 = str[2]; + if (!gb_is_between(b2, 0x80, 0xbf)) + goto invalid_codepoint; + + if (sz == 3) { + codepoint = (cast(Rune)s0&0x1f)<<12 | (cast(Rune)b1&0x3f)<<6 | (cast(Rune)b2&0x3f); + width = 3; + goto end; + } + + b3 = str[3]; + if (!gb_is_between(b3, 0x80, 0xbf)) + goto invalid_codepoint; + + codepoint = (cast(Rune)s0&0x07)<<18 | (cast(Rune)b1&0x3f)<<12 | (cast(Rune)b2&0x3f)<<6 | (cast(Rune)b3&0x3f); + width = 4; + goto end; + + invalid_codepoint: + codepoint = GB_RUNE_INVALID; + width = 1; + } + +end: + if (codepoint_out) *codepoint_out = codepoint; + return width; +} + +isize gb_utf8_codepoint_size(u8 const *str, isize str_len) { + isize i = 0; + for (; i < str_len && str[i]; i++) { + if ((str[i] & 0xc0) != 0x80) + break; + } + return i+1; +} + +isize gb_utf8_encode_rune(u8 buf[4], Rune r) { + u32 i = cast(u32)r; + u8 mask = 0x3f; + if (i <= (1<<7)-1) { + buf[0] = cast(u8)r; + return 1; + } + if (i <= (1<<11)-1) { + buf[0] = 0xc0 | cast(u8)(r>>6); + buf[1] = 0x80 | (cast(u8)(r)&mask); + 
return 2; + } + + // Invalid or Surrogate range + if (i > GB_RUNE_MAX || + gb_is_between(i, 0xd800, 0xdfff)) { + r = GB_RUNE_INVALID; + + buf[0] = 0xe0 | cast(u8)(r>>12); + buf[1] = 0x80 | (cast(u8)(r>>6)&mask); + buf[2] = 0x80 | (cast(u8)(r)&mask); + return 3; + } + + if (i <= (1<<16)-1) { + buf[0] = 0xe0 | cast(u8)(r>>12); + buf[1] = 0x80 | (cast(u8)(r>>6)&mask); + buf[2] = 0x80 | (cast(u8)(r)&mask); + return 3; + } + + buf[0] = 0xf0 | cast(u8)(r>>18); + buf[1] = 0x80 | (cast(u8)(r>>12)&mask); + buf[2] = 0x80 | (cast(u8)(r>>6)&mask); + buf[3] = 0x80 | (cast(u8)(r)&mask); + return 4; +} + + + + +//////////////////////////////////////////////////////////////// +// +// gbArray +// +// + + +gb_no_inline void *gb__array_set_capacity(void *array, isize capacity, isize element_size) { + gbArrayHeader *h = GB_ARRAY_HEADER(array); + + GB_ASSERT(element_size > 0); + + if (capacity == h->capacity) + return array; + + if (capacity < h->count) { + if (h->capacity < capacity) { + isize new_capacity = GB_ARRAY_GROW_FORMULA(h->capacity); + if (new_capacity < capacity) + new_capacity = capacity; + gb__array_set_capacity(array, new_capacity, element_size); + } + h->count = capacity; + } + + { + isize size = gb_size_of(gbArrayHeader) + element_size*capacity; + gbArrayHeader *nh = cast(gbArrayHeader *)gb_alloc(h->allocator, size); + gb_memmove(nh, h, gb_size_of(gbArrayHeader) + element_size*h->count); + nh->allocator = h->allocator; + nh->count = h->count; + nh->capacity = capacity; + gb_free(h->allocator, h); + return nh+1; + } +} + + +//////////////////////////////////////////////////////////////// +// +// Hashing functions +// +// + +u32 gb_adler32(void const *data, isize len) { + u32 const MOD_ALDER = 65521; + u32 a = 1, b = 0; + isize i, block_len; + u8 const *bytes = cast(u8 const *)data; + + block_len = len % 5552; + + while (len) { + for (i = 0; i+7 < block_len; i += 8) { + a += bytes[0], b += a; + a += bytes[1], b += a; + a += bytes[2], b += a; + a += bytes[3], b += a; + a 
+= bytes[4], b += a; + a += bytes[5], b += a; + a += bytes[6], b += a; + a += bytes[7], b += a; + + bytes += 8; + } + for (; i < block_len; i++) { + a += *bytes++, b += a; + } + + a %= MOD_ALDER, b %= MOD_ALDER; + len -= block_len; + block_len = 5552; + } + + return (b << 16) | a; +} + + +gb_global u32 const GB__CRC32_TABLE[256] = { + 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, + 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, + 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, + 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, + 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, + 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, + 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, + 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, + 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, + 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, + 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, + 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, + 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, + 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, + 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, + 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, + 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, + 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, + 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, + 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, + 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, + 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, + 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, + 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, + 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, + 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, + 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, + 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, + 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, + 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, + 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, + 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, + 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, + 0xead54739, 
0x9dd277af, 0x04db2615, 0x73dc1683, + 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, + 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, + 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, + 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, + 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, + 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, + 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, + 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, + 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, + 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, + 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, + 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, + 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, + 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, + 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, + 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, + 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, + 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, + 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, + 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, + 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, + 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, + 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, + 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, + 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, + 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, + 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, + 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, + 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, + 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d, +}; + +gb_global u64 const GB__CRC64_TABLE[256] = { + 0x0000000000000000ull, 0x42f0e1eba9ea3693ull, 0x85e1c3d753d46d26ull, 0xc711223cfa3e5bb5ull, + 0x493366450e42ecdfull, 0x0bc387aea7a8da4cull, 0xccd2a5925d9681f9ull, 0x8e224479f47cb76aull, + 0x9266cc8a1c85d9beull, 0xd0962d61b56fef2dull, 0x17870f5d4f51b498ull, 0x5577eeb6e6bb820bull, + 0xdb55aacf12c73561ull, 0x99a54b24bb2d03f2ull, 0x5eb4691841135847ull, 0x1c4488f3e8f96ed4ull, + 0x663d78ff90e185efull, 
0x24cd9914390bb37cull, 0xe3dcbb28c335e8c9ull, 0xa12c5ac36adfde5aull, + 0x2f0e1eba9ea36930ull, 0x6dfeff5137495fa3ull, 0xaaefdd6dcd770416ull, 0xe81f3c86649d3285ull, + 0xf45bb4758c645c51ull, 0xb6ab559e258e6ac2ull, 0x71ba77a2dfb03177ull, 0x334a9649765a07e4ull, + 0xbd68d2308226b08eull, 0xff9833db2bcc861dull, 0x388911e7d1f2dda8ull, 0x7a79f00c7818eb3bull, + 0xcc7af1ff21c30bdeull, 0x8e8a101488293d4dull, 0x499b3228721766f8ull, 0x0b6bd3c3dbfd506bull, + 0x854997ba2f81e701ull, 0xc7b97651866bd192ull, 0x00a8546d7c558a27ull, 0x4258b586d5bfbcb4ull, + 0x5e1c3d753d46d260ull, 0x1cecdc9e94ace4f3ull, 0xdbfdfea26e92bf46ull, 0x990d1f49c77889d5ull, + 0x172f5b3033043ebfull, 0x55dfbadb9aee082cull, 0x92ce98e760d05399ull, 0xd03e790cc93a650aull, + 0xaa478900b1228e31ull, 0xe8b768eb18c8b8a2ull, 0x2fa64ad7e2f6e317ull, 0x6d56ab3c4b1cd584ull, + 0xe374ef45bf6062eeull, 0xa1840eae168a547dull, 0x66952c92ecb40fc8ull, 0x2465cd79455e395bull, + 0x3821458aada7578full, 0x7ad1a461044d611cull, 0xbdc0865dfe733aa9ull, 0xff3067b657990c3aull, + 0x711223cfa3e5bb50ull, 0x33e2c2240a0f8dc3ull, 0xf4f3e018f031d676ull, 0xb60301f359dbe0e5ull, + 0xda050215ea6c212full, 0x98f5e3fe438617bcull, 0x5fe4c1c2b9b84c09ull, 0x1d14202910527a9aull, + 0x93366450e42ecdf0ull, 0xd1c685bb4dc4fb63ull, 0x16d7a787b7faa0d6ull, 0x5427466c1e109645ull, + 0x4863ce9ff6e9f891ull, 0x0a932f745f03ce02ull, 0xcd820d48a53d95b7ull, 0x8f72eca30cd7a324ull, + 0x0150a8daf8ab144eull, 0x43a04931514122ddull, 0x84b16b0dab7f7968ull, 0xc6418ae602954ffbull, + 0xbc387aea7a8da4c0ull, 0xfec89b01d3679253ull, 0x39d9b93d2959c9e6ull, 0x7b2958d680b3ff75ull, + 0xf50b1caf74cf481full, 0xb7fbfd44dd257e8cull, 0x70eadf78271b2539ull, 0x321a3e938ef113aaull, + 0x2e5eb66066087d7eull, 0x6cae578bcfe24bedull, 0xabbf75b735dc1058ull, 0xe94f945c9c3626cbull, + 0x676dd025684a91a1ull, 0x259d31cec1a0a732ull, 0xe28c13f23b9efc87ull, 0xa07cf2199274ca14ull, + 0x167ff3eacbaf2af1ull, 0x548f120162451c62ull, 0x939e303d987b47d7ull, 0xd16ed1d631917144ull, + 0x5f4c95afc5edc62eull, 0x1dbc74446c07f0bdull, 
0xdaad56789639ab08ull, 0x985db7933fd39d9bull, + 0x84193f60d72af34full, 0xc6e9de8b7ec0c5dcull, 0x01f8fcb784fe9e69ull, 0x43081d5c2d14a8faull, + 0xcd2a5925d9681f90ull, 0x8fdab8ce70822903ull, 0x48cb9af28abc72b6ull, 0x0a3b7b1923564425ull, + 0x70428b155b4eaf1eull, 0x32b26afef2a4998dull, 0xf5a348c2089ac238ull, 0xb753a929a170f4abull, + 0x3971ed50550c43c1ull, 0x7b810cbbfce67552ull, 0xbc902e8706d82ee7ull, 0xfe60cf6caf321874ull, + 0xe224479f47cb76a0ull, 0xa0d4a674ee214033ull, 0x67c58448141f1b86ull, 0x253565a3bdf52d15ull, + 0xab1721da49899a7full, 0xe9e7c031e063acecull, 0x2ef6e20d1a5df759ull, 0x6c0603e6b3b7c1caull, + 0xf6fae5c07d3274cdull, 0xb40a042bd4d8425eull, 0x731b26172ee619ebull, 0x31ebc7fc870c2f78ull, + 0xbfc9838573709812ull, 0xfd39626eda9aae81ull, 0x3a28405220a4f534ull, 0x78d8a1b9894ec3a7ull, + 0x649c294a61b7ad73ull, 0x266cc8a1c85d9be0ull, 0xe17dea9d3263c055ull, 0xa38d0b769b89f6c6ull, + 0x2daf4f0f6ff541acull, 0x6f5faee4c61f773full, 0xa84e8cd83c212c8aull, 0xeabe6d3395cb1a19ull, + 0x90c79d3fedd3f122ull, 0xd2377cd44439c7b1ull, 0x15265ee8be079c04ull, 0x57d6bf0317edaa97ull, + 0xd9f4fb7ae3911dfdull, 0x9b041a914a7b2b6eull, 0x5c1538adb04570dbull, 0x1ee5d94619af4648ull, + 0x02a151b5f156289cull, 0x4051b05e58bc1e0full, 0x87409262a28245baull, 0xc5b073890b687329ull, + 0x4b9237f0ff14c443ull, 0x0962d61b56fef2d0ull, 0xce73f427acc0a965ull, 0x8c8315cc052a9ff6ull, + 0x3a80143f5cf17f13ull, 0x7870f5d4f51b4980ull, 0xbf61d7e80f251235ull, 0xfd913603a6cf24a6ull, + 0x73b3727a52b393ccull, 0x31439391fb59a55full, 0xf652b1ad0167feeaull, 0xb4a25046a88dc879ull, + 0xa8e6d8b54074a6adull, 0xea16395ee99e903eull, 0x2d071b6213a0cb8bull, 0x6ff7fa89ba4afd18ull, + 0xe1d5bef04e364a72ull, 0xa3255f1be7dc7ce1ull, 0x64347d271de22754ull, 0x26c49cccb40811c7ull, + 0x5cbd6cc0cc10fafcull, 0x1e4d8d2b65facc6full, 0xd95caf179fc497daull, 0x9bac4efc362ea149ull, + 0x158e0a85c2521623ull, 0x577eeb6e6bb820b0ull, 0x906fc95291867b05ull, 0xd29f28b9386c4d96ull, + 0xcedba04ad0952342ull, 0x8c2b41a1797f15d1ull, 0x4b3a639d83414e64ull, 
0x09ca82762aab78f7ull, + 0x87e8c60fded7cf9dull, 0xc51827e4773df90eull, 0x020905d88d03a2bbull, 0x40f9e43324e99428ull, + 0x2cffe7d5975e55e2ull, 0x6e0f063e3eb46371ull, 0xa91e2402c48a38c4ull, 0xebeec5e96d600e57ull, + 0x65cc8190991cb93dull, 0x273c607b30f68faeull, 0xe02d4247cac8d41bull, 0xa2dda3ac6322e288ull, + 0xbe992b5f8bdb8c5cull, 0xfc69cab42231bacfull, 0x3b78e888d80fe17aull, 0x7988096371e5d7e9ull, + 0xf7aa4d1a85996083ull, 0xb55aacf12c735610ull, 0x724b8ecdd64d0da5ull, 0x30bb6f267fa73b36ull, + 0x4ac29f2a07bfd00dull, 0x08327ec1ae55e69eull, 0xcf235cfd546bbd2bull, 0x8dd3bd16fd818bb8ull, + 0x03f1f96f09fd3cd2ull, 0x41011884a0170a41ull, 0x86103ab85a2951f4ull, 0xc4e0db53f3c36767ull, + 0xd8a453a01b3a09b3ull, 0x9a54b24bb2d03f20ull, 0x5d45907748ee6495ull, 0x1fb5719ce1045206ull, + 0x919735e51578e56cull, 0xd367d40ebc92d3ffull, 0x1476f63246ac884aull, 0x568617d9ef46bed9ull, + 0xe085162ab69d5e3cull, 0xa275f7c11f7768afull, 0x6564d5fde549331aull, 0x279434164ca30589ull, + 0xa9b6706fb8dfb2e3ull, 0xeb46918411358470ull, 0x2c57b3b8eb0bdfc5ull, 0x6ea7525342e1e956ull, + 0x72e3daa0aa188782ull, 0x30133b4b03f2b111ull, 0xf7021977f9cceaa4ull, 0xb5f2f89c5026dc37ull, + 0x3bd0bce5a45a6b5dull, 0x79205d0e0db05dceull, 0xbe317f32f78e067bull, 0xfcc19ed95e6430e8ull, + 0x86b86ed5267cdbd3ull, 0xc4488f3e8f96ed40ull, 0x0359ad0275a8b6f5ull, 0x41a94ce9dc428066ull, + 0xcf8b0890283e370cull, 0x8d7be97b81d4019full, 0x4a6acb477bea5a2aull, 0x089a2aacd2006cb9ull, + 0x14dea25f3af9026dull, 0x562e43b4931334feull, 0x913f6188692d6f4bull, 0xd3cf8063c0c759d8ull, + 0x5dedc41a34bbeeb2ull, 0x1f1d25f19d51d821ull, 0xd80c07cd676f8394ull, 0x9afce626ce85b507ull, +}; + +u32 gb_crc32(void const *data, isize len) { + isize remaining; + u32 result = ~(cast(u32)0); + u8 const *c = cast(u8 const *)data; + for (remaining = len; remaining--; c++) { + result = (result >> 8) ^ (GB__CRC32_TABLE[(result ^ *c) & 0xff]); + } + return ~result; +} + +u64 gb_crc64(void const *data, isize len) { + isize remaining; + u64 result = ~(cast(u64)0); + u8 
const *c = cast(u8 const *)data; + for (remaining = len; remaining--; c++) { + result = (result >> 8) ^ (GB__CRC64_TABLE[(result ^ *c) & 0xff]); + } + return ~result; +} + +u32 gb_fnv32(void const *data, isize len) { + isize i; + u32 h = 0x811c9dc5; + u8 const *c = cast(u8 const *)data; + + for (i = 0; i < len; i++) { + h = (h * 0x01000193) ^ c[i]; + } + + return h; +} + +u64 gb_fnv64(void const *data, isize len) { + isize i; + u64 h = 0xcbf29ce484222325ull; + u8 const *c = cast(u8 const *)data; + + for (i = 0; i < len; i++) { + h = (h * 0x100000001b3ll) ^ c[i]; + } + + return h; +} + +u32 gb_fnv32a(void const *data, isize len) { + isize i; + u32 h = 0x811c9dc5; + u8 const *c = cast(u8 const *)data; + + for (i = 0; i < len; i++) { + h = (h ^ c[i]) * 0x01000193; + } + + return h; +} + +u64 gb_fnv64a(void const *data, isize len) { + isize i; + u64 h = 0xcbf29ce484222325ull; + u8 const *c = cast(u8 const *)data; + + for (i = 0; i < len; i++) { + h = (h ^ c[i]) * 0x100000001b3ll; + } + + return h; +} + +gb_inline u32 gb_murmur32(void const *data, isize len) { return gb_murmur32_seed(data, len, 0x9747b28c); } +gb_inline u64 gb_murmur64(void const *data, isize len) { return gb_murmur64_seed(data, len, 0x9747b28c); } + +u32 gb_murmur32_seed(void const *data, isize len, u32 seed) { + u32 const c1 = 0xcc9e2d51; + u32 const c2 = 0x1b873593; + u32 const r1 = 15; + u32 const r2 = 13; + u32 const m = 5; + u32 const n = 0xe6546b64; + + isize i, nblocks = len / 4; + u32 hash = seed, k1 = 0; + u32 const *blocks = cast(u32 const*)data; + u8 const *tail = cast(u8 const *)(data) + nblocks*4; + + for (i = 0; i < nblocks; i++) { + u32 k = blocks[i]; + k *= c1; + k = (k << r1) | (k >> (32 - r1)); + k *= c2; + + hash ^= k; + hash = ((hash << r2) | (hash >> (32 - r2))) * m + n; + } + + switch (len & 3) { + case 3: + k1 ^= tail[2] << 16; + case 2: + k1 ^= tail[1] << 8; + case 1: + k1 ^= tail[0]; + + k1 *= c1; + k1 = (k1 << r1) | (k1 >> (32 - r1)); + k1 *= c2; + hash ^= k1; + } + + hash ^= 
len; + hash ^= (hash >> 16); + hash *= 0x85ebca6b; + hash ^= (hash >> 13); + hash *= 0xc2b2ae35; + hash ^= (hash >> 16); + + return hash; +} + +u64 gb_murmur64_seed(void const *data_, isize len, u64 seed) { +#if defined(GB_ARCH_64_BIT) + u64 const m = 0xc6a4a7935bd1e995ULL; + i32 const r = 47; + + u64 h = seed ^ (len * m); + + u64 const *data = cast(u64 const *)data_; + u8 const *data2 = cast(u8 const *)data_; + u64 const* end = data + (len / 8); + + while (data != end) { + u64 k = *data++; + + k *= m; + k ^= k >> r; + k *= m; + + h ^= k; + h *= m; + } + + switch (len & 7) { + case 7: h ^= cast(u64)(data2[6]) << 48; + case 6: h ^= cast(u64)(data2[5]) << 40; + case 5: h ^= cast(u64)(data2[4]) << 32; + case 4: h ^= cast(u64)(data2[3]) << 24; + case 3: h ^= cast(u64)(data2[2]) << 16; + case 2: h ^= cast(u64)(data2[1]) << 8; + case 1: h ^= cast(u64)(data2[0]); + h *= m; + }; + + h ^= h >> r; + h *= m; + h ^= h >> r; + + return h; +#else + u64 h; + u32 const m = 0x5bd1e995; + i32 const r = 24; + + u32 h1 = cast(u32)(seed) ^ cast(u32)(len); + u32 h2 = cast(u32)(seed >> 32); + + u32 const *data = cast(u32 const *)data_; + + while (len >= 8) { + u32 k1, k2; + k1 = *data++; + k1 *= m; + k1 ^= k1 >> r; + k1 *= m; + h1 *= m; + h1 ^= k1; + len -= 4; + + k2 = *data++; + k2 *= m; + k2 ^= k2 >> r; + k2 *= m; + h2 *= m; + h2 ^= k2; + len -= 4; + } + + if (len >= 4) { + u32 k1 = *data++; + k1 *= m; + k1 ^= k1 >> r; + k1 *= m; + h1 *= m; + h1 ^= k1; + len -= 4; + } + + switch (len) { + case 3: h2 ^= (cast(u8 const *)data)[2] << 16; + case 2: h2 ^= (cast(u8 const *)data)[1] << 8; + case 1: h2 ^= (cast(u8 const *)data)[0] << 0; + h2 *= m; + }; + + h1 ^= h2 >> 18; + h1 *= m; + h2 ^= h1 >> 22; + h2 *= m; + h1 ^= h2 >> 17; + h1 *= m; + h2 ^= h1 >> 19; + h2 *= m; + + h = h1; + h = (h << 32) | h2; + + return h; +#endif +} + + + + + + + +//////////////////////////////////////////////////////////////// +// +// File Handling +// +// + +#if defined(GB_SYSTEM_WINDOWS) + + gb_internal wchar_t 
*gb__alloc_utf8_to_ucs2(gbAllocator a, char const *text, isize *w_len_) { + wchar_t *w_text = NULL; + isize len = 0, w_len = 0, w_len1 = 0; + if (text == NULL) { + if (w_len_) *w_len_ = w_len; + return NULL; + } + len = gb_strlen(text); + if (len == 0) { + if (w_len_) *w_len_ = w_len; + return NULL; + } + w_len = MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, text, cast(int)len, NULL, 0); + if (w_len == 0) { + if (w_len_) *w_len_ = w_len; + return NULL; + } + w_text = gb_alloc_array(a, wchar_t, w_len+1); + w_len1 = MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, text, cast(int)len, w_text, cast(int)w_len); + if (w_len1 == 0) { + gb_free(a, w_text); + if (w_len_) *w_len_ = 0; + return NULL; + } + w_text[w_len] = 0; + if (w_len_) *w_len_ = w_len; + return w_text; + } + + gb_internal GB_FILE_SEEK_PROC(gb__win32_file_seek) { + LARGE_INTEGER li_offset; + li_offset.QuadPart = offset; + if (!SetFilePointerEx(fd.p, li_offset, &li_offset, whence)) { + return false; + } + + if (new_offset) *new_offset = li_offset.QuadPart; + return true; + } + + gb_internal GB_FILE_READ_AT_PROC(gb__win32_file_read) { + b32 result = false; + DWORD size_ = cast(DWORD)(size > I32_MAX ? I32_MAX : size); + DWORD bytes_read_; + gb__win32_file_seek(fd, offset, gbSeekWhence_Begin, NULL); + if (ReadFile(fd.p, buffer, size_, &bytes_read_, NULL)) { + if (bytes_read) *bytes_read = bytes_read_; + result = true; + } + + return result; + } + + gb_internal GB_FILE_WRITE_AT_PROC(gb__win32_file_write) { + DWORD size_ = cast(DWORD)(size > I32_MAX ? 
I32_MAX : size); + DWORD bytes_written_; + gb__win32_file_seek(fd, offset, gbSeekWhence_Begin, NULL); + if (WriteFile(fd.p, buffer, size_, &bytes_written_, NULL)) { + if (bytes_written) *bytes_written = bytes_written_; + return true; + } + return false; + } + + gb_internal GB_FILE_CLOSE_PROC(gb__win32_file_close) { + CloseHandle(fd.p); + } + + gbFileOperations const gbDefaultFileOperations = { + gb__win32_file_read, + gb__win32_file_write, + gb__win32_file_seek, + gb__win32_file_close + }; + + gb_no_inline GB_FILE_OPEN_PROC(gb__win32_file_open) { + DWORD desired_access; + DWORD creation_disposition; + void *handle; + wchar_t *w_text; + + switch (mode & gbFileMode_Modes) { + case gbFileMode_Read: + desired_access = GENERIC_READ; + creation_disposition = OPEN_EXISTING; + break; + case gbFileMode_Write: + desired_access = GENERIC_WRITE; + creation_disposition = CREATE_ALWAYS; + break; + case gbFileMode_Append: + desired_access = GENERIC_WRITE; + creation_disposition = OPEN_ALWAYS; + break; + case gbFileMode_Read | gbFileMode_Rw: + desired_access = GENERIC_READ | GENERIC_WRITE; + creation_disposition = OPEN_EXISTING; + break; + case gbFileMode_Write | gbFileMode_Rw: + desired_access = GENERIC_READ | GENERIC_WRITE; + creation_disposition = CREATE_ALWAYS; + break; + case gbFileMode_Append | gbFileMode_Rw: + desired_access = GENERIC_READ | GENERIC_WRITE; + creation_disposition = OPEN_ALWAYS; + break; + default: + GB_PANIC("Invalid file mode"); + return gbFileError_Invalid; + } + + w_text = gb__alloc_utf8_to_ucs2(gb_heap_allocator(), filename, NULL); + if (w_text == NULL) { + return gbFileError_InvalidFilename; + } + handle = CreateFileW(w_text, + desired_access, + FILE_SHARE_READ|FILE_SHARE_DELETE, NULL, + creation_disposition, FILE_ATTRIBUTE_NORMAL, NULL); + + gb_free(gb_heap_allocator(), w_text); + + if (handle == INVALID_HANDLE_VALUE) { + DWORD err = GetLastError(); + switch (err) { + case ERROR_FILE_NOT_FOUND: return gbFileError_NotExists; + case ERROR_FILE_EXISTS: 
return gbFileError_Exists; + case ERROR_ALREADY_EXISTS: return gbFileError_Exists; + case ERROR_ACCESS_DENIED: return gbFileError_Permission; + } + return gbFileError_Invalid; + } + + if (mode & gbFileMode_Append) { + LARGE_INTEGER offset = {0}; + if (!SetFilePointerEx(handle, offset, NULL, gbSeekWhence_End)) { + CloseHandle(handle); + return gbFileError_Invalid; + } + } + + fd->p = handle; + *ops = gbDefaultFileOperations; + return gbFileError_None; + } + +#else // POSIX + gb_internal GB_FILE_SEEK_PROC(gb__posix_file_seek) { + #if defined(GB_SYSTEM_OSX) + i64 res = lseek(fd.i, offset, whence); + #else + i64 res = lseek64(fd.i, offset, whence); + #endif + if (res < 0) return false; + if (new_offset) *new_offset = res; + return true; + } + + gb_internal GB_FILE_READ_AT_PROC(gb__posix_file_read) { + isize res = pread(fd.i, buffer, size, offset); + if (res < 0) return false; + if (bytes_read) *bytes_read = res; + return true; + } + + gb_internal GB_FILE_WRITE_AT_PROC(gb__posix_file_write) { + isize res; + i64 curr_offset = 0; + gb__posix_file_seek(fd, 0, gbSeekWhence_Current, &curr_offset); + if (curr_offset == offset) { + // NOTE(bill): Writing to stdout et al. 
doesn't like pwrite for numerous reasons + res = write(cast(int)fd.i, buffer, size); + } else { + res = pwrite(cast(int)fd.i, buffer, size, offset); + } + if (res < 0) return false; + if (bytes_written) *bytes_written = res; + return true; + } + + + gb_internal GB_FILE_CLOSE_PROC(gb__posix_file_close) { + close(fd.i); + } + + gbFileOperations const gbDefaultFileOperations = { + gb__posix_file_read, + gb__posix_file_write, + gb__posix_file_seek, + gb__posix_file_close + }; + + gb_no_inline GB_FILE_OPEN_PROC(gb__posix_file_open) { + i32 os_mode; + switch (mode & gbFileMode_Modes) { + case gbFileMode_Read: + os_mode = O_RDONLY; + break; + case gbFileMode_Write: + os_mode = O_WRONLY | O_CREAT | O_TRUNC; + break; + case gbFileMode_Append: + os_mode = O_WRONLY | O_APPEND | O_CREAT; + break; + case gbFileMode_Read | gbFileMode_Rw: + os_mode = O_RDWR; + break; + case gbFileMode_Write | gbFileMode_Rw: + os_mode = O_RDWR | O_CREAT | O_TRUNC; + break; + case gbFileMode_Append | gbFileMode_Rw: + os_mode = O_RDWR | O_APPEND | O_CREAT; + break; + default: + GB_PANIC("Invalid file mode"); + return gbFileError_Invalid; + } + + fd->i = open(filename, os_mode, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH); + if (fd->i < 0) { + // TODO(bill): More file errors + return gbFileError_Invalid; + } + + *ops = gbDefaultFileOperations; + return gbFileError_None; + } + +#endif + + + +gbFileError gb_file_new(gbFile *f, gbFileDescriptor fd, gbFileOperations ops, char const *filename) { + gbFileError err = gbFileError_None; + isize len = gb_strlen(filename); + + // gb_printf_err("gb_file_new: %s\n", filename); + + f->ops = ops; + f->fd = fd; + f->filename = gb_alloc_array(gb_heap_allocator(), char, len+1); + gb_memcopy(cast(char *)f->filename, cast(char *)filename, len+1); + f->last_write_time = gb_file_last_write_time(f->filename); + + return err; +} + + + +gbFileError gb_file_open_mode(gbFile *f, gbFileMode mode, char const *filename) { + gbFileError err; +#if 
defined(GB_SYSTEM_WINDOWS) + err = gb__win32_file_open(&f->fd, &f->ops, mode, filename); +#else + err = gb__posix_file_open(&f->fd, &f->ops, mode, filename); +#endif + if (err == gbFileError_None) { + return gb_file_new(f, f->fd, f->ops, filename); + } + return err; +} + +gbFileError gb_file_close(gbFile *f) { + if (f == NULL) { + return gbFileError_Invalid; + } + +#if defined(GB_COMPILER_MSVC) + if (f->filename != NULL) { + gb_free(gb_heap_allocator(), cast(char *)f->filename); + } +#else + // TODO HACK(bill): Memory Leak!!! +#endif + +#if defined(GB_SYSTEM_WINDOWS) + if (f->fd.p == INVALID_HANDLE_VALUE) { + return gbFileError_Invalid; + } +#else + if (f->fd.i < 0) { + return gbFileError_Invalid; + } +#endif + + if (!f->ops.read_at) f->ops = gbDefaultFileOperations; + f->ops.close(f->fd); + + return gbFileError_None; +} + +gb_inline b32 gb_file_read_at_check(gbFile *f, void *buffer, isize size, i64 offset, isize *bytes_read) { + if (!f->ops.read_at) f->ops = gbDefaultFileOperations; + return f->ops.read_at(f->fd, buffer, size, offset, bytes_read); +} + +gb_inline b32 gb_file_write_at_check(gbFile *f, void const *buffer, isize size, i64 offset, isize *bytes_written) { + if (!f->ops.read_at) f->ops = gbDefaultFileOperations; + return f->ops.write_at(f->fd, buffer, size, offset, bytes_written); +} + + +gb_inline b32 gb_file_read_at(gbFile *f, void *buffer, isize size, i64 offset) { + return gb_file_read_at_check(f, buffer, size, offset, NULL); +} + +gb_inline b32 gb_file_write_at(gbFile *f, void const *buffer, isize size, i64 offset) { + return gb_file_write_at_check(f, buffer, size, offset, NULL); +} + +gb_inline i64 gb_file_seek(gbFile *f, i64 offset) { + i64 new_offset = 0; + if (!f->ops.read_at) f->ops = gbDefaultFileOperations; + f->ops.seek(f->fd, offset, gbSeekWhence_Begin, &new_offset); + return new_offset; +} + +gb_inline i64 gb_file_seek_to_end(gbFile *f) { + i64 new_offset = 0; + if (!f->ops.read_at) f->ops = gbDefaultFileOperations; + f->ops.seek(f->fd, 
0, gbSeekWhence_End, &new_offset); + return new_offset; +} + +// NOTE(bill): Skips a certain amount of bytes +gb_inline i64 gb_file_skip(gbFile *f, i64 bytes) { + i64 new_offset = 0; + if (!f->ops.read_at) f->ops = gbDefaultFileOperations; + f->ops.seek(f->fd, bytes, gbSeekWhence_Current, &new_offset); + return new_offset; +} + +gb_inline i64 gb_file_tell(gbFile *f) { + i64 new_offset = 0; + if (!f->ops.read_at) f->ops = gbDefaultFileOperations; + f->ops.seek(f->fd, 0, gbSeekWhence_Current, &new_offset); + return new_offset; +} +gb_inline b32 gb_file_read (gbFile *f, void *buffer, isize size) { return gb_file_read_at(f, buffer, size, gb_file_tell(f)); } +gb_inline b32 gb_file_write(gbFile *f, void const *buffer, isize size) { return gb_file_write_at(f, buffer, size, gb_file_tell(f)); } + + +gbFileError gb_file_create(gbFile *f, char const *filename) { + return gb_file_open_mode(f, gbFileMode_Write|gbFileMode_Rw, filename); +} + + +gbFileError gb_file_open(gbFile *f, char const *filename) { + return gb_file_open_mode(f, gbFileMode_Read, filename); +} + + +char const *gb_file_name(gbFile *f) { return f->filename ? f->filename : ""; } + +gb_inline b32 gb_file_has_changed(gbFile *f) { + b32 result = false; + gbFileTime last_write_time = gb_file_last_write_time(f->filename); + if (f->last_write_time != last_write_time) { + result = true; + f->last_write_time = last_write_time; + } + return result; +} + +// TODO(bill): Is this a bad idea? 
+gb_global b32 gb__std_file_set = false; +gb_global gbFile gb__std_files[gbFileStandard_Count] = {{0}}; + + +#if defined(GB_SYSTEM_WINDOWS) + +gb_inline gbFile *const gb_file_get_standard(gbFileStandardType std) { + if (!gb__std_file_set) { + #define GB__SET_STD_FILE(type, v) gb__std_files[type].fd.p = v; gb__std_files[type].ops = gbDefaultFileOperations + GB__SET_STD_FILE(gbFileStandard_Input, GetStdHandle(STD_INPUT_HANDLE)); + GB__SET_STD_FILE(gbFileStandard_Output, GetStdHandle(STD_OUTPUT_HANDLE)); + GB__SET_STD_FILE(gbFileStandard_Error, GetStdHandle(STD_ERROR_HANDLE)); + #undef GB__SET_STD_FILE + gb__std_file_set = true; + } + return &gb__std_files[std]; +} + +gb_inline i64 gb_file_size(gbFile *f) { + LARGE_INTEGER size; + GetFileSizeEx(f->fd.p, &size); + return size.QuadPart; +} + +gbFileError gb_file_truncate(gbFile *f, i64 size) { + gbFileError err = gbFileError_None; + i64 prev_offset = gb_file_tell(f); + gb_file_seek(f, size); + if (!SetEndOfFile(f)) { + err = gbFileError_TruncationFailure; + } + gb_file_seek(f, prev_offset); + return err; +} + + +b32 gb_file_exists(char const *name) { + WIN32_FIND_DATAW data; + wchar_t *w_text; + void *handle; + b32 found = false; + gbAllocator a = gb_heap_allocator(); + + w_text = gb__alloc_utf8_to_ucs2(a, name, NULL); + if (w_text == NULL) { + return false; + } + handle = FindFirstFileW(w_text, &data); + gb_free(a, w_text); + found = handle != INVALID_HANDLE_VALUE; + if (found) FindClose(handle); + return found; +} + +#else // POSIX + +gb_inline gbFile *const gb_file_get_standard(gbFileStandardType std) { + if (!gb__std_file_set) { + #define GB__SET_STD_FILE(type, v) gb__std_files[type].fd.i = v; gb__std_files[type].ops = gbDefaultFileOperations + GB__SET_STD_FILE(gbFileStandard_Input, 0); + GB__SET_STD_FILE(gbFileStandard_Output, 1); + GB__SET_STD_FILE(gbFileStandard_Error, 2); + #undef GB__SET_STD_FILE + gb__std_file_set = true; + } + return &gb__std_files[std]; +} + +gb_inline i64 gb_file_size(gbFile *f) { + i64 
size = 0; + i64 prev_offset = gb_file_tell(f); + gb_file_seek_to_end(f); + size = gb_file_tell(f); + gb_file_seek(f, prev_offset); + return size; +} + +gb_inline gbFileError gb_file_truncate(gbFile *f, i64 size) { + gbFileError err = gbFileError_None; + int i = ftruncate(f->fd.i, size); + if (i != 0) err = gbFileError_TruncationFailure; + return err; +} + +gb_inline b32 gb_file_exists(char const *name) { + return access(name, F_OK) != -1; +} +#endif + + + +#if defined(GB_SYSTEM_WINDOWS) +gbFileTime gb_file_last_write_time(char const *filepath) { + ULARGE_INTEGER li = {0}; + FILETIME last_write_time = {0}; + WIN32_FILE_ATTRIBUTE_DATA data = {0}; + gbAllocator a = gb_heap_allocator(); + + wchar_t *w_text = gb__alloc_utf8_to_ucs2(a, filepath, NULL); + if (w_text == NULL) { + return 0; + } + + if (GetFileAttributesExW(w_text, GetFileExInfoStandard, &data)) { + last_write_time = data.ftLastWriteTime; + } + gb_free(a, w_text); + + li.LowPart = last_write_time.dwLowDateTime; + li.HighPart = last_write_time.dwHighDateTime; + return cast(gbFileTime)li.QuadPart; +} + + +gb_inline b32 gb_file_copy(char const *existing_filename, char const *new_filename, b32 fail_if_exists) { + wchar_t *w_old = NULL; + wchar_t *w_new = NULL; + gbAllocator a = gb_heap_allocator(); + b32 result = false; + + w_old = gb__alloc_utf8_to_ucs2(a, existing_filename, NULL); + if (w_old == NULL) { + return false; + } + w_new = gb__alloc_utf8_to_ucs2(a, new_filename, NULL); + if (w_new != NULL) { + result = CopyFileW(w_old, w_new, fail_if_exists); + } + gb_free(a, w_new); + gb_free(a, w_old); + return result; +} + +gb_inline b32 gb_file_move(char const *existing_filename, char const *new_filename) { + wchar_t *w_old = NULL; + wchar_t *w_new = NULL; + gbAllocator a = gb_heap_allocator(); + b32 result = false; + + w_old = gb__alloc_utf8_to_ucs2(a, existing_filename, NULL); + if (w_old == NULL) { + return false; + } + w_new = gb__alloc_utf8_to_ucs2(a, new_filename, NULL); + if (w_new != NULL) { + result = 
MoveFileW(w_old, w_new); + } + gb_free(a, w_new); + gb_free(a, w_old); + return result; +} + +b32 gb_file_remove(char const *filename) { + wchar_t *w_filename = NULL; + gbAllocator a = gb_heap_allocator(); + b32 result = false; + w_filename = gb__alloc_utf8_to_ucs2(a, filename, NULL); + if (w_filename == NULL) { + return false; + } + result = DeleteFileW(w_filename); + gb_free(a, w_filename); + return result; +} + + + +#else + +gbFileTime gb_file_last_write_time(char const *filepath) { + time_t result = 0; + struct stat file_stat; + + if (stat(filepath, &file_stat) == 0) { + result = file_stat.st_mtime; + } + + return cast(gbFileTime)result; +} + + +gb_inline b32 gb_file_copy(char const *existing_filename, char const *new_filename, b32 fail_if_exists) { +#if defined(GB_SYSTEM_OSX) + return copyfile(existing_filename, new_filename, NULL, COPYFILE_DATA) == 0; +#else + isize size; + int existing_fd = open(existing_filename, O_RDONLY, 0); + int new_fd = open(new_filename, O_WRONLY|O_CREAT, 0666); + + struct stat stat_existing; + fstat(existing_fd, &stat_existing); + + size = sendfile(new_fd, existing_fd, 0, stat_existing.st_size); + + close(new_fd); + close(existing_fd); + + return size == stat_existing.st_size; +#endif +} + +gb_inline b32 gb_file_move(char const *existing_filename, char const *new_filename) { + if (link(existing_filename, new_filename) == 0) { + return unlink(existing_filename) != -1; + } + return false; +} + +b32 gb_file_remove(char const *filename) { +#if defined(GB_SYSTEM_OSX) + return unlink(filename) != -1; +#else + return remove(filename) == 0; +#endif +} + + +#endif + + + + + +gbFileContents gb_file_read_contents(gbAllocator a, b32 zero_terminate, char const *filepath) { + gbFileContents result = {0}; + gbFile file = {0}; + + result.allocator = a; + + if (gb_file_open(&file, filepath) == gbFileError_None) { + isize file_size = cast(isize)gb_file_size(&file); + if (file_size > 0) { + result.data = gb_alloc(a, zero_terminate ? 
file_size+1 : file_size); + result.size = file_size; + gb_file_read_at(&file, result.data, result.size, 0); + if (zero_terminate) { + u8 *str = cast(u8 *)result.data; + str[file_size] = '\0'; + } + } + gb_file_close(&file); + } + + return result; +} + +void gb_file_free_contents(gbFileContents *fc) { + GB_ASSERT_NOT_NULL(fc->data); + gb_free(fc->allocator, fc->data); + fc->data = NULL; + fc->size = 0; +} + + + + + +gb_inline b32 gb_path_is_absolute(char const *path) { + b32 result = false; + GB_ASSERT_NOT_NULL(path); +#if defined(GB_SYSTEM_WINDOWS) + result == (gb_strlen(path) > 2) && + gb_char_is_alpha(path[0]) && + (path[1] == ':' && path[2] == GB_PATH_SEPARATOR); +#else + result = (gb_strlen(path) > 0 && path[0] == GB_PATH_SEPARATOR); +#endif + return result; +} + +gb_inline b32 gb_path_is_relative(char const *path) { return !gb_path_is_absolute(path); } + +gb_inline b32 gb_path_is_root(char const *path) { + b32 result = false; + GB_ASSERT_NOT_NULL(path); +#if defined(GB_SYSTEM_WINDOWS) + result = gb_path_is_absolute(path) && (gb_strlen(path) == 3); +#else + result = gb_path_is_absolute(path) && (gb_strlen(path) == 1); +#endif + return result; +} + +gb_inline char const *gb_path_base_name(char const *path) { + char const *ls; + GB_ASSERT_NOT_NULL(path); + ls = gb_char_last_occurence(path, '/'); + return (ls == NULL) ? path : ls+1; +} + +gb_inline char const *gb_path_extension(char const *path) { + char const *ld; + GB_ASSERT_NOT_NULL(path); + ld = gb_char_last_occurence(path, '.'); + return (ld == NULL) ? 
NULL : ld+1; +} + + +#if !defined(_WINDOWS_) && defined(GB_SYSTEM_WINDOWS) +GB_DLL_IMPORT DWORD WINAPI GetFullPathNameA(char const *lpFileName, DWORD nBufferLength, char *lpBuffer, char **lpFilePart); +GB_DLL_IMPORT DWORD WINAPI GetFullPathNameW(wchar_t const *lpFileName, DWORD nBufferLength, wchar_t *lpBuffer, wchar_t **lpFilePart); +#endif + +char *gb_path_get_full_name(gbAllocator a, char const *path) { +#if defined(GB_SYSTEM_WINDOWS) +// TODO(bill): Make UTF-8 + wchar_t *w_path = NULL; + wchar_t *w_fullpath = NULL; + isize w_len = 0; + isize new_len = 0; + isize new_len1 = 0; + char *new_path = 0; + w_path = gb__alloc_utf8_to_ucs2(gb_heap_allocator(), path, NULL); + if (w_path == NULL) { + return NULL; + } + w_len = GetFullPathNameW(w_path, 0, NULL, NULL); + if (w_len == 0) { + return NULL; + } + w_fullpath = gb_alloc_array(gb_heap_allocator(), wchar_t, w_len+1); + GetFullPathNameW(w_path, cast(int)w_len, w_fullpath, NULL); + w_fullpath[w_len] = 0; + gb_free(gb_heap_allocator(), w_path); + + new_len = WideCharToMultiByte(CP_UTF8, WC_ERR_INVALID_CHARS, w_fullpath, cast(int)w_len, NULL, 0, NULL, NULL); + if (new_len == 0) { + gb_free(gb_heap_allocator(), w_fullpath); + return NULL; + } + new_path = gb_alloc_array(a, char, new_len+1); + new_len1 = WideCharToMultiByte(CP_UTF8, WC_ERR_INVALID_CHARS, w_fullpath, cast(int)w_len, new_path, cast(int)new_len, NULL, NULL); + if (new_len1 == 0) { + gb_free(gb_heap_allocator(), w_fullpath); + gb_free(a, new_path); + return NULL; + } + new_path[new_len] = 0; + return new_path; +#else + char *p, *result, *fullpath = NULL; + isize len; + p = realpath(path, NULL); + fullpath = p; + if (p == NULL) { + // NOTE(bill): File does not exist + fullpath = cast(char *)path; + } + + len = gb_strlen(fullpath); + + result = gb_alloc_array(a, char, len + 1); + gb_memmove(result, fullpath, len); + result[len] = 0; + free(p); + + return result; +#endif +} + + + + + +//////////////////////////////////////////////////////////////// +// +// 
Printing +// +// + + +isize gb_printf(char const *fmt, ...) { + isize res; + va_list va; + va_start(va, fmt); + res = gb_printf_va(fmt, va); + va_end(va); + return res; +} + + +isize gb_printf_err(char const *fmt, ...) { + isize res; + va_list va; + va_start(va, fmt); + res = gb_printf_err_va(fmt, va); + va_end(va); + return res; +} + +isize gb_fprintf(struct gbFile *f, char const *fmt, ...) { + isize res; + va_list va; + va_start(va, fmt); + res = gb_fprintf_va(f, fmt, va); + va_end(va); + return res; +} + +char *gb_bprintf(char const *fmt, ...) { + va_list va; + char *str; + va_start(va, fmt); + str = gb_bprintf_va(fmt, va); + va_end(va); + return str; +} + +isize gb_snprintf(char *str, isize n, char const *fmt, ...) { + isize res; + va_list va; + va_start(va, fmt); + res = gb_snprintf_va(str, n, fmt, va); + va_end(va); + return res; +} + + + +gb_inline isize gb_printf_va(char const *fmt, va_list va) { + return gb_fprintf_va(gb_file_get_standard(gbFileStandard_Output), fmt, va); +} + +gb_inline isize gb_printf_err_va(char const *fmt, va_list va) { + return gb_fprintf_va(gb_file_get_standard(gbFileStandard_Error), fmt, va); +} + +gb_inline isize gb_fprintf_va(struct gbFile *f, char const *fmt, va_list va) { + gb_local_persist char buf[4096]; + isize len = gb_snprintf_va(buf, gb_size_of(buf), fmt, va); + gb_file_write(f, buf, len-1); // NOTE(bill): prevent extra whitespace + return len; +} + + +gb_inline char *gb_bprintf_va(char const *fmt, va_list va) { + gb_local_persist char buffer[4096]; + gb_snprintf_va(buffer, gb_size_of(buffer), fmt, va); + return buffer; +} + + +enum { + gbFmt_Minus = GB_BIT(0), + gbFmt_Plus = GB_BIT(1), + gbFmt_Alt = GB_BIT(2), + gbFmt_Space = GB_BIT(3), + gbFmt_Zero = GB_BIT(4), + + gbFmt_Char = GB_BIT(5), + gbFmt_Short = GB_BIT(6), + gbFmt_Int = GB_BIT(7), + gbFmt_Long = GB_BIT(8), + gbFmt_Llong = GB_BIT(9), + gbFmt_Size = GB_BIT(10), + gbFmt_Intptr = GB_BIT(11), + + gbFmt_Unsigned = GB_BIT(12), + gbFmt_Lower = GB_BIT(13), + gbFmt_Upper 
= GB_BIT(14), + + + gbFmt_Done = GB_BIT(30), + + gbFmt_Ints = gbFmt_Char|gbFmt_Short|gbFmt_Int|gbFmt_Long|gbFmt_Llong|gbFmt_Size|gbFmt_Intptr +}; + +typedef struct { + i32 base; + i32 flags; + i32 width; + i32 precision; +} gbprivFmtInfo; + + +gb_internal isize gb__print_string(char *text, isize max_len, gbprivFmtInfo *info, char const *str) { + // TODO(bill): Get precision and width to work correctly. How does it actually work?! + // TODO(bill): This looks very buggy indeed. + isize res = 0, len; + isize remaining = max_len; + + if (info && info->precision >= 0) { + len = gb_strnlen(str, info->precision); + } else { + len = gb_strlen(str); + } + + if (info && (info->width == 0 || info->flags & gbFmt_Minus)) { + if (info->precision > 0) { + len = info->precision < len ? info->precision : len; + } + + res += gb_strlcpy(text, str, len); + + if (info->width > res) { + isize padding = info->width - len; + char pad = (info->flags & gbFmt_Zero) ? '0' : ' '; + while (padding --> 0 && remaining --> 0) { + *text++ = pad, res++; + } + } + } else { + if (info && (info->width > res)) { + isize padding = info->width - len; + char pad = (info->flags & gbFmt_Zero) ? '0' : ' '; + while (padding --> 0 && remaining --> 0) { + *text++ = pad, res++; + } + } + + res += gb_strlcpy(text, str, len); + } + + + if (info) { + if (info->flags & gbFmt_Upper) { + gb_str_to_upper(text); + } else if (info->flags & gbFmt_Lower) { + gb_str_to_lower(text); + } + } + + return res; +} + +gb_internal isize gb__print_char(char *text, isize max_len, gbprivFmtInfo *info, char arg) { + char str[2] = ""; + str[0] = arg; + return gb__print_string(text, max_len, info, str); +} + + +gb_internal isize gb__print_i64(char *text, isize max_len, gbprivFmtInfo *info, i64 value) { + char num[130]; + gb_i64_to_str(value, num, info ? 
info->base : 10); + return gb__print_string(text, max_len, info, num); +} + +gb_internal isize gb__print_u64(char *text, isize max_len, gbprivFmtInfo *info, u64 value) { + char num[130]; + gb_u64_to_str(value, num, info ? info->base : 10); + return gb__print_string(text, max_len, info, num); +} + + +gb_internal isize gb__print_f64(char *text, isize max_len, gbprivFmtInfo *info, f64 arg) { + // TODO(bill): Handle exponent notation + isize width, len, remaining = max_len; + char *text_begin = text; + + if (arg) { + u64 value; + if (arg < 0) { + if (remaining > 1) { + *text = '-', remaining--; + } + text++; + arg = -arg; + } else if (info->flags & gbFmt_Minus) { + if (remaining > 1) { + *text = '+', remaining--; + } + text++; + } + + value = cast(u64)arg; + len = gb__print_u64(text, remaining, NULL, value); + text += len; + + if (len >= remaining) { + remaining = gb_min(remaining, 1); + } else { + remaining -= len; + } + arg -= value; + + if (info->precision < 0) { + info->precision = 6; + } + + if ((info->flags & gbFmt_Alt) || info->precision > 0) { + i64 mult = 10; + if (remaining > 1) { + *text = '.', remaining--; + } + text++; + while (info->precision-- > 0) { + value = cast(u64)(arg * mult); + len = gb__print_u64(text, remaining, NULL, value); + text += len; + if (len >= remaining) { + remaining = gb_min(remaining, 1); + } else { + remaining -= len; + } + arg -= cast(f64)value / mult; + mult *= 10; + } + } + } else { + if (remaining > 1) { + *text = '0', remaining--; + } + text++; + if (info->flags & gbFmt_Alt) { + if (remaining > 1) { + *text = '.', remaining--; + } + text++; + } + } + + width = info->width - (text - text_begin); + if (width > 0) { + char fill = (info->flags & gbFmt_Zero) ? 
'0' : ' '; + char *end = text+remaining-1; + len = (text - text_begin); + + for (len = (text - text_begin); len--; ) { + if ((text_begin+len+width) < end) { + *(text_begin+len+width) = *(text_begin+len); + } + } + + len = width; + text += len; + if (len >= remaining) { + remaining = gb_min(remaining, 1); + } else { + remaining -= len; + } + + while (len--) { + if (text_begin+len < end) { + text_begin[len] = fill; + } + } + } + + return (text - text_begin); +} + + + +gb_no_inline isize gb_snprintf_va(char *text, isize max_len, char const *fmt, va_list va) { + char const *text_begin = text; + isize remaining = max_len, res; + + while (*fmt) { + gbprivFmtInfo info = {0}; + isize len = 0; + info.precision = -1; + + while (*fmt && *fmt != '%' && remaining) { + *text++ = *fmt++; + } + + if (*fmt == '%') { + do { + switch (*++fmt) { + case '-': info.flags |= gbFmt_Minus; break; + case '+': info.flags |= gbFmt_Plus; break; + case '#': info.flags |= gbFmt_Alt; break; + case ' ': info.flags |= gbFmt_Space; break; + case '0': info.flags |= gbFmt_Zero; break; + default: info.flags |= gbFmt_Done; break; + } + } while (!(info.flags & gbFmt_Done)); + } + + // NOTE(bill): Optional Width + if (*fmt == '*') { + int width = va_arg(va, int); + if (width < 0) { + info.flags |= gbFmt_Minus; + info.width = -width; + } else { + info.width = width; + } + fmt++; + } else { + info.width = cast(i32)gb_str_to_i64(fmt, cast(char **)&fmt, 10); + } + + // NOTE(bill): Optional Precision + if (*fmt == '.') { + fmt++; + if (*fmt == '*') { + info.precision = va_arg(va, int); + fmt++; + } else { + info.precision = cast(i32)gb_str_to_i64(fmt, cast(char **)&fmt, 10); + } + info.flags &= ~gbFmt_Zero; + } + + + switch (*fmt++) { + case 'h': + if (*fmt == 'h') { // hh => char + info.flags |= gbFmt_Char; + fmt++; + } else { // h => short + info.flags |= gbFmt_Short; + } + break; + + case 'l': + if (*fmt == 'l') { // ll => long long + info.flags |= gbFmt_Llong; + fmt++; + } else { // l => long + info.flags 
|= gbFmt_Long; + } + break; + + break; + + case 'z': // NOTE(bill): usize + info.flags |= gbFmt_Unsigned; + // fallthrough + case 't': // NOTE(bill): isize + info.flags |= gbFmt_Size; + break; + + default: fmt--; break; + } + + + switch (*fmt) { + case 'u': + info.flags |= gbFmt_Unsigned; + // fallthrough + case 'd': + case 'i': + info.base = 10; + break; + + case 'o': + info.base = 8; + break; + + case 'x': + info.base = 16; + info.flags |= (gbFmt_Unsigned | gbFmt_Lower); + break; + + case 'X': + info.base = 16; + info.flags |= (gbFmt_Unsigned | gbFmt_Upper); + break; + + case 'f': + case 'F': + case 'g': + case 'G': + len = gb__print_f64(text, remaining, &info, va_arg(va, f64)); + break; + + case 'a': + case 'A': + // TODO(bill): + break; + + case 'c': + len = gb__print_char(text, remaining, &info, cast(char)va_arg(va, int)); + break; + + case 's': + len = gb__print_string(text, remaining, &info, va_arg(va, char *)); + break; + + case 'p': + info.base = 16; + info.flags |= (gbFmt_Lower|gbFmt_Unsigned|gbFmt_Alt|gbFmt_Intptr); + break; + + case '%': + len = gb__print_char(text, remaining, &info, '%'); + break; + + default: fmt--; break; + } + + fmt++; + + if (info.base != 0) { + if (info.flags & gbFmt_Unsigned) { + u64 value = 0; + switch (info.flags & gbFmt_Ints) { + case gbFmt_Char: value = cast(u64)cast(u8) va_arg(va, int); break; + case gbFmt_Short: value = cast(u64)cast(u16)va_arg(va, int); break; + case gbFmt_Long: value = cast(u64)va_arg(va, unsigned long); break; + case gbFmt_Llong: value = cast(u64)va_arg(va, unsigned long long); break; + case gbFmt_Size: value = cast(u64)va_arg(va, usize); break; + case gbFmt_Intptr: value = cast(u64)va_arg(va, uintptr); break; + default: value = cast(u64)va_arg(va, unsigned int); break; + } + + len = gb__print_u64(text, remaining, &info, value); + + } else { + i64 value = 0; + switch (info.flags & gbFmt_Ints) { + case gbFmt_Char: value = cast(i64)cast(i8) va_arg(va, int); break; + case gbFmt_Short: value = 
cast(i64)cast(i16)va_arg(va, int); break; + case gbFmt_Long: value = cast(i64)va_arg(va, long); break; + case gbFmt_Llong: value = cast(i64)va_arg(va, long long); break; + case gbFmt_Size: value = cast(i64)va_arg(va, usize); break; + case gbFmt_Intptr: value = cast(i64)va_arg(va, uintptr); break; + default: value = cast(i64)va_arg(va, int); break; + } + + len = gb__print_i64(text, remaining, &info, value); + } + } + + + text += len; + if (len >= remaining) { + remaining = gb_min(remaining, 1); + } else { + remaining -= len; + } + } + + *text++ = '\0'; + res = (text - text_begin); + return (res >= max_len || res < 0) ? -1 : res; +} + + +//////////////////////////////////////////////////////////////// +// +// DLL Handling +// +// + +#if defined(GB_SYSTEM_WINDOWS) + +gbDllHandle gb_dll_load(char const *filepath) { + return cast(gbDllHandle)LoadLibraryA(filepath); +} +gb_inline void gb_dll_unload (gbDllHandle dll) { FreeLibrary(cast(HMODULE)dll); } +gb_inline gbDllProc gb_dll_proc_address(gbDllHandle dll, char const *proc_name) { return cast(gbDllProc)GetProcAddress(cast(HMODULE)dll, proc_name); } + +#else // POSIX + +gbDllHandle gb_dll_load(char const *filepath) { + // TODO(bill): Should this be RTLD_LOCAL? 
+ return cast(gbDllHandle)dlopen(filepath, RTLD_LAZY|RTLD_GLOBAL); +} + +gb_inline void gb_dll_unload (gbDllHandle dll) { dlclose(dll); } +gb_inline gbDllProc gb_dll_proc_address(gbDllHandle dll, char const *proc_name) { return cast(gbDllProc)dlsym(dll, proc_name); } + +#endif + + +//////////////////////////////////////////////////////////////// +// +// Time +// +// + +#if defined(GB_COMPILER_MSVC) && !defined(__clang__) + gb_inline u64 gb_rdtsc(void) { return __rdtsc(); } +#elif defined(__i386__) + gb_inline u64 gb_rdtsc(void) { + u64 x; + __asm__ volatile (".byte 0x0f, 0x31" : "=A" (x)); + return x; + } +#elif defined(__x86_64__) + gb_inline u64 gb_rdtsc(void) { + u32 hi, lo; + __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi)); + return (cast(u64)lo) | ((cast(u64)hi)<<32); + } +#elif defined(__powerpc__) + gb_inline u64 gb_rdtsc(void) { + u64 result = 0; + u32 upper, lower,tmp; + __asm__ volatile( + "0: \n" + "\tmftbu %0 \n" + "\tmftb %1 \n" + "\tmftbu %2 \n" + "\tcmpw %2,%0 \n" + "\tbne 0b \n" + : "=r"(upper),"=r"(lower),"=r"(tmp) + ); + result = upper; + result = result<<32; + result = result|lower; + + return result; + } +#endif + +#if defined(GB_SYSTEM_WINDOWS) + + gb_inline f64 gb_time_now(void) { + gb_local_persist LARGE_INTEGER win32_perf_count_freq = {0}; + f64 result; + LARGE_INTEGER counter; + if (!win32_perf_count_freq.QuadPart) { + QueryPerformanceFrequency(&win32_perf_count_freq); + GB_ASSERT(win32_perf_count_freq.QuadPart != 0); + } + + QueryPerformanceCounter(&counter); + + result = counter.QuadPart / cast(f64)(win32_perf_count_freq.QuadPart); + return result; + } + + gb_inline u64 gb_utc_time_now(void) { + FILETIME ft; + ULARGE_INTEGER li; + + GetSystemTimeAsFileTime(&ft); + li.LowPart = ft.dwLowDateTime; + li.HighPart = ft.dwHighDateTime; + + return li.QuadPart/10; + } + + gb_inline void gb_sleep_ms(u32 ms) { Sleep(ms); } + +#else + + gb_global f64 gb__timebase = 0.0; + gb_global u64 gb__timestart = 0; + + gb_inline f64 gb_time_now(void) { +#if 
defined(GB_SYSTEM_OSX) + f64 result; + + if (!gb__timestart) { + mach_timebase_info_data_t tb = {0}; + mach_timebase_info(&tb); + gb__timebase = tb.numer; + gb__timebase /= tb.denom; + gb__timestart = mach_absolute_time(); + } + + // NOTE(bill): mach_absolute_time() returns things in nanoseconds + result = 1.0e-9 * (mach_absolute_time() - gb__timestart) * gb__timebase; + return result; +#else + struct timespec t; + f64 result; + + // IMPORTANT TODO(bill): THIS IS A HACK + clock_gettime(1 /*CLOCK_MONOTONIC*/, &t); + result = t.tv_sec + 1.0e-9 * t.tv_nsec; + return result; +#endif + } + + gb_inline u64 gb_utc_time_now(void) { + struct timespec t; +#if defined(GB_SYSTEM_OSX) + clock_serv_t cclock; + mach_timespec_t mts; + host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); + clock_get_time(cclock, &mts); + mach_port_deallocate(mach_task_self(), cclock); + t.tv_sec = mts.tv_sec; + t.tv_nsec = mts.tv_nsec; +#else + // IMPORTANT TODO(bill): THIS IS A HACK + clock_gettime(0 /*CLOCK_REALTIME*/, &t); +#endif + return cast(u64)t.tv_sec * 1000000ull + t.tv_nsec/1000 + 11644473600000000ull; + } + + gb_inline void gb_sleep_ms(u32 ms) { + struct timespec req = {cast(time_t)ms/1000, cast(long)((ms%1000)*1000000)}; + struct timespec rem = {0, 0}; + nanosleep(&req, &rem); + } + +#endif + + + +//////////////////////////////////////////////////////////////// +// +// Miscellany +// +// + +gb_global gbAtomic32 gb__random_shared_counter = {0}; + +gb_internal u32 gb__get_noise_from_time(void) { + u32 accum = 0; + f64 start, remaining, end, curr = 0; + u64 interval = 100000ll; + + start = gb_time_now(); + remaining = (interval - cast(u64)(interval*start)%interval) / cast(f64)interval; + end = start + remaining; + + do { + curr = gb_time_now(); + accum += cast(u32)curr; + } while (curr >= end); + return accum; +} + +// NOTE(bill): Partly from http://preshing.com/20121224/how-to-generate-a-sequence-of-unique-random-integers/ +// But the generation is even more random-er-est + 
+gb_internal gb_inline u32 gb__permute_qpr(u32 x) { + gb_local_persist u32 const prime = 4294967291; // 2^32 - 5 + if (x >= prime) { + return x; + } else { + u32 residue = cast(u32)(cast(u64) x * x) % prime; + if (x <= prime / 2) { + return residue; + } else { + return prime - residue; + } + } +} + +gb_internal gb_inline u32 gb__permute_with_offset(u32 x, u32 offset) { + return (gb__permute_qpr(x) + offset) ^ 0x5bf03635; +} + + +void gb_random_init(gbRandom *r) { + u64 time, tick; + isize i, j; + u32 x = 0; + r->value = 0; + + r->offsets[0] = gb__get_noise_from_time(); + r->offsets[1] = gb_atomic32_fetch_add(&gb__random_shared_counter, 1); + r->offsets[2] = gb_thread_current_id(); + r->offsets[3] = gb_thread_current_id() * 3 + 1; + time = gb_utc_time_now(); + r->offsets[4] = cast(u32)(time >> 32); + r->offsets[5] = cast(u32)time; + r->offsets[6] = gb__get_noise_from_time(); + tick = gb_rdtsc(); + r->offsets[7] = cast(u32)(tick ^ (tick >> 32)); + + for (j = 0; j < 4; j++) { + for (i = 0; i < gb_count_of(r->offsets); i++) { + r->offsets[i] = x = gb__permute_with_offset(x, r->offsets[i]); + } + } +} + +u32 gb_random_gen_u32(gbRandom *r) { + u32 x = r->value; + u32 carry = 1; + isize i; + for (i = 0; i < gb_count_of(r->offsets); i++) { + x = gb__permute_with_offset(x, r->offsets[i]); + if (carry > 0) { + carry = ++r->offsets[i] ? 
0 : 1; + } + } + + r->value = x; + return x; +} + +u32 gb_random_gen_u32_unique(gbRandom *r) { + u32 x = r->value; + isize i; + r->value++; + for (i = 0; i < gb_count_of(r->offsets); i++) { + x = gb__permute_with_offset(x, r->offsets[i]); + } + + return x; +} + +u64 gb_random_gen_u64(gbRandom *r) { + return ((cast(u64)gb_random_gen_u32(r)) << 32) | gb_random_gen_u32(r); +} + + +isize gb_random_gen_isize(gbRandom *r) { + u64 u = gb_random_gen_u64(r); + return *cast(isize *)&u; +} + + + + +i64 gb_random_range_i64(gbRandom *r, i64 lower_inc, i64 higher_inc) { + u64 u = gb_random_gen_u64(r); + i64 i = *cast(i64 *)&u; + i64 diff = higher_inc-lower_inc+1; + i %= diff; + i += lower_inc; + return i; +} + +isize gb_random_range_isize(gbRandom *r, isize lower_inc, isize higher_inc) { + u64 u = gb_random_gen_u64(r); + isize i = *cast(isize *)&u; + isize diff = higher_inc-lower_inc+1; + i %= diff; + i += lower_inc; + return i; +} + +// NOTE(bill): Semi-cc'ed from gb_math to remove need for fmod and math.h +f64 gb__copy_sign64(f64 x, f64 y) { + i64 ix, iy; + ix = *(i64 *)&x; + iy = *(i64 *)&y; + + ix &= 0x7fffffffffffffff; + ix |= iy & 0x8000000000000000; + return *cast(f64 *)&ix; +} + +f64 gb__floor64 (f64 x) { return cast(f64)((x >= 0.0) ? cast(i64)x : cast(i64)(x-0.9999999999999999)); } +f64 gb__ceil64 (f64 x) { return cast(f64)((x < 0) ? cast(i64)x : (cast(i64)x)+1); } +f64 gb__round64 (f64 x) { return cast(f64)((x >= 0.0) ? gb__floor64(x + 0.5) : gb__ceil64(x - 0.5)); } +f64 gb__remainder64(f64 x, f64 y) { return x - (gb__round64(x/y)*y); } +f64 gb__abs64 (f64 x) { return x < 0 ? -x : x; } +f64 gb__sign64 (f64 x) { return x < 0 ? 
-1.0 : +1.0; } + +f64 gb__mod64(f64 x, f64 y) { + f64 result; + y = gb__abs64(y); + result = gb__remainder64(gb__abs64(x), y); + if (gb__sign64(result)) result += y; + return gb__copy_sign64(result, x); +} + + +f64 gb_random_range_f64(gbRandom *r, f64 lower_inc, f64 higher_inc) { + u64 u = gb_random_gen_u64(r); + f64 f = *cast(f64 *)&u; + f64 diff = higher_inc-lower_inc+1.0; + f = gb__mod64(f, diff); + f += lower_inc; + return f; +} + + + +#if defined(GB_SYSTEM_WINDOWS) +gb_inline void gb_exit(u32 code) { ExitProcess(code); } +#else +gb_inline void gb_exit(u32 code) { exit(code); } +#endif + +gb_inline void gb_yield(void) { +#if defined(GB_SYSTEM_WINDOWS) + Sleep(0); +#else + sched_yield(); +#endif +} + +gb_inline void gb_set_env(char const *name, char const *value) { +#if defined(GB_SYSTEM_WINDOWS) + // TODO(bill): Should this be a Wide version? + SetEnvironmentVariableA(name, value); +#else + setenv(name, value, 1); +#endif +} + +gb_inline void gb_unset_env(char const *name) { +#if defined(GB_SYSTEM_WINDOWS) + // TODO(bill): Should this be a Wide version? 
+ SetEnvironmentVariableA(name, NULL); +#else + unsetenv(name); +#endif +} + + +gb_inline u16 gb_endian_swap16(u16 i) { + return (i>>8) | (i<<8); +} + +gb_inline u32 gb_endian_swap32(u32 i) { + return (i>>24) |(i<<24) | + ((i&0x00ff0000u)>>8) | ((i&0x0000ff00u)<<8); +} + +gb_inline u64 gb_endian_swap64(u64 i) { + return (i>>56) | (i<<56) | + ((i&0x00ff000000000000ull)>>40) | ((i&0x000000000000ff00ull)<<40) | + ((i&0x0000ff0000000000ull)>>24) | ((i&0x0000000000ff0000ull)<<24) | + ((i&0x000000ff00000000ull)>>8) | ((i&0x00000000ff000000ull)<<8); +} + + +gb_inline isize gb_count_set_bits(u64 mask) { + isize count = 0; + while (mask) { + count += (mask & 1); + mask >>= 1; + } + return count; +} + + + + + + +//////////////////////////////////////////////////////////////// +// +// Platform +// +// + +#if defined(GB_PLATFORM) + +gb_inline void gb_key_state_update(gbKeyState *s, b32 is_down) { + b32 was_down = (*s & gbKeyState_Down) != 0; + is_down = is_down != 0; // NOTE(bill): Make sure it's a boolean + GB_MASK_SET(*s, is_down, gbKeyState_Down); + GB_MASK_SET(*s, !was_down && is_down, gbKeyState_Pressed); + GB_MASK_SET(*s, was_down && !is_down, gbKeyState_Released); +} + +#if defined(GB_SYSTEM_WINDOWS) + +#ifndef ERROR_DEVICE_NOT_CONNECTED +#define ERROR_DEVICE_NOT_CONNECTED 1167 +#endif + +GB_XINPUT_GET_STATE(gbXInputGetState_Stub) { + gb_unused(dwUserIndex); gb_unused(pState); + return ERROR_DEVICE_NOT_CONNECTED; +} +GB_XINPUT_SET_STATE(gbXInputSetState_Stub) { + gb_unused(dwUserIndex); gb_unused(pVibration); + return ERROR_DEVICE_NOT_CONNECTED; +} + + +gb_internal gb_inline f32 gb__process_xinput_stick_value(i16 value, i16 dead_zone_threshold) { + f32 result = 0; + + if (value < -dead_zone_threshold) { + result = cast(f32) (value + dead_zone_threshold) / (32768.0f - dead_zone_threshold); + } else if (value > dead_zone_threshold) { + result = cast(f32) (value - dead_zone_threshold) / (32767.0f - dead_zone_threshold); + } + + return result; +} + +gb_internal void 
gb__platform_resize_dib_section(gbPlatform *p, i32 width, i32 height) { + if ((p->renderer_type == gbRenderer_Software) && + !(p->window_width == width && p->window_height == height)) { + BITMAPINFO bmi = {0}; + + if (width == 0 || height == 0) { + return; + } + + p->window_width = width; + p->window_height = height; + + // TODO(bill): Is this slow to get the desktop mode everytime? + p->sw_framebuffer.bits_per_pixel = gb_video_mode_get_desktop().bits_per_pixel; + p->sw_framebuffer.pitch = (p->sw_framebuffer.bits_per_pixel * width / 8); + + bmi.bmiHeader.biSize = gb_size_of(bmi.bmiHeader); + bmi.bmiHeader.biWidth = width; + bmi.bmiHeader.biHeight = height; // NOTE(bill): -ve is top-down, +ve is bottom-up + bmi.bmiHeader.biPlanes = 1; + bmi.bmiHeader.biBitCount = cast(u16)p->sw_framebuffer.bits_per_pixel; + bmi.bmiHeader.biCompression = 0 /*BI_RGB*/; + + p->sw_framebuffer.win32_bmi = bmi; + + + if (p->sw_framebuffer.memory) { + gb_vm_free(gb_virtual_memory(p->sw_framebuffer.memory, p->sw_framebuffer.memory_size)); + } + + { + isize memory_size = p->sw_framebuffer.pitch * height; + gbVirtualMemory vm = gb_vm_alloc(0, memory_size); + p->sw_framebuffer.memory = vm.data; + p->sw_framebuffer.memory_size = vm.size; + } + } +} + + +gb_internal gbKeyType gb__win32_from_vk(unsigned int key) { + // NOTE(bill): Letters and numbers are defined the same for VK_* and GB_* + if (key >= 'A' && key < 'Z') return cast(gbKeyType)key; + if (key >= '0' && key < '9') return cast(gbKeyType)key; + switch (key) { + case VK_ESCAPE: return gbKey_Escape; + + case VK_LCONTROL: return gbKey_Lcontrol; + case VK_LSHIFT: return gbKey_Lshift; + case VK_LMENU: return gbKey_Lalt; + case VK_LWIN: return gbKey_Lsystem; + case VK_RCONTROL: return gbKey_Rcontrol; + case VK_RSHIFT: return gbKey_Rshift; + case VK_RMENU: return gbKey_Ralt; + case VK_RWIN: return gbKey_Rsystem; + case VK_MENU: return gbKey_Menu; + + case VK_OEM_4: return gbKey_Lbracket; + case VK_OEM_6: return gbKey_Rbracket; + case VK_OEM_1: 
return gbKey_Semicolon; + case VK_OEM_COMMA: return gbKey_Comma; + case VK_OEM_PERIOD: return gbKey_Period; + case VK_OEM_7: return gbKey_Quote; + case VK_OEM_2: return gbKey_Slash; + case VK_OEM_5: return gbKey_Backslash; + case VK_OEM_3: return gbKey_Grave; + case VK_OEM_PLUS: return gbKey_Equals; + case VK_OEM_MINUS: return gbKey_Minus; + + case VK_SPACE: return gbKey_Space; + case VK_RETURN: return gbKey_Return; + case VK_BACK: return gbKey_Backspace; + case VK_TAB: return gbKey_Tab; + + case VK_PRIOR: return gbKey_Pageup; + case VK_NEXT: return gbKey_Pagedown; + case VK_END: return gbKey_End; + case VK_HOME: return gbKey_Home; + case VK_INSERT: return gbKey_Insert; + case VK_DELETE: return gbKey_Delete; + + case VK_ADD: return gbKey_Plus; + case VK_SUBTRACT: return gbKey_Subtract; + case VK_MULTIPLY: return gbKey_Multiply; + case VK_DIVIDE: return gbKey_Divide; + + case VK_LEFT: return gbKey_Left; + case VK_RIGHT: return gbKey_Right; + case VK_UP: return gbKey_Up; + case VK_DOWN: return gbKey_Down; + + case VK_NUMPAD0: return gbKey_Numpad0; + case VK_NUMPAD1: return gbKey_Numpad1; + case VK_NUMPAD2: return gbKey_Numpad2; + case VK_NUMPAD3: return gbKey_Numpad3; + case VK_NUMPAD4: return gbKey_Numpad4; + case VK_NUMPAD5: return gbKey_Numpad5; + case VK_NUMPAD6: return gbKey_Numpad6; + case VK_NUMPAD7: return gbKey_Numpad7; + case VK_NUMPAD8: return gbKey_Numpad8; + case VK_NUMPAD9: return gbKey_Numpad9; + case VK_SEPARATOR: return gbKey_NumpadEnter; + case VK_DECIMAL: return gbKey_NumpadDot; + + case VK_F1: return gbKey_F1; + case VK_F2: return gbKey_F2; + case VK_F3: return gbKey_F3; + case VK_F4: return gbKey_F4; + case VK_F5: return gbKey_F5; + case VK_F6: return gbKey_F6; + case VK_F7: return gbKey_F7; + case VK_F8: return gbKey_F8; + case VK_F9: return gbKey_F9; + case VK_F10: return gbKey_F10; + case VK_F11: return gbKey_F11; + case VK_F12: return gbKey_F12; + case VK_F13: return gbKey_F13; + case VK_F14: return gbKey_F14; + case VK_F15: return gbKey_F15; 
+ + case VK_PAUSE: return gbKey_Pause; + } + return gbKey_Unknown; +} +LRESULT CALLBACK gb__win32_window_callback(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam) { + // NOTE(bill): Silly callbacks + gbPlatform *platform = cast(gbPlatform *)GetWindowLongPtrW(hWnd, GWLP_USERDATA); + b32 window_has_focus = (platform != NULL) && platform->window_has_focus; + + if (msg == WM_CREATE) { // NOTE(bill): Doesn't need the platform + // NOTE(bill): https://msdn.microsoft.com/en-us/library/windows/desktop/ms645536(v=vs.85).aspx + RAWINPUTDEVICE rid[2] = {0}; + + // NOTE(bill): Keyboard + rid[0].usUsagePage = 0x01; + rid[0].usUsage = 0x06; + rid[0].dwFlags = 0x00000030/*RIDEV_NOLEGACY*/; // NOTE(bill): Do not generate legacy messages such as WM_KEYDOWN + rid[0].hwndTarget = hWnd; + + // NOTE(bill): Mouse + rid[1].usUsagePage = 0x01; + rid[1].usUsage = 0x02; + rid[1].dwFlags = 0; // NOTE(bill): adds HID mouse and also allows legacy mouse messages to allow for window movement etc. + rid[1].hwndTarget = hWnd; + + if (RegisterRawInputDevices(rid, gb_count_of(rid), gb_size_of(rid[0])) == false) { + DWORD err = GetLastError(); + GB_PANIC("Failed to initialize raw input device for win32." + "Err: %u", err); + } + } + + if (!platform) { + return DefWindowProcW(hWnd, msg, wParam, lParam); + } + + switch (msg) { + case WM_CLOSE: + case WM_DESTROY: + platform->window_is_closed = true; + return 0; + + case WM_QUIT: { + platform->quit_requested = true; + } break; + + case WM_UNICHAR: { + if (window_has_focus) { + if (wParam == '\r') { + wParam = '\n'; + } + // TODO(bill): Does this need to be thread-safe? 
+ platform->char_buffer[platform->char_buffer_count++] = cast(Rune)wParam; + } + } break; + + + case WM_INPUT: { + RAWINPUT raw = {0}; + unsigned int size = gb_size_of(RAWINPUT); + + if (!GetRawInputData(cast(HRAWINPUT)lParam, RID_INPUT, &raw, &size, gb_size_of(RAWINPUTHEADER))) { + return 0; + } + switch (raw.header.dwType) { + case RIM_TYPEKEYBOARD: { + // NOTE(bill): Many thanks to https://blog.molecular-matters.com/2011/09/05/properly-handling-keyboard-input/ + // for the + RAWKEYBOARD *raw_kb = &raw.data.keyboard; + unsigned int vk = raw_kb->VKey; + unsigned int scan_code = raw_kb->MakeCode; + unsigned int flags = raw_kb->Flags; + // NOTE(bill): e0 and e1 are escape sequences used for certain special keys, such as PRINT and PAUSE/BREAK. + // NOTE(bill): http://www.win.tue.nl/~aeb/linux/kbd/scancodes-1.html + b32 is_e0 = (flags & RI_KEY_E0) != 0; + b32 is_e1 = (flags & RI_KEY_E1) != 0; + b32 is_up = (flags & RI_KEY_BREAK) != 0; + b32 is_down = !is_up; + + // TODO(bill): Should I handle scan codes? + + if (vk == 255) { + // NOTE(bill): Discard "fake keys" + return 0; + } else if (vk == VK_SHIFT) { + // NOTE(bill): Correct left/right shift + vk = MapVirtualKeyW(scan_code, MAPVK_VSC_TO_VK_EX); + } else if (vk == VK_NUMLOCK) { + // NOTE(bill): Correct PAUSE/BREAK and NUM LOCK and set the extended bit + scan_code = MapVirtualKeyW(vk, MAPVK_VK_TO_VSC) | 0x100; + } + + if (is_e1) { + // NOTE(bill): Escaped sequences, turn vk into the correct scan code + // except for VK_PAUSE (it's a bug) + if (vk == VK_PAUSE) { + scan_code = 0x45; + } else { + scan_code = MapVirtualKeyW(vk, MAPVK_VK_TO_VSC); + } + } + + switch (vk) { + case VK_CONTROL: vk = (is_e0) ? VK_RCONTROL : VK_LCONTROL; break; + case VK_MENU: vk = (is_e0) ? 
VK_RMENU : VK_LMENU; break; + + case VK_RETURN: if (is_e0) vk = VK_SEPARATOR; break; // NOTE(bill): Numpad return + case VK_DELETE: if (!is_e0) vk = VK_DECIMAL; break; // NOTE(bill): Numpad dot + case VK_INSERT: if (!is_e0) vk = VK_NUMPAD0; break; + case VK_HOME: if (!is_e0) vk = VK_NUMPAD7; break; + case VK_END: if (!is_e0) vk = VK_NUMPAD1; break; + case VK_PRIOR: if (!is_e0) vk = VK_NUMPAD9; break; + case VK_NEXT: if (!is_e0) vk = VK_NUMPAD3; break; + + // NOTE(bill): The standard arrow keys will always have their e0 bit set, but the + // corresponding keys on the NUMPAD will not. + case VK_LEFT: if (!is_e0) vk = VK_NUMPAD4; break; + case VK_RIGHT: if (!is_e0) vk = VK_NUMPAD6; break; + case VK_UP: if (!is_e0) vk = VK_NUMPAD8; break; + case VK_DOWN: if (!is_e0) vk = VK_NUMPAD2; break; + + // NUMPAD 5 doesn't have its e0 bit set + case VK_CLEAR: if (!is_e0) vk = VK_NUMPAD5; break; + } + + // NOTE(bill): Set appropriate key state flags + gb_key_state_update(&platform->keys[gb__win32_from_vk(vk)], is_down); + + } break; + case RIM_TYPEMOUSE: { + RAWMOUSE *raw_mouse = &raw.data.mouse; + u16 flags = raw_mouse->usButtonFlags; + long dx = +raw_mouse->lLastX; + long dy = -raw_mouse->lLastY; + + if (flags & RI_MOUSE_WHEEL) { + platform->mouse_wheel_delta = cast(i16)raw_mouse->usButtonData; + } + + platform->mouse_raw_dx = dx; + platform->mouse_raw_dy = dy; + } break; + } + } break; + + default: break; + } + + return DefWindowProcW(hWnd, msg, wParam, lParam); +} + + +typedef void *wglCreateContextAttribsARB_Proc(void *hDC, void *hshareContext, int const *attribList); + + +b32 gb__platform_init(gbPlatform *p, char const *window_title, gbVideoMode mode, gbRendererType type, u32 window_flags) { + WNDCLASSEXW wc = {gb_size_of(WNDCLASSEXW)}; + DWORD ex_style = 0, style = 0; + RECT wr; + u16 title_buffer[256] = {0}; // TODO(bill): gb_local_persist this? 
+ + wc.style = CS_HREDRAW | CS_VREDRAW; // | CS_OWNDC + wc.lpfnWndProc = gb__win32_window_callback; + wc.hbrBackground = cast(HBRUSH)GetStockObject(0/*WHITE_BRUSH*/); + wc.lpszMenuName = NULL; + wc.lpszClassName = L"gb-win32-wndclass"; // TODO(bill): Is this enough? + wc.hInstance = GetModuleHandleW(NULL); + + if (RegisterClassExW(&wc) == 0) { + MessageBoxW(NULL, L"Failed to register the window class", L"ERROR", MB_OK | MB_ICONEXCLAMATION); + return false; + } + + if ((window_flags & gbWindow_Fullscreen) && !(window_flags & gbWindow_Borderless)) { + DEVMODEW screen_settings = {gb_size_of(DEVMODEW)}; + screen_settings.dmPelsWidth = mode.width; + screen_settings.dmPelsHeight = mode.height; + screen_settings.dmBitsPerPel = mode.bits_per_pixel; + screen_settings.dmFields = DM_BITSPERPEL|DM_PELSWIDTH|DM_PELSHEIGHT; + + if (ChangeDisplaySettingsW(&screen_settings, CDS_FULLSCREEN) != DISP_CHANGE_SUCCESSFUL) { + if (MessageBoxW(NULL, L"The requested fullscreen mode is not supported by\n" + L"your video card. 
Use windowed mode instead?", + L"", + MB_YESNO|MB_ICONEXCLAMATION) == IDYES) { + window_flags &= ~gbWindow_Fullscreen; + } else { + mode = gb_video_mode_get_desktop(); + screen_settings.dmPelsWidth = mode.width; + screen_settings.dmPelsHeight = mode.height; + screen_settings.dmBitsPerPel = mode.bits_per_pixel; + ChangeDisplaySettingsW(&screen_settings, CDS_FULLSCREEN); + } + } + } + + + // ex_style = WS_EX_APPWINDOW | WS_EX_WINDOWEDGE; + // style = WS_CLIPSIBLINGS | WS_CLIPCHILDREN | WS_VISIBLE | WS_THICKFRAME | WS_SYSMENU | WS_MAXIMIZEBOX | WS_MINIMIZEBOX; + + style |= WS_VISIBLE; + + if (window_flags & gbWindow_Hidden) style &= ~WS_VISIBLE; + if (window_flags & gbWindow_Resizable) style |= WS_THICKFRAME | WS_MAXIMIZEBOX; + if (window_flags & gbWindow_Maximized) style |= WS_MAXIMIZE; + if (window_flags & gbWindow_Minimized) style |= WS_MINIMIZE; + + // NOTE(bill): Completely ignore the given mode and just change it + if (window_flags & gbWindow_FullscreenDesktop) { + mode = gb_video_mode_get_desktop(); + } + + if ((window_flags & gbWindow_Fullscreen) || (window_flags & gbWindow_Borderless)) { + style |= WS_POPUP; + } else { + style |= WS_OVERLAPPED | WS_CAPTION | WS_SYSMENU | WS_MINIMIZEBOX; + } + + + wr.left = 0; + wr.top = 0; + wr.right = mode.width; + wr.bottom = mode.height; + AdjustWindowRect(&wr, style, false); + + p->window_flags = window_flags; + p->window_handle = CreateWindowExW(ex_style, + wc.lpszClassName, + cast(wchar_t const *)gb_utf8_to_ucs2(title_buffer, gb_size_of(title_buffer), window_title), + style, + CW_USEDEFAULT, CW_USEDEFAULT, + wr.right - wr.left, wr.bottom - wr.top, + 0, 0, + GetModuleHandleW(NULL), + NULL); + + if (!p->window_handle) { + MessageBoxW(NULL, L"Window creation failed", L"Error", MB_OK|MB_ICONEXCLAMATION); + return false; + } + + p->win32_dc = GetDC(cast(HWND)p->window_handle); + + p->renderer_type = type; + switch (p->renderer_type) { + case gbRenderer_Opengl: { + wglCreateContextAttribsARB_Proc *wglCreateContextAttribsARB; 
+ i32 attribs[8] = {0}; + isize c = 0; + + PIXELFORMATDESCRIPTOR pfd = {gb_size_of(PIXELFORMATDESCRIPTOR)}; + pfd.nVersion = 1; + pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER; + pfd.iPixelType = PFD_TYPE_RGBA; + pfd.cColorBits = 32; + pfd.cAlphaBits = 8; + pfd.cDepthBits = 24; + pfd.cStencilBits = 8; + pfd.iLayerType = PFD_MAIN_PLANE; + + SetPixelFormat(cast(HDC)p->win32_dc, ChoosePixelFormat(cast(HDC)p->win32_dc, &pfd), NULL); + p->opengl.context = cast(void *)wglCreateContext(cast(HDC)p->win32_dc); + wglMakeCurrent(cast(HDC)p->win32_dc, cast(HGLRC)p->opengl.context); + + if (p->opengl.major > 0) { + attribs[c++] = 0x2091; // WGL_CONTEXT_MAJOR_VERSION_ARB + attribs[c++] = gb_max(p->opengl.major, 1); + } + if (p->opengl.major > 0 && p->opengl.minor >= 0) { + attribs[c++] = 0x2092; // WGL_CONTEXT_MINOR_VERSION_ARB + attribs[c++] = gb_max(p->opengl.minor, 0); + } + + if (p->opengl.core) { + attribs[c++] = 0x9126; // WGL_CONTEXT_PROFILE_MASK_ARB + attribs[c++] = 0x0001; // WGL_CONTEXT_CORE_PROFILE_BIT_ARB + } else if (p->opengl.compatible) { + attribs[c++] = 0x9126; // WGL_CONTEXT_PROFILE_MASK_ARB + attribs[c++] = 0x0002; // WGL_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB + } + attribs[c++] = 0; // NOTE(bill): tells the proc that this is the end of attribs + + wglCreateContextAttribsARB = cast(wglCreateContextAttribsARB_Proc *)wglGetProcAddress("wglCreateContextAttribsARB"); + if (wglCreateContextAttribsARB) { + HGLRC rc = cast(HGLRC)wglCreateContextAttribsARB(p->win32_dc, 0, attribs); + if (rc && wglMakeCurrent(cast(HDC)p->win32_dc, rc)) { + p->opengl.context = rc; + } else { + // TODO(bill): Handle errors from GetLastError + // ERROR_INVALID_VERSION_ARB 0x2095 + // ERROR_INVALID_PROFILE_ARB 0x2096 + } + } + + } break; + + case gbRenderer_Software: + gb__platform_resize_dib_section(p, mode.width, mode.height); + break; + + default: + GB_PANIC("Unknown window type"); + break; + } + + SetForegroundWindow(cast(HWND)p->window_handle); + 
SetFocus(cast(HWND)p->window_handle); + SetWindowLongPtrW(cast(HWND)p->window_handle, GWLP_USERDATA, cast(LONG_PTR)p); + + p->window_width = mode.width; + p->window_height = mode.height; + + if (p->renderer_type == gbRenderer_Opengl) { + p->opengl.dll_handle = gb_dll_load("opengl32.dll"); + } + + { // Load XInput + // TODO(bill): What other dlls should I look for? + gbDllHandle xinput_library = gb_dll_load("xinput1_4.dll"); + p->xinput.get_state = gbXInputGetState_Stub; + p->xinput.set_state = gbXInputSetState_Stub; + + if (!xinput_library) xinput_library = gb_dll_load("xinput9_1_0.dll"); + if (!xinput_library) xinput_library = gb_dll_load("xinput1_3.dll"); + if (!xinput_library) { + // TODO(bill): Proper Diagnostic + gb_printf_err("XInput could not be loaded. Controllers will not work!\n"); + } else { + p->xinput.get_state = cast(gbXInputGetStateProc *)gb_dll_proc_address(xinput_library, "XInputGetState"); + p->xinput.set_state = cast(gbXInputSetStateProc *)gb_dll_proc_address(xinput_library, "XInputSetState"); + } + } + + // Init keys + gb_zero_array(p->keys, gb_count_of(p->keys)); + + p->is_initialized = true; + return true; +} + +gb_inline b32 gb_platform_init_with_software(gbPlatform *p, char const *window_title, + i32 width, i32 height, u32 window_flags) { + gbVideoMode mode; + mode.width = width; + mode.height = height; + mode.bits_per_pixel = 32; + return gb__platform_init(p, window_title, mode, gbRenderer_Software, window_flags); +} + +gb_inline b32 gb_platform_init_with_opengl(gbPlatform *p, char const *window_title, + i32 width, i32 height, u32 window_flags, i32 major, i32 minor, b32 core, b32 compatible) { + gbVideoMode mode; + mode.width = width; + mode.height = height; + mode.bits_per_pixel = 32; + p->opengl.major = major; + p->opengl.minor = minor; + p->opengl.core = cast(b16)core; + p->opengl.compatible = cast(b16)compatible; + return gb__platform_init(p, window_title, mode, gbRenderer_Opengl, window_flags); +} + +#ifndef _XINPUT_H_ +typedef struct 
_XINPUT_GAMEPAD { + u16 wButtons; + u8 bLeftTrigger; + u8 bRightTrigger; + u16 sThumbLX; + u16 sThumbLY; + u16 sThumbRX; + u16 sThumbRY; +} XINPUT_GAMEPAD; + +typedef struct _XINPUT_STATE { + DWORD dwPacketNumber; + XINPUT_GAMEPAD Gamepad; +} XINPUT_STATE; + +typedef struct _XINPUT_VIBRATION { + u16 wLeftMotorSpeed; + u16 wRightMotorSpeed; +} XINPUT_VIBRATION; + +#define XINPUT_GAMEPAD_DPAD_UP 0x00000001 +#define XINPUT_GAMEPAD_DPAD_DOWN 0x00000002 +#define XINPUT_GAMEPAD_DPAD_LEFT 0x00000004 +#define XINPUT_GAMEPAD_DPAD_RIGHT 0x00000008 +#define XINPUT_GAMEPAD_START 0x00000010 +#define XINPUT_GAMEPAD_BACK 0x00000020 +#define XINPUT_GAMEPAD_LEFT_THUMB 0x00000040 +#define XINPUT_GAMEPAD_RIGHT_THUMB 0x00000080 +#define XINPUT_GAMEPAD_LEFT_SHOULDER 0x0100 +#define XINPUT_GAMEPAD_RIGHT_SHOULDER 0x0200 +#define XINPUT_GAMEPAD_A 0x1000 +#define XINPUT_GAMEPAD_B 0x2000 +#define XINPUT_GAMEPAD_X 0x4000 +#define XINPUT_GAMEPAD_Y 0x8000 +#define XINPUT_GAMEPAD_LEFT_THUMB_DEADZONE 7849 +#define XINPUT_GAMEPAD_RIGHT_THUMB_DEADZONE 8689 +#define XINPUT_GAMEPAD_TRIGGER_THRESHOLD 30 +#endif + +#ifndef XUSER_MAX_COUNT +#define XUSER_MAX_COUNT 4 +#endif + +void gb_platform_update(gbPlatform *p) { + isize i; + + { // NOTE(bill): Set window state + // TODO(bill): Should this be moved to gb__win32_window_callback ? 
+ RECT window_rect; + i32 x, y, w, h; + + GetClientRect(cast(HWND)p->window_handle, &window_rect); + x = window_rect.left; + y = window_rect.top; + w = window_rect.right - window_rect.left; + h = window_rect.bottom - window_rect.top; + + if ((p->window_width != w) || (p->window_height != h)) { + if (p->renderer_type == gbRenderer_Software) { + gb__platform_resize_dib_section(p, w, h); + } + } + + + p->window_x = x; + p->window_y = y; + p->window_width = w; + p->window_height = h; + GB_MASK_SET(p->window_flags, IsIconic(cast(HWND)p->window_handle) != 0, gbWindow_Minimized); + + p->window_has_focus = GetFocus() == cast(HWND)p->window_handle; + } + + { // NOTE(bill): Set mouse position + POINT mouse_pos; + DWORD win_button_id[gbMouseButton_Count] = { + VK_LBUTTON, + VK_MBUTTON, + VK_RBUTTON, + VK_XBUTTON1, + VK_XBUTTON2, + }; + + // NOTE(bill): This needs to be GetAsyncKeyState as RAWMOUSE doesn't aways work for some odd reason + // TODO(bill): Try and get RAWMOUSE to work for key presses + for (i = 0; i < gbMouseButton_Count; i++) { + gb_key_state_update(p->mouse_buttons+i, GetAsyncKeyState(win_button_id[i]) < 0); + } + + GetCursorPos(&mouse_pos); + ScreenToClient(cast(HWND)p->window_handle, &mouse_pos); + { + i32 x = mouse_pos.x; + i32 y = p->window_height-1 - mouse_pos.y; + p->mouse_dx = x - p->mouse_x; + p->mouse_dy = y - p->mouse_y; + p->mouse_x = x; + p->mouse_y = y; + } + + if (p->mouse_clip) { + b32 update = false; + i32 x = p->mouse_x; + i32 y = p->mouse_y; + if (p->mouse_x < 0) { + x = 0; + update = true; + } else if (p->mouse_y > p->window_height-1) { + y = p->window_height-1; + update = true; + } + + if (p->mouse_y < 0) { + y = 0; + update = true; + } else if (p->mouse_x > p->window_width-1) { + x = p->window_width-1; + update = true; + } + + if (update) { + gb_platform_set_mouse_position(p, x, y); + } + } + + + } + + + // NOTE(bill): Set Key/Button states + if (p->window_has_focus) { + p->char_buffer_count = 0; // TODO(bill): Reset buffer count here or 
else where? + + // NOTE(bill): Need to update as the keys only get updates on events + for (i = 0; i < gbKey_Count; i++) { + b32 is_down = (p->keys[i] & gbKeyState_Down) != 0; + gb_key_state_update(&p->keys[i], is_down); + } + + p->key_modifiers.control = p->keys[gbKey_Lcontrol] | p->keys[gbKey_Rcontrol]; + p->key_modifiers.alt = p->keys[gbKey_Lalt] | p->keys[gbKey_Ralt]; + p->key_modifiers.shift = p->keys[gbKey_Lshift] | p->keys[gbKey_Rshift]; + + } + + { // NOTE(bill): Set Controller states + isize max_controller_count = XUSER_MAX_COUNT; + if (max_controller_count > gb_count_of(p->game_controllers)) { + max_controller_count = gb_count_of(p->game_controllers); + } + + for (i = 0; i < max_controller_count; i++) { + gbGameController *controller = &p->game_controllers[i]; + XINPUT_STATE controller_state = {0}; + if (p->xinput.get_state(cast(DWORD)i, &controller_state) != 0) { + // NOTE(bill): The controller is not available + controller->is_connected = false; + } else { + // NOTE(bill): This controller is plugged in + // TODO(bill): See if ControllerState.dwPacketNumber increments too rapidly + XINPUT_GAMEPAD *pad = &controller_state.Gamepad; + + controller->is_connected = true; + + // TODO(bill): This is a square deadzone, check XInput to verify that the deadzone is "round" and do round deadzone processing. 
+ controller->axes[gbControllerAxis_LeftX] = gb__process_xinput_stick_value(pad->sThumbLX, XINPUT_GAMEPAD_LEFT_THUMB_DEADZONE); + controller->axes[gbControllerAxis_LeftY] = gb__process_xinput_stick_value(pad->sThumbLY, XINPUT_GAMEPAD_LEFT_THUMB_DEADZONE); + controller->axes[gbControllerAxis_RightX] = gb__process_xinput_stick_value(pad->sThumbRX, XINPUT_GAMEPAD_RIGHT_THUMB_DEADZONE); + controller->axes[gbControllerAxis_RightY] = gb__process_xinput_stick_value(pad->sThumbRY, XINPUT_GAMEPAD_RIGHT_THUMB_DEADZONE); + + controller->axes[gbControllerAxis_LeftTrigger] = cast(f32)pad->bLeftTrigger / 255.0f; + controller->axes[gbControllerAxis_RightTrigger] = cast(f32)pad->bRightTrigger / 255.0f; + + + if ((controller->axes[gbControllerAxis_LeftX] != 0.0f) || + (controller->axes[gbControllerAxis_LeftY] != 0.0f)) { + controller->is_analog = true; + } + + #define GB__PROCESS_DIGITAL_BUTTON(button_type, xinput_button) \ + gb_key_state_update(&controller->buttons[button_type], (pad->wButtons & xinput_button) == xinput_button) + + GB__PROCESS_DIGITAL_BUTTON(gbControllerButton_A, XINPUT_GAMEPAD_A); + GB__PROCESS_DIGITAL_BUTTON(gbControllerButton_B, XINPUT_GAMEPAD_B); + GB__PROCESS_DIGITAL_BUTTON(gbControllerButton_X, XINPUT_GAMEPAD_X); + GB__PROCESS_DIGITAL_BUTTON(gbControllerButton_Y, XINPUT_GAMEPAD_Y); + GB__PROCESS_DIGITAL_BUTTON(gbControllerButton_LeftShoulder, XINPUT_GAMEPAD_LEFT_SHOULDER); + GB__PROCESS_DIGITAL_BUTTON(gbControllerButton_RightShoulder, XINPUT_GAMEPAD_RIGHT_SHOULDER); + GB__PROCESS_DIGITAL_BUTTON(gbControllerButton_Start, XINPUT_GAMEPAD_START); + GB__PROCESS_DIGITAL_BUTTON(gbControllerButton_Back, XINPUT_GAMEPAD_BACK); + GB__PROCESS_DIGITAL_BUTTON(gbControllerButton_Left, XINPUT_GAMEPAD_DPAD_LEFT); + GB__PROCESS_DIGITAL_BUTTON(gbControllerButton_Right, XINPUT_GAMEPAD_DPAD_RIGHT); + GB__PROCESS_DIGITAL_BUTTON(gbControllerButton_Down, XINPUT_GAMEPAD_DPAD_DOWN); + GB__PROCESS_DIGITAL_BUTTON(gbControllerButton_Up, XINPUT_GAMEPAD_DPAD_UP); + 
GB__PROCESS_DIGITAL_BUTTON(gbControllerButton_LeftThumb, XINPUT_GAMEPAD_LEFT_THUMB); + GB__PROCESS_DIGITAL_BUTTON(gbControllerButton_RightThumb, XINPUT_GAMEPAD_RIGHT_THUMB); + #undef GB__PROCESS_DIGITAL_BUTTON + } + } + } + + { // NOTE(bill): Process pending messages + MSG message; + for (;;) { + BOOL is_okay = PeekMessageW(&message, 0, 0, 0, PM_REMOVE); + if (!is_okay) break; + + switch (message.message) { + case WM_QUIT: + p->quit_requested = true; + break; + + default: + TranslateMessage(&message); + DispatchMessageW(&message); + break; + } + } + } +} + +void gb_platform_display(gbPlatform *p) { + if (p->renderer_type == gbRenderer_Opengl) { + SwapBuffers(cast(HDC)p->win32_dc); + } else if (p->renderer_type == gbRenderer_Software) { + StretchDIBits(cast(HDC)p->win32_dc, + 0, 0, p->window_width, p->window_height, + 0, 0, p->window_width, p->window_height, + p->sw_framebuffer.memory, + &p->sw_framebuffer.win32_bmi, + DIB_RGB_COLORS, SRCCOPY); + } else { + GB_PANIC("Invalid window rendering type"); + } + + { + f64 prev_time = p->curr_time; + f64 curr_time = gb_time_now(); + p->dt_for_frame = curr_time - prev_time; + p->curr_time = curr_time; + } +} + + +void gb_platform_destroy(gbPlatform *p) { + if (p->renderer_type == gbRenderer_Opengl) { + wglDeleteContext(cast(HGLRC)p->opengl.context); + } else if (p->renderer_type == gbRenderer_Software) { + gb_vm_free(gb_virtual_memory(p->sw_framebuffer.memory, p->sw_framebuffer.memory_size)); + } + + DestroyWindow(cast(HWND)p->window_handle); +} + +void gb_platform_show_cursor(gbPlatform *p, b32 show) { + gb_unused(p); + ShowCursor(show); +} + +void gb_platform_set_mouse_position(gbPlatform *p, i32 x, i32 y) { + POINT point; + point.x = cast(LONG)x; + point.y = cast(LONG)(p->window_height-1 - y); + ClientToScreen(cast(HWND)p->window_handle, &point); + SetCursorPos(point.x, point.y); + + p->mouse_x = point.x; + p->mouse_y = p->window_height-1 - point.y; +} + + + +void gb_platform_set_controller_vibration(gbPlatform *p, isize 
index, f32 left_motor, f32 right_motor) { + if (gb_is_between(index, 0, GB_MAX_GAME_CONTROLLER_COUNT-1)) { + XINPUT_VIBRATION vibration = {0}; + left_motor = gb_clamp01(left_motor); + right_motor = gb_clamp01(right_motor); + vibration.wLeftMotorSpeed = cast(WORD)(65535 * left_motor); + vibration.wRightMotorSpeed = cast(WORD)(65535 * right_motor); + + p->xinput.set_state(cast(DWORD)index, &vibration); + } +} + + +void gb_platform_set_window_position(gbPlatform *p, i32 x, i32 y) { + RECT rect; + i32 width, height; + + GetClientRect(cast(HWND)p->window_handle, &rect); + width = rect.right - rect.left; + height = rect.bottom - rect.top; + MoveWindow(cast(HWND)p->window_handle, x, y, width, height, false); +} + +void gb_platform_set_window_title(gbPlatform *p, char const *title, ...) { + u16 buffer[256] = {0}; + char str[512] = {0}; + va_list va; + va_start(va, title); + gb_snprintf_va(str, gb_size_of(str), title, va); + va_end(va); + + if (str[0] != '\0') { + SetWindowTextW(cast(HWND)p->window_handle, cast(wchar_t const *)gb_utf8_to_ucs2(buffer, gb_size_of(buffer), str)); + } +} + +void gb_platform_toggle_fullscreen(gbPlatform *p, b32 fullscreen_desktop) { + // NOTE(bill): From the man himself, Raymond Chen! (Modified for my need.) 
+ HWND handle = cast(HWND)p->window_handle; + DWORD style = cast(DWORD)GetWindowLongW(handle, GWL_STYLE); + WINDOWPLACEMENT placement; + + if (style & WS_OVERLAPPEDWINDOW) { + MONITORINFO monitor_info = {gb_size_of(monitor_info)}; + if (GetWindowPlacement(handle, &placement) && + GetMonitorInfoW(MonitorFromWindow(handle, 1), &monitor_info)) { + style &= ~WS_OVERLAPPEDWINDOW; + if (fullscreen_desktop) { + style &= ~WS_CAPTION; + style |= WS_POPUP; + } + SetWindowLongW(handle, GWL_STYLE, style); + SetWindowPos(handle, HWND_TOP, + monitor_info.rcMonitor.left, monitor_info.rcMonitor.top, + monitor_info.rcMonitor.right - monitor_info.rcMonitor.left, + monitor_info.rcMonitor.bottom - monitor_info.rcMonitor.top, + SWP_NOOWNERZORDER | SWP_FRAMECHANGED); + + if (fullscreen_desktop) { + p->window_flags |= gbWindow_FullscreenDesktop; + } else { + p->window_flags |= gbWindow_Fullscreen; + } + } + } else { + style &= ~WS_POPUP; + style |= WS_OVERLAPPEDWINDOW | WS_CAPTION; + SetWindowLongW(handle, GWL_STYLE, style); + SetWindowPlacement(handle, &placement); + SetWindowPos(handle, 0, 0, 0, 0, 0, + SWP_NOMOVE | SWP_NOSIZE | SWP_NOZORDER | + SWP_NOOWNERZORDER | SWP_FRAMECHANGED); + + p->window_flags &= ~gbWindow_Fullscreen; + } +} + +void gb_platform_toggle_borderless(gbPlatform *p) { + HWND handle = cast(HWND)p->window_handle; + DWORD style = GetWindowLongW(handle, GWL_STYLE); + b32 is_borderless = (style & WS_POPUP) != 0; + + GB_MASK_SET(style, is_borderless, WS_OVERLAPPEDWINDOW | WS_CAPTION); + GB_MASK_SET(style, !is_borderless, WS_POPUP); + + SetWindowLongW(handle, GWL_STYLE, style); + + GB_MASK_SET(p->window_flags, !is_borderless, gbWindow_Borderless); +} + + + +gb_inline void gb_platform_make_opengl_context_current(gbPlatform *p) { + if (p->renderer_type == gbRenderer_Opengl) { + wglMakeCurrent(cast(HDC)p->win32_dc, cast(HGLRC)p->opengl.context); + } +} + +gb_inline void gb_platform_show_window(gbPlatform *p) { + ShowWindow(cast(HWND)p->window_handle, SW_SHOW); + 
p->window_flags &= ~gbWindow_Hidden; +} + +gb_inline void gb_platform_hide_window(gbPlatform *p) { + ShowWindow(cast(HWND)p->window_handle, SW_HIDE); + p->window_flags |= gbWindow_Hidden; +} + +gb_inline gbVideoMode gb_video_mode_get_desktop(void) { + DEVMODEW win32_mode = {gb_size_of(win32_mode)}; + EnumDisplaySettingsW(NULL, ENUM_CURRENT_SETTINGS, &win32_mode); + return gb_video_mode(win32_mode.dmPelsWidth, win32_mode.dmPelsHeight, win32_mode.dmBitsPerPel); +} + +isize gb_video_mode_get_fullscreen_modes(gbVideoMode *modes, isize max_mode_count) { + DEVMODEW win32_mode = {gb_size_of(win32_mode)}; + i32 count; + for (count = 0; + count < max_mode_count && EnumDisplaySettingsW(NULL, count, &win32_mode); + count++) { + modes[count] = gb_video_mode(win32_mode.dmPelsWidth, win32_mode.dmPelsHeight, win32_mode.dmBitsPerPel); + } + + gb_sort_array(modes, count, gb_video_mode_dsc_cmp); + return count; +} + + + +b32 gb_platform_has_clipboard_text(gbPlatform *p) { + b32 result = false; + + if (IsClipboardFormatAvailable(1/*CF_TEXT*/) && + OpenClipboard(cast(HWND)p->window_handle)) { + HANDLE mem = GetClipboardData(1/*CF_TEXT*/); + if (mem) { + char *str = cast(char *)GlobalLock(mem); + if (str && str[0] != '\0') { + result = true; + } + GlobalUnlock(mem); + } else { + return false; + } + + CloseClipboard(); + } + + return result; +} + +// TODO(bill): Handle UTF-8 +void gb_platform_set_clipboard_text(gbPlatform *p, char const *str) { + if (OpenClipboard(cast(HWND)p->window_handle)) { + isize i, len = gb_strlen(str)+1; + + HANDLE mem = cast(HANDLE)GlobalAlloc(0x0002/*GMEM_MOVEABLE*/, len); + if (mem) { + char *dst = cast(char *)GlobalLock(mem); + if (dst) { + for (i = 0; str[i]; i++) { + // TODO(bill): Does this cause a buffer overflow? 
+ // NOTE(bill): Change \n to \r\n 'cause windows + if (str[i] == '\n' && (i == 0 || str[i-1] != '\r')) { + *dst++ = '\r'; + } + *dst++ = str[i]; + } + *dst = 0; + } + GlobalUnlock(mem); + } + + EmptyClipboard(); + if (!SetClipboardData(1/*CF_TEXT*/, mem)) { + return; + } + CloseClipboard(); + } +} + +// TODO(bill): Handle UTF-8 +char *gb_platform_get_clipboard_text(gbPlatform *p, gbAllocator a) { + char *text = NULL; + + if (IsClipboardFormatAvailable(1/*CF_TEXT*/) && + OpenClipboard(cast(HWND)p->window_handle)) { + HANDLE mem = GetClipboardData(1/*CF_TEXT*/); + if (mem) { + char *str = cast(char *)GlobalLock(mem); + text = gb_alloc_str(a, str); + GlobalUnlock(mem); + } else { + return NULL; + } + + CloseClipboard(); + } + + return text; +} + +#elif defined(GB_SYSTEM_OSX) + +#include +#include +#include +#include + +#if __LP64__ || (TARGET_OS_EMBEDDED && !TARGET_OS_IPHONE) || TARGET_OS_WIN32 || NS_BUILD_32_LIKE_64 + #define NSIntegerEncoding "q" + #define NSUIntegerEncoding "L" +#else + #define NSIntegerEncoding "i" + #define NSUIntegerEncoding "I" +#endif + +#ifdef __OBJC__ + #import +#else + typedef CGPoint NSPoint; + typedef CGSize NSSize; + typedef CGRect NSRect; + + extern id NSApp; + extern id const NSDefaultRunLoopMode; +#endif + +#if defined(__OBJC__) && __has_feature(objc_arc) +#error TODO(bill): Cannot compile as objective-c code just yet! 
+#endif + +// ABI is a bit different between platforms +#ifdef __arm64__ +#define abi_objc_msgSend_stret objc_msgSend +#else +#define abi_objc_msgSend_stret objc_msgSend_stret +#endif +#ifdef __i386__ +#define abi_objc_msgSend_fpret objc_msgSend_fpret +#else +#define abi_objc_msgSend_fpret objc_msgSend +#endif + +#define objc_msgSend_id ((id (*)(id, SEL))objc_msgSend) +#define objc_msgSend_void ((void (*)(id, SEL))objc_msgSend) +#define objc_msgSend_void_id ((void (*)(id, SEL, id))objc_msgSend) +#define objc_msgSend_void_bool ((void (*)(id, SEL, BOOL))objc_msgSend) +#define objc_msgSend_id_char_const ((id (*)(id, SEL, char const *))objc_msgSend) + +gb_internal NSUInteger gb__osx_application_should_terminate(id self, SEL _sel, id sender) { + // NOTE(bill): Do nothing + return 0; +} + +gb_internal void gb__osx_window_will_close(id self, SEL _sel, id notification) { + NSUInteger value = true; + object_setInstanceVariable(self, "closed", cast(void *)value); +} + +gb_internal void gb__osx_window_did_become_key(id self, SEL _sel, id notification) { + gbPlatform *p = NULL; + object_getInstanceVariable(self, "gbPlatform", cast(void **)&p); + if (p) { + // TODO(bill): + } +} + +b32 gb__platform_init(gbPlatform *p, char const *window_title, gbVideoMode mode, gbRendererType type, u32 window_flags) { + if (p->is_initialized) { + return true; + } + // Init Platform + { // Initial OSX State + Class appDelegateClass; + b32 resultAddProtoc, resultAddMethod; + id dgAlloc, dg, menubarAlloc, menubar; + id appMenuItemAlloc, appMenuItem; + id appMenuAlloc, appMenu; + + #if defined(ARC_AVAILABLE) + #error TODO(bill): This code should be compiled as C for now + #else + id poolAlloc = objc_msgSend_id(cast(id)objc_getClass("NSAutoreleasePool"), sel_registerName("alloc")); + p->osx_autorelease_pool = objc_msgSend_id(poolAlloc, sel_registerName("init")); + #endif + + objc_msgSend_id(cast(id)objc_getClass("NSApplication"), sel_registerName("sharedApplication")); + ((void (*)(id, SEL, 
NSInteger))objc_msgSend)(NSApp, sel_registerName("setActivationPolicy:"), 0); + + appDelegateClass = objc_allocateClassPair((Class)objc_getClass("NSObject"), "AppDelegate", 0); + resultAddProtoc = class_addProtocol(appDelegateClass, objc_getProtocol("NSApplicationDelegate")); + assert(resultAddProtoc); + resultAddMethod = class_addMethod(appDelegateClass, sel_registerName("applicationShouldTerminate:"), cast(IMP)gb__osx_application_should_terminate, NSUIntegerEncoding "@:@"); + assert(resultAddMethod); + dgAlloc = objc_msgSend_id(cast(id)appDelegateClass, sel_registerName("alloc")); + dg = objc_msgSend_id(dgAlloc, sel_registerName("init")); + #ifndef ARC_AVAILABLE + objc_msgSend_void(dg, sel_registerName("autorelease")); + #endif + + objc_msgSend_void_id(NSApp, sel_registerName("setDelegate:"), dg); + objc_msgSend_void(NSApp, sel_registerName("finishLaunching")); + + menubarAlloc = objc_msgSend_id(cast(id)objc_getClass("NSMenu"), sel_registerName("alloc")); + menubar = objc_msgSend_id(menubarAlloc, sel_registerName("init")); + #ifndef ARC_AVAILABLE + objc_msgSend_void(menubar, sel_registerName("autorelease")); + #endif + + appMenuItemAlloc = objc_msgSend_id(cast(id)objc_getClass("NSMenuItem"), sel_registerName("alloc")); + appMenuItem = objc_msgSend_id(appMenuItemAlloc, sel_registerName("init")); + #ifndef ARC_AVAILABLE + objc_msgSend_void(appMenuItem, sel_registerName("autorelease")); + #endif + + objc_msgSend_void_id(menubar, sel_registerName("addItem:"), appMenuItem); + ((id (*)(id, SEL, id))objc_msgSend)(NSApp, sel_registerName("setMainMenu:"), menubar); + + appMenuAlloc = objc_msgSend_id(cast(id)objc_getClass("NSMenu"), sel_registerName("alloc")); + appMenu = objc_msgSend_id(appMenuAlloc, sel_registerName("init")); + #ifndef ARC_AVAILABLE + objc_msgSend_void(appMenu, sel_registerName("autorelease")); + #endif + + { + id processInfo = objc_msgSend_id(cast(id)objc_getClass("NSProcessInfo"), sel_registerName("processInfo")); + id appName = 
objc_msgSend_id(processInfo, sel_registerName("processName")); + + id quitTitlePrefixString = objc_msgSend_id_char_const(cast(id)objc_getClass("NSString"), sel_registerName("stringWithUTF8String:"), "Quit "); + id quitTitle = ((id (*)(id, SEL, id))objc_msgSend)(quitTitlePrefixString, sel_registerName("stringByAppendingString:"), appName); + + id quitMenuItemKey = objc_msgSend_id_char_const(cast(id)objc_getClass("NSString"), sel_registerName("stringWithUTF8String:"), "q"); + id quitMenuItemAlloc = objc_msgSend_id(cast(id)objc_getClass("NSMenuItem"), sel_registerName("alloc")); + id quitMenuItem = ((id (*)(id, SEL, id, SEL, id))objc_msgSend)(quitMenuItemAlloc, sel_registerName("initWithTitle:action:keyEquivalent:"), quitTitle, sel_registerName("terminate:"), quitMenuItemKey); + #ifndef ARC_AVAILABLE + objc_msgSend_void(quitMenuItem, sel_registerName("autorelease")); + #endif + + objc_msgSend_void_id(appMenu, sel_registerName("addItem:"), quitMenuItem); + objc_msgSend_void_id(appMenuItem, sel_registerName("setSubmenu:"), appMenu); + } + } + + { // Init Window + NSRect rect = {{0, 0}, {cast(CGFloat)mode.width, cast(CGFloat)mode.height}}; + id windowAlloc, window, wdgAlloc, wdg, contentView, titleString; + Class WindowDelegateClass; + b32 resultAddProtoc, resultAddIvar, resultAddMethod; + + windowAlloc = objc_msgSend_id(cast(id)objc_getClass("NSWindow"), sel_registerName("alloc")); + window = ((id (*)(id, SEL, NSRect, NSUInteger, NSUInteger, BOOL))objc_msgSend)(windowAlloc, sel_registerName("initWithContentRect:styleMask:backing:defer:"), rect, 15, 2, NO); + #ifndef ARC_AVAILABLE + objc_msgSend_void(window, sel_registerName("autorelease")); + #endif + + // when we are not using ARC, than window will be added to autorelease pool + // so if we close it by hand (pressing red button), we don't want it to be released for us + // so it will be released by autorelease pool later + objc_msgSend_void_bool(window, sel_registerName("setReleasedWhenClosed:"), NO); + + 
WindowDelegateClass = objc_allocateClassPair((Class)objc_getClass("NSObject"), "WindowDelegate", 0); + resultAddProtoc = class_addProtocol(WindowDelegateClass, objc_getProtocol("NSWindowDelegate")); + GB_ASSERT(resultAddProtoc); + resultAddIvar = class_addIvar(WindowDelegateClass, "closed", gb_size_of(NSUInteger), rint(log2(gb_size_of(NSUInteger))), NSUIntegerEncoding); + GB_ASSERT(resultAddIvar); + resultAddIvar = class_addIvar(WindowDelegateClass, "gbPlatform", gb_size_of(void *), rint(log2(gb_size_of(void *))), "^v"); + GB_ASSERT(resultAddIvar); + resultAddMethod = class_addMethod(WindowDelegateClass, sel_registerName("windowWillClose:"), cast(IMP)gb__osx_window_will_close, "v@:@"); + GB_ASSERT(resultAddMethod); + resultAddMethod = class_addMethod(WindowDelegateClass, sel_registerName("windowDidBecomeKey:"), cast(IMP)gb__osx_window_did_become_key, "v@:@"); + GB_ASSERT(resultAddMethod); + wdgAlloc = objc_msgSend_id(cast(id)WindowDelegateClass, sel_registerName("alloc")); + wdg = objc_msgSend_id(wdgAlloc, sel_registerName("init")); + #ifndef ARC_AVAILABLE + objc_msgSend_void(wdg, sel_registerName("autorelease")); + #endif + + objc_msgSend_void_id(window, sel_registerName("setDelegate:"), wdg); + + contentView = objc_msgSend_id(window, sel_registerName("contentView")); + + { + NSPoint point = {20, 20}; + ((void (*)(id, SEL, NSPoint))objc_msgSend)(window, sel_registerName("cascadeTopLeftFromPoint:"), point); + } + + titleString = objc_msgSend_id_char_const(cast(id)objc_getClass("NSString"), sel_registerName("stringWithUTF8String:"), window_title); + objc_msgSend_void_id(window, sel_registerName("setTitle:"), titleString); + + if (type == gbRenderer_Opengl) { + // TODO(bill): Make sure this works correctly + u32 opengl_hex_version = (p->opengl.major << 12) | (p->opengl.minor << 8); + u32 gl_attribs[] = { + 8, 24, // NSOpenGLPFAColorSize, 24, + 11, 8, // NSOpenGLPFAAlphaSize, 8, + 5, // NSOpenGLPFADoubleBuffer, + 73, // NSOpenGLPFAAccelerated, + //72, // 
NSOpenGLPFANoRecovery, + //55, 1, // NSOpenGLPFASampleBuffers, 1, + //56, 4, // NSOpenGLPFASamples, 4, + 99, opengl_hex_version, // NSOpenGLPFAOpenGLProfile, NSOpenGLProfileVersion3_2Core, + 0 + }; + + id pixel_format_alloc, pixel_format; + id opengl_context_alloc, opengl_context; + + pixel_format_alloc = objc_msgSend_id(cast(id)objc_getClass("NSOpenGLPixelFormat"), sel_registerName("alloc")); + pixel_format = ((id (*)(id, SEL, const uint32_t*))objc_msgSend)(pixel_format_alloc, sel_registerName("initWithAttributes:"), gl_attribs); + #ifndef ARC_AVAILABLE + objc_msgSend_void(pixel_format, sel_registerName("autorelease")); + #endif + + opengl_context_alloc = objc_msgSend_id(cast(id)objc_getClass("NSOpenGLContext"), sel_registerName("alloc")); + opengl_context = ((id (*)(id, SEL, id, id))objc_msgSend)(opengl_context_alloc, sel_registerName("initWithFormat:shareContext:"), pixel_format, nil); + #ifndef ARC_AVAILABLE + objc_msgSend_void(opengl_context, sel_registerName("autorelease")); + #endif + + objc_msgSend_void_id(opengl_context, sel_registerName("setView:"), contentView); + objc_msgSend_void_id(window, sel_registerName("makeKeyAndOrderFront:"), window); + objc_msgSend_void_bool(window, sel_registerName("setAcceptsMouseMovedEvents:"), YES); + + + p->window_handle = cast(void *)window; + p->opengl.context = cast(void *)opengl_context; + } else { + GB_PANIC("TODO(bill): Software rendering"); + } + + { + id blackColor = objc_msgSend_id(cast(id)objc_getClass("NSColor"), sel_registerName("blackColor")); + objc_msgSend_void_id(window, sel_registerName("setBackgroundColor:"), blackColor); + objc_msgSend_void_bool(NSApp, sel_registerName("activateIgnoringOtherApps:"), YES); + } + object_setInstanceVariable(wdg, "gbPlatform", cast(void *)p); + + p->is_initialized = true; + } + + return true; +} + +// NOTE(bill): Software rendering +b32 gb_platform_init_with_software(gbPlatform *p, char const *window_title, i32 width, i32 height, u32 window_flags) { + GB_PANIC("TODO(bill): 
Software rendering in not yet implemented on OS X\n"); + return gb__platform_init(p, window_title, gb_video_mode(width, height, 32), gbRenderer_Software, window_flags); +} +// NOTE(bill): OpenGL Rendering +b32 gb_platform_init_with_opengl(gbPlatform *p, char const *window_title, i32 width, i32 height, u32 window_flags, + i32 major, i32 minor, b32 core, b32 compatible) { + + p->opengl.major = major; + p->opengl.minor = minor; + p->opengl.core = core; + p->opengl.compatible = compatible; + return gb__platform_init(p, window_title, gb_video_mode(width, height, 32), gbRenderer_Opengl, window_flags); +} + +// NOTE(bill): Reverse engineering can be fun!!! +gb_internal gbKeyType gb__osx_from_key_code(u16 key_code) { + switch (key_code) { + default: return gbKey_Unknown; + // NOTE(bill): WHO THE FUCK DESIGNED THIS VIRTUAL KEY CODE SYSTEM?! + // THEY ARE FUCKING IDIOTS! + case 0x1d: return gbKey_0; + case 0x12: return gbKey_1; + case 0x13: return gbKey_2; + case 0x14: return gbKey_3; + case 0x15: return gbKey_4; + case 0x17: return gbKey_5; + case 0x16: return gbKey_6; + case 0x1a: return gbKey_7; + case 0x1c: return gbKey_8; + case 0x19: return gbKey_9; + + case 0x00: return gbKey_A; + case 0x0b: return gbKey_B; + case 0x08: return gbKey_C; + case 0x02: return gbKey_D; + case 0x0e: return gbKey_E; + case 0x03: return gbKey_F; + case 0x05: return gbKey_G; + case 0x04: return gbKey_H; + case 0x22: return gbKey_I; + case 0x26: return gbKey_J; + case 0x28: return gbKey_K; + case 0x25: return gbKey_L; + case 0x2e: return gbKey_M; + case 0x2d: return gbKey_N; + case 0x1f: return gbKey_O; + case 0x23: return gbKey_P; + case 0x0c: return gbKey_Q; + case 0x0f: return gbKey_R; + case 0x01: return gbKey_S; + case 0x11: return gbKey_T; + case 0x20: return gbKey_U; + case 0x09: return gbKey_V; + case 0x0d: return gbKey_W; + case 0x07: return gbKey_X; + case 0x10: return gbKey_Y; + case 0x06: return gbKey_Z; + + case 0x21: return gbKey_Lbracket; + case 0x1e: return gbKey_Rbracket; + 
case 0x29: return gbKey_Semicolon; + case 0x2b: return gbKey_Comma; + case 0x2f: return gbKey_Period; + case 0x27: return gbKey_Quote; + case 0x2c: return gbKey_Slash; + case 0x2a: return gbKey_Backslash; + case 0x32: return gbKey_Grave; + case 0x18: return gbKey_Equals; + case 0x1b: return gbKey_Minus; + case 0x31: return gbKey_Space; + + case 0x35: return gbKey_Escape; // Escape + case 0x3b: return gbKey_Lcontrol; // Left Control + case 0x38: return gbKey_Lshift; // Left Shift + case 0x3a: return gbKey_Lalt; // Left Alt + case 0x37: return gbKey_Lsystem; // Left OS specific: window (Windows and Linux), apple/cmd (MacOS X), ... + case 0x3e: return gbKey_Rcontrol; // Right Control + case 0x3c: return gbKey_Rshift; // Right Shift + case 0x3d: return gbKey_Ralt; // Right Alt + // case 0x37: return gbKey_Rsystem; // Right OS specific: window (Windows and Linux), apple/cmd (MacOS X), ... + case 0x6e: return gbKey_Menu; // Menu + case 0x24: return gbKey_Return; // Return + case 0x33: return gbKey_Backspace; // Backspace + case 0x30: return gbKey_Tab; // Tabulation + case 0x74: return gbKey_Pageup; // Page up + case 0x79: return gbKey_Pagedown; // Page down + case 0x77: return gbKey_End; // End + case 0x73: return gbKey_Home; // Home + case 0x72: return gbKey_Insert; // Insert + case 0x75: return gbKey_Delete; // Delete + case 0x45: return gbKey_Plus; // + + case 0x4e: return gbKey_Subtract; // - + case 0x43: return gbKey_Multiply; // * + case 0x4b: return gbKey_Divide; // / + case 0x7b: return gbKey_Left; // Left arrow + case 0x7c: return gbKey_Right; // Right arrow + case 0x7e: return gbKey_Up; // Up arrow + case 0x7d: return gbKey_Down; // Down arrow + case 0x52: return gbKey_Numpad0; // Numpad 0 + case 0x53: return gbKey_Numpad1; // Numpad 1 + case 0x54: return gbKey_Numpad2; // Numpad 2 + case 0x55: return gbKey_Numpad3; // Numpad 3 + case 0x56: return gbKey_Numpad4; // Numpad 4 + case 0x57: return gbKey_Numpad5; // Numpad 5 + case 0x58: return gbKey_Numpad6; // 
Numpad 6 + case 0x59: return gbKey_Numpad7; // Numpad 7 + case 0x5b: return gbKey_Numpad8; // Numpad 8 + case 0x5c: return gbKey_Numpad9; // Numpad 9 + case 0x41: return gbKey_NumpadDot; // Numpad . + case 0x4c: return gbKey_NumpadEnter; // Numpad Enter + case 0x7a: return gbKey_F1; // F1 + case 0x78: return gbKey_F2; // F2 + case 0x63: return gbKey_F3; // F3 + case 0x76: return gbKey_F4; // F4 + case 0x60: return gbKey_F5; // F5 + case 0x61: return gbKey_F6; // F6 + case 0x62: return gbKey_F7; // F7 + case 0x64: return gbKey_F8; // F8 + case 0x65: return gbKey_F9; // F9 + case 0x6d: return gbKey_F10; // F10 + case 0x67: return gbKey_F11; // F11 + case 0x6f: return gbKey_F12; // F12 + case 0x69: return gbKey_F13; // F13 + case 0x6b: return gbKey_F14; // F14 + case 0x71: return gbKey_F15; // F15 + // case : return gbKey_Pause; // Pause // NOTE(bill): Not possible on OS X + } +} + +gb_internal void gb__osx_on_cocoa_event(gbPlatform *p, id event, id window) { + if (!event) { + return; + } else if (objc_msgSend_id(window, sel_registerName("delegate"))) { + NSUInteger event_type = ((NSUInteger (*)(id, SEL))objc_msgSend)(event, sel_registerName("type")); + switch (event_type) { + case 1: gb_key_state_update(&p->mouse_buttons[gbMouseButton_Left], true); break; // NSLeftMouseDown + case 2: gb_key_state_update(&p->mouse_buttons[gbMouseButton_Left], false); break; // NSLeftMouseUp + case 3: gb_key_state_update(&p->mouse_buttons[gbMouseButton_Right], true); break; // NSRightMouseDown + case 4: gb_key_state_update(&p->mouse_buttons[gbMouseButton_Right], false); break; // NSRightMouseUp + case 25: { // NSOtherMouseDown + // TODO(bill): Test thoroughly + NSInteger number = ((NSInteger (*)(id, SEL))objc_msgSend)(event, sel_registerName("buttonNumber")); + if (number == 2) gb_key_state_update(&p->mouse_buttons[gbMouseButton_Middle], true); + if (number == 3) gb_key_state_update(&p->mouse_buttons[gbMouseButton_X1], true); + if (number == 4) 
gb_key_state_update(&p->mouse_buttons[gbMouseButton_X2], true); + } break; + case 26: { // NSOtherMouseUp + NSInteger number = ((NSInteger (*)(id, SEL))objc_msgSend)(event, sel_registerName("buttonNumber")); + if (number == 2) gb_key_state_update(&p->mouse_buttons[gbMouseButton_Middle], false); + if (number == 3) gb_key_state_update(&p->mouse_buttons[gbMouseButton_X1], false); + if (number == 4) gb_key_state_update(&p->mouse_buttons[gbMouseButton_X2], false); + + } break; + + // TODO(bill): Scroll wheel + case 22: { // NSScrollWheel + CGFloat dx = ((CGFloat (*)(id, SEL))abi_objc_msgSend_fpret)(event, sel_registerName("scrollingDeltaX")); + CGFloat dy = ((CGFloat (*)(id, SEL))abi_objc_msgSend_fpret)(event, sel_registerName("scrollingDeltaY")); + BOOL precision_scrolling = ((BOOL (*)(id, SEL))objc_msgSend)(event, sel_registerName("hasPreciseScrollingDeltas")); + if (precision_scrolling) { + dx *= 0.1f; + dy *= 0.1f; + } + // TODO(bill): Handle sideways + p->mouse_wheel_delta = dy; + // p->mouse_wheel_dy = dy; + // gb_printf("%f %f\n", dx, dy); + } break; + + case 12: { // NSFlagsChanged + #if 0 + // TODO(bill): Reverse engineer this properly + NSUInteger modifiers = ((NSUInteger (*)(id, SEL))objc_msgSend)(event, sel_registerName("modifierFlags")); + u32 upper_mask = (modifiers & 0xffff0000ul) >> 16; + b32 shift = (upper_mask & 0x02) != 0; + b32 control = (upper_mask & 0x04) != 0; + b32 alt = (upper_mask & 0x08) != 0; + b32 command = (upper_mask & 0x10) != 0; + #endif + + // gb_printf("%u\n", keys.mask); + // gb_printf("%x\n", cast(u32)modifiers); + } break; + + case 10: { // NSKeyDown + u16 key_code; + + id input_text = objc_msgSend_id(event, sel_registerName("characters")); + char const *input_text_utf8 = ((char const *(*)(id, SEL))objc_msgSend)(input_text, sel_registerName("UTF8String")); + p->char_buffer_count = gb_strnlen(input_text_utf8, gb_size_of(p->char_buffer)); + gb_memcopy(p->char_buffer, input_text_utf8, p->char_buffer_count); + + key_code = ((unsigned 
short (*)(id, SEL))objc_msgSend)(event, sel_registerName("keyCode")); + gb_key_state_update(&p->keys[gb__osx_from_key_code(key_code)], true); + } break; + + case 11: { // NSKeyUp + u16 key_code = ((unsigned short (*)(id, SEL))objc_msgSend)(event, sel_registerName("keyCode")); + gb_key_state_update(&p->keys[gb__osx_from_key_code(key_code)], false); + } break; + + default: break; + } + + objc_msgSend_void_id(NSApp, sel_registerName("sendEvent:"), event); + } +} + + +void gb_platform_update(gbPlatform *p) { + id window, key_window, content_view; + NSRect original_frame; + + window = cast(id)p->window_handle; + key_window = objc_msgSend_id(NSApp, sel_registerName("keyWindow")); + p->window_has_focus = key_window == window; // TODO(bill): Is this right + + + if (p->window_has_focus) { + isize i; + p->char_buffer_count = 0; // TODO(bill): Reset buffer count here or else where? + + // NOTE(bill): Need to update as the keys only get updates on events + for (i = 0; i < gbKey_Count; i++) { + b32 is_down = (p->keys[i] & gbKeyState_Down) != 0; + gb_key_state_update(&p->keys[i], is_down); + } + + for (i = 0; i < gbMouseButton_Count; i++) { + b32 is_down = (p->mouse_buttons[i] & gbKeyState_Down) != 0; + gb_key_state_update(&p->mouse_buttons[i], is_down); + } + + } + + { // Handle Events + id distant_past = objc_msgSend_id(cast(id)objc_getClass("NSDate"), sel_registerName("distantPast")); + id event = ((id (*)(id, SEL, NSUInteger, id, id, BOOL))objc_msgSend)(NSApp, sel_registerName("nextEventMatchingMask:untilDate:inMode:dequeue:"), NSUIntegerMax, distant_past, NSDefaultRunLoopMode, YES); + gb__osx_on_cocoa_event(p, event, window); + } + + if (p->window_has_focus) { + p->key_modifiers.control = p->keys[gbKey_Lcontrol] | p->keys[gbKey_Rcontrol]; + p->key_modifiers.alt = p->keys[gbKey_Lalt] | p->keys[gbKey_Ralt]; + p->key_modifiers.shift = p->keys[gbKey_Lshift] | p->keys[gbKey_Rshift]; + } + + { // Check if window is closed + id wdg = objc_msgSend_id(window, 
sel_registerName("delegate")); + if (!wdg) { + p->window_is_closed = false; + } else { + NSUInteger value = 0; + object_getInstanceVariable(wdg, "closed", cast(void **)&value); + p->window_is_closed = (value != 0); + } + } + + + + content_view = objc_msgSend_id(window, sel_registerName("contentView")); + original_frame = ((NSRect (*)(id, SEL))abi_objc_msgSend_stret)(content_view, sel_registerName("frame")); + + { // Window + NSRect frame = original_frame; + frame = ((NSRect (*)(id, SEL, NSRect))abi_objc_msgSend_stret)(content_view, sel_registerName("convertRectToBacking:"), frame); + p->window_width = frame.size.width; + p->window_height = frame.size.height; + frame = ((NSRect (*)(id, SEL, NSRect))abi_objc_msgSend_stret)(window, sel_registerName("convertRectToScreen:"), frame); + p->window_x = frame.origin.x; + p->window_y = frame.origin.y; + } + + { // Mouse + NSRect frame = original_frame; + NSPoint mouse_pos = ((NSPoint (*)(id, SEL))objc_msgSend)(window, sel_registerName("mouseLocationOutsideOfEventStream")); + mouse_pos.x = gb_clamp(mouse_pos.x, 0, frame.size.width-1); + mouse_pos.y = gb_clamp(mouse_pos.y, 0, frame.size.height-1); + + { + i32 x = mouse_pos.x; + i32 y = mouse_pos.y; + p->mouse_dx = x - p->mouse_x; + p->mouse_dy = y - p->mouse_y; + p->mouse_x = x; + p->mouse_y = y; + } + + if (p->mouse_clip) { + b32 update = false; + i32 x = p->mouse_x; + i32 y = p->mouse_y; + if (p->mouse_x < 0) { + x = 0; + update = true; + } else if (p->mouse_y > p->window_height-1) { + y = p->window_height-1; + update = true; + } + + if (p->mouse_y < 0) { + y = 0; + update = true; + } else if (p->mouse_x > p->window_width-1) { + x = p->window_width-1; + update = true; + } + + if (update) { + gb_platform_set_mouse_position(p, x, y); + } + } + } + + { // TODO(bill): Controllers + + } + + // TODO(bill): Is this in the correct place? 
+ objc_msgSend_void(NSApp, sel_registerName("updateWindows")); + if (p->renderer_type == gbRenderer_Opengl) { + objc_msgSend_void(cast(id)p->opengl.context, sel_registerName("update")); + gb_platform_make_opengl_context_current(p); + } +} + +void gb_platform_display(gbPlatform *p) { + // TODO(bill): Do more + if (p->renderer_type == gbRenderer_Opengl) { + gb_platform_make_opengl_context_current(p); + objc_msgSend_void(cast(id)p->opengl.context, sel_registerName("flushBuffer")); + } else if (p->renderer_type == gbRenderer_Software) { + // TODO(bill): + } else { + GB_PANIC("Invalid window rendering type"); + } + + { + f64 prev_time = p->curr_time; + f64 curr_time = gb_time_now(); + p->dt_for_frame = curr_time - prev_time; + p->curr_time = curr_time; + } +} + +void gb_platform_destroy(gbPlatform *p) { + gb_platform_make_opengl_context_current(p); + + objc_msgSend_void(cast(id)p->window_handle, sel_registerName("close")); + + #if defined(ARC_AVAILABLE) + // TODO(bill): autorelease pool + #else + objc_msgSend_void(cast(id)p->osx_autorelease_pool, sel_registerName("drain")); + #endif +} + +void gb_platform_show_cursor(gbPlatform *p, b32 show) { + if (show ) { + // objc_msgSend_void(class_registerName("NSCursor"), sel_registerName("unhide")); + } else { + // objc_msgSend_void(class_registerName("NSCursor"), sel_registerName("hide")); + } +} + +void gb_platform_set_mouse_position(gbPlatform *p, i32 x, i32 y) { + // TODO(bill): + CGPoint pos = {cast(CGFloat)x, cast(CGFloat)y}; + pos.x += p->window_x; + pos.y += p->window_y; + CGWarpMouseCursorPosition(pos); +} + +void gb_platform_set_controller_vibration(gbPlatform *p, isize index, f32 left_motor, f32 right_motor) { + // TODO(bill): +} + +b32 gb_platform_has_clipboard_text(gbPlatform *p) { + // TODO(bill): + return false; +} + +void gb_platform_set_clipboard_text(gbPlatform *p, char const *str) { + // TODO(bill): +} + +char *gb_platform_get_clipboard_text(gbPlatform *p, gbAllocator a) { + // TODO(bill): + return NULL; +} + 
+void gb_platform_set_window_position(gbPlatform *p, i32 x, i32 y) { + // TODO(bill): +} + +void gb_platform_set_window_title(gbPlatform *p, char const *title, ...) { + id title_string; + char buf[256] = {0}; + va_list va; + va_start(va, title); + gb_snprintf_va(buf, gb_count_of(buf), title, va); + va_end(va); + + title_string = objc_msgSend_id_char_const(cast(id)objc_getClass("NSString"), sel_registerName("stringWithUTF8String:"), buf); + objc_msgSend_void_id(cast(id)p->window_handle, sel_registerName("setTitle:"), title_string); +} + +void gb_platform_toggle_fullscreen(gbPlatform *p, b32 fullscreen_desktop) { + // TODO(bill): +} + +void gb_platform_toggle_borderless(gbPlatform *p) { + // TODO(bill): +} + +void gb_platform_make_opengl_context_current(gbPlatform *p) { + objc_msgSend_void(cast(id)p->opengl.context, sel_registerName("makeCurrentContext")); +} + +void gb_platform_show_window(gbPlatform *p) { + // TODO(bill): +} + +void gb_platform_hide_window(gbPlatform *p) { + // TODO(bill): +} + +i32 gb__osx_mode_bits_per_pixel(CGDisplayModeRef mode) { + i32 bits_per_pixel = 0; + CFStringRef pixel_encoding = CGDisplayModeCopyPixelEncoding(mode); + if(CFStringCompare(pixel_encoding, CFSTR(IO32BitDirectPixels), kCFCompareCaseInsensitive) == kCFCompareEqualTo) { + bits_per_pixel = 32; + } else if(CFStringCompare(pixel_encoding, CFSTR(IO16BitDirectPixels), kCFCompareCaseInsensitive) == kCFCompareEqualTo) { + bits_per_pixel = 16; + } else if(CFStringCompare(pixel_encoding, CFSTR(IO8BitIndexedPixels), kCFCompareCaseInsensitive) == kCFCompareEqualTo) { + bits_per_pixel = 8; + } + CFRelease(pixel_encoding); + + return bits_per_pixel; +} + +i32 gb__osx_display_bits_per_pixel(CGDirectDisplayID display) { + CGDisplayModeRef mode = CGDisplayCopyDisplayMode(display); + i32 bits_per_pixel = gb__osx_mode_bits_per_pixel(mode); + CGDisplayModeRelease(mode); + return bits_per_pixel; +} + +gbVideoMode gb_video_mode_get_desktop(void) { + CGDirectDisplayID display = CGMainDisplayID(); + 
return gb_video_mode(CGDisplayPixelsWide(display), + CGDisplayPixelsHigh(display), + gb__osx_display_bits_per_pixel(display)); +} + + +isize gb_video_mode_get_fullscreen_modes(gbVideoMode *modes, isize max_mode_count) { + CFArrayRef cg_modes = CGDisplayCopyAllDisplayModes(CGMainDisplayID(), NULL); + CFIndex i, count; + if (cg_modes == NULL) { + return 0; + } + + count = gb_min(CFArrayGetCount(cg_modes), max_mode_count); + for (i = 0; i < count; i++) { + CGDisplayModeRef cg_mode = cast(CGDisplayModeRef)CFArrayGetValueAtIndex(cg_modes, i); + modes[i] = gb_video_mode(CGDisplayModeGetWidth(cg_mode), + CGDisplayModeGetHeight(cg_mode), + gb__osx_mode_bits_per_pixel(cg_mode)); + } + + CFRelease(cg_modes); + + gb_sort_array(modes, count, gb_video_mode_dsc_cmp); + return cast(isize)count; +} + +#endif + + +// TODO(bill): OSX Platform Layer +// NOTE(bill): Use this as a guide so there is no need for Obj-C https://github.com/jimon/osx_app_in_plain_c + +gb_inline gbVideoMode gb_video_mode(i32 width, i32 height, i32 bits_per_pixel) { + gbVideoMode m; + m.width = width; + m.height = height; + m.bits_per_pixel = bits_per_pixel; + return m; +} + +gb_inline b32 gb_video_mode_is_valid(gbVideoMode mode) { + gb_local_persist gbVideoMode modes[256] = {0}; + gb_local_persist isize mode_count = 0; + gb_local_persist b32 is_set = false; + isize i; + + if (!is_set) { + mode_count = gb_video_mode_get_fullscreen_modes(modes, gb_count_of(modes)); + is_set = true; + } + + for (i = 0; i < mode_count; i++) { + gb_printf("%d %d\n", modes[i].width, modes[i].height); + } + + return gb_binary_search_array(modes, mode_count, &mode, gb_video_mode_cmp) >= 0; +} + +GB_COMPARE_PROC(gb_video_mode_cmp) { + gbVideoMode const *x = cast(gbVideoMode const *)a; + gbVideoMode const *y = cast(gbVideoMode const *)b; + + if (x->bits_per_pixel == y->bits_per_pixel) { + if (x->width == y->width) { + return x->height < y->height ? -1 : x->height > y->height; + } + return x->width < y->width ? 
-1 : x->width > y->width; + } + return x->bits_per_pixel < y->bits_per_pixel ? -1 : +1; +} + +GB_COMPARE_PROC(gb_video_mode_dsc_cmp) { + return gb_video_mode_cmp(b, a); +} + +#endif // defined(GB_PLATFORM) + + + + +#if defined(GB_COMPILER_MSVC) +#pragma warning(pop) +#endif + +#if defined(__GCC__) || defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + + +#if defined(__cplusplus) +} +#endif + +#endif // GB_IMPLEMENTATION diff --git a/thirdparty/stb/src/stb_image.c b/thirdparty/stb/src/stb_image.c new file mode 100644 index 0000000..badb3ef --- /dev/null +++ b/thirdparty/stb/src/stb_image.c @@ -0,0 +1,2 @@ +#define STB_IMAGE_IMPLEMENTATION +#include "stb_image.h" \ No newline at end of file diff --git a/thirdparty/stb/src/stb_image.h b/thirdparty/stb/src/stb_image.h new file mode 100644 index 0000000..39acae6 --- /dev/null +++ b/thirdparty/stb/src/stb_image.h @@ -0,0 +1,7897 @@ +/* stb_image - v2.27 - public domain image loader - http://nothings.org/stb + no warranty implied; use at your own risk + + Do this: + #define STB_IMAGE_IMPLEMENTATION + before you include this file in *one* C or C++ file to create the implementation. + + // i.e. it should look like this: + #include ... + #include ... + #include ... + #define STB_IMAGE_IMPLEMENTATION + #include "stb_image.h" + + You can #define STBI_ASSERT(x) before the #include to avoid using assert.h. 
+ And #define STBI_MALLOC, STBI_REALLOC, and STBI_FREE to avoid using malloc,realloc,free + + + QUICK NOTES: + Primarily of interest to game developers and other people who can + avoid problematic images and only need the trivial interface + + JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib) + PNG 1/2/4/8/16-bit-per-channel + + TGA (not sure what subset, if a subset) + BMP non-1bpp, non-RLE + PSD (composited view only, no extra channels, 8/16 bit-per-channel) + + GIF (*comp always reports as 4-channel) + HDR (radiance rgbE format) + PIC (Softimage PIC) + PNM (PPM and PGM binary only) + + Animated GIF still needs a proper API, but here's one way to do it: + http://gist.github.com/urraka/685d9a6340b26b830d49 + + - decode from memory or through FILE (define STBI_NO_STDIO to remove code) + - decode from arbitrary I/O callbacks + - SIMD acceleration on x86/x64 (SSE2) and ARM (NEON) + + Full documentation under "DOCUMENTATION" below. + + +LICENSE + + See end of file for license information. 
+ +RECENT REVISION HISTORY: + + 2.27 (2021-07-11) document stbi_info better, 16-bit PNM support, bug fixes + 2.26 (2020-07-13) many minor fixes + 2.25 (2020-02-02) fix warnings + 2.24 (2020-02-02) fix warnings; thread-local failure_reason and flip_vertically + 2.23 (2019-08-11) fix clang static analysis warning + 2.22 (2019-03-04) gif fixes, fix warnings + 2.21 (2019-02-25) fix typo in comment + 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) bugfix, 1-bit BMP, 16-bitness query, fix warnings + 2.16 (2017-07-23) all functions have 16-bit variants; optimizations; bugfixes + 2.15 (2017-03-18) fix png-1,2,4; all Imagenet JPGs; no runtime SSE detection on GCC + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-12-04) experimental 16-bit API, only for PNG so far; fixes + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64 + RGB-format JPEG; remove white matting in PSD; + allocate large structures on the stack; + correct channel count for PNG & BMP + 2.10 (2016-01-22) avoid warning introduced in 2.09 + 2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED + + See end of file for full revision history. 
+ + + ============================ Contributors ========================= + + Image formats Extensions, features + Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info) + Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info) + Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG) + Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks) + Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG) + Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip) + Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD) + github:urraka (animated gif) Junggon Kim (PNM comments) + Christopher Forseth (animated gif) Daniel Gibson (16-bit TGA) + socks-the-fox (16-bit PNG) + Jeremy Sawicki (handle all ImageNet JPGs) + Optimizations & bugfixes Mikhail Morozov (1-bit BMP) + Fabian "ryg" Giesen Anael Seghezzi (is-16-bit query) + Arseny Kapoulkine Simon Breuss (16-bit PNM) + John-Mark Allen + Carmelo J Fdez-Aguera + + Bug & warning fixes + Marc LeBlanc David Woo Guillaume George Martins Mozeiko + Christpher Lloyd Jerry Jansson Joseph Thomson Blazej Dariusz Roszkowski + Phil Jordan Dave Moore Roy Eltham + Hayaki Saito Nathan Reed Won Chun + Luke Graham Johan Duparc Nick Verigakis the Horde3D community + Thomas Ruf Ronny Chevalier github:rlyeh + Janez Zemva John Bartholomew Michal Cichon github:romigrou + Jonathan Blow Ken Hamada Tero Hanninen github:svdijk + Eugene Golushkov Laurent Gomila Cort Stratton github:snagar + Aruelien Pocheville Sergio Gonzalez Thibault Reuille github:Zelex + Cass Everitt Ryamond Barbiero github:grim210 + Paul Du Bois Engin Manap Aldo Culquicondor github:sammyhw + Philipp Wiesemann Dale Weiler Oriol Ferrer Mesia github:phprus + Josh Tobin Matthew Gregan github:poppolopoppo + Julian Raschke Gregory Mullen Christian Floisand github:darealshinji + Baldur Karlsson Kevin Schmidt JR Smith github:Michaelangel007 + Brad Weinberger Matvey Cherevko github:mosra + Luca Sas Alexander Veselov Zack Middleton [reserved] + Ryan C. 
Gordon [reserved] [reserved] + DO NOT ADD YOUR NAME HERE + + Jacko Dirks + + To add your name to the credits, pick a random blank space in the middle and fill it. + 80% of merge conflicts on stb PRs are due to people adding their name at the end + of the credits. +*/ + +#ifndef STBI_INCLUDE_STB_IMAGE_H +#define STBI_INCLUDE_STB_IMAGE_H + +// DOCUMENTATION +// +// Limitations: +// - no 12-bit-per-channel JPEG +// - no JPEGs with arithmetic coding +// - GIF always returns *comp=4 +// +// Basic usage (see HDR discussion below for HDR usage): +// int x,y,n; +// unsigned char *data = stbi_load(filename, &x, &y, &n, 0); +// // ... process data if not NULL ... +// // ... x = width, y = height, n = # 8-bit components per pixel ... +// // ... replace '0' with '1'..'4' to force that many components per pixel +// // ... but 'n' will always be the number that it would have been if you said 0 +// stbi_image_free(data) +// +// Standard parameters: +// int *x -- outputs image width in pixels +// int *y -- outputs image height in pixels +// int *channels_in_file -- outputs # of image components in image file +// int desired_channels -- if non-zero, # of image components requested in result +// +// The return value from an image loader is an 'unsigned char *' which points +// to the pixel data, or NULL on an allocation failure or if the image is +// corrupt or invalid. The pixel data consists of *y scanlines of *x pixels, +// with each pixel consisting of N interleaved 8-bit components; the first +// pixel pointed to is top-left-most in the image. There is no padding between +// image scanlines or between pixels, regardless of format. The number of +// components N is 'desired_channels' if desired_channels is non-zero, or +// *channels_in_file otherwise. If desired_channels is non-zero, +// *channels_in_file has the number of components that _would_ have been +// output otherwise. E.g. 
if you set desired_channels to 4, you will always +// get RGBA output, but you can check *channels_in_file to see if it's trivially +// opaque because e.g. there were only 3 channels in the source image. +// +// An output image with N components has the following components interleaved +// in this order in each pixel: +// +// N=#comp components +// 1 grey +// 2 grey, alpha +// 3 red, green, blue +// 4 red, green, blue, alpha +// +// If image loading fails for any reason, the return value will be NULL, +// and *x, *y, *channels_in_file will be unchanged. The function +// stbi_failure_reason() can be queried for an extremely brief, end-user +// unfriendly explanation of why the load failed. Define STBI_NO_FAILURE_STRINGS +// to avoid compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly +// more user-friendly ones. +// +// Paletted PNG, BMP, GIF, and PIC images are automatically depalettized. +// +// To query the width, height and component count of an image without having to +// decode the full file, you can use the stbi_info family of functions: +// +// int x,y,n,ok; +// ok = stbi_info(filename, &x, &y, &n); +// // returns ok=1 and sets x, y, n if image is a supported format, +// // 0 otherwise. +// +// Note that stb_image pervasively uses ints in its public API for sizes, +// including sizes of memory buffers. This is now part of the API and thus +// hard to change without causing breakage. As a result, the various image +// loaders all have certain limits on image size; these differ somewhat +// by format but generally boil down to either just under 2GB or just under +// 1GB. When the decoded image would be larger than this, stb_image decoding +// will fail. +// +// Additionally, stb_image will reject image files that have any of their +// dimensions set to a larger value than the configurable STBI_MAX_DIMENSIONS, +// which defaults to 2**24 = 16777216 pixels. 
Due to the above memory limit, +// the only way to have an image with such dimensions load correctly +// is for it to have a rather extreme aspect ratio. Either way, the +// assumption here is that such larger images are likely to be malformed +// or malicious. If you do need to load an image with individual dimensions +// larger than that, and it still fits in the overall size limit, you can +// #define STBI_MAX_DIMENSIONS on your own to be something larger. +// +// =========================================================================== +// +// UNICODE: +// +// If compiling for Windows and you wish to use Unicode filenames, compile +// with +// #define STBI_WINDOWS_UTF8 +// and pass utf8-encoded filenames. Call stbi_convert_wchar_to_utf8 to convert +// Windows wchar_t filenames to utf8. +// +// =========================================================================== +// +// Philosophy +// +// stb libraries are designed with the following priorities: +// +// 1. easy to use +// 2. easy to maintain +// 3. good performance +// +// Sometimes I let "good performance" creep up in priority over "easy to maintain", +// and for best performance I may provide less-easy-to-use APIs that give higher +// performance, in addition to the easy-to-use ones. Nevertheless, it's important +// to keep in mind that from the standpoint of you, a client of this library, +// all you care about is #1 and #3, and stb libraries DO NOT emphasize #3 above all. +// +// Some secondary priorities arise directly from the first two, some of which +// provide more explicit reasons why performance can't be emphasized. +// +// - Portable ("ease of use") +// - Small source code footprint ("easy to maintain") +// - No dependencies ("ease of use") +// +// =========================================================================== +// +// I/O callbacks +// +// I/O callbacks allow you to read from arbitrary sources, like packaged +// files or some other source. 
Data read from callbacks are processed +// through a small internal buffer (currently 128 bytes) to try to reduce +// overhead. +// +// The three functions you must define are "read" (reads some bytes of data), +// "skip" (skips some bytes of data), "eof" (reports if the stream is at the end). +// +// =========================================================================== +// +// SIMD support +// +// The JPEG decoder will try to automatically use SIMD kernels on x86 when +// supported by the compiler. For ARM Neon support, you must explicitly +// request it. +// +// (The old do-it-yourself SIMD API is no longer supported in the current +// code.) +// +// On x86, SSE2 will automatically be used when available based on a run-time +// test; if not, the generic C versions are used as a fall-back. On ARM targets, +// the typical path is to have separate builds for NEON and non-NEON devices +// (at least this is true for iOS and Android). Therefore, the NEON support is +// toggled by a build flag: define STBI_NEON to get NEON loops. +// +// If for some reason you do not want to use any of SIMD code, or if +// you have issues compiling it, you can disable it entirely by +// defining STBI_NO_SIMD. +// +// =========================================================================== +// +// HDR image support (disable by defining STBI_NO_HDR) +// +// stb_image supports loading HDR images in general, and currently the Radiance +// .HDR file format specifically. You can still load any file through the existing +// interface; if you attempt to load an HDR file, it will be automatically remapped +// to LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1; +// both of these constants can be reconfigured through this interface: +// +// stbi_hdr_to_ldr_gamma(2.2f); +// stbi_hdr_to_ldr_scale(1.0f); +// +// (note, do not use _inverse_ constants; stbi_image will invert them +// appropriately). 
+// +// Additionally, there is a new, parallel interface for loading files as +// (linear) floats to preserve the full dynamic range: +// +// float *data = stbi_loadf(filename, &x, &y, &n, 0); +// +// If you load LDR images through this interface, those images will +// be promoted to floating point values, run through the inverse of +// constants corresponding to the above: +// +// stbi_ldr_to_hdr_scale(1.0f); +// stbi_ldr_to_hdr_gamma(2.2f); +// +// Finally, given a filename (or an open file or memory block--see header +// file for details) containing image data, you can query for the "most +// appropriate" interface to use (that is, whether the image is HDR or +// not), using: +// +// stbi_is_hdr(char *filename); +// +// =========================================================================== +// +// iPhone PNG support: +// +// We optionally support converting iPhone-formatted PNGs (which store +// premultiplied BGRA) back to RGB, even though they're internally encoded +// differently. To enable this conversion, call +// stbi_convert_iphone_png_to_rgb(1). +// +// Call stbi_set_unpremultiply_on_load(1) as well to force a divide per +// pixel to remove any premultiplied alpha *only* if the image file explicitly +// says there's premultiplied data (currently only happens in iPhone images, +// and only if iPhone convert-to-rgb processing is on). +// +// =========================================================================== +// +// ADDITIONAL CONFIGURATION +// +// - You can suppress implementation of any of the decoders to reduce +// your code footprint by #defining one or more of the following +// symbols before creating the implementation. 
+ +// STBI_NO_JPEG +// STBI_NO_PNG +// STBI_NO_BMP +// STBI_NO_PSD +// STBI_NO_TGA +// STBI_NO_GIF +// STBI_NO_HDR +// STBI_NO_PIC +// STBI_NO_PNM (.ppm and .pgm) +// +// - You can request *only* certain decoders and suppress all other ones +// (this will be more forward-compatible, as addition of new decoders +// doesn't require you to disable them explicitly): +// +// STBI_ONLY_JPEG +// STBI_ONLY_PNG +// STBI_ONLY_BMP +// STBI_ONLY_PSD +// STBI_ONLY_TGA +// STBI_ONLY_GIF +// STBI_ONLY_HDR +// STBI_ONLY_PIC +// STBI_ONLY_PNM (.ppm and .pgm) +// +// - If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still +// want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB +// +// - If you define STBI_MAX_DIMENSIONS, stb_image will reject images greater +// than that size (in either width or height) without further processing. +// This is to let programs in the wild set an upper bound to prevent +// denial-of-service attacks on untrusted data, as one could generate a +// valid image of gigantic dimensions and force stb_image to allocate a +// huge block of memory and spend disproportionate time decoding it. By +// default this is set to (1 << 24), which is 16777216, but that's still +// very big. + +#ifndef STBI_NO_STDIO +#include <stdio.h> +#endif // STBI_NO_STDIO + +#define STBI_VERSION 1 + +enum +{ + STBI_default = 0, // only used for desired_channels + + STBI_grey = 1, + STBI_grey_alpha = 2, + STBI_rgb = 3, + STBI_rgb_alpha = 4 +}; + +#include <stdlib.h> +typedef unsigned char stbi_uc; +typedef unsigned short stbi_us; + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef STBIDEF +#ifdef STB_IMAGE_STATIC +#define STBIDEF static +#else +#define STBIDEF extern +#endif +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// PRIMARY API - works on images of any type +// + +// +// load image by filename, open file, or memory buffer +// + +typedef struct +{ + int (*read) (void *user,char *data,int size); // fill 'data' with 'size' bytes. 
return number of bytes actually read + void (*skip) (void *user,int n); // skip the next 'n' bytes, or 'unget' the last -n bytes if negative + int (*eof) (void *user); // returns nonzero if we are at end of file/data +} stbi_io_callbacks; + +//////////////////////////////////// +// +// 8-bits-per-channel interface +// + +STBIDEF stbi_uc *stbi_load_from_memory (stbi_uc const *buffer, int len , int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk , void *user, int *x, int *y, int *channels_in_file, int desired_channels); + +#ifndef STBI_NO_STDIO +STBIDEF stbi_uc *stbi_load (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_uc *stbi_load_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); +// for stbi_load_from_file, file pointer is left pointing immediately after image +#endif + +#ifndef STBI_NO_GIF +STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp); +#endif + +#ifdef STBI_WINDOWS_UTF8 +STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input); +#endif + +//////////////////////////////////// +// +// 16-bits-per-channel interface +// + +STBIDEF stbi_us *stbi_load_16_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + +#ifndef STBI_NO_STDIO +STBIDEF stbi_us *stbi_load_16 (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_us *stbi_load_from_file_16(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); +#endif + +//////////////////////////////////// +// +// float-per-channel interface +// +#ifndef STBI_NO_LINEAR + STBIDEF 
float *stbi_loadf_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_callbacks (stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + + #ifndef STBI_NO_STDIO + STBIDEF float *stbi_loadf (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); + #endif +#endif + +#ifndef STBI_NO_HDR + STBIDEF void stbi_hdr_to_ldr_gamma(float gamma); + STBIDEF void stbi_hdr_to_ldr_scale(float scale); +#endif // STBI_NO_HDR + +#ifndef STBI_NO_LINEAR + STBIDEF void stbi_ldr_to_hdr_gamma(float gamma); + STBIDEF void stbi_ldr_to_hdr_scale(float scale); +#endif // STBI_NO_LINEAR + +// stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user); +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len); +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename); +STBIDEF int stbi_is_hdr_from_file(FILE *f); +#endif // STBI_NO_STDIO + + +// get a VERY brief reason for failure +// on most compilers (and ALL modern mainstream compilers) this is threadsafe +STBIDEF const char *stbi_failure_reason (void); + +// free the loaded image -- this is just free() +STBIDEF void stbi_image_free (void *retval_from_stbi_load); + +// get image dimensions & components without fully decoding +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp); +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len); +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *clbk, void *user); + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info (char const *filename, 
int *x, int *y, int *comp); +STBIDEF int stbi_info_from_file (FILE *f, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit (char const *filename); +STBIDEF int stbi_is_16_bit_from_file(FILE *f); +#endif + + + +// for image formats that explicitly notate that they have premultiplied alpha, +// we just return the colors as stored in the file. set this flag to force +// unpremultiplication. results are undefined if the unpremultiply overflow. +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply); + +// indicate whether we should process iphone images back to canonical format, +// or just pass them through "as-is" +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert); + +// flip the image vertically, so the first pixel in the output array is the bottom left +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip); + +// as above, but only applies to images loaded on the thread that calls the function +// this function is only available if your compiler supports thread-local variables; +// calling it will fail to link if your compiler doesn't +STBIDEF void stbi_set_unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply); +STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert); +STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip); + +// ZLIB client - used by PNG, available for other purposes + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen); +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header); +STBIDEF char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen); +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + +STBIDEF char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen); +STBIDEF int 
stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + + +#ifdef __cplusplus +} +#endif + +// +// +//// end header file ///////////////////////////////////////////////////// +#endif // STBI_INCLUDE_STB_IMAGE_H + +#ifdef STB_IMAGE_IMPLEMENTATION + +#if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || defined(STBI_ONLY_BMP) \ + || defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) \ + || defined(STBI_ONLY_HDR) || defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) \ + || defined(STBI_ONLY_ZLIB) + #ifndef STBI_ONLY_JPEG + #define STBI_NO_JPEG + #endif + #ifndef STBI_ONLY_PNG + #define STBI_NO_PNG + #endif + #ifndef STBI_ONLY_BMP + #define STBI_NO_BMP + #endif + #ifndef STBI_ONLY_PSD + #define STBI_NO_PSD + #endif + #ifndef STBI_ONLY_TGA + #define STBI_NO_TGA + #endif + #ifndef STBI_ONLY_GIF + #define STBI_NO_GIF + #endif + #ifndef STBI_ONLY_HDR + #define STBI_NO_HDR + #endif + #ifndef STBI_ONLY_PIC + #define STBI_NO_PIC + #endif + #ifndef STBI_ONLY_PNM + #define STBI_NO_PNM + #endif +#endif + +#if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB) +#define STBI_NO_ZLIB +#endif + + +#include <stdarg.h> +#include <stddef.h> // ptrdiff_t on osx +#include <stdlib.h> +#include <string.h> +#include <limits.h> + +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +#include <math.h> // ldexp, pow +#endif + +#ifndef STBI_NO_STDIO +#include <stdio.h> +#endif + +#ifndef STBI_ASSERT +#include <assert.h> +#define STBI_ASSERT(x) assert(x) +#endif + +#ifdef __cplusplus +#define STBI_EXTERN extern "C" +#else +#define STBI_EXTERN extern +#endif + + +#ifndef _MSC_VER + #ifdef __cplusplus + #define stbi_inline inline + #else + #define stbi_inline + #endif +#else + #define stbi_inline __forceinline +#endif + +#ifndef STBI_NO_THREAD_LOCALS + #if defined(__cplusplus) && __cplusplus >= 201103L + #define STBI_THREAD_LOCAL thread_local + #elif defined(__GNUC__) && __GNUC__ < 5 + #define STBI_THREAD_LOCAL __thread + #elif defined(_MSC_VER) + #define STBI_THREAD_LOCAL __declspec(thread) 
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_THREADS__) + #define STBI_THREAD_LOCAL _Thread_local + #endif + + #ifndef STBI_THREAD_LOCAL + #if defined(__GNUC__) + #define STBI_THREAD_LOCAL __thread + #endif + #endif +#endif + +#ifdef _MSC_VER +typedef unsigned short stbi__uint16; +typedef signed short stbi__int16; +typedef unsigned int stbi__uint32; +typedef signed int stbi__int32; +#else +#include <stdint.h> +typedef uint16_t stbi__uint16; +typedef int16_t stbi__int16; +typedef uint32_t stbi__uint32; +typedef int32_t stbi__int32; +#endif + +// should produce compiler error if size is wrong +typedef unsigned char validate_uint32[sizeof(stbi__uint32)==4 ? 1 : -1]; + +#ifdef _MSC_VER +#define STBI_NOTUSED(v) (void)(v) +#else +#define STBI_NOTUSED(v) (void)sizeof(v) +#endif + +#ifdef _MSC_VER +#define STBI_HAS_LROTL +#endif + +#ifdef STBI_HAS_LROTL + #define stbi_lrot(x,y) _lrotl(x,y) +#else + #define stbi_lrot(x,y) (((x) << (y)) | ((x) >> (-(y) & 31))) +#endif + +#if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED)) +// ok +#elif !defined(STBI_MALLOC) && !defined(STBI_FREE) && !defined(STBI_REALLOC) && !defined(STBI_REALLOC_SIZED) +// ok +#else +#error "Must define all or none of STBI_MALLOC, STBI_FREE, and STBI_REALLOC (or STBI_REALLOC_SIZED)." 
+#endif + +#ifndef STBI_MALLOC +#define STBI_MALLOC(sz) malloc(sz) +#define STBI_REALLOC(p,newsz) realloc(p,newsz) +#define STBI_FREE(p) free(p) +#endif + +#ifndef STBI_REALLOC_SIZED +#define STBI_REALLOC_SIZED(p,oldsz,newsz) STBI_REALLOC(p,newsz) +#endif + +// x86/x64 detection +#if defined(__x86_64__) || defined(_M_X64) +#define STBI__X64_TARGET +#elif defined(__i386) || defined(_M_IX86) +#define STBI__X86_TARGET +#endif + +#if defined(__GNUC__) && defined(STBI__X86_TARGET) && !defined(__SSE2__) && !defined(STBI_NO_SIMD) +// gcc doesn't support sse2 intrinsics unless you compile with -msse2, +// which in turn means it gets to use SSE2 everywhere. This is unfortunate, +// but previous attempts to provide the SSE2 functions with runtime +// detection caused numerous issues. The way architecture extensions are +// exposed in GCC/Clang is, sadly, not really suited for one-file libs. +// New behavior: if compiled with -msse2, we use SSE2 without any +// detection; if not, we don't use it at all. +#define STBI_NO_SIMD +#endif + +#if defined(__MINGW32__) && defined(STBI__X86_TARGET) && !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD) +// Note that __MINGW32__ doesn't actually mean 32-bit, so we have to avoid STBI__X64_TARGET +// +// 32-bit MinGW wants ESP to be 16-byte aligned, but this is not in the +// Windows ABI and VC++ as well as Windows DLLs don't maintain that invariant. +// As a result, enabling SSE2 on 32-bit MinGW is dangerous when not +// simultaneously enabling "-mstackrealign". +// +// See https://github.com/nothings/stb/issues/81 for more information. +// +// So default to no SSE2 on 32-bit MinGW. If you've read this far and added +// -mstackrealign to your build settings, feel free to #define STBI_MINGW_ENABLE_SSE2. 
+#define STBI_NO_SIMD +#endif + +#if !defined(STBI_NO_SIMD) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET)) +#define STBI_SSE2 +#include <emmintrin.h> + +#ifdef _MSC_VER + +#if _MSC_VER >= 1400 // not VC6 +#include <intrin.h> // __cpuid +static int stbi__cpuid3(void) +{ + int info[4]; + __cpuid(info,1); + return info[3]; +} +#else +static int stbi__cpuid3(void) +{ + int res; + __asm { + mov eax,1 + cpuid + mov res,edx + } + return res; +} +#endif + +#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name + +#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2) +static int stbi__sse2_available(void) +{ + int info3 = stbi__cpuid3(); + return ((info3 >> 26) & 1) != 0; +} +#endif + +#else // assume GCC-style if not VC++ +#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) + +#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2) +static int stbi__sse2_available(void) +{ + // If we're even attempting to compile this on GCC/Clang, that means + // -msse2 is on, which means the compiler is allowed to use SSE2 + // instructions at will, and so are we. 
+ return 1; +} +#endif + +#endif +#endif + +// ARM NEON +#if defined(STBI_NO_SIMD) && defined(STBI_NEON) +#undef STBI_NEON +#endif + +#ifdef STBI_NEON +#include <arm_neon.h> +#ifdef _MSC_VER +#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name +#else +#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) +#endif +#endif + +#ifndef STBI_SIMD_ALIGN +#define STBI_SIMD_ALIGN(type, name) type name +#endif + +#ifndef STBI_MAX_DIMENSIONS +#define STBI_MAX_DIMENSIONS (1 << 24) +#endif + +/////////////////////////////////////////////// +// +// stbi__context struct and start_xxx functions + +// stbi__context structure is our basic context used by all images, so it +// contains all the IO context, plus some basic image information +typedef struct +{ + stbi__uint32 img_x, img_y; + int img_n, img_out_n; + + stbi_io_callbacks io; + void *io_user_data; + + int read_from_callbacks; + int buflen; + stbi_uc buffer_start[128]; + int callback_already_read; + + stbi_uc *img_buffer, *img_buffer_end; + stbi_uc *img_buffer_original, *img_buffer_original_end; +} stbi__context; + + +static void stbi__refill_buffer(stbi__context *s); + +// initialize a memory-decode context +static void stbi__start_mem(stbi__context *s, stbi_uc const *buffer, int len) +{ + s->io.read = NULL; + s->read_from_callbacks = 0; + s->callback_already_read = 0; + s->img_buffer = s->img_buffer_original = (stbi_uc *) buffer; + s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *) buffer+len; +} + +// initialize a callback-based context +static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user) +{ + s->io = *c; + s->io_user_data = user; + s->buflen = sizeof(s->buffer_start); + s->read_from_callbacks = 1; + s->callback_already_read = 0; + s->img_buffer = s->img_buffer_original = s->buffer_start; + stbi__refill_buffer(s); + s->img_buffer_original_end = s->img_buffer_end; +} + +#ifndef STBI_NO_STDIO + +static int stbi__stdio_read(void *user, char *data, int size) 
+{ + return (int) fread(data,1,size,(FILE*) user); +} + +static void stbi__stdio_skip(void *user, int n) +{ + int ch; + fseek((FILE*) user, n, SEEK_CUR); + ch = fgetc((FILE*) user); /* have to read a byte to reset feof()'s flag */ + if (ch != EOF) { + ungetc(ch, (FILE *) user); /* push byte back onto stream if valid. */ + } +} + +static int stbi__stdio_eof(void *user) +{ + return feof((FILE*) user) || ferror((FILE *) user); +} + +static stbi_io_callbacks stbi__stdio_callbacks = +{ + stbi__stdio_read, + stbi__stdio_skip, + stbi__stdio_eof, +}; + +static void stbi__start_file(stbi__context *s, FILE *f) +{ + stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *) f); +} + +//static void stop_file(stbi__context *s) { } + +#endif // !STBI_NO_STDIO + +static void stbi__rewind(stbi__context *s) +{ + // conceptually rewind SHOULD rewind to the beginning of the stream, + // but we just rewind to the beginning of the initial buffer, because + // we only use it after doing 'test', which only ever looks at at most 92 bytes + s->img_buffer = s->img_buffer_original; + s->img_buffer_end = s->img_buffer_original_end; +} + +enum +{ + STBI_ORDER_RGB, + STBI_ORDER_BGR +}; + +typedef struct +{ + int bits_per_channel; + int num_channels; + int channel_order; +} stbi__result_info; + +#ifndef STBI_NO_JPEG +static int stbi__jpeg_test(stbi__context *s); +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNG +static int stbi__png_test(stbi__context *s); +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__png_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_BMP +static int stbi__bmp_test(stbi__context *s); +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, 
int req_comp, stbi__result_info *ri); +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_TGA +static int stbi__tga_test(stbi__context *s); +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s); +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc); +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__psd_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_HDR +static int stbi__hdr_test(stbi__context *s); +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_test(stbi__context *s); +static void *stbi__pic_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_GIF +static int stbi__gif_test(stbi__context *s); +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp); +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNM +static int stbi__pnm_test(stbi__context *s); +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__pnm_is16(stbi__context *s); +#endif + +static +#ifdef STBI_THREAD_LOCAL +STBI_THREAD_LOCAL +#endif +const char 
*stbi__g_failure_reason; + +STBIDEF const char *stbi_failure_reason(void) +{ + return stbi__g_failure_reason; +} + +#ifndef STBI_NO_FAILURE_STRINGS +static int stbi__err(const char *str) +{ + stbi__g_failure_reason = str; + return 0; +} +#endif + +static void *stbi__malloc(size_t size) +{ + return STBI_MALLOC(size); +} + +// stb_image uses ints pervasively, including for offset calculations. +// therefore the largest decoded image size we can support with the +// current code, even on 64-bit targets, is INT_MAX. this is not a +// significant limitation for the intended use case. +// +// we do, however, need to make sure our size calculations don't +// overflow. hence a few helper functions for size calculations that +// multiply integers together, making sure that they're non-negative +// and no overflow occurs. + +// return 1 if the sum is valid, 0 on overflow. +// negative terms are considered invalid. +static int stbi__addsizes_valid(int a, int b) +{ + if (b < 0) return 0; + // now 0 <= b <= INT_MAX, hence also + // 0 <= INT_MAX - b <= INTMAX. + // And "a + b <= INT_MAX" (which might overflow) is the + // same as a <= INT_MAX - b (no overflow) + return a <= INT_MAX - b; +} + +// returns 1 if the product is valid, 0 on overflow. +// negative factors are considered invalid. 
+static int stbi__mul2sizes_valid(int a, int b) +{ + if (a < 0 || b < 0) return 0; + if (b == 0) return 1; // mul-by-0 is always safe + // portable way to check for no overflows in a*b + return a <= INT_MAX/b; +} + +#if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR) +// returns 1 if "a*b + add" has no negative terms/factors and doesn't overflow +static int stbi__mad2sizes_valid(int a, int b, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a*b, add); +} +#endif + +// returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow +static int stbi__mad3sizes_valid(int a, int b, int c, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__addsizes_valid(a*b*c, add); +} + +// returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) || !defined(STBI_NO_PNM) +static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__mul2sizes_valid(a*b*c, d) && stbi__addsizes_valid(a*b*c*d, add); +} +#endif + +#if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR) +// mallocs with size overflow checking +static void *stbi__malloc_mad2(int a, int b, int add) +{ + if (!stbi__mad2sizes_valid(a, b, add)) return NULL; + return stbi__malloc(a*b + add); +} +#endif + +static void *stbi__malloc_mad3(int a, int b, int c, int add) +{ + if (!stbi__mad3sizes_valid(a, b, c, add)) return NULL; + return stbi__malloc(a*b*c + add); +} + +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) || !defined(STBI_NO_PNM) +static void *stbi__malloc_mad4(int a, int b, int c, int d, int add) +{ + if (!stbi__mad4sizes_valid(a, b, c, d, add)) return NULL; + return stbi__malloc(a*b*c*d + add); +} +#endif + +// stbi__err - error +// stbi__errpf - error returning pointer to 
float +// stbi__errpuc - error returning pointer to unsigned char + +#ifdef STBI_NO_FAILURE_STRINGS + #define stbi__err(x,y) 0 +#elif defined(STBI_FAILURE_USERMSG) + #define stbi__err(x,y) stbi__err(y) +#else + #define stbi__err(x,y) stbi__err(x) +#endif + +#define stbi__errpf(x,y) ((float *)(size_t) (stbi__err(x,y)?NULL:NULL)) +#define stbi__errpuc(x,y) ((unsigned char *)(size_t) (stbi__err(x,y)?NULL:NULL)) + +STBIDEF void stbi_image_free(void *retval_from_stbi_load) +{ + STBI_FREE(retval_from_stbi_load); +} + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp); +#endif + +#ifndef STBI_NO_HDR +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp); +#endif + +static int stbi__vertically_flip_on_load_global = 0; + +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load_global = flag_true_if_should_flip; +} + +#ifndef STBI_THREAD_LOCAL +#define stbi__vertically_flip_on_load stbi__vertically_flip_on_load_global +#else +static STBI_THREAD_LOCAL int stbi__vertically_flip_on_load_local, stbi__vertically_flip_on_load_set; + +STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load_local = flag_true_if_should_flip; + stbi__vertically_flip_on_load_set = 1; +} + +#define stbi__vertically_flip_on_load (stbi__vertically_flip_on_load_set \ + ? 
stbi__vertically_flip_on_load_local \ + : stbi__vertically_flip_on_load_global) +#endif // STBI_THREAD_LOCAL + +static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields + ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed + ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order + ri->num_channels = 0; + + // test the formats with a very explicit header first (at least a FOURCC + // or distinctive magic number first) + #ifndef STBI_NO_PNG + if (stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_BMP + if (stbi__bmp_test(s)) return stbi__bmp_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_GIF + if (stbi__gif_test(s)) return stbi__gif_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PSD + if (stbi__psd_test(s)) return stbi__psd_load(s,x,y,comp,req_comp, ri, bpc); + #else + STBI_NOTUSED(bpc); + #endif + #ifndef STBI_NO_PIC + if (stbi__pic_test(s)) return stbi__pic_load(s,x,y,comp,req_comp, ri); + #endif + + // then the formats that can end up attempting to load with just 1 or 2 + // bytes matching expectations; these are prone to false positives, so + // try them later + #ifndef STBI_NO_JPEG + if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PNM + if (stbi__pnm_test(s)) return stbi__pnm_load(s,x,y,comp,req_comp, ri); + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + float *hdr = stbi__hdr_load(s, x,y,comp,req_comp, ri); + return stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp); + } + #endif + + #ifndef STBI_NO_TGA + // test tga last because it's a crappy test! 
+ if (stbi__tga_test(s)) + return stbi__tga_load(s,x,y,comp,req_comp, ri); + #endif + + return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt"); +} + +static stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi_uc *reduced; + + reduced = (stbi_uc *) stbi__malloc(img_len); + if (reduced == NULL) return stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16->8 bit scaling + + STBI_FREE(orig); + return reduced; +} + +static stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi__uint16 *enlarged; + + enlarged = (stbi__uint16 *) stbi__malloc(img_len*2); + if (enlarged == NULL) return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high and low byte, maps 0->0, 255->0xffff + + STBI_FREE(orig); + return enlarged; +} + +static void stbi__vertical_flip(void *image, int w, int h, int bytes_per_pixel) +{ + int row; + size_t bytes_per_row = (size_t)w * bytes_per_pixel; + stbi_uc temp[2048]; + stbi_uc *bytes = (stbi_uc *)image; + + for (row = 0; row < (h>>1); row++) { + stbi_uc *row0 = bytes + row*bytes_per_row; + stbi_uc *row1 = bytes + (h - row - 1)*bytes_per_row; + // swap row0 with row1 + size_t bytes_left = bytes_per_row; + while (bytes_left) { + size_t bytes_copy = (bytes_left < sizeof(temp)) ? 
bytes_left : sizeof(temp); + memcpy(temp, row0, bytes_copy); + memcpy(row0, row1, bytes_copy); + memcpy(row1, temp, bytes_copy); + row0 += bytes_copy; + row1 += bytes_copy; + bytes_left -= bytes_copy; + } + } +} + +#ifndef STBI_NO_GIF +static void stbi__vertical_flip_slices(void *image, int w, int h, int z, int bytes_per_pixel) +{ + int slice; + int slice_size = w * h * bytes_per_pixel; + + stbi_uc *bytes = (stbi_uc *)image; + for (slice = 0; slice < z; ++slice) { + stbi__vertical_flip(bytes, w, h, bytes_per_pixel); + bytes += slice_size; + } +} +#endif + +static unsigned char *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8); + + if (result == NULL) + return NULL; + + // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. + STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); + + if (ri.bits_per_channel != 8) { + result = stbi__convert_16_to_8((stbi__uint16 *) result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 8; + } + + // @TODO: move stbi__convert_format to here + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi_uc)); + } + + return (unsigned char *) result; +} + +static stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16); + + if (result == NULL) + return NULL; + + // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. + STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); + + if (ri.bits_per_channel != 16) { + result = stbi__convert_8_to_16((stbi_uc *) result, *x, *y, req_comp == 0 ? 
*comp : req_comp); + ri.bits_per_channel = 16; + } + + // @TODO: move stbi__convert_format16 to here + // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi__uint16)); + } + + return (stbi__uint16 *) result; +} + +#if !defined(STBI_NO_HDR) && !defined(STBI_NO_LINEAR) +static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp) +{ + if (stbi__vertically_flip_on_load && result != NULL) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(float)); + } +} +#endif + +#ifndef STBI_NO_STDIO + +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) +STBI_EXTERN __declspec(dllimport) int __stdcall MultiByteToWideChar(unsigned int cp, unsigned long flags, const char *str, int cbmb, wchar_t *widestr, int cchwide); +STBI_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int cp, unsigned long flags, const wchar_t *widestr, int cchwide, char *str, int cbmb, const char *defchar, int *used_default); +#endif + +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) +STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input) +{ + return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, (int) bufferlen, NULL, NULL); +} +#endif + +static FILE *stbi__fopen(char const *filename, char const *mode) +{ + FILE *f; +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) + wchar_t wMode[64]; + wchar_t wFilename[1024]; + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename)/sizeof(*wFilename))) + return 0; + + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode)/sizeof(*wMode))) + return 0; + +#if defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != _wfopen_s(&f, wFilename, wMode)) + f = 0; +#else + f = 
_wfopen(wFilename, wMode); +#endif + +#elif defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != fopen_s(&f, filename, mode)) + f=0; +#else + f = fopen(filename, mode); +#endif + return f; +} + + +STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + unsigned char *result; + if (!f) return stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi__uint16 *stbi_load_from_file_16(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__uint16 *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_16bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + stbi__uint16 *result; + if (!f) return (stbi_us *) stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file_16(f,x,y,comp,req_comp); + fclose(f); + return result; +} + + +#endif //!STBI_NO_STDIO + +STBIDEF stbi_us *stbi_load_16_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); +} + +STBIDEF stbi_us 
*stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); + return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); +} + +STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); +} + +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); +} + +#ifndef STBI_NO_GIF +STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_mem(&s,buffer,len); + + result = (unsigned char*) stbi__load_gif_main(&s, delays, x, y, z, comp, req_comp); + if (stbi__vertically_flip_on_load) { + stbi__vertical_flip_slices( result, *x, *y, *z, *comp ); + } + + return result; +} +#endif + +#ifndef STBI_NO_LINEAR +static float *stbi__loadf_main(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *data; + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + stbi__result_info ri; + float *hdr_data = stbi__hdr_load(s,x,y,comp,req_comp, &ri); + if (hdr_data) + stbi__float_postprocess(hdr_data,x,y,comp,req_comp); + return hdr_data; + } + #endif + data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp); + if (data) + return stbi__ldr_to_hdr(data, *x, *y, req_comp ? 
req_comp : *comp); + return stbi__errpf("unknown image type", "Image not of any known type, or corrupt"); +} + +STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} + +STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} + +#ifndef STBI_NO_STDIO +STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + float *result; + FILE *f = stbi__fopen(filename, "rb"); + if (!f) return stbi__errpf("can't fopen", "Unable to open file"); + result = stbi_loadf_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_file(&s,f); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} +#endif // !STBI_NO_STDIO + +#endif // !STBI_NO_LINEAR + +// these is-hdr-or-not is defined independent of whether STBI_NO_LINEAR is +// defined, for API simplicity; if STBI_NO_LINEAR is defined, it always +// reports false! 
+ +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(buffer); + STBI_NOTUSED(len); + return 0; + #endif +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result=0; + if (f) { + result = stbi_is_hdr_from_file(f); + fclose(f); + } + return result; +} + +STBIDEF int stbi_is_hdr_from_file(FILE *f) +{ + #ifndef STBI_NO_HDR + long pos = ftell(f); + int res; + stbi__context s; + stbi__start_file(&s,f); + res = stbi__hdr_test(&s); + fseek(f, pos, SEEK_SET); + return res; + #else + STBI_NOTUSED(f); + return 0; + #endif +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(clbk); + STBI_NOTUSED(user); + return 0; + #endif +} + +#ifndef STBI_NO_LINEAR +static float stbi__l2h_gamma=2.2f, stbi__l2h_scale=1.0f; + +STBIDEF void stbi_ldr_to_hdr_gamma(float gamma) { stbi__l2h_gamma = gamma; } +STBIDEF void stbi_ldr_to_hdr_scale(float scale) { stbi__l2h_scale = scale; } +#endif + +static float stbi__h2l_gamma_i=1.0f/2.2f, stbi__h2l_scale_i=1.0f; + +STBIDEF void stbi_hdr_to_ldr_gamma(float gamma) { stbi__h2l_gamma_i = 1/gamma; } +STBIDEF void stbi_hdr_to_ldr_scale(float scale) { stbi__h2l_scale_i = 1/scale; } + + +////////////////////////////////////////////////////////////////////////////// +// +// Common code used by all image loaders +// + +enum +{ + STBI__SCAN_load=0, + STBI__SCAN_type, + STBI__SCAN_header +}; + +static void stbi__refill_buffer(stbi__context *s) +{ + int n = (s->io.read)(s->io_user_data,(char*)s->buffer_start,s->buflen); + s->callback_already_read += (int) (s->img_buffer - s->img_buffer_original); + if (n == 0) { + // at end of 
file, treat same as if from memory, but need to handle case + // where s->img_buffer isn't pointing to safe memory, e.g. 0-byte file + s->read_from_callbacks = 0; + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start+1; + *s->img_buffer = 0; + } else { + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start + n; + } +} + +stbi_inline static stbi_uc stbi__get8(stbi__context *s) +{ + if (s->img_buffer < s->img_buffer_end) + return *s->img_buffer++; + if (s->read_from_callbacks) { + stbi__refill_buffer(s); + return *s->img_buffer++; + } + return 0; +} + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_HDR) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +stbi_inline static int stbi__at_eof(stbi__context *s) +{ + if (s->io.read) { + if (!(s->io.eof)(s->io_user_data)) return 0; + // if feof() is true, check if buffer = end + // special case: we've only got the special 0 character at the end + if (s->read_from_callbacks == 0) return 1; + } + + return s->img_buffer >= s->img_buffer_end; +} +#endif + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) +// nothing +#else +static void stbi__skip(stbi__context *s, int n) +{ + if (n == 0) return; // already there! 
+ if (n < 0) { + s->img_buffer = s->img_buffer_end; + return; + } + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + s->img_buffer = s->img_buffer_end; + (s->io.skip)(s->io_user_data, n - blen); + return; + } + } + s->img_buffer += n; +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_TGA) && defined(STBI_NO_HDR) && defined(STBI_NO_PNM) +// nothing +#else +static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n) +{ + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + int res, count; + + memcpy(buffer, s->img_buffer, blen); + + count = (s->io.read)(s->io_user_data, (char*) buffer + blen, n - blen); + res = (count == (n-blen)); + s->img_buffer = s->img_buffer_end; + return res; + } + } + + if (s->img_buffer+n <= s->img_buffer_end) { + memcpy(buffer, s->img_buffer, n); + s->img_buffer += n; + return 1; + } else + return 0; +} +#endif + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC) +// nothing +#else +static int stbi__get16be(stbi__context *s) +{ + int z = stbi__get8(s); + return (z << 8) + stbi__get8(s); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC) +// nothing +#else +static stbi__uint32 stbi__get32be(stbi__context *s) +{ + stbi__uint32 z = stbi__get16be(s); + return (z << 16) + stbi__get16be(s); +} +#endif + +#if defined(STBI_NO_BMP) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) +// nothing +#else +static int stbi__get16le(stbi__context *s) +{ + int z = stbi__get8(s); + return z + (stbi__get8(s) << 8); +} +#endif + +#ifndef STBI_NO_BMP +static stbi__uint32 stbi__get32le(stbi__context *s) +{ + stbi__uint32 z = stbi__get16le(s); + z += (stbi__uint32)stbi__get16le(s) << 16; + return z; +} +#endif + +#define STBI__BYTECAST(x) ((stbi_uc) ((x) & 255)) // truncate int to byte without warnings + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && 
defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +////////////////////////////////////////////////////////////////////////////// +// +// generic converter from built-in img_n to req_comp +// individual types do this automatically as much as possible (e.g. jpeg +// does all cases internally since it needs to colorspace convert anyway, +// and it never has alpha, so very few cases ). png can automatically +// interleave an alpha=255 channel, but falls back to this for other cases +// +// assume data buffer is malloced, so malloc a new one and free that one +// only failure mode is malloc failing + +static stbi_uc stbi__compute_y(int r, int g, int b) +{ + return (stbi_uc) (((r*77) + (g*150) + (29*b)) >> 8); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + unsigned char *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (unsigned char *) stbi__malloc_mad3(req_comp, x, y, 0); + if (good == NULL) { + STBI_FREE(data); + return stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + unsigned char *src = data + j * x * img_n ; + unsigned char *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=255; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + 
STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=255; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=255; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = 255; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break; + default: STBI_ASSERT(0); STBI_FREE(data); STBI_FREE(good); return stbi__errpuc("unsupported", "Unsupported format conversion"); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) +// nothing +#else +static stbi__uint16 stbi__compute_y_16(int r, int g, int b) +{ + return (stbi__uint16) (((r*77) + (g*150) + (29*b)) >> 8); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) +// nothing +#else +static stbi__uint16 *stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + stbi__uint16 *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (stbi__uint16 *) stbi__malloc(req_comp * x * y * 2); + if (good == NULL) { + STBI_FREE(data); + return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + stbi__uint16 *src = data + j * x * img_n ; + stbi__uint16 *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp 
components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=0xffff; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=0xffff; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=0xffff; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = 0xffff; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break; + default: STBI_ASSERT(0); STBI_FREE(data); STBI_FREE(good); return (stbi__uint16*) stbi__errpuc("unsupported", "Unsupported format conversion"); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} +#endif + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp) +{ + int i,k,n; + float *output; + if (!data) return NULL; + output = (float *) stbi__malloc_mad4(x, y, comp, sizeof(float), 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpf("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + output[i*comp + k] = (float) (pow(data[i*comp+k]/255.0f, stbi__l2h_gamma) * stbi__l2h_scale); + } + } + if (n < comp) { + for (i=0; i < x*y; ++i) { + output[i*comp + n] = data[i*comp + n]/255.0f; + } + } + STBI_FREE(data); + return output; +} +#endif + +#ifndef STBI_NO_HDR +#define 
stbi__float2int(x) ((int) (x)) +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp) +{ + int i,k,n; + stbi_uc *output; + if (!data) return NULL; + output = (stbi_uc *) stbi__malloc_mad3(x, y, comp, 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpuc("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + float z = (float) pow(data[i*comp+k]*stbi__h2l_scale_i, stbi__h2l_gamma_i) * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + if (k < comp) { + float z = data[i*comp+k] * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + } + STBI_FREE(data); + return output; +} +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// "baseline" JPEG/JFIF decoder +// +// simple implementation +// - doesn't support delayed output of y-dimension +// - simple interface (only one output format: 8-bit interleaved RGB) +// - doesn't try to recover corrupt jpegs +// - doesn't allow partial loading, loading multiple at once +// - still fast on x86 (copying globals into locals doesn't help x86) +// - allocates lots of intermediate memory (full size of all components) +// - non-interleaved case requires this anyway +// - allows good upsampling (see next) +// high-quality +// - upsampled channels are bilinearly interpolated, even across blocks +// - quality integer IDCT derived from IJG's 'slow' +// performance +// - fast huffman; reasonable integer IDCT +// - some SIMD kernels for common paths on targets with SSE2/NEON +// - uses a lot of intermediate memory, could cache poorly + +#ifndef STBI_NO_JPEG + +// huffman decoding acceleration +#define FAST_BITS 9 // larger handles more cases; smaller stomps less cache + +typedef struct +{ + stbi_uc fast[1 << FAST_BITS]; + // 
weirdly, repacking this into AoS is a 10% speed loss, instead of a win + stbi__uint16 code[256]; + stbi_uc values[256]; + stbi_uc size[257]; + unsigned int maxcode[18]; + int delta[17]; // old 'firstsymbol' - old 'firstcode' +} stbi__huffman; + +typedef struct +{ + stbi__context *s; + stbi__huffman huff_dc[4]; + stbi__huffman huff_ac[4]; + stbi__uint16 dequant[4][64]; + stbi__int16 fast_ac[4][1 << FAST_BITS]; + +// sizes for components, interleaved MCUs + int img_h_max, img_v_max; + int img_mcu_x, img_mcu_y; + int img_mcu_w, img_mcu_h; + +// definition of jpeg image component + struct + { + int id; + int h,v; + int tq; + int hd,ha; + int dc_pred; + + int x,y,w2,h2; + stbi_uc *data; + void *raw_data, *raw_coeff; + stbi_uc *linebuf; + short *coeff; // progressive only + int coeff_w, coeff_h; // number of 8x8 coefficient blocks + } img_comp[4]; + + stbi__uint32 code_buffer; // jpeg entropy-coded buffer + int code_bits; // number of valid bits + unsigned char marker; // marker seen while filling entropy buffer + int nomore; // flag if we saw a marker so must stop + + int progressive; + int spec_start; + int spec_end; + int succ_high; + int succ_low; + int eob_run; + int jfif; + int app14_color_transform; // Adobe APP14 tag + int rgb; + + int scan_n, order[4]; + int restart_interval, todo; + +// kernels + void (*idct_block_kernel)(stbi_uc *out, int out_stride, short data[64]); + void (*YCbCr_to_RGB_kernel)(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step); + stbi_uc *(*resample_row_hv_2_kernel)(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs); +} stbi__jpeg; + +static int stbi__build_huffman(stbi__huffman *h, int *count) +{ + int i,j,k=0; + unsigned int code; + // build size list for each symbol (from JPEG spec) + for (i=0; i < 16; ++i) + for (j=0; j < count[i]; ++j) + h->size[k++] = (stbi_uc) (i+1); + h->size[k] = 0; + + // compute actual symbols (from jpeg spec) + code = 0; + k = 0; + for(j=1; j <= 16; ++j) { + 
// compute delta to add to code to compute symbol id + h->delta[j] = k - code; + if (h->size[k] == j) { + while (h->size[k] == j) + h->code[k++] = (stbi__uint16) (code++); + if (code-1 >= (1u << j)) return stbi__err("bad code lengths","Corrupt JPEG"); + } + // compute largest code + 1 for this size, preshifted as needed later + h->maxcode[j] = code << (16-j); + code <<= 1; + } + h->maxcode[j] = 0xffffffff; + + // build non-spec acceleration table; 255 is flag for not-accelerated + memset(h->fast, 255, 1 << FAST_BITS); + for (i=0; i < k; ++i) { + int s = h->size[i]; + if (s <= FAST_BITS) { + int c = h->code[i] << (FAST_BITS-s); + int m = 1 << (FAST_BITS-s); + for (j=0; j < m; ++j) { + h->fast[c+j] = (stbi_uc) i; + } + } + } + return 1; +} + +// build a table that decodes both magnitude and value of small ACs in +// one go. +static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h) +{ + int i; + for (i=0; i < (1 << FAST_BITS); ++i) { + stbi_uc fast = h->fast[i]; + fast_ac[i] = 0; + if (fast < 255) { + int rs = h->values[fast]; + int run = (rs >> 4) & 15; + int magbits = rs & 15; + int len = h->size[fast]; + + if (magbits && len + magbits <= FAST_BITS) { + // magnitude code followed by receive_extend code + int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits); + int m = 1 << (magbits - 1); + if (k < m) k += (~0U << magbits) + 1; + // if the result is small enough, we can fit it in fast_ac table + if (k >= -128 && k <= 127) + fast_ac[i] = (stbi__int16) ((k * 256) + (run * 16) + (len + magbits)); + } + } + } +} + +static void stbi__grow_buffer_unsafe(stbi__jpeg *j) +{ + do { + unsigned int b = j->nomore ? 
0 : stbi__get8(j->s); + if (b == 0xff) { + int c = stbi__get8(j->s); + while (c == 0xff) c = stbi__get8(j->s); // consume fill bytes + if (c != 0) { + j->marker = (unsigned char) c; + j->nomore = 1; + return; + } + } + j->code_buffer |= b << (24 - j->code_bits); + j->code_bits += 8; + } while (j->code_bits <= 24); +} + +// (1 << n) - 1 +static const stbi__uint32 stbi__bmask[17]={0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535}; + +// decode a jpeg huffman value from the bitstream +stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h) +{ + unsigned int temp; + int c,k; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + // look at the top FAST_BITS and determine what symbol ID it is, + // if the code is <= FAST_BITS + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + k = h->fast[c]; + if (k < 255) { + int s = h->size[k]; + if (s > j->code_bits) + return -1; + j->code_buffer <<= s; + j->code_bits -= s; + return h->values[k]; + } + + // naive test is to shift the code_buffer down so k bits are + // valid, then test against maxcode. To speed this up, we've + // preshifted maxcode left so that it has (16-k) 0s at the + // end; in other words, regardless of the number of bits, it + // wants to be compared against something shifted to have 16; + // that way we don't need to shift inside the loop. + temp = j->code_buffer >> 16; + for (k=FAST_BITS+1 ; ; ++k) + if (temp < h->maxcode[k]) + break; + if (k == 17) { + // error! 
code not found + j->code_bits -= 16; + return -1; + } + + if (k > j->code_bits) + return -1; + + // convert the huffman code to the symbol id + c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k]; + STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]); + + // convert the id to a symbol + j->code_bits -= k; + j->code_buffer <<= k; + return h->values[c]; +} + +// bias[n] = (-1<code_bits < n) stbi__grow_buffer_unsafe(j); + + sgn = j->code_buffer >> 31; // sign bit always in MSB; 0 if MSB clear (positive), 1 if MSB set (negative) + k = stbi_lrot(j->code_buffer, n); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k + (stbi__jbias[n] & (sgn - 1)); +} + +// get some unsigned bits +stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n) +{ + unsigned int k; + if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + k = stbi_lrot(j->code_buffer, n); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k; +} + +stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j) +{ + unsigned int k; + if (j->code_bits < 1) stbi__grow_buffer_unsafe(j); + k = j->code_buffer; + j->code_buffer <<= 1; + --j->code_bits; + return k & 0x80000000; +} + +// given a value that's at position X in the zigzag stream, +// where does it appear in the 8x8 matrix coded as row-major? 
+static const stbi_uc stbi__jpeg_dezigzag[64+15] = +{ + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63, + // let corrupt input sample past end + 63, 63, 63, 63, 63, 63, 63, 63, + 63, 63, 63, 63, 63, 63, 63 +}; + +// decode one 64-entry block-- +static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi__uint16 *dequant) +{ + int diff,dc,k; + int t; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + t = stbi__jpeg_huff_decode(j, hdc); + if (t < 0 || t > 15) return stbi__err("bad huffman code","Corrupt JPEG"); + + // 0 all the ac values now so we can do it 32-bits at a time + memset(data,0,64*sizeof(data[0])); + + diff = t ? stbi__extend_receive(j, t) : 0; + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + data[0] = (short) (dc * dequant[0]); + + // decode AC components, see JPEG spec + k = 1; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + j->code_buffer <<= s; + j->code_bits -= s; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) * dequant[zig]); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (rs != 0xf0) break; // end block + k += 16; + } else { + k += r; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) * dequant[zig]); + } + } + } while (k < 64); + return 1; +} + +static int 
stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__huffman *hdc, int b) +{ + int diff,dc; + int t; + if (j->spec_end != 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + if (j->succ_high == 0) { + // first scan for DC coefficient, must be first + memset(data,0,64*sizeof(data[0])); // 0 all the ac values now + t = stbi__jpeg_huff_decode(j, hdc); + if (t < 0 || t > 15) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + diff = t ? stbi__extend_receive(j, t) : 0; + + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + data[0] = (short) (dc * (1 << j->succ_low)); + } else { + // refinement scan for DC coefficient + if (stbi__jpeg_get_bit(j)) + data[0] += (short) (1 << j->succ_low); + } + return 1; +} + +// @OPTIMIZE: store non-zigzagged during the decode passes, +// and only de-zigzag when dequantizing +static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__huffman *hac, stbi__int16 *fac) +{ + int k; + if (j->spec_start == 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->succ_high == 0) { + int shift = j->succ_low; + + if (j->eob_run) { + --j->eob_run; + return 1; + } + + k = j->spec_start; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + j->code_buffer <<= s; + j->code_bits -= s; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) * (1 << shift)); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r); + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + --j->eob_run; + break; + } + k += 16; + } else { + k += r; + zig = 
stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) * (1 << shift)); + } + } + } while (k <= j->spec_end); + } else { + // refinement scan for these AC coefficients + + short bit = (short) (1 << j->succ_low); + + if (j->eob_run) { + --j->eob_run; + for (k = j->spec_start; k <= j->spec_end; ++k) { + short *p = &data[stbi__jpeg_dezigzag[k]]; + if (*p != 0) + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } + } else { + k = j->spec_start; + do { + int r,s; + int rs = stbi__jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use the fast path here, advance-by-r is so slow, eh + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r) - 1; + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + r = 64; // force end of block + } else { + // r=15 s=0 should write 16 0s, so we just do + // a run of 15 0s and then write s (which is 0), + // so we don't have to do anything special here + } + } else { + if (s != 1) return stbi__err("bad huffman code", "Corrupt JPEG"); + // sign bit + if (stbi__jpeg_get_bit(j)) + s = bit; + else + s = -bit; + } + + // advance by r + while (k <= j->spec_end) { + short *p = &data[stbi__jpeg_dezigzag[k++]]; + if (*p != 0) { + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } else { + if (r == 0) { + *p = (short) s; + break; + } + --r; + } + } + } while (k <= j->spec_end); + } + } + return 1; +} + +// take a -128..127 value and stbi__clamp it and convert to 0..255 +stbi_inline static stbi_uc stbi__clamp(int x) +{ + // trick to use a single test to catch both cases + if ((unsigned int) x > 255) { + if (x < 0) return 0; + if (x > 255) return 255; + } + return (stbi_uc) x; +} + +#define stbi__f2f(x) ((int) (((x) * 4096 + 0.5))) +#define stbi__fsh(x) ((x) * 4096) + +// derived from jidctint -- DCT_ISLOW +#define 
STBI__IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \ + int t0,t1,t2,t3,p1,p2,p3,p4,p5,x0,x1,x2,x3; \ + p2 = s2; \ + p3 = s6; \ + p1 = (p2+p3) * stbi__f2f(0.5411961f); \ + t2 = p1 + p3*stbi__f2f(-1.847759065f); \ + t3 = p1 + p2*stbi__f2f( 0.765366865f); \ + p2 = s0; \ + p3 = s4; \ + t0 = stbi__fsh(p2+p3); \ + t1 = stbi__fsh(p2-p3); \ + x0 = t0+t3; \ + x3 = t0-t3; \ + x1 = t1+t2; \ + x2 = t1-t2; \ + t0 = s7; \ + t1 = s5; \ + t2 = s3; \ + t3 = s1; \ + p3 = t0+t2; \ + p4 = t1+t3; \ + p1 = t0+t3; \ + p2 = t1+t2; \ + p5 = (p3+p4)*stbi__f2f( 1.175875602f); \ + t0 = t0*stbi__f2f( 0.298631336f); \ + t1 = t1*stbi__f2f( 2.053119869f); \ + t2 = t2*stbi__f2f( 3.072711026f); \ + t3 = t3*stbi__f2f( 1.501321110f); \ + p1 = p5 + p1*stbi__f2f(-0.899976223f); \ + p2 = p5 + p2*stbi__f2f(-2.562915447f); \ + p3 = p3*stbi__f2f(-1.961570560f); \ + p4 = p4*stbi__f2f(-0.390180644f); \ + t3 += p1+p4; \ + t2 += p2+p3; \ + t1 += p2+p4; \ + t0 += p1+p3; + +static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64]) +{ + int i,val[64],*v=val; + stbi_uc *o; + short *d = data; + + // columns + for (i=0; i < 8; ++i,++d, ++v) { + // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing + if (d[ 8]==0 && d[16]==0 && d[24]==0 && d[32]==0 + && d[40]==0 && d[48]==0 && d[56]==0) { + // no shortcut 0 seconds + // (1|2|3|4|5|6|7)==0 0 seconds + // all separate -0.047 seconds + // 1 && 2|3 && 4|5 && 6|7: -0.047 seconds + int dcterm = d[0]*4; + v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm; + } else { + STBI__IDCT_1D(d[ 0],d[ 8],d[16],d[24],d[32],d[40],d[48],d[56]) + // constants scaled things up by 1<<12; let's bring them back + // down, but keep 2 extra bits of precision + x0 += 512; x1 += 512; x2 += 512; x3 += 512; + v[ 0] = (x0+t3) >> 10; + v[56] = (x0-t3) >> 10; + v[ 8] = (x1+t2) >> 10; + v[48] = (x1-t2) >> 10; + v[16] = (x2+t1) >> 10; + v[40] = (x2-t1) >> 10; + v[24] = (x3+t0) >> 10; + v[32] = (x3-t0) >> 10; + } + } + + for (i=0, v=val, o=out; i < 8; 
++i,v+=8,o+=out_stride) { + // no fast case since the first 1D IDCT spread components out + STBI__IDCT_1D(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7]) + // constants scaled things up by 1<<12, plus we had 1<<2 from first + // loop, plus horizontal and vertical each scale by sqrt(8) so together + // we've got an extra 1<<3, so 1<<17 total we need to remove. + // so we want to round that, which means adding 0.5 * 1<<17, + // aka 65536. Also, we'll end up with -128 to 127 that we want + // to encode as 0..255 by adding 128, so we'll add that before the shift + x0 += 65536 + (128<<17); + x1 += 65536 + (128<<17); + x2 += 65536 + (128<<17); + x3 += 65536 + (128<<17); + // tried computing the shifts into temps, or'ing the temps to see + // if any were out of range, but that was slower + o[0] = stbi__clamp((x0+t3) >> 17); + o[7] = stbi__clamp((x0-t3) >> 17); + o[1] = stbi__clamp((x1+t2) >> 17); + o[6] = stbi__clamp((x1-t2) >> 17); + o[2] = stbi__clamp((x2+t1) >> 17); + o[5] = stbi__clamp((x2-t1) >> 17); + o[3] = stbi__clamp((x3+t0) >> 17); + o[4] = stbi__clamp((x3-t0) >> 17); + } +} + +#ifdef STBI_SSE2 +// sse2 integer IDCT. not the fastest possible implementation but it +// produces bit-identical results to the generic C version so it's +// fully "transparent". +static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + // This is constructed to match our regular (generic) integer IDCT exactly. 
+ __m128i row0, row1, row2, row3, row4, row5, row6, row7; + __m128i tmp; + + // dot product constant: even elems=x, odd elems=y + #define dct_const(x,y) _mm_setr_epi16((x),(y),(x),(y),(x),(y),(x),(y)) + + // out(0) = c0[even]*x + c0[odd]*y (c0, x, y 16-bit, out 32-bit) + // out(1) = c1[even]*x + c1[odd]*y + #define dct_rot(out0,out1, x,y,c0,c1) \ + __m128i c0##lo = _mm_unpacklo_epi16((x),(y)); \ + __m128i c0##hi = _mm_unpackhi_epi16((x),(y)); \ + __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \ + __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \ + __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \ + __m128i out1##_h = _mm_madd_epi16(c0##hi, c1) + + // out = in << 12 (in 16-bit, out 32-bit) + #define dct_widen(out, in) \ + __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \ + __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4) + + // wide add + #define dct_wadd(out, a, b) \ + __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_add_epi32(a##_h, b##_h) + + // wide sub + #define dct_wsub(out, a, b) \ + __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_sub_epi32(a##_h, b##_h) + + // butterfly a/b, add bias, then shift by "s" and pack + #define dct_bfly32o(out0, out1, a,b,bias,s) \ + { \ + __m128i abiased_l = _mm_add_epi32(a##_l, bias); \ + __m128i abiased_h = _mm_add_epi32(a##_h, bias); \ + dct_wadd(sum, abiased, b); \ + dct_wsub(dif, abiased, b); \ + out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \ + out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \ + } + + // 8-bit interleave step (for transposes) + #define dct_interleave8(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi8(a, b); \ + b = _mm_unpackhi_epi8(tmp, b) + + // 16-bit interleave step (for transposes) + #define dct_interleave16(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi16(a, b); \ + b = _mm_unpackhi_epi16(tmp, b) + + #define dct_pass(bias,shift) \ + { \ + 
/* even part */ \ + dct_rot(t2e,t3e, row2,row6, rot0_0,rot0_1); \ + __m128i sum04 = _mm_add_epi16(row0, row4); \ + __m128i dif04 = _mm_sub_epi16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + dct_rot(y0o,y2o, row7,row3, rot2_0,rot2_1); \ + dct_rot(y1o,y3o, row5,row1, rot3_0,rot3_1); \ + __m128i sum17 = _mm_add_epi16(row1, row7); \ + __m128i sum35 = _mm_add_epi16(row3, row5); \ + dct_rot(y4o,y5o, sum17,sum35, rot1_0,rot1_1); \ + dct_wadd(x4, y0o, y4o); \ + dct_wadd(x5, y1o, y5o); \ + dct_wadd(x6, y2o, y5o); \ + dct_wadd(x7, y3o, y4o); \ + dct_bfly32o(row0,row7, x0,x7,bias,shift); \ + dct_bfly32o(row1,row6, x1,x6,bias,shift); \ + dct_bfly32o(row2,row5, x2,x5,bias,shift); \ + dct_bfly32o(row3,row4, x3,x4,bias,shift); \ + } + + __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f)); + __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f( 0.765366865f), stbi__f2f(0.5411961f)); + __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f)); + __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f)); + __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f( 0.298631336f), stbi__f2f(-1.961570560f)); + __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f( 3.072711026f)); + __m128i rot3_0 = dct_const(stbi__f2f(-0.390180644f) + stbi__f2f( 2.053119869f), stbi__f2f(-0.390180644f)); + __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f( 1.501321110f)); + + // rounding biases in column/row passes, see stbi__idct_block for explanation. 
+ __m128i bias_0 = _mm_set1_epi32(512); + __m128i bias_1 = _mm_set1_epi32(65536 + (128<<17)); + + // load + row0 = _mm_load_si128((const __m128i *) (data + 0*8)); + row1 = _mm_load_si128((const __m128i *) (data + 1*8)); + row2 = _mm_load_si128((const __m128i *) (data + 2*8)); + row3 = _mm_load_si128((const __m128i *) (data + 3*8)); + row4 = _mm_load_si128((const __m128i *) (data + 4*8)); + row5 = _mm_load_si128((const __m128i *) (data + 5*8)); + row6 = _mm_load_si128((const __m128i *) (data + 6*8)); + row7 = _mm_load_si128((const __m128i *) (data + 7*8)); + + // column pass + dct_pass(bias_0, 10); + + { + // 16bit 8x8 transpose pass 1 + dct_interleave16(row0, row4); + dct_interleave16(row1, row5); + dct_interleave16(row2, row6); + dct_interleave16(row3, row7); + + // transpose pass 2 + dct_interleave16(row0, row2); + dct_interleave16(row1, row3); + dct_interleave16(row4, row6); + dct_interleave16(row5, row7); + + // transpose pass 3 + dct_interleave16(row0, row1); + dct_interleave16(row2, row3); + dct_interleave16(row4, row5); + dct_interleave16(row6, row7); + } + + // row pass + dct_pass(bias_1, 17); + + { + // pack + __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7 + __m128i p1 = _mm_packus_epi16(row2, row3); + __m128i p2 = _mm_packus_epi16(row4, row5); + __m128i p3 = _mm_packus_epi16(row6, row7); + + // 8bit 8x8 transpose pass 1 + dct_interleave8(p0, p2); // a0e0a1e1... + dct_interleave8(p1, p3); // c0g0c1g1... + + // transpose pass 2 + dct_interleave8(p0, p1); // a0c0e0g0... + dct_interleave8(p2, p3); // b0d0f0h0... + + // transpose pass 3 + dct_interleave8(p0, p2); // a0b0c0d0... + dct_interleave8(p1, p3); // a4b4c4d4... 
+ + // store + _mm_storel_epi64((__m128i *) out, p0); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p0, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p2); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p2, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p1); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p1, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p3); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p3, 0x4e)); + } + +#undef dct_const +#undef dct_rot +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_interleave8 +#undef dct_interleave16 +#undef dct_pass +} + +#endif // STBI_SSE2 + +#ifdef STBI_NEON + +// NEON integer IDCT. should produce bit-identical +// results to the generic C version. +static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + int16x8_t row0, row1, row2, row3, row4, row5, row6, row7; + + int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f)); + int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f)); + int16x4_t rot0_2 = vdup_n_s16(stbi__f2f( 0.765366865f)); + int16x4_t rot1_0 = vdup_n_s16(stbi__f2f( 1.175875602f)); + int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f)); + int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f)); + int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f)); + int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f)); + int16x4_t rot3_0 = vdup_n_s16(stbi__f2f( 0.298631336f)); + int16x4_t rot3_1 = vdup_n_s16(stbi__f2f( 2.053119869f)); + int16x4_t rot3_2 = vdup_n_s16(stbi__f2f( 3.072711026f)); + int16x4_t rot3_3 = vdup_n_s16(stbi__f2f( 1.501321110f)); + +#define dct_long_mul(out, inq, coeff) \ + int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff) + +#define dct_long_mac(out, acc, inq, coeff) \ + int32x4_t out##_l = vmlal_s16(acc##_l, 
vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff) + +#define dct_widen(out, inq) \ + int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \ + int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12) + +// wide add +#define dct_wadd(out, a, b) \ + int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vaddq_s32(a##_h, b##_h) + +// wide sub +#define dct_wsub(out, a, b) \ + int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vsubq_s32(a##_h, b##_h) + +// butterfly a/b, then shift using "shiftop" by "s" and pack +#define dct_bfly32o(out0,out1, a,b,shiftop,s) \ + { \ + dct_wadd(sum, a, b); \ + dct_wsub(dif, a, b); \ + out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \ + out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \ + } + +#define dct_pass(shiftop, shift) \ + { \ + /* even part */ \ + int16x8_t sum26 = vaddq_s16(row2, row6); \ + dct_long_mul(p1e, sum26, rot0_0); \ + dct_long_mac(t2e, p1e, row6, rot0_1); \ + dct_long_mac(t3e, p1e, row2, rot0_2); \ + int16x8_t sum04 = vaddq_s16(row0, row4); \ + int16x8_t dif04 = vsubq_s16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + int16x8_t sum15 = vaddq_s16(row1, row5); \ + int16x8_t sum17 = vaddq_s16(row1, row7); \ + int16x8_t sum35 = vaddq_s16(row3, row5); \ + int16x8_t sum37 = vaddq_s16(row3, row7); \ + int16x8_t sumodd = vaddq_s16(sum17, sum35); \ + dct_long_mul(p5o, sumodd, rot1_0); \ + dct_long_mac(p1o, p5o, sum17, rot1_1); \ + dct_long_mac(p2o, p5o, sum35, rot1_2); \ + dct_long_mul(p3o, sum37, rot2_0); \ + dct_long_mul(p4o, sum15, rot2_1); \ + dct_wadd(sump13o, p1o, p3o); \ + dct_wadd(sump24o, p2o, p4o); \ + dct_wadd(sump23o, p2o, p3o); \ + dct_wadd(sump14o, p1o, p4o); \ + dct_long_mac(x4, sump13o, row7, rot3_0); \ + dct_long_mac(x5, sump24o, row5, rot3_1); \ + 
dct_long_mac(x6, sump23o, row3, rot3_2); \ + dct_long_mac(x7, sump14o, row1, rot3_3); \ + dct_bfly32o(row0,row7, x0,x7,shiftop,shift); \ + dct_bfly32o(row1,row6, x1,x6,shiftop,shift); \ + dct_bfly32o(row2,row5, x2,x5,shiftop,shift); \ + dct_bfly32o(row3,row4, x3,x4,shiftop,shift); \ + } + + // load + row0 = vld1q_s16(data + 0*8); + row1 = vld1q_s16(data + 1*8); + row2 = vld1q_s16(data + 2*8); + row3 = vld1q_s16(data + 3*8); + row4 = vld1q_s16(data + 4*8); + row5 = vld1q_s16(data + 5*8); + row6 = vld1q_s16(data + 6*8); + row7 = vld1q_s16(data + 7*8); + + // add DC bias + row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0)); + + // column pass + dct_pass(vrshrn_n_s32, 10); + + // 16bit 8x8 transpose + { +// these three map to a single VTRN.16, VTRN.32, and VSWP, respectively. +// whether compilers actually get this is another story, sadly. +#define dct_trn16(x, y) { int16x8x2_t t = vtrnq_s16(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn32(x, y) { int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); x = vreinterpretq_s16_s32(t.val[0]); y = vreinterpretq_s16_s32(t.val[1]); } +#define dct_trn64(x, y) { int16x8_t x0 = x; int16x8_t y0 = y; x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); } + + // pass 1 + dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6 + dct_trn16(row2, row3); + dct_trn16(row4, row5); + dct_trn16(row6, row7); + + // pass 2 + dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4 + dct_trn32(row1, row3); + dct_trn32(row4, row6); + dct_trn32(row5, row7); + + // pass 3 + dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0 + dct_trn64(row1, row5); + dct_trn64(row2, row6); + dct_trn64(row3, row7); + +#undef dct_trn16 +#undef dct_trn32 +#undef dct_trn64 + } + + // row pass + // vrshrn_n_s32 only supports shifts up to 16, we need + // 17. so do a non-rounding shift of 16 first then follow + // up with a rounding shift by 1. 
+ dct_pass(vshrn_n_s32, 16); + + { + // pack and round + uint8x8_t p0 = vqrshrun_n_s16(row0, 1); + uint8x8_t p1 = vqrshrun_n_s16(row1, 1); + uint8x8_t p2 = vqrshrun_n_s16(row2, 1); + uint8x8_t p3 = vqrshrun_n_s16(row3, 1); + uint8x8_t p4 = vqrshrun_n_s16(row4, 1); + uint8x8_t p5 = vqrshrun_n_s16(row5, 1); + uint8x8_t p6 = vqrshrun_n_s16(row6, 1); + uint8x8_t p7 = vqrshrun_n_s16(row7, 1); + + // again, these can translate into one instruction, but often don't. +#define dct_trn8_8(x, y) { uint8x8x2_t t = vtrn_u8(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn8_16(x, y) { uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); x = vreinterpret_u8_u16(t.val[0]); y = vreinterpret_u8_u16(t.val[1]); } +#define dct_trn8_32(x, y) { uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); x = vreinterpret_u8_u32(t.val[0]); y = vreinterpret_u8_u32(t.val[1]); } + + // sadly can't use interleaved stores here since we only write + // 8 bytes to each scan line! 
+ + // 8x8 8-bit transpose pass 1 + dct_trn8_8(p0, p1); + dct_trn8_8(p2, p3); + dct_trn8_8(p4, p5); + dct_trn8_8(p6, p7); + + // pass 2 + dct_trn8_16(p0, p2); + dct_trn8_16(p1, p3); + dct_trn8_16(p4, p6); + dct_trn8_16(p5, p7); + + // pass 3 + dct_trn8_32(p0, p4); + dct_trn8_32(p1, p5); + dct_trn8_32(p2, p6); + dct_trn8_32(p3, p7); + + // store + vst1_u8(out, p0); out += out_stride; + vst1_u8(out, p1); out += out_stride; + vst1_u8(out, p2); out += out_stride; + vst1_u8(out, p3); out += out_stride; + vst1_u8(out, p4); out += out_stride; + vst1_u8(out, p5); out += out_stride; + vst1_u8(out, p6); out += out_stride; + vst1_u8(out, p7); + +#undef dct_trn8_8 +#undef dct_trn8_16 +#undef dct_trn8_32 + } + +#undef dct_long_mul +#undef dct_long_mac +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_pass +} + +#endif // STBI_NEON + +#define STBI__MARKER_none 0xff +// if there's a pending marker from the entropy stream, return that +// otherwise, fetch from the stream and get a marker. if there's no +// marker, return 0xff, which is never a valid marker value +static stbi_uc stbi__get_marker(stbi__jpeg *j) +{ + stbi_uc x; + if (j->marker != STBI__MARKER_none) { x = j->marker; j->marker = STBI__MARKER_none; return x; } + x = stbi__get8(j->s); + if (x != 0xff) return STBI__MARKER_none; + while (x == 0xff) + x = stbi__get8(j->s); // consume repeated 0xff fill bytes + return x; +} + +// in each scan, we'll have scan_n components, and the order +// of the components is specified by order[] +#define STBI__RESTART(x) ((x) >= 0xd0 && (x) <= 0xd7) + +// after a restart interval, stbi__jpeg_reset the entropy decoder and +// the dc prediction +static void stbi__jpeg_reset(stbi__jpeg *j) +{ + j->code_bits = 0; + j->code_buffer = 0; + j->nomore = 0; + j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = j->img_comp[3].dc_pred = 0; + j->marker = STBI__MARKER_none; + j->todo = j->restart_interval ? 
j->restart_interval : 0x7fffffff; + j->eob_run = 0; + // no more than 1<<31 MCUs if no restart_interal? that's plenty safe, + // since we don't even allow 1<<30 pixels +} + +static int stbi__parse_entropy_coded_data(stbi__jpeg *z) +{ + stbi__jpeg_reset(z); + if (!z->progressive) { + if (z->scan_n == 1) { + int i,j; + STBI_SIMD_ALIGN(short, data[64]); + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + // if it's NOT a restart, then just bail, so we get corrupt data + // rather than no data + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int i,j,k,x,y; + STBI_SIMD_ALIGN(short, data[64]); + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... 
process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x)*8; + int y2 = (j*z->img_comp[n].v + y)*8; + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*y2+x2, z->img_comp[n].w2, data); + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } else { + if (z->scan_n == 1) { + int i,j; + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + if (z->spec_start == 0) { + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } else { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha])) + return 0; + } + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int 
i,j,k,x,y; + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x); + int y2 = (j*z->img_comp[n].v + y); + short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w); + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } +} + +static void stbi__jpeg_dequantize(short *data, stbi__uint16 *dequant) +{ + int i; + for (i=0; i < 64; ++i) + data[i] *= dequant[i]; +} + +static void stbi__jpeg_finish(stbi__jpeg *z) +{ + if (z->progressive) { + // dequantize and idct the data + int i,j,n; + for (n=0; n < z->s->img_n; ++n) { + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]); + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + } + } + } + } +} + +static int stbi__process_marker(stbi__jpeg *z, int m) +{ + int L; + switch (m) { + case STBI__MARKER_none: // no marker found + return stbi__err("expected marker","Corrupt JPEG"); + + case 0xDD: // DRI - specify restart interval + if (stbi__get16be(z->s) != 4) return stbi__err("bad DRI len","Corrupt JPEG"); + z->restart_interval = 
stbi__get16be(z->s); + return 1; + + case 0xDB: // DQT - define quantization table + L = stbi__get16be(z->s)-2; + while (L > 0) { + int q = stbi__get8(z->s); + int p = q >> 4, sixteen = (p != 0); + int t = q & 15,i; + if (p != 0 && p != 1) return stbi__err("bad DQT type","Corrupt JPEG"); + if (t > 3) return stbi__err("bad DQT table","Corrupt JPEG"); + + for (i=0; i < 64; ++i) + z->dequant[t][stbi__jpeg_dezigzag[i]] = (stbi__uint16)(sixteen ? stbi__get16be(z->s) : stbi__get8(z->s)); + L -= (sixteen ? 129 : 65); + } + return L==0; + + case 0xC4: // DHT - define huffman table + L = stbi__get16be(z->s)-2; + while (L > 0) { + stbi_uc *v; + int sizes[16],i,n=0; + int q = stbi__get8(z->s); + int tc = q >> 4; + int th = q & 15; + if (tc > 1 || th > 3) return stbi__err("bad DHT header","Corrupt JPEG"); + for (i=0; i < 16; ++i) { + sizes[i] = stbi__get8(z->s); + n += sizes[i]; + } + L -= 17; + if (tc == 0) { + if (!stbi__build_huffman(z->huff_dc+th, sizes)) return 0; + v = z->huff_dc[th].values; + } else { + if (!stbi__build_huffman(z->huff_ac+th, sizes)) return 0; + v = z->huff_ac[th].values; + } + for (i=0; i < n; ++i) + v[i] = stbi__get8(z->s); + if (tc != 0) + stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th); + L -= n; + } + return L==0; + } + + // check for comment block or APP blocks + if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) { + L = stbi__get16be(z->s); + if (L < 2) { + if (m == 0xFE) + return stbi__err("bad COM len","Corrupt JPEG"); + else + return stbi__err("bad APP len","Corrupt JPEG"); + } + L -= 2; + + if (m == 0xE0 && L >= 5) { // JFIF APP0 segment + static const unsigned char tag[5] = {'J','F','I','F','\0'}; + int ok = 1; + int i; + for (i=0; i < 5; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 5; + if (ok) + z->jfif = 1; + } else if (m == 0xEE && L >= 12) { // Adobe APP14 segment + static const unsigned char tag[6] = {'A','d','o','b','e','\0'}; + int ok = 1; + int i; + for (i=0; i < 6; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 6; 
+ if (ok) { + stbi__get8(z->s); // version + stbi__get16be(z->s); // flags0 + stbi__get16be(z->s); // flags1 + z->app14_color_transform = stbi__get8(z->s); // color transform + L -= 6; + } + } + + stbi__skip(z->s, L); + return 1; + } + + return stbi__err("unknown marker","Corrupt JPEG"); +} + +// after we see SOS +static int stbi__process_scan_header(stbi__jpeg *z) +{ + int i; + int Ls = stbi__get16be(z->s); + z->scan_n = stbi__get8(z->s); + if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int) z->s->img_n) return stbi__err("bad SOS component count","Corrupt JPEG"); + if (Ls != 6+2*z->scan_n) return stbi__err("bad SOS len","Corrupt JPEG"); + for (i=0; i < z->scan_n; ++i) { + int id = stbi__get8(z->s), which; + int q = stbi__get8(z->s); + for (which = 0; which < z->s->img_n; ++which) + if (z->img_comp[which].id == id) + break; + if (which == z->s->img_n) return 0; // no match + z->img_comp[which].hd = q >> 4; if (z->img_comp[which].hd > 3) return stbi__err("bad DC huff","Corrupt JPEG"); + z->img_comp[which].ha = q & 15; if (z->img_comp[which].ha > 3) return stbi__err("bad AC huff","Corrupt JPEG"); + z->order[i] = which; + } + + { + int aa; + z->spec_start = stbi__get8(z->s); + z->spec_end = stbi__get8(z->s); // should be 63, but might be 0 + aa = stbi__get8(z->s); + z->succ_high = (aa >> 4); + z->succ_low = (aa & 15); + if (z->progressive) { + if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13) + return stbi__err("bad SOS", "Corrupt JPEG"); + } else { + if (z->spec_start != 0) return stbi__err("bad SOS","Corrupt JPEG"); + if (z->succ_high != 0 || z->succ_low != 0) return stbi__err("bad SOS","Corrupt JPEG"); + z->spec_end = 63; + } + } + + return 1; +} + +static int stbi__free_jpeg_components(stbi__jpeg *z, int ncomp, int why) +{ + int i; + for (i=0; i < ncomp; ++i) { + if (z->img_comp[i].raw_data) { + STBI_FREE(z->img_comp[i].raw_data); + z->img_comp[i].raw_data = NULL; + z->img_comp[i].data = NULL; + 
} + if (z->img_comp[i].raw_coeff) { + STBI_FREE(z->img_comp[i].raw_coeff); + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].coeff = 0; + } + if (z->img_comp[i].linebuf) { + STBI_FREE(z->img_comp[i].linebuf); + z->img_comp[i].linebuf = NULL; + } + } + return why; +} + +static int stbi__process_frame_header(stbi__jpeg *z, int scan) +{ + stbi__context *s = z->s; + int Lf,p,i,q, h_max=1,v_max=1,c; + Lf = stbi__get16be(s); if (Lf < 11) return stbi__err("bad SOF len","Corrupt JPEG"); // JPEG + p = stbi__get8(s); if (p != 8) return stbi__err("only 8-bit","JPEG format not supported: 8-bit only"); // JPEG baseline + s->img_y = stbi__get16be(s); if (s->img_y == 0) return stbi__err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG + s->img_x = stbi__get16be(s); if (s->img_x == 0) return stbi__err("0 width","Corrupt JPEG"); // JPEG requires + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + c = stbi__get8(s); + if (c != 3 && c != 1 && c != 4) return stbi__err("bad component count","Corrupt JPEG"); + s->img_n = c; + for (i=0; i < c; ++i) { + z->img_comp[i].data = NULL; + z->img_comp[i].linebuf = NULL; + } + + if (Lf != 8+3*s->img_n) return stbi__err("bad SOF len","Corrupt JPEG"); + + z->rgb = 0; + for (i=0; i < s->img_n; ++i) { + static const unsigned char rgb[3] = { 'R', 'G', 'B' }; + z->img_comp[i].id = stbi__get8(s); + if (s->img_n == 3 && z->img_comp[i].id == rgb[i]) + ++z->rgb; + q = stbi__get8(s); + z->img_comp[i].h = (q >> 4); if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi__err("bad H","Corrupt JPEG"); + z->img_comp[i].v = q & 15; if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi__err("bad V","Corrupt JPEG"); + z->img_comp[i].tq = stbi__get8(s); if (z->img_comp[i].tq > 3) return stbi__err("bad TQ","Corrupt JPEG"); + } + + if (scan != 
STBI__SCAN_load) return 1; + + if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) return stbi__err("too large", "Image too large to decode"); + + for (i=0; i < s->img_n; ++i) { + if (z->img_comp[i].h > h_max) h_max = z->img_comp[i].h; + if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v; + } + + // check that plane subsampling factors are integer ratios; our resamplers can't deal with fractional ratios + // and I've never seen a non-corrupted JPEG file actually use them + for (i=0; i < s->img_n; ++i) { + if (h_max % z->img_comp[i].h != 0) return stbi__err("bad H","Corrupt JPEG"); + if (v_max % z->img_comp[i].v != 0) return stbi__err("bad V","Corrupt JPEG"); + } + + // compute interleaved mcu info + z->img_h_max = h_max; + z->img_v_max = v_max; + z->img_mcu_w = h_max * 8; + z->img_mcu_h = v_max * 8; + // these sizes can't be more than 17 bits + z->img_mcu_x = (s->img_x + z->img_mcu_w-1) / z->img_mcu_w; + z->img_mcu_y = (s->img_y + z->img_mcu_h-1) / z->img_mcu_h; + + for (i=0; i < s->img_n; ++i) { + // number of effective pixels (e.g. for non-interleaved MCU) + z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max-1) / h_max; + z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max-1) / v_max; + // to simplify generation, we'll allocate enough memory to decode + // the bogus oversized data from using interleaved MCUs and their + // big blocks (e.g. 
a 16x16 iMCU on an image of width 33); we won't + // discard the extra data until colorspace conversion + // + // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier) + // so these muls can't overflow with 32-bit ints (which we require) + z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8; + z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8; + z->img_comp[i].coeff = 0; + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].linebuf = NULL; + z->img_comp[i].raw_data = stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15); + if (z->img_comp[i].raw_data == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); + // align blocks for idct using mmx/sse + z->img_comp[i].data = (stbi_uc*) (((size_t) z->img_comp[i].raw_data + 15) & ~15); + if (z->progressive) { + // w2, h2 are multiples of 8 (see above) + z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8; + z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8; + z->img_comp[i].raw_coeff = stbi__malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15); + if (z->img_comp[i].raw_coeff == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); + z->img_comp[i].coeff = (short*) (((size_t) z->img_comp[i].raw_coeff + 15) & ~15); + } + } + + return 1; +} + +// use comparisons since in some cases we handle more than one case (e.g. 
SOF) +#define stbi__DNL(x) ((x) == 0xdc) +#define stbi__SOI(x) ((x) == 0xd8) +#define stbi__EOI(x) ((x) == 0xd9) +#define stbi__SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2) +#define stbi__SOS(x) ((x) == 0xda) + +#define stbi__SOF_progressive(x) ((x) == 0xc2) + +static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan) +{ + int m; + z->jfif = 0; + z->app14_color_transform = -1; // valid values are 0,1,2 + z->marker = STBI__MARKER_none; // initialize cached marker to empty + m = stbi__get_marker(z); + if (!stbi__SOI(m)) return stbi__err("no SOI","Corrupt JPEG"); + if (scan == STBI__SCAN_type) return 1; + m = stbi__get_marker(z); + while (!stbi__SOF(m)) { + if (!stbi__process_marker(z,m)) return 0; + m = stbi__get_marker(z); + while (m == STBI__MARKER_none) { + // some files have extra padding after their blocks, so ok, we'll scan + if (stbi__at_eof(z->s)) return stbi__err("no SOF", "Corrupt JPEG"); + m = stbi__get_marker(z); + } + } + z->progressive = stbi__SOF_progressive(m); + if (!stbi__process_frame_header(z, scan)) return 0; + return 1; +} + +// decode image to YCbCr format +static int stbi__decode_jpeg_image(stbi__jpeg *j) +{ + int m; + for (m = 0; m < 4; m++) { + j->img_comp[m].raw_data = NULL; + j->img_comp[m].raw_coeff = NULL; + } + j->restart_interval = 0; + if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) return 0; + m = stbi__get_marker(j); + while (!stbi__EOI(m)) { + if (stbi__SOS(m)) { + if (!stbi__process_scan_header(j)) return 0; + if (!stbi__parse_entropy_coded_data(j)) return 0; + if (j->marker == STBI__MARKER_none ) { + // handle 0s at the end of image data from IP Kamera 9060 + while (!stbi__at_eof(j->s)) { + int x = stbi__get8(j->s); + if (x == 255) { + j->marker = stbi__get8(j->s); + break; + } + } + // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0 + } + } else if (stbi__DNL(m)) { + int Ld = stbi__get16be(j->s); + stbi__uint32 NL = stbi__get16be(j->s); + if (Ld != 4) return 
stbi__err("bad DNL len", "Corrupt JPEG"); + if (NL != j->s->img_y) return stbi__err("bad DNL height", "Corrupt JPEG"); + } else { + if (!stbi__process_marker(j, m)) return 0; + } + m = stbi__get_marker(j); + } + if (j->progressive) + stbi__jpeg_finish(j); + return 1; +} + +// static jfif-centered resampling (across block boundaries) + +typedef stbi_uc *(*resample_row_func)(stbi_uc *out, stbi_uc *in0, stbi_uc *in1, + int w, int hs); + +#define stbi__div4(x) ((stbi_uc) ((x) >> 2)) + +static stbi_uc *resample_row_1(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + STBI_NOTUSED(out); + STBI_NOTUSED(in_far); + STBI_NOTUSED(w); + STBI_NOTUSED(hs); + return in_near; +} + +static stbi_uc* stbi__resample_row_v_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples vertically for every one in input + int i; + STBI_NOTUSED(hs); + for (i=0; i < w; ++i) + out[i] = stbi__div4(3*in_near[i] + in_far[i] + 2); + return out; +} + +static stbi_uc* stbi__resample_row_h_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples horizontally for every one in input + int i; + stbi_uc *input = in_near; + + if (w == 1) { + // if only one sample, can't do any interpolation + out[0] = out[1] = input[0]; + return out; + } + + out[0] = input[0]; + out[1] = stbi__div4(input[0]*3 + input[1] + 2); + for (i=1; i < w-1; ++i) { + int n = 3*input[i]+2; + out[i*2+0] = stbi__div4(n+input[i-1]); + out[i*2+1] = stbi__div4(n+input[i+1]); + } + out[i*2+0] = stbi__div4(input[w-2]*3 + input[w-1] + 2); + out[i*2+1] = input[w-1]; + + STBI_NOTUSED(in_far); + STBI_NOTUSED(hs); + + return out; +} + +#define stbi__div16(x) ((stbi_uc) ((x) >> 4)) + +static stbi_uc *stbi__resample_row_hv_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i,t0,t1; + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + 
return out; + } + + t1 = 3*in_near[0] + in_far[0]; + out[0] = stbi__div4(t1+2); + for (i=1; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static stbi_uc *stbi__resample_row_hv_2_simd(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i=0,t0,t1; + + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + // process groups of 8 pixels for as long as we can. + // note we can't handle the last pixel in a row in this loop + // because we need to handle the filter boundary conditions. + for (; i < ((w-1) & ~7); i += 8) { +#if defined(STBI_SSE2) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + __m128i zero = _mm_setzero_si128(); + __m128i farb = _mm_loadl_epi64((__m128i *) (in_far + i)); + __m128i nearb = _mm_loadl_epi64((__m128i *) (in_near + i)); + __m128i farw = _mm_unpacklo_epi8(farb, zero); + __m128i nearw = _mm_unpacklo_epi8(nearb, zero); + __m128i diff = _mm_sub_epi16(farw, nearw); + __m128i nears = _mm_slli_epi16(nearw, 2); + __m128i curr = _mm_add_epi16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. 
+ __m128i prv0 = _mm_slli_si128(curr, 2); + __m128i nxt0 = _mm_srli_si128(curr, 2); + __m128i prev = _mm_insert_epi16(prv0, t1, 0); + __m128i next = _mm_insert_epi16(nxt0, 3*in_near[i+8] + in_far[i+8], 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. + __m128i bias = _mm_set1_epi16(8); + __m128i curs = _mm_slli_epi16(curr, 2); + __m128i prvd = _mm_sub_epi16(prev, curr); + __m128i nxtd = _mm_sub_epi16(next, curr); + __m128i curb = _mm_add_epi16(curs, bias); + __m128i even = _mm_add_epi16(prvd, curb); + __m128i odd = _mm_add_epi16(nxtd, curb); + + // interleave even and odd pixels, then undo scaling. + __m128i int0 = _mm_unpacklo_epi16(even, odd); + __m128i int1 = _mm_unpackhi_epi16(even, odd); + __m128i de0 = _mm_srli_epi16(int0, 4); + __m128i de1 = _mm_srli_epi16(int1, 4); + + // pack and write output + __m128i outv = _mm_packus_epi16(de0, de1); + _mm_storeu_si128((__m128i *) (out + i*2), outv); +#elif defined(STBI_NEON) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + uint8x8_t farb = vld1_u8(in_far + i); + uint8x8_t nearb = vld1_u8(in_near + i); + int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb)); + int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2)); + int16x8_t curr = vaddq_s16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. 
+ int16x8_t prv0 = vextq_s16(curr, curr, 7); + int16x8_t nxt0 = vextq_s16(curr, curr, 1); + int16x8_t prev = vsetq_lane_s16(t1, prv0, 0); + int16x8_t next = vsetq_lane_s16(3*in_near[i+8] + in_far[i+8], nxt0, 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. + int16x8_t curs = vshlq_n_s16(curr, 2); + int16x8_t prvd = vsubq_s16(prev, curr); + int16x8_t nxtd = vsubq_s16(next, curr); + int16x8_t even = vaddq_s16(curs, prvd); + int16x8_t odd = vaddq_s16(curs, nxtd); + + // undo scaling and round, then store with even/odd phases interleaved + uint8x8x2_t o; + o.val[0] = vqrshrun_n_s16(even, 4); + o.val[1] = vqrshrun_n_s16(odd, 4); + vst2_u8(out + i*2, o); +#endif + + // "previous" value for next iter + t1 = 3*in_near[i+7] + in_far[i+7]; + } + + t0 = t1; + t1 = 3*in_near[i] + in_far[i]; + out[i*2] = stbi__div16(3*t1 + t0 + 8); + + for (++i; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} +#endif + +static stbi_uc *stbi__resample_row_generic(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // resample with nearest-neighbor + int i,j; + STBI_NOTUSED(in_far); + for (i=0; i < w; ++i) + for (j=0; j < hs; ++j) + out[i*hs+j] = in_near[i]; + return out; +} + +// this is a reduced-precision calculation of YCbCr-to-RGB introduced +// to make sure the code produces the same results in both SIMD and scalar +#define stbi__float2fixed(x) (((int) ((x) * 4096.0f + 0.5f)) << 8) +static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step) +{ + int i; + for (i=0; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int 
cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + (cr*-stbi__float2fixed(0.71414f)) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc const *pcb, stbi_uc const *pcr, int count, int step) +{ + int i = 0; + +#ifdef STBI_SSE2 + // step == 3 is pretty ugly on the final interleave, and i'm not convinced + // it's useful in practice (you wouldn't use it for textures, for example). + // so just accelerate step == 4 case. + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. 
+ __m128i signflip = _mm_set1_epi8(-0x80); + __m128i cr_const0 = _mm_set1_epi16( (short) ( 1.40200f*4096.0f+0.5f)); + __m128i cr_const1 = _mm_set1_epi16( - (short) ( 0.71414f*4096.0f+0.5f)); + __m128i cb_const0 = _mm_set1_epi16( - (short) ( 0.34414f*4096.0f+0.5f)); + __m128i cb_const1 = _mm_set1_epi16( (short) ( 1.77200f*4096.0f+0.5f)); + __m128i y_bias = _mm_set1_epi8((char) (unsigned char) 128); + __m128i xw = _mm_set1_epi16(255); // alpha channel + + for (; i+7 < count; i += 8) { + // load + __m128i y_bytes = _mm_loadl_epi64((__m128i *) (y+i)); + __m128i cr_bytes = _mm_loadl_epi64((__m128i *) (pcr+i)); + __m128i cb_bytes = _mm_loadl_epi64((__m128i *) (pcb+i)); + __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128 + __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128 + + // unpack to short (and left-shift cr, cb by 8) + __m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes); + __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased); + __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased); + + // color transform + __m128i yws = _mm_srli_epi16(yw, 4); + __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw); + __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw); + __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1); + __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1); + __m128i rws = _mm_add_epi16(cr0, yws); + __m128i gwt = _mm_add_epi16(cb0, yws); + __m128i bws = _mm_add_epi16(yws, cb1); + __m128i gws = _mm_add_epi16(gwt, cr1); + + // descale + __m128i rw = _mm_srai_epi16(rws, 4); + __m128i bw = _mm_srai_epi16(bws, 4); + __m128i gw = _mm_srai_epi16(gws, 4); + + // back to byte, set up for transpose + __m128i brb = _mm_packus_epi16(rw, bw); + __m128i gxb = _mm_packus_epi16(gw, xw); + + // transpose to interleave channels + __m128i t0 = _mm_unpacklo_epi8(brb, gxb); + __m128i t1 = _mm_unpackhi_epi8(brb, gxb); + __m128i o0 = _mm_unpacklo_epi16(t0, t1); + __m128i o1 = _mm_unpackhi_epi16(t0, t1); + + // store + _mm_storeu_si128((__m128i *) (out + 0), o0); + 
_mm_storeu_si128((__m128i *) (out + 16), o1); + out += 32; + } + } +#endif + +#ifdef STBI_NEON + // in this version, step=3 support would be easy to add. but is there demand? + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. + uint8x8_t signflip = vdup_n_u8(0x80); + int16x8_t cr_const0 = vdupq_n_s16( (short) ( 1.40200f*4096.0f+0.5f)); + int16x8_t cr_const1 = vdupq_n_s16( - (short) ( 0.71414f*4096.0f+0.5f)); + int16x8_t cb_const0 = vdupq_n_s16( - (short) ( 0.34414f*4096.0f+0.5f)); + int16x8_t cb_const1 = vdupq_n_s16( (short) ( 1.77200f*4096.0f+0.5f)); + + for (; i+7 < count; i += 8) { + // load + uint8x8_t y_bytes = vld1_u8(y + i); + uint8x8_t cr_bytes = vld1_u8(pcr + i); + uint8x8_t cb_bytes = vld1_u8(pcb + i); + int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip)); + int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip)); + + // expand to s16 + int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4)); + int16x8_t crw = vshll_n_s8(cr_biased, 7); + int16x8_t cbw = vshll_n_s8(cb_biased, 7); + + // color transform + int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0); + int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0); + int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1); + int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1); + int16x8_t rws = vaddq_s16(yws, cr0); + int16x8_t gws = vaddq_s16(vaddq_s16(yws, cb0), cr1); + int16x8_t bws = vaddq_s16(yws, cb1); + + // undo scaling, round, convert to byte + uint8x8x4_t o; + o.val[0] = vqrshrun_n_s16(rws, 4); + o.val[1] = vqrshrun_n_s16(gws, 4); + o.val[2] = vqrshrun_n_s16(bws, 4); + o.val[3] = vdup_n_u8(255); + + // store, interleaving r/g/b/a + vst4_u8(out, o); + out += 8*4; + } + } +#endif + + for (; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + cr*-stbi__float2fixed(0.71414f) + ((cb*-stbi__float2fixed(0.34414f)) & 
0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} +#endif + +// set up the kernels +static void stbi__setup_jpeg(stbi__jpeg *j) +{ + j->idct_block_kernel = stbi__idct_block; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2; + +#ifdef STBI_SSE2 + if (stbi__sse2_available()) { + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; + } +#endif + +#ifdef STBI_NEON + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; +#endif +} + +// clean up the temporary component buffers +static void stbi__cleanup_jpeg(stbi__jpeg *j) +{ + stbi__free_jpeg_components(j, j->s->img_n, 0); +} + +typedef struct +{ + resample_row_func resample; + stbi_uc *line0,*line1; + int hs,vs; // expansion factor in each axis + int w_lores; // horizontal pixels pre-expansion + int ystep; // how far through vertical expansion we are + int ypos; // which pre-expansion row we're on +} stbi__resample; + +// fast 0..255 * 0..255 => 0..255 rounded multiplication +static stbi_uc stbi__blinn_8x8(stbi_uc x, stbi_uc y) +{ + unsigned int t = x*y + 128; + return (stbi_uc) ((t + (t >>8)) >> 8); +} + +static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp) +{ + int n, decode_n, is_rgb; + z->s->img_n = 0; // make stbi__cleanup_jpeg safe + + // validate req_comp + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + + // load a jpeg image from 
whichever source, but leave in YCbCr format + if (!stbi__decode_jpeg_image(z)) { stbi__cleanup_jpeg(z); return NULL; } + + // determine actual number of components to generate + n = req_comp ? req_comp : z->s->img_n >= 3 ? 3 : 1; + + is_rgb = z->s->img_n == 3 && (z->rgb == 3 || (z->app14_color_transform == 0 && !z->jfif)); + + if (z->s->img_n == 3 && n < 3 && !is_rgb) + decode_n = 1; + else + decode_n = z->s->img_n; + + // nothing to do if no components requested; check this now to avoid + // accessing uninitialized coutput[0] later + if (decode_n <= 0) { stbi__cleanup_jpeg(z); return NULL; } + + // resample and color-convert + { + int k; + unsigned int i,j; + stbi_uc *output; + stbi_uc *coutput[4] = { NULL, NULL, NULL, NULL }; + + stbi__resample res_comp[4]; + + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + + // allocate line buffer big enough for upsampling off the edges + // with upsample factor of 4 + z->img_comp[k].linebuf = (stbi_uc *) stbi__malloc(z->s->img_x + 3); + if (!z->img_comp[k].linebuf) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + r->hs = z->img_h_max / z->img_comp[k].h; + r->vs = z->img_v_max / z->img_comp[k].v; + r->ystep = r->vs >> 1; + r->w_lores = (z->s->img_x + r->hs-1) / r->hs; + r->ypos = 0; + r->line0 = r->line1 = z->img_comp[k].data; + + if (r->hs == 1 && r->vs == 1) r->resample = resample_row_1; + else if (r->hs == 1 && r->vs == 2) r->resample = stbi__resample_row_v_2; + else if (r->hs == 2 && r->vs == 1) r->resample = stbi__resample_row_h_2; + else if (r->hs == 2 && r->vs == 2) r->resample = z->resample_row_hv_2_kernel; + else r->resample = stbi__resample_row_generic; + } + + // can't error after this so, this is safe + output = (stbi_uc *) stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1); + if (!output) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + // now go ahead and resample + for (j=0; j < z->s->img_y; ++j) { + stbi_uc *out = output + n * 
z->s->img_x * j; + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + int y_bot = r->ystep >= (r->vs >> 1); + coutput[k] = r->resample(z->img_comp[k].linebuf, + y_bot ? r->line1 : r->line0, + y_bot ? r->line0 : r->line1, + r->w_lores, r->hs); + if (++r->ystep >= r->vs) { + r->ystep = 0; + r->line0 = r->line1; + if (++r->ypos < z->img_comp[k].y) + r->line1 += z->img_comp[k].w2; + } + } + if (n >= 3) { + stbi_uc *y = coutput[0]; + if (z->s->img_n == 3) { + if (is_rgb) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = y[i]; + out[1] = coutput[1][i]; + out[2] = coutput[2][i]; + out[3] = 255; + out += n; + } + } else { + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else if (z->s->img_n == 4) { + if (z->app14_color_transform == 0) { // CMYK + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(coutput[0][i], m); + out[1] = stbi__blinn_8x8(coutput[1][i], m); + out[2] = stbi__blinn_8x8(coutput[2][i], m); + out[3] = 255; + out += n; + } + } else if (z->app14_color_transform == 2) { // YCCK + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(255 - out[0], m); + out[1] = stbi__blinn_8x8(255 - out[1], m); + out[2] = stbi__blinn_8x8(255 - out[2], m); + out += n; + } + } else { // YCbCr + alpha? 
Ignore the fourth channel for now + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else + for (i=0; i < z->s->img_x; ++i) { + out[0] = out[1] = out[2] = y[i]; + out[3] = 255; // not used if n==3 + out += n; + } + } else { + if (is_rgb) { + if (n == 1) + for (i=0; i < z->s->img_x; ++i) + *out++ = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + else { + for (i=0; i < z->s->img_x; ++i, out += 2) { + out[0] = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + out[1] = 255; + } + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 0) { + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + stbi_uc r = stbi__blinn_8x8(coutput[0][i], m); + stbi_uc g = stbi__blinn_8x8(coutput[1][i], m); + stbi_uc b = stbi__blinn_8x8(coutput[2][i], m); + out[0] = stbi__compute_y(r, g, b); + out[1] = 255; + out += n; + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 2) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = stbi__blinn_8x8(255 - coutput[0][i], coutput[3][i]); + out[1] = 255; + out += n; + } + } else { + stbi_uc *y = coutput[0]; + if (n == 1) + for (i=0; i < z->s->img_x; ++i) out[i] = y[i]; + else + for (i=0; i < z->s->img_x; ++i) { *out++ = y[i]; *out++ = 255; } + } + } + } + stbi__cleanup_jpeg(z); + *out_x = z->s->img_x; + *out_y = z->s->img_y; + if (comp) *comp = z->s->img_n >= 3 ? 
3 : 1; // report original components, not output + return output; + } +} + +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + unsigned char* result; + stbi__jpeg* j = (stbi__jpeg*) stbi__malloc(sizeof(stbi__jpeg)); + if (!j) return stbi__errpuc("outofmem", "Out of memory"); + STBI_NOTUSED(ri); + j->s = s; + stbi__setup_jpeg(j); + result = load_jpeg_image(j, x,y,comp,req_comp); + STBI_FREE(j); + return result; +} + +static int stbi__jpeg_test(stbi__context *s) +{ + int r; + stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg)); + if (!j) return stbi__err("outofmem", "Out of memory"); + j->s = s; + stbi__setup_jpeg(j); + r = stbi__decode_jpeg_header(j, STBI__SCAN_type); + stbi__rewind(s); + STBI_FREE(j); + return r; +} + +static int stbi__jpeg_info_raw(stbi__jpeg *j, int *x, int *y, int *comp) +{ + if (!stbi__decode_jpeg_header(j, STBI__SCAN_header)) { + stbi__rewind( j->s ); + return 0; + } + if (x) *x = j->s->img_x; + if (y) *y = j->s->img_y; + if (comp) *comp = j->s->img_n >= 3 ? 
3 : 1; + return 1; +} + +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp) +{ + int result; + stbi__jpeg* j = (stbi__jpeg*) (stbi__malloc(sizeof(stbi__jpeg))); + if (!j) return stbi__err("outofmem", "Out of memory"); + j->s = s; + result = stbi__jpeg_info_raw(j, x, y, comp); + STBI_FREE(j); + return result; +} +#endif + +// public domain zlib decode v0.2 Sean Barrett 2006-11-18 +// simple implementation +// - all input must be provided in an upfront buffer +// - all output is written to a single output buffer (can malloc/realloc) +// performance +// - fast huffman + +#ifndef STBI_NO_ZLIB + +// fast-way is faster to check than jpeg huffman, but slow way is slower +#define STBI__ZFAST_BITS 9 // accelerate all cases in default tables +#define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1) +#define STBI__ZNSYMS 288 // number of symbols in literal/length alphabet + +// zlib-style huffman encoding +// (jpegs packs from left, zlib from right, so can't share code) +typedef struct +{ + stbi__uint16 fast[1 << STBI__ZFAST_BITS]; + stbi__uint16 firstcode[16]; + int maxcode[17]; + stbi__uint16 firstsymbol[16]; + stbi_uc size[STBI__ZNSYMS]; + stbi__uint16 value[STBI__ZNSYMS]; +} stbi__zhuffman; + +stbi_inline static int stbi__bitreverse16(int n) +{ + n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1); + n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2); + n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4); + n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8); + return n; +} + +stbi_inline static int stbi__bit_reverse(int v, int bits) +{ + STBI_ASSERT(bits <= 16); + // to bit reverse n bits, reverse 16 and shift + // e.g. 
11 bits, bit reverse and shift away 5 + return stbi__bitreverse16(v) >> (16-bits); +} + +static int stbi__zbuild_huffman(stbi__zhuffman *z, const stbi_uc *sizelist, int num) +{ + int i,k=0; + int code, next_code[16], sizes[17]; + + // DEFLATE spec for generating codes + memset(sizes, 0, sizeof(sizes)); + memset(z->fast, 0, sizeof(z->fast)); + for (i=0; i < num; ++i) + ++sizes[sizelist[i]]; + sizes[0] = 0; + for (i=1; i < 16; ++i) + if (sizes[i] > (1 << i)) + return stbi__err("bad sizes", "Corrupt PNG"); + code = 0; + for (i=1; i < 16; ++i) { + next_code[i] = code; + z->firstcode[i] = (stbi__uint16) code; + z->firstsymbol[i] = (stbi__uint16) k; + code = (code + sizes[i]); + if (sizes[i]) + if (code-1 >= (1 << i)) return stbi__err("bad codelengths","Corrupt PNG"); + z->maxcode[i] = code << (16-i); // preshift for inner loop + code <<= 1; + k += sizes[i]; + } + z->maxcode[16] = 0x10000; // sentinel + for (i=0; i < num; ++i) { + int s = sizelist[i]; + if (s) { + int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s]; + stbi__uint16 fastv = (stbi__uint16) ((s << 9) | i); + z->size [c] = (stbi_uc ) s; + z->value[c] = (stbi__uint16) i; + if (s <= STBI__ZFAST_BITS) { + int j = stbi__bit_reverse(next_code[s],s); + while (j < (1 << STBI__ZFAST_BITS)) { + z->fast[j] = fastv; + j += (1 << s); + } + } + ++next_code[s]; + } + } + return 1; +} + +// zlib-from-memory implementation for PNG reading +// because PNG allows splitting the zlib stream arbitrarily, +// and it's annoying structurally to have PNG call ZLIB call PNG, +// we require PNG read all the IDATs and combine them into a single +// memory buffer + +typedef struct +{ + stbi_uc *zbuffer, *zbuffer_end; + int num_bits; + stbi__uint32 code_buffer; + + char *zout; + char *zout_start; + char *zout_end; + int z_expandable; + + stbi__zhuffman z_length, z_distance; +} stbi__zbuf; + +stbi_inline static int stbi__zeof(stbi__zbuf *z) +{ + return (z->zbuffer >= z->zbuffer_end); +} + +stbi_inline static stbi_uc 
stbi__zget8(stbi__zbuf *z) +{ + return stbi__zeof(z) ? 0 : *z->zbuffer++; +} + +static void stbi__fill_bits(stbi__zbuf *z) +{ + do { + if (z->code_buffer >= (1U << z->num_bits)) { + z->zbuffer = z->zbuffer_end; /* treat this as EOF so we fail. */ + return; + } + z->code_buffer |= (unsigned int) stbi__zget8(z) << z->num_bits; + z->num_bits += 8; + } while (z->num_bits <= 24); +} + +stbi_inline static unsigned int stbi__zreceive(stbi__zbuf *z, int n) +{ + unsigned int k; + if (z->num_bits < n) stbi__fill_bits(z); + k = z->code_buffer & ((1 << n) - 1); + z->code_buffer >>= n; + z->num_bits -= n; + return k; +} + +static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s,k; + // not resolved by fast table, so compute it the slow way + // use jpeg approach, which requires MSbits at top + k = stbi__bit_reverse(a->code_buffer, 16); + for (s=STBI__ZFAST_BITS+1; ; ++s) + if (k < z->maxcode[s]) + break; + if (s >= 16) return -1; // invalid code! + // code size is s, so: + b = (k >> (16-s)) - z->firstcode[s] + z->firstsymbol[s]; + if (b >= STBI__ZNSYMS) return -1; // some data was corrupt somewhere! + if (z->size[b] != s) return -1; // was originally an assert, but report failure instead. + a->code_buffer >>= s; + a->num_bits -= s; + return z->value[b]; +} + +stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s; + if (a->num_bits < 16) { + if (stbi__zeof(a)) { + return -1; /* report error for unexpected end of data. 
*/ + } + stbi__fill_bits(a); + } + b = z->fast[a->code_buffer & STBI__ZFAST_MASK]; + if (b) { + s = b >> 9; + a->code_buffer >>= s; + a->num_bits -= s; + return b & 511; + } + return stbi__zhuffman_decode_slowpath(a, z); +} + +static int stbi__zexpand(stbi__zbuf *z, char *zout, int n) // need to make room for n bytes +{ + char *q; + unsigned int cur, limit, old_limit; + z->zout = zout; + if (!z->z_expandable) return stbi__err("output buffer limit","Corrupt PNG"); + cur = (unsigned int) (z->zout - z->zout_start); + limit = old_limit = (unsigned) (z->zout_end - z->zout_start); + if (UINT_MAX - cur < (unsigned) n) return stbi__err("outofmem", "Out of memory"); + while (cur + n > limit) { + if(limit > UINT_MAX / 2) return stbi__err("outofmem", "Out of memory"); + limit *= 2; + } + q = (char *) STBI_REALLOC_SIZED(z->zout_start, old_limit, limit); + STBI_NOTUSED(old_limit); + if (q == NULL) return stbi__err("outofmem", "Out of memory"); + z->zout_start = q; + z->zout = q + cur; + z->zout_end = q + limit; + return 1; +} + +static const int stbi__zlength_base[31] = { + 3,4,5,6,7,8,9,10,11,13, + 15,17,19,23,27,31,35,43,51,59, + 67,83,99,115,131,163,195,227,258,0,0 }; + +static const int stbi__zlength_extra[31]= +{ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 }; + +static const int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193, +257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0}; + +static const int stbi__zdist_extra[32] = +{ 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; + +static int stbi__parse_huffman_block(stbi__zbuf *a) +{ + char *zout = a->zout; + for(;;) { + int z = stbi__zhuffman_decode(a, &a->z_length); + if (z < 256) { + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); // error in huffman codes + if (zout >= a->zout_end) { + if (!stbi__zexpand(a, zout, 1)) return 0; + zout = a->zout; + } + *zout++ = (char) z; + } else { + stbi_uc *p; + int len,dist; + if (z == 256) { 
+ a->zout = zout; + return 1; + } + z -= 257; + len = stbi__zlength_base[z]; + if (stbi__zlength_extra[z]) len += stbi__zreceive(a, stbi__zlength_extra[z]); + z = stbi__zhuffman_decode(a, &a->z_distance); + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); + dist = stbi__zdist_base[z]; + if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]); + if (zout - a->zout_start < dist) return stbi__err("bad dist","Corrupt PNG"); + if (zout + len > a->zout_end) { + if (!stbi__zexpand(a, zout, len)) return 0; + zout = a->zout; + } + p = (stbi_uc *) (zout - dist); + if (dist == 1) { // run of one byte; common in images. + stbi_uc v = *p; + if (len) { do *zout++ = v; while (--len); } + } else { + if (len) { do *zout++ = *p++; while (--len); } + } + } + } +} + +static int stbi__compute_huffman_codes(stbi__zbuf *a) +{ + static const stbi_uc length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 }; + stbi__zhuffman z_codelength; + stbi_uc lencodes[286+32+137];//padding for maximum single op + stbi_uc codelength_sizes[19]; + int i,n; + + int hlit = stbi__zreceive(a,5) + 257; + int hdist = stbi__zreceive(a,5) + 1; + int hclen = stbi__zreceive(a,4) + 4; + int ntot = hlit + hdist; + + memset(codelength_sizes, 0, sizeof(codelength_sizes)); + for (i=0; i < hclen; ++i) { + int s = stbi__zreceive(a,3); + codelength_sizes[length_dezigzag[i]] = (stbi_uc) s; + } + if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0; + + n = 0; + while (n < ntot) { + int c = stbi__zhuffman_decode(a, &z_codelength); + if (c < 0 || c >= 19) return stbi__err("bad codelengths", "Corrupt PNG"); + if (c < 16) + lencodes[n++] = (stbi_uc) c; + else { + stbi_uc fill = 0; + if (c == 16) { + c = stbi__zreceive(a,2)+3; + if (n == 0) return stbi__err("bad codelengths", "Corrupt PNG"); + fill = lencodes[n-1]; + } else if (c == 17) { + c = stbi__zreceive(a,3)+3; + } else if (c == 18) { + c = stbi__zreceive(a,7)+11; + } else { + return stbi__err("bad 
codelengths", "Corrupt PNG"); + } + if (ntot - n < c) return stbi__err("bad codelengths", "Corrupt PNG"); + memset(lencodes+n, fill, c); + n += c; + } + } + if (n != ntot) return stbi__err("bad codelengths","Corrupt PNG"); + if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, lencodes+hlit, hdist)) return 0; + return 1; +} + +static int stbi__parse_uncompressed_block(stbi__zbuf *a) +{ + stbi_uc header[4]; + int len,nlen,k; + if (a->num_bits & 7) + stbi__zreceive(a, a->num_bits & 7); // discard + // drain the bit-packed data into header + k = 0; + while (a->num_bits > 0) { + header[k++] = (stbi_uc) (a->code_buffer & 255); // suppress MSVC run-time check + a->code_buffer >>= 8; + a->num_bits -= 8; + } + if (a->num_bits < 0) return stbi__err("zlib corrupt","Corrupt PNG"); + // now fill header the normal way + while (k < 4) + header[k++] = stbi__zget8(a); + len = header[1] * 256 + header[0]; + nlen = header[3] * 256 + header[2]; + if (nlen != (len ^ 0xffff)) return stbi__err("zlib corrupt","Corrupt PNG"); + if (a->zbuffer + len > a->zbuffer_end) return stbi__err("read past buffer","Corrupt PNG"); + if (a->zout + len > a->zout_end) + if (!stbi__zexpand(a, a->zout, len)) return 0; + memcpy(a->zout, a->zbuffer, len); + a->zbuffer += len; + a->zout += len; + return 1; +} + +static int stbi__parse_zlib_header(stbi__zbuf *a) +{ + int cmf = stbi__zget8(a); + int cm = cmf & 15; + /* int cinfo = cmf >> 4; */ + int flg = stbi__zget8(a); + if (stbi__zeof(a)) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec + if ((cmf*256+flg) % 31 != 0) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec + if (flg & 32) return stbi__err("no preset dict","Corrupt PNG"); // preset dictionary not allowed in png + if (cm != 8) return stbi__err("bad compression","Corrupt PNG"); // DEFLATE required for png + // window = 1 << (8 + cinfo)... 
but who cares, we fully buffer output + return 1; +} + +static const stbi_uc stbi__zdefault_length[STBI__ZNSYMS] = +{ + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8 +}; +static const stbi_uc stbi__zdefault_distance[32] = +{ + 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5 +}; +/* +Init algorithm: +{ + int i; // use <= to match clearly with spec + for (i=0; i <= 143; ++i) stbi__zdefault_length[i] = 8; + for ( ; i <= 255; ++i) stbi__zdefault_length[i] = 9; + for ( ; i <= 279; ++i) stbi__zdefault_length[i] = 7; + for ( ; i <= 287; ++i) stbi__zdefault_length[i] = 8; + + for (i=0; i <= 31; ++i) stbi__zdefault_distance[i] = 5; +} +*/ + +static int stbi__parse_zlib(stbi__zbuf *a, int parse_header) +{ + int final, type; + if (parse_header) + if (!stbi__parse_zlib_header(a)) return 0; + a->num_bits = 0; + a->code_buffer = 0; + do { + final = stbi__zreceive(a,1); + type = stbi__zreceive(a,2); + if (type == 0) { + if (!stbi__parse_uncompressed_block(a)) return 0; + } else if (type == 3) { + return 0; + } else { + if (type == 1) { + // use fixed code lengths + if (!stbi__zbuild_huffman(&a->z_length , stbi__zdefault_length , STBI__ZNSYMS)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) return 0; + } else { + if (!stbi__compute_huffman_codes(a)) return 0; + } + if (!stbi__parse_huffman_block(a)) return 0; + } + } while (!final); + return 1; +} + +static int 
stbi__do_zlib(stbi__zbuf *a, char *obuf, int olen, int exp, int parse_header) +{ + a->zout_start = obuf; + a->zout = obuf; + a->zout_end = obuf + olen; + a->z_expandable = exp; + + return stbi__parse_zlib(a, parse_header); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, 1)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF char *stbi_zlib_decode_malloc(char const *buffer, int len, int *outlen) +{ + return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, char const *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 1)) + return (int) (a.zout - a.zout_start); + else + return -1; +} + +STBIDEF char *stbi_zlib_decode_noheader_malloc(char const *buffer, int len, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(16384); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer+len; + if (stbi__do_zlib(&a, p, 16384, 1, 0)) { + if (outlen) 
*outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 0)) + return (int) (a.zout - a.zout_start); + else + return -1; +} +#endif + +// public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18 +// simple implementation +// - only 8-bit samples +// - no CRC checking +// - allocates lots of intermediate memory +// - avoids problem of streaming data between subsystems +// - avoids explicit window management +// performance +// - uses stb_zlib, a PD zlib implementation with fast huffman decoding + +#ifndef STBI_NO_PNG +typedef struct +{ + stbi__uint32 length; + stbi__uint32 type; +} stbi__pngchunk; + +static stbi__pngchunk stbi__get_chunk_header(stbi__context *s) +{ + stbi__pngchunk c; + c.length = stbi__get32be(s); + c.type = stbi__get32be(s); + return c; +} + +static int stbi__check_png_header(stbi__context *s) +{ + static const stbi_uc png_sig[8] = { 137,80,78,71,13,10,26,10 }; + int i; + for (i=0; i < 8; ++i) + if (stbi__get8(s) != png_sig[i]) return stbi__err("bad png sig","Not a PNG"); + return 1; +} + +typedef struct +{ + stbi__context *s; + stbi_uc *idata, *expanded, *out; + int depth; +} stbi__png; + + +enum { + STBI__F_none=0, + STBI__F_sub=1, + STBI__F_up=2, + STBI__F_avg=3, + STBI__F_paeth=4, + // synthetic filters used for first scanline to avoid needing a dummy row of 0s + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static stbi_uc first_row_filter[5] = +{ + STBI__F_none, + STBI__F_sub, + STBI__F_none, + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static int stbi__paeth(int a, int b, int c) +{ + int p = a + b - c; + int pa = abs(p-a); + int pb = abs(p-b); + int pc = abs(p-c); + if (pa <= pb && pa <= pc) return a; + if (pb <= pc) 
return b; + return c; +} + +static const stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 }; + +// create the png data from post-deflated data +static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color) +{ + int bytes = (depth == 16? 2 : 1); + stbi__context *s = a->s; + stbi__uint32 i,j,stride = x*out_n*bytes; + stbi__uint32 img_len, img_width_bytes; + int k; + int img_n = s->img_n; // copy it into a local for later + + int output_bytes = out_n*bytes; + int filter_bytes = img_n*bytes; + int width = x; + + STBI_ASSERT(out_n == s->img_n || out_n == s->img_n+1); + a->out = (stbi_uc *) stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into + if (!a->out) return stbi__err("outofmem", "Out of memory"); + + if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) return stbi__err("too large", "Corrupt PNG"); + img_width_bytes = (((img_n * x * depth) + 7) >> 3); + img_len = (img_width_bytes + 1) * y; + + // we used to check for exact match between raw_len and img_len on non-interlaced PNGs, + // but issue #276 reported a PNG in the wild that had extra data at the end (all zeros), + // so just check for raw_len < img_len always. 
+ if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG"); + + for (j=0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *prior; + int filter = *raw++; + + if (filter > 4) + return stbi__err("invalid filter","Corrupt PNG"); + + if (depth < 8) { + if (img_width_bytes > x) return stbi__err("invalid width","Corrupt PNG"); + cur += x*out_n - img_width_bytes; // store output to the rightmost img_len bytes, so we can decode in place + filter_bytes = 1; + width = img_width_bytes; + } + prior = cur - stride; // bugfix: need to compute this after 'cur +=' computation above + + // if first row, use special filter that doesn't sample previous row + if (j == 0) filter = first_row_filter[filter]; + + // handle first byte explicitly + for (k=0; k < filter_bytes; ++k) { + switch (filter) { + case STBI__F_none : cur[k] = raw[k]; break; + case STBI__F_sub : cur[k] = raw[k]; break; + case STBI__F_up : cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break; + case STBI__F_avg : cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); break; + case STBI__F_paeth : cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0,prior[k],0)); break; + case STBI__F_avg_first : cur[k] = raw[k]; break; + case STBI__F_paeth_first: cur[k] = raw[k]; break; + } + } + + if (depth == 8) { + if (img_n != out_n) + cur[img_n] = 255; // first pixel + raw += img_n; + cur += out_n; + prior += out_n; + } else if (depth == 16) { + if (img_n != out_n) { + cur[filter_bytes] = 255; // first pixel top byte + cur[filter_bytes+1] = 255; // first pixel bottom byte + } + raw += filter_bytes; + cur += output_bytes; + prior += output_bytes; + } else { + raw += 1; + cur += 1; + prior += 1; + } + + // this is a little gross, so that we don't switch per-pixel or per-component + if (depth < 8 || img_n == out_n) { + int nk = (width - 1)*filter_bytes; + #define STBI__CASE(f) \ + case f: \ + for (k=0; k < nk; ++k) + switch (filter) { + // "none" filter turns into a memcpy here; make that explicit. 
+ case STBI__F_none: memcpy(cur, raw, nk); break; + STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); } break; + STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; + STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); } break; + STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],prior[k],prior[k-filter_bytes])); } break; + STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); } break; + STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],0,0)); } break; + } + #undef STBI__CASE + raw += nk; + } else { + STBI_ASSERT(img_n+1 == out_n); + #define STBI__CASE(f) \ + case f: \ + for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) \ + for (k=0; k < filter_bytes; ++k) + switch (filter) { + STBI__CASE(STBI__F_none) { cur[k] = raw[k]; } break; + STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k- output_bytes]); } break; + STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; + STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k- output_bytes])>>1)); } break; + STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],prior[k],prior[k- output_bytes])); } break; + STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k- output_bytes] >> 1)); } break; + STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],0,0)); } break; + } + #undef STBI__CASE + + // the loop above sets the high byte of the pixels' alpha, but for + // 16 bit png files we also need the low byte set. we'll do that here. 
+ if (depth == 16) { + cur = a->out + stride*j; // start at the beginning of the row again + for (i=0; i < x; ++i,cur+=output_bytes) { + cur[filter_bytes+1] = 255; + } + } + } + } + + // we make a separate pass to expand bits to pixels; for performance, + // this could run two scanlines behind the above code, so it won't + // intefere with filtering but will still be in the cache. + if (depth < 8) { + for (j=0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *in = a->out + stride*j + x*out_n - img_width_bytes; + // unpack 1/2/4-bit into a 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit + // png guarante byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop + stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range + + // note that the final byte might overshoot and write more data than desired. + // we can allocate enough data that this never writes out of memory, but it + // could also overwrite the next scanline. can it overwrite non-empty data + // on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel. 
+ // so we need to explicitly clamp the final ones + + if (depth == 4) { + for (k=x*img_n; k >= 2; k-=2, ++in) { + *cur++ = scale * ((*in >> 4) ); + *cur++ = scale * ((*in ) & 0x0f); + } + if (k > 0) *cur++ = scale * ((*in >> 4) ); + } else if (depth == 2) { + for (k=x*img_n; k >= 4; k-=4, ++in) { + *cur++ = scale * ((*in >> 6) ); + *cur++ = scale * ((*in >> 4) & 0x03); + *cur++ = scale * ((*in >> 2) & 0x03); + *cur++ = scale * ((*in ) & 0x03); + } + if (k > 0) *cur++ = scale * ((*in >> 6) ); + if (k > 1) *cur++ = scale * ((*in >> 4) & 0x03); + if (k > 2) *cur++ = scale * ((*in >> 2) & 0x03); + } else if (depth == 1) { + for (k=x*img_n; k >= 8; k-=8, ++in) { + *cur++ = scale * ((*in >> 7) ); + *cur++ = scale * ((*in >> 6) & 0x01); + *cur++ = scale * ((*in >> 5) & 0x01); + *cur++ = scale * ((*in >> 4) & 0x01); + *cur++ = scale * ((*in >> 3) & 0x01); + *cur++ = scale * ((*in >> 2) & 0x01); + *cur++ = scale * ((*in >> 1) & 0x01); + *cur++ = scale * ((*in ) & 0x01); + } + if (k > 0) *cur++ = scale * ((*in >> 7) ); + if (k > 1) *cur++ = scale * ((*in >> 6) & 0x01); + if (k > 2) *cur++ = scale * ((*in >> 5) & 0x01); + if (k > 3) *cur++ = scale * ((*in >> 4) & 0x01); + if (k > 4) *cur++ = scale * ((*in >> 3) & 0x01); + if (k > 5) *cur++ = scale * ((*in >> 2) & 0x01); + if (k > 6) *cur++ = scale * ((*in >> 1) & 0x01); + } + if (img_n != out_n) { + int q; + // insert alpha = 255 + cur = a->out + stride*j; + if (img_n == 1) { + for (q=x-1; q >= 0; --q) { + cur[q*2+1] = 255; + cur[q*2+0] = cur[q]; + } + } else { + STBI_ASSERT(img_n == 3); + for (q=x-1; q >= 0; --q) { + cur[q*4+3] = 255; + cur[q*4+2] = cur[q*3+2]; + cur[q*4+1] = cur[q*3+1]; + cur[q*4+0] = cur[q*3+0]; + } + } + } + } + } else if (depth == 16) { + // force the image data from big-endian to platform-native. + // this is done in a separate pass due to the decoding relying + // on the data being untouched, but could probably be done + // per-line during decode if care is taken. 
+ stbi_uc *cur = a->out; + stbi__uint16 *cur16 = (stbi__uint16*)cur; + + for(i=0; i < x*y*out_n; ++i,cur16++,cur+=2) { + *cur16 = (cur[0] << 8) | cur[1]; + } + } + + return 1; +} + +static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced) +{ + int bytes = (depth == 16 ? 2 : 1); + int out_bytes = out_n * bytes; + stbi_uc *final; + int p; + if (!interlaced) + return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color); + + // de-interlacing + final = (stbi_uc *) stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0); + if (!final) return stbi__err("outofmem", "Out of memory"); + for (p=0; p < 7; ++p) { + int xorig[] = { 0,4,0,2,0,1,0 }; + int yorig[] = { 0,0,4,0,2,0,1 }; + int xspc[] = { 8,8,4,4,2,2,1 }; + int yspc[] = { 8,8,8,4,4,2,2 }; + int i,j,x,y; + // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1 + x = (a->s->img_x - xorig[p] + xspc[p]-1) / xspc[p]; + y = (a->s->img_y - yorig[p] + yspc[p]-1) / yspc[p]; + if (x && y) { + stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y; + if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) { + STBI_FREE(final); + return 0; + } + for (j=0; j < y; ++j) { + for (i=0; i < x; ++i) { + int out_y = j*yspc[p]+yorig[p]; + int out_x = i*xspc[p]+xorig[p]; + memcpy(final + out_y*a->s->img_x*out_bytes + out_x*out_bytes, + a->out + (j*x+i)*out_bytes, out_bytes); + } + } + STBI_FREE(a->out); + image_data += img_len; + image_data_len -= img_len; + } + } + a->out = final; + + return 1; +} + +static int stbi__compute_transparency(stbi__png *z, stbi_uc tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + // compute color-based transparency, assuming we've + // already got 255 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) 
{ + for (i=0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 0 : 255); + p += 2; + } + } else { + for (i=0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__compute_transparency16(stbi__png *z, stbi__uint16 tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi__uint16 *p = (stbi__uint16*) z->out; + + // compute color-based transparency, assuming we've + // already got 65535 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i = 0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 0 : 65535); + p += 2; + } + } else { + for (i = 0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int pal_img_n) +{ + stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y; + stbi_uc *p, *temp_out, *orig = a->out; + + p = (stbi_uc *) stbi__malloc_mad2(pixel_count, pal_img_n, 0); + if (p == NULL) return stbi__err("outofmem", "Out of memory"); + + // between here and free(out) below, exitting would leak + temp_out = p; + + if (pal_img_n == 3) { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p += 3; + } + } else { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p[3] = palette[n+3]; + p += 4; + } + } + STBI_FREE(a->out); + a->out = temp_out; + + STBI_NOTUSED(len); + + return 1; +} + +static int stbi__unpremultiply_on_load_global = 0; +static int stbi__de_iphone_flag_global = 0; + +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply) +{ + stbi__unpremultiply_on_load_global = flag_true_if_should_unpremultiply; +} + +STBIDEF void 
stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert) +{ + stbi__de_iphone_flag_global = flag_true_if_should_convert; +} + +#ifndef STBI_THREAD_LOCAL +#define stbi__unpremultiply_on_load stbi__unpremultiply_on_load_global +#define stbi__de_iphone_flag stbi__de_iphone_flag_global +#else +static STBI_THREAD_LOCAL int stbi__unpremultiply_on_load_local, stbi__unpremultiply_on_load_set; +static STBI_THREAD_LOCAL int stbi__de_iphone_flag_local, stbi__de_iphone_flag_set; + +STBIDEF void stbi__unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply) +{ + stbi__unpremultiply_on_load_local = flag_true_if_should_unpremultiply; + stbi__unpremultiply_on_load_set = 1; +} + +STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert) +{ + stbi__de_iphone_flag_local = flag_true_if_should_convert; + stbi__de_iphone_flag_set = 1; +} + +#define stbi__unpremultiply_on_load (stbi__unpremultiply_on_load_set \ + ? stbi__unpremultiply_on_load_local \ + : stbi__unpremultiply_on_load_global) +#define stbi__de_iphone_flag (stbi__de_iphone_flag_set \ + ? 
stbi__de_iphone_flag_local \
                                                 : stbi__de_iphone_flag_global)
#endif // STBI_THREAD_LOCAL

// Undo the iphone (CgBI) PNG transform in-place on z->out: swap the B and R
// channels, and optionally un-premultiply alpha when img_out_n == 4 and the
// stbi__unpremultiply_on_load flag is set.
static void stbi__de_iphone(stbi__png *z)
{
   stbi__context *s = z->s;
   stbi__uint32 i, pixel_count = s->img_x * s->img_y;
   stbi_uc *p = z->out;

   if (s->img_out_n == 3) {  // convert bgr to rgb
      for (i=0; i < pixel_count; ++i) {
         stbi_uc t = p[0];
         p[0] = p[2];
         p[2] = t;
         p += 3;
      }
   } else {
      STBI_ASSERT(s->img_out_n == 4);
      if (stbi__unpremultiply_on_load) {
         // convert bgr to rgb and unpremultiply
         for (i=0; i < pixel_count; ++i) {
            stbi_uc a = p[3];
            stbi_uc t = p[0];
            if (a) {
               // 'half' implements round-to-nearest in the integer division
               stbi_uc half = a / 2;
               p[0] = (p[2] * 255 + half) / a;
               p[1] = (p[1] * 255 + half) / a;
               p[2] = ( t   * 255 + half) / a;
            } else {
               // alpha == 0: unpremultiply is undefined, just swap B/R
               p[0] = p[2];
               p[2] = t;
            }
            p += 4;
         }
      } else {
         // convert bgr to rgb
         for (i=0; i < pixel_count; ++i) {
            stbi_uc t = p[0];
            p[0] = p[2];
            p[2] = t;
            p += 4;
         }
      }
   }
}

// Pack a 4-character PNG chunk name into the big-endian 32-bit tag used by
// stbi__get_chunk_header().
#define STBI__PNG_TYPE(a,b,c,d)  (((unsigned) (a) << 24) + ((unsigned) (b) << 16) + ((unsigned) (c) << 8) + (unsigned) (d))

// Walk the PNG chunk stream. 'scan' selects how far to go: STBI__SCAN_type
// just validates the signature, STBI__SCAN_header stops once dimensions and
// component count are known, STBI__SCAN_load decodes the whole image into
// z->out. Returns 1 on success, 0 on failure (error recorded via stbi__err).
static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp)
{
   stbi_uc palette[1024], pal_img_n=0;
   stbi_uc has_trans=0, tc[3]={0};
   stbi__uint16 tc16[3];
   stbi__uint32 ioff=0, idata_limit=0, i, pal_len=0;
   int first=1,k,interlace=0, color=0, is_iphone=0;
   stbi__context *s = z->s;

   z->expanded = NULL;
   z->idata = NULL;
   z->out = NULL;

   if (!stbi__check_png_header(s)) return 0;

   if (scan == STBI__SCAN_type) return 1;

   for (;;) {
      stbi__pngchunk c = stbi__get_chunk_header(s);
      switch (c.type) {
         case STBI__PNG_TYPE('C','g','B','I'):
            // Apple's proprietary chunk: marks raw-deflate data and BGR order
            is_iphone = 1;
            stbi__skip(s, c.length);
            break;
         case STBI__PNG_TYPE('I','H','D','R'): {
            int comp,filter;
            if (!first) return stbi__err("multiple IHDR","Corrupt PNG");
            first = 0;
            if (c.length != 13) return stbi__err("bad IHDR len","Corrupt PNG");
            s->img_x = stbi__get32be(s);
            s->img_y = stbi__get32be(s);
            if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)");
            if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)");
            z->depth = stbi__get8(s);  if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 16)  return stbi__err("1/2/4/8/16-bit only","PNG not supported: 1/2/4/8/16-bit only");
            color = stbi__get8(s);  if (color > 6) return stbi__err("bad ctype","Corrupt PNG");
            if (color == 3 && z->depth == 16) return stbi__err("bad ctype","Corrupt PNG");
            if (color == 3) pal_img_n = 3; else if (color & 1) return stbi__err("bad ctype","Corrupt PNG");
            comp  = stbi__get8(s);  if (comp) return stbi__err("bad comp method","Corrupt PNG");
            filter= stbi__get8(s);  if (filter) return stbi__err("bad filter method","Corrupt PNG");
            interlace = stbi__get8(s); if (interlace>1) return stbi__err("bad interlace method","Corrupt PNG");
            if (!s->img_x || !s->img_y) return stbi__err("0-pixel image","Corrupt PNG");
            if (!pal_img_n) {
               s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0);
               // overflow guard: x * n * y must stay below 2^30
               if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode");
               if (scan == STBI__SCAN_header) return 1;
            } else {
               // if paletted, then pal_n is our final components, and
               // img_n is # components to decompress/filter.
               s->img_n = 1;
               if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large","Corrupt PNG");
               // if SCAN_header, have to scan to see if we have a tRNS
            }
            break;
         }

         case STBI__PNG_TYPE('P','L','T','E'):  {
            if (first) return stbi__err("first not IHDR", "Corrupt PNG");
            if (c.length > 256*3) return stbi__err("invalid PLTE","Corrupt PNG");
            pal_len = c.length / 3;
            if (pal_len * 3 != c.length) return stbi__err("invalid PLTE","Corrupt PNG");
            // palette is stored RGBA with alpha defaulted to 255 (tRNS may override)
            for (i=0; i < pal_len; ++i) {
               palette[i*4+0] = stbi__get8(s);
               palette[i*4+1] = stbi__get8(s);
               palette[i*4+2] = stbi__get8(s);
               palette[i*4+3] = 255;
            }
            break;
         }

         case STBI__PNG_TYPE('t','R','N','S'): {
            if (first) return stbi__err("first not IHDR", "Corrupt PNG");
            if (z->idata) return stbi__err("tRNS after IDAT","Corrupt PNG");
            if (pal_img_n) {
               if (scan == STBI__SCAN_header) { s->img_n = 4; return 1; }
               if (pal_len == 0) return stbi__err("tRNS before PLTE","Corrupt PNG");
               if (c.length > pal_len) return stbi__err("bad tRNS len","Corrupt PNG");
               pal_img_n = 4;
               for (i=0; i < c.length; ++i)
                  palette[i*4+3] = stbi__get8(s);
            } else {
               if (!(s->img_n & 1)) return stbi__err("tRNS with alpha","Corrupt PNG");
               if (c.length != (stbi__uint32) s->img_n*2) return stbi__err("bad tRNS len","Corrupt PNG");
               has_trans = 1;
               if (z->depth == 16) {
                  for (k = 0; k < s->img_n; ++k) tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is
               } else {
                  for (k = 0; k < s->img_n; ++k) tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger
               }
            }
            break;
         }

         case STBI__PNG_TYPE('I','D','A','T'): {
            if (first) return stbi__err("first not IHDR", "Corrupt PNG");
            if (pal_img_n && !pal_len) return stbi__err("no PLTE","Corrupt PNG");
            if (scan == STBI__SCAN_header) { s->img_n = pal_img_n; return 1; }
            if ((int)(ioff + c.length) < (int)ioff) return 0;  // int overflow check on accumulated size
            if (ioff + c.length > idata_limit) {
               // grow the compressed-data buffer by doubling (min 4096)
               stbi__uint32 idata_limit_old = idata_limit;
               stbi_uc *p;
               if (idata_limit == 0) idata_limit = c.length > 4096 ? c.length : 4096;
               while (ioff + c.length > idata_limit)
                  idata_limit *= 2;
               STBI_NOTUSED(idata_limit_old);
               p = (stbi_uc *) STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit); if (p == NULL) return stbi__err("outofmem", "Out of memory");
               z->idata = p;
            }
            if (!stbi__getn(s, z->idata+ioff,c.length)) return stbi__err("outofdata","Corrupt PNG");
            ioff += c.length;
            break;
         }

         case STBI__PNG_TYPE('I','E','N','D'): {
            stbi__uint32 raw_len, bpl;
            if (first) return stbi__err("first not IHDR", "Corrupt PNG");
            if (scan != STBI__SCAN_load) return 1;
            if (z->idata == NULL) return stbi__err("no IDAT","Corrupt PNG");
            // initial guess for decoded data size to avoid unnecessary reallocs
            bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component
            raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */;
            // iphone PNGs store raw deflate data (no zlib header), hence !is_iphone
            z->expanded = (stbi_uc *) stbi_zlib_decode_malloc_guesssize_headerflag((char *) z->idata, ioff, raw_len, (int *) &raw_len, !is_iphone);
            if (z->expanded == NULL) return 0; // zlib should set error
            STBI_FREE(z->idata); z->idata = NULL;
            if ((req_comp == s->img_n+1 && req_comp != 3 && !pal_img_n) || has_trans)
               s->img_out_n = s->img_n+1;
            else
               s->img_out_n = s->img_n;
            if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, z->depth, color, interlace)) return 0;
            if (has_trans) {
               if (z->depth == 16) {
                  if (!stbi__compute_transparency16(z, tc16, s->img_out_n)) return 0;
               } else {
                  if (!stbi__compute_transparency(z, tc, s->img_out_n)) return 0;
               }
            }
            if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2)
               stbi__de_iphone(z);
            if (pal_img_n) {
               // pal_img_n == 3 or 4
               s->img_n = pal_img_n; // record the actual colors we had
               s->img_out_n = pal_img_n;
               if (req_comp >= 3) s->img_out_n = req_comp;
               if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n))
                  return 0;
            } else if (has_trans) {
               // non-paletted image with tRNS -> source image has (constant) alpha
               ++s->img_n;
            }
            STBI_FREE(z->expanded); z->expanded = NULL;
            // end of PNG chunk, read and skip CRC
            stbi__get32be(s);
            return 1;
         }

         default:
            // if critical, fail
            if (first) return stbi__err("first not IHDR", "Corrupt PNG");
            if ((c.type & (1 << 29)) == 0) {
               #ifndef STBI_NO_FAILURE_STRINGS
               // not threadsafe
               static char invalid_chunk[] = "XXXX PNG chunk not known";
               invalid_chunk[0] = STBI__BYTECAST(c.type >> 24);
               invalid_chunk[1] = STBI__BYTECAST(c.type >> 16);
               invalid_chunk[2] = STBI__BYTECAST(c.type >>  8);
               invalid_chunk[3] = STBI__BYTECAST(c.type >>  0);
               #endif
               return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type");
            }
            stbi__skip(s, c.length);
            break;
      }
      // end of PNG chunk, read and skip CRC
      stbi__get32be(s);
   }
}

// Drive a full PNG load: parse + decode, then convert to req_comp channels
// if requested. Fills *x, *y, *n and ri->bits_per_channel; returns the pixel
// buffer (caller owns it) or NULL on failure. Always frees intermediates.
static void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri)
{
   void *result=NULL;
   if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error");
   if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) {
      if (p->depth <= 8)
         ri->bits_per_channel = 8;
      else if (p->depth == 16)
         ri->bits_per_channel = 16;
      else
         return stbi__errpuc("bad bits_per_channel", "PNG not supported: unsupported color depth");
      result = p->out;
      p->out = NULL;  // ownership transferred to 'result'
      if (req_comp && req_comp != p->s->img_out_n) {
         if (ri->bits_per_channel == 8)
            result = stbi__convert_format((unsigned char *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y);
         else
            result = stbi__convert_format16((stbi__uint16 *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y);
         p->s->img_out_n = req_comp;
         if (result == NULL) return result;
      }
      *x = p->s->img_x;
      *y = p->s->img_y;
      if (n) *n = p->s->img_n;
   }
   STBI_FREE(p->out);      p->out      = NULL;
   STBI_FREE(p->expanded); p->expanded = NULL;
   STBI_FREE(p->idata);    p->idata    = NULL;

   return result;
}
// Loader-table entry point for PNG: wraps stbi__do_png with a stack context.
static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
{
   stbi__png p;
   p.s = s;
   return stbi__do_png(&p, x,y,comp,req_comp, ri);
}

// Sniff the 8-byte PNG signature, rewinding the stream either way.
static int stbi__png_test(stbi__context *s)
{
   int r;
   r = stbi__check_png_header(s);
   stbi__rewind(s);
   return r;
}

// Header-only scan: report dimensions and component count without decoding.
// Rewinds on failure so other format probes can retry.
static int stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp)
{
   if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) {
      stbi__rewind( p->s );
      return 0;
   }
   if (x) *x = p->s->img_x;
   if (y) *y = p->s->img_y;
   if (comp) *comp = p->s->img_n;
   return 1;
}

static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp)
{
   stbi__png p;
   p.s = s;
   return stbi__png_info_raw(&p, x, y, comp);
}

// Returns 1 iff the stream is a PNG with 16-bit channel depth.
static int stbi__png_is16(stbi__context *s)
{
   stbi__png p;
   p.s = s;
   if (!stbi__png_info_raw(&p, NULL, NULL, NULL))
      return 0;
   if (p.depth != 16) {
      stbi__rewind(p.s);
      return 0;
   }
   return 1;
}
#endif

// Microsoft/Windows BMP image

#ifndef STBI_NO_BMP
// Probe for the "BM" magic plus a recognized DIB header size.
// Does NOT rewind; callers use stbi__bmp_test which does.
static int stbi__bmp_test_raw(stbi__context *s)
{
   int r;
   int sz;
   if (stbi__get8(s) != 'B') return 0;
   if (stbi__get8(s) != 'M') return 0;
   stbi__get32le(s); // discard filesize
   stbi__get16le(s); // discard reserved
   stbi__get16le(s); // discard reserved
   stbi__get32le(s); // discard data offset
   sz = stbi__get32le(s);
   // 12=BITMAPCOREHEADER, 40=BITMAPINFOHEADER, 56/108/124=V2-V5 variants
   r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124);
   return r;
}

static int stbi__bmp_test(stbi__context *s)
{
   int r = stbi__bmp_test_raw(s);
   stbi__rewind(s);
   return r;
}


// returns 0..31 for the highest set bit
static int stbi__high_bit(unsigned int z)
{
   int n=0;
   if (z == 0) return -1;
   // binary search over the bit positions, 4 bits at a time
   if (z >= 0x10000) { n += 16; z >>= 16; }
   if (z >= 0x00100) { n +=  8; z >>=  8; }
   if (z >= 0x00010) { n +=  4; z >>=  4; }
   if (z >= 0x00004) { n +=  2; z >>=  2; }
   if (z >= 0x00002) { n +=  1;/* >>=  1;*/ }
   return n;
}

// Population count (number of set bits) via parallel bit-summing.
static int stbi__bitcount(unsigned int a)
{
   a = (a & 0x55555555) + ((a >>  1) & 0x55555555); // max 2
   a = (a & 0x33333333) + ((a >>  2) & 0x33333333); // max 4
   a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits
   a = (a + (a >> 8)); // max 16 per 8 bits
   a = (a + (a >> 16)); // max 32 per 8 bits
   return a & 0xff;
}

// extract an arbitrarily-aligned N-bit value (N=bits)
// from v, and then make it 8-bits long and fractionally
// extend it to full full range.
static int stbi__shiftsigned(unsigned int v, int shift, int bits)
{
   // mul_table[bits] replicates the N-bit value across 8 bits so e.g. a
   // maximal 5-bit value maps to 255, not 248.
   static unsigned int mul_table[9] = {
      0,
      0xff/*0b11111111*/, 0x55/*0b01010101*/, 0x49/*0b01001001*/, 0x11/*0b00010001*/,
      0x21/*0b00100001*/, 0x41/*0b01000001*/, 0x81/*0b10000001*/, 0x01/*0b00000001*/,
   };
   static unsigned int shift_table[9] = {
      0, 0,0,1,0,2,4,6,0,
   };
   if (shift < 0)
      v <<= -shift;
   else
      v >>= shift;
   STBI_ASSERT(v < 256);
   v >>= (8-bits);
   STBI_ASSERT(bits >= 0 && bits <= 8);
   return (int) ((unsigned) v * mul_table[bits]) >> shift_table[bits];
}

// Parsed BMP header state shared between stbi__bmp_parse_header and the
// loaders: bpp, pixel-data offset, DIB header size, channel bitmasks, and
// how many header bytes have been consumed (extra_read).
typedef struct
{
   int bpp, offset, hsz;
   unsigned int mr,mg,mb,ma, all_a;
   int extra_read;
} stbi__bmp_data;

// Install the implied channel masks for uncompressed 16/32-bpp BMPs.
// Returns 1 on success, 0 for an unsupported compression value.
static int stbi__bmp_set_mask_defaults(stbi__bmp_data *info, int compress)
{
   // BI_BITFIELDS specifies masks explicitly, don't override
   if (compress == 3)
      return 1;

   if (compress == 0) {
      if (info->bpp == 16) {
         info->mr = 31u << 10;
         info->mg = 31u <<  5;
         info->mb = 31u <<  0;
      } else if (info->bpp == 32) {
         info->mr = 0xffu << 16;
         info->mg = 0xffu <<  8;
         info->mb = 0xffu <<  0;
         info->ma = 0xffu << 24;
         info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0
      } else {
         // otherwise, use defaults, which is all-0
         info->mr = info->mg = info->mb = info->ma = 0;
      }
      return 1;
   }
   return 0; // error
}

// Parse the BMP file header and whichever DIB header variant follows,
// filling *info. Returns (void*)1 on success, NULL (with error set) on
// failure. Leaves the stream positioned just past the parsed header.
static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info)
{
   int hsz;
   if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') return stbi__errpuc("not BMP", "Corrupt BMP");
   stbi__get32le(s); // discard filesize
   stbi__get16le(s); // discard reserved
   stbi__get16le(s); // discard reserved
   info->offset = stbi__get32le(s);
   info->hsz = hsz = stbi__get32le(s);
   info->mr = info->mg = info->mb = info->ma = 0;
   info->extra_read = 14;

   if (info->offset < 0) return stbi__errpuc("bad BMP", "bad BMP");

   if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return stbi__errpuc("unknown BMP", "BMP type not supported: unknown");
   if (hsz == 12) {
      // BITMAPCOREHEADER stores 16-bit dimensions
      s->img_x = stbi__get16le(s);
      s->img_y = stbi__get16le(s);
   } else {
      s->img_x = stbi__get32le(s);
      s->img_y = stbi__get32le(s);
   }
   if (stbi__get16le(s) != 1) return stbi__errpuc("bad BMP", "bad BMP");
   info->bpp = stbi__get16le(s);
   if (hsz != 12) {
      int compress = stbi__get32le(s);
      if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE");
      if (compress >= 4) return stbi__errpuc("BMP JPEG/PNG", "BMP type not supported: unsupported compression"); // this includes PNG/JPEG modes
      if (compress == 3 && info->bpp != 16 && info->bpp != 32) return stbi__errpuc("bad BMP", "bad BMP"); // bitfields requires 16 or 32 bits/pixel
      stbi__get32le(s); // discard sizeof
      stbi__get32le(s); // discard hres
      stbi__get32le(s); // discard vres
      stbi__get32le(s); // discard colorsused
      stbi__get32le(s); // discard max important
      if (hsz == 40 || hsz == 56) {
         if (hsz == 56) {
            stbi__get32le(s);
            stbi__get32le(s);
            stbi__get32le(s);
            stbi__get32le(s);
         }
         if (info->bpp == 16 || info->bpp == 32) {
            if (compress == 0) {
               stbi__bmp_set_mask_defaults(info, compress);
            } else if (compress == 3) {
               info->mr = stbi__get32le(s);
               info->mg = stbi__get32le(s);
               info->mb = stbi__get32le(s);
               info->extra_read += 12;
               // not documented, but generated by photoshop and handled by mspaint
               if (info->mr == info->mg && info->mg == info->mb) {
                  // ?!?!?
                  return stbi__errpuc("bad BMP", "bad BMP");
               }
            } else
               return stbi__errpuc("bad BMP", "bad BMP");
         }
      } else {
         // V4/V5 header
         int i;
         if (hsz != 108 && hsz != 124)
            return stbi__errpuc("bad BMP", "bad BMP");
         info->mr = stbi__get32le(s);
         info->mg = stbi__get32le(s);
         info->mb = stbi__get32le(s);
         info->ma = stbi__get32le(s);
         if (compress != 3) // override mr/mg/mb unless in BI_BITFIELDS mode, as per docs
            stbi__bmp_set_mask_defaults(info, compress);
         stbi__get32le(s); // discard color space
         for (i=0; i < 12; ++i)
            stbi__get32le(s); // discard color space parameters
         if (hsz == 124) {
            stbi__get32le(s); // discard rendering intent
            stbi__get32le(s); // discard offset of profile data
            stbi__get32le(s); // discard size of profile data
            stbi__get32le(s); // discard reserved
         }
      }
   }
   return (void *) 1;
}


// Decode a BMP into an 8-bit interleaved pixel buffer. Handles 1/4/8-bpp
// palettized images and 16/24/32-bpp images with arbitrary channel bitmasks;
// honors bottom-up row order (the usual case) via flip_vertically.
static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
{
   stbi_uc *out;
   unsigned int mr=0,mg=0,mb=0,ma=0, all_a;
   stbi_uc pal[256][4];
   int psize=0,i,j,width;
   int flip_vertically, pad, target;
   stbi__bmp_data info;
   STBI_NOTUSED(ri);

   info.all_a = 255;
   if (stbi__bmp_parse_header(s, &info) == NULL)
      return NULL; // error code already set

   // positive height means bottom-up storage; we flip after decoding
   flip_vertically = ((int) s->img_y) > 0;
   s->img_y = abs((int) s->img_y);

   if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");
   if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");

   mr = info.mr;
   mg = info.mg;
   mb = info.mb;
   ma = info.ma;
   all_a = info.all_a;

   if (info.hsz == 12) {
      if (info.bpp < 24)
         psize = (info.offset - info.extra_read - 24) / 3;
   } else {
      if (info.bpp < 16)
         psize = (info.offset - info.extra_read - info.hsz) >> 2;
   }
   if (psize == 0) {
      // no palette: data offset must point exactly at the current position
      if (info.offset != s->callback_already_read + (s->img_buffer - s->img_buffer_original)) {
        return stbi__errpuc("bad offset", "Corrupt BMP");
      }
   }

   if (info.bpp == 24 && ma == 0xff000000)
      s->img_n = 3;
   else
      s->img_n = ma ? 4 : 3;
   if (req_comp && req_comp >= 3) // we can directly decode 3 or 4
      target = req_comp;
   else
      target = s->img_n; // if they want monochrome, we'll post-convert

   // sanity-check size
   if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0))
      return stbi__errpuc("too large", "Corrupt BMP");

   out = (stbi_uc *) stbi__malloc_mad3(target, s->img_x, s->img_y, 0);
   if (!out) return stbi__errpuc("outofmem", "Out of memory");
   if (info.bpp < 16) {
      // palettized path: read palette (stored BGR[A]), then expand indices
      int z=0;
      if (psize == 0 || psize > 256) { STBI_FREE(out); return stbi__errpuc("invalid", "Corrupt BMP"); }
      for (i=0; i < psize; ++i) {
         pal[i][2] = stbi__get8(s);
         pal[i][1] = stbi__get8(s);
         pal[i][0] = stbi__get8(s);
         if (info.hsz != 12) stbi__get8(s);
         pal[i][3] = 255;
      }
      stbi__skip(s, info.offset - info.extra_read - info.hsz - psize * (info.hsz == 12 ? 3 : 4));
      if (info.bpp == 1) width = (s->img_x + 7) >> 3;
      else if (info.bpp == 4) width = (s->img_x + 1) >> 1;
      else if (info.bpp == 8) width = s->img_x;
      else { STBI_FREE(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); }
      pad = (-width)&3;  // BMP rows are padded to 4-byte boundaries
      if (info.bpp == 1) {
         for (j=0; j < (int) s->img_y; ++j) {
            int bit_offset = 7, v = stbi__get8(s);
            for (i=0; i < (int) s->img_x; ++i) {
               int color = (v>>bit_offset)&0x1;
               out[z++] = pal[color][0];
               out[z++] = pal[color][1];
               out[z++] = pal[color][2];
               if (target == 4) out[z++] = 255;
               if (i+1 == (int) s->img_x) break;
               if((--bit_offset) < 0) {
                  bit_offset = 7;
                  v = stbi__get8(s);
               }
            }
            stbi__skip(s, pad);
         }
      } else {
         // 4/8-bpp: process two pixels per iteration (one byte holds two
         // 4-bit indices)
         for (j=0; j < (int) s->img_y; ++j) {
            for (i=0; i < (int) s->img_x; i += 2) {
               int v=stbi__get8(s),v2=0;
               if (info.bpp == 4) {
                  v2 = v & 15;
                  v >>= 4;
               }
               out[z++] = pal[v][0];
               out[z++] = pal[v][1];
               out[z++] = pal[v][2];
               if (target == 4) out[z++] = 255;
               if (i+1 == (int) s->img_x) break;
               v = (info.bpp == 8) ? stbi__get8(s) : v2;
               out[z++] = pal[v][0];
               out[z++] = pal[v][1];
               out[z++] = pal[v][2];
               if (target == 4) out[z++] = 255;
            }
            stbi__skip(s, pad);
         }
      }
   } else {
      // truecolor path: 'easy' fast paths for plain BGR(A); otherwise
      // extract channels through the bitmask machinery
      int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0;
      int z = 0;
      int easy=0;
      stbi__skip(s, info.offset - info.extra_read - info.hsz);
      if (info.bpp == 24) width = 3 * s->img_x;
      else if (info.bpp == 16) width = 2*s->img_x;
      else /* bpp = 32 and pad = 0 */ width=0;
      pad = (-width) & 3;
      if (info.bpp == 24) {
         easy = 1;
      } else if (info.bpp == 32) {
         if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000)
            easy = 2;
      }
      if (!easy) {
         if (!mr || !mg || !mb) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); }
         // right shift amt to put high bit in position #7
         rshift = stbi__high_bit(mr)-7; rcount = stbi__bitcount(mr);
         gshift = stbi__high_bit(mg)-7; gcount = stbi__bitcount(mg);
         bshift = stbi__high_bit(mb)-7; bcount = stbi__bitcount(mb);
         ashift = stbi__high_bit(ma)-7; acount = stbi__bitcount(ma);
         if (rcount > 8 || gcount > 8 || bcount > 8 || acount > 8) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); }
      }
      for (j=0; j < (int) s->img_y; ++j) {
         if (easy) {
            for (i=0; i < (int) s->img_x; ++i) {
               unsigned char a;
               // file stores BGR; write RGB
               out[z+2] = stbi__get8(s);
               out[z+1] = stbi__get8(s);
               out[z+0] = stbi__get8(s);
               z += 3;
               a = (easy == 2 ? stbi__get8(s) : 255);
               all_a |= a;
               if (target == 4) out[z++] = a;
            }
         } else {
            int bpp = info.bpp;
            for (i=0; i < (int) s->img_x; ++i) {
               stbi__uint32 v = (bpp == 16 ? (stbi__uint32) stbi__get16le(s) : stbi__get32le(s));
               unsigned int a;
               out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount));
               out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount));
               out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount));
               a = (ma ? stbi__shiftsigned(v & ma, ashift, acount) : 255);
               all_a |= a;
               if (target == 4) out[z++] = STBI__BYTECAST(a);
            }
         }
         stbi__skip(s, pad);
      }
   }

   // if alpha channel is all 0s, replace with all 255s
   if (target == 4 && all_a == 0)
      for (i=4*s->img_x*s->img_y-1; i >= 0; i -= 4)
         out[i] = 255;

   if (flip_vertically) {
      stbi_uc t;
      for (j=0; j < (int) s->img_y>>1; ++j) {
         stbi_uc *p1 = out +      j     *s->img_x*target;
         stbi_uc *p2 = out + (s->img_y-1-j)*s->img_x*target;
         for (i=0; i < (int) s->img_x*target; ++i) {
            t = p1[i]; p1[i] = p2[i]; p2[i] = t;
         }
      }
   }

   if (req_comp && req_comp != target) {
      out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y);
      if (out == NULL) return out; // stbi__convert_format frees input on failure
   }

   *x = s->img_x;
   *y = s->img_y;
   if (comp) *comp = s->img_n;
   return out;
}
#endif

// Targa Truevision - TGA
// by Jonathan Dummer
#ifndef STBI_NO_TGA
// returns STBI_rgb or whatever, 0 on error
static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16)
{
   // only RGB or RGBA (incl. 16bit) or grey allowed
   if (is_rgb16) *is_rgb16 = 0;
   switch(bits_per_pixel) {
      case 8:  return STBI_grey;
      case 16: if(is_grey) return STBI_grey_alpha;
               // fallthrough
      case 15: if(is_rgb16) *is_rgb16 = 1;
               return STBI_rgb;
      case 24: // fallthrough
      case 32: return bits_per_pixel/8;
      default: return 0;
   }
}

// Header-only TGA scan: validate the header and report dimensions and
// component count without decoding pixels. Rewinds and returns 0 on any
// mismatch so other format probes can retry.
static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp)
{
    int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp;
    int sz, tga_colormap_type;
    stbi__get8(s);                   // discard Offset
    tga_colormap_type = stbi__get8(s); // colormap type
    if( tga_colormap_type > 1 ) {
        stbi__rewind(s);
        return 0;      // only RGB or indexed allowed
    }
    tga_image_type = stbi__get8(s); // image type
    if ( tga_colormap_type == 1 ) { // colormapped (paletted) image
        if (tga_image_type != 1 && tga_image_type != 9) {
            stbi__rewind(s);
            return 0;
        }
        stbi__skip(s,4);       // skip index of first colormap entry and number of entries
        sz = stbi__get8(s);    //   check bits per palette color entry
        if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) {
            stbi__rewind(s);
            return 0;
        }
        stbi__skip(s,4);       // skip image x and y origin
        tga_colormap_bpp = sz;
    } else { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE
        if ( (tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11) ) {
            stbi__rewind(s);
            return 0; // only RGB or grey allowed, +/- RLE
        }
        stbi__skip(s,9); // skip colormap specification and image x/y origin
        tga_colormap_bpp = 0;
    }
    tga_w = stbi__get16le(s);
    if( tga_w < 1 ) {
        stbi__rewind(s);
        return 0;   // test width
    }
    tga_h = stbi__get16le(s);
    if( tga_h < 1 ) {
        stbi__rewind(s);
        return 0;   // test height
    }
    tga_bits_per_pixel = stbi__get8(s); // bits per pixel
    stbi__get8(s); // ignore alpha bits
    if (tga_colormap_bpp != 0) {
        if((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16)) {
            // when using a colormap, tga_bits_per_pixel is the size of the indexes
            // I don't think anything but 8 or 16bit indexes makes sense
            stbi__rewind(s);
            return 0;
        }
        tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL);
    } else {
        tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL);
    }
    if(!tga_comp) {
      stbi__rewind(s);
      return 0;
    }
    if (x) *x = tga_w;
    if (y) *y = tga_h;
    if (comp) *comp = tga_comp;
    return 1;                   // seems to have passed everything
}

// Quick format sniff for TGA (which has no magic number): sanity-check the
// header fields. Always rewinds before returning.
static int stbi__tga_test(stbi__context *s)
{
   int res = 0;
   int sz, tga_color_type;
   stbi__get8(s);      //   discard Offset
   tga_color_type = stbi__get8(s);   //   color type
   if ( tga_color_type > 1 ) goto errorEnd;   //   only RGB or indexed allowed
   sz = stbi__get8(s);   //   image type
   if ( tga_color_type == 1 ) { // colormapped (paletted) image
      if (sz != 1 && sz != 9) goto errorEnd; // colortype 1 demands image type 1 or 9
      stbi__skip(s,4);       // skip index of first colormap entry and number of entries
      sz = stbi__get8(s);    //   check bits per palette color entry
      if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd;
      stbi__skip(s,4);       // skip image x and y origin
   } else { // "normal" image w/o colormap
      if ( (sz != 2) && (sz != 3) && (sz != 10) && (sz != 11) ) goto errorEnd; // only RGB or grey allowed, +/- RLE
      stbi__skip(s,9); // skip colormap specification and image x/y origin
   }
   if ( stbi__get16le(s) < 1 ) goto errorEnd;      //   test width
   if ( stbi__get16le(s) < 1 ) goto errorEnd;      //   test height
   sz = stbi__get8(s);   //   bits per pixel
   if ( (tga_color_type == 1) && (sz != 8) && (sz != 16) ) goto errorEnd; // for colormapped images, bpp is size of an index
   if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd;

   res = 1; // if we got this far, everything's good and we can return 1 instead of 0

errorEnd:
   stbi__rewind(s);
   return res;
}

// read 16bit value and convert to 24bit RGB
// Expand one little-endian 5:5:5 16-bit TGA pixel to 8-bit RGB in out[0..2].
static void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out)
{
   stbi__uint16 px = (stbi__uint16)stbi__get16le(s);
   stbi__uint16 fiveBitMask = 31;
   // we have 3 channels with 5bits each
   int r = (px >> 10) & fiveBitMask;
   int g = (px >> 5) & fiveBitMask;
   int b = px & fiveBitMask;
   // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later
   out[0] = (stbi_uc)((r * 255)/31);
   out[1] = (stbi_uc)((g * 255)/31);
   out[2] = (stbi_uc)((b * 255)/31);

   // some people claim that the most significant bit might be used for alpha
   // (possibly if an alpha-bit is set in the "image descriptor byte")
   // but that only made 16bit test images completely translucent..
   // so let's treat all 15 and 16bit TGAs as RGB with no alpha.
}

// Decode a TGA image (raw or RLE, palettized or truecolor, 8..32 bpp) into
// an 8-bit interleaved buffer. Handles top-/bottom-origin flipping and the
// BGR->RGB swap for non-16-bit data.
static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
{
   // read in the TGA header stuff
   int tga_offset = stbi__get8(s);
   int tga_indexed = stbi__get8(s);
   int tga_image_type = stbi__get8(s);
   int tga_is_RLE = 0;
   int tga_palette_start = stbi__get16le(s);
   int tga_palette_len = stbi__get16le(s);
   int tga_palette_bits = stbi__get8(s);
   int tga_x_origin = stbi__get16le(s);
   int tga_y_origin = stbi__get16le(s);
   int tga_width = stbi__get16le(s);
   int tga_height = stbi__get16le(s);
   int tga_bits_per_pixel = stbi__get8(s);
   int tga_comp, tga_rgb16=0;
   int tga_inverted = stbi__get8(s);
   // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?)
   //   image data
   unsigned char *tga_data;
   unsigned char *tga_palette = NULL;
   int i, j;
   unsigned char raw_data[4] = {0};
   int RLE_count = 0;
   int RLE_repeating = 0;
   int read_next_pixel = 1;
   STBI_NOTUSED(ri);
   STBI_NOTUSED(tga_x_origin); // @TODO
   STBI_NOTUSED(tga_y_origin); // @TODO

   if (tga_height > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");
   if (tga_width > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");

   //   do a tiny bit of precessing
   if ( tga_image_type >= 8 )
   {
      // image types 8+ are the RLE variants of types 0..3
      tga_image_type -= 8;
      tga_is_RLE = 1;
   }
   // bit 5 of the descriptor byte: 0 = bottom-left origin (needs flip)
   tga_inverted = 1 - ((tga_inverted >> 5) & 1);

   //   If I'm paletted, then I'll use the number of bits from the palette
   if ( tga_indexed ) tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16);
   else tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16);

   if(!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured basic consistency
      return stbi__errpuc("bad format", "Can't find out TGA pixelformat");

   //   tga info
   *x = tga_width;
   *y = tga_height;
   if (comp) *comp = tga_comp;

   if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0))
      return stbi__errpuc("too large", "Corrupt TGA");

   tga_data = (unsigned char*)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0);
   if (!tga_data) return stbi__errpuc("outofmem", "Out of memory");

   // skip to the data's starting position (offset usually = 0)
   stbi__skip(s, tga_offset );

   if ( !tga_indexed && !tga_is_RLE && !tga_rgb16 ) {
      // fast path: uncompressed truecolor/grey, read whole rows at once
      for (i=0; i < tga_height; ++i) {
         int row = tga_inverted ? tga_height -i - 1 : i;
         stbi_uc *tga_row = tga_data + row*tga_width*tga_comp;
         stbi__getn(s, tga_row, tga_width * tga_comp);
      }
   } else  {
      //   do I need to load a palette?
      if ( tga_indexed)
      {
         if (tga_palette_len == 0) {  /* you have to have at least one entry! */
            STBI_FREE(tga_data);
            return stbi__errpuc("bad palette", "Corrupt TGA");
         }

         //   any data to skip? (offset usually = 0)
         stbi__skip(s, tga_palette_start );
         //   load the palette
         tga_palette = (unsigned char*)stbi__malloc_mad2(tga_palette_len, tga_comp, 0);
         if (!tga_palette) {
            STBI_FREE(tga_data);
            return stbi__errpuc("outofmem", "Out of memory");
         }
         if (tga_rgb16) {
            stbi_uc *pal_entry = tga_palette;
            STBI_ASSERT(tga_comp == STBI_rgb);
            for (i=0; i < tga_palette_len; ++i) {
               stbi__tga_read_rgb16(s, pal_entry);
               pal_entry += tga_comp;
            }
         } else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp)) {
               STBI_FREE(tga_data);
               STBI_FREE(tga_palette);
               return stbi__errpuc("bad palette", "Corrupt TGA");
         }
      }
      //   load the data
      for (i=0; i < tga_width * tga_height; ++i)
      {
         //   if I'm in RLE mode, do I need to get a RLE stbi__pngchunk?
         if ( tga_is_RLE )
         {
            if ( RLE_count == 0 )
            {
               //   yep, get the next byte as a RLE command
               int RLE_cmd = stbi__get8(s);
               RLE_count = 1 + (RLE_cmd & 127);
               RLE_repeating = RLE_cmd >> 7;  // high bit set = run packet
               read_next_pixel = 1;
            } else if ( !RLE_repeating )
            {
               read_next_pixel = 1;
            }
         } else
         {
            read_next_pixel = 1;
         }
         //   OK, if I need to read a pixel, do it now
         if ( read_next_pixel )
         {
            //   load however much data we did have
            if ( tga_indexed )
            {
               // read in index, then perform the lookup
               int pal_idx = (tga_bits_per_pixel == 8) ? stbi__get8(s) : stbi__get16le(s);
               if ( pal_idx >= tga_palette_len ) {
                  // invalid index: clamp to entry 0 rather than failing
                  pal_idx = 0;
               }
               pal_idx *= tga_comp;
               for (j = 0; j < tga_comp; ++j) {
                  raw_data[j] = tga_palette[pal_idx+j];
               }
            } else if(tga_rgb16) {
               STBI_ASSERT(tga_comp == STBI_rgb);
               stbi__tga_read_rgb16(s, raw_data);
            } else {
               //   read in the data raw
               for (j = 0; j < tga_comp; ++j) {
                  raw_data[j] = stbi__get8(s);
               }
            }
            //   clear the reading flag for the next pixel
            read_next_pixel = 0;
         } // end of reading a pixel

         // copy data
         for (j = 0; j < tga_comp; ++j)
           tga_data[i*tga_comp+j] = raw_data[j];

         //   in case we're in RLE mode, keep counting down
         --RLE_count;
      }
      //   do I need to invert the image?
      if ( tga_inverted )
      {
         for (j = 0; j*2 < tga_height; ++j)
         {
            int index1 = j * tga_width * tga_comp;
            int index2 = (tga_height - 1 - j) * tga_width * tga_comp;
            for (i = tga_width * tga_comp; i > 0; --i)
            {
               unsigned char temp = tga_data[index1];
               tga_data[index1] = tga_data[index2];
               tga_data[index2] = temp;
               ++index1;
               ++index2;
            }
         }
      }
      //   clear my palette, if I had one
      if ( tga_palette != NULL )
      {
         STBI_FREE( tga_palette );
      }
   }

   // swap RGB - if the source data was RGB16, it already is in the right order
   if (tga_comp >= 3 && !tga_rgb16)
   {
      unsigned char* tga_pixel = tga_data;
      for (i=0; i < tga_width * tga_height; ++i)
      {
         unsigned char temp = tga_pixel[0];
         tga_pixel[0] = tga_pixel[2];
         tga_pixel[2] = temp;
         tga_pixel += tga_comp;
      }
   }

   // convert to target component count
   if (req_comp && req_comp != tga_comp)
      tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height);

   //   the things I do to get rid of an error message, and yet keep
   //   Microsoft's C compilers happy... [8^(
   tga_palette_start = tga_palette_len = tga_palette_bits =
         tga_x_origin = tga_y_origin = 0;
   STBI_NOTUSED(tga_palette_start);
   //   OK, done
   return tga_data;
}
#endif

// *************************************************************************************************
// Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked by STB

#ifndef STBI_NO_PSD
// Probe for the big-endian "8BPS" PSD signature; rewinds either way.
static int stbi__psd_test(stbi__context *s)
{
   int r = (stbi__get32be(s) == 0x38425053);
   stbi__rewind(s);
   return r;
}

// Decode one channel of PSD PackBits RLE into p, writing every 4th byte
// (channels are interleaved in the output). Returns 0 on corrupt data.
static int stbi__psd_decode_rle(stbi__context *s, stbi_uc *p, int pixelCount)
{
   int count, nleft, len;

   count = 0;
   while ((nleft = pixelCount - count) > 0) {
      len = stbi__get8(s);
      if (len == 128) {
         // No-op.
      } else if (len < 128) {
         // Copy next len+1 bytes literally.
         len++;
         if (len > nleft) return 0; // corrupt data
         count += len;
         while (len) {
            *p = stbi__get8(s);
            p += 4;
            len--;
         }
      } else if (len > 128) {
         stbi_uc   val;
         // Next -len+1 bytes in the dest are replicated from next source byte.
         // (Interpret len as a negative 8-bit int.)
         len = 257 - len;
         if (len > nleft) return 0; // corrupt data
         val = stbi__get8(s);
         count += len;
         while (len) {
            *p = val;
            p += 4;
            len--;
         }
      }
   }

   return 1;
}

// Decode an 8/16-bit RGB PSD into an interleaved RGBA buffer (always 4
// channels before any req_comp conversion). 'bpc' is the caller-requested
// bytes-per-channel hint; 16-bit output is only kept for uncompressed
// 16-bit files when bpc == 16.
static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc)
{
   int pixelCount;
   int channelCount, compression;
   int channel, i;
   int bitdepth;
   int w,h;
   stbi_uc *out;
   STBI_NOTUSED(ri);

   // Check identifier
   if (stbi__get32be(s) != 0x38425053)   // "8BPS"
      return stbi__errpuc("not PSD", "Corrupt PSD image");

   // Check file type version.
   if (stbi__get16be(s) != 1)
      return stbi__errpuc("wrong version", "Unsupported version of PSD image");

   // Skip 6 reserved bytes.
   stbi__skip(s, 6 );

   // Read the number of channels (R, G, B, A, etc).
   channelCount = stbi__get16be(s);
   if (channelCount < 0 || channelCount > 16)
      return stbi__errpuc("wrong channel count", "Unsupported number of channels in PSD image");

   // Read the rows and columns of the image.
   h = stbi__get32be(s);
   w = stbi__get32be(s);

   if (h > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");
   if (w > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");

   // Make sure the depth is 8 bits.
   bitdepth = stbi__get16be(s);
   if (bitdepth != 8 && bitdepth != 16)
      return stbi__errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit");

   // Make sure the color mode is RGB.
   // Valid options are:
   //   0: Bitmap
   //   1: Grayscale
   //   2: Indexed color
   //   3: RGB color
   //   4: CMYK color
   //   7: Multichannel
   //   8: Duotone
   //   9: Lab color
   if (stbi__get16be(s) != 3)
      return stbi__errpuc("wrong color format", "PSD is not in RGB color format");

   // Skip the Mode Data.  (It's the palette for indexed color; other info for other modes.)
   stbi__skip(s,stbi__get32be(s) );

   // Skip the image resources.  (resolution, pen tool paths, etc)
   stbi__skip(s, stbi__get32be(s) );

   // Skip the reserved data.
   stbi__skip(s, stbi__get32be(s) );

   // Find out if the data is compressed.
   // Known values:
   //   0: no compression
   //   1: RLE compressed
   compression = stbi__get16be(s);
   if (compression > 1)
      return stbi__errpuc("bad compression", "PSD has an unknown compression format");

   // Check size
   if (!stbi__mad3sizes_valid(4, w, h, 0))
      return stbi__errpuc("too large", "Corrupt PSD");

   // Create the destination image.

   if (!compression && bitdepth == 16 && bpc == 16) {
      // keep full 16-bit precision: 8 bytes per RGBA pixel
      out = (stbi_uc *) stbi__malloc_mad3(8, w, h, 0);
      ri->bits_per_channel = 16;
   } else
      out = (stbi_uc *) stbi__malloc(4 * w*h);

   if (!out) return stbi__errpuc("outofmem", "Out of memory");
   pixelCount = w*h;

   // Initialize the data to zero.
   //memset( out, 0, pixelCount * 4 );

   // Finally, the image data.
   if (compression) {
      // RLE as used by .PSD and .TIFF
      // Loop until you get the number of unpacked bytes you are expecting:
      //     Read the next source byte into n.
      //     If n is between 0 and 127 inclusive, copy the next n+1 bytes literally.
      //     Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times.
      //     Else if n is 128, noop.
      // Endloop

      // The RLE-compressed data is preceded by a 2-byte data count for each row in the data,
      // which we're going to just skip.
      stbi__skip(s, h * channelCount * 2 );

      // Read the RLE data by channel.
      for (channel = 0; channel < 4; channel++) {
         stbi_uc *p;

         p = out+channel;
         if (channel >= channelCount) {
            // Fill this channel with default data.
            for (i = 0; i < pixelCount; i++, p += 4)
               *p = (channel == 3 ? 255 : 0);
         } else {
            // Read the RLE data.
            if (!stbi__psd_decode_rle(s, p, pixelCount)) {
               STBI_FREE(out);
               return stbi__errpuc("corrupt", "bad RLE data");
            }
         }
      }

   } else {
      // We're at the raw image data.  It's each channel in order (Red, Green, Blue, Alpha, ...)
      // where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image.

      // Read the data by channel.
      for (channel = 0; channel < 4; channel++) {
         if (channel >= channelCount) {
            // Fill this channel with default data.
            if (bitdepth == 16 && bpc == 16) {
               stbi__uint16 *q = ((stbi__uint16 *) out) + channel;
               stbi__uint16 val = channel == 3 ? 65535 : 0;
               for (i = 0; i < pixelCount; i++, q += 4)
                  *q = val;
            } else {
               stbi_uc *p = out+channel;
               stbi_uc val = channel == 3 ? 255 : 0;
               for (i = 0; i < pixelCount; i++, p += 4)
                  *p = val;
            }
         } else {
            if (ri->bits_per_channel == 16) {    // output bpc
               stbi__uint16 *q = ((stbi__uint16 *) out) + channel;
               for (i = 0; i < pixelCount; i++, q += 4)
                  *q = (stbi__uint16) stbi__get16be(s);
            } else {
               stbi_uc *p = out+channel;
               if (bitdepth == 16) {  // input bpc
                  // 16-bit source, 8-bit output: keep the high byte
                  for (i = 0; i < pixelCount; i++, p += 4)
                     *p = (stbi_uc) (stbi__get16be(s) >> 8);
               } else {
                  for (i = 0; i < pixelCount; i++, p += 4)
                     *p = stbi__get8(s);
               }
            }
         }
      }
   }

   // remove weird white matte from PSD
   if (channelCount >= 4) {
      if (ri->bits_per_channel == 16) {
         for (i=0; i < w*h; ++i) {
            stbi__uint16 *pixel = (stbi__uint16 *) out + 4*i;
            if (pixel[3] != 0 && pixel[3] != 65535) {
               float a = pixel[3] / 65535.0f;
               float ra = 1.0f / a;
               float inv_a = 65535.0f * (1 - ra);
               pixel[0] = (stbi__uint16) (pixel[0]*ra + inv_a);
               pixel[1] = (stbi__uint16) (pixel[1]*ra + inv_a);
               pixel[2] = (stbi__uint16) (pixel[2]*ra + inv_a);
            }
         }
      } else {
         for (i=0; i < w*h; ++i) {
            unsigned char *pixel = out + 4*i;
            if (pixel[3] != 0 && pixel[3] != 255) {
               float a = pixel[3] / 255.0f;
               float ra = 1.0f / a;
               float inv_a = 255.0f * (1 - ra);
               pixel[0] = (unsigned char) (pixel[0]*ra + inv_a);
               pixel[1] = (unsigned char) (pixel[1]*ra + inv_a);
               pixel[2] = (unsigned char) (pixel[2]*ra + inv_a);
            }
         }
      }
   }

   // convert to desired output format
   if (req_comp && req_comp != 4) {
      if (ri->bits_per_channel == 16)
         out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, 4, req_comp, w, h);
      else
         out = stbi__convert_format(out, 4, req_comp, w, h);
      if (out == NULL) return out; // stbi__convert_format frees input on failure
   }

   if (comp) *comp = 4;
   *y = h;
   *x = w;

   return out;
}
#endif

// *************************************************************************************************
// Softimage PIC loader
// by Tom Seddon
//
// See
http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format +// See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/ + +#ifndef STBI_NO_PIC +static int stbi__pic_is4(stbi__context *s,const char *str) +{ + int i; + for (i=0; i<4; ++i) + if (stbi__get8(s) != (stbi_uc)str[i]) + return 0; + + return 1; +} + +static int stbi__pic_test_core(stbi__context *s) +{ + int i; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) + return 0; + + for(i=0;i<84;++i) + stbi__get8(s); + + if (!stbi__pic_is4(s,"PICT")) + return 0; + + return 1; +} + +typedef struct +{ + stbi_uc size,type,channel; +} stbi__pic_packet; + +static stbi_uc *stbi__readval(stbi__context *s, int channel, stbi_uc *dest) +{ + int mask=0x80, i; + + for (i=0; i<4; ++i, mask>>=1) { + if (channel & mask) { + if (stbi__at_eof(s)) return stbi__errpuc("bad file","PIC file too short"); + dest[i]=stbi__get8(s); + } + } + + return dest; +} + +static void stbi__copyval(int channel,stbi_uc *dest,const stbi_uc *src) +{ + int mask=0x80,i; + + for (i=0;i<4; ++i, mask>>=1) + if (channel&mask) + dest[i]=src[i]; +} + +static stbi_uc *stbi__pic_load_core(stbi__context *s,int width,int height,int *comp, stbi_uc *result) +{ + int act_comp=0,num_packets=0,y,chained; + stbi__pic_packet packets[10]; + + // this will (should...) cater for even some bizarre stuff like having data + // for the same channel in multiple packets. + do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return stbi__errpuc("bad format","too many packets"); + + packet = &packets[num_packets++]; + + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + + act_comp |= packet->channel; + + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (reading packets)"); + if (packet->size != 8) return stbi__errpuc("bad format","packet isn't 8bpp"); + } while (chained); + + *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel? 
+ + for(y=0; ytype) { + default: + return stbi__errpuc("bad format","packet has bad compression type"); + + case 0: {//uncompressed + int x; + + for(x=0;xchannel,dest)) + return 0; + break; + } + + case 1://Pure RLE + { + int left=width, i; + + while (left>0) { + stbi_uc count,value[4]; + + count=stbi__get8(s); + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pure read count)"); + + if (count > left) + count = (stbi_uc) left; + + if (!stbi__readval(s,packet->channel,value)) return 0; + + for(i=0; ichannel,dest,value); + left -= count; + } + } + break; + + case 2: {//Mixed RLE + int left=width; + while (left>0) { + int count = stbi__get8(s), i; + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (mixed read count)"); + + if (count >= 128) { // Repeated + stbi_uc value[4]; + + if (count==128) + count = stbi__get16be(s); + else + count -= 127; + if (count > left) + return stbi__errpuc("bad file","scanline overrun"); + + if (!stbi__readval(s,packet->channel,value)) + return 0; + + for(i=0;ichannel,dest,value); + } else { // Raw + ++count; + if (count>left) return stbi__errpuc("bad file","scanline overrun"); + + for(i=0;ichannel,dest)) + return 0; + } + left-=count; + } + break; + } + } + } + } + + return result; +} + +static void *stbi__pic_load(stbi__context *s,int *px,int *py,int *comp,int req_comp, stbi__result_info *ri) +{ + stbi_uc *result; + int i, x,y, internal_comp; + STBI_NOTUSED(ri); + + if (!comp) comp = &internal_comp; + + for (i=0; i<92; ++i) + stbi__get8(s); + + x = stbi__get16be(s); + y = stbi__get16be(s); + + if (y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pic header)"); + if (!stbi__mad3sizes_valid(x, y, 4, 0)) return stbi__errpuc("too large", "PIC image too large to decode"); + + stbi__get32be(s); 
//skip `ratio' + stbi__get16be(s); //skip `fields' + stbi__get16be(s); //skip `pad' + + // intermediate buffer is RGBA + result = (stbi_uc *) stbi__malloc_mad3(x, y, 4, 0); + if (!result) return stbi__errpuc("outofmem", "Out of memory"); + memset(result, 0xff, x*y*4); + + if (!stbi__pic_load_core(s,x,y,comp, result)) { + STBI_FREE(result); + result=0; + } + *px = x; + *py = y; + if (req_comp == 0) req_comp = *comp; + result=stbi__convert_format(result,4,req_comp,x,y); + + return result; +} + +static int stbi__pic_test(stbi__context *s) +{ + int r = stbi__pic_test_core(s); + stbi__rewind(s); + return r; +} +#endif + +// ************************************************************************************************* +// GIF loader -- public domain by Jean-Marc Lienher -- simplified/shrunk by stb + +#ifndef STBI_NO_GIF +typedef struct +{ + stbi__int16 prefix; + stbi_uc first; + stbi_uc suffix; +} stbi__gif_lzw; + +typedef struct +{ + int w,h; + stbi_uc *out; // output buffer (always 4 components) + stbi_uc *background; // The current "background" as far as a gif is concerned + stbi_uc *history; + int flags, bgindex, ratio, transparent, eflags; + stbi_uc pal[256][4]; + stbi_uc lpal[256][4]; + stbi__gif_lzw codes[8192]; + stbi_uc *color_table; + int parse, step; + int lflags; + int start_x, start_y; + int max_x, max_y; + int cur_x, cur_y; + int line_size; + int delay; +} stbi__gif; + +static int stbi__gif_test_raw(stbi__context *s) +{ + int sz; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') return 0; + sz = stbi__get8(s); + if (sz != '9' && sz != '7') return 0; + if (stbi__get8(s) != 'a') return 0; + return 1; +} + +static int stbi__gif_test(stbi__context *s) +{ + int r = stbi__gif_test_raw(s); + stbi__rewind(s); + return r; +} + +static void stbi__gif_parse_colortable(stbi__context *s, stbi_uc pal[256][4], int num_entries, int transp) +{ + int i; + for (i=0; i < num_entries; ++i) { + pal[i][2] = stbi__get8(s); + 
pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + pal[i][3] = transp == i ? 0 : 255; + } +} + +static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_info) +{ + stbi_uc version; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') + return stbi__err("not GIF", "Corrupt GIF"); + + version = stbi__get8(s); + if (version != '7' && version != '9') return stbi__err("not GIF", "Corrupt GIF"); + if (stbi__get8(s) != 'a') return stbi__err("not GIF", "Corrupt GIF"); + + stbi__g_failure_reason = ""; + g->w = stbi__get16le(s); + g->h = stbi__get16le(s); + g->flags = stbi__get8(s); + g->bgindex = stbi__get8(s); + g->ratio = stbi__get8(s); + g->transparent = -1; + + if (g->w > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + if (g->h > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + + if (comp != 0) *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments + + if (is_info) return 1; + + if (g->flags & 0x80) + stbi__gif_parse_colortable(s,g->pal, 2 << (g->flags & 7), -1); + + return 1; +} + +static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__gif* g = (stbi__gif*) stbi__malloc(sizeof(stbi__gif)); + if (!g) return stbi__err("outofmem", "Out of memory"); + if (!stbi__gif_header(s, g, comp, 1)) { + STBI_FREE(g); + stbi__rewind( s ); + return 0; + } + if (x) *x = g->w; + if (y) *y = g->h; + STBI_FREE(g); + return 1; +} + +static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code) +{ + stbi_uc *p, *c; + int idx; + + // recurse to decode the prefixes, since the linked-list is backwards, + // and working backwards through an interleaved image would be nasty + if (g->codes[code].prefix >= 0) + stbi__out_gif_code(g, g->codes[code].prefix); + + if (g->cur_y >= g->max_y) return; + + idx = g->cur_x + g->cur_y; + p = &g->out[idx]; + g->history[idx / 4] = 1; + + c = 
&g->color_table[g->codes[code].suffix * 4]; + if (c[3] > 128) { // don't render transparent pixels; + p[0] = c[2]; + p[1] = c[1]; + p[2] = c[0]; + p[3] = c[3]; + } + g->cur_x += 4; + + if (g->cur_x >= g->max_x) { + g->cur_x = g->start_x; + g->cur_y += g->step; + + while (g->cur_y >= g->max_y && g->parse > 0) { + g->step = (1 << g->parse) * g->line_size; + g->cur_y = g->start_y + (g->step >> 1); + --g->parse; + } + } +} + +static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g) +{ + stbi_uc lzw_cs; + stbi__int32 len, init_code; + stbi__uint32 first; + stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear; + stbi__gif_lzw *p; + + lzw_cs = stbi__get8(s); + if (lzw_cs > 12) return NULL; + clear = 1 << lzw_cs; + first = 1; + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + bits = 0; + valid_bits = 0; + for (init_code = 0; init_code < clear; init_code++) { + g->codes[init_code].prefix = -1; + g->codes[init_code].first = (stbi_uc) init_code; + g->codes[init_code].suffix = (stbi_uc) init_code; + } + + // support no starting clear code + avail = clear+2; + oldcode = -1; + + len = 0; + for(;;) { + if (valid_bits < codesize) { + if (len == 0) { + len = stbi__get8(s); // start new block + if (len == 0) + return g->out; + } + --len; + bits |= (stbi__int32) stbi__get8(s) << valid_bits; + valid_bits += 8; + } else { + stbi__int32 code = bits & codemask; + bits >>= codesize; + valid_bits -= codesize; + // @OPTIMIZE: is there some way we can accelerate the non-clear path? 
+ if (code == clear) { // clear code + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + avail = clear + 2; + oldcode = -1; + first = 0; + } else if (code == clear + 1) { // end of stream code + stbi__skip(s, len); + while ((len = stbi__get8(s)) > 0) + stbi__skip(s,len); + return g->out; + } else if (code <= avail) { + if (first) { + return stbi__errpuc("no clear code", "Corrupt GIF"); + } + + if (oldcode >= 0) { + p = &g->codes[avail++]; + if (avail > 8192) { + return stbi__errpuc("too many codes", "Corrupt GIF"); + } + + p->prefix = (stbi__int16) oldcode; + p->first = g->codes[oldcode].first; + p->suffix = (code == avail) ? p->first : g->codes[code].first; + } else if (code == avail) + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + + stbi__out_gif_code(g, (stbi__uint16) code); + + if ((avail & codemask) == 0 && avail <= 0x0FFF) { + codesize++; + codemask = (1 << codesize) - 1; + } + + oldcode = code; + } else { + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + } + } + } +} + +// this function is designed to support animated gifs, although stb_image doesn't support it +// two back is the image from two frames ago, used for a very specific disposal format +static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp, stbi_uc *two_back) +{ + int dispose; + int first_frame; + int pi; + int pcount; + STBI_NOTUSED(req_comp); + + // on first frame, any non-written pixels get the background colour (non-transparent) + first_frame = 0; + if (g->out == 0) { + if (!stbi__gif_header(s, g, comp,0)) return 0; // stbi__g_failure_reason set by stbi__gif_header + if (!stbi__mad3sizes_valid(4, g->w, g->h, 0)) + return stbi__errpuc("too large", "GIF image is too large"); + pcount = g->w * g->h; + g->out = (stbi_uc *) stbi__malloc(4 * pcount); + g->background = (stbi_uc *) stbi__malloc(4 * pcount); + g->history = (stbi_uc *) stbi__malloc(pcount); + if (!g->out || !g->background || !g->history) + return 
stbi__errpuc("outofmem", "Out of memory"); + + // image is treated as "transparent" at the start - ie, nothing overwrites the current background; + // background colour is only used for pixels that are not rendered first frame, after that "background" + // color refers to the color that was there the previous frame. + memset(g->out, 0x00, 4 * pcount); + memset(g->background, 0x00, 4 * pcount); // state of the background (starts transparent) + memset(g->history, 0x00, pcount); // pixels that were affected previous frame + first_frame = 1; + } else { + // second frame - how do we dispose of the previous one? + dispose = (g->eflags & 0x1C) >> 2; + pcount = g->w * g->h; + + if ((dispose == 3) && (two_back == 0)) { + dispose = 2; // if I don't have an image to revert back to, default to the old background + } + + if (dispose == 3) { // use previous graphic + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &two_back[pi * 4], 4 ); + } + } + } else if (dispose == 2) { + // restore what was changed last frame to background before that frame; + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &g->background[pi * 4], 4 ); + } + } + } else { + // This is a non-disposal case eithe way, so just + // leave the pixels as is, and they will become the new background + // 1: do not dispose + // 0: not specified. 
+ } + + // background is what out is after the undoing of the previou frame; + memcpy( g->background, g->out, 4 * g->w * g->h ); + } + + // clear my history; + memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame + + for (;;) { + int tag = stbi__get8(s); + switch (tag) { + case 0x2C: /* Image Descriptor */ + { + stbi__int32 x, y, w, h; + stbi_uc *o; + + x = stbi__get16le(s); + y = stbi__get16le(s); + w = stbi__get16le(s); + h = stbi__get16le(s); + if (((x + w) > (g->w)) || ((y + h) > (g->h))) + return stbi__errpuc("bad Image Descriptor", "Corrupt GIF"); + + g->line_size = g->w * 4; + g->start_x = x * 4; + g->start_y = y * g->line_size; + g->max_x = g->start_x + w * 4; + g->max_y = g->start_y + h * g->line_size; + g->cur_x = g->start_x; + g->cur_y = g->start_y; + + // if the width of the specified rectangle is 0, that means + // we may not see *any* pixels or the image is malformed; + // to make sure this is caught, move the current y down to + // max_y (which is what out_gif_code checks). + if (w == 0) + g->cur_y = g->max_y; + + g->lflags = stbi__get8(s); + + if (g->lflags & 0x40) { + g->step = 8 * g->line_size; // first interlaced spacing + g->parse = 3; + } else { + g->step = g->line_size; + g->parse = 0; + } + + if (g->lflags & 0x80) { + stbi__gif_parse_colortable(s,g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? 
g->transparent : -1); + g->color_table = (stbi_uc *) g->lpal; + } else if (g->flags & 0x80) { + g->color_table = (stbi_uc *) g->pal; + } else + return stbi__errpuc("missing color table", "Corrupt GIF"); + + o = stbi__process_gif_raster(s, g); + if (!o) return NULL; + + // if this was the first frame, + pcount = g->w * g->h; + if (first_frame && (g->bgindex > 0)) { + // if first frame, any pixel not drawn to gets the background color + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi] == 0) { + g->pal[g->bgindex][3] = 255; // just in case it was made transparent, undo that; It will be reset next frame if need be; + memcpy( &g->out[pi * 4], &g->pal[g->bgindex], 4 ); + } + } + } + + return o; + } + + case 0x21: // Comment Extension. + { + int len; + int ext = stbi__get8(s); + if (ext == 0xF9) { // Graphic Control Extension. + len = stbi__get8(s); + if (len == 4) { + g->eflags = stbi__get8(s); + g->delay = 10 * stbi__get16le(s); // delay - 1/100th of a second, saving as 1/1000ths. + + // unset old transparent + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 255; + } + if (g->eflags & 0x01) { + g->transparent = stbi__get8(s); + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 0; + } + } else { + // don't need transparent + stbi__skip(s, 1); + g->transparent = -1; + } + } else { + stbi__skip(s, len); + break; + } + } + while ((len = stbi__get8(s)) != 0) { + stbi__skip(s, len); + } + break; + } + + case 0x3B: // gif stream termination code + return (stbi_uc *) s; // using '1' causes warning on some compilers + + default: + return stbi__errpuc("unknown code", "Corrupt GIF"); + } + } +} + +static void *stbi__load_gif_main_outofmem(stbi__gif *g, stbi_uc *out, int **delays) +{ + STBI_FREE(g->out); + STBI_FREE(g->history); + STBI_FREE(g->background); + + if (out) STBI_FREE(out); + if (delays && *delays) STBI_FREE(*delays); + return stbi__errpuc("outofmem", "Out of memory"); +} + +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, 
int *y, int *z, int *comp, int req_comp) +{ + if (stbi__gif_test(s)) { + int layers = 0; + stbi_uc *u = 0; + stbi_uc *out = 0; + stbi_uc *two_back = 0; + stbi__gif g; + int stride; + int out_size = 0; + int delays_size = 0; + + STBI_NOTUSED(out_size); + STBI_NOTUSED(delays_size); + + memset(&g, 0, sizeof(g)); + if (delays) { + *delays = 0; + } + + do { + u = stbi__gif_load_next(s, &g, comp, req_comp, two_back); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + + if (u) { + *x = g.w; + *y = g.h; + ++layers; + stride = g.w * g.h * 4; + + if (out) { + void *tmp = (stbi_uc*) STBI_REALLOC_SIZED( out, out_size, layers * stride ); + if (!tmp) + return stbi__load_gif_main_outofmem(&g, out, delays); + else { + out = (stbi_uc*) tmp; + out_size = layers * stride; + } + + if (delays) { + int *new_delays = (int*) STBI_REALLOC_SIZED( *delays, delays_size, sizeof(int) * layers ); + if (!new_delays) + return stbi__load_gif_main_outofmem(&g, out, delays); + *delays = new_delays; + delays_size = layers * sizeof(int); + } + } else { + out = (stbi_uc*)stbi__malloc( layers * stride ); + if (!out) + return stbi__load_gif_main_outofmem(&g, out, delays); + out_size = layers * stride; + if (delays) { + *delays = (int*) stbi__malloc( layers * sizeof(int) ); + if (!*delays) + return stbi__load_gif_main_outofmem(&g, out, delays); + delays_size = layers * sizeof(int); + } + } + memcpy( out + ((layers - 1) * stride), u, stride ); + if (layers >= 2) { + two_back = out - 2 * stride; + } + + if (delays) { + (*delays)[layers - 1U] = g.delay; + } + } + } while (u != 0); + + // free temp buffer; + STBI_FREE(g.out); + STBI_FREE(g.history); + STBI_FREE(g.background); + + // do the final conversion after loading everything; + if (req_comp && req_comp != 4) + out = stbi__convert_format(out, 4, req_comp, layers * g.w, g.h); + + *z = layers; + return out; + } else { + return stbi__errpuc("not GIF", "Image was not as a gif type."); + } +} + +static void *stbi__gif_load(stbi__context *s, int 
*x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *u = 0; + stbi__gif g; + memset(&g, 0, sizeof(g)); + STBI_NOTUSED(ri); + + u = stbi__gif_load_next(s, &g, comp, req_comp, 0); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + if (u) { + *x = g.w; + *y = g.h; + + // moved conversion to after successful load so that the same + // can be done for multiple frames. + if (req_comp && req_comp != 4) + u = stbi__convert_format(u, 4, req_comp, g.w, g.h); + } else if (g.out) { + // if there was an error and we allocated an image buffer, free it! + STBI_FREE(g.out); + } + + // free buffers needed for multiple frame loading; + STBI_FREE(g.history); + STBI_FREE(g.background); + + return u; +} + +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp) +{ + return stbi__gif_info_raw(s,x,y,comp); +} +#endif + +// ************************************************************************************************* +// Radiance RGBE HDR loader +// originally by Nicolas Schulz +#ifndef STBI_NO_HDR +static int stbi__hdr_test_core(stbi__context *s, const char *signature) +{ + int i; + for (i=0; signature[i]; ++i) + if (stbi__get8(s) != signature[i]) + return 0; + stbi__rewind(s); + return 1; +} + +static int stbi__hdr_test(stbi__context* s) +{ + int r = stbi__hdr_test_core(s, "#?RADIANCE\n"); + stbi__rewind(s); + if(!r) { + r = stbi__hdr_test_core(s, "#?RGBE\n"); + stbi__rewind(s); + } + return r; +} + +#define STBI__HDR_BUFLEN 1024 +static char *stbi__hdr_gettoken(stbi__context *z, char *buffer) +{ + int len=0; + char c = '\0'; + + c = (char) stbi__get8(z); + + while (!stbi__at_eof(z) && c != '\n') { + buffer[len++] = c; + if (len == STBI__HDR_BUFLEN-1) { + // flush to end of line + while (!stbi__at_eof(z) && stbi__get8(z) != '\n') + ; + break; + } + c = (char) stbi__get8(z); + } + + buffer[len] = 0; + return buffer; +} + +static void stbi__hdr_convert(float *output, stbi_uc *input, int req_comp) +{ + if ( input[3] != 0 ) { + float f1; 
+ // Exponent + f1 = (float) ldexp(1.0f, input[3] - (int)(128 + 8)); + if (req_comp <= 2) + output[0] = (input[0] + input[1] + input[2]) * f1 / 3; + else { + output[0] = input[0] * f1; + output[1] = input[1] * f1; + output[2] = input[2] * f1; + } + if (req_comp == 2) output[1] = 1; + if (req_comp == 4) output[3] = 1; + } else { + switch (req_comp) { + case 4: output[3] = 1; /* fallthrough */ + case 3: output[0] = output[1] = output[2] = 0; + break; + case 2: output[1] = 1; /* fallthrough */ + case 1: output[0] = 0; + break; + } + } +} + +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int width, height; + stbi_uc *scanline; + float *hdr_data; + int len; + unsigned char count, value; + int i, j, k, c1,c2, z; + const char *headerToken; + STBI_NOTUSED(ri); + + // Check identifier + headerToken = stbi__hdr_gettoken(s,buffer); + if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 0) + return stbi__errpf("not HDR", "Corrupt HDR image"); + + // Parse header + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) return stbi__errpf("unsupported format", "Unsupported HDR format"); + + // Parse width and height + // can't use sscanf() if we're not using stdio! 
+ token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + height = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + width = (int) strtol(token, NULL, 10); + + if (height > STBI_MAX_DIMENSIONS) return stbi__errpf("too large","Very large image (corrupt?)"); + if (width > STBI_MAX_DIMENSIONS) return stbi__errpf("too large","Very large image (corrupt?)"); + + *x = width; + *y = height; + + if (comp) *comp = 3; + if (req_comp == 0) req_comp = 3; + + if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0)) + return stbi__errpf("too large", "HDR image is too large"); + + // Read data + hdr_data = (float *) stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0); + if (!hdr_data) + return stbi__errpf("outofmem", "Out of memory"); + + // Load image data + // image data is stored as some number of sca + if ( width < 8 || width >= 32768) { + // Read flat data + for (j=0; j < height; ++j) { + for (i=0; i < width; ++i) { + stbi_uc rgbe[4]; + main_decode_loop: + stbi__getn(s, rgbe, 4); + stbi__hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp); + } + } + } else { + // Read RLE-encoded data + scanline = NULL; + + for (j = 0; j < height; ++j) { + c1 = stbi__get8(s); + c2 = stbi__get8(s); + len = stbi__get8(s); + if (c1 != 2 || c2 != 2 || (len & 0x80)) { + // not run-length encoded, so we have to actually use THIS data as a decoded + // pixel (note this can't be a valid pixel--one of RGB must be >= 128) + stbi_uc rgbe[4]; + rgbe[0] = (stbi_uc) c1; + rgbe[1] = (stbi_uc) c2; + rgbe[2] = (stbi_uc) len; + rgbe[3] = (stbi_uc) stbi__get8(s); + stbi__hdr_convert(hdr_data, rgbe, req_comp); + i = 1; + j = 0; + STBI_FREE(scanline); + goto main_decode_loop; // yes, this makes no sense + } + len <<= 8; + len |= 
stbi__get8(s); + if (len != width) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); } + if (scanline == NULL) { + scanline = (stbi_uc *) stbi__malloc_mad2(width, 4, 0); + if (!scanline) { + STBI_FREE(hdr_data); + return stbi__errpf("outofmem", "Out of memory"); + } + } + + for (k = 0; k < 4; ++k) { + int nleft; + i = 0; + while ((nleft = width - i) > 0) { + count = stbi__get8(s); + if (count > 128) { + // Run + value = stbi__get8(s); + count -= 128; + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = value; + } else { + // Dump + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = stbi__get8(s); + } + } + } + for (i=0; i < width; ++i) + stbi__hdr_convert(hdr_data+(j*width + i)*req_comp, scanline + i*4, req_comp); + } + if (scanline) + STBI_FREE(scanline); + } + + return hdr_data; +} + +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int dummy; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if (stbi__hdr_test(s) == 0) { + stbi__rewind( s ); + return 0; + } + + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) { + stbi__rewind( s ); + return 0; + } + token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *y = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *x = (int) strtol(token, NULL, 10); + *comp = 3; + return 1; +} +#endif // STBI_NO_HDR + +#ifndef 
STBI_NO_BMP +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp) +{ + void *p; + stbi__bmp_data info; + + info.all_a = 255; + p = stbi__bmp_parse_header(s, &info); + if (p == NULL) { + stbi__rewind( s ); + return 0; + } + if (x) *x = s->img_x; + if (y) *y = s->img_y; + if (comp) { + if (info.bpp == 24 && info.ma == 0xff000000) + *comp = 3; + else + *comp = info.ma ? 4 : 3; + } + return 1; +} +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp) +{ + int channelCount, dummy, depth; + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + *y = stbi__get32be(s); + *x = stbi__get32be(s); + depth = stbi__get16be(s); + if (depth != 8 && depth != 16) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 3) { + stbi__rewind( s ); + return 0; + } + *comp = 4; + return 1; +} + +static int stbi__psd_is16(stbi__context *s) +{ + int channelCount, depth; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + STBI_NOTUSED(stbi__get32be(s)); + STBI_NOTUSED(stbi__get32be(s)); + depth = stbi__get16be(s); + if (depth != 16) { + stbi__rewind( s ); + return 0; + } + return 1; +} +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp) +{ + int act_comp=0,num_packets=0,chained,dummy; + stbi__pic_packet packets[10]; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) { + 
stbi__rewind(s); + return 0; + } + + stbi__skip(s, 88); + + *x = stbi__get16be(s); + *y = stbi__get16be(s); + if (stbi__at_eof(s)) { + stbi__rewind( s); + return 0; + } + if ( (*x) != 0 && (1 << 28) / (*x) < (*y)) { + stbi__rewind( s ); + return 0; + } + + stbi__skip(s, 8); + + do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return 0; + + packet = &packets[num_packets++]; + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + act_comp |= packet->channel; + + if (stbi__at_eof(s)) { + stbi__rewind( s ); + return 0; + } + if (packet->size != 8) { + stbi__rewind( s ); + return 0; + } + } while (chained); + + *comp = (act_comp & 0x10 ? 4 : 3); + + return 1; +} +#endif + +// ************************************************************************************************* +// Portable Gray Map and Portable Pixel Map loader +// by Ken Miller +// +// PGM: http://netpbm.sourceforge.net/doc/pgm.html +// PPM: http://netpbm.sourceforge.net/doc/ppm.html +// +// Known limitations: +// Does not support comments in the header section +// Does not support ASCII image data (formats P2 and P3) + +#ifndef STBI_NO_PNM + +static int stbi__pnm_test(stbi__context *s) +{ + char p, t; + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind( s ); + return 0; + } + return 1; +} + +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + STBI_NOTUSED(ri); + + ri->bits_per_channel = stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n); + if (ri->bits_per_channel == 0) + return 0; + + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + *x = s->img_x; + *y = s->img_y; + if (comp) 
*comp = s->img_n; + + if (!stbi__mad4sizes_valid(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0)) + return stbi__errpuc("too large", "PNM too large"); + + out = (stbi_uc *) stbi__malloc_mad4(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + stbi__getn(s, out, s->img_n * s->img_x * s->img_y * (ri->bits_per_channel / 8)); + + if (req_comp && req_comp != s->img_n) { + out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + return out; +} + +static int stbi__pnm_isspace(char c) +{ + return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r'; +} + +static void stbi__pnm_skip_whitespace(stbi__context *s, char *c) +{ + for (;;) { + while (!stbi__at_eof(s) && stbi__pnm_isspace(*c)) + *c = (char) stbi__get8(s); + + if (stbi__at_eof(s) || *c != '#') + break; + + while (!stbi__at_eof(s) && *c != '\n' && *c != '\r' ) + *c = (char) stbi__get8(s); + } +} + +static int stbi__pnm_isdigit(char c) +{ + return c >= '0' && c <= '9'; +} + +static int stbi__pnm_getinteger(stbi__context *s, char *c) +{ + int value = 0; + + while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) { + value = value*10 + (*c - '0'); + *c = (char) stbi__get8(s); + } + + return value; +} + +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp) +{ + int maxv, dummy; + char c, p, t; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + stbi__rewind(s); + + // Get identifier + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind(s); + return 0; + } + + *comp = (t == '6') ? 
3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm + + c = (char) stbi__get8(s); + stbi__pnm_skip_whitespace(s, &c); + + *x = stbi__pnm_getinteger(s, &c); // read width + stbi__pnm_skip_whitespace(s, &c); + + *y = stbi__pnm_getinteger(s, &c); // read height + stbi__pnm_skip_whitespace(s, &c); + + maxv = stbi__pnm_getinteger(s, &c); // read max value + if (maxv > 65535) + return stbi__err("max value > 65535", "PPM image supports only 8-bit and 16-bit images"); + else if (maxv > 255) + return 16; + else + return 8; +} + +static int stbi__pnm_is16(stbi__context *s) +{ + if (stbi__pnm_info(s, NULL, NULL, NULL) == 16) + return 1; + return 0; +} +#endif + +static int stbi__info_main(stbi__context *s, int *x, int *y, int *comp) +{ + #ifndef STBI_NO_JPEG + if (stbi__jpeg_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNG + if (stbi__png_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_GIF + if (stbi__gif_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_BMP + if (stbi__bmp_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PIC + if (stbi__pic_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNM + if (stbi__pnm_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_info(s, x, y, comp)) return 1; + #endif + + // test tga last because it's a crappy test! 
+ #ifndef STBI_NO_TGA + if (stbi__tga_info(s, x, y, comp)) + return 1; + #endif + return stbi__err("unknown image type", "Image not of any known type, or corrupt"); +} + +static int stbi__is_16_main(stbi__context *s) +{ + #ifndef STBI_NO_PNG + if (stbi__png_is16(s)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_is16(s)) return 1; + #endif + + #ifndef STBI_NO_PNM + if (stbi__pnm_is16(s)) return 1; + #endif + return 0; +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_info_from_file(f, x, y, comp); + fclose(f); + return result; +} + +STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__info_main(&s,x,y,comp); + fseek(f,pos,SEEK_SET); + return r; +} + +STBIDEF int stbi_is_16_bit(char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_is_16_bit_from_file(f); + fclose(f); + return result; +} + +STBIDEF int stbi_is_16_bit_from_file(FILE *f) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__is_16_main(&s); + fseek(f,pos,SEEK_SET); + return r; +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + 
return stbi__is_16_main(&s); +} + +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *c, void *user) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__is_16_main(&s); +} + +#endif // STB_IMAGE_IMPLEMENTATION + +/* + revision history: + 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) change sbti__shiftsigned to avoid clang -O2 bug + 1-bit BMP + *_is_16_bit api + avoid warnings + 2.16 (2017-07-23) all functions have 16-bit variants; + STBI_NO_STDIO works again; + compilation fixes; + fix rounding in unpremultiply; + optimize vertical flip; + disable raw_len validation; + documentation fixes + 2.15 (2017-03-18) fix png-1,2,4 bug; now all Imagenet JPGs decode; + warning fixes; disable run-time SSE detection on gcc; + uniform handling of optional "return" values; + thread-safe initialization of zlib tables + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-11-29) add 16-bit API, only supported for PNG right now + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) allocate large structures on the stack + remove white matting for transparent PSD + fix reported channel count for PNG & BMP + re-enable SSE2 in non-gcc 64-bit + support RGB-formatted JPEG + read 16-bit PNGs (only as 8-bit) + 2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED + 2.09 (2016-01-16) allow comments in PNM files + 16-bit-per-pixel TGA (not bit-per-component) + info() for TGA could break due to .hdr handling + info() for BMP to shares code instead of sloppy parse + can use STBI_REALLOC_SIZED if allocator doesn't support realloc + code cleanup + 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA + 2.07 (2015-09-13) fix compiler warnings + partial animated GIF support + limited 16-bpc PSD support + #ifdef unused 
functions + bug with < 92 byte PIC,PNM,HDR,TGA + 2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value + 2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning + 2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit + 2.03 (2015-04-12) extra corruption checking (mmozeiko) + stbi_set_flip_vertically_on_load (nguillemot) + fix NEON support; fix mingw support + 2.02 (2015-01-19) fix incorrect assert, fix warning + 2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2 + 2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG + 2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg) + progressive JPEG (stb) + PGM/PPM support (Ken Miller) + STBI_MALLOC,STBI_REALLOC,STBI_FREE + GIF bugfix -- seemingly never worked + STBI_NO_*, STBI_ONLY_* + 1.48 (2014-12-14) fix incorrectly-named assert() + 1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb) + optimize PNG (ryg) + fix bug in interlaced PNG with user-specified channel count (stb) + 1.46 (2014-08-26) + fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG + 1.45 (2014-08-16) + fix MSVC-ARM internal compiler error by wrapping malloc + 1.44 (2014-08-07) + various warning fixes from Ronny Chevalier + 1.43 (2014-07-15) + fix MSVC-only compiler problem in code changed in 1.42 + 1.42 (2014-07-09) + don't define _CRT_SECURE_NO_WARNINGS (affects user code) + fixes to stbi__cleanup_jpeg path + added STBI_ASSERT to avoid requiring assert.h + 1.41 (2014-06-25) + fix search&replace from 1.36 that messed up comments/error messages + 1.40 (2014-06-22) + fix gcc struct-initialization warning + 1.39 (2014-06-15) + fix to TGA optimization when req_comp != number of components in TGA; + fix to GIF loading because BMP wasn't rewinding (whoops, no GIFs in my test suite) + add support for BMP version 5 (more ignored fields) + 1.38 (2014-06-06) + suppress MSVC warnings on integer casts truncating values + fix accidental rename of 
'skip' field of I/O + 1.37 (2014-06-04) + remove duplicate typedef + 1.36 (2014-06-03) + convert to header file single-file library + if de-iphone isn't set, load iphone images color-swapped instead of returning NULL + 1.35 (2014-05-27) + various warnings + fix broken STBI_SIMD path + fix bug where stbi_load_from_file no longer left file pointer in correct place + fix broken non-easy path for 32-bit BMP (possibly never used) + TGA optimization by Arseny Kapoulkine + 1.34 (unknown) + use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case + 1.33 (2011-07-14) + make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements + 1.32 (2011-07-13) + support for "info" function for all supported filetypes (SpartanJ) + 1.31 (2011-06-20) + a few more leak fixes, bug in PNG handling (SpartanJ) + 1.30 (2011-06-11) + added ability to load files via callbacks to accomidate custom input streams (Ben Wenger) + removed deprecated format-specific test/load functions + removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway + error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha) + fix inefficiency in decoding 32-bit BMP (David Woo) + 1.29 (2010-08-16) + various warning fixes from Aurelien Pocheville + 1.28 (2010-08-01) + fix bug in GIF palette transparency (SpartanJ) + 1.27 (2010-08-01) + cast-to-stbi_uc to fix warnings + 1.26 (2010-07-24) + fix bug in file buffering for PNG reported by SpartanJ + 1.25 (2010-07-17) + refix trans_data warning (Won Chun) + 1.24 (2010-07-12) + perf improvements reading from files on platforms with lock-heavy fgetc() + minor perf improvements for jpeg + deprecated type-specific functions so we'll get feedback if they're needed + attempt to fix trans_data warning (Won Chun) + 1.23 fixed bug in iPhone support + 1.22 (2010-07-10) + removed image *writing* support + stbi_info support from Jetro Lauha + GIF support from 
Jean-Marc Lienher + iPhone PNG-extensions from James Brown + warning-fixes from Nicolas Schulz and Janez Zemva (i.stbi__err. Janez (U+017D)emva) + 1.21 fix use of 'stbi_uc' in header (reported by jon blow) + 1.20 added support for Softimage PIC, by Tom Seddon + 1.19 bug in interlaced PNG corruption check (found by ryg) + 1.18 (2008-08-02) + fix a threading bug (local mutable static) + 1.17 support interlaced PNG + 1.16 major bugfix - stbi__convert_format converted one too many pixels + 1.15 initialize some fields for thread safety + 1.14 fix threadsafe conversion bug + header-file-only version (#define STBI_HEADER_FILE_ONLY before including) + 1.13 threadsafe + 1.12 const qualifiers in the API + 1.11 Support installable IDCT, colorspace conversion routines + 1.10 Fixes for 64-bit (don't use "unsigned long") + optimized upsampling by Fabian "ryg" Giesen + 1.09 Fix format-conversion for PSD code (bad global variables!) + 1.08 Thatcher Ulrich's PSD code integrated by Nicolas Schulz + 1.07 attempt to fix C++ warning/errors again + 1.06 attempt to fix C++ warning/errors again + 1.05 fix TGA loading to return correct *comp and use good luminance calc + 1.04 default float alpha is 1, not 255; use 'void *' for stbi_image_free + 1.03 bugfixes to STBI_NO_STDIO, STBI_NO_HDR + 1.02 support for (subset of) HDR files, float interface for preferred access to them + 1.01 fix bug: possible bug in handling right-side up bmps... 
not sure + fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all + 1.00 interface to zlib that skips zlib header + 0.99 correct handling of alpha in palette + 0.98 TGA loader by lonesock; dynamically add loaders (untested) + 0.97 jpeg errors on too large a file; also catch another malloc failure + 0.96 fix detection of invalid v value - particleman@mollyrocket forum + 0.95 during header scan, seek to markers in case of padding + 0.94 STBI_NO_STDIO to disable stdio usage; rename all #defines the same + 0.93 handle jpegtran output; verbose errors + 0.92 read 4,8,16,24,32-bit BMP files of several formats + 0.91 output 24-bit Windows 3.0 BMP files + 0.90 fix a few more warnings; bump version number to approach 1.0 + 0.61 bugfixes due to Marc LeBlanc, Christopher Lloyd + 0.60 fix compiling as c++ + 0.59 fix warnings: merge Dave Moore's -Wall fixes + 0.58 fix bug: zlib uncompressed mode len/nlen was wrong endian + 0.57 fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available + 0.56 fix bug: zlib uncompressed mode len vs. nlen + 0.55 fix bug: restart_interval not initialized to 0 + 0.54 allow NULL for 'int *comp' + 0.53 fix bug in png 3->4; speedup png decoding + 0.52 png handles req_comp=3,4 directly; minor cleanup; jpeg comments + 0.51 obey req_comp requests, 1-component jpegs return as 1-component, + on 'test' only check type, not whether we support this variant + 0.50 (2006-11-19) + first released version +*/ + + +/* +------------------------------------------------------------------------------ +This software is available under 2 licenses -- choose whichever you prefer. 
+------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. 
We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ \ No newline at end of file diff --git a/thirdparty/stb/src/stb_truetype.h b/thirdparty/stb/src/stb_truetype.h index 5e2a2e4..2ca3ff9 100644 --- a/thirdparty/stb/src/stb_truetype.h +++ b/thirdparty/stb/src/stb_truetype.h @@ -412,6 +412,43 @@ int main(int arg, char **argv) } #endif +#pragma region ODIN: CUSTOM ALLOCATOR + +#ifdef STB_TRUETYPE_IMPLEMENTATION +#define GB_IMPLEMENTATION +#endif +#include "gb/gb.h" + +#ifdef STBTT_STATIC +#define STBTT_DEF static +#else +#define STBTT_DEF extern +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +STBTT_DEF void stbtt_SetAllocator( gbAllocator allocator ); + +#ifdef __cplusplus +} +#endif + +#ifndef STBTT_malloc +#define STBTT_malloc(x,u) ((void)(u), gb_alloc(stbtt__allocator, x)) +#define STBTT_free(x,u) ((void)(u), gb_free(stbtt__allocator, x)) +#endif + +#ifdef STB_TRUETYPE_IMPLEMENTATION +gb_global gbAllocator stbtt__allocator = { gb_heap_allocator_proc, NULL }; + +STBTT_DEF void stbtt_SetAllocator( gbAllocator allocator ) { + stbtt__allocator = allocator; +} +#endif + +#pragma endregion ODIN: CUSTOM ALLOCATOR ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// diff --git a/thirdparty/stb/truetype/stb_truetype.odin 
b/thirdparty/stb/truetype/stb_truetype.odin index bd521c6..f128ad0 100644 --- a/thirdparty/stb/truetype/stb_truetype.odin +++ b/thirdparty/stb/truetype/stb_truetype.odin @@ -36,6 +36,37 @@ when ODIN_ARCH == .wasm32 || ODIN_ARCH == .wasm64p32 { #assert(size_of(c.int) == size_of(rune)) #assert(size_of(c.int) == size_of(b32)) +//----------------------------------------------------------------------------- +// CUSTOM: ODIN COMPATIBLE ALLOCATOR +//----------------------------------------------------------------------------- + +gbAllocationType :: enum(i32) { + Alloc, + Free, + FreeAll, + Resize, +} + +gbAllocatorProc :: #type proc(allocator_data: rawptr, type: gbAllocationType, + size: c.ssize_t, alignment: c.ssize_t, + old_memory: rawptr, old_size: c.ssize_t, + flags : c.ulonglong +) -> rawptr + +gbAllocator :: struct { + procedure: gbAllocatorProc, + data: rawptr, +} + +@(default_calling_convention="c", link_prefix="stbtt_") +foreign stbtt { + SetAllocator :: proc(allocator : gbAllocator) --- +} + +//----------------------------------------------------------------------------- +// END CUSTOM: ODIN COMPATIBLE ALLOCATOR +//----------------------------------------------------------------------------- + ////////////////////////////////////////////////////////////////////////////// // // TEXTURE BAKING API diff --git a/vefontcache/draw.odin b/vefontcache/draw.odin index 9d91ba5..189e308 100644 --- a/vefontcache/draw.odin +++ b/vefontcache/draw.odin @@ -596,6 +596,7 @@ batch_generate_glyphs_draw_list :: proc ( draw_list : ^Draw_List, error : Allocator_Error glyph_pack[pack_id].shape, error = parser_get_glyph_shape(entry.parser_info, shape.glyph[pack_id]) assert(error == .None) + assert(glyph_pack[pack_id].shape != nil) } for id, index in oversized { @@ -633,7 +634,10 @@ batch_generate_glyphs_draw_list :: proc ( draw_list : ^Draw_List, } flush_glyph_buffer_draw_list(draw_list, & glyph_buffer.draw_list, & glyph_buffer.clear_draw_list, & glyph_buffer.allocated_x) - for id, 
index in oversized do parser_free_shape(entry.parser_info, glyph_pack[id].shape) + for pack_id, index in oversized { + assert(glyph_pack[pack_id].shape != nil) + parser_free_shape(entry.parser_info, glyph_pack[pack_id].shape) + } } profile_end() @@ -666,6 +670,7 @@ batch_generate_glyphs_draw_list :: proc ( draw_list : ^Draw_List, error : Allocator_Error glyph_pack[pack_id].shape, error = parser_get_glyph_shape(entry.parser_info, shape.glyph[pack_id]) assert(error == .None) + assert(glyph_pack[pack_id].shape != nil) } for id, index in to_cache @@ -731,7 +736,10 @@ batch_generate_glyphs_draw_list :: proc ( draw_list : ^Draw_List, } flush_glyph_buffer_draw_list(draw_list, & glyph_buffer.draw_list, & glyph_buffer.clear_draw_list, & glyph_buffer.allocated_x) - for id, index in to_cache do parser_free_shape(entry.parser_info, glyph_pack[id].shape) + for pack_id, index in to_cache { + assert(glyph_pack[pack_id].shape != nil) + parser_free_shape(entry.parser_info, glyph_pack[pack_id].shape) + } profile_begin("gen_cached_draw_list: to_cache") when ENABLE_DRAW_TYPE_VISUALIZATION { diff --git a/vefontcache/parser.odin b/vefontcache/parser.odin index ee05912..666f058 100644 --- a/vefontcache/parser.odin +++ b/vefontcache/parser.odin @@ -18,6 +18,7 @@ Already wanted to do so anyway to evaluate the shape generation implementation. 
import "base:runtime" import "core:c" import "core:math" +import "core:mem" import "core:slice" import stbtt "thirdparty:stb/truetype" // import freetype "thirdparty:freetype" @@ -57,13 +58,44 @@ Parser_Glyph_Vertex :: struct { Parser_Glyph_Shape :: [dynamic]Parser_Glyph_Vertex Parser_Context :: struct { - kind : Parser_Kind, + lib_backing : Allocator, + kind : Parser_Kind, // ft_library : freetype.Library, } -parser_init :: proc( ctx : ^Parser_Context, kind : Parser_Kind ) +parser_stbtt_allocator_proc :: proc( + allocator_data : rawptr, + type : stbtt.gbAllocationType, + size : c.ssize_t, + alignment : c.ssize_t, + old_memory : rawptr, + old_size : c.ssize_t, + flags : c.ulonglong +) -> rawptr { - ctx.kind = kind + allocator := transmute(^Allocator) allocator_data + result, error := allocator.procedure( allocator.data, cast(mem.Allocator_Mode) type, cast(int) size, cast(int) alignment, old_memory, cast(int) old_size ) + assert(error == .None) + + if type == .Alloc || type == .Resize { + return transmute(rawptr) & result[0] + } + else do return nil +} + +parser_init :: proc( ctx : ^Parser_Context, kind : Parser_Kind, allocator := context.allocator ) +{ + ctx.kind = kind + ctx.lib_backing = allocator + + stbtt_allocator := stbtt.gbAllocator { parser_stbtt_allocator_proc, & ctx.lib_backing } + stbtt.SetAllocator( stbtt_allocator ) +} + +parser_reload :: proc( ctx : ^Parser_Context, allocator := context.allocator) { + ctx.lib_backing = allocator + stbtt_allocator := stbtt.gbAllocator { parser_stbtt_allocator_proc, & ctx.lib_backing } + stbtt.SetAllocator( stbtt_allocator ) } parser_shutdown :: proc( ctx : ^Parser_Context ) {