From d0612c27b2b4abfec3e75ed24b476d20f9dc31a3 Mon Sep 17 00:00:00 2001 From: Milan Nikolic Date: Thu, 9 Nov 2023 14:32:33 +0100 Subject: [PATCH] Update C sources --- raylib/external/cgltf.h | 400 +++++++---- raylib/external/dr_flac.h | 81 ++- raylib/external/dr_mp3.h | 61 +- raylib/external/dr_wav.h | 1235 +++++++++++++++++++++++----------- raylib/external/m3d.h | 40 +- raylib/external/miniaudio.h | 22 +- raylib/external/qoa.h | 170 +++-- raylib/external/qoi.h | 12 +- raylib/external/rl_gputex.h | 2 +- raylib/external/rprand.h | 6 +- raylib/external/sinfl.h | 28 +- raylib/platforms/rcore_drm.c | 240 ++++++- raylib/raylib.h | 31 +- raylib/rcore.c | 32 +- raylib/rshapes.c | 852 +++++++++++++++-------- raylib/rshapes.go | 55 +- raylib/rtextures.c | 2 +- 17 files changed, 2216 insertions(+), 1053 deletions(-) diff --git a/raylib/external/cgltf.h b/raylib/external/cgltf.h index d2a9097..ddec501 100644 --- a/raylib/external/cgltf.h +++ b/raylib/external/cgltf.h @@ -328,15 +328,6 @@ typedef struct cgltf_accessor_sparse cgltf_component_type indices_component_type; cgltf_buffer_view* values_buffer_view; cgltf_size values_byte_offset; - cgltf_extras extras; - cgltf_extras indices_extras; - cgltf_extras values_extras; - cgltf_size extensions_count; - cgltf_extension* extensions; - cgltf_size indices_extensions_count; - cgltf_extension* indices_extensions; - cgltf_size values_extensions_count; - cgltf_extension* values_extensions; } cgltf_accessor_sparse; typedef struct cgltf_accessor @@ -419,9 +410,6 @@ typedef struct cgltf_texture_view cgltf_float scale; /* equivalent to strength for occlusion_texture */ cgltf_bool has_transform; cgltf_texture_transform transform; - cgltf_extras extras; - cgltf_size extensions_count; - cgltf_extension* extensions; } cgltf_texture_view; typedef struct cgltf_pbr_metallic_roughness @@ -504,6 +492,13 @@ typedef struct cgltf_iridescence cgltf_texture_view iridescence_thickness_texture; } cgltf_iridescence; +typedef struct cgltf_anisotropy +{ + cgltf_float anisotropy_strength; + cgltf_float anisotropy_rotation; + cgltf_texture_view anisotropy_texture; +} cgltf_anisotropy; + typedef struct cgltf_material { char* name; @@ -517,6 +512,7 @@ typedef struct cgltf_material cgltf_bool has_sheen; cgltf_bool has_emissive_strength; cgltf_bool has_iridescence; + cgltf_bool has_anisotropy; cgltf_pbr_metallic_roughness pbr_metallic_roughness; cgltf_pbr_specular_glossiness pbr_specular_glossiness; cgltf_clearcoat clearcoat; @@ -527,6 +523,7 @@ typedef struct cgltf_material cgltf_volume volume; cgltf_emissive_strength emissive_strength; cgltf_iridescence iridescence; + cgltf_anisotropy anisotropy; cgltf_texture_view normal_texture; cgltf_texture_view occlusion_texture; cgltf_texture_view emissive_texture; @@ -559,7 +556,6 @@ typedef struct cgltf_draco_mesh_compression { } cgltf_draco_mesh_compression; typedef struct cgltf_mesh_gpu_instancing { - cgltf_buffer_view* buffer_view; cgltf_attribute* attributes; cgltf_size attributes_count; } cgltf_mesh_gpu_instancing; @@ -842,10 +838,28 @@ cgltf_size cgltf_component_size(cgltf_component_type component_type); cgltf_size cgltf_calc_size(cgltf_type type, cgltf_component_type component_type); cgltf_size cgltf_accessor_unpack_floats(const cgltf_accessor* accessor, cgltf_float* out, cgltf_size float_count); +cgltf_size cgltf_accessor_unpack_indices(const cgltf_accessor* accessor, cgltf_uint* out, cgltf_size index_count); /* this function is deprecated and will be removed in the future; use cgltf_extras::data instead */ cgltf_result 
cgltf_copy_extras_json(const cgltf_data* data, const cgltf_extras* extras, char* dest, cgltf_size* dest_size); +cgltf_size cgltf_mesh_index(const cgltf_data* data, const cgltf_mesh* object); +cgltf_size cgltf_material_index(const cgltf_data* data, const cgltf_material* object); +cgltf_size cgltf_accessor_index(const cgltf_data* data, const cgltf_accessor* object); +cgltf_size cgltf_buffer_view_index(const cgltf_data* data, const cgltf_buffer_view* object); +cgltf_size cgltf_buffer_index(const cgltf_data* data, const cgltf_buffer* object); +cgltf_size cgltf_image_index(const cgltf_data* data, const cgltf_image* object); +cgltf_size cgltf_texture_index(const cgltf_data* data, const cgltf_texture* object); +cgltf_size cgltf_sampler_index(const cgltf_data* data, const cgltf_sampler* object); +cgltf_size cgltf_skin_index(const cgltf_data* data, const cgltf_skin* object); +cgltf_size cgltf_camera_index(const cgltf_data* data, const cgltf_camera* object); +cgltf_size cgltf_light_index(const cgltf_data* data, const cgltf_light* object); +cgltf_size cgltf_node_index(const cgltf_data* data, const cgltf_node* object); +cgltf_size cgltf_scene_index(const cgltf_data* data, const cgltf_scene* object); +cgltf_size cgltf_animation_index(const cgltf_data* data, const cgltf_animation* object); +cgltf_size cgltf_animation_sampler_index(const cgltf_animation* animation, const cgltf_animation_sampler* object); +cgltf_size cgltf_animation_channel_index(const cgltf_animation* animation, const cgltf_animation_channel* object); + #ifdef __cplusplus } #endif @@ -866,6 +880,7 @@ cgltf_result cgltf_copy_extras_json(const cgltf_data* data, const cgltf_extras* #ifdef CGLTF_IMPLEMENTATION +#include <assert.h> /* For assert */ #include <string.h> /* For strncpy */ #include <stdio.h> /* For fopen */ #include <limits.h> /* For UINT_MAX etc */ @@ -875,10 +890,6 @@ cgltf_result cgltf_copy_extras_json(const cgltf_data* data, const cgltf_extras* #include <stdlib.h> /* For malloc, free, atoi, atof */ #endif -#if CGLTF_VALIDATE_ENABLE_ASSERTS -#include <assert.h> -#endif - /* JSMN_PARENT_LINKS is necessary to make parsing large structures linear in input size */ #define JSMN_PARENT_LINKS @@ -1000,7 +1011,7 @@ static cgltf_result cgltf_default_file_read(const struct cgltf_memory_options* m { fseek(file, 0, SEEK_END); -#ifdef _WIN32 +#ifdef _MSC_VER __int64 length = _ftelli64(file); #else long length = ftell(file); @@ -1144,7 +1155,7 @@ cgltf_result cgltf_parse(const cgltf_options* options, const void* data, cgltf_s json_chunk += GlbChunkHeaderSize; - const void* bin = 0; + const void* bin = NULL; cgltf_size bin_size = 0; if (GlbHeaderSize + GlbChunkHeaderSize + json_length + GlbChunkHeaderSize <= size) @@ -1763,12 +1774,6 @@ static void cgltf_free_extensions(cgltf_data* data, cgltf_extension* extensions, data->memory.free_func(data->memory.user_data, extensions); } -static void cgltf_free_texture_view(cgltf_data* data, cgltf_texture_view* view) -{ - cgltf_free_extensions(data, view->extensions, view->extensions_count); - cgltf_free_extras(data, &view->extras); -} - void cgltf_free(cgltf_data* data) { if (!data) @@ -1790,15 +1795,6 @@ void cgltf_free(cgltf_data* data) { data->memory.free_func(data->memory.user_data, data->accessors[i].name); - if(data->accessors[i].is_sparse) - { - cgltf_free_extensions(data, data->accessors[i].sparse.extensions, data->accessors[i].sparse.extensions_count); - cgltf_free_extensions(data, data->accessors[i].sparse.indices_extensions, data->accessors[i].sparse.indices_extensions_count); - cgltf_free_extensions(data, data->accessors[i].sparse.values_extensions,
data->accessors[i].sparse.values_extensions_count); - cgltf_free_extras(data, &data->accessors[i].sparse.extras); - cgltf_free_extras(data, &data->accessors[i].sparse.indices_extras); - cgltf_free_extras(data, &data->accessors[i].sparse.values_extras); - } cgltf_free_extensions(data, data->accessors[i].extensions, data->accessors[i].extensions_count); cgltf_free_extras(data, &data->accessors[i].extras); } @@ -1900,50 +1896,6 @@ void cgltf_free(cgltf_data* data) { data->memory.free_func(data->memory.user_data, data->materials[i].name); - if(data->materials[i].has_pbr_metallic_roughness) - { - cgltf_free_texture_view(data, &data->materials[i].pbr_metallic_roughness.metallic_roughness_texture); - cgltf_free_texture_view(data, &data->materials[i].pbr_metallic_roughness.base_color_texture); - } - if(data->materials[i].has_pbr_specular_glossiness) - { - cgltf_free_texture_view(data, &data->materials[i].pbr_specular_glossiness.diffuse_texture); - cgltf_free_texture_view(data, &data->materials[i].pbr_specular_glossiness.specular_glossiness_texture); - } - if(data->materials[i].has_clearcoat) - { - cgltf_free_texture_view(data, &data->materials[i].clearcoat.clearcoat_texture); - cgltf_free_texture_view(data, &data->materials[i].clearcoat.clearcoat_roughness_texture); - cgltf_free_texture_view(data, &data->materials[i].clearcoat.clearcoat_normal_texture); - } - if(data->materials[i].has_specular) - { - cgltf_free_texture_view(data, &data->materials[i].specular.specular_texture); - cgltf_free_texture_view(data, &data->materials[i].specular.specular_color_texture); - } - if(data->materials[i].has_transmission) - { - cgltf_free_texture_view(data, &data->materials[i].transmission.transmission_texture); - } - if (data->materials[i].has_volume) - { - cgltf_free_texture_view(data, &data->materials[i].volume.thickness_texture); - } - if(data->materials[i].has_sheen) - { - cgltf_free_texture_view(data, &data->materials[i].sheen.sheen_color_texture); - cgltf_free_texture_view(data, &data->materials[i].sheen.sheen_roughness_texture); - } - if(data->materials[i].has_iridescence) - { - cgltf_free_texture_view(data, &data->materials[i].iridescence.iridescence_texture); - cgltf_free_texture_view(data, &data->materials[i].iridescence.iridescence_thickness_texture); - } - - cgltf_free_texture_view(data, &data->materials[i].normal_texture); - cgltf_free_texture_view(data, &data->materials[i].occlusion_texture); - cgltf_free_texture_view(data, &data->materials[i].emissive_texture); - cgltf_free_extensions(data, data->materials[i].extensions, data->materials[i].extensions_count); cgltf_free_extras(data, &data->materials[i].extras); } @@ -2198,8 +2150,6 @@ static cgltf_ssize cgltf_component_read_integer(const void* in, cgltf_component_ return *((const uint16_t*) in); case cgltf_component_type_r_32u: return *((const uint32_t*) in); - case cgltf_component_type_r_32f: - return (cgltf_ssize)*((const float*) in); case cgltf_component_type_r_8: return *((const int8_t*) in); case cgltf_component_type_r_8u: @@ -2217,8 +2167,6 @@ static cgltf_size cgltf_component_read_index(const void* in, cgltf_component_typ return *((const uint16_t*) in); case cgltf_component_type_r_32u: return *((const uint32_t*) in); - case cgltf_component_type_r_32f: - return (cgltf_size)((cgltf_ssize)*((const float*) in)); case cgltf_component_type_r_8u: return *((const uint8_t*) in); default: @@ -2356,21 +2304,41 @@ cgltf_size cgltf_accessor_unpack_floats(const cgltf_accessor* accessor, cgltf_fl cgltf_size element_count = float_count / floats_per_element; 
// First pass: convert each element in the base accessor. - cgltf_float* dest = out; - cgltf_accessor dense = *accessor; - dense.is_sparse = 0; - for (cgltf_size index = 0; index < element_count; index++, dest += floats_per_element) + if (accessor->buffer_view == NULL) { - if (!cgltf_accessor_read_float(&dense, index, dest, floats_per_element)) + memset(out, 0, element_count * floats_per_element * sizeof(cgltf_float)); + } + else + { + const uint8_t* element = cgltf_buffer_view_data(accessor->buffer_view); + if (element == NULL) { return 0; } + element += accessor->offset; + + if (accessor->component_type == cgltf_component_type_r_32f && accessor->stride == floats_per_element * sizeof(cgltf_float)) + { + memcpy(out, element, element_count * floats_per_element * sizeof(cgltf_float)); + } + else + { + cgltf_float* dest = out; + + for (cgltf_size index = 0; index < element_count; index++, dest += floats_per_element, element += accessor->stride) + { + if (!cgltf_element_read_float(element, accessor->type, accessor->component_type, accessor->normalized, dest, floats_per_element)) + { + return 0; + } + } + } } // Second pass: write out each element in the sparse accessor. if (accessor->is_sparse) { - const cgltf_accessor_sparse* sparse = &dense.sparse; + const cgltf_accessor_sparse* sparse = &accessor->sparse; const uint8_t* index_data = cgltf_buffer_view_data(sparse->indices_buffer_view); const uint8_t* reader_head = cgltf_buffer_view_data(sparse->values_buffer_view); @@ -2384,17 +2352,15 @@ cgltf_size cgltf_accessor_unpack_floats(const cgltf_accessor* accessor, cgltf_fl reader_head += sparse->values_byte_offset; cgltf_size index_stride = cgltf_component_size(sparse->indices_component_type); - for (cgltf_size reader_index = 0; reader_index < sparse->count; reader_index++, index_data += index_stride) + for (cgltf_size reader_index = 0; reader_index < sparse->count; reader_index++, index_data += index_stride, reader_head += accessor->stride) { size_t writer_index = cgltf_component_read_index(index_data, sparse->indices_component_type); float* writer_head = out + writer_index * floats_per_element; - if (!cgltf_element_read_float(reader_head, dense.type, dense.component_type, dense.normalized, writer_head, floats_per_element)) + if (!cgltf_element_read_float(reader_head, accessor->type, accessor->component_type, accessor->normalized, writer_head, floats_per_element)) { return 0; } - - reader_head += dense.stride; } } @@ -2488,6 +2454,143 @@ cgltf_size cgltf_accessor_read_index(const cgltf_accessor* accessor, cgltf_size return cgltf_component_read_index(element, accessor->component_type); } +cgltf_size cgltf_mesh_index(const cgltf_data* data, const cgltf_mesh* object) +{ + assert(object && (cgltf_size)(object - data->meshes) < data->meshes_count); + return (cgltf_size)(object - data->meshes); +} + +cgltf_size cgltf_material_index(const cgltf_data* data, const cgltf_material* object) +{ + assert(object && (cgltf_size)(object - data->materials) < data->materials_count); + return (cgltf_size)(object - data->materials); +} + +cgltf_size cgltf_accessor_index(const cgltf_data* data, const cgltf_accessor* object) +{ + assert(object && (cgltf_size)(object - data->accessors) < data->accessors_count); + return (cgltf_size)(object - data->accessors); +} + +cgltf_size cgltf_buffer_view_index(const cgltf_data* data, const cgltf_buffer_view* object) +{ + assert(object && (cgltf_size)(object - data->buffer_views) < data->buffer_views_count); + return (cgltf_size)(object - data->buffer_views); +} + +cgltf_size 
cgltf_buffer_index(const cgltf_data* data, const cgltf_buffer* object) +{ + assert(object && (cgltf_size)(object - data->buffers) < data->buffers_count); + return (cgltf_size)(object - data->buffers); +} + +cgltf_size cgltf_image_index(const cgltf_data* data, const cgltf_image* object) +{ + assert(object && (cgltf_size)(object - data->images) < data->images_count); + return (cgltf_size)(object - data->images); +} + +cgltf_size cgltf_texture_index(const cgltf_data* data, const cgltf_texture* object) +{ + assert(object && (cgltf_size)(object - data->textures) < data->textures_count); + return (cgltf_size)(object - data->textures); +} + +cgltf_size cgltf_sampler_index(const cgltf_data* data, const cgltf_sampler* object) +{ + assert(object && (cgltf_size)(object - data->samplers) < data->samplers_count); + return (cgltf_size)(object - data->samplers); +} + +cgltf_size cgltf_skin_index(const cgltf_data* data, const cgltf_skin* object) +{ + assert(object && (cgltf_size)(object - data->skins) < data->skins_count); + return (cgltf_size)(object - data->skins); +} + +cgltf_size cgltf_camera_index(const cgltf_data* data, const cgltf_camera* object) +{ + assert(object && (cgltf_size)(object - data->cameras) < data->cameras_count); + return (cgltf_size)(object - data->cameras); +} + +cgltf_size cgltf_light_index(const cgltf_data* data, const cgltf_light* object) +{ + assert(object && (cgltf_size)(object - data->lights) < data->lights_count); + return (cgltf_size)(object - data->lights); +} + +cgltf_size cgltf_node_index(const cgltf_data* data, const cgltf_node* object) +{ + assert(object && (cgltf_size)(object - data->nodes) < data->nodes_count); + return (cgltf_size)(object - data->nodes); +} + +cgltf_size cgltf_scene_index(const cgltf_data* data, const cgltf_scene* object) +{ + assert(object && (cgltf_size)(object - data->scenes) < data->scenes_count); + return (cgltf_size)(object - data->scenes); +} + +cgltf_size cgltf_animation_index(const cgltf_data* data, const cgltf_animation* object) +{ + assert(object && (cgltf_size)(object - data->animations) < data->animations_count); + return (cgltf_size)(object - data->animations); +} + +cgltf_size cgltf_animation_sampler_index(const cgltf_animation* animation, const cgltf_animation_sampler* object) +{ + assert(object && (cgltf_size)(object - animation->samplers) < animation->samplers_count); + return (cgltf_size)(object - animation->samplers); +} + +cgltf_size cgltf_animation_channel_index(const cgltf_animation* animation, const cgltf_animation_channel* object) +{ + assert(object && (cgltf_size)(object - animation->channels) < animation->channels_count); + return (cgltf_size)(object - animation->channels); +} + +cgltf_size cgltf_accessor_unpack_indices(const cgltf_accessor* accessor, cgltf_uint* out, cgltf_size index_count) +{ + if (out == NULL) + { + return accessor->count; + } + + index_count = accessor->count < index_count ? 
accessor->count : index_count; + + if (accessor->is_sparse) + { + return 0; + } + if (accessor->buffer_view == NULL) + { + return 0; + } + const uint8_t* element = cgltf_buffer_view_data(accessor->buffer_view); + if (element == NULL) + { + return 0; + } + element += accessor->offset; + + if (accessor->component_type == cgltf_component_type_r_32u && accessor->stride == sizeof(cgltf_uint)) + { + memcpy(out, element, index_count * sizeof(cgltf_uint)); + } + else + { + cgltf_uint* dest = out; + + for (cgltf_size index = 0; index < index_count; index++, dest++, element += accessor->stride) + { + *dest = (cgltf_uint)cgltf_component_read_index(element, accessor->component_type); + } + } + + return index_count; +} + #define CGLTF_ERROR_JSON -1 #define CGLTF_ERROR_NOMEM -2 #define CGLTF_ERROR_LEGACY -3 @@ -2864,6 +2967,10 @@ static int cgltf_parse_json_draco_mesh_compression(cgltf_options* options, jsmnt out_draco_mesh_compression->buffer_view = CGLTF_PTRINDEX(cgltf_buffer_view, cgltf_json_to_int(tokens + i, json_chunk)); ++i; } + else + { + i = cgltf_skip_json(tokens, i+1); + } if (i < 0) { @@ -2889,11 +2996,9 @@ static int cgltf_parse_json_mesh_gpu_instancing(cgltf_options* options, jsmntok_ { i = cgltf_parse_json_attribute_list(options, tokens, i + 1, json_chunk, &out_mesh_gpu_instancing->attributes, &out_mesh_gpu_instancing->attributes_count); } - else if (cgltf_json_strcmp(tokens + i, json_chunk, "bufferView") == 0) + else { - ++i; - out_mesh_gpu_instancing->buffer_view = CGLTF_PTRINDEX(cgltf_buffer_view, cgltf_json_to_int(tokens + i, json_chunk)); - ++i; + i = cgltf_skip_json(tokens, i+1); } if (i < 0) @@ -3291,7 +3396,7 @@ static cgltf_component_type cgltf_json_to_component_type(jsmntok_t const* tok, c } } -static int cgltf_parse_json_accessor_sparse(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_accessor_sparse* out_sparse) +static int cgltf_parse_json_accessor_sparse(jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_accessor_sparse* out_sparse) { CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); @@ -3338,14 +3443,6 @@ static int cgltf_parse_json_accessor_sparse(cgltf_options* options, jsmntok_t co out_sparse->indices_component_type = cgltf_json_to_component_type(tokens + i, json_chunk); ++i; } - else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) - { - i = cgltf_parse_json_extras(options, tokens, i + 1, json_chunk, &out_sparse->indices_extras); - } - else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) - { - i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_sparse->indices_extensions_count, &out_sparse->indices_extensions); - } else { i = cgltf_skip_json(tokens, i+1); @@ -3381,14 +3478,6 @@ static int cgltf_parse_json_accessor_sparse(cgltf_options* options, jsmntok_t co out_sparse->values_byte_offset = cgltf_json_to_size(tokens + i, json_chunk); ++i; } - else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) - { - i = cgltf_parse_json_extras(options, tokens, i + 1, json_chunk, &out_sparse->values_extras); - } - else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) - { - i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_sparse->values_extensions_count, &out_sparse->values_extensions); - } else { i = cgltf_skip_json(tokens, i+1); @@ -3400,14 +3489,6 @@ static int cgltf_parse_json_accessor_sparse(cgltf_options* options, jsmntok_t co } } } - else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) - { - i = 
cgltf_parse_json_extras(options, tokens, i + 1, json_chunk, &out_sparse->extras); - } - else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) - { - i = cgltf_parse_json_unprocessed_extensions(options, tokens, i, json_chunk, &out_sparse->extensions_count, &out_sparse->extensions); - } else { i = cgltf_skip_json(tokens, i+1); @@ -3521,7 +3602,7 @@ static int cgltf_parse_json_accessor(cgltf_options* options, jsmntok_t const* to else if (cgltf_json_strcmp(tokens + i, json_chunk, "sparse") == 0) { out_accessor->is_sparse = 1; - i = cgltf_parse_json_accessor_sparse(options, tokens, i + 1, json_chunk, &out_accessor->sparse); + i = cgltf_parse_json_accessor_sparse(tokens, i + 1, json_chunk, &out_accessor->sparse); } else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) { @@ -3593,6 +3674,8 @@ static int cgltf_parse_json_texture_transform(jsmntok_t const* tokens, int i, co static int cgltf_parse_json_texture_view(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_texture_view* out_texture_view) { + (void)options; + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); out_texture_view->scale = 1.0f; @@ -3629,28 +3712,12 @@ static int cgltf_parse_json_texture_view(cgltf_options* options, jsmntok_t const out_texture_view->scale = cgltf_json_to_float(tokens + i, json_chunk); ++i; } - else if (cgltf_json_strcmp(tokens + i, json_chunk, "extras") == 0) - { - i = cgltf_parse_json_extras(options, tokens, i + 1, json_chunk, &out_texture_view->extras); - } else if (cgltf_json_strcmp(tokens + i, json_chunk, "extensions") == 0) { ++i; CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); - if(out_texture_view->extensions) - { - return CGLTF_ERROR_JSON; - } - int extensions_size = tokens[i].size; - out_texture_view->extensions_count = 0; - out_texture_view->extensions = (cgltf_extension*)cgltf_calloc(options, sizeof(cgltf_extension), extensions_size); - - if (!out_texture_view->extensions) - { - return CGLTF_ERROR_NOMEM; - } ++i; @@ -3665,7 +3732,7 @@ static int cgltf_parse_json_texture_view(cgltf_options* options, jsmntok_t const } else { - i = cgltf_parse_json_unprocessed_extension(options, tokens, i, json_chunk, &(out_texture_view->extensions[out_texture_view->extensions_count++])); + i = cgltf_skip_json(tokens, i + 1); } if (i < 0) @@ -3719,13 +3786,11 @@ static int cgltf_parse_json_pbr_metallic_roughness(cgltf_options* options, jsmnt } else if (cgltf_json_strcmp(tokens+i, json_chunk, "baseColorTexture") == 0) { - i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, - &out_pbr->base_color_texture); + i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, &out_pbr->base_color_texture); } else if (cgltf_json_strcmp(tokens + i, json_chunk, "metallicRoughnessTexture") == 0) { - i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, - &out_pbr->metallic_roughness_texture); + i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, &out_pbr->metallic_roughness_texture); } else { @@ -4128,6 +4193,47 @@ static int cgltf_parse_json_iridescence(cgltf_options* options, jsmntok_t const* return i; } +static int cgltf_parse_json_anisotropy(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_anisotropy* out_anisotropy) +{ + CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); + int size = tokens[i].size; + ++i; + + + for (int j = 0; j < size; ++j) + { + CGLTF_CHECK_KEY(tokens[i]); + + if (cgltf_json_strcmp(tokens + i, json_chunk, "anisotropyStrength") == 0) + { + ++i; + 
out_anisotropy->anisotropy_strength = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "anisotropyRotation") == 0) + { + ++i; + out_anisotropy->anisotropy_rotation = cgltf_json_to_float(tokens + i, json_chunk); + ++i; + } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "anisotropyTexture") == 0) + { + i = cgltf_parse_json_texture_view(options, tokens, i + 1, json_chunk, &out_anisotropy->anisotropy_texture); + } + else + { + i = cgltf_skip_json(tokens, i + 1); + } + + if (i < 0) + { + return i; + } + } + + return i; +} + static int cgltf_parse_json_image(cgltf_options* options, jsmntok_t const* tokens, int i, const uint8_t* json_chunk, cgltf_image* out_image) { CGLTF_CHECK_TOKTYPE(tokens[i], JSMN_OBJECT); @@ -4516,6 +4622,11 @@ static int cgltf_parse_json_material(cgltf_options* options, jsmntok_t const* to out_material->has_iridescence = 1; i = cgltf_parse_json_iridescence(options, tokens, i + 1, json_chunk, &out_material->iridescence); } + else if (cgltf_json_strcmp(tokens + i, json_chunk, "KHR_materials_anisotropy") == 0) + { + out_material->has_anisotropy = 1; + i = cgltf_parse_json_anisotropy(options, tokens, i + 1, json_chunk, &out_material->anisotropy); + } else { i = cgltf_parse_json_unprocessed_extension(options, tokens, i, json_chunk, &(out_material->extensions[out_material->extensions_count++])); @@ -6367,6 +6478,8 @@ static int cgltf_fixup_pointers(cgltf_data* data) CGLTF_PTRFIXUP(data->materials[i].iridescence.iridescence_texture.texture, data->textures, data->textures_count); CGLTF_PTRFIXUP(data->materials[i].iridescence.iridescence_thickness_texture.texture, data->textures, data->textures_count); + + CGLTF_PTRFIXUP(data->materials[i].anisotropy.anisotropy_texture.texture, data->textures, data->textures_count); } for (cgltf_size i = 0; i < data->buffer_views_count; ++i) @@ -6411,7 +6524,6 @@ static int cgltf_fixup_pointers(cgltf_data* data) if (data->nodes[i].has_mesh_gpu_instancing) { - CGLTF_PTRFIXUP_REQ(data->nodes[i].mesh_gpu_instancing.buffer_view, data->buffer_views, data->buffer_views_count); for (cgltf_size m = 0; m < data->nodes[i].mesh_gpu_instancing.attributes_count; ++m) { CGLTF_PTRFIXUP_REQ(data->nodes[i].mesh_gpu_instancing.attributes[m].data, data->accessors, data->accessors_count); diff --git a/raylib/external/dr_flac.h b/raylib/external/dr_flac.h index 0c43eed..14324cf 100644 --- a/raylib/external/dr_flac.h +++ b/raylib/external/dr_flac.h @@ -1,6 +1,6 @@ /* FLAC audio decoder. Choice of public domain or MIT-0. See license statements at the end of this file. -dr_flac - v0.12.39 - 2022-09-17 +dr_flac - v0.12.42 - 2023-11-02 David Reid - mackron@gmail.com @@ -235,12 +235,12 @@ extern "C" { #define DRFLAC_VERSION_MAJOR 0 #define DRFLAC_VERSION_MINOR 12 -#define DRFLAC_VERSION_REVISION 39 +#define DRFLAC_VERSION_REVISION 42 #define DRFLAC_VERSION_STRING DRFLAC_XSTRINGIFY(DRFLAC_VERSION_MAJOR) "." DRFLAC_XSTRINGIFY(DRFLAC_VERSION_MINOR) "." DRFLAC_XSTRINGIFY(DRFLAC_VERSION_REVISION) #include /* For size_t. */ -/* Sized types. 
*/ +/* Sized Types */ typedef signed char drflac_int8; typedef unsigned char drflac_uint8; typedef signed short drflac_int16; @@ -273,7 +273,9 @@ typedef drflac_uint8 drflac_bool8; typedef drflac_uint32 drflac_bool32; #define DRFLAC_TRUE 1 #define DRFLAC_FALSE 0 +/* End Sized Types */ +/* Decorations */ #if !defined(DRFLAC_API) #if defined(DRFLAC_DLL) #if defined(_WIN32) @@ -303,6 +305,7 @@ typedef drflac_uint32 drflac_bool32; #define DRFLAC_PRIVATE static #endif #endif +/* End Decorations */ #if defined(_MSC_VER) && _MSC_VER >= 1700 /* Visual Studio 2012 */ #define DRFLAC_DEPRECATED __declspec(deprecated) @@ -321,6 +324,16 @@ typedef drflac_uint32 drflac_bool32; DRFLAC_API void drflac_version(drflac_uint32* pMajor, drflac_uint32* pMinor, drflac_uint32* pRevision); DRFLAC_API const char* drflac_version_string(void); +/* Allocation Callbacks */ +typedef struct +{ + void* pUserData; + void* (* onMalloc)(size_t sz, void* pUserData); + void* (* onRealloc)(void* p, size_t sz, void* pUserData); + void (* onFree)(void* p, void* pUserData); +} drflac_allocation_callbacks; +/* End Allocation Callbacks */ + /* As data is read from the client it is placed into an internal buffer for fast access. This controls the size of that buffer. Larger values means more speed, but also more memory. In my testing there is diminishing returns after about 4KB, but you can fiddle with this to suit your own needs. Must be a multiple of 8. @@ -329,11 +342,22 @@ but also more memory. In my testing there is diminishing returns after about 4KB #define DR_FLAC_BUFFER_SIZE 4096 #endif -/* Check if we can enable 64-bit optimizations. */ + +/* Architecture Detection */ #if defined(_WIN64) || defined(_LP64) || defined(__LP64__) #define DRFLAC_64BIT #endif +#if defined(__x86_64__) || defined(_M_X64) + #define DRFLAC_X64 +#elif defined(__i386) || defined(_M_IX86) + #define DRFLAC_X86 +#elif defined(__arm__) || defined(_M_ARM) || defined(__arm64) || defined(__arm64__) || defined(__aarch64__) || defined(_M_ARM64) + #define DRFLAC_ARM +#endif +/* End Architecture Detection */ + + #ifdef DRFLAC_64BIT typedef drflac_uint64 drflac_cache_t; #else @@ -562,14 +586,6 @@ will be set to one of the DRFLAC_METADATA_BLOCK_TYPE_* tokens. typedef void (* drflac_meta_proc)(void* pUserData, drflac_metadata* pMetadata); -typedef struct -{ - void* pUserData; - void* (* onMalloc)(size_t sz, void* pUserData); - void* (* onRealloc)(void* p, size_t sz, void* pUserData); - void (* onFree)(void* p, void* pUserData); -} drflac_allocation_callbacks; - /* Structure for internal use. Only used for decoders opened with drflac_open_memory. */ typedef struct { @@ -1351,6 +1367,7 @@ DRFLAC_API drflac_bool32 drflac_next_cuesheet_track(drflac_cuesheet_track_iterat #include #include +/* Inline */ #ifdef _MSC_VER #define DRFLAC_INLINE __forceinline #elif defined(__GNUC__) @@ -1377,15 +1394,7 @@ DRFLAC_API drflac_bool32 drflac_next_cuesheet_track(drflac_cuesheet_track_iterat #else #define DRFLAC_INLINE #endif - -/* CPU architecture. */ -#if defined(__x86_64__) || defined(_M_X64) - #define DRFLAC_X64 -#elif defined(__i386) || defined(_M_IX86) - #define DRFLAC_X86 -#elif defined(__arm__) || defined(_M_ARM) || defined(__arm64) || defined(__arm64__) || defined(__aarch64__) || defined(_M_ARM64) - #define DRFLAC_ARM -#endif +/* End Inline */ /* Intrinsics Support @@ -1623,6 +1632,7 @@ static DRFLAC_INLINE drflac_bool32 drflac_has_sse41(void) #define DRFLAC_MAX_SIMD_VECTOR_SIZE 64 /* 64 for AVX-512 in the future. 
*/ +/* Result Codes */ typedef drflac_int32 drflac_result; #define DRFLAC_SUCCESS 0 #define DRFLAC_ERROR -1 /* A generic error. */ @@ -1678,7 +1688,10 @@ typedef drflac_int32 drflac_result; #define DRFLAC_CANCELLED -51 #define DRFLAC_MEMORY_ALREADY_MAPPED -52 #define DRFLAC_AT_END -53 -#define DRFLAC_CRC_MISMATCH -128 + +#define DRFLAC_CRC_MISMATCH -100 +/* End Result Codes */ + #define DRFLAC_SUBFRAME_CONSTANT 0 #define DRFLAC_SUBFRAME_VERBATIM 1 @@ -1838,7 +1851,7 @@ static DRFLAC_INLINE drflac_uint32 drflac__swap_endian_uint32(drflac_uint32 n) #if defined(_MSC_VER) && !defined(__clang__) return _byteswap_ulong(n); #elif defined(__GNUC__) || defined(__clang__) - #if defined(DRFLAC_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 6) && !defined(DRFLAC_64BIT) /* <-- 64-bit inline assembly has not been tested, so disabling for now. */ + #if defined(DRFLAC_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 6) && !defined(__ARM_ARCH_6M__) && !defined(DRFLAC_64BIT) /* <-- 64-bit inline assembly has not been tested, so disabling for now. */ /* Inline assembly optimized implementation for ARM. In my testing, GCC does not generate optimized code with __builtin_bswap32(). */ drflac_uint32 r; __asm__ __volatile__ ( @@ -2802,7 +2815,7 @@ static DRFLAC_INLINE drflac_uint32 drflac__clz_lzcnt(drflac_cache_t x) return r; } - #elif defined(DRFLAC_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 5) && !defined(DRFLAC_64BIT) /* <-- I haven't tested 64-bit inline assembly, so only enabling this for the 32-bit build for now. */ + #elif defined(DRFLAC_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 5) && !defined(__ARM_ARCH_6M__) && !defined(DRFLAC_64BIT) /* <-- I haven't tested 64-bit inline assembly, so only enabling this for the 32-bit build for now. */ { unsigned int r; __asm__ __volatile__ ( @@ -6479,7 +6492,7 @@ static drflac_bool32 drflac__read_and_decode_metadata(drflac_read_proc onRead, d for (;;) { drflac_metadata metadata; drflac_uint8 isLastBlock = 0; - drflac_uint8 blockType; + drflac_uint8 blockType = 0; drflac_uint32 blockSize; if (drflac__read_and_decode_block_header(onRead, pUserData, &isLastBlock, &blockType, &blockSize) == DRFLAC_FALSE) { return DRFLAC_FALSE; @@ -8141,6 +8154,7 @@ static drflac* drflac_open_with_metadata_private(drflac_read_proc onRead, drflac #include /* For wcslen(), wcsrtombs() */ #endif +/* Errno */ /* drflac_result_from_errno() is only used for fopen() and wfopen() so putting it inside DR_WAV_NO_STDIO for now. If something else needs this later we can move it out. 
*/ #include static drflac_result drflac_result_from_errno(int e) @@ -8544,7 +8558,9 @@ static drflac_result drflac_result_from_errno(int e) default: return DRFLAC_ERROR; } } +/* End Errno */ +/* fopen */ static drflac_result drflac_fopen(FILE** ppFile, const char* pFilePath, const char* pOpenMode) { #if defined(_MSC_VER) && _MSC_VER >= 1400 @@ -8702,6 +8718,7 @@ static drflac_result drflac_wfopen(FILE** ppFile, const wchar_t* pFilePath, cons return DRFLAC_SUCCESS; } #endif +/* End fopen */ static size_t drflac__on_read_stdio(void* pUserData, void* bufferOut, size_t bytesToRead) { @@ -11666,6 +11683,7 @@ DRFLAC_API drflac_bool32 drflac_seek_to_pcm_frame(drflac* pFlac, drflac_uint64 p /* High Level APIs */ +/* SIZE_MAX */ #if defined(SIZE_MAX) #define DRFLAC_SIZE_MAX SIZE_MAX #else @@ -11675,6 +11693,7 @@ DRFLAC_API drflac_bool32 drflac_seek_to_pcm_frame(drflac* pFlac, drflac_uint64 p #define DRFLAC_SIZE_MAX 0xFFFFFFFF #endif #endif +/* End SIZE_MAX */ /* Using a macro as the definition of the drflac__full_decode_and_close_*() API family. Sue me. */ @@ -12058,6 +12077,16 @@ DRFLAC_API drflac_bool32 drflac_next_cuesheet_track(drflac_cuesheet_track_iterat /* REVISION HISTORY ================ +v0.12.42 - 2023-11-02 + - Fix build for ARMv6-M. + - Fix a compilation warning with GCC. + +v0.12.41 - 2023-06-17 + - Fix an incorrect date in revision history. No functional change. + +v0.12.40 - 2023-05-22 + - Minor code restructure. No functional change. + v0.12.39 - 2022-09-17 - Fix compilation with DJGPP. - Fix compilation error with Visual Studio 2019 and the ARM build. @@ -12488,7 +12517,7 @@ For more information, please refer to =============================================================================== ALTERNATIVE 2 - MIT No Attribution =============================================================================== -Copyright 2020 David Reid +Copyright 2023 David Reid Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/raylib/external/dr_mp3.h b/raylib/external/dr_mp3.h index 59876c8..84849ee 100644 --- a/raylib/external/dr_mp3.h +++ b/raylib/external/dr_mp3.h @@ -1,6 +1,6 @@ /* MP3 audio decoder. Choice of public domain or MIT-0. See license statements at the end of this file. -dr_mp3 - v0.6.34 - 2022-09-17 +dr_mp3 - v0.6.38 - 2023-11-02 David Reid - mackron@gmail.com @@ -95,12 +95,12 @@ extern "C" { #define DRMP3_VERSION_MAJOR 0 #define DRMP3_VERSION_MINOR 6 -#define DRMP3_VERSION_REVISION 34 +#define DRMP3_VERSION_REVISION 38 #define DRMP3_VERSION_STRING DRMP3_XSTRINGIFY(DRMP3_VERSION_MAJOR) "." DRMP3_XSTRINGIFY(DRMP3_VERSION_MINOR) "." DRMP3_XSTRINGIFY(DRMP3_VERSION_REVISION) #include /* For size_t. */ -/* Sized types. */ +/* Sized Types */ typedef signed char drmp3_int8; typedef unsigned char drmp3_uint8; typedef signed short drmp3_int16; @@ -133,7 +133,9 @@ typedef drmp3_uint8 drmp3_bool8; typedef drmp3_uint32 drmp3_bool32; #define DRMP3_TRUE 1 #define DRMP3_FALSE 0 +/* End Sized Types */ +/* Decorations */ #if !defined(DRMP3_API) #if defined(DRMP3_DLL) #if defined(_WIN32) @@ -163,7 +165,9 @@ typedef drmp3_uint32 drmp3_bool32; #define DRMP3_PRIVATE static #endif #endif +/* End Decorations */ +/* Result Codes */ typedef drmp3_int32 drmp3_result; #define DRMP3_SUCCESS 0 #define DRMP3_ERROR -1 /* A generic error. 
*/ @@ -219,11 +223,12 @@ typedef drmp3_int32 drmp3_result; #define DRMP3_CANCELLED -51 #define DRMP3_MEMORY_ALREADY_MAPPED -52 #define DRMP3_AT_END -53 - +/* End Result Codes */ #define DRMP3_MAX_PCM_FRAMES_PER_MP3_FRAME 1152 #define DRMP3_MAX_SAMPLES_PER_FRAME (DRMP3_MAX_PCM_FRAMES_PER_MP3_FRAME*2) +/* Inline */ #ifdef _MSC_VER #define DRMP3_INLINE __forceinline #elif defined(__GNUC__) @@ -250,12 +255,24 @@ typedef drmp3_int32 drmp3_result; #else #define DRMP3_INLINE #endif +/* End Inline */ DRMP3_API void drmp3_version(drmp3_uint32* pMajor, drmp3_uint32* pMinor, drmp3_uint32* pRevision); DRMP3_API const char* drmp3_version_string(void); +/* Allocation Callbacks */ +typedef struct +{ + void* pUserData; + void* (* onMalloc)(size_t sz, void* pUserData); + void* (* onRealloc)(void* p, size_t sz, void* pUserData); + void (* onFree)(void* p, void* pUserData); +} drmp3_allocation_callbacks; +/* End Allocation Callbacks */ + + /* Low Level Push API ================== @@ -329,14 +346,6 @@ will be either drmp3_seek_origin_start or drmp3_seek_origin_current. */ typedef drmp3_bool32 (* drmp3_seek_proc)(void* pUserData, int offset, drmp3_seek_origin origin); -typedef struct -{ - void* pUserData; - void* (* onMalloc)(size_t sz, void* pUserData); - void* (* onRealloc)(void* p, size_t sz, void* pUserData); - void (* onFree)(void* p, void* pUserData); -} drmp3_allocation_callbacks; - typedef struct { drmp3_uint32 channels; @@ -704,7 +713,7 @@ static int drmp3_have_simd(void) #endif -#if defined(__ARM_ARCH) && (__ARM_ARCH >= 6) && !defined(__aarch64__) && !defined(_M_ARM64) +#if defined(__ARM_ARCH) && (__ARM_ARCH >= 6) && !defined(__aarch64__) && !defined(_M_ARM64) && !defined(__ARM_ARCH_6M__) #define DRMP3_HAVE_ARMV6 1 static __inline__ __attribute__((always_inline)) drmp3_int32 drmp3_clip_int16_arm(drmp3_int32 a) { @@ -2415,6 +2424,7 @@ DRMP3_API void drmp3dec_f32_to_s16(const float *in, drmp3_int16 *out, size_t num Main Public API ************************************************************************************************************************************************************/ +/* SIZE_MAX */ #if defined(SIZE_MAX) #define DRMP3_SIZE_MAX SIZE_MAX #else @@ -2424,6 +2434,7 @@ DRMP3_API void drmp3dec_f32_to_s16(const float *in, drmp3_int16 *out, size_t num #define DRMP3_SIZE_MAX 0xFFFFFFFF #endif #endif +/* End SIZE_MAX */ /* Options. */ #ifndef DRMP3_SEEK_LEADING_MP3_FRAMES @@ -2690,6 +2701,11 @@ static drmp3_uint32 drmp3_decode_next_frame_ex__callbacks(drmp3* pMP3, drmp3d_sa DRMP3_ASSERT(pMP3->pData != NULL); DRMP3_ASSERT(pMP3->dataCapacity > 0); + /* Do a runtime check here to try silencing a false-positive from clang-analyzer. */ + if (pMP3->pData == NULL) { + return 0; + } + pcmFramesRead = drmp3dec_decode_frame(&pMP3->decoder, pMP3->pData + pMP3->dataConsumed, (int)pMP3->dataSize, pPCMFrames, &info); /* <-- Safe size_t -> int conversion thanks to the check above. */ /* Consume the data. */ @@ -2931,6 +2947,7 @@ DRMP3_API drmp3_bool32 drmp3_init_memory(drmp3* pMP3, const void* pData, size_t #include #include /* For wcslen(), wcsrtombs() */ +/* Errno */ /* drmp3_result_from_errno() is only used inside DR_MP3_NO_STDIO for now. Move this out if it's ever used elsewhere. 
*/ #include static drmp3_result drmp3_result_from_errno(int e) @@ -3334,7 +3351,9 @@ static drmp3_result drmp3_result_from_errno(int e) default: return DRMP3_ERROR; } } +/* End Errno */ +/* fopen */ static drmp3_result drmp3_fopen(FILE** ppFile, const char* pFilePath, const char* pOpenMode) { #if defined(_MSC_VER) && _MSC_VER >= 1400 @@ -3490,7 +3509,7 @@ static drmp3_result drmp3_wfopen(FILE** ppFile, const wchar_t* pFilePath, const return DRMP3_SUCCESS; } - +/* End fopen */ static size_t drmp3__on_read_stdio(void* pUserData, void* pBufferOut, size_t bytesToRead) @@ -4476,6 +4495,18 @@ counts rather than sample counts. /* REVISION HISTORY ================ +v0.6.38 - 2023-11-02 + - Fix build for ARMv6-M. + +v0.6.37 - 2023-07-07 + - Silence a static analysis warning. + +v0.6.36 - 2023-06-17 + - Fix an incorrect date in revision history. No functional change. + +v0.6.35 - 2023-05-22 + - Minor code restructure. No functional change. + v0.6.34 - 2022-09-17 - Fix compilation with DJGPP. - Fix compilation when compiling with x86 with no SSE2. @@ -4777,7 +4808,7 @@ For more information, please refer to =============================================================================== ALTERNATIVE 2 - MIT No Attribution =============================================================================== -Copyright 2020 David Reid +Copyright 2023 David Reid Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/raylib/external/dr_wav.h b/raylib/external/dr_wav.h index 2f885a0..36451b5 100644 --- a/raylib/external/dr_wav.h +++ b/raylib/external/dr_wav.h @@ -1,6 +1,6 @@ /* WAV audio loader and writer. Choice of public domain or MIT-0. See license statements at the end of this file. -dr_wav - v0.13.7 - 2022-09-17 +dr_wav - v0.13.13 - 2023-11-02 David Reid - mackron@gmail.com @@ -79,7 +79,9 @@ dr_wav can also be used to output WAV files. This does not currently support com drwav_uint64 framesWritten = drwav_write_pcm_frames(pWav, frameCount, pSamples); ``` -dr_wav has seamless support the Sony Wave64 format. The decoder will automatically detect it and it should Just Work without any manual intervention. +Note that writing to AIFF or RIFX is not supported. + +dr_wav has support for decoding from a number of different encapsulation formats. See below for details. Build Options @@ -96,23 +98,40 @@ Build Options Disables all functions ending with `_w`. Use this if your compiler does not provide wchar.h. Not required if DR_WAV_NO_STDIO is also defined. +Supported Encapsulations +======================== +- RIFF (Regular WAV) +- RIFX (Big-Endian) +- AIFF (Does not currently support ADPCM) +- RF64 +- W64 + +Note that AIFF and RIFX do not support write mode, nor do they support reading of metadata. + + +Supported Encodings +=================== +- Unsigned 8-bit PCM +- Signed 12-bit PCM +- Signed 16-bit PCM +- Signed 24-bit PCM +- Signed 32-bit PCM +- IEEE 32-bit floating point +- IEEE 64-bit floating point +- A-law and u-law +- Microsoft ADPCM +- IMA ADPCM (DVI, format code 0x11) + +8-bit PCM encodings are always assumed to be unsigned. Signed 8-bit encoding can only be read with `drwav_read_raw()`. + +Note that ADPCM is not currently supported with AIFF. Contributions welcome. + Notes ===== - Samples are always interleaved. - The default read function does not do any data conversion. 
Use `drwav_read_pcm_frames_f32()`, `drwav_read_pcm_frames_s32()` and `drwav_read_pcm_frames_s16()` - to read and convert audio data to 32-bit floating point, signed 32-bit integer and signed 16-bit integer samples respectively. Tested and supported internal - formats include the following: - - Unsigned 8-bit PCM - - Signed 12-bit PCM - - Signed 16-bit PCM - - Signed 24-bit PCM - - Signed 32-bit PCM - - IEEE 32-bit floating point - - IEEE 64-bit floating point - - A-law and u-law - - Microsoft ADPCM - - IMA ADPCM (DVI, format code 0x11) + to read and convert audio data to 32-bit floating point, signed 32-bit integer and signed 16-bit integer samples respectively. - dr_wav will try to read the WAV file as best it can, even if it's not strictly conformant to the WAV format. */ @@ -128,12 +147,12 @@ extern "C" { #define DRWAV_VERSION_MAJOR 0 #define DRWAV_VERSION_MINOR 13 -#define DRWAV_VERSION_REVISION 7 +#define DRWAV_VERSION_REVISION 13 #define DRWAV_VERSION_STRING DRWAV_XSTRINGIFY(DRWAV_VERSION_MAJOR) "." DRWAV_XSTRINGIFY(DRWAV_VERSION_MINOR) "." DRWAV_XSTRINGIFY(DRWAV_VERSION_REVISION) #include /* For size_t. */ -/* Sized types. */ +/* Sized Types */ typedef signed char drwav_int8; typedef unsigned char drwav_uint8; typedef signed short drwav_int16; @@ -166,7 +185,9 @@ typedef drwav_uint8 drwav_bool8; typedef drwav_uint32 drwav_bool32; #define DRWAV_TRUE 1 #define DRWAV_FALSE 0 +/* End Sized Types */ +/* Decorations */ #if !defined(DRWAV_API) #if defined(DRWAV_DLL) #if defined(_WIN32) @@ -196,7 +217,9 @@ typedef drwav_uint32 drwav_bool32; #define DRWAV_PRIVATE static #endif #endif +/* End Decorations */ +/* Result Codes */ typedef drwav_int32 drwav_result; #define DRWAV_SUCCESS 0 #define DRWAV_ERROR -1 /* A generic error. */ @@ -252,6 +275,7 @@ typedef drwav_int32 drwav_result; #define DRWAV_CANCELLED -51 #define DRWAV_MEMORY_ALREADY_MAPPED -52 #define DRWAV_AT_END -53 +/* End Result Codes */ /* Common data formats. */ #define DR_WAVE_FORMAT_PCM 0x1 @@ -264,10 +288,21 @@ typedef drwav_int32 drwav_result; /* Flags to pass into drwav_init_ex(), etc. */ #define DRWAV_SEQUENTIAL 0x00000001 +#define DRWAV_WITH_METADATA 0x00000002 DRWAV_API void drwav_version(drwav_uint32* pMajor, drwav_uint32* pMinor, drwav_uint32* pRevision); DRWAV_API const char* drwav_version_string(void); +/* Allocation Callbacks */ +typedef struct +{ + void* pUserData; + void* (* onMalloc)(size_t sz, void* pUserData); + void* (* onRealloc)(void* p, size_t sz, void* pUserData); + void (* onFree)(void* p, void* pUserData); +} drwav_allocation_callbacks; +/* End Allocation Callbacks */ + typedef enum { drwav_seek_origin_start, @@ -277,8 +312,10 @@ typedef enum typedef enum { drwav_container_riff, + drwav_container_rifx, drwav_container_w64, - drwav_container_rf64 + drwav_container_rf64, + drwav_container_aiff } drwav_container; typedef struct @@ -409,13 +446,6 @@ The read pointer will be sitting on the first byte after the chunk's header. You */ typedef drwav_uint64 (* drwav_chunk_proc)(void* pChunkUserData, drwav_read_proc onRead, drwav_seek_proc onSeek, void* pReadSeekUserData, const drwav_chunk_header* pChunkHeader, drwav_container container, const drwav_fmt* pFMT); -typedef struct -{ - void* pUserData; - void* (* onMalloc)(size_t sz, void* pUserData); - void* (* onRealloc)(void* p, size_t sz, void* pUserData); - void (* onFree)(void* p, void* pUserData); -} drwav_allocation_callbacks; /* Structure for internal use. Only used for loaders opened with drwav_init_memory(). 
*/ typedef struct @@ -869,9 +899,6 @@ typedef struct drwav_bool32 isSequentialWrite; - /* A bit-field of drwav_metadata_type values, only bits set in this variable are parsed and saved */ - drwav_metadata_type allowedMetadataTypes; - /* A array of metadata. This is valid after the *init_with_metadata call returns. It will be valid until drwav_uninit() is called. You can take ownership of this data with drwav_take_ownership_of_metadata(). */ drwav_metadata* pMetadata; drwav_uint32 metadataCount; @@ -902,6 +929,13 @@ typedef struct drwav_int32 cachedFrames[16]; /* Samples are stored in this cache during decoding. */ drwav_uint32 cachedFrameCount; } ima; + + /* AIFF specific data. */ + struct + { + drwav_bool8 isLE; /* Will be set to true if the audio data is little-endian encoded. */ + drwav_bool8 isUnsigned; /* Only used for 8-bit samples. When set to true, will be treated as unsigned. */ + } aiff; } drwav; @@ -1347,9 +1381,9 @@ DRWAV_API drwav_bool32 drwav_fourcc_equal(const drwav_uint8* a, const char* b); #define drwav_clamp(x, lo, hi) (drwav_max((lo), drwav_min((hi), (x)))) #define drwav_offset_ptr(p, offset) (((drwav_uint8*)(p)) + (offset)) -#define DRWAV_MAX_SIMD_VECTOR_SIZE 64 /* 64 for AVX-512 in the future. */ +#define DRWAV_MAX_SIMD_VECTOR_SIZE 32 -/* CPU architecture. */ +/* Architecture Detection */ #if defined(__x86_64__) || defined(_M_X64) #define DRWAV_X64 #elif defined(__i386) || defined(_M_IX86) @@ -1357,7 +1391,9 @@ DRWAV_API drwav_bool32 drwav_fourcc_equal(const drwav_uint8* a, const char* b); #elif defined(__arm__) || defined(_M_ARM) #define DRWAV_ARM #endif +/* End Architecture Detection */ +/* Inline */ #ifdef _MSC_VER #define DRWAV_INLINE __forceinline #elif defined(__GNUC__) @@ -1384,7 +1420,9 @@ DRWAV_API drwav_bool32 drwav_fourcc_equal(const drwav_uint8* a, const char* b); #else #define DRWAV_INLINE #endif +/* End Inline */ +/* SIZE_MAX */ #if defined(SIZE_MAX) #define DRWAV_SIZE_MAX SIZE_MAX #else @@ -1394,6 +1432,11 @@ DRWAV_API drwav_bool32 drwav_fourcc_equal(const drwav_uint8* a, const char* b); #define DRWAV_SIZE_MAX 0xFFFFFFFF #endif #endif +/* End SIZE_MAX */ + +/* Weird bit manipulation is for C89 compatibility (no direct support for 64-bit integers). 
*/ +#define DRWAV_INT64_MIN ((drwav_int64) ((drwav_uint64)0x80000000 << 32)) +#define DRWAV_INT64_MAX ((drwav_int64)(((drwav_uint64)0x7FFFFFFF << 32) | 0xFFFFFFFF)) #if defined(_MSC_VER) && _MSC_VER >= 1400 #define DRWAV_HAS_BYTESWAP16_INTRINSIC @@ -1603,6 +1646,20 @@ static DRWAV_INLINE void drwav__bswap_samples_s32(drwav_int32* pSamples, drwav_u } +static DRWAV_INLINE drwav_int64 drwav__bswap_s64(drwav_int64 n) +{ + return (drwav_int64)drwav__bswap64((drwav_uint64)n); +} + +static DRWAV_INLINE void drwav__bswap_samples_s64(drwav_int64* pSamples, drwav_uint64 sampleCount) +{ + drwav_uint64 iSample; + for (iSample = 0; iSample < sampleCount; iSample += 1) { + pSamples[iSample] = drwav__bswap_s64(pSamples[iSample]); + } +} + + static DRWAV_INLINE float drwav__bswap_f32(float n) { union { @@ -1624,73 +1681,29 @@ static DRWAV_INLINE void drwav__bswap_samples_f32(float* pSamples, drwav_uint64 } -static DRWAV_INLINE double drwav__bswap_f64(double n) +static DRWAV_INLINE void drwav__bswap_samples(void* pSamples, drwav_uint64 sampleCount, drwav_uint32 bytesPerSample) { - union { - drwav_uint64 i; - double f; - } x; - x.f = n; - x.i = drwav__bswap64(x.i); - - return x.f; -} - -static DRWAV_INLINE void drwav__bswap_samples_f64(double* pSamples, drwav_uint64 sampleCount) -{ - drwav_uint64 iSample; - for (iSample = 0; iSample < sampleCount; iSample += 1) { - pSamples[iSample] = drwav__bswap_f64(pSamples[iSample]); - } -} - - -static DRWAV_INLINE void drwav__bswap_samples_pcm(void* pSamples, drwav_uint64 sampleCount, drwav_uint32 bytesPerSample) -{ - /* Assumes integer PCM. Floating point PCM is done in drwav__bswap_samples_ieee(). */ switch (bytesPerSample) { - case 1: /* u8 */ + case 1: { - /* no-op. */ + /* No-op. */ } break; - case 2: /* s16, s12 (loosely packed) */ + case 2: { drwav__bswap_samples_s16((drwav_int16*)pSamples, sampleCount); } break; - case 3: /* s24 */ + case 3: { drwav__bswap_samples_s24((drwav_uint8*)pSamples, sampleCount); } break; - case 4: /* s32 */ + case 4: { drwav__bswap_samples_s32((drwav_int32*)pSamples, sampleCount); } break; - default: + case 8: { - /* Unsupported format. */ - DRWAV_ASSERT(DRWAV_FALSE); - } break; - } -} - -static DRWAV_INLINE void drwav__bswap_samples_ieee(void* pSamples, drwav_uint64 sampleCount, drwav_uint32 bytesPerSample) -{ - switch (bytesPerSample) - { - #if 0 /* Contributions welcome for f16 support. 
*/ - case 2: /* f16 */ - { - drwav__bswap_samples_f16((drwav_float16*)pSamples, sampleCount); - } break; - #endif - case 4: /* f32 */ - { - drwav__bswap_samples_f32((float*)pSamples, sampleCount); - } break; - case 8: /* f64 */ - { - drwav__bswap_samples_f64((double*)pSamples, sampleCount); + drwav__bswap_samples_s64((drwav_int64*)pSamples, sampleCount); } break; default: { @@ -1700,33 +1713,91 @@ static DRWAV_INLINE void drwav__bswap_samples_ieee(void* pSamples, drwav_uint64 } } -static DRWAV_INLINE void drwav__bswap_samples(void* pSamples, drwav_uint64 sampleCount, drwav_uint32 bytesPerSample, drwav_uint16 format) + + +DRWAV_PRIVATE DRWAV_INLINE drwav_bool32 drwav_is_container_be(drwav_container container) { - switch (format) - { - case DR_WAVE_FORMAT_PCM: - { - drwav__bswap_samples_pcm(pSamples, sampleCount, bytesPerSample); - } break; + if (container == drwav_container_rifx || container == drwav_container_aiff) { + return DRWAV_TRUE; + } else { + return DRWAV_FALSE; + } +} - case DR_WAVE_FORMAT_IEEE_FLOAT: - { - drwav__bswap_samples_ieee(pSamples, sampleCount, bytesPerSample); - } break; - case DR_WAVE_FORMAT_ALAW: - case DR_WAVE_FORMAT_MULAW: - { - drwav__bswap_samples_s16((drwav_int16*)pSamples, sampleCount); - } break; +DRWAV_PRIVATE DRWAV_INLINE drwav_uint16 drwav_bytes_to_u16_le(const drwav_uint8* data) +{ + return ((drwav_uint16)data[0] << 0) | ((drwav_uint16)data[1] << 8); +} - case DR_WAVE_FORMAT_ADPCM: - case DR_WAVE_FORMAT_DVI_ADPCM: - default: - { - /* Unsupported format. */ - DRWAV_ASSERT(DRWAV_FALSE); - } break; +DRWAV_PRIVATE DRWAV_INLINE drwav_uint16 drwav_bytes_to_u16_be(const drwav_uint8* data) +{ + return ((drwav_uint16)data[1] << 0) | ((drwav_uint16)data[0] << 8); +} + +DRWAV_PRIVATE DRWAV_INLINE drwav_uint16 drwav_bytes_to_u16_ex(const drwav_uint8* data, drwav_container container) +{ + if (drwav_is_container_be(container)) { + return drwav_bytes_to_u16_be(data); + } else { + return drwav_bytes_to_u16_le(data); + } +} + + +DRWAV_PRIVATE DRWAV_INLINE drwav_uint32 drwav_bytes_to_u32_le(const drwav_uint8* data) +{ + return ((drwav_uint32)data[0] << 0) | ((drwav_uint32)data[1] << 8) | ((drwav_uint32)data[2] << 16) | ((drwav_uint32)data[3] << 24); +} + +DRWAV_PRIVATE DRWAV_INLINE drwav_uint32 drwav_bytes_to_u32_be(const drwav_uint8* data) +{ + return ((drwav_uint32)data[3] << 0) | ((drwav_uint32)data[2] << 8) | ((drwav_uint32)data[1] << 16) | ((drwav_uint32)data[0] << 24); +} + +DRWAV_PRIVATE DRWAV_INLINE drwav_uint32 drwav_bytes_to_u32_ex(const drwav_uint8* data, drwav_container container) +{ + if (drwav_is_container_be(container)) { + return drwav_bytes_to_u32_be(data); + } else { + return drwav_bytes_to_u32_le(data); + } +} + + + +DRWAV_PRIVATE drwav_int64 drwav_aiff_extented_to_s64(const drwav_uint8* data) +{ + drwav_uint32 exponent = ((drwav_uint32)data[0] << 8) | data[1]; + drwav_uint64 hi = ((drwav_uint64)data[2] << 24) | ((drwav_uint64)data[3] << 16) | ((drwav_uint64)data[4] << 8) | ((drwav_uint64)data[5] << 0); + drwav_uint64 lo = ((drwav_uint64)data[6] << 24) | ((drwav_uint64)data[7] << 16) | ((drwav_uint64)data[8] << 8) | ((drwav_uint64)data[9] << 0); + drwav_uint64 significand = (hi << 32) | lo; + int sign = exponent >> 15; + + /* Remove sign bit. */ + exponent &= 0x7FFF; + + /* Special cases. */ + if (exponent == 0 && significand == 0) { + return 0; + } else if (exponent == 0x7FFF) { + return sign ? DRWAV_INT64_MIN : DRWAV_INT64_MAX; /* Infinite. */ + } + + exponent -= 16383; + + if (exponent > 63) { + return sign ? 
DRWAV_INT64_MIN : DRWAV_INT64_MAX; /* Too big for a 64-bit integer. */ + } else if (exponent < 1) { + return 0; /* Number is less than 1, so rounds down to 0. */ + } + + significand >>= (63 - exponent); + + if (sign) { + return -(drwav_int64)significand; + } else { + return (drwav_int64)significand; } } @@ -1850,7 +1921,7 @@ DRWAV_PRIVATE drwav_bool32 drwav_init_write__internal(drwav* pWav, const drwav_d DRWAV_PRIVATE drwav_result drwav__read_chunk_header(drwav_read_proc onRead, void* pUserData, drwav_container container, drwav_uint64* pRunningBytesReadOut, drwav_chunk_header* pHeaderOut) { - if (container == drwav_container_riff || container == drwav_container_rf64) { + if (container == drwav_container_riff || container == drwav_container_rifx || container == drwav_container_rf64 || container == drwav_container_aiff) { drwav_uint8 sizeInBytes[4]; if (onRead(pUserData, pHeaderOut->id.fourcc, 4) != 4) { @@ -1861,10 +1932,11 @@ DRWAV_PRIVATE drwav_result drwav__read_chunk_header(drwav_read_proc onRead, void return DRWAV_INVALID_FILE; } - pHeaderOut->sizeInBytes = drwav_bytes_to_u32(sizeInBytes); + pHeaderOut->sizeInBytes = drwav_bytes_to_u32_ex(sizeInBytes, container); pHeaderOut->paddingSize = drwav__chunk_padding_size_riff(pHeaderOut->sizeInBytes); + *pRunningBytesReadOut += 8; - } else { + } else if (container == drwav_container_w64) { drwav_uint8 sizeInBytes[8]; if (onRead(pUserData, pHeaderOut->id.guid, 16) != 16) { @@ -1878,6 +1950,8 @@ DRWAV_PRIVATE drwav_result drwav__read_chunk_header(drwav_read_proc onRead, void pHeaderOut->sizeInBytes = drwav_bytes_to_u64(sizeInBytes) - 24; /* <-- Subtract 24 because w64 includes the size of the header. */ pHeaderOut->paddingSize = drwav__chunk_padding_size_w64(pHeaderOut->sizeInBytes); *pRunningBytesReadOut += 24; + } else { + return DRWAV_INVALID_FILE; } return DRWAV_SUCCESS; @@ -1931,115 +2005,6 @@ DRWAV_PRIVATE drwav_bool32 drwav__seek_from_start(drwav_seek_proc onSeek, drwav_ } -DRWAV_PRIVATE drwav_bool32 drwav__read_fmt(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, drwav_container container, drwav_uint64* pRunningBytesReadOut, drwav_fmt* fmtOut) -{ - drwav_chunk_header header; - drwav_uint8 fmt[16]; - - if (drwav__read_chunk_header(onRead, pUserData, container, pRunningBytesReadOut, &header) != DRWAV_SUCCESS) { - return DRWAV_FALSE; - } - - - /* Skip non-fmt chunks. */ - while (((container == drwav_container_riff || container == drwav_container_rf64) && !drwav_fourcc_equal(header.id.fourcc, "fmt ")) || (container == drwav_container_w64 && !drwav_guid_equal(header.id.guid, drwavGUID_W64_FMT))) { - if (!drwav__seek_forward(onSeek, header.sizeInBytes + header.paddingSize, pUserData)) { - return DRWAV_FALSE; - } - *pRunningBytesReadOut += header.sizeInBytes + header.paddingSize; - - /* Try the next header. */ - if (drwav__read_chunk_header(onRead, pUserData, container, pRunningBytesReadOut, &header) != DRWAV_SUCCESS) { - return DRWAV_FALSE; - } - } - - - /* Validation. 
*/ - if (container == drwav_container_riff || container == drwav_container_rf64) { - if (!drwav_fourcc_equal(header.id.fourcc, "fmt ")) { - return DRWAV_FALSE; - } - } else { - if (!drwav_guid_equal(header.id.guid, drwavGUID_W64_FMT)) { - return DRWAV_FALSE; - } - } - - - if (onRead(pUserData, fmt, sizeof(fmt)) != sizeof(fmt)) { - return DRWAV_FALSE; - } - *pRunningBytesReadOut += sizeof(fmt); - - fmtOut->formatTag = drwav_bytes_to_u16(fmt + 0); - fmtOut->channels = drwav_bytes_to_u16(fmt + 2); - fmtOut->sampleRate = drwav_bytes_to_u32(fmt + 4); - fmtOut->avgBytesPerSec = drwav_bytes_to_u32(fmt + 8); - fmtOut->blockAlign = drwav_bytes_to_u16(fmt + 12); - fmtOut->bitsPerSample = drwav_bytes_to_u16(fmt + 14); - - fmtOut->extendedSize = 0; - fmtOut->validBitsPerSample = 0; - fmtOut->channelMask = 0; - DRWAV_ZERO_MEMORY(fmtOut->subFormat, sizeof(fmtOut->subFormat)); - - if (header.sizeInBytes > 16) { - drwav_uint8 fmt_cbSize[2]; - int bytesReadSoFar = 0; - - if (onRead(pUserData, fmt_cbSize, sizeof(fmt_cbSize)) != sizeof(fmt_cbSize)) { - return DRWAV_FALSE; /* Expecting more data. */ - } - *pRunningBytesReadOut += sizeof(fmt_cbSize); - - bytesReadSoFar = 18; - - fmtOut->extendedSize = drwav_bytes_to_u16(fmt_cbSize); - if (fmtOut->extendedSize > 0) { - /* Simple validation. */ - if (fmtOut->formatTag == DR_WAVE_FORMAT_EXTENSIBLE) { - if (fmtOut->extendedSize != 22) { - return DRWAV_FALSE; - } - } - - if (fmtOut->formatTag == DR_WAVE_FORMAT_EXTENSIBLE) { - drwav_uint8 fmtext[22]; - if (onRead(pUserData, fmtext, fmtOut->extendedSize) != fmtOut->extendedSize) { - return DRWAV_FALSE; /* Expecting more data. */ - } - - fmtOut->validBitsPerSample = drwav_bytes_to_u16(fmtext + 0); - fmtOut->channelMask = drwav_bytes_to_u32(fmtext + 2); - drwav_bytes_to_guid(fmtext + 6, fmtOut->subFormat); - } else { - if (!onSeek(pUserData, fmtOut->extendedSize, drwav_seek_origin_current)) { - return DRWAV_FALSE; - } - } - *pRunningBytesReadOut += fmtOut->extendedSize; - - bytesReadSoFar += fmtOut->extendedSize; - } - - /* Seek past any leftover bytes. For w64 the leftover will be defined based on the chunk size. */ - if (!onSeek(pUserData, (int)(header.sizeInBytes - bytesReadSoFar), drwav_seek_origin_current)) { - return DRWAV_FALSE; - } - *pRunningBytesReadOut += (header.sizeInBytes - bytesReadSoFar); - } - - if (header.paddingSize > 0) { - if (!onSeek(pUserData, header.paddingSize, drwav_seek_origin_current)) { - return DRWAV_FALSE; - } - *pRunningBytesReadOut += header.paddingSize; - } - - return DRWAV_TRUE; -} - DRWAV_PRIVATE size_t drwav__on_read(drwav_read_proc onRead, void* pUserData, void* pBufferOut, size_t bytesToRead, drwav_uint64* pCursor) { @@ -2164,7 +2129,7 @@ DRWAV_PRIVATE drwav_result drwav__metadata_alloc(drwav__metadata_parser* pParser /* We don't need to worry about specifying an alignment here because malloc always returns something - of suitable alignment. This also means than pParser->pMetadata is all that we need to store in order + of suitable alignment. This also means pParser->pMetadata is all that we need to store in order for us to free when we are done. 
*/ pParser->pMetadata = (drwav_metadata*)drwav__metadata_get_memory(pParser, sizeof(drwav_metadata) * pParser->metadataCount, 1); @@ -2187,12 +2152,18 @@ DRWAV_PRIVATE drwav_uint64 drwav__read_smpl_to_metadata_obj(drwav__metadata_pars { drwav_uint8 smplHeaderData[DRWAV_SMPL_BYTES]; drwav_uint64 totalBytesRead = 0; - size_t bytesJustRead = drwav__metadata_parser_read(pParser, smplHeaderData, sizeof(smplHeaderData), &totalBytesRead); + size_t bytesJustRead; + + if (pMetadata == NULL) { + return 0; + } + + bytesJustRead = drwav__metadata_parser_read(pParser, smplHeaderData, sizeof(smplHeaderData), &totalBytesRead); DRWAV_ASSERT(pParser->stage == drwav__metadata_parser_stage_read); DRWAV_ASSERT(pChunkHeader != NULL); - if (bytesJustRead == sizeof(smplHeaderData)) { + if (pMetadata != NULL && bytesJustRead == sizeof(smplHeaderData)) { drwav_uint32 iSampleLoop; pMetadata->type = drwav_metadata_type_smpl; @@ -2245,7 +2216,13 @@ DRWAV_PRIVATE drwav_uint64 drwav__read_cue_to_metadata_obj(drwav__metadata_parse { drwav_uint8 cueHeaderSectionData[DRWAV_CUE_BYTES]; drwav_uint64 totalBytesRead = 0; - size_t bytesJustRead = drwav__metadata_parser_read(pParser, cueHeaderSectionData, sizeof(cueHeaderSectionData), &totalBytesRead); + size_t bytesJustRead; + + if (pMetadata == NULL) { + return 0; + } + + bytesJustRead = drwav__metadata_parser_read(pParser, cueHeaderSectionData, sizeof(cueHeaderSectionData), &totalBytesRead); DRWAV_ASSERT(pParser->stage == drwav__metadata_parser_stage_read); @@ -2292,7 +2269,13 @@ DRWAV_PRIVATE drwav_uint64 drwav__read_cue_to_metadata_obj(drwav__metadata_parse DRWAV_PRIVATE drwav_uint64 drwav__read_inst_to_metadata_obj(drwav__metadata_parser* pParser, drwav_metadata* pMetadata) { drwav_uint8 instData[DRWAV_INST_BYTES]; - drwav_uint64 bytesRead = drwav__metadata_parser_read(pParser, instData, sizeof(instData), NULL); + drwav_uint64 bytesRead; + + if (pMetadata == NULL) { + return 0; + } + + bytesRead = drwav__metadata_parser_read(pParser, instData, sizeof(instData), NULL); DRWAV_ASSERT(pParser->stage == drwav__metadata_parser_stage_read); @@ -2313,7 +2296,13 @@ DRWAV_PRIVATE drwav_uint64 drwav__read_inst_to_metadata_obj(drwav__metadata_pars DRWAV_PRIVATE drwav_uint64 drwav__read_acid_to_metadata_obj(drwav__metadata_parser* pParser, drwav_metadata* pMetadata) { drwav_uint8 acidData[DRWAV_ACID_BYTES]; - drwav_uint64 bytesRead = drwav__metadata_parser_read(pParser, acidData, sizeof(acidData), NULL); + drwav_uint64 bytesRead; + + if (pMetadata == NULL) { + return 0; + } + + bytesRead = drwav__metadata_parser_read(pParser, acidData, sizeof(acidData), NULL); DRWAV_ASSERT(pParser->stage == drwav__metadata_parser_stage_read); @@ -2663,7 +2652,7 @@ DRWAV_PRIVATE drwav_uint64 drwav__metadata_process_unknown_chunk(drwav__metadata return 0; } - if (drwav_fourcc_equal(pChunkId, "data") || drwav_fourcc_equal(pChunkId, "fmt") || drwav_fourcc_equal(pChunkId, "fact")) { + if (drwav_fourcc_equal(pChunkId, "data") || drwav_fourcc_equal(pChunkId, "fmt ") || drwav_fourcc_equal(pChunkId, "fact")) { return 0; } @@ -3018,20 +3007,25 @@ DRWAV_PRIVATE drwav_bool32 drwav_preinit(drwav* pWav, drwav_read_proc onRead, dr DRWAV_PRIVATE drwav_bool32 drwav_init__internal(drwav* pWav, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags) { /* This function assumes drwav_preinit() has been called beforehand. */ - + drwav_result result; drwav_uint64 cursor; /* <-- Keeps track of the byte position so we can seek to specific locations. 
*/ drwav_bool32 sequential; drwav_uint8 riff[4]; drwav_fmt fmt; unsigned short translatedFormatTag; - drwav_bool32 foundDataChunk; - drwav_uint64 dataChunkSize = 0; /* <-- Important! Don't explicitly set this to 0 anywhere else. Calculation of the size of the data chunk is performed in different paths depending on the container. */ + drwav_uint64 dataChunkSize = 0; /* <-- Important! Don't explicitly set this to 0 anywhere else. Calculation of the size of the data chunk is performed in different paths depending on the container. */ drwav_uint64 sampleCountFromFactChunk = 0; /* Same as dataChunkSize - make sure this is the only place this is initialized to 0. */ - drwav_uint64 chunkSize; + drwav_uint64 metadataStartPos; drwav__metadata_parser metadataParser; + drwav_bool8 isProcessingMetadata = DRWAV_FALSE; + drwav_bool8 foundChunk_fmt = DRWAV_FALSE; + drwav_bool8 foundChunk_data = DRWAV_FALSE; + drwav_bool8 isAIFCFormType = DRWAV_FALSE; /* Only used with AIFF. */ + drwav_uint64 aiffFrameCount = 0; cursor = 0; sequential = (flags & DRWAV_SEQUENTIAL) != 0; + DRWAV_ZERO_OBJECT(&fmt); /* The first 4 bytes should be the RIFF identifier. */ if (drwav__on_read(pWav->onRead, pWav->pUserData, riff, sizeof(riff), &cursor) != sizeof(riff)) { @@ -3044,6 +3038,8 @@ DRWAV_PRIVATE drwav_bool32 drwav_init__internal(drwav* pWav, drwav_chunk_proc on */ if (drwav_fourcc_equal(riff, "RIFF")) { pWav->container = drwav_container_riff; + } else if (drwav_fourcc_equal(riff, "RIFX")) { + pWav->container = drwav_container_rifx; } else if (drwav_fourcc_equal(riff, "riff")) { int i; drwav_uint8 riff2[12]; @@ -3062,28 +3058,31 @@ DRWAV_PRIVATE drwav_bool32 drwav_init__internal(drwav* pWav, drwav_chunk_proc on } } else if (drwav_fourcc_equal(riff, "RF64")) { pWav->container = drwav_container_rf64; + } else if (drwav_fourcc_equal(riff, "FORM")) { + pWav->container = drwav_container_aiff; } else { return DRWAV_FALSE; /* Unknown or unsupported container. */ } - if (pWav->container == drwav_container_riff || pWav->container == drwav_container_rf64) { + if (pWav->container == drwav_container_riff || pWav->container == drwav_container_rifx || pWav->container == drwav_container_rf64) { drwav_uint8 chunkSizeBytes[4]; drwav_uint8 wave[4]; - /* RIFF/WAVE */ if (drwav__on_read(pWav->onRead, pWav->pUserData, chunkSizeBytes, sizeof(chunkSizeBytes), &cursor) != sizeof(chunkSizeBytes)) { return DRWAV_FALSE; } - if (pWav->container == drwav_container_riff) { - if (drwav_bytes_to_u32(chunkSizeBytes) < 36) { + if (pWav->container == drwav_container_riff || pWav->container == drwav_container_rifx) { + if (drwav_bytes_to_u32_ex(chunkSizeBytes, pWav->container) < 36) { return DRWAV_FALSE; /* Chunk size should always be at least 36 bytes. */ } - } else { - if (drwav_bytes_to_u32(chunkSizeBytes) != 0xFFFFFFFF) { + } else if (pWav->container == drwav_container_rf64) { + if (drwav_bytes_to_u32_le(chunkSizeBytes) != 0xFFFFFFFF) { return DRWAV_FALSE; /* Chunk size should always be set to -1/0xFFFFFFFF for RF64. The actual size is retrieved later. */ } + } else { + return DRWAV_FALSE; /* Should never hit this. */ } if (drwav__on_read(pWav->onRead, pWav->pUserData, wave, sizeof(wave), &cursor) != sizeof(wave)) { @@ -3093,11 +3092,10 @@ DRWAV_PRIVATE drwav_bool32 drwav_init__internal(drwav* pWav, drwav_chunk_proc on if (!drwav_fourcc_equal(wave, "WAVE")) { return DRWAV_FALSE; /* Expecting "WAVE". 
*/ } - } else { + } else if (pWav->container == drwav_container_w64) { drwav_uint8 chunkSizeBytes[8]; drwav_uint8 wave[16]; - /* W64 */ if (drwav__on_read(pWav->onRead, pWav->pUserData, chunkSizeBytes, sizeof(chunkSizeBytes), &cursor) != sizeof(chunkSizeBytes)) { return DRWAV_FALSE; } @@ -3113,6 +3111,31 @@ DRWAV_PRIVATE drwav_bool32 drwav_init__internal(drwav* pWav, drwav_chunk_proc on if (!drwav_guid_equal(wave, drwavGUID_W64_WAVE)) { return DRWAV_FALSE; } + } else if (pWav->container == drwav_container_aiff) { + drwav_uint8 chunkSizeBytes[4]; + drwav_uint8 aiff[4]; + + if (drwav__on_read(pWav->onRead, pWav->pUserData, chunkSizeBytes, sizeof(chunkSizeBytes), &cursor) != sizeof(chunkSizeBytes)) { + return DRWAV_FALSE; + } + + if (drwav_bytes_to_u32_be(chunkSizeBytes) < 18) { + return DRWAV_FALSE; + } + + if (drwav__on_read(pWav->onRead, pWav->pUserData, aiff, sizeof(aiff), &cursor) != sizeof(aiff)) { + return DRWAV_FALSE; + } + + if (drwav_fourcc_equal(aiff, "AIFF")) { + isAIFCFormType = DRWAV_FALSE; + } else if (drwav_fourcc_equal(aiff, "AIFC")) { + isAIFCFormType = DRWAV_TRUE; + } else { + return DRWAV_FALSE; /* Expecting "AIFF" or "AIFC". */ + } + } else { + return DRWAV_FALSE; } @@ -3121,7 +3144,7 @@ DRWAV_PRIVATE drwav_bool32 drwav_init__internal(drwav* pWav, drwav_chunk_proc on drwav_uint8 sizeBytes[8]; drwav_uint64 bytesRemainingInChunk; drwav_chunk_header header; - drwav_result result = drwav__read_chunk_header(pWav->onRead, pWav->pUserData, pWav->container, &cursor, &header); + result = drwav__read_chunk_header(pWav->onRead, pWav->pUserData, pWav->container, &cursor, &header); if (result != DRWAV_SUCCESS) { return DRWAV_FALSE; } @@ -3164,88 +3187,52 @@ DRWAV_PRIVATE drwav_bool32 drwav_init__internal(drwav* pWav, drwav_chunk_proc on } - /* The next bytes should be the "fmt " chunk. */ - if (!drwav__read_fmt(pWav->onRead, pWav->onSeek, pWav->pUserData, pWav->container, &cursor, &fmt)) { - return DRWAV_FALSE; /* Failed to read the "fmt " chunk. */ - } + metadataStartPos = cursor; - /* Basic validation. */ - if ((fmt.sampleRate == 0 || fmt.sampleRate > DRWAV_MAX_SAMPLE_RATE) || - (fmt.channels == 0 || fmt.channels > DRWAV_MAX_CHANNELS) || - (fmt.bitsPerSample == 0 || fmt.bitsPerSample > DRWAV_MAX_BITS_PER_SAMPLE) || - fmt.blockAlign == 0) { - return DRWAV_FALSE; /* Probably an invalid WAV file. */ - } + /* + Whether or not we are processing metadata controls how we load. We can load more efficiently when + metadata is not being processed, but we also cannot process metadata for Wave64 because I have not + been able to test it. If someone is able to test this and provide a patch I'm happy to enable it. + Seqential mode cannot support metadata because it involves seeking backwards. + */ + isProcessingMetadata = !sequential && ((flags & DRWAV_WITH_METADATA) != 0); - /* Translate the internal format. */ - translatedFormatTag = fmt.formatTag; - if (translatedFormatTag == DR_WAVE_FORMAT_EXTENSIBLE) { - translatedFormatTag = drwav_bytes_to_u16(fmt.subFormat + 0); + /* Don't allow processing of metadata with untested containers. */ + if (pWav->container != drwav_container_riff && pWav->container != drwav_container_rf64) { + isProcessingMetadata = DRWAV_FALSE; } DRWAV_ZERO_MEMORY(&metadataParser, sizeof(metadataParser)); - - /* Not tested on W64. 
*/ - if (!sequential && pWav->allowedMetadataTypes != drwav_metadata_type_none && (pWav->container == drwav_container_riff || pWav->container == drwav_container_rf64)) { - drwav_uint64 cursorForMetadata = cursor; - + if (isProcessingMetadata) { metadataParser.onRead = pWav->onRead; metadataParser.onSeek = pWav->onSeek; metadataParser.pReadSeekUserData = pWav->pUserData; - metadataParser.stage = drwav__metadata_parser_stage_count; - - for (;;) { - drwav_result result; - drwav_uint64 bytesRead; - drwav_uint64 remainingBytes; - drwav_chunk_header header; - - result = drwav__read_chunk_header(pWav->onRead, pWav->pUserData, pWav->container, &cursorForMetadata, &header); - if (result != DRWAV_SUCCESS) { - break; - } - - bytesRead = drwav__metadata_process_chunk(&metadataParser, &header, pWav->allowedMetadataTypes); - DRWAV_ASSERT(bytesRead <= header.sizeInBytes); - - remainingBytes = header.sizeInBytes - bytesRead + header.paddingSize; - if (!drwav__seek_forward(pWav->onSeek, remainingBytes, pWav->pUserData)) { - break; - } - cursorForMetadata += remainingBytes; - } - - if (!drwav__seek_from_start(pWav->onSeek, cursor, pWav->pUserData)) { - return DRWAV_FALSE; - } - - drwav__metadata_alloc(&metadataParser, &pWav->allocationCallbacks); - metadataParser.stage = drwav__metadata_parser_stage_read; + metadataParser.stage = drwav__metadata_parser_stage_count; } + /* - We need to enumerate over each chunk for two reasons: - 1) The "data" chunk may not be the next one - 2) We may want to report each chunk back to the client - - In order to correctly report each chunk back to the client we will need to keep looping until the end of the file. + From here on out, chunks might be in any order. In order to robustly handle metadata we'll need + to loop through every chunk and handle them as we find them. In sequential mode we need to get + out of the loop as soon as we find the data chunk because we won't be able to seek back. */ - foundDataChunk = DRWAV_FALSE; - - /* The next chunk we care about is the "data" chunk. This is not necessarily the next chunk so we'll need to loop. */ - for (;;) { + for (;;) { /* For each chunk... */ drwav_chunk_header header; - drwav_result result = drwav__read_chunk_header(pWav->onRead, pWav->pUserData, pWav->container, &cursor, &header); + drwav_uint64 chunkSize; + + result = drwav__read_chunk_header(pWav->onRead, pWav->pUserData, pWav->container, &cursor, &header); if (result != DRWAV_SUCCESS) { - if (!foundDataChunk) { - return DRWAV_FALSE; - } else { - break; /* Probably at the end of the file. Get out of the loop. */ - } + break; } - /* Tell the client about this chunk. */ + chunkSize = header.sizeInBytes; + + + /* + Always tell the caller about this chunk. We cannot do this in sequential mode because the + callback is allowed to read from the file, in which case we'll need to rewind. + */ if (!sequential && onChunk != NULL) { drwav_uint64 callbackBytesRead = onChunk(pChunkUserData, pWav->onRead, pWav->onSeek, pWav->pUserData, &header, pWav->container, &fmt); @@ -3254,108 +3241,358 @@ DRWAV_PRIVATE drwav_bool32 drwav_init__internal(drwav* pWav, drwav_chunk_proc on we called the callback. 
*/ if (callbackBytesRead > 0) { - if (!drwav__seek_from_start(pWav->onSeek, cursor, pWav->pUserData)) { - return DRWAV_FALSE; - } - } - } - - if (!sequential && pWav->allowedMetadataTypes != drwav_metadata_type_none && (pWav->container == drwav_container_riff || pWav->container == drwav_container_rf64)) { - drwav_uint64 bytesRead = drwav__metadata_process_chunk(&metadataParser, &header, pWav->allowedMetadataTypes); - - if (bytesRead > 0) { - if (!drwav__seek_from_start(pWav->onSeek, cursor, pWav->pUserData)) { + if (drwav__seek_from_start(pWav->onSeek, cursor, pWav->pUserData) == DRWAV_FALSE) { return DRWAV_FALSE; } } } - if (!foundDataChunk) { - pWav->dataChunkDataPos = cursor; + /* Explicitly handle known chunks first. */ + + /* "fmt " */ + if (((pWav->container == drwav_container_riff || pWav->container == drwav_container_rifx || pWav->container == drwav_container_rf64) && drwav_fourcc_equal(header.id.fourcc, "fmt ")) || + ((pWav->container == drwav_container_w64) && drwav_guid_equal(header.id.guid, drwavGUID_W64_FMT))) { + drwav_uint8 fmtData[16]; + + foundChunk_fmt = DRWAV_TRUE; + + if (pWav->onRead(pWav->pUserData, fmtData, sizeof(fmtData)) != sizeof(fmtData)) { + return DRWAV_FALSE; + } + cursor += sizeof(fmtData); + + fmt.formatTag = drwav_bytes_to_u16_ex(fmtData + 0, pWav->container); + fmt.channels = drwav_bytes_to_u16_ex(fmtData + 2, pWav->container); + fmt.sampleRate = drwav_bytes_to_u32_ex(fmtData + 4, pWav->container); + fmt.avgBytesPerSec = drwav_bytes_to_u32_ex(fmtData + 8, pWav->container); + fmt.blockAlign = drwav_bytes_to_u16_ex(fmtData + 12, pWav->container); + fmt.bitsPerSample = drwav_bytes_to_u16_ex(fmtData + 14, pWav->container); + + fmt.extendedSize = 0; + fmt.validBitsPerSample = 0; + fmt.channelMask = 0; + DRWAV_ZERO_MEMORY(fmt.subFormat, sizeof(fmt.subFormat)); + + if (header.sizeInBytes > 16) { + drwav_uint8 fmt_cbSize[2]; + int bytesReadSoFar = 0; + + if (pWav->onRead(pWav->pUserData, fmt_cbSize, sizeof(fmt_cbSize)) != sizeof(fmt_cbSize)) { + return DRWAV_FALSE; /* Expecting more data. */ + } + cursor += sizeof(fmt_cbSize); + + bytesReadSoFar = 18; + + fmt.extendedSize = drwav_bytes_to_u16_ex(fmt_cbSize, pWav->container); + if (fmt.extendedSize > 0) { + /* Simple validation. */ + if (fmt.formatTag == DR_WAVE_FORMAT_EXTENSIBLE) { + if (fmt.extendedSize != 22) { + return DRWAV_FALSE; + } + } + + if (fmt.formatTag == DR_WAVE_FORMAT_EXTENSIBLE) { + drwav_uint8 fmtext[22]; + + if (pWav->onRead(pWav->pUserData, fmtext, fmt.extendedSize) != fmt.extendedSize) { + return DRWAV_FALSE; /* Expecting more data. */ + } + + fmt.validBitsPerSample = drwav_bytes_to_u16_ex(fmtext + 0, pWav->container); + fmt.channelMask = drwav_bytes_to_u32_ex(fmtext + 2, pWav->container); + drwav_bytes_to_guid(fmtext + 6, fmt.subFormat); + } else { + if (pWav->onSeek(pWav->pUserData, fmt.extendedSize, drwav_seek_origin_current) == DRWAV_FALSE) { + return DRWAV_FALSE; + } + } + cursor += fmt.extendedSize; + + bytesReadSoFar += fmt.extendedSize; + } + + /* Seek past any leftover bytes. For w64 the leftover will be defined based on the chunk size. */ + if (pWav->onSeek(pWav->pUserData, (int)(header.sizeInBytes - bytesReadSoFar), drwav_seek_origin_current) == DRWAV_FALSE) { + return DRWAV_FALSE; + } + cursor += (header.sizeInBytes - bytesReadSoFar); + } + + if (header.paddingSize > 0) { + if (drwav__seek_forward(pWav->onSeek, header.paddingSize, pWav->pUserData) == DRWAV_FALSE) { + break; + } + cursor += header.paddingSize; + } + + /* Go to the next chunk. Don't include this chunk in metadata. 
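
The sixteen mandatory bytes read into fmtData above follow the classic WAVEFORMAT layout. Purely as a reference, a packed-struct view of those fields is shown below; real parsing should stay byte-by-byte as above, since struct padding and host endianness make an overlay unreliable (which is exactly what the drwav_bytes_to_u16_ex/u32_ex helpers avoid):

    #include <stdint.h>

    /* Byte offsets of the mandatory part of a "fmt " chunk, matching the
       reads above. Shown for illustration only. */
    typedef struct
    {
        uint16_t formatTag;       /* +0   1 = PCM, 3 = IEEE float, 6 = A-law, 7 = mu-law, 0xFFFE = EXTENSIBLE */
        uint16_t channels;        /* +2  */
        uint32_t sampleRate;      /* +4  */
        uint32_t avgBytesPerSec;  /* +8   sampleRate * blockAlign for PCM */
        uint16_t blockAlign;      /* +12  channels * bitsPerSample / 8 for PCM */
        uint16_t bitsPerSample;   /* +14 */
    } wav_fmt_base;               /* optionally followed by cbSize and the EXTENSIBLE fields parsed above */
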
*/ + continue; } - chunkSize = header.sizeInBytes; - if (pWav->container == drwav_container_riff || pWav->container == drwav_container_rf64) { - if (drwav_fourcc_equal(header.id.fourcc, "data")) { - foundDataChunk = DRWAV_TRUE; - if (pWav->container != drwav_container_rf64) { /* The data chunk size for RF64 will always be set to 0xFFFFFFFF here. It was set to it's true value earlier. */ - dataChunkSize = chunkSize; - } - } - } else { - if (drwav_guid_equal(header.id.guid, drwavGUID_W64_DATA)) { - foundDataChunk = DRWAV_TRUE; + /* "data" */ + if (((pWav->container == drwav_container_riff || pWav->container == drwav_container_rifx || pWav->container == drwav_container_rf64) && drwav_fourcc_equal(header.id.fourcc, "data")) || + ((pWav->container == drwav_container_w64) && drwav_guid_equal(header.id.guid, drwavGUID_W64_DATA))) { + foundChunk_data = DRWAV_TRUE; + + pWav->dataChunkDataPos = cursor; + + if (pWav->container != drwav_container_rf64) { /* The data chunk size for RF64 will always be set to 0xFFFFFFFF here. It was set to it's true value earlier. */ dataChunkSize = chunkSize; } + + /* If we're running in sequential mode, or we're not reading metadata, we have enough now that we can get out of the loop. */ + if (sequential || !isProcessingMetadata) { + break; /* No need to keep reading beyond the data chunk. */ + } else { + chunkSize += header.paddingSize; /* <-- Make sure we seek past the padding. */ + if (drwav__seek_forward(pWav->onSeek, chunkSize, pWav->pUserData) == DRWAV_FALSE) { + break; + } + cursor += chunkSize; + + continue; /* There may be some more metadata to read. */ + } } - /* - If at this point we have found the data chunk and we're running in sequential mode, we need to break out of this loop. The reason for - this is that we would otherwise require a backwards seek which sequential mode forbids. - */ - if (foundDataChunk && sequential) { - break; - } - - /* Optional. Get the total sample count from the FACT chunk. This is useful for compressed formats. */ - if (pWav->container == drwav_container_riff) { - if (drwav_fourcc_equal(header.id.fourcc, "fact")) { - drwav_uint32 sampleCount; + /* "fact". This is optional. Can use this to get the sample count which is useful for compressed formats. For RF64 we retrieved the sample count from the ds64 chunk earlier. */ + if (((pWav->container == drwav_container_riff || pWav->container == drwav_container_rifx || pWav->container == drwav_container_rf64) && drwav_fourcc_equal(header.id.fourcc, "fact")) || + ((pWav->container == drwav_container_w64) && drwav_guid_equal(header.id.guid, drwavGUID_W64_FACT))) { + if (pWav->container == drwav_container_riff || pWav->container == drwav_container_rifx) { + drwav_uint8 sampleCount[4]; if (drwav__on_read(pWav->onRead, pWav->pUserData, &sampleCount, 4, &cursor) != 4) { return DRWAV_FALSE; } - chunkSize -= 4; - if (!foundDataChunk) { - pWav->dataChunkDataPos = cursor; - } + chunkSize -= 4; /* The sample count in the "fact" chunk is either unreliable, or I'm not understanding it properly. For now I am only enabling this for Microsoft ADPCM formats. 
*/ if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) { - sampleCountFromFactChunk = sampleCount; + sampleCountFromFactChunk = drwav_bytes_to_u32_ex(sampleCount, pWav->container); } else { sampleCountFromFactChunk = 0; } - } - } else if (pWav->container == drwav_container_w64) { - if (drwav_guid_equal(header.id.guid, drwavGUID_W64_FACT)) { + } else if (pWav->container == drwav_container_w64) { if (drwav__on_read(pWav->onRead, pWav->pUserData, &sampleCountFromFactChunk, 8, &cursor) != 8) { return DRWAV_FALSE; } - chunkSize -= 8; - if (!foundDataChunk) { - pWav->dataChunkDataPos = cursor; - } + chunkSize -= 8; + } else if (pWav->container == drwav_container_rf64) { + /* We retrieved the sample count from the ds64 chunk earlier so no need to do that here. */ } - } else if (pWav->container == drwav_container_rf64) { - /* We retrieved the sample count from the ds64 chunk earlier so no need to do that here. */ + + /* Seek to the next chunk in preparation for the next iteration. */ + chunkSize += header.paddingSize; /* <-- Make sure we seek past the padding. */ + if (drwav__seek_forward(pWav->onSeek, chunkSize, pWav->pUserData) == DRWAV_FALSE) { + break; + } + cursor += chunkSize; + + continue; } - /* Make sure we seek past the padding. */ - chunkSize += header.paddingSize; - if (!drwav__seek_forward(pWav->onSeek, chunkSize, pWav->pUserData)) { + + /* "COMM". AIFF/AIFC only. */ + if (pWav->container == drwav_container_aiff && drwav_fourcc_equal(header.id.fourcc, "COMM")) { + drwav_uint8 commData[24]; + drwav_uint32 commDataBytesToRead; + drwav_uint16 channels; + drwav_uint32 frameCount; + drwav_uint16 sampleSizeInBits; + drwav_int64 sampleRate; + drwav_uint16 compressionFormat; + + foundChunk_fmt = DRWAV_TRUE; + + if (isAIFCFormType) { + commDataBytesToRead = 24; + if (header.sizeInBytes < commDataBytesToRead) { + return DRWAV_FALSE; /* Invalid COMM chunk. */ + } + } else { + commDataBytesToRead = 18; + if (header.sizeInBytes != commDataBytesToRead) { + return DRWAV_FALSE; /* INVALID COMM chunk. */ + } + } + + if (drwav__on_read(pWav->onRead, pWav->pUserData, commData, commDataBytesToRead, &cursor) != commDataBytesToRead) { + return DRWAV_FALSE; + } + + + channels = drwav_bytes_to_u16_ex (commData + 0, pWav->container); + frameCount = drwav_bytes_to_u32_ex (commData + 2, pWav->container); + sampleSizeInBits = drwav_bytes_to_u16_ex (commData + 6, pWav->container); + sampleRate = drwav_aiff_extented_to_s64(commData + 8); + + if (sampleRate < 0 || sampleRate > 0xFFFFFFFF) { + return DRWAV_FALSE; /* Invalid sample rate. */ + } + + if (isAIFCFormType) { + const drwav_uint8* type = commData + 18; + + if (drwav_fourcc_equal(type, "NONE")) { + compressionFormat = DR_WAVE_FORMAT_PCM; /* PCM, big-endian. */ + } else if (drwav_fourcc_equal(type, "raw ")) { + compressionFormat = DR_WAVE_FORMAT_PCM; + + /* In my testing, it looks like when the "raw " compression type is used, 8-bit samples should be considered unsigned. */ + if (sampleSizeInBits == 8) { + pWav->aiff.isUnsigned = DRWAV_TRUE; + } + } else if (drwav_fourcc_equal(type, "sowt")) { + compressionFormat = DR_WAVE_FORMAT_PCM; /* PCM, little-endian. 
*/ + pWav->aiff.isLE = DRWAV_TRUE; + } else if (drwav_fourcc_equal(type, "fl32") || drwav_fourcc_equal(type, "fl64") || drwav_fourcc_equal(type, "FL32") || drwav_fourcc_equal(type, "FL64")) { + compressionFormat = DR_WAVE_FORMAT_IEEE_FLOAT; + } else if (drwav_fourcc_equal(type, "alaw") || drwav_fourcc_equal(type, "ALAW")) { + compressionFormat = DR_WAVE_FORMAT_ALAW; + } else if (drwav_fourcc_equal(type, "ulaw") || drwav_fourcc_equal(type, "ULAW")) { + compressionFormat = DR_WAVE_FORMAT_MULAW; + } else if (drwav_fourcc_equal(type, "ima4")) { + compressionFormat = DR_WAVE_FORMAT_DVI_ADPCM; + sampleSizeInBits = 4; + + /* + I haven't been able to figure out how to get correct decoding for IMA ADPCM. Until this is figured out + we'll need to abort when we encounter such an encoding. Advice welcome! + */ + return DRWAV_FALSE; + } else { + return DRWAV_FALSE; /* Unknown or unsupported compression format. Need to abort. */ + } + } else { + compressionFormat = DR_WAVE_FORMAT_PCM; /* It's a standard AIFF form which is always compressed. */ + } + + /* With AIFF we want to use the explicitly defined frame count rather than deriving it from the size of the chunk. */ + aiffFrameCount = frameCount; + + /* We should now have enough information to fill out our fmt structure. */ + fmt.formatTag = compressionFormat; + fmt.channels = channels; + fmt.sampleRate = (drwav_uint32)sampleRate; + fmt.bitsPerSample = sampleSizeInBits; + fmt.blockAlign = (drwav_uint16)(fmt.channels * fmt.bitsPerSample / 8); + fmt.avgBytesPerSec = fmt.blockAlign * fmt.sampleRate; + + if (fmt.blockAlign == 0 && compressionFormat == DR_WAVE_FORMAT_DVI_ADPCM) { + fmt.blockAlign = 34 * fmt.channels; + } + + /* + Weird one. I've seen some alaw and ulaw encoded files that for some reason set the bits per sample to 16 when + it should be 8. To get this working I need to explicitly check for this and change it. + */ + if (compressionFormat == DR_WAVE_FORMAT_ALAW || compressionFormat == DR_WAVE_FORMAT_MULAW) { + if (fmt.bitsPerSample > 8) { + fmt.bitsPerSample = 8; + fmt.blockAlign = fmt.channels; + } + } + + /* In AIFF, samples are padded to 8 byte boundaries. We need to round up our bits per sample here. */ + fmt.bitsPerSample += (fmt.bitsPerSample & 7); + + + /* If the form type is AIFC there will be some additional data in the chunk. We need to seek past it. */ + if (isAIFCFormType) { + if (drwav__seek_forward(pWav->onSeek, (chunkSize - commDataBytesToRead), pWav->pUserData) == DRWAV_FALSE) { + return DRWAV_FALSE; + } + cursor += (chunkSize - commDataBytesToRead); + } + + /* Don't fall through or else we'll end up treating this chunk as metadata which is incorrect. */ + continue; + } + + + /* "SSND". AIFF/AIFC only. This is the AIFF equivalent of the "data" chunk. */ + if (pWav->container == drwav_container_aiff && drwav_fourcc_equal(header.id.fourcc, "SSND")) { + drwav_uint8 offsetAndBlockSizeData[8]; + drwav_uint32 offset; + + foundChunk_data = DRWAV_TRUE; + + if (drwav__on_read(pWav->onRead, pWav->pUserData, offsetAndBlockSizeData, sizeof(offsetAndBlockSizeData), &cursor) != sizeof(offsetAndBlockSizeData)) { + return DRWAV_FALSE; + } + + /* We need to seek forward by the offset. 
*/ + offset = drwav_bytes_to_u32_ex(offsetAndBlockSizeData + 0, pWav->container); + if (drwav__seek_forward(pWav->onSeek, offset, pWav->pUserData) == DRWAV_FALSE) { + return DRWAV_FALSE; + } + cursor += offset; + + pWav->dataChunkDataPos = cursor; + dataChunkSize = chunkSize; + + /* If we're running in sequential mode, or we're not reading metadata, we have enough now that we can get out of the loop. */ + if (sequential || !isProcessingMetadata) { + break; /* No need to keep reading beyond the data chunk. */ + } else { + if (drwav__seek_forward(pWav->onSeek, chunkSize, pWav->pUserData) == DRWAV_FALSE) { + break; + } + cursor += chunkSize; + + continue; /* There may be some more metadata to read. */ + } + } + + + + /* Getting here means it's not a chunk that we care about internally, but might need to be handled as metadata by the caller. */ + if (isProcessingMetadata) { + drwav_uint64 metadataBytesRead; + + metadataBytesRead = drwav__metadata_process_chunk(&metadataParser, &header, drwav_metadata_type_all_including_unknown); + DRWAV_ASSERT(metadataBytesRead <= header.sizeInBytes); + + /* Go back to the start of the chunk so we can normalize the position of the cursor. */ + if (drwav__seek_from_start(pWav->onSeek, cursor, pWav->pUserData) == DRWAV_FALSE) { + break; /* Failed to seek. Can't reliable read the remaining chunks. Get out. */ + } + } + + + /* Make sure we skip past the content of this chunk before we go to the next one. */ + chunkSize += header.paddingSize; /* <-- Make sure we seek past the padding. */ + if (drwav__seek_forward(pWav->onSeek, chunkSize, pWav->pUserData) == DRWAV_FALSE) { break; } cursor += chunkSize; - - if (!foundDataChunk) { - pWav->dataChunkDataPos = cursor; - } } - pWav->pMetadata = metadataParser.pMetadata; - pWav->metadataCount = metadataParser.metadataCount; - - /* If we haven't found a data chunk, return an error. */ - if (!foundDataChunk) { + /* There's some mandatory chunks that must exist. If they were not found in the iteration above we must abort. */ + if (!foundChunk_fmt || !foundChunk_data) { return DRWAV_FALSE; } + /* Basic validation. */ + if ((fmt.sampleRate == 0 || fmt.sampleRate > DRWAV_MAX_SAMPLE_RATE ) || + (fmt.channels == 0 || fmt.channels > DRWAV_MAX_CHANNELS ) || + (fmt.bitsPerSample == 0 || fmt.bitsPerSample > DRWAV_MAX_BITS_PER_SAMPLE) || + fmt.blockAlign == 0) { + return DRWAV_FALSE; /* Probably an invalid WAV file. */ + } + + /* Translate the internal format. */ + translatedFormatTag = fmt.formatTag; + if (translatedFormatTag == DR_WAVE_FORMAT_EXTENSIBLE) { + translatedFormatTag = drwav_bytes_to_u16_ex(fmt.subFormat + 0, pWav->container); + } + /* We may have moved passed the data chunk. If so we need to move back. If running in sequential mode we can assume we are already sitting on the data chunk. */ if (!sequential) { if (!drwav__seek_from_start(pWav->onSeek, pWav->dataChunkDataPos, pWav->pUserData)) { @@ -3365,8 +3602,77 @@ DRWAV_PRIVATE drwav_bool32 drwav_init__internal(drwav* pWav, drwav_chunk_proc on } + /* + At this point we should have done the initial parsing of each of our chunks, but we now need to + do a second pass to extract the actual contents of the metadata (the first pass just calculated + the length of the memory allocation). + + We only do this if we've actually got metadata to parse. 
+ */ + if (isProcessingMetadata && metadataParser.metadataCount > 0) { + if (drwav__seek_from_start(pWav->onSeek, metadataStartPos, pWav->pUserData) == DRWAV_FALSE) { + return DRWAV_FALSE; + } + + result = drwav__metadata_alloc(&metadataParser, &pWav->allocationCallbacks); + if (result != DRWAV_SUCCESS) { + return DRWAV_FALSE; + } + + metadataParser.stage = drwav__metadata_parser_stage_read; + + for (;;) { + drwav_chunk_header header; + drwav_uint64 metadataBytesRead; + + result = drwav__read_chunk_header(pWav->onRead, pWav->pUserData, pWav->container, &cursor, &header); + if (result != DRWAV_SUCCESS) { + break; + } + + metadataBytesRead = drwav__metadata_process_chunk(&metadataParser, &header, drwav_metadata_type_all_including_unknown); + + /* Move to the end of the chunk so we can keep iterating. */ + if (drwav__seek_forward(pWav->onSeek, (header.sizeInBytes + header.paddingSize) - metadataBytesRead, pWav->pUserData) == DRWAV_FALSE) { + drwav_free(metadataParser.pMetadata, &pWav->allocationCallbacks); + return DRWAV_FALSE; + } + } + + /* Getting here means we're finished parsing the metadata. */ + pWav->pMetadata = metadataParser.pMetadata; + pWav->metadataCount = metadataParser.metadataCount; + } + + /* At this point we should be sitting on the first byte of the raw audio data. */ + /* + I've seen a WAV file in the wild where a RIFF-ecapsulated file has the size of it's "RIFF" and + "data" chunks set to 0xFFFFFFFF when the file is definitely not that big. In this case we're + going to have to calculate the size by reading and discarding bytes, and then seeking back. We + cannot do this in sequential mode. We just assume that the rest of the file is audio data. + */ + if (dataChunkSize == 0xFFFFFFFF && (pWav->container == drwav_container_riff || pWav->container == drwav_container_rifx) && pWav->isSequentialWrite == DRWAV_FALSE) { + dataChunkSize = 0; + + for (;;) { + drwav_uint8 temp[4096]; + size_t bytesRead = pWav->onRead(pWav->pUserData, temp, sizeof(temp)); + dataChunkSize += bytesRead; + + if (bytesRead < sizeof(temp)) { + break; + } + } + } + + if (drwav__seek_from_start(pWav->onSeek, pWav->dataChunkDataPos, pWav->pUserData) == DRWAV_FALSE) { + drwav_free(pWav->pMetadata, &pWav->allocationCallbacks); + return DRWAV_FALSE; + } + + pWav->fmt = fmt; pWav->sampleRate = fmt.sampleRate; pWav->channels = fmt.channels; @@ -3377,9 +3683,12 @@ DRWAV_PRIVATE drwav_bool32 drwav_init__internal(drwav* pWav, drwav_chunk_proc on if (sampleCountFromFactChunk != 0) { pWav->totalPCMFrameCount = sampleCountFromFactChunk; + } else if (aiffFrameCount != 0) { + pWav->totalPCMFrameCount = aiffFrameCount; } else { drwav_uint32 bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); if (bytesPerFrame == 0) { + drwav_free(pWav->pMetadata, &pWav->allocationCallbacks); return DRWAV_FALSE; /* Invalid file. */ } @@ -3419,12 +3728,14 @@ DRWAV_PRIVATE drwav_bool32 drwav_init__internal(drwav* pWav, drwav_chunk_proc on /* Some formats only support a certain number of channels. */ if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM || pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) { if (pWav->channels > 2) { + drwav_free(pWav->pMetadata, &pWav->allocationCallbacks); return DRWAV_FALSE; } } /* The number of bytes per frame must be known. If not, it's an invalid file and not decodable. 
*/ if (drwav_get_bytes_per_pcm_frame(pWav) == 0) { + drwav_free(pWav->pMetadata, &pWav->allocationCallbacks); return DRWAV_FALSE; } @@ -3470,8 +3781,7 @@ DRWAV_API drwav_bool32 drwav_init_with_metadata(drwav* pWav, drwav_read_proc onR return DRWAV_FALSE; } - pWav->allowedMetadataTypes = drwav_metadata_type_all_including_unknown; /* <-- Needs to be set to tell drwav_init_ex() that we need to process metadata. */ - return drwav_init__internal(pWav, NULL, NULL, flags); + return drwav_init__internal(pWav, NULL, NULL, flags | DRWAV_WITH_METADATA); } DRWAV_API drwav_metadata* drwav_take_ownership_of_metadata(drwav* pWav) @@ -3689,8 +3999,8 @@ DRWAV_PRIVATE size_t drwav__write_or_count_metadata(drwav* pWav, drwav_metadata* } if (pMetadata->data.smpl.samplerSpecificDataSizeInBytes > 0) { - bytesWritten += drwav__write(pWav, pMetadata->data.smpl.pSamplerSpecificData, pMetadata->data.smpl.samplerSpecificDataSizeInBytes); - } + bytesWritten += drwav__write_or_count(pWav, pMetadata->data.smpl.pSamplerSpecificData, pMetadata->data.smpl.samplerSpecificDataSizeInBytes); + } } break; case drwav_metadata_type_inst: @@ -4134,6 +4444,8 @@ DRWAV_PRIVATE drwav_bool32 drwav_init_write__internal(drwav* pWav, const drwav_d runningPos += drwav__write(pWav, "RF64", 4); runningPos += drwav__write_u32ne_to_le(pWav, 0xFFFFFFFF); /* Always 0xFFFFFFFF for RF64. Set to a proper value in the "ds64" chunk. */ runningPos += drwav__write(pWav, "WAVE", 4); + } else { + return DRWAV_FALSE; /* Container not supported for writing. */ } @@ -4267,6 +4579,7 @@ DRWAV_API drwav_uint64 drwav_target_write_size_bytes(const drwav_data_format* pF #ifndef DR_WAV_NO_STDIO +/* Errno */ /* drwav_result_from_errno() is only used for fopen() and wfopen() so putting it inside DR_WAV_NO_STDIO for now. If something else needs this later we can move it out. */ #include DRWAV_PRIVATE drwav_result drwav_result_from_errno(int e) @@ -4670,7 +4983,9 @@ DRWAV_PRIVATE drwav_result drwav_result_from_errno(int e) default: return DRWAV_ERROR; } } +/* End Errno */ +/* fopen */ DRWAV_PRIVATE drwav_result drwav_fopen(FILE** ppFile, const char* pFilePath, const char* pOpenMode) { #if defined(_MSC_VER) && _MSC_VER >= 1400 @@ -4828,6 +5143,7 @@ DRWAV_PRIVATE drwav_result drwav_wfopen(FILE** ppFile, const wchar_t* pFilePath, return DRWAV_SUCCESS; } #endif +/* End fopen */ DRWAV_PRIVATE size_t drwav__on_read_stdio(void* pUserData, void* pBufferOut, size_t bytesToRead) @@ -4851,7 +5167,7 @@ DRWAV_API drwav_bool32 drwav_init_file(drwav* pWav, const char* filename, const } -DRWAV_PRIVATE drwav_bool32 drwav_init_file__internal_FILE(drwav* pWav, FILE* pFile, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, drwav_metadata_type allowedMetadataTypes, const drwav_allocation_callbacks* pAllocationCallbacks) +DRWAV_PRIVATE drwav_bool32 drwav_init_file__internal_FILE(drwav* pWav, FILE* pFile, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks) { drwav_bool32 result; @@ -4861,8 +5177,6 @@ DRWAV_PRIVATE drwav_bool32 drwav_init_file__internal_FILE(drwav* pWav, FILE* pFi return result; } - pWav->allowedMetadataTypes = allowedMetadataTypes; - result = drwav_init__internal(pWav, onChunk, pChunkUserData, flags); if (result != DRWAV_TRUE) { fclose(pFile); @@ -4880,7 +5194,7 @@ DRWAV_API drwav_bool32 drwav_init_file_ex(drwav* pWav, const char* filename, drw } /* This takes ownership of the FILE* object. 
*/ - return drwav_init_file__internal_FILE(pWav, pFile, onChunk, pChunkUserData, flags, drwav_metadata_type_none, pAllocationCallbacks); + return drwav_init_file__internal_FILE(pWav, pFile, onChunk, pChunkUserData, flags, pAllocationCallbacks); } #ifndef DR_WAV_NO_WCHAR @@ -4897,7 +5211,7 @@ DRWAV_API drwav_bool32 drwav_init_file_ex_w(drwav* pWav, const wchar_t* filename } /* This takes ownership of the FILE* object. */ - return drwav_init_file__internal_FILE(pWav, pFile, onChunk, pChunkUserData, flags, drwav_metadata_type_none, pAllocationCallbacks); + return drwav_init_file__internal_FILE(pWav, pFile, onChunk, pChunkUserData, flags, pAllocationCallbacks); } #endif @@ -4909,7 +5223,7 @@ DRWAV_API drwav_bool32 drwav_init_file_with_metadata(drwav* pWav, const char* fi } /* This takes ownership of the FILE* object. */ - return drwav_init_file__internal_FILE(pWav, pFile, NULL, NULL, flags, drwav_metadata_type_all_including_unknown, pAllocationCallbacks); + return drwav_init_file__internal_FILE(pWav, pFile, NULL, NULL, flags | DRWAV_WITH_METADATA, pAllocationCallbacks); } #ifndef DR_WAV_NO_WCHAR @@ -4921,7 +5235,7 @@ DRWAV_API drwav_bool32 drwav_init_file_with_metadata_w(drwav* pWav, const wchar_ } /* This takes ownership of the FILE* object. */ - return drwav_init_file__internal_FILE(pWav, pFile, NULL, NULL, flags, drwav_metadata_type_all_including_unknown, pAllocationCallbacks); + return drwav_init_file__internal_FILE(pWav, pFile, NULL, NULL, flags | DRWAV_WITH_METADATA, pAllocationCallbacks); } #endif @@ -5166,9 +5480,7 @@ DRWAV_API drwav_bool32 drwav_init_memory_with_metadata(drwav* pWav, const void* pWav->memoryStream.dataSize = dataSize; pWav->memoryStream.currentReadPos = 0; - pWav->allowedMetadataTypes = drwav_metadata_type_all_including_unknown; - - return drwav_init__internal(pWav, NULL, NULL, flags); + return drwav_init__internal(pWav, NULL, NULL, flags | DRWAV_WITH_METADATA); } @@ -5297,9 +5609,7 @@ DRWAV_API drwav_result drwav_uninit(drwav* pWav) } } } else { - if (pWav->pMetadata != NULL) { - pWav->allocationCallbacks.onFree(pWav->pMetadata, pWav->allocationCallbacks.pUserData); - } + drwav_free(pWav->pMetadata, &pWav->allocationCallbacks); } #ifndef DR_WAV_NO_STDIO @@ -5387,6 +5697,7 @@ DRWAV_API drwav_uint64 drwav_read_pcm_frames_le(drwav* pWav, drwav_uint64 frames { drwav_uint32 bytesPerFrame; drwav_uint64 bytesToRead; /* Intentionally uint64 instead of size_t so we can do a check that we're not reading too much on 32-bit builds. */ + drwav_uint64 framesRemainingInFile; if (pWav == NULL || framesToRead == 0) { return 0; @@ -5397,6 +5708,11 @@ DRWAV_API drwav_uint64 drwav_read_pcm_frames_le(drwav* pWav, drwav_uint64 frames return 0; } + framesRemainingInFile = pWav->totalPCMFrameCount - pWav->readCursorInPCMFrames; + if (framesToRead > framesRemainingInFile) { + framesToRead = framesRemainingInFile; + } + bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); if (bytesPerFrame == 0) { return 0; @@ -5429,7 +5745,7 @@ DRWAV_API drwav_uint64 drwav_read_pcm_frames_be(drwav* pWav, drwav_uint64 frames return 0; /* Could not get the bytes per frame which means bytes per sample cannot be determined and we don't know how to byte swap. 
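
With the metadata request now folded into the internal flags (DRWAV_WITH_METADATA) rather than a separate allowedMetadataTypes field, the public entry points behave as before. A minimal usage sketch of that path (standalone program; the file name and error handling are placeholders):

    #define DR_WAV_IMPLEMENTATION
    #include "dr_wav.h"
    #include <stdio.h>

    int main(int argc, char **argv)
    {
        drwav wav;
        drwav_uint32 i;

        if (argc < 2 || !drwav_init_file_with_metadata(&wav, argv[1], 0, NULL)) {
            return -1;
        }

        printf("%u metadata chunk(s)\n", (unsigned)wav.metadataCount);
        for (i = 0; i < wav.metadataCount; i += 1) {
            if (wav.pMetadata[i].type == drwav_metadata_type_smpl) {
                printf("found a 'smpl' chunk\n");
            }
        }

        drwav_uninit(&wav);  /* frees wav.pMetadata unless drwav_take_ownership_of_metadata() was called */
        return 0;
    }
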
*/ } - drwav__bswap_samples(pBufferOut, framesRead*pWav->channels, bytesPerFrame/pWav->channels, pWav->translatedFormatTag); + drwav__bswap_samples(pBufferOut, framesRead*pWav->channels, bytesPerFrame/pWav->channels); } return framesRead; @@ -5437,11 +5753,50 @@ DRWAV_API drwav_uint64 drwav_read_pcm_frames_be(drwav* pWav, drwav_uint64 frames DRWAV_API drwav_uint64 drwav_read_pcm_frames(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut) { - if (drwav__is_little_endian()) { - return drwav_read_pcm_frames_le(pWav, framesToRead, pBufferOut); - } else { - return drwav_read_pcm_frames_be(pWav, framesToRead, pBufferOut); + drwav_uint64 framesRead = 0; + + if (drwav_is_container_be(pWav->container)) { + /* + Special case for AIFF. AIFF is a big-endian encoded format, but it supports a format that is + PCM in little-endian encoding. In this case, we fall through this branch and treate it as + little-endian. + */ + if (pWav->container != drwav_container_aiff || pWav->aiff.isLE == DRWAV_FALSE) { + if (drwav__is_little_endian()) { + framesRead = drwav_read_pcm_frames_be(pWav, framesToRead, pBufferOut); + } else { + framesRead = drwav_read_pcm_frames_le(pWav, framesToRead, pBufferOut); + } + + goto post_process; + } } + + /* Getting here means the data should be considered little-endian. */ + if (drwav__is_little_endian()) { + framesRead = drwav_read_pcm_frames_le(pWav, framesToRead, pBufferOut); + } else { + framesRead = drwav_read_pcm_frames_be(pWav, framesToRead, pBufferOut); + } + + /* + Here is where we check if we need to do a signed/unsigned conversion for AIFF. The reason we need to do this + is because dr_wav always assumes an 8-bit sample is unsigned, whereas AIFF can have signed 8-bit formats. + */ + post_process: + { + if (pWav->container == drwav_container_aiff && pWav->bitsPerSample == 8 && pWav->aiff.isUnsigned == DRWAV_FALSE) { + if (pBufferOut != NULL) { + drwav_uint64 iSample; + + for (iSample = 0; iSample < framesRead * pWav->channels; iSample += 1) { + ((drwav_uint8*)pBufferOut)[iSample] += 128; + } + } + } + } + + return framesRead; } @@ -5552,7 +5907,7 @@ DRWAV_API drwav_bool32 drwav_seek_to_pcm_frame(drwav* pWav, drwav_uint64 targetF } totalSizeInBytes = pWav->totalPCMFrameCount * bytesPerFrame; - DRWAV_ASSERT(totalSizeInBytes >= pWav->bytesRemaining); + /*DRWAV_ASSERT(totalSizeInBytes >= pWav->bytesRemaining);*/ currentBytePos = totalSizeInBytes - pWav->bytesRemaining; targetBytePos = targetFrameIndex * bytesPerFrame; @@ -5714,7 +6069,7 @@ DRWAV_API drwav_uint64 drwav_write_pcm_frames_be(drwav* pWav, drwav_uint64 frame } DRWAV_COPY_MEMORY(temp, pRunningData, (size_t)bytesToWriteThisIteration); - drwav__bswap_samples(temp, sampleCount, bytesPerSample, pWav->translatedFormatTag); + drwav__bswap_samples(temp, sampleCount, bytesPerSample); bytesJustWritten = drwav_write_raw(pWav, (size_t)bytesToWriteThisIteration, temp); if (bytesJustWritten == 0) { @@ -5967,7 +6322,7 @@ DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_s16__ima(drwav* pWav, drwav_uin return totalFramesRead; /* Invalid data. */ } - pWav->ima.predictor[0] = drwav_bytes_to_s16(header + 0); + pWav->ima.predictor[0] = (drwav_int16)drwav_bytes_to_u16(header + 0); pWav->ima.stepIndex[0] = drwav_clamp(header[2], 0, (drwav_int32)drwav_countof(stepTable)-1); /* Clamp not necessary because we checked above, but adding here to silence a static analysis warning. 
*/ pWav->ima.cachedFrames[drwav_countof(pWav->ima.cachedFrames) - 1] = pWav->ima.predictor[0]; pWav->ima.cachedFrameCount = 1; @@ -6341,6 +6696,23 @@ DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_s16__alaw(drwav* pWav, drwav_ui drwav_alaw_to_s16(pBufferOut, sampleData, (size_t)samplesRead); + /* + For some reason libsndfile seems to be returning samples of the opposite sign for a-law, but only + with AIFF files. For WAV files it seems to be the same as dr_wav. This is resulting in dr_wav's + automated tests failing. I'm not sure which is correct, but will assume dr_wav. If we're enforcing + libsndfile compatibility we'll swap the signs here. + */ + #ifdef DR_WAV_LIBSNDFILE_COMPAT + { + if (pWav->container == drwav_container_aiff) { + drwav_uint64 iSample; + for (iSample = 0; iSample < samplesRead; iSample += 1) { + pBufferOut[iSample] = -pBufferOut[iSample]; + } + } + } + #endif + pBufferOut += samplesRead; framesToRead -= framesRead; totalFramesRead += framesRead; @@ -6391,6 +6763,21 @@ DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_s16__mulaw(drwav* pWav, drwav_u drwav_mulaw_to_s16(pBufferOut, sampleData, (size_t)samplesRead); + /* + Just like with alaw, for some reason the signs between libsndfile and dr_wav are opposite. We just need to + swap the sign if we're compiling with libsndfile compatiblity so our automated tests don't fail. + */ + #ifdef DR_WAV_LIBSNDFILE_COMPAT + { + if (pWav->container == drwav_container_aiff) { + drwav_uint64 iSample; + for (iSample = 0; iSample < samplesRead; iSample += 1) { + pBufferOut[iSample] = -pBufferOut[iSample]; + } + } + } + #endif + pBufferOut += samplesRead; framesToRead -= framesRead; totalFramesRead += framesRead; @@ -6543,7 +6930,6 @@ DRWAV_API void drwav_mulaw_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, siz } - DRWAV_PRIVATE void drwav__pcm_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount, unsigned int bytesPerSample) { unsigned int i; @@ -6777,6 +7163,17 @@ DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_f32__alaw(drwav* pWav, drwav_ui drwav_alaw_to_f32(pBufferOut, sampleData, (size_t)samplesRead); + #ifdef DR_WAV_LIBSNDFILE_COMPAT + { + if (pWav->container == drwav_container_aiff) { + drwav_uint64 iSample; + for (iSample = 0; iSample < samplesRead; iSample += 1) { + pBufferOut[iSample] = -pBufferOut[iSample]; + } + } + } + #endif + pBufferOut += samplesRead; framesToRead -= framesRead; totalFramesRead += framesRead; @@ -6823,6 +7220,17 @@ DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_f32__mulaw(drwav* pWav, drwav_u drwav_mulaw_to_f32(pBufferOut, sampleData, (size_t)samplesRead); + #ifdef DR_WAV_LIBSNDFILE_COMPAT + { + if (pWav->container == drwav_container_aiff) { + drwav_uint64 iSample; + for (iSample = 0; iSample < samplesRead; iSample += 1) { + pBufferOut[iSample] = -pBufferOut[iSample]; + } + } + } + #endif + pBufferOut += samplesRead; framesToRead -= framesRead; totalFramesRead += framesRead; @@ -7234,6 +7642,17 @@ DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_s32__alaw(drwav* pWav, drwav_ui drwav_alaw_to_s32(pBufferOut, sampleData, (size_t)samplesRead); + #ifdef DR_WAV_LIBSNDFILE_COMPAT + { + if (pWav->container == drwav_container_aiff) { + drwav_uint64 iSample; + for (iSample = 0; iSample < samplesRead; iSample += 1) { + pBufferOut[iSample] = -pBufferOut[iSample]; + } + } + } + #endif + pBufferOut += samplesRead; framesToRead -= framesRead; totalFramesRead += framesRead; @@ -7280,6 +7699,17 @@ DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_s32__mulaw(drwav* pWav, drwav_u 
drwav_mulaw_to_s32(pBufferOut, sampleData, (size_t)samplesRead); + #ifdef DR_WAV_LIBSNDFILE_COMPAT + { + if (pWav->container == drwav_container_aiff) { + drwav_uint64 iSample; + for (iSample = 0; iSample < samplesRead; iSample += 1) { + pBufferOut[iSample] = -pBufferOut[iSample]; + } + } + } + #endif + pBufferOut += samplesRead; framesToRead -= framesRead; totalFramesRead += framesRead; @@ -7853,7 +8283,7 @@ DRWAV_API drwav_int16 drwav_bytes_to_s16(const drwav_uint8* data) DRWAV_API drwav_uint32 drwav_bytes_to_u32(const drwav_uint8* data) { - return ((drwav_uint32)data[0] << 0) | ((drwav_uint32)data[1] << 8) | ((drwav_uint32)data[2] << 16) | ((drwav_uint32)data[3] << 24); + return drwav_bytes_to_u32_le(data); } DRWAV_API float drwav_bytes_to_f32(const drwav_uint8* data) @@ -7917,6 +8347,29 @@ DRWAV_API drwav_bool32 drwav_fourcc_equal(const drwav_uint8* a, const char* b) /* REVISION HISTORY ================ +v0.13.13 - 2023-11-02 + - Fix a warning when compiling with Clang. + +v0.13.12 - 2023-08-07 + - Fix a possible crash in drwav_read_pcm_frames(). + +v0.13.11 - 2023-07-07 + - AIFF compatibility improvements. + +v0.13.10 - 2023-05-29 + - Fix a bug where drwav_init_with_metadata() does not decode any frames after initializtion. + +v0.13.9 - 2023-05-22 + - Add support for AIFF decoding (writing and metadata not supported). + - Add support for RIFX decoding (writing and metadata not supported). + - Fix a bug where metadata is not processed if it's located before the "fmt " chunk. + - Add a workaround for a type of malformed WAV file where the size of the "RIFF" and "data" chunks + are incorrectly set to 0xFFFFFFFF. + +v0.13.8 - 2023-03-25 + - Fix a possible null pointer dereference. + - Fix a crash when loading files with badly formed metadata. + v0.13.7 - 2022-09-17 - Fix compilation with DJGPP. - Add support for disabling wchar_t with DR_WAV_NO_WCHAR. @@ -8331,7 +8784,7 @@ For more information, please refer to =============================================================================== ALTERNATIVE 2 - MIT No Attribution =============================================================================== -Copyright 2020 David Reid +Copyright 2023 David Reid Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/raylib/external/m3d.h b/raylib/external/m3d.h index 3b29927..6abaad1 100644 --- a/raylib/external/m3d.h +++ b/raylib/external/m3d.h @@ -89,7 +89,7 @@ typedef uint8_t M3D_VOXEL; #define M3D_NUMBONE 4 #endif #ifndef M3D_BONEMAXLEVEL -#define M3D_BONEMAXLEVEL 8 +#define M3D_BONEMAXLEVEL 64 #endif #ifndef _MSC_VER #ifndef _inline @@ -2172,6 +2172,8 @@ M3D_INDEX _m3d_gettx(m3d_t *model, m3dread_t readfilecb, m3dfree_t freecb, char stbi__context s; stbi__result_info ri; + /* failsafe */ + if(!fn || !*fn) return M3D_UNDEF; /* do we have loaded this texture already? */ for(i = 0; i < model->numtexture; i++) if(!strcmp(fn, model->texture[i].name)) return i; @@ -2246,9 +2248,9 @@ void _m3d_getpr(m3d_t *model, _unused m3dread_t readfilecb, _unused m3dfree_t f { #ifdef M3D_PR_INTERP unsigned int i, len = 0; - unsigned char *buff = readfilecb ? (*readfilecb)(fn, &len) : NULL; + unsigned char *buff = readfilecb && fn && *fn ? 
(*readfilecb)(fn, &len) : NULL; - if(!buff && model->inlined) { + if(!buff && fn && *fn && model->inlined) { for(i = 0; i < model->numinlined; i++) if(!strcmp(fn, model->inlined[i].name)) { buff = model->inlined[i].data; @@ -3439,6 +3441,7 @@ memerr: M3D_LOG("Out of memory"); model->bone[i].numweight = 0; model->bone[i].weight = NULL; } + if(i != model->numbone) { M3D_LOG("Truncated bone chunk"); model->numbone = i; model->numskin = 0; model->errcode = M3D_ERR_BONE; } /* read skin definitions */ if(model->numskin) { model->skin = (m3ds_t*)M3D_MALLOC(model->numskin * sizeof(m3ds_t)); @@ -3471,6 +3474,7 @@ memerr: M3D_LOG("Out of memory"); model->skin[i].weight[j] /= w; } } + if(i != model->numskin) { M3D_LOG("Truncated skin in bone chunk"); model->numskin = i; model->errcode = M3D_ERR_BONE; } } } else /* material */ @@ -4726,14 +4730,14 @@ unsigned char *m3d_save(m3d_t *model, int quality, int flags, unsigned int *size unsigned char *out = NULL, *z = NULL, weights[M3D_NUMBONE < 8 ? 8 : M3D_NUMBONE], *norm = NULL; unsigned int i, j, k, l, n, o, len, chunklen, *length; int maxvox = 0, minvox = 0; - M3D_FLOAT scale = (M3D_FLOAT)0.0, min_x, max_x, min_y, max_y, min_z, max_z; + M3D_FLOAT scale = (M3D_FLOAT)0.0, min_x, max_x, min_y, max_y, min_z, max_z, mw; M3D_INDEX last, *vrtxidx = NULL, *mtrlidx = NULL, *tmapidx = NULL, *skinidx = NULL; #ifdef M3D_VERTEXMAX M3D_INDEX lastp; #endif uint32_t idx, numcmap = 0, *cmap = NULL, numvrtx = 0, maxvrtx = 0, numtmap = 0, maxtmap = 0, numproc = 0; uint32_t numskin = 0, maxskin = 0, numstr = 0, maxt = 0, maxbone = 0, numgrp = 0, maxgrp = 0, *grpidx = NULL; - uint8_t *opa; + uint8_t *opa = NULL; m3dcd_t *cd; m3dc_t *cmd; m3dstr_t *str = NULL; @@ -5072,10 +5076,9 @@ unsigned char *m3d_save(m3d_t *model, int quality, int flags, unsigned int *size for(i = 0; i < model->numskin; i++) { if(skinidx[i] == M3D_UNDEF) continue; memset(&sk, 0, sizeof(m3dssave_t)); - for(j = 0, min_x = (M3D_FLOAT)0.0; j < M3D_NUMBONE && model->skin[i].boneid[j] != M3D_UNDEF && - model->skin[i].weight[j] > (M3D_FLOAT)0.0; j++) { + for(j = 0, min_x = (M3D_FLOAT)0.0; j < M3D_NUMBONE && model->skin[i].boneid[j] != M3D_UNDEF; j++) { sk.data.boneid[j] = model->skin[i].boneid[j]; - sk.data.weight[j] = model->skin[i].weight[j]; + sk.data.weight[j] = model->skin[i].weight[j] > (M3D_FLOAT)0.0 ? model->skin[i].weight[j] : (M3D_FLOAT)0.01; min_x += sk.data.weight[j]; } if(j > maxbone) maxbone = j; @@ -5191,6 +5194,7 @@ memerr: if(vrtxidx) M3D_FREE(vrtxidx); if(sa) M3D_FREE(sa); if(sd) M3D_FREE(sd); if(out) M3D_FREE(out); + if(opa) free(opa); if(h) M3D_FREE(h); M3D_LOG("Out of memory"); model->errcode = M3D_ERR_ALLOC; @@ -5218,8 +5222,16 @@ memerr: if(vrtxidx) M3D_FREE(vrtxidx); if(model->preview.data && model->preview.length) { sl = _m3d_safestr(sn, 0); if(sl) { +/* gcc thinks that "ptr is used after free", well, gcc is simply wrong. 
*/ +#ifdef __GNUC__ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wuse-after-free" +#endif ptr -= (uintptr_t)out; len = (unsigned int)((uintptr_t)ptr + (uintptr_t)20 + strlen(sl)); out = (unsigned char*)M3D_REALLOC(out, len); ptr += (uintptr_t)out; +#ifdef __GNUC__ +#pragma GCC diagnostic pop +#endif if(!out) { setlocale(LC_NUMERIC, ol); goto memerr; } ptr += sprintf(ptr, "Preview\r\n%s.png\r\n\r\n", sl); M3D_FREE(sl); sl = NULL; @@ -5228,6 +5240,7 @@ memerr: if(vrtxidx) M3D_FREE(vrtxidx); M3D_FREE(sn); sn = NULL; /* texture map */ if(numtmap && tmap && !(flags & M3D_EXP_NOTXTCRD) && !(flags & M3D_EXP_NOFACE)) { +/* interestingly gcc does not complain about "ptr is used after free" here, although the code is 100% the same */ ptr -= (uintptr_t)out; len = (unsigned int)((uintptr_t)ptr + (uintptr_t)(maxtmap * 32) + (uintptr_t)12); out = (unsigned char*)M3D_REALLOC(out, len); ptr += (uintptr_t)out; if(!out) { setlocale(LC_NUMERIC, ol); goto memerr; } @@ -5846,9 +5859,13 @@ memerr: if(vrtxidx) M3D_FREE(vrtxidx); if(skin[i].newidx == last) continue; last = skin[i].newidx; memset(&weights, 0, nb_s); - for(j = 0; j < (uint32_t)nb_s && skin[i].data.boneid[j] != M3D_UNDEF && - skin[i].data.weight[j] > (M3D_FLOAT)0.0; j++) + for(j = k = l = 0, mw = 0.0; j < (uint32_t)nb_s && skin[i].data.boneid[j] != M3D_UNDEF && + skin[i].data.weight[j] > (M3D_FLOAT)0.0; j++) { + if(mw < skin[i].data.weight[j]) { mw = skin[i].data.weight[j]; k = j; } weights[j] = (uint8_t)(skin[i].data.weight[j] * 255); + if(!weights[j]) { weights[j]++; l--; } + } + weights[k] += l; switch(nb_s) { case 1: weights[0] = 255; break; case 2: memcpy(out, weights, 2); out += 2; break; @@ -5941,7 +5958,7 @@ memerr: if(vrtxidx) M3D_FREE(vrtxidx); } /* mesh face */ if(model->numface && face && !(flags & M3D_EXP_NOFACE)) { - chunklen = 8 + si_s + model->numface * (6 * vi_s + 3 * ti_s + si_s + 1); + chunklen = 8 + si_s + model->numface * (9 * vi_s + 3 * ti_s + si_s + 1); h = (m3dhdr_t*)M3D_REALLOC(h, len + chunklen); if(!h) goto memerr; memcpy((uint8_t*)h + len, "MESH", 4); @@ -6268,6 +6285,7 @@ memerr: if(vrtxidx) M3D_FREE(vrtxidx); if(skin) M3D_FREE(skin); if(str) M3D_FREE(str); if(vrtx) M3D_FREE(vrtx); + if(opa) free(opa); if(h) M3D_FREE(h); return out; } diff --git a/raylib/external/miniaudio.h b/raylib/external/miniaudio.h index ac2da69..b5a8771 100644 --- a/raylib/external/miniaudio.h +++ b/raylib/external/miniaudio.h @@ -1,6 +1,6 @@ /* Audio playback and capture library. Choice of public domain or MIT-0. See license statements at the end of this file. -miniaudio - v0.11.19 - TBD +miniaudio - v0.11.19 - 2023-11-04 David Reid - mackron@gmail.com @@ -7042,7 +7042,7 @@ struct ma_device_config ma_uint32 periods; ma_performance_profile performanceProfile; ma_bool8 noPreSilencedOutputBuffer; /* When set to true, the contents of the output buffer passed into the data callback will be left undefined rather than initialized to silence. */ - ma_bool8 noClip; /* When set to true, the contents of the output buffer passed into the data callback will be clipped after returning. Only applies when the playback sample format is f32. */ + ma_bool8 noClip; /* When set to true, the contents of the output buffer passed into the data callback will not be clipped after returning. Only applies when the playback sample format is f32. */ ma_bool8 noDisableDenormals; /* Do not disable denormals when firing the data callback. */ ma_bool8 noFixedSizedCallback; /* Disables strict fixed-sized data callbacks. 
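
The m3d_save() weight export loop above packs per-vertex bone weights into single bytes while making sure no non-zero weight collapses to zero, with the deficit absorbed by the largest weight. A small standalone sketch of that quantization step (hypothetical names, M3D types replaced with plain C types):

    #include <stdint.h>
    #include <stdio.h>

    /* Quantize normalized bone weights to bytes: any non-zero weight that
       would truncate to 0 is bumped to 1, and the largest weight is reduced
       by the number of bumps so the total stays roughly unchanged. */
    static void quantize_weights(const float *w, uint8_t *out, int n)
    {
        int j, k = 0, l = 0;
        float mw = 0.0f;

        for (j = 0; j < n; j++) {
            if (mw < w[j]) { mw = w[j]; k = j; }                 /* remember the largest weight */
            out[j] = (uint8_t)(w[j] * 255);
            if (w[j] > 0.0f && out[j] == 0) { out[j]++; l--; }   /* don't lose tiny weights */
        }
        out[k] = (uint8_t)(out[k] + l);                          /* compensate on the largest one */
    }

    int main(void)
    {
        const float w[4] = { 0.90f, 0.095f, 0.004f, 0.001f };
        uint8_t q[4];
        int i;

        quantize_weights(w, q, 4);
        for (i = 0; i < 4; i++) { printf("%u ", (unsigned)q[i]); }  /* prints 228 24 1 1 */
        printf("\n");
        return 0;
    }
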
Setting this to true will result in the period size being treated only as a hint to the backend. This is an optimization for those who don't need fixed sized callbacks. */ ma_device_data_proc dataCallback; @@ -8626,8 +8626,8 @@ then be set directly on the structure. Below are the members of the `ma_device_c callback will write to every sample in the output buffer, or if you are doing your own clearing. noClip - When set to true, the contents of the output buffer passed into the data callback will be clipped after returning. When set to false (default), the - contents of the output buffer are left alone after returning and it will be left up to the backend itself to decide whether or not the clip. This only + When set to true, the contents of the output buffer are left alone after returning and it will be left up to the backend itself to decide whether or + not to clip. When set to false (default), the contents of the output buffer passed into the data callback will be clipped after returning. This only applies when the playback sample format is f32. noDisableDenormals @@ -40464,7 +40464,7 @@ static ma_result ma_context_init__webaudio(ma_context* pContext, const ma_contex }; miniaudio.unlock_event_types = (function(){ - return ['touchstart', 'touchend', 'click']; + return ['touchend', 'click']; })(); miniaudio.unlock = function() { @@ -60158,7 +60158,7 @@ extern "C" { #define MA_DR_FLAC_XSTRINGIFY(x) MA_DR_FLAC_STRINGIFY(x) #define MA_DR_FLAC_VERSION_MAJOR 0 #define MA_DR_FLAC_VERSION_MINOR 12 -#define MA_DR_FLAC_VERSION_REVISION 41 +#define MA_DR_FLAC_VERSION_REVISION 42 #define MA_DR_FLAC_VERSION_STRING MA_DR_FLAC_XSTRINGIFY(MA_DR_FLAC_VERSION_MAJOR) "." MA_DR_FLAC_XSTRINGIFY(MA_DR_FLAC_VERSION_MINOR) "." MA_DR_FLAC_XSTRINGIFY(MA_DR_FLAC_VERSION_REVISION) #include #if defined(_MSC_VER) && _MSC_VER >= 1700 @@ -60445,7 +60445,7 @@ extern "C" { #define MA_DR_MP3_XSTRINGIFY(x) MA_DR_MP3_STRINGIFY(x) #define MA_DR_MP3_VERSION_MAJOR 0 #define MA_DR_MP3_VERSION_MINOR 6 -#define MA_DR_MP3_VERSION_REVISION 37 +#define MA_DR_MP3_VERSION_REVISION 38 #define MA_DR_MP3_VERSION_STRING MA_DR_MP3_XSTRINGIFY(MA_DR_MP3_VERSION_MAJOR) "." MA_DR_MP3_XSTRINGIFY(MA_DR_MP3_VERSION_MINOR) "." 
MA_DR_MP3_XSTRINGIFY(MA_DR_MP3_VERSION_REVISION) #include #define MA_DR_MP3_MAX_PCM_FRAMES_PER_MP3_FRAME 1152 @@ -82387,7 +82387,7 @@ static MA_INLINE ma_uint32 ma_dr_flac__swap_endian_uint32(ma_uint32 n) #if defined(_MSC_VER) && !defined(__clang__) return _byteswap_ulong(n); #elif defined(__GNUC__) || defined(__clang__) - #if defined(MA_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 6) && !defined(MA_64BIT) + #if defined(MA_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 6) && !defined(__ARM_ARCH_6M__) && !defined(MA_64BIT) ma_uint32 r; __asm__ __volatile__ ( #if defined(MA_64BIT) @@ -83128,7 +83128,7 @@ static MA_INLINE ma_uint32 ma_dr_flac__clz_lzcnt(ma_dr_flac_cache_t x) ); return r; } - #elif defined(MA_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 5) && !defined(MA_64BIT) + #elif defined(MA_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 5) && !defined(__ARM_ARCH_6M__) && !defined(MA_64BIT) { unsigned int r; __asm__ __volatile__ ( @@ -85869,7 +85869,7 @@ static ma_bool32 ma_dr_flac__read_and_decode_metadata(ma_dr_flac_read_proc onRea for (;;) { ma_dr_flac_metadata metadata; ma_uint8 isLastBlock = 0; - ma_uint8 blockType; + ma_uint8 blockType = 0; ma_uint32 blockSize; if (ma_dr_flac__read_and_decode_block_header(onRead, pUserData, &isLastBlock, &blockType, &blockSize) == MA_FALSE) { return MA_FALSE; @@ -89949,7 +89949,7 @@ static int ma_dr_mp3_have_simd(void) #else #define MA_DR_MP3_HAVE_SIMD 0 #endif -#if defined(__ARM_ARCH) && (__ARM_ARCH >= 6) && !defined(__aarch64__) && !defined(_M_ARM64) +#if defined(__ARM_ARCH) && (__ARM_ARCH >= 6) && !defined(__aarch64__) && !defined(_M_ARM64) && !defined(__ARM_ARCH_6M__) #define MA_DR_MP3_HAVE_ARMV6 1 static __inline__ __attribute__((always_inline)) ma_int32 ma_dr_mp3_clip_int16_arm(ma_int32 a) { diff --git a/raylib/external/qoa.h b/raylib/external/qoa.h index 59d90ad..fc62f47 100644 --- a/raylib/external/qoa.h +++ b/raylib/external/qoa.h @@ -8,71 +8,96 @@ QOA - The "Quite OK Audio" format for fast, lossy audio compression -- Data Format -A QOA file has an 8 byte file header, followed by a number of frames. Each frame -consists of an 8 byte frame header, the current 8 byte en-/decoder state per -channel and 256 slices per channel. Each slice is 8 bytes wide and encodes 20 -samples of audio data. +QOA encodes pulse-code modulated (PCM) audio data with up to 255 channels, +sample rates from 1 up to 16777215 hertz and a bit depth of 16 bits. -Note that the last frame of a file may contain less than 256 slices per channel. -The last slice (per channel) in the last frame may contain less 20 samples, but -the slice will still be 8 bytes wide, with the unused samples zeroed out. +The compression method employed in QOA is lossy; it discards some information +from the uncompressed PCM data. For many types of audio signals this compression +is "transparent", i.e. the difference from the original file is often not +audible. -The samplerate and number of channels is only stated in the frame headers, but -not in the file header. A decoder may peek into the first frame of the file to -find these values. +QOA encodes 20 samples of 16 bit PCM data into slices of 64 bits. A single +sample therefore requires 3.2 bits of storage space, resulting in a 5x +compression (16 / 3.2). -In a valid QOA file all frames have the same number of channels and the same -samplerate. These restrictions may be relaxed for streaming. This remains to -be decided. +A QOA file consists of an 8 byte file header, followed by a number of frames. 
+Each frame contains an 8 byte frame header, the current 16 byte en-/decoder +state per channel and 256 slices per channel. Each slice is 8 bytes wide and +encodes 20 samples of audio data. -All values in a QOA file are BIG ENDIAN. Luckily, EVERYTHING in a QOA file, -including the headers, is 64 bit aligned, so it's possible to read files with -just a read_u64() that does the byte swapping if necessary. - -In pseudocode, the file layout is as follows: +All values, including the slices, are big endian. The file layout is as follows: struct { struct { - char magic[4]; // magic bytes 'qoaf' - uint32_t samples; // number of samples per channel in this file - } file_header; // = 64 bits + char magic[4]; // magic bytes "qoaf" + uint32_t samples; // samples per channel in this file + } file_header; struct { struct { - uint8_t num_channels; // number of channels + uint8_t num_channels; // no. of channels uint24_t samplerate; // samplerate in hz - uint16_t fsamples; // sample count per channel in this frame - uint16_t fsize; // frame size (including the frame header) - } frame_header; // = 64 bits + uint16_t fsamples; // samples per channel in this frame + uint16_t fsize; // frame size (includes this header) + } frame_header; struct { - int16_t history[4]; // = 64 bits - int16_t weights[4]; // = 64 bits + int16_t history[4]; // most recent last + int16_t weights[4]; // most recent last } lms_state[num_channels]; - qoa_slice_t slices[256][num_channels]; // = 64 bits each - } frames[samples * channels / qoa_max_framesize()]; -} qoa_file; + qoa_slice_t slices[256][num_channels]; -Wheras the 64bit qoa_slice_t is defined as follows: + } frames[ceil(samples / (256 * 20))]; +} qoa_file_t; + +Each `qoa_slice_t` contains a quantized scalefactor `sf_quant` and 20 quantized +residuals `qrNN`: .- QOA_SLICE -- 64 bits, 20 samples --------------------------/ /------------. | Byte[0] | Byte[1] | Byte[2] \ \ Byte[7] | | 7 6 5 4 3 2 1 0 | 7 6 5 4 3 2 1 0 | 7 6 5 / / 2 1 0 | |------------+--------+--------+--------+---------+---------+-\ \--+---------| -| sf_index | r00 | r01 | r02 | r03 | r04 | / / | r19 | +| sf_quant | qr00 | qr01 | qr02 | qr03 | qr04 | / / | qr19 | `-------------------------------------------------------------\ \------------` -`sf_index` defines the scalefactor to use for this slice as an index into the -qoa_scalefactor_tab[16] +Each frame except the last must contain exactly 256 slices per channel. The last +frame may contain between 1 .. 256 (inclusive) slices per channel. The last +slice (for each channel) in the last frame may contain less than 20 samples; the +slice still must be 8 bytes wide, with the unused samples zeroed out. -`r00`--`r19` are the residuals for the individual samples, divided by the -scalefactor and quantized by the qoa_quant_tab[]. +Channels are interleaved per slice. E.g. for 2 channel stereo: +slice[0] = L, slice[1] = R, slice[2] = L, slice[3] = R ... -In the decoder, a prediction of the next sample is computed by multiplying the -state (the last four output samples) with the predictor. The residual from the -slice is then dequantized using the qoa_dequant_tab[] and added to the -prediction. The result is clamped to int16 to form the final output sample. +A valid QOA file or stream must have at least one frame. Each frame must contain +at least one channel and one sample with a samplerate between 1 .. 16777215 +(inclusive). 
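As an illustration of the slice layout described above, a slice that has already been
read into host byte order can be unpacked with plain shifts and masks. This is only a
sketch (the helper name is made up and is not part of qoa.h); the actual decoder
interleaves the unpacking with the LMS prediction:

    /* Illustrative helper, not part of qoa.h: unpack one 64-bit slice
       (already byte-swapped to host order) into its fields. */
    static void example_unpack_slice(unsigned long long slice, int *sf_quant, int qr[20]) {
        *sf_quant = (int)((slice >> 60) & 0xF);   /* top 4 bits: quantized scalefactor */
        for (int i = 0; i < 20; i++) {
            qr[i] = (int)((slice >> 57) & 0x7);   /* next 3 bits: one quantized residual */
            slice <<= 3;                          /* move the following residual into place */
        }
    }

sf_quant then selects the row of qoa_dequant_tab[] (and the entry of
qoa_scalefactor_tab[] on the encoder side), and each qr indexes into that row to
recover the dequantized residual.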
+ +If the total number of samples is not known by the encoder, the samples in the +file header may be set to 0x00000000 to indicate that the encoder is +"streaming". In a streaming context, the samplerate and number of channels may +differ from frame to frame. For static files (those with samples set to a +non-zero value), each frame must have the same number of channels and same +samplerate. + +Note that this implementation of QOA only handles files with a known total +number of samples. + +A decoder should support at least 8 channels. The channel layout for channel +counts 1 .. 8 is: + + 1. Mono + 2. L, R + 3. L, R, C + 4. FL, FR, B/SL, B/SR + 5. FL, FR, C, B/SL, B/SR + 6. FL, FR, C, LFE, B/SL, B/SR + 7. FL, FR, C, LFE, B, SL, SR + 8. FL, FR, C, LFE, BL, BR, SL, SR + +QOA predicts each audio sample based on the previously decoded ones using a +"Sign-Sign Least Mean Squares Filter" (LMS). This prediction plus the +dequantized residual forms the final output sample. */ @@ -158,7 +183,7 @@ the higher end. Note that the residual zero is identical to the lowest positive value. This is mostly fine, since the qoa_div() function always rounds away from zero. */ -static int qoa_quant_tab[17] = { +static const int qoa_quant_tab[17] = { 7, 7, 7, 5, 5, 3, 3, 1, /* -8..-1 */ 0, /* 0 */ 0, 2, 2, 4, 4, 6, 6, 6 /* 1.. 8 */ @@ -169,13 +194,13 @@ static int qoa_quant_tab[17] = { less accurate at the higher end. In theory, the highest scalefactor that we would need to encode the highest 16bit residual is (2**16)/8 = 8192. However we rely on the LMS filter to predict samples accurately enough that a maximum -residual of one quarter of the 16 bit range is high sufficient. I.e. with the +residual of one quarter of the 16 bit range is sufficient. I.e. with the scalefactor 2048 times the quant range of 8 we can encode residuals up to 2**14. The scalefactor values are computed as: scalefactor_tab[s] <- round(pow(s + 1, 2.75)) */ -static int qoa_scalefactor_tab[16] = { +static const int qoa_scalefactor_tab[16] = { 1, 7, 21, 45, 84, 138, 211, 304, 421, 562, 731, 928, 1157, 1419, 1715, 2048 }; @@ -188,7 +213,7 @@ do this in .16 fixed point with integers, instead of floats. The reciprocal_tab is computed as: reciprocal_tab[s] <- ((1<<16) + scalefactor_tab[s] - 1) / scalefactor_tab[s] */ -static int qoa_reciprocal_tab[16] = { +static const int qoa_reciprocal_tab[16] = { 65536, 9363, 3121, 1457, 781, 475, 311, 216, 156, 117, 90, 71, 57, 47, 39, 32 }; @@ -200,9 +225,13 @@ Since qoa_div rounds away from the zero, the smallest entries are mapped to 3/4 instead of 1. The dequant_tab assumes the following dequantized values for each of the quant_tab indices and is computed as: float dqt[8] = {0.75, -0.75, 2.5, -2.5, 4.5, -4.5, 7, -7}; -dequant_tab[s][q] <- round(scalefactor_tab[s] * dqt[q]) */ +dequant_tab[s][q] <- round_ties_away_from_zero(scalefactor_tab[s] * dqt[q]) -static int qoa_dequant_tab[16][8] = { +The rounding employed here is "to nearest, ties away from zero", i.e. positive +and negative values are treated symmetrically. +*/ + +static const int qoa_dequant_tab[16][8] = { { 1, -1, 3, -3, 5, -5, 7, -7}, { 5, -5, 18, -18, 32, -32, 49, -49}, { 16, -16, 53, -53, 95, -95, 147, -147}, @@ -270,7 +299,21 @@ static inline int qoa_div(int v, int scalefactor) { } static inline int qoa_clamp(int v, int min, int max) { - return (v < min) ? min : (v > max) ? 
max : v; + if (v < min) { return min; } + if (v > max) { return max; } + return v; +} + +/* This specialized clamp function for the signed 16 bit range improves decode +performance quite a bit. The extra if() statement works nicely with the CPUs +branch prediction as this branch is rarely taken. */ + +static inline int qoa_clamp_s16(int v) { + if ((unsigned int)(v + 32768) > 65535) { + if (v < -32768) { return -32768; } + if (v > 32767) { return 32767; } + } + return v; } static inline qoa_uint64_t qoa_read_u64(const unsigned char *bytes, unsigned int *p) { @@ -312,6 +355,7 @@ unsigned int qoa_encode_frame(const short *sample_data, qoa_desc *qoa, unsigned unsigned int p = 0; unsigned int slices = (frame_len + QOA_SLICE_LEN - 1) / QOA_SLICE_LEN; unsigned int frame_size = QOA_FRAME_SIZE(channels, slices); + int prev_scalefactor[QOA_MAX_CHANNELS] = {0}; /* Write the frame header */ qoa_write_u64(( @@ -321,8 +365,24 @@ unsigned int qoa_encode_frame(const short *sample_data, qoa_desc *qoa, unsigned (qoa_uint64_t)frame_size ), bytes, &p); - /* Write the current LMS state */ + for (int c = 0; c < channels; c++) { + /* If the weights have grown too large, reset them to 0. This may happen + with certain high-frequency sounds. This is a last resort and will + introduce quite a bit of noise, but should at least prevent pops/clicks */ + int weights_sum = + qoa->lms[c].weights[0] * qoa->lms[c].weights[0] + + qoa->lms[c].weights[1] * qoa->lms[c].weights[1] + + qoa->lms[c].weights[2] * qoa->lms[c].weights[2] + + qoa->lms[c].weights[3] * qoa->lms[c].weights[3]; + if (weights_sum > 0x2fffffff) { + qoa->lms[c].weights[0] = 0; + qoa->lms[c].weights[1] = 0; + qoa->lms[c].weights[2] = 0; + qoa->lms[c].weights[3] = 0; + } + + /* Write the current LMS state */ qoa_uint64_t weights = 0; qoa_uint64_t history = 0; for (int i = 0; i < QOA_LMS_LEN; i++) { @@ -348,8 +408,13 @@ unsigned int qoa_encode_frame(const short *sample_data, qoa_desc *qoa, unsigned qoa_uint64_t best_error = -1; qoa_uint64_t best_slice; qoa_lms_t best_lms; + int best_scalefactor; - for (int scalefactor = 0; scalefactor < 16; scalefactor++) { + for (int sfi = 0; sfi < 16; sfi++) { + /* There is a strong correlation between the scalefactors of + neighboring slices. As an optimization, start testing + the best scalefactor of the previous slice first. 
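   E.g. if prev_scalefactor[c] is 5, the (sfi + prev_scalefactor[c]) % 16 rotation below
   tries the scalefactors in the order 5, 6, ..., 15, 0, 1, ..., 4, so the previous
   slice's winner is evaluated on the very first iteration.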
*/ + int scalefactor = (sfi + prev_scalefactor[c]) % 16; /* We have to reset the LMS state to the last known good one before trying each scalefactor, as each pass updates the LMS @@ -367,7 +432,7 @@ unsigned int qoa_encode_frame(const short *sample_data, qoa_desc *qoa, unsigned int clamped = qoa_clamp(scaled, -8, 8); int quantized = qoa_quant_tab[clamped + 8]; int dequantized = qoa_dequant_tab[scalefactor][quantized]; - int reconstructed = qoa_clamp(predicted + dequantized, -32768, 32767); + int reconstructed = qoa_clamp_s16(predicted + dequantized); long long error = (sample - reconstructed); current_error += error * error; @@ -383,9 +448,12 @@ unsigned int qoa_encode_frame(const short *sample_data, qoa_desc *qoa, unsigned best_error = current_error; best_slice = slice; best_lms = lms; + best_scalefactor = scalefactor; } } + prev_scalefactor[c] = best_scalefactor; + qoa->lms[c] = best_lms; #ifdef QOA_RECORD_TOTAL_ERROR qoa->error += best_error; @@ -553,7 +621,7 @@ unsigned int qoa_decode_frame(const unsigned char *bytes, unsigned int size, qoa int predicted = qoa_lms_predict(&qoa->lms[c]); int quantized = (slice >> 57) & 0x7; int dequantized = qoa_dequant_tab[scalefactor][quantized]; - int reconstructed = qoa_clamp(predicted + dequantized, -32768, 32767); + int reconstructed = qoa_clamp_s16(predicted + dequantized); sample_data[si] = reconstructed; slice <<= 3; diff --git a/raylib/external/qoi.h b/raylib/external/qoi.h index 6734ac4..f2800b0 100644 --- a/raylib/external/qoi.h +++ b/raylib/external/qoi.h @@ -594,7 +594,7 @@ void *qoi_decode(const void *data, int size, qoi_desc *desc, int channels) { int qoi_write(const char *filename, const void *data, const qoi_desc *desc) { FILE *f = fopen(filename, "wb"); - int size; + int size, err; void *encoded; if (!f) { @@ -608,10 +608,12 @@ int qoi_write(const char *filename, const void *data, const qoi_desc *desc) { } fwrite(encoded, 1, size, f); + fflush(f); + err = ferror(f); fclose(f); QOI_FREE(encoded); - return size; + return err ? 0 : size; } void *qoi_read(const char *filename, qoi_desc *desc, int channels) { @@ -625,11 +627,10 @@ void *qoi_read(const char *filename, qoi_desc *desc, int channels) { fseek(f, 0, SEEK_END); size = ftell(f); - if (size <= 0) { + if (size <= 0 || fseek(f, 0, SEEK_SET) != 0) { fclose(f); return NULL; } - fseek(f, 0, SEEK_SET); data = QOI_MALLOC(size); if (!data) { @@ -639,8 +640,7 @@ void *qoi_read(const char *filename, qoi_desc *desc, int channels) { bytes_read = fread(data, 1, size, f); fclose(f); - - pixels = qoi_decode(data, bytes_read, desc, channels); + pixels = (bytes_read != size) ? 
NULL : qoi_decode(data, bytes_read, desc, channels); QOI_FREE(data); return pixels; } diff --git a/raylib/external/rl_gputex.h b/raylib/external/rl_gputex.h index fb7b5e8..2cbe596 100644 --- a/raylib/external/rl_gputex.h +++ b/raylib/external/rl_gputex.h @@ -1,6 +1,6 @@ /********************************************************************************************** * -* rl_gputex - GPU compressed textures loading and saving +* rl_gputex v1.0 - GPU compressed textures loading and saving * * DESCRIPTION: * diff --git a/raylib/external/rprand.h b/raylib/external/rprand.h index acdeac6..c9b460c 100644 --- a/raylib/external/rprand.h +++ b/raylib/external/rprand.h @@ -186,7 +186,7 @@ int *rprand_load_sequence(unsigned int count, int min, int max) { int *sequence = NULL; - if (count > (abs(max - min) + 1)) + if (count > (unsigned int)(abs(max - min) + 1)) { RPRAND_LOG("WARNING: Sequence count required is greater than range provided\n"); //count = (max - min); @@ -198,9 +198,9 @@ int *rprand_load_sequence(unsigned int count, int min, int max) int value = 0; bool value_is_dup = false; - for (int i = 0; i < count;) + for (unsigned int i = 0; i < count;) { - value = (rprand_xoshiro()%(abs(max - min) + 1)) + min; + value = ((int)rprand_xoshiro()%(abs(max - min) + 1)) + min; value_is_dup = false; for (int j = 0; j < i; j++) diff --git a/raylib/external/sinfl.h b/raylib/external/sinfl.h index 8979fcd..16589c1 100644 --- a/raylib/external/sinfl.h +++ b/raylib/external/sinfl.h @@ -122,6 +122,7 @@ extern "C" { struct sinfl { const unsigned char *bitptr; + const unsigned char *bitend; // @raysan5: added unsigned long long bitbuf; int bitcnt; @@ -185,9 +186,10 @@ sinfl_read64(const void *p) { } static void sinfl_copy64(unsigned char **dst, unsigned char **src) { - unsigned long long n; - memcpy(&n, *src, 8); - memcpy(*dst, &n, 8); + //unsigned long long n; + //memcpy(&n, *src, 8); + //memcpy(*dst, &n, 8); + memcpy(*dst, *src, 8); // @raysan5 *dst += 8, *src += 8; } static unsigned char* @@ -210,9 +212,22 @@ sinfl_copy128(unsigned char **dst, unsigned char **src) { #endif static void sinfl_refill(struct sinfl *s) { - s->bitbuf |= sinfl_read64(s->bitptr) << s->bitcnt; - s->bitptr += (63 - s->bitcnt) >> 3; - s->bitcnt |= 56; /* bitcount in range [56,63] */ + if (s->bitend - s->bitptr >= 8) { + // @raysan5: original code, only those 3 lines + s->bitbuf |= sinfl_read64(s->bitptr) << s->bitcnt; + s->bitptr += (63 - s->bitcnt) >> 3; + s->bitcnt |= 56; /* bitcount in range [56,63] */ + } else { + // @raysan5: added this case when bits remaining < 8 + int bitswant = 63 - s->bitcnt; + int byteswant = bitswant >> 3; + int bytesuse = s->bitend - s->bitptr <= byteswant ? 
(int)(s->bitend - s->bitptr) : byteswant; + unsigned long long n = 0; + memcpy(&n, s->bitptr, bytesuse); + s->bitbuf |= n << s->bitcnt; + s->bitptr += bytesuse; + s->bitcnt += bytesuse << 3; + } } static int sinfl_peek(struct sinfl *s, int cnt) { @@ -384,6 +399,7 @@ sinfl_decompress(unsigned char *out, int cap, const unsigned char *in, int size) int last = 0; s.bitptr = in; + s.bitend = e; // @raysan5: added while (1) { switch (state) { case hdr: { diff --git a/raylib/platforms/rcore_drm.c b/raylib/platforms/rcore_drm.c index 7bdc2d0..bdfc9e0 100644 --- a/raylib/platforms/rcore_drm.c +++ b/raylib/platforms/rcore_drm.c @@ -123,6 +123,9 @@ typedef struct { // NOTE: currentButtonState[] can't be written directly due to multithreading, app could miss the update char currentButtonStateEvdev[MAX_MOUSE_BUTTONS]; // Holds the new mouse state for the next polling event to grab bool cursorRelative; // Relative cursor mode + int mouseFd; // File descriptor for the evdev mouse/touch/gestures + Rectangle absRange; // Range of values for absolute pointing devices (touchscreens) + int touchSlot; // Hold the touch slot number of the currently being sent multitouch block // Gamepad data pthread_t gamepadThreadId; // Gamepad reading thread id @@ -170,11 +173,6 @@ static const int EvkeyToUnicodeLUT[] = { // LUT currently incomplete, just mapped the most essential keys }; -#if defined(SUPPORT_GESTURES_SYSTEM) -GestureEvent gestureEvent = { 0 }; // Gesture event to hold data between EventThread() and PollInputEvents() -bool newGesture = false; // Var to trigger ProcessGestureEvent(gestureEvent) on PollInputEvents() -#endif - //---------------------------------------------------------------------------------- // Module Internal Functions Declaration //---------------------------------------------------------------------------------- @@ -448,7 +446,7 @@ void EnableCursor(void) void DisableCursor(void) { // Set cursor position in the middle - SetMousePosition(CORE.Window.screen.width/2, CORE.Window.screen.height/2); + SetMousePosition(0, 0); platform.cursorRelative = true; CORE.Input.Mouse.cursorHidden = true; @@ -565,11 +563,8 @@ void PollInputEvents(void) PollKeyboardEvents(); // Register previous mouse position - if (platform.cursorRelative) - { - CORE.Input.Mouse.previousPosition = CORE.Input.Mouse.currentPosition; - CORE.Input.Mouse.currentPosition = (Vector2){ 0.0f, 0.0f }; - } + if (platform.cursorRelative) CORE.Input.Mouse.currentPosition = (Vector2){ 0.0f, 0.0f }; + else CORE.Input.Mouse.previousPosition = CORE.Input.Mouse.currentPosition; // Register previous mouse states CORE.Input.Mouse.previousWheelMove = CORE.Input.Mouse.currentWheelMove; @@ -600,15 +595,6 @@ void PollInputEvents(void) // Map touch position to mouse position for convenience CORE.Input.Touch.position[0] = CORE.Input.Mouse.currentPosition; -#if defined(SUPPORT_GESTURES_SYSTEM) - // Call the ProcessGestureEvent here instead of on EventThread() to workaround the threads not matching - if (newGesture) - { - ProcessGestureEvent(gestureEvent); - newGesture = false; - } -#endif - #if defined(SUPPORT_SSH_KEYBOARD_RPI) // NOTE: Keyboard reading could be done using input_event(s) or just read from stdin, both methods are used here. 
// stdin reading is still used for legacy purposes, it allows keyboard input trough SSH console @@ -618,6 +604,183 @@ void PollInputEvents(void) // NOTE: Mouse input events polling is done asynchronously in another pthread - EventThread() // NOTE: Gamepad (Joystick) input events polling is done asynchonously in another pthread - GamepadThread() #endif + + // Handle the mouse/touch/gestures events: + // NOTE: Replaces the EventThread handling that is now commented. + { + int fd = platform.mouseFd; + if (fd == -1) return; + + struct input_event event = { 0 }; + + int touchAction = -1; // 0-TOUCH_ACTION_UP, 1-TOUCH_ACTION_DOWN, 2-TOUCH_ACTION_MOVE + bool gestureUpdate = false; // Flag to note gestures require to update + + // Try to read data from the mouse/touch/gesture and only continue if successful + while (read(fd, &event, sizeof(event)) == (int)sizeof(event)) + { + // Relative movement parsing + if (event.type == EV_REL) + { + if (event.code == REL_X) + { + if (platform.cursorRelative) + { + CORE.Input.Mouse.currentPosition.x = event.value; + CORE.Input.Mouse.previousPosition.x = 0.0f; + } + else CORE.Input.Mouse.currentPosition.x += event.value; + CORE.Input.Touch.position[0].x = CORE.Input.Mouse.currentPosition.x; + + touchAction = 2; // TOUCH_ACTION_MOVE + gestureUpdate = true; + } + + if (event.code == REL_Y) + { + if (platform.cursorRelative) + { + CORE.Input.Mouse.currentPosition.y = event.value; + CORE.Input.Mouse.previousPosition.y = 0.0f; + } + else CORE.Input.Mouse.currentPosition.y += event.value; + CORE.Input.Touch.position[0].y = CORE.Input.Mouse.currentPosition.y; + + touchAction = 2; // TOUCH_ACTION_MOVE + gestureUpdate = true; + } + + if (event.code == REL_WHEEL) platform.eventWheelMove.y += event.value; + } + + // Absolute movement parsing + if (event.type == EV_ABS) + { + // Basic movement + if (event.code == ABS_X) + { + CORE.Input.Mouse.currentPosition.x = (event.value - platform.absRange.x)*CORE.Window.screen.width/platform.absRange.width; // Scale according to absRange + CORE.Input.Touch.position[0].x = (event.value - platform.absRange.x)*CORE.Window.screen.width/platform.absRange.width; // Scale according to absRange + + touchAction = 2; // TOUCH_ACTION_MOVE + gestureUpdate = true; + } + + if (event.code == ABS_Y) + { + CORE.Input.Mouse.currentPosition.y = (event.value - platform.absRange.y)*CORE.Window.screen.height/platform.absRange.height; // Scale according to absRange + CORE.Input.Touch.position[0].y = (event.value - platform.absRange.y)*CORE.Window.screen.height/platform.absRange.height; // Scale according to absRange + + touchAction = 2; // TOUCH_ACTION_MOVE + gestureUpdate = true; + } + + // Multitouch movement + if (event.code == ABS_MT_SLOT) platform.touchSlot = event.value; // Remember the slot number for the folowing events + + if (event.code == ABS_MT_POSITION_X) + { + if (platform.touchSlot < MAX_TOUCH_POINTS) CORE.Input.Touch.position[platform.touchSlot].x = (event.value - platform.absRange.x)*CORE.Window.screen.width/platform.absRange.width; // Scale according to absRange + } + + if (event.code == ABS_MT_POSITION_Y) + { + if (platform.touchSlot < MAX_TOUCH_POINTS) CORE.Input.Touch.position[platform.touchSlot].y = (event.value - platform.absRange.y)*CORE.Window.screen.height/platform.absRange.height; // Scale according to absRange + } + + if (event.code == ABS_MT_TRACKING_ID) + { + if ((event.value < 0) && (platform.touchSlot < MAX_TOUCH_POINTS)) + { + // Touch has ended for this point + CORE.Input.Touch.position[platform.touchSlot].x = -1; + 
CORE.Input.Touch.position[platform.touchSlot].y = -1; + } + } + + // Touchscreen tap + if (event.code == ABS_PRESSURE) + { + int previousMouseLeftButtonState = platform.currentButtonStateEvdev[MOUSE_BUTTON_LEFT]; + + if (!event.value && previousMouseLeftButtonState) + { + platform.currentButtonStateEvdev[MOUSE_BUTTON_LEFT] = 0; + + touchAction = 0; // TOUCH_ACTION_UP + gestureUpdate = true; + } + + if (event.value && !previousMouseLeftButtonState) + { + platform.currentButtonStateEvdev[MOUSE_BUTTON_LEFT] = 1; + + touchAction = 1; // TOUCH_ACTION_DOWN + gestureUpdate = true; + } + } + + } + + // Button parsing + if (event.type == EV_KEY) + { + // Mouse button parsing + if ((event.code == BTN_TOUCH) || (event.code == BTN_LEFT)) + { + platform.currentButtonStateEvdev[MOUSE_BUTTON_LEFT] = event.value; + + if (event.value > 0) touchAction = 1; // TOUCH_ACTION_DOWN + else touchAction = 0; // TOUCH_ACTION_UP + gestureUpdate = true; + } + + if (event.code == BTN_RIGHT) platform.currentButtonStateEvdev[MOUSE_BUTTON_RIGHT] = event.value; + if (event.code == BTN_MIDDLE) platform.currentButtonStateEvdev[MOUSE_BUTTON_MIDDLE] = event.value; + if (event.code == BTN_SIDE) platform.currentButtonStateEvdev[MOUSE_BUTTON_SIDE] = event.value; + if (event.code == BTN_EXTRA) platform.currentButtonStateEvdev[MOUSE_BUTTON_EXTRA] = event.value; + if (event.code == BTN_FORWARD) platform.currentButtonStateEvdev[MOUSE_BUTTON_FORWARD] = event.value; + if (event.code == BTN_BACK) platform.currentButtonStateEvdev[MOUSE_BUTTON_BACK] = event.value; + } + + // Screen confinement + if (!CORE.Input.Mouse.cursorHidden) + { + if (CORE.Input.Mouse.currentPosition.x < 0) CORE.Input.Mouse.currentPosition.x = 0; + if (CORE.Input.Mouse.currentPosition.x > CORE.Window.screen.width/CORE.Input.Mouse.scale.x) CORE.Input.Mouse.currentPosition.x = CORE.Window.screen.width/CORE.Input.Mouse.scale.x; + + if (CORE.Input.Mouse.currentPosition.y < 0) CORE.Input.Mouse.currentPosition.y = 0; + if (CORE.Input.Mouse.currentPosition.y > CORE.Window.screen.height/CORE.Input.Mouse.scale.y) CORE.Input.Mouse.currentPosition.y = CORE.Window.screen.height/CORE.Input.Mouse.scale.y; + } + + // Update touch point count + CORE.Input.Touch.pointCount = 0; + for (int i = 0; i < MAX_TOUCH_POINTS; i++) + { + if (CORE.Input.Touch.position[i].x >= 0) CORE.Input.Touch.pointCount++; + } + +#if defined(SUPPORT_GESTURES_SYSTEM) + if (gestureUpdate) + { + GestureEvent gestureEvent = { 0 }; + + gestureEvent.touchAction = touchAction; + gestureEvent.pointCount = CORE.Input.Touch.pointCount; + + for (int i = 0; i < MAX_TOUCH_POINTS; i++) + { + gestureEvent.pointId[i] = i; + gestureEvent.position[i] = CORE.Input.Touch.position[i]; + } + + ProcessGestureEvent(gestureEvent); + + gestureUpdate = false; + } +#endif + } + } } //---------------------------------------------------------------------------------- @@ -1386,9 +1549,14 @@ static void ConfigureEvdevDevice(char *device) ioctl(fd, EVIOCGABS(ABS_X), &absinfo); worker->absRange.x = absinfo.minimum; worker->absRange.width = absinfo.maximum - absinfo.minimum; + platform.absRange.x = absinfo.minimum; + platform.absRange.width = absinfo.maximum - absinfo.minimum; + ioctl(fd, EVIOCGABS(ABS_Y), &absinfo); worker->absRange.y = absinfo.minimum; worker->absRange.height = absinfo.maximum - absinfo.minimum; + platform.absRange.y = absinfo.minimum; + platform.absRange.height = absinfo.maximum - absinfo.minimum; } // Check for multiple absolute movement support (usually multitouch touchscreens) @@ -1400,9 +1568,14 @@ static void 
ConfigureEvdevDevice(char *device) ioctl(fd, EVIOCGABS(ABS_X), &absinfo); worker->absRange.x = absinfo.minimum; worker->absRange.width = absinfo.maximum - absinfo.minimum; + platform.absRange.x = absinfo.minimum; + platform.absRange.width = absinfo.maximum - absinfo.minimum; + ioctl(fd, EVIOCGABS(ABS_Y), &absinfo); worker->absRange.y = absinfo.minimum; worker->absRange.height = absinfo.maximum - absinfo.minimum; + platform.absRange.y = absinfo.minimum; + platform.absRange.height = absinfo.maximum - absinfo.minimum; } } @@ -1462,15 +1635,20 @@ static void ConfigureEvdevDevice(char *device) worker->isMultitouch? "multitouch " : "", worker->isTouch? "touchscreen " : "", worker->isGamepad? "gamepad " : ""); + platform.mouseFd = worker->fd; + + // NOTE: moved the mouse/touch/gesture input to PollInputEvents()/ + // so added the "platform.mouseFd = worker->fd;" line above + // and commented the thread code below: // Create a thread for this device - int error = pthread_create(&worker->threadId, NULL, &EventThread, (void *)worker); - if (error != 0) - { - TRACELOG(LOG_WARNING, "RPI: Failed to create input device thread: %s (error: %d)", device, error); - worker->threadId = 0; - close(fd); - } + //int error = pthread_create(&worker->threadId, NULL, &EventThread, (void *)worker); + //if (error != 0) + //{ + // TRACELOG(LOG_WARNING, "RPI: Failed to create input device thread: %s (error: %d)", device, error); + // worker->threadId = 0; + // close(fd); + //} #if defined(USE_LAST_TOUCH_DEVICE) // Find touchscreen with the highest index @@ -1570,6 +1748,7 @@ static void PollKeyboardEvents(void) // Input device events reading thread static void *EventThread(void *arg) { +/* struct input_event event = { 0 }; InputEventWorker *worker = (InputEventWorker *)arg; @@ -1731,7 +1910,7 @@ static void *EventThread(void *arg) #if defined(SUPPORT_GESTURES_SYSTEM) if (gestureUpdate) { - //GestureEvent gestureEvent = { 0 }; + GestureEvent gestureEvent = { 0 }; gestureEvent.touchAction = touchAction; gestureEvent.pointCount = CORE.Input.Touch.pointCount; @@ -1742,8 +1921,7 @@ static void *EventThread(void *arg) gestureEvent.position[i] = CORE.Input.Touch.position[i]; } - //ProcessGestureEvent(gestureEvent); - newGesture = true; + ProcessGestureEvent(gestureEvent); } #endif } @@ -1752,7 +1930,7 @@ static void *EventThread(void *arg) } close(worker->fd); - +*/ return NULL; } diff --git a/raylib/raylib.h b/raylib/raylib.h index 4a353a7..769eaf5 100644 --- a/raylib/raylib.h +++ b/raylib/raylib.h @@ -1221,14 +1221,10 @@ RLAPI void SetShapesTexture(Texture2D texture, Rectangle source); // Set t RLAPI void DrawPixel(int posX, int posY, Color color); // Draw a pixel RLAPI void DrawPixelV(Vector2 position, Color color); // Draw a pixel (Vector version) RLAPI void DrawLine(int startPosX, int startPosY, int endPosX, int endPosY, Color color); // Draw a line -RLAPI void DrawLineV(Vector2 startPos, Vector2 endPos, Color color); // Draw a line (Vector version) -RLAPI void DrawLineEx(Vector2 startPos, Vector2 endPos, float thick, Color color); // Draw a line defining thickness -RLAPI void DrawLineBezier(Vector2 startPos, Vector2 endPos, float thick, Color color); // Draw a line using cubic-bezier curves in-out -RLAPI void DrawLineBezierQuad(Vector2 startPos, Vector2 endPos, Vector2 controlPos, float thick, Color color); // Draw line using quadratic bezier curves with a control point -RLAPI void DrawLineBezierCubic(Vector2 startPos, Vector2 endPos, Vector2 startControlPos, Vector2 endControlPos, float thick, Color color); // Draw line 
using cubic bezier curves with 2 control points -RLAPI void DrawLineBSpline(Vector2 *points, int pointCount, float thick, Color color); // Draw a B-Spline line, minimum 4 points -RLAPI void DrawLineCatmullRom(Vector2 *points, int pointCount, float thick, Color color); // Draw a Catmull Rom spline line, minimum 4 points -RLAPI void DrawLineStrip(Vector2 *points, int pointCount, Color color); // Draw lines sequence +RLAPI void DrawLineV(Vector2 startPos, Vector2 endPos, Color color); // Draw a line (using gl lines) +RLAPI void DrawLineEx(Vector2 startPos, Vector2 endPos, float thick, Color color); // Draw a line (using triangles/quads) +RLAPI void DrawLineStrip(Vector2 *points, int pointCount, Color color); // Draw lines sequence (using gl lines) +RLAPI void DrawLineBezier(Vector2 startPos, Vector2 endPos, float thick, Color color); // Draw line segment cubic-bezier in-out interpolation RLAPI void DrawCircle(int centerX, int centerY, float radius, Color color); // Draw a color-filled circle RLAPI void DrawCircleSector(Vector2 center, float radius, float startAngle, float endAngle, int segments, Color color); // Draw a piece of a circle RLAPI void DrawCircleSectorLines(Vector2 center, float radius, float startAngle, float endAngle, int segments, Color color); // Draw circle sector outline @@ -1259,6 +1255,25 @@ RLAPI void DrawPoly(Vector2 center, int sides, float radius, float rotation, Col RLAPI void DrawPolyLines(Vector2 center, int sides, float radius, float rotation, Color color); // Draw a polygon outline of n sides RLAPI void DrawPolyLinesEx(Vector2 center, int sides, float radius, float rotation, float lineThick, Color color); // Draw a polygon outline of n sides with extended parameters +// Splines drawing functions +RLAPI void DrawSplineLinear(Vector2 *points, int pointCount, float thick, Color color); // Draw spline: Linear, minimum 2 points +RLAPI void DrawSplineBasis(Vector2 *points, int pointCount, float thick, Color color); // Draw spline: B-Spline, minimum 4 points +RLAPI void DrawSplineCatmullRom(Vector2 *points, int pointCount, float thick, Color color); // Draw spline: Catmull-Rom, minimum 4 points +RLAPI void DrawSplineBezierQuadratic(Vector2 *points, int pointCount, float thick, Color color); // Draw spline: Quadratic Bezier, minimum 3 points (1 control point): [p1, c2, p3, c4...] +RLAPI void DrawSplineBezierCubic(Vector2 *points, int pointCount, float thick, Color color); // Draw spline: Cubic Bezier, minimum 4 points (2 control points): [p1, c2, c3, p4, c5, c6...] +RLAPI void DrawSplineSegmentLinear(Vector2 p1, Vector2 p2, float thick, Color color); // Draw spline segment: Linear, 2 points +RLAPI void DrawSplineSegmentBasis(Vector2 p1, Vector2 p2, Vector2 p3, Vector2 p4, float thick, Color color); // Draw spline segment: B-Spline, 4 points +RLAPI void DrawSplineSegmentCatmullRom(Vector2 p1, Vector2 p2, Vector2 p3, Vector2 p4, float thick, Color color); // Draw spline segment: Catmull-Rom, 4 points +RLAPI void DrawSplineSegmentBezierQuadratic(Vector2 p1, Vector2 c2, Vector2 p3, float thick, Color color); // Draw spline segment: Quadratic Bezier, 2 points, 1 control point +RLAPI void DrawSplineSegmentBezierCubic(Vector2 p1, Vector2 c2, Vector2 c3, Vector2 p4, float thick, Color color); // Draw spline segment: Cubic Bezier, 2 points, 2 control points + +// Spline segment point evaluation functions, for a given t [0.0f .. 
1.0f] +RLAPI Vector2 GetSplinePointLinear(Vector2 startPos, Vector2 endPos, float t); // Get (evaluate) spline point: Linear +RLAPI Vector2 GetSplinePointBasis(Vector2 p1, Vector2 p2, Vector2 p3, Vector2 p4, float t); // Get (evaluate) spline point: B-Spline +RLAPI Vector2 GetSplinePointCatmullRom(Vector2 p1, Vector2 p2, Vector2 p3, Vector2 p4, float t); // Get (evaluate) spline point: Catmull-Rom +RLAPI Vector2 GetSplinePointBezierQuad(Vector2 p1, Vector2 c2, Vector2 p3, float t); // Get (evaluate) spline point: Quadratic Bezier +RLAPI Vector2 GetSplinePointBezierCubic(Vector2 p1, Vector2 c2, Vector2 c3, Vector2 p4, float t); // Get (evaluate) spline point: Cubic Bezier + // Basic shapes collision detection functions RLAPI bool CheckCollisionRecs(Rectangle rec1, Rectangle rec2); // Check collision between two rectangles RLAPI bool CheckCollisionCircles(Vector2 center1, float radius1, Vector2 center2, float radius2); // Check collision between two circles diff --git a/raylib/rcore.c b/raylib/rcore.c index 9a9ac3f..aa25401 100644 --- a/raylib/rcore.c +++ b/raylib/rcore.c @@ -189,7 +189,11 @@ __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int cp, unsigne #define MAX_FILEPATH_CAPACITY 8192 // Maximum capacity for filepath #endif #ifndef MAX_FILEPATH_LENGTH - #define MAX_FILEPATH_LENGTH 4096 // Maximum length for filepaths (Linux PATH_MAX default value) + #if defined(_WIN32) + #define MAX_FILEPATH_LENGTH 256 // On Win32, MAX_PATH = 260 (limits.h) but Windows 10, Version 1607 enables long paths... + #else + #define MAX_FILEPATH_LENGTH 4096 // On Linux, PATH_MAX = 4096 by default (limits.h) + #endif #endif #ifndef MAX_KEYBOARD_KEYS @@ -252,6 +256,7 @@ typedef struct CoreData { bool shouldClose; // Check if window set for closing bool resizedLastFrame; // Check if window has been resized last frame bool eventWaiting; // Wait for events before ending frame + bool usingFbo; // Using FBO (RenderTexture) for rendering instead of default framebuffer Point position; // Window position (required on fullscreen toggle) Point previousPosition; // Window previous position (required on borderless windowed toggle) @@ -1035,6 +1040,7 @@ void BeginTextureMode(RenderTexture2D target) // calculation when using BeginMode3D() CORE.Window.currentFbo.width = target.texture.width; CORE.Window.currentFbo.height = target.texture.height; + CORE.Window.usingFbo = true; } // Ends drawing to render texture @@ -1050,6 +1056,7 @@ void EndTextureMode(void) // Reset current fbo to screen size CORE.Window.currentFbo.width = CORE.Window.render.width; CORE.Window.currentFbo.height = CORE.Window.render.height; + CORE.Window.usingFbo = false; } // Begin custom shader mode @@ -1086,19 +1093,22 @@ void BeginScissorMode(int x, int y, int width, int height) rlEnableScissorTest(); #if defined(__APPLE__) - Vector2 scale = GetWindowScaleDPI(); - rlScissor((int)(x*scale.x), (int)(GetScreenHeight()*scale.y - (((y + height)*scale.y))), (int)(width*scale.x), (int)(height*scale.y)); + if (CORE.Window.usingFbo) + { + Vector2 scale = GetWindowScaleDPI(); + rlScissor((int)(x*scale.x), (int)(GetScreenHeight()*scale.y - (((y + height)*scale.y))), (int)(width*scale.x), (int)(height*scale.y)); + } #else - if ((CORE.Window.flags & FLAG_WINDOW_HIGHDPI) > 0) + if (CORE.Window.usingFbo && ((CORE.Window.flags & FLAG_WINDOW_HIGHDPI) > 0)) { Vector2 scale = GetWindowScaleDPI(); rlScissor((int)(x*scale.x), (int)(CORE.Window.currentFbo.height - (y + height)*scale.y), (int)(width*scale.x), (int)(height*scale.y)); } - else +#endif + else { 
rlScissor(x, CORE.Window.currentFbo.height - (y + height), width, height); } -#endif } // End scissor mode @@ -2513,7 +2523,7 @@ bool ExportAutomationEventList(AutomationEventList list, const char *fileName) // Add events data byteCount += sprintf(txtData + byteCount, "c %i\n", list.count); - for (int i = 0; i < list.count; i++) + for (unsigned int i = 0; i < list.count; i++) { byteCount += snprintf(txtData + byteCount, 256, "e %i %i %i %i %i %i // Event: %s\n", list.events[i].frame, list.events[i].type, list.events[i].params[0], list.events[i].params[1], list.events[i].params[2], list.events[i].params[3], autoEventTypeName[list.events[i].type]); @@ -3167,7 +3177,11 @@ static void ScanDirectoryFiles(const char *basePath, FilePathList *files, const if ((strcmp(dp->d_name, ".") != 0) && (strcmp(dp->d_name, "..") != 0)) { + #if defined(_WIN32) + sprintf(path, "%s\\%s", basePath, dp->d_name); + #else sprintf(path, "%s/%s", basePath, dp->d_name); + #endif if (filter != NULL) { @@ -3206,7 +3220,11 @@ static void ScanDirectoryFilesRecursively(const char *basePath, FilePathList *fi if ((strcmp(dp->d_name, ".") != 0) && (strcmp(dp->d_name, "..") != 0)) { // Construct new path from our base path + #if defined(_WIN32) + sprintf(path, "%s\\%s", basePath, dp->d_name); + #else sprintf(path, "%s/%s", basePath, dp->d_name); + #endif if (IsPathFile(path)) { diff --git a/raylib/rshapes.c b/raylib/rshapes.c index e8e533d..bababf8 100644 --- a/raylib/rshapes.c +++ b/raylib/rshapes.c @@ -65,10 +65,10 @@ // Error rate to calculate how many segments we need to draw a smooth circle, // taken from https://stackoverflow.com/a/2244088 #ifndef SMOOTH_CIRCLE_ERROR_RATE - #define SMOOTH_CIRCLE_ERROR_RATE 0.5f // Circle error rate + #define SMOOTH_CIRCLE_ERROR_RATE 0.5f // Circle error rate #endif -#ifndef SPLINE_LINE_DIVISIONS - #define SPLINE_LINE_DIVISIONS 24 // Spline lines segment divisions +#ifndef SPLINE_SEGMENT_DIVISIONS + #define SPLINE_SEGMENT_DIVISIONS 24 // Spline segment divisions #endif @@ -161,7 +161,7 @@ void DrawPixelV(Vector2 position, Color color) #endif } -// Draw a line +// Draw a line (using gl lines) void DrawLine(int startPosX, int startPosY, int endPosX, int endPosY, Color color) { rlBegin(RL_LINES); @@ -171,7 +171,7 @@ void DrawLine(int startPosX, int startPosY, int endPosX, int endPosY, Color colo rlEnd(); } -// Draw a line (Vector version) +// Draw a line (using gl lines) void DrawLineV(Vector2 startPos, Vector2 endPos, Color color) { rlBegin(RL_LINES); @@ -181,6 +181,61 @@ void DrawLineV(Vector2 startPos, Vector2 endPos, Color color) rlEnd(); } +// Draw lines sequuence (using gl lines) +void DrawLineStrip(Vector2 *points, int pointCount, Color color) +{ + if (pointCount >= 2) + { + rlBegin(RL_LINES); + rlColor4ub(color.r, color.g, color.b, color.a); + + for (int i = 0; i < pointCount - 1; i++) + { + rlVertex2f(points[i].x, points[i].y); + rlVertex2f(points[i + 1].x, points[i + 1].y); + } + rlEnd(); + } +} + +// Draw line using cubic-bezier spline, in-out interpolation, no control points +void DrawLineBezier(Vector2 startPos, Vector2 endPos, float thick, Color color) +{ + Vector2 previous = startPos; + Vector2 current = { 0 }; + + Vector2 points[2*SPLINE_SEGMENT_DIVISIONS + 2] = { 0 }; + + for (int i = 1; i <= SPLINE_SEGMENT_DIVISIONS; i++) + { + // Cubic easing in-out + // NOTE: Easing is calculated only for y position value + current.y = EaseCubicInOut((float)i, startPos.y, endPos.y - startPos.y, (float)SPLINE_SEGMENT_DIVISIONS); + current.x = previous.x + (endPos.x - 
startPos.x)/(float)SPLINE_SEGMENT_DIVISIONS; + + float dy = current.y - previous.y; + float dx = current.x - previous.x; + float size = 0.5f*thick/sqrtf(dx*dx+dy*dy); + + if (i == 1) + { + points[0].x = previous.x + dy*size; + points[0].y = previous.y - dx*size; + points[1].x = previous.x - dy*size; + points[1].y = previous.y + dx*size; + } + + points[2*i + 1].x = current.x - dy*size; + points[2*i + 1].y = current.y + dx*size; + points[2*i].x = current.x + dy*size; + points[2*i].y = current.y - dx*size; + + previous = current; + } + + DrawTriangleStrip(points, 2*SPLINE_SEGMENT_DIVISIONS + 2, color); +} + // Draw a line defining thickness void DrawLineEx(Vector2 startPos, Vector2 endPos, float thick, Color color) { @@ -203,295 +258,6 @@ void DrawLineEx(Vector2 startPos, Vector2 endPos, float thick, Color color) } } -// Draw line using cubic-bezier curves in-out -void DrawLineBezier(Vector2 startPos, Vector2 endPos, float thick, Color color) -{ - Vector2 previous = startPos; - Vector2 current = { 0 }; - - Vector2 points[2*SPLINE_LINE_DIVISIONS + 2] = { 0 }; - - for (int i = 1; i <= SPLINE_LINE_DIVISIONS; i++) - { - // Cubic easing in-out - // NOTE: Easing is calculated only for y position value - current.y = EaseCubicInOut((float)i, startPos.y, endPos.y - startPos.y, (float)SPLINE_LINE_DIVISIONS); - current.x = previous.x + (endPos.x - startPos.x)/(float)SPLINE_LINE_DIVISIONS; - - float dy = current.y - previous.y; - float dx = current.x - previous.x; - float size = 0.5f*thick/sqrtf(dx*dx+dy*dy); - - if (i == 1) - { - points[0].x = previous.x + dy*size; - points[0].y = previous.y - dx*size; - points[1].x = previous.x - dy*size; - points[1].y = previous.y + dx*size; - } - - points[2*i + 1].x = current.x - dy*size; - points[2*i + 1].y = current.y + dx*size; - points[2*i].x = current.x + dy*size; - points[2*i].y = current.y - dx*size; - - previous = current; - } - - DrawTriangleStrip(points, 2*SPLINE_LINE_DIVISIONS + 2, color); -} - -// Draw line using quadratic bezier curves with a control point -void DrawLineBezierQuad(Vector2 startPos, Vector2 endPos, Vector2 controlPos, float thick, Color color) -{ - const float step = 1.0f/SPLINE_LINE_DIVISIONS; - - Vector2 previous = startPos; - Vector2 current = { 0 }; - float t = 0.0f; - - Vector2 points[2*SPLINE_LINE_DIVISIONS + 2] = { 0 }; - - for (int i = 1; i <= SPLINE_LINE_DIVISIONS; i++) - { - t = step*i; - - float a = powf(1.0f - t, 2); - float b = 2.0f*(1.0f - t)*t; - float c = powf(t, 2); - - // NOTE: The easing functions aren't suitable here because they don't take a control point - current.y = a*startPos.y + b*controlPos.y + c*endPos.y; - current.x = a*startPos.x + b*controlPos.x + c*endPos.x; - - float dy = current.y - previous.y; - float dx = current.x - previous.x; - float size = 0.5f*thick/sqrtf(dx*dx+dy*dy); - - if (i == 1) - { - points[0].x = previous.x + dy*size; - points[0].y = previous.y - dx*size; - points[1].x = previous.x - dy*size; - points[1].y = previous.y + dx*size; - } - - points[2*i + 1].x = current.x - dy*size; - points[2*i + 1].y = current.y + dx*size; - points[2*i].x = current.x + dy*size; - points[2*i].y = current.y - dx*size; - - previous = current; - } - - DrawTriangleStrip(points, 2*SPLINE_LINE_DIVISIONS + 2, color); -} - -// Draw line using cubic bezier curves with 2 control points -void DrawLineBezierCubic(Vector2 startPos, Vector2 endPos, Vector2 startControlPos, Vector2 endControlPos, float thick, Color color) -{ - const float step = 1.0f/SPLINE_LINE_DIVISIONS; - - Vector2 previous = startPos; - Vector2 current = 
{ 0 }; - float t = 0.0f; - - Vector2 points[2*SPLINE_LINE_DIVISIONS + 2] = { 0 }; - - for (int i = 1; i <= SPLINE_LINE_DIVISIONS; i++) - { - t = step*i; - - float a = powf(1.0f - t, 3); - float b = 3.0f*powf(1.0f - t, 2)*t; - float c = 3.0f*(1.0f - t)*powf(t, 2); - float d = powf(t, 3); - - current.y = a*startPos.y + b*startControlPos.y + c*endControlPos.y + d*endPos.y; - current.x = a*startPos.x + b*startControlPos.x + c*endControlPos.x + d*endPos.x; - - float dy = current.y - previous.y; - float dx = current.x - previous.x; - float size = 0.5f*thick/sqrtf(dx*dx+dy*dy); - - if (i == 1) - { - points[0].x = previous.x + dy*size; - points[0].y = previous.y - dx*size; - points[1].x = previous.x - dy*size; - points[1].y = previous.y + dx*size; - } - - points[2*i + 1].x = current.x - dy*size; - points[2*i + 1].y = current.y + dx*size; - points[2*i].x = current.x + dy*size; - points[2*i].y = current.y - dx*size; - - previous = current; - } - - DrawTriangleStrip(points, 2*SPLINE_LINE_DIVISIONS + 2, color); -} - -// Draw a B-Spline line, minimum 4 points -void DrawLineBSpline(Vector2 *points, int pointCount, float thick, Color color) -{ - if (pointCount < 4) return; - - float a[4] = { 0 }; - float b[4] = { 0 }; - float dy = 0.0f; - float dx = 0.0f; - float size = 0.0f; - - Vector2 currentPoint = { 0 }; - Vector2 nextPoint = { 0 }; - Vector2 vertices[2*SPLINE_LINE_DIVISIONS + 2] = { 0 }; - - for (int i = 0; i < (pointCount - 3); i++) - { - float t = 0.0f; - Vector2 p1 = points[i], p2 = points[i + 1], p3 = points[i + 2], p4 = points[i + 3]; - - a[0] = (-p1.x + 3.0f*p2.x - 3.0f*p3.x + p4.x)/6.0f; - a[1] = (3.0f*p1.x - 6.0f*p2.x + 3.0f*p3.x)/6.0f; - a[2] = (-3.0f*p1.x + 3.0f*p3.x)/6.0f; - a[3] = (p1.x + 4.0f*p2.x + p3.x)/6.0f; - - b[0] = (-p1.y + 3.0f*p2.y - 3.0f*p3.y + p4.y)/6.0f; - b[1] = (3.0f*p1.y - 6.0f*p2.y + 3.0f*p3.y)/6.0f; - b[2] = (-3.0f*p1.y + 3.0f*p3.y)/6.0f; - b[3] = (p1.y + 4.0f*p2.y + p3.y)/6.0f; - - currentPoint.x = a[3]; - currentPoint.y = b[3]; - - if (i == 0) DrawCircleV(currentPoint, thick/2.0f, color); // Draw init line circle-cap - - if (i > 0) - { - vertices[0].x = currentPoint.x + dy*size; - vertices[0].y = currentPoint.y - dx*size; - vertices[1].x = currentPoint.x - dy*size; - vertices[1].y = currentPoint.y + dx*size; - } - - for (int j = 1; j <= SPLINE_LINE_DIVISIONS; j++) - { - t = ((float)j)/((float)SPLINE_LINE_DIVISIONS); - - nextPoint.x = a[3] + t*(a[2] + t*(a[1] + t*a[0])); - nextPoint.y = b[3] + t*(b[2] + t*(b[1] + t*b[0])); - - dy = nextPoint.y - currentPoint.y; - dx = nextPoint.x - currentPoint.x; - size = 0.5f*thick/sqrtf(dx*dx+dy*dy); - - if ((i == 0) && (j == 1)) - { - vertices[0].x = currentPoint.x + dy*size; - vertices[0].y = currentPoint.y - dx*size; - vertices[1].x = currentPoint.x - dy*size; - vertices[1].y = currentPoint.y + dx*size; - } - - vertices[2*j + 1].x = nextPoint.x - dy*size; - vertices[2*j + 1].y = nextPoint.y + dx*size; - vertices[2*j].x = nextPoint.x + dy*size; - vertices[2*j].y = nextPoint.y - dx*size; - - currentPoint = nextPoint; - } - - DrawTriangleStrip(vertices, 2*SPLINE_LINE_DIVISIONS + 2, color); - } - - DrawCircleV(currentPoint, thick/2.0f, color); // Draw end line circle-cap -} - -// Draw a Catmull Rom spline line, minimum 4 points -void DrawLineCatmullRom(Vector2 *points, int pointCount, float thick, Color color) -{ - if (pointCount < 4) return; - - float dy = 0.0f; - float dx = 0.0f; - float size = 0.0f; - - Vector2 currentPoint = points[1]; - Vector2 nextPoint = { 0 }; - Vector2 vertices[2*SPLINE_LINE_DIVISIONS + 2] = { 0 }; - - 
DrawCircleV(currentPoint, thick/2.0f, color); // Draw init line circle-cap - - for (int i = 0; i < (pointCount - 3); i++) - { - float t = 0.0f; - Vector2 p1 = points[i], p2 = points[i + 1], p3 = points[i + 2], p4 = points[i + 3]; - - if (i > 0) - { - vertices[0].x = currentPoint.x + dy*size; - vertices[0].y = currentPoint.y - dx*size; - vertices[1].x = currentPoint.x - dy*size; - vertices[1].y = currentPoint.y + dx*size; - } - - for (int j = 1; j <= SPLINE_LINE_DIVISIONS; j++) - { - t = ((float)j)/((float)SPLINE_LINE_DIVISIONS); - - float q0 = (-1.0f*t*t*t) + (2.0f*t*t) + (-1.0f*t); - float q1 = (3.0f*t*t*t) + (-5.0f*t*t) + 2.0f; - float q2 = (-3.0f*t*t*t) + (4.0f*t*t) + t; - float q3 = t*t*t - t*t; - - nextPoint.x = 0.5f*((p1.x*q0) + (p2.x*q1) + (p3.x*q2) + (p4.x*q3)); - nextPoint.y = 0.5f*((p1.y*q0) + (p2.y*q1) + (p3.y*q2) + (p4.y*q3)); - - dy = nextPoint.y - currentPoint.y; - dx = nextPoint.x - currentPoint.x; - size = (0.5f*thick)/sqrtf(dx*dx + dy*dy); - - if ((i == 0) && (j == 1)) - { - vertices[0].x = currentPoint.x + dy*size; - vertices[0].y = currentPoint.y - dx*size; - vertices[1].x = currentPoint.x - dy*size; - vertices[1].y = currentPoint.y + dx*size; - } - - vertices[2*j + 1].x = nextPoint.x - dy*size; - vertices[2*j + 1].y = nextPoint.y + dx*size; - vertices[2*j].x = nextPoint.x + dy*size; - vertices[2*j].y = nextPoint.y - dx*size; - - currentPoint = nextPoint; - } - - DrawTriangleStrip(vertices, 2*SPLINE_LINE_DIVISIONS + 2, color); - } - - DrawCircleV(currentPoint, thick/2.0f, color); // Draw end line circle-cap -} - -// Draw lines sequence -void DrawLineStrip(Vector2 *points, int pointCount, Color color) -{ - if (pointCount >= 2) - { - rlBegin(RL_LINES); - rlColor4ub(color.r, color.g, color.b, color.a); - - for (int i = 0; i < pointCount - 1; i++) - { - rlVertex2f(points[i].x, points[i].y); - rlVertex2f(points[i + 1].x, points[i + 1].y); - } - rlEnd(); - } -} - // Draw a color-filled circle void DrawCircle(int centerX, int centerY, float radius, Color color) { @@ -1773,6 +1539,504 @@ void DrawPolyLinesEx(Vector2 center, int sides, float radius, float rotation, fl #endif } +//---------------------------------------------------------------------------------- +// Module Functions Definition - Splines functions +//---------------------------------------------------------------------------------- + +// Draw spline: linear, minimum 2 points +void DrawSplineLinear(Vector2 *points, int pointCount, float thick, Color color) +{ + Vector2 delta = { 0 }; + float length = 0.0f; + float scale = 0.0f; + + for (int i = 0; i < pointCount - 1; i++) + { + delta = (Vector2){ points[i + 1].x - points[i].x, points[i + 1].y - points[i].y }; + length = sqrtf(delta.x*delta.x + delta.y*delta.y); + + if (length > 0) scale = thick/(2*length); + + Vector2 radius = { -scale*delta.y, scale*delta.x }; + Vector2 strip[4] = { + { points[i].x - radius.x, points[i].y - radius.y }, + { points[i].x + radius.x, points[i].y + radius.y }, + { points[i + 1].x - radius.x, points[i + 1].y - radius.y }, + { points[i + 1].x + radius.x, points[i + 1].y + radius.y } + }; + + DrawTriangleStrip(strip, 4, color); + } +#if defined(SUPPORT_SPLINE_SEGMENT_CAPS) + +#endif +} + +// Draw spline: B-Spline, minimum 4 points +void DrawSplineBasis(Vector2 *points, int pointCount, float thick, Color color) +{ + if (pointCount < 4) return; + + float a[4] = { 0 }; + float b[4] = { 0 }; + float dy = 0.0f; + float dx = 0.0f; + float size = 0.0f; + + Vector2 currentPoint = { 0 }; + Vector2 nextPoint = { 0 }; + Vector2 
vertices[2*SPLINE_SEGMENT_DIVISIONS + 2] = { 0 };
+
+    for (int i = 0; i < (pointCount - 3); i++)
+    {
+        float t = 0.0f;
+        Vector2 p1 = points[i], p2 = points[i + 1], p3 = points[i + 2], p4 = points[i + 3];
+
+        a[0] = (-p1.x + 3.0f*p2.x - 3.0f*p3.x + p4.x)/6.0f;
+        a[1] = (3.0f*p1.x - 6.0f*p2.x + 3.0f*p3.x)/6.0f;
+        a[2] = (-3.0f*p1.x + 3.0f*p3.x)/6.0f;
+        a[3] = (p1.x + 4.0f*p2.x + p3.x)/6.0f;
+
+        b[0] = (-p1.y + 3.0f*p2.y - 3.0f*p3.y + p4.y)/6.0f;
+        b[1] = (3.0f*p1.y - 6.0f*p2.y + 3.0f*p3.y)/6.0f;
+        b[2] = (-3.0f*p1.y + 3.0f*p3.y)/6.0f;
+        b[3] = (p1.y + 4.0f*p2.y + p3.y)/6.0f;
+
+        currentPoint.x = a[3];
+        currentPoint.y = b[3];
+
+        if (i == 0) DrawCircleV(currentPoint, thick/2.0f, color); // Draw init line circle-cap
+
+        if (i > 0)
+        {
+            vertices[0].x = currentPoint.x + dy*size;
+            vertices[0].y = currentPoint.y - dx*size;
+            vertices[1].x = currentPoint.x - dy*size;
+            vertices[1].y = currentPoint.y + dx*size;
+        }
+
+        for (int j = 1; j <= SPLINE_SEGMENT_DIVISIONS; j++)
+        {
+            t = ((float)j)/((float)SPLINE_SEGMENT_DIVISIONS);
+
+            nextPoint.x = a[3] + t*(a[2] + t*(a[1] + t*a[0]));
+            nextPoint.y = b[3] + t*(b[2] + t*(b[1] + t*b[0]));
+
+            dy = nextPoint.y - currentPoint.y;
+            dx = nextPoint.x - currentPoint.x;
+            size = 0.5f*thick/sqrtf(dx*dx+dy*dy);
+
+            if ((i == 0) && (j == 1))
+            {
+                vertices[0].x = currentPoint.x + dy*size;
+                vertices[0].y = currentPoint.y - dx*size;
+                vertices[1].x = currentPoint.x - dy*size;
+                vertices[1].y = currentPoint.y + dx*size;
+            }
+
+            vertices[2*j + 1].x = nextPoint.x - dy*size;
+            vertices[2*j + 1].y = nextPoint.y + dx*size;
+            vertices[2*j].x = nextPoint.x + dy*size;
+            vertices[2*j].y = nextPoint.y - dx*size;
+
+            currentPoint = nextPoint;
+        }
+
+        DrawTriangleStrip(vertices, 2*SPLINE_SEGMENT_DIVISIONS + 2, color);
+    }
+
+    DrawCircleV(currentPoint, thick/2.0f, color); // Draw end line circle-cap
+}
+
+// Draw spline: Catmull-Rom, minimum 4 points
+void DrawSplineCatmullRom(Vector2 *points, int pointCount, float thick, Color color)
+{
+    if (pointCount < 4) return;
+
+    float dy = 0.0f;
+    float dx = 0.0f;
+    float size = 0.0f;
+
+    Vector2 currentPoint = points[1];
+    Vector2 nextPoint = { 0 };
+    Vector2 vertices[2*SPLINE_SEGMENT_DIVISIONS + 2] = { 0 };
+
+    DrawCircleV(currentPoint, thick/2.0f, color); // Draw init line circle-cap
+
+    for (int i = 0; i < (pointCount - 3); i++)
+    {
+        float t = 0.0f;
+        Vector2 p1 = points[i], p2 = points[i + 1], p3 = points[i + 2], p4 = points[i + 3];
+
+        if (i > 0)
+        {
+            vertices[0].x = currentPoint.x + dy*size;
+            vertices[0].y = currentPoint.y - dx*size;
+            vertices[1].x = currentPoint.x - dy*size;
+            vertices[1].y = currentPoint.y + dx*size;
+        }
+
+        for (int j = 1; j <= SPLINE_SEGMENT_DIVISIONS; j++)
+        {
+            t = ((float)j)/((float)SPLINE_SEGMENT_DIVISIONS);
+
+            float q0 = (-1.0f*t*t*t) + (2.0f*t*t) + (-1.0f*t);
+            float q1 = (3.0f*t*t*t) + (-5.0f*t*t) + 2.0f;
+            float q2 = (-3.0f*t*t*t) + (4.0f*t*t) + t;
+            float q3 = t*t*t - t*t;
+
+            nextPoint.x = 0.5f*((p1.x*q0) + (p2.x*q1) + (p3.x*q2) + (p4.x*q3));
+            nextPoint.y = 0.5f*((p1.y*q0) + (p2.y*q1) + (p3.y*q2) + (p4.y*q3));
+
+            dy = nextPoint.y - currentPoint.y;
+            dx = nextPoint.x - currentPoint.x;
+            size = (0.5f*thick)/sqrtf(dx*dx + dy*dy);
+
+            if ((i == 0) && (j == 1))
+            {
+                vertices[0].x = currentPoint.x + dy*size;
+                vertices[0].y = currentPoint.y - dx*size;
+                vertices[1].x = currentPoint.x - dy*size;
+                vertices[1].y = currentPoint.y + dx*size;
+            }
+
+            vertices[2*j + 1].x = nextPoint.x - dy*size;
+            vertices[2*j + 1].y = nextPoint.y + dx*size;
+            vertices[2*j].x = nextPoint.x + dy*size;
+            vertices[2*j].y = nextPoint.y - dx*size;
+
+            currentPoint = nextPoint;
+        }
+
+        DrawTriangleStrip(vertices, 2*SPLINE_SEGMENT_DIVISIONS + 2, color);
+    }
+
+    DrawCircleV(currentPoint, thick/2.0f, color); // Draw end line circle-cap
+}
+
+// Draw spline: Quadratic Bezier, minimum 3 points (1 control point): [p1, c2, p3, c4...]
+void DrawSplineBezierQuadratic(Vector2 *points, int pointCount, float thick, Color color)
+{
+    if (pointCount < 3) return;
+
+    for (int i = 0; i < pointCount - 2; i++)
+    {
+        DrawSplineSegmentBezierQuadratic(points[i], points[i + 1], points[i + 2], thick, color);
+    }
+}
+
+// Draw spline: Cubic Bezier, minimum 4 points (2 control points): [p1, c2, c3, p4, c5, c6...]
+void DrawSplineBezierCubic(Vector2 *points, int pointCount, float thick, Color color)
+{
+    if (pointCount < 4) return;
+
+    for (int i = 0; i < pointCount - 3; i++)
+    {
+        DrawSplineSegmentBezierCubic(points[i], points[i + 1], points[i + 2], points[i + 3], thick, color);
+    }
+}
+
+// Draw spline segment: Linear, 2 points
+void DrawSplineSegmentLinear(Vector2 p1, Vector2 p2, float thick, Color color)
+{
+    // NOTE: For the linear spline we don't use subdivisions, just a single quad
+
+    Vector2 delta = { p2.x - p1.x, p2.y - p1.y };
+    float length = sqrtf(delta.x*delta.x + delta.y*delta.y);
+
+    if ((length > 0) && (thick > 0))
+    {
+        float scale = thick/(2*length);
+
+        Vector2 radius = { -scale*delta.y, scale*delta.x };
+        Vector2 strip[4] = {
+            { p1.x - radius.x, p1.y - radius.y },
+            { p1.x + radius.x, p1.y + radius.y },
+            { p2.x - radius.x, p2.y - radius.y },
+            { p2.x + radius.x, p2.y + radius.y }
+        };
+
+        DrawTriangleStrip(strip, 4, color);
+    }
+}
+
+// Draw spline segment: B-Spline, 4 points
+void DrawSplineSegmentBasis(Vector2 p1, Vector2 p2, Vector2 p3, Vector2 p4, float thick, Color color)
+{
+    const float step = 1.0f/SPLINE_SEGMENT_DIVISIONS;
+
+    Vector2 currentPoint = { 0 };
+    Vector2 nextPoint = { 0 };
+    float t = 0.0f;
+
+    Vector2 points[2*SPLINE_SEGMENT_DIVISIONS + 2] = { 0 };
+
+    float a[4] = { 0 };
+    float b[4] = { 0 };
+
+    a[0] = (-p1.x + 3*p2.x - 3*p3.x + p4.x)/6.0f;
+    a[1] = (3*p1.x - 6*p2.x + 3*p3.x)/6.0f;
+    a[2] = (-3*p1.x + 3*p3.x)/6.0f;
+    a[3] = (p1.x + 4*p2.x + p3.x)/6.0f;
+
+    b[0] = (-p1.y + 3*p2.y - 3*p3.y + p4.y)/6.0f;
+    b[1] = (3*p1.y - 6*p2.y + 3*p3.y)/6.0f;
+    b[2] = (-3*p1.y + 3*p3.y)/6.0f;
+    b[3] = (p1.y + 4*p2.y + p3.y)/6.0f;
+
+    currentPoint.x = a[3];
+    currentPoint.y = b[3];
+
+    for (int i = 0; i <= SPLINE_SEGMENT_DIVISIONS; i++)
+    {
+        t = step*(float)i;
+
+        nextPoint.x = a[3] + t*(a[2] + t*(a[1] + t*a[0]));
+        nextPoint.y = b[3] + t*(b[2] + t*(b[1] + t*b[0]));
+
+        float dy = nextPoint.y - currentPoint.y;
+        float dx = nextPoint.x - currentPoint.x;
+        float size = (0.5f*thick)/sqrtf(dx*dx + dy*dy);
+
+        if (i == 1)
+        {
+            points[0].x = currentPoint.x + dy*size;
+            points[0].y = currentPoint.y - dx*size;
+            points[1].x = currentPoint.x - dy*size;
+            points[1].y = currentPoint.y + dx*size;
+        }
+
+        points[2*i + 1].x = nextPoint.x - dy*size;
+        points[2*i + 1].y = nextPoint.y + dx*size;
+        points[2*i].x = nextPoint.x + dy*size;
+        points[2*i].y = nextPoint.y - dx*size;
+
+        currentPoint = nextPoint;
+    }
+
+    DrawTriangleStrip(points, 2*SPLINE_SEGMENT_DIVISIONS+2, color);
+}
+
+// Draw spline segment: Catmull-Rom, 4 points
+void DrawSplineSegmentCatmullRom(Vector2 p1, Vector2 p2, Vector2 p3, Vector2 p4, float thick, Color color)
+{
+    const float step = 1.0f/SPLINE_SEGMENT_DIVISIONS;
+
+    Vector2 currentPoint = p1;
+    Vector2 nextPoint = { 0 };
+    float t = 0.0f;
+
+    Vector2 points[2*SPLINE_SEGMENT_DIVISIONS + 2] = { 0 };
+
+    for (int i = 0; i <= SPLINE_SEGMENT_DIVISIONS; i++)
+    {
+        t = step*(float)i;
+
+        float q0 = (-1*t*t*t) + (2*t*t) + (-1*t);
+        float q1 = (3*t*t*t) + (-5*t*t) + 2;
+        float q2 = (-3*t*t*t) + (4*t*t) + t;
+        float q3 = t*t*t - t*t;
+
+        nextPoint.x = 0.5f*((p1.x*q0) + (p2.x*q1) + (p3.x*q2) + (p4.x*q3));
+        nextPoint.y = 0.5f*((p1.y*q0) + (p2.y*q1) + (p3.y*q2) + (p4.y*q3));
+
+        float dy = nextPoint.y - currentPoint.y;
+        float dx = nextPoint.x - currentPoint.x;
+        float size = (0.5f*thick)/sqrtf(dx*dx + dy*dy);
+
+        if (i == 1)
+        {
+            points[0].x = currentPoint.x + dy*size;
+            points[0].y = currentPoint.y - dx*size;
+            points[1].x = currentPoint.x - dy*size;
+            points[1].y = currentPoint.y + dx*size;
+        }
+
+        points[2*i + 1].x = nextPoint.x - dy*size;
+        points[2*i + 1].y = nextPoint.y + dx*size;
+        points[2*i].x = nextPoint.x + dy*size;
+        points[2*i].y = nextPoint.y - dx*size;
+
+        currentPoint = nextPoint;
+    }
+
+    DrawTriangleStrip(points, 2*SPLINE_SEGMENT_DIVISIONS + 2, color);
+}
+
+// Draw spline segment: Quadratic Bezier, 2 points, 1 control point
+void DrawSplineSegmentBezierQuadratic(Vector2 p1, Vector2 c2, Vector2 p3, float thick, Color color)
+{
+    const float step = 1.0f/SPLINE_SEGMENT_DIVISIONS;
+
+    Vector2 previous = p1;
+    Vector2 current = { 0 };
+    float t = 0.0f;
+
+    Vector2 points[2*SPLINE_SEGMENT_DIVISIONS + 2] = { 0 };
+
+    for (int i = 1; i <= SPLINE_SEGMENT_DIVISIONS; i++)
+    {
+        t = step*(float)i;
+
+        float a = powf(1.0f - t, 2);
+        float b = 2.0f*(1.0f - t)*t;
+        float c = powf(t, 2);
+
+        // NOTE: The easing functions aren't suitable here because they don't take a control point
+        current.y = a*p1.y + b*c2.y + c*p3.y;
+        current.x = a*p1.x + b*c2.x + c*p3.x;
+
+        float dy = current.y - previous.y;
+        float dx = current.x - previous.x;
+        float size = 0.5f*thick/sqrtf(dx*dx+dy*dy);
+
+        if (i == 1)
+        {
+            points[0].x = previous.x + dy*size;
+            points[0].y = previous.y - dx*size;
+            points[1].x = previous.x - dy*size;
+            points[1].y = previous.y + dx*size;
+        }
+
+        points[2*i + 1].x = current.x - dy*size;
+        points[2*i + 1].y = current.y + dx*size;
+        points[2*i].x = current.x + dy*size;
+        points[2*i].y = current.y - dx*size;
+
+        previous = current;
+    }
+
+    DrawTriangleStrip(points, 2*SPLINE_SEGMENT_DIVISIONS + 2, color);
+}
+
+// Draw spline segment: Cubic Bezier, 2 points, 2 control points
+void DrawSplineSegmentBezierCubic(Vector2 p1, Vector2 c2, Vector2 c3, Vector2 p4, float thick, Color color)
+{
+    const float step = 1.0f/SPLINE_SEGMENT_DIVISIONS;
+
+    Vector2 previous = p1;
+    Vector2 current = { 0 };
+    float t = 0.0f;
+
+    Vector2 points[2*SPLINE_SEGMENT_DIVISIONS + 2] = { 0 };
+
+    for (int i = 1; i <= SPLINE_SEGMENT_DIVISIONS; i++)
+    {
+        t = step*(float)i;
+
+        float a = powf(1.0f - t, 3);
+        float b = 3.0f*powf(1.0f - t, 2)*t;
+        float c = 3.0f*(1.0f - t)*powf(t, 2);
+        float d = powf(t, 3);
+
+        current.y = a*p1.y + b*c2.y + c*c3.y + d*p4.y;
+        current.x = a*p1.x + b*c2.x + c*c3.x + d*p4.x;
+
+        float dy = current.y - previous.y;
+        float dx = current.x - previous.x;
+        float size = 0.5f*thick/sqrtf(dx*dx+dy*dy);
+
+        if (i == 1)
+        {
+            points[0].x = previous.x + dy*size;
+            points[0].y = previous.y - dx*size;
+            points[1].x = previous.x - dy*size;
+            points[1].y = previous.y + dx*size;
+        }
+
+        points[2*i + 1].x = current.x - dy*size;
+        points[2*i + 1].y = current.y + dx*size;
+        points[2*i].x = current.x + dy*size;
+        points[2*i].y = current.y - dx*size;
+
+        previous = current;
+    }
+
+    DrawTriangleStrip(points, 2*SPLINE_SEGMENT_DIVISIONS + 2, color);
+}
+
+// Get spline point for a given t [0.0f .. 1.0f], Linear
+Vector2 GetSplinePointLinear(Vector2 startPos, Vector2 endPos, float t)
+{
+    Vector2 point = { 0 };
+
+    point.x = startPos.x*(1.0f - t) + endPos.x*t;
+    point.y = startPos.y*(1.0f - t) + endPos.y*t;
+
+    return point;
+}
+
+// Get spline point for a given t [0.0f .. 1.0f], B-Spline
+Vector2 GetSplinePointBasis(Vector2 p1, Vector2 p2, Vector2 p3, Vector2 p4, float t)
+{
+    Vector2 point = { 0 };
+
+    float a[4] = { 0 };
+    float b[4] = { 0 };
+
+    a[0] = (-p1.x + 3*p2.x - 3*p3.x + p4.x)/6.0f;
+    a[1] = (3*p1.x - 6*p2.x + 3*p3.x)/6.0f;
+    a[2] = (-3*p1.x + 3*p3.x)/6.0f;
+    a[3] = (p1.x + 4*p2.x + p3.x)/6.0f;
+
+    b[0] = (-p1.y + 3*p2.y - 3*p3.y + p4.y)/6.0f;
+    b[1] = (3*p1.y - 6*p2.y + 3*p3.y)/6.0f;
+    b[2] = (-3*p1.y + 3*p3.y)/6.0f;
+    b[3] = (p1.y + 4*p2.y + p3.y)/6.0f;
+
+    point.x = a[3] + t*(a[2] + t*(a[1] + t*a[0]));
+    point.y = b[3] + t*(b[2] + t*(b[1] + t*b[0]));
+
+    return point;
+}
+
+// Get spline point for a given t [0.0f .. 1.0f], Catmull-Rom
+Vector2 GetSplinePointCatmullRom(Vector2 p1, Vector2 p2, Vector2 p3, Vector2 p4, float t)
+{
+    Vector2 point = { 0 };
+
+    float q0 = (-1*t*t*t) + (2*t*t) + (-1*t);
+    float q1 = (3*t*t*t) + (-5*t*t) + 2;
+    float q2 = (-3*t*t*t) + (4*t*t) + t;
+    float q3 = t*t*t - t*t;
+
+    point.x = 0.5f*((p1.x*q0) + (p2.x*q1) + (p3.x*q2) + (p4.x*q3));
+    point.y = 0.5f*((p1.y*q0) + (p2.y*q1) + (p3.y*q2) + (p4.y*q3));
+
+    return point;
+}
+
+// Get spline point for a given t [0.0f .. 1.0f], Quadratic Bezier
+Vector2 GetSplinePointBezierQuad(Vector2 startPos, Vector2 controlPos, Vector2 endPos, float t)
+{
+    Vector2 point = { 0 };
+
+    float a = powf(1.0f - t, 2);
+    float b = 2.0f*(1.0f - t)*t;
+    float c = powf(t, 2);
+
+    point.y = a*startPos.y + b*controlPos.y + c*endPos.y;
+    point.x = a*startPos.x + b*controlPos.x + c*endPos.x;
+
+    return point;
+}
+
+// Get spline point for a given t [0.0f .. 1.0f], Cubic Bezier
+Vector2 GetSplinePointBezierCubic(Vector2 startPos, Vector2 startControlPos, Vector2 endControlPos, Vector2 endPos, float t)
+{
+    Vector2 point = { 0 };
+
+    float a = powf(1.0f - t, 3);
+    float b = 3.0f*powf(1.0f - t, 2)*t;
+    float c = 3.0f*(1.0f - t)*powf(t, 2);
+    float d = powf(t, 3);
+
+    point.y = a*startPos.y + b*startControlPos.y + c*endControlPos.y + d*endPos.y;
+    point.x = a*startPos.x + b*startControlPos.x + c*endControlPos.x + d*endPos.x;
+
+    return point;
+}
+
 //----------------------------------------------------------------------------------
 // Module Functions Definition - Collision Detection functions
 //----------------------------------------------------------------------------------
diff --git a/raylib/rshapes.go b/raylib/rshapes.go
index c5de17b..6d85d4c 100644
--- a/raylib/rshapes.go
+++ b/raylib/rshapes.go
@@ -59,6 +59,14 @@ func DrawLineEx(startPos, endPos Vector2, thick float32, col color.RGBA) {
 	C.DrawLineEx(*cstartPos, *cendPos, cthick, *ccolor)
 }
 
+// DrawLineStrip - Draw lines sequence
+func DrawLineStrip(points []Vector2, pointCount int32, col color.RGBA) {
+	cpoints := (*C.Vector2)(unsafe.Pointer(&points[0]))
+	cpointCount := (C.int)(pointCount)
+	ccolor := colorCptr(col)
+	C.DrawLineStrip(cpoints, cpointCount, *ccolor)
+}
+
 // DrawLineBezier - Draw a line using cubic-bezier curves in-out
 func DrawLineBezier(startPos, endPos Vector2, thick float32, col color.RGBA) {
 	cstartPos := startPos.cptr()
@@ -68,53 +76,6 @@ func DrawLineBezier(startPos, endPos Vector2, thick float32, col color.RGBA) {
 	C.DrawLineBezier(*cstartPos, *cendPos, cthick, *ccolor)
 }
 
-// DrawLineBezierQuad - Draw line using quadratic bezier curves with a control point
-func DrawLineBezierQuad(startPos Vector2, endPos Vector2, controlPos Vector2, thick float32, col color.RGBA) {
-	cstartPos := startPos.cptr()
-	cendPos := endPos.cptr()
-	ccontrolPos := controlPos.cptr()
-	cthick := (C.float)(thick)
-	ccolor := colorCptr(col)
-	C.DrawLineBezierQuad(*cstartPos, *cendPos, *ccontrolPos, cthick, *ccolor)
-}
-
-// DrawLineBezierCubic - Draw line using cubic bezier curves with 2 contrl points
-func DrawLineBezierCubic(startPos Vector2, endPos Vector2, startControlPos Vector2, endControlPos Vector2, thick float32, col color.RGBA) {
-	cstartPos := startPos.cptr()
-	cendPos := endPos.cptr()
-	cstartControlPos := startControlPos.cptr()
-	cendControlPos := endControlPos.cptr()
-	cthick := (C.float)(thick)
-	ccolor := colorCptr(col)
-	C.DrawLineBezierCubic(*cstartPos, *cendPos, *cstartControlPos, *cendControlPos, cthick, *ccolor)
-}
-
-// DrawLineBSpline - Draw a B-Spline line, minimum 4 points
-func DrawLineBSpline(points []Vector2, pointCount int32, thick float32, col color.RGBA) {
-	cpoints := (*C.Vector2)(unsafe.Pointer(&points[0]))
-	cpointCount := (C.int)(pointCount)
-	cthick := (C.float)(thick)
-	ccolor := colorCptr(col)
-	C.DrawLineBSpline(cpoints, cpointCount, cthick, *ccolor)
-}
-
-// DrawLineCatmullRom - Draw a Catmull Rom spline line, minimum 4 points
-func DrawLineCatmullRom(points []Vector2, pointCount int32, thick float32, col color.RGBA) {
-	cpoints := (*C.Vector2)(unsafe.Pointer(&points[0]))
-	cpointCount := (C.int)(pointCount)
-	cthick := (C.float)(thick)
-	ccolor := colorCptr(col)
-	C.DrawLineCatmullRom(cpoints, cpointCount, cthick, *ccolor)
-}
-
-// DrawLineStrip - Draw lines sequence
-func DrawLineStrip(points []Vector2, pointCount int32, col color.RGBA) {
-	cpoints := (*C.Vector2)(unsafe.Pointer(&points[0]))
-	cpointCount := (C.int)(pointCount)
-	ccolor := colorCptr(col)
-	C.DrawLineStrip(cpoints, cpointCount, *ccolor)
-}
-
 // DrawCircle - Draw a color-filled circle
 func DrawCircle(centerX, centerY int32, radius float32, col color.RGBA) {
 	ccenterX := (C.int)(centerX)
diff --git a/raylib/rtextures.c b/raylib/rtextures.c
index 9465c5b..98586db 100644
--- a/raylib/rtextures.c
+++ b/raylib/rtextures.c
@@ -347,7 +347,7 @@ Image LoadImageSvg(const char *fileNameOrString, int width, int height)
             (fileNameOrString[2] == 'v') &&
             (fileNameOrString[3] == 'g'))
         {
-            fileData = fileNameOrString;
+            fileData = (unsigned char *)fileNameOrString;
             isSvgStringValid = true;
         }
     }
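Note on the thick-spline rasterization added above: every DrawSpline*()/DrawSplineSegment*() function in this patch builds a triangle strip by offsetting each evaluated curve point perpendicular to the local segment direction by half the requested thickness (the dx/dy/size computations). A minimal standalone sketch of that offset step, for reference only (not part of the patch; the Vec2 type and the EmitStripPair helper name are made up here):

    #include <math.h>

    typedef struct { float x, y; } Vec2;

    // Compute the two triangle-strip vertices that sit on either side of 'next',
    // offset by half of 'thick' perpendicular to the direction current -> next.
    // This mirrors the dy/dx/size math used by the DrawSplineSegment*() functions.
    static void EmitStripPair(Vec2 current, Vec2 next, float thick, Vec2 pair[2])
    {
        float dx = next.x - current.x;
        float dy = next.y - current.y;
        float size = 0.5f*thick/sqrtf(dx*dx + dy*dy);   // half thickness divided by segment length

        pair[0].x = next.x + dy*size;   // one side of the strip
        pair[0].y = next.y - dx*size;
        pair[1].x = next.x - dy*size;   // the other side
        pair[1].y = next.y + dx*size;
    }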
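Usage sketch for the new spline API (illustrative only; the window size, title and control-point values are arbitrary examples, and the program assumes the raylib headers updated by this patch):

    #include "raylib.h"

    int main(void)
    {
        InitWindow(800, 450, "spline example");

        // Catmull-Rom needs at least 4 points; the curve passes through the inner points
        Vector2 points[5] = { { 50, 400 }, { 160, 220 }, { 340, 380 }, { 520, 60 }, { 710, 260 } };

        while (!WindowShouldClose())
        {
            BeginDrawing();
                ClearBackground(RAYWHITE);
                DrawSplineCatmullRom(points, 5, 3.0f, RED);     // 3 px thick spline

                // Evaluate a single point at t = 0.5 on the first segment and mark it
                Vector2 mid = GetSplinePointCatmullRom(points[0], points[1], points[2], points[3], 0.5f);
                DrawCircleV(mid, 5.0f, MAROON);
            EndDrawing();
        }

        CloseWindow();
        return 0;
    }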