add some code

This commit is contained in:
2025-09-05 13:25:11 +08:00
parent 9ff0a99e7a
commit 3cf1229a85
8911 changed files with 2535396 additions and 0 deletions

View File

@@ -0,0 +1 @@
8d09974862ba5323c0c97b77deb3be81ca7bd83e60f77265504c086cd69cc5cf

View File

@@ -0,0 +1,103 @@
# Object files
*.o
*.ko
*.obj
*.elf
# Precompiled Headers
*.gch
*.pch
# Libraries
*.lib
*.a
*.la
*.lo
# Shared objects (inc. Windows DLLs)
*.so
*.so.*
*.dylib
# Executables
*.out
*.app
*.i*86
*.x86_64
*.hex
# Debug files
*.dSYM/
# =========================
# Operating System Files
# =========================
# Linux
# =========================
# Vim temporary files
*~
*.swp
*.swo
# OSX
# =========================
.DS_Store
.AppleDouble
.LSOverride
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
# Windows
# =========================
# Windows image file caches
Thumbs.db
ehthumbs.db
# Folder config file
Desktop.ini
# Recycle Bin used on file shares
$RECYCLE.BIN/
# Windows Installer files
*.cab
*.msi
*.msm
*.msp
# Windows shortcuts
*.lnk
# ESP32
build/
sdkconfig
managed_components/
*.lock
dist/
# VSCode
.vscode/
.sdkconfig
.sbmp
.bmp
.gif

View File

@@ -0,0 +1,9 @@
# ChangeLog
## v1.0.1
* Add vfs and huffman
## v1.0.0 Initial Version
* Add image player.

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,5 @@
idf_component_register(
SRC_DIRS "."
INCLUDE_DIRS "include"
PRIV_INCLUDE_DIRS "./include_priv"
)

View File

@@ -0,0 +1,10 @@
menu "Animation Player Configuration"
config ANIM_PLAYER_DEFAULT_FPS
int "Default frame rate"
range 1 60
default 30
help
Default frame rate for animation player in frames per second.
endmenu

View File

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,62 @@
# Image Player
## Introduction
`image_player` is a lightweight and efficient image rendering component designed for embedded systems. It enables seamless integration of various image formats into your projects. This module ensures high performance and flexibility for modern embedded applications that require efficient image playback and rendering.
[![Component Registry](https://components.espressif.com/components/espressif2022/image_player/badge.svg)](https://components.espressif.com/components/espressif2022/image_player)
## Dependencies
1. **ESP-IDF**
Ensure your project includes ESP-IDF 5.0 or higher. Refer to the [ESP-IDF Documentation](https://docs.espressif.com/projects/esp-idf/en/latest/) for setup instructions.
## Scripts
### GIF to Split BMP Converter (`gif_to_split_bmp.py`)
This script converts GIF animations into a series of split BMP files optimized for embedded systems.
#### Usage
```bash
python gif_to_split_bmp.py <input_folder> <output_folder> --split <split_height> --depth <bit_depth> [--enable-huffman]
```
#### Parameters
- `input_folder`: Directory containing GIF files to process
- `output_folder`: Directory where processed files will be saved
- `--split`: Height of each split block (must be a positive integer)
- `--depth`: Bit depth (4 for 4-bit grayscale, 8 for 8-bit color)
- `--enable-huffman`: Optional flag to enable Huffman compression (default: disabled)
#### Example
```bash
# Using only RLE compression
python gif_to_split_bmp.py ./gifs ./output --split 16 --depth 4
# Using both RLE and Huffman compression
python gif_to_split_bmp.py ./gifs ./output --split 16 --depth 4 --enable-huffman
```
### GIF Merge Tool (`gif_merge.py`)
This script merges split BMP files into a single optimized asset file for embedded systems.
#### Usage
```bash
python gif_merge.py <input_dir>
```
#### Parameters
- `input_dir`: Directory containing split BMP files to merge
#### Example
```bash
python gif_merge.py ./output
```
#### Features
- Automatically detects and handles duplicate frames
- Optimizes storage by referencing repeated frames
- Generates a single asset file with all frames
- Supports both 4-bit and 8-bit color depth
- Includes compression for efficient storage
- Supports both RLE and Huffman compression methods

View File

@@ -0,0 +1,284 @@
/*
* SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include <string.h>
#include "esp_err.h"
#include "esp_log.h"
#include "esp_check.h"
#include "anim_player.h"
#include "anim_vfs.h"
#include "anim_dec.h"
static const char *TAG = "anim_decoder";
/**
 * Look up palette entry @p index and return it as 0x00RRGGBB.
 *
 * Palette entries are stored as 4 bytes per color with blue first
 * (B, G, R, X), so the channels are reassembled into RGB order here.
 */
uint32_t anim_dec_parse_palette(const image_header_t *header, uint8_t index)
{
    const uint8_t *entry = header->palette + (size_t)index * 4;
    uint32_t blue  = entry[0];
    uint32_t green = entry[1];
    uint32_t red   = entry[2];
    return (red << 16) | (green << 8) | blue;
}
/**
 * Parse an image header from raw frame data.
 *
 * Supported container formats (identified by the first two bytes):
 *   "_S" - split bitmap (SBMP): version, bit depth, geometry, per-split
 *          length table and palette follow.
 *   "_R" - redirect: a filename follows; it is stored in header->palette
 *          (with its length + 1 recorded in header->num_colors).
 *
 * Fixes over the previous version:
 *  - data_len was accepted but never used; every field read is now
 *    bounds-checked against it so truncated frames are rejected instead
 *    of read past the end of the buffer.
 *  - 16-bit fields were read through unaligned uint16_t* casts (undefined
 *    behavior); they are now assembled byte-by-byte (little-endian, as
 *    the original host-order reads assumed on this target).
 *  - the error path printed format bytes via possibly sign-extended char.
 *
 * @param data     Raw frame bytes.
 * @param data_len Number of valid bytes in @p data.
 * @param header   Out: parsed header. On success the caller must release
 *                 it with anim_dec_free_header().
 * @return IMAGE_FORMAT_SBMP, IMAGE_FORMAT_REDIRECT, or IMAGE_FORMAT_INVALID.
 */
image_format_t anim_dec_parse_header(const uint8_t *data, size_t data_len, image_header_t *header)
{
    // Initialize header fields
    memset(header, 0, sizeof(image_header_t));

    // Even the smallest format (redirect) needs magic + length byte.
    if (data == NULL || data_len < 3) {
        ESP_LOGE(TAG, "Frame truncated: %zu bytes", data_len);
        return IMAGE_FORMAT_INVALID;
    }

    // Read format identifier
    memcpy(header->format, data, 2);
    header->format[2] = '\0';

    if (strncmp(header->format, "_S", 2) == 0) {
        // Fixed part of the SBMP header occupies bytes 0..17.
        if (data_len < 18) {
            ESP_LOGE(TAG, "SBMP header truncated: %zu bytes", data_len);
            return IMAGE_FORMAT_INVALID;
        }
        // Byte 2 is skipped; version string occupies bytes 3..8.
        memcpy(header->version, data + 3, 6);
        // Read bit depth
        header->bit_depth = data[9];
        // Validate bit depth
        if (header->bit_depth != 4 && header->bit_depth != 8) {
            ESP_LOGE(TAG, "Invalid bit depth: %d", header->bit_depth);
            return IMAGE_FORMAT_INVALID;
        }
        // 16-bit fields, little-endian, assembled without unaligned casts.
        header->width        = (uint16_t)(data[10] | (data[11] << 8));
        header->height       = (uint16_t)(data[12] | (data[13] << 8));
        header->splits       = (uint16_t)(data[14] | (data[15] << 8));
        header->split_height = (uint16_t)(data[16] | (data[17] << 8));

        // Calculate number of colors based on bit depth
        header->num_colors = 1 << header->bit_depth;

        // Split table and palette must both fit inside the buffer.
        size_t table_end   = 18 + (size_t)header->splits * 2;
        size_t palette_end = table_end + (size_t)header->num_colors * 4;
        if (data_len < palette_end) {
            ESP_LOGE(TAG, "SBMP header truncated: need %zu, have %zu", palette_end, data_len);
            return IMAGE_FORMAT_INVALID;
        }

        // Allocate and read split lengths
        header->split_lengths = (uint16_t *)malloc(header->splits * sizeof(uint16_t));
        if (header->split_lengths == NULL) {
            ESP_LOGE(TAG, "Failed to allocate memory for split lengths");
            return IMAGE_FORMAT_INVALID;
        }
        for (int i = 0; i < header->splits; i++) {
            header->split_lengths[i] = (uint16_t)(data[18 + i * 2] | (data[19 + i * 2] << 8));
        }

        // Allocate and read color palette
        header->palette = (uint8_t *)malloc(header->num_colors * 4);
        if (header->palette == NULL) {
            ESP_LOGE(TAG, "Failed to allocate memory for palette");
            free(header->split_lengths);
            header->split_lengths = NULL;
            return IMAGE_FORMAT_INVALID;
        }
        memcpy(header->palette, data + table_end, (size_t)header->num_colors * 4);

        // Pixel data begins right after the palette.
        header->data_offset = palette_end;
        return IMAGE_FORMAT_SBMP;
    } else if (strncmp(header->format, "_R", 2) == 0) {
        // Parse redirect format: [2] = filename length, [3..] = filename.
        uint8_t file_length = data[2];
        if (data_len < (size_t)3 + file_length) {
            ESP_LOGE(TAG, "Redirect frame truncated: %zu bytes", data_len);
            return IMAGE_FORMAT_INVALID;
        }
        // For redirect format, the palette field stores the filename.
        header->palette = (uint8_t *)malloc((size_t)file_length + 1);
        if (header->palette == NULL) {
            ESP_LOGE(TAG, "Failed to allocate memory for redirect filename");
            return IMAGE_FORMAT_INVALID;
        }
        memcpy(header->palette, data + 3, file_length);
        header->palette[file_length] = '\0'; // Ensure null termination
        header->num_colors = file_length + 1;
        return IMAGE_FORMAT_REDIRECT;
    } else {
        // Cast to unsigned char so %02X cannot sign-extend a negative char.
        ESP_LOGE(TAG, "Invalid format: %02X %02X",
                 (unsigned char)header->format[0], (unsigned char)header->format[1]);
        return IMAGE_FORMAT_INVALID;
    }
}
/**
 * Fill @p offsets with the absolute byte offset of every split.
 *
 * Offset 0 is the header's data_offset; each subsequent split starts
 * where the previous one ends (running sum of split_lengths).
 * @p offsets must hold header->splits entries (splits >= 1).
 */
void anim_dec_calculate_offsets(const image_header_t *header, uint16_t *offsets)
{
    uint16_t cursor = header->data_offset;
    for (int i = 0; i < header->splits; i++) {
        offsets[i] = cursor;
        cursor = (uint16_t)(cursor + header->split_lengths[i]);
    }
}
/**
 * Release the buffers attached to a parsed header and reset the pointers
 * so a double call is harmless. free(NULL) is a no-op, so no guards are
 * needed before the calls.
 */
void anim_dec_free_header(image_header_t *header)
{
    free(header->split_lengths);
    header->split_lengths = NULL;
    free(header->palette);
    header->palette = NULL;
}
/**
 * Decode run-length-encoded data made of (count, value) byte pairs.
 *
 * Fixes over the previous version:
 *  - the loop condition was `in_pos + 1 <= input_len` while each
 *    iteration consumes TWO bytes, so an odd-length input caused a
 *    one-byte read past the end of the buffer; it now requires both
 *    bytes of a record to be present.
 *  - size_t values were logged with %d; now %zu.
 *
 * @param input      RLE stream of (count, value) pairs.
 * @param input_len  Length of @p input in bytes; a trailing odd byte is
 *                   ignored.
 * @param output     Destination buffer.
 * @param output_len Capacity of @p output in bytes.
 * @return ESP_OK on success, ESP_FAIL if the expanded data would
 *         overflow @p output.
 */
esp_err_t anim_dec_rte_decode(const uint8_t *input, size_t input_len, uint8_t *output, size_t output_len)
{
    size_t in_pos = 0;
    size_t out_pos = 0;
    // Each record is exactly two bytes: require both before reading.
    while (in_pos + 2 <= input_len) {
        uint8_t count = input[in_pos++];
        uint8_t value = input[in_pos++];
        if (out_pos + count > output_len) {
            ESP_LOGE(TAG, "Output buffer overflow, %zu > %zu", out_pos + count, output_len);
            return ESP_FAIL;
        }
        // Expand the run in one shot instead of byte-by-byte.
        memset(output + out_pos, value, count);
        out_pos += count;
    }
    return ESP_OK;
}
static Node* create_node()
{
Node* node = (Node*)calloc(1, sizeof(Node));
return node;
}
/* Recursively release every node of the Huffman tree rooted at @p node.
 * NULL is accepted and ignored. */
static void free_tree(Node* node)
{
    if (node == NULL) {
        return;
    }
    Node* left = node->left;
    Node* right = node->right;
    free(node);
    free_tree(left);
    free_tree(right);
}
/**
 * Rebuild a Huffman tree from a serialized dictionary and decode the
 * bitstream in @p data through it.
 *
 * Dictionary layout: byte 0 is the number of padding bits in the final
 * data byte; then repeated records of { symbol byte, code length in
 * bits, code bytes ((len+7)/8 bytes, MSB-aligned) }.
 *
 * @param data       Huffman-coded bitstream (bits consumed MSB-first).
 * @param data_len   Length of @p data in bytes.
 * @param dict_bytes Serialized dictionary as described above.
 * @param dict_len   Length of @p dict_bytes in bytes.
 * @param output     Destination for decoded bytes.
 * @param output_len Out: number of bytes written.
 * @return ESP_OK (a corrupt stream is logged and truncates the output).
 *
 * NOTE(review): the destination capacity is not passed in — the caller
 * must guarantee @p output is large enough for the fully decoded stream.
 * NOTE(review): create_node() results are never NULL-checked; an OOM
 * while building the tree would dereference NULL — confirm acceptable.
 */
static esp_err_t decode_huffman_data(const uint8_t* data, size_t data_len,
                                     const uint8_t* dict_bytes, size_t dict_len,
                                     uint8_t* output, size_t* output_len)
{
    // Empty input or dictionary decodes to nothing.
    if (!data || !dict_bytes || data_len == 0 || dict_len == 0) {
        *output_len = 0;
        return ESP_OK;
    }
    // Get padding (unused bits at the tail of the bitstream)
    uint8_t padding = dict_bytes[0];
    // printf("Padding bits: %u\n", padding);
    size_t dict_pos = 1;
    // Reconstruct Huffman Tree
    Node* root = create_node();
    Node* current = NULL;
    while (dict_pos < dict_len) {
        uint8_t byte_val = dict_bytes[dict_pos++];
        uint8_t code_len = dict_bytes[dict_pos++];
        size_t code_byte_len = (code_len + 7) / 8;
        uint64_t code = 0;
        for (size_t i = 0; i < code_byte_len; ++i) {
            code = (code << 8) | dict_bytes[dict_pos++];
        }
        // Insert into tree: walk the code MSB-first, growing nodes on demand
        current = root;
        for (int bit = code_len - 1; bit >= 0; --bit) {
            int bit_val = (code >> bit) & 1;
            if (bit_val == 0) {
                if (!current->left) {
                    current->left = create_node();
                }
                current = current->left;
            } else {
                if (!current->right) {
                    current->right = create_node();
                }
                current = current->right;
            }
        }
        // The node reached at the end of the code is the symbol's leaf.
        current->is_leaf = 1;
        current->value = byte_val;
    }
    // Convert bitstream: exclude the padding bits of the final byte
    size_t total_bits = data_len * 8;
    if (padding > 0) {
        total_bits -= padding;
    }
    current = root;
    size_t out_pos = 0;
    // Process each bit, descending the tree; emit a byte at every leaf
    for (size_t bit_index = 0; bit_index < total_bits; bit_index++) {
        size_t byte_idx = bit_index / 8;
        int bit_offset = 7 - (bit_index % 8); // Most significant bit first
        int bit = (data[byte_idx] >> bit_offset) & 1;
        if (bit == 0) {
            current = current->left;
        } else {
            current = current->right;
        }
        if (current == NULL) {
            // Corrupt stream: the walked path left the tree.
            ESP_LOGE(TAG, "Invalid path in Huffman tree at bit %zu", bit_index);
            break;
        }
        if (current->is_leaf) {
            output[out_pos++] = current->value;
            current = root;
        }
    }
    *output_len = out_pos;
    free_tree(root);
    return ESP_OK;
}
/**
 * Decode a Huffman-wrapped buffer.
 *
 * Layout: [0] encoding type (checked by the caller), [1..2] dictionary
 * length, [3..3+dict_len) dictionary, then the coded bitstream.
 *
 * Fixes over the previous version:
 *  - the guard only required buflen >= 1 but buffer[1] and buffer[2]
 *    were read unconditionally; the guard is now buflen >= 3.
 *  - size_t was logged with %d and raw pointers passed to %p without a
 *    (void *) cast — both are format-specifier UB; corrected.
 *
 * NOTE(review): the dictionary length is assembled as
 * (buffer[2] << 8) | buffer[1], i.e. buffer[1] is the LOW byte, which
 * contradicts the old "big endian" comment — the byte order is kept
 * exactly as before; confirm against the encoder script.
 *
 * @param buffer     Encoded buffer as laid out above.
 * @param buflen     Length of @p buffer in bytes.
 * @param output     Destination for decoded bytes (caller-sized).
 * @param output_len Out: number of bytes written.
 * @return ESP_OK on success, ESP_FAIL on malformed input or decode error.
 */
esp_err_t anim_dec_huffman_decode(const uint8_t* buffer, size_t buflen, uint8_t* output, size_t* output_len)
{
    // Need at least type byte + 2 dictionary-length bytes before reading them.
    if (!buffer || buflen < 3 || !output || !output_len) {
        ESP_LOGE(TAG, "Invalid parameters: buffer=%p, buflen=%zu, output=%p, output_len=%p",
                 (const void *)buffer, buflen, (void *)output, (void *)output_len);
        return ESP_FAIL;
    }
    // First byte indicates encoding type (already checked in caller)
    // Bytes 1-2 contain the dictionary length (byte order preserved as-is).
    uint16_t dict_len = (buffer[2] << 8) | buffer[1];
    if (buflen < (size_t)3 + dict_len) {
        ESP_LOGE(TAG, "Buffer too short for dictionary");
        return ESP_FAIL;
    }
    // Calculate data length
    size_t data_len = buflen - 3 - dict_len;
    if (data_len == 0) {
        ESP_LOGE(TAG, "No data to decode");
        return ESP_FAIL;
    }
    // Decode data
    esp_err_t ret = decode_huffman_data(buffer + 3 + dict_len, data_len,
                                        buffer + 3, dict_len,
                                        output, output_len);
    if (ret != ESP_OK) {
        ESP_LOGE(TAG, "Huffman decoding failed: %d", ret);
        return ESP_FAIL;
    }
    return ESP_OK;
}

View File

@@ -0,0 +1,503 @@
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include <string.h>
#include "esp_err.h"
#include "esp_log.h"
#include "esp_check.h"
#include "anim_player.h"
#include "anim_vfs.h"
#include "anim_dec.h"
static const char *TAG = "anim_player";
/* Event-group bits used to hand-shake between API calls and the player task. */
#define NEED_DELETE BIT0
#define DELETE_DONE BIT1
#define WAIT_FLUSH_DONE BIT2
#define WAIT_STOP BIT3
#define WAIT_STOP_DONE BIT4
#define FPS_TO_MS(fps) (1000 / (fps)) // Convert FPS to milliseconds

/* Message posted to the player task's queue to change its action. */
typedef struct {
    player_action_t action;
} anim_player_event_t;

/* Synchronization primitives shared between API calls and the task. */
typedef struct {
    EventGroupHandle_t event_group;
    QueueHandle_t event_queue;
} anim_player_events_t;

/* Description of the currently loaded asset. */
typedef struct {
    uint32_t start;               // first frame index of the asset
    uint32_t end;                 // last frame index of the asset
    anim_vfs_handle_t file_desc;  // parsed asset handle
} anim_player_info_t;

// Animation player context
typedef struct {
    anim_player_info_t info;      // loaded asset description
    int run_start;                // first frame of the active segment
    int run_end;                  // last frame of the active segment
    bool repeat;                  // loop the segment when true
    int fps;                      // target frame rate
    anim_flush_cb_t flush_cb;     // invoked with each decoded stripe
    anim_update_cb_t update_cb;   // playback progress notifications
    void *user_data;              // opaque pointer handed back to callbacks
    anim_player_events_t events;  // task synchronization objects
    TaskHandle_t handle_task;     // player task handle
    struct {
        unsigned char swap: 1;    // byte-swap RGB565 pixels when set
    } flags;
} anim_player_context_t;

/* Snapshot of playback parameters owned by the task's main loop. */
typedef struct {
    player_action_t action;       // current START/STOP state
    int run_start;                // segment copied from the context
    int run_end;
    bool repeat;
    int fps;
    uint32_t last_frame_time;     // tick count of the last rendered frame
} anim_player_run_ctx_t;
/* Pack a 24-bit 0x00RRGGBB color into 16-bit RGB565 (R5 G6 B5). */
static inline uint16_t rgb888_to_rgb565(uint32_t color)
{
    uint16_t r5 = (uint16_t)((color >> 19) & 0x1F);
    uint16_t g6 = (uint16_t)((color >> 10) & 0x3F);
    uint16_t b5 = (uint16_t)((color >> 3) & 0x1F);
    return (uint16_t)((r5 << 11) | (g6 << 5) | b5);
}
/**
 * Decode one SBMP frame and push it to the display, stripe by stripe.
 *
 * For every split: pick the decoder from the stripe's first byte (plain
 * RLE, or Huffman followed by RLE), expand the palette indices to RGB565
 * (optionally byte-swapped), hand the stripe to the flush callback and
 * wait up to 20 ms for the flush-done acknowledgment.
 *
 * Fix over the previous version: the return value of
 * anim_dec_huffman_decode() was discarded (decode_result was forced to
 * ESP_OK before a dead check); a Huffman failure now skips the RLE pass.
 *
 * @param data     Frame bytes (header + compressed stripes).
 * @param data_len Frame length in bytes (currently unused here; bounds
 *                 were validated by anim_dec_parse_header()).
 * @param header   Parsed frame header; released here via anim_dec_free_header().
 * @param ctx      Player context (flush callback, swap flag, event group).
 * @return ESP_OK on success, ESP_FAIL on allocation failure.
 */
static esp_err_t anim_player_parse(const uint8_t *data, size_t data_len, image_header_t *header, anim_player_context_t *ctx)
{
    // Allocate memory for split offsets
    uint16_t *offsets = (uint16_t *)malloc(header->splits * sizeof(uint16_t));
    if (offsets == NULL) {
        ESP_LOGE(TAG, "Failed to allocate memory for offsets");
        return ESP_FAIL;
    }
    anim_dec_calculate_offsets(header, offsets);
    // Allocate one stripe worth of RGB565 pixels
    void *frame_buffer = malloc(header->width * header->split_height * sizeof(uint16_t));
    if (frame_buffer == NULL) {
        ESP_LOGE(TAG, "Failed to allocate memory for frame buffer");
        free(offsets);
        return ESP_FAIL;
    }
    // Allocate the palette-index buffer: 2 pixels/byte at 4 bpp, 1 at 8 bpp
    uint8_t *decode_buffer = NULL;
    if (header->bit_depth == 4) {
        decode_buffer = (uint8_t *)malloc(header->width * (header->split_height + (header->split_height % 2)) / 2);
    } else if (header->bit_depth == 8) {
        decode_buffer = (uint8_t *)malloc(header->width * header->split_height);
    }
    if (decode_buffer == NULL) {
        ESP_LOGE(TAG, "Failed to allocate memory for decode buffer");
        free(offsets);
        free(frame_buffer);
        return ESP_FAIL;
    }
    uint16_t *pixels = (uint16_t *)frame_buffer;
    // Process each split
    for (int split = 0; split < header->splits; split++) {
        const uint8_t *compressed_data = data + offsets[split];
        int compressed_len = header->split_lengths[split];
        esp_err_t decode_result = ESP_FAIL;
        int valid_height;
        // The last stripe may be shorter than split_height.
        if (split == header->splits - 1) {
            valid_height = header->height - split * header->split_height;
        } else {
            valid_height = header->split_height;
        }
        ESP_LOGD(TAG, "split:%d(%d), height:%d(%d), compressed_len:%d", split, header->splits, header->split_height, valid_height, compressed_len);
        // Check encoding type from first byte
        if (compressed_data[0] == ENCODING_TYPE_RLE) {
            decode_result = anim_dec_rte_decode(compressed_data + 1, compressed_len - 1,
                                                decode_buffer, header->width * header->split_height);
        } else if (compressed_data[0] == ENCODING_TYPE_HUFFMAN) {
            uint8_t *huffman_buffer = malloc(header->width * header->split_height);
            if (huffman_buffer == NULL) {
                ESP_LOGE(TAG, "Failed to allocate memory for Huffman buffer");
                continue;
            }
            size_t huffman_decoded_len = 0;
            // Fix: honor the Huffman decoder's status instead of ignoring it.
            decode_result = anim_dec_huffman_decode(compressed_data, compressed_len,
                                                    huffman_buffer, &huffman_decoded_len);
            if (decode_result == ESP_OK) {
                // Huffman output is itself RLE-compressed; expand it now.
                decode_result = anim_dec_rte_decode(huffman_buffer, huffman_decoded_len,
                                                    decode_buffer, header->width * header->split_height);
            }
            free(huffman_buffer);
        } else {
            ESP_LOGE(TAG, "Unknown encoding type: %02X", compressed_data[0]);
            continue;
        }
        if (decode_result != ESP_OK) {
            ESP_LOGE(TAG, "Failed to decode split %d", split);
            continue;
        }
        // Convert to RGB565 based on bit depth
        if (header->bit_depth == 4) {
            // 4-bit mode: each byte holds two pixels, high nibble first.
            // NOTE(review): row stride width/2 assumes an even width — confirm
            // against the encoder script.
            for (int y = 0; y < valid_height; y++) {
                for (int x = 0; x < header->width; x += 2) {
                    uint8_t packed_gray = decode_buffer[y * (header->width / 2) + (x / 2)];
                    uint8_t index1 = (packed_gray & 0xF0) >> 4;
                    uint8_t index2 = (packed_gray & 0x0F);
                    uint32_t color1 = anim_dec_parse_palette(header, index1);
                    uint32_t color2 = anim_dec_parse_palette(header, index2);
                    pixels[y * header->width + x] = ctx->flags.swap ? __builtin_bswap16(rgb888_to_rgb565(color1)) : rgb888_to_rgb565(color1);
                    if (x + 1 < header->width) {
                        pixels[y * header->width + x + 1] = ctx->flags.swap ? __builtin_bswap16(rgb888_to_rgb565(color2)) : rgb888_to_rgb565(color2);
                    }
                }
            }
        } else if (header->bit_depth == 8) {
            // 8-bit mode: each byte is one pixel
            for (int y = 0; y < valid_height; y++) {
                for (int x = 0; x < header->width; x++) {
                    uint8_t index = decode_buffer[y * header->width + x];
                    uint32_t color = anim_dec_parse_palette(header, index);
                    pixels[y * header->width + x] = ctx->flags.swap ? __builtin_bswap16(rgb888_to_rgb565(color)) : rgb888_to_rgb565(color);
                }
            }
        } else {
            ESP_LOGE(TAG, "Unsupported bit depth: %d", header->bit_depth);
            continue;
        }
        // Flush the decoded stripe and wait for the driver's acknowledgment.
        xEventGroupClearBits(ctx->events.event_group, WAIT_FLUSH_DONE);
        if (ctx->flush_cb) {
            ctx->flush_cb(ctx, 0, split * header->split_height, header->width, split * header->split_height + valid_height, pixels);
        }
        xEventGroupWaitBits(ctx->events.event_group, WAIT_FLUSH_DONE, pdTRUE, pdFALSE, pdMS_TO_TICKS(20));
    }
    // Cleanup
    free(offsets);
    free(frame_buffer);
    free(decode_buffer);
    anim_dec_free_header(header);
    return ESP_OK;
}
/**
 * Player task main loop.
 *
 * Waits for control events (start/stop/delete) and, while started, walks
 * frames [run_start, run_end], decoding and flushing each one at the
 * configured frame rate. Delete and stop requests are honored both while
 * idle and between frames.
 */
static void anim_player_task(void *arg)
{
    image_header_t header;
    anim_player_context_t *ctx = (anim_player_context_t *)arg;
    anim_player_run_ctx_t run_ctx;
    anim_player_event_t player_event;
    // Start stopped; playback begins only after a START event arrives.
    run_ctx.action = PLAYER_ACTION_STOP;
    run_ctx.run_start = ctx->run_start;
    run_ctx.run_end = ctx->run_end;
    run_ctx.repeat = ctx->repeat;
    run_ctx.fps = ctx->fps;
    run_ctx.last_frame_time = xTaskGetTickCount();
    while (1) {
        // Idle wait; the 10 ms timeout doubles as the poll period while stopped.
        EventBits_t bits = xEventGroupWaitBits(ctx->events.event_group,
                                               NEED_DELETE | WAIT_STOP,
                                               pdTRUE, pdFALSE, pdMS_TO_TICKS(10));
        if (bits & NEED_DELETE) {
            ESP_LOGW(TAG, "Player deleted");
            xEventGroupSetBits(ctx->events.event_group, DELETE_DONE);
            vTaskDeleteWithCaps(NULL);
        }
        if (bits & WAIT_STOP) {
            // Acknowledge a synchronous stop (see anim_player_set_src_data).
            xEventGroupSetBits(ctx->events.event_group, WAIT_STOP_DONE);
        }
        // Check for new events in queue
        if (xQueueReceive(ctx->events.event_queue, &player_event, 0) == pdTRUE) {
            run_ctx.action = player_event.action;
            run_ctx.run_start = ctx->run_start;
            run_ctx.run_end = ctx->run_end;
            run_ctx.repeat = ctx->repeat;
            run_ctx.fps = ctx->fps;
            ESP_LOGD(TAG, "Player updated [%s]: %d -> %d, repeat:%d, fps:%d",
                     run_ctx.action == PLAYER_ACTION_START ? "START" : "STOP",
                     run_ctx.run_start, run_ctx.run_end, run_ctx.repeat, run_ctx.fps);
        }
        if (run_ctx.action == PLAYER_ACTION_STOP) {
            continue;
        }
        // Process animation frames
        do {
            for (int i = run_ctx.run_start; (i <= run_ctx.run_end) && (run_ctx.action != PLAYER_ACTION_STOP); i++) {
                // Frame rate control: sleep off the rest of the frame period.
                uint32_t current_time = xTaskGetTickCount();
                uint32_t elapsed = current_time - run_ctx.last_frame_time;
                if (elapsed < pdMS_TO_TICKS(FPS_TO_MS(run_ctx.fps))) {
                    vTaskDelay(pdMS_TO_TICKS(FPS_TO_MS(run_ctx.fps)) - elapsed);
                }
                run_ctx.last_frame_time = xTaskGetTickCount();
                // Check for new events or delete request
                bits = xEventGroupWaitBits(ctx->events.event_group,
                                           NEED_DELETE | WAIT_STOP,
                                           pdTRUE, pdFALSE, pdMS_TO_TICKS(0));
                if (bits & NEED_DELETE) {
                    ESP_LOGW(TAG, "Playing deleted");
                    xEventGroupSetBits(ctx->events.event_group, DELETE_DONE);
                    // NOTE(review): the idle path uses vTaskDeleteWithCaps() but
                    // this path uses vTaskDelete() — confirm which matches how
                    // the task's stack was allocated.
                    vTaskDelete(NULL);
                }
                if (bits & WAIT_STOP) {
                    xEventGroupSetBits(ctx->events.event_group, WAIT_STOP_DONE);
                }
                if (xQueueReceive(ctx->events.event_queue, &player_event, 0) == pdTRUE) {
                    run_ctx.action = player_event.action;
                    run_ctx.run_start = ctx->run_start;
                    run_ctx.run_end = ctx->run_end;
                    run_ctx.fps = ctx->fps;
                    if (run_ctx.action == PLAYER_ACTION_STOP) {
                        // A stop also cancels looping so the do/while exits.
                        run_ctx.repeat = false;
                    } else {
                        run_ctx.repeat = ctx->repeat;
                    }
                    ESP_LOGD(TAG, "Playing updated [%s]: %d -> %d, repeat:%d, fps:%d",
                             run_ctx.action == PLAYER_ACTION_START ? "START" : "STOP",
                             run_ctx.run_start, run_ctx.run_end, run_ctx.repeat, run_ctx.fps);
                    // Restart the frame loop with the new parameters.
                    break;
                }
                const void *frame_data = anim_vfs_get_frame_data(ctx->info.file_desc, i);
                size_t frame_size = anim_vfs_get_frame_size(ctx->info.file_desc, i);
                image_format_t format = anim_dec_parse_header(frame_data, frame_size, &header);
                if (format == IMAGE_FORMAT_INVALID) {
                    ESP_LOGE(TAG, "Invalid frame format");
                    continue;
                } else if (format == IMAGE_FORMAT_REDIRECT) {
                    // NOTE(review): a redirect parse allocates header->palette
                    // (the filename) and it is not freed on this path — possible
                    // leak; confirm whether redirect frames can occur here.
                    ESP_LOGE(TAG, "Invalid redirect frame");
                    continue;
                } else if (format == IMAGE_FORMAT_SBMP) {
                    anim_player_parse(frame_data, frame_size, &header, ctx);
                    if (ctx->update_cb) {
                        ctx->update_cb(ctx, PLAYER_EVENT_ONE_FRAME_DONE);
                    }
                }
            }
            if (ctx->update_cb) {
                ctx->update_cb(ctx, PLAYER_EVENT_ALL_FRAME_DONE);
            }
        } while (run_ctx.repeat);
        // Segment finished (or stopped): go idle until the next event.
        run_ctx.action = PLAYER_ACTION_STOP;
        if (ctx->update_cb) {
            ctx->update_cb(ctx, PLAYER_EVENT_IDLE);
        }
    }
}
/**
 * Signal that the display driver has finished flushing the last stripe.
 * Safe to call from either task or ISR context; returns the result of
 * setting the WAIT_FLUSH_DONE bit, or false for a NULL handle.
 */
bool anim_player_flush_ready(anim_player_handle_t handle)
{
    anim_player_context_t *player = (anim_player_context_t *)handle;
    if (!player) {
        return false;
    }
    if (!xPortInIsrContext()) {
        // Plain task context: set the bit directly.
        return xEventGroupSetBits(player->events.event_group, WAIT_FLUSH_DONE);
    }
    // ISR context: use the FromISR variant and yield if a higher-priority
    // task was woken by the bit set.
    BaseType_t task_woken = pdFALSE;
    bool ok = xEventGroupSetBitsFromISR(player->events.event_group, WAIT_FLUSH_DONE, &task_woken);
    if (task_woken == pdTRUE) {
        portYIELD_FROM_ISR();
    }
    return ok;
}
/**
 * Post a start/stop action to the player task's event queue.
 * Waits up to 10 ms for queue space; a full queue is logged and dropped.
 */
void anim_player_update(anim_player_handle_t handle, player_action_t event)
{
    anim_player_context_t *player = (anim_player_context_t *)handle;
    if (player == NULL) {
        ESP_LOGE(TAG, "Invalid player context");
        return;
    }
    anim_player_event_t msg;
    msg.action = event;
    if (xQueueSend(player->events.event_queue, &msg, pdMS_TO_TICKS(10)) != pdTRUE) {
        ESP_LOGE(TAG, "Failed to send event to queue");
    }
    ESP_LOGD(TAG, "update event: %s", event == PLAYER_ACTION_START ? "START" : "STOP");
}
/**
 * Replace the animation source buffer.
 *
 * Parses the new asset first, then stops the current playback, waits for
 * the player task to acknowledge the stop, swaps in the new descriptor
 * and resets the playback window to the whole asset (looped, default FPS).
 *
 * Fix over the previous version: new_desc was tested against NULL after
 * anim_vfs_init() without ever being initialized — if init fails without
 * writing the out-parameter, an indeterminate value was read. It is now
 * initialized to NULL.
 *
 * @param handle   Player handle from anim_player_init().
 * @param src_data New merged-asset buffer (must outlive playback).
 * @param src_len  Length of @p src_data in bytes.
 * @return ESP_OK on success, ESP_FAIL on bad handle or parse failure.
 */
esp_err_t anim_player_set_src_data(anim_player_handle_t handle, const void *src_data, size_t src_len)
{
    anim_player_context_t *ctx = (anim_player_context_t *)handle;
    if (ctx == NULL) {
        ESP_LOGE(TAG, "Invalid player context");
        return ESP_FAIL;
    }
    anim_vfs_handle_t new_desc = NULL; // fix: was read uninitialized on init failure
    anim_vfs_init(src_data, src_len, &new_desc);
    if (new_desc == NULL) {
        ESP_LOGE(TAG, "Failed to initialize asset parser");
        return ESP_FAIL;
    }
    // Stop playback and wait for the task to acknowledge before swapping.
    anim_player_update(handle, PLAYER_ACTION_STOP);
    xEventGroupSetBits(ctx->events.event_group, WAIT_STOP);
    xEventGroupWaitBits(ctx->events.event_group, WAIT_STOP_DONE, pdTRUE, pdFALSE, portMAX_DELAY);
    //delete old file_desc
    if (ctx->info.file_desc) {
        anim_vfs_deinit(ctx->info.file_desc);
        ctx->info.file_desc = NULL;
    }
    ctx->info.file_desc = new_desc;
    ctx->info.start = 0;
    // NOTE(review): assumes the asset holds at least one frame; a zero
    // frame count would wrap to UINT32_MAX here — confirm with anim_vfs.
    ctx->info.end = anim_vfs_get_total_frames(new_desc) - 1;
    //default segment: play everything, looped, at the configured FPS
    ctx->run_start = ctx->info.start;
    ctx->run_end = ctx->info.end;
    ctx->repeat = true;
    ctx->fps = CONFIG_ANIM_PLAYER_DEFAULT_FPS;
    return ESP_OK;
}
/**
 * @brief Query the full frame range of the currently loaded source.
 *
 * @param handle Player handle
 * @param start  Output: first frame index (may be NULL to skip)
 * @param end    Output: last frame index (may be NULL to skip)
 */
void anim_player_get_segment(anim_player_handle_t handle, uint32_t *start, uint32_t *end)
{
    anim_player_context_t *ctx = (anim_player_context_t *)handle;
    if (ctx == NULL) {
        ESP_LOGE(TAG, "Invalid player context");
        return;
    }
    // BUG FIX: guard against NULL output pointers instead of dereferencing them.
    if (start) {
        *start = ctx->info.start;
    }
    if (end) {
        *end = ctx->info.end;
    }
}
/**
 * @brief Configure the playback segment, frame rate and repeat mode.
 *
 * @param handle Player handle
 * @param start  First frame of the segment (inclusive)
 * @param end    Last frame of the segment (inclusive, <= source end)
 * @param fps    Playback rate in frames per second (must be non-zero)
 * @param repeat true to loop the segment, false to play it once
 */
void anim_player_set_segment(anim_player_handle_t handle, uint32_t start, uint32_t end, uint32_t fps, bool repeat)
{
    anim_player_context_t *ctx = (anim_player_context_t *)handle;
    if (ctx == NULL) {
        ESP_LOGE(TAG, "Invalid player context");
        return;
    }
    if (end > ctx->info.end || (start > end)) {
        ESP_LOGE(TAG, "Invalid segment");
        return;
    }
    if (fps == 0) {
        // BUG FIX: reject a zero frame rate; the playback task presumably
        // derives its frame delay from fps (division) — confirm in task code.
        ESP_LOGE(TAG, "Invalid fps");
        return;
    }
    ctx->run_start = start;
    ctx->run_end = end;
    ctx->repeat = repeat;
    ctx->fps = fps;
    ESP_LOGD(TAG, "set segment: %" PRIu32 " -> %" PRIu32 ", repeat:%d, fps:%" PRIu32 "", start, end, repeat, fps);
}
/**
 * @brief Retrieve the user data pointer supplied at init time.
 *
 * @param handle Player handle
 * @return User data pointer, or NULL on invalid handle
 */
void *anim_player_get_user_data(anim_player_handle_t handle)
{
    anim_player_context_t *ctx = (anim_player_context_t *)handle;
    if (!ctx) {
        ESP_LOGE(TAG, "Invalid player context");
        return NULL;
    }
    return ctx->user_data;
}
anim_player_handle_t anim_player_init(const anim_player_config_t *config)
{
if (!config) {
ESP_LOGE(TAG, "Invalid configuration");
return NULL;
}
anim_player_context_t *player = malloc(sizeof(anim_player_context_t));
if (!player) {
ESP_LOGE(TAG, "Failed to allocate player context");
return NULL;
}
player->info.file_desc = NULL;
player->info.start = 0;
player->info.end = 0;
player->run_start = 0;
player->run_end = 0;
player->repeat = false;
player->fps = CONFIG_ANIM_PLAYER_DEFAULT_FPS;
player->flush_cb = config->flush_cb;
player->update_cb = config->update_cb;
player->user_data = config->user_data;
player->flags.swap = config->flags.swap;
player->events.event_group = xEventGroupCreate();
player->events.event_queue = xQueueCreate(5, sizeof(anim_player_event_t));
// Set default task configuration if not specified
const uint32_t caps = config->task.task_stack_caps ? config->task.task_stack_caps : MALLOC_CAP_DEFAULT; // caps cannot be zero
if (config->task.task_affinity < 0) {
xTaskCreateWithCaps(anim_player_task, "Anim Player", config->task.task_stack, player, config->task.task_priority, &player->handle_task, caps);
} else {
}
return (anim_player_handle_t)player;
}
/**
 * @brief Stop the player task and release all resources owned by the handle.
 *
 * Blocks until the playback task confirms deletion before freeing the
 * synchronization primitives it uses.
 *
 * @param handle Player handle returned by anim_player_init()
 */
void anim_player_deinit(anim_player_handle_t handle)
{
    anim_player_context_t *ctx = (anim_player_context_t *)handle;
    if (ctx == NULL) {
        ESP_LOGE(TAG, "Invalid player context");
        return;
    }
    // Ask the task to exit and wait for its acknowledgment; the task must not
    // be running while its queue/event group are destroyed below.
    if (ctx->events.event_group) {
        xEventGroupSetBits(ctx->events.event_group, NEED_DELETE);
        xEventGroupWaitBits(ctx->events.event_group, DELETE_DONE, pdTRUE, pdFALSE, portMAX_DELAY);
    }
    // Delete event group
    if (ctx->events.event_group) {
        vEventGroupDelete(ctx->events.event_group);
        ctx->events.event_group = NULL;
    }
    // Delete event queue
    if (ctx->events.event_queue) {
        vQueueDelete(ctx->events.event_queue);
        ctx->events.event_queue = NULL;
    }
    // Release the parsed asset descriptor, if a source was ever set.
    if (ctx->info.file_desc) {
        anim_vfs_deinit(ctx->info.file_desc);
        ctx->info.file_desc = NULL;
    }
    // Free player context
    free(ctx);
}

View File

@@ -0,0 +1,135 @@
/*
* SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include <string.h>
#include "esp_err.h"
#include "esp_log.h"
#include "esp_check.h"
#include "anim_player.h"
#include "anim_vfs.h"
static const char *TAG = "anim_vfs";
#define ASSETS_FILE_NUM_OFFSET 0
#define ASSETS_CHECKSUM_OFFSET 4
#define ASSETS_TABLE_LEN 8
#define ASSETS_TABLE_OFFSET 12
#define ASSETS_FILE_MAGIC_HEAD 0x5A5A
#define ASSETS_FILE_MAGIC_LEN 2
/**
* @brief Asset table structure, contains detailed information for each asset.
*/
#pragma pack(1)
typedef struct {
uint32_t asset_size; /*!< Size of the asset */
uint32_t asset_offset; /*!< Offset of the asset */
} asset_table_entry_t;
#pragma pack()
/* Per-frame bookkeeping: the raw table entry plus the resolved pointer to
 * the frame's bytes inside the asset blob. */
typedef struct {
    const char *asset_mem;
    const asset_table_entry_t *table;
} asset_entry_t;
/* Parser state hidden behind anim_vfs_handle_t. */
typedef struct {
    asset_entry_t *entries;
    int total_frames;
} anim_vfs_t;
/**
 * @brief Parse a packed animation asset blob and build a frame lookup table.
 *
 * Blob layout: [frame count][checksum][payload length][(size, offset) table]
 * [frame data], with each frame prefixed by the 0x5A5A magic word.
 *
 * @param data       Pointer to the asset blob (must outlive the handle)
 * @param data_len   Length of the blob in bytes
 * @param ret_parser Output: parser handle on success
 * @return ESP_OK on success; ESP_ERR_INVALID_ARG/ESP_ERR_NO_MEM/ESP_ERR_INVALID_CRC on error
 */
esp_err_t anim_vfs_init(const uint8_t *data, size_t data_len, anim_vfs_handle_t *ret_parser)
{
    esp_err_t ret = ESP_OK;
    asset_entry_t *entries = NULL;
    anim_vfs_t *parser = NULL;

    ESP_GOTO_ON_FALSE(data && ret_parser, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
    ESP_GOTO_ON_FALSE(data_len > ASSETS_TABLE_OFFSET, ESP_ERR_INVALID_ARG, err, TAG, "data too short");

    parser = (anim_vfs_t *)calloc(1, sizeof(anim_vfs_t));
    ESP_GOTO_ON_FALSE(parser, ESP_ERR_NO_MEM, err, TAG, "no mem for parser handle");

    int total_frames = *(int *)(data + ASSETS_FILE_NUM_OFFSET);
    ESP_GOTO_ON_FALSE(total_frames > 0, ESP_ERR_INVALID_ARG, err, TAG, "invalid frame count");
    // uint32_t stored_chksum = *(uint32_t *)(data + ASSETS_CHECKSUM_OFFSET);
    // uint32_t stored_len = *(uint32_t *)(data + ASSETS_TABLE_LEN);

    entries = (asset_entry_t *)malloc(sizeof(asset_entry_t) * total_frames);
    // BUG FIX: allocation result was previously used unchecked.
    ESP_GOTO_ON_FALSE(entries, ESP_ERR_NO_MEM, err, TAG, "no mem for entry table");

    const asset_table_entry_t *table = (const asset_table_entry_t *)(data + ASSETS_TABLE_OFFSET);
    for (int i = 0; i < total_frames; i++) {
        entries[i].table = &table[i];
        entries[i].asset_mem = (const char *)(data + ASSETS_TABLE_OFFSET + total_frames * sizeof(asset_table_entry_t) + table[i].asset_offset);
        // Every frame must start with the 0x5A5A magic word written by the packer.
        const uint16_t *magic_ptr = (const uint16_t *)entries[i].asset_mem;
        ESP_GOTO_ON_FALSE(*magic_ptr == ASSETS_FILE_MAGIC_HEAD, ESP_ERR_INVALID_CRC, err, TAG, "bad file magic header");
    }

    parser->entries = entries;
    parser->total_frames = total_frames;
    *ret_parser = (anim_vfs_handle_t)parser;
    return ESP_OK;

err:
    free(entries);
    free(parser);
    return ret;
}
/**
 * @brief Release a parser handle created by anim_vfs_init().
 *
 * @param handle Parser handle (asserted non-NULL in debug builds)
 * @return Always ESP_OK
 */
esp_err_t anim_vfs_deinit(anim_vfs_handle_t handle)
{
    assert(handle && "handle is invalid");
    anim_vfs_t *parser = (anim_vfs_t *)handle;
    if (parser == NULL) {
        return ESP_OK;
    }
    if (parser->entries != NULL) {
        free(parser->entries);
    }
    free(parser);
    return ESP_OK;
}
/**
 * @brief Number of frames contained in the parsed asset blob.
 *
 * @param handle Parser handle
 * @return Total frame count
 */
int anim_vfs_get_total_frames(anim_vfs_handle_t handle)
{
    assert(handle && "handle is invalid");
    const anim_vfs_t *parser = (const anim_vfs_t *)handle;
    return parser->total_frames;
}
/**
 * @brief Get a pointer to the payload of frame `index`.
 *
 * @param handle Parser handle
 * @param index  Frame index in [0, total_frames)
 * @return Pointer past the magic prefix, or NULL when index is out of range
 */
const uint8_t *anim_vfs_get_frame_data(anim_vfs_handle_t handle, int index)
{
    assert(handle && "handle is invalid");
    anim_vfs_t *parser = (anim_vfs_t *)(handle);
    // BUG FIX: negative indices were accepted; also the log reported the
    // frame count as the "maximum index".
    if (index < 0 || index >= parser->total_frames) {
        ESP_LOGE(TAG, "Invalid index: %d. Valid range is 0 to %d.", index, parser->total_frames - 1);
        return NULL;
    }
    // Skip the 0x5A5A magic word that prefixes each stored frame.
    return (const uint8_t *)((parser->entries + index)->asset_mem + ASSETS_FILE_MAGIC_LEN);
}
/**
 * @brief Get the payload size in bytes of frame `index`.
 *
 * @param handle Parser handle
 * @param index  Frame index in [0, total_frames)
 * @return Payload size (excluding the magic prefix), or -1 when out of range
 */
int anim_vfs_get_frame_size(anim_vfs_handle_t handle, int index)
{
    assert(handle && "handle is invalid");
    anim_vfs_t *parser = (anim_vfs_t *)(handle);
    // BUG FIX: negative indices were accepted; also the log reported the
    // frame count as the "maximum index".
    if (index < 0 || index >= parser->total_frames) {
        ESP_LOGE(TAG, "Invalid index: %d. Valid range is 0 to %d.", index, parser->total_frames - 1);
        return -1;
    }
    return ((parser->entries + index)->table->asset_size - ASSETS_FILE_MAGIC_LEN);
}

View File

@@ -0,0 +1,14 @@
dependencies:
cmake_utilities:
version: 0.*
idf:
version: '>=5.0'
description: An image rendering component for ESP-IDF, optimized for efficient playback.
documentation: https://bellard.org/image_player/
issues: https://github.com/espressif2022/image_player/issues
repository: git://github.com/espressif2022/image_player.git
repository_info:
commit_sha: d61e0ccb1d7b0a2a214d0c90f5451fd683f41682
path: .
url: https://github.com/espressif2022/image_player
version: 1.1.0~1

View File

@@ -0,0 +1,125 @@
#pragma once
#include <stdbool.h>
#include "esp_err.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
 * @brief Default task settings for the player.
 *
 * NOTE(review): this initializer sets the fields of the nested `task`
 * sub-struct only; it is presumably used as `.task = ANIM_PLAYER_INIT_CONFIG()`
 * rather than to initialize a whole anim_player_config_t — confirm with callers.
 */
#define ANIM_PLAYER_INIT_CONFIG() \
    { \
        .task_priority = 4, \
        .task_stack = 7168, \
        .task_affinity = -1, \
        .task_stack_caps = MALLOC_CAP_DEFAULT, \
    }
/* Opaque handle to a player instance created by anim_player_init(). */
typedef void *anim_player_handle_t;
/* Actions the application can post via anim_player_update(). */
typedef enum {
    PLAYER_ACTION_STOP = 0,
    PLAYER_ACTION_START,
} player_action_t;
/* State-change events delivered through the update callback. */
typedef enum {
    PLAYER_EVENT_IDLE = 0,
    PLAYER_EVENT_ONE_FRAME_DONE,
    PLAYER_EVENT_ALL_FRAME_DONE,
} player_event_t;
/* Called with a decoded pixel region (x1,y1)-(x2,y2) ready to be flushed to
 * the display; the application must later call anim_player_flush_ready(). */
typedef void (*anim_flush_cb_t)(anim_player_handle_t handle, int x1, int y1, int x2, int y2, const void *data);
/* Called on player state transitions (see player_event_t). */
typedef void (*anim_update_cb_t)(anim_player_handle_t handle, player_event_t event);
typedef struct {
anim_flush_cb_t flush_cb; ///< Callback function for flushing decoded data
anim_update_cb_t update_cb; ///< Callback function for updating player
void *user_data; ///< User data
struct {
unsigned char swap:1;
} flags;
struct {
int task_priority; ///< Task priority (1-20)
int task_stack; ///< Task stack size in bytes
int task_affinity; ///< CPU core ID (-1: no affinity, 0: core 0, 1: core 1)
unsigned task_stack_caps; /*!< LVGL task stack memory capabilities (see esp_heap_caps.h) */
} task;
} anim_player_config_t;
/**
* @brief Initialize animation player
*
* @param config Player configuration
* @return anim_player_handle_t Player handle, NULL on error
*/
anim_player_handle_t anim_player_init(const anim_player_config_t *config);
/**
* @brief Deinitialize animation player
*
* @param handle Player handle
*/
void anim_player_deinit(anim_player_handle_t handle);
/**
* @brief Update player event
*
* @param handle Player handle
* @param event New event
*/
void anim_player_update(anim_player_handle_t handle, player_action_t event);
/**
* @brief Check if flush is ready
*
* @param handle Player handle
* @return bool True if the flush is ready, false otherwise
*/
bool anim_player_flush_ready(anim_player_handle_t handle);
/**
* @brief Set the source data of the animation
*
* @param handle Player handle
* @param src_data Source data
* @param src_len Source data length
* @return esp_err_t ESP_OK if successful, otherwise an error code
*/
esp_err_t anim_player_set_src_data(anim_player_handle_t handle, const void *src_data, size_t src_len);
/**
* @brief Get the segment of the animation
*
* @param handle Player handle
* @param start Start index
* @param end End index
*/
void anim_player_get_segment(anim_player_handle_t handle, uint32_t *start, uint32_t *end);
/**
 * @brief Set the segment of the animation
 *
 * @param handle Player handle
 * @param start Start index
 * @param end End index
 * @param fps Playback frame rate in frames per second
 * @param repeat Repeat setting
 */
void anim_player_set_segment(anim_player_handle_t handle, uint32_t start, uint32_t end, uint32_t fps, bool repeat);
/**
* @brief Get the user data of the animation
*
* @param handle Player handle
* @return void* User data
*/
void *anim_player_get_user_data(anim_player_handle_t handle);
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,93 @@
/*
* SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include "esp_err.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include <string.h>
#include "esp_err.h"
#include "esp_log.h"
#include "esp_check.h"
#include "anim_player.h"
#include "anim_vfs.h"
/* Container format of a frame file, detected from its magic prefix. */
typedef enum {
    IMAGE_FORMAT_SBMP = 0, // Split BMP format
    IMAGE_FORMAT_REDIRECT = 1, // Redirect format
    IMAGE_FORMAT_INVALID = 2
} image_format_t;
/* Compression scheme applied to the split payloads. */
typedef enum {
    ENCODING_TYPE_RLE = 0,
    ENCODING_TYPE_HUFFMAN = 1,
    ENCODING_TYPE_INVALID = 2
} encoding_type_t;
// Image header structure
typedef struct {
    char format[3]; // Format identifier (e.g., "_S")
    char version[6]; // Version string
    uint8_t bit_depth; // Bit depth (4 or 8)
    uint16_t width; // Image width
    uint16_t height; // Image height
    uint16_t splits; // Number of splits
    uint16_t split_height; // Height of each split
    uint16_t *split_lengths; // Data length of each split
    uint16_t data_offset; // Offset to data segment
    uint8_t *palette; // Color palette (dynamically allocated)
    int num_colors; // Number of colors in palette
} image_header_t;
// Huffman tree node structure
typedef struct huffman_node {
    uint8_t value; // Character value for leaf nodes
    struct huffman_node *left;
    struct huffman_node *right;
} huffman_node_t;
// NOTE(review): second Huffman node type that appears redundant with
// huffman_node_t above — confirm which one the decoder actually uses.
typedef struct Node {
    uint8_t is_leaf;
    uint8_t value;
    struct Node* left;
    struct Node* right;
} Node;
/**
* @brief Parse the header of an image file
* @param data Pointer to the image data
* @param data_len Length of the image data
* @param header Pointer to store the parsed header information
* @return Image format type (SBMP, REDIRECT, or INVALID)
*/
image_format_t anim_dec_parse_header(const uint8_t *data, size_t data_len, image_header_t *header);
/* Look up entry `index` in the header's palette — presumably returns a packed
 * color value; confirm against the implementation. */
uint32_t anim_dec_parse_palette(const image_header_t *header, uint8_t index);
/* Fill `offsets` with the cumulative byte offset of each split, derived from
 * header->split_lengths. */
void anim_dec_calculate_offsets(const image_header_t *header, uint16_t *offsets);
/* Free the buffers owned by the header (split_lengths, palette). */
void anim_dec_free_header(image_header_t *header);
/* Huffman-decode `buffer` into `output`; NOTE(review): whether *output_len
 * carries capacity in and length out is not visible here — confirm. */
esp_err_t anim_dec_huffman_decode(const uint8_t* buffer, size_t buflen, uint8_t* output, size_t* output_len);
/* Decode a run-length-encoded ([count, value] pairs) buffer into `output`. */
esp_err_t anim_dec_rte_decode(const uint8_t *input, size_t input_len, uint8_t *output, size_t output_len);
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,29 @@
/*
* SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include "esp_err.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Opaque handle to a parsed animation asset pack. */
typedef struct anim_vfs_t *anim_vfs_handle_t;
/* Parse an in-memory asset pack; on success *ret_parser receives a handle. */
esp_err_t anim_vfs_init(const uint8_t *data, size_t data_len, anim_vfs_handle_t *ret_parser);
/* Release a handle obtained from anim_vfs_init(). */
esp_err_t anim_vfs_deinit(anim_vfs_handle_t handle);
/* Number of frames contained in the pack. */
int anim_vfs_get_total_frames(anim_vfs_handle_t handle);
/* Payload size in bytes of frame `index`, or a negative value if out of range. */
int anim_vfs_get_frame_size(anim_vfs_handle_t handle, int index);
/* Pointer to the payload of frame `index`, or NULL if out of range. */
const uint8_t *anim_vfs_get_frame_data(anim_vfs_handle_t handle, int index);
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,229 @@
import os
import argparse
from dataclasses import dataclass
import logging
import re
from collections import defaultdict
@dataclass
class PackModelsConfig:
    # Directory containing the .sbmp frame files to pack.
    target_path: str
    # Path of the output packed (.aaf) file.
    image_file: str
    # NOTE(review): not referenced by pack_assets beyond assignment — confirm.
    assets_path: str
def setup_logging():
    """Configure root logging to write to frame_merge.log and the console."""
    log_handlers = [
        logging.FileHandler('frame_merge.log'),
        logging.StreamHandler(),
    ]
    logging.basicConfig(
        level=logging.INFO,
        format='%(levelname)s - %(message)s',
        handlers=log_handlers,
    )
def compute_checksum(data):
    """Return the sum of all byte values in *data*, truncated to 32 bits."""
    total = 0
    for value in data:
        total += value
    return total & 0xFFFFFFFF
def get_frame_info(filename):
    """Split a name like 'walk_0007.sbmp' into ('walk', 7).

    Returns (None, 0) when the filename does not match the expected pattern.
    """
    match = re.search(r'(.+)_(\d+)\.sbmp$', filename)
    if match is None:
        return None, 0
    return match.group(1), int(match.group(2))
def sort_key(filename):
    """Sorting key: (frame name, frame number); unparseable names sort first."""
    name, number = get_frame_info(filename)
    if not name:
        return ('', 0)
    return (name, number)
def pack_assets(config: PackModelsConfig):
    """
    Pack all .sbmp frames under config.target_path into one binary file.

    Output layout: [frame count (4B LE)] [checksum (4B LE)] [payload length
    (4B LE)] [(size, offset) table, one pair per frame] [frame data].
    Frames whose content starts with '_R' are "repeated" frames: their table
    entry points at the data of the referenced original frame instead of
    storing a second copy.
    """
    setup_logging()
    target_path = config.target_path
    out_file = config.image_file
    assets_path = config.assets_path  # NOTE(review): unused below — confirm
    merged_data = bytearray()
    frame_info_list = [] # List of (frame_number, offset, size, is_repeated, original_frame)
    frame_map = {} # Store frame offsets and sizes by frame number
    # First pass: process all frames and collect information
    file_list = sorted(os.listdir(target_path), key=sort_key)
    for filename in file_list:
        if not filename.lower().endswith('.sbmp'):
            continue
        file_path = os.path.join(target_path, filename)
        try:
            file_size = os.path.getsize(file_path)
            frame_name, frame_number = get_frame_info(filename)
            if not frame_name:
                logging.warning(f"Invalid filename format: (unknown)")
                continue
            # Read file content to check for _R prefix
            with open(file_path, 'rb') as bin_file:
                bin_data = bin_file.read()
            if not bin_data:
                logging.warning(f"Empty file '(unknown)'")
                continue
            # Check if this is a repeated frame
            if bin_data.startswith(b'_R'):
                # Extract the original frame name from content
                try:
                    # Format: _R + filename_length(1 byte) + original_filename
                    filename_length = bin_data[2] # Get filename length (1 byte)
                    original_frame = bin_data[3:3+filename_length].decode('utf-8')
                    original_frame_name, original_frame_num = get_frame_info(original_frame)
                    logging.info(f"Repeated {frame_name}_{frame_number} referencing {original_frame_name}_{original_frame_num}")
                    frame_info_list.append((frame_number, 0, file_size, True, original_frame_num))
                except (ValueError, IndexError) as e:
                    logging.error(f"Invalid repeated frame format in (unknown): {str(e)}")
                continue
            # Process original frame
            logging.info(f"Original {frame_name}_{frame_number} with size {file_size} bytes")
            # Add 0x5A5A prefix to merged_data
            merged_data.extend(b'\x5A' * 2)
            merged_data.extend(bin_data)
            # Update frame info with correct offset and size (including prefix)
            # frame_number, offset, size, is_repeated, original_frame_num
            frame_info_list.append((frame_number, len(merged_data) - file_size - 2, file_size + 2, False, None))
            frame_map[frame_number] = (len(merged_data) - file_size - 2, file_size + 2)
        except IOError as e:
            logging.error(f"Could not read file '(unknown)': {str(e)}")
            continue
        except Exception as e:
            logging.error(f"Unexpected error processing file '(unknown)': {str(e)}")
            continue
    # Second pass: update repeated frame offsets and recalculate
    file_info_list = []
    new_merged_data = bytearray()
    new_offset = 0
    # First add all original frames to new_merged_data
    for frame_number, offset, size, is_repeated, original_frame in frame_info_list:
        if not is_repeated:
            frame_data = merged_data[offset:offset+size]
            new_merged_data.extend(frame_data)
            # Align to 4 bytes
            # padding = (4 - (len(new_merged_data) % 4)) % 4
            # if padding > 0:
            #     new_merged_data.extend(b'\x00' * padding)
            # Update frame map with new offset
            frame_map[frame_number] = (new_offset, size)
            print(f" O [{frame_number}] frame_data: 0x{new_offset:08x} ({size})")
            file_info_list.append((new_offset, size))
            new_offset = len(new_merged_data)
        else:
            # Repeated frame: reuse the referenced original's (offset, size).
            if original_frame in frame_map:
                orig_offset, orig_size = frame_map[original_frame]
                file_info_list.append((orig_offset, orig_size))
                print(f" R [{frame_number}] frame_data: 0x{orig_offset:08x} ({orig_size})")
    total_files = len(file_info_list)
    if total_files == 0:
        logging.error("No .sbmp files found to process")
        return
    # Build the (size, offset) lookup table, 8 bytes per frame, little-endian.
    mmap_table = bytearray()
    for i, (offset, file_size) in enumerate(file_info_list):
        mmap_table.extend(file_size.to_bytes(4, byteorder='little'))
        mmap_table.extend(offset.to_bytes(4, byteorder='little'))
        logging.info(f"[{i + 1}] frame_data: 0x{offset:08x} ({file_size})")
    # Align mmap_table to 4 bytes
    padding = (4 - (len(mmap_table) % 4)) % 4
    if padding > 0:
        mmap_table.extend(b'\x00' * padding)
    combined_data = mmap_table + new_merged_data
    combined_checksum = compute_checksum(combined_data)
    combined_data_length = len(combined_data).to_bytes(4, byteorder='little')
    header_data = total_files.to_bytes(4, byteorder='little') + combined_checksum.to_bytes(4, byteorder='little')
    final_data = header_data + combined_data_length + combined_data
    try:
        with open(out_file, 'wb') as output_bin:
            output_bin.write(final_data)
        logging.info(f"\nSuccessfully packed {total_files} .sbmp files into {out_file}")
        logging.info(f"Total size: {len(final_data)} bytes")
        logging.info(f"Header size: {len(header_data)} bytes")
        logging.info(f"Table size: {len(mmap_table)} bytes")
        logging.info(f"Data size: {len(new_merged_data)} bytes")
    except IOError as e:
        logging.error(f"Failed to write output file: {str(e)}")
    except Exception as e:
        logging.error(f"Unexpected error writing output file: {str(e)}")
def process_directory(input_dir):
    """Group the directory's .sbmp files by base name and pack each group.

    Each group '<name>_NNNN.sbmp' is packed into '<name>.aaf' in the same
    directory, using a temporary hard-linked staging directory per group.
    """
    # Group files by their base name
    file_groups = defaultdict(list)
    for filename in os.listdir(input_dir):
        if filename.lower().endswith('.sbmp'):
            name, _ = get_frame_info(filename)
            if name:
                file_groups[name].append(filename)
    # Process each group
    for name, files in file_groups.items():
        if not files:
            continue
        # Create output filename based on the group name
        output_file = os.path.join(input_dir, f"{name}.aaf")
        # Create a temporary directory for this group
        temp_dir = os.path.join(input_dir, f"temp_{name}")
        os.makedirs(temp_dir, exist_ok=True)
        # Copy files to temporary directory
        for file in files:
            src = os.path.join(input_dir, file)
            dst = os.path.join(temp_dir, file)
            os.link(src, dst) # Use hard link to save space
        # Process the group
        config = PackModelsConfig(
            target_path=temp_dir,
            image_file=output_file,
            assets_path=temp_dir
        )
        pack_assets(config)
        # Clean up temporary directory
        for file in files:
            os.remove(os.path.join(temp_dir, file))
        os.rmdir(temp_dir)
def main():
    """CLI entry point: pack every .sbmp frame group found in a directory."""
    arg_parser = argparse.ArgumentParser(description='Pack .sbmp files into .aaf files')
    arg_parser.add_argument('input_dir', help='Input directory containing .sbmp files')
    options = arg_parser.parse_args()
    input_dir = options.input_dir
    # Ensure input directory exists
    if not os.path.isdir(input_dir):
        print(f"Error: Input directory '{input_dir}' does not exist")
        return
    print("\nProcessing directory:", input_dir)
    print("-" * 50)
    process_directory(input_dir)
    print("\nProcessing completed!")
    print("-" * 50)
print("-" * 50)
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,884 @@
import numpy as np
import struct
import os
import sys
from PIL import Image
import math
from sklearn.cluster import KMeans
import time
from multiprocessing import Pool, cpu_count
import heapq
from collections import defaultdict, Counter, namedtuple
import argparse
# Define Huffman tree node
class Node:
    """Huffman tree node, ordered by frequency so it can live in a heap."""

    def __init__(self, freq, char, left, right):
        self.freq = freq
        self.char = char
        self.left = left
        self.right = right

    def __lt__(self, other):
        # heapq only requires "less than"; ties are broken arbitrarily.
        return self.freq < other.freq
def floyd_steinberg_dithering(img, bit_depth=4):
    """Apply Floyd-Steinberg dithering, quantizing to 2**bit_depth gray levels.

    Args:
        img: PIL image or array-like of 8-bit grayscale pixel values.
        bit_depth: Target bit depth (4 or 8).

    Returns:
        uint8 numpy array in which EVERY pixel is quantized to a palette level.
    """
    pixels = np.array(img, dtype=np.int32)
    height, width = pixels.shape
    # Calculate quantization levels based on bit depth
    num_levels = 2 ** bit_depth
    step = 256 // (num_levels - 1)
    # BUG FIX: the original loops skipped column 0, the last column and the
    # last row entirely, leaving unquantized pixel values in the output.
    # Quantize every pixel and only diffuse error to in-bounds neighbours.
    for y in range(height):
        for x in range(width):
            old_pixel = pixels[y, x]
            new_pixel = round(old_pixel / step) * step
            pixels[y, x] = new_pixel
            error = old_pixel - new_pixel
            if x + 1 < width:
                pixels[y, x + 1] += error * 7 / 16
            if y + 1 < height:
                if x - 1 >= 0:
                    pixels[y + 1, x - 1] += error * 3 / 16
                pixels[y + 1, x] += error * 5 / 16
                if x + 1 < width:
                    pixels[y + 1, x + 1] += error * 1 / 16
    # Clip to the valid byte range (0 and 255 are both palette levels).
    np.clip(pixels, 0, 255, out=pixels)
    return pixels.astype(np.uint8)
def generate_palette(bit_depth=4):
    """Build an evenly spaced grayscale palette of 2**bit_depth entries.

    Each entry is a (B, G, R, 0) tuple covering 0..255.
    """
    num_colors = 2 ** bit_depth
    return [
        (level, level, level, 0)
        for level in (int(i * 255 / (num_colors - 1)) for i in range(num_colors))
    ]
def process_row(args):
    """Pack one image row into BMP palette-index bytes.

    Args:
        args: Tuple (y, pixels, width, bit_depth, palette, row_padded).

    Returns:
        List of byte values for row y, zero-padded to row_padded entries.
    """
    y, pixels, width, bit_depth, palette, row_padded = args
    row = []
    if bit_depth == 4:
        # Two 4-bit palette indices per byte, high nibble first.
        for x in range(0, width, 2):
            p1 = pixels[y, x] // 17  # map 0-255 -> 0-15
            if x + 1 < width:
                # BUG FIX: the raw 0-255 value was OR'ed in without // 17,
                # corrupting the low nibble (and the high one on overflow).
                p2 = pixels[y, x + 1] // 17
            else:
                p2 = 0
            row.append((p1 << 4) | p2)
    else:  # 8-bit: one palette index per pixel
        for x in range(width):
            color = pixels[y, x]
            row.append(find_closest_color(color, palette))
    while len(row) < row_padded:
        row.append(0)  # pad row to the BMP 4-byte alignment
    return row
def save_bmp(filename, pixels, bit_depth=4):
    """Save a numpy array as a BMP file with specified bit depth.

    Args:
        filename: Destination .bmp path.
        pixels: HxWx3 uint8 array for 8-bit output, HxW grayscale for 4-bit.
        bit_depth: 4 (grayscale, packed nibbles) or 8 (palettized color).
    """
    if bit_depth == 8:
        # For 8-bit color images, use RGB channels
        height, width, _ = pixels.shape
    else:
        # For 4-bit grayscale images
        height, width = pixels.shape
    bits_per_pixel = bit_depth
    bytes_per_pixel = bits_per_pixel // 8  # NOTE(review): unused below
    row_size = (width * bits_per_pixel + 7) // 8
    row_padded = (row_size + 3) & ~3 # 4-byte align each row
    # BMP Header (14 bytes)
    bfType = b'BM'
    bfSize = 14 + 40 + (2 ** bits_per_pixel) * 4 + row_padded * height
    bfReserved1 = 0
    bfReserved2 = 0
    bfOffBits = 14 + 40 + (2 ** bits_per_pixel) * 4
    bmp_header = struct.pack('<2sIHHI', bfType, bfSize, bfReserved1, bfReserved2, bfOffBits)
    # DIB Header (BITMAPINFOHEADER, 40 bytes)
    biSize = 40
    biWidth = width
    biHeight = height
    biPlanes = 1
    biBitCount = bits_per_pixel
    biCompression = 0
    biSizeImage = row_padded * height
    biXPelsPerMeter = 3780
    biYPelsPerMeter = 3780
    biClrUsed = 2 ** bits_per_pixel
    biClrImportant = 2 ** bits_per_pixel
    dib_header = struct.pack('<IIIHHIIIIII',
                             biSize, biWidth, biHeight, biPlanes, biBitCount,
                             biCompression, biSizeImage,
                             biXPelsPerMeter, biYPelsPerMeter,
                             biClrUsed, biClrImportant)
    # Generate appropriate palette based on bit depth
    if bit_depth == 8:
        # For 8-bit color images, generate a color palette
        palette = generate_color_palette(pixels)
    else:
        # For 4-bit grayscale images, use grayscale palette
        palette = generate_palette(bit_depth)
    palette_data = b''.join(struct.pack('<BBBB', *color) for color in palette)
    # Pixel Data
    # Start timing
    start_time = time.time()
    # Rows are emitted bottom-up because BMP stores rows bottom-to-top.
    process_args = [(y, pixels, width, bit_depth, palette, row_padded)
                    for y in range(height - 1, -1, -1)]
    # Use process pool for parallel processing
    with Pool(processes=cpu_count()) as pool:
        rows = pool.map(process_row, process_args)
    # Merge processing results
    pixel_data = bytearray()
    for row in rows:
        pixel_data.extend(row)
    # End timing and print
    end_time = time.time()
    execution_time = end_time - start_time
    # print(f"Processing completed: Image size {width}x{height}, Total time: {execution_time:.3f} seconds")
    with open(filename, 'wb') as f:
        f.write(bmp_header)
        f.write(dib_header)
        f.write(palette_data)
        f.write(pixel_data)
    print(f"{os.path.basename(filename)}")
def generate_color_palette(pixels):
    """Build a 256-entry BGRA palette for an RGB image.

    Uses the image's unique colors directly when there are fewer than 256;
    otherwise clusters them with k-means to pick 256 representatives.
    """
    flat_pixels = pixels.reshape(-1, 3)
    unique_colors = np.unique(flat_pixels, axis=0)
    num_colors = min(len(unique_colors), 256)
    if num_colors < 256:
        # Few enough distinct colors to keep them all.
        colors = unique_colors
    else:
        model = KMeans(n_clusters=num_colors, random_state=0, n_init=10).fit(flat_pixels)
        colors = model.cluster_centers_.astype(np.uint8)
    # Store entries as (B, G, R, A).
    palette = [(color[2], color[1], color[0], 255) for color in colors]
    # Pad to exactly 256 entries.
    palette.extend((0, 0, 0, 255) for _ in range(256 - len(palette)))
    return palette
def find_closest_color(color, palette):
    """Return the index of the palette entry nearest to an RGB color.

    Palette entries are stored as (B, G, R, A); distance is squared Euclidean
    in RGB space.
    """
    best_index = 0
    best_dist = None
    for idx, entry in enumerate(palette):
        dr = int(color[0]) - int(entry[2])
        dg = int(color[1]) - int(entry[1])
        db = int(color[2]) - int(entry[0])
        dist = dr * dr + dg * dg + db * db
        if best_dist is None or dist < best_dist:
            best_dist = dist
            best_index = idx
    return best_index
def convert_gif_to_bmp(gif_path, output_dir, bit_depth=4):
    """Convert GIF frames to BMPs with specified bit depth.

    Each frame is written as '<gifname>_<frame:04d>.bmp' under output_dir.
    4-bit output is grayscale with Floyd-Steinberg dithering; 8-bit output
    keeps the original RGB colors.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    base_name = os.path.splitext(os.path.basename(gif_path))[0]
    with Image.open(gif_path) as im:
        frame = 0
        try:
            while True:
                if bit_depth == 8:
                    # For 8-bit, keep original colors
                    frame_image = im.convert('RGB')
                    # Convert to numpy array while preserving colors
                    pixels = np.array(frame_image)
                    output_path = os.path.join(output_dir, f"{base_name}_{frame:04d}.bmp")
                    save_bmp(output_path, pixels, bit_depth)
                else:
                    # For 4-bit, convert to grayscale and dither
                    gray_frame = im.convert('L')
                    dithered_pixels = floyd_steinberg_dithering(gray_frame, bit_depth)
                    output_path = os.path.join(output_dir, f"{base_name}_{frame:04d}.bmp")
                    save_bmp(output_path, dithered_pixels, bit_depth)
                frame += 1
                im.seek(frame)
        except EOFError:
            # PIL raises EOFError when seeking past the last GIF frame.
            pass
def create_header(width, height, splits, split_height, lenbuf, ext, bit_depth=4):
    """Build the '_S' split-image header for the output file.

    Args:
        width: Image width.
        height: Image height.
        splits: Number of splits.
        split_height: Height of each split.
        lenbuf: List of per-split data lengths.
        ext: Source file extension; only '.bmp' produces a header.
        bit_depth: Bit depth (4 or 8).

    Returns:
        bytearray header (empty when ext is not '.bmp').
    """
    header = bytearray()
    if ext.lower() != '.bmp':
        return header
    # 2-byte magic, then a 6-byte version string.
    header.extend(b'_S')
    header.extend('\x00V1.00\x00'.encode('UTF-8'))
    # 1 byte bit depth.
    header.append(bit_depth)
    # Four little-endian 16-bit geometry fields.
    for field in (width, height, splits, split_height):
        header.extend(field.to_bytes(2, byteorder='little'))
    # One little-endian 16-bit length per split.
    for item_len in lenbuf:
        header.extend(item_len.to_bytes(2, byteorder='little'))
    return header
def rte_compress(data):
    """Run-length encode *data* as [count, value] byte pairs (count <= 255)."""
    compressed = bytearray()
    if not data:
        return compressed
    run_value = data[0]
    run_length = 1
    for current in data[1:]:
        if current == run_value and run_length < 255:
            run_length += 1
            continue
        # Flush the finished run and start a new one.
        compressed.extend((run_length, run_value))
        run_value = current
        run_length = 1
    # Flush the final run.
    compressed.extend((run_length, run_value))
    return compressed
def generate_palette_from_image(im, bit_depth=4):
    """Extracts or generates a palette based on bit depth.
    Args:
        im: PIL Image object
        bit_depth: Bit depth for the palette (4 or 8)
    Returns:
        tuple: (palette_bytes, palette_list)
            - palette_bytes: Byte array containing the palette entries
            - palette_list: List of (R, G, B) tuples for color matching
    """
    num_colors = 2 ** bit_depth
    palette_bytes = bytearray()
    if bit_depth == 8:
        # For 8-bit color images
        if im.mode == 'RGB':
            # Convert to numpy array for color analysis
            pixels = np.array(im)
            # Count unique colors
            unique_colors = np.unique(pixels.reshape(-1, 3), axis=0)
            num_unique = min(len(unique_colors), 256)
            if num_unique < 256:
                # If we have fewer than 256 unique colors, use them directly
                colors = unique_colors
            else:
                # Use k-means clustering to find representative colors
                kmeans = KMeans(n_clusters=num_unique, random_state=0, n_init=10).fit(pixels.reshape(-1, 3))
                colors = kmeans.cluster_centers_.astype(np.uint8)
            # Convert colors to palette format (B, G, R, A)
            for color in colors:
                palette_bytes.extend([color[2], color[1], color[0], 255]) # BGR format
            # Pad palette to 256 colors if necessary
            while len(palette_bytes) < 256 * 4:
                palette_bytes.extend([0, 0, 0, 255])
        else:
            # If not RGB, convert to RGB first and recurse once.
            im = im.convert('RGB')
            return generate_palette_from_image(im, bit_depth)
    else:
        # For 4-bit grayscale images
        if im.mode == 'P':
            # If image already has a palette, use it
            # NOTE(review): if getpalette() returns None here, palette_bytes
            # stays empty and the returned palette_list is empty — confirm
            # callers tolerate that.
            palette = im.getpalette()
            if palette is not None:
                # Extract the first `num_colors` colors from the palette
                palette = palette[:num_colors * 3] # Each color is represented by 3 bytes (R, G, B)
                for i in range(0, len(palette), 3):
                    r, g, b = palette[i:i + 3]
                    palette_bytes.extend([r, g, b, 255]) # Add an alpha channel value of 255
        else:
            # Generate a grayscale palette based on bit depth
            for i in range(num_colors):
                level = int(i * 255 / (num_colors - 1))
                palette_bytes.extend([level, level, level, 255]) # (B, G, R, A)
    # Create palette list for color matching
    palette_list = []
    for i in range(0, len(palette_bytes), 4):
        b, g, r, _ = palette_bytes[i:i + 4]
        palette_list.append((r, g, b))
    return palette_bytes, palette_list
def find_palette_index(pixel_value, palette):
    """Map a grayscale value to the index of the nearest RGB palette entry."""
    gray = (pixel_value, pixel_value, pixel_value)

    def distance_squared(index):
        # Squared Euclidean distance between the gray value and one entry.
        entry = palette[index]
        return sum((gray[channel] - entry[channel]) ** 2 for channel in range(3))

    return min(range(len(palette)), key=distance_squared)
def build_huffman_tree(data):
    """Build a Huffman tree from byte frequencies.

    Args:
        data: Input bytes to analyze.

    Returns:
        Node: root of the tree, or None when data is empty.
    """
    frequencies = Counter(data)
    # One leaf per distinct byte, ordered by frequency in a min-heap.
    heap = [Node(count, symbol, None, None) for symbol, count in frequencies.items()]
    heapq.heapify(heap)
    # Repeatedly merge the two least frequent subtrees.
    while len(heap) > 1:
        left = heapq.heappop(heap)
        right = heapq.heappop(heap)
        heapq.heappush(heap, Node(left.freq + right.freq, None, left, right))
    return heap[0] if heap else None
def build_code_map(node, prefix="", code_map=None):
    """Walk the Huffman tree and collect the bit-string code for each symbol.

    Args:
        node: Current tree node (None is tolerated and ends the branch).
        prefix: Bit string accumulated on the path from the root.
        code_map: Dict being populated; created on the outermost call.

    Returns:
        dict: Symbol -> Huffman code (string of '0'/'1' characters).
    """
    if code_map is None:
        code_map = {}
    if node is None:
        return code_map
    # Leaf nodes carry a symbol; record the path taken to reach them.
    if node.char is not None:
        code_map[node.char] = prefix
    build_code_map(node.left, prefix + "0", code_map)
    build_code_map(node.right, prefix + "1", code_map)
    return code_map
def huffman_compress(data):
    """Compress data using Huffman coding.

    Args:
        data: Input data (iterable of byte values) to compress.

    Returns:
        tuple: (compressed_data, dict_size, dict_bytes)
            - compressed_data: Compressed data as a bytearray
            - dict_size: Number of entries in the code dictionary
            - dict_bytes: Serialized dictionary for decompression
              (None when `data` is empty)
    """
    if not data:
        return bytearray(), 0, None
    # Build Huffman tree and derive the per-byte code table.
    tree = build_huffman_tree(data)
    code_map = build_code_map(tree)
    # Edge case: data with a single distinct byte produces a root-only tree
    # whose code is the empty string, which would break both encoding and
    # int(code, 2) below. Give such symbols an explicit 1-bit code; the
    # decoder rebuilds its tree purely from these codes, so "0" round-trips.
    code_map = {byte: (code if code else "0") for byte, code in code_map.items()}
    # Encode the payload as a string of bits.
    encoded = ''.join(code_map[byte] for byte in data)
    # Pad the bit string to a multiple of 8.
    padding = (8 - len(encoded) % 8) % 8
    encoded += '0' * padding
    # Pack 8 bits at a time into bytes.
    result = bytearray()
    for i in range(0, len(encoded), 8):
        result.append(int(encoded[i:i + 8], 2))
    # Serialize the dictionary: one padding byte, then for each symbol the
    # byte value, the code length in bits, and the code bits (big-endian).
    dict_bytes = bytearray()
    dict_bytes.append(padding)  # Store padding bits at the start of dictionary
    for byte, code in code_map.items():
        dict_bytes.extend([byte, len(code)])  # Byte value and code length
        code_bytes = int(code, 2).to_bytes((len(code) + 7) // 8, byteorder='big')
        dict_bytes.extend(code_bytes)
    return result, len(code_map), dict_bytes
def print_tree(node, prefix="", is_left=True):
    """Print the Huffman tree structure (debug helper).

    Args:
        node: Current node to print; None ends the branch.
        prefix: Indentation prefix accumulated for the current level.
        is_left: Whether this node is a left child (selects the branch glyph).
    """
    if node is None:
        return
    # Print current node: leaves show their symbol, internal nodes show a dot.
    print(f"{prefix}{'└── ' if is_left else '┌── '}", end="")
    if node.char is not None:
        print(f"'{chr(node.char)}' ({node.char:02x})")
    else:
        print("")
    # Recurse into children, extending the prefix for left-side alignment.
    if node.left is not None:
        print_tree(node.left, prefix + (" " if is_left else ""), True)
    if node.right is not None:
        print_tree(node.right, prefix + (" " if is_left else ""), False)
def huffman_decode(data, dict_bytes):
    """Decompress data using Huffman coding (inverse of huffman_compress).

    Args:
        data: Compressed payload bytes.
        dict_bytes: Serialized dictionary: one padding byte followed by
            (byte value, code length, code bits) entries.

    Returns:
        bytearray: Decompressed data (empty when either input is empty).
    """
    if not data or not dict_bytes:
        return bytearray()
    # Print debug information
    print(f"\nCompressed data: {' '.join(f'{b:02x}' for b in data[:30])}")
    print(f"Compressed data length: {len(data)} bytes")
    print(f"Dictionary data: {' '.join(f'{b:02x}' for b in dict_bytes[:30])}")
    print(f"Dictionary data length: {len(dict_bytes)} bytes")
    # First dictionary byte is the number of zero bits padding the payload.
    padding = dict_bytes[0]
    dict_bytes = dict_bytes[1:]  # Remove padding info from dictionary
    print(f"Padding bits: {padding}")
    # Rebuild the Huffman tree by inserting each code path into an empty tree.
    root = Node(0, None, None, None)
    current = root
    i = 0
    while i < len(dict_bytes):
        # Read byte value and code length
        byte_val = dict_bytes[i]
        code_len = dict_bytes[i + 1]
        i += 2
        # Read the code's bit payload (big-endian, rounded up to whole bytes).
        code_bytes = dict_bytes[i:i + (code_len + 7) // 8]
        i += (code_len + 7) // 8
        # Convert code bytes to a zero-filled binary string of code_len bits.
        code = bin(int.from_bytes(code_bytes, byteorder='big'))[2:].zfill(code_len)
        # Build tree path for this code: '0' goes left, '1' goes right.
        for bit in code:
            if bit == '0':
                if current.left is None:
                    current.left = Node(0, None, None, None)
                current = current.left
            else:
                if current.right is None:
                    current.right = Node(0, None, None, None)
                current = current.right
        # The node at the end of the path is the leaf for this symbol.
        current.char = byte_val
        current = root
    # print_tree(root)
    # Decode data using the tree
    decoded = bytearray()
    current = root
    # Expand the payload into one long binary string.
    binary = ''.join(bin(b)[2:].zfill(8) for b in data)
    print(f"Binary data length: {len(binary)} bits")
    # Drop the padding bits appended by the encoder.
    if padding > 0:
        binary = binary[:-padding]
    print(f"Binary data length after removing padding: {len(binary)} bits")
    # Walk the tree bit by bit, emitting a symbol at every leaf.
    for bit in binary:
        if bit == '0':
            current = current.left
        else:
            current = current.right
        # If we reached a leaf node, add its value to decoded data
        if current.char is not None:
            decoded.append(current.char)
            current = root
    print(f"Decoded data length: {len(decoded)} bytes")
    return decoded
def process_block(args):
    """Compress one horizontal band of pixels with RTE (and Huffman).

    Designed for multiprocessing.Pool.map, hence the single tuple argument.

    Args:
        args: Tuple (top, bottom, width, height, pixels, bit_depth) where
            `pixels` is the flat list of palette indices for the whole image
            and rows [top, bottom) belong to this block.

    Returns:
        tuple: (rte_compressed, huffman_compressed, dict_bytes, dict_size,
                block_original_size). `dict_bytes` is always a bytearray
                (possibly empty), never None.
    """
    top, bottom, width, height, pixels, bit_depth = args
    block_height = bottom - top
    block_data = bytearray()
    for y in range(block_height):
        row_start = (top + y) * width
        if bit_depth == 4:
            # Pack two 4-bit pixels into one byte, high nibble first.
            for x in range(0, width, 2):
                p1 = pixels[row_start + x] & 0x0F
                if x + 1 < width:
                    p2 = pixels[row_start + x + 1] & 0x0F
                else:
                    p2 = 0  # Odd width: pad the trailing nibble with zero
                block_data.append(((p1 & 0x0F) << 4) | (p2 & 0x0F))
        else:  # 8-bit: one palette index per byte
            for x in range(width):
                block_data.append(pixels[row_start + x] & 0xFF)
    # RTE compress this block
    rte_compressed = rte_compress(block_data)
    # Huffman-compress the RTE output for a potential further size win.
    huffman_compressed, dict_size, dict_bytes = huffman_compress(rte_compressed)
    # huffman_compress returns dict_bytes=None for empty input; normalize so
    # callers (split_bmp) can always take len(dict_bytes) without crashing.
    if dict_bytes is None:
        dict_bytes = bytearray()
    block_original_size = len(block_data)  # Original uncompressed size
    return rte_compressed, huffman_compressed, dict_bytes, dict_size, block_original_size
def split_bmp(im, block_size, input_dir=None, bit_depth=4, enable_huffman=False):
    """Splits grayscale image into raw bitmap blocks with RTE compression.

    Blocks are compressed in parallel; per block the smaller of RTE-only or
    RTE+Huffman (when enabled) is kept, tagged with a 1-byte identifier.

    Args:
        im: PIL Image object (BMP file); im.filename must be readable.
        block_size: Height of each block in rows.
        input_dir: Input directory (optional, unused here).
        bit_depth: Bit depth for the image (4 or 8).
        enable_huffman: Whether to enable Huffman compression.

    Returns:
        tuple: (width, height, splits, palette_bytes, split_data, lenbuf)
        where lenbuf holds each block's stored size including its tag bytes.
    """
    width, height = im.size
    splits = math.ceil(height / block_size) if block_size else 1
    # Read the palette straight from the BMP file on disk.
    palette_size = 2 ** bit_depth * 4  # Each palette entry is 4 bytes (B,G,R,A)
    with open(im.filename, 'rb') as f:
        f.seek(54)  # Skip BMP header (14 + 40 bytes)
        palette_bytes = f.read(palette_size)
    # Read pixel data
    pixels = list(im.getdata())
    row_size = (width * bit_depth + 7) // 8
    row_padded = (row_size + 3) & ~3  # 4-byte align each row, per BMP spec
    # Original (uncompressed, row-padded) data size, for the stats below.
    original_size = row_padded * height
    # Prepare one argument tuple per block for parallel processing.
    process_args = []
    for i in range(splits):
        top = i * block_size
        bottom = min((i + 1) * block_size, height)
        process_args.append((top, bottom, width, height, pixels, bit_depth))
    # Use process pool for parallel processing
    split_data = bytearray()
    total_rte_size = 0
    total_huffman_size = 0
    total_rte_original = 0
    total_huffman_original = 0
    total_saved_size = 0
    lenbuf = []
    with Pool(processes=cpu_count()) as pool:
        compressed_blocks = pool.map(process_block, process_args)
    # Collect results; per block keep whichever encoding is smaller.
    for rte_block, huffman_block, dict_bytes, dict_size, block_original_size in compressed_blocks:
        rte_size = len(rte_block)
        huffman_size = len(huffman_block) + len(dict_bytes)  # Include dictionary size
        # Choose compression method based on enable_huffman flag and size comparison
        if enable_huffman and huffman_size < rte_size:
            split_data.append(1)  # Huffman identifier
            split_data.extend(len(dict_bytes).to_bytes(2, byteorder='little'))  # Dictionary size (2 bytes)
            split_data.extend(dict_bytes)  # Dictionary data
            split_data.extend(huffman_block)  # Compressed data
            lenbuf.append(len(huffman_block) + len(dict_bytes) + 3)  # +3 for identifier and dict size
            total_huffman_size += huffman_size + 3
            total_huffman_original += block_original_size
            total_saved_size += rte_size - huffman_size
        else:
            split_data.append(0)  # RTE identifier
            split_data.extend(rte_block)
            lenbuf.append(len(rte_block) + 1)  # +1 for identifier
            total_rte_size += rte_size + 1
            total_rte_original += block_original_size
    # Calculate compression ratios (positive = smaller than original).
    final_size = len(split_data)
    rte_ratio = (1 - total_rte_size / total_rte_original) * 100 if total_rte_original > 0 else 0
    haffman_ratio = (1 - total_huffman_size / total_huffman_original) * 100 if total_huffman_original > 0 else 0
    final_ratio = (1 - final_size / original_size) * 100
    # Print statistics in one line, ANSI-colored red when a ratio is negative.
    color_rte = '\033[31m' if rte_ratio < 0 else '\033[32m'
    color_huffman = '\033[31m' if haffman_ratio < 0 else '\033[32m'
    ratio_color_total = '\033[31m' if final_ratio < 0 else '\033[32m'
    print(f"Frame {width:4d}x{height:4d} | Splits: {splits:3d}")
    print(f"RTE: {total_rte_size:8d}B | Ratio: {color_rte}{rte_ratio:+.2f}%\033[0m | Original: {total_rte_original:8d}B")
    if enable_huffman:
        print(f"Huffman: {total_huffman_size:8d}B | Ratio: {color_huffman}{haffman_ratio:+.2f}%\033[0m | Original: {total_huffman_original:8d}B | Saved: {total_saved_size:8d}B")
    print(f"Total: {final_size:8d}B | Ratio: {ratio_color_total}{final_ratio:+.2f}%\033[0m | Original: {original_size:8d}B")
    return width, height, splits, palette_bytes, split_data, lenbuf
def save_image(output_file_path, header, split_data, palette_bytes):
    """Write the packaged image file: header, then palette, then block data.

    Args:
        output_file_path: Destination path of the packaged file.
        header: Serialized file header bytes.
        split_data: Concatenated compressed block data.
        palette_bytes: Palette section written between header and blocks.
    """
    with open(output_file_path, 'wb') as out:
        out.write(header)
        out.write(palette_bytes)
        out.write(split_data)
def process_bmp(input_file, output_file, split_height, bit_depth=4, enable_huffman=False):
    """Split one BMP into compressed blocks and write the packaged .sbmp file.

    Args:
        input_file: Path of the source BMP.
        output_file: Output directory for the packaged file.
        split_height: Block height in rows; must be a positive integer.
        bit_depth: 4 or 8 bit grayscale depth.
        enable_huffman: Whether Huffman compression may be used per block.

    Exits the process with status 1 on invalid arguments or unreadable input.
    """
    try:
        SPLIT_HEIGHT = int(split_height)
        if SPLIT_HEIGHT <= 0:
            raise ValueError('Height must be a positive integer')
    except ValueError as e:
        print('Error:', e)
        sys.exit(1)
    input_dir, input_filename = os.path.split(input_file)
    base_filename, ext = os.path.splitext(input_filename)
    OUTPUT_FILE_NAME = base_filename
    try:
        im = Image.open(input_file)
    except Exception as e:
        print('Error:', e)
        # Exit with a failure status so scripts/CI can detect the error
        # (previously this exited with 0, masking the failure).
        sys.exit(1)
    # Split the image into blocks based on the specified split height
    width, height, splits, palette_bytes, split_data, lenbuf = split_bmp(im, SPLIT_HEIGHT, input_dir, bit_depth, enable_huffman)
    # Create header based on image properties
    header = create_header(width, height, splits, SPLIT_HEIGHT, lenbuf, ext, bit_depth)
    # Save the final packaged file
    output_file_path = os.path.join(output_file, OUTPUT_FILE_NAME + '.sbmp')
    save_image(output_file_path, header, split_data, palette_bytes)
    print('Completed', input_filename, '->', os.path.basename(output_file_path))
def process_images_in_directory(input_dir, output_dir, split_height, bit_depth=4, enable_huffman=False):
    """Process every BMP in `input_dir`, de-duplicating identical images.

    Files with identical content are written as tiny "_R" redirect stubs
    that name the first converted copy instead of being re-compressed.

    Args:
        input_dir: Directory containing .bmp files.
        output_dir: Directory receiving .sbmp files (created if missing).
        split_height: Block height forwarded to process_bmp.
        bit_depth: 4 or 8 bit grayscale depth.
        enable_huffman: Whether Huffman compression may be used per block.
    """
    import hashlib  # local import: stable content digest for duplicate detection
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Map of content digest -> filename of the first processed copy.
    processed_images = {}
    for filename in os.listdir(input_dir):
        if not filename.lower().endswith('.bmp'):
            continue
        input_file = os.path.join(input_dir, filename)
        # Digest the file content to detect duplicates. sha256 is collision-
        # resistant and stable across runs, unlike the salted built-in hash().
        with open(input_file, 'rb') as f:
            file_hash = hashlib.sha256(f.read()).hexdigest()
        if file_hash in processed_images:
            # Duplicate: emit a redirect stub pointing at the converted file.
            if filename.lower().endswith('.bmp'):
                output_file_path = os.path.join(output_dir, filename[:-4] + '.sbmp')
            else:
                output_file_path = os.path.join(output_dir, 's' + filename)
            with open(output_file_path, 'wb') as f:
                converted_filename = os.path.splitext(processed_images[file_hash])[0] + '.sbmp'
                # Stub layout: "_R" magic, 1-byte name length, target filename.
                f.write("_R".encode('UTF-8'))
                filename_length = len(converted_filename)
                f.write(bytearray([filename_length]))
                f.write(converted_filename.encode('UTF-8'))
            continue
        # First occurrence: convert it and remember its digest.
        process_bmp(input_file, output_dir, split_height, bit_depth, enable_huffman)
        processed_images[file_hash] = filename
def main():
    """CLI entry point: convert GIFs to BMP frames, then package as .sbmp."""
    parser = argparse.ArgumentParser(description='Convert GIF to BMP and split images')
    parser.add_argument('input_folder', help='Input folder containing GIF files')
    parser.add_argument('output_folder', help='Output folder for processed files')
    parser.add_argument('--split', type=int, required=True, help='Split height for image processing')
    parser.add_argument('--depth', type=int, choices=[4, 8], required=True, help='Bit depth (4 for 4-bit grayscale, 8 for 8-bit grayscale)')
    parser.add_argument('--enable-huffman', action='store_true', help='Enable Huffman compression (default: disabled)')
    args = parser.parse_args()
    # Stage 1: expand every GIF under the input tree into BMP frames.
    for root, dirs, files in os.walk(args.input_folder):
        for name in files:
            if name.endswith('.gif'):
                convert_gif_to_bmp(os.path.join(root, name), args.output_folder, args.depth)
    # Stage 2: compress and package every BMP found in the output folder.
    process_images_in_directory(args.output_folder, args.output_folder,
                                args.split, args.depth, args.enable_huffman)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,6 @@
# The following lines of boilerplate have to be in your project's CMakeLists
# in this exact order for cmake to work correctly
cmake_minimum_required(VERSION 3.5)
set(EXTRA_COMPONENT_DIRS "$ENV{IDF_PATH}/tools/unit-test-app/components")
include($ENV{IDF_PATH}/tools/cmake/project.cmake)
project(test_anim_player)

View File

@@ -0,0 +1,19 @@
# Register the unit-test app sources with ESP-IDF's build system.
idf_component_register(
    SRC_DIRS "."
    INCLUDE_DIRS "."
)

# Pack the 4-bit test animations into the assets_4bit SPIFFS partition,
# converting files with the .aaf extension for mmap access.
spiffs_create_partition_assets(
    assets_4bit
    "../test_4bit"
    FLASH_IN_PROJECT
    MMAP_FILE_SUPPORT_FORMAT ".aaf"
)

# Same for the 8-bit test animations in the assets_8bit partition.
spiffs_create_partition_assets(
    assets_8bit
    "../test_8bit"
    FLASH_IN_PROJECT
    MMAP_FILE_SUPPORT_FORMAT ".aaf"
)

View File

@@ -0,0 +1,9 @@
## IDF Component Manager Manifest File
dependencies:
idf: '>=5.0'
image_player:
version: '*'
override_path: ../../
espressif/esp-box: ^3.1.0~1
esp_mmap_assets:
version: 1.*

View File

@@ -0,0 +1,46 @@
/*
* SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief This file was generated by esp_mmap_assets, don't modify it
*/
#pragma once
#include "esp_mmap_assets.h"
#define MMAP_TEST_4BIT_FILES 26
#define MMAP_TEST_4BIT_CHECKSUM 0x4F98
enum MMAP_TEST_4BIT_LISTS {
MMAP_TEST_4BIT_ANGER_ENTER_AAF = 0, /*!< Anger_enter.aaf */
MMAP_TEST_4BIT_ANGER_LOOP_AAF = 1, /*!< Anger_loop.aaf */
MMAP_TEST_4BIT_ANGER_RETURN_AAF = 2, /*!< Anger_return.aaf */
MMAP_TEST_4BIT_ASKING_AAF = 3, /*!< asking.aaf */
MMAP_TEST_4BIT_BLINK_ONCE_AAF = 4, /*!< blink_once.aaf */
MMAP_TEST_4BIT_BLINK_QUICK_AAF = 5, /*!< blink_quick.aaf */
MMAP_TEST_4BIT_CONNECTING_AAF = 6, /*!< connecting.aaf */
MMAP_TEST_4BIT_HAPP_RETURN_AAF = 7, /*!< happ_return.aaf */
MMAP_TEST_4BIT_HAPPY_ENTER_AAF = 8, /*!< happy_enter.aaf */
MMAP_TEST_4BIT_HAPPY_LOOP_AAF = 9, /*!< happy_loop.aaf */
MMAP_TEST_4BIT_LEFT_ENTER_AAF = 10, /*!< left_enter.aaf */
MMAP_TEST_4BIT_LEFT_LOOP_AAF = 11, /*!< left_loop.aaf */
MMAP_TEST_4BIT_LEFT_RETURN_AAF = 12, /*!< left_return.aaf */
MMAP_TEST_4BIT_PANIC_ENTER_AAF = 13, /*!< panic_enter.aaf */
MMAP_TEST_4BIT_PANIC_LOOP_AAF = 14, /*!< panic_loop.aaf */
MMAP_TEST_4BIT_PANIC_RETURN_AAF = 15, /*!< panic_return.aaf */
MMAP_TEST_4BIT_RIGHT_ENTER_AAF = 16, /*!< right_enter.aaf */
MMAP_TEST_4BIT_RIGHT_LOOP_AAF = 17, /*!< right_loop.aaf */
MMAP_TEST_4BIT_RIGHT_RETURN_AAF = 18, /*!< right_return.aaf */
MMAP_TEST_4BIT_SAD_ENTER_AAF = 19, /*!< sad_enter.aaf */
MMAP_TEST_4BIT_SAD_LOOP_AAF = 20, /*!< sad_loop.aaf */
MMAP_TEST_4BIT_SAD_RETURN_AAF = 21, /*!< sad_return.aaf */
MMAP_TEST_4BIT_SCORN_ENTER_AAF = 22, /*!< scorn_enter.aaf */
MMAP_TEST_4BIT_SCORN_LOOP_AAF = 23, /*!< scorn_loop.aaf */
MMAP_TEST_4BIT_SCORN_RETURN_AAF = 24, /*!< scorn_return.aaf */
MMAP_TEST_4BIT_WAKE_AAF = 25, /*!< wake.aaf */
};

View File

@@ -0,0 +1,46 @@
/*
* SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief This file was generated by esp_mmap_assets, don't modify it
*/
#pragma once
#include "esp_mmap_assets.h"
#define MMAP_TEST_8BIT_FILES 26
#define MMAP_TEST_8BIT_CHECKSUM 0x88D7
enum MMAP_TEST_8BIT_LISTS {
MMAP_TEST_8BIT_ANGER_ENTER_AAF = 0, /*!< Anger_enter.aaf */
MMAP_TEST_8BIT_ANGER_LOOP_AAF = 1, /*!< Anger_loop.aaf */
MMAP_TEST_8BIT_ANGER_RETURN_AAF = 2, /*!< Anger_return.aaf */
MMAP_TEST_8BIT_ASKING_AAF = 3, /*!< asking.aaf */
MMAP_TEST_8BIT_BLINK_ONCE_AAF = 4, /*!< blink_once.aaf */
MMAP_TEST_8BIT_BLINK_QUICK_AAF = 5, /*!< blink_quick.aaf */
MMAP_TEST_8BIT_CONNECTING_AAF = 6, /*!< connecting.aaf */
MMAP_TEST_8BIT_HAPP_RETURN_AAF = 7, /*!< happ_return.aaf */
MMAP_TEST_8BIT_HAPPY_ENTER_AAF = 8, /*!< happy_enter.aaf */
MMAP_TEST_8BIT_HAPPY_LOOP_AAF = 9, /*!< happy_loop.aaf */
MMAP_TEST_8BIT_LEFT_ENTER_AAF = 10, /*!< left_enter.aaf */
MMAP_TEST_8BIT_LEFT_LOOP_AAF = 11, /*!< left_loop.aaf */
MMAP_TEST_8BIT_LEFT_RETURN_AAF = 12, /*!< left_return.aaf */
MMAP_TEST_8BIT_PANIC_ENTER_AAF = 13, /*!< panic_enter.aaf */
MMAP_TEST_8BIT_PANIC_LOOP_AAF = 14, /*!< panic_loop.aaf */
MMAP_TEST_8BIT_PANIC_RETURN_AAF = 15, /*!< panic_return.aaf */
MMAP_TEST_8BIT_RIGHT_ENTER_AAF = 16, /*!< right_enter.aaf */
MMAP_TEST_8BIT_RIGHT_LOOP_AAF = 17, /*!< right_loop.aaf */
MMAP_TEST_8BIT_RIGHT_RETURN_AAF = 18, /*!< right_return.aaf */
MMAP_TEST_8BIT_SAD_ENTER_AAF = 19, /*!< sad_enter.aaf */
MMAP_TEST_8BIT_SAD_LOOP_AAF = 20, /*!< sad_loop.aaf */
MMAP_TEST_8BIT_SAD_RETURN_AAF = 21, /*!< sad_return.aaf */
MMAP_TEST_8BIT_SCORN_ENTER_AAF = 22, /*!< scorn_enter.aaf */
MMAP_TEST_8BIT_SCORN_LOOP_AAF = 23, /*!< scorn_loop.aaf */
MMAP_TEST_8BIT_SCORN_RETURN_AAF = 24, /*!< scorn_return.aaf */
MMAP_TEST_8BIT_WAKE_AAF = 25, /*!< wake.aaf */
};

View File

@@ -0,0 +1,177 @@
#include <stdio.h>
#include <string.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_log.h"
#include "unity.h"
#include "unity_test_utils.h"
#include "esp_heap_caps.h"
#include "esp_lcd_panel_io.h"
#include "esp_lcd_panel_vendor.h"
#include "esp_lcd_panel_ops.h"
#include "bsp/esp-bsp.h"
#include "anim_player.h"
#include "mmap_generate_test_4bit.h"
#include "mmap_generate_test_8bit.h"

static const char *TAG = "player";

/* Maximum allowed free-heap delta per test case before the leak check fails (bytes). */
#define TEST_MEMORY_LEAK_THRESHOLD  (500)

/* Free-heap snapshots taken in setUp() and compared in tearDown(). */
static size_t before_free_8bit;
static size_t before_free_32bit;

/* Handles created and torn down by test_anim_player_common(). */
static anim_player_handle_t handle = NULL;
static esp_lcd_panel_io_handle_t io_handle = NULL;
static esp_lcd_panel_handle_t panel_handle = NULL;
/* Unity fixture: snapshot free heap sizes before each test for leak detection. */
void setUp(void)
{
    before_free_8bit = heap_caps_get_free_size(MALLOC_CAP_8BIT);
    before_free_32bit = heap_caps_get_free_size(MALLOC_CAP_32BIT);
}
/* Unity fixture: compare free heap against the setUp() snapshot and fail
 * the test when the delta exceeds TEST_MEMORY_LEAK_THRESHOLD. */
void tearDown(void)
{
    size_t after_free_8bit = heap_caps_get_free_size(MALLOC_CAP_8BIT);
    size_t after_free_32bit = heap_caps_get_free_size(MALLOC_CAP_32BIT);
    unity_utils_check_leak(before_free_8bit, after_free_8bit, "8BIT", TEST_MEMORY_LEAK_THRESHOLD);
    unity_utils_check_leak(before_free_32bit, after_free_32bit, "32BIT", TEST_MEMORY_LEAK_THRESHOLD);
}
/* LCD panel-IO "color transfer done" ISR callback: notify the animation
 * player that the previous flush completed so it may send the next block.
 * user_ctx carries the anim_player handle registered with the callback. */
static bool flush_io_ready(esp_lcd_panel_io_handle_t panel_io, esp_lcd_panel_io_event_data_t *edata, void *user_ctx)
{
    anim_player_handle_t handle = (anim_player_handle_t)user_ctx;
    anim_player_flush_ready(handle);
    return true;
}
/* Animation player flush callback: forward a decoded region to the LCD.
 * The panel handle is stashed in the player's user data at init time. */
static void flush_callback(anim_player_handle_t handle, int x1, int y1, int x2, int y2, const void *data)
{
    esp_lcd_panel_handle_t panel = (esp_lcd_panel_handle_t)anim_player_get_user_data(handle);
    // if(y1 == 0) {
    //     ESP_LOGI(TAG, "Flush: (%03d,%03d) (%03d,%03d)", x1, y1, x2, y2);
    // }
    esp_lcd_panel_draw_bitmap(panel, x1, y1, x2, y2, data);
}
/* Animation player event callback: log lifecycle transitions. */
static void update_callback(anim_player_handle_t handle, player_event_t event)
{
    if (event == PLAYER_EVENT_IDLE) {
        ESP_LOGI(TAG, "Event: IDLE");
    } else if (event == PLAYER_EVENT_ONE_FRAME_DONE) {
        /* Per-frame event: intentionally silent to avoid flooding the log. */
    } else if (event == PLAYER_EVENT_ALL_FRAME_DONE) {
        ESP_LOGI(TAG, "Event: ALL_FRAME_DONE");
    } else {
        ESP_LOGI(TAG, "Event: UNKNOWN");
    }
}
/* Shared body for the 4-bit and 8-bit test cases: mount the mmap'ed asset
 * partition, bring up the display, play every stored animation once, then
 * tear everything (player, assets, panel, IO, SPI bus) back down.
 *
 * NOTE(review): `delay_ms` is multiplied by 1000 before pdMS_TO_TICKS(),
 * so it is effectively a delay in seconds (callers pass 5 -> 5 s). Confirm
 * the intent or rename the parameter. */
static void test_anim_player_common(const char *partition_label, uint32_t max_files, uint32_t checksum, uint32_t delay_ms)
{
    /* Mount the asset partition with full checksum verification. */
    mmap_assets_handle_t assets_handle = NULL;
    const mmap_assets_config_t asset_config = {
        .partition_label = partition_label,
        .max_files = max_files,
        .checksum = checksum,
        .flags = {.mmap_enable = true, .full_check = true}
    };
    esp_err_t ret = mmap_assets_new(&asset_config, &assets_handle);
    if (ret != ESP_OK) {
        ESP_LOGE(TAG, "Failed to initialize assets");
        return;
    }
    /* Bring up the panel and backlight via the board support package. */
    const bsp_display_config_t bsp_disp_cfg = {
        .max_transfer_sz = (240 * 10) * sizeof(uint16_t),
    };
    bsp_display_new(&bsp_disp_cfg, &panel_handle, &io_handle);
    esp_lcd_panel_disp_on_off(panel_handle, true);
    bsp_display_brightness_init();
    bsp_display_backlight_on();
    /* Create the player; the panel handle rides along as user data so the
     * flush callback can draw, and byte swap is enabled for the panel. */
    anim_player_config_t config = {
        .flush_cb = flush_callback,
        .update_cb = update_callback,
        .user_data = panel_handle,
        .flags = {.swap = true},
        .task = ANIM_PLAYER_INIT_CONFIG()
    };
    handle = anim_player_init(&config);
    TEST_ASSERT_NOT_NULL(handle);
    /* Route the panel's transfer-done interrupt back into the player. */
    const esp_lcd_panel_io_callbacks_t cbs = {
        .on_color_trans_done = flush_io_ready,
    };
    esp_lcd_panel_io_register_event_callbacks(io_handle, &cbs, handle);
    uint32_t start, end;
    const void *src_data;
    size_t src_len;
    /* Play each stored animation: full segment, 30 fps, looping. */
    for (int i = 0; i < mmap_assets_get_stored_files(assets_handle); i++) {
        src_data = mmap_assets_get_mem(assets_handle, i);
        src_len = mmap_assets_get_size(assets_handle, i);
        ESP_LOGW(TAG, "set src, %s", mmap_assets_get_name(assets_handle, i));
        anim_player_set_src_data(handle, src_data, src_len);
        anim_player_get_segment(handle, &start, &end);
        anim_player_set_segment(handle, start, end, 30, true);
        ESP_LOGW(TAG, "start:%" PRIu32 ", end:%" PRIu32 "", start, end);
        anim_player_update(handle, PLAYER_ACTION_START);
        vTaskDelay(pdMS_TO_TICKS(1000 * delay_ms));
        anim_player_update(handle, PLAYER_ACTION_STOP);
        vTaskDelay(pdMS_TO_TICKS(1000 * delay_ms));
    }
    ESP_LOGI(TAG, "test done");
    /* Teardown in reverse order of creation. */
    if (handle) {
        anim_player_deinit(handle);
        handle = NULL;
    }
    if (assets_handle) {
        mmap_assets_del(assets_handle);
        assets_handle = NULL;
    }
    if (panel_handle) {
        esp_lcd_panel_del(panel_handle);
    }
    if (io_handle) {
        esp_lcd_panel_io_del(io_handle);
    }
    spi_bus_free(BSP_LCD_SPI_NUM);
    vTaskDelay(pdMS_TO_TICKS(1000));
}
/* The two cases previously shared an identical name string, which makes
 * Unity's interactive menu ambiguous; include the bit depth in each name. */
TEST_CASE("test anim player init and deinit (4bit)", "[anim_player][4bit]")
{
    test_anim_player_common("assets_4bit", MMAP_TEST_4BIT_FILES, MMAP_TEST_4BIT_CHECKSUM, 5);
}

TEST_CASE("test anim player init and deinit (8bit)", "[anim_player][8bit]")
{
    test_anim_player_common("assets_8bit", MMAP_TEST_8BIT_FILES, MMAP_TEST_8BIT_CHECKSUM, 5);
}
/* Firmware entry point: present the interactive Unity test menu on the console. */
void app_main(void)
{
    printf("Animation player test\n");
    unity_run_menu();
}

View File

@@ -0,0 +1,7 @@
# Name, Type, SubType, Offset, Size, Flags
# Note: if you change the phy_init or app partition offset, make sure to change the offset in Kconfig.projbuild
nvs, data, nvs, , 0x6000,
phy_init, data, phy, , 0x1000,
factory, app, factory, , 500K,
assets_4bit, data, spiffs, , 300K,
assets_8bit, data, spiffs, , 900K,
1 # Name, Type, SubType, Offset, Size, Flags
2 # Note: if you change the phy_init or app partition offset, make sure to change the offset in Kconfig.projbuild
3 nvs, data, nvs, , 0x6000,
4 phy_init, data, phy, , 0x1000,
5 factory, app, factory, , 500K,
6 assets_4bit, data, spiffs, , 300K,
7 assets_8bit, data, spiffs, , 900K,

View File

@@ -0,0 +1,18 @@
# SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import pytest
from pytest_embedded import Dut
@pytest.mark.target('esp32')
@pytest.mark.target('esp32c3')
@pytest.mark.target('esp32s3')
@pytest.mark.target('esp32p4')
@pytest.mark.env('generic')
@pytest.mark.parametrize(
    'config',
    [
        'defaults',
    ],
)
def test_anim_player(dut: Dut) -> None:
    """Run every single-board unit test case on the attached device."""
    dut.run_all_single_board_cases()

View File

@@ -0,0 +1,12 @@
# This file was generated using idf.py save-defconfig. It can be edited manually.
# Espressif IoT Development Framework (ESP-IDF) 5.4.0 Project Minimal Configuration
#
CONFIG_IDF_TARGET="esp32s3"
CONFIG_ESPTOOLPY_FLASHSIZE_4MB=y
CONFIG_PARTITION_TABLE_CUSTOM=y
CONFIG_SPIRAM=y
CONFIG_SPIRAM_MODE_OCT=y
CONFIG_SPIRAM_SPEED_80M=y
CONFIG_ESP_CONSOLE_USB_SERIAL_JTAG=y
CONFIG_ESP_TASK_WDT_EN=n
CONFIG_MMAP_FILE_NAME_LENGTH=32