I am using the TensorFlow Lite ESP32 library in the Arduino IDE to run my model on an ESP32-CAM, but I am facing this error:
assert failed: IntegerType gemmlowp::RoundingDivideByPOT(IntegerType, int) [with IntegerType = long int] fixedpoint.h:359 (exponent <= 31)
I double-checked that the error comes from the model, but I don't know how to solve it.
this is my code:
#include <Arduino.h>
#include <WiFi.h>
#include <TensorFlowLite_ESP32.h>
#include <esp_camera.h>
#include <Wire.h>
#include <Adafruit_GFX.h>
#include <Adafruit_SSD1306.h>
#include <cstring>  // memcpy
// including tensorflowlite files
#include "tensorflow/lite/experimental/micro/kernels/micro_ops.h"
#include "tensorflow/lite/experimental/micro/micro_error_reporter.h"
#include "tensorflow/lite/experimental/micro/micro_interpreter.h"
#include "tensorflow/lite/experimental/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"
// Camera configuration
#define CAMERA_MODEL_AI_THINKER
// #include "camera_pins.h"
// OLED config
#define OLED_ADDR 0x3C
// 128x64 SSD1306 over I2C, no reset pin.
Adafruit_SSD1306 display(128, 64, &Wire, -1);
// TensorFlow Lite model and interpreter
tflite::MicroErrorReporter micro_error_reporter;
// BUG FIX: the original read "µ_error_reporter" — an HTML-mangled
// "&micro_error_reporter" (the "&micro" entity became "µ"). We need the
// address of the reporter object here.
tflite::ErrorReporter* error_reporter = &micro_error_reporter;
const tflite::Model* model;
// NOTE(review): 800 KB cannot fit in internal SRAM (~520 KB total on the
// ESP32) — the arena must come from PSRAM; see the allocation in setup().
constexpr int kTensorArenaSize = 800 * 1024; // Adjust size as needed
uint8_t* tensor_arena_buffer = nullptr;
tflite::MicroInterpreter* interpreter;
tflite::MicroMutableOpResolver micro_op_resolver;
// Define the input and output tensor pointers
TfLiteTensor* input_tensor;
TfLiteTensor* output_tensor;
// Preprocess image function
// Stub: intended to resize the camera frame to the model's expected input
// (224x224) and normalize the pixel values, but currently does nothing —
// the locals below are set and then discarded, so loop() copies the raw
// RGB565 SVGA frame straight into the input tensor.
// NOTE(review): feeding the unresized/unconverted frame to a quantized
// model is a plausible trigger for the RoundingDivideByPOT assert —
// confirm the model's input tensor shape, type, and quantization params.
void preprocessImage(camera_fb_t* fb) {
// Resize and normalize the image data as needed
int width = 224;
int height = 224;
uint8_t* image_data = fb->buf; // Use your image data
// Resize and normalize the image data here
// This example does not resize but should be implemented
}
// My model binary data: the model_data[] flatbuffer array (exported by the TFLite converter) goes here.
// Setup function
void setup() {
// Initialize OLED display
display.begin(SSD1306_SWITCHCAPVCC, OLED_ADDR);
display.clearDisplay();
error_reporter->Report("Initializing serial...");
Serial.begin(115200);
// Initialize the camera
camera_config_t config;
config.ledc_channel = LEDC_CHANNEL_0;
config.ledc_timer = LEDC_TIMER_0;
config.pin_d0 = 5;
config.pin_d1 = 18;
config.pin_d2 = 19;
config.pin_d3 = 21;
config.pin_d4 = 36;
config.pin_d5 = 39;
config.pin_d6 = 34;
config.pin_d7 = 35;
config.pin_xclk = 0;
config.pin_pclk = 22;
config.pin_vsync = 25;
config.pin_href = 23;
config.pin_sscb_sda = 26;
config.pin_sscb_scl = 27;
config.pin_pwdn = 32;
config.pin_reset = -1;
config.xclk_freq_hz = 20000000;
config.pixel_format = PIXFORMAT_RGB565;
config.frame_size = FRAMESIZE_SVGA; // Use a larger frame size if needed
config.jpeg_quality = 10;
config.fb_count = 1;
// Initialize the camera
if (esp_camera_init(&config) != ESP_OK) {
error_reporter->Report("Camera initialization failed");
display.println("Camera initialization failed");
display.display();
return;
}
// Initialize TensorFlow Lite
tensor_arena_buffer = new uint8_t[kTensorArenaSize];
if (tensor_arena_buffer == nullptr) {
error_reporter->Report("Failed to allocate memory for tensor arena.");
display.println("Failed to allocate memory for tensor arena.");
display.display();
return;
}
model = tflite::GetModel(model_data);
if (model->version() != TFLITE_SCHEMA_VERSION) {
error_reporter->Report(
"Model provided is schema version %d not equal to supported version %d.",
model->version(), TFLITE_SCHEMA_VERSION);
display.printf("Model schema version mismatch: %d\n", model->version());
display.display();
return;
}
micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_CONV_2D, tflite::ops::micro::Register_CONV_2D(),1,5);
micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_MAX_POOL_2D, tflite::ops::micro::Register_MAX_POOL_2D());
micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_SOFTMAX, tflite::ops::micro::Register_SOFTMAX());
micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_RESHAPE, tflite::ops::micro::Register_RESHAPE());
micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_FULLY_CONNECTED, tflite::ops::micro::Register_FULLY_CONNECTED(),1,9);
interpreter = new tflite::MicroInterpreter(model, micro_op_resolver, tensor_arena_buffer, kTensorArenaSize, error_reporter);
TfLiteStatus allocate_status = interpreter->AllocateTensors();
if (allocate_status != kTfLiteOk) {
error_reporter->Report("AllocateTensors() failed");
display.println("AllocateTensors() failed");
display.display();
return;
}
// Get pointers to the input and output tensors
input_tensor = interpreter->input(0);
output_tensor = interpreter->output(0);
}
// Loop function
void loop() {
// Capture an image from the camera
display.setTextSize(2);
display.setTextColor(WHITE);
display.setCursor(20, 20);
camera_fb_t* fb = esp_camera_fb_get();
if (!fb) {
error_reporter->Report("No camera frame buffer available");
display.println("No camera frame buffer available");
display.display();
delay(1000); // Delay to avoid spamming error messages
return;
}
// Preprocess the image
preprocessImage(fb);
// Set input tensor data
memcpy(input_tensor->data.uint8, fb->buf, input_tensor->bytes);
// Run inference
interpreter->Invoke();
// Process the output
float* output_data = output_tensor->data.f;
// Assuming the output tensor contains three values for [red, yellow, green]
int max_index = -1;
float max_value = 0;
for (int i = 0; i < 3; ++i) {
if (output_data > max_value) {
max_value = output_data;
max_index = i;
}
}
// Print the detected color
switch (max_index) {
case 0:
error_reporter->Report("Stop");
display.println("Stop");
break;
case 1:
error_reporter->Report("Go");
display.println("Go");
break;
case 2:
error_reporter->Report("Warning");
display.println("Warning");
break;
default:
error_reporter->Report("Not detected");
display.println("Not detected");
}
display.display();
// Return the framebuffer
esp_camera_fb_return(fb);
// Delay to avoid overloading the ESP32
delay(500);
}
Error while using esp32cam and tensorflowlite
-
- Posts: 1
- Joined: Mon Sep 16, 2024 8:33 pm
Who is online
Users browsing this forum: No registered users and 105 guests