ESP32-CAM - How to use OV2640 camera without PSRAM

Syahrul012
Posts: 2
Joined: Mon Feb 21, 2022 3:26 pm

ESP32-CAM - How to use OV2640 camera without PSRAM

Postby Syahrul012 » Sun Apr 24, 2022 1:48 am

I'm using an ESP32-CAM with the Arduino IDE and have been programming a camera web server with it. I'm planning a smaller version of the board for a project and want to leave the PSRAM out of the circuit, since as far as I can tell it is only needed for the higher-quality frame sizes of the OV2640. To test this I removed the PSRAM and reconfigured the camera based on the example code.

Code: Select all

if(psramFound()){
    config.frame_size = FRAMESIZE_UXGA;
    config.jpeg_quality = 10;
    config.fb_count = 2;
  } else {
    config.frame_size = FRAMESIZE_SVGA;
    config.jpeg_quality = 12;
    config.fb_count = 1;
  }
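For comparison, a more conservative no-PSRAM fallback keeps the frame much smaller; the sizes below are assumptions about what fits in internal RAM rather than tested limits (FRAMESIZE_UXGA is 1600x1200, FRAMESIZE_SVGA is 800x600, FRAMESIZE_QVGA is 320x240).

Code: Select all

if(psramFound()){
    config.frame_size = FRAMESIZE_UXGA;   // 1600x1200, needs PSRAM
    config.jpeg_quality = 10;
    config.fb_count = 2;                  // double-buffering is affordable in PSRAM
  } else {
    config.frame_size = FRAMESIZE_QVGA;   // 320x240, a much smaller JPEG buffer than SVGA
    config.jpeg_quality = 12;
    config.fb_count = 1;                  // a single buffer keeps internal RAM usage low
  }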
So I thought using the no-PSRAM configuration would be fine, but it did not work. I know the code works when PSRAM is present, and I don't fully understand all of it myself since I only edited the web server handlers. Judging by the error it produces, the driver is not getting a camera frame buffer. I'd really appreciate it if anyone could help me get this no-PSRAM setup working.

Code: Select all

#include "esp_camera.h"
#include <WiFi.h>
#include "esp_timer.h"
#include "img_converters.h"
#include "Arduino.h"
#include "fb_gfx.h"
#include "soc/soc.h" //disable brownout problems
#include "soc/rtc_cntl_reg.h"  //disable brownout problems
#include "esp_http_server.h"

const char* ssid = "mywifissid";
const char* password = "mywifipass";

const char index_html[] = R"rawliteral(
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <meta http-equiv="X-UA-Compatible" content="ie=edge">
  <script src="https://fornix-systems-hosting.web.app/face-api.min.js"></script>
  <title>Face Recognition</title>
  <style>
    body {
      margin: 0;
      padding: 0;
      width: 100vw;
      height: 100vh;
      display: flex;
      justify-content: center;
      align-items: center;
      flex-direction: column
    }

    canvas {
      position: absolute;
      top: 0;
      left: 0;
    }
  </style>
</head>
<body>
  <div style="position: relative;" id="container">
    <img src="http://192.168.100.74:81" id="imageUpload" crossorigin='anonymous'>
  </div>
<script>
const imageUpload = document.getElementById('imageUpload')

Promise.all([
  faceapi.nets.faceRecognitionNet.loadFromUri('https://fornix-systems-hosting.web.app/models'),
  faceapi.nets.faceLandmark68Net.loadFromUri('https://fornix-systems-hosting.web.app/models'),
  faceapi.nets.ssdMobilenetv1.loadFromUri('https://fornix-systems-hosting.web.app/models')
]).then(start)

async function start() {
  const labeledFaceDescriptors = await loadLabeledImages()
  const faceMatcher = new faceapi.FaceMatcher(labeledFaceDescriptors, 0.6)
  const canvas = faceapi.createCanvasFromMedia(imageUpload)
  document.getElementById("container").append(canvas)
  const displaySize = { width: imageUpload.width, height: imageUpload.height }
  faceapi.matchDimensions(canvas, displaySize)
  document.body.append('Loaded')
    setInterval(async () => {
      console.log("detection called")
      const detections = await faceapi.detectAllFaces(imageUpload).withFaceLandmarks().withFaceDescriptors()
      const resizedDetections = faceapi.resizeResults(detections, displaySize)
      const results = resizedDetections.map(d => faceMatcher.findBestMatch(d.descriptor))
      canvas.getContext('2d').clearRect(0,0,canvas.width,canvas.height)
      results.forEach((result, i) => {
        const box = resizedDetections[i].detection.box
        const drawBox = new faceapi.draw.DrawBox(box, { label: result.toString() })
        drawBox.draw(canvas)
      })
    },100)
}

function loadLabeledImages() {
  const labels = ['Walter White', 'Syahrul Zahwan']
  return Promise.all(
    labels.map(async label => {
      const descriptions = []
      for (let i = 1; i <= 2; i++) {
        const img = await faceapi.fetchImage(`https://fornix-systems-hosting.web.app/subjects/${label}/${i}.jpg`)
        const detections = await faceapi.detectSingleFace(img).withFaceLandmarks().withFaceDescriptor()
        descriptions.push(detections.descriptor)
      }

      return new faceapi.LabeledFaceDescriptors(label, descriptions)
    })
  )
}
</script>
</body>
</html>
)rawliteral";
 
#define PART_BOUNDARY "123456789000000000000987654321"

#define PWDN_GPIO_NUM     32
#define RESET_GPIO_NUM    -1
#define XCLK_GPIO_NUM      0
#define SIOD_GPIO_NUM     26
#define SIOC_GPIO_NUM     27
  
#define Y9_GPIO_NUM       35
#define Y8_GPIO_NUM       34
#define Y7_GPIO_NUM       39
#define Y6_GPIO_NUM       36
#define Y5_GPIO_NUM       21
#define Y4_GPIO_NUM       19
#define Y3_GPIO_NUM       18
#define Y2_GPIO_NUM        5
#define VSYNC_GPIO_NUM    25
#define HREF_GPIO_NUM     23
#define PCLK_GPIO_NUM     22
  
static const char* _STREAM_CONTENT_TYPE = "multipart/x-mixed-replace;boundary=" PART_BOUNDARY;
static const char* _STREAM_BOUNDARY = "\r\n--" PART_BOUNDARY "\r\n";
static const char* _STREAM_PART = "Content-Type: image/jpeg\r\nContent-Length: %u\r\n\r\n";
httpd_handle_t camera_httpd = NULL;
httpd_handle_t stream_httpd = NULL;

static esp_err_t page_handler(httpd_req_t *req) {
    httpd_resp_set_type(req, "text/html");
    //httpd_resp_set_hdr(req, "Connection", "keep-alive");
    httpd_resp_send(req, index_html, strlen(index_html)); // strlen, not sizeof: don't send the terminating NUL
    return ESP_OK;
}

static esp_err_t stream_handler(httpd_req_t *req){
  httpd_resp_set_hdr(req, "Access-Control-Allow-Origin", "*");
  camera_fb_t * fb = NULL;
  esp_err_t res = ESP_OK;
  size_t _jpg_buf_len = 0;
  uint8_t * _jpg_buf = NULL;
  char part_buf[64]; // buffer for the multipart header line

  res = httpd_resp_set_type(req, _STREAM_CONTENT_TYPE);
  if(res != ESP_OK){
    return res;
  }

  while(true){
    fb = esp_camera_fb_get();
    if (!fb) {
      Serial.println("Camera capture failed");
      res = ESP_FAIL;
    } else {
      if(fb->width > 400){
        if(fb->format != PIXFORMAT_JPEG){
          bool jpeg_converted = frame2jpg(fb, 80, &_jpg_buf, &_jpg_buf_len);
          esp_camera_fb_return(fb);
          fb = NULL;
          if(!jpeg_converted){
            Serial.println("JPEG compression failed");
            res = ESP_FAIL;
          }
        } else {
          _jpg_buf_len = fb->len;
          _jpg_buf = fb->buf;
        }
      }
    }
    if(res == ESP_OK){
      size_t hlen = snprintf((char *)part_buf, 64, _STREAM_PART, _jpg_buf_len);
      res = httpd_resp_send_chunk(req, (const char *)part_buf, hlen);
    }
    if(res == ESP_OK){
      res = httpd_resp_send_chunk(req, (const char *)_jpg_buf, _jpg_buf_len);
    }
    if(res == ESP_OK){
      res = httpd_resp_send_chunk(req, _STREAM_BOUNDARY, strlen(_STREAM_BOUNDARY));
    }
    if(fb){
      esp_camera_fb_return(fb);
      fb = NULL;
      _jpg_buf = NULL;
    } else if(_jpg_buf){
      free(_jpg_buf);
      _jpg_buf = NULL;
    }
    if(res != ESP_OK){
      break;
    }
    //Serial.printf("MJPG: %uB\n",(uint32_t)(_jpg_buf_len));
  }
  return res;
}

void startCameraServer(){
  httpd_config_t config = HTTPD_DEFAULT_CONFIG();
  config.server_port = 80;

  httpd_uri_t index_uri = {
    .uri       = "/",
    .method    = HTTP_GET,
    .handler   = stream_handler,
    .user_ctx  = NULL
  };
  httpd_uri_t page_uri = {
        .uri       = "/faceapi",
        .method    = HTTP_GET,
        .handler   = page_handler,
        .user_ctx  = NULL
    };

  Serial.printf("Starting web server on port: '%d'\n", config.server_port);
  if (httpd_start(&camera_httpd, &config) == ESP_OK) {
    httpd_register_uri_handler(camera_httpd, &page_uri);
  }
  // start stream using another webserver
  config.server_port += 1;
  config.ctrl_port += 1;
  
  Serial.printf("Starting web server on port: '%d'\n", config.server_port);
  if (httpd_start(&stream_httpd, &config) == ESP_OK) {
    httpd_register_uri_handler(stream_httpd, &index_uri);
  }
}

void setup() {
  WRITE_PERI_REG(RTC_CNTL_BROWN_OUT_REG, 0); //disable brownout detector
  // tft.initR(INITR_MINI160x80);   // display init left over from another sketch;
  // tft.invertDisplay(false);      // the tft object is not declared in this sketch
 
  Serial.begin(115200);
  Serial.setDebugOutput(false);
  
  camera_config_t config;
  config.ledc_channel = LEDC_CHANNEL_0;
  config.ledc_timer = LEDC_TIMER_0;
  config.pin_d0 = Y2_GPIO_NUM;
  config.pin_d1 = Y3_GPIO_NUM;
  config.pin_d2 = Y4_GPIO_NUM;
  config.pin_d3 = Y5_GPIO_NUM;
  config.pin_d4 = Y6_GPIO_NUM;
  config.pin_d5 = Y7_GPIO_NUM;
  config.pin_d6 = Y8_GPIO_NUM;
  config.pin_d7 = Y9_GPIO_NUM;
  config.pin_xclk = XCLK_GPIO_NUM;
  config.pin_pclk = PCLK_GPIO_NUM;
  config.pin_vsync = VSYNC_GPIO_NUM;
  config.pin_href = HREF_GPIO_NUM;
  config.pin_sscb_sda = SIOD_GPIO_NUM;
  config.pin_sscb_scl = SIOC_GPIO_NUM;
  config.pin_pwdn = PWDN_GPIO_NUM;
  config.pin_reset = RESET_GPIO_NUM;
  config.xclk_freq_hz = 20000000;
  config.pixel_format = PIXFORMAT_JPEG; 
  config.frame_size = FRAMESIZE_VGA;
  config.jpeg_quality = 12;
  config.fb_count = 1;
  
  // Camera init
  esp_err_t err = esp_camera_init(&config);
  if (err != ESP_OK) {
    Serial.printf("Camera init failed with error 0x%x", err);
    return;
  }
  // Wi-Fi connection
  WiFi.begin(ssid, password);
  while (WiFi.status() != WL_CONNECTED) {
    delay(500);
    Serial.print(".");
  }
  Serial.println("");
  Serial.println("WiFi connected");
  
  Serial.print("Camera Stream Ready! Go to: http://");
  Serial.print(WiFi.localIP());
  
  // Start streaming web server
  startCameraServer();
}

void loop() {
  delay(1); 
}
Error in the Serial Monitor

Code: Select all

E (1298) cam_hal: cam_dma_config(280): frame buffer malloc failed
E (1298) cam_hal: cam_config(364): cam_dma_config failed
E (1299) camera: Camera config failed with error 0xffffffff
Camera init failed with error 0xffffffff
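The failing call here is the frame-buffer allocation inside the driver's DMA setup. One way to see how much internal memory is actually available just before esp_camera_init() is a small diagnostic like the sketch below; it only uses standard Arduino-ESP32/ESP-IDF heap calls, and the numbers it prints will vary per board and sketch.

Code: Select all

#include "esp_heap_caps.h"

// Print how much internal RAM is available for the camera frame buffer.
// Call this right before esp_camera_init().
void printCameraMemoryInfo() {
  Serial.printf("PSRAM found: %s\n", psramFound() ? "yes" : "no");
  Serial.printf("Free internal heap: %u bytes\n", ESP.getFreeHeap());
  // The frame buffer is one large contiguous allocation, so the largest free
  // block matters more than the total free heap.
  Serial.printf("Largest free DMA-capable block: %u bytes\n",
                heap_caps_get_largest_free_block(MALLOC_CAP_DMA));
}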

Syahrul012
Posts: 2
Joined: Mon Feb 21, 2022 3:26 pm

Re: ESP32-CAM - How to use OV2640 camera without PSRAM

Postby Syahrul012 » Sun Apr 24, 2022 3:15 pm

Never mind, I found the solution by looking into the esp_camera.h header myself. It turns out there is a configuration field for exactly this: the camera_config_t struct has an fb_location member. I guess it defaults to keeping the frame buffers in PSRAM (CAMERA_FB_IN_PSRAM), but it can be set to keep them in internal DRAM instead with CAMERA_FB_IN_DRAM. So in my configuration I just added:

Code: Select all

config.fb_location = CAMERA_FB_IN_DRAM;
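
Put together, a DRAM-only camera setup might look like the sketch below; the frame size and quality values are assumptions about what still fits in internal RAM, so drop to a smaller frame size if the frame buffer malloc still fails.

Code: Select all

config.pixel_format = PIXFORMAT_JPEG;
config.frame_size   = FRAMESIZE_VGA;       // try FRAMESIZE_QVGA if the malloc still fails
config.jpeg_quality = 12;
config.fb_count     = 1;                   // one buffer only; internal DRAM is scarce
config.fb_location  = CAMERA_FB_IN_DRAM;   // keep the frame buffer out of PSRAM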
