Capture image using Arduino and run object detection

Hi there,

I am new to Node-Red. I am trying to build a flow that reads an image captured using an Arduino Nano BLE 33 Sense. The image is then sent to (node-red-contrib-tfjs-coco-ssd) for object detection. The issue is that the captured image from the Arduino is in hex code, not an image file (jpg, png). I need a way to capture an image using the OV7675 camera module attached to the Arduino and then run object detection on that image. Any ideas? Of the attached flows, the first one is the one I want to run, but it is missing something. The second one reads image serial data from the Arduino and runs Python code to convert it to an image. I need to connect the two flows, but it didn't work. Or find another way to read the image from the Arduino.

Maybe you can share the Python code — then we can see what needs to be done with the data.

All I see is an image of a flow :stuck_out_tongue:

Anyhow, if your process of saving the image is good, then next you need to retrieve said image for the processing.

I have done all of that this way, but just using an ESP32CAM instead. Look into the "grab a frame" and grab image nodes to see how that works.

I will include the flow :wink:

[{"id":"86d06f6a29325b4a","type":"tensorflowCoco","z":"7a76034f26ea9a4a","name":"","modelUrl":"http://localhost:1880/coco/model.json","scoreThreshold":0.5,"passthru":"bbox","lineColour":"magenta","x":710,"y":120,"wires":[["97099b3a8672b983","9e56b32b15e2699b","7a9566824312e813"]]},{"id":"97099b3a8672b983","type":"image","z":"7a76034f26ea9a4a","name":"","width":"400","data":"image","dataType":"msg","thumbnail":false,"active":true,"pass":false,"outputs":0,"x":720,"y":240,"wires":[]},{"id":"9e56b32b15e2699b","type":"debug","z":"7a76034f26ea9a4a","name":"","active":true,"tosidebar":true,"console":false,"tostatus":true,"complete":"classes","targetType":"msg","statusVal":"classes","statusType":"auto","x":930,"y":220,"wires":[]},{"id":"08075dce876aeb19","type":"ui_template","z":"7a76034f26ea9a4a","group":"dce9e7a2.d20c78","name":"Snapshot Template","order":1,"width":9,"height":6,"format":"<div ng-bind-html=\"msg.image\">\ndocument.\"msg.image\".style.backgroundImage='none';\n</div>","storeOutMessages":true,"fwdInMessages":true,"resendOnRefresh":true,"templateScope":"local","x":1170,"y":240,"wires":[[]]},{"id":"193adab1529d895f","type":"template","z":"7a76034f26ea9a4a","name":"","field":"image","fieldType":"msg","format":"handlebars","syntax":"mustache","template":"<img width=\"450px\" height=\"250px\" src=\"data:image/jpg;base64,{{{image}}}\">","output":"str","x":1140,"y":200,"wires":[["08075dce876aeb19"]]},{"id":"7a9566824312e813","type":"base64","z":"7a76034f26ea9a4a","name":"","action":"str","property":"image","x":1140,"y":160,"wires":[["193adab1529d895f"]]},{"id":"928f5e08cbd5d864","type":"inject","z":"7a76034f26ea9a4a","name":"ESP32CAM Fisheye","props":[{"p":"payload"},{"p":"topic","vt":"str"}],"repeat":"","crontab":"","once":false,"onceDelay":0.1,"topic":"","payload":"","payloadType":"date","x":170,"y":160,"wires":[["79a6feea5d8b2645"]]},{"id":"79a6feea5d8b2645","type":"change","z":"7a76034f26ea9a4a","name":"Set 
filename","rules":[{"t":"set","p":"payload","pt":"msg","to":"/home/pi/node-red-static/ESP3.jpg","tot":"str"}],"action":"","property":"","from":"","to":"","reg":false,"x":170,"y":200,"wires":[["6f6207a5582965a0"]]},{"id":"6f6207a5582965a0","type":"exec","z":"7a76034f26ea9a4a","command":"ffmpeg -y -i \"http://10.10.3.191/mjpeg/1\" -vframes 1 -qscale:v 2","addpay":"payload","append":"","useSpawn":"false","timer":"","oldrc":false,"name":"Grab a frame -> jpg","x":430,"y":220,"wires":[["8c6bcda15aabf6a5"],[],[]]},{"id":"8c6bcda15aabf6a5","type":"file in","z":"7a76034f26ea9a4a","name":"Grab image","filename":"/home/pi/node-red-static/ESP3.jpg","format":"","chunk":false,"sendError":false,"encoding":"none","x":430,"y":160,"wires":[["86d06f6a29325b4a"]]},{"id":"271b03e197dbef28","type":"ui_button","z":"7a76034f26ea9a4a","name":"","group":"dce9e7a2.d20c78","order":7,"width":0,"height":0,"passthru":false,"label":"ESP3 Fisheye","tooltip":"","color":"","bgcolor":"","icon":"","payload":"","payloadType":"str","topic":"topic","topicType":"msg","x":160,"y":240,"wires":[["79a6feea5d8b2645"]]},{"id":"dce9e7a2.d20c78","type":"ui_group","name":"Object detection","tab":"302dc820.4e5d48","order":1,"disp":true,"width":"9","collapse":false},{"id":"302dc820.4e5d48","type":"ui_tab","name":"Object detection","icon":"nature_people","order":4,"disabled":false,"hidden":false}]
1 Like

Sorry..Here is the flow as I test it (all connected)
I used Harvard codes for TinyML course in edx to capture the image in Arduino and convert it to image using python code.

[
    {
        "id": "a5834b67196b4d76",
        "type": "tab",
        "label": "Read Image and Detect",
        "disabled": false,
        "info": ""
    },
    {
        "id": "910de6e220fcd863",
        "type": "pythonshell in",
        "z": "a5834b67196b4d76",
        "name": "Convert to Image",
        "pyfile": "/Users/norahalbazzai/Documents/tstNode/main.py",
        "virtualenv": "",
        "continuous": false,
        "stdInData": false,
        "x": 550,
        "y": 260,
        "wires": [
            [
                "a85f705eace54c74"
            ]
        ]
    },
    {
        "id": "a85f705eace54c74",
        "type": "tensorflowCoco",
        "z": "a5834b67196b4d76",
        "name": "",
        "modelUrl": "http://localhost:1880/coco/model.json",
        "scoreThreshold": 0.5,
        "passthru": "false",
        "lineColour": "magenta",
        "x": 710,
        "y": 140,
        "wires": [
            [
                "e3ea622a4ec8b539"
            ]
        ]
    },
    {
        "id": "3b1946530d5f7cf1",
        "type": "inject",
        "z": "a5834b67196b4d76",
        "name": "Capture",
        "props": [
            {
                "p": "payload"
            },
            {
                "p": "topic",
                "vt": "str"
            }
        ],
        "repeat": "",
        "crontab": "",
        "once": false,
        "onceDelay": 0.1,
        "topic": "c",
        "payloadType": "str",
        "x": 110,
        "y": 260,
        "wires": [
            [
                "033e31606dbf78e7"
            ]
        ]
    },
    {
        "id": "033e31606dbf78e7",
        "type": "serial request",
        "z": "a5834b67196b4d76",
        "name": "Read Image data from Arduino",
        "serial": "bf0fde834874e6fe",
        "x": 310,
        "y": 140,
        "wires": [
            [
                "910de6e220fcd863"
            ]
        ]
    },
    {
        "id": "e3ea622a4ec8b539",
        "type": "debug",
        "z": "a5834b67196b4d76",
        "name": "Show detected objects",
        "active": true,
        "tosidebar": true,
        "console": false,
        "tostatus": false,
        "complete": "payload",
        "targetType": "msg",
        "statusVal": "",
        "statusType": "auto",
        "x": 910,
        "y": 260,
        "wires": []
    },
    {
        "id": "35748a9fbf8ef029",
        "type": "comment",
        "z": "a5834b67196b4d76",
        "name": "",
        "info": "Send c character to Arduino to capture image.\nPython to convert Hex code of image to image file.",
        "x": 280,
        "y": 300,
        "wires": []
    },
    {
        "id": "bf0fde834874e6fe",
        "type": "serial-port",
        "serialport": "/dev/tty.usbmodem14201",
        "serialbaud": "9600",
        "databits": "8",
        "parity": "none",
        "stopbits": "1",
        "waitfor": "",
        "dtr": "none",
        "rts": "none",
        "cts": "none",
        "dsr": "none",
        "newline": "\\n",
        "bin": "false",
        "out": "char",
        "addchar": "",
        "responsetimeout": "10000"
    }
]

Here is how I edited the Python code

## From https://colab.research.google.com/github/tinyMLx/colabs/blob/master/4-2-12-OV7675ImageViewer.ipynb

import sys
import numpy as np


def rgb565_to_rgb(pixels, height=144, width=176):
    """Convert a sequence of big-endian RGB565 pixel words to an RGB image.

    Args:
        pixels: iterable of 16-bit integers, one per pixel, in the byte
            order the OV7675/Arduino sketch emits (high byte first, so each
            word is byte-swapped before the channel bits are extracted --
            this matches the original code's struct.unpack('>h', ...) on a
            native little-endian array).
        height: image height in pixels (default 144, QCIF).
        width: image width in pixels (default 176, QCIF).

    Returns:
        numpy array of shape (height, width, 3) holding 8-bit R, G, B values.

    Raises:
        ValueError: if len(pixels) != height * width (from the reshape).
    """
    pixels = list(pixels)
    image = np.zeros((len(pixels), 3), dtype=int)
    for i, word in enumerate(pixels):
        # Swap the two bytes of the 16-bit word (big-endian capture stream).
        pixel = ((word & 0xFF) << 8) | ((word >> 8) & 0xFF)
        # Unpack RGB565 into 8-bit channels; the left shifts scale the
        # 5-/6-bit fields up to the full 0-255 range.
        r = ((pixel >> 11) & 0x1F) << 3
        g = ((pixel >> 5) & 0x3F) << 2
        b = ((pixel >> 0) & 0x1F) << 3
        image[i] = [r, g, b]
    return image.reshape((height, width, 3))


def parse_hex_words(text):
    """Parse a string such as "0x905B, 0x905B" into a list of integers.

    Tokens may be separated by commas and/or whitespace; each token is read
    as hexadecimal (a leading "0x" is optional).
    """
    tokens = text.replace(",", " ").split()
    return [int(token, 16) for token in tokens]


if __name__ == "__main__":
    # sys.argv[1] carries the hex dump captured from the Arduino. The
    # original code passed the raw string straight to np.array(dtype="i2"),
    # which raises ValueError -- the words must be parsed first.
    words = parse_hex_words(sys.argv[1])
    image = rgb565_to_rgb(words)
    # The pythonshell node captures stdout, so the result must be printed;
    # the original my_function() return value was silently discarded and the
    # flow received nothing.
    print(image.tolist())

Thanks..
Today I will get an ESP32CAM, so I will test it.
My issue with the Arduino is to capture and save an image (as an image file, not HEX).

The process I supplied can work with any IP camera, when using the correct URL. I have used HikVision, Amcrest & ESP32. My Foscam is used elsewhere, but it would have worked just the same.

And it does save the captured images as .JPG so you probably only need part of what I sent, starting with the node labeled Grab Image

One thing to note... I run my Node-Red on a RPi, so that is where the images are saved.

Thanks a lot..
I tested it with ESP32 Cam using http request node. It worked.
If it is not an IP camera, what are the possible solutions to upload an image from the file system?

Ummm... none? :stuck_out_tongue: Not sure how you define a non-IP camera... I call them analog, proprietary cloud-serviced, or standalone memory card, etc. Generally none of those are properly compatible, if not impossible, for real-time uploading.

If you are referring to USB webcams, then that should be OK as long as they are plugged into the RPi, for image capture, or use other 3rd party software to "make" them IP compatible (YAWCAM for windows, not sure of others) and there are various apps for phone/tablet cameras to make them into "IP cams".

Instead of shelling out to python, you can do this all "in house" using node-red-contrib-image-tools ...

[{"id":"7fd7ebc37e4a475f","type":"function","z":"a4595cfa.591e2","name":"image data","func":"\n\n//get data from step 2 of the colab at...\n// https://colab.research.google.com/github/tinyMLx/colabs/blob/master/4-2-12-OV7675ImageViewer.ipynb#scrollTo=WSozbpyAFs8a\n\nmsg.data = [] //replace [] with data from colab\n\nreturn msg;","outputs":1,"noerr":0,"initialize":"","finalize":"","libs":[],"x":750,"y":500,"wires":[["c690b31453e4a712"]]},{"id":"31b45a27dfbadc57","type":"inject","z":"a4595cfa.591e2","name":"","props":[{"p":"payload"},{"p":"topic","vt":"str"}],"repeat":"","crontab":"","once":false,"onceDelay":0.1,"topic":"","payload":"","payloadType":"date","x":570,"y":500,"wires":[["7fd7ebc37e4a475f"]]},{"id":"c690b31453e4a712","type":"jimp-image","z":"a4595cfa.591e2","name":"create blank img 144 x 176 ","data":"{\"w\": 176, \"h\": 144 }","dataType":"json","ret":"img","parameter1":"","parameter1Type":"msg","parameter2":"","parameter2Type":"msg","parameter3":"","parameter3Type":"msg","parameter4":"","parameter4Type":"msg","parameter5":"","parameter5Type":"msg","parameter6":"","parameter6Type":"msg","parameter7":"","parameter7Type":"msg","parameter8":"","parameter8Type":"msg","sendProperty":"image","sendPropertyType":"msg","parameterCount":0,"jimpFunction":"none","selectedJimpFunction":{"name":"none","fn":"none","description":"Just loads the image.","parameters":[]},"x":680,"y":580,"wires":[["dbdee63df5e50197"]]},{"id":"dbdee63df5e50197","type":"function","z":"a4595cfa.591e2","name":"Convert RGB565 to RGB 24 bitmap","func":"\nconst data = msg.data;\nconst image = msg.image;\n\nlet x = 0;\nlet y = 0;\nconst width = image.bitmap.width;\nconst height = image.bitmap.height;\n\nfor (let index = 0; index < data.length; index++) {\n    const pixel = swap16(data[index]);//get pixel data and change endianness\n    //Convert RGB565 to RGB 24 - bit\n    let r = ((pixel >> 11) & 0x1f) << 3;\n    let g = ((pixel >> 5) & 0x3f) << 2;\n    let b = ((pixel >> 0) & 0x1f) << 3;\n    let 
rbghex = RGBA2INT(r,g,b,255);\n    image.setPixelColor(rbghex, x, y);\n    x++;\n    if(x >= width) {\n        x = 0;\n        y++;\n    }\n}\nmsg.payload = image;\nreturn msg;\n\nfunction swap16(val) {\n    return ((val & 0xFF) << 8) | ((val >> 8) & 0xFF);\n}\n\nfunction RGBA2INT(red, green, blue, alpha) {\n    var r = red & 0xFF;\n    var g = green & 0xFF;\n    var b = blue & 0xFF;\n    var a = alpha & 0xFF;\n    return (r << 24 >>> 0) + (g << 16) + (b << 8) + (a);\n}","outputs":1,"noerr":0,"initialize":"","finalize":"","libs":[],"x":980,"y":580,"wires":[["dcb17aba9c5030a3"]]},{"id":"dcb17aba9c5030a3","type":"jimp-image","z":"a4595cfa.591e2","name":"image to buffer","data":"payload","dataType":"msg","ret":"buf","parameter1":"","parameter1Type":"msg","parameter2":"","parameter2Type":"msg","parameter3":"","parameter3Type":"msg","parameter4":"","parameter4Type":"msg","parameter5":"","parameter5Type":"msg","parameter6":"","parameter6Type":"msg","parameter7":"","parameter7Type":"msg","parameter8":"","parameter8Type":"msg","sendProperty":"payload","sendPropertyType":"msg","parameterCount":0,"jimpFunction":"none","selectedJimpFunction":{"name":"none","fn":"none","description":"Just loads the image.","parameters":[]},"x":1250,"y":580,"wires":[["b5f620707705637f"]]},{"id":"b5f620707705637f","type":"image viewer","z":"a4595cfa.591e2","name":"","width":160,"data":"payload","dataType":"msg","active":true,"x":1410,"y":580,"wires":[[]]}]

...then feed the binary image data into your model




NOTE:
I could not post the full flow with the "image data" function because the image data inside of the function was too large to post.

I basically copied the data from step 2 of the colab and added it to msg.data in the "image data" function node like this...

msg.data =  [
    0x905B, 0x905B, 0xB05B, 
    //Lots and lots of 0xhexadecimal data snipped for brevity
    0x5394, 0x5394, 0x7394
]
return msg;
1 Like

Thank you it worked!

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.