While playing with the Good-I-Deer nodes, I wanted to visualise the output, so I used the above node, however it is very slow (no offence @dceejay@knolleary )
So with some help from chat-GPT I modified it to work with canvas instead, and it really is so much faster.
Has anyone else done something similar ?
I couldn't seem to find any other nodes, please do let me know if I missed something.
Is there any likelihood of an alternative "core" node using canvas ?
I'm no expert with creating a new node etc, so for now I just edited annotate.js directly in node_modules.
Would be interested to hear others' thoughts about this.
If what you are doing is likely to be fairly one-off (e.g. just in 1 node-red instance), then it is a good idea to start by applying the external library to a function node and doing the processing in that.
You can always move the processing to a custom node at some later point (or raise a PR against an existing node if the authors are happy) but if you don't ever need to re-use or share then a function node is surely fine.
I had assumed that using the pureimage package was a conscious decision, perhaps because, as I have read, canvas can be difficult to install on some systems ?
If not then it's not difficult to change the code, but I'm not sure about having canvas install correctly on different systems automatically.
After modifying annotate.js to work with canvas, I installed the prerequisites listed for Ubuntu and did --build-from-source on my Pi as detailed here -
Modified code - added bonus now also works with PNG and GIF files
module.exports = function(RED) {
"use strict";
const { createCanvas, loadImage, registerFont } = require("canvas");
const path = require("path");
let fontLoaded = false;
function loadFont() {
if (!fontLoaded) {
registerFont(path.join(__dirname,'./SourceSansPro-Regular.ttf'), { family: 'Source Sans Pro' });
fontLoaded = true;
}
}
function AnnotateNode(n) {
RED.nodes.createNode(this, n);
var node = this;
const defaultFill = n.fill || "";
const defaultStroke = n.stroke || "#ffC000";
const defaultLineWidth = parseInt(n.lineWidth) || 5;
const defaultFontSize = n.fontSize || 24;
const defaultFontColor = n.fontColor || "#ffC000";
loadFont();
this.on("input", function(msg) {
if (Buffer.isBuffer(msg.payload)) {
if (Array.isArray(msg.annotations) && msg.annotations.length > 0) {
const buffer = Buffer.from(msg.payload);
loadImage(buffer).then(img => {
const canvas = createCanvas(img.width, img.height);
const ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
ctx.lineJoin = 'bevel';
msg.annotations.forEach(function(annotation) {
ctx.fillStyle = annotation.fill || defaultFill;
ctx.strokeStyle = annotation.stroke || defaultStroke;
ctx.lineWidth = annotation.lineWidth || defaultLineWidth;
let x, y, r, w, h;
if (!annotation.type && annotation.bbox) {
annotation.type = 'rect';
}
switch (annotation.type) {
case 'rect':
if (annotation.bbox) {
x = annotation.bbox[0];
y = annotation.bbox[1];
w = annotation.bbox[2];
h = annotation.bbox[3];
} else {
x = annotation.x;
y = annotation.y;
w = annotation.w;
h = annotation.h;
}
if (x < 0) {
w += x;
x = 0;
}
if (y < 0) {
h += y;
y = 0;
}
ctx.beginPath();
ctx.rect(x, y, w, h);
ctx.stroke();
if (annotation.label) {
ctx.font = `${annotation.fontSize || defaultFontSize}px 'Source Sans Pro'`;
ctx.fillStyle = annotation.fontColor || defaultFontColor;
ctx.textBaseline = "top";
ctx.textAlign = "left";
if (annotation.labelLocation) {
if (annotation.labelLocation === "top") {
y = y - (20 + ((defaultLineWidth * 0.5) + Number(defaultFontSize)));
if (y < 0) y = 0;
} else if (annotation.labelLocation === "bottom") {
y = y + (10 + h + ((defaultLineWidth * 0.5) + Number(defaultFontSize)));
ctx.textBaseline = "bottom";
}
} else {
if (y < 0 + (20 + ((defaultLineWidth * 0.5) + Number(defaultFontSize)))) {
y = y + (10 + h + ((defaultLineWidth * 0.5) + Number(defaultFontSize)));
ctx.textBaseline = "bottom";
} else {
y = y - (20 + ((defaultLineWidth * 0.5) + Number(defaultFontSize)));
if (y < 0) y = 0;
}
}
ctx.fillText(annotation.label, x, y);
}
break;
case 'circle':
if (annotation.bbox) {
x = annotation.bbox[0] + annotation.bbox[2] / 2;
y = annotation.bbox[1] + annotation.bbox[3] / 2;
r = Math.min(annotation.bbox[2], annotation.bbox[3]) / 2;
} else {
x = annotation.x;
y = annotation.y;
r = annotation.r;
}
ctx.beginPath();
ctx.arc(x, y, r, 0, Math.PI * 2);
ctx.stroke();
if (annotation.label) {
ctx.font = `${annotation.fontSize || defaultFontSize}px 'Source Sans Pro'`;
ctx.fillStyle = annotation.fontColor || defaultFontColor;
ctx.textBaseline = "middle";
ctx.textAlign = "center";
ctx.fillText(annotation.label, x, y);
}
break;
}
});
const outputBuffer = canvas.toBuffer('image/jpeg', { quality: 0.9 });
msg.payload = outputBuffer;
node.send(msg);
}).catch(err => {
node.error(err, msg);
});
} else {
node.send(msg);
}
} else {
node.error("Payload not a Buffer", msg);
}
return msg;
});
}
RED.nodes.registerType("annotate-image", AnnotateNode);
};
};
Julian, I have been using that approach also to make changes to @BartButenaers Image info node
This then allowed me to get rid of 2 of the dependencies by including their code in the function node.
I have also "hacked" the good-I-deer face detection node to output both image buffers and found object data in the output, as by default it is one or the other. So that may be a good candidate for transferring to a function node as well.