You can edit the JavaScript code below to customize the image tool.
Apply Changes
/**
 * Identifies objects within an image using a pre-trained model and draws labels on them.
 * This function interprets "Identifier Translator" as a tool that identifies objects (the "identifier")
 * and "translates" them from a visual representation to a text label on the image.
 *
 * It dynamically loads TensorFlow.js and the COCO-SSD model to perform object detection
 * directly in the browser. On any failure (script load or inference) it still resolves
 * with a canvas — containing the original image plus an error message — rather than rejecting.
 *
 * @param {HTMLImageElement} originalImg The original image element to process.
 * @param {number} [confidenceThreshold=0.5] A number between 0 and 1. Only detections with a
 *        score strictly higher than this threshold will be shown. Non-numeric values fall
 *        back to 0.5; out-of-range values are clamped. A value of 0 is respected (show all).
 * @returns {Promise<HTMLCanvasElement>} A promise that resolves with a new canvas element
 *          containing the original image with object detection bounding boxes and labels
 *          drawn on it.
 */
async function processImage(originalImg, confidenceThreshold = 0.5) {
  // --- 1. Parameter Validation ---
  // Explicit NaN check instead of `|| 0.5`: a legitimate threshold of 0 is falsy
  // and would otherwise be silently replaced by the default.
  const numericThreshold = Number(confidenceThreshold);
  const threshold = Number.isFinite(numericThreshold)
    ? Math.max(0, Math.min(1, numericThreshold))
    : 0.5;

  // --- 2. Canvas Setup ---
  // Use natural dimensions so detection coordinates (computed on the source image)
  // map 1:1 onto the canvas.
  const canvas = document.createElement('canvas');
  canvas.width = originalImg.naturalWidth;
  canvas.height = originalImg.naturalHeight;
  const ctx = canvas.getContext('2d');
  ctx.drawImage(originalImg, 0, 0, canvas.width, canvas.height);

  // Paints a translucent overlay with a centered status message (progress feedback
  // while scripts/models load). The image is redrawn later to clear it.
  const drawLoadingMessage = (message) => {
    ctx.fillStyle = 'rgba(0, 0, 0, 0.6)';
    ctx.fillRect(0, 0, canvas.width, canvas.height);
    ctx.fillStyle = 'white';
    ctx.font = `bold ${Math.min(canvas.width, canvas.height) / 15}px Arial`;
    ctx.textAlign = 'center';
    ctx.textBaseline = 'middle';
    ctx.fillText(message, canvas.width / 2, canvas.height / 2);
  };

  // Draws a red error message over a fresh copy of the original image and is
  // used by both failure paths below so the caller always gets a usable canvas.
  const drawErrorMessage = (message) => {
    ctx.drawImage(originalImg, 0, 0, canvas.width, canvas.height);
    ctx.fillStyle = 'red';
    ctx.font = '20px Arial';
    ctx.textAlign = 'left';
    ctx.textBaseline = 'alphabetic';
    ctx.fillText(message, 10, 30);
  };

  // --- 3. Dynamic Script Loader ---
  // This helper ensures we only load the scripts once (keyed by element id).
  // NOTE(review): if a script tag with this id exists but is still loading,
  // this resolves immediately — a potential race for concurrent callers.
  const loadScript = (id, url) => {
    return new Promise((resolve, reject) => {
      if (document.getElementById(id)) {
        resolve();
        return;
      }
      const script = document.createElement('script');
      script.id = id;
      script.src = url;
      script.onload = resolve;
      script.onerror = reject;
      document.head.appendChild(script);
    });
  };

  // --- 4. Load Scripts ---
  try {
    drawLoadingMessage('Loading Models...');
    // Using specific versions for stability. Loads must be sequential:
    // coco-ssd depends on the tfjs global being present.
    await loadScript('tfjs-script', 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@3.11.0/dist/tf.min.js');
    await loadScript('coco-ssd-script', 'https://cdn.jsdelivr.net/npm/@tensorflow-models/coco-ssd@2.2.2/dist/coco-ssd.min.js');
  } catch (error) {
    console.error("Failed to load detection model scripts:", error);
    drawErrorMessage('Error: Could not load ML models.');
    return canvas;
  }

  // --- 5. Perform Detection ---
  // Model loading and inference can also fail (network, WebGL, bad input);
  // guard them the same way as script loading so the promise never rejects
  // and the user sees an on-canvas error instead of a blank overlay.
  let predictions;
  try {
    drawLoadingMessage('Analyzing Image...');
    const model = await cocoSsd.load();
    predictions = await model.detect(originalImg);
  } catch (error) {
    console.error("Object detection failed:", error);
    drawErrorMessage('Error: Object detection failed.');
    return canvas;
  }

  // --- 6. Draw Results on Canvas ---
  // Redraw the original image to clear the loading overlay.
  ctx.drawImage(originalImg, 0, 0, canvas.width, canvas.height);
  ctx.font = `bold ${Math.max(12, Math.min(canvas.width, canvas.height) / 40)}px Arial`;
  ctx.textAlign = 'left';
  ctx.textBaseline = 'top';
  ctx.lineWidth = Math.max(2, canvas.width / 300);

  predictions.forEach((prediction) => {
    if (prediction.score > threshold) {
      const [x, y, width, height] = prediction.bbox;
      const label = `${prediction.class} (${Math.round(prediction.score * 100)}%)`;

      // Bounding box color - using a bright color for visibility.
      const boxColor = '#32cd32'; // LimeGreen
      ctx.strokeStyle = boxColor;
      ctx.strokeRect(x, y, width, height);

      // Text background: sized from the measured label width and the px value
      // parsed out of the current font string.
      ctx.fillStyle = boxColor;
      const textMetrics = ctx.measureText(label);
      const textHeight = parseInt(ctx.font, 10);
      ctx.fillRect(x, y, textMetrics.width + 8, textHeight + 8);

      // Text
      ctx.fillStyle = '#000000'; // Black text for contrast
      ctx.fillText(label, x + 4, y + 4);
    }
  });

  return canvas;
}
Apply Changes