You can edit the JavaScript code below to customize the image tool.
Apply Changes
/**
 * OCRs the text in an image, translates each recognized line, and paints the
 * translations back over the original text on an offscreen canvas.
 *
 * @param {HTMLImageElement} originalImg Fully-loaded source image.
 * @param {string} [targetLang='en'] Target language code for translation.
 * @param {string} [sourceLang='auto'] Source language code ('auto' to request
 *   detection — NOTE(review): confirm the MyMemory API accepts 'auto' as a
 *   langpair source; it may require an explicit ISO code).
 * @returns {Promise<HTMLCanvasElement>} Canvas sized to the image's natural
 *   dimensions with translated text drawn in; on failure the canvas shows the
 *   original image under a red error overlay (the promise does not reject).
 */
async function processImage(originalImg, targetLang = 'en', sourceLang = 'auto') {
  /**
   * Dynamically loads a script, resolving once it has executed.
   * A script already present in the DOM (matched by src) resolves immediately.
   * @param {string} url The URL of the script to load.
   * @returns {Promise<void>}
   */
  const loadScript = (url) => {
    if (document.querySelector(`script[src="${url}"]`)) {
      return Promise.resolve();
    }
    return new Promise((resolve, reject) => {
      const script = document.createElement('script');
      script.src = url;
      script.onload = () => resolve();
      script.onerror = () => reject(new Error(`Script load error for ${url}`));
      document.head.appendChild(script);
    });
  };

  /**
   * Translates text via the free, no-key MyMemory API. Falls back to the
   * original text on any API or network failure so the pipeline never
   * rejects from this helper.
   * @param {string} text The text to translate.
   * @param {string} source The source language code.
   * @param {string} target The target language code.
   * @returns {Promise<string>} The translated text ('' for blank input).
   */
  const translateText = async (text, source, target) => {
    if (!text || !text.trim()) {
      return '';
    }
    try {
      // Build the URL with searchParams so q and langpair (incl. '|') are
      // escaped correctly instead of hand-concatenating the query string.
      const url = new URL('https://api.mymemory.translated.net/get');
      url.searchParams.set('q', text);
      url.searchParams.set('langpair', `${source}|${target}`);
      const response = await fetch(url);
      if (!response.ok) {
        return text; // Return original text on API error
      }
      const data = await response.json();
      // FIX: responseData can be null (e.g. quota exceeded); optional
      // chaining avoids a TypeError and falls back to the original text.
      return data.responseData?.translatedText || text;
    } catch (error) {
      console.error('Translation failed:', error);
      return text; // Return original text on network error
    }
  };

  const canvas = document.createElement('canvas');
  // willReadFrequently hints the browser to keep the buffer CPU-side,
  // since we call getImageData once per recognized line below.
  const ctx = canvas.getContext('2d', { willReadFrequently: true });
  canvas.width = originalImg.naturalWidth;
  canvas.height = originalImg.naturalHeight;

  // Repaint the source image, dim it, and show a centered status message.
  const showOverlay = (message, tint = 'rgba(0, 0, 0, 0.7)') => {
    ctx.drawImage(originalImg, 0, 0, canvas.width, canvas.height);
    ctx.fillStyle = tint;
    ctx.fillRect(0, 0, canvas.width, canvas.height);
    ctx.font = '30px Arial';
    ctx.fillStyle = 'white';
    ctx.textAlign = 'center';
    ctx.textBaseline = 'middle';
    ctx.fillText(message, canvas.width / 2, canvas.height / 2);
  };

  showOverlay('Processing: Recognizing text...');

  let worker = null; // Tesseract worker; terminated in `finally` (see below)
  try {
    // Step 1: Load Tesseract.js OCR library
    await loadScript('https://cdn.jsdelivr.net/npm/tesseract.js@5/dist/tesseract.min.js');

    // Step 2: Initialize Tesseract worker and perform OCR
    worker = await Tesseract.createWorker();
    const { data: { lines } } = await worker.recognize(originalImg);

    showOverlay(`Translating ${lines.length} lines of text...`);

    // Step 3: Translate all recognized lines concurrently. Lines are trimmed
    // so trailing OCR newlines are not sent to the translation API.
    const translatedTexts = await Promise.all(
      lines.map((line) => translateText(line.text.trim(), sourceLang, targetLang)),
    );

    // Step 4: Redraw the original image to clear the loading message
    ctx.drawImage(originalImg, 0, 0, canvas.width, canvas.height);

    // Step 5: Draw each translated line over the original text
    lines.forEach((line, index) => {
      const translatedText = translatedTexts[index];
      if (!translatedText) return;

      const { x0, y0, x1, y1 } = line.bbox;
      const width = x1 - x0;
      const height = y1 - y0;
      if (width <= 0 || height <= 0) return; // FIX: skip degenerate bboxes

      // Estimate the background color by sampling the bbox's top-left pixel.
      // NOTE(review): a corner pixel may land on a glyph; averaging a border
      // strip would be more robust — confirm against real inputs.
      const [r, g, b] = ctx.getImageData(x0, y0, 1, 1).data;
      const bgColor = `rgb(${r}, ${g}, ${b})`;

      // Pick black or white text, whichever contrasts with the background
      // (ITU-R BT.601 luma weights).
      const luminance = (0.299 * r + 0.587 * g + 0.114 * b) / 255;
      const textColor = luminance > 0.5 ? 'black' : 'white';

      // Cover the original text with the sampled background color.
      ctx.fillStyle = bgColor;
      ctx.fillRect(x0, y0, width, height);

      ctx.fillStyle = textColor;
      ctx.textAlign = 'left';
      ctx.textBaseline = 'top';

      // Shrink the font until the translation fits the box (floor of 5px).
      let fontSize = height * 0.9;
      ctx.font = `bold ${fontSize}px 'Arial', sans-serif`;
      while (ctx.measureText(translatedText).width > width && fontSize > 5) {
        fontSize -= 1;
        ctx.font = `bold ${fontSize}px 'Arial', sans-serif`;
      }

      // Center the text horizontally and vertically within the bbox.
      const textWidth = ctx.measureText(translatedText).width;
      const textX = x0 + (width - textWidth) / 2;
      const textY = y0 + (height - fontSize) / 2;
      ctx.fillText(translatedText, textX, textY);
    });
  } catch (error) {
    console.error('Image processing failed:', error);
    // Show the original image under a red error overlay.
    ctx.drawImage(originalImg, 0, 0, canvas.width, canvas.height);
    ctx.fillStyle = 'rgba(255, 0, 0, 0.7)';
    ctx.fillRect(0, 0, canvas.width, canvas.height);
    ctx.font = '20px Arial';
    ctx.fillStyle = 'white';
    ctx.textAlign = 'center';
    ctx.textBaseline = 'middle';
    ctx.fillText('An error occurred during processing.', canvas.width / 2, canvas.height / 2);
  } finally {
    // BUG FIX: the worker was previously terminated only on the success
    // path, leaking the worker thread whenever OCR, translation, or drawing
    // threw. Terminate unconditionally; ignore termination errors.
    if (worker) {
      await worker.terminate().catch(() => {});
    }
  }
  return canvas;
}
Apply Changes