You can edit the JavaScript code below to customize the image tool.
/**
 * Detects text in an image using OCR, draws bounding boxes around the detected
 * words, and renders the extracted text in a panel below the image.
 *
 * NOTE: For this function to work on images loaded from other domains
 * (cross-origin), the provided 'originalImg' element must have its
 * 'crossOrigin' property set to 'anonymous' before its 'src' is assigned,
 * otherwise the canvas becomes tainted and recognition/readback fails.
 *
 * @param {HTMLImageElement} originalImg The source image object to process.
 * @param {string} [lang='eng'] The 3-letter ISO 639-2 language code for the text
 *   in the image (e.g., 'eng' for English, 'rus' for Russian, 'deu' for German).
 * @returns {Promise<HTMLCanvasElement>} A promise that resolves to a canvas
 *   containing the image with text highlighted and the extracted text displayed
 *   below. On failure the canvas shows an error message in the bottom panel;
 *   the promise still resolves.
 */
async function processImage(originalImg, lang = 'eng') {
  // 1. Dynamically load Tesseract.js if it's not already present. Caching the
  // promise on `window` ensures the script is fetched only once, even if this
  // function is called multiple times (including concurrently).
  if (typeof Tesseract === 'undefined') {
    if (!window.tesseractPromise) {
      window.tesseractPromise = new Promise((resolve, reject) => {
        const script = document.createElement('script');
        script.src = 'https://cdn.jsdelivr.net/npm/tesseract.js@5/dist/tesseract.min.js';
        script.onload = resolve;
        script.onerror = reject;
        document.head.appendChild(script);
      });
    }
    await window.tesseractPromise;
  }

  // 2. Set up the result canvas upfront so progress updates can be shown:
  // the image fills the top, and a fixed-height panel sits underneath.
  const TEXT_AREA_HEIGHT = 80;
  const resultCanvas = document.createElement('canvas');
  resultCanvas.width = originalImg.naturalWidth;
  resultCanvas.height = originalImg.naturalHeight + TEXT_AREA_HEIGHT;
  const ctx = resultCanvas.getContext('2d');

  // Clears the bottom panel and writes a single centered line of text into it.
  // Used for the loading message, progress updates, and the error message.
  const drawPanelMessage = (message, color = 'black') => {
    ctx.fillStyle = '#f0f0f0';
    ctx.fillRect(0, originalImg.naturalHeight, resultCanvas.width, TEXT_AREA_HEIGHT);
    ctx.fillStyle = color;
    ctx.fillText(message, resultCanvas.width / 2, originalImg.naturalHeight + TEXT_AREA_HEIGHT / 2);
  };

  // Draw the initial state with a loading message.
  ctx.drawImage(originalImg, 0, 0, resultCanvas.width, originalImg.naturalHeight);
  ctx.textAlign = 'center';
  ctx.textBaseline = 'middle';
  ctx.font = '16px Arial';
  drawPanelMessage('Initializing OCR...');

  let worker;
  try {
    // 3. Create the Tesseract worker INSIDE the try block so that a failure
    // during initialization (e.g. language data download) also renders the
    // error panel instead of rejecting the returned promise.
    worker = await Tesseract.createWorker(lang, 1, {
      logger: (m) => {
        // Guard m.status: logger messages are not guaranteed to carry one.
        let statusText = `${String(m.status || 'working').replace(/_/g, ' ')}...`;
        // typeof check (not truthiness) so a progress of exactly 0 shows "(0%)".
        if (typeof m.progress === 'number') {
          statusText += ` (${Math.round(m.progress * 100)}%)`;
        }
        drawPanelMessage(statusText);
      },
    });

    const { data: { text, words } } = await worker.recognize(originalImg);

    // 4. Clear the canvas, redraw the image, and outline words detected with
    // reasonable confidence. Bbox coordinates are in natural-image pixels,
    // which matches the canvas because it was sized to naturalWidth/Height.
    ctx.clearRect(0, 0, resultCanvas.width, resultCanvas.height);
    ctx.drawImage(originalImg, 0, 0, resultCanvas.width, originalImg.naturalHeight);
    ctx.lineWidth = 2;
    ctx.strokeStyle = 'rgba(255, 0, 0, 0.7)';
    for (const word of words) {
      if (word.confidence > 60) {
        const { x0, y0, x1, y1 } = word.bbox;
        ctx.strokeRect(x0, y0, x1 - x0, y1 - y0);
      }
    }

    // Draw the extracted text in the panel at the bottom.
    ctx.fillStyle = '#f0f0f0';
    ctx.fillRect(0, originalImg.naturalHeight, resultCanvas.width, TEXT_AREA_HEIGHT);
    ctx.fillStyle = 'black';
    ctx.textAlign = 'center';

    // Word-wraps textToWrap to maxWidth (per ctx.measureText) and draws the
    // lines vertically centered around y.
    const wrapText = (textToWrap, x, y, maxWidth, lineHeight) => {
      const lines = [];
      let currentLine = '';
      // Named `tokens` (not `words`) to avoid shadowing the OCR word list above.
      const tokens = (textToWrap.trim() || 'No text detected.').split(/\s+/);
      for (const token of tokens) {
        const testLine = currentLine ? `${currentLine} ${token}` : token;
        if (ctx.measureText(testLine).width > maxWidth && currentLine) {
          lines.push(currentLine);
          currentLine = token;
        } else {
          currentLine = testLine;
        }
      }
      lines.push(currentLine);
      const startY = y - ((lines.length - 1) * lineHeight) / 2;
      lines.forEach((line, i) => ctx.fillText(line, x, startY + i * lineHeight));
    };

    const FONT_SIZE = 16;
    const LINE_HEIGHT = FONT_SIZE * 1.2;
    const PADDING = 10;
    ctx.font = `${FONT_SIZE}px Arial`;
    wrapText(text, resultCanvas.width / 2, originalImg.naturalHeight + TEXT_AREA_HEIGHT / 2, resultCanvas.width - PADDING * 2, LINE_HEIGHT);
  } catch (error) {
    console.error('OCR processing failed:', error);
    drawPanelMessage('Error: Could not process the image.', 'red');
  } finally {
    // 5. Terminate the worker to free resources. `worker` is undefined when
    // createWorker itself failed, so guard before terminating.
    if (worker) {
      await worker.terminate();
    }
  }
  return resultCanvas;
}
Free Image Tool Creator
Can't find the image tool you're looking for? Create one tailored to your own needs now!
The ‘Image Name Translator From Alphabet On Photo’ tool allows users to extract and translate text from images using Optical Character Recognition (OCR). By uploading an image, users can detect text in various languages and view both the highlighted text within the image and its extracted form displayed below. This tool is useful for translating signboards, reading labels, or extracting information from documents, making it suitable for travelers, students, and anyone needing text information from images.