Please bookmark this page to avoid losing your image tool!

Image Translation Screenshot Tool

(Free & Supports Bulk Upload)

Drag & drop your images here or

The result will appear here...
You can edit the JavaScript code below to customize the image tool.
/**
 * Translates text within an image by performing OCR, translating the text,
 * and overlaying the translation back onto the image.
 *
 * This is a heavy client-side operation and may be slow on the first run
 * as it needs to download language and translation models. The translation
 * pipeline is cached on the function object, so later calls reuse it.
 * @param {Image} originalImg The original image object to process.
 * @param {string} sourceLang The tesseract.js language code of the text in the image (e.g., 'eng', 'jpn', 'chi_sim').
 * @param {string} targetLang The tesseract.js language code to translate the text into.
 * @param {string} backgroundColor The background color to use for the text overlay. Can be any valid CSS color or 'auto' to sample from the image.
 * @returns {Promise<HTMLCanvasElement>} A promise that resolves to a canvas element with the translated image.
 */
async function processImage(originalImg, sourceLang = 'jpn', targetLang = 'eng', backgroundColor = 'auto') {

    // Helper: dynamically load the Tesseract.js script if not already present.
    const loadTesseract = () => {
        return new Promise((resolve, reject) => {
            if (typeof Tesseract !== 'undefined') {
                return resolve();
            }
            const script = document.createElement('script');
            script.src = 'https://cdn.jsdelivr.net/npm/tesseract.js@5/dist/tesseract.min.js';
            script.onload = () => resolve();
            script.onerror = (err) => reject(new Error('Failed to load Tesseract.js script.', { cause: err }));
            document.head.appendChild(script);
        });
    };

    // Helper: pick black or white text for maximum contrast against bgColor.
    // A 1x1 canvas normalizes any valid CSS color string into RGB values.
    const getContrastingTextColor = (bgColor) => {
        const tempCanvas = document.createElement('canvas');
        tempCanvas.width = tempCanvas.height = 1;
        const tempCtx = tempCanvas.getContext('2d', { willReadFrequently: true });
        tempCtx.fillStyle = bgColor;
        tempCtx.fillRect(0, 0, 1, 1);
        const [r, g, b] = tempCtx.getImageData(0, 0, 1, 1).data;
        // Rec. 601 relative luminance, normalized to [0, 1].
        const luminance = (0.299 * r + 0.587 * g + 0.114 * b) / 255;
        return luminance > 0.5 ? 'black' : 'white';
    };

    // Helper: average an array of [r, g, b, a] color arrays (alpha ignored).
    // Returns white when there is nothing to average.
    const averageColor = (colors) => {
        const len = colors.length;
        if (len === 0) return { r: 255, g: 255, b: 255 };
        const total = colors.reduce((acc, c) => {
            acc[0] += c[0]; acc[1] += c[1]; acc[2] += c[2];
            return acc;
        }, [0, 0, 0]);
        return {
            r: Math.round(total[0] / len),
            g: Math.round(total[1] / len),
            b: Math.round(total[2] / len),
        };
    };

    // --- Main Canvas Setup ---
    const canvas = document.createElement('canvas');
    const ctx = canvas.getContext('2d', { willReadFrequently: true });
    canvas.width = originalImg.naturalWidth;
    canvas.height = originalImg.naturalHeight;
    ctx.drawImage(originalImg, 0, 0);

    // --- Progress Update Function ---
    // Paints a dimmed overlay with a status line and, when a progress event
    // is supplied, a file name and progress bar. Transformers.js fires some
    // events (e.g. 'initiate', 'done') without numeric `progress` or `file`,
    // so both fields are defaulted defensively to avoid rendering "NaN%".
    const drawProgress = (status, progress = null) => {
        ctx.drawImage(originalImg, 0, 0); // Redraw original image to clear previous overlay
        ctx.fillStyle = 'rgba(0, 0, 0, 0.75)';
        ctx.fillRect(0, 0, canvas.width, canvas.height);
        ctx.fillStyle = 'white';
        ctx.textAlign = 'center';

        if (progress) {
            const file = progress.file ?? '';
            // Clamp to [0, 100] so the bar never over/underflows its track.
            const pct = Math.min(100, Math.max(0, progress.progress ?? 0));
            ctx.font = '24px sans-serif';
            ctx.fillText(status, canvas.width / 2, canvas.height / 2 - 60);
            ctx.font = '18px sans-serif';
            ctx.fillText(`${file} (${Math.round(pct)}%)`, canvas.width / 2, canvas.height / 2 - 20);

            const barWidth = canvas.width * 0.6;
            const barX = (canvas.width - barWidth) / 2;
            const barY = canvas.height / 2 + 20;
            const barHeight = 25;
            ctx.fillStyle = '#666';
            ctx.fillRect(barX, barY, barWidth, barHeight);
            ctx.fillStyle = '#4CAF50';
            ctx.fillRect(barX, barY, barWidth * (pct / 100), barHeight);
        } else {
             ctx.font = '30px sans-serif';
             ctx.fillText(status, canvas.width / 2, canvas.height / 2);
        }
    };

    try {
        drawProgress('Initializing tool...');

        // --- Library Loading ---
        // Load Tesseract.js for OCR and Transformers.js for translation
        await loadTesseract();
        const { pipeline, env } = await import('https://cdn.jsdelivr.net/npm/@xenova/transformers@2.17.1');

        // Optimize for web environment
        env.allowLocalModels = false;
        env.backends.onnx.wasm.numThreads = 1;

        // --- OCR Step ---
        drawProgress('Recognizing text...');
        const worker = await Tesseract.createWorker(sourceLang);
        let lines;
        try {
            ({ data: { lines } } = await worker.recognize(canvas));
        } finally {
            // Always release the worker, even when recognition throws,
            // to avoid leaking its WASM resources.
            await worker.terminate();
        }

        // Drop lines whose OCR text is empty/whitespace: translating them
        // wastes model time and can produce spurious output.
        const textLines = (lines ?? []).filter((line) => line.text.trim().length > 0);
        if (textLines.length === 0) {
            console.log("No text found in the image.");
            return canvas; // Return original image on canvas
        }

        // --- Translation Step ---
        // Language code mapping from Tesseract to NLLB model
        const langMap = { 'eng': 'eng_Latn', 'jpn': 'jpn_Jpan', 'chi_sim': 'zho_Hans', 'chi_tra': 'zho_Hant', 'kor': 'kor_Hang', 'deu': 'deu_Latn', 'fra': 'fra_Latn', 'spa': 'spa_Latn', 'rus': 'rus_Cyrl', 'ita': 'ita_Latn', 'por': 'por_Latn' };

        const src_lang = langMap[sourceLang] || 'eng_Latn';
        const tgt_lang = langMap[targetLang] || 'eng_Latn';

        const originalTexts = textLines.map(line => line.text.trim());

        // Cache the translator pipeline on the function object to avoid re-downloading the model
        if (!processImage.translator) {
            processImage.translator = await pipeline('translation', 'Xenova/nllb-200-distilled-600M', {
                progress_callback: (p) => drawProgress('Loading Translation Model', p),
            });
        }

        drawProgress('Translating text...');
        const translatedOutputs = await processImage.translator(originalTexts, { src_lang, tgt_lang });

        // Redraw original image to start fresh for overlaying
        ctx.drawImage(originalImg, 0, 0);

        // --- Overlay Step ---
        ctx.textBaseline = 'middle';
        ctx.textAlign = 'center';

        for (let i = 0; i < textLines.length; i++) {
            const bbox = textLines[i].bbox;
            // Guard against the pipeline returning fewer items than inputs.
            const translatedText = translatedOutputs[i]?.translation_text ?? '';

            // 1. Determine background color
            let fillStyle;
            if (backgroundColor === 'auto') {
                // Sample the four corners just outside the bounding box and
                // average them, approximating the surrounding background.
                const colors = [];
                const padding = 3;
                const points = [ [bbox.x0 - padding, bbox.y0 - padding], [bbox.x1 + padding, bbox.y0 - padding], [bbox.x0 - padding, bbox.y1 + padding], [bbox.x1 + padding, bbox.y1 + padding] ];
                points.forEach(([x, y]) => {
                    // Clamp samples to the canvas so edge boxes don't read out of bounds.
                    const clampedX = Math.max(0, Math.min(x, canvas.width - 1));
                    const clampedY = Math.max(0, Math.min(y, canvas.height - 1));
                    colors.push(ctx.getImageData(clampedX, clampedY, 1, 1).data);
                });
                const avgColor = averageColor(colors);
                fillStyle = `rgb(${avgColor.r},${avgColor.g},${avgColor.b})`;
            } else {
                fillStyle = backgroundColor;
            }

            // 2. Cover original text
            ctx.fillStyle = fillStyle;
            ctx.fillRect(bbox.x0, bbox.y0, bbox.x1 - bbox.x0, bbox.y1 - bbox.y0);

            // 3. Draw translated text, auto-sizing the font to fit the box.
            ctx.fillStyle = getContrastingTextColor(fillStyle);
            const boxWidth = bbox.x1 - bbox.x0;
            const boxHeight = bbox.y1 - bbox.y0;
            // Clamp the starting size so very short boxes never yield an
            // invalid (zero/negative) ctx.font string.
            let fontSize = Math.max(boxHeight * 0.8, 6);

            // Shrink until the text fits within 95% of the box width,
            // never going below a 5px floor.
            do {
                ctx.font = `${fontSize}px sans-serif`;
                fontSize--;
            } while (ctx.measureText(translatedText).width > boxWidth * 0.95 && fontSize > 5);

            const centerX = bbox.x0 + boxWidth / 2;
            const centerY = bbox.y0 + boxHeight / 2;
            ctx.fillText(translatedText, centerX, centerY);
        }

        return canvas;

    } catch (error) {
        console.error("Image translation failed:", error);
        // On error, draw the error message on the canvas over the original image
        ctx.drawImage(originalImg, 0, 0);
        ctx.fillStyle = 'rgba(255, 0, 0, 0.7)';
        ctx.fillRect(0, 0, canvas.width, canvas.height);
        ctx.fillStyle = 'white';
        ctx.textAlign = 'center';
        ctx.font = '20px sans-serif';
        ctx.fillText('An error occurred during translation.', canvas.width / 2, canvas.height / 2 - 15);
        ctx.font = '14px sans-serif';
        ctx.fillText(error.message, canvas.width / 2, canvas.height / 2 + 15);
        return canvas;
    }
}

Free Image Tool Creator

Can't find the image tool you're looking for?
Create one based on your own needs now!

Description

The Image Translation Screenshot Tool enables users to translate text within images. It utilizes Optical Character Recognition (OCR) to identify and extract text from images, then translates the recognized text into a specified target language. The translated text is overlaid onto the original image in place of the source text, so the translation can be read directly within the image. This tool can be useful for students, travelers, and professionals who need to understand foreign text in images, such as signs, documents, and screenshots, enhancing accessibility and comprehension of visual information.

Leave a Reply

Your email address will not be published. Required fields are marked *