// You can edit the below JavaScript code to customize the image tool,
// then use the "Apply Changes" control to apply it.
/**
 * Detects faces in an image (pico.js, lazily loaded from a CDN) and hides
 * each one on a canvas copy of the image.
 *
 * @param {HTMLImageElement} originalImg - Fully loaded image to process.
 * @param {string} [method="pixelate"] - "pixelate" | "blur" | "blackout" | "emoji".
 *   Unknown values fall back to "pixelate".
 * @param {number} [optionValue=20] - Pixel block size for "pixelate", blur
 *   radius in px for "blur". Non-finite values fall back to 20.
 * @param {string} [fillColor="black"] - CSS color used by "blackout".
 * @param {string} [emojiCharacter="😀"] - Glyph drawn by "emoji".
 * @returns {Promise<HTMLCanvasElement>} Canvas with faces hidden. Never
 *   rejects: load failures return an error-message canvas, and detection
 *   failures return the original image with a red error banner on top.
 */
async function processImage(originalImg, method = "pixelate", optionValue = 20, fillColor = "black", emojiCharacter = "😀") {
    // 1. Refuse images that have not finished loading/decoding; return a
    //    placeholder canvas so callers always receive a displayable element.
    if (!originalImg.complete || originalImg.naturalWidth === 0) {
        console.warn("Image not fully loaded or is invalid. Ensure the Image object is fully loaded and valid before calling processImage.");
        const errorCanvas = document.createElement('canvas');
        // Use the element's layout size when plausible, else a sane default.
        errorCanvas.width = (originalImg.width && originalImg.width > 0) ? originalImg.width : 300;
        errorCanvas.height = (originalImg.height && originalImg.height > 0) ? originalImg.height : 150;
        const errorCtx = errorCanvas.getContext('2d');
        errorCtx.fillStyle = "lightgray";
        errorCtx.fillRect(0, 0, errorCanvas.width, errorCanvas.height);
        errorCtx.fillStyle = "red";
        errorCtx.textAlign = "center";
        errorCtx.textBaseline = "middle";
        errorCtx.font = "16px Arial";
        errorCtx.fillText("Error: Image not loaded or invalid.", errorCanvas.width / 2, errorCanvas.height / 2);
        return errorCanvas;
    }

    // 2. Draw the pristine image once; every hiding effect paints on top, so
    //    if detection fails or finds nothing the unmodified image is returned.
    const canvas = document.createElement('canvas');
    const ctx = canvas.getContext('2d');
    canvas.width = originalImg.naturalWidth;
    canvas.height = originalImg.naturalHeight;
    ctx.drawImage(originalImg, 0, 0, canvas.width, canvas.height);

    // Clamp a (possibly out-of-bounds) rectangle to the canvas. Detections
    // near an image edge extend past it; reading outside the canvas yields
    // transparent black, which would darken averaged blocks and leave
    // transparent halos. Returns integer coords; width/height may be <= 0.
    function clampRect(x, y, width, height) {
        const left = Math.max(0, Math.round(x));
        const top = Math.max(0, Math.round(y));
        const right = Math.min(canvas.width, Math.round(x + width));
        const bottom = Math.min(canvas.height, Math.round(y + height));
        return { x: left, y: top, width: right - left, height: bottom - top };
    }

    // pico.js consumes a single-channel grayscale buffer; this is its
    // documented fast approximation of luma: (2*R + 7*G + 1*B) / 10.
    function rgba_to_grayscale(rgba, nrows, ncols) {
        const gray = new Uint8Array(nrows * ncols);
        for (let r = 0; r < nrows; ++r) {
            for (let c = 0; c < ncols; ++c) {
                const p = (r * ncols + c) * 4; // start of this pixel's RGBA quad
                gray[r * ncols + c] = (2 * rgba[p] + 7 * rgba[p + 1] + rgba[p + 2]) / 10;
            }
        }
        return gray;
    }

    // "pixelate": replace each blockSize x blockSize cell of the face region
    // with its average color (alpha included).
    function pixelateFace(context, x, y, width, height, blockSize = 20) {
        blockSize = Math.max(1, Math.floor(blockSize));
        const rect = clampRect(x, y, width, height);
        if (rect.width <= 0 || rect.height <= 0) return; // face entirely off-canvas
        const imageData = context.getImageData(rect.x, rect.y, rect.width, rect.height);
        const data = imageData.data;
        for (let j = 0; j < rect.height; j += blockSize) {
            for (let i = 0; i < rect.width; i += blockSize) {
                // Edge cells may be smaller than blockSize.
                const blockH = Math.min(blockSize, rect.height - j);
                const blockW = Math.min(blockSize, rect.width - i);
                const count = blockH * blockW;
                let rSum = 0, gSum = 0, bSum = 0, aSum = 0;
                for (let bj = 0; bj < blockH; bj++) {
                    for (let bi = 0; bi < blockW; bi++) {
                        const idx = ((j + bj) * rect.width + (i + bi)) * 4;
                        rSum += data[idx];
                        gSum += data[idx + 1];
                        bSum += data[idx + 2];
                        aSum += data[idx + 3];
                    }
                }
                const avgR = rSum / count;
                const avgG = gSum / count;
                const avgB = bSum / count;
                const avgA = aSum / count;
                for (let bj = 0; bj < blockH; bj++) {
                    for (let bi = 0; bi < blockW; bi++) {
                        const idx = ((j + bj) * rect.width + (i + bi)) * 4;
                        data[idx] = avgR;
                        data[idx + 1] = avgG;
                        data[idx + 2] = avgB;
                        data[idx + 3] = avgA;
                    }
                }
            }
        }
        context.putImageData(imageData, rect.x, rect.y);
    }

    // "blur": copy the face region from the pristine source image into a temp
    // canvas, apply a CSS blur filter, and paint it back over the main canvas.
    // Falls back to pixelation where ctx.filter is unsupported (e.g. older
    // Safari) — a silent no-op would leave the face visible.
    function blurFace(mainCtx, x, y, width, height, blurRadius = 10, imageToBlurFrom) {
        const rect = clampRect(x, y, width, height);
        if (rect.width <= 0 || rect.height <= 0) return;
        const tempCanvas = document.createElement('canvas');
        tempCanvas.width = rect.width;
        tempCanvas.height = rect.height;
        const tempCtx = tempCanvas.getContext('2d');
        tempCtx.drawImage(
            imageToBlurFrom,                          // source image (pristine pixels)
            rect.x, rect.y, rect.width, rect.height,  // source rectangle
            0, 0, rect.width, rect.height             // destination on temp canvas
        );
        if (typeof tempCtx.filter === 'string') {
            tempCtx.filter = `blur(${blurRadius}px)`;
            tempCtx.drawImage(tempCanvas, 0, 0); // redraw onto itself so the filter applies
            mainCtx.drawImage(tempCanvas, rect.x, rect.y);
        } else {
            pixelateFace(mainCtx, x, y, width, height, Math.max(4, blurRadius));
        }
    }

    // "emoji": draw one emoji glyph scaled to roughly cover the face box.
    function drawEmoji(context, x, y, width, height, emojiChar = "😀") {
        const fontSize = Math.min(width, height) * 0.9;
        context.font = `${fontSize}px sans-serif`;
        context.textAlign = 'center';
        context.textBaseline = 'middle';
        // Nudge down slightly — emoji glyphs tend to sit high in the em box.
        context.fillText(emojiChar, x + width / 2, y + height / 2 + fontSize * 0.1);
    }

    // 3. Load pico.js and its face cascade, caching the unpacked cascade on
    //    window so repeated calls skip the network round trip.
    try {
        if (typeof pico === 'undefined') {
            await new Promise((resolve, reject) => {
                const script = document.createElement('script');
                script.src = 'https://cdn.jsdelivr.net/npm/picojs@2.0.4/pico.js';
                script.async = true;
                script.onload = resolve;
                script.onerror = () => reject(new Error("Failed to load pico.js library."));
                document.head.appendChild(script);
            });
        }
        let facefinder_classify_region;
        const cascadeFileUrl = 'https://cdn.jsdelivr.net/gh/nenadmarkus/picojs/examples/facefinder';
        if (window.photoFaceHidingToolCascade) {
            facefinder_classify_region = window.photoFaceHidingToolCascade;
        } else {
            const response = await fetch(cascadeFileUrl);
            if (!response.ok) throw new Error(`Failed to fetch cascade file from ${cascadeFileUrl}: ${response.statusText}`);
            const buffer = await response.arrayBuffer();
            facefinder_classify_region = pico.unpack_cascade(new Int8Array(buffer));
            window.photoFaceHidingToolCascade = facefinder_classify_region; // cache for future calls
            console.log('Face detection cascade loaded & cached.');
        }

        // 4. Convert the canvas pixels to the grayscale struct pico.js expects.
        const rgbaPixels = ctx.getImageData(0, 0, canvas.width, canvas.height).data;
        const imageForPico = {
            "pixels": rgba_to_grayscale(rgbaPixels, canvas.height, canvas.width),
            "nrows": canvas.height,
            "ncols": canvas.width,
            "ldim": canvas.width
        };
        const minDimension = Math.min(canvas.width, canvas.height);
        const picoParams = {
            "shiftfactor": 0.1, // move detection window by 10% of its size
            "scalefactor": 1.1, // grow detection window by 10% each step
            "minsize": Math.max(20, Math.floor(minDimension * 0.10)), // at least 20px
            "maxsize": Math.floor(minDimension * 0.80)
        };

        // 5. Detect, then merge overlapping detections (IoU threshold 0.2).
        let dets = pico.run_cascade(imageForPico, facefinder_classify_region, picoParams);
        dets = pico.cluster_detections(dets, 0.2);

        // 6. Hide each sufficiently confident detection. The base canvas still
        //    holds pristine pixels, and blurFace reads from originalImg, so
        //    overlapping faces never compound each other's effects.
        const hideMethod = String(method).toLowerCase(); // tolerate non-string input
        const strength = (typeof optionValue === 'number' && Number.isFinite(optionValue)) ? optionValue : 20;
        let facesFound = 0;
        for (const det of dets) {
            // det = [rowCenter, colCenter, size, score]; 50.0 is a commonly
            // suggested pico.js confidence threshold.
            if (det[3] <= 50.0) continue;
            facesFound++;
            const faceX = det[1] - det[2] / 2; // center X - radius
            const faceY = det[0] - det[2] / 2; // center Y - radius
            const faceSize = det[2];
            switch (hideMethod) {
                case "blur":
                    blurFace(ctx, faceX, faceY, faceSize, faceSize, strength, originalImg);
                    break;
                case "blackout":
                    ctx.fillStyle = (typeof fillColor === 'string') ? fillColor : "black";
                    ctx.fillRect(faceX, faceY, faceSize, faceSize);
                    break;
                case "emoji":
                    drawEmoji(ctx, faceX, faceY, faceSize, faceSize, (typeof emojiCharacter === 'string') ? emojiCharacter : "😀");
                    break;
                case "pixelate":
                default: // unknown methods fall back to pixelate
                    pixelateFace(ctx, faceX, faceY, faceSize, faceSize, strength);
            }
        }
        if (facesFound === 0) console.log("No faces found matching the criteria.");
    } catch (error) {
        console.error("Error during face detection or processing:", error);
        // The canvas already holds the original image; add a red banner so the
        // failure is visible without destroying the output.
        ctx.fillStyle = "rgba(255, 0, 0, 0.7)";
        ctx.fillRect(0, 0, canvas.width, 30);
        ctx.fillStyle = "white";
        ctx.font = "16px Arial";
        ctx.textAlign = "left";
        ctx.textBaseline = "middle";
        // String(...) guards against errors without a message property.
        ctx.fillText("Face hiding failed: " + String(error.message).substring(0, 50) + "...", 10, 15);
    }

    // 7. Return the processed canvas.
    return canvas;
}
// Apply Changes