You can edit the JavaScript code below to customize the image tool.
Apply Changes
/**
 * Detects all faces in an image, crops each one (with optional padding), and
 * returns a canvas containing the crops laid out side by side.
 *
 * This function never throws: on any failure (image not loadable, face-api.js
 * script/model load failure, detection error, no faces found, or no valid
 * crops) it returns a placeholder canvas that renders the error message, so
 * callers can always display the result directly.
 *
 * @param {HTMLImageElement} originalImg - Source image element. May still be
 *   loading; the function waits for it to finish.
 * @param {number} [padding=0.2] - Extra margin around each detected face box,
 *   expressed as a fraction of the box size (0.2 = 20%), split evenly on each side.
 * @param {number} [minConfidence=0.5] - Minimum detection confidence (0..1)
 *   passed to the SSD MobileNet v1 detector.
 * @returns {Promise<HTMLCanvasElement>} Canvas with the cropped faces side by
 *   side (transparent background), or an error-message canvas.
 */
async function processImage(originalImg, padding = 0.2, minConfidence = 0.5) {
// Renders a human-readable error onto a placeholder canvas. When
// `baseDimensions` is provided (and positive) the canvas mirrors the source
// image size, capped at 800x600 so the text stays readable; otherwise a
// 400x200 default is used.
function _createErrorCanvas(baseDimensions, title, detailMsg = '') {
const canvas = document.createElement('canvas');
const defaultWidth = 400;
const defaultHeight = 200;
const baseWidth = (baseDimensions && baseDimensions.width > 0) ? baseDimensions.width : defaultWidth;
const baseHeight = (baseDimensions && baseDimensions.height > 0) ? baseDimensions.height : defaultHeight;
// Cap max dimensions for error message display. (The default sizes are
// already below the caps, so no separate minimum re-clamp is needed.)
canvas.width = Math.min(baseWidth, 800);
canvas.height = Math.min(baseHeight, 600);
const ctx = canvas.getContext('2d');
ctx.fillStyle = 'lightgray';
ctx.fillRect(0, 0, canvas.width, canvas.height);
ctx.textAlign = 'center';
ctx.textBaseline = 'middle';
// Title font scales with canvas width, clamped to 12..20px.
const fontSize = Math.max(12, Math.min(20, Math.floor(canvas.width / 20)));
ctx.font = `${fontSize}px Arial`; // Web-safe font
ctx.fillStyle = 'red';
// Shift the title up when a detail message follows, so both fit around center.
const titleYPosition = canvas.height / 2 - (detailMsg ? (fontSize + 10) / 2 : 0);
ctx.fillText(title, canvas.width / 2, titleYPosition);
if (detailMsg) {
const detailFontSize = Math.max(10, Math.min(16, Math.floor(canvas.width / 30)));
ctx.font = `${detailFontSize}px Arial`;
ctx.fillStyle = '#333333'; // Dark gray for details
// Greedy word-wrap of the detail text to 80% of the canvas width.
const maxWidth = canvas.width * 0.8;
const words = String(detailMsg).split(' ');
const lines = [];
let line = '';
for (let n = 0; n < words.length; n++) {
const testLine = line + words[n] + ' ';
if (ctx.measureText(testLine).width > maxWidth && n > 0) {
lines.push(line.trim());
line = words[n] + ' ';
} else {
line = testLine;
}
}
lines.push(line.trim());
// First detail line sits just below the title.
const detailStartY = titleYPosition + fontSize * 0.8 + detailFontSize * 0.8;
// Show at most 3 lines; the 3rd becomes "..." when more text follows.
lines.forEach((txtLine, index) => {
if (index > 2) return;
const lineY = detailStartY + (index * (detailFontSize + 5));
if (index === 2 && lines.length > 3) {
ctx.fillText("...", canvas.width / 2, lineY);
} else {
ctx.fillText(txtLine, canvas.width / 2, lineY);
}
});
}
return canvas;
}
// 1. Validate originalImg state and ensure it's loaded with valid dimensions.
let imgDimensions = { width: originalImg.naturalWidth, height: originalImg.naturalHeight };
const ensureImageLoaded = new Promise((resolve, reject) => {
if (originalImg.complete && originalImg.naturalWidth > 0 && originalImg.naturalHeight > 0) {
imgDimensions = { width: originalImg.naturalWidth, height: originalImg.naturalHeight };
resolve();
return;
}
// `complete` with zero natural dimensions means the image is broken/empty.
if (originalImg.complete && (originalImg.naturalWidth === 0 || originalImg.naturalHeight === 0)) {
reject(new Error("Image is loaded but has zero or invalid dimensions."));
return;
}
// Without a source the load events will never fire; fail fast.
if (!originalImg.src && !originalImg.currentSrc) { // Safari might use currentSrc
reject(new Error("Image source is not set."));
return;
}
// Still loading: settle on whichever of load/error fires first, then
// detach both listeners so we don't leak handlers on the caller's element.
const onload = () => {
cleanup();
if (originalImg.naturalWidth > 0 && originalImg.naturalHeight > 0) {
imgDimensions = { width: originalImg.naturalWidth, height: originalImg.naturalHeight };
resolve();
} else {
reject(new Error("Image loaded but has zero dimensions."));
}
};
const onerror = () => {
cleanup();
reject(new Error("Failed to load input image. Check URL or data integrity."));
};
const cleanup = () => {
originalImg.removeEventListener('load', onload);
originalImg.removeEventListener('error', onerror);
};
originalImg.addEventListener('load', onload);
originalImg.addEventListener('error', onerror);
});
try {
await ensureImageLoaded;
} catch (imgLoadError) {
console.error("Image Load Error:", imgLoadError);
return _createErrorCanvas(imgDimensions.width > 0 ? imgDimensions : null, 'Image Load Error', imgLoadError.message);
}
// 2. Load the face-api.js script from CDN if it isn't already on the page.
if (typeof faceapi === 'undefined') {
try {
await new Promise((resolve, reject) => {
const script = document.createElement('script');
script.src = 'https://cdn.jsdelivr.net/npm/face-api.js@0.22.2/dist/face-api.min.js';
script.async = true;
script.onload = resolve;
script.onerror = () => reject(new Error('Failed to load face-api.js script. Possible network issue or CDN problem.'));
document.head.appendChild(script);
});
} catch (scriptLoadError) {
console.error("face-api.js Load Error:", scriptLoadError);
return _createErrorCanvas(imgDimensions, 'Library Load Error', scriptLoadError.message);
}
}
// 3. Load face detection model weights.
// SSD MobileNet v1 balances speed and accuracy; `isLoaded` avoids
// re-downloading weights on repeated calls.
if (!faceapi.nets.ssdMobilenetv1.isLoaded) {
try {
// Weights are hosted on the library author's GitHub, accessible via jsDelivr CDN
const MODEL_URL = 'https://cdn.jsdelivr.net/gh/justadudewhohacks/face-api.js@master/weights/';
await faceapi.nets.ssdMobilenetv1.loadFromUri(MODEL_URL);
} catch (modelLoadError) {
console.error("Face Detection Model Load Error:", modelLoadError);
return _createErrorCanvas(imgDimensions, 'Model Load Error', 'Failed to load face detection models. Check network connection or console for more details.');
}
}
// 4. Perform face detection.
let detections;
try {
// `originalImg` must be an HTMLImageElement, HTMLCanvasElement, or HTMLVideoElement
detections = await faceapi.detectAllFaces(originalImg, new faceapi.SsdMobilenetv1Options({ minConfidence }));
} catch (detectionError) {
console.error("Face Detection Error:", detectionError);
return _createErrorCanvas(imgDimensions, 'Detection Error', 'An error occurred during the face detection process.');
}
if (!detections || detections.length === 0) {
return _createErrorCanvas(imgDimensions, 'No Faces Detected', `No faces found with confidence >= ${minConfidence}. Try adjusting settings or using a different image.`);
}
// 5. Compute padded crop rects for each face and accumulate layout metrics.
const croppedFacesData = [];
let totalAggregatedWidth = 0;
let maxCropHeight = 0;
const spacing = 10; // Pixels between cropped faces on the output canvas
detections.forEach(detection => {
const { x, y, width, height } = detection.box; // detected face rect in image coords
// Padding is a fraction of the detected box size, split evenly per side.
const padX = width * padding;
const padY = height * padding;
// Clamp the crop rect to the original image bounds.
const cropX = Math.max(0, x - padX / 2);
const cropY = Math.max(0, y - padY / 2);
const cropWidth = Math.min(imgDimensions.width - cropX, width + padX);
const cropHeight = Math.min(imgDimensions.height - cropY, height + padY);
if (cropWidth <= 0 || cropHeight <= 0) { // Skip degenerate crops
return;
}
croppedFacesData.push({
sx: cropX, sy: cropY, sWidth: cropWidth, sHeight: cropHeight, // Source rect from original image
dWidth: cropWidth, dHeight: cropHeight // Destination rect size on output canvas
});
totalAggregatedWidth += cropWidth;
if (croppedFacesData.length > 1) { // Add spacing if it's not the first face
totalAggregatedWidth += spacing;
}
maxCropHeight = Math.max(maxCropHeight, cropHeight);
});
if (croppedFacesData.length === 0) {
// All detected faces produced invalid crops (e.g. padding pushed them
// entirely outside the image bounds).
return _createErrorCanvas(imgDimensions, 'Processing Error', 'Detected faces could not be cropped (possibly due to image boundaries and padding settings).');
}
// 6. Draw the crops side by side onto the output canvas.
const outputCanvas = document.createElement('canvas');
// Canvas width/height coerce to integers; the crop rects are fractional
// (detection boxes are floats), so round UP to avoid truncation clipping
// the last face / bottom edge by up to a pixel.
outputCanvas.width = Math.ceil(totalAggregatedWidth);
outputCanvas.height = Math.ceil(maxCropHeight);
const ctx = outputCanvas.getContext('2d');
// Background is left transparent by default.
let currentOffsetX = 0;
croppedFacesData.forEach(faceData => {
// Vertically center crops that are shorter than the tallest one.
const offsetY = (maxCropHeight - faceData.dHeight) / 2;
ctx.drawImage(
originalImg, // Source image
faceData.sx, // Source X
faceData.sy, // Source Y
faceData.sWidth, // Source Width
faceData.sHeight, // Source Height
currentOffsetX, // Destination X on outputCanvas
offsetY, // Destination Y on outputCanvas
faceData.dWidth, // Destination Width on outputCanvas
faceData.dHeight // Destination Height on outputCanvas
);
currentOffsetX += faceData.dWidth + spacing; // Move X for the next crop
});
return outputCanvas;
}
Apply Changes