// You can edit the JavaScript code below to customize the image tool.
// Apply Changes
/**
 * Changes the facial expression in an image by redrawing the mouth shape.
 * Dynamically loads face-api.js (and its detection models) from a CDN on
 * first use, detects facial landmarks, erases each detected mouth with
 * locally sampled skin color, then draws a new mouth for the requested
 * emotion.
 *
 * On any failure (script/model load error, no faces found) the returned
 * canvas simply contains the unmodified original image, so callers always
 * receive a usable canvas.
 *
 * @param {HTMLImageElement} originalImg The source image (must be fully loaded).
 * @param {string} emotion Target emotion: 'happy', 'sad', 'surprised',
 *   'angry', or 'neutral'. Unrecognized values fall back to 'neutral'.
 * @param {number} intensity Expression strength, clamped to [0.1, 1.0].
 * @returns {Promise<HTMLCanvasElement>} A canvas element with the modified image.
 */
async function processImage(originalImg, emotion = 'happy', intensity = 0.4) {
  const canvas = document.createElement('canvas');
  canvas.width = originalImg.naturalWidth;
  canvas.height = originalImg.naturalHeight;
  const ctx = canvas.getContext('2d', { willReadFrequently: true });
  ctx.drawImage(originalImg, 0, 0);

  // --- Dynamic library loading (memoized on window.faceapi) ---
  // face-api.js is loaded on demand to avoid polluting the global scope
  // when this tool is never used.
  if (!window.faceapi) {
    try {
      await new Promise((resolve, reject) => {
        const script = document.createElement('script');
        // Attach handlers BEFORE inserting the element so a cached,
        // synchronously-firing load cannot be missed.
        script.onload = resolve;
        script.onerror = reject;
        script.src = 'https://cdn.jsdelivr.net/npm/face-api.js@0.22.2/dist/face-api.min.js';
        document.head.append(script);
      });
    } catch (error) {
      console.error("Failed to load face-api.js script:", error);
      return canvas; // Return original image on canvas if script fails
    }
  }

  // --- Model loading (memoized via a window-level flag) ---
  if (!window.faceApiModelsLoaded) {
    const modelPath = 'https://cdn.jsdelivr.net/npm/face-api.js@0.22.2/weights';
    try {
      // The two models are independent; fetch them in parallel.
      await Promise.all([
        faceapi.nets.tinyFaceDetector.loadFromUri(modelPath),
        faceapi.nets.faceLandmark68Net.loadFromUri(modelPath),
      ]);
      window.faceApiModelsLoaded = true;
    } catch (error) {
      console.error("Failed to load face-api.js models:", error);
      return canvas; // Return original image on canvas if models fail
    }
  }

  // --- Face Detection ---
  const detections = await faceapi
    .detectAllFaces(originalImg, new faceapi.TinyFaceDetectorOptions())
    .withFaceLandmarks();
  if (!detections.length) {
    console.warn("No faces detected in the image.");
    return canvas; // No face found, return original
  }

  // Keep a pristine copy of the image to sample pixel colors from,
  // since the main canvas is painted over below.
  const tempCanvas = document.createElement('canvas');
  tempCanvas.width = canvas.width;
  tempCanvas.height = canvas.height;
  const tempCtx = tempCanvas.getContext('2d', { willReadFrequently: true });
  tempCtx.drawImage(originalImg, 0, 0);

  // Reads one pixel from the untouched copy as a CSS rgb() string.
  const sampleColor = (point) => {
    const d = tempCtx.getImageData(Math.round(point.x), Math.round(point.y), 1, 1).data;
    return `rgb(${d[0]}, ${d[1]}, ${d[2]})`;
  };

  // --- Process Each Detected Face ---
  for (const detection of detections) {
    const landmarks = detection.landmarks;
    // 20 points: indices 0-11 trace the outer lip, 12-19 the inner lip.
    const mouth = landmarks.getMouth();

    // 1. Erase the original mouth by covering it with local skin color,
    //    sampled from the cheek (jawline landmark 4 is a safe spot).
    const skinColor = sampleColor(landmarks.positions[4]);
    ctx.fillStyle = skinColor;
    ctx.shadowColor = skinColor;
    // A slight blur, proportional to mouth size, helps blend the patch.
    ctx.shadowBlur = Math.max(8, (mouth[6].x - mouth[0].x) / 10);
    ctx.beginPath();
    ctx.moveTo(mouth[0].x, mouth[0].y);
    for (let i = 1; i <= 11; i++) {
      ctx.lineTo(mouth[i].x, mouth[i].y);
    }
    ctx.closePath();
    ctx.fill();
    // Reset BOTH shadow properties so later strokes are not blurred
    // (the original reset only shadowBlur).
    ctx.shadowBlur = 0;
    ctx.shadowColor = 'transparent';

    // 2. Sample lip color (a point on the lower lip) for the new mouth.
    const lipColor = sampleColor(mouth[13]);

    // 3. Draw the new mouth based on the desired emotion.
    const leftCorner = mouth[0];
    const rightCorner = mouth[6];
    const mouthWidth = rightCorner.x - leftCorner.x;
    const midPointX = (leftCorner.x + rightCorner.x) / 2;
    const midPointY = (leftCorner.y + rightCorner.y) / 2;
    ctx.strokeStyle = lipColor;
    ctx.lineWidth = Math.max(2, mouthWidth / 25);
    ctx.lineCap = 'round';
    const clampedIntensity = Math.max(0.1, Math.min(1.0, intensity));
    const curveHeight = mouthWidth * clampedIntensity * 0.3;

    // String() guards against non-string emotion values (e.g. null),
    // which would otherwise throw on .toLowerCase().
    switch (String(emotion).toLowerCase()) {
      case 'happy': {
        // Smile: corners stay put, center control point dips below them.
        ctx.beginPath();
        ctx.moveTo(leftCorner.x, leftCorner.y);
        ctx.quadraticCurveTo(midPointX, midPointY + curveHeight, rightCorner.x, rightCorner.y);
        ctx.stroke();
        break;
      }
      case 'sad': {
        // Frown: corners pulled down, center control point raised.
        const drop = curveHeight * 0.8;
        ctx.beginPath();
        ctx.moveTo(leftCorner.x, leftCorner.y + drop);
        ctx.quadraticCurveTo(midPointX, midPointY - drop, rightCorner.x, rightCorner.y + drop);
        ctx.stroke();
        break;
      }
      case 'angry': {
        // Tense, slightly inverted line: a subtler version of the frown.
        const shift = curveHeight * 0.2;
        ctx.beginPath();
        ctx.moveTo(leftCorner.x, leftCorner.y + shift);
        ctx.quadraticCurveTo(midPointX, midPointY - shift, rightCorner.x, rightCorner.y + shift);
        ctx.stroke();
        break;
      }
      case 'surprised': {
        // FIX: face-api.js FaceLandmarks68 has no getMouthBoundingBox();
        // the original call threw here. Derive the bounding box from the
        // mouth landmark points instead.
        const xs = mouth.map((p) => p.x);
        const ys = mouth.map((p) => p.y);
        const minX = Math.min(...xs);
        const minY = Math.min(...ys);
        const width = Math.max(...xs) - minX;
        const height = Math.max(...ys) - minY;
        ctx.fillStyle = '#100505'; // Dark color for inside the open mouth
        ctx.beginPath();
        // Vertical radius deliberately exceeds the detected mouth height
        // to exaggerate the open-mouth effect with intensity.
        ctx.ellipse(
          minX + width / 2,
          minY + height / 2,
          width / 2,
          height * (0.8 + clampedIntensity * 0.8),
          0,
          0,
          2 * Math.PI
        );
        ctx.fill();
        break;
      }
      case 'neutral':
      default: {
        // Flat line straight between the mouth corners.
        ctx.beginPath();
        ctx.moveTo(leftCorner.x, leftCorner.y);
        ctx.lineTo(rightCorner.x, rightCorner.y);
        ctx.stroke();
        break;
      }
    }
  }
  return canvas;
}
// Apply Changes