You can edit the JavaScript code below to customize the image tool.
Apply Changes
async function processImage(originalImg, referenceImgSrc = '', featheringFactor = 0.15) {
  /**
   * Draws an image onto a fresh canvas of the same natural size.
   * Used as the fallback result whenever face swapping cannot proceed,
   * and as the base canvas the swapped face is composited onto.
   * @param {HTMLImageElement} img - The image to copy.
   * @returns {HTMLCanvasElement} A canvas containing an unmodified copy of the image.
   */
  function cloneToCanvas(img) {
    const canvas = document.createElement('canvas');
    canvas.width = img.naturalWidth;
    canvas.height = img.naturalHeight;
    canvas.getContext('2d').drawImage(img, 0, 0);
    return canvas;
  }

  /**
   * Solves for the affine transformation matrix that maps 3 source points to 3 destination points.
   * The transformation is of the form:
   *   x' = a*x + c*y + e
   *   y' = b*x + d*y + f
   * It returns the matrix [a, b, c, d, e, f] for use with canvas.transform().
   * @param {Array<{x: number, y: number}>} src - Array of 3 source points.
   * @param {Array<{x: number, y: number}>} dst - Array of 3 destination points.
   * @returns {Array<number>|null} The transformation matrix, or null if the source points are collinear.
   */
  function solveAffineTransform(src, dst) {
    const [s1, s2, s3] = src;
    const [d1, d2, d3] = dst;
    // Matrix A for the linear system A * [a, c, e]' = [dx1, dx2, dx3]'
    // (and identically A * [b, d, f]' = [dy1, dy2, dy3]').
    const A = [
      [s1.x, s1.y, 1],
      [s2.x, s2.y, 1],
      [s3.x, s3.y, 1],
    ];
    // Determinant via cofactor expansion along the first row.
    const det = A[0][0] * (A[1][1] * A[2][2] - A[2][1] * A[1][2]) -
                A[0][1] * (A[1][0] * A[2][2] - A[1][2] * A[2][0]) +
                A[0][2] * (A[1][0] * A[2][1] - A[1][1] * A[2][0]);
    // A near-zero determinant means the three source points are (almost)
    // collinear, so no unique affine map exists.
    if (Math.abs(det) < 1e-10) {
      return null;
    }
    const invDet = 1.0 / det;
    // Inverse of A via the adjugate (transposed cofactor) matrix.
    const invA = [
      [(A[1][1] * A[2][2] - A[2][1] * A[1][2]) * invDet, (A[0][2] * A[2][1] - A[0][1] * A[2][2]) * invDet, (A[0][1] * A[1][2] - A[0][2] * A[1][1]) * invDet],
      [(A[1][2] * A[2][0] - A[1][0] * A[2][2]) * invDet, (A[0][0] * A[2][2] - A[0][2] * A[2][0]) * invDet, (A[1][0] * A[0][2] - A[0][0] * A[1][2]) * invDet],
      [(A[1][0] * A[2][1] - A[2][0] * A[1][1]) * invDet, (A[2][0] * A[0][1] - A[0][0] * A[2][1]) * invDet, (A[0][0] * A[1][1] - A[1][0] * A[0][1]) * invDet]
    ];
    // Back-substitute: [a, c, e] maps source x-coords to destination x-coords.
    const [ax, cx, ex] = [
      invA[0][0] * d1.x + invA[0][1] * d2.x + invA[0][2] * d3.x,
      invA[1][0] * d1.x + invA[1][1] * d2.x + invA[1][2] * d3.x,
      invA[2][0] * d1.x + invA[2][1] * d2.x + invA[2][2] * d3.x,
    ];
    // [b, d, f] maps source coords to destination y-coords.
    const [by, dy, fy] = [
      invA[0][0] * d1.y + invA[0][1] * d2.y + invA[0][2] * d3.y,
      invA[1][0] * d1.y + invA[1][1] * d2.y + invA[1][2] * d3.y,
      invA[2][0] * d1.y + invA[2][1] * d2.y + invA[2][2] * d3.y,
    ];
    // Order matches CanvasRenderingContext2D.transform(a, b, c, d, e, f).
    return [ax, by, cx, dy, ex, fy];
  }

  /**
   * Extracts a feathered cutout of a face from an image using facial landmarks.
   * Builds a filled polygon from the jaw/eyebrow outline, blurs it to soften
   * the edge, then uses it as an alpha mask over the source image.
   * @param {HTMLImageElement} image - The source image.
   * @param {FaceLandmarks68} landmarks - The facial landmarks for the face to extract.
   * @param {number} blurFactor - Blur radius for feathering, as a fraction of face height.
   * @returns {HTMLCanvasElement} A canvas containing the isolated, feathered face.
   */
  function extractFeatheredFace(image, landmarks, blurFactor) {
    // Trace the face boundary: jaw left-to-right, then the eyebrows reversed
    // so the polygon closes back across the forehead.
    const faceOutlinePoints = [
      ...landmarks.getJawOutline(),
      ...landmarks.getRightEyeBrow().reverse(),
      ...landmarks.getLeftEyeBrow().reverse()
    ];
    // Face height (chin to top of nose bridge) makes the feather width
    // proportional to the detected face size; floor of 3px keeps some feathering
    // even for tiny faces.
    const faceHeight = landmarks.getJawOutline()[8].y - landmarks.getNose()[0].y;
    const blurRadius = Math.max(3, Math.floor(faceHeight * blurFactor));
    // Build the mask on a canvas matching the source image dimensions.
    const maskCanvas = document.createElement('canvas');
    maskCanvas.width = image.naturalWidth;
    maskCanvas.height = image.naturalHeight;
    const maskCtx = maskCanvas.getContext('2d');
    // Fill the face outline polygon (only alpha matters for the mask).
    maskCtx.beginPath();
    maskCtx.moveTo(faceOutlinePoints[0].x, faceOutlinePoints[0].y);
    for (let i = 1; i < faceOutlinePoints.length; i++) {
      maskCtx.lineTo(faceOutlinePoints[i].x, faceOutlinePoints[i].y);
    }
    maskCtx.closePath();
    maskCtx.fillStyle = 'black';
    maskCtx.fill();
    // Blur the mask to create the feathering effect; drawing the canvas back
    // onto itself applies the filter to the existing content.
    maskCtx.filter = `blur(${blurRadius}px)`;
    maskCtx.drawImage(maskCanvas, 0, 0);
    maskCtx.filter = 'none';
    // Composite: draw the source image, then keep only the pixels covered by
    // the mask's alpha ('destination-in' multiplies alpha channels).
    const cutoutCanvas = document.createElement('canvas');
    cutoutCanvas.width = image.naturalWidth;
    cutoutCanvas.height = image.naturalHeight;
    const cutoutCtx = cutoutCanvas.getContext('2d');
    cutoutCtx.drawImage(image, 0, 0);
    cutoutCtx.globalCompositeOperation = 'destination-in';
    cutoutCtx.drawImage(maskCanvas, 0, 0);
    cutoutCtx.globalCompositeOperation = 'source-over';
    return cutoutCanvas;
  }

  // --- Main Function Logic ---
  // Every failure path below returns an untouched copy of the original image
  // rather than throwing, so callers always get a usable canvas back.
  if (!referenceImgSrc) {
    console.error("Reference image source is required for face swapping.");
    return cloneToCanvas(originalImg);
  }
  // Dynamically load face-api.js and models if they haven't been loaded yet.
  if (!window.faceApiLoaded) {
    try {
      await new Promise((resolve, reject) => {
        const script = document.createElement('script');
        script.src = 'https://cdn.jsdelivr.net/npm/@vladmandic/face-api/dist/face-api.js';
        script.async = true;
        script.onload = resolve;
        script.onerror = () => reject(new Error('Failed to load face-api.js script'));
        document.head.append(script);
      });
      const MODEL_URL = 'https://cdn.jsdelivr.net/npm/@vladmandic/face-api/model/';
      // The two model downloads are independent; fetch them in parallel.
      await Promise.all([
        faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL),
        faceapi.nets.faceLandmark68TinyNet.loadFromUri(MODEL_URL),
      ]);
      window.faceApiLoaded = true;
    } catch (error) {
      console.error("Face-API loading failed:", error);
      return cloneToCanvas(originalImg);
    }
  }
  // Load the reference image from the provided source (URL or base64).
  // A failed load previously rejected with a raw DOM event and escaped the
  // function; now it is caught and falls back to the original image.
  let referenceImg;
  try {
    referenceImg = await new Promise((resolve, reject) => {
      const img = new Image();
      img.crossOrigin = 'Anonymous';
      img.onload = () => resolve(img);
      img.onerror = () => reject(new Error('Failed to load reference image'));
      img.src = referenceImgSrc;
    });
  } catch (error) {
    console.error("Reference image loading failed:", error);
    return cloneToCanvas(originalImg);
  }
  // Prepare the output canvas with the original image as the base layer.
  const canvas = cloneToCanvas(originalImg);
  const ctx = canvas.getContext('2d');
  // Detect a single face plus 68-point landmarks in each image; the `true`
  // argument selects the tiny landmark model loaded above. The two detections
  // are independent, so run them concurrently.
  const detectorOptions = new faceapi.TinyFaceDetectorOptions({ inputSize: 320 });
  const [originalDetection, referenceDetection] = await Promise.all([
    faceapi.detectSingleFace(originalImg, detectorOptions).withFaceLandmarks(true),
    faceapi.detectSingleFace(referenceImg, detectorOptions).withFaceLandmarks(true),
  ]);
  if (!originalDetection || !referenceDetection) {
    console.warn("Could not detect a face in one or both images. Returning original image.");
    return canvas;
  }
  const { landmarks: tgtLandmarks } = originalDetection;
  const { landmarks: refLandmarks } = referenceDetection;
  // Three stable anchor points for the affine transformation:
  // outer left eye corner (36), outer right eye corner (45), bottom lip (57).
  const refPoints = [refLandmarks.positions[36], refLandmarks.positions[45], refLandmarks.positions[57]];
  const tgtPoints = [tgtLandmarks.positions[36], tgtLandmarks.positions[45], tgtLandmarks.positions[57]];
  const transformMatrix = solveAffineTransform(refPoints, tgtPoints);
  if (!transformMatrix) {
    console.warn("Could not calculate a valid transformation. Points may be collinear. Returning original image.");
    return canvas;
  }
  // Extract the feathered face from the reference image and warp it onto
  // the target face via the solved affine transform.
  const featheredFaceCutout = extractFeatheredFace(referenceImg, refLandmarks, featheringFactor);
  ctx.save();
  const [a, b, c, d, e, f] = transformMatrix;
  ctx.transform(a, b, c, d, e, f);
  ctx.drawImage(featheredFaceCutout, 0, 0);
  ctx.restore();
  return canvas;
}
Apply Changes