You can edit the JavaScript code below to customize the image tool.
Apply Changes
/**
 * Swaps the torso ("clothing") regions of the first two people detected in an
 * image, using the TensorFlow.js BodyPix part-segmentation model.
 *
 * On any failure (script load, model initialization, segmentation error, or
 * fewer than two people found) a canvas showing the dimmed original image with
 * an error message is returned instead of throwing, so the caller always
 * receives a drawable result.
 *
 * @param {HTMLImageElement|HTMLCanvasElement} originalImg The source image.
 * @returns {Promise<HTMLCanvasElement>} Canvas with the clothing swapped, or
 *   an error-message canvas of the same dimensions.
 */
async function processImage(originalImg) {
  /**
   * Dynamically loads a script from a URL and returns a promise that settles
   * once the script has loaded (or failed to load).
   * @param {string} url The URL of the script to load.
   * @returns {Promise<void>}
   */
  const loadScript = (url) => new Promise((resolve, reject) => {
    const script = document.createElement('script');
    script.src = url;
    script.onload = resolve;
    script.onerror = reject;
    document.head.appendChild(script);
  });

  /**
   * Creates and returns a canvas showing the original image dimmed, with a
   * centered error message drawn over it.
   * @param {number} width The width of the canvas.
   * @param {number} height The height of the canvas.
   * @param {string} message The error message to display.
   * @returns {HTMLCanvasElement}
   */
  const createErrorCanvas = (width, height, message) => {
    const errorCanvas = document.createElement('canvas');
    errorCanvas.width = width;
    errorCanvas.height = height;
    const ctx = errorCanvas.getContext('2d');
    ctx.drawImage(originalImg, 0, 0);
    ctx.fillStyle = 'rgba(0, 0, 0, 0.7)';
    ctx.fillRect(0, 0, width, height);
    ctx.fillStyle = 'white';
    // Scale the font with image width, but keep it readable on small images.
    ctx.font = `bold ${Math.max(18, width / 25)}px sans-serif`;
    ctx.textAlign = 'center';
    ctx.textBaseline = 'middle';
    ctx.fillText(message, width / 2, height / 2);
    return errorCanvas;
  };

  // 1. Load the necessary AI model libraries (TensorFlow.js and BodyPix),
  //    skipping the network round-trips if they are already on the page.
  try {
    if (!window.tf || !window.bodyPix) {
      await Promise.all([
        loadScript('https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-core'),
        loadScript('https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-converter'),
        loadScript('https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-webgl'),
        loadScript('https://cdn.jsdelivr.net/npm/@tensorflow-models/body-pix')
      ]);
    }
  } catch (error) {
    console.error("Failed to load AI model scripts:", error);
    return createErrorCanvas(originalImg.width, originalImg.height, 'Error: Could not load AI model.');
  }

  // 2. Load the BodyPix model.
  //    FIX: this await was previously outside any try/catch, so a failed model
  //    download surfaced as an unhandled rejection instead of the error canvas
  //    that every other failure path produces.
  let net;
  try {
    net = await bodyPix.load({
      architecture: 'MobileNetV1',
      outputStride: 16,
      multiplier: 0.75,
      quantBytes: 2
    });
  } catch (error) {
    console.error("Failed to initialize the BodyPix model:", error);
    return createErrorCanvas(originalImg.width, originalImg.height, 'Error: Could not load AI model.');
  }

  // 3. Run segmentation to find all people and their body parts.
  //    FIX: also previously un-guarded; additionally the net was never
  //    disposed, leaking WebGL textures on every call.
  let partSegmentations;
  try {
    partSegmentations = await net.segmentMultiPersonParts(originalImg, {
      flipHorizontal: false,
      internalResolution: 'medium',
      segmentationThreshold: 0.6,
      maxDetections: 5,
      scoreThreshold: 0.3,
      nmsRadius: 20
    });
  } catch (error) {
    console.error("Failed to segment the image:", error);
    return createErrorCanvas(originalImg.width, originalImg.height, 'Error: Could not analyze the image.');
  } finally {
    // Release the model's GPU resources whether segmentation succeeded or not.
    net.dispose?.();
  }

  if (partSegmentations.length < 2) {
    return createErrorCanvas(originalImg.width, originalImg.height, 'Error: Could not find at least two people.');
  }

  /**
   * Extracts one person's torso ("clothing") pixels as a tightly-cropped
   * canvas, plus the bounding box of those pixels in source coordinates.
   * Part IDs 12 and 13 are the front and back torso in the BodyPix part map.
   * @param {Object} segmentation One entry from segmentMultiPersonParts; has
   *   per-pixel part IDs in `data` plus `width`/`height`.
   * @param {HTMLImageElement|HTMLCanvasElement} sourceImage Pixel source.
   * @returns {{clothingAsset: HTMLCanvasElement,
   *            boundingBox: {x: number, y: number, width: number, height: number}}}
   *   A zero-sized boundingBox means no torso pixels were found.
   */
  const extractClothing = (segmentation, sourceImage) => {
    const { data, width, height } = segmentation;

    // Canvas that will hold only the torso pixels (transparent elsewhere).
    const clothingCanvas = document.createElement('canvas');
    clothingCanvas.width = width;
    clothingCanvas.height = height;
    const clothingCtx = clothingCanvas.getContext('2d');

    // Draw the source at the segmentation's resolution so pixel indices in
    // `data` line up 1:1 with the image data we read.
    const sourceCanvas = document.createElement('canvas');
    sourceCanvas.width = width;
    sourceCanvas.height = height;
    const sourceCtx = sourceCanvas.getContext('2d');
    sourceCtx.drawImage(sourceImage, 0, 0, width, height);
    const sourceImageData = sourceCtx.getImageData(0, 0, width, height);
    const newImageData = clothingCtx.createImageData(width, height);

    let minX = width, minY = height, maxX = -1, maxY = -1;
    for (let i = 0; i < data.length; i++) {
      const partId = data[i];
      if (partId === 12 || partId === 13) {
        // Copy RGBA for this torso pixel; untouched pixels stay transparent.
        const pixelIndex = i * 4;
        newImageData.data[pixelIndex] = sourceImageData.data[pixelIndex];
        newImageData.data[pixelIndex + 1] = sourceImageData.data[pixelIndex + 1];
        newImageData.data[pixelIndex + 2] = sourceImageData.data[pixelIndex + 2];
        newImageData.data[pixelIndex + 3] = sourceImageData.data[pixelIndex + 3];
        const x = i % width;
        const y = Math.floor(i / width);
        if (x < minX) minX = x;
        if (x > maxX) maxX = x;
        if (y < minY) minY = y;
        if (y > maxY) maxY = y;
      }
    }
    clothingCtx.putImageData(newImageData, 0, 0);

    // maxX < minX means the loop never matched a torso pixel.
    const boundingBox = (maxX < minX) ?
      { x: 0, y: 0, width: 0, height: 0 } :
      { x: minX, y: minY, width: maxX - minX + 1, height: maxY - minY + 1 };

    // Crop the clothing to its bounding box to make it a movable asset.
    const croppedClothingCanvas = document.createElement('canvas');
    croppedClothingCanvas.width = boundingBox.width;
    croppedClothingCanvas.height = boundingBox.height;
    if (boundingBox.width > 0 && boundingBox.height > 0) {
      croppedClothingCanvas.getContext('2d').drawImage(
        clothingCanvas,
        boundingBox.x, boundingBox.y, boundingBox.width, boundingBox.height,
        0, 0, boundingBox.width, boundingBox.height
      );
    }
    return {
      clothingAsset: croppedClothingCanvas,
      boundingBox: boundingBox
    };
  };

  // 4. Extract the clothing for the first two people found.
  const person1 = extractClothing(partSegmentations[0], originalImg);
  const person2 = extractClothing(partSegmentations[1], originalImg);

  // 5. Compose the result: original image as base, then each person's
  //    clothing drawn (scaled) into the other person's torso bounding box,
  //    covering the old clothing.
  const resultCanvas = document.createElement('canvas');
  resultCanvas.width = originalImg.width;
  resultCanvas.height = originalImg.height;
  const resultCtx = resultCanvas.getContext('2d');
  resultCtx.drawImage(originalImg, 0, 0);

  // Draw person 2's clothing onto person 1's position (skip if either torso
  // was empty — drawing a zero-sized canvas would throw).
  if (person2.clothingAsset.width > 0 && person1.boundingBox.width > 0) {
    resultCtx.drawImage(
      person2.clothingAsset,
      person1.boundingBox.x,
      person1.boundingBox.y,
      person1.boundingBox.width,
      person1.boundingBox.height
    );
  }
  // Draw person 1's clothing onto person 2's position.
  if (person1.clothingAsset.width > 0 && person2.boundingBox.width > 0) {
    resultCtx.drawImage(
      person1.clothingAsset,
      person2.boundingBox.x,
      person2.boundingBox.y,
      person2.boundingBox.width,
      person2.boundingBox.height
    );
  }
  return resultCanvas;
}
Apply Changes