// You can edit the below JavaScript code to customize the image tool.
// Apply Changes
/**
* This function serves as a versatile image and video generator.
* It can apply static filters to an image, or generate a video from the image
* with transitions and an optional audio track.
*
* @param {Image} originalImg The source HTML Image object. For video/canvas operations to work, this image must be fully loaded and from a CORS-compliant source.
* @param {string} filter A CSS filter string to apply to the image (e.g., 'grayscale(100%)', 'sepia(80%)', 'blur(5px)'). Defaults to 'none'.
* @param {number} brightness Adjusts the brightness. 1 is normal, <1 is darker, >1 is brighter. Defaults to 1.
* @param {number} contrast Adjusts the contrast. 1 is normal. Defaults to 1.
* @param {number} saturate Adjusts the saturation. 1 is normal, 0 is grayscale. Defaults to 1.
* @param {number} hueRotate Rotates the hue of the image. Value is in degrees. Defaults to 0.
* @param {string} transition The transition effect to use for video generation ('none', 'fadeIn', 'slideInLeft'). 'none' will keep the image static. Defaults to 'none'.
* @param {number} duration The duration of the output video in seconds. Defaults to 5.
* @param {string} audioUrl An optional URL to an audio file (CORS-compliant) to be included in the video. Defaults to an empty string.
* @returns {Promise<HTMLCanvasElement | HTMLVideoElement>} A Promise that resolves to an HTMLCanvasElement for static images, or an HTMLVideoElement for generated videos.
*/
async function processImage(
originalImg,
filter = 'none',
brightness = 1,
contrast = 1,
saturate = 1,
hueRotate = 0,
transition = 'none',
duration = 5,
audioUrl = ''
) {
const isStatic = transition === 'none' && !audioUrl;
const fullFilter = `brightness(${brightness}) contrast(${contrast}) saturate(${saturate}) hue-rotate(${hueRotate}deg) ${filter}`;
// --- Static Image Case ---
// If no transition or audio is requested, simply return a canvas with filters applied.
if (isStatic) {
const canvas = document.createElement('canvas');
canvas.width = originalImg.naturalWidth;
canvas.height = originalImg.naturalHeight;
const ctx = canvas.getContext('2d');
ctx.filter = fullFilter;
ctx.drawImage(originalImg, 0, 0, canvas.width, canvas.height);
ctx.filter = 'none'; // Reset for good practice
return canvas; // Async function automatically wraps this in a resolved Promise.
}
// --- Video Generation Case ---
// Wrap the asynchronous video recording logic in a Promise.
return new Promise(async (resolve, reject) => {
try {
const canvas = document.createElement('canvas');
canvas.width = originalImg.naturalWidth;
canvas.height = originalImg.naturalHeight;
const ctx = canvas.getContext('2d');
// 1. Prepare Audio Track (if provided)
let audioTrack = null;
let audioPlayer = null;
if (audioUrl) {
try {
audioPlayer = new Audio(audioUrl);
audioPlayer.crossOrigin = "anonymous";
await new Promise((res, rej) => {
audioPlayer.oncanplaythrough = res;
audioPlayer.onerror = () => rej(new Error('Failed to load audio file. Check URL and CORS policy.'));
setTimeout(() => rej(new Error('Audio loading timed out')), 10000);
});
const audioStream = audioPlayer.captureStream();
if (audioStream.getAudioTracks().length > 0) {
audioTrack = audioStream.getAudioTracks()[0];
}
} catch (e) {
console.warn(`Could not process audio: ${e.message}. Proceeding without audio.`);
}
}
// 2. Prepare Video Track from Canvas
const videoStream = canvas.captureStream(30); // 30 FPS
const videoTrack = videoStream.getVideoTracks()[0];
// 3. Combine tracks into a single stream
const tracks = [videoTrack];
if (audioTrack) {
tracks.push(audioTrack);
}
const combinedStream = new MediaStream(tracks);
// 4. Set up MediaRecorder
const recordedChunks = [];
const mimeType = ['video/webm; codecs=vp9, opus', 'video/webm; codecs=vp8, opus', 'video/webm'].find(type => MediaRecorder.isTypeSupported(type));
if (!mimeType) {
return reject(new Error("No supported MIME type for MediaRecorder found in this browser."));
}
const recorder = new MediaRecorder(combinedStream, { mimeType });
recorder.ondataavailable = (event) => {
if (event.data.size > 0) {
recordedChunks.push(event.data);
}
};
recorder.onstop = () => {
const blob = new Blob(recordedChunks, { type: mimeType });
const recUrl = URL.createObjectURL(blob);
const videoElement = document.createElement('video');
videoElement.src = recUrl;
videoElement.controls = true;
videoElement.autoplay = true;
videoElement.loop = true;
videoElement.muted = !audioTrack; // Mute if no audio, to allow autoplay
videoElement.width = canvas.width;
videoElement.height = canvas.height;
// Cleanup
if (audioPlayer) {
audioPlayer.pause();
audioPlayer = null;
}
videoTrack.stop();
if(audioTrack) audioTrack.stop();
resolve(videoElement);
};
recorder.onerror = (e) => reject(new Error(`MediaRecorder error: ${e.error.name}`));
// 5. Start Recording and Animation Loop
recorder.start();
if (audioPlayer) audioPlayer.play();
const startTime = performance.now();
const totalDurationMs = duration * 1000;
const animate = (currentTime) => {
const elapsedTime = currentTime - startTime;
if (elapsedTime >= totalDurationMs) {
if (recorder.state === 'recording') {
recorder.stop();
}
return;
}
const progress = elapsedTime / totalDurationMs;
ctx.clearRect(0, 0, canvas.width, canvas.height);
ctx.save();
ctx.filter = fullFilter;
// Handle transitions
switch (transition) {
case 'fadeIn':
ctx.globalAlpha = Math.min(1.0, progress * 2); // Fade in over first half
ctx.drawImage(originalImg, 0, 0, canvas.width, canvas.height);
break;
case 'slideInLeft':
const easeOut = t => t * (2 - t);
const easedProgress = easeOut(Math.min(1.0, progress * 1.5)); // Slide in a bit faster
const x = -canvas.width * (1 - easedProgress);
ctx.drawImage(originalImg, x, 0, canvas.width, canvas.height);
break;
default: // 'none' or unknown
ctx.drawImage(originalImg, 0, 0, canvas.width, canvas.height);
break;
}
ctx.restore();
requestAnimationFrame(animate);
};
requestAnimationFrame(animate);
} catch (e) {
reject(e);
}
});
}
// Apply Changes