You can edit the JavaScript code below to customize the image tool.
Apply Changes
/**
 * Creates a video from a single image with Ken Burns effects, text overlay, and audio.
 *
 * Note: the pan effect only has visible range while the image is zoomed
 * (source rect smaller than the image), so combine `panDirection` with a
 * `zoomAmount` > 1 for a visible pan.
 *
 * @param {HTMLImageElement} originalImg The source image object. Must be fully loaded.
 * @param {number} duration The duration of the video in seconds (clamped to >= 1). Default is 5.
 * @param {string} zoomDirection The direction of the zoom effect. Can be 'in', 'out', or 'none'. Default is 'in'.
 * @param {string} panDirection The direction of the pan effect. Can be 'up', 'down', 'left', 'right', or 'none'. Default is 'none'.
 * @param {number} zoomAmount The magnitude of the zoom, e.g., 1.2 for a 20% zoom. Clamped to >= 1. Default is 1.1.
 * @param {number} fadeDuration The duration of the fade-in and fade-out effect in seconds.
 *   Clamped to [0, duration / 2] so the two fades can never overlap. Default is 0.5.
 * @param {string} textOverlay The text to display over the video. Default is an empty string.
 * @param {string} textColor The color of the text overlay. Default is 'white'.
 * @param {number} fontSize The font size of the text in pixels. Default is 48.
 * @param {string} fontFamily The font for the text. Use web-safe fonts. Default is 'Arial'.
 * @param {string} textPosition The position of the text. Can be 'top', 'center', or 'bottom'. Default is 'center'.
 * @param {string} audioUrl An optional URL to an audio file to be used as background music.
 *   Audio failures are non-fatal: the video is generated without sound.
 * @returns {Promise<HTMLVideoElement>} A promise that resolves with the generated HTML video element.
 * @throws {Error} If the image is not loaded, MediaRecorder is unavailable,
 *   or a 2D canvas context cannot be created.
 */
async function processImage(
  originalImg,
  duration = 5,
  zoomDirection = 'in',
  panDirection = 'none',
  zoomAmount = 1.1,
  fadeDuration = 0.5,
  textOverlay = '',
  textColor = 'white',
  fontSize = 48,
  fontFamily = 'Arial',
  textPosition = 'center',
  audioUrl = ''
) {
  // 1. Parameter validation / clamping.
  zoomAmount = Math.max(1.0, zoomAmount);
  duration = Math.max(1, duration);
  // Clamp so that fade-in + fade-out never exceed the total duration.
  fadeDuration = Math.min(Math.max(0, fadeDuration), duration / 2);
  const frameRate = 30;

  // Fail fast on preconditions (the function is async, so these throws
  // surface to the caller as a rejected promise).
  if (!originalImg || !originalImg.naturalWidth || !originalImg.naturalHeight) {
    throw new Error("Image has not loaded or has invalid dimensions.");
  }
  if (!window.MediaRecorder) {
    throw new Error("MediaRecorder API is not supported in this browser.");
  }

  // 2. Setup Canvas sized to the source image.
  const canvas = document.createElement('canvas');
  canvas.width = originalImg.naturalWidth;
  canvas.height = originalImg.naturalHeight;
  const ctx = canvas.getContext('2d');
  if (!ctx) {
    throw new Error("Could not get canvas context.");
  }

  // 3. Setup Media Streams (video + optional audio). The async audio work
  // is done here, BEFORE constructing the Promise, so the executor below
  // stays synchronous (avoids the async-Promise-executor anti-pattern).
  const videoStream = canvas.captureStream(frameRate);
  let finalStream = videoStream;
  let audioContext;
  if (audioUrl) {
    try {
      audioContext = new (window.AudioContext || window.webkitAudioContext)();
      const response = await fetch(audioUrl);
      if (!response.ok) throw new Error(`Failed to fetch audio: ${response.statusText}`);
      const audioData = await response.arrayBuffer();
      const audioBuffer = await audioContext.decodeAudioData(audioData);
      const source = audioContext.createBufferSource();
      source.buffer = audioBuffer;
      const dest = audioContext.createMediaStreamDestination();
      source.connect(dest);
      source.start(0);
      const audioTracks = dest.stream.getAudioTracks();
      if (audioTracks.length > 0) {
        finalStream = new MediaStream([...videoStream.getVideoTracks(), ...audioTracks]);
      }
    } catch (audioError) {
      // Best-effort: a bad audio URL must not kill video generation.
      console.warn("Could not load or process audio. Proceeding without it.", audioError);
    }
  }

  // 4. Pick the first container/codec this browser can actually record.
  // Hard-coding vp9/opus throws NotSupportedError on browsers without VP9
  // (e.g. Safari), so probe with MediaRecorder.isTypeSupported and fall back.
  const mimeType = [
    'video/webm; codecs=vp9,opus',
    'video/webm; codecs=vp8,opus',
    'video/webm',
    'video/mp4',
  ].find((type) => MediaRecorder.isTypeSupported(type)) ?? '';

  return new Promise((resolve, reject) => {
    // Release the AudioContext on every terminal path, not just success.
    const cleanup = () => {
      if (audioContext) audioContext.close();
    };

    // 5. Setup MediaRecorder.
    const recordedChunks = [];
    const recorder = new MediaRecorder(
      finalStream,
      mimeType ? { mimeType } : undefined
    );
    recorder.ondataavailable = (event) => {
      if (event.data.size > 0) recordedChunks.push(event.data);
    };
    recorder.onstop = () => {
      const blob = new Blob(recordedChunks, {
        // Container type without the codec suffix, e.g. 'video/webm'.
        type: mimeType ? mimeType.split(';')[0].trim() : 'video/webm'
      });
      const generatedVideoUrl = URL.createObjectURL(blob);
      const videoElement = document.createElement('video');
      videoElement.src = generatedVideoUrl;
      videoElement.controls = true;
      videoElement.autoplay = true;
      videoElement.loop = true;
      videoElement.playsInline = true;
      // Autoplay is more likely to work if muted. Mute if no audio was requested.
      videoElement.muted = !audioUrl;
      cleanup();
      resolve(videoElement);
    };
    recorder.onerror = (event) => {
      cleanup();
      reject(event.error || new Error("MediaRecorder error."));
    };

    // 6. Start Recording and Animation Loop.
    recorder.start();
    let startTime = null;
    const lerp = (a, b, t) => a + (b - a) * t;
    const animate = (timestamp) => {
      if (!startTime) startTime = timestamp;
      const elapsedMs = timestamp - startTime;
      const progress = Math.min(elapsedMs / (duration * 1000), 1);
      // Clear canvas.
      ctx.fillStyle = 'black';
      ctx.fillRect(0, 0, canvas.width, canvas.height);

      // --- Calculate animation parameters ---
      // currentScale divides the source rect, so a LARGER scale shows a
      // SMALLER region of the image, i.e. more magnification. Therefore
      // 'in' must interpolate 1.0 -> zoomAmount (image grows closer over
      // time) and 'out' the reverse. (The previous version had these two
      // swapped, so 'in' visually zoomed out.)
      let startScale = 1.0, endScale = 1.0;
      if (zoomDirection === 'in') {
        startScale = 1.0; endScale = zoomAmount;
      } else if (zoomDirection === 'out') {
        startScale = zoomAmount; endScale = 1.0;
      }
      const currentScale = lerp(startScale, endScale, progress);
      const sWidth = originalImg.naturalWidth / currentScale;
      const sHeight = originalImg.naturalHeight / currentScale;
      // How far the source rect can slide inside the image at this scale.
      const xOffsetRange = originalImg.naturalWidth - sWidth;
      const yOffsetRange = originalImg.naturalHeight - sHeight;
      let startSx = xOffsetRange / 2, endSx = xOffsetRange / 2;
      let startSy = yOffsetRange / 2, endSy = yOffsetRange / 2;
      switch (panDirection) {
        case 'left': startSx = xOffsetRange; endSx = 0; break;
        case 'right': startSx = 0; endSx = xOffsetRange; break;
        case 'up': startSy = yOffsetRange; endSy = 0; break;
        case 'down': startSy = 0; endSy = yOffsetRange; break;
      }
      const sx = lerp(startSx, endSx, progress);
      const sy = lerp(startSy, endSy, progress);

      // --- Draw Frame ---
      ctx.drawImage(originalImg, sx, sy, sWidth, sHeight, 0, 0, canvas.width, canvas.height);

      // --- Draw Text Overlay ---
      if (textOverlay) {
        ctx.font = `${fontSize}px ${fontFamily}`;
        ctx.fillStyle = textColor;
        ctx.textAlign = 'center';
        ctx.textBaseline = 'middle';
        ctx.shadowColor = 'rgba(0,0,0,0.7)';
        ctx.shadowBlur = 5;
        ctx.shadowOffsetX = 2;
        ctx.shadowOffsetY = 2;
        let ty;
        switch (textPosition) {
          case 'top': ty = fontSize * 1.5; break;
          case 'bottom': ty = canvas.height - (fontSize * 1.5); break;
          default: ty = canvas.height / 2; break;
        }
        ctx.fillText(textOverlay, canvas.width / 2, ty);
        ctx.shadowColor = 'transparent'; // Reset shadow
      }

      // --- Draw Fade Effect (black overlay whose alpha ramps at both ends) ---
      const fadeDurationMs = fadeDuration * 1000;
      const totalDurationMs = duration * 1000;
      if (fadeDurationMs > 0) {
        let fadeAlpha = 0.0;
        if (elapsedMs < fadeDurationMs) { // Fade In
          fadeAlpha = 1.0 - (elapsedMs / fadeDurationMs);
        } else if (elapsedMs > totalDurationMs - fadeDurationMs) { // Fade Out
          fadeAlpha = (elapsedMs - (totalDurationMs - fadeDurationMs)) / fadeDurationMs;
        }
        if (fadeAlpha > 0) {
          ctx.fillStyle = `rgba(0, 0, 0, ${Math.min(1, fadeAlpha)})`;
          ctx.fillRect(0, 0, canvas.width, canvas.height);
        }
      }

      // --- Loop or Stop ---
      if (progress < 1) {
        requestAnimationFrame(animate);
      } else if (recorder.state === 'recording') {
        recorder.stop();
      }
    };
    requestAnimationFrame(animate);
  });
}
Apply Changes