Online Astrologer: Vedic Astrology service via Indian astrologers: Astrology, horoscope, love, man woman positive, negative, compatibility, career, zodiac signs,
Text To Video Song
import React, { useRef, useState } from "react";
// TextToVideoSong.jsx
// Single-file React component (Tailwind classes) that converts input text into a simple "song" video:
// - Generates a melody from the text (maps words to notes)
// - Renders animated lyrics onto a canvas
// - Uses WebAudio to synthesize the melody and routes audio to a MediaStream
// - Uses MediaRecorder to capture canvas + audio and produce a downloadable WebM video
// NOTE: This is a client-only demo — it produces a machine-synthesized song (no human voice).
export default function TextToVideoSong() {
// Canvas used both for drawing lyric frames and as the MediaRecorder video source.
const canvasRef = useRef(null);
// Lyrics text; every whitespace-separated word becomes one note/frame.
const [text, setText] = useState("Twinkle twinkle little star\nHow I wonder what you are");
const [tempo, setTempo] = useState(1.0); // speed multiplier
const [noteLength, setNoteLength] = useState(0.6); // seconds per word at tempo=1
const [isGenerating, setIsGenerating] = useState(false); // true while a recording is in progress
const [downloadUrl, setDownloadUrl] = useState(null); // object URL of the finished WebM, if any
const [bgColor, setBgColor] = useState('#0b1220'); // solid background color when no image is set
const [fontSize, setFontSize] = useState(48); // lyric font size in px
const [waveform, setWaveform] = useState('sine'); // oscillator type: sine, square, sawtooth, triangle
const [framerate, setFramerate] = useState(30); // canvas captureStream FPS
const [bgImage, setBgImage] = useState(null); // optional background image (File from the picker)
// Utility: map a word (string) -> frequency (Hz)
/**
 * Deterministically map a word to an oscillator pitch.
 * The same word always yields the same frequency, spread across roughly
 * C3 (~130.81 Hz) up to C6 (~1046.5 Hz).
 * @param {string} word - word to convert (case-sensitive)
 * @returns {number} frequency in Hz
 */
function wordToFreq(word) {
  // Position-weighted char-code sum acts as a cheap deterministic hash.
  // split('') iterates UTF-16 units, matching charCodeAt indexing.
  const hash = word
    .split('')
    .reduce((acc, ch, idx) => acc + ch.charCodeAt(0) * (idx + 1), 0);
  const C3 = 130.81;
  const C6 = 1046.5;
  // Fold the hash into [0, 1) and scale into the musical range.
  return C3 + ((hash % 1000) / 1000) * (C6 - C3);
}
// Draw a frame for the current word index
/**
 * Render one frame of the lyric video onto the canvas.
 * Shows the current word large and centered, with the next word beneath it.
 * Reads `bgColor` and `fontSize` from the enclosing component state.
 * @param {CanvasRenderingContext2D} ctx
 * @param {number} width - canvas width in px
 * @param {number} height - canvas height in px
 * @param {string[]} words - full word list for the song
 * @param {number} currentIndex - index of the word being "sung" this frame
 * @param {HTMLImageElement|null} imgElem - optional decoded background image
 */
function drawFrame(ctx, width, height, words, currentIndex, imgElem) {
  ctx.clearRect(0, 0, width, height);
  // background
  if (imgElem) {
    // Draw the background with CSS-"cover" semantics: scale until the image
    // fills the canvas, then center-crop the overflowing axis.
    // (The previous math computed a crop offset but never applied it, so the
    // image was stretched to the canvas aspect ratio.)
    const scale = Math.max(width / imgElem.width, height / imgElem.height);
    const srcW = width / scale;
    const srcH = height / scale;
    const sx = (imgElem.width - srcW) / 2;
    const sy = (imgElem.height - srcH) / 2;
    ctx.drawImage(imgElem, sx, sy, srcW, srcH, 0, 0, width, height);
    // overlay to darken for readability
    ctx.fillStyle = 'rgba(0,0,0,0.35)';
    ctx.fillRect(0, 0, width, height);
  } else {
    ctx.fillStyle = bgColor;
    ctx.fillRect(0, 0, width, height);
  }
  // draw the "title" small
  ctx.fillStyle = 'rgba(255,255,255,0.85)';
  ctx.font = `20px system-ui`;
  ctx.textAlign = 'left';
  ctx.fillText('Text → Video Song', 20, 30);
  // draw current & next words
  ctx.textAlign = 'center';
  ctx.fillStyle = 'white';
  ctx.font = `bold ${fontSize}px system-ui, sans-serif`;
  const y = height / 2;
  const currentWord = words[currentIndex] || '';
  const nextWord = words[currentIndex + 1] || '';
  ctx.save();
  // drop shadow so the current word stays readable over any background
  ctx.shadowColor = 'rgba(0,0,0,0.6)';
  ctx.shadowBlur = 20;
  ctx.fillText(currentWord, width / 2, y);
  ctx.restore();
  // upcoming word, smaller and dimmer, below the current one
  ctx.font = `28px system-ui`;
  ctx.fillStyle = 'rgba(255,255,255,0.85)';
  ctx.fillText(nextWord, width / 2, y + fontSize + 24);
}
// Main generation function
/**
 * Generate the lyric video: synthesize one note per word with WebAudio,
 * animate the lyrics on the canvas, and capture canvas + audio with
 * MediaRecorder into a downloadable WebM blob.
 * Reads component state (text, tempo, noteLength, waveform, framerate,
 * bgImage) from the enclosing closure; publishes the result via
 * setDownloadUrl / setIsGenerating.
 */
async function generateVideo() {
  if (!text.trim()) return alert('Please enter some text.');
  setIsGenerating(true);
  setDownloadUrl(null);
  // Split into words; any whitespace run (including line breaks) separates notes.
  const words = text.split(/\s+/).filter(Boolean);
  const secondsPerWord = noteLength / tempo; // effective seconds per word
  const totalDuration = secondsPerWord * words.length;
  // Prepare canvas at fixed 720p
  const canvas = canvasRef.current;
  const width = 1280;
  const height = 720;
  canvas.width = width;
  canvas.height = height;
  const ctx = canvas.getContext('2d');
  // Load background image if provided. Handle onerror too so a failed decode
  // can never hang this promise forever, and revoke the blob URL afterwards.
  let imgElem = null;
  if (bgImage) {
    const objectUrl = URL.createObjectURL(bgImage);
    imgElem = new Image();
    imgElem.src = objectUrl;
    try {
      await new Promise((res, rej) => {
        imgElem.onload = res;
        imgElem.onerror = rej;
      });
    } catch (e) {
      console.warn('Background image failed to load; using solid color.', e);
      imgElem = null;
    } finally {
      URL.revokeObjectURL(objectUrl); // image is decoded (or failed); don't leak the URL
    }
  }
  // Setup AudioContext and a MediaStream destination the recorder can capture.
  const audioCtx = new (window.AudioContext || window.webkitAudioContext)();
  const dest = audioCtx.createMediaStreamDestination();
  // Master gain for overall volume
  const masterGain = audioCtx.createGain();
  masterGain.gain.value = 0.3;
  masterGain.connect(dest);
  // Schedule one enveloped note per word on the audio timeline.
  const now = audioCtx.currentTime + 0.3; // small lead-in
  words.forEach((w, i) => {
    const start = now + i * secondsPerWord;
    const duration = secondsPerWord * 0.9; // small tail gap between notes
    const freq = wordToFreq(w.toLowerCase());
    // simple attack/decay envelope around an oscillator
    const osc = audioCtx.createOscillator();
    osc.type = waveform; // sine, square, sawtooth, triangle
    osc.frequency.value = freq;
    const g = audioCtx.createGain();
    // exponential ramps cannot reach 0, so start/end at a tiny epsilon
    g.gain.setValueAtTime(0.0001, start);
    g.gain.exponentialRampToValueAtTime(0.25, start + 0.02);
    g.gain.exponentialRampToValueAtTime(0.0001, start + duration);
    osc.connect(g);
    g.connect(masterGain);
    osc.start(start);
    osc.stop(start + duration + 0.05);
  });
  // Combine the canvas stream with the synthesized audio tracks.
  const canvasStream = canvas.captureStream(framerate);
  dest.stream.getAudioTracks().forEach((t) => canvasStream.addTrack(t));
  const recordedChunks = [];
  const preferredMime = 'video/webm;codecs=vp8,opus';
  let recorder;
  try {
    recorder = new MediaRecorder(canvasStream, { mimeType: preferredMime });
  } catch (e) {
    // Browser doesn't support the preferred codec combo; let it pick one.
    recorder = new MediaRecorder(canvasStream);
  }
  recorder.ondataavailable = (ev) => {
    if (ev.data && ev.data.size) recordedChunks.push(ev.data);
  };
  // Register onstop BEFORE stop() is ever called so the finalization handler
  // can never be missed, and defer audioCtx cleanup until recording is final.
  recorder.onstop = () => {
    // Use the type the recorder actually produced (the fallback constructor
    // may have chosen something other than the preferred mime type).
    const blob = new Blob(recordedChunks, { type: recorder.mimeType || preferredMime });
    const url = URL.createObjectURL(blob);
    setDownloadUrl(url);
    setIsGenerating(false);
    audioCtx.close(); // release audio resources only after the blob is built
  };
  // Drive visuals from wall-clock time so they track the audio schedule.
  const startTime = performance.now();
  let animFrameId = null;
  function frame() {
    const elapsed = (performance.now() - startTime) / 1000; // s since visuals started
    const index = Math.floor(elapsed / secondsPerWord);
    drawFrame(ctx, width, height, words, index, imgElem);
    if (elapsed < totalDuration + 0.5) {
      animFrameId = requestAnimationFrame(frame);
    }
  }
  // start recording and visuals
  recorder.start(100); // collect chunks regularly
  frame();
  // Many browsers create the AudioContext suspended until resumed by a gesture.
  try {
    await audioCtx.resume();
  } catch (e) {
    console.warn('AudioContext resume failed', e);
  }
  // Stop recording shortly after the last scheduled note has finished;
  // cleanup and URL publishing happen in the onstop handler above.
  setTimeout(() => {
    recorder.stop();
    cancelAnimationFrame(animFrameId);
  }, (totalDuration + 0.6) * 1000);
}
// helper: handle image upload
/**
 * Change handler for the background-image file input.
 * Stores the first selected file (if any) as the background image.
 * @param {Event} e - change event from the <input type="file"> element
 */
function onImagePicked(e) {
  const [picked] = e.target.files;
  if (picked) setBgImage(picked);
}
// NOTE(review): the JSX markup of the returned UI appears to have been
// stripped by whatever extracted this file — only the elements' text content
// survives below, so this return statement is not valid JSX as written.
// The original render tree (textarea, tempo/waveform controls, canvas,
// download link) would need to be restored from the project repo — TODO confirm.
return (
Text → Video Song
Type text (lyrics) and generate a short lyric-video with synthesized melody.
This runs entirely in your browser — no uploads.
Tip: Use simple short lines. Longer text will be split by words — each word becomes a note. This demo synthesizes sound in-browser; it's ideal for prototypes and for learning how to combine canvas + WebAudio + MediaRecorder.
);
}
No comments:
Post a Comment
Please do not enter any spam links in the comment box.
Note: Only a member of this blog may post a comment.
No comments:
Post a Comment
Please do not enter any spam links in the comment box.
Note: Only a member of this blog may post a comment.