refactor: [30388f42] Entferne ungenutztes Projekt 'k-pop-thumbnail-genie'

- Löscht das gesamte Verzeichnis und die zugehörigen Dateien des 'k-pop-thumbnail-genie'-Projekts, da es nicht mehr benötigt wird.
This commit is contained in:
2026-03-06 09:48:18 +00:00
parent 6ca9fbbc69
commit a89d1625d4
32 changed files with 0 additions and 1532 deletions

View File

@@ -1,24 +0,0 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*
node_modules
dist
dist-ssr
*.local
# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?

Binary file not shown.

Before

Width:  |  Height:  |  Size: 207 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 164 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 197 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 166 KiB

View File

@@ -1,223 +0,0 @@
import React, { useState, useCallback } from 'react';
import { UploadedImage, AppStep, GenerationResult } from './types';
import { expandPrompt, generateImage, refineImage } from './services/geminiService';
import ImageUploader from './components/ImageUploader';
import ImageSegmenter from './components/ImageSegmenter';
import PromptCustomizer from './components/PromptCustomizer';
import ImageResult from './components/ImageResult';
import StepIndicator from './components/StepIndicator';
import { SparklesIcon } from './components/icons/SparklesIcon';
import { applyMask } from './utils/canvasUtils';
import { LoggingProvider, useLogger } from './contexts/LoggingContext';
import DebugConsole from './components/DebugConsole';
// Inner application component. Must live inside LoggingProvider so it can call
// useLogger(). Drives a four-step wizard (Upload -> Segment -> Prompt ->
// Result) and owns every piece of cross-step state.
const AppContent: React.FC = () => {
  // Current wizard step.
  const [step, setStep] = useState<AppStep>(AppStep.Upload);
  // Source images (file + preview + subject description + mask) from steps 1-2.
  const [uploadedImages, setUploadedImages] = useState<UploadedImage[]>([]);
  // AI-expanded "master prompt" produced and edited in the Prompt step.
  const [masterPrompt, setMasterPrompt] = useState<string>('');
  // Generated thumbnail plus its refinement history; null until first generation.
  const [generationResult, setGenerationResult] = useState<GenerationResult | null>(null);
  // Shared busy flag + user-facing message for any in-flight Gemini call.
  const [isLoading, setIsLoading] = useState<boolean>(false);
  const [loadingMessage, setLoadingMessage] = useState<string>('');
  // Last error message, shown in a banner above the active step; cleared on navigation.
  const [error, setError] = useState<string | null>(null);
  const { log } = useLogger();

  // Step 1 -> 2: store the uploads and advance to segmentation.
  const handleImagesUploaded = (images: UploadedImage[]) => {
    log('info', `${images.length} images uploaded. Moving to segmentation step.`);
    setUploadedImages(images);
    setStep(AppStep.Segment);
    setError(null);
  };

  // Step 2 -> 3: keep the (possibly hand-edited) masks and advance to prompting.
  const handleSegmentationComplete = (imagesWithMasks: UploadedImage[]) => {
    log('success', `Segmentation complete for all ${imagesWithMasks.length} images. Moving to prompt step.`);
    setUploadedImages(imagesWithMasks);
    setStep(AppStep.Prompt);
    setError(null);
  };

  // Expands the user's scenario + instruction into a full master prompt via
  // the Gemini service. Stays on the Prompt step; only fills masterPrompt.
  const handlePromptExpanded = useCallback(async (scenario: string, userInstruction: string) => {
    setIsLoading(true);
    setLoadingMessage('Expanding your idea into a master prompt...');
    setError(null);
    log('info', `Expanding prompt with scenario: "${scenario}"`);
    try {
      const prompt = await expandPrompt(scenario, userInstruction, uploadedImages);
      setMasterPrompt(prompt);
      log('success', 'Master prompt created successfully.');
    } catch (e) {
      const errorMessage = e instanceof Error ? e.message : 'An unknown error occurred during prompt expansion.';
      log('error', `Prompt expansion failed: ${errorMessage}`);
      setError(errorMessage);
    } finally {
      setIsLoading(false);
      setLoadingMessage('');
    }
  }, [uploadedImages, log]);

  // Step 3 -> 4: applies each image's mask to cut out its subject, then asks
  // the service to compose the final thumbnail. On failure, returns to the
  // Prompt step so the user can retry.
  const handleFinalGeneration = useCallback(async (finalPrompt: string) => {
    setIsLoading(true);
    setLoadingMessage('Generating your K-Pop thumbnail...');
    setError(null);
    log('info', 'Starting final image generation.');
    try {
      log('info', 'Applying masks to create segmented images...');
      // Every image must have a mask by this point (enforced by the Segment
      // step's "next" button); a missing mask is treated as a hard error.
      const segmentedImages = await Promise.all(
        uploadedImages.map(async (image) => {
          if (!image.maskDataUrl) {
            throw new Error(`Mask is missing for image: ${image.file.name}`);
          }
          const segmentedData = await applyMask(image.previewUrl, image.maskDataUrl);
          return { ...image, segmentedDataUrl: `data:image/png;base64,${segmentedData}` };
        })
      );
      log('success', 'Masks applied successfully.');
      const result = await generateImage(finalPrompt, segmentedImages);
      // Seed the result history with the first generation.
      setGenerationResult({
        baseImage: result,
        currentImage: result,
        history: [result],
      });
      setStep(AppStep.Result);
      log('success', 'Thumbnail generated successfully.');
    } catch (e) {
      const errorMessage = e instanceof Error ? e.message : 'An unknown error occurred during image generation.';
      log('error', `Final image generation failed: ${errorMessage}`);
      setError(errorMessage);
      setStep(AppStep.Prompt);
    } finally {
      setIsLoading(false);
      setLoadingMessage('');
    }
  }, [uploadedImages, log]);

  // Refines the *current* image with an extra instruction and appends the new
  // version to the history. No-op if there is no generation result yet.
  const handleImageRefinement = useCallback(async (refinementPrompt: string) => {
    if (!generationResult) return;
    setIsLoading(true);
    setLoadingMessage('Applying your refinements...');
    setError(null);
    log('info', `Refining image with prompt: "${refinementPrompt}"`);
    try {
      const newImage = await refineImage(refinementPrompt, generationResult.currentImage);
      setGenerationResult(prev => {
        if (!prev) return null;
        const newHistory = [...prev.history, newImage];
        return {
          baseImage: prev.baseImage,
          currentImage: newImage,
          history: newHistory,
        };
      });
      log('success', 'Image refined successfully.');
    } catch (e) {
      const errorMessage = e instanceof Error ? e.message : 'An unknown error occurred during image refinement.';
      log('error', `Image refinement failed: ${errorMessage}`);
      setError(errorMessage);
    } finally {
      setIsLoading(false);
      setLoadingMessage('');
    }
  }, [generationResult, log]);

  // Navigates one step backwards. Going back from Result or Prompt discards
  // the master prompt so it gets re-expanded on the next pass.
  const handleBack = () => {
    setError(null);
    if (step === AppStep.Result) {
      log('info', 'Navigating back from Result to Prompt step.');
      setMasterPrompt('');
      setStep(AppStep.Prompt);
    } else if (step === AppStep.Prompt) {
      log('info', 'Navigating back from Prompt to Segment step.');
      setMasterPrompt('');
      setStep(AppStep.Segment);
    } else if (step === AppStep.Segment) {
      log('info', 'Navigating back from Segment to Upload step.');
      setStep(AppStep.Upload);
    }
  };

  // Resets every piece of wizard state and returns to the Upload step.
  const handleStartOver = () => {
    log('info', 'Starting over. Resetting application state.');
    setStep(AppStep.Upload);
    setUploadedImages([]);
    setMasterPrompt('');
    setGenerationResult(null);
    setIsLoading(false);
    setLoadingMessage('');
    setError(null);
  }

  // Maps the current step to its screen component, wiring in the handlers above.
  const renderStep = () => {
    switch (step) {
      case AppStep.Upload:
        return <ImageUploader onImagesUploaded={handleImagesUploaded} />;
      case AppStep.Segment:
        return <ImageSegmenter
          images={uploadedImages}
          onComplete={handleSegmentationComplete}
          onBack={handleBack}
        />;
      case AppStep.Prompt:
        return <PromptCustomizer
          onPromptExpanded={handlePromptExpanded}
          onFinalSubmit={handleFinalGeneration}
          isLoading={isLoading}
          loadingMessage={loadingMessage}
          uploadedImages={uploadedImages}
          onBack={handleBack}
          masterPrompt={masterPrompt}
          setMasterPrompt={setMasterPrompt}
        />;
      case AppStep.Result:
        return <ImageResult
          result={generationResult}
          onRefine={handleImageRefinement}
          masterPrompt={masterPrompt}
          isLoading={isLoading}
          loadingMessage={loadingMessage}
          onStartOver={handleStartOver}
        />;
      default:
        return <ImageUploader onImagesUploaded={handleImagesUploaded} />;
    }
  };

  return (
    <div className="min-h-screen bg-gray-900 text-gray-100 flex flex-col items-center p-4 sm:p-6 lg:p-8 pb-32">
      <header className="w-full max-w-6xl text-center mb-6">
        <div className="flex items-center justify-center gap-3">
          <SparklesIcon className="w-10 h-10 text-purple-400" />
          <h1 className="text-4xl sm:text-5xl md:text-6xl font-teko tracking-wider uppercase bg-gradient-to-r from-purple-400 to-pink-500 text-transparent bg-clip-text">
            K-Pop Thumbnail Genie
          </h1>
        </div>
        <p className="text-gray-400 mt-2 text-sm sm:text-base">Create stunning, emotional YouTube thumbnails with the magic of AI</p>
      </header>
      <main className="w-full max-w-6xl flex-grow">
        <StepIndicator currentStep={step} />
        <div className="mt-8 bg-gray-800/50 p-6 sm:p-8 rounded-2xl shadow-2xl shadow-purple-900/10 border border-gray-700">
          {error && (
            <div className="bg-red-900/50 border border-red-700 text-red-300 px-4 py-3 rounded-lg mb-6 text-center">
              <p><span className="font-bold">Error:</span> {error}</p>
            </div>
          )}
          {renderStep()}
        </div>
      </main>
      <footer className="w-full max-w-6xl text-center mt-8 text-gray-500 text-xs">
        <p>Powered by Google Gemini. Designed for K-Pop content creators.</p>
      </footer>
      <DebugConsole />
    </div>
  );
};
// Application entry point. Mounts the step wizard inside the logging context
// so every descendant can write to the shared debug console via useLogger().
const App: React.FC = () => {
  return (
    <LoggingProvider>
      <AppContent />
    </LoggingProvider>
  );
};

export default App;

View File

@@ -1,20 +0,0 @@
<div align="center">
<img width="1200" height="475" alt="GHBanner" src="https://github.com/user-attachments/assets/0aa67016-6eaf-458a-adb2-6e31a0763ed6" />
</div>
# Run and deploy your AI Studio app
This contains everything you need to run your app locally.
View your app in AI Studio: https://ai.studio/apps/drive/18Jqma41iTUAZu0UduZKdv9IMtoJJI3KW
## Run Locally
**Prerequisites:** Node.js
1. Install dependencies:
`npm install`
2. Set the `GEMINI_API_KEY` in [.env.local](.env.local) to your Gemini API key
3. Run the app:
`npm run dev`

Binary file not shown.

Before

Width:  |  Height:  |  Size: 147 KiB

View File

@@ -1,79 +0,0 @@
import React, { useState, useEffect, useRef } from 'react';
import { useLogger, LogMessage, LogType } from '../contexts/LoggingContext';
// Tailwind text-color class used to render each log severity in the console.
const LOG_COLORS: Record<LogType, string> = {
  info: 'text-gray-300',
  success: 'text-green-400',
  error: 'text-red-400',
  warn: 'text-yellow-400',
};
/**
 * Floating debug console: a button pinned to the bottom-right corner that
 * toggles a bottom panel listing every LoggingContext message, with
 * copy-to-clipboard and clear controls.
 */
const DebugConsole: React.FC = () => {
  const { logs, clearLogs } = useLogger();
  const [open, setOpen] = useState(false);
  const [copyLabel, setCopyLabel] = useState('Copy');
  const bottomRef = useRef<HTMLDivElement>(null);

  // Keep the newest entry in view whenever the log list grows.
  useEffect(() => {
    bottomRef.current?.scrollIntoView({ behavior: 'smooth' });
  }, [logs]);

  // Briefly swap the copy button's label, restoring it after two seconds.
  const flashCopyLabel = (label: string) => {
    setCopyLabel(label);
    setTimeout(() => setCopyLabel('Copy'), 2000);
  };

  // Serialize all entries as "[time] [TYPE] message" lines and copy them.
  const handleCopy = () => {
    const text = logs
      .map(entry => `[${entry.timestamp}] [${entry.type.toUpperCase()}] ${entry.message}`)
      .join('\n');
    navigator.clipboard.writeText(text).then(
      () => flashCopyLabel('Copied!'),
      () => flashCopyLabel('Failed!')
    );
  };

  return (
    <>
      <button
        onClick={() => setOpen(!open)}
        className="fixed bottom-4 right-4 bg-purple-700 hover:bg-purple-800 text-white rounded-full p-3 shadow-lg z-50 transition-transform hover:scale-110"
        aria-label="Toggle Debug Console"
      >
        <svg xmlns="http://www.w3.org/2000/svg" className="h-6 w-6" fill="none" viewBox="0 0 24 24" stroke="currentColor">
          <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M8 9l3 3-3 3m5 0h3M5 20h14a2 2 0 002-2V6a2 2 0 00-2-2H5a2 2 0 00-2 2v12a2 2 0 002 2z" />
        </svg>
      </button>
      {open && (
        <div className="fixed bottom-0 left-0 right-0 h-1/3 bg-gray-900/95 backdrop-blur-sm border-t-2 border-purple-800 z-40 flex flex-col p-2 shadow-2xl">
          <div className="flex items-center justify-between mb-2 px-2 flex-shrink-0">
            <h3 className="font-teko text-2xl text-purple-300 tracking-wide">DEBUG CONSOLE</h3>
            <div className="flex items-center gap-2">
              <button onClick={handleCopy} className="text-xs bg-gray-700 hover:bg-gray-600 text-gray-300 px-3 py-1 rounded-md transition-colors">{copyLabel}</button>
              <button onClick={clearLogs} className="text-xs bg-gray-700 hover:bg-gray-600 text-gray-300 px-3 py-1 rounded-md transition-colors">Clear</button>
              <button onClick={() => setOpen(false)} className="text-gray-400 hover:text-white">&times;</button>
            </div>
          </div>
          <div className="overflow-y-auto flex-grow bg-black/50 p-2 rounded-md font-mono text-sm">
            {logs.length === 0 ? (
              <p className="text-gray-500">No logs yet. Start using the app to see messages here.</p>
            ) : (
              logs.map((entry, index) => (
                <div key={index} className="flex">
                  <span className="text-gray-500 mr-2 flex-shrink-0">{entry.timestamp}</span>
                  <span className={`${LOG_COLORS[entry.type]} whitespace-pre-wrap break-all`}>
                    <span className='font-bold mr-2'>[{entry.type.toUpperCase()}]</span>
                    {entry.message}
                  </span>
                </div>
              ))
            )}
            <div ref={bottomRef} />
          </div>
        </div>
      )}
    </>
  );
};
export default DebugConsole;

View File

@@ -1,103 +0,0 @@
import React, { useState } from 'react';
import { GenerationResult } from '../types';
import { DownloadIcon } from './icons/DownloadIcon';
import { MagicIcon } from './icons/MagicIcon';
// Props for the Result screen (step 4 of the wizard).
interface ImageResultProps {
  // Generated image + refinement history; null means generation never happened.
  result: GenerationResult | null;
  // Submits a refinement instruction for the current image.
  onRefine: (refinementPrompt: string) => void;
  // The master prompt that produced the image (shown read-only in a <details>).
  masterPrompt: string;
  // True while a refinement request is in flight; disables the form.
  isLoading: boolean;
  // User-facing message displayed over the image while loading.
  loadingMessage: string;
  // Resets the whole wizard back to the Upload step.
  onStartOver: () => void;
}
/**
 * Result screen: shows the generated thumbnail, a refinement form that feeds
 * back into the generation loop, the master prompt, and download/start-over
 * actions. Falls back to an error message when no result exists.
 */
const ImageResult: React.FC<ImageResultProps> = ({ result, onRefine, masterPrompt, isLoading, loadingMessage, onStartOver }) => {
  const [draft, setDraft] = useState('');

  // Submit the refinement text (if non-blank) and clear the textarea.
  const handleRefineSubmit = (e: React.FormEvent) => {
    e.preventDefault();
    if (!draft.trim()) return;
    onRefine(draft);
    setDraft('');
  };

  // Defensive fallback: the parent should never route here without a result.
  if (!result) {
    return (
      <div className="text-center">
        <p>Something went wrong. No image was generated.</p>
        <button onClick={onStartOver} className="mt-4 bg-purple-600 hover:bg-purple-700 text-white font-bold py-2 px-4 rounded">Start Over</button>
      </div>
    );
  }

  return (
    <div className="w-full flex flex-col lg:flex-row gap-8">
      <div className="lg:w-2/3 relative">
        <div className="aspect-w-16 aspect-h-9 bg-black rounded-lg overflow-hidden shadow-lg">
          <img src={`data:image/png;base64,${result.currentImage}`} alt="Generated thumbnail" className="w-full h-full object-contain" />
        </div>
        {isLoading && (
          <div className="absolute inset-0 bg-black/70 flex flex-col items-center justify-center text-center p-4 rounded-lg">
            <div className="animate-spin rounded-full h-12 w-12 border-b-2 border-purple-400 mb-4"></div>
            <p className="text-lg text-purple-300">{loadingMessage}</p>
          </div>
        )}
      </div>
      <div className="lg:w-1/3 flex flex-col">
        <h2 className="text-2xl font-bold text-gray-100 mb-4">Your Masterpiece</h2>
        <div className="flex-grow space-y-6">
          <form onSubmit={handleRefineSubmit}>
            <label htmlFor="refinement-prompt" className="block text-lg font-semibold text-purple-300 mb-2">
              Refine Your Image
            </label>
            <textarea
              id="refinement-prompt"
              value={draft}
              onChange={(e) => setDraft(e.target.value)}
              placeholder="e.g., 'Make the smile a little softer.' or 'Change the background to be more blurry.'"
              className="w-full bg-gray-700 border border-gray-600 rounded-lg p-3 text-base h-24 focus:ring-2 focus:ring-purple-500 focus:border-purple-500 transition"
              required
            />
            <button
              type="submit"
              disabled={isLoading || !draft}
              className="w-full mt-3 bg-purple-600 hover:bg-purple-700 disabled:bg-gray-600 text-white font-bold py-2.5 px-4 rounded-lg transition-colors duration-300 flex items-center justify-center gap-2"
            >
              <MagicIcon className="w-5 h-5"/> Refine
            </button>
          </form>
          <div>
            <details className="bg-gray-700/50 rounded-lg">
              <summary className="cursor-pointer text-purple-300 font-semibold p-3">View Master Prompt</summary>
              <p className="p-3 pt-0 text-gray-400 text-sm">{masterPrompt}</p>
            </details>
          </div>
        </div>
        <div className="mt-8 space-y-3">
          <a
            href={`data:image/png;base64,${result.currentImage}`}
            download="kpop-thumbnail.png"
            className="w-full bg-green-600 hover:bg-green-700 text-white font-bold py-3 px-4 rounded-lg transition-colors duration-300 flex items-center justify-center gap-2 text-lg"
          >
            <DownloadIcon className="w-6 h-6" /> Download Image
          </a>
          <button
            onClick={onStartOver}
            className="w-full bg-gray-600 hover:bg-gray-500 text-white font-bold py-2 px-4 rounded-lg transition-colors duration-300"
          >
            Start Over
          </button>
        </div>
      </div>
    </div>
  );
};
export default ImageResult;

View File

@@ -1,257 +0,0 @@
import React, { useState, useEffect, useRef, useCallback } from 'react';
import { UploadedImage } from '../types';
import { segmentSubject } from '../services/geminiService';
import { ArrowLeftIcon } from './icons/ArrowLeftIcon';
import { ArrowRightIcon } from './icons/ArrowRightIcon';
import { BrushIcon } from './icons/BrushIcon';
import { EraserIcon } from './icons/EraserIcon';
import { useLogger } from '../contexts/LoggingContext';
// Props for the Segment screen (step 2 of the wizard).
interface ImageSegmenterProps {
  // Images from the Upload step; masks are generated/edited here.
  images: UploadedImage[];
  // Called with the mask-annotated images when the user proceeds.
  onComplete: (images: UploadedImage[]) => void;
  // Navigates back to the Upload step.
  onBack: () => void;
}
// Active mask-editing tool: paint subject in (brush) or out (eraser).
type EditorMode = 'brush' | 'eraser';
/**
 * Segment screen: requests an AI segmentation mask for every uploaded image,
 * previews the masked subject on a canvas (dark overlay with the subject
 * punched out), and lets the user touch up the mask with brush/eraser tools.
 *
 * Fixes vs. previous revision:
 *  - stopDrawing now commits the stroke BEFORE clearing the isDrawing flag.
 *    Previously the flag was cleared first, so handleCanvasInteraction's guard
 *    dropped the mouseup/touchend event and the state-commit branch was
 *    unreachable — manual mask edits were silently lost on completion.
 *  - State updates no longer mutate the image objects in place (which also
 *    mutated the objects in the `images` prop); they now replace the edited
 *    element with a spread copy.
 */
const ImageSegmenter: React.FC<ImageSegmenterProps> = ({ images, onComplete, onBack }) => {
  // Local working copy of the images; masks are written here.
  const [internalImages, setInternalImages] = useState<UploadedImage[]>(images);
  // Per-image async flags for the mask-generation requests.
  const [loadingStates, setLoadingStates] = useState<Record<number, boolean>>({});
  const [errorStates, setErrorStates] = useState<Record<number, string | null>>({});
  // Index of the image currently shown in the editor.
  const [activeIndex, setActiveIndex] = useState<number>(0);
  const [mode, setMode] = useState<EditorMode>('brush');
  const [brushSize, setBrushSize] = useState<number>(20);
  const canvasRef = useRef<HTMLCanvasElement>(null);
  // Offscreen Image elements for the active photo and its mask.
  const imageRef = useRef<HTMLImageElement | null>(null);
  const maskRef = useRef<HTMLImageElement | null>(null);
  // True while a pointer drag is in progress on the canvas.
  const isDrawing = useRef<boolean>(false);
  const { log } = useLogger();

  // Requests a segmentation mask for one image and stores it (or an error).
  const generateMask = useCallback(async (index: number) => {
    setLoadingStates(prev => ({ ...prev, [index]: true }));
    setErrorStates(prev => ({ ...prev, [index]: null }));
    log('info', `Requesting segmentation mask for image ${index + 1} ("${internalImages[index].subjectDescription}").`);
    try {
      const maskBase64 = await segmentSubject(internalImages[index].file, internalImages[index].subjectDescription);
      // Immutable update: replace the element rather than mutating it, so the
      // prop objects stay untouched and React sees a fresh reference.
      setInternalImages(prev =>
        prev.map((img, i) =>
          i === index ? { ...img, maskDataUrl: `data:image/png;base64,${maskBase64}` } : img
        )
      );
      log('success', `Successfully received segmentation mask for image ${index + 1}.`);
    } catch (e) {
      const errorMessage = e instanceof Error ? e.message : 'Mask generation failed';
      // Condense service errors into short labels that fit the thumbnail tile.
      let displayError = "Failed";
      if (errorMessage.includes("The AI returned a message")) {
        displayError = "AI Response Error";
      } else if (errorMessage.includes("No segmentation mask")) {
        displayError = "No Mask Found";
      }
      setErrorStates(prev => ({ ...prev, [index]: displayError }));
      log('error', `Failed to generate mask for image ${index + 1}: ${errorMessage}`);
    } finally {
      setLoadingStates(prev => ({ ...prev, [index]: false }));
    }
  }, [internalImages, log]);

  // Kick off mask generation for every image that has no mask, isn't already
  // loading, and hasn't failed (failures are not auto-retried).
  useEffect(() => {
    internalImages.forEach((image, index) => {
      if (!image.maskDataUrl && !loadingStates[index] && !errorStates[index]) {
        generateMask(index);
      }
    });
  }, [internalImages, generateMask, loadingStates, errorStates]);

  // Repaints the editor canvas: original photo + semi-transparent dark overlay
  // with the masked (white) region punched out via 'destination-out'.
  const draw = useCallback(() => {
    const canvas = canvasRef.current;
    const originalImage = imageRef.current;
    const maskImage = maskRef.current;
    if (!canvas || !originalImage || !maskImage) return;
    const ctx = canvas.getContext('2d');
    if (!ctx) return;
    // Skip until both bitmaps have real pixel data.
    if (originalImage.naturalWidth === 0 || maskImage.naturalWidth === 0 || !originalImage.complete) {
      return;
    }
    const { naturalWidth: w, naturalHeight: h } = originalImage;
    if (canvas.width !== w) canvas.width = w;
    if (canvas.height !== h) canvas.height = h;
    ctx.clearRect(0, 0, w, h);
    // Use a temporary canvas for the overlay so we don't mess up the main canvas's state
    const overlayCanvas = document.createElement('canvas');
    overlayCanvas.width = w;
    overlayCanvas.height = h;
    const overlayCtx = overlayCanvas.getContext('2d');
    if (!overlayCtx) return;
    // Fill the overlay with a semi-transparent black
    overlayCtx.fillStyle = 'rgba(0, 0, 0, 0.6)';
    overlayCtx.fillRect(0, 0, w, h);
    // Use 'destination-out' to punch a hole in the overlay where the mask is white
    overlayCtx.globalCompositeOperation = 'destination-out';
    overlayCtx.drawImage(maskImage, 0, 0, w, h);
    // Draw the original image on the main canvas
    ctx.drawImage(originalImage, 0, 0, w, h);
    // Draw the overlay (with the hole punched out) on top
    ctx.drawImage(overlayCanvas, 0, 0);
  }, []);

  // (Re)load the active image + mask into offscreen elements, then repaint.
  useEffect(() => {
    const activeImage = internalImages[activeIndex];
    if (activeImage?.previewUrl && activeImage?.maskDataUrl) {
      const originalImage = new Image();
      const maskImage = new Image();
      imageRef.current = originalImage;
      maskRef.current = maskImage;
      originalImage.src = activeImage.previewUrl;
      maskImage.src = activeImage.maskDataUrl;
      const loadImages = Promise.all([
        new Promise((res, rej) => { originalImage.onload = res; originalImage.onerror = rej; }),
        new Promise((res, rej) => { maskImage.onload = res; maskImage.onerror = rej; })
      ]);
      loadImages.then(() => {
        draw();
      }).catch(err => {
        console.error("Error loading images for canvas: ", err);
        log('error', `Canvas Error: Failed to load images for editor view. ${err}`);
      });
    }
  }, [activeIndex, internalImages, draw, log]);

  // Paints one brush/eraser dot onto the mask at the pointer position.
  // Commits the updated mask into state only when the stroke ends
  // (mouseup/touchend/mouseleave) to avoid a state update per mousemove.
  const handleCanvasInteraction = (e: React.MouseEvent<HTMLCanvasElement> | React.TouchEvent<HTMLCanvasElement>) => {
    if (!isDrawing.current && e.type !== 'mousedown' && e.type !== 'touchstart') return;
    const canvas = canvasRef.current;
    if (!canvas || !maskRef.current) return;
    // Edit a copy of the mask at its natural resolution.
    const tempCanvas = document.createElement('canvas');
    tempCanvas.width = maskRef.current.naturalWidth;
    tempCanvas.height = maskRef.current.naturalHeight;
    const tempCtx = tempCanvas.getContext('2d');
    if (!tempCtx) return;
    tempCtx.drawImage(maskRef.current, 0, 0);
    // Map CSS-pixel pointer coordinates to mask-pixel coordinates.
    const rect = canvas.getBoundingClientRect();
    const scaleX = tempCanvas.width / rect.width;
    const scaleY = tempCanvas.height / rect.height;
    const getCoords = (evt: any) => {
      if (evt.touches) {
        return { x: evt.touches[0].clientX - rect.left, y: evt.touches[0].clientY - rect.top };
      }
      return { x: evt.clientX - rect.left, y: evt.clientY - rect.top };
    }
    const { x, y } = getCoords(e.nativeEvent);
    // Brush paints the subject in (white); eraser paints it out (black).
    tempCtx.fillStyle = mode === 'brush' ? '#FFFFFF' : '#000000';
    tempCtx.beginPath();
    tempCtx.arc(x * scaleX, y * scaleY, (brushSize / 2) * scaleX, 0, 2 * Math.PI);
    tempCtx.fill();
    const newMaskUrl = tempCanvas.toDataURL();
    const isStrokeEnd = e.type === 'mouseup' || e.type === 'touchend' || e.type === 'mouseleave';
    maskRef.current.src = newMaskUrl;
    maskRef.current.onload = () => {
      draw();
      if (isStrokeEnd) {
        // Immutable commit of the edited mask (no in-place mutation).
        setInternalImages(prev =>
          prev.map((img, i) => (i === activeIndex ? { ...img, maskDataUrl: newMaskUrl } : img))
        );
      }
    };
  };

  const startDrawing = () => { isDrawing.current = true; };
  // BUG FIX: commit the stroke while isDrawing is still true, THEN clear the
  // flag. The previous order made the guard in handleCanvasInteraction reject
  // the terminating event, so edits were never written back into state.
  const stopDrawing = (e: any) => {
    if (!isDrawing.current) return; // ignore stray mouseleave/mouseup
    handleCanvasInteraction(e);
    isDrawing.current = false;
  };

  // The user may proceed only once every image has a mask.
  const canProceed = internalImages.every(img => img.maskDataUrl);

  return (
    <div className="w-full">
      <div className="text-center mb-6">
        <h2 className="text-2xl font-bold text-gray-100">Review & Refine Subjects</h2>
        <p className="text-gray-400">The AI has extracted the subjects. Use the tools to refine the selection if needed.</p>
      </div>
      <div className="flex flex-col lg:flex-row gap-8">
        {/* Thumbnails */}
        <div className="lg:w-1/4 flex lg:flex-col gap-2 overflow-x-auto lg:overflow-y-auto lg:max-h-[500px] p-2 bg-gray-900/50 rounded-lg">
          {internalImages.map((image, index) => (
            <button key={index} onClick={() => setActiveIndex(index)} className={`rounded-lg border-2 transition-all p-1 flex-shrink-0 ${activeIndex === index ? 'border-purple-500' : 'border-transparent hover:border-gray-600'}`}>
              <div className="relative w-24 h-24">
                {loadingStates[index] && <div className="absolute inset-0 bg-black/70 flex items-center justify-center rounded-md"><div className="animate-spin rounded-full h-8 w-8 border-b-2 border-purple-400"></div></div>}
                {errorStates[index] && <div className="absolute inset-0 bg-red-900/80 text-white text-xs text-center flex items-center justify-center p-1 rounded-md">{errorStates[index]}</div>}
                {image.maskDataUrl && <img src={image.maskDataUrl} alt={`mask preview ${index}`} className="w-full h-full object-contain rounded-md bg-black" />}
                {!image.maskDataUrl && !loadingStates[index] && !errorStates[index] && <div className="w-full h-full bg-gray-700 rounded-md flex items-center justify-center text-xs text-gray-400">Waiting...</div>}
              </div>
            </button>
          ))}
        </div>
        {/* Editor */}
        <div className="lg:w-3/4 flex flex-col items-center">
          <div className="w-full flex justify-center items-center mb-4 p-2 bg-gray-700/50 rounded-lg">
            <div className="flex items-center gap-4">
              <button onClick={() => setMode('brush')} className={`p-2 rounded-md transition-colors ${mode === 'brush' ? 'bg-purple-600' : 'bg-gray-600 hover:bg-gray-500'}`}><BrushIcon className="w-6 h-6"/></button>
              <button onClick={() => setMode('eraser')} className={`p-2 rounded-md transition-colors ${mode === 'eraser' ? 'bg-purple-600' : 'bg-gray-600 hover:bg-gray-500'}`}><EraserIcon className="w-6 h-6"/></button>
              <div className="flex items-center gap-2">
                <label htmlFor="brushSize" className="text-sm">Size:</label>
                <input type="range" id="brushSize" min="2" max="100" value={brushSize} onChange={e => setBrushSize(Number(e.target.value))} className="w-32 cursor-pointer"/>
              </div>
            </div>
          </div>
          <canvas
            ref={canvasRef}
            className="rounded-lg max-w-full h-auto"
            onMouseDown={startDrawing}
            onMouseUp={stopDrawing}
            onMouseMove={handleCanvasInteraction}
            onMouseLeave={stopDrawing}
            onTouchStart={startDrawing}
            onTouchEnd={stopDrawing}
            onTouchMove={handleCanvasInteraction}
          />
        </div>
      </div>
      <div className="flex flex-col sm:flex-row justify-between items-center gap-4 pt-8 mt-6 border-t border-gray-700">
        <button
          type="button"
          onClick={onBack}
          className="bg-gray-600 hover:bg-gray-500 text-white font-bold py-3 px-6 rounded-lg transition-colors duration-300 flex items-center gap-2 w-full sm:w-auto justify-center"
        >
          <ArrowLeftIcon className="w-5 h-5"/> Back
        </button>
        <button
          onClick={() => onComplete(internalImages)}
          disabled={!canProceed}
          className="bg-purple-600 hover:bg-purple-700 disabled:bg-gray-600 disabled:cursor-not-allowed text-white font-bold py-3 px-8 rounded-lg transition-all duration-300 text-lg flex items-center gap-2 mx-auto w-full sm:w-auto justify-center"
        >
          Next: Customize Prompt <ArrowRightIcon className="w-5 h-5" />
        </button>
      </div>
    </div>
  );
};
export default ImageSegmenter;

View File

@@ -1,113 +0,0 @@
import React, { useState, useCallback } from 'react';
import { UploadedImage } from '../types';
import { UploadIcon } from './icons/UploadIcon';
import { ArrowRightIcon } from './icons/ArrowRightIcon';
// Props for the Upload screen (step 1 of the wizard).
interface ImageUploaderProps {
  // Called with the described images when the user proceeds to segmentation.
  onImagesUploaded: (images: UploadedImage[]) => void;
}
/**
 * Upload screen: collects 2-5 source images plus a short subject description
 * for each, then hands them to the parent wizard.
 *
 * Fixes vs. previous revision:
 *  - handleDescriptionChange no longer mutates the state objects in place;
 *    it replaces the edited element with a spread copy.
 *  - removeImage revokes the removed image's object URL to avoid leaking the
 *    underlying blob.
 *  - The file input's value is reset after each selection so picking the same
 *    file again (e.g. after removing it) re-fires onChange.
 */
const ImageUploader: React.FC<ImageUploaderProps> = ({ onImagesUploaded }) => {
  const [images, setImages] = useState<UploadedImage[]>([]);
  const [error, setError] = useState<string | null>(null);

  // Appends the picked files (capped at 5 total) as UploadedImage entries.
  const handleFileChange = (event: React.ChangeEvent<HTMLInputElement>) => {
    setError(null);
    if (event.target.files) {
      const files = Array.from(event.target.files);
      if (files.length + images.length > 5) {
        setError("You can upload a maximum of 5 images.");
        // Reset so the same selection can be retried after removing images.
        event.target.value = '';
        return;
      }
      // Fix: Explicitly type `file` as `File` to resolve type inference issues.
      const newImages: UploadedImage[] = files.map((file: File) => ({
        file,
        previewUrl: URL.createObjectURL(file),
        subjectDescription: '',
      }));
      setImages(prev => [...prev, ...newImages]);
      // Reset so selecting the same file again re-fires onChange.
      event.target.value = '';
    }
  };

  // Updates one image's subject description without mutating state in place.
  const handleDescriptionChange = (index: number, value: string) => {
    setImages(prev =>
      prev.map((img, i) => (i === index ? { ...img, subjectDescription: value } : img))
    );
  };

  // Drops an image and releases its blob URL so the browser can free memory.
  const removeImage = (indexToRemove: number) => {
    setImages(prev => {
      const target = prev[indexToRemove];
      if (target) URL.revokeObjectURL(target.previewUrl);
      return prev.filter((_, index) => index !== indexToRemove);
    });
  };

  // Validates (>= 2 images, every subject described) before notifying parent.
  const handleSubmit = () => {
    if (images.length < 2) {
      setError('Please upload at least 2 images.');
      return;
    }
    if (images.some(img => img.subjectDescription.trim() === '')) {
      setError('Please describe the main subject in each image.');
      return;
    }
    onImagesUploaded(images);
  };

  return (
    <div className="w-full">
      <div className="text-center mb-6">
        <h2 className="text-2xl font-bold text-gray-100">Upload Your Source Images</h2>
        <p className="text-gray-400">Upload 2 or more images. Then, briefly describe the person you want to feature from each.</p>
      </div>
      <div className="mb-6">
        <label htmlFor="file-upload" className="relative cursor-pointer bg-gray-700 hover:bg-gray-600 text-purple-300 font-semibold py-3 px-5 rounded-lg border border-dashed border-gray-500 flex flex-col items-center justify-center transition-colors duration-300 h-48">
          <UploadIcon className="w-12 h-12 mb-2 text-gray-400"/>
          <span className="text-lg">Click to upload images</span>
          <span className="text-sm text-gray-500">PNG, JPG, WEBP up to 10MB</span>
          <input id="file-upload" name="file-upload" type="file" multiple accept="image/png, image/jpeg, image/webp" className="sr-only" onChange={handleFileChange} />
        </label>
      </div>
      {images.length > 0 && (
        <div className="grid grid-cols-1 sm:grid-cols-2 lg:grid-cols-3 gap-4 mb-6">
          {images.map((image, index) => (
            <div key={index} className="bg-gray-700/50 rounded-lg p-3 relative group">
              <img src={image.previewUrl} alt={`preview ${index}`} className="w-full h-40 object-cover rounded-md mb-3" />
              <textarea
                value={image.subjectDescription}
                onChange={(e) => handleDescriptionChange(index, e.target.value)}
                placeholder={`e.g., The man with glasses`}
                className="w-full bg-gray-800 border border-gray-600 rounded-md p-2 text-sm focus:ring-2 focus:ring-purple-500 focus:border-purple-500 transition"
                rows={2}
              />
              <button
                onClick={() => removeImage(index)}
                className="absolute top-1 right-1 bg-black/50 text-white rounded-full p-1 opacity-0 group-hover:opacity-100 transition-opacity"
                aria-label="Remove image"
              >
                <svg xmlns="http://www.w3.org/2000/svg" className="h-4 w-4" viewBox="0 0 20 20" fill="currentColor">
                  <path fillRule="evenodd" d="M4.293 4.293a1 1 0 011.414 0L10 8.586l4.293-4.293a1 1 0 111.414 1.414L11.414 10l4.293 4.293a1 1 0 01-1.414 1.414L10 11.414l-4.293 4.293a1 1 0 01-1.414-1.414L8.586 10 4.293 5.707a1 1 0 010-1.414z" clipRule="evenodd" />
                </svg>
              </button>
            </div>
          ))}
        </div>
      )}
      {error && <p className="text-red-400 text-center my-4">{error}</p>}
      <div className="text-center mt-4">
        <button
          onClick={handleSubmit}
          disabled={images.length < 2 || images.some(i => !i.subjectDescription)}
          className="bg-purple-600 hover:bg-purple-700 disabled:bg-gray-600 disabled:cursor-not-allowed text-white font-bold py-3 px-8 rounded-lg transition-all duration-300 text-lg flex items-center gap-2 mx-auto"
        >
          Next: Customize Prompt <ArrowRightIcon className="w-5 h-5" />
        </button>
      </div>
    </div>
  );
};
export default ImageUploader;

View File

@@ -1,151 +0,0 @@
import React, { useState, useEffect } from 'react';
import { PROMPT_TEMPLATES } from '../constants';
import { UploadedImage } from '../types';
import { MagicIcon } from './icons/MagicIcon';
import { ArrowLeftIcon } from './icons/ArrowLeftIcon';
// Props for the Prompt screen (step 3 of the wizard).
interface PromptCustomizerProps {
  // Asks the parent to expand scenario + instruction into a master prompt.
  onPromptExpanded: (scenario: string, userInstruction: string) => void;
  // Submits the (possibly edited) master prompt for final image generation.
  onFinalSubmit: (masterPrompt: string) => void;
  // True while expansion or generation is in flight.
  isLoading: boolean;
  // User-facing message shown while loading.
  loadingMessage: string;
  // Masked subject images, shown as a reference strip.
  uploadedImages: UploadedImage[];
  // Navigates back to the Segment step.
  onBack: () => void;
  // Master prompt state is lifted to the parent so Back/retry preserves it.
  masterPrompt: string;
  setMasterPrompt: (prompt: string) => void;
}
/**
 * Two-phase prompt wizard.
 * Phase 1: pick a scenario template and type a free-form instruction, then ask
 * the parent to expand them into a "master prompt".
 * Phase 2: review/edit the generated master prompt and submit it for image
 * generation. The phase is derived from whether `masterPrompt` is non-empty.
 */
const PromptCustomizer: React.FC<PromptCustomizerProps> = ({
  onPromptExpanded,
  onFinalSubmit,
  isLoading,
  loadingMessage,
  uploadedImages,
  onBack,
  masterPrompt,
  setMasterPrompt
}) => {
  // Scenario selection is keyed by template title (compared against button clicks below).
  const [selectedScenario, setSelectedScenario] = useState<string>(PROMPT_TEMPLATES[0].title);
  const [userInstruction, setUserInstruction] = useState<string>('');
  // true once a master prompt exists -> switches the UI to the review form.
  const [isPromptExpanded, setIsPromptExpanded] = useState(false);

  // Flip to the review phase as soon as the parent delivers a master prompt.
  useEffect(() => {
    if (masterPrompt) {
      setIsPromptExpanded(true);
    }
  }, [masterPrompt]);

  // Phase 1 submit handler.
  const handleExpandPrompt = (e: React.FormEvent) => {
    e.preventDefault();
    if (selectedScenario && userInstruction) {
      onPromptExpanded(selectedScenario, userInstruction);
    }
  };

  // Phase 2 submit handler.
  const handleFinalSubmit = (e: React.FormEvent) => {
    e.preventDefault();
    if (masterPrompt) {
      onFinalSubmit(masterPrompt);
    }
  };

  // Full-screen spinner only while the master prompt is being generated; during
  // final generation the submit button itself shows the loading message instead.
  if (isLoading && !isPromptExpanded) {
    return (
      <div className="flex flex-col items-center justify-center text-center h-64">
        <div className="animate-spin rounded-full h-16 w-16 border-b-2 border-purple-400 mb-4"></div>
        <p className="text-xl text-purple-300">{loadingMessage}</p>
      </div>
    );
  }

  return (
    <div className="w-full">
      <div className="text-center mb-8">
        <h2 className="text-2xl font-bold text-gray-100">{isPromptExpanded ? 'Review Your Master Prompt' : 'Describe Your Scene'}</h2>
        <p className="text-gray-400">{isPromptExpanded ? 'Edit the AI-generated prompt below, then generate your image.' : 'Choose a starting scenario and describe your vision.'}</p>
      </div>
      <div className="flex flex-col lg:flex-row gap-8">
        {/* Left column: read-only recap of the uploaded/segmented subjects. */}
        <div className="lg:w-1/3">
          <h3 className="text-lg font-semibold text-purple-300 mb-3">Your Subjects</h3>
          <div className="flex flex-wrap gap-2">
            {uploadedImages.map((image, index) => (
              <div key={index} className="flex items-center gap-2 bg-gray-700 p-2 rounded-lg">
                {/* Prefer the mask preview when available; fall back to the raw upload. */}
                <img src={image.maskDataUrl || image.previewUrl} alt={`subject ${index}`} className="w-10 h-10 rounded-md object-cover bg-black" />
                <p className="text-sm text-gray-300 flex-1">{image.subjectDescription}</p>
              </div>
            ))}
          </div>
        </div>
        {/* Right column: the active phase's form. */}
        <div className="flex-grow lg:w-2/3 space-y-6">
          {!isPromptExpanded ? (
            <form onSubmit={handleExpandPrompt} className="space-y-6">
              <div>
                <label className="block text-lg font-semibold text-purple-300 mb-2">1. Choose a K-Pop Scenario</label>
                <div className="grid grid-cols-1 md:grid-cols-2 gap-3">
                  {PROMPT_TEMPLATES.map(template => (
                    <button
                      key={template.id}
                      type="button"
                      onClick={() => setSelectedScenario(template.title)}
                      className={`p-4 rounded-lg text-left transition-all duration-200 border-2 ${selectedScenario === template.title ? 'bg-purple-800/50 border-purple-500' : 'bg-gray-700 border-gray-600 hover:border-purple-600'}`}
                    >
                      <p className="font-bold text-white">{template.title}</p>
                      <p className="text-sm text-gray-400">{template.description}</p>
                    </button>
                  ))}
                </div>
              </div>
              <div>
                <label htmlFor="user-instruction" className="block text-lg font-semibold text-purple-300 mb-2">2. Describe Your Idea</label>
                <textarea
                  id="user-instruction"
                  value={userInstruction}
                  onChange={(e) => setUserInstruction(e.target.value)}
                  placeholder="Example: The person from image 1 should stand behind the person from image 2, placing a hand on their shoulder. Use the background from image 2."
                  className="w-full bg-gray-800 border border-gray-600 rounded-lg p-3 text-base h-32 focus:ring-2 focus:ring-purple-500 focus:border-purple-500 transition"
                  required
                />
              </div>
              <div className="flex flex-col sm:flex-row justify-between items-center gap-4 pt-4">
                <button type="button" onClick={onBack} className="bg-gray-600 hover:bg-gray-500 text-white font-bold py-3 px-6 rounded-lg transition-colors duration-300 flex items-center gap-2 w-full sm:w-auto justify-center">
                  <ArrowLeftIcon className="w-5 h-5"/> Back
                </button>
                <button type="submit" disabled={isLoading || !userInstruction || !selectedScenario} className="bg-purple-600 hover:bg-purple-700 disabled:bg-gray-600 text-white font-bold py-3 px-8 rounded-lg transition-all duration-300 flex items-center gap-2 w-full sm:w-auto justify-center">
                  Create Master Prompt <MagicIcon className="w-5 h-5"/>
                </button>
              </div>
            </form>
          ) : (
            <form onSubmit={handleFinalSubmit} className="space-y-6">
              <div>
                <label htmlFor="master-prompt" className="block text-lg font-semibold text-purple-300 mb-2">Master Prompt</label>
                <textarea
                  id="master-prompt"
                  value={masterPrompt}
                  onChange={(e) => setMasterPrompt(e.target.value)}
                  className="w-full bg-gray-800 border border-gray-600 rounded-lg p-3 text-base h-48 focus:ring-2 focus:ring-purple-500 focus:border-purple-500 transition"
                  required
                />
              </div>
              <div className="flex flex-col sm:flex-row justify-between items-center gap-4 pt-4">
                {/* Drops back to phase 1 locally; the parent-held masterPrompt is kept. */}
                <button type="button" onClick={() => setIsPromptExpanded(false)} className="bg-gray-600 hover:bg-gray-500 text-white font-bold py-3 px-6 rounded-lg transition-colors duration-300 flex items-center gap-2 w-full sm:w-auto justify-center">
                  <ArrowLeftIcon className="w-5 h-5"/> Edit Scenario
                </button>
                <button type="submit" disabled={isLoading || !masterPrompt} className="bg-gradient-to-r from-purple-600 to-pink-600 hover:from-purple-700 hover:to-pink-700 disabled:from-gray-600 disabled:to-gray-600 disabled:cursor-not-allowed text-white font-bold py-3 px-8 rounded-lg transition-all duration-300 text-lg flex items-center gap-2 w-full sm:w-auto justify-center">
                  {isLoading ? loadingMessage : 'Generate Thumbnail'}
                  {!isLoading && <MagicIcon className="w-6 h-6"/>}
                </button>
              </div>
            </form>
          )}
        </div>
      </div>
    </div>
  );
};
export default PromptCustomizer;

View File

@@ -1,53 +0,0 @@
import React from 'react';
import { AppStep } from '../types';
interface StepIndicatorProps {
currentStep: AppStep;
}
// Ordered wizard steps. `id` carries the AppStep enum's numeric value, which
// doubles as the displayed step number and the ordering used for the
// `currentStep > step.id` completion check in StepIndicator.
const steps = [
  { id: AppStep.Upload, title: 'Upload' },
  { id: AppStep.Segment, title: 'Segment Subjects'},
  { id: AppStep.Prompt, title: 'Create Prompt' },
  { id: AppStep.Result, title: 'Generate & Refine' },
];
/**
 * Horizontal wizard progress indicator. Completed steps show a green check,
 * the active step is highlighted purple and scaled up, upcoming steps are gray.
 * Connector lines between steps turn green once the preceding step completes.
 *
 * Fix: the checkmark SVG namespace was misspelled "http://www.w.org/2000/svg".
 */
const StepIndicator: React.FC<StepIndicatorProps> = ({ currentStep }) => {
  return (
    <div className="w-full">
      <div className="flex items-center justify-between md:justify-center">
        {steps.map((step, index) => {
          const isActive = currentStep === step.id;
          // Relies on AppStep's numeric ordering (Upload=1 .. Result=4).
          const isCompleted = currentStep > step.id;
          return (
            <React.Fragment key={step.id}>
              <div className="flex items-center flex-col sm:flex-row text-center">
                <div
                  className={`flex items-center justify-center w-10 h-10 rounded-full transition-all duration-300 ${
                    isActive ? 'bg-purple-600 scale-110' : isCompleted ? 'bg-green-600' : 'bg-gray-700'
                  }`}
                >
                  <span className={`font-bold ${isActive || isCompleted ? 'text-white' : 'text-gray-400'}`}>
                    {isCompleted ? (
                      <svg xmlns="http://www.w3.org/2000/svg" className="h-6 w-6" fill="none" viewBox="0 0 24 24" stroke="currentColor">
                        <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M5 13l4 4L19 7" />
                      </svg>
                    ) : step.id}
                  </span>
                </div>
                <p className={`mt-2 sm:mt-0 sm:ml-3 font-semibold text-xs sm:text-sm ${isActive ? 'text-purple-300' : 'text-gray-500'}`}>{step.title}</p>
              </div>
              {index < steps.length - 1 && (
                <div className={`flex-auto border-t-2 transition-colors duration-300 mx-2 sm:mx-4 w-4 sm:w-auto ${isCompleted ? 'border-green-600' : 'border-gray-700'}`}></div>
              )}
            </React.Fragment>
          );
        })}
      </div>
    </div>
  );
};
export default StepIndicator;

View File

@@ -1,8 +0,0 @@
import React from 'react';
// Left-pointing arrow (outline style); inherits currentColor, spread props override defaults.
export const ArrowLeftIcon: React.FC<React.SVGProps<SVGSVGElement>> = (svgProps) => {
  return (
    <svg xmlns="http://www.w3.org/2000/svg" className="h-6 w-6" fill="none" viewBox="0 0 24 24" stroke="currentColor" {...svgProps}>
      <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M10 19l-7-7m0 0l7-7m-7 7h18" />
    </svg>
  );
};

View File

@@ -1,8 +0,0 @@
import React from 'react';
// Right-pointing arrow (outline style); inherits currentColor, spread props override defaults.
export const ArrowRightIcon: React.FC<React.SVGProps<SVGSVGElement>> = (svgProps) => {
  return (
    <svg xmlns="http://www.w3.org/2000/svg" className="h-6 w-6" fill="none" viewBox="0 0 24 24" stroke="currentColor" {...svgProps}>
      <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M14 5l7 7m0 0l-7 7m7-7H3" />
    </svg>
  );
};

View File

@@ -1,8 +0,0 @@
import React from 'react';
// Pencil/brush glyph (outline style); inherits currentColor, spread props override defaults.
export const BrushIcon: React.FC<React.SVGProps<SVGSVGElement>> = (svgProps) => {
  return (
    <svg xmlns="http://www.w3.org/2000/svg" className="h-6 w-6" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2} {...svgProps}>
      <path strokeLinecap="round" strokeLinejoin="round" d="M15.232 5.232l3.536 3.536m-2.036-5.036a2.5 2.5 0 113.536 3.536L6.5 21.036H3v-3.5L15.232 5.232z" />
    </svg>
  );
};

View File

@@ -1,8 +0,0 @@
import React from 'react';
// Download glyph (tray + downward arrow); inherits currentColor, spread props override defaults.
export const DownloadIcon: React.FC<React.SVGProps<SVGSVGElement>> = (svgProps) => {
  return (
    <svg xmlns="http://www.w3.org/2000/svg" className="h-6 w-6" fill="none" viewBox="0 0 24 24" stroke="currentColor" {...svgProps}>
      <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M4 16v1a3 3 0 003 3h10a3 3 0 003-3v-1m-4-4l-4 4m0 0l-4-4m4 4V4" />
    </svg>
  );
};

View File

@@ -1,9 +0,0 @@
import React from 'react';
// Eraser glyph (two strokes); inherits currentColor, spread props override defaults.
export const EraserIcon: React.FC<React.SVGProps<SVGSVGElement>> = (svgProps) => {
  return (
    <svg xmlns="http://www.w3.org/2000/svg" className="h-6 w-6" fill="none" viewBox="0 0 24 24" stroke="currentColor" strokeWidth={2} {...svgProps}>
      <path strokeLinecap="round" strokeLinejoin="round" d="M19.452 7.618a.875.875 0 00-1.238 0l-7.366 7.366a.875.875 0 000 1.238l7.366 7.366a.875.875 0 001.238 0l3.85-3.85a.875.875 0 000-1.238l-7.366-7.366-3.85-3.85zM4 20h10" />
      <path strokeLinecap="round" strokeLinejoin="round" d="M4 20l15-15" />
    </svg>
  );
};

View File

@@ -1,8 +0,0 @@
import React from 'react';
// Magic-wand glyph (outline style); inherits currentColor, spread props override defaults.
export const MagicIcon: React.FC<React.SVGProps<SVGSVGElement>> = (svgProps) => {
  return (
    <svg xmlns="http://www.w3.org/2000/svg" className="h-6 w-6" fill="none" viewBox="0 0 24 24" stroke="currentColor" {...svgProps}>
      <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M12 6.25278C12 6.25278 15.011 3.24178 17.761 6.00078C20.511 8.75978 17.5 12.0008 17.5 12.0008M12 6.25278C12 6.25278 8.989 3.24178 6.239 6.00078C3.489 8.75978 6.5 12.0008 6.5 12.0008M12 6.25278V21.0008M17.5 12.0008L19.25 13.7508M6.5 12.0008L4.75 13.7508M12 21.0008H14.25M12 21.0008H9.75" />
    </svg>
  );
};

View File

@@ -1,8 +0,0 @@
import React from 'react';
// Sparkles glyph (thin 1.5 stroke, no default size class); spread props override defaults.
export const SparklesIcon: React.FC<React.SVGProps<SVGSVGElement>> = (svgProps) => {
  return (
    <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" strokeWidth={1.5} stroke="currentColor" {...svgProps}>
      <path strokeLinecap="round" strokeLinejoin="round" d="M9.813 15.904L9 18.75l-.813-2.846a4.5 4.5 0 00-3.09-3.09L2.25 12l2.846-.813a4.5 4.5 0 003.09-3.09L9 5.25l.813 2.846a4.5 4.5 0 003.09 3.09L15.75 12l-2.846.813a4.5 4.5 0 00-3.09 3.09zM18.259 8.715L18 9.75l-.259-1.035a3.375 3.375 0 00-2.455-2.456L14.25 6l1.036-.259a3.375 3.375 0 002.455-2.456L18 2.25l.259 1.035a3.375 3.375 0 002.456 2.456L21.75 6l-1.035.259a3.375 3.375 0 00-2.456 2.456zM16.898 20.572L16.5 21.75l-.398-1.178a3.375 3.375 0 00-2.456-2.456L12.5 18l1.178-.398a3.375 3.375 0 002.456-2.456L16.5 14.25l.398 1.178a3.375 3.375 0 002.456 2.456L20.5 18l-1.178.398a3.375 3.375 0 00-2.456 2.456z" />
    </svg>
  );
};

View File

@@ -1,8 +0,0 @@
import React from 'react';
// Upload glyph (tray + upward arrow); inherits currentColor, spread props override defaults.
export const UploadIcon: React.FC<React.SVGProps<SVGSVGElement>> = (svgProps) => {
  return (
    <svg xmlns="http://www.w3.org/2000/svg" className="h-6 w-6" fill="none" viewBox="0 0 24 24" stroke="currentColor" {...svgProps}>
      <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M4 16v1a3 3 0 003 3h10a3 3 0 003-3v-1m-4-8l-4-4m0 0L8 8m4-4v12" />
    </svg>
  );
};

View File

@@ -1,28 +0,0 @@
// Scenario presets offered in the prompt wizard. Order matters: the first
// entry is the default selection (PromptCustomizer initializes with
// PROMPT_TEMPLATES[0].title). `title` is used as the selection key and is sent
// verbatim to the prompt-expansion call; `description` is display-only.
export const PROMPT_TEMPLATES = [
  {
    id: 'reunion',
    title: 'Emotional Reunion',
    description: 'A touching and heartfelt reunion scene.',
  },
  {
    id: 'playful',
    title: 'Playful Interaction',
    description: 'A fun, teasing, or lighthearted moment.',
  },
  {
    id: 'magazine',
    title: 'Magazine Cover',
    description: 'A high-fashion, photoshoot-style composition.',
  },
  {
    id: 'eye-contact',
    title: 'Direct Eye Contact',
    description: 'One or more subjects looking directly at the viewer.',
  },
  {
    id: 'dramatic',
    title: 'K-Drama Poster',
    description: 'A dramatic, poster-like scene with emotional lighting.',
  },
];

View File

@@ -1,45 +0,0 @@
import React, { createContext, useState, useContext, useCallback } from 'react';
// Severity levels understood by the debug console.
export type LogType = 'info' | 'success' | 'error' | 'warn';

// One entry in the in-memory log buffer.
export interface LogMessage {
  type: LogType;
  message: string;
  // Locale-formatted wall-clock time captured when the entry was logged.
  timestamp: string;
}

// Contract exposed to consumers via useLogger().
interface LoggingContextType {
  logs: LogMessage[];
  log: (type: LogType, message: string) => void;
  clearLogs: () => void;
}

// Default is undefined so useLogger can detect a missing provider and throw.
const LoggingContext = createContext<LoggingContextType | undefined>(undefined);
/**
 * Holds the app-wide log buffer in React state and exposes append/clear
 * operations through LoggingContext.
 */
export const LoggingProvider: React.FC<{ children: React.ReactNode }> = ({ children }) => {
  const [logs, setLogs] = useState<LogMessage[]>([]);

  // Stable identity via useCallback so consumers can safely list `log`
  // in effect dependency arrays.
  const log = useCallback((type: LogType, message: string) => {
    const entry: LogMessage = { type, message, timestamp: new Date().toLocaleTimeString() };
    setLogs(existing => existing.concat(entry));
  }, []);

  const clearLogs = useCallback(() => setLogs([]), []);

  return (
    <LoggingContext.Provider value={{ logs, log, clearLogs }}>
      {children}
    </LoggingContext.Provider>
  );
};
export const useLogger = (): LoggingContextType => {
const context = useContext(LoggingContext);
if (context === undefined) {
throw new Error('useLogger must be used within a LoggingProvider');
}
return context;
};

Binary file not shown.

Before

Width:  |  Height:  |  Size: 414 KiB

View File

@@ -1,37 +0,0 @@
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <link rel="icon" type="image/svg+xml" href="/vite.svg" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>K-Pop Thumbnail Genie</title>
    <!-- Tailwind is loaded from the CDN at runtime; there is no build-time CSS pipeline. -->
    <script src="https://cdn.tailwindcss.com"></script>
    <link rel="preconnect" href="https://fonts.googleapis.com">
    <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
    <link href="https://fonts.googleapis.com/css2?family=Roboto:wght@300;400;500;700&family=Teko:wght@400;500;700&display=swap" rel="stylesheet">
    <style>
      body {
        font-family: 'Roboto', sans-serif;
      }
      .font-teko {
        font-family: 'Teko', sans-serif;
      }
    </style>
    <!-- Import map resolves bare module specifiers (react, @google/genai) from a CDN,
         so the page can run as native ES modules without a bundler. Versions here
         should stay in sync with package.json. -->
    <script type="importmap">
    {
      "imports": {
        "react/": "https://aistudiocdn.com/react@^19.2.0/",
        "react": "https://aistudiocdn.com/react@^19.2.0",
        "react-dom/": "https://aistudiocdn.com/react-dom@^19.2.0/",
        "@google/genai": "https://aistudiocdn.com/@google/genai@^1.29.0"
      }
    }
    </script>
    <link rel="stylesheet" href="/index.css">
  </head>
  <body class="bg-gray-900 text-white">
    <div id="root"></div>
    <script type="module" src="/index.tsx"></script>
  </body>
</html>

View File

@@ -1,16 +0,0 @@
import React from 'react';
import ReactDOM from 'react-dom/client';
import App from './App';
// Entry point: locate the mount node declared in index.html and render the
// app inside StrictMode (double-invokes effects in dev to surface bugs).
const container = document.getElementById('root');
if (container === null) {
  throw new Error("Could not find root element to mount to");
}
ReactDOM.createRoot(container).render(
  <React.StrictMode>
    <App />
  </React.StrictMode>
);

View File

@@ -1,22 +0,0 @@
{
"name": "k-pop-thumbnail-genie",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "vite",
"build": "vite build",
"preview": "vite preview"
},
"dependencies": {
"react": "^19.2.0",
"react-dom": "^19.2.0",
"@google/genai": "^1.29.0"
},
"devDependencies": {
"@types/node": "^22.14.0",
"@vitejs/plugin-react": "^5.0.0",
"typescript": "~5.8.2",
"vite": "^6.2.0"
}
}

View File

@@ -1,202 +0,0 @@
import { GoogleGenAI, Modality } from "@google/genai";
import { UploadedImage, ImageFile } from '../types';
const ai = new GoogleGenAI({ apiKey: process.env.API_KEY });
/**
 * Reads a browser File and wraps it as the inline-data part shape the GenAI
 * API expects (base64 payload + MIME type).
 *
 * Fix: the original promise only resolved via onloadend and never rejected,
 * so a failed read hung the caller (or crashed on a null result). We now
 * reject on reader errors and on a non-string result.
 *
 * @param file Uploaded image file.
 * @returns Inline-data part with base64 payload and the file's MIME type.
 */
const fileToGenerativePart = async (file: File): Promise<ImageFile> => {
  const base64EncodedData = await new Promise<string>((resolve, reject) => {
    const reader = new FileReader();
    reader.onload = () => {
      const result = reader.result;
      if (typeof result === 'string') {
        // Strip the "data:<mime>;base64," prefix; keep only the payload.
        resolve(result.split(',')[1]);
      } else {
        reject(new Error('Unexpected FileReader result type.'));
      }
    };
    reader.onerror = () => reject(reader.error ?? new Error('Failed to read file.'));
    reader.readAsDataURL(file);
  });
  return {
    inlineData: {
      data: base64EncodedData,
      mimeType: file.type,
    },
  };
};
/**
 * Converts a data URL ("data:<mime>;base64,<payload>") into the inline-data
 * part shape the GenAI API expects. Falls back to image/png when the MIME
 * type cannot be parsed from the header.
 */
const dataUrlToGenerativePart = (dataUrl: string): ImageFile => {
  const segments = dataUrl.split(',');
  const mimeMatch = segments[0].match(/:(.*?);/);
  const mimeType = (mimeMatch && mimeMatch[1]) || 'image/png';
  return {
    inlineData: { data: segments[1], mimeType },
  };
};
// Normalizes any thrown value to a human-readable string:
// Error instances yield their message; everything else is stringified.
const getErrorMessage = (error: unknown): string =>
  error instanceof Error ? error.message : String(error);
/**
 * Asks the image model to produce a segmentation mask for the described
 * subject: white (#FFFFFF) where the subject is, black (#000000) elsewhere.
 *
 * Fix: `content?.parts[0]` threw a TypeError when the candidate had content
 * but no parts array; now uses `parts?.[0]`.
 *
 * @param imageFile   Original uploaded photo.
 * @param description Free-text description of the subject to isolate.
 * @returns Base64-encoded mask image data.
 * @throws If the request is blocked, or no image comes back.
 */
export const segmentSubject = async (imageFile: File, description: string): Promise<string> => {
  const imagePart = await fileToGenerativePart(imageFile);
  // A more descriptive, less technical prompt to guide the AI model more reliably.
  const prompt = `
Analyze the provided image to identify the subject described as: "${description}".
Your task is to create a new image based on this analysis.
In this new image, the area that corresponds to the identified subject MUST be solid white (#FFFFFF).
Every other part of the image, which is the background, MUST be solid black (#000000).
The final output must ONLY be the image file. Do not include any text, explanations, or any other content in your response.
`;
  try {
    const response = await ai.models.generateContent({
      model: 'gemini-2.5-flash-image',
      contents: { parts: [imagePart, { text: prompt }] },
      config: {
        responseModalities: [Modality.IMAGE],
      },
    });
    // Surface safety blocks explicitly before inspecting candidates.
    if (response.promptFeedback?.blockReason) {
      throw new Error(`Request blocked due to: ${response.promptFeedback.blockReason}.`);
    }
    const firstPart = response.candidates?.[0]?.content?.parts?.[0];
    if (firstPart && firstPart.inlineData) {
      return firstPart.inlineData.data;
    }
    // More detailed error reporting if no image is returned.
    const fullResponseText = JSON.stringify(response, null, 2);
    const textResponse = response.text?.trim();
    if (textResponse) {
      throw new Error(`The AI returned a message instead of a mask: "${textResponse}". Full API response: ${fullResponseText}`);
    }
    throw new Error(`No segmentation mask received from the AI. Full API response: ${fullResponseText}`);
  } catch (error) {
    console.error("Error segmenting subject:", error);
    throw new Error(`Failed to segment subject: ${getErrorMessage(error)}`);
  }
};
/**
 * Expands the user's scenario choice + free-form instruction + subject
 * descriptions into a single detailed "master prompt" paragraph via the text
 * model.
 *
 * Fix: the original returned `response.text` directly, which can be
 * undefined despite the declared Promise<string>; an empty result now throws.
 *
 * @returns The generated master prompt paragraph.
 * @throws If the model call fails or returns no text.
 */
export const expandPrompt = async (
  scenario: string,
  userInstruction: string,
  images: UploadedImage[]
): Promise<string> => {
  const subjectDescriptions = images
    .map((img, i) => `Person from Image ${i + 1}: ${img.subjectDescription}`)
    .join('\n');
  const systemInstruction = `You are a creative assistant specializing in writing detailed, effective prompts for an AI image generator. Your goal is to create a single, photorealistic, emotionally resonant 16:9 YouTube thumbnail in a K-Pop aesthetic.
- Combine the user's chosen scenario, their specific instructions, and the descriptions of the people involved.
- The output must be a single, cohesive paragraph. Do not use lists or bullet points.
- Translate the user's simple instructions into a rich, detailed description for the AI. Describe the composition, camera angle, lighting, and mood.
- Emphasize achieving high facial fidelity to the described people. The final image should look like a real photograph or a high-quality still from a music video.
- Mention specific K-Pop aesthetic elements like soft, slightly dramatic lighting, a subtle bokeh effect for the background, and a focus on emotional expression.`;
  const userPrompt = `
Scenario: "${scenario}"
User Instruction: "${userInstruction}"
People to include:
${subjectDescriptions}
Generate the master prompt based on this information.
`;
  try {
    const response = await ai.models.generateContent({
      model: 'gemini-2.5-flash',
      contents: [{ parts: [{ text: userPrompt }] }],
      config: { systemInstruction: systemInstruction }
    });
    const text = response.text;
    // response.text is optional in the SDK; guard so callers never see undefined.
    if (!text) {
      throw new Error('The AI returned an empty master prompt.');
    }
    return text;
  } catch (error) {
    console.error("Error expanding prompt:", error);
    throw new Error(`Failed to generate the master prompt: ${getErrorMessage(error)}`);
  }
};
/**
 * Composes the pre-segmented subjects (transparent-background cut-outs) into
 * a single new 16:9 image according to the master prompt.
 *
 * Fixes: adds the promptFeedback.blockReason check (consistent with
 * segmentSubject) and uses `parts?.[0]` so a candidate without parts does not
 * throw a raw TypeError.
 *
 * @param masterPrompt Fully expanded scene description.
 * @param images       Subjects; each must carry segmentedDataUrl.
 * @returns Base64-encoded generated image data.
 * @throws If segmented data is missing, the request is blocked, or no image returns.
 */
export const generateImage = async (
  masterPrompt: string,
  images: UploadedImage[]
): Promise<string> => {
  // Now using pre-segmented images with transparent backgrounds
  const imageParts = images.map(img => {
    if (!img.segmentedDataUrl) throw new Error("Segmented image data is missing.");
    return dataUrlToGenerativePart(img.segmentedDataUrl);
  });
  const fullPrompt = `Task: Create a new photorealistic 16:9 image by composing the subjects from the provided images into a new scene. The subjects are provided as separate images with transparent backgrounds.
Instructions: ${masterPrompt}`;
  try {
    const response = await ai.models.generateContent({
      model: 'gemini-2.5-flash-image',
      contents: {
        parts: [
          ...imageParts,
          { text: fullPrompt },
        ],
      },
      config: {
        responseModalities: [Modality.IMAGE],
      },
    });
    // Surface safety blocks explicitly before inspecting candidates.
    if (response.promptFeedback?.blockReason) {
      throw new Error(`Request blocked due to: ${response.promptFeedback.blockReason}.`);
    }
    const firstPart = response.candidates?.[0]?.content?.parts?.[0];
    if (firstPart && firstPart.inlineData) {
      return firstPart.inlineData.data;
    } else {
      const textResponse = response.text?.trim();
      if (textResponse) {
        throw new Error(`The AI failed to generate an image and returned a message: "${textResponse}"`);
      }
      throw new Error('No image data received from the AI.');
    }
  } catch (error) {
    console.error("Error generating image:", error);
    throw new Error(`Failed to generate the image: ${getErrorMessage(error)}`);
  }
};
/**
 * Applies a follow-up instruction to an already generated image (iterative
 * refinement loop on the result screen).
 *
 * Fixes: adds the promptFeedback.blockReason check (consistent with
 * segmentSubject) and uses `parts?.[0]` so a candidate without parts does not
 * throw a raw TypeError.
 *
 * @param refinementPrompt User's change request.
 * @param base64Image      Current image as raw base64 PNG data (no data-URL header).
 * @returns Base64-encoded refined image data.
 * @throws If the request is blocked or no image comes back.
 */
export const refineImage = async (
  refinementPrompt: string,
  base64Image: string
): Promise<string> => {
  const imagePart = {
    inlineData: {
      data: base64Image,
      mimeType: 'image/png',
    },
  };
  try {
    const response = await ai.models.generateContent({
      model: 'gemini-2.5-flash-image',
      contents: {
        parts: [
          imagePart,
          { text: refinementPrompt },
        ],
      },
      config: {
        responseModalities: [Modality.IMAGE],
      },
    });
    // Surface safety blocks explicitly before inspecting candidates.
    if (response.promptFeedback?.blockReason) {
      throw new Error(`Request blocked due to: ${response.promptFeedback.blockReason}.`);
    }
    const firstPart = response.candidates?.[0]?.content?.parts?.[0];
    if (firstPart && firstPart.inlineData) {
      return firstPart.inlineData.data;
    } else {
      const textResponse = response.text?.trim();
      if (textResponse) {
        throw new Error(`The AI failed to refine the image and returned a message: "${textResponse}"`);
      }
      throw new Error('No refined image data received from the AI.');
    }
  } catch (error) {
    console.error("Error refining image:", error);
    throw new Error(`Failed to refine the image: ${getErrorMessage(error)}`);
  }
};

View File

@@ -1,28 +0,0 @@
// One user-supplied photo plus the per-subject artifacts accumulated across
// the wizard steps.
export interface UploadedImage {
  file: File;
  // Object/data URL used for on-screen previews.
  previewUrl: string;
  // Free-text description of the subject, entered by the user.
  subjectDescription: string;
  // Black/white mask returned by segmentation (optional until that step runs).
  maskDataUrl?: string;
  // Subject cut-out with transparent background; consumed by generateImage.
  segmentedDataUrl?: string;
}

// Wizard steps. Numeric values are meaningful: they are displayed as step
// numbers and compared with > / === for progress rendering.
export enum AppStep {
  Upload = 1,
  Segment = 2,
  Prompt = 3,
  Result = 4,
}

// Generation state for the result screen, supporting iterative refinement.
export interface GenerationResult {
  baseImage: string; // base64 string
  currentImage: string; // base64 string
  history: string[]; // array of base64 strings
}

// Inline-data part shape expected by the GenAI API.
export interface ImageFile {
  inlineData: {
    data: string;   // base64 payload (no data-URL header)
    mimeType: string;
  };
}

View File

@@ -1,43 +0,0 @@
/**
 * Cuts the subject out of the original image using a black/white mask
 * (white = subject, black = background) and returns the result as base64 PNG
 * data (without the data-URL header), preserving transparency.
 *
 * Fix: the original drew the mask directly with 'destination-in'. That
 * operator keys on the mask's ALPHA channel, and an opaque black/white mask
 * is fully opaque everywhere — so the entire original image was kept. We now
 * convert the mask's luminance into an alpha channel first, so black pixels
 * actually erase the background.
 *
 * @param originalImageUrl URL/data URL of the original photo.
 * @param maskDataUrl      URL/data URL of the black/white mask.
 * @returns Base64 PNG data of the masked (transparent-background) image.
 */
export const applyMask = (originalImageUrl: string, maskDataUrl: string): Promise<string> => {
  return new Promise((resolve, reject) => {
    const canvas = document.createElement('canvas');
    const ctx = canvas.getContext('2d');
    if (!ctx) return reject(new Error('Could not get canvas context'));
    const original = new Image();
    original.crossOrigin = 'anonymous';
    const mask = new Image();
    mask.crossOrigin = 'anonymous';
    let loadedImages = 0;
    const onImageLoad = () => {
      loadedImages++;
      if (loadedImages !== 2) return;
      canvas.width = original.naturalWidth;
      canvas.height = original.naturalHeight;

      // Build an alpha mask from the black/white mask image: white -> opaque,
      // black -> transparent. Scale the mask to the original's dimensions in
      // case the model returned a different size.
      const maskCanvas = document.createElement('canvas');
      maskCanvas.width = canvas.width;
      maskCanvas.height = canvas.height;
      const maskCtx = maskCanvas.getContext('2d');
      if (!maskCtx) return reject(new Error('Could not get canvas context'));
      maskCtx.drawImage(mask, 0, 0, canvas.width, canvas.height);
      const imageData = maskCtx.getImageData(0, 0, canvas.width, canvas.height);
      const px = imageData.data;
      for (let i = 0; i < px.length; i += 4) {
        // Rec. 601 luma as alpha; grays become partial transparency.
        px[i + 3] = Math.round(0.299 * px[i] + 0.587 * px[i + 1] + 0.114 * px[i + 2]);
      }
      maskCtx.putImageData(imageData, 0, 0);

      // Draw the original, then keep only pixels where the alpha mask is set.
      ctx.drawImage(original, 0, 0);
      ctx.globalCompositeOperation = 'destination-in';
      ctx.drawImage(maskCanvas, 0, 0);

      // Return base64 data of the resulting image (PNG for transparency)
      resolve(canvas.toDataURL('image/png').split(',')[1]);
    };
    original.onload = onImageLoad;
    mask.onload = onImageLoad;
    original.onerror = () => reject(new Error('Failed to load original image.'));
    mask.onerror = () => reject(new Error('Failed to load mask image.'));
    original.src = originalImageUrl;
    mask.src = maskDataUrl;
  });
};

View File

@@ -1,23 +0,0 @@
import path from 'path';
import { defineConfig, loadEnv } from 'vite';
import react from '@vitejs/plugin-react';
export default defineConfig(({ mode }) => {
  // Third argument '' loads ALL env vars (not just VITE_-prefixed ones),
  // so GEMINI_API_KEY from .env is visible here.
  const env = loadEnv(mode, '.', '');
  return {
    server: {
      port: 3000,
      // Bind on all interfaces (e.g. for container/LAN access).
      host: '0.0.0.0',
    },
    plugins: [react()],
    // Statically inline the API key into the client bundle under both names
    // the code reads. NOTE(review): this embeds the key in shipped JS —
    // acceptable only for local/dev builds.
    define: {
      'process.env.API_KEY': JSON.stringify(env.GEMINI_API_KEY),
      'process.env.GEMINI_API_KEY': JSON.stringify(env.GEMINI_API_KEY)
    },
    resolve: {
      alias: {
        // '@' maps to the project root (not src/).
        '@': path.resolve(__dirname, '.'),
      }
    }
  };
});