186 lines
6.3 KiB
TypeScript
186 lines
6.3 KiB
TypeScript
|
|
import { GoogleGenAI, Type } from "@google/genai";
|
|
import { BookProject, Chapter, Entity, UserProfile } from "../types";
|
|
|
|
const truncate = (str: string, length: number) => {
|
|
if (!str) return "";
|
|
return str.length > length ? str.substring(0, length) + "..." : str;
|
|
};
|
|
|
|
const checkUsage = (user: UserProfile) => {
|
|
if (user.subscription.plan === 'master') return true;
|
|
return user.usage.aiActionsCurrent < user.usage.aiActionsLimit;
|
|
};
|
|
|
|
const buildContextPrompt = (project: BookProject, currentChapterId: string, instruction: string) => {
|
|
const currentChapterIndex = project.chapters.findIndex(c => c.id === currentChapterId);
|
|
const previousSummaries = project.chapters
|
|
.slice(0, currentChapterIndex)
|
|
.map((c, i) => `Chapitre ${i + 1} (${c.title}): ${c.summary || truncate(c.content.replace(/<[^>]*>?/gm, ''), 200)}`)
|
|
.join('\n');
|
|
|
|
const entitiesContext = project.entities
|
|
.map(e => {
|
|
const base = `[${e.type}] ${e.name}: ${truncate(e.description, 150)}`;
|
|
const context = e.storyContext ? `\n - VÉCU/ÉVOLUTION DANS L'HISTOIRE: ${truncate(e.storyContext, 500)}` : '';
|
|
return base + context;
|
|
})
|
|
.join('\n');
|
|
|
|
const ideasContext = (project.ideas || [])
|
|
.map(i => {
|
|
const statusMap: Record<string, string> = { todo: 'À FAIRE', progress: 'EN COURS', done: 'TERMINÉ' };
|
|
return `[IDÉE - ${statusMap[i.status]}] ${i.title}: ${truncate(i.description, 100)}`;
|
|
})
|
|
.join('\n');
|
|
|
|
const currentContent = project.chapters[currentChapterIndex]?.content.replace(/<[^>]*>?/gm, '') || "";
|
|
const s = project.settings;
|
|
const settingsPrompt = s ? `
|
|
PARAMÈTRES DU ROMAN:
|
|
- Genre: ${s.genre} ${s.subGenre ? `(${s.subGenre})` : ''}
|
|
- Public: ${s.targetAudience}
|
|
- Ton: ${s.tone}
|
|
- Narration: ${s.pov}
|
|
- Temps: ${s.tense}
|
|
- Thèmes: ${s.themes}
|
|
- Synopsis Global: ${truncate(s.synopsis || '', 500)}
|
|
` : "";
|
|
|
|
return `
|
|
Tu es un assistant éditorial expert et un co-auteur créatif.
|
|
L'utilisateur écrit un livre intitulé "${project.title}".
|
|
|
|
${settingsPrompt}
|
|
|
|
CONTEXTE DE L'HISTOIRE (Résumé des chapitres précédents):
|
|
${previousSummaries || "Aucun chapitre précédent."}
|
|
|
|
BIBLE DU MONDE (Personnages et Lieux):
|
|
${entitiesContext || "Aucune fiche créée."}
|
|
|
|
BOÎTE À IDÉES & NOTES (Pistes de l'auteur):
|
|
${ideasContext || "Aucune note."}
|
|
|
|
CHAPITRE ACTUEL (Texte brut):
|
|
${truncate(currentContent, 3000)}
|
|
|
|
STYLE D'ÉCRITURE SPÉCIFIQUE (Instruction de l'auteur):
|
|
${project.styleGuide || "Standard, neutre."}
|
|
|
|
TA MISSION:
|
|
${instruction}
|
|
`;
|
|
};
|
|
|
|
export const generateStoryContent = async (
|
|
project: BookProject,
|
|
currentChapterId: string,
|
|
userPrompt: string,
|
|
user: UserProfile,
|
|
onSuccess: () => void
|
|
): Promise<{ text: string, type: 'draft' | 'reflection' }> => {
|
|
if (!checkUsage(user)) {
|
|
return { text: "Limite d'actions IA atteinte pour ce mois. Passez au plan Pro !", type: 'reflection' };
|
|
}
|
|
|
|
try {
|
|
const ai = new GoogleGenAI({ apiKey: process.env.API_KEY });
|
|
const finalPrompt = buildContextPrompt(project, currentChapterId, userPrompt);
|
|
|
|
// Pro/Master plan users get better models (simulated here)
|
|
const modelName = user.subscription.plan === 'master' ? 'gemini-3-pro-preview' : 'gemini-3-flash-preview';
|
|
|
|
const response = await ai.models.generateContent({
|
|
model: modelName,
|
|
contents: finalPrompt,
|
|
config: {
|
|
temperature: 0.7,
|
|
responseMimeType: "application/json",
|
|
responseSchema: {
|
|
type: Type.OBJECT,
|
|
properties: {
|
|
responseType: {
|
|
type: Type.STRING,
|
|
enum: ["draft", "reflection"]
|
|
},
|
|
content: {
|
|
type: Type.STRING
|
|
}
|
|
}
|
|
}
|
|
}
|
|
});
|
|
|
|
onSuccess();
|
|
const result = JSON.parse(response.text || "{}");
|
|
return {
|
|
text: result.content || "Erreur de génération.",
|
|
type: result.responseType || "reflection"
|
|
};
|
|
|
|
} catch (error) {
|
|
console.error("AI Generation Error:", error);
|
|
return { text: "Erreur lors de la communication avec l'IA.", type: 'reflection' };
|
|
}
|
|
};
|
|
|
|
export const updateEntityContexts = async (text: string, entities: Entity[]): Promise<Entity[]> => {
|
|
if (!text || entities.length === 0) return entities;
|
|
try {
|
|
const ai = new GoogleGenAI({ apiKey: process.env.API_KEY });
|
|
const entityList = entities.map(e => ({ id: e.id, name: e.name }));
|
|
const prompt = `Analyse ce texte et résume brièvement l'évolution ou les actions de ces personnages/lieux: ${JSON.stringify(entityList)}\nTexte: "${truncate(text, 1000)}"`;
|
|
|
|
const response = await ai.models.generateContent({
|
|
model: 'gemini-3-flash-preview',
|
|
contents: prompt,
|
|
config: {
|
|
responseMimeType: "application/json",
|
|
responseSchema: {
|
|
type: Type.ARRAY,
|
|
items: {
|
|
type: Type.OBJECT,
|
|
properties: {
|
|
entityId: { type: Type.STRING },
|
|
summary: { type: Type.STRING }
|
|
}
|
|
}
|
|
}
|
|
}
|
|
});
|
|
|
|
const updates = JSON.parse(response.text || "[]") as {entityId: string, summary: string}[];
|
|
if (updates.length === 0) return entities;
|
|
|
|
return entities.map(entity => {
|
|
const update = updates.find(u => u.entityId === entity.id);
|
|
if (update) {
|
|
const oldContext = entity.storyContext || "";
|
|
return { ...entity, storyContext: truncate(oldContext + " | " + update.summary, 1000) };
|
|
}
|
|
return entity;
|
|
});
|
|
} catch (e) { return entities; }
|
|
};
|
|
|
|
export const transformText = async (
|
|
text: string,
|
|
mode: 'correct' | 'rewrite' | 'expand' | 'continue',
|
|
context: string,
|
|
user: UserProfile,
|
|
onSuccess: () => void
|
|
): Promise<string> => {
|
|
if (!checkUsage(user)) return "Limite d'actions IA atteinte.";
|
|
try {
|
|
const ai = new GoogleGenAI({ apiKey: process.env.API_KEY });
|
|
let prompt = `Action: ${mode}. Texte: ${text}. Contexte: ${truncate(context, 1000)}. Renvoie juste le texte transformé.`;
|
|
const response = await ai.models.generateContent({ model: 'gemini-3-flash-preview', contents: prompt });
|
|
onSuccess();
|
|
return response.text?.trim() || text;
|
|
} catch (e) { return text; }
|
|
};
|
|
|
|
/** Stub: resolves to a canned analysis string; `text` is not yet inspected. */
export const analyzeStyle = async (text: string): Promise<string> => {
  return "Style analysé";
};
|
|
/** Stub: resolves to a canned summary string; `text` is not yet inspected. */
export const summarizeText = async (text: string): Promise<string> => {
  return "Résumé généré";
};
|