diff --git a/frontend/components/try/ChatbotPanel.tsx b/frontend/components/try/ChatbotPanel.tsx
index 67697f3..f5d1179 100644
--- a/frontend/components/try/ChatbotPanel.tsx
+++ b/frontend/components/try/ChatbotPanel.tsx
@@ -2,118 +2,161 @@ import { useState, useRef, useEffect } from "react";
 import { chatWithCopilot } from "../../lib/api";
 
-const CHAT_ENDPOINT = process.env.NEXT_PUBLIC_CHAT_API_URL || 'https://fc39539f7cb9.ngrok-free.app';
+const CHAT_ENDPOINT =
+  process.env.NEXT_PUBLIC_CHAT_API_URL || "https://fc39539f7cb9.ngrok-free.app";
 
 export function ChatbotPanel() {
-  const [messages, setMessages] = useState<{ role: "user" | "assistant"; content: string; pending?: boolean; error?: boolean }[]>([
-    { role: "assistant", content: "Hi! I'm your Privacy Copilot. Ask me about compliance, GDPR articles, or dataset risks." },
-  ]);
-  const [input, setInput] = useState("");
-  const [isLoading, setIsLoading] = useState(false);
-  const [delayedError, setDelayedError] = useState<string | null>(null);
-  const scrollRef = useRef<HTMLDivElement>(null);
+  const [messages, setMessages] = useState<
+    {
+      role: "user" | "assistant";
+      content: string;
+      pending?: boolean;
+      error?: boolean;
+    }[]
+  >([
+    {
+      role: "assistant",
+      content:
+        "Hi! I'm GDPR bot. Ask me about compliance, GDPR articles, or dataset risks.",
+    },
+  ]);
+  const [input, setInput] = useState("");
+  const [isLoading, setIsLoading] = useState(false);
+  const [delayedError, setDelayedError] = useState<string | null>(null);
+  const scrollRef = useRef<HTMLDivElement>(null);
 
-  useEffect(() => {
-    if (scrollRef.current) {
-      scrollRef.current.scrollTop = scrollRef.current.scrollHeight;
-    }
-  }, [messages]);
+  useEffect(() => {
+    if (scrollRef.current) {
+      scrollRef.current.scrollTop = scrollRef.current.scrollHeight;
+    }
+  }, [messages]);
 
-  async function handleSubmit(e: React.FormEvent) {
-    e.preventDefault();
-    const prompt = input.trim();
-    if (!prompt || isLoading) return;
-    setInput("");
-    setDelayedError(null);
-    setMessages(prev => [...prev, { role: "user", content: prompt }, { role: "assistant", content: "Thinking…", pending: true }]);
-    setIsLoading(true);
+  async function handleSubmit(e: React.FormEvent) {
+    e.preventDefault();
+    const prompt = input.trim();
+    if (!prompt || isLoading) return;
+    setInput("");
+    setDelayedError(null);
+    setMessages((prev) => [
+      ...prev,
+      { role: "user", content: prompt },
+      { role: "assistant", content: "Thinking…", pending: true },
+    ]);
+    setIsLoading(true);
 
-    // Delay window for showing errors (avoid instant flashing if slow model)
-    const errorDisplayDelayMs = 4_000;
-    let canShowError = false;
-    const delayTimer = setTimeout(() => { canShowError = true; if (delayedError) showErrorBubble(delayedError); }, errorDisplayDelayMs);
+    // Delay window for showing errors (avoid instant flashing if slow model)
+    const errorDisplayDelayMs = 4_000;
+    let canShowError = false;
+    const delayTimer = setTimeout(() => {
+      canShowError = true;
+      if (delayedError) showErrorBubble(delayedError);
+    }, errorDisplayDelayMs);
 
-    function showErrorBubble(msg: string) {
-      setMessages(prev => prev.map(m => m.pending ? { ...m, content: msg, pending: false, error: true } : m));
-    }
+    function showErrorBubble(msg: string) {
+      setMessages((prev) =>
+        prev.map((m) =>
+          m.pending ? { ...m, content: msg, pending: false, error: true } : m,
+        ),
+      );
+    }
 
-    try {
-      let responseText: string | null = null;
-      // Primary attempt via shared client
-      try {
-        responseText = await chatWithCopilot(prompt);
-      } catch (primaryErr: any) {
-        // Fallback: replicate working curl (query param, empty body)
-        try {
-          const res = await fetch(`${CHAT_ENDPOINT}/chat?prompt=${encodeURIComponent(prompt)}` , {
-            method: 'POST',
-            headers: { 'accept': 'application/json' },
-            body: ''
-          });
-          if (res.ok) {
-            const j = await res.json();
-            responseText = j.response || JSON.stringify(j);
-          } else {
-            throw primaryErr;
-          }
-        } catch { throw primaryErr; }
-      }
-      clearTimeout(delayTimer);
-      setMessages(prev => prev.map(m => m.pending ? { ...m, content: responseText || 'No response text', pending: false } : m));
-    } catch (err: any) {
-      clearTimeout(delayTimer);
-      const errMsg = err?.message || 'Unexpected error';
-      if (canShowError) {
-        showErrorBubble(errMsg);
-      } else {
-        setDelayedError(errMsg);
-      }
-    } finally {
-      setIsLoading(false);
-    }
-  }
+    try {
+      let responseText: string | null = null;
+      // Primary attempt via shared client
+      try {
+        responseText = await chatWithCopilot(prompt);
+      } catch (primaryErr: any) {
+        // Fallback: replicate working curl (query param, empty body)
+        try {
+          const res = await fetch(
+            `${CHAT_ENDPOINT}/chat?prompt=${encodeURIComponent(prompt)}`,
+            {
+              method: "POST",
+              headers: { accept: "application/json" },
+              body: "",
+            },
+          );
+          if (res.ok) {
+            const j = await res.json();
+            responseText = j.response || JSON.stringify(j);
+          } else {
+            throw primaryErr;
+          }
+        } catch {
+          throw primaryErr;
+        }
+      }
+      clearTimeout(delayTimer);
+      setMessages((prev) =>
+        prev.map((m) =>
+          m.pending
+            ? {
+                ...m,
+                content: responseText || "No response text",
+                pending: false,
+              }
+            : m,
+        ),
+      );
+    } catch (err: any) {
+      clearTimeout(delayTimer);
+      const errMsg = err?.message || "Unexpected error";
+      if (canShowError) {
+        showErrorBubble(errMsg);
+      } else {
+        setDelayedError(errMsg);
+      }
+    } finally {
+      setIsLoading(false);
+    }
+  }
 
-  return (
-    <div>
-      <div>
-        <span>
-          Privacy Copilot
-        </span>
-      </div>
-      <div ref={scrollRef}>
-        {messages.map((m, i) => (
-          <div key={i}>
-            {m.content}
-          </div>
-        ))}
-      </div>
-      <form onSubmit={handleSubmit}>
-        <input
-          value={input}
-          onChange={e => setInput(e.target.value)}
-          placeholder="Ask about GDPR, compliance, privacy risks..."
-          className="flex-1 rounded-md border border-slate-300 bg-white px-3 py-2 text-sm focus:outline-none focus:ring-2 focus:ring-brand-400 disabled:opacity-60"
-          disabled={isLoading}
-        />
-        <button type="submit" disabled={isLoading}>
-          Send
-        </button>
-      </form>
-      <p>
-        Responses may take up to 1–2 minutes while the local model generates output.
-      </p>
-    </div>
-  );
+  return (
+    <div>
+      <div>
+        <span>
+          GDPR Bot
+        </span>
+      </div>
+      <div ref={scrollRef}>
+        {messages.map((m, i) => (
+          <div key={i}>
+            {m.content}
+          </div>
+        ))}
+      </div>
+      <form onSubmit={handleSubmit}>
+        <input
+          value={input}
+          onChange={(e) => setInput(e.target.value)}
+          placeholder="Ask about GDPR, compliance, privacy risks..."
+          className="flex-1 rounded-md border border-slate-300 bg-white px-3 py-2 text-sm focus:outline-none focus:ring-2 focus:ring-brand-400 disabled:opacity-60"
+          disabled={isLoading}
+        />
+        <button type="submit" disabled={isLoading}>
+          Send
+        </button>
+      </form>
+      <p>
+        Responses may take up to 1–2 minutes while the local model generates
+        output.
+      </p>
+    </div>
+  );
 }
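Note: `chatWithCopilot` is imported from `../../lib/api` but its implementation is not part of this diff. For context, a minimal sketch of what that shared client presumably looks like; the JSON request body is an assumption, and the fallback above suggests the deployed backend actually expects the prompt as a query parameter instead:

```ts
// lib/api.ts (hypothetical sketch, not part of this diff)
const CHAT_ENDPOINT =
  process.env.NEXT_PUBLIC_CHAT_API_URL || "https://fc39539f7cb9.ngrok-free.app";

// Assumed primary client: POSTs the prompt as a JSON body. If the deployed
// backend only accepts the query-param form, this throws and ChatbotPanel
// falls back to the query-param request.
export async function chatWithCopilot(prompt: string): Promise<string> {
  const res = await fetch(`${CHAT_ENDPOINT}/chat`, {
    method: "POST",
    headers: { "Content-Type": "application/json", accept: "application/json" },
    body: JSON.stringify({ prompt }), // assumed request shape
  });
  if (!res.ok) {
    throw new Error(`Chat request failed with status ${res.status}`);
  }
  const data = await res.json();
  // Mirror the panel's handling: prefer a `response` field, else raw JSON.
  return data.response || JSON.stringify(data);
}
```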
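The fallback fetch replicates the "working curl" named in the comment: a POST to `/chat` with the prompt URL-encoded as a query parameter and an empty body, reading a `response` field from the JSON reply. A standalone smoke test of that request shape (illustrative prompt; Node 18+ for the global `fetch`):

```ts
// smoke-chat.ts: hypothetical helper script, run with `npx tsx smoke-chat.ts`.
const endpoint =
  process.env.NEXT_PUBLIC_CHAT_API_URL || "https://fc39539f7cb9.ngrok-free.app";

async function main() {
  const prompt = "What does GDPR Article 17 cover?"; // illustrative prompt
  // Same shape as the panel's fallback: prompt as query param, empty POST body.
  const res = await fetch(
    `${endpoint}/chat?prompt=${encodeURIComponent(prompt)}`,
    { method: "POST", headers: { accept: "application/json" }, body: "" },
  );
  console.log(res.status, await res.json());
}

main().catch(console.error);
```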