"use client"; import { TryTab } from "./Sidebar"; import { useState, useRef, useCallback, useEffect } from "react"; import { saveLatestUpload, getLatestUpload, deleteLatestUpload } from "../../lib/indexeddb"; import { analyzeDataset, cleanDataset, getReportUrl, type AnalyzeResponse, type CleanResponse } from "../../lib/api"; interface CenterPanelProps { tab: TryTab; onAnalyze?: () => void; } interface UploadedFileMeta { name: string; size: number; type: string; contentPreview: string; } interface TablePreviewData { headers: string[]; rows: string[][]; origin: 'csv'; } export function CenterPanel({ tab, onAnalyze }: CenterPanelProps) { const PREVIEW_BYTES = 64 * 1024; // read first 64KB slice for large-file preview const [fileMeta, setFileMeta] = useState(null); const [uploadedFile, setUploadedFile] = useState(null); const [isDragging, setIsDragging] = useState(false); const [progress, setProgress] = useState(0); const [progressLabel, setProgressLabel] = useState("Processing"); const [tablePreview, setTablePreview] = useState(null); const inputRef = useRef(null); const [loadedFromCache, setLoadedFromCache] = useState(false); const [isProcessing, setIsProcessing] = useState(false); const [error, setError] = useState(null); // Analysis results const [analyzeResult, setAnalyzeResult] = useState(null); const [cleanResult, setCleanResult] = useState(null); const reset = () => { setFileMeta(null); setUploadedFile(null); setProgress(0); setProgressLabel("Processing"); setTablePreview(null); setError(null); }; // Handle API calls const handleAnalyze = async () => { if (!uploadedFile) { setError("No file uploaded"); return; } setIsProcessing(true); setError(null); setProgressLabel("Analyzing dataset..."); try { const result = await analyzeDataset(uploadedFile); setAnalyzeResult(result); setProgressLabel("Analysis complete!"); onAnalyze?.(); // Navigate to bias-analysis tab } catch (err: any) { setError(err.message || "Analysis failed"); } finally { setIsProcessing(false); } }; const handleClean = async () => { if (!uploadedFile) { setError("No file uploaded"); return; } setIsProcessing(true); setError(null); setProgressLabel("Cleaning dataset..."); try { const result = await cleanDataset(uploadedFile); setCleanResult(result); setProgressLabel("Cleaning complete!"); } catch (err: any) { setError(err.message || "Cleaning failed"); } finally { setIsProcessing(false); } }; function tryParseCSV(text: string, maxRows = 50, maxCols = 40): TablePreviewData | null { const lines = text.split(/\r?\n/).filter(l => l.trim().length > 0); if (lines.length < 2) return null; const commaDensity = lines.slice(0, 10).filter(l => l.includes(',')).length; if (commaDensity < 2) return null; const parseLine = (line: string) => { const out: string[] = []; let cur = ''; let inQuotes = false; for (let i = 0; i < line.length; i++) { const ch = line[i]; if (ch === '"') { if (inQuotes && line[i + 1] === '"') { cur += '"'; i++; } else { inQuotes = !inQuotes; } } else if (ch === ',' && !inQuotes) { out.push(cur); cur = ''; } else { cur += ch; } } out.push(cur); return out.map(c => c.trim()); }; const raw = lines.slice(0, maxRows).map(parseLine); if (raw.length === 0) return null; const headers = raw[0]; const colCount = Math.min(headers.length, maxCols); const rows = raw.slice(1).map(r => r.slice(0, colCount)); return { headers: headers.slice(0, colCount), rows, origin: 'csv' }; } // We no longer build table preview for JSON; revert JSON to raw text view. 
  const processFile = useCallback(async (f: File) => {
    if (!f) return;
    const isCSV = /\.csv$/i.test(f.name);
    setProgress(0);
    setUploadedFile(f); // Save the file for API calls

    // For large files, show a progress bar while reading the file stream (no full preview)
    if (f.size > 1024 * 1024) {
      setProgressLabel("Uploading");
      const metaObj: UploadedFileMeta = {
        name: f.name,
        size: f.size,
        type: f.type || "unknown",
        contentPreview: `Loading partial preview (first ${Math.round(PREVIEW_BYTES / 1024)}KB)...`,
      };
      setFileMeta(metaObj);
      setTablePreview(null);
      // Save to IndexedDB immediately so it persists without needing a full read
      (async () => {
        try {
          await saveLatestUpload(f, metaObj);
        } catch {}
      })();
      // Read head slice for partial preview & possible CSV table extraction
      try {
        const headBlob = f.slice(0, PREVIEW_BYTES);
        const headReader = new FileReader();
        headReader.onload = async () => {
          try {
            const buf = headReader.result as ArrayBuffer;
            const decoder = new TextDecoder();
            const text = decoder.decode(buf);
            setFileMeta(prev => (prev ? { ...prev, contentPreview: text.slice(0, 4000) } : prev));
            if (isCSV) {
              const parsed = tryParseCSV(text);
              setTablePreview(parsed);
            } else {
              setTablePreview(null);
            }
            try {
              await saveLatestUpload(f, { ...metaObj, contentPreview: text.slice(0, 4000) });
            } catch {}
          } catch {
            /* ignore */
          }
        };
        headReader.readAsArrayBuffer(headBlob);
      } catch {
        /* ignore */
      }
      // Use streaming read for progress without buffering the entire file in memory
      try {
        const stream: ReadableStream<Uint8Array> | undefined =
          typeof (f as any).stream === "function" ? (f as any).stream() : undefined;
        if (stream && typeof stream.getReader === "function") {
          const reader = stream.getReader();
          let loaded = 0;
          const total = f.size || 1;
          for (;;) {
            const { done, value } = await reader.read();
            if (done) break;
            loaded += value ? value.length : 0;
            const pct = Math.min(100, Math.round((loaded / total) * 100));
            setProgress(pct);
          }
          setProgress(100);
        } else {
          // Fallback to FileReader progress events
          const reader = new FileReader();
          reader.onprogress = (evt) => {
            if (evt.lengthComputable) {
              const pct = Math.min(100, Math.round((evt.loaded / evt.total) * 100));
              setProgress(pct);
            } else {
              setProgress((p) => (p < 90 ? p + 5 : p));
            }
          };
          reader.onloadend = () => setProgress(100);
          reader.onerror = () => setProgress(0);
          reader.readAsArrayBuffer(f);
        }
      } catch {
        setProgress(100);
      }
      return;
    }

    // Small files: read fully, decode, and build the preview in one pass
    const reader = new FileReader();
    reader.onprogress = (evt) => {
      if (evt.lengthComputable) {
        const pct = Math.min(100, Math.round((evt.loaded / evt.total) * 100));
        setProgress(pct);
      } else {
        setProgress((p) => (p < 90 ? p + 5 : p));
      }
    };
    reader.onload = async () => {
      try {
        const buf = reader.result as ArrayBuffer;
        const decoder = new TextDecoder();
        const text = decoder.decode(buf);
        const metaObj: UploadedFileMeta = {
          name: f.name,
          size: f.size,
          type: f.type || "unknown",
          contentPreview: text.slice(0, 4000),
        };
        setFileMeta(metaObj);
        if (isCSV) {
          const parsed = tryParseCSV(text);
          setTablePreview(parsed);
        } else {
          setTablePreview(null);
        }
        // Save file blob and meta to browser cache (IndexedDB)
        try {
          await saveLatestUpload(f, metaObj);
        } catch {}
        setProgressLabel("Processing");
        setProgress(100);
      } catch (e) {
        const metaObj: UploadedFileMeta = {
          name: f.name,
          size: f.size,
          type: f.type || "unknown",
          contentPreview: "Unable to decode preview.",
        };
        setFileMeta(metaObj);
        setTablePreview(null);
        try {
          await saveLatestUpload(f, metaObj);
        } catch {}
        setProgressLabel("Processing");
        setProgress(100);
      }
    };
    reader.onerror = () => {
      setProgress(0);
    };
    reader.readAsArrayBuffer(f);
  }, []);

  function handleFileChange(e: React.ChangeEvent<HTMLInputElement>) {
    const f = e.target.files?.[0];
    if (f) processFile(f);
  }

  const onDragOver = (e: React.DragEvent) => {
    e.preventDefault();
    setIsDragging(true);
  };
  const onDragLeave = () => setIsDragging(false);
  const onDrop = (e: React.DragEvent) => {
    e.preventDefault();
    setIsDragging(false);
    const f = e.dataTransfer.files?.[0];
    if (f) processFile(f);
  };

  // Load last cached upload on mount (processing tab only)
  useEffect(() => {
    let ignore = false;
    if (tab !== "processing") return;
    (async () => {
      try {
        const { file, meta } = await getLatestUpload();
        if (!ignore && meta) {
          setFileMeta(meta as UploadedFileMeta);
          if (file) {
            setUploadedFile(file);
          }
          setLoadedFromCache(true);
        }
      } catch {}
    })();
    return () => {
      ignore = true;
    };
  }, [tab]);

  function renderTabContent() {
    switch (tab) {
      case "processing":
        return (
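{/* Processing tab: heading, upload prompt / drop zone, read progress, file metadata (with "loaded from cache" badge), CSV table or raw-text preview, and error / analyze / clean status messages. */}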

Upload & Process Data

Upload a CSV / JSON / text file. We'll parse it, detect PII, and queue analyses.

Drag & drop a CSV / JSON / TXT here, or click to browse.

{progress > 0 && (
{progressLabel} {progress}%
)} {fileMeta && (
{fileMeta.name}
{Math.round(fileMeta.size / 1024)} KB
{loadedFromCache && (
Loaded from browser cache
)}
{fileMeta.type || "Unknown type"}
{/* Table preview when structured data detected; otherwise show text */} {tablePreview && tablePreview.origin === 'csv' ? (
{tablePreview.headers.map((h, idx) => (
  {h}
))}
{tablePreview.rows.map((r, i) => (
  {r.map((c, j) => (
    {c}
  ))}
))}
) : (
{fileMeta.contentPreview || "(no preview)"}
)} {error && (
❌ {error}
)} {analyzeResult && (
✅ Analysis complete! View results in tabs. Download Report
)} {cleanResult && (
✅ Cleaning complete! {cleanResult.summary.total_cells_affected} cells anonymized.
)}
)}
); case "bias-analysis": return (

Bias & Fairness Analysis

Comprehensive evaluation of algorithmic fairness across demographic groups

{analyzeResult ? (
{/* Overall Bias Score Card */}
Overall Bias Score
{(analyzeResult.bias_metrics.overall_bias_score * 100).toFixed(1)}%
{analyzeResult.bias_metrics.overall_bias_score < 0.3 ? (
  <>✓ Low Bias Excellent fairness</>
) : analyzeResult.bias_metrics.overall_bias_score < 0.5 ? (
  <>⚠ Moderate Bias Monitor recommended</>
) : (
  <>✗ High Bias Action required</>
)}
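{/* Bias-score bands used here and in the interpretation text below: below 0.30 low, 0.30 to 0.49 moderate, 0.50 and above high. */}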
Violations
{/* count styled via: violations_detected.length > 0 ? 'text-red-600' : 'text-green-600' */}
{analyzeResult.bias_metrics.violations_detected.length}
{/* Interpretation */}
INTERPRETATION

{analyzeResult.bias_metrics.overall_bias_score < 0.3 ? "Your model demonstrates strong fairness across demographic groups. Continue monitoring to ensure consistent performance." : analyzeResult.bias_metrics.overall_bias_score < 0.5 ? "Moderate bias detected. Review fairness metrics below and consider implementing mitigation strategies to reduce disparities." : "Significant bias detected. Immediate action required to address fairness concerns before deployment. Review all violation details below."}

{/* Model Performance Metrics */}

📊 Model Performance Metrics

ACCURACY
{(analyzeResult.model_performance.accuracy * 100).toFixed(1)}%
Overall correctness
PRECISION
{(analyzeResult.model_performance.precision * 100).toFixed(1)}%
Positive prediction accuracy
RECALL
{(analyzeResult.model_performance.recall * 100).toFixed(1)}%
True positive detection rate
F1 SCORE
{(analyzeResult.model_performance.f1_score * 100).toFixed(1)}%
Balanced metric
{/* Fairness Metrics */} {Object.keys(analyzeResult.bias_metrics.disparate_impact).length > 0 && (

⚖️ Fairness Metrics by Protected Attribute

{Object.entries(analyzeResult.bias_metrics.disparate_impact).map(([attr, metrics]: [string, any]) => (
{attr.toUpperCase()}
{/* Disparate Impact */} {metrics?.disparate_impact?.value !== undefined && (
DISPARATE IMPACT RATIO
{metrics.disparate_impact.value.toFixed(3)}
{metrics.disparate_impact.fair ? '✓ FAIR' : '✗ UNFAIR'}
{metrics.disparate_impact.interpretation || 'Ratio of positive rates between groups'}
Fair Range: {metrics.disparate_impact.threshold || 0.8} - {(1/(metrics.disparate_impact.threshold || 0.8)).toFixed(2)} {metrics.disparate_impact.fair ? " • This ratio indicates balanced treatment across groups." : " • Ratio outside fair range suggests one group receives significantly different outcomes."}
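{/* The fair range for the disparate impact ratio is [threshold, 1/threshold]; the 0.8 default corresponds to the widely used four-fifths rule. */}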
)} {/* Statistical Parity */} {metrics?.statistical_parity_difference?.value !== undefined && (
STATISTICAL PARITY DIFFERENCE
{metrics.statistical_parity_difference.value.toFixed(3)}
{metrics.statistical_parity_difference.fair ? '✓ FAIR' : '✗ UNFAIR'}
{metrics.statistical_parity_difference.interpretation || 'Difference in positive rates'}
Fair Threshold: ±{metrics.statistical_parity_difference.threshold || 0.1} {metrics.statistical_parity_difference.fair ? " • Difference within acceptable range for equal treatment." : " • Significant difference in positive outcome rates between groups."}
)} {/* Group Metrics */} {metrics.group_metrics && (
GROUP PERFORMANCE
{Object.entries(metrics.group_metrics).map(([group, groupMetrics]: [string, any]) => (
{group}
Positive Rate: {groupMetrics.positive_rate !== undefined ? `${(groupMetrics.positive_rate * 100).toFixed(1)}%` : 'N/A'}
Sample Size: {groupMetrics.sample_size ?? 'N/A'}
{groupMetrics.tpr !== undefined &&
True Positive Rate: {(groupMetrics.tpr * 100).toFixed(1)}%
}
))}
)}
))}
)} {/* Violations */} {analyzeResult.bias_metrics.violations_detected.length > 0 && (

⚠️ Fairness Violations Detected

{analyzeResult.bias_metrics.violations_detected.map((violation: any, i: number) => (
{violation.severity}
{violation.attribute}: {violation.metric}
{violation.message}
{violation.details && (
{violation.details}
)}
))}
)} {/* Key Insights */}

💡 Key Insights

  • Bias Score {(analyzeResult.bias_metrics.overall_bias_score * 100).toFixed(1)}% indicates {analyzeResult.bias_metrics.overall_bias_score < 0.3 ? ' strong fairness with minimal disparities across groups.' : analyzeResult.bias_metrics.overall_bias_score < 0.5 ? ' moderate disparities that should be monitored and addressed.' : ' significant unfairness requiring immediate remediation before deployment.'}
  • Model achieves {(analyzeResult.model_performance.accuracy * 100).toFixed(1)}% accuracy, but fairness metrics reveal how performance varies across demographic groups.
  • {analyzeResult.bias_metrics.violations_detected.length > 0 ? (
      <>{analyzeResult.bias_metrics.violations_detected.length} violation(s) detected. Review mitigation tab for recommended actions to improve fairness.</>
    ) : (
      <>No violations detected. Model meets fairness thresholds across all protected attributes.</>
    )}
) : (
📊

No analysis results yet

Upload a dataset and click "Analyze" to see bias and fairness metrics

)}
); case "risk-analysis": return (

🔒 Risk Analysis

{analyzeResult ? (
{/* Overall Risk Score Card */}
Overall Risk Score
{(analyzeResult.risk_assessment.overall_risk_score * 100).toFixed(1)}%
{analyzeResult.risk_assessment.risk_level} RISK
{analyzeResult.risk_assessment.presidio_enabled && (
Presidio-Enhanced Detection
)}
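{/* "Presidio" refers to Microsoft's open-source PII detection and anonymization engine; the backend reports via presidio_enabled whether it was used for this analysis. */}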
{/* Risk Categories Grid */}
{Object.entries(analyzeResult.risk_assessment.risk_categories || {}).map(([category, score]: [string, any]) => {
  const riskPct = score * 100;
  const riskLevel = riskPct >= 70 ? 'CRITICAL' : riskPct >= 50 ? 'HIGH' : riskPct >= 30 ? 'MEDIUM' : 'LOW';
  const categoryIcons: Record<string, string> = {
    privacy: '🔒', ethical: '⚖️', compliance: '📋', security: '🛡️', operational: '⚙️', data_quality: '📊'
  };
  return (
{categoryIcons[category] || '📌'}
{riskLevel}
{category.replace('_', ' ')}
{riskPct.toFixed(0)}%
); })}
{/* Privacy Risks - PII Detection */} {analyzeResult.risk_assessment.privacy_risks && (
🔒

Privacy Risks

{analyzeResult.risk_assessment.privacy_risks.pii_count} PII Types Detected
{/* PII Detections */} {analyzeResult.risk_assessment.privacy_risks.pii_detected && analyzeResult.risk_assessment.privacy_risks.pii_detected.length > 0 ? (
{analyzeResult.risk_assessment.privacy_risks.pii_detected.slice(0, 6).map((pii: any, idx: number) => (
{pii.column} {pii.severity}
{pii.type}
Detected via: {pii.detection_method} {pii.confidence && ` (${(pii.confidence * 100).toFixed(0)}% confidence)`}
))}
{/* Privacy Metrics */}
Re-ID Risk
{(analyzeResult.risk_assessment.privacy_risks.reidentification_risk * 100).toFixed(0)}%
Data Minimization
{(analyzeResult.risk_assessment.privacy_risks.data_minimization_score * 100).toFixed(0)}%
Anonymization
{analyzeResult.risk_assessment.privacy_risks.anonymization_level}
Detection
{analyzeResult.risk_assessment.privacy_risks.detection_method}
) : (
✓ No PII detected in the dataset
)}
)} {/* Violations */} {analyzeResult.risk_assessment.violations && analyzeResult.risk_assessment.violations.length > 0 && (
⚠️

Risk Violations

{analyzeResult.risk_assessment.violations.length} Issues
{analyzeResult.risk_assessment.violations.map((violation: any, idx: number) => (
{violation.severity} {violation.category}
{violation.message}
{violation.details && (
{violation.details}
)}
))}
)} {/* Key Insights */} {analyzeResult.risk_assessment.insights && analyzeResult.risk_assessment.insights.length > 0 && (
💡

Key Insights

{analyzeResult.risk_assessment.insights.map((insight: string, idx: number) => (
{insight}
))}
)} {/* Compliance Status */} {analyzeResult.risk_assessment.compliance_risks && (
📋

Compliance Status

{Object.entries(analyzeResult.risk_assessment.compliance_risks)
  .filter(([key]) => ['gdpr', 'ccpa', 'hipaa', 'ecoa'].includes(key))
  .map(([regulation, data]: [string, any]) => {
    if (!data || typeof data !== 'object') return null;
    return (
{regulation} {data.status}
{data.score !== undefined && (
Compliance Score: {(data.score * 100).toFixed(0)}%
)} {data.applicable === false && (
Not applicable to this dataset
)}
); })}
)}
) : (
🔒

No risk analysis results yet

Upload a dataset and click "Analyze" to see comprehensive risk assessment

)}
); case "bias-risk-mitigation": return (

Mitigation Suggestions

{analyzeResult && analyzeResult.recommendations.length > 0 ? (
{analyzeResult.recommendations.map((rec, i) => (
{rec}
))}
) : (

Recommendations will appear here after analysis.

)}
); case "results": return (

Results Summary

{(analyzeResult || cleanResult) ? (
{analyzeResult && (

Analysis Results

Dataset: {analyzeResult.filename}
Rows: {analyzeResult.dataset_info.rows}
Columns: {analyzeResult.dataset_info.columns}
Bias Score: {(analyzeResult.bias_metrics.overall_bias_score * 100).toFixed(1)}%
Risk Score: {(analyzeResult.risk_assessment.overall_risk_score * 100).toFixed(1)}%
Download Full Report →
)} {cleanResult && (

Cleaning Results

Original: {cleanResult.dataset_info.original_rows} rows × {cleanResult.dataset_info.original_columns} cols
Cleaned: {cleanResult.dataset_info.cleaned_rows} rows × {cleanResult.dataset_info.cleaned_columns} cols
Cells Anonymized: {cleanResult.summary.total_cells_affected}
Columns Removed: {cleanResult.summary.columns_removed.length}
GDPR Compliant: {cleanResult.gdpr_compliance.length} articles applied
)}
) : (

Process a dataset to see aggregated results.

)}
); default: return null; } } return (
{renderTabContent()}
); }