Jury evaluation UX overhaul + admin review features
All checks were successful
Build and Push Docker Image / build (push) Successful in 8m53s

- Fix project documents not displaying on jury project page (rewrote MultiWindowDocViewer to use file.listByProject)
- Add working download/preview for project files via presigned URLs
- Display project tags on jury project detail page
- Add autosave for evaluation drafts (debounced 3s + save on unmount/beforeunload)
- Support mixed criterion types: numeric scores, yes/no booleans, text responses, section headers
- Replace inline criteria editor with rich EvaluationFormBuilder on admin round page
- Remove COI dialog from evaluation page
- Update AI summary service to handle boolean/text criteria (yes/no counts, text synthesis)
- Update EvaluationSummaryCard to show boolean criteria bars and text responses
- Add evaluation detail sheet on admin project page (click juror row to view full scores + feedback)
- Add Recent Evaluations dashboard widget showing latest jury reviews

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in: (branch list unavailable)
Author: Matt
Date: 2026-02-18 12:43:28 +01:00
parent 73759eaddd
commit 9ce56f13fd
12 changed files with 1137 additions and 385 deletions

View File

@@ -21,7 +21,7 @@ import type { PrismaClient, Prisma } from '@prisma/client'
interface EvaluationForSummary {
id: string
criterionScoresJson: Record<string, number> | null
criterionScoresJson: Record<string, number | boolean | string> | null
globalScore: number | null
binaryDecision: boolean | null
feedbackText: string | null
@@ -35,7 +35,7 @@ interface EvaluationForSummary {
}
interface AnonymizedEvaluation {
criterionScores: Record<string, number> | null
criterionScores: Record<string, number | boolean | string> | null
globalScore: number | null
binaryDecision: boolean | null
feedbackText: string | null
@@ -44,6 +44,9 @@ interface AnonymizedEvaluation {
interface CriterionDef {
id: string
label: string
type?: 'numeric' | 'text' | 'boolean' | 'section_header'
trueLabel?: string
falseLabel?: string
}
interface AIResponsePayload {
@@ -58,10 +61,21 @@ interface AIResponsePayload {
recommendation: string
}
interface BooleanStats {
yesCount: number
noCount: number
total: number
yesPercent: number
trueLabel: string
falseLabel: string
}
interface ScoringPatterns {
averageGlobalScore: number | null
consensus: number
criterionAverages: Record<string, number>
booleanCriteria: Record<string, BooleanStats>
textResponses: Record<string, string[]>
evaluatorCount: number
}
@@ -84,7 +98,7 @@ export function anonymizeEvaluations(
evaluations: EvaluationForSummary[]
): AnonymizedEvaluation[] {
return evaluations.map((ev) => ({
criterionScores: ev.criterionScoresJson as Record<string, number> | null,
criterionScores: ev.criterionScoresJson as Record<string, number | boolean | string> | null,
globalScore: ev.globalScore,
binaryDecision: ev.binaryDecision,
feedbackText: ev.feedbackText ? sanitizeText(ev.feedbackText) : null,
@@ -99,15 +113,33 @@ export function anonymizeEvaluations(
export function buildSummaryPrompt(
anonymizedEvaluations: AnonymizedEvaluation[],
projectTitle: string,
criteriaLabels: string[]
criteriaDefinitions: CriterionDef[]
): string {
const sanitizedTitle = sanitizeText(projectTitle)
// Build a descriptive criteria section that explains each criterion type
const criteriaDescription = criteriaDefinitions
.filter((c) => c.type !== 'section_header')
.map((c) => {
const type = c.type || 'numeric'
if (type === 'boolean') {
return `- "${c.label}" (Yes/No decision: ${c.trueLabel || 'Yes'} / ${c.falseLabel || 'No'})`
}
if (type === 'text') {
return `- "${c.label}" (Free-text response)`
}
return `- "${c.label}" (Numeric score)`
})
.join('\n')
return `You are analyzing jury evaluations for a project competition.
PROJECT: "${sanitizedTitle}"
EVALUATION CRITERIA: ${criteriaLabels.join(', ')}
EVALUATION CRITERIA:
${criteriaDescription}
Note: criterionScores values may be numbers (numeric scores), booleans (true/false for yes/no criteria), or strings (text responses).
EVALUATIONS (${anonymizedEvaluations.length} total):
${JSON.stringify(anonymizedEvaluations, null, 2)}
@@ -123,17 +155,11 @@ Analyze these evaluations and return a JSON object with this exact structure:
"recommendation": "A brief recommendation based on the evaluation consensus"
}
Example output:
{
"overallAssessment": "The project received strong scores (avg 7.8/10) with high consensus among evaluators. Key strengths in innovation were balanced by concerns about scalability.",
"strengths": ["Innovative approach to coral reef monitoring", "Strong team expertise in marine biology"],
"weaknesses": ["Limited scalability plan", "Budget projections need more detail"],
"themes": [{"theme": "Innovation", "sentiment": "positive", "frequency": 3}, {"theme": "Scalability", "sentiment": "negative", "frequency": 2}],
"recommendation": "Recommended for advancement with condition to address scalability concerns in next round."
}
Guidelines:
- Base your analysis only on the provided evaluation data
- For numeric criteria, consider score averages and distribution
- For yes/no criteria, consider the proportion of yes vs no answers
- For text criteria, synthesize common themes from the responses
- Identify common themes across evaluator feedback
- Note areas of agreement and disagreement
- Keep the assessment objective and balanced
@@ -172,19 +198,63 @@ export function computeScoringPatterns(
consensus = Math.max(0, 1 - stdDev / 4.5)
}
// Criterion averages
// Criterion averages (numeric only)
const criterionAverages: Record<string, number> = {}
// Boolean criteria stats
const booleanCriteria: Record<string, BooleanStats> = {}
// Text responses
const textResponses: Record<string, string[]> = {}
for (const criterion of criteriaLabels) {
const scores: number[] = []
for (const ev of evaluations) {
const criterionScores = ev.criterionScoresJson as Record<string, number> | null
if (criterionScores && criterionScores[criterion.id] !== undefined) {
scores.push(criterionScores[criterion.id])
const type = criterion.type || 'numeric'
if (type === 'numeric') {
const scores: number[] = []
for (const ev of evaluations) {
const criterionScores = ev.criterionScoresJson as Record<string, number | boolean | string> | null
const val = criterionScores?.[criterion.id]
if (typeof val === 'number') {
scores.push(val)
}
}
if (scores.length > 0) {
criterionAverages[criterion.label] =
scores.reduce((a, b) => a + b, 0) / scores.length
}
} else if (type === 'boolean') {
let yesCount = 0
let noCount = 0
for (const ev of evaluations) {
const criterionScores = ev.criterionScoresJson as Record<string, number | boolean | string> | null
const val = criterionScores?.[criterion.id]
if (typeof val === 'boolean') {
if (val) yesCount++
else noCount++
}
}
const total = yesCount + noCount
if (total > 0) {
booleanCriteria[criterion.label] = {
yesCount,
noCount,
total,
yesPercent: Math.round((yesCount / total) * 100),
trueLabel: criterion.trueLabel || 'Yes',
falseLabel: criterion.falseLabel || 'No',
}
}
} else if (type === 'text') {
const responses: string[] = []
for (const ev of evaluations) {
const criterionScores = ev.criterionScoresJson as Record<string, number | boolean | string> | null
const val = criterionScores?.[criterion.id]
if (typeof val === 'string' && val.trim()) {
responses.push(sanitizeText(val))
}
}
if (responses.length > 0) {
textResponses[criterion.label] = responses
}
}
if (scores.length > 0) {
criterionAverages[criterion.label] =
scores.reduce((a, b) => a + b, 0) / scores.length
}
}
@@ -192,6 +262,8 @@ export function computeScoringPatterns(
averageGlobalScore,
consensus: Math.round(consensus * 100) / 100,
criterionAverages,
booleanCriteria,
textResponses,
evaluatorCount: evaluations.length,
}
}
@@ -266,7 +338,6 @@ export async function generateSummary({
const criteria: CriterionDef[] = form?.criteriaJson
? (form.criteriaJson as unknown as CriterionDef[])
: []
const criteriaLabels = criteria.map((c) => c.label)
// 2. Anonymize evaluations
const typedEvaluations = evaluations as unknown as EvaluationForSummary[]
@@ -282,7 +353,7 @@ export async function generateSummary({
}
const model = await getConfiguredModel(AI_MODELS.QUICK)
const prompt = buildSummaryPrompt(anonymized, project.title, criteriaLabels)
const prompt = buildSummaryPrompt(anonymized, project.title, criteria)
let aiResponse: AIResponsePayload
let tokensUsed = 0