Jury evaluation UX overhaul + admin review features
All checks were successful
Build and Push Docker Image / build (push) Successful in 8m53s

- Fix project documents not displaying on jury project page (rewrote MultiWindowDocViewer to use file.listByProject)
- Add working download/preview for project files via presigned URLs
- Display project tags on jury project detail page
- Add autosave for evaluation drafts (debounced 3s + save on unmount/beforeunload)
- Support mixed criterion types: numeric scores, yes/no booleans, text responses, section headers
- Replace inline criteria editor with rich EvaluationFormBuilder on admin round page
- Remove COI dialog from evaluation page
- Update AI summary service to handle boolean/text criteria (yes/no counts, text synthesis)
- Update EvaluationSummaryCard to show boolean criteria bars and text responses
- Add evaluation detail sheet on admin project page (click juror row to view full scores + feedback)
- Add Recent Evaluations dashboard widget showing latest jury reviews

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Matt
2026-02-18 12:43:28 +01:00
parent 73759eaddd
commit 9ce56f13fd
12 changed files with 1137 additions and 385 deletions

View File

@@ -465,4 +465,37 @@ export const dashboardRouter = router({
recentActivity,
}
}),
// Latest submitted jury evaluations for an edition, newest first.
// Capped at 50 per request; defaults to the 10 most recent.
getRecentEvaluations: adminProcedure
  .input(z.object({ editionId: z.string(), limit: z.number().int().min(1).max(50).optional() }))
  .query(async ({ ctx, input }) => {
    const { editionId, limit } = input
    // Only SUBMITTED evaluations count; scope via assignment -> round -> competition.
    return ctx.prisma.evaluation.findMany({
      where: {
        status: 'SUBMITTED',
        assignment: {
          round: { competition: { programId: editionId } },
        },
      },
      orderBy: { submittedAt: 'desc' },
      take: limit ?? 10,
      select: {
        id: true,
        globalScore: true,
        binaryDecision: true,
        submittedAt: true,
        feedbackText: true,
        assignment: {
          select: {
            project: { select: { id: true, title: true } },
            round: { select: { id: true, name: true } },
            user: { select: { id: true, name: true, email: true } },
          },
        },
      },
    })
  }),
})

View File

@@ -1067,9 +1067,27 @@ export const evaluationRouter = router({
id: z.string(),
label: z.string().min(1).max(255),
description: z.string().max(2000).optional(),
type: z.enum(['numeric', 'text', 'boolean', 'section_header']).optional(),
// Numeric fields
weight: z.number().min(0).max(100).optional(),
minScore: z.number().int().min(0).optional(),
maxScore: z.number().int().min(1).optional(),
scale: z.number().int().min(1).max(10).optional(),
required: z.boolean().optional(),
// Text fields
maxLength: z.number().int().min(1).max(10000).optional(),
placeholder: z.string().max(500).optional(),
// Boolean fields
trueLabel: z.string().max(100).optional(),
falseLabel: z.string().max(100).optional(),
// Conditional visibility
condition: z.object({
criterionId: z.string(),
operator: z.enum(['equals', 'greaterThan', 'lessThan']),
value: z.union([z.number(), z.string(), z.boolean()]),
}).optional(),
// Section grouping
sectionId: z.string().optional(),
})
).min(1),
})
@@ -1088,18 +1106,46 @@ export const evaluationRouter = router({
})
const nextVersion = (latestForm?.version ?? 0) + 1
// Build criteriaJson with defaults
const criteriaJson = criteria.map((c) => ({
id: c.id,
label: c.label,
description: c.description || '',
weight: c.weight ?? 1,
scale: `${c.minScore ?? 1}-${c.maxScore ?? 10}`,
required: true,
}))
// Build criteriaJson preserving all fields
const criteriaJson = criteria.map((c) => {
const type = c.type || 'numeric'
const base = {
id: c.id,
label: c.label,
description: c.description || '',
type,
required: c.required ?? (type !== 'section_header'),
}
// Auto-generate scalesJson from criteria min/max ranges
const scaleSet = new Set(criteriaJson.map((c) => c.scale))
if (type === 'numeric') {
const scaleVal = c.scale ?? 10
return {
...base,
weight: c.weight ?? 1,
scale: `${c.minScore ?? 1}-${c.maxScore ?? scaleVal}`,
}
}
if (type === 'text') {
return {
...base,
maxLength: c.maxLength ?? 1000,
placeholder: c.placeholder || '',
}
}
if (type === 'boolean') {
return {
...base,
trueLabel: c.trueLabel || 'Yes',
falseLabel: c.falseLabel || 'No',
}
}
// section_header
return base
})
// Auto-generate scalesJson from numeric criteria
const numericCriteria = criteriaJson.filter((c) => c.type === 'numeric')
const scaleSet = new Set(numericCriteria.map((c) => (c as { scale: string }).scale))
const scalesJson: Record<string, { min: number; max: number }> = {}
for (const scale of scaleSet) {
const [min, max] = scale.split('-').map(Number)

View File

@@ -1141,7 +1141,8 @@ export const projectRouter = router({
where: { projectId: input.id },
include: {
user: { select: { id: true, name: true, email: true, expertiseTags: true, profileImageKey: true, profileImageProvider: true } },
evaluation: { select: { status: true, submittedAt: true, globalScore: true, binaryDecision: true } },
round: { select: { id: true, name: true } },
evaluation: { select: { id: true, status: true, submittedAt: true, globalScore: true, binaryDecision: true, criterionScoresJson: true, feedbackText: true } },
},
orderBy: { createdAt: 'desc' },
}),

View File

@@ -21,7 +21,7 @@ import type { PrismaClient, Prisma } from '@prisma/client'
interface EvaluationForSummary {
id: string
criterionScoresJson: Record<string, number> | null
criterionScoresJson: Record<string, number | boolean | string> | null
globalScore: number | null
binaryDecision: boolean | null
feedbackText: string | null
@@ -35,7 +35,7 @@ interface EvaluationForSummary {
}
interface AnonymizedEvaluation {
criterionScores: Record<string, number> | null
criterionScores: Record<string, number | boolean | string> | null
globalScore: number | null
binaryDecision: boolean | null
feedbackText: string | null
@@ -44,6 +44,9 @@ interface AnonymizedEvaluation {
// One entry of a form's criteriaJson as consumed by the summary service.
interface CriterionDef {
id: string
label: string
// Absent type is treated as 'numeric' by consumers; 'section_header'
// entries are filtered out before building the prompt.
type?: 'numeric' | 'text' | 'boolean' | 'section_header'
// Display labels for boolean criteria; consumers fall back to 'Yes' / 'No'.
trueLabel?: string
falseLabel?: string
}
interface AIResponsePayload {
@@ -58,10 +61,21 @@ interface AIResponsePayload {
recommendation: string
}
// Aggregated yes/no tallies for a single boolean criterion.
interface BooleanStats {
// Number of evaluations answering true.
yesCount: number
// Number of evaluations answering false.
noCount: number
// yesCount + noCount (evaluations that answered this criterion).
total: number
// Rounded integer percentage: round(yesCount / total * 100).
yesPercent: number
// Display labels carried from the criterion definition ('Yes'/'No' defaults).
trueLabel: string
falseLabel: string
}
// Aggregate scoring statistics computed across all evaluations of a project.
interface ScoringPatterns {
// Mean of globalScore values; null when no global scores exist.
averageGlobalScore: number | null
// Agreement measure in [0, 1], rounded to two decimals.
consensus: number
// Per-criterion mean of numeric scores, keyed by criterion label.
criterionAverages: Record<string, number>
// Yes/no tallies for boolean criteria, keyed by criterion label.
booleanCriteria: Record<string, BooleanStats>
// Sanitized non-empty text answers for text criteria, keyed by criterion label.
textResponses: Record<string, string[]>
// Total number of evaluations considered.
evaluatorCount: number
}
@@ -84,7 +98,7 @@ export function anonymizeEvaluations(
evaluations: EvaluationForSummary[]
): AnonymizedEvaluation[] {
return evaluations.map((ev) => ({
criterionScores: ev.criterionScoresJson as Record<string, number> | null,
criterionScores: ev.criterionScoresJson as Record<string, number | boolean | string> | null,
globalScore: ev.globalScore,
binaryDecision: ev.binaryDecision,
feedbackText: ev.feedbackText ? sanitizeText(ev.feedbackText) : null,
@@ -99,15 +113,33 @@ export function anonymizeEvaluations(
export function buildSummaryPrompt(
anonymizedEvaluations: AnonymizedEvaluation[],
projectTitle: string,
criteriaLabels: string[]
criteriaDefinitions: CriterionDef[]
): string {
const sanitizedTitle = sanitizeText(projectTitle)
// Build a descriptive criteria section that explains each criterion type
const criteriaDescription = criteriaDefinitions
.filter((c) => c.type !== 'section_header')
.map((c) => {
const type = c.type || 'numeric'
if (type === 'boolean') {
return `- "${c.label}" (Yes/No decision: ${c.trueLabel || 'Yes'} / ${c.falseLabel || 'No'})`
}
if (type === 'text') {
return `- "${c.label}" (Free-text response)`
}
return `- "${c.label}" (Numeric score)`
})
.join('\n')
return `You are analyzing jury evaluations for a project competition.
PROJECT: "${sanitizedTitle}"
EVALUATION CRITERIA: ${criteriaLabels.join(', ')}
EVALUATION CRITERIA:
${criteriaDescription}
Note: criterionScores values may be numbers (numeric scores), booleans (true/false for yes/no criteria), or strings (text responses).
EVALUATIONS (${anonymizedEvaluations.length} total):
${JSON.stringify(anonymizedEvaluations, null, 2)}
@@ -123,17 +155,11 @@ Analyze these evaluations and return a JSON object with this exact structure:
"recommendation": "A brief recommendation based on the evaluation consensus"
}
Example output:
{
"overallAssessment": "The project received strong scores (avg 7.8/10) with high consensus among evaluators. Key strengths in innovation were balanced by concerns about scalability.",
"strengths": ["Innovative approach to coral reef monitoring", "Strong team expertise in marine biology"],
"weaknesses": ["Limited scalability plan", "Budget projections need more detail"],
"themes": [{"theme": "Innovation", "sentiment": "positive", "frequency": 3}, {"theme": "Scalability", "sentiment": "negative", "frequency": 2}],
"recommendation": "Recommended for advancement with condition to address scalability concerns in next round."
}
Guidelines:
- Base your analysis only on the provided evaluation data
- For numeric criteria, consider score averages and distribution
- For yes/no criteria, consider the proportion of yes vs no answers
- For text criteria, synthesize common themes from the responses
- Identify common themes across evaluator feedback
- Note areas of agreement and disagreement
- Keep the assessment objective and balanced
@@ -172,19 +198,63 @@ export function computeScoringPatterns(
consensus = Math.max(0, 1 - stdDev / 4.5)
}
// Criterion averages
// Criterion averages (numeric only)
const criterionAverages: Record<string, number> = {}
// Boolean criteria stats
const booleanCriteria: Record<string, BooleanStats> = {}
// Text responses
const textResponses: Record<string, string[]> = {}
for (const criterion of criteriaLabels) {
const scores: number[] = []
for (const ev of evaluations) {
const criterionScores = ev.criterionScoresJson as Record<string, number> | null
if (criterionScores && criterionScores[criterion.id] !== undefined) {
scores.push(criterionScores[criterion.id])
const type = criterion.type || 'numeric'
if (type === 'numeric') {
const scores: number[] = []
for (const ev of evaluations) {
const criterionScores = ev.criterionScoresJson as Record<string, number | boolean | string> | null
const val = criterionScores?.[criterion.id]
if (typeof val === 'number') {
scores.push(val)
}
}
if (scores.length > 0) {
criterionAverages[criterion.label] =
scores.reduce((a, b) => a + b, 0) / scores.length
}
} else if (type === 'boolean') {
let yesCount = 0
let noCount = 0
for (const ev of evaluations) {
const criterionScores = ev.criterionScoresJson as Record<string, number | boolean | string> | null
const val = criterionScores?.[criterion.id]
if (typeof val === 'boolean') {
if (val) yesCount++
else noCount++
}
}
const total = yesCount + noCount
if (total > 0) {
booleanCriteria[criterion.label] = {
yesCount,
noCount,
total,
yesPercent: Math.round((yesCount / total) * 100),
trueLabel: criterion.trueLabel || 'Yes',
falseLabel: criterion.falseLabel || 'No',
}
}
} else if (type === 'text') {
const responses: string[] = []
for (const ev of evaluations) {
const criterionScores = ev.criterionScoresJson as Record<string, number | boolean | string> | null
const val = criterionScores?.[criterion.id]
if (typeof val === 'string' && val.trim()) {
responses.push(sanitizeText(val))
}
}
if (responses.length > 0) {
textResponses[criterion.label] = responses
}
}
if (scores.length > 0) {
criterionAverages[criterion.label] =
scores.reduce((a, b) => a + b, 0) / scores.length
}
}
@@ -192,6 +262,8 @@ export function computeScoringPatterns(
averageGlobalScore,
consensus: Math.round(consensus * 100) / 100,
criterionAverages,
booleanCriteria,
textResponses,
evaluatorCount: evaluations.length,
}
}
@@ -266,7 +338,6 @@ export async function generateSummary({
const criteria: CriterionDef[] = form?.criteriaJson
? (form.criteriaJson as unknown as CriterionDef[])
: []
const criteriaLabels = criteria.map((c) => c.label)
// 2. Anonymize evaluations
const typedEvaluations = evaluations as unknown as EvaluationForSummary[]
@@ -282,7 +353,7 @@ export async function generateSummary({
}
const model = await getConfiguredModel(AI_MODELS.QUICK)
const prompt = buildSummaryPrompt(anonymized, project.title, criteriaLabels)
const prompt = buildSummaryPrompt(anonymized, project.title, criteria)
let aiResponse: AIResponsePayload
let tokensUsed = 0