Implement 10 platform features: evaluation UX, admin tools, AI summaries, applicant portal

Batch 1 - Quick Wins:
- F1: Evaluation progress indicator with touch tracking in sticky status bar
- F2: Export filtering results as CSV with dynamic AI column flattening
- F3: Observer access to analytics dashboards (8 procedures changed to observerProcedure)

Batch 2 - Jury Experience:
- F4: Countdown timer component with urgency colors + email reminder service with cron endpoint
- F5: Conflict of interest declaration system (dialog, admin management, review workflow)

Batch 3 - Admin & AI Enhancements:
- F6: Bulk status update UI with selection checkboxes, floating toolbar, status history recording
- F7: AI-powered evaluation summary with anonymized data, OpenAI integration, scoring patterns
- F8: Smart assignment improvements (geo diversity penalty, round familiarity bonus, COI blocking)

Batch 4 - Form Flexibility & Applicant Portal:
- F9: Evaluation form flexibility (text, boolean, section_header types, conditional visibility)
- F10: Applicant portal (status timeline, per-round documents, mentor messaging)

Schema: 5 new models (ReminderLog, ConflictOfInterest, EvaluationSummary, ProjectStatusHistory, MentorMessage), ProjectFile extended with roundId + isLate.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-05 21:58:27 +01:00
parent 002a9dbfc3
commit 699248e40b
38 changed files with 5437 additions and 533 deletions
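The schema additions above can be sketched as record shapes; the following TypeScript interfaces are reconstructed from how the services below read and write these models, not from the actual Prisma schema (any field not visible in this diff is omitted):

// Reconstructed from usage in the services below — illustrative, not the real Prisma schema.
interface EvaluationSummaryRecord {
  id: string
  projectId: string // forms the upsert key projectId_roundId together with roundId
  roundId: string
  summaryJson: unknown // AI payload merged with server-computed scoringPatterns
  generatedById: string
  generatedAt: Date
  model: string
  tokensUsed: number
}
interface ReminderLogRecord {
  roundId: string
  userId: string
  type: '3_DAYS' | '24H' | '1H' // one row per user/round/type guards against duplicate sends
}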

View File

@@ -0,0 +1,405 @@
/**
* AI-Powered Evaluation Summary Service
*
* Generates AI summaries of jury evaluations for a project in a given round.
* Combines OpenAI analysis with server-side scoring pattern calculations.
*
* GDPR Compliance:
* - All evaluation data is anonymized before AI processing
* - No juror names, emails, or identifiers are sent to OpenAI
* - Only scores, feedback text, and binary decisions are included
*/
import { TRPCError } from '@trpc/server'
import { getOpenAI, getConfiguredModel, buildCompletionParams, AI_MODELS } from '@/lib/openai'
import { logAIUsage, extractTokenUsage } from '@/server/utils/ai-usage'
import { classifyAIError, createParseError, logAIError } from './ai-errors'
import { sanitizeText } from './anonymization'
import type { PrismaClient, Prisma } from '@prisma/client'
// ─── Types ──────────────────────────────────────────────────────────────────
interface EvaluationForSummary {
id: string
criterionScoresJson: Record<string, number> | null
globalScore: number | null
binaryDecision: boolean | null
feedbackText: string | null
assignment: {
user: {
id: string
name: string | null
email: string
}
}
}
interface AnonymizedEvaluation {
criterionScores: Record<string, number> | null
globalScore: number | null
binaryDecision: boolean | null
feedbackText: string | null
}
interface CriterionDef {
id: string
label: string
}
interface AIResponsePayload {
overallAssessment: string
strengths: string[]
weaknesses: string[]
themes: Array<{
theme: string
sentiment: 'positive' | 'negative' | 'mixed'
frequency: number
}>
recommendation: string
}
interface ScoringPatterns {
averageGlobalScore: number | null
consensus: number
criterionAverages: Record<string, number>
evaluatorCount: number
}
export interface EvaluationSummaryResult {
id: string
projectId: string
roundId: string
summaryJson: AIResponsePayload & { scoringPatterns: ScoringPatterns }
generatedAt: Date
model: string
tokensUsed: number
}
// ─── Anonymization ──────────────────────────────────────────────────────────
/**
* Strip juror names/emails from evaluations, keeping only scores and feedback.
*/
export function anonymizeEvaluations(
evaluations: EvaluationForSummary[]
): AnonymizedEvaluation[] {
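// The assignment.user block (id, name, email) is deliberately not copied over;
// free-text feedback is additionally passed through sanitizeText to scrub inline identifiers.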
return evaluations.map((ev) => ({
criterionScores: ev.criterionScoresJson as Record<string, number> | null,
globalScore: ev.globalScore,
binaryDecision: ev.binaryDecision,
feedbackText: ev.feedbackText ? sanitizeText(ev.feedbackText) : null,
}))
}
// ─── Prompt Building ────────────────────────────────────────────────────────
/**
* Build the OpenAI prompt for evaluation summary generation.
*/
export function buildSummaryPrompt(
anonymizedEvaluations: AnonymizedEvaluation[],
projectTitle: string,
criteriaLabels: string[]
): string {
const sanitizedTitle = sanitizeText(projectTitle)
return `You are analyzing jury evaluations for a project competition.
PROJECT: "${sanitizedTitle}"
EVALUATION CRITERIA: ${criteriaLabels.join(', ')}
EVALUATIONS (${anonymizedEvaluations.length} total):
${JSON.stringify(anonymizedEvaluations, null, 2)}
Analyze these evaluations and return a JSON object with this exact structure:
{
"overallAssessment": "A 2-3 sentence summary of how the project was evaluated overall",
"strengths": ["strength 1", "strength 2", ...],
"weaknesses": ["weakness 1", "weakness 2", ...],
"themes": [
{ "theme": "theme name", "sentiment": "positive" | "negative" | "mixed", "frequency": <number of evaluators mentioning this> }
],
"recommendation": "A brief recommendation based on the evaluation consensus"
}
Guidelines:
- Base your analysis only on the provided evaluation data
- Identify common themes across evaluator feedback
- Note areas of agreement and disagreement
- Keep the assessment objective and balanced
- Do not include any personal identifiers`
}
// ─── Scoring Patterns (Server-Side) ─────────────────────────────────────────
/**
* Compute scoring patterns from evaluations without AI.
*/
export function computeScoringPatterns(
evaluations: EvaluationForSummary[],
criteria: CriterionDef[]
): ScoringPatterns {
const globalScores = evaluations
.map((e) => e.globalScore)
.filter((s): s is number => s !== null)
// Average global score
const averageGlobalScore =
globalScores.length > 0
? globalScores.reduce((a, b) => a + b, 0) / globalScores.length
: null
// Consensus: 1 - normalized standard deviation (1.0 = full consensus)
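// Worked example: scores [6, 8, 9] → mean ≈ 7.67, std dev ≈ 1.25, consensus ≈ 1 - 1.25/4.5 ≈ 0.72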
let consensus = 1
if (globalScores.length > 1 && averageGlobalScore !== null) {
const variance =
globalScores.reduce(
(sum, score) => sum + Math.pow(score - averageGlobalScore, 2),
0
) / globalScores.length
const stdDev = Math.sqrt(variance)
// Normalize by the scoring scale: on a 1-10 scale the max possible std dev is (10 - 1) / 2 = 4.5
consensus = Math.max(0, 1 - stdDev / 4.5)
}
// Criterion averages
const criterionAverages: Record<string, number> = {}
for (const criterion of criteria) {
const scores: number[] = []
for (const ev of evaluations) {
const criterionScores = ev.criterionScoresJson as Record<string, number> | null
if (criterionScores && criterionScores[criterion.id] !== undefined) {
scores.push(criterionScores[criterion.id])
}
}
if (scores.length > 0) {
criterionAverages[criterion.label] =
scores.reduce((a, b) => a + b, 0) / scores.length
}
}
return {
averageGlobalScore,
consensus: Math.round(consensus * 100) / 100,
criterionAverages,
evaluatorCount: evaluations.length,
}
}
// ─── Main Orchestrator ──────────────────────────────────────────────────────
/**
* Generate an AI-powered evaluation summary for a project in a round.
*/
export async function generateSummary({
projectId,
roundId,
userId,
prisma,
}: {
projectId: string
roundId: string
userId: string
prisma: PrismaClient
}): Promise<EvaluationSummaryResult> {
// 1. Fetch project with evaluations and form criteria
const project = await prisma.project.findUnique({
where: { id: projectId },
select: {
id: true,
title: true,
roundId: true,
},
})
if (!project) {
throw new TRPCError({ code: 'NOT_FOUND', message: 'Project not found' })
}
// Fetch submitted evaluations for this project in this round
const evaluations = await prisma.evaluation.findMany({
where: {
status: 'SUBMITTED',
assignment: {
projectId,
roundId,
},
},
select: {
id: true,
criterionScoresJson: true,
globalScore: true,
binaryDecision: true,
feedbackText: true,
assignment: {
select: {
user: {
select: { id: true, name: true, email: true },
},
},
},
},
})
if (evaluations.length === 0) {
throw new TRPCError({
code: 'BAD_REQUEST',
message: 'No submitted evaluations found for this project in this round',
})
}
// Get evaluation form criteria for this round
const form = await prisma.evaluationForm.findFirst({
where: { roundId, isActive: true },
select: { criteriaJson: true },
})
const criteria: CriterionDef[] = form?.criteriaJson
? (form.criteriaJson as unknown as CriterionDef[])
: []
const criteriaLabels = criteria.map((c) => c.label)
// 2. Anonymize evaluations
const typedEvaluations = evaluations as unknown as EvaluationForSummary[]
const anonymized = anonymizeEvaluations(typedEvaluations)
// 3. Build prompt and call OpenAI
const openai = await getOpenAI()
if (!openai) {
throw new TRPCError({
code: 'PRECONDITION_FAILED',
message: 'OpenAI is not configured. Please set up your API key in Settings.',
})
}
const model = await getConfiguredModel(AI_MODELS.QUICK)
const prompt = buildSummaryPrompt(anonymized, project.title, criteriaLabels)
let aiResponse: AIResponsePayload
let tokensUsed = 0
try {
const params = buildCompletionParams(model, {
messages: [
{ role: 'user', content: prompt },
],
jsonMode: true,
temperature: 0.3,
maxTokens: 2000,
})
const response = await openai.chat.completions.create(params)
const usage = extractTokenUsage(response)
tokensUsed = usage.totalTokens
const content = response.choices[0]?.message?.content
if (!content) {
throw new Error('Empty response from AI')
}
aiResponse = JSON.parse(content) as AIResponsePayload
} catch (error) {
if (error instanceof SyntaxError) {
const parseError = createParseError(error.message)
logAIError('EvaluationSummary', 'generateSummary', parseError)
await logAIUsage({
userId,
action: 'EVALUATION_SUMMARY',
entityType: 'Project',
entityId: projectId,
model,
promptTokens: 0,
completionTokens: 0,
totalTokens: tokensUsed,
itemsProcessed: 0,
status: 'ERROR',
errorMessage: parseError.message,
})
throw new TRPCError({
code: 'INTERNAL_SERVER_ERROR',
message: 'Failed to parse AI response. Please try again.',
})
}
const classified = classifyAIError(error)
logAIError('EvaluationSummary', 'generateSummary', classified)
await logAIUsage({
userId,
action: 'EVALUATION_SUMMARY',
entityType: 'Project',
entityId: projectId,
model,
promptTokens: 0,
completionTokens: 0,
totalTokens: 0,
itemsProcessed: 0,
status: 'ERROR',
errorMessage: classified.message,
})
throw new TRPCError({
code: 'INTERNAL_SERVER_ERROR',
message: classified.message,
})
}
// 4. Compute scoring patterns (server-side, no AI)
const scoringPatterns = computeScoringPatterns(typedEvaluations, criteria)
// 5. Merge and upsert
const summaryJson = {
...aiResponse,
scoringPatterns,
}
const summaryJsonValue = summaryJson as unknown as Prisma.InputJsonValue
const summary = await prisma.evaluationSummary.upsert({
where: {
projectId_roundId: { projectId, roundId },
},
create: {
projectId,
roundId,
summaryJson: summaryJsonValue,
generatedById: userId,
model,
tokensUsed,
},
update: {
summaryJson: summaryJsonValue,
generatedAt: new Date(),
generatedById: userId,
model,
tokensUsed,
},
})
// 6. Log AI usage
await logAIUsage({
userId,
action: 'EVALUATION_SUMMARY',
entityType: 'Project',
entityId: projectId,
model,
promptTokens: 0, // Detailed breakdown not always available
completionTokens: 0,
totalTokens: tokensUsed,
itemsProcessed: evaluations.length,
status: 'SUCCESS',
})
return {
id: summary.id,
projectId: summary.projectId,
roundId: summary.roundId,
summaryJson: summaryJson as AIResponsePayload & { scoringPatterns: ScoringPatterns },
generatedAt: summary.generatedAt,
model: summary.model,
tokensUsed: summary.tokensUsed,
}
}
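A minimal sketch of how generateSummary might be exposed over tRPC; the router name, adminProcedure helper, context shape, and import paths below are illustrative assumptions, not part of this commit:

import { z } from 'zod'
import { router, adminProcedure } from '@/server/trpc' // assumed helper location
import { generateSummary } from './evaluation-summary' // assumed module name
export const evaluationSummaryRouter = router({
  generate: adminProcedure
    .input(z.object({ projectId: z.string(), roundId: z.string() }))
    .mutation(({ ctx, input }) =>
      generateSummary({
        projectId: input.projectId,
        roundId: input.roundId,
        userId: ctx.session.user.id, // assumes a NextAuth-style session on ctx
        prisma: ctx.prisma,
      })
    ),
})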

View File

@@ -0,0 +1,178 @@
import { prisma } from '@/lib/prisma'
import { sendStyledNotificationEmail } from '@/lib/email'
const REMINDER_TYPES = [
{ type: '3_DAYS', thresholdMs: 3 * 24 * 60 * 60 * 1000 },
{ type: '24H', thresholdMs: 24 * 60 * 60 * 1000 },
{ type: '1H', thresholdMs: 60 * 60 * 1000 },
] as const
type ReminderType = (typeof REMINDER_TYPES)[number]['type']
interface ReminderResult {
sent: number
errors: number
}
/**
* Find active rounds with approaching voting deadlines and send reminders
* to jurors who have incomplete assignments.
*/
export async function processEvaluationReminders(roundId?: string): Promise<ReminderResult> {
const now = new Date()
let totalSent = 0
let totalErrors = 0
// Find active rounds with voting end dates in the future
const rounds = await prisma.round.findMany({
where: {
status: 'ACTIVE',
votingEndAt: { gt: now },
votingStartAt: { lte: now },
...(roundId && { id: roundId }),
},
select: {
id: true,
name: true,
votingEndAt: true,
program: { select: { name: true } },
},
})
for (const round of rounds) {
if (!round.votingEndAt) continue
const msUntilDeadline = round.votingEndAt.getTime() - now.getTime()
// Determine which reminder types should fire for this round
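// e.g. 36h before the deadline only 3_DAYS applies (36h ≤ 72h, but > 24h);
// the ReminderLog dedup in sendRemindersForRound prevents re-sending a type already delivered.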
const applicableTypes = REMINDER_TYPES.filter(
({ thresholdMs }) => msUntilDeadline <= thresholdMs
)
if (applicableTypes.length === 0) continue
for (const { type } of applicableTypes) {
const result = await sendRemindersForRound(round, type, now)
totalSent += result.sent
totalErrors += result.errors
}
}
return { sent: totalSent, errors: totalErrors }
}
async function sendRemindersForRound(
round: {
id: string
name: string
votingEndAt: Date | null
program: { name: string }
},
type: ReminderType,
now: Date
): Promise<ReminderResult> {
let sent = 0
let errors = 0
if (!round.votingEndAt) return { sent, errors }
// Find jurors with incomplete assignments for this round
const incompleteAssignments = await prisma.assignment.findMany({
where: {
roundId: round.id,
isCompleted: false,
},
select: {
userId: true,
},
})
// Get unique user IDs with incomplete work
const userIds = [...new Set(incompleteAssignments.map((a) => a.userId))]
if (userIds.length === 0) return { sent, errors }
// Check which users already received this reminder type for this round
const existingReminders = await prisma.reminderLog.findMany({
where: {
roundId: round.id,
type,
userId: { in: userIds },
},
select: { userId: true },
})
const alreadySent = new Set(existingReminders.map((r) => r.userId))
const usersToNotify = userIds.filter((id) => !alreadySent.has(id))
if (usersToNotify.length === 0) return { sent, errors }
// Get user details and their pending counts
const users = await prisma.user.findMany({
where: { id: { in: usersToNotify } },
select: { id: true, name: true, email: true },
})
const baseUrl = process.env.NEXTAUTH_URL || 'https://monaco-opc.com'
const deadlineStr = round.votingEndAt.toLocaleDateString('en-US', {
weekday: 'long',
year: 'numeric',
month: 'long',
day: 'numeric',
hour: '2-digit',
minute: '2-digit',
timeZoneName: 'short',
})
// Map to get pending count per user
const pendingCounts = new Map<string, number>()
for (const a of incompleteAssignments) {
pendingCounts.set(a.userId, (pendingCounts.get(a.userId) || 0) + 1)
}
// Select email template type based on reminder type
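// (3_DAYS and 24H share the REMINDER_24H template; only 1H gets its own)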
const emailTemplateType = type === '1H' ? 'REMINDER_1H' : 'REMINDER_24H'
for (const user of users) {
const pendingCount = pendingCounts.get(user.id) || 0
if (pendingCount === 0) continue
try {
await sendStyledNotificationEmail(
user.email,
user.name || '',
emailTemplateType,
{
name: user.name || undefined,
title: `Evaluation Reminder - ${round.name}`,
message: `You have ${pendingCount} pending evaluation${pendingCount !== 1 ? 's' : ''} for ${round.name}.`,
linkUrl: `${baseUrl}/jury/assignments?round=${round.id}`,
metadata: {
pendingCount,
roundName: round.name,
deadline: deadlineStr,
},
}
)
// Log the sent reminder
await prisma.reminderLog.create({
data: {
roundId: round.id,
userId: user.id,
type,
},
})
sent++
} catch (error) {
console.error(
`Failed to send ${type} reminder to ${user.email} for round ${round.name}:`,
error
)
errors++
}
}
return { sent, errors }
}
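F4 pairs this service with a cron endpoint; a minimal sketch of such a Next.js route handler, assuming a shared CRON_SECRET bearer token and an illustrative file path (the actual route and auth scheme are not shown in this diff):

// app/api/cron/evaluation-reminders/route.ts — path and import specifier are assumptions
import { NextResponse } from 'next/server'
import { processEvaluationReminders } from '@/server/services/evaluation-reminders'
export async function GET(request: Request) {
  // Reject callers that do not present the shared cron secret
  if (request.headers.get('authorization') !== `Bearer ${process.env.CRON_SECRET}`) {
    return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
  }
  const { sent, errors } = await processEvaluationReminders()
  return NextResponse.json({ sent, errors })
}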

View File

@@ -6,13 +6,18 @@
* - Bio/description match (text similarity)
* - Workload balance
* - Country match (mentors only)
* - Geographic diversity penalty (prevents clustering by country)
* - Previous round familiarity bonus (continuity across rounds)
* - COI penalty (conflict of interest hard-block)
*
* Score Breakdown:
* - Tag overlap: 0-40 points (weighted by confidence)
* - Bio match: 0-15 points (if bio exists)
* - Workload balance: 0-25 points
* - Country match: 0-15 points (mentors only)
* - Reserved: 0-5 points (future AI boost)
* - Geo diversity: -15 per excess same-country assignment (threshold: 2)
* - Previous round familiarity: +10 if reviewed in earlier round
* - COI: juror skipped entirely if conflict declared
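*
* Example total: tag 24 + bio 10 + workload 20 + country 0 + geo -15 + familiarity 10 = 49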
*/
import { prisma } from '@/lib/prisma'
@@ -24,6 +29,9 @@ export interface ScoreBreakdown {
bioMatch: number
workloadBalance: number
countryMatch: number
geoDiversityPenalty: number
previousRoundFamiliarity: number
coiPenalty: number
}
export interface AssignmentScore {
@@ -52,6 +60,12 @@ const MAX_WORKLOAD_SCORE = 25
const MAX_COUNTRY_SCORE = 15
const POINTS_PER_TAG_MATCH = 8
// New scoring factors
const GEO_DIVERSITY_THRESHOLD = 2
const GEO_DIVERSITY_PENALTY_PER_EXCESS = -15
const PREVIOUS_ROUND_FAMILIARITY_BONUS = 10
// COI jurors are skipped entirely rather than penalized (effectively -Infinity)
// Common words to exclude from bio matching
const STOP_WORDS = new Set([
'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with',
@@ -284,10 +298,68 @@ export async function getSmartSuggestions(options: {
existingAssignments.map((a) => `${a.userId}:${a.projectId}`)
)
// ── Batch-query data for new scoring factors ──────────────────────────────
// 1. Geographic diversity: per-juror country distribution for existing assignments
const assignmentsWithCountry = await prisma.assignment.findMany({
where: { roundId },
select: {
userId: true,
project: { select: { country: true } },
},
})
// Build map: userId -> { country -> count }
const userCountryDistribution = new Map<string, Map<string, number>>()
for (const a of assignmentsWithCountry) {
const country = a.project.country?.toLowerCase().trim()
if (!country) continue
let countryMap = userCountryDistribution.get(a.userId)
if (!countryMap) {
countryMap = new Map()
userCountryDistribution.set(a.userId, countryMap)
}
countryMap.set(country, (countryMap.get(country) || 0) + 1)
}
// 2. Previous round familiarity: find assignments in earlier rounds of the same program
const currentRound = await prisma.round.findUnique({
where: { id: roundId },
select: { programId: true, sortOrder: true },
})
const previousRoundAssignmentPairs = new Set<string>()
if (currentRound) {
const previousAssignments = await prisma.assignment.findMany({
where: {
round: {
programId: currentRound.programId,
sortOrder: { lt: currentRound.sortOrder },
},
},
select: { userId: true, projectId: true },
})
for (const pa of previousAssignments) {
previousRoundAssignmentPairs.add(`${pa.userId}:${pa.projectId}`)
}
}
// 3. COI declarations: all active conflicts for this round
const coiRecords = await prisma.conflictOfInterest.findMany({
where: {
roundId,
hasConflict: true,
},
select: { userId: true, projectId: true },
})
const coiPairs = new Set(
coiRecords.map((c) => `${c.userId}:${c.projectId}`)
)
// ── Calculate target assignments per user ─────────────────────────────────
const targetPerUser = Math.ceil(projects.length / users.length)
// ── Calculate scores for all user-project pairs ───────────────────────────
const suggestions: AssignmentScore[] = []
for (const user of users) {
@@ -304,6 +376,11 @@ export async function getSmartSuggestions(options: {
continue
}
// COI check - skip juror entirely for this project if COI declared
if (coiPairs.has(pairKey)) {
continue
}
// Get project tags data
const projectTags: ProjectTagData[] = project.projectTags.map((pt) => ({
tagId: pt.tagId,
@@ -311,13 +388,12 @@ export async function getSmartSuggestions(options: {
confidence: pt.confidence,
}))
// Calculate existing scores
const { score: tagScore, matchingTags } = calculateTagOverlapScore(
user.expertiseTags,
projectTags
)
// Bio match (only if user has a bio)
const { score: bioScore, matchingKeywords } = calculateBioMatchScore(
user.bio,
project.description
@@ -329,13 +405,39 @@ export async function getSmartSuggestions(options: {
user.maxAssignments
)
// Country match only for mentors
const countryScore =
type === 'mentor'
? calculateCountryMatchScore(user.country, project.country)
: 0
// ── New scoring factors ─────────────────────────────────────────────
// Geographic diversity penalty
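// e.g. a juror already holding 3 assignments from this project's country:
// 3 ≥ threshold (2), so penalty = -15 × (3 - 2 + 1) = -30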
let geoDiversityPenalty = 0
const projectCountry = project.country?.toLowerCase().trim()
if (projectCountry) {
const countryMap = userCountryDistribution.get(user.id)
const sameCountryCount = countryMap?.get(projectCountry) || 0
if (sameCountryCount >= GEO_DIVERSITY_THRESHOLD) {
geoDiversityPenalty =
GEO_DIVERSITY_PENALTY_PER_EXCESS *
(sameCountryCount - GEO_DIVERSITY_THRESHOLD + 1)
}
}
// Previous round familiarity bonus
let previousRoundFamiliarity = 0
if (previousRoundAssignmentPairs.has(pairKey)) {
previousRoundFamiliarity = PREVIOUS_ROUND_FAMILIARITY_BONUS
}
const totalScore =
tagScore +
bioScore +
workloadScore +
countryScore +
geoDiversityPenalty +
previousRoundFamiliarity
// Build reasoning
const reasoning: string[] = []
@@ -353,6 +455,12 @@ export async function getSmartSuggestions(options: {
if (countryScore > 0) {
reasoning.push('Same country')
}
if (geoDiversityPenalty < 0) {
reasoning.push(`Geo diversity penalty (${geoDiversityPenalty})`)
}
if (previousRoundFamiliarity > 0) {
reasoning.push('Reviewed in previous round (+10)')
}
suggestions.push({
userId: user.id,
@@ -366,6 +474,9 @@ export async function getSmartSuggestions(options: {
bioMatch: bioScore,
workloadBalance: workloadScore,
countryMatch: countryScore,
geoDiversityPenalty,
previousRoundFamiliarity,
coiPenalty: 0, // COI jurors are skipped entirely
},
reasoning,
matchingTags,
@@ -488,6 +599,9 @@ export async function getMentorSuggestionsForProject(
bioMatch: bioScore,
workloadBalance: workloadScore,
countryMatch: countryScore,
geoDiversityPenalty: 0,
previousRoundFamiliarity: 0,
coiPenalty: 0,
},
reasoning,
matchingTags,