Competition/Round architecture: full platform rewrite (Phases 1-9)
All checks were successful
Build and Push Docker Image / build (push) Successful in 7m45s

Replace Pipeline/Stage system with Competition/Round architecture.
New schema: Competition, Round (7 types), JuryGroup, AssignmentPolicy,
ProjectRoundState, DeliberationSession, ResultLock, SubmissionWindow.
New services: round-engine, round-assignment, deliberation, result-lock,
submission-manager, competition-context, ai-prompt-guard.
Full admin/jury/applicant/mentor UI rewrite. AI prompt hardening with
structured prompts, retry logic, and injection detection. All legacy
pipeline/stage code removed. 4 new migrations + seed aligned.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-15 23:04:15 +01:00
parent 9ab4717f96
commit 6ca39c976b
349 changed files with 69938 additions and 28767 deletions

View File

@@ -14,6 +14,7 @@
import { getOpenAI, getConfiguredModel, buildCompletionParams } from '@/lib/openai'
import { logAIUsage, extractTokenUsage } from '@/server/utils/ai-usage'
import { classifyAIError, createParseError, logAIError } from './ai-errors'
import { sanitizeUserInput } from '@/server/services/ai-prompt-guard'
import {
anonymizeProjectsForAI,
validateAnonymizedProjects,
@@ -27,10 +28,42 @@ import type { SubmissionSource } from '@prisma/client'
// Number of projects evaluated per AI request (presumably chunking input for
// processEligibilityBatch — usage not visible in this view; TODO confirm)
const BATCH_SIZE = 20
// NOTE(review): this is the pre-rewrite prompt shown by the diff view; it is
// superseded by the structured prompt declared below. The duplicate const name
// is a diff-rendering artifact, not live code.
const AI_ELIGIBILITY_SYSTEM_PROMPT = `Award eligibility evaluator. Evaluate projects against criteria, return JSON.
Format: {"evaluations": [{project_id, eligible: bool, confidence: 0-1, reasoning: str}]}
Be objective. Base evaluation only on provided data. No personal identifiers in reasoning.`
// Structured system prompt for award eligibility evaluation.
// Spells out the evaluator role, four scoring dimensions, an explicit JSON
// output schema (including per-dimension scores), and hard guidelines
// ("only provided data", eligibility threshold, no personal identifiers).
// This is the prompt-hardening half of this change: the user-supplied
// criteria text is separately passed through sanitizeUserInput() before it
// is interpolated into the user message, and the call runs in JSON mode at
// temperature 0.1 so output stays close to this schema.
const AI_ELIGIBILITY_SYSTEM_PROMPT = `You are an award eligibility evaluator for an ocean conservation competition.
## Your Role
Determine whether each project meets the criteria for a specific award category.
## Evaluation Dimensions
- Geographic Relevance: Does the project's location/focus match the award's geographic requirements?
- Category Fit: Does the project category align with the award criteria?
- Topic Alignment: Does the project's ocean issue focus match the award's thematic area?
- Maturity Level: Is the project at the right stage for this award?
## Output Format
Return a JSON object:
{
"evaluations": [
{
"project_id": "PROJECT_001",
"eligible": true/false,
"confidence": 0.0-1.0,
"reasoning": "2-3 sentence explanation covering key dimensions",
"dimensionScores": {
"geographic": 0.0-1.0,
"category": 0.0-1.0,
"topic": 0.0-1.0,
"maturity": 0.0-1.0
}
}
]
}
## Guidelines
- Base evaluation only on provided data — do not infer missing information
- eligible=true only when ALL required dimensions score above 0.5
- confidence reflects how clearly the data supports the determination
- No personal identifiers in reasoning`
// ─── Types ──────────────────────────────────────────────────────────────────
@@ -149,10 +182,17 @@ async function processEligibilityBatch(
const results: EligibilityResult[] = []
let tokensUsed = 0
const userPrompt = `CRITERIA: ${criteriaText}
// Sanitize user-supplied criteria
const { sanitized: safeCriteria } = sanitizeUserInput(criteriaText)
const userPrompt = `CRITERIA: ${safeCriteria}
PROJECTS: ${JSON.stringify(anonymized)}
Evaluate eligibility for each project.`
const MAX_PARSE_RETRIES = 2
let parseAttempts = 0
let response: Awaited<ReturnType<typeof openai.chat.completions.create>>
try {
const params = buildCompletionParams(model, {
messages: [
@@ -160,11 +200,11 @@ Evaluate eligibility for each project.`
{ role: 'user', content: userPrompt },
],
jsonMode: true,
temperature: 0.3,
temperature: 0.1,
maxTokens: 4000,
})
const response = await openai.chat.completions.create(params)
response = await openai.chat.completions.create(params)
const usage = extractTokenUsage(response)
tokensUsed = usage.totalTokens
@@ -183,12 +223,8 @@ Evaluate eligibility for each project.`
status: 'SUCCESS',
})
const content = response.choices[0]?.message?.content
if (!content) {
throw new Error('Empty response from AI')
}
const parsed = JSON.parse(content) as {
// Parse with retry logic
let parsed: {
evaluations: Array<{
project_id: string
eligible: boolean
@@ -197,6 +233,38 @@ Evaluate eligibility for each project.`
}>
}
while (true) {
try {
const content = response.choices[0]?.message?.content
if (!content) {
throw new Error('Empty response from AI')
}
parsed = JSON.parse(content)
break
} catch (parseError) {
if (parseError instanceof SyntaxError && parseAttempts < MAX_PARSE_RETRIES) {
parseAttempts++
console.warn(`[AI Eligibility] JSON parse failed, retrying (${parseAttempts}/${MAX_PARSE_RETRIES})`)
// Retry the API call with hint
const retryParams = buildCompletionParams(model, {
messages: [
{ role: 'system', content: AI_ELIGIBILITY_SYSTEM_PROMPT },
{ role: 'user', content: userPrompt + '\n\nIMPORTANT: Please ensure valid JSON output.' },
],
jsonMode: true,
temperature: 0.1,
maxTokens: 4000,
})
response = await openai.chat.completions.create(retryParams)
const retryUsage = extractTokenUsage(response)
tokensUsed += retryUsage.totalTokens
continue
}
throw parseError
}
}
// Map results back to real IDs
for (const eval_ of parsed.evaluations || []) {
const mapping = mappings.find((m) => m.anonymousId === eval_.project_id)