Competition/Round architecture: full platform rewrite (Phases 1-9)
All checks were successful
Build and Push Docker Image / build (push) Successful in 7m45s

Replace Pipeline/Stage system with Competition/Round architecture.
New schema: Competition, Round (7 types), JuryGroup, AssignmentPolicy,
ProjectRoundState, DeliberationSession, ResultLock, SubmissionWindow.
New services: round-engine, round-assignment, deliberation, result-lock,
submission-manager, competition-context, ai-prompt-guard.
Full admin/jury/applicant/mentor UI rewrite. AI prompt hardening with
structured prompts, retry logic, and injection detection. All legacy
pipeline/stage code removed. 4 new migrations + seed aligned.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-15 23:04:15 +01:00
parent 9ab4717f96
commit 6ca39c976b
349 changed files with 69938 additions and 28767 deletions

View File

@@ -31,10 +31,39 @@ import {
// Number of juror/project pairs sent to the model per API call.
// NOTE(review): presumably tuned against the 4000-token response cap used below — confirm.
const ASSIGNMENT_BATCH_SIZE = 15
// Legacy (pre-rewrite) system prompt: terse single-paragraph instructions.
// Replaced in this commit by the structured, sectioned prompt below.
const ASSIGNMENT_SYSTEM_PROMPT = `Match jurors to projects by expertise. Return JSON assignments.
Each: {juror_id, project_id, confidence_score: 0-1, expertise_match_score: 0-1, reasoning: str (1-2 sentences)}
Distribute workload fairly. Avoid assigning jurors at capacity.`
// Structured system prompt for AI-assisted jury assignment.
// Defines the model's role, explicit weighted matching criteria
// (expertise 50% / workload 30% / minimum-target 20%), the exact JSON
// shape expected back ({ assignments: [...] }), and guardrail guidance
// (capacity limits, diversity, score semantics). Used together with
// jsonMode and a low temperature so the reply parses as JSON downstream.
const ASSIGNMENT_SYSTEM_PROMPT = `You are an expert jury assignment optimizer for an ocean conservation competition.
## Your Role
Match jurors to projects based on expertise alignment, workload balance, and coverage requirements.
## Matching Criteria (Weighted)
- Expertise Match (50%): How well juror tags/expertise align with project topics
- Workload Balance (30%): Distribute assignments evenly; prefer jurors below capacity
- Minimum Target (20%): Prioritize jurors who haven't reached their minimum assignment count
## Output Format
Return a JSON object:
{
"assignments": [
{
"juror_id": "JUROR_001",
"project_id": "PROJECT_001",
"confidence_score": 0.0-1.0,
"expertise_match_score": 0.0-1.0,
"reasoning": "1-2 sentence justification"
}
]
}
## Guidelines
- Each project should receive the required number of reviews
- Do not assign jurors who are at or above their capacity
- Favor geographic and disciplinary diversity in assignments
- confidence_score reflects overall assignment quality; expertise_match_score reflects tag overlap only
- A strong match: shared expertise tags + available capacity + under minimum target
- An acceptable match: related domain + available capacity
- A poor match: no expertise overlap, only assigned for coverage`
// ─── Types ───────────────────────────────────────────────────────────────────
@@ -126,6 +155,10 @@ async function processAssignmentBatch(
batchMappings
)
const MAX_PARSE_RETRIES = 2
let parseAttempts = 0
let response: Awaited<ReturnType<typeof openai.chat.completions.create>>
try {
const params = buildCompletionParams(model, {
messages: [
@@ -133,11 +166,10 @@ async function processAssignmentBatch(
{ role: 'user', content: userPrompt },
],
jsonMode: true,
temperature: 0.3,
temperature: 0.1,
maxTokens: 4000,
})
let response
try {
response = await openai.chat.completions.create(params)
} catch (apiError) {
@@ -167,20 +199,8 @@ async function processAssignmentBatch(
status: 'SUCCESS',
})
const content = response.choices[0]?.message?.content
if (!content) {
// Check if response indicates an issue
const finishReason = response.choices[0]?.finish_reason
if (finishReason === 'content_filter') {
throw new Error('AI response was filtered. Try a different model or simplify the project descriptions.')
}
if (!response.choices || response.choices.length === 0) {
throw new Error(`No response from model "${model}". This model may not exist or may not be available. Please verify the model name.`)
}
throw new Error(`Empty response from AI model "${model}". The model may not support this type of request.`)
}
const parsed = JSON.parse(content) as {
// Parse with retry logic
let parsed: {
assignments: Array<{
juror_id: string
project_id: string
@@ -190,6 +210,46 @@ async function processAssignmentBatch(
}>
}
while (true) {
try {
const content = response.choices[0]?.message?.content
if (!content) {
// Check if response indicates an issue
const finishReason = response.choices[0]?.finish_reason
if (finishReason === 'content_filter') {
throw new Error('AI response was filtered. Try a different model or simplify the project descriptions.')
}
if (!response.choices || response.choices.length === 0) {
throw new Error(`No response from model "${model}". This model may not exist or may not be available. Please verify the model name.`)
}
throw new Error(`Empty response from AI model "${model}". The model may not support this type of request.`)
}
parsed = JSON.parse(content)
break
} catch (parseError) {
if (parseError instanceof SyntaxError && parseAttempts < MAX_PARSE_RETRIES) {
parseAttempts++
console.warn(`[AI Assignment] JSON parse failed, retrying (${parseAttempts}/${MAX_PARSE_RETRIES})`)
// Retry the API call with hint
const retryParams = buildCompletionParams(model, {
messages: [
{ role: 'system', content: ASSIGNMENT_SYSTEM_PROMPT },
{ role: 'user', content: userPrompt + '\n\nIMPORTANT: Please ensure valid JSON output.' },
],
jsonMode: true,
temperature: 0.1,
maxTokens: 4000,
})
response = await openai.chat.completions.create(retryParams)
const retryUsage = extractTokenUsage(response)
tokensUsed += retryUsage.totalTokens
continue
}
throw parseError
}
}
// De-anonymize and add to suggestions
const deanonymized = deanonymizeResults(
(parsed.assignments || []).map((a) => ({