Fix AI filtering bugs, add special award shortlist integration
All checks were successful
Build and Push Docker Image / build (push) Successful in 8m20s

Part 1 - Bug Fixes:
- Fix toProjectWithRelations() stripping file fields needed by AI (detectedLang, textContent, etc.)
- Fix parseAIData() reading aiScreeningJson as a flat object when it is actually nested under the rule ID
- Fix getAIConfidenceScore(), which had the same nesting issue (it always returned 0)

Part 2 - Special Award Track Integration:
- Add shortlistSize to SpecialAward, qualityScore/shortlisted/confirmed fields to AwardEligibility
- Add specialAwardId to Round for award-owned rounds
- Update AI eligibility service to return qualityScore (0-100) for ranking
- Update eligibility job with filteringRoundId scoping and auto-shortlist top N
- Add 8 new specialAward router procedures (listForRound, runEligibilityForRound,
  listShortlist, toggleShortlisted, confirmShortlist, listRounds, createRound, deleteRound)
- Create award-shortlist.tsx component with ranked table, shortlist checkboxes, confirm dialog
- Add "Special Award Tracks" section to filtering dashboard

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Matt
2026-02-17 15:38:31 +01:00
parent 6743119c4d
commit a02ed59158
10 changed files with 1308 additions and 309 deletions

View File

@@ -48,6 +48,7 @@ Return a JSON object:
"project_id": "PROJECT_001",
"eligible": true/false,
"confidence": 0.0-1.0,
"quality_score": 0-100,
"reasoning": "2-3 sentence explanation covering key dimensions",
"dimensionScores": {
"geographic": 0.0-1.0,
@@ -59,6 +60,8 @@ Return a JSON object:
]
}
quality_score is a 0-100 integer measuring how well the project fits the award criteria (used for ranking shortlists). 100 = perfect fit, 0 = no fit. Even ineligible projects should receive a score for reference.
## Guidelines
- Base evaluation only on provided data — do not infer missing information
- eligible=true only when ALL required dimensions score above 0.5
@@ -77,6 +80,7 @@ export interface EligibilityResult {
projectId: string
eligible: boolean
confidence: number
qualityScore: number
reasoning: string
method: 'AUTO' | 'AI'
}
@@ -229,6 +233,7 @@ Evaluate eligibility for each project.`
project_id: string
eligible: boolean
confidence: number
quality_score?: number
reasoning: string
}>
}
@@ -273,6 +278,7 @@ Evaluate eligibility for each project.`
projectId: mapping.realId,
eligible: eval_.eligible,
confidence: eval_.confidence,
qualityScore: Math.max(0, Math.min(100, eval_.quality_score ?? 0)),
reasoning: eval_.reasoning,
method: 'AI',
})
@@ -305,6 +311,7 @@ Evaluate eligibility for each project.`
projectId: mapping.realId,
eligible: false,
confidence: 0,
qualityScore: 0,
reasoning: 'AI response parse error — requires manual review',
method: 'AI',
})
@@ -333,6 +340,7 @@ export async function aiInterpretCriteria(
projectId: p.id,
eligible: false,
confidence: 0,
qualityScore: 0,
reasoning: 'AI unavailable — requires manual eligibility review',
method: 'AI' as const,
}))
@@ -401,6 +409,7 @@ export async function aiInterpretCriteria(
projectId: p.id,
eligible: false,
confidence: 0,
qualityScore: 0,
reasoning: `AI error: ${classified.message}`,
method: 'AI' as const,
}))

View File

@@ -510,7 +510,8 @@ export async function executeAIScreening(
projects: ProjectForFiltering[],
userId?: string,
entityId?: string,
onProgress?: ProgressCallback
onProgress?: ProgressCallback,
onBatchComplete?: (batchResults: Map<string, AIScreeningResult>) => Promise<void>
): Promise<Map<string, AIScreeningResult>> {
const results = new Map<string, AIScreeningResult>()
@@ -599,6 +600,17 @@ export async function executeAIScreening(
processedBatches++
}
// Emit batch results for streaming
if (onBatchComplete) {
const chunkResults = new Map<string, AIScreeningResult>()
for (const { batchResults: br } of parallelResults) {
for (const [id, result] of br) {
chunkResults.set(id, result)
}
}
await onBatchComplete(chunkResults)
}
// Report progress after each parallel chunk
if (onProgress) {
await onProgress({
@@ -653,43 +665,29 @@ export async function executeFilteringRules(
projects: ProjectForFiltering[],
userId?: string,
roundId?: string,
onProgress?: ProgressCallback
onProgress?: ProgressCallback,
onResultsBatch?: (results: ProjectFilteringResult[]) => Promise<void>
): Promise<ProjectFilteringResult[]> {
const activeRules = rules
.filter((r) => r.isActive)
.sort((a, b) => a.priority - b.priority)
// Separate AI screening rules (need batch processing)
const aiRules = activeRules.filter((r) => r.ruleType === 'AI_SCREENING')
const nonAiRules = activeRules.filter((r) => r.ruleType !== 'AI_SCREENING')
// Pre-compute AI screening results if needed
const aiResults = new Map<string, Map<string, AIScreeningResult>>()
for (const aiRule of aiRules) {
const config = aiRule.configJson as unknown as AIScreeningConfig
const screeningResults = await executeAIScreening(config, projects, userId, roundId, onProgress)
aiResults.set(aiRule.id, screeningResults)
}
// Evaluate each project
const results: ProjectFilteringResult[] = []
// Pre-evaluate non-AI rules for all projects (instant)
const nonAiEval = new Map<string, { ruleResults: RuleResult[]; hasFailed: boolean; hasFlagged: boolean }>()
for (const project of projects) {
const ruleResults: RuleResult[] = []
let hasFailed = false
let hasFlagged = false
// Evaluate non-AI rules
for (const rule of nonAiRules) {
let result: { passed: boolean; action: 'PASS' | 'REJECT' | 'FLAG' }
if (rule.ruleType === 'FIELD_BASED') {
const config = rule.configJson as unknown as FieldRuleConfig
result = evaluateFieldRule(config, project)
result = evaluateFieldRule(rule.configJson as unknown as FieldRuleConfig, project)
} else if (rule.ruleType === 'DOCUMENT_CHECK') {
const config = rule.configJson as unknown as DocumentCheckConfig
result = evaluateDocumentRule(config, project)
result = evaluateDocumentRule(rule.configJson as unknown as DocumentCheckConfig, project)
} else {
continue
}
@@ -701,65 +699,107 @@ export async function executeFilteringRules(
passed: result.passed,
action: result.action,
})
if (!result.passed) {
if (result.action === 'REJECT') hasFailed = true
if (result.action === 'FLAG') hasFlagged = true
}
}
nonAiEval.set(project.id, { ruleResults, hasFailed, hasFlagged })
}
// Helper: combine non-AI + AI results for a single project
function computeProjectResult(
projectId: string,
aiRuleResults: Array<{ ruleId: string; ruleName: string; passed: boolean; action: string; reasoning?: string }>,
aiScreeningData: Record<string, unknown>
): ProjectFilteringResult {
const nonAi = nonAiEval.get(projectId)!
const ruleResults: RuleResult[] = [...nonAi.ruleResults]
let hasFailed = nonAi.hasFailed
let hasFlagged = nonAi.hasFlagged
for (const ar of aiRuleResults) {
ruleResults.push({
ruleId: ar.ruleId,
ruleName: ar.ruleName,
ruleType: 'AI_SCREENING',
passed: ar.passed,
action: ar.action as 'PASS' | 'REJECT' | 'FLAG',
reasoning: ar.reasoning,
})
if (!ar.passed) {
if (ar.action === 'REJECT') hasFailed = true
else hasFlagged = true
}
}
return {
projectId,
outcome: hasFailed ? 'FILTERED_OUT' : hasFlagged ? 'FLAGGED' : 'PASSED',
ruleResults,
aiScreeningJson: Object.keys(aiScreeningData).length > 0 ? aiScreeningData : undefined,
}
}
// No AI rules → compute all results immediately
if (aiRules.length === 0) {
const results = projects.map((p) => computeProjectResult(p.id, [], {}))
if (onResultsBatch) await onResultsBatch(results)
return results
}
// Single AI rule → stream results per batch
if (aiRules.length === 1) {
const aiRule = aiRules[0]
const config = aiRule.configJson as unknown as AIScreeningConfig
const allResults: ProjectFilteringResult[] = []
await executeAIScreening(config, projects, userId, roundId, onProgress, async (batchAIResults) => {
const batchResults: ProjectFilteringResult[] = []
for (const [projectId, aiResult] of batchAIResults) {
const passed = aiResult.meetsCriteria && !aiResult.spamRisk
const aiAction = config.action || 'FLAG'
batchResults.push(
computeProjectResult(
projectId,
[{ ruleId: aiRule.id, ruleName: aiRule.name, passed, action: aiAction, reasoning: aiResult.reasoning }],
{ [aiRule.id]: aiResult }
)
)
}
allResults.push(...batchResults)
if (onResultsBatch) await onResultsBatch(batchResults)
})
return allResults
}
// Multiple AI rules → run all sequentially, then compute (no per-batch streaming)
const aiResults = new Map<string, Map<string, AIScreeningResult>>()
for (const aiRule of aiRules) {
const config = aiRule.configJson as unknown as AIScreeningConfig
const screeningResults = await executeAIScreening(config, projects, userId, roundId, onProgress)
aiResults.set(aiRule.id, screeningResults)
}
const results: ProjectFilteringResult[] = []
for (const project of projects) {
const aiRuleResults: Array<{ ruleId: string; ruleName: string; passed: boolean; action: string; reasoning?: string }> = []
const aiScreeningData: Record<string, unknown> = {}
// Evaluate AI rules
for (const aiRule of aiRules) {
const ruleScreening = aiResults.get(aiRule.id)
const screening = ruleScreening?.get(project.id)
const screening = aiResults.get(aiRule.id)?.get(project.id)
if (screening) {
const passed = screening.meetsCriteria && !screening.spamRisk
const aiConfig = aiRule.configJson as unknown as AIScreeningConfig
const aiAction = aiConfig?.action || 'FLAG'
ruleResults.push({
ruleId: aiRule.id,
ruleName: aiRule.name,
ruleType: 'AI_SCREENING',
passed,
action: aiAction,
reasoning: screening.reasoning,
})
if (!passed) {
if (aiAction === 'REJECT') hasFailed = true
else hasFlagged = true
}
}
}
// Determine overall outcome
let outcome: 'PASSED' | 'FILTERED_OUT' | 'FLAGGED'
if (hasFailed) {
outcome = 'FILTERED_OUT'
} else if (hasFlagged) {
outcome = 'FLAGGED'
} else {
outcome = 'PASSED'
}
// Collect AI screening data
const aiScreeningData: Record<string, unknown> = {}
for (const aiRule of aiRules) {
const screening = aiResults.get(aiRule.id)?.get(project.id)
if (screening) {
aiRuleResults.push({ ruleId: aiRule.id, ruleName: aiRule.name, passed, action: aiAction, reasoning: screening.reasoning })
aiScreeningData[aiRule.id] = screening
}
}
results.push({
projectId: project.id,
outcome,
ruleResults,
aiScreeningJson:
Object.keys(aiScreeningData).length > 0 ? aiScreeningData : undefined,
})
results.push(computeProjectResult(project.id, aiRuleResults, aiScreeningData))
}
if (onResultsBatch) await onResultsBatch(results)
return results
}

View File

@@ -141,7 +141,16 @@ export interface ProjectWithRelations {
teamMembers?: number
files?: number
}
files?: Array<{ fileType: FileType | null; size?: number; pageCount?: number | null }>
files?: Array<{
fileType: FileType | null
size?: number
pageCount?: number | null
detectedLang?: string
langConfidence?: number
roundName?: string
isCurrentRound?: boolean
textContent?: string
}>
}
/**
@@ -197,6 +206,11 @@ export function toProjectWithRelations(project: {
fileType: (f.fileType as FileType) ?? null,
size: f.size,
pageCount: f.pageCount ?? null,
detectedLang: f.detectedLang as string | undefined,
langConfidence: f.langConfidence as number | undefined,
roundName: f.roundName as string | undefined,
isCurrentRound: f.isCurrentRound as boolean | undefined,
textContent: f.textContent as string | undefined,
})) ?? [],
}
}

View File

@@ -14,7 +14,8 @@ const BATCH_SIZE = 20
export async function processEligibilityJob(
awardId: string,
includeSubmitted: boolean,
userId: string
userId: string,
filteringRoundId?: string
): Promise<void> {
try {
// Mark job as PROCESSING
@@ -23,27 +24,76 @@ export async function processEligibilityJob(
include: { program: true },
})
// Get projects
const statusFilter = includeSubmitted
? (['SUBMITTED', 'ELIGIBLE', 'ASSIGNED', 'SEMIFINALIST', 'FINALIST'] as const)
: (['ELIGIBLE', 'ASSIGNED', 'SEMIFINALIST', 'FINALIST'] as const)
// Get projects — scoped to filtering round PASSED projects if provided
let projects: Array<{
id: string
title: string
description: string | null
competitionCategory: string | null
country: string | null
geographicZone: string | null
tags: string[]
oceanIssue: string | null
}>
const projects = await prisma.project.findMany({
where: {
programId: award.programId,
status: { in: [...statusFilter] },
},
select: {
id: true,
title: true,
description: true,
competitionCategory: true,
country: true,
geographicZone: true,
tags: true,
oceanIssue: true,
},
})
if (filteringRoundId) {
// Scope to projects that PASSED filtering in the specified round
const passedResults = await prisma.filteringResult.findMany({
where: { roundId: filteringRoundId, outcome: 'PASSED' },
select: { projectId: true },
})
const passedIds = passedResults.map((r) => r.projectId)
if (passedIds.length === 0) {
await prisma.specialAward.update({
where: { id: awardId },
data: {
eligibilityJobStatus: 'COMPLETED',
eligibilityJobTotal: 0,
eligibilityJobDone: 0,
},
})
return
}
projects = await prisma.project.findMany({
where: {
id: { in: passedIds },
programId: award.programId,
},
select: {
id: true,
title: true,
description: true,
competitionCategory: true,
country: true,
geographicZone: true,
tags: true,
oceanIssue: true,
},
})
} else {
const statusFilter = includeSubmitted
? (['SUBMITTED', 'ELIGIBLE', 'ASSIGNED', 'SEMIFINALIST', 'FINALIST'] as const)
: (['ELIGIBLE', 'ASSIGNED', 'SEMIFINALIST', 'FINALIST'] as const)
projects = await prisma.project.findMany({
where: {
programId: award.programId,
status: { in: [...statusFilter] },
},
select: {
id: true,
title: true,
description: true,
competitionCategory: true,
country: true,
geographicZone: true,
tags: true,
oceanIssue: true,
},
})
}
if (projects.length === 0) {
await prisma.specialAward.update({
@@ -77,7 +127,7 @@ export async function processEligibilityJob(
// Phase 2: AI interpretation (if criteria text exists AND AI eligibility is enabled)
// Process in batches to avoid timeouts
let aiResults: Map<string, { eligible: boolean; confidence: number; reasoning: string }> | undefined
let aiResults: Map<string, { eligible: boolean; confidence: number; qualityScore: number; reasoning: string }> | undefined
if (award.criteriaText && award.useAiEligibility) {
aiResults = new Map()
@@ -90,6 +140,7 @@ export async function processEligibilityJob(
aiResults.set(e.projectId, {
eligible: e.eligible,
confidence: e.confidence,
qualityScore: e.qualityScore,
reasoning: e.reasoning,
})
}
@@ -123,8 +174,9 @@ export async function processEligibilityJob(
projectId: project.id,
eligible,
method,
qualityScore: aiEval?.qualityScore ?? null,
aiReasoningJson: aiEval
? { confidence: aiEval.confidence, reasoning: aiEval.reasoning }
? { confidence: aiEval.confidence, qualityScore: aiEval.qualityScore, reasoning: aiEval.reasoning }
: null,
}
})
@@ -144,19 +196,47 @@ export async function processEligibilityJob(
projectId: e.projectId,
eligible: e.eligible,
method: e.method as 'AUTO' | 'MANUAL',
qualityScore: e.qualityScore,
aiReasoningJson: e.aiReasoningJson ?? undefined,
},
update: {
eligible: e.eligible,
method: e.method as 'AUTO' | 'MANUAL',
qualityScore: e.qualityScore,
aiReasoningJson: e.aiReasoningJson ?? undefined,
overriddenBy: null,
overriddenAt: null,
shortlisted: false,
confirmedAt: null,
confirmedBy: null,
},
})
)
)
// Auto-shortlist top N eligible projects by qualityScore
const shortlistSize = award.shortlistSize ?? 10
const topEligible = eligibilities
.filter((e) => e.eligible && e.qualityScore != null)
.sort((a, b) => (b.qualityScore ?? 0) - (a.qualityScore ?? 0))
.slice(0, shortlistSize)
if (topEligible.length > 0) {
await prisma.$transaction(
topEligible.map((e) =>
prisma.awardEligibility.update({
where: {
awardId_projectId: {
awardId,
projectId: e.projectId,
},
},
data: { shortlisted: true },
})
)
)
}
// Mark as completed
await prisma.specialAward.update({
where: { id: awardId },