Integrate special award eligibility into AI filtering pass
All checks were successful
Build and Push Docker Image / build (push) Successful in 8m18s

Single AI call now evaluates both screening criteria AND award eligibility.
Awards with useAiEligibility and criteriaText are appended to the system prompt;
the AI returns award_matches per project, and the results auto-populate
AwardEligibility and auto-shortlist the top N. Re-running filtering clears and
re-evaluates awards.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-17 20:32:38 +01:00
parent 1fe6667400
commit 619206c03f
3 changed files with 268 additions and 17 deletions

View File

@@ -1915,7 +1915,7 @@ function AwardTracksSection({ competitionId, roundId }: { competitionId: string;
Special Award Tracks
</CardTitle>
<CardDescription>
Evaluate passed projects against special award criteria and manage shortlists
Award eligibility is evaluated automatically during AI filtering. Use Run Eligibility to re-evaluate.
</CardDescription>
</CardHeader>
<CardContent className="space-y-3">

View File

@@ -2,7 +2,8 @@ import { z } from 'zod'
import { TRPCError } from '@trpc/server'
import { Prisma, PrismaClient } from '@prisma/client'
import { router, adminProcedure, protectedProcedure } from '../trpc'
import { executeFilteringRules, type ProgressCallback } from '../services/ai-filtering'
import { executeFilteringRules, type ProgressCallback, type AwardCriteriaInput, type AwardMatchResult } from '../services/ai-filtering'
import { sanitizeUserInput } from '../services/ai-prompt-guard'
import { logAudit } from '../utils/audit'
import { isOpenAIConfigured, testOpenAIConnection } from '@/lib/openai'
import { prisma } from '@/lib/prisma'
@@ -58,11 +59,38 @@ export async function runFilteringJob(jobId: string, roundId: string, userId: st
// Get current round with config
const currentRound = await prisma.round.findUniqueOrThrow({
where: { id: roundId },
select: { id: true, name: true, configJson: true },
select: { id: true, name: true, configJson: true, competitionId: true },
})
const roundConfig = (currentRound.configJson as Record<string, unknown>) || {}
const aiParseFiles = !!roundConfig.aiParseFiles
// Load special awards for integrated AI evaluation
let awardsForAI: AwardCriteriaInput[] = []
if (currentRound.competitionId) {
const rawAwards = await prisma.specialAward.findMany({
where: {
competitionId: currentRound.competitionId,
useAiEligibility: true,
criteriaText: { not: null },
},
select: { id: true, name: true, criteriaText: true },
orderBy: { sortOrder: 'asc' },
})
for (const a of rawAwards) {
if (a.criteriaText && a.criteriaText.trim().length > 0) {
const { sanitized } = sanitizeUserInput(a.criteriaText)
awardsForAI.push({
awardId: a.id,
awardName: a.name,
criteriaText: sanitized,
})
}
}
if (awardsForAI.length > 0) {
console.log(`[Filtering] Including ${awardsForAI.length} special award(s) in AI evaluation`)
}
}
// Get projects in this round via ProjectRoundState
const projectStates = await prisma.projectRoundState.findMany({
where: {
@@ -189,7 +217,107 @@ export async function runFilteringJob(jobId: string, roundId: string, userId: st
})
)
)
// Upsert AwardEligibility for PASSED projects with award matches
if (awardsForAI.length > 0) {
const awardUpserts: Prisma.PrismaPromise<unknown>[] = []
for (const r of batchResults) {
if (r.outcome !== 'PASSED' || !r.awardMatches || r.awardMatches.length === 0) continue
for (const am of r.awardMatches) {
awardUpserts.push(
prisma.awardEligibility.upsert({
where: {
awardId_projectId: {
awardId: am.awardId,
projectId: r.projectId,
},
},
create: {
awardId: am.awardId,
projectId: r.projectId,
eligible: am.eligible,
method: 'AUTO',
qualityScore: am.qualityScore,
aiReasoningJson: { reasoning: am.reasoning, confidence: am.confidence },
},
update: {
eligible: am.eligible,
method: 'AUTO',
qualityScore: am.qualityScore,
aiReasoningJson: { reasoning: am.reasoning, confidence: am.confidence },
overriddenBy: null,
overriddenAt: null,
shortlisted: false,
confirmedAt: null,
confirmedBy: null,
},
})
)
}
}
if (awardUpserts.length > 0) {
await prisma.$transaction(awardUpserts)
}
}
}, awardsForAI)
// Auto-shortlist top-N per award and mark eligibility job as completed
if (awardsForAI.length > 0) {
// Collect all award matches from PASSED results
const awardMatchesByAward = new Map<string, Array<{ projectId: string; qualityScore: number }>>()
for (const r of results) {
if (r.outcome !== 'PASSED' || !r.awardMatches) continue
for (const am of r.awardMatches) {
if (!am.eligible) continue
const arr = awardMatchesByAward.get(am.awardId) || []
arr.push({ projectId: r.projectId, qualityScore: am.qualityScore })
awardMatchesByAward.set(am.awardId, arr)
}
}
// Load shortlistSize per award
const awardIds = awardsForAI.map((a) => a.awardId)
const awardsWithSize = await prisma.specialAward.findMany({
where: { id: { in: awardIds } },
select: { id: true, shortlistSize: true },
})
for (const award of awardsWithSize) {
const eligible = awardMatchesByAward.get(award.id) || []
const shortlistSize = award.shortlistSize ?? 10
const topN = eligible
.sort((a, b) => b.qualityScore - a.qualityScore)
.slice(0, shortlistSize)
if (topN.length > 0) {
await prisma.$transaction(
topN.map((e) =>
prisma.awardEligibility.update({
where: {
awardId_projectId: {
awardId: award.id,
projectId: e.projectId,
},
},
data: { shortlisted: true },
})
)
)
}
// Mark award eligibility job as completed
await prisma.specialAward.update({
where: { id: award.id },
data: {
eligibilityJobStatus: 'COMPLETED',
eligibilityJobDone: results.length,
eligibilityJobTotal: results.length,
},
})
}
console.log(`[Filtering] Auto-shortlisted for ${awardsWithSize.length} award(s)`)
}
// Count outcomes
const passedCount = results.filter((r) => r.outcome === 'PASSED').length
@@ -498,6 +626,37 @@ export const filteringRouter = router({
where: { roundId: input.roundId },
})
// Clear award eligibilities for awards linked to this competition
const roundForComp = await ctx.prisma.round.findUniqueOrThrow({
where: { id: input.roundId },
select: { competitionId: true },
})
if (roundForComp.competitionId) {
const linkedAwards = await ctx.prisma.specialAward.findMany({
where: {
competitionId: roundForComp.competitionId,
useAiEligibility: true,
},
select: { id: true },
})
const awardIds = linkedAwards.map((a) => a.id)
if (awardIds.length > 0) {
await ctx.prisma.awardEligibility.deleteMany({
where: { awardId: { in: awardIds } },
})
await ctx.prisma.specialAward.updateMany({
where: { id: { in: awardIds } },
data: {
eligibilityJobStatus: null,
eligibilityJobTotal: null,
eligibilityJobDone: null,
eligibilityJobError: null,
eligibilityJobStarted: null,
},
})
}
}
const job = await ctx.prisma.filteringJob.create({
data: {
roundId: input.roundId,

View File

@@ -98,6 +98,21 @@ export interface ProjectFilteringResult {
outcome: 'PASSED' | 'FILTERED_OUT' | 'FLAGGED'
ruleResults: RuleResult[]
aiScreeningJson?: Record<string, unknown>
awardMatches?: AwardMatchResult[]
}
/**
 * Criteria for one special award, passed into the AI screening pass so that
 * award eligibility can be evaluated in the same call as the main filtering.
 */
export type AwardCriteriaInput = {
  /** Database id of the special award (used to map AI results back). */
  awardId: string
  /** Human-readable award name, shown to the model in the prompt. */
  awardName: string
  /** Sanitized, admin-authored eligibility criteria text. */
  criteriaText: string
}
/**
 * One award-eligibility verdict for a single project, already mapped from the
 * model's 1-based award_index back to a real award id.
 */
export type AwardMatchResult = {
  /** Database id of the special award this verdict applies to. */
  awardId: string
  /** Whether the AI judged the project eligible for the award. */
  eligible: boolean
  /** Model-reported confidence in the verdict, 0.0–1.0. */
  confidence: number
  /** Fit score 0–100 used to rank eligible projects for auto-shortlisting. */
  qualityScore: number
  /** Short (1–2 sentence) explanation from the model. */
  reasoning: string
}
interface ProjectForFiltering {
@@ -381,6 +396,7 @@ interface AIScreeningResult {
reasoning: string
qualityScore: number
spamRisk: boolean
awardMatches?: AwardMatchResult[]
}
/**
@@ -393,7 +409,8 @@ async function processAIBatch(
anonymized: AnonymizedProjectForAI[],
mappings: ProjectAIMapping[],
userId?: string,
entityId?: string
entityId?: string,
awards?: AwardCriteriaInput[]
): Promise<{
results: Map<string, AIScreeningResult>
tokensUsed: number
@@ -404,6 +421,34 @@ async function processAIBatch(
// Sanitize user-supplied criteria
const { sanitized: safeCriteria } = sanitizeUserInput(criteriaText)
// Build system prompt — dynamically append awards block if any
let systemPrompt = AI_SCREENING_SYSTEM_PROMPT
if (awards && awards.length > 0) {
const awardsList = awards
.map((a, i) => `${i + 1}. ${a.awardName}: ${a.criteriaText}`)
.join('\n')
systemPrompt += `
## Special Award Evaluation (Additional Task)
In addition to the main screening, evaluate each project against these special awards.
Award eligibility is independent of the main screening outcome — a project can fail screening but still match an award.
### Awards
${awardsList}
Add an "award_matches" array to each project result:
"award_matches": [
{
"award_index": 1,
"eligible": true/false,
"confidence": 0.0-1.0,
"quality_score": 0-100,
"reasoning": "1-2 sentence explanation"
}
]
Include one entry per award for every project. quality_score (0-100) measures fit for ranking.`
}
// Build user prompt with clear structure
const userPrompt = `## Screening Criteria
The admin has defined the following requirements. Evaluate each project against ALL of these criteria:
@@ -418,16 +463,17 @@ Evaluate each project and return JSON with your assessment.`
const MAX_PARSE_RETRIES = 2
let parseAttempts = 0
let response: Awaited<ReturnType<typeof openai.chat.completions.create>>
const maxTokens = Math.min(8000, 4000 + (awards?.length ?? 0) * 500)
try {
const params = buildCompletionParams(model, {
messages: [
{ role: 'system', content: AI_SCREENING_SYSTEM_PROMPT },
{ role: 'system', content: systemPrompt },
{ role: 'user', content: userPrompt },
],
jsonMode: true,
temperature: 0.1,
maxTokens: 4000,
maxTokens,
})
response = await openai.chat.completions.create(params)
@@ -457,6 +503,13 @@ Evaluate each project and return JSON with your assessment.`
reasoning: string
quality_score: number
spam_risk: boolean
award_matches?: Array<{
award_index: number
eligible: boolean
confidence: number
quality_score: number
reasoning: string
}>
}>
}
@@ -476,12 +529,12 @@ Evaluate each project and return JSON with your assessment.`
// Retry the API call with hint
const retryParams = buildCompletionParams(model, {
messages: [
{ role: 'system', content: AI_SCREENING_SYSTEM_PROMPT },
{ role: 'system', content: systemPrompt },
{ role: 'user', content: userPrompt + '\n\nIMPORTANT: Please ensure valid JSON output.' },
],
jsonMode: true,
temperature: 0.1,
maxTokens: 4000,
maxTokens,
})
response = await openai.chat.completions.create(retryParams)
const retryUsage = extractTokenUsage(response)
@@ -496,12 +549,31 @@ Evaluate each project and return JSON with your assessment.`
for (const result of parsed.projects || []) {
const mapping = mappings.find((m) => m.anonymousId === result.project_id)
if (mapping) {
// Map award matches back to real award IDs
let awardMatches: AwardMatchResult[] | undefined
if (awards && awards.length > 0 && result.award_matches) {
awardMatches = []
for (const match of result.award_matches) {
const award = awards[(match.award_index ?? 1) - 1]
if (award) {
awardMatches.push({
awardId: award.awardId,
eligible: match.eligible,
confidence: match.confidence,
qualityScore: Math.max(0, Math.min(100, match.quality_score ?? 0)),
reasoning: match.reasoning ?? '',
})
}
}
}
results.set(mapping.realId, {
meetsCriteria: result.meets_criteria,
confidence: result.confidence,
reasoning: result.reasoning,
qualityScore: result.quality_score,
spamRisk: result.spam_risk,
awardMatches: awardMatches && awardMatches.length > 0 ? awardMatches : undefined,
})
}
}
@@ -550,7 +622,8 @@ export async function executeAIScreening(
userId?: string,
entityId?: string,
onProgress?: ProgressCallback,
onBatchComplete?: (batchResults: Map<string, AIScreeningResult>) => Promise<void>
onBatchComplete?: (batchResults: Map<string, AIScreeningResult>) => Promise<void>,
awards?: AwardCriteriaInput[]
): Promise<Map<string, AIScreeningResult>> {
const results = new Map<string, AIScreeningResult>()
@@ -623,7 +696,8 @@ export async function executeAIScreening(
batch.anon,
batch.maps,
userId,
entityId
entityId,
awards
)
return { batchResults, tokensUsed, index: batch.index }
})
@@ -705,7 +779,8 @@ export async function executeFilteringRules(
userId?: string,
roundId?: string,
onProgress?: ProgressCallback,
onResultsBatch?: (results: ProjectFilteringResult[]) => Promise<void>
onResultsBatch?: (results: ProjectFilteringResult[]) => Promise<void>,
awards?: AwardCriteriaInput[]
): Promise<ProjectFilteringResult[]> {
const activeRules = rules
.filter((r) => r.isActive)
@@ -750,7 +825,8 @@ export async function executeFilteringRules(
function computeProjectResult(
projectId: string,
aiRuleResults: Array<{ ruleId: string; ruleName: string; passed: boolean; action: string; reasoning?: string }>,
aiScreeningData: Record<string, unknown>
aiScreeningData: Record<string, unknown>,
awardMatches?: AwardMatchResult[]
): ProjectFilteringResult {
const nonAi = nonAiEval.get(projectId)!
const ruleResults: RuleResult[] = [...nonAi.ruleResults]
@@ -777,6 +853,7 @@ export async function executeFilteringRules(
outcome: hasFailed ? 'FILTERED_OUT' : hasFlagged ? 'FLAGGED' : 'PASSED',
ruleResults,
aiScreeningJson: Object.keys(aiScreeningData).length > 0 ? aiScreeningData : undefined,
awardMatches: awardMatches && awardMatches.length > 0 ? awardMatches : undefined,
}
}
@@ -820,13 +897,14 @@ export async function executeFilteringRules(
computeProjectResult(
projectId,
[{ ruleId: aiRule.id, ruleName: aiRule.name, passed, action: aiAction, reasoning: aiResult.reasoning }],
{ [aiRule.id]: aiResult }
{ [aiRule.id]: aiResult },
aiResult.awardMatches
)
)
}
allResults.push(...batchResults)
if (onResultsBatch) await onResultsBatch(batchResults)
})
}, awards)
return allResults
}
@@ -835,7 +913,7 @@ export async function executeFilteringRules(
const aiResults = new Map<string, Map<string, AIScreeningResult>>()
for (const aiRule of aiRules) {
const config = aiRule.configJson as unknown as AIScreeningConfig
const screeningResults = await executeAIScreening(config, projects, userId, roundId, onProgress)
const screeningResults = await executeAIScreening(config, projects, userId, roundId, onProgress, undefined, awards)
aiResults.set(aiRule.id, screeningResults)
}
@@ -870,7 +948,21 @@ export async function executeFilteringRules(
aiScreeningData[aiRule.id] = screening
}
}
results.push(computeProjectResult(project.id, aiRuleResults, aiScreeningData))
// Merge award matches from all AI rules (dedup by awardId, first wins)
const mergedAwardMatches: AwardMatchResult[] = []
const seenAwardIds = new Set<string>()
for (const aiRule of aiRules) {
const screening = aiResults.get(aiRule.id)?.get(project.id)
if (screening?.awardMatches) {
for (const m of screening.awardMatches) {
if (!seenAwardIds.has(m.awardId)) {
seenAwardIds.add(m.awardId)
mergedAwardMatches.push(m)
}
}
}
}
results.push(computeProjectResult(project.id, aiRuleResults, aiScreeningData, mergedAwardMatches))
}
if (onResultsBatch) await onResultsBatch(results)