diff --git a/src/components/admin/round/filtering-dashboard.tsx b/src/components/admin/round/filtering-dashboard.tsx
index 7f9cfa2..4828a13 100644
--- a/src/components/admin/round/filtering-dashboard.tsx
+++ b/src/components/admin/round/filtering-dashboard.tsx
@@ -1915,7 +1915,7 @@ function AwardTracksSection({ competitionId, roundId }: { competitionId: string;
Special Award Tracks
- Evaluate passed projects against special award criteria and manage shortlists
+ Award eligibility is evaluated automatically during AI filtering. Use Run Eligibility to re-evaluate.
diff --git a/src/server/routers/filtering.ts b/src/server/routers/filtering.ts
index 841f1a4..43bd179 100644
--- a/src/server/routers/filtering.ts
+++ b/src/server/routers/filtering.ts
@@ -2,7 +2,8 @@ import { z } from 'zod'
import { TRPCError } from '@trpc/server'
import { Prisma, PrismaClient } from '@prisma/client'
import { router, adminProcedure, protectedProcedure } from '../trpc'
-import { executeFilteringRules, type ProgressCallback } from '../services/ai-filtering'
+import { executeFilteringRules, type ProgressCallback, type AwardCriteriaInput, type AwardMatchResult } from '../services/ai-filtering'
+import { sanitizeUserInput } from '../services/ai-prompt-guard'
import { logAudit } from '../utils/audit'
import { isOpenAIConfigured, testOpenAIConnection } from '@/lib/openai'
import { prisma } from '@/lib/prisma'
@@ -58,11 +59,38 @@ export async function runFilteringJob(jobId: string, roundId: string, userId: st
// Get current round with config
const currentRound = await prisma.round.findUniqueOrThrow({
where: { id: roundId },
- select: { id: true, name: true, configJson: true },
+ select: { id: true, name: true, configJson: true, competitionId: true },
})
const roundConfig = (currentRound.configJson as Record<string, unknown>) || {}
const aiParseFiles = !!roundConfig.aiParseFiles
+ // Load special awards for integrated AI evaluation
+ let awardsForAI: AwardCriteriaInput[] = []
+ if (currentRound.competitionId) {
+ const rawAwards = await prisma.specialAward.findMany({
+ where: {
+ competitionId: currentRound.competitionId,
+ useAiEligibility: true,
+ criteriaText: { not: null },
+ },
+ select: { id: true, name: true, criteriaText: true },
+ orderBy: { sortOrder: 'asc' },
+ })
+ for (const a of rawAwards) {
+ if (a.criteriaText && a.criteriaText.trim().length > 0) {
+ const { sanitized } = sanitizeUserInput(a.criteriaText)
+ awardsForAI.push({
+ awardId: a.id,
+ awardName: a.name,
+ criteriaText: sanitized,
+ })
+ }
+ }
+ if (awardsForAI.length > 0) {
+ console.log(`[Filtering] Including ${awardsForAI.length} special award(s) in AI evaluation`)
+ }
+ }
+
// Get projects in this round via ProjectRoundState
const projectStates = await prisma.projectRoundState.findMany({
where: {
@@ -189,7 +217,107 @@ export async function runFilteringJob(jobId: string, roundId: string, userId: st
})
)
)
- })
+
+ // Upsert AwardEligibility for PASSED projects with award matches
+ if (awardsForAI.length > 0) {
+ const awardUpserts: Prisma.PrismaPromise<unknown>[] = []
+ for (const r of batchResults) {
+ if (r.outcome !== 'PASSED' || !r.awardMatches || r.awardMatches.length === 0) continue
+ for (const am of r.awardMatches) {
+ awardUpserts.push(
+ prisma.awardEligibility.upsert({
+ where: {
+ awardId_projectId: {
+ awardId: am.awardId,
+ projectId: r.projectId,
+ },
+ },
+ create: {
+ awardId: am.awardId,
+ projectId: r.projectId,
+ eligible: am.eligible,
+ method: 'AUTO',
+ qualityScore: am.qualityScore,
+ aiReasoningJson: { reasoning: am.reasoning, confidence: am.confidence },
+ },
+ update: {
+ eligible: am.eligible,
+ method: 'AUTO',
+ qualityScore: am.qualityScore,
+ aiReasoningJson: { reasoning: am.reasoning, confidence: am.confidence },
+ overriddenBy: null,
+ overriddenAt: null,
+ shortlisted: false,
+ confirmedAt: null,
+ confirmedBy: null,
+ },
+ })
+ )
+ }
+ }
+ if (awardUpserts.length > 0) {
+ await prisma.$transaction(awardUpserts)
+ }
+ }
+ }, awardsForAI)
+
+ // Auto-shortlist top-N per award and mark eligibility job as completed
+ if (awardsForAI.length > 0) {
+ // Collect all award matches from PASSED results
+ const awardMatchesByAward = new Map<string, { projectId: string; qualityScore: number }[]>()
+ for (const r of results) {
+ if (r.outcome !== 'PASSED' || !r.awardMatches) continue
+ for (const am of r.awardMatches) {
+ if (!am.eligible) continue
+ const arr = awardMatchesByAward.get(am.awardId) || []
+ arr.push({ projectId: r.projectId, qualityScore: am.qualityScore })
+ awardMatchesByAward.set(am.awardId, arr)
+ }
+ }
+
+ // Load shortlistSize per award
+ const awardIds = awardsForAI.map((a) => a.awardId)
+ const awardsWithSize = await prisma.specialAward.findMany({
+ where: { id: { in: awardIds } },
+ select: { id: true, shortlistSize: true },
+ })
+
+ for (const award of awardsWithSize) {
+ const eligible = awardMatchesByAward.get(award.id) || []
+ const shortlistSize = award.shortlistSize ?? 10
+ const topN = eligible
+ .sort((a, b) => b.qualityScore - a.qualityScore)
+ .slice(0, shortlistSize)
+
+ if (topN.length > 0) {
+ await prisma.$transaction(
+ topN.map((e) =>
+ prisma.awardEligibility.update({
+ where: {
+ awardId_projectId: {
+ awardId: award.id,
+ projectId: e.projectId,
+ },
+ },
+ data: { shortlisted: true },
+ })
+ )
+ )
+ }
+
+ // Mark award eligibility job as completed
+ await prisma.specialAward.update({
+ where: { id: award.id },
+ data: {
+ eligibilityJobStatus: 'COMPLETED',
+ eligibilityJobDone: results.length,
+ eligibilityJobTotal: results.length,
+ },
+ })
+ }
+
+ console.log(`[Filtering] Auto-shortlisted for ${awardsWithSize.length} award(s)`)
+ }
// Count outcomes
const passedCount = results.filter((r) => r.outcome === 'PASSED').length
@@ -498,6 +626,37 @@ export const filteringRouter = router({
where: { roundId: input.roundId },
})
+ // Clear award eligibilities for awards linked to this competition
+ const roundForComp = await ctx.prisma.round.findUniqueOrThrow({
+ where: { id: input.roundId },
+ select: { competitionId: true },
+ })
+ if (roundForComp.competitionId) {
+ const linkedAwards = await ctx.prisma.specialAward.findMany({
+ where: {
+ competitionId: roundForComp.competitionId,
+ useAiEligibility: true,
+ },
+ select: { id: true },
+ })
+ const awardIds = linkedAwards.map((a) => a.id)
+ if (awardIds.length > 0) {
+ await ctx.prisma.awardEligibility.deleteMany({
+ where: { awardId: { in: awardIds } },
+ })
+ await ctx.prisma.specialAward.updateMany({
+ where: { id: { in: awardIds } },
+ data: {
+ eligibilityJobStatus: null,
+ eligibilityJobTotal: null,
+ eligibilityJobDone: null,
+ eligibilityJobError: null,
+ eligibilityJobStarted: null,
+ },
+ })
+ }
+ }
+
const job = await ctx.prisma.filteringJob.create({
data: {
roundId: input.roundId,
diff --git a/src/server/services/ai-filtering.ts b/src/server/services/ai-filtering.ts
index 4a2ffb3..7ab667f 100644
--- a/src/server/services/ai-filtering.ts
+++ b/src/server/services/ai-filtering.ts
@@ -98,6 +98,21 @@ export interface ProjectFilteringResult {
outcome: 'PASSED' | 'FILTERED_OUT' | 'FLAGGED'
ruleResults: RuleResult[]
aiScreeningJson?: Record<string, unknown>
+ awardMatches?: AwardMatchResult[]
+}
+
+export type AwardCriteriaInput = {
+ awardId: string
+ awardName: string
+ criteriaText: string
+}
+
+export type AwardMatchResult = {
+ awardId: string
+ eligible: boolean
+ confidence: number
+ qualityScore: number
+ reasoning: string
}
interface ProjectForFiltering {
@@ -381,6 +396,7 @@ interface AIScreeningResult {
reasoning: string
qualityScore: number
spamRisk: boolean
+ awardMatches?: AwardMatchResult[]
}
/**
@@ -393,7 +409,8 @@ async function processAIBatch(
anonymized: AnonymizedProjectForAI[],
mappings: ProjectAIMapping[],
userId?: string,
- entityId?: string
+ entityId?: string,
+ awards?: AwardCriteriaInput[]
): Promise<{
results: Map<string, AIScreeningResult>
tokensUsed: number
@@ -404,6 +421,34 @@ async function processAIBatch(
// Sanitize user-supplied criteria
const { sanitized: safeCriteria } = sanitizeUserInput(criteriaText)
+ // Build system prompt — dynamically append awards block if any
+ let systemPrompt = AI_SCREENING_SYSTEM_PROMPT
+ if (awards && awards.length > 0) {
+ const awardsList = awards
+ .map((a, i) => `${i + 1}. ${a.awardName}: ${a.criteriaText}`)
+ .join('\n')
+ systemPrompt += `
+
+## Special Award Evaluation (Additional Task)
+In addition to the main screening, evaluate each project against these special awards.
+Award eligibility is independent of the main screening outcome — a project can fail screening but still match an award.
+
+### Awards
+${awardsList}
+
+Add an "award_matches" array to each project result:
+"award_matches": [
+ {
+ "award_index": 1,
+ "eligible": true/false,
+ "confidence": 0.0-1.0,
+ "quality_score": 0-100,
+ "reasoning": "1-2 sentence explanation"
+ }
+]
+Include one entry per award for every project. quality_score (0-100) measures fit for ranking.`
+ }
+
// Build user prompt with clear structure
const userPrompt = `## Screening Criteria
The admin has defined the following requirements. Evaluate each project against ALL of these criteria:
@@ -418,16 +463,17 @@ Evaluate each project and return JSON with your assessment.`
const MAX_PARSE_RETRIES = 2
let parseAttempts = 0
let response: Awaited<ReturnType<typeof openai.chat.completions.create>>
+ const maxTokens = Math.min(8000, 4000 + (awards?.length ?? 0) * 500)
try {
const params = buildCompletionParams(model, {
messages: [
- { role: 'system', content: AI_SCREENING_SYSTEM_PROMPT },
+ { role: 'system', content: systemPrompt },
{ role: 'user', content: userPrompt },
],
jsonMode: true,
temperature: 0.1,
- maxTokens: 4000,
+ maxTokens,
})
response = await openai.chat.completions.create(params)
@@ -457,6 +503,13 @@ Evaluate each project and return JSON with your assessment.`
reasoning: string
quality_score: number
spam_risk: boolean
+ award_matches?: Array<{
+ award_index: number
+ eligible: boolean
+ confidence: number
+ quality_score: number
+ reasoning: string
+ }>
}>
}
@@ -476,12 +529,12 @@ Evaluate each project and return JSON with your assessment.`
// Retry the API call with hint
const retryParams = buildCompletionParams(model, {
messages: [
- { role: 'system', content: AI_SCREENING_SYSTEM_PROMPT },
+ { role: 'system', content: systemPrompt },
{ role: 'user', content: userPrompt + '\n\nIMPORTANT: Please ensure valid JSON output.' },
],
jsonMode: true,
temperature: 0.1,
- maxTokens: 4000,
+ maxTokens,
})
response = await openai.chat.completions.create(retryParams)
const retryUsage = extractTokenUsage(response)
@@ -496,12 +549,31 @@ Evaluate each project and return JSON with your assessment.`
for (const result of parsed.projects || []) {
const mapping = mappings.find((m) => m.anonymousId === result.project_id)
if (mapping) {
+ // Map award matches back to real award IDs
+ let awardMatches: AwardMatchResult[] | undefined
+ if (awards && awards.length > 0 && result.award_matches) {
+ awardMatches = []
+ for (const match of result.award_matches) {
+ const award = awards[(match.award_index ?? 1) - 1]
+ if (award) {
+ awardMatches.push({
+ awardId: award.awardId,
+ eligible: match.eligible,
+ confidence: match.confidence,
+ qualityScore: Math.max(0, Math.min(100, match.quality_score ?? 0)),
+ reasoning: match.reasoning ?? '',
+ })
+ }
+ }
+ }
+
results.set(mapping.realId, {
meetsCriteria: result.meets_criteria,
confidence: result.confidence,
reasoning: result.reasoning,
qualityScore: result.quality_score,
spamRisk: result.spam_risk,
+ awardMatches: awardMatches && awardMatches.length > 0 ? awardMatches : undefined,
})
}
}
@@ -550,7 +622,8 @@ export async function executeAIScreening(
userId?: string,
entityId?: string,
onProgress?: ProgressCallback,
- onBatchComplete?: (batchResults: Map<string, AIScreeningResult>) => Promise<void>
+ onBatchComplete?: (batchResults: Map<string, AIScreeningResult>) => Promise<void>,
+ awards?: AwardCriteriaInput[]
): Promise