// MOPC-Portal/src/server/services/ai-award-eligibility.ts
/**
* AI-Powered Award Eligibility Service
*
* Determines project eligibility for special awards using:
* - Deterministic field matching (tags, country, category)
* - AI interpretation of plain-language criteria
*
* GDPR Compliance:
* - All project data is anonymized before AI processing
* - IDs replaced with sequential identifiers
* - No personal information sent to OpenAI
*/
import { getOpenAI, getConfiguredModel, buildCompletionParams } from '@/lib/openai'
import { logAIUsage, extractTokenUsage } from '@/server/utils/ai-usage'
import { classifyAIError, createParseError, logAIError } from './ai-errors'
import {
anonymizeProjectsForAI,
validateAnonymizedProjects,
toProjectWithRelations,
type AnonymizedProjectForAI,
type ProjectAIMapping,
} from './anonymization'
import type { SubmissionSource } from '@prisma/client'
// ─── Constants ───────────────────────────────────────────────────────────────
// Number of projects sent to the model per chat-completion request; bounds the
// prompt size and lets failures be retried/reviewed per batch.
const BATCH_SIZE = 20
// System prompt: instructs the model to return strict JSON matching the shape
// parsed in processEligibilityBatch ({"evaluations": [...]}).
const AI_ELIGIBILITY_SYSTEM_PROMPT = `Award eligibility evaluator. Evaluate projects against criteria, return JSON.
Format: {"evaluations": [{project_id, eligible: bool, confidence: 0-1, reasoning: str}]}
Be objective. Base evaluation only on provided data. No personal identifiers in reasoning.`
// ─── Types ──────────────────────────────────────────────────────────────────
/**
 * Deterministic eligibility rule evaluated without AI.
 * Rules are AND-combined by applyAutoTagRules: every rule must match.
 */
export type AutoTagRule = {
  // Project field the rule inspects (see getFieldValue).
  field: 'competitionCategory' | 'country' | 'geographicZone' | 'tags' | 'oceanIssue'
  // 'equals': case-insensitive equality; 'contains': substring; 'in': membership in value[].
  operator: 'equals' | 'contains' | 'in'
  // Single comparison value, or a list of accepted values for 'in'.
  value: string | string[]
}

/** Outcome of an eligibility check for one project. */
export interface EligibilityResult {
  projectId: string
  eligible: boolean
  // Confidence in the verdict, 0 (low) to 1 (high).
  confidence: number
  // Human-readable justification (the AI is instructed to include no personal identifiers).
  reasoning: string
  // 'AUTO' = deterministic rule match, 'AI' = model evaluation.
  method: 'AUTO' | 'AI'
}
/**
 * Minimal project shape required for eligibility evaluation.
 * Structural, so both Prisma query results and plain objects can be passed;
 * converted via toProjectWithRelations before anonymization.
 */
interface ProjectForEligibility {
  id: string
  title: string
  description?: string | null
  competitionCategory?: string | null
  country?: string | null
  geographicZone?: string | null
  tags: string[]
  oceanIssue?: string | null
  institution?: string | null
  foundedAt?: Date | null
  wantsMentorship?: boolean
  submissionSource?: SubmissionSource
  submittedAt?: Date | null
  // Prisma-style aggregate counts, when loaded by the caller.
  _count?: {
    teamMembers?: number
    files?: number
  }
  files?: Array<{ fileType: string | null }>
}
// ─── Auto Tag Rules ─────────────────────────────────────────────────────────
/**
 * Evaluate deterministic auto-tag rules against a list of projects.
 *
 * All rules must match for a project to be eligible (AND semantics); note
 * that an empty rule list therefore matches every project, since
 * Array.prototype.every on [] is true.
 *
 * All comparisons are case-insensitive:
 * - 'equals':   stringified field equals the rule value
 * - 'contains': field (or any element of an array field) contains the rule
 *               value as a substring
 * - 'in':       field value is one of rule.value; for array-valued fields
 *               (e.g. tags), matches when ANY element is in rule.value
 *
 * @param rules    Rules to apply (AND-combined)
 * @param projects Projects to evaluate
 * @returns Map of project id → whether every rule matched
 */
export function applyAutoTagRules(
  rules: AutoTagRule[],
  projects: ProjectForEligibility[]
): Map<string, boolean> {
  const results = new Map<string, boolean>()
  for (const project of projects) {
    const matches = rules.every((rule) => {
      const fieldValue = getFieldValue(project, rule.field)
      switch (rule.operator) {
        case 'equals':
          // NOTE(review): array fields are stringified here ("a,b"), so
          // 'equals' on tags only matches the exact joined form — presumably
          // rules on array fields use 'contains'/'in'; confirm with callers.
          return String(fieldValue).toLowerCase() === String(rule.value).toLowerCase()
        case 'contains':
          if (Array.isArray(fieldValue)) {
            return fieldValue.some((v) =>
              String(v).toLowerCase().includes(String(rule.value).toLowerCase())
            )
          }
          return String(fieldValue || '').toLowerCase().includes(String(rule.value).toLowerCase())
        case 'in': {
          // 'in' only makes sense with a list of accepted values.
          if (!Array.isArray(rule.value)) return false
          const allowed = rule.value.map((v) => String(v).toLowerCase())
          // Fix: for array-valued fields (tags), test membership per element.
          // Previously the whole array was stringified ("a,b"), so an 'in'
          // rule only matched when the joined form equaled a rule value.
          if (Array.isArray(fieldValue)) {
            return fieldValue.some((v) => allowed.includes(String(v).toLowerCase()))
          }
          return allowed.includes(String(fieldValue).toLowerCase())
        }
        default:
          return false
      }
    })
    results.set(project.id, matches)
  }
  return results
}

/**
 * Read the value of a rule field from a project.
 * Returns null for unknown fields so no rule can accidentally match.
 */
function getFieldValue(
  project: ProjectForEligibility,
  field: AutoTagRule['field']
): unknown {
  switch (field) {
    case 'competitionCategory':
      return project.competitionCategory
    case 'country':
      return project.country
    case 'geographicZone':
      return project.geographicZone
    case 'tags':
      return project.tags
    case 'oceanIssue':
      return project.oceanIssue
    default:
      return null
  }
}
// ─── AI Criteria Interpretation ─────────────────────────────────────────────
/**
 * Process a single batch of projects for AI eligibility evaluation.
 *
 * Sends one chat-completion request containing the criteria plus the
 * anonymized projects, records token usage, parses the model's JSON
 * response, and translates anonymous project ids back to real ids.
 *
 * @param openai       Configured OpenAI client (non-null)
 * @param model        Model name to use for the completion
 * @param criteriaText Plain-language award criteria
 * @param anonymized   Anonymized project payloads for this batch
 * @param mappings     anonymousId → realId mappings for this batch
 * @param userId       Optional actor id for usage logging
 * @param entityId     Optional award id for usage logging
 * @returns Eligibility results (method 'AI') and total tokens consumed
 * @throws Re-throws any non-SyntaxError failure (e.g. network/API errors);
 *         JSON parse failures are handled internally (see catch below).
 */
async function processEligibilityBatch(
  openai: NonNullable<Awaited<ReturnType<typeof getOpenAI>>>,
  model: string,
  criteriaText: string,
  anonymized: AnonymizedProjectForAI[],
  mappings: ProjectAIMapping[],
  userId?: string,
  entityId?: string
): Promise<{
  results: EligibilityResult[]
  tokensUsed: number
}> {
  const results: EligibilityResult[] = []
  let tokensUsed = 0
  const userPrompt = `CRITERIA: ${criteriaText}
PROJECTS: ${JSON.stringify(anonymized)}
Evaluate eligibility for each project.`
  try {
    // Low temperature for consistent evaluations; JSON mode so the response
    // can be JSON.parse'd directly below.
    const params = buildCompletionParams(model, {
      messages: [
        { role: 'system', content: AI_ELIGIBILITY_SYSTEM_PROMPT },
        { role: 'user', content: userPrompt },
      ],
      jsonMode: true,
      temperature: 0.3,
      maxTokens: 4000,
    })
    const response = await openai.chat.completions.create(params)
    const usage = extractTokenUsage(response)
    tokensUsed = usage.totalTokens
    // Log usage as soon as the API call succeeds, before parsing — the
    // tokens were spent even if the response turns out to be malformed.
    await logAIUsage({
      userId,
      action: 'AWARD_ELIGIBILITY',
      entityType: 'Award',
      entityId,
      model,
      promptTokens: usage.promptTokens,
      completionTokens: usage.completionTokens,
      totalTokens: usage.totalTokens,
      batchSize: anonymized.length,
      itemsProcessed: anonymized.length,
      status: 'SUCCESS',
    })
    const content = response.choices[0]?.message?.content
    if (!content) {
      // Plain Error (not SyntaxError), so this propagates to the caller.
      throw new Error('Empty response from AI')
    }
    // Shape promised by AI_ELIGIBILITY_SYSTEM_PROMPT. Not schema-validated:
    // missing fields would surface as undefined in the results.
    const parsed = JSON.parse(content) as {
      evaluations: Array<{
        project_id: string
        eligible: boolean
        confidence: number
        reasoning: string
      }>
    }
    // Map results back to real IDs; evaluations whose project_id has no
    // mapping are silently dropped.
    for (const eval_ of parsed.evaluations || []) {
      const mapping = mappings.find((m) => m.anonymousId === eval_.project_id)
      if (mapping) {
        results.push({
          projectId: mapping.realId,
          eligible: eval_.eligible,
          confidence: eval_.confidence,
          reasoning: eval_.reasoning,
          method: 'AI',
        })
      }
    }
  } catch (error) {
    if (error instanceof SyntaxError) {
      // Malformed JSON from the model: record the parse failure and flag the
      // whole batch for manual review instead of failing the entire run.
      // NOTE(review): in this path a SUCCESS usage row was usually already
      // written above, so the same request is logged twice (SUCCESS + ERROR)
      // — presumably intentional for cost tracking; confirm.
      const parseError = createParseError(error.message)
      logAIError('AwardEligibility', 'batch processing', parseError)
      await logAIUsage({
        userId,
        action: 'AWARD_ELIGIBILITY',
        entityType: 'Award',
        entityId,
        model,
        promptTokens: 0,
        completionTokens: 0,
        totalTokens: tokensUsed,
        batchSize: anonymized.length,
        itemsProcessed: 0,
        status: 'ERROR',
        errorMessage: parseError.message,
      })
      // Flag all for manual review
      for (const mapping of mappings) {
        results.push({
          projectId: mapping.realId,
          eligible: false,
          confidence: 0,
          reasoning: 'AI response parse error — requires manual review',
          method: 'AI',
        })
      }
    } else {
      // Anything else (rate limit, network, API error) bubbles up to
      // aiInterpretCriteria, which converts the run into manual-review results.
      throw error
    }
  }
  return { results, tokensUsed }
}
export async function aiInterpretCriteria(
criteriaText: string,
projects: ProjectForEligibility[],
userId?: string,
awardId?: string
): Promise<EligibilityResult[]> {
const results: EligibilityResult[] = []
try {
const openai = await getOpenAI()
if (!openai) {
console.warn('[AI Eligibility] OpenAI not configured')
return projects.map((p) => ({
projectId: p.id,
eligible: false,
confidence: 0,
reasoning: 'AI unavailable — requires manual eligibility review',
method: 'AI' as const,
}))
}
const model = await getConfiguredModel()
console.log(`[AI Eligibility] Using model: ${model} for ${projects.length} projects`)
// Convert and anonymize projects
const projectsWithRelations = projects.map(toProjectWithRelations)
const { anonymized, mappings } = anonymizeProjectsForAI(projectsWithRelations, 'ELIGIBILITY')
// Validate anonymization
if (!validateAnonymizedProjects(anonymized)) {
console.error('[AI Eligibility] Anonymization validation failed')
throw new Error('GDPR compliance check failed: PII detected in anonymized data')
}
let totalTokens = 0
// Process in batches
for (let i = 0; i < anonymized.length; i += BATCH_SIZE) {
const batchAnon = anonymized.slice(i, i + BATCH_SIZE)
const batchMappings = mappings.slice(i, i + BATCH_SIZE)
console.log(`[AI Eligibility] Processing batch ${Math.floor(i / BATCH_SIZE) + 1}/${Math.ceil(anonymized.length / BATCH_SIZE)}`)
const { results: batchResults, tokensUsed } = await processEligibilityBatch(
openai,
model,
criteriaText,
batchAnon,
batchMappings,
userId,
awardId
)
results.push(...batchResults)
totalTokens += tokensUsed
}
console.log(`[AI Eligibility] Completed. Total tokens: ${totalTokens}`)
} catch (error) {
const classified = classifyAIError(error)
logAIError('AwardEligibility', 'aiInterpretCriteria', classified)
// Log failed attempt
await logAIUsage({
userId,
action: 'AWARD_ELIGIBILITY',
entityType: 'Award',
entityId: awardId,
model: 'unknown',
promptTokens: 0,
completionTokens: 0,
totalTokens: 0,
batchSize: projects.length,
itemsProcessed: 0,
status: 'ERROR',
errorMessage: classified.message,
})
// Return all as needing manual review
return projects.map((p) => ({
projectId: p.id,
eligible: false,
confidence: 0,
reasoning: `AI error: ${classified.message}`,
method: 'AI' as const,
}))
}
return results
}