Apply full refactor updates plus pipeline/email UX confirmations
All checks were successful
Build and Push Docker Image / build (push) Successful in 10m33s

This commit is contained in:
Matt
2026-02-14 15:26:42 +01:00
parent e56e143a40
commit b5425e705e
374 changed files with 116737 additions and 111969 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -1,342 +1,342 @@
/**
* AI-Powered Award Eligibility Service
*
* Determines project eligibility for special awards using:
* - Deterministic field matching (tags, country, category)
* - AI interpretation of plain-language criteria
*
* GDPR Compliance:
* - All project data is anonymized before AI processing
* - IDs replaced with sequential identifiers
* - No personal information sent to OpenAI
*/
import { getOpenAI, getConfiguredModel, buildCompletionParams } from '@/lib/openai'
import { logAIUsage, extractTokenUsage } from '@/server/utils/ai-usage'
import { classifyAIError, createParseError, logAIError } from './ai-errors'
import {
anonymizeProjectsForAI,
validateAnonymizedProjects,
toProjectWithRelations,
type AnonymizedProjectForAI,
type ProjectAIMapping,
} from './anonymization'
import type { SubmissionSource } from '@prisma/client'
// ─── Constants ───────────────────────────────────────────────────────────────
// Number of projects evaluated per OpenAI request (see the batching loop in
// aiInterpretCriteria).
const BATCH_SIZE = 20
// Optimized system prompt — kept terse to minimize prompt tokens. Paired with
// jsonMode in the request so the model answers with a JSON object only.
const AI_ELIGIBILITY_SYSTEM_PROMPT = `Award eligibility evaluator. Evaluate projects against criteria, return JSON.
Format: {"evaluations": [{project_id, eligible: bool, confidence: 0-1, reasoning: str}]}
Be objective. Base evaluation only on provided data. No personal identifiers in reasoning.`
// ─── Types ──────────────────────────────────────────────────────────────────
/**
 * One deterministic matching rule for applyAutoTagRules.
 * Array values are only meaningful with the 'in' operator — 'equals' and
 * 'contains' stringify `value` before comparing.
 */
export type AutoTagRule = {
  field: 'competitionCategory' | 'country' | 'geographicZone' | 'tags' | 'oceanIssue'
  operator: 'equals' | 'contains' | 'in'
  value: string | string[]
}
/**
 * Outcome of an eligibility decision for a single project.
 * `method` records whether the verdict came from deterministic rules ('AUTO')
 * or from AI interpretation of plain-language criteria ('AI').
 */
export interface EligibilityResult {
  projectId: string
  eligible: boolean
  // 0-1 confidence score reported by the evaluator.
  confidence: number
  reasoning: string
  method: 'AUTO' | 'AI'
}
/**
 * Minimal project shape consumed by this service. Optional fields may be
 * absent depending on what the caller selected — presumably from Prisma
 * (SubmissionSource is a Prisma type); verify against callers.
 */
interface ProjectForEligibility {
  id: string
  title: string
  description?: string | null
  competitionCategory?: string | null
  country?: string | null
  geographicZone?: string | null
  tags: string[]
  oceanIssue?: string | null
  institution?: string | null
  foundedAt?: Date | null
  wantsMentorship?: boolean
  submissionSource?: SubmissionSource
  submittedAt?: Date | null
  _count?: {
    teamMembers?: number
    files?: number
  }
  files?: Array<{ fileType: string | null }>
}
// ─── Auto Tag Rules ─────────────────────────────────────────────────────────
/**
 * Apply deterministic auto-tag rules to a set of projects.
 *
 * A project matches when EVERY rule matches (logical AND). Note that an empty
 * rule list therefore matches every project (vacuous truth of Array.every).
 * All comparisons are case-insensitive string comparisons.
 *
 * Fix: null/undefined field values previously stringified to "null" /
 * "undefined" in the 'equals' and 'in' branches and could spuriously match a
 * rule value with that literal text; missing fields now never match.
 *
 * @param rules - Conjunctive rule set to evaluate.
 * @param projects - Projects to classify.
 * @returns Map from project id to whether all rules matched.
 */
export function applyAutoTagRules(
  rules: AutoTagRule[],
  projects: ProjectForEligibility[]
): Map<string, boolean> {
  const results = new Map<string, boolean>()
  for (const project of projects) {
    results.set(project.id, rules.every((rule) => matchesRule(project, rule)))
  }
  return results
}

/** Evaluate one rule against one project (case-insensitive). */
function matchesRule(project: ProjectForEligibility, rule: AutoTagRule): boolean {
  const fieldValue = getFieldValue(project, rule.field)
  switch (rule.operator) {
    case 'equals':
      // Missing fields never equal anything (avoids String(null) === "null").
      if (fieldValue == null) return false
      return String(fieldValue).toLowerCase() === String(rule.value).toLowerCase()
    case 'contains': {
      const needle = String(rule.value).toLowerCase()
      if (Array.isArray(fieldValue)) {
        // e.g. tags: match when any element contains the needle.
        return fieldValue.some((v) => String(v).toLowerCase().includes(needle))
      }
      return String(fieldValue ?? '').toLowerCase().includes(needle)
    }
    case 'in': {
      // 'in' requires an array rule value and a present field value.
      if (!Array.isArray(rule.value) || fieldValue == null) return false
      const target = String(fieldValue).toLowerCase()
      return rule.value.some((v) => String(v).toLowerCase() === target)
    }
    default:
      return false
  }
}
/**
 * Read the project field referenced by an auto-tag rule.
 * Returns null for unrecognized field names so rule matching fails closed.
 */
function getFieldValue(
  project: ProjectForEligibility,
  field: AutoTagRule['field']
): unknown {
  if (field === 'competitionCategory') return project.competitionCategory
  if (field === 'country') return project.country
  if (field === 'geographicZone') return project.geographicZone
  if (field === 'tags') return project.tags
  if (field === 'oceanIssue') return project.oceanIssue
  return null
}
// ─── AI Criteria Interpretation ─────────────────────────────────────────────
/**
 * Evaluate one batch of anonymized projects against the award criteria via
 * OpenAI and map the model's verdicts back to real project ids.
 *
 * Error handling:
 * - A JSON parse failure (SyntaxError) is absorbed: it is logged and every
 *   project in the batch is returned as ineligible with confidence 0 so the
 *   caller can flag it for manual review.
 * - Any other error (network, auth, empty response) is rethrown.
 *
 * Note: evaluations whose project_id has no entry in `mappings` are silently
 * dropped from the results.
 *
 * @param openai - Configured OpenAI client (non-null).
 * @param model - Chat-completion model identifier.
 * @param criteriaText - Plain-language award criteria.
 * @param anonymized - GDPR-anonymized project payloads (no PII).
 * @param mappings - anonymousId → realId lookup used to de-anonymize results.
 * @param userId - Optional user recorded in the AI usage log.
 * @param entityId - Optional award id recorded in the AI usage log.
 * @returns De-anonymized eligibility results plus total tokens consumed.
 */
async function processEligibilityBatch(
  openai: NonNullable<Awaited<ReturnType<typeof getOpenAI>>>,
  model: string,
  criteriaText: string,
  anonymized: AnonymizedProjectForAI[],
  mappings: ProjectAIMapping[],
  userId?: string,
  entityId?: string
): Promise<{
  results: EligibilityResult[]
  tokensUsed: number
}> {
  const results: EligibilityResult[] = []
  let tokensUsed = 0
  // Template content is significant: criteria, projects, and the instruction
  // each occupy their own line of the prompt.
  const userPrompt = `CRITERIA: ${criteriaText}
PROJECTS: ${JSON.stringify(anonymized)}
Evaluate eligibility for each project.`
  try {
    const params = buildCompletionParams(model, {
      messages: [
        { role: 'system', content: AI_ELIGIBILITY_SYSTEM_PROMPT },
        { role: 'user', content: userPrompt },
      ],
      jsonMode: true, // request a JSON object, matching the system prompt's format
      temperature: 0.3, // low temperature for consistent verdicts
      maxTokens: 4000,
    })
    const response = await openai.chat.completions.create(params)
    const usage = extractTokenUsage(response)
    tokensUsed = usage.totalTokens
    // Record token spend before parsing, so usage is logged even when the
    // response body turns out to be empty or malformed.
    await logAIUsage({
      userId,
      action: 'AWARD_ELIGIBILITY',
      entityType: 'Award',
      entityId,
      model,
      promptTokens: usage.promptTokens,
      completionTokens: usage.completionTokens,
      totalTokens: usage.totalTokens,
      batchSize: anonymized.length,
      itemsProcessed: anonymized.length,
      status: 'SUCCESS',
    })
    const content = response.choices[0]?.message?.content
    if (!content) {
      // Plain Error, not SyntaxError — rethrown to the caller by the catch below.
      throw new Error('Empty response from AI')
    }
    const parsed = JSON.parse(content) as {
      evaluations: Array<{
        project_id: string
        eligible: boolean
        confidence: number
        reasoning: string
      }>
    }
    // Map results back to real IDs. Entries with an unknown project_id
    // (hallucinated or duplicated ids) are silently dropped.
    for (const eval_ of parsed.evaluations || []) {
      const mapping = mappings.find((m) => m.anonymousId === eval_.project_id)
      if (mapping) {
        results.push({
          projectId: mapping.realId,
          eligible: eval_.eligible,
          confidence: eval_.confidence,
          reasoning: eval_.reasoning,
          method: 'AI',
        })
      }
    }
  } catch (error) {
    if (error instanceof SyntaxError) {
      // JSON.parse failed: log the error (totalTokens keeps the spend already
      // counted above) and degrade gracefully instead of rethrowing.
      const parseError = createParseError(error.message)
      logAIError('AwardEligibility', 'batch processing', parseError)
      await logAIUsage({
        userId,
        action: 'AWARD_ELIGIBILITY',
        entityType: 'Award',
        entityId,
        model,
        promptTokens: 0,
        completionTokens: 0,
        totalTokens: tokensUsed,
        batchSize: anonymized.length,
        itemsProcessed: 0,
        status: 'ERROR',
        errorMessage: parseError.message,
      })
      // Flag every project in the batch for manual review.
      for (const mapping of mappings) {
        results.push({
          projectId: mapping.realId,
          eligible: false,
          confidence: 0,
          reasoning: 'AI response parse error — requires manual review',
          method: 'AI',
        })
      }
    } else {
      // Non-parse failures are classified and logged by the caller.
      throw error
    }
  }
  return { results, tokensUsed }
}
/**
 * Evaluate projects against plain-language award criteria using OpenAI.
 *
 * Pipeline: anonymize → validate (GDPR gate) → process in fixed-size batches
 * via processEligibilityBatch → merge results.
 *
 * This function never throws: when OpenAI is unconfigured or any step fails,
 * every project is returned as ineligible with confidence 0 and a reasoning
 * string that flags it for manual review.
 *
 * @param criteriaText - Plain-language criteria to evaluate against.
 * @param projects - Projects to evaluate (anonymized internally).
 * @param userId - Optional user recorded in the AI usage log.
 * @param awardId - Optional award id recorded in the AI usage log.
 * @returns One EligibilityResult per project (order follows batch processing).
 */
export async function aiInterpretCriteria(
  criteriaText: string,
  projects: ProjectForEligibility[],
  userId?: string,
  awardId?: string
): Promise<EligibilityResult[]> {
  const results: EligibilityResult[] = []
  try {
    const openai = await getOpenAI()
    if (!openai) {
      // No API key configured — degrade to "manual review" rather than failing.
      console.warn('[AI Eligibility] OpenAI not configured')
      return projects.map((p) => ({
        projectId: p.id,
        eligible: false,
        confidence: 0,
        reasoning: 'AI unavailable — requires manual eligibility review',
        method: 'AI' as const,
      }))
    }
    const model = await getConfiguredModel()
    console.log(`[AI Eligibility] Using model: ${model} for ${projects.length} projects`)
    // Convert to the relation shape expected by the anonymizer, then strip PII.
    const projectsWithRelations = projects.map(toProjectWithRelations)
    const { anonymized, mappings } = anonymizeProjectsForAI(projectsWithRelations, 'ELIGIBILITY')
    // GDPR gate: refuse to call OpenAI if any PII survived anonymization.
    if (!validateAnonymizedProjects(anonymized)) {
      console.error('[AI Eligibility] Anonymization validation failed')
      throw new Error('GDPR compliance check failed: PII detected in anonymized data')
    }
    let totalTokens = 0
    // Process sequentially in fixed-size batches. `anonymized` and `mappings`
    // are sliced with the same bounds — assumed index-aligned as produced
    // together by anonymizeProjectsForAI; verify if that helper changes.
    for (let i = 0; i < anonymized.length; i += BATCH_SIZE) {
      const batchAnon = anonymized.slice(i, i + BATCH_SIZE)
      const batchMappings = mappings.slice(i, i + BATCH_SIZE)
      console.log(`[AI Eligibility] Processing batch ${Math.floor(i / BATCH_SIZE) + 1}/${Math.ceil(anonymized.length / BATCH_SIZE)}`)
      const { results: batchResults, tokensUsed } = await processEligibilityBatch(
        openai,
        model,
        criteriaText,
        batchAnon,
        batchMappings,
        userId,
        awardId
      )
      results.push(...batchResults)
      totalTokens += tokensUsed
    }
    console.log(`[AI Eligibility] Completed. Total tokens: ${totalTokens}`)
  } catch (error) {
    const classified = classifyAIError(error)
    logAIError('AwardEligibility', 'aiInterpretCriteria', classified)
    // Log the failed attempt; token counts are unknown at this point.
    await logAIUsage({
      userId,
      action: 'AWARD_ELIGIBILITY',
      entityType: 'Award',
      entityId: awardId,
      model: 'unknown',
      promptTokens: 0,
      completionTokens: 0,
      totalTokens: 0,
      batchSize: projects.length,
      itemsProcessed: 0,
      status: 'ERROR',
      errorMessage: classified.message,
    })
    // Return all projects as needing manual review (partial batch results,
    // if any, are discarded here).
    return projects.map((p) => ({
      projectId: p.id,
      eligible: false,
      confidence: 0,
      reasoning: `AI error: ${classified.message}`,
      method: 'AI' as const,
    }))
  }
  return results
}
/**
* AI-Powered Award Eligibility Service
*
* Determines project eligibility for special awards using:
* - Deterministic field matching (tags, country, category)
* - AI interpretation of plain-language criteria
*
* GDPR Compliance:
* - All project data is anonymized before AI processing
* - IDs replaced with sequential identifiers
* - No personal information sent to OpenAI
*/
import { getOpenAI, getConfiguredModel, buildCompletionParams } from '@/lib/openai'
import { logAIUsage, extractTokenUsage } from '@/server/utils/ai-usage'
import { classifyAIError, createParseError, logAIError } from './ai-errors'
import {
anonymizeProjectsForAI,
validateAnonymizedProjects,
toProjectWithRelations,
type AnonymizedProjectForAI,
type ProjectAIMapping,
} from './anonymization'
import type { SubmissionSource } from '@prisma/client'
// ─── Constants ───────────────────────────────────────────────────────────────
// Number of projects evaluated per OpenAI request (see the batching loop in
// aiInterpretCriteria).
const BATCH_SIZE = 20
// Optimized system prompt — kept terse to minimize prompt tokens. Paired with
// jsonMode in the request so the model answers with a JSON object only.
const AI_ELIGIBILITY_SYSTEM_PROMPT = `Award eligibility evaluator. Evaluate projects against criteria, return JSON.
Format: {"evaluations": [{project_id, eligible: bool, confidence: 0-1, reasoning: str}]}
Be objective. Base evaluation only on provided data. No personal identifiers in reasoning.`
// ─── Types ──────────────────────────────────────────────────────────────────
/**
 * One deterministic matching rule for applyAutoTagRules.
 * Array values are only meaningful with the 'in' operator — 'equals' and
 * 'contains' stringify `value` before comparing.
 */
export type AutoTagRule = {
  field: 'competitionCategory' | 'country' | 'geographicZone' | 'tags' | 'oceanIssue'
  operator: 'equals' | 'contains' | 'in'
  value: string | string[]
}
/**
 * Outcome of an eligibility decision for a single project.
 * `method` records whether the verdict came from deterministic rules ('AUTO')
 * or from AI interpretation of plain-language criteria ('AI').
 */
export interface EligibilityResult {
  projectId: string
  eligible: boolean
  // 0-1 confidence score reported by the evaluator.
  confidence: number
  reasoning: string
  method: 'AUTO' | 'AI'
}
/**
 * Minimal project shape consumed by this service. Optional fields may be
 * absent depending on what the caller selected — presumably from Prisma
 * (SubmissionSource is a Prisma type); verify against callers.
 */
interface ProjectForEligibility {
  id: string
  title: string
  description?: string | null
  competitionCategory?: string | null
  country?: string | null
  geographicZone?: string | null
  tags: string[]
  oceanIssue?: string | null
  institution?: string | null
  foundedAt?: Date | null
  wantsMentorship?: boolean
  submissionSource?: SubmissionSource
  submittedAt?: Date | null
  _count?: {
    teamMembers?: number
    files?: number
  }
  files?: Array<{ fileType: string | null }>
}
// ─── Auto Tag Rules ─────────────────────────────────────────────────────────
/**
 * Apply deterministic auto-tag rules to a set of projects.
 *
 * A project matches when EVERY rule matches (logical AND). Note that an empty
 * rule list therefore matches every project (vacuous truth of Array.every).
 * All comparisons are case-insensitive string comparisons.
 *
 * Fix: null/undefined field values previously stringified to "null" /
 * "undefined" in the 'equals' and 'in' branches and could spuriously match a
 * rule value with that literal text; missing fields now never match.
 *
 * @param rules - Conjunctive rule set to evaluate.
 * @param projects - Projects to classify.
 * @returns Map from project id to whether all rules matched.
 */
export function applyAutoTagRules(
  rules: AutoTagRule[],
  projects: ProjectForEligibility[]
): Map<string, boolean> {
  const results = new Map<string, boolean>()
  for (const project of projects) {
    results.set(project.id, rules.every((rule) => matchesRule(project, rule)))
  }
  return results
}

/** Evaluate one rule against one project (case-insensitive). */
function matchesRule(project: ProjectForEligibility, rule: AutoTagRule): boolean {
  const fieldValue = getFieldValue(project, rule.field)
  switch (rule.operator) {
    case 'equals':
      // Missing fields never equal anything (avoids String(null) === "null").
      if (fieldValue == null) return false
      return String(fieldValue).toLowerCase() === String(rule.value).toLowerCase()
    case 'contains': {
      const needle = String(rule.value).toLowerCase()
      if (Array.isArray(fieldValue)) {
        // e.g. tags: match when any element contains the needle.
        return fieldValue.some((v) => String(v).toLowerCase().includes(needle))
      }
      return String(fieldValue ?? '').toLowerCase().includes(needle)
    }
    case 'in': {
      // 'in' requires an array rule value and a present field value.
      if (!Array.isArray(rule.value) || fieldValue == null) return false
      const target = String(fieldValue).toLowerCase()
      return rule.value.some((v) => String(v).toLowerCase() === target)
    }
    default:
      return false
  }
}
/**
 * Read the project field referenced by an auto-tag rule.
 * Returns null for unrecognized field names so rule matching fails closed.
 */
function getFieldValue(
  project: ProjectForEligibility,
  field: AutoTagRule['field']
): unknown {
  if (field === 'competitionCategory') return project.competitionCategory
  if (field === 'country') return project.country
  if (field === 'geographicZone') return project.geographicZone
  if (field === 'tags') return project.tags
  if (field === 'oceanIssue') return project.oceanIssue
  return null
}
// ─── AI Criteria Interpretation ─────────────────────────────────────────────
/**
 * Evaluate one batch of anonymized projects against the award criteria via
 * OpenAI and map the model's verdicts back to real project ids.
 *
 * Error handling:
 * - A JSON parse failure (SyntaxError) is absorbed: it is logged and every
 *   project in the batch is returned as ineligible with confidence 0 so the
 *   caller can flag it for manual review.
 * - Any other error (network, auth, empty response) is rethrown.
 *
 * Note: evaluations whose project_id has no entry in `mappings` are silently
 * dropped from the results.
 *
 * @param openai - Configured OpenAI client (non-null).
 * @param model - Chat-completion model identifier.
 * @param criteriaText - Plain-language award criteria.
 * @param anonymized - GDPR-anonymized project payloads (no PII).
 * @param mappings - anonymousId → realId lookup used to de-anonymize results.
 * @param userId - Optional user recorded in the AI usage log.
 * @param entityId - Optional award id recorded in the AI usage log.
 * @returns De-anonymized eligibility results plus total tokens consumed.
 */
async function processEligibilityBatch(
  openai: NonNullable<Awaited<ReturnType<typeof getOpenAI>>>,
  model: string,
  criteriaText: string,
  anonymized: AnonymizedProjectForAI[],
  mappings: ProjectAIMapping[],
  userId?: string,
  entityId?: string
): Promise<{
  results: EligibilityResult[]
  tokensUsed: number
}> {
  const results: EligibilityResult[] = []
  let tokensUsed = 0
  // Template content is significant: criteria, projects, and the instruction
  // each occupy their own line of the prompt.
  const userPrompt = `CRITERIA: ${criteriaText}
PROJECTS: ${JSON.stringify(anonymized)}
Evaluate eligibility for each project.`
  try {
    const params = buildCompletionParams(model, {
      messages: [
        { role: 'system', content: AI_ELIGIBILITY_SYSTEM_PROMPT },
        { role: 'user', content: userPrompt },
      ],
      jsonMode: true, // request a JSON object, matching the system prompt's format
      temperature: 0.3, // low temperature for consistent verdicts
      maxTokens: 4000,
    })
    const response = await openai.chat.completions.create(params)
    const usage = extractTokenUsage(response)
    tokensUsed = usage.totalTokens
    // Record token spend before parsing, so usage is logged even when the
    // response body turns out to be empty or malformed.
    await logAIUsage({
      userId,
      action: 'AWARD_ELIGIBILITY',
      entityType: 'Award',
      entityId,
      model,
      promptTokens: usage.promptTokens,
      completionTokens: usage.completionTokens,
      totalTokens: usage.totalTokens,
      batchSize: anonymized.length,
      itemsProcessed: anonymized.length,
      status: 'SUCCESS',
    })
    const content = response.choices[0]?.message?.content
    if (!content) {
      // Plain Error, not SyntaxError — rethrown to the caller by the catch below.
      throw new Error('Empty response from AI')
    }
    const parsed = JSON.parse(content) as {
      evaluations: Array<{
        project_id: string
        eligible: boolean
        confidence: number
        reasoning: string
      }>
    }
    // Map results back to real IDs. Entries with an unknown project_id
    // (hallucinated or duplicated ids) are silently dropped.
    for (const eval_ of parsed.evaluations || []) {
      const mapping = mappings.find((m) => m.anonymousId === eval_.project_id)
      if (mapping) {
        results.push({
          projectId: mapping.realId,
          eligible: eval_.eligible,
          confidence: eval_.confidence,
          reasoning: eval_.reasoning,
          method: 'AI',
        })
      }
    }
  } catch (error) {
    if (error instanceof SyntaxError) {
      // JSON.parse failed: log the error (totalTokens keeps the spend already
      // counted above) and degrade gracefully instead of rethrowing.
      const parseError = createParseError(error.message)
      logAIError('AwardEligibility', 'batch processing', parseError)
      await logAIUsage({
        userId,
        action: 'AWARD_ELIGIBILITY',
        entityType: 'Award',
        entityId,
        model,
        promptTokens: 0,
        completionTokens: 0,
        totalTokens: tokensUsed,
        batchSize: anonymized.length,
        itemsProcessed: 0,
        status: 'ERROR',
        errorMessage: parseError.message,
      })
      // Flag every project in the batch for manual review.
      for (const mapping of mappings) {
        results.push({
          projectId: mapping.realId,
          eligible: false,
          confidence: 0,
          reasoning: 'AI response parse error — requires manual review',
          method: 'AI',
        })
      }
    } else {
      // Non-parse failures are classified and logged by the caller.
      throw error
    }
  }
  return { results, tokensUsed }
}
/**
 * Evaluate projects against plain-language award criteria using OpenAI.
 *
 * Pipeline: anonymize → validate (GDPR gate) → process in fixed-size batches
 * via processEligibilityBatch → merge results.
 *
 * This function never throws: when OpenAI is unconfigured or any step fails,
 * every project is returned as ineligible with confidence 0 and a reasoning
 * string that flags it for manual review.
 *
 * @param criteriaText - Plain-language criteria to evaluate against.
 * @param projects - Projects to evaluate (anonymized internally).
 * @param userId - Optional user recorded in the AI usage log.
 * @param awardId - Optional award id recorded in the AI usage log.
 * @returns One EligibilityResult per project (order follows batch processing).
 */
export async function aiInterpretCriteria(
  criteriaText: string,
  projects: ProjectForEligibility[],
  userId?: string,
  awardId?: string
): Promise<EligibilityResult[]> {
  const results: EligibilityResult[] = []
  try {
    const openai = await getOpenAI()
    if (!openai) {
      // No API key configured — degrade to "manual review" rather than failing.
      console.warn('[AI Eligibility] OpenAI not configured')
      return projects.map((p) => ({
        projectId: p.id,
        eligible: false,
        confidence: 0,
        reasoning: 'AI unavailable — requires manual eligibility review',
        method: 'AI' as const,
      }))
    }
    const model = await getConfiguredModel()
    console.log(`[AI Eligibility] Using model: ${model} for ${projects.length} projects`)
    // Convert to the relation shape expected by the anonymizer, then strip PII.
    const projectsWithRelations = projects.map(toProjectWithRelations)
    const { anonymized, mappings } = anonymizeProjectsForAI(projectsWithRelations, 'ELIGIBILITY')
    // GDPR gate: refuse to call OpenAI if any PII survived anonymization.
    if (!validateAnonymizedProjects(anonymized)) {
      console.error('[AI Eligibility] Anonymization validation failed')
      throw new Error('GDPR compliance check failed: PII detected in anonymized data')
    }
    let totalTokens = 0
    // Process sequentially in fixed-size batches. `anonymized` and `mappings`
    // are sliced with the same bounds — assumed index-aligned as produced
    // together by anonymizeProjectsForAI; verify if that helper changes.
    for (let i = 0; i < anonymized.length; i += BATCH_SIZE) {
      const batchAnon = anonymized.slice(i, i + BATCH_SIZE)
      const batchMappings = mappings.slice(i, i + BATCH_SIZE)
      console.log(`[AI Eligibility] Processing batch ${Math.floor(i / BATCH_SIZE) + 1}/${Math.ceil(anonymized.length / BATCH_SIZE)}`)
      const { results: batchResults, tokensUsed } = await processEligibilityBatch(
        openai,
        model,
        criteriaText,
        batchAnon,
        batchMappings,
        userId,
        awardId
      )
      results.push(...batchResults)
      totalTokens += tokensUsed
    }
    console.log(`[AI Eligibility] Completed. Total tokens: ${totalTokens}`)
  } catch (error) {
    const classified = classifyAIError(error)
    logAIError('AwardEligibility', 'aiInterpretCriteria', classified)
    // Log the failed attempt; token counts are unknown at this point.
    await logAIUsage({
      userId,
      action: 'AWARD_ELIGIBILITY',
      entityType: 'Award',
      entityId: awardId,
      model: 'unknown',
      promptTokens: 0,
      completionTokens: 0,
      totalTokens: 0,
      batchSize: projects.length,
      itemsProcessed: 0,
      status: 'ERROR',
      errorMessage: classified.message,
    })
    // Return all projects as needing manual review (partial batch results,
    // if any, are discarded here).
    return projects.map((p) => ({
      projectId: p.id,
      eligible: false,
      confidence: 0,
      reasoning: `AI error: ${classified.message}`,
      method: 'AI' as const,
    }))
  }
  return results
}

View File

@@ -1,404 +1,404 @@
/**
* AI-Powered Evaluation Summary Service
*
* Generates AI summaries of jury evaluations for a project in a given round.
* Combines OpenAI analysis with server-side scoring pattern calculations.
*
* GDPR Compliance:
* - All evaluation data is anonymized before AI processing
* - No juror names, emails, or identifiers are sent to OpenAI
* - Only scores, feedback text, and binary decisions are included
*/
import { TRPCError } from '@trpc/server'
import { getOpenAI, getConfiguredModel, buildCompletionParams, AI_MODELS } from '@/lib/openai'
import { logAIUsage, extractTokenUsage } from '@/server/utils/ai-usage'
import { classifyAIError, createParseError, logAIError } from './ai-errors'
import { sanitizeText } from './anonymization'
import type { PrismaClient, Prisma } from '@prisma/client'
// ─── Types ──────────────────────────────────────────────────────────────────
/**
 * An evaluation row as selected from Prisma, including the juror relation.
 * The `assignment.user` identity must be stripped (see anonymizeEvaluations)
 * before any data is sent to OpenAI.
 */
interface EvaluationForSummary {
  id: string
  // Scores keyed by criterion id (see CriterionDef.id).
  criterionScoresJson: Record<string, number> | null
  globalScore: number | null
  binaryDecision: boolean | null
  feedbackText: string | null
  assignment: {
    user: {
      id: string
      name: string | null
      email: string
    }
  }
}
/** Evaluation with all juror identifiers removed — safe for AI processing. */
interface AnonymizedEvaluation {
  criterionScores: Record<string, number> | null
  globalScore: number | null
  binaryDecision: boolean | null
  feedbackText: string | null
}
/** Evaluation-form criterion: `id` keys the scores, `label` is the display name. */
interface CriterionDef {
  id: string
  label: string
}
/** Shape of the JSON object the AI is asked to return (see buildSummaryPrompt). */
interface AIResponsePayload {
  overallAssessment: string
  strengths: string[]
  weaknesses: string[]
  themes: Array<{
    theme: string
    sentiment: 'positive' | 'negative' | 'mixed'
    // Number of evaluators mentioning this theme.
    frequency: number
  }>
  recommendation: string
}
/** Aggregate statistics computed server-side (no AI) by computeScoringPatterns. */
interface ScoringPatterns {
  averageGlobalScore: number | null
  // 0-1; 1 means full evaluator agreement on the global score.
  consensus: number
  // Average score per criterion, keyed by criterion label.
  criterionAverages: Record<string, number>
  evaluatorCount: number
}
/** Persisted summary returned to the caller of generateSummary. */
export interface EvaluationSummaryResult {
  id: string
  projectId: string
  stageId: string
  summaryJson: AIResponsePayload & { scoringPatterns: ScoringPatterns }
  generatedAt: Date
  model: string
  tokensUsed: number
}
// ─── Anonymization ──────────────────────────────────────────────────────────
/**
 * Strip juror identity (the name/email carried on the assignment relation)
 * from evaluations, keeping only scores, the binary decision, and sanitized
 * free-text feedback.
 */
export function anonymizeEvaluations(
  evaluations: EvaluationForSummary[]
): AnonymizedEvaluation[] {
  const anonymized: AnonymizedEvaluation[] = []
  for (const evaluation of evaluations) {
    const feedbackText = evaluation.feedbackText
      ? sanitizeText(evaluation.feedbackText)
      : null
    anonymized.push({
      criterionScores: evaluation.criterionScoresJson as Record<string, number> | null,
      globalScore: evaluation.globalScore,
      binaryDecision: evaluation.binaryDecision,
      feedbackText,
    })
  }
  return anonymized
}
// ─── Prompt Building ────────────────────────────────────────────────────────
/**
 * Assemble the OpenAI prompt requesting a structured JSON summary of the
 * anonymized evaluations. The project title is sanitized before inclusion.
 */
export function buildSummaryPrompt(
  anonymizedEvaluations: AnonymizedEvaluation[],
  projectTitle: string,
  criteriaLabels: string[]
): string {
  const title = sanitizeText(projectTitle)
  const criteriaList = criteriaLabels.join(', ')
  const evaluationCount = anonymizedEvaluations.length
  const evaluationsJson = JSON.stringify(anonymizedEvaluations, null, 2)
  return `You are analyzing jury evaluations for a project competition.
PROJECT: "${title}"
EVALUATION CRITERIA: ${criteriaList}
EVALUATIONS (${evaluationCount} total):
${evaluationsJson}
Analyze these evaluations and return a JSON object with this exact structure:
{
"overallAssessment": "A 2-3 sentence summary of how the project was evaluated overall",
"strengths": ["strength 1", "strength 2", ...],
"weaknesses": ["weakness 1", "weakness 2", ...],
"themes": [
{ "theme": "theme name", "sentiment": "positive" | "negative" | "mixed", "frequency": <number of evaluators mentioning this> }
],
"recommendation": "A brief recommendation based on the evaluation consensus"
}
Guidelines:
- Base your analysis only on the provided evaluation data
- Identify common themes across evaluator feedback
- Note areas of agreement and disagreement
- Keep the assessment objective and balanced
- Do not include any personal identifiers`
}
// ─── Scoring Patterns (Server-Side) ─────────────────────────────────────────
/**
 * Derive aggregate scoring statistics from evaluations — pure server-side
 * computation, no AI involved.
 *
 * Consensus is 1 minus the population standard deviation of global scores
 * normalized by 4.5 (≈ max std dev on a 1-10 scale), clamped to [0, 1] and
 * rounded to two decimals; zero or one score yields full consensus (1).
 */
export function computeScoringPatterns(
  evaluations: EvaluationForSummary[],
  criteriaLabels: CriterionDef[]
): ScoringPatterns {
  const sum = (xs: number[]) => xs.reduce((acc, x) => acc + x, 0)

  const globalScores: number[] = []
  for (const ev of evaluations) {
    if (ev.globalScore !== null) globalScores.push(ev.globalScore)
  }

  const averageGlobalScore =
    globalScores.length > 0 ? sum(globalScores) / globalScores.length : null

  let consensus = 1
  if (averageGlobalScore !== null && globalScores.length > 1) {
    const mean = averageGlobalScore
    const variance =
      sum(globalScores.map((score) => (score - mean) ** 2)) / globalScores.length
    consensus = Math.max(0, 1 - Math.sqrt(variance) / 4.5)
  }

  // Per-criterion averages, keyed by the criterion's display label.
  const criterionAverages: Record<string, number> = {}
  for (const { id, label } of criteriaLabels) {
    const scores = evaluations
      .map((ev) => (ev.criterionScoresJson as Record<string, number> | null)?.[id])
      .filter((score): score is number => score !== undefined)
    if (scores.length > 0) {
      criterionAverages[label] = sum(scores) / scores.length
    }
  }

  return {
    averageGlobalScore,
    consensus: Math.round(consensus * 100) / 100,
    criterionAverages,
    evaluatorCount: evaluations.length,
  }
}
// ─── Main Orchestrator ──────────────────────────────────────────────────────
/**
 * Generate (or regenerate) an AI-powered evaluation summary for a project in
 * a given stage, persist it, and return the stored record.
 *
 * Steps:
 * 1. Load the project and its SUBMITTED evaluations for the stage.
 * 2. Anonymize evaluations (GDPR — juror identity never leaves the server).
 * 3. Ask OpenAI for a qualitative summary (JSON mode).
 * 4. Compute scoring patterns server-side (no AI).
 * 5. Upsert the merged summary keyed by (projectId, stageId).
 * 6. Log AI usage.
 *
 * @throws TRPCError NOT_FOUND when the project does not exist.
 * @throws TRPCError BAD_REQUEST when there are no submitted evaluations for
 *         this project/stage pair.
 * @throws TRPCError PRECONDITION_FAILED when OpenAI is not configured.
 * @throws TRPCError INTERNAL_SERVER_ERROR when the AI call fails or returns
 *         unparseable JSON (both are logged to the AI usage table first).
 */
export async function generateSummary({
  projectId,
  stageId,
  userId,
  prisma,
}: {
  projectId: string
  stageId: string
  userId: string
  prisma: PrismaClient
}): Promise<EvaluationSummaryResult> {
  // 1. Fetch the project (its title is needed for the prompt).
  const project = await prisma.project.findUnique({
    where: { id: projectId },
    select: {
      id: true,
      title: true,
    },
  })
  if (!project) {
    throw new TRPCError({ code: 'NOT_FOUND', message: 'Project not found' })
  }
  // Fetch submitted evaluations for this project in this stage. The juror
  // relation is selected here but stripped before AI processing.
  const evaluations = await prisma.evaluation.findMany({
    where: {
      status: 'SUBMITTED',
      assignment: {
        projectId,
        stageId,
      },
    },
    select: {
      id: true,
      criterionScoresJson: true,
      globalScore: true,
      binaryDecision: true,
      feedbackText: true,
      assignment: {
        select: {
          user: {
            select: { id: true, name: true, email: true },
          },
        },
      },
    },
  })
  if (evaluations.length === 0) {
    throw new TRPCError({
      code: 'BAD_REQUEST',
      message: 'No submitted evaluations found for this project in this stage',
    })
  }
  // Get the active evaluation form's criteria for this stage (may be absent,
  // in which case no per-criterion averages are computed).
  const form = await prisma.evaluationForm.findFirst({
    where: { stageId, isActive: true },
    select: { criteriaJson: true },
  })
  const criteria: CriterionDef[] = form?.criteriaJson
    ? (form.criteriaJson as unknown as CriterionDef[])
    : []
  const criteriaLabels = criteria.map((c) => c.label)
  // 2. Anonymize evaluations (drop juror name/email before the AI call).
  const typedEvaluations = evaluations as unknown as EvaluationForSummary[]
  const anonymized = anonymizeEvaluations(typedEvaluations)
  // 3. Build prompt and call OpenAI.
  const openai = await getOpenAI()
  if (!openai) {
    throw new TRPCError({
      code: 'PRECONDITION_FAILED',
      message: 'OpenAI is not configured. Please set up your API key in Settings.',
    })
  }
  const model = await getConfiguredModel(AI_MODELS.QUICK)
  const prompt = buildSummaryPrompt(anonymized, project.title, criteriaLabels)
  let aiResponse: AIResponsePayload
  let tokensUsed = 0
  try {
    const params = buildCompletionParams(model, {
      messages: [
        { role: 'user', content: prompt },
      ],
      jsonMode: true, // the prompt demands a strict JSON structure
      temperature: 0.3, // low temperature for stable, repeatable summaries
      maxTokens: 2000,
    })
    const response = await openai.chat.completions.create(params)
    const usage = extractTokenUsage(response)
    tokensUsed = usage.totalTokens
    const content = response.choices[0]?.message?.content
    if (!content) {
      throw new Error('Empty response from AI')
    }
    aiResponse = JSON.parse(content) as AIResponsePayload
  } catch (error) {
    if (error instanceof SyntaxError) {
      // Malformed JSON from the model: log it (totalTokens keeps the spend
      // already counted) and surface a retryable error to the client.
      const parseError = createParseError(error.message)
      logAIError('EvaluationSummary', 'generateSummary', parseError)
      await logAIUsage({
        userId,
        action: 'EVALUATION_SUMMARY',
        entityType: 'Project',
        entityId: projectId,
        model,
        promptTokens: 0,
        completionTokens: 0,
        totalTokens: tokensUsed,
        itemsProcessed: 0,
        status: 'ERROR',
        errorMessage: parseError.message,
      })
      throw new TRPCError({
        code: 'INTERNAL_SERVER_ERROR',
        message: 'Failed to parse AI response. Please try again.',
      })
    }
    // Any other failure (network, auth, rate limit, empty response).
    const classified = classifyAIError(error)
    logAIError('EvaluationSummary', 'generateSummary', classified)
    await logAIUsage({
      userId,
      action: 'EVALUATION_SUMMARY',
      entityType: 'Project',
      entityId: projectId,
      model,
      promptTokens: 0,
      completionTokens: 0,
      totalTokens: 0,
      itemsProcessed: 0,
      status: 'ERROR',
      errorMessage: classified.message,
    })
    throw new TRPCError({
      code: 'INTERNAL_SERVER_ERROR',
      message: classified.message,
    })
  }
  // 4. Compute scoring patterns (server-side, no AI).
  const scoringPatterns = computeScoringPatterns(typedEvaluations, criteria)
  // 5. Merge AI output with computed patterns; upsert by (projectId, stageId).
  const summaryJson = {
    ...aiResponse,
    scoringPatterns,
  }
  const summaryJsonValue = summaryJson as unknown as Prisma.InputJsonValue
  const summary = await prisma.evaluationSummary.upsert({
    where: {
      projectId_stageId: { projectId, stageId },
    },
    create: {
      projectId,
      stageId,
      summaryJson: summaryJsonValue,
      generatedById: userId,
      model,
      tokensUsed,
    },
    update: {
      summaryJson: summaryJsonValue,
      generatedAt: new Date(),
      generatedById: userId,
      model,
      tokensUsed,
    },
  })
  // 6. Log AI usage for the successful run.
  await logAIUsage({
    userId,
    action: 'EVALUATION_SUMMARY',
    entityType: 'Project',
    entityId: projectId,
    model,
    promptTokens: 0, // Detailed breakdown not always available
    completionTokens: 0,
    totalTokens: tokensUsed,
    itemsProcessed: evaluations.length,
    status: 'SUCCESS',
  })
  return {
    id: summary.id,
    projectId: summary.projectId,
    stageId: summary.stageId,
    summaryJson: summaryJson as AIResponsePayload & { scoringPatterns: ScoringPatterns },
    generatedAt: summary.generatedAt,
    model: summary.model,
    tokensUsed: summary.tokensUsed,
  }
}
/**
* AI-Powered Evaluation Summary Service
*
* Generates AI summaries of jury evaluations for a project in a given round.
* Combines OpenAI analysis with server-side scoring pattern calculations.
*
* GDPR Compliance:
* - All evaluation data is anonymized before AI processing
* - No juror names, emails, or identifiers are sent to OpenAI
* - Only scores, feedback text, and binary decisions are included
*/
import { TRPCError } from '@trpc/server'
import { getOpenAI, getConfiguredModel, buildCompletionParams, AI_MODELS } from '@/lib/openai'
import { logAIUsage, extractTokenUsage } from '@/server/utils/ai-usage'
import { classifyAIError, createParseError, logAIError } from './ai-errors'
import { sanitizeText } from './anonymization'
import type { PrismaClient, Prisma } from '@prisma/client'
// ─── Types ──────────────────────────────────────────────────────────────────
/**
 * An evaluation row as selected from Prisma, including the juror relation.
 * The `assignment.user` identity must be stripped (see anonymizeEvaluations)
 * before any data is sent to OpenAI.
 */
interface EvaluationForSummary {
  id: string
  // Scores keyed by criterion id (see CriterionDef.id).
  criterionScoresJson: Record<string, number> | null
  globalScore: number | null
  binaryDecision: boolean | null
  feedbackText: string | null
  assignment: {
    user: {
      id: string
      name: string | null
      email: string
    }
  }
}
/** Evaluation with all juror identifiers removed — safe for AI processing. */
interface AnonymizedEvaluation {
  criterionScores: Record<string, number> | null
  globalScore: number | null
  binaryDecision: boolean | null
  feedbackText: string | null
}
/** Evaluation-form criterion: `id` keys the scores, `label` is the display name. */
interface CriterionDef {
  id: string
  label: string
}
/** Shape of the JSON object the AI is asked to return (see buildSummaryPrompt). */
interface AIResponsePayload {
  overallAssessment: string
  strengths: string[]
  weaknesses: string[]
  themes: Array<{
    theme: string
    sentiment: 'positive' | 'negative' | 'mixed'
    // Number of evaluators mentioning this theme.
    frequency: number
  }>
  recommendation: string
}
/** Aggregate statistics computed server-side (no AI) by computeScoringPatterns. */
interface ScoringPatterns {
  averageGlobalScore: number | null
  // 0-1; 1 means full evaluator agreement on the global score.
  consensus: number
  // Average score per criterion, keyed by criterion label.
  criterionAverages: Record<string, number>
  evaluatorCount: number
}
/** Persisted summary returned to the caller of generateSummary. */
export interface EvaluationSummaryResult {
  id: string
  projectId: string
  stageId: string
  summaryJson: AIResponsePayload & { scoringPatterns: ScoringPatterns }
  generatedAt: Date
  model: string
  tokensUsed: number
}
// ─── Anonymization ──────────────────────────────────────────────────────────
/**
 * Strip juror identity (the name/email carried on the assignment relation)
 * from evaluations, keeping only scores, the binary decision, and sanitized
 * free-text feedback.
 */
export function anonymizeEvaluations(
  evaluations: EvaluationForSummary[]
): AnonymizedEvaluation[] {
  const anonymized: AnonymizedEvaluation[] = []
  for (const evaluation of evaluations) {
    const feedbackText = evaluation.feedbackText
      ? sanitizeText(evaluation.feedbackText)
      : null
    anonymized.push({
      criterionScores: evaluation.criterionScoresJson as Record<string, number> | null,
      globalScore: evaluation.globalScore,
      binaryDecision: evaluation.binaryDecision,
      feedbackText,
    })
  }
  return anonymized
}
// ─── Prompt Building ────────────────────────────────────────────────────────
/**
 * Assemble the OpenAI prompt requesting a structured JSON summary of the
 * anonymized evaluations. The project title is sanitized before inclusion.
 */
export function buildSummaryPrompt(
  anonymizedEvaluations: AnonymizedEvaluation[],
  projectTitle: string,
  criteriaLabels: string[]
): string {
  const title = sanitizeText(projectTitle)
  const criteriaList = criteriaLabels.join(', ')
  const evaluationCount = anonymizedEvaluations.length
  const evaluationsJson = JSON.stringify(anonymizedEvaluations, null, 2)
  return `You are analyzing jury evaluations for a project competition.
PROJECT: "${title}"
EVALUATION CRITERIA: ${criteriaList}
EVALUATIONS (${evaluationCount} total):
${evaluationsJson}
Analyze these evaluations and return a JSON object with this exact structure:
{
"overallAssessment": "A 2-3 sentence summary of how the project was evaluated overall",
"strengths": ["strength 1", "strength 2", ...],
"weaknesses": ["weakness 1", "weakness 2", ...],
"themes": [
{ "theme": "theme name", "sentiment": "positive" | "negative" | "mixed", "frequency": <number of evaluators mentioning this> }
],
"recommendation": "A brief recommendation based on the evaluation consensus"
}
Guidelines:
- Base your analysis only on the provided evaluation data
- Identify common themes across evaluator feedback
- Note areas of agreement and disagreement
- Keep the assessment objective and balanced
- Do not include any personal identifiers`
}
// ─── Scoring Patterns (Server-Side) ─────────────────────────────────────────
/**
 * Derive aggregate scoring statistics from evaluations — pure server-side
 * computation, no AI involved.
 *
 * Consensus is 1 minus the population standard deviation of global scores
 * normalized by 4.5 (≈ max std dev on a 1-10 scale), clamped to [0, 1] and
 * rounded to two decimals; zero or one score yields full consensus (1).
 */
export function computeScoringPatterns(
  evaluations: EvaluationForSummary[],
  criteriaLabels: CriterionDef[]
): ScoringPatterns {
  const sum = (xs: number[]) => xs.reduce((acc, x) => acc + x, 0)

  const globalScores: number[] = []
  for (const ev of evaluations) {
    if (ev.globalScore !== null) globalScores.push(ev.globalScore)
  }

  const averageGlobalScore =
    globalScores.length > 0 ? sum(globalScores) / globalScores.length : null

  let consensus = 1
  if (averageGlobalScore !== null && globalScores.length > 1) {
    const mean = averageGlobalScore
    const variance =
      sum(globalScores.map((score) => (score - mean) ** 2)) / globalScores.length
    consensus = Math.max(0, 1 - Math.sqrt(variance) / 4.5)
  }

  // Per-criterion averages, keyed by the criterion's display label.
  const criterionAverages: Record<string, number> = {}
  for (const { id, label } of criteriaLabels) {
    const scores = evaluations
      .map((ev) => (ev.criterionScoresJson as Record<string, number> | null)?.[id])
      .filter((score): score is number => score !== undefined)
    if (scores.length > 0) {
      criterionAverages[label] = sum(scores) / scores.length
    }
  }

  return {
    averageGlobalScore,
    consensus: Math.round(consensus * 100) / 100,
    criterionAverages,
    evaluatorCount: evaluations.length,
  }
}
// ─── Main Orchestrator ──────────────────────────────────────────────────────
/**
 * Generate an AI-powered evaluation summary for a project in a round.
 *
 * Pipeline:
 *  1. Load the project and all SUBMITTED evaluations for the given stage.
 *  2. Anonymize evaluator data before it leaves the server (GDPR).
 *  3. Ask the configured OpenAI model for a structured JSON summary.
 *  4. Compute deterministic scoring patterns server-side (no AI).
 *  5. Upsert the merged summary for (project, stage) and log token usage.
 *
 * @throws TRPCError NOT_FOUND when the project does not exist.
 * @throws TRPCError BAD_REQUEST when no submitted evaluations exist for the stage.
 * @throws TRPCError PRECONDITION_FAILED when OpenAI is not configured.
 * @throws TRPCError INTERNAL_SERVER_ERROR when the AI call or response parsing fails.
 */
export async function generateSummary({
  projectId,
  stageId,
  userId,
  prisma,
}: {
  projectId: string
  stageId: string
  userId: string
  prisma: PrismaClient
}): Promise<EvaluationSummaryResult> {
  // 1. Fetch project with evaluations and form criteria
  const project = await prisma.project.findUnique({
    where: { id: projectId },
    select: {
      id: true,
      title: true,
    },
  })
  if (!project) {
    throw new TRPCError({ code: 'NOT_FOUND', message: 'Project not found' })
  }
  // Fetch submitted evaluations for this project in this stage
  const evaluations = await prisma.evaluation.findMany({
    where: {
      status: 'SUBMITTED',
      assignment: {
        projectId,
        stageId,
      },
    },
    select: {
      id: true,
      criterionScoresJson: true,
      globalScore: true,
      binaryDecision: true,
      feedbackText: true,
      assignment: {
        select: {
          user: {
            select: { id: true, name: true, email: true },
          },
        },
      },
    },
  })
  if (evaluations.length === 0) {
    throw new TRPCError({
      code: 'BAD_REQUEST',
      message: 'No submitted evaluations found for this project in this stage',
    })
  }
  // Get evaluation form criteria for this stage (optional - a summary can
  // still be generated when no active form exists; criteria will be empty)
  const form = await prisma.evaluationForm.findFirst({
    where: { stageId, isActive: true },
    select: { criteriaJson: true },
  })
  const criteria: CriterionDef[] = form?.criteriaJson
    ? (form.criteriaJson as unknown as CriterionDef[])
    : []
  const criteriaLabels = criteria.map((c) => c.label)
  // 2. Anonymize evaluations so no evaluator PII reaches OpenAI
  const typedEvaluations = evaluations as unknown as EvaluationForSummary[]
  const anonymized = anonymizeEvaluations(typedEvaluations)
  // 3. Build prompt and call OpenAI
  const openai = await getOpenAI()
  if (!openai) {
    throw new TRPCError({
      code: 'PRECONDITION_FAILED',
      message: 'OpenAI is not configured. Please set up your API key in Settings.',
    })
  }
  const model = await getConfiguredModel(AI_MODELS.QUICK)
  const prompt = buildSummaryPrompt(anonymized, project.title, criteriaLabels)
  let aiResponse: AIResponsePayload
  let tokensUsed = 0
  try {
    const params = buildCompletionParams(model, {
      messages: [
        { role: 'user', content: prompt },
      ],
      jsonMode: true,
      temperature: 0.3,
      maxTokens: 2000,
    })
    const response = await openai.chat.completions.create(params)
    const usage = extractTokenUsage(response)
    tokensUsed = usage.totalTokens
    const content = response.choices[0]?.message?.content
    if (!content) {
      throw new Error('Empty response from AI')
    }
    aiResponse = JSON.parse(content) as AIResponsePayload
  } catch (error) {
    // Malformed JSON from the model: tokens were already consumed, so the
    // ERROR usage row keeps whatever total was captured before parsing failed.
    if (error instanceof SyntaxError) {
      const parseError = createParseError(error.message)
      logAIError('EvaluationSummary', 'generateSummary', parseError)
      await logAIUsage({
        userId,
        action: 'EVALUATION_SUMMARY',
        entityType: 'Project',
        entityId: projectId,
        model,
        promptTokens: 0,
        completionTokens: 0,
        totalTokens: tokensUsed,
        itemsProcessed: 0,
        status: 'ERROR',
        errorMessage: parseError.message,
      })
      throw new TRPCError({
        code: 'INTERNAL_SERVER_ERROR',
        message: 'Failed to parse AI response. Please try again.',
      })
    }
    // Any other failure (network, rate limit, auth, ...): classify, log a
    // zero-token ERROR usage row, and surface the classified message.
    const classified = classifyAIError(error)
    logAIError('EvaluationSummary', 'generateSummary', classified)
    await logAIUsage({
      userId,
      action: 'EVALUATION_SUMMARY',
      entityType: 'Project',
      entityId: projectId,
      model,
      promptTokens: 0,
      completionTokens: 0,
      totalTokens: 0,
      itemsProcessed: 0,
      status: 'ERROR',
      errorMessage: classified.message,
    })
    throw new TRPCError({
      code: 'INTERNAL_SERVER_ERROR',
      message: classified.message,
    })
  }
  // 4. Compute scoring patterns (server-side, no AI)
  const scoringPatterns = computeScoringPatterns(typedEvaluations, criteria)
  // 5. Merge the AI narrative with deterministic patterns and upsert,
  // keyed on the (projectId, stageId) unique pair so regeneration overwrites
  const summaryJson = {
    ...aiResponse,
    scoringPatterns,
  }
  const summaryJsonValue = summaryJson as unknown as Prisma.InputJsonValue
  const summary = await prisma.evaluationSummary.upsert({
    where: {
      projectId_stageId: { projectId, stageId },
    },
    create: {
      projectId,
      stageId,
      summaryJson: summaryJsonValue,
      generatedById: userId,
      model,
      tokensUsed,
    },
    update: {
      summaryJson: summaryJsonValue,
      generatedAt: new Date(),
      generatedById: userId,
      model,
      tokensUsed,
    },
  })
  // 6. Log AI usage
  await logAIUsage({
    userId,
    action: 'EVALUATION_SUMMARY',
    entityType: 'Project',
    entityId: projectId,
    model,
    promptTokens: 0, // Detailed breakdown not always available
    completionTokens: 0,
    totalTokens: tokensUsed,
    itemsProcessed: evaluations.length,
    status: 'SUCCESS',
  })
  return {
    id: summary.id,
    projectId: summary.projectId,
    stageId: summary.stageId,
    summaryJson: summaryJson as AIResponsePayload & { scoringPatterns: ScoringPatterns },
    generatedAt: summary.generatedAt,
    model: summary.model,
    tokensUsed: summary.tokensUsed,
  }
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,422 +1,422 @@
/**
* AI-Powered Project Tagging Service
*
* Analyzes projects and assigns expertise tags automatically.
*
* Features:
* - Single project tagging (on-submit or manual)
* - Batch tagging for rounds
* - Confidence scores for each tag
* - Additive only - never removes existing tags
*
* GDPR Compliance:
* - All project data is anonymized before AI processing
* - Only necessary fields sent to OpenAI
* - No personal identifiers in prompts or responses
*/
import { prisma } from '@/lib/prisma'
import { getOpenAI, getConfiguredModel, buildCompletionParams } from '@/lib/openai'
import { logAIUsage, extractTokenUsage } from '@/server/utils/ai-usage'
import { classifyAIError, createParseError, logAIError } from './ai-errors'
import {
anonymizeProjectsForAI,
validateAnonymizedProjects,
toProjectWithRelations,
type AnonymizedProjectForAI,
} from './anonymization'
// ─── Types ──────────────────────────────────────────────────────────────────
// A single tag proposed by the AI for a project.
export interface TagSuggestion {
  // ID of the matched ExpertiseTag row
  tagId: string
  // Human-readable tag name (exact name from the available-tags list)
  tagName: string
  // AI-reported confidence, clamped into [0, 1] on ingestion
  confidence: number
  // Brief AI explanation of why the tag fits
  reasoning: string
}
// Outcome of a tagging run for one project.
export interface TaggingResult {
  projectId: string
  // Every suggestion the AI returned (including low-confidence ones)
  suggestions: TagSuggestion[]
  // Subset of suggestions that was actually persisted as ProjectTag rows
  applied: TagSuggestion[]
  // Total OpenAI tokens consumed by the run
  tokensUsed: number
}
// Shape of an active ExpertiseTag as exposed to the AI prompt.
interface AvailableTag {
  id: string
  name: string
  category: string | null
  description: string | null
}
// ─── Constants ───────────────────────────────────────────────────────────────
// Minimum AI confidence required before a suggested tag is applied
const CONFIDENCE_THRESHOLD = 0.5
// Fallback cap on tags per project when no system setting is configured
const DEFAULT_MAX_TAGS = 5
// System prompt optimized for tag suggestion
const TAG_SUGGESTION_SYSTEM_PROMPT = `You are an expert at categorizing ocean conservation and sustainability projects.
Analyze the project and suggest the most relevant expertise tags from the provided list.
Consider the project's focus areas, technology, methodology, and domain.
Return JSON with this format:
{
"suggestions": [
{
"tag_name": "exact tag name from list",
"confidence": 0.0-1.0,
"reasoning": "brief explanation why this tag fits"
}
]
}
Rules:
- Only suggest tags from the provided list (exact names)
- Order by relevance (most relevant first)
- Confidence should reflect how well the tag matches
- Maximum 7 suggestions per project
- Be conservative - only suggest tags that truly apply`
// ─── Helper Functions ────────────────────────────────────────────────────────
/**
 * Get system settings for AI tagging.
 *
 * Reads feature flags from SystemSettings:
 * - `ai_tagging_enabled` explicitly controls tagging; when unset, the general
 *   `ai_enabled` flag is used as a fallback.
 * - `ai_tagging_max_tags` caps the number of tags per project; falls back to
 *   DEFAULT_MAX_TAGS when unset or not a valid number.
 *
 * @returns The effective enabled flag and per-project tag cap.
 */
export async function getTaggingSettings(): Promise<{
  enabled: boolean
  maxTags: number
}> {
  const settings = await prisma.systemSettings.findMany({
    where: {
      key: {
        in: ['ai_tagging_enabled', 'ai_tagging_max_tags', 'ai_enabled'],
      },
    },
  })
  const settingsMap = new Map(settings.map((s) => [s.key, s.value]))
  // AI tagging is enabled if:
  // 1. ai_tagging_enabled is explicitly 'true', OR
  // 2. ai_tagging_enabled is not set but ai_enabled is 'true' (fall back to general AI setting)
  const taggingEnabled = settingsMap.get('ai_tagging_enabled')
  const aiEnabled = settingsMap.get('ai_enabled')
  const enabled = taggingEnabled === 'true' || (taggingEnabled === undefined && aiEnabled === 'true')
  // Parse base-10 explicitly and guard against malformed stored values:
  // the original parseInt(... || default) returned NaN for non-numeric input.
  const parsedMaxTags = parseInt(settingsMap.get('ai_tagging_max_tags') ?? '', 10)
  return {
    enabled,
    maxTags: Number.isNaN(parsedMaxTags) ? DEFAULT_MAX_TAGS : parsedMaxTags,
  }
}
/**
 * Fetch every active expertise tag, ordered by category and then sort order.
 *
 * @returns Active tags with the fields needed for AI prompting.
 */
export async function getAvailableTags(): Promise<AvailableTag[]> {
  const activeTags = await prisma.expertiseTag.findMany({
    select: {
      id: true,
      name: true,
      category: true,
      description: true,
    },
    where: { isActive: true },
    orderBy: [{ category: 'asc' }, { sortOrder: 'asc' }],
  })
  return activeTags
}
// ─── AI Tagging Core ─────────────────────────────────────────────────────────
/**
 * Call OpenAI to get tag suggestions for a single, already-anonymized project.
 *
 * Best-effort when unconfigured: returns an empty suggestion list (and logs a
 * warning) instead of throwing when no OpenAI client is available.
 *
 * @param anonymizedProject - GDPR-safe project payload (no PII).
 * @param availableTags - Active tags the AI may choose from (matched by name).
 * @param userId - Optional actor recorded in the AI usage log.
 * @returns Suggestions mapped back to tag IDs, plus total tokens consumed.
 * @throws Re-throws any OpenAI or JSON-parse error after logging an ERROR usage row.
 */
async function getAISuggestions(
  anonymizedProject: AnonymizedProjectForAI,
  availableTags: AvailableTag[],
  userId?: string
): Promise<{ suggestions: TagSuggestion[]; tokensUsed: number }> {
  const openai = await getOpenAI()
  if (!openai) {
    console.warn('[AI Tagging] OpenAI not configured')
    return { suggestions: [], tokensUsed: 0 }
  }
  const model = await getConfiguredModel()
  // Build tag list for prompt (id omitted - the AI answers with tag names)
  const tagList = availableTags.map((t) => ({
    name: t.name,
    category: t.category,
    description: t.description,
  }))
  const userPrompt = `PROJECT:
${JSON.stringify(anonymizedProject, null, 2)}
AVAILABLE TAGS:
${JSON.stringify(tagList, null, 2)}
Suggest relevant tags for this project.`
  try {
    const params = buildCompletionParams(model, {
      messages: [
        { role: 'system', content: TAG_SUGGESTION_SYSTEM_PROMPT },
        { role: 'user', content: userPrompt },
      ],
      jsonMode: true,
      temperature: 0.3,
      maxTokens: 2000,
    })
    const response = await openai.chat.completions.create(params)
    const usage = extractTokenUsage(response)
    // Log usage. NOTE: logged as SUCCESS before the response body is parsed,
    // so a later parse failure still accounts for the tokens actually spent.
    await logAIUsage({
      userId,
      action: 'PROJECT_TAGGING',
      entityType: 'Project',
      entityId: anonymizedProject.project_id,
      model,
      promptTokens: usage.promptTokens,
      completionTokens: usage.completionTokens,
      totalTokens: usage.totalTokens,
      batchSize: 1,
      itemsProcessed: 1,
      status: 'SUCCESS',
    })
    const content = response.choices[0]?.message?.content
    if (!content) {
      throw new Error('Empty response from AI')
    }
    const parsed = JSON.parse(content) as {
      suggestions: Array<{
        tag_name: string
        confidence: number
        reasoning: string
      }>
    }
    // Map to TagSuggestion format, matching tag names to IDs
    // (case-insensitive; suggestions naming unknown tags are silently dropped)
    const suggestions: TagSuggestion[] = []
    for (const s of parsed.suggestions || []) {
      const tag = availableTags.find(
        (t) => t.name.toLowerCase() === s.tag_name.toLowerCase()
      )
      if (tag) {
        suggestions.push({
          tagId: tag.id,
          tagName: tag.name,
          // Clamp AI-reported confidence into [0, 1]
          confidence: Math.max(0, Math.min(1, s.confidence)),
          reasoning: s.reasoning,
        })
      }
    }
    return { suggestions, tokensUsed: usage.totalTokens }
  } catch (error) {
    // Malformed JSON from the model gets a structured parse-error log entry
    if (error instanceof SyntaxError) {
      const parseError = createParseError(error.message)
      logAIError('Tagging', 'getAISuggestions', parseError)
    }
    // Record the failed attempt; token counts are unknown here, logged as 0
    await logAIUsage({
      userId,
      action: 'PROJECT_TAGGING',
      entityType: 'Project',
      entityId: anonymizedProject.project_id,
      model,
      promptTokens: 0,
      completionTokens: 0,
      totalTokens: 0,
      batchSize: 1,
      itemsProcessed: 0,
      status: 'ERROR',
      errorMessage: error instanceof Error ? error.message : 'Unknown error',
    })
    throw error
  }
}
// ─── Public API ──────────────────────────────────────────────────────────────
/**
 * Tag a single project with AI-suggested expertise tags.
 *
 * Behavior:
 * - No-op (empty result) when AI tagging is disabled or no active tags exist.
 * - Only applies suggestions with confidence >= CONFIDENCE_THRESHOLD.
 * - Additive only - never removes existing tags.
 * - Respects the maxTags setting (existing tags count against the cap).
 *
 * @param projectId - Project to tag; throws when it does not exist.
 * @param userId - Optional actor recorded in the AI usage log.
 * @returns All AI suggestions plus the subset that was actually applied.
 */
export async function tagProject(
  projectId: string,
  userId?: string
): Promise<TaggingResult> {
  const settings = await getTaggingSettings()
  if (!settings.enabled) {
    return {
      projectId,
      suggestions: [],
      applied: [],
      tokensUsed: 0,
    }
  }
  // Fetch project with the fields needed for anonymization and tag dedup
  const project = await prisma.project.findUnique({
    where: { id: projectId },
    include: {
      projectTags: true,
      files: { select: { fileType: true } },
      _count: { select: { teamMembers: true, files: true } },
    },
  })
  if (!project) {
    throw new Error(`Project not found: ${projectId}`)
  }
  // Get available tags
  const availableTags = await getAvailableTags()
  if (availableTags.length === 0) {
    return {
      projectId,
      suggestions: [],
      applied: [],
      tokensUsed: 0,
    }
  }
  // Anonymize project data (the ID mapping returned by the anonymizer is not
  // needed here - we only ever tag the single project we started from)
  const projectWithRelations = toProjectWithRelations(project)
  const { anonymized } = anonymizeProjectsForAI([projectWithRelations], 'FILTERING')
  // Validate anonymization before anything is sent to the AI provider
  if (!validateAnonymizedProjects(anonymized)) {
    throw new Error('GDPR compliance check failed: PII detected in anonymized data')
  }
  // Get AI suggestions
  const { suggestions, tokensUsed } = await getAISuggestions(
    anonymized[0],
    availableTags,
    userId
  )
  // Keep only sufficiently confident suggestions
  const validSuggestions = suggestions.filter(
    (s) => s.confidence >= CONFIDENCE_THRESHOLD
  )
  // Never duplicate existing tags, and never exceed the configured cap
  const existingTagIds = new Set(project.projectTags.map((pt) => pt.tagId))
  const currentTagCount = project.projectTags.length
  const remainingSlots = Math.max(0, settings.maxTags - currentTagCount)
  const newSuggestions = validSuggestions
    .filter((s) => !existingTagIds.has(s.tagId))
    .slice(0, remainingSlots)
  // Apply new tags one by one so a single failure does not abort the rest
  const applied: TagSuggestion[] = []
  for (const suggestion of newSuggestions) {
    try {
      await prisma.projectTag.create({
        data: {
          projectId,
          tagId: suggestion.tagId,
          confidence: suggestion.confidence,
          source: 'AI',
        },
      })
      applied.push(suggestion)
    } catch (error) {
      // Skip if tag already exists (race condition)
      console.warn(`[AI Tagging] Failed to apply tag ${suggestion.tagName}: ${error}`)
    }
  }
  return {
    projectId,
    suggestions,
    applied,
    tokensUsed,
  }
}
/**
* Get tag suggestions for a project without applying them
* Useful for preview/review before applying
*/
export async function getTagSuggestions(
projectId: string,
userId?: string
): Promise<TagSuggestion[]> {
// Fetch project
const project = await prisma.project.findUnique({
where: { id: projectId },
include: {
files: { select: { fileType: true } },
_count: { select: { teamMembers: true, files: true } },
},
})
if (!project) {
throw new Error(`Project not found: ${projectId}`)
}
// Get available tags
const availableTags = await getAvailableTags()
if (availableTags.length === 0) {
return []
}
// Anonymize project data
const projectWithRelations = toProjectWithRelations(project)
const { anonymized } = anonymizeProjectsForAI([projectWithRelations], 'FILTERING')
// Validate anonymization
if (!validateAnonymizedProjects(anonymized)) {
throw new Error('GDPR compliance check failed')
}
// Get AI suggestions
const { suggestions } = await getAISuggestions(anonymized[0], availableTags, userId)
return suggestions
}
/**
 * Manually attach a tag to a project (idempotent upsert: re-adding an
 * existing tag simply resets it to MANUAL with full confidence).
 */
export async function addProjectTag(
  projectId: string,
  tagId: string
): Promise<void> {
  const manualAttributes = { source: 'MANUAL' as const, confidence: 1.0 }
  await prisma.projectTag.upsert({
    where: { projectId_tagId: { projectId, tagId } },
    create: { projectId, tagId, ...manualAttributes },
    update: { ...manualAttributes },
  })
}
/**
 * Detach a tag from a project (no-op when the link does not exist).
 */
export async function removeProjectTag(
  projectId: string,
  tagId: string
): Promise<void> {
  await prisma.projectTag.deleteMany({ where: { projectId, tagId } })
}
/**
* AI-Powered Project Tagging Service
*
* Analyzes projects and assigns expertise tags automatically.
*
* Features:
* - Single project tagging (on-submit or manual)
* - Batch tagging for rounds
* - Confidence scores for each tag
* - Additive only - never removes existing tags
*
* GDPR Compliance:
* - All project data is anonymized before AI processing
* - Only necessary fields sent to OpenAI
* - No personal identifiers in prompts or responses
*/
import { prisma } from '@/lib/prisma'
import { getOpenAI, getConfiguredModel, buildCompletionParams } from '@/lib/openai'
import { logAIUsage, extractTokenUsage } from '@/server/utils/ai-usage'
import { classifyAIError, createParseError, logAIError } from './ai-errors'
import {
anonymizeProjectsForAI,
validateAnonymizedProjects,
toProjectWithRelations,
type AnonymizedProjectForAI,
} from './anonymization'
// ─── Types ──────────────────────────────────────────────────────────────────
// A single tag proposed by the AI for a project.
export interface TagSuggestion {
  // ID of the matched ExpertiseTag row
  tagId: string
  // Human-readable tag name (exact name from the available-tags list)
  tagName: string
  // AI-reported confidence, clamped into [0, 1] on ingestion
  confidence: number
  // Brief AI explanation of why the tag fits
  reasoning: string
}
// Outcome of a tagging run for one project.
export interface TaggingResult {
  projectId: string
  // Every suggestion the AI returned (including low-confidence ones)
  suggestions: TagSuggestion[]
  // Subset of suggestions that was actually persisted as ProjectTag rows
  applied: TagSuggestion[]
  // Total OpenAI tokens consumed by the run
  tokensUsed: number
}
// Shape of an active ExpertiseTag as exposed to the AI prompt.
interface AvailableTag {
  id: string
  name: string
  category: string | null
  description: string | null
}
// ─── Constants ───────────────────────────────────────────────────────────────
// Minimum AI confidence required before a suggested tag is applied
const CONFIDENCE_THRESHOLD = 0.5
// Fallback cap on tags per project when no system setting is configured
const DEFAULT_MAX_TAGS = 5
// System prompt optimized for tag suggestion
const TAG_SUGGESTION_SYSTEM_PROMPT = `You are an expert at categorizing ocean conservation and sustainability projects.
Analyze the project and suggest the most relevant expertise tags from the provided list.
Consider the project's focus areas, technology, methodology, and domain.
Return JSON with this format:
{
"suggestions": [
{
"tag_name": "exact tag name from list",
"confidence": 0.0-1.0,
"reasoning": "brief explanation why this tag fits"
}
]
}
Rules:
- Only suggest tags from the provided list (exact names)
- Order by relevance (most relevant first)
- Confidence should reflect how well the tag matches
- Maximum 7 suggestions per project
- Be conservative - only suggest tags that truly apply`
// ─── Helper Functions ────────────────────────────────────────────────────────
/**
 * Get system settings for AI tagging.
 *
 * Reads feature flags from SystemSettings:
 * - `ai_tagging_enabled` explicitly controls tagging; when unset, the general
 *   `ai_enabled` flag is used as a fallback.
 * - `ai_tagging_max_tags` caps the number of tags per project; falls back to
 *   DEFAULT_MAX_TAGS when unset or not a valid number.
 *
 * @returns The effective enabled flag and per-project tag cap.
 */
export async function getTaggingSettings(): Promise<{
  enabled: boolean
  maxTags: number
}> {
  const settings = await prisma.systemSettings.findMany({
    where: {
      key: {
        in: ['ai_tagging_enabled', 'ai_tagging_max_tags', 'ai_enabled'],
      },
    },
  })
  const settingsMap = new Map(settings.map((s) => [s.key, s.value]))
  // AI tagging is enabled if:
  // 1. ai_tagging_enabled is explicitly 'true', OR
  // 2. ai_tagging_enabled is not set but ai_enabled is 'true' (fall back to general AI setting)
  const taggingEnabled = settingsMap.get('ai_tagging_enabled')
  const aiEnabled = settingsMap.get('ai_enabled')
  const enabled = taggingEnabled === 'true' || (taggingEnabled === undefined && aiEnabled === 'true')
  // Parse base-10 explicitly and guard against malformed stored values:
  // the original parseInt(... || default) returned NaN for non-numeric input.
  const parsedMaxTags = parseInt(settingsMap.get('ai_tagging_max_tags') ?? '', 10)
  return {
    enabled,
    maxTags: Number.isNaN(parsedMaxTags) ? DEFAULT_MAX_TAGS : parsedMaxTags,
  }
}
/**
 * Fetch every active expertise tag, ordered by category and then sort order.
 *
 * @returns Active tags with the fields needed for AI prompting.
 */
export async function getAvailableTags(): Promise<AvailableTag[]> {
  const activeTags = await prisma.expertiseTag.findMany({
    select: {
      id: true,
      name: true,
      category: true,
      description: true,
    },
    where: { isActive: true },
    orderBy: [{ category: 'asc' }, { sortOrder: 'asc' }],
  })
  return activeTags
}
// ─── AI Tagging Core ─────────────────────────────────────────────────────────
/**
 * Call OpenAI to get tag suggestions for a single, already-anonymized project.
 *
 * Best-effort when unconfigured: returns an empty suggestion list (and logs a
 * warning) instead of throwing when no OpenAI client is available.
 *
 * @param anonymizedProject - GDPR-safe project payload (no PII).
 * @param availableTags - Active tags the AI may choose from (matched by name).
 * @param userId - Optional actor recorded in the AI usage log.
 * @returns Suggestions mapped back to tag IDs, plus total tokens consumed.
 * @throws Re-throws any OpenAI or JSON-parse error after logging an ERROR usage row.
 */
async function getAISuggestions(
  anonymizedProject: AnonymizedProjectForAI,
  availableTags: AvailableTag[],
  userId?: string
): Promise<{ suggestions: TagSuggestion[]; tokensUsed: number }> {
  const openai = await getOpenAI()
  if (!openai) {
    console.warn('[AI Tagging] OpenAI not configured')
    return { suggestions: [], tokensUsed: 0 }
  }
  const model = await getConfiguredModel()
  // Build tag list for prompt (id omitted - the AI answers with tag names)
  const tagList = availableTags.map((t) => ({
    name: t.name,
    category: t.category,
    description: t.description,
  }))
  const userPrompt = `PROJECT:
${JSON.stringify(anonymizedProject, null, 2)}
AVAILABLE TAGS:
${JSON.stringify(tagList, null, 2)}
Suggest relevant tags for this project.`
  try {
    const params = buildCompletionParams(model, {
      messages: [
        { role: 'system', content: TAG_SUGGESTION_SYSTEM_PROMPT },
        { role: 'user', content: userPrompt },
      ],
      jsonMode: true,
      temperature: 0.3,
      maxTokens: 2000,
    })
    const response = await openai.chat.completions.create(params)
    const usage = extractTokenUsage(response)
    // Log usage. NOTE: logged as SUCCESS before the response body is parsed,
    // so a later parse failure still accounts for the tokens actually spent.
    await logAIUsage({
      userId,
      action: 'PROJECT_TAGGING',
      entityType: 'Project',
      entityId: anonymizedProject.project_id,
      model,
      promptTokens: usage.promptTokens,
      completionTokens: usage.completionTokens,
      totalTokens: usage.totalTokens,
      batchSize: 1,
      itemsProcessed: 1,
      status: 'SUCCESS',
    })
    const content = response.choices[0]?.message?.content
    if (!content) {
      throw new Error('Empty response from AI')
    }
    const parsed = JSON.parse(content) as {
      suggestions: Array<{
        tag_name: string
        confidence: number
        reasoning: string
      }>
    }
    // Map to TagSuggestion format, matching tag names to IDs
    // (case-insensitive; suggestions naming unknown tags are silently dropped)
    const suggestions: TagSuggestion[] = []
    for (const s of parsed.suggestions || []) {
      const tag = availableTags.find(
        (t) => t.name.toLowerCase() === s.tag_name.toLowerCase()
      )
      if (tag) {
        suggestions.push({
          tagId: tag.id,
          tagName: tag.name,
          // Clamp AI-reported confidence into [0, 1]
          confidence: Math.max(0, Math.min(1, s.confidence)),
          reasoning: s.reasoning,
        })
      }
    }
    return { suggestions, tokensUsed: usage.totalTokens }
  } catch (error) {
    // Malformed JSON from the model gets a structured parse-error log entry
    if (error instanceof SyntaxError) {
      const parseError = createParseError(error.message)
      logAIError('Tagging', 'getAISuggestions', parseError)
    }
    // Record the failed attempt; token counts are unknown here, logged as 0
    await logAIUsage({
      userId,
      action: 'PROJECT_TAGGING',
      entityType: 'Project',
      entityId: anonymizedProject.project_id,
      model,
      promptTokens: 0,
      completionTokens: 0,
      totalTokens: 0,
      batchSize: 1,
      itemsProcessed: 0,
      status: 'ERROR',
      errorMessage: error instanceof Error ? error.message : 'Unknown error',
    })
    throw error
  }
}
// ─── Public API ──────────────────────────────────────────────────────────────
/**
 * Tag a single project with AI-suggested expertise tags.
 *
 * Behavior:
 * - No-op (empty result) when AI tagging is disabled or no active tags exist.
 * - Only applies suggestions with confidence >= CONFIDENCE_THRESHOLD.
 * - Additive only - never removes existing tags.
 * - Respects the maxTags setting (existing tags count against the cap).
 *
 * @param projectId - Project to tag; throws when it does not exist.
 * @param userId - Optional actor recorded in the AI usage log.
 * @returns All AI suggestions plus the subset that was actually applied.
 */
export async function tagProject(
  projectId: string,
  userId?: string
): Promise<TaggingResult> {
  const settings = await getTaggingSettings()
  if (!settings.enabled) {
    return {
      projectId,
      suggestions: [],
      applied: [],
      tokensUsed: 0,
    }
  }
  // Fetch project with the fields needed for anonymization and tag dedup
  const project = await prisma.project.findUnique({
    where: { id: projectId },
    include: {
      projectTags: true,
      files: { select: { fileType: true } },
      _count: { select: { teamMembers: true, files: true } },
    },
  })
  if (!project) {
    throw new Error(`Project not found: ${projectId}`)
  }
  // Get available tags
  const availableTags = await getAvailableTags()
  if (availableTags.length === 0) {
    return {
      projectId,
      suggestions: [],
      applied: [],
      tokensUsed: 0,
    }
  }
  // Anonymize project data (the ID mapping returned by the anonymizer is not
  // needed here - we only ever tag the single project we started from)
  const projectWithRelations = toProjectWithRelations(project)
  const { anonymized } = anonymizeProjectsForAI([projectWithRelations], 'FILTERING')
  // Validate anonymization before anything is sent to the AI provider
  if (!validateAnonymizedProjects(anonymized)) {
    throw new Error('GDPR compliance check failed: PII detected in anonymized data')
  }
  // Get AI suggestions
  const { suggestions, tokensUsed } = await getAISuggestions(
    anonymized[0],
    availableTags,
    userId
  )
  // Keep only sufficiently confident suggestions
  const validSuggestions = suggestions.filter(
    (s) => s.confidence >= CONFIDENCE_THRESHOLD
  )
  // Never duplicate existing tags, and never exceed the configured cap
  const existingTagIds = new Set(project.projectTags.map((pt) => pt.tagId))
  const currentTagCount = project.projectTags.length
  const remainingSlots = Math.max(0, settings.maxTags - currentTagCount)
  const newSuggestions = validSuggestions
    .filter((s) => !existingTagIds.has(s.tagId))
    .slice(0, remainingSlots)
  // Apply new tags one by one so a single failure does not abort the rest
  const applied: TagSuggestion[] = []
  for (const suggestion of newSuggestions) {
    try {
      await prisma.projectTag.create({
        data: {
          projectId,
          tagId: suggestion.tagId,
          confidence: suggestion.confidence,
          source: 'AI',
        },
      })
      applied.push(suggestion)
    } catch (error) {
      // Skip if tag already exists (race condition)
      console.warn(`[AI Tagging] Failed to apply tag ${suggestion.tagName}: ${error}`)
    }
  }
  return {
    projectId,
    suggestions,
    applied,
    tokensUsed,
  }
}
/**
* Get tag suggestions for a project without applying them
* Useful for preview/review before applying
*/
export async function getTagSuggestions(
projectId: string,
userId?: string
): Promise<TagSuggestion[]> {
// Fetch project
const project = await prisma.project.findUnique({
where: { id: projectId },
include: {
files: { select: { fileType: true } },
_count: { select: { teamMembers: true, files: true } },
},
})
if (!project) {
throw new Error(`Project not found: ${projectId}`)
}
// Get available tags
const availableTags = await getAvailableTags()
if (availableTags.length === 0) {
return []
}
// Anonymize project data
const projectWithRelations = toProjectWithRelations(project)
const { anonymized } = anonymizeProjectsForAI([projectWithRelations], 'FILTERING')
// Validate anonymization
if (!validateAnonymizedProjects(anonymized)) {
throw new Error('GDPR compliance check failed')
}
// Get AI suggestions
const { suggestions } = await getAISuggestions(anonymized[0], availableTags, userId)
return suggestions
}
/**
 * Manually attach a tag to a project (idempotent upsert: re-adding an
 * existing tag simply resets it to MANUAL with full confidence).
 */
export async function addProjectTag(
  projectId: string,
  tagId: string
): Promise<void> {
  const manualAttributes = { source: 'MANUAL' as const, confidence: 1.0 }
  await prisma.projectTag.upsert({
    where: { projectId_tagId: { projectId, tagId } },
    create: { projectId, tagId, ...manualAttributes },
    update: { ...manualAttributes },
  })
}
/**
 * Detach a tag from a project (no-op when the link does not exist).
 */
export async function removeProjectTag(
  projectId: string,
  tagId: string
): Promise<void> {
  await prisma.projectTag.deleteMany({ where: { projectId, tagId } })
}

View File

@@ -1,184 +1,184 @@
import { prisma } from '@/lib/prisma'
import {
applyAutoTagRules,
aiInterpretCriteria,
type AutoTagRule,
} from './ai-award-eligibility'
// Number of projects sent to the AI per request, bounding request latency
const BATCH_SIZE = 20

/**
 * Process eligibility for an award in the background.
 *
 * Updates progress counters on the SpecialAward row as it goes so the
 * frontend can poll. Eligibility is decided in two phases:
 *  1. Deterministic auto-tag rules (fast, no AI).
 *  2. AI interpretation of the plain-language criteria, batched to avoid
 *     timeouts (only when criteriaText is set and useAiEligibility is on).
 * A project must pass every configured phase to be eligible; a phase that
 * did not run defaults to eligible.
 *
 * Never throws: failures are recorded on the award row instead.
 *
 * @param awardId - SpecialAward to evaluate.
 * @param includeSubmitted - Also evaluate projects still in SUBMITTED status.
 * @param userId - Actor that started the job (currently unused; kept for
 *   interface stability with other background jobs).
 */
export async function processEligibilityJob(
  awardId: string,
  includeSubmitted: boolean,
  userId: string
): Promise<void> {
  try {
    // Only scalar fields of the award are used below, so no relations are
    // included (the original `include: { program: true }` was never read).
    const award = await prisma.specialAward.findUniqueOrThrow({
      where: { id: awardId },
    })
    // Candidate projects for this award's program
    const statusFilter = includeSubmitted
      ? (['SUBMITTED', 'ELIGIBLE', 'ASSIGNED', 'SEMIFINALIST', 'FINALIST'] as const)
      : (['ELIGIBLE', 'ASSIGNED', 'SEMIFINALIST', 'FINALIST'] as const)
    const projects = await prisma.project.findMany({
      where: {
        programId: award.programId,
        status: { in: [...statusFilter] },
      },
      select: {
        id: true,
        title: true,
        description: true,
        competitionCategory: true,
        country: true,
        geographicZone: true,
        tags: true,
        oceanIssue: true,
      },
    })
    // Nothing to evaluate - mark the job completed immediately
    if (projects.length === 0) {
      await prisma.specialAward.update({
        where: { id: awardId },
        data: {
          eligibilityJobStatus: 'COMPLETED',
          eligibilityJobTotal: 0,
          eligibilityJobDone: 0,
        },
      })
      return
    }
    // Mark job as PROCESSING and reset progress/error state
    await prisma.specialAward.update({
      where: { id: awardId },
      data: {
        eligibilityJobStatus: 'PROCESSING',
        eligibilityJobTotal: projects.length,
        eligibilityJobDone: 0,
        eligibilityJobError: null,
        eligibilityJobStarted: new Date(),
      },
    })
    // Phase 1: Auto-tag rules (deterministic, fast)
    const autoTagRules = award.autoTagRulesJson as unknown as AutoTagRule[] | null
    let autoResults: Map<string, boolean> | undefined
    if (autoTagRules && Array.isArray(autoTagRules) && autoTagRules.length > 0) {
      autoResults = applyAutoTagRules(autoTagRules, projects)
    }
    // Phase 2: AI interpretation (if criteria text exists AND AI eligibility
    // is enabled), processed in batches to avoid timeouts
    let aiResults: Map<string, { eligible: boolean; confidence: number; reasoning: string }> | undefined
    if (award.criteriaText && award.useAiEligibility) {
      aiResults = new Map()
      for (let i = 0; i < projects.length; i += BATCH_SIZE) {
        const batch = projects.slice(i, i + BATCH_SIZE)
        const aiEvals = await aiInterpretCriteria(award.criteriaText, batch)
        for (const e of aiEvals) {
          aiResults.set(e.projectId, {
            eligible: e.eligible,
            confidence: e.confidence,
            reasoning: e.reasoning,
          })
        }
        // Publish incremental progress for frontend polling
        await prisma.specialAward.update({
          where: { id: awardId },
          data: {
            eligibilityJobDone: Math.min(i + BATCH_SIZE, projects.length),
          },
        })
      }
    } else {
      // No AI phase needed - mark all projects as processed
      await prisma.specialAward.update({
        where: { id: awardId },
        data: { eligibilityJobDone: projects.length },
      })
    }
    // Combine results: when both phases ran, both must agree
    const eligibilities = projects.map((project) => {
      const autoEligible = autoResults?.get(project.id) ?? true
      const aiEval = aiResults?.get(project.id)
      const aiEligible = aiEval?.eligible ?? true
      const eligible = autoEligible && aiEligible
      // Simplified from the original redundant ternary (its first two
      // branches were identical). NOTE(review): an AI-only run is recorded
      // as MANUAL, matching the original behavior - confirm whether AI-only
      // determinations should be AUTO instead.
      const method = autoResults ? 'AUTO' : 'MANUAL'
      return {
        projectId: project.id,
        eligible,
        method,
        aiReasoningJson: aiEval
          ? { confidence: aiEval.confidence, reasoning: aiEval.reasoning }
          : null,
      }
    })
    // Upsert eligibilities atomically; previous manual overrides are cleared
    // because this is a fresh automated pass
    await prisma.$transaction(
      eligibilities.map((e) =>
        prisma.awardEligibility.upsert({
          where: {
            awardId_projectId: {
              awardId,
              projectId: e.projectId,
            },
          },
          create: {
            awardId,
            projectId: e.projectId,
            eligible: e.eligible,
            method: e.method as 'AUTO' | 'MANUAL',
            aiReasoningJson: e.aiReasoningJson ?? undefined,
          },
          update: {
            eligible: e.eligible,
            method: e.method as 'AUTO' | 'MANUAL',
            aiReasoningJson: e.aiReasoningJson ?? undefined,
            overriddenBy: null,
            overriddenAt: null,
          },
        })
      )
    )
    // Mark as completed
    await prisma.specialAward.update({
      where: { id: awardId },
      data: {
        eligibilityJobStatus: 'COMPLETED',
        eligibilityJobDone: projects.length,
      },
    })
  } catch (error) {
    // Record the failure on the award row so the frontend can surface it
    const errorMessage = error instanceof Error ? error.message : 'Unknown error'
    try {
      await prisma.specialAward.update({
        where: { id: awardId },
        data: {
          eligibilityJobStatus: 'FAILED',
          eligibilityJobError: errorMessage,
        },
      })
    } catch {
      // If we can't even update the status, log and give up
      console.error('Failed to update eligibility job status:', error)
    }
  }
}
import { prisma } from '@/lib/prisma'
import {
applyAutoTagRules,
aiInterpretCriteria,
type AutoTagRule,
} from './ai-award-eligibility'
// Number of projects sent to the AI per request, bounding request latency
const BATCH_SIZE = 20

/**
 * Process eligibility for an award in the background.
 *
 * Updates progress counters on the SpecialAward row as it goes so the
 * frontend can poll. Eligibility is decided in two phases:
 *  1. Deterministic auto-tag rules (fast, no AI).
 *  2. AI interpretation of the plain-language criteria, batched to avoid
 *     timeouts (only when criteriaText is set and useAiEligibility is on).
 * A project must pass every configured phase to be eligible; a phase that
 * did not run defaults to eligible.
 *
 * Never throws: failures are recorded on the award row instead.
 *
 * @param awardId - SpecialAward to evaluate.
 * @param includeSubmitted - Also evaluate projects still in SUBMITTED status.
 * @param userId - Actor that started the job (currently unused; kept for
 *   interface stability with other background jobs).
 */
export async function processEligibilityJob(
  awardId: string,
  includeSubmitted: boolean,
  userId: string
): Promise<void> {
  try {
    // Only scalar fields of the award are used below, so no relations are
    // included (the original `include: { program: true }` was never read).
    const award = await prisma.specialAward.findUniqueOrThrow({
      where: { id: awardId },
    })
    // Candidate projects for this award's program
    const statusFilter = includeSubmitted
      ? (['SUBMITTED', 'ELIGIBLE', 'ASSIGNED', 'SEMIFINALIST', 'FINALIST'] as const)
      : (['ELIGIBLE', 'ASSIGNED', 'SEMIFINALIST', 'FINALIST'] as const)
    const projects = await prisma.project.findMany({
      where: {
        programId: award.programId,
        status: { in: [...statusFilter] },
      },
      select: {
        id: true,
        title: true,
        description: true,
        competitionCategory: true,
        country: true,
        geographicZone: true,
        tags: true,
        oceanIssue: true,
      },
    })
    // Nothing to evaluate - mark the job completed immediately
    if (projects.length === 0) {
      await prisma.specialAward.update({
        where: { id: awardId },
        data: {
          eligibilityJobStatus: 'COMPLETED',
          eligibilityJobTotal: 0,
          eligibilityJobDone: 0,
        },
      })
      return
    }
    // Mark job as PROCESSING and reset progress/error state
    await prisma.specialAward.update({
      where: { id: awardId },
      data: {
        eligibilityJobStatus: 'PROCESSING',
        eligibilityJobTotal: projects.length,
        eligibilityJobDone: 0,
        eligibilityJobError: null,
        eligibilityJobStarted: new Date(),
      },
    })
    // Phase 1: Auto-tag rules (deterministic, fast)
    const autoTagRules = award.autoTagRulesJson as unknown as AutoTagRule[] | null
    let autoResults: Map<string, boolean> | undefined
    if (autoTagRules && Array.isArray(autoTagRules) && autoTagRules.length > 0) {
      autoResults = applyAutoTagRules(autoTagRules, projects)
    }
    // Phase 2: AI interpretation (if criteria text exists AND AI eligibility
    // is enabled), processed in batches to avoid timeouts
    let aiResults: Map<string, { eligible: boolean; confidence: number; reasoning: string }> | undefined
    if (award.criteriaText && award.useAiEligibility) {
      aiResults = new Map()
      for (let i = 0; i < projects.length; i += BATCH_SIZE) {
        const batch = projects.slice(i, i + BATCH_SIZE)
        const aiEvals = await aiInterpretCriteria(award.criteriaText, batch)
        for (const e of aiEvals) {
          aiResults.set(e.projectId, {
            eligible: e.eligible,
            confidence: e.confidence,
            reasoning: e.reasoning,
          })
        }
        // Publish incremental progress for frontend polling
        await prisma.specialAward.update({
          where: { id: awardId },
          data: {
            eligibilityJobDone: Math.min(i + BATCH_SIZE, projects.length),
          },
        })
      }
    } else {
      // No AI phase needed - mark all projects as processed
      await prisma.specialAward.update({
        where: { id: awardId },
        data: { eligibilityJobDone: projects.length },
      })
    }
    // Combine results: when both phases ran, both must agree
    const eligibilities = projects.map((project) => {
      const autoEligible = autoResults?.get(project.id) ?? true
      const aiEval = aiResults?.get(project.id)
      const aiEligible = aiEval?.eligible ?? true
      const eligible = autoEligible && aiEligible
      // Simplified from the original redundant ternary (its first two
      // branches were identical). NOTE(review): an AI-only run is recorded
      // as MANUAL, matching the original behavior - confirm whether AI-only
      // determinations should be AUTO instead.
      const method = autoResults ? 'AUTO' : 'MANUAL'
      return {
        projectId: project.id,
        eligible,
        method,
        aiReasoningJson: aiEval
          ? { confidence: aiEval.confidence, reasoning: aiEval.reasoning }
          : null,
      }
    })
    // Upsert eligibilities atomically; previous manual overrides are cleared
    // because this is a fresh automated pass
    await prisma.$transaction(
      eligibilities.map((e) =>
        prisma.awardEligibility.upsert({
          where: {
            awardId_projectId: {
              awardId,
              projectId: e.projectId,
            },
          },
          create: {
            awardId,
            projectId: e.projectId,
            eligible: e.eligible,
            method: e.method as 'AUTO' | 'MANUAL',
            aiReasoningJson: e.aiReasoningJson ?? undefined,
          },
          update: {
            eligible: e.eligible,
            method: e.method as 'AUTO' | 'MANUAL',
            aiReasoningJson: e.aiReasoningJson ?? undefined,
            overriddenBy: null,
            overriddenAt: null,
          },
        })
      )
    )
    // Mark as completed
    await prisma.specialAward.update({
      where: { id: awardId },
      data: {
        eligibilityJobStatus: 'COMPLETED',
        eligibilityJobDone: projects.length,
      },
    })
  } catch (error) {
    // Record the failure on the award row so the frontend can surface it
    const errorMessage = error instanceof Error ? error.message : 'Unknown error'
    try {
      await prisma.specialAward.update({
        where: { id: awardId },
        data: {
          eligibilityJobStatus: 'FAILED',
          eligibilityJobError: errorMessage,
        },
      })
    } catch {
      // If we can't even update the status, log and give up
      console.error('Failed to update eligibility job status:', error)
    }
  }
}

View File

@@ -1,276 +1,276 @@
import { prisma } from '@/lib/prisma'
import { sendStyledNotificationEmail } from '@/lib/email'
// Aggregate outcome of one digest run
interface DigestResult {
  sent: number // digests successfully emailed
  errors: number // per-user failures (logged, run continues)
}
// One titled bullet-list section of a digest email
interface DigestSection {
  title: string
  items: string[]
}
/**
 * Process and send email digests for all opted-in users.
 * Called by cron endpoint.
 *
 * Respects the `digest_enabled` kill switch and the admin-configured
 * `digest_sections` list in system settings. One user's failure is
 * logged and counted; it never aborts the rest of the run.
 *
 * @param type - Digest cadence to process ('daily' or 'weekly')
 * @returns Counts of digests sent and per-user failures
 */
export async function processDigests(
  type: 'daily' | 'weekly'
): Promise<DigestResult> {
  let sent = 0
  let errors = 0
  // Check if digest feature is enabled (global kill switch)
  const enabledSetting = await prisma.systemSettings.findUnique({
    where: { key: 'digest_enabled' },
  })
  if (enabledSetting?.value === 'false') {
    return { sent: 0, errors: 0 }
  }
  // Find users who opted in for this digest frequency
  const users = await prisma.user.findMany({
    where: {
      digestFrequency: type,
      status: 'ACTIVE',
    },
    select: {
      id: true,
      name: true,
      email: true,
    },
  })
  if (users.length === 0) {
    return { sent: 0, errors: 0 }
  }
  // Load enabled sections from settings.
  // FIX: the stored value is now parsed defensively — previously a
  // malformed or non-array `digest_sections` value either threw here
  // (aborting the entire run before any email was sent) or crashed
  // later on `enabledSections.includes`.
  const defaultSections = [
    'pending_evaluations',
    'upcoming_deadlines',
    'new_assignments',
    'unread_notifications',
  ]
  const sectionsSetting = await prisma.systemSettings.findUnique({
    where: { key: 'digest_sections' },
  })
  let enabledSections: string[] = defaultSections
  if (sectionsSetting?.value) {
    try {
      const parsed: unknown = JSON.parse(sectionsSetting.value)
      if (Array.isArray(parsed)) {
        enabledSections = parsed.filter((s): s is string => typeof s === 'string')
      }
    } catch (parseError) {
      console.error('[Digest] Invalid digest_sections setting, using defaults:', parseError)
    }
  }
  const baseUrl = process.env.NEXTAUTH_URL || 'https://monaco-opc.com'
  for (const user of users) {
    try {
      const content = await getDigestContent(user.id, enabledSections)
      // Skip if there's nothing to report
      if (content.sections.length === 0) continue
      // Build email body from sections (markdown-style headings + bullets)
      const bodyParts: string[] = []
      for (const section of content.sections) {
        bodyParts.push(`**${section.title}**`)
        for (const item of section.items) {
          bodyParts.push(`- ${item}`)
        }
        bodyParts.push('')
      }
      await sendStyledNotificationEmail(
        user.email,
        user.name || '',
        'DIGEST',
        {
          name: user.name || undefined,
          title: `Your ${type === 'daily' ? 'Daily' : 'Weekly'} Digest`,
          message: bodyParts.join('\n'),
          linkUrl: `${baseUrl}/dashboard`,
          metadata: {
            digestType: type,
            pendingEvaluations: content.pendingEvaluations,
            upcomingDeadlines: content.upcomingDeadlines,
            newAssignments: content.newAssignments,
            unreadNotifications: content.unreadNotifications,
          },
        }
      )
      // Log the digest so "new since last digest" queries have a baseline
      await prisma.digestLog.create({
        data: {
          userId: user.id,
          digestType: type,
          contentJson: {
            pendingEvaluations: content.pendingEvaluations,
            upcomingDeadlines: content.upcomingDeadlines,
            newAssignments: content.newAssignments,
            unreadNotifications: content.unreadNotifications,
          },
        },
      })
      sent++
    } catch (error) {
      // One recipient failing must not stop the remaining sends
      console.error(
        `[Digest] Failed to send ${type} digest to ${user.email}:`,
        error
      )
      errors++
    }
  }
  return { sent, errors }
}
/**
 * Compile digest content for a single user.
 *
 * Runs up to four independent queries (pending evaluations, upcoming
 * deadlines, new assignments since the last digest, unread notification
 * count), each gated by the admin-enabled section keys.
 *
 * @param userId - User to build the digest for
 * @param enabledSections - Section keys enabled in admin settings; disabled sections are skipped entirely
 * @returns Renderable sections plus per-category counts (used for email metadata and digest logging)
 */
async function getDigestContent(
  userId: string,
  enabledSections: string[]
): Promise<{
  sections: DigestSection[]
  pendingEvaluations: number
  upcomingDeadlines: number
  newAssignments: number
  unreadNotifications: number
}> {
  const now = new Date()
  const sections: DigestSection[] = []
  let pendingEvaluations = 0
  let upcomingDeadlines = 0
  let newAssignments = 0
  let unreadNotifications = 0
  // 1. Pending evaluations: incomplete assignments in stages that are
  //    active and whose evaluation window has not closed yet
  if (enabledSections.includes('pending_evaluations')) {
    const pendingAssignments = await prisma.assignment.findMany({
      where: {
        userId,
        isCompleted: false,
        stage: {
          status: 'STAGE_ACTIVE',
          windowCloseAt: { gt: now },
        },
      },
      include: {
        project: { select: { id: true, title: true } },
        stage: { select: { name: true, windowCloseAt: true } },
      },
    })
    pendingEvaluations = pendingAssignments.length
    if (pendingAssignments.length > 0) {
      sections.push({
        title: `Pending Evaluations (${pendingAssignments.length})`,
        items: pendingAssignments.map(
          (a) =>
            `${a.project.title} - ${a.stage?.name ?? 'Unknown'}${
              a.stage?.windowCloseAt
                ? ` (due ${a.stage.windowCloseAt.toLocaleDateString('en-US', {
                    month: 'short',
                    day: 'numeric',
                  })})`
                : ''
            }`
        ),
      })
    }
  }
  // 2. Upcoming deadlines (stages closing within 7 days) where this user
  //    still has incomplete assignments
  if (enabledSections.includes('upcoming_deadlines')) {
    const sevenDaysFromNow = new Date(now.getTime() + 7 * 24 * 60 * 60 * 1000)
    const upcomingStages = await prisma.stage.findMany({
      where: {
        status: 'STAGE_ACTIVE',
        windowCloseAt: {
          gt: now,
          lte: sevenDaysFromNow,
        },
        assignments: {
          some: {
            userId,
            isCompleted: false,
          },
        },
      },
      select: {
        name: true,
        windowCloseAt: true,
      },
    })
    upcomingDeadlines = upcomingStages.length
    if (upcomingStages.length > 0) {
      sections.push({
        title: 'Upcoming Deadlines',
        items: upcomingStages.map(
          (s) =>
            `${s.name} - ${s.windowCloseAt?.toLocaleDateString('en-US', {
              weekday: 'short',
              month: 'short',
              day: 'numeric',
              hour: '2-digit',
              minute: '2-digit',
            })}`
        ),
      })
    }
  }
  // 3. New assignments since the user's last digest; falls back to the
  //    last 24h window when no digest has been logged yet
  if (enabledSections.includes('new_assignments')) {
    const lastDigest = await prisma.digestLog.findFirst({
      where: { userId },
      orderBy: { sentAt: 'desc' },
      select: { sentAt: true },
    })
    const sinceDate = lastDigest?.sentAt || new Date(now.getTime() - 24 * 60 * 60 * 1000)
    const recentAssignments = await prisma.assignment.findMany({
      where: {
        userId,
        createdAt: { gt: sinceDate },
      },
      include: {
        project: { select: { id: true, title: true } },
        stage: { select: { name: true } },
      },
    })
    newAssignments = recentAssignments.length
    if (recentAssignments.length > 0) {
      sections.push({
        title: `New Assignments (${recentAssignments.length})`,
        items: recentAssignments.map(
          (a) => `${a.project.title} - ${a.stage?.name ?? 'Unknown'}`
        ),
      })
    }
  }
  // 4. Unread in-app notification count (summary line only, no items)
  if (enabledSections.includes('unread_notifications')) {
    const unreadCount = await prisma.inAppNotification.count({
      where: {
        userId,
        isRead: false,
      },
    })
    unreadNotifications = unreadCount
    if (unreadCount > 0) {
      sections.push({
        title: 'Notifications',
        items: [`You have ${unreadCount} unread notification${unreadCount !== 1 ? 's' : ''}`],
      })
    }
  }
  return {
    sections,
    pendingEvaluations,
    upcomingDeadlines,
    newAssignments,
    unreadNotifications,
  }
}
import { prisma } from '@/lib/prisma'
import { sendStyledNotificationEmail } from '@/lib/email'
// Aggregate outcome of one digest run
interface DigestResult {
  sent: number // digests successfully emailed
  errors: number // per-user failures (logged, run continues)
}
// One titled bullet-list section of a digest email
interface DigestSection {
  title: string
  items: string[]
}
/**
 * Process and send email digests for all opted-in users.
 * Called by cron endpoint.
 *
 * Respects the `digest_enabled` kill switch and the admin-configured
 * `digest_sections` list in system settings. One user's failure is
 * logged and counted; it never aborts the rest of the run.
 *
 * @param type - Digest cadence to process ('daily' or 'weekly')
 * @returns Counts of digests sent and per-user failures
 */
export async function processDigests(
  type: 'daily' | 'weekly'
): Promise<DigestResult> {
  let sent = 0
  let errors = 0
  // Check if digest feature is enabled (global kill switch)
  const enabledSetting = await prisma.systemSettings.findUnique({
    where: { key: 'digest_enabled' },
  })
  if (enabledSetting?.value === 'false') {
    return { sent: 0, errors: 0 }
  }
  // Find users who opted in for this digest frequency
  const users = await prisma.user.findMany({
    where: {
      digestFrequency: type,
      status: 'ACTIVE',
    },
    select: {
      id: true,
      name: true,
      email: true,
    },
  })
  if (users.length === 0) {
    return { sent: 0, errors: 0 }
  }
  // Load enabled sections from settings.
  // FIX: the stored value is now parsed defensively — previously a
  // malformed or non-array `digest_sections` value either threw here
  // (aborting the entire run before any email was sent) or crashed
  // later on `enabledSections.includes`.
  const defaultSections = [
    'pending_evaluations',
    'upcoming_deadlines',
    'new_assignments',
    'unread_notifications',
  ]
  const sectionsSetting = await prisma.systemSettings.findUnique({
    where: { key: 'digest_sections' },
  })
  let enabledSections: string[] = defaultSections
  if (sectionsSetting?.value) {
    try {
      const parsed: unknown = JSON.parse(sectionsSetting.value)
      if (Array.isArray(parsed)) {
        enabledSections = parsed.filter((s): s is string => typeof s === 'string')
      }
    } catch (parseError) {
      console.error('[Digest] Invalid digest_sections setting, using defaults:', parseError)
    }
  }
  const baseUrl = process.env.NEXTAUTH_URL || 'https://monaco-opc.com'
  for (const user of users) {
    try {
      const content = await getDigestContent(user.id, enabledSections)
      // Skip if there's nothing to report
      if (content.sections.length === 0) continue
      // Build email body from sections (markdown-style headings + bullets)
      const bodyParts: string[] = []
      for (const section of content.sections) {
        bodyParts.push(`**${section.title}**`)
        for (const item of section.items) {
          bodyParts.push(`- ${item}`)
        }
        bodyParts.push('')
      }
      await sendStyledNotificationEmail(
        user.email,
        user.name || '',
        'DIGEST',
        {
          name: user.name || undefined,
          title: `Your ${type === 'daily' ? 'Daily' : 'Weekly'} Digest`,
          message: bodyParts.join('\n'),
          linkUrl: `${baseUrl}/dashboard`,
          metadata: {
            digestType: type,
            pendingEvaluations: content.pendingEvaluations,
            upcomingDeadlines: content.upcomingDeadlines,
            newAssignments: content.newAssignments,
            unreadNotifications: content.unreadNotifications,
          },
        }
      )
      // Log the digest so "new since last digest" queries have a baseline
      await prisma.digestLog.create({
        data: {
          userId: user.id,
          digestType: type,
          contentJson: {
            pendingEvaluations: content.pendingEvaluations,
            upcomingDeadlines: content.upcomingDeadlines,
            newAssignments: content.newAssignments,
            unreadNotifications: content.unreadNotifications,
          },
        },
      })
      sent++
    } catch (error) {
      // One recipient failing must not stop the remaining sends
      console.error(
        `[Digest] Failed to send ${type} digest to ${user.email}:`,
        error
      )
      errors++
    }
  }
  return { sent, errors }
}
/**
 * Compile digest content for a single user.
 *
 * Runs up to four independent queries (pending evaluations, upcoming
 * deadlines, new assignments since the last digest, unread notification
 * count), each gated by the admin-enabled section keys.
 *
 * @param userId - User to build the digest for
 * @param enabledSections - Section keys enabled in admin settings; disabled sections are skipped entirely
 * @returns Renderable sections plus per-category counts (used for email metadata and digest logging)
 */
async function getDigestContent(
  userId: string,
  enabledSections: string[]
): Promise<{
  sections: DigestSection[]
  pendingEvaluations: number
  upcomingDeadlines: number
  newAssignments: number
  unreadNotifications: number
}> {
  const now = new Date()
  const sections: DigestSection[] = []
  let pendingEvaluations = 0
  let upcomingDeadlines = 0
  let newAssignments = 0
  let unreadNotifications = 0
  // 1. Pending evaluations: incomplete assignments in stages that are
  //    active and whose evaluation window has not closed yet
  if (enabledSections.includes('pending_evaluations')) {
    const pendingAssignments = await prisma.assignment.findMany({
      where: {
        userId,
        isCompleted: false,
        stage: {
          status: 'STAGE_ACTIVE',
          windowCloseAt: { gt: now },
        },
      },
      include: {
        project: { select: { id: true, title: true } },
        stage: { select: { name: true, windowCloseAt: true } },
      },
    })
    pendingEvaluations = pendingAssignments.length
    if (pendingAssignments.length > 0) {
      sections.push({
        title: `Pending Evaluations (${pendingAssignments.length})`,
        items: pendingAssignments.map(
          (a) =>
            `${a.project.title} - ${a.stage?.name ?? 'Unknown'}${
              a.stage?.windowCloseAt
                ? ` (due ${a.stage.windowCloseAt.toLocaleDateString('en-US', {
                    month: 'short',
                    day: 'numeric',
                  })})`
                : ''
            }`
        ),
      })
    }
  }
  // 2. Upcoming deadlines (stages closing within 7 days) where this user
  //    still has incomplete assignments
  if (enabledSections.includes('upcoming_deadlines')) {
    const sevenDaysFromNow = new Date(now.getTime() + 7 * 24 * 60 * 60 * 1000)
    const upcomingStages = await prisma.stage.findMany({
      where: {
        status: 'STAGE_ACTIVE',
        windowCloseAt: {
          gt: now,
          lte: sevenDaysFromNow,
        },
        assignments: {
          some: {
            userId,
            isCompleted: false,
          },
        },
      },
      select: {
        name: true,
        windowCloseAt: true,
      },
    })
    upcomingDeadlines = upcomingStages.length
    if (upcomingStages.length > 0) {
      sections.push({
        title: 'Upcoming Deadlines',
        items: upcomingStages.map(
          (s) =>
            `${s.name} - ${s.windowCloseAt?.toLocaleDateString('en-US', {
              weekday: 'short',
              month: 'short',
              day: 'numeric',
              hour: '2-digit',
              minute: '2-digit',
            })}`
        ),
      })
    }
  }
  // 3. New assignments since the user's last digest; falls back to the
  //    last 24h window when no digest has been logged yet
  if (enabledSections.includes('new_assignments')) {
    const lastDigest = await prisma.digestLog.findFirst({
      where: { userId },
      orderBy: { sentAt: 'desc' },
      select: { sentAt: true },
    })
    const sinceDate = lastDigest?.sentAt || new Date(now.getTime() - 24 * 60 * 60 * 1000)
    const recentAssignments = await prisma.assignment.findMany({
      where: {
        userId,
        createdAt: { gt: sinceDate },
      },
      include: {
        project: { select: { id: true, title: true } },
        stage: { select: { name: true } },
      },
    })
    newAssignments = recentAssignments.length
    if (recentAssignments.length > 0) {
      sections.push({
        title: `New Assignments (${recentAssignments.length})`,
        items: recentAssignments.map(
          (a) => `${a.project.title} - ${a.stage?.name ?? 'Unknown'}`
        ),
      })
    }
  }
  // 4. Unread in-app notification count (summary line only, no items)
  if (enabledSections.includes('unread_notifications')) {
    const unreadCount = await prisma.inAppNotification.count({
      where: {
        userId,
        isRead: false,
      },
    })
    unreadNotifications = unreadCount
    if (unreadCount > 0) {
      sections.push({
        title: 'Notifications',
        items: [`You have ${unreadCount} unread notification${unreadCount !== 1 ? 's' : ''}`],
      })
    }
  }
  return {
    sections,
    pendingEvaluations,
    upcomingDeadlines,
    newAssignments,
    unreadNotifications,
  }
}

View File

@@ -1,178 +1,178 @@
import { prisma } from '@/lib/prisma'
import { sendStyledNotificationEmail } from '@/lib/email'
// Deadline thresholds that trigger reminders, widest first; a stage whose
// deadline is closer than a threshold fires that reminder type (sent at
// most once per user per stage, deduplicated via ReminderLog).
const REMINDER_TYPES = [
  { type: '3_DAYS', thresholdMs: 3 * 24 * 60 * 60 * 1000 },
  { type: '24H', thresholdMs: 24 * 60 * 60 * 1000 },
  { type: '1H', thresholdMs: 60 * 60 * 1000 },
] as const
// '3_DAYS' | '24H' | '1H', derived from the table above
type ReminderType = (typeof REMINDER_TYPES)[number]['type']
// Aggregate outcome of a reminder run
interface ReminderResult {
  sent: number // reminders successfully delivered
  errors: number // send attempts that threw
}
/**
 * Find active stages with approaching deadlines and send reminders
 * to jurors who have incomplete assignments.
 *
 * @param stageId - Optional: restrict processing to a single stage
 * @returns Aggregate counts of reminders sent and send failures
 */
export async function processEvaluationReminders(stageId?: string): Promise<ReminderResult> {
  const now = new Date()
  const totals: ReminderResult = { sent: 0, errors: 0 }
  // Only stages whose evaluation window is currently open
  const openStages = await prisma.stage.findMany({
    where: {
      status: 'STAGE_ACTIVE',
      windowCloseAt: { gt: now },
      windowOpenAt: { lte: now },
      ...(stageId && { id: stageId }),
    },
    select: {
      id: true,
      name: true,
      windowCloseAt: true,
      track: { select: { name: true } },
    },
  })
  for (const stage of openStages) {
    if (!stage.windowCloseAt) continue
    const remainingMs = stage.windowCloseAt.getTime() - now.getTime()
    // Every threshold the deadline has already crossed is applicable;
    // per-user dedup inside sendRemindersForStage prevents re-sends.
    for (const { type, thresholdMs } of REMINDER_TYPES) {
      if (remainingMs > thresholdMs) continue
      const { sent, errors } = await sendRemindersForStage(stage, type, now)
      totals.sent += sent
      totals.errors += errors
    }
  }
  return totals
}
/**
 * Send one reminder type ('3_DAYS' | '24H' | '1H') to every juror who
 * still has incomplete assignments for the given stage, skipping jurors
 * who already received that reminder type for this stage (ReminderLog).
 *
 * @param stage - Stage with a non-null close date (no-op otherwise)
 * @param type - Which deadline threshold triggered this reminder
 * @param now - Current time (kept for signature compatibility; unused here)
 * @returns Counts of reminders sent and send failures
 */
async function sendRemindersForStage(
  stage: {
    id: string
    name: string
    windowCloseAt: Date | null
    track: { name: string }
  },
  type: ReminderType,
  now: Date
): Promise<ReminderResult> {
  let sent = 0
  let errors = 0
  if (!stage.windowCloseAt) return { sent, errors }
  // Find jurors with incomplete assignments for this stage
  const incompleteAssignments = await prisma.assignment.findMany({
    where: {
      stageId: stage.id,
      isCompleted: false,
    },
    select: {
      userId: true,
    },
  })
  // Get unique user IDs with incomplete work
  const userIds = [...new Set(incompleteAssignments.map((a) => a.userId))]
  if (userIds.length === 0) return { sent, errors }
  // Dedup: check which users already received this reminder type for this stage
  const existingReminders = await prisma.reminderLog.findMany({
    where: {
      stageId: stage.id,
      type,
      userId: { in: userIds },
    },
    select: { userId: true },
  })
  const alreadySent = new Set(existingReminders.map((r) => r.userId))
  const usersToNotify = userIds.filter((id) => !alreadySent.has(id))
  if (usersToNotify.length === 0) return { sent, errors }
  // Get user details for the remaining recipients
  const users = await prisma.user.findMany({
    where: { id: { in: usersToNotify } },
    select: { id: true, name: true, email: true },
  })
  const baseUrl = process.env.NEXTAUTH_URL || 'https://monaco-opc.com'
  const deadlineStr = stage.windowCloseAt.toLocaleDateString('en-US', {
    weekday: 'long',
    year: 'numeric',
    month: 'long',
    day: 'numeric',
    hour: '2-digit',
    minute: '2-digit',
    timeZoneName: 'short',
  })
  // Pending assignment count per user (drives the email copy)
  const pendingCounts = new Map<string, number>()
  for (const a of incompleteAssignments) {
    pendingCounts.set(a.userId, (pendingCounts.get(a.userId) || 0) + 1)
  }
  // Select email template type based on reminder type.
  // FIX: the 3-day reminder previously fell through to the 24h template;
  // it now maps to its own REMINDER_3_DAYS template type (which exists in
  // the notification-type registry).
  const emailTemplateType =
    type === '1H'
      ? 'REMINDER_1H'
      : type === '3_DAYS'
        ? 'REMINDER_3_DAYS'
        : 'REMINDER_24H'
  for (const user of users) {
    const pendingCount = pendingCounts.get(user.id) || 0
    if (pendingCount === 0) continue
    try {
      await sendStyledNotificationEmail(
        user.email,
        user.name || '',
        emailTemplateType,
        {
          name: user.name || undefined,
          title: `Evaluation Reminder - ${stage.name}`,
          message: `You have ${pendingCount} pending evaluation${pendingCount !== 1 ? 's' : ''} for ${stage.name}.`,
          linkUrl: `${baseUrl}/jury/stages/${stage.id}/assignments`,
          metadata: {
            pendingCount,
            stageName: stage.name,
            deadline: deadlineStr,
          },
        }
      )
      // Log the sent reminder so it is not re-sent on the next cron tick
      await prisma.reminderLog.create({
        data: {
          stageId: stage.id,
          userId: user.id,
          type,
        },
      })
      sent++
    } catch (error) {
      // Per-user failure: log and continue with remaining recipients
      console.error(
        `Failed to send ${type} reminder to ${user.email} for stage ${stage.name}:`,
        error
      )
      errors++
    }
  }
  return { sent, errors }
}
import { prisma } from '@/lib/prisma'
import { sendStyledNotificationEmail } from '@/lib/email'
// Deadline thresholds that trigger reminders, widest first; a stage whose
// deadline is closer than a threshold fires that reminder type (sent at
// most once per user per stage, deduplicated via ReminderLog).
const REMINDER_TYPES = [
  { type: '3_DAYS', thresholdMs: 3 * 24 * 60 * 60 * 1000 },
  { type: '24H', thresholdMs: 24 * 60 * 60 * 1000 },
  { type: '1H', thresholdMs: 60 * 60 * 1000 },
] as const
// '3_DAYS' | '24H' | '1H', derived from the table above
type ReminderType = (typeof REMINDER_TYPES)[number]['type']
// Aggregate outcome of a reminder run
interface ReminderResult {
  sent: number // reminders successfully delivered
  errors: number // send attempts that threw
}
/**
 * Find active stages with approaching deadlines and send reminders
 * to jurors who have incomplete assignments.
 *
 * @param stageId - Optional: restrict processing to a single stage
 * @returns Aggregate counts of reminders sent and send failures
 */
export async function processEvaluationReminders(stageId?: string): Promise<ReminderResult> {
  const now = new Date()
  const totals: ReminderResult = { sent: 0, errors: 0 }
  // Only stages whose evaluation window is currently open
  const openStages = await prisma.stage.findMany({
    where: {
      status: 'STAGE_ACTIVE',
      windowCloseAt: { gt: now },
      windowOpenAt: { lte: now },
      ...(stageId && { id: stageId }),
    },
    select: {
      id: true,
      name: true,
      windowCloseAt: true,
      track: { select: { name: true } },
    },
  })
  for (const stage of openStages) {
    if (!stage.windowCloseAt) continue
    const remainingMs = stage.windowCloseAt.getTime() - now.getTime()
    // Every threshold the deadline has already crossed is applicable;
    // per-user dedup inside sendRemindersForStage prevents re-sends.
    for (const { type, thresholdMs } of REMINDER_TYPES) {
      if (remainingMs > thresholdMs) continue
      const { sent, errors } = await sendRemindersForStage(stage, type, now)
      totals.sent += sent
      totals.errors += errors
    }
  }
  return totals
}
/**
 * Send one reminder type ('3_DAYS' | '24H' | '1H') to every juror who
 * still has incomplete assignments for the given stage, skipping jurors
 * who already received that reminder type for this stage (ReminderLog).
 *
 * @param stage - Stage with a non-null close date (no-op otherwise)
 * @param type - Which deadline threshold triggered this reminder
 * @param now - Current time (kept for signature compatibility; unused here)
 * @returns Counts of reminders sent and send failures
 */
async function sendRemindersForStage(
  stage: {
    id: string
    name: string
    windowCloseAt: Date | null
    track: { name: string }
  },
  type: ReminderType,
  now: Date
): Promise<ReminderResult> {
  let sent = 0
  let errors = 0
  if (!stage.windowCloseAt) return { sent, errors }
  // Find jurors with incomplete assignments for this stage
  const incompleteAssignments = await prisma.assignment.findMany({
    where: {
      stageId: stage.id,
      isCompleted: false,
    },
    select: {
      userId: true,
    },
  })
  // Get unique user IDs with incomplete work
  const userIds = [...new Set(incompleteAssignments.map((a) => a.userId))]
  if (userIds.length === 0) return { sent, errors }
  // Dedup: check which users already received this reminder type for this stage
  const existingReminders = await prisma.reminderLog.findMany({
    where: {
      stageId: stage.id,
      type,
      userId: { in: userIds },
    },
    select: { userId: true },
  })
  const alreadySent = new Set(existingReminders.map((r) => r.userId))
  const usersToNotify = userIds.filter((id) => !alreadySent.has(id))
  if (usersToNotify.length === 0) return { sent, errors }
  // Get user details for the remaining recipients
  const users = await prisma.user.findMany({
    where: { id: { in: usersToNotify } },
    select: { id: true, name: true, email: true },
  })
  const baseUrl = process.env.NEXTAUTH_URL || 'https://monaco-opc.com'
  const deadlineStr = stage.windowCloseAt.toLocaleDateString('en-US', {
    weekday: 'long',
    year: 'numeric',
    month: 'long',
    day: 'numeric',
    hour: '2-digit',
    minute: '2-digit',
    timeZoneName: 'short',
  })
  // Pending assignment count per user (drives the email copy)
  const pendingCounts = new Map<string, number>()
  for (const a of incompleteAssignments) {
    pendingCounts.set(a.userId, (pendingCounts.get(a.userId) || 0) + 1)
  }
  // Select email template type based on reminder type.
  // FIX: the 3-day reminder previously fell through to the 24h template;
  // it now maps to its own REMINDER_3_DAYS template type (which exists in
  // the notification-type registry).
  const emailTemplateType =
    type === '1H'
      ? 'REMINDER_1H'
      : type === '3_DAYS'
        ? 'REMINDER_3_DAYS'
        : 'REMINDER_24H'
  for (const user of users) {
    const pendingCount = pendingCounts.get(user.id) || 0
    if (pendingCount === 0) continue
    try {
      await sendStyledNotificationEmail(
        user.email,
        user.name || '',
        emailTemplateType,
        {
          name: user.name || undefined,
          title: `Evaluation Reminder - ${stage.name}`,
          message: `You have ${pendingCount} pending evaluation${pendingCount !== 1 ? 's' : ''} for ${stage.name}.`,
          linkUrl: `${baseUrl}/jury/stages/${stage.id}/assignments`,
          metadata: {
            pendingCount,
            stageName: stage.name,
            deadline: deadlineStr,
          },
        }
      )
      // Log the sent reminder so it is not re-sent on the next cron tick
      await prisma.reminderLog.create({
        data: {
          stageId: stage.id,
          userId: user.id,
          type,
        },
      })
      sent++
    } catch (error) {
      // Per-user failure: log and continue with remaining recipients
      console.error(
        `Failed to send ${type} reminder to ${user.email} for stage ${stage.name}:`,
        error
      )
      errors++
    }
  }
  return { sent, errors }
}

View File

@@ -1,482 +1,482 @@
/**
* In-App Notification Service
*
* Creates and manages in-app notifications for users.
* Optionally sends email notifications based on admin settings.
*/
import { prisma } from '@/lib/prisma'
import { sendStyledNotificationEmail } from '@/lib/email'
// Notification priority levels (drives UI emphasis; see NotificationPriorities)
export type NotificationPriority = 'low' | 'normal' | 'high' | 'urgent'
// Notification type constants — the canonical registry of every in-app
// notification type, grouped by audience. Stored as plain strings on
// InAppNotification rows and used to key email settings and templates.
export const NotificationTypes = {
  // Admin notifications
  FILTERING_COMPLETE: 'FILTERING_COMPLETE',
  FILTERING_FAILED: 'FILTERING_FAILED',
  AI_SUGGESTIONS_READY: 'AI_SUGGESTIONS_READY',
  NEW_APPLICATION: 'NEW_APPLICATION',
  BULK_APPLICATIONS: 'BULK_APPLICATIONS',
  DOCUMENTS_UPLOADED: 'DOCUMENTS_UPLOADED',
  EVALUATION_MILESTONE: 'EVALUATION_MILESTONE',
  ALL_EVALUATIONS_DONE: 'ALL_EVALUATIONS_DONE',
  JURY_INACTIVE: 'JURY_INACTIVE',
  DEADLINE_24H: 'DEADLINE_24H',
  DEADLINE_1H: 'DEADLINE_1H',
  ROUND_AUTO_CLOSED: 'ROUND_AUTO_CLOSED',
  EXPORT_READY: 'EXPORT_READY',
  SYSTEM_ERROR: 'SYSTEM_ERROR',
  // Jury notifications
  ASSIGNED_TO_PROJECT: 'ASSIGNED_TO_PROJECT',
  BATCH_ASSIGNED: 'BATCH_ASSIGNED',
  PROJECT_UPDATED: 'PROJECT_UPDATED',
  ROUND_NOW_OPEN: 'ROUND_NOW_OPEN',
  REMINDER_3_DAYS: 'REMINDER_3_DAYS',
  REMINDER_24H: 'REMINDER_24H',
  REMINDER_1H: 'REMINDER_1H',
  ROUND_EXTENDED: 'ROUND_EXTENDED',
  ROUND_CLOSED: 'ROUND_CLOSED',
  THANK_YOU: 'THANK_YOU',
  RESULTS_AVAILABLE: 'RESULTS_AVAILABLE',
  // Jury - Award specific
  AWARD_JURY_SELECTED: 'AWARD_JURY_SELECTED',
  AWARD_VOTING_OPEN: 'AWARD_VOTING_OPEN',
  AWARD_REMINDER: 'AWARD_REMINDER',
  AWARD_RESULTS: 'AWARD_RESULTS',
  // Mentor notifications
  MENTEE_ASSIGNED: 'MENTEE_ASSIGNED',
  MENTEE_BATCH_ASSIGNED: 'MENTEE_BATCH_ASSIGNED',
  MENTEE_INTRO: 'MENTEE_INTRO',
  MENTEE_UPLOADED_DOCS: 'MENTEE_UPLOADED_DOCS',
  MENTEE_UPDATED_PROJECT: 'MENTEE_UPDATED_PROJECT',
  MENTEE_ADVANCED: 'MENTEE_ADVANCED',
  MENTEE_FINALIST: 'MENTEE_FINALIST',
  MENTEE_WON: 'MENTEE_WON',
  MENTEE_ELIMINATED: 'MENTEE_ELIMINATED',
  MENTORSHIP_TIP: 'MENTORSHIP_TIP',
  NEW_RESOURCE: 'NEW_RESOURCE',
  // Team/Applicant notifications
  APPLICATION_SUBMITTED: 'APPLICATION_SUBMITTED',
  APPLICATION_INCOMPLETE: 'APPLICATION_INCOMPLETE',
  TEAM_INVITE_RECEIVED: 'TEAM_INVITE_RECEIVED',
  TEAM_MEMBER_JOINED: 'TEAM_MEMBER_JOINED',
  TEAM_MEMBER_LEFT: 'TEAM_MEMBER_LEFT',
  DOCUMENTS_RECEIVED: 'DOCUMENTS_RECEIVED',
  REVIEW_IN_PROGRESS: 'REVIEW_IN_PROGRESS',
  ADVANCED_SEMIFINAL: 'ADVANCED_SEMIFINAL',
  ADVANCED_FINAL: 'ADVANCED_FINAL',
  MENTOR_ASSIGNED: 'MENTOR_ASSIGNED',
  MENTOR_MESSAGE: 'MENTOR_MESSAGE',
  NOT_SELECTED: 'NOT_SELECTED',
  FEEDBACK_AVAILABLE: 'FEEDBACK_AVAILABLE',
  EVENT_INVITATION: 'EVENT_INVITATION',
  WINNER_ANNOUNCEMENT: 'WINNER_ANNOUNCEMENT',
  SUBMISSION_RECEIVED: 'SUBMISSION_RECEIVED',
  CERTIFICATE_READY: 'CERTIFICATE_READY',
  PROGRAM_NEWSLETTER: 'PROGRAM_NEWSLETTER',
  // Observer notifications
  ROUND_STARTED: 'ROUND_STARTED',
  ROUND_PROGRESS: 'ROUND_PROGRESS',
  ROUND_COMPLETED: 'ROUND_COMPLETED',
  FINALISTS_ANNOUNCED: 'FINALISTS_ANNOUNCED',
  WINNERS_ANNOUNCED: 'WINNERS_ANNOUNCED',
  REPORT_AVAILABLE: 'REPORT_AVAILABLE',
} as const
// Union of every notification type string above
export type NotificationType = (typeof NotificationTypes)[keyof typeof NotificationTypes]
// Notification icons by type — icon name strings (presumably lucide-react
// icon names; confirm against the rendering component). Types absent here
// fall back to 'Bell' in createNotification/createBulkNotifications.
export const NotificationIcons: Record<string, string> = {
  [NotificationTypes.FILTERING_COMPLETE]: 'Brain',
  [NotificationTypes.FILTERING_FAILED]: 'AlertTriangle',
  [NotificationTypes.NEW_APPLICATION]: 'FileText',
  [NotificationTypes.BULK_APPLICATIONS]: 'Files',
  [NotificationTypes.DOCUMENTS_UPLOADED]: 'Upload',
  [NotificationTypes.ASSIGNED_TO_PROJECT]: 'ClipboardList',
  [NotificationTypes.ROUND_NOW_OPEN]: 'PlayCircle',
  [NotificationTypes.REMINDER_24H]: 'Clock',
  [NotificationTypes.REMINDER_1H]: 'AlertCircle',
  [NotificationTypes.ROUND_CLOSED]: 'Lock',
  [NotificationTypes.MENTEE_ASSIGNED]: 'Users',
  [NotificationTypes.MENTEE_ADVANCED]: 'TrendingUp',
  [NotificationTypes.MENTEE_WON]: 'Trophy',
  [NotificationTypes.APPLICATION_SUBMITTED]: 'CheckCircle',
  [NotificationTypes.SUBMISSION_RECEIVED]: 'Inbox',
  [NotificationTypes.ADVANCED_SEMIFINAL]: 'TrendingUp',
  [NotificationTypes.ADVANCED_FINAL]: 'Star',
  [NotificationTypes.MENTOR_ASSIGNED]: 'GraduationCap',
  [NotificationTypes.WINNER_ANNOUNCEMENT]: 'Trophy',
  [NotificationTypes.AWARD_VOTING_OPEN]: 'Vote',
  [NotificationTypes.AWARD_RESULTS]: 'Trophy',
}
// Priority by notification type — types absent here default to 'normal'
// (see createNotification/createBulkNotifications fallbacks).
export const NotificationPriorities: Record<string, NotificationPriority> = {
  [NotificationTypes.FILTERING_COMPLETE]: 'high',
  [NotificationTypes.FILTERING_FAILED]: 'urgent',
  [NotificationTypes.DEADLINE_1H]: 'urgent',
  [NotificationTypes.REMINDER_1H]: 'urgent',
  [NotificationTypes.SYSTEM_ERROR]: 'urgent',
  [NotificationTypes.ASSIGNED_TO_PROJECT]: 'high',
  [NotificationTypes.ROUND_NOW_OPEN]: 'high',
  [NotificationTypes.DEADLINE_24H]: 'high',
  [NotificationTypes.REMINDER_24H]: 'high',
  [NotificationTypes.MENTEE_ASSIGNED]: 'high',
  [NotificationTypes.APPLICATION_SUBMITTED]: 'high',
  [NotificationTypes.ADVANCED_SEMIFINAL]: 'high',
  [NotificationTypes.ADVANCED_FINAL]: 'high',
  [NotificationTypes.WINNER_ANNOUNCEMENT]: 'high',
  [NotificationTypes.AWARD_VOTING_OPEN]: 'high',
}
// Arguments for creating a single in-app notification
interface CreateNotificationParams {
  userId: string // recipient user id
  type: string // one of NotificationTypes; keys icon/priority/email defaults
  title: string
  message: string
  linkUrl?: string // optional call-to-action target
  linkLabel?: string // optional call-to-action label
  icon?: string // defaults to NotificationIcons[type], then 'Bell'
  priority?: NotificationPriority // defaults to NotificationPriorities[type], then 'normal'
  metadata?: Record<string, unknown> // free-form extra context stored on the row
  groupKey?: string // enables in-place batching of unread duplicates within 1h
  expiresAt?: Date // row becomes eligible for cleanup after this time
}
/**
 * Create a single in-app notification.
 *
 * When `groupKey` is set, an unread notification with the same key created
 * within the last hour is updated in place (message replaced, metadata
 * merged with a bumped `count`, `createdAt` refreshed so it sorts to the
 * top) instead of inserting a duplicate row. May additionally send an
 * email depending on per-type admin settings and the user's preference
 * (see maybeSendEmail, which never throws).
 *
 * @param params - Notification fields; icon and priority fall back to per-type defaults
 */
export async function createNotification(
  params: CreateNotificationParams
): Promise<void> {
  const {
    userId,
    type,
    title,
    message,
    linkUrl,
    linkLabel,
    icon,
    priority,
    metadata,
    groupKey,
    expiresAt,
  } = params
  // Determine icon and priority if not provided
  const finalIcon = icon || NotificationIcons[type] || 'Bell'
  const finalPriority = priority || NotificationPriorities[type] || 'normal'
  // Check for existing notification with same groupKey (for batching)
  if (groupKey) {
    const existingNotification = await prisma.inAppNotification.findFirst({
      where: {
        userId,
        groupKey,
        isRead: false,
        createdAt: {
          gte: new Date(Date.now() - 60 * 60 * 1000), // Within last hour
        },
      },
    })
    if (existingNotification) {
      // Update existing notification instead of creating new one
      const existingMeta = existingNotification.metadata as Record<string, unknown> || {}
      const currentCount = (existingMeta.count as number) || 1
      await prisma.inAppNotification.update({
        where: { id: existingNotification.id },
        data: {
          message,
          metadata: { ...existingMeta, ...metadata, count: currentCount + 1 },
          createdAt: new Date(), // Bump to top
        },
      })
      return
    }
  }
  // Create the in-app notification
  await prisma.inAppNotification.create({
    data: {
      userId,
      type,
      title,
      message,
      linkUrl,
      linkLabel,
      icon: finalIcon,
      priority: finalPriority,
      metadata: metadata as object | undefined,
      groupKey,
      expiresAt,
    },
  })
  // Check if we should also send an email
  await maybeSendEmail(userId, type, title, message, linkUrl, metadata)
}
/**
 * Create the same in-app notification for many users at once.
 *
 * Rows are inserted in a single createMany; emails (when enabled for the
 * type) are then sent one recipient at a time — maybeSendEmail contains
 * its own error handling, so a failed email never aborts the loop.
 */
export async function createBulkNotifications(params: {
  userIds: string[]
  type: string
  title: string
  message: string
  linkUrl?: string
  linkLabel?: string
  icon?: string
  priority?: NotificationPriority
  metadata?: Record<string, unknown>
}): Promise<void> {
  const { userIds, type, title, message, linkUrl, linkLabel } = params
  // Fall back to per-type defaults when icon/priority are not supplied
  const resolvedIcon = params.icon || NotificationIcons[type] || 'Bell'
  const resolvedPriority = params.priority || NotificationPriorities[type] || 'normal'
  const rows = userIds.map((userId) => ({
    userId,
    type,
    title,
    message,
    linkUrl,
    linkLabel,
    icon: resolvedIcon,
    priority: resolvedPriority,
    metadata: params.metadata as object | undefined,
  }))
  // Single bulk insert for all recipients
  await prisma.inAppNotification.createMany({ data: rows })
  // Sequential email fan-out (per-type settings checked inside)
  for (const userId of userIds) {
    await maybeSendEmail(userId, type, title, message, linkUrl, params.metadata)
  }
}
/**
 * Fan a notification out to every active admin (super + program admins).
 */
export async function notifyAdmins(params: {
  type: string
  title: string
  message: string
  linkUrl?: string
  linkLabel?: string
  icon?: string
  priority?: NotificationPriority
  metadata?: Record<string, unknown>
}): Promise<void> {
  const admins = await prisma.user.findMany({
    where: {
      role: { in: ['SUPER_ADMIN', 'PROGRAM_ADMIN'] },
      status: 'ACTIVE',
    },
    select: { id: true },
  })
  const adminIds = admins.map((admin) => admin.id)
  if (adminIds.length === 0) return
  await createBulkNotifications({ ...params, userIds: adminIds })
}
/**
 * Notify every juror with at least one assignment in the given stage.
 */
export async function notifyStageJury(
  stageId: string,
  params: Omit<CreateNotificationParams, 'userId'>
): Promise<void> {
  // distinct ensures one notification per juror even with many assignments
  const assignments = await prisma.assignment.findMany({
    where: { stageId },
    select: { userId: true },
    distinct: ['userId'],
  })
  if (assignments.length === 0) return
  const jurorIds = assignments.map((assignment) => assignment.userId)
  await createBulkNotifications({ ...params, userIds: jurorIds })
}
/**
 * Notify every registered team member of a project.
 * Team members without a linked user account are skipped.
 */
export async function notifyProjectTeam(
  projectId: string,
  params: Omit<CreateNotificationParams, 'userId'>
): Promise<void> {
  const teamMembers = await prisma.teamMember.findMany({
    where: { projectId },
    include: { user: { select: { id: true } } },
  })
  // flatMap narrows away members without a user record — replaces the
  // previous filter(...).map((tm) => tm.user!.id) non-null assertion.
  const userIds = teamMembers.flatMap((tm) => (tm.user ? [tm.user.id] : []))
  if (userIds.length === 0) return
  await createBulkNotifications({
    ...params,
    userIds,
  })
}
/**
 * Notify every mentor currently assigned to a project.
 */
export async function notifyProjectMentors(
  projectId: string,
  params: Omit<CreateNotificationParams, 'userId'>
): Promise<void> {
  const mentorAssignments = await prisma.mentorAssignment.findMany({
    where: { projectId },
    select: { mentorId: true },
  })
  const mentorIds = mentorAssignments.map(({ mentorId }) => mentorId)
  if (mentorIds.length === 0) return
  await createBulkNotifications({ ...params, userIds: mentorIds })
}
/**
 * Send an email copy of a notification when admin settings enable it.
 *
 * Silently returns when: no NotificationEmailSetting row exists for the
 * type, the setting has sendEmail disabled, the user cannot be found, or
 * the user's notificationPreference is 'NONE'. Errors are logged and
 * swallowed — a failed email must never break notification creation.
 *
 * @param userId - Recipient user id
 * @param type - Notification type; keys the per-type email setting and template
 * @param title - Title forwarded to the styled template
 * @param message - Body text forwarded to the styled template
 * @param linkUrl - Optional link included in the email
 * @param metadata - Extra context forwarded to the styled template
 */
async function maybeSendEmail(
  userId: string,
  type: string,
  title: string,
  message: string,
  linkUrl?: string,
  metadata?: Record<string, unknown>
): Promise<void> {
  try {
    // Check if email is enabled for this notification type
    const emailSetting = await prisma.notificationEmailSetting.findUnique({
      where: { notificationType: type },
    })
    // If no setting exists, don't send email by default
    if (!emailSetting || !emailSetting.sendEmail) {
      return
    }
    // Check user's notification preference
    const user = await prisma.user.findUnique({
      where: { id: userId },
      select: { email: true, name: true, notificationPreference: true },
    })
    if (!user || user.notificationPreference === 'NONE') {
      return
    }
    // Send styled email with full context
    // The styled template will use metadata for rich content
    // Subject can be overridden by admin settings
    await sendStyledNotificationEmail(
      user.email,
      user.name || 'User',
      type,
      {
        title,
        message,
        linkUrl,
        metadata,
      },
      emailSetting.emailSubject || undefined
    )
  } catch (error) {
    // Log but don't fail the notification creation
    console.error('[Notification] Failed to send email:', error)
  }
}
/**
 * Mark a notification as read
 *
 * The where clause includes the userId, so a user can only mark their
 * own notifications as read (updateMany simply matches zero rows
 * otherwise).
 */
export async function markNotificationAsRead(
  notificationId: string,
  userId: string
): Promise<void> {
  const readState = { isRead: true, readAt: new Date() }
  await prisma.inAppNotification.updateMany({
    where: { id: notificationId, userId },
    data: readState,
  })
}
/**
 * Mark all notifications as read for a user
 *
 * Stamps every currently-unread notification with the same readAt time.
 */
export async function markAllNotificationsAsRead(userId: string): Promise<void> {
  const now = new Date()
  await prisma.inAppNotification.updateMany({
    where: { userId, isRead: false },
    data: { isRead: true, readAt: now },
  })
}
/**
 * Get unread notification count for a user
 */
export async function getUnreadCount(userId: string): Promise<number> {
  const unreadFilter = { userId, isRead: false }
  return prisma.inAppNotification.count({ where: unreadFilter })
}
/**
 * Delete expired notifications
 *
 * Removes every notification whose expiresAt is in the past.
 *
 * @returns Number of rows deleted
 */
export async function deleteExpiredNotifications(): Promise<number> {
  const now = new Date()
  const { count } = await prisma.inAppNotification.deleteMany({
    where: { expiresAt: { lt: now } },
  })
  return count
}
/**
 * Delete old read notifications (cleanup job)
 *
 * Removes read notifications created before `olderThanDays` days ago.
 * Unread notifications are never touched.
 *
 * @returns Number of rows deleted
 */
export async function deleteOldNotifications(olderThanDays: number): Promise<number> {
  // setDate handles month rollovers via local calendar arithmetic.
  const cutoff = new Date()
  cutoff.setDate(cutoff.getDate() - olderThanDays)
  const { count } = await prisma.inAppNotification.deleteMany({
    where: { isRead: true, createdAt: { lt: cutoff } },
  })
  return count
}
/**
* In-App Notification Service
*
* Creates and manages in-app notifications for users.
* Optionally sends email notifications based on admin settings.
*/
import { prisma } from '@/lib/prisma'
import { sendStyledNotificationEmail } from '@/lib/email'
// Notification priority levels
export type NotificationPriority = 'low' | 'normal' | 'high' | 'urgent'
// Notification type constants
// Keys mirror their string values so stored rows stay grep-able; grouped by
// the audience that receives them.
export const NotificationTypes = {
  // Admin notifications
  FILTERING_COMPLETE: 'FILTERING_COMPLETE',
  FILTERING_FAILED: 'FILTERING_FAILED',
  AI_SUGGESTIONS_READY: 'AI_SUGGESTIONS_READY',
  NEW_APPLICATION: 'NEW_APPLICATION',
  BULK_APPLICATIONS: 'BULK_APPLICATIONS',
  DOCUMENTS_UPLOADED: 'DOCUMENTS_UPLOADED',
  EVALUATION_MILESTONE: 'EVALUATION_MILESTONE',
  ALL_EVALUATIONS_DONE: 'ALL_EVALUATIONS_DONE',
  JURY_INACTIVE: 'JURY_INACTIVE',
  DEADLINE_24H: 'DEADLINE_24H',
  DEADLINE_1H: 'DEADLINE_1H',
  ROUND_AUTO_CLOSED: 'ROUND_AUTO_CLOSED',
  EXPORT_READY: 'EXPORT_READY',
  SYSTEM_ERROR: 'SYSTEM_ERROR',
  // Jury notifications
  ASSIGNED_TO_PROJECT: 'ASSIGNED_TO_PROJECT',
  BATCH_ASSIGNED: 'BATCH_ASSIGNED',
  PROJECT_UPDATED: 'PROJECT_UPDATED',
  ROUND_NOW_OPEN: 'ROUND_NOW_OPEN',
  REMINDER_3_DAYS: 'REMINDER_3_DAYS',
  REMINDER_24H: 'REMINDER_24H',
  REMINDER_1H: 'REMINDER_1H',
  ROUND_EXTENDED: 'ROUND_EXTENDED',
  ROUND_CLOSED: 'ROUND_CLOSED',
  THANK_YOU: 'THANK_YOU',
  RESULTS_AVAILABLE: 'RESULTS_AVAILABLE',
  // Jury - Award specific
  AWARD_JURY_SELECTED: 'AWARD_JURY_SELECTED',
  AWARD_VOTING_OPEN: 'AWARD_VOTING_OPEN',
  AWARD_REMINDER: 'AWARD_REMINDER',
  AWARD_RESULTS: 'AWARD_RESULTS',
  // Mentor notifications
  MENTEE_ASSIGNED: 'MENTEE_ASSIGNED',
  MENTEE_BATCH_ASSIGNED: 'MENTEE_BATCH_ASSIGNED',
  MENTEE_INTRO: 'MENTEE_INTRO',
  MENTEE_UPLOADED_DOCS: 'MENTEE_UPLOADED_DOCS',
  MENTEE_UPDATED_PROJECT: 'MENTEE_UPDATED_PROJECT',
  MENTEE_ADVANCED: 'MENTEE_ADVANCED',
  MENTEE_FINALIST: 'MENTEE_FINALIST',
  MENTEE_WON: 'MENTEE_WON',
  MENTEE_ELIMINATED: 'MENTEE_ELIMINATED',
  MENTORSHIP_TIP: 'MENTORSHIP_TIP',
  NEW_RESOURCE: 'NEW_RESOURCE',
  // Team/Applicant notifications
  APPLICATION_SUBMITTED: 'APPLICATION_SUBMITTED',
  APPLICATION_INCOMPLETE: 'APPLICATION_INCOMPLETE',
  TEAM_INVITE_RECEIVED: 'TEAM_INVITE_RECEIVED',
  TEAM_MEMBER_JOINED: 'TEAM_MEMBER_JOINED',
  TEAM_MEMBER_LEFT: 'TEAM_MEMBER_LEFT',
  DOCUMENTS_RECEIVED: 'DOCUMENTS_RECEIVED',
  REVIEW_IN_PROGRESS: 'REVIEW_IN_PROGRESS',
  ADVANCED_SEMIFINAL: 'ADVANCED_SEMIFINAL',
  ADVANCED_FINAL: 'ADVANCED_FINAL',
  MENTOR_ASSIGNED: 'MENTOR_ASSIGNED',
  MENTOR_MESSAGE: 'MENTOR_MESSAGE',
  NOT_SELECTED: 'NOT_SELECTED',
  FEEDBACK_AVAILABLE: 'FEEDBACK_AVAILABLE',
  EVENT_INVITATION: 'EVENT_INVITATION',
  WINNER_ANNOUNCEMENT: 'WINNER_ANNOUNCEMENT',
  SUBMISSION_RECEIVED: 'SUBMISSION_RECEIVED',
  CERTIFICATE_READY: 'CERTIFICATE_READY',
  PROGRAM_NEWSLETTER: 'PROGRAM_NEWSLETTER',
  // Observer notifications
  ROUND_STARTED: 'ROUND_STARTED',
  ROUND_PROGRESS: 'ROUND_PROGRESS',
  ROUND_COMPLETED: 'ROUND_COMPLETED',
  FINALISTS_ANNOUNCED: 'FINALISTS_ANNOUNCED',
  WINNERS_ANNOUNCED: 'WINNERS_ANNOUNCED',
  REPORT_AVAILABLE: 'REPORT_AVAILABLE',
} as const
// Union of all notification type string values above.
export type NotificationType = (typeof NotificationTypes)[keyof typeof NotificationTypes]
// Notification icons by type
// Partial mapping of type -> icon name; createNotification and
// createBulkNotifications fall back to 'Bell' for types missing here.
export const NotificationIcons: Record<string, string> = {
  [NotificationTypes.FILTERING_COMPLETE]: 'Brain',
  [NotificationTypes.FILTERING_FAILED]: 'AlertTriangle',
  [NotificationTypes.NEW_APPLICATION]: 'FileText',
  [NotificationTypes.BULK_APPLICATIONS]: 'Files',
  [NotificationTypes.DOCUMENTS_UPLOADED]: 'Upload',
  [NotificationTypes.ASSIGNED_TO_PROJECT]: 'ClipboardList',
  [NotificationTypes.ROUND_NOW_OPEN]: 'PlayCircle',
  [NotificationTypes.REMINDER_24H]: 'Clock',
  [NotificationTypes.REMINDER_1H]: 'AlertCircle',
  [NotificationTypes.ROUND_CLOSED]: 'Lock',
  [NotificationTypes.MENTEE_ASSIGNED]: 'Users',
  [NotificationTypes.MENTEE_ADVANCED]: 'TrendingUp',
  [NotificationTypes.MENTEE_WON]: 'Trophy',
  [NotificationTypes.APPLICATION_SUBMITTED]: 'CheckCircle',
  [NotificationTypes.SUBMISSION_RECEIVED]: 'Inbox',
  [NotificationTypes.ADVANCED_SEMIFINAL]: 'TrendingUp',
  [NotificationTypes.ADVANCED_FINAL]: 'Star',
  [NotificationTypes.MENTOR_ASSIGNED]: 'GraduationCap',
  [NotificationTypes.WINNER_ANNOUNCEMENT]: 'Trophy',
  [NotificationTypes.AWARD_VOTING_OPEN]: 'Vote',
  [NotificationTypes.AWARD_RESULTS]: 'Trophy',
}
// Priority by notification type
// Partial mapping of type -> priority; types missing here default to
// 'normal' in createNotification / createBulkNotifications.
export const NotificationPriorities: Record<string, NotificationPriority> = {
  [NotificationTypes.FILTERING_COMPLETE]: 'high',
  [NotificationTypes.FILTERING_FAILED]: 'urgent',
  [NotificationTypes.DEADLINE_1H]: 'urgent',
  [NotificationTypes.REMINDER_1H]: 'urgent',
  [NotificationTypes.SYSTEM_ERROR]: 'urgent',
  [NotificationTypes.ASSIGNED_TO_PROJECT]: 'high',
  [NotificationTypes.ROUND_NOW_OPEN]: 'high',
  [NotificationTypes.DEADLINE_24H]: 'high',
  [NotificationTypes.REMINDER_24H]: 'high',
  [NotificationTypes.MENTEE_ASSIGNED]: 'high',
  [NotificationTypes.APPLICATION_SUBMITTED]: 'high',
  [NotificationTypes.ADVANCED_SEMIFINAL]: 'high',
  [NotificationTypes.ADVANCED_FINAL]: 'high',
  [NotificationTypes.WINNER_ANNOUNCEMENT]: 'high',
  [NotificationTypes.AWARD_VOTING_OPEN]: 'high',
}
/** Parameters for creating a single in-app notification. */
interface CreateNotificationParams {
  userId: string // Recipient user id
  type: string // One of NotificationTypes
  title: string
  message: string
  linkUrl?: string // Optional deep link attached to the notification
  linkLabel?: string // Label for the link
  icon?: string // Overrides the NotificationIcons default ('Bell' fallback)
  priority?: NotificationPriority // Overrides the NotificationPriorities default ('normal' fallback)
  metadata?: Record<string, unknown> // Free-form context; also fed to the styled email template
  groupKey?: string // Unread notifications sharing this key within 1h are merged
  expiresAt?: Date // When set, deleteExpiredNotifications removes the row after this time
}
/**
 * Create a single in-app notification
 *
 * Resolves per-type defaults for icon and priority, merges repeated
 * notifications that share a groupKey within the last hour (bumping the
 * existing one and incrementing its metadata count), otherwise inserts a
 * new row, then optionally mirrors the notification to email.
 */
export async function createNotification(
  params: CreateNotificationParams
): Promise<void> {
  const {
    userId,
    type,
    title,
    message,
    linkUrl,
    linkLabel,
    icon,
    priority,
    metadata,
    groupKey,
    expiresAt,
  } = params
  // Per-type defaults when the caller did not supply icon/priority.
  const resolvedIcon = icon || NotificationIcons[type] || 'Bell'
  const resolvedPriority = priority || NotificationPriorities[type] || 'normal'
  if (groupKey) {
    // Batch: reuse an unread notification with the same groupKey from the
    // past hour instead of creating a duplicate.
    const oneHourAgo = new Date(Date.now() - 60 * 60 * 1000)
    const grouped = await prisma.inAppNotification.findFirst({
      where: {
        userId,
        groupKey,
        isRead: false,
        createdAt: { gte: oneHourAgo },
      },
    })
    if (grouped) {
      const previousMeta = (grouped.metadata as Record<string, unknown>) || {}
      const previousCount = (previousMeta.count as number) || 1
      await prisma.inAppNotification.update({
        where: { id: grouped.id },
        data: {
          message,
          metadata: { ...previousMeta, ...metadata, count: previousCount + 1 },
          createdAt: new Date(), // Bump to top of the list
        },
      })
      return
    }
  }
  // No batching applied: insert a fresh notification row.
  await prisma.inAppNotification.create({
    data: {
      userId,
      type,
      title,
      message,
      linkUrl,
      linkLabel,
      icon: resolvedIcon,
      priority: resolvedPriority,
      metadata: metadata as object | undefined,
      groupKey,
      expiresAt,
    },
  })
  // Optionally mirror to email, gated by admin + user settings.
  await maybeSendEmail(userId, type, title, message, linkUrl, metadata)
}
/**
 * Create notifications for multiple users
 *
 * Inserts all in-app notifications in one createMany call, then fans out
 * the optional email mirror for each recipient. No-op for an empty
 * recipient list.
 */
export async function createBulkNotifications(params: {
  userIds: string[]
  type: string
  title: string
  message: string
  linkUrl?: string
  linkLabel?: string
  icon?: string
  priority?: NotificationPriority
  metadata?: Record<string, unknown>
}): Promise<void> {
  const {
    userIds,
    type,
    title,
    message,
    linkUrl,
    linkLabel,
    icon,
    priority,
    metadata,
  } = params
  if (userIds.length === 0) return
  // Per-type defaults when the caller did not supply icon/priority.
  const finalIcon = icon || NotificationIcons[type] || 'Bell'
  const finalPriority = priority || NotificationPriorities[type] || 'normal'
  // Create notifications in bulk
  await prisma.inAppNotification.createMany({
    data: userIds.map((userId) => ({
      userId,
      type,
      title,
      message,
      linkUrl,
      linkLabel,
      icon: finalIcon,
      priority: finalPriority,
      metadata: metadata as object | undefined,
    })),
  })
  // Fan out emails in parallel instead of one awaited round-trip per user.
  // maybeSendEmail catches and logs its own errors (never rejects), so
  // Promise.all cannot fail here.
  await Promise.all(
    userIds.map((userId) =>
      maybeSendEmail(userId, type, title, message, linkUrl, metadata)
    )
  )
}
/**
 * Notify all admin users
 *
 * Targets every ACTIVE user holding the SUPER_ADMIN or PROGRAM_ADMIN
 * role; no-op when none exist.
 */
export async function notifyAdmins(params: {
  type: string
  title: string
  message: string
  linkUrl?: string
  linkLabel?: string
  icon?: string
  priority?: NotificationPriority
  metadata?: Record<string, unknown>
}): Promise<void> {
  const adminUsers = await prisma.user.findMany({
    where: {
      role: { in: ['SUPER_ADMIN', 'PROGRAM_ADMIN'] },
      status: 'ACTIVE',
    },
    select: { id: true },
  })
  const adminIds = adminUsers.map((admin) => admin.id)
  if (adminIds.length === 0) return
  await createBulkNotifications({ ...params, userIds: adminIds })
}
/**
 * Notify all jury members for a specific stage
 *
 * Collects the distinct jury users assigned to the stage and sends them
 * the notification in bulk; no-op when the stage has no assignments.
 */
export async function notifyStageJury(
  stageId: string,
  params: Omit<CreateNotificationParams, 'userId'>
): Promise<void> {
  // `distinct` prevents double-notifying a juror with several assignments.
  const distinctAssignments = await prisma.assignment.findMany({
    where: { stageId },
    select: { userId: true },
    distinct: ['userId'],
  })
  if (distinctAssignments.length === 0) return
  const recipientIds = distinctAssignments.map((a) => a.userId)
  await createBulkNotifications({ ...params, userIds: recipientIds })
}
/**
 * Notify team members of a project
 *
 * Looks up every team member linked to the project and notifies each one
 * that has a linked user account; members without an account are skipped.
 * No-op when no linked users exist.
 */
export async function notifyProjectTeam(
  projectId: string,
  params: Omit<CreateNotificationParams, 'userId'>
): Promise<void> {
  const teamMembers = await prisma.teamMember.findMany({
    where: { projectId },
    include: { user: { select: { id: true } } },
  })
  // flatMap drops members without a linked user in one pass and lets the
  // compiler narrow the type, removing the non-null assertion the old
  // filter/map pair needed.
  const userIds = teamMembers.flatMap((tm) => (tm.user ? [tm.user.id] : []))
  if (userIds.length === 0) return
  await createBulkNotifications({
    ...params,
    userIds,
  })
}
/**
 * Notify assigned mentors of a project
 *
 * Sends the notification to every mentor with an assignment on this
 * project; no-op when there are none.
 */
export async function notifyProjectMentors(
  projectId: string,
  params: Omit<CreateNotificationParams, 'userId'>
): Promise<void> {
  const projectMentorRows = await prisma.mentorAssignment.findMany({
    where: { projectId },
    select: { mentorId: true },
  })
  if (projectMentorRows.length === 0) return
  await createBulkNotifications({
    ...params,
    userIds: projectMentorRows.map((row) => row.mentorId),
  })
}
/**
 * Check email settings and send email if enabled
 *
 * Mirrors an in-app notification to email only when BOTH gates pass:
 * 1. the admin-level NotificationEmailSetting for this type exists and
 *    has sendEmail enabled (absent setting => no email, by design), and
 * 2. the user has not opted out (notificationPreference !== 'NONE').
 * All failures are logged and swallowed so notification creation never
 * fails because of email delivery.
 *
 * @param userId   - Recipient user id
 * @param type     - Notification type key (see NotificationTypes)
 * @param title    - Notification title, reused as email content
 * @param message  - Notification body, reused as email content
 * @param linkUrl  - Optional deep link included in the email
 * @param metadata - Extra context passed to the styled email template
 */
async function maybeSendEmail(
  userId: string,
  type: string,
  title: string,
  message: string,
  linkUrl?: string,
  metadata?: Record<string, unknown>
): Promise<void> {
  try {
    // Check if email is enabled for this notification type
    const emailSetting = await prisma.notificationEmailSetting.findUnique({
      where: { notificationType: type },
    })
    // If no setting exists, don't send email by default
    if (!emailSetting || !emailSetting.sendEmail) {
      return
    }
    // Check user's notification preference
    const user = await prisma.user.findUnique({
      where: { id: userId },
      select: { email: true, name: true, notificationPreference: true },
    })
    if (!user || user.notificationPreference === 'NONE') {
      return
    }
    // Send styled email with full context
    // The styled template will use metadata for rich content
    // Subject can be overridden by admin settings
    await sendStyledNotificationEmail(
      user.email,
      user.name || 'User',
      type,
      {
        title,
        message,
        linkUrl,
        metadata,
      },
      emailSetting.emailSubject || undefined
    )
  } catch (error) {
    // Log but don't fail the notification creation
    console.error('[Notification] Failed to send email:', error)
  }
}
/**
 * Mark a notification as read
 *
 * Matching on both id and userId means a user can only flip their own
 * notifications; updateMany matches zero rows otherwise.
 */
export async function markNotificationAsRead(
  notificationId: string,
  userId: string
): Promise<void> {
  await prisma.inAppNotification.updateMany({
    where: { userId, id: notificationId },
    data: { readAt: new Date(), isRead: true },
  })
}
/**
 * Mark all notifications as read for a user
 *
 * Every unread notification gets the same readAt timestamp.
 */
export async function markAllNotificationsAsRead(userId: string): Promise<void> {
  const readAt = new Date()
  await prisma.inAppNotification.updateMany({
    where: { isRead: false, userId },
    data: { isRead: true, readAt },
  })
}
/**
 * Get unread notification count for a user
 */
export async function getUnreadCount(userId: string): Promise<number> {
  return prisma.inAppNotification.count({
    where: { isRead: false, userId },
  })
}
/**
 * Delete expired notifications
 *
 * Removes every notification whose expiresAt lies in the past.
 *
 * @returns Number of rows deleted
 */
export async function deleteExpiredNotifications(): Promise<number> {
  const deletion = await prisma.inAppNotification.deleteMany({
    where: { expiresAt: { lt: new Date() } },
  })
  return deletion.count
}
/**
 * Delete old read notifications (cleanup job)
 *
 * Removes read notifications created before `olderThanDays` days ago;
 * unread notifications are never touched.
 *
 * @returns Number of rows deleted
 */
export async function deleteOldNotifications(olderThanDays: number): Promise<number> {
  // Local calendar arithmetic via setDate (handles month rollovers).
  const cutoff = new Date()
  cutoff.setDate(cutoff.getDate() - olderThanDays)
  const deletion = await prisma.inAppNotification.deleteMany({
    where: { createdAt: { lt: cutoff }, isRead: true },
  })
  return deletion.count
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,486 +1,486 @@
/**
* AI-Powered Mentor Matching Service
*
* Matches mentors to projects based on expertise alignment.
*
* Optimization:
* - Batched processing (15 projects per batch)
* - Token tracking and cost logging
* - Fallback to algorithmic matching
*
* GDPR Compliance:
* - All data anonymized before AI processing
* - No personal information sent to OpenAI
*/
import { PrismaClient, OceanIssue, CompetitionCategory } from '@prisma/client'
import { getOpenAI, getConfiguredModel, buildCompletionParams } from '@/lib/openai'
import { logAIUsage, extractTokenUsage } from '@/server/utils/ai-usage'
import { classifyAIError, createParseError, logAIError } from './ai-errors'
// ─── Constants ───────────────────────────────────────────────────────────────
// Projects sent to the model per request; keeps prompts within token budget.
const MENTOR_BATCH_SIZE = 15
// Optimized system prompt
// NOTE: the literal below is sent to the model verbatim — do not reformat.
const MENTOR_MATCHING_SYSTEM_PROMPT = `Match mentors to projects by expertise. Return JSON.
Format for each project: {"matches": [{project_id, mentor_matches: [{mentor_index, confidence_score: 0-1, expertise_match_score: 0-1, reasoning: str}]}]}
Rank by suitability. Consider expertise alignment and availability.`
// ─── Types ───────────────────────────────────────────────────────────────────
/** Project fields used for matching (no personal data is forwarded to AI). */
interface ProjectInfo {
  id: string
  title: string
  description: string | null
  oceanIssue: OceanIssue | null
  competitionCategory: CompetitionCategory | null
  tags: string[]
}
/** Mentor candidate plus current load, for availability weighting. */
interface MentorInfo {
  id: string
  name: string | null
  email: string
  expertiseTags: string[]
  currentAssignments: number // Existing mentor assignment count
  maxAssignments: number | null // null = unlimited capacity
}
/** One ranked mentor suggestion for a project. */
interface MentorMatch {
  mentorId: string
  confidenceScore: number // 0-1 overall suitability (expertise + availability)
  expertiseMatchScore: number // 0-1 expertise overlap only
  reasoning: string
}
// ─── Batched AI Matching ─────────────────────────────────────────────────────
/**
 * Process a batch of projects for mentor matching
 *
 * Builds an anonymized prompt (projects become P1..Pn, mentors become
 * numeric indices; no names or emails are sent), asks the model for ranked
 * matches, logs token usage, and maps results back to the real project and
 * mentor ids. On a JSON parse error the batch's projects get empty match
 * lists (so the caller can fall back to the algorithmic matcher); any
 * other error is rethrown.
 *
 * @param openai   - Initialized OpenAI client
 * @param model    - Model name for the completion and usage logging
 * @param projects - Batch of projects to match (caller controls batch size)
 * @param mentors  - Candidate mentors; array indices are the ids used in
 *                   the prompt, so this order must not change mid-call
 * @param limit    - How many mentors to request per project
 * @param userId   - Optional actor id recorded in the AI usage log
 */
async function processMatchingBatch(
  openai: NonNullable<Awaited<ReturnType<typeof getOpenAI>>>,
  model: string,
  projects: ProjectInfo[],
  mentors: MentorInfo[],
  limit: number,
  userId?: string
): Promise<{
  results: Map<string, MentorMatch[]>
  tokensUsed: number
}> {
  const results = new Map<string, MentorMatch[]>()
  let tokensUsed = 0
  // Anonymize project data
  const anonymizedProjects = projects.map((p, index) => ({
    project_id: `P${index + 1}`,
    real_id: p.id,
    description: p.description?.slice(0, 350) || 'No description',
    category: p.competitionCategory,
    oceanIssue: p.oceanIssue,
    tags: p.tags,
  }))
  // Anonymize mentor data
  const anonymizedMentors = mentors.map((m, index) => ({
    index,
    expertise: m.expertiseTags,
    availability: m.maxAssignments
      ? `${m.currentAssignments}/${m.maxAssignments}`
      : 'unlimited',
  }))
  // NOTE(review): description is cut to 350 chars above and again to 200
  // inside the prompt, so the 200-char cut is what the model actually sees.
  // The flush-left lines below are inside the template literal; keep as-is.
  const userPrompt = `PROJECTS:
${anonymizedProjects.map(p => `${p.project_id}: Category=${p.category || 'N/A'}, Issue=${p.oceanIssue || 'N/A'}, Tags=[${p.tags.join(', ')}], Desc=${p.description.slice(0, 200)}`).join('\n')}
MENTORS:
${anonymizedMentors.map(m => `${m.index}: Expertise=[${m.expertise.join(', ')}], Availability=${m.availability}`).join('\n')}
For each project, rank top ${limit} mentors.`
  try {
    const params = buildCompletionParams(model, {
      messages: [
        { role: 'system', content: MENTOR_MATCHING_SYSTEM_PROMPT },
        { role: 'user', content: userPrompt },
      ],
      jsonMode: true,
      temperature: 0.3,
      maxTokens: 4000,
    })
    const response = await openai.chat.completions.create(params)
    const usage = extractTokenUsage(response)
    tokensUsed = usage.totalTokens
    // Log usage
    await logAIUsage({
      userId,
      action: 'MENTOR_MATCHING',
      entityType: 'Project',
      model,
      promptTokens: usage.promptTokens,
      completionTokens: usage.completionTokens,
      totalTokens: usage.totalTokens,
      batchSize: projects.length,
      itemsProcessed: projects.length,
      status: 'SUCCESS',
    })
    const content = response.choices[0]?.message?.content
    if (!content) {
      throw new Error('No response from AI')
    }
    const parsed = JSON.parse(content) as {
      matches: Array<{
        project_id: string
        mentor_matches: Array<{
          mentor_index: number
          confidence_score: number
          expertise_match_score: number
          reasoning: string
        }>
      }>
    }
    // Map results back to real IDs
    for (const projectMatch of parsed.matches || []) {
      const project = anonymizedProjects.find(p => p.project_id === projectMatch.project_id)
      if (!project) continue
      const mentorMatches: MentorMatch[] = []
      for (const match of projectMatch.mentor_matches || []) {
        // Ignore out-of-range mentor indices the model may hallucinate.
        if (match.mentor_index >= 0 && match.mentor_index < mentors.length) {
          mentorMatches.push({
            mentorId: mentors[match.mentor_index].id,
            // Clamp model-reported scores into [0, 1].
            confidenceScore: Math.min(1, Math.max(0, match.confidence_score)),
            expertiseMatchScore: Math.min(1, Math.max(0, match.expertise_match_score)),
            reasoning: match.reasoning,
          })
        }
      }
      results.set(project.real_id, mentorMatches)
    }
  } catch (error) {
    if (error instanceof SyntaxError) {
      // Malformed JSON from the model: record the failure, degrade gracefully.
      const parseError = createParseError(error.message)
      logAIError('MentorMatching', 'batch processing', parseError)
      await logAIUsage({
        userId,
        action: 'MENTOR_MATCHING',
        entityType: 'Project',
        model,
        promptTokens: 0,
        completionTokens: 0,
        totalTokens: tokensUsed,
        batchSize: projects.length,
        itemsProcessed: 0,
        status: 'ERROR',
        errorMessage: parseError.message,
      })
      // Return empty results for batch (will fall back to algorithm)
      for (const project of projects) {
        results.set(project.id, [])
      }
    } else {
      throw error
    }
  }
  return { results, tokensUsed }
}
/**
 * Get AI-suggested mentor matches for multiple projects (batched)
 *
 * Loads the requested projects plus eligible mentors (ACTIVE users with
 * expertise tags or the JURY_MEMBER role who are under their assignment
 * cap), then runs AI matching in batches of MENTOR_BATCH_SIZE. Falls back
 * to the keyword-based algorithmic matcher when OpenAI is not configured,
 * when the AI call fails, or for any individual project the AI left
 * without matches.
 *
 * @returns Map of project id -> ranked mentor matches (may be empty when
 *          no projects or no available mentors were found)
 */
export async function getAIMentorSuggestionsBatch(
  prisma: PrismaClient,
  projectIds: string[],
  limit: number = 5,
  userId?: string
): Promise<Map<string, MentorMatch[]>> {
  const allResults = new Map<string, MentorMatch[]>()
  // Get projects
  const projects = await prisma.project.findMany({
    where: { id: { in: projectIds } },
    select: {
      id: true,
      title: true,
      description: true,
      oceanIssue: true,
      competitionCategory: true,
      tags: true,
    },
  })
  if (projects.length === 0) {
    return allResults
  }
  // Get available mentors
  const mentors = await prisma.user.findMany({
    where: {
      OR: [
        { expertiseTags: { isEmpty: false } },
        { role: 'JURY_MEMBER' },
      ],
      status: 'ACTIVE',
    },
    select: {
      id: true,
      name: true,
      email: true,
      expertiseTags: true,
      maxAssignments: true,
      mentorAssignments: {
        select: { id: true },
      },
    },
  })
  // Filter mentors who haven't reached max assignments
  const availableMentors: MentorInfo[] = mentors
    .filter((m) => {
      const currentAssignments = m.mentorAssignments.length
      return !m.maxAssignments || currentAssignments < m.maxAssignments
    })
    .map((m) => ({
      id: m.id,
      name: m.name,
      email: m.email,
      expertiseTags: m.expertiseTags,
      currentAssignments: m.mentorAssignments.length,
      maxAssignments: m.maxAssignments,
    }))
  if (availableMentors.length === 0) {
    return allResults
  }
  // Try AI matching
  try {
    const openai = await getOpenAI()
    if (!openai) {
      console.log('[Mentor Matching] OpenAI not configured, using algorithm')
      return getAlgorithmicMatchesBatch(projects, availableMentors, limit)
    }
    const model = await getConfiguredModel()
    console.log(`[Mentor Matching] Using model: ${model} for ${projects.length} projects in batches of ${MENTOR_BATCH_SIZE}`)
    let totalTokens = 0
    // Process in batches
    for (let i = 0; i < projects.length; i += MENTOR_BATCH_SIZE) {
      const batchProjects = projects.slice(i, i + MENTOR_BATCH_SIZE)
      console.log(`[Mentor Matching] Processing batch ${Math.floor(i / MENTOR_BATCH_SIZE) + 1}/${Math.ceil(projects.length / MENTOR_BATCH_SIZE)}`)
      const { results, tokensUsed } = await processMatchingBatch(
        openai,
        model,
        batchProjects,
        availableMentors,
        limit,
        userId
      )
      totalTokens += tokensUsed
      // Merge results
      for (const [projectId, matches] of results) {
        allResults.set(projectId, matches)
      }
    }
    console.log(`[Mentor Matching] Completed. Total tokens: ${totalTokens}`)
    // Fill in any missing projects with algorithmic fallback
    for (const project of projects) {
      if (!allResults.has(project.id) || allResults.get(project.id)?.length === 0) {
        const fallbackMatches = getAlgorithmicMatches(project, availableMentors, limit)
        allResults.set(project.id, fallbackMatches)
      }
    }
    return allResults
  } catch (error) {
    const classified = classifyAIError(error)
    logAIError('MentorMatching', 'getAIMentorSuggestionsBatch', classified)
    // Log failed attempt
    await logAIUsage({
      userId,
      action: 'MENTOR_MATCHING',
      entityType: 'Project',
      model: 'unknown',
      promptTokens: 0,
      completionTokens: 0,
      totalTokens: 0,
      batchSize: projects.length,
      itemsProcessed: 0,
      status: 'ERROR',
      errorMessage: classified.message,
    })
    console.error('[Mentor Matching] AI failed, using algorithm:', classified.message)
    return getAlgorithmicMatchesBatch(projects, availableMentors, limit)
  }
}
/**
 * Get AI-suggested mentor matches for a single project
 *
 * Convenience wrapper over the batched variant for a one-element batch.
 */
export async function getAIMentorSuggestions(
  prisma: PrismaClient,
  projectId: string,
  limit: number = 5,
  userId?: string
): Promise<MentorMatch[]> {
  const batchResults = await getAIMentorSuggestionsBatch(prisma, [projectId], limit, userId)
  const matches = batchResults.get(projectId)
  return matches ?? []
}
// ─── Algorithmic Fallback ────────────────────────────────────────────────────
/**
 * Algorithmic fallback for multiple projects
 *
 * Runs the keyword matcher per project and collects results into a map
 * keyed by project id.
 */
function getAlgorithmicMatchesBatch(
  projects: ProjectInfo[],
  mentors: MentorInfo[],
  limit: number
): Map<string, MentorMatch[]> {
  return new Map(
    projects.map((project): [string, MentorMatch[]] => [
      project.id,
      getAlgorithmicMatches(project, mentors, limit),
    ])
  )
}
/**
 * Algorithmic fallback for mentor matching
 *
 * Scores each mentor by keyword overlap between project metadata (ocean
 * issue, category, tags, description) and the mentor's expertise tags,
 * blends in availability (70% expertise / 30% availability), and returns
 * the top `limit` mentors by confidence.
 */
function getAlgorithmicMatches(
  project: ProjectInfo,
  mentors: MentorInfo[],
  limit: number
): MentorMatch[] {
  // Build keyword set from project
  const projectKeywords = new Set<string>()
  if (project.oceanIssue) {
    projectKeywords.add(project.oceanIssue.toLowerCase().replace(/_/g, ' '))
  }
  if (project.competitionCategory) {
    projectKeywords.add(project.competitionCategory.toLowerCase().replace(/_/g, ' '))
  }
  project.tags.forEach((tag) => {
    tag.toLowerCase().split(/\s+/).forEach((word) => {
      if (word.length > 3) projectKeywords.add(word)
    })
  })
  if (project.description) {
    const words = project.description.toLowerCase().split(/\s+/)
    words.forEach((word) => {
      if (word.length > 4) {
        const cleaned = word.replace(/[^a-z]/g, '')
        // Bug fix: tokens made of digits/punctuation used to be stripped to
        // '' and still added to the set; since anyString.includes('') is
        // always true, one such token matched every mentor keyword and
        // inflated all scores. Only keep non-empty cleaned tokens.
        if (cleaned.length > 0) projectKeywords.add(cleaned)
      }
    })
  }
  // Score each mentor
  const scored = mentors.map((mentor) => {
    const mentorKeywords = new Set<string>()
    mentor.expertiseTags.forEach((tag) => {
      tag.toLowerCase().split(/\s+/).forEach((word) => {
        if (word.length > 2) mentorKeywords.add(word)
      })
    })
    // Calculate overlap (substring containment in either direction)
    let matchCount = 0
    projectKeywords.forEach((keyword) => {
      mentorKeywords.forEach((mentorKeyword) => {
        if (keyword.includes(mentorKeyword) || mentorKeyword.includes(keyword)) {
          matchCount++
        }
      })
    })
    const expertiseMatchScore = mentorKeywords.size > 0
      ? Math.min(1, matchCount / mentorKeywords.size)
      : 0
    // Factor in availability
    const availabilityScore = mentor.maxAssignments
      ? 1 - (mentor.currentAssignments / mentor.maxAssignments)
      : 1
    const confidenceScore = (expertiseMatchScore * 0.7 + availabilityScore * 0.3)
    return {
      mentorId: mentor.id,
      confidenceScore: Math.round(confidenceScore * 100) / 100,
      expertiseMatchScore: Math.round(expertiseMatchScore * 100) / 100,
      reasoning: `Matched ${matchCount} keyword(s). Availability: ${availabilityScore > 0.5 ? 'Good' : 'Limited'}.`,
    }
  })
  // Sort by confidence and return top matches
  return scored
    .sort((a, b) => b.confidenceScore - a.confidenceScore)
    .slice(0, limit)
}
/**
 * Round-robin assignment for load balancing
 *
 * Returns the id of the eligible mentor (ACTIVE, has expertise tags or
 * JURY_MEMBER role, not excluded) with the fewest current assignments who
 * is still under their personal cap, or null when everyone is at capacity.
 */
export async function getRoundRobinMentor(
  prisma: PrismaClient,
  excludeMentorIds: string[] = []
): Promise<string | null> {
  const candidates = await prisma.user.findMany({
    where: {
      OR: [
        { expertiseTags: { isEmpty: false } },
        { role: 'JURY_MEMBER' },
      ],
      status: 'ACTIVE',
      id: { notIn: excludeMentorIds },
    },
    select: {
      id: true,
      maxAssignments: true,
      mentorAssignments: {
        select: { id: true },
      },
    },
    orderBy: {
      mentorAssignments: {
        _count: 'asc',
      },
    },
  })
  // Candidates arrive sorted by load; pick the first one under their cap.
  const available = candidates.find(
    (mentor) =>
      !mentor.maxAssignments ||
      mentor.mentorAssignments.length < mentor.maxAssignments
  )
  return available ? available.id : null
}
/**
* AI-Powered Mentor Matching Service
*
* Matches mentors to projects based on expertise alignment.
*
* Optimization:
* - Batched processing (15 projects per batch)
* - Token tracking and cost logging
* - Fallback to algorithmic matching
*
* GDPR Compliance:
* - All data anonymized before AI processing
* - No personal information sent to OpenAI
*/
import { PrismaClient, OceanIssue, CompetitionCategory } from '@prisma/client'
import { getOpenAI, getConfiguredModel, buildCompletionParams } from '@/lib/openai'
import { logAIUsage, extractTokenUsage } from '@/server/utils/ai-usage'
import { classifyAIError, createParseError, logAIError } from './ai-errors'
// ─── Constants ───────────────────────────────────────────────────────────────
// Projects sent to the model per request; keeps prompts within token budget.
const MENTOR_BATCH_SIZE = 15
// Optimized system prompt
// NOTE: the literal below is sent to the model verbatim — do not reformat.
const MENTOR_MATCHING_SYSTEM_PROMPT = `Match mentors to projects by expertise. Return JSON.
Format for each project: {"matches": [{project_id, mentor_matches: [{mentor_index, confidence_score: 0-1, expertise_match_score: 0-1, reasoning: str}]}]}
Rank by suitability. Consider expertise alignment and availability.`
// ─── Types ───────────────────────────────────────────────────────────────────
/** Project fields used for matching (no personal data is forwarded to AI). */
interface ProjectInfo {
  id: string
  title: string
  description: string | null
  oceanIssue: OceanIssue | null
  competitionCategory: CompetitionCategory | null
  tags: string[]
}
/** Mentor candidate plus current load, for availability weighting. */
interface MentorInfo {
  id: string
  name: string | null
  email: string
  expertiseTags: string[]
  currentAssignments: number // Existing mentor assignment count
  maxAssignments: number | null // null = unlimited capacity
}
/** One ranked mentor suggestion for a project. */
interface MentorMatch {
  mentorId: string
  confidenceScore: number // 0-1 overall suitability (expertise + availability)
  expertiseMatchScore: number // 0-1 expertise overlap only
  reasoning: string
}
// ─── Batched AI Matching ─────────────────────────────────────────────────────
/**
 * Process a batch of projects for mentor matching
 *
 * Builds an anonymized prompt (projects become P1..Pn, mentors become
 * numeric indices; no names or emails are sent), asks the model for ranked
 * matches, logs token usage, and maps results back to the real project and
 * mentor ids. On a JSON parse error the batch's projects get empty match
 * lists (so the caller can fall back to the algorithmic matcher); any
 * other error is rethrown.
 *
 * @param openai   - Initialized OpenAI client
 * @param model    - Model name for the completion and usage logging
 * @param projects - Batch of projects to match (caller controls batch size)
 * @param mentors  - Candidate mentors; array indices are the ids used in
 *                   the prompt, so this order must not change mid-call
 * @param limit    - How many mentors to request per project
 * @param userId   - Optional actor id recorded in the AI usage log
 */
async function processMatchingBatch(
  openai: NonNullable<Awaited<ReturnType<typeof getOpenAI>>>,
  model: string,
  projects: ProjectInfo[],
  mentors: MentorInfo[],
  limit: number,
  userId?: string
): Promise<{
  results: Map<string, MentorMatch[]>
  tokensUsed: number
}> {
  const results = new Map<string, MentorMatch[]>()
  let tokensUsed = 0
  // Anonymize project data
  const anonymizedProjects = projects.map((p, index) => ({
    project_id: `P${index + 1}`,
    real_id: p.id,
    description: p.description?.slice(0, 350) || 'No description',
    category: p.competitionCategory,
    oceanIssue: p.oceanIssue,
    tags: p.tags,
  }))
  // Anonymize mentor data
  const anonymizedMentors = mentors.map((m, index) => ({
    index,
    expertise: m.expertiseTags,
    availability: m.maxAssignments
      ? `${m.currentAssignments}/${m.maxAssignments}`
      : 'unlimited',
  }))
  // NOTE(review): description is cut to 350 chars above and again to 200
  // inside the prompt, so the 200-char cut is what the model actually sees.
  // The flush-left lines below are inside the template literal; keep as-is.
  const userPrompt = `PROJECTS:
${anonymizedProjects.map(p => `${p.project_id}: Category=${p.category || 'N/A'}, Issue=${p.oceanIssue || 'N/A'}, Tags=[${p.tags.join(', ')}], Desc=${p.description.slice(0, 200)}`).join('\n')}
MENTORS:
${anonymizedMentors.map(m => `${m.index}: Expertise=[${m.expertise.join(', ')}], Availability=${m.availability}`).join('\n')}
For each project, rank top ${limit} mentors.`
  try {
    const params = buildCompletionParams(model, {
      messages: [
        { role: 'system', content: MENTOR_MATCHING_SYSTEM_PROMPT },
        { role: 'user', content: userPrompt },
      ],
      jsonMode: true,
      temperature: 0.3,
      maxTokens: 4000,
    })
    const response = await openai.chat.completions.create(params)
    const usage = extractTokenUsage(response)
    tokensUsed = usage.totalTokens
    // Log usage
    await logAIUsage({
      userId,
      action: 'MENTOR_MATCHING',
      entityType: 'Project',
      model,
      promptTokens: usage.promptTokens,
      completionTokens: usage.completionTokens,
      totalTokens: usage.totalTokens,
      batchSize: projects.length,
      itemsProcessed: projects.length,
      status: 'SUCCESS',
    })
    const content = response.choices[0]?.message?.content
    if (!content) {
      throw new Error('No response from AI')
    }
    const parsed = JSON.parse(content) as {
      matches: Array<{
        project_id: string
        mentor_matches: Array<{
          mentor_index: number
          confidence_score: number
          expertise_match_score: number
          reasoning: string
        }>
      }>
    }
    // Map results back to real IDs
    for (const projectMatch of parsed.matches || []) {
      const project = anonymizedProjects.find(p => p.project_id === projectMatch.project_id)
      if (!project) continue
      const mentorMatches: MentorMatch[] = []
      for (const match of projectMatch.mentor_matches || []) {
        // Ignore out-of-range mentor indices the model may hallucinate.
        if (match.mentor_index >= 0 && match.mentor_index < mentors.length) {
          mentorMatches.push({
            mentorId: mentors[match.mentor_index].id,
            // Clamp model-reported scores into [0, 1].
            confidenceScore: Math.min(1, Math.max(0, match.confidence_score)),
            expertiseMatchScore: Math.min(1, Math.max(0, match.expertise_match_score)),
            reasoning: match.reasoning,
          })
        }
      }
      results.set(project.real_id, mentorMatches)
    }
  } catch (error) {
    if (error instanceof SyntaxError) {
      // Malformed JSON from the model: record the failure, degrade gracefully.
      const parseError = createParseError(error.message)
      logAIError('MentorMatching', 'batch processing', parseError)
      await logAIUsage({
        userId,
        action: 'MENTOR_MATCHING',
        entityType: 'Project',
        model,
        promptTokens: 0,
        completionTokens: 0,
        totalTokens: tokensUsed,
        batchSize: projects.length,
        itemsProcessed: 0,
        status: 'ERROR',
        errorMessage: parseError.message,
      })
      // Return empty results for batch (will fall back to algorithm)
      for (const project of projects) {
        results.set(project.id, [])
      }
    } else {
      throw error
    }
  }
  return { results, tokensUsed }
}
/**
 * Get AI-suggested mentor matches for multiple projects (batched).
 *
 * Pipeline:
 *  1. Load the requested projects and all ACTIVE users that either have
 *     expertise tags or hold the JURY_MEMBER role.
 *  2. Drop mentors that already reached their maxAssignments cap.
 *  3. If OpenAI is configured, run AI matching in batches of
 *     MENTOR_BATCH_SIZE via processMatchingBatch; otherwise fall back to
 *     the keyword-overlap algorithm.
 *  4. Back-fill any project the AI returned no matches for with the
 *     algorithmic fallback.
 *
 * Never rejects on AI failure: any error is classified, logged via
 * logAIUsage, and the algorithmic fallback is returned instead.
 *
 * @param prisma     Prisma client used for all lookups.
 * @param projectIds IDs of the projects to find mentors for.
 * @param limit      Maximum number of matches per project (default 5).
 * @param userId     Optional actor ID recorded in AI usage/error logs.
 * @returns Map of project ID -> ranked MentorMatch list (empty map when no
 *          projects or no available mentors were found).
 */
export async function getAIMentorSuggestionsBatch(
  prisma: PrismaClient,
  projectIds: string[],
  limit: number = 5,
  userId?: string
): Promise<Map<string, MentorMatch[]>> {
  const allResults = new Map<string, MentorMatch[]>()
  // Get projects (only the text/tag fields used for matching)
  const projects = await prisma.project.findMany({
    where: { id: { in: projectIds } },
    select: {
      id: true,
      title: true,
      description: true,
      oceanIssue: true,
      competitionCategory: true,
      tags: true,
    },
  })
  if (projects.length === 0) {
    return allResults
  }
  // Get available mentors: active users with expertise tags or the jury role
  const mentors = await prisma.user.findMany({
    where: {
      OR: [
        { expertiseTags: { isEmpty: false } },
        { role: 'JURY_MEMBER' },
      ],
      status: 'ACTIVE',
    },
    select: {
      id: true,
      name: true,
      email: true,
      expertiseTags: true,
      maxAssignments: true,
      mentorAssignments: {
        select: { id: true },
      },
    },
  })
  // Filter mentors who haven't reached max assignments
  // (no maxAssignments means unlimited capacity)
  const availableMentors: MentorInfo[] = mentors
    .filter((m) => {
      const currentAssignments = m.mentorAssignments.length
      return !m.maxAssignments || currentAssignments < m.maxAssignments
    })
    .map((m) => ({
      id: m.id,
      name: m.name,
      email: m.email,
      expertiseTags: m.expertiseTags,
      currentAssignments: m.mentorAssignments.length,
      maxAssignments: m.maxAssignments,
    }))
  if (availableMentors.length === 0) {
    return allResults
  }
  // Try AI matching
  try {
    const openai = await getOpenAI()
    if (!openai) {
      console.log('[Mentor Matching] OpenAI not configured, using algorithm')
      return getAlgorithmicMatchesBatch(projects, availableMentors, limit)
    }
    const model = await getConfiguredModel()
    console.log(`[Mentor Matching] Using model: ${model} for ${projects.length} projects in batches of ${MENTOR_BATCH_SIZE}`)
    let totalTokens = 0
    // Process in batches (sequential batches; each batch is one AI call)
    for (let i = 0; i < projects.length; i += MENTOR_BATCH_SIZE) {
      const batchProjects = projects.slice(i, i + MENTOR_BATCH_SIZE)
      console.log(`[Mentor Matching] Processing batch ${Math.floor(i / MENTOR_BATCH_SIZE) + 1}/${Math.ceil(projects.length / MENTOR_BATCH_SIZE)}`)
      const { results, tokensUsed } = await processMatchingBatch(
        openai,
        model,
        batchProjects,
        availableMentors,
        limit,
        userId
      )
      totalTokens += tokensUsed
      // Merge results
      for (const [projectId, matches] of results) {
        allResults.set(projectId, matches)
      }
    }
    console.log(`[Mentor Matching] Completed. Total tokens: ${totalTokens}`)
    // Fill in any missing projects with algorithmic fallback
    // (covers both "AI skipped the project" and "AI returned zero matches")
    for (const project of projects) {
      if (!allResults.has(project.id) || allResults.get(project.id)?.length === 0) {
        const fallbackMatches = getAlgorithmicMatches(project, availableMentors, limit)
        allResults.set(project.id, fallbackMatches)
      }
    }
    return allResults
  } catch (error) {
    const classified = classifyAIError(error)
    logAIError('MentorMatching', 'getAIMentorSuggestionsBatch', classified)
    // Log failed attempt (model unknown here — the failure may predate model selection)
    await logAIUsage({
      userId,
      action: 'MENTOR_MATCHING',
      entityType: 'Project',
      model: 'unknown',
      promptTokens: 0,
      completionTokens: 0,
      totalTokens: 0,
      batchSize: projects.length,
      itemsProcessed: 0,
      status: 'ERROR',
      errorMessage: classified.message,
    })
    console.error('[Mentor Matching] AI failed, using algorithm:', classified.message)
    return getAlgorithmicMatchesBatch(projects, availableMentors, limit)
  }
}
/**
 * Get AI-suggested mentor matches for a single project.
 *
 * Convenience wrapper around the batch API: runs a one-element batch and
 * unwraps the result for this project.
 *
 * @param prisma    Prisma client used for lookups.
 * @param projectId Project to find mentors for.
 * @param limit     Maximum number of matches to return (default 5).
 * @param userId    Optional actor ID recorded in AI usage logs.
 * @returns Ranked mentor matches, or an empty list when none were found.
 */
export async function getAIMentorSuggestions(
  prisma: PrismaClient,
  projectId: string,
  limit: number = 5,
  userId?: string
): Promise<MentorMatch[]> {
  const batchResults = await getAIMentorSuggestionsBatch(prisma, [projectId], limit, userId)
  const matches = batchResults.get(projectId)
  return matches ?? []
}
// ─── Algorithmic Fallback ────────────────────────────────────────────────────
/**
 * Keyword-overlap fallback applied independently to each project.
 *
 * @param projects Projects to match.
 * @param mentors  Candidate mentors.
 * @param limit    Maximum matches per project.
 * @returns Map of project ID -> ranked matches.
 */
function getAlgorithmicMatchesBatch(
  projects: ProjectInfo[],
  mentors: MentorInfo[],
  limit: number
): Map<string, MentorMatch[]> {
  return new Map(
    projects.map((project) => [project.id, getAlgorithmicMatches(project, mentors, limit)])
  )
}
/**
 * Algorithmic fallback for mentor matching.
 *
 * Builds a keyword set from the project (ocean issue, competition category,
 * tags, and description words) and scores each mentor by substring overlap
 * with keywords derived from their expertise tags. Remaining assignment
 * capacity is blended in at 30% weight.
 *
 * @param project Project to match (only text/tag fields are read).
 * @param mentors Candidate mentors with current/max assignment counts.
 * @param limit   Maximum number of matches to return.
 * @returns Top `limit` matches sorted by descending confidence; all scores
 *          are rounded to 2 decimals and lie in [0, 1].
 */
function getAlgorithmicMatches(
  project: ProjectInfo,
  mentors: MentorInfo[],
  limit: number
): MentorMatch[] {
  // Build keyword set from project
  const projectKeywords = new Set<string>()
  if (project.oceanIssue) {
    projectKeywords.add(project.oceanIssue.toLowerCase().replace(/_/g, ' '))
  }
  if (project.competitionCategory) {
    projectKeywords.add(project.competitionCategory.toLowerCase().replace(/_/g, ' '))
  }
  project.tags.forEach((tag) => {
    tag.toLowerCase().split(/\s+/).forEach((word) => {
      if (word.length > 3) projectKeywords.add(word)
    })
  })
  if (project.description) {
    const words = project.description.toLowerCase().split(/\s+/)
    words.forEach((word) => {
      // Strip non-letters, then skip words that clean down to ''. An empty
      // keyword would spuriously "match" every mentor keyword (since
      // mentorKeyword.includes('') is always true), inflating scores.
      const cleaned = word.replace(/[^a-z]/g, '')
      if (word.length > 4 && cleaned.length > 0) projectKeywords.add(cleaned)
    })
  }
  // Score each mentor
  const scored = mentors.map((mentor) => {
    const mentorKeywords = new Set<string>()
    mentor.expertiseTags.forEach((tag) => {
      tag.toLowerCase().split(/\s+/).forEach((word) => {
        if (word.length > 2) mentorKeywords.add(word)
      })
    })
    // Count substring overlaps in either direction
    let matchCount = 0
    projectKeywords.forEach((keyword) => {
      mentorKeywords.forEach((mentorKeyword) => {
        if (keyword.includes(mentorKeyword) || mentorKeyword.includes(keyword)) {
          matchCount++
        }
      })
    })
    const expertiseMatchScore = mentorKeywords.size > 0
      ? Math.min(1, matchCount / mentorKeywords.size)
      : 0
    // Factor in availability; clamp to [0, 1] so a mentor that is somehow
    // over their cap cannot drive the confidence score negative.
    const availabilityScore = mentor.maxAssignments
      ? Math.min(1, Math.max(0, 1 - mentor.currentAssignments / mentor.maxAssignments))
      : 1
    const confidenceScore = (expertiseMatchScore * 0.7 + availabilityScore * 0.3)
    return {
      mentorId: mentor.id,
      confidenceScore: Math.round(confidenceScore * 100) / 100,
      expertiseMatchScore: Math.round(expertiseMatchScore * 100) / 100,
      reasoning: `Matched ${matchCount} keyword(s). Availability: ${availabilityScore > 0.5 ? 'Good' : 'Limited'}.`,
    }
  })
  // Sort by confidence and return top matches
  return scored
    .sort((a, b) => b.confidenceScore - a.confidenceScore)
    .slice(0, limit)
}
/**
 * Round-robin mentor assignment for load balancing.
 *
 * Candidates are ACTIVE users with expertise tags or the JURY_MEMBER role,
 * minus any excluded IDs, ordered by ascending current assignment count so
 * the least-loaded mentors are considered first.
 *
 * @param prisma           Prisma client used for the lookup.
 * @param excludeMentorIds Mentor IDs to skip (e.g. already assigned).
 * @returns ID of the least-loaded mentor with remaining capacity, or null
 *          when every candidate is at their cap.
 */
export async function getRoundRobinMentor(
  prisma: PrismaClient,
  excludeMentorIds: string[] = []
): Promise<string | null> {
  const candidates = await prisma.user.findMany({
    where: {
      OR: [
        { expertiseTags: { isEmpty: false } },
        { role: 'JURY_MEMBER' },
      ],
      status: 'ACTIVE',
      id: { notIn: excludeMentorIds },
    },
    select: {
      id: true,
      maxAssignments: true,
      mentorAssignments: {
        select: { id: true },
      },
    },
    orderBy: {
      mentorAssignments: {
        _count: 'asc',
      },
    },
  })
  // First candidate with spare capacity wins (no cap means unlimited).
  const available = candidates.find(
    (mentor) =>
      !mentor.maxAssignments ||
      mentor.mentorAssignments.length < mentor.maxAssignments
  )
  return available ? available.id : null
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,464 +1,464 @@
/**
* Stage Engine Service
*
* State machine service for managing project transitions between stages in
* the pipeline. Handles validation of transitions (guard evaluation, window
* constraints, PSS existence) and atomic execution with full audit logging.
*
* Key invariants:
* - A project can only be in one active PSS per track/stage combination
* - Transitions must follow defined StageTransition records
* - Guard conditions (guardJson) on transitions are evaluated before execution
* - All transitions are logged in DecisionAuditLog and AuditLog
*/
import type { PrismaClient, ProjectStageStateValue, Prisma } from '@prisma/client'
import { logAudit } from '@/server/utils/audit'
// ─── Types ──────────────────────────────────────────────────────────────────
export interface TransitionValidationResult {
valid: boolean
errors: string[]
}
export interface TransitionExecutionResult {
success: boolean
projectStageState: {
id: string
projectId: string
trackId: string
stageId: string
state: ProjectStageStateValue
} | null
errors?: string[]
}
export interface BatchTransitionResult {
succeeded: string[]
failed: Array<{ projectId: string; errors: string[] }>
total: number
}
interface GuardCondition {
field: string
operator: 'eq' | 'neq' | 'in' | 'contains' | 'gt' | 'lt' | 'exists'
value: unknown
}
interface GuardConfig {
conditions?: GuardCondition[]
logic?: 'AND' | 'OR'
requireAllEvaluationsComplete?: boolean
requireMinScore?: number
}
// ─── Constants ──────────────────────────────────────────────────────────────
const BATCH_SIZE = 50
// ─── Guard Evaluation ───────────────────────────────────────────────────────
/**
 * Evaluate a single guard condition against a context object.
 *
 * Unknown operators evaluate to false (fail closed).
 *
 * @param condition Guard condition (field, operator, expected value).
 * @param context   Flat map of field name -> actual value.
 * @returns true when the condition holds for the context.
 */
function evaluateGuardCondition(
  condition: GuardCondition,
  context: Record<string, unknown>
): boolean {
  const actual = context[condition.field]
  const expected = condition.value
  const op = condition.operator
  if (op === 'eq') return actual === expected
  if (op === 'neq') return actual !== expected
  if (op === 'in') {
    // Expected must be a list; membership is by strict equality.
    return Array.isArray(expected) && expected.includes(actual)
  }
  if (op === 'contains') {
    // String/string: case-insensitive substring; array field: membership.
    if (typeof actual === 'string' && typeof expected === 'string') {
      return actual.toLowerCase().includes(expected.toLowerCase())
    }
    return Array.isArray(actual) ? actual.includes(expected) : false
  }
  if (op === 'gt') return Number(actual) > Number(expected)
  if (op === 'lt') return Number(actual) < Number(expected)
  if (op === 'exists') return actual !== null && actual !== undefined
  return false
}
/**
 * Evaluate a transition's guard configuration against a context.
 *
 * A missing/non-object guard or an empty condition list passes. With
 * logic 'AND' (the default) every condition must hold; with 'OR' at least
 * one must. Failure messages are only returned when the guard fails overall.
 *
 * @param guardJson Raw guardJson from the StageTransition record.
 * @param context   Field values the conditions are evaluated against.
 * @returns Overall pass/fail plus human-readable failed-condition messages.
 */
function evaluateGuard(
  guardJson: Prisma.JsonValue | null | undefined,
  context: Record<string, unknown>
): { passed: boolean; failedConditions: string[] } {
  if (!guardJson || typeof guardJson !== 'object') {
    return { passed: true, failedConditions: [] }
  }
  const config = guardJson as unknown as GuardConfig
  const conditionList = config.conditions ?? []
  if (conditionList.length === 0) {
    return { passed: true, failedConditions: [] }
  }
  const failures: string[] = []
  let passCount = 0
  for (const cond of conditionList) {
    if (evaluateGuardCondition(cond, context)) {
      passCount++
    } else {
      failures.push(
        `Guard failed: ${cond.field} ${cond.operator} ${JSON.stringify(cond.value)}`
      )
    }
  }
  const mode = config.logic ?? 'AND'
  const passed = mode === 'AND' ? failures.length === 0 : passCount > 0
  return { passed, failedConditions: passed ? [] : failures }
}
// ─── Validate Transition ────────────────────────────────────────────────────
/**
 * Validate whether a project can transition from one stage to another.
 * Checks:
 * 1. Source PSS (ProjectStageState) exists and is not already exited
 * 2. A StageTransition record exists for fromStage -> toStage
 * 3. Destination stage is active (not ARCHIVED)
 * 4. Voting/evaluation window constraints on the destination stage
 * 5. Guard conditions on the transition
 *
 * Errors are accumulated (no early return after the first failure, except
 * when the transition or destination stage is missing entirely) so callers
 * can surface every problem at once.
 *
 * @param projectId   Project whose stage state is being checked.
 * @param fromStageId Stage the project is expected to currently occupy.
 * @param toStageId   Candidate destination stage.
 * @param prisma      Prisma client or transaction client.
 * @returns `valid` is true only when `errors` is empty.
 */
export async function validateTransition(
  projectId: string,
  fromStageId: string,
  toStageId: string,
  prisma: PrismaClient | any
): Promise<TransitionValidationResult> {
  const errors: string[] = []
  // 1. Check source PSS exists and is active (no exitedAt)
  const sourcePSS = await prisma.projectStageState.findFirst({
    where: {
      projectId,
      stageId: fromStageId,
      exitedAt: null,
    },
  })
  if (!sourcePSS) {
    errors.push(
      `Project ${projectId} has no active state in stage ${fromStageId}`
    )
  }
  // 2. Check StageTransition record exists — fatal if missing, since the
  //    remaining checks (guards in particular) depend on it.
  const transition = await prisma.stageTransition.findUnique({
    where: {
      fromStageId_toStageId: {
        fromStageId,
        toStageId,
      },
    },
  })
  if (!transition) {
    errors.push(
      `No transition defined from stage ${fromStageId} to stage ${toStageId}`
    )
    return { valid: false, errors }
  }
  // 3. Check destination stage is active
  const destStage = await prisma.stage.findUnique({
    where: { id: toStageId },
  })
  if (!destStage) {
    errors.push(`Destination stage ${toStageId} not found`)
    return { valid: false, errors }
  }
  if (destStage.status === 'STAGE_ARCHIVED') {
    errors.push(`Destination stage "${destStage.name}" is archived`)
  }
  // 4. Check window constraints on destination stage (null bounds mean open)
  const now = new Date()
  if (destStage.windowOpenAt && now < destStage.windowOpenAt) {
    errors.push(
      `Destination stage "${destStage.name}" window has not opened yet (opens ${destStage.windowOpenAt.toISOString()})`
    )
  }
  if (destStage.windowCloseAt && now > destStage.windowCloseAt) {
    errors.push(
      `Destination stage "${destStage.name}" window has already closed (closed ${destStage.windowCloseAt.toISOString()})`
    )
  }
  // 5. Evaluate guard conditions (only when a guard exists AND we have an
  //    active source PSS to build the context from)
  if (transition.guardJson && sourcePSS) {
    // Build context from the project and its current state for guard evaluation
    const project = await prisma.project.findUnique({
      where: { id: projectId },
      include: {
        assignments: {
          where: { stageId: fromStageId },
          include: { evaluation: true },
        },
      },
    })
    const evaluations = project?.assignments
      ?.map((a: any) => a.evaluation)
      .filter(Boolean) ?? []
    const submittedEvaluations = evaluations.filter(
      (e: any) => e.status === 'SUBMITTED'
    )
    // Average over SUBMITTED evaluations only; a null globalScore counts as 0.
    const avgScore =
      submittedEvaluations.length > 0
        ? submittedEvaluations.reduce(
            (sum: number, e: any) => sum + (e.globalScore ?? 0),
            0
          ) / submittedEvaluations.length
        : 0
    // Field names here are what guardJson conditions can reference.
    const guardContext: Record<string, unknown> = {
      state: sourcePSS?.state,
      evaluationCount: evaluations.length,
      submittedEvaluationCount: submittedEvaluations.length,
      averageScore: avgScore,
      status: project?.status,
      country: project?.country,
      competitionCategory: project?.competitionCategory,
      tags: project?.tags ?? [],
    }
    const guardResult = evaluateGuard(transition.guardJson, guardContext)
    if (!guardResult.passed) {
      errors.push(...guardResult.failedConditions)
    }
  }
  return { valid: errors.length === 0, errors }
}
// ─── Execute Transition ─────────────────────────────────────────────────────
/**
 * Execute a stage transition for a single project atomically.
 * Within a transaction:
 * 1. Sets exitedAt on the source PSS
 * 2. Creates or updates the destination PSS with the new state
 * 3. Logs the transition in DecisionAuditLog
 * 4. Logs the transition in AuditLog
 *
 * Does not validate — callers are expected to run validateTransition first.
 * Never throws: any failure is caught and reported via the result object.
 *
 * @param projectId   Project being moved.
 * @param trackId     Track the destination PSS belongs to.
 * @param fromStageId Stage being exited.
 * @param toStageId   Stage being entered.
 * @param newState    State to set on the destination PSS.
 * @param actorId     User performing the transition (recorded in audit logs).
 * @param prisma      Prisma client (a new transaction is opened on it).
 * @returns success flag plus the destination PSS summary, or errors.
 */
export async function executeTransition(
  projectId: string,
  trackId: string,
  fromStageId: string,
  toStageId: string,
  newState: ProjectStageStateValue,
  actorId: string,
  prisma: PrismaClient | any
): Promise<TransitionExecutionResult> {
  try {
    const result = await prisma.$transaction(async (tx: any) => {
      const now = new Date()
      // 1. Exit the source PSS (tolerates a missing source — transition
      //    proceeds and only the destination is touched)
      const sourcePSS = await tx.projectStageState.findFirst({
        where: {
          projectId,
          stageId: fromStageId,
          exitedAt: null,
        },
      })
      if (sourcePSS) {
        await tx.projectStageState.update({
          where: { id: sourcePSS.id },
          data: {
            exitedAt: now,
            // In-flight states are finalized as COMPLETED on exit;
            // terminal states are left as-is.
            state: sourcePSS.state === 'PENDING' || sourcePSS.state === 'IN_PROGRESS'
              ? 'COMPLETED'
              : sourcePSS.state,
          },
        })
      }
      // 2. Create or update destination PSS — re-entering a previously
      //    visited stage reuses the existing row (resets enteredAt and
      //    clears exitedAt) to respect the (projectId, trackId, stageId)
      //    uniqueness constraint.
      const existingDestPSS = await tx.projectStageState.findUnique({
        where: {
          projectId_trackId_stageId: {
            projectId,
            trackId,
            stageId: toStageId,
          },
        },
      })
      let destPSS
      if (existingDestPSS) {
        destPSS = await tx.projectStageState.update({
          where: { id: existingDestPSS.id },
          data: {
            state: newState,
            enteredAt: now,
            exitedAt: null,
          },
        })
      } else {
        destPSS = await tx.projectStageState.create({
          data: {
            projectId,
            trackId,
            stageId: toStageId,
            state: newState,
            enteredAt: now,
          },
        })
      }
      // 3. Log in DecisionAuditLog (inside the transaction, so a logging
      //    failure rolls the transition back)
      await tx.decisionAuditLog.create({
        data: {
          eventType: 'stage.transitioned',
          entityType: 'ProjectStageState',
          entityId: destPSS.id,
          actorId,
          detailsJson: {
            projectId,
            trackId,
            fromStageId,
            toStageId,
            previousState: sourcePSS?.state ?? null,
            newState,
          },
          snapshotJson: {
            sourcePSSId: sourcePSS?.id ?? null,
            destPSSId: destPSS.id,
            timestamp: now.toISOString(),
          },
        },
      })
      // 4. Audit log (never throws)
      await logAudit({
        prisma: tx,
        userId: actorId,
        action: 'STAGE_TRANSITION',
        entityType: 'ProjectStageState',
        entityId: destPSS.id,
        detailsJson: {
          projectId,
          fromStageId,
          toStageId,
          newState,
        },
      })
      return destPSS
    })
    return {
      success: true,
      projectStageState: {
        id: result.id,
        projectId: result.projectId,
        trackId: result.trackId,
        stageId: result.stageId,
        state: result.state,
      },
    }
  } catch (error) {
    // Transaction rolled back — report the failure instead of throwing.
    console.error('[StageEngine] Transition execution failed:', error)
    return {
      success: false,
      projectStageState: null,
      errors: [
        error instanceof Error
          ? error.message
          : 'Unknown error during transition execution',
      ],
    }
  }
}
// ─── Batch Transition ───────────────────────────────────────────────────────
/**
 * Execute transitions for multiple projects, BATCH_SIZE at a time.
 * Each project is validated and executed independently, so one failure
 * never blocks the others.
 *
 * @param projectIds Projects to transition.
 * @param trackId    Track for the destination stage states.
 * @param fromStageId Stage being exited.
 * @param toStageId  Stage being entered.
 * @param newState   State to set on each destination PSS.
 * @param actorId    User performing the transitions (for audit logs).
 * @param prisma     Prisma client.
 * @returns Per-project success/failure breakdown plus the total count.
 */
export async function executeBatchTransition(
  projectIds: string[],
  trackId: string,
  fromStageId: string,
  toStageId: string,
  newState: ProjectStageStateValue,
  actorId: string,
  prisma: PrismaClient | any
): Promise<BatchTransitionResult> {
  const succeeded: string[] = []
  const failed: Array<{ projectId: string; errors: string[] }> = []
  // Per-project worker: validate, then execute; records its own outcome.
  const processOne = async (projectId: string): Promise<void> => {
    const validation = await validateTransition(
      projectId,
      fromStageId,
      toStageId,
      prisma
    )
    if (!validation.valid) {
      failed.push({ projectId, errors: validation.errors })
      return
    }
    const outcome = await executeTransition(
      projectId,
      trackId,
      fromStageId,
      toStageId,
      newState,
      actorId,
      prisma
    )
    if (outcome.success) {
      succeeded.push(projectId)
    } else {
      failed.push({
        projectId,
        errors: outcome.errors ?? ['Transition execution failed'],
      })
    }
  }
  // Run up to BATCH_SIZE projects concurrently, one batch at a time.
  for (let offset = 0; offset < projectIds.length; offset += BATCH_SIZE) {
    await Promise.all(
      projectIds.slice(offset, offset + BATCH_SIZE).map(processOne)
    )
  }
  return {
    succeeded,
    failed,
    total: projectIds.length,
  }
}
/**
* Stage Engine Service
*
* State machine service for managing project transitions between stages in
* the pipeline. Handles validation of transitions (guard evaluation, window
* constraints, PSS existence) and atomic execution with full audit logging.
*
* Key invariants:
* - A project can only be in one active PSS per track/stage combination
* - Transitions must follow defined StageTransition records
* - Guard conditions (guardJson) on transitions are evaluated before execution
* - All transitions are logged in DecisionAuditLog and AuditLog
*/
import type { PrismaClient, ProjectStageStateValue, Prisma } from '@prisma/client'
import { logAudit } from '@/server/utils/audit'
// ─── Types ──────────────────────────────────────────────────────────────────
export interface TransitionValidationResult {
valid: boolean
errors: string[]
}
export interface TransitionExecutionResult {
success: boolean
projectStageState: {
id: string
projectId: string
trackId: string
stageId: string
state: ProjectStageStateValue
} | null
errors?: string[]
}
export interface BatchTransitionResult {
succeeded: string[]
failed: Array<{ projectId: string; errors: string[] }>
total: number
}
interface GuardCondition {
field: string
operator: 'eq' | 'neq' | 'in' | 'contains' | 'gt' | 'lt' | 'exists'
value: unknown
}
interface GuardConfig {
conditions?: GuardCondition[]
logic?: 'AND' | 'OR'
requireAllEvaluationsComplete?: boolean
requireMinScore?: number
}
// ─── Constants ──────────────────────────────────────────────────────────────
const BATCH_SIZE = 50
// ─── Guard Evaluation ───────────────────────────────────────────────────────
/**
 * Evaluate a single guard condition against a context object.
 *
 * Unknown operators evaluate to false (fail closed).
 *
 * @param condition Guard condition (field, operator, expected value).
 * @param context   Flat map of field name -> actual value.
 * @returns true when the condition holds for the context.
 */
function evaluateGuardCondition(
  condition: GuardCondition,
  context: Record<string, unknown>
): boolean {
  const actual = context[condition.field]
  const expected = condition.value
  const op = condition.operator
  if (op === 'eq') return actual === expected
  if (op === 'neq') return actual !== expected
  if (op === 'in') {
    // Expected must be a list; membership is by strict equality.
    return Array.isArray(expected) && expected.includes(actual)
  }
  if (op === 'contains') {
    // String/string: case-insensitive substring; array field: membership.
    if (typeof actual === 'string' && typeof expected === 'string') {
      return actual.toLowerCase().includes(expected.toLowerCase())
    }
    return Array.isArray(actual) ? actual.includes(expected) : false
  }
  if (op === 'gt') return Number(actual) > Number(expected)
  if (op === 'lt') return Number(actual) < Number(expected)
  if (op === 'exists') return actual !== null && actual !== undefined
  return false
}
/**
 * Evaluate a transition's guard configuration against a context.
 *
 * A missing/non-object guard or an empty condition list passes. With
 * logic 'AND' (the default) every condition must hold; with 'OR' at least
 * one must. Failure messages are only returned when the guard fails overall.
 *
 * @param guardJson Raw guardJson from the StageTransition record.
 * @param context   Field values the conditions are evaluated against.
 * @returns Overall pass/fail plus human-readable failed-condition messages.
 */
function evaluateGuard(
  guardJson: Prisma.JsonValue | null | undefined,
  context: Record<string, unknown>
): { passed: boolean; failedConditions: string[] } {
  if (!guardJson || typeof guardJson !== 'object') {
    return { passed: true, failedConditions: [] }
  }
  const config = guardJson as unknown as GuardConfig
  const conditionList = config.conditions ?? []
  if (conditionList.length === 0) {
    return { passed: true, failedConditions: [] }
  }
  const failures: string[] = []
  let passCount = 0
  for (const cond of conditionList) {
    if (evaluateGuardCondition(cond, context)) {
      passCount++
    } else {
      failures.push(
        `Guard failed: ${cond.field} ${cond.operator} ${JSON.stringify(cond.value)}`
      )
    }
  }
  const mode = config.logic ?? 'AND'
  const passed = mode === 'AND' ? failures.length === 0 : passCount > 0
  return { passed, failedConditions: passed ? [] : failures }
}
// ─── Validate Transition ────────────────────────────────────────────────────
/**
 * Validate whether a project can transition from one stage to another.
 * Checks:
 * 1. Source PSS (ProjectStageState) exists and is not already exited
 * 2. A StageTransition record exists for fromStage -> toStage
 * 3. Destination stage is active (not ARCHIVED)
 * 4. Voting/evaluation window constraints on the destination stage
 * 5. Guard conditions on the transition
 *
 * Errors are accumulated (no early return after the first failure, except
 * when the transition or destination stage is missing entirely) so callers
 * can surface every problem at once.
 *
 * @param projectId   Project whose stage state is being checked.
 * @param fromStageId Stage the project is expected to currently occupy.
 * @param toStageId   Candidate destination stage.
 * @param prisma      Prisma client or transaction client.
 * @returns `valid` is true only when `errors` is empty.
 */
export async function validateTransition(
  projectId: string,
  fromStageId: string,
  toStageId: string,
  prisma: PrismaClient | any
): Promise<TransitionValidationResult> {
  const errors: string[] = []
  // 1. Check source PSS exists and is active (no exitedAt)
  const sourcePSS = await prisma.projectStageState.findFirst({
    where: {
      projectId,
      stageId: fromStageId,
      exitedAt: null,
    },
  })
  if (!sourcePSS) {
    errors.push(
      `Project ${projectId} has no active state in stage ${fromStageId}`
    )
  }
  // 2. Check StageTransition record exists — fatal if missing, since the
  //    remaining checks (guards in particular) depend on it.
  const transition = await prisma.stageTransition.findUnique({
    where: {
      fromStageId_toStageId: {
        fromStageId,
        toStageId,
      },
    },
  })
  if (!transition) {
    errors.push(
      `No transition defined from stage ${fromStageId} to stage ${toStageId}`
    )
    return { valid: false, errors }
  }
  // 3. Check destination stage is active
  const destStage = await prisma.stage.findUnique({
    where: { id: toStageId },
  })
  if (!destStage) {
    errors.push(`Destination stage ${toStageId} not found`)
    return { valid: false, errors }
  }
  if (destStage.status === 'STAGE_ARCHIVED') {
    errors.push(`Destination stage "${destStage.name}" is archived`)
  }
  // 4. Check window constraints on destination stage (null bounds mean open)
  const now = new Date()
  if (destStage.windowOpenAt && now < destStage.windowOpenAt) {
    errors.push(
      `Destination stage "${destStage.name}" window has not opened yet (opens ${destStage.windowOpenAt.toISOString()})`
    )
  }
  if (destStage.windowCloseAt && now > destStage.windowCloseAt) {
    errors.push(
      `Destination stage "${destStage.name}" window has already closed (closed ${destStage.windowCloseAt.toISOString()})`
    )
  }
  // 5. Evaluate guard conditions (only when a guard exists AND we have an
  //    active source PSS to build the context from)
  if (transition.guardJson && sourcePSS) {
    // Build context from the project and its current state for guard evaluation
    const project = await prisma.project.findUnique({
      where: { id: projectId },
      include: {
        assignments: {
          where: { stageId: fromStageId },
          include: { evaluation: true },
        },
      },
    })
    const evaluations = project?.assignments
      ?.map((a: any) => a.evaluation)
      .filter(Boolean) ?? []
    const submittedEvaluations = evaluations.filter(
      (e: any) => e.status === 'SUBMITTED'
    )
    // Average over SUBMITTED evaluations only; a null globalScore counts as 0.
    const avgScore =
      submittedEvaluations.length > 0
        ? submittedEvaluations.reduce(
            (sum: number, e: any) => sum + (e.globalScore ?? 0),
            0
          ) / submittedEvaluations.length
        : 0
    // Field names here are what guardJson conditions can reference.
    const guardContext: Record<string, unknown> = {
      state: sourcePSS?.state,
      evaluationCount: evaluations.length,
      submittedEvaluationCount: submittedEvaluations.length,
      averageScore: avgScore,
      status: project?.status,
      country: project?.country,
      competitionCategory: project?.competitionCategory,
      tags: project?.tags ?? [],
    }
    const guardResult = evaluateGuard(transition.guardJson, guardContext)
    if (!guardResult.passed) {
      errors.push(...guardResult.failedConditions)
    }
  }
  return { valid: errors.length === 0, errors }
}
// ─── Execute Transition ─────────────────────────────────────────────────────
/**
 * Execute a stage transition for a single project atomically.
 * Within a transaction:
 * 1. Sets exitedAt on the source PSS
 * 2. Creates or updates the destination PSS with the new state
 * 3. Logs the transition in DecisionAuditLog
 * 4. Logs the transition in AuditLog
 *
 * Does not validate — callers are expected to run validateTransition first.
 * Never throws: any failure is caught and reported via the result object.
 *
 * @param projectId   Project being moved.
 * @param trackId     Track the destination PSS belongs to.
 * @param fromStageId Stage being exited.
 * @param toStageId   Stage being entered.
 * @param newState    State to set on the destination PSS.
 * @param actorId     User performing the transition (recorded in audit logs).
 * @param prisma      Prisma client (a new transaction is opened on it).
 * @returns success flag plus the destination PSS summary, or errors.
 */
export async function executeTransition(
  projectId: string,
  trackId: string,
  fromStageId: string,
  toStageId: string,
  newState: ProjectStageStateValue,
  actorId: string,
  prisma: PrismaClient | any
): Promise<TransitionExecutionResult> {
  try {
    const result = await prisma.$transaction(async (tx: any) => {
      const now = new Date()
      // 1. Exit the source PSS (tolerates a missing source — transition
      //    proceeds and only the destination is touched)
      const sourcePSS = await tx.projectStageState.findFirst({
        where: {
          projectId,
          stageId: fromStageId,
          exitedAt: null,
        },
      })
      if (sourcePSS) {
        await tx.projectStageState.update({
          where: { id: sourcePSS.id },
          data: {
            exitedAt: now,
            // In-flight states are finalized as COMPLETED on exit;
            // terminal states are left as-is.
            state: sourcePSS.state === 'PENDING' || sourcePSS.state === 'IN_PROGRESS'
              ? 'COMPLETED'
              : sourcePSS.state,
          },
        })
      }
      // 2. Create or update destination PSS — re-entering a previously
      //    visited stage reuses the existing row (resets enteredAt and
      //    clears exitedAt) to respect the (projectId, trackId, stageId)
      //    uniqueness constraint.
      const existingDestPSS = await tx.projectStageState.findUnique({
        where: {
          projectId_trackId_stageId: {
            projectId,
            trackId,
            stageId: toStageId,
          },
        },
      })
      let destPSS
      if (existingDestPSS) {
        destPSS = await tx.projectStageState.update({
          where: { id: existingDestPSS.id },
          data: {
            state: newState,
            enteredAt: now,
            exitedAt: null,
          },
        })
      } else {
        destPSS = await tx.projectStageState.create({
          data: {
            projectId,
            trackId,
            stageId: toStageId,
            state: newState,
            enteredAt: now,
          },
        })
      }
      // 3. Log in DecisionAuditLog (inside the transaction, so a logging
      //    failure rolls the transition back)
      await tx.decisionAuditLog.create({
        data: {
          eventType: 'stage.transitioned',
          entityType: 'ProjectStageState',
          entityId: destPSS.id,
          actorId,
          detailsJson: {
            projectId,
            trackId,
            fromStageId,
            toStageId,
            previousState: sourcePSS?.state ?? null,
            newState,
          },
          snapshotJson: {
            sourcePSSId: sourcePSS?.id ?? null,
            destPSSId: destPSS.id,
            timestamp: now.toISOString(),
          },
        },
      })
      // 4. Audit log (never throws)
      await logAudit({
        prisma: tx,
        userId: actorId,
        action: 'STAGE_TRANSITION',
        entityType: 'ProjectStageState',
        entityId: destPSS.id,
        detailsJson: {
          projectId,
          fromStageId,
          toStageId,
          newState,
        },
      })
      return destPSS
    })
    return {
      success: true,
      projectStageState: {
        id: result.id,
        projectId: result.projectId,
        trackId: result.trackId,
        stageId: result.stageId,
        state: result.state,
      },
    }
  } catch (error) {
    // Transaction rolled back — report the failure instead of throwing.
    console.error('[StageEngine] Transition execution failed:', error)
    return {
      success: false,
      projectStageState: null,
      errors: [
        error instanceof Error
          ? error.message
          : 'Unknown error during transition execution',
      ],
    }
  }
}
// ─── Batch Transition ───────────────────────────────────────────────────────
/**
 * Execute transitions for multiple projects, BATCH_SIZE at a time.
 * Each project is validated and executed independently, so one failure
 * never blocks the others.
 *
 * @param projectIds Projects to transition.
 * @param trackId    Track for the destination stage states.
 * @param fromStageId Stage being exited.
 * @param toStageId  Stage being entered.
 * @param newState   State to set on each destination PSS.
 * @param actorId    User performing the transitions (for audit logs).
 * @param prisma     Prisma client.
 * @returns Per-project success/failure breakdown plus the total count.
 */
export async function executeBatchTransition(
  projectIds: string[],
  trackId: string,
  fromStageId: string,
  toStageId: string,
  newState: ProjectStageStateValue,
  actorId: string,
  prisma: PrismaClient | any
): Promise<BatchTransitionResult> {
  const succeeded: string[] = []
  const failed: Array<{ projectId: string; errors: string[] }> = []
  // Per-project worker: validate, then execute; records its own outcome.
  const processOne = async (projectId: string): Promise<void> => {
    const validation = await validateTransition(
      projectId,
      fromStageId,
      toStageId,
      prisma
    )
    if (!validation.valid) {
      failed.push({ projectId, errors: validation.errors })
      return
    }
    const outcome = await executeTransition(
      projectId,
      trackId,
      fromStageId,
      toStageId,
      newState,
      actorId,
      prisma
    )
    if (outcome.success) {
      succeeded.push(projectId)
    } else {
      failed.push({
        projectId,
        errors: outcome.errors ?? ['Transition execution failed'],
      })
    }
  }
  // Run up to BATCH_SIZE projects concurrently, one batch at a time.
  for (let offset = 0; offset < projectIds.length; offset += BATCH_SIZE) {
    await Promise.all(
      projectIds.slice(offset, offset + BATCH_SIZE).map(processOne)
    )
  }
  return {
    succeeded,
    failed,
    total: projectIds.length,
  }
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,174 +1,174 @@
import crypto from 'crypto'
import { Prisma } from '@prisma/client'
import { prisma } from '@/lib/prisma'
/**
 * Dispatch a webhook event to all active webhooks subscribed to this event.
 *
 * Creates one PENDING WebhookDelivery row per subscribed webhook, then kicks
 * off the actual HTTP delivery in the background (fire-and-forget; delivery
 * failures are logged, never thrown to the caller).
 *
 * @param event   Event name, matched against each webhook's `events` list.
 * @param payload JSON-serializable payload stored with each delivery record.
 * @returns Number of delivery records successfully created (not the number
 *          actually delivered — delivery happens asynchronously).
 */
export async function dispatchWebhookEvent(
  event: string,
  payload: Record<string, unknown>
): Promise<number> {
  const webhooks = await prisma.webhook.findMany({
    where: {
      isActive: true,
      events: { has: event },
    },
  })
  if (webhooks.length === 0) return 0
  let deliveryCount = 0
  for (const webhook of webhooks) {
    try {
      const delivery = await prisma.webhookDelivery.create({
        data: {
          webhookId: webhook.id,
          event,
          payload: payload as Prisma.InputJsonValue,
          status: 'PENDING',
          attempts: 0,
        },
      })
      // Attempt delivery asynchronously (don't block the caller)
      deliverWebhook(delivery.id).catch((err) => {
        console.error(`[Webhook] Background delivery failed for ${delivery.id}:`, err)
      })
      deliveryCount++
    } catch (error) {
      // One failed create must not stop deliveries to other webhooks.
      console.error(`[Webhook] Failed to create delivery for webhook ${webhook.id}:`, error)
    }
  }
  return deliveryCount
}
/**
 * Attempt to deliver a single webhook.
 *
 * Signs the stored payload with HMAC-SHA256 using the webhook's secret
 * (sent as `X-Webhook-Signature: sha256=<hex>`), POSTs it to the webhook
 * URL with a 30s timeout, and records the outcome (status, truncated
 * response body, attempt count) on the WebhookDelivery row. Network or
 * timeout errors mark the delivery FAILED instead of throwing.
 *
 * @param deliveryId ID of the WebhookDelivery row to (re)attempt.
 */
export async function deliverWebhook(deliveryId: string): Promise<void> {
  const delivery = await prisma.webhookDelivery.findUnique({
    where: { id: deliveryId },
    include: { webhook: true },
  })
  if (!delivery || !delivery.webhook) {
    console.error(`[Webhook] Delivery ${deliveryId} not found`)
    return
  }
  const { webhook } = delivery
  const payloadStr = JSON.stringify(delivery.payload)
  // Sign payload with HMAC-SHA256
  const signature = crypto
    .createHmac('sha256', webhook.secret)
    .update(payloadStr)
    .digest('hex')
  // Build headers
  const headers: Record<string, string> = {
    'Content-Type': 'application/json',
    'X-Webhook-Signature': `sha256=${signature}`,
    'X-Webhook-Event': delivery.event,
    'X-Webhook-Delivery': delivery.id,
  }
  // Merge custom headers from webhook config.
  // NOTE: custom headers are written last, so they can override the
  // defaults above (including the signature header).
  if (webhook.headers && typeof webhook.headers === 'object') {
    const customHeaders = webhook.headers as Record<string, string>
    for (const [key, value] of Object.entries(customHeaders)) {
      if (typeof value === 'string') {
        headers[key] = value
      }
    }
  }
  try {
    const controller = new AbortController()
    const timeout = setTimeout(() => controller.abort(), 30000) // 30s timeout
    const response = await fetch(webhook.url, {
      method: 'POST',
      headers,
      body: payloadStr,
      signal: controller.signal,
    })
    clearTimeout(timeout)
    // Body read failures (e.g. aborted stream) degrade to an empty string.
    const responseBody = await response.text().catch(() => '')
    await prisma.webhookDelivery.update({
      where: { id: deliveryId },
      data: {
        status: response.ok ? 'DELIVERED' : 'FAILED',
        responseStatus: response.status,
        responseBody: responseBody.slice(0, 4000), // Truncate long responses
        attempts: delivery.attempts + 1,
        lastAttemptAt: new Date(),
      },
    })
  } catch (error) {
    // fetch threw (network error, abort/timeout) — record FAILED with the
    // error message in place of a response body.
    const errorMessage = error instanceof Error ? error.message : 'Unknown error'
    await prisma.webhookDelivery.update({
      where: { id: deliveryId },
      data: {
        status: 'FAILED',
        responseBody: errorMessage.slice(0, 4000),
        attempts: delivery.attempts + 1,
        lastAttemptAt: new Date(),
      },
    })
  }
}
/**
 * Retry all failed webhook deliveries that haven't exceeded max retries.
 * Called by cron.
 *
 * Skips deliveries whose webhook is inactive or whose attempt count has
 * reached the webhook's maxRetries. `retried` counts re-attempts that were
 * started (the delivery itself records its own success/failure on its row);
 * `errors` counts unexpected throws from deliverWebhook.
 *
 * @returns Counts of retried deliveries and unexpected errors.
 */
export async function retryFailedDeliveries(): Promise<{
  retried: number
  errors: number
}> {
  let retried = 0
  let errors = 0
  const failedDeliveries = await prisma.webhookDelivery.findMany({
    where: {
      status: 'FAILED',
    },
    include: {
      webhook: {
        select: { maxRetries: true, isActive: true },
      },
    },
  })
  // Retries are sequential to avoid hammering endpoints in parallel.
  for (const delivery of failedDeliveries) {
    // Skip if webhook is inactive or max retries exceeded
    if (!delivery.webhook.isActive) continue
    if (delivery.attempts >= delivery.webhook.maxRetries) continue
    try {
      await deliverWebhook(delivery.id)
      retried++
    } catch (error) {
      console.error(`[Webhook] Retry failed for delivery ${delivery.id}:`, error)
      errors++
    }
  }
  return { retried, errors }
}
/**
 * Generate a cryptographically random secret used to HMAC-sign webhook
 * payloads: 32 random bytes, hex-encoded (64 characters).
 */
export function generateWebhookSecret(): string {
  const secretBytes = crypto.randomBytes(32)
  return secretBytes.toString('hex')
}
import crypto from 'crypto'
import { Prisma } from '@prisma/client'
import { prisma } from '@/lib/prisma'
/**
 * Dispatch a webhook event to all active webhooks subscribed to this event.
 *
 * Creates one PENDING WebhookDelivery row per subscribed webhook, then kicks
 * off the actual HTTP delivery in the background (fire-and-forget; delivery
 * failures are logged, never thrown to the caller).
 *
 * @param event   Event name, matched against each webhook's `events` list.
 * @param payload JSON-serializable payload stored with each delivery record.
 * @returns Number of delivery records successfully created (not the number
 *          actually delivered — delivery happens asynchronously).
 */
export async function dispatchWebhookEvent(
  event: string,
  payload: Record<string, unknown>
): Promise<number> {
  const webhooks = await prisma.webhook.findMany({
    where: {
      isActive: true,
      events: { has: event },
    },
  })
  if (webhooks.length === 0) return 0
  let deliveryCount = 0
  for (const webhook of webhooks) {
    try {
      const delivery = await prisma.webhookDelivery.create({
        data: {
          webhookId: webhook.id,
          event,
          payload: payload as Prisma.InputJsonValue,
          status: 'PENDING',
          attempts: 0,
        },
      })
      // Attempt delivery asynchronously (don't block the caller)
      deliverWebhook(delivery.id).catch((err) => {
        console.error(`[Webhook] Background delivery failed for ${delivery.id}:`, err)
      })
      deliveryCount++
    } catch (error) {
      // One failed create must not stop deliveries to other webhooks.
      console.error(`[Webhook] Failed to create delivery for webhook ${webhook.id}:`, error)
    }
  }
  return deliveryCount
}
/**
 * Attempt to deliver a single webhook.
 *
 * Loads the delivery and its parent webhook, signs the JSON payload with
 * HMAC-SHA256 using the webhook secret, POSTs it to the configured URL with
 * a 30s abort timeout, and records the outcome (status, response status/body,
 * attempt count) on the WebhookDelivery row. Delivery failures are persisted
 * as FAILED rather than thrown.
 *
 * @param deliveryId - Primary key of the WebhookDelivery to attempt.
 */
export async function deliverWebhook(deliveryId: string): Promise<void> {
  const delivery = await prisma.webhookDelivery.findUnique({
    where: { id: deliveryId },
    include: { webhook: true },
  })
  if (!delivery || !delivery.webhook) {
    console.error(`[Webhook] Delivery ${deliveryId} not found`)
    return
  }
  const { webhook } = delivery
  const payloadStr = JSON.stringify(delivery.payload)

  // Sign payload with HMAC-SHA256 so receivers can verify authenticity.
  const signature = crypto
    .createHmac('sha256', webhook.secret)
    .update(payloadStr)
    .digest('hex')

  // Standard delivery headers; the signature uses the conventional
  // "sha256=" prefix so receivers can parse the algorithm.
  const headers: Record<string, string> = {
    'Content-Type': 'application/json',
    'X-Webhook-Signature': `sha256=${signature}`,
    'X-Webhook-Event': delivery.event,
    'X-Webhook-Delivery': delivery.id,
  }

  // Merge custom headers from webhook config, keeping only string values.
  // Also guard against arrays: a JSON column holding an array still has
  // typeof === 'object', which the original check admitted.
  if (webhook.headers && typeof webhook.headers === 'object' && !Array.isArray(webhook.headers)) {
    for (const [key, value] of Object.entries(webhook.headers as Record<string, unknown>)) {
      if (typeof value === 'string') {
        headers[key] = value
      }
    }
  }

  const controller = new AbortController()
  const timeout = setTimeout(() => controller.abort(), 30000) // 30s timeout
  try {
    const response = await fetch(webhook.url, {
      method: 'POST',
      headers,
      body: payloadStr,
      signal: controller.signal,
    })
    const responseBody = await response.text().catch(() => '')
    await prisma.webhookDelivery.update({
      where: { id: deliveryId },
      data: {
        status: response.ok ? 'DELIVERED' : 'FAILED',
        responseStatus: response.status,
        responseBody: responseBody.slice(0, 4000), // Truncate long responses
        // Atomic increment: the original read-then-write (attempts + 1)
        // could lose updates under concurrent delivery attempts.
        attempts: { increment: 1 },
        lastAttemptAt: new Date(),
      },
    })
  } catch (error) {
    const errorMessage = error instanceof Error ? error.message : 'Unknown error'
    await prisma.webhookDelivery.update({
      where: { id: deliveryId },
      data: {
        status: 'FAILED',
        responseBody: errorMessage.slice(0, 4000),
        attempts: { increment: 1 },
        lastAttemptAt: new Date(),
      },
    })
  } finally {
    // Always clear the abort timer — the original leaked it when fetch threw
    // (clearTimeout was only reached on the success path).
    clearTimeout(timeout)
  }
}
/**
 * Retry all failed webhook deliveries that haven't exceeded max retries.
 * Called by cron.
 *
 * @returns Counts of deliveries retried and of retry attempts that threw.
 */
export async function retryFailedDeliveries(): Promise<{
  retried: number
  errors: number
}> {
  let retried = 0
  let errors = 0
  // Filter out inactive webhooks in the query itself instead of fetching
  // every FAILED row and discarding most of them in JS.
  const failedDeliveries = await prisma.webhookDelivery.findMany({
    where: {
      status: 'FAILED',
      webhook: { isActive: true },
    },
    include: {
      webhook: {
        select: { maxRetries: true, isActive: true },
      },
    },
  })
  for (const delivery of failedDeliveries) {
    // Prisma `where` cannot compare two columns (attempts vs maxRetries),
    // so the retry cap is enforced here.
    if (delivery.attempts >= delivery.webhook.maxRetries) continue
    try {
      await deliverWebhook(delivery.id)
      retried++
    } catch (error) {
      console.error(`[Webhook] Retry failed for delivery ${delivery.id}:`, error)
      errors++
    }
  }
  return { retried, errors }
}
/**
 * Generate a random HMAC secret for webhook signing.
 *
 * @returns A 64-character lowercase hex string (32 cryptographically
 *          random bytes).
 */
export function generateWebhookSecret(): string {
  const raw = crypto.randomBytes(32)
  return raw.toString('hex')
}