/**
 * AI-Powered Assignment Service
 *
 * Uses GPT to analyze juror expertise and project requirements
 * to generate optimal assignment suggestions.
 *
 * Optimization:
 * - Batched processing (15 projects per batch)
 * - Description truncation (300 chars)
 * - Token tracking and cost logging
 *
 * GDPR Compliance:
 * - All data anonymized before AI processing
 * - IDs replaced with sequential identifiers
 * - No personal information sent to OpenAI
 */
2026-02-03 11:58:12 +01:00
|
|
|
import { getOpenAI, getConfiguredModel, buildCompletionParams } from '@/lib/openai'
|
|
|
|
|
import { logAIUsage, extractTokenUsage } from '@/server/utils/ai-usage'
|
|
|
|
|
import { classifyAIError, createParseError, logAIError } from './ai-errors'
|
2026-01-30 13:41:32 +01:00
|
|
|
import {
|
|
|
|
|
anonymizeForAI,
|
|
|
|
|
deanonymizeResults,
|
|
|
|
|
validateAnonymization,
|
2026-02-03 11:58:12 +01:00
|
|
|
DESCRIPTION_LIMITS,
|
|
|
|
|
truncateAndSanitize,
|
2026-01-30 13:41:32 +01:00
|
|
|
type AnonymizationResult,
|
|
|
|
|
} from './anonymization'
|
|
|
|
|
|
2026-02-03 11:58:12 +01:00
|
|
|
// ─── Constants ───────────────────────────────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
const ASSIGNMENT_BATCH_SIZE = 15
|
|
|
|
|
|
|
|
|
|
// Optimized system prompt
|
|
|
|
|
const ASSIGNMENT_SYSTEM_PROMPT = `Match jurors to projects by expertise. Return JSON assignments.
|
|
|
|
|
Each: {juror_id, project_id, confidence_score: 0-1, expertise_match_score: 0-1, reasoning: str (1-2 sentences)}
|
|
|
|
|
Distribute workload fairly. Avoid assigning jurors at capacity.`
|
|
|
|
|
|
|
|
|
|
// ─── Types ───────────────────────────────────────────────────────────────────
|
|
|
|
|
|
2026-01-30 13:41:32 +01:00
|
|
|
export interface AIAssignmentSuggestion {
|
|
|
|
|
jurorId: string
|
|
|
|
|
projectId: string
|
|
|
|
|
confidenceScore: number // 0-1
|
|
|
|
|
reasoning: string
|
|
|
|
|
expertiseMatchScore: number // 0-1
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/** Overall outcome of an assignment-generation run. */
export interface AIAssignmentResult {
  success: boolean
  suggestions: AIAssignmentSuggestion[]
  error?: string
  /** Total LLM tokens consumed; absent when the fallback algorithm produced the result. */
  tokensUsed?: number
  /** True when the deterministic fallback algorithm produced the suggestions. */
  fallbackUsed?: boolean
}
/** Minimal juror shape needed for assignment generation. */
interface JurorForAssignment {
  id: string
  name?: string | null
  email: string
  /** Free-form expertise tags, matched case-insensitively against project tags. */
  expertiseTags: string[]
  /** Per-juror cap; when set, overrides the constraint-level cap. */
  maxAssignments?: number | null
  // Relation-count snapshot of this juror's current assignments
  // (presumably an ORM `_count` include — confirm against caller).
  _count?: {
    assignments: number
  }
}
/** Minimal project shape needed for assignment generation. */
interface ProjectForAssignment {
  id: string
  title: string
  /** May be truncated/sanitized before being sent to the model. */
  description?: string | null
  /** Topic tags, matched case-insensitively against juror expertise tags. */
  tags: string[]
  teamName?: string | null
  // Relation-count snapshot of reviews already assigned to this project
  // (presumably an ORM `_count` include — confirm against caller).
  _count?: {
    assignments: number
  }
}
/** Quotas and pre-existing state that constrain new assignments. */
interface AssignmentConstraints {
  /** Target number of reviews each project should receive. */
  requiredReviewsPerProject: number
  /** Soft minimum per juror; jurors below it get a scoring bonus (fallback algorithm only). */
  minAssignmentsPerJuror?: number
  /** Cap per juror, used when a juror has no personal maxAssignments. */
  maxAssignmentsPerJuror?: number
  /** Pairs that already exist and must not be suggested again. */
  existingAssignments: Array<{
    jurorId: string
    projectId: string
  }>
}
2026-02-04 17:40:26 +01:00
|
|
|
export interface AssignmentProgressCallback {
|
|
|
|
|
(progress: {
|
|
|
|
|
currentBatch: number
|
|
|
|
|
totalBatches: number
|
|
|
|
|
processedCount: number
|
|
|
|
|
totalProjects: number
|
|
|
|
|
}): Promise<void>
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-03 11:58:12 +01:00
|
|
|
// ─── AI Processing ───────────────────────────────────────────────────────────
|
|
|
|
|
|
2026-01-30 13:41:32 +01:00
|
|
|
/**
|
2026-02-03 11:58:12 +01:00
|
|
|
* Process a batch of projects for assignment suggestions
|
2026-01-30 13:41:32 +01:00
|
|
|
*/
|
2026-02-03 11:58:12 +01:00
|
|
|
async function processAssignmentBatch(
|
|
|
|
|
openai: NonNullable<Awaited<ReturnType<typeof getOpenAI>>>,
|
|
|
|
|
model: string,
|
|
|
|
|
anonymizedData: AnonymizationResult,
|
|
|
|
|
batchProjects: typeof anonymizedData.projects,
|
|
|
|
|
batchMappings: typeof anonymizedData.projectMappings,
|
|
|
|
|
constraints: AssignmentConstraints,
|
|
|
|
|
userId?: string,
|
|
|
|
|
entityId?: string
|
|
|
|
|
): Promise<{
|
|
|
|
|
suggestions: AIAssignmentSuggestion[]
|
|
|
|
|
tokensUsed: number
|
|
|
|
|
}> {
|
|
|
|
|
const suggestions: AIAssignmentSuggestion[] = []
|
|
|
|
|
let tokensUsed = 0
|
|
|
|
|
|
|
|
|
|
// Build prompt with batch-specific data
|
|
|
|
|
const userPrompt = buildBatchPrompt(
|
|
|
|
|
anonymizedData.jurors,
|
|
|
|
|
batchProjects,
|
|
|
|
|
constraints,
|
|
|
|
|
anonymizedData.jurorMappings,
|
|
|
|
|
batchMappings
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
try {
|
|
|
|
|
const params = buildCompletionParams(model, {
|
|
|
|
|
messages: [
|
|
|
|
|
{ role: 'system', content: ASSIGNMENT_SYSTEM_PROMPT },
|
|
|
|
|
{ role: 'user', content: userPrompt },
|
|
|
|
|
],
|
|
|
|
|
jsonMode: true,
|
|
|
|
|
temperature: 0.3,
|
|
|
|
|
maxTokens: 4000,
|
|
|
|
|
})
|
|
|
|
|
|
2026-02-05 14:56:21 +01:00
|
|
|
let response
|
|
|
|
|
try {
|
|
|
|
|
response = await openai.chat.completions.create(params)
|
|
|
|
|
} catch (apiError) {
|
|
|
|
|
// Provide clearer error for model-related issues
|
|
|
|
|
const errorMsg = apiError instanceof Error ? apiError.message : String(apiError)
|
|
|
|
|
if (errorMsg.includes('model') || errorMsg.includes('does not exist')) {
|
|
|
|
|
throw new Error(`Invalid AI model "${model}". Please check the model name in Settings > AI Configuration.`)
|
|
|
|
|
}
|
|
|
|
|
throw apiError
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-03 11:58:12 +01:00
|
|
|
const usage = extractTokenUsage(response)
|
|
|
|
|
tokensUsed = usage.totalTokens
|
|
|
|
|
|
|
|
|
|
// Log batch usage
|
|
|
|
|
await logAIUsage({
|
|
|
|
|
userId,
|
|
|
|
|
action: 'ASSIGNMENT',
|
|
|
|
|
entityType: 'Round',
|
|
|
|
|
entityId,
|
|
|
|
|
model,
|
|
|
|
|
promptTokens: usage.promptTokens,
|
|
|
|
|
completionTokens: usage.completionTokens,
|
|
|
|
|
totalTokens: usage.totalTokens,
|
|
|
|
|
batchSize: batchProjects.length,
|
|
|
|
|
itemsProcessed: batchProjects.length,
|
|
|
|
|
status: 'SUCCESS',
|
|
|
|
|
})
|
|
|
|
|
|
|
|
|
|
const content = response.choices[0]?.message?.content
|
|
|
|
|
if (!content) {
|
2026-02-05 14:56:21 +01:00
|
|
|
// Check if response indicates an issue
|
|
|
|
|
const finishReason = response.choices[0]?.finish_reason
|
|
|
|
|
if (finishReason === 'content_filter') {
|
|
|
|
|
throw new Error('AI response was filtered. Try a different model or simplify the project descriptions.')
|
|
|
|
|
}
|
|
|
|
|
if (!response.choices || response.choices.length === 0) {
|
|
|
|
|
throw new Error(`No response from model "${model}". This model may not exist or may not be available. Please verify the model name.`)
|
|
|
|
|
}
|
|
|
|
|
throw new Error(`Empty response from AI model "${model}". The model may not support this type of request.`)
|
2026-02-03 11:58:12 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const parsed = JSON.parse(content) as {
|
|
|
|
|
assignments: Array<{
|
|
|
|
|
juror_id: string
|
|
|
|
|
project_id: string
|
|
|
|
|
confidence_score: number
|
|
|
|
|
expertise_match_score: number
|
|
|
|
|
reasoning: string
|
|
|
|
|
}>
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// De-anonymize and add to suggestions
|
|
|
|
|
const deanonymized = deanonymizeResults(
|
|
|
|
|
(parsed.assignments || []).map((a) => ({
|
|
|
|
|
jurorId: a.juror_id,
|
|
|
|
|
projectId: a.project_id,
|
|
|
|
|
confidenceScore: Math.min(1, Math.max(0, a.confidence_score)),
|
|
|
|
|
expertiseMatchScore: Math.min(1, Math.max(0, a.expertise_match_score)),
|
|
|
|
|
reasoning: a.reasoning,
|
|
|
|
|
})),
|
|
|
|
|
anonymizedData.jurorMappings,
|
|
|
|
|
batchMappings
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
for (const item of deanonymized) {
|
|
|
|
|
suggestions.push({
|
|
|
|
|
jurorId: item.realJurorId,
|
|
|
|
|
projectId: item.realProjectId,
|
|
|
|
|
confidenceScore: item.confidenceScore,
|
|
|
|
|
reasoning: item.reasoning,
|
|
|
|
|
expertiseMatchScore: item.expertiseMatchScore,
|
|
|
|
|
})
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
} catch (error) {
|
|
|
|
|
if (error instanceof SyntaxError) {
|
|
|
|
|
const parseError = createParseError(error.message)
|
|
|
|
|
logAIError('Assignment', 'batch processing', parseError)
|
|
|
|
|
|
|
|
|
|
await logAIUsage({
|
|
|
|
|
userId,
|
|
|
|
|
action: 'ASSIGNMENT',
|
|
|
|
|
entityType: 'Round',
|
|
|
|
|
entityId,
|
|
|
|
|
model,
|
|
|
|
|
promptTokens: 0,
|
|
|
|
|
completionTokens: 0,
|
|
|
|
|
totalTokens: tokensUsed,
|
|
|
|
|
batchSize: batchProjects.length,
|
|
|
|
|
itemsProcessed: 0,
|
|
|
|
|
status: 'ERROR',
|
|
|
|
|
errorMessage: parseError.message,
|
|
|
|
|
})
|
|
|
|
|
} else {
|
|
|
|
|
throw error
|
|
|
|
|
}
|
|
|
|
|
}
|
2026-01-30 13:41:32 +01:00
|
|
|
|
2026-02-03 11:58:12 +01:00
|
|
|
return { suggestions, tokensUsed }
|
|
|
|
|
}
|
2026-01-30 13:41:32 +01:00
|
|
|
|
2026-02-03 11:58:12 +01:00
|
|
|
/**
|
|
|
|
|
* Build prompt for a batch of projects
|
|
|
|
|
*/
|
|
|
|
|
function buildBatchPrompt(
|
|
|
|
|
jurors: AnonymizationResult['jurors'],
|
|
|
|
|
projects: AnonymizationResult['projects'],
|
|
|
|
|
constraints: AssignmentConstraints,
|
|
|
|
|
jurorMappings: AnonymizationResult['jurorMappings'],
|
|
|
|
|
projectMappings: AnonymizationResult['projectMappings']
|
|
|
|
|
): string {
|
|
|
|
|
// Map existing assignments to anonymous IDs
|
|
|
|
|
const jurorIdMap = new Map(jurorMappings.map((m) => [m.realId, m.anonymousId]))
|
|
|
|
|
const projectIdMap = new Map(projectMappings.map((m) => [m.realId, m.anonymousId]))
|
2026-01-30 13:41:32 +01:00
|
|
|
|
2026-02-03 11:58:12 +01:00
|
|
|
const anonymousExisting = constraints.existingAssignments
|
|
|
|
|
.map((a) => ({
|
|
|
|
|
jurorId: jurorIdMap.get(a.jurorId),
|
|
|
|
|
projectId: projectIdMap.get(a.projectId),
|
|
|
|
|
}))
|
|
|
|
|
.filter((a) => a.jurorId && a.projectId)
|
|
|
|
|
|
|
|
|
|
return `JURORS: ${JSON.stringify(jurors)}
|
|
|
|
|
PROJECTS: ${JSON.stringify(projects)}
|
|
|
|
|
CONSTRAINTS: ${constraints.requiredReviewsPerProject} reviews/project, max ${constraints.maxAssignmentsPerJuror || 'unlimited'}/juror
|
|
|
|
|
EXISTING: ${JSON.stringify(anonymousExisting)}
|
|
|
|
|
Return JSON: {"assignments": [...]}`
|
|
|
|
|
}
|
2026-01-30 13:41:32 +01:00
|
|
|
|
|
|
|
|
/**
 * Generate AI-powered assignment suggestions with batching.
 *
 * Pipeline:
 *  1. Truncate project descriptions (token control).
 *  2. Anonymize jurors/projects before anything leaves the process (GDPR).
 *  3. Validate the anonymization; bail to the fallback algorithm if invalid.
 *  4. Call the model in batches of ASSIGNMENT_BATCH_SIZE, reporting
 *     progress after each batch via onProgress.
 *
 * Never rejects for AI-side problems: any failure is classified, logged,
 * and the deterministic fallback algorithm is used instead.
 *
 * @param jurors      Candidate jurors (real IDs; anonymized internally).
 * @param projects    Projects needing reviews (real IDs; anonymized internally).
 * @param constraints Review quotas and existing assignments.
 * @param userId      Optional actor for AI-usage logging.
 * @param entityId    Optional round ID for AI-usage logging.
 * @param onProgress  Optional async callback awaited after each batch.
 */
export async function generateAIAssignments(
  jurors: JurorForAssignment[],
  projects: ProjectForAssignment[],
  constraints: AssignmentConstraints,
  userId?: string,
  entityId?: string,
  onProgress?: AssignmentProgressCallback
): Promise<AIAssignmentResult> {
  // Truncate descriptions before anonymization
  const truncatedProjects = projects.map((p) => ({
    ...p,
    description: truncateAndSanitize(p.description, DESCRIPTION_LIMITS.ASSIGNMENT),
  }))

  // Anonymize data before sending to AI
  const anonymizedData = anonymizeForAI(jurors, truncatedProjects)

  // Validate anonymization — if it cannot be verified, never contact the AI.
  if (!validateAnonymization(anonymizedData)) {
    console.error('[AI Assignment] Anonymization validation failed, falling back to algorithm')
    return generateFallbackAssignments(jurors, projects, constraints)
  }

  try {
    const openai = await getOpenAI()

    if (!openai) {
      console.log('[AI Assignment] OpenAI not configured, using fallback algorithm')
      return generateFallbackAssignments(jurors, projects, constraints)
    }

    const model = await getConfiguredModel()
    console.log(`[AI Assignment] Using model: ${model} for ${projects.length} projects in batches of ${ASSIGNMENT_BATCH_SIZE}`)

    const allSuggestions: AIAssignmentSuggestion[] = []
    let totalTokens = 0

    // Process projects in batches
    const totalBatches = Math.ceil(anonymizedData.projects.length / ASSIGNMENT_BATCH_SIZE)

    for (let i = 0; i < anonymizedData.projects.length; i += ASSIGNMENT_BATCH_SIZE) {
      // Assumes anonymizedData.projects and .projectMappings are
      // index-aligned (produced together by anonymizeForAI), so the same
      // slice bounds select this batch's mappings — TODO confirm.
      const batchProjects = anonymizedData.projects.slice(i, i + ASSIGNMENT_BATCH_SIZE)
      const batchMappings = anonymizedData.projectMappings.slice(i, i + ASSIGNMENT_BATCH_SIZE)
      const currentBatch = Math.floor(i / ASSIGNMENT_BATCH_SIZE) + 1

      console.log(`[AI Assignment] Processing batch ${currentBatch}/${totalBatches}`)

      const { suggestions, tokensUsed } = await processAssignmentBatch(
        openai,
        model,
        anonymizedData,
        batchProjects,
        batchMappings,
        constraints,
        userId,
        entityId
      )

      allSuggestions.push(...suggestions)
      totalTokens += tokensUsed

      // Report progress after each batch
      if (onProgress) {
        // Cap at the real project count so the final batch never over-reports.
        const processedCount = Math.min((currentBatch) * ASSIGNMENT_BATCH_SIZE, projects.length)
        await onProgress({
          currentBatch,
          totalBatches,
          processedCount,
          totalProjects: projects.length,
        })
      }
    }

    console.log(`[AI Assignment] Completed. Total suggestions: ${allSuggestions.length}, Total tokens: ${totalTokens}`)

    return {
      success: true,
      suggestions: allSuggestions,
      tokensUsed: totalTokens,
      fallbackUsed: false,
    }
  } catch (error) {
    const classified = classifyAIError(error)
    logAIError('Assignment', 'generateAIAssignments', classified)

    // Log failed attempt
    await logAIUsage({
      userId,
      action: 'ASSIGNMENT',
      entityType: 'Round',
      entityId,
      // Model is unknown here: the failure may have occurred before (or
      // while) resolving the configured model.
      model: 'unknown',
      promptTokens: 0,
      completionTokens: 0,
      totalTokens: 0,
      batchSize: projects.length,
      itemsProcessed: 0,
      status: 'ERROR',
      errorMessage: classified.message,
    })

    console.error('[AI Assignment] AI assignment failed, using fallback:', classified.message)
    return generateFallbackAssignments(jurors, projects, constraints)
  }
}
|
|
|
|
|
|
2026-02-03 11:58:12 +01:00
|
|
|
// ─── Fallback Algorithm ──────────────────────────────────────────────────────
|
2026-01-30 13:41:32 +01:00
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Fallback algorithm-based assignment when AI is unavailable
|
|
|
|
|
*/
|
|
|
|
|
export function generateFallbackAssignments(
|
|
|
|
|
jurors: JurorForAssignment[],
|
|
|
|
|
projects: ProjectForAssignment[],
|
|
|
|
|
constraints: AssignmentConstraints
|
|
|
|
|
): AIAssignmentResult {
|
|
|
|
|
const suggestions: AIAssignmentSuggestion[] = []
|
|
|
|
|
const existingSet = new Set(
|
|
|
|
|
constraints.existingAssignments.map((a) => `${a.jurorId}:${a.projectId}`)
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
// Track assignments per juror and project
|
|
|
|
|
const jurorAssignments = new Map<string, number>()
|
|
|
|
|
const projectAssignments = new Map<string, number>()
|
|
|
|
|
|
|
|
|
|
// Initialize counts from existing assignments
|
|
|
|
|
for (const assignment of constraints.existingAssignments) {
|
|
|
|
|
jurorAssignments.set(
|
|
|
|
|
assignment.jurorId,
|
|
|
|
|
(jurorAssignments.get(assignment.jurorId) || 0) + 1
|
|
|
|
|
)
|
|
|
|
|
projectAssignments.set(
|
|
|
|
|
assignment.projectId,
|
|
|
|
|
(projectAssignments.get(assignment.projectId) || 0) + 1
|
|
|
|
|
)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Also include current assignment counts
|
|
|
|
|
for (const juror of jurors) {
|
|
|
|
|
const current = juror._count?.assignments || 0
|
|
|
|
|
jurorAssignments.set(
|
|
|
|
|
juror.id,
|
|
|
|
|
Math.max(jurorAssignments.get(juror.id) || 0, current)
|
|
|
|
|
)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (const project of projects) {
|
|
|
|
|
const current = project._count?.assignments || 0
|
|
|
|
|
projectAssignments.set(
|
|
|
|
|
project.id,
|
|
|
|
|
Math.max(projectAssignments.get(project.id) || 0, current)
|
|
|
|
|
)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Sort projects by need (fewest assignments first)
|
|
|
|
|
const sortedProjects = [...projects].sort((a, b) => {
|
|
|
|
|
const aCount = projectAssignments.get(a.id) || 0
|
|
|
|
|
const bCount = projectAssignments.get(b.id) || 0
|
|
|
|
|
return aCount - bCount
|
|
|
|
|
})
|
|
|
|
|
|
|
|
|
|
// For each project, find best matching jurors
|
|
|
|
|
for (const project of sortedProjects) {
|
|
|
|
|
const currentProjectAssignments = projectAssignments.get(project.id) || 0
|
|
|
|
|
const neededReviews = Math.max(
|
|
|
|
|
0,
|
|
|
|
|
constraints.requiredReviewsPerProject - currentProjectAssignments
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
if (neededReviews === 0) continue
|
|
|
|
|
|
|
|
|
|
// Score all available jurors
|
|
|
|
|
const scoredJurors = jurors
|
|
|
|
|
.filter((juror) => {
|
|
|
|
|
// Check not already assigned
|
|
|
|
|
if (existingSet.has(`${juror.id}:${project.id}`)) return false
|
|
|
|
|
|
|
|
|
|
// Check not at limit
|
|
|
|
|
const currentAssignments = jurorAssignments.get(juror.id) || 0
|
|
|
|
|
const maxAssignments =
|
|
|
|
|
juror.maxAssignments ?? constraints.maxAssignmentsPerJuror ?? Infinity
|
|
|
|
|
if (currentAssignments >= maxAssignments) return false
|
|
|
|
|
|
|
|
|
|
return true
|
|
|
|
|
})
|
2026-02-04 16:01:18 +01:00
|
|
|
.map((juror) => {
|
|
|
|
|
const currentLoad = jurorAssignments.get(juror.id) || 0
|
|
|
|
|
const maxLoad = juror.maxAssignments ?? constraints.maxAssignmentsPerJuror ?? 20
|
|
|
|
|
const minTarget = constraints.minAssignmentsPerJuror ?? 5
|
|
|
|
|
|
|
|
|
|
return {
|
|
|
|
|
juror,
|
|
|
|
|
score: calculateExpertiseScore(juror.expertiseTags, project.tags),
|
|
|
|
|
loadScore: calculateLoadScore(currentLoad, maxLoad),
|
|
|
|
|
underMinBonus: calculateUnderMinBonus(currentLoad, minTarget),
|
|
|
|
|
}
|
|
|
|
|
})
|
2026-01-30 13:41:32 +01:00
|
|
|
.sort((a, b) => {
|
2026-02-04 16:01:18 +01:00
|
|
|
// Combined score: 50% expertise, 30% load balancing, 20% under-min bonus
|
|
|
|
|
const aTotal = a.score * 0.5 + a.loadScore * 0.3 + a.underMinBonus * 0.2
|
|
|
|
|
const bTotal = b.score * 0.5 + b.loadScore * 0.3 + b.underMinBonus * 0.2
|
2026-01-30 13:41:32 +01:00
|
|
|
return bTotal - aTotal
|
|
|
|
|
})
|
|
|
|
|
|
|
|
|
|
// Assign top jurors
|
|
|
|
|
for (let i = 0; i < Math.min(neededReviews, scoredJurors.length); i++) {
|
|
|
|
|
const { juror, score } = scoredJurors[i]
|
|
|
|
|
|
|
|
|
|
suggestions.push({
|
|
|
|
|
jurorId: juror.id,
|
|
|
|
|
projectId: project.id,
|
|
|
|
|
confidenceScore: score,
|
|
|
|
|
expertiseMatchScore: score,
|
|
|
|
|
reasoning: generateFallbackReasoning(
|
|
|
|
|
juror.expertiseTags,
|
|
|
|
|
project.tags,
|
|
|
|
|
score
|
|
|
|
|
),
|
|
|
|
|
})
|
|
|
|
|
|
|
|
|
|
// Update tracking
|
|
|
|
|
existingSet.add(`${juror.id}:${project.id}`)
|
|
|
|
|
jurorAssignments.set(juror.id, (jurorAssignments.get(juror.id) || 0) + 1)
|
|
|
|
|
projectAssignments.set(
|
|
|
|
|
project.id,
|
|
|
|
|
(projectAssignments.get(project.id) || 0) + 1
|
|
|
|
|
)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return {
|
|
|
|
|
success: true,
|
|
|
|
|
suggestions,
|
|
|
|
|
fallbackUsed: true,
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Calculate expertise match score based on tag overlap
|
|
|
|
|
*/
|
|
|
|
|
function calculateExpertiseScore(
|
|
|
|
|
jurorTags: string[],
|
|
|
|
|
projectTags: string[]
|
|
|
|
|
): number {
|
|
|
|
|
if (jurorTags.length === 0 || projectTags.length === 0) {
|
|
|
|
|
return 0.5 // Neutral score if no tags
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const jurorTagsLower = new Set(jurorTags.map((t) => t.toLowerCase()))
|
|
|
|
|
const matchingTags = projectTags.filter((t) =>
|
|
|
|
|
jurorTagsLower.has(t.toLowerCase())
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
// Score based on percentage of project tags matched
|
|
|
|
|
const matchRatio = matchingTags.length / projectTags.length
|
|
|
|
|
|
|
|
|
|
// Boost for having expertise, even if not all match
|
|
|
|
|
const hasExpertise = matchingTags.length > 0 ? 0.2 : 0
|
|
|
|
|
|
|
|
|
|
return Math.min(1, matchRatio * 0.8 + hasExpertise)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Calculate load balancing score (higher score = less loaded)
|
|
|
|
|
*/
|
|
|
|
|
function calculateLoadScore(currentLoad: number, maxLoad: number): number {
|
|
|
|
|
if (maxLoad === 0) return 0
|
|
|
|
|
const utilization = currentLoad / maxLoad
|
|
|
|
|
return Math.max(0, 1 - utilization)
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-04 16:01:18 +01:00
|
|
|
/**
|
|
|
|
|
* Calculate bonus for jurors under their minimum target
|
|
|
|
|
* Returns 1.0 if under min, scaled down as approaching min
|
|
|
|
|
*/
|
|
|
|
|
function calculateUnderMinBonus(currentLoad: number, minTarget: number): number {
|
|
|
|
|
if (currentLoad >= minTarget) return 0
|
|
|
|
|
// Scale bonus based on how far under min (1.0 at 0 load, decreasing as approaching min)
|
|
|
|
|
return (minTarget - currentLoad) / minTarget
|
|
|
|
|
}
|
|
|
|
|
|
2026-01-30 13:41:32 +01:00
|
|
|
/**
|
|
|
|
|
* Generate reasoning for fallback assignments
|
|
|
|
|
*/
|
|
|
|
|
function generateFallbackReasoning(
|
|
|
|
|
jurorTags: string[],
|
|
|
|
|
projectTags: string[],
|
|
|
|
|
score: number
|
|
|
|
|
): string {
|
|
|
|
|
const jurorTagsLower = new Set(jurorTags.map((t) => t.toLowerCase()))
|
|
|
|
|
const matchingTags = projectTags.filter((t) =>
|
|
|
|
|
jurorTagsLower.has(t.toLowerCase())
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
if (matchingTags.length > 0) {
|
|
|
|
|
return `Expertise match: ${matchingTags.join(', ')}. Match score: ${(score * 100).toFixed(0)}%.`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (score >= 0.5) {
|
|
|
|
|
return `Assigned for workload balance. No direct expertise match but available capacity.`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return `Assigned to ensure project coverage.`
|
|
|
|
|
}
|