Apply full refactor updates plus pipeline/email UX confirmations
All checks were successful
Build and Push Docker Image / build (push) Successful in 10m33s
All checks were successful
Build and Push Docker Image / build (push) Successful in 10m33s
This commit is contained in:
@@ -1,486 +1,486 @@
|
||||
/**
|
||||
* AI-Powered Mentor Matching Service
|
||||
*
|
||||
* Matches mentors to projects based on expertise alignment.
|
||||
*
|
||||
* Optimization:
|
||||
* - Batched processing (15 projects per batch)
|
||||
* - Token tracking and cost logging
|
||||
* - Fallback to algorithmic matching
|
||||
*
|
||||
* GDPR Compliance:
|
||||
* - All data anonymized before AI processing
|
||||
* - No personal information sent to OpenAI
|
||||
*/
|
||||
|
||||
import { PrismaClient, OceanIssue, CompetitionCategory } from '@prisma/client'
|
||||
import { getOpenAI, getConfiguredModel, buildCompletionParams } from '@/lib/openai'
|
||||
import { logAIUsage, extractTokenUsage } from '@/server/utils/ai-usage'
|
||||
import { classifyAIError, createParseError, logAIError } from './ai-errors'
|
||||
|
||||
// ─── Constants ───────────────────────────────────────────────────────────────

// Number of projects sent to the model per request (bounds prompt size).
const MENTOR_BATCH_SIZE = 15

// Optimized system prompt
// Kept deliberately terse to minimize prompt tokens; the expected JSON
// response shape is spelled out inline so jsonMode output can be parsed
// directly by processMatchingBatch.
const MENTOR_MATCHING_SYSTEM_PROMPT = `Match mentors to projects by expertise. Return JSON.
Format for each project: {"matches": [{project_id, mentor_matches: [{mentor_index, confidence_score: 0-1, expertise_match_score: 0-1, reasoning: str}]}]}
Rank by suitability. Consider expertise alignment and availability.`
|
||||
|
||||
// ─── Types ───────────────────────────────────────────────────────────────────

// Minimal projection of a project used for matching.
interface ProjectInfo {
  id: string
  title: string
  description: string | null
  oceanIssue: OceanIssue | null
  competitionCategory: CompetitionCategory | null
  tags: string[]
}

// Mentor candidate with current workload. name/email are kept for callers
// but are never included in prompts sent to OpenAI (see processMatchingBatch).
interface MentorInfo {
  id: string
  name: string | null
  email: string
  expertiseTags: string[]
  // Count of active mentor assignments.
  currentAssignments: number
  // null is treated as unlimited capacity.
  maxAssignments: number | null
}

// A single ranked mentor suggestion; scores are clamped to [0, 1].
interface MentorMatch {
  mentorId: string
  confidenceScore: number
  expertiseMatchScore: number
  reasoning: string
}
|
||||
|
||||
// ─── Batched AI Matching ─────────────────────────────────────────────────────
|
||||
|
||||
/**
 * Process a batch of projects for mentor matching
 *
 * Sends anonymized project/mentor data to OpenAI, logs token usage, and maps
 * the model's index-based answers back to real database ids. JSON parse
 * failures are logged and yield empty match lists (callers then fall back to
 * the algorithmic matcher); all other errors are rethrown to the caller.
 */
async function processMatchingBatch(
  openai: NonNullable<Awaited<ReturnType<typeof getOpenAI>>>,
  model: string,
  projects: ProjectInfo[],
  mentors: MentorInfo[],
  limit: number,
  userId?: string
): Promise<{
  results: Map<string, MentorMatch[]>
  tokensUsed: number
}> {
  const results = new Map<string, MentorMatch[]>()
  let tokensUsed = 0

  // Anonymize project data
  // Projects are referred to by positional ids (P1, P2, ...) in the prompt;
  // real_id stays server-side only (the prompt template below never uses it).
  const anonymizedProjects = projects.map((p, index) => ({
    project_id: `P${index + 1}`,
    real_id: p.id,
    description: p.description?.slice(0, 350) || 'No description',
    category: p.competitionCategory,
    oceanIssue: p.oceanIssue,
    tags: p.tags,
  }))

  // Anonymize mentor data
  // Mentors are referenced by array index; no name/email is sent to the model.
  const anonymizedMentors = mentors.map((m, index) => ({
    index,
    expertise: m.expertiseTags,
    availability: m.maxAssignments
      ? `${m.currentAssignments}/${m.maxAssignments}`
      : 'unlimited',
  }))

  // Descriptions are truncated again to 200 chars here to cap prompt size.
  const userPrompt = `PROJECTS:
${anonymizedProjects.map(p => `${p.project_id}: Category=${p.category || 'N/A'}, Issue=${p.oceanIssue || 'N/A'}, Tags=[${p.tags.join(', ')}], Desc=${p.description.slice(0, 200)}`).join('\n')}

MENTORS:
${anonymizedMentors.map(m => `${m.index}: Expertise=[${m.expertise.join(', ')}], Availability=${m.availability}`).join('\n')}

For each project, rank top ${limit} mentors.`

  try {
    const params = buildCompletionParams(model, {
      messages: [
        { role: 'system', content: MENTOR_MATCHING_SYSTEM_PROMPT },
        { role: 'user', content: userPrompt },
      ],
      jsonMode: true,
      temperature: 0.3,
      maxTokens: 4000,
    })

    const response = await openai.chat.completions.create(params)
    const usage = extractTokenUsage(response)
    tokensUsed = usage.totalTokens

    // Log usage
    // NOTE: recorded as SUCCESS as soon as the API call returns, i.e. before
    // the response is parsed; a later parse failure logs a second ERROR entry.
    await logAIUsage({
      userId,
      action: 'MENTOR_MATCHING',
      entityType: 'Project',
      model,
      promptTokens: usage.promptTokens,
      completionTokens: usage.completionTokens,
      totalTokens: usage.totalTokens,
      batchSize: projects.length,
      itemsProcessed: projects.length,
      status: 'SUCCESS',
    })

    const content = response.choices[0]?.message?.content
    if (!content) {
      throw new Error('No response from AI')
    }

    // Expected response shape (mirrors MENTOR_MATCHING_SYSTEM_PROMPT).
    // NOTE(review): the cast is unvalidated — malformed-but-parseable JSON
    // is only partially guarded by the checks below.
    const parsed = JSON.parse(content) as {
      matches: Array<{
        project_id: string
        mentor_matches: Array<{
          mentor_index: number
          confidence_score: number
          expertise_match_score: number
          reasoning: string
        }>
      }>
    }

    // Map results back to real IDs
    for (const projectMatch of parsed.matches || []) {
      const project = anonymizedProjects.find(p => p.project_id === projectMatch.project_id)
      if (!project) continue

      const mentorMatches: MentorMatch[] = []
      for (const match of projectMatch.mentor_matches || []) {
        // Drop out-of-range mentor indices the model may hallucinate.
        if (match.mentor_index >= 0 && match.mentor_index < mentors.length) {
          mentorMatches.push({
            mentorId: mentors[match.mentor_index].id,
            // Clamp model-provided scores into [0, 1].
            confidenceScore: Math.min(1, Math.max(0, match.confidence_score)),
            expertiseMatchScore: Math.min(1, Math.max(0, match.expertise_match_score)),
            reasoning: match.reasoning,
          })
        }
      }
      results.set(project.real_id, mentorMatches)
    }

  } catch (error) {
    if (error instanceof SyntaxError) {
      // JSON.parse failed: log the parse error and return empty match lists
      // so the caller can fall back to algorithmic matching.
      const parseError = createParseError(error.message)
      logAIError('MentorMatching', 'batch processing', parseError)

      await logAIUsage({
        userId,
        action: 'MENTOR_MATCHING',
        entityType: 'Project',
        model,
        promptTokens: 0,
        completionTokens: 0,
        totalTokens: tokensUsed,
        batchSize: projects.length,
        itemsProcessed: 0,
        status: 'ERROR',
        errorMessage: parseError.message,
      })

      // Return empty results for batch (will fall back to algorithm)
      for (const project of projects) {
        results.set(project.id, [])
      }
    } else {
      // API/auth/rate-limit errors propagate to the caller for classification.
      throw error
    }
  }

  return { results, tokensUsed }
}
|
||||
|
||||
/**
 * Get AI-suggested mentor matches for multiple projects (batched)
 *
 * Pipeline:
 *  1. Load projects and active mentor candidates from the database.
 *  2. Keep only mentors that still have assignment capacity.
 *  3. Run AI matching in batches of MENTOR_BATCH_SIZE.
 *  4. Backfill any project with no AI matches via the keyword algorithm.
 * On any AI failure the whole set falls back to the algorithmic matcher,
 * so this function does not throw for model/parse errors.
 */
export async function getAIMentorSuggestionsBatch(
  prisma: PrismaClient,
  projectIds: string[],
  limit: number = 5,
  userId?: string
): Promise<Map<string, MentorMatch[]>> {
  const allResults = new Map<string, MentorMatch[]>()

  // Get projects
  const projects = await prisma.project.findMany({
    where: { id: { in: projectIds } },
    select: {
      id: true,
      title: true,
      description: true,
      oceanIssue: true,
      competitionCategory: true,
      tags: true,
    },
  })

  if (projects.length === 0) {
    return allResults
  }

  // Get available mentors
  // Candidates: ACTIVE users who have expertise tags or are jury members.
  const mentors = await prisma.user.findMany({
    where: {
      OR: [
        { expertiseTags: { isEmpty: false } },
        { role: 'JURY_MEMBER' },
      ],
      status: 'ACTIVE',
    },
    select: {
      id: true,
      name: true,
      email: true,
      expertiseTags: true,
      maxAssignments: true,
      mentorAssignments: {
        select: { id: true },
      },
    },
  })

  // Filter mentors who haven't reached max assignments
  // (falsy maxAssignments, i.e. null or 0, is treated as unlimited).
  const availableMentors: MentorInfo[] = mentors
    .filter((m) => {
      const currentAssignments = m.mentorAssignments.length
      return !m.maxAssignments || currentAssignments < m.maxAssignments
    })
    .map((m) => ({
      id: m.id,
      name: m.name,
      email: m.email,
      expertiseTags: m.expertiseTags,
      currentAssignments: m.mentorAssignments.length,
      maxAssignments: m.maxAssignments,
    }))

  if (availableMentors.length === 0) {
    return allResults
  }

  // Try AI matching
  try {
    const openai = await getOpenAI()
    if (!openai) {
      console.log('[Mentor Matching] OpenAI not configured, using algorithm')
      return getAlgorithmicMatchesBatch(projects, availableMentors, limit)
    }

    const model = await getConfiguredModel()
    console.log(`[Mentor Matching] Using model: ${model} for ${projects.length} projects in batches of ${MENTOR_BATCH_SIZE}`)

    let totalTokens = 0

    // Process in batches
    // Batches run sequentially — presumably to bound rate-limit pressure
    // rather than firing all requests at once (TODO confirm intent).
    for (let i = 0; i < projects.length; i += MENTOR_BATCH_SIZE) {
      const batchProjects = projects.slice(i, i + MENTOR_BATCH_SIZE)

      console.log(`[Mentor Matching] Processing batch ${Math.floor(i / MENTOR_BATCH_SIZE) + 1}/${Math.ceil(projects.length / MENTOR_BATCH_SIZE)}`)

      const { results, tokensUsed } = await processMatchingBatch(
        openai,
        model,
        batchProjects,
        availableMentors,
        limit,
        userId
      )

      totalTokens += tokensUsed

      // Merge results
      for (const [projectId, matches] of results) {
        allResults.set(projectId, matches)
      }
    }

    console.log(`[Mentor Matching] Completed. Total tokens: ${totalTokens}`)

    // Fill in any missing projects with algorithmic fallback
    // (covers projects the model skipped and parse failures that yielded []).
    for (const project of projects) {
      if (!allResults.has(project.id) || allResults.get(project.id)?.length === 0) {
        const fallbackMatches = getAlgorithmicMatches(project, availableMentors, limit)
        allResults.set(project.id, fallbackMatches)
      }
    }

    return allResults

  } catch (error) {
    const classified = classifyAIError(error)
    logAIError('MentorMatching', 'getAIMentorSuggestionsBatch', classified)

    // Log failed attempt
    await logAIUsage({
      userId,
      action: 'MENTOR_MATCHING',
      entityType: 'Project',
      model: 'unknown',
      promptTokens: 0,
      completionTokens: 0,
      totalTokens: 0,
      batchSize: projects.length,
      itemsProcessed: 0,
      status: 'ERROR',
      errorMessage: classified.message,
    })

    console.error('[Mentor Matching] AI failed, using algorithm:', classified.message)
    return getAlgorithmicMatchesBatch(projects, availableMentors, limit)
  }
}
|
||||
|
||||
/**
|
||||
* Get AI-suggested mentor matches for a single project
|
||||
*/
|
||||
export async function getAIMentorSuggestions(
|
||||
prisma: PrismaClient,
|
||||
projectId: string,
|
||||
limit: number = 5,
|
||||
userId?: string
|
||||
): Promise<MentorMatch[]> {
|
||||
const results = await getAIMentorSuggestionsBatch(prisma, [projectId], limit, userId)
|
||||
return results.get(projectId) || []
|
||||
}
|
||||
|
||||
// ─── Algorithmic Fallback ────────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Algorithmic fallback for multiple projects
|
||||
*/
|
||||
function getAlgorithmicMatchesBatch(
|
||||
projects: ProjectInfo[],
|
||||
mentors: MentorInfo[],
|
||||
limit: number
|
||||
): Map<string, MentorMatch[]> {
|
||||
const results = new Map<string, MentorMatch[]>()
|
||||
|
||||
for (const project of projects) {
|
||||
results.set(project.id, getAlgorithmicMatches(project, mentors, limit))
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
/**
|
||||
* Algorithmic fallback for mentor matching
|
||||
*/
|
||||
function getAlgorithmicMatches(
|
||||
project: ProjectInfo,
|
||||
mentors: MentorInfo[],
|
||||
limit: number
|
||||
): MentorMatch[] {
|
||||
// Build keyword set from project
|
||||
const projectKeywords = new Set<string>()
|
||||
|
||||
if (project.oceanIssue) {
|
||||
projectKeywords.add(project.oceanIssue.toLowerCase().replace(/_/g, ' '))
|
||||
}
|
||||
|
||||
if (project.competitionCategory) {
|
||||
projectKeywords.add(project.competitionCategory.toLowerCase().replace(/_/g, ' '))
|
||||
}
|
||||
|
||||
project.tags.forEach((tag) => {
|
||||
tag.toLowerCase().split(/\s+/).forEach((word) => {
|
||||
if (word.length > 3) projectKeywords.add(word)
|
||||
})
|
||||
})
|
||||
|
||||
if (project.description) {
|
||||
const words = project.description.toLowerCase().split(/\s+/)
|
||||
words.forEach((word) => {
|
||||
if (word.length > 4) projectKeywords.add(word.replace(/[^a-z]/g, ''))
|
||||
})
|
||||
}
|
||||
|
||||
// Score each mentor
|
||||
const scored = mentors.map((mentor) => {
|
||||
const mentorKeywords = new Set<string>()
|
||||
mentor.expertiseTags.forEach((tag) => {
|
||||
tag.toLowerCase().split(/\s+/).forEach((word) => {
|
||||
if (word.length > 2) mentorKeywords.add(word)
|
||||
})
|
||||
})
|
||||
|
||||
// Calculate overlap
|
||||
let matchCount = 0
|
||||
projectKeywords.forEach((keyword) => {
|
||||
mentorKeywords.forEach((mentorKeyword) => {
|
||||
if (keyword.includes(mentorKeyword) || mentorKeyword.includes(keyword)) {
|
||||
matchCount++
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
const expertiseMatchScore = mentorKeywords.size > 0
|
||||
? Math.min(1, matchCount / mentorKeywords.size)
|
||||
: 0
|
||||
|
||||
// Factor in availability
|
||||
const availabilityScore = mentor.maxAssignments
|
||||
? 1 - (mentor.currentAssignments / mentor.maxAssignments)
|
||||
: 1
|
||||
|
||||
const confidenceScore = (expertiseMatchScore * 0.7 + availabilityScore * 0.3)
|
||||
|
||||
return {
|
||||
mentorId: mentor.id,
|
||||
confidenceScore: Math.round(confidenceScore * 100) / 100,
|
||||
expertiseMatchScore: Math.round(expertiseMatchScore * 100) / 100,
|
||||
reasoning: `Matched ${matchCount} keyword(s). Availability: ${availabilityScore > 0.5 ? 'Good' : 'Limited'}.`,
|
||||
}
|
||||
})
|
||||
|
||||
// Sort by confidence and return top matches
|
||||
return scored
|
||||
.sort((a, b) => b.confidenceScore - a.confidenceScore)
|
||||
.slice(0, limit)
|
||||
}
|
||||
|
||||
/**
|
||||
* Round-robin assignment for load balancing
|
||||
*/
|
||||
export async function getRoundRobinMentor(
|
||||
prisma: PrismaClient,
|
||||
excludeMentorIds: string[] = []
|
||||
): Promise<string | null> {
|
||||
const mentors = await prisma.user.findMany({
|
||||
where: {
|
||||
OR: [
|
||||
{ expertiseTags: { isEmpty: false } },
|
||||
{ role: 'JURY_MEMBER' },
|
||||
],
|
||||
status: 'ACTIVE',
|
||||
id: { notIn: excludeMentorIds },
|
||||
},
|
||||
select: {
|
||||
id: true,
|
||||
maxAssignments: true,
|
||||
mentorAssignments: {
|
||||
select: { id: true },
|
||||
},
|
||||
},
|
||||
orderBy: {
|
||||
mentorAssignments: {
|
||||
_count: 'asc',
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// Find mentor with fewest assignments who hasn't reached max
|
||||
for (const mentor of mentors) {
|
||||
const currentCount = mentor.mentorAssignments.length
|
||||
if (!mentor.maxAssignments || currentCount < mentor.maxAssignments) {
|
||||
return mentor.id
|
||||
}
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
/**
|
||||
* AI-Powered Mentor Matching Service
|
||||
*
|
||||
* Matches mentors to projects based on expertise alignment.
|
||||
*
|
||||
* Optimization:
|
||||
* - Batched processing (15 projects per batch)
|
||||
* - Token tracking and cost logging
|
||||
* - Fallback to algorithmic matching
|
||||
*
|
||||
* GDPR Compliance:
|
||||
* - All data anonymized before AI processing
|
||||
* - No personal information sent to OpenAI
|
||||
*/
|
||||
|
||||
import { PrismaClient, OceanIssue, CompetitionCategory } from '@prisma/client'
|
||||
import { getOpenAI, getConfiguredModel, buildCompletionParams } from '@/lib/openai'
|
||||
import { logAIUsage, extractTokenUsage } from '@/server/utils/ai-usage'
|
||||
import { classifyAIError, createParseError, logAIError } from './ai-errors'
|
||||
|
||||
// ─── Constants ───────────────────────────────────────────────────────────────

// Number of projects sent to the model per request (bounds prompt size).
const MENTOR_BATCH_SIZE = 15

// Optimized system prompt
// Kept deliberately terse to minimize prompt tokens; the expected JSON
// response shape is spelled out inline so jsonMode output can be parsed
// directly by processMatchingBatch.
const MENTOR_MATCHING_SYSTEM_PROMPT = `Match mentors to projects by expertise. Return JSON.
Format for each project: {"matches": [{project_id, mentor_matches: [{mentor_index, confidence_score: 0-1, expertise_match_score: 0-1, reasoning: str}]}]}
Rank by suitability. Consider expertise alignment and availability.`
|
||||
|
||||
// ─── Types ───────────────────────────────────────────────────────────────────

// Minimal projection of a project used for matching.
interface ProjectInfo {
  id: string
  title: string
  description: string | null
  oceanIssue: OceanIssue | null
  competitionCategory: CompetitionCategory | null
  tags: string[]
}

// Mentor candidate with current workload. name/email are kept for callers
// but are never included in prompts sent to OpenAI (see processMatchingBatch).
interface MentorInfo {
  id: string
  name: string | null
  email: string
  expertiseTags: string[]
  // Count of active mentor assignments.
  currentAssignments: number
  // null is treated as unlimited capacity.
  maxAssignments: number | null
}

// A single ranked mentor suggestion; scores are clamped to [0, 1].
interface MentorMatch {
  mentorId: string
  confidenceScore: number
  expertiseMatchScore: number
  reasoning: string
}
|
||||
|
||||
// ─── Batched AI Matching ─────────────────────────────────────────────────────
|
||||
|
||||
/**
 * Process a batch of projects for mentor matching
 *
 * Sends anonymized project/mentor data to OpenAI, logs token usage, and maps
 * the model's index-based answers back to real database ids. JSON parse
 * failures are logged and yield empty match lists (callers then fall back to
 * the algorithmic matcher); all other errors are rethrown to the caller.
 */
async function processMatchingBatch(
  openai: NonNullable<Awaited<ReturnType<typeof getOpenAI>>>,
  model: string,
  projects: ProjectInfo[],
  mentors: MentorInfo[],
  limit: number,
  userId?: string
): Promise<{
  results: Map<string, MentorMatch[]>
  tokensUsed: number
}> {
  const results = new Map<string, MentorMatch[]>()
  let tokensUsed = 0

  // Anonymize project data
  // Projects are referred to by positional ids (P1, P2, ...) in the prompt;
  // real_id stays server-side only (the prompt template below never uses it).
  const anonymizedProjects = projects.map((p, index) => ({
    project_id: `P${index + 1}`,
    real_id: p.id,
    description: p.description?.slice(0, 350) || 'No description',
    category: p.competitionCategory,
    oceanIssue: p.oceanIssue,
    tags: p.tags,
  }))

  // Anonymize mentor data
  // Mentors are referenced by array index; no name/email is sent to the model.
  const anonymizedMentors = mentors.map((m, index) => ({
    index,
    expertise: m.expertiseTags,
    availability: m.maxAssignments
      ? `${m.currentAssignments}/${m.maxAssignments}`
      : 'unlimited',
  }))

  // Descriptions are truncated again to 200 chars here to cap prompt size.
  const userPrompt = `PROJECTS:
${anonymizedProjects.map(p => `${p.project_id}: Category=${p.category || 'N/A'}, Issue=${p.oceanIssue || 'N/A'}, Tags=[${p.tags.join(', ')}], Desc=${p.description.slice(0, 200)}`).join('\n')}

MENTORS:
${anonymizedMentors.map(m => `${m.index}: Expertise=[${m.expertise.join(', ')}], Availability=${m.availability}`).join('\n')}

For each project, rank top ${limit} mentors.`

  try {
    const params = buildCompletionParams(model, {
      messages: [
        { role: 'system', content: MENTOR_MATCHING_SYSTEM_PROMPT },
        { role: 'user', content: userPrompt },
      ],
      jsonMode: true,
      temperature: 0.3,
      maxTokens: 4000,
    })

    const response = await openai.chat.completions.create(params)
    const usage = extractTokenUsage(response)
    tokensUsed = usage.totalTokens

    // Log usage
    // NOTE: recorded as SUCCESS as soon as the API call returns, i.e. before
    // the response is parsed; a later parse failure logs a second ERROR entry.
    await logAIUsage({
      userId,
      action: 'MENTOR_MATCHING',
      entityType: 'Project',
      model,
      promptTokens: usage.promptTokens,
      completionTokens: usage.completionTokens,
      totalTokens: usage.totalTokens,
      batchSize: projects.length,
      itemsProcessed: projects.length,
      status: 'SUCCESS',
    })

    const content = response.choices[0]?.message?.content
    if (!content) {
      throw new Error('No response from AI')
    }

    // Expected response shape (mirrors MENTOR_MATCHING_SYSTEM_PROMPT).
    // NOTE(review): the cast is unvalidated — malformed-but-parseable JSON
    // is only partially guarded by the checks below.
    const parsed = JSON.parse(content) as {
      matches: Array<{
        project_id: string
        mentor_matches: Array<{
          mentor_index: number
          confidence_score: number
          expertise_match_score: number
          reasoning: string
        }>
      }>
    }

    // Map results back to real IDs
    for (const projectMatch of parsed.matches || []) {
      const project = anonymizedProjects.find(p => p.project_id === projectMatch.project_id)
      if (!project) continue

      const mentorMatches: MentorMatch[] = []
      for (const match of projectMatch.mentor_matches || []) {
        // Drop out-of-range mentor indices the model may hallucinate.
        if (match.mentor_index >= 0 && match.mentor_index < mentors.length) {
          mentorMatches.push({
            mentorId: mentors[match.mentor_index].id,
            // Clamp model-provided scores into [0, 1].
            confidenceScore: Math.min(1, Math.max(0, match.confidence_score)),
            expertiseMatchScore: Math.min(1, Math.max(0, match.expertise_match_score)),
            reasoning: match.reasoning,
          })
        }
      }
      results.set(project.real_id, mentorMatches)
    }

  } catch (error) {
    if (error instanceof SyntaxError) {
      // JSON.parse failed: log the parse error and return empty match lists
      // so the caller can fall back to algorithmic matching.
      const parseError = createParseError(error.message)
      logAIError('MentorMatching', 'batch processing', parseError)

      await logAIUsage({
        userId,
        action: 'MENTOR_MATCHING',
        entityType: 'Project',
        model,
        promptTokens: 0,
        completionTokens: 0,
        totalTokens: tokensUsed,
        batchSize: projects.length,
        itemsProcessed: 0,
        status: 'ERROR',
        errorMessage: parseError.message,
      })

      // Return empty results for batch (will fall back to algorithm)
      for (const project of projects) {
        results.set(project.id, [])
      }
    } else {
      // API/auth/rate-limit errors propagate to the caller for classification.
      throw error
    }
  }

  return { results, tokensUsed }
}
|
||||
|
||||
/**
 * Get AI-suggested mentor matches for multiple projects (batched)
 *
 * Pipeline:
 *  1. Load projects and active mentor candidates from the database.
 *  2. Keep only mentors that still have assignment capacity.
 *  3. Run AI matching in batches of MENTOR_BATCH_SIZE.
 *  4. Backfill any project with no AI matches via the keyword algorithm.
 * On any AI failure the whole set falls back to the algorithmic matcher,
 * so this function does not throw for model/parse errors.
 */
export async function getAIMentorSuggestionsBatch(
  prisma: PrismaClient,
  projectIds: string[],
  limit: number = 5,
  userId?: string
): Promise<Map<string, MentorMatch[]>> {
  const allResults = new Map<string, MentorMatch[]>()

  // Get projects
  const projects = await prisma.project.findMany({
    where: { id: { in: projectIds } },
    select: {
      id: true,
      title: true,
      description: true,
      oceanIssue: true,
      competitionCategory: true,
      tags: true,
    },
  })

  if (projects.length === 0) {
    return allResults
  }

  // Get available mentors
  // Candidates: ACTIVE users who have expertise tags or are jury members.
  const mentors = await prisma.user.findMany({
    where: {
      OR: [
        { expertiseTags: { isEmpty: false } },
        { role: 'JURY_MEMBER' },
      ],
      status: 'ACTIVE',
    },
    select: {
      id: true,
      name: true,
      email: true,
      expertiseTags: true,
      maxAssignments: true,
      mentorAssignments: {
        select: { id: true },
      },
    },
  })

  // Filter mentors who haven't reached max assignments
  // (falsy maxAssignments, i.e. null or 0, is treated as unlimited).
  const availableMentors: MentorInfo[] = mentors
    .filter((m) => {
      const currentAssignments = m.mentorAssignments.length
      return !m.maxAssignments || currentAssignments < m.maxAssignments
    })
    .map((m) => ({
      id: m.id,
      name: m.name,
      email: m.email,
      expertiseTags: m.expertiseTags,
      currentAssignments: m.mentorAssignments.length,
      maxAssignments: m.maxAssignments,
    }))

  if (availableMentors.length === 0) {
    return allResults
  }

  // Try AI matching
  try {
    const openai = await getOpenAI()
    if (!openai) {
      console.log('[Mentor Matching] OpenAI not configured, using algorithm')
      return getAlgorithmicMatchesBatch(projects, availableMentors, limit)
    }

    const model = await getConfiguredModel()
    console.log(`[Mentor Matching] Using model: ${model} for ${projects.length} projects in batches of ${MENTOR_BATCH_SIZE}`)

    let totalTokens = 0

    // Process in batches
    // Batches run sequentially — presumably to bound rate-limit pressure
    // rather than firing all requests at once (TODO confirm intent).
    for (let i = 0; i < projects.length; i += MENTOR_BATCH_SIZE) {
      const batchProjects = projects.slice(i, i + MENTOR_BATCH_SIZE)

      console.log(`[Mentor Matching] Processing batch ${Math.floor(i / MENTOR_BATCH_SIZE) + 1}/${Math.ceil(projects.length / MENTOR_BATCH_SIZE)}`)

      const { results, tokensUsed } = await processMatchingBatch(
        openai,
        model,
        batchProjects,
        availableMentors,
        limit,
        userId
      )

      totalTokens += tokensUsed

      // Merge results
      for (const [projectId, matches] of results) {
        allResults.set(projectId, matches)
      }
    }

    console.log(`[Mentor Matching] Completed. Total tokens: ${totalTokens}`)

    // Fill in any missing projects with algorithmic fallback
    // (covers projects the model skipped and parse failures that yielded []).
    for (const project of projects) {
      if (!allResults.has(project.id) || allResults.get(project.id)?.length === 0) {
        const fallbackMatches = getAlgorithmicMatches(project, availableMentors, limit)
        allResults.set(project.id, fallbackMatches)
      }
    }

    return allResults

  } catch (error) {
    const classified = classifyAIError(error)
    logAIError('MentorMatching', 'getAIMentorSuggestionsBatch', classified)

    // Log failed attempt
    await logAIUsage({
      userId,
      action: 'MENTOR_MATCHING',
      entityType: 'Project',
      model: 'unknown',
      promptTokens: 0,
      completionTokens: 0,
      totalTokens: 0,
      batchSize: projects.length,
      itemsProcessed: 0,
      status: 'ERROR',
      errorMessage: classified.message,
    })

    console.error('[Mentor Matching] AI failed, using algorithm:', classified.message)
    return getAlgorithmicMatchesBatch(projects, availableMentors, limit)
  }
}
|
||||
|
||||
/**
|
||||
* Get AI-suggested mentor matches for a single project
|
||||
*/
|
||||
export async function getAIMentorSuggestions(
|
||||
prisma: PrismaClient,
|
||||
projectId: string,
|
||||
limit: number = 5,
|
||||
userId?: string
|
||||
): Promise<MentorMatch[]> {
|
||||
const results = await getAIMentorSuggestionsBatch(prisma, [projectId], limit, userId)
|
||||
return results.get(projectId) || []
|
||||
}
|
||||
|
||||
// ─── Algorithmic Fallback ────────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Algorithmic fallback for multiple projects
|
||||
*/
|
||||
function getAlgorithmicMatchesBatch(
|
||||
projects: ProjectInfo[],
|
||||
mentors: MentorInfo[],
|
||||
limit: number
|
||||
): Map<string, MentorMatch[]> {
|
||||
const results = new Map<string, MentorMatch[]>()
|
||||
|
||||
for (const project of projects) {
|
||||
results.set(project.id, getAlgorithmicMatches(project, mentors, limit))
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
/**
|
||||
* Algorithmic fallback for mentor matching
|
||||
*/
|
||||
function getAlgorithmicMatches(
|
||||
project: ProjectInfo,
|
||||
mentors: MentorInfo[],
|
||||
limit: number
|
||||
): MentorMatch[] {
|
||||
// Build keyword set from project
|
||||
const projectKeywords = new Set<string>()
|
||||
|
||||
if (project.oceanIssue) {
|
||||
projectKeywords.add(project.oceanIssue.toLowerCase().replace(/_/g, ' '))
|
||||
}
|
||||
|
||||
if (project.competitionCategory) {
|
||||
projectKeywords.add(project.competitionCategory.toLowerCase().replace(/_/g, ' '))
|
||||
}
|
||||
|
||||
project.tags.forEach((tag) => {
|
||||
tag.toLowerCase().split(/\s+/).forEach((word) => {
|
||||
if (word.length > 3) projectKeywords.add(word)
|
||||
})
|
||||
})
|
||||
|
||||
if (project.description) {
|
||||
const words = project.description.toLowerCase().split(/\s+/)
|
||||
words.forEach((word) => {
|
||||
if (word.length > 4) projectKeywords.add(word.replace(/[^a-z]/g, ''))
|
||||
})
|
||||
}
|
||||
|
||||
// Score each mentor
|
||||
const scored = mentors.map((mentor) => {
|
||||
const mentorKeywords = new Set<string>()
|
||||
mentor.expertiseTags.forEach((tag) => {
|
||||
tag.toLowerCase().split(/\s+/).forEach((word) => {
|
||||
if (word.length > 2) mentorKeywords.add(word)
|
||||
})
|
||||
})
|
||||
|
||||
// Calculate overlap
|
||||
let matchCount = 0
|
||||
projectKeywords.forEach((keyword) => {
|
||||
mentorKeywords.forEach((mentorKeyword) => {
|
||||
if (keyword.includes(mentorKeyword) || mentorKeyword.includes(keyword)) {
|
||||
matchCount++
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
const expertiseMatchScore = mentorKeywords.size > 0
|
||||
? Math.min(1, matchCount / mentorKeywords.size)
|
||||
: 0
|
||||
|
||||
// Factor in availability
|
||||
const availabilityScore = mentor.maxAssignments
|
||||
? 1 - (mentor.currentAssignments / mentor.maxAssignments)
|
||||
: 1
|
||||
|
||||
const confidenceScore = (expertiseMatchScore * 0.7 + availabilityScore * 0.3)
|
||||
|
||||
return {
|
||||
mentorId: mentor.id,
|
||||
confidenceScore: Math.round(confidenceScore * 100) / 100,
|
||||
expertiseMatchScore: Math.round(expertiseMatchScore * 100) / 100,
|
||||
reasoning: `Matched ${matchCount} keyword(s). Availability: ${availabilityScore > 0.5 ? 'Good' : 'Limited'}.`,
|
||||
}
|
||||
})
|
||||
|
||||
// Sort by confidence and return top matches
|
||||
return scored
|
||||
.sort((a, b) => b.confidenceScore - a.confidenceScore)
|
||||
.slice(0, limit)
|
||||
}
|
||||
|
||||
/**
|
||||
* Round-robin assignment for load balancing
|
||||
*/
|
||||
export async function getRoundRobinMentor(
|
||||
prisma: PrismaClient,
|
||||
excludeMentorIds: string[] = []
|
||||
): Promise<string | null> {
|
||||
const mentors = await prisma.user.findMany({
|
||||
where: {
|
||||
OR: [
|
||||
{ expertiseTags: { isEmpty: false } },
|
||||
{ role: 'JURY_MEMBER' },
|
||||
],
|
||||
status: 'ACTIVE',
|
||||
id: { notIn: excludeMentorIds },
|
||||
},
|
||||
select: {
|
||||
id: true,
|
||||
maxAssignments: true,
|
||||
mentorAssignments: {
|
||||
select: { id: true },
|
||||
},
|
||||
},
|
||||
orderBy: {
|
||||
mentorAssignments: {
|
||||
_count: 'asc',
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// Find mentor with fewest assignments who hasn't reached max
|
||||
for (const mentor of mentors) {
|
||||
const currentCount = mentor.mentorAssignments.length
|
||||
if (!mentor.maxAssignments || currentCount < mentor.maxAssignments) {
|
||||
return mentor.id
|
||||
}
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user