Implement Prototype 1 improvements: unified members, project filters, audit expansion, filtering rounds, special awards

- Unified Member Management: merge /admin/users and /admin/mentors into /admin/members with role tabs, search, pagination
- Project List Filters: add search, multi-status filter, round/category/country selects, boolean toggles, URL persistence
- Audit Log Expansion: track logins, round state changes, evaluation submissions, file access, role changes via shared logAudit utility
- Founding Date Field: add foundedAt to Project model with CSV import support
- Filtering Round System: configurable rules (field-based, document check, AI screening), execution engine, results review with override/reinstate
- Special Awards System: named awards with eligibility criteria, dedicated jury, PICK_WINNER/RANKED/SCORED voting modes, AI eligibility
- Dashboard resilience: wrap heavy queries in try-catch to prevent error boundary on transient DB failures
- Reusable pagination component extracted to src/components/shared/pagination.tsx
- Old /admin/users and /admin/mentors routes redirect to /admin/members
- Prisma migration for all schema additions (additive, no data loss)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
2026-02-02 16:58:29 +01:00
parent 8fda8deded
commit 90e3adfab2
44 changed files with 7268 additions and 2154 deletions

View File

@@ -0,0 +1,226 @@
/**
* AI-Powered Award Eligibility Service
*
* Determines project eligibility for special awards using:
* - Deterministic field matching (tags, country, category)
* - AI interpretation of plain-language criteria
*/
import { getOpenAI, getConfiguredModel } from '@/lib/openai'
// ─── Types ──────────────────────────────────────────────────────────────────

/**
 * A single deterministic matching rule applied to one project field.
 * All rules in a set are ANDed together by applyAutoTagRules.
 */
export type AutoTagRule = {
  // Field to inspect; `tags` is the only array-valued field.
  field: 'competitionCategory' | 'country' | 'geographicZone' | 'tags' | 'oceanIssue'
  // equals:   case-insensitive string equality
  // contains: case-insensitive substring match (any element for array fields)
  // in:       field value matches any entry of a string[] value (case-insensitive)
  operator: 'equals' | 'contains' | 'in'
  value: string | string[]
}

/** Outcome of an eligibility check for a single project. */
export interface EligibilityResult {
  projectId: string
  eligible: boolean
  // 0-1; 0 is used for every "needs manual review" fallback.
  confidence: number
  reasoning: string
  // AUTO = deterministic field matching, AI = model interpretation of criteria.
  method: 'AUTO' | 'AI'
}

/** Minimal project shape needed for eligibility evaluation. */
interface ProjectForEligibility {
  id: string
  title: string
  description?: string | null
  competitionCategory?: string | null
  country?: string | null
  geographicZone?: string | null
  tags: string[]
  oceanIssue?: string | null
}
// ─── Auto Tag Rules ─────────────────────────────────────────────────────────

/**
 * Evaluates deterministic auto-tag rules against projects.
 *
 * All rules must match (logical AND) for a project to be marked eligible;
 * an empty rule list therefore matches every project. String comparisons
 * are case-insensitive. Array-valued fields (`tags`) are compared
 * element-wise: `equals`/`in` match when ANY element matches and `contains`
 * when any element contains the search value. (Previously arrays were fed
 * through String(), comma-joining them, so multi-tag projects could never
 * match `equals`/`in`.) Null/undefined fields are treated as empty strings
 * rather than the literal "null"/"undefined".
 *
 * @param rules - Rules to evaluate (ANDed); empty list matches everything.
 * @param projects - Projects to check.
 * @returns Map of project id -> whether all rules matched.
 */
export function applyAutoTagRules(
  rules: AutoTagRule[],
  projects: ProjectForEligibility[]
): Map<string, boolean> {
  const results = new Map<string, boolean>()
  for (const project of projects) {
    const matches = rules.every((rule) => {
      // Rule field names map 1:1 onto project property names.
      const fieldValue = project[rule.field]
      // Normalize to lowercase string candidates so array fields are
      // matched element-wise instead of via String() coercion.
      const candidates = (Array.isArray(fieldValue) ? fieldValue : [fieldValue]).map(
        (v) => String(v ?? '').toLowerCase()
      )
      switch (rule.operator) {
        case 'equals':
          return candidates.some((c) => c === String(rule.value).toLowerCase())
        case 'contains':
          return candidates.some((c) =>
            c.includes(String(rule.value).toLowerCase())
          )
        case 'in':
          if (Array.isArray(rule.value)) {
            return rule.value.some((v) =>
              candidates.some((c) => c === String(v).toLowerCase())
            )
          }
          // 'in' requires a list value; anything else cannot match.
          return false
        default:
          return false
      }
    })
    results.set(project.id, matches)
  }
  return results
}
/**
 * Reads the value of a rule field from a project record.
 * Returns null for unrecognized field names.
 */
function getFieldValue(
  project: ProjectForEligibility,
  field: AutoTagRule['field']
): unknown {
  // Accessor table instead of a switch; each entry mirrors one field.
  const readers: Record<AutoTagRule['field'], () => unknown> = {
    competitionCategory: () => project.competitionCategory,
    country: () => project.country,
    geographicZone: () => project.geographicZone,
    tags: () => project.tags,
    oceanIssue: () => project.oceanIssue,
  }
  const read = readers[field]
  return read ? read() : null
}
// ─── AI Criteria Interpretation ─────────────────────────────────────────────

// System prompt for the eligibility model. The completion is requested as a
// JSON object (response_format below) whose `evaluations` entries reference
// the anonymized project_id (P1, P2, ...), never the database id.
const AI_ELIGIBILITY_SYSTEM_PROMPT = `You are a special award eligibility evaluator. Given a list of projects and award criteria, determine which projects are eligible.
Return a JSON object with this structure:
{
"evaluations": [
{
"project_id": "string",
"eligible": boolean,
"confidence": number (0-1),
"reasoning": "string"
}
]
}
Be fair, objective, and base your evaluation only on the provided information. Do not include personal identifiers in reasoning.`
/**
 * Interprets plain-language award criteria against projects using the
 * configured OpenAI model.
 *
 * Projects are anonymized to P<n> ids before being sent and processed in
 * batches of 20. The function never throws: when no OpenAI client is
 * configured, a request fails, a batch response is empty or unparseable, or
 * the model omits a project, the affected projects are returned as
 * ineligible with confidence 0 so they surface for manual review.
 * Unlike the previous version, every input project is guaranteed exactly
 * one entry in the output (projects missing from the model's response were
 * previously dropped silently).
 *
 * @param criteriaText - Plain-language eligibility criteria for the award.
 * @param projects - Candidate projects to evaluate.
 * @returns One EligibilityResult per input project (method 'AI').
 */
export async function aiInterpretCriteria(
  criteriaText: string,
  projects: ProjectForEligibility[]
): Promise<EligibilityResult[]> {
  // Shared fallback shape for every "AI could not evaluate this" case.
  const manualReview = (projectId: string, reasoning: string): EligibilityResult => ({
    projectId,
    eligible: false,
    confidence: 0,
    reasoning,
    method: 'AI',
  })
  const results: EligibilityResult[] = []
  try {
    const openai = await getOpenAI()
    if (!openai) {
      // No OpenAI — mark all as needing manual review
      return projects.map((p) =>
        manualReview(p.id, 'AI unavailable — requires manual eligibility review')
      )
    }
    const model = await getConfiguredModel()
    // Anonymize and batch: the model only ever sees P<n> ids; real_id stays
    // local so results can be mapped back to database ids.
    const anonymized = projects.map((p, i) => ({
      project_id: `P${i + 1}`,
      real_id: p.id,
      title: p.title,
      description: p.description?.slice(0, 500) || '',
      category: p.competitionCategory || 'Unknown',
      ocean_issue: p.oceanIssue || 'Unknown',
      country: p.country || 'Unknown',
      region: p.geographicZone || 'Unknown',
      tags: p.tags.join(', '),
    }))
    const batchSize = 20
    for (let i = 0; i < anonymized.length; i += batchSize) {
      const batch = anonymized.slice(i, i + batchSize)
      const userPrompt = `Award criteria: ${criteriaText}
Projects to evaluate:
${JSON.stringify(batch.map(({ real_id, ...rest }) => rest), null, 2)}
Evaluate each project against the award criteria.`
      const response = await openai.chat.completions.create({
        model,
        messages: [
          { role: 'system', content: AI_ELIGIBILITY_SYSTEM_PROMPT },
          { role: 'user', content: userPrompt },
        ],
        response_format: { type: 'json_object' },
        temperature: 0.3,
        max_tokens: 4000,
      })
      const content = response.choices[0]?.message?.content
      if (!content) {
        // Empty completion — give each batch item an explicit fallback
        // instead of silently dropping the batch.
        for (const item of batch) {
          results.push(
            manualReview(item.real_id, 'AI returned no content — requires manual review')
          )
        }
        continue
      }
      try {
        const parsed = JSON.parse(content) as {
          evaluations: Array<{
            project_id: string
            eligible: boolean
            confidence: number
            reasoning: string
          }>
        }
        const evaluated = new Set<string>()
        for (const eval_ of parsed.evaluations) {
          const anon = batch.find((b) => b.project_id === eval_.project_id)
          if (anon) {
            evaluated.add(anon.real_id)
            results.push({
              projectId: anon.real_id,
              eligible: eval_.eligible,
              confidence: eval_.confidence,
              reasoning: eval_.reasoning,
              method: 'AI',
            })
          }
        }
        // Projects the model skipped (or returned under an unknown id)
        // still need an entry so callers see one result per project.
        for (const item of batch) {
          if (!evaluated.has(item.real_id)) {
            results.push(
              manualReview(item.real_id, 'No AI evaluation returned — requires manual review')
            )
          }
        }
      } catch {
        // Parse error — mark batch for manual review
        for (const item of batch) {
          results.push(
            manualReview(item.real_id, 'AI response parse error — requires manual review')
          )
        }
      }
    }
  } catch {
    // OpenAI error — mark all for manual review (partial results from
    // earlier batches are discarded in favor of a consistent fallback).
    return projects.map((p) =>
      manualReview(p.id, 'AI error — requires manual eligibility review')
    )
  }
  return results
}

View File

@@ -0,0 +1,509 @@
/**
* AI-Powered Filtering Service
*
* Runs automated filtering rules against projects:
* - Field-based rules (age checks, category, country, etc.)
* - Document checks (file existence/types)
* - AI screening (GPT interprets criteria text, flags spam)
*/
import { getOpenAI, getConfiguredModel } from '@/lib/openai'
import type { Prisma } from '@prisma/client'
// ─── Types ──────────────────────────────────────────────────────────────────

/** One condition of a FIELD_BASED rule: project field × operator × value. */
export type FieldRuleCondition = {
  field:
    | 'competitionCategory'
    | 'foundedAt'
    | 'country'
    | 'geographicZone'
    | 'tags'
    | 'oceanIssue'
  // Semantics (see evaluateCondition):
  // - equals/not_equals: case-sensitive string comparison
  // - contains: case-insensitive substring (any element for array fields)
  // - in/not_in: membership of String(fieldValue) in a string[] value
  // - older_than_years/newer_than_years: Date fields only, vs. `value` years ago
  // - greater_than/less_than: numeric coercion of both sides
  // - is_empty: null/undefined, empty array, or blank string
  operator:
    | 'equals'
    | 'not_equals'
    | 'greater_than'
    | 'less_than'
    | 'contains'
    | 'in'
    | 'not_in'
    | 'older_than_years'
    | 'newer_than_years'
    | 'is_empty'
  value: string | number | string[]
}

/** FIELD_BASED rule config: conditions combined with AND/OR, plus an action. */
export type FieldRuleConfig = {
  conditions: FieldRuleCondition[]
  logic: 'AND' | 'OR'
  // PASS: rule passes when conditions hold.
  // REJECT/FLAG: conditions holding means the project is rejected/flagged.
  action: 'PASS' | 'REJECT' | 'FLAG'
}

/** DOCUMENT_CHECK rule config: uploaded-file requirements. */
export type DocumentCheckConfig = {
  requiredFileTypes?: string[] // e.g. ['pdf', 'docx']
  minFileCount?: number
  action: 'PASS' | 'REJECT' | 'FLAG'
}

/** AI_SCREENING rule config: plain-language criteria interpreted by the model. */
export type AIScreeningConfig = {
  criteriaText: string
  action: 'FLAG' // AI screening always flags for human review
}

export type RuleConfig = FieldRuleConfig | DocumentCheckConfig | AIScreeningConfig

/** Outcome of a single rule for a single project. */
export interface RuleResult {
  ruleId: string
  ruleName: string
  ruleType: string
  passed: boolean
  action: 'PASS' | 'REJECT' | 'FLAG'
  reasoning?: string // only populated for AI screening results
}

/** Aggregate filtering outcome for one project across all rules. */
export interface ProjectFilteringResult {
  projectId: string
  outcome: 'PASSED' | 'FILTERED_OUT' | 'FLAGGED'
  ruleResults: RuleResult[]
  // Raw per-rule AI screening payloads keyed by rule id; omitted when no
  // AI rule produced a result for this project.
  aiScreeningJson?: Record<string, unknown>
}

/** Minimal project shape the filtering engine reads. */
interface ProjectForFiltering {
  id: string
  title: string
  description?: string | null
  competitionCategory?: string | null
  foundedAt?: Date | null
  country?: string | null
  geographicZone?: string | null
  tags: string[]
  oceanIssue?: string | null
  wantsMentorship?: boolean | null
  files: Array<{ id: string; fileName: string; fileType?: string | null }>
}

/** A filtering rule row as loaded from the database. */
interface FilteringRuleInput {
  id: string
  name: string
  ruleType: string // 'FIELD_BASED' | 'DOCUMENT_CHECK' | 'AI_SCREENING'; others are skipped
  configJson: Prisma.JsonValue // cast to one of the *Config types above at use sites
  priority: number // ascending — lower priority values run first
  isActive: boolean // inactive rules are filtered out before execution
}
// ─── Field-Based Rule Evaluation ────────────────────────────────────────────

/**
 * Tests one field-rule condition against a single project.
 * Unknown fields and unknown operators both evaluate to false.
 */
function evaluateCondition(
  condition: FieldRuleCondition,
  project: ProjectForFiltering
): boolean {
  const { field, operator, value } = condition
  // Every recognized rule field name doubles as a project property name;
  // anything outside this set short-circuits to false.
  const knownFields: Record<FieldRuleCondition['field'], true> = {
    competitionCategory: true,
    foundedAt: true,
    country: true,
    geographicZone: true,
    tags: true,
    oceanIssue: true,
  }
  if (!knownFields[field]) return false
  const fieldValue: unknown = project[field]
  // Cutoff shared by the two age operators: today minus `value` years.
  const yearsCutoff = (): Date => {
    const cutoff = new Date()
    cutoff.setFullYear(cutoff.getFullYear() - Number(value))
    return cutoff
  }
  if (operator === 'equals') return String(fieldValue) === String(value)
  if (operator === 'not_equals') return String(fieldValue) !== String(value)
  if (operator === 'contains') {
    const needle = String(value).toLowerCase()
    return Array.isArray(fieldValue)
      ? fieldValue.some((v) => String(v).toLowerCase().includes(needle))
      : String(fieldValue || '').toLowerCase().includes(needle)
  }
  if (operator === 'in') {
    return Array.isArray(value) && value.includes(String(fieldValue))
  }
  if (operator === 'not_in') {
    // A non-list value cannot exclude anything, so the condition holds.
    return !Array.isArray(value) || !value.includes(String(fieldValue))
  }
  if (operator === 'is_empty') {
    if (fieldValue === null || fieldValue === undefined) return true
    return Array.isArray(fieldValue)
      ? fieldValue.length === 0
      : String(fieldValue).trim() === ''
  }
  if (operator === 'older_than_years') {
    return fieldValue instanceof Date && fieldValue < yearsCutoff()
  }
  if (operator === 'newer_than_years') {
    return fieldValue instanceof Date && fieldValue >= yearsCutoff()
  }
  if (operator === 'greater_than') return Number(fieldValue) > Number(value)
  if (operator === 'less_than') return Number(fieldValue) < Number(value)
  return false
}
/**
 * Applies a complete field rule (conditions + AND/OR logic) to one project.
 *
 * The returned `passed` flag is interpreted relative to the rule's action:
 * a PASS rule passes when its conditions hold, while REJECT/FLAG rules pass
 * when their conditions do NOT hold — matching conditions mean the project
 * should be rejected or flagged.
 */
export function evaluateFieldRule(
  config: FieldRuleConfig,
  project: ProjectForFiltering
): { passed: boolean; action: 'PASS' | 'REJECT' | 'FLAG' } {
  const outcomes = config.conditions.map((cond) => evaluateCondition(cond, project))
  const conditionsMet =
    config.logic === 'AND' ? outcomes.every(Boolean) : outcomes.some(Boolean)
  const passed = config.action === 'PASS' ? conditionsMet : !conditionsMet
  return { passed, action: config.action }
}
// ─── Document Check Evaluation ──────────────────────────────────────────────

/**
 * Checks a project's uploaded files against a document rule.
 *
 * Fails when fewer than `minFileCount` files exist, or when any entry of
 * `requiredFileTypes` (extensions such as 'pdf', matched case-insensitively
 * against file-name extensions) has no matching file. On failure the rule's
 * configured action applies; otherwise the rule passes.
 */
export function evaluateDocumentRule(
  config: DocumentCheckConfig,
  project: ProjectForFiltering
): { passed: boolean; action: 'PASS' | 'REJECT' | 'FLAG' } {
  const files = project.files || []
  if (config.minFileCount !== undefined && files.length < config.minFileCount) {
    return { passed: false, action: config.action }
  }
  if (config.requiredFileTypes && config.requiredFileTypes.length > 0) {
    const fileExtensions = files.map((f) => {
      // Only text after a dot counts as an extension. The previous
      // split('.').pop() returned the WHOLE name for dotless files, so a
      // file literally named "pdf" satisfied a required 'pdf' type.
      // Dotfiles like ".env" have no extension (matches path.extname).
      const dot = f.fileName.lastIndexOf('.')
      return dot > 0 ? f.fileName.slice(dot + 1).toLowerCase() : ''
    })
    const hasAllTypes = config.requiredFileTypes.every((type) =>
      fileExtensions.some((ext) => ext === type.toLowerCase())
    )
    if (!hasAllTypes) {
      return { passed: false, action: config.action }
    }
  }
  return { passed: true, action: config.action }
}
// ─── AI Screening ───────────────────────────────────────────────────────────

// System prompt for the screening model. The completion is requested as a
// JSON object (response_format below) whose `projects` entries reference the
// anonymized project_id (P1, P2, ...), never the database id.
const AI_SCREENING_SYSTEM_PROMPT = `You are a project screening assistant. You evaluate projects against specific criteria.
You must return a JSON object with this structure:
{
"projects": [
{
"project_id": "string",
"meets_criteria": boolean,
"confidence": number (0-1),
"reasoning": "string",
"quality_score": number (1-10),
"spam_risk": boolean
}
]
}
Be fair and objective. Base your evaluation only on the information provided.
Never include personal identifiers in your reasoning.`
/**
 * Runs AI screening for one rule's criteria text over a set of projects.
 *
 * Projects are anonymized to P<n> ids and sent in batches of 20; the result
 * map is keyed by the real (database) project id. When OpenAI is not
 * configured, a request throws, or a batch response cannot be parsed, the
 * affected projects get a confidence-0 placeholder so they surface for
 * manual review — this function never throws.
 *
 * NOTE(review): projects the model omits from a successfully-parsed response
 * (or returns under an unknown project_id), and entire batches whose
 * completion content is empty, get NO map entry at all. Callers must treat
 * a missing key as "not screened" — verify that executeFilteringRules does.
 */
export async function executeAIScreening(
  config: AIScreeningConfig,
  projects: ProjectForFiltering[]
): Promise<
  Map<
    string,
    {
      meetsCriteria: boolean
      confidence: number
      reasoning: string
      qualityScore: number
      spamRisk: boolean
    }
  >
> {
  const results = new Map<
    string,
    {
      meetsCriteria: boolean
      confidence: number
      reasoning: string
      qualityScore: number
      spamRisk: boolean
    }
  >()
  try {
    const openai = await getOpenAI()
    if (!openai) {
      // No OpenAI configured — flag all for manual review
      for (const p of projects) {
        results.set(p.id, {
          meetsCriteria: false,
          confidence: 0,
          reasoning: 'AI screening unavailable — flagged for manual review',
          qualityScore: 5, // neutral midpoint on the 1-10 quality scale
          spamRisk: false,
        })
      }
      return results
    }
    const model = await getConfiguredModel()
    // Anonymize project data — use numeric IDs; real_id never leaves this
    // function and is stripped from the payload before sending.
    const anonymizedProjects = projects.map((p, i) => ({
      project_id: `P${i + 1}`,
      real_id: p.id,
      title: p.title,
      description: p.description?.slice(0, 500) || '', // cap prompt size
      category: p.competitionCategory || 'Unknown',
      ocean_issue: p.oceanIssue || 'Unknown',
      country: p.country || 'Unknown',
      tags: p.tags.join(', '),
      has_files: (p.files?.length || 0) > 0,
    }))
    // Process in batches of 20
    const batchSize = 20
    for (let i = 0; i < anonymizedProjects.length; i += batchSize) {
      const batch = anonymizedProjects.slice(i, i + batchSize)
      const userPrompt = `Evaluate these projects against the following criteria:
CRITERIA: ${config.criteriaText}
PROJECTS:
${JSON.stringify(
  batch.map(({ real_id, ...rest }) => rest),
  null,
  2
)}
Return your evaluation as JSON.`
      const response = await openai.chat.completions.create({
        model,
        messages: [
          { role: 'system', content: AI_SCREENING_SYSTEM_PROMPT },
          { role: 'user', content: userPrompt },
        ],
        response_format: { type: 'json_object' },
        temperature: 0.3,
        max_tokens: 4000,
      })
      const content = response.choices[0]?.message?.content
      if (content) {
        try {
          // Shape promised by AI_SCREENING_SYSTEM_PROMPT; a mismatch (e.g.
          // missing `projects` array) throws and is handled below.
          const parsed = JSON.parse(content) as {
            projects: Array<{
              project_id: string
              meets_criteria: boolean
              confidence: number
              reasoning: string
              quality_score: number
              spam_risk: boolean
            }>
          }
          for (const result of parsed.projects) {
            // Map the anonymized id back to the database id.
            const anon = batch.find((b) => b.project_id === result.project_id)
            if (anon) {
              results.set(anon.real_id, {
                meetsCriteria: result.meets_criteria,
                confidence: result.confidence,
                reasoning: result.reasoning,
                qualityScore: result.quality_score,
                spamRisk: result.spam_risk,
              })
            }
          }
        } catch {
          // Parse error — flag batch for manual review
          for (const item of batch) {
            results.set(item.real_id, {
              meetsCriteria: false,
              confidence: 0,
              reasoning: 'AI response parse error — flagged for manual review',
              qualityScore: 5,
              spamRisk: false,
            })
          }
        }
      }
    }
  } catch {
    // OpenAI error — flag all for manual review (overwrites any partial
    // results gathered from earlier successful batches).
    for (const p of projects) {
      results.set(p.id, {
        meetsCriteria: false,
        confidence: 0,
        reasoning: 'AI screening error — flagged for manual review',
        qualityScore: 5,
        spamRisk: false,
      })
    }
  }
  return results
}
// ─── Main Execution ─────────────────────────────────────────────────────────

/**
 * Executes all active filtering rules against the given projects and
 * aggregates a per-project outcome.
 *
 * Rule handling:
 * - Rules are filtered to `isActive` and ordered by ascending `priority`.
 * - AI_SCREENING rules are pre-computed in batch (one executeAIScreening
 *   pass per rule); FIELD_BASED and DOCUMENT_CHECK rules run per project.
 *   Any other ruleType is skipped.
 * - Outcome precedence: any failing REJECT rule → FILTERED_OUT; otherwise
 *   any failing FLAG rule (AI rules always carry action FLAG) → FLAGGED;
 *   otherwise PASSED. Note a PASS-action rule whose conditions are not met
 *   contributes neither rejection nor flagging, so the project can still
 *   come out PASSED — presumably intentional (informational PASS rules),
 *   but worth confirming.
 *
 * @param rules - Filtering rule rows (active + inactive; inactive skipped).
 * @param projects - Projects to evaluate.
 * @returns One ProjectFilteringResult per input project.
 */
export async function executeFilteringRules(
  rules: FilteringRuleInput[],
  projects: ProjectForFiltering[]
): Promise<ProjectFilteringResult[]> {
  const activeRules = rules
    .filter((r) => r.isActive)
    .sort((a, b) => a.priority - b.priority)
  // Separate AI screening rules (need batch processing)
  const aiRules = activeRules.filter((r) => r.ruleType === 'AI_SCREENING')
  const nonAiRules = activeRules.filter((r) => r.ruleType !== 'AI_SCREENING')
  // Pre-compute AI screening results if needed: rule id -> (project id -> screening)
  const aiResults = new Map<
    string,
    Map<
      string,
      {
        meetsCriteria: boolean
        confidence: number
        reasoning: string
        qualityScore: number
        spamRisk: boolean
      }
    >
  >()
  for (const aiRule of aiRules) {
    const config = aiRule.configJson as unknown as AIScreeningConfig
    const screeningResults = await executeAIScreening(config, projects)
    aiResults.set(aiRule.id, screeningResults)
  }
  // Evaluate each project
  const results: ProjectFilteringResult[] = []
  for (const project of projects) {
    const ruleResults: RuleResult[] = []
    let hasFailed = false // a REJECT rule failed → FILTERED_OUT
    let hasFlagged = false // a FLAG rule (or AI screening) failed → FLAGGED
    // Evaluate non-AI rules
    for (const rule of nonAiRules) {
      let result: { passed: boolean; action: 'PASS' | 'REJECT' | 'FLAG' }
      if (rule.ruleType === 'FIELD_BASED') {
        const config = rule.configJson as unknown as FieldRuleConfig
        result = evaluateFieldRule(config, project)
      } else if (rule.ruleType === 'DOCUMENT_CHECK') {
        const config = rule.configJson as unknown as DocumentCheckConfig
        result = evaluateDocumentRule(config, project)
      } else {
        // Unknown rule type — skip silently.
        continue
      }
      ruleResults.push({
        ruleId: rule.id,
        ruleName: rule.name,
        ruleType: rule.ruleType,
        passed: result.passed,
        action: result.action,
      })
      if (!result.passed) {
        if (result.action === 'REJECT') hasFailed = true
        if (result.action === 'FLAG') hasFlagged = true
      }
    }
    // Evaluate AI rules. Projects with no screening entry (model omitted
    // them — see executeAIScreening) get no RuleResult for that rule.
    for (const aiRule of aiRules) {
      const ruleScreening = aiResults.get(aiRule.id)
      const screening = ruleScreening?.get(project.id)
      if (screening) {
        // Spam risk fails the rule even when the criteria are met.
        const passed = screening.meetsCriteria && !screening.spamRisk
        ruleResults.push({
          ruleId: aiRule.id,
          ruleName: aiRule.name,
          ruleType: 'AI_SCREENING',
          passed,
          action: 'FLAG',
          reasoning: screening.reasoning,
        })
        if (!passed) hasFlagged = true
      }
    }
    // Determine overall outcome (REJECT outranks FLAG outranks PASS)
    let outcome: 'PASSED' | 'FILTERED_OUT' | 'FLAGGED'
    if (hasFailed) {
      outcome = 'FILTERED_OUT'
    } else if (hasFlagged) {
      outcome = 'FLAGGED'
    } else {
      outcome = 'PASSED'
    }
    // Collect AI screening data for persistence/review, keyed by rule id
    const aiScreeningData: Record<string, unknown> = {}
    for (const aiRule of aiRules) {
      const screening = aiResults.get(aiRule.id)?.get(project.id)
      if (screening) {
        aiScreeningData[aiRule.id] = screening
      }
    }
    results.push({
      projectId: project.id,
      outcome,
      ruleResults,
      aiScreeningJson:
        Object.keys(aiScreeningData).length > 0 ? aiScreeningData : undefined,
    })
  }
  return results
}