Improve AI filtering error handling and visibility

- Add listAvailableModels() and validateModel() to openai.ts
- Improve testOpenAIConnection() to test configured model
- Add checkAIStatus endpoint to filtering router
- Add pre-execution AI config check in executeRules
- Improve error messages in AI filtering service (rate limit, quota, etc.)
- Add AI status warning banner on round detail page for filtering rounds

Now admins get clear error messages when AI is misconfigured, instead of projects being silently flagged for manual review.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
2026-02-03 10:46:38 +01:00
parent d45eccea47
commit d068d9b6f6
4 changed files with 214 additions and 8 deletions

View File

@@ -4,8 +4,48 @@ import { Prisma } from '@prisma/client'
import { router, adminProcedure, protectedProcedure } from '../trpc'
import { executeFilteringRules } from '../services/ai-filtering'
import { logAudit } from '../utils/audit'
import { isOpenAIConfigured, testOpenAIConnection } from '@/lib/openai'
export const filteringRouter = router({
/**
* Check if AI is configured and ready for filtering
*/
checkAIStatus: protectedProcedure
  .input(z.object({ roundId: z.string() }))
  .query(async ({ ctx, input }) => {
    // Count the round's active AI screening rules. With none, OpenAI is
    // never invoked for this round, so the status is trivially "ready".
    const activeAIRuleCount = await ctx.prisma.filteringRule.count({
      where: {
        roundId: input.roundId,
        ruleType: 'AI_SCREENING',
        isActive: true,
      },
    })
    if (activeAIRuleCount === 0) {
      return { hasAIRules: false, configured: true, error: null }
    }
    // An API key must be present before probing the API at all.
    if (!(await isOpenAIConfigured())) {
      return {
        hasAIRules: true,
        configured: false,
        error: 'OpenAI API key not configured',
      }
    }
    // Probe the configured model so misconfiguration surfaces here,
    // on the status check, rather than mid-run during rule execution.
    const probe = await testOpenAIConnection()
    return {
      hasAIRules: true,
      configured: probe.success,
      error: probe.error || null,
      model: probe.modelTested,
    }
  }),
/**
* Get filtering rules for a round
*/
@@ -146,6 +186,30 @@ export const filteringRouter = router({
})
}
// Check if any AI_SCREENING rules exist
// Fail fast before touching any projects: if the round relies on AI
// screening but OpenAI is unusable, abort the whole run with an
// actionable error instead of flagging every project downstream.
const hasAIRules = rules.some((r) => r.ruleType === 'AI_SCREENING' && r.isActive)
if (hasAIRules) {
// Verify OpenAI is configured before proceeding
const aiConfigured = await isOpenAIConfigured()
if (!aiConfigured) {
// PRECONDITION_FAILED maps to a client-visible 4xx-style tRPC error.
throw new TRPCError({
code: 'PRECONDITION_FAILED',
message:
'AI screening rules require OpenAI to be configured. Go to Settings → AI to configure your API key.',
})
}
// Also verify the model works
// NOTE(review): testOpenAIConnection() presumably makes a live API
// round-trip, so this guard adds latency to every execution — confirm
// that is acceptable, or cache the result for a short window.
const testResult = await testOpenAIConnection()
if (!testResult.success) {
throw new TRPCError({
code: 'PRECONDITION_FAILED',
message: `AI configuration error: ${testResult.error}. Go to Settings → AI to fix.`,
})
}
}
// Get projects in this round
const roundProjectEntries = await ctx.prisma.roundProject.findMany({
where: { roundId: input.roundId },

View File

@@ -383,13 +383,40 @@ Return your evaluation as JSON.`
}
}
} catch (error) {
// OpenAI error — flag all for manual review
// OpenAI error — flag all for manual review with specific error info
console.error('[AI Filtering] OpenAI API error:', error)
// Extract meaningful error message
// Map the raw OpenAI failure onto a short machine-readable type plus an
// actionable message; both are attached to every flagged project below.
// Matching is on lowercase substrings of the error message, ordered from
// most to least specific.
let errorType = 'unknown_error'
let errorDetail = 'Unknown error occurred'
if (error instanceof Error) {
  const message = error.message.toLowerCase()
  if (message.includes('rate_limit') || message.includes('rate limit')) {
    errorType = 'rate_limit'
    errorDetail = 'OpenAI rate limit exceeded. Try again in a few minutes.'
  } else if (message.includes('model') && (message.includes('not found') || message.includes('does not exist'))) {
    errorType = 'model_not_found'
    errorDetail = 'The configured AI model is not available. Check Settings → AI.'
  } else if (message.includes('insufficient_quota') || message.includes('quota')) {
    errorType = 'quota_exceeded'
    errorDetail = 'OpenAI API quota exceeded. Check your billing settings.'
  } else if (message.includes('invalid_api_key') || message.includes('unauthorized')) {
    errorType = 'invalid_api_key'
    errorDetail = 'Invalid OpenAI API key. Check Settings → AI.'
  } else if (message.includes('context_length') || message.includes('token')) {
    // NOTE(review): 'token' alone is a broad match and may misclassify
    // auth-token errors as context-length errors — confirm against the
    // exact messages the OpenAI SDK emits.
    errorType = 'context_length'
    errorDetail = 'Request too large. Try with fewer projects or shorter descriptions.'
  } else {
    errorDetail = error.message
  }
} else {
  // Non-Error throws (strings, response objects, …) previously collapsed
  // to the generic 'Unknown error occurred'; surface whatever was thrown
  // so admins still get a usable hint.
  errorDetail = String(error)
}
for (const p of projects) {
results.set(p.id, {
meetsCriteria: false,
confidence: 0,
reasoning: `AI screening error — flagged for manual review`,
reasoning: `AI screening error (${errorType}): ${errorDetail}`,
qualityScore: 5,
spamRisk: false,
})