Improve AI filtering error handling and visibility

- Add listAvailableModels() and validateModel() to openai.ts
- Improve testOpenAIConnection() to test configured model
- Add checkAIStatus endpoint to filtering router
- Add pre-execution AI config check in executeRules
- Improve error messages in AI filtering service (rate limit, quota, etc.)
- Add AI status warning banner on round detail page for filtering rounds

Now admins get clear errors when AI is misconfigured instead of silent failures.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
2026-02-03 10:46:38 +01:00
parent d45eccea47
commit d068d9b6f6
4 changed files with 214 additions and 8 deletions

View File

@@ -66,12 +66,12 @@ export async function isOpenAIConfigured(): Promise<boolean> {
}
/**
* Test OpenAI connection
* List available models from OpenAI API
*/
export async function testOpenAIConnection(): Promise<{
export async function listAvailableModels(): Promise<{
success: boolean
models?: string[]
error?: string
model?: string
}> {
try {
const client = await getOpenAI()
@@ -83,9 +83,90 @@ export async function testOpenAIConnection(): Promise<{
}
}
// Simple test request
const response = await client.models.list()
const chatModels = response.data
.filter((m) => m.id.includes('gpt') || m.id.includes('o1') || m.id.includes('o3') || m.id.includes('o4'))
.map((m) => m.id)
.sort()
return {
success: true,
models: chatModels,
}
} catch (error) {
return {
success: false,
error: error instanceof Error ? error.message : 'Unknown error',
}
}
}
/**
 * Validate that a specific model is available.
 *
 * Issues the cheapest possible chat completion (1 token) against the model;
 * a "model not found"-style failure is translated into a friendly message.
 *
 * @param modelId - the OpenAI model identifier to probe
 * @returns valid=true on success, otherwise valid=false plus an error string
 */
export async function validateModel(modelId: string): Promise<{
  valid: boolean
  error?: string
}> {
  try {
    const client = await getOpenAI()
    if (!client) {
      return { valid: false, error: 'OpenAI API key not configured' }
    }
    // Minimal request that still exercises the requested model.
    await client.chat.completions.create({
      model: modelId,
      messages: [{ role: 'user', content: 'test' }],
      max_tokens: 1,
    })
    return { valid: true }
  } catch (error) {
    const message = error instanceof Error ? error.message : 'Unknown error'
    // Distinguish "model missing" from other API failures (auth, quota, network).
    const isMissingModel =
      message.includes('does not exist') || message.includes('model_not_found')
    if (isMissingModel) {
      return {
        valid: false,
        error: `Model "${modelId}" is not available with your API key`,
      }
    }
    return { valid: false, error: message }
  }
}
/**
* Test OpenAI connection with the configured model
*/
export async function testOpenAIConnection(): Promise<{
success: boolean
error?: string
model?: string
modelTested?: string
}> {
try {
const client = await getOpenAI()
if (!client) {
return {
success: false,
error: 'OpenAI API key not configured',
}
}
// Get the configured model
const configuredModel = await getConfiguredModel()
// Test with the configured model
const response = await client.chat.completions.create({
model: 'gpt-4o-mini',
model: configuredModel,
messages: [{ role: 'user', content: 'Hello' }],
max_tokens: 5,
})
@@ -93,11 +174,25 @@ export async function testOpenAIConnection(): Promise<{
return {
success: true,
model: response.model,
modelTested: configuredModel,
}
} catch (error) {
const message = error instanceof Error ? error.message : 'Unknown error'
const configuredModel = await getConfiguredModel()
// Check for model-specific errors
if (message.includes('does not exist') || message.includes('model_not_found')) {
return {
success: false,
error: `Model "${configuredModel}" is not available. Check Settings → AI to select a valid model.`,
modelTested: configuredModel,
}
}
return {
success: false,
error: error instanceof Error ? error.message : 'Unknown error',
error: message,
modelTested: configuredModel,
}
}
}