/*
 * MOPC-Portal/src/lib/openai.ts
 * (original listing metadata: 220 lines, 5.2 KiB, TypeScript)
 */
import OpenAI from 'openai'
import { prisma } from './prisma'
// OpenAI client singleton with lazy initialization.
// The client is stashed on globalThis — presumably so dev-server hot
// reloads reuse one instance (see the NODE_ENV !== 'production' guard
// in getOpenAI below); TODO confirm this matches the Next.js setup.
const globalForOpenAI = globalThis as unknown as {
  openai: OpenAI | undefined
  openaiInitialized: boolean
}
/**
 * Resolve the OpenAI API key.
 *
 * Resolution order: SystemSettings row 'openai_api_key' →
 * OPENAI_API_KEY environment variable → null.
 */
async function getOpenAIApiKey(): Promise<string | null> {
  const envKey = process.env.OPENAI_API_KEY || null
  try {
    const row = await prisma.systemSettings.findUnique({
      where: { key: 'openai_api_key' },
    })
    return row?.value || envKey
  } catch {
    // Database unavailable — the env var is the only remaining source.
    return envKey
  }
}
/**
 * Construct a fresh OpenAI SDK client from the resolved API key.
 * Logs a warning and returns null when no key is available.
 */
async function createOpenAIClient(): Promise<OpenAI | null> {
  const apiKey = await getOpenAIApiKey()
  if (apiKey) {
    return new OpenAI({ apiKey })
  }
  console.warn('OpenAI API key not configured')
  return null
}
/**
 * Get the OpenAI client singleton.
 * Returns null if the API key is not configured.
 *
 * In non-production the created client is cached on globalThis. A
 * failed initialization (no key yet) is deliberately NOT cached, so
 * configuring the key in SystemSettings later takes effect on the
 * next call instead of requiring a server restart — the original
 * code cached the null result forever once `openaiInitialized` was set.
 */
export async function getOpenAI(): Promise<OpenAI | null> {
  if (globalForOpenAI.openaiInitialized && globalForOpenAI.openai) {
    return globalForOpenAI.openai
  }
  const client = await createOpenAIClient()
  // Cache only successful creation, and only outside production
  // (matching the original dev-only caching behavior).
  if (process.env.NODE_ENV !== 'production' && client) {
    globalForOpenAI.openai = client
    globalForOpenAI.openaiInitialized = true
  }
  return client
}
/**
 * Whether an OpenAI API key is available (SystemSettings or env var).
 */
export async function isOpenAIConfigured(): Promise<boolean> {
  return (await getOpenAIApiKey()) !== null
}
/**
 * List chat-capable models available to the configured API key.
 *
 * Filters the account's model list down to ids containing 'gpt',
 * 'o1', 'o3', or 'o4' and returns them sorted alphabetically.
 */
export async function listAvailableModels(): Promise<{
  success: boolean
  models?: string[]
  error?: string
}> {
  try {
    const client = await getOpenAI()
    if (!client) {
      return { success: false, error: 'OpenAI API key not configured' }
    }
    const { data } = await client.models.list()
    const tags = ['gpt', 'o1', 'o3', 'o4']
    const models = data
      .map((m) => m.id)
      .filter((id) => tags.some((tag) => id.includes(tag)))
      .sort()
    return { success: true, models }
  } catch (error) {
    const message = error instanceof Error ? error.message : 'Unknown error'
    return { success: false, error: message }
  }
}
/**
 * Validate that a specific model is available for the configured key.
 *
 * Sends a minimal one-token completion probe. A model-not-found error
 * is mapped to a friendly message; any other error is passed through.
 *
 * @param modelId - The OpenAI model id to probe (e.g. 'gpt-4o').
 */
export async function validateModel(modelId: string): Promise<{
  valid: boolean
  error?: string
}> {
  try {
    const client = await getOpenAI()
    if (!client) {
      return {
        valid: false,
        error: 'OpenAI API key not configured',
      }
    }
    // Minimal probe request. Use max_completion_tokens rather than the
    // legacy max_tokens: o-series reasoning models (o1/o3/o4, which
    // listAvailableModels surfaces) reject max_tokens with an
    // unsupported-parameter error, which would falsely fail validation.
    await client.chat.completions.create({
      model: modelId,
      messages: [{ role: 'user', content: 'test' }],
      max_completion_tokens: 1,
    })
    return { valid: true }
  } catch (error) {
    const message = error instanceof Error ? error.message : 'Unknown error'
    // Map model-specific errors to an actionable message.
    if (message.includes('does not exist') || message.includes('model_not_found')) {
      return {
        valid: false,
        error: `Model "${modelId}" is not available with your API key`,
      }
    }
    return {
      valid: false,
      error: message,
    }
  }
}
/**
 * Test OpenAI connectivity with the admin-configured model.
 *
 * Sends a tiny completion request and reports both the model that was
 * requested (`modelTested`) and the model the API actually served
 * (`model`).
 */
export async function testOpenAIConnection(): Promise<{
  success: boolean
  error?: string
  model?: string
  modelTested?: string
}> {
  // Resolve once, up front, so the success and failure paths report
  // the same value (the original re-queried inside the catch block).
  const configuredModel = await getConfiguredModel()
  try {
    const client = await getOpenAI()
    if (!client) {
      return {
        success: false,
        error: 'OpenAI API key not configured',
      }
    }
    // max_completion_tokens (not the legacy max_tokens) keeps this
    // probe working with o-series reasoning models selectable in
    // Settings → AI, which reject max_tokens.
    const response = await client.chat.completions.create({
      model: configuredModel,
      messages: [{ role: 'user', content: 'Hello' }],
      max_completion_tokens: 5,
    })
    return {
      success: true,
      model: response.model,
      modelTested: configuredModel,
    }
  } catch (error) {
    const message = error instanceof Error ? error.message : 'Unknown error'
    // Map model-specific errors to an actionable message.
    if (message.includes('does not exist') || message.includes('model_not_found')) {
      return {
        success: false,
        error: `Model "${configuredModel}" is not available. Check Settings → AI to select a valid model.`,
        modelTested: configuredModel,
      }
    }
    return {
      success: false,
      error: message,
      modelTested: configuredModel,
    }
  }
}
// Default models for different use cases.
// ASSIGNMENT is also the fallback default used by getConfiguredModel().
export const AI_MODELS = {
  ASSIGNMENT: 'gpt-4o', // Best for complex reasoning
  QUICK: 'gpt-4o-mini', // Faster, cheaper for simple tasks
} as const
/**
 * Get the admin-configured AI model from SystemSettings.
 *
 * Resolution order: SystemSettings row 'ai_model' → OPENAI_MODEL env
 * var → the provided fallback (defaults to AI_MODELS.ASSIGNMENT).
 */
export async function getConfiguredModel(fallback: string = AI_MODELS.ASSIGNMENT): Promise<string> {
  const envModel = process.env.OPENAI_MODEL
  try {
    const row = await prisma.systemSettings.findUnique({
      where: { key: 'ai_model' },
    })
    return row?.value || envModel || fallback
  } catch {
    // Database unavailable — fall through to env var, then fallback.
    return envModel || fallback
  }
}