Add LiteLLM proxy support for ChatGPT subscription AI access
All checks were successful
Build and Push Docker Image / build (push) Successful in 8m22s
All checks were successful
Build and Push Docker Image / build (push) Successful in 8m22s
- Add ai_provider setting: 'openai' (API key) or 'litellm' (ChatGPT subscription proxy)
- Auto-strip max_tokens/max_completion_tokens for chatgpt/ prefix models
  (ChatGPT subscription backend rejects token limit fields)
- LiteLLM mode: dummy API key when none configured, base URL required
- isOpenAIConfigured() checks base URL instead of API key for LiteLLM
- listAvailableModels() returns manualEntry flag for LiteLLM (no models.list)
- Settings UI: conditional fields, info banner, manual model input with
  chatgpt/ prefix examples when LiteLLM selected
- All 7 AI services work transparently via buildCompletionParams()

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -8,6 +8,33 @@ const globalForOpenAI = globalThis as unknown as {
|
||||
openaiInitialized: boolean
|
||||
}
|
||||
|
||||
// ─── Provider Detection ─────────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Get the configured AI provider from SystemSettings.
|
||||
* Returns 'openai' (default) or 'litellm' (ChatGPT subscription proxy).
|
||||
*/
|
||||
export async function getConfiguredProvider(): Promise<'openai' | 'litellm'> {
|
||||
try {
|
||||
const setting = await prisma.systemSettings.findUnique({
|
||||
where: { key: 'ai_provider' },
|
||||
})
|
||||
const value = setting?.value || 'openai'
|
||||
return value === 'litellm' ? 'litellm' : 'openai'
|
||||
} catch {
|
||||
return 'openai'
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a model ID indicates LiteLLM ChatGPT subscription routing.
|
||||
* Models like 'chatgpt/gpt-5.2' use the chatgpt/ prefix.
|
||||
* Used by buildCompletionParams (sync) to strip unsupported token limit fields.
|
||||
*/
|
||||
export function isLiteLLMChatGPTModel(model: string): boolean {
|
||||
return model.toLowerCase().startsWith('chatgpt/')
|
||||
}
|
||||
|
||||
// ─── Model Type Detection ────────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
@@ -168,6 +195,12 @@ export function buildCompletionParams(
|
||||
params.response_format = { type: 'json_object' }
|
||||
}
|
||||
|
||||
// LiteLLM ChatGPT subscription models reject token limit fields
|
||||
if (isLiteLLMChatGPTModel(model)) {
|
||||
delete params.max_tokens
|
||||
delete params.max_completion_tokens
|
||||
}
|
||||
|
||||
return params
|
||||
}
|
||||
|
||||
@@ -209,8 +242,12 @@ async function getBaseURL(): Promise<string | undefined> {
|
||||
*/
|
||||
async function createOpenAIClient(): Promise<OpenAI | null> {
|
||||
const apiKey = await getOpenAIApiKey()
|
||||
const provider = await getConfiguredProvider()
|
||||
|
||||
if (!apiKey) {
|
||||
// LiteLLM proxy may not require a real API key
|
||||
const effectiveApiKey = apiKey || (provider === 'litellm' ? 'sk-litellm' : null)
|
||||
|
||||
if (!effectiveApiKey) {
|
||||
console.warn('OpenAI API key not configured')
|
||||
return null
|
||||
}
|
||||
@@ -218,11 +255,11 @@ async function createOpenAIClient(): Promise<OpenAI | null> {
|
||||
const baseURL = await getBaseURL()
|
||||
|
||||
if (baseURL) {
|
||||
console.log(`[OpenAI] Using custom base URL: ${baseURL}`)
|
||||
console.log(`[OpenAI] Using custom base URL: ${baseURL} (provider: ${provider})`)
|
||||
}
|
||||
|
||||
return new OpenAI({
|
||||
apiKey,
|
||||
apiKey: effectiveApiKey,
|
||||
...(baseURL ? { baseURL } : {}),
|
||||
})
|
||||
}
|
||||
@@ -259,6 +296,12 @@ export function resetOpenAIClient(): void {
|
||||
* Check if OpenAI is configured and available
|
||||
*/
|
||||
export async function isOpenAIConfigured(): Promise<boolean> {
|
||||
const provider = await getConfiguredProvider()
|
||||
if (provider === 'litellm') {
|
||||
// LiteLLM just needs a base URL configured
|
||||
const baseURL = await getBaseURL()
|
||||
return !!baseURL
|
||||
}
|
||||
const apiKey = await getOpenAIApiKey()
|
||||
return !!apiKey
|
||||
}
|
||||
@@ -270,8 +313,20 @@ export async function listAvailableModels(): Promise<{
|
||||
success: boolean
|
||||
models?: string[]
|
||||
error?: string
|
||||
manualEntry?: boolean
|
||||
}> {
|
||||
try {
|
||||
const provider = await getConfiguredProvider()
|
||||
|
||||
// LiteLLM proxy for ChatGPT subscription doesn't support models.list()
|
||||
if (provider === 'litellm') {
|
||||
return {
|
||||
success: true,
|
||||
models: [],
|
||||
manualEntry: true,
|
||||
}
|
||||
}
|
||||
|
||||
const client = await getOpenAI()
|
||||
|
||||
if (!client) {
|
||||
|
||||
Reference in New Issue
Block a user