Add multiple admin improvements and bug fixes
- Email settings: Add separate sender display name field
- Rounds page: Drag-and-drop reordering with visible order numbers
- Round creation: Auto-assign projects to filtering rounds, auto-activate if voting started
- Round detail: Fix incorrect "voting period ended" message for draft rounds
- Projects page: Add delete option with confirmation dialog
- AI filtering: Add configurable batch size and parallel request settings
- Filtering results: Fix duplicate criteria display
- Add seed scripts for notification settings and MOPC onboarding form

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -72,6 +72,9 @@ export type DocumentCheckConfig = {
|
||||
/**
 * Configuration for an AI-based screening rule.
 * Projects are evaluated in batches against free-form criteria text;
 * matches are flagged for human review rather than auto-rejected.
 */
export type AIScreeningConfig = {
  /** Free-form criteria the AI evaluates each project against. */
  criteriaText: string
  /** AI screening always flags for human review; it never auto-rejects. */
  action: 'FLAG'
  // Performance settings
  /** Projects per API call (1-50, default 20). */
  batchSize?: number
  /** Concurrent API calls (1-10, default 1). */
  parallelBatches?: number
}
|
||||
|
||||
/** Union of all supported filtering-rule configuration shapes. */
export type RuleConfig = FieldRuleConfig | DocumentCheckConfig | AIScreeningConfig
|
||||
@@ -124,7 +127,11 @@ interface FilteringRuleInput {
|
||||
|
||||
// ─── Constants ───────────────────────────────────────────────────────────────
|
||||
|
||||
const BATCH_SIZE = 20
|
||||
const DEFAULT_BATCH_SIZE = 20
|
||||
const MAX_BATCH_SIZE = 50
|
||||
const MIN_BATCH_SIZE = 1
|
||||
const DEFAULT_PARALLEL_BATCHES = 1
|
||||
const MAX_PARALLEL_BATCHES = 10
|
||||
|
||||
// Optimized system prompt (compressed for token efficiency)
|
||||
const AI_SCREENING_SYSTEM_PROMPT = `Project screening assistant. Evaluate against criteria, return JSON.
|
||||
@@ -441,7 +448,18 @@ export async function executeAIScreening(
|
||||
}
|
||||
|
||||
const model = await getConfiguredModel()
|
||||
console.log(`[AI Filtering] Using model: ${model} for ${projects.length} projects`)
|
||||
|
||||
// Get batch settings from config
|
||||
const batchSize = Math.min(
|
||||
MAX_BATCH_SIZE,
|
||||
Math.max(MIN_BATCH_SIZE, config.batchSize ?? DEFAULT_BATCH_SIZE)
|
||||
)
|
||||
const parallelBatches = Math.min(
|
||||
MAX_PARALLEL_BATCHES,
|
||||
Math.max(1, config.parallelBatches ?? DEFAULT_PARALLEL_BATCHES)
|
||||
)
|
||||
|
||||
console.log(`[AI Filtering] Using model: ${model} for ${projects.length} projects (batch size: ${batchSize}, parallel: ${parallelBatches})`)
|
||||
|
||||
// Convert and anonymize projects
|
||||
const projectsWithRelations = projects.map(toProjectWithRelations)
|
||||
@@ -454,39 +472,56 @@ export async function executeAIScreening(
|
||||
}
|
||||
|
||||
let totalTokens = 0
|
||||
const totalBatches = Math.ceil(anonymized.length / BATCH_SIZE)
|
||||
const totalBatches = Math.ceil(anonymized.length / batchSize)
|
||||
let processedBatches = 0
|
||||
|
||||
// Process in batches
|
||||
for (let i = 0; i < anonymized.length; i += BATCH_SIZE) {
|
||||
const batchAnon = anonymized.slice(i, i + BATCH_SIZE)
|
||||
const batchMappings = mappings.slice(i, i + BATCH_SIZE)
|
||||
const currentBatch = Math.floor(i / BATCH_SIZE) + 1
|
||||
// Create batch chunks for parallel processing
|
||||
const batches: Array<{ anon: typeof anonymized; maps: typeof mappings; index: number }> = []
|
||||
for (let i = 0; i < anonymized.length; i += batchSize) {
|
||||
batches.push({
|
||||
anon: anonymized.slice(i, i + batchSize),
|
||||
maps: mappings.slice(i, i + batchSize),
|
||||
index: batches.length,
|
||||
})
|
||||
}
|
||||
|
||||
console.log(`[AI Filtering] Processing batch ${currentBatch}/${totalBatches}`)
|
||||
// Process batches in parallel chunks
|
||||
for (let i = 0; i < batches.length; i += parallelBatches) {
|
||||
const parallelChunk = batches.slice(i, i + parallelBatches)
|
||||
|
||||
const { results: batchResults, tokensUsed } = await processAIBatch(
|
||||
openai,
|
||||
model,
|
||||
config.criteriaText,
|
||||
batchAnon,
|
||||
batchMappings,
|
||||
userId,
|
||||
entityId
|
||||
)
|
||||
console.log(`[AI Filtering] Processing batches ${i + 1}-${Math.min(i + parallelBatches, batches.length)} of ${totalBatches} (${parallelChunk.length} in parallel)`)
|
||||
|
||||
totalTokens += tokensUsed
|
||||
// Run parallel batches concurrently
|
||||
const batchPromises = parallelChunk.map(async (batch) => {
|
||||
const { results: batchResults, tokensUsed } = await processAIBatch(
|
||||
openai,
|
||||
model,
|
||||
config.criteriaText,
|
||||
batch.anon,
|
||||
batch.maps,
|
||||
userId,
|
||||
entityId
|
||||
)
|
||||
return { batchResults, tokensUsed, index: batch.index }
|
||||
})
|
||||
|
||||
// Merge batch results
|
||||
for (const [id, result] of batchResults) {
|
||||
results.set(id, result)
|
||||
const parallelResults = await Promise.all(batchPromises)
|
||||
|
||||
// Merge results from all parallel batches
|
||||
for (const { batchResults, tokensUsed } of parallelResults) {
|
||||
totalTokens += tokensUsed
|
||||
for (const [id, result] of batchResults) {
|
||||
results.set(id, result)
|
||||
}
|
||||
processedBatches++
|
||||
}
|
||||
|
||||
// Report progress
|
||||
// Report progress after each parallel chunk
|
||||
if (onProgress) {
|
||||
await onProgress({
|
||||
currentBatch,
|
||||
currentBatch: processedBatches,
|
||||
totalBatches,
|
||||
processedCount: Math.min((currentBatch) * BATCH_SIZE, anonymized.length),
|
||||
processedCount: Math.min(processedBatches * batchSize, anonymized.length),
|
||||
tokensUsed: totalTokens,
|
||||
})
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user