Observer dashboard extraction, PDF reports, jury UX overhaul, and miscellaneous improvements
- Extract observer dashboard to client component, add PDF export button
- Add PDF report generator with jsPDF for analytics reports
- Overhaul jury evaluation page with improved layout and UX
- Add new analytics endpoints for observer/admin reports
- Improve round creation/edit forms with better settings
- Fix filtering rules page, CSV export dialog, notification bell
- Update auth, prisma schema, and various type fixes

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -634,4 +634,157 @@ export const analyticsRouter = router({
|
||||
|
||||
return stats
|
||||
}),
|
||||
|
||||
/**
 * Get dashboard stats, optionally scoped to a single round.
 *
 * Input: `{ roundId?: string }` (optional). When `roundId` is given,
 * project/assignment/evaluation counts and the score distribution are
 * restricted to that round; program, active-round, and juror counts are
 * always global.
 *
 * Returns counts, a completion rate (submitted evaluations / total
 * assignments, as a whole percentage), and a score distribution over the
 * submitted evaluations' global scores.
 */
getDashboardStats: observerProcedure
  .input(z.object({ roundId: z.string().optional() }).optional())
  .query(async ({ ctx, input }) => {
    const roundId = input?.roundId

    // Prisma `where` fragments shared across the queries below.
    const roundWhere = roundId ? { roundId } : {}
    const assignmentWhere = roundId ? { roundId } : {}
    const evalWhere = roundId
      ? { assignment: { roundId }, status: 'SUBMITTED' as const }
      : { status: 'SUBMITTED' as const }

    // All counts are independent, so run them in parallel.
    const [
      programCount,
      activeRoundCount,
      projectCount,
      jurorCount,
      submittedEvaluations,
      totalAssignments,
      evaluationScores,
    ] = await Promise.all([
      ctx.prisma.program.count(),
      ctx.prisma.round.count({ where: { status: 'ACTIVE' } }),
      ctx.prisma.project.count({ where: roundWhere }),
      ctx.prisma.user.count({ where: { role: 'JURY_MEMBER', status: 'ACTIVE' } }),
      ctx.prisma.evaluation.count({ where: evalWhere }),
      ctx.prisma.assignment.count({ where: assignmentWhere }),
      ctx.prisma.evaluation.findMany({
        where: { ...evalWhere, globalScore: { not: null } },
        select: { globalScore: true },
      }),
    ])

    const completionRate = totalAssignments > 0
      ? Math.round((submittedEvaluations / totalAssignments) * 100)
      : 0

    // Narrow with a type predicate instead of a non-null assertion: the DB
    // query already excludes nulls, but this keeps the types honest without
    // lying to the checker.
    const scores = evaluationScores
      .map((e) => e.globalScore)
      .filter((s): s is number => s != null)

    // Contiguous half-open buckets [min, max) — the previous `max: 8.99`
    // style left gaps (e.g. a score of 8.995 fell into no bucket). The top
    // bucket is closed so a perfect 10 is counted.
    const buckets = [
      { label: '9-10', min: 9, max: 10 },
      { label: '7-8', min: 7, max: 9 },
      { label: '5-6', min: 5, max: 7 },
      { label: '3-4', min: 3, max: 5 },
      { label: '1-2', min: 1, max: 3 },
    ]
    const scoreDistribution = buckets.map((b, i) => ({
      label: b.label,
      count: scores.filter(
        (s) => s >= b.min && (i === 0 ? s <= b.max : s < b.max)
      ).length,
    }))

    return {
      programCount,
      activeRoundCount,
      projectCount,
      jurorCount,
      submittedEvaluations,
      totalEvaluations: totalAssignments,
      completionRate,
      scoreDistribution,
    }
  }),
|
||||
|
||||
/**
 * Get all projects with pagination, filtering, and search (for the observer
 * dashboard).
 *
 * Input:
 * - `roundId` / `status` — optional exact-match filters.
 * - `search` — optional case-insensitive substring match on title or team name.
 * - `page` / `perPage` — 1-based pagination, `perPage` capped at 100.
 *
 * Returns the page of projects (each with its average submitted global score
 * and submitted-evaluation count) plus pagination metadata.
 */
getAllProjects: observerProcedure
  .input(
    z.object({
      roundId: z.string().optional(),
      search: z.string().optional(),
      status: z.string().optional(),
      page: z.number().min(1).default(1),
      perPage: z.number().min(1).max(100).default(20),
    })
  )
  .query(async ({ ctx, input }) => {
    const where: Record<string, unknown> = {}

    if (input.roundId) {
      where.roundId = input.roundId
    }

    if (input.status) {
      where.status = input.status
    }

    if (input.search) {
      where.OR = [
        { title: { contains: input.search, mode: 'insensitive' } },
        { teamName: { contains: input.search, mode: 'insensitive' } },
      ]
    }

    // Fetch the page and the total count in parallel — both use the same
    // `where` so the pagination metadata matches the filtered result set.
    const [projects, total] = await Promise.all([
      ctx.prisma.project.findMany({
        where,
        select: {
          id: true,
          title: true,
          teamName: true,
          status: true,
          country: true,
          round: { select: { id: true, name: true } },
          assignments: {
            select: {
              evaluation: {
                select: { globalScore: true, status: true },
              },
            },
          },
        },
        orderBy: { title: 'asc' },
        skip: (input.page - 1) * input.perPage,
        take: input.perPage,
      }),
      ctx.prisma.project.count({ where }),
    ])

    const mapped = projects.map((p) => {
      // Only submitted evaluations count toward the average.
      const submitted = p.assignments
        .map((a) => a.evaluation)
        .filter((e) => e?.status === 'SUBMITTED')
      // `e?.globalScore` is number | null | undefined, so the guard must
      // reject BOTH null and undefined (`!= null`) — a bare `!== null`
      // would let `undefined` through a lying `s is number` predicate.
      const scores = submitted
        .map((e) => e?.globalScore)
        .filter((s): s is number => s != null)
      const averageScore =
        scores.length > 0
          ? scores.reduce((a, b) => a + b, 0) / scores.length
          : null

      return {
        id: p.id,
        title: p.title,
        teamName: p.teamName,
        status: p.status,
        country: p.country,
        roundId: p.round?.id ?? '',
        roundName: p.round?.name ?? '',
        averageScore,
        evaluationCount: submitted.length,
      }
    })

    return {
      projects: mapped,
      total,
      page: input.page,
      perPage: input.perPage,
      totalPages: Math.ceil(total / input.perPage),
    }
  }),
|
||||
})
|
||||
|
||||
@@ -826,7 +826,7 @@ export const applicantRouter = router({
|
||||
email: input.email,
|
||||
name: input.name,
|
||||
role: 'APPLICANT',
|
||||
status: 'INVITED',
|
||||
status: 'NONE',
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -482,7 +482,7 @@ export const applicationRouter = router({
|
||||
email: member.email,
|
||||
name: member.name,
|
||||
role: 'APPLICANT',
|
||||
status: 'INVITED',
|
||||
status: 'NONE',
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -94,6 +94,23 @@ export const notificationRouter = router({
|
||||
return { success: true }
|
||||
}),
|
||||
|
||||
/**
 * Mark a batch of the current user's notifications as read.
 *
 * Accepts 1–50 notification IDs; only rows that belong to the caller and are
 * still unread are touched, so already-read or foreign IDs are silently
 * ignored.
 */
markBatchAsRead: protectedProcedure
  .input(z.object({ ids: z.array(z.string()).min(1).max(50) }))
  .mutation(async ({ ctx, input }) => {
    const now = new Date()
    // Scope the update to the caller's own unread notifications — the
    // `userId` clause prevents marking another user's rows via guessed IDs.
    const batchFilter = {
      id: { in: input.ids },
      userId: ctx.user.id,
      isRead: false,
    }
    await ctx.prisma.inAppNotification.updateMany({
      where: batchFilter,
      data: { isRead: true, readAt: now },
    })
    return { success: true }
  }),
|
||||
|
||||
/**
|
||||
* Mark all notifications as read for the current user
|
||||
*/
|
||||
|
||||
@@ -71,7 +71,7 @@ export type DocumentCheckConfig = {
|
||||
|
||||
export type AIScreeningConfig = {
|
||||
criteriaText: string
|
||||
action: 'FLAG' // AI screening always flags for human review
|
||||
action: 'PASS' | 'REJECT' | 'FLAG' // REJECT = auto-filter-out, FLAG = flag for human review
|
||||
// Performance settings
|
||||
batchSize?: number // Projects per API call (1-50, default 20)
|
||||
parallelBatches?: number // Concurrent API calls (1-10, default 1)
|
||||
@@ -607,16 +607,21 @@ export async function executeFilteringRules(
|
||||
|
||||
if (screening) {
|
||||
const passed = screening.meetsCriteria && !screening.spamRisk
|
||||
const aiConfig = aiRule.configJson as unknown as AIScreeningConfig
|
||||
const aiAction = aiConfig?.action || 'FLAG'
|
||||
ruleResults.push({
|
||||
ruleId: aiRule.id,
|
||||
ruleName: aiRule.name,
|
||||
ruleType: 'AI_SCREENING',
|
||||
passed,
|
||||
action: 'FLAG',
|
||||
action: aiAction,
|
||||
reasoning: screening.reasoning,
|
||||
})
|
||||
|
||||
if (!passed) hasFlagged = true
|
||||
if (!passed) {
|
||||
if (aiAction === 'REJECT') hasFailed = true
|
||||
else hasFlagged = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user