test: update retrieval accuracy benchmarks
@@ -20,18 +20,69 @@ import { RATE_LIMIT_DELAY_MS } from './constants'
  * Models used for evaluation
  */
 export const models: Record<string, LanguageModelV2> = {
   'gpt-4o-mini': openai('gpt-4o-mini'),
   'gpt-5-nano': openai('gpt-5-nano'),
   'claude-haiku-4-5': anthropic('claude-haiku-4-5-20251001'),
 }
 
 /**
- * Validate an answer using LLM-as-judge approach
- * More robust than string matching for LLM outputs
+ * Evaluate a single question with a specific format and model
  */
-export async function validateAnswer(
-  actual: string,
-  expected: string,
-  question: string,
+export async function evaluateQuestion(
+  question: Question,
+  formatName: string,
+  formattedData: string,
+  model: LanguageModelV2,
+  modelName: string,
+): Promise<EvaluationResult> {
+  const prompt = `Given the following data in ${formatName} format:
+
+\`\`\`
+${formattedData}
+\`\`\`
+
+Question: ${question.prompt}
+
+Provide only the direct answer, without any additional explanation or formatting.`
+
+  const startTime = performance.now()
+  const { text, usage } = await generateText({
+    model,
+    prompt,
+    temperature: model.modelId.startsWith('gpt-') ? undefined : 0,
+  })
+
+  await setTimeout(RATE_LIMIT_DELAY_MS)
+
+  const latencyMs = performance.now() - startTime
+  const correct = await validateAnswer({
+    actual: text.trim(),
+    expected: question.groundTruth,
+    question: question.prompt,
+  })
+
+  return {
+    questionId: question.id,
+    format: formatName,
+    model: modelName,
+    expected: question.groundTruth,
+    actual: text.trim(),
+    correct,
+    inputTokens: usage.inputTokens,
+    outputTokens: usage.outputTokens,
+    latencyMs,
+  }
+}
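For context, a minimal sketch of how the rewritten `evaluateQuestion` might be driven over every question × format × model combination. The `runEvaluation` helper is hypothetical (not part of this commit); it assumes the `models` record, `Question`, `EvaluationResult`, and `evaluateQuestion` shown above, plus a `formats` record of pre-encoded payloads:

```ts
// Hypothetical driver, not part of this commit: exercises evaluateQuestion
// across every question × format × model combination.
async function runEvaluation(
  questions: Question[],
  formats: Record<string, string>, // format name -> pre-encoded data payload
): Promise<EvaluationResult[]> {
  const results: EvaluationResult[] = []
  for (const question of questions) {
    for (const [formatName, formattedData] of Object.entries(formats)) {
      for (const [modelName, model] of Object.entries(models)) {
        // evaluateQuestion already waits RATE_LIMIT_DELAY_MS after each call
        results.push(await evaluateQuestion(question, formatName, formattedData, model, modelName))
      }
    }
  }
  return results
}
```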
+
+/**
+ * Validate an answer using LLM-as-judge approach
+ */
+async function validateAnswer(
+  {
+    actual,
+    expected,
+    question,
+  }:
+  { actual: string, expected: string, question: string },
+): Promise<boolean> {
+  const prompt = `You are validating answers to questions about structured data.

@@ -49,10 +100,9 @@ Respond with only "YES" or "NO".`
 
   try {
     const { text } = await generateText({
-      model: models['gpt-4o-mini']!,
+      model: models['claude-haiku-4-5']!,
       prompt,
       temperature: 0,
       maxOutputTokens: 16,
     })
 
     await setTimeout(RATE_LIMIT_DELAY_MS)
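The gap between this hunk and the next elides how the judge's reply becomes a boolean. A plausible sketch (an assumption, not the actual elided code) that normalizes the reply before falling through to the exact-match comparison shown below:

```ts
// Hypothetical parsing, not the actual elided code: maps the judge's reply to
// a verdict, returning undefined so the caller can use the exact-string-match
// fallback visible in the next hunk.
function parseJudgeReply(text: string): boolean | undefined {
  const verdict = text.trim().toUpperCase()
  if (verdict.startsWith('YES'))
    return true
  if (verdict.startsWith('NO'))
    return false
  return undefined
}
```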
@@ -65,69 +115,3 @@ Respond with only "YES" or "NO".`
     return actual.toLowerCase().trim() === expected.toLowerCase().trim()
   }
 }
-
-/**
- * Evaluate a single question with a specific format and model
- */
-export async function evaluateQuestion(
-  question: Question,
-  formatName: string,
-  formattedData: string,
-  model: any,
-  modelName: string,
-): Promise<EvaluationResult> {
-  const prompt = `Given the following data in ${formatName} format:
-
-\`\`\`
-${formattedData}
-\`\`\`
-
-Question: ${question.prompt}
-
-Provide only the direct answer, without any additional explanation or formatting.`
-
-  const startTime = Date.now()
-
-  try {
-    const { text, usage } = await generateText({
-      model,
-      prompt,
-      temperature: 0,
-      maxOutputTokens: 50,
-    })
-
-    await setTimeout(RATE_LIMIT_DELAY_MS)
-
-    const latencyMs = Date.now() - startTime
-    const correct = await validateAnswer(text.trim(), question.groundTruth, question.prompt)
-
-    return {
-      questionId: question.id,
-      format: formatName,
-      model: modelName,
-      expected: question.groundTruth,
-      actual: text.trim(),
-      correct,
-      inputTokens: usage.inputTokens ?? 0,
-      outputTokens: usage.outputTokens ?? 0,
-      latencyMs,
-    }
-  }
-  catch (error) {
-    consola.error(`Error evaluating ${question.id} with ${formatName}/${modelName}:`, error)
-
-    await setTimeout(RATE_LIMIT_DELAY_MS)
-
-    return {
-      questionId: question.id,
-      format: formatName,
-      model: modelName,
-      expected: question.groundTruth,
-      actual: '',
-      correct: false,
-      inputTokens: 0,
-      outputTokens: 0,
-      latencyMs: Date.now() - startTime,
-    }
-  }
-}
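Note the removed `evaluateQuestion` caught its own errors and returned a zeroed placeholder result; the rewritten version at the top of this diff lets errors propagate. A caller wanting the old resilience could wrap it — a sketch with a hypothetical `evaluateQuestionSafe` name, not part of this commit:

```ts
// Hypothetical wrapper: reproduces the removed catch-branch behavior
// (log, rate-limit delay, placeholder result) around the new throwing
// evaluateQuestion. Omitting inputTokens/outputTokens is valid because
// the fields become optional in the type hunk at the end of this diff.
async function evaluateQuestionSafe(
  question: Question,
  formatName: string,
  formattedData: string,
  model: LanguageModelV2,
  modelName: string,
): Promise<EvaluationResult> {
  const startTime = performance.now()
  try {
    return await evaluateQuestion(question, formatName, formattedData, model, modelName)
  }
  catch (error) {
    consola.error(`Error evaluating ${question.id} with ${formatName}/${modelName}:`, error)
    await setTimeout(RATE_LIMIT_DELAY_MS)
    return {
      questionId: question.id,
      format: formatName,
      model: modelName,
      expected: question.groundTruth,
      actual: '',
      correct: false,
      latencyMs: performance.now() - startTime,
    }
  }
}
```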
@@ -37,15 +37,13 @@ export function calculateFormatResults(
     .filter(([key]) => key.startsWith(`${formatName}-`))
     .reduce((sum, [, tokens]) => sum + tokens, 0) / datasets.length
 
-  const avgInputTokens = formatResults.reduce((sum, r) => sum + r.inputTokens, 0) / totalCount
-  const avgLatency = formatResults.reduce((sum, r) => sum + r.latencyMs, 0) / totalCount
+  const averageLatency = formatResults.reduce((sum, r) => sum + r.latencyMs, 0) / totalCount
 
   return {
     format: formatName,
     accuracy,
     totalTokens: Math.round(avgTokens),
-    avgInputTokens: Math.round(avgInputTokens),
-    avgLatency: Math.round(avgLatency),
+    averageLatency: Math.round(averageLatency),
     correctCount,
     totalCount,
   }
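A worked example of the aggregation above, with hypothetical numbers: `accuracy` is `correctCount / totalCount`, and `averageLatency` is the plain mean of `latencyMs`, rounded only at the end:

```ts
// Hypothetical inputs illustrating calculateFormatResults' aggregation.
const latencies = [812, 1043, 955] // latencyMs of three results
const correctCount = 2
const totalCount = latencies.length

const accuracy = correctCount / totalCount // ≈ 0.667
const averageLatency = Math.round(latencies.reduce((sum, ms) => sum + ms, 0) / totalCount) // 937
```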
@@ -69,11 +67,13 @@ export function generateMarkdownReport(
   const toon = formatResults.find(r => r.format === 'toon')
   const json = formatResults.find(r => r.format === 'json')
 
-  // Model-by-model breakdown (most interesting result)
+  // Model-by-model breakdown with ASCII bars
   const modelCount = Object.keys(models).length
   lines.push(`Tested across **${modelCount} ${modelCount === 1 ? 'LLM' : 'LLMs'}** with data retrieval tasks:`, '', '```')
 
-  for (const modelName of Object.keys(models)) {
+  const modelNames = Object.keys(models)
+  for (let i = 0; i < modelNames.length; i++) {
+    const modelName = modelNames[i]!
     const modelResults = formatResults.map((fr) => {
       const modelFormatResults = results.filter(r => r.model === modelName && r.format === fr.format)
       const correctCount = modelFormatResults.filter(r => r.correct).length
@@ -88,10 +88,16 @@ export function generateMarkdownReport(
       }
     }).sort((a, b) => b.accuracy - a.accuracy)
 
-    const bestResult = modelResults[0]!
-    const bar = createTokenBar(bestResult.accuracy, 1, 20)
-
-    lines.push(`${modelName.padEnd(20)} ${bar} ${(bestResult.accuracy * 100).toFixed(1)}% accuracy`)
+    // Add blank line before model name, except for first model
+    if (i > 0)
+      lines.push('')
+    lines.push(modelName)
+    for (const result of modelResults) {
+      const bar = createProgressBar(result.accuracy, 1, 20)
+      const accuracyStr = `${(result.accuracy * 100).toFixed(1)}%`.padStart(6)
+      const countStr = `(${result.correctCount}/${result.totalCount})`
+      lines.push(` ${result.format.padEnd(12)} ${bar} ${accuracyStr} ${countStr}`)
+    }
   }
 
   lines.push('```', '')
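With hypothetical numbers, the block this loop emits renders roughly as follows: model name on its own line, then one row per format sorted by descending accuracy, with format names padded to 12 characters, 20-cell bars, and percentages right-aligned to 6 characters:

```
gpt-4o-mini
 json         █████████████████░░░  82.6% (19/23)
 toon         ███████████████░░░░░  73.9% (17/23)
```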
@@ -100,24 +106,12 @@ export function generateMarkdownReport(
   if (toon && json) {
     const tokenSavings = ((1 - toon.totalTokens / json.totalTokens) * 100).toFixed(1)
     lines.push(
-      `**TOON achieves ${(toon.accuracy * 100).toFixed(1)}% accuracy (vs JSON's ${(json.accuracy * 100).toFixed(1)}%) while using ${tokenSavings}% fewer tokens.**`,
+      `**Tradeoff:** TOON achieves ${(toon.accuracy * 100).toFixed(1)}% accuracy (vs JSON's ${(json.accuracy * 100).toFixed(1)}%) while using ${tokenSavings}% fewer tokens.`,
       '',
     )
   }
 
-  // Simple format comparison table
-  lines.push(
-    '| Format | Accuracy | Average Tokens |',
-    '| ------ | -------- | -------------- |',
-  )
-
-  for (const result of formatResults) {
-    lines.push(
-      `| \`${result.format}\` | ${(result.accuracy * 100).toFixed(1)}% | ${result.totalTokens.toLocaleString()} |`,
-    )
-  }
-
-  lines.push('', '<details>', '<summary><strong>View detailed breakdown by dataset and model</strong></summary>', '', '#### Performance by Dataset', '')
+  lines.push('<details>', '<summary><strong>View detailed breakdown by dataset and model</strong></summary>', '', '#### Performance by Dataset', '')
 
   for (const dataset of datasets) {
     lines.push(`##### ${dataset.description}`, '')
@@ -173,7 +167,7 @@ export function generateMarkdownReport(
   }
 
   // Model breakdown
-  lines.push('', '#### Performance by Model', '')
+  lines.push('#### Performance by Model', '')
 
   for (const modelName of Object.keys(models)) {
     lines.push(`##### ${modelName}`, '')
@@ -203,7 +197,6 @@ export function generateMarkdownReport(
 
   // Methodology
   lines.push(
-    '',
     '#### Methodology',
     '',
     '- **Semantic validation**: LLM-as-judge validates responses semantically (not exact string matching).',
@@ -252,20 +245,20 @@ export async function saveResults(
   // Save raw results
   await fsp.writeFile(
     path.join(resultsDir, 'raw-results.json'),
-    JSON.stringify(results, undefined, 2),
+    `${JSON.stringify(results, undefined, 2)}\n`,
   )
 
   // Save summary
   await fsp.writeFile(
     path.join(resultsDir, 'summary.json'),
-    JSON.stringify({
+    `${JSON.stringify({
       formatResults,
       questions: questions.length,
      models: Object.keys(models),
       datasets: datasets.map(d => ({ name: d.name, description: d.description })),
       tokenCounts,
       timestamp: new Date().toISOString(),
-    }, undefined, 2),
+    }, undefined, 2)}\n`,
   )
 
   // Generate markdown report
@@ -279,9 +272,9 @@ export async function saveResults(
 }
 
 /**
- * Generate visual bar chart for token counts
+ * Generate visual progress bar using ASCII characters (█ for filled, ░ for empty)
  */
-function createTokenBar(tokens: number, maxTokens: number, width = 30): string {
+function createProgressBar(tokens: number, maxTokens: number, width = 30): string {
   const filled = Math.round((tokens / maxTokens) * width)
   const empty = width - filled
   return '█'.repeat(filled) + '░'.repeat(empty)
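For instance, an accuracy of 0.75 rendered on a 20-cell bar fills `Math.round(0.75 × 20) = 15` cells:

```ts
createProgressBar(0.75, 1, 20) // => '███████████████░░░░░'
```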
@@ -19,8 +19,8 @@ export interface EvaluationResult {
   expected: string
   actual: string
   correct: boolean
-  inputTokens: number
-  outputTokens: number
+  inputTokens?: number
+  outputTokens?: number
   latencyMs: number
 }
 
@@ -28,8 +28,7 @@ export interface FormatResult {
   format: string
   accuracy: number
   totalTokens: number
-  avgInputTokens: number
-  avgLatency: number
+  averageLatency: number
   correctCount: number
   totalCount: number
 }
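Since `inputTokens` and `outputTokens` are now optional — the rewritten `evaluateQuestion` passes `usage.inputTokens` through without the old `?? 0` guard — consumers presumably guard at the point of use. A minimal sketch, assuming a `results: EvaluationResult[]` array:

```ts
// Minimal sketch: aggregate token usage, treating missing counts as zero.
declare const results: EvaluationResult[]

const totalInputTokens = results.reduce((sum, r) => sum + (r.inputTokens ?? 0), 0)
const totalOutputTokens = results.reduce((sum, r) => sum + (r.outputTokens ?? 0), 0)
```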