test: add LLM retrieval accuracy tests

Johann Schopplich
2025-10-27 11:48:33 +01:00
parent eb8f7e28e1
commit 3c840259fe
25 changed files with 21404 additions and 723 deletions


@@ -0,0 +1,39 @@
import process from 'node:process'
import * as url from 'node:url'
export const ROOT_DIR: string = url.fileURLToPath(new URL('../../', import.meta.url))
export const BENCHMARKS_DIR: string = url.fileURLToPath(new URL('../', import.meta.url))
/**
* Benchmark execution configuration
*/
/**
* Enable dry run mode for quick testing with limited AI requests
*
* @remarks
* Set via environment variable: `DRY_RUN=true`
*/
export const DRY_RUN: boolean = process.env.DRY_RUN === 'true'
/**
* Limits applied when DRY_RUN is enabled
*/
export const DRY_RUN_LIMITS = {
/** Maximum number of questions to evaluate */
maxQuestions: 10,
/** Maximum number of formats to test */
maxFormats: undefined as number | undefined,
/** Models to use in dry run */
allowedModels: [] as string[],
}
/**
* Default concurrency for parallel evaluations
*/
export const DEFAULT_CONCURRENCY = 20
/**
* Delay between API requests to avoid rate limiting (in milliseconds)
*/
export const RATE_LIMIT_DELAY_MS = 100
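For orientation, a minimal sketch (not part of this commit) of how a benchmark runner might consume these flags; the runner itself is not shown in this diff:

import { DEFAULT_CONCURRENCY, DRY_RUN, DRY_RUN_LIMITS } from './constants'
import { generateQuestions } from './questions'
// Hypothetical runner excerpt: cap the workload when DRY_RUN=true.
const questions = generateQuestions()
const selected = DRY_RUN ? questions.slice(0, DRY_RUN_LIMITS.maxQuestions) : questions
const concurrency = DRY_RUN ? Math.min(DEFAULT_CONCURRENCY, 2) : DEFAULT_CONCURRENCY
console.log(`Evaluating ${selected.length} questions with concurrency ${concurrency}`)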

benchmarks/src/datasets.ts

@@ -0,0 +1,146 @@
/**
* Datasets for TOON benchmarks
*
* These datasets are designed to test TOON's strengths and weaknesses:
* - Tabular: Uniform records (TOON optimal)
* - Nested: Complex structures with nested objects
* - Analytics: Time-series data
*/
import type { Dataset } from './types'
import { faker } from '@faker-js/faker'
import githubRepos from '../data/github-repos.json' with { type: 'json' }
// Seed for reproducibility
faker.seed(12345)
/**
* Tabular dataset: 100 uniform employee records
*
* @remarks
* Tests TOON's tabular array format
*/
const departments = ['Engineering', 'Sales', 'Marketing', 'HR', 'Operations', 'Finance']
const tabularDataset: Dataset = {
name: 'tabular',
description: 'Uniform employee records (TOON optimal format)',
data: {
employees: Array.from({ length: 100 }, (_, i) => {
const yearsExp = faker.number.int({ min: 1, max: 20 })
return {
id: i + 1,
name: faker.person.fullName(),
email: faker.internet.email().toLowerCase(),
department: departments[i % departments.length]!,
salary: faker.number.int({ min: 45000, max: 150000 }),
yearsExperience: yearsExp,
active: faker.datatype.boolean(0.8), // 80% active
}
}),
},
}
/**
* Nested dataset: 50 e-commerce orders with nested structures
*
* @remarks
* Tests TOON's handling of complex nested objects
*/
const productNames = ['Wireless Mouse', 'USB Cable', 'Laptop Stand', 'Keyboard', 'Webcam', 'Headphones', 'Monitor', 'Desk Lamp']
const statuses = ['pending', 'processing', 'shipped', 'delivered', 'cancelled']
const nestedDataset: Dataset = {
name: 'nested',
description: 'E-commerce orders with nested structures',
data: {
orders: Array.from({ length: 50 }, (_, i) => {
const customerId = (i % 20) + 1
const itemCount = faker.number.int({ min: 1, max: 4 })
const items = Array.from({ length: itemCount }, (_, j) => {
const price = faker.number.float({ min: 9.99, max: 199.99, fractionDigits: 2 })
const quantity = faker.number.int({ min: 1, max: 5 })
return {
sku: `SKU-${faker.string.alphanumeric({ length: 6 }).toUpperCase()}`,
name: productNames[j % productNames.length]!,
quantity,
price,
}
})
const total = Number(items.reduce((sum, item) => sum + (item.price * item.quantity), 0).toFixed(2))
return {
orderId: `ORD-${String(i + 1).padStart(4, '0')}`,
customer: {
id: customerId,
name: faker.person.fullName(),
email: faker.internet.email().toLowerCase(),
},
items,
total,
status: statuses[i % statuses.length]!,
orderDate: faker.date.recent({ days: 90 }).toISOString().split('T')[0],
}
}),
},
}
/**
* Analytics dataset: 60 days of time-series metrics
*
* @remarks
* Tests TOON's handling of numeric data and date fields
*/
const analyticsDataset: Dataset = {
name: 'analytics',
description: 'Time-series analytics data',
data: {
metrics: Array.from({ length: 60 }, (_, i) => {
const date = new Date('2025-01-01')
date.setDate(date.getDate() + i)
// Simulate realistic web traffic with some variation
const baseViews = 5000
const weekendMultiplier = date.getDay() === 0 || date.getDay() === 6 ? 0.7 : 1.0
const views = Math.round(baseViews * weekendMultiplier + faker.number.int({ min: -1000, max: 3000 }))
const clicks = Math.round(views * faker.number.float({ min: 0.02, max: 0.08 }))
const conversions = Math.round(clicks * faker.number.float({ min: 0.05, max: 0.15 }))
const avgOrderValue = faker.number.float({ min: 49.99, max: 299.99 })
const revenue = Number((conversions * avgOrderValue).toFixed(2))
return {
date: date.toISOString().split('T')[0]!,
views,
clicks,
conversions,
revenue,
bounceRate: faker.number.float({ min: 0.3, max: 0.7, fractionDigits: 2 }),
}
}),
},
}
/**
* GitHub dataset: Popular repositories
*
* @remarks
* Tests TOON's tabular format with real-world data
*/
const githubDataset: Dataset = {
name: 'github',
description: 'Popular GitHub repositories',
data: {
repositories: githubRepos.slice(0, 200),
},
}
/**
* All datasets used in the benchmark
*/
export const datasets: Dataset[] = [
tabularDataset,
nestedDataset,
analyticsDataset,
githubDataset,
]
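Because faker is seeded above, the generated records are stable across runs; a quick illustrative check (not part of this commit) of the first employee record:

import { datasets } from './datasets'
// The first tabular employee always gets id 1 and the first department ('Engineering');
// name, email, salary, etc. come from the seeded faker instance.
const tabular = datasets.find(d => d.name === 'tabular')
console.log(tabular?.data.employees[0])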

benchmarks/src/evaluate.ts

@@ -0,0 +1,133 @@
/**
* LLM evaluation logic for TOON benchmarks
*
* Handles:
* - Model configuration
* - Question evaluation with LLMs
* - Answer validation using LLM-as-judge
*/
import type { LanguageModelV2 } from '@ai-sdk/provider'
import type { EvaluationResult, Question } from './types'
import { setTimeout } from 'node:timers/promises'
import { anthropic } from '@ai-sdk/anthropic'
import { openai } from '@ai-sdk/openai'
import { generateText } from 'ai'
import { consola } from 'consola'
import { RATE_LIMIT_DELAY_MS } from './constants'
/**
* Models used for evaluation
*/
export const models: Record<string, LanguageModelV2> = {
'gpt-4o-mini': openai('gpt-4o-mini'),
'claude-haiku-4-5': anthropic('claude-haiku-4-5-20251001'),
}
/**
* Validate an answer using an LLM-as-judge approach
*
* @remarks
* More robust than exact string matching for free-form LLM outputs.
*/
export async function validateAnswer(
actual: string,
expected: string,
question: string,
): Promise<boolean> {
const prompt = `You are validating answers to questions about structured data.
Question: ${question}
Expected answer: ${expected}
Actual answer: ${actual}
Is the actual answer correct? Consider:
- Exact matches are correct
- Semantically equivalent answers are correct (e.g., "50000" vs "$50,000" vs "50000 dollars")
- Minor formatting differences are acceptable
- Case-insensitive comparison for text
Respond with only "YES" or "NO".`
try {
const { text } = await generateText({
model: models['gpt-4o-mini']!,
prompt,
temperature: 0,
maxOutputTokens: 16,
})
await setTimeout(RATE_LIMIT_DELAY_MS)
return text.trim().toUpperCase() === 'YES'
}
catch (error) {
consola.error('Validation error:', error)
// Fallback to simple string comparison
return actual.toLowerCase().trim() === expected.toLowerCase().trim()
}
}
/**
* Evaluate a single question with a specific format and model
*/
export async function evaluateQuestion(
question: Question,
formatName: string,
formattedData: string,
model: LanguageModelV2,
modelName: string,
): Promise<EvaluationResult> {
const prompt = `Given the following data in ${formatName} format:
\`\`\`
${formattedData}
\`\`\`
Question: ${question.prompt}
Provide only the direct answer, without any additional explanation or formatting.`
const startTime = Date.now()
try {
const { text, usage } = await generateText({
model,
prompt,
temperature: 0,
maxOutputTokens: 50,
})
await setTimeout(RATE_LIMIT_DELAY_MS)
const latencyMs = Date.now() - startTime
const correct = await validateAnswer(text.trim(), question.groundTruth, question.prompt)
return {
questionId: question.id,
format: formatName,
model: modelName,
expected: question.groundTruth,
actual: text.trim(),
correct,
inputTokens: usage.inputTokens ?? 0,
outputTokens: usage.outputTokens ?? 0,
latencyMs,
}
}
catch (error) {
consola.error(`Error evaluating ${question.id} with ${formatName}/${modelName}:`, error)
await setTimeout(RATE_LIMIT_DELAY_MS)
return {
questionId: question.id,
format: formatName,
model: modelName,
expected: question.groundTruth,
actual: '',
correct: false,
inputTokens: 0,
outputTokens: 0,
latencyMs: Date.now() - startTime,
}
}
}
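A hedged sketch of how a single evaluation might be driven end to end (the orchestrating script is not part of this excerpt; the import path for the format converters below is assumed):

import { datasets } from './datasets'
import { evaluateQuestion, models } from './evaluate'
import { formatters } from './formatters' // converters module shown below; path assumed
import { generateQuestions } from './questions'
const question = generateQuestions()[0]!
const dataset = datasets.find(d => d.name === question.dataset)!
const formatted = formatters.toon(dataset.data)
const result = await evaluateQuestion(question, 'toon', formatted, models['gpt-4o-mini']!, 'gpt-4o-mini')
console.log(result.correct, `${result.inputTokens} input tokens`, `${result.latencyMs}ms`)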


@@ -0,0 +1,90 @@
/**
* Format converters for TOON benchmarks
*
* Converts data to different formats:
* - JSON
* - TOON
* - CSV
* - Markdown key-value
* - YAML
*/
import { stringify as stringifyCSV } from 'csv-stringify/sync'
import { stringify as stringifyYAML } from 'yaml'
import { encode as encodeToon } from '../../src/index'
export const formatters = {
'json': (data: unknown): string => JSON.stringify(data, undefined, 2),
'toon': (data: unknown): string => encodeToon(data),
'csv': (data: unknown): string => toCSV(data),
'markdown-kv': (data: unknown): string => toMarkdownKV(data),
'yaml': (data: unknown): string => stringifyYAML(data),
}
function toCSV(data: unknown): string {
const sections: string[] = []
// Handle top-level object with arrays
if (typeof data === 'object' && data !== null && !Array.isArray(data)) {
for (const [key, value] of Object.entries(data)) {
if (Array.isArray(value) && value.length > 0) {
sections.push(`# ${key}`)
sections.push(stringifyCSV(value, { header: true }))
}
}
return sections.join('\n').trim()
}
// Root-level array
if (Array.isArray(data) && data.length > 0) {
return stringifyCSV(data, { header: true }).trim()
}
return ''
}
function toMarkdownKV(data: unknown, indent = 0): string {
const spaces = ' '.repeat(indent)
const lines: string[] = []
if (Array.isArray(data)) {
data.forEach((item, i) => {
if (typeof item === 'object' && item !== null && !Array.isArray(item)) {
Object.entries(item).forEach(([key, value]) => {
if (typeof value === 'object' && value !== null) {
lines.push(`${spaces}**${key}**:`)
lines.push(toMarkdownKV(value, indent + 1))
}
else {
lines.push(`${spaces}**${key}**: ${value}`)
}
})
if (i < data.length - 1)
lines.push('')
}
else {
lines.push(`${spaces}- ${item}`)
}
})
}
else if (typeof data === 'object' && data !== null) {
Object.entries(data).forEach(([key, value]) => {
if (Array.isArray(value)) {
lines.push(`${spaces}**${key}**:`)
lines.push(toMarkdownKV(value, indent + 1))
}
else if (typeof value === 'object' && value !== null) {
lines.push(`${spaces}**${key}**:`)
lines.push(toMarkdownKV(value, indent + 1))
}
else {
lines.push(`${spaces}**${key}**: ${value}`)
}
})
}
else {
lines.push(`${spaces}${data}`)
}
return lines.join('\n')
}
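Illustrative input/output for the two hand-rolled converters above (the outputs follow from the code shown; JSON, YAML, and TOON are delegated to their respective libraries):

const sample = { users: [{ id: 1, name: 'Ada' }, { id: 2, name: 'Bob' }] }
console.log(formatters.csv(sample))
// # users
// id,name
// 1,Ada
// 2,Bob
console.log(formatters['markdown-kv'](sample))
// **users**:
//  **id**: 1
//  **name**: Ada
//
//  **id**: 2
//  **name**: Bob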

benchmarks/src/questions.ts

@@ -0,0 +1,398 @@
/* eslint-disable no-console */
/**
* Question generation for TOON benchmarks
*
* Generates ~160 questions across different types:
* - Field retrieval (~70%): "What is X's Y?"
* - Aggregation (~12%): "How many X have Y?"
* - Filtering (~18%): "List/count X where Y"
*
* Questions are generated dynamically based on actual data values
*/
import type { Question } from './types'
import { datasets } from './datasets'
/**
* Generate all questions from datasets
*/
export function generateQuestions(): Question[] {
const questions: Question[] = []
let idCounter = 1
// Get datasets
const tabular = datasets.find(d => d.name === 'tabular')?.data.employees as any[] || []
const nested = datasets.find(d => d.name === 'nested')?.data.orders as any[] || []
const analytics = datasets.find(d => d.name === 'analytics')?.data.metrics as any[] || []
const github = datasets.find(d => d.name === 'github')?.data.repositories as any[] || []
// ========================================
// TABULAR DATASET QUESTIONS (58 questions)
// ========================================
if (tabular.length > 0) {
// Field retrieval: specific employees (40 questions)
for (let i = 0; i < Math.min(40, tabular.length); i++) {
const emp = tabular[i * 2] || tabular[i]
if (!emp)
continue
// Alternate between different field types
if (i % 3 === 0) {
questions.push({
id: `q${idCounter++}`,
prompt: `What is the salary of ${emp.name}?`,
groundTruth: String(emp.salary),
type: 'field-retrieval',
dataset: 'tabular',
})
}
else if (i % 3 === 1) {
questions.push({
id: `q${idCounter++}`,
prompt: `What department does ${emp.name} work in?`,
groundTruth: emp.department,
type: 'field-retrieval',
dataset: 'tabular',
})
}
else {
questions.push({
id: `q${idCounter++}`,
prompt: `What is the email address of ${emp.name}?`,
groundTruth: emp.email,
type: 'field-retrieval',
dataset: 'tabular',
})
}
}
// Aggregation: count by department
const departments = [...new Set(tabular.map((e: any) => e.department))]
for (const dept of departments.slice(0, 6)) {
const count = tabular.filter((e: any) => e.department === dept).length
questions.push({
id: `q${idCounter++}`,
prompt: `How many employees work in ${dept}?`,
groundTruth: String(count),
type: 'aggregation',
dataset: 'tabular',
})
}
// Aggregation: salary ranges (4 questions)
const salaryThresholds = [60000, 80000, 100000, 120000]
for (const threshold of salaryThresholds) {
const count = tabular.filter((e: any) => e.salary > threshold).length
questions.push({
id: `q${idCounter++}`,
prompt: `How many employees have a salary greater than ${threshold}?`,
groundTruth: String(count),
type: 'aggregation',
dataset: 'tabular',
})
}
// Filtering: active status
const activeCount = tabular.filter((e: any) => e.active).length
const inactiveCount = tabular.filter((e: any) => !e.active).length
questions.push(
{
id: `q${idCounter++}`,
prompt: 'How many employees are active?',
groundTruth: String(activeCount),
type: 'filtering',
dataset: 'tabular',
},
{
id: `q${idCounter++}`,
prompt: 'How many employees are inactive?',
groundTruth: String(inactiveCount),
type: 'filtering',
dataset: 'tabular',
},
)
// Complex filtering: multi-condition (6 questions)
for (const dept of departments.slice(0, 4)) {
const count = tabular.filter((e: any) => e.department === dept && e.salary > 80000).length
questions.push({
id: `q${idCounter++}`,
prompt: `How many employees in ${dept} have a salary greater than 80000?`,
groundTruth: String(count),
type: 'filtering',
dataset: 'tabular',
})
}
for (const exp of [5, 10]) {
const count = tabular.filter((e: any) => e.yearsExperience > exp && e.active).length
questions.push({
id: `q${idCounter++}`,
prompt: `How many active employees have more than ${exp} years of experience?`,
groundTruth: String(count),
type: 'filtering',
dataset: 'tabular',
})
}
}
// ========================================
// NESTED DATASET QUESTIONS (44 questions)
// ========================================
if (nested.length > 0) {
// Field retrieval: order totals (20 questions)
for (let i = 0; i < Math.min(20, nested.length); i++) {
const order = nested[i * 2] || nested[i]
if (!order)
continue
if (i % 2 === 0) {
questions.push({
id: `q${idCounter++}`,
prompt: `What is the total amount for order ${order.orderId}?`,
groundTruth: String(order.total),
type: 'field-retrieval',
dataset: 'nested',
})
}
else {
questions.push({
id: `q${idCounter++}`,
prompt: `What is the status of order ${order.orderId}?`,
groundTruth: order.status,
type: 'field-retrieval',
dataset: 'nested',
})
}
}
// Field retrieval: customer info (15 questions)
for (let i = 0; i < Math.min(15, nested.length); i++) {
const order = nested[i * 3] || nested[i]
if (!order)
continue
questions.push({
id: `q${idCounter++}`,
prompt: `What is the customer name for order ${order.orderId}?`,
groundTruth: order.customer.name,
type: 'field-retrieval',
dataset: 'nested',
})
}
// Aggregation: count by status
const statuses = [...new Set(nested.map((o: any) => o.status))]
for (const status of statuses) {
const count = nested.filter((o: any) => o.status === status).length
questions.push({
id: `q${idCounter++}`,
prompt: `How many orders have status "${status}"?`,
groundTruth: String(count),
type: 'filtering',
dataset: 'nested',
})
}
// Aggregation: total revenue
const totalRevenue = nested.reduce((sum: number, o: any) => sum + o.total, 0)
questions.push({
id: `q${idCounter++}`,
prompt: 'What is the total revenue across all orders?',
groundTruth: String(totalRevenue.toFixed(2)),
type: 'aggregation',
dataset: 'nested',
})
// Filtering: high-value orders (3 questions)
const highValueThresholds = [200, 400, 600]
for (const threshold of highValueThresholds) {
const count = nested.filter((o: any) => o.total > threshold).length
questions.push({
id: `q${idCounter++}`,
prompt: `How many orders have a total greater than ${threshold}?`,
groundTruth: String(count),
type: 'filtering',
dataset: 'nested',
})
}
}
// ========================================
// ANALYTICS DATASET QUESTIONS (29 questions)
// ========================================
if (analytics.length > 0) {
// Field retrieval: specific dates (20 questions)
for (let i = 0; i < Math.min(20, analytics.length); i++) {
const metric = analytics[i * 3] || analytics[i]
if (!metric)
continue
if (i % 2 === 0) {
questions.push({
id: `q${idCounter++}`,
prompt: `How many views were recorded on ${metric.date}?`,
groundTruth: String(metric.views),
type: 'field-retrieval',
dataset: 'analytics',
})
}
else {
questions.push({
id: `q${idCounter++}`,
prompt: `What was the revenue on ${metric.date}?`,
groundTruth: String(metric.revenue),
type: 'field-retrieval',
dataset: 'analytics',
})
}
}
// Aggregation: totals (3 questions)
const totalViews = analytics.reduce((sum: number, m: any) => sum + m.views, 0)
const totalRevenue = analytics.reduce((sum: number, m: any) => sum + m.revenue, 0)
const totalConversions = analytics.reduce((sum: number, m: any) => sum + m.conversions, 0)
questions.push(
{
id: `q${idCounter++}`,
prompt: 'What is the total number of views across all dates?',
groundTruth: String(totalViews),
type: 'aggregation',
dataset: 'analytics',
},
{
id: `q${idCounter++}`,
prompt: 'What is the total revenue across all dates?',
groundTruth: String(totalRevenue.toFixed(2)),
type: 'aggregation',
dataset: 'analytics',
},
{
id: `q${idCounter++}`,
prompt: 'What is the total number of conversions across all dates?',
groundTruth: String(totalConversions),
type: 'aggregation',
dataset: 'analytics',
},
)
// Filtering: high-performing days (6 questions)
const viewThresholds = [5000, 6000, 7000]
for (const threshold of viewThresholds) {
const count = analytics.filter((m: any) => m.views > threshold).length
questions.push({
id: `q${idCounter++}`,
prompt: `How many days had more than ${threshold} views?`,
groundTruth: String(count),
type: 'filtering',
dataset: 'analytics',
})
}
const conversionThresholds = [10, 20, 30]
for (const threshold of conversionThresholds) {
const count = analytics.filter((m: any) => m.conversions > threshold).length
questions.push({
id: `q${idCounter++}`,
prompt: `How many days had more than ${threshold} conversions?`,
groundTruth: String(count),
type: 'filtering',
dataset: 'analytics',
})
}
}
// ========================================
// GITHUB DATASET QUESTIONS (32 questions)
// ========================================
if (github.length > 0) {
// Field retrieval: specific repos (20 questions)
for (let i = 0; i < Math.min(20, github.length); i++) {
const repo = github[i * 10] || github[i]
if (!repo)
continue
if (i % 2 === 0) {
questions.push({
id: `q${idCounter++}`,
prompt: `How many stars does ${repo.owner}/${repo.name} have?`,
groundTruth: String(repo.stars),
type: 'field-retrieval',
dataset: 'github',
})
}
else {
questions.push({
id: `q${idCounter++}`,
prompt: `How many forks does ${repo.owner}/${repo.name} have?`,
groundTruth: String(repo.forks),
type: 'field-retrieval',
dataset: 'github',
})
}
}
// Aggregation: count by owner (5 questions)
const owners = [...new Set(github.map((r: any) => r.owner))]
for (const owner of owners.slice(0, 5)) {
const count = github.filter((r: any) => r.owner === owner).length
questions.push({
id: `q${idCounter++}`,
prompt: `How many repositories does ${owner} have in the dataset?`,
groundTruth: String(count),
type: 'aggregation',
dataset: 'github',
})
}
// Aggregation: total stars
const totalStars = github.reduce((sum: number, r: any) => sum + r.stars, 0)
questions.push({
id: `q${idCounter++}`,
prompt: 'What is the total number of stars across all repositories?',
groundTruth: String(totalStars),
type: 'aggregation',
dataset: 'github',
})
// Filtering: popular repos (6 questions)
const starThresholds = [10000, 50000, 100000]
for (const threshold of starThresholds) {
const count = github.filter((r: any) => r.stars > threshold).length
questions.push({
id: `q${idCounter++}`,
prompt: `How many repositories have more than ${threshold} stars?`,
groundTruth: String(count),
type: 'filtering',
dataset: 'github',
})
}
const forkThresholds = [1000, 5000, 10000]
for (const threshold of forkThresholds) {
const count = github.filter((r: any) => r.forks > threshold).length
questions.push({
id: `q${idCounter++}`,
prompt: `How many repositories have more than ${threshold} forks?`,
groundTruth: String(count),
type: 'filtering',
dataset: 'github',
})
}
}
console.log(`📊 Question breakdown:`)
console.log(` Tabular: ${questions.filter(q => q.dataset === 'tabular').length}`)
console.log(` Nested: ${questions.filter(q => q.dataset === 'nested').length}`)
console.log(` Analytics: ${questions.filter(q => q.dataset === 'analytics').length}`)
console.log(` GitHub: ${questions.filter(q => q.dataset === 'github').length}`)
console.log(` Total: ${questions.length}`)
return questions
}
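For reference, the first generated question targets the tabular dataset's first employee (i % 3 === 0 selects the salary field); a sketch of its shape, with the faker-derived values elided:

import { generateQuestions } from './questions'
const [first] = generateQuestions()
// first ≈ {
//   id: 'q1',
//   prompt: 'What is the salary of <employee 1 name>?',
//   groundTruth: '<that employee salary as a string>',
//   type: 'field-retrieval',
//   dataset: 'tabular',
// }
console.log(first)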

benchmarks/src/report.ts

@@ -0,0 +1,288 @@
/**
* Report generation for TOON benchmarks
*
* Handles:
* - Statistical analysis
* - Twitter-ready markdown report generation with visual elements
* - Per-dataset breakdowns
* - Cost analysis
* - Result file saving
*/
import type { EvaluationResult, FormatResult, Question } from './types'
import * as fsp from 'node:fs/promises'
import * as path from 'node:path'
import { encode } from 'gpt-tokenizer'
import { BENCHMARKS_DIR } from './constants'
import { datasets } from './datasets'
import { models } from './evaluate'
/**
* Calculate per-format statistics from evaluation results
*/
export function calculateFormatResults(
results: EvaluationResult[],
tokenCounts: Record<string, number>,
): FormatResult[] {
const formatNames = [...new Set(results.map(r => r.format))]
return formatNames.map((formatName) => {
const formatResults = results.filter(r => r.format === formatName)
const correctCount = formatResults.filter(r => r.correct).length
const totalCount = formatResults.length
const accuracy = correctCount / totalCount
// Calculate average tokens across all datasets for this format
const avgTokens = Object.entries(tokenCounts)
.filter(([key]) => key.startsWith(`${formatName}-`))
.reduce((sum, [, tokens]) => sum + tokens, 0) / datasets.length
const avgInputTokens = formatResults.reduce((sum, r) => sum + r.inputTokens, 0) / totalCount
const avgLatency = formatResults.reduce((sum, r) => sum + r.latencyMs, 0) / totalCount
return {
format: formatName,
accuracy,
totalTokens: Math.round(avgTokens),
avgInputTokens: Math.round(avgInputTokens),
avgLatency: Math.round(avgLatency),
correctCount,
totalCount,
}
}).sort((a, b) => b.accuracy - a.accuracy)
}
/**
* Generate embeddable markdown report from results
*/
export function generateMarkdownReport(
formatResults: FormatResult[],
results: EvaluationResult[],
questions: Question[],
tokenCounts: Record<string, number>,
): string {
const lines: string[] = [
'### Retrieval Accuracy',
'',
]
const toon = formatResults.find(r => r.format === 'toon')
const json = formatResults.find(r => r.format === 'json')
// Model-by-model breakdown (most interesting result)
const modelCount = Object.keys(models).length
lines.push(`Tested across **${modelCount} ${modelCount === 1 ? 'LLM' : 'LLMs'}** with data retrieval tasks:`, '', '```')
for (const modelName of Object.keys(models)) {
const modelResults = formatResults.map((fr) => {
const modelFormatResults = results.filter(r => r.model === modelName && r.format === fr.format)
const correctCount = modelFormatResults.filter(r => r.correct).length
const totalCount = modelFormatResults.length
const accuracy = totalCount > 0 ? correctCount / totalCount : 0
return {
format: fr.format,
accuracy,
correctCount,
totalCount,
}
}).sort((a, b) => b.accuracy - a.accuracy)
const bestResult = modelResults[0]!
const bar = createTokenBar(bestResult.accuracy, 1, 20)
lines.push(`${modelName.padEnd(20)} ${bar} ${(bestResult.accuracy * 100).toFixed(1)}% accuracy`)
}
lines.push('```', '')
// Summary comparison
if (toon && json) {
const tokenSavings = ((1 - toon.totalTokens / json.totalTokens) * 100).toFixed(1)
lines.push(
`**TOON achieves ${(toon.accuracy * 100).toFixed(1)}% accuracy (vs JSON's ${(json.accuracy * 100).toFixed(1)}%) while using ${tokenSavings}% fewer tokens.**`,
'',
)
}
// Simple format comparison table
lines.push(
'| Format | Accuracy | Average Tokens |',
'| ------ | -------- | -------------- |',
)
for (const result of formatResults) {
lines.push(
`| \`${result.format}\` | ${(result.accuracy * 100).toFixed(1)}% | ${result.totalTokens.toLocaleString()} |`,
)
}
lines.push('', '<details>', '<summary><strong>View detailed breakdown by dataset and model</strong></summary>', '', '#### Performance by Dataset', '')
for (const dataset of datasets) {
lines.push(`##### ${dataset.description}`, '')
const datasetResults = formatResults.map((fr) => {
const datasetFormatResults = results.filter(r => r.questionId.includes(dataset.name) || questions.find(q => q.id === r.questionId)?.dataset === dataset.name)
if (datasetFormatResults.length === 0)
return undefined
const formatDatasetResults = datasetFormatResults.filter(r => r.format === fr.format)
if (formatDatasetResults.length === 0)
return undefined
const correctCount = formatDatasetResults.filter(r => r.correct).length
const totalCount = formatDatasetResults.length
const accuracy = totalCount > 0 ? correctCount / totalCount : 0
// Get token count for this dataset+format
const tokenKey = `${fr.format}-${dataset.name}`
const tokens = tokenCounts[tokenKey] || fr.totalTokens
return {
format: fr.format,
accuracy,
tokens,
correctCount,
totalCount,
}
}).filter(Boolean) as { format: string, accuracy: number, tokens: number, correctCount: number, totalCount: number }[]
if (datasetResults.length === 0)
continue
// Sort by efficiency: accuracy squared per 1,000 tokens (weights accuracy more heavily than token cost)
datasetResults.sort((a, b) => {
const effA = (a.accuracy ** 2) / (a.tokens / 1000)
const effB = (b.accuracy ** 2) / (b.tokens / 1000)
return effB - effA
})
lines.push(
'| Format | Accuracy | Tokens | Correct/Total |',
'|--------|----------|--------|---------------|',
)
for (const result of datasetResults.slice(0, 6)) {
lines.push(
`| \`${result.format}\` | ${(result.accuracy * 100).toFixed(1)}% | ${result.tokens.toLocaleString()} | ${result.correctCount}/${result.totalCount} |`,
)
}
lines.push('')
}
// Model breakdown
lines.push('', '#### Performance by Model', '')
for (const modelName of Object.keys(models)) {
lines.push(`##### ${modelName}`, '')
const modelResults = formatResults.map((fr) => {
const modelFormatResults = results.filter(r => r.model === modelName && r.format === fr.format)
const correctCount = modelFormatResults.filter(r => r.correct).length
const totalCount = modelFormatResults.length
const accuracy = totalCount > 0 ? correctCount / totalCount : 0
return {
format: fr.format,
accuracy,
correctCount,
totalCount,
}
}).sort((a, b) => b.accuracy - a.accuracy)
lines.push('| Format | Accuracy | Correct/Total |', '|--------|----------|---------------|')
for (const result of modelResults) {
lines.push(`| \`${result.format}\` | ${(result.accuracy * 100).toFixed(1)}% | ${result.correctCount}/${result.totalCount} |`)
}
lines.push('')
}
// Methodology
lines.push(
'',
'#### Methodology',
'',
'- **Semantic validation**: LLM-as-judge validates responses semantically (not exact string matching).',
'- **Token counting**: Using `gpt-tokenizer` with `o200k_base` encoding.',
'- **Question types**: Field retrieval, aggregation, and filtering tasks.',
'- **Data**: Synthetic datasets generated with Faker.js plus real-world GitHub repository data.',
'',
'</details>',
'',
)
return lines.join('\n')
}
/**
* Calculate token counts for all format+dataset combinations
*/
export function calculateTokenCounts(
formatters: Record<string, (data: any) => string>,
): Record<string, number> {
const tokenCounts: Record<string, number> = {}
for (const [formatName, formatter] of Object.entries(formatters)) {
for (const dataset of datasets) {
const formatted = formatter(dataset.data)
const key = `${formatName}-${dataset.name}`
tokenCounts[key] = encode(formatted).length
}
}
return tokenCounts
}
/**
* Save results to disk
*/
export async function saveResults(
results: EvaluationResult[],
formatResults: FormatResult[],
questions: Question[],
tokenCounts: Record<string, number>,
): Promise<string> {
const resultsDir = path.join(BENCHMARKS_DIR, 'results', 'accuracy')
await fsp.mkdir(resultsDir, { recursive: true })
// Save raw results
await fsp.writeFile(
path.join(resultsDir, 'raw-results.json'),
JSON.stringify(results, undefined, 2),
)
// Save summary
await fsp.writeFile(
path.join(resultsDir, 'summary.json'),
JSON.stringify({
formatResults,
questions: questions.length,
models: Object.keys(models),
datasets: datasets.map(d => ({ name: d.name, description: d.description })),
tokenCounts,
timestamp: new Date().toISOString(),
}, undefined, 2),
)
// Generate markdown report
const report = generateMarkdownReport(formatResults, results, questions, tokenCounts)
await fsp.writeFile(
path.join(resultsDir, 'report.md'),
report,
)
return resultsDir
}
/**
* Render a proportional bar of filled/empty blocks (used above for the accuracy chart)
*/
function createTokenBar(tokens: number, maxTokens: number, width = 30): string {
const filled = Math.round((tokens / maxTokens) * width)
const empty = width - filled
return '█'.repeat(filled) + '░'.repeat(empty)
}
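Finally, a sketch of how the reporting helpers might be chained once all evaluations have been collected (the orchestrating script is not part of this excerpt; the formatters import path is assumed):

import type { EvaluationResult } from './types'
import { formatters } from './formatters' // path assumed
import { generateQuestions } from './questions'
import { calculateFormatResults, calculateTokenCounts, saveResults } from './report'
const questions = generateQuestions()
const results: EvaluationResult[] = [] // populated by evaluateQuestion(...) calls
const tokenCounts = calculateTokenCounts(formatters)
const formatResults = calculateFormatResults(results, tokenCounts)
const resultsDir = await saveResults(results, formatResults, questions, tokenCounts)
console.log(`Results written to ${resultsDir}`)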

benchmarks/src/types.ts

@@ -0,0 +1,35 @@
export interface Dataset {
name: string
description: string
data: any
}
export interface Question {
id: string
prompt: string
groundTruth: string
type: 'field-retrieval' | 'aggregation' | 'filtering' | 'comparison'
dataset: string
}
export interface EvaluationResult {
questionId: string
format: string
model: string
expected: string
actual: string
correct: boolean
inputTokens: number
outputTokens: number
latencyMs: number
}
export interface FormatResult {
format: string
accuracy: number
totalTokens: number
avgInputTokens: number
avgLatency: number
correctCount: number
totalCount: number
}