Mirror of https://github.com/voson-wang/toon.git (synced 2026-01-29 23:34:10 +08:00)
test: add LLM retrieval accuracy tests
.gitignore (vendored): 1 changed line
@@ -1,3 +1,4 @@
dist
node_modules
.DS_Store
.env
README.md: 301 changed lines
@@ -42,168 +42,148 @@ users[2]{id,name,role}:
|
||||
- 📐 **Indentation-based structure:** replaces braces with whitespace for better readability
|
||||
- 🧺 **Tabular arrays:** declare keys once, then stream rows without repetition
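For a concrete picture of what those two properties buy, here is a minimal sketch of encoding a small payload with the library's `encode` export (the import path below is illustrative; the benchmark scripts in this commit import it from the package's `src/index`):

```ts
// Minimal sketch: JSON-style object in, TOON text out. Import path is illustrative.
import { encode } from './src/index'

const data = {
  users: [
    { id: 1, name: 'Alice', role: 'admin' },
    { id: 2, name: 'Bob', role: 'user' },
  ],
}

console.log(encode(data))
// Expected shape (indentation approximate):
// users[2]{id,name,role}:
//   1,Alice,admin
//   2,Bob,user
```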
|
||||
|
||||
## Token Benchmarks
|
||||
## Benchmarks
|
||||
|
||||
> [!NOTE]
|
||||
> Benchmarks for LLM accuracy and retrieval are currently in development.
|
||||
<!-- automd:file src="./benchmarks/results/token-efficiency.md" -->
|
||||
|
||||
<!-- automd:file src="./docs/benchmarks.md" -->
|
||||
### Token Efficiency
|
||||
|
||||
| Example | JSON | TOON | Tokens Saved | Reduction |
|
||||
| ------- | ---- | ---- | ------------ | --------- |
|
||||
| 👤 Simple user object | 31 | 18 | 13 | **41.9%** |
|
||||
| 🏷️ User with tags | 48 | 28 | 20 | **41.7%** |
|
||||
| 📦 Small product catalog | 117 | 49 | 68 | **58.1%** |
|
||||
| 👥 API response with users | 123 | 53 | 70 | **56.9%** |
|
||||
| ⚙️ Nested configuration | 68 | 42 | 26 | **38.2%** |
|
||||
| 🛒 E-commerce order | 163 | 94 | 69 | **42.3%** |
|
||||
| 📊 Analytics data | 209 | 94 | 115 | **55.0%** |
|
||||
| 📈 Large dataset (50 records) | 2159 | 762 | 1397 | **64.7%** |
|
||||
| **Total** | **2918** | **1140** | **1778** | **60.9%** |
|
||||
```
|
||||
⭐ GitHub Repositories ██████████████░░░░░░░░░░░ 8,745 tokens (JSON: 15,145) 💰 42.3% saved
|
||||
📈 Analytics Time Series ██████████░░░░░░░░░░░░░░░ 3,631 tokens (JSON: 9,024) 💰 59.8% saved
|
||||
👥 API Response ██████████████░░░░░░░░░░░ 2,593 tokens (JSON: 4,589) 💰 43.5% saved
|
||||
🛒 E-commerce Order ███████████████░░░░░░░░░░ 203 tokens (JSON: 338) 💰 39.9% saved
|
||||
```
|
||||
|
||||
**Total:** 15,172 tokens (TOON) vs 29,096 tokens (JSON) → 47.9% savings
|
||||
|
||||
<details>
|
||||
<summary><strong>View detailed results</strong></summary>
|
||||
<summary><strong>View detailed examples</strong></summary>
|
||||
|
||||
### 📦 Small product catalog
|
||||
#### ⭐ GitHub Repositories
|
||||
|
||||
**Savings: 68 tokens (58.1% reduction)**
|
||||
**Configuration:** Top 100 GitHub repositories with stars, forks, and metadata
|
||||
|
||||
**JSON** (117 tokens):
|
||||
**Savings:** 6,400 tokens (42.3% reduction)
|
||||
|
||||
**JSON** (15,145 tokens):
|
||||
|
||||
```json
|
||||
{
|
||||
"items": [
|
||||
"repositories": [
|
||||
{
|
||||
"sku": "A1",
|
||||
"name": "Widget",
|
||||
"qty": 2,
|
||||
"price": 9.99
|
||||
"id": 28457823,
|
||||
"name": "freeCodeCamp",
|
||||
"repo": "freeCodeCamp/freeCodeCamp",
|
||||
"description": "freeCodeCamp.org's open-source codebase and curriculum. Learn math, programming,...",
|
||||
"createdAt": "2014-12-24T17:49:19Z",
|
||||
"updatedAt": "2025-10-27T07:40:58Z",
|
||||
"pushedAt": "2025-10-26T11:31:08Z",
|
||||
"stars": 430828,
|
||||
"watchers": 8582,
|
||||
"forks": 42136,
|
||||
"defaultBranch": "main"
|
||||
},
|
||||
{
|
||||
"sku": "B2",
|
||||
"name": "Gadget",
|
||||
"qty": 1,
|
||||
"price": 14.5
|
||||
"id": 132750724,
|
||||
"name": "build-your-own-x",
|
||||
"repo": "codecrafters-io/build-your-own-x",
|
||||
"description": "Master programming by recreating your favorite technologies from scratch.",
|
||||
"createdAt": "2018-05-09T12:03:18Z",
|
||||
"updatedAt": "2025-10-27T07:43:25Z",
|
||||
"pushedAt": "2025-10-10T18:45:01Z",
|
||||
"stars": 430102,
|
||||
"watchers": 6322,
|
||||
"forks": 40388,
|
||||
"defaultBranch": "master"
|
||||
},
|
||||
{
|
||||
"sku": "C3",
|
||||
"name": "Doohickey",
|
||||
"qty": 5,
|
||||
"price": 7.25
|
||||
"id": 21737465,
|
||||
"name": "awesome",
|
||||
"repo": "sindresorhus/awesome",
|
||||
"description": "😎 Awesome lists about all kinds of interesting topics",
|
||||
"createdAt": "2014-07-11T13:42:37Z",
|
||||
"updatedAt": "2025-10-27T07:44:27Z",
|
||||
"pushedAt": "2025-10-23T17:26:53Z",
|
||||
"stars": 409760,
|
||||
"watchers": 8016,
|
||||
"forks": 32015,
|
||||
"defaultBranch": "main"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**TOON** (49 tokens):
|
||||
**TOON** (8,745 tokens):
|
||||
|
||||
```
|
||||
items[3]{sku,name,qty,price}:
|
||||
A1,Widget,2,9.99
|
||||
B2,Gadget,1,14.5
|
||||
C3,Doohickey,5,7.25
|
||||
repositories[3]{id,name,repo,description,createdAt,updatedAt,pushedAt,stars,watchers,forks,defaultBranch}:
|
||||
28457823,freeCodeCamp,freeCodeCamp/freeCodeCamp,"freeCodeCamp.org's open-source codebase and curriculum. Learn math, programming,...","2014-12-24T17:49:19Z","2025-10-27T07:40:58Z","2025-10-26T11:31:08Z",430828,8582,42136,main
|
||||
132750724,build-your-own-x,codecrafters-io/build-your-own-x,Master programming by recreating your favorite technologies from scratch.,"2018-05-09T12:03:18Z","2025-10-27T07:43:25Z","2025-10-10T18:45:01Z",430102,6322,40388,master
|
||||
21737465,awesome,sindresorhus/awesome,😎 Awesome lists about all kinds of interesting topics,"2014-07-11T13:42:37Z","2025-10-27T07:44:27Z","2025-10-23T17:26:53Z",409760,8016,32015,main
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 👥 API response with users
|
||||
#### 📈 Analytics Time Series
|
||||
|
||||
**Savings: 70 tokens (56.9% reduction)**
|
||||
**Configuration:** 180 days of web metrics (views, clicks, conversions, revenue)
|
||||
|
||||
**JSON** (123 tokens):
|
||||
**Savings:** 5,393 tokens (59.8% reduction)
|
||||
|
||||
```json
|
||||
{
|
||||
"users": [
|
||||
{
|
||||
"id": 1,
|
||||
"name": "Alice",
|
||||
"email": "alice@example.com",
|
||||
"active": true
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"name": "Bob",
|
||||
"email": "bob@example.com",
|
||||
"active": true
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"name": "Charlie",
|
||||
"email": "charlie@example.com",
|
||||
"active": false
|
||||
}
|
||||
],
|
||||
"total": 3,
|
||||
"page": 1
|
||||
}
|
||||
```
|
||||
|
||||
**TOON** (53 tokens):
|
||||
|
||||
```
|
||||
users[3]{id,name,email,active}:
|
||||
1,Alice,alice@example.com,true
|
||||
2,Bob,bob@example.com,true
|
||||
3,Charlie,charlie@example.com,false
|
||||
total: 3
|
||||
page: 1
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 📊 Analytics data
|
||||
|
||||
**Savings: 115 tokens (55.0% reduction)**
|
||||
|
||||
**JSON** (209 tokens):
|
||||
**JSON** (9,024 tokens):
|
||||
|
||||
```json
|
||||
{
|
||||
"metrics": [
|
||||
{
|
||||
"date": "2024-12-31",
|
||||
"views": 3769,
|
||||
"clicks": 400,
|
||||
"conversions": 59,
|
||||
"revenue": 198.98
|
||||
},
|
||||
{
|
||||
"date": "2025-01-01",
|
||||
"views": 1234,
|
||||
"clicks": 89,
|
||||
"conversions": 12
|
||||
"views": 5742,
|
||||
"clicks": 463,
|
||||
"conversions": 28,
|
||||
"revenue": 295.77
|
||||
},
|
||||
{
|
||||
"date": "2025-01-02",
|
||||
"views": 2345,
|
||||
"clicks": 156,
|
||||
"conversions": 23
|
||||
"views": 3669,
|
||||
"clicks": 336,
|
||||
"conversions": 102,
|
||||
"revenue": 624.23
|
||||
},
|
||||
{
|
||||
"date": "2025-01-03",
|
||||
"views": 1890,
|
||||
"clicks": 123,
|
||||
"conversions": 18
|
||||
"views": 1332,
|
||||
"clicks": 304,
|
||||
"conversions": 99,
|
||||
"revenue": 113.06
|
||||
},
|
||||
{
|
||||
"date": "2025-01-04",
|
||||
"views": 3456,
|
||||
"clicks": 234,
|
||||
"conversions": 34
|
||||
},
|
||||
{
|
||||
"date": "2025-01-05",
|
||||
"views": 2789,
|
||||
"clicks": 178,
|
||||
"conversions": 27
|
||||
"views": 1444,
|
||||
"clicks": 222,
|
||||
"conversions": 88,
|
||||
"revenue": 986.69
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**TOON** (94 tokens):
|
||||
**TOON** (3,631 tokens):
|
||||
|
||||
```
|
||||
metrics[5]{date,views,clicks,conversions}:
|
||||
2025-01-01,1234,89,12
|
||||
2025-01-02,2345,156,23
|
||||
2025-01-03,1890,123,18
|
||||
2025-01-04,3456,234,34
|
||||
2025-01-05,2789,178,27
|
||||
metrics[5]{date,views,clicks,conversions,revenue}:
|
||||
2024-12-31,3769,400,59,198.98
|
||||
2025-01-01,5742,463,28,295.77
|
||||
2025-01-02,3669,336,102,624.23
|
||||
2025-01-03,1332,304,99,113.06
|
||||
2025-01-04,1444,222,88,986.69
|
||||
```
|
||||
|
||||
</details>
|
||||
@@ -213,6 +193,107 @@ metrics[5]{date,views,clicks,conversions}:
|
||||
> [!NOTE]
|
||||
> Measured with [`gpt-tokenizer`](https://github.com/niieani/gpt-tokenizer) using `o200k_base` encoding (used by GPT-5 and other modern models). Savings will vary across models and tokenizers.
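For reference, the measurement itself is only a few lines; this is a condensed sketch of what `benchmarks/scripts/token-efficiency-benchmark.ts` (added later in this commit) does per example, with an illustrative import path for the TOON encoder:

```ts
import { encode as countTokens } from 'gpt-tokenizer' // o200k_base encoding by default
import { encode as encodeToon } from './src/index' // TOON encoder (illustrative path)

const data = { users: [{ id: 1, name: 'Alice', role: 'admin' }] }

const jsonTokens = countTokens(JSON.stringify(data, undefined, 2)).length
const toonTokens = countTokens(encodeToon(data)).length
console.log(`saved ${((1 - toonTokens / jsonTokens) * 100).toFixed(1)}%`)
```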
|
||||
|
||||
<!-- automd:file src="./benchmarks/results/accuracy/report.md" -->
|
||||
|
||||
### Retrieval Accuracy
|
||||
|
||||
Tested across **2 LLMs** with data retrieval tasks:
|
||||
|
||||
```
|
||||
gpt-4o-mini ██████████████░░░░░░ 72.3% accuracy
|
||||
claude-haiku-4-5 ███████████████░░░░░ 76.7% accuracy
|
||||
```
|
||||
|
||||
**TOON achieves 73.9% accuracy (vs JSON's 73.6%) while using 46.3% fewer tokens.**
|
||||
|
||||
| Format | Accuracy | Average Tokens |
|
||||
| ------ | -------- | -------------- |
|
||||
| `toon` | 73.9% | 4.678 |
|
||||
| `json` | 73.6% | 8.713 |
|
||||
| `markdown-kv` | 73.6% | 8.649 |
|
||||
| `csv` | 72.3% | 4.745 |
|
||||
| `yaml` | 71.7% | 7.091 |
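The headline figures above can be reproduced from `benchmarks/results/accuracy/summary.json`, which this commit also adds (correct/total counts and total token counts per format):

```ts
// Values copied from summary.json below.
const toon = { correct: 235, total: 318, tokens: 4678 }
const json = { correct: 234, total: 318, tokens: 8713 }

console.log(((toon.correct / toon.total) * 100).toFixed(1)) // 73.9 (TOON accuracy)
console.log(((json.correct / json.total) * 100).toFixed(1)) // 73.6 (JSON accuracy)
console.log(((1 - toon.tokens / json.tokens) * 100).toFixed(1)) // 46.3 (fewer tokens)
```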
|
||||
|
||||
<details>
|
||||
<summary><strong>View detailed breakdown by dataset and model</strong></summary>
|
||||
|
||||
#### Performance by Dataset
|
||||
|
||||
##### Uniform employee records (TOON optimal format)
|
||||
|
||||
| Format | Accuracy | Tokens | Correct/Total |
|
||||
|--------|----------|--------|---------------|
|
||||
| `toon` | 72.4% | 2.483 | 84/116 |
|
||||
| `csv` | 69.0% | 2.337 | 80/116 |
|
||||
| `yaml` | 68.1% | 4.969 | 79/116 |
|
||||
| `markdown-kv` | 68.1% | 6.270 | 79/116 |
|
||||
| `json` | 68.1% | 6.347 | 79/116 |
|
||||
|
||||
##### E-commerce orders with nested structures
|
||||
|
||||
| Format | Accuracy | Tokens | Correct/Total |
|
||||
|--------|----------|--------|---------------|
|
||||
| `toon` | 84.1% | 5.967 | 74/88 |
|
||||
| `csv` | 83.0% | 6.735 | 73/88 |
|
||||
| `yaml` | 81.8% | 7.328 | 72/88 |
|
||||
| `markdown-kv` | 86.4% | 9.110 | 76/88 |
|
||||
| `json` | 84.1% | 9.694 | 74/88 |
|
||||
|
||||
##### Time-series analytics data
|
||||
|
||||
| Format | Accuracy | Tokens | Correct/Total |
|
||||
|--------|----------|--------|---------------|
|
||||
| `csv` | 72.4% | 1.393 | 42/58 |
|
||||
| `toon` | 70.7% | 1.515 | 41/58 |
|
||||
| `yaml` | 72.4% | 2.938 | 42/58 |
|
||||
| `json` | 74.1% | 3.665 | 43/58 |
|
||||
| `markdown-kv` | 70.7% | 3.779 | 41/58 |
|
||||
|
||||
##### Popular GitHub repositories
|
||||
|
||||
| Format | Accuracy | Tokens | Correct/Total |
|
||||
|--------|----------|--------|---------------|
|
||||
| `toon` | 64.3% | 8.745 | 36/56 |
|
||||
| `csv` | 62.5% | 8.513 | 35/56 |
|
||||
| `json` | 67.9% | 15.145 | 38/56 |
|
||||
| `markdown-kv` | 67.9% | 15.436 | 38/56 |
|
||||
| `yaml` | 62.5% | 13.129 | 35/56 |
|
||||
|
||||
|
||||
#### Performance by Model
|
||||
|
||||
##### gpt-4o-mini
|
||||
|
||||
| Format | Accuracy | Correct/Total |
|
||||
|--------|----------|---------------|
|
||||
| `toon` | 72.3% | 115/159 |
|
||||
| `json` | 71.7% | 114/159 |
|
||||
| `markdown-kv` | 70.4% | 112/159 |
|
||||
| `csv` | 69.2% | 110/159 |
|
||||
| `yaml` | 68.6% | 109/159 |
|
||||
|
||||
##### claude-haiku-4-5
|
||||
|
||||
| Format | Accuracy | Correct/Total |
|
||||
|--------|----------|---------------|
|
||||
| `markdown-kv` | 76.7% | 122/159 |
|
||||
| `toon` | 75.5% | 120/159 |
|
||||
| `json` | 75.5% | 120/159 |
|
||||
| `csv` | 75.5% | 120/159 |
|
||||
| `yaml` | 74.8% | 119/159 |
|
||||
|
||||
|
||||
#### Methodology
|
||||
|
||||
- **Semantic validation**: LLM-as-judge validates responses semantically (not exact string matching).
|
||||
- **Token counting**: Using `gpt-tokenizer` with `o200k_base` encoding.
|
||||
- **Question types**: Field retrieval, aggregation, and filtering tasks.
|
||||
- **Real data**: Faker.js-generated datasets + GitHub repositories.
|
||||
|
||||
</details>
|
||||
|
||||
<!-- /automd -->
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
|
||||
benchmarks/.env.example: 3 lines (new file)
@@ -0,0 +1,3 @@
OPENAI_API_KEY=
ANTHROPIC_API_KEY=
GOOGLE_GENERATIVE_AI_API_KEY=
benchmarks/data/github-repos.json: 1302 lines (new file; diff suppressed because it is too large)
benchmarks/package.json: 26 lines (new file)
@@ -0,0 +1,26 @@
|
||||
{
|
||||
"name": "@toon/benchmarks",
|
||||
"type": "module",
|
||||
"private": true,
|
||||
"scripts": {
|
||||
"benchmark:token-efficiency": "tsx scripts/token-efficiency-benchmark.ts",
|
||||
"benchmark:accuracy": "tsx --env-file=.env scripts/accuracy-benchmark.ts",
|
||||
"fetch-github-data": "tsx scripts/fetch-github-data.ts",
|
||||
"test": "vitest"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@ai-sdk/anthropic": "^2.0.37",
|
||||
"@ai-sdk/google": "^2.0.23",
|
||||
"@ai-sdk/openai": "^2.0.53",
|
||||
"@ai-sdk/provider": "^2.0.0",
|
||||
"@antfu/eslint-config": "^6.1.0",
|
||||
"@faker-js/faker": "^10.1.0",
|
||||
"ai": "^5.0.80",
|
||||
"consola": "^3.4.2",
|
||||
"csv-stringify": "^6.6.0",
|
||||
"gpt-tokenizer": "^3.2.0",
|
||||
"ofetch": "^1.4.1",
|
||||
"p-map": "^7.0.3",
|
||||
"yaml": "^2.8.1"
|
||||
}
|
||||
}
|
||||
benchmarks/results/accuracy/accuracy.md: 96 lines (new file)
@@ -0,0 +1,96 @@
|
||||
### Retrieval Accuracy
|
||||
|
||||
Tested across **2 LLMs** with data retrieval tasks:
|
||||
|
||||
```
|
||||
gpt-4o-mini ██████████████░░░░░░ 72.3% accuracy
|
||||
claude-haiku-4-5 ███████████████░░░░░ 76.7% accuracy
|
||||
```
|
||||
|
||||
**TOON achieves 73.9% accuracy (vs JSON's 73.6%) while using 46.3% fewer tokens.**
|
||||
|
||||
| Format | Accuracy | Average Tokens |
|
||||
| ------ | -------- | -------------- |
|
||||
| `toon` | 73.9% | 4.678 |
|
||||
| `json` | 73.6% | 8.713 |
|
||||
| `markdown-kv` | 73.6% | 8.649 |
|
||||
| `csv` | 72.3% | 4.745 |
|
||||
| `yaml` | 71.7% | 7.091 |
|
||||
|
||||
<details>
|
||||
<summary><strong>View detailed breakdown by dataset and model</strong></summary>
|
||||
|
||||
#### Performance by Dataset
|
||||
|
||||
##### Uniform employee records (TOON optimal format)
|
||||
|
||||
| Format | Accuracy | Tokens | Correct/Total |
|
||||
|--------|----------|--------|---------------|
|
||||
| `toon` | 72.4% | 2.483 | 84/116 |
|
||||
| `csv` | 69.0% | 2.337 | 80/116 |
|
||||
| `yaml` | 68.1% | 4.969 | 79/116 |
|
||||
| `markdown-kv` | 68.1% | 6.270 | 79/116 |
|
||||
| `json` | 68.1% | 6.347 | 79/116 |
|
||||
|
||||
##### E-commerce orders with nested structures
|
||||
|
||||
| Format | Accuracy | Tokens | Correct/Total |
|
||||
|--------|----------|--------|---------------|
|
||||
| `toon` | 84.1% | 5.967 | 74/88 |
|
||||
| `csv` | 83.0% | 6.735 | 73/88 |
|
||||
| `yaml` | 81.8% | 7.328 | 72/88 |
|
||||
| `markdown-kv` | 86.4% | 9.110 | 76/88 |
|
||||
| `json` | 84.1% | 9.694 | 74/88 |
|
||||
|
||||
##### Time-series analytics data
|
||||
|
||||
| Format | Accuracy | Tokens | Correct/Total |
|
||||
|--------|----------|--------|---------------|
|
||||
| `csv` | 72.4% | 1.393 | 42/58 |
|
||||
| `toon` | 70.7% | 1.515 | 41/58 |
|
||||
| `yaml` | 72.4% | 2.938 | 42/58 |
|
||||
| `json` | 74.1% | 3.665 | 43/58 |
|
||||
| `markdown-kv` | 70.7% | 3.779 | 41/58 |
|
||||
|
||||
##### Popular GitHub repositories
|
||||
|
||||
| Format | Accuracy | Tokens | Correct/Total |
|
||||
|--------|----------|--------|---------------|
|
||||
| `toon` | 64.3% | 8.745 | 36/56 |
|
||||
| `csv` | 62.5% | 8.513 | 35/56 |
|
||||
| `json` | 67.9% | 15.145 | 38/56 |
|
||||
| `markdown-kv` | 67.9% | 15.436 | 38/56 |
|
||||
| `yaml` | 62.5% | 13.129 | 35/56 |
|
||||
|
||||
|
||||
#### Performance by Model
|
||||
|
||||
##### gpt-4o-mini
|
||||
|
||||
| Format | Accuracy | Correct/Total |
|
||||
|--------|----------|---------------|
|
||||
| `toon` | 72.3% | 115/159 |
|
||||
| `json` | 71.7% | 114/159 |
|
||||
| `markdown-kv` | 70.4% | 112/159 |
|
||||
| `csv` | 69.2% | 110/159 |
|
||||
| `yaml` | 68.6% | 109/159 |
|
||||
|
||||
##### claude-haiku-4-5
|
||||
|
||||
| Format | Accuracy | Correct/Total |
|
||||
|--------|----------|---------------|
|
||||
| `markdown-kv` | 76.7% | 122/159 |
|
||||
| `toon` | 75.5% | 120/159 |
|
||||
| `json` | 75.5% | 120/159 |
|
||||
| `csv` | 75.5% | 120/159 |
|
||||
| `yaml` | 74.8% | 119/159 |
|
||||
|
||||
|
||||
#### Methodology
|
||||
|
||||
- **Semantic validation**: LLM-as-judge validates responses semantically (not exact string matching)
|
||||
- **Token counting**: Using `gpt-tokenizer` with `o200k_base` encoding
|
||||
- **Question types**: Field retrieval, aggregation, and filtering tasks
|
||||
- **Real data**: faker.js-generated datasets + real GitHub repository data
|
||||
|
||||
</details>
|
||||
benchmarks/results/accuracy/raw-results.json: 17492 lines (new file; diff suppressed because it is too large)
benchmarks/results/accuracy/report.md: 96 lines (new file)
@@ -0,0 +1,96 @@
|
||||
### Retrieval Accuracy
|
||||
|
||||
Tested across **2 LLMs** with data retrieval tasks:
|
||||
|
||||
```
|
||||
gpt-4o-mini ██████████████░░░░░░ 72.3% accuracy
|
||||
claude-haiku-4-5 ███████████████░░░░░ 76.7% accuracy
|
||||
```
|
||||
|
||||
**TOON achieves 73.9% accuracy (vs JSON's 73.6%) while using 46.3% fewer tokens.**
|
||||
|
||||
| Format | Accuracy | Average Tokens |
|
||||
| ------ | -------- | -------------- |
|
||||
| `toon` | 73.9% | 4.678 |
|
||||
| `json` | 73.6% | 8.713 |
|
||||
| `markdown-kv` | 73.6% | 8.649 |
|
||||
| `csv` | 72.3% | 4.745 |
|
||||
| `yaml` | 71.7% | 7.091 |
|
||||
|
||||
<details>
|
||||
<summary><strong>View detailed breakdown by dataset and model</strong></summary>
|
||||
|
||||
#### Performance by Dataset
|
||||
|
||||
##### Uniform employee records (TOON optimal format)
|
||||
|
||||
| Format | Accuracy | Tokens | Correct/Total |
|
||||
|--------|----------|--------|---------------|
|
||||
| `toon` | 72.4% | 2.483 | 84/116 |
|
||||
| `csv` | 69.0% | 2.337 | 80/116 |
|
||||
| `yaml` | 68.1% | 4.969 | 79/116 |
|
||||
| `markdown-kv` | 68.1% | 6.270 | 79/116 |
|
||||
| `json` | 68.1% | 6.347 | 79/116 |
|
||||
|
||||
##### E-commerce orders with nested structures
|
||||
|
||||
| Format | Accuracy | Tokens | Correct/Total |
|
||||
|--------|----------|--------|---------------|
|
||||
| `toon` | 84.1% | 5.967 | 74/88 |
|
||||
| `csv` | 83.0% | 6.735 | 73/88 |
|
||||
| `yaml` | 81.8% | 7.328 | 72/88 |
|
||||
| `markdown-kv` | 86.4% | 9.110 | 76/88 |
|
||||
| `json` | 84.1% | 9.694 | 74/88 |
|
||||
|
||||
##### Time-series analytics data
|
||||
|
||||
| Format | Accuracy | Tokens | Correct/Total |
|
||||
|--------|----------|--------|---------------|
|
||||
| `csv` | 72.4% | 1.393 | 42/58 |
|
||||
| `toon` | 70.7% | 1.515 | 41/58 |
|
||||
| `yaml` | 72.4% | 2.938 | 42/58 |
|
||||
| `json` | 74.1% | 3.665 | 43/58 |
|
||||
| `markdown-kv` | 70.7% | 3.779 | 41/58 |
|
||||
|
||||
##### Popular GitHub repositories
|
||||
|
||||
| Format | Accuracy | Tokens | Correct/Total |
|
||||
|--------|----------|--------|---------------|
|
||||
| `toon` | 64.3% | 8.745 | 36/56 |
|
||||
| `csv` | 62.5% | 8.513 | 35/56 |
|
||||
| `json` | 67.9% | 15.145 | 38/56 |
|
||||
| `markdown-kv` | 67.9% | 15.436 | 38/56 |
|
||||
| `yaml` | 62.5% | 13.129 | 35/56 |
|
||||
|
||||
|
||||
#### Performance by Model
|
||||
|
||||
##### gpt-4o-mini
|
||||
|
||||
| Format | Accuracy | Correct/Total |
|
||||
|--------|----------|---------------|
|
||||
| `toon` | 72.3% | 115/159 |
|
||||
| `json` | 71.7% | 114/159 |
|
||||
| `markdown-kv` | 70.4% | 112/159 |
|
||||
| `csv` | 69.2% | 110/159 |
|
||||
| `yaml` | 68.6% | 109/159 |
|
||||
|
||||
##### claude-haiku-4-5
|
||||
|
||||
| Format | Accuracy | Correct/Total |
|
||||
|--------|----------|---------------|
|
||||
| `markdown-kv` | 76.7% | 122/159 |
|
||||
| `toon` | 75.5% | 120/159 |
|
||||
| `json` | 75.5% | 120/159 |
|
||||
| `csv` | 75.5% | 120/159 |
|
||||
| `yaml` | 74.8% | 119/159 |
|
||||
|
||||
|
||||
#### Methodology
|
||||
|
||||
- **Semantic validation**: LLM-as-judge validates responses semantically (not exact string matching).
|
||||
- **Token counting**: Using `gpt-tokenizer` with `o200k_base` encoding.
|
||||
- **Question types**: Field retrieval, aggregation, and filtering tasks.
|
||||
- **Real data**: Faker.js-generated datasets + GitHub repositories.
|
||||
|
||||
</details>
|
||||
benchmarks/results/accuracy/summary.json: 95 lines (new file)
@@ -0,0 +1,95 @@
|
||||
{
|
||||
"formatResults": [
|
||||
{
|
||||
"format": "toon",
|
||||
"accuracy": 0.7389937106918238,
|
||||
"totalTokens": 4678,
|
||||
"avgInputTokens": 4675,
|
||||
"avgLatency": 1424,
|
||||
"correctCount": 235,
|
||||
"totalCount": 318
|
||||
},
|
||||
{
|
||||
"format": "json",
|
||||
"accuracy": 0.7358490566037735,
|
||||
"totalTokens": 8713,
|
||||
"avgInputTokens": 9177,
|
||||
"avgLatency": 1678,
|
||||
"correctCount": 234,
|
||||
"totalCount": 318
|
||||
},
|
||||
{
|
||||
"format": "markdown-kv",
|
||||
"accuracy": 0.7358490566037735,
|
||||
"totalTokens": 8649,
|
||||
"avgInputTokens": 8242,
|
||||
"avgLatency": 1724,
|
||||
"correctCount": 234,
|
||||
"totalCount": 318
|
||||
},
|
||||
{
|
||||
"format": "csv",
|
||||
"accuracy": 0.7232704402515723,
|
||||
"totalTokens": 4745,
|
||||
"avgInputTokens": 4878,
|
||||
"avgLatency": 1573,
|
||||
"correctCount": 230,
|
||||
"totalCount": 318
|
||||
},
|
||||
{
|
||||
"format": "yaml",
|
||||
"accuracy": 0.7169811320754716,
|
||||
"totalTokens": 7091,
|
||||
"avgInputTokens": 7136,
|
||||
"avgLatency": 1602,
|
||||
"correctCount": 228,
|
||||
"totalCount": 318
|
||||
}
|
||||
],
|
||||
"questions": 159,
|
||||
"models": [
|
||||
"gpt-4o-mini",
|
||||
"claude-haiku-4-5"
|
||||
],
|
||||
"datasets": [
|
||||
{
|
||||
"name": "tabular",
|
||||
"description": "Uniform employee records (TOON optimal format)"
|
||||
},
|
||||
{
|
||||
"name": "nested",
|
||||
"description": "E-commerce orders with nested structures"
|
||||
},
|
||||
{
|
||||
"name": "analytics",
|
||||
"description": "Time-series analytics data"
|
||||
},
|
||||
{
|
||||
"name": "github",
|
||||
"description": "Popular GitHub repositories"
|
||||
}
|
||||
],
|
||||
"tokenCounts": {
|
||||
"json-tabular": 6347,
|
||||
"json-nested": 9694,
|
||||
"json-analytics": 3665,
|
||||
"json-github": 15145,
|
||||
"toon-tabular": 2483,
|
||||
"toon-nested": 5967,
|
||||
"toon-analytics": 1515,
|
||||
"toon-github": 8745,
|
||||
"csv-tabular": 2337,
|
||||
"csv-nested": 6735,
|
||||
"csv-analytics": 1393,
|
||||
"csv-github": 8513,
|
||||
"markdown-kv-tabular": 6270,
|
||||
"markdown-kv-nested": 9110,
|
||||
"markdown-kv-analytics": 3779,
|
||||
"markdown-kv-github": 15436,
|
||||
"yaml-tabular": 4969,
|
||||
"yaml-nested": 7328,
|
||||
"yaml-analytics": 2938,
|
||||
"yaml-github": 13129
|
||||
},
|
||||
"timestamp": "2025-10-27T10:46:35.127Z"
|
||||
}
|
||||
benchmarks/results/token-efficiency.md: 141 lines (new file)
@@ -0,0 +1,141 @@
|
||||
### Token Efficiency
|
||||
|
||||
```
|
||||
⭐ GitHub Repositories ██████████████░░░░░░░░░░░ 8,745 tokens (JSON: 15,145) 💰 42.3% saved
|
||||
📈 Analytics Time Series ██████████░░░░░░░░░░░░░░░ 3,631 tokens (JSON: 9,024) 💰 59.8% saved
|
||||
👥 API Response ██████████████░░░░░░░░░░░ 2,593 tokens (JSON: 4,589) 💰 43.5% saved
|
||||
🛒 E-commerce Order ███████████████░░░░░░░░░░ 203 tokens (JSON: 338) 💰 39.9% saved
|
||||
```
|
||||
|
||||
**Total:** 15,172 tokens (TOON) vs 29,096 tokens (JSON) → 47.9% savings
|
||||
|
||||
<details>
|
||||
<summary><strong>View detailed examples</strong></summary>
|
||||
|
||||
#### ⭐ GitHub Repositories
|
||||
|
||||
**Configuration:** Top 100 GitHub repositories with stars, forks, and metadata
|
||||
|
||||
**Savings:** 6,400 tokens (42.3% reduction)
|
||||
|
||||
**JSON** (15,145 tokens):
|
||||
|
||||
```json
|
||||
{
|
||||
"repositories": [
|
||||
{
|
||||
"id": 28457823,
|
||||
"name": "freeCodeCamp",
|
||||
"repo": "freeCodeCamp/freeCodeCamp",
|
||||
"description": "freeCodeCamp.org's open-source codebase and curriculum. Learn math, programming,...",
|
||||
"createdAt": "2014-12-24T17:49:19Z",
|
||||
"updatedAt": "2025-10-27T07:40:58Z",
|
||||
"pushedAt": "2025-10-26T11:31:08Z",
|
||||
"stars": 430828,
|
||||
"watchers": 8582,
|
||||
"forks": 42136,
|
||||
"defaultBranch": "main"
|
||||
},
|
||||
{
|
||||
"id": 132750724,
|
||||
"name": "build-your-own-x",
|
||||
"repo": "codecrafters-io/build-your-own-x",
|
||||
"description": "Master programming by recreating your favorite technologies from scratch.",
|
||||
"createdAt": "2018-05-09T12:03:18Z",
|
||||
"updatedAt": "2025-10-27T07:43:25Z",
|
||||
"pushedAt": "2025-10-10T18:45:01Z",
|
||||
"stars": 430102,
|
||||
"watchers": 6322,
|
||||
"forks": 40388,
|
||||
"defaultBranch": "master"
|
||||
},
|
||||
{
|
||||
"id": 21737465,
|
||||
"name": "awesome",
|
||||
"repo": "sindresorhus/awesome",
|
||||
"description": "😎 Awesome lists about all kinds of interesting topics",
|
||||
"createdAt": "2014-07-11T13:42:37Z",
|
||||
"updatedAt": "2025-10-27T07:44:27Z",
|
||||
"pushedAt": "2025-10-23T17:26:53Z",
|
||||
"stars": 409760,
|
||||
"watchers": 8016,
|
||||
"forks": 32015,
|
||||
"defaultBranch": "main"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**TOON** (8,745 tokens):
|
||||
|
||||
```
|
||||
repositories[3]{id,name,repo,description,createdAt,updatedAt,pushedAt,stars,watchers,forks,defaultBranch}:
|
||||
28457823,freeCodeCamp,freeCodeCamp/freeCodeCamp,"freeCodeCamp.org's open-source codebase and curriculum. Learn math, programming,...","2014-12-24T17:49:19Z","2025-10-27T07:40:58Z","2025-10-26T11:31:08Z",430828,8582,42136,main
|
||||
132750724,build-your-own-x,codecrafters-io/build-your-own-x,Master programming by recreating your favorite technologies from scratch.,"2018-05-09T12:03:18Z","2025-10-27T07:43:25Z","2025-10-10T18:45:01Z",430102,6322,40388,master
|
||||
21737465,awesome,sindresorhus/awesome,😎 Awesome lists about all kinds of interesting topics,"2014-07-11T13:42:37Z","2025-10-27T07:44:27Z","2025-10-23T17:26:53Z",409760,8016,32015,main
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
#### 📈 Analytics Time Series
|
||||
|
||||
**Configuration:** 180 days of web metrics (views, clicks, conversions, revenue)
|
||||
|
||||
**Savings:** 5,393 tokens (59.8% reduction)
|
||||
|
||||
**JSON** (9,024 tokens):
|
||||
|
||||
```json
|
||||
{
|
||||
"metrics": [
|
||||
{
|
||||
"date": "2024-12-31",
|
||||
"views": 3769,
|
||||
"clicks": 400,
|
||||
"conversions": 59,
|
||||
"revenue": 198.98
|
||||
},
|
||||
{
|
||||
"date": "2025-01-01",
|
||||
"views": 5742,
|
||||
"clicks": 463,
|
||||
"conversions": 28,
|
||||
"revenue": 295.77
|
||||
},
|
||||
{
|
||||
"date": "2025-01-02",
|
||||
"views": 3669,
|
||||
"clicks": 336,
|
||||
"conversions": 102,
|
||||
"revenue": 624.23
|
||||
},
|
||||
{
|
||||
"date": "2025-01-03",
|
||||
"views": 1332,
|
||||
"clicks": 304,
|
||||
"conversions": 99,
|
||||
"revenue": 113.06
|
||||
},
|
||||
{
|
||||
"date": "2025-01-04",
|
||||
"views": 1444,
|
||||
"clicks": 222,
|
||||
"conversions": 88,
|
||||
"revenue": 986.69
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**TOON** (3,631 tokens):
|
||||
|
||||
```
|
||||
metrics[5]{date,views,clicks,conversions,revenue}:
|
||||
2024-12-31,3769,400,59,198.98
|
||||
2025-01-01,5742,463,28,295.77
|
||||
2025-01-02,3669,336,102,624.23
|
||||
2025-01-03,1332,304,99,113.06
|
||||
2025-01-04,1444,222,88,986.69
|
||||
```
|
||||
|
||||
</details>
|
||||
benchmarks/scripts/accuracy-benchmark.ts: 140 lines (new file)
@@ -0,0 +1,140 @@
|
||||
/**
|
||||
* TOON LLM Accuracy Benchmark
|
||||
*
|
||||
* Main entry point that orchestrates the full benchmark:
|
||||
* 1. Generate questions from datasets
|
||||
* 2. Format data in all formats (JSON, TOON, YAML, Markdown-kv)
|
||||
* 3. Evaluate each question with each format using LLMs
|
||||
* 4. Generate reports
|
||||
*/
|
||||
|
||||
import type { EvaluationResult, Question } from '../src/types'
|
||||
import * as fsp from 'node:fs/promises'
|
||||
import * as path from 'node:path'
|
||||
import { consola } from 'consola'
|
||||
import pMap from 'p-map'
|
||||
import { BENCHMARKS_DIR, DEFAULT_CONCURRENCY, DRY_RUN, DRY_RUN_LIMITS, ROOT_DIR } from '../src/constants'
|
||||
import { datasets } from '../src/datasets'
|
||||
import { evaluateQuestion, models } from '../src/evaluate'
|
||||
import { formatters } from '../src/formatters'
|
||||
import { generateQuestions } from '../src/questions'
|
||||
import { calculateFormatResults, calculateTokenCounts, saveResults } from '../src/report'
|
||||
|
||||
consola.start('LLM Accuracy Benchmark for TOON')
|
||||
|
||||
// Check if results already exist
|
||||
const resultsDir = path.join(BENCHMARKS_DIR, 'results', 'accuracy')
|
||||
const rawResultsPath = path.join(resultsDir, 'raw-results.json')
|
||||
const summaryPath = path.join(resultsDir, 'summary.json')
|
||||
|
||||
let existingResults: EvaluationResult[] | undefined
|
||||
let existingTokenCounts: Record<string, number> | undefined
|
||||
|
||||
try {
|
||||
const [rawData, summaryData] = await Promise.all([
|
||||
fsp.readFile(rawResultsPath, 'utf-8'),
|
||||
fsp.readFile(summaryPath, 'utf-8'),
|
||||
])
|
||||
existingResults = JSON.parse(rawData)
|
||||
const summary = JSON.parse(summaryData)
|
||||
existingTokenCounts = summary.tokenCounts
|
||||
consola.info('Found existing results – regenerating report only')
|
||||
}
|
||||
catch {
|
||||
// Results don't exist, will run full evaluation
|
||||
}
|
||||
|
||||
if (DRY_RUN) {
|
||||
consola.info('Limiting questions and models for dry run')
|
||||
}
|
||||
|
||||
let questions = generateQuestions()
|
||||
|
||||
// Apply dry run limits if enabled
|
||||
if (DRY_RUN && DRY_RUN_LIMITS.maxQuestions) {
|
||||
questions = questions.slice(0, DRY_RUN_LIMITS.maxQuestions)
|
||||
}
|
||||
|
||||
// Filter models for dry run
|
||||
const activeModels = DRY_RUN && DRY_RUN_LIMITS.allowedModels.length > 0
|
||||
? Object.fromEntries(
|
||||
Object.entries(models).filter(([name]) => DRY_RUN_LIMITS.allowedModels.includes(name)),
|
||||
)
|
||||
: models
|
||||
|
||||
let results: EvaluationResult[]
|
||||
let tokenCounts: Record<string, number>
|
||||
|
||||
if (existingResults && existingTokenCounts) {
|
||||
// Reuse existing results
|
||||
results = existingResults
|
||||
tokenCounts = existingTokenCounts
|
||||
}
|
||||
else {
|
||||
// Run full evaluation
|
||||
consola.info(`Evaluating ${questions.length} questions`)
|
||||
consola.info(`Testing ${Object.keys(formatters).length} formats`)
|
||||
consola.info(`Using ${Object.keys(activeModels).length} models: ${Object.keys(activeModels).join(', ')}`)
|
||||
|
||||
// Calculate token counts for all format+dataset combinations
|
||||
tokenCounts = calculateTokenCounts(formatters)
|
||||
|
||||
// Format datasets once (reuse for all questions)
|
||||
const formattedDatasets: Record<string, Record<string, string>> = {}
|
||||
for (const [formatName, formatter] of Object.entries(formatters)) {
|
||||
formattedDatasets[formatName] = {}
|
||||
for (const dataset of datasets) {
|
||||
const formatted = formatter(dataset.data)
|
||||
formattedDatasets[formatName]![dataset.name] = formatted
|
||||
}
|
||||
}
|
||||
|
||||
// Generate evaluation tasks
|
||||
const tasks: { question: Question, formatName: string, modelName: string }[] = []
|
||||
for (const question of questions) {
|
||||
for (const [formatName] of Object.entries(formatters)) {
|
||||
for (const [modelName] of Object.entries(activeModels)) {
|
||||
tasks.push({ question, formatName, modelName })
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const total = tasks.length
|
||||
|
||||
consola.start(`Running ${total} evaluations with concurrency: ${DEFAULT_CONCURRENCY}`)
|
||||
|
||||
// Evaluate all tasks in parallel
|
||||
results = await pMap(
|
||||
tasks,
|
||||
async (task, index) => {
|
||||
const formattedData = formattedDatasets[task.formatName]![task.question.dataset]!
|
||||
const model = activeModels[task.modelName as keyof typeof activeModels]
|
||||
|
||||
const result = await evaluateQuestion(
|
||||
task.question,
|
||||
task.formatName,
|
||||
formattedData,
|
||||
model,
|
||||
task.modelName,
|
||||
)
|
||||
|
||||
// Progress update
|
||||
if ((index + 1) % 10 === 0) {
|
||||
const percent = (((index + 1) / total) * 100).toFixed(1)
|
||||
console.log(`⏳ Progress: ${index + 1}/${total} (${percent}%)`)
|
||||
}
|
||||
|
||||
return result
|
||||
},
|
||||
{ concurrency: DEFAULT_CONCURRENCY },
|
||||
)
|
||||
|
||||
consola.success('Evaluation complete!')
|
||||
}
|
||||
|
||||
// Generate/regenerate markdown report
|
||||
const formatResults = calculateFormatResults(results, tokenCounts)
|
||||
await saveResults(results, formatResults, questions, tokenCounts)
|
||||
|
||||
consola.info(`Results saved to: \`${path.relative(ROOT_DIR, resultsDir)}\``)
|
||||
consola.success(existingResults ? 'Markdown report regenerated!' : 'Evaluation complete!')
|
||||
benchmarks/scripts/fetch-github-data.ts: 78 lines (new file)
@@ -0,0 +1,78 @@
|
||||
import * as fsp from 'node:fs/promises'
|
||||
import * as path from 'node:path'
|
||||
import process from 'node:process'
|
||||
import { consola } from 'consola'
|
||||
import { ofetch } from 'ofetch'
|
||||
import { BENCHMARKS_DIR } from '../src/constants'
|
||||
|
||||
try {
|
||||
// Fetch top 100 repos from GitHub
|
||||
const repoList = await searchTop100Repos()
|
||||
const repos = await fetchRepoDetails(repoList)
|
||||
|
||||
if (repos.length === 0) {
|
||||
consola.error('❌ No repositories fetched. Exiting.')
|
||||
process.exit(1)
|
||||
}
|
||||
|
||||
// Sort by stars descending
|
||||
repos.sort((a, b) => b.stars - a.stars)
|
||||
|
||||
await saveRepos(repos)
|
||||
|
||||
consola.success('Done!')
|
||||
}
|
||||
catch (error) {
|
||||
consola.error(error)
|
||||
process.exit(1)
|
||||
}
|
||||
|
||||
async function searchTop100Repos(): Promise<string[]> {
|
||||
consola.start('Fetching top 100 starred repositories from GitHub API…')
|
||||
|
||||
const response = await ofetch<{ items: { full_name: string }[] }>(
|
||||
'https://api.github.com/search/repositories',
|
||||
{
|
||||
query: {
|
||||
q: 'stars:>1',
|
||||
sort: 'stars',
|
||||
order: 'desc',
|
||||
per_page: 100,
|
||||
},
|
||||
headers: {
|
||||
'Accept': 'application/vnd.github+json',
|
||||
'X-GitHub-Api-Version': '2022-11-28',
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
return response.items.map(item => item.full_name)
|
||||
}
|
||||
|
||||
async function fetchRepoDetails(repoList: string[]): Promise<Record<string, any>[]> {
|
||||
consola.start(`Fetching ${repoList.length} GitHub repositories…`)
|
||||
|
||||
const repos: Record<string, any>[] = []
|
||||
|
||||
for (let i = 0; i < repoList.length; i++) {
|
||||
const repoPath = repoList[i]!
|
||||
console.log(`[${i + 1}/${repoList.length}] Fetching ${repoPath}…`)
|
||||
const { repo } = await ofetch(`https://ungh.cc/repos/${repoPath}`)
|
||||
repos.push(repo)
|
||||
}
|
||||
|
||||
consola.success(`Successfully fetched ${repos.length}/${repoList.length} repositories`)
|
||||
|
||||
return repos
|
||||
}
|
||||
|
||||
async function saveRepos(repos: Record<string, any>[]): Promise<void> {
|
||||
const outputDir = path.join(BENCHMARKS_DIR, 'data')
|
||||
const outputFile = path.join(outputDir, 'github-repos.json')
|
||||
|
||||
await fsp.mkdir(outputDir, { recursive: true })
|
||||
await fsp.writeFile(outputFile, JSON.stringify(repos, undefined, 2))
|
||||
|
||||
const relativePath = path.relative(BENCHMARKS_DIR, outputFile)
|
||||
consola.info(`Saved to \`${relativePath}\``)
|
||||
}
|
||||
benchmarks/scripts/token-efficiency-benchmark.ts: 228 lines (new file)
@@ -0,0 +1,228 @@
|
||||
import * as fsp from 'node:fs/promises'
|
||||
import * as path from 'node:path'
|
||||
import { faker } from '@faker-js/faker'
|
||||
import { consola } from 'consola'
|
||||
import { encode as encodeTokens } from 'gpt-tokenizer' // o200k_base encoding (default)
|
||||
import { encode } from '../../src/index'
|
||||
import githubRepos from '../data/github-repos.json' with { type: 'json' }
|
||||
import { BENCHMARKS_DIR, ROOT_DIR } from '../src/constants'
|
||||
|
||||
interface BenchmarkResult {
|
||||
name: string
|
||||
emoji: string
|
||||
description: string
|
||||
data: any
|
||||
jsonTokens: number
|
||||
toonTokens: number
|
||||
savings: number
|
||||
savingsPercent: string
|
||||
showDetailed: boolean
|
||||
}
|
||||
|
||||
const outputFilePath = path.join(BENCHMARKS_DIR, 'results', 'token-efficiency.md')
|
||||
|
||||
const BENCHMARK_EXAMPLES = [
|
||||
{
|
||||
name: 'GitHub Repositories',
|
||||
emoji: '⭐',
|
||||
description: 'Top 100 GitHub repositories with stars, forks, and metadata',
|
||||
getData: () => ({ repositories: githubRepos }),
|
||||
showDetailed: true,
|
||||
},
|
||||
{
|
||||
name: 'Analytics Time Series',
|
||||
emoji: '📈',
|
||||
description: '180 days of web metrics (views, clicks, conversions, revenue)',
|
||||
getData: () => generateAnalytics(180),
|
||||
showDetailed: true,
|
||||
},
|
||||
{
|
||||
name: 'API Response',
|
||||
emoji: '👥',
|
||||
description: '50 user records with metadata and timestamps',
|
||||
getData: () => generateUsers(50),
|
||||
showDetailed: false,
|
||||
},
|
||||
{
|
||||
name: 'E-commerce Order',
|
||||
emoji: '🛒',
|
||||
description: 'Nested order with customer and items',
|
||||
getData: generateOrder,
|
||||
showDetailed: false,
|
||||
},
|
||||
] as const
|
||||
|
||||
// Calculate total savings
|
||||
let totalJsonTokens = 0
|
||||
let totalToonTokens = 0
|
||||
|
||||
const results: BenchmarkResult[] = []
|
||||
|
||||
for (const example of BENCHMARK_EXAMPLES) {
|
||||
const data = await example.getData()
|
||||
|
||||
const jsonString = JSON.stringify(data, undefined, 2)
|
||||
const toonString = encode(data)
|
||||
|
||||
const jsonTokens = encodeTokens(jsonString).length
|
||||
const toonTokens = encodeTokens(toonString).length
|
||||
const savings = jsonTokens - toonTokens
|
||||
const savingsPercent = ((savings / jsonTokens) * 100).toFixed(1)
|
||||
|
||||
totalJsonTokens += jsonTokens
|
||||
totalToonTokens += toonTokens
|
||||
|
||||
results.push({
|
||||
name: example.name,
|
||||
emoji: example.emoji,
|
||||
description: example.description,
|
||||
data,
|
||||
jsonTokens,
|
||||
toonTokens,
|
||||
savings,
|
||||
savingsPercent,
|
||||
showDetailed: example.showDetailed,
|
||||
})
|
||||
}
|
||||
|
||||
const totalSavings = totalJsonTokens - totalToonTokens
|
||||
const totalSavingsPercent = ((totalSavings / totalJsonTokens) * 100).toFixed(1)
|
||||
|
||||
// Generate ASCII bar chart visualization
|
||||
const barChartSection = results
|
||||
.map((result) => {
|
||||
const percentage = Number.parseFloat(result.savingsPercent)
|
||||
const bar = generateBarChart(100 - percentage) // Invert to show TOON tokens
|
||||
const jsonStr = result.jsonTokens.toLocaleString('en-US')
|
||||
const toonStr = result.toonTokens.toLocaleString('en-US')
|
||||
return `${result.emoji} ${result.name.padEnd(25)} ${bar} ${toonStr.padStart(6)} tokens (JSON: ${jsonStr.padStart(6)}) 💰 ${result.savingsPercent}% saved`
|
||||
})
|
||||
.join('\n')
|
||||
|
||||
// Generate detailed examples (only for selected examples)
|
||||
const detailedExamples = results
|
||||
.filter(result => result.showDetailed)
|
||||
.map((result, i, filtered) => {
|
||||
// Truncate large datasets for display
|
||||
let displayData = result.data
|
||||
if (result.name === 'GitHub Repositories') {
|
||||
displayData = {
|
||||
repositories: result.data.repositories.slice(0, 3).map((repo: any) => ({
|
||||
...repo,
|
||||
description: (repo.description ?? '').slice(0, 80) + ((repo.description ?? '').length > 80 ? '...' : ''),
|
||||
})),
|
||||
}
|
||||
}
|
||||
else if (result.name === 'Analytics Time Series') {
|
||||
displayData = { metrics: result.data.metrics.slice(0, 5) }
|
||||
}
|
||||
|
||||
const separator = i < filtered.length - 1 ? '\n\n---' : ''
|
||||
|
||||
return `#### ${result.emoji} ${result.name}
|
||||
|
||||
**Configuration:** ${result.description}
|
||||
|
||||
**Savings:** ${result.savings.toLocaleString('en-US')} tokens (${result.savingsPercent}% reduction)
|
||||
|
||||
**JSON** (${result.jsonTokens.toLocaleString('en-US')} tokens):
|
||||
|
||||
\`\`\`json
|
||||
${JSON.stringify(displayData, undefined, 2)}
|
||||
\`\`\`
|
||||
|
||||
**TOON** (${result.toonTokens.toLocaleString('en-US')} tokens):
|
||||
|
||||
\`\`\`
|
||||
${encode(displayData)}
|
||||
\`\`\`${separator}`
|
||||
})
|
||||
.join('\n\n')
|
||||
|
||||
const markdown = `### Token Efficiency
|
||||
|
||||
\`\`\`
|
||||
${barChartSection}
|
||||
\`\`\`
|
||||
|
||||
**Total:** ${totalToonTokens.toLocaleString('en-US')} tokens (TOON) vs ${totalJsonTokens.toLocaleString('en-US')} tokens (JSON) → ${totalSavingsPercent}% savings
|
||||
|
||||
<details>
|
||||
<summary><strong>View detailed examples</strong></summary>
|
||||
|
||||
${detailedExamples}
|
||||
|
||||
</details>
|
||||
`.trimStart()
|
||||
|
||||
console.log(markdown)
|
||||
|
||||
await fsp.mkdir(path.join(BENCHMARKS_DIR, 'results'), { recursive: true })
|
||||
await fsp.writeFile(outputFilePath, markdown, 'utf-8')
|
||||
|
||||
consola.success(`Benchmark written to \`${path.relative(ROOT_DIR, outputFilePath)}\``)
|
||||
|
||||
// Generate ASCII bar chart
|
||||
function generateBarChart(percentage: number, maxWidth: number = 25): string {
|
||||
const filled = Math.round((percentage / 100) * maxWidth)
|
||||
const empty = maxWidth - filled
|
||||
return '█'.repeat(filled) + '░'.repeat(empty)
|
||||
}
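// Worked example: the "GitHub Repositories" result saves 42.3%, so the chart draws
// TOON's share of the JSON tokens: generateBarChart(100 - 42.3) fills
// Math.round((57.7 / 100) * 25) = 14 of the 25 cells, matching the generated report.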
|
||||
|
||||
// Generate analytics time series data
|
||||
function generateAnalytics(days: number) {
|
||||
return {
|
||||
metrics: Array.from({ length: days }, (_, i) => {
|
||||
const date = new Date(2025, 0, 1)
|
||||
date.setDate(date.getDate() + i)
|
||||
return {
|
||||
date: date.toISOString().split('T')[0],
|
||||
views: Math.floor(Math.random() * 5000) + 1000,
|
||||
clicks: Math.floor(Math.random() * 500) + 50,
|
||||
conversions: Math.floor(Math.random() * 100) + 10,
|
||||
revenue: Number((Math.random() * 1000 + 100).toFixed(2)),
|
||||
}
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
// Generate user API response
|
||||
function generateUsers(count: number) {
|
||||
return {
|
||||
users: Array.from({ length: count }, (_, i) => ({
|
||||
id: i + 1,
|
||||
name: faker.person.fullName(),
|
||||
email: faker.internet.email(),
|
||||
role: faker.helpers.arrayElement(['admin', 'user', 'moderator']),
|
||||
active: faker.datatype.boolean(),
|
||||
createdAt: faker.date.past({ years: 2 }).toISOString(),
|
||||
lastLogin: faker.date.recent({ days: 30 }).toISOString(),
|
||||
})),
|
||||
total: count,
|
||||
page: 1,
|
||||
}
|
||||
}
|
||||
|
||||
// Generate nested e-commerce order
|
||||
function generateOrder() {
|
||||
return {
|
||||
orderId: faker.string.alphanumeric({ length: 12, casing: 'upper' }),
|
||||
customer: {
|
||||
id: faker.number.int({ min: 1000, max: 9999 }),
|
||||
name: faker.person.fullName(),
|
||||
email: faker.internet.email(),
|
||||
phone: faker.phone.number(),
|
||||
},
|
||||
items: Array.from({ length: faker.number.int({ min: 2, max: 5 }) }, () => ({
|
||||
sku: faker.string.alphanumeric({ length: 8, casing: 'upper' }),
|
||||
name: faker.commerce.productName(),
|
||||
quantity: faker.number.int({ min: 1, max: 5 }),
|
||||
price: Number(faker.commerce.price({ min: 10, max: 200 })),
|
||||
})),
|
||||
subtotal: Number(faker.commerce.price({ min: 100, max: 500 })),
|
||||
tax: Number(faker.commerce.price({ min: 10, max: 50 })),
|
||||
total: Number(faker.commerce.price({ min: 110, max: 550 })),
|
||||
status: faker.helpers.arrayElement(['pending', 'processing', 'shipped', 'delivered']),
|
||||
createdAt: faker.date.recent({ days: 7 }).toISOString(),
|
||||
}
|
||||
}
|
||||
benchmarks/src/constants.ts: 39 lines (new file)
@@ -0,0 +1,39 @@
|
||||
import process from 'node:process'
|
||||
import * as url from 'node:url'
|
||||
|
||||
export const ROOT_DIR: string = url.fileURLToPath(new URL('../../', import.meta.url))
|
||||
export const BENCHMARKS_DIR: string = url.fileURLToPath(new URL('../', import.meta.url))
|
||||
|
||||
/**
|
||||
* Benchmark execution configuration
|
||||
*/
|
||||
|
||||
/**
|
||||
* Enable dry run mode for quick testing with limited AI requests
|
||||
*
|
||||
* @remarks
|
||||
* Set via environment variable: `DRY_RUN=true`
|
||||
*/
|
||||
export const DRY_RUN: boolean = process.env.DRY_RUN === 'true'
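// Example invocation (script name from benchmarks/package.json): DRY_RUN=true npm run benchmark:accuracy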
|
||||
|
||||
/**
|
||||
* Limits applied when DRY_RUN is enabled
|
||||
*/
|
||||
export const DRY_RUN_LIMITS = {
|
||||
/** Maximum number of questions to evaluate */
|
||||
maxQuestions: 10,
|
||||
/** Maximum number of formats to test */
|
||||
maxFormats: undefined as number | undefined,
|
||||
/** Models to use in dry run */
|
||||
allowedModels: [] as string[],
|
||||
}
|
||||
|
||||
/**
|
||||
* Default concurrency for parallel evaluations
|
||||
*/
|
||||
export const DEFAULT_CONCURRENCY = 20
|
||||
|
||||
/**
|
||||
* Delay between API requests to avoid rate limiting (in milliseconds)
|
||||
*/
|
||||
export const RATE_LIMIT_DELAY_MS = 100
|
||||
benchmarks/src/datasets.ts: 146 lines (new file)
@@ -0,0 +1,146 @@
|
||||
/**
|
||||
* Datasets for TOON benchmarks
|
||||
*
|
||||
* These datasets are designed to test TOON's strengths and weaknesses:
|
||||
* - Tabular: Uniform records (TOON optimal)
|
||||
* - Nested: Complex structures with nested objects
|
||||
* - Analytics: Time-series data
|
||||
*/
|
||||
|
||||
import type { Dataset } from './types'
|
||||
import { faker } from '@faker-js/faker'
|
||||
import githubRepos from '../data/github-repos.json' with { type: 'json' }
|
||||
|
||||
// Seed for reproducibility
|
||||
faker.seed(12345)
|
||||
|
||||
/**
|
||||
* Tabular dataset: 100 uniform employee records
|
||||
*
|
||||
* @remarks
|
||||
* Tests TOON's tabular array format
|
||||
*/
|
||||
const departments = ['Engineering', 'Sales', 'Marketing', 'HR', 'Operations', 'Finance']
|
||||
const tabularDataset: Dataset = {
|
||||
name: 'tabular',
|
||||
description: 'Uniform employee records (TOON optimal format)',
|
||||
data: {
|
||||
employees: Array.from({ length: 100 }, (_, i) => {
|
||||
const yearsExp = faker.number.int({ min: 1, max: 20 })
|
||||
return {
|
||||
id: i + 1,
|
||||
name: faker.person.fullName(),
|
||||
email: faker.internet.email().toLowerCase(),
|
||||
department: departments[i % departments.length]!,
|
||||
salary: faker.number.int({ min: 45000, max: 150000 }),
|
||||
yearsExperience: yearsExp,
|
||||
active: faker.datatype.boolean(0.8), // 80% active
|
||||
}
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
/**
|
||||
* Nested dataset: 50 e-commerce orders with nested structures
|
||||
*
|
||||
* @remarks
|
||||
* Tests TOON's handling of complex nested objects
|
||||
*/
|
||||
const productNames = ['Wireless Mouse', 'USB Cable', 'Laptop Stand', 'Keyboard', 'Webcam', 'Headphones', 'Monitor', 'Desk Lamp']
|
||||
const statuses = ['pending', 'processing', 'shipped', 'delivered', 'cancelled']
|
||||
|
||||
const nestedDataset: Dataset = {
|
||||
name: 'nested',
|
||||
description: 'E-commerce orders with nested structures',
|
||||
data: {
|
||||
orders: Array.from({ length: 50 }, (_, i) => {
|
||||
const customerId = (i % 20) + 1
|
||||
const itemCount = faker.number.int({ min: 1, max: 4 })
|
||||
|
||||
const items = Array.from({ length: itemCount }, (_, j) => {
|
||||
const price = faker.number.float({ min: 9.99, max: 199.99, fractionDigits: 2 })
|
||||
const quantity = faker.number.int({ min: 1, max: 5 })
|
||||
return {
|
||||
sku: `SKU-${faker.string.alphanumeric({ length: 6 }).toUpperCase()}`,
|
||||
name: productNames[j % productNames.length]!,
|
||||
quantity,
|
||||
price,
|
||||
}
|
||||
})
|
||||
|
||||
const total = Number(items.reduce((sum, item) => sum + (item.price * item.quantity), 0).toFixed(2))
|
||||
|
||||
return {
|
||||
orderId: `ORD-${String(i + 1).padStart(4, '0')}`,
|
||||
customer: {
|
||||
id: customerId,
|
||||
name: faker.person.fullName(),
|
||||
email: faker.internet.email().toLowerCase(),
|
||||
},
|
||||
items,
|
||||
total,
|
||||
status: statuses[i % statuses.length]!,
|
||||
orderDate: faker.date.recent({ days: 90 }).toISOString().split('T')[0],
|
||||
}
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
/**
|
||||
* Analytics dataset: 60 days of time-series metrics
|
||||
*
|
||||
* @remarks
|
||||
* Tests TOON's handling of numeric data and date fields
|
||||
*/
|
||||
const analyticsDataset: Dataset = {
|
||||
name: 'analytics',
|
||||
description: 'Time-series analytics data',
|
||||
data: {
|
||||
metrics: Array.from({ length: 60 }, (_, i) => {
|
||||
const date = new Date('2025-01-01')
|
||||
date.setDate(date.getDate() + i)
|
||||
|
||||
// Simulate realistic web traffic with some variation
|
||||
const baseViews = 5000
|
||||
const weekendMultiplier = date.getDay() === 0 || date.getDay() === 6 ? 0.7 : 1.0
|
||||
const views = Math.round(baseViews * weekendMultiplier + faker.number.int({ min: -1000, max: 3000 }))
|
||||
const clicks = Math.round(views * faker.number.float({ min: 0.02, max: 0.08 }))
|
||||
const conversions = Math.round(clicks * faker.number.float({ min: 0.05, max: 0.15 }))
|
||||
const avgOrderValue = faker.number.float({ min: 49.99, max: 299.99 })
|
||||
const revenue = Number((conversions * avgOrderValue).toFixed(2))
|
||||
|
||||
return {
|
||||
date: date.toISOString().split('T')[0]!,
|
||||
views,
|
||||
clicks,
|
||||
conversions,
|
||||
revenue,
|
||||
bounceRate: faker.number.float({ min: 0.3, max: 0.7, fractionDigits: 2 }),
|
||||
}
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
/**
|
||||
* GitHub dataset: Popular repositories
|
||||
*
|
||||
* @remarks
|
||||
* Tests TOON's tabular format with real-world data
|
||||
*/
|
||||
const githubDataset: Dataset = {
|
||||
name: 'github',
|
||||
description: 'Popular GitHub repositories',
|
||||
data: {
|
||||
repositories: githubRepos.slice(0, 200),
|
||||
},
|
||||
}
|
||||
|
||||
/**
|
||||
* All datasets used in the benchmark
|
||||
*/
|
||||
export const datasets: Dataset[] = [
|
||||
tabularDataset,
|
||||
nestedDataset,
|
||||
analyticsDataset,
|
||||
githubDataset,
|
||||
]
|
||||
benchmarks/src/evaluate.ts: 133 lines (new file)
@@ -0,0 +1,133 @@
|
||||
/**
|
||||
* LLM evaluation logic for TOON benchmarks
|
||||
*
|
||||
* Handles:
|
||||
* - Model configuration
|
||||
* - Question evaluation with LLMs
|
||||
* - Answer validation using LLM-as-judge
|
||||
*/
|
||||
|
||||
import type { LanguageModelV2 } from '@ai-sdk/provider'
|
||||
import type { EvaluationResult, Question } from './types'
|
||||
import { setTimeout } from 'node:timers/promises'
|
||||
import { anthropic } from '@ai-sdk/anthropic'
|
||||
import { openai } from '@ai-sdk/openai'
|
||||
import { generateText } from 'ai'
|
||||
import { consola } from 'consola'
|
||||
import { RATE_LIMIT_DELAY_MS } from './constants'
|
||||
|
||||
/**
|
||||
* Models used for evaluation
|
||||
*/
|
||||
export const models: Record<string, LanguageModelV2> = {
|
||||
'gpt-4o-mini': openai('gpt-4o-mini'),
|
||||
'claude-haiku-4-5': anthropic('claude-haiku-4-5-20251001'),
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate an answer using LLM-as-judge approach
|
||||
* More robust than string matching for LLM outputs
|
||||
*/
|
||||
export async function validateAnswer(
|
||||
actual: string,
|
||||
expected: string,
|
||||
question: string,
|
||||
): Promise<boolean> {
|
||||
const prompt = `You are validating answers to questions about structured data.
|
||||
|
||||
Question: ${question}
|
||||
Expected answer: ${expected}
|
||||
Actual answer: ${actual}
|
||||
|
||||
Is the actual answer correct? Consider:
|
||||
- Exact matches are correct
|
||||
- Semantically equivalent answers are correct (e.g., "50000" vs "$50,000" vs "50000 dollars")
|
||||
- Minor formatting differences are acceptable
|
||||
- Case-insensitive comparison for text
|
||||
|
||||
Respond with only "YES" or "NO".`
|
||||
|
||||
try {
|
||||
const { text } = await generateText({
|
||||
model: models['gpt-4o-mini']!,
|
||||
prompt,
|
||||
temperature: 0,
|
||||
maxOutputTokens: 16,
|
||||
})
|
||||
|
||||
await setTimeout(RATE_LIMIT_DELAY_MS)
|
||||
|
||||
return text.trim().toUpperCase() === 'YES'
|
||||
}
|
||||
catch (error) {
|
||||
consola.error('Validation error:', error)
|
||||
// Fallback to simple string comparison
|
||||
return actual.toLowerCase().trim() === expected.toLowerCase().trim()
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Evaluate a single question with a specific format and model
|
||||
*/
|
||||
export async function evaluateQuestion(
|
||||
question: Question,
|
||||
formatName: string,
|
||||
formattedData: string,
|
||||
model: any,
|
||||
modelName: string,
|
||||
): Promise<EvaluationResult> {
|
||||
const prompt = `Given the following data in ${formatName} format:
|
||||
|
||||
\`\`\`
|
||||
${formattedData}
|
||||
\`\`\`
|
||||
|
||||
Question: ${question.prompt}
|
||||
|
||||
Provide only the direct answer, without any additional explanation or formatting.`
|
||||
|
||||
const startTime = Date.now()
|
||||
|
||||
try {
|
||||
const { text, usage } = await generateText({
|
||||
model,
|
||||
prompt,
|
||||
temperature: 0,
|
||||
maxOutputTokens: 50,
|
||||
})
|
||||
|
||||
await setTimeout(RATE_LIMIT_DELAY_MS)
|
||||
|
||||
const latencyMs = Date.now() - startTime
|
||||
const correct = await validateAnswer(text.trim(), question.groundTruth, question.prompt)
|
||||
|
||||
return {
|
||||
questionId: question.id,
|
||||
format: formatName,
|
||||
model: modelName,
|
||||
expected: question.groundTruth,
|
||||
actual: text.trim(),
|
||||
correct,
|
||||
inputTokens: usage.inputTokens ?? 0,
|
||||
outputTokens: usage.outputTokens ?? 0,
|
||||
latencyMs,
|
||||
}
|
||||
}
|
||||
catch (error) {
|
||||
consola.error(`Error evaluating ${question.id} with ${formatName}/${modelName}:`, error)
|
||||
|
||||
await setTimeout(RATE_LIMIT_DELAY_MS)
|
||||
|
||||
return {
|
||||
questionId: question.id,
|
||||
format: formatName,
|
||||
model: modelName,
|
||||
expected: question.groundTruth,
|
||||
actual: '',
|
||||
correct: false,
|
||||
inputTokens: 0,
|
||||
outputTokens: 0,
|
||||
latencyMs: Date.now() - startTime,
|
||||
}
|
||||
}
|
||||
}
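A minimal usage sketch for the judge above (requires the provider keys from `benchmarks/.env.example`; the question string and sample answers are illustrative, chosen to match the equivalence rules in the prompt):

```ts
import { validateAnswer } from './evaluate'

// "$50,000" should be judged equivalent to "50000" per the prompt's rules.
const ok = await validateAnswer('$50,000', '50000', 'What is the salary of Alice?')
console.log(ok) // expected: true (subject to the judge model's output)
```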
|
||||
benchmarks/src/formatters.ts: 90 lines (new file)
@@ -0,0 +1,90 @@
|
||||
/**
|
||||
* Format converters for TOON benchmarks
|
||||
*
|
||||
* Converts data to different formats:
|
||||
* - JSON
|
||||
* - TOON
|
||||
* - CSV
|
||||
* - Markdown key-value
|
||||
* - YAML
|
||||
*/
|
||||
|
||||
import { stringify as stringifyCSV } from 'csv-stringify/sync'
|
||||
import { stringify as stringifyYAML } from 'yaml'
|
||||
import { encode as encodeToon } from '../../src/index'
|
||||
|
||||
export const formatters = {
|
||||
'json': (data: unknown): string => JSON.stringify(data, undefined, 2),
|
||||
'toon': (data: unknown): string => encodeToon(data),
|
||||
'csv': (data: unknown): string => toCSV(data),
|
||||
'markdown-kv': (data: unknown): string => toMarkdownKV(data),
|
||||
'yaml': (data: unknown): string => stringifyYAML(data),
|
||||
}
|
||||
|
||||
function toCSV(data: unknown): string {
|
||||
const sections: string[] = []
|
||||
|
||||
// Handle top-level object with arrays
|
||||
if (typeof data === 'object' && data !== null && !Array.isArray(data)) {
|
||||
for (const [key, value] of Object.entries(data)) {
|
||||
if (Array.isArray(value) && value.length > 0) {
|
||||
sections.push(`# ${key}`)
|
||||
sections.push(stringifyCSV(value, { header: true }))
|
||||
}
|
||||
}
|
||||
return sections.join('\n').trim()
|
||||
}
|
||||
|
||||
// Root-level array
|
||||
if (Array.isArray(data) && data.length > 0) {
|
||||
return stringifyCSV(data, { header: true }).trim()
|
||||
}
|
||||
|
||||
return ''
|
||||
}
|
||||
|
||||
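/**
 * Render a value as indented Markdown key-value lines: each field becomes
 * `**key**: value`, nested objects and arrays recurse one space deeper,
 * object items in an array are separated by a blank line, and primitive
 * array items are rendered as `- item`.
 */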
function toMarkdownKV(data: unknown, indent = 0): string {
|
||||
const spaces = ' '.repeat(indent)
|
||||
const lines: string[] = []
|
||||
|
||||
if (Array.isArray(data)) {
|
||||
data.forEach((item, i) => {
|
||||
if (typeof item === 'object' && item !== null && !Array.isArray(item)) {
|
||||
Object.entries(item).forEach(([key, value]) => {
|
||||
if (typeof value === 'object' && value !== null) {
|
||||
lines.push(`${spaces}**${key}**:`)
|
||||
lines.push(toMarkdownKV(value, indent + 1))
|
||||
}
|
||||
else {
|
||||
lines.push(`${spaces}**${key}**: ${value}`)
|
||||
}
|
||||
})
|
||||
if (i < data.length - 1)
|
||||
lines.push('')
|
||||
}
|
||||
else {
|
||||
lines.push(`${spaces}- ${item}`)
|
||||
}
|
||||
})
|
||||
}
|
||||
else if (typeof data === 'object' && data !== null) {
|
||||
Object.entries(data).forEach(([key, value]) => {
|
||||
if (Array.isArray(value)) {
|
||||
lines.push(`${spaces}**${key}**:`)
|
||||
lines.push(toMarkdownKV(value, indent + 1))
|
||||
}
|
||||
else if (typeof value === 'object' && value !== null) {
|
||||
lines.push(`${spaces}**${key}**:`)
|
||||
lines.push(toMarkdownKV(value, indent + 1))
|
||||
}
|
||||
else {
|
||||
lines.push(`${spaces}**${key}**: ${value}`)
|
||||
}
|
||||
})
|
||||
}
|
||||
else {
|
||||
lines.push(`${spaces}${data}`)
|
||||
}
|
||||
|
||||
return lines.join('\n')
|
||||
}
|
||||
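To illustrate the converter map above, a minimal usage sketch (the sample payload is made up; only the exported `formatters` object is from this commit):

```ts
// Sketch only: feed one sample payload through every converter above.
import { formatters } from './formatters'

const sample = { users: [{ id: 1, name: 'Alice' }, { id: 2, name: 'Bob' }] }

for (const [name, format] of Object.entries(formatters)) {
  // keys are 'json', 'toon', 'csv', 'markdown-kv', 'yaml'
  console.log(`--- ${name} ---`)
  console.log(format(sample))
}
```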
398 benchmarks/src/questions.ts Normal file
@@ -0,0 +1,398 @@
/* eslint-disable no-console */

/**
 * Question generation for TOON benchmarks
 *
 * Generates ~200 questions across different types:
 * - Field retrieval (50%): "What is X's Y?"
 * - Aggregation (25%): "How many X have Y?"
 * - Filtering (25%): "List/count X where Y"
 *
 * Questions are generated dynamically based on actual data values
 */

import type { Question } from './types'
import { datasets } from './datasets'

/**
 * Generate all questions from datasets
 */
export function generateQuestions(): Question[] {
  const questions: Question[] = []
  let idCounter = 1

  // Get datasets
  const tabular = datasets.find(d => d.name === 'tabular')?.data.employees as any[] || []
  const nested = datasets.find(d => d.name === 'nested')?.data.orders as any[] || []
  const analytics = datasets.find(d => d.name === 'analytics')?.data.metrics as any[] || []
  const github = datasets.find(d => d.name === 'github')?.data.repositories as any[] || []

  // ========================================
  // TABULAR DATASET QUESTIONS (70 questions)
  // ========================================

  if (tabular.length > 0) {
    // Field retrieval: specific employees (40 questions)
    for (let i = 0; i < Math.min(40, tabular.length); i++) {
      const emp = tabular[i * 2] || tabular[i]
      if (!emp)
        continue

      // Alternate between different field types
      if (i % 3 === 0) {
        questions.push({
          id: `q${idCounter++}`,
          prompt: `What is the salary of ${emp.name}?`,
          groundTruth: String(emp.salary),
          type: 'field-retrieval',
          dataset: 'tabular',
        })
      }
      else if (i % 3 === 1) {
        questions.push({
          id: `q${idCounter++}`,
          prompt: `What department does ${emp.name} work in?`,
          groundTruth: emp.department,
          type: 'field-retrieval',
          dataset: 'tabular',
        })
      }
      else {
        questions.push({
          id: `q${idCounter++}`,
          prompt: `What is the email address of ${emp.name}?`,
          groundTruth: emp.email,
          type: 'field-retrieval',
          dataset: 'tabular',
        })
      }
    }

    // Aggregation: count by department
    const departments = [...new Set(tabular.map((e: any) => e.department))]
    for (const dept of departments.slice(0, 6)) {
      const count = tabular.filter((e: any) => e.department === dept).length
      questions.push({
        id: `q${idCounter++}`,
        prompt: `How many employees work in ${dept}?`,
        groundTruth: String(count),
        type: 'aggregation',
        dataset: 'tabular',
      })
    }

    // Aggregation: salary ranges (4 questions)
    const salaryThresholds = [60000, 80000, 100000, 120000]
    for (const threshold of salaryThresholds) {
      const count = tabular.filter((e: any) => e.salary > threshold).length
      questions.push({
        id: `q${idCounter++}`,
        prompt: `How many employees have a salary greater than ${threshold}?`,
        groundTruth: String(count),
        type: 'aggregation',
        dataset: 'tabular',
      })
    }

    // Filtering: active status
    const activeCount = tabular.filter((e: any) => e.active).length
    const inactiveCount = tabular.filter((e: any) => !e.active).length
    questions.push(
      {
        id: `q${idCounter++}`,
        prompt: 'How many employees are active?',
        groundTruth: String(activeCount),
        type: 'filtering',
        dataset: 'tabular',
      },
      {
        id: `q${idCounter++}`,
        prompt: 'How many employees are inactive?',
        groundTruth: String(inactiveCount),
        type: 'filtering',
        dataset: 'tabular',
      },
    )

    // Complex filtering: multi-condition (8 questions)
    for (const dept of departments.slice(0, 4)) {
      const count = tabular.filter((e: any) => e.department === dept && e.salary > 80000).length
      questions.push({
        id: `q${idCounter++}`,
        prompt: `How many employees in ${dept} have a salary greater than 80000?`,
        groundTruth: String(count),
        type: 'filtering',
        dataset: 'tabular',
      })
    }

    for (const exp of [5, 10]) {
      const count = tabular.filter((e: any) => e.yearsExperience > exp && e.active).length
      questions.push({
        id: `q${idCounter++}`,
        prompt: `How many active employees have more than ${exp} years of experience?`,
        groundTruth: String(count),
        type: 'filtering',
        dataset: 'tabular',
      })
    }
  }

  // ========================================
  // NESTED DATASET QUESTIONS (50 questions)
  // ========================================

  if (nested.length > 0) {
    // Field retrieval: order totals (20 questions)
    for (let i = 0; i < Math.min(20, nested.length); i++) {
      const order = nested[i * 2] || nested[i]
      if (!order)
        continue

      if (i % 2 === 0) {
        questions.push({
          id: `q${idCounter++}`,
          prompt: `What is the total amount for order ${order.orderId}?`,
          groundTruth: String(order.total),
          type: 'field-retrieval',
          dataset: 'nested',
        })
      }
      else {
        questions.push({
          id: `q${idCounter++}`,
          prompt: `What is the status of order ${order.orderId}?`,
          groundTruth: order.status,
          type: 'field-retrieval',
          dataset: 'nested',
        })
      }
    }

    // Field retrieval: customer info (15 questions)
    for (let i = 0; i < Math.min(15, nested.length); i++) {
      const order = nested[i * 3] || nested[i]
      if (!order)
        continue

      questions.push({
        id: `q${idCounter++}`,
        prompt: `What is the customer name for order ${order.orderId}?`,
        groundTruth: order.customer.name,
        type: 'field-retrieval',
        dataset: 'nested',
      })
    }

    // Aggregation: count by status
    const statuses = [...new Set(nested.map((o: any) => o.status))]
    for (const status of statuses) {
      const count = nested.filter((o: any) => o.status === status).length
      questions.push({
        id: `q${idCounter++}`,
        prompt: `How many orders have status "${status}"?`,
        groundTruth: String(count),
        type: 'filtering',
        dataset: 'nested',
      })
    }

    // Aggregation: total revenue
    const totalRevenue = nested.reduce((sum: number, o: any) => sum + o.total, 0)
    questions.push({
      id: `q${idCounter++}`,
      prompt: 'What is the total revenue across all orders?',
      groundTruth: String(totalRevenue.toFixed(2)),
      type: 'aggregation',
      dataset: 'nested',
    })

    // Filtering: high-value orders (3 questions)
    const highValueThresholds = [200, 400, 600]
    for (const threshold of highValueThresholds) {
      const count = nested.filter((o: any) => o.total > threshold).length
      questions.push({
        id: `q${idCounter++}`,
        prompt: `How many orders have a total greater than ${threshold}?`,
        groundTruth: String(count),
        type: 'filtering',
        dataset: 'nested',
      })
    }
  }

  // ========================================
  // ANALYTICS DATASET QUESTIONS (40 questions)
  // ========================================

  if (analytics.length > 0) {
    // Field retrieval: specific dates (20 questions)
    for (let i = 0; i < Math.min(20, analytics.length); i++) {
      const metric = analytics[i * 3] || analytics[i]
      if (!metric)
        continue

      if (i % 2 === 0) {
        questions.push({
          id: `q${idCounter++}`,
          prompt: `How many views were recorded on ${metric.date}?`,
          groundTruth: String(metric.views),
          type: 'field-retrieval',
          dataset: 'analytics',
        })
      }
      else {
        questions.push({
          id: `q${idCounter++}`,
          prompt: `What was the revenue on ${metric.date}?`,
          groundTruth: String(metric.revenue),
          type: 'field-retrieval',
          dataset: 'analytics',
        })
      }
    }

    // Aggregation: totals (4 questions)
    const totalViews = analytics.reduce((sum: number, m: any) => sum + m.views, 0)
    const totalRevenue = analytics.reduce((sum: number, m: any) => sum + m.revenue, 0)
    const totalConversions = analytics.reduce((sum: number, m: any) => sum + m.conversions, 0)

    questions.push(
      {
        id: `q${idCounter++}`,
        prompt: 'What is the total number of views across all dates?',
        groundTruth: String(totalViews),
        type: 'aggregation',
        dataset: 'analytics',
      },
      {
        id: `q${idCounter++}`,
        prompt: 'What is the total revenue across all dates?',
        groundTruth: String(totalRevenue.toFixed(2)),
        type: 'aggregation',
        dataset: 'analytics',
      },
      {
        id: `q${idCounter++}`,
        prompt: 'What is the total number of conversions across all dates?',
        groundTruth: String(totalConversions),
        type: 'aggregation',
        dataset: 'analytics',
      },
    )

    // Filtering: high-performing days (10 questions)
    const viewThresholds = [5000, 6000, 7000]
    for (const threshold of viewThresholds) {
      const count = analytics.filter((m: any) => m.views > threshold).length
      questions.push({
        id: `q${idCounter++}`,
        prompt: `How many days had more than ${threshold} views?`,
        groundTruth: String(count),
        type: 'filtering',
        dataset: 'analytics',
      })
    }

    const conversionThresholds = [10, 20, 30]
    for (const threshold of conversionThresholds) {
      const count = analytics.filter((m: any) => m.conversions > threshold).length
      questions.push({
        id: `q${idCounter++}`,
        prompt: `How many days had more than ${threshold} conversions?`,
        groundTruth: String(count),
        type: 'filtering',
        dataset: 'analytics',
      })
    }
  }

  // ========================================
  // GITHUB DATASET QUESTIONS (40 questions)
  // ========================================

  if (github.length > 0) {
    // Field retrieval: specific repos (20 questions)
    for (let i = 0; i < Math.min(20, github.length); i++) {
      const repo = github[i * 10] || github[i]
      if (!repo)
        continue

      if (i % 2 === 0) {
        questions.push({
          id: `q${idCounter++}`,
          prompt: `How many stars does ${repo.owner}/${repo.name} have?`,
          groundTruth: String(repo.stars),
          type: 'field-retrieval',
          dataset: 'github',
        })
      }
      else {
        questions.push({
          id: `q${idCounter++}`,
          prompt: `How many forks does ${repo.owner}/${repo.name} have?`,
          groundTruth: String(repo.forks),
          type: 'field-retrieval',
          dataset: 'github',
        })
      }
    }

    // Aggregation: count by owner (5 questions)
    const owners = [...new Set(github.map((r: any) => r.owner))]
    for (const owner of owners.slice(0, 5)) {
      const count = github.filter((r: any) => r.owner === owner).length
      questions.push({
        id: `q${idCounter++}`,
        prompt: `How many repositories does ${owner} have in the dataset?`,
        groundTruth: String(count),
        type: 'aggregation',
        dataset: 'github',
      })
    }

    // Aggregation: total stars
    const totalStars = github.reduce((sum: number, r: any) => sum + r.stars, 0)
    questions.push({
      id: `q${idCounter++}`,
      prompt: 'What is the total number of stars across all repositories?',
      groundTruth: String(totalStars),
      type: 'aggregation',
      dataset: 'github',
    })

    // Filtering: popular repos (8 questions)
    const starThresholds = [10000, 50000, 100000]
    for (const threshold of starThresholds) {
      const count = github.filter((r: any) => r.stars > threshold).length
      questions.push({
        id: `q${idCounter++}`,
        prompt: `How many repositories have more than ${threshold} stars?`,
        groundTruth: String(count),
        type: 'filtering',
        dataset: 'github',
      })
    }

    const forkThresholds = [1000, 5000, 10000]
    for (const threshold of forkThresholds) {
      const count = github.filter((r: any) => r.forks > threshold).length
      questions.push({
        id: `q${idCounter++}`,
        prompt: `How many repositories have more than ${threshold} forks?`,
        groundTruth: String(count),
        type: 'filtering',
        dataset: 'github',
      })
    }
  }

  console.log(`📊 Question breakdown:`)
  console.log(`  Tabular: ${questions.filter(q => q.dataset === 'tabular').length}`)
  console.log(`  Nested: ${questions.filter(q => q.dataset === 'nested').length}`)
  console.log(`  Analytics: ${questions.filter(q => q.dataset === 'analytics').length}`)
  console.log(`  GitHub: ${questions.filter(q => q.dataset === 'github').length}`)
  console.log(`  Total: ${questions.length}`)

  return questions
}
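A small sketch of how the generated question set might be inspected (assumes only the `generateQuestions` export shown above; the grouping code itself is illustrative):

```ts
// Sketch only: sanity-check the generated question set.
import { generateQuestions } from './questions'

const questions = generateQuestions()

// Count questions per type ('field-retrieval' | 'aggregation' | 'filtering' | 'comparison')
const byType = new Map<string, number>()
for (const q of questions)
  byType.set(q.type, (byType.get(q.type) ?? 0) + 1)

console.log(Object.fromEntries(byType))
// Every ground truth is stored as a plain string, ready for the LLM-as-judge check
console.log(questions[0])
```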
288 benchmarks/src/report.ts Normal file
@@ -0,0 +1,288 @@
/**
 * Report generation for TOON benchmarks
 *
 * Handles:
 * - Statistical analysis
 * - Twitter-ready markdown report generation with visual elements
 * - Per-dataset breakdowns
 * - Cost analysis
 * - Result file saving
 */

import type { EvaluationResult, FormatResult, Question } from './types'
import * as fsp from 'node:fs/promises'
import * as path from 'node:path'
import { encode } from 'gpt-tokenizer'
import { BENCHMARKS_DIR } from './constants'
import { datasets } from './datasets'
import { models } from './evaluate'

/**
 * Calculate per-format statistics from evaluation results
 */
export function calculateFormatResults(
  results: EvaluationResult[],
  tokenCounts: Record<string, number>,
): FormatResult[] {
  const formatNames = [...new Set(results.map(r => r.format))]

  return formatNames.map((formatName) => {
    const formatResults = results.filter(r => r.format === formatName)
    const correctCount = formatResults.filter(r => r.correct).length
    const totalCount = formatResults.length
    const accuracy = correctCount / totalCount

    // Calculate average tokens across all datasets for this format
    const avgTokens = Object.entries(tokenCounts)
      .filter(([key]) => key.startsWith(`${formatName}-`))
      .reduce((sum, [, tokens]) => sum + tokens, 0) / datasets.length

    const avgInputTokens = formatResults.reduce((sum, r) => sum + r.inputTokens, 0) / totalCount
    const avgLatency = formatResults.reduce((sum, r) => sum + r.latencyMs, 0) / totalCount

    return {
      format: formatName,
      accuracy,
      totalTokens: Math.round(avgTokens),
      avgInputTokens: Math.round(avgInputTokens),
      avgLatency: Math.round(avgLatency),
      correctCount,
      totalCount,
    }
  }).sort((a, b) => b.accuracy - a.accuracy)
}

/**
 * Generate embeddable markdown report from results
 */
export function generateMarkdownReport(
  formatResults: FormatResult[],
  results: EvaluationResult[],
  questions: Question[],
  tokenCounts: Record<string, number>,
): string {
  const lines: string[] = [
    '### Retrieval Accuracy',
    '',
  ]

  const toon = formatResults.find(r => r.format === 'toon')
  const json = formatResults.find(r => r.format === 'json')

  // Model-by-model breakdown (most interesting result)
  const modelCount = Object.keys(models).length
  lines.push(`Tested across **${modelCount} ${modelCount === 1 ? 'LLM' : 'LLMs'}** with data retrieval tasks:`, '', '```')

  for (const modelName of Object.keys(models)) {
    const modelResults = formatResults.map((fr) => {
      const modelFormatResults = results.filter(r => r.model === modelName && r.format === fr.format)
      const correctCount = modelFormatResults.filter(r => r.correct).length
      const totalCount = modelFormatResults.length
      const accuracy = totalCount > 0 ? correctCount / totalCount : 0

      return {
        format: fr.format,
        accuracy,
        correctCount,
        totalCount,
      }
    }).sort((a, b) => b.accuracy - a.accuracy)

    const bestResult = modelResults[0]!
    const bar = createTokenBar(bestResult.accuracy, 1, 20)

    lines.push(`${modelName.padEnd(20)} ${bar} ${(bestResult.accuracy * 100).toFixed(1)}% accuracy`)
  }

  lines.push('```', '')

  // Summary comparison
  if (toon && json) {
    const tokenSavings = ((1 - toon.totalTokens / json.totalTokens) * 100).toFixed(1)
    lines.push(
      `**TOON achieves ${(toon.accuracy * 100).toFixed(1)}% accuracy (vs JSON's ${(json.accuracy * 100).toFixed(1)}%) while using ${tokenSavings}% fewer tokens.**`,
      '',
    )
  }

  // Simple format comparison table
  lines.push(
    '| Format | Accuracy | Average Tokens |',
    '| ------ | -------- | -------------- |',
  )

  for (const result of formatResults) {
    lines.push(
      `| \`${result.format}\` | ${(result.accuracy * 100).toFixed(1)}% | ${result.totalTokens.toLocaleString()} |`,
    )
  }

  lines.push('', '<details>', '<summary><strong>View detailed breakdown by dataset and model</strong></summary>', '', '#### Performance by Dataset', '')

  for (const dataset of datasets) {
    lines.push(`##### ${dataset.description}`, '')

    const datasetResults = formatResults.map((fr) => {
      const datasetFormatResults = results.filter(r => r.questionId.includes(dataset.name) || questions.find(q => q.id === r.questionId)?.dataset === dataset.name)
      if (datasetFormatResults.length === 0)
        return undefined

      const formatDatasetResults = datasetFormatResults.filter(r => r.format === fr.format)
      if (formatDatasetResults.length === 0)
        return undefined

      const correctCount = formatDatasetResults.filter(r => r.correct).length
      const totalCount = formatDatasetResults.length
      const accuracy = totalCount > 0 ? correctCount / totalCount : 0

      // Get token count for this dataset+format
      const tokenKey = `${fr.format}-${dataset.name}`
      const tokens = tokenCounts[tokenKey] || fr.totalTokens

      return {
        format: fr.format,
        accuracy,
        tokens,
        correctCount,
        totalCount,
      }
    }).filter(Boolean) as { format: string, accuracy: number, tokens: number, correctCount: number, totalCount: number }[]

    if (datasetResults.length === 0)
      continue

    // Sort by efficiency
    datasetResults.sort((a, b) => {
      const effA = (a.accuracy ** 2) / (a.tokens / 1000)
      const effB = (b.accuracy ** 2) / (b.tokens / 1000)
      return effB - effA
    })

    lines.push(
      '| Format | Accuracy | Tokens | Correct/Total |',
      '|--------|----------|--------|---------------|',
    )

    for (const result of datasetResults.slice(0, 6)) {
      lines.push(
        `| \`${result.format}\` | ${(result.accuracy * 100).toFixed(1)}% | ${result.tokens.toLocaleString()} | ${result.correctCount}/${result.totalCount} |`,
      )
    }

    lines.push('')
  }

  // Model breakdown
  lines.push('', '#### Performance by Model', '')

  for (const modelName of Object.keys(models)) {
    lines.push(`##### ${modelName}`, '')

    const modelResults = formatResults.map((fr) => {
      const modelFormatResults = results.filter(r => r.model === modelName && r.format === fr.format)
      const correctCount = modelFormatResults.filter(r => r.correct).length
      const totalCount = modelFormatResults.length
      const accuracy = correctCount / totalCount

      return {
        format: fr.format,
        accuracy,
        correctCount,
        totalCount,
      }
    }).sort((a, b) => b.accuracy - a.accuracy)

    lines.push('| Format | Accuracy | Correct/Total |', '|--------|----------|---------------|')

    for (const result of modelResults) {
      lines.push(`| \`${result.format}\` | ${(result.accuracy * 100).toFixed(1)}% | ${result.correctCount}/${result.totalCount} |`)
    }

    lines.push('')
  }

  // Methodology
  lines.push(
    '',
    '#### Methodology',
    '',
    '- **Semantic validation**: LLM-as-judge validates responses semantically (not exact string matching).',
    '- **Token counting**: Using `gpt-tokenizer` with `o200k_base` encoding.',
    '- **Question types**: Field retrieval, aggregation, and filtering tasks.',
    '- **Real data**: Faker.js-generated datasets + GitHub repositories.',
    '',
    '</details>',
    '',
  )

  return lines.join('\n')
}

/**
 * Calculate token counts for all format+dataset combinations
 */
export function calculateTokenCounts(
  formatters: Record<string, (data: any) => string>,
): Record<string, number> {
  const tokenCounts: Record<string, number> = {}

  for (const [formatName, formatter] of Object.entries(formatters)) {
    for (const dataset of datasets) {
      const formatted = formatter(dataset.data)
      const key = `${formatName}-${dataset.name}`
      tokenCounts[key] = encode(formatted).length
    }
  }

  return tokenCounts
}

/**
 * Save results to disk
 */
export async function saveResults(
  results: EvaluationResult[],
  formatResults: FormatResult[],
  questions: Question[],
  tokenCounts: Record<string, number>,
): Promise<string> {
  const resultsDir = path.join(BENCHMARKS_DIR, 'results', 'accuracy')
  await fsp.mkdir(resultsDir, { recursive: true })

  // Save raw results
  await fsp.writeFile(
    path.join(resultsDir, 'raw-results.json'),
    JSON.stringify(results, undefined, 2),
  )

  // Save summary
  await fsp.writeFile(
    path.join(resultsDir, 'summary.json'),
    JSON.stringify({
      formatResults,
      questions: questions.length,
      models: Object.keys(models),
      datasets: datasets.map(d => ({ name: d.name, description: d.description })),
      tokenCounts,
      timestamp: new Date().toISOString(),
    }, undefined, 2),
  )

  // Generate markdown report
  const report = generateMarkdownReport(formatResults, results, questions, tokenCounts)
  await fsp.writeFile(
    path.join(resultsDir, 'report.md'),
    report,
  )

  return resultsDir
}

/**
 * Generate visual bar chart for token counts
 */
function createTokenBar(tokens: number, maxTokens: number, width = 30): string {
  const filled = Math.round((tokens / maxTokens) * width)
  const empty = width - filled
  return '█'.repeat(filled) + '░'.repeat(empty)
}
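A hedged sketch of how these reporting helpers could be chained after an evaluation run (all exported names below appear in this hunk; the wrapper function itself is illustrative):

```ts
// Sketch only: wire the reporting helpers together once evaluation results exist.
import type { EvaluationResult, Question } from './types'
import { formatters } from './formatters'
import { calculateFormatResults, calculateTokenCounts, saveResults } from './report'

async function report(results: EvaluationResult[], questions: Question[]) {
  const tokenCounts = calculateTokenCounts(formatters)                // tokens per format+dataset
  const formatResults = calculateFormatResults(results, tokenCounts)  // accuracy + avg tokens per format
  const outDir = await saveResults(results, formatResults, questions, tokenCounts)
  console.log(`Report written to ${outDir}`)
}
```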
35 benchmarks/src/types.ts Normal file
@@ -0,0 +1,35 @@
export interface Dataset {
  name: string
  description: string
  data: any
}

export interface Question {
  id: string
  prompt: string
  groundTruth: string
  type: 'field-retrieval' | 'aggregation' | 'filtering' | 'comparison'
  dataset: string
}

export interface EvaluationResult {
  questionId: string
  format: string
  model: string
  expected: string
  actual: string
  correct: boolean
  inputTokens: number
  outputTokens: number
  latencyMs: number
}

export interface FormatResult {
  format: string
  accuracy: number
  totalTokens: number
  avgInputTokens: number
  avgLatency: number
  correctCount: number
  totalCount: number
}
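For reference, a literal that would satisfy the `Question` interface above (the values are invented):

```ts
// Sketch only: an example Question value; fields mirror the interface in types.ts.
import type { Question } from './types'

const example: Question = {
  id: 'q1',
  prompt: 'What is the salary of Alice?',
  groundTruth: '95000',
  type: 'field-retrieval', // one of the four allowed question types
  dataset: 'tabular',
}
```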
@@ -1,158 +0,0 @@
| Example | JSON | TOON | Tokens Saved | Reduction |
| ------- | ---- | ---- | ------------ | --------- |
| 👤 Simple user object | 31 | 18 | 13 | **41.9%** |
| 🏷️ User with tags | 48 | 28 | 20 | **41.7%** |
| 📦 Small product catalog | 117 | 49 | 68 | **58.1%** |
| 👥 API response with users | 123 | 53 | 70 | **56.9%** |
| ⚙️ Nested configuration | 68 | 42 | 26 | **38.2%** |
| 🛒 E-commerce order | 163 | 94 | 69 | **42.3%** |
| 📊 Analytics data | 209 | 94 | 115 | **55.0%** |
| 📈 Large dataset (50 records) | 2159 | 762 | 1397 | **64.7%** |
| **Total** | **2918** | **1140** | **1778** | **60.9%** |

<details>
<summary><strong>View detailed results</strong></summary>

### 📦 Small product catalog

**Savings: 68 tokens (58.1% reduction)**

**JSON** (117 tokens):

```json
{
  "items": [
    {
      "sku": "A1",
      "name": "Widget",
      "qty": 2,
      "price": 9.99
    },
    {
      "sku": "B2",
      "name": "Gadget",
      "qty": 1,
      "price": 14.5
    },
    {
      "sku": "C3",
      "name": "Doohickey",
      "qty": 5,
      "price": 7.25
    }
  ]
}
```

**TOON** (49 tokens):

```
items[3]{sku,name,qty,price}:
  A1,Widget,2,9.99
  B2,Gadget,1,14.5
  C3,Doohickey,5,7.25
```

---

### 👥 API response with users

**Savings: 70 tokens (56.9% reduction)**

**JSON** (123 tokens):

```json
{
  "users": [
    {
      "id": 1,
      "name": "Alice",
      "email": "alice@example.com",
      "active": true
    },
    {
      "id": 2,
      "name": "Bob",
      "email": "bob@example.com",
      "active": true
    },
    {
      "id": 3,
      "name": "Charlie",
      "email": "charlie@example.com",
      "active": false
    }
  ],
  "total": 3,
  "page": 1
}
```

**TOON** (53 tokens):

```
users[3]{id,name,email,active}:
  1,Alice,alice@example.com,true
  2,Bob,bob@example.com,true
  3,Charlie,charlie@example.com,false
total: 3
page: 1
```

---

### 📊 Analytics data

**Savings: 115 tokens (55.0% reduction)**

**JSON** (209 tokens):

```json
{
  "metrics": [
    {
      "date": "2025-01-01",
      "views": 1234,
      "clicks": 89,
      "conversions": 12
    },
    {
      "date": "2025-01-02",
      "views": 2345,
      "clicks": 156,
      "conversions": 23
    },
    {
      "date": "2025-01-03",
      "views": 1890,
      "clicks": 123,
      "conversions": 18
    },
    {
      "date": "2025-01-04",
      "views": 3456,
      "clicks": 234,
      "conversions": 34
    },
    {
      "date": "2025-01-05",
      "views": 2789,
      "clicks": 178,
      "conversions": 27
    }
  ]
}
```

**TOON** (94 tokens):

```
metrics[5]{date,views,clicks,conversions}:
  2025-01-01,1234,89,12
  2025-01-02,2345,156,23
  2025-01-03,1890,123,18
  2025-01-04,3456,234,34
  2025-01-05,2789,178,27
```

</details>
@@ -26,7 +26,7 @@
    "dist"
  ],
  "scripts": {
    "automd": "tsx scripts/generate-bench.ts && automd",
    "automd": "automd",
    "build": "tsdown",
    "lint": "eslint .",
    "lint:fix": "eslint . --fix",
@@ -35,16 +35,16 @@
    "release": "bumpp"
  },
  "devDependencies": {
    "@antfu/eslint-config": "^6.0.0",
    "@antfu/eslint-config": "^6.1.0",
    "@types/node": "^24.9.1",
    "automd": "^0.4.2",
    "bumpp": "^10.3.1",
    "eslint": "^9.38.0",
    "gpt-tokenizer": "^3.2.0",
    "tsdown": "^0.15.9",
    "tsdown": "^0.15.10",
    "tsx": "^4.20.6",
    "typescript": "^5.9.3",
    "vitest": "^3.2.4"
    "vitest": "^4.0.3"
  },
  "pnpm": {
    "onlyBuiltDependencies": [

614 pnpm-lock.yaml generated
File diff suppressed because it is too large

2 pnpm-workspace.yaml Normal file
@@ -0,0 +1,2 @@
packages:
  - benchmarks
@@ -1,213 +0,0 @@
import * as fsp from 'node:fs/promises'
import * as path from 'node:path'
import * as url from 'node:url'
import { encode } from 'gpt-tokenizer' // o200k_base encoding (default)
import { encode as encodeToon } from '../src/index'

interface BenchmarkResult {
  name: string
  emoji: string
  jsonTokens: number
  toonTokens: number
  savings: number
  savingsPercent: string
}

const rootDir = url.fileURLToPath(new URL('../', import.meta.url))
const benchPath = path.join(rootDir, 'docs', 'benchmarks.md')

const BENCHMARK_EXAMPLES = [
  {
    name: 'Simple user object',
    emoji: '👤',
    data: {
      id: 123,
      name: 'Alice',
      email: 'alice@example.com',
      active: true,
    },
  },
  {
    name: 'User with tags',
    emoji: '🏷️',
    data: {
      user: {
        id: 123,
        name: 'Ada',
        tags: ['reading', 'gaming', 'coding'],
        active: true,
      },
    },
  },
  {
    name: 'Small product catalog',
    emoji: '📦',
    data: {
      items: [
        { sku: 'A1', name: 'Widget', qty: 2, price: 9.99 },
        { sku: 'B2', name: 'Gadget', qty: 1, price: 14.5 },
        { sku: 'C3', name: 'Doohickey', qty: 5, price: 7.25 },
      ],
    },
  },
  {
    name: 'API response with users',
    emoji: '👥',
    data: {
      users: [
        { id: 1, name: 'Alice', email: 'alice@example.com', active: true },
        { id: 2, name: 'Bob', email: 'bob@example.com', active: true },
        { id: 3, name: 'Charlie', email: 'charlie@example.com', active: false },
      ],
      total: 3,
      page: 1,
    },
  },
  {
    name: 'Nested configuration',
    emoji: '⚙️',
    data: {
      database: {
        host: 'localhost',
        port: 5432,
        credentials: {
          username: 'dbuser',
          password: 'secret123',
        },
      },
      cache: {
        enabled: true,
        ttl: 3600,
      },
    },
  },
  {
    name: 'E-commerce order',
    emoji: '🛒',
    data: {
      orderId: 'ORD-2025-001',
      customer: {
        id: 456,
        name: 'Jane Doe',
        email: 'jane@example.com',
      },
      items: [
        { sku: 'PROD-A', name: 'Premium Widget', quantity: 2, price: 29.99 },
        { sku: 'PROD-B', name: 'Deluxe Gadget', quantity: 1, price: 49.99 },
      ],
      subtotal: 109.97,
      tax: 10.99,
      total: 120.96,
      status: 'pending',
    },
  },
  {
    name: 'Analytics data',
    emoji: '📊',
    data: {
      metrics: [
        { date: '2025-01-01', views: 1234, clicks: 89, conversions: 12 },
        { date: '2025-01-02', views: 2345, clicks: 156, conversions: 23 },
        { date: '2025-01-03', views: 1890, clicks: 123, conversions: 18 },
        { date: '2025-01-04', views: 3456, clicks: 234, conversions: 34 },
        { date: '2025-01-05', views: 2789, clicks: 178, conversions: 27 },
      ],
    },
  },
  {
    name: 'Large dataset (50 records)',
    emoji: '📈',
    data: {
      records: Array.from({ length: 50 }, (_, i) => ({
        id: i + 1,
        name: `User ${i + 1}`,
        email: `user${i + 1}@example.com`,
        score: (i * 7) % 100,
        active: i % 3 !== 0,
      })),
    },
  },
] as const

const DETAILED_EXAMPLE_INDICES = [2, 3, 6] // Small product catalog, API response, Analytics data

// Calculate total savings
let totalJsonTokens = 0
let totalToonTokens = 0

const results: BenchmarkResult[] = []

for (const example of BENCHMARK_EXAMPLES) {
  const jsonString = JSON.stringify(example.data, null, 2)
  const toonString = encodeToon(example.data)

  const jsonTokens = encode(jsonString).length
  const toonTokens = encode(toonString).length
  const savings = jsonTokens - toonTokens
  const savingsPercent = ((savings / jsonTokens) * 100).toFixed(1)

  totalJsonTokens += jsonTokens
  totalToonTokens += toonTokens

  results.push({
    name: example.name,
    emoji: example.emoji,
    jsonTokens,
    toonTokens,
    savings,
    savingsPercent,
  })
}

const totalSavings = totalJsonTokens - totalToonTokens
const totalSavingsPercent = ((totalSavings / totalJsonTokens) * 100).toFixed(1)

// Generate markdown content matching README style
const summaryRows = results
  .map(result => `| ${result.emoji} ${result.name} | ${result.jsonTokens} | ${result.toonTokens} | ${result.savings} | **${result.savingsPercent}%** |`)
  .join('\n')

const detailedExamples = DETAILED_EXAMPLE_INDICES
  .map((exampleIndex, i) => {
    const example = BENCHMARK_EXAMPLES[exampleIndex]!
    const result = results[exampleIndex]!
    const separator = i < DETAILED_EXAMPLE_INDICES.length - 1 ? '\n\n---' : ''

    return `### ${result.emoji} ${result.name}

**Savings: ${result.savings} tokens (${result.savingsPercent}% reduction)**

**JSON** (${result.jsonTokens} tokens):

\`\`\`json
${JSON.stringify(example.data, null, 2)}
\`\`\`

**TOON** (${result.toonTokens} tokens):

\`\`\`
${encodeToon(example.data)}
\`\`\`${separator}`
  })
  .join('\n\n')

const markdown = `
| Example | JSON | TOON | Tokens Saved | Reduction |
| ------- | ---- | ---- | ------------ | --------- |
${summaryRows}
| **Total** | **${totalJsonTokens}** | **${totalToonTokens}** | **${totalSavings}** | **${totalSavingsPercent}%** |

<details>
<summary><strong>View detailed results</strong></summary>

${detailedExamples}

</details>
`.trimStart()

console.log(markdown)

await fsp.mkdir(path.join(rootDir, 'docs'), { recursive: true })
await fsp.writeFile(benchPath, markdown, 'utf-8')

console.log(`✅ Benchmark written to ${benchPath}`)