From b2c58d2b977edbdc26634927481a1a3650c9fb36 Mon Sep 17 00:00:00 2001
From: Johann Schopplich
Date: Mon, 27 Oct 2025 11:49:40 +0100
Subject: [PATCH] chore: fix linting issues

---
 README.md                                    |  2 -
 benchmarks/results/accuracy/accuracy.md      | 96 --------------------
 benchmarks/results/accuracy/raw-results.json |  2 +-
 benchmarks/results/accuracy/report.md        |  2 -
 benchmarks/results/accuracy/summary.json     |  2 +-
 5 files changed, 2 insertions(+), 102 deletions(-)
 delete mode 100644 benchmarks/results/accuracy/accuracy.md

diff --git a/README.md b/README.md
index 271be53..8d7d411 100644
--- a/README.md
+++ b/README.md
@@ -259,7 +259,6 @@ claude-haiku-4-5 ███████████████░░░░
 | `markdown-kv` | 67.9% | 15.436 | 38/56 |
 | `yaml` | 62.5% | 13.129 | 35/56 |
 
-
 #### Performance by Model
 
 ##### gpt-4o-mini
@@ -282,7 +281,6 @@ claude-haiku-4-5 ███████████████░░░░
 | `csv` | 75.5% | 120/159 |
 | `yaml` | 74.8% | 119/159 |
 
-
 #### Methodology
 
 - **Semantic validation**: LLM-as-judge validates responses semantically (not exact string matching).
diff --git a/benchmarks/results/accuracy/accuracy.md b/benchmarks/results/accuracy/accuracy.md
deleted file mode 100644
index e435df6..0000000
--- a/benchmarks/results/accuracy/accuracy.md
+++ /dev/null
@@ -1,96 +0,0 @@
-### Retrieval Accuracy
-
-Tested across **2 LLMs** with data retrieval tasks:
-
-```
-gpt-4o-mini      ██████████████░░░░░░ 72.3% accuracy
-claude-haiku-4-5 ███████████████░░░░░ 76.7% accuracy
-```
-
-**TOON achieves 73.9% accuracy (vs JSON's 73.6%) while using 46.3% fewer tokens.**
-
-| Format | Accuracy | Average Tokens |
-| ------ | -------- | -------------- |
-| `toon` | 73.9% | 4.678 |
-| `json` | 73.6% | 8.713 |
-| `markdown-kv` | 73.6% | 8.649 |
-| `csv` | 72.3% | 4.745 |
-| `yaml` | 71.7% | 7.091 |
-
-<details>
-<summary>View detailed breakdown by dataset and model</summary>
-
-#### Performance by Dataset
-
-##### Uniform employee records (TOON optimal format)
-
-| Format | Accuracy | Tokens | Correct/Total |
-|--------|----------|--------|---------------|
-| `toon` | 72.4% | 2.483 | 84/116 |
-| `csv` | 69.0% | 2.337 | 80/116 |
-| `yaml` | 68.1% | 4.969 | 79/116 |
-| `markdown-kv` | 68.1% | 6.270 | 79/116 |
-| `json` | 68.1% | 6.347 | 79/116 |
-
-##### E-commerce orders with nested structures
-
-| Format | Accuracy | Tokens | Correct/Total |
-|--------|----------|--------|---------------|
-| `toon` | 84.1% | 5.967 | 74/88 |
-| `csv` | 83.0% | 6.735 | 73/88 |
-| `yaml` | 81.8% | 7.328 | 72/88 |
-| `markdown-kv` | 86.4% | 9.110 | 76/88 |
-| `json` | 84.1% | 9.694 | 74/88 |
-
-##### Time-series analytics data
-
-| Format | Accuracy | Tokens | Correct/Total |
-|--------|----------|--------|---------------|
-| `csv` | 72.4% | 1.393 | 42/58 |
-| `toon` | 70.7% | 1.515 | 41/58 |
-| `yaml` | 72.4% | 2.938 | 42/58 |
-| `json` | 74.1% | 3.665 | 43/58 |
-| `markdown-kv` | 70.7% | 3.779 | 41/58 |
-
-##### Popular GitHub repositories
-
-| Format | Accuracy | Tokens | Correct/Total |
-|--------|----------|--------|---------------|
-| `toon` | 64.3% | 8.745 | 36/56 |
-| `csv` | 62.5% | 8.513 | 35/56 |
-| `json` | 67.9% | 15.145 | 38/56 |
-| `markdown-kv` | 67.9% | 15.436 | 38/56 |
-| `yaml` | 62.5% | 13.129 | 35/56 |
-
-
-#### Performance by Model
-
-##### gpt-4o-mini
-
-| Format | Accuracy | Correct/Total |
-|--------|----------|---------------|
-| `toon` | 72.3% | 115/159 |
-| `json` | 71.7% | 114/159 |
-| `markdown-kv` | 70.4% | 112/159 |
-| `csv` | 69.2% | 110/159 |
-| `yaml` | 68.6% | 109/159 |
-
-##### claude-haiku-4-5
-
-| Format | Accuracy | Correct/Total |
-|--------|----------|---------------|
-| `markdown-kv` | 76.7% | 122/159 |
-| `toon` | 75.5% | 120/159 |
-| `json` | 75.5% | 120/159 |
-| `csv` | 75.5% | 120/159 |
-| `yaml` | 74.8% | 119/159 |
-
-
-#### Methodology
-
-- **Semantic validation**: LLM-as-judge validates responses semantically (not exact string matching)
-- **Token counting**: Using `gpt-tokenizer` with `o200k_base` encoding
-- **Question types**: Field retrieval, aggregation, and filtering tasks
-- **Real data**: faker.js-generated datasets + real GitHub repository data
-
-</details>
diff --git a/benchmarks/results/accuracy/raw-results.json b/benchmarks/results/accuracy/raw-results.json index a5a21a0..761a265 100644 --- a/benchmarks/results/accuracy/raw-results.json +++ b/benchmarks/results/accuracy/raw-results.json @@ -17489,4 +17489,4 @@ "outputTokens": 5, "latencyMs": 1537 } -] \ No newline at end of file +] diff --git a/benchmarks/results/accuracy/report.md b/benchmarks/results/accuracy/report.md index 9991de9..b1050bb 100644 --- a/benchmarks/results/accuracy/report.md +++ b/benchmarks/results/accuracy/report.md @@ -62,7 +62,6 @@ claude-haiku-4-5 ███████████████░░░░ | `markdown-kv` | 67.9% | 15.436 | 38/56 | | `yaml` | 62.5% | 13.129 | 35/56 | - #### Performance by Model ##### gpt-4o-mini @@ -85,7 +84,6 @@ claude-haiku-4-5 ███████████████░░░░ | `csv` | 75.5% | 120/159 | | `yaml` | 74.8% | 119/159 | - #### Methodology - **Semantic validation**: LLM-as-judge validates responses semantically (not exact string matching). diff --git a/benchmarks/results/accuracy/summary.json b/benchmarks/results/accuracy/summary.json index b5dddc2..737107e 100644 --- a/benchmarks/results/accuracy/summary.json +++ b/benchmarks/results/accuracy/summary.json @@ -92,4 +92,4 @@ "yaml-github": 13129 }, "timestamp": "2025-10-27T10:46:35.127Z" -} \ No newline at end of file +}