Reorganize task categories from opaque a/b to descriptive names
Replace category_a/category_b directories with algorithm, pipeline, environment, filesystem, and process. Add separate mode field (solve/convert) to decouple orchestration from capability grouping. Add per-category summary and questionnaire breakdowns to both terminal report and HTML export.
This commit is contained in:
@@ -88,11 +88,11 @@ def chart_questionnaire_comparison(results: list[BenchmarkResult]) -> str:
|
||||
|
||||
def chart_turns_comparison(results: list[BenchmarkResult]) -> str:
|
||||
"""Bar chart of agent turns per task for bash vs lush."""
|
||||
# Only include tasks where the agent actually solved (turns > 0)
|
||||
cat_a = [r for r in results if r.category == "a"]
|
||||
names = [r.task_name for r in cat_a]
|
||||
bash_turns = [r.bash_result.agent_turns if r.bash_result else 0 for r in cat_a]
|
||||
lush_turns = [r.lush_result.agent_turns if r.lush_result else 0 for r in cat_a]
|
||||
# Only include tasks where the agent actually solved (solve mode)
|
||||
solve = [r for r in results if r.mode == "solve"]
|
||||
names = [r.task_name for r in solve]
|
||||
bash_turns = [r.bash_result.agent_turns if r.bash_result else 0 for r in solve]
|
||||
lush_turns = [r.lush_result.agent_turns if r.lush_result else 0 for r in solve]
|
||||
|
||||
fig, ax = plt.subplots(figsize=(8, 4))
|
||||
x = range(len(names))
|
||||
@@ -103,7 +103,7 @@ def chart_turns_comparison(results: list[BenchmarkResult]) -> str:
|
||||
ax.set_xticks(list(x))
|
||||
ax.set_xticklabels(names, rotation=35, ha="right", fontsize=8)
|
||||
ax.set_ylabel("Agent Turns")
|
||||
ax.set_title("Agent Turns to Solve (Category A)")
|
||||
ax.set_title("Agent Turns to Solve (Solve Mode)")
|
||||
ax.yaxis.set_major_locator(ticker.MaxNLocator(integer=True))
|
||||
ax.legend()
|
||||
ax.grid(axis="y", alpha=0.3)
|
||||
@@ -148,6 +148,151 @@ def chart_per_task_heatmap(results: list[BenchmarkResult]) -> str:
|
||||
return _fig_to_base64(fig)
|
||||
|
||||
|
||||
def chart_per_category_questionnaire(results: list[BenchmarkResult]) -> str:
    """Grouped bar chart: one cluster per category, bars for bash/lush avg scores."""
    from collections import defaultdict

    # Bucket benchmark results by their capability category.
    grouped: dict[str, list[BenchmarkResult]] = defaultdict(list)
    for res in results:
        grouped[res.category].append(res)

    categories = sorted(grouped)
    bash_means: list[float] = []
    lush_means: list[float] = []
    for cat in categories:
        # Pool every answered Likert score in this category, per language.
        pooled: dict[str, list[float]] = {"bash": [], "lush": []}
        for res in grouped[cat]:
            likert = _get_likert_scores(res)
            for pair in likert.values():
                for lang in ("bash", "lush"):
                    if pair[lang] is not None:
                        pooled[lang].append(pair[lang])
        bash_means.append(sum(pooled["bash"]) / len(pooled["bash"]) if pooled["bash"] else 0.0)
        lush_means.append(sum(pooled["lush"]) / len(pooled["lush"]) if pooled["lush"] else 0.0)

    fig, ax = plt.subplots(figsize=(8, 4))
    positions = range(len(categories))
    width = 0.35
    bash_bars = ax.bar([p - width / 2 for p in positions], bash_means, width, label="bash", color=BASH_COLOR)
    lush_bars = ax.bar([p + width / 2 for p in positions], lush_means, width, label="lush", color=LUSH_COLOR)

    ax.set_xticks(list(positions))
    ax.set_xticklabels(categories, fontsize=9)
    ax.set_ylim(0, 5.5)
    ax.set_ylabel("Avg Score (1-5)")
    ax.set_title("Questionnaire Scores by Category")
    ax.legend()
    ax.grid(axis="y", alpha=0.3)

    # Annotate every bar with its value, one decimal place.
    for bar in [*bash_bars, *lush_bars]:
        ax.text(
            bar.get_x() + bar.get_width() / 2,
            bar.get_height() + 0.08,
            f"{bar.get_height():.1f}",
            ha="center",
            va="bottom",
            fontsize=8,
        )

    return _fig_to_base64(fig)
|
||||
|
||||
|
||||
def chart_per_category_radar(results: list[BenchmarkResult]) -> list[tuple[str, str]]:
    """Small-multiples bar charts: one per category showing 6 Likert dimensions for bash vs lush.

    Returns a list of ``(category_name, base64_png)`` tuples, one per
    category, sorted by category name.
    """
    # NOTE: the previous version imported numpy as np but never used it;
    # the dead import has been removed.
    from collections import defaultdict

    # Bucket benchmark results by their capability category.
    by_cat: dict[str, list[BenchmarkResult]] = defaultdict(list)
    for r in results:
        by_cat[r.category].append(r)

    charts: list[tuple[str, str]] = []
    labels = [label for _, label in LIKERT_QUESTIONS]

    for cat in sorted(by_cat):
        cat_results = by_cat[cat]
        # Collect every answered score per question key and language.
        agg: dict[str, dict[str, list[float]]] = {}
        for key, _ in LIKERT_QUESTIONS:
            agg[key] = {"bash": [], "lush": []}
        for r in cat_results:
            scores = _get_likert_scores(r)
            for key in scores:
                for lang in ("bash", "lush"):
                    val = scores[key][lang]
                    if val is not None:
                        agg[key][lang].append(val)

        # Mean per question; 0.0 when no task in this category answered it.
        bash_vals = [sum(agg[k]["bash"]) / len(agg[k]["bash"]) if agg[k]["bash"] else 0.0 for k, _ in LIKERT_QUESTIONS]
        lush_vals = [sum(agg[k]["lush"]) / len(agg[k]["lush"]) if agg[k]["lush"] else 0.0 for k, _ in LIKERT_QUESTIONS]

        fig, ax = plt.subplots(figsize=(6, 3.5))
        y = range(len(labels))
        bar_h = 0.35
        # Paired horizontal bars, offset half a bar width either side of each row.
        ax.barh([i + bar_h / 2 for i in y], bash_vals, bar_h, label="bash", color=BASH_COLOR)
        ax.barh([i - bar_h / 2 for i in y], lush_vals, bar_h, label="lush", color=LUSH_COLOR)
        ax.set_yticks(list(y))
        ax.set_yticklabels(labels, fontsize=8)
        ax.set_xlim(0, 5.5)
        ax.set_title(f"{cat}", fontsize=10)
        ax.legend(fontsize=8, loc="lower right")
        # Flip so the first questionnaire dimension appears at the top.
        ax.invert_yaxis()
        ax.grid(axis="x", alpha=0.3)

        charts.append((cat, _fig_to_base64(fig)))

    return charts
|
||||
|
||||
|
||||
def _build_per_category_summary_html(results: list[BenchmarkResult]) -> str:
    """HTML table: rows=categories, columns=bash/lush pass rate, turns, scores."""
    from collections import defaultdict

    # Bucket benchmark results by their capability category.
    grouped: dict[str, list[BenchmarkResult]] = defaultdict(list)
    for res in results:
        grouped[res.category].append(res)

    row_html = []
    for cat in sorted(grouped):
        members = grouped[cat]

        # Pass counts, over the tasks that actually produced a result.
        bash_pass = sum(1 for r in members if r.bash_result and r.bash_result.all_passed)
        lush_pass = sum(1 for r in members if r.lush_result and r.lush_result.all_passed)
        bash_total = sum(1 for r in members if r.bash_result)
        lush_total = sum(1 for r in members if r.lush_result)

        # Mean agent turns, counting only tasks with turns > 0.
        bash_turns = [r.bash_result.agent_turns for r in members if r.bash_result and r.bash_result.agent_turns > 0]
        lush_turns = [r.lush_result.agent_turns for r in members if r.lush_result and r.lush_result.agent_turns > 0]
        bash_turns_mean = sum(bash_turns) / len(bash_turns) if bash_turns else 0.0
        lush_turns_mean = sum(lush_turns) / len(lush_turns) if lush_turns else 0.0

        # Mean Likert score over every answered question in the category.
        bash_scores: list[float] = []
        lush_scores: list[float] = []
        for r in members:
            likert = _get_likert_scores(r)
            for key in likert:
                if likert[key]["bash"] is not None:
                    bash_scores.append(likert[key]["bash"])
                if likert[key]["lush"] is not None:
                    lush_scores.append(likert[key]["lush"])
        bash_score_mean = sum(bash_scores) / len(bash_scores) if bash_scores else 0.0
        lush_score_mean = sum(lush_scores) / len(lush_scores) if lush_scores else 0.0

        row_html.append(f"""<tr>
<td>{html.escape(cat)}</td>
<td>{bash_pass}/{bash_total}</td><td>{lush_pass}/{lush_total}</td>
<td>{bash_turns_mean:.1f}</td><td>{lush_turns_mean:.1f}</td>
<td>{bash_score_mean:.1f}</td><td>{lush_score_mean:.1f}</td>
</tr>""")

    return f"""<table>
<thead><tr>
<th>Category</th>
<th>Bash Pass</th><th>Lush Pass</th>
<th>Bash Avg Turns</th><th>Lush Avg Turns</th>
<th>Bash Avg Score</th><th>Lush Avg Score</th>
</tr></thead>
<tbody>{"".join(row_html)}</tbody>
</table>"""
|
||||
|
||||
|
||||
def _build_summary_html(results: list[BenchmarkResult]) -> str:
|
||||
rows = []
|
||||
for r in results:
|
||||
@@ -160,7 +305,7 @@ def _build_summary_html(results: list[BenchmarkResult]) -> str:
|
||||
b_turns = str(b.agent_turns) if b else "-"
|
||||
l_turns = str(l.agent_turns) if l else "-"
|
||||
rows.append(f"""<tr>
|
||||
<td>{html.escape(r.task_name)}</td><td>{r.category.upper()}</td>
|
||||
<td>{html.escape(r.task_name)}</td><td>{html.escape(r.category)}</td>
|
||||
<td class="{b_cls}">{b_pass}</td><td>{b_turns}</td>
|
||||
<td class="{l_cls}">{l_pass}</td><td>{l_turns}</td>
|
||||
</tr>""")
|
||||
@@ -215,7 +360,7 @@ def _build_detail_html(results: list[BenchmarkResult]) -> str:
|
||||
|
||||
sections.append(f"""
|
||||
<div class="task-detail">
|
||||
<h3>{html.escape(r.task_name)} <span class="cat">[{r.category}]</span>
|
||||
<h3>{html.escape(r.task_name)} <span class="cat">[{r.category}/{r.mode}]</span>
|
||||
<span class="{"pass" if b_status == "PASS" else "fail"}">bash={b_status}</span>
|
||||
<span class="{"pass" if l_status == "PASS" else "fail"}">lush={l_status}</span>
|
||||
</h3>
|
||||
@@ -238,7 +383,10 @@ def export_html(results_dir: Path, output_path: Path) -> None:
|
||||
chart_questionnaire = chart_questionnaire_comparison(results)
|
||||
chart_turns = chart_turns_comparison(results)
|
||||
chart_heatmap = chart_per_task_heatmap(results)
|
||||
chart_cat_quest = chart_per_category_questionnaire(results)
|
||||
cat_radar_charts = chart_per_category_radar(results)
|
||||
summary_table = _build_summary_html(results)
|
||||
cat_summary_table = _build_per_category_summary_html(results)
|
||||
detail_html = _build_detail_html(results)
|
||||
|
||||
model = results[0].model if results else "unknown"
|
||||
@@ -288,15 +436,24 @@ def export_html(results_dir: Path, output_path: Path) -> None:
|
||||
<h2>Summary</h2>
|
||||
{summary_table}
|
||||
|
||||
<h2>Per-Category Summary</h2>
|
||||
{cat_summary_table}
|
||||
|
||||
<h2>Questionnaire Scores</h2>
|
||||
<div class="chart"><img src="data:image/png;base64,{chart_questionnaire}" alt="Questionnaire comparison"></div>
|
||||
|
||||
<h2>Agent Turns (Category A)</h2>
|
||||
<h2>Questionnaire Scores by Category</h2>
|
||||
<div class="chart"><img src="data:image/png;base64,{chart_cat_quest}" alt="Per-category questionnaire"></div>
|
||||
|
||||
<h2>Agent Turns (Solve Mode)</h2>
|
||||
<div class="chart"><img src="data:image/png;base64,{chart_turns}" alt="Turns comparison"></div>
|
||||
|
||||
<h2>Score Difference Heatmap (Lush - Bash)</h2>
|
||||
<div class="chart"><img src="data:image/png;base64,{chart_heatmap}" alt="Score heatmap"></div>
|
||||
|
||||
<h2>Per-Category Breakdown</h2>
|
||||
{"".join(f'<h3>{cat}</h3><div class="chart"><img src="data:image/png;base64,{img}" alt="{cat} breakdown"></div>' for cat, img in cat_radar_charts)}
|
||||
|
||||
<h2>Per-Task Detail</h2>
|
||||
{detail_html}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user