- Replace 6 compound Likert questions with 12 atomic ones grouped by dimension (syntax, expressiveness, data/IO, errors, overall); drop free-form question. Responses now stored as ints, not strings. - Back-compat layer maps legacy keys to new dimensions so existing results still render. - Parallelize run-all with ThreadPoolExecutor (configurable workers) and add a thread-safe min-request-interval rate limiter to the Anthropic provider. - Add new tasks: path_normalizer, todo_manager, currency_converter, locale_weather_url, network_info_parser, url_normalizer.
42 lines
1.4 KiB
Python
from __future__ import annotations

import tomllib
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any

|
@dataclass
|
|
class Config:
|
|
lush_binary: Path
|
|
max_retries: int = 3
|
|
timeout_seconds: float = 10.0
|
|
normalize_whitespace: bool = True
|
|
output_dir: Path = Path("results")
|
|
max_workers: int = 4
|
|
provider_configs: dict[str, dict[str, Any]] = field(default_factory=dict)
|
|
|
|
@classmethod
|
|
def load(cls, path: Path | None = None) -> Config:
|
|
if path is None:
|
|
path = Path(__file__).parent.parent / "config.toml"
|
|
raw = tomllib.loads(path.read_text())
|
|
|
|
lush = raw.get("lush", {})
|
|
agent = raw.get("agent", {})
|
|
results = raw.get("results", {})
|
|
|
|
# Collect provider configs (any top-level section not in known keys)
|
|
known_sections = {"lush", "agent", "results"}
|
|
provider_configs = {k: v for k, v in raw.items() if k not in known_sections and isinstance(v, dict)}
|
|
|
|
return cls(
|
|
lush_binary=Path(lush["binary"]),
|
|
max_retries=agent.get("max_retries", 3),
|
|
timeout_seconds=agent.get("timeout_seconds", 10.0),
|
|
normalize_whitespace=agent.get("normalize_whitespace", True),
|
|
output_dir=Path(results.get("output_dir", "results")),
|
|
max_workers=agent.get("max_workers", 4),
|
|
provider_configs=provider_configs,
|
|
)
|