Revamp questionnaire, parallelize run-all, add new tasks

- Replace 6 compound Likert questions with 12 atomic ones grouped by
  dimension (syntax, expressiveness, data/IO, errors, overall); drop
  free-form question. Responses now stored as ints, not strings.
- Back-compat layer maps legacy keys to new dimensions so existing
  results still render.
- Parallelize run-all with ThreadPoolExecutor (configurable workers)
  and add a thread-safe min-request-interval rate limiter to the
  Anthropic provider.
- Add new tasks: path_normalizer, todo_manager, currency_converter,
  locale_weather_url, network_info_parser, url_normalizer.
This commit is contained in:
Cormac Shannon
2026-04-07 19:07:21 +01:00
parent 20e62f60f6
commit 18ce7e57cf
13 changed files with 943 additions and 206 deletions

View File

@@ -0,0 +1,97 @@
name = "path_normalizer"
category = "environment"
mode = "convert"
description = """
Read file paths from stdin, one per line. Normalize each path:
1. Replace a leading "~" with the value of $HOME.
2. Remove trailing slashes (except for root "/").
3. Collapse consecutive slashes into one.
4. Resolve "." components (remove them).
5. Resolve ".." components (go up one directory level).
Output the cleaned path, one per line.
Skip empty lines.
"""
bash_source = '''
#!/bin/bash
while IFS= read -r line || [[ -n "$line" ]]; do
[[ -z "$line" ]] && continue
# Expand tilde
path=$(echo "$line" | sed "s:^~:$HOME:")
# Collapse multiple slashes
path=$(echo "$path" | sed 's:/\\+:/:g')
# Remove trailing slash (but keep root)
path=$(echo "$path" | sed 's:/$::')
[[ -z "$path" ]] && path="/"
# Resolve . and .. components
IFS='/' read -ra parts <<< "$path"
result=()
absolute=""
if [[ "$path" == /* ]]; then
absolute="/"
fi
for part in "${parts[@]}"; do
if [[ "$part" == "." || "$part" == "" ]]; then
continue
elif [[ "$part" == ".." ]]; then
if [[ ${#result[@]} -gt 0 && "${result[-1]}" != ".." ]]; then
unset 'result[${#result[@]}-1]'
elif [[ -z "$absolute" ]]; then
result+=("..")
fi
else
result+=("$part")
fi
done
if [[ -n "$absolute" ]]; then
final="/"
IFS='/'; final+="${result[*]}"
else
IFS='/'; final="${result[*]}"
fi
[[ -z "$final" ]] && final="/"
echo "$final"
done
'''
[[test_cases]]
description = "Tilde expansion and trailing slashes"
stdin = """~/Documents/
~/projects/code
/var/log/"""
expected_stdout = """/Users/testuser/Documents
/Users/testuser/projects/code
/var/log"""
env = { "HOME" = "/Users/testuser" }
[[test_cases]]
description = "Resolving . and .. components"
stdin = """/usr/local/./bin
/home/user/../shared/docs
/a/b/c/../../d"""
expected_stdout = """/usr/local/bin
/home/shared/docs
/a/d"""
[[test_cases]]
description = "Collapsing multiple slashes"
stdin = """/usr//local///bin
/var/log//syslog"""
expected_stdout = """/usr/local/bin
/var/log/syslog"""
[[test_cases]]
description = "Root and relative paths"
stdin = """/
./config/settings
../parent/child"""
expected_stdout = """/
config/settings
../parent/child"""

View File

@@ -0,0 +1,150 @@
name = "todo_manager"
category = "filesystem"
mode = "convert"
description = """
A simple todo list manager. Read commands from stdin, one per line:
add <task text> — append the task to the todo file
list — print all tasks numbered as "NN). <task>"
remove <N> — remove task number N (1-based)
clear — remove all tasks
The todo file is "todo.txt" in the working directory.
When listing, pad task numbers to two digits (01, 02, …).
After "add" or "remove", automatically list the remaining tasks.
If the list is empty, print "No tasks found".
"""
bash_source = '''
#!/bin/bash
TODOFILE="./todo.txt"
list_tasks() {
if [ -f "$TODOFILE" ] && [ -s "$TODOFILE" ]; then
count=1
IFS=$'\\n'
while read -r task; do
num=$count
if [ $count -lt 10 ]; then num="0$count"; fi
echo "$num). $task"
count=$(( count + 1 ))
done < "$TODOFILE"
else
echo "No tasks found"
fi
}
add_task() {
echo "$1" >> "$TODOFILE"
}
remove_task() {
taskNum=$1
totalLines=$(wc -l < "$TODOFILE" | tr -d ' ')
if [ "$taskNum" -gt "$totalLines" ] 2>/dev/null; then
echo "Error: task number $taskNum does not exist!"
return 1
fi
tmpfile="./todo_tmp.txt"
count=1
IFS=$'\\n'
while read -r task; do
if [ "$count" -ne "$taskNum" ]; then
echo "$task" >> "$tmpfile"
fi
count=$(( count + 1 ))
done < "$TODOFILE"
if [ -f "$tmpfile" ]; then
mv "$tmpfile" "$TODOFILE"
else
> "$TODOFILE"
fi
echo "Sucessfully removed task number $taskNum"
}
clear_tasks() {
> "$TODOFILE"
echo "Tasks cleared."
}
if [ ! -f "$TODOFILE" ]; then
touch "$TODOFILE"
fi
while IFS= read -r line || [[ -n "$line" ]]; do
cmd=$(echo "$line" | cut -d' ' -f1)
arg=$(echo "$line" | cut -d' ' -f2-)
case "$cmd" in
add)
add_task "$arg"
list_tasks
;;
list)
list_tasks
;;
remove)
remove_task "$arg"
list_tasks
;;
clear)
clear_tasks
;;
esac
done
'''
[[test_cases]]
description = "Add tasks then list"
stdin = """add Buy groceries
add Walk the dog
list"""
expected_stdout = """01). Buy groceries
01). Buy groceries
02). Walk the dog
01). Buy groceries
02). Walk the dog"""
[[test_cases]]
description = "Add, remove, list"
stdin = """add First task
add Second task
add Third task
remove 2
list"""
expected_stdout = """01). First task
01). First task
02). Second task
01). First task
02). Second task
03). Third task
Sucessfully removed task number 2
01). First task
02). Third task
01). First task
02). Third task"""
[[test_cases]]
description = "Empty list and clear"
stdin = """list
add Something
clear
list"""
expected_stdout = """No tasks found
01). Something
Tasks cleared.
No tasks found"""
[[test_cases]]
description = "Works with pre-existing todo file"
stdin = """list
add Third item
list"""
setup_files = { "todo.txt" = "Existing item one\nExisting item two\n" }
expected_stdout = """01). Existing item one
02). Existing item two
01). Existing item one
02). Existing item two
03). Third item
01). Existing item one
02). Existing item two
03). Third item"""
expected_files = { "todo.txt" = "Existing item one\nExisting item two\nThird item\n" }

View File

@@ -0,0 +1,113 @@
name = "currency_converter"
category = "pipeline"
mode = "convert"
description = """
A currency converter that reads conversion requests from stdin.
Each line has the format: AMOUNT FROM TO RATE
- AMOUNT: a decimal number (e.g., 12.35)
- FROM: 3-letter currency code
- TO: 3-letter currency code
- RATE: the exchange rate from FROM's base to TO's base
Some currencies are pegged to others at fixed rates:
BAM is pegged to EUR at 1.95583
BMD is pegged to USD at 1.0
BND is pegged to SGD at 1.0
DJF is pegged to USD at 177.721
PAB is pegged to USD at 1.0
When a pegged currency is involved, the conversion must account for the
peg coefficient. The formula is: result = amount * (rate / coef_from) * coef_to
where coef is the peg ratio (1 if not pegged).
Output one line per input: "AMOUNT FROM = RESULT TO" with RESULT
computed using bc with scale=2.
For invalid lines (wrong field count or non-numeric amount), output "ERROR: <original line>".
"""
bash_source = '''
#!/bin/bash
pegged_to() {
case "$1" in
BAM) echo "EUR:1.95583" ;;
BMD) echo "USD:1.0" ;;
BND) echo "SGD:1.0" ;;
DJF) echo "USD:177.721" ;;
PAB) echo "USD:1.0" ;;
*) echo "NONE:1" ;;
esac
}
while IFS= read -r line || [[ -n "$line" ]]; do
# Skip empty lines
[[ -z "$line" ]] && continue
# Parse fields
set -- $line
if [[ $# -ne 4 ]]; then
echo "ERROR: $line"
continue
fi
amount=$1
from=$2
to=$3
rate=$4
# Validate amount is numeric
if [[ ! "$amount" =~ ^[0-9]+(\.[0-9]+)?$ ]]; then
echo "ERROR: $line"
continue
fi
# Validate rate is numeric
if [[ ! "$rate" =~ ^[0-9]+(\.[0-9]+)?$ ]]; then
echo "ERROR: $line"
continue
fi
# Get peg info
peg_from=$(pegged_to "$from")
coef_from=$(echo "$peg_from" | cut -d: -f2)
peg_to=$(pegged_to "$to")
coef_to=$(echo "$peg_to" | cut -d: -f2)
# Calculate: result = amount * (rate / coef_from) * coef_to
result=$(echo "scale=8; $amount * ($rate / $coef_from) * $coef_to" | bc)
result=$(printf "%.2f" "$result")
echo "$amount $from = $result $to"
done
'''
[[test_cases]]
description = "Standard conversion with direct rate"
stdin = """100 USD EUR 0.92
50 GBP JPY 188.50"""
expected_stdout = """100 USD = 92.00 EUR
50 GBP = 9425.00 JPY"""
[[test_cases]]
description = "Pegged currency conversions"
stdin = """100 BAM USD 1.08
200 BMD EUR 0.92
50 USD DJF 1.0"""
expected_stdout = """100 BAM = 55.22 USD
200 BMD = 184.00 EUR
50 USD = 8886.05 DJF"""
[[test_cases]]
description = "Invalid input lines"
stdin = """abc EUR USD 0.92
100 USD
100 EUR USD 0.85"""
expected_stdout = """ERROR: abc EUR USD 0.92
ERROR: 100 USD
100 EUR = 85.00 USD"""
[[test_cases]]
description = "Pegged-to-pegged conversion"
stdin = "100 BAM BMD 1.08"
expected_stdout = "100 BAM = 55.22 BMD"

View File

@@ -0,0 +1,76 @@
name = "locale_weather_url"
category = "pipeline"
mode = "convert"
description = """
Construct weather API URLs from locale and location information.
Read lines from stdin in the format: LANG_CODE LOCATION
where LANG_CODE is a 2-letter locale (e.g., "en", "fr", "de")
and LOCATION is a city/place name (may contain spaces).
For each line, construct a URL in the format:
https://LANG.wttr.in/LOCATION
Where spaces in the location are replaced with "+" characters.
If LANG_CODE is empty or invalid (not exactly 2 lowercase letters),
default to "en".
Skip empty lines. Output one URL per input line.
"""
bash_source = '''
#!/bin/bash
while IFS= read -r line || [[ -n "$line" ]]; do
[[ -z "$line" ]] && continue
# Extract lang code (first field)
lang=$(echo "$line" | awk '{print $1}')
# Extract location (everything after first field)
location=$(echo "$line" | sed 's/^[^ ]* *//')
# Validate lang code: must be exactly 2 lowercase letters
if [[ ! "$lang" =~ ^[a-z]{2}$ ]]; then
lang="en"
fi
# If location is same as lang (single-word line), skip
if [[ "$location" == "$lang" || -z "$location" ]]; then
location=""
fi
# Replace spaces with +
location=$(echo "$location" | tr ' ' '+')
echo "https://$lang.wttr.in/$location"
done
'''
[[test_cases]]
description = "Various locales and locations"
stdin = """en New York
fr Paris
de Berlin
ja Tokyo"""
expected_stdout = """https://en.wttr.in/New+York
https://fr.wttr.in/Paris
https://de.wttr.in/Berlin
https://ja.wttr.in/Tokyo"""
[[test_cases]]
description = "Multi-word locations"
stdin = """en San Francisco
es Buenos Aires
pt Rio de Janeiro"""
expected_stdout = """https://en.wttr.in/San+Francisco
https://es.wttr.in/Buenos+Aires
https://pt.wttr.in/Rio+de+Janeiro"""
[[test_cases]]
description = "Invalid or missing locale defaults to en"
stdin = """ENG London
123 Moscow
x Rome"""
expected_stdout = """https://en.wttr.in/London
https://en.wttr.in/Moscow
https://en.wttr.in/Rome"""

View File

@@ -0,0 +1,86 @@
name = "network_info_parser"
category = "pipeline"
mode = "convert"
description = """
Parse network interface configuration from stdin (in "ip addr show" format)
and extract a summary of each interface.
For each interface block, output a line:
IFACE: <name> IP: <ipv4_addr> MASK: /<prefix_len>
An interface block starts with a line like:
2: eth0: <BROADCAST,MULTICAST,UP> mtu 1500 ...
and contains inet lines like:
inet 192.168.1.100/24 brd 192.168.1.255 scope global eth0
If an interface has no inet line, output:
IFACE: <name> IP: none MASK: none
Skip the loopback interface (lo).
"""
bash_source = '''
#!/bin/bash
current_iface=""
found_ip=""
found_mask=""
flush_iface() {
if [[ -n "$current_iface" && "$current_iface" != "lo" ]]; then
if [[ -n "$found_ip" ]]; then
echo "IFACE: $current_iface IP: $found_ip MASK: /$found_mask"
else
echo "IFACE: $current_iface IP: none MASK: none"
fi
fi
}
while IFS= read -r line || [[ -n "$line" ]]; do
# Detect interface line: starts with a number followed by colon
if echo "$line" | grep -qE '^[0-9]+:'; then
flush_iface
current_iface=$(echo "$line" | awk -F: '{print $2}' | sed 's/^[[:space:]]*//' | awk '{print $1}')
found_ip=""
found_mask=""
fi
# Detect inet line (IPv4 only, not inet6)
if echo "$line" | grep -qE '^[[:space:]]+inet [0-9]'; then
ip_cidr=$(echo "$line" | awk '{print $2}')
found_ip=$(echo "$ip_cidr" | cut -d/ -f1)
found_mask=$(echo "$ip_cidr" | cut -d/ -f2)
fi
done
flush_iface
'''
[[test_cases]]
description = "Two interfaces with IPs"
stdin = """1: lo: <LOOPBACK,UP> mtu 65536
inet 127.0.0.1/8 scope host lo
2: eth0: <BROADCAST,MULTICAST,UP> mtu 1500
inet 192.168.1.100/24 brd 192.168.1.255 scope global eth0
3: wlan0: <BROADCAST,MULTICAST,UP> mtu 1500
inet 10.0.0.42/16 brd 10.0.255.255 scope global wlan0"""
expected_stdout = """IFACE: eth0 IP: 192.168.1.100 MASK: /24
IFACE: wlan0 IP: 10.0.0.42 MASK: /16"""
[[test_cases]]
description = "Interface with no IP"
stdin = """1: lo: <LOOPBACK,UP> mtu 65536
inet 127.0.0.1/8 scope host lo
2: eth0: <BROADCAST,MULTICAST,UP> mtu 1500
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0"""
expected_stdout = """IFACE: eth0 IP: none MASK: none
IFACE: docker0 IP: 172.17.0.1 MASK: /16"""
[[test_cases]]
description = "Single interface"
stdin = """1: lo: <LOOPBACK,UP> mtu 65536
inet 127.0.0.1/8 scope host lo
2: enp3s0: <BROADCAST,MULTICAST,UP> mtu 9000
inet 10.10.10.5/8 brd 10.255.255.255 scope global enp3s0"""
expected_stdout = "IFACE: enp3s0 IP: 10.10.10.5 MASK: /8"

View File

@@ -0,0 +1,79 @@
name = "url_normalizer"
category = "pipeline"
mode = "convert"
description = """
Read URLs from stdin, one per line. Normalize each URL:
1. If the URL already starts with "https://", keep it as-is.
2. If it starts with "http://", keep it as-is.
3. Otherwise, prepend "http://" to it.
4. After normalization, validate that the URL matches a basic pattern:
it must have a protocol (http:// or https://), followed by at least
one character, a dot, and at least one more character for the domain.
5. Output the normalized URL, or "INVALID: <original>" for invalid entries.
Skip empty lines silently.
"""
bash_source = '''
#!/bin/bash
while IFS= read -r line || [[ -n "$line" ]]; do
# Skip empty lines
[[ -z "$line" ]] && continue
# Trim whitespace
url=$(echo "$line" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')
[[ -z "$url" ]] && continue
original="$url"
# Check if it already has https://
prefix8=$(echo "$url" | cut -c1-8)
if [[ "$prefix8" == "https://" ]]; then
normalized="$url"
else
prefix7=$(echo "$url" | cut -c1-7)
if [[ "$prefix7" == "http://" ]]; then
normalized="$url"
else
normalized="http://$url"
fi
fi
# Validate: protocol + something.something
if echo "$normalized" | grep -qE '^https?://[^/]+\.[^/]+'; then
echo "$normalized"
else
echo "INVALID: $original"
fi
done
'''
[[test_cases]]
description = "URLs with and without protocol"
stdin = """example.com
http://example.com
https://example.com
www.google.com/search?q=test"""
expected_stdout = """http://example.com
http://example.com
https://example.com
http://www.google.com/search?q=test"""
[[test_cases]]
description = "Invalid entries"
stdin = """notaurl
https://valid.example.com
just-a-word"""
expected_stdout = """INVALID: notaurl
https://valid.example.com
INVALID: just-a-word"""
[[test_cases]]
description = "Mixed valid and empty lines"
stdin = """https://secure.site.org/path
api.service.io:8080
http://old.site.net"""
expected_stdout = """https://secure.site.org/path
http://api.service.io:8080
http://old.site.net"""