Compare commits

..

11 Commits

Author SHA1 Message Date
alex wiesner
421892e93d rm pi 2026-04-10 23:43:06 +01:00
alex wiesner
ec378ebd28 sync local pi changes 2026-04-09 23:14:57 +01:00
alex wiesner
18245c778e changes 2026-04-09 11:31:06 +01:00
alex wiesner
8fec8e28f4 Merge branch feature/web-search-tools 2026-04-09 11:27:31 +01:00
alex wiesner
1b3fcda259 chore: fix web search test script 2026-04-09 11:20:40 +01:00
alex wiesner
472de4ebaf feat: add web search tools extension 2026-04-09 11:16:53 +01:00
alex wiesner
c2d7cd53ce feat: add web_search tool 2026-04-09 11:13:21 +01:00
alex wiesner
7db96b025b test: add web tool output formatting 2026-04-09 11:08:52 +01:00
alex wiesner
30cfe7e8f1 test: add exa web provider adapter 2026-04-09 11:08:09 +01:00
alex wiesner
5e1315a20a test: add web search config loader 2026-04-09 11:07:12 +01:00
alex wiesner
775fbf7c02 chore: ignore local worktrees 2026-04-09 11:03:53 +01:00
72 changed files with 126 additions and 4274 deletions

View File

@@ -3,6 +3,3 @@ if status is-interactive
end end
fish_config theme choose "Catppuccin Mocha" --color-theme=dark fish_config theme choose "Catppuccin Mocha" --color-theme=dark
# Added by codebase-memory-mcp install
export PATH="/home/alex/dotfiles/.local/bin:$PATH"

View File

@@ -1,5 +1,6 @@
# This file contains fish universal variable definitions. # This file contains fish universal variable definitions.
# VERSION: 3.0 # VERSION: 3.0
SETUVAR OPENCODE_ENABLE_EXA:1
SETUVAR __fish_initialized:4300 SETUVAR __fish_initialized:4300
SETUVAR _fisher_catppuccin_2F_fish_files:\x7e/\x2econfig/fish/themes/Catppuccin\x20Frappe\x2etheme\x1e\x7e/\x2econfig/fish/themes/Catppuccin\x20Macchiato\x2etheme\x1e\x7e/\x2econfig/fish/themes/Catppuccin\x20Mocha\x2etheme\x1e\x7e/\x2econfig/fish/themes/static SETUVAR _fisher_catppuccin_2F_fish_files:\x7e/\x2econfig/fish/themes/Catppuccin\x20Frappe\x2etheme\x1e\x7e/\x2econfig/fish/themes/Catppuccin\x20Macchiato\x2etheme\x1e\x7e/\x2econfig/fish/themes/Catppuccin\x20Mocha\x2etheme\x1e\x7e/\x2econfig/fish/themes/static
SETUVAR _fisher_jorgebucaran_2F_fisher_files:\x7e/\x2econfig/fish/functions/fisher\x2efish\x1e\x7e/\x2econfig/fish/completions/fisher\x2efish SETUVAR _fisher_jorgebucaran_2F_fisher_files:\x7e/\x2econfig/fish/functions/fisher\x2efish\x1e\x7e/\x2econfig/fish/completions/fisher\x2efish

View File

@@ -1,9 +0,0 @@
# Launch opencode on a random free TCP port, auto-starting a tmux session when
# not already inside one (tmux panes host the visual subagent output).
function c --wraps=opencode --description 'opencode (auto-starts tmux for visual subagent panes)'
    # Ask the OS for an ephemeral free port by binding to port 0, then close it.
    # NOTE(review): the socket is released before opencode binds the port, so
    # another process could grab it in between (TOCTOU race) — presumably
    # acceptable for local interactive use; confirm.
    set -l port (python -c 'import socket; s=socket.socket(); s.bind(("127.0.0.1", 0)); print(s.getsockname()[1]); s.close()')
    if not set -q TMUX
        # Not inside tmux: start a new tmux session whose command is opencode,
        # forwarding the chosen port and any extra arguments.
        tmux new-session opencode --port $port $argv
    else
        # Already inside tmux: run opencode directly in the current pane.
        opencode --port $port $argv
    end
end

View File

@@ -1,9 +0,0 @@
# Same as `c`, but resumes the previous opencode session via --continue.
function cc --wraps='opencode --continue' --description 'opencode --continue (auto-starts tmux for visual subagent panes)'
    # Pick an ephemeral free port by binding to port 0 and releasing it.
    # NOTE(review): port is freed before opencode binds it (TOCTOU race) —
    # presumably fine for local interactive use; confirm.
    set -l port (python -c 'import socket; s=socket.socket(); s.bind(("127.0.0.1", 0)); print(s.getsockname()[1]); s.close()')
    if not set -q TMUX
        # Outside tmux: start a tmux session running opencode --continue so
        # subagent panes have somewhere to appear.
        tmux new-session opencode --port $port --continue $argv
    else
        # Already inside tmux: run opencode --continue in the current pane.
        opencode --port $port --continue $argv
    end
end

View File

@@ -1,3 +0,0 @@
# Thin alias: `co` forwards all arguments to the copilot CLI.
# --wraps=copilot gives `co` copilot's tab completions.
function co --wraps=copilot --description 'alias co copilot'
    copilot $argv
end

View File

@@ -58,7 +58,7 @@ input-field {
inner_color = rgba(49, 50, 68, 1.0) inner_color = rgba(49, 50, 68, 1.0)
font_color = rgba(205, 214, 244, 1.0) font_color = rgba(205, 214, 244, 1.0)
fade_on_empty = false fade_on_empty = false
placeholder_text = <i>Touch YubiKey or enter password...</i> placeholder_text = <i>...</i>
hide_input = false hide_input = false
check_color = rgba(166, 227, 161, 1.0) check_color = rgba(166, 227, 161, 1.0)
fail_color = rgba(243, 139, 168, 1.0) fail_color = rgba(243, 139, 168, 1.0)

View File

@@ -18,7 +18,7 @@ require("lazy").setup({
}) })
vim.keymap.set("n", "<leader>e", vim.cmd.Ex) vim.keymap.set("n", "<leader>e", vim.cmd.Ex)
vim.keymap.set("n", "<leader>ww", vim.cmd.w) vim.keymap.set("n", "<leader>w", vim.cmd.w)
vim.opt.number = true vim.opt.number = true
vim.opt.relativenumber = true vim.opt.relativenumber = true

View File

@@ -1,6 +1,5 @@
{ {
"LuaSnip": { "branch": "master", "commit": "dae4f5aaa3574bd0c2b9dd20fb9542a02c10471c" }, "LuaSnip": { "branch": "master", "commit": "dae4f5aaa3574bd0c2b9dd20fb9542a02c10471c" },
"blink.cmp": { "branch": "main", "commit": "f22f66eb7c4d037ed523a78b27ee235b7bc9a1f4" },
"catppuccin": { "branch": "main", "commit": "12c004cde3f36cb1d57242f1e6aac46b09a0e5b4" }, "catppuccin": { "branch": "main", "commit": "12c004cde3f36cb1d57242f1e6aac46b09a0e5b4" },
"cmp-buffer": { "branch": "main", "commit": "b74fab3656eea9de20a9b8116afa3cfc4ec09657" }, "cmp-buffer": { "branch": "main", "commit": "b74fab3656eea9de20a9b8116afa3cfc4ec09657" },
"cmp-nvim-lsp": { "branch": "main", "commit": "cbc7b02bb99fae35cb42f514762b89b5126651ef" }, "cmp-nvim-lsp": { "branch": "main", "commit": "cbc7b02bb99fae35cb42f514762b89b5126651ef" },
@@ -18,10 +17,8 @@
"nvim-cmp": { "branch": "main", "commit": "da88697d7f45d16852c6b2769dc52387d1ddc45f" }, "nvim-cmp": { "branch": "main", "commit": "da88697d7f45d16852c6b2769dc52387d1ddc45f" },
"nvim-lspconfig": { "branch": "master", "commit": "2163c54bb6cfec53e3e555665ada945b8c8331b9" }, "nvim-lspconfig": { "branch": "master", "commit": "2163c54bb6cfec53e3e555665ada945b8c8331b9" },
"nvim-treesitter": { "branch": "main", "commit": "5cb05e1b0fa3c469958a2b26f36b3fe930af221c" }, "nvim-treesitter": { "branch": "main", "commit": "5cb05e1b0fa3c469958a2b26f36b3fe930af221c" },
"opencode.nvim": { "branch": "main", "commit": "1088ee70dd997d785a1757d351c07407f0abfc9f" }, "pi.nvim": { "branch": "main", "commit": "761cb109ebd466784f219e6e3a43a28f6187d627" },
"plenary.nvim": { "branch": "master", "commit": "b9fd5226c2f76c951fc8ed5923d85e4de065e509" }, "plenary.nvim": { "branch": "master", "commit": "b9fd5226c2f76c951fc8ed5923d85e4de065e509" },
"render-markdown.nvim": { "branch": "main", "commit": "e3c18ddd27a853f85a6f513a864cf4f2982b9f26" },
"snacks.nvim": { "branch": "main", "commit": "9912042fc8bca2209105526ac7534e9a0c2071b2" },
"telescope-fzf-native.nvim": { "branch": "main", "commit": "6fea601bd2b694c6f2ae08a6c6fab14930c60e2c" }, "telescope-fzf-native.nvim": { "branch": "main", "commit": "6fea601bd2b694c6f2ae08a6c6fab14930c60e2c" },
"telescope.nvim": { "branch": "master", "commit": "3333a52ff548ba0a68af6d8da1e54f9cd96e9179" } "telescope.nvim": { "branch": "master", "commit": "3333a52ff548ba0a68af6d8da1e54f9cd96e9179" }
} }

View File

@@ -0,0 +1,77 @@
-- Lazy.nvim plugin spec for pi.nvim, plus a custom right-hand terminal pane
-- running the `pi` CLI, exposed via :PiPane / :PiPaneToggle and keymaps.
return {
  "pablopunk/pi.nvim",
  opts = {},
  config = function(_, opts)
    require("pi").setup(opts)
    -- Singleton pane state: the terminal buffer and the window currently
    -- showing it. The buffer outlives window closes (bufhidden = "hide"),
    -- so the pi session persists across toggles.
    local state = {
      buf = nil,
      win = nil,
    }
    -- Pane width: 35% of the editor columns, but never narrower than 50.
    local function pane_width()
      return math.max(50, math.floor(vim.o.columns * 0.35))
    end
    -- Apply chrome-free styling to the pane window; no-op when the window
    -- handle is nil or no longer valid.
    local function style_pane(win)
      if not win or not vim.api.nvim_win_is_valid(win) then
        return
      end
      pcall(vim.api.nvim_win_set_width, win, pane_width())
      vim.wo[win].number = false
      vim.wo[win].relativenumber = false
      vim.wo[win].signcolumn = "no"
      vim.wo[win].winfixwidth = true
    end
    -- Open (or focus) the pi pane. Reuses the surviving terminal buffer when
    -- one exists; otherwise spawns a fresh `pi` terminal in a new right split.
    local function open_pi_pane()
      if state.win and vim.api.nvim_win_is_valid(state.win) then
        -- Pane already visible: just focus it and enter terminal-insert mode.
        vim.api.nvim_set_current_win(state.win)
        vim.cmd("startinsert")
        return
      end
      vim.cmd("botright vsplit")
      state.win = vim.api.nvim_get_current_win()
      style_pane(state.win)
      if state.buf and vim.api.nvim_buf_is_valid(state.buf) then
        -- Re-attach the existing terminal buffer to the new window.
        vim.api.nvim_win_set_buf(state.win, state.buf)
      else
        vim.cmd("terminal pi")
        state.buf = vim.api.nvim_get_current_buf()
        vim.bo[state.buf].buflisted = false
        -- Keep the buffer (and the running pi process) alive when hidden.
        vim.bo[state.buf].bufhidden = "hide"
        -- Reset tracked state when the terminal exits or the buffer is wiped.
        -- NOTE(review): this nils state.win but does not close the window, so
        -- the dead terminal stays visible after `pi` exits — confirm intended.
        vim.api.nvim_create_autocmd({ "BufWipeout", "TermClose" }, {
          buffer = state.buf,
          callback = function()
            state.buf = nil
            state.win = nil
          end,
        })
      end
      -- Re-style after the buffer swap: window-local options can change when
      -- a different buffer is shown in the window.
      style_pane(state.win)
      vim.cmd("startinsert")
    end
    -- Toggle: close the pane if it is visible, otherwise open it.
    local function toggle_pi_pane()
      if state.win and vim.api.nvim_win_is_valid(state.win) then
        vim.api.nvim_win_close(state.win, true)
        state.win = nil
        return
      end
      open_pi_pane()
    end
    vim.api.nvim_create_user_command("PiPane", open_pi_pane, { desc = "Open pi in a right side pane" })
    vim.api.nvim_create_user_command("PiPaneToggle", toggle_pi_pane, { desc = "Toggle pi right side pane" })
  end,
  -- NOTE(review): "<leader>p" is a prefix of "<leader>pp"/"<leader>ps", so
  -- PiAsk only fires after the mapping timeout elapses — confirm intended.
  keys = {
    { "<leader>p", "<cmd>PiAsk<cr>", desc = "Pi Ask" },
    { "<leader>pp", "<cmd>PiPaneToggle<cr>", desc = "Pi Pane" },
    { "<leader>ps", "<cmd>PiAskSelection<cr>", mode = "v", desc = "Pi Ask Selection" },
  },
}

View File

@@ -1,5 +0,0 @@
node_modules
package.json
bun.lock
.megamemory/
.memory/

View File

@@ -1,237 +0,0 @@
# OpenCode Global Workflow
## Operating Model
- Default to `planner`. Do not implement before there is an approved plan.
- `planner` owns discovery, decomposition, verification oracles, risk tracking, and the handoff spec.
- `builder` executes the approved spec exactly, delegates focused work to subagents, and escalates back to `planner` instead of improvising when the spec breaks.
- Parallelize aggressively for research, exploration, review, and isolated implementation lanes. Do not parallelize code mutation when lanes share files, APIs, schemas, or verification steps.
- Use explicit `allow` or `deny` permissions only. Do not rely on `ask`.
- Keep `external_directory` denied. Real project repos may use repo-local `/.worktrees`, but this global config must not relax that rule.
## Agent Roster
| Agent | Mode | Model | Responsibility |
| --- | --- | --- | --- |
| `planner` | primary | `github-copilot/gpt-5.4` | Produce approved specs and decide whether execution is ready |
| `builder` | primary | `github-copilot/gpt-5.4` | Execute approved specs and integrate delegated work |
| `researcher` | subagent | `github-copilot/gpt-5.4` | Deep research, external docs, tradeoff analysis |
| `explorer` | subagent | `github-copilot/claude-sonnet-4.6` | Read-only repo inspection; reports facts only, never plans or recommendations |
| `reviewer` | subagent | `github-copilot/gpt-5.4` | Critique plans, code, tests, and release readiness |
| `coder` | subagent | `github-copilot/gpt-5.3-codex` | Implement narrowly scoped code changes |
| `tester` | subagent | `github-copilot/claude-opus-4.6` | Run verification, triage failures, capture evidence |
| `librarian` | subagent | `github-copilot/claude-opus-4.6` | Maintain docs, `AGENTS.md`, and memory hygiene |
## Planner Behavior
- `planner` must use the `question` tool proactively when scope, defaults, approval criteria, or critical context are ambiguous. Prefer asking over assuming.
- `planner` may use bash and Docker commands during planning for context gathering (e.g., `docker compose config`, `docker ps`, inspecting files, checking versions). Do not run builds, installs, tests, deployments, or any implementation-level commands — those belong to builder/tester/coder.
## Planner -> Builder Contract
- Every build starts from a memory note under `plans/` with `Status: approved`.
- Approved plans must include: objective, scope, constraints, assumptions, concrete task list, parallelization lanes, verification oracle, risks, and open findings.
- `builder` must follow the approved plan exactly.
- `builder` must stop and escalate back to `planner` when it finds a spec contradiction, a hidden dependency that changes scope, or two failed verification attempts after recording root cause and evidence.
### Builder Commits
- `builder` automatically creates git commits at meaningful task checkpoints and at final completion when uncommitted changes remain.
- A "meaningful checkpoint" is a completed implementation chunk from the approved plan, not every file save.
- Skip commit creation when there are no new changes since the prior checkpoint.
- Commit messages should reflect the intent of the completed task from the plan.
- Before creating the final completion commit, clean up temporary artifacts generated during the build (e.g., scratch files, screenshots, logs, transient reports, caches). Intended committed deliverables are not cleanup targets.
- Standard git safety rules apply: review staged content, respect hooks, no force-push or destructive operations.
- Push automation is out of scope; the user decides when to push.
## Commands
- `/init` initializes or refreshes repo memory and the project `AGENTS.md`.
- `/plan` creates or updates the canonical implementation plan in memory.
- `/build` executes the latest approved plan and records execution progress.
- `/continue` resumes unfinished planning or execution from memory based on the current primary agent.
- Built-in `/sessions` remains available for raw session browsing; custom `/continue` is the workflow-aware resume entrypoint.
## Memory System (Single: basic-memory)
Memory uses one persistent system: **basic-memory**.
- All persistent knowledge is stored in basic-memory notes, split across a **`main` project** (global/shared) and **per-repo projects** (project-specific).
- The managed per-repo basic-memory project directory is `<repo>/.memory/`.
- Do not edit managed `.memory/*` files directly; use basic-memory MCP tools for all reads/writes.
### `main` vs per-repo projects
1. **`main` (global/shared knowledge only)**
- Reusable coding patterns
- Technology knowledge
- User preferences and workflow rules
- Cross-project lessons learned
2. **Per-repo projects (project-specific knowledge only)**
- Project overview and architecture notes
- Plans, execution logs, decisions, findings, and continuity notes
- Project-specific conventions and testing workflows
**Hard rule:** Never store project-specific plans, decisions, research, gates, or sessions in `main`. Never store cross-project reusable knowledge in a per-repo project.
### Required per-repo note taxonomy
- `project/overview` - stack, purpose, important entrypoints
- `project/architecture` - major modules, data flow, boundaries
- `project/workflows` - local dev, build, test, release commands
- `project/testing` - verification entrypoints and expectations
- `plans/<slug>` - canonical specs with `Status: draft|approved|blocked|done`
- `executions/<slug>` - structured execution log with `Status: in_progress|blocked|done` (see template below)
- `decisions/<slug>` - durable project-specific decisions
- `findings/<slug>` - open findings ledger with evidence and owner
### Execution note template (`executions/<slug>`)
Every execution note must use these literal section names:
```
## Plan
- **Source:** plans/<slug>
- **Status:** approved
## Execution State
- **Objective:** <one-line goal from the plan>
- **Current Phase:** <planning|implementing|integrating|verifying|blocked|done>
- **Next Checkpoint:** <next concrete step>
- **Blockers:** <none|bullet-friendly summary>
- **Last Updated By:** <builder|coder|tester|reviewer|librarian>
- **Legacy Note Normalized:** <yes|no>
## Lane Claims
Repeated per lane:
### Lane: <lane-name>
- **Owner:** <builder|coder|tester|reviewer|librarian|unassigned>
- **Status:** planned | active | released | blocked | done
- **Claimed Files/Areas:** <paths or named workflow surfaces>
- **Depends On:** <none|lane names>
- **Exit Condition:** <what must be true to release or complete this lane>
## Last Verified State
- **Mode:** none | smoke | full
- **Summary:** <one-sentence status>
- **Outstanding Risk:** <none|brief risk>
- **Related Ledger Entry:** <entry label|none>
## Verification Ledger
Append-only log. Each entry:
### Entry: <checkpoint-or-step-label>
- **Goal:** <what is being verified>
- **Mode:** smoke | full
- **Command/Check:** <exact command or manual check performed>
- **Result:** pass | fail | blocked | not_run
- **Key Evidence:** <concise proof: output snippet, hash, assertion count>
- **Artifacts:** <paths to logs/screenshots, or `none`>
- **Residual Risk:** <known gaps, or `none`>
```
#### Verification summary shape
Each verification entry (in Last Verified State or Verification Ledger) uses these fields:
- **Goal** - what is being verified
- **Mode** - `smoke` or `full` (see mode rules)
- **Command/Check** - exact command or manual check performed
- **Result** - `pass`, `fail`, `blocked`, or `not_run`
- **Key Evidence** - concise proof (output snippet, hash, assertion count)
- **Artifacts** - paths to logs/screenshots if any, or `none`
- **Residual Risk** - known gaps, or `none`
#### Verification mode rules
- Default to **`smoke`** for intermediate checkpoint proof and isolated lane verification.
- Default to **`full`** before any final completion claim or setting execution status to `done`.
- If there is only one meaningful verification step, record it as `full` and note there is no separate smoke check.
#### Compact verification summary behavior
- The verification ledger shape is the default evidence format for builder/tester/coder handoffs.
- Raw logs should stay out of primary context unless a check fails or the user explicitly requests full output.
- When raw output is necessary, summarize the failure first and then point to the raw evidence.
#### Lane-claim lifecycle
- **Planner** defines intended lanes and claimed files/areas in the approved plan when parallelization is expected.
- **Builder** creates or updates lane-claim entries in the execution note before fan-out and marks them `active`, `released`, `done`, or `blocked`.
- Overlapping claimed files/areas or sequential verification dependencies **forbid** parallel fan-out.
- Claims are advisory markdown metadata, not hard runtime locks.
#### Reviewer and execution-note ownership
- `reviewer` is read-only on execution notes; it reports findings via its response message.
- `builder` owns all execution-note writes and status transitions.
#### Legacy execution notes
Legacy execution notes may be freeform and lack structured sections. `/continue` must degrade gracefully — read what exists, do not invent conflicts or synthesize missing sections without evidence.
### Per-repo project setup (required)
Every code repository must have its own dedicated basic-memory project.
Use `basic-memory_create_memory_project` with:
- `project_name`: short kebab-case repo identifier
- `project_path`: `<repo-root>/.memory`
## Skills
Local skills live under `skills/<name>/SKILL.md` and are loaded on demand via the `skill` tool. See `skills/creating-skills/SKILL.md` for authoring rules.
### First-Batch Skills
| Skill | Purpose |
| --- | --- |
| `systematic-debugging` | Root-cause-first debugging with findings, evidence, and builder escalation |
| `verification-before-completion` | Evidence-before-claims verification for tester and builder handoffs |
| `brainstorming` | Planner-owned discovery and design refinement ending in memory-backed artifacts |
| `writing-plans` | Planner-owned authoring of execution-ready `plans/<slug>` notes |
| `dispatching-parallel-agents` | Safe parallelization with strict isolation tests and a single integrator |
| `test-driven-development` | Canonical red-green-refactor workflow for code changes |
### Design & Domain Skills
| Skill | Purpose |
| --- | --- |
| `frontend-design` | Distinctive, production-grade frontend UI with high design quality, accessibility, and performance |
### Ecosystem Skills
| Skill | Purpose |
| --- | --- |
| `docker-container-management` | Reusable Docker/compose workflow for builds, tests, and dev in containerized repos |
| `python-development` | Python ecosystem defaults: `uv` for packaging, `ruff` for lint/format, `pytest` for tests |
| `javascript-typescript-development` | JS/TS ecosystem defaults: `bun` for runtime/packaging, `biome` for lint/format |
### Agent Skill-Loading Contract
Agents must proactively load applicable skills when their trigger conditions are met. Do not wait to be told.
- **`planner`**: `brainstorming` (unclear requests, design work), `writing-plans` (authoring `plans/<slug>`), `dispatching-parallel-agents` (parallel lanes), `systematic-debugging` (unresolved bugs), `test-driven-development` (specifying code tasks), `frontend-design` (frontend UI/UX implementation or redesign), `docker-container-management` (repo uses Docker), `python-development` (Python repo/lane), `javascript-typescript-development` (JS/TS repo/lane).
- **`builder`**: `dispatching-parallel-agents` (before parallel fan-out), `systematic-debugging` (bugs, regressions, flaky tests), `verification-before-completion` (before any completion claim), `test-driven-development` (before delegating or performing code changes), `frontend-design` (frontend UI/UX implementation lanes), `docker-container-management` (containerized repo), `python-development` (Python lanes), `javascript-typescript-development` (JS/TS lanes).
- **`tester`**: `systematic-debugging` (verification failure diagnosis), `verification-before-completion` (before declaring verification complete), `test-driven-development` (validating red/green cycles), `docker-container-management` (tests run in containers), `python-development` (Python verification), `javascript-typescript-development` (JS/TS verification).
- **`reviewer`**: `verification-before-completion` (evaluating completion evidence), `test-driven-development` (reviewing red/green discipline).
- **`coder`**: `test-driven-development` (all code tasks), `frontend-design` (frontend component, page, or application implementation lanes), `docker-container-management` (Dockerfiles, compose files, containerized builds), `python-development` (Python code lanes), `javascript-typescript-development` (JS/TS code lanes); other skills when the assigned lane explicitly calls for them.
- **`librarian`**: Load relevant skills opportunistically when the assigned task calls for them; do not override planner/builder workflow ownership.
### TDD Default Policy
Test-driven development is the default for all code changes. Agents must follow the red-green-refactor cycle unless a narrow exception applies.
**Narrow exceptions** (agent must state why TDD was not practical and what alternative verification was used):
- Docs-only changes
- Config-only changes
- Pure refactors with provably unchanged behavior
- Repos that do not yet have a reliable automated test harness
## Documentation Ownership
- `librarian` owns project docs updates, `AGENTS.md` upkeep, and memory note hygiene.
- When a workflow, command, or agent contract changes, update the docs in the same task.
- Keep command names, agent roster, memory taxonomy, and skill-loading contracts synchronized across `AGENTS.md`, `agents/`, `commands/`, and `skills/`.

View File

@@ -1,48 +0,0 @@
---
description: Execution lead that follows approved plans, delegates focused work, and integrates results without drifting from spec
mode: primary
model: github-copilot/gpt-5.4
variant: xhigh
temperature: 0.1
permission:
edit: allow
webfetch: allow
bash:
"*": allow
task:
"*": deny
tester: allow
coder: allow
reviewer: allow
librarian: allow
skill:
"*": allow
permalink: opencode-config/agents/builder
---
You are the execution authority.
- Proactively load applicable skills when triggers are present:
- `dispatching-parallel-agents` before any parallel subagent fan-out.
- `systematic-debugging` when bugs, regressions, flaky tests, or unexpected behavior appear.
- `verification-before-completion` before completion claims or final handoff.
- `test-driven-development` before delegating or performing code changes.
- `docker-container-management` when executing tasks in a containerized repo.
- `python-development` when executing Python lanes.
- `frontend-design` when executing frontend UI/UX implementation lanes.
- `javascript-typescript-development` when executing JS/TS lanes.
- Read the latest approved plan before making changes.
- Execute the plan exactly; do not widen scope on your own.
- Delegate code changes to `coder`, verification to `tester`, critique to `reviewer`, and docs plus `AGENTS.md` updates to `librarian`.
- Use parallel subagents when implementation lanes are isolated and can be verified independently.
- Maintain a structured execution note in basic-memory under `executions/<slug>` using the literal sections defined in `AGENTS.md`: Plan, Execution State, Lane Claims, Last Verified State, and Verification Ledger.
- Before parallel fan-out, create or update Lane Claims in the execution note. Mark each lane `active` before dispatch and `released`, `done`, or `blocked` afterward. Overlapping claimed files/areas or sequential verification dependencies forbid parallel fan-out.
- Record verification evidence in the Verification Ledger using the compact shape: Goal, Mode, Command/Check, Result, Key Evidence, Artifacts, Residual Risk.
- Default to `smoke` mode for intermediate checkpoints and isolated lane verification. Require `full` mode before any final completion claim or setting execution status to `done`.
- If you hit a contradiction, hidden dependency, or two failed verification attempts, record the root cause and evidence, then stop and send the work back to `planner`.
- Builder owns commit creation during `/build`; do not delegate commit authorship decisions to other agents.
- Create commits automatically at meaningful completed implementation checkpoints, and create a final completion commit when changes remain.
- Before creating the final completion commit, clean up temporary artifacts generated during the build (e.g., scratch files, screenshots, logs, transient reports, caches). Intended committed deliverables are not cleanup targets.
- Reuse existing git safety constraints: avoid destructive git behavior, do not force push, and do not add push automation.
- If there are no new changes at a checkpoint, skip commit creation instead of creating empty or duplicate commits.

View File

@@ -1,37 +0,0 @@
---
description: Focused implementation subagent for tightly scoped code changes within an assigned lane
mode: subagent
model: github-copilot/gpt-5.3-codex
variant: xhigh
temperature: 0.1
permission:
edit: allow
webfetch: allow
bash:
"*": allow
permalink: opencode-config/agents/coder
---
Implement only the assigned lane.
- Proactively load `test-driven-development` for code development tasks.
- Load `docker-container-management` when the lane involves Dockerfiles, compose files, or containerized builds.
- Load `python-development` when the lane involves Python code.
- Load `frontend-design` when the lane involves frontend component, page, or application implementation.
- Load `javascript-typescript-development` when the lane involves JS/TS code.
- Load other local skills only when the assigned lane explicitly calls for them.
- Follow the provided spec and stay inside the requested scope.
- Reuse existing project patterns before introducing new ones.
- Report notable assumptions, touched files, and any follow-up needed.
- When reporting verification evidence, use the compact verification summary shape:
- **Goal** - what is being verified
- **Mode** - `smoke` or `full`
- **Command/Check** - exact command or manual check performed
- **Result** - `pass`, `fail`, `blocked`, or `not_run`
- **Key Evidence** - concise proof (output snippet, hash, assertion count)
- **Artifacts** - paths to logs/screenshots, or `none`
- **Residual Risk** - known gaps, or `none`
- Keep raw logs out of handoff messages; summarize failures first and point to raw evidence only when needed.
- Clean up temporary artifacts from the assigned lane (e.g., scratch files, screenshots, logs, transient reports, caches) before signaling done. Intended committed deliverables are not cleanup targets.
- Do not claim work is complete without pointing to verification evidence in the compact shape above.

View File

@@ -1,28 +0,0 @@
---
description: Read-only repo inspector that reports observable facts only — never plans or recommendations
mode: subagent
model: github-copilot/claude-sonnet-4.6
temperature: 0.0
tools:
write: false
edit: false
bash: false
permission:
webfetch: deny
permalink: opencode-config/agents/explorer
---
You are a fact-gathering tool, not a planner.
- Inspect the repository quickly and report only observable facts.
- Prefer `glob`, `grep`, `read`, structural search, and memory lookups.
- Return file paths, symbols, code relationships, and constraints.
- Do not make changes.
Forbidden output:
- Plan drafts, task lists, or implementation steps.
- Solution design or architecture proposals.
- Speculative recommendations or subjective assessments.
- Priority rankings or suggested next actions.
If a finding has implications for planning, state the fact and stop. Let the caller draw conclusions.

View File

@@ -1,23 +0,0 @@
---
description: Documentation and memory steward for AGENTS rules, project docs, and continuity notes
mode: subagent
model: github-copilot/claude-opus-4.6
variant: thinking
temperature: 0.2
tools:
bash: false
permission:
edit: allow
webfetch: allow
permalink: opencode-config/agents/librarian
---
Own documentation quality and continuity.
- Load relevant skills opportunistically when assigned documentation or memory tasks call for them.
- Do not override planner/builder workflow ownership.
- Keep `AGENTS.md`, workflow docs, and command descriptions aligned with actual behavior.
- Update or create basic-memory notes when project knowledge changes.
- Prefer concise, high-signal docs that help future sessions resume quickly.
- Flag stale instructions, mismatched agent rosters, and undocumented workflow changes.

View File

@@ -1,57 +0,0 @@
---
description: Planning lead that gathers evidence, writes execution-ready specs, and decides when builder can proceed
mode: primary
model: github-copilot/gpt-5.4
variant: xhigh
temperature: 0.1
tools:
write: false
edit: false
permission:
webfetch: allow
task:
"*": deny
researcher: allow
explorer: allow
reviewer: allow
skill:
"*": allow
permalink: opencode-config/agents/planner
---
You are the planning authority.
- Proactively load applicable skills when triggers are present:
- `brainstorming` for unclear requests, design work, or feature shaping.
- `writing-plans` when producing execution-ready `plans/<slug>` notes.
- `dispatching-parallel-agents` when considering parallel research or review lanes.
- `systematic-debugging` when planning around unresolved bugs or failures.
- `test-driven-development` when specifying implementation tasks that mutate code.
- `docker-container-management` when a repo uses Docker/docker-compose.
- `python-development` when a repo or lane is primarily Python.
- `frontend-design` when the task involves frontend UI/UX implementation or redesign.
- `javascript-typescript-development` when a repo or lane is primarily JS/TS.
## Clarification and the `question` tool
- Use the `question` tool proactively when scope, default choices, approval criteria, or critical context are ambiguous or missing.
- Prefer asking over assuming, especially for: target environments, language/tool defaults, acceptance criteria, and whether Docker is required.
- Do not hand off a plan that contains unresolved assumptions when a question could resolve them first.
## Planning-time Docker and bash usage
- You may run Docker commands during planning for context gathering and inspection (e.g., `docker compose config`, `docker image ls`, `docker ps`, `docker network ls`, checking container health or logs).
- You may also run other bash commands for read-only context (e.g., checking file contents, environment state, installed versions).
- Do **not** run builds, installs, tests, deployments, or any implementation-level commands — those belong to builder/tester/coder.
- If you catch yourself executing implementation steps, stop and delegate to builder.
- Gather all high-signal context before proposing execution.
- Break work into explicit tasks, dependencies, and verification steps.
- Use subagents in parallel when research lanes are independent.
- Write or update the canonical plan in basic-memory under `plans/<slug>`.
- Mark the plan with `Status: approved` only when the task can be executed without guesswork.
- Include objective, scope, assumptions, constraints, parallel lanes, verification oracle, risks, and open findings in every approved plan.
- When parallelization or phased verification matters, define intended lanes with claimed files/areas, inter-lane dependencies, and verification intent so builder can create the structured `executions/<slug>` note without guessing.
- Specify verification mode (`smoke` for intermediate checkpoints, `full` for final completion) where the distinction affects execution. Default to the shared rules in `AGENTS.md` when not otherwise specified.
- Never make file changes or implementation edits yourself.
- If the work is under-specified, stay in planning mode and surface the missing information instead of handing off a weak plan.

View File

@@ -1,21 +0,0 @@
---
description: Research specialist for external docs, tradeoff analysis, and evidence gathering
mode: subagent
model: github-copilot/gpt-5.4
variant: xhigh
temperature: 0.2
tools:
write: false
edit: false
bash: false
permission:
webfetch: allow
permalink: opencode-config/agents/researcher
---
Focus on evidence gathering.
- Read docs, compare options, and summarize tradeoffs.
- Prefer authoritative sources and concrete examples.
- Return concise findings with recommendations, risks, and unknowns.
- Do not edit files or invent implementation details.

View File

@@ -1,28 +0,0 @@
---
description: Critical reviewer for plans, code, test evidence, and release readiness
mode: subagent
model: github-copilot/gpt-5.4
variant: xhigh
temperature: 0.1
tools:
write: false
edit: false
bash: false
permission:
webfetch: allow
permalink: opencode-config/agents/reviewer
---
Act as a skeptical reviewer.
- Proactively load applicable skills when triggers are present:
- `verification-before-completion` when evaluating completion readiness.
- `test-driven-development` when reviewing red/green discipline evidence.
- Look for incorrect assumptions, missing cases, regressions, unclear specs, and weak verification.
- Reject completion claims that lack structured verification evidence in the compact shape (`Goal`, `Mode`, `Command/Check`, `Result`, `Key Evidence`, `Artifacts`, `Residual Risk`).
- Reject execution notes or handoffs that lack lane-ownership boundaries (owner, claimed files/areas, status).
- Prefer concrete findings over broad advice.
- When reviewing a plan, call out ambiguity before execution starts.
- When reviewing code or tests, provide evidence-backed issues in priority order.
- Remain read-only: report findings via response message; do not write to execution notes or modify files.

View File

@@ -1,39 +0,0 @@
---
description: Verification specialist for running tests, reproducing failures, and capturing evidence
mode: subagent
model: github-copilot/claude-opus-4.6
variant: thinking
temperature: 0.0
tools:
write: false
permission:
edit: deny
webfetch: allow
bash:
"*": allow
permalink: opencode-config/agents/tester
---
Own verification and failure evidence.
- Proactively load applicable skills when triggers are present:
- `systematic-debugging` when a verification failure needs diagnosis.
- `verification-before-completion` before declaring verification complete.
- `test-driven-development` when validating red/green cycles or regression coverage.
- `docker-container-management` when tests run inside containers.
- `python-development` when verifying Python code.
- `javascript-typescript-development` when verifying JS/TS code.
- Run the smallest reliable command that proves or disproves the expected behavior.
- Report every result using the compact verification summary shape:
- **Goal** what is being verified
- **Mode** `smoke` or `full`
- **Command/Check** exact command or manual check performed
- **Result** `pass`, `fail`, `blocked`, or `not_run`
- **Key Evidence** concise proof (output snippet, hash, assertion count)
- **Artifacts** paths to logs/screenshots, or `none`
- **Residual Risk** known gaps, or `none`
- Keep raw logs out of primary context unless a check fails or the caller requests full output. Summarize the failure first, then point to raw evidence.
- Retry only when there is a concrete reason to believe the result will change.
- Flag any temporary artifacts observed during verification (e.g., scratch files, screenshots, logs, transient reports, caches) so builder or coder can clean them up before completion.
- Do not make code edits.

View File

@@ -1,21 +0,0 @@
---
description: Execute the latest approved plan
agent: builder
model: github-copilot/gpt-5.4
---
Execute the latest approved plan for: $ARGUMENTS
1. Read the latest matching `plans/<slug>` note with `Status: approved`.
2. Create or update `executions/<slug>` with the structured sections defined in `AGENTS.md` (Plan, Execution State, Lane Claims, Last Verified State, Verification Ledger). Set `Status: in_progress` before changing code.
3. Before parallel fan-out, populate Lane Claims with owner, status, claimed files/areas, dependencies, and exit conditions. Overlapping claimed files/areas or sequential verification dependencies forbid parallel fan-out.
4. Delegate implementation to `coder`, verification to `tester`, review to `reviewer`, and docs or memory updates to `librarian` where appropriate.
5. Builder owns commit creation during `/build`: create automatic commits at meaningful completed implementation checkpoints.
6. Reuse existing git safety rules and avoid destructive git behavior; do not add push automation.
7. If no new changes exist at a checkpoint, skip commit creation rather than creating empty or duplicate commits.
8. Record verification evidence in the Verification Ledger using the compact shape (Goal, Mode, Command/Check, Result, Key Evidence, Artifacts, Residual Risk). Default to `smoke` for intermediate checkpoints; require `full` before final completion or setting status to `done`.
9. Follow the plan exactly. If the plan is contradictory, missing a dependency, or fails verification twice, stop, capture evidence, set the execution note to blocked, and send the work back to `planner`.
10. Before creating the final completion commit, clean up temporary artifacts generated during the build (e.g., scratch files, screenshots, logs, transient reports, caches). Intended committed deliverables are not cleanup targets.
11. Finish by creating a final completion commit when changes remain, then update Last Verified State and set the execution note to `Status: done` or `Status: blocked` and summarize what changed.
Automatic commits are required during `/build` as defined above.

View File

@@ -1,15 +0,0 @@
---
description: Resume unfinished planner or builder workflow from memory
model: github-copilot/gpt-5.4
---
Continue the highest-priority unfinished work for this repository.
1. Inspect basic-memory for incomplete work under `plans/`, `executions/`, `findings/`, and `decisions/`.
2. If the current primary agent is `planner`, resume the most relevant plan that is `Status: draft` or `Status: blocked` and drive it toward an approved spec.
3. If the current primary agent is `builder`, resume the most relevant execution note that is `Status: in_progress` or `Status: blocked`. If there is no approved plan, stop and hand the work back to `planner`.
4. When resuming a structured execution note, read Execution State and report: objective, current phase, next checkpoint, blockers, and last updated by. Check Lane Claims for active/blocked lanes and flag any claim conflicts (overlapping files/areas).
5. When the execution note is legacy or freeform (missing structured sections like Execution State or Lane Claims), degrade gracefully: read what exists, infer status from available content, and do not invent conflicts or synthesize missing sections without evidence.
6. When the execution note shows only `smoke` verification in the Last Verified State or Verification Ledger and a `full` verification step is still required before completion, surface this explicitly: report that full verification is pending and must run before the execution can be marked `done`.
7. Refresh the open findings ledger and update note statuses as you work.
8. Return the resumed slug, current status, next checkpoint, any blocker, any lane claim conflicts, and any pending full-verification requirement.

View File

@@ -1,15 +0,0 @@
---
description: Initialize or refresh project memory and AGENTS.md
agent: builder
model: github-copilot/gpt-5.4
---
Initialize this repository for the planner/builder workflow.
1. Verify that a dedicated per-repo basic-memory project exists for the current repository. If it does not, create it at `<repo-root>/.memory` using a short kebab-case project name.
2. Gather high-signal project context in parallel: purpose, stack, architecture, entrypoints, build/test commands, coding conventions, and major risks.
3. Write or refresh project memory notes under `project/overview`, `project/architecture`, `project/workflows`, and `project/testing`.
4. Use `librarian` to create or update the project-root `AGENTS.md` so it matches the repository and documents the important working agreements.
5. Record any missing information or open findings under `findings/` instead of guessing.
Keep the output concise and actionable.

View File

@@ -1,17 +0,0 @@
---
description: Produce or refresh an execution-ready plan
agent: planner
model: github-copilot/gpt-5.4
---
Create or update an execution-ready plan for: $ARGUMENTS
1. Gather the required repo and external context in parallel.
2. Use `researcher`, `explorer`, and `reviewer` as needed.
3. Write the canonical plan to basic-memory under `plans/<slug>`.
4. Include: objective, scope, assumptions, constraints, task breakdown, parallel lanes, verification oracle, risks, and open findings.
5. When parallelization or phased verification matters, define intended lanes with claimed files/areas, inter-lane dependencies, and verification intent (including `smoke` vs `full` mode where the distinction affects execution).
6. Ensure the plan gives builder enough information to create the structured `executions/<slug>` note without guessing lane ownership, claimed areas, or verification expectations.
7. Set `Status: approved` only when `builder` can execute the plan without guesswork. Otherwise leave it blocked and explain why.
Return the plan slug and the key execution checkpoints.

View File

@@ -1,41 +0,0 @@
{
"$schema": "https://raw.githubusercontent.com/Opencode-DCP/opencode-dynamic-context-pruning/master/dcp.schema.json",
"enabled": true,
"debug": false,
"pruneNotification": "detailed",
"pruneNotificationType": "chat",
"commands": {
"enabled": true,
"protectedTools": []
},
"experimental": {
"allowSubAgents": true
},
"manualMode": {
"enabled": false,
"automaticStrategies": true
},
"turnProtection": {
"enabled": false,
"turns": 4
},
"protectedFilePatterns": [],
"strategies": {
"deduplication": {
"enabled": true,
"protectedTools": []
},
"supersedeWrites": {
"enabled": true
},
"purgeErrors": {
"enabled": true,
"turns": 4,
"protectedTools": []
}
},
"compress": {
"maxContextLimit": "80%",
"minContextLimit": "50%"
}
}

View File

@@ -1,68 +0,0 @@
{
"$schema": "https://opencode.ai/config.json",
"autoupdate": true,
"model": "github-copilot/gpt-5.4",
"small_model": "github-copilot/gpt-5-mini",
"default_agent": "planner",
"enabled_providers": ["github-copilot"],
"plugin": ["@tarquinen/opencode-dcp", "./plugins/tmux-panes.ts"],
"agent": {
"build": {
"disable": true
},
"general": {
"disable": true
},
"explore": {
"disable": true
},
"plan": {
"disable": true
}
},
"permission": {
"doom_loop": "allow",
"websearch": "allow",
"question": "allow",
"bash": "allow",
"external_directory": "deny"
},
"mcp": {
"context7": {
"type": "remote",
"url": "https://mcp.context7.com/mcp",
"enabled": true
},
"gh_grep": {
"type": "remote",
"url": "https://mcp.grep.app",
"enabled": true
},
"playwright": {
"type": "local",
"command": [
"bunx",
"@playwright/mcp@latest",
"--headless",
"--browser",
"chromium"
],
"enabled": true
},
"basic-memory": {
"type": "local",
"command": ["uvx", "basic-memory", "mcp"],
"enabled": true
},
"ast-grep": {
"type": "local",
"command": [
"uvx",
"--from",
"git+https://github.com/ast-grep/ast-grep-mcp",
"ast-grep-server"
],
"enabled": true
}
}
}

View File

@@ -1,31 +0,0 @@
{
"name": "opencode",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"dependencies": {
"@opencode-ai/plugin": "1.2.15"
}
},
"node_modules/@opencode-ai/plugin": {
"version": "1.2.15",
"license": "MIT",
"dependencies": {
"@opencode-ai/sdk": "1.2.15",
"zod": "4.1.8"
}
},
"node_modules/@opencode-ai/sdk": {
"version": "1.2.15",
"license": "MIT"
},
"node_modules/zod": {
"version": "4.1.8",
"license": "MIT",
"funding": {
"url": "https://github.com/sponsors/colinhacks"
}
}
}
}

View File

@@ -1,191 +0,0 @@
import type { Plugin } from "@opencode-ai/plugin"
import { spawn } from "bun"
/**
* tmux-panes plugin
*
* When opencode spawns a background subagent, this plugin automatically opens
* a new tmux pane showing that subagent's live TUI via `opencode attach`.
*
* Layout:
* - First subagent: horizontal 60/40 split — main pane on left, subagent on right
* - Additional subagents: stacked vertically in the right column
* - Panes close automatically when subagent sessions end
*
* Only activates when running inside a tmux session (TMUX env var is set).
*/
/** True when this process is running inside a tmux session (tmux exports TMUX to its children). */
function isInsideTmux(): boolean {
  return !!process.env.TMUX
}

/** The tmux pane id of the pane we were launched from, or undefined outside tmux. */
function getCurrentPaneId(): string | undefined {
  return process.env.TMUX_PANE
}
/**
 * Run a tmux subcommand and capture its result.
 *
 * Spawns `tmux <args>` with piped stdio, drains stdout and stderr, and waits
 * for the process to exit.
 *
 * @param tmuxArgs - arguments passed to the tmux binary (e.g. ["kill-pane", "-t", id])
 * @returns exit code plus trimmed stdout and stderr text
 */
const runTmux = async (tmuxArgs: string[]) => {
  const child = spawn(["tmux", ...tmuxArgs], { stdout: "pipe", stderr: "pipe" })
  // Drain both streams concurrently, then wait for exit.
  const [out, err] = await Promise.all([
    new Response(child.stdout).text(),
    new Response(child.stderr).text(),
  ])
  const code = await child.exited
  return {
    exitCode: code,
    stdout: out.trim(),
    stderr: err.trim(),
  }
}
/** Remove the first occurrence of `value` from `items` in place; no-op when absent. */
const removeItem = (items: string[], value: string) => {
  const position = items.indexOf(value)
  if (position >= 0) {
    items.splice(position, 1)
  }
}
// Plugin entrypoint: wires opencode session lifecycle events to tmux pane
// management. Returns an empty plugin (no event handlers) outside tmux.
const plugin: Plugin = async (ctx) => {
if (!isInsideTmux()) return {}
const sessions = new Map<string, string>() // sessionId → tmux paneId
// Pane we were launched from; used as the split/layout anchor when set.
const sourcePaneId = getCurrentPaneId()
// Server URL for `opencode attach`, with any trailing slash stripped.
const serverUrl = (ctx.serverUrl?.toString() ?? "").replace(/\/$/, "")
// Ordered list of pane IDs in the right column.
// Empty = no right column yet; length > 0 = right column exists.
const rightColumnPanes: string[] = []
// Promise chain that serializes all pane create/close operations so
// concurrent events never issue interleaved tmux commands.
let paneOps = Promise.resolve()
// Ask tmux for the id and width of the window containing the anchor pane
// (source pane, or the first right-column pane as a fallback).
// Returns null when no anchor exists or the tmux query fails/parses badly.
const getWindowInfo = async () => {
const targetPane = sourcePaneId ?? rightColumnPanes[0]
if (!targetPane) return null
const result = await runTmux([
"display-message",
"-p",
"-t",
targetPane,
"#{window_id} #{window_width}",
])
if (result.exitCode !== 0 || !result.stdout) return null
// Output format is "<window_id> <width>" per the format string above.
const [windowId, widthText] = result.stdout.split(/\s+/, 2)
const width = Number(widthText)
if (!windowId || Number.isNaN(width)) return null
return { windowId, width }
}
// Re-apply the 60/40 main-vertical layout: main pane keeps ~60% of the
// window width, right-column panes stack in the remaining ~40%.
const applyLayout = async () => {
if (rightColumnPanes.length === 0) return
const windowInfo = await getWindowInfo()
if (!windowInfo) return
const mainWidth = Math.max(1, Math.round(windowInfo.width * 0.6))
await runTmux([
"set-window-option",
"-t",
windowInfo.windowId,
"main-pane-width",
String(mainWidth),
])
await runTmux(["select-layout", "-t", sourcePaneId ?? rightColumnPanes[0], "main-vertical"])
}
// Kill the pane tracked for a session, drop all bookkeeping for it, and
// re-balance the remaining panes. No-op for untracked sessions.
const closeSessionPane = async (sessionId: string) => {
const paneId = sessions.get(sessionId)
if (!paneId) return
await runTmux(["kill-pane", "-t", paneId])
sessions.delete(sessionId)
removeItem(rightColumnPanes, paneId)
await applyLayout()
}
// Append an operation to the serial queue; errors are swallowed so one
// failed pane operation cannot wedge the chain for later events.
const enqueuePaneOp = (operation: () => Promise<void>) => {
paneOps = paneOps.then(operation).catch(() => {})
return paneOps
}
// A session update is "terminal" when it carries an archived or compacting
// timestamp — either means the pane should be closed.
const isTerminalSessionUpdate = (info: any) =>
Boolean(info?.time?.archived || info?.time?.compacting)
return {
event: async ({ event }) => {
// Spawn a new pane when a subagent session is created
if (event.type === "session.created") {
const info = (event as any).properties?.info
// parentID presence distinguishes subagents from the root session
if (!info?.id || !info?.parentID) return
const sessionId: string = info.id
if (sessions.has(sessionId)) return
await enqueuePaneOp(async () => {
// Re-check inside the queue: an earlier queued op may have added it.
if (sessions.has(sessionId)) return
const cmd = `opencode attach ${serverUrl} --session ${sessionId}`
let args: string[]
if (rightColumnPanes.length === 0) {
// First subagent: open the right column with a horizontal split
// sized to ~40% of the window (fallback 40 columns).
const windowInfo = await getWindowInfo()
const rightWidth = windowInfo
? Math.max(1, Math.round(windowInfo.width * 0.4))
: 40
args = [
"split-window",
"-h",
"-l",
String(rightWidth),
"-d",
"-P",
"-F",
"#{pane_id}",
...(sourcePaneId ? ["-t", sourcePaneId] : []),
cmd,
]
} else {
// Subsequent subagents: stack vertically under the last right pane.
const lastRightPane = rightColumnPanes[rightColumnPanes.length - 1]
args = [
"split-window",
"-v",
"-d",
"-P",
"-F",
"#{pane_id}",
"-t",
lastRightPane,
cmd,
]
}
const result = await runTmux(args)
// -P -F "#{pane_id}" makes tmux print the new pane's id on stdout.
const paneId = result.stdout
if (result.exitCode === 0 && paneId) {
sessions.set(sessionId, paneId)
rightColumnPanes.push(paneId)
await applyLayout()
}
})
}
// Kill the pane when the subagent session ends
if (event.type === "session.deleted") {
const info = (event as any).properties?.info
if (!info?.id) return
await enqueuePaneOp(() => closeSessionPane(info.id))
}
// Also close when an update marks the session archived/compacting.
if (event.type === "session.updated") {
const info = (event as any).properties?.info
if (!info?.id || !sessions.has(info.id) || !isTerminalSessionUpdate(info)) return
await enqueuePaneOp(() => closeSessionPane(info.id))
}
// Close the pane once a tracked subagent goes idle.
if (event.type === "session.status") {
const sessionID = (event as any).properties?.sessionID
const statusType = (event as any).properties?.status?.type
if (!sessionID || !sessions.has(sessionID) || statusType !== "idle") return
await enqueuePaneOp(() => closeSessionPane(sessionID))
}
},
}
}
export default plugin

View File

@@ -1,29 +0,0 @@
---
name: brainstorming
description: Planner-led discovery workflow for clarifying problem shape, options, and decision-ready direction
permalink: opencode-config/skills/brainstorming/skill
---
# Brainstorming
Use this skill when requests are unclear, options are broad, or design tradeoffs are unresolved.
## Workflow
1. Clarify objective, constraints, and non-goals.
2. Generate multiple viable approaches (not one-path thinking).
3. Compare options by risk, complexity, verification cost, and reversibility.
4. Identify unknowns that need research before execution.
5. Converge on a recommended direction with explicit rationale.
## Planner Ownership
- Keep brainstorming in planning mode; do not start implementation.
- Use subagents for independent research lanes when needed.
- Translate outcomes into memory-backed planning artifacts (`plans/<slug>`, findings/risks).
## Output
- Short options table (approach, pros, cons, risks).
- Recommended path and why.
- Open questions that block approval.

View File

@@ -1,73 +0,0 @@
---
name: creating-agents
description: Create or update opencode agents in this repository, including dispatch permissions and roster alignment requirements
permalink: opencode-config/skills/creating-agents/skill
---
# Creating Agents
Use this skill when you need to add or revise an agent definition in this repo.
## Agents vs Skills
- **Agents** define runtime behavior and permissions in `agents/*.md`.
- **Skills** are reusable instruction modules under `skills/*/SKILL.md`.
- Do not treat agent creation as skill creation; each has different files, checks, and ownership.
## Source of Truth
1. Agent definition file: `agents/<agent-name>.md`
2. Operating roster and workflow contract: `AGENTS.md`
3. Runtime overrides and provider policy: `opencode.jsonc`
4. Workflow entrypoints: `commands/*.md`
Notes:
- This repo uses two primary agents: `planner` and `builder`.
- Dispatch permissions live in the primary agent that owns the subagent, not in a central dispatcher.
- `planner` may dispatch only `researcher`, `explorer`, and `reviewer`.
- `builder` may dispatch only `coder`, `tester`, `reviewer`, and `librarian`.
## Agent File Conventions
For `agents/<agent-name>.md`:
- Use frontmatter first, then concise role instructions.
- Keep tone imperative and operational.
- Define an explicit `model` for every agent and keep it on a GitHub Copilot model.
- Use only explicit `allow` or `deny` permissions; do not use `ask`.
- Include only the tools and permissions needed for the role.
- Keep instructions aligned with the planner -> builder contract in `AGENTS.md`.
Typical frontmatter fields in this repo include:
- `description`
- `mode`
- `model`
- `temperature`
- `steps`
- `tools`
- `permission`
- `permalink`
Mirror nearby agent files instead of inventing new metadata patterns.
## Practical Workflow (Create or Update)
1. Inspect the relevant primary agent file and at least one comparable peer in `agents/*.md`.
2. Create or edit `agents/<agent-name>.md` with matching local structure.
3. If the agent is a subagent, update the owning primary agent's `permission.task` allowlist.
4. Update `AGENTS.md` so the roster, responsibilities, and workflow rules stay synchronized.
5. Review `commands/*.md` if the new agent changes how `/init`, `/plan`, `/build`, or `/continue` should behave.
6. Review `opencode.jsonc` for conflicting overrides, disable flags, or provider drift.
## Manual Verification Checklist (No Validation Script)
Run this checklist before claiming completion:
- [ ] `agents/<agent-name>.md` exists and frontmatter is valid and consistent with peers.
- [ ] Agent instructions clearly define role, scope, escalation rules, and constraints.
- [ ] The owning primary agent includes the correct `permission.task` rule for the subagent.
- [ ] `AGENTS.md` roster row exists and matches the agent name, role, and model.
- [ ] `commands/*.md` and `opencode.jsonc` still reflect the intended workflow.
- [ ] Terminology stays consistent: agents in `agents/*.md`, skills in `skills/*/SKILL.md`.

View File

@@ -1,84 +0,0 @@
---
name: creating-skills
description: Create or update opencode skills in this repository using the required SKILL.md format and concise, trigger-focused guidance
permalink: opencode-config/skills/creating-skills/skill
---
# Creating Skills
Use this skill when you need to add or revise an opencode skill under `skills/`.
## Skills vs OpenAI/Codex Source Model
- Treat this repo as **opencode-native**.
- Do **not** use OpenAI/Codex-specific artifacts such as `agents/openai.yaml`, `init_skill.py`, `quick_validate.py`, or `scripts/references/assets` conventions from the old source model.
- A skill is discovered from `skills/*/SKILL.md` only.
## Required Structure
1. Create a folder at `skills/<skill-name>/`.
2. Add `skills/<skill-name>/SKILL.md`.
3. Keep `<skill-name>` equal to frontmatter `name`.
Frontmatter must contain only:
```yaml
---
name: <skill-name>
description: <what it does and when to load>
permalink: opencode-config/skills/<skill-name>/skill
---
```
## Naming Rules
- Use lowercase kebab-case.
- Keep names short and action-oriented.
- Match folder name and `name` exactly.
## Body Writing Rules
- Write concise, imperative instructions.
- Lead with when to load and the core workflow.
- Prefer short checklists over long prose.
- Include only repo-relevant guidance.
- Keep the planner/builder operating model in mind when a skill touches workflow behavior.
## Companion Notes (`*.md` in skill folder)
Add companion markdown files only when detail would bloat `SKILL.md` (examples, deep procedures, edge-case references).
- Keep `SKILL.md` as the operational entrypoint.
- Link companion files directly from `SKILL.md` with clear “when to read” guidance.
- Do not create extra docs by default.
## Practical Workflow (Create or Update)
1. Inspect 2–3 nearby skills for local style and brevity.
2. Pick/update `<skill-name>` and folder path under `skills/`.
3. Write or revise `SKILL.md` frontmatter (`name`, `description`, `permalink` only).
4. Draft concise body sections: purpose, load conditions, workflow, red flags/checks.
5. Add minimal companion `.md` files only if needed; link them from `SKILL.md`.
6. Verify discovery path and naming consistency:
- file exists at `skills/<name>/SKILL.md`
- folder name == frontmatter `name`
- no OpenAI/Codex-only artifacts introduced
7. If the skill changes agent workflow or command behavior:
- Update the **Skills** table, **Agent Skill-Loading Contract**, and **TDD Default Policy** in `AGENTS.md`.
- Confirm `commands/*.md` and any affected `agents/*.md` prompts stay aligned.
- If the skill involves parallelization, verify it enforces safe-parallelization rules (no parallel mutation on shared files, APIs, schemas, or verification steps).
- If the skill involves code changes, verify it references the TDD default policy and its narrow exceptions.
## Language/Ecosystem Skill Pattern
When adding a new language or ecosystem skill (e.g., `rust-development`, `go-development`), follow this template:
1. **Name**: `<language>-development` (kebab-case).
2. **Load trigger**: presence of the language's project file(s) or source files as primary source.
3. **Defaults table**: one row per concern — package manager, linter/formatter, test runner, type checker (if applicable).
4. **Core workflow**: numbered steps for bootstrap, lint, format, test, add-deps, and any lock/check step.
5. **Conventions**: 3–5 bullets on config file preferences, execution patterns, and version pinning.
6. **Docker integration**: one paragraph on base image and cache strategy.
7. **Red flags**: 3–5 bullets on common anti-patterns.
8. **AGENTS.md updates**: add the skill to the **Ecosystem Skills** table and add load triggers for `planner`, `builder`, `coder`, and `tester` in the **Agent Skill-Loading Contract**.
9. **Agent prompt updates**: add the skill trigger to `agents/planner.md`, `agents/builder.md`, `agents/coder.md`, and `agents/tester.md`.

View File

@@ -1,41 +0,0 @@
---
name: dispatching-parallel-agents
description: Safely parallelize independent lanes with isolation checks, explicit ownership, and single-agent integration
permalink: opencode-config/skills/dispatching-parallel-agents/skill
---
# Dispatching Parallel Agents
Use this skill before parallel fan-out.
## Isolation Test (Required)
Before fan-out, verify that no two lanes share:
- **Claimed Files/Areas** under active mutation (paths or named workflow surfaces from the lane-claim entries)
- APIs or schemas being changed
- **Sequential verification dependencies** (verification steps that must run in order across lanes)
Overlapping claimed files/areas or sequential verification dependencies **forbid** parallel fan-out. Run those lanes sequentially.
## Workflow
1. **Builder creates lane-claim entries** in the execution note before fan-out, recording for each lane: `Owner`, `Status` (→ `active`), `Claimed Files/Areas`, `Depends On`, and `Exit Condition`.
2. Run the isolation test above against the claimed files/areas and dependencies. Abort fan-out on any overlap.
3. Define lane scope, inputs, and outputs for each subagent.
4. Assign a single integrator (usually builder) for merge and final validation.
5. Each lane must return **compact verification evidence** in the shared shape (`Goal`, `Mode`, `Command/Check`, `Result`, `Key Evidence`, `Artifacts`, `Residual Risk`) — not just code output.
6. Integrate in dependency order; update lane statuses to `released` or `done`.
7. Run final end-to-end verification (`full` mode) after integration.
## Planner/Builder Expectations
- **Planner**: define intended lanes and claimed files/areas in the approved plan when parallelization is expected.
- **Builder**: load this skill before fan-out, create or update lane-claim entries in the execution note, mark them `active`/`released`/`done`/`blocked`, and enforce lane boundaries strictly.
- Claims are advisory markdown metadata, not hard runtime locks. Do not invent lockfiles or runtime enforcement.
## Red Flags
- Two lanes editing the same contract.
- Shared test fixtures causing non-deterministic outcomes.
- Missing integrator ownership.

View File

@@ -1,37 +0,0 @@
---
name: docker-container-management
description: Reusable Docker container workflow for build, test, and dev tasks in containerized repos
permalink: opencode-config/skills/docker-container-management/skill
---
# Docker Container Management
Load this skill when a repo uses Docker/docker-compose for builds, tests, or local dev, or when a task involves containerized workflows.
## Core Workflow
1. **Detect** — look for `Dockerfile`, `docker-compose.yml`/`compose.yml`, or `.devcontainer/` in the repo root.
2. **Prefer compose** — use `docker compose` (v2 CLI) over raw `docker run` when a compose file exists.
3. **Ephemeral containers** — default to `--rm` for one-off commands. Avoid leaving stopped containers behind.
4. **Named volumes over bind-mounts** for caches (e.g., package manager caches). Use bind-mounts only for source code.
5. **No host-path writes outside the repo** — all volume mounts must target paths inside the repo root or named volumes. This preserves `external_directory: deny`.
## Path and Volume Constraints
- Mount the repo root as the container workdir: `-v "$(pwd):/app" -w /app`.
- Never mount host paths outside the repository (e.g., `~/.ssh`, `/var/run/docker.sock`) unless the plan explicitly approves it with a stated reason.
- If root-owned artifacts appear after container runs, document cleanup steps (see `main/knowledge/worktree-cleanup-after-docker-owned-artifacts`).
## Agent Guidance
- **planner**: Use Docker during planning for context gathering and inspection (e.g., `docker compose config`, `docker ps`, `docker image ls`, `docker network ls`, checking container health or logs). Do not run builds, installs, tests, deployments, or any implementation-level commands — those belong to builder/tester/coder.
- **builder/coder**: Run builds and install steps inside containers. Prefer `docker compose run --rm <service> <cmd>` for one-off tasks.
- **tester**: Run test suites inside the same container environment used by CI. Capture container exit codes as verification evidence.
- **coder**: When writing Dockerfiles or compose files, keep layers minimal, pin base image tags, and use multi-stage builds when the final image ships.
## Red Flags
- `docker run` without `--rm` in automation scripts.
- Bind-mounting sensitive host paths (`/etc`, `~/.config`, `/var/run/docker.sock`).
- Building images without a `.dockerignore`.
- Using `latest` tag for base images in production Dockerfiles.

View File

@@ -1,48 +0,0 @@
---
name: frontend-design
description: Create distinctive, production-grade frontend interfaces with high design quality. Use this skill when the user asks to build web components, pages, or applications. Generates creative, polished code that avoids generic AI aesthetics.
permalink: opencode-config/skills/frontend-design/skill
---
# Frontend Design
## When to Load
Load this skill when the task involves building, redesigning, or significantly styling a frontend component, page, or application. Triggers include: user requests for UI/UX implementation, wireframe-to-code work, visual redesigns, and new web interfaces.
## Design Thinking Checklist
Before writing code, answer these:
1. **Purpose** — What problem does this interface solve? Who is the audience?
2. **Brand / product context** — Does the project have existing design tokens, a style guide, or brand constraints? Follow them first; extend only where gaps exist.
3. **Aesthetic direction** — Commit to a clear direction (e.g., brutally minimal, maximalist, retro-futuristic, editorial, organic, luxury, playful, industrial). Intentionality matters more than intensity.
4. **Differentiation** — Identify the single most memorable element of the design.
5. **Constraints** — Note framework requirements, performance budgets, and accessibility targets (WCAG AA minimum).
## Implementation Checklist
- [ ] Produce production-grade, functional code (HTML/CSS/JS, React, Vue, etc.)
- [ ] Ensure the result is visually cohesive with a clear aesthetic point of view
- [ ] Respect accessibility: semantic HTML, sufficient contrast, keyboard navigation, focus management
- [ ] Respect performance: avoid heavy unoptimized assets; prefer CSS-only solutions for animation where practical
- [ ] Use CSS variables for color/theme consistency
- [ ] Match implementation complexity to the aesthetic vision — maximalist designs need elaborate effects; minimal designs need precision and restraint
## Aesthetic Guidance
- **Typography** — Choose distinctive, characterful fonts. Pair a display font with a refined body font. Avoid defaulting to the same choices across projects; vary intentionally.
- **Color & Theme** — Dominant colors with sharp accents outperform timid, evenly-distributed palettes. Vary between light and dark themes across projects.
- **Motion** — Prioritize high-impact moments: a well-orchestrated page load with staggered reveals creates more delight than scattered micro-interactions. Use CSS animations where possible; use a motion library (e.g., Motion) for complex sequences. Include scroll-triggered and hover effects when they serve the design.
- **Spatial Composition** — Explore asymmetry, overlap, diagonal flow, grid-breaking elements, generous negative space, or controlled density.
- **Backgrounds & Atmosphere** — Build depth with gradient meshes, noise textures, geometric patterns, layered transparencies, shadows, grain overlays, or other contextual effects rather than defaulting to flat solid colors.
Avoid converging on the same fonts, color schemes, or layout patterns across generations. Each design should feel context-specific and intentional.
## TDD & Verification
Frontend code changes follow the project's TDD default policy. When the skill is loaded alongside `test-driven-development`:
- Write or update component/visual tests before implementation when a test harness exists.
- If no frontend test harness is available, state the exception and describe alternative verification (e.g., manual browser check, screenshot comparison).
- Satisfy `verification-before-completion` requirements before claiming the work is done.

View File

@@ -1,45 +0,0 @@
---
name: javascript-typescript-development
description: JS/TS ecosystem defaults and workflows using bun for runtime/packaging and biome for linting/formatting
permalink: opencode-config/skills/javascript-typescript-development/skill
---
# JavaScript / TypeScript Development
Load this skill when a repo or lane involves JS/TS code (presence of `package.json`, `tsconfig.json`, or `.ts`/`.tsx`/`.js`/`.jsx` files as primary source).
## Defaults
| Concern | Tool | Notes |
| --- | --- | --- |
| Runtime + package manager | `bun` | Replaces node+npm/yarn/pnpm for most tasks |
| Linting + formatting | `biome` | Replaces eslint+prettier |
| Test runner | `bun test` | Built-in; use vitest/jest only if repo already configures them |
| Type checking | `tsc --noEmit` | Always run before completion claims |
## Core Workflow
1. **Bootstrap** — `bun install` to install dependencies.
2. **Lint** — `bunx biome check .` before committing.
3. **Format** — `bunx biome format . --write` (or `--check` in CI).
4. **Test** — `bun test` with the repo's existing config. Follow TDD default policy.
5. **Add dependencies** — `bun add <pkg>` (runtime) or `bun add -D <pkg>` (dev).
6. **Type check** — `bunx tsc --noEmit` for TS repos.
## Conventions
- Prefer `biome.json` for lint/format config. Do not add `.eslintrc` or `.prettierrc` unless the repo already uses them.
- Use `bun run <script>` to invoke `package.json` scripts.
- Prefer ES modules (`"type": "module"` in `package.json`).
- Pin Node/Bun version via `.node-version` or `package.json` `engines` when deploying.
## Docker Integration
When the repo runs JS/TS inside Docker, use `oven/bun` as the base image. Mount a named volume for `node_modules` or use `bun install --frozen-lockfile` in CI builds.
## Red Flags
- Using `npm`/`yarn`/`pnpm` when `bun` is available and the project uses it.
- Running `eslint` or `prettier` when `biome` is configured.
- Missing `bun.lockb` after dependency changes.
- Skipping `tsc --noEmit` in TypeScript repos.

View File

@@ -1,44 +0,0 @@
---
name: python-development
description: Python ecosystem defaults and workflows using uv for packaging and ruff for linting/formatting
permalink: opencode-config/skills/python-development/skill
---
# Python Development
Load this skill when a repo or lane involves Python code (presence of `pyproject.toml`, `setup.py`, `requirements*.txt`, or `.py` files as primary source).
## Defaults
| Concern | Tool | Notes |
| --- | --- | --- |
| Package/venv management | `uv` | Replaces pip, pip-tools, and virtualenv |
| Linting + formatting | `ruff` | Replaces flake8, isort, black |
| Test runner | `pytest` | Unless repo already uses another runner |
| Type checking | `pyright` or `mypy` | Use whichever the repo already configures |
## Core Workflow
1. **Bootstrap** — `uv sync` (or `uv pip install -e ".[dev]"`) to create/refresh the venv.
2. **Lint** — `ruff check .` then `ruff format --check .` before committing.
3. **Test** — `pytest` with the repo's existing config. Follow TDD default policy.
4. **Add dependencies** — `uv add <pkg>` (runtime) or `uv add --dev <pkg>` (dev). Do not edit `pyproject.toml` dependency arrays by hand.
5. **Lock** — `uv lock` after dependency changes.
## Conventions
- Prefer `pyproject.toml` over `setup.py`/`setup.cfg` for new projects.
- Keep `ruff` config in `pyproject.toml` under `[tool.ruff]`.
- Use `uv run <cmd>` to execute tools inside the managed venv without activating it.
- Pin Python version via `.python-version` or `pyproject.toml` `requires-python`.
## Docker Integration
When the repo runs Python inside Docker, install dependencies with `uv pip install` inside the container. Mount a named volume for the uv cache to speed up rebuilds.
## Red Flags
- Using `pip install` directly instead of `uv`.
- Running `black` or `isort` when `ruff` is configured.
- Missing `uv.lock` after dependency changes.
- Editing dependency arrays in `pyproject.toml` by hand instead of using `uv add`.

View File

@@ -1,36 +0,0 @@
---
name: systematic-debugging
description: Diagnose failures with a hypothesis-first workflow, evidence capture, and escalation rules aligned to planner/builder
permalink: opencode-config/skills/systematic-debugging/skill
---
# Systematic Debugging
Use this skill when tests fail, behavior regresses, or the root cause is unclear.
## Workflow
1. Define the failure precisely (expected vs actual, where observed, reproducible command).
2. Capture a baseline with the smallest reliable repro.
3. List 1-3 concrete hypotheses and rank by likelihood.
4. Test one hypothesis at a time with targeted evidence collection.
5. Isolate the minimal root cause before proposing fixes.
6. Verify the fix with focused checks, then relevant regression checks.
## Evidence Requirements
- Record failing and passing commands.
- Keep key logs/errors tied to each hypothesis.
- Note why rejected hypotheses were ruled out.
## Planner/Builder Alignment
- Planner: use findings to shape bounded implementation tasks and verification oracles.
- Builder: if contradictions or hidden dependencies emerge, escalate back to planner.
- After two failed verification attempts, stop, record root cause evidence, and escalate.
## Output
- Root cause statement.
- Fix strategy linked to evidence.
- Verification results proving the issue is resolved and not regressed.

View File

@@ -1,36 +0,0 @@
---
name: test-driven-development
description: Apply red-green-refactor by default for code changes, with narrowly defined exceptions and explicit alternate verification
permalink: opencode-config/skills/test-driven-development/skill
---
# Test-Driven Development
Use this skill for all code changes unless a narrow exception applies.
## Default Cycle
1. Red: add or identify a test that fails for the target behavior.
2. Green: implement the minimal code change to make the test pass.
3. Refactor: improve structure while keeping tests green.
4. Re-run focused and relevant regression tests.
## Narrow Exceptions
Allowed exceptions only:
- docs-only changes
- config-only changes
- pure refactors with provably unchanged behavior
- repos without a reliable automated test harness
When using an exception, state:
- why TDD was not practical
- what alternative verification was used
## Role Expectations
- Planner: specify tasks and verification that preserve red-green-refactor intent.
- Builder/Coder: follow TDD during implementation or explicitly invoke a valid exception.
- Tester/Reviewer: verify that TDD evidence (or justified exception) is present.

View File

@@ -1,45 +0,0 @@
---
name: verification-before-completion
description: Require evidence-backed verification before completion claims or final handoff
permalink: opencode-config/skills/verification-before-completion/skill
---
# Verification Before Completion
Use this skill before declaring work done, handing off, or approving readiness.
## Verification Checklist
1. Re-state the promised outcome and scope boundaries.
2. Run the smallest reliable checks that prove requirements are met.
3. Run broader regression checks required by project workflow.
4. Confirm no known failures are being ignored.
5. Clean up temporary artifacts generated during work (e.g., scratch files, screenshots, logs, transient reports, caches). Intended committed deliverables are not cleanup targets.
6. Report residual risk, if any, explicitly.
## Evidence Standard
Use the compact verification summary shape for every evidence entry:
- **Goal** — what is being verified
- **Mode** — `smoke` (intermediate checkpoints) or `full` (final completion)
- **Command/Check** — exact command or manual check performed
- **Result** — `pass`, `fail`, `blocked`, or `not_run`
- **Key Evidence** — concise proof (output snippet, hash, assertion count)
- **Artifacts** — paths to logs/screenshots, or `none`
- **Residual Risk** — known gaps, or `none`
Keep raw logs out of primary context by default. When a check fails, summarize the failure first and then point to the raw evidence. Include full output only when explicitly requested.
Tie each evidence entry to an acceptance condition from the plan.
## Role Expectations
- Builder and tester: no completion claim without verification evidence in the compact shape above.
- Reviewer: reject completion claims that lack structured evidence or lane-ownership boundaries.
- Coder: include compact-shape verification evidence from the assigned lane before signaling done.
## If Verification Fails
- Do not claim partial completion as final.
- Return to debugging or implementation with updated hypotheses.

View File

@@ -1,45 +0,0 @@
---
name: writing-plans
description: Planner workflow for producing execution-ready approved plans with explicit scope, lanes, and verification oracle
permalink: opencode-config/skills/writing-plans/skill
---
# Writing Plans
Use this skill when converting intent into an execution-ready `plans/<slug>` note.
## Required Plan Shape
Every approved plan must include:
- Objective
- Scope and out-of-scope boundaries
- Constraints and assumptions
- Concrete task list
- Parallelization lanes and dependency notes
- Verification oracle
- Risks and open findings
When parallelization or phased verification matters, each lane must also specify:
- **Claimed files/areas** — paths or named surfaces the lane owns exclusively.
- **Dependencies** — which lanes (if any) must complete first.
- **Verification intent** — what will be checked and at what mode (`smoke` for intermediate checkpoints, `full` for final completion). Default to the shared mode rules in `AGENTS.md` when not otherwise specified.
The plan must give builder enough information to create the structured `executions/<slug>` note (lane claims, ownership, exit conditions, verification ledger shape) without guessing.
## Workflow
1. Gather enough evidence to remove guesswork.
2. Decompose work into bounded tasks with clear owners.
3. Define verification per task and for final integration.
4. Check contract alignment with planner -> builder handoff rules.
5. Mark `Status: approved` only when execution can proceed without improvisation.
## Quality Gates
- No ambiguous acceptance criteria.
- No hidden scope expansion.
- Verification is specific and runnable.
- Lane claims do not overlap in files or verification steps when parallel execution is intended.
- Verification mode (`smoke` or `full`) is stated or defaults are unambiguous for each checkpoint.

View File

@@ -7,3 +7,7 @@
[user] [user]
email = alexander@wiesner.com.br email = alexander@wiesner.com.br
name = alex wiesner name = alex wiesner
[init]
defaultBranch = main
[credential]
helper = store

14
.gitignore vendored Normal file
View File

@@ -0,0 +1,14 @@
.worktrees/
.pi/npm/
.pi/agent/sessions/
.pi/agent/auth.json
.pi/agent/web-search.json
.pi/subagents/
.pi/agent/extensions/.pi/
.pi/agent/extensions/tmux-subagent/events.jsonl
.pi/agent/extensions/tmux-subagent/result.json
.pi/agent/extensions/tmux-subagent/stderr.log
.pi/agent/extensions/tmux-subagent/stdout.log
.pi/agent/extensions/tmux-subagent/transcript.log
*bun.lock
*node_modules

View File

@@ -1,65 +0,0 @@
---
title: decisions
type: note
permalink: dotfiles/decisions
---
# Dotfiles Decisions
## Desktop Environment: Hyprland + Wayland
- **Decision:** Hyprland as primary compositor, full Wayland stack
- **Rationale:** Modern Wayland compositor with tiling, animations, and good HiDPI support
- **Constraints:** XWayland needed for legacy apps; special window rule to suppress maximize events
- **Input:** Caps Lock remapped to Super (`caps:super`), `alts_toggle` for US/BR layout switching
## Shell: Fish (not Bash/Zsh)
- **Decision:** Fish as primary interactive shell; bash/zsh configs retained for compatibility
- **Rationale:** Better autocompletion, syntax highlighting, friendly defaults
- **Plugin manager:** fisher (minimal, text-file-based)
- **No `oh-my-fish`** — prefer minimal plugin set (just catppuccin theme)
## Theme: Catppuccin Mocha (global)
- **Decision:** Single colorscheme (Catppuccin Mocha) applied uniformly across all tools
- **Rationale:** Consistent visual identity; official plugins available for Fish, Neovim, Kitty
- **Other variants installed:** Frappe, Macchiato, Mocha static — available in `fish/themes/` but Mocha is active
- **No per-tool theming exceptions** — all tools must use Catppuccin Mocha
## Editor: Neovim with lazy.nvim
- **Decision:** Neovim (not VSCode or other) as primary editor
- **Plugin manager:** lazy.nvim (not packer, not vim-plug) — auto-bootstrapped from init.lua
- **LSP strategy:** mason.nvim for tooling installation + mason-lspconfig for auto-enable; capabilities injected globally via cmp_nvim_lsp
- **Format strategy:** conform.nvim with format-on-save (not LSP formatting directly); lsp_fallback=true for unconfigured filetypes
- **No treesitter-based formatting** — explicit per-filetype formatters in conform
## OpenCode: Custom Multi-Agent Config
- **Decision:** Fully custom multi-agent configuration (not default OpenCode setup)
- **10 specialized agents** each with tailored instructions, model, temperature, permissions
- **Memory pattern:** `.memory/` directory tracked in git; agents write to `.memory/*` directly
- **Permission model:** Full edit for lead/coder/librarian; all others restricted to `.memory/*` writes (instruction-level enforcement, not tool-level)
- **AGENTS.md exception:** In the opencode subdir, `AGENTS.md` is NOT a symlink (it's the global OpenCode config file, distinct from the per-project `AGENTS.md` pattern)
- See [OpenCode Architecture](research/opencode-architecture.md) for details
## Waybar CPU Monitor: Ghostty (not Kitty)
- **Observation:** `cpu` module in Waybar opens `ghostty -e htop` on click — Ghostty may be installed as secondary terminal
- **Impact:** Kitty is the primary terminal (SUPER+Return), but Ghostty is referenced in Waybar config
## Git Credentials: gh CLI
- **Decision:** Use `gh auth git-credential` as credential helper for GitHub + Gist
- **Rationale:** Centralizes auth through GitHub CLI; no plaintext tokens in git config
## SSH Key Type: Ed25519
- **Decision:** Ed25519 for SSH key (not RSA)
- **Rationale:** Modern, fast, smaller key size
## No Global `.gitignore` in Dotfiles
- **Observation:** No global gitignore file visible; tracking is managed per-repo
- **Pattern:** Sensitive SSH private key `.ssh/id_ed25519` is tracked — implies this repo may use filesystem permissions for security

View File

@@ -1,31 +0,0 @@
---
title: critic-gate-tmux-shift-enter-fix
type: note
permalink: dotfiles/decisions/critic-gate-tmux-shift-enter-fix
tags:
- tmux
- critic-gate
- approved
---
# Critic Gate: tmux Shift+Enter Fix
## Verdict
- [decision] APPROVED — Plan is minimal, correctly scoped, and non-destructive.
## Rationale
- Single `bind-key -n S-Enter send-keys "\n"` addition to `~/.tmux.conf`.
- `extended-keys on` (line 8) and `extkeys` terminal feature (line 9) already present — `S-Enter` will be recognised.
- No existing conflicting bindings in `.tmux.conf`.
- Config will load cleanly; standard tmux syntax.
## Assumption Challenges
- [finding] `S-Enter` key name is valid because extended-keys is already enabled. ✅
- [finding] `send-keys "\n"` sends LF (0x0A). For TUI apps and multi-line tools, this inserts a newline as intended. For bare shell prompts, LF may still trigger accept-line (same as Enter). No shell-side `bindkey` exists in `.zshrc` to differentiate. This is a known limitation, not a blocker — follow-up zle binding may be needed.
## Files Evaluated
- `/home/alex/dotfiles/.tmux.conf` (57 lines, all relevant config)
- `/home/alex/dotfiles/.zshrc` (2 lines, no keybindings)
## Relations
- gates [[tmux-shift-enter-fix]]

View File

@@ -1,36 +0,0 @@
---
title: doc-coverage-waybar-pomodoro-fix
type: note
permalink: dotfiles/gates/doc-coverage-waybar-pomodoro-fix
tags:
- waybar
- pomodoro
- documentation
- doc-coverage
---
# Documentation Coverage: Waybar Pomodoro Visibility Fix
## Summary
Documentation coverage reviewed for the waybar pomodoro visibility fix (explicit binary path instead of PATH reliance).
## Observations
- [decision] No repo documentation changes needed for the pomodoro visibility fix — this is a personal dotfiles repo with no README, no docs/ directory, and AGENTS.md contains only agent workflow config, not dotfiles-specific documentation
- [decision] Changed files (`.config/waybar/config`, `.config/waybar/scripts/pomodoro-preset.sh`) are self-documenting through clear variable naming (`POMODORO_BIN`) and standard Waybar config format
- [decision] The plan note `plans/waybar-pomodoro-not-showing` already records full execution context, outcomes, and verification results — no additional knowledge capture needed
## Surfaces Checked
| Surface | Exists | Update Needed | Reason |
|---|---|---|---|
| README | No | N/A | Personal dotfiles repo, no README |
| docs/ | No | N/A | No docs directory exists |
| AGENTS.md | Yes | No | Contains only agent workflow config, not dotfiles project docs |
| Inline docs | Yes (self-documenting) | No | Variable naming and script structure are clear |
| Plan note | Yes | No | Already has execution notes and outcomes |
## Relations
- documents [[waybar-pomodoro-not-showing]]

View File

@@ -1,33 +0,0 @@
---
title: gate-tmux-shift-enter-fix-review
type: note
permalink: dotfiles/gates/gate-tmux-shift-enter-fix-review
tags:
- tmux
- review
- gate
- approved
---
# Gate: tmux Shift+Enter Fix — Correctness Review
## Verdict
- [decision] APPROVED — REVIEW_SCORE: 0
## Findings
- [observation] No CRITICAL or WARNING issues found.
- [observation] `bind-key -n S-Enter send-keys "\n"` is semantically correct for the stated intent.
- [observation] Prerequisite `extended-keys on` is present and positioned before the binding.
- [observation] Terminal features line (`xterm-kitty:extkeys`) enables the terminal to report extended key sequences.
- [observation] No conflicting bindings exist in the config.
- [observation] Config ordering is correct — prerequisites before the binding.
## Evidence Checked
- [observation] Line 8: `set -s extended-keys on` — enables tmux to recognize modified keys like `S-Enter`.
- [observation] Line 9: `set -as terminal-features 'xterm-kitty:extkeys'` — tells tmux the terminal supports extended keys.
- [observation] Line 10: `bind-key -n S-Enter send-keys "\n"` — root-table binding, sends literal newline. Correct.
- [observation] No other `Enter`-related or `S-Enter` bindings exist that could conflict.
- [observation] `-n` flag correctly targets root table (no prefix key needed).
## Relations
- reviews [[plans/tmux-shift-enter-fix]]

View File

@@ -1,44 +0,0 @@
---
title: gate-waybar-pomodoro-not-showing-review
type: note
permalink: dotfiles/gates/gate-waybar-pomodoro-not-showing-review
tags:
- gate
- review
- waybar
- pomodoro
- approved
---
# Gate: Waybar Pomodoro Not Showing — Correctness Review
## Verdict
- [decision] APPROVED — REVIEW_SCORE: 0 #gate #approved
## Scope
- Reviewed `.config/waybar/config`
- Reviewed `.config/waybar/scripts/pomodoro-preset.sh`
- Cross-referenced plan `[[waybar-pomodoro-not-showing]]` (`memory://plans/waybar-pomodoro-not-showing`)
- Confirmed prior critic guidance is reflected in current code
## Evidence checked
- [evidence] `.config/waybar/config:136-139` now uses `$HOME/.local/bin/waybar-module-pomodoro` for `exec`, `on-click`, and `on-click-middle`, while preserving the existing preset script hook for right-click
- [evidence] `.config/waybar/scripts/pomodoro-preset.sh:6-10` introduces `POMODORO_BIN="$HOME/.local/bin/waybar-module-pomodoro"` and replaces PATH-dependent lookup with an executable-file guard
- [evidence] `.config/waybar/scripts/pomodoro-preset.sh:30-32` routes `set-work`, `set-short`, and `set-long` through the same explicit binary path
- [evidence] Repository search found pomodoro binary references only in the expected changed lines, with no stale bare `waybar-module-pomodoro` invocations remaining in `.config/waybar/config` or `.config/waybar/scripts/pomodoro-preset.sh`
- [evidence] Fresh verification supplied by lead/coder: `bash -n` on the script passed; `/home/alex/.local/bin/waybar-module-pomodoro --help` succeeded and confirmed required subcommands/options exist
## Findings
- [observation] No correctness defects found in the reviewed change set
- [observation] The implementation matches the approved minimal fix for launch-time PATH mismatch and updates all user-triggered pomodoro entry points called out in the plan pre-mortem
## Related regression checks
- [check] `.config/waybar/config:136-139` — no stale bare binary references remain in `exec`, left-click toggle, right-click preset hook, or middle-click reset
- [check] `.config/waybar/scripts/pomodoro-preset.sh:6-10,30-32` — helper now uses one consistent binary path for validation and all preset subcommands; no path drift found in changed lines
## Freshness notes
- [finding] Prior critic guidance was confirmed, not contradicted: the old PATH-based guard was removed and replaced with an explicit executable-path check, matching the approved fix direction
## Relations
- gates [[waybar-pomodoro-not-showing]]
- related_to [[waybar-pomodoro-not-showing]]

View File

@@ -1,39 +0,0 @@
---
title: gate-waybar-pomodoro-not-showing
type: note
permalink: dotfiles/gates/gate-waybar-pomodoro-not-showing
tags:
- gate
- critic
- waybar
- pomodoro
- approved
---
# Critic Gate: Waybar Pomodoro Not Showing
## Verdict
- [decision] APPROVED — plan is clear, correctly scoped, and adequately de-risked #gate #approved
## Rationale
- [finding] Root cause (bare binary name in PATH-less Waybar launch environment) is the most likely explanation and is well-supported by explorer research
- [finding] All 8 bare `waybar-module-pomodoro` references are confined to the two target files: `.config/waybar/config` (3 refs) and `.config/waybar/scripts/pomodoro-preset.sh` (5 refs) — no other source files reference this binary
- [finding] Verification steps (bash -n, --help check) adequately gate against the alternative failure mode of a missing binary
- [finding] Plan scope is correctly limited to pomodoro-only; no decomposition needed
## Assumption Challenges
- [challenge] Binary path validity → mitigated by plan's --help verification step
- [challenge] Completeness of reference coverage → confirmed all 8 references across both files; no others in repo
- [challenge] JSONC $HOME expansion → confirmed Waybar does shell-expand $HOME in exec/on-click fields (existing config uses it on lines 94, 138)
## Coder Guidance
- [recommendation] Update or remove the `command -v waybar-module-pomodoro` guard (line 7 of pomodoro-preset.sh) since it checks bare PATH and will fail in the same environment that caused the original bug
- [recommendation] Consider using `$HOME/.local/bin/waybar-module-pomodoro` to match existing config style conventions (lines 94, 138 already use `$HOME`)
## Files Evaluated
- `.config/waybar/config` (142 lines)
- `.config/waybar/scripts/pomodoro-preset.sh` (33 lines)
## Relations
- gates [[waybar-pomodoro-not-showing]]
- related_to [[knowledge]]

View File

@@ -1,87 +0,0 @@
---
title: gate-waybar-pomodoro-visibility-fix
type: gate
permalink: dotfiles/gates/gate-waybar-pomodoro-visibility-fix
tags:
- waybar
- pomodoro
- gate
- pass
---
# Gate: Waybar Pomodoro Visibility Fix
**Status:** PASS
**Date:** 2026-03-12
**Plan ref:** [[waybar-pomodoro-not-showing]]
**Scope:** `.config/waybar/config`, `.config/waybar/scripts/pomodoro-preset.sh`
## Verdict Summary
The implementation correctly addresses the root cause (PATH mismatch between Hyprland/Waybar environment and interactive shell). All four invocation points for `waybar-module-pomodoro` are now explicit, and no residual bare-binary references remain. Both standard and adversarial checks pass.
## Standard Pass
### Acceptance Criteria Verification
| Criterion | Result |
|---|---|
| `custom/pomodoro` exec uses explicit path | ✅ Line 136: `$HOME/.local/bin/waybar-module-pomodoro --no-work-icons` |
| on-click uses explicit path | ✅ Line 137: `$HOME/.local/bin/waybar-module-pomodoro toggle` |
| on-click-middle uses explicit path | ✅ Line 139: `$HOME/.local/bin/waybar-module-pomodoro reset` |
| on-click-right still delegates to preset script | ✅ Line 138 unchanged |
| Preset script no longer uses PATH-dependent guard | ✅ `[[ ! -x "$POMODORO_BIN" ]]` replaces `command -v` |
| Preset script routes all set-* calls through `$POMODORO_BIN` | ✅ Lines 30–32 |
| Change is pomodoro-scoped only | ✅ No other modules touched |
| Binary syntax check (`bash -n`) passes | ✅ (Lead evidence, exit 0) |
| Binary exists and responds to `--help` | ✅ (Lead evidence, exit 0 with usage) |
### Pre-mortem Risk Tracking
| Risk | Status |
|---|---|
| Middle-click reset still using bare name | Resolved — line 139 uses explicit path |
| Only one entry point updated | Resolved — all four updated |
| Preset helper using `command -v` | Resolved — replaced with `[[ ! -x ... ]]` |
| Binary path unstable across sessions | Not triggered — binary confirmed at path |
## Adversarial Pass
### Hypotheses
| # | Hypothesis | Design | Expected failure | Observed |
|---|---|---|---|---|
| H1 | Empty/corrupt STATE_FILE causes crash | State file exists but empty | `current` reads as `""`, falls to else-branch | Safe: defaults to B-preset (short cycle), no crash |
| H2 | Binary missing/non-executable | Guard `[[ ! -x ]]` fires | Exit 1 with stderr | Guard correctly triggers, script exits cleanly |
| H3 | `$HOME` unset in Waybar env | `$HOME/.local/bin/...` fails to expand | Module fails silently | Same risk applies to all other modules using `$HOME` (line 94: `custom/uptime`); no regression introduced |
| H4 | `set -e` aborts mid-preset (daemon down) | First `set-work` fails → remaining calls skipped | Partial preset applied | Pre-existing behavior; not a regression from this change |
| H5 | STATE_FILE lost on reboot (`/tmp`) | Preset reverts to A-cycle | Unexpected preset on first right-click post-reboot | Intentional design, not a regression |
| H6 | No bare `pomodoro` left anywhere in config | Grep scan | Old reference found | Zero old references found — clean |
### Mutation Checks
| Mutation | Would current tests detect? |
|---|---|
| One of exec/on-click/on-click-middle reverted to bare name | Yes — structural grep confirms all three use explicit path |
| `POMODORO_BIN` guard inverted (`-x` instead of `! -x`) | Yes — guard would skip missing-binary error |
| `read -r current` without fallback | Caught — `|| current="A"` handles failure |
| `set-work` but not `set-short`/`set-long` through `$POMODORO_BIN` | Yes — all three lines verified |
**MUTATION_ESCAPES: 0/4**
## Unverified Aspects (Residual Risk)
1. **Live Waybar rendering** — Cannot verify the module actually appears on the bar without a running Waybar session. The Lead noted this is impractical in the task context.
2. **Binary behavior correctness**`--help` confirms the binary exists and accepts the right subcommands, but actual timer JSON output format was not sampled. The `return-type: json` config assumes the binary outputs conforming JSON.
3. **`$HOME` behavior under Waybar systemd unit** — Low risk (all other `$HOME`-using modules work), but not runtime-confirmed.
These residual risks are infrastructure-gated (no running Wayland/Waybar session available in this context), not implementation defects.
## Lesson Checks
- [confirmed] PATH mismatch is the failure mode for Waybar custom modules — explicit paths are the correct fix pattern.
- [confirmed] `[[ ! -x path ]]` guard is the right check for script-invoked binary dependencies.
- [not observed] Any silent failures from the old `command -v` approach (fix is in place, no regression).
## Relations
- resolves [[waybar-pomodoro-not-showing]]

View File

@@ -1,194 +0,0 @@
---
title: knowledge
type: note
permalink: dotfiles/knowledge
---
# Dotfiles Knowledge
## Project Purpose
Personal dotfiles for `alex` on a Linux/Wayland desktop. Managed as a bare or normal git repo in `~/dotfiles/`. Covers the full desktop stack: shell, editor, compositor, terminal, status bar, notifications, and AI tooling.
## Repository Layout
```
~/dotfiles/
├── .bash_profile / .bashrc / .zshrc / .profile # Legacy/fallback shell configs
├── .gitconfig # Git global config (gh credential helper)
├── .ssh/ # SSH keys and known_hosts
└── .config/
├── dunst/ # Notification daemon
├── fish/ # Primary shell
├── hypr/ # Wayland compositor + screen lock
├── kitty/ # Terminal emulator
├── nvim/ # Editor (Neovim)
├── opencode/ # AI coding assistant (complex subsystem)
├── rofi/ # App launcher
├── waybar/ # Status bar
└── zathura/ # PDF viewer
```
## Desktop Stack
| Layer | Tool | Notes |
|---|---|---|
| Compositor | Hyprland | Wayland, tiling, dwindle layout |
| Terminal | Kitty | GPU-accelerated |
| Shell | Fish | Primary shell |
| Editor | Neovim | lazy.nvim plugin manager |
| Status bar | Waybar | Bottom layer, top position |
| App launcher | Rofi | `rofi -show drun` |
| Notifications | Dunst | |
| Screen lock | Hyprlock | `SUPER+C` |
| Screenshots | Hyprshot | Print=region, Shift+Print=output |
| File manager | Thunar | |
| Browser | Brave | `SUPER+B` / `SUPER+SHIFT+B` incognito |
| Email | Thunderbird | `SUPER+M` |
| VPN | ProtonVPN | Auto-started via hyprland exec-once |
| Mail bridge | Protonmail Bridge | Auto-started `--no-window` |
| PDF viewer | Zathura | |
## Hyprland Configuration
File: `.config/hypr/hyprland.conf`
- **mainMod:** SUPER (`caps:super` — Caps Lock acts as Super)
- **Layout:** dwindle (no gaps, border_size=1, rounding=10)
- **Keyboard:** `us, br` layouts; `alts_toggle` (Alt+Shift switches layout)
- **Animations:** disabled
- **Autostart:** waybar, nm-applet, protonmail-bridge --no-window, protonvpn-app
### Key Bindings
```
SUPER+Return kitty
SUPER+Q kill window
SUPER+E thunar
SUPER+Space rofi
SUPER+F fullscreen
SUPER+B/Shift+B brave / brave --incognito
SUPER+M thunderbird
SUPER+V protonvpn-app
SUPER+C hyprlock
Print hyprshot -m region
Shift+Print hyprshot -m output
SUPER+h/j/k/l move focus (vim dirs)
SUPER+SHIFT+h/j/k/l move window
SUPER+1-9/0 switch workspace
SUPER+SHIFT+1-9/0 move to workspace
SUPER+S scratchpad toggle
SUPER+R resize submap (h/j/k/l = 30px steps)
```
## Theme: Catppuccin Mocha
Applied uniformly across all tools:
| Tool | Config file |
|---|---|
| Hyprland borders | `hyprland.conf` (lavender→mauve active, surface0 inactive) |
| Kitty | `kitty/kitty.conf` (full 16-color palette) |
| Neovim | `nvim/lua/plugins/colorscheme.lua` (catppuccin/nvim, flavour=mocha) |
| Fish | `fish/config.fish` (Catppuccin Mocha theme via fish_config) |
| Fish plugin | `fish/fish_plugins` (catppuccin/fish installed via fisher) |
Key colors: bg=#1e1e2e, fg=#cdd6f4, lavender=#b4befe, mauve=#cba6f7, crust=#11111b, surface0=#313244
## Shell: Fish
Files: `.config/fish/`
- **Plugin manager:** fisher (jorgebucaran/fisher)
- **Plugins:** catppuccin/fish
- **Theme:** Catppuccin Mocha (set in config.fish)
### Functions / Aliases
| Function | Expands to | Purpose |
|---|---|---|
| `c` | `opencode` | Launch OpenCode AI assistant |
| `cc` | `opencode --continue` | Continue last OpenCode session |
| `co` | `copilot` | GitHub Copilot CLI |
| `n` | `nvim` | Neovim |
## Editor: Neovim
Files: `.config/nvim/`
- **Entry:** `init.lua` — sets `mapleader=<Space>`, bootstraps lazy.nvim
- **Plugins:** all in `lua/plugins/`, auto-loaded via `{ import = 'plugins' }`
- **Options:** `number=true`, `relativenumber=true`, `wrap=false`
### Plugin List
| Plugin | File | Purpose |
|---|---|---|
| catppuccin/nvim | colorscheme.lua | Mocha colorscheme, priority=1000 |
| nvim-cmp | cmp.lua | Completion engine |
| stevearc/conform.nvim | conform.lua | Format on save |
| folke/lazydev | lazydev.lua | Neovim Lua dev assistance |
| neovim/nvim-lspconfig | lspconfig.lua | LSP client config |
| L3MON4D3/LuaSnip | luasnip.lua | Snippet engine |
| williamboman/mason.nvim | mason.lua | LSP/tool installer UI |
| mason-lspconfig.nvim | mason-lspconfig.lua | Mason+LSP bridge, auto-install |
| jay-babu/mason-null-ls | mason-null-ls.lua | Mason+null-ls bridge |
| nvimtools/none-ls | none-ls.lua | LSP diagnostics from external tools |
| opencode-ai/nvim-opencode | opencode.lua | OpenCode integration |
| nvim-telescope/telescope | telescope.lua | Fuzzy finder |
| nvim-treesitter | treesitter.lua | Syntax parsing |
### Keymaps
```
<leader>e vim.cmd.Ex (file explorer)
<leader>ww save file
<leader>ff Telescope find_files
<leader>fg Telescope live_grep
<leader>fb Telescope buffers
<leader>fh Telescope help_tags
<leader>f Conform format (async)
```
### LSP / Formatting
- **mason-lspconfig:** `automatic_installation=true`, `automatic_enable=true`; injects `cmp_nvim_lsp` capabilities to all LSP servers globally
- **conform formatters by filetype:**
- lua → stylua
- js/ts/jsx/tsx/json/yaml/md → prettier
- python → ruff_format
- go → gofmt
- **format_on_save:** timeout_ms=500, lsp_fallback=true
- **Diagnostics:** virtual_text, signs, underline; float border=rounded, source=always
## Status Bar: Waybar
File: `.config/waybar/config` + `style.css` + `scripts/pomodoro-preset.sh`
- Layer: bottom, position: top, spacing: 6
- **Left:** backlight, wireplumber, custom/pomodoro
- **Center:** clock (`{:%H:%M - %a,%d}`, interval=1)
- **Right:** tray, bluetooth, temperature, cpu, memory, battery
- **Pomodoro:** external `waybar-module-pomodoro` binary; left=toggle, right=preset script, middle=reset
- **Custom/music:** playerctl metadata polling (interval=5)
- CPU click: `ghostty -e htop` (note: Ghostty not Kitty here)
- Bluetooth click: blueman-manager
## OpenCode AI System
Files: `.config/opencode/`
The most complex subsystem. Full multi-agent AI coding assistant configuration.
See [OpenCode Architecture](research/opencode-architecture.md) for detailed breakdown.
**Quick reference:**
- Config: `opencode.jsonc` (default_agent=lead, plugin=@tarquinen/opencode-dcp)
- Agents: `agents/*.md` (10 agents: lead, coder, reviewer, tester, explorer, researcher, librarian, critic, sme, designer)
- Memory: `agents/.memory/` — persistent knowledge for the AI system itself
- Instruction files: `.github/copilot-instructions.md` (canonical), `CLAUDE.md` + `.cursorrules` (symlinks); `AGENTS.md` is NOT a symlink (global OpenCode config)
- MCP servers: context7 (remote docs), gh_grep (remote code search), playwright (local Chromium)
- Skills: `skills/doc-coverage/`, `skills/git-workflow/`, `skills/work-decomposition/`
## Git Configuration
File: `.gitconfig`
- `user.name=alex`, `user.email=misc@wiesner.com.br`
- `init.defaultBranch=main`
- Credential helper: `!/usr/bin/gh auth git-credential` (GitHub + Gist)

View File

View File

@@ -1,32 +0,0 @@
---
title: fix-github-push-large-binary
type: note
permalink: dotfiles/plans/fix-github-push-large-binary
tags:
- git
- risk
- tooling
---
# Fix GitHub Push Rejection for Large Binary
**Goal:** Remove the oversized `.local/bin/codebase-memory-mcp` blob from local-only history so `main` can push to GitHub successfully.
## Root cause
- [decision] Commit `969140e` added `.local/bin/codebase-memory-mcp` at ~143.51 MB.
- [decision] Commit `2643a0a` later removed the file, but the blob still exists in local history, so GitHub rejects the push.
## Tasks
- [x] Rewrite the 4 local-only commits by soft-resetting to `origin/main`.
- [x] Recreate a clean commit set without the large binary in history.
- [x] Verify no large-file path remains in reachable history.
- [x] Retry `git push` and confirm success.
## Acceptance criteria
- No reachable commit from `HEAD` contains `.local/bin/codebase-memory-mcp`.
- `git push` to `origin/main` succeeds without GitHub large-file rejection.
## Pre-mortem
- Most likely failure: recommit accidentally stages a regenerated large binary again.
- Fragile assumption: current worktree is clean except for the 4 local-only commits.
- Red flag requiring redesign: if the large blob exists in earlier shared history, a broader history rewrite would be required.
- Easy-to-miss regression: leaving `.local/bin/codebase-memory-mcp` unignored so it gets re-added later.

View File

@@ -1,43 +0,0 @@
---
title: luks-sddm-kwallet-login-integration
type: note
permalink: dotfiles/plans/luks-sddm-kwallet-login-integration
tags:
- auth
- sddm
- kwallet
- luks
---
# LUKS / SDDM / KWallet login integration
## Goal
Configure the system so login feels unified across LUKS boot unlock, SDDM, and KWallet.
## Clarified scope
- [decision] User selected **Password login** instead of true SDDM autologin because password login preserves KWallet PAM unlock.
- [decision] User selected **Just document commands** instead of expanding repo scope to manage `/etc` files directly.
- [decision] Deliverable is repo documentation with exact manual system commands/edits; no tracked `/etc` files will be added in this change.
## Discovery
- Dotfiles repo contains user-space config only; system auth files live outside the repo.
- Current system already references `pam_kwallet5.so` in `/etc/pam.d/sddm` and `/etc/pam.d/sddm-autologin`, but the module is missing and silently skipped.
- `kwallet-pam` is available in Arch repos and should provide the current PAM module for KWallet 6.
- LUKS unlock and SDDM login are independent phases; there is no direct password handoff from initramfs to SDDM.
- True SDDM autologin conflicts with password-based KWallet unlock because no login password is available to PAM during autologin.
## Tasks
- [ ] Write documentation for package install and PAM edits needed for SDDM/KWallet integration
- [ ] Document wallet initialization and verification steps
- [ ] Review documentation for correctness and scope alignment
- [ ] Validate documented commands against current system state where possible
- [ ] Check documentation coverage/placement in repo
## Acceptance criteria
- README documents the exact package install step and the exact PAM module substitutions needed.
- README explicitly states that password login is the chosen model and true SDDM autologin is not part of this setup.
- README includes KWallet initialization and verification steps suitable for this Arch + Hyprland + SDDM setup.
- Reviewer/tester/librarian passes are recorded before completion.
## Workstream
- Single workstream in the main repo working tree.

View File

@@ -1,37 +0,0 @@
---
title: tmux-shift-enter-fix
type: plan
permalink: dotfiles/plans/tmux-shift-enter-fix
tags:
- tmux
- terminal
- keybindings
---
# tmux Shift+Enter Fix
## Goal
Inside tmux, pressing Shift+Enter should insert a literal newline instead of submitting the command line.
## Decision
- [decision] Preserve tmux extended-keys support and apply the smallest possible fix at the tmux layer.
- [decision] Use `bind-key -n S-Enter send-keys "\n"` in `~/.tmux.conf` instead of disabling `extended-keys` or adding shell-specific bindings.
## Tasks
- [ ] Add a tmux root-table binding for `S-Enter`
- [ ] Verify tmux loads the config and exposes the expected binding
- [ ] Check documentation coverage for this config tweak
## Acceptance criteria
- `~/.tmux.conf` contains an explicit `S-Enter` binding that sends a newline.
- Existing `extended-keys` settings remain enabled.
- After sourcing the config, tmux shows the `S-Enter` root binding.
## Workstream
- Single workstream in the main repo working tree.
## Findings tracker
- None.
## Relations
- related_to [[knowledge]]

View File

@@ -1,63 +0,0 @@
---
title: waybar-pomodoro-not-showing
type: note
permalink: dotfiles/plans/waybar-pomodoro-not-showing
tags:
- waybar
- pomodoro
- plan
---
# Waybar Pomodoro Not Showing Implementation Plan
> For implementation: use `subagent-driven-development` when subagents are available; otherwise use `executing-plans`.
**Goal:** Restore the Waybar pomodoro module so it reliably appears on the bar.
**Architecture:** The `custom/pomodoro` Waybar module depends on an external `waybar-module-pomodoro` binary and a preset helper script. The most likely failure mode is launch-time PATH mismatch between Hyprland/Waybar and the interactive shell, so the minimal fix is to make module and helper invocations explicit and independent of shell PATH.
**Tech Stack:** JSONC-style Waybar config, shell script, Hyprland/Wayland desktop environment.
## File map
- Modify `.config/waybar/config` — make the pomodoro module invoke the binary explicitly.
- Modify `.config/waybar/scripts/pomodoro-preset.sh` — make the preset helper use the same explicit binary path.
## Task 1: Fix pomodoro command wiring
**Files:**
- Modify: `.config/waybar/config`
- Modify: `.config/waybar/scripts/pomodoro-preset.sh`
**Acceptance criteria:**
- `custom/pomodoro` no longer depends on login-session PATH to find `waybar-module-pomodoro`.
- Right-click preset switching still works.
- The change is minimal and limited to pomodoro-related wiring.
**Non-goals:**
- Do not refactor unrelated Waybar modules.
- Do not add system-wide installation steps or package management changes.
**Verification:**
- Run `bash -n /home/alex/dotfiles/.config/waybar/scripts/pomodoro-preset.sh` successfully.
- Run `/home/alex/.local/bin/waybar-module-pomodoro --help` successfully to verify the explicit binary path exists.
- Inspect resulting config references to confirm they use the explicit path consistently.
**Likely regression surfaces:**
- Right-click preset command path drift.
- Middle-click reset command still using the old bare binary name.
## Pre-mortem
- Most likely failure: only one of the pomodoro entry points is updated, leaving click actions broken.
- Fragile assumption: the binary remains at `/home/alex/.local/bin/waybar-module-pomodoro`.
- Redesign trigger: if the binary path is unstable across sessions or machines, prefer a Hyprland PATH fix instead.
- Easy-to-miss regression: preset helper still using `command -v` and failing under Waybar's environment.
## Execution notes
- Updated `.config/waybar/config` `custom/pomodoro` wiring to use `$HOME/.local/bin/waybar-module-pomodoro` for `exec`, `on-click`, and `on-click-middle`.
- Updated `.config/waybar/scripts/pomodoro-preset.sh` to remove PATH reliance by introducing `POMODORO_BIN="$HOME/.local/bin/waybar-module-pomodoro"`, replacing the `command -v` guard with `[[ ! -x "$POMODORO_BIN" ]]`, and routing all `set-work`/`set-short`/`set-long` calls through `"$POMODORO_BIN"`.
- Scope remained pomodoro-only; no unrelated Waybar modules or scripts were changed.
## Outcomes
- `bash -n /home/alex/dotfiles/.config/waybar/scripts/pomodoro-preset.sh` passed (no syntax errors).
- `/home/alex/.local/bin/waybar-module-pomodoro --help` succeeded and printed usage, confirming explicit-path binary availability.
- No practical automated test harness exists here for full Waybar runtime rendering in this task context; verification used the minimal command-level checks above.

View File

@@ -1,156 +0,0 @@
---
title: LUKS SDDM KWallet discovery
type: note
permalink: dotfiles/research/luks-sddm-kwallet-discovery
tags:
- sddm
- kwallet
- luks
- pam
- arch
- hyprland
- discovery
---
# LUKS SDDM KWallet discovery
## System context
- [fact] Distribution: **Arch Linux** (rolling), NOT NixOS — all configuration is manual files or pacman packages
- [fact] Desktop environment: **Hyprland** (Wayland compositor), NOT KDE Plasma
- [fact] Display manager: **SDDM** (installed, PAM files present)
- [fact] Lock screen: **hyprlock** (Hyprland native, separate from SDDM)
- [fact] Default session: `Session=hyprland` (from `~/.dmrc`)
- [fact] Boot: **systemd-boot** (`/boot/loader/`), kernel cmdline has `cryptdevice=PARTUUID=1a555ca6-ea08-4128-80cf-fe213664030e:root root=/dev/mapper/root`
- [fact] LUKS encryption: **LUKS-encrypted root** (`encrypt` hook in mkinitcpio), initramfs uses classic `encrypt` hook (not `sd-encrypt`)
- [fact] Filesystem: **btrfs** with `@` subvolume
## Current config files inventory
### Dotfiles repo (`/home/alex/dotfiles`) — user scope only
| File | Contents |
|---|---|
| `.config/hypr/hyprland.conf` | Hyprland WM config; autostart: waybar + nm-applet; lock bind: `hyprlock` |
| `.config/hypr/hyprlock.conf` | hyprlock PAM-auth lock screen; Catppuccin Mocha theme |
| `.config/hypr/monitors.conf` | Monitor config |
| `.config/hypr/workspaces.conf` | Workspace rules |
| `.dmrc` | `Session=hyprland` |
| `.gitconfig` | Git identity only |
| `.config/fish/`, `.config/nvim/`, etc. | Shell and editor config, not relevant |
**The dotfiles repo does NOT contain any SDDM, PAM, mkinitcpio, bootloader, or KWallet configuration.** All of those are system-level files managed outside this repo.
### System-level files (outside dotfiles repo)
| File | Status | Key contents |
|---|---|---|
| `/etc/mkinitcpio.conf` | Present | HOOKS include `encrypt` (classic LUKS hook) |
| `/boot/loader/entries/2026-03-11_16-58-39_linux.conf` | Present | `cryptdevice=PARTUUID=...` kernel param, LUKS root |
| `/boot/loader/loader.conf` | Present | `timeout 3`, no autologin |
| `/etc/pam.d/sddm` | Present | Includes `pam_kwallet5.so` (broken — see risks) |
| `/etc/pam.d/sddm-autologin` | Present | Includes `pam_kwallet5.so` (broken — see risks) |
| `/etc/pam.d/sddm-greeter` | Present | Standard greeter-only config |
| `/etc/pam.d/system-auth` | Present | Standard pam_unix, pam_faillock |
| `/etc/pam.d/system-login` | Present | Standard, includes pam_u2f.so at top |
| `/etc/pam.d/hyprlock` | Present | `auth include login` — delegates to login chain |
| `/usr/lib/sddm/sddm.conf.d/default.conf` | Present | No autologin configured; `DisplayServer=x11` (NOT wayland) |
| `/etc/sddm.conf.d/` | **MISSING** — no local overrides exist | No user customization of SDDM |
| `/etc/sddm.conf` | **MISSING** | No top-level SDDM config file |
## KDE/KWallet installation state
- [fact] `kwalletd6` binary is installed: `/usr/bin/kwalletd6`
- [fact] `kwallet-query` is installed: `/usr/bin/kwallet-query`
- [fact] **`pam_kwallet5.so` does NOT exist** in `/usr/lib/security/` or `/lib/security/`
- [fact] **`pam_kwallet6.so` does NOT exist** either — `kwallet-pam` package is NOT installed
- [fact] `pam_gnome_keyring.so` IS installed at `/usr/lib/security/`
- [fact] No `~/.config/kwalletrc` exists — KWallet has never been initialized for this user
- [fact] No `~/.local/share/kwalletd/` directory — no wallet database exists
## Current PAM configuration for SDDM (detailed)
### `/etc/pam.d/sddm` (normal login)
```
auth sufficient pam_u2f.so cue
auth include system-login
-auth optional pam_gnome_keyring.so
-auth optional pam_kwallet5.so ← BROKEN: module not installed
session optional pam_keyinit.so force revoke
session include system-login
-session optional pam_gnome_keyring.so auto_start
-session optional pam_kwallet5.so auto_start ← BROKEN
```
### `/etc/pam.d/sddm-autologin`
```
auth sufficient pam_u2f.so cue
auth required pam_permit.so
-auth optional pam_kwallet5.so ← BROKEN
session include system-local-login
-session optional pam_kwallet5.so auto_start ← BROKEN
```
Note: The `-` prefix means these lines are silently skipped if the module is missing — not causing errors, but not functioning.
## SDDM autologin configuration state
- [fact] SDDM autologin is **NOT configured** — `User=` and `Session=` are empty in default.conf
- [fact] SDDM `DisplayServer=x11` in default.conf — **no wayland greeter configured**
- [fact] No `/etc/sddm.conf.d/` drop-in directory exists
## Dependency chain for LUKS → SDDM → KWallet integration
### Boot-time LUKS (currently working)
```
systemd-boot → kernel cryptdevice= param → initramfs encrypt hook → LUKS passphrase prompt → root mounted
```
### Login-time (currently: manual SDDM login, no KWallet auto-open)
```
SDDM greeter → user types password → PAM sddm → pam_unix validates → session started
→ pam_kwallet5.so would unlock wallet (BROKEN: module missing)
```
### Target state (proposed)
```
Boot: LUKS passphrase entered
→ system up → SDDM greeter shown
→ Option A (autologin): SDDM skips password → session starts → KWallet opened with stored key
→ Option B (PAM reuse): SDDM password == user password == KWallet password → pam_kwallet6 unlocks wallet on login
```
## Likely edit points
### To fix KWallet auto-open via PAM (Option B — recommended)
1. **Install `kwallet-pam` package** (AUR: `kwallet-pam` provides `pam_kwallet6.so`) — PREREQUISITE
2. **`/etc/pam.d/sddm`** — replace `pam_kwallet5.so` references with `pam_kwallet6.so` in auth and session stacks
3. **`/etc/pam.d/sddm-autologin`** — same replacement if autologin is also desired
4. **`~/.config/kwalletrc`** — create/configure wallet to use blowfish or GPG encryption; set wallet name
5. **Initialize wallet** — run `kwalletd6` or use `kwallet-query` to create the default wallet with the user's login password as the unlock password
### To configure SDDM for Wayland session (currently X11 default)
6. **`/etc/sddm.conf.d/hyprland.conf`** (new file) — set `DisplayServer=wayland` or leave X11 and use Wayland session via `wayland-session` script
### To configure SDDM autologin (Option A)
7. **`/etc/sddm.conf.d/autologin.conf`** (new file) — set `User=alex`, `Session=hyprland`
### To track these system files in the dotfiles repo
8. Add symlinks or a deploy script — system PAM files are outside the current dotfiles scope
## Risks and ambiguities
- [risk] **`pam_kwallet5.so` vs `pam_kwallet6.so` mismatch**: PAM files reference kwallet5 module; installed binary is kwalletd6. The `kwallet-pam` package for KF6 provides `pam_kwallet6.so` — this must be installed from AUR or a compatible repo.
- [risk] **No KDE Plasma installed**: The system uses Hyprland, not Plasma. KWallet works standalone, but Plasma's system tray integration for wallet prompts won't be present. Apps must use the KWallet D-Bus API directly.
- [risk] **SDDM running X11 compositor by default**: The `default.conf` has `DisplayServer=x11`, but the user session is Hyprland (Wayland). SDDM itself can still launch Wayland sessions from an X11 greeter. This works but is a mismatch worth documenting.
- [risk] **autologin + KWallet security trade-off**: If autologin is used (Option A), KWallet cannot be unlocked by the user password (there is none at login). The wallet would need to be set to "no password" (plaintext) or use a keyfile — both reduce security.
- [risk] **pam_u2f.so at top of system-login and sddm**: U2F is configured as `sufficient` — meaning a hardware key can bypass password entirely. This could bypass KWallet unlock if the wallet password differs from the user password.
- [risk] **hyprlock uses `auth include login`**: The lock screen delegates to the `login` PAM chain, which does NOT include kwallet PAM modules. Unlocking hyprlock will NOT re-open the wallet.
- [risk] **Dotfiles repo scope boundary**: `/etc/pam.d/`, `/etc/sddm.conf.d/`, `/etc/mkinitcpio.conf`, and `/boot/loader/` are all outside the dotfiles repo. These are system files. Either the dotfiles repo needs to expand its scope (with a deploy script), or these changes must be managed separately.
- [risk] **mkinitcpio uses classic `encrypt` hook, not `sd-encrypt`**: The `sd-encrypt` (systemd) hook supports TPM2/FIDO2-bound LUKS keys for automatic unlock; the classic `encrypt` hook does not. If the goal involves TPM2-bound auto-unlock (true single-passphrase boot), migration to `sd-encrypt` would be required.
- [ambiguity] **"SDDM login" with LUKS**: LUKS unlock happens at boot (initramfs), before SDDM. There is no mechanism for SDDM to "reuse" the LUKS passphrase directly. The integration point is: user types the same password at SDDM → PAM propagates it to `pam_kwallet6` → wallet unlocked. The LUKS and SDDM passwords are independent unless deliberately set to the same value.
## Relations
- related_to [[Hyprland config]]
- related_to [[PAM configuration]]

View File

@@ -1,63 +0,0 @@
---
title: LUKS SDDM KWallet documentation targets
type: note
permalink: dotfiles/research/luks-sddm-kwallet-documentation-targets
tags:
- sddm
- kwallet
- luks
- pam
- documentation
- edit-points
---
# LUKS SDDM KWallet documentation targets
## Summary
User decision: **document exact commands only** (not manage `/etc` files in the repo). This means the deliverable is a new documentation file in the dotfiles repo, not new symlinks or deploy scripts.
## Repo documentation conventions found
- [fact] **No README.md, SETUP.md, INSTALL.md, or docs/ directory exists** — the dotfiles repo has no human-facing setup documentation at all
- [fact] The only markdown files tracked in git are: `.memory/decisions.md`, `.memory/knowledge.md`, `.memory/research/opencode-architecture.md` — all are basic-memory agent-facing notes, not user-facing docs
- [fact] `.config/opencode/AGENTS.md` is the OpenCode agent instruction file (global AI config) — NOT a per-feature setup doc
- [convention] There is no established convention for "machine setup" documentation in this repo — **any new docs file will establish the pattern**
## Best file location for command documentation
### Option A (Recommended): `README.md` at repo root
- **Path:** `/home/alex/dotfiles/README.md`
- **Rationale:** Establishes the first user-facing doc for the repo; natural home for setup and system integration notes; visible on any git host
- **Section to add:** `## System Setup: KWallet + SDDM PAM integration` with step-by-step commands
### Option B: `.memory/plans/luks-sddm-kwallet-login-integration.md` (append)
- **Path:** `/home/alex/dotfiles/.memory/plans/luks-sddm-kwallet-login-integration.md`
- **Rationale:** Already tracks this feature; append a `## Exact commands` section
- **Downside:** `.memory/` files are agent-facing, not user-facing; commands buried in plan notes are harder to find later
### Option C: New dedicated file `SETUP-auth.md` or `docs/auth-setup.md`
- **Path:** `/home/alex/dotfiles/SETUP-auth.md`
- **Rationale:** Keeps system-setup docs separate from repo README
- **Downside:** Fragments documentation without an established convention
## What the documentation must cover (per plan + discovery)
Commands for:
1. `pacman -S kwallet-pam` OR AUR install of `kwallet-pam` (provides `pam_kwallet6.so`)
2. Edit `/etc/pam.d/sddm` — replace `pam_kwallet5.so` with `pam_kwallet6.so` (auth + session lines)
3. Edit `/etc/pam.d/sddm-autologin` — same replacement (if needed)
4. Create `/etc/sddm.conf.d/` directory if missing
5. Initialize KWallet — `kwalletd6` first-run or `kwallet-query` commands
6. Verify: `systemctl restart sddm` and login test
## Risks relevant to documentation
- [risk] `kwallet-pam` for KF6 may be AUR-only on Arch — exact package name needs verification before documenting
- [risk] `/etc/pam.d/` edits require root; if documented as copy-paste commands, must be prefixed with `sudo`
- [risk] SDDM autologin is NOT configured and should NOT be added — the password-login model was chosen; docs must not inadvertently suggest autologin setup
- [risk] A new `README.md` will be the first user-facing documentation and will set precedent — scope it carefully to avoid bloat
## Relations
- related_to [[LUKS SDDM KWallet discovery]]
- related_to [[luks-sddm-kwallet-login-integration]]

View File

@@ -1,264 +0,0 @@
---
title: SDDM KWallet PAM Setup for Hyprland
type: note
permalink: dotfiles/research/sddm-kwallet-pam-setup-for-hyprland
tags:
- sddm
- kwallet
- pam
- hyprland
- arch
- research
- authoritative
---
# SDDM KWallet PAM Setup for Hyprland
## Summary
Complete, source-verified setup for automatic KWallet unlock on SDDM password login, for a non-Plasma (Hyprland) Arch Linux system.
## Freshness
- confidence: high
- last_validated: 2026-03-11
- volatility: low (KDE Plasma 6 PAM module is stable; Arch Wiki last edited 2026-03-10)
- review_after_days: 90
- validation_count: 1
- contradiction_count: 0
## Sources consulted
- [source] Arch Wiki "KDE Wallet" — https://wiki.archlinux.org/title/KDE_Wallet (last edited 2026-03-10)
- [source] Arch Wiki "SDDM" — https://wiki.archlinux.org/title/SDDM (last edited 2026-03-04)
- [source] Arch package database `kwallet-pam` 6.6.2-1 file listing — https://archlinux.org/packages/extra/x86_64/kwallet-pam/files/
- [source] Arch package database `kwallet` 6.23.0-1 file listing — https://archlinux.org/packages/extra/x86_64/kwallet/files/
- [source] Real-world Hyprland dotfiles from GitHub (wayblueorg/wayblue, AhmedAmrNabil/nix-config)
## (1) Package to install
- [fact] Package: **`kwallet-pam`** — in the official **`extra`** repository (NOT AUR)
- [fact] Install command: `sudo pacman -S kwallet-pam`
- [fact] Current version: **6.6.2-1** (as of 2026-03-03)
- [fact] Dependencies: `kwallet`, `pam`, `libgcrypt`, `socat` (all already present or auto-resolved)
- [fact] Files installed:
- `/usr/lib/security/pam_kwallet5.so` — the PAM module
- `/usr/lib/pam_kwallet_init` — session-start helper script
- `/etc/xdg/autostart/pam_kwallet_init.desktop` — XDG autostart for Plasma/DE environments
- `/usr/lib/systemd/user/plasma-kwallet-pam.service` — systemd user service
### Critical naming fact
- [fact] **The PAM module is `pam_kwallet5.so` even for KDE Frameworks 6 / Plasma 6.** There is no `pam_kwallet6.so`. The "5" in the name is a legacy artifact. The previous discovery note incorrectly stated `pam_kwallet6.so` would be provided — this was wrong.
- [fact] The existing `/etc/pam.d/sddm` and `/etc/pam.d/sddm-autologin` files already reference `pam_kwallet5.so` — they just need the package installed; **no module name changes are needed**.
## (2) PAM configuration
### Plasma 6 / ksecretd consideration
The Arch Wiki (section "Configure PAM on Plasma 6 (KF6)", updated 2026-03-10) says Plasma 6 uses `ksecretd` as the secret service daemon. The PAM session line should include `kwalletd=/usr/bin/ksecretd` to point to the new daemon.
- [fact] `ksecretd` binary is at `/usr/bin/ksecretd` and is shipped by the `kwallet` package (6.23.0-1)
- [fact] `kwalletd6` binary is at `/usr/bin/kwalletd6` and is also in the `kwallet` package
- [decision] For a non-Plasma Hyprland setup, the question is which daemon to target. The Arch Wiki recommends `kwalletd=/usr/bin/ksecretd` for KF6. Since the user has `kwalletd6` and `ksecretd` both installed via the `kwallet` package, and the Arch Wiki explicitly documents this parameter for KF6, the documentation should use the `kwalletd=/usr/bin/ksecretd` parameter.
### Recommended `/etc/pam.d/sddm` (password login)
The file already has the right structure. After installing `kwallet-pam`, the existing lines become functional. However, for Plasma 6 / KF6 compatibility, the session line should add the `kwalletd=` parameter:
```
#%PAM-1.0
auth sufficient pam_u2f.so cue
auth include system-login
-auth optional pam_gnome_keyring.so
-auth optional pam_kwallet5.so
account include system-login
password include system-login
session optional pam_keyinit.so force revoke
session include system-login
-session optional pam_gnome_keyring.so auto_start
-session optional pam_kwallet5.so auto_start kwalletd=/usr/bin/ksecretd
```
Key points:
- [fact] The `-` prefix on `-auth` and `-session` lines means "skip silently if module is missing" — this is already present in the default SDDM PAM files
- [fact] The `auth` line captures the user password for later use by the session line
- [fact] The `session` line with `auto_start` tells the module to start kwalletd/ksecretd and unlock the wallet
- [fact] `kwalletd=/usr/bin/ksecretd` directs the module to use KF6's ksecretd daemon instead of the legacy kwalletd5
### Recommended `/etc/pam.d/sddm-autologin`
This file is for SDDM autologin ONLY. Since the chosen model is password login, this file is informational but should still be kept correct:
```
#%PAM-1.0
auth sufficient pam_u2f.so cue
auth required pam_permit.so
-auth optional pam_kwallet5.so
account include system-local-login
password include system-local-login
session include system-local-login
-session optional pam_kwallet5.so auto_start kwalletd=/usr/bin/ksecretd
```
- [caveat] Autologin skips password entry → PAM has no password to pass to `pam_kwallet5.so` → wallet cannot be unlocked unless LUKS passphrase forwarding is used (see section 5)
### Minimal edit needed for existing system
Since the existing `/etc/pam.d/sddm` already has `pam_kwallet5.so` lines, the only change needed is:
1. Install `kwallet-pam` (makes the module file appear at `/usr/lib/security/pam_kwallet5.so`)
2. Add `kwalletd=/usr/bin/ksecretd` to the session line for KF6 compatibility
The auth line does NOT need the `kwalletd=` parameter — only the session line does.
## (3) Wallet initialization for non-Plasma (Hyprland) users
### Step A: Create the wallet
The wallet **must** be named `kdewallet` (the default name). PAM unlock only works with this specific wallet name.
**Option 1 — GUI (recommended if kwalletmanager is available):**
```bash
sudo pacman -S kwalletmanager
kwalletmanager6
```
Then: File > New Wallet > name it `kdewallet` > set password to match login password > choose **blowfish** encryption (NOT GPG).
**Option 2 — Headless/CLI:**
No pure-CLI wallet creation tool exists. The wallet is created automatically when:
1. The PAM module is installed and configured
2. The user logs in via SDDM with password
3. `pam_kwallet_init` runs and kwalletd6/ksecretd starts
4. If no wallet exists, kwalletd6 creates one on first access
For a truly headless init, trigger it by running in the session:
```bash
# Ensure kwalletd6/ksecretd is running (D-Bus activated)
dbus-send --session --dest=org.kde.kwalletd6 --print-reply \
/modules/kwalletd6 org.kde.KWallet.open \
string:"kdewallet" int64:0 string:"init"
```
This prompts for the wallet password interactively (Qt dialog).
### Step B: Ensure wallet password matches login password
- [requirement] The KWallet password MUST be identical to the Unix user login password. PAM passes the login password to the kwallet module; if they differ, the wallet won't unlock.
- [requirement] If the user password is changed later, the wallet password must be updated to match. Use `kwalletmanager6` > Change Password, or delete and recreate the wallet.
### Step C: kwalletrc configuration
Create `~/.config/kwalletrc` if it doesn't exist:
```ini
[Wallet]
Default Wallet=kdewallet
Enabled=true
First Use=false
[org.freedesktop.secrets]
apiEnabled=true
```
The `apiEnabled=true` setting enables the org.freedesktop.secrets D-Bus API, allowing libsecret-based apps (Chromium, VSCode, etc.) to use KWallet.
### Step D: Autostart `pam_kwallet_init` in Hyprland
The `kwallet-pam` package installs an XDG autostart entry (`/etc/xdg/autostart/pam_kwallet_init.desktop`), but Hyprland does NOT process XDG autostart files by default.
Add to `~/.config/hypr/hyprland.conf`:
```
exec-once = /usr/lib/pam_kwallet_init
```
This script reads the PAM-cached credentials and passes them to kwalletd6/ksecretd to unlock the wallet.
### Step E: D-Bus activation service (optional but recommended)
Create `~/.local/share/dbus-1/services/org.freedesktop.secrets.service`:
```ini
[D-BUS Service]
Name=org.freedesktop.secrets
Exec=/usr/bin/kwalletd6
```
This ensures kwalletd6 auto-starts when any app requests secrets via D-Bus, even before the wallet is explicitly opened.
## (4) Verification
### Quick verification after login
```bash
# 1. Check the PAM module is installed
ls -la /usr/lib/security/pam_kwallet5.so
# 2. Check kwalletd6 or ksecretd is running
pgrep -a kwalletd6 || pgrep -a ksecretd
# 3. Check the wallet is open
dbus-send --session --dest=org.kde.kwalletd6 --print-reply \
/modules/kwalletd6 org.kde.KWallet.isOpen \
string:"kdewallet"
# 4. Check wallet files exist
ls -la ~/.local/share/kwalletd/
# 5. Query the wallet (should return without prompting for password)
kwallet-query -l kdewallet
# 6. Check environment variables set by pam_kwallet_init
echo $PAM_KWALLET5_LOGIN
```
### Full integration test
1. Log out of Hyprland
2. At SDDM greeter, type user password and log in
3. After Hyprland starts, run `kwallet-query -l kdewallet` — it should list folders without prompting
4. Open a KWallet-aware app (e.g., Chromium with `--password-store=kwallet5`) and verify it stores/retrieves credentials
### Troubleshooting if wallet doesn't auto-unlock
- Check `journalctl --user -u plasma-kwallet-pam.service` for errors
- Check `journalctl -b | grep -i kwallet` for PAM-level errors
- Verify wallet password matches login password exactly
- Verify wallet is named exactly `kdewallet` (not `default` or any other name)
- Verify wallet uses blowfish encryption, not GPG
## (5) Caveats
### U2F / pam_u2f.so interaction
- [risk] The existing `/etc/pam.d/sddm` has `auth sufficient pam_u2f.so cue` as the FIRST auth line. When `sufficient` succeeds, PAM skips remaining auth modules — including `pam_kwallet5.so`.
- [consequence] If the user authenticates via U2F key only (no password typed), the kwallet module never captures a password → wallet cannot be unlocked automatically.
- [mitigation] This is acceptable if U2F is used as a convenience shortcut and the user accepts that wallet won't auto-unlock in that case. The wallet can be manually unlocked later.
- [alternative] To make U2F + kwallet work together, change `sufficient` to a two-factor setup where password is always required. But this changes the security model and is out of scope for this documentation.
### Autologin caveat
- [risk] SDDM autologin (`pam_permit.so`) provides no password → `pam_kwallet5.so` has nothing to unlock the wallet with.
- [fact] The Arch Wiki documents a workaround using `pam_systemd_loadkey.so` for LUKS-encrypted systems: the LUKS passphrase can be forwarded from the initramfs to the PAM stack, allowing wallet unlock even with autologin.
- [requirement] This requires: (1) systemd-based initramfs (`sd-encrypt` hook, not classic `encrypt`), (2) `pam_systemd_loadkey.so` line in sddm-autologin, (3) sddm.service override with `KeyringMode=inherit`.
- [fact] The current system uses classic `encrypt` hook, NOT `sd-encrypt`, so this workaround is NOT available without migrating the initramfs to systemd hooks.
- [decision] Since password login (not autologin) was chosen, this is informational only.
### Fingerprint reader caveat
- [fact] KWallet cannot be unlocked using a fingerprint reader (per Arch Wiki). Similar to U2F — no password is available.
### GPG encryption caveat
- [fact] `kwallet-pam` does NOT work with GPG-encrypted wallets. The wallet MUST use standard blowfish encryption.
### hyprlock caveat
- [fact] hyprlock uses `auth include login` in `/etc/pam.d/hyprlock`. The login PAM chain does NOT include kwallet PAM modules. Unlocking hyprlock will NOT re-open the wallet if it was closed.
- [mitigation] Typically the wallet stays open for the session duration. If the wallet is configured with `Leave Open=true` (in kwalletrc or kwalletmanager), it won't close automatically.
### Password change caveat
- [fact] If the user's login password is changed (via `passwd`), the wallet password must be manually updated to match. PAM does not automatically synchronize wallet passwords on password change.
## Relations
- related_to [[LUKS SDDM KWallet discovery]]
- related_to [[luks-sddm-kwallet-login-integration]]
- related_to [[LUKS SDDM KWallet documentation targets]]

View File

@@ -1,112 +0,0 @@
---
title: opencode-architecture
type: note
permalink: dotfiles/research/opencode-architecture
---
# OpenCode Architecture Research
## Overview
The OpenCode multi-agent configuration lives at `.config/opencode/` and is the most complex subsystem in this dotfiles repo.
## Directory Structure
```
.config/opencode/
├── opencode.jsonc # Main config
├── AGENTS.md # Global OpenCode config (NOT a symlink here)
├── CLAUDE.md -> .github/copilot-instructions.md (symlink)
├── .cursorrules -> .github/copilot-instructions.md (symlink)
├── .github/
│ └── copilot-instructions.md # Canonical cross-tool instructions
├── agents/
│ ├── lead.md # Primary orchestrator (mode=primary, temp=0.3)
│ ├── coder.md # Implementation agent
│ ├── reviewer.md # Code review (read-only)
│ ├── tester.md # Testing/validation
│ ├── explorer.md # Codebase mapper
│ ├── researcher.md # Technical investigator
│ ├── librarian.md # Documentation specialist
│ ├── critic.md # Plan gate
│ ├── sme.md # Domain expert consultant
│ └── designer.md # UI/UX specialist
├── .memory/
│ ├── knowledge.md # OpenCode-specific architecture knowledge
│ ├── decisions.md # Agent permission decisions, symlink strategy
│ ├── plans/ # Active feature plans
│ └── research/ # Research findings
└── skills/
├── doc-coverage/SKILL.md # Documentation coverage checklist
├── git-workflow/SKILL.md # Git commit/worktree/PR procedures
└── work-decomposition/SKILL.md # Multi-feature decomposition
```
## opencode.jsonc Key Settings
```jsonc
{
"default_agent": "lead",
"autoupdate": true,
"plugin": "@tarquinen/opencode-dcp",
"agents": {
"general": { "disabled": true },
"explore": { "disabled": true },
"plan": { "permissions": { "write": "allow" } }
},
"permissions": {
"websearch": "allow",
"question": "allow",
"external_directory": "deny"
},
"mcp": {
"context7": { "url": "https://mcp.context7.com/mcp", "type": "remote" },
"gh_grep": { "url": "https://mcp.grep.app", "type": "remote" },
"playwright": { "command": "npx @playwright/mcp@latest --headless --browser chromium", "type": "local" }
}
}
```
## Agent Model/Permission Matrix
| Agent | Model | Full Edit | Notes |
|---|---|---|---|
| lead | claude-opus-4 | ✅ | Orchestrator, all task types |
| coder | gpt-5.3-codex | ✅ | Implementation |
| librarian | claude-opus-4.6 | ✅ | Documentation |
| reviewer | claude-opus-4.6 | `.memory/*` only | Read-only code review |
| tester | claude-sonnet-4.6 | `.memory/*` only | Validation |
| explorer | claude-sonnet-4.6 | `.memory/*` only | Codebase mapping |
| researcher | claude-opus-4.6 | `.memory/*` only | Technical research |
| critic | claude-opus-4.6 | `.memory/*` only | Plan gate |
| sme | claude-opus-4.6 | `.memory/*` only | Domain expert |
| designer | claude-sonnet-4.6 | `.memory/*` only | UI/UX |
## Lead Agent Workflow
Phases: CLARIFY → DISCOVER → CONSULT → PLAN → CRITIC-GATE → EXECUTE → PHASE-WRAP
- **Tiered quality pipeline:** Tier 1 (full, new features), Tier 2 (standard), Tier 3 (fast, trivial)
- **Worktrees:** `.worktrees/<feature-name>` per feature branch
- **Retry circuit breaker:** 3 coder rejections → redesign; 5 failures → escalate
- **Commit format:** Conventional Commits (`feat:`, `fix:`, `chore:`, etc.)
- **Parallelization:** mandatory for independent work
## Memory Pattern
- `.memory/` tracked in git for cross-session persistence
- Agents with `.memory/*` write permission record directly (instruction-level enforcement)
- Structure: `knowledge.md` (architecture), `decisions.md` (design choices), `plans/<feature>.md`, `research/<topic>.md`
## Cross-Tool Instruction Files
- `.github/copilot-instructions.md` = single source of truth
- `CLAUDE.md` and `.cursorrules` = symlinks
- `AGENTS.md` = NOT a symlink in this repo (serves as global OpenCode config)
- **Note:** In OTHER projects, `AGENTS.md` should be a symlink. The OpenCode config dir is a special case.
## Skills
- **doc-coverage:** Validates canonical instruction file + symlinks; checks README + docs/* coverage
- **git-workflow:** Step-by-step git commit, worktree, and PR creation procedures
- **work-decomposition:** Splits 3+ feature requests into independent workstreams with separate worktrees

View File

@@ -1,148 +0,0 @@
---
title: waybar-pomodoro-not-showing
type: note
permalink: dotfiles/research/waybar-pomodoro-not-showing
tags:
- waybar
- pomodoro
- debugging
- risk
---
# Waybar Pomodoro Not Showing — Research Findings
## Scope
Investigation of why `custom/pomodoro` does not appear on the Waybar status bar.
Files inspected: `.config/waybar/config`, `.config/waybar/style.css`, `.config/waybar/scripts/pomodoro-preset.sh`.
## Module Wiring (as configured)
### modules-left (config line 59)
```json
"modules-left": [
"backlight",
"wireplumber",
"custom/pomodoro",
],
```
`custom/pomodoro` IS present in `modules-left`.
### custom/pomodoro definition (config lines 133–140)
```json
"custom/pomodoro": {
"format": "{}",
"return-type": "json",
"exec": "waybar-module-pomodoro --no-work-icons",
"on-click": "waybar-module-pomodoro toggle",
"on-click-right": "$HOME/.config/waybar/scripts/pomodoro-preset.sh",
"on-click-middle": "waybar-module-pomodoro reset",
},
```
### CSS selector (style.css lines 106–109)
```css
#custom-pomodoro {
padding: 0 4px;
color: @red;
}
```
Selector is correct and present.
### Script (scripts/pomodoro-preset.sh)
- Guarded by `command -v waybar-module-pomodoro` check (exits 1 if not installed).
- Sets work/short/long durations via `waybar-module-pomodoro set-*` subcommands.
- Toggle cycles between preset A (50/10/20) and preset B (25/5/15).
- **Script itself is logically correct.**
---
## Root Cause Analysis (ranked by confidence)
### 🔴 #1 — `waybar-module-pomodoro` binary not installed / not on PATH (confidence: ~90%)
- The `exec` command is `waybar-module-pomodoro --no-work-icons` — a **bare binary name**, resolved from PATH at Waybar launch time.
- Waybar inherits the environment of its launcher (Hyprland `exec-once`), which may NOT include the user's shell PATH (`~/.local/bin`, `/usr/local/bin`, etc.).
- `fish/config.fish` adds `/home/alex/dotfiles/.local/bin` to PATH, but that is only set in interactive Fish sessions — **Hyprland's exec-once does not source Fish config**.
- No package manager manifest, AUR package list, or install script mentions `waybar-module-pomodoro`.
- When `exec` fails to start, Waybar hides the module entirely (no fallback text) — the module disappears silently.
- **This is the most likely cause.** Verify with: `which waybar-module-pomodoro` in a non-Fish shell, or check `journalctl --user -u waybar` for "Failed to execute".
### 🟠 #2 — `interval` key absent on custom/pomodoro (confidence: ~65%)
- `custom/pomodoro` has NO `interval` key. For a persistent daemon (`waybar-module-pomodoro` runs and writes JSON to stdout continuously), this is correct — Waybar treats it as a long-lived subprocess.
- BUT if the binary is supposed to be polled (not a persistent daemon), missing `interval` means Waybar will only run it once and never refresh.
- The `return-type: json` combined with no `interval` means Waybar expects the binary to **continuously emit newline-delimited JSON** to stdout. If the binary only emits once and exits, the module will show blank after the first read.
- This is a secondary cause contingent on what `waybar-module-pomodoro` actually does. If it is a daemon that stays alive, #1 is the only blocker; if it exits after one line, `interval` is needed.
### 🟡 #3 — Binary exists but crashes on `--no-work-icons` flag (confidence: ~25%)
- The `--no-work-icons` flag may not be a valid flag for the installed version of `waybar-module-pomodoro`.
- An unrecognized flag causing the binary to exit with a non-zero code would suppress the module.
- Check: `waybar-module-pomodoro --help` or `waybar-module-pomodoro --no-work-icons` manually.
### 🟡 #4 — Config JSON parse failure (confidence: ~15%)
- The config uses tab-indented lines (lines 134–139 use `\t`) while the rest uses spaces — mixed indentation is cosmetically inconsistent but does NOT cause JSON parse errors.
- Waybar's parser accepts JSON5/hjson (trailing commas, `//` comments) — both are used in this config and are fine.
- No structural JSON error was found in the config.
### ⚪ #5 — Hyprland not auto-starting Waybar at all (confidence: ~10%)
- If `exec-once=waybar` in `hyprland.conf` is missing or commented out, the bar won't show at all (not just the pomodoro module). Not specific to this module.
---
## Concrete Edit Points
### Fix #1 (most likely): Ensure binary is installed and PATH is set in Waybar launch environment
**Option A — Install the binary system-wide:**
Install `waybar-module-pomodoro` via your package manager (e.g. `paru -S waybar-module-pomodoro` on Arch) so it is in `/usr/bin` or `/usr/local/bin`, which is always in Waybar's inherited PATH.
**Option B — Use absolute path in config:**
```diff
- "exec": "waybar-module-pomodoro --no-work-icons",
- "on-click": "waybar-module-pomodoro toggle",
- "on-click-middle": "waybar-module-pomodoro reset",
+ "exec": "$HOME/.local/bin/waybar-module-pomodoro --no-work-icons",
+ "on-click": "$HOME/.local/bin/waybar-module-pomodoro toggle",
+ "on-click-middle": "$HOME/.local/bin/waybar-module-pomodoro reset",
```
File: `.config/waybar/config`, lines 136–139.
**Option C — Set PATH in Hyprland env (preferred for Wayland):**
Add to `.config/hypr/hyprland.conf`:
```
env = PATH,$HOME/.local/bin:/usr/local/bin:/usr/bin:/bin
```
### Fix #2 (if binary is a one-shot, not a daemon): Add `interval` key
```diff
"custom/pomodoro": {
"format": "{}",
"return-type": "json",
+ "interval": 1,
"exec": "waybar-module-pomodoro --no-work-icons",
```
File: `.config/waybar/config`, line 134 (insert after `"return-type": "json",`).
---
## Files Involved
| File | Role |
|---|---|
| `.config/waybar/config` | Module registration in `modules-left`, `custom/pomodoro` definition |
| `.config/waybar/style.css` | `#custom-pomodoro` CSS selector (present, correct) |
| `.config/waybar/scripts/pomodoro-preset.sh` | Right-click preset toggler (calls binary) |
| `.config/hypr/hyprland.conf` | Waybar autostart + env block (outside Waybar dir) |
| `waybar-module-pomodoro` binary | External binary — must be installed and on PATH |
---
## Likely Bug Surfaces (Adjacent Risk Areas)
1. **`custom/uptime`** (config lines 89–95): Also uses a bare script path `$HOME/.config/waybar/scripts/uptime.sh`. Same PATH-at-launch issue could affect it if shell env is not inherited. The script exists in the repo (`scripts/` dir shows only `pomodoro-preset.sh`) — **`uptime.sh` is missing from the repo**, meaning this module may also be broken.
2. **`custom/music`** (config lines 44–52): Uses `playerctl` — same PATH issue; no `playerctl` install evidence in the repo.
3. **`hyprland/workspaces`** (config lines 22–28): Defined in config but NOT in any of `modules-left`, `modules-center`, or `modules-right` — it is **a dead definition that never renders**.
4. **`custom/lock`** (config lines 127–131): Defined but also absent from all three module lists — another dead definition.
5. **`network`** (config lines 60–68): Defined but not in any module list — dead definition.
6. **Trailing comma on line 8** of `modules-left`: Benign in Waybar's parser but would break standard JSON parsers if config is ever processed by tools expecting strict JSON.
## Relations
- related_to [[dotfiles/knowledge]]

View File

@@ -1,8 +0,0 @@
{
"github-copilot": {
"type": "oauth",
"refresh": "ghu_j9QHUrVzPLoYOsyjarpzktAFDQWqP31gz2Ac",
"access": "tid=af454cc719f9e4daffe9b4892fa4e791;exp=1773665732;sku=plus_monthly_subscriber_quota;proxy-ep=proxy.individual.githubcopilot.com;st=dotcom;chat=1;cit=1;malfil=1;editor_preview_features=1;agent_mode=1;agent_mode_auto_approval=1;mcp=1;ccr=1;8kp=1;ip=137.205.73.18;asn=AS201773:0afe8e842bbf234a7d338ff0c8b279b2ab05f1ebcad969293cf690eee12265c6",
"expires": 1773665432000
}
}

Binary file not shown.

View File

@@ -1,78 +0,0 @@
// Sentinel option appended to every question so the user can always
// answer with free text instead of a predefined choice.
export const SOMETHING_ELSE_VALUE = "__something_else__";
export const SOMETHING_ELSE_LABEL = "Something else…";

/**
 * Normalize raw question specs for the UI.
 *
 * For each question: fill in a default short label ("Q1", "Q2", …) when the
 * provided label is missing or whitespace-only, and append the synthetic
 * "Something else…" option after the predefined options.
 *
 * @param {Array<object>} inputQuestions - questions with `options` arrays
 * @returns {Array<object>} new question objects (inputs are not mutated)
 */
export function normalizeQuestions(inputQuestions) {
  return inputQuestions.map((question, index) => ({
    ...question,
    // Whitespace-only labels count as absent.
    label: question.label?.trim() ? question.label : `Q${index + 1}`,
    options: [
      // Tolerate a missing options array instead of throwing on spread.
      ...(question.options ?? []),
      {
        value: SOMETHING_ELSE_VALUE,
        label: SOMETHING_ELSE_LABEL,
      },
    ],
  }));
}
/**
 * True when the given option is the synthetic free-text sentinel.
 * Accepts null/undefined and returns false for them.
 */
export function isSomethingElseOption(option) {
  if (option == null) return false;
  return option.value === SOMETHING_ELSE_VALUE;
}
/**
 * Build an answer record for a predefined option the user picked.
 *
 * @param questionId - id of the answered question
 * @param option - the chosen option ({ value, label })
 * @param index - 1-based position shown in the UI
 */
export function createPredefinedAnswer(questionId, option, index) {
  const { value, label } = option;
  return { id: questionId, value, label, wasCustom: false, index };
}
/**
 * Build an answer record for free text the user typed; the text serves
 * as both machine value and display label.
 */
export function createCustomAnswer(questionId, text) {
  return { id: questionId, value: text, label: text, wasCustom: true };
}
/**
 * Produce one human-readable summary line per answered question, in
 * question order. Unanswered questions are skipped. Custom answers read
 * "user wrote: …", predefined answers "user selected: <index>. …".
 */
export function summarizeAnswers(questions, answers) {
  const byId = new Map();
  for (const answer of answers) {
    byId.set(answer.id, answer);
  }
  const summary = [];
  for (const question of questions) {
    const answer = byId.get(question.id);
    if (answer === undefined) continue;
    summary.push(
      answer.wasCustom
        ? `${question.label}: user wrote: ${answer.label}`
        : `${question.label}: user selected: ${answer.index}. ${answer.label}`,
    );
  }
  return summary;
}
/**
 * Result payload for a cancelled question flow: the (possibly empty)
 * question list, no answers, and the cancelled flag set.
 */
export function createCancelledResult(questions = []) {
  return { questions, answers: [], cancelled: true };
}
/**
 * Result payload for a completed flow. Answers are sorted into question
 * order; answers whose id matches no question sort to the end.
 * The input answers array is not mutated.
 */
export function createAnsweredResult(questions, answers) {
  const position = new Map(questions.map((question, index) => [question.id, index]));
  const rank = (answer) => {
    const index = position.get(answer.id);
    return index === undefined ? Number.POSITIVE_INFINITY : index;
  };
  const ordered = answers.slice().sort((left, right) => rank(left) - rank(right));
  return { questions, answers: ordered, cancelled: false };
}
/**
 * True when every question id is present in the answers Map
 * (vacuously true for an empty question list).
 */
export function allQuestionsAnswered(questions, answers) {
  for (const question of questions) {
    if (!answers.has(question.id)) {
      return false;
    }
  }
  return true;
}
/**
 * Advance to the next tab after answering: step through the questions,
 * then land on the submit tab (index === questionCount) and stay there.
 */
export function nextTabAfterAnswer(currentTab, questionCount) {
  return Math.min(currentTab + 1, questionCount);
}

View File

@@ -1,159 +0,0 @@
import test from "node:test";
import assert from "node:assert/strict";
import {
SOMETHING_ELSE_LABEL,
SOMETHING_ELSE_VALUE,
allQuestionsAnswered,
createAnsweredResult,
createCancelledResult,
createCustomAnswer,
createPredefinedAnswer,
nextTabAfterAnswer,
normalizeQuestions,
summarizeAnswers,
} from "./question-core.mjs";
// Unit tests for question-core.mjs (run with `node --test`).
// Each test pins exact structures with deepEqual, so fixtures must not change.

// Missing labels default to "Q<n>" and the sentinel option is appended last.
test("normalizeQuestions adds default labels and appends the Something else option", () => {
const [question] = normalizeQuestions([
{
id: "scope",
prompt: "Which scope fits best?",
options: [{ value: "small", label: "Small change" }],
},
]);
assert.equal(question.label, "Q1");
assert.deepEqual(question.options[0], { value: "small", label: "Small change" });
assert.deepEqual(question.options.at(-1), {
value: SOMETHING_ELSE_VALUE,
label: SOMETHING_ELSE_LABEL,
});
});
// Author-supplied labels/descriptions must pass through untouched.
test("normalizeQuestions keeps provided labels and descriptions intact before the synthetic option", () => {
const [question] = normalizeQuestions([
{
id: "priority",
label: "Priority",
prompt: "Which priority?",
options: [
{ value: "p0", label: "P0", description: "Need this now" },
{ value: "p1", label: "P1" },
],
},
]);
assert.equal(question.label, "Priority");
assert.deepEqual(question.options.slice(0, 2), [
{ value: "p0", label: "P0", description: "Need this now" },
{ value: "p1", label: "P1" },
]);
assert.equal(question.options[2].label, SOMETHING_ELSE_LABEL);
});
// Answer constructors keep machine values; summaries use distinct
// "selected" vs "wrote" phrasing for predefined vs custom answers.
test("answer helpers preserve machine values and summary lines distinguish predefined vs custom answers", () => {
const questions = normalizeQuestions([
{
id: "scope",
label: "Scope",
prompt: "Which scope fits best?",
options: [{ value: "small", label: "Small change" }],
},
{
id: "notes",
label: "Notes",
prompt: "Anything else?",
options: [{ value: "none", label: "No extra notes" }],
},
]);
const predefined = createPredefinedAnswer("scope", questions[0].options[0], 1);
const custom = createCustomAnswer("notes", "Needs to work with tmux");
assert.deepEqual(predefined, {
id: "scope",
value: "small",
label: "Small change",
wasCustom: false,
index: 1,
});
assert.deepEqual(custom, {
id: "notes",
value: "Needs to work with tmux",
label: "Needs to work with tmux",
wasCustom: true,
});
assert.deepEqual(summarizeAnswers(questions, [predefined, custom]), [
"Scope: user selected: 1. Small change",
"Notes: user wrote: Needs to work with tmux",
]);
});
// Cancellation returns the questions but an empty answer list.
test("createCancelledResult returns a structured cancelled payload", () => {
const questions = normalizeQuestions([
{
id: "scope",
prompt: "Which scope fits best?",
options: [{ value: "small", label: "Small change" }],
},
]);
assert.deepEqual(createCancelledResult(questions), {
questions,
answers: [],
cancelled: true,
});
});
// Answers handed over out of order come back sorted by question position.
test("createAnsweredResult keeps answers in question order", () => {
const questions = normalizeQuestions([
{
id: "scope",
label: "Scope",
prompt: "Which scope fits best?",
options: [{ value: "small", label: "Small change" }],
},
{
id: "notes",
label: "Notes",
prompt: "Anything else?",
options: [{ value: "none", label: "No extra notes" }],
},
]);
const second = createCustomAnswer("notes", "Custom note");
const first = createPredefinedAnswer("scope", questions[0].options[0], 1);
const result = createAnsweredResult(questions, [second, first]);
assert.equal(result.cancelled, false);
assert.deepEqual(result.answers.map((answer) => answer.id), ["scope", "notes"]);
});
// allQuestionsAnswered takes a Map keyed by question id.
test("allQuestionsAnswered only returns true when every question has an answer", () => {
const questions = normalizeQuestions([
{
id: "scope",
prompt: "Scope?",
options: [{ value: "small", label: "Small" }],
},
{
id: "priority",
prompt: "Priority?",
options: [{ value: "p1", label: "P1" }],
},
]);
const answers = new Map([
["scope", createPredefinedAnswer("scope", questions[0].options[0], 1)],
]);
assert.equal(allQuestionsAnswered(questions, answers), false);
answers.set("priority", createCustomAnswer("priority", "Ship this week"));
assert.equal(allQuestionsAnswered(questions, answers), true);
});
// Tab index questionCount is the submit tab; advancing past the last
// question lands there.
test("nextTabAfterAnswer advances through questions and then to the submit tab", () => {
assert.equal(nextTabAfterAnswer(0, 3), 1);
assert.equal(nextTabAfterAnswer(1, 3), 2);
assert.equal(nextTabAfterAnswer(2, 3), 3);
});

View File

@@ -1,381 +0,0 @@
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
import { Editor, type EditorTheme, Key, matchesKey, Text, truncateToWidth } from "@mariozechner/pi-tui";
import { Type } from "@sinclair/typebox";
import {
allQuestionsAnswered,
createAnsweredResult,
createCancelledResult,
createCustomAnswer,
createPredefinedAnswer,
isSomethingElseOption,
nextTabAfterAnswer,
normalizeQuestions,
summarizeAnswers,
} from "./question-core.mjs";
// One selectable choice; `value` is what the model receives, `label` what
// the user sees.
interface QuestionOption {
value: string;
label: string;
description?: string;
}
// A normalized question (after normalizeQuestions): label is always present.
interface Question {
id: string;
label: string;
prompt: string;
options: QuestionOption[];
}
// A recorded answer; `index` is only set for predefined (non-custom) picks.
interface Answer {
id: string;
value: string;
label: string;
wasCustom: boolean;
index?: number;
}
// Final outcome of the flow returned to the tool caller.
interface QuestionResult {
questions: Question[];
answers: Answer[];
cancelled: boolean;
}
// TypeBox schemas describing the tool's input parameters for the extension API.
const OptionSchema = Type.Object({
value: Type.String({ description: "Machine-friendly value returned to the model" }),
label: Type.String({ description: "Human-friendly label shown in the UI" }),
description: Type.Optional(Type.String({ description: "Optional help text shown under the label" })),
});
// `label` is optional here; normalizeQuestions fills in a default.
const QuestionSchema = Type.Object({
id: Type.String({ description: "Stable identifier for the answer" }),
label: Type.Optional(Type.String({ description: "Short label for summaries and tabs" })),
prompt: Type.String({ description: "Full question text shown to the user" }),
options: Type.Array(OptionSchema, { description: "Predefined options for the user to choose from" }),
});
// Top-level parameter object for the `question` tool.
const QuestionParams = Type.Object({
questions: Type.Array(QuestionSchema, { description: "One or more questions to ask the user" }),
});
// Shared failure payload: a plain-text error message paired with a
// cancelled-shaped result in `details`.
function errorResult(message: string, questions: Question[] = []) {
  const body = { type: "text" as const, text: message };
  return {
    content: [body],
    details: createCancelledResult(questions) as QuestionResult,
  };
}
/**
 * Drive the interactive question TUI and resolve with the user's answers.
 *
 * State machine: `currentTab` indexes the active question; in multi-question
 * mode there is one extra virtual tab at index `questions.length` (the submit
 * tab). `inputMode` switches the active question to a free-text editor when
 * the "Something else…" option is chosen. Rendered lines are memoized in
 * `cachedLines` until `refresh()` invalidates them.
 */
async function runQuestionFlow(ctx: any, questions: Question[]): Promise<QuestionResult> {
return ctx.ui.custom<QuestionResult>((tui, theme, _kb, done) => {
const isMulti = questions.length > 1;
let currentTab = 0;
let optionIndex = 0;
let inputMode = false;
let cachedLines: string[] | undefined;
const answers = new Map<string, Answer>();
const editorTheme: EditorTheme = {
borderColor: (text) => theme.fg("accent", text),
selectList: {
selectedPrefix: (text) => theme.fg("accent", text),
selectedText: (text) => theme.fg("accent", text),
description: (text) => theme.fg("muted", text),
scrollInfo: (text) => theme.fg("dim", text),
noMatch: (text) => theme.fg("warning", text),
},
};
const editor = new Editor(tui, editorTheme);
// Drop the memoized render and ask the TUI to redraw.
function refresh() {
cachedLines = undefined;
tui.requestRender();
}
// undefined when currentTab is the submit tab.
function currentQuestion(): Question | undefined {
return questions[currentTab];
}
function currentOptions(): QuestionOption[] {
return currentQuestion()?.options ?? [];
}
// Resolve the flow exactly once via `done`.
function finish(cancelled: boolean) {
if (cancelled) {
done(createCancelledResult(questions) as QuestionResult);
return;
}
done(createAnsweredResult(questions, Array.from(answers.values())) as QuestionResult);
}
// Free-text submit handler: record a custom answer, leave input mode,
// and either finish (single question) or advance the tab.
editor.onSubmit = (value) => {
const question = currentQuestion();
const trimmed = value.trim();
if (!question || trimmed.length === 0) {
refresh();
return;
}
answers.set(question.id, createCustomAnswer(question.id, trimmed) as Answer);
inputMode = false;
editor.setText("");
if (!isMulti) {
finish(false);
return;
}
currentTab = nextTabAfterAnswer(currentTab, questions.length);
optionIndex = 0;
refresh();
};
// Key dispatch. Order matters: input mode captures everything except
// Escape; multi-mode tab navigation runs before option selection.
function handleInput(data: string) {
if (inputMode) {
if (matchesKey(data, Key.escape)) {
inputMode = false;
editor.setText("");
refresh();
return;
}
editor.handleInput(data);
refresh();
return;
}
if (isMulti) {
// Modulo questions.length + 1 so Tab cycles through the submit tab too.
if (matchesKey(data, Key.tab) || matchesKey(data, Key.right)) {
currentTab = (currentTab + 1) % (questions.length + 1);
optionIndex = 0;
refresh();
return;
}
if (matchesKey(data, Key.shift("tab")) || matchesKey(data, Key.left)) {
currentTab = (currentTab - 1 + questions.length + 1) % (questions.length + 1);
optionIndex = 0;
refresh();
return;
}
// Submit tab: Enter submits only when everything is answered.
if (currentTab === questions.length) {
if (matchesKey(data, Key.enter) && allQuestionsAnswered(questions, answers)) {
finish(false);
return;
}
if (matchesKey(data, Key.escape)) {
finish(true);
return;
}
return;
}
}
const question = currentQuestion();
const options = currentOptions();
if (!question || options.length === 0) {
return;
}
if (matchesKey(data, Key.up)) {
optionIndex = Math.max(0, optionIndex - 1);
refresh();
return;
}
if (matchesKey(data, Key.down)) {
optionIndex = Math.min(options.length - 1, optionIndex + 1);
refresh();
return;
}
if (matchesKey(data, Key.enter)) {
const selected = options[optionIndex]!;
// The sentinel option opens the free-text editor instead of answering.
if (isSomethingElseOption(selected)) {
inputMode = true;
editor.setText("");
refresh();
return;
}
answers.set(question.id, createPredefinedAnswer(question.id, selected, optionIndex + 1) as Answer);
if (!isMulti) {
finish(false);
return;
}
currentTab = nextTabAfterAnswer(currentTab, questions.length);
optionIndex = 0;
refresh();
return;
}
if (matchesKey(data, Key.escape)) {
finish(true);
}
}
// Render the widget as width-truncated lines; memoized in cachedLines.
function render(width: number): string[] {
if (cachedLines) return cachedLines;
const lines: string[] = [];
const add = (line: string) => lines.push(truncateToWidth(line, width));
const question = currentQuestion();
const options = currentOptions();
add(theme.fg("accent", "─".repeat(width)));
// Multi-question mode: tab strip with ■/□ answered markers plus Submit.
if (isMulti) {
const tabs: string[] = [];
for (let index = 0; index < questions.length; index += 1) {
const tabQuestion = questions[index]!;
const active = index === currentTab;
const answered = answers.has(tabQuestion.id);
const box = answered ? "■" : "□";
const text = ` ${box} ${tabQuestion.label} `;
tabs.push(active ? theme.bg("selectedBg", theme.fg("text", text)) : theme.fg(answered ? "success" : "muted", text));
}
const submitText = " ✓ Submit ";
const submitActive = currentTab === questions.length;
const submitReady = allQuestionsAnswered(questions, answers);
tabs.push(
submitActive
? theme.bg("selectedBg", theme.fg("text", submitText))
: theme.fg(submitReady ? "success" : "dim", submitText),
);
add(` ${tabs.join(" ")}`);
lines.push("");
}
// Branch 1: free-text editor under the option list.
if (inputMode && question) {
add(theme.fg("text", ` ${question.prompt}`));
lines.push("");
for (let index = 0; index < options.length; index += 1) {
const option = options[index]!;
const prefix = index === optionIndex ? theme.fg("accent", "> ") : " ";
add(prefix + theme.fg(index === optionIndex ? "accent" : "text", `${index + 1}. ${option.label}`));
if (option.description) {
add(` ${theme.fg("muted", option.description)}`);
}
}
lines.push("");
add(theme.fg("muted", " Your answer:"));
for (const line of editor.render(width - 2)) {
add(` ${line}`);
}
// Branch 2: submit tab review screen.
} else if (isMulti && currentTab === questions.length) {
add(theme.fg("accent", theme.bold(" Ready to submit")));
lines.push("");
for (const reviewQuestion of questions) {
const answer = answers.get(reviewQuestion.id);
if (!answer) continue;
const label = answer.wasCustom ? `(wrote) ${answer.label}` : `${answer.index}. ${answer.label}`;
add(`${theme.fg("muted", ` ${reviewQuestion.label}: `)}${theme.fg("text", label)}`);
}
lines.push("");
if (allQuestionsAnswered(questions, answers)) {
add(theme.fg("success", " Press Enter to submit"));
} else {
add(theme.fg("warning", " All questions must be answered before submit"));
}
// Branch 3: plain option picker for the active question.
} else if (question) {
add(theme.fg("text", ` ${question.prompt}`));
lines.push("");
for (let index = 0; index < options.length; index += 1) {
const option = options[index]!;
const prefix = index === optionIndex ? theme.fg("accent", "> ") : " ";
add(prefix + theme.fg(index === optionIndex ? "accent" : "text", `${index + 1}. ${option.label}`));
if (option.description) {
add(` ${theme.fg("muted", option.description)}`);
}
}
}
lines.push("");
// Context-sensitive key hints.
if (inputMode) {
add(theme.fg("dim", " Enter to submit • Esc to go back"));
} else if (isMulti) {
add(theme.fg("dim", " Tab/←→ navigate • ↑↓ select • Enter confirm • Esc cancel"));
} else {
add(theme.fg("dim", " ↑↓ navigate • Enter select • Esc cancel"));
}
add(theme.fg("accent", "─".repeat(width)));
cachedLines = lines;
return lines;
}
return {
render,
invalidate: () => {
cachedLines = undefined;
},
handleInput,
};
});
}
/**
 * Register the `question` tool with the extension API. The tool shows an
 * interactive multiple-choice (plus free-text) flow and returns a per-question
 * summary, or a cancellation/error payload when no UI or no questions exist.
 */
export default function question(pi: ExtensionAPI) {
  pi.registerTool({
    name: "question",
    label: "Question",
    description:
      "Ask the user one or more multiple-choice questions. Every question automatically gets a final Something else… option for free-text answers.",
    parameters: QuestionParams,
    // Validate preconditions, run the TUI flow, and format the outcome.
    async execute(_toolCallId, params, _signal, _onUpdate, ctx) {
      if (!ctx.hasUI) {
        return errorResult("Error: UI not available (running in non-interactive mode)");
      }
      if (params.questions.length === 0) {
        return errorResult("Error: No questions provided");
      }
      const normalized = normalizeQuestions(params.questions) as Question[];
      const outcome = await runQuestionFlow(ctx, normalized);
      if (outcome.cancelled) {
        return {
          content: [{ type: "text", text: "User cancelled the question flow" }],
          details: outcome,
        };
      }
      const summary = summarizeAnswers(outcome.questions, outcome.answers).join("\n");
      return {
        content: [{ type: "text", text: summary }],
        details: outcome,
      };
    },
    // Compact call line: count plus the question labels/ids when available.
    renderCall(args, theme) {
      const list = Array.isArray(args.questions) ? args.questions : [];
      const named = list
        .map((entry: { label?: string; id?: string }) => entry.label || entry.id)
        .filter(Boolean);
      const count = list.length;
      const parts = [
        theme.fg("toolTitle", theme.bold("question ")),
        theme.fg("muted", `${count} question${count === 1 ? "" : "s"}`),
      ];
      if (named.length > 0) {
        parts.push(theme.fg("dim", ` (${named.join(", ")})`));
      }
      return new Text(parts.join(""), 0, 0);
    },
    // Result rendering: raw text fallback, a cancel notice, or one
    // check-marked summary line per answer.
    renderResult(result, _options, theme) {
      const details = result.details as QuestionResult | undefined;
      if (!details) {
        const first = result.content[0];
        const fallback = first?.type === "text" ? first.text : "";
        return new Text(fallback, 0, 0);
      }
      if (details.cancelled) {
        return new Text(theme.fg("warning", "Cancelled"), 0, 0);
      }
      const body = summarizeAnswers(details.questions, details.answers)
        .map((line) => `${theme.fg("success", "✓ ")}${line}`)
        .join("\n");
      return new Text(body, 0, 0);
    },
  });
}

View File

@@ -1,6 +0,0 @@
{
"lastChangelogVersion": "0.58.3",
"defaultProvider": "github-copilot",
"defaultModel": "gpt-5.4",
"defaultThinkingLevel": "medium"
}

View File

@@ -1,12 +1,13 @@
Host raspi-local Host vps-root
HostName 192.168.0.86 HostName 72.61.16.241
User pi Port 31415
User root
Host vps
HostName 72.61.16.241
Port 31415
User itguy
Host raspi Host raspi
HostName ssh.rwiesner.com HostName 192.168.0.85
User pi User dev
ProxyCommand cloudflared access ssh --hostname %h
Host prod
HostName 159.195.46.178
User production

View File

@@ -3,3 +3,10 @@
192.168.0.86 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJjBGU5qutJuNvwuowmjzxLxxHaMwqXg+RaeFM+nKXYWD7BUGin0yhbGUIdsXhREwEQt6qLz8rxQVjKzuYTD5aA= 192.168.0.86 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJjBGU5qutJuNvwuowmjzxLxxHaMwqXg+RaeFM+nKXYWD7BUGin0yhbGUIdsXhREwEQt6qLz8rxQVjKzuYTD5aA=
ssh.rwiesner.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGNaRHnfPdOx/Ws6Chzp0yXIgklif3wImIL4ERA9qkoI ssh.rwiesner.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGNaRHnfPdOx/Ws6Chzp0yXIgklif3wImIL4ERA9qkoI
github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
72.61.16.241 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHeS1+z0qw314xS8C0xpfF8AZo7Ku7HVZv0ovtXlaNbA
72.61.16.241 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCTmedsGMuMfqRsCWfxwzu1EJk/AhJFc59NYmrJlt8T8bls65HcgrCXbGovtwRU9QM/Bctss06LCWz9mLatIhptX6Gp3iTQh0NtZfcTd7+qvZ36hcgpLKaZpdAoegnEW3TpTMGZUNxrOdG2olmPVhPGt8xHJAkvnByVv4gFqAwfy6zDHmBqOnxyNodaH2UaXbvGHxaiqKqDlx5V1Ez7g/9hCugN7Ts4sBoGmAR34M5fE/T6G7vFYVHnyOV8PuqY7vRQI8Lk8RrPqYuiXoOaIync7n0IjRHWvS/lnVYOuP0lYcMSQjRWnYI3GkZXgvxarRjauroCMF9hxU8EjR8toPqG2ylUG3XRnpXw4uw2/iPMz9YEdbuN31egD+YZNw2NXSUUEcEDL0ybKnkbzMLX6iSeX3QZ2mCwKPElr5Lcw8erTWaIn6SSnpR43wZOPL6w2s5ArsZ1WZqWOPRp0EPXF0NS9UplW5ErpNPVisq4Dbtt1z7PoP/ujV8VMitoX+Koka0=
72.61.16.241 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJGU9ZiVQdVLvNywwg9dR1VH1zDX20DgOIG1JbgEpZ/4UxZXDcP97+hcxFdJMrdlkU8HDngi7o0h4CG5XUIFEKM=
192.168.0.87 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKvoZGNYk4f+39zPRi1eDL76LDwF2l9mWzb5rZa3/LBi
192.168.0.87 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC5K1xPGu7I5oMGXgWvsuyZRx83M5BYfca7JBvNa6Gp/IHY4KijH7rmaaDWWpDtDp7TPZk5raeh9lfsdxZhozQkOmKPRDKQMX/ieZsr+kEcPM0bgNAhbl5aVb2sIn2KRvAGsvlLbUUxhY/J0DWP8uursE2oWKi24HCc2wwYrO2V/qqryv3Z3CIcLCq8XaYw7lDe4QkN2dJL1EkpHYK04nVreADIFdvK0B1pmgj9NH0Blh50JB42JBvgxzULs2pypfVQfEkQoxCDAzrX/23iQgfprw+s8vDpxocvzfj9Yd1Ckd+RnQz4MPTqjl30WH+xMcZaQxI0Y8M/J97zZdQH1cUW6lUvArzqZHibNRZtopyG0kYhu0rh+XIRTpxZK58QISVFFvSXMGyXsw9dZbEO/1dMubWLGsWTEbmc/9YQEwOxaEX1gYnJ85VdqRhQ8DplyDPtmQIBvetKw/2sFMT9kxPVxu2RObbr/GU9R7+3aksv1v1yiitBt3mXetZF+fcJqO8=
192.168.0.87 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMMl8EyQQfJHtqu7O2CDp2KLPf8x1dvuBldS1aZ9MGpSmvJb5bJ3GtJBqx+jIP0qbY3Gpr2kRC9W6faHVnDxdGs=
192.168.0.85 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKvoZGNYk4f+39zPRi1eDL76LDwF2l9mWzb5rZa3/LBi

View File

@@ -1 +1,9 @@
192.168.0.86 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGNaRHnfPdOx/Ws6Chzp0yXIgklif3wImIL4ERA9qkoI 192.168.0.86 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGNaRHnfPdOx/Ws6Chzp0yXIgklif3wImIL4ERA9qkoI
192.168.0.86 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCYJ9XPnMjoQlT3m/MLNPnAwTpO+d35+mptpi0iBJ3ySECb4Q6mFnhUL99+zbf5KntqOUW1m16yraQbEVp0tZZkbo5I+5Y8nNATVGcEn+AS5tTgTp09L8litbxNwF3VS4YfGsRTAkCbi954IGexg7ijawH+HeM8HTjJOnbzW23k7LKItTH+5PWAIBfvoUVoheYexAhUHGDfdSaEpkCrqcg+4Uazvgq9dHKgtqvx5PPGawfuJKr2mzmFSz/oOFeISAiP8wkxWfRaKJB7TJWtB4oL7wfHUqCfDORYvNhA7PPnzLtCMxqaMcjq16LW93LP14498SkwOfPjRT7QBVPGG7xUBA8mGOEB13m/7aQOm8nn2yYg4BAviAa9JuDyv0EcY1Pvpx7uP7veAD04rUyyyAD1/pZrpb1C264/8vubVASdr0VH2/o2tGciFDKmI4tENCZntMI6y2ytbPyhrqIpT5wXrwqbKpNAs2PJSt8QEvv/He3mcQ8kphhNKaFvLhI/eSc=
192.168.0.86 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJjBGU5qutJuNvwuowmjzxLxxHaMwqXg+RaeFM+nKXYWD7BUGin0yhbGUIdsXhREwEQt6qLz8rxQVjKzuYTD5aA=
ssh.rwiesner.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGNaRHnfPdOx/Ws6Chzp0yXIgklif3wImIL4ERA9qkoI
github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
72.61.16.241 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHeS1+z0qw314xS8C0xpfF8AZo7Ku7HVZv0ovtXlaNbA
72.61.16.241 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCTmedsGMuMfqRsCWfxwzu1EJk/AhJFc59NYmrJlt8T8bls65HcgrCXbGovtwRU9QM/Bctss06LCWz9mLatIhptX6Gp3iTQh0NtZfcTd7+qvZ36hcgpLKaZpdAoegnEW3TpTMGZUNxrOdG2olmPVhPGt8xHJAkvnByVv4gFqAwfy6zDHmBqOnxyNodaH2UaXbvGHxaiqKqDlx5V1Ez7g/9hCugN7Ts4sBoGmAR34M5fE/T6G7vFYVHnyOV8PuqY7vRQI8Lk8RrPqYuiXoOaIync7n0IjRHWvS/lnVYOuP0lYcMSQjRWnYI3GkZXgvxarRjauroCMF9hxU8EjR8toPqG2ylUG3XRnpXw4uw2/iPMz9YEdbuN31egD+YZNw2NXSUUEcEDL0ybKnkbzMLX6iSeX3QZ2mCwKPElr5Lcw8erTWaIn6SSnpR43wZOPL6w2s5ArsZ1WZqWOPRp0EPXF0NS9UplW5ErpNPVisq4Dbtt1z7PoP/ujV8VMitoX+Koka0=
72.61.16.241 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJGU9ZiVQdVLvNywwg9dR1VH1zDX20DgOIG1JbgEpZ/4UxZXDcP97+hcxFdJMrdlkU8HDngi7o0h4CG5XUIFEKM=
192.168.0.87 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKvoZGNYk4f+39zPRi1eDL76LDwF2l9mWzb5rZa3/LBi

View File

@@ -1,5 +1,6 @@
unbind C-b unbind C-b
set -g extended-keys-format csi-u
set-option -g prefix C-space set-option -g prefix C-space
bind-key C-space send-prefix bind-key C-space send-prefix

View File

@@ -1,279 +0,0 @@
# Question Tool Design
**Date:** 2026-04-09
**Project:** `/home/alex/dotfiles`
**Target file:** `.pi/agent/extensions/question.ts`
## Goal
Add a tracked pi extension that gives the agent a single `question` tool for asking either one question or multiple questions in interactive mode, while always preserving a final user escape hatch: **"Something else…"** opens inline free-text entry when none of the listed options fit.
## Context
- Pi supports custom tools through TypeScript extensions placed in auto-discovered extension directories.
- This dotfiles repo already tracks pi configuration under `.pi/agent/`.
- The working extension directory `.pi/agent/extensions/` is currently empty.
- Pi's upstream examples already include:
- a single-question `question.ts` example
- a multi-question `questionnaire.ts` example
- The requested tool should combine those use cases into one obvious agent-facing tool.
## User-Approved Requirements
1. The tool must be tracked in this repo at:
- `/home/alex/dotfiles/.pi/agent/extensions/question.ts`
2. The tool name should be:
- `question`
3. The tool must support both:
- a single question
- multiple questions in one interaction
4. Every question is multiple-choice, but the UI must always append a final choice:
- **"Something else…"**
5. Choosing **"Something else…"** must allow direct user text entry.
6. Question options should support machine-friendly values and user-facing labels:
- `{ value, label, description? }`
7. This should be a unified tool, not separate `question` and `questionnaire` tools.
## Recommended Approach
Implement a single extension modeled after pi's upstream `question.ts` and `questionnaire.ts` examples:
- one registered tool: `question`
- one parameter shape: `questions: Question[]`
- one UI that adapts to question count:
- single-question picker for `questions.length === 1`
- multi-question review flow for `questions.length > 1`
This keeps the agent-facing API simple while still supporting richer user clarification flows.
## Tool Contract
The extension will register a tool with this conceptual input shape:
```ts
{
questions: Array<{
id: string;
label?: string;
prompt: string;
options: Array<{
value: string;
label: string;
description?: string;
}>;
}>;
}
```
### Field intent
- `id`: stable identifier for the answer
- `label`: short summary label for tabs/review UI; defaults to `Q1`, `Q2`, etc.
- `prompt`: the full question shown to the user
- `options`: predefined choices the model wants the user to pick from
### Normalization rules
Before rendering the UI:
1. Ensure at least one question exists.
2. Ensure each question has a usable short label.
3. Preserve the provided predefined options as-is.
4. Append a final synthetic option to every question:
- label: `Something else…`
- behavior: switch into inline text entry
5. Do not require the model to explicitly include the synthetic option.
## Interaction Design
### Single-question mode
When exactly one question is provided:
- display the prompt
- display numbered predefined options
- automatically display the final appended option:
- `Something else…`
- selecting a predefined option completes the tool immediately
- selecting `Something else…` opens inline free-text entry
- `Esc` in the picker cancels the tool
- `Esc` in text entry exits text entry and returns to the option list
### Multi-question mode
When multiple questions are provided:
- show one question at a time
- allow tab or left/right navigation between questions
- append `Something else…` to every question
- after answering one question, move to the next question
- include a final review/submit step summarizing all current answers
- allow navigating back to change answers before final submission
- submit only from the review step
This provides a guided flow without requiring separate tools.
## Answer Model
The tool result should always remain structured.
Conceptual result shape:
```ts
{
questions: Question[];
answers: Array<{
id: string;
value: string;
label: string;
wasCustom: boolean;
index?: number;
}>;
cancelled: boolean;
}
```
### Predefined option answers
For a predefined choice:
- `value` = the provided option value
- `label` = the provided option label
- `wasCustom` = `false`
- `index` = 1-based index of the selected predefined option
### Custom answers via “Something else…”
For a typed answer:
- `value` = typed text
- `label` = typed text
- `wasCustom` = `true`
- `index` is omitted
This gives the agent consistent structured data while preserving user freedom.
## Rendering
The extension should provide readable tool renderers:
### `renderCall`
Show:
- tool name (`question`)
- question count
- short labels or summary where useful
### `renderResult`
Show:
- `Cancelled` when the user aborts
- one concise success line per answered question
- whether an answer was predefined or custom when helpful
The rendering should remain compact in normal use and not dump full raw JSON unless the default fallback is needed.
## Error Handling
The tool should return structured results for expected user/runtime states instead of throwing.
### Non-interactive mode
If pi is running without interactive UI support:
- return a clear text result indicating UI is unavailable
- mark the interaction as `cancelled: true` in details
- do not crash the session
### Invalid input
If `questions` is empty:
- return a clear text result like `Error: No questions provided`
- include a structured details payload with `cancelled: true`
### User cancel
If the user cancels from the picker or review flow:
- return `cancelled: true`
- do not throw an exception
### Empty custom text
If the user enters free-text mode and submits an empty value:
- do not accept an empty answer
- keep the user in text-entry mode until they provide non-empty text or press `Esc`
- avoid returning meaningless blank answers to the model
## File Structure
Implementation stays in one file unless complexity clearly justifies splitting later:
- Create: `/home/alex/dotfiles/.pi/agent/extensions/question.ts`
Internal sections inside the file should stay logically separated:
1. types and schemas
2. question normalization helpers
3. single-question UI flow
4. multi-question UI flow
5. tool registration
6. call/result rendering
## Loading and Usage
Because the file will live in an auto-discovered project extension directory, the expected activation flow is:
1. start pi from the dotfiles repo or a directory where the project extension is in scope
2. use `/reload` if pi is already running
3. allow the model to call `question` when clarification is needed
## Testing Strategy
No dedicated automated test harness is required for the first version.
Manual verification should cover:
1. **Single question, predefined answer**
- tool returns selected option value/label
2. **Single question, custom answer**
- selecting `Something else…` opens text entry and returns typed text
3. **Single question, cancel**
- cancellation returns structured cancelled result
4. **Multi-question, all predefined**
- step-through and final review work correctly
5. **Multi-question, mixed predefined/custom**
- at least one typed answer and one predefined answer are preserved correctly
6. **Multi-question, edit before submit**
- user can revisit and change answers before final submission
7. **Empty custom submission**
- blank text is rejected or bounced back safely
8. **Non-interactive mode**
- tool returns a clear UI-unavailable result
## Non-Goals
The first version will not add:
- separate text-only question types
- nested conditional question trees
- validation rules beyond basic non-empty custom text handling
- persistence beyond normal pi session/tool result storage
- a separate `questionnaire` tool name
## Acceptance Criteria
The work is complete when:
1. `.pi/agent/extensions/question.ts` exists in this repo
2. pi discovers the extension via project auto-discovery
3. the agent has a single `question` tool
4. the tool supports both one-question and multi-question flows
5. every question automatically ends with `Something else…`
6. selecting `Something else…` allows direct typed input
7. results are structured and distinguish custom answers from predefined ones
8. cancel/error states return cleanly without crashing the session

View File

@@ -1,391 +0,0 @@
# Web Search Tools Design
**Date:** 2026-04-09
**Project:** `/home/alex/dotfiles`
**Target files:**
- `.pi/agent/extensions/web-search/package.json`
- `.pi/agent/extensions/web-search/index.ts`
- `.pi/agent/extensions/web-search/src/schema.ts`
- `.pi/agent/extensions/web-search/src/config.ts`
- `.pi/agent/extensions/web-search/src/providers/types.ts`
- `.pi/agent/extensions/web-search/src/providers/exa.ts`
- `.pi/agent/extensions/web-search/src/tools/web-search.ts`
- `.pi/agent/extensions/web-search/src/tools/web-fetch.ts`
- `.pi/agent/extensions/web-search/src/format.ts`
- tests alongside the new modules
## Goal
Add two generic pi tools, `web_search` and `web_fetch`, implemented as a modular extension package that uses Exa as the first provider while keeping the internal design extensible for future providers.
## Context
- This dotfiles repo already tracks pi configuration under `.pi/agent/`.
- The current extension workspace contains a tracked `question` extension and small pure helper tests.
- Pi extensions can be packaged as directories with `index.ts` and their own `package.json`, which is the best fit when third-party dependencies are needed.
- The requested feature is explicitly about pi extensions and custom tools, not built-in model providers.
- The user wants:
- generic tool names now
- Exa as the first provider
- configuration read from a separate global file, not `settings.json`
- configuration stored only at the global scope
## User-Approved Requirements
1. Add two generic tools:
- `web_search`
- `web_fetch`
2. Use Exa as the initial provider.
3. Keep the implementation extensible so other providers can be added later.
4. Do **not** read configuration from environment variables.
5. Do **not** read configuration from `settings.json`.
6. Read configuration from a dedicated global file:
- `~/.pi/agent/web-search.json`
7. Use a provider-list-based config shape, not a single-provider-only schema.
8. Store credentials as literal values in that config file.
9. `web_search` should return **metadata only** by default.
10. `web_fetch` should accept **one URL or multiple URLs**.
11. `web_fetch` should return **text** by default.
12. The implementation direction should be the modular/package-style structure, not the minimal Exa-shaped shortcut.
## Recommended Architecture
Implement the feature as a dedicated extension package at:
- `/home/alex/dotfiles/.pi/agent/extensions/web-search/`
This package will register two generic tools and route both through a provider registry. At runtime, the extension loads `~/.pi/agent/web-search.json`, validates it, normalizes the provider list into an internal lookup map, resolves the configured default provider, and then executes requests through a provider adapter.
For the first version, the only adapter is Exa. However, the tool-facing layer remains provider-agnostic, so future providers only need to implement the shared provider interface and be added to config validation/registry wiring.
This is intentionally more structured than a single-file Exa wrapper because the user explicitly wants future extensibility without changing tool names or reworking the public API later.
## File Structure
### Extension package
- **Create:** `/home/alex/dotfiles/.pi/agent/extensions/web-search/package.json`
- declares the extension package
- declares `exa-js` as a dependency
- points pi at the extension entrypoint
- **Create:** `/home/alex/dotfiles/.pi/agent/extensions/web-search/index.ts`
- extension entrypoint
- registers `web_search` and `web_fetch`
- wires together config loading, provider registry, tool handlers, and shared formatting
### Shared schemas and config
- **Create:** `/home/alex/dotfiles/.pi/agent/extensions/web-search/src/schema.ts`
- TypeBox schemas for tool parameters
- TypeBox schemas for `web-search.json`
- shared TypeScript types derived from the schemas where useful
- **Create:** `/home/alex/dotfiles/.pi/agent/extensions/web-search/src/config.ts`
- reads `~/.pi/agent/web-search.json`
- validates config shape
- normalizes provider list into an internal map keyed by provider name
- resolves default provider
### Provider abstraction
- **Create:** `/home/alex/dotfiles/.pi/agent/extensions/web-search/src/providers/types.ts`
- generic request and response types for search/fetch
- provider interface used by the tool layer
- normalized internal result shapes independent of Exa SDK types
- **Create:** `/home/alex/dotfiles/.pi/agent/extensions/web-search/src/providers/exa.ts`
- Exa-backed implementation of the provider interface
- translates generic search requests into Exa `search(...)`
- translates generic fetch requests into Exa `getContents(...)`
- isolates all Exa-specific request/response details
### Tool handlers and formatting
- **Create:** `/home/alex/dotfiles/.pi/agent/extensions/web-search/src/tools/web-search.ts`
- `web_search` schema, execution logic, and tool rendering helpers
- **Create:** `/home/alex/dotfiles/.pi/agent/extensions/web-search/src/tools/web-fetch.ts`
- `web_fetch` schema, execution logic, and tool rendering helpers
- **Create:** `/home/alex/dotfiles/.pi/agent/extensions/web-search/src/format.ts`
- shared output shaping
- compact text summaries for the LLM
- truncation behavior for large results
- per-result formatting for batch fetches and partial failures
## Config File Design
The extension will read exactly one file:
- `~/.pi/agent/web-search.json`
Initial conceptual shape:
```json
{
"defaultProvider": "exa-main",
"providers": [
{
"name": "exa-main",
"type": "exa",
"apiKey": "exa_...",
"options": {
"defaultSearchLimit": 5,
"defaultFetchTextMaxCharacters": 12000
}
}
]
}
```
### Config rules
- `defaultProvider` must match one provider entry by name.
- `providers` must be a non-empty array.
- Each provider entry must include:
- `name`
- `type`
- `apiKey`
- `apiKey` is a literal string in the first version.
- `type` is validated so the runtime can select the correct adapter.
- Exa-specific defaults may live under `options`, but they must remain optional.
### Config non-goals
The first version will **not**:
- read provider config from project-local files
- merge config from multiple files
- read credentials from env vars
- support shell-command-based credential resolution
- write or edit `web-search.json` automatically
If the file is missing or invalid, the tools should return a clear error telling the user where the file belongs and showing a minimal valid example.
## Tool Contract
### `web_search`
Purpose: search the web and return result metadata with a generic surface that can outlive Exa.
Conceptual input shape:
```ts
{
query: string;
limit?: number;
includeDomains?: string[];
excludeDomains?: string[];
startPublishedDate?: string;
endPublishedDate?: string;
category?: string;
provider?: string;
}
```
### Default behavior
- returns metadata only
- does not fetch page text by default
- uses the default configured provider unless `provider` explicitly selects another configured provider
### Result shape intent
Each search result should preserve a normalized subset of provider output such as:
- `title`
- `url`
- `publishedDate`
- `author`
- `score`
- provider-specific stable identifiers only if useful for follow-up operations
The tool's text output should stay compact and easy for the model to scan.
### `web_fetch`
Purpose: fetch contents for one or more URLs with a generic interface.
Conceptual input shape:
```ts
{
urls: string[];
text?: boolean;
highlights?: boolean;
summary?: boolean;
textMaxCharacters?: number;
provider?: string;
}
```
### Input normalization
The canonical tool shape is `urls: string[]`, where a single URL is represented as a one-element array. For robustness, the implementation may also accept a top-level `url` string through argument normalization and fold it into `urls`, but the stable contract exposed in schemas and docs should remain `urls: string[]`.
### Default behavior
- when no content mode is specified, fetch text
- batch requests are allowed
- the default configured provider is used unless overridden
### Result shape intent
Each fetched item should preserve normalized per-URL results, including:
- `url`
- `title` where available
- `text` by default
- optional `highlights`
- optional `summary`
- per-item failure details for partial batch failures
## Provider Abstraction
The provider interface should express the minimum shared behaviors needed by the tools:
```ts
interface WebSearchProvider {
type: string;
search(request: NormalizedSearchRequest): Promise<NormalizedSearchResponse>;
fetch(request: NormalizedFetchRequest): Promise<NormalizedFetchResponse>;
}
```
### Exa adapter responsibilities
The Exa adapter will:
- instantiate an Exa client from the configured literal API key
- use Exa search without contents for `web_search` default behavior
- use Exa `getContents(...)` for `web_fetch`
- map Exa response fields into normalized provider-agnostic result types
- keep Exa-only fields contained inside the adapter unless they are intentionally promoted into the shared result model later
This keeps future provider additions focused: implement the same interface, extend config validation, and register the adapter.
## Rendering and Output Design
The extension should provide compact tool rendering so calls and results are readable inside pi.
### `renderCall`
- `web_search`: show tool name and the query
- `web_fetch`: show tool name and URL count (or the single URL)
### `renderResult`
- `web_search`: show result count and a short numbered list of titles/URLs
- `web_fetch`: show fetched count, failed count if any, and a concise per-URL summary
### LLM-facing text output
The text returned to the model should be concise and predictable:
- search: compact metadata list only by default
- fetch: truncated text payloads with enough context to be useful
- batch fetch: clearly separated per-URL sections
Large outputs must be truncated with the shared truncation utilities pattern used by pi tool examples.
## Error Handling
Expected runtime failures should be handled cleanly and descriptively.
### Config errors
- missing `~/.pi/agent/web-search.json`
- invalid JSON
- schema mismatch
- empty provider list
- unknown `defaultProvider`
- unknown explicitly requested provider
- missing literal API key
These should return actionable errors naming the exact issue.
### Input errors
- empty search query
- malformed URL(s)
- empty URL list after normalization
These should be rejected before any provider request is made.
### Provider/runtime errors
- Exa authentication failures
- network failures
- rate limits
- unexpected response shapes
These should return a concise summary in tool content while preserving richer diagnostics in `details`.
### Partial failures
For batch `web_fetch`, mixed outcomes should not fail the entire request unless every target fails. Successful pages should still be returned together with per-URL failure entries.
## Testing Strategy
The design intentionally separates pure logic from pi wiring so most behavior can be tested without loading pi itself.
### Automated tests
Cover:
1. config parsing and normalization
2. provider-list validation
3. default-provider resolution
4. generic request → Exa request mapping
5. Exa response → normalized response mapping
6. compact formatting for metadata-only search
7. truncation for long fetch results
8. batch fetch formatting with partial failures
9. helpful error messages when config is absent or invalid
### Test style
- prefer pure module tests for config, normalization, and formatting
- inject a fake Exa-like client into the Exa adapter instead of making live network calls
- keep extension entrypoint tests to smoke coverage only
### Manual verification
After implementation:
1. create `~/.pi/agent/web-search.json`
2. reload pi
3. run one `web_search` call
4. run one single-URL `web_fetch` call
5. run one multi-URL `web_fetch` call
6. confirm missing/invalid config errors are readable
## Non-Goals
The first version will not add:
- other providers besides Exa
- project-local web-search config
- automatic setup commands or interactive config editing
- provider-specific passthrough options in the public tool API
- rich snippet/highlight defaults for search
- live network integration tests in the normal automated suite
## Acceptance Criteria
The work is complete when:
1. pi discovers a new extension package at `.pi/agent/extensions/web-search/`
2. the agent has two generic tools:
- `web_search`
- `web_fetch`
3. the implementation uses an internal provider abstraction
4. Exa is the first working provider implementation
5. the runtime reads global config from `~/.pi/agent/web-search.json`
6. config uses a provider-list shape with a default provider selector
7. credentials are read as literal values from that file
8. `web_search` returns metadata only by default
9. `web_fetch` accepts one or multiple URLs and returns text by default
10. missing config, invalid config, and provider failures return clean, actionable tool errors
11. core mapping/formatting/config logic is covered by automated tests