Compare commits

...

31 Commits

Author SHA1 Message Date
Firehose Bot
e2caed41b9 Add current_tag attribute to post_index component 2026-03-24 14:56:54 +00:00
Firehose Bot
59a675ad71 Linting rule only for dev and test
Production build broke because our custom lint rule was compiled. The
credo linter is not available and not necessary in production.

Solution: create separate directory for dev tools.
2026-03-24 14:41:58 +00:00
Firehose Bot
34d1589d67 not running autoresearch at the moment 2026-03-24 14:23:00 +00:00
Firehose Bot
f1c2d8b232 Fix trailing newline and format code 2026-03-24 14:21:22 +00:00
Firehose Bot
60cfb137f2 remove sequence diagram skill, moved to other repo 2026-03-24 12:14:01 +00:00
Firehose Bot
51c59e3388 nono sandbox 2026-03-24 12:13:05 +00:00
Firehose Bot
c76853efec post: blog triage with an llm 2026-03-24 12:11:23 +00:00
Firehose Bot
09ca4f2758 fix score according to claude desktop 2026-03-21 18:41:21 +00:00
Firehose Bot
f4d992f0d6 initial setup for autoresearch of sequence diagram prompt 2026-03-21 15:39:15 +00:00
Firehose Bot
419e5dd5bd sandboxed haiku with pi 2026-03-21 15:36:51 +00:00
Willem van den Ende
dcf3032d0e Add custom Credo check for conn shadowing in tests
Detects `conn = get(conn, ...)` patterns and directs to
refactor_conn_aliasing.sh for automatic fixing.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-20 21:37:04 +00:00
Willem van den Ende
6a5269f30a Refactor conn aliasing in controller tests to use pipe chains
Applied refactor_conn_aliasing.sh to eliminate conn shadowing.

Show draft posts in test and dev
2026-03-20 21:36:08 +00:00
Willem van den Ende
5d040b4062 Fix Sandbox module not available in DataCase setup
The alias was inside the `using` block (only available to consumers),
but setup_sandbox/1 runs in DataCase itself. Use fully qualified name.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-20 21:19:54 +00:00
Willem van den Ende
5186edc2e9 Add reusable script to refactor Phoenix test conn aliasing
Portable awk-based script that transforms conn shadowing patterns
into idiomatic pipe chains across 4 cases (body extraction, single
assert, pattern match assert, multi-use rename).

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-20 21:19:54 +00:00
Firehose Bot
f148fe4fcd fix blog tag clicks, and new post 2026-03-19 22:14:19 +00:00
Firehose Bot
3ffb0883f9 Fix review issues from commit 2a21d75
1. Rename goto_engineering_post_page/2 to visit_engineering_path/2 for
   better accuracy (used for both post pages and tag pages)

2. Simplify Makefile test target by removing explicit ecto.create and
   ecto.migrate commands (mix test handles migrations automatically)

3. Update blog_test.exs header comment to reflect actual changes made

4. Move Sandbox alias to top level in data_case.ex
2026-03-19 11:07:17 +00:00
Firehose Bot
2a21d75938 adjust makefile and refactor test 2026-03-18 20:04:41 +00:00
Firehose Bot
a82dab0350 also write something about unit tests 2026-03-18 20:03:41 +00:00
Firehose Bot
f05dd00c55 test writer skill
Focuses on integration tests, but might be more reusable
2026-03-18 20:02:20 +00:00
Firehose Bot
d428f51e8c Fix blog API tests and add missing tag tests
- Add Accept: application/json headers to all API endpoint tests
- Add GET /blog/releases/tag/:tag HTML page test
- Add GET /api/blog/*/tag/:tag JSON API tests for both blogs
- Fix feed.xml assertions to check body first, then content type
2026-03-18 19:03:40 +00:00
Willem van den Ende
99639090b6 Add healthcheck and attempt to fix devcontainer
user was root instead of vscode, and pi was broken.
Claude code had gone missing
2026-03-18 17:15:45 +00:00
80046094b8 update blog post, and run credo with 'pi' 2026-03-18 15:03:24 +00:00
ceeeb994fb Dokku setup script did not work that well, fixed by hand 2026-03-18 14:38:45 +00:00
Willem van den Ende
ddf75031e0 set default port to 5000 for production 2026-03-18 13:55:49 +00:00
Willem van den Ende
3bfca5a726 set DATABASE_URL 2026-03-18 13:48:12 +00:00
Willem van den Ende
3846cae6ca Add MIT license 2026-03-18 13:22:12 +00:00
Willem van den Ende
999ed4e121 Clearly mark sample posts as generated 2026-03-18 12:11:28 +00:00
Willem van den Ende
fcd2a91ecc open port 4050 for testing in docker compose file 2026-03-18 12:07:20 +00:00
Willem van den Ende
87d7b39d22 Enable UTF-8 in devcontainer 2026-03-18 11:30:27 +00:00
Willem van den Ende
7c3aac56ec Add postgres to devcontainer / compose 2026-03-18 11:22:54 +00:00
Willem van den Ende
2d97353649 Add Dockerfile-based Dokku deployment for monorepo layout
Uses a multi-stage Docker build that copies both app/ and blogex/,
preserving the path dependency. Includes release scripts, migration
module, and a sample Dokku setup script.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-18 10:55:44 +00:00
56 changed files with 1437 additions and 250 deletions

View File

@ -1,6 +1,11 @@
FROM mcr.microsoft.com/devcontainers/base:ubuntu-24.04 FROM mcr.microsoft.com/devcontainers/base:ubuntu-24.04
USER root USER root
RUN apt-get update && apt-get install -y locales && \
sed -i '/en_US.UTF-8/s/^# //g' /etc/locale.gen && \
locale-gen en_US.UTF-8
ENV LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
RUN curl -fsSL https://deb.nodesource.com/setup_lts.x | bash - && \ RUN curl -fsSL https://deb.nodesource.com/setup_lts.x | bash - && \
apt-get install -y nodejs apt-get install -y nodejs
@ -19,4 +24,7 @@ RUN /home/vscode/.local/bin/mise exec -- mix local.hex --force && \
RUN npm install -g @mariozechner/pi-coding-agent RUN npm install -g @mariozechner/pi-coding-agent
USER root RUN npm install -g @anthropic-ai/claude-code
RUN echo 'eval "$(/home/vscode/.local/bin/mise activate bash)"' >> /home/vscode/.bashrc
RUN /home/vscode/.local/bin/mise settings set trusted_config_paths /workspaces/firehose

View File

@ -1,10 +1,12 @@
{ {
"$schema": "https://containers.dev/implementors/json_schema/", "$schema": "https://containers.dev/implementors/json_schema/",
"build": { "dockerComposeFile": "docker-compose.yml",
"dockerfile": "Dockerfile" "service": "app",
}, "workspaceFolder": "/workspaces/firehose",
"remoteUser": "vscode", "remoteUser": "vscode",
"runArgs": [], "containerEnv": {
"DB_HOST": "db"
},
"features": { "features": {
"ghcr.io/devcontainers/features/python:1": {}, "ghcr.io/devcontainers/features/python:1": {},
"ghcr.io/jsburckhardt/devcontainer-features/uv:1": {}, "ghcr.io/jsburckhardt/devcontainer-features/uv:1": {},
@ -17,7 +19,7 @@
"source=${localEnv:HOME}/.pi/agent/bin,target=/home/vscode/.pi/agent/bin,type=bind,consistency=cached" "source=${localEnv:HOME}/.pi/agent/bin,target=/home/vscode/.pi/agent/bin,type=bind,consistency=cached"
], ],
"postCreateCommand": { "postCreateCommand": {
"pi-subagents": "bash -ic 'pi install npm:pi-subagents'" "pi-subagents-disabled": "echo 'pi-subagents disabled: upstream JSON schema bug — investigate version pinning separately'"
}, },
"customizations": { "customizations": {
"jetbrains": { "jetbrains": {

View File

@ -0,0 +1,30 @@
# Dev-container compose stack: the app container plus a Postgres 16 database.
services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
    volumes:
      # Mount the repo root into the workspace folder the devcontainer expects.
      - ..:/workspaces/firehose:cached
    ports:
      # Port opened for testing against the host (see devcontainer setup).
      - "4050:4050"
    # Keep the container alive so the IDE can attach; it runs no service itself.
    command: sleep infinity
    depends_on:
      db:
        # Wait until pg_isready succeeds before starting the app container.
        condition: service_healthy
  db:
    image: postgres:16
    restart: unless-stopped
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres
    volumes:
      # Persist database data across container rebuilds.
      - pgdata:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 5s
      timeout: 5s
      retries: 5
volumes:
  pgdata:

31
.dockerignore Normal file
View File

@ -0,0 +1,31 @@
# Git
.git
# Build artifacts
app/_build
app/deps
blogex/_build
blogex/deps
# Dev/test only
app/test
blogex/test
app/.formatter.exs
blogex/.formatter.exs
# IDE
.devcontainer
.claude
# Documentation
*.md
!app/README.md
# Misc
app/tmp
app/cover
app/doc
blogex/doc
# Dokku setup (may contain secrets)
dokku-setup.sh

3
.gitignore vendored Normal file
View File

@ -0,0 +1,3 @@
# Dokku setup (may contain secrets)
dokku-setup.sh
/output/

BIN
.nono.sh.swp Normal file

Binary file not shown.

View File

@ -0,0 +1,148 @@
---
name: test-writer
description: Writes tests following Elixir/Phoenix best practices. Ensures DRY tests with proper helper functions, no duplicated setup code, and correct parameter defaults. Use when writing or modifying tests.
---
# Test Writer Skill
## Overview
This skill provides guidelines for writing clean, maintainable Elixir/Phoenix tests following best practices. It focuses mainly on integration tests. For unit tests, we also want glanceable tests with unique names for values, test helpers, custom matchers and shared setups where appropriate.
## Core Principles
### 1. DRY Tests
Avoid duplication by creating focused helper functions:
**Bad:**
```elixir
test "GET /users returns index", %{conn: conn} do
conn = get(conn, "/users")
body = html_response(conn, 200)
assert body =~ "Users"
end
test "GET /users/:id returns show", %{conn: conn} do
conn = get(conn, "/users/1")
body = html_response(conn, 200)
assert body =~ "User"
end
```
**Good:**
```elixir
defp goto_users_page(conn, suffix \\ ""), do: get(conn, "/users" <> suffix)
test "GET /users returns index", %{conn: conn} do
conn = goto_users_page(conn)
assert html_response(conn, 200) =~ "Users"
end
test "GET /users/:id returns show", %{conn: conn} do
conn = goto_users_page(conn, "/1")
assert html_response(conn, 200) =~ "User"
end
```
### 2. Separate Helpers for Different Assertion Patterns
Don't use conditionals in helpers to handle different cases:
**Bad:**
```elixir
defp goto_users_page(conn, suffix \\ "", check_title \\ true) do
path = "/users" <> suffix
conn = get(conn, path)
body = html_response(conn, 200)
if check_title, do: assert body =~ "Users"
assert body =~ "AppLayout"
body
end
```
**Good:**
```elixir
defp goto_users_page(conn, suffix \\ "") do
path = "/users" <> suffix
conn = get(conn, path)
body = html_response(conn, 200)
assert body =~ "Users"
assert body =~ "AppLayout"
body
end
defp goto_user_page(conn, suffix) do
path = "/users" <> suffix
conn = get(conn, path)
body = html_response(conn, 200)
assert body =~ "AppLayout"
body
end
```
## Test Structure
### Context Block
```elixir
describe "resource name" do
# Shared setup in context if needed
# test "scenario" do ...
end
```
### Value Aliasing
Never reuse value names. Elixir values are immutable, but rebinding the same name is confusing. Use unique, meaningful names for the left-hand side of assignments, or use pipes `|>` to eliminate the need for naming.
```elixir
test "GET /users returns index", %{conn: conn} do
# don't reassign
response = get(conn, "/users")
# ...
end
```
## Common Patterns
### HTML Pages with Layout
```elixir
defp goto_resource_page(conn, suffix \\ ""), do: ...
# Asserts common layout elements and page-specific content
```
### API Endpoints
```elixir
test "GET /api/resource returns JSON", %{conn: conn} do
conn = conn |> put_req_header("accept", "application/json")
conn = get(conn, "/api/resource")
response = json_response(conn, 200)
# Assert structure
end
```
### Error Handling
```elixir
test "returns 404 for nonexistent", %{conn: conn} do
assert html_response(get(conn, "/nonexistent"), 404)
end
```
## Running Tests
### One focused test file
```bash
cd /path/to/app
mix test test/path/to_test.exs
```
### All tests
```bash
make test
```

86
Dockerfile Normal file
View File

@ -0,0 +1,86 @@
# Dockerfile for Dokku deployment
# Multi-stage build for Phoenix/Elixir app with monorepo layout
# (app/ has a path dependency on the sibling blogex/ directory, so both
# are copied into the build context).

# Toolchain pins; override with --build-arg when bumping versions.
ARG ELIXIR_VERSION=1.18.3
ARG OTP_VERSION=27.2.4
ARG DEBIAN_VERSION=bookworm-20260316-slim

ARG BUILDER_IMAGE="docker.io/hexpm/elixir:${ELIXIR_VERSION}-erlang-${OTP_VERSION}-debian-${DEBIAN_VERSION}"
ARG RUNNER_IMAGE="docker.io/debian:${DEBIAN_VERSION}"

# =============================================================================
# Build stage
# =============================================================================
FROM ${BUILDER_IMAGE} AS builder

RUN apt-get update -y && apt-get install -y build-essential git \
    && apt-get clean && rm -f /var/lib/apt/lists/*_*

WORKDIR /build

# Install hex + rebar
RUN mix local.hex --force && \
    mix local.rebar --force

ENV MIX_ENV="prod"

# Copy blogex dependency first (changes less often)
COPY blogex /build/blogex

# Copy app dependency files first for better layer caching
COPY app/mix.exs app/mix.lock /build/app/
WORKDIR /build/app
RUN mix deps.get --only $MIX_ENV
RUN mkdir config

# Copy compile-time config files
COPY app/config/config.exs app/config/${MIX_ENV}.exs config/
RUN mix deps.compile

# Copy application source and compile
COPY app/priv priv
COPY app/assets assets
COPY app/lib lib
COPY app/rel rel
COPY app/config/runtime.exs config/
RUN mix compile

# Build assets after compile (phoenix-colocated hooks need compiled app)
RUN mix assets.deploy

# Build the release
RUN mix release

# =============================================================================
# Runtime stage
# =============================================================================
FROM ${RUNNER_IMAGE}

# Minimal runtime libraries for the BEAM release plus TLS roots and locales.
RUN apt-get update -y && \
    apt-get install -y libstdc++6 openssl libncurses5 locales ca-certificates \
    && apt-get clean && rm -f /var/lib/apt/lists/*_*

# Set the locale
RUN sed -i '/en_US.UTF-8/s/^# //g' /etc/locale.gen && locale-gen

ENV LANG=en_US.UTF-8
ENV LANGUAGE=en_US:en
ENV LC_ALL=en_US.UTF-8

WORKDIR /app
RUN chown nobody /app

ENV MIX_ENV="prod"

# Copy the release from the build stage
COPY --from=builder --chown=nobody:root /build/app/_build/${MIX_ENV}/rel/firehose ./

# Run the release as an unprivileged user.
USER nobody

# Dokku uses the EXPOSE port for routing
EXPOSE 5000

ENV PHX_SERVER=true
CMD ["/app/bin/server"]

21
LICENSE Normal file
View File

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2026 Living Software LTD
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

27
Makefile Normal file
View File

@ -0,0 +1,27 @@
# Makefile for Firehose monorepo
# Thin wrapper: every target delegates to app/Makefile via `make -C app`.

.PHONY: check precommit deps compile test format

# Common check target that runs all static analysis
# (MISE_BIN=mise overrides the devcontainer's absolute mise path).
check:
	@echo "Running static analysis..."
	@make -C app MISE_BIN=mise check

# Precommit target for CI/pre-commit hooks
precommit: check

# Sync dependencies
deps:
	@make -C app deps

# Compile the project
compile:
	@make -C app compile

# Run tests
test:
	@make -C app test

# Format code
format:
	@make -C app format

20
app.json Normal file
View File

@ -0,0 +1,20 @@
{
"name": "firehose",
"healthchecks": {
"web": [
{
"type": "startup",
"name": "web check",
"path": "/",
"attempts": 5,
"wait": 3,
"timeout": 5
}
]
},
"scripts": {
"dokku": {
"postdeploy": "/app/bin/migrate"
}
}
}

226
app/.credo.exs Normal file
View File

@ -0,0 +1,226 @@
# This file contains the configuration for Credo and you are probably reading
# this after creating it with `mix credo.gen.config`.
#
# If you find anything wrong or unclear in this file, please report an
# issue on GitHub: https://github.com/rrrene/credo/issues
#
%{
#
# You can have as many configs as you like in the `configs:` field.
configs: [
%{
#
# Run any config using `mix credo -C <name>`. If no config name is given
# "default" is used.
#
name: "default",
#
# These are the files included in the analysis:
files: %{
#
# You can give explicit globs or simply directories.
# In the latter case `**/*.{ex,exs}` will be used.
#
included: [
"lib/",
"src/",
"test/",
"web/",
"apps/*/lib/",
"apps/*/src/",
"apps/*/test/",
"apps/*/web/"
],
excluded: [~r"/_build/", ~r"/deps/", ~r"/node_modules/"]
},
#
# Load and configure plugins here:
#
plugins: [],
#
# If you create your own checks, you must specify the source files for
# them here, so they can be loaded by Credo before running the analysis.
#
requires: ["lib_dev/firehose/checks/"],
#
# If you want to enforce a style guide and need a more traditional linting
# experience, you can change `strict` to `true` below:
#
strict: false,
#
# To modify the timeout for parsing files, change this value:
#
parse_timeout: 5000,
#
# If you want to use uncolored output by default, you can change `color`
# to `false` below:
#
color: true,
#
# You can customize the parameters of any check by adding a second element
# to the tuple.
#
# To disable a check put `false` as second element:
#
# {Credo.Check.Design.DuplicatedCode, false}
#
checks: %{
enabled: [
#
## Consistency Checks
#
{Credo.Check.Consistency.ExceptionNames, []},
{Credo.Check.Consistency.LineEndings, []},
{Credo.Check.Consistency.ParameterPatternMatching, []},
{Credo.Check.Consistency.SpaceAroundOperators, []},
{Credo.Check.Consistency.SpaceInParentheses, []},
{Credo.Check.Consistency.TabsOrSpaces, []},
#
## Design Checks
#
# You can customize the priority of any check
# Priority values are: `low, normal, high, higher`
#
{Credo.Check.Design.AliasUsage,
[priority: :low, if_nested_deeper_than: 2, if_called_more_often_than: 0]},
{Credo.Check.Design.TagFIXME, []},
# You can also customize the exit_status of each check.
# If you don't want TODO comments to cause `mix credo` to fail, just
# set this value to 0 (zero).
#
{Credo.Check.Design.TagTODO, [exit_status: 2]},
#
## Readability Checks
#
{Credo.Check.Readability.AliasOrder, []},
{Credo.Check.Readability.FunctionNames, []},
{Credo.Check.Readability.LargeNumbers, []},
{Credo.Check.Readability.MaxLineLength, [priority: :low, max_length: 120]},
{Credo.Check.Readability.ModuleAttributeNames, []},
{Credo.Check.Readability.ModuleDoc, []},
{Credo.Check.Readability.ModuleNames, []},
{Credo.Check.Readability.ParenthesesInCondition, []},
{Credo.Check.Readability.ParenthesesOnZeroArityDefs, []},
{Credo.Check.Readability.PipeIntoAnonymousFunctions, []},
{Credo.Check.Readability.PredicateFunctionNames, []},
{Credo.Check.Readability.PreferImplicitTry, []},
{Credo.Check.Readability.RedundantBlankLines, []},
{Credo.Check.Readability.Semicolons, []},
{Credo.Check.Readability.SpaceAfterCommas, []},
{Credo.Check.Readability.StringSigils, []},
{Credo.Check.Readability.TrailingBlankLine, []},
{Credo.Check.Readability.TrailingWhiteSpace, []},
{Credo.Check.Readability.UnnecessaryAliasExpansion, []},
{Credo.Check.Readability.VariableNames, []},
{Credo.Check.Readability.WithSingleClause, []},
#
## Refactoring Opportunities
#
{Credo.Check.Refactor.Apply, []},
{Credo.Check.Refactor.CondStatements, []},
{Credo.Check.Refactor.CyclomaticComplexity, []},
{Credo.Check.Refactor.FilterCount, []},
{Credo.Check.Refactor.FilterFilter, []},
{Credo.Check.Refactor.FunctionArity, []},
{Credo.Check.Refactor.LongQuoteBlocks, []},
{Credo.Check.Refactor.MapJoin, []},
{Credo.Check.Refactor.MatchInCondition, []},
{Credo.Check.Refactor.NegatedConditionsInUnless, []},
{Credo.Check.Refactor.NegatedConditionsWithElse, []},
{Credo.Check.Refactor.Nesting, []},
{Credo.Check.Refactor.RedundantWithClauseResult, []},
{Credo.Check.Refactor.RejectReject, []},
{Credo.Check.Refactor.UnlessWithElse, []},
{Credo.Check.Refactor.WithClauses, []},
#
## Warnings
#
{Credo.Check.Warning.ApplicationConfigInModuleAttribute, []},
{Credo.Check.Warning.BoolOperationOnSameValues, []},
{Credo.Check.Warning.Dbg, []},
{Credo.Check.Warning.ExpensiveEmptyEnumCheck, []},
{Credo.Check.Warning.IExPry, []},
{Credo.Check.Warning.IoInspect, []},
{Credo.Check.Warning.MissedMetadataKeyInLoggerConfig, []},
{Credo.Check.Warning.OperationOnSameValues, []},
{Credo.Check.Warning.OperationWithConstantResult, []},
{Credo.Check.Warning.RaiseInsideRescue, []},
{Credo.Check.Warning.SpecWithStruct, []},
{Credo.Check.Warning.StructFieldAmount, []},
{Credo.Check.Warning.UnsafeExec, []},
{Credo.Check.Warning.UnusedEnumOperation, []},
{Credo.Check.Warning.UnusedFileOperation, []},
{Credo.Check.Warning.UnusedKeywordOperation, []},
{Credo.Check.Warning.UnusedListOperation, []},
{Credo.Check.Warning.UnusedMapOperation, []},
{Credo.Check.Warning.UnusedPathOperation, []},
{Credo.Check.Warning.UnusedRegexOperation, []},
{Credo.Check.Warning.UnusedStringOperation, []},
{Credo.Check.Warning.UnusedTupleOperation, []},
{Credo.Check.Warning.WrongTestFilename, []},
#
## Custom Checks
#
{Firehose.Checks.NoConnShadowing, []}
],
disabled: [
#
# Checks scheduled for next check update (opt-in for now)
{Credo.Check.Refactor.UtcNowTruncate, []},
#
# Controversial and experimental checks (opt-in, just move the check to `:enabled`
# and be sure to use `mix credo --strict` to see low priority checks)
#
{Credo.Check.Consistency.MultiAliasImportRequireUse, []},
{Credo.Check.Consistency.UnusedVariableNames, []},
{Credo.Check.Design.DuplicatedCode, []},
{Credo.Check.Design.SkipTestWithoutComment, []},
{Credo.Check.Readability.AliasAs, []},
{Credo.Check.Readability.BlockPipe, []},
{Credo.Check.Readability.ImplTrue, []},
{Credo.Check.Readability.MultiAlias, []},
{Credo.Check.Readability.NestedFunctionCalls, []},
{Credo.Check.Readability.OneArityFunctionInPipe, []},
{Credo.Check.Readability.OnePipePerLine, []},
{Credo.Check.Readability.SeparateAliasRequire, []},
{Credo.Check.Readability.SingleFunctionToBlockPipe, []},
{Credo.Check.Readability.SinglePipe, []},
{Credo.Check.Readability.Specs, []},
{Credo.Check.Readability.StrictModuleLayout, []},
{Credo.Check.Readability.WithCustomTaggedTuple, []},
{Credo.Check.Refactor.ABCSize, []},
{Credo.Check.Refactor.AppendSingleItem, []},
{Credo.Check.Refactor.CondInsteadOfIfElse, []},
{Credo.Check.Refactor.DoubleBooleanNegation, []},
{Credo.Check.Refactor.FilterReject, []},
{Credo.Check.Refactor.IoPuts, []},
{Credo.Check.Refactor.MapMap, []},
{Credo.Check.Refactor.ModuleDependencies, []},
{Credo.Check.Refactor.NegatedIsNil, []},
{Credo.Check.Refactor.PassAsyncInTestCases, []},
{Credo.Check.Refactor.PipeChainStart, []},
{Credo.Check.Refactor.RejectFilter, []},
{Credo.Check.Refactor.VariableRebinding, []},
{Credo.Check.Warning.LazyLogging, []},
{Credo.Check.Warning.LeakyEnvironment, []},
{Credo.Check.Warning.MapGetUnsafePass, []},
{Credo.Check.Warning.MixEnv, []},
{Credo.Check.Warning.UnsafeToAtom, []}
# {Credo.Check.Warning.UnusedOperation, [{MyMagicModule, [:fun1, :fun2]}]}
# {Credo.Check.Refactor.MapInto, []},
#
# Custom checks can be created using `mix credo.gen.check`.
#
]
}
}
]
}

33
app/Makefile Normal file
View File

@ -0,0 +1,33 @@
# Makefile for Firehose app
MISE_BIN ?= /home/vscode/.local/bin/mise
MISE_EXEC = $(MISE_BIN) exec --
.PHONY: check precommit deps compile test format credo
# Run all static analysis checks (no database required)
check: credo format
# Precommit target for CI/pre-commit hooks
precommit: check compile
# Sync dependencies
deps:
$(MISE_EXEC) mix deps.get
# Compile the project
compile:
$(MISE_EXEC) mix compile --warnings-as-errors
# Run tests (requires PostgreSQL running on localhost:5432)
# Note: If you don't have PostgreSQL, you can skip tests with `make check`
test: deps compile
$(MISE_EXEC) mix test
# Format code
format:
$(MISE_EXEC) mix format
# Run Credo static analysis
credo:
$(MISE_EXEC) mix credo --strict

View File

@ -61,7 +61,8 @@ config :logger, :default_formatter,
config :phoenix, :json_library, Jason config :phoenix, :json_library, Jason
config :blogex, config :blogex,
blogs: [Firehose.EngineeringBlog, Firehose.ReleaseNotes] blogs: [Firehose.EngineeringBlog, Firehose.ReleaseNotes],
show_drafts: true
# Import environment specific config. This must remain at the bottom # Import environment specific config. This must remain at the bottom
# of this file so it overrides the configuration defined above. # of this file so it overrides the configuration defined above.

View File

@ -4,7 +4,7 @@ import Config
config :firehose, Firehose.Repo, config :firehose, Firehose.Repo,
username: "postgres", username: "postgres",
password: "postgres", password: "postgres",
hostname: "localhost", hostname: System.get_env("DB_HOST") || "localhost",
database: "firehose_dev", database: "firehose_dev",
stacktrace: true, stacktrace: true,
show_sensitive_data_on_connection_error: true, show_sensitive_data_on_connection_error: true,

View File

@ -13,6 +13,9 @@ config :swoosh, api_client: Swoosh.ApiClient.Req
# Disable Swoosh Local Memory Storage # Disable Swoosh Local Memory Storage
config :swoosh, local: false config :swoosh, local: false
# Hide draft blog posts in production
config :blogex, show_drafts: false
# Do not print debug messages in production # Do not print debug messages in production
config :logger, level: :info config :logger, level: :info

View File

@ -51,7 +51,7 @@ if config_env() == :prod do
""" """
host = System.get_env("PHX_HOST") || "example.com" host = System.get_env("PHX_HOST") || "example.com"
port = String.to_integer(System.get_env("PORT") || "4000") port = String.to_integer(System.get_env("PORT") || "5000")
config :firehose, :dns_cluster_query, System.get_env("DNS_CLUSTER_QUERY") config :firehose, :dns_cluster_query, System.get_env("DNS_CLUSTER_QUERY")

View File

@ -8,7 +8,7 @@ import Config
config :firehose, Firehose.Repo, config :firehose, Firehose.Repo,
username: "postgres", username: "postgres",
password: "postgres", password: "postgres",
hostname: "localhost", hostname: System.get_env("DB_HOST") || "localhost",
database: "firehose_test#{System.get_env("MIX_TEST_PARTITION")}", database: "firehose_test#{System.get_env("MIX_TEST_PARTITION")}",
pool: Ecto.Adapters.SQL.Sandbox, pool: Ecto.Adapters.SQL.Sandbox,
pool_size: System.schedulers_online() * 2 pool_size: System.schedulers_online() * 2

View File

@ -1,4 +1,7 @@
defmodule Firehose.EngineeringBlog do defmodule Firehose.EngineeringBlog do
@moduledoc """
Engineering blog configuration.
"""
use Blogex.Blog, use Blogex.Blog,
blog_id: :engineering, blog_id: :engineering,
app: :firehose, app: :firehose,

View File

@ -1,4 +1,7 @@
defmodule Firehose.ReleaseNotes do defmodule Firehose.ReleaseNotes do
@moduledoc """
Release notes blog configuration.
"""
use Blogex.Blog, use Blogex.Blog,
blog_id: :release_notes, blog_id: :release_notes,
app: :firehose, app: :firehose,

View File

@ -0,0 +1,31 @@
defmodule Firehose.Release do
  @moduledoc """
  Tasks for production releases (e.g., database migrations).

  Usage from Dokku:

      dokku run APP_NAME /app/bin/migrate
  """

  # OTP application name; used to load config and look up configured repos.
  @app :firehose

  @doc """
  Runs all pending Ecto migrations (`:up`) for every configured repo.
  """
  def migrate do
    load_app()

    for repo <- repos() do
      # with_repo starts the repo just long enough to run the migrations.
      {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :up, all: true))
    end
  end

  @doc """
  Rolls `repo` back down to the given migration `version`.
  """
  def rollback(repo, version) do
    load_app()
    {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :down, to: version))
  end

  # Repos registered under the :ecto_repos key of this application's config.
  defp repos do
    Application.fetch_env!(@app, :ecto_repos)
  end

  # Load the application (without starting it) so its config is available.
  defp load_app do
    Application.load(@app)
  end
end

View File

@ -88,8 +88,8 @@ defmodule FirehoseWeb do
import FirehoseWeb.CoreComponents import FirehoseWeb.CoreComponents
# Common modules used in templates # Common modules used in templates
alias Phoenix.LiveView.JS
alias FirehoseWeb.Layouts alias FirehoseWeb.Layouts
alias Phoenix.LiveView.JS
# Routes generation with the ~p sigil # Routes generation with the ~p sigil
unquote(verified_routes()) unquote(verified_routes())

View File

@ -29,6 +29,7 @@ defmodule FirehoseWeb.CoreComponents do
use Phoenix.Component use Phoenix.Component
use Gettext, backend: FirehoseWeb.Gettext use Gettext, backend: FirehoseWeb.Gettext
alias Phoenix.HTML.Form
alias Phoenix.LiveView.JS alias Phoenix.LiveView.JS
@doc """ @doc """
@ -181,7 +182,7 @@ defmodule FirehoseWeb.CoreComponents do
def input(%{type: "checkbox"} = assigns) do def input(%{type: "checkbox"} = assigns) do
assigns = assigns =
assign_new(assigns, :checked, fn -> assign_new(assigns, :checked, fn ->
Phoenix.HTML.Form.normalize_value("checkbox", assigns[:value]) Form.normalize_value("checkbox", assigns[:value])
end) end)
~H""" ~H"""

View File

@ -1,6 +1,9 @@
<header class="navbar px-4 sm:px-6 lg:px-8 border-b border-base-200"> <header class="navbar px-4 sm:px-6 lg:px-8 border-b border-base-200">
<div class="flex-1"> <div class="flex-1">
<a href="/" class="font-display text-xl font-semibold tracking-tight text-primary hover:opacity-80 transition"> <a
href="/"
class="font-display text-xl font-semibold tracking-tight text-primary hover:opacity-80 transition"
>
firehose firehose
</a> </a>
</div> </div>

View File

@ -58,6 +58,7 @@ defmodule FirehoseWeb.BlogController do
end end
defp parse_page(nil), do: 1 defp parse_page(nil), do: 1
defp parse_page(str) do defp parse_page(str) do
case Integer.parse(str) do case Integer.parse(str) do
{page, ""} when page > 0 -> page {page, ""} when page > 0 -> page

View File

@ -1,4 +1,4 @@
<div class="space-y-8"> <div class="space-y-8">
<a href={@base_path} class="text-sm text-primary hover:underline">&larr; Back to posts</a> <a href={@base_path} class="text-sm text-primary hover:underline">&larr; Back to posts</a>
<.post_show post={@post} /> <.post_show post={@post} base_path={@base_path} />
</div> </div>

View File

@ -4,7 +4,7 @@
<p class="mt-2 text-base-content/70">Posts tagged "{@tag}"</p> <p class="mt-2 text-base-content/70">Posts tagged "{@tag}"</p>
</header> </header>
<.post_index posts={@posts} base_path={@base_path} /> <.post_index posts={@posts} base_path={@base_path} current_tag={@tag} />
<a href={@base_path} class="text-sm text-primary hover:underline">&larr; All posts</a> <a href={@base_path} class="text-sm text-primary hover:underline">&larr; All posts</a>
</div> </div>

View File

@ -6,7 +6,12 @@
<div class="space-y-4 text-lg leading-relaxed text-base-content/80"> <div class="space-y-4 text-lg leading-relaxed text-base-content/80">
<p> <p>
I'm <strong class="text-base-content">Willem van den Ende</strong>, I'm <strong class="text-base-content">Willem van den Ende</strong>,
partner at <a href="https://qwan.eu" class="text-primary hover:underline" target="_blank" rel="noopener">QWAN</a>. partner at <a
href="https://qwan.eu"
class="text-primary hover:underline"
target="_blank"
rel="noopener"
>QWAN</a>.
This is where I write about AI-native consulting, shitty evals, This is where I write about AI-native consulting, shitty evals,
and whatever prototype I'm building this week. and whatever prototype I'm building this week.
</p> </p>
@ -21,7 +26,9 @@
class="rounded-box border border-base-200 p-5 space-y-2 hover:border-primary/30 transition" class="rounded-box border border-base-200 p-5 space-y-2 hover:border-primary/30 transition"
> >
<a href={"#{post_base_path(post)}/#{post.id}"} class="block space-y-2"> <a href={"#{post_base_path(post)}/#{post.id}"} class="block space-y-2">
<h3 class="font-semibold text-base-content hover:text-primary transition">{post.title}</h3> <h3 class="font-semibold text-base-content hover:text-primary transition">
{post.title}
</h3>
<p class="text-sm text-base-content/60">{post.description}</p> <p class="text-sm text-base-content/60">{post.description}</p>
<div class="flex items-center gap-2 text-xs text-base-content/50"> <div class="flex items-center gap-2 text-xs text-base-content/50">
<time datetime={Date.to_iso8601(post.date)}> <time datetime={Date.to_iso8601(post.date)}>

View File

@ -0,0 +1,49 @@
defmodule Firehose.Checks.NoConnShadowing do
  # Custom Credo check that flags `conn = get(conn, ...)`-style rebinding in
  # Phoenix controller tests and points the author at the automated fixer.
  use Credo.Check,
    base_priority: :normal,
    category: :readability,
    explanations: [
      check: """
      Conn shadowing (`conn = get(conn, ...)`) makes Phoenix controller tests
      noisy. Use pipe chains instead:
      body = conn |> get("/path") |> html_response(200)
      Run `./refactor_conn_aliasing.sh <file>` to fix automatically.
      """
    ]

  # Phoenix.ConnTest request helpers whose result is commonly rebound to `conn`.
  @http_verbs ~w(get post put patch delete head options)a

  @impl true
  # Entry point called by Credo: walk the AST and collect one issue per
  # shadowing assignment found by traverse/3.
  def run(%SourceFile{} = source_file, params) do
    issue_meta = IssueMeta.for(source_file, params)

    source_file
    |> Credo.Code.prewalk(&traverse(&1, &2, issue_meta))
    |> Enum.reverse()
  end

  # Matches `conn = verb(conn, ...)` where `verb` is one of @http_verbs;
  # records an issue at the assignment's line.
  defp traverse(
         {:=, meta, [{:conn, _, _}, {verb, _, [{:conn, _, _} | _]}]} = ast,
         issues,
         issue_meta
       )
       when verb in @http_verbs do
    issue = issue_for(issue_meta, meta[:line], verb)
    {ast, [issue | issues]}
  end

  # Any other AST node: keep walking without adding issues.
  defp traverse(ast, issues, _issue_meta) do
    {ast, issues}
  end

  defp issue_for(issue_meta, line_no, verb) do
    format_issue(
      issue_meta,
      message:
        "Conn shadowing detected (`conn = #{verb}(conn, ...)`). Run `./refactor_conn_aliasing.sh <file>` to fix.",
      line_no: line_no
    )
  end
end

View File

@ -32,7 +32,8 @@ defmodule Firehose.MixProject do
end end
# Specifies which paths to compile per environment. # Specifies which paths to compile per environment.
defp elixirc_paths(:test), do: ["lib", "test/support"] defp elixirc_paths(:test), do: ["lib", "lib_dev", "test/support"]
defp elixirc_paths(:dev), do: ["lib", "lib_dev"]
defp elixirc_paths(_), do: ["lib"] defp elixirc_paths(_), do: ["lib"]
# Specifies your project dependencies. # Specifies your project dependencies.
@ -66,7 +67,8 @@ defmodule Firehose.MixProject do
{:jason, "~> 1.2"}, {:jason, "~> 1.2"},
{:dns_cluster, "~> 0.2.0"}, {:dns_cluster, "~> 0.2.0"},
{:bandit, "~> 1.5"}, {:bandit, "~> 1.5"},
{:blogex, path: "../blogex"} {:blogex, path: "../blogex"},
{:credo, "~> 1.7", only: [:dev, :test], runtime: false}
] ]
end end

View File

@ -1,6 +1,8 @@
%{ %{
"bandit": {:hex, :bandit, "1.10.3", "1e5d168fa79ec8de2860d1b4d878d97d4fbbe2fdbe7b0a7d9315a4359d1d4bb9", [:mix], [{:hpax, "~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}, {:plug, "~> 1.18", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:thousand_island, "~> 1.0", [hex: :thousand_island, repo: "hexpm", optional: false]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "99a52d909c48db65ca598e1962797659e3c0f1d06e825a50c3d75b74a5e2db18"}, "bandit": {:hex, :bandit, "1.10.3", "1e5d168fa79ec8de2860d1b4d878d97d4fbbe2fdbe7b0a7d9315a4359d1d4bb9", [:mix], [{:hpax, "~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}, {:plug, "~> 1.18", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:thousand_island, "~> 1.0", [hex: :thousand_island, repo: "hexpm", optional: false]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "99a52d909c48db65ca598e1962797659e3c0f1d06e825a50c3d75b74a5e2db18"},
"bunt": {:hex, :bunt, "1.0.0", "081c2c665f086849e6d57900292b3a161727ab40431219529f13c4ddcf3e7a44", [:mix], [], "hexpm", "dc5f86aa08a5f6fa6b8096f0735c4e76d54ae5c9fa2c143e5a1fc7c1cd9bb6b5"},
"cc_precompiler": {:hex, :cc_precompiler, "0.1.11", "8c844d0b9fb98a3edea067f94f616b3f6b29b959b6b3bf25fee94ffe34364768", [:mix], [{:elixir_make, "~> 0.7", [hex: :elixir_make, repo: "hexpm", optional: false]}], "hexpm", "3427232caf0835f94680e5bcf082408a70b48ad68a5f5c0b02a3bea9f3a075b9"}, "cc_precompiler": {:hex, :cc_precompiler, "0.1.11", "8c844d0b9fb98a3edea067f94f616b3f6b29b959b6b3bf25fee94ffe34364768", [:mix], [{:elixir_make, "~> 0.7", [hex: :elixir_make, repo: "hexpm", optional: false]}], "hexpm", "3427232caf0835f94680e5bcf082408a70b48ad68a5f5c0b02a3bea9f3a075b9"},
"credo": {:hex, :credo, "1.7.17", "f92b6aa5b26301eaa5a35e4d48ebf5aa1e7094ac00ae38f87086c562caf8a22f", [:mix], [{:bunt, "~> 0.2.1 or ~> 1.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "1eb5645c835f0b6c9b5410f94b5a185057bcf6d62a9c2b476da971cde8749645"},
"db_connection": {:hex, :db_connection, "2.9.0", "a6a97c5c958a2d7091a58a9be40caf41ab496b0701d21e1d1abff3fa27a7f371", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "17d502eacaf61829db98facf6f20808ed33da6ccf495354a41e64fe42f9c509c"}, "db_connection": {:hex, :db_connection, "2.9.0", "a6a97c5c958a2d7091a58a9be40caf41ab496b0701d21e1d1abff3fa27a7f371", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "17d502eacaf61829db98facf6f20808ed33da6ccf495354a41e64fe42f9c509c"},
"decimal": {:hex, :decimal, "2.3.0", "3ad6255aa77b4a3c4f818171b12d237500e63525c2fd056699967a3e7ea20f62", [:mix], [], "hexpm", "a4d66355cb29cb47c3cf30e71329e58361cfcb37c34235ef3bf1d7bf3773aeac"}, "decimal": {:hex, :decimal, "2.3.0", "3ad6255aa77b4a3c4f818171b12d237500e63525c2fd056699967a3e7ea20f62", [:mix], [], "hexpm", "a4d66355cb29cb47c3cf30e71329e58361cfcb37c34235ef3bf1d7bf3773aeac"},
"dns_cluster": {:hex, :dns_cluster, "0.2.0", "aa8eb46e3bd0326bd67b84790c561733b25c5ba2fe3c7e36f28e88f384ebcb33", [:mix], [], "hexpm", "ba6f1893411c69c01b9e8e8f772062535a4cf70f3f35bcc964a324078d8c8240"}, "dns_cluster": {:hex, :dns_cluster, "0.2.0", "aa8eb46e3bd0326bd67b84790c561733b25c5ba2fe3c7e36f28e88f384ebcb33", [:mix], [], "hexpm", "ba6f1893411c69c01b9e8e8f772062535a4cf70f3f35bcc964a324078d8c8240"},

View File

@ -1,6 +1,7 @@
%{ %{
title: "Hello World", title: "Hello World",
author: "Firehose Team", author: "Firehose Team",
published: false,
tags: ~w(elixir phoenix), tags: ~w(elixir phoenix),
description: "Our first engineering blog post" description: "Our first engineering blog post"
} }

View File

@ -7,20 +7,24 @@
} }
--- ---
I wrote about [publishing short posts](https://www.qwan.eu/2025/05/20/publish-short-posts.html) on the QWAN blog last year. Giving myself license to write shorter, rougher pieces. That worked for a while. But some things don't belong on a consultancy blog. I wrote about [publishing short posts](https://www.qwan.eu/2025/05/20/publish-short-posts.html) on the QWAN blog last year. Giving myself license to write shorter, rougher pieces. That worked for a while. But some things don't feel like a good fit for the QWAN blog just yet.
When I prototype with a coding agent at 11pm, the thing I learn is not a polished QWAN insight. It's a half-formed observation about evals, or a trick for keeping the human in the loop, or just "I built this and here's what surprised me." The QWAN blog has a certain standard. This stuff needs somewhere scruffier to land. This post was partially written with claude code, see the commit history [on our gitea](https://gitea.apps.sustainabledelivery.com/mostalive/firehose) if you want to check the differences.
Hence **Firehose** — named after what it feels like to work with AI coding agents. You're drinking from a firehose of generated code, suggestions, and decisions. The interesting question is not "how do I generate more" but "how do I stay in control of what's coming out." When I prototype with a coding agent at 11pm (I should go to bed and write a post about sustainable pace the next day ;-) ), the thing I learn is not a polished QWAN insight. It's a half-formed observation about something that just happened, or a trick for keeping the human in the loop, or just "I built this and here's what surprised me."
That's also what this site is built with, by the way. The homepage, the blog engine, the layout — all built in conversation with Claude Code. I wanted to experience what our clients experience: shipping something real with an AI agent, and noticing where the friction is. Hence **Firehose** — named after what it feels like to work with AI coding agents. You're drinking from a firehose of generated code, suggestions, and decisions. The interesting question is not "how do I generate more" but "how do I stay in control of what's coming out.". And also, currently, how do I generate just enough and focus on interesting feedback loops instead of code?
A few things I noticed, building this: I wrote this last may as well [shallow research tool](https://www.qwan.eu/2025/05/01/agentic-search.html):
- **Layout inheritance is a design decision.** The blog engine rendered pages outside Phoenix's layout pipeline. Getting navbar and CSS onto blog pages meant rethinking how the pieces fit together — not just adding a wrapper div. > I want to both get better at using LLMs for programming, and also understand how they work. Marc suggested earlier this year that I write a series of blog posts about my use of them, but I have been drinking from a firehose, and it is quite difficult to figure out a good place to start writing.
- **Warm aesthetics take intention.** The default Phoenix boilerplate is fine, but it says nothing about who you are. Choosing fonts and colours forced me to think about what "personal but professional" looks like.
- **It's fast when it works, and confusing when it doesn't.** When the agent understands your stack, you move at extraordinary speed. When it doesn't (say, the difference between `@inner_block` and `@inner_content` in Phoenix layouts), you can burn time on a misunderstanding that a human would catch in seconds. I have made good progress in learning, and at the same time, practices are still evolving. I see people write patterns. I think it is useful, but too early for that. I am at heuristics (rules of thumb).
That is also why I open sourced the code for this blog [firehose repository on our gitea](https://gitea.apps.sustainabledelivery.com/mostalive/firehose). I think Jekyll, the static site generator we have for QWAN is passable, but I want the option to have a more interactive blog, and since this is going to be a firehose of ideas, give readers the option to subscribe to only what they are interested in, filter posts, like etc. I helped a friend with 'Ghost', but it felt clunky. I like writing in plain text and publishing with `git push` - that works with Jekyll and other static site generators.
I am exploring working in small slices. That does require some initial investment in modularity. If you look at the code, you will notice that some of the blogging functionality is separate from the main site. I want an 'engineering blog' and 'release notes' as a plugin for Software as a Service applications.
This is the space I want to write in. Shorter than a conference talk, longer than a LinkedIn post. Honest about what works and what doesn't. This is the space I want to write in. Shorter than a conference talk, longer than a LinkedIn post. Honest about what works and what doesn't.
If you're a CTO or engineering lead wondering what "AI-native development" actually looks like day to day — not the vendor pitch, the lived experience — that's what I'll be writing about here. If you're wondering what "AI-native development" actually looks like day to day — not the vendor pitch, the lived experience — that's what I'll be writing about here.

View File

@ -0,0 +1,12 @@
%{
title: "Coding agent from scratch - a loop with tools, not that complicated",
author: "Willem van den Ende",
published: true,
tags: ~w(llm coding-agent python exercise),
description: "Coding agents are not that complicated. A loop with some tools. I found an interactive tutorial that lets you experience it"
}
---
I had started on a "Write your own coding agent" exercise. Four iterations in, actually. And then I found [Tiny Agents](https://tinyagents.dev/lesson/agent-loop), a set of interactive exercises that let you experience how agents work, from a simple chat request, through a tool, more tools etc. It has a live graph that visualises the flow of data and actions.
It is good fun to play with, it starts simple and builds up. It lets you inspect the messages between the 'agent' loop code and the large language model server (which is just HTTP and some JSON).

View File

@ -0,0 +1,46 @@
%{
title: "Blog post triage with a local coding agent",
author: "Willem van den Ende",
published: true,
tags: ~w(llm coding-agent blogging),
description: "Can a coding agent help me get some of my draft blog posts over the line? I followed a tip by Chris Parsons to find out."
}
---
I made a skill for a coding agent to help me get more of my draft blog posts over the line. I enjoy writing, and am somewhat fluent in it. Publishing that writing is more hit and miss, however. I often lose energy just before a piece is finished enough. I want to publish more often, and need to form a more effective habit for it.
# What did I get out of it?
I got a working agent 'skill' in an hour or so. I like the QWEN models for their no-bullshit approach to feedback. As it turns out, I have about 60 pages with the 'Candidate Blogpost' tag in my notes, but most of them are not more than an idea. Only some of them have enough detail to turn into a post. I am going to keep this around, prune my candidate blogposts, and add my recent clippings to the mix.
Quite a few of my candidates were 'just links' according to the model, but as I am inspired by [Simon Willison](https://www.simonwillison.net), there is value in sharing links with a brief description on why I think they are relevant. Probably in a different category.
# How did I develop the skill?
I was inspired by two writings:
- Jurgen De Smet asking [how do you write long form articles?](https://www.linkedin.com/posts/jurgendesmet_this-is-how-i-write-long-form-articles-these-share-7441394036222935040-JHmv).
- Chris Parsons suggested to [brief an agent for daily tasks](https://www.chrismdp.com/stop-prompting-start-briefing/), and use the _backbriefing_ loop from "The Art of Action" to improve them.
I like "The Art of Action" - detailed, yet practical. So I had a chat with a frontier model to develop a skill for a local model to surface notes that are almost finished, with some suggestions to get them over the line.
This was my initial prompt. Full chat transcript in the Further Reading section.
#+begin_quote
https://www.chrismdp.com/stop-prompting-start-briefing/ suggests an art of action style backbriefing loop for daily work. I would like to use a local model with pi, the shitty coding agent, instead of claude code. I have trouble publishing blogposts. I have many drafts, marked as CandidateBlogPost in an org-roam directory. I wonder if I could make some kind of pi extension or skill that finds candidate blogposts, helps identify ones that are almost finished, with a suggesion on what to do next for the top 3 almost finished, and suggestions for others on what to add. Probably prioritize recency. I could run that as a cron job in the morning, and create a new daily entry (I use daily entries for org-roam) to get me starte.d Goal would be not to have AI write my posts, but help me finish in pomodori instead of days.
#+end_quote
What I found interesting was that, maybe because I mentioned the links were in an sqlite database, claude desktop spontaneously suggested to create a bash script as part of the skill. I used to have a meta-skill to separate the deterministic parts of agent skills into scripts, but that does not seem to be necessary anymore. I prune my agent setups continuously, only keeping what is needed.
# Tradeoffs
Initially I planned to run this as a scheduled job, but from the development chat it emerged that backbriefing (improving the skill as we run it daily) would not work if it runs scheduled.
I chose a local coding agent with a local model, because I don't want to share my personal notes with a cloud service, and I thought that a smaller model would be more than powerful enough.
## Further reading
https://claude.ai/share/be0184d9-f2bf-41ba-b2e3-235fe9daf9fd - initial chat to develop the skill
I will share a repository with the skill later. I think it is more instructive to have a look at the prompt, and make one for your own notes, starting from your own goals.

6
app/rel/overlays/bin/migrate Executable file
View File

@ -0,0 +1,6 @@
#!/bin/sh
# Release helper: run pending Ecto migrations via the compiled release.
set -eu

# Resolve the release root (one level above this bin/ directory) so the
# script works regardless of the caller's working directory.
script_dir=$(dirname -- "$0")
cd -P -- "$script_dir/.."

exec ./bin/firehose eval Firehose.Release.migrate

6
app/rel/overlays/bin/server Executable file
View File

@ -0,0 +1,6 @@
#!/bin/sh
# Release helper: start the Phoenix endpoint via the compiled release.
# PHX_SERVER=true tells the release to boot the HTTP server.
set -eu

# Run from the release root (one level above this bin/ directory).
script_dir=$(dirname -- "$0")
cd -P -- "$script_dir/.."

PHX_SERVER=true exec ./bin/firehose start

View File

@ -0,0 +1,123 @@
# Controller tests for blog tag pages and tag links.
#
# Refactored to pipe chains (`conn |> get(path) |> html_response(200)`) for
# consistency with BlogTest and the NoConnShadowing Credo check / the
# refactor_conn_aliasing.sh convention used elsewhere in this repo.
defmodule FirehoseWeb.BlogTagsTest do
  use FirehoseWeb.ConnCase

  # Fetches the engineering-blog tag page, asserts the shared page chrome,
  # and returns the response body for further assertions.
  defp goto_engineering_tag_page(conn, tag) do
    body = conn |> get("/blog/engineering/tag/#{tag}") |> html_response(200)
    assert body =~ ~s(tagged "#{tag}")
    assert body =~ "Engineering Blog"
    body
  end

  # Same as goto_engineering_tag_page/2, for the release-notes blog.
  defp goto_releases_tag_page(conn, tag) do
    body = conn |> get("/blog/releases/tag/#{tag}") |> html_response(200)
    assert body =~ ~s(tagged "#{tag}")
    assert body =~ "Release Notes"
    body
  end

  describe "engineering blog tags" do
    test "GET /blog/engineering/tag/:tag shows tag page with all posts", %{conn: conn} do
      body = goto_engineering_tag_page(conn, "elixir")
      assert body =~ "Hello World"
    end

    test "GET /blog/engineering/tag/:tag page shows filtered posts", %{conn: conn} do
      body = goto_engineering_tag_page(conn, "phoenix")
      assert body =~ "Hello World"
    end

    test "GET /blog/engineering/tag/:tag page shows empty list for nonexistent tag", %{
      conn: conn
    } do
      body = conn |> get("/blog/engineering/tag/nonexistent-tag") |> html_response(200)
      assert body =~ ~s(tagged "nonexistent-tag")
    end
  end

  describe "release notes blog tags" do
    test "GET /blog/releases/tag/:tag shows tag page with all posts", %{conn: conn} do
      body = goto_releases_tag_page(conn, "release")
      assert body =~ "v0.1.0 Released"
    end

    test "GET /blog/releases/tag/:tag page shows filtered posts", %{conn: conn} do
      body = conn |> get("/blog/releases/tag/nonexistent-tag") |> html_response(200)
      assert body =~ ~s(tagged "nonexistent-tag")
    end
  end

  describe "tag URL pattern" do
    test "tag URLs follow pattern /blog/:blog_id/tag/:tag for engineering blog", %{conn: conn} do
      # The tag route must resolve for every tag on the sample post.
      for tag <- ~w(elixir phoenix) do
        body = conn |> get("/blog/engineering/tag/#{tag}") |> html_response(200)
        assert body =~ ~s(tagged "#{tag}")
      end
    end

    test "tag URLs follow pattern /blog/:blog_id/tag/:tag for releases blog", %{conn: conn} do
      body = conn |> get("/blog/releases/tag/release") |> html_response(200)
      assert body =~ ~s(tagged "release")
    end

    test "nonexistent tags return 200 with empty post list", %{conn: conn} do
      assert conn |> get("/blog/engineering/tag/nonexistent-tag") |> html_response(200)
    end
  end

  describe "tag page structure" do
    test "tag page has proper layout and back link", %{conn: conn} do
      body = goto_engineering_tag_page(conn, "elixir")
      assert body =~ "Engineering Blog"
      assert body =~ ~s(tagged "elixir")
      assert body =~ "All posts"
    end

    test "release tag page has proper layout and back link", %{conn: conn} do
      body = goto_releases_tag_page(conn, "release")
      assert body =~ "Release Notes"
      assert body =~ ~s(tagged "release")
      assert body =~ "All posts"
    end
  end

  describe "clickable tags on index page" do
    test "tags are rendered as clickable links on engineering blog index", %{conn: conn} do
      body = conn |> get("/blog/engineering") |> html_response(200)
      # Verify tag links exist with correct href pattern
      assert body =~ ~r{href="/blog/engineering/tag/meta"}
      assert body =~ ~r{href="/blog/engineering/tag/ai"}
    end

    test "tags are rendered as clickable links on releases blog index", %{conn: conn} do
      body = conn |> get("/blog/releases") |> html_response(200)
      # Verify tag link exists
      assert body =~ ~r{href="/blog/releases/tag/release"}
    end

    test "tag links have proper styling classes", %{conn: conn} do
      body = conn |> get("/blog/engineering") |> html_response(200)
      # Verify blogex-tag-link class is present for tag links
      assert body =~ ~r{class="[^"]*blogex-tag-link}
    end
  end
end

View File

@ -1,50 +1,54 @@
# Firehose blog controller tests
defmodule FirehoseWeb.BlogTest do defmodule FirehoseWeb.BlogTest do
use FirehoseWeb.ConnCase use FirehoseWeb.ConnCase
defp visit_engineering_page(conn, suffix \\ "") do
path = "/blog/engineering" <> suffix
body = conn |> get(path) |> html_response(200)
assert body =~ "Engineering Blog"
assert body =~ "firehose"
body
end
defp visit_engineering_path(conn, suffix) do
path = "/blog/engineering" <> suffix
body = conn |> get(path) |> html_response(200)
assert body =~ "firehose"
body
end
describe "engineering blog (HTML)" do describe "engineering blog (HTML)" do
test "GET /blog/engineering returns HTML index with layout", %{conn: conn} do test "GET /blog/engineering returns HTML index with layout", %{conn: conn} do
conn = get(conn, "/blog/engineering") visit_engineering_page(conn)
body = html_response(conn, 200)
assert body =~ "Engineering Blog"
assert body =~ "Hello World"
# Verify app layout is present (navbar)
assert body =~ "firehose"
end end
test "GET /blog/engineering/:slug returns HTML post with layout", %{conn: conn} do test "GET /blog/engineering/:slug returns HTML post with layout", %{conn: conn} do
conn = get(conn, "/blog/engineering/hello-world") body = visit_engineering_path(conn, "/hello-world")
body = html_response(conn, 200)
assert body =~ "Hello World" assert body =~ "Hello World"
assert body =~ "firehose"
end end
test "GET /blog/engineering/tag/:tag returns HTML tag page", %{conn: conn} do test "GET /blog/engineering/tag/:tag returns HTML tag page", %{conn: conn} do
conn = get(conn, "/blog/engineering/tag/elixir") body = visit_engineering_path(conn, "/tag/elixir")
body = html_response(conn, 200)
assert body =~ ~s(tagged "elixir") assert body =~ ~s(tagged "elixir")
assert body =~ "Hello World"
end end
end end
describe "input validation" do describe "input validation" do
test "GET /blog/nonexistent returns 404", %{conn: conn} do test "GET /blog/nonexistent returns 404", %{conn: conn} do
conn = get(conn, "/blog/nonexistent") assert conn |> get("/blog/nonexistent") |> html_response(404)
assert html_response(conn, 404)
end end
test "GET /blog/engineering?page=abc falls back to page 1", %{conn: conn} do test "GET /blog/engineering?page=abc falls back to page 1", %{conn: conn} do
conn = get(conn, "/blog/engineering?page=abc") assert conn |> get("/blog/engineering?page=abc") |> html_response(200) =~ "Engineering Blog"
assert html_response(conn, 200) =~ "Engineering Blog"
end end
test "GET /blog/engineering?page=-1 falls back to page 1", %{conn: conn} do test "GET /blog/engineering?page=-1 falls back to page 1", %{conn: conn} do
conn = get(conn, "/blog/engineering?page=-1") assert conn |> get("/blog/engineering?page=-1") |> html_response(200) =~ "Engineering Blog"
assert html_response(conn, 200) =~ "Engineering Blog"
end end
test "GET /blog/engineering?page=0 falls back to page 1", %{conn: conn} do test "GET /blog/engineering?page=0 falls back to page 1", %{conn: conn} do
conn = get(conn, "/blog/engineering?page=0") assert conn |> get("/blog/engineering?page=0") |> html_response(200) =~ "Engineering Blog"
assert html_response(conn, 200) =~ "Engineering Blog"
end end
test "GET /blog/engineering/nonexistent-post returns 404", %{conn: conn} do test "GET /blog/engineering/nonexistent-post returns 404", %{conn: conn} do
@ -56,61 +60,100 @@ defmodule FirehoseWeb.BlogTest do
describe "release notes blog (HTML)" do describe "release notes blog (HTML)" do
test "GET /blog/releases returns HTML index", %{conn: conn} do test "GET /blog/releases returns HTML index", %{conn: conn} do
conn = get(conn, "/blog/releases") body = conn |> get("/blog/releases") |> html_response(200)
body = html_response(conn, 200)
assert body =~ "Release Notes" assert body =~ "Release Notes"
assert body =~ "v0.1.0 Released" assert body =~ "v0.1.0 Released"
end end
test "GET /blog/releases/:slug returns HTML post", %{conn: conn} do test "GET /blog/releases/:slug returns HTML post", %{conn: conn} do
conn = get(conn, "/blog/releases/v0-1-0") body = conn |> get("/blog/releases/v0-1-0") |> html_response(200)
body = html_response(conn, 200)
assert body =~ "v0.1.0 Released" assert body =~ "v0.1.0 Released"
end end
test "GET /blog/releases/tag/:tag returns HTML tag page", %{conn: conn} do
body = conn |> get("/blog/releases/tag/elixir") |> html_response(200)
assert body =~ ~s(tagged "elixir")
end
end end
describe "engineering blog (JSON API)" do describe "engineering blog (JSON API)" do
test "GET /api/blog/engineering returns post index", %{conn: conn} do test "GET /api/blog/engineering returns post index", %{conn: conn} do
conn = get(conn, "/api/blog/engineering") assert %{"blog" => "engineering", "posts" => posts} =
assert %{"blog" => "engineering", "posts" => posts} = json_response(conn, 200) conn
|> put_req_header("accept", "application/json")
|> get("/api/blog/engineering")
|> json_response(200)
assert is_list(posts) assert is_list(posts)
assert length(posts) > 0 refute Enum.empty?(posts)
end end
test "GET /api/blog/engineering/:slug returns a post", %{conn: conn} do test "GET /api/blog/engineering/:slug returns a post", %{conn: conn} do
conn = get(conn, "/api/blog/engineering/hello-world") assert %{"id" => "hello-world", "title" => "Hello World"} =
assert %{"id" => "hello-world", "title" => "Hello World"} = json_response(conn, 200) conn
|> put_req_header("accept", "application/json")
|> get("/api/blog/engineering/hello-world")
|> json_response(200)
end end
test "GET /api/blog/engineering/:slug returns 404 for missing post", %{conn: conn} do test "GET /api/blog/engineering/:slug returns 404 for missing post", %{conn: conn} do
conn = get(conn, "/api/blog/engineering/nonexistent") assert conn
assert response(conn, 404) |> put_req_header("accept", "application/json")
|> get("/api/blog/engineering/nonexistent")
|> response(404)
end end
test "GET /api/blog/engineering/feed.xml returns RSS", %{conn: conn} do test "GET /api/blog/engineering/feed.xml returns RSS", %{conn: conn} do
conn = get(conn, "/api/blog/engineering/feed.xml") response = conn |> get("/api/blog/engineering/feed.xml")
assert response_content_type(conn, :xml) assert response(response, 200) =~ "<rss"
assert response(conn, 200) =~ "<rss" assert response_content_type(response, :xml)
end
test "GET /api/blog/engineering/tag/:tag returns JSON with posts", %{conn: conn} do
assert %{"blog" => "engineering", "tag" => "elixir", "posts" => posts} =
conn
|> put_req_header("accept", "application/json")
|> get("/api/blog/engineering/tag/elixir")
|> json_response(200)
assert is_list(posts)
end end
end end
describe "release notes blog (JSON API)" do describe "release notes blog (JSON API)" do
test "GET /api/blog/releases returns post index", %{conn: conn} do test "GET /api/blog/releases returns post index", %{conn: conn} do
conn = get(conn, "/api/blog/releases") assert %{"blog" => "release_notes", "posts" => posts} =
assert %{"blog" => "release_notes", "posts" => posts} = json_response(conn, 200) conn
|> put_req_header("accept", "application/json")
|> get("/api/blog/releases")
|> json_response(200)
assert is_list(posts) assert is_list(posts)
assert length(posts) > 0 refute Enum.empty?(posts)
end end
test "GET /api/blog/releases/:slug returns a post", %{conn: conn} do test "GET /api/blog/releases/:slug returns a post", %{conn: conn} do
conn = get(conn, "/api/blog/releases/v0-1-0") assert %{"id" => "v0-1-0", "title" => "v0.1.0 Released"} =
assert %{"id" => "v0-1-0", "title" => "v0.1.0 Released"} = json_response(conn, 200) conn
|> put_req_header("accept", "application/json")
|> get("/api/blog/releases/v0-1-0")
|> json_response(200)
end end
test "GET /api/blog/releases/feed.xml returns RSS", %{conn: conn} do test "GET /api/blog/releases/feed.xml returns RSS", %{conn: conn} do
conn = get(conn, "/api/blog/releases/feed.xml") response = conn |> get("/api/blog/releases/feed.xml")
assert response_content_type(conn, :xml) assert response(response, 200) =~ "<rss"
assert response(conn, 200) =~ "<rss" assert response_content_type(response, :xml)
end
test "GET /api/blog/releases/tag/:tag returns JSON with posts", %{conn: conn} do
assert %{"blog" => "release_notes", "tag" => "elixir", "posts" => posts} =
conn
|> put_req_header("accept", "application/json")
|> get("/api/blog/releases/tag/elixir")
|> json_response(200)
assert is_list(posts)
end end
end end
end end

View File

@ -2,8 +2,7 @@ defmodule FirehoseWeb.PageControllerTest do
use FirehoseWeb.ConnCase use FirehoseWeb.ConnCase
test "GET /", %{conn: conn} do test "GET /", %{conn: conn} do
conn = get(conn, ~p"/") body = conn |> get(~p"/") |> html_response(200)
body = html_response(conn, 200)
assert body =~ "Drinking from the firehose" assert body =~ "Drinking from the firehose"
assert body =~ "Willem van den Ende" assert body =~ "Willem van den Ende"
end end

View File

@ -14,6 +14,8 @@ defmodule Firehose.DataCase do
this option is not recommended for other databases. this option is not recommended for other databases.
""" """
alias Ecto.Adapters.SQL.Sandbox
use ExUnit.CaseTemplate use ExUnit.CaseTemplate
using do using do
@ -36,8 +38,8 @@ defmodule Firehose.DataCase do
Sets up the sandbox based on the test tags. Sets up the sandbox based on the test tags.
""" """
def setup_sandbox(tags) do def setup_sandbox(tags) do
pid = Ecto.Adapters.SQL.Sandbox.start_owner!(Firehose.Repo, shared: not tags[:async]) pid = Sandbox.start_owner!(Firehose.Repo, shared: not tags[:async])
on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end) on_exit(fn -> Sandbox.stop_owner(pid) end)
end end
@doc """ @doc """

View File

@ -110,6 +110,11 @@ defmodule Blogex do
* `Blogex.Router` mountable Plug router * `Blogex.Router` mountable Plug router
""" """
@doc "Returns true if draft posts should be visible (dev/test environments)."
def show_drafts? do
Application.get_env(:blogex, :show_drafts, false)
end
defdelegate blogs, to: Blogex.Registry defdelegate blogs, to: Blogex.Registry
defdelegate get_blog!(blog_id), to: Blogex.Registry defdelegate get_blog!(blog_id), to: Blogex.Registry
defdelegate get_blog(blog_id), to: Blogex.Registry defdelegate get_blog(blog_id), to: Blogex.Registry

View File

@ -73,8 +73,14 @@ defmodule Blogex.Blog do
@doc "Returns the base URL path for this blog." @doc "Returns the base URL path for this blog."
def base_path, do: @blog_base_path def base_path, do: @blog_base_path
@doc "Returns all published posts, newest first." @doc "Returns all visible posts, newest first. Drafts are included in dev/test."
def all_posts, do: Enum.filter(@posts, & &1.published) def all_posts do
if Blogex.show_drafts?() do
@posts
else
Enum.filter(@posts, & &1.published)
end
end
@doc "Returns the N most recent published posts." @doc "Returns the N most recent published posts."
def recent_posts(n \\ 5), do: Enum.take(all_posts(), n) def recent_posts(n \\ 5), do: Enum.take(all_posts(), n)

View File

@ -23,9 +23,11 @@ defmodule Blogex.Components do
* `:posts` - list of `%Blogex.Post{}` structs (required) * `:posts` - list of `%Blogex.Post{}` structs (required)
* `:base_path` - base URL path for post links (required) * `:base_path` - base URL path for post links (required)
* `:current_tag` - currently selected tag for highlighting (optional)
""" """
attr :posts, :list, required: true attr :posts, :list, required: true
attr :base_path, :string, required: true attr :base_path, :string, required: true
attr :current_tag, :string, default: nil
def post_index(assigns) do def post_index(assigns) do
~H""" ~H"""
@ -35,7 +37,7 @@ defmodule Blogex.Components do
<h2> <h2>
<a href={"#{@base_path}/#{post.id}"}>{post.title}</a> <a href={"#{@base_path}/#{post.id}"}>{post.title}</a>
</h2> </h2>
<.post_meta post={post} /> <.post_meta post={post} base_path={@base_path} current_tag={@current_tag} />
</header> </header>
<p class="blogex-post-description">{post.description}</p> <p class="blogex-post-description">{post.description}</p>
</article> </article>
@ -49,15 +51,17 @@ defmodule Blogex.Components do
## Attributes ## Attributes
* `:post` - a `%Blogex.Post{}` struct (required) * `:post` - a `%Blogex.Post{}` struct (required)
* `:base_path` - base URL path for tag links (required)
""" """
attr :post, :map, required: true attr :post, :map, required: true
attr :base_path, :string, required: true
def post_show(assigns) do def post_show(assigns) do
~H""" ~H"""
<article class="blogex-post"> <article class="blogex-post">
<header class="blogex-post-header"> <header class="blogex-post-header">
<h1>{@post.title}</h1> <h1>{@post.title}</h1>
<.post_meta post={@post} /> <.post_meta post={@post} base_path={@base_path} />
</header> </header>
<div class="blogex-post-body"> <div class="blogex-post-body">
{Phoenix.HTML.raw(@post.body)} {Phoenix.HTML.raw(@post.body)}
@ -68,8 +72,16 @@ defmodule Blogex.Components do
@doc """ @doc """
Renders post metadata (date, author, tags). Renders post metadata (date, author, tags).
## Attributes
* `:post` - a `%Blogex.Post{}` struct (required)
* `:base_path` - base URL path for tag links (required)
* `:current_tag` - currently selected tag for highlighting (optional)
""" """
attr :post, :map, required: true attr :post, :map, required: true
attr :base_path, :string, required: true
attr :current_tag, :string, default: nil
def post_meta(assigns) do def post_meta(assigns) do
~H""" ~H"""
@ -80,9 +92,13 @@ defmodule Blogex.Components do
<span :if={@post.author} class="blogex-post-author"> <span :if={@post.author} class="blogex-post-author">
by {@post.author} by {@post.author}
</span> </span>
<span :for={tag <- @post.tags} class="blogex-tag"> <a
:for={tag <- @post.tags}
href={"#{@base_path}/tag/#{tag}"}
class={["blogex-tag-link", tag == @current_tag && "blogex-tag-active"]}
>
{tag} {tag}
</span> </a>
</div> </div>
""" """
end end

View File

@ -64,7 +64,7 @@ defmodule Blogex.Layout do
</head> </head>
<body style="max-width: 48rem; margin: 0 auto; padding: 2rem; font-family: system-ui, sans-serif;"> <body style="max-width: 48rem; margin: 0 auto; padding: 2rem; font-family: system-ui, sans-serif;">
<nav><a href={@base_path}>&larr; Back</a></nav> <nav><a href={@base_path}>&larr; Back</a></nav>
<.post_show post={@post} /> <.post_show post={@post} base_path={@base_path} />
</body> </body>
</html> </html>
""" """

22
blogex/mix.lock Normal file
View File

@ -0,0 +1,22 @@
%{
"earmark": {:hex, :earmark, "1.4.48", "5f41e579d85ef812351211842b6e005f6e0cef111216dea7d4b9d58af4608434", [:mix], [], "hexpm", "a461a0ddfdc5432381c876af1c86c411fd78a25790c75023c7a4c035fdc858f9"},
"earmark_parser": {:hex, :earmark_parser, "1.4.44", "f20830dd6b5c77afe2b063777ddbbff09f9759396500cdbe7523efd58d7a339c", [:mix], [], "hexpm", "4778ac752b4701a5599215f7030989c989ffdc4f6df457c5f36938cc2d2a2750"},
"ex_doc": {:hex, :ex_doc, "0.40.1", "67542e4b6dde74811cfd580e2c0149b78010fd13001fda7cfeb2b2c2ffb1344d", [:mix], [{:earmark_parser, "~> 1.4.44", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_c, ">= 0.1.0", [hex: :makeup_c, repo: "hexpm", optional: true]}, {:makeup_elixir, "~> 0.14 or ~> 1.0", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1 or ~> 1.0", [hex: :makeup_erlang, repo: "hexpm", optional: false]}, {:makeup_html, ">= 0.1.0", [hex: :makeup_html, repo: "hexpm", optional: true]}], "hexpm", "bcef0e2d360d93ac19f01a85d58f91752d930c0a30e2681145feea6bd3516e00"},
"jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"},
"makeup": {:hex, :makeup, "1.2.1", "e90ac1c65589ef354378def3ba19d401e739ee7ee06fb47f94c687016e3713d1", [:mix], [{:nimble_parsec, "~> 1.4", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "d36484867b0bae0fea568d10131197a4c2e47056a6fbe84922bf6ba71c8d17ce"},
"makeup_elixir": {:hex, :makeup_elixir, "1.0.1", "e928a4f984e795e41e3abd27bfc09f51db16ab8ba1aebdba2b3a575437efafc2", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "7284900d412a3e5cfd97fdaed4f5ed389b8f2b4cb49efc0eb3bd10e2febf9507"},
"makeup_erlang": {:hex, :makeup_erlang, "1.0.3", "4252d5d4098da7415c390e847c814bad3764c94a814a0b4245176215615e1035", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "953297c02582a33411ac6208f2c6e55f0e870df7f80da724ed613f10e6706afd"},
"mime": {:hex, :mime, "2.0.7", "b8d739037be7cd402aee1ba0306edfdef982687ee7e9859bee6198c1e7e2f128", [:mix], [], "hexpm", "6171188e399ee16023ffc5b76ce445eb6d9672e2e241d2df6050f3c771e80ccd"},
"nimble_parsec": {:hex, :nimble_parsec, "1.4.2", "8efba0122db06df95bfaa78f791344a89352ba04baedd3849593bfce4d0dc1c6", [:mix], [], "hexpm", "4b21398942dda052b403bbe1da991ccd03a053668d147d53fb8c4e0efe09c973"},
"nimble_publisher": {:hex, :nimble_publisher, "1.1.1", "3ea4d4cfca45b11a5377bce7608367a9ddd7e717a9098161d8439eca23e239aa", [:mix], [{:earmark, "~> 1.4", [hex: :earmark, repo: "hexpm", optional: false]}, {:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "d67e15bddf07e8c60f75849008b78ea8c6b2b4ae8e3f882ccf0a22d57bd42ed0"},
"phoenix": {:hex, :phoenix, "1.8.5", "919db335247e6d4891764dc3063415b0d2457641c5f9b3751b5df03d8e20bbcf", [:mix], [{:bandit, "~> 1.0", [hex: :bandit, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix_pubsub, "~> 2.1", [hex: :phoenix_pubsub, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.7", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:plug_crypto, "~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:websock_adapter, "~> 0.5.3", [hex: :websock_adapter, repo: "hexpm", optional: false]}], "hexpm", "83b2bb125127e02e9f475c8e3e92736325b5b01b0b9b05407bcb4083b7a32485"},
"phoenix_html": {:hex, :phoenix_html, "4.3.0", "d3577a5df4b6954cd7890c84d955c470b5310bb49647f0a114a6eeecc850f7ad", [:mix], [], "hexpm", "3eaa290a78bab0f075f791a46a981bbe769d94bc776869f4f3063a14f30497ad"},
"phoenix_live_view": {:hex, :phoenix_live_view, "1.1.27", "9afcab28b0c82afdc51044e661bcd5b8de53d242593d34c964a37710b40a42af", [:mix], [{:igniter, ">= 0.6.16 and < 1.0.0-0", [hex: :igniter, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:lazy_html, "~> 0.1.0", [hex: :lazy_html, repo: "hexpm", optional: true]}, {:phoenix, "~> 1.6.15 or ~> 1.7.0 or ~> 1.8.0-rc", [hex: :phoenix, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 3.3 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.15", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.2 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "415735d0b2c612c9104108b35654e977626a0cb346711e1e4f1ed16e3c827ede"},
"phoenix_pubsub": {:hex, :phoenix_pubsub, "2.2.0", "ff3a5616e1bed6804de7773b92cbccfc0b0f473faf1f63d7daf1206c7aeaaa6f", [:mix], [], "hexpm", "adc313a5bf7136039f63cfd9668fde73bba0765e0614cba80c06ac9460ff3e96"},
"phoenix_template": {:hex, :phoenix_template, "1.0.4", "e2092c132f3b5e5b2d49c96695342eb36d0ed514c5b252a77048d5969330d639", [:mix], [{:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}], "hexpm", "2c0c81f0e5c6753faf5cca2f229c9709919aba34fab866d3bc05060c9c444206"},
"plug": {:hex, :plug, "1.19.1", "09bac17ae7a001a68ae393658aa23c7e38782be5c5c00c80be82901262c394c0", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "560a0017a8f6d5d30146916862aaf9300b7280063651dd7e532b8be168511e62"},
"plug_crypto": {:hex, :plug_crypto, "2.1.1", "19bda8184399cb24afa10be734f84a16ea0a2bc65054e23a62bb10f06bc89491", [:mix], [], "hexpm", "6470bce6ffe41c8bd497612ffde1a7e4af67f36a15eea5f921af71cf3e11247c"},
"telemetry": {:hex, :telemetry, "1.4.1", "ab6de178e2b29b58e8256b92b382ea3f590a47152ca3651ea857a6cae05ac423", [:rebar3], [], "hexpm", "2172e05a27531d3d31dd9782841065c50dd5c3c7699d95266b2edd54c2dafa1c"},
"websock": {:hex, :websock, "0.5.3", "2f69a6ebe810328555b6fe5c831a851f485e303a7c8ce6c5f675abeb20ebdadc", [:mix], [], "hexpm", "6105453d7fac22c712ad66fab1d45abdf049868f253cf719b625151460b8b453"},
"websock_adapter": {:hex, :websock_adapter, "0.5.9", "43dc3ba6d89ef5dec5b1d0a39698436a1e856d000d84bf31a3149862b01a287f", [:mix], [{:bandit, ">= 0.6.0", [hex: :bandit, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.6", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "5534d5c9adad3c18a0f58a9371220d75a803bf0b9a3d87e6fe072faaeed76a08"},
}

View File

@ -5,6 +5,8 @@
description: "Our testing strategy for 200+ LiveView modules" description: "Our testing strategy for 200+ LiveView modules"
} }
--- ---
*This is a sample blog post, generated to show what blogex can do.*
With over 200 LiveView modules in our codebase, we needed a testing strategy With over 200 LiveView modules in our codebase, we needed a testing strategy
that was both fast and reliable. Here's what we landed on. that was both fast and reliable. Here's what we landed on.

View File

@ -5,6 +5,8 @@
description: "How we replaced our Kafka consumer with Broadway for 10x throughput" description: "How we replaced our Kafka consumer with Broadway for 10x throughput"
} }
--- ---
*This is a sample blog post, generated to show what blogex can do.*
Last quarter we hit a wall with our homegrown Kafka consumer. Message lag was Last quarter we hit a wall with our homegrown Kafka consumer. Message lag was
growing, backpressure was non-existent, and our on-call engineers were losing growing, backpressure was non-existent, and our on-call engineers were losing
sleep. We decided to rebuild on [Broadway](https://github.com/dashbitco/broadway). sleep. We decided to rebuild on [Broadway](https://github.com/dashbitco/broadway).

View File

@ -5,6 +5,8 @@
description: "Reliable webhook delivery, dark mode, and improved search" description: "Reliable webhook delivery, dark mode, and improved search"
} }
--- ---
*This is a sample blog post, generated to show what blogex can do.*
Here's what landed in v2.3.0. Here's what landed in v2.3.0.
## Webhook Reliability ## Webhook Reliability

View File

@ -5,6 +5,8 @@
description: "New team dashboards, API rate limiting, and 12 bug fixes" description: "New team dashboards, API rate limiting, and 12 bug fixes"
} }
--- ---
*This is a sample blog post, generated to show what blogex can do.*
We're excited to ship v2.4.0 with two major features and a pile of bug fixes. We're excited to ship v2.4.0 with two major features and a pile of bug fixes.
## Team Dashboards ## Team Dashboards

View File

@ -1,172 +1,7 @@
# Code Context Investigation complete. Found the tag implementation details:
## Files Retrieved **Key Finding**: The `post_meta` component in `/workspaces/firehose/blogex/lib/blogex/components.ex` (lines 83-85) renders tags as plain text without links, while there's already a working `tag_list` component (lines 93-115) that properly creates links with the pattern `href={"#{@base_path}/tag/#{tag}"}`.
List with exact line ranges:
1. `app/test/firehose_web/controllers/blog_test.exs` (lines 1-128) - Comprehensive blog controller tests covering HTML and JSON API endpoints for engineering blog and release notes
2. `app/lib/firehose_web/controllers/blog_controller.ex` (lines 1-79) - Blog controller with pagination, 404 handling, and input validation
3. `app/test/support/conn_case.ex` (lines 1-38) - Test case template for connection tests
4. `app/lib/firehose/blogs/engineering_blog.ex` (lines 1-7) - Engineering blog module configuration
5. `app/lib/firehose/blogs/release_notes.ex` (lines 1-7) - Release notes blog module configuration
## Key Code **Route structure**: `/tag/:tag` in `/workspaces/firehose/blogex/lib/blogex/router.ex` (line 62) handles tag filtering via `blog.posts_by_tag(tag)`.
### Test Organization **Tests exist**: `/workspaces/firehose/app/test/firehose_web/controllers/blog_test.exs` (lines 35-42, 117-122) verify tag page functionality.
```elixir
# Current structure has 4 describe blocks:
describe "engineering blog (HTML)" # 3 tests
describe "input validation" # 5 tests (newly added in last commit)
describe "release notes blog (HTML)" # 2 tests
describe "engineering blog (JSON API)" # 4 tests
describe "release notes blog (JSON API)" # 3 tests
```
### Input Validation Logic (blog_controller.ex, lines 68-76)
```elixir
defp parse_page(nil), do: 1
defp parse_page(str) do
case Integer.parse(str) do
{page, ""} when page > 0 -> page
_ -> 1
end
end
```
### Test Coverage Added in Last Commit
```elixir
describe "input validation" do
test "GET /blog/nonexistent returns 404", %{conn: conn} do
conn = get(conn, "/blog/nonexistent")
assert html_response(conn, 404)
end
test "GET /blog/engineering?page=abc falls back to page 1", %{conn: conn} do
conn = get(conn, "/blog/engineering?page=abc")
assert html_response(conn, 200) =~ "Engineering Blog"
end
test "GET /blog/engineering?page=-1 falls back to page 1", %{conn: conn} do
conn = get(conn, "/blog/engineering?page=-1")
assert html_response(conn, 200) =~ "Engineering Blog"
end
test "GET /blog/engineering?page=0 falls back to page 1", %{conn: conn} do
conn = get(conn, "/blog/engineering?page=0")
assert html_response(conn, 200) =~ "Engineering Blog"
end
test "GET /blog/engineering/nonexistent-post returns 404", %{conn: conn} do
assert_raise Blogex.NotFoundError, fn ->
get(conn, "/blog/engineering/nonexistent-post")
end
end
end
```
## Architecture
The application uses:
- **Blogex** library for blog functionality (engineering blog and release notes)
- **Phoenix** framework for web endpoints
- **ConnCase** test helper for connection testing
- Two blog types: `Firehose.EngineeringBlog` and `Firehose.ReleaseNotes`
- Pagination through `blog.paginate(page)` method
- 404 handling via `Blogex.NotFoundError` exception
## Start Here
Which file to look at first and why:
**Start with `app/lib/firehose_web/controllers/blog_controller.ex`**
Why: This is the central controller that handles all blog requests. Understanding its structure (especially the `parse_page/1` function and `resolve_blog/2` plug) provides context for why the validation tests were added and how input handling works across both HTML and JSON endpoints.
## Code Smells & Refactoring Suggestions
### Smell 1: Repetitive Validation Tests
**Issue**: Four tests for page parameter validation (`page=abc`, `-1`, `0`, and valid values) are highly repetitive with identical assertions.
**Refactoring Suggestion**: Use parameterized tests or test helpers:
```elixir
# Test helper approach
test_page_fallback(conn, "page=abc")
test_page_fallback(conn, "page=-1")
test_page_fallback(conn, "page=0")
defp test_page_fallback(conn, query_param) do
conn = get(conn, "/blog/engineering?#{query_param}")
assert html_response(conn, 200) =~ "Engineering Blog"
end
```
### Smell 2: Missing Negative Test Coverage
**Issue**: Tests don't verify what happens when invalid blog_id is provided (e.g., `/blog/invalid-blog`).
**Refactoring Suggestion**: Add test for unknown blog:
```elixir
test "GET /blog/unknown returns 404", %{conn: conn} do
conn = get(conn, "/blog/unknown")
assert html_response(conn, 404)
end
```
### Smell 3: Inconsistent Test Naming
**Issue**: Some tests use hyphenated slugs (`v0-1-0`), others use different formats. The naming doesn't clearly indicate what's being tested.
**Refactoring Suggestion**: Standardize naming:
```elixir
# Instead of: "GET /blog/releases/v0-1-0 returns HTML post"
test "GET /blog/releases/:slug returns a release post", %{conn: conn} do
```
### Smell 4: Redundant Layout Assertions
**Issue**: Multiple tests assert the same "firehose" string appears in response, testing layout presence.
**Refactoring Suggestion**: Create a shared test helper:
```elixir
defp assert_has_app_layout(body),
do: assert body =~ "firehose"
# Then in tests: assert_has_app_layout(body)
```
### Smell 5: Test Order Doesn't Follow Flow
**Issue**: Tests are grouped by endpoint but validation tests (which should be first for defensive programming) are in the middle.
**Refactoring Suggestion**: Reorder to follow natural request flow:
1. Input validation (404s, invalid params)
2. Success cases (index, show, tag)
3. Edge cases (pagination, RSS feeds)
### Smell 6: No Test for Controller-Level Error Handling
**Issue**: The controller uses `halt()` in the resolve_blog plug, but there's no test verifying this behavior.
**Refactoring Suggestion**: Add test:
```elixir
test "GET /blog/:blog_id with invalid blog halts request", %{conn: conn} do
conn = get(conn, "/blog/invalid")
assert conn.halted
end
```
### Smell 7: Mixed Response Types Without Clear Separation
**Issue**: HTML tests use `html_response/2`, JSON tests use `json_response/2`, but there's no helper to verify content type before parsing.
**Refactoring Suggestion**: Create response helpers:
```elixir
defp assert_html(conn, status), do: assert html_response(conn, status) != ""
defp assert_json(conn, status), do: assert json_response(conn, status) != %{}
```
### Smell 8: No Test for Concurrent Requests or Edge Cases
**Issue**: Missing tests for:
- Empty page parameter (`?page=`)
- Very large page numbers
- Special characters in slug/tag parameters
**Refactoring Suggestion**: Add edge case tests to validation describe block.
### Overall Recommendations
1. **Extract test helpers** to reduce duplication (especially for page validation)
2. **Standardize test naming** conventions across all blog types
3. **Add positive test** for valid page numbers (currently missing)
4. **Consider property-based testing** for input validation scenarios
5. **Add performance tests** if pagination is used heavily
6. **Create integration tests** that verify end-to-end flows

45
new-post.md Normal file
View File

@ -0,0 +1,45 @@
```mermaid
sequenceDiagram
participant User
participant Engineering as Engineering Folder<br/>(priv/blog/engineering)
participant Blogex as Blogex Library
participant PhoenixApp as Firehose Web App
participant Browser
Note over User,Browser: New Markdown File Flow
User->>Engineering: Create markdown file<br/>(e.g., new-post.md)
Note over Engineering: File appears in directory
Note over Blogex: Blogex reads markdown files at app startup<br/>via config (priv/blog/engineering/**/*.md)
PhoenixApp->>Blogex: Request post index via BlogController<br/>(GET /blog/engineering)
Blogex->>Engineering: Read markdown files from priv/blog/engineering/
Blogex->>Blogex: Parse markdown + frontmatter
Blogex->>Blogex: Create %Blogex.Post{} structs
Note over Blogex: Blogex renders HTML using its own<br/>templates in blogex/components.ex (post_index, post_show)
PhoenixApp->>PhoenixApp: Render blog_html/index.html.heex (via BlogHTML)
Note over PhoenixApp,Browser: Individual Post Request<br/>(GET /blog/engineering/:slug)
Browser->>PhoenixApp: HTTP GET /blog/engineering/new-post
PhoenixApp->>PhoenixApp: FirehoseWeb.BlogController.show
PhoenixApp->>Blogex: Get post by slug
Blogex->>Engineering: Read markdown file
Blogex->>Blogex: Parse and return %Blogex.Post{}
Note over Blogex: Blogex renders show_page for individual posts
PhoenixApp->>PhoenixApp: Render blog_html/show.html.heex (via BlogHTML)
PhoenixApp->>PhoenixApp: Apply FirehoseWeb.Layouts.app layout
PhoenixApp->>PhoenixApp: Wrap with FirehoseWeb.Layouts.root layout
Note over PhoenixApp: Layout provides:<br/>- Navbar (Engineering/Releases/QWAN)<br/>- Theme toggle<br/>- Global CSS (app.css with Tailwind/daisyUI)<br/>- Footer/flash messages
PhoenixApp->>Browser: Return full HTML page
Browser->>Browser: Render page with app styling
```

11
nono.sh Normal file
View File

@ -0,0 +1,11 @@
#!/bin/bash
# Launch `pi` inside a nono sandbox.
# Grants write access to the cwd, mise installs/caches, and pi state; read-only
# access to git metadata and gitconfig; and full network access.
nono_args=(
  run
  --profile pi
  --allow-cwd
  --allow /Users/willem/.local/share/mise
  --allow /Users/willem/.pi
  --read /Users/willem/.git
  --read-file /Users/willem/.gitconfig
  --allow /Users/willem/Library/Caches/mise
  --allow-net
  -- pi --verbose
)
nono "${nono_args[@]}"

61
planner_request.md Normal file
View File

@ -0,0 +1,61 @@
# Refactoring Plan for Firehose Blog Controller Tests
## Context
Based on context.md, we have a Phoenix blog controller with repetitive validation tests that need refactoring.
## Goals
1. Extract test helpers to reduce code duplication
2. Standardize test naming conventions
3. Reorganize tests to follow defensive programming flow
4. Add missing negative test coverage
5. Create separate contexts for different refactorings
## Recommended Planner Agents
### 1. TestHelperExtractor Agent
**Purpose**: Handle Smell 1 (Repetitive Validation Tests) and Smell 4 (Redundant Layout Assertions)
**Tasks**:
- Extract page validation test logic into `test_page_fallback/2` helper
- Create `assert_has_app_layout/1` helper for layout assertions
- Move helpers to support module or test case
**Context Isolation**: This can run in a separate test context without affecting controller logic.
### 2. TestOrganizer Agent
**Purpose**: Handle Smell 5 (Test Order) and Smell 3 (Inconsistent Naming)
**Tasks**:
- Reorder test blocks: validation first, then success cases, then edge cases
- Standardize all test descriptions to follow pattern: "GET /blog/:type/:slug returns [result]"
- Rename describe blocks to follow semantic order
**Context Isolation**: Pure test organization, no production code changes.
### 3. CoverageExpander Agent
**Purpose**: Handle Smell 2 (Missing Negative Tests) and Smell 8 (Edge Cases)
**Tasks**:
- Add test for unknown blog_id (`/blog/invalid`)
- Add test for empty page parameter (`?page=`)
- Add test for very large page numbers
- Add test for invalid blog halt behavior (Smell 6)
**Context Isolation**: Adds new tests without modifying existing logic.
### 4. ResponseHelperCreator Agent
**Purpose**: Handle Smell 7 (Mixed Response Types)
**Tasks**:
- Create `assert_html/2` and `assert_json/2` helpers
- Ensure proper content-type verification
- Update existing tests to use new helpers
## Execution Strategy
Run each agent in isolated contexts:
1. TestHelperExtractor → creates helper functions
2. ResponseHelperCreator → builds response assertions
3. TestOrganizer → reorganizes existing structure
4. CoverageExpander → adds new test cases
This keeps the main thread clean and allows focused changes per agent.

188
refactor_conn_aliasing.sh Executable file
View File

@ -0,0 +1,188 @@
#!/usr/bin/env bash
# Refactor Phoenix controller tests that shadow `conn` via `conn = get(conn, ...)`
# into pipe chains (e.g. `assert conn |> get(...) |> html_response(200)`).
#
# NOTE: the transformation relies on GNU awk's three-argument match() form,
# which BSD awk and mawk do not support — gawk is preferred below.
set -euo pipefail

usage() {
  cat <<'EOF'
Usage: refactor_conn_aliasing.sh [OPTIONS] FILE...
--dry-run Show diff without modifying files
--help Show usage
EOF
}

# Prefer gawk explicitly; fall back to `awk` (which is gawk on most Linux).
AWK=$(command -v gawk || command -v awk) || { echo "Error: awk not found" >&2; exit 1; }
readonly AWK

DRY_RUN=false
FILES=()
while [[ $# -gt 0 ]]; do
  case "$1" in
    --dry-run) DRY_RUN=true; shift ;;
    --help) usage; exit 0 ;;
    -*) echo "Unknown option: $1" >&2; usage >&2; exit 1 ;;
    *) FILES+=("$1"); shift ;;
  esac
done

if [[ ${#FILES[@]} -eq 0 ]]; then
  echo "Error: no files specified" >&2
  usage >&2
  exit 1
fi

# Collect every temp file and remove them all on exit. The trap is installed
# ONCE, before the loop: a per-iteration trap would overwrite the previous one,
# leaking every temp file except the last in --dry-run mode.
TMPFILES=()
cleanup() {
  if [[ ${#TMPFILES[@]} -gt 0 ]]; then
    rm -f -- "${TMPFILES[@]}"
  fi
}
trap cleanup EXIT

for file in "${FILES[@]}"; do
  if [[ ! -f "$file" ]]; then
    echo "Warning: $file not found, skipping" >&2
    continue
  fi

  tmpfile=$(mktemp)
  TMPFILES+=("$tmpfile")

  "$AWK" '
  # Detect trigger line: conn = VERB(conn, ARGS)
  # where VERB is get/post/put/patch/delete/head/options
  /^[[:space:]]*conn = (get|post|put|patch|delete|head|options)\(conn, / {
    trigger_line = $0
    # Extract leading whitespace
    match($0, /^[[:space:]]*/)
    indent = substr($0, RSTART, RLENGTH)
    # Extract verb and args from: conn = verb(conn, args)
    rest = $0
    sub(/^[[:space:]]*conn = /, "", rest)
    # rest is now: verb(conn, args)
    paren_pos = index(rest, "(")
    verb = substr(rest, 1, paren_pos - 1)
    # args portion: everything after "conn, " up to the trailing ")"
    inner = substr(rest, paren_pos + 1)
    sub(/\)$/, "", inner)
    # inner is: conn, args
    sub(/^conn, /, "", inner)
    args = inner
    # Read the next non-blank line
    triggered = 1
    next
  }
  triggered == 1 {
    # Skip blank lines, accumulating them
    if ($0 ~ /^[[:space:]]*$/) {
      blank_lines = blank_lines $0 "\n"
      next
    }
    next_line = $0
    triggered = 0
    # Now look ahead: count how many subsequent lines (until scope boundary)
    # reference "conn" — to decide Case 4 vs Cases 1-3
    # We already have next_line. Check if next_line references conn.
    # Then peek further lines.
    # For simplicity: check if next_line matches Case 1, 2, or 3 patterns.
    # If it does, check the line AFTER that for more conn references (Case 4 override).
    # Case 1: var = helper(conn, status)
    # helpers: html_response, json_response, text_response, response, redirected_to
    case1 = 0
    if (match(next_line, /^[[:space:]]*([a-z_]+) = (html_response|json_response|text_response|response|redirected_to)\(conn, [^)]+\)$/, m1)) {
      case1 = 1
      c1_var = m1[1]
      c1_helper = m1[2]
      # Extract status from helper(conn, status)
      match(next_line, /\(conn, ([^)]+)\)/, m1s)
      c1_status = m1s[1]
    }
    # Case 2: assert helper(conn, status) with optional =~ "..."
    case2 = 0
    if (match(next_line, /^[[:space:]]*assert (html_response|json_response|text_response|response|redirected_to)\(conn, ([^)]+)\)(.*)$/, m2)) {
      case2 = 1
      c2_helper = m2[1]
      c2_status = m2[2]
      c2_tail = m2[3]
    }
    # Case 3: assert %{...} = helper(conn, status)
    case3 = 0
    if (match(next_line, /^[[:space:]]*assert (%\{[^}]*\}) = (html_response|json_response|text_response|response|redirected_to)\(conn, ([^)]+)\)$/, m3)) {
      case3 = 1
      c3_pattern = m3[1]
      c3_helper = m3[2]
      c3_status = m3[3]
    }
    # If we matched Case 1, 2, or 3, emit the merged line
    if (case1) {
      print indent c1_var " = conn |> " verb "(" args ") |> " c1_helper "(" c1_status ")"
      if (blank_lines != "") printf "%s", blank_lines
      blank_lines = ""
      next
    }
    if (case2) {
      print indent "assert conn |> " verb "(" args ") |> " c2_helper "(" c2_status ")" c2_tail
      if (blank_lines != "") printf "%s", blank_lines
      blank_lines = ""
      next
    }
    if (case3) {
      print indent "assert " c3_pattern " = conn |> " verb "(" args ") |> " c3_helper "(" c3_status ")"
      if (blank_lines != "") printf "%s", blank_lines
      blank_lines = ""
      next
    }
    # If next_line references conn at all, this is Case 4 territory
    # (multiple uses without a recognized single-merge pattern)
    if (next_line ~ /conn/) {
      # Case 4: rename to response
      print indent "response = conn |> " verb "(" args ")"
      if (blank_lines != "") printf "%s", blank_lines
      blank_lines = ""
      # Replace conn with response in next_line
      gsub(/conn/, "response", next_line)
      print next_line
      # Continue replacing conn->response in subsequent lines until scope boundary
      renaming = 1
      next
    }
    # No conn reference on next line — leave trigger unchanged (fallback)
    print trigger_line
    if (blank_lines != "") printf "%s", blank_lines
    blank_lines = ""
    print next_line
    next
  }
  # Renaming mode for Case 4: replace conn with response until scope boundary
  renaming == 1 {
    # Scope boundary: blank line, "end", reduced indentation, or new conn = assignment
    if ($0 ~ /^[[:space:]]*$/ || $0 ~ /^[[:space:]]*end$/ || $0 ~ /^[[:space:]]*conn =/) {
      renaming = 0
      print
      next
    }
    gsub(/conn/, "response")
    print
    next
  }
  # Normal mode: pass through
  {
    if (blank_lines != "") {
      printf "%s", blank_lines
      blank_lines = ""
    }
    print
  }
  BEGIN { triggered = 0; renaming = 0; blank_lines = "" }
  ' "$file" > "$tmpfile"

  if [[ "$DRY_RUN" == true ]]; then
    # Show what would change; diff exits 1 when files differ, so guard set -e.
    diff -u "$file" "$tmpfile" || true
  else
    mv "$tmpfile" "$file"
    echo "Refactored: $file"
  fi
done

7
test.sh Executable file
View File

@ -0,0 +1,7 @@
#!/bin/bash
# Run the app's test suite inside the devcontainer:
# trust the workspace mise config, activate mise, fetch deps, run mix test.
set -euo pipefail

MISE=/home/vscode/.local/bin/mise
readonly MISE

# Best-effort: trusting an already-trusted config is a no-op. The original
# suppressed stderr but still aborted under `set -e` on failure — inconsistent;
# an actual problem will surface at the activate step below anyway.
"$MISE" trust /workspaces/firehose/mise.toml 2>/dev/null || true
eval "$("$MISE" activate bash)"

cd /workspaces/firehose/app
mix deps.get
mix test