Compare commits

...

31 Commits
rm ... master

Author SHA1 Message Date
Carson Gross
ab8fd69929 fix typo 2026-02-27 15:13:57 -07:00
Carson Gross
8bf37f519a more tweaks 2026-02-27 14:49:25 -07:00
Carson Gross
e55662ef1a small changes 2026-02-27 14:47:27 -07:00
Carson Gross
65862ada6a more links 2026-02-27 14:25:01 -07:00
Carson Gross
3768106359 yes and essay 2026-02-27 13:58:07 -07:00
Rens
31430d995f
Include target selector in htmx:oobErrorNoTarget event and error log (#3644) 2026-02-03 12:48:58 -07:00
Damien Alexandre
a5fd180db6
fix(essay): Fix typo in Symfony mention in Paris 2024 essay (#3633) 2026-01-29 16:53:50 -07:00
Carson Gross
580549355a fix date 2026-01-20 10:20:30 -07:00
Carson Gross
709512c1ac formatting & a bit of editorial work 2026-01-20 10:19:48 -07:00
Carson Gross
5a374d546b formatting 2026-01-20 10:13:01 -07:00
Carson Gross
1a30b9130e formatting 2026-01-20 10:12:15 -07:00
Carson Gross
ad65bc77ce formatting 2026-01-20 10:11:05 -07:00
Carson Gross
381449089d remove double title 2026-01-20 10:08:00 -07:00
Carson Gross
2e229462e3 add 2024 olympics to the essays page 2026-01-20 10:06:48 -07:00
Rodolphe Trujillo
6b214f11e7
Add essay: Building Critical Infrastructure with htmx for Paris 2024 Olympics (#3627)
* Add essay: Building Critical Infrastructure with htmx for Paris 2024 Olympics

* Refine essay: clarify wording, add note on Tour de France 2025 reuse

* add comma and "the"

---------

Co-authored-by: Rodolphe Trujillo <rodolphe.trujillo@arolo-solutions.com>
2026-01-20 10:04:51 -07:00
Carson Gross
58dc1e247d add sponsor 2026-01-19 15:20:13 -07:00
Alexander Petros
749d5f2f4c
Fix REST links (#3611) 2025-12-31 16:53:41 -07:00
Carson Gross
fcfca903af Merge remote-tracking branch 'origin/master' 2025-12-24 12:46:27 -07:00
Carson Gross
563fff67db add sponsor 2025-12-24 12:46:19 -07:00
Alexander van Saase
9c1297c5f3
website: add Askama to the list of template engines that support template fragments (#3576)
Add Askama to the list of template engines that support fragments
2025-12-11 11:04:26 -07:00
Loren Stewart
e495b68dc3
Add optimistic extension to extensions index (#3474) 2025-11-16 07:37:32 -07:00
Carson Gross
3abaf7eb3f Merge remote-tracking branch 'origin/master' 2025-11-10 12:26:21 -07:00
Carson Gross
e9f2ee94e3 update the-fetchening.md 2025-11-08 20:01:08 -07:00
raven.so.900
bced397c28
Add Nomini to htmx alternatives (#3497)
Add Nomini to alternatives.md
2025-11-08 16:19:33 -07:00
Carson Gross
7a0086fceb improve 2025-11-03 12:49:22 -07:00
Carson Gross
2a1339287e typo 2025-11-03 12:48:52 -07:00
Carson Gross
a275707f4a typo 2025-11-03 12:48:27 -07:00
Carson Gross
f1e0b926d8 typo 2025-11-03 12:42:58 -07:00
Carson Gross
e71f746bad typo 2025-11-03 12:21:19 -07:00
Carson Gross
8b249b1544 correct tag chars 2025-11-03 12:09:23 -07:00
Carson Gross
b7f833b6d5 add article on the fetch()ening 2025-11-03 11:50:52 -07:00
16 changed files with 778 additions and 10 deletions

View File

@ -1508,7 +1508,7 @@ var htmx = (function() {
oobElement.parentNode.removeChild(oobElement) oobElement.parentNode.removeChild(oobElement)
} else { } else {
oobElement.parentNode.removeChild(oobElement) oobElement.parentNode.removeChild(oobElement)
triggerErrorEvent(getDocument().body, 'htmx:oobErrorNoTarget', { content: oobElement }) triggerErrorEvent(getDocument().body, 'htmx:oobErrorNoTarget', { content: oobElement, target: selector })
} }
return oobValue return oobValue
} }
@ -3111,7 +3111,7 @@ var htmx = (function() {
htmx.logger(elt, eventName, detail) htmx.logger(elt, eventName, detail)
} }
if (detail.error) { if (detail.error) {
logError(detail.error) logError(detail.error + (detail.target ? ', ' + detail.target : ''))
triggerEvent(elt, 'htmx:error', { errorInfo: detail }) triggerEvent(elt, 'htmx:error', { errorInfo: detail })
} }
let eventResult = elt.dispatchEvent(event) let eventResult = elt.dispatchEvent(event)

View File

@ -348,14 +348,14 @@ describe('hx-swap-oob attribute', function() {
}) })
} }
it.skip('triggers htmx:oobErrorNoTarget when no targets found', function(done) { it('triggers htmx:oobErrorNoTarget when no targets found', function(done) {
// this test fails right now because when targets not found it returns an empty array which makes it miss the event as it should be if (targets.length)
this.server.respondWith('GET', '/test', "Clicked<div id='nonexistent' hx-swap-oob='true'>Swapped</div>") this.server.respondWith('GET', '/test', "Clicked<div id='nonexistent' hx-swap-oob='true'>Swapped</div>")
var div = make('<div hx-get="/test">click me</div>') var div = make('<div hx-get="/test">click me</div>')
// Define the event listener function so it can be removed later // Define the event listener function so it can be removed later
var eventListenerFunction = function(event) { var eventListenerFunction = function(event) {
event.detail.content.innerHTML.should.equal('Swapped') event.detail.content.innerHTML.should.equal('Swapped')
event.detail.target.should.equal('#nonexistent')
document.body.removeEventListener('htmx:oobErrorNoTarget', eventListenerFunction) document.body.removeEventListener('htmx:oobErrorNoTarget', eventListenerFunction)
done() done()
} }

View File

@ -306,6 +306,17 @@ Thank you to all our generous <a href="https://github.com/sponsors/bigskysoftwar
<img class="dark-visible" src="/img/exchange-rate-api-dark.png" style="width:100%;max-width:250px"> <img class="dark-visible" src="/img/exchange-rate-api-dark.png" style="width:100%;max-width:250px">
</a> </a>
</div> </div>
<div>
<a data-github-account="mersano" href="https://instant-famous.com/">
<img src="/img/rsz_instant_famous.png" style="width:100%;max-width:250px">
</a>
</div>
<div></div>
<div>
<a data-github-account="blacksandsmedia" href="https://hellostake.com/au/referral-code">
<img src="/img/stake.jpeg" style="width:100%;max-width:250px">
</a>
</div>
</div> </div>
<div style="text-align: center;font-style: italic;margin-top: 26px;">ʕ •ᴥ•ʔ made in montana</div> <div style="text-align: center;font-style: italic;margin-top: 26px;">ʕ •ᴥ•ʔ made in montana</div>

View File

@ -91,7 +91,7 @@ within the language:
* Now any element, not just the entire window, can be the target for update by the request * Now any element, not just the entire window, can be the target for update by the request
Note that when you are using htmx, on the server side you typically respond with *HTML*, not *JSON*. This keeps you firmly Note that when you are using htmx, on the server side you typically respond with *HTML*, not *JSON*. This keeps you firmly
within the [original web programming model](https://www.ics.uci.edu/~fielding/pubs/dissertation/rest_arch_style.htm), within the [original web programming model](https://roy.gbiv.com/pubs/dissertation/rest_arch_style.htm),
using [Hypertext As The Engine Of Application State](https://en.wikipedia.org/wiki/HATEOAS) using [Hypertext As The Engine Of Application State](https://en.wikipedia.org/wiki/HATEOAS)
without even needing to really understand that concept. without even needing to really understand that concept.
@ -1379,7 +1379,7 @@ Here is an example of the code in action:
## Scripting {#scripting} ## Scripting {#scripting}
While htmx encourages a hypermedia approach to building web applications, it offers many options for client scripting. Scripting is included in the REST-ful description of web architecture, see: [Code-On-Demand](https://www.ics.uci.edu/~fielding/pubs/dissertation/rest_arch_style.htm#sec_5_1_7). As much as is feasible, we recommend a [hypermedia-friendly](/essays/hypermedia-friendly-scripting) approach to scripting in your web application: While htmx encourages a hypermedia approach to building web applications, it offers many options for client scripting. Scripting is included in the REST-ful description of web architecture, see: [Code-On-Demand](https://roy.gbiv.com/pubs/dissertation/rest_arch_style.htm#sec_5_1_7). As much as is feasible, we recommend a [hypermedia-friendly](/essays/hypermedia-friendly-scripting) approach to scripting in your web application:
* [Respect HATEOAS](/essays/hypermedia-friendly-scripting#prime_directive) * [Respect HATEOAS](/essays/hypermedia-friendly-scripting#prime_directive)
* [Use events to communicate between components](/essays/hypermedia-friendly-scripting#events) * [Use events to communicate between components](/essays/hypermedia-friendly-scripting#events)

View File

@ -24,6 +24,7 @@ page_template = "essay.html"
* [Does Hypermedia Scale?](@/essays/does-hypermedia-scale.md) * [Does Hypermedia Scale?](@/essays/does-hypermedia-scale.md)
### Real World htmx Experiences ### Real World htmx Experiences
* [Building Critical Infrastructure with htmx: Network Automation for the Paris 2024 Olympics](@/essays/paris-2024-olympics-htmx-network-automation.md)
* [A Real World React to htmx Port](@/essays/a-real-world-react-to-htmx-port.md) * [A Real World React to htmx Port](@/essays/a-real-world-react-to-htmx-port.md)
* [Another Real World React to htmx Port](@/essays/another-real-world-react-to-htmx-port.md) * [Another Real World React to htmx Port](@/essays/another-real-world-react-to-htmx-port.md)
* [A Real World wasm to htmx Port](@/essays/a-real-world-wasm-to-htmx-port.md) * [A Real World wasm to htmx Port](@/essays/a-real-world-wasm-to-htmx-port.md)

View File

@ -66,6 +66,12 @@ a single, tidy package that is smaller than htmx.
You can see many examples of Datastar in action [here](https://data-star.dev/examples). You can see many examples of Datastar in action [here](https://data-star.dev/examples).
## Nomini
[Nomini](https://github.com/nonnorm/nomini) is a hypermedia implementation that embraces writing JavaScript in the original and intended way, as a simple enhancement to mostly-static pages. Its goal is to add a minimal layer of LoB on top of HTML to allow for powerful server-driven web apps with easily implemented client-side features. Additionally, it is currently the smallest library existing that gives both reactive variables and partial page swaps (~2.8k minified, ~1.4k minzipped).
In essence, Nomini is a tiny reimplementation of Datastar or a combination of Fixi and Alpine.js, intended to be a minimal, pragmatic building block for reactive server-driven UIs.
## Alpine-ajax ## Alpine-ajax
Speaking of Alpine (which is a common library to use in conjunction with htmx) you should look at Speaking of Alpine (which is a common library to use in conjunction with htmx) you should look at

View File

@ -0,0 +1,204 @@
+++
title = "Building Critical Infrastructure with htmx: Network Automation for the Paris 2024 Olympics"
description = """\
Building critical software infrastructure with htmx, and how the simplification induced by this approach \
is interesting for AI-assisted development."""
date = 2026-01-16
authors = ["Rodolphe Trujillo"]
[taxonomies]
tag = ["posts"]
+++
## A Bit of Background
During my 6 years at Cisco, I developed numerous web applications to assist network engineers with highly complex
operations, both in terms of the volume of tasks to accomplish and the rigor of procedures to follow. Networking is a
specialized field in its own right, where the slightest error can have disastrous consequences: a network failure, even
partial, can deprive millions of people of essential services like the ability to make a simple phone call.
This criticality imposes strict requirements on code meant for network operations: it must be reliable, readable, and
free of unnecessary frills. If there's a problem, you need to be able to immediately trace the data flow and fix it on
the spot. That's why, for years, I've used very few design patterns and banned function calls that call functions that
call functions, and so on. Beyond 2 levels of calls, I abstain.
Following this logic, I favor mature tools over the latest trends. Thus, the Django / Celery / SQLite stack had been in
my toolbox for a long time. But like everyone else in the 2010s, I built SPAs and had never heard of intercooler.js or
hypermedia, and I understood REST the way it's commonly described pretty much everywhere.
For the JS framework, I made a conservative choice (and a marginal one, I know): I chose Ember.js. My motivations were
its strong backward compatibility during updates and native MVC support. This JS framework is excellent, and that's
still my opinion.
After watching David Guillot's presentation on HTMX at DjangoCon Europe 2022, I dug into the subject and prototyped a
component that addressed one of my recurring needs. It's a kind of datatable on which you can trigger actions. There's a
demo video on the HTMX Discord [here](https://discord.com/channels/725789699527933952/909436816388669530/1042451443656966234).
I was a beginner with HTMX and built it in 2-3 days (no AI :-) ). But what was interesting wasn't so much having this
component quickly at hand: it was the 100% Django codebase. One codebase instead of two, one app to maintain, and no
more API contracts to manage between front and back.
And once again, even though I was comfortable with the Ember.js framework, having a single project to maintain changes
everything.
A few weeks later, a concrete use case came up for a major French ISP: configuring L2VPNs on brand-new routers, in bulk,
without configuration errors (obviously), based on configurations from old routers that were end-of-life and about to be
decommissioned. It was highly critical: a single router can handle thousands of clients, and... there are a lot of
routers.
From that point on, I used the Django / Celery + HTMX + SQLite (and Hyperscript) stack and delivered the app in 5 weeks.
My goal was to guide the network engineer by the hand and spare them 100% of the repetitive, tedious work: they just had
to click and confirm, everything was handled. Their role was now limited to their expertise, and if there was a problem,
it was up to them to fix the network.
The project, initially estimated at 18 months, ultimately took 9. And we were lucky: there were no complex corner cases
to handle. And even if there had been, we had plenty of time to deal with them.
HTMX in all this?
If I had to develop the app as a SPA, it would have taken me at least twice as long. Why?
As a solo full-stack developer, simply switching back and forth between codebases is already time-consuming. And that's
just the tip of the iceberg: the front/back approach itself adds a layer of complexity that ends up weighing heavily on
productivity.
## HTMX at the Olympic Games
The Paris 2024 Olympic Games network consisted of thousands of Cisco switches pre-configured to accept Wi-Fi access
points, which self-configured through an automation system developed by Orange and Cisco. Wi-Fi was the most common
connectivity mode at the Games. But in some cases, a physical connection was necessary, most often to plug in a video
camera, but not only. Sometimes there was simply no other choice but to rely on a cable to connect, and therefore to
configure the relevant switch port. That's where an application became necessary.
When Pollux contacted me about his need, he already had a data model for his network services in a Django project.
Additionally, he could deploy services via CLI: part of the business logic was already in place. The problem was that
service deployment parameters needed to be consolidated in an application. In CLI, you have to manage different data
sources, which can quickly become complicated for the user. So it was necessary to centralize these business parameters
in a webapp, expose all the data needed to deploy a service, and provide a GUI to configure them.
The Games were approaching and Pollux didn't have time to build the webapp: as the architect of the Olympic network, he
was overwhelmed by a colossal number of tasks. I showed him the L2VPN app mentioned above and specified the 5-week
delivery timeline. I told him that if it suited him, I could build him an HTMX webapp based on his existing Django
project and a Bootstrap CSS customized internally by Orange.
We agreed on an 8-week timeline to cover the need, which involved 3 connectivity services: Direct Internet Access,
Private VLAN, and Shared Internet Access.
## Web Dev with HTMX
HTMX is somewhat a return to the roots of web development, and regardless of the web framework: Django, ROR, Symfony...
You rediscover everything that makes a web framework useful rather than turning it into a mere JSON provider. Sending
HTML directly to the browser, storing the app state directly in the HTML. That's what true REST is, and it's so much
simpler to manage.
If you ask me what's most striking, it's certainly returning to very simple things like this:
<figure>
<center>
![progress bar](/img/paris-olympics-progress-bar.png)
</center>
<figcaption> Progress bar from RCP Portal </figcaption>
</figure>
How does this progress bar work?
Exactly like [the example in the docs](https://htmx.org/examples/progress-bar/)!
Why this choice? Because it's coded in 10 seconds, because the app won't have thousands of users on this internal tool,
no scaling concerns: you can do good old data polling without any problem.
And the end user? If I use old-school polling, they don't care: what they want is the information. No SSE or WebSocket
for this use case, I don't need it. And if the need ever arises, the WebSocket (or SSE) plugin is easy to set up.
One of the big advantages of the philosophy surrounding HTMX is the notion of [Locality of Behaviour](@/essays/locality-of-behaviour.md). Let's take this
progress bar: if you want to know how it works, just look at the page source. No need to go into documentation or the
codebase, just a right-click and "View Page Source":
```html
<div
hx-get="/job/progress"
hx-trigger="every 600ms"
hx-target="this"
hx-swap="innerHTML">
<div class="progress" role="progressbar" aria-valuemin="0" aria-valuemax="100" aria-valuenow="0"
aria-labelledby="pblabel">
<div id="pb" class="progress-bar" style="width:0%">
</div>
</div>
```
You immediately know that every 600ms, this part of the page is updated with the content returned by the view handling
the `/job/progress` endpoint. No mystery for the team taking over development who wants to modify something: everything
you need to know is right in front of you.
And that's exactly what HTMX is about: every component, every interaction remains visible, understandable, and
self-documented directly in the HTML. This is important for what comes next.
## HTMX is "AI friendly"™
In the early stages of app development, I focused on the most complex network service: DIA (Direct Internet Access). DIA
for the Olympic Games meant many business parameters with many rules to apply.
The DIA creation form calls an endpoint that triggers a very long function, close to 600 lines of code.
Why such a long function?
Because it's more readable and efficient to concentrate the data flow in one place, rather than dispersing it across a
multitude of layers and patterns.
An application is a wrapper around data: it orchestrates the data flow (data must flow!) and CRUD operations. But what
control do you retain when this flow is obfuscated in complex patterns or dispersed across 2 codebases?
The data flow must remain readable for the developer.
HTMX, by allowing you to manage the GUI directly server-side, makes this flow even clearer. The same endpoint can return
HTML fragments to signal that certain form data is invalid, or conversely indicate that a service deployment has
started. You can thus act on any part of the GUI within the same function, while transforming the data to pass it to the
system that configures the network switch.
In a traditional frontend/backend approach, this would be more complex: two applications to manage, and a much less
readable data flow.
This drastic code simplification enabled by HTMX, combined with a procedural approach, produces compact and transparent
logic, easy to navigate for a developer... or for an LLM, as I discovered.
For the Private VLAN (PVLAN) network service, the "shape" of the main function is roughly the same as for DIA: input
parameters, validation, then interactions with the GUI via HTML fragments, and, if everything is correct, switch
configuration.
The difference? PVLAN is simpler to handle: fewer form parameters and a bit less business logic.
So I took the long DIA function and gave it to an LLM (Claude 3 had just been released), with a prompt specifying the
parameters specific to PVLAN. In seconds, Claude returned a new adapted function, and the same for the HTML templates.
Result: about 80% of the code was ready, with only a few points to correct and relatively few errors made by the LLM,
which freed up time for me to add specific business logic for a major client.
For the third network service, Shared Internet Access (SIA), even simpler than the previous two, I provided the LLM with
both the DIA and PVLAN functions. With the magic word *"extrapolation"* in the prompt, the generated code was 95%
correct.
## Summary of My Experience
- **DIA**: 0% AI, 100% handwritten code (business logic + GUI + overhaul of the switch configuration task management
system) → **4 weeks**
- **PVLAN**: 80% AI, 20% handwritten code (corrections + adding specific business logic) → **1 week**
- **SIA**: 95% AI, 5% handwritten code (minor corrections) → **1 day**
The time saved was reinvested in testing, bug fixes, project management, and even a few additions outside the initial
scope.
Moreover, the same app was used on the "Tour de France 2025" with minor changes that were made easily thanks to the
hypermedia approach.
This result is possible because of the combination of *HTMX + the procedural approach*, which produces naturally
readable code, without unnecessary abstraction layers. The data flow is clear, concentrated in a single function, and
the GUI/server logic is directly accessible.
For an LLM, this is ideal: it doesn't need to construct context through a complex architecture. It just needs to follow
the flow and extrapolate it to a new use case. In other words, what's simpler for the developer is also simpler for the
AI. This is the sense in which HTMX is truly *"AI friendly"™*.
Ultimately, HTMX mainly allowed me to save time and keep my code clear.
No unnecessary layers, no superfluous complexity: just concrete stuff that works, fast.
And that has made a big difference on these critical projects.

View File

@ -166,6 +166,7 @@ Here are some known implementations of the fragment concept:
* [Giraffe.ViewEngine.Htmx](https://github.com/bit-badger/Giraffe.Htmx/tree/main/src/ViewEngine.Htmx) * [Giraffe.ViewEngine.Htmx](https://github.com/bit-badger/Giraffe.Htmx/tree/main/src/ViewEngine.Htmx)
* Rust * Rust
* [MiniJinja](https://docs.rs/minijinja/latest/minijinja/struct.State.html#method.render_block) * [MiniJinja](https://docs.rs/minijinja/latest/minijinja/struct.State.html#method.render_block)
* [Askama](https://askama.readthedocs.io/en/stable/template_syntax.html#block-fragments)
* Raku * Raku
* [Cro Templates](https://github.com/croservices/cro-website/blob/main/docs/reference/cro-webapp-template-syntax.md#fragments) * [Cro Templates](https://github.com/croservices/cro-website/blob/main/docs/reference/cro-webapp-template-syntax.md#fragments)

View File

@ -0,0 +1,269 @@
+++
title = "The fetch()ening"
description = """\
You know, technically, I never said anything about a version *four*"""
date = 2025-11-01
authors = ["Carson Gross"]
[taxonomies]
tag = ["posts"]
+++
![Stop trying to make fetch() happen](/img/fetch.png)
OK, I said there would never be a version three of htmx.
But, _technically_, I never said anything about a version *four*...
## htmx 4: The fetch()ening
In [The Future of htmx](@/essays/hypermedia-driven-applications.md) I said the following:
> We are going to work to ensure that htmx is extremely stable in both API & implementation. This means accepting and
documenting the [quirks](https://htmx.org/quirks/) of the current implementation.
Earlier this year, on a whim, I created [fixi.js](https://github.com/bigskysoftware/fixi), a hyperminimalist implementation
of the ideas in htmx. That work gave me a chance to get a lot more familiar with the `fetch()` and, especially, the
[async](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/async_function) infrastructure
available in JavaScript.
In doing that work I began to wonder if that, while the htmx [API](https://htmx.org/reference/#attributes)
is (at least reasonably) correct, maybe there was room for a more dramatic change of the implementation that took
advantage of these features in order to simplify the library.
Further, changing from ye olde [`XMLHttpRequest`](https://developer.mozilla.org/en-US/docs/Web/API/XMLHttpRequest)
(a legacy of htmx 1.0 IE support) to [`fetch()`](https://developer.mozilla.org/en-US/docs/Web/API/Window/fetch) would
be a pretty violent change, guaranteed to break at least *some* stuff.
So I began thinking: if we are going to consider moving to fetch, then maybe we should _also_ use this update as a
chance to address at least _some_ of the [quirks & cruft](https://htmx.org/quirks/) that htmx has acquired over its lifetime.
So, eventually & reluctantly, I have changed my mind: there _will_ be another major version of htmx.
However, in order to keep my word that there will not be a htmx 3.0, the next release will instead be htmx 4.0.
## Project Goals
With htmx 4.0 we are rebuilding the internals of htmx, based on the lessons learned from
fixi.js and [five+ years](https://www.npmjs.com/package/htmx.org/v/0.0.1) of supporting htmx.
There are three major simplifying changes:
### The fetch()ening
The biggest internal change is that `fetch()` will replace `XMLHttpRequest` as the core ajax infrastructure. This
won't actually have a huge effect on most usages of htmx _except_ that the events model will necessarily change due
to the differences between `fetch()` and `XMLHttpRequest`.
### Explicit Inheritance By Default
I feel that the biggest mistake in htmx 1.0 & 2.0 was making attribute inheritance implicit. I was inspired by CSS in
doing this, and the results have been roughly the same as CSS: powerful & maddening.
In htmx 4.0, attribute inheritance will be explicit by default rather than implicit. Explicit inheritance will
be done via the `:inherited` modifier:
```html
<div hx-target:inherited="#output">
<button hx-post="/up">Like</button>
<button hx-post="/down">Dislike</button>
</div>
<output id="output">Pick a button...</output>
```
Here the `hx-target` attribute is explicitly declared as `inherited` on the enclosing `div` and, if it wasn't, the
`button` elements would not inherit the target from it.
You will be able to revert to htmx 2.0 implicit inheritance behavior via a configuration variable.
### No Locally Cached History
Another source of pain for both us and for htmx users is history support. htmx 2.0 stores history in local
cache to make navigation faster. Unfortunately, snapshotting the DOM is often brittle because of third-party
modifications, hidden state, etc. There is a terrible simplicity to the web 1.0 model of blowing everything away and
starting over. There are also security concerns storing history information in session storage.
In htmx 2.0, we often end up recommending that people facing history-related issues simply disable the cache entirely,
and that usually fixes the problems.
In htmx 4.0, history support will no longer snapshot the DOM and keep it locally. It will, rather, issue a network
request for the restored content. This is the behavior of 2.0 on a history cache-miss, and it works reliably with
little effort on behalf of htmx users.
We will offer an extension that enables history caching like in htmx 2.0, but it will be opt-in, rather than the default.
This tremendously simplifies the htmx codebase and should make the out-of-the-box behavior much more plug-and-play.
## What Stays The Same?
Most things.
The [core](https://dl.acm.org/doi/10.1145/3648188.3675127) functionality of htmx will remain the same, `hx-get`, `hx-post`,
`hx-target`, `hx-boost`, `hx-swap`, `hx-trigger`, etc.
With a few configuration tweaks, most htmx 2.x based applications should work with htmx 4.x.
These changes will make the long term maintenance & sustainability of the project much stronger. It will also take
pressure off the 2.0 releases, which can now focus on stability rather than contemplating new features.
## Upgrading
htmx 2.0 users _will_ face an upgrade project when moving to 4.0 in a way that they did not have to in moving
from 1.0 to 2.0.
I am sorry about that, and want to offer two things to address it:
* htmx 2.0 (like htmx 1.0 & intercooler.js 1.0) will be supported _in perpetuity_, so there is absolutely _no_ pressure to
upgrade your application: if htmx 2.0 is satisfying your hypermedia needs, you can stick with it.
* We will roll htmx 4.0 out slowly, over a multi-year period. As with the htmx 1.0 -> 2.0 upgrade, there will be a long
period where htmx 2.x is `latest` and htmx 4.x is `next`
## New Features
Beyond simplifying the implementation of htmx significantly, switching to fetch also gives us the opportunity to add
some nice new features to htmx
### Streaming Responses & SSE in Core
By switching to `fetch()`, we can take advantage of its support for
[readable streams](https://developer.mozilla.org/en-US/docs/Web/API/Streams_API/Using_readable_streams), which
allow for a stream of content to be swapped into the DOM, rather than a single response.
htmx 1.0 had Server Sent Event support integrated into the library. In htmx 2.0 we pulled this functionality out as an
extension. It turns out that SSE is just a specialized version of a streaming response, so in adding streaming
support, it's an almost-free two-fer to add that back into core as well.
This will make incremental response swapping much cleaner and well-supported in htmx.
### Morphing Swap in Core
[Three years ago](https://github.com/bigskysoftware/idiomorph/commit/7760e89d9f198b43aa7d39cc4f940f606771f47b) I had
an idea for a DOM morphing algorithm that improved on the initial algorithm pioneered by [morphdom](https://github.com/patrick-steele-idem/morphdom).
The idea was to use "id sets" to make smarter decisions regarding which nodes to preserve and which nodes to delete when
merging changes into the DOM, and I called this idea "idiomorph". Idiomorph has gone on to be adopted by many other
web projects such as [Hotwire](https://hotwired.dev/).
We strongly considered including it in htmx 2.0, but I decided not to because it worked well as an extension and
htmx 2.0 had already grown larger than I wanted.
In 4.0, with the complexity savings we achieved by moving to `fetch()`, we can now comfortably fit a `morphInner` and
`morphOuter` swap into core, thanks to the excellent work of Michael West.
### Explicit &lt;htmx-partial&gt; Tag Support
htmx has, since very early on, supported a concept of "Out-of-band" swaps: content that is removed from the main HTML
response and swapped into the DOM elsewhere. I have always been a bit ambivalent about them, because they move away
from [Locality of Behavior](https://htmx.org/essays/locality-of-behaviour/), but there is no doubt that they are useful
and often crucial for achieving certain UI patterns.
Out-of-band swaps started off very simply: if you marked an element as `hx-swap-oob='true'`, htmx would swap the element
as the outer HTML of any existing element already in the DOM with that id. Easy-peasy.
However, over time, people started asking for different functionality around Out-of-band swaps: prepending, appending,
etc. and the feature began acquiring some fairly baroque syntax to handle all these needs.
We have come to the conclusion that the problem is that there are really _two_ use cases, both currently trying to be
filled by Out-of-band swaps:
* A simple, id-based replacement
* A more elaborate swap of partial content
Therefore, we are introducing the notion of `<htmx-partial>`s in htmx 4.0.
A partial element is, under the covers, a template element and, thus, can contain any sort of content you like. It
specifies on itself all the standard htmx options regarding swapping, `hx-target` and `hx-swap` in particular, allowing
you full access to all the standard swapping behavior of htmx without using a specialized syntax. This tremendously
simplifies the mental model for these sorts of needs, and dovetails well with the streaming support we intend to offer.
Out-of-band swaps will be retained in htmx 4.0, but will go back to their initial, simple focus: replacing
an existing element by id.
### Improved View Transitions Support
htmx 2.0 has had [View Transition](https://developer.mozilla.org/en-US/docs/Web/API/View_Transition_API) support since
[April of 2023](https://github.com/bigskysoftware/htmx/blob/master/CHANGELOG.md#190---2023-04-11). In the intervening
two years, support for the feature has grown across browsers (c'mon, safari, you can do it) and we've gained experience
with the feature.
One thing that has become apparent to us while using them is that, to use them in a stable manner, it is important
to establish a _queue_ of transitions, so each can complete before the next begins. If you don't do this, you can get
visually ugly transition cancellations.
So, in htmx 4.0 we have added this queue which will ensure that all view transitions complete smoothly.
CSS transitions will continue to work as before as well, although the swapping model is again made much simpler by the
async runtime.
We may enable View Transitions by default, the jury is still out on that.
### Stabilized Event Ordering
A wonderful thing about `fetch()` and the async support in general is that it is _much_ easier to guarantee a stable
order of events. By linearizing asynchronous code and allowing us to use standard language features like try/catch,
the event model of htmx should be much more predictable and comprehensible.
We are going to adopt a new standard for event naming to make things even clearer:
`htmx:<phase>:<system>[:<optional-sub-action>]`
So, for example, `htmx:before:request` will be triggered before a request is made.
### Improved Extension Support
Another opportunity we have is to take advantage of the `async` behavior of `fetch()` for much better performance in our
preload extension (where we issue a speculative (`GET` only!) request in anticipation of an actual trigger). We have
also added an optimistic update extension to the core extensions, again made easy by the new async features.
In general, we have opened up the internals of the htmx request/response/swap cycle much more fully to extension developers,
up to and including allowing them to replace the `fetch()` implementation used by htmx for a particular request. There
should not be a need for any hacks to get the behavior you want out of htmx now: the events and the open "context" object
should provide the ability to do almost anything.
### Improved `hx-on` Support
In htmx 2.0, I somewhat reluctantly added the [`hx-on`](https://htmx.org/attributes/hx-on/) attributes to support light
scripting inline on elements. I added this because HTML does not allow you to listen for arbitrary events via `on`
attributes: only standard DOM events like `onclick` can be responded to.
We hemmed and hawed about the syntax and so, unfortunately, there are a few different ways to do it.
In htmx 4.0 we will adopt a single standard for the `hx-on` attributes: `hx-on:<event name>`. Additionally, we are
working to improve the htmx JavaScript API (especially around async operation support) and will make those features
available in `hx-on`:
```html
<button hx-post="/like"
hx-on:htmx:after:swap="await timeout('3s'); ctx.newContent[0].remove()">
Get A Response Then Remove It 3 Seconds Later
</button>
```
htmx will never support a fully featured scripting mechanism in core, we recommend something like
[Alpine.js](https://alpinejs.dev/) for that, but our hope is that we can provide a relatively minimalist API that
allows for easy, light async scripting of the DOM.
I should note that htmx 4.0 will continue to work with `eval()` disabled, but you will need to forgo a few features like
`hx-on` if you choose to do so.
### A Better But Familiar htmx
All in all, our hope is that htmx 4.0 will feel an awful lot like 2.0, but with better features and, we hope, with fewer bugs.
## Timeline
As always, software takes as long as it takes.
However, our current planned timeline is:
* An alpha release is available _today_: `htmx@4.0.0-alpha1`
* A 4.0.0 release should be available in early-to-mid 2026
* 4.0 will be marked `latest` in early-2027ish
You can track our progress (and see quite a bit of dust flying around) in the `four` branch on
[github](https://github.com/bigskysoftware/htmx/tree/four) and at:
<https://four.htmx.org>
Thank you for your patience and pardon our dust!
> "Well, when events change, I change my mind. What do you do?" --Paul Samuelson or John Maynard Keynes

View File

@ -0,0 +1,271 @@
+++
title = "Yes, and..."
description = """\
In this essay, Carson Gross discusses his advice to young people interested in computer science worried about the \
future given the advancements in AI."""
date = 2026-02-27
updated = 2026-02-27
authors = ["Carson Gross"]
[taxonomies]
tag = ["posts"]
+++
I teach computer science at [Montana State University](https://www.cs.montana.edu). I am the father of three sons who
all know I am a computer programmer and one of whom, at least, has expressed interest in the field. I love computer
programming and try to communicate that love to my sons, the students in my classes and anyone else who will listen.
A question I am increasingly getting from relatives, friends and students is:
> Given AI, should I still consider becoming a computer programmer?
My response to this is: "Yes, and..."
## "Yes"
Computer programming is, fundamentally, about two things:
* Problem-solving using computers
* Learning to control complexity while solving these problems
I have a hard time imagining a future where knowing how to solve problems with computers and how to control the complexity
of those solutions is *less* valuable than it is today, so I think it will continue to be a viable career even with the
advent of AI tools.
### "You have to write the code"
That being said, I view AI as very dangerous for junior programmers because it _is_ able to effectively generate code for
many problems. If a junior programmer does not learn to write code and simply generates it, they are robbing
themselves of the opportunity to develop the visceral understanding of code that comes with being down in the trenches.
Because of this, I warn my students:
"Yes, AI can generate the code for this assignment. Don't let it. You _have_ to write the code."
I explain that, if they don't write the code, they will not be able to effectively _read_ the code. The ability to
read code is certainly going to be valuable, maybe _more_ valuable, in an AI-based coding future.
If you can't read the code you are going to fall into [The Sorcerer's Apprentice Trap](https://www.youtube.com/watch?v=m-W8vUXRfxU),
creating systems [you don't understand and can't control](https://www.youtube.com/watch?v=GFiWEjCedzY).
### Is Coding &rarr; Prompting like Assembly &rarr; High Level Coding?
Some people say that the move from high level languages to AI-generated code is like the move from assembly to
[high level programming languages](https://en.wikipedia.org/wiki/High-level_programming_language).
I do not agree with this simile.
Compilers are, for the most part, deterministic in a way that current AI tools are not. Given a high-level programming
language construct such as a for loop or if statement, you can, with reasonable certainty, say what the generated
assembly will look like for a given computer architecture (at least pre-optimization).
The same cannot be said for an LLM-based solution to a particular prompt.
High level programming languages are a _very good_ way to create highly specified solutions to problems
using computers with a minimum of text in a way that assembly was not. They eliminated a lot of
[accidental complexity](https://en.wikipedia.org/wiki/No_Silver_Bullet), leaving (assuming the code was written
reasonably well) mostly necessary complexity.
LLM generated code, on the other hand, often does not eliminate accidental complexity and, in fact, can add
significant accidental complexity by choosing inappropriate approaches to problems, taking shortcuts, etc.
If you can't read the code, how can you tell?
And if you want to read the code you must write the code.
### AI is a great TA
Another thing that I tell my students is that AI, used properly, is a tremendously effective TA. If you don't use it
as a code-generator but rather as a partner to help you understand concepts and techniques, it can provide a huge boost
to your intellectual development.
One of the most difficult things when learning computer programming is getting "stuck". You just don't see the trick
or know where to even start well enough to make progress.
Even worse is when you get stuck due to accidental complexity: you don't know how to work with a particular tool chain
or even what a tool chain is.
This isn't a problem with *you*, this is a problem with your environment. Getting stuck pointlessly robs you of time to
actually be learning and often knocks people out of computer science.
(I got stuck trying to learn Unix on my own at Berkeley, which is one reason I dropped out of the computer science
program there.)
AI can help you get past these roadblocks, and can be a great TA if used correctly. I have posted an
[AGENTS.md](https://gist.github.com/1cg/a6c6f2276a1fe5ee172282580a44a7ac) file that I provide to my students to configure
coding agents to behave like a great TA, rather than a code generator, and I encourage them to use AI in this role.
AI doesn't *have* to be a detriment to your ability to grow as a computer programmer, so long as it is used
appropriately.
## ", and..."
I do think AI is going to change computer programming. Not as dramatically as some people think, but in some
fundamental ways.
### Raw coding may become less important
It may be that the *act* of coding will lose *relative* value.
I regard this as too bad: I usually like the act of coding, it is fun to make something do something with your
(metaphorical) bare hands. There is an art and satisfaction to writing code well, and lots of aesthetic decisions to be
made doing it.
However, it does appear that raw code writing prowess may be less important in the future.
As this becomes relatively less important, it seems to me that other skills will become more important.
### Communication Skills
For example, the ability to write, think and communicate clearly, both with LLMs and humans, seems likely to be much more
important in the future. Many computer programmers have a literary bent anyway, and this is a skill that will likely
increase in value over time and is worth working on.
Reading books and writing essays/blog posts seem like activities likely to help in this regard.
### Understanding Business
Another thing you can work on is turning some of your mental energy towards understanding a business (or government
role, etc) better.
Computer programming is about solving problems with computers and businesses have plenty of both of these.
Some business folks look at AI and say "Great, we don't need programmers!", but it seems just as plausible to me that
a programmer might say "Great, we don't need business people!"
I think both of these views are short-sighted, but I do think that AI can give programmers the ability to continue
fundamentally working as a programmer while *also* investing more time in understanding the real-world problems (business or
otherwise) that they are solving.
This dovetails well with improving communication skills.
### "Architecting" Systems
Like many computer programmers, I am ambivalent towards the term "software architect." I have seen
[architect astronauts](https://www.joelonsoftware.com/2001/04/21/dont-let-architecture-astronauts-scare-you/) inflict
a lot of pain on the world.
For lack of a better term, however, I think software architecture will become a more important skill over time: the
ability to organize large software systems effectively and, crucially, to control the complexity of those systems.
A tough part of this for juniors is that traditionally the ability to architect larger solutions well has come from
experience building smaller parts of systems, first poorly then, over time, more effectively.
Most bad architects I have met were either bad coders or simply didn't have much coding experience at all.
If you let AI take over as a code generator for the "simple" stuff, how are you going to develop the intuitions necessary
to be an effective architect?
This is why, again, you must write the code.
### Using LLMs Effectively
Another skill that seems likely to increase in value (obviously) is knowing how to use LLMs effectively. I think that
currently we are still in the process of figuring out what that means.
I also think that what this means varies by experience level.
#### Seniors
Senior programmers who already have a lot of experience from the pre-AI era are in a good spot to use LLMs effectively:
they know what "good" code looks like, they have experience with building larger systems and know what matters and
what doesn't. The danger with senior programmers is that they stop programming entirely and start suffering from
[brain rot](https://www.media.mit.edu/publications/your-brain-on-chatgpt/).
Particularly dangerous is firing off prompts and then getting sucked into
[The Eternal Scroll](https://theneverendingstory.fandom.com/wiki/The_Nothing) while waiting.
Ask me how I know.
I typically try to use LLMs in the following way:
* To analyze existing code to better understand it and find issues and inconsistencies in it
* To help organize my thoughts for larger projects I want to take on
* To generate relatively small bits of code for systems I am working on
* To generate code that I don't enjoy writing (e.g. regular expressions & CSS)
* To generate demos/exploratory code that I am willing to throw away or don't intend to maintain deeply
* To suggest tests for a particular feature I am working on
I try not to use LLMs to generate full solutions that I am going to need to support. I will sometimes use LLMs alongside
my manual coding as I build out a solution to help me understand APIs and my options while coding.
I never let LLMs design the APIs to the systems I am building.
#### Juniors
Juniors are in a tougher spot. I will say it again: you must write the code.
The temptation to vibe your way through problems is very, very high, but you will need to fight against that temptation.
Peers *will* be vibing their way through things and that will be annoying: you will need to work harder than they do,
and you may be criticized for being slow. The work dynamics here are important to understand: if your company
prioritizes speed over understanding (as many are currently) you need to accept that and not get fired.
However, I think that this is a temporary situation and that soon companies are going to realize that vibe coding at
speed suffers from worse complexity explosion issues than well understood, deliberate coding does.
At that point I expect slower, more deliberate coding with AI assistance will be understood as the best way to utilize
this new technology.
Where AI _can_ help juniors is in accelerating the road to senior developer by eliminating accidental complexity that often
trips juniors up. As I said above, viewing AI as a useful although sometimes overly-eager helper rather than a servant
can be very effective in understanding the shape of code bases, what the APIs and techniques available for a particular
problem are, how a given build system or programming language works, etc.
But you must write the code.
And companies: you must let juniors write the code.
## Getting a Job Today
The questions I get around AI and programming fundamentally revolve around getting a decent job.
It is no secret that the programmer job market is bad right now, and I am seeing good CS students struggle to find
positions programming.
While I do not have a crystal ball, I believe this is a temporary rather than permanent situation. The computer
programmer job market tends to be cyclical with booms and busts, and I believe we will recover from the current bust
at some point.
That's cold comfort to someone looking for a job now, however, so I want to offer the specific job-seeking advice that
I give to my students.
### Family, Friends, Family of Friends
I view the online job sites as mostly pointless, especially for juniors. They are a lottery and the chances of finding
a good job through them are low. Since they are free they are probably still worth using, but they are not worth
investing a lot of time in.
A better approach is the four F's: Family, Friends & Family of Friends. Use your personal connections to find positions
at companies where you have the competitive advantage of knowing people on the inside. Family is the strongest
possibility. Friends are often good too. Family of friends is weaker, but also worth asking about. If you know or
are only a few degrees separated from someone at a company you have a much stronger chance of getting a job at that
company.
I stress to many students that this doesn't mean your family has to work for Google or some other big tech company.
*All* companies of any significant size have problems that need to be solved using computers. Almost every company over 100
people has some sort of development group, even if they don't call it that.
As an example, I had a student who was struggling to find a job. I asked what their parent did, and they said they worked
for Costco corporate.
I told them that they were in fact extremely lucky and that this was their ticket into a great company.
Maybe they don't start as a "computer programmer" there, maybe they start as an analyst or some other role. But the
ability to program on top of that role will be very valuable and likely set up a great career.
## Conclusion
So I still think pursuing computer programming as a career is a good idea. The current job market is bad, no doubt, but
I think this is temporary.
I do think how computer programming is done is changing, and programmers should look at building up skills beyond
"pure" code-writing. This has always been a good idea.
I don't think programming is changing as dramatically as some people claim and I think the fundamentals of programming,
particularly writing good code and controlling complexity, will be perennially important.
I hope this essay is useful in answering that question, especially for junior programmers, and helps people feel
more confident entering a career that I have found very rewarding and expect to continue to do for a long time.
And companies: let the juniors write at least some of the code. It is in your interest.

View File

@ -384,6 +384,7 @@ in the DOM to switch with.
##### Details ##### Details
* `detail.content` - the element with the bad oob `id` * `detail.content` - the element with the bad oob `id`
* `detail.target` - the bad CSS selector
### Event - `htmx:onLoadError` {#htmx:onLoadError} ### Event - `htmx:onLoadError` {#htmx:onLoadError}

View File

@ -130,6 +130,10 @@ htmx extensions are split into two categories:
<td>{% markdown() %} [dynamic-url](https://github.com/FumingPower3925/htmx-dynamic-url/blob/main/README.md) {% end %}</td> <td>{% markdown() %} [dynamic-url](https://github.com/FumingPower3925/htmx-dynamic-url/blob/main/README.md) {% end %}</td>
<td>{% markdown() %} Allows dynamic URL path templating using `{varName}` placeholders, resolved via configurable custom function or `window.` fallback. It does not rely on `hx-vals`. Useful when needing to perform requests to paths that depend on application state. {% end %}</td> <td>{% markdown() %} Allows dynamic URL path templating using `{varName}` placeholders, resolved via configurable custom function or `window.` fallback. It does not rely on `hx-vals`. Useful when needing to perform requests to paths that depend on application state. {% end %}</td>
</tr> </tr>
<tr>
<td>{% markdown() %} [optimistic](https://github.com/lorenseanstewart/hx-optimistic/blob/main/README.md) {% end %}</td>
<td>{% markdown() %} This extension provides a way to optimistically update the UI to increase perceived performance {% end %}</td>
</tr>
</tbody> </tbody>
<tbody> <tbody>
<tr><th scope="rowgroup" colspan="2">Data API</th></tr> <tr><th scope="rowgroup" colspan="2">Data API</th></tr>

BIN
www/static/img/fetch.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 375 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.2 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 11 KiB

BIN
www/static/img/stake.jpeg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 24 KiB