The Optimization Fallacy

About This Sketch

A generative visualization of optimization versus adaptability. Fifty agents with different optimization levels pursue a target that periodically shifts position. Highly optimized agents (darker, more direct) excel at reaching the current goal but struggle when it moves. Less optimized agents (lighter, more wandering) are slower but more adaptable to change. The sketch explores the post's central thesis: optimization requires stable goals, but life's meaningful objectives shift as we pursue them.

Algorithm

This sketch visualizes the tension between optimization and adaptability through a multi-agent system pursuing a shifting target. Each of the 50 agents has a different "optimization factor" ranging from 0.3 to 1.0. Highly optimized agents (darker, more orange) move directly toward the current target with little random noise; less optimized agents (lighter, more muted) wander more but retain more adaptability. The target shifts position every 180 frames (roughly every 3 seconds at 60 fps), representing how life's goals change over time. Highly optimized agents reach the current target quickly along straight paths, but when the target moves, their commitment to the previous trajectory becomes a liability, while the meandering agents adjust to the new goal more easily. The visual result shows two kinds of trail patterns: tight, efficient paths versus wandering explorations. Neither is universally better; context determines which approach succeeds.
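
The heart of the system is the per-agent update rule, in which a single optimizationFactor trades goal-seeking force against random noise. A simplified excerpt from the full source below (dirX and dirY hold the normalized direction from the agent to the target):

// Steer toward the target; more optimized agents push harder
let force = 0.5 * agent.optimizationFactor;
agent.vx += dirX * force;
agent.vy += dirY * force;

// Noise scales inversely: less optimized agents wander more
let noiseStrength = (1 - agent.optimizationFactor) * 0.3;
agent.vx += p.random(-noiseStrength, noiseStrength);
agent.vy += p.random(-noiseStrength, noiseStrength);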

Pseudocode

INITIALIZATION:
  Create 50 agents with varying optimization factors (0.3 to 1.0)
  Place agents randomly on canvas
  Set initial target position
  Each agent maintains a trail of previous positions

EACH FRAME:
  Fade previous frame slightly (create trail effect; see the snippet after this pseudocode)
  Increment system time

  Every 180 frames:
    Move target to new random position (goals shift)

  Draw target circle with glow

  For each agent:
    Calculate direction to current target
    Apply force toward target (scaled by optimization factor)
    Add random noise (scaled inversely to optimization factor)
    Apply drag to velocity
    Update position
    Handle boundary collisions
    Store position in trail
    Draw trail with fading alpha
    Draw agent circle (color based on optimization level)

  Draw border frame

VISUAL ENCODING:
  Color darkness = optimization strength
  Trail straightness = optimization strength
  Target = current goal (shifts periodically)
  Border = constraints of reality
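
The "fade previous frame" step is a common p5.js trick: rather than clearing the canvas, the draw loop paints a translucent, background-colored rectangle over everything, so older marks dim gradually into trails. Excerpted from the source below:

// A low-alpha background wash each frame leaves ghosted trails
// (alpha 15 out of 255 dims old pixels by roughly 6% per frame)
p.fill(...colors.bg, 15);
p.rect(0, 0, 400, 300);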

Source Code
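
The listing calls getThemeColors(), which this page presumably defines elsewhere to match the site theme. Here is a minimal stand-in so the sketch runs on its own; the RGB values are placeholders, not the site's actual palette:

function getThemeColors() {
    // Placeholder palette: background plus three accents, each [r, g, b]
    return {
        bg: [250, 247, 240],      // off-white background
        accent1: [200, 90, 40],   // target glow; "optimized" end of the trail lerp
        accent2: [230, 130, 50],  // agent bodies and target center dot
        accent3: [150, 140, 130]  // muted end: unoptimized agents, border
    };
}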

let sketch = function(p) {
    let agents = [];
    let targetX, targetY;
    let systemTime = 0;

    class Agent {
        constructor(x, y, id) {
            this.x = x;
            this.y = y;
            this.id = id;
            this.vx = 0;
            this.vy = 0;
            this.size = 4;
            // Each agent gets its own "optimization strength":
            // ids 0..49 map linearly to factors from 0.3 up to ~1.0
            this.optimizationFactor = p.map(id, 0, 50, 0.3, 1.0);
            this.trail = [];
            this.maxTrailLength = 40;
        }

        update(tx, ty) {
            // Calculate direction to target
            let dx = tx - this.x;
            let dy = ty - this.y;
            let dist = p.sqrt(dx * dx + dy * dy);

            if (dist > 0) {
                // Normalize direction
                dx /= dist;
                dy /= dist;

                // Apply optimization: stronger optimization = more direct path
                let force = 0.5 * this.optimizationFactor;
                this.vx += dx * force;
                this.vy += dy * force;
            }

            // Add some noise (life's unpredictability)
            let noiseStrength = (1 - this.optimizationFactor) * 0.3;
            this.vx += p.random(-noiseStrength, noiseStrength);
            this.vy += p.random(-noiseStrength, noiseStrength);

            // Apply drag
            this.vx *= 0.9;
            this.vy *= 0.9;

            // Update position
            this.x += this.vx;
            this.y += this.vy;

            // Add to trail
            this.trail.push({x: this.x, y: this.y});
            if (this.trail.length > this.maxTrailLength) {
                this.trail.shift();
            }

            // Bounce off edges (constraints of reality)
            if (this.x < 10 || this.x > 390) this.vx *= -0.8;
            if (this.y < 10 || this.y > 290) this.vy *= -0.8;
            this.x = p.constrain(this.x, 10, 390);
            this.y = p.constrain(this.y, 10, 290);
        }

        display(colors) {
            // Draw trail (the path taken)
            p.noFill();
            p.strokeWeight(1);
            for (let i = 0; i < this.trail.length - 1; i++) {
                let alpha = p.map(i, 0, this.trail.length, 0, 100);
                // More optimized agents get darker, warmer trail colors;
                // their straighter paths emerge from the motion itself
                let colorMix = this.optimizationFactor;
                let r = p.lerp(colors.accent3[0], colors.accent1[0], colorMix);
                let g = p.lerp(colors.accent3[1], colors.accent1[1], colorMix);
                let b = p.lerp(colors.accent3[2], colors.accent1[2], colorMix);
                p.stroke(r, g, b, alpha);
                p.line(this.trail[i].x, this.trail[i].y, this.trail[i+1].x, this.trail[i+1].y);
            }

            // Draw agent
            let colorMix = this.optimizationFactor;
            let r = p.lerp(colors.accent3[0], colors.accent2[0], colorMix);
            let g = p.lerp(colors.accent3[1], colors.accent2[1], colorMix);
            let b = p.lerp(colors.accent3[2], colors.accent2[2], colorMix);
            p.fill(r, g, b);
            p.noStroke();
            p.circle(this.x, this.y, this.size);
        }
    }

    p.setup = function() {
        p.createCanvas(400, 300);
        p.colorMode(p.RGB);

        // Create agents with varying optimization levels
        for (let i = 0; i < 50; i++) {
            agents.push(new Agent(
                p.random(50, 350),
                p.random(50, 250),
                i
            ));
        }

        // Initial target
        targetX = 200;
        targetY = 150;
    };

    p.draw = function() {
        const colors = getThemeColors();

        // Fade previous frame for trail effect
        p.fill(...colors.bg, 15);
        p.rect(0, 0, 400, 300);

        systemTime++;

        // Move target periodically (goals shift over time)
        if (systemTime % 180 === 0) {
            targetX = p.random(80, 320);
            targetY = p.random(60, 240);
        }

        // Draw target (the "optimal" goal)
        p.fill(...colors.accent1, 40);
        p.noStroke();
        p.circle(targetX, targetY, 30);
        p.fill(...colors.accent2);
        p.circle(targetX, targetY, 6);

        // Update and display all agents
        for (let agent of agents) {
            agent.update(targetX, targetY);
            agent.display(colors);
        }

        // Draw border
        p.noFill();
        p.stroke(...colors.accent3, 100);
        p.strokeWeight(2);
        p.rect(1, 1, 398, 298);
    };
};
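
In p5.js instance mode, nothing runs until the sketch function is handed to the p5 constructor. The page presumably does this elsewhere; to run the listing standalone, add:

new p5(sketch);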