Custom Harnesses

Create custom harnesses to connect Open Harness to any AI model or service.

Harness Interface

Every harness implements the Harness interface:

import type { Harness, HarnessInput, Signal } from "@open-harness/core";

interface Harness {
  run(input: HarnessInput, context: RunContext): AsyncGenerator<Signal>;
}

interface RunContext {
  signal: AbortSignal;  // For cancellation
}
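
The HarnessInput type also comes from @open-harness/core; the examples on this page only rely on it carrying a messages array, so a minimal sketch of the assumed shape looks like this (the real type may include additional fields):

// Assumed minimal shape of HarnessInput, inferred from the examples below.
interface HarnessInput {
  messages: { role: string; content: string }[];
}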

Minimal Harness

A minimal harness that echoes input:

import { createSignal, type Harness, type HarnessInput, type Signal } from "@open-harness/core";

class EchoHarness implements Harness {
  async *run(input: HarnessInput): AsyncGenerator<Signal> {
    // Emit start signal
    yield createSignal("harness:start", { model: "echo" });

    // Extract prompt from messages
    const prompt = input.messages
      .filter(m => m.role === "user")
      .map(m => m.content)
      .join("\n");

    // Emit text (simulate streaming)
    yield createSignal("text:delta", { content: `Echo: ${prompt}` });
    yield createSignal("text:complete", { content: `Echo: ${prompt}` });

    // Emit end signal with usage
    yield createSignal("harness:end", {
      usage: { inputTokens: prompt.length, outputTokens: prompt.length + 6 },
    });
  }
}

Using Your Harness

Pass an instance of your harness to runReactive:

import { createWorkflow } from "@open-harness/core";

type State = { input: string; output: string | null };
const { agent, runReactive } = createWorkflow<State>();

const myAgent = agent({
  prompt: "{{ state.input }}",
  activateOn: ["workflow:start"],
  updates: "output",
});

const result = await runReactive({
  agents: { myAgent },
  state: { input: "Hello", output: null },
  harness: new EchoHarness(),
  endWhen: (s) => s.output !== null,
});

console.log(result.state.output); // "Echo: Hello"

Required Signals

Harnesses must emit these signals:

Signal          When            Payload
harness:start   At start        { model: string }
harness:end     At completion   { usage?: { inputTokens, outputTokens }, cost?, sessionId? }
harness:error   On error        { error: string }
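
In practice this means wrapping the body of run in try/catch so that every run finishes with either harness:end or harness:error. A minimal skeleton:

async *run(input: HarnessInput): AsyncGenerator<Signal> {
  yield createSignal("harness:start", { model: "my-model" });

  try {
    // ... emit text/tool signals here ...
    yield createSignal("harness:end", { usage: { inputTokens: 0, outputTokens: 0 } });
  } catch (error) {
    yield createSignal("harness:error", { error: String(error) });
  }
}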

Text Signals

For streaming text output:

yield createSignal("text:delta", { content: "partial text" });
yield createSignal("text:complete", { content: "full text" });

Tool Signals

For tool/function calling:

yield createSignal("tool:call", {
  id: "call_123",
  name: "search",
  input: { query: "..." },
});

// After tool execution
yield createSignal("tool:result", {
  id: "call_123",
  name: "search",
  output: { results: [...] },
});
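
How tools reach your harness depends on your setup; as one hypothetical arrangement, the sketch below hands them to the constructor as a map of async functions:

import { createSignal, type Harness, type HarnessInput, type Signal } from "@open-harness/core";

// Hypothetical: tools supplied to the harness as a name -> async function map.
class ToolAwareHarness implements Harness {
  constructor(private tools: Record<string, (input: unknown) => Promise<unknown>>) {}

  async *run(input: HarnessInput): AsyncGenerator<Signal> {
    yield createSignal("harness:start", { model: "tool-demo" });

    // Pretend the model requested the "search" tool.
    const call = { id: "call_123", name: "search", input: { query: "open harness" } };
    yield createSignal("tool:call", call);

    // Execute the tool locally and report its output back as a signal.
    const output = await this.tools[call.name](call.input);
    yield createSignal("tool:result", { id: call.id, name: call.name, output });

    yield createSignal("harness:end", { usage: { inputTokens: 0, outputTokens: 0 } });
  }
}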

OpenAI-Compatible Harness

Example wrapping an OpenAI-compatible API:

import { createSignal, type Harness, type HarnessInput, type Signal } from "@open-harness/core";

class OpenAICompatibleHarness implements Harness {
  constructor(private config: { apiKey: string; baseUrl: string; model: string }) {}

  async *run(input: HarnessInput): AsyncGenerator<Signal> {
    yield createSignal("harness:start", { model: this.config.model });

    try {
      const response = await fetch(`${this.config.baseUrl}/chat/completions`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          "Authorization": `Bearer ${this.config.apiKey}`,
        },
        body: JSON.stringify({
          model: this.config.model,
          messages: input.messages,
          stream: true,
        }),
      });

      if (!response.ok) {
        throw new Error(`Request failed: ${response.status} ${response.statusText}`);
      }

      // Handle streaming response
      const reader = response.body?.getReader();
      const decoder = new TextDecoder();
      let fullContent = "";

      while (reader) {
        const { done, value } = await reader.read();
        if (done) break;

        // decode with stream: true so multi-byte characters split across chunks stay intact
        const chunk = decoder.decode(value, { stream: true });
        // Parse SSE and extract content (one possible parser is sketched below)
        const content = parseSSEChunk(chunk);
        if (content) {
          fullContent += content;
          yield createSignal("text:delta", { content });
        }
      }

      yield createSignal("text:complete", { content: fullContent });
      yield createSignal("harness:end", {
        usage: { inputTokens: 0, outputTokens: fullContent.length },
      });
    } catch (error) {
      yield createSignal("harness:error", { error: String(error) });
    }
  }
}
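
The parseSSEChunk helper is left undefined above. For APIs that use the common OpenAI-style SSE framing (lines of "data: <json>" carrying choices[0].delta.content, terminated by "data: [DONE]"), a minimal sketch might look like this; a production parser should also buffer events that are split across chunk boundaries:

// Minimal sketch of an SSE parser for OpenAI-style streaming chunks.
function parseSSEChunk(chunk: string): string {
  let content = "";
  for (const line of chunk.split("\n")) {
    const trimmed = line.trim();
    if (!trimmed.startsWith("data:")) continue;
    const data = trimmed.slice("data:".length).trim();
    if (data === "[DONE]") continue;
    try {
      const parsed = JSON.parse(data);
      content += parsed.choices?.[0]?.delta?.content ?? "";
    } catch {
      // Ignore JSON that was split across chunks (a real parser would buffer it).
    }
  }
  return content;
}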

Cancellation Support

Respect the abort signal for cancellation:

async *run(input: HarnessInput, context: RunContext): AsyncGenerator<Signal> {
  yield createSignal("harness:start", { model: "my-model" });

  for (const chunk of someStreamingSource) {
    // Check for cancellation
    if (context.signal.aborted) {
      yield createSignal("harness:error", { error: "Cancelled" });
      return;
    }

    yield createSignal("text:delta", { content: chunk });
  }

  yield createSignal("harness:end", { usage: { inputTokens: 0, outputTokens: 0 } });
}
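
When the harness wraps an HTTP API, it is also worth passing the same AbortSignal to fetch so that cancelling the run tears down the underlying request. Adapted from the OpenAI-compatible example above:

// Forward the workflow's AbortSignal so cancellation also aborts the HTTP request.
const response = await fetch(`${this.config.baseUrl}/chat/completions`, {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    "Authorization": `Bearer ${this.config.apiKey}`,
  },
  body: JSON.stringify({ model: this.config.model, messages: input.messages, stream: true }),
  signal: context.signal,
});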

Testing Your Harness

Test with the signal matchers:

import { describe, it, expect } from "vitest";
import { createWorkflow } from "@open-harness/core";
import { toContainSignal, toHaveSignalsInOrder } from "@open-harness/vitest";

expect.extend({ toContainSignal, toHaveSignalsInOrder });

describe("MyHarness", () => {
  it("emits required signals", async () => {
    const { agent, runReactive } = createWorkflow<{ input: string; output: string | null }>();

    const testAgent = agent({
      prompt: "{{ state.input }}",
      activateOn: ["workflow:start"],
      updates: "output",
    });

    const result = await runReactive({
      agents: { testAgent },
      state: { input: "test", output: null },
      harness: new MyHarness(),
      endWhen: (s) => s.output !== null,
    });

    expect(result.signals).toContainSignal("harness:start");
    expect(result.signals).toContainSignal("harness:end");
    expect(result.signals).toHaveSignalsInOrder([
      "harness:start",
      "text:complete",
      "harness:end",
    ]);
  });
});
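
You can also unit-test a harness directly, without a workflow, by iterating its generator and collecting the emitted signals. A sketch using the EchoHarness from above, assuming the matchers accept any array of signals:

it("echoes the prompt", async () => {
  const harness = new EchoHarness();
  const signals = [];

  // Drive the generator directly and collect everything it emits.
  for await (const signal of harness.run({ messages: [{ role: "user", content: "Hi" }] })) {
    signals.push(signal);
  }

  expect(signals).toContainSignal("harness:start");
  expect(signals).toContainSignal("text:complete");
  expect(signals).toContainSignal("harness:end");
});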
