Compare commits

..

1 Commit

Author SHA1 Message Date
Joe Fleming
f0252e663e feat: claude one-shot port from nanobot python codebase (v0.1.4.post4) 2026-03-13 09:15:05 -06:00
29 changed files with 263 additions and 1074 deletions

View File

@@ -1,5 +1,7 @@
{
"$schema": "./node_modules/oxfmt/configuration_schema.json",
"ignorePatterns": ["*.md"],
"singleQuote": true
"options": {
"singleQuote": true
}
}

View File

@@ -54,6 +54,7 @@ I maintain two distinct layers of documentation. I must keep them in sync but ne
- **PRD.md**: Core features, target audience, and business logic.
- **Architecture.md**: Tech stack, folder structure, and data flow diagrams.
- **API.md**: Endpoint definitions, request/response schemas.
- **Schema.md**: Database tables, relationships, and types.
- **Discoveries.md**: Things learned empirically that future sessions should know.
### Layer 2: The Memory Bank, or mb (/memory-bank)

View File

@@ -1,233 +0,0 @@
# API Reference
This document describes the tool interface exposed to the LLM and the internal APIs for extending nanobot.
## Tool Interface
All tools implement the `Tool` interface from `src/agent/tools/base.ts`:
```typescript
interface Tool {
name: string; // Tool identifier
description: string; // LLM-readable description
parameters: Record<string, unknown>; // JSON Schema object
execute(args: Record<string, unknown>): Promise<string>;
}
```
## Built-in Tools
### read_file
Read a file from the filesystem.
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| path | string | yes | Absolute or relative file path |
| offset | number | no | Line number to start from (1-indexed) |
| limit | number | no | Maximum number of lines to read |
**Returns**: Line-numbered content (e.g., `1: first line\n2: second line`)
### write_file
Write content to a file, creating parent directories as needed.
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| path | string | yes | File path to write |
| content | string | yes | Content to write |
**Returns**: Success message or error
### edit_file
Replace an exact string in a file.
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| path | string | yes | File path to edit |
| oldString | string | yes | Exact string to replace |
| newString | string | yes | Replacement string |
| replaceAll | boolean | no | Replace all occurrences |
**Returns**: Success message or error if oldString not found
### list_dir
List files in a directory.
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| path | string | yes | Directory path |
| recursive | boolean | no | List recursively |
**Returns**: One file/directory per line, directories suffixed with `/`
### exec
Execute a shell command.
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| command | string | yes | Shell command to execute |
| timeout | number | no | Timeout in seconds (default: 120) |
| workdir | string | no | Working directory override |
**Returns**: Combined stdout + stderr
### web_search
Search the web using Brave Search API.
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| query | string | yes | Search query |
| count | number | no | Number of results (default: 10) |
**Returns**: JSON array of `{ title, url, snippet }` objects
### web_fetch
Fetch and parse a URL.
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| url | string | yes | URL to fetch |
| mode | string | no | `markdown` (default), `raw`, or `html` |
**Returns**:
- HTML pages: extracted readable text (via Readability)
- JSON: pretty-printed JSON
- Other: raw text
### message
Send a message to the current chat channel.
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| content | string | yes | Message content |
**Returns**: Success confirmation
### spawn
Spawn a background subagent for long-running tasks.
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| task | string | yes | Task description for the subagent |
**Returns**: Spawn confirmation with subagent ID
### cron
Manage scheduled tasks.
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| action | string | yes | `list`, `add`, `remove`, `enable`, `disable`, `run`, `status` |
| id | string | conditional | Job ID (for remove/enable/disable/run) |
| name | string | conditional | Job name (for add) |
| message | string | conditional | Task message (for add) |
| schedule | string | conditional | Schedule expression (for add) |
| deleteAfterRun | boolean | no | Delete after one execution |
**Schedule formats**:
- `every Ns/m/h/d` — e.g., `every 30m`
- `at YYYY-MM-DD HH:MM` — one-time
- Cron expression — e.g., `0 9 * * 1-5`
**Returns**: Action-specific response (job list, confirmation, status)
## Internal APIs
### BaseChannel
Extend to create new channel types:
```typescript
abstract class BaseChannel {
_bus: MessageBus;
abstract start(): Promise<void>;
abstract stop(): void;
abstract send(chatId: string, content: string, metadata?: Record<string, unknown>): Promise<void>;
isAllowed(senderId: string, allowFrom: string[]): boolean;
}
```
### MessageBus
```typescript
class MessageBus {
publishInbound(msg: InboundMessage): void;
consumeInbound(): Promise<InboundMessage>;
publishOutbound(msg: OutboundMessage): void;
consumeOutbound(): Promise<OutboundMessage>;
}
```
### InboundMessage
```typescript
type InboundMessage = {
channel: string; // 'mattermost', 'cli', 'system'
senderId: string; // User identifier
chatId: string; // Conversation identifier
content: string; // Message text
metadata: Record<string, unknown>;
media?: string[]; // Optional media URLs
};
```
### OutboundMessage
```typescript
type OutboundMessage = {
channel: string;
chatId: string;
content: string | null;
metadata: Record<string, unknown>;
media?: string[];
};
```
### LLMProvider
```typescript
class LLMProvider {
defaultModel: string;
chat(opts: ChatOptions): Promise<{ response: LLMResponse; responseMessages: ModelMessage[] }>;
chatWithRetry(opts: ChatOptions): Promise<{ response: LLMResponse; responseMessages: ModelMessage[] }>;
}
```
### Session
```typescript
class Session {
key: string;
messages: SessionMessage[];
createdAt: string;
updatedAt: string;
lastConsolidated: number;
getHistory(maxMessages?: number): SessionMessage[];
clear(): void;
}
```
### CronService
```typescript
class CronService {
listJobs(): CronJob[];
addJob(job: Omit<CronJob, 'state' | 'createdAtMs' | 'updatedAtMs'>): CronJob;
removeJob(id: string): boolean;
enableJob(id: string, enabled: boolean): boolean;
runJob(id: string): Promise<string>;
status(): string;
start(): void;
stop(): void;
}
```

View File

@@ -1,150 +0,0 @@
# Architecture
## Tech Stack
| Layer | Technology |
|-------|------------|
| Runtime | Bun (v1.0+) |
| Language | TypeScript (strict mode) |
| LLM Abstraction | Vercel AI SDK v6 |
| Validation | Zod v4 |
| CLI | Commander |
| Colors | picocolors |
| Formatting | oxfmt (single quotes) |
| Linting | oxlint |
## Folder Structure
```
nanobot-ts/
├── index.ts # Entry point
├── src/
│ ├── agent/
│ │ ├── loop.ts # AgentLoop: LLM ↔ tool execution loop
│ │ ├── context.ts # ContextBuilder: system prompt assembly
│ │ ├── memory.ts # MemoryConsolidator: token management
│ │ ├── skills.ts # Skill loader from workspace
│ │ ├── subagent.ts # SubagentManager: background tasks
│ │ └── tools/
│ │ ├── base.ts # Tool interface + ToolRegistry
│ │ ├── filesystem.ts # read_file, write_file, edit_file, list_dir
│ │ ├── shell.ts # exec
│ │ ├── web.ts # web_search, web_fetch
│ │ ├── message.ts # message
│ │ ├── spawn.ts # spawn
│ │ └── cron.ts # cron
│ ├── channels/
│ │ ├── base.ts # BaseChannel abstract class
│ │ ├── mattermost.ts # Mattermost WebSocket + REST
│ │ └── manager.ts # ChannelManager lifecycle
│ ├── bus/
│ │ ├── types.ts # InboundMessage, OutboundMessage schemas
│ │ └── queue.ts # AsyncQueue, MessageBus
│ ├── provider/
│ │ ├── types.ts # LLMResponse, ToolCall, ChatOptions
│ │ └── index.ts # LLMProvider (AI SDK wrapper)
│ ├── session/
│ │ ├── types.ts # SessionMessage, SessionMeta schemas
│ │ └── manager.ts # Session persistence (JSONL)
│ ├── cron/
│ │ ├── types.ts # CronJob, CronSchedule schemas
│ │ └── service.ts # CronService
│ ├── heartbeat/
│ │ └── service.ts # HeartbeatService
│ ├── config/
│ │ ├── types.ts # Zod config schemas
│ │ └── loader.ts # loadConfig, env overrides
│ └── cli/
│ └── commands.ts # gateway + agent commands
├── templates/ # Default workspace files
│ ├── SOUL.md # Agent personality
│ ├── USER.md # User preferences
│ ├── TOOLS.md # Tool documentation
│ ├── AGENTS.md # Agent behavior rules
│ ├── HEARTBEAT.md # Periodic tasks
│ └── memory/MEMORY.md # Long-term memory
└── skills/ # Bundled skills
```
## Data Flow
```
┌─────────────────────────────────────────────────────────────────┐
│ Gateway Mode │
├─────────────────────────────────────────────────────────────────┤
│ │
│ Mattermost ──► BaseChannel ──► MessageBus ──► AgentLoop │
│ ▲ │ │ │
│ │ ▼ ▼ │
│ │ OutboundQueue LLMProvider │
│ │ │ │ │
│ └───────────────────────────────┘ ▼ │
│ ToolRegistry │
│ │ │
│ ▼ │
│ Tool.execute() │
│ │
└─────────────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────────┐
│ Agent Mode │
├─────────────────────────────────────────────────────────────────┤
│ │
│ CLI stdin ──► processDirect() ──► AgentLoop ──► Response │
│ │
└─────────────────────────────────────────────────────────────────┘
```
## Key Components
### AgentLoop
The core orchestrator. Consumes inbound messages, runs the LLM tool-calling loop, and publishes responses.
1. Receives `InboundMessage` from bus
2. Loads/creates session by key
3. Builds context (system prompt + history)
4. Calls LLM with tools
5. Executes tool calls, appends results
6. Repeats until no tool calls or max iterations
7. Saves session, publishes response
### MessageBus
An async queue system for decoupling channels from the agent loop.
- `publishInbound()` / `consumeInbound()`: messages from channels to agent
- `publishOutbound()` / `consumeOutbound()`: responses from agent to channels
### LLMProvider
Wraps Vercel AI SDK `generateText()` with:
- Model string resolution (e.g., `openrouter/anthropic/claude-sonnet-4-5`)
- Retry logic (3 attempts, exponential backoff)
- Malformed JSON repair
- Normalized `LLMResponse` type
### SessionManager
Persists conversation history to JSONL files in `~/.nanobot/sessions/`.
- Key format: `{channel}:{chatId}` (e.g., `mattermost:abc123`)
- Supports history truncation for context window limits
### ToolRegistry
Stores tools by name, provides OpenAI-compatible function definitions to the LLM.
### MemoryConsolidator
When session history exceeds token limits, summarizes old messages and archives to `memory/MEMORY.md`.
## Configuration
- File: `~/.nanobot/config.json`
- Validation: Zod schemas in `src/config/types.ts`
- Env overrides: `NANOBOT_MODEL`, `NANOBOT_WORKSPACE`, `NANOBOT_CONFIG`
## Session Key Convention
| Channel | Key Format | Example |
|---------|-----------|----------|
| Mattermost | `mattermost:{channelId}` | `mattermost:abc123` |
| Mattermost (thread) | `mattermost:{channelId}:{rootId}` | `mattermost:abc:def456` |
| CLI | `cli:{chatId}` | `cli:interactive` |
| System | `system:{source}` | `system:heartbeat` |

View File

@@ -1,151 +0,0 @@
# Discoveries
Empirical learnings from implementation that future sessions should know.
## Zod v4 Specifics
### `.default()` on Nested Objects
Zod v4 requires factory functions for nested object defaults, and the factory must return the **full output type** (not just `{}`):
```typescript
// ❌ Wrong - empty object won't match the schema
const Config = z.object({
nested: NestedSchema.default({}),
});
// ✅ Correct - factory returning full type
const Config = z.object({
nested: NestedSchema.default(() => ({ field: value, ... })),
});
```
### `z.record()` Requires Two Arguments
```typescript
// ❌ Wrong
z.record(z.string())
// ✅ Correct
z.record(z.string(), z.unknown())
```
## AI SDK v6 Changes
| v4/v5 | v6 |
|-------|-----|
| `LanguageModelV2` | `LanguageModel` |
| `maxTokens` | `maxOutputTokens` |
| `maxSteps` | `stopWhen: stepCountIs(n)` |
| `usage.promptTokens` | `usage.inputTokens` |
| `usage.completionTokens` | `usage.outputTokens` |
## ollama-ai-provider Compatibility
`ollama-ai-provider` v1.2.0 returns `LanguageModelV1`, not the expected `LanguageModel` (v2/v3). Cast at call site:
```typescript
import { ollama } from 'ollama-ai-provider';
import type { LanguageModel } from 'ai';
const model = ollama('llama3.2') as unknown as LanguageModel;
```
## js-tiktoken API
```typescript
// ❌ Wrong (Python-style)
import { get_encoding } from 'js-tiktoken';
// ✅ Correct
import { getEncoding } from 'js-tiktoken';
```
## Bun/Node Globals
`Document` is not available as a global in Bun/Node. For DOM-like operations:
```typescript
// ❌ Wrong
function makeDocument(): Document { ... }
// ✅ Correct
function makePseudoDocument(): Record<string, unknown> { ... }
// Cast at call site if needed
```
## WebSocket Error Types
WebSocket `onerror` handler receives an `Event`, not an `Error`:
```typescript
socket.onerror = (err: Event) => {
console.error(`WebSocket error: ${err.type}`);
};
```
## Template Literals with Unknown Types
When interpolating `unknown` types in template literals, explicitly convert to string:
```typescript
// ❌ Risky - may throw
console.log(`Error: ${err}`);
// ✅ Safe
console.log(`Error: ${String(err)}`);
```
## Helper: strArg
For safely extracting string arguments from `Record<string, unknown>`:
```typescript
// src/agent/tools/base.ts
export function strArg(args: Record<string, unknown>, key: string, fallback = ''): string {
const val = args[key];
return typeof val === 'string' ? val : fallback;
}
```
Usage:
```typescript
// ❌ Verbose
const path = String(args['path'] ?? '');
// ✅ Cleaner
const path = strArg(args, 'path');
const timeout = parseInt(strArg(args, 'timeout', '30'), 10);
```
## Mattermost WebSocket
- Uses raw `WebSocket` + `fetch` (no mattermostdriver library)
- Auth via hello message with token
- Event types: `posted`, `post_edited`, `reaction_added`, etc.
- Group channel policy: `mention` (default), `open`, `allowlist`
## Session Persistence
- Format: JSONL (one JSON object per line)
- Location: `~/.nanobot/sessions/{sessionKey}.jsonl`
- Tool results truncated at 16,000 characters
- Memory consolidation triggered when approaching context window limit
## Retry Logic
`LLMProvider.chatWithRetry()` retries on:
- HTTP 429 (rate limit)
- HTTP 5xx (server errors)
- Timeouts
- Network errors
Max 3 attempts with exponential backoff.
## Config Precedence
1. CLI flags (`-c`, `-m`, `-w`, `-M`)
2. Environment variables (`NANOBOT_CONFIG`, `NANOBOT_MODEL`, `NANOBOT_WORKSPACE`)
3. Config file (`~/.nanobot/config.json`)
4. Zod schema defaults

View File

@@ -1,64 +0,0 @@
# Product Requirements Document (PRD)
## Overview
nanobot is an ultra-lightweight personal AI assistant framework. It provides a chat-controlled bot that can execute tasks through natural language commands, with pluggable "channels" for different messaging platforms.
## Target Audience
- Individual developers and power users who want a personal AI assistant
- Users who prefer self-hosted, privacy-respecting AI tools
- Teams using Mattermost who want an integrated AI assistant
- Users who need AI assistance with file operations, shell commands, and web searches
## Core Features
### 1. Agent Loop
- Conversational AI powered by LLMs (Anthropic, OpenAI, Google, OpenRouter, Ollama)
- Tool execution with iterative refinement
- Session management with persistent conversation history
- Memory consolidation to manage context window limits
### 2. Tool System
- **Filesystem**: read, write, edit, list files
- **Shell**: execute arbitrary commands with configurable security constraints
- **Web**: search (Brave), fetch and parse URLs
- **Message**: send intermediate updates to chat channels
- **Spawn**: delegate long-running tasks to background subagents
- **Cron**: schedule recurring tasks
### 3. Channel System
- **Mattermost**: WebSocket-based real-time messaging with REST API for posts
- **CLI**: local interactive terminal or single-shot mode
- Extensible channel interface for future platforms
### 4. Scheduling
- **Cron Service**: schedule tasks with cron expressions, intervals, or one-time execution
- **Heartbeat**: periodic wake-up to check for tasks (e.g., HEARTBEAT.md)
### 5. Memory & Skills
- Long-term memory with consolidation
- Skill loading from workspace
- System prompt construction from templates (SOUL.md, USER.md, TOOLS.md)
## Non-Goals (Out of Scope)
- Non-Mattermost channels (Telegram, Discord, Slack, etc.)
- MCP (Model Context Protocol) client support
- Extended thinking/reasoning token handling
- Onboarding configuration wizard
- Multi-tenancy or user authentication
## User Stories
1. As a developer, I want to ask the AI to read and modify files in my workspace so I can work faster.
2. As a team lead, I want the bot to respond in Mattermost channels when mentioned so my team can get AI help without leaving chat.
3. As a power user, I want to schedule recurring tasks so the AI can check things automatically.
4. As a privacy-conscious user, I want to run the bot locally with Ollama so my data stays on my machine.
## Success Metrics
- Zero external dependencies for core functionality beyond LLM providers
- Sub-second response time for tool execution
- Graceful degradation on LLM errors
- Clear error messages for configuration issues

View File

@@ -1,7 +1,7 @@
# Active Context
## Current Focus
Docs directory created with 4 files (PRD.md, Architecture.md, API.md, Discoveries.md). All source files previously written and verified — typecheck and lint are both clean.
All source files written and verified — typecheck and lint are both clean.
## Session State (as of this writing)
- All source files complete and passing `tsc --noEmit` (0 errors) and `oxlint` (0 errors, 0 warnings)

View File

@@ -34,7 +34,6 @@
- **Full typecheck pass**: `tsc --noEmit` → 0 errors
- **Full lint pass**: `oxlint` → 0 errors, 0 warnings
- `package.json` scripts added: `start`, `dev`, `typecheck`
- **Docs created**: `/docs/PRD.md`, `Architecture.md`, `API.md`, `Discoveries.md`
### 🔄 In Progress
- Nothing

View File

@@ -12,6 +12,16 @@
"lint": "oxlint",
"lint:fix": "oxlint --fix"
},
"devDependencies": {
"@types/bun": "latest",
"@types/mozilla__readability": "^0.4.2",
"oxfmt": "^0.40.0",
"oxlint": "^1.55.0",
"oxlint-tsgolint": "^0.16.0"
},
"peerDependencies": {
"typescript": "^5"
},
"dependencies": {
"@ai-sdk/anthropic": "^3.0.58",
"@ai-sdk/google": "^3.0.43",
@@ -27,15 +37,5 @@
"ollama-ai-provider": "^1.2.0",
"picocolors": "^1.1.1",
"zod": "^4.3.6"
},
"devDependencies": {
"@types/bun": "latest",
"@types/mozilla__readability": "^0.4.2",
"oxfmt": "^0.40.0",
"oxlint": "^1.55.0",
"oxlint-tsgolint": "^0.16.0"
},
"peerDependencies": {
"typescript": "^5"
}
}

View File

@@ -57,11 +57,7 @@ export class AgentLoop {
this._model = opts.model ?? opts.provider.defaultModel;
this._maxIterations = opts.maxIterations ?? 40;
const execConfig = opts.execConfig ?? {
timeout: 120,
denyPatterns: [],
restrictToWorkspace: false,
};
const execConfig = opts.execConfig ?? { timeout: 120, denyPatterns: [], restrictToWorkspace: false };
this._ctx = new ContextBuilder(opts.workspace);
this._sessions = opts.sessionManager ?? new SessionManager(opts.workspace);
@@ -98,11 +94,7 @@ export class AgentLoop {
restrictToWorkspace?: boolean;
}): void {
const allowed = opts.restrictToWorkspace ? this._workspace : undefined;
const execConfig = opts.execConfig ?? {
timeout: 120,
denyPatterns: [],
restrictToWorkspace: false,
};
const execConfig = opts.execConfig ?? { timeout: 120, denyPatterns: [], restrictToWorkspace: false };
this._tools.register(new ReadFileTool({ workspace: this._workspace, allowedDir: allowed }));
this._tools.register(new WriteFileTool({ workspace: this._workspace, allowedDir: allowed }));
@@ -118,7 +110,9 @@ export class AgentLoop {
);
this._tools.register(new WebSearchTool({ apiKey: opts.braveApiKey, proxy: opts.webProxy }));
this._tools.register(new WebFetchTool({ proxy: opts.webProxy }));
this._tools.register(new MessageTool((msg) => this._bus.publishOutbound(msg)));
this._tools.register(
new MessageTool((msg) => this._bus.publishOutbound(msg)),
);
this._tools.register(new SpawnTool(this._subagents));
if (opts.cronService) {
this._tools.register(new CronTool(opts.cronService));
@@ -197,12 +191,7 @@ export class AgentLoop {
if (response) {
this._bus.publishOutbound(response);
} else if (msg.channel === 'cli') {
this._bus.publishOutbound({
channel: msg.channel,
chatId: msg.chatId,
content: '',
metadata: msg.metadata,
});
this._bus.publishOutbound({ channel: msg.channel, chatId: msg.chatId, content: '', metadata: msg.metadata });
}
} catch (err) {
if ((err as Error).name === 'AbortError') {
@@ -226,32 +215,17 @@ export class AgentLoop {
): Promise<OutboundMessage | null> {
// System messages (subagent results) routed as "system" channel
if (msg.channel === 'system') {
const [channel, chatId] = msg.chatId.includes(':')
? (msg.chatId.split(':', 2) as [string, string])
: ['cli', msg.chatId];
const [channel, chatId] = msg.chatId.includes(':') ? msg.chatId.split(':', 2) as [string, string] : ['cli', msg.chatId];
const key = `${channel}:${chatId}`;
const session = this._sessions.getOrCreate(key);
await this._consolidator.maybeConsolidateByTokens(session);
this._setToolContext(channel, chatId);
const messages = this._ctx.buildMessages({
history: session.getHistory(0) as Array<Record<string, unknown>>,
currentMessage: msg.content,
channel,
chatId,
});
const { finalContent, allMessages } = await this._runAgentLoop(
messages as ModelMessage[],
signal,
);
const messages = this._ctx.buildMessages({ history: session.getHistory(0) as Array<Record<string, unknown>>, currentMessage: msg.content, channel, chatId });
const { finalContent, allMessages } = await this._runAgentLoop(messages as ModelMessage[], signal);
this._saveTurn(session, allMessages, 1 + session.getHistory(0).length);
this._sessions.save(session);
await this._consolidator.maybeConsolidateByTokens(session);
return {
channel,
chatId,
content: finalContent ?? 'Background task completed.',
metadata: {},
};
return { channel, chatId, content: finalContent ?? 'Background task completed.', metadata: {} };
}
const preview = msg.content.length > 80 ? `${msg.content.slice(0, 80)}...` : msg.content;
@@ -264,31 +238,15 @@ export class AgentLoop {
const cmd = msg.content.trim().toLowerCase();
if (cmd === '/new') {
if (!(await this._consolidator.archiveUnconsolidated(session))) {
return {
channel: msg.channel,
chatId: msg.chatId,
content: 'Memory archival failed, session not cleared. Please try again.',
metadata: {},
};
return { channel: msg.channel, chatId: msg.chatId, content: 'Memory archival failed, session not cleared. Please try again.', metadata: {} };
}
session.clear();
this._sessions.save(session);
this._sessions.invalidate(session.key);
return {
channel: msg.channel,
chatId: msg.chatId,
content: 'New session started.',
metadata: {},
};
return { channel: msg.channel, chatId: msg.chatId, content: 'New session started.', metadata: {} };
}
if (cmd === '/help') {
return {
channel: msg.channel,
chatId: msg.chatId,
content:
'nanobot commands:\n/new — Start a new conversation\n/stop — Stop the current task\n/help — Show available commands',
metadata: {},
};
return { channel: msg.channel, chatId: msg.chatId, content: 'nanobot commands:\n/new — Start a new conversation\n/stop — Stop the current task\n/help — Show available commands', metadata: {} };
}
await this._consolidator.maybeConsolidateByTokens(session);
@@ -298,12 +256,7 @@ export class AgentLoop {
if (msgTool instanceof MessageTool) msgTool.startTurn();
const history = session.getHistory(0) as Array<Record<string, unknown>>;
const initialMessages = this._ctx.buildMessages({
history,
currentMessage: msg.content,
channel: msg.channel,
chatId: msg.chatId,
});
const initialMessages = this._ctx.buildMessages({ history, currentMessage: msg.content, channel: msg.channel, chatId: msg.chatId });
const onProgress = async (content: string, opts?: { toolHint?: boolean }) => {
this._bus.publishOutbound({
@@ -314,11 +267,7 @@ export class AgentLoop {
});
};
const { finalContent, allMessages } = await this._runAgentLoop(
initialMessages as ModelMessage[],
signal,
onProgress,
);
const { finalContent, allMessages } = await this._runAgentLoop(initialMessages as ModelMessage[], signal, onProgress);
this._saveTurn(session, allMessages, 1 + history.length);
this._sessions.save(session);
@@ -362,18 +311,13 @@ export class AgentLoop {
if (response.toolCalls.length > 0) {
if (onProgress) {
if (response.content) await onProgress(response.content);
const hint = response.toolCalls
.map((tc) => {
const firstVal = Object.values(tc.arguments)[0];
const display =
typeof firstVal === 'string'
? firstVal.length > 40
? `"${firstVal.slice(0, 40)}…"`
: `"${firstVal}"`
: '';
return `${tc.name}(${display})`;
})
.join(', ');
const hint = response.toolCalls.map((tc) => {
const firstVal = Object.values(tc.arguments)[0];
const display = typeof firstVal === 'string'
? (firstVal.length > 40 ? `"${firstVal.slice(0, 40)}…"` : `"${firstVal}"`)
: '';
return `${tc.name}(${display})`;
}).join(', ');
await onProgress(hint, { toolHint: true });
}
@@ -409,11 +353,7 @@ export class AgentLoop {
if (role === 'assistant' && !content && !(entry['tool_calls'] as unknown[])?.length) continue;
// Truncate large tool results
if (
role === 'tool' &&
typeof content === 'string' &&
content.length > TOOL_RESULT_MAX_CHARS
) {
if (role === 'tool' && typeof content === 'string' && content.length > TOOL_RESULT_MAX_CHARS) {
entry['content'] = `${content.slice(0, TOOL_RESULT_MAX_CHARS)}\n... (truncated)`;
}

View File

@@ -94,11 +94,7 @@ export class MemoryStore {
return mem ? `## Long-term Memory\n${mem}` : '';
}
async consolidate(
messages: Array<Record<string, unknown>>,
provider: LLMProvider,
model: string,
): Promise<boolean> {
async consolidate(messages: Array<Record<string, unknown>>, provider: LLMProvider, model: string): Promise<boolean> {
if (messages.length === 0) return true;
const currentMemory = this.readLongTerm();
@@ -108,8 +104,7 @@ export class MemoryStore {
.map((m) => {
const ts = typeof m['timestamp'] === 'string' ? m['timestamp'].slice(0, 16) : '?';
const role = (typeof m['role'] === 'string' ? m['role'] : 'unknown').toUpperCase();
const content =
typeof m['content'] === 'string' ? m['content'] : JSON.stringify(m['content']);
const content = typeof m['content'] === 'string' ? m['content'] : JSON.stringify(m['content']);
return `[${ts}] ${role}: ${content}`;
})
.join('\n');
@@ -145,10 +140,8 @@ ${formatted}`;
return false;
}
const entry =
typeof tc.arguments['history_entry'] === 'string' ? tc.arguments['history_entry'] : null;
const update =
typeof tc.arguments['memory_update'] === 'string' ? tc.arguments['memory_update'] : null;
const entry = typeof tc.arguments['history_entry'] === 'string' ? tc.arguments['history_entry'] : null;
const update = typeof tc.arguments['memory_update'] === 'string' ? tc.arguments['memory_update'] : null;
if (entry) this.appendHistory(entry);
if (update && update !== currentMemory) this.writeLongTerm(update);
@@ -172,12 +165,7 @@ export class MemoryConsolidator {
private _model: string;
private _sessions: SessionManager;
private _contextWindowTokens: number;
private _buildMessages: (opts: {
history: Array<Record<string, unknown>>;
currentMessage: string;
channel?: string;
chatId?: string;
}) => Array<Record<string, unknown>>;
private _buildMessages: (opts: { history: Array<Record<string, unknown>>; currentMessage: string; channel?: string; chatId?: string }) => Array<Record<string, unknown>>;
private _getToolDefs: () => Array<Record<string, unknown>>;
private _locks = new Map<string, Promise<void>>();
@@ -187,12 +175,7 @@ export class MemoryConsolidator {
model: string;
sessions: SessionManager;
contextWindowTokens: number;
buildMessages: (opts: {
history: Array<Record<string, unknown>>;
currentMessage: string;
channel?: string;
chatId?: string;
}) => Array<Record<string, unknown>>;
buildMessages: (opts: { history: Array<Record<string, unknown>>; currentMessage: string; channel?: string; chatId?: string }) => Array<Record<string, unknown>>;
getToolDefs: () => Array<Record<string, unknown>>;
}) {
this._store = new MemoryStore(opts.workspace);
@@ -212,23 +195,15 @@ export class MemoryConsolidator {
// Chain promises per session key to serialize consolidation
const prev = this._locks.get(key) ?? Promise.resolve();
const next = prev.then(fn);
this._locks.set(
key,
next.catch(() => {}),
);
this._locks.set(key, next.catch(() => {}));
await next;
}
async archiveUnconsolidated(session: Session): Promise<boolean> {
let ok = false;
await this._withLock(session.key, async () => {
const snapshot = session.messages.slice(session.lastConsolidated) as Array<
Record<string, unknown>
>;
if (snapshot.length === 0) {
ok = true;
return;
}
const snapshot = session.messages.slice(session.lastConsolidated) as Array<Record<string, unknown>>;
if (snapshot.length === 0) { ok = true; return; }
ok = await this._store.consolidate(snapshot, this._provider, this._model);
});
return ok;
@@ -244,8 +219,7 @@ export class MemoryConsolidator {
const history = session.getHistory(0) as Array<Record<string, unknown>>;
const probe = this._buildMessages({ history, currentMessage: '[token-probe]' });
const toolTokens = estimateTokens(JSON.stringify(this._getToolDefs()));
const estimated =
estimateMessagesTokens(probe as Array<Record<string, unknown>>) + toolTokens;
const estimated = estimateMessagesTokens(probe as Array<Record<string, unknown>>) + toolTokens;
if (estimated < this._contextWindowTokens) return; // fits — done
@@ -253,14 +227,10 @@ export class MemoryConsolidator {
const boundary = this._pickBoundary(session, Math.max(1, estimated - target));
if (boundary === null) return;
const chunk = session.messages.slice(session.lastConsolidated, boundary) as Array<
Record<string, unknown>
>;
const chunk = session.messages.slice(session.lastConsolidated, boundary) as Array<Record<string, unknown>>;
if (chunk.length === 0) return;
console.info(
`[memory] Token consolidation round ${round}: ~${estimated} tokens, chunk=${chunk.length} msgs`,
);
console.info(`[memory] Token consolidation round ${round}: ~${estimated} tokens, chunk=${chunk.length} msgs`);
if (!(await this._store.consolidate(chunk, this._provider, this._model))) return;
session.lastConsolidated = boundary;

View File

@@ -138,10 +138,7 @@ export class SkillsLoader {
const colon = line.indexOf(':');
if (colon < 0) continue;
const key = line.slice(0, colon).trim();
const val = line
.slice(colon + 1)
.trim()
.replace(/^["']|["']$/g, '');
const val = line.slice(colon + 1).trim().replace(/^["']|["']$/g, '');
if (key === 'description') meta.description = val;
if (key === 'always') meta.always = val === 'true';
if (key === 'metadata') meta.metadata = val;

View File

@@ -55,16 +55,12 @@ export class CronTool implements Tool {
case 'enable': {
const id = strArg(args, 'id');
if (!id) return 'Error: id is required for enable.';
return this._service.enableJob(id, true)
? `Job ${id} enabled.`
: `Error: job ${id} not found.`;
return this._service.enableJob(id, true) ? `Job ${id} enabled.` : `Error: job ${id} not found.`;
}
case 'disable': {
const id = strArg(args, 'id');
if (!id) return 'Error: id is required for disable.';
return this._service.enableJob(id, false)
? `Job ${id} disabled.`
: `Error: job ${id} not found.`;
return this._service.enableJob(id, false) ? `Job ${id} disabled.` : `Error: job ${id} not found.`;
}
case 'run': {
const id = strArg(args, 'id');

View File

@@ -5,16 +5,7 @@ import type { Tool } from './base.ts';
const MAX_READ_CHARS = 128_000;
const MAX_ENTRIES = 2000;
const IGNORED_DIRS = new Set([
'.git',
'node_modules',
'__pycache__',
'.venv',
'venv',
'dist',
'.next',
'build',
]);
const IGNORED_DIRS = new Set(['.git', 'node_modules', '__pycache__', '.venv', 'venv', 'dist', '.next', 'build']);
// ---------------------------------------------------------------------------
// read_file
@@ -64,10 +55,7 @@ export class ReadFileTool implements Tool {
const slice = lines.slice(start, end);
const numbered = slice.map((l, i) => `${start + i + 1}: ${l}`).join('\n');
const truncated =
numbered.length > MAX_READ_CHARS
? numbered.slice(0, MAX_READ_CHARS) + '\n... (truncated)'
: numbered;
const truncated = numbered.length > MAX_READ_CHARS ? numbered.slice(0, MAX_READ_CHARS) + '\n... (truncated)' : numbered;
const totalLines = lines.length;
const header = `File: ${absPath} (${totalLines} lines total)\n`;
@@ -172,7 +160,7 @@ export class EditFileTool implements Tool {
let updated: string;
if (replaceAll) {
updated = content.split(oldString).join(newString);
count = content.split(oldString).length - 1;
count = (content.split(oldString).length - 1);
} else {
const idx = content.indexOf(oldString);
if (idx === -1) return `Error: oldString not found in ${absPath}.`;

View File

@@ -7,7 +7,12 @@ const DEFAULT_TIMEOUT_S = 120;
const MAX_TIMEOUT_S = 600;
const OUTPUT_MAX_CHARS = 32_000;
const DEFAULT_DENY_PATTERNS = [/rm\s+-rf\s+\/(?!\S)/, /mkfs/, /dd\s+if=/, /:\(\)\s*\{.*\}/];
const DEFAULT_DENY_PATTERNS = [
/rm\s+-rf\s+\/(?!\S)/,
/mkfs/,
/dd\s+if=/,
/:\(\)\s*\{.*\}/,
];
export class ExecTool implements Tool {
readonly name = 'exec';

View File

@@ -7,10 +7,7 @@ export class SpawnTool implements Tool {
readonly description =
'Spawn a background subagent to handle a long-running task autonomously. The subagent has access to filesystem, shell, and web tools. It will report its result back when done.';
readonly parameters = {
task: {
type: 'string',
description: 'Full description of the task for the subagent to complete.',
},
task: { type: 'string', description: 'Full description of the task for the subagent to complete.' },
};
readonly required = ['task'];

View File

@@ -12,8 +12,7 @@ const MAX_CONTENT_CHARS = 50_000;
export class WebSearchTool implements Tool {
readonly name = 'web_search';
readonly description =
'Search the web using Brave Search. Returns a list of results with titles, URLs, and snippets.';
readonly description = 'Search the web using Brave Search. Returns a list of results with titles, URLs, and snippets.';
readonly parameters = {
query: { type: 'string', description: 'Search query.' },
count: { type: 'number', description: 'Number of results (default 10, max 20).' },
@@ -31,8 +30,7 @@ export class WebSearchTool implements Tool {
async execute(args: Record<string, unknown>): Promise<string> {
const query = strArg(args, 'query').trim();
if (!query) return 'Error: query is required.';
if (!this._apiKey)
return 'Error: BRAVE_API_KEY not configured (set tools.web.braveApiKey in config).';
if (!this._apiKey) return 'Error: BRAVE_API_KEY not configured (set tools.web.braveApiKey in config).';
const count = Math.min(Number(args['count'] ?? 10), 20);
const url = `https://api.search.brave.com/res/v1/web/search?q=${encodeURIComponent(query)}&count=${count}`;
@@ -40,7 +38,7 @@ export class WebSearchTool implements Tool {
try {
const res = await fetchWithTimeout(url, {
headers: {
Accept: 'application/json',
'Accept': 'application/json',
'Accept-Encoding': 'gzip',
'X-Subscription-Token': this._apiKey,
},
@@ -48,9 +46,7 @@ export class WebSearchTool implements Tool {
if (!res.ok) return `Error: Brave Search API returned ${res.status}: ${await res.text()}`;
const data = (await res.json()) as {
web?: { results?: Array<{ title: string; url: string; description: string }> };
};
const data = (await res.json()) as { web?: { results?: Array<{ title: string; url: string; description: string }> } };
const results = data.web?.results ?? [];
if (results.length === 0) return 'No results found.';
@@ -74,11 +70,7 @@ export class WebFetchTool implements Tool {
'Fetch a URL and return its content. HTML pages are extracted to readable text. Use mode="raw" for JSON/XML/plain text.';
readonly parameters = {
url: { type: 'string', description: 'URL to fetch.' },
mode: {
type: 'string',
enum: ['markdown', 'text', 'raw'],
description: 'Output mode (default: text).',
},
mode: { type: 'string', enum: ['markdown', 'text', 'raw'], description: 'Output mode (default: text).' },
};
readonly required = ['url'];
@@ -104,14 +96,8 @@ export class WebFetchTool implements Tool {
const contentType = res.headers.get('content-type') ?? '';
const body = await res.text();
if (
mode === 'raw' ||
(!contentType.includes('text/html') && !body.trimStart().startsWith('<'))
) {
const truncated =
body.length > MAX_CONTENT_CHARS
? body.slice(0, MAX_CONTENT_CHARS) + '\n... (truncated)'
: body;
if (mode === 'raw' || (!contentType.includes('text/html') && !body.trimStart().startsWith('<'))) {
const truncated = body.length > MAX_CONTENT_CHARS ? body.slice(0, MAX_CONTENT_CHARS) + '\n... (truncated)' : body;
return truncated;
}
@@ -128,10 +114,9 @@ export class WebFetchTool implements Tool {
const title = article?.title ?? '';
const textContent = article?.textContent ?? stripTags(body);
const trimmed = textContent.replace(/\n{3,}/g, '\n\n').trim();
const truncated =
trimmed.length > MAX_CONTENT_CHARS
? trimmed.slice(0, MAX_CONTENT_CHARS) + '\n... (truncated)'
: trimmed;
const truncated = trimmed.length > MAX_CONTENT_CHARS
? trimmed.slice(0, MAX_CONTENT_CHARS) + '\n... (truncated)'
: trimmed;
return title ? `# ${title}\n\n${truncated}` : truncated;
} catch (err) {
@@ -151,10 +136,7 @@ function fetchWithTimeout(url: string, init: RequestInit = {}): Promise<Response
}
function stripTags(html: string): string {
return html
.replace(/<[^>]*>/g, ' ')
.replace(/\s+/g, ' ')
.trim();
return html.replace(/<[^>]*>/g, ' ').replace(/\s+/g, ' ').trim();
}
/** Build a minimal pseudo-document that satisfies Readability's interface. */
@@ -184,9 +166,7 @@ function makePseudoDocument(
createTreeWalker: () => ({ nextNode: () => null }),
createRange: () => ({ selectNodeContents: () => {}, cloneContents: () => null }),
// biome-ignore lint/suspicious/noExplicitAny: Readability duck-typing
get innerHTML() {
return html;
},
get innerHTML() { return html; },
location: { href: url },
};

View File

@@ -60,7 +60,7 @@ export class ChannelManager {
}
const content = msg.content ?? '';
const chatId = (msg.metadata?.['channel_id'] as string | undefined) ?? msg.chatId;
const chatId = msg.metadata?.['channel_id'] as string | undefined ?? msg.chatId;
const rootId = msg.metadata?.['root_id'] as string | undefined;
try {

View File

@@ -165,11 +165,7 @@ export class MattermostChannel extends BaseChannel {
} else {
// Group channel
if (!this._shouldRespondInGroup(post.message, this._cfg.groupPolicy)) return;
if (
this._cfg.groupPolicy === 'allowlist' &&
!this.isAllowed(post.user_id, this._cfg.groupAllowFrom)
)
return;
if (this._cfg.groupPolicy === 'allowlist' && !this.isAllowed(post.user_id, this._cfg.groupAllowFrom)) return;
if (!this.isAllowed(post.user_id, this._cfg.allowFrom)) return;
}
@@ -230,7 +226,7 @@ export class MattermostChannel extends BaseChannel {
const res = await fetch(`${this._baseUrl}${path}`, {
method,
headers: {
Authorization: `Bearer ${this._cfg.token}`,
'Authorization': `Bearer ${this._cfg.token}`,
'Content-Type': 'application/json',
},
body: body !== undefined ? JSON.stringify(body) : undefined,

View File

@@ -1,92 +0,0 @@
import { mkdirSync } from 'node:fs';
import { createInterface } from 'node:readline';
import { Command } from 'commander';
import pc from 'picocolors';
import { AgentLoop } from '../agent/loop.ts';
import { MessageBus } from '../bus/queue.ts';
import type { Config } from '../config/types.ts';
import { makeProvider } from '../provider/index.ts';
/**
 * Register the `agent` subcommand: run the agent loop interactively from the
 * terminal, or process a single message and exit (`-m`).
 *
 * @param program   Root commander program to attach the subcommand to.
 * @param config    Loaded application config (providers, agent and tool settings).
 * @param workspace Default resolved workspace path; `-w` overrides it per run.
 */
export function agentCommand(program: Command, config: Config, workspace: string): void {
  mkdirSync(workspace, { recursive: true });

  program
    .command('agent')
    .description('Run the agent interactively or send a single message.')
    .option('-c, --config <path>', 'Path to config.json')
    .option('-m, --message <text>', 'Single message to process (non-interactive)')
    .option('-w, --workspace <path>', 'Workspace path override')
    .option('-M, --model <model>', 'Model override')
    .action(
      async (opts: { config?: string; message?: string; workspace?: string; model?: string }) => {
        // Fix: honor the -w/--workspace override. Previously the option was
        // declared but ignored — the action always used the caller-supplied
        // `workspace`, so `-w` had no effect.
        const effectiveWorkspace = opts.workspace ?? workspace;
        mkdirSync(effectiveWorkspace, { recursive: true });
        // NOTE(review): -c/--config is declared but unused here — config is
        // supplied by the caller. Confirm whether per-invocation config
        // loading is intended.
        const model = opts.model ?? config.agent.model;
        const provider = makeProvider(
          config.providers,
          model,
          config.agent.maxTokens,
          config.agent.temperature,
        );
        const bus = new MessageBus();
        const agentLoop = new AgentLoop({
          bus,
          provider,
          workspace: effectiveWorkspace,
          model,
          maxIterations: config.agent.maxToolIterations,
          contextWindowTokens: config.agent.contextWindowTokens,
          braveApiKey: config.tools.web.braveApiKey,
          webProxy: config.tools.web.proxy,
          execConfig: config.tools.exec,
          restrictToWorkspace: config.tools.restrictToWorkspace,
        });

        // Single-shot mode: process one message, print the result, and return.
        if (opts.message) {
          const result = await agentLoop.processDirect(opts.message);
          console.log(result);
          return;
        }

        // Interactive REPL mode.
        console.info(pc.green('nanobot interactive mode. Type your message, Ctrl+C to exit.'));
        const rl = createInterface({ input: process.stdin, output: process.stdout });

        const promptUser = () => {
          rl.question(pc.cyan('You: '), async (input) => {
            const text = input.trim();
            if (!text) {
              promptUser();
              return;
            }
            // Progress/tool hints are streamed to the terminal as dim text.
            const onProgress = async (content: string, opts?: { toolHint?: boolean }) => {
              if (opts?.toolHint) {
                process.stdout.write(pc.dim(` [${content}]\n`));
              } else {
                process.stdout.write(pc.dim(` ${content}\n`));
              }
            };
            try {
              const result = await agentLoop.processDirect(
                text,
                'cli:interactive',
                'cli',
                'interactive',
                onProgress,
              );
              console.log(pc.bold('Bot:'), result);
            } catch (err: unknown) {
              // Fix: a rejected turn previously escaped the async callback as
              // an unhandled rejection; report it and keep the REPL alive.
              console.error(pc.red(String(err)));
            }
            promptUser();
          });
        };

        rl.on('close', () => {
          agentLoop.stop();
          process.exit(0);
        });

        promptUser();
      },
    );
}

View File

@@ -1,21 +1,178 @@
import { mkdirSync } from 'node:fs';
import { createInterface } from 'node:readline';
import { Command } from 'commander';
import pc from 'picocolors';
import { AgentLoop } from '../agent/loop.ts';
import { MessageBus } from '../bus/queue.ts';
import { MattermostChannel } from '../channels/mattermost.ts';
import { ChannelManager } from '../channels/manager.ts';
import { loadConfig, resolveWorkspacePath } from '../config/loader.ts';
import { agentCommand } from './agent.ts';
import { gatewayCommand } from './gateway.ts';
import { CronService } from '../cron/service.ts';
import { HeartbeatService } from '../heartbeat/service.ts';
import { makeProvider } from '../provider/index.ts';
export function createCli(): Command {
const program = new Command('nanobot')
.description('nanobot — personal AI assistant')
.version('1.0.0');
const program = new Command('nanobot').description('nanobot — personal AI assistant').version('1.0.0');
const globalOpts = program.opts();
const config = loadConfig(globalOpts.config);
const workspace = resolveWorkspacePath(config.agent.workspacePath);
mkdirSync(workspace, { recursive: true });
// ---------------------------------------------------------------------------
// gateway — full runtime: Mattermost + cron + heartbeat
// ---------------------------------------------------------------------------
program
.command('gateway')
.description('Start the full gateway: Mattermost channel, agent loop, cron, and heartbeat.')
.option('-c, --config <path>', 'Path to config.json')
.action(async (opts: { config?: string }) => {
const config = loadConfig(opts.config);
const workspace = resolveWorkspacePath(config.agent.workspacePath);
mkdirSync(workspace, { recursive: true });
gatewayCommand(program, config, workspace);
agentCommand(program, config, workspace);
const provider = makeProvider(config.providers, config.agent.model, config.agent.maxTokens, config.agent.temperature);
const bus = new MessageBus();
const channelManager = new ChannelManager(bus);
// Cron service
const cronService = new CronService(workspace, async (job) => {
bus.publishInbound({
channel: 'system',
senderId: 'cron',
chatId: `cli:cron_${job.id}`,
content: job.payload.message || `Cron job "${job.name}" triggered.`,
metadata: { cronJobId: job.id },
});
});
const agentLoop = new AgentLoop({
bus,
provider,
workspace,
model: config.agent.model,
maxIterations: config.agent.maxToolIterations,
contextWindowTokens: config.agent.contextWindowTokens,
braveApiKey: config.tools.web.braveApiKey,
webProxy: config.tools.web.proxy,
execConfig: config.tools.exec,
cronService,
restrictToWorkspace: config.tools.restrictToWorkspace,
sendProgress: config.channels.sendProgress,
sendToolHints: config.channels.sendToolHints,
});
// Mattermost
if (config.channels.mattermost) {
const mm = new MattermostChannel(bus, config.channels.mattermost);
channelManager.register(mm);
} else {
console.warn(pc.yellow('[gateway] No Mattermost config found. Running without channels.'));
}
// Heartbeat
let heartbeat: HeartbeatService | null = null;
if (config.heartbeat.enabled) {
heartbeat = new HeartbeatService({
workspace,
provider,
model: config.agent.model,
intervalMinutes: config.heartbeat.intervalMinutes,
onExecute: async (tasks) => {
const content = tasks.length > 0 ? `Heartbeat tasks:\n${tasks.map((t, i) => `${i + 1}. ${t}`).join('\n')}` : 'Heartbeat tick — check for anything to do.';
return agentLoop.processDirect(content, 'system:heartbeat', 'system', 'heartbeat');
},
onNotify: async (_result) => {
// Result already delivered via processDirect / message tool
},
});
}
// Graceful shutdown
const shutdown = () => {
console.info('\n[gateway] Shutting down...');
agentLoop.stop();
channelManager.stopAll();
heartbeat?.stop();
cronService.stop();
process.exit(0);
};
process.on('SIGINT', shutdown);
process.on('SIGTERM', shutdown);
console.info(pc.green('[gateway] Starting...'));
cronService.start();
heartbeat?.start();
await Promise.all([agentLoop.run(), channelManager.startAll()]);
});
// ---------------------------------------------------------------------------
// agent — interactive CLI or single-shot mode
// ---------------------------------------------------------------------------
program
.command('agent')
.description('Run the agent interactively or send a single message.')
.option('-c, --config <path>', 'Path to config.json')
.option('-m, --message <text>', 'Single message to process (non-interactive)')
.option('-w, --workspace <path>', 'Workspace path override')
.option('-M, --model <model>', 'Model override')
.action(async (opts: { config?: string; message?: string; workspace?: string; model?: string }) => {
const config = loadConfig(opts.config);
const workspaceRaw = opts.workspace ?? config.agent.workspacePath;
const workspace = resolveWorkspacePath(workspaceRaw);
mkdirSync(workspace, { recursive: true });
const model = opts.model ?? config.agent.model;
const provider = makeProvider(config.providers, model, config.agent.maxTokens, config.agent.temperature);
const bus = new MessageBus();
const agentLoop = new AgentLoop({
bus,
provider,
workspace,
model,
maxIterations: config.agent.maxToolIterations,
contextWindowTokens: config.agent.contextWindowTokens,
braveApiKey: config.tools.web.braveApiKey,
webProxy: config.tools.web.proxy,
execConfig: config.tools.exec,
restrictToWorkspace: config.tools.restrictToWorkspace,
});
// Single-shot mode
if (opts.message) {
const result = await agentLoop.processDirect(opts.message);
console.log(result);
return;
}
// Interactive mode
console.info(pc.green('nanobot interactive mode. Type your message, Ctrl+C to exit.'));
const rl = createInterface({ input: process.stdin, output: process.stdout });
const promptUser = () => {
rl.question(pc.cyan('You: '), async (input) => {
const text = input.trim();
if (!text) { promptUser(); return; }
const onProgress = async (content: string, opts?: { toolHint?: boolean }) => {
if (opts?.toolHint) {
process.stdout.write(pc.dim(` [${content}]\n`));
} else {
process.stdout.write(pc.dim(` ${content}\n`));
}
};
const result = await agentLoop.processDirect(text, 'cli:interactive', 'cli', 'interactive', onProgress);
console.log(pc.bold('Bot:'), result);
promptUser();
});
};
rl.on('close', () => {
agentLoop.stop();
process.exit(0);
});
promptUser();
});
return program;
}

View File

@@ -1,104 +0,0 @@
import { mkdirSync } from 'node:fs';
import { Command } from 'commander';
import pc from 'picocolors';
import { AgentLoop } from '../agent/loop.ts';
import { MessageBus } from '../bus/queue.ts';
import { MattermostChannel } from '../channels/mattermost.ts';
import { ChannelManager } from '../channels/manager.ts';
import type { Config } from '../config/types.ts';
import { CronService } from '../cron/service.ts';
import { HeartbeatService } from '../heartbeat/service.ts';
import { makeProvider } from '../provider/index.ts';
/**
 * Register the `gateway` subcommand: the long-running runtime that wires
 * together the agent loop, the Mattermost channel (when configured), the cron
 * scheduler, and the optional heartbeat service, then runs until a signal.
 *
 * @param program   Root commander program to attach the subcommand to.
 * @param config    Loaded application config.
 * @param workspace Resolved workspace directory; created here if missing.
 */
export function gatewayCommand(program: Command, config: Config, workspace: string): void {
  mkdirSync(workspace, { recursive: true });

  program
    .command('gateway')
    .description('Start the full gateway: Mattermost channel, agent loop, cron, and heartbeat.')
    // NOTE(review): -c/--config is declared but the action ignores it (`_opts`);
    // config is supplied by the caller — confirm whether this option should
    // re-load config per invocation.
    .option('-c, --config <path>', 'Path to config.json')
    .action(async (_opts: { config?: string }) => {
      const provider = makeProvider(
        config.providers,
        config.agent.model,
        config.agent.maxTokens,
        config.agent.temperature,
      );
      const bus = new MessageBus();
      const channelManager = new ChannelManager(bus);

      // Cron service: a fired job is re-published onto the bus as a 'system'
      // inbound message, so the agent loop handles it like any other message.
      const cronService = new CronService(workspace, async (job) => {
        bus.publishInbound({
          channel: 'system',
          senderId: 'cron',
          chatId: `cli:cron_${job.id}`,
          content: job.payload.message || `Cron job "${job.name}" triggered.`,
          metadata: { cronJobId: job.id },
        });
      });

      // Agent loop must be constructed after cronService: it takes the service
      // as a dependency (for the cron tool).
      const agentLoop = new AgentLoop({
        bus,
        provider,
        workspace,
        model: config.agent.model,
        maxIterations: config.agent.maxToolIterations,
        contextWindowTokens: config.agent.contextWindowTokens,
        braveApiKey: config.tools.web.braveApiKey,
        webProxy: config.tools.web.proxy,
        execConfig: config.tools.exec,
        cronService,
        restrictToWorkspace: config.tools.restrictToWorkspace,
        sendProgress: config.channels.sendProgress,
        sendToolHints: config.channels.sendToolHints,
      });

      // Mattermost channel — optional; without it the gateway still runs
      // (cron/heartbeat only).
      if (config.channels.mattermost) {
        const mm = new MattermostChannel(bus, config.channels.mattermost);
        channelManager.register(mm);
      } else {
        console.warn(pc.yellow('[gateway] No Mattermost config found. Running without channels.'));
      }

      // Heartbeat — optional periodic tick that feeds tasks (or a generic
      // "check for anything to do" prompt) into the agent via processDirect.
      let heartbeat: HeartbeatService | null = null;
      if (config.heartbeat.enabled) {
        heartbeat = new HeartbeatService({
          workspace,
          provider,
          model: config.agent.model,
          intervalMinutes: config.heartbeat.intervalMinutes,
          onExecute: async (tasks) => {
            const content =
              tasks.length > 0
                ? `Heartbeat tasks:\n${tasks.map((t, i) => `${i + 1}. ${t}`).join('\n')}`
                : 'Heartbeat tick — check for anything to do.';
            return agentLoop.processDirect(content, 'system:heartbeat', 'system', 'heartbeat');
          },
          onNotify: async (_result) => {
            // Result already delivered via processDirect / message tool
          },
        });
      }

      // Graceful shutdown: stop every service, then exit the process.
      // Registered for both SIGINT (Ctrl+C) and SIGTERM (service managers).
      const shutdown = () => {
        console.info('\n[gateway] Shutting down...');
        agentLoop.stop();
        channelManager.stopAll();
        heartbeat?.stop();
        cronService.stop();
        process.exit(0);
      };
      process.on('SIGINT', shutdown);
      process.on('SIGTERM', shutdown);

      console.info(pc.green('[gateway] Starting...'));
      cronService.start();
      heartbeat?.start();
      // Both run until stop(); awaiting them keeps the process alive.
      await Promise.all([agentLoop.run(), channelManager.startAll()]);
    });
}

View File

@@ -1,4 +0,0 @@
import type { Command } from 'commander';
import type { Config } from '../config/types.ts';
/**
 * Signature shared by CLI command registrars (e.g. the agent and gateway
 * commands): each receives the root commander program, the loaded config,
 * and the resolved workspace path, and attaches its subcommand as a side
 * effect.
 */
export type CommandHandler = (program: Command, config: Config, workspace: string) => void;

View File

@@ -87,11 +87,7 @@ export const WebToolConfigSchema = z.object({
export type WebToolConfig = z.infer<typeof WebToolConfigSchema>;
export const ToolsConfigSchema = z.object({
exec: ExecToolConfigSchema.default(() => ({
timeout: 120,
denyPatterns: [],
restrictToWorkspace: false,
})),
exec: ExecToolConfigSchema.default(() => ({ timeout: 120, denyPatterns: [], restrictToWorkspace: false })),
web: WebToolConfigSchema.default(() => ({})),
restrictToWorkspace: z.boolean().default(false),
});

View File

@@ -125,11 +125,7 @@ export class CronService {
if (delayMs === null) return;
const nextRunAtMs = Date.now() + delayMs;
const updated: CronJob = {
...job,
state: { ...job.state, nextRunAtMs },
updatedAtMs: Date.now(),
};
const updated: CronJob = { ...job, state: { ...job.state, nextRunAtMs }, updatedAtMs: Date.now() };
this._jobs.set(job.id, updated);
this._save();
@@ -163,12 +159,7 @@ export class CronService {
} catch (err) {
const updated: CronJob = {
...job,
state: {
...job.state,
lastRunAtMs: Date.now(),
lastStatus: 'error',
lastError: String(err),
},
state: { ...job.state, lastRunAtMs: Date.now(), lastStatus: 'error', lastError: String(err) },
updatedAtMs: Date.now(),
};
this._jobs.set(job.id, updated);

View File

@@ -29,17 +29,8 @@ export const CronJobSchema = z.object({
name: z.string(),
enabled: z.boolean().default(true),
schedule: CronScheduleSchema,
payload: CronPayloadSchema.default(() => ({
kind: 'agent_turn' as const,
message: '',
deliver: false,
})),
state: CronJobStateSchema.default(() => ({
nextRunAtMs: null,
lastRunAtMs: null,
lastStatus: null,
lastError: null,
})),
payload: CronPayloadSchema.default(() => ({ kind: 'agent_turn' as const, message: '', deliver: false })),
state: CronJobStateSchema.default(() => ({ nextRunAtMs: null, lastRunAtMs: null, lastStatus: null, lastError: null })),
createdAtMs: z.number().int().default(0),
updatedAtMs: z.number().int().default(0),
deleteAfterRun: z.boolean().default(false),

View File

@@ -117,11 +117,9 @@ export class HeartbeatService {
return;
}
const action =
typeof decision.arguments['action'] === 'string' ? decision.arguments['action'] : 'skip';
const action = typeof decision.arguments['action'] === 'string' ? decision.arguments['action'] : 'skip';
if (action !== 'run') {
const reason =
typeof decision.arguments['reason'] === 'string' ? decision.arguments['reason'] : '';
const reason = typeof decision.arguments['reason'] === 'string' ? decision.arguments['reason'] : '';
console.debug(`[heartbeat] Decision: skip (${reason})`);
return;
}

View File

@@ -70,12 +70,7 @@ export class LLMProvider {
private _maxTokens: number;
private _temperature: number;
constructor(
providers: ProvidersConfig,
defaultModel: string,
maxTokens = 4096,
temperature = 0.7,
) {
constructor(providers: ProvidersConfig, defaultModel: string, maxTokens = 4096, temperature = 0.7) {
this._providers = providers;
this._defaultModel = defaultModel;
this._maxTokens = maxTokens;
@@ -111,9 +106,7 @@ export class LLMProvider {
case 'ollama': {
const cfg = this._providers.ollama;
// ollama-ai-provider returns LanguageModelV1; cast to LanguageModel (compatible at runtime)
return createOllama({ baseURL: cfg?.apiBase ?? 'http://localhost:11434/api' })(
remainder,
) as unknown as LanguageModel;
return createOllama({ baseURL: cfg?.apiBase ?? 'http://localhost:11434/api' })(remainder) as unknown as LanguageModel;
}
default: {
// No recognized prefix — fall through to openai-compatible
@@ -123,9 +116,7 @@ export class LLMProvider {
}
}
async chat(
opts: ChatOptions,
): Promise<{ response: LLMResponse; responseMessages: ModelMessage[] }> {
async chat(opts: ChatOptions): Promise<{ response: LLMResponse; responseMessages: ModelMessage[] }> {
const model = this._resolveModel(opts.model ?? this._defaultModel);
const maxTokens = opts.maxTokens ?? this._maxTokens;
const temperature = opts.temperature ?? this._temperature;
@@ -151,12 +142,7 @@ export class LLMProvider {
messages: opts.messages as ModelMessage[],
// biome-ignore lint/suspicious/noExplicitAny: AI SDK tools type is complex
tools: aiTools as any,
toolChoice:
opts.toolChoice === 'required'
? 'required'
: opts.toolChoice === 'none'
? 'none'
: 'auto',
toolChoice: opts.toolChoice === 'required' ? 'required' : opts.toolChoice === 'none' ? 'none' : 'auto',
maxOutputTokens: maxTokens,
temperature,
stopWhen: stepCountIs(1),
@@ -196,9 +182,7 @@ export class LLMProvider {
}
}
async chatWithRetry(
opts: ChatOptions,
): Promise<{ response: LLMResponse; responseMessages: ModelMessage[] }> {
async chatWithRetry(opts: ChatOptions): Promise<{ response: LLMResponse; responseMessages: ModelMessage[] }> {
for (const delay of RETRY_DELAYS_MS) {
const result = await this.chat(opts);
if (result.response.finishReason !== 'error') return result;
@@ -223,11 +207,7 @@ export function makeProvider(
}
/** Build a tool-result message to append after executing a tool call. */
export function toolResultMessage(
toolCallId: string,
toolName: string,
result: string,
): ModelMessage {
export function toolResultMessage(toolCallId: string, toolName: string, result: string): ModelMessage {
return {
role: 'tool',
content: [

View File

@@ -126,7 +126,10 @@ export class SessionManager {
save(session: Session): void {
session.updatedAt = new Date().toISOString();
const lines = [JSON.stringify(session.meta), ...session.messages.map((m) => JSON.stringify(m))];
const lines = [
JSON.stringify(session.meta),
...session.messages.map((m) => JSON.stringify(m)),
];
writeFileSync(this._filePath(session.key), lines.join('\n') + '\n', 'utf8');
}