mirror of
https://github.com/openclaw/openclaw.git
synced 2026-02-16 15:19:33 +00:00
refactor(src): split oversized modules
This commit is contained in:
@@ -119,7 +119,8 @@
|
||||
"protocol:gen": "tsx scripts/protocol-gen.ts",
|
||||
"protocol:gen:swift": "tsx scripts/protocol-gen-swift.ts",
|
||||
"protocol:check": "pnpm protocol:gen && pnpm protocol:gen:swift && git diff --exit-code -- dist/protocol.schema.json apps/macos/Sources/ClawdbotProtocol/GatewayModels.swift",
|
||||
"canvas:a2ui:bundle": "bash scripts/bundle-a2ui.sh"
|
||||
"canvas:a2ui:bundle": "bash scripts/bundle-a2ui.sh",
|
||||
"check:loc": "tsx scripts/check-ts-max-loc.ts --max 500"
|
||||
},
|
||||
"keywords": [],
|
||||
"author": "",
|
||||
|
||||
74
scripts/check-ts-max-loc.ts
Normal file
74
scripts/check-ts-max-loc.ts
Normal file
@@ -0,0 +1,74 @@
|
||||
import { existsSync } from "node:fs";
|
||||
import { readFile } from "node:fs/promises";
|
||||
import { execFileSync } from "node:child_process";
|
||||
|
||||
type ParsedArgs = {
|
||||
maxLines: number;
|
||||
};
|
||||
|
||||
function parseArgs(argv: string[]): ParsedArgs {
|
||||
let maxLines = 500;
|
||||
|
||||
for (let index = 0; index < argv.length; index++) {
|
||||
const arg = argv[index];
|
||||
if (arg === "--max") {
|
||||
const next = argv[index + 1];
|
||||
if (!next || Number.isNaN(Number(next))) throw new Error("Missing/invalid --max value");
|
||||
maxLines = Number(next);
|
||||
index++;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
return { maxLines };
|
||||
}
|
||||
|
||||
function gitLsFilesAll(): string[] {
|
||||
// Include untracked files too so local refactors don’t “pass” by accident.
|
||||
const stdout = execFileSync("git", ["ls-files", "--cached", "--others", "--exclude-standard"], {
|
||||
encoding: "utf8",
|
||||
});
|
||||
return stdout
|
||||
.split("\n")
|
||||
.map((line) => line.trim())
|
||||
.filter(Boolean);
|
||||
}
|
||||
|
||||
async function countLines(filePath: string): Promise<number> {
|
||||
const content = await readFile(filePath, "utf8");
|
||||
// Count physical lines. Keeps the rule simple + predictable.
|
||||
return content.split("\n").length;
|
||||
}
|
||||
|
||||
async function main() {
|
||||
// Makes `... | head` safe.
|
||||
process.stdout.on("error", (error: NodeJS.ErrnoException) => {
|
||||
if (error.code === "EPIPE") process.exit(0);
|
||||
throw error;
|
||||
});
|
||||
|
||||
const { maxLines } = parseArgs(process.argv.slice(2));
|
||||
const files = gitLsFilesAll()
|
||||
.filter((filePath) => existsSync(filePath))
|
||||
.filter((filePath) => filePath.endsWith(".ts") || filePath.endsWith(".tsx"));
|
||||
|
||||
const results = await Promise.all(
|
||||
files.map(async (filePath) => ({ filePath, lines: await countLines(filePath) })),
|
||||
);
|
||||
|
||||
const offenders = results
|
||||
.filter((result) => result.lines > maxLines)
|
||||
.sort((a, b) => b.lines - a.lines);
|
||||
|
||||
if (!offenders.length) return;
|
||||
|
||||
// Minimal, grep-friendly output.
|
||||
for (const offender of offenders) {
|
||||
// eslint-disable-next-line no-console
|
||||
console.log(`${offender.lines}\t${offender.filePath}`);
|
||||
}
|
||||
|
||||
process.exitCode = 1;
|
||||
}
|
||||
|
||||
await main();
|
||||
BIN
src/.DS_Store
vendored
Normal file
BIN
src/.DS_Store
vendored
Normal file
Binary file not shown.
BIN
src/agents/.DS_Store
vendored
Normal file
BIN
src/agents/.DS_Store
vendored
Normal file
Binary file not shown.
212
src/agents/apply-patch-update.ts
Normal file
212
src/agents/apply-patch-update.ts
Normal file
@@ -0,0 +1,212 @@
|
||||
import fs from "node:fs/promises";
|
||||
|
||||
type UpdateFileChunk = {
|
||||
changeContext?: string;
|
||||
oldLines: string[];
|
||||
newLines: string[];
|
||||
isEndOfFile: boolean;
|
||||
};
|
||||
|
||||
export async function applyUpdateHunk(
|
||||
filePath: string,
|
||||
chunks: UpdateFileChunk[],
|
||||
): Promise<string> {
|
||||
const originalContents = await fs.readFile(filePath, "utf8").catch((err) => {
|
||||
throw new Error(`Failed to read file to update ${filePath}: ${err}`);
|
||||
});
|
||||
|
||||
const originalLines = originalContents.split("\n");
|
||||
if (
|
||||
originalLines.length > 0 &&
|
||||
originalLines[originalLines.length - 1] === ""
|
||||
) {
|
||||
originalLines.pop();
|
||||
}
|
||||
|
||||
const replacements = computeReplacements(originalLines, filePath, chunks);
|
||||
let newLines = applyReplacements(originalLines, replacements);
|
||||
if (newLines.length === 0 || newLines[newLines.length - 1] !== "") {
|
||||
newLines = [...newLines, ""];
|
||||
}
|
||||
return newLines.join("\n");
|
||||
}
|
||||
|
||||
/**
 * Resolves each chunk of an update hunk against the current file lines.
 *
 * Returns a list of `[startIndex, removeCount, replacementLines]` tuples
 * sorted by startIndex, ready for applyReplacements. Matching is
 * sequential: each chunk is searched for at or after the position where
 * the previous chunk matched.
 *
 * @throws Error when a chunk's context line or old lines cannot be found.
 */
function computeReplacements(
  originalLines: string[],
  filePath: string,
  chunks: UpdateFileChunk[],
): Array<[number, number, string[]]> {
  const replacements: Array<[number, number, string[]]> = [];
  let lineIndex = 0;

  for (const chunk of chunks) {
    if (chunk.changeContext) {
      // Anchor the search just past the chunk's @@ context line.
      const ctxIndex = seekSequence(
        originalLines,
        [chunk.changeContext],
        lineIndex,
        false,
      );
      if (ctxIndex === null) {
        throw new Error(
          `Failed to find context '${chunk.changeContext}' in ${filePath}`,
        );
      }
      lineIndex = ctxIndex + 1;
    }

    if (chunk.oldLines.length === 0) {
      // Pure insertion (no lines to match): append at end of file, but
      // before a final empty line when one exists.
      const insertionIndex =
        originalLines.length > 0 &&
        originalLines[originalLines.length - 1] === ""
          ? originalLines.length - 1
          : originalLines.length;
      replacements.push([insertionIndex, 0, chunk.newLines]);
      continue;
    }

    let pattern = chunk.oldLines;
    let newSlice = chunk.newLines;
    let found = seekSequence(
      originalLines,
      pattern,
      lineIndex,
      chunk.isEndOfFile,
    );

    // Retry without a trailing empty line: patches frequently disagree
    // with the file only on that final blank. Trim the same blank from
    // the replacement so the result stays balanced.
    if (found === null && pattern[pattern.length - 1] === "") {
      pattern = pattern.slice(0, -1);
      if (newSlice.length > 0 && newSlice[newSlice.length - 1] === "") {
        newSlice = newSlice.slice(0, -1);
      }
      found = seekSequence(
        originalLines,
        pattern,
        lineIndex,
        chunk.isEndOfFile,
      );
    }

    if (found === null) {
      throw new Error(
        `Failed to find expected lines in ${filePath}:\n${chunk.oldLines.join("\n")}`,
      );
    }

    replacements.push([found, pattern.length, newSlice]);
    // Subsequent chunks must match after this one's replaced region.
    lineIndex = found + pattern.length;
  }

  // Ascending order lets the caller apply them back-to-front so earlier
  // indices remain valid while splicing.
  replacements.sort((a, b) => a[0] - b[0]);
  return replacements;
}
|
||||
|
||||
function applyReplacements(
|
||||
lines: string[],
|
||||
replacements: Array<[number, number, string[]]>,
|
||||
): string[] {
|
||||
const result = [...lines];
|
||||
for (const [startIndex, oldLen, newLines] of [...replacements].reverse()) {
|
||||
for (let i = 0; i < oldLen; i += 1) {
|
||||
if (startIndex < result.length) {
|
||||
result.splice(startIndex, 1);
|
||||
}
|
||||
}
|
||||
for (let i = 0; i < newLines.length; i += 1) {
|
||||
result.splice(startIndex + i, 0, newLines[i]);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
function seekSequence(
|
||||
lines: string[],
|
||||
pattern: string[],
|
||||
start: number,
|
||||
eof: boolean,
|
||||
): number | null {
|
||||
if (pattern.length === 0) return start;
|
||||
if (pattern.length > lines.length) return null;
|
||||
|
||||
const maxStart = lines.length - pattern.length;
|
||||
const searchStart = eof && lines.length >= pattern.length ? maxStart : start;
|
||||
if (searchStart > maxStart) return null;
|
||||
|
||||
for (let i = searchStart; i <= maxStart; i += 1) {
|
||||
if (linesMatch(lines, pattern, i, (value) => value)) return i;
|
||||
}
|
||||
for (let i = searchStart; i <= maxStart; i += 1) {
|
||||
if (linesMatch(lines, pattern, i, (value) => value.trimEnd())) return i;
|
||||
}
|
||||
for (let i = searchStart; i <= maxStart; i += 1) {
|
||||
if (linesMatch(lines, pattern, i, (value) => value.trim())) return i;
|
||||
}
|
||||
for (let i = searchStart; i <= maxStart; i += 1) {
|
||||
if (
|
||||
linesMatch(lines, pattern, i, (value) =>
|
||||
normalizePunctuation(value.trim()),
|
||||
)
|
||||
) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
function linesMatch(
|
||||
lines: string[],
|
||||
pattern: string[],
|
||||
start: number,
|
||||
normalize: (value: string) => string,
|
||||
): boolean {
|
||||
for (let idx = 0; idx < pattern.length; idx += 1) {
|
||||
if (normalize(lines[start + idx]) !== normalize(pattern[idx])) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
function normalizePunctuation(value: string): string {
|
||||
return Array.from(value)
|
||||
.map((char) => {
|
||||
switch (char) {
|
||||
case "\u2010":
|
||||
case "\u2011":
|
||||
case "\u2012":
|
||||
case "\u2013":
|
||||
case "\u2014":
|
||||
case "\u2015":
|
||||
case "\u2212":
|
||||
return "-";
|
||||
case "\u2018":
|
||||
case "\u2019":
|
||||
case "\u201A":
|
||||
case "\u201B":
|
||||
return "'";
|
||||
case "\u201C":
|
||||
case "\u201D":
|
||||
case "\u201E":
|
||||
case "\u201F":
|
||||
return '"';
|
||||
case "\u00A0":
|
||||
case "\u2002":
|
||||
case "\u2003":
|
||||
case "\u2004":
|
||||
case "\u2005":
|
||||
case "\u2006":
|
||||
case "\u2007":
|
||||
case "\u2008":
|
||||
case "\u2009":
|
||||
case "\u200A":
|
||||
case "\u202F":
|
||||
case "\u205F":
|
||||
case "\u3000":
|
||||
return " ";
|
||||
default:
|
||||
return char;
|
||||
}
|
||||
})
|
||||
.join("");
|
||||
}
|
||||
@@ -3,7 +3,7 @@ import os from "node:os";
|
||||
import path from "node:path";
|
||||
import type { AgentTool } from "@mariozechner/pi-agent-core";
|
||||
import { Type } from "@sinclair/typebox";
|
||||
|
||||
import { applyUpdateHunk } from "./apply-patch-update.js";
|
||||
import { assertSandboxPath } from "./sandbox-paths.js";
|
||||
|
||||
const BEGIN_PATCH_MARKER = "*** Begin Patch";
|
||||
@@ -483,207 +483,3 @@ function parseUpdateFileChunk(
|
||||
|
||||
return { chunk, consumed: parsedLines + startIndex };
|
||||
}
|
||||
|
||||
async function applyUpdateHunk(
|
||||
filePath: string,
|
||||
chunks: UpdateFileChunk[],
|
||||
): Promise<string> {
|
||||
const originalContents = await fs.readFile(filePath, "utf8").catch((err) => {
|
||||
throw new Error(`Failed to read file to update ${filePath}: ${err}`);
|
||||
});
|
||||
|
||||
const originalLines = originalContents.split("\n");
|
||||
if (
|
||||
originalLines.length > 0 &&
|
||||
originalLines[originalLines.length - 1] === ""
|
||||
) {
|
||||
originalLines.pop();
|
||||
}
|
||||
|
||||
const replacements = computeReplacements(originalLines, filePath, chunks);
|
||||
let newLines = applyReplacements(originalLines, replacements);
|
||||
if (newLines.length === 0 || newLines[newLines.length - 1] !== "") {
|
||||
newLines = [...newLines, ""];
|
||||
}
|
||||
return newLines.join("\n");
|
||||
}
|
||||
|
||||
function computeReplacements(
|
||||
originalLines: string[],
|
||||
filePath: string,
|
||||
chunks: UpdateFileChunk[],
|
||||
): Array<[number, number, string[]]> {
|
||||
const replacements: Array<[number, number, string[]]> = [];
|
||||
let lineIndex = 0;
|
||||
|
||||
for (const chunk of chunks) {
|
||||
if (chunk.changeContext) {
|
||||
const ctxIndex = seekSequence(
|
||||
originalLines,
|
||||
[chunk.changeContext],
|
||||
lineIndex,
|
||||
false,
|
||||
);
|
||||
if (ctxIndex === null) {
|
||||
throw new Error(
|
||||
`Failed to find context '${chunk.changeContext}' in ${filePath}`,
|
||||
);
|
||||
}
|
||||
lineIndex = ctxIndex + 1;
|
||||
}
|
||||
|
||||
if (chunk.oldLines.length === 0) {
|
||||
const insertionIndex =
|
||||
originalLines.length > 0 &&
|
||||
originalLines[originalLines.length - 1] === ""
|
||||
? originalLines.length - 1
|
||||
: originalLines.length;
|
||||
replacements.push([insertionIndex, 0, chunk.newLines]);
|
||||
continue;
|
||||
}
|
||||
|
||||
let pattern = chunk.oldLines;
|
||||
let newSlice = chunk.newLines;
|
||||
let found = seekSequence(
|
||||
originalLines,
|
||||
pattern,
|
||||
lineIndex,
|
||||
chunk.isEndOfFile,
|
||||
);
|
||||
|
||||
if (found === null && pattern[pattern.length - 1] === "") {
|
||||
pattern = pattern.slice(0, -1);
|
||||
if (newSlice.length > 0 && newSlice[newSlice.length - 1] === "") {
|
||||
newSlice = newSlice.slice(0, -1);
|
||||
}
|
||||
found = seekSequence(
|
||||
originalLines,
|
||||
pattern,
|
||||
lineIndex,
|
||||
chunk.isEndOfFile,
|
||||
);
|
||||
}
|
||||
|
||||
if (found === null) {
|
||||
throw new Error(
|
||||
`Failed to find expected lines in ${filePath}:\n${chunk.oldLines.join("\n")}`,
|
||||
);
|
||||
}
|
||||
|
||||
replacements.push([found, pattern.length, newSlice]);
|
||||
lineIndex = found + pattern.length;
|
||||
}
|
||||
|
||||
replacements.sort((a, b) => a[0] - b[0]);
|
||||
return replacements;
|
||||
}
|
||||
|
||||
function applyReplacements(
|
||||
lines: string[],
|
||||
replacements: Array<[number, number, string[]]>,
|
||||
): string[] {
|
||||
const result = [...lines];
|
||||
for (const [startIndex, oldLen, newLines] of [...replacements].reverse()) {
|
||||
for (let i = 0; i < oldLen; i += 1) {
|
||||
if (startIndex < result.length) {
|
||||
result.splice(startIndex, 1);
|
||||
}
|
||||
}
|
||||
for (let i = 0; i < newLines.length; i += 1) {
|
||||
result.splice(startIndex + i, 0, newLines[i]);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
function seekSequence(
|
||||
lines: string[],
|
||||
pattern: string[],
|
||||
start: number,
|
||||
eof: boolean,
|
||||
): number | null {
|
||||
if (pattern.length === 0) return start;
|
||||
if (pattern.length > lines.length) return null;
|
||||
|
||||
const maxStart = lines.length - pattern.length;
|
||||
const searchStart = eof && lines.length >= pattern.length ? maxStart : start;
|
||||
if (searchStart > maxStart) return null;
|
||||
|
||||
for (let i = searchStart; i <= maxStart; i += 1) {
|
||||
if (linesMatch(lines, pattern, i, (value) => value)) return i;
|
||||
}
|
||||
for (let i = searchStart; i <= maxStart; i += 1) {
|
||||
if (linesMatch(lines, pattern, i, (value) => value.trimEnd())) return i;
|
||||
}
|
||||
for (let i = searchStart; i <= maxStart; i += 1) {
|
||||
if (linesMatch(lines, pattern, i, (value) => value.trim())) return i;
|
||||
}
|
||||
for (let i = searchStart; i <= maxStart; i += 1) {
|
||||
if (
|
||||
linesMatch(lines, pattern, i, (value) =>
|
||||
normalizePunctuation(value.trim()),
|
||||
)
|
||||
) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
function linesMatch(
|
||||
lines: string[],
|
||||
pattern: string[],
|
||||
start: number,
|
||||
normalize: (value: string) => string,
|
||||
): boolean {
|
||||
for (let idx = 0; idx < pattern.length; idx += 1) {
|
||||
if (normalize(lines[start + idx]) !== normalize(pattern[idx])) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
function normalizePunctuation(value: string): string {
|
||||
return Array.from(value)
|
||||
.map((char) => {
|
||||
switch (char) {
|
||||
case "\u2010":
|
||||
case "\u2011":
|
||||
case "\u2012":
|
||||
case "\u2013":
|
||||
case "\u2014":
|
||||
case "\u2015":
|
||||
case "\u2212":
|
||||
return "-";
|
||||
case "\u2018":
|
||||
case "\u2019":
|
||||
case "\u201A":
|
||||
case "\u201B":
|
||||
return "'";
|
||||
case "\u201C":
|
||||
case "\u201D":
|
||||
case "\u201E":
|
||||
case "\u201F":
|
||||
return '"';
|
||||
case "\u00A0":
|
||||
case "\u2002":
|
||||
case "\u2003":
|
||||
case "\u2004":
|
||||
case "\u2005":
|
||||
case "\u2006":
|
||||
case "\u2007":
|
||||
case "\u2008":
|
||||
case "\u2009":
|
||||
case "\u200A":
|
||||
case "\u202F":
|
||||
case "\u205F":
|
||||
case "\u3000":
|
||||
return " ";
|
||||
default:
|
||||
return char;
|
||||
}
|
||||
})
|
||||
.join("");
|
||||
}
|
||||
|
||||
12
src/agents/auth-profiles.auth-profile-cooldowns.test.ts
Normal file
12
src/agents/auth-profiles.auth-profile-cooldowns.test.ts
Normal file
@@ -0,0 +1,12 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { calculateAuthProfileCooldownMs } from "./auth-profiles.js";
|
||||
|
||||
describe("auth profile cooldowns", () => {
|
||||
it("applies exponential backoff with a 1h cap", () => {
|
||||
expect(calculateAuthProfileCooldownMs(1)).toBe(60_000);
|
||||
expect(calculateAuthProfileCooldownMs(2)).toBe(5 * 60_000);
|
||||
expect(calculateAuthProfileCooldownMs(3)).toBe(25 * 60_000);
|
||||
expect(calculateAuthProfileCooldownMs(4)).toBe(60 * 60_000);
|
||||
expect(calculateAuthProfileCooldownMs(5)).toBe(60 * 60_000);
|
||||
});
|
||||
});
|
||||
50
src/agents/auth-profiles.ensureauthprofilestore.test.ts
Normal file
50
src/agents/auth-profiles.ensureauthprofilestore.test.ts
Normal file
@@ -0,0 +1,50 @@
|
||||
import fs from "node:fs";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { ensureAuthProfileStore } from "./auth-profiles.js";
|
||||
|
||||
describe("ensureAuthProfileStore", () => {
|
||||
it("migrates legacy auth.json and deletes it (PR #368)", () => {
|
||||
const agentDir = fs.mkdtempSync(
|
||||
path.join(os.tmpdir(), "clawdbot-auth-profiles-"),
|
||||
);
|
||||
try {
|
||||
const legacyPath = path.join(agentDir, "auth.json");
|
||||
fs.writeFileSync(
|
||||
legacyPath,
|
||||
`${JSON.stringify(
|
||||
{
|
||||
anthropic: {
|
||||
type: "oauth",
|
||||
provider: "anthropic",
|
||||
access: "access-token",
|
||||
refresh: "refresh-token",
|
||||
expires: Date.now() + 60_000,
|
||||
},
|
||||
},
|
||||
null,
|
||||
2,
|
||||
)}\n`,
|
||||
"utf8",
|
||||
);
|
||||
|
||||
const store = ensureAuthProfileStore(agentDir);
|
||||
expect(store.profiles["anthropic:default"]).toMatchObject({
|
||||
type: "oauth",
|
||||
provider: "anthropic",
|
||||
});
|
||||
|
||||
const migratedPath = path.join(agentDir, "auth-profiles.json");
|
||||
expect(fs.existsSync(migratedPath)).toBe(true);
|
||||
expect(fs.existsSync(legacyPath)).toBe(false);
|
||||
|
||||
// idempotent
|
||||
const store2 = ensureAuthProfileStore(agentDir);
|
||||
expect(store2.profiles["anthropic:default"]).toBeDefined();
|
||||
expect(fs.existsSync(legacyPath)).toBe(false);
|
||||
} finally {
|
||||
fs.rmSync(agentDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,122 @@
|
||||
import fs from "node:fs";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { withTempHome } from "../../test/helpers/temp-home.js";
|
||||
import {
|
||||
CLAUDE_CLI_PROFILE_ID,
|
||||
ensureAuthProfileStore,
|
||||
} from "./auth-profiles.js";
|
||||
|
||||
describe("external CLI credential sync", () => {
|
||||
it("syncs Claude CLI OAuth credentials into anthropic:claude-cli", async () => {
|
||||
const agentDir = fs.mkdtempSync(
|
||||
path.join(os.tmpdir(), "clawdbot-cli-sync-"),
|
||||
);
|
||||
try {
|
||||
// Create a temp home with Claude CLI credentials
|
||||
await withTempHome(
|
||||
async (tempHome) => {
|
||||
// Create Claude CLI credentials with refreshToken (OAuth)
|
||||
const claudeDir = path.join(tempHome, ".claude");
|
||||
fs.mkdirSync(claudeDir, { recursive: true });
|
||||
const claudeCreds = {
|
||||
claudeAiOauth: {
|
||||
accessToken: "fresh-access-token",
|
||||
refreshToken: "fresh-refresh-token",
|
||||
expiresAt: Date.now() + 60 * 60 * 1000, // 1 hour from now
|
||||
},
|
||||
};
|
||||
fs.writeFileSync(
|
||||
path.join(claudeDir, ".credentials.json"),
|
||||
JSON.stringify(claudeCreds),
|
||||
);
|
||||
|
||||
// Create empty auth-profiles.json
|
||||
const authPath = path.join(agentDir, "auth-profiles.json");
|
||||
fs.writeFileSync(
|
||||
authPath,
|
||||
JSON.stringify({
|
||||
version: 1,
|
||||
profiles: {
|
||||
"anthropic:default": {
|
||||
type: "api_key",
|
||||
provider: "anthropic",
|
||||
key: "sk-default",
|
||||
},
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
// Load the store - should sync from CLI as OAuth credential
|
||||
const store = ensureAuthProfileStore(agentDir);
|
||||
|
||||
expect(store.profiles["anthropic:default"]).toBeDefined();
|
||||
expect(
|
||||
(store.profiles["anthropic:default"] as { key: string }).key,
|
||||
).toBe("sk-default");
|
||||
expect(store.profiles[CLAUDE_CLI_PROFILE_ID]).toBeDefined();
|
||||
// Should be stored as OAuth credential (type: "oauth") for auto-refresh
|
||||
const cliProfile = store.profiles[CLAUDE_CLI_PROFILE_ID];
|
||||
expect(cliProfile.type).toBe("oauth");
|
||||
expect((cliProfile as { access: string }).access).toBe(
|
||||
"fresh-access-token",
|
||||
);
|
||||
expect((cliProfile as { refresh: string }).refresh).toBe(
|
||||
"fresh-refresh-token",
|
||||
);
|
||||
expect((cliProfile as { expires: number }).expires).toBeGreaterThan(
|
||||
Date.now(),
|
||||
);
|
||||
},
|
||||
{ prefix: "clawdbot-home-" },
|
||||
);
|
||||
} finally {
|
||||
fs.rmSync(agentDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
it("syncs Claude CLI credentials without refreshToken as token type", async () => {
|
||||
const agentDir = fs.mkdtempSync(
|
||||
path.join(os.tmpdir(), "clawdbot-cli-token-sync-"),
|
||||
);
|
||||
try {
|
||||
await withTempHome(
|
||||
async (tempHome) => {
|
||||
// Create Claude CLI credentials WITHOUT refreshToken (fallback to token type)
|
||||
const claudeDir = path.join(tempHome, ".claude");
|
||||
fs.mkdirSync(claudeDir, { recursive: true });
|
||||
const claudeCreds = {
|
||||
claudeAiOauth: {
|
||||
accessToken: "access-only-token",
|
||||
// No refreshToken - backward compatibility scenario
|
||||
expiresAt: Date.now() + 60 * 60 * 1000,
|
||||
},
|
||||
};
|
||||
fs.writeFileSync(
|
||||
path.join(claudeDir, ".credentials.json"),
|
||||
JSON.stringify(claudeCreds),
|
||||
);
|
||||
|
||||
const authPath = path.join(agentDir, "auth-profiles.json");
|
||||
fs.writeFileSync(
|
||||
authPath,
|
||||
JSON.stringify({ version: 1, profiles: {} }),
|
||||
);
|
||||
|
||||
const store = ensureAuthProfileStore(agentDir);
|
||||
|
||||
expect(store.profiles[CLAUDE_CLI_PROFILE_ID]).toBeDefined();
|
||||
// Should be stored as token type (no refresh capability)
|
||||
const cliProfile = store.profiles[CLAUDE_CLI_PROFILE_ID];
|
||||
expect(cliProfile.type).toBe("token");
|
||||
expect((cliProfile as { token: string }).token).toBe(
|
||||
"access-only-token",
|
||||
);
|
||||
},
|
||||
{ prefix: "clawdbot-home-" },
|
||||
);
|
||||
} finally {
|
||||
fs.rmSync(agentDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,111 @@
|
||||
import fs from "node:fs";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { withTempHome } from "../../test/helpers/temp-home.js";
|
||||
import {
|
||||
CLAUDE_CLI_PROFILE_ID,
|
||||
CODEX_CLI_PROFILE_ID,
|
||||
ensureAuthProfileStore,
|
||||
} from "./auth-profiles.js";
|
||||
|
||||
describe("external CLI credential sync", () => {
|
||||
it("upgrades token to oauth when Claude CLI gets refreshToken", async () => {
|
||||
const agentDir = fs.mkdtempSync(
|
||||
path.join(os.tmpdir(), "clawdbot-cli-upgrade-"),
|
||||
);
|
||||
try {
|
||||
await withTempHome(
|
||||
async (tempHome) => {
|
||||
// Create Claude CLI credentials with refreshToken
|
||||
const claudeDir = path.join(tempHome, ".claude");
|
||||
fs.mkdirSync(claudeDir, { recursive: true });
|
||||
fs.writeFileSync(
|
||||
path.join(claudeDir, ".credentials.json"),
|
||||
JSON.stringify({
|
||||
claudeAiOauth: {
|
||||
accessToken: "new-oauth-access",
|
||||
refreshToken: "new-refresh-token",
|
||||
expiresAt: Date.now() + 60 * 60 * 1000,
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
// Create auth-profiles.json with existing token type credential
|
||||
const authPath = path.join(agentDir, "auth-profiles.json");
|
||||
fs.writeFileSync(
|
||||
authPath,
|
||||
JSON.stringify({
|
||||
version: 1,
|
||||
profiles: {
|
||||
[CLAUDE_CLI_PROFILE_ID]: {
|
||||
type: "token",
|
||||
provider: "anthropic",
|
||||
token: "old-token",
|
||||
expires: Date.now() + 30 * 60 * 1000,
|
||||
},
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
const store = ensureAuthProfileStore(agentDir);
|
||||
|
||||
// Should upgrade from token to oauth
|
||||
const cliProfile = store.profiles[CLAUDE_CLI_PROFILE_ID];
|
||||
expect(cliProfile.type).toBe("oauth");
|
||||
expect((cliProfile as { access: string }).access).toBe(
|
||||
"new-oauth-access",
|
||||
);
|
||||
expect((cliProfile as { refresh: string }).refresh).toBe(
|
||||
"new-refresh-token",
|
||||
);
|
||||
},
|
||||
{ prefix: "clawdbot-home-" },
|
||||
);
|
||||
} finally {
|
||||
fs.rmSync(agentDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
it("syncs Codex CLI credentials into openai-codex:codex-cli", async () => {
|
||||
const agentDir = fs.mkdtempSync(
|
||||
path.join(os.tmpdir(), "clawdbot-codex-sync-"),
|
||||
);
|
||||
try {
|
||||
await withTempHome(
|
||||
async (tempHome) => {
|
||||
// Create Codex CLI credentials
|
||||
const codexDir = path.join(tempHome, ".codex");
|
||||
fs.mkdirSync(codexDir, { recursive: true });
|
||||
const codexCreds = {
|
||||
tokens: {
|
||||
access_token: "codex-access-token",
|
||||
refresh_token: "codex-refresh-token",
|
||||
},
|
||||
};
|
||||
const codexAuthPath = path.join(codexDir, "auth.json");
|
||||
fs.writeFileSync(codexAuthPath, JSON.stringify(codexCreds));
|
||||
|
||||
// Create empty auth-profiles.json
|
||||
const authPath = path.join(agentDir, "auth-profiles.json");
|
||||
fs.writeFileSync(
|
||||
authPath,
|
||||
JSON.stringify({
|
||||
version: 1,
|
||||
profiles: {},
|
||||
}),
|
||||
);
|
||||
|
||||
const store = ensureAuthProfileStore(agentDir);
|
||||
|
||||
expect(store.profiles[CODEX_CLI_PROFILE_ID]).toBeDefined();
|
||||
expect(
|
||||
(store.profiles[CODEX_CLI_PROFILE_ID] as { access: string }).access,
|
||||
).toBe("codex-access-token");
|
||||
},
|
||||
{ prefix: "clawdbot-home-" },
|
||||
);
|
||||
} finally {
|
||||
fs.rmSync(agentDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,116 @@
|
||||
import fs from "node:fs";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { withTempHome } from "../../test/helpers/temp-home.js";
|
||||
import {
|
||||
CLAUDE_CLI_PROFILE_ID,
|
||||
ensureAuthProfileStore,
|
||||
} from "./auth-profiles.js";
|
||||
|
||||
describe("external CLI credential sync", () => {
|
||||
it("does not overwrite API keys when syncing external CLI creds", async () => {
|
||||
const agentDir = fs.mkdtempSync(
|
||||
path.join(os.tmpdir(), "clawdbot-no-overwrite-"),
|
||||
);
|
||||
try {
|
||||
await withTempHome(
|
||||
async (tempHome) => {
|
||||
// Create Claude CLI credentials
|
||||
const claudeDir = path.join(tempHome, ".claude");
|
||||
fs.mkdirSync(claudeDir, { recursive: true });
|
||||
const claudeCreds = {
|
||||
claudeAiOauth: {
|
||||
accessToken: "cli-access",
|
||||
refreshToken: "cli-refresh",
|
||||
expiresAt: Date.now() + 30 * 60 * 1000,
|
||||
},
|
||||
};
|
||||
fs.writeFileSync(
|
||||
path.join(claudeDir, ".credentials.json"),
|
||||
JSON.stringify(claudeCreds),
|
||||
);
|
||||
|
||||
// Create auth-profiles.json with an API key
|
||||
const authPath = path.join(agentDir, "auth-profiles.json");
|
||||
fs.writeFileSync(
|
||||
authPath,
|
||||
JSON.stringify({
|
||||
version: 1,
|
||||
profiles: {
|
||||
"anthropic:default": {
|
||||
type: "api_key",
|
||||
provider: "anthropic",
|
||||
key: "sk-store",
|
||||
},
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
const store = ensureAuthProfileStore(agentDir);
|
||||
|
||||
// Should keep the store's API key and still add the CLI profile.
|
||||
expect(
|
||||
(store.profiles["anthropic:default"] as { key: string }).key,
|
||||
).toBe("sk-store");
|
||||
expect(store.profiles[CLAUDE_CLI_PROFILE_ID]).toBeDefined();
|
||||
},
|
||||
{ prefix: "clawdbot-home-" },
|
||||
);
|
||||
} finally {
|
||||
fs.rmSync(agentDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
it("prefers oauth over token even if token has later expiry (oauth enables auto-refresh)", async () => {
|
||||
const agentDir = fs.mkdtempSync(
|
||||
path.join(os.tmpdir(), "clawdbot-cli-oauth-preferred-"),
|
||||
);
|
||||
try {
|
||||
await withTempHome(
|
||||
async (tempHome) => {
|
||||
const claudeDir = path.join(tempHome, ".claude");
|
||||
fs.mkdirSync(claudeDir, { recursive: true });
|
||||
// CLI has OAuth credentials (with refresh token) expiring in 30 min
|
||||
fs.writeFileSync(
|
||||
path.join(claudeDir, ".credentials.json"),
|
||||
JSON.stringify({
|
||||
claudeAiOauth: {
|
||||
accessToken: "cli-oauth-access",
|
||||
refreshToken: "cli-refresh",
|
||||
expiresAt: Date.now() + 30 * 60 * 1000,
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
const authPath = path.join(agentDir, "auth-profiles.json");
|
||||
// Store has token credentials expiring in 60 min (later than CLI)
|
||||
fs.writeFileSync(
|
||||
authPath,
|
||||
JSON.stringify({
|
||||
version: 1,
|
||||
profiles: {
|
||||
[CLAUDE_CLI_PROFILE_ID]: {
|
||||
type: "token",
|
||||
provider: "anthropic",
|
||||
token: "store-token-access",
|
||||
expires: Date.now() + 60 * 60 * 1000,
|
||||
},
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
const store = ensureAuthProfileStore(agentDir);
|
||||
// OAuth should be preferred over token because it can auto-refresh
|
||||
const cliProfile = store.profiles[CLAUDE_CLI_PROFILE_ID];
|
||||
expect(cliProfile.type).toBe("oauth");
|
||||
expect((cliProfile as { access: string }).access).toBe(
|
||||
"cli-oauth-access",
|
||||
);
|
||||
},
|
||||
{ prefix: "clawdbot-home-" },
|
||||
);
|
||||
} finally {
|
||||
fs.rmSync(agentDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,117 @@
|
||||
import fs from "node:fs";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { withTempHome } from "../../test/helpers/temp-home.js";
|
||||
import {
|
||||
CLAUDE_CLI_PROFILE_ID,
|
||||
ensureAuthProfileStore,
|
||||
} from "./auth-profiles.js";
|
||||
|
||||
describe("external CLI credential sync", () => {
|
||||
it("does not overwrite fresher store oauth with older CLI oauth", async () => {
|
||||
const agentDir = fs.mkdtempSync(
|
||||
path.join(os.tmpdir(), "clawdbot-cli-oauth-no-downgrade-"),
|
||||
);
|
||||
try {
|
||||
await withTempHome(
|
||||
async (tempHome) => {
|
||||
const claudeDir = path.join(tempHome, ".claude");
|
||||
fs.mkdirSync(claudeDir, { recursive: true });
|
||||
// CLI has OAuth credentials expiring in 30 min
|
||||
fs.writeFileSync(
|
||||
path.join(claudeDir, ".credentials.json"),
|
||||
JSON.stringify({
|
||||
claudeAiOauth: {
|
||||
accessToken: "cli-oauth-access",
|
||||
refreshToken: "cli-refresh",
|
||||
expiresAt: Date.now() + 30 * 60 * 1000,
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
const authPath = path.join(agentDir, "auth-profiles.json");
|
||||
// Store has OAuth credentials expiring in 60 min (later than CLI)
|
||||
fs.writeFileSync(
|
||||
authPath,
|
||||
JSON.stringify({
|
||||
version: 1,
|
||||
profiles: {
|
||||
[CLAUDE_CLI_PROFILE_ID]: {
|
||||
type: "oauth",
|
||||
provider: "anthropic",
|
||||
access: "store-oauth-access",
|
||||
refresh: "store-refresh",
|
||||
expires: Date.now() + 60 * 60 * 1000,
|
||||
},
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
const store = ensureAuthProfileStore(agentDir);
|
||||
// Fresher store oauth should be kept
|
||||
const cliProfile = store.profiles[CLAUDE_CLI_PROFILE_ID];
|
||||
expect(cliProfile.type).toBe("oauth");
|
||||
expect((cliProfile as { access: string }).access).toBe(
|
||||
"store-oauth-access",
|
||||
);
|
||||
},
|
||||
{ prefix: "clawdbot-home-" },
|
||||
);
|
||||
} finally {
|
||||
fs.rmSync(agentDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
it("does not downgrade store oauth to token when CLI lacks refresh token", async () => {
|
||||
const agentDir = fs.mkdtempSync(
|
||||
path.join(os.tmpdir(), "clawdbot-cli-no-downgrade-oauth-"),
|
||||
);
|
||||
try {
|
||||
await withTempHome(
|
||||
async (tempHome) => {
|
||||
const claudeDir = path.join(tempHome, ".claude");
|
||||
fs.mkdirSync(claudeDir, { recursive: true });
|
||||
// CLI has token-only credentials (no refresh token)
|
||||
fs.writeFileSync(
|
||||
path.join(claudeDir, ".credentials.json"),
|
||||
JSON.stringify({
|
||||
claudeAiOauth: {
|
||||
accessToken: "cli-token-access",
|
||||
expiresAt: Date.now() + 30 * 60 * 1000,
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
const authPath = path.join(agentDir, "auth-profiles.json");
|
||||
// Store already has OAuth credentials with refresh token
|
||||
fs.writeFileSync(
|
||||
authPath,
|
||||
JSON.stringify({
|
||||
version: 1,
|
||||
profiles: {
|
||||
[CLAUDE_CLI_PROFILE_ID]: {
|
||||
type: "oauth",
|
||||
provider: "anthropic",
|
||||
access: "store-oauth-access",
|
||||
refresh: "store-refresh",
|
||||
expires: Date.now() + 60 * 60 * 1000,
|
||||
},
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
const store = ensureAuthProfileStore(agentDir);
|
||||
// Keep oauth to preserve auto-refresh capability
|
||||
const cliProfile = store.profiles[CLAUDE_CLI_PROFILE_ID];
|
||||
expect(cliProfile.type).toBe("oauth");
|
||||
expect((cliProfile as { access: string }).access).toBe(
|
||||
"store-oauth-access",
|
||||
);
|
||||
},
|
||||
{ prefix: "clawdbot-home-" },
|
||||
);
|
||||
} finally {
|
||||
fs.rmSync(agentDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,62 @@
|
||||
import fs from "node:fs";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { withTempHome } from "../../test/helpers/temp-home.js";
|
||||
import {
|
||||
CODEX_CLI_PROFILE_ID,
|
||||
ensureAuthProfileStore,
|
||||
} from "./auth-profiles.js";
|
||||
|
||||
describe("external CLI credential sync", () => {
|
||||
it("updates codex-cli profile when Codex CLI refresh token changes", async () => {
|
||||
const agentDir = fs.mkdtempSync(
|
||||
path.join(os.tmpdir(), "clawdbot-codex-refresh-sync-"),
|
||||
);
|
||||
try {
|
||||
await withTempHome(
|
||||
async (tempHome) => {
|
||||
const codexDir = path.join(tempHome, ".codex");
|
||||
fs.mkdirSync(codexDir, { recursive: true });
|
||||
const codexAuthPath = path.join(codexDir, "auth.json");
|
||||
fs.writeFileSync(
|
||||
codexAuthPath,
|
||||
JSON.stringify({
|
||||
tokens: {
|
||||
access_token: "same-access",
|
||||
refresh_token: "new-refresh",
|
||||
},
|
||||
}),
|
||||
);
|
||||
fs.utimesSync(codexAuthPath, new Date(), new Date());
|
||||
|
||||
const authPath = path.join(agentDir, "auth-profiles.json");
|
||||
fs.writeFileSync(
|
||||
authPath,
|
||||
JSON.stringify({
|
||||
version: 1,
|
||||
profiles: {
|
||||
[CODEX_CLI_PROFILE_ID]: {
|
||||
type: "oauth",
|
||||
provider: "openai-codex",
|
||||
access: "same-access",
|
||||
refresh: "old-refresh",
|
||||
expires: Date.now() - 1000,
|
||||
},
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
const store = ensureAuthProfileStore(agentDir);
|
||||
expect(
|
||||
(store.profiles[CODEX_CLI_PROFILE_ID] as { refresh: string })
|
||||
.refresh,
|
||||
).toBe("new-refresh");
|
||||
},
|
||||
{ prefix: "clawdbot-home-" },
|
||||
);
|
||||
} finally {
|
||||
fs.rmSync(agentDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
138
src/agents/auth-profiles.markauthprofilefailure.test.ts
Normal file
138
src/agents/auth-profiles.markauthprofilefailure.test.ts
Normal file
@@ -0,0 +1,138 @@
|
||||
import fs from "node:fs";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import {
|
||||
ensureAuthProfileStore,
|
||||
markAuthProfileFailure,
|
||||
} from "./auth-profiles.js";
|
||||
|
||||
describe("markAuthProfileFailure", () => {
|
||||
it("disables billing failures for ~5 hours by default", async () => {
|
||||
const agentDir = fs.mkdtempSync(path.join(os.tmpdir(), "clawdbot-auth-"));
|
||||
try {
|
||||
const authPath = path.join(agentDir, "auth-profiles.json");
|
||||
fs.writeFileSync(
|
||||
authPath,
|
||||
JSON.stringify({
|
||||
version: 1,
|
||||
profiles: {
|
||||
"anthropic:default": {
|
||||
type: "api_key",
|
||||
provider: "anthropic",
|
||||
key: "sk-default",
|
||||
},
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
const store = ensureAuthProfileStore(agentDir);
|
||||
const startedAt = Date.now();
|
||||
await markAuthProfileFailure({
|
||||
store,
|
||||
profileId: "anthropic:default",
|
||||
reason: "billing",
|
||||
agentDir,
|
||||
});
|
||||
|
||||
const disabledUntil =
|
||||
store.usageStats?.["anthropic:default"]?.disabledUntil;
|
||||
expect(typeof disabledUntil).toBe("number");
|
||||
const remainingMs = (disabledUntil as number) - startedAt;
|
||||
expect(remainingMs).toBeGreaterThan(4.5 * 60 * 60 * 1000);
|
||||
expect(remainingMs).toBeLessThan(5.5 * 60 * 60 * 1000);
|
||||
} finally {
|
||||
fs.rmSync(agentDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
it("honors per-provider billing backoff overrides", async () => {
|
||||
const agentDir = fs.mkdtempSync(path.join(os.tmpdir(), "clawdbot-auth-"));
|
||||
try {
|
||||
const authPath = path.join(agentDir, "auth-profiles.json");
|
||||
fs.writeFileSync(
|
||||
authPath,
|
||||
JSON.stringify({
|
||||
version: 1,
|
||||
profiles: {
|
||||
"anthropic:default": {
|
||||
type: "api_key",
|
||||
provider: "anthropic",
|
||||
key: "sk-default",
|
||||
},
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
const store = ensureAuthProfileStore(agentDir);
|
||||
const startedAt = Date.now();
|
||||
await markAuthProfileFailure({
|
||||
store,
|
||||
profileId: "anthropic:default",
|
||||
reason: "billing",
|
||||
agentDir,
|
||||
cfg: {
|
||||
auth: {
|
||||
cooldowns: {
|
||||
billingBackoffHoursByProvider: { Anthropic: 1 },
|
||||
billingMaxHours: 2,
|
||||
},
|
||||
},
|
||||
} as never,
|
||||
});
|
||||
|
||||
const disabledUntil =
|
||||
store.usageStats?.["anthropic:default"]?.disabledUntil;
|
||||
expect(typeof disabledUntil).toBe("number");
|
||||
const remainingMs = (disabledUntil as number) - startedAt;
|
||||
expect(remainingMs).toBeGreaterThan(0.8 * 60 * 60 * 1000);
|
||||
expect(remainingMs).toBeLessThan(1.2 * 60 * 60 * 1000);
|
||||
} finally {
|
||||
fs.rmSync(agentDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
it("resets backoff counters outside the failure window", async () => {
|
||||
const agentDir = fs.mkdtempSync(path.join(os.tmpdir(), "clawdbot-auth-"));
|
||||
try {
|
||||
const authPath = path.join(agentDir, "auth-profiles.json");
|
||||
const now = Date.now();
|
||||
fs.writeFileSync(
|
||||
authPath,
|
||||
JSON.stringify({
|
||||
version: 1,
|
||||
profiles: {
|
||||
"anthropic:default": {
|
||||
type: "api_key",
|
||||
provider: "anthropic",
|
||||
key: "sk-default",
|
||||
},
|
||||
},
|
||||
usageStats: {
|
||||
"anthropic:default": {
|
||||
errorCount: 9,
|
||||
failureCounts: { billing: 3 },
|
||||
lastFailureAt: now - 48 * 60 * 60 * 1000,
|
||||
},
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
const store = ensureAuthProfileStore(agentDir);
|
||||
await markAuthProfileFailure({
|
||||
store,
|
||||
profileId: "anthropic:default",
|
||||
reason: "billing",
|
||||
agentDir,
|
||||
cfg: {
|
||||
auth: { cooldowns: { failureWindowHours: 24 } },
|
||||
} as never,
|
||||
});
|
||||
|
||||
expect(store.usageStats?.["anthropic:default"]?.errorCount).toBe(1);
|
||||
expect(
|
||||
store.usageStats?.["anthropic:default"]?.failureCounts?.billing,
|
||||
).toBe(1);
|
||||
} finally {
|
||||
fs.rmSync(agentDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,169 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { resolveAuthProfileOrder } from "./auth-profiles.js";
|
||||
|
||||
describe("resolveAuthProfileOrder", () => {
|
||||
const store: AuthProfileStore = {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"anthropic:default": {
|
||||
type: "api_key",
|
||||
provider: "anthropic",
|
||||
key: "sk-default",
|
||||
},
|
||||
"anthropic:work": {
|
||||
type: "api_key",
|
||||
provider: "anthropic",
|
||||
key: "sk-work",
|
||||
},
|
||||
},
|
||||
};
|
||||
const cfg = {
|
||||
auth: {
|
||||
profiles: {
|
||||
"anthropic:default": { provider: "anthropic", mode: "api_key" },
|
||||
"anthropic:work": { provider: "anthropic", mode: "api_key" },
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
it("uses stored profiles when no config exists", () => {
|
||||
const order = resolveAuthProfileOrder({
|
||||
store,
|
||||
provider: "anthropic",
|
||||
});
|
||||
expect(order).toEqual(["anthropic:default", "anthropic:work"]);
|
||||
});
|
||||
it("prioritizes preferred profiles", () => {
|
||||
const order = resolveAuthProfileOrder({
|
||||
cfg,
|
||||
store,
|
||||
provider: "anthropic",
|
||||
preferredProfile: "anthropic:work",
|
||||
});
|
||||
expect(order[0]).toBe("anthropic:work");
|
||||
expect(order).toContain("anthropic:default");
|
||||
});
|
||||
it("drops explicit order entries that are missing from the store", () => {
|
||||
const order = resolveAuthProfileOrder({
|
||||
cfg: {
|
||||
auth: {
|
||||
order: {
|
||||
minimax: ["minimax:default", "minimax:prod"],
|
||||
},
|
||||
},
|
||||
},
|
||||
store: {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"minimax:prod": {
|
||||
type: "api_key",
|
||||
provider: "minimax",
|
||||
key: "sk-prod",
|
||||
},
|
||||
},
|
||||
},
|
||||
provider: "minimax",
|
||||
});
|
||||
expect(order).toEqual(["minimax:prod"]);
|
||||
});
|
||||
it("drops explicit order entries that belong to another provider", () => {
|
||||
const order = resolveAuthProfileOrder({
|
||||
cfg: {
|
||||
auth: {
|
||||
order: {
|
||||
minimax: ["openai:default", "minimax:prod"],
|
||||
},
|
||||
},
|
||||
},
|
||||
store: {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"openai:default": {
|
||||
type: "api_key",
|
||||
provider: "openai",
|
||||
key: "sk-openai",
|
||||
},
|
||||
"minimax:prod": {
|
||||
type: "api_key",
|
||||
provider: "minimax",
|
||||
key: "sk-mini",
|
||||
},
|
||||
},
|
||||
},
|
||||
provider: "minimax",
|
||||
});
|
||||
expect(order).toEqual(["minimax:prod"]);
|
||||
});
|
||||
it("drops token profiles with empty credentials", () => {
|
||||
const order = resolveAuthProfileOrder({
|
||||
cfg: {
|
||||
auth: {
|
||||
order: {
|
||||
minimax: ["minimax:default"],
|
||||
},
|
||||
},
|
||||
},
|
||||
store: {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"minimax:default": {
|
||||
type: "token",
|
||||
provider: "minimax",
|
||||
token: " ",
|
||||
},
|
||||
},
|
||||
},
|
||||
provider: "minimax",
|
||||
});
|
||||
expect(order).toEqual([]);
|
||||
});
|
||||
it("drops token profiles that are already expired", () => {
|
||||
const order = resolveAuthProfileOrder({
|
||||
cfg: {
|
||||
auth: {
|
||||
order: {
|
||||
minimax: ["minimax:default"],
|
||||
},
|
||||
},
|
||||
},
|
||||
store: {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"minimax:default": {
|
||||
type: "token",
|
||||
provider: "minimax",
|
||||
token: "sk-minimax",
|
||||
expires: Date.now() - 1000,
|
||||
},
|
||||
},
|
||||
},
|
||||
provider: "minimax",
|
||||
});
|
||||
expect(order).toEqual([]);
|
||||
});
|
||||
it("keeps oauth profiles that can refresh", () => {
|
||||
const order = resolveAuthProfileOrder({
|
||||
cfg: {
|
||||
auth: {
|
||||
order: {
|
||||
anthropic: ["anthropic:oauth"],
|
||||
},
|
||||
},
|
||||
},
|
||||
store: {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"anthropic:oauth": {
|
||||
type: "oauth",
|
||||
provider: "anthropic",
|
||||
access: "",
|
||||
refresh: "refresh-token",
|
||||
expires: Date.now() - 1000,
|
||||
},
|
||||
},
|
||||
},
|
||||
provider: "anthropic",
|
||||
});
|
||||
expect(order).toEqual(["anthropic:oauth"]);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,157 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { resolveAuthProfileOrder } from "./auth-profiles.js";
|
||||
|
||||
describe("resolveAuthProfileOrder", () => {
|
||||
const store: AuthProfileStore = {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"anthropic:default": {
|
||||
type: "api_key",
|
||||
provider: "anthropic",
|
||||
key: "sk-default",
|
||||
},
|
||||
"anthropic:work": {
|
||||
type: "api_key",
|
||||
provider: "anthropic",
|
||||
key: "sk-work",
|
||||
},
|
||||
},
|
||||
};
|
||||
const cfg = {
|
||||
auth: {
|
||||
profiles: {
|
||||
"anthropic:default": { provider: "anthropic", mode: "api_key" },
|
||||
"anthropic:work": { provider: "anthropic", mode: "api_key" },
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
it("does not prioritize lastGood over round-robin ordering", () => {
|
||||
const order = resolveAuthProfileOrder({
|
||||
cfg,
|
||||
store: {
|
||||
...store,
|
||||
lastGood: { anthropic: "anthropic:work" },
|
||||
usageStats: {
|
||||
"anthropic:default": { lastUsed: 100 },
|
||||
"anthropic:work": { lastUsed: 200 },
|
||||
},
|
||||
},
|
||||
provider: "anthropic",
|
||||
});
|
||||
expect(order[0]).toBe("anthropic:default");
|
||||
});
|
||||
it("uses explicit profiles when order is missing", () => {
|
||||
const order = resolveAuthProfileOrder({
|
||||
cfg,
|
||||
store,
|
||||
provider: "anthropic",
|
||||
});
|
||||
expect(order).toEqual(["anthropic:default", "anthropic:work"]);
|
||||
});
|
||||
it("uses configured order when provided", () => {
|
||||
const order = resolveAuthProfileOrder({
|
||||
cfg: {
|
||||
auth: {
|
||||
order: { anthropic: ["anthropic:work", "anthropic:default"] },
|
||||
profiles: cfg.auth.profiles,
|
||||
},
|
||||
},
|
||||
store,
|
||||
provider: "anthropic",
|
||||
});
|
||||
expect(order).toEqual(["anthropic:work", "anthropic:default"]);
|
||||
});
|
||||
it("prefers store order over config order", () => {
|
||||
const order = resolveAuthProfileOrder({
|
||||
cfg: {
|
||||
auth: {
|
||||
order: { anthropic: ["anthropic:default", "anthropic:work"] },
|
||||
profiles: cfg.auth.profiles,
|
||||
},
|
||||
},
|
||||
store: {
|
||||
...store,
|
||||
order: { anthropic: ["anthropic:work", "anthropic:default"] },
|
||||
},
|
||||
provider: "anthropic",
|
||||
});
|
||||
expect(order).toEqual(["anthropic:work", "anthropic:default"]);
|
||||
});
|
||||
it("pushes cooldown profiles to the end even with store order", () => {
|
||||
const now = Date.now();
|
||||
const order = resolveAuthProfileOrder({
|
||||
store: {
|
||||
...store,
|
||||
order: { anthropic: ["anthropic:default", "anthropic:work"] },
|
||||
usageStats: {
|
||||
"anthropic:default": { cooldownUntil: now + 60_000 },
|
||||
"anthropic:work": { lastUsed: 1 },
|
||||
},
|
||||
},
|
||||
provider: "anthropic",
|
||||
});
|
||||
expect(order).toEqual(["anthropic:work", "anthropic:default"]);
|
||||
});
|
||||
it("pushes cooldown profiles to the end even with configured order", () => {
|
||||
const now = Date.now();
|
||||
const order = resolveAuthProfileOrder({
|
||||
cfg: {
|
||||
auth: {
|
||||
order: { anthropic: ["anthropic:default", "anthropic:work"] },
|
||||
profiles: cfg.auth.profiles,
|
||||
},
|
||||
},
|
||||
store: {
|
||||
...store,
|
||||
usageStats: {
|
||||
"anthropic:default": { cooldownUntil: now + 60_000 },
|
||||
"anthropic:work": { lastUsed: 1 },
|
||||
},
|
||||
},
|
||||
provider: "anthropic",
|
||||
});
|
||||
expect(order).toEqual(["anthropic:work", "anthropic:default"]);
|
||||
});
|
||||
it("pushes disabled profiles to the end even with store order", () => {
|
||||
const now = Date.now();
|
||||
const order = resolveAuthProfileOrder({
|
||||
store: {
|
||||
...store,
|
||||
order: { anthropic: ["anthropic:default", "anthropic:work"] },
|
||||
usageStats: {
|
||||
"anthropic:default": {
|
||||
disabledUntil: now + 60_000,
|
||||
disabledReason: "billing",
|
||||
},
|
||||
"anthropic:work": { lastUsed: 1 },
|
||||
},
|
||||
},
|
||||
provider: "anthropic",
|
||||
});
|
||||
expect(order).toEqual(["anthropic:work", "anthropic:default"]);
|
||||
});
|
||||
it("pushes disabled profiles to the end even with configured order", () => {
|
||||
const now = Date.now();
|
||||
const order = resolveAuthProfileOrder({
|
||||
cfg: {
|
||||
auth: {
|
||||
order: { anthropic: ["anthropic:default", "anthropic:work"] },
|
||||
profiles: cfg.auth.profiles,
|
||||
},
|
||||
},
|
||||
store: {
|
||||
...store,
|
||||
usageStats: {
|
||||
"anthropic:default": {
|
||||
disabledUntil: now + 60_000,
|
||||
disabledReason: "billing",
|
||||
},
|
||||
"anthropic:work": { lastUsed: 1 },
|
||||
},
|
||||
},
|
||||
provider: "anthropic",
|
||||
});
|
||||
expect(order).toEqual(["anthropic:work", "anthropic:default"]);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,142 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { resolveAuthProfileOrder } from "./auth-profiles.js";
|
||||
|
||||
describe("resolveAuthProfileOrder", () => {
|
||||
const _store: AuthProfileStore = {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"anthropic:default": {
|
||||
type: "api_key",
|
||||
provider: "anthropic",
|
||||
key: "sk-default",
|
||||
},
|
||||
"anthropic:work": {
|
||||
type: "api_key",
|
||||
provider: "anthropic",
|
||||
key: "sk-work",
|
||||
},
|
||||
},
|
||||
};
|
||||
const _cfg = {
|
||||
auth: {
|
||||
profiles: {
|
||||
"anthropic:default": { provider: "anthropic", mode: "api_key" },
|
||||
"anthropic:work": { provider: "anthropic", mode: "api_key" },
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
it("normalizes z.ai aliases in auth.order", () => {
|
||||
const order = resolveAuthProfileOrder({
|
||||
cfg: {
|
||||
auth: {
|
||||
order: { "z.ai": ["zai:work", "zai:default"] },
|
||||
profiles: {
|
||||
"zai:default": { provider: "zai", mode: "api_key" },
|
||||
"zai:work": { provider: "zai", mode: "api_key" },
|
||||
},
|
||||
},
|
||||
},
|
||||
store: {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"zai:default": {
|
||||
type: "api_key",
|
||||
provider: "zai",
|
||||
key: "sk-default",
|
||||
},
|
||||
"zai:work": {
|
||||
type: "api_key",
|
||||
provider: "zai",
|
||||
key: "sk-work",
|
||||
},
|
||||
},
|
||||
},
|
||||
provider: "zai",
|
||||
});
|
||||
expect(order).toEqual(["zai:work", "zai:default"]);
|
||||
});
|
||||
it("normalizes provider casing in auth.order keys", () => {
|
||||
const order = resolveAuthProfileOrder({
|
||||
cfg: {
|
||||
auth: {
|
||||
order: { OpenAI: ["openai:work", "openai:default"] },
|
||||
profiles: {
|
||||
"openai:default": { provider: "openai", mode: "api_key" },
|
||||
"openai:work": { provider: "openai", mode: "api_key" },
|
||||
},
|
||||
},
|
||||
},
|
||||
store: {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"openai:default": {
|
||||
type: "api_key",
|
||||
provider: "openai",
|
||||
key: "sk-default",
|
||||
},
|
||||
"openai:work": {
|
||||
type: "api_key",
|
||||
provider: "openai",
|
||||
key: "sk-work",
|
||||
},
|
||||
},
|
||||
},
|
||||
provider: "openai",
|
||||
});
|
||||
expect(order).toEqual(["openai:work", "openai:default"]);
|
||||
});
|
||||
it("normalizes z.ai aliases in auth.profiles", () => {
|
||||
const order = resolveAuthProfileOrder({
|
||||
cfg: {
|
||||
auth: {
|
||||
profiles: {
|
||||
"zai:default": { provider: "z.ai", mode: "api_key" },
|
||||
"zai:work": { provider: "Z.AI", mode: "api_key" },
|
||||
},
|
||||
},
|
||||
},
|
||||
store: {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"zai:default": {
|
||||
type: "api_key",
|
||||
provider: "zai",
|
||||
key: "sk-default",
|
||||
},
|
||||
"zai:work": {
|
||||
type: "api_key",
|
||||
provider: "zai",
|
||||
key: "sk-work",
|
||||
},
|
||||
},
|
||||
},
|
||||
provider: "zai",
|
||||
});
|
||||
expect(order).toEqual(["zai:default", "zai:work"]);
|
||||
});
|
||||
it("prioritizes oauth profiles when order missing", () => {
|
||||
const mixedStore: AuthProfileStore = {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"anthropic:default": {
|
||||
type: "api_key",
|
||||
provider: "anthropic",
|
||||
key: "sk-default",
|
||||
},
|
||||
"anthropic:oauth": {
|
||||
type: "oauth",
|
||||
provider: "anthropic",
|
||||
access: "access-token",
|
||||
refresh: "refresh-token",
|
||||
expires: Date.now() + 60_000,
|
||||
},
|
||||
},
|
||||
};
|
||||
const order = resolveAuthProfileOrder({
|
||||
store: mixedStore,
|
||||
provider: "anthropic",
|
||||
});
|
||||
expect(order).toEqual(["anthropic:oauth", "anthropic:default"]);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,100 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { resolveAuthProfileOrder } from "./auth-profiles.js";
|
||||
|
||||
describe("resolveAuthProfileOrder", () => {
|
||||
const _store: AuthProfileStore = {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"anthropic:default": {
|
||||
type: "api_key",
|
||||
provider: "anthropic",
|
||||
key: "sk-default",
|
||||
},
|
||||
"anthropic:work": {
|
||||
type: "api_key",
|
||||
provider: "anthropic",
|
||||
key: "sk-work",
|
||||
},
|
||||
},
|
||||
};
|
||||
const _cfg = {
|
||||
auth: {
|
||||
profiles: {
|
||||
"anthropic:default": { provider: "anthropic", mode: "api_key" },
|
||||
"anthropic:work": { provider: "anthropic", mode: "api_key" },
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
it("orders by lastUsed when no explicit order exists", () => {
|
||||
const order = resolveAuthProfileOrder({
|
||||
store: {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"anthropic:a": {
|
||||
type: "oauth",
|
||||
provider: "anthropic",
|
||||
access: "access-token",
|
||||
refresh: "refresh-token",
|
||||
expires: Date.now() + 60_000,
|
||||
},
|
||||
"anthropic:b": {
|
||||
type: "api_key",
|
||||
provider: "anthropic",
|
||||
key: "sk-b",
|
||||
},
|
||||
"anthropic:c": {
|
||||
type: "api_key",
|
||||
provider: "anthropic",
|
||||
key: "sk-c",
|
||||
},
|
||||
},
|
||||
usageStats: {
|
||||
"anthropic:a": { lastUsed: 200 },
|
||||
"anthropic:b": { lastUsed: 100 },
|
||||
"anthropic:c": { lastUsed: 300 },
|
||||
},
|
||||
},
|
||||
provider: "anthropic",
|
||||
});
|
||||
expect(order).toEqual(["anthropic:a", "anthropic:b", "anthropic:c"]);
|
||||
});
|
||||
it("pushes cooldown profiles to the end, ordered by cooldown expiry", () => {
|
||||
const now = Date.now();
|
||||
const order = resolveAuthProfileOrder({
|
||||
store: {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"anthropic:ready": {
|
||||
type: "api_key",
|
||||
provider: "anthropic",
|
||||
key: "sk-ready",
|
||||
},
|
||||
"anthropic:cool1": {
|
||||
type: "oauth",
|
||||
provider: "anthropic",
|
||||
access: "access-token",
|
||||
refresh: "refresh-token",
|
||||
expires: now + 60_000,
|
||||
},
|
||||
"anthropic:cool2": {
|
||||
type: "api_key",
|
||||
provider: "anthropic",
|
||||
key: "sk-cool",
|
||||
},
|
||||
},
|
||||
usageStats: {
|
||||
"anthropic:ready": { lastUsed: 50 },
|
||||
"anthropic:cool1": { cooldownUntil: now + 5_000 },
|
||||
"anthropic:cool2": { cooldownUntil: now + 1_000 },
|
||||
},
|
||||
},
|
||||
provider: "anthropic",
|
||||
});
|
||||
expect(order).toEqual([
|
||||
"anthropic:ready",
|
||||
"anthropic:cool2",
|
||||
"anthropic:cool1",
|
||||
]);
|
||||
});
|
||||
});
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
24
src/agents/auth-profiles/constants.ts
Normal file
24
src/agents/auth-profiles/constants.ts
Normal file
@@ -0,0 +1,24 @@
|
||||
import { createSubsystemLogger } from "../../logging.js";
|
||||
|
||||
export const AUTH_STORE_VERSION = 1;
|
||||
export const AUTH_PROFILE_FILENAME = "auth-profiles.json";
|
||||
export const LEGACY_AUTH_FILENAME = "auth.json";
|
||||
|
||||
export const CLAUDE_CLI_PROFILE_ID = "anthropic:claude-cli";
|
||||
export const CODEX_CLI_PROFILE_ID = "openai-codex:codex-cli";
|
||||
|
||||
export const AUTH_STORE_LOCK_OPTIONS = {
|
||||
retries: {
|
||||
retries: 10,
|
||||
factor: 2,
|
||||
minTimeout: 100,
|
||||
maxTimeout: 10_000,
|
||||
randomize: true,
|
||||
},
|
||||
stale: 30_000,
|
||||
} as const;
|
||||
|
||||
export const EXTERNAL_CLI_SYNC_TTL_MS = 15 * 60 * 1000;
|
||||
export const EXTERNAL_CLI_NEAR_EXPIRY_MS = 10 * 60 * 1000;
|
||||
|
||||
export const log = createSubsystemLogger("agents/auth-profiles");
|
||||
19
src/agents/auth-profiles/display.ts
Normal file
19
src/agents/auth-profiles/display.ts
Normal file
@@ -0,0 +1,19 @@
|
||||
import type { ClawdbotConfig } from "../../config/config.js";
|
||||
import type { AuthProfileStore } from "./types.js";
|
||||
|
||||
export function resolveAuthProfileDisplayLabel(params: {
|
||||
cfg?: ClawdbotConfig;
|
||||
store: AuthProfileStore;
|
||||
profileId: string;
|
||||
}): string {
|
||||
const { cfg, store, profileId } = params;
|
||||
const profile = store.profiles[profileId];
|
||||
const configEmail = cfg?.auth?.profiles?.[profileId]?.email?.trim();
|
||||
const email =
|
||||
configEmail ||
|
||||
(profile && "email" in profile
|
||||
? (profile.email as string | undefined)?.trim()
|
||||
: undefined);
|
||||
if (email) return `${profileId} (${email})`;
|
||||
return profileId;
|
||||
}
|
||||
44
src/agents/auth-profiles/doctor.ts
Normal file
44
src/agents/auth-profiles/doctor.ts
Normal file
@@ -0,0 +1,44 @@
|
||||
import type { ClawdbotConfig } from "../../config/config.js";
|
||||
import { normalizeProviderId } from "../model-selection.js";
|
||||
import { listProfilesForProvider } from "./profiles.js";
|
||||
import { suggestOAuthProfileIdForLegacyDefault } from "./repair.js";
|
||||
import type { AuthProfileStore } from "./types.js";
|
||||
|
||||
export function formatAuthDoctorHint(params: {
|
||||
cfg?: ClawdbotConfig;
|
||||
store: AuthProfileStore;
|
||||
provider: string;
|
||||
profileId?: string;
|
||||
}): string {
|
||||
const providerKey = normalizeProviderId(params.provider);
|
||||
if (providerKey !== "anthropic") return "";
|
||||
|
||||
const legacyProfileId = params.profileId ?? "anthropic:default";
|
||||
const suggested = suggestOAuthProfileIdForLegacyDefault({
|
||||
cfg: params.cfg,
|
||||
store: params.store,
|
||||
provider: providerKey,
|
||||
legacyProfileId,
|
||||
});
|
||||
if (!suggested || suggested === legacyProfileId) return "";
|
||||
|
||||
const storeOauthProfiles = listProfilesForProvider(params.store, providerKey)
|
||||
.filter((id) => params.store.profiles[id]?.type === "oauth")
|
||||
.join(", ");
|
||||
|
||||
const cfgMode = params.cfg?.auth?.profiles?.[legacyProfileId]?.mode;
|
||||
const cfgProvider = params.cfg?.auth?.profiles?.[legacyProfileId]?.provider;
|
||||
|
||||
return [
|
||||
"Doctor hint (for GitHub issue):",
|
||||
`- provider: ${providerKey}`,
|
||||
`- config: ${legacyProfileId}${
|
||||
cfgProvider || cfgMode
|
||||
? ` (provider=${cfgProvider ?? "?"}, mode=${cfgMode ?? "?"})`
|
||||
: ""
|
||||
}`,
|
||||
`- auth store oauth profiles: ${storeOauthProfiles || "(none)"}`,
|
||||
`- suggested profile: ${suggested}`,
|
||||
'Fix: run "clawdbot doctor --yes"',
|
||||
].join("\n");
|
||||
}
|
||||
183
src/agents/auth-profiles/external-cli-sync.ts
Normal file
183
src/agents/auth-profiles/external-cli-sync.ts
Normal file
@@ -0,0 +1,183 @@
|
||||
import {
|
||||
readClaudeCliCredentialsCached,
|
||||
readCodexCliCredentialsCached,
|
||||
} from "../cli-credentials.js";
|
||||
import {
|
||||
CLAUDE_CLI_PROFILE_ID,
|
||||
CODEX_CLI_PROFILE_ID,
|
||||
EXTERNAL_CLI_NEAR_EXPIRY_MS,
|
||||
EXTERNAL_CLI_SYNC_TTL_MS,
|
||||
log,
|
||||
} from "./constants.js";
|
||||
import type {
|
||||
AuthProfileCredential,
|
||||
AuthProfileStore,
|
||||
OAuthCredential,
|
||||
TokenCredential,
|
||||
} from "./types.js";
|
||||
|
||||
function shallowEqualOAuthCredentials(
|
||||
a: OAuthCredential | undefined,
|
||||
b: OAuthCredential,
|
||||
): boolean {
|
||||
if (!a) return false;
|
||||
if (a.type !== "oauth") return false;
|
||||
return (
|
||||
a.provider === b.provider &&
|
||||
a.access === b.access &&
|
||||
a.refresh === b.refresh &&
|
||||
a.expires === b.expires &&
|
||||
a.email === b.email &&
|
||||
a.enterpriseUrl === b.enterpriseUrl &&
|
||||
a.projectId === b.projectId &&
|
||||
a.accountId === b.accountId
|
||||
);
|
||||
}
|
||||
|
||||
function shallowEqualTokenCredentials(
|
||||
a: TokenCredential | undefined,
|
||||
b: TokenCredential,
|
||||
): boolean {
|
||||
if (!a) return false;
|
||||
if (a.type !== "token") return false;
|
||||
return (
|
||||
a.provider === b.provider &&
|
||||
a.token === b.token &&
|
||||
a.expires === b.expires &&
|
||||
a.email === b.email
|
||||
);
|
||||
}
|
||||
|
||||
function isExternalProfileFresh(
|
||||
cred: AuthProfileCredential | undefined,
|
||||
now: number,
|
||||
): boolean {
|
||||
if (!cred) return false;
|
||||
if (cred.type !== "oauth" && cred.type !== "token") return false;
|
||||
if (cred.provider !== "anthropic" && cred.provider !== "openai-codex") {
|
||||
return false;
|
||||
}
|
||||
if (typeof cred.expires !== "number") return true;
|
||||
return cred.expires > now + EXTERNAL_CLI_NEAR_EXPIRY_MS;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sync OAuth credentials from external CLI tools (Claude CLI, Codex CLI) into the store.
|
||||
* This allows clawdbot to use the same credentials as these tools without requiring
|
||||
* separate authentication, and keeps credentials in sync when CLI tools refresh tokens.
|
||||
*
|
||||
* Returns true if any credentials were updated.
|
||||
*/
|
||||
export function syncExternalCliCredentials(
|
||||
store: AuthProfileStore,
|
||||
options?: { allowKeychainPrompt?: boolean },
|
||||
): boolean {
|
||||
let mutated = false;
|
||||
const now = Date.now();
|
||||
|
||||
// Sync from Claude CLI (supports both OAuth and Token credentials)
|
||||
const existingClaude = store.profiles[CLAUDE_CLI_PROFILE_ID];
|
||||
const shouldSyncClaude =
|
||||
!existingClaude ||
|
||||
existingClaude.provider !== "anthropic" ||
|
||||
existingClaude.type === "token" ||
|
||||
!isExternalProfileFresh(existingClaude, now);
|
||||
const claudeCreds = shouldSyncClaude
|
||||
? readClaudeCliCredentialsCached({
|
||||
allowKeychainPrompt: options?.allowKeychainPrompt,
|
||||
ttlMs: EXTERNAL_CLI_SYNC_TTL_MS,
|
||||
})
|
||||
: null;
|
||||
if (claudeCreds) {
|
||||
const existing = store.profiles[CLAUDE_CLI_PROFILE_ID];
|
||||
const claudeCredsExpires = claudeCreds.expires ?? 0;
|
||||
|
||||
// Determine if we should update based on credential comparison
|
||||
let shouldUpdate = false;
|
||||
let isEqual = false;
|
||||
|
||||
if (claudeCreds.type === "oauth") {
|
||||
const existingOAuth = existing?.type === "oauth" ? existing : undefined;
|
||||
isEqual = shallowEqualOAuthCredentials(existingOAuth, claudeCreds);
|
||||
// Update if: no existing profile, type changed to oauth, expired, or CLI has newer token
|
||||
shouldUpdate =
|
||||
!existingOAuth ||
|
||||
existingOAuth.provider !== "anthropic" ||
|
||||
existingOAuth.expires <= now ||
|
||||
(claudeCredsExpires > now &&
|
||||
claudeCredsExpires > existingOAuth.expires);
|
||||
} else {
|
||||
const existingToken = existing?.type === "token" ? existing : undefined;
|
||||
isEqual = shallowEqualTokenCredentials(existingToken, claudeCreds);
|
||||
// Update if: no existing profile, expired, or CLI has newer token
|
||||
shouldUpdate =
|
||||
!existingToken ||
|
||||
existingToken.provider !== "anthropic" ||
|
||||
(existingToken.expires ?? 0) <= now ||
|
||||
(claudeCredsExpires > now &&
|
||||
claudeCredsExpires > (existingToken.expires ?? 0));
|
||||
}
|
||||
|
||||
// Also update if credential type changed (token -> oauth upgrade)
|
||||
if (existing && existing.type !== claudeCreds.type) {
|
||||
// Prefer oauth over token (enables auto-refresh)
|
||||
if (claudeCreds.type === "oauth") {
|
||||
shouldUpdate = true;
|
||||
isEqual = false;
|
||||
}
|
||||
}
|
||||
|
||||
// Avoid downgrading from oauth to token-only credentials.
|
||||
if (existing?.type === "oauth" && claudeCreds.type === "token") {
|
||||
shouldUpdate = false;
|
||||
}
|
||||
|
||||
if (shouldUpdate && !isEqual) {
|
||||
store.profiles[CLAUDE_CLI_PROFILE_ID] = claudeCreds;
|
||||
mutated = true;
|
||||
log.info("synced anthropic credentials from claude cli", {
|
||||
profileId: CLAUDE_CLI_PROFILE_ID,
|
||||
type: claudeCreds.type,
|
||||
expires:
|
||||
typeof claudeCreds.expires === "number"
|
||||
? new Date(claudeCreds.expires).toISOString()
|
||||
: "unknown",
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Sync from Codex CLI
|
||||
const existingCodex = store.profiles[CODEX_CLI_PROFILE_ID];
|
||||
const shouldSyncCodex =
|
||||
!existingCodex ||
|
||||
existingCodex.provider !== "openai-codex" ||
|
||||
!isExternalProfileFresh(existingCodex, now);
|
||||
const codexCreds = shouldSyncCodex
|
||||
? readCodexCliCredentialsCached({ ttlMs: EXTERNAL_CLI_SYNC_TTL_MS })
|
||||
: null;
|
||||
if (codexCreds) {
|
||||
const existing = store.profiles[CODEX_CLI_PROFILE_ID];
|
||||
const existingOAuth = existing?.type === "oauth" ? existing : undefined;
|
||||
|
||||
// Codex creds don't carry expiry; use file mtime heuristic for freshness.
|
||||
const shouldUpdate =
|
||||
!existingOAuth ||
|
||||
existingOAuth.provider !== "openai-codex" ||
|
||||
existingOAuth.expires <= now ||
|
||||
codexCreds.expires > existingOAuth.expires;
|
||||
|
||||
if (
|
||||
shouldUpdate &&
|
||||
!shallowEqualOAuthCredentials(existingOAuth, codexCreds)
|
||||
) {
|
||||
store.profiles[CODEX_CLI_PROFILE_ID] = codexCreds;
|
||||
mutated = true;
|
||||
log.info("synced openai-codex credentials from codex cli", {
|
||||
profileId: CODEX_CLI_PROFILE_ID,
|
||||
expires: new Date(codexCreds.expires).toISOString(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return mutated;
|
||||
}
|
||||
224
src/agents/auth-profiles/oauth.ts
Normal file
224
src/agents/auth-profiles/oauth.ts
Normal file
@@ -0,0 +1,224 @@
|
||||
import {
|
||||
getOAuthApiKey,
|
||||
type OAuthCredentials,
|
||||
type OAuthProvider,
|
||||
} from "@mariozechner/pi-ai";
|
||||
import lockfile from "proper-lockfile";
|
||||
|
||||
import type { ClawdbotConfig } from "../../config/config.js";
|
||||
import { refreshChutesTokens } from "../chutes-oauth.js";
|
||||
import { writeClaudeCliCredentials } from "../cli-credentials.js";
|
||||
import { AUTH_STORE_LOCK_OPTIONS, CLAUDE_CLI_PROFILE_ID } from "./constants.js";
|
||||
import { formatAuthDoctorHint } from "./doctor.js";
|
||||
import { ensureAuthStoreFile, resolveAuthStorePath } from "./paths.js";
|
||||
import { suggestOAuthProfileIdForLegacyDefault } from "./repair.js";
|
||||
import { ensureAuthProfileStore, saveAuthProfileStore } from "./store.js";
|
||||
import type { AuthProfileStore } from "./types.js";
|
||||
|
||||
function buildOAuthApiKey(
|
||||
provider: string,
|
||||
credentials: OAuthCredentials,
|
||||
): string {
|
||||
const needsProjectId =
|
||||
provider === "google-gemini-cli" || provider === "google-antigravity";
|
||||
return needsProjectId
|
||||
? JSON.stringify({
|
||||
token: credentials.access,
|
||||
projectId: credentials.projectId,
|
||||
})
|
||||
: credentials.access;
|
||||
}
|
||||
|
||||
/**
 * Refresh the OAuth token for a profile while holding an exclusive lock on
 * the auth store file, so concurrent processes cannot double-refresh or
 * clobber each other's writes.
 *
 * Returns the usable api key plus the (possibly refreshed) credentials, or
 * null when the profile is missing, not an oauth profile, or the provider's
 * refresh returned no result.
 */
async function refreshOAuthTokenWithLock(params: {
  profileId: string;
  agentDir?: string;
}): Promise<{ apiKey: string; newCredentials: OAuthCredentials } | null> {
  const authPath = resolveAuthStorePath(params.agentDir);
  // The lock target must exist before proper-lockfile can lock it.
  ensureAuthStoreFile(authPath);

  let release: (() => Promise<void>) | undefined;
  try {
    release = await lockfile.lock(authPath, {
      ...AUTH_STORE_LOCK_OPTIONS,
    });

    // Re-read the store under the lock: another process may have refreshed
    // the token while we were waiting for the lock.
    const store = ensureAuthProfileStore(params.agentDir);
    const cred = store.profiles[params.profileId];
    if (!cred || cred.type !== "oauth") return null;

    // Token still valid (possibly refreshed concurrently) — nothing to do.
    if (Date.now() < cred.expires) {
      return {
        apiKey: buildOAuthApiKey(cred.provider, cred),
        newCredentials: cred,
      };
    }

    const oauthCreds: Record<string, OAuthCredentials> = {
      [cred.provider]: cred,
    };

    // Chutes has its own refresh flow; every other provider goes through
    // the shared pi-ai helper.
    const result =
      String(cred.provider) === "chutes"
        ? await (async () => {
            const newCredentials = await refreshChutesTokens({
              credential: cred,
            });
            return { apiKey: newCredentials.access, newCredentials };
          })()
        : await getOAuthApiKey(cred.provider as OAuthProvider, oauthCreds);
    if (!result) return null;
    // Merge refreshed fields over the stored credential and persist.
    store.profiles[params.profileId] = {
      ...cred,
      ...result.newCredentials,
      type: "oauth",
    };
    saveAuthProfileStore(store, params.agentDir);

    // Sync refreshed credentials back to Claude CLI if this is the claude-cli profile
    // This ensures Claude Code continues to work after ClawdBot refreshes the token
    if (
      params.profileId === CLAUDE_CLI_PROFILE_ID &&
      cred.provider === "anthropic"
    ) {
      writeClaudeCliCredentials(result.newCredentials);
    }

    return result;
  } finally {
    if (release) {
      try {
        await release();
      } catch {
        // ignore unlock errors
      }
    }
  }
}
|
||||
|
||||
async function tryResolveOAuthProfile(params: {
|
||||
cfg?: ClawdbotConfig;
|
||||
store: AuthProfileStore;
|
||||
profileId: string;
|
||||
agentDir?: string;
|
||||
}): Promise<{ apiKey: string; provider: string; email?: string } | null> {
|
||||
const { cfg, store, profileId } = params;
|
||||
const cred = store.profiles[profileId];
|
||||
if (!cred || cred.type !== "oauth") return null;
|
||||
const profileConfig = cfg?.auth?.profiles?.[profileId];
|
||||
if (profileConfig && profileConfig.provider !== cred.provider) return null;
|
||||
if (profileConfig && profileConfig.mode !== cred.type) return null;
|
||||
|
||||
if (Date.now() < cred.expires) {
|
||||
return {
|
||||
apiKey: buildOAuthApiKey(cred.provider, cred),
|
||||
provider: cred.provider,
|
||||
email: cred.email,
|
||||
};
|
||||
}
|
||||
|
||||
const refreshed = await refreshOAuthTokenWithLock({
|
||||
profileId,
|
||||
agentDir: params.agentDir,
|
||||
});
|
||||
if (!refreshed) return null;
|
||||
return {
|
||||
apiKey: refreshed.apiKey,
|
||||
provider: cred.provider,
|
||||
email: cred.email,
|
||||
};
|
||||
}
|
||||
|
||||
/**
 * Resolve a usable api key for a stored auth profile.
 *
 * Handles all credential kinds: api_key (returned as-is), token (checked for
 * expiry), and oauth (refreshed under a file lock when expired). On refresh
 * failure it tries, in order: a token another process refreshed concurrently,
 * then a fallback profile id suggested for legacy "<provider>:default" ids;
 * otherwise it rethrows with an auth-doctor hint appended.
 *
 * Returns null when the profile is missing, mismatches its config entry, or
 * holds an unusable/expired non-refreshable credential.
 */
export async function resolveApiKeyForProfile(params: {
  cfg?: ClawdbotConfig;
  store: AuthProfileStore;
  profileId: string;
  agentDir?: string;
}): Promise<{ apiKey: string; provider: string; email?: string } | null> {
  const { cfg, store, profileId } = params;
  const cred = store.profiles[profileId];
  if (!cred) return null;
  // The config entry (when present) must agree with the stored credential.
  const profileConfig = cfg?.auth?.profiles?.[profileId];
  if (profileConfig && profileConfig.provider !== cred.provider) return null;
  if (profileConfig && profileConfig.mode !== cred.type) {
    // Compatibility: treat "oauth" config as compatible with stored token profiles.
    if (!(profileConfig.mode === "oauth" && cred.type === "token")) return null;
  }

  if (cred.type === "api_key") {
    return { apiKey: cred.key, provider: cred.provider, email: cred.email };
  }
  if (cred.type === "token") {
    const token = cred.token?.trim();
    if (!token) return null;
    // A positive, finite expiry in the past makes the token unusable.
    if (
      typeof cred.expires === "number" &&
      Number.isFinite(cred.expires) &&
      cred.expires > 0 &&
      Date.now() >= cred.expires
    ) {
      return null;
    }
    return { apiKey: token, provider: cred.provider, email: cred.email };
  }
  // oauth: an unexpired access token is used directly.
  if (Date.now() < cred.expires) {
    return {
      apiKey: buildOAuthApiKey(cred.provider, cred),
      provider: cred.provider,
      email: cred.email,
    };
  }

  try {
    const result = await refreshOAuthTokenWithLock({
      profileId,
      agentDir: params.agentDir,
    });
    if (!result) return null;
    return {
      apiKey: result.apiKey,
      provider: cred.provider,
      email: cred.email,
    };
  } catch (error) {
    // Refresh failed. Another process may have refreshed meanwhile, so
    // re-read the store and use that token if it is now valid.
    const refreshedStore = ensureAuthProfileStore(params.agentDir);
    const refreshed = refreshedStore.profiles[profileId];
    if (refreshed?.type === "oauth" && Date.now() < refreshed.expires) {
      return {
        apiKey: buildOAuthApiKey(refreshed.provider, refreshed),
        provider: refreshed.provider,
        email: refreshed.email ?? cred.email,
      };
    }
    // Legacy "<provider>:default" ids may map to a renamed profile; try it.
    const fallbackProfileId = suggestOAuthProfileIdForLegacyDefault({
      cfg,
      store: refreshedStore,
      provider: cred.provider,
      legacyProfileId: profileId,
    });
    if (fallbackProfileId && fallbackProfileId !== profileId) {
      try {
        const fallbackResolved = await tryResolveOAuthProfile({
          cfg,
          store: refreshedStore,
          profileId: fallbackProfileId,
          agentDir: params.agentDir,
        });
        if (fallbackResolved) return fallbackResolved;
      } catch {
        // keep original error
      }
    }
    const message = error instanceof Error ? error.message : String(error);
    const hint = formatAuthDoctorHint({
      cfg,
      store: refreshedStore,
      provider: cred.provider,
      profileId,
    });
    throw new Error(
      `OAuth token refresh failed for ${cred.provider}: ${message}. ` +
        "Please try again or re-authenticate." +
        (hint ? `\n\n${hint}` : ""),
    );
  }
}
|
||||
199
src/agents/auth-profiles/order.ts
Normal file
199
src/agents/auth-profiles/order.ts
Normal file
@@ -0,0 +1,199 @@
|
||||
import type { ClawdbotConfig } from "../../config/config.js";
|
||||
import { normalizeProviderId } from "../model-selection.js";
|
||||
import { listProfilesForProvider } from "./profiles.js";
|
||||
import type { AuthProfileStore } from "./types.js";
|
||||
import { isProfileInCooldown } from "./usage.js";
|
||||
|
||||
function resolveProfileUnusableUntil(stats: {
|
||||
cooldownUntil?: number;
|
||||
disabledUntil?: number;
|
||||
}): number | null {
|
||||
const values = [stats.cooldownUntil, stats.disabledUntil]
|
||||
.filter((value): value is number => typeof value === "number")
|
||||
.filter((value) => Number.isFinite(value) && value > 0);
|
||||
if (values.length === 0) return null;
|
||||
return Math.max(...values);
|
||||
}
|
||||
|
||||
/**
 * Compute the ordered list of candidate auth profile ids for a provider.
 *
 * Base list precedence: store-level order override, then config order, then
 * config-declared profiles for the provider, then every stored profile for
 * the provider. Unusable credentials (wrong provider, config mismatch,
 * empty or expired secrets) are filtered out and duplicates removed.
 *
 * With an explicit order the user's sequence is respected, except profiles
 * currently in cooldown are pushed to the back (soonest expiry first).
 * Without one, profiles are ordered by orderProfilesByMode (type preference
 * plus least-recently-used round-robin). `preferredProfile` is always moved
 * to the front when present in the result.
 */
export function resolveAuthProfileOrder(params: {
  cfg?: ClawdbotConfig;
  store: AuthProfileStore;
  provider: string;
  preferredProfile?: string;
}): string[] {
  const { cfg, store, provider, preferredProfile } = params;
  const providerKey = normalizeProviderId(provider);
  const now = Date.now();
  // Order override persisted in the store, keyed by (normalized) provider.
  const storedOrder = (() => {
    const order = store.order;
    if (!order) return undefined;
    for (const [key, value] of Object.entries(order)) {
      if (normalizeProviderId(key) === providerKey) return value;
    }
    return undefined;
  })();
  // Order declared in config, also matched by normalized provider key.
  const configuredOrder = (() => {
    const order = cfg?.auth?.order;
    if (!order) return undefined;
    for (const [key, value] of Object.entries(order)) {
      if (normalizeProviderId(key) === providerKey) return value;
    }
    return undefined;
  })();
  const explicitOrder = storedOrder ?? configuredOrder;
  const explicitProfiles = cfg?.auth?.profiles
    ? Object.entries(cfg.auth.profiles)
        .filter(
          ([, profile]) =>
            normalizeProviderId(profile.provider) === providerKey,
        )
        .map(([profileId]) => profileId)
    : [];
  const baseOrder =
    explicitOrder ??
    (explicitProfiles.length > 0
      ? explicitProfiles
      : listProfilesForProvider(store, providerKey));
  if (baseOrder.length === 0) return [];

  // Keep only profiles whose stored credential is present, matches the
  // provider (and config entry, when one exists), and is still usable.
  const filtered = baseOrder.filter((profileId) => {
    const cred = store.profiles[profileId];
    if (!cred) return false;
    if (normalizeProviderId(cred.provider) !== providerKey) return false;
    const profileConfig = cfg?.auth?.profiles?.[profileId];
    if (profileConfig) {
      if (normalizeProviderId(profileConfig.provider) !== providerKey) {
        return false;
      }
      if (profileConfig.mode !== cred.type) {
        // "oauth" config is treated as compatible with stored token creds.
        const oauthCompatible =
          profileConfig.mode === "oauth" && cred.type === "token";
        if (!oauthCompatible) return false;
      }
    }
    if (cred.type === "api_key") return Boolean(cred.key?.trim());
    if (cred.type === "token") {
      if (!cred.token?.trim()) return false;
      // A positive, finite expiry in the past makes the token unusable.
      if (
        typeof cred.expires === "number" &&
        Number.isFinite(cred.expires) &&
        cred.expires > 0 &&
        now >= cred.expires
      ) {
        return false;
      }
      return true;
    }
    if (cred.type === "oauth") {
      // Either a current access token or a refresh token makes it viable.
      return Boolean(cred.access?.trim() || cred.refresh?.trim());
    }
    return false;
  });
  const deduped: string[] = [];
  for (const entry of filtered) {
    if (!deduped.includes(entry)) deduped.push(entry);
  }

  // If user specified explicit order (store override or config), respect it
  // exactly, but still apply cooldown sorting to avoid repeatedly selecting
  // known-bad/rate-limited keys as the first candidate.
  if (explicitOrder && explicitOrder.length > 0) {
    const available: string[] = [];
    const inCooldown: Array<{ profileId: string; cooldownUntil: number }> = [];

    for (const profileId of deduped) {
      const cooldownUntil =
        resolveProfileUnusableUntil(store.usageStats?.[profileId] ?? {}) ?? 0;
      if (
        typeof cooldownUntil === "number" &&
        Number.isFinite(cooldownUntil) &&
        cooldownUntil > 0 &&
        now < cooldownUntil
      ) {
        inCooldown.push({ profileId, cooldownUntil });
      } else {
        available.push(profileId);
      }
    }

    const cooldownSorted = inCooldown
      .sort((a, b) => a.cooldownUntil - b.cooldownUntil)
      .map((entry) => entry.profileId);

    const ordered = [...available, ...cooldownSorted];

    // Still put preferredProfile first if specified
    if (preferredProfile && ordered.includes(preferredProfile)) {
      return [
        preferredProfile,
        ...ordered.filter((e) => e !== preferredProfile),
      ];
    }
    return ordered;
  }

  // Otherwise, use round-robin: sort by lastUsed (oldest first)
  // preferredProfile goes first if specified (for explicit user choice)
  // lastGood is NOT prioritized - that would defeat round-robin
  const sorted = orderProfilesByMode(deduped, store);

  if (preferredProfile && sorted.includes(preferredProfile)) {
    return [preferredProfile, ...sorted.filter((e) => e !== preferredProfile)];
  }

  return sorted;
}
|
||||
|
||||
function orderProfilesByMode(
|
||||
order: string[],
|
||||
store: AuthProfileStore,
|
||||
): string[] {
|
||||
const now = Date.now();
|
||||
|
||||
// Partition into available and in-cooldown
|
||||
const available: string[] = [];
|
||||
const inCooldown: string[] = [];
|
||||
|
||||
for (const profileId of order) {
|
||||
if (isProfileInCooldown(store, profileId)) {
|
||||
inCooldown.push(profileId);
|
||||
} else {
|
||||
available.push(profileId);
|
||||
}
|
||||
}
|
||||
|
||||
// Sort available profiles by lastUsed (oldest first = round-robin)
|
||||
// Then by lastUsed (oldest first = round-robin within type)
|
||||
const scored = available.map((profileId) => {
|
||||
const type = store.profiles[profileId]?.type;
|
||||
const typeScore =
|
||||
type === "oauth" ? 0 : type === "token" ? 1 : type === "api_key" ? 2 : 3;
|
||||
const lastUsed = store.usageStats?.[profileId]?.lastUsed ?? 0;
|
||||
return { profileId, typeScore, lastUsed };
|
||||
});
|
||||
|
||||
// Primary sort: type preference (oauth > token > api_key).
|
||||
// Secondary sort: lastUsed (oldest first for round-robin within type).
|
||||
const sorted = scored
|
||||
.sort((a, b) => {
|
||||
// First by type (oauth > token > api_key)
|
||||
if (a.typeScore !== b.typeScore) return a.typeScore - b.typeScore;
|
||||
// Then by lastUsed (oldest first)
|
||||
return a.lastUsed - b.lastUsed;
|
||||
})
|
||||
.map((entry) => entry.profileId);
|
||||
|
||||
// Append cooldown profiles at the end (sorted by cooldown expiry, soonest first)
|
||||
const cooldownSorted = inCooldown
|
||||
.map((profileId) => ({
|
||||
profileId,
|
||||
cooldownUntil:
|
||||
resolveProfileUnusableUntil(store.usageStats?.[profileId] ?? {}) ?? now,
|
||||
}))
|
||||
.sort((a, b) => a.cooldownUntil - b.cooldownUntil)
|
||||
.map((entry) => entry.profileId);
|
||||
|
||||
return [...sorted, ...cooldownSorted];
|
||||
}
|
||||
36
src/agents/auth-profiles/paths.ts
Normal file
36
src/agents/auth-profiles/paths.ts
Normal file
@@ -0,0 +1,36 @@
|
||||
import fs from "node:fs";
|
||||
import path from "node:path";
|
||||
|
||||
import { saveJsonFile } from "../../infra/json-file.js";
|
||||
import { resolveUserPath } from "../../utils.js";
|
||||
import { resolveClawdbotAgentDir } from "../agent-paths.js";
|
||||
import {
|
||||
AUTH_PROFILE_FILENAME,
|
||||
AUTH_STORE_VERSION,
|
||||
LEGACY_AUTH_FILENAME,
|
||||
} from "./constants.js";
|
||||
import type { AuthProfileStore } from "./types.js";
|
||||
|
||||
export function resolveAuthStorePath(agentDir?: string): string {
|
||||
const resolved = resolveUserPath(agentDir ?? resolveClawdbotAgentDir());
|
||||
return path.join(resolved, AUTH_PROFILE_FILENAME);
|
||||
}
|
||||
|
||||
export function resolveLegacyAuthStorePath(agentDir?: string): string {
|
||||
const resolved = resolveUserPath(agentDir ?? resolveClawdbotAgentDir());
|
||||
return path.join(resolved, LEGACY_AUTH_FILENAME);
|
||||
}
|
||||
|
||||
export function resolveAuthStorePathForDisplay(agentDir?: string): string {
|
||||
const pathname = resolveAuthStorePath(agentDir);
|
||||
return pathname.startsWith("~") ? pathname : resolveUserPath(pathname);
|
||||
}
|
||||
|
||||
export function ensureAuthStoreFile(pathname: string) {
|
||||
if (fs.existsSync(pathname)) return;
|
||||
const payload: AuthProfileStore = {
|
||||
version: AUTH_STORE_VERSION,
|
||||
profiles: {},
|
||||
};
|
||||
saveJsonFile(pathname, payload);
|
||||
}
|
||||
87
src/agents/auth-profiles/profiles.ts
Normal file
87
src/agents/auth-profiles/profiles.ts
Normal file
@@ -0,0 +1,87 @@
|
||||
import { normalizeProviderId } from "../model-selection.js";
|
||||
import {
|
||||
ensureAuthProfileStore,
|
||||
saveAuthProfileStore,
|
||||
updateAuthProfileStoreWithLock,
|
||||
} from "./store.js";
|
||||
import type { AuthProfileCredential, AuthProfileStore } from "./types.js";
|
||||
|
||||
export async function setAuthProfileOrder(params: {
|
||||
agentDir?: string;
|
||||
provider: string;
|
||||
order?: string[] | null;
|
||||
}): Promise<AuthProfileStore | null> {
|
||||
const providerKey = normalizeProviderId(params.provider);
|
||||
const sanitized =
|
||||
params.order && Array.isArray(params.order)
|
||||
? params.order.map((entry) => String(entry).trim()).filter(Boolean)
|
||||
: [];
|
||||
|
||||
const deduped: string[] = [];
|
||||
for (const entry of sanitized) {
|
||||
if (!deduped.includes(entry)) deduped.push(entry);
|
||||
}
|
||||
|
||||
return await updateAuthProfileStoreWithLock({
|
||||
agentDir: params.agentDir,
|
||||
updater: (store) => {
|
||||
store.order = store.order ?? {};
|
||||
if (deduped.length === 0) {
|
||||
if (!store.order[providerKey]) return false;
|
||||
delete store.order[providerKey];
|
||||
if (Object.keys(store.order).length === 0) {
|
||||
store.order = undefined;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
store.order[providerKey] = deduped;
|
||||
return true;
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
export function upsertAuthProfile(params: {
|
||||
profileId: string;
|
||||
credential: AuthProfileCredential;
|
||||
agentDir?: string;
|
||||
}): void {
|
||||
const store = ensureAuthProfileStore(params.agentDir);
|
||||
store.profiles[params.profileId] = params.credential;
|
||||
saveAuthProfileStore(store, params.agentDir);
|
||||
}
|
||||
|
||||
export function listProfilesForProvider(
|
||||
store: AuthProfileStore,
|
||||
provider: string,
|
||||
): string[] {
|
||||
const providerKey = normalizeProviderId(provider);
|
||||
return Object.entries(store.profiles)
|
||||
.filter(([, cred]) => normalizeProviderId(cred.provider) === providerKey)
|
||||
.map(([id]) => id);
|
||||
}
|
||||
|
||||
export async function markAuthProfileGood(params: {
|
||||
store: AuthProfileStore;
|
||||
provider: string;
|
||||
profileId: string;
|
||||
agentDir?: string;
|
||||
}): Promise<void> {
|
||||
const { store, provider, profileId, agentDir } = params;
|
||||
const updated = await updateAuthProfileStoreWithLock({
|
||||
agentDir,
|
||||
updater: (freshStore) => {
|
||||
const profile = freshStore.profiles[profileId];
|
||||
if (!profile || profile.provider !== provider) return false;
|
||||
freshStore.lastGood = { ...freshStore.lastGood, [provider]: profileId };
|
||||
return true;
|
||||
},
|
||||
});
|
||||
if (updated) {
|
||||
store.lastGood = updated.lastGood;
|
||||
return;
|
||||
}
|
||||
const profile = store.profiles[profileId];
|
||||
if (!profile || profile.provider !== provider) return;
|
||||
store.lastGood = { ...store.lastGood, [provider]: profileId };
|
||||
saveAuthProfileStore(store, agentDir);
|
||||
}
|
||||
162
src/agents/auth-profiles/repair.ts
Normal file
162
src/agents/auth-profiles/repair.ts
Normal file
@@ -0,0 +1,162 @@
|
||||
import type { ClawdbotConfig } from "../../config/config.js";
|
||||
import type { AuthProfileConfig } from "../../config/types.js";
|
||||
import { normalizeProviderId } from "../model-selection.js";
|
||||
import { listProfilesForProvider } from "./profiles.js";
|
||||
import type { AuthProfileIdRepairResult, AuthProfileStore } from "./types.js";
|
||||
|
||||
function getProfileSuffix(profileId: string): string {
|
||||
const idx = profileId.indexOf(":");
|
||||
if (idx < 0) return "";
|
||||
return profileId.slice(idx + 1);
|
||||
}
|
||||
|
||||
function isEmailLike(value: string): boolean {
|
||||
const trimmed = value.trim();
|
||||
if (!trimmed) return false;
|
||||
return trimmed.includes("@") && trimmed.includes(".");
|
||||
}
|
||||
|
||||
/**
 * Suggest a replacement oauth profile id for a legacy "<provider>:default"
 * id, by trying (in order): the configured email match, the provider's
 * last-known-good profile, the single non-legacy oauth profile, then the
 * single profile with an email-like suffix.
 *
 * Returns null when the legacy id doesn't end in ":default", the config
 * explicitly declares it as a non-oauth profile, no oauth profiles exist
 * for the provider, or no unambiguous candidate is found.
 */
export function suggestOAuthProfileIdForLegacyDefault(params: {
  cfg?: ClawdbotConfig;
  store: AuthProfileStore;
  provider: string;
  legacyProfileId: string;
}): string | null {
  const providerKey = normalizeProviderId(params.provider);
  const legacySuffix = getProfileSuffix(params.legacyProfileId);
  // Only the ":default" suffix qualifies as a legacy id worth migrating.
  if (legacySuffix !== "default") return null;

  // A matching config entry that is explicitly non-oauth blocks migration.
  const legacyCfg = params.cfg?.auth?.profiles?.[params.legacyProfileId];
  if (
    legacyCfg &&
    normalizeProviderId(legacyCfg.provider) === providerKey &&
    legacyCfg.mode !== "oauth"
  ) {
    return null;
  }

  const oauthProfiles = listProfilesForProvider(
    params.store,
    providerKey,
  ).filter((id) => params.store.profiles[id]?.type === "oauth");
  if (oauthProfiles.length === 0) return null;

  // 1) Prefer the profile matching the configured email (by stored email
  //    or by the "<provider>:<email>" id convention).
  const configuredEmail = legacyCfg?.email?.trim();
  if (configuredEmail) {
    const byEmail = oauthProfiles.find((id) => {
      const cred = params.store.profiles[id];
      if (!cred || cred.type !== "oauth") return false;
      const email = (cred.email as string | undefined)?.trim();
      return (
        email === configuredEmail || id === `${providerKey}:${configuredEmail}`
      );
    });
    if (byEmail) return byEmail;
  }

  // 2) The last-known-good profile for this provider, if it is oauth.
  const lastGood =
    params.store.lastGood?.[providerKey] ??
    params.store.lastGood?.[params.provider];
  if (lastGood && oauthProfiles.includes(lastGood)) return lastGood;

  // 3) Exactly one non-legacy oauth profile: unambiguous.
  const nonLegacy = oauthProfiles.filter((id) => id !== params.legacyProfileId);
  if (nonLegacy.length === 1) return nonLegacy[0] ?? null;

  // 4) Exactly one profile whose suffix looks like an email address.
  const emailLike = nonLegacy.filter((id) => isEmailLike(getProfileSuffix(id)));
  if (emailLike.length === 1) return emailLike[0] ?? null;

  return null;
}
|
||||
|
||||
/**
 * Migrate a legacy "<provider>:default" oauth profile entry in the config to
 * the concrete (usually email-suffixed) profile id found in the store.
 *
 * No-ops (returning the config unchanged with migrated=false) when the
 * legacy entry is absent, not oauth mode, declared for a different provider,
 * or no unambiguous replacement id can be suggested. On success the legacy
 * config entry is renamed, any auth.order reference is rewritten, and a
 * human-readable change description is returned.
 */
export function repairOAuthProfileIdMismatch(params: {
  cfg: ClawdbotConfig;
  store: AuthProfileStore;
  provider: string;
  legacyProfileId?: string;
}): AuthProfileIdRepairResult {
  const legacyProfileId =
    params.legacyProfileId ?? `${normalizeProviderId(params.provider)}:default`;
  const legacyCfg = params.cfg.auth?.profiles?.[legacyProfileId];
  if (!legacyCfg) {
    return { config: params.cfg, changes: [], migrated: false };
  }
  if (legacyCfg.mode !== "oauth") {
    return { config: params.cfg, changes: [], migrated: false };
  }
  if (
    normalizeProviderId(legacyCfg.provider) !==
    normalizeProviderId(params.provider)
  ) {
    return { config: params.cfg, changes: [], migrated: false };
  }

  const toProfileId = suggestOAuthProfileIdForLegacyDefault({
    cfg: params.cfg,
    store: params.store,
    provider: params.provider,
    legacyProfileId,
  });
  if (!toProfileId || toProfileId === legacyProfileId) {
    return { config: params.cfg, changes: [], migrated: false };
  }

  // Carry the store credential's email into the migrated config entry.
  const toCred = params.store.profiles[toProfileId];
  const toEmail =
    toCred?.type === "oauth"
      ? (toCred.email as string | undefined)?.trim()
      : undefined;

  // Rebuild the profiles map with the legacy id replaced by the new id.
  const nextProfiles = {
    ...(params.cfg.auth?.profiles as
      | Record<string, AuthProfileConfig>
      | undefined),
  } as Record<string, AuthProfileConfig>;
  delete nextProfiles[legacyProfileId];
  nextProfiles[toProfileId] = {
    ...legacyCfg,
    ...(toEmail ? { email: toEmail } : {}),
  };

  // Rewrite the provider's auth.order list (if any) to the new id,
  // dropping blanks and duplicates introduced by the replacement.
  const providerKey = normalizeProviderId(params.provider);
  const nextOrder = (() => {
    const order = params.cfg.auth?.order;
    if (!order) return undefined;
    const resolvedKey = Object.keys(order).find(
      (key) => normalizeProviderId(key) === providerKey,
    );
    if (!resolvedKey) return order;
    const existing = order[resolvedKey];
    if (!Array.isArray(existing)) return order;
    const replaced = existing
      .map((id) => (id === legacyProfileId ? toProfileId : id))
      .filter(
        (id): id is string => typeof id === "string" && id.trim().length > 0,
      );
    const deduped: string[] = [];
    for (const entry of replaced) {
      if (!deduped.includes(entry)) deduped.push(entry);
    }
    return { ...order, [resolvedKey]: deduped };
  })();

  const nextCfg: ClawdbotConfig = {
    ...params.cfg,
    auth: {
      ...params.cfg.auth,
      profiles: nextProfiles,
      ...(nextOrder ? { order: nextOrder } : {}),
    },
  };

  const changes = [
    `Auth: migrate ${legacyProfileId} → ${toProfileId} (OAuth profile id)`,
  ];

  return {
    config: nextCfg,
    changes,
    migrated: true,
    fromProfileId: legacyProfileId,
    toProfileId,
  };
}
|
||||
317
src/agents/auth-profiles/store.ts
Normal file
317
src/agents/auth-profiles/store.ts
Normal file
@@ -0,0 +1,317 @@
|
||||
import fs from "node:fs";
|
||||
import type { OAuthCredentials } from "@mariozechner/pi-ai";
|
||||
import lockfile from "proper-lockfile";
|
||||
import { resolveOAuthPath } from "../../config/paths.js";
|
||||
import { loadJsonFile, saveJsonFile } from "../../infra/json-file.js";
|
||||
import {
|
||||
AUTH_STORE_LOCK_OPTIONS,
|
||||
AUTH_STORE_VERSION,
|
||||
log,
|
||||
} from "./constants.js";
|
||||
import { syncExternalCliCredentials } from "./external-cli-sync.js";
|
||||
import {
|
||||
ensureAuthStoreFile,
|
||||
resolveAuthStorePath,
|
||||
resolveLegacyAuthStorePath,
|
||||
} from "./paths.js";
|
||||
import type {
|
||||
AuthProfileCredential,
|
||||
AuthProfileStore,
|
||||
ProfileUsageStats,
|
||||
} from "./types.js";
|
||||
|
||||
type LegacyAuthStore = Record<string, AuthProfileCredential>;
|
||||
|
||||
function _syncAuthProfileStore(
|
||||
target: AuthProfileStore,
|
||||
source: AuthProfileStore,
|
||||
): void {
|
||||
target.version = source.version;
|
||||
target.profiles = source.profiles;
|
||||
target.order = source.order;
|
||||
target.lastGood = source.lastGood;
|
||||
target.usageStats = source.usageStats;
|
||||
}
|
||||
|
||||
/**
 * Run a read-modify-write on the auth store under an exclusive file lock.
 *
 * The `updater` receives the freshly loaded store and returns true when its
 * mutation should be saved. Returns the (possibly mutated) store, or null
 * when locking or the update fails. The lock is always released; unlock
 * errors are deliberately swallowed.
 */
export async function updateAuthProfileStoreWithLock(params: {
  agentDir?: string;
  updater: (store: AuthProfileStore) => boolean;
}): Promise<AuthProfileStore | null> {
  const authPath = resolveAuthStorePath(params.agentDir);
  // The lock target must exist before proper-lockfile can lock it.
  ensureAuthStoreFile(authPath);

  let release: (() => Promise<void>) | undefined;
  try {
    release = await lockfile.lock(authPath, AUTH_STORE_LOCK_OPTIONS);
    const store = ensureAuthProfileStore(params.agentDir);
    const shouldSave = params.updater(store);
    if (shouldSave) {
      saveAuthProfileStore(store, params.agentDir);
    }
    return store;
  } catch {
    // Lock contention or I/O failure: report "no update" to the caller.
    return null;
  } finally {
    if (release) {
      try {
        await release();
      } catch {
        // ignore unlock errors
      }
    }
  }
}
|
||||
|
||||
function coerceLegacyStore(raw: unknown): LegacyAuthStore | null {
|
||||
if (!raw || typeof raw !== "object") return null;
|
||||
const record = raw as Record<string, unknown>;
|
||||
if ("profiles" in record) return null;
|
||||
const entries: LegacyAuthStore = {};
|
||||
for (const [key, value] of Object.entries(record)) {
|
||||
if (!value || typeof value !== "object") continue;
|
||||
const typed = value as Partial<AuthProfileCredential>;
|
||||
if (
|
||||
typed.type !== "api_key" &&
|
||||
typed.type !== "oauth" &&
|
||||
typed.type !== "token"
|
||||
) {
|
||||
continue;
|
||||
}
|
||||
entries[key] = {
|
||||
...typed,
|
||||
provider: String(typed.provider ?? key),
|
||||
} as AuthProfileCredential;
|
||||
}
|
||||
return Object.keys(entries).length > 0 ? entries : null;
|
||||
}
|
||||
|
||||
/**
 * Validate and normalize a parsed JSON value into an AuthProfileStore.
 *
 * Returns null unless `raw` is an object with an object `profiles` field.
 * Profile entries with an unknown `type` or a missing `provider` are
 * dropped. `order` lists are cleaned (non-string/blank entries removed,
 * empty lists dropped). `lastGood` / `usageStats` are passed through only
 * when they are objects; their inner shape is not validated here.
 */
function coerceAuthStore(raw: unknown): AuthProfileStore | null {
  if (!raw || typeof raw !== "object") return null;
  const record = raw as Record<string, unknown>;
  if (!record.profiles || typeof record.profiles !== "object") return null;
  const profiles = record.profiles as Record<string, unknown>;
  const normalized: Record<string, AuthProfileCredential> = {};
  for (const [key, value] of Object.entries(profiles)) {
    if (!value || typeof value !== "object") continue;
    const typed = value as Partial<AuthProfileCredential>;
    // Only the three known credential kinds survive normalization.
    if (
      typed.type !== "api_key" &&
      typed.type !== "oauth" &&
      typed.type !== "token"
    ) {
      continue;
    }
    if (!typed.provider) continue;
    normalized[key] = typed as AuthProfileCredential;
  }
  const order =
    record.order && typeof record.order === "object"
      ? Object.entries(record.order as Record<string, unknown>).reduce(
          (acc, [provider, value]) => {
            if (!Array.isArray(value)) return acc;
            const list = value
              .map((entry) => (typeof entry === "string" ? entry.trim() : ""))
              .filter(Boolean);
            if (list.length === 0) return acc;
            acc[provider] = list;
            return acc;
          },
          {} as Record<string, string[]>,
        )
      : undefined;
  return {
    // A missing version defaults to the current store version.
    version: Number(record.version ?? AUTH_STORE_VERSION),
    profiles: normalized,
    order,
    lastGood:
      record.lastGood && typeof record.lastGood === "object"
        ? (record.lastGood as Record<string, string>)
        : undefined,
    usageStats:
      record.usageStats && typeof record.usageStats === "object"
        ? (record.usageStats as Record<string, ProfileUsageStats>)
        : undefined,
  };
}
|
||||
|
||||
function mergeOAuthFileIntoStore(store: AuthProfileStore): boolean {
|
||||
const oauthPath = resolveOAuthPath();
|
||||
const oauthRaw = loadJsonFile(oauthPath);
|
||||
if (!oauthRaw || typeof oauthRaw !== "object") return false;
|
||||
const oauthEntries = oauthRaw as Record<string, OAuthCredentials>;
|
||||
let mutated = false;
|
||||
for (const [provider, creds] of Object.entries(oauthEntries)) {
|
||||
if (!creds || typeof creds !== "object") continue;
|
||||
const profileId = `${provider}:default`;
|
||||
if (store.profiles[profileId]) continue;
|
||||
store.profiles[profileId] = {
|
||||
type: "oauth",
|
||||
provider,
|
||||
...creds,
|
||||
};
|
||||
mutated = true;
|
||||
}
|
||||
return mutated;
|
||||
}
|
||||
|
||||
/**
 * Load the auth profile store from disk.
 *
 * Resolution order:
 *   1. auth-profiles.json (modern format) — synced with external CLI
 *      credentials on every load; written back only when the sync mutated it.
 *   2. Legacy flat auth.json — each provider entry is migrated in memory to a
 *      `<provider>:default` profile.
 *   3. A fresh empty store.
 *
 * NOTE(review): unlike ensureAuthProfileStore, the legacy-migrated and fresh
 * stores built here are NOT persisted to disk — confirm this is intentional
 * (load appears to be the read-only path).
 */
export function loadAuthProfileStore(): AuthProfileStore {
  const authPath = resolveAuthStorePath();
  const raw = loadJsonFile(authPath);
  const asStore = coerceAuthStore(raw);
  if (asStore) {
    // Sync from external CLI tools on every load
    const synced = syncExternalCliCredentials(asStore);
    if (synced) {
      saveJsonFile(authPath, asStore);
    }
    return asStore;
  }

  // No modern store on disk: try migrating the legacy flat format in memory.
  const legacyRaw = loadJsonFile(resolveLegacyAuthStorePath());
  const legacy = coerceLegacyStore(legacyRaw);
  if (legacy) {
    const store: AuthProfileStore = {
      version: AUTH_STORE_VERSION,
      profiles: {},
    };
    // Legacy entries are keyed by provider; each becomes a
    // `<provider>:default` profile, copying only fields valid for its type.
    for (const [provider, cred] of Object.entries(legacy)) {
      const profileId = `${provider}:default`;
      if (cred.type === "api_key") {
        store.profiles[profileId] = {
          type: "api_key",
          provider: String(cred.provider ?? provider),
          key: cred.key,
          ...(cred.email ? { email: cred.email } : {}),
        };
      } else if (cred.type === "token") {
        store.profiles[profileId] = {
          type: "token",
          provider: String(cred.provider ?? provider),
          token: cred.token,
          // Only carry over a numeric expiry; anything else is dropped.
          ...(typeof cred.expires === "number"
            ? { expires: cred.expires }
            : {}),
          ...(cred.email ? { email: cred.email } : {}),
        };
      } else {
        // Remaining valid type is "oauth" (coerceLegacyStore filters others).
        store.profiles[profileId] = {
          type: "oauth",
          provider: String(cred.provider ?? provider),
          access: cred.access,
          refresh: cred.refresh,
          expires: cred.expires,
          ...(cred.enterpriseUrl ? { enterpriseUrl: cred.enterpriseUrl } : {}),
          ...(cred.projectId ? { projectId: cred.projectId } : {}),
          ...(cred.accountId ? { accountId: cred.accountId } : {}),
          ...(cred.email ? { email: cred.email } : {}),
        };
      }
    }
    syncExternalCliCredentials(store);
    return store;
  }

  // Nothing on disk at all: return an empty store seeded from external CLIs.
  const store: AuthProfileStore = { version: AUTH_STORE_VERSION, profiles: {} };
  syncExternalCliCredentials(store);
  return store;
}
|
||||
|
||||
/**
 * Load-or-create the auth profile store for an agent dir, performing the full
 * migration: legacy auth.json entries and the standalone oauth.json file are
 * folded in, external CLI credentials are synced, and the result is written
 * to disk when anything changed. The legacy file is deleted only after a
 * successful write (see the PR #368 note below).
 *
 * @param agentDir optional agent directory the store paths are resolved from
 * @param options  allowKeychainPrompt is forwarded to the external CLI sync
 */
export function ensureAuthProfileStore(
  agentDir?: string,
  options?: { allowKeychainPrompt?: boolean },
): AuthProfileStore {
  const authPath = resolveAuthStorePath(agentDir);
  const raw = loadJsonFile(authPath);
  const asStore = coerceAuthStore(raw);
  if (asStore) {
    // Sync from external CLI tools on every load
    const synced = syncExternalCliCredentials(asStore, options);
    if (synced) {
      saveJsonFile(authPath, asStore);
    }
    return asStore;
  }

  // No modern store yet: build one, starting from legacy auth.json if present.
  const legacyRaw = loadJsonFile(resolveLegacyAuthStorePath(agentDir));
  const legacy = coerceLegacyStore(legacyRaw);
  const store: AuthProfileStore = {
    version: AUTH_STORE_VERSION,
    profiles: {},
  };
  if (legacy) {
    // Each legacy provider entry becomes a `<provider>:default` profile,
    // copying only the fields valid for its credential type.
    for (const [provider, cred] of Object.entries(legacy)) {
      const profileId = `${provider}:default`;
      if (cred.type === "api_key") {
        store.profiles[profileId] = {
          type: "api_key",
          provider: String(cred.provider ?? provider),
          key: cred.key,
          ...(cred.email ? { email: cred.email } : {}),
        };
      } else if (cred.type === "token") {
        store.profiles[profileId] = {
          type: "token",
          provider: String(cred.provider ?? provider),
          token: cred.token,
          // Only carry over a numeric expiry; anything else is dropped.
          ...(typeof cred.expires === "number"
            ? { expires: cred.expires }
            : {}),
          ...(cred.email ? { email: cred.email } : {}),
        };
      } else {
        // Remaining valid type is "oauth" (coerceLegacyStore filters others).
        store.profiles[profileId] = {
          type: "oauth",
          provider: String(cred.provider ?? provider),
          access: cred.access,
          refresh: cred.refresh,
          expires: cred.expires,
          ...(cred.enterpriseUrl ? { enterpriseUrl: cred.enterpriseUrl } : {}),
          ...(cred.projectId ? { projectId: cred.projectId } : {}),
          ...(cred.accountId ? { accountId: cred.accountId } : {}),
          ...(cred.email ? { email: cred.email } : {}),
        };
      }
    }
  }

  // Merge remaining sources; persist only if any source contributed changes.
  const mergedOAuth = mergeOAuthFileIntoStore(store);
  const syncedCli = syncExternalCliCredentials(store, options);
  const shouldWrite = legacy !== null || mergedOAuth || syncedCli;
  if (shouldWrite) {
    saveJsonFile(authPath, store);
  }

  // PR #368: legacy auth.json could get re-migrated from other agent dirs,
  // overwriting fresh OAuth creds with stale tokens (fixes #363). Delete only
  // after we've successfully written auth-profiles.json.
  if (shouldWrite && legacy !== null) {
    const legacyPath = resolveLegacyAuthStorePath(agentDir);
    try {
      fs.unlinkSync(legacyPath);
    } catch (err) {
      // A missing file is fine (already deleted); anything else is logged
      // but non-fatal — the migration itself has succeeded.
      if ((err as NodeJS.ErrnoException)?.code !== "ENOENT") {
        log.warn("failed to delete legacy auth.json after migration", {
          err,
          legacyPath,
        });
      }
    }
  }

  return store;
}
|
||||
|
||||
export function saveAuthProfileStore(
|
||||
store: AuthProfileStore,
|
||||
agentDir?: string,
|
||||
): void {
|
||||
const authPath = resolveAuthStorePath(agentDir);
|
||||
const payload = {
|
||||
version: AUTH_STORE_VERSION,
|
||||
profiles: store.profiles,
|
||||
order: store.order ?? undefined,
|
||||
lastGood: store.lastGood ?? undefined,
|
||||
usageStats: store.usageStats ?? undefined,
|
||||
} satisfies AuthProfileStore;
|
||||
saveJsonFile(authPath, payload);
|
||||
}
|
||||
76
src/agents/auth-profiles/types.ts
Normal file
76
src/agents/auth-profiles/types.ts
Normal file
@@ -0,0 +1,76 @@
|
||||
import type { OAuthCredentials } from "@mariozechner/pi-ai";

import type { ClawdbotConfig } from "../../config/config.js";

/** Plain API-key credential for a provider. */
export type ApiKeyCredential = {
  type: "api_key";
  provider: string;
  // The API key value itself.
  key: string;
  // Optional account email associated with the key.
  email?: string;
};

export type TokenCredential = {
  /**
   * Static bearer-style token (often OAuth access token / PAT).
   * Not refreshable by clawdbot (unlike `type: "oauth"`).
   */
  type: "token";
  provider: string;
  token: string;
  /** Optional expiry timestamp (ms since epoch). */
  expires?: number;
  email?: string;
};

/** Refreshable OAuth credential; extends the pi-ai OAuthCredentials shape. */
export type OAuthCredential = OAuthCredentials & {
  type: "oauth";
  provider: string;
  clientId?: string;
  email?: string;
};

/** Discriminated union over the `type` field of all credential kinds. */
export type AuthProfileCredential =
  | ApiKeyCredential
  | TokenCredential
  | OAuthCredential;

/** Classification of why a profile call failed; drives cooldown policy. */
export type AuthProfileFailureReason =
  | "auth"
  | "format"
  | "rate_limit"
  | "billing"
  | "timeout"
  | "unknown";

/** Per-profile usage statistics for round-robin and cooldown tracking */
export type ProfileUsageStats = {
  // Timestamp (ms since epoch) of the last successful use.
  lastUsed?: number;
  // Profile is skipped until this timestamp after transient failures.
  cooldownUntil?: number;
  // Profile is skipped until this timestamp after hard failures (billing).
  disabledUntil?: number;
  // Reason the profile was disabled, when disabledUntil is set.
  disabledReason?: AuthProfileFailureReason;
  // Consecutive failures within the failure window.
  errorCount?: number;
  // Per-reason failure tallies within the failure window.
  failureCounts?: Partial<Record<AuthProfileFailureReason, number>>;
  // Timestamp of the most recent recorded failure.
  lastFailureAt?: number;
};

/** On-disk shape of auth-profiles.json. */
export type AuthProfileStore = {
  version: number;
  // Profile id (e.g. "<provider>:default") -> credential.
  profiles: Record<string, AuthProfileCredential>;
  /**
   * Optional per-agent preferred profile order overrides.
   * This lets you lock/override auth rotation for a specific agent without
   * changing the global config.
   */
  order?: Record<string, string[]>;
  // Last known-good profile id, keyed by provider.
  lastGood?: Record<string, string>;
  /** Usage statistics per profile for round-robin rotation */
  usageStats?: Record<string, ProfileUsageStats>;
};

/** Result of repairing/migrating auth profile ids referenced by the config. */
export type AuthProfileIdRepairResult = {
  config: ClawdbotConfig;
  // Human-readable descriptions of each change that was applied.
  changes: string[];
  migrated: boolean;
  fromProfileId?: string;
  toProfileId?: string;
};
|
||||
319
src/agents/auth-profiles/usage.ts
Normal file
319
src/agents/auth-profiles/usage.ts
Normal file
@@ -0,0 +1,319 @@
|
||||
import type { ClawdbotConfig } from "../../config/config.js";
|
||||
import { normalizeProviderId } from "../model-selection.js";
|
||||
import {
|
||||
saveAuthProfileStore,
|
||||
updateAuthProfileStoreWithLock,
|
||||
} from "./store.js";
|
||||
import type {
|
||||
AuthProfileFailureReason,
|
||||
AuthProfileStore,
|
||||
ProfileUsageStats,
|
||||
} from "./types.js";
|
||||
|
||||
function resolveProfileUnusableUntil(stats: ProfileUsageStats): number | null {
|
||||
const values = [stats.cooldownUntil, stats.disabledUntil]
|
||||
.filter((value): value is number => typeof value === "number")
|
||||
.filter((value) => Number.isFinite(value) && value > 0);
|
||||
if (values.length === 0) return null;
|
||||
return Math.max(...values);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a profile is currently in cooldown (due to rate limiting or errors).
|
||||
*/
|
||||
export function isProfileInCooldown(
|
||||
store: AuthProfileStore,
|
||||
profileId: string,
|
||||
): boolean {
|
||||
const stats = store.usageStats?.[profileId];
|
||||
if (!stats) return false;
|
||||
const unusableUntil = resolveProfileUnusableUntil(stats);
|
||||
return unusableUntil ? Date.now() < unusableUntil : false;
|
||||
}
|
||||
|
||||
/**
 * Mark a profile as successfully used. Resets error count and updates lastUsed.
 * Uses store lock to avoid overwriting concurrent usage updates.
 */
export async function markAuthProfileUsed(params: {
  store: AuthProfileStore;
  profileId: string;
  agentDir?: string;
}): Promise<void> {
  const { store, profileId, agentDir } = params;
  // Preferred path: mutate a freshly loaded store under the file lock so
  // concurrent processes' usage updates are not clobbered.
  const updated = await updateAuthProfileStoreWithLock({
    agentDir,
    updater: (freshStore) => {
      if (!freshStore.profiles[profileId]) return false;
      freshStore.usageStats = freshStore.usageStats ?? {};
      // Success wipes all failure state: counts, cooldowns, and disables.
      freshStore.usageStats[profileId] = {
        ...freshStore.usageStats[profileId],
        lastUsed: Date.now(),
        errorCount: 0,
        cooldownUntil: undefined,
        disabledUntil: undefined,
        disabledReason: undefined,
        failureCounts: undefined,
      };
      return true;
    },
  });
  if (updated) {
    // Propagate the locked result into the caller's in-memory store.
    store.usageStats = updated.usageStats;
    return;
  }
  if (!store.profiles[profileId]) return;

  // Fallback when the locked update did not apply — NOTE(review): presumably
  // lock acquisition failed or the profile was absent in the fresh store;
  // confirm against updateAuthProfileStoreWithLock in store.ts. The same
  // reset is applied to the in-memory store and persisted directly.
  store.usageStats = store.usageStats ?? {};
  store.usageStats[profileId] = {
    ...store.usageStats[profileId],
    lastUsed: Date.now(),
    errorCount: 0,
    cooldownUntil: undefined,
    disabledUntil: undefined,
    disabledReason: undefined,
    failureCounts: undefined,
  };
  saveAuthProfileStore(store, agentDir);
}
|
||||
|
||||
export function calculateAuthProfileCooldownMs(errorCount: number): number {
|
||||
const normalized = Math.max(1, errorCount);
|
||||
return Math.min(
|
||||
60 * 60 * 1000, // 1 hour max
|
||||
60 * 1000 * 5 ** Math.min(normalized - 1, 3),
|
||||
);
|
||||
}
|
||||
|
||||
/** Cooldown windows resolved from config (with defaults), in milliseconds. */
type ResolvedAuthCooldownConfig = {
  // Base backoff applied to billing failures.
  billingBackoffMs: number;
  // Upper bound for the billing backoff after doubling.
  billingMaxMs: number;
  // Failures older than this window reset the error counters.
  failureWindowMs: number;
};
||||
|
||||
function resolveAuthCooldownConfig(params: {
|
||||
cfg?: ClawdbotConfig;
|
||||
providerId: string;
|
||||
}): ResolvedAuthCooldownConfig {
|
||||
const defaults = {
|
||||
billingBackoffHours: 5,
|
||||
billingMaxHours: 24,
|
||||
failureWindowHours: 24,
|
||||
} as const;
|
||||
|
||||
const resolveHours = (value: unknown, fallback: number) =>
|
||||
typeof value === "number" && Number.isFinite(value) && value > 0
|
||||
? value
|
||||
: fallback;
|
||||
|
||||
const cooldowns = params.cfg?.auth?.cooldowns;
|
||||
const billingOverride = (() => {
|
||||
const map = cooldowns?.billingBackoffHoursByProvider;
|
||||
if (!map) return undefined;
|
||||
for (const [key, value] of Object.entries(map)) {
|
||||
if (normalizeProviderId(key) === params.providerId) return value;
|
||||
}
|
||||
return undefined;
|
||||
})();
|
||||
|
||||
const billingBackoffHours = resolveHours(
|
||||
billingOverride ?? cooldowns?.billingBackoffHours,
|
||||
defaults.billingBackoffHours,
|
||||
);
|
||||
const billingMaxHours = resolveHours(
|
||||
cooldowns?.billingMaxHours,
|
||||
defaults.billingMaxHours,
|
||||
);
|
||||
const failureWindowHours = resolveHours(
|
||||
cooldowns?.failureWindowHours,
|
||||
defaults.failureWindowHours,
|
||||
);
|
||||
|
||||
return {
|
||||
billingBackoffMs: billingBackoffHours * 60 * 60 * 1000,
|
||||
billingMaxMs: billingMaxHours * 60 * 60 * 1000,
|
||||
failureWindowMs: failureWindowHours * 60 * 60 * 1000,
|
||||
};
|
||||
}
|
||||
|
||||
function calculateAuthProfileBillingDisableMsWithConfig(params: {
|
||||
errorCount: number;
|
||||
baseMs: number;
|
||||
maxMs: number;
|
||||
}): number {
|
||||
const normalized = Math.max(1, params.errorCount);
|
||||
const baseMs = Math.max(60_000, params.baseMs);
|
||||
const maxMs = Math.max(baseMs, params.maxMs);
|
||||
const exponent = Math.min(normalized - 1, 10);
|
||||
const raw = baseMs * 2 ** exponent;
|
||||
return Math.min(maxMs, raw);
|
||||
}
|
||||
|
||||
export function resolveProfileUnusableUntilForDisplay(
|
||||
store: AuthProfileStore,
|
||||
profileId: string,
|
||||
): number | null {
|
||||
const stats = store.usageStats?.[profileId];
|
||||
if (!stats) return null;
|
||||
return resolveProfileUnusableUntil(stats);
|
||||
}
|
||||
|
||||
function computeNextProfileUsageStats(params: {
|
||||
existing: ProfileUsageStats;
|
||||
now: number;
|
||||
reason: AuthProfileFailureReason;
|
||||
cfgResolved: ResolvedAuthCooldownConfig;
|
||||
}): ProfileUsageStats {
|
||||
const windowMs = params.cfgResolved.failureWindowMs;
|
||||
const windowExpired =
|
||||
typeof params.existing.lastFailureAt === "number" &&
|
||||
params.existing.lastFailureAt > 0 &&
|
||||
params.now - params.existing.lastFailureAt > windowMs;
|
||||
|
||||
const baseErrorCount = windowExpired ? 0 : (params.existing.errorCount ?? 0);
|
||||
const nextErrorCount = baseErrorCount + 1;
|
||||
const failureCounts = windowExpired
|
||||
? {}
|
||||
: { ...params.existing.failureCounts };
|
||||
failureCounts[params.reason] = (failureCounts[params.reason] ?? 0) + 1;
|
||||
|
||||
const updatedStats: ProfileUsageStats = {
|
||||
...params.existing,
|
||||
errorCount: nextErrorCount,
|
||||
failureCounts,
|
||||
lastFailureAt: params.now,
|
||||
};
|
||||
|
||||
if (params.reason === "billing") {
|
||||
const billingCount = failureCounts.billing ?? 1;
|
||||
const backoffMs = calculateAuthProfileBillingDisableMsWithConfig({
|
||||
errorCount: billingCount,
|
||||
baseMs: params.cfgResolved.billingBackoffMs,
|
||||
maxMs: params.cfgResolved.billingMaxMs,
|
||||
});
|
||||
updatedStats.disabledUntil = params.now + backoffMs;
|
||||
updatedStats.disabledReason = "billing";
|
||||
} else {
|
||||
const backoffMs = calculateAuthProfileCooldownMs(nextErrorCount);
|
||||
updatedStats.cooldownUntil = params.now + backoffMs;
|
||||
}
|
||||
|
||||
return updatedStats;
|
||||
}
|
||||
|
||||
/**
 * Mark a profile as failed for a specific reason. Billing failures are treated
 * as "disabled" (longer backoff) vs the regular cooldown window.
 */
export async function markAuthProfileFailure(params: {
  store: AuthProfileStore;
  profileId: string;
  reason: AuthProfileFailureReason;
  cfg?: ClawdbotConfig;
  agentDir?: string;
}): Promise<void> {
  const { store, profileId, reason, agentDir, cfg } = params;
  // Preferred path: record the failure against a freshly loaded store under
  // the file lock so concurrent processes' stats are not clobbered.
  const updated = await updateAuthProfileStoreWithLock({
    agentDir,
    updater: (freshStore) => {
      const profile = freshStore.profiles[profileId];
      if (!profile) return false;
      freshStore.usageStats = freshStore.usageStats ?? {};
      const existing = freshStore.usageStats[profileId] ?? {};

      const now = Date.now();
      // Cooldown settings can be overridden per provider in config.
      const providerKey = normalizeProviderId(profile.provider);
      const cfgResolved = resolveAuthCooldownConfig({
        cfg,
        providerId: providerKey,
      });

      freshStore.usageStats[profileId] = computeNextProfileUsageStats({
        existing,
        now,
        reason,
        cfgResolved,
      });
      return true;
    },
  });
  if (updated) {
    // Propagate the locked result into the caller's in-memory store.
    store.usageStats = updated.usageStats;
    return;
  }
  if (!store.profiles[profileId]) return;

  // Fallback when the locked update did not apply — NOTE(review): presumably
  // lock acquisition failed or the profile was absent in the fresh store;
  // confirm against updateAuthProfileStoreWithLock. The same computation is
  // applied to the in-memory store and persisted directly.
  store.usageStats = store.usageStats ?? {};
  const existing = store.usageStats[profileId] ?? {};
  const now = Date.now();
  const providerKey = normalizeProviderId(
    store.profiles[profileId]?.provider ?? "",
  );
  const cfgResolved = resolveAuthCooldownConfig({
    cfg,
    providerId: providerKey,
  });

  store.usageStats[profileId] = computeNextProfileUsageStats({
    existing,
    now,
    reason,
    cfgResolved,
  });
  saveAuthProfileStore(store, agentDir);
}
|
||||
|
||||
/**
|
||||
* Mark a profile as failed/rate-limited. Applies exponential backoff cooldown.
|
||||
* Cooldown times: 1min, 5min, 25min, max 1 hour.
|
||||
* Uses store lock to avoid overwriting concurrent usage updates.
|
||||
*/
|
||||
export async function markAuthProfileCooldown(params: {
|
||||
store: AuthProfileStore;
|
||||
profileId: string;
|
||||
agentDir?: string;
|
||||
}): Promise<void> {
|
||||
await markAuthProfileFailure({
|
||||
store: params.store,
|
||||
profileId: params.profileId,
|
||||
reason: "unknown",
|
||||
agentDir: params.agentDir,
|
||||
});
|
||||
}
|
||||
|
||||
/**
 * Clear cooldown for a profile (e.g., manual reset).
 * Uses store lock to avoid overwriting concurrent usage updates.
 *
 * Only errorCount and cooldownUntil are reset here; a billing-style
 * disabledUntil/disabledReason is left untouched.
 */
export async function clearAuthProfileCooldown(params: {
  store: AuthProfileStore;
  profileId: string;
  agentDir?: string;
}): Promise<void> {
  const { store, profileId, agentDir } = params;
  // Preferred path: mutate a freshly loaded store under the file lock.
  const updated = await updateAuthProfileStoreWithLock({
    agentDir,
    updater: (freshStore) => {
      if (!freshStore.usageStats?.[profileId]) return false;

      freshStore.usageStats[profileId] = {
        ...freshStore.usageStats[profileId],
        errorCount: 0,
        cooldownUntil: undefined,
      };
      return true;
    },
  });
  if (updated) {
    // Propagate the locked result into the caller's in-memory store.
    store.usageStats = updated.usageStats;
    return;
  }
  if (!store.usageStats?.[profileId]) return;

  // Fallback when the locked update did not apply — NOTE(review): presumably
  // lock acquisition failed or the stats entry was absent in the fresh store;
  // confirm against updateAuthProfileStoreWithLock. Apply the same reset to
  // the in-memory store and persist directly.
  store.usageStats[profileId] = {
    ...store.usageStats[profileId],
    errorCount: 0,
    cooldownUntil: undefined,
  };
  saveAuthProfileStore(store, agentDir);
}
|
||||
177
src/agents/clawdbot-tools.subagents.part-1.test.ts
Normal file
177
src/agents/clawdbot-tools.subagents.part-1.test.ts
Normal file
@@ -0,0 +1,177 @@
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";

// Route all gateway RPCs through a controllable mock so the test can script
// responses per method and capture every call.
const callGatewayMock = vi.fn();
vi.mock("../gateway/call.js", () => ({
  callGateway: (opts: unknown) => callGatewayMock(opts),
}));

// Mutable config returned by the mocked loadConfig; reset in beforeEach.
let configOverride: ReturnType<
  typeof import("../config/config.js")["loadConfig"]
> = {
  session: {
    mainKey: "main",
    scope: "per-sender",
  },
};

vi.mock("../config/config.js", async (importOriginal) => {
  const actual = await importOriginal<typeof import("../config/config.js")>();
  return {
    ...actual,
    loadConfig: () => configOverride,
    resolveGatewayPort: () => 18789,
  };
});

import { emitAgentEvent } from "../infra/agent-events.js";
import { createClawdbotTools } from "./clawdbot-tools.js";
import { resetSubagentRegistryForTests } from "./subagent-registry.js";

describe("clawdbot-tools: subagents", () => {
  beforeEach(() => {
    configOverride = {
      session: {
        mainKey: "main",
        scope: "per-sender",
      },
    };
  });

  it("sessions_spawn announces back to the requester group channel", async () => {
    resetSubagentRegistryForTests();
    callGatewayMock.mockReset();
    // Captured state filled in by the scripted gateway mock below.
    const calls: Array<{ method?: string; params?: unknown }> = [];
    let agentCallCount = 0;
    let sendParams: { to?: string; channel?: string; message?: string } = {};
    let deletedKey: string | undefined;
    let childRunId: string | undefined;
    let childSessionKey: string | undefined;
    const waitCalls: Array<{ runId?: string; timeoutMs?: number }> = [];
    const sessionLastAssistantText = new Map<string, string>();

    // Script each gateway method: "agent" accepts runs (recording the child
    // run), "agent.wait" times out for the child run so the lifecycle event
    // drives completion, "chat.history" echoes per-session assistant text,
    // "send" and "sessions.delete" capture their params.
    callGatewayMock.mockImplementation(async (opts: unknown) => {
      const request = opts as { method?: string; params?: unknown };
      calls.push(request);
      if (request.method === "agent") {
        agentCallCount += 1;
        const runId = `run-${agentCallCount}`;
        const params = request.params as {
          message?: string;
          sessionKey?: string;
          channel?: string;
          timeout?: number;
        };
        const message = params?.message ?? "";
        const sessionKey = params?.sessionKey ?? "";
        if (message === "Sub-agent announce step.") {
          sessionLastAssistantText.set(sessionKey, "announce now");
        } else {
          childRunId = runId;
          childSessionKey = sessionKey;
          sessionLastAssistantText.set(sessionKey, "result");
          expect(params?.channel).toBe("discord");
          expect(params?.timeout).toBe(1);
        }
        return {
          runId,
          status: "accepted",
          acceptedAt: 1000 + agentCallCount,
        };
      }
      if (request.method === "agent.wait") {
        const params = request.params as
          | { runId?: string; timeoutMs?: number }
          | undefined;
        waitCalls.push(params ?? {});
        // Only the child run "times out" — its end is signalled via the
        // lifecycle event emitted later in this test.
        const status = params?.runId === childRunId ? "timeout" : "ok";
        return { runId: params?.runId ?? "run-1", status };
      }
      if (request.method === "chat.history") {
        const params = request.params as { sessionKey?: string } | undefined;
        const text =
          sessionLastAssistantText.get(params?.sessionKey ?? "") ?? "";
        return {
          messages: [{ role: "assistant", content: [{ type: "text", text }] }],
        };
      }
      if (request.method === "send") {
        const params = request.params as
          | { to?: string; channel?: string; message?: string }
          | undefined;
        sendParams = {
          to: params?.to,
          channel: params?.channel,
          message: params?.message,
        };
        return { messageId: "m-announce" };
      }
      if (request.method === "sessions.delete") {
        const params = request.params as { key?: string } | undefined;
        deletedKey = params?.key;
        return { ok: true };
      }
      return {};
    });

    const tool = createClawdbotTools({
      agentSessionKey: "discord:group:req",
      agentChannel: "discord",
    }).find((candidate) => candidate.name === "sessions_spawn");
    if (!tool) throw new Error("missing sessions_spawn tool");

    const result = await tool.execute("call1", {
      task: "do thing",
      runTimeoutSeconds: 1,
      cleanup: "delete",
    });
    expect(result.details).toMatchObject({
      status: "accepted",
      runId: "run-1",
    });

    // Signal the child run's completion through the lifecycle stream (since
    // agent.wait reported "timeout" for it above).
    if (!childRunId) throw new Error("missing child runId");
    emitAgentEvent({
      runId: childRunId,
      stream: "lifecycle",
      data: {
        phase: "end",
        startedAt: 1234,
        endedAt: 2345,
      },
    });

    // Let the queued async announce/cleanup steps drain.
    await new Promise((resolve) => setTimeout(resolve, 0));
    await new Promise((resolve) => setTimeout(resolve, 0));
    await new Promise((resolve) => setTimeout(resolve, 0));

    const childWait = waitCalls.find((call) => call.runId === childRunId);
    expect(childWait?.timeoutMs).toBe(1000);
    const agentCalls = calls.filter((call) => call.method === "agent");
    expect(agentCalls).toHaveLength(2);
    // First agent call: the spawned sub-agent on the requester's channel.
    const first = agentCalls[0]?.params as
      | {
          lane?: string;
          deliver?: boolean;
          sessionKey?: string;
          channel?: string;
        }
      | undefined;
    expect(first?.lane).toBe("subagent");
    expect(first?.deliver).toBe(false);
    expect(first?.channel).toBe("discord");
    expect(first?.sessionKey?.startsWith("agent:main:subagent:")).toBe(true);
    expect(childSessionKey?.startsWith("agent:main:subagent:")).toBe(true);
    // Second agent call: the nested announce step, run on webchat.
    const second = agentCalls[1]?.params as
      | { channel?: string; deliver?: boolean; lane?: string }
      | undefined;
    expect(second?.lane).toBe("nested");
    expect(second?.deliver).toBe(false);
    expect(second?.channel).toBe("webchat");

    // The announcement is delivered back to the requester's group channel,
    // and cleanup deletes the sub-agent session.
    expect(sendParams.channel).toBe("discord");
    expect(sendParams.to).toBe("channel:req");
    expect(sendParams.message ?? "").toContain("announce now");
    expect(sendParams.message ?? "").toContain("Stats:");
    expect(deletedKey?.startsWith("agent:main:subagent:")).toBe(true);
  });
});
|
||||
158
src/agents/clawdbot-tools.subagents.part-2.test.ts
Normal file
158
src/agents/clawdbot-tools.subagents.part-2.test.ts
Normal file
@@ -0,0 +1,158 @@
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";

// Route all gateway RPCs through a controllable mock so the test can script
// responses per method and capture every call.
const callGatewayMock = vi.fn();
vi.mock("../gateway/call.js", () => ({
  callGateway: (opts: unknown) => callGatewayMock(opts),
}));

// Mutable config returned by the mocked loadConfig; reset in beforeEach.
let configOverride: ReturnType<
  typeof import("../config/config.js")["loadConfig"]
> = {
  session: {
    mainKey: "main",
    scope: "per-sender",
  },
};

vi.mock("../config/config.js", async (importOriginal) => {
  const actual = await importOriginal<typeof import("../config/config.js")>();
  return {
    ...actual,
    loadConfig: () => configOverride,
    resolveGatewayPort: () => 18789,
  };
});

import { createClawdbotTools } from "./clawdbot-tools.js";
import { resetSubagentRegistryForTests } from "./subagent-registry.js";

describe("clawdbot-tools: subagents", () => {
  beforeEach(() => {
    configOverride = {
      session: {
        mainKey: "main",
        scope: "per-sender",
      },
    };
  });

  it("sessions_spawn announces via agent.wait when lifecycle events are missing", async () => {
    resetSubagentRegistryForTests();
    callGatewayMock.mockReset();
    // Captured state filled in by the scripted gateway mock below.
    const calls: Array<{ method?: string; params?: unknown }> = [];
    let agentCallCount = 0;
    let sendParams: { to?: string; channel?: string; message?: string } = {};
    let deletedKey: string | undefined;
    let childRunId: string | undefined;
    let childSessionKey: string | undefined;
    const waitCalls: Array<{ runId?: string; timeoutMs?: number }> = [];
    const sessionLastAssistantText = new Map<string, string>();

    // Unlike part-1, "agent.wait" resolves with "ok" here — no lifecycle
    // event is ever emitted, so completion must come from the wait call.
    callGatewayMock.mockImplementation(async (opts: unknown) => {
      const request = opts as { method?: string; params?: unknown };
      calls.push(request);
      if (request.method === "agent") {
        agentCallCount += 1;
        const runId = `run-${agentCallCount}`;
        const params = request.params as {
          message?: string;
          sessionKey?: string;
          channel?: string;
          timeout?: number;
        };
        const message = params?.message ?? "";
        const sessionKey = params?.sessionKey ?? "";
        if (message === "Sub-agent announce step.") {
          sessionLastAssistantText.set(sessionKey, "announce now");
        } else {
          childRunId = runId;
          childSessionKey = sessionKey;
          sessionLastAssistantText.set(sessionKey, "result");
          expect(params?.channel).toBe("discord");
          expect(params?.timeout).toBe(1);
        }
        return {
          runId,
          status: "accepted",
          acceptedAt: 2000 + agentCallCount,
        };
      }
      if (request.method === "agent.wait") {
        const params = request.params as
          | { runId?: string; timeoutMs?: number }
          | undefined;
        waitCalls.push(params ?? {});
        return {
          runId: params?.runId ?? "run-1",
          status: "ok",
          startedAt: 3000,
          endedAt: 4000,
        };
      }
      if (request.method === "chat.history") {
        const params = request.params as { sessionKey?: string } | undefined;
        const text =
          sessionLastAssistantText.get(params?.sessionKey ?? "") ?? "";
        return {
          messages: [{ role: "assistant", content: [{ type: "text", text }] }],
        };
      }
      if (request.method === "send") {
        const params = request.params as
          | { to?: string; channel?: string; message?: string }
          | undefined;
        sendParams = {
          to: params?.to,
          channel: params?.channel,
          message: params?.message,
        };
        return { messageId: "m-announce" };
      }
      if (request.method === "sessions.delete") {
        const params = request.params as { key?: string } | undefined;
        deletedKey = params?.key;
        return { ok: true };
      }
      return {};
    });

    const tool = createClawdbotTools({
      agentSessionKey: "discord:group:req",
      agentChannel: "discord",
    }).find((candidate) => candidate.name === "sessions_spawn");
    if (!tool) throw new Error("missing sessions_spawn tool");

    const result = await tool.execute("call1b", {
      task: "do thing",
      runTimeoutSeconds: 1,
      cleanup: "delete",
    });
    expect(result.details).toMatchObject({
      status: "accepted",
      runId: "run-1",
    });

    // Let the queued async announce/cleanup steps drain.
    await new Promise((resolve) => setTimeout(resolve, 0));
    await new Promise((resolve) => setTimeout(resolve, 0));
    await new Promise((resolve) => setTimeout(resolve, 0));

    const childWait = waitCalls.find((call) => call.runId === childRunId);
    expect(childWait?.timeoutMs).toBe(1000);
    expect(childSessionKey?.startsWith("agent:main:subagent:")).toBe(true);

    const agentCalls = calls.filter((call) => call.method === "agent");
    expect(agentCalls).toHaveLength(2);
    // Second agent call: the nested announce step, run on webchat.
    const second = agentCalls[1]?.params as
      | { channel?: string; deliver?: boolean; lane?: string }
      | undefined;
    expect(second?.lane).toBe("nested");
    expect(second?.deliver).toBe(false);
    expect(second?.channel).toBe("webchat");

    // The announcement is delivered back to the requester's group channel,
    // and cleanup deletes the sub-agent session.
    expect(sendParams.channel).toBe("discord");
    expect(sendParams.to).toBe("channel:req");
    expect(sendParams.message ?? "").toContain("announce now");
    expect(sendParams.message ?? "").toContain("Stats:");
    expect(deletedKey?.startsWith("agent:main:subagent:")).toBe(true);
  });
});
|
||||
177
src/agents/clawdbot-tools.subagents.part-3.test.ts
Normal file
177
src/agents/clawdbot-tools.subagents.part-3.test.ts
Normal file
@@ -0,0 +1,177 @@
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
|
||||
const callGatewayMock = vi.fn();
|
||||
vi.mock("../gateway/call.js", () => ({
|
||||
callGateway: (opts: unknown) => callGatewayMock(opts),
|
||||
}));
|
||||
|
||||
let configOverride: ReturnType<
|
||||
typeof import("../config/config.js")["loadConfig"]
|
||||
> = {
|
||||
session: {
|
||||
mainKey: "main",
|
||||
scope: "per-sender",
|
||||
},
|
||||
};
|
||||
|
||||
vi.mock("../config/config.js", async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import("../config/config.js")>();
|
||||
return {
|
||||
...actual,
|
||||
loadConfig: () => configOverride,
|
||||
resolveGatewayPort: () => 18789,
|
||||
};
|
||||
});
|
||||
|
||||
import { emitAgentEvent } from "../infra/agent-events.js";
|
||||
import { createClawdbotTools } from "./clawdbot-tools.js";
|
||||
import { resetSubagentRegistryForTests } from "./subagent-registry.js";
|
||||
|
||||
describe("clawdbot-tools: subagents", () => {
|
||||
beforeEach(() => {
|
||||
configOverride = {
|
||||
session: {
|
||||
mainKey: "main",
|
||||
scope: "per-sender",
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
it("sessions_spawn resolves main announce target from sessions.list", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
const calls: Array<{ method?: string; params?: unknown }> = [];
|
||||
let agentCallCount = 0;
|
||||
let sendParams: { to?: string; channel?: string; message?: string } = {};
|
||||
let childRunId: string | undefined;
|
||||
let childSessionKey: string | undefined;
|
||||
const waitCalls: Array<{ runId?: string; timeoutMs?: number }> = [];
|
||||
const sessionLastAssistantText = new Map<string, string>();
|
||||
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: unknown };
|
||||
calls.push(request);
|
||||
if (request.method === "sessions.list") {
|
||||
return {
|
||||
sessions: [
|
||||
{
|
||||
key: "main",
|
||||
lastChannel: "whatsapp",
|
||||
lastTo: "+123",
|
||||
},
|
||||
],
|
||||
};
|
||||
}
|
||||
if (request.method === "agent") {
|
||||
agentCallCount += 1;
|
||||
const runId = `run-${agentCallCount}`;
|
||||
const params = request.params as {
|
||||
message?: string;
|
||||
sessionKey?: string;
|
||||
};
|
||||
const message = params?.message ?? "";
|
||||
const sessionKey = params?.sessionKey ?? "";
|
||||
if (message === "Sub-agent announce step.") {
|
||||
sessionLastAssistantText.set(sessionKey, "hello from sub");
|
||||
} else {
|
||||
childRunId = runId;
|
||||
childSessionKey = sessionKey;
|
||||
sessionLastAssistantText.set(sessionKey, "done");
|
||||
}
|
||||
return {
|
||||
runId,
|
||||
status: "accepted",
|
||||
acceptedAt: 2000 + agentCallCount,
|
||||
};
|
||||
}
|
||||
if (request.method === "agent.wait") {
|
||||
const params = request.params as
|
||||
| { runId?: string; timeoutMs?: number }
|
||||
| undefined;
|
||||
waitCalls.push(params ?? {});
|
||||
const status = params?.runId === childRunId ? "timeout" : "ok";
|
||||
return { runId: params?.runId ?? "run-1", status };
|
||||
}
|
||||
if (request.method === "chat.history") {
|
||||
const params = request.params as { sessionKey?: string } | undefined;
|
||||
const text =
|
||||
sessionLastAssistantText.get(params?.sessionKey ?? "") ?? "";
|
||||
return {
|
||||
messages: [{ role: "assistant", content: [{ type: "text", text }] }],
|
||||
};
|
||||
}
|
||||
if (request.method === "send") {
|
||||
const params = request.params as
|
||||
| { to?: string; channel?: string; message?: string }
|
||||
| undefined;
|
||||
sendParams = {
|
||||
to: params?.to,
|
||||
channel: params?.channel,
|
||||
message: params?.message,
|
||||
};
|
||||
return { messageId: "m1" };
|
||||
}
|
||||
if (request.method === "sessions.delete") {
|
||||
return { ok: true };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "main",
|
||||
agentChannel: "whatsapp",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call2", {
|
||||
task: "do thing",
|
||||
runTimeoutSeconds: 1,
|
||||
});
|
||||
expect(result.details).toMatchObject({
|
||||
status: "accepted",
|
||||
runId: "run-1",
|
||||
});
|
||||
|
||||
if (!childRunId) throw new Error("missing child runId");
|
||||
emitAgentEvent({
|
||||
runId: childRunId,
|
||||
stream: "lifecycle",
|
||||
data: {
|
||||
phase: "end",
|
||||
startedAt: 1000,
|
||||
endedAt: 2000,
|
||||
},
|
||||
});
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 0));
|
||||
await new Promise((resolve) => setTimeout(resolve, 0));
|
||||
await new Promise((resolve) => setTimeout(resolve, 0));
|
||||
|
||||
const childWait = waitCalls.find((call) => call.runId === childRunId);
|
||||
expect(childWait?.timeoutMs).toBe(1000);
|
||||
expect(sendParams.channel).toBe("whatsapp");
|
||||
expect(sendParams.to).toBe("+123");
|
||||
expect(sendParams.message ?? "").toContain("hello from sub");
|
||||
expect(sendParams.message ?? "").toContain("Stats:");
|
||||
expect(childSessionKey?.startsWith("agent:main:subagent:")).toBe(true);
|
||||
});
|
||||
it("sessions_spawn only allows same-agent by default", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "main",
|
||||
agentChannel: "whatsapp",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call6", {
|
||||
task: "do thing",
|
||||
agentId: "beta",
|
||||
});
|
||||
expect(result.details).toMatchObject({
|
||||
status: "forbidden",
|
||||
});
|
||||
expect(callGatewayMock).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
141
src/agents/clawdbot-tools.subagents.part-4.test.ts
Normal file
141
src/agents/clawdbot-tools.subagents.part-4.test.ts
Normal file
@@ -0,0 +1,141 @@
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
|
||||
const callGatewayMock = vi.fn();
|
||||
vi.mock("../gateway/call.js", () => ({
|
||||
callGateway: (opts: unknown) => callGatewayMock(opts),
|
||||
}));
|
||||
|
||||
let configOverride: ReturnType<
|
||||
typeof import("../config/config.js")["loadConfig"]
|
||||
> = {
|
||||
session: {
|
||||
mainKey: "main",
|
||||
scope: "per-sender",
|
||||
},
|
||||
};
|
||||
|
||||
vi.mock("../config/config.js", async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import("../config/config.js")>();
|
||||
return {
|
||||
...actual,
|
||||
loadConfig: () => configOverride,
|
||||
resolveGatewayPort: () => 18789,
|
||||
};
|
||||
});
|
||||
|
||||
import { createClawdbotTools } from "./clawdbot-tools.js";
|
||||
import { resetSubagentRegistryForTests } from "./subagent-registry.js";
|
||||
|
||||
describe("clawdbot-tools: subagents", () => {
|
||||
beforeEach(() => {
|
||||
configOverride = {
|
||||
session: {
|
||||
mainKey: "main",
|
||||
scope: "per-sender",
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
it("sessions_spawn allows cross-agent spawning when configured", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
configOverride = {
|
||||
session: {
|
||||
mainKey: "main",
|
||||
scope: "per-sender",
|
||||
},
|
||||
agents: {
|
||||
list: [
|
||||
{
|
||||
id: "main",
|
||||
subagents: {
|
||||
allowAgents: ["beta"],
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
|
||||
let childSessionKey: string | undefined;
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: unknown };
|
||||
if (request.method === "agent") {
|
||||
const params = request.params as { sessionKey?: string } | undefined;
|
||||
childSessionKey = params?.sessionKey;
|
||||
return { runId: "run-1", status: "accepted", acceptedAt: 5000 };
|
||||
}
|
||||
if (request.method === "agent.wait") {
|
||||
return { status: "timeout" };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "main",
|
||||
agentChannel: "whatsapp",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call7", {
|
||||
task: "do thing",
|
||||
agentId: "beta",
|
||||
});
|
||||
|
||||
expect(result.details).toMatchObject({
|
||||
status: "accepted",
|
||||
runId: "run-1",
|
||||
});
|
||||
expect(childSessionKey?.startsWith("agent:beta:subagent:")).toBe(true);
|
||||
});
|
||||
it("sessions_spawn allows any agent when allowlist is *", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
configOverride = {
|
||||
session: {
|
||||
mainKey: "main",
|
||||
scope: "per-sender",
|
||||
},
|
||||
agents: {
|
||||
list: [
|
||||
{
|
||||
id: "main",
|
||||
subagents: {
|
||||
allowAgents: ["*"],
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
|
||||
let childSessionKey: string | undefined;
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: unknown };
|
||||
if (request.method === "agent") {
|
||||
const params = request.params as { sessionKey?: string } | undefined;
|
||||
childSessionKey = params?.sessionKey;
|
||||
return { runId: "run-1", status: "accepted", acceptedAt: 5100 };
|
||||
}
|
||||
if (request.method === "agent.wait") {
|
||||
return { status: "timeout" };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "main",
|
||||
agentChannel: "whatsapp",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call8", {
|
||||
task: "do thing",
|
||||
agentId: "beta",
|
||||
});
|
||||
|
||||
expect(result.details).toMatchObject({
|
||||
status: "accepted",
|
||||
runId: "run-1",
|
||||
});
|
||||
expect(childSessionKey?.startsWith("agent:beta:subagent:")).toBe(true);
|
||||
});
|
||||
});
|
||||
125
src/agents/clawdbot-tools.subagents.part-5.test.ts
Normal file
125
src/agents/clawdbot-tools.subagents.part-5.test.ts
Normal file
@@ -0,0 +1,125 @@
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
|
||||
const callGatewayMock = vi.fn();
|
||||
vi.mock("../gateway/call.js", () => ({
|
||||
callGateway: (opts: unknown) => callGatewayMock(opts),
|
||||
}));
|
||||
|
||||
let configOverride: ReturnType<
|
||||
typeof import("../config/config.js")["loadConfig"]
|
||||
> = {
|
||||
session: {
|
||||
mainKey: "main",
|
||||
scope: "per-sender",
|
||||
},
|
||||
};
|
||||
|
||||
vi.mock("../config/config.js", async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import("../config/config.js")>();
|
||||
return {
|
||||
...actual,
|
||||
loadConfig: () => configOverride,
|
||||
resolveGatewayPort: () => 18789,
|
||||
};
|
||||
});
|
||||
|
||||
import { createClawdbotTools } from "./clawdbot-tools.js";
|
||||
import { resetSubagentRegistryForTests } from "./subagent-registry.js";
|
||||
|
||||
describe("clawdbot-tools: subagents", () => {
|
||||
beforeEach(() => {
|
||||
configOverride = {
|
||||
session: {
|
||||
mainKey: "main",
|
||||
scope: "per-sender",
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
it("sessions_spawn normalizes allowlisted agent ids", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
configOverride = {
|
||||
session: {
|
||||
mainKey: "main",
|
||||
scope: "per-sender",
|
||||
},
|
||||
agents: {
|
||||
list: [
|
||||
{
|
||||
id: "main",
|
||||
subagents: {
|
||||
allowAgents: ["Research"],
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
|
||||
let childSessionKey: string | undefined;
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: unknown };
|
||||
if (request.method === "agent") {
|
||||
const params = request.params as { sessionKey?: string } | undefined;
|
||||
childSessionKey = params?.sessionKey;
|
||||
return { runId: "run-1", status: "accepted", acceptedAt: 5200 };
|
||||
}
|
||||
if (request.method === "agent.wait") {
|
||||
return { status: "timeout" };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "main",
|
||||
agentChannel: "whatsapp",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call10", {
|
||||
task: "do thing",
|
||||
agentId: "research",
|
||||
});
|
||||
|
||||
expect(result.details).toMatchObject({
|
||||
status: "accepted",
|
||||
runId: "run-1",
|
||||
});
|
||||
expect(childSessionKey?.startsWith("agent:research:subagent:")).toBe(true);
|
||||
});
|
||||
it("sessions_spawn forbids cross-agent spawning when not allowed", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
configOverride = {
|
||||
session: {
|
||||
mainKey: "main",
|
||||
scope: "per-sender",
|
||||
},
|
||||
agents: {
|
||||
list: [
|
||||
{
|
||||
id: "main",
|
||||
subagents: {
|
||||
allowAgents: ["alpha"],
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "main",
|
||||
agentChannel: "whatsapp",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call9", {
|
||||
task: "do thing",
|
||||
agentId: "beta",
|
||||
});
|
||||
expect(result.details).toMatchObject({
|
||||
status: "forbidden",
|
||||
});
|
||||
expect(callGatewayMock).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
139
src/agents/clawdbot-tools.subagents.part-6.test.ts
Normal file
139
src/agents/clawdbot-tools.subagents.part-6.test.ts
Normal file
@@ -0,0 +1,139 @@
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
|
||||
const callGatewayMock = vi.fn();
|
||||
vi.mock("../gateway/call.js", () => ({
|
||||
callGateway: (opts: unknown) => callGatewayMock(opts),
|
||||
}));
|
||||
|
||||
let configOverride: ReturnType<
|
||||
typeof import("../config/config.js")["loadConfig"]
|
||||
> = {
|
||||
session: {
|
||||
mainKey: "main",
|
||||
scope: "per-sender",
|
||||
},
|
||||
};
|
||||
|
||||
vi.mock("../config/config.js", async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import("../config/config.js")>();
|
||||
return {
|
||||
...actual,
|
||||
loadConfig: () => configOverride,
|
||||
resolveGatewayPort: () => 18789,
|
||||
};
|
||||
});
|
||||
|
||||
import { createClawdbotTools } from "./clawdbot-tools.js";
|
||||
import { resetSubagentRegistryForTests } from "./subagent-registry.js";
|
||||
|
||||
describe("clawdbot-tools: subagents", () => {
|
||||
beforeEach(() => {
|
||||
configOverride = {
|
||||
session: {
|
||||
mainKey: "main",
|
||||
scope: "per-sender",
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
it("sessions_spawn applies a model to the child session", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
const calls: Array<{ method?: string; params?: unknown }> = [];
|
||||
let agentCallCount = 0;
|
||||
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: unknown };
|
||||
calls.push(request);
|
||||
if (request.method === "sessions.patch") {
|
||||
return { ok: true };
|
||||
}
|
||||
if (request.method === "agent") {
|
||||
agentCallCount += 1;
|
||||
const runId = `run-${agentCallCount}`;
|
||||
return {
|
||||
runId,
|
||||
status: "accepted",
|
||||
acceptedAt: 3000 + agentCallCount,
|
||||
};
|
||||
}
|
||||
if (request.method === "agent.wait") {
|
||||
return { status: "timeout" };
|
||||
}
|
||||
if (request.method === "sessions.delete") {
|
||||
return { ok: true };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "discord:group:req",
|
||||
agentSurface: "discord",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call3", {
|
||||
task: "do thing",
|
||||
runTimeoutSeconds: 1,
|
||||
model: "claude-haiku-4-5",
|
||||
cleanup: "keep",
|
||||
});
|
||||
expect(result.details).toMatchObject({
|
||||
status: "accepted",
|
||||
modelApplied: true,
|
||||
});
|
||||
|
||||
const patchIndex = calls.findIndex(
|
||||
(call) => call.method === "sessions.patch",
|
||||
);
|
||||
const agentIndex = calls.findIndex((call) => call.method === "agent");
|
||||
expect(patchIndex).toBeGreaterThan(-1);
|
||||
expect(agentIndex).toBeGreaterThan(-1);
|
||||
expect(patchIndex).toBeLessThan(agentIndex);
|
||||
const patchCall = calls[patchIndex];
|
||||
expect(patchCall?.params).toMatchObject({
|
||||
key: expect.stringContaining("subagent:"),
|
||||
model: "claude-haiku-4-5",
|
||||
});
|
||||
});
|
||||
it("sessions_spawn applies default subagent model from defaults config", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
configOverride = {
|
||||
session: { mainKey: "main", scope: "per-sender" },
|
||||
agents: { defaults: { subagents: { model: "minimax/MiniMax-M2.1" } } },
|
||||
};
|
||||
const calls: Array<{ method?: string; params?: unknown }> = [];
|
||||
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: unknown };
|
||||
calls.push(request);
|
||||
if (request.method === "sessions.patch") {
|
||||
return { ok: true };
|
||||
}
|
||||
if (request.method === "agent") {
|
||||
return { runId: "run-default-model", status: "accepted" };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "agent:main:main",
|
||||
agentChannel: "discord",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call-default-model", {
|
||||
task: "do thing",
|
||||
});
|
||||
expect(result.details).toMatchObject({
|
||||
status: "accepted",
|
||||
modelApplied: true,
|
||||
});
|
||||
|
||||
const patchCall = calls.find((call) => call.method === "sessions.patch");
|
||||
expect(patchCall?.params).toMatchObject({
|
||||
model: "minimax/MiniMax-M2.1",
|
||||
});
|
||||
});
|
||||
});
|
||||
163
src/agents/clawdbot-tools.subagents.part-7.test.ts
Normal file
163
src/agents/clawdbot-tools.subagents.part-7.test.ts
Normal file
@@ -0,0 +1,163 @@
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
|
||||
const callGatewayMock = vi.fn();
|
||||
vi.mock("../gateway/call.js", () => ({
|
||||
callGateway: (opts: unknown) => callGatewayMock(opts),
|
||||
}));
|
||||
|
||||
let configOverride: ReturnType<
|
||||
typeof import("../config/config.js")["loadConfig"]
|
||||
> = {
|
||||
session: {
|
||||
mainKey: "main",
|
||||
scope: "per-sender",
|
||||
},
|
||||
};
|
||||
|
||||
vi.mock("../config/config.js", async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import("../config/config.js")>();
|
||||
return {
|
||||
...actual,
|
||||
loadConfig: () => configOverride,
|
||||
resolveGatewayPort: () => 18789,
|
||||
};
|
||||
});
|
||||
|
||||
import { createClawdbotTools } from "./clawdbot-tools.js";
|
||||
import { resetSubagentRegistryForTests } from "./subagent-registry.js";
|
||||
|
||||
describe("clawdbot-tools: subagents", () => {
|
||||
beforeEach(() => {
|
||||
configOverride = {
|
||||
session: {
|
||||
mainKey: "main",
|
||||
scope: "per-sender",
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
it("sessions_spawn prefers per-agent subagent model over defaults", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
configOverride = {
|
||||
session: { mainKey: "main", scope: "per-sender" },
|
||||
agents: {
|
||||
defaults: { subagents: { model: "minimax/MiniMax-M2.1" } },
|
||||
list: [{ id: "research", subagents: { model: "opencode/claude" } }],
|
||||
},
|
||||
};
|
||||
const calls: Array<{ method?: string; params?: unknown }> = [];
|
||||
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: unknown };
|
||||
calls.push(request);
|
||||
if (request.method === "sessions.patch") {
|
||||
return { ok: true };
|
||||
}
|
||||
if (request.method === "agent") {
|
||||
return { runId: "run-agent-model", status: "accepted" };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "agent:research:main",
|
||||
agentChannel: "discord",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call-agent-model", {
|
||||
task: "do thing",
|
||||
});
|
||||
expect(result.details).toMatchObject({
|
||||
status: "accepted",
|
||||
modelApplied: true,
|
||||
});
|
||||
|
||||
const patchCall = calls.find((call) => call.method === "sessions.patch");
|
||||
expect(patchCall?.params).toMatchObject({
|
||||
model: "opencode/claude",
|
||||
});
|
||||
});
|
||||
it("sessions_spawn skips invalid model overrides and continues", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
const calls: Array<{ method?: string; params?: unknown }> = [];
|
||||
let agentCallCount = 0;
|
||||
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: unknown };
|
||||
calls.push(request);
|
||||
if (request.method === "sessions.patch") {
|
||||
throw new Error("invalid model: bad-model");
|
||||
}
|
||||
if (request.method === "agent") {
|
||||
agentCallCount += 1;
|
||||
const runId = `run-${agentCallCount}`;
|
||||
return {
|
||||
runId,
|
||||
status: "accepted",
|
||||
acceptedAt: 4000 + agentCallCount,
|
||||
};
|
||||
}
|
||||
if (request.method === "agent.wait") {
|
||||
return { status: "timeout" };
|
||||
}
|
||||
if (request.method === "sessions.delete") {
|
||||
return { ok: true };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "main",
|
||||
agentChannel: "whatsapp",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call4", {
|
||||
task: "do thing",
|
||||
runTimeoutSeconds: 1,
|
||||
model: "bad-model",
|
||||
});
|
||||
expect(result.details).toMatchObject({
|
||||
status: "accepted",
|
||||
modelApplied: false,
|
||||
});
|
||||
expect(
|
||||
String((result.details as { warning?: string }).warning ?? ""),
|
||||
).toContain("invalid model");
|
||||
expect(calls.some((call) => call.method === "agent")).toBe(true);
|
||||
});
|
||||
it("sessions_spawn supports legacy timeoutSeconds alias", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
let spawnedTimeout: number | undefined;
|
||||
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: unknown };
|
||||
if (request.method === "agent") {
|
||||
const params = request.params as { timeout?: number } | undefined;
|
||||
spawnedTimeout = params?.timeout;
|
||||
return { runId: "run-1", status: "accepted", acceptedAt: 1000 };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "main",
|
||||
agentChannel: "whatsapp",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call5", {
|
||||
task: "do thing",
|
||||
timeoutSeconds: 2,
|
||||
});
|
||||
expect(result.details).toMatchObject({
|
||||
status: "accepted",
|
||||
runId: "run-1",
|
||||
});
|
||||
expect(spawnedTimeout).toBe(2);
|
||||
});
|
||||
});
|
||||
@@ -1,857 +0,0 @@
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
|
||||
const callGatewayMock = vi.fn();
|
||||
vi.mock("../gateway/call.js", () => ({
|
||||
callGateway: (opts: unknown) => callGatewayMock(opts),
|
||||
}));
|
||||
|
||||
let configOverride: ReturnType<
|
||||
typeof import("../config/config.js")["loadConfig"]
|
||||
> = {
|
||||
session: {
|
||||
mainKey: "main",
|
||||
scope: "per-sender",
|
||||
},
|
||||
};
|
||||
|
||||
vi.mock("../config/config.js", async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import("../config/config.js")>();
|
||||
return {
|
||||
...actual,
|
||||
loadConfig: () => configOverride,
|
||||
resolveGatewayPort: () => 18789,
|
||||
};
|
||||
});
|
||||
|
||||
import { emitAgentEvent } from "../infra/agent-events.js";
|
||||
import { createClawdbotTools } from "./clawdbot-tools.js";
|
||||
import { resetSubagentRegistryForTests } from "./subagent-registry.js";
|
||||
|
||||
describe("subagents", () => {
|
||||
beforeEach(() => {
|
||||
configOverride = {
|
||||
session: {
|
||||
mainKey: "main",
|
||||
scope: "per-sender",
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
it("sessions_spawn announces back to the requester group channel", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
const calls: Array<{ method?: string; params?: unknown }> = [];
|
||||
let agentCallCount = 0;
|
||||
let sendParams: { to?: string; channel?: string; message?: string } = {};
|
||||
let deletedKey: string | undefined;
|
||||
let childRunId: string | undefined;
|
||||
let childSessionKey: string | undefined;
|
||||
const waitCalls: Array<{ runId?: string; timeoutMs?: number }> = [];
|
||||
const sessionLastAssistantText = new Map<string, string>();
|
||||
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: unknown };
|
||||
calls.push(request);
|
||||
if (request.method === "agent") {
|
||||
agentCallCount += 1;
|
||||
const runId = `run-${agentCallCount}`;
|
||||
const params = request.params as {
|
||||
message?: string;
|
||||
sessionKey?: string;
|
||||
channel?: string;
|
||||
timeout?: number;
|
||||
};
|
||||
const message = params?.message ?? "";
|
||||
const sessionKey = params?.sessionKey ?? "";
|
||||
if (message === "Sub-agent announce step.") {
|
||||
sessionLastAssistantText.set(sessionKey, "announce now");
|
||||
} else {
|
||||
childRunId = runId;
|
||||
childSessionKey = sessionKey;
|
||||
sessionLastAssistantText.set(sessionKey, "result");
|
||||
expect(params?.channel).toBe("discord");
|
||||
expect(params?.timeout).toBe(1);
|
||||
}
|
||||
return {
|
||||
runId,
|
||||
status: "accepted",
|
||||
acceptedAt: 1000 + agentCallCount,
|
||||
};
|
||||
}
|
||||
if (request.method === "agent.wait") {
|
||||
const params = request.params as
|
||||
| { runId?: string; timeoutMs?: number }
|
||||
| undefined;
|
||||
waitCalls.push(params ?? {});
|
||||
const status = params?.runId === childRunId ? "timeout" : "ok";
|
||||
return { runId: params?.runId ?? "run-1", status };
|
||||
}
|
||||
if (request.method === "chat.history") {
|
||||
const params = request.params as { sessionKey?: string } | undefined;
|
||||
const text =
|
||||
sessionLastAssistantText.get(params?.sessionKey ?? "") ?? "";
|
||||
return {
|
||||
messages: [{ role: "assistant", content: [{ type: "text", text }] }],
|
||||
};
|
||||
}
|
||||
if (request.method === "send") {
|
||||
const params = request.params as
|
||||
| { to?: string; channel?: string; message?: string }
|
||||
| undefined;
|
||||
sendParams = {
|
||||
to: params?.to,
|
||||
channel: params?.channel,
|
||||
message: params?.message,
|
||||
};
|
||||
return { messageId: "m-announce" };
|
||||
}
|
||||
if (request.method === "sessions.delete") {
|
||||
const params = request.params as { key?: string } | undefined;
|
||||
deletedKey = params?.key;
|
||||
return { ok: true };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "discord:group:req",
|
||||
agentChannel: "discord",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call1", {
|
||||
task: "do thing",
|
||||
runTimeoutSeconds: 1,
|
||||
cleanup: "delete",
|
||||
});
|
||||
expect(result.details).toMatchObject({
|
||||
status: "accepted",
|
||||
runId: "run-1",
|
||||
});
|
||||
|
||||
if (!childRunId) throw new Error("missing child runId");
|
||||
emitAgentEvent({
|
||||
runId: childRunId,
|
||||
stream: "lifecycle",
|
||||
data: {
|
||||
phase: "end",
|
||||
startedAt: 1234,
|
||||
endedAt: 2345,
|
||||
},
|
||||
});
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 0));
|
||||
await new Promise((resolve) => setTimeout(resolve, 0));
|
||||
await new Promise((resolve) => setTimeout(resolve, 0));
|
||||
|
||||
const childWait = waitCalls.find((call) => call.runId === childRunId);
|
||||
expect(childWait?.timeoutMs).toBe(1000);
|
||||
const agentCalls = calls.filter((call) => call.method === "agent");
|
||||
expect(agentCalls).toHaveLength(2);
|
||||
const first = agentCalls[0]?.params as
|
||||
| {
|
||||
lane?: string;
|
||||
deliver?: boolean;
|
||||
sessionKey?: string;
|
||||
channel?: string;
|
||||
}
|
||||
| undefined;
|
||||
expect(first?.lane).toBe("subagent");
|
||||
expect(first?.deliver).toBe(false);
|
||||
expect(first?.channel).toBe("discord");
|
||||
expect(first?.sessionKey?.startsWith("agent:main:subagent:")).toBe(true);
|
||||
expect(childSessionKey?.startsWith("agent:main:subagent:")).toBe(true);
|
||||
const second = agentCalls[1]?.params as
|
||||
| { channel?: string; deliver?: boolean; lane?: string }
|
||||
| undefined;
|
||||
expect(second?.lane).toBe("nested");
|
||||
expect(second?.deliver).toBe(false);
|
||||
expect(second?.channel).toBe("webchat");
|
||||
|
||||
expect(sendParams.channel).toBe("discord");
|
||||
expect(sendParams.to).toBe("channel:req");
|
||||
expect(sendParams.message ?? "").toContain("announce now");
|
||||
expect(sendParams.message ?? "").toContain("Stats:");
|
||||
expect(deletedKey?.startsWith("agent:main:subagent:")).toBe(true);
|
||||
});
|
||||
|
||||
it("sessions_spawn announces via agent.wait when lifecycle events are missing", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
const calls: Array<{ method?: string; params?: unknown }> = [];
|
||||
let agentCallCount = 0;
|
||||
let sendParams: { to?: string; channel?: string; message?: string } = {};
|
||||
let deletedKey: string | undefined;
|
||||
let childRunId: string | undefined;
|
||||
let childSessionKey: string | undefined;
|
||||
const waitCalls: Array<{ runId?: string; timeoutMs?: number }> = [];
|
||||
const sessionLastAssistantText = new Map<string, string>();
|
||||
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: unknown };
|
||||
calls.push(request);
|
||||
if (request.method === "agent") {
|
||||
agentCallCount += 1;
|
||||
const runId = `run-${agentCallCount}`;
|
||||
const params = request.params as {
|
||||
message?: string;
|
||||
sessionKey?: string;
|
||||
channel?: string;
|
||||
timeout?: number;
|
||||
};
|
||||
const message = params?.message ?? "";
|
||||
const sessionKey = params?.sessionKey ?? "";
|
||||
if (message === "Sub-agent announce step.") {
|
||||
sessionLastAssistantText.set(sessionKey, "announce now");
|
||||
} else {
|
||||
childRunId = runId;
|
||||
childSessionKey = sessionKey;
|
||||
sessionLastAssistantText.set(sessionKey, "result");
|
||||
expect(params?.channel).toBe("discord");
|
||||
expect(params?.timeout).toBe(1);
|
||||
}
|
||||
return {
|
||||
runId,
|
||||
status: "accepted",
|
||||
acceptedAt: 2000 + agentCallCount,
|
||||
};
|
||||
}
|
||||
if (request.method === "agent.wait") {
|
||||
const params = request.params as
|
||||
| { runId?: string; timeoutMs?: number }
|
||||
| undefined;
|
||||
waitCalls.push(params ?? {});
|
||||
return {
|
||||
runId: params?.runId ?? "run-1",
|
||||
status: "ok",
|
||||
startedAt: 3000,
|
||||
endedAt: 4000,
|
||||
};
|
||||
}
|
||||
if (request.method === "chat.history") {
|
||||
const params = request.params as { sessionKey?: string } | undefined;
|
||||
const text =
|
||||
sessionLastAssistantText.get(params?.sessionKey ?? "") ?? "";
|
||||
return {
|
||||
messages: [{ role: "assistant", content: [{ type: "text", text }] }],
|
||||
};
|
||||
}
|
||||
if (request.method === "send") {
|
||||
const params = request.params as
|
||||
| { to?: string; channel?: string; message?: string }
|
||||
| undefined;
|
||||
sendParams = {
|
||||
to: params?.to,
|
||||
channel: params?.channel,
|
||||
message: params?.message,
|
||||
};
|
||||
return { messageId: "m-announce" };
|
||||
}
|
||||
if (request.method === "sessions.delete") {
|
||||
const params = request.params as { key?: string } | undefined;
|
||||
deletedKey = params?.key;
|
||||
return { ok: true };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "discord:group:req",
|
||||
agentChannel: "discord",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call1b", {
|
||||
task: "do thing",
|
||||
runTimeoutSeconds: 1,
|
||||
cleanup: "delete",
|
||||
});
|
||||
expect(result.details).toMatchObject({
|
||||
status: "accepted",
|
||||
runId: "run-1",
|
||||
});
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 0));
|
||||
await new Promise((resolve) => setTimeout(resolve, 0));
|
||||
await new Promise((resolve) => setTimeout(resolve, 0));
|
||||
|
||||
const childWait = waitCalls.find((call) => call.runId === childRunId);
|
||||
expect(childWait?.timeoutMs).toBe(1000);
|
||||
expect(childSessionKey?.startsWith("agent:main:subagent:")).toBe(true);
|
||||
|
||||
const agentCalls = calls.filter((call) => call.method === "agent");
|
||||
expect(agentCalls).toHaveLength(2);
|
||||
const second = agentCalls[1]?.params as
|
||||
| { channel?: string; deliver?: boolean; lane?: string }
|
||||
| undefined;
|
||||
expect(second?.lane).toBe("nested");
|
||||
expect(second?.deliver).toBe(false);
|
||||
expect(second?.channel).toBe("webchat");
|
||||
|
||||
expect(sendParams.channel).toBe("discord");
|
||||
expect(sendParams.to).toBe("channel:req");
|
||||
expect(sendParams.message ?? "").toContain("announce now");
|
||||
expect(sendParams.message ?? "").toContain("Stats:");
|
||||
expect(deletedKey?.startsWith("agent:main:subagent:")).toBe(true);
|
||||
});
|
||||
|
||||
it("sessions_spawn resolves main announce target from sessions.list", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
const calls: Array<{ method?: string; params?: unknown }> = [];
|
||||
let agentCallCount = 0;
|
||||
let sendParams: { to?: string; channel?: string; message?: string } = {};
|
||||
let childRunId: string | undefined;
|
||||
let childSessionKey: string | undefined;
|
||||
const waitCalls: Array<{ runId?: string; timeoutMs?: number }> = [];
|
||||
const sessionLastAssistantText = new Map<string, string>();
|
||||
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: unknown };
|
||||
calls.push(request);
|
||||
if (request.method === "sessions.list") {
|
||||
return {
|
||||
sessions: [
|
||||
{
|
||||
key: "main",
|
||||
lastChannel: "whatsapp",
|
||||
lastTo: "+123",
|
||||
},
|
||||
],
|
||||
};
|
||||
}
|
||||
if (request.method === "agent") {
|
||||
agentCallCount += 1;
|
||||
const runId = `run-${agentCallCount}`;
|
||||
const params = request.params as {
|
||||
message?: string;
|
||||
sessionKey?: string;
|
||||
};
|
||||
const message = params?.message ?? "";
|
||||
const sessionKey = params?.sessionKey ?? "";
|
||||
if (message === "Sub-agent announce step.") {
|
||||
sessionLastAssistantText.set(sessionKey, "hello from sub");
|
||||
} else {
|
||||
childRunId = runId;
|
||||
childSessionKey = sessionKey;
|
||||
sessionLastAssistantText.set(sessionKey, "done");
|
||||
}
|
||||
return {
|
||||
runId,
|
||||
status: "accepted",
|
||||
acceptedAt: 2000 + agentCallCount,
|
||||
};
|
||||
}
|
||||
if (request.method === "agent.wait") {
|
||||
const params = request.params as
|
||||
| { runId?: string; timeoutMs?: number }
|
||||
| undefined;
|
||||
waitCalls.push(params ?? {});
|
||||
const status = params?.runId === childRunId ? "timeout" : "ok";
|
||||
return { runId: params?.runId ?? "run-1", status };
|
||||
}
|
||||
if (request.method === "chat.history") {
|
||||
const params = request.params as { sessionKey?: string } | undefined;
|
||||
const text =
|
||||
sessionLastAssistantText.get(params?.sessionKey ?? "") ?? "";
|
||||
return {
|
||||
messages: [{ role: "assistant", content: [{ type: "text", text }] }],
|
||||
};
|
||||
}
|
||||
if (request.method === "send") {
|
||||
const params = request.params as
|
||||
| { to?: string; channel?: string; message?: string }
|
||||
| undefined;
|
||||
sendParams = {
|
||||
to: params?.to,
|
||||
channel: params?.channel,
|
||||
message: params?.message,
|
||||
};
|
||||
return { messageId: "m1" };
|
||||
}
|
||||
if (request.method === "sessions.delete") {
|
||||
return { ok: true };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "main",
|
||||
agentChannel: "whatsapp",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call2", {
|
||||
task: "do thing",
|
||||
runTimeoutSeconds: 1,
|
||||
});
|
||||
expect(result.details).toMatchObject({
|
||||
status: "accepted",
|
||||
runId: "run-1",
|
||||
});
|
||||
|
||||
if (!childRunId) throw new Error("missing child runId");
|
||||
emitAgentEvent({
|
||||
runId: childRunId,
|
||||
stream: "lifecycle",
|
||||
data: {
|
||||
phase: "end",
|
||||
startedAt: 1000,
|
||||
endedAt: 2000,
|
||||
},
|
||||
});
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 0));
|
||||
await new Promise((resolve) => setTimeout(resolve, 0));
|
||||
await new Promise((resolve) => setTimeout(resolve, 0));
|
||||
|
||||
const childWait = waitCalls.find((call) => call.runId === childRunId);
|
||||
expect(childWait?.timeoutMs).toBe(1000);
|
||||
expect(sendParams.channel).toBe("whatsapp");
|
||||
expect(sendParams.to).toBe("+123");
|
||||
expect(sendParams.message ?? "").toContain("hello from sub");
|
||||
expect(sendParams.message ?? "").toContain("Stats:");
|
||||
expect(childSessionKey?.startsWith("agent:main:subagent:")).toBe(true);
|
||||
});
|
||||
|
||||
it("sessions_spawn only allows same-agent by default", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "main",
|
||||
agentChannel: "whatsapp",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call6", {
|
||||
task: "do thing",
|
||||
agentId: "beta",
|
||||
});
|
||||
expect(result.details).toMatchObject({
|
||||
status: "forbidden",
|
||||
});
|
||||
expect(callGatewayMock).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("sessions_spawn allows cross-agent spawning when configured", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
configOverride = {
|
||||
session: {
|
||||
mainKey: "main",
|
||||
scope: "per-sender",
|
||||
},
|
||||
agents: {
|
||||
list: [
|
||||
{
|
||||
id: "main",
|
||||
subagents: {
|
||||
allowAgents: ["beta"],
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
|
||||
let childSessionKey: string | undefined;
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: unknown };
|
||||
if (request.method === "agent") {
|
||||
const params = request.params as { sessionKey?: string } | undefined;
|
||||
childSessionKey = params?.sessionKey;
|
||||
return { runId: "run-1", status: "accepted", acceptedAt: 5000 };
|
||||
}
|
||||
if (request.method === "agent.wait") {
|
||||
return { status: "timeout" };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "main",
|
||||
agentChannel: "whatsapp",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call7", {
|
||||
task: "do thing",
|
||||
agentId: "beta",
|
||||
});
|
||||
|
||||
expect(result.details).toMatchObject({
|
||||
status: "accepted",
|
||||
runId: "run-1",
|
||||
});
|
||||
expect(childSessionKey?.startsWith("agent:beta:subagent:")).toBe(true);
|
||||
});
|
||||
|
||||
it("sessions_spawn allows any agent when allowlist is *", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
configOverride = {
|
||||
session: {
|
||||
mainKey: "main",
|
||||
scope: "per-sender",
|
||||
},
|
||||
agents: {
|
||||
list: [
|
||||
{
|
||||
id: "main",
|
||||
subagents: {
|
||||
allowAgents: ["*"],
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
|
||||
let childSessionKey: string | undefined;
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: unknown };
|
||||
if (request.method === "agent") {
|
||||
const params = request.params as { sessionKey?: string } | undefined;
|
||||
childSessionKey = params?.sessionKey;
|
||||
return { runId: "run-1", status: "accepted", acceptedAt: 5100 };
|
||||
}
|
||||
if (request.method === "agent.wait") {
|
||||
return { status: "timeout" };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "main",
|
||||
agentChannel: "whatsapp",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call8", {
|
||||
task: "do thing",
|
||||
agentId: "beta",
|
||||
});
|
||||
|
||||
expect(result.details).toMatchObject({
|
||||
status: "accepted",
|
||||
runId: "run-1",
|
||||
});
|
||||
expect(childSessionKey?.startsWith("agent:beta:subagent:")).toBe(true);
|
||||
});
|
||||
|
||||
it("sessions_spawn normalizes allowlisted agent ids", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
configOverride = {
|
||||
session: {
|
||||
mainKey: "main",
|
||||
scope: "per-sender",
|
||||
},
|
||||
agents: {
|
||||
list: [
|
||||
{
|
||||
id: "main",
|
||||
subagents: {
|
||||
allowAgents: ["Research"],
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
|
||||
let childSessionKey: string | undefined;
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: unknown };
|
||||
if (request.method === "agent") {
|
||||
const params = request.params as { sessionKey?: string } | undefined;
|
||||
childSessionKey = params?.sessionKey;
|
||||
return { runId: "run-1", status: "accepted", acceptedAt: 5200 };
|
||||
}
|
||||
if (request.method === "agent.wait") {
|
||||
return { status: "timeout" };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "main",
|
||||
agentChannel: "whatsapp",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call10", {
|
||||
task: "do thing",
|
||||
agentId: "research",
|
||||
});
|
||||
|
||||
expect(result.details).toMatchObject({
|
||||
status: "accepted",
|
||||
runId: "run-1",
|
||||
});
|
||||
expect(childSessionKey?.startsWith("agent:research:subagent:")).toBe(true);
|
||||
});
|
||||
|
||||
it("sessions_spawn forbids cross-agent spawning when not allowed", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
configOverride = {
|
||||
session: {
|
||||
mainKey: "main",
|
||||
scope: "per-sender",
|
||||
},
|
||||
agents: {
|
||||
list: [
|
||||
{
|
||||
id: "main",
|
||||
subagents: {
|
||||
allowAgents: ["alpha"],
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "main",
|
||||
agentChannel: "whatsapp",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call9", {
|
||||
task: "do thing",
|
||||
agentId: "beta",
|
||||
});
|
||||
expect(result.details).toMatchObject({
|
||||
status: "forbidden",
|
||||
});
|
||||
expect(callGatewayMock).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("sessions_spawn applies a model to the child session", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
const calls: Array<{ method?: string; params?: unknown }> = [];
|
||||
let agentCallCount = 0;
|
||||
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: unknown };
|
||||
calls.push(request);
|
||||
if (request.method === "sessions.patch") {
|
||||
return { ok: true };
|
||||
}
|
||||
if (request.method === "agent") {
|
||||
agentCallCount += 1;
|
||||
const runId = `run-${agentCallCount}`;
|
||||
return {
|
||||
runId,
|
||||
status: "accepted",
|
||||
acceptedAt: 3000 + agentCallCount,
|
||||
};
|
||||
}
|
||||
if (request.method === "agent.wait") {
|
||||
return { status: "timeout" };
|
||||
}
|
||||
if (request.method === "sessions.delete") {
|
||||
return { ok: true };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "discord:group:req",
|
||||
agentSurface: "discord",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call3", {
|
||||
task: "do thing",
|
||||
runTimeoutSeconds: 1,
|
||||
model: "claude-haiku-4-5",
|
||||
cleanup: "keep",
|
||||
});
|
||||
expect(result.details).toMatchObject({
|
||||
status: "accepted",
|
||||
modelApplied: true,
|
||||
});
|
||||
|
||||
const patchIndex = calls.findIndex(
|
||||
(call) => call.method === "sessions.patch",
|
||||
);
|
||||
const agentIndex = calls.findIndex((call) => call.method === "agent");
|
||||
expect(patchIndex).toBeGreaterThan(-1);
|
||||
expect(agentIndex).toBeGreaterThan(-1);
|
||||
expect(patchIndex).toBeLessThan(agentIndex);
|
||||
const patchCall = calls[patchIndex];
|
||||
expect(patchCall?.params).toMatchObject({
|
||||
key: expect.stringContaining("subagent:"),
|
||||
model: "claude-haiku-4-5",
|
||||
});
|
||||
});
|
||||
|
||||
it("sessions_spawn applies default subagent model from defaults config", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
configOverride = {
|
||||
session: { mainKey: "main", scope: "per-sender" },
|
||||
agents: { defaults: { subagents: { model: "minimax/MiniMax-M2.1" } } },
|
||||
};
|
||||
const calls: Array<{ method?: string; params?: unknown }> = [];
|
||||
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: unknown };
|
||||
calls.push(request);
|
||||
if (request.method === "sessions.patch") {
|
||||
return { ok: true };
|
||||
}
|
||||
if (request.method === "agent") {
|
||||
return { runId: "run-default-model", status: "accepted" };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "agent:main:main",
|
||||
agentChannel: "discord",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call-default-model", {
|
||||
task: "do thing",
|
||||
});
|
||||
expect(result.details).toMatchObject({
|
||||
status: "accepted",
|
||||
modelApplied: true,
|
||||
});
|
||||
|
||||
const patchCall = calls.find((call) => call.method === "sessions.patch");
|
||||
expect(patchCall?.params).toMatchObject({
|
||||
model: "minimax/MiniMax-M2.1",
|
||||
});
|
||||
});
|
||||
|
||||
it("sessions_spawn prefers per-agent subagent model over defaults", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
configOverride = {
|
||||
session: { mainKey: "main", scope: "per-sender" },
|
||||
agents: {
|
||||
defaults: { subagents: { model: "minimax/MiniMax-M2.1" } },
|
||||
list: [{ id: "research", subagents: { model: "opencode/claude" } }],
|
||||
},
|
||||
};
|
||||
const calls: Array<{ method?: string; params?: unknown }> = [];
|
||||
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: unknown };
|
||||
calls.push(request);
|
||||
if (request.method === "sessions.patch") {
|
||||
return { ok: true };
|
||||
}
|
||||
if (request.method === "agent") {
|
||||
return { runId: "run-agent-model", status: "accepted" };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "agent:research:main",
|
||||
agentChannel: "discord",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call-agent-model", {
|
||||
task: "do thing",
|
||||
});
|
||||
expect(result.details).toMatchObject({
|
||||
status: "accepted",
|
||||
modelApplied: true,
|
||||
});
|
||||
|
||||
const patchCall = calls.find((call) => call.method === "sessions.patch");
|
||||
expect(patchCall?.params).toMatchObject({
|
||||
model: "opencode/claude",
|
||||
});
|
||||
});
|
||||
|
||||
it("sessions_spawn skips invalid model overrides and continues", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
const calls: Array<{ method?: string; params?: unknown }> = [];
|
||||
let agentCallCount = 0;
|
||||
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: unknown };
|
||||
calls.push(request);
|
||||
if (request.method === "sessions.patch") {
|
||||
throw new Error("invalid model: bad-model");
|
||||
}
|
||||
if (request.method === "agent") {
|
||||
agentCallCount += 1;
|
||||
const runId = `run-${agentCallCount}`;
|
||||
return {
|
||||
runId,
|
||||
status: "accepted",
|
||||
acceptedAt: 4000 + agentCallCount,
|
||||
};
|
||||
}
|
||||
if (request.method === "agent.wait") {
|
||||
return { status: "timeout" };
|
||||
}
|
||||
if (request.method === "sessions.delete") {
|
||||
return { ok: true };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "main",
|
||||
agentChannel: "whatsapp",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call4", {
|
||||
task: "do thing",
|
||||
runTimeoutSeconds: 1,
|
||||
model: "bad-model",
|
||||
});
|
||||
expect(result.details).toMatchObject({
|
||||
status: "accepted",
|
||||
modelApplied: false,
|
||||
});
|
||||
expect(
|
||||
String((result.details as { warning?: string }).warning ?? ""),
|
||||
).toContain("invalid model");
|
||||
expect(calls.some((call) => call.method === "agent")).toBe(true);
|
||||
});
|
||||
|
||||
it("sessions_spawn supports legacy timeoutSeconds alias", async () => {
|
||||
resetSubagentRegistryForTests();
|
||||
callGatewayMock.mockReset();
|
||||
let spawnedTimeout: number | undefined;
|
||||
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: unknown };
|
||||
if (request.method === "agent") {
|
||||
const params = request.params as { timeout?: number } | undefined;
|
||||
spawnedTimeout = params?.timeout;
|
||||
return { runId: "run-1", status: "accepted", acceptedAt: 1000 };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
const tool = createClawdbotTools({
|
||||
agentSessionKey: "main",
|
||||
agentChannel: "whatsapp",
|
||||
}).find((candidate) => candidate.name === "sessions_spawn");
|
||||
if (!tool) throw new Error("missing sessions_spawn tool");
|
||||
|
||||
const result = await tool.execute("call5", {
|
||||
task: "do thing",
|
||||
timeoutSeconds: 2,
|
||||
});
|
||||
expect(result.details).toMatchObject({
|
||||
status: "accepted",
|
||||
runId: "run-1",
|
||||
});
|
||||
expect(spawnedTimeout).toBe(2);
|
||||
});
|
||||
});
|
||||
@@ -1,454 +1,41 @@
|
||||
import crypto from "node:crypto";
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
|
||||
import type { AgentTool } from "@mariozechner/pi-agent-core";
|
||||
import type { ImageContent } from "@mariozechner/pi-ai";
|
||||
import { resolveHeartbeatPrompt } from "../auto-reply/heartbeat.js";
|
||||
import type { ThinkLevel } from "../auto-reply/thinking.js";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
import type { CliBackendConfig } from "../config/types.js";
|
||||
import { shouldLogVerbose } from "../globals.js";
|
||||
import { createSubsystemLogger } from "../logging.js";
|
||||
import { runCommandWithTimeout, runExec } from "../process/exec.js";
|
||||
import { runCommandWithTimeout } from "../process/exec.js";
|
||||
import { resolveUserPath } from "../utils.js";
|
||||
import { resolveSessionAgentIds } from "./agent-scope.js";
|
||||
import { resolveCliBackendConfig } from "./cli-backends.js";
|
||||
import {
|
||||
appendImagePathsToPrompt,
|
||||
buildCliArgs,
|
||||
buildSystemPrompt,
|
||||
cleanupResumeProcesses,
|
||||
enqueueCliRun,
|
||||
normalizeCliModel,
|
||||
parseCliJson,
|
||||
parseCliJsonl,
|
||||
resolvePromptInput,
|
||||
resolveSessionIdToSend,
|
||||
resolveSystemPromptUsage,
|
||||
writeCliImages,
|
||||
} from "./cli-runner/helpers.js";
|
||||
import { FailoverError, resolveFailoverStatus } from "./failover-error.js";
|
||||
import {
|
||||
buildBootstrapContextFiles,
|
||||
classifyFailoverReason,
|
||||
type EmbeddedContextFile,
|
||||
isFailoverErrorMessage,
|
||||
resolveBootstrapMaxChars,
|
||||
} from "./pi-embedded-helpers.js";
|
||||
import type { EmbeddedPiRunResult } from "./pi-embedded-runner.js";
|
||||
import { buildAgentSystemPrompt } from "./system-prompt.js";
|
||||
import {
|
||||
filterBootstrapFilesForSession,
|
||||
loadWorkspaceBootstrapFiles,
|
||||
} from "./workspace.js";
|
||||
|
||||
// Subsystem-scoped logger for the Claude CLI agent backend.
const log = createSubsystemLogger("agent/claude-cli");
// Per-key chain of in-flight CLI runs; used by enqueueCliRun below to
// serialize runs that share the same key.
const CLI_RUN_QUEUE = new Map<string, Promise<unknown>>();
|
||||
|
||||
function escapeRegex(value: string): string {
|
||||
return value.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
|
||||
}
|
||||
|
||||
async function cleanupResumeProcesses(
|
||||
backend: CliBackendConfig,
|
||||
sessionId: string,
|
||||
): Promise<void> {
|
||||
if (process.platform === "win32") return;
|
||||
const resumeArgs = backend.resumeArgs ?? [];
|
||||
if (resumeArgs.length === 0) return;
|
||||
if (!resumeArgs.some((arg) => arg.includes("{sessionId}"))) return;
|
||||
const commandToken = path.basename(backend.command ?? "").trim();
|
||||
if (!commandToken) return;
|
||||
|
||||
const resumeTokens = resumeArgs.map((arg) =>
|
||||
arg.replaceAll("{sessionId}", sessionId),
|
||||
);
|
||||
const pattern = [commandToken, ...resumeTokens]
|
||||
.filter(Boolean)
|
||||
.map((token) => escapeRegex(token))
|
||||
.join(".*");
|
||||
if (!pattern) return;
|
||||
|
||||
try {
|
||||
await runExec("pkill", ["-f", pattern]);
|
||||
} catch {
|
||||
// ignore missing pkill or no matches
|
||||
}
|
||||
}
|
||||
|
||||
function enqueueCliRun<T>(key: string, task: () => Promise<T>): Promise<T> {
|
||||
const prior = CLI_RUN_QUEUE.get(key) ?? Promise.resolve();
|
||||
const chained = prior.catch(() => undefined).then(task);
|
||||
const tracked = chained.finally(() => {
|
||||
if (CLI_RUN_QUEUE.get(key) === tracked) {
|
||||
CLI_RUN_QUEUE.delete(key);
|
||||
}
|
||||
});
|
||||
CLI_RUN_QUEUE.set(key, tracked);
|
||||
return chained;
|
||||
}
|
||||
|
||||
// Token-usage counters extracted from a CLI backend's JSON output.
// A field is present only when the backend reported a positive count
// (see toUsage below).
type CliUsage = {
  input?: number;
  output?: number;
  cacheRead?: number;
  cacheWrite?: number;
  total?: number;
};

// Normalized result of parsing one CLI invocation's output.
type CliOutput = {
  // Assistant/result text, trimmed.
  text: string;
  // Session/conversation id reported by the backend, if any.
  sessionId?: string;
  usage?: CliUsage;
};
|
||||
|
||||
function resolveUserTimezone(configured?: string): string {
|
||||
const trimmed = configured?.trim();
|
||||
if (trimmed) {
|
||||
try {
|
||||
new Intl.DateTimeFormat("en-US", { timeZone: trimmed }).format(
|
||||
new Date(),
|
||||
);
|
||||
return trimmed;
|
||||
} catch {
|
||||
// ignore invalid timezone
|
||||
}
|
||||
}
|
||||
const host = Intl.DateTimeFormat().resolvedOptions().timeZone;
|
||||
return host?.trim() || "UTC";
|
||||
}
|
||||
|
||||
function formatUserTime(date: Date, timeZone: string): string | undefined {
|
||||
try {
|
||||
const parts = new Intl.DateTimeFormat("en-CA", {
|
||||
timeZone,
|
||||
weekday: "long",
|
||||
year: "numeric",
|
||||
month: "2-digit",
|
||||
day: "2-digit",
|
||||
hour: "2-digit",
|
||||
minute: "2-digit",
|
||||
hourCycle: "h23",
|
||||
}).formatToParts(date);
|
||||
const map: Record<string, string> = {};
|
||||
for (const part of parts) {
|
||||
if (part.type !== "literal") map[part.type] = part.value;
|
||||
}
|
||||
if (
|
||||
!map.weekday ||
|
||||
!map.year ||
|
||||
!map.month ||
|
||||
!map.day ||
|
||||
!map.hour ||
|
||||
!map.minute
|
||||
) {
|
||||
return undefined;
|
||||
}
|
||||
return `${map.weekday} ${map.year}-${map.month}-${map.day} ${map.hour}:${map.minute}`;
|
||||
} catch {
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
|
||||
function buildModelAliasLines(cfg?: ClawdbotConfig) {
|
||||
const models = cfg?.agents?.defaults?.models ?? {};
|
||||
const entries: Array<{ alias: string; model: string }> = [];
|
||||
for (const [keyRaw, entryRaw] of Object.entries(models)) {
|
||||
const model = String(keyRaw ?? "").trim();
|
||||
if (!model) continue;
|
||||
const alias = String(
|
||||
(entryRaw as { alias?: string } | undefined)?.alias ?? "",
|
||||
).trim();
|
||||
if (!alias) continue;
|
||||
entries.push({ alias, model });
|
||||
}
|
||||
return entries
|
||||
.sort((a, b) => a.alias.localeCompare(b.alias))
|
||||
.map((entry) => `- ${entry.alias}: ${entry.model}`);
|
||||
}
|
||||
|
||||
function buildSystemPrompt(params: {
|
||||
workspaceDir: string;
|
||||
config?: ClawdbotConfig;
|
||||
defaultThinkLevel?: ThinkLevel;
|
||||
extraSystemPrompt?: string;
|
||||
ownerNumbers?: string[];
|
||||
heartbeatPrompt?: string;
|
||||
tools: AgentTool[];
|
||||
contextFiles?: EmbeddedContextFile[];
|
||||
modelDisplay: string;
|
||||
}) {
|
||||
const userTimezone = resolveUserTimezone(
|
||||
params.config?.agents?.defaults?.userTimezone,
|
||||
);
|
||||
const userTime = formatUserTime(new Date(), userTimezone);
|
||||
return buildAgentSystemPrompt({
|
||||
workspaceDir: params.workspaceDir,
|
||||
defaultThinkLevel: params.defaultThinkLevel,
|
||||
extraSystemPrompt: params.extraSystemPrompt,
|
||||
ownerNumbers: params.ownerNumbers,
|
||||
reasoningTagHint: false,
|
||||
heartbeatPrompt: params.heartbeatPrompt,
|
||||
runtimeInfo: {
|
||||
host: "clawdbot",
|
||||
os: `${os.type()} ${os.release()}`,
|
||||
arch: os.arch(),
|
||||
node: process.version,
|
||||
model: params.modelDisplay,
|
||||
},
|
||||
toolNames: params.tools.map((tool) => tool.name),
|
||||
modelAliasLines: buildModelAliasLines(params.config),
|
||||
userTimezone,
|
||||
userTime,
|
||||
contextFiles: params.contextFiles,
|
||||
});
|
||||
}
|
||||
|
||||
function normalizeCliModel(modelId: string, backend: CliBackendConfig): string {
|
||||
const trimmed = modelId.trim();
|
||||
if (!trimmed) return trimmed;
|
||||
const direct = backend.modelAliases?.[trimmed];
|
||||
if (direct) return direct;
|
||||
const lower = trimmed.toLowerCase();
|
||||
const mapped = backend.modelAliases?.[lower];
|
||||
if (mapped) return mapped;
|
||||
return trimmed;
|
||||
}
|
||||
|
||||
function toUsage(raw: Record<string, unknown>): CliUsage | undefined {
|
||||
const pick = (key: string) =>
|
||||
typeof raw[key] === "number" && raw[key] > 0
|
||||
? (raw[key] as number)
|
||||
: undefined;
|
||||
const input = pick("input_tokens") ?? pick("inputTokens");
|
||||
const output = pick("output_tokens") ?? pick("outputTokens");
|
||||
const cacheRead =
|
||||
pick("cache_read_input_tokens") ??
|
||||
pick("cached_input_tokens") ??
|
||||
pick("cacheRead");
|
||||
const cacheWrite = pick("cache_write_input_tokens") ?? pick("cacheWrite");
|
||||
const total = pick("total_tokens") ?? pick("total");
|
||||
if (!input && !output && !cacheRead && !cacheWrite && !total)
|
||||
return undefined;
|
||||
return { input, output, cacheRead, cacheWrite, total };
|
||||
}
|
||||
|
||||
function isRecord(value: unknown): value is Record<string, unknown> {
|
||||
return Boolean(value && typeof value === "object" && !Array.isArray(value));
|
||||
}
|
||||
|
||||
function collectText(value: unknown): string {
|
||||
if (!value) return "";
|
||||
if (typeof value === "string") return value;
|
||||
if (Array.isArray(value)) {
|
||||
return value.map((entry) => collectText(entry)).join("");
|
||||
}
|
||||
if (!isRecord(value)) return "";
|
||||
if (typeof value.text === "string") return value.text;
|
||||
if (typeof value.content === "string") return value.content;
|
||||
if (Array.isArray(value.content)) {
|
||||
return value.content.map((entry) => collectText(entry)).join("");
|
||||
}
|
||||
if (isRecord(value.message)) return collectText(value.message);
|
||||
return "";
|
||||
}
|
||||
|
||||
/**
 * Pull a session identifier from a parsed CLI payload. The backend may
 * override which fields are checked; defaults cover snake/camel variants.
 * Whitespace-only values are skipped.
 */
function pickSessionId(
  parsed: Record<string, unknown>,
  backend: CliBackendConfig,
): string | undefined {
  const fields = backend.sessionIdFields ?? [
    "session_id",
    "sessionId",
    "conversation_id",
    "conversationId",
  ];
  for (const field of fields) {
    const value = parsed[field];
    if (typeof value === "string" && value.trim()) return value.trim();
  }
  return undefined;
}
|
||||
|
||||
/**
 * Parse a single JSON document from CLI output into { text, sessionId, usage }.
 * Returns null for empty input, parse failures, or non-object payloads.
 */
function parseCliJson(
  raw: string,
  backend: CliBackendConfig,
): CliOutput | null {
  const trimmed = raw.trim();
  if (!trimmed) return null;
  let parsed: unknown;
  try {
    parsed = JSON.parse(trimmed);
  } catch {
    return null;
  }
  if (!isRecord(parsed)) return null;
  const sessionId = pickSessionId(parsed, backend);
  const usage = isRecord(parsed.usage) ? toUsage(parsed.usage) : undefined;
  // Prefer the structured fields before scraping the whole payload.
  const text =
    collectText(parsed.message) ||
    collectText(parsed.content) ||
    collectText(parsed.result) ||
    collectText(parsed);
  return { text: text.trim(), sessionId, usage };
}
|
||||
|
||||
/**
 * Parse JSONL (one JSON object per line) CLI output. Collects message text
 * across lines, remembers the first session id seen (with a `thread_id`
 * fallback), and keeps the most recent parseable usage record. Returns null
 * when no text was produced.
 */
function parseCliJsonl(
  raw: string,
  backend: CliBackendConfig,
): CliOutput | null {
  const lines = raw
    .split(/\r?\n/g)
    .map((line) => line.trim())
    .filter(Boolean);
  if (lines.length === 0) return null;
  let sessionId: string | undefined;
  let usage: CliUsage | undefined;
  const texts: string[] = [];
  for (const line of lines) {
    let parsed: unknown;
    try {
      parsed = JSON.parse(line);
    } catch {
      // Skip non-JSON lines (e.g. stray logging).
      continue;
    }
    if (!isRecord(parsed)) continue;
    if (!sessionId) sessionId = pickSessionId(parsed, backend);
    if (!sessionId && typeof parsed.thread_id === "string") {
      // NOTE(review): a whitespace-only thread_id trims to "" (falsy), so a
      // later line may still supply the session id.
      sessionId = parsed.thread_id.trim();
    }
    if (isRecord(parsed.usage)) {
      // Later usage records win; unparseable ones keep the previous value.
      usage = toUsage(parsed.usage) ?? usage;
    }
    const item = isRecord(parsed.item) ? parsed.item : null;
    if (item && typeof item.text === "string") {
      const type = typeof item.type === "string" ? item.type.toLowerCase() : "";
      // Only message-like items (or untyped ones) contribute text.
      if (!type || type.includes("message")) {
        texts.push(item.text);
      }
    }
  }
  const text = texts.join("\n").trim();
  if (!text) return null;
  return { text, sessionId, usage };
}
|
||||
|
||||
/**
 * Decide whether to send the system prompt: requires a non-empty prompt,
 * a configured systemPromptArg, and a `systemPromptWhen` policy of
 * "always" or "first" (first run of a session only; the default).
 */
function resolveSystemPromptUsage(params: {
  backend: CliBackendConfig;
  isNewSession: boolean;
  systemPrompt?: string;
}): string | null {
  const systemPrompt = params.systemPrompt?.trim();
  if (!systemPrompt) return null;
  const when = params.backend.systemPromptWhen ?? "first";
  if (when === "never") return null;
  if (when === "first" && !params.isNewSession) return null;
  if (!params.backend.systemPromptArg?.trim()) return null;
  return systemPrompt;
}
|
||||
|
||||
/**
 * Choose the session id to pass to the CLI based on backend.sessionMode:
 * "none" sends nothing, "existing" only reuses a known id, "always"
 * (default) reuses or mints a fresh UUID. `isNew` flags a brand-new session.
 */
function resolveSessionIdToSend(params: {
  backend: CliBackendConfig;
  cliSessionId?: string;
}): { sessionId?: string; isNew: boolean } {
  const mode = params.backend.sessionMode ?? "always";
  const existing = params.cliSessionId?.trim();
  if (mode === "none") return { sessionId: undefined, isNew: !existing };
  if (mode === "existing") return { sessionId: existing, isNew: !existing };
  if (existing) return { sessionId: existing, isNew: false };
  return { sessionId: crypto.randomUUID(), isNew: true };
}
|
||||
|
||||
/**
 * Route the prompt via stdin when the backend asks for it, or when it
 * exceeds maxPromptArgChars; otherwise pass it as a positional argument.
 */
function resolvePromptInput(params: {
  backend: CliBackendConfig;
  prompt: string;
}): { argsPrompt?: string; stdin?: string } {
  const inputMode = params.backend.input ?? "arg";
  if (inputMode === "stdin") {
    return { stdin: params.prompt };
  }
  if (
    params.backend.maxPromptArgChars &&
    params.prompt.length > params.backend.maxPromptArgChars
  ) {
    return { stdin: params.prompt };
  }
  return { argsPrompt: params.prompt };
}
|
||||
|
||||
/** Map a MIME type onto a temp-file extension; "bin" for anything unrecognized. */
function resolveImageExtension(mimeType: string): string {
  const normalized = mimeType.toLowerCase();
  if (normalized.includes("png")) return "png";
  if (normalized.includes("jpeg") || normalized.includes("jpg")) return "jpg";
  if (normalized.includes("gif")) return "gif";
  if (normalized.includes("webp")) return "webp";
  return "bin";
}
|
||||
|
||||
/** Append image file paths to the prompt, separated by a blank line (or alone when the prompt is empty). */
function appendImagePathsToPrompt(prompt: string, paths: string[]): string {
  if (!paths.length) return prompt;
  const trimmed = prompt.trimEnd();
  const separator = trimmed ? "\n\n" : "";
  return `${trimmed}${separator}${paths.join("\n")}`;
}
|
||||
|
||||
/**
 * Write base64-encoded images into a private temp directory (files chmod
 * 0600) and return their paths plus a cleanup that removes the directory.
 */
async function writeCliImages(
  images: ImageContent[],
): Promise<{ paths: string[]; cleanup: () => Promise<void> }> {
  const tempDir = await fs.mkdtemp(
    path.join(os.tmpdir(), "clawdbot-cli-images-"),
  );
  const paths: string[] = [];
  for (let i = 0; i < images.length; i += 1) {
    const image = images[i];
    const ext = resolveImageExtension(image.mimeType);
    const filePath = path.join(tempDir, `image-${i + 1}.${ext}`);
    const buffer = Buffer.from(image.data, "base64");
    await fs.writeFile(filePath, buffer, { mode: 0o600 });
    paths.push(filePath);
  }
  const cleanup = async () => {
    await fs.rm(tempDir, { recursive: true, force: true });
  };
  return { paths, cleanup };
}
|
||||
|
||||
/**
 * Assemble the CLI argument vector. Model/system-prompt/session flags are
 * skipped on resume runs, image paths are attached per imageMode ("repeat"
 * default, or "list" for a single comma-joined flag), and the prompt
 * argument, when given, always goes last.
 */
function buildCliArgs(params: {
  backend: CliBackendConfig;
  baseArgs: string[];
  modelId: string;
  sessionId?: string;
  systemPrompt?: string | null;
  imagePaths?: string[];
  promptArg?: string;
  useResume: boolean;
}): string[] {
  const args: string[] = [...params.baseArgs];
  if (!params.useResume && params.backend.modelArg && params.modelId) {
    args.push(params.backend.modelArg, params.modelId);
  }
  if (
    !params.useResume &&
    params.systemPrompt &&
    params.backend.systemPromptArg
  ) {
    args.push(params.backend.systemPromptArg, params.systemPrompt);
  }
  if (!params.useResume && params.sessionId) {
    // Templated sessionArgs take precedence over the single sessionArg flag.
    if (params.backend.sessionArgs && params.backend.sessionArgs.length > 0) {
      for (const entry of params.backend.sessionArgs) {
        args.push(entry.replaceAll("{sessionId}", params.sessionId));
      }
    } else if (params.backend.sessionArg) {
      args.push(params.backend.sessionArg, params.sessionId);
    }
  }
  if (params.imagePaths && params.imagePaths.length > 0) {
    const mode = params.backend.imageMode ?? "repeat";
    const imageArg = params.backend.imageArg;
    if (imageArg) {
      if (mode === "list") {
        args.push(imageArg, params.imagePaths.join(","));
      } else {
        for (const imagePath of params.imagePaths) {
          args.push(imageArg, imagePath);
        }
      }
    }
  }
  if (params.promptArg !== undefined) {
    args.push(params.promptArg);
  }
  return args;
}
|
||||
|
||||
export async function runCliAgent(params: {
|
||||
sessionId: string;
|
||||
|
||||
438
src/agents/cli-runner/helpers.ts
Normal file
438
src/agents/cli-runner/helpers.ts
Normal file
@@ -0,0 +1,438 @@
|
||||
import crypto from "node:crypto";
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
|
||||
import type { AgentTool } from "@mariozechner/pi-agent-core";
|
||||
import type { ImageContent } from "@mariozechner/pi-ai";
|
||||
import type { ThinkLevel } from "../../auto-reply/thinking.js";
|
||||
import type { ClawdbotConfig } from "../../config/config.js";
|
||||
import type { CliBackendConfig } from "../../config/types.js";
|
||||
import { runExec } from "../../process/exec.js";
|
||||
import type { EmbeddedContextFile } from "../pi-embedded-helpers.js";
|
||||
import { buildAgentSystemPrompt } from "../system-prompt.js";
|
||||
|
||||
// Per-key chains of pending CLI runs, used to serialize runs for a session key.
const CLI_RUN_QUEUE = new Map<string, Promise<unknown>>();
|
||||
|
||||
function escapeRegex(value: string): string {
|
||||
return value.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
|
||||
}
|
||||
|
||||
export async function cleanupResumeProcesses(
|
||||
backend: CliBackendConfig,
|
||||
sessionId: string,
|
||||
): Promise<void> {
|
||||
if (process.platform === "win32") return;
|
||||
const resumeArgs = backend.resumeArgs ?? [];
|
||||
if (resumeArgs.length === 0) return;
|
||||
if (!resumeArgs.some((arg) => arg.includes("{sessionId}"))) return;
|
||||
const commandToken = path.basename(backend.command ?? "").trim();
|
||||
if (!commandToken) return;
|
||||
|
||||
const resumeTokens = resumeArgs.map((arg) =>
|
||||
arg.replaceAll("{sessionId}", sessionId),
|
||||
);
|
||||
const pattern = [commandToken, ...resumeTokens]
|
||||
.filter(Boolean)
|
||||
.map((token) => escapeRegex(token))
|
||||
.join(".*");
|
||||
if (!pattern) return;
|
||||
|
||||
try {
|
||||
await runExec("pkill", ["-f", pattern]);
|
||||
} catch {
|
||||
// ignore missing pkill or no matches
|
||||
}
|
||||
}
|
||||
|
||||
export function enqueueCliRun<T>(
|
||||
key: string,
|
||||
task: () => Promise<T>,
|
||||
): Promise<T> {
|
||||
const prior = CLI_RUN_QUEUE.get(key) ?? Promise.resolve();
|
||||
const chained = prior.catch(() => undefined).then(task);
|
||||
const tracked = chained.finally(() => {
|
||||
if (CLI_RUN_QUEUE.get(key) === tracked) {
|
||||
CLI_RUN_QUEUE.delete(key);
|
||||
}
|
||||
});
|
||||
CLI_RUN_QUEUE.set(key, tracked);
|
||||
return chained;
|
||||
}
|
||||
|
||||
/** Token accounting parsed from a CLI backend's output; all fields optional (zero counts are treated as absent by toUsage). */
type CliUsage = {
  input?: number;
  output?: number;
  cacheRead?: number;
  cacheWrite?: number;
  total?: number;
};
|
||||
|
||||
/** Result of parsing CLI backend output: message text, optional session id, optional usage counts. */
export type CliOutput = {
  text: string;
  sessionId?: string;
  usage?: CliUsage;
};
|
||||
|
||||
function resolveUserTimezone(configured?: string): string {
|
||||
const trimmed = configured?.trim();
|
||||
if (trimmed) {
|
||||
try {
|
||||
new Intl.DateTimeFormat("en-US", { timeZone: trimmed }).format(
|
||||
new Date(),
|
||||
);
|
||||
return trimmed;
|
||||
} catch {
|
||||
// ignore invalid timezone
|
||||
}
|
||||
}
|
||||
const host = Intl.DateTimeFormat().resolvedOptions().timeZone;
|
||||
return host?.trim() || "UTC";
|
||||
}
|
||||
|
||||
function formatUserTime(date: Date, timeZone: string): string | undefined {
|
||||
try {
|
||||
const parts = new Intl.DateTimeFormat("en-CA", {
|
||||
timeZone,
|
||||
weekday: "long",
|
||||
year: "numeric",
|
||||
month: "2-digit",
|
||||
day: "2-digit",
|
||||
hour: "2-digit",
|
||||
minute: "2-digit",
|
||||
hourCycle: "h23",
|
||||
}).formatToParts(date);
|
||||
const map: Record<string, string> = {};
|
||||
for (const part of parts) {
|
||||
if (part.type !== "literal") map[part.type] = part.value;
|
||||
}
|
||||
if (
|
||||
!map.weekday ||
|
||||
!map.year ||
|
||||
!map.month ||
|
||||
!map.day ||
|
||||
!map.hour ||
|
||||
!map.minute
|
||||
)
|
||||
return undefined;
|
||||
return `${map.weekday} ${map.year}-${map.month}-${map.day} ${map.hour}:${map.minute}`;
|
||||
} catch {
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
|
||||
function buildModelAliasLines(cfg?: ClawdbotConfig) {
|
||||
const models = cfg?.agents?.defaults?.models ?? {};
|
||||
const entries: Array<{ alias: string; model: string }> = [];
|
||||
for (const [keyRaw, entryRaw] of Object.entries(models)) {
|
||||
const model = String(keyRaw ?? "").trim();
|
||||
if (!model) continue;
|
||||
const alias = String(
|
||||
(entryRaw as { alias?: string } | undefined)?.alias ?? "",
|
||||
).trim();
|
||||
if (!alias) continue;
|
||||
entries.push({ alias, model });
|
||||
}
|
||||
return entries
|
||||
.sort((a, b) => a.alias.localeCompare(b.alias))
|
||||
.map((entry) => `- ${entry.alias}: ${entry.model}`);
|
||||
}
|
||||
|
||||
/**
 * Construct the agent system prompt for a CLI run by delegating to the
 * shared `buildAgentSystemPrompt`, filling in host/runtime details, tool
 * names, configured model alias lines, and the user's local time.
 */
export function buildSystemPrompt(params: {
  workspaceDir: string;
  config?: ClawdbotConfig;
  defaultThinkLevel?: ThinkLevel;
  extraSystemPrompt?: string;
  ownerNumbers?: string[];
  heartbeatPrompt?: string;
  tools: AgentTool[];
  contextFiles?: EmbeddedContextFile[];
  modelDisplay: string;
}) {
  const userTimezone = resolveUserTimezone(
    params.config?.agents?.defaults?.userTimezone,
  );
  // May be undefined when the timezone cannot be formatted.
  const userTime = formatUserTime(new Date(), userTimezone);
  return buildAgentSystemPrompt({
    workspaceDir: params.workspaceDir,
    defaultThinkLevel: params.defaultThinkLevel,
    extraSystemPrompt: params.extraSystemPrompt,
    ownerNumbers: params.ownerNumbers,
    reasoningTagHint: false,
    heartbeatPrompt: params.heartbeatPrompt,
    runtimeInfo: {
      host: "clawdbot",
      os: `${os.type()} ${os.release()}`,
      arch: os.arch(),
      node: process.version,
      model: params.modelDisplay,
    },
    toolNames: params.tools.map((tool) => tool.name),
    modelAliasLines: buildModelAliasLines(params.config),
    userTimezone,
    userTime,
    contextFiles: params.contextFiles,
  });
}
|
||||
|
||||
export function normalizeCliModel(
|
||||
modelId: string,
|
||||
backend: CliBackendConfig,
|
||||
): string {
|
||||
const trimmed = modelId.trim();
|
||||
if (!trimmed) return trimmed;
|
||||
const direct = backend.modelAliases?.[trimmed];
|
||||
if (direct) return direct;
|
||||
const lower = trimmed.toLowerCase();
|
||||
const mapped = backend.modelAliases?.[lower];
|
||||
if (mapped) return mapped;
|
||||
return trimmed;
|
||||
}
|
||||
|
||||
function toUsage(raw: Record<string, unknown>): CliUsage | undefined {
|
||||
const pick = (key: string) =>
|
||||
typeof raw[key] === "number" && raw[key] > 0
|
||||
? (raw[key] as number)
|
||||
: undefined;
|
||||
const input = pick("input_tokens") ?? pick("inputTokens");
|
||||
const output = pick("output_tokens") ?? pick("outputTokens");
|
||||
const cacheRead =
|
||||
pick("cache_read_input_tokens") ??
|
||||
pick("cached_input_tokens") ??
|
||||
pick("cacheRead");
|
||||
const cacheWrite = pick("cache_write_input_tokens") ?? pick("cacheWrite");
|
||||
const total = pick("total_tokens") ?? pick("total");
|
||||
if (!input && !output && !cacheRead && !cacheWrite && !total)
|
||||
return undefined;
|
||||
return { input, output, cacheRead, cacheWrite, total };
|
||||
}
|
||||
|
||||
function isRecord(value: unknown): value is Record<string, unknown> {
|
||||
return Boolean(value && typeof value === "object" && !Array.isArray(value));
|
||||
}
|
||||
|
||||
function collectText(value: unknown): string {
|
||||
if (!value) return "";
|
||||
if (typeof value === "string") return value;
|
||||
if (Array.isArray(value))
|
||||
return value.map((entry) => collectText(entry)).join("");
|
||||
if (!isRecord(value)) return "";
|
||||
if (typeof value.text === "string") return value.text;
|
||||
if (typeof value.content === "string") return value.content;
|
||||
if (Array.isArray(value.content))
|
||||
return value.content.map((entry) => collectText(entry)).join("");
|
||||
if (isRecord(value.message)) return collectText(value.message);
|
||||
return "";
|
||||
}
|
||||
|
||||
function pickSessionId(
|
||||
parsed: Record<string, unknown>,
|
||||
backend: CliBackendConfig,
|
||||
): string | undefined {
|
||||
const fields = backend.sessionIdFields ?? [
|
||||
"session_id",
|
||||
"sessionId",
|
||||
"conversation_id",
|
||||
"conversationId",
|
||||
];
|
||||
for (const field of fields) {
|
||||
const value = parsed[field];
|
||||
if (typeof value === "string" && value.trim()) return value.trim();
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
export function parseCliJson(
|
||||
raw: string,
|
||||
backend: CliBackendConfig,
|
||||
): CliOutput | null {
|
||||
const trimmed = raw.trim();
|
||||
if (!trimmed) return null;
|
||||
let parsed: unknown;
|
||||
try {
|
||||
parsed = JSON.parse(trimmed);
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
if (!isRecord(parsed)) return null;
|
||||
const sessionId = pickSessionId(parsed, backend);
|
||||
const usage = isRecord(parsed.usage) ? toUsage(parsed.usage) : undefined;
|
||||
const text =
|
||||
collectText(parsed.message) ||
|
||||
collectText(parsed.content) ||
|
||||
collectText(parsed.result) ||
|
||||
collectText(parsed);
|
||||
return { text: text.trim(), sessionId, usage };
|
||||
}
|
||||
|
||||
/**
 * Parse JSONL (one JSON object per line) CLI output. Collects message text
 * across lines, remembers the first session id seen (with a `thread_id`
 * fallback), and keeps the most recent parseable usage record. Returns null
 * when no text was produced.
 */
export function parseCliJsonl(
  raw: string,
  backend: CliBackendConfig,
): CliOutput | null {
  const lines = raw
    .split(/\r?\n/g)
    .map((line) => line.trim())
    .filter(Boolean);
  if (lines.length === 0) return null;
  let sessionId: string | undefined;
  let usage: CliUsage | undefined;
  const texts: string[] = [];
  for (const line of lines) {
    let parsed: unknown;
    try {
      parsed = JSON.parse(line);
    } catch {
      // Skip non-JSON lines (e.g. stray logging).
      continue;
    }
    if (!isRecord(parsed)) continue;
    if (!sessionId) sessionId = pickSessionId(parsed, backend);
    if (!sessionId && typeof parsed.thread_id === "string") {
      // NOTE(review): a whitespace-only thread_id trims to "" (falsy), so a
      // later line may still supply the session id.
      sessionId = parsed.thread_id.trim();
    }
    if (isRecord(parsed.usage)) {
      // Later usage records win; unparseable ones keep the previous value.
      usage = toUsage(parsed.usage) ?? usage;
    }
    const item = isRecord(parsed.item) ? parsed.item : null;
    if (item && typeof item.text === "string") {
      const type = typeof item.type === "string" ? item.type.toLowerCase() : "";
      // Only message-like items (or untyped ones) contribute text.
      if (!type || type.includes("message")) {
        texts.push(item.text);
      }
    }
  }
  const text = texts.join("\n").trim();
  if (!text) return null;
  return { text, sessionId, usage };
}
|
||||
|
||||
export function resolveSystemPromptUsage(params: {
|
||||
backend: CliBackendConfig;
|
||||
isNewSession: boolean;
|
||||
systemPrompt?: string;
|
||||
}): string | null {
|
||||
const systemPrompt = params.systemPrompt?.trim();
|
||||
if (!systemPrompt) return null;
|
||||
const when = params.backend.systemPromptWhen ?? "first";
|
||||
if (when === "never") return null;
|
||||
if (when === "first" && !params.isNewSession) return null;
|
||||
if (!params.backend.systemPromptArg?.trim()) return null;
|
||||
return systemPrompt;
|
||||
}
|
||||
|
||||
export function resolveSessionIdToSend(params: {
|
||||
backend: CliBackendConfig;
|
||||
cliSessionId?: string;
|
||||
}): { sessionId?: string; isNew: boolean } {
|
||||
const mode = params.backend.sessionMode ?? "always";
|
||||
const existing = params.cliSessionId?.trim();
|
||||
if (mode === "none") return { sessionId: undefined, isNew: !existing };
|
||||
if (mode === "existing") return { sessionId: existing, isNew: !existing };
|
||||
if (existing) return { sessionId: existing, isNew: false };
|
||||
return { sessionId: crypto.randomUUID(), isNew: true };
|
||||
}
|
||||
|
||||
export function resolvePromptInput(params: {
|
||||
backend: CliBackendConfig;
|
||||
prompt: string;
|
||||
}): { argsPrompt?: string; stdin?: string } {
|
||||
const inputMode = params.backend.input ?? "arg";
|
||||
if (inputMode === "stdin") {
|
||||
return { stdin: params.prompt };
|
||||
}
|
||||
if (
|
||||
params.backend.maxPromptArgChars &&
|
||||
params.prompt.length > params.backend.maxPromptArgChars
|
||||
) {
|
||||
return { stdin: params.prompt };
|
||||
}
|
||||
return { argsPrompt: params.prompt };
|
||||
}
|
||||
|
||||
function resolveImageExtension(mimeType: string): string {
|
||||
const normalized = mimeType.toLowerCase();
|
||||
if (normalized.includes("png")) return "png";
|
||||
if (normalized.includes("jpeg") || normalized.includes("jpg")) return "jpg";
|
||||
if (normalized.includes("gif")) return "gif";
|
||||
if (normalized.includes("webp")) return "webp";
|
||||
return "bin";
|
||||
}
|
||||
|
||||
export function appendImagePathsToPrompt(
|
||||
prompt: string,
|
||||
paths: string[],
|
||||
): string {
|
||||
if (!paths.length) return prompt;
|
||||
const trimmed = prompt.trimEnd();
|
||||
const separator = trimmed ? "\n\n" : "";
|
||||
return `${trimmed}${separator}${paths.join("\n")}`;
|
||||
}
|
||||
|
||||
export async function writeCliImages(
|
||||
images: ImageContent[],
|
||||
): Promise<{ paths: string[]; cleanup: () => Promise<void> }> {
|
||||
const tempDir = await fs.mkdtemp(
|
||||
path.join(os.tmpdir(), "clawdbot-cli-images-"),
|
||||
);
|
||||
const paths: string[] = [];
|
||||
for (let i = 0; i < images.length; i += 1) {
|
||||
const image = images[i];
|
||||
const ext = resolveImageExtension(image.mimeType);
|
||||
const filePath = path.join(tempDir, `image-${i + 1}.${ext}`);
|
||||
const buffer = Buffer.from(image.data, "base64");
|
||||
await fs.writeFile(filePath, buffer, { mode: 0o600 });
|
||||
paths.push(filePath);
|
||||
}
|
||||
const cleanup = async () => {
|
||||
await fs.rm(tempDir, { recursive: true, force: true });
|
||||
};
|
||||
return { paths, cleanup };
|
||||
}
|
||||
|
||||
export function buildCliArgs(params: {
|
||||
backend: CliBackendConfig;
|
||||
baseArgs: string[];
|
||||
modelId: string;
|
||||
sessionId?: string;
|
||||
systemPrompt?: string | null;
|
||||
imagePaths?: string[];
|
||||
promptArg?: string;
|
||||
useResume: boolean;
|
||||
}): string[] {
|
||||
const args: string[] = [...params.baseArgs];
|
||||
if (!params.useResume && params.backend.modelArg && params.modelId) {
|
||||
args.push(params.backend.modelArg, params.modelId);
|
||||
}
|
||||
if (
|
||||
!params.useResume &&
|
||||
params.systemPrompt &&
|
||||
params.backend.systemPromptArg
|
||||
) {
|
||||
args.push(params.backend.systemPromptArg, params.systemPrompt);
|
||||
}
|
||||
if (!params.useResume && params.sessionId) {
|
||||
if (params.backend.sessionArgs && params.backend.sessionArgs.length > 0) {
|
||||
for (const entry of params.backend.sessionArgs) {
|
||||
args.push(entry.replaceAll("{sessionId}", params.sessionId));
|
||||
}
|
||||
} else if (params.backend.sessionArg) {
|
||||
args.push(params.backend.sessionArg, params.sessionId);
|
||||
}
|
||||
}
|
||||
if (params.imagePaths && params.imagePaths.length > 0) {
|
||||
const mode = params.backend.imageMode ?? "repeat";
|
||||
const imageArg = params.backend.imageArg;
|
||||
if (imageArg) {
|
||||
if (mode === "list") {
|
||||
args.push(imageArg, params.imagePaths.join(","));
|
||||
} else {
|
||||
for (const imagePath of params.imagePaths) {
|
||||
args.push(imageArg, imagePath);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (params.promptArg !== undefined) {
|
||||
args.push(params.promptArg);
|
||||
}
|
||||
return args;
|
||||
}
|
||||
@@ -20,7 +20,6 @@ const GOOGLE_PREFIXES = ["gemini-3"];
|
||||
// Model-id prefixes checked by the prefix matcher below (per provider).
const ZAI_PREFIXES = ["glm-4.7"];
const MINIMAX_PREFIXES = ["minimax-m2.1"];
const XAI_PREFIXES = ["grok-4"];
const SYNTHETIC_PREFIXES = ["hf:minimaxai/minimax-m2.1"];
|
||||
|
||||
function matchesPrefix(id: string, prefixes: string[]): boolean {
|
||||
return prefixes.some((prefix) => id.startsWith(prefix));
|
||||
@@ -74,10 +73,6 @@ export function isModernModelRef(ref: ModelRef): boolean {
|
||||
return matchesPrefix(id, XAI_PREFIXES);
|
||||
}
|
||||
|
||||
if (provider === "synthetic") {
|
||||
return matchesPrefix(id, SYNTHETIC_PREFIXES);
|
||||
}
|
||||
|
||||
if (provider === "openrouter" || provider === "opencode") {
|
||||
return matchesAny(id, [
|
||||
...ANTHROPIC_PREFIXES,
|
||||
|
||||
126
src/agents/models-config.part-1.test.ts
Normal file
126
src/agents/models-config.part-1.test.ts
Normal file
@@ -0,0 +1,126 @@
|
||||
import fs from "node:fs/promises";
|
||||
import path from "node:path";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
|
||||
/** Run `fn` inside a throwaway HOME directory namespaced for these tests. */
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  return withTempHomeBase(fn, { prefix: "clawdbot-models-" });
}
|
||||
|
||||
// Sample custom-provider config fixture. The underscore prefix marks it as
// currently unused in this file — TODO(review): confirm whether it can be
// removed or is kept for upcoming tests.
const _MODELS_CONFIG: ClawdbotConfig = {
  models: {
    providers: {
      "custom-proxy": {
        baseUrl: "http://localhost:4000/v1",
        apiKey: "TEST_KEY",
        api: "openai-completions",
        models: [
          {
            id: "llama-3.1-8b",
            name: "Llama 3.1 8B (Proxy)",
            api: "openai-completions",
            reasoning: false,
            input: ["text"],
            cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
            contextWindow: 128000,
            maxTokens: 32000,
          },
        ],
      },
    },
  },
};
|
||||
|
||||
describe("models-config", () => {
|
||||
let previousHome: string | undefined;
|
||||
|
||||
beforeEach(() => {
|
||||
previousHome = process.env.HOME;
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
process.env.HOME = previousHome;
|
||||
});
|
||||
|
||||
it("auto-injects github-copilot provider when token is present", async () => {
|
||||
await withTempHome(async (home) => {
|
||||
const previous = process.env.COPILOT_GITHUB_TOKEN;
|
||||
process.env.COPILOT_GITHUB_TOKEN = "gh-token";
|
||||
|
||||
try {
|
||||
vi.resetModules();
|
||||
|
||||
vi.doMock("../providers/github-copilot-token.js", () => ({
|
||||
DEFAULT_COPILOT_API_BASE_URL:
|
||||
"https://api.individual.githubcopilot.com",
|
||||
resolveCopilotApiToken: vi.fn().mockResolvedValue({
|
||||
token: "copilot",
|
||||
expiresAt: Date.now() + 60 * 60 * 1000,
|
||||
source: "mock",
|
||||
baseUrl: "https://api.copilot.example",
|
||||
}),
|
||||
}));
|
||||
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
|
||||
const agentDir = path.join(home, "agent-default-base-url");
|
||||
await ensureClawdbotModelsJson({ models: { providers: {} } }, agentDir);
|
||||
|
||||
const raw = await fs.readFile(
|
||||
path.join(agentDir, "models.json"),
|
||||
"utf8",
|
||||
);
|
||||
const parsed = JSON.parse(raw) as {
|
||||
providers: Record<string, { baseUrl?: string; models?: unknown[] }>;
|
||||
};
|
||||
|
||||
expect(parsed.providers["github-copilot"]?.baseUrl).toBe(
|
||||
"https://api.copilot.example",
|
||||
);
|
||||
expect(parsed.providers["github-copilot"]?.models?.length ?? 0).toBe(0);
|
||||
} finally {
|
||||
process.env.COPILOT_GITHUB_TOKEN = previous;
|
||||
}
|
||||
});
|
||||
});
|
||||
it("prefers COPILOT_GITHUB_TOKEN over GH_TOKEN and GITHUB_TOKEN", async () => {
|
||||
await withTempHome(async () => {
|
||||
const previous = process.env.COPILOT_GITHUB_TOKEN;
|
||||
const previousGh = process.env.GH_TOKEN;
|
||||
const previousGithub = process.env.GITHUB_TOKEN;
|
||||
process.env.COPILOT_GITHUB_TOKEN = "copilot-token";
|
||||
process.env.GH_TOKEN = "gh-token";
|
||||
process.env.GITHUB_TOKEN = "github-token";
|
||||
|
||||
try {
|
||||
vi.resetModules();
|
||||
|
||||
const resolveCopilotApiToken = vi.fn().mockResolvedValue({
|
||||
token: "copilot",
|
||||
expiresAt: Date.now() + 60 * 60 * 1000,
|
||||
source: "mock",
|
||||
baseUrl: "https://api.copilot.example",
|
||||
});
|
||||
|
||||
vi.doMock("../providers/github-copilot-token.js", () => ({
|
||||
DEFAULT_COPILOT_API_BASE_URL:
|
||||
"https://api.individual.githubcopilot.com",
|
||||
resolveCopilotApiToken,
|
||||
}));
|
||||
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
|
||||
await ensureClawdbotModelsJson({ models: { providers: {} } });
|
||||
|
||||
expect(resolveCopilotApiToken).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ githubToken: "copilot-token" }),
|
||||
);
|
||||
} finally {
|
||||
process.env.COPILOT_GITHUB_TOKEN = previous;
|
||||
process.env.GH_TOKEN = previousGh;
|
||||
process.env.GITHUB_TOKEN = previousGithub;
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
164
src/agents/models-config.part-2.test.ts
Normal file
164
src/agents/models-config.part-2.test.ts
Normal file
@@ -0,0 +1,164 @@
|
||||
import fs from "node:fs/promises";
|
||||
import path from "node:path";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
|
||||
/** Run `fn` inside a throwaway HOME directory namespaced for these tests. */
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  return withTempHomeBase(fn, { prefix: "clawdbot-models-" });
}
|
||||
|
||||
// Sample custom-provider config fixture. The underscore prefix marks it as
// currently unused in this file — TODO(review): confirm whether it can be
// removed or is kept for upcoming tests.
const _MODELS_CONFIG: ClawdbotConfig = {
  models: {
    providers: {
      "custom-proxy": {
        baseUrl: "http://localhost:4000/v1",
        apiKey: "TEST_KEY",
        api: "openai-completions",
        models: [
          {
            id: "llama-3.1-8b",
            name: "Llama 3.1 8B (Proxy)",
            api: "openai-completions",
            reasoning: false,
            input: ["text"],
            cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
            contextWindow: 128000,
            maxTokens: 32000,
          },
        ],
      },
    },
  },
};
|
||||
|
||||
describe("models-config", () => {
|
||||
let previousHome: string | undefined;
|
||||
|
||||
beforeEach(() => {
|
||||
previousHome = process.env.HOME;
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
process.env.HOME = previousHome;
|
||||
});
|
||||
|
||||
it("uses the first github-copilot profile when env tokens are missing", async () => {
|
||||
await withTempHome(async (home) => {
|
||||
const previous = process.env.COPILOT_GITHUB_TOKEN;
|
||||
const previousGh = process.env.GH_TOKEN;
|
||||
const previousGithub = process.env.GITHUB_TOKEN;
|
||||
delete process.env.COPILOT_GITHUB_TOKEN;
|
||||
delete process.env.GH_TOKEN;
|
||||
delete process.env.GITHUB_TOKEN;
|
||||
|
||||
try {
|
||||
vi.resetModules();
|
||||
|
||||
const agentDir = path.join(home, "agent-profiles");
|
||||
await fs.mkdir(agentDir, { recursive: true });
|
||||
await fs.writeFile(
|
||||
path.join(agentDir, "auth-profiles.json"),
|
||||
JSON.stringify(
|
||||
{
|
||||
version: 1,
|
||||
profiles: {
|
||||
"github-copilot:alpha": {
|
||||
type: "token",
|
||||
provider: "github-copilot",
|
||||
token: "alpha-token",
|
||||
},
|
||||
"github-copilot:beta": {
|
||||
type: "token",
|
||||
provider: "github-copilot",
|
||||
token: "beta-token",
|
||||
},
|
||||
},
|
||||
},
|
||||
null,
|
||||
2,
|
||||
),
|
||||
);
|
||||
|
||||
const resolveCopilotApiToken = vi.fn().mockResolvedValue({
|
||||
token: "copilot",
|
||||
expiresAt: Date.now() + 60 * 60 * 1000,
|
||||
source: "mock",
|
||||
baseUrl: "https://api.copilot.example",
|
||||
});
|
||||
|
||||
vi.doMock("../providers/github-copilot-token.js", () => ({
|
||||
DEFAULT_COPILOT_API_BASE_URL:
|
||||
"https://api.individual.githubcopilot.com",
|
||||
resolveCopilotApiToken,
|
||||
}));
|
||||
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
|
||||
await ensureClawdbotModelsJson({ models: { providers: {} } }, agentDir);
|
||||
|
||||
expect(resolveCopilotApiToken).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ githubToken: "alpha-token" }),
|
||||
);
|
||||
} finally {
|
||||
if (previous === undefined) delete process.env.COPILOT_GITHUB_TOKEN;
|
||||
else process.env.COPILOT_GITHUB_TOKEN = previous;
|
||||
if (previousGh === undefined) delete process.env.GH_TOKEN;
|
||||
else process.env.GH_TOKEN = previousGh;
|
||||
if (previousGithub === undefined) delete process.env.GITHUB_TOKEN;
|
||||
else process.env.GITHUB_TOKEN = previousGithub;
|
||||
}
|
||||
});
|
||||
});
|
||||
it("does not override explicit github-copilot provider config", async () => {
|
||||
await withTempHome(async () => {
|
||||
const previous = process.env.COPILOT_GITHUB_TOKEN;
|
||||
process.env.COPILOT_GITHUB_TOKEN = "gh-token";
|
||||
|
||||
try {
|
||||
vi.resetModules();
|
||||
|
||||
vi.doMock("../providers/github-copilot-token.js", () => ({
|
||||
DEFAULT_COPILOT_API_BASE_URL:
|
||||
"https://api.individual.githubcopilot.com",
|
||||
resolveCopilotApiToken: vi.fn().mockResolvedValue({
|
||||
token: "copilot",
|
||||
expiresAt: Date.now() + 60 * 60 * 1000,
|
||||
source: "mock",
|
||||
baseUrl: "https://api.copilot.example",
|
||||
}),
|
||||
}));
|
||||
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
const { resolveClawdbotAgentDir } = await import("./agent-paths.js");
|
||||
|
||||
await ensureClawdbotModelsJson({
|
||||
models: {
|
||||
providers: {
|
||||
"github-copilot": {
|
||||
baseUrl: "https://copilot.local",
|
||||
api: "openai-responses",
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const agentDir = resolveClawdbotAgentDir();
|
||||
const raw = await fs.readFile(
|
||||
path.join(agentDir, "models.json"),
|
||||
"utf8",
|
||||
);
|
||||
const parsed = JSON.parse(raw) as {
|
||||
providers: Record<string, { baseUrl?: string }>;
|
||||
};
|
||||
|
||||
expect(parsed.providers["github-copilot"]?.baseUrl).toBe(
|
||||
"https://copilot.local",
|
||||
);
|
||||
} finally {
|
||||
process.env.COPILOT_GITHUB_TOKEN = previous;
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
149
src/agents/models-config.part-3.test.ts
Normal file
149
src/agents/models-config.part-3.test.ts
Normal file
@@ -0,0 +1,149 @@
|
||||
import fs from "node:fs/promises";
|
||||
import path from "node:path";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
|
||||
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
|
||||
return withTempHomeBase(fn, { prefix: "clawdbot-models-" });
|
||||
}
|
||||
|
||||
const _MODELS_CONFIG: ClawdbotConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
"custom-proxy": {
|
||||
baseUrl: "http://localhost:4000/v1",
|
||||
apiKey: "TEST_KEY",
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: "llama-3.1-8b",
|
||||
name: "Llama 3.1 8B (Proxy)",
|
||||
api: "openai-completions",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 128000,
|
||||
maxTokens: 32000,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
describe("models-config", () => {
|
||||
let previousHome: string | undefined;
|
||||
|
||||
beforeEach(() => {
|
||||
previousHome = process.env.HOME;
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
process.env.HOME = previousHome;
|
||||
});
|
||||
|
||||
it("falls back to default baseUrl when token exchange fails", async () => {
|
||||
await withTempHome(async () => {
|
||||
const previous = process.env.COPILOT_GITHUB_TOKEN;
|
||||
process.env.COPILOT_GITHUB_TOKEN = "gh-token";
|
||||
|
||||
try {
|
||||
vi.resetModules();
|
||||
|
||||
vi.doMock("../providers/github-copilot-token.js", () => ({
|
||||
DEFAULT_COPILOT_API_BASE_URL: "https://api.default.test",
|
||||
resolveCopilotApiToken: vi.fn().mockRejectedValue(new Error("boom")),
|
||||
}));
|
||||
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
const { resolveClawdbotAgentDir } = await import("./agent-paths.js");
|
||||
|
||||
await ensureClawdbotModelsJson({ models: { providers: {} } });
|
||||
|
||||
const agentDir = resolveClawdbotAgentDir();
|
||||
const raw = await fs.readFile(
|
||||
path.join(agentDir, "models.json"),
|
||||
"utf8",
|
||||
);
|
||||
const parsed = JSON.parse(raw) as {
|
||||
providers: Record<string, { baseUrl?: string }>;
|
||||
};
|
||||
|
||||
expect(parsed.providers["github-copilot"]?.baseUrl).toBe(
|
||||
"https://api.default.test",
|
||||
);
|
||||
} finally {
|
||||
process.env.COPILOT_GITHUB_TOKEN = previous;
|
||||
}
|
||||
});
|
||||
});
|
||||
it("uses agentDir override auth profiles for copilot injection", async () => {
|
||||
await withTempHome(async (home) => {
|
||||
const previous = process.env.COPILOT_GITHUB_TOKEN;
|
||||
const previousGh = process.env.GH_TOKEN;
|
||||
const previousGithub = process.env.GITHUB_TOKEN;
|
||||
delete process.env.COPILOT_GITHUB_TOKEN;
|
||||
delete process.env.GH_TOKEN;
|
||||
delete process.env.GITHUB_TOKEN;
|
||||
|
||||
try {
|
||||
vi.resetModules();
|
||||
|
||||
const agentDir = path.join(home, "agent-override");
|
||||
await fs.mkdir(agentDir, { recursive: true });
|
||||
await fs.writeFile(
|
||||
path.join(agentDir, "auth-profiles.json"),
|
||||
JSON.stringify(
|
||||
{
|
||||
version: 1,
|
||||
profiles: {
|
||||
"github-copilot:github": {
|
||||
type: "token",
|
||||
provider: "github-copilot",
|
||||
token: "gh-profile-token",
|
||||
},
|
||||
},
|
||||
},
|
||||
null,
|
||||
2,
|
||||
),
|
||||
);
|
||||
|
||||
vi.doMock("../providers/github-copilot-token.js", () => ({
|
||||
DEFAULT_COPILOT_API_BASE_URL:
|
||||
"https://api.individual.githubcopilot.com",
|
||||
resolveCopilotApiToken: vi.fn().mockResolvedValue({
|
||||
token: "copilot",
|
||||
expiresAt: Date.now() + 60 * 60 * 1000,
|
||||
source: "mock",
|
||||
baseUrl: "https://api.copilot.example",
|
||||
}),
|
||||
}));
|
||||
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
|
||||
await ensureClawdbotModelsJson({ models: { providers: {} } }, agentDir);
|
||||
|
||||
const raw = await fs.readFile(
|
||||
path.join(agentDir, "models.json"),
|
||||
"utf8",
|
||||
);
|
||||
const parsed = JSON.parse(raw) as {
|
||||
providers: Record<string, { baseUrl?: string }>;
|
||||
};
|
||||
|
||||
expect(parsed.providers["github-copilot"]?.baseUrl).toBe(
|
||||
"https://api.copilot.example",
|
||||
);
|
||||
} finally {
|
||||
if (previous === undefined) delete process.env.COPILOT_GITHUB_TOKEN;
|
||||
else process.env.COPILOT_GITHUB_TOKEN = previous;
|
||||
if (previousGh === undefined) delete process.env.GH_TOKEN;
|
||||
else process.env.GH_TOKEN = previousGh;
|
||||
if (previousGithub === undefined) delete process.env.GITHUB_TOKEN;
|
||||
else process.env.GITHUB_TOKEN = previousGithub;
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
186
src/agents/models-config.part-4.test.ts
Normal file
186
src/agents/models-config.part-4.test.ts
Normal file
@@ -0,0 +1,186 @@
|
||||
import fs from "node:fs/promises";
|
||||
import path from "node:path";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
|
||||
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
|
||||
return withTempHomeBase(fn, { prefix: "clawdbot-models-" });
|
||||
}
|
||||
|
||||
const MODELS_CONFIG: ClawdbotConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
"custom-proxy": {
|
||||
baseUrl: "http://localhost:4000/v1",
|
||||
apiKey: "TEST_KEY",
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: "llama-3.1-8b",
|
||||
name: "Llama 3.1 8B (Proxy)",
|
||||
api: "openai-completions",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 128000,
|
||||
maxTokens: 32000,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
describe("models-config", () => {
|
||||
let previousHome: string | undefined;
|
||||
|
||||
beforeEach(() => {
|
||||
previousHome = process.env.HOME;
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
process.env.HOME = previousHome;
|
||||
});
|
||||
|
||||
it("skips writing models.json when no env token or profile exists", async () => {
|
||||
await withTempHome(async (home) => {
|
||||
const previous = process.env.COPILOT_GITHUB_TOKEN;
|
||||
const previousGh = process.env.GH_TOKEN;
|
||||
const previousGithub = process.env.GITHUB_TOKEN;
|
||||
const previousMinimax = process.env.MINIMAX_API_KEY;
|
||||
const previousMoonshot = process.env.MOONSHOT_API_KEY;
|
||||
const previousSynthetic = process.env.SYNTHETIC_API_KEY;
|
||||
delete process.env.COPILOT_GITHUB_TOKEN;
|
||||
delete process.env.GH_TOKEN;
|
||||
delete process.env.GITHUB_TOKEN;
|
||||
delete process.env.MINIMAX_API_KEY;
|
||||
delete process.env.MOONSHOT_API_KEY;
|
||||
delete process.env.SYNTHETIC_API_KEY;
|
||||
|
||||
try {
|
||||
vi.resetModules();
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
|
||||
const agentDir = path.join(home, "agent-empty");
|
||||
const result = await ensureClawdbotModelsJson(
|
||||
{
|
||||
models: { providers: {} },
|
||||
},
|
||||
agentDir,
|
||||
);
|
||||
|
||||
await expect(
|
||||
fs.stat(path.join(agentDir, "models.json")),
|
||||
).rejects.toThrow();
|
||||
expect(result.wrote).toBe(false);
|
||||
} finally {
|
||||
if (previous === undefined) delete process.env.COPILOT_GITHUB_TOKEN;
|
||||
else process.env.COPILOT_GITHUB_TOKEN = previous;
|
||||
if (previousGh === undefined) delete process.env.GH_TOKEN;
|
||||
else process.env.GH_TOKEN = previousGh;
|
||||
if (previousGithub === undefined) delete process.env.GITHUB_TOKEN;
|
||||
else process.env.GITHUB_TOKEN = previousGithub;
|
||||
if (previousMinimax === undefined) delete process.env.MINIMAX_API_KEY;
|
||||
else process.env.MINIMAX_API_KEY = previousMinimax;
|
||||
if (previousMoonshot === undefined) delete process.env.MOONSHOT_API_KEY;
|
||||
else process.env.MOONSHOT_API_KEY = previousMoonshot;
|
||||
if (previousSynthetic === undefined)
|
||||
delete process.env.SYNTHETIC_API_KEY;
|
||||
else process.env.SYNTHETIC_API_KEY = previousSynthetic;
|
||||
}
|
||||
});
|
||||
});
|
||||
it("writes models.json for configured providers", async () => {
|
||||
await withTempHome(async () => {
|
||||
vi.resetModules();
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
const { resolveClawdbotAgentDir } = await import("./agent-paths.js");
|
||||
|
||||
await ensureClawdbotModelsJson(MODELS_CONFIG);
|
||||
|
||||
const modelPath = path.join(resolveClawdbotAgentDir(), "models.json");
|
||||
const raw = await fs.readFile(modelPath, "utf8");
|
||||
const parsed = JSON.parse(raw) as {
|
||||
providers: Record<string, { baseUrl?: string }>;
|
||||
};
|
||||
|
||||
expect(parsed.providers["custom-proxy"]?.baseUrl).toBe(
|
||||
"http://localhost:4000/v1",
|
||||
);
|
||||
});
|
||||
});
|
||||
it("adds minimax provider when MINIMAX_API_KEY is set", async () => {
|
||||
await withTempHome(async () => {
|
||||
vi.resetModules();
|
||||
const prevKey = process.env.MINIMAX_API_KEY;
|
||||
process.env.MINIMAX_API_KEY = "sk-minimax-test";
|
||||
try {
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
const { resolveClawdbotAgentDir } = await import("./agent-paths.js");
|
||||
|
||||
await ensureClawdbotModelsJson({});
|
||||
|
||||
const modelPath = path.join(resolveClawdbotAgentDir(), "models.json");
|
||||
const raw = await fs.readFile(modelPath, "utf8");
|
||||
const parsed = JSON.parse(raw) as {
|
||||
providers: Record<
|
||||
string,
|
||||
{
|
||||
baseUrl?: string;
|
||||
apiKey?: string;
|
||||
models?: Array<{ id: string }>;
|
||||
}
|
||||
>;
|
||||
};
|
||||
expect(parsed.providers.minimax?.baseUrl).toBe(
|
||||
"https://api.minimax.io/anthropic",
|
||||
);
|
||||
expect(parsed.providers.minimax?.apiKey).toBe("MINIMAX_API_KEY");
|
||||
const ids = parsed.providers.minimax?.models?.map((model) => model.id);
|
||||
expect(ids).toContain("MiniMax-M2.1");
|
||||
expect(ids).toContain("MiniMax-VL-01");
|
||||
} finally {
|
||||
if (prevKey === undefined) delete process.env.MINIMAX_API_KEY;
|
||||
else process.env.MINIMAX_API_KEY = prevKey;
|
||||
}
|
||||
});
|
||||
});
|
||||
it("adds synthetic provider when SYNTHETIC_API_KEY is set", async () => {
|
||||
await withTempHome(async () => {
|
||||
vi.resetModules();
|
||||
const prevKey = process.env.SYNTHETIC_API_KEY;
|
||||
process.env.SYNTHETIC_API_KEY = "sk-synthetic-test";
|
||||
try {
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
const { resolveClawdbotAgentDir } = await import("./agent-paths.js");
|
||||
|
||||
await ensureClawdbotModelsJson({});
|
||||
|
||||
const modelPath = path.join(resolveClawdbotAgentDir(), "models.json");
|
||||
const raw = await fs.readFile(modelPath, "utf8");
|
||||
const parsed = JSON.parse(raw) as {
|
||||
providers: Record<
|
||||
string,
|
||||
{
|
||||
baseUrl?: string;
|
||||
apiKey?: string;
|
||||
models?: Array<{ id: string }>;
|
||||
}
|
||||
>;
|
||||
};
|
||||
expect(parsed.providers.synthetic?.baseUrl).toBe(
|
||||
"https://api.synthetic.new/anthropic",
|
||||
);
|
||||
expect(parsed.providers.synthetic?.apiKey).toBe("SYNTHETIC_API_KEY");
|
||||
const ids = parsed.providers.synthetic?.models?.map(
|
||||
(model) => model.id,
|
||||
);
|
||||
expect(ids).toContain("hf:MiniMaxAI/MiniMax-M2.1");
|
||||
} finally {
|
||||
if (prevKey === undefined) delete process.env.SYNTHETIC_API_KEY;
|
||||
else process.env.SYNTHETIC_API_KEY = prevKey;
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
149
src/agents/models-config.part-5.test.ts
Normal file
149
src/agents/models-config.part-5.test.ts
Normal file
@@ -0,0 +1,149 @@
|
||||
import fs from "node:fs/promises";
|
||||
import path from "node:path";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
|
||||
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
|
||||
return withTempHomeBase(fn, { prefix: "clawdbot-models-" });
|
||||
}
|
||||
|
||||
const MODELS_CONFIG: ClawdbotConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
"custom-proxy": {
|
||||
baseUrl: "http://localhost:4000/v1",
|
||||
apiKey: "TEST_KEY",
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: "llama-3.1-8b",
|
||||
name: "Llama 3.1 8B (Proxy)",
|
||||
api: "openai-completions",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 128000,
|
||||
maxTokens: 32000,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
describe("models-config", () => {
|
||||
let previousHome: string | undefined;
|
||||
|
||||
beforeEach(() => {
|
||||
previousHome = process.env.HOME;
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
process.env.HOME = previousHome;
|
||||
});
|
||||
|
||||
it("fills missing provider.apiKey from env var name when models exist", async () => {
|
||||
await withTempHome(async () => {
|
||||
vi.resetModules();
|
||||
const prevKey = process.env.MINIMAX_API_KEY;
|
||||
process.env.MINIMAX_API_KEY = "sk-minimax-test";
|
||||
try {
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
const { resolveClawdbotAgentDir } = await import("./agent-paths.js");
|
||||
|
||||
const cfg: ClawdbotConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
minimax: {
|
||||
baseUrl: "https://api.minimax.io/anthropic",
|
||||
api: "anthropic-messages",
|
||||
models: [
|
||||
{
|
||||
id: "MiniMax-M2.1",
|
||||
name: "MiniMax M2.1",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 200000,
|
||||
maxTokens: 8192,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
await ensureClawdbotModelsJson(cfg);
|
||||
|
||||
const modelPath = path.join(resolveClawdbotAgentDir(), "models.json");
|
||||
const raw = await fs.readFile(modelPath, "utf8");
|
||||
const parsed = JSON.parse(raw) as {
|
||||
providers: Record<
|
||||
string,
|
||||
{ apiKey?: string; models?: Array<{ id: string }> }
|
||||
>;
|
||||
};
|
||||
expect(parsed.providers.minimax?.apiKey).toBe("MINIMAX_API_KEY");
|
||||
const ids = parsed.providers.minimax?.models?.map((model) => model.id);
|
||||
expect(ids).toContain("MiniMax-VL-01");
|
||||
} finally {
|
||||
if (prevKey === undefined) delete process.env.MINIMAX_API_KEY;
|
||||
else process.env.MINIMAX_API_KEY = prevKey;
|
||||
}
|
||||
});
|
||||
});
|
||||
it("merges providers by default", async () => {
|
||||
await withTempHome(async () => {
|
||||
vi.resetModules();
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
const { resolveClawdbotAgentDir } = await import("./agent-paths.js");
|
||||
|
||||
const agentDir = resolveClawdbotAgentDir();
|
||||
await fs.mkdir(agentDir, { recursive: true });
|
||||
await fs.writeFile(
|
||||
path.join(agentDir, "models.json"),
|
||||
JSON.stringify(
|
||||
{
|
||||
providers: {
|
||||
existing: {
|
||||
baseUrl: "http://localhost:1234/v1",
|
||||
apiKey: "EXISTING_KEY",
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: "existing-model",
|
||||
name: "Existing",
|
||||
api: "openai-completions",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 8192,
|
||||
maxTokens: 2048,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
null,
|
||||
2,
|
||||
),
|
||||
"utf8",
|
||||
);
|
||||
|
||||
await ensureClawdbotModelsJson(MODELS_CONFIG);
|
||||
|
||||
const raw = await fs.readFile(path.join(agentDir, "models.json"), "utf8");
|
||||
const parsed = JSON.parse(raw) as {
|
||||
providers: Record<string, { baseUrl?: string }>;
|
||||
};
|
||||
|
||||
expect(parsed.providers.existing?.baseUrl).toBe(
|
||||
"http://localhost:1234/v1",
|
||||
);
|
||||
expect(parsed.providers["custom-proxy"]?.baseUrl).toBe(
|
||||
"http://localhost:4000/v1",
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
||||
97
src/agents/models-config.part-6.test.ts
Normal file
97
src/agents/models-config.part-6.test.ts
Normal file
@@ -0,0 +1,97 @@
|
||||
import fs from "node:fs/promises";
|
||||
import path from "node:path";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
|
||||
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
|
||||
return withTempHomeBase(fn, { prefix: "clawdbot-models-" });
|
||||
}
|
||||
|
||||
const _MODELS_CONFIG: ClawdbotConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
"custom-proxy": {
|
||||
baseUrl: "http://localhost:4000/v1",
|
||||
apiKey: "TEST_KEY",
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: "llama-3.1-8b",
|
||||
name: "Llama 3.1 8B (Proxy)",
|
||||
api: "openai-completions",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 128000,
|
||||
maxTokens: 32000,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
describe("models-config", () => {
|
||||
let previousHome: string | undefined;
|
||||
|
||||
beforeEach(() => {
|
||||
previousHome = process.env.HOME;
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
process.env.HOME = previousHome;
|
||||
});
|
||||
|
||||
it("normalizes gemini 3 ids to preview for google providers", async () => {
|
||||
await withTempHome(async () => {
|
||||
vi.resetModules();
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
const { resolveClawdbotAgentDir } = await import("./agent-paths.js");
|
||||
|
||||
const cfg: ClawdbotConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
google: {
|
||||
baseUrl: "https://generativelanguage.googleapis.com/v1beta",
|
||||
apiKey: "GEMINI_KEY",
|
||||
api: "google-generative-ai",
|
||||
models: [
|
||||
{
|
||||
id: "gemini-3-pro",
|
||||
name: "Gemini 3 Pro",
|
||||
api: "google-generative-ai",
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 1048576,
|
||||
maxTokens: 65536,
|
||||
},
|
||||
{
|
||||
id: "gemini-3-flash",
|
||||
name: "Gemini 3 Flash",
|
||||
api: "google-generative-ai",
|
||||
reasoning: false,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 1048576,
|
||||
maxTokens: 65536,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
await ensureClawdbotModelsJson(cfg);
|
||||
|
||||
const modelPath = path.join(resolveClawdbotAgentDir(), "models.json");
|
||||
const raw = await fs.readFile(modelPath, "utf8");
|
||||
const parsed = JSON.parse(raw) as {
|
||||
providers: Record<string, { models: Array<{ id: string }> }>;
|
||||
};
|
||||
const ids = parsed.providers.google?.models?.map((model) => model.id);
|
||||
expect(ids).toEqual(["gemini-3-pro-preview", "gemini-3-flash-preview"]);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,653 +0,0 @@
|
||||
import fs from "node:fs/promises";
|
||||
import path from "node:path";
|
||||
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
|
||||
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
|
||||
return withTempHomeBase(fn, { prefix: "clawdbot-models-" });
|
||||
}
|
||||
|
||||
const MODELS_CONFIG: ClawdbotConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
"custom-proxy": {
|
||||
baseUrl: "http://localhost:4000/v1",
|
||||
apiKey: "TEST_KEY",
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: "llama-3.1-8b",
|
||||
name: "Llama 3.1 8B (Proxy)",
|
||||
api: "openai-completions",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 128000,
|
||||
maxTokens: 32000,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
describe("models config", () => {
|
||||
it("auto-injects github-copilot provider when token is present", async () => {
|
||||
await withTempHome(async (home) => {
|
||||
const previous = process.env.COPILOT_GITHUB_TOKEN;
|
||||
process.env.COPILOT_GITHUB_TOKEN = "gh-token";
|
||||
|
||||
try {
|
||||
vi.resetModules();
|
||||
|
||||
vi.doMock("../providers/github-copilot-token.js", () => ({
|
||||
DEFAULT_COPILOT_API_BASE_URL:
|
||||
"https://api.individual.githubcopilot.com",
|
||||
resolveCopilotApiToken: vi.fn().mockResolvedValue({
|
||||
token: "copilot",
|
||||
expiresAt: Date.now() + 60 * 60 * 1000,
|
||||
source: "mock",
|
||||
baseUrl: "https://api.copilot.example",
|
||||
}),
|
||||
}));
|
||||
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
|
||||
const agentDir = path.join(home, "agent-default-base-url");
|
||||
await ensureClawdbotModelsJson({ models: { providers: {} } }, agentDir);
|
||||
|
||||
const raw = await fs.readFile(
|
||||
path.join(agentDir, "models.json"),
|
||||
"utf8",
|
||||
);
|
||||
const parsed = JSON.parse(raw) as {
|
||||
providers: Record<string, { baseUrl?: string; models?: unknown[] }>;
|
||||
};
|
||||
|
||||
expect(parsed.providers["github-copilot"]?.baseUrl).toBe(
|
||||
"https://api.copilot.example",
|
||||
);
|
||||
expect(parsed.providers["github-copilot"]?.models?.length ?? 0).toBe(0);
|
||||
} finally {
|
||||
process.env.COPILOT_GITHUB_TOKEN = previous;
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
it("prefers COPILOT_GITHUB_TOKEN over GH_TOKEN and GITHUB_TOKEN", async () => {
|
||||
await withTempHome(async () => {
|
||||
const previous = process.env.COPILOT_GITHUB_TOKEN;
|
||||
const previousGh = process.env.GH_TOKEN;
|
||||
const previousGithub = process.env.GITHUB_TOKEN;
|
||||
process.env.COPILOT_GITHUB_TOKEN = "copilot-token";
|
||||
process.env.GH_TOKEN = "gh-token";
|
||||
process.env.GITHUB_TOKEN = "github-token";
|
||||
|
||||
try {
|
||||
vi.resetModules();
|
||||
|
||||
const resolveCopilotApiToken = vi.fn().mockResolvedValue({
|
||||
token: "copilot",
|
||||
expiresAt: Date.now() + 60 * 60 * 1000,
|
||||
source: "mock",
|
||||
baseUrl: "https://api.copilot.example",
|
||||
});
|
||||
|
||||
vi.doMock("../providers/github-copilot-token.js", () => ({
|
||||
DEFAULT_COPILOT_API_BASE_URL:
|
||||
"https://api.individual.githubcopilot.com",
|
||||
resolveCopilotApiToken,
|
||||
}));
|
||||
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
|
||||
await ensureClawdbotModelsJson({ models: { providers: {} } });
|
||||
|
||||
expect(resolveCopilotApiToken).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ githubToken: "copilot-token" }),
|
||||
);
|
||||
} finally {
|
||||
process.env.COPILOT_GITHUB_TOKEN = previous;
|
||||
process.env.GH_TOKEN = previousGh;
|
||||
process.env.GITHUB_TOKEN = previousGithub;
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
it("uses the first github-copilot profile when env tokens are missing", async () => {
|
||||
await withTempHome(async (home) => {
|
||||
const previous = process.env.COPILOT_GITHUB_TOKEN;
|
||||
const previousGh = process.env.GH_TOKEN;
|
||||
const previousGithub = process.env.GITHUB_TOKEN;
|
||||
delete process.env.COPILOT_GITHUB_TOKEN;
|
||||
delete process.env.GH_TOKEN;
|
||||
delete process.env.GITHUB_TOKEN;
|
||||
|
||||
try {
|
||||
vi.resetModules();
|
||||
|
||||
const agentDir = path.join(home, "agent-profiles");
|
||||
await fs.mkdir(agentDir, { recursive: true });
|
||||
await fs.writeFile(
|
||||
path.join(agentDir, "auth-profiles.json"),
|
||||
JSON.stringify(
|
||||
{
|
||||
version: 1,
|
||||
profiles: {
|
||||
"github-copilot:alpha": {
|
||||
type: "token",
|
||||
provider: "github-copilot",
|
||||
token: "alpha-token",
|
||||
},
|
||||
"github-copilot:beta": {
|
||||
type: "token",
|
||||
provider: "github-copilot",
|
||||
token: "beta-token",
|
||||
},
|
||||
},
|
||||
},
|
||||
null,
|
||||
2,
|
||||
),
|
||||
);
|
||||
|
||||
const resolveCopilotApiToken = vi.fn().mockResolvedValue({
|
||||
token: "copilot",
|
||||
expiresAt: Date.now() + 60 * 60 * 1000,
|
||||
source: "mock",
|
||||
baseUrl: "https://api.copilot.example",
|
||||
});
|
||||
|
||||
vi.doMock("../providers/github-copilot-token.js", () => ({
|
||||
DEFAULT_COPILOT_API_BASE_URL:
|
||||
"https://api.individual.githubcopilot.com",
|
||||
resolveCopilotApiToken,
|
||||
}));
|
||||
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
|
||||
await ensureClawdbotModelsJson({ models: { providers: {} } }, agentDir);
|
||||
|
||||
expect(resolveCopilotApiToken).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ githubToken: "alpha-token" }),
|
||||
);
|
||||
} finally {
|
||||
if (previous === undefined) delete process.env.COPILOT_GITHUB_TOKEN;
|
||||
else process.env.COPILOT_GITHUB_TOKEN = previous;
|
||||
if (previousGh === undefined) delete process.env.GH_TOKEN;
|
||||
else process.env.GH_TOKEN = previousGh;
|
||||
if (previousGithub === undefined) delete process.env.GITHUB_TOKEN;
|
||||
else process.env.GITHUB_TOKEN = previousGithub;
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
it("does not override explicit github-copilot provider config", async () => {
|
||||
await withTempHome(async () => {
|
||||
const previous = process.env.COPILOT_GITHUB_TOKEN;
|
||||
process.env.COPILOT_GITHUB_TOKEN = "gh-token";
|
||||
|
||||
try {
|
||||
vi.resetModules();
|
||||
|
||||
vi.doMock("../providers/github-copilot-token.js", () => ({
|
||||
DEFAULT_COPILOT_API_BASE_URL:
|
||||
"https://api.individual.githubcopilot.com",
|
||||
resolveCopilotApiToken: vi.fn().mockResolvedValue({
|
||||
token: "copilot",
|
||||
expiresAt: Date.now() + 60 * 60 * 1000,
|
||||
source: "mock",
|
||||
baseUrl: "https://api.copilot.example",
|
||||
}),
|
||||
}));
|
||||
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
const { resolveClawdbotAgentDir } = await import("./agent-paths.js");
|
||||
|
||||
await ensureClawdbotModelsJson({
|
||||
models: {
|
||||
providers: {
|
||||
"github-copilot": {
|
||||
baseUrl: "https://copilot.local",
|
||||
api: "openai-responses",
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const agentDir = resolveClawdbotAgentDir();
|
||||
const raw = await fs.readFile(
|
||||
path.join(agentDir, "models.json"),
|
||||
"utf8",
|
||||
);
|
||||
const parsed = JSON.parse(raw) as {
|
||||
providers: Record<string, { baseUrl?: string }>;
|
||||
};
|
||||
|
||||
expect(parsed.providers["github-copilot"]?.baseUrl).toBe(
|
||||
"https://copilot.local",
|
||||
);
|
||||
} finally {
|
||||
process.env.COPILOT_GITHUB_TOKEN = previous;
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
it("falls back to default baseUrl when token exchange fails", async () => {
|
||||
await withTempHome(async () => {
|
||||
const previous = process.env.COPILOT_GITHUB_TOKEN;
|
||||
process.env.COPILOT_GITHUB_TOKEN = "gh-token";
|
||||
|
||||
try {
|
||||
vi.resetModules();
|
||||
|
||||
vi.doMock("../providers/github-copilot-token.js", () => ({
|
||||
DEFAULT_COPILOT_API_BASE_URL: "https://api.default.test",
|
||||
resolveCopilotApiToken: vi.fn().mockRejectedValue(new Error("boom")),
|
||||
}));
|
||||
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
const { resolveClawdbotAgentDir } = await import("./agent-paths.js");
|
||||
|
||||
await ensureClawdbotModelsJson({ models: { providers: {} } });
|
||||
|
||||
const agentDir = resolveClawdbotAgentDir();
|
||||
const raw = await fs.readFile(
|
||||
path.join(agentDir, "models.json"),
|
||||
"utf8",
|
||||
);
|
||||
const parsed = JSON.parse(raw) as {
|
||||
providers: Record<string, { baseUrl?: string }>;
|
||||
};
|
||||
|
||||
expect(parsed.providers["github-copilot"]?.baseUrl).toBe(
|
||||
"https://api.default.test",
|
||||
);
|
||||
} finally {
|
||||
process.env.COPILOT_GITHUB_TOKEN = previous;
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
it("uses agentDir override auth profiles for copilot injection", async () => {
|
||||
await withTempHome(async (home) => {
|
||||
const previous = process.env.COPILOT_GITHUB_TOKEN;
|
||||
const previousGh = process.env.GH_TOKEN;
|
||||
const previousGithub = process.env.GITHUB_TOKEN;
|
||||
delete process.env.COPILOT_GITHUB_TOKEN;
|
||||
delete process.env.GH_TOKEN;
|
||||
delete process.env.GITHUB_TOKEN;
|
||||
|
||||
try {
|
||||
vi.resetModules();
|
||||
|
||||
const agentDir = path.join(home, "agent-override");
|
||||
await fs.mkdir(agentDir, { recursive: true });
|
||||
await fs.writeFile(
|
||||
path.join(agentDir, "auth-profiles.json"),
|
||||
JSON.stringify(
|
||||
{
|
||||
version: 1,
|
||||
profiles: {
|
||||
"github-copilot:github": {
|
||||
type: "token",
|
||||
provider: "github-copilot",
|
||||
token: "gh-profile-token",
|
||||
},
|
||||
},
|
||||
},
|
||||
null,
|
||||
2,
|
||||
),
|
||||
);
|
||||
|
||||
vi.doMock("../providers/github-copilot-token.js", () => ({
|
||||
DEFAULT_COPILOT_API_BASE_URL:
|
||||
"https://api.individual.githubcopilot.com",
|
||||
resolveCopilotApiToken: vi.fn().mockResolvedValue({
|
||||
token: "copilot",
|
||||
expiresAt: Date.now() + 60 * 60 * 1000,
|
||||
source: "mock",
|
||||
baseUrl: "https://api.copilot.example",
|
||||
}),
|
||||
}));
|
||||
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
|
||||
await ensureClawdbotModelsJson({ models: { providers: {} } }, agentDir);
|
||||
|
||||
const raw = await fs.readFile(
|
||||
path.join(agentDir, "models.json"),
|
||||
"utf8",
|
||||
);
|
||||
const parsed = JSON.parse(raw) as {
|
||||
providers: Record<string, { baseUrl?: string }>;
|
||||
};
|
||||
|
||||
expect(parsed.providers["github-copilot"]?.baseUrl).toBe(
|
||||
"https://api.copilot.example",
|
||||
);
|
||||
} finally {
|
||||
if (previous === undefined) delete process.env.COPILOT_GITHUB_TOKEN;
|
||||
else process.env.COPILOT_GITHUB_TOKEN = previous;
|
||||
if (previousGh === undefined) delete process.env.GH_TOKEN;
|
||||
else process.env.GH_TOKEN = previousGh;
|
||||
if (previousGithub === undefined) delete process.env.GITHUB_TOKEN;
|
||||
else process.env.GITHUB_TOKEN = previousGithub;
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
it("skips writing models.json when no env token or profile exists", async () => {
|
||||
await withTempHome(async (home) => {
|
||||
const previous = process.env.COPILOT_GITHUB_TOKEN;
|
||||
const previousGh = process.env.GH_TOKEN;
|
||||
const previousGithub = process.env.GITHUB_TOKEN;
|
||||
const previousMinimax = process.env.MINIMAX_API_KEY;
|
||||
const previousMoonshot = process.env.MOONSHOT_API_KEY;
|
||||
const previousSynthetic = process.env.SYNTHETIC_API_KEY;
|
||||
delete process.env.COPILOT_GITHUB_TOKEN;
|
||||
delete process.env.GH_TOKEN;
|
||||
delete process.env.GITHUB_TOKEN;
|
||||
delete process.env.MINIMAX_API_KEY;
|
||||
delete process.env.MOONSHOT_API_KEY;
|
||||
delete process.env.SYNTHETIC_API_KEY;
|
||||
|
||||
try {
|
||||
vi.resetModules();
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
|
||||
const agentDir = path.join(home, "agent-empty");
|
||||
const result = await ensureClawdbotModelsJson(
|
||||
{
|
||||
models: { providers: {} },
|
||||
},
|
||||
agentDir,
|
||||
);
|
||||
|
||||
await expect(
|
||||
fs.stat(path.join(agentDir, "models.json")),
|
||||
).rejects.toThrow();
|
||||
expect(result.wrote).toBe(false);
|
||||
} finally {
|
||||
if (previous === undefined) delete process.env.COPILOT_GITHUB_TOKEN;
|
||||
else process.env.COPILOT_GITHUB_TOKEN = previous;
|
||||
if (previousGh === undefined) delete process.env.GH_TOKEN;
|
||||
else process.env.GH_TOKEN = previousGh;
|
||||
if (previousGithub === undefined) delete process.env.GITHUB_TOKEN;
|
||||
else process.env.GITHUB_TOKEN = previousGithub;
|
||||
if (previousMinimax === undefined) delete process.env.MINIMAX_API_KEY;
|
||||
else process.env.MINIMAX_API_KEY = previousMinimax;
|
||||
if (previousMoonshot === undefined) delete process.env.MOONSHOT_API_KEY;
|
||||
else process.env.MOONSHOT_API_KEY = previousMoonshot;
|
||||
if (previousSynthetic === undefined)
|
||||
delete process.env.SYNTHETIC_API_KEY;
|
||||
else process.env.SYNTHETIC_API_KEY = previousSynthetic;
|
||||
}
|
||||
});
|
||||
});
|
||||
let previousHome: string | undefined;
|
||||
|
||||
beforeEach(() => {
|
||||
previousHome = process.env.HOME;
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
process.env.HOME = previousHome;
|
||||
});
|
||||
|
||||
it("writes models.json for configured providers", async () => {
|
||||
await withTempHome(async () => {
|
||||
vi.resetModules();
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
const { resolveClawdbotAgentDir } = await import("./agent-paths.js");
|
||||
|
||||
await ensureClawdbotModelsJson(MODELS_CONFIG);
|
||||
|
||||
const modelPath = path.join(resolveClawdbotAgentDir(), "models.json");
|
||||
const raw = await fs.readFile(modelPath, "utf8");
|
||||
const parsed = JSON.parse(raw) as {
|
||||
providers: Record<string, { baseUrl?: string }>;
|
||||
};
|
||||
|
||||
expect(parsed.providers["custom-proxy"]?.baseUrl).toBe(
|
||||
"http://localhost:4000/v1",
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
it("adds minimax provider when MINIMAX_API_KEY is set", async () => {
|
||||
await withTempHome(async () => {
|
||||
vi.resetModules();
|
||||
const prevKey = process.env.MINIMAX_API_KEY;
|
||||
process.env.MINIMAX_API_KEY = "sk-minimax-test";
|
||||
try {
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
const { resolveClawdbotAgentDir } = await import("./agent-paths.js");
|
||||
|
||||
await ensureClawdbotModelsJson({});
|
||||
|
||||
const modelPath = path.join(resolveClawdbotAgentDir(), "models.json");
|
||||
const raw = await fs.readFile(modelPath, "utf8");
|
||||
const parsed = JSON.parse(raw) as {
|
||||
providers: Record<
|
||||
string,
|
||||
{
|
||||
baseUrl?: string;
|
||||
apiKey?: string;
|
||||
models?: Array<{ id: string }>;
|
||||
}
|
||||
>;
|
||||
};
|
||||
expect(parsed.providers.minimax?.baseUrl).toBe(
|
||||
"https://api.minimax.io/anthropic",
|
||||
);
|
||||
expect(parsed.providers.minimax?.apiKey).toBe("MINIMAX_API_KEY");
|
||||
const ids = parsed.providers.minimax?.models?.map((model) => model.id);
|
||||
expect(ids).toContain("MiniMax-M2.1");
|
||||
expect(ids).toContain("MiniMax-VL-01");
|
||||
} finally {
|
||||
if (prevKey === undefined) delete process.env.MINIMAX_API_KEY;
|
||||
else process.env.MINIMAX_API_KEY = prevKey;
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
it("adds synthetic provider when SYNTHETIC_API_KEY is set", async () => {
|
||||
await withTempHome(async () => {
|
||||
vi.resetModules();
|
||||
const prevKey = process.env.SYNTHETIC_API_KEY;
|
||||
process.env.SYNTHETIC_API_KEY = "sk-synthetic-test";
|
||||
try {
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
const { resolveClawdbotAgentDir } = await import("./agent-paths.js");
|
||||
|
||||
await ensureClawdbotModelsJson({});
|
||||
|
||||
const modelPath = path.join(resolveClawdbotAgentDir(), "models.json");
|
||||
const raw = await fs.readFile(modelPath, "utf8");
|
||||
const parsed = JSON.parse(raw) as {
|
||||
providers: Record<
|
||||
string,
|
||||
{
|
||||
baseUrl?: string;
|
||||
apiKey?: string;
|
||||
models?: Array<{ id: string }>;
|
||||
}
|
||||
>;
|
||||
};
|
||||
expect(parsed.providers.synthetic?.baseUrl).toBe(
|
||||
"https://api.synthetic.new/anthropic",
|
||||
);
|
||||
expect(parsed.providers.synthetic?.apiKey).toBe("SYNTHETIC_API_KEY");
|
||||
const ids = parsed.providers.synthetic?.models?.map(
|
||||
(model) => model.id,
|
||||
);
|
||||
expect(ids).toContain("hf:MiniMaxAI/MiniMax-M2.1");
|
||||
} finally {
|
||||
if (prevKey === undefined) delete process.env.SYNTHETIC_API_KEY;
|
||||
else process.env.SYNTHETIC_API_KEY = prevKey;
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
it("fills missing provider.apiKey from env var name when models exist", async () => {
|
||||
await withTempHome(async () => {
|
||||
vi.resetModules();
|
||||
const prevKey = process.env.MINIMAX_API_KEY;
|
||||
process.env.MINIMAX_API_KEY = "sk-minimax-test";
|
||||
try {
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
const { resolveClawdbotAgentDir } = await import("./agent-paths.js");
|
||||
|
||||
const cfg: ClawdbotConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
minimax: {
|
||||
baseUrl: "https://api.minimax.io/anthropic",
|
||||
api: "anthropic-messages",
|
||||
models: [
|
||||
{
|
||||
id: "MiniMax-M2.1",
|
||||
name: "MiniMax M2.1",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 200000,
|
||||
maxTokens: 8192,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
await ensureClawdbotModelsJson(cfg);
|
||||
|
||||
const modelPath = path.join(resolveClawdbotAgentDir(), "models.json");
|
||||
const raw = await fs.readFile(modelPath, "utf8");
|
||||
const parsed = JSON.parse(raw) as {
|
||||
providers: Record<
|
||||
string,
|
||||
{ apiKey?: string; models?: Array<{ id: string }> }
|
||||
>;
|
||||
};
|
||||
expect(parsed.providers.minimax?.apiKey).toBe("MINIMAX_API_KEY");
|
||||
const ids = parsed.providers.minimax?.models?.map((model) => model.id);
|
||||
expect(ids).toContain("MiniMax-VL-01");
|
||||
} finally {
|
||||
if (prevKey === undefined) delete process.env.MINIMAX_API_KEY;
|
||||
else process.env.MINIMAX_API_KEY = prevKey;
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
it("merges providers by default", async () => {
|
||||
await withTempHome(async () => {
|
||||
vi.resetModules();
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
const { resolveClawdbotAgentDir } = await import("./agent-paths.js");
|
||||
|
||||
const agentDir = resolveClawdbotAgentDir();
|
||||
await fs.mkdir(agentDir, { recursive: true });
|
||||
await fs.writeFile(
|
||||
path.join(agentDir, "models.json"),
|
||||
JSON.stringify(
|
||||
{
|
||||
providers: {
|
||||
existing: {
|
||||
baseUrl: "http://localhost:1234/v1",
|
||||
apiKey: "EXISTING_KEY",
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: "existing-model",
|
||||
name: "Existing",
|
||||
api: "openai-completions",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 8192,
|
||||
maxTokens: 2048,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
null,
|
||||
2,
|
||||
),
|
||||
"utf8",
|
||||
);
|
||||
|
||||
await ensureClawdbotModelsJson(MODELS_CONFIG);
|
||||
|
||||
const raw = await fs.readFile(path.join(agentDir, "models.json"), "utf8");
|
||||
const parsed = JSON.parse(raw) as {
|
||||
providers: Record<string, { baseUrl?: string }>;
|
||||
};
|
||||
|
||||
expect(parsed.providers.existing?.baseUrl).toBe(
|
||||
"http://localhost:1234/v1",
|
||||
);
|
||||
expect(parsed.providers["custom-proxy"]?.baseUrl).toBe(
|
||||
"http://localhost:4000/v1",
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
it("normalizes gemini 3 ids to preview for google providers", async () => {
|
||||
await withTempHome(async () => {
|
||||
vi.resetModules();
|
||||
const { ensureClawdbotModelsJson } = await import("./models-config.js");
|
||||
const { resolveClawdbotAgentDir } = await import("./agent-paths.js");
|
||||
|
||||
const cfg: ClawdbotConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
google: {
|
||||
baseUrl: "https://generativelanguage.googleapis.com/v1beta",
|
||||
apiKey: "GEMINI_KEY",
|
||||
api: "google-generative-ai",
|
||||
models: [
|
||||
{
|
||||
id: "gemini-3-pro",
|
||||
name: "Gemini 3 Pro",
|
||||
api: "google-generative-ai",
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 1048576,
|
||||
maxTokens: 65536,
|
||||
},
|
||||
{
|
||||
id: "gemini-3-flash",
|
||||
name: "Gemini 3 Flash",
|
||||
api: "google-generative-ai",
|
||||
reasoning: false,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 1048576,
|
||||
maxTokens: 65536,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
await ensureClawdbotModelsJson(cfg);
|
||||
|
||||
const modelPath = path.join(resolveClawdbotAgentDir(), "models.json");
|
||||
const raw = await fs.readFile(modelPath, "utf8");
|
||||
const parsed = JSON.parse(raw) as {
|
||||
providers: Record<string, { models: Array<{ id: string }> }>;
|
||||
};
|
||||
const ids = parsed.providers.google?.models?.map((model) => model.id);
|
||||
expect(ids).toEqual(["gemini-3-pro-preview", "gemini-3-flash-preview"]);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,62 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import {
|
||||
buildBootstrapContextFiles,
|
||||
DEFAULT_BOOTSTRAP_MAX_CHARS,
|
||||
} from "./pi-embedded-helpers.js";
|
||||
import { DEFAULT_AGENTS_FILENAME } from "./workspace.js";
|
||||
|
||||
const makeFile = (
|
||||
overrides: Partial<WorkspaceBootstrapFile>,
|
||||
): WorkspaceBootstrapFile => ({
|
||||
name: DEFAULT_AGENTS_FILENAME,
|
||||
path: "/tmp/AGENTS.md",
|
||||
content: "",
|
||||
missing: false,
|
||||
...overrides,
|
||||
});
|
||||
describe("buildBootstrapContextFiles", () => {
|
||||
it("keeps missing markers", () => {
|
||||
const files = [makeFile({ missing: true, content: undefined })];
|
||||
expect(buildBootstrapContextFiles(files)).toEqual([
|
||||
{
|
||||
path: DEFAULT_AGENTS_FILENAME,
|
||||
content: "[MISSING] Expected at: /tmp/AGENTS.md",
|
||||
},
|
||||
]);
|
||||
});
|
||||
it("skips empty or whitespace-only content", () => {
|
||||
const files = [makeFile({ content: " \n " })];
|
||||
expect(buildBootstrapContextFiles(files)).toEqual([]);
|
||||
});
|
||||
it("truncates large bootstrap content", () => {
|
||||
const head = `HEAD-${"a".repeat(600)}`;
|
||||
const tail = `${"b".repeat(300)}-TAIL`;
|
||||
const long = `${head}${tail}`;
|
||||
const files = [makeFile({ name: "TOOLS.md", content: long })];
|
||||
const warnings: string[] = [];
|
||||
const maxChars = 200;
|
||||
const expectedTailChars = Math.floor(maxChars * 0.2);
|
||||
const [result] = buildBootstrapContextFiles(files, {
|
||||
maxChars,
|
||||
warn: (message) => warnings.push(message),
|
||||
});
|
||||
expect(result?.content).toContain(
|
||||
"[...truncated, read TOOLS.md for full content...]",
|
||||
);
|
||||
expect(result?.content.length).toBeLessThan(long.length);
|
||||
expect(result?.content.startsWith(long.slice(0, 120))).toBe(true);
|
||||
expect(result?.content.endsWith(long.slice(-expectedTailChars))).toBe(true);
|
||||
expect(warnings).toHaveLength(1);
|
||||
expect(warnings[0]).toContain("TOOLS.md");
|
||||
expect(warnings[0]).toContain("limit 200");
|
||||
});
|
||||
it("keeps content under the default limit", () => {
|
||||
const long = "a".repeat(DEFAULT_BOOTSTRAP_MAX_CHARS - 10);
|
||||
const files = [makeFile({ content: long })];
|
||||
const [result] = buildBootstrapContextFiles(files);
|
||||
expect(result?.content).toBe(long);
|
||||
expect(result?.content).not.toContain(
|
||||
"[...truncated, read AGENTS.md for full content...]",
|
||||
);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,43 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { classifyFailoverReason } from "./pi-embedded-helpers.js";
|
||||
import { DEFAULT_AGENTS_FILENAME } from "./workspace.js";
|
||||
|
||||
const _makeFile = (
|
||||
overrides: Partial<WorkspaceBootstrapFile>,
|
||||
): WorkspaceBootstrapFile => ({
|
||||
name: DEFAULT_AGENTS_FILENAME,
|
||||
path: "/tmp/AGENTS.md",
|
||||
content: "",
|
||||
missing: false,
|
||||
...overrides,
|
||||
});
|
||||
describe("classifyFailoverReason", () => {
|
||||
it("returns a stable reason", () => {
|
||||
expect(classifyFailoverReason("invalid api key")).toBe("auth");
|
||||
expect(classifyFailoverReason("no credentials found")).toBe("auth");
|
||||
expect(classifyFailoverReason("no api key found")).toBe("auth");
|
||||
expect(classifyFailoverReason("429 too many requests")).toBe("rate_limit");
|
||||
expect(classifyFailoverReason("resource has been exhausted")).toBe(
|
||||
"rate_limit",
|
||||
);
|
||||
expect(
|
||||
classifyFailoverReason(
|
||||
'{"type":"error","error":{"type":"overloaded_error","message":"Overloaded"}}',
|
||||
),
|
||||
).toBe("rate_limit");
|
||||
expect(classifyFailoverReason("invalid request format")).toBe("format");
|
||||
expect(classifyFailoverReason("credit balance too low")).toBe("billing");
|
||||
expect(classifyFailoverReason("deadline exceeded")).toBe("timeout");
|
||||
expect(classifyFailoverReason("string should match pattern")).toBe(
|
||||
"format",
|
||||
);
|
||||
expect(classifyFailoverReason("bad request")).toBeNull();
|
||||
});
|
||||
it("classifies OpenAI usage limit errors as rate_limit", () => {
|
||||
expect(
|
||||
classifyFailoverReason(
|
||||
"You have hit your ChatGPT usage limit (plus plan)",
|
||||
),
|
||||
).toBe("rate_limit");
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,42 @@
|
||||
import type { AssistantMessage } from "@mariozechner/pi-ai";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { formatAssistantErrorText } from "./pi-embedded-helpers.js";
|
||||
import { DEFAULT_AGENTS_FILENAME } from "./workspace.js";
|
||||
|
||||
const _makeFile = (
|
||||
overrides: Partial<WorkspaceBootstrapFile>,
|
||||
): WorkspaceBootstrapFile => ({
|
||||
name: DEFAULT_AGENTS_FILENAME,
|
||||
path: "/tmp/AGENTS.md",
|
||||
content: "",
|
||||
missing: false,
|
||||
...overrides,
|
||||
});
|
||||
describe("formatAssistantErrorText", () => {
|
||||
const makeAssistantError = (errorMessage: string): AssistantMessage =>
|
||||
({
|
||||
stopReason: "error",
|
||||
errorMessage,
|
||||
}) as AssistantMessage;
|
||||
|
||||
it("returns a friendly message for context overflow", () => {
|
||||
const msg = makeAssistantError("request_too_large");
|
||||
expect(formatAssistantErrorText(msg)).toContain("Context overflow");
|
||||
});
|
||||
it("returns a friendly message for Anthropic role ordering", () => {
|
||||
const msg = makeAssistantError(
|
||||
'messages: roles must alternate between "user" and "assistant"',
|
||||
);
|
||||
expect(formatAssistantErrorText(msg)).toContain(
|
||||
"Message ordering conflict",
|
||||
);
|
||||
});
|
||||
it("returns a friendly message for Anthropic overload errors", () => {
|
||||
const msg = makeAssistantError(
|
||||
'{"type":"error","error":{"details":null,"type":"overloaded_error","message":"Overloaded"},"request_id":"req_123"}',
|
||||
);
|
||||
expect(formatAssistantErrorText(msg)).toBe(
|
||||
"The AI service is temporarily overloaded. Please try again in a moment.",
|
||||
);
|
||||
});
|
||||
});
|
||||
28
src/agents/pi-embedded-helpers.isautherrormessage.test.ts
Normal file
28
src/agents/pi-embedded-helpers.isautherrormessage.test.ts
Normal file
@@ -0,0 +1,28 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { isAuthErrorMessage } from "./pi-embedded-helpers.js";
|
||||
import { DEFAULT_AGENTS_FILENAME } from "./workspace.js";
|
||||
|
||||
const _makeFile = (
|
||||
overrides: Partial<WorkspaceBootstrapFile>,
|
||||
): WorkspaceBootstrapFile => ({
|
||||
name: DEFAULT_AGENTS_FILENAME,
|
||||
path: "/tmp/AGENTS.md",
|
||||
content: "",
|
||||
missing: false,
|
||||
...overrides,
|
||||
});
|
||||
describe("isAuthErrorMessage", () => {
|
||||
it("matches credential validation errors", () => {
|
||||
const samples = [
|
||||
'No credentials found for profile "anthropic:claude-cli".',
|
||||
"No API key found for profile openai.",
|
||||
];
|
||||
for (const sample of samples) {
|
||||
expect(isAuthErrorMessage(sample)).toBe(true);
|
||||
}
|
||||
});
|
||||
it("ignores unrelated errors", () => {
|
||||
expect(isAuthErrorMessage("rate limit exceeded")).toBe(false);
|
||||
expect(isAuthErrorMessage("billing issue detected")).toBe(false);
|
||||
});
|
||||
});
|
||||
33
src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts
Normal file
33
src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts
Normal file
@@ -0,0 +1,33 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { isBillingErrorMessage } from "./pi-embedded-helpers.js";
|
||||
import { DEFAULT_AGENTS_FILENAME } from "./workspace.js";
|
||||
|
||||
const _makeFile = (
|
||||
overrides: Partial<WorkspaceBootstrapFile>,
|
||||
): WorkspaceBootstrapFile => ({
|
||||
name: DEFAULT_AGENTS_FILENAME,
|
||||
path: "/tmp/AGENTS.md",
|
||||
content: "",
|
||||
missing: false,
|
||||
...overrides,
|
||||
});
|
||||
describe("isBillingErrorMessage", () => {
|
||||
it("matches credit / payment failures", () => {
|
||||
const samples = [
|
||||
"Your credit balance is too low to access the Anthropic API.",
|
||||
"insufficient credits",
|
||||
"Payment Required",
|
||||
"HTTP 402 Payment Required",
|
||||
"plans & billing",
|
||||
"billing: please upgrade your plan",
|
||||
];
|
||||
for (const sample of samples) {
|
||||
expect(isBillingErrorMessage(sample)).toBe(true);
|
||||
}
|
||||
});
|
||||
it("ignores unrelated errors", () => {
|
||||
expect(isBillingErrorMessage("rate limit exceeded")).toBe(false);
|
||||
expect(isBillingErrorMessage("invalid api key")).toBe(false);
|
||||
expect(isBillingErrorMessage("context length exceeded")).toBe(false);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,29 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { isCloudCodeAssistFormatError } from "./pi-embedded-helpers.js";
|
||||
import { DEFAULT_AGENTS_FILENAME } from "./workspace.js";
|
||||
|
||||
const _makeFile = (
|
||||
overrides: Partial<WorkspaceBootstrapFile>,
|
||||
): WorkspaceBootstrapFile => ({
|
||||
name: DEFAULT_AGENTS_FILENAME,
|
||||
path: "/tmp/AGENTS.md",
|
||||
content: "",
|
||||
missing: false,
|
||||
...overrides,
|
||||
});
|
||||
describe("isCloudCodeAssistFormatError", () => {
|
||||
it("matches format errors", () => {
|
||||
const samples = [
|
||||
"INVALID_REQUEST_ERROR: string should match pattern",
|
||||
"messages.1.content.1.tool_use.id",
|
||||
"tool_use.id should match pattern",
|
||||
"invalid request format",
|
||||
];
|
||||
for (const sample of samples) {
|
||||
expect(isCloudCodeAssistFormatError(sample)).toBe(true);
|
||||
}
|
||||
});
|
||||
it("ignores unrelated errors", () => {
|
||||
expect(isCloudCodeAssistFormatError("rate limit exceeded")).toBe(false);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,31 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { isCompactionFailureError } from "./pi-embedded-helpers.js";
|
||||
import { DEFAULT_AGENTS_FILENAME } from "./workspace.js";
|
||||
|
||||
const _makeFile = (
|
||||
overrides: Partial<WorkspaceBootstrapFile>,
|
||||
): WorkspaceBootstrapFile => ({
|
||||
name: DEFAULT_AGENTS_FILENAME,
|
||||
path: "/tmp/AGENTS.md",
|
||||
content: "",
|
||||
missing: false,
|
||||
...overrides,
|
||||
});
|
||||
describe("isCompactionFailureError", () => {
|
||||
it("matches compaction overflow failures", () => {
|
||||
const samples = [
|
||||
'Context overflow: Summarization failed: 400 {"message":"prompt is too long"}',
|
||||
"auto-compaction failed due to context overflow",
|
||||
"Compaction failed: prompt is too long",
|
||||
];
|
||||
for (const sample of samples) {
|
||||
expect(isCompactionFailureError(sample)).toBe(true);
|
||||
}
|
||||
});
|
||||
it("ignores non-compaction overflow errors", () => {
|
||||
expect(isCompactionFailureError("Context overflow: prompt too large")).toBe(
|
||||
false,
|
||||
);
|
||||
expect(isCompactionFailureError("rate limit exceeded")).toBe(false);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,32 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { isContextOverflowError } from "./pi-embedded-helpers.js";
|
||||
import { DEFAULT_AGENTS_FILENAME } from "./workspace.js";
|
||||
|
||||
const _makeFile = (
|
||||
overrides: Partial<WorkspaceBootstrapFile>,
|
||||
): WorkspaceBootstrapFile => ({
|
||||
name: DEFAULT_AGENTS_FILENAME,
|
||||
path: "/tmp/AGENTS.md",
|
||||
content: "",
|
||||
missing: false,
|
||||
...overrides,
|
||||
});
|
||||
describe("isContextOverflowError", () => {
|
||||
it("matches known overflow hints", () => {
|
||||
const samples = [
|
||||
"request_too_large",
|
||||
"Request exceeds the maximum size",
|
||||
"context length exceeded",
|
||||
"Maximum context length",
|
||||
"prompt is too long: 208423 tokens > 200000 maximum",
|
||||
"Context overflow: Summarization failed",
|
||||
"413 Request Entity Too Large",
|
||||
];
|
||||
for (const sample of samples) {
|
||||
expect(isContextOverflowError(sample)).toBe(true);
|
||||
}
|
||||
});
|
||||
it("ignores unrelated errors", () => {
|
||||
expect(isContextOverflowError("rate limit exceeded")).toBe(false);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,27 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { isFailoverErrorMessage } from "./pi-embedded-helpers.js";
|
||||
import { DEFAULT_AGENTS_FILENAME } from "./workspace.js";
|
||||
|
||||
const _makeFile = (
|
||||
overrides: Partial<WorkspaceBootstrapFile>,
|
||||
): WorkspaceBootstrapFile => ({
|
||||
name: DEFAULT_AGENTS_FILENAME,
|
||||
path: "/tmp/AGENTS.md",
|
||||
content: "",
|
||||
missing: false,
|
||||
...overrides,
|
||||
});
|
||||
describe("isFailoverErrorMessage", () => {
|
||||
it("matches auth/rate/billing/timeout", () => {
|
||||
const samples = [
|
||||
"invalid api key",
|
||||
"429 rate limit exceeded",
|
||||
"Your credit balance is too low",
|
||||
"request timed out",
|
||||
"invalid request format",
|
||||
];
|
||||
for (const sample of samples) {
|
||||
expect(isFailoverErrorMessage(sample)).toBe(true);
|
||||
}
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,64 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { isMessagingToolDuplicate } from "./pi-embedded-helpers.js";
|
||||
import { DEFAULT_AGENTS_FILENAME } from "./workspace.js";
|
||||
|
||||
const _makeFile = (
|
||||
overrides: Partial<WorkspaceBootstrapFile>,
|
||||
): WorkspaceBootstrapFile => ({
|
||||
name: DEFAULT_AGENTS_FILENAME,
|
||||
path: "/tmp/AGENTS.md",
|
||||
content: "",
|
||||
missing: false,
|
||||
...overrides,
|
||||
});
|
||||
describe("isMessagingToolDuplicate", () => {
|
||||
it("returns false for empty sentTexts", () => {
|
||||
expect(isMessagingToolDuplicate("hello world", [])).toBe(false);
|
||||
});
|
||||
it("returns false for short texts", () => {
|
||||
expect(isMessagingToolDuplicate("short", ["short"])).toBe(false);
|
||||
});
|
||||
it("detects exact duplicates", () => {
|
||||
expect(
|
||||
isMessagingToolDuplicate("Hello, this is a test message!", [
|
||||
"Hello, this is a test message!",
|
||||
]),
|
||||
).toBe(true);
|
||||
});
|
||||
it("detects duplicates with different casing", () => {
|
||||
expect(
|
||||
isMessagingToolDuplicate("HELLO, THIS IS A TEST MESSAGE!", [
|
||||
"hello, this is a test message!",
|
||||
]),
|
||||
).toBe(true);
|
||||
});
|
||||
it("detects duplicates with emoji variations", () => {
|
||||
expect(
|
||||
isMessagingToolDuplicate("Hello! 👋 This is a test message!", [
|
||||
"Hello! This is a test message!",
|
||||
]),
|
||||
).toBe(true);
|
||||
});
|
||||
it("detects substring duplicates (LLM elaboration)", () => {
|
||||
expect(
|
||||
isMessagingToolDuplicate(
|
||||
'I sent the message: "Hello, this is a test message!"',
|
||||
["Hello, this is a test message!"],
|
||||
),
|
||||
).toBe(true);
|
||||
});
|
||||
it("detects when sent text contains block reply (reverse substring)", () => {
|
||||
expect(
|
||||
isMessagingToolDuplicate("Hello, this is a test message!", [
|
||||
'I sent the message: "Hello, this is a test message!"',
|
||||
]),
|
||||
).toBe(true);
|
||||
});
|
||||
it("returns false for non-matching texts", () => {
|
||||
expect(
|
||||
isMessagingToolDuplicate("This is completely different content.", [
|
||||
"Hello, this is a test message!",
|
||||
]),
|
||||
).toBe(false);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,32 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { normalizeTextForComparison } from "./pi-embedded-helpers.js";
|
||||
import { DEFAULT_AGENTS_FILENAME } from "./workspace.js";
|
||||
|
||||
const _makeFile = (
|
||||
overrides: Partial<WorkspaceBootstrapFile>,
|
||||
): WorkspaceBootstrapFile => ({
|
||||
name: DEFAULT_AGENTS_FILENAME,
|
||||
path: "/tmp/AGENTS.md",
|
||||
content: "",
|
||||
missing: false,
|
||||
...overrides,
|
||||
});
|
||||
describe("normalizeTextForComparison", () => {
|
||||
it("lowercases text", () => {
|
||||
expect(normalizeTextForComparison("Hello World")).toBe("hello world");
|
||||
});
|
||||
it("trims whitespace", () => {
|
||||
expect(normalizeTextForComparison(" hello ")).toBe("hello");
|
||||
});
|
||||
it("collapses multiple spaces", () => {
|
||||
expect(normalizeTextForComparison("hello world")).toBe("hello world");
|
||||
});
|
||||
it("strips emoji", () => {
|
||||
expect(normalizeTextForComparison("Hello 👋 World 🌍")).toBe("hello world");
|
||||
});
|
||||
it("handles mixed normalization", () => {
|
||||
expect(normalizeTextForComparison(" Hello 👋 WORLD 🌍 ")).toBe(
|
||||
"hello world",
|
||||
);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,34 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
import {
|
||||
DEFAULT_BOOTSTRAP_MAX_CHARS,
|
||||
resolveBootstrapMaxChars,
|
||||
} from "./pi-embedded-helpers.js";
|
||||
import { DEFAULT_AGENTS_FILENAME } from "./workspace.js";
|
||||
|
||||
const _makeFile = (
|
||||
overrides: Partial<WorkspaceBootstrapFile>,
|
||||
): WorkspaceBootstrapFile => ({
|
||||
name: DEFAULT_AGENTS_FILENAME,
|
||||
path: "/tmp/AGENTS.md",
|
||||
content: "",
|
||||
missing: false,
|
||||
...overrides,
|
||||
});
|
||||
describe("resolveBootstrapMaxChars", () => {
|
||||
it("returns default when unset", () => {
|
||||
expect(resolveBootstrapMaxChars()).toBe(DEFAULT_BOOTSTRAP_MAX_CHARS);
|
||||
});
|
||||
it("uses configured value when valid", () => {
|
||||
const cfg = {
|
||||
agents: { defaults: { bootstrapMaxChars: 12345 } },
|
||||
} as ClawdbotConfig;
|
||||
expect(resolveBootstrapMaxChars(cfg)).toBe(12345);
|
||||
});
|
||||
it("falls back when invalid", () => {
|
||||
const cfg = {
|
||||
agents: { defaults: { bootstrapMaxChars: -1 } },
|
||||
} as ClawdbotConfig;
|
||||
expect(resolveBootstrapMaxChars(cfg)).toBe(DEFAULT_BOOTSTRAP_MAX_CHARS);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,125 @@
|
||||
import type { AgentMessage } from "@mariozechner/pi-agent-core";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { sanitizeSessionMessagesImages } from "./pi-embedded-helpers.js";
|
||||
import { DEFAULT_AGENTS_FILENAME } from "./workspace.js";
|
||||
|
||||
const _makeFile = (
|
||||
overrides: Partial<WorkspaceBootstrapFile>,
|
||||
): WorkspaceBootstrapFile => ({
|
||||
name: DEFAULT_AGENTS_FILENAME,
|
||||
path: "/tmp/AGENTS.md",
|
||||
content: "",
|
||||
missing: false,
|
||||
...overrides,
|
||||
});
|
||||
describe("sanitizeSessionMessagesImages", () => {
|
||||
it("removes empty assistant text blocks but preserves tool calls", async () => {
|
||||
const input = [
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{ type: "text", text: "" },
|
||||
{ type: "toolCall", id: "call_1", name: "read", arguments: {} },
|
||||
],
|
||||
},
|
||||
] satisfies AgentMessage[];
|
||||
|
||||
const out = await sanitizeSessionMessagesImages(input, "test");
|
||||
|
||||
expect(out).toHaveLength(1);
|
||||
const content = (out[0] as { content?: unknown }).content;
|
||||
expect(Array.isArray(content)).toBe(true);
|
||||
expect(content).toHaveLength(1);
|
||||
expect((content as Array<{ type?: string }>)[0]?.type).toBe("toolCall");
|
||||
});
|
||||
it("sanitizes tool ids for assistant blocks and tool results when enabled", async () => {
|
||||
const input = [
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{ type: "toolUse", id: "call_abc|item:123", name: "test", input: {} },
|
||||
{
|
||||
type: "toolCall",
|
||||
id: "call_abc|item:456",
|
||||
name: "exec",
|
||||
arguments: {},
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
role: "toolResult",
|
||||
toolUseId: "call_abc|item:123",
|
||||
content: [{ type: "text", text: "ok" }],
|
||||
},
|
||||
] satisfies AgentMessage[];
|
||||
|
||||
const out = await sanitizeSessionMessagesImages(input, "test", {
|
||||
sanitizeToolCallIds: true,
|
||||
});
|
||||
|
||||
const assistant = out[0] as { content?: Array<{ id?: string }> };
|
||||
expect(assistant.content?.[0]?.id).toBe("call_abc_item_123");
|
||||
expect(assistant.content?.[1]?.id).toBe("call_abc_item_456");
|
||||
|
||||
const toolResult = out[1] as { toolUseId?: string };
|
||||
expect(toolResult.toolUseId).toBe("call_abc_item_123");
|
||||
});
|
||||
it("filters whitespace-only assistant text blocks", async () => {
|
||||
const input = [
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{ type: "text", text: " " },
|
||||
{ type: "text", text: "ok" },
|
||||
],
|
||||
},
|
||||
] satisfies AgentMessage[];
|
||||
|
||||
const out = await sanitizeSessionMessagesImages(input, "test");
|
||||
|
||||
expect(out).toHaveLength(1);
|
||||
const content = (out[0] as { content?: unknown }).content;
|
||||
expect(Array.isArray(content)).toBe(true);
|
||||
expect(content).toHaveLength(1);
|
||||
expect((content as Array<{ text?: string }>)[0]?.text).toBe("ok");
|
||||
});
|
||||
it("drops assistant messages that only contain empty text", async () => {
|
||||
const input = [
|
||||
{ role: "user", content: "hello" },
|
||||
{ role: "assistant", content: [{ type: "text", text: "" }] },
|
||||
] satisfies AgentMessage[];
|
||||
|
||||
const out = await sanitizeSessionMessagesImages(input, "test");
|
||||
|
||||
expect(out).toHaveLength(1);
|
||||
expect(out[0]?.role).toBe("user");
|
||||
});
|
||||
it("drops empty assistant error messages", async () => {
|
||||
const input = [
|
||||
{ role: "user", content: "hello" },
|
||||
{ role: "assistant", stopReason: "error", content: [] },
|
||||
{ role: "assistant", stopReason: "error" },
|
||||
] satisfies AgentMessage[];
|
||||
|
||||
const out = await sanitizeSessionMessagesImages(input, "test");
|
||||
|
||||
expect(out).toHaveLength(1);
|
||||
expect(out[0]?.role).toBe("user");
|
||||
});
|
||||
it("leaves non-assistant messages unchanged", async () => {
|
||||
const input = [
|
||||
{ role: "user", content: "hello" },
|
||||
{
|
||||
role: "toolResult",
|
||||
toolCallId: "tool-1",
|
||||
content: [{ type: "text", text: "result" }],
|
||||
},
|
||||
] satisfies AgentMessage[];
|
||||
|
||||
const out = await sanitizeSessionMessagesImages(input, "test");
|
||||
|
||||
expect(out).toHaveLength(2);
|
||||
expect(out[0]?.role).toBe("user");
|
||||
expect(out[1]?.role).toBe("toolResult");
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,137 @@
|
||||
import type { AgentMessage } from "@mariozechner/pi-agent-core";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { sanitizeSessionMessagesImages } from "./pi-embedded-helpers.js";
|
||||
import { DEFAULT_AGENTS_FILENAME } from "./workspace.js";
|
||||
|
||||
const _makeFile = (
|
||||
overrides: Partial<WorkspaceBootstrapFile>,
|
||||
): WorkspaceBootstrapFile => ({
|
||||
name: DEFAULT_AGENTS_FILENAME,
|
||||
path: "/tmp/AGENTS.md",
|
||||
content: "",
|
||||
missing: false,
|
||||
...overrides,
|
||||
});
|
||||
describe("sanitizeSessionMessagesImages", () => {
|
||||
it("keeps tool call + tool result IDs unchanged by default", async () => {
|
||||
const input = [
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{
|
||||
type: "toolCall",
|
||||
id: "call_123|fc_456",
|
||||
name: "read",
|
||||
arguments: { path: "package.json" },
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
role: "toolResult",
|
||||
toolCallId: "call_123|fc_456",
|
||||
toolName: "read",
|
||||
content: [{ type: "text", text: "ok" }],
|
||||
isError: false,
|
||||
},
|
||||
] satisfies AgentMessage[];
|
||||
|
||||
const out = await sanitizeSessionMessagesImages(input, "test");
|
||||
|
||||
const assistant = out[0] as unknown as { role?: string; content?: unknown };
|
||||
expect(assistant.role).toBe("assistant");
|
||||
expect(Array.isArray(assistant.content)).toBe(true);
|
||||
const toolCall = (
|
||||
assistant.content as Array<{ type?: string; id?: string }>
|
||||
).find((b) => b.type === "toolCall");
|
||||
expect(toolCall?.id).toBe("call_123|fc_456");
|
||||
|
||||
const toolResult = out[1] as unknown as {
|
||||
role?: string;
|
||||
toolCallId?: string;
|
||||
};
|
||||
expect(toolResult.role).toBe("toolResult");
|
||||
expect(toolResult.toolCallId).toBe("call_123|fc_456");
|
||||
});
|
||||
it("sanitizes tool call + tool result IDs when enabled", async () => {
|
||||
const input = [
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{
|
||||
type: "toolCall",
|
||||
id: "call_123|fc_456",
|
||||
name: "read",
|
||||
arguments: { path: "package.json" },
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
role: "toolResult",
|
||||
toolCallId: "call_123|fc_456",
|
||||
toolName: "read",
|
||||
content: [{ type: "text", text: "ok" }],
|
||||
isError: false,
|
||||
},
|
||||
] satisfies AgentMessage[];
|
||||
|
||||
const out = await sanitizeSessionMessagesImages(input, "test", {
|
||||
sanitizeToolCallIds: true,
|
||||
});
|
||||
|
||||
const assistant = out[0] as unknown as { role?: string; content?: unknown };
|
||||
expect(assistant.role).toBe("assistant");
|
||||
expect(Array.isArray(assistant.content)).toBe(true);
|
||||
const toolCall = (
|
||||
assistant.content as Array<{ type?: string; id?: string }>
|
||||
).find((b) => b.type === "toolCall");
|
||||
expect(toolCall?.id).toBe("call_123_fc_456");
|
||||
|
||||
const toolResult = out[1] as unknown as {
|
||||
role?: string;
|
||||
toolCallId?: string;
|
||||
};
|
||||
expect(toolResult.role).toBe("toolResult");
|
||||
expect(toolResult.toolCallId).toBe("call_123_fc_456");
|
||||
});
|
||||
it("drops assistant blocks after a tool call when enforceToolCallLast is enabled", async () => {
|
||||
const input = [
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{ type: "text", text: "before" },
|
||||
{ type: "toolCall", id: "call_1", name: "read", arguments: {} },
|
||||
{ type: "thinking", thinking: "after", thinkingSignature: "sig" },
|
||||
{ type: "text", text: "after text" },
|
||||
],
|
||||
},
|
||||
] satisfies AgentMessage[];
|
||||
|
||||
const out = await sanitizeSessionMessagesImages(input, "test", {
|
||||
enforceToolCallLast: true,
|
||||
});
|
||||
const assistant = out[0] as { content?: Array<{ type?: string }> };
|
||||
expect(assistant.content?.map((b) => b.type)).toEqual(["text", "toolCall"]);
|
||||
});
|
||||
it("keeps assistant blocks after a tool call when enforceToolCallLast is disabled", async () => {
|
||||
const input = [
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{ type: "text", text: "before" },
|
||||
{ type: "toolCall", id: "call_1", name: "read", arguments: {} },
|
||||
{ type: "thinking", thinking: "after", thinkingSignature: "sig" },
|
||||
{ type: "text", text: "after text" },
|
||||
],
|
||||
},
|
||||
] satisfies AgentMessage[];
|
||||
|
||||
const out = await sanitizeSessionMessagesImages(input, "test");
|
||||
const assistant = out[0] as { content?: Array<{ type?: string }> };
|
||||
expect(assistant.content?.map((b) => b.type)).toEqual([
|
||||
"text",
|
||||
"toolCall",
|
||||
"thinking",
|
||||
"text",
|
||||
]);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,35 @@
|
||||
import type { AgentMessage } from "@mariozechner/pi-agent-core";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { sanitizeGoogleTurnOrdering } from "./pi-embedded-helpers.js";
|
||||
import { DEFAULT_AGENTS_FILENAME } from "./workspace.js";
|
||||
|
||||
const _makeFile = (
|
||||
overrides: Partial<WorkspaceBootstrapFile>,
|
||||
): WorkspaceBootstrapFile => ({
|
||||
name: DEFAULT_AGENTS_FILENAME,
|
||||
path: "/tmp/AGENTS.md",
|
||||
content: "",
|
||||
missing: false,
|
||||
...overrides,
|
||||
});
|
||||
describe("sanitizeGoogleTurnOrdering", () => {
|
||||
it("prepends a synthetic user turn when history starts with assistant", () => {
|
||||
const input = [
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{ type: "toolCall", id: "call_1", name: "exec", arguments: {} },
|
||||
],
|
||||
},
|
||||
] satisfies AgentMessage[];
|
||||
|
||||
const out = sanitizeGoogleTurnOrdering(input);
|
||||
expect(out[0]?.role).toBe("user");
|
||||
expect(out[1]?.role).toBe("assistant");
|
||||
});
|
||||
it("is a no-op when history starts with user", () => {
|
||||
const input = [{ role: "user", content: "hi" }] satisfies AgentMessage[];
|
||||
const out = sanitizeGoogleTurnOrdering(input);
|
||||
expect(out).toBe(input);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,41 @@
|
||||
import type { AgentMessage } from "@mariozechner/pi-agent-core";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { sanitizeSessionMessagesImages } from "./pi-embedded-helpers.js";
|
||||
import { DEFAULT_AGENTS_FILENAME } from "./workspace.js";
|
||||
|
||||
const _makeFile = (
|
||||
overrides: Partial<WorkspaceBootstrapFile>,
|
||||
): WorkspaceBootstrapFile => ({
|
||||
name: DEFAULT_AGENTS_FILENAME,
|
||||
path: "/tmp/AGENTS.md",
|
||||
content: "",
|
||||
missing: false,
|
||||
...overrides,
|
||||
});
|
||||
describe("sanitizeSessionMessagesImages - thought_signature stripping", () => {
|
||||
it("strips msg_-prefixed thought_signature from assistant message content blocks", async () => {
|
||||
const input = [
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{ type: "text", text: "hello", thought_signature: "msg_abc123" },
|
||||
{
|
||||
type: "thinking",
|
||||
thinking: "reasoning",
|
||||
thought_signature: "AQID",
|
||||
},
|
||||
],
|
||||
},
|
||||
] satisfies AgentMessage[];
|
||||
|
||||
const out = await sanitizeSessionMessagesImages(input, "test");
|
||||
|
||||
expect(out).toHaveLength(1);
|
||||
const content = (out[0] as { content?: unknown[] }).content;
|
||||
expect(content).toHaveLength(2);
|
||||
expect("thought_signature" in ((content?.[0] ?? {}) as object)).toBe(false);
|
||||
expect(
|
||||
(content?.[1] as { thought_signature?: unknown })?.thought_signature,
|
||||
).toBe("AQID");
|
||||
});
|
||||
});
|
||||
24
src/agents/pi-embedded-helpers.sanitizetoolcallid.test.ts
Normal file
24
src/agents/pi-embedded-helpers.sanitizetoolcallid.test.ts
Normal file
@@ -0,0 +1,24 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { sanitizeToolCallId } from "./pi-embedded-helpers.js";
|
||||
import { DEFAULT_AGENTS_FILENAME } from "./workspace.js";
|
||||
|
||||
const _makeFile = (
|
||||
overrides: Partial<WorkspaceBootstrapFile>,
|
||||
): WorkspaceBootstrapFile => ({
|
||||
name: DEFAULT_AGENTS_FILENAME,
|
||||
path: "/tmp/AGENTS.md",
|
||||
content: "",
|
||||
missing: false,
|
||||
...overrides,
|
||||
});
|
||||
describe("sanitizeToolCallId", () => {
|
||||
it("keeps valid tool call IDs", () => {
|
||||
expect(sanitizeToolCallId("call_abc-123")).toBe("call_abc-123");
|
||||
});
|
||||
it("replaces invalid characters with underscores", () => {
|
||||
expect(sanitizeToolCallId("call_abc|item:456")).toBe("call_abc_item_456");
|
||||
});
|
||||
it("returns default for empty IDs", () => {
|
||||
expect(sanitizeToolCallId("")).toBe("default_tool_id");
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,69 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { stripThoughtSignatures } from "./pi-embedded-helpers.js";
|
||||
import { DEFAULT_AGENTS_FILENAME } from "./workspace.js";
|
||||
|
||||
const _makeFile = (
|
||||
overrides: Partial<WorkspaceBootstrapFile>,
|
||||
): WorkspaceBootstrapFile => ({
|
||||
name: DEFAULT_AGENTS_FILENAME,
|
||||
path: "/tmp/AGENTS.md",
|
||||
content: "",
|
||||
missing: false,
|
||||
...overrides,
|
||||
});
|
||||
describe("stripThoughtSignatures", () => {
|
||||
it("returns non-array content unchanged", () => {
|
||||
expect(stripThoughtSignatures("hello")).toBe("hello");
|
||||
expect(stripThoughtSignatures(null)).toBe(null);
|
||||
expect(stripThoughtSignatures(undefined)).toBe(undefined);
|
||||
expect(stripThoughtSignatures(123)).toBe(123);
|
||||
});
|
||||
it("removes msg_-prefixed thought_signature from content blocks", () => {
|
||||
const input = [
|
||||
{ type: "text", text: "hello", thought_signature: "msg_abc123" },
|
||||
{ type: "thinking", thinking: "test", thought_signature: "AQID" },
|
||||
];
|
||||
const result = stripThoughtSignatures(input);
|
||||
|
||||
expect(result).toHaveLength(2);
|
||||
expect(result[0]).toEqual({ type: "text", text: "hello" });
|
||||
expect(result[1]).toEqual({
|
||||
type: "thinking",
|
||||
thinking: "test",
|
||||
thought_signature: "AQID",
|
||||
});
|
||||
expect("thought_signature" in result[0]).toBe(false);
|
||||
expect("thought_signature" in result[1]).toBe(true);
|
||||
});
|
||||
it("preserves blocks without thought_signature", () => {
|
||||
const input = [
|
||||
{ type: "text", text: "hello" },
|
||||
{ type: "toolCall", id: "call_1", name: "read", arguments: {} },
|
||||
];
|
||||
const result = stripThoughtSignatures(input);
|
||||
|
||||
expect(result).toEqual(input);
|
||||
});
|
||||
it("handles mixed blocks with and without thought_signature", () => {
|
||||
const input = [
|
||||
{ type: "text", text: "hello", thought_signature: "msg_abc" },
|
||||
{ type: "toolCall", id: "call_1", name: "read", arguments: {} },
|
||||
{ type: "thinking", thinking: "hmm", thought_signature: "msg_xyz" },
|
||||
];
|
||||
const result = stripThoughtSignatures(input);
|
||||
|
||||
expect(result).toEqual([
|
||||
{ type: "text", text: "hello" },
|
||||
{ type: "toolCall", id: "call_1", name: "read", arguments: {} },
|
||||
{ type: "thinking", thinking: "hmm" },
|
||||
]);
|
||||
});
|
||||
it("handles empty array", () => {
|
||||
expect(stripThoughtSignatures([])).toEqual([]);
|
||||
});
|
||||
it("handles null/undefined blocks in array", () => {
|
||||
const input = [null, undefined, { type: "text", text: "hello" }];
|
||||
const result = stripThoughtSignatures(input);
|
||||
expect(result).toEqual([null, undefined, { type: "text", text: "hello" }]);
|
||||
});
|
||||
});
|
||||
@@ -1,734 +0,0 @@
|
||||
import type { AgentMessage } from "@mariozechner/pi-agent-core";
|
||||
import type { AssistantMessage } from "@mariozechner/pi-ai";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
import {
|
||||
buildBootstrapContextFiles,
|
||||
classifyFailoverReason,
|
||||
DEFAULT_BOOTSTRAP_MAX_CHARS,
|
||||
formatAssistantErrorText,
|
||||
isAuthErrorMessage,
|
||||
isBillingErrorMessage,
|
||||
isCloudCodeAssistFormatError,
|
||||
isCompactionFailureError,
|
||||
isContextOverflowError,
|
||||
isFailoverErrorMessage,
|
||||
isMessagingToolDuplicate,
|
||||
normalizeTextForComparison,
|
||||
resolveBootstrapMaxChars,
|
||||
sanitizeGoogleTurnOrdering,
|
||||
sanitizeSessionMessagesImages,
|
||||
sanitizeToolCallId,
|
||||
stripThoughtSignatures,
|
||||
} from "./pi-embedded-helpers.js";
|
||||
import {
|
||||
DEFAULT_AGENTS_FILENAME,
|
||||
type WorkspaceBootstrapFile,
|
||||
} from "./workspace.js";
|
||||
|
||||
const makeFile = (
|
||||
overrides: Partial<WorkspaceBootstrapFile>,
|
||||
): WorkspaceBootstrapFile => ({
|
||||
name: DEFAULT_AGENTS_FILENAME,
|
||||
path: "/tmp/AGENTS.md",
|
||||
content: "",
|
||||
missing: false,
|
||||
...overrides,
|
||||
});
|
||||
describe("buildBootstrapContextFiles", () => {
|
||||
it("keeps missing markers", () => {
|
||||
const files = [makeFile({ missing: true, content: undefined })];
|
||||
expect(buildBootstrapContextFiles(files)).toEqual([
|
||||
{
|
||||
path: DEFAULT_AGENTS_FILENAME,
|
||||
content: "[MISSING] Expected at: /tmp/AGENTS.md",
|
||||
},
|
||||
]);
|
||||
});
|
||||
|
||||
it("skips empty or whitespace-only content", () => {
|
||||
const files = [makeFile({ content: " \n " })];
|
||||
expect(buildBootstrapContextFiles(files)).toEqual([]);
|
||||
});
|
||||
|
||||
it("truncates large bootstrap content", () => {
|
||||
const head = `HEAD-${"a".repeat(600)}`;
|
||||
const tail = `${"b".repeat(300)}-TAIL`;
|
||||
const long = `${head}${tail}`;
|
||||
const files = [makeFile({ name: "TOOLS.md", content: long })];
|
||||
const warnings: string[] = [];
|
||||
const maxChars = 200;
|
||||
const expectedTailChars = Math.floor(maxChars * 0.2);
|
||||
const [result] = buildBootstrapContextFiles(files, {
|
||||
maxChars,
|
||||
warn: (message) => warnings.push(message),
|
||||
});
|
||||
expect(result?.content).toContain(
|
||||
"[...truncated, read TOOLS.md for full content...]",
|
||||
);
|
||||
expect(result?.content.length).toBeLessThan(long.length);
|
||||
expect(result?.content.startsWith(long.slice(0, 120))).toBe(true);
|
||||
expect(result?.content.endsWith(long.slice(-expectedTailChars))).toBe(true);
|
||||
expect(warnings).toHaveLength(1);
|
||||
expect(warnings[0]).toContain("TOOLS.md");
|
||||
expect(warnings[0]).toContain("limit 200");
|
||||
});
|
||||
|
||||
it("keeps content under the default limit", () => {
|
||||
const long = "a".repeat(DEFAULT_BOOTSTRAP_MAX_CHARS - 10);
|
||||
const files = [makeFile({ content: long })];
|
||||
const [result] = buildBootstrapContextFiles(files);
|
||||
expect(result?.content).toBe(long);
|
||||
expect(result?.content).not.toContain(
|
||||
"[...truncated, read AGENTS.md for full content...]",
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("resolveBootstrapMaxChars", () => {
|
||||
it("returns default when unset", () => {
|
||||
expect(resolveBootstrapMaxChars()).toBe(DEFAULT_BOOTSTRAP_MAX_CHARS);
|
||||
});
|
||||
|
||||
it("uses configured value when valid", () => {
|
||||
const cfg = {
|
||||
agents: { defaults: { bootstrapMaxChars: 12345 } },
|
||||
} as ClawdbotConfig;
|
||||
expect(resolveBootstrapMaxChars(cfg)).toBe(12345);
|
||||
});
|
||||
|
||||
it("falls back when invalid", () => {
|
||||
const cfg = {
|
||||
agents: { defaults: { bootstrapMaxChars: -1 } },
|
||||
} as ClawdbotConfig;
|
||||
expect(resolveBootstrapMaxChars(cfg)).toBe(DEFAULT_BOOTSTRAP_MAX_CHARS);
|
||||
});
|
||||
});
|
||||
|
||||
describe("isContextOverflowError", () => {
|
||||
it("matches known overflow hints", () => {
|
||||
const samples = [
|
||||
"request_too_large",
|
||||
"Request exceeds the maximum size",
|
||||
"context length exceeded",
|
||||
"Maximum context length",
|
||||
"prompt is too long: 208423 tokens > 200000 maximum",
|
||||
"Context overflow: Summarization failed",
|
||||
"413 Request Entity Too Large",
|
||||
];
|
||||
for (const sample of samples) {
|
||||
expect(isContextOverflowError(sample)).toBe(true);
|
||||
}
|
||||
});
|
||||
|
||||
it("ignores unrelated errors", () => {
|
||||
expect(isContextOverflowError("rate limit exceeded")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("isCompactionFailureError", () => {
|
||||
it("matches compaction overflow failures", () => {
|
||||
const samples = [
|
||||
'Context overflow: Summarization failed: 400 {"message":"prompt is too long"}',
|
||||
"auto-compaction failed due to context overflow",
|
||||
"Compaction failed: prompt is too long",
|
||||
];
|
||||
for (const sample of samples) {
|
||||
expect(isCompactionFailureError(sample)).toBe(true);
|
||||
}
|
||||
});
|
||||
|
||||
it("ignores non-compaction overflow errors", () => {
|
||||
expect(isCompactionFailureError("Context overflow: prompt too large")).toBe(
|
||||
false,
|
||||
);
|
||||
expect(isCompactionFailureError("rate limit exceeded")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("isBillingErrorMessage", () => {
|
||||
it("matches credit / payment failures", () => {
|
||||
const samples = [
|
||||
"Your credit balance is too low to access the Anthropic API.",
|
||||
"insufficient credits",
|
||||
"Payment Required",
|
||||
"HTTP 402 Payment Required",
|
||||
"plans & billing",
|
||||
"billing: please upgrade your plan",
|
||||
];
|
||||
for (const sample of samples) {
|
||||
expect(isBillingErrorMessage(sample)).toBe(true);
|
||||
}
|
||||
});
|
||||
|
||||
it("ignores unrelated errors", () => {
|
||||
expect(isBillingErrorMessage("rate limit exceeded")).toBe(false);
|
||||
expect(isBillingErrorMessage("invalid api key")).toBe(false);
|
||||
expect(isBillingErrorMessage("context length exceeded")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("isAuthErrorMessage", () => {
|
||||
it("matches credential validation errors", () => {
|
||||
const samples = [
|
||||
'No credentials found for profile "anthropic:claude-cli".',
|
||||
"No API key found for profile openai.",
|
||||
];
|
||||
for (const sample of samples) {
|
||||
expect(isAuthErrorMessage(sample)).toBe(true);
|
||||
}
|
||||
});
|
||||
|
||||
it("ignores unrelated errors", () => {
|
||||
expect(isAuthErrorMessage("rate limit exceeded")).toBe(false);
|
||||
expect(isAuthErrorMessage("billing issue detected")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("isFailoverErrorMessage", () => {
|
||||
it("matches auth/rate/billing/timeout", () => {
|
||||
const samples = [
|
||||
"invalid api key",
|
||||
"429 rate limit exceeded",
|
||||
"Your credit balance is too low",
|
||||
"request timed out",
|
||||
"invalid request format",
|
||||
];
|
||||
for (const sample of samples) {
|
||||
expect(isFailoverErrorMessage(sample)).toBe(true);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
describe("classifyFailoverReason", () => {
|
||||
it("returns a stable reason", () => {
|
||||
expect(classifyFailoverReason("invalid api key")).toBe("auth");
|
||||
expect(classifyFailoverReason("no credentials found")).toBe("auth");
|
||||
expect(classifyFailoverReason("no api key found")).toBe("auth");
|
||||
expect(classifyFailoverReason("429 too many requests")).toBe("rate_limit");
|
||||
expect(classifyFailoverReason("resource has been exhausted")).toBe(
|
||||
"rate_limit",
|
||||
);
|
||||
expect(
|
||||
classifyFailoverReason(
|
||||
'{"type":"error","error":{"type":"overloaded_error","message":"Overloaded"}}',
|
||||
),
|
||||
).toBe("rate_limit");
|
||||
expect(classifyFailoverReason("invalid request format")).toBe("format");
|
||||
expect(classifyFailoverReason("credit balance too low")).toBe("billing");
|
||||
expect(classifyFailoverReason("deadline exceeded")).toBe("timeout");
|
||||
expect(classifyFailoverReason("string should match pattern")).toBe(
|
||||
"format",
|
||||
);
|
||||
expect(classifyFailoverReason("bad request")).toBeNull();
|
||||
});
|
||||
|
||||
it("classifies OpenAI usage limit errors as rate_limit", () => {
|
||||
expect(
|
||||
classifyFailoverReason(
|
||||
"You have hit your ChatGPT usage limit (plus plan)",
|
||||
),
|
||||
).toBe("rate_limit");
|
||||
});
|
||||
});
|
||||
|
||||
describe("isCloudCodeAssistFormatError", () => {
|
||||
it("matches format errors", () => {
|
||||
const samples = [
|
||||
"INVALID_REQUEST_ERROR: string should match pattern",
|
||||
"messages.1.content.1.tool_use.id",
|
||||
"tool_use.id should match pattern",
|
||||
"invalid request format",
|
||||
];
|
||||
for (const sample of samples) {
|
||||
expect(isCloudCodeAssistFormatError(sample)).toBe(true);
|
||||
}
|
||||
});
|
||||
|
||||
it("ignores unrelated errors", () => {
|
||||
expect(isCloudCodeAssistFormatError("rate limit exceeded")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("formatAssistantErrorText", () => {
|
||||
const makeAssistantError = (errorMessage: string): AssistantMessage =>
|
||||
({
|
||||
stopReason: "error",
|
||||
errorMessage,
|
||||
}) as AssistantMessage;
|
||||
|
||||
it("returns a friendly message for context overflow", () => {
|
||||
const msg = makeAssistantError("request_too_large");
|
||||
expect(formatAssistantErrorText(msg)).toContain("Context overflow");
|
||||
});
|
||||
|
||||
it("returns a friendly message for Anthropic role ordering", () => {
|
||||
const msg = makeAssistantError(
|
||||
'messages: roles must alternate between "user" and "assistant"',
|
||||
);
|
||||
expect(formatAssistantErrorText(msg)).toContain(
|
||||
"Message ordering conflict",
|
||||
);
|
||||
});
|
||||
|
||||
it("returns a friendly message for Anthropic overload errors", () => {
|
||||
const msg = makeAssistantError(
|
||||
'{"type":"error","error":{"details":null,"type":"overloaded_error","message":"Overloaded"},"request_id":"req_123"}',
|
||||
);
|
||||
expect(formatAssistantErrorText(msg)).toBe(
|
||||
"The AI service is temporarily overloaded. Please try again in a moment.",
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("sanitizeToolCallId", () => {
|
||||
it("keeps valid tool call IDs", () => {
|
||||
expect(sanitizeToolCallId("call_abc-123")).toBe("call_abc-123");
|
||||
});
|
||||
|
||||
it("replaces invalid characters with underscores", () => {
|
||||
expect(sanitizeToolCallId("call_abc|item:456")).toBe("call_abc_item_456");
|
||||
});
|
||||
|
||||
it("returns default for empty IDs", () => {
|
||||
expect(sanitizeToolCallId("")).toBe("default_tool_id");
|
||||
});
|
||||
});
|
||||
|
||||
describe("sanitizeGoogleTurnOrdering", () => {
|
||||
it("prepends a synthetic user turn when history starts with assistant", () => {
|
||||
const input = [
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{ type: "toolCall", id: "call_1", name: "exec", arguments: {} },
|
||||
],
|
||||
},
|
||||
] satisfies AgentMessage[];
|
||||
|
||||
const out = sanitizeGoogleTurnOrdering(input);
|
||||
expect(out[0]?.role).toBe("user");
|
||||
expect(out[1]?.role).toBe("assistant");
|
||||
});
|
||||
|
||||
it("is a no-op when history starts with user", () => {
|
||||
const input = [{ role: "user", content: "hi" }] satisfies AgentMessage[];
|
||||
const out = sanitizeGoogleTurnOrdering(input);
|
||||
expect(out).toBe(input);
|
||||
});
|
||||
});
|
||||
|
||||
describe("sanitizeSessionMessagesImages", () => {
|
||||
it("removes empty assistant text blocks but preserves tool calls", async () => {
|
||||
const input = [
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{ type: "text", text: "" },
|
||||
{ type: "toolCall", id: "call_1", name: "read", arguments: {} },
|
||||
],
|
||||
},
|
||||
] satisfies AgentMessage[];
|
||||
|
||||
const out = await sanitizeSessionMessagesImages(input, "test");
|
||||
|
||||
expect(out).toHaveLength(1);
|
||||
const content = (out[0] as { content?: unknown }).content;
|
||||
expect(Array.isArray(content)).toBe(true);
|
||||
expect(content).toHaveLength(1);
|
||||
expect((content as Array<{ type?: string }>)[0]?.type).toBe("toolCall");
|
||||
});
|
||||
|
||||
it("sanitizes tool ids for assistant blocks and tool results when enabled", async () => {
|
||||
const input = [
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{ type: "toolUse", id: "call_abc|item:123", name: "test", input: {} },
|
||||
{
|
||||
type: "toolCall",
|
||||
id: "call_abc|item:456",
|
||||
name: "exec",
|
||||
arguments: {},
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
role: "toolResult",
|
||||
toolUseId: "call_abc|item:123",
|
||||
content: [{ type: "text", text: "ok" }],
|
||||
},
|
||||
] satisfies AgentMessage[];
|
||||
|
||||
const out = await sanitizeSessionMessagesImages(input, "test", {
|
||||
sanitizeToolCallIds: true,
|
||||
});
|
||||
|
||||
const assistant = out[0] as { content?: Array<{ id?: string }> };
|
||||
expect(assistant.content?.[0]?.id).toBe("call_abc_item_123");
|
||||
expect(assistant.content?.[1]?.id).toBe("call_abc_item_456");
|
||||
|
||||
const toolResult = out[1] as { toolUseId?: string };
|
||||
expect(toolResult.toolUseId).toBe("call_abc_item_123");
|
||||
});
|
||||
|
||||
it("filters whitespace-only assistant text blocks", async () => {
|
||||
const input = [
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{ type: "text", text: " " },
|
||||
{ type: "text", text: "ok" },
|
||||
],
|
||||
},
|
||||
] satisfies AgentMessage[];
|
||||
|
||||
const out = await sanitizeSessionMessagesImages(input, "test");
|
||||
|
||||
expect(out).toHaveLength(1);
|
||||
const content = (out[0] as { content?: unknown }).content;
|
||||
expect(Array.isArray(content)).toBe(true);
|
||||
expect(content).toHaveLength(1);
|
||||
expect((content as Array<{ text?: string }>)[0]?.text).toBe("ok");
|
||||
});
|
||||
|
||||
it("drops assistant messages that only contain empty text", async () => {
|
||||
const input = [
|
||||
{ role: "user", content: "hello" },
|
||||
{ role: "assistant", content: [{ type: "text", text: "" }] },
|
||||
] satisfies AgentMessage[];
|
||||
|
||||
const out = await sanitizeSessionMessagesImages(input, "test");
|
||||
|
||||
expect(out).toHaveLength(1);
|
||||
expect(out[0]?.role).toBe("user");
|
||||
});
|
||||
|
||||
it("drops empty assistant error messages", async () => {
|
||||
const input = [
|
||||
{ role: "user", content: "hello" },
|
||||
{ role: "assistant", stopReason: "error", content: [] },
|
||||
{ role: "assistant", stopReason: "error" },
|
||||
] satisfies AgentMessage[];
|
||||
|
||||
const out = await sanitizeSessionMessagesImages(input, "test");
|
||||
|
||||
expect(out).toHaveLength(1);
|
||||
expect(out[0]?.role).toBe("user");
|
||||
});
|
||||
|
||||
it("leaves non-assistant messages unchanged", async () => {
|
||||
const input = [
|
||||
{ role: "user", content: "hello" },
|
||||
{
|
||||
role: "toolResult",
|
||||
toolCallId: "tool-1",
|
||||
content: [{ type: "text", text: "result" }],
|
||||
},
|
||||
] satisfies AgentMessage[];
|
||||
|
||||
const out = await sanitizeSessionMessagesImages(input, "test");
|
||||
|
||||
expect(out).toHaveLength(2);
|
||||
expect(out[0]?.role).toBe("user");
|
||||
expect(out[1]?.role).toBe("toolResult");
|
||||
});
|
||||
|
||||
it("keeps tool call + tool result IDs unchanged by default", async () => {
|
||||
const input = [
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{
|
||||
type: "toolCall",
|
||||
id: "call_123|fc_456",
|
||||
name: "read",
|
||||
arguments: { path: "package.json" },
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
role: "toolResult",
|
||||
toolCallId: "call_123|fc_456",
|
||||
toolName: "read",
|
||||
content: [{ type: "text", text: "ok" }],
|
||||
isError: false,
|
||||
},
|
||||
] satisfies AgentMessage[];
|
||||
|
||||
const out = await sanitizeSessionMessagesImages(input, "test");
|
||||
|
||||
const assistant = out[0] as unknown as { role?: string; content?: unknown };
|
||||
expect(assistant.role).toBe("assistant");
|
||||
expect(Array.isArray(assistant.content)).toBe(true);
|
||||
const toolCall = (
|
||||
assistant.content as Array<{ type?: string; id?: string }>
|
||||
).find((b) => b.type === "toolCall");
|
||||
expect(toolCall?.id).toBe("call_123|fc_456");
|
||||
|
||||
const toolResult = out[1] as unknown as {
|
||||
role?: string;
|
||||
toolCallId?: string;
|
||||
};
|
||||
expect(toolResult.role).toBe("toolResult");
|
||||
expect(toolResult.toolCallId).toBe("call_123|fc_456");
|
||||
});
|
||||
|
||||
it("sanitizes tool call + tool result IDs when enabled", async () => {
|
||||
const input = [
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{
|
||||
type: "toolCall",
|
||||
id: "call_123|fc_456",
|
||||
name: "read",
|
||||
arguments: { path: "package.json" },
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
role: "toolResult",
|
||||
toolCallId: "call_123|fc_456",
|
||||
toolName: "read",
|
||||
content: [{ type: "text", text: "ok" }],
|
||||
isError: false,
|
||||
},
|
||||
] satisfies AgentMessage[];
|
||||
|
||||
const out = await sanitizeSessionMessagesImages(input, "test", {
|
||||
sanitizeToolCallIds: true,
|
||||
});
|
||||
|
||||
const assistant = out[0] as unknown as { role?: string; content?: unknown };
|
||||
expect(assistant.role).toBe("assistant");
|
||||
expect(Array.isArray(assistant.content)).toBe(true);
|
||||
const toolCall = (
|
||||
assistant.content as Array<{ type?: string; id?: string }>
|
||||
).find((b) => b.type === "toolCall");
|
||||
expect(toolCall?.id).toBe("call_123_fc_456");
|
||||
|
||||
const toolResult = out[1] as unknown as {
|
||||
role?: string;
|
||||
toolCallId?: string;
|
||||
};
|
||||
expect(toolResult.role).toBe("toolResult");
|
||||
expect(toolResult.toolCallId).toBe("call_123_fc_456");
|
||||
});
|
||||
|
||||
// Verifies that enforceToolCallLast trims assistant content so the tool call is
// the final block (some providers reject trailing text/thinking after a call).
it("drops assistant blocks after a tool call when enforceToolCallLast is enabled", async () => {
  const input = [
    {
      role: "assistant",
      content: [
        { type: "text", text: "before" },
        { type: "toolCall", id: "call_1", name: "read", arguments: {} },
        { type: "thinking", thinking: "after", thinkingSignature: "sig" },
        { type: "text", text: "after text" },
      ],
    },
  ] satisfies AgentMessage[];

  const out = await sanitizeSessionMessagesImages(input, "test", {
    enforceToolCallLast: true,
  });
  const assistant = out[0] as { content?: Array<{ type?: string }> };
  // Blocks after the last tool call are dropped.
  expect(assistant.content?.map((b) => b.type)).toEqual(["text", "toolCall"]);
});

// The default (option omitted) must leave post-tool-call blocks untouched.
it("keeps assistant blocks after a tool call when enforceToolCallLast is disabled", async () => {
  const input = [
    {
      role: "assistant",
      content: [
        { type: "text", text: "before" },
        { type: "toolCall", id: "call_1", name: "read", arguments: {} },
        { type: "thinking", thinking: "after", thinkingSignature: "sig" },
        { type: "text", text: "after text" },
      ],
    },
  ] satisfies AgentMessage[];

  const out = await sanitizeSessionMessagesImages(input, "test");
  const assistant = out[0] as { content?: Array<{ type?: string }> };
  // Full content order is preserved when the option is off.
  expect(assistant.content?.map((b) => b.type)).toEqual([
    "text",
    "toolCall",
    "thinking",
    "text",
  ]);
});
|
||||
});
|
||||
|
||||
// Covers the text canonicalization used by messaging-duplicate detection:
// lowercase, trim, whitespace collapse, emoji stripping.
// NOTE(review): the whitespace inside some string literals below looks collapsed
// by formatting loss (e.g. "collapses multiple spaces" feeds a single-spaced
// input) — confirm against the original file.
describe("normalizeTextForComparison", () => {
  it("lowercases text", () => {
    expect(normalizeTextForComparison("Hello World")).toBe("hello world");
  });

  it("trims whitespace", () => {
    expect(normalizeTextForComparison(" hello ")).toBe("hello");
  });

  it("collapses multiple spaces", () => {
    expect(normalizeTextForComparison("hello world")).toBe("hello world");
  });

  it("strips emoji", () => {
    expect(normalizeTextForComparison("Hello 👋 World 🌍")).toBe("hello world");
  });

  it("handles mixed normalization", () => {
    expect(normalizeTextForComparison(" Hello 👋 WORLD 🌍 ")).toBe(
      "hello world",
    );
  });
});
|
||||
|
||||
// Contract: only "msg_"-prefixed thought_signature fields (Claude message ids)
// are removed; base64-looking signatures and everything else pass through.
describe("stripThoughtSignatures", () => {
  it("returns non-array content unchanged", () => {
    expect(stripThoughtSignatures("hello")).toBe("hello");
    expect(stripThoughtSignatures(null)).toBe(null);
    expect(stripThoughtSignatures(undefined)).toBe(undefined);
    expect(stripThoughtSignatures(123)).toBe(123);
  });

  it("removes msg_-prefixed thought_signature from content blocks", () => {
    const input = [
      { type: "text", text: "hello", thought_signature: "msg_abc123" },
      { type: "thinking", thinking: "test", thought_signature: "AQID" },
    ];
    const result = stripThoughtSignatures(input);

    expect(result).toHaveLength(2);
    expect(result[0]).toEqual({ type: "text", text: "hello" });
    expect(result[1]).toEqual({
      type: "thinking",
      thinking: "test",
      thought_signature: "AQID",
    });
    // The field must be deleted, not set to undefined.
    expect("thought_signature" in result[0]).toBe(false);
    expect("thought_signature" in result[1]).toBe(true);
  });

  it("preserves blocks without thought_signature", () => {
    const input = [
      { type: "text", text: "hello" },
      { type: "toolCall", id: "call_1", name: "read", arguments: {} },
    ];
    const result = stripThoughtSignatures(input);

    expect(result).toEqual(input);
  });

  it("handles mixed blocks with and without thought_signature", () => {
    const input = [
      { type: "text", text: "hello", thought_signature: "msg_abc" },
      { type: "toolCall", id: "call_1", name: "read", arguments: {} },
      { type: "thinking", thinking: "hmm", thought_signature: "msg_xyz" },
    ];
    const result = stripThoughtSignatures(input);

    expect(result).toEqual([
      { type: "text", text: "hello" },
      { type: "toolCall", id: "call_1", name: "read", arguments: {} },
      { type: "thinking", thinking: "hmm" },
    ]);
  });

  it("handles empty array", () => {
    expect(stripThoughtSignatures([])).toEqual([]);
  });

  it("handles null/undefined blocks in array", () => {
    const input = [null, undefined, { type: "text", text: "hello" }];
    const result = stripThoughtSignatures(input);
    expect(result).toEqual([null, undefined, { type: "text", text: "hello" }]);
  });
});
|
||||
|
||||
// Integration check: the session sanitizer applies stripThoughtSignatures to
// assistant content while leaving non-"msg_" signatures intact.
describe("sanitizeSessionMessagesImages - thought_signature stripping", () => {
  it("strips msg_-prefixed thought_signature from assistant message content blocks", async () => {
    const input = [
      {
        role: "assistant",
        content: [
          { type: "text", text: "hello", thought_signature: "msg_abc123" },
          {
            type: "thinking",
            thinking: "reasoning",
            thought_signature: "AQID",
          },
        ],
      },
    ] satisfies AgentMessage[];

    const out = await sanitizeSessionMessagesImages(input, "test");

    expect(out).toHaveLength(1);
    const content = (out[0] as { content?: unknown[] }).content;
    expect(content).toHaveLength(2);
    // Claude-style signature removed; base64-style signature preserved.
    expect("thought_signature" in ((content?.[0] ?? {}) as object)).toBe(false);
    expect(
      (content?.[1] as { thought_signature?: unknown })?.thought_signature,
    ).toBe("AQID");
  });
});
|
||||
|
||||
// Duplicate detection between an assistant reply and texts already sent via a
// messaging tool: normalized comparison plus substring checks in both directions.
describe("isMessagingToolDuplicate", () => {
  it("returns false for empty sentTexts", () => {
    expect(isMessagingToolDuplicate("hello world", [])).toBe(false);
  });

  it("returns false for short texts", () => {
    // Short strings are too collision-prone to treat as duplicates.
    expect(isMessagingToolDuplicate("short", ["short"])).toBe(false);
  });

  it("detects exact duplicates", () => {
    expect(
      isMessagingToolDuplicate("Hello, this is a test message!", [
        "Hello, this is a test message!",
      ]),
    ).toBe(true);
  });

  it("detects duplicates with different casing", () => {
    expect(
      isMessagingToolDuplicate("HELLO, THIS IS A TEST MESSAGE!", [
        "hello, this is a test message!",
      ]),
    ).toBe(true);
  });

  it("detects duplicates with emoji variations", () => {
    expect(
      isMessagingToolDuplicate("Hello! 👋 This is a test message!", [
        "Hello! This is a test message!",
      ]),
    ).toBe(true);
  });

  it("detects substring duplicates (LLM elaboration)", () => {
    expect(
      isMessagingToolDuplicate(
        'I sent the message: "Hello, this is a test message!"',
        ["Hello, this is a test message!"],
      ),
    ).toBe(true);
  });

  it("detects when sent text contains block reply (reverse substring)", () => {
    expect(
      isMessagingToolDuplicate("Hello, this is a test message!", [
        'I sent the message: "Hello, this is a test message!"',
      ]),
    ).toBe(true);
  });

  it("returns false for non-matching texts", () => {
    expect(
      isMessagingToolDuplicate("This is completely different content.", [
        "Hello, this is a test message!",
      ]),
    ).toBe(false);
  });
});
|
||||
@@ -1,946 +1,55 @@
|
||||
import fs from "node:fs/promises";
|
||||
import path from "node:path";
|
||||
export {
|
||||
buildBootstrapContextFiles,
|
||||
DEFAULT_BOOTSTRAP_MAX_CHARS,
|
||||
ensureSessionHeader,
|
||||
resolveBootstrapMaxChars,
|
||||
stripThoughtSignatures,
|
||||
} from "./pi-embedded-helpers/bootstrap.js";
|
||||
export {
|
||||
classifyFailoverReason,
|
||||
formatAssistantErrorText,
|
||||
isAuthAssistantError,
|
||||
isAuthErrorMessage,
|
||||
isBillingAssistantError,
|
||||
isBillingErrorMessage,
|
||||
isCloudCodeAssistFormatError,
|
||||
isCompactionFailureError,
|
||||
isContextOverflowError,
|
||||
isFailoverAssistantError,
|
||||
isFailoverErrorMessage,
|
||||
isOverloadedErrorMessage,
|
||||
isRateLimitAssistantError,
|
||||
isRateLimitErrorMessage,
|
||||
isTimeoutErrorMessage,
|
||||
} from "./pi-embedded-helpers/errors.js";
|
||||
export {
|
||||
downgradeGeminiHistory,
|
||||
isGoogleModelApi,
|
||||
sanitizeGoogleTurnOrdering,
|
||||
} from "./pi-embedded-helpers/google.js";
|
||||
export {
|
||||
isEmptyAssistantMessageContent,
|
||||
sanitizeSessionMessagesImages,
|
||||
} from "./pi-embedded-helpers/images.js";
|
||||
export {
|
||||
isMessagingToolDuplicate,
|
||||
isMessagingToolDuplicateNormalized,
|
||||
normalizeTextForComparison,
|
||||
} from "./pi-embedded-helpers/messaging-dedupe.js";
|
||||
|
||||
import type {
|
||||
AgentMessage,
|
||||
AgentToolResult,
|
||||
} from "@mariozechner/pi-agent-core";
|
||||
import type { AssistantMessage } from "@mariozechner/pi-ai";
|
||||
import {
|
||||
normalizeThinkLevel,
|
||||
type ThinkLevel,
|
||||
} from "../auto-reply/thinking.js";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
import { formatSandboxToolPolicyBlockedMessage } from "./sandbox.js";
|
||||
import {
|
||||
export { pickFallbackThinkingLevel } from "./pi-embedded-helpers/thinking.js";
|
||||
|
||||
export {
|
||||
mergeConsecutiveUserTurns,
|
||||
validateAnthropicTurns,
|
||||
validateGeminiTurns,
|
||||
} from "./pi-embedded-helpers/turns.js";
|
||||
export type {
|
||||
EmbeddedContextFile,
|
||||
FailoverReason,
|
||||
} from "./pi-embedded-helpers/types.js";
|
||||
|
||||
export {
|
||||
isValidCloudCodeAssistToolId,
|
||||
sanitizeToolCallId,
|
||||
sanitizeToolCallIdsForCloudCodeAssist,
|
||||
} from "./tool-call-id.js";
|
||||
import { sanitizeContentBlocksImages } from "./tool-images.js";
|
||||
import type { WorkspaceBootstrapFile } from "./workspace.js";
|
||||
|
||||
// A workspace file injected into the agent's bootstrap context (path + text).
// NOTE(review): also re-exported from ./pi-embedded-helpers/types.js above —
// confirm this local declaration is not a leftover duplicate from the split.
export type EmbeddedContextFile = { path: string; content: string };
|
||||
|
||||
// ── Cross-provider thought_signature sanitization ──────────────────────────────
|
||||
// Claude's extended thinking feature generates thought_signature fields (message IDs
|
||||
// like "msg_abc123...") in content blocks. When these are sent to Google's Gemini API,
|
||||
// it expects Base64-encoded bytes and rejects Claude's format with a 400 error.
|
||||
// This function strips thought_signature fields to enable cross-provider session sharing.
|
||||
|
||||
type ContentBlockWithSignature = {
|
||||
thought_signature?: unknown;
|
||||
[key: string]: unknown;
|
||||
};
|
||||
|
||||
/**
|
||||
* Strips Claude-style thought_signature fields from content blocks.
|
||||
*
|
||||
* Gemini expects thought signatures as base64-encoded bytes, but Claude stores message ids
|
||||
* like "msg_abc123...". We only strip "msg_*" to preserve any provider-valid signatures.
|
||||
*/
|
||||
export function stripThoughtSignatures<T>(content: T): T {
|
||||
if (!Array.isArray(content)) return content;
|
||||
return content.map((block) => {
|
||||
if (!block || typeof block !== "object") return block;
|
||||
const rec = block as ContentBlockWithSignature;
|
||||
const signature = rec.thought_signature;
|
||||
if (typeof signature !== "string" || !signature.startsWith("msg_")) {
|
||||
return block;
|
||||
}
|
||||
const { thought_signature: _signature, ...rest } = rec;
|
||||
return rest;
|
||||
}) as T;
|
||||
}
|
||||
|
||||
// Default per-file character budget when injecting bootstrap files into context.
export const DEFAULT_BOOTSTRAP_MAX_CHARS = 20_000;
// When truncating, keep ~70% of the budget from the head and ~20% from the tail;
// the remainder is slack for the truncation marker.
const BOOTSTRAP_HEAD_RATIO = 0.7;
const BOOTSTRAP_TAIL_RATIO = 0.2;

// Result of trimBootstrapContent: the (possibly truncated) text plus enough
// metadata to emit a useful warning.
type TrimBootstrapResult = {
  content: string;
  truncated: boolean;
  maxChars: number;
  originalLength: number;
};
|
||||
|
||||
export function resolveBootstrapMaxChars(cfg?: ClawdbotConfig): number {
|
||||
const raw = cfg?.agents?.defaults?.bootstrapMaxChars;
|
||||
if (typeof raw === "number" && Number.isFinite(raw) && raw > 0) {
|
||||
return Math.floor(raw);
|
||||
}
|
||||
return DEFAULT_BOOTSTRAP_MAX_CHARS;
|
||||
}
|
||||
|
||||
function trimBootstrapContent(
|
||||
content: string,
|
||||
fileName: string,
|
||||
maxChars: number,
|
||||
): TrimBootstrapResult {
|
||||
const trimmed = content.trimEnd();
|
||||
if (trimmed.length <= maxChars) {
|
||||
return {
|
||||
content: trimmed,
|
||||
truncated: false,
|
||||
maxChars,
|
||||
originalLength: trimmed.length,
|
||||
};
|
||||
}
|
||||
|
||||
const headChars = Math.max(1, Math.floor(maxChars * BOOTSTRAP_HEAD_RATIO));
|
||||
const tailChars = Math.max(1, Math.floor(maxChars * BOOTSTRAP_TAIL_RATIO));
|
||||
const head = trimmed.slice(0, headChars);
|
||||
const tail = trimmed.slice(-tailChars);
|
||||
const contentWithMarker = [
|
||||
head,
|
||||
"",
|
||||
`[...truncated, read ${fileName} for full content...]`,
|
||||
"",
|
||||
tail,
|
||||
].join("\n");
|
||||
return {
|
||||
content: contentWithMarker,
|
||||
truncated: true,
|
||||
maxChars,
|
||||
originalLength: trimmed.length,
|
||||
};
|
||||
}
|
||||
|
||||
export async function ensureSessionHeader(params: {
|
||||
sessionFile: string;
|
||||
sessionId: string;
|
||||
cwd: string;
|
||||
}) {
|
||||
const file = params.sessionFile;
|
||||
try {
|
||||
await fs.stat(file);
|
||||
return;
|
||||
} catch {
|
||||
// create
|
||||
}
|
||||
await fs.mkdir(path.dirname(file), { recursive: true });
|
||||
const sessionVersion = 2;
|
||||
const entry = {
|
||||
type: "session",
|
||||
version: sessionVersion,
|
||||
id: params.sessionId,
|
||||
timestamp: new Date().toISOString(),
|
||||
cwd: params.cwd,
|
||||
};
|
||||
await fs.writeFile(file, `${JSON.stringify(entry)}\n`, "utf-8");
|
||||
}
|
||||
|
||||
// Element type of a tool result's content array; reused for image sanitization.
type ContentBlock = AgentToolResult<unknown>["content"][number];
|
||||
|
||||
export function isEmptyAssistantMessageContent(
|
||||
message: Extract<AgentMessage, { role: "assistant" }>,
|
||||
): boolean {
|
||||
const content = message.content;
|
||||
if (content == null) return true;
|
||||
if (!Array.isArray(content)) return false;
|
||||
return content.every((block) => {
|
||||
if (!block || typeof block !== "object") return true;
|
||||
const rec = block as { type?: unknown; text?: unknown };
|
||||
if (rec.type !== "text") return false;
|
||||
return typeof rec.text !== "string" || rec.text.trim().length === 0;
|
||||
});
|
||||
}
|
||||
|
||||
function isEmptyAssistantErrorMessage(
|
||||
message: Extract<AgentMessage, { role: "assistant" }>,
|
||||
): boolean {
|
||||
if (message.stopReason !== "error") return false;
|
||||
return isEmptyAssistantMessageContent(message);
|
||||
}
|
||||
|
||||
/**
 * Sanitizes a historical session transcript before replaying it to a provider.
 *
 * Per-role handling:
 * - toolResult: image blocks are sanitized (non-array content becomes []).
 * - user: image blocks are sanitized when content is an array.
 * - assistant: empty errored turns are dropped; "msg_"-style thought_signature
 *   fields are stripped; blank text blocks are removed; optionally everything
 *   after the last tool call is dropped; then images are sanitized. Turns whose
 *   content ends up empty are dropped entirely.
 * Any other message shape passes through unchanged.
 *
 * @param messages transcript to sanitize (not mutated; new messages are built)
 * @param label context string used by the image sanitizer for logging
 * @param options sanitizeToolCallIds: rewrite ids for Cloud Code Assist;
 *   enforceToolCallLast: make a tool call the final assistant block
 */
export async function sanitizeSessionMessagesImages(
  messages: AgentMessage[],
  label: string,
  options?: { sanitizeToolCallIds?: boolean; enforceToolCallLast?: boolean },
): Promise<AgentMessage[]> {
  // We sanitize historical session messages because Anthropic can reject a request
  // if the transcript contains oversized base64 images (see MAX_IMAGE_DIMENSION_PX).
  const sanitizedIds = options?.sanitizeToolCallIds
    ? sanitizeToolCallIdsForCloudCodeAssist(messages)
    : messages;
  const base = sanitizedIds;
  const out: AgentMessage[] = [];
  for (const msg of base) {
    // Pass non-object entries through untouched.
    if (!msg || typeof msg !== "object") {
      out.push(msg);
      continue;
    }

    const role = (msg as { role?: unknown }).role;
    if (role === "toolResult") {
      const toolMsg = msg as Extract<AgentMessage, { role: "toolResult" }>;
      const content = Array.isArray(toolMsg.content) ? toolMsg.content : [];
      const nextContent = (await sanitizeContentBlocksImages(
        content as ContentBlock[],
        label,
      )) as unknown as typeof toolMsg.content;
      out.push({ ...toolMsg, content: nextContent });
      continue;
    }

    if (role === "user") {
      const userMsg = msg as Extract<AgentMessage, { role: "user" }>;
      const content = userMsg.content;
      // String user content falls through to the default push below.
      if (Array.isArray(content)) {
        const nextContent = (await sanitizeContentBlocksImages(
          content as unknown as ContentBlock[],
          label,
        )) as unknown as typeof userMsg.content;
        out.push({ ...userMsg, content: nextContent });
        continue;
      }
    }

    if (role === "assistant") {
      const assistantMsg = msg as Extract<AgentMessage, { role: "assistant" }>;
      // Errored turns with no usable content are dropped entirely.
      if (isEmptyAssistantErrorMessage(assistantMsg)) {
        continue;
      }
      const content = assistantMsg.content;
      if (Array.isArray(content)) {
        // Strip thought_signature fields to enable cross-provider session sharing
        const strippedContent = stripThoughtSignatures(content);
        // Remove text blocks that are blank; keep everything else.
        const filteredContent = strippedContent.filter((block) => {
          if (!block || typeof block !== "object") return true;
          const rec = block as { type?: unknown; text?: unknown };
          if (rec.type !== "text" || typeof rec.text !== "string") return true;
          return rec.text.trim().length > 0;
        });
        // Optionally cut everything after the LAST tool-call-like block so the
        // tool call is the final assistant block.
        const normalizedContent = options?.enforceToolCallLast
          ? (() => {
              let lastToolIndex = -1;
              for (let i = filteredContent.length - 1; i >= 0; i -= 1) {
                const block = filteredContent[i];
                if (!block || typeof block !== "object") continue;
                const type = (block as { type?: unknown }).type;
                if (
                  type === "functionCall" ||
                  type === "toolUse" ||
                  type === "toolCall"
                ) {
                  lastToolIndex = i;
                  break;
                }
              }
              if (lastToolIndex === -1) return filteredContent;
              return filteredContent.slice(0, lastToolIndex + 1);
            })()
          : filteredContent;
        const finalContent = (await sanitizeContentBlocksImages(
          normalizedContent as unknown as ContentBlock[],
          label,
        )) as unknown as typeof assistantMsg.content;
        // Drop assistant turns whose content vanished during sanitization.
        if (finalContent.length === 0) {
          continue;
        }
        out.push({ ...assistantMsg, content: finalContent });
        continue;
      }
    }

    out.push(msg);
  }
  return out;
}
|
||||
|
||||
// Text of the synthetic user turn prepended when a history starts with a model turn.
const GOOGLE_TURN_ORDER_BOOTSTRAP_TEXT = "(session bootstrap)";
|
||||
|
||||
export function isGoogleModelApi(api?: string | null): boolean {
|
||||
return (
|
||||
api === "google-gemini-cli" ||
|
||||
api === "google-generative-ai" ||
|
||||
api === "google-antigravity"
|
||||
);
|
||||
}
|
||||
|
||||
export function sanitizeGoogleTurnOrdering(
|
||||
messages: AgentMessage[],
|
||||
): AgentMessage[] {
|
||||
const first = messages[0] as
|
||||
| { role?: unknown; content?: unknown }
|
||||
| undefined;
|
||||
const role = first?.role;
|
||||
const content = first?.content;
|
||||
if (
|
||||
role === "user" &&
|
||||
typeof content === "string" &&
|
||||
content.trim() === GOOGLE_TURN_ORDER_BOOTSTRAP_TEXT
|
||||
) {
|
||||
return messages;
|
||||
}
|
||||
if (role !== "assistant") return messages;
|
||||
|
||||
// Cloud Code Assist rejects histories that begin with a model turn (tool call or text).
|
||||
// Prepend a tiny synthetic user turn so the rest of the transcript can be used.
|
||||
const bootstrap: AgentMessage = {
|
||||
role: "user",
|
||||
content: GOOGLE_TURN_ORDER_BOOTSTRAP_TEXT,
|
||||
timestamp: Date.now(),
|
||||
} as AgentMessage;
|
||||
|
||||
return [bootstrap, ...messages];
|
||||
}
|
||||
|
||||
export function buildBootstrapContextFiles(
|
||||
files: WorkspaceBootstrapFile[],
|
||||
opts?: { warn?: (message: string) => void; maxChars?: number },
|
||||
): EmbeddedContextFile[] {
|
||||
const maxChars = opts?.maxChars ?? DEFAULT_BOOTSTRAP_MAX_CHARS;
|
||||
const result: EmbeddedContextFile[] = [];
|
||||
for (const file of files) {
|
||||
if (file.missing) {
|
||||
result.push({
|
||||
path: file.name,
|
||||
content: `[MISSING] Expected at: ${file.path}`,
|
||||
});
|
||||
continue;
|
||||
}
|
||||
const trimmed = trimBootstrapContent(
|
||||
file.content ?? "",
|
||||
file.name,
|
||||
maxChars,
|
||||
);
|
||||
if (!trimmed.content) continue;
|
||||
if (trimmed.truncated) {
|
||||
opts?.warn?.(
|
||||
`workspace bootstrap file ${file.name} is ${trimmed.originalLength} chars (limit ${trimmed.maxChars}); truncating in injected context`,
|
||||
);
|
||||
}
|
||||
result.push({
|
||||
path: file.name,
|
||||
content: trimmed.content,
|
||||
});
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
export function isContextOverflowError(errorMessage?: string): boolean {
|
||||
if (!errorMessage) return false;
|
||||
const lower = errorMessage.toLowerCase();
|
||||
return (
|
||||
lower.includes("request_too_large") ||
|
||||
lower.includes("request exceeds the maximum size") ||
|
||||
lower.includes("context length exceeded") ||
|
||||
lower.includes("maximum context length") ||
|
||||
lower.includes("prompt is too long") ||
|
||||
lower.includes("context overflow") ||
|
||||
(lower.includes("413") && lower.includes("too large"))
|
||||
);
|
||||
}
|
||||
|
||||
export function isCompactionFailureError(errorMessage?: string): boolean {
|
||||
if (!errorMessage) return false;
|
||||
if (!isContextOverflowError(errorMessage)) return false;
|
||||
const lower = errorMessage.toLowerCase();
|
||||
return (
|
||||
lower.includes("summarization failed") ||
|
||||
lower.includes("auto-compaction") ||
|
||||
lower.includes("compaction failed") ||
|
||||
lower.includes("compaction")
|
||||
);
|
||||
}
|
||||
|
||||
/**
 * Turns an errored assistant message into a short, user-presentable string.
 *
 * Checks run in priority order: sandbox-blocked tool → context overflow →
 * role-ordering conflict → structured invalid_request_error → overloaded →
 * raw message truncated to 600 chars. Returns undefined for non-error messages.
 */
export function formatAssistantErrorText(
  msg: AssistantMessage,
  opts?: { cfg?: ClawdbotConfig; sessionKey?: string },
): string | undefined {
  if (msg.stopReason !== "error") return undefined;
  const raw = (msg.errorMessage ?? "").trim();
  if (!raw) return "LLM request failed with an unknown error.";

  // "Unknown tool" errors usually mean the sandbox policy blocked the tool;
  // rewrite them with a policy-specific explanation when possible.
  const unknownTool =
    raw.match(/unknown tool[:\s]+["']?([a-z0-9_-]+)["']?/i) ??
    raw.match(
      /tool\s+["']?([a-z0-9_-]+)["']?\s+(?:not found|is not available)/i,
    );
  if (unknownTool?.[1]) {
    const rewritten = formatSandboxToolPolicyBlockedMessage({
      cfg: opts?.cfg,
      sessionKey: opts?.sessionKey,
      toolName: unknownTool[1],
    });
    if (rewritten) return rewritten;
  }

  // Check for context overflow (413) errors
  if (isContextOverflowError(raw)) {
    return (
      "Context overflow: prompt too large for the model. " +
      "Try again with less input or a larger-context model."
    );
  }

  // Check for role ordering errors (Anthropic 400 "Incorrect role information")
  // This typically happens when consecutive user messages are sent without
  // an assistant response between them, often due to steering/queueing timing.
  if (/incorrect role information|roles must alternate/i.test(raw)) {
    return (
      "Message ordering conflict - please try again. " +
      "If this persists, use /new to start a fresh session."
    );
  }

  // Extract the human-readable message from an embedded invalid_request_error JSON.
  const invalidRequest = raw.match(
    /"type":"invalid_request_error".*?"message":"([^"]+)"/,
  );
  if (invalidRequest?.[1]) {
    return `LLM request rejected: ${invalidRequest[1]}`;
  }

  // Check for overloaded errors (Anthropic API capacity)
  if (isOverloadedErrorMessage(raw)) {
    return "The AI service is temporarily overloaded. Please try again in a moment.";
  }

  // Keep it short for WhatsApp.
  return raw.length > 600 ? `${raw.slice(0, 600)}…` : raw;
}
|
||||
|
||||
export function isRateLimitAssistantError(
|
||||
msg: AssistantMessage | undefined,
|
||||
): boolean {
|
||||
if (!msg || msg.stopReason !== "error") return false;
|
||||
return isRateLimitErrorMessage(msg.errorMessage ?? "");
|
||||
}
|
||||
|
||||
// A pattern matched against a lowercased error message: regexes are tested,
// strings are substring-matched.
type ErrorPattern = RegExp | string;

// Classification table for provider error messages. Matching is always done on
// the lowercased message (see matchesErrorPatterns), so string entries must be
// lowercase.
const ERROR_PATTERNS = {
  // Throttling / quota exhaustion (HTTP 429 family).
  rateLimit: [
    /rate[_ ]limit|too many requests|429/,
    "exceeded your current quota",
    "resource has been exhausted",
    "quota exceeded",
    "resource_exhausted",
    "usage limit",
  ],
  // Provider capacity errors (Anthropic overloaded_error).
  overloaded: [
    /overloaded_error|"type"\s*:\s*"overloaded_error"/i,
    "overloaded",
  ],
  // Request/connection timeouts.
  timeout: [
    "timeout",
    "timed out",
    "deadline exceeded",
    "context deadline exceeded",
  ],
  // Payment / credit problems (HTTP 402 family).
  billing: [
    /\b402\b/,
    "payment required",
    "insufficient credits",
    "credit balance",
    "plans & billing",
  ],
  // Credential problems (HTTP 401/403 family).
  auth: [
    /invalid[_ ]?api[_ ]?key/,
    "incorrect api key",
    "invalid token",
    "authentication",
    "unauthorized",
    "forbidden",
    "access denied",
    "expired",
    "token has expired",
    /\b401\b/,
    /\b403\b/,
    // Credential validation failures should trigger fallback (#761)
    "no credentials found",
    "no api key found",
  ],
  // Malformed-request errors (e.g. Cloud Code Assist tool_use id format).
  format: [
    "invalid_request_error",
    "string should match pattern",
    "tool_use.id",
    "tool_use_id",
    "messages.1.content.1.tool_use.id",
    "invalid request format",
  ],
} as const;
|
||||
|
||||
function matchesErrorPatterns(
|
||||
raw: string,
|
||||
patterns: readonly ErrorPattern[],
|
||||
): boolean {
|
||||
if (!raw) return false;
|
||||
const value = raw.toLowerCase();
|
||||
return patterns.some((pattern) =>
|
||||
pattern instanceof RegExp ? pattern.test(value) : value.includes(pattern),
|
||||
);
|
||||
}
|
||||
|
||||
// True for throttling/quota-exhaustion error text (HTTP 429 family).
export function isRateLimitErrorMessage(raw: string): boolean {
  return matchesErrorPatterns(raw, ERROR_PATTERNS.rateLimit);
}

// True for request/connection timeout error text.
export function isTimeoutErrorMessage(raw: string): boolean {
  return matchesErrorPatterns(raw, ERROR_PATTERNS.timeout);
}

// True for payment/credit error text; also matches "billing" when paired with
// upgrade/credits/payment/plan wording to avoid false positives.
export function isBillingErrorMessage(raw: string): boolean {
  const value = raw.toLowerCase();
  if (!value) return false;
  if (matchesErrorPatterns(value, ERROR_PATTERNS.billing)) return true;
  return (
    value.includes("billing") &&
    (value.includes("upgrade") ||
      value.includes("credits") ||
      value.includes("payment") ||
      value.includes("plan"))
  );
}

// True when an errored assistant message looks like a billing failure.
export function isBillingAssistantError(
  msg: AssistantMessage | undefined,
): boolean {
  if (!msg || msg.stopReason !== "error") return false;
  return isBillingErrorMessage(msg.errorMessage ?? "");
}

// True for credential/authorization error text (HTTP 401/403 family).
export function isAuthErrorMessage(raw: string): boolean {
  return matchesErrorPatterns(raw, ERROR_PATTERNS.auth);
}

// True for provider-capacity ("overloaded") error text.
export function isOverloadedErrorMessage(raw: string): boolean {
  return matchesErrorPatterns(raw, ERROR_PATTERNS.overloaded);
}

// True for malformed-request error text (e.g. Cloud Code Assist tool id format).
export function isCloudCodeAssistFormatError(raw: string): boolean {
  return matchesErrorPatterns(raw, ERROR_PATTERNS.format);
}

// True when an errored assistant message looks like an auth failure.
export function isAuthAssistantError(
  msg: AssistantMessage | undefined,
): boolean {
  if (!msg || msg.stopReason !== "error") return false;
  return isAuthErrorMessage(msg.errorMessage ?? "");
}
|
||||
|
||||
// Reason a request should fail over to another provider/model.
export type FailoverReason =
  | "auth"
  | "format"
  | "rate_limit"
  | "billing"
  | "timeout"
  | "unknown";

// Maps an error message to a failover reason, or null when the error should not
// trigger failover. Check order is significant: earlier, more specific
// categories win (e.g. rate limit before auth, since messages can match both).
export function classifyFailoverReason(raw: string): FailoverReason | null {
  if (isRateLimitErrorMessage(raw)) return "rate_limit";
  if (isOverloadedErrorMessage(raw)) return "rate_limit"; // Treat overloaded as rate limit for failover
  if (isCloudCodeAssistFormatError(raw)) return "format";
  if (isBillingErrorMessage(raw)) return "billing";
  if (isTimeoutErrorMessage(raw)) return "timeout";
  if (isAuthErrorMessage(raw)) return "auth";
  return null;
}

// True when the error text maps to any failover reason.
export function isFailoverErrorMessage(raw: string): boolean {
  return classifyFailoverReason(raw) !== null;
}

// True when an errored assistant message should trigger provider failover.
export function isFailoverAssistantError(
  msg: AssistantMessage | undefined,
): boolean {
  if (!msg || msg.stopReason !== "error") return false;
  return isFailoverErrorMessage(msg.errorMessage ?? "");
}
|
||||
|
||||
function extractSupportedValues(raw: string): string[] {
|
||||
const match =
|
||||
raw.match(/supported values are:\s*([^\n.]+)/i) ??
|
||||
raw.match(/supported values:\s*([^\n.]+)/i);
|
||||
if (!match?.[1]) return [];
|
||||
const fragment = match[1];
|
||||
const quoted = Array.from(fragment.matchAll(/['"]([^'"]+)['"]/g)).map(
|
||||
(entry) => entry[1]?.trim(),
|
||||
);
|
||||
if (quoted.length > 0) {
|
||||
return quoted.filter((entry): entry is string => Boolean(entry));
|
||||
}
|
||||
return fragment
|
||||
.split(/,|\band\b/gi)
|
||||
.map((entry) => entry.replace(/^[^a-zA-Z]+|[^a-zA-Z]+$/g, "").trim())
|
||||
.filter(Boolean);
|
||||
}
|
||||
|
||||
export function pickFallbackThinkingLevel(params: {
|
||||
message?: string;
|
||||
attempted: Set<ThinkLevel>;
|
||||
}): ThinkLevel | undefined {
|
||||
const raw = params.message?.trim();
|
||||
if (!raw) return undefined;
|
||||
const supported = extractSupportedValues(raw);
|
||||
if (supported.length === 0) return undefined;
|
||||
for (const entry of supported) {
|
||||
const normalized = normalizeThinkLevel(entry);
|
||||
if (!normalized) continue;
|
||||
if (params.attempted.has(normalized)) continue;
|
||||
return normalized;
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
/**
 * Validates and fixes conversation turn sequences for Gemini API.
 * Gemini requires strict alternating user→assistant→tool→user pattern.
 * This function:
 * 1. Detects consecutive messages from the same role
 * 2. Merges consecutive assistant messages together
 * 3. Preserves metadata (usage, stopReason, etc.)
 *
 * This prevents the "function call turn comes immediately after a user turn or after a function response turn" error.
 */
export function validateGeminiTurns(messages: AgentMessage[]): AgentMessage[] {
  if (!Array.isArray(messages) || messages.length === 0) {
    return messages;
  }

  const result: AgentMessage[] = [];
  let lastRole: string | undefined;

  for (const msg of messages) {
    // Non-object or role-less entries pass through without affecting lastRole
    // tracking for the merge check below.
    if (!msg || typeof msg !== "object") {
      result.push(msg);
      continue;
    }

    const msgRole = (msg as { role?: unknown }).role as string | undefined;
    if (!msgRole) {
      result.push(msg);
      continue;
    }

    // Check if this message has the same role as the last one
    if (msgRole === lastRole && lastRole === "assistant") {
      // Merge consecutive assistant messages
      const lastMsg = result[result.length - 1];
      const currentMsg = msg as Extract<AgentMessage, { role: "assistant" }>;

      if (lastMsg && typeof lastMsg === "object") {
        const lastAsst = lastMsg as Extract<
          AgentMessage,
          { role: "assistant" }
        >;

        // Merge content blocks
        const mergedContent = [
          ...(Array.isArray(lastAsst.content) ? lastAsst.content : []),
          ...(Array.isArray(currentMsg.content) ? currentMsg.content : []),
        ];

        // Preserve metadata from the later message (more recent)
        const merged: Extract<AgentMessage, { role: "assistant" }> = {
          ...lastAsst,
          content: mergedContent,
          // Take timestamps, usage, stopReason from the newer message if present
          ...(currentMsg.usage && { usage: currentMsg.usage }),
          ...(currentMsg.stopReason && { stopReason: currentMsg.stopReason }),
          ...(currentMsg.errorMessage && {
            errorMessage: currentMsg.errorMessage,
          }),
        };

        // Replace the last message with merged version
        result[result.length - 1] = merged;
        continue;
      }
    }

    // Not a consecutive duplicate, add normally
    result.push(msg);
    lastRole = msgRole;
  }

  return result;
}
|
||||
|
||||
export function mergeConsecutiveUserTurns(
|
||||
previous: Extract<AgentMessage, { role: "user" }>,
|
||||
current: Extract<AgentMessage, { role: "user" }>,
|
||||
): Extract<AgentMessage, { role: "user" }> {
|
||||
const mergedContent = [
|
||||
...(Array.isArray(previous.content) ? previous.content : []),
|
||||
...(Array.isArray(current.content) ? current.content : []),
|
||||
];
|
||||
|
||||
// Preserve newest metadata while backfilling timestamp if the latest is missing.
|
||||
return {
|
||||
...current, // newest wins for metadata
|
||||
content: mergedContent,
|
||||
timestamp: current.timestamp ?? previous.timestamp,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates and fixes conversation turn sequences for Anthropic API.
|
||||
* Anthropic requires strict alternating user→assistant pattern.
|
||||
* This function:
|
||||
* 1. Detects consecutive user messages
|
||||
* 2. Merges consecutive user messages together
|
||||
* 3. Preserves timestamps from the later message
|
||||
*
|
||||
* This prevents the "400 Incorrect role information" error that occurs
|
||||
* when steering messages are injected during streaming and create
|
||||
* consecutive user messages.
|
||||
*/
|
||||
export function validateAnthropicTurns(
|
||||
messages: AgentMessage[],
|
||||
): AgentMessage[] {
|
||||
if (!Array.isArray(messages) || messages.length === 0) {
|
||||
return messages;
|
||||
}
|
||||
|
||||
const result: AgentMessage[] = [];
|
||||
let lastRole: string | undefined;
|
||||
|
||||
for (const msg of messages) {
|
||||
if (!msg || typeof msg !== "object") {
|
||||
result.push(msg);
|
||||
continue;
|
||||
}
|
||||
|
||||
const msgRole = (msg as { role?: unknown }).role as string | undefined;
|
||||
if (!msgRole) {
|
||||
result.push(msg);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check if this message has the same role as the last one
|
||||
if (msgRole === lastRole && lastRole === "user") {
|
||||
// Merge consecutive user messages. Base on the newest message so we keep
|
||||
// fresh metadata (attachments, timestamps, future fields) while
|
||||
// appending prior content.
|
||||
const lastMsg = result[result.length - 1];
|
||||
const currentMsg = msg as Extract<AgentMessage, { role: "user" }>;
|
||||
|
||||
if (lastMsg && typeof lastMsg === "object") {
|
||||
const lastUser = lastMsg as Extract<AgentMessage, { role: "user" }>;
|
||||
const merged = mergeConsecutiveUserTurns(lastUser, currentMsg);
|
||||
|
||||
// Replace the last message with merged version
|
||||
result[result.length - 1] = merged;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// Not a consecutive duplicate, add normally
|
||||
result.push(msg);
|
||||
lastRole = msgRole;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
// ── Messaging tool duplicate detection ──────────────────────────────────────
// When the agent uses a messaging tool (telegram, discord, slack, message, sessions_send)
// to send a message, we track the text so we can suppress duplicate block replies.
// The LLM sometimes elaborates or wraps the same content, so we use substring matching.

// Texts shorter than this are too generic to count as duplicates (e.g. "ok").
const MIN_DUPLICATE_TEXT_LENGTH = 10;
|
||||
|
||||
/**
|
||||
* Normalize text for duplicate comparison.
|
||||
* - Trims whitespace
|
||||
* - Lowercases
|
||||
* - Strips emoji (Emoji_Presentation and Extended_Pictographic)
|
||||
* - Collapses multiple spaces to single space
|
||||
*/
|
||||
export function normalizeTextForComparison(text: string): string {
|
||||
return text
|
||||
.trim()
|
||||
.toLowerCase()
|
||||
.replace(/\p{Emoji_Presentation}|\p{Extended_Pictographic}/gu, "")
|
||||
.replace(/\s+/g, " ")
|
||||
.trim();
|
||||
}
|
||||
|
||||
export function isMessagingToolDuplicateNormalized(
|
||||
normalized: string,
|
||||
normalizedSentTexts: string[],
|
||||
): boolean {
|
||||
if (normalizedSentTexts.length === 0) return false;
|
||||
if (!normalized || normalized.length < MIN_DUPLICATE_TEXT_LENGTH)
|
||||
return false;
|
||||
return normalizedSentTexts.some((normalizedSent) => {
|
||||
if (!normalizedSent || normalizedSent.length < MIN_DUPLICATE_TEXT_LENGTH)
|
||||
return false;
|
||||
return (
|
||||
normalized.includes(normalizedSent) || normalizedSent.includes(normalized)
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a text is a duplicate of any previously sent messaging tool text.
|
||||
* Uses substring matching to handle LLM elaboration (e.g., wrapping in quotes,
|
||||
* adding context, or slight rephrasing that includes the original).
|
||||
*/
|
||||
// ── Tool Call ID Sanitization (Google Cloud Code Assist) ───────────────────────
|
||||
// Google Cloud Code Assist rejects tool call IDs that contain invalid characters.
|
||||
// OpenAI Codex generates IDs like "call_abc123|item_456" with pipe characters,
|
||||
// but Google requires IDs matching ^[a-zA-Z0-9_-]+$ pattern.
|
||||
// This function sanitizes tool call IDs by replacing invalid characters with underscores.
|
||||
export { sanitizeToolCallId, isValidCloudCodeAssistToolId };
|
||||
|
||||
export function isMessagingToolDuplicate(
|
||||
text: string,
|
||||
sentTexts: string[],
|
||||
): boolean {
|
||||
if (sentTexts.length === 0) return false;
|
||||
const normalized = normalizeTextForComparison(text);
|
||||
if (!normalized || normalized.length < MIN_DUPLICATE_TEXT_LENGTH)
|
||||
return false;
|
||||
return isMessagingToolDuplicateNormalized(
|
||||
normalized,
|
||||
sentTexts.map(normalizeTextForComparison),
|
||||
);
|
||||
}
|
||||
|
||||
/**
 * Downgrades tool calls that are missing `thought_signature` (required by Gemini)
 * into text representations, to prevent 400 INVALID_ARGUMENT errors.
 * Also converts corresponding tool results into user messages.
 */
// Loose structural view of a tool-call content block. Every field is `unknown`
// because blocks come from different providers with differing key names
// (`id` vs `toolCallId`, `name` vs `toolName`, `arguments` vs `input`).
type GeminiToolCallBlock = {
  type?: unknown;
  thought_signature?: unknown;
  id?: unknown;
  toolCallId?: unknown;
  name?: unknown;
  toolName?: unknown;
  arguments?: unknown;
  input?: unknown;
};
|
||||
|
||||
/**
 * Downgrades assistant tool calls that are missing `thought_signature`
 * (required by Gemini) into plain text blocks, preventing Gemini's
 * 400 INVALID_ARGUMENT rejections. Tool results that answered a downgraded
 * call are converted into user text messages so the transcript stays coherent
 * without orphaned tool results.
 */
export function downgradeGeminiHistory(
  messages: AgentMessage[],
): AgentMessage[] {
  // IDs of tool calls we converted to text; their results must follow suit.
  const downgradedIds = new Set<string>();
  const out: AgentMessage[] = [];

  // Providers disagree on the result-id field name; accept either spelling.
  const resolveToolResultId = (
    msg: Extract<AgentMessage, { role: "toolResult" }>,
  ): string | undefined => {
    const toolCallId = (msg as { toolCallId?: unknown }).toolCallId;
    if (typeof toolCallId === "string" && toolCallId) return toolCallId;
    const toolUseId = (msg as { toolUseId?: unknown }).toolUseId;
    if (typeof toolUseId === "string" && toolUseId) return toolUseId;
    return undefined;
  };

  for (const msg of messages) {
    if (!msg || typeof msg !== "object") {
      out.push(msg);
      continue;
    }

    const role = (msg as { role?: unknown }).role;

    if (role === "assistant") {
      const assistantMsg = msg as Extract<AgentMessage, { role: "assistant" }>;
      if (!Array.isArray(assistantMsg.content)) {
        out.push(msg);
        continue;
      }

      let hasDowngraded = false;
      const newContent = assistantMsg.content.map((block) => {
        if (!block || typeof block !== "object") return block;
        const blockRecord = block as GeminiToolCallBlock;
        const type = blockRecord.type;

        // Check for tool calls / function calls
        if (
          type === "toolCall" ||
          type === "functionCall" ||
          type === "toolUse"
        ) {
          // Check if thought_signature is missing
          // Note: TypeScript doesn't know about thought_signature on standard types
          const hasSignature = Boolean(blockRecord.thought_signature);

          if (!hasSignature) {
            // Field names vary by provider: id/toolCallId, name/toolName,
            // arguments/input — coalesce each pair.
            const id =
              typeof blockRecord.id === "string"
                ? blockRecord.id
                : typeof blockRecord.toolCallId === "string"
                  ? blockRecord.toolCallId
                  : undefined;
            const name =
              typeof blockRecord.name === "string"
                ? blockRecord.name
                : typeof blockRecord.toolName === "string"
                  ? blockRecord.toolName
                  : undefined;
            const args =
              blockRecord.arguments !== undefined
                ? blockRecord.arguments
                : blockRecord.input;

            if (id) downgradedIds.add(id);
            hasDowngraded = true;

            const argsText =
              typeof args === "string" ? args : JSON.stringify(args, null, 2);

            // Replace the structured call with an equivalent text rendering.
            return {
              type: "text",
              text: `[Tool Call: ${name ?? "unknown"}${
                id ? ` (ID: ${id})` : ""
              }]\nArguments: ${argsText}`,
            };
          }
        }
        return block;
      });

      if (hasDowngraded) {
        out.push({ ...assistantMsg, content: newContent } as AgentMessage);
      } else {
        out.push(msg);
      }
      continue;
    }

    if (role === "toolResult") {
      const toolMsg = msg as Extract<AgentMessage, { role: "toolResult" }>;
      const toolResultId = resolveToolResultId(toolMsg);
      if (toolResultId && downgradedIds.has(toolResultId)) {
        // Convert to User message
        let textContent = "";
        if (Array.isArray(toolMsg.content)) {
          textContent = toolMsg.content
            .map((entry) => {
              if (entry && typeof entry === "object") {
                const text = (entry as { text?: unknown }).text;
                if (typeof text === "string") return text;
              }
              return JSON.stringify(entry);
            })
            .join("\n");
        } else {
          textContent = JSON.stringify(toolMsg.content);
        }

        out.push({
          role: "user",
          content: [
            {
              type: "text",
              text: `[Tool Result for ID ${toolResultId}]\n${textContent}`,
            },
          ],
        } as AgentMessage);

        continue;
      }
    }

    out.push(msg);
  }
  return out;
}
|
||||
|
||||
173
src/agents/pi-embedded-helpers/bootstrap.ts
Normal file
173
src/agents/pi-embedded-helpers/bootstrap.ts
Normal file
@@ -0,0 +1,173 @@
|
||||
import fs from "node:fs/promises";
|
||||
import path from "node:path";
|
||||
|
||||
import type { AgentMessage } from "@mariozechner/pi-agent-core";
|
||||
|
||||
import type { ClawdbotConfig } from "../../config/config.js";
|
||||
import type { WorkspaceBootstrapFile } from "../workspace.js";
|
||||
import type { EmbeddedContextFile } from "./types.js";
|
||||
|
||||
// Structural view of a content block that may carry a Claude-style
// `thought_signature`; all other fields are passed through untouched.
type ContentBlockWithSignature = {
  thought_signature?: unknown;
  [key: string]: unknown;
};
|
||||
|
||||
/**
|
||||
* Strips Claude-style thought_signature fields from content blocks.
|
||||
*
|
||||
* Gemini expects thought signatures as base64-encoded bytes, but Claude stores message ids
|
||||
* like "msg_abc123...". We only strip "msg_*" to preserve any provider-valid signatures.
|
||||
*/
|
||||
export function stripThoughtSignatures<T>(content: T): T {
|
||||
if (!Array.isArray(content)) return content;
|
||||
return content.map((block) => {
|
||||
if (!block || typeof block !== "object") return block;
|
||||
const rec = block as ContentBlockWithSignature;
|
||||
const signature = rec.thought_signature;
|
||||
if (typeof signature !== "string" || !signature.startsWith("msg_")) {
|
||||
return block;
|
||||
}
|
||||
const { thought_signature: _signature, ...rest } = rec;
|
||||
return rest;
|
||||
}) as T;
|
||||
}
|
||||
|
||||
// Default cap on injected bootstrap-file characters (overridable via config).
export const DEFAULT_BOOTSTRAP_MAX_CHARS = 20_000;
// When truncating, keep 70% of the budget from the head and 20% from the tail;
// the remainder absorbs the truncation marker.
const BOOTSTRAP_HEAD_RATIO = 0.7;
const BOOTSTRAP_TAIL_RATIO = 0.2;
|
||||
|
||||
// Result of trimming one bootstrap file for context injection.
type TrimBootstrapResult = {
  content: string; // possibly-truncated content (includes marker when truncated)
  truncated: boolean; // whether any content was cut
  maxChars: number; // the limit that was applied
  originalLength: number; // length of the original (end-trimmed) content
};
|
||||
|
||||
export function resolveBootstrapMaxChars(cfg?: ClawdbotConfig): number {
|
||||
const raw = cfg?.agents?.defaults?.bootstrapMaxChars;
|
||||
if (typeof raw === "number" && Number.isFinite(raw) && raw > 0) {
|
||||
return Math.floor(raw);
|
||||
}
|
||||
return DEFAULT_BOOTSTRAP_MAX_CHARS;
|
||||
}
|
||||
|
||||
function trimBootstrapContent(
|
||||
content: string,
|
||||
fileName: string,
|
||||
maxChars: number,
|
||||
): TrimBootstrapResult {
|
||||
const trimmed = content.trimEnd();
|
||||
if (trimmed.length <= maxChars) {
|
||||
return {
|
||||
content: trimmed,
|
||||
truncated: false,
|
||||
maxChars,
|
||||
originalLength: trimmed.length,
|
||||
};
|
||||
}
|
||||
|
||||
const headChars = Math.floor(maxChars * BOOTSTRAP_HEAD_RATIO);
|
||||
const tailChars = Math.floor(maxChars * BOOTSTRAP_TAIL_RATIO);
|
||||
const head = trimmed.slice(0, headChars);
|
||||
const tail = trimmed.slice(-tailChars);
|
||||
|
||||
const marker = [
|
||||
"",
|
||||
`[...truncated, read ${fileName} for full content...]`,
|
||||
`…(truncated ${fileName}: kept ${headChars}+${tailChars} chars of ${trimmed.length})…`,
|
||||
"",
|
||||
].join("\n");
|
||||
const contentWithMarker = [head, marker, tail].join("\n");
|
||||
return {
|
||||
content: contentWithMarker,
|
||||
truncated: true,
|
||||
maxChars,
|
||||
originalLength: trimmed.length,
|
||||
};
|
||||
}
|
||||
|
||||
export async function ensureSessionHeader(params: {
|
||||
sessionFile: string;
|
||||
sessionId: string;
|
||||
cwd: string;
|
||||
}) {
|
||||
const file = params.sessionFile;
|
||||
try {
|
||||
await fs.stat(file);
|
||||
return;
|
||||
} catch {
|
||||
// create
|
||||
}
|
||||
await fs.mkdir(path.dirname(file), { recursive: true });
|
||||
const sessionVersion = 2;
|
||||
const entry = {
|
||||
type: "session",
|
||||
version: sessionVersion,
|
||||
id: params.sessionId,
|
||||
timestamp: new Date().toISOString(),
|
||||
cwd: params.cwd,
|
||||
};
|
||||
await fs.writeFile(file, `${JSON.stringify(entry)}\n`, "utf-8");
|
||||
}
|
||||
|
||||
export function buildBootstrapContextFiles(
|
||||
files: WorkspaceBootstrapFile[],
|
||||
opts?: { warn?: (message: string) => void; maxChars?: number },
|
||||
): EmbeddedContextFile[] {
|
||||
const maxChars = opts?.maxChars ?? DEFAULT_BOOTSTRAP_MAX_CHARS;
|
||||
const result: EmbeddedContextFile[] = [];
|
||||
for (const file of files) {
|
||||
if (file.missing) {
|
||||
result.push({
|
||||
path: file.name,
|
||||
content: `[MISSING] Expected at: ${file.path}`,
|
||||
});
|
||||
continue;
|
||||
}
|
||||
const trimmed = trimBootstrapContent(
|
||||
file.content ?? "",
|
||||
file.name,
|
||||
maxChars,
|
||||
);
|
||||
if (!trimmed.content) continue;
|
||||
if (trimmed.truncated) {
|
||||
opts?.warn?.(
|
||||
`workspace bootstrap file ${file.name} is ${trimmed.originalLength} chars (limit ${trimmed.maxChars}); truncating in injected context`,
|
||||
);
|
||||
}
|
||||
result.push({
|
||||
path: file.name,
|
||||
content: trimmed.content,
|
||||
});
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
export function sanitizeGoogleTurnOrdering(
|
||||
messages: AgentMessage[],
|
||||
): AgentMessage[] {
|
||||
const GOOGLE_TURN_ORDER_BOOTSTRAP_TEXT = "(session bootstrap)";
|
||||
const first = messages[0] as
|
||||
| { role?: unknown; content?: unknown }
|
||||
| undefined;
|
||||
const role = first?.role;
|
||||
const content = first?.content;
|
||||
if (
|
||||
role === "user" &&
|
||||
typeof content === "string" &&
|
||||
content.trim() === GOOGLE_TURN_ORDER_BOOTSTRAP_TEXT
|
||||
) {
|
||||
return messages;
|
||||
}
|
||||
if (role !== "assistant") return messages;
|
||||
|
||||
// Cloud Code Assist rejects histories that begin with a model turn (tool call or text).
|
||||
// Prepend a tiny synthetic user turn so the rest of the transcript can be used.
|
||||
const bootstrap: AgentMessage = {
|
||||
role: "user",
|
||||
content: GOOGLE_TURN_ORDER_BOOTSTRAP_TEXT,
|
||||
timestamp: Date.now(),
|
||||
} as AgentMessage;
|
||||
|
||||
return [bootstrap, ...messages];
|
||||
}
|
||||
220
src/agents/pi-embedded-helpers/errors.ts
Normal file
220
src/agents/pi-embedded-helpers/errors.ts
Normal file
@@ -0,0 +1,220 @@
|
||||
import type { AssistantMessage } from "@mariozechner/pi-ai";
|
||||
|
||||
import type { ClawdbotConfig } from "../../config/config.js";
|
||||
import { formatSandboxToolPolicyBlockedMessage } from "../sandbox.js";
|
||||
import type { FailoverReason } from "./types.js";
|
||||
|
||||
export function isContextOverflowError(errorMessage?: string): boolean {
|
||||
if (!errorMessage) return false;
|
||||
const lower = errorMessage.toLowerCase();
|
||||
return (
|
||||
lower.includes("request_too_large") ||
|
||||
lower.includes("request exceeds the maximum size") ||
|
||||
lower.includes("context length exceeded") ||
|
||||
lower.includes("maximum context length") ||
|
||||
lower.includes("prompt is too long") ||
|
||||
lower.includes("context overflow") ||
|
||||
(lower.includes("413") && lower.includes("too large"))
|
||||
);
|
||||
}
|
||||
|
||||
export function isCompactionFailureError(errorMessage?: string): boolean {
|
||||
if (!errorMessage) return false;
|
||||
if (!isContextOverflowError(errorMessage)) return false;
|
||||
const lower = errorMessage.toLowerCase();
|
||||
return (
|
||||
lower.includes("summarization failed") ||
|
||||
lower.includes("auto-compaction") ||
|
||||
lower.includes("compaction failed") ||
|
||||
lower.includes("compaction")
|
||||
);
|
||||
}
|
||||
|
||||
/**
 * Produces a user-facing description of an assistant error.
 * Returns undefined when the message did not stop with an error.
 * Known shapes (unknown tool, context overflow, role ordering,
 * invalid_request_error payloads, overload) are rewritten into friendlier
 * guidance; anything else is passed through, truncated to 600 chars.
 */
export function formatAssistantErrorText(
  msg: AssistantMessage,
  opts?: { cfg?: ClawdbotConfig; sessionKey?: string },
): string | undefined {
  if (msg.stopReason !== "error") return undefined;
  const raw = (msg.errorMessage ?? "").trim();
  if (!raw) return "LLM request failed with an unknown error.";

  // "Unknown tool X" often means the sandbox policy removed the tool;
  // prefer the sandbox-specific explanation when one applies.
  const unknownTool =
    raw.match(/unknown tool[:\s]+["']?([a-z0-9_-]+)["']?/i) ??
    raw.match(
      /tool\s+["']?([a-z0-9_-]+)["']?\s+(?:not found|is not available)/i,
    );
  if (unknownTool?.[1]) {
    const rewritten = formatSandboxToolPolicyBlockedMessage({
      cfg: opts?.cfg,
      sessionKey: opts?.sessionKey,
      toolName: unknownTool[1],
    });
    if (rewritten) return rewritten;
  }

  if (isContextOverflowError(raw)) {
    return (
      "Context overflow: prompt too large for the model. " +
      "Try again with less input or a larger-context model."
    );
  }

  if (/incorrect role information|roles must alternate/i.test(raw)) {
    return (
      "Message ordering conflict - please try again. " +
      "If this persists, use /new to start a fresh session."
    );
  }

  // Pull the human-readable message out of a JSON invalid_request_error body.
  const invalidRequest = raw.match(
    /"type":"invalid_request_error".*?"message":"([^"]+)"/,
  );
  if (invalidRequest?.[1]) {
    return `LLM request rejected: ${invalidRequest[1]}`;
  }

  if (isOverloadedErrorMessage(raw)) {
    return "The AI service is temporarily overloaded. Please try again in a moment.";
  }

  // Fallback: surface the raw provider error, truncated for display.
  return raw.length > 600 ? `${raw.slice(0, 600)}…` : raw;
}
|
||||
|
||||
/**
 * True when an assistant message ended in error and the error text matches
 * rate-limit / quota patterns.
 */
export function isRateLimitAssistantError(
  msg: AssistantMessage | undefined,
): boolean {
  if (!msg || msg.stopReason !== "error") return false;
  return isRateLimitErrorMessage(msg.errorMessage ?? "");
}
|
||||
|
||||
// A pattern for matching error text: RegExps are tested against the
// lowercased message, strings are matched by substring containment.
type ErrorPattern = RegExp | string;

// Classification markers for provider error text. All matching happens
// against the lowercased message (see matchesErrorPatterns), so string
// entries must be lowercase.
const ERROR_PATTERNS = {
  // HTTP 429s, quota exhaustion, and usage caps.
  rateLimit: [
    /rate[_ ]limit|too many requests|429/,
    "exceeded your current quota",
    "resource has been exhausted",
    "quota exceeded",
    "resource_exhausted",
    "usage limit",
  ],
  // Anthropic-style "overloaded" responses.
  overloaded: [
    /overloaded_error|"type"\s*:\s*"overloaded_error"/i,
    "overloaded",
  ],
  // Request / connection deadline failures.
  timeout: [
    "timeout",
    "timed out",
    "deadline exceeded",
    "context deadline exceeded",
  ],
  // HTTP 402 and billing/credit copy from providers.
  billing: [
    /\b402\b/,
    "payment required",
    "insufficient credits",
    "credit balance",
    "plans & billing",
  ],
  // Credential and permission failures (401/403, missing or expired keys).
  auth: [
    /invalid[_ ]?api[_ ]?key/,
    "incorrect api key",
    "invalid token",
    "authentication",
    "unauthorized",
    "forbidden",
    "access denied",
    "expired",
    "token has expired",
    /\b401\b/,
    /\b403\b/,
    "no credentials found",
    "no api key found",
  ],
  // Malformed-request rejections, notably Cloud Code Assist tool-call ids.
  format: [
    "invalid_request_error",
    "string should match pattern",
    "tool_use.id",
    "tool_use_id",
    "messages.1.content.1.tool_use.id",
    "invalid request format",
  ],
} as const;
|
||||
|
||||
function matchesErrorPatterns(
|
||||
raw: string,
|
||||
patterns: readonly ErrorPattern[],
|
||||
): boolean {
|
||||
if (!raw) return false;
|
||||
const value = raw.toLowerCase();
|
||||
return patterns.some((pattern) =>
|
||||
pattern instanceof RegExp ? pattern.test(value) : value.includes(pattern),
|
||||
);
|
||||
}
|
||||
|
||||
/** True when the error text matches rate-limit / quota patterns. */
export function isRateLimitErrorMessage(raw: string): boolean {
  return matchesErrorPatterns(raw, ERROR_PATTERNS.rateLimit);
}
|
||||
|
||||
/** True when the error text indicates a request or connection timeout. */
export function isTimeoutErrorMessage(raw: string): boolean {
  return matchesErrorPatterns(raw, ERROR_PATTERNS.timeout);
}
|
||||
|
||||
export function isBillingErrorMessage(raw: string): boolean {
|
||||
const value = raw.toLowerCase();
|
||||
if (!value) return false;
|
||||
if (matchesErrorPatterns(value, ERROR_PATTERNS.billing)) return true;
|
||||
return (
|
||||
value.includes("billing") &&
|
||||
(value.includes("upgrade") ||
|
||||
value.includes("credits") ||
|
||||
value.includes("payment") ||
|
||||
value.includes("plan"))
|
||||
);
|
||||
}
|
||||
|
||||
/** True when an assistant message errored with a billing/credit problem. */
export function isBillingAssistantError(
  msg: AssistantMessage | undefined,
): boolean {
  if (!msg || msg.stopReason !== "error") return false;
  return isBillingErrorMessage(msg.errorMessage ?? "");
}
|
||||
|
||||
/** True when the error text indicates a credential/permission failure. */
export function isAuthErrorMessage(raw: string): boolean {
  return matchesErrorPatterns(raw, ERROR_PATTERNS.auth);
}
|
||||
|
||||
/** True when the error text indicates the provider is overloaded. */
export function isOverloadedErrorMessage(raw: string): boolean {
  return matchesErrorPatterns(raw, ERROR_PATTERNS.overloaded);
}
|
||||
|
||||
/** True for malformed-request rejections (e.g. Cloud Code Assist tool ids). */
export function isCloudCodeAssistFormatError(raw: string): boolean {
  return matchesErrorPatterns(raw, ERROR_PATTERNS.format);
}
|
||||
|
||||
/** True when an assistant message errored with an auth/permission failure. */
export function isAuthAssistantError(
  msg: AssistantMessage | undefined,
): boolean {
  if (!msg || msg.stopReason !== "error") return false;
  return isAuthErrorMessage(msg.errorMessage ?? "");
}
|
||||
|
||||
/**
 * Maps error text to the failover reason that should trigger trying another
 * model/provider, or null when the error is not failover-worthy.
 * Overload is folded into "rate_limit" because the remedy is the same.
 * Check order matters: earlier classifications win for ambiguous messages.
 */
export function classifyFailoverReason(raw: string): FailoverReason | null {
  if (isRateLimitErrorMessage(raw)) return "rate_limit";
  if (isOverloadedErrorMessage(raw)) return "rate_limit";
  if (isCloudCodeAssistFormatError(raw)) return "format";
  if (isBillingErrorMessage(raw)) return "billing";
  if (isTimeoutErrorMessage(raw)) return "timeout";
  if (isAuthErrorMessage(raw)) return "auth";
  return null;
}
|
||||
|
||||
/** True when the error text maps to any failover reason. */
export function isFailoverErrorMessage(raw: string): boolean {
  return classifyFailoverReason(raw) !== null;
}
|
||||
|
||||
/** True when an assistant message errored with a failover-worthy failure. */
export function isFailoverAssistantError(
  msg: AssistantMessage | undefined,
): boolean {
  if (!msg || msg.stopReason !== "error") return false;
  return isFailoverErrorMessage(msg.errorMessage ?? "");
}
|
||||
151
src/agents/pi-embedded-helpers/google.ts
Normal file
151
src/agents/pi-embedded-helpers/google.ts
Normal file
@@ -0,0 +1,151 @@
|
||||
import type { AgentMessage } from "@mariozechner/pi-agent-core";
|
||||
|
||||
import { sanitizeGoogleTurnOrdering } from "./bootstrap.js";
|
||||
|
||||
export function isGoogleModelApi(api?: string | null): boolean {
|
||||
return (
|
||||
api === "google-gemini-cli" ||
|
||||
api === "google-generative-ai" ||
|
||||
api === "google-antigravity"
|
||||
);
|
||||
}
|
||||
|
||||
export { sanitizeGoogleTurnOrdering };
|
||||
|
||||
/**
 * Downgrades tool calls that are missing `thought_signature` (required by Gemini)
 * into text representations, to prevent 400 INVALID_ARGUMENT errors.
 * Also converts corresponding tool results into user messages.
 */
// Loose structural view of a tool-call content block; field names differ by
// provider (`id` vs `toolCallId`, `name` vs `toolName`, `arguments` vs `input`).
type GeminiToolCallBlock = {
  type?: unknown;
  thought_signature?: unknown;
  id?: unknown;
  toolCallId?: unknown;
  name?: unknown;
  toolName?: unknown;
  arguments?: unknown;
  input?: unknown;
};
|
||||
|
||||
/**
 * Downgrades assistant tool calls that are missing `thought_signature`
 * (required by Gemini) into plain text blocks, preventing Gemini's
 * 400 INVALID_ARGUMENT rejections. Tool results that answered a downgraded
 * call are converted into user text messages so the transcript stays coherent
 * without orphaned tool results.
 */
export function downgradeGeminiHistory(
  messages: AgentMessage[],
): AgentMessage[] {
  // IDs of tool calls we converted to text; their results must follow suit.
  const downgradedIds = new Set<string>();
  const out: AgentMessage[] = [];

  // Providers disagree on the result-id field name; accept either spelling.
  const resolveToolResultId = (
    msg: Extract<AgentMessage, { role: "toolResult" }>,
  ): string | undefined => {
    const toolCallId = (msg as { toolCallId?: unknown }).toolCallId;
    if (typeof toolCallId === "string" && toolCallId) return toolCallId;
    const toolUseId = (msg as { toolUseId?: unknown }).toolUseId;
    if (typeof toolUseId === "string" && toolUseId) return toolUseId;
    return undefined;
  };

  for (const msg of messages) {
    if (!msg || typeof msg !== "object") {
      out.push(msg);
      continue;
    }

    const role = (msg as { role?: unknown }).role;
    if (role === "assistant") {
      const assistantMsg = msg as Extract<AgentMessage, { role: "assistant" }>;
      if (!Array.isArray(assistantMsg.content)) {
        out.push(msg);
        continue;
      }

      let hasDowngraded = false;
      const newContent = assistantMsg.content.map((block) => {
        if (!block || typeof block !== "object") return block;
        const blockRecord = block as GeminiToolCallBlock;
        const type = blockRecord.type;
        // Tool-call blocks appear under several provider-specific type names.
        if (
          type === "toolCall" ||
          type === "functionCall" ||
          type === "toolUse"
        ) {
          const hasSignature = Boolean(blockRecord.thought_signature);
          if (!hasSignature) {
            // Field names vary by provider: id/toolCallId, name/toolName,
            // arguments/input — coalesce each pair.
            const id =
              typeof blockRecord.id === "string"
                ? blockRecord.id
                : typeof blockRecord.toolCallId === "string"
                  ? blockRecord.toolCallId
                  : undefined;
            const name =
              typeof blockRecord.name === "string"
                ? blockRecord.name
                : typeof blockRecord.toolName === "string"
                  ? blockRecord.toolName
                  : undefined;
            const args =
              blockRecord.arguments !== undefined
                ? blockRecord.arguments
                : blockRecord.input;

            if (id) downgradedIds.add(id);
            hasDowngraded = true;

            const argsText =
              typeof args === "string" ? args : JSON.stringify(args, null, 2);

            // Replace the structured call with an equivalent text rendering.
            return {
              type: "text",
              text: `[Tool Call: ${name ?? "unknown"}${
                id ? ` (ID: ${id})` : ""
              }]\nArguments: ${argsText}`,
            };
          }
        }
        return block;
      });

      out.push(
        hasDowngraded
          ? ({ ...assistantMsg, content: newContent } as AgentMessage)
          : msg,
      );
      continue;
    }

    if (role === "toolResult") {
      const toolMsg = msg as Extract<AgentMessage, { role: "toolResult" }>;
      const toolResultId = resolveToolResultId(toolMsg);
      if (toolResultId && downgradedIds.has(toolResultId)) {
        // The matching call was downgraded: convert this result to user text.
        let textContent = "";
        if (Array.isArray(toolMsg.content)) {
          textContent = toolMsg.content
            .map((entry) => {
              if (entry && typeof entry === "object") {
                const text = (entry as { text?: unknown }).text;
                if (typeof text === "string") return text;
              }
              return JSON.stringify(entry);
            })
            .join("\n");
        } else {
          textContent = JSON.stringify(toolMsg.content);
        }

        out.push({
          role: "user",
          content: [
            {
              type: "text",
              text: `[Tool Result for ID ${toolResultId}]\n${textContent}`,
            },
          ],
        } as AgentMessage);

        continue;
      }
    }

    out.push(msg);
  }
  return out;
}
|
||||
124
src/agents/pi-embedded-helpers/images.ts
Normal file
124
src/agents/pi-embedded-helpers/images.ts
Normal file
@@ -0,0 +1,124 @@
|
||||
import type {
|
||||
AgentMessage,
|
||||
AgentToolResult,
|
||||
} from "@mariozechner/pi-agent-core";
|
||||
|
||||
import { sanitizeToolCallIdsForCloudCodeAssist } from "../tool-call-id.js";
|
||||
import { sanitizeContentBlocksImages } from "../tool-images.js";
|
||||
import { stripThoughtSignatures } from "./bootstrap.js";
|
||||
|
||||
// A single content entry of a tool result (text, image, etc.).
type ContentBlock = AgentToolResult<unknown>["content"][number];
|
||||
|
||||
export function isEmptyAssistantMessageContent(
|
||||
message: Extract<AgentMessage, { role: "assistant" }>,
|
||||
): boolean {
|
||||
const content = message.content;
|
||||
if (content == null) return true;
|
||||
if (!Array.isArray(content)) return false;
|
||||
return content.every((block) => {
|
||||
if (!block || typeof block !== "object") return true;
|
||||
const rec = block as { type?: unknown; text?: unknown };
|
||||
if (rec.type !== "text") return false;
|
||||
return typeof rec.text !== "string" || rec.text.trim().length === 0;
|
||||
});
|
||||
}
|
||||
|
||||
// True for assistant messages that errored AND carry no renderable content —
// such empty error shells are dropped from sanitized transcripts.
function isEmptyAssistantErrorMessage(
  message: Extract<AgentMessage, { role: "assistant" }>,
): boolean {
  if (message.stopReason !== "error") return false;
  return isEmptyAssistantMessageContent(message);
}
|
||||
|
||||
/**
 * Sanitizes a historical session transcript before replay to a provider:
 * - resizes/normalizes embedded images in user, toolResult and assistant turns
 * - optionally rewrites tool call IDs for Cloud Code Assist compatibility
 * - strips Claude "msg_*" thought signatures and blank text blocks from
 *   assistant turns, dropping empty error shells entirely
 * - optionally truncates assistant content after the last tool-call block
 *
 * `label` is used for logging/diagnostics by the image sanitizer.
 */
export async function sanitizeSessionMessagesImages(
  messages: AgentMessage[],
  label: string,
  options?: { sanitizeToolCallIds?: boolean; enforceToolCallLast?: boolean },
): Promise<AgentMessage[]> {
  // We sanitize historical session messages because Anthropic can reject a request
  // if the transcript contains oversized base64 images (see MAX_IMAGE_DIMENSION_PX).
  const sanitizedIds = options?.sanitizeToolCallIds
    ? sanitizeToolCallIdsForCloudCodeAssist(messages)
    : messages;
  const out: AgentMessage[] = [];
  for (const msg of sanitizedIds) {
    if (!msg || typeof msg !== "object") {
      out.push(msg);
      continue;
    }

    const role = (msg as { role?: unknown }).role;
    if (role === "toolResult") {
      const toolMsg = msg as Extract<AgentMessage, { role: "toolResult" }>;
      const content = Array.isArray(toolMsg.content) ? toolMsg.content : [];
      const nextContent = (await sanitizeContentBlocksImages(
        content as ContentBlock[],
        label,
      )) as unknown as typeof toolMsg.content;
      out.push({ ...toolMsg, content: nextContent });
      continue;
    }

    if (role === "user") {
      const userMsg = msg as Extract<AgentMessage, { role: "user" }>;
      const content = userMsg.content;
      // String-content user messages fall through to the default push below.
      if (Array.isArray(content)) {
        const nextContent = (await sanitizeContentBlocksImages(
          content as unknown as ContentBlock[],
          label,
        )) as unknown as typeof userMsg.content;
        out.push({ ...userMsg, content: nextContent });
        continue;
      }
    }

    if (role === "assistant") {
      const assistantMsg = msg as Extract<AgentMessage, { role: "assistant" }>;
      // Drop error messages that have no renderable content at all.
      if (isEmptyAssistantErrorMessage(assistantMsg)) {
        continue;
      }
      const content = assistantMsg.content;
      if (Array.isArray(content)) {
        const strippedContent = stripThoughtSignatures(content);
        // Remove blank text blocks; non-text blocks always survive.
        const filteredContent = strippedContent.filter((block) => {
          if (!block || typeof block !== "object") return true;
          const rec = block as { type?: unknown; text?: unknown };
          if (rec.type !== "text" || typeof rec.text !== "string") return true;
          return rec.text.trim().length > 0;
        });
        // Optionally cut everything after the last tool-call block so the
        // assistant turn ends on the call its tool result answers.
        const normalizedContent = options?.enforceToolCallLast
          ? (() => {
              let lastToolIndex = -1;
              for (let i = filteredContent.length - 1; i >= 0; i -= 1) {
                const block = filteredContent[i];
                if (!block || typeof block !== "object") continue;
                const type = (block as { type?: unknown }).type;
                if (
                  type === "functionCall" ||
                  type === "toolUse" ||
                  type === "toolCall"
                ) {
                  lastToolIndex = i;
                  break;
                }
              }
              if (lastToolIndex === -1) return filteredContent;
              return filteredContent.slice(0, lastToolIndex + 1);
            })()
          : filteredContent;
        const finalContent = (await sanitizeContentBlocksImages(
          normalizedContent as unknown as ContentBlock[],
          label,
        )) as unknown as typeof assistantMsg.content;
        // Filtering may have emptied the message; drop it rather than send
        // an empty assistant turn.
        if (finalContent.length === 0) {
          continue;
        }
        out.push({ ...assistantMsg, content: finalContent });
        continue;
      }
    }

    out.push(msg);
  }
  return out;
}
|
||||
47
src/agents/pi-embedded-helpers/messaging-dedupe.ts
Normal file
47
src/agents/pi-embedded-helpers/messaging-dedupe.ts
Normal file
@@ -0,0 +1,47 @@
|
||||
const MIN_DUPLICATE_TEXT_LENGTH = 10;
|
||||
|
||||
/**
|
||||
* Normalize text for duplicate comparison.
|
||||
* - Trims whitespace
|
||||
* - Lowercases
|
||||
* - Strips emoji (Emoji_Presentation and Extended_Pictographic)
|
||||
* - Collapses multiple spaces to single space
|
||||
*/
|
||||
export function normalizeTextForComparison(text: string): string {
|
||||
return text
|
||||
.trim()
|
||||
.toLowerCase()
|
||||
.replace(/\p{Emoji_Presentation}|\p{Extended_Pictographic}/gu, "")
|
||||
.replace(/\s+/g, " ")
|
||||
.trim();
|
||||
}
|
||||
|
||||
export function isMessagingToolDuplicateNormalized(
|
||||
normalized: string,
|
||||
normalizedSentTexts: string[],
|
||||
): boolean {
|
||||
if (normalizedSentTexts.length === 0) return false;
|
||||
if (!normalized || normalized.length < MIN_DUPLICATE_TEXT_LENGTH)
|
||||
return false;
|
||||
return normalizedSentTexts.some((normalizedSent) => {
|
||||
if (!normalizedSent || normalizedSent.length < MIN_DUPLICATE_TEXT_LENGTH)
|
||||
return false;
|
||||
return (
|
||||
normalized.includes(normalizedSent) || normalizedSent.includes(normalized)
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
export function isMessagingToolDuplicate(
|
||||
text: string,
|
||||
sentTexts: string[],
|
||||
): boolean {
|
||||
if (sentTexts.length === 0) return false;
|
||||
const normalized = normalizeTextForComparison(text);
|
||||
if (!normalized || normalized.length < MIN_DUPLICATE_TEXT_LENGTH)
|
||||
return false;
|
||||
return isMessagingToolDuplicateNormalized(
|
||||
normalized,
|
||||
sentTexts.map(normalizeTextForComparison),
|
||||
);
|
||||
}
|
||||
39
src/agents/pi-embedded-helpers/thinking.ts
Normal file
39
src/agents/pi-embedded-helpers/thinking.ts
Normal file
@@ -0,0 +1,39 @@
|
||||
import {
|
||||
normalizeThinkLevel,
|
||||
type ThinkLevel,
|
||||
} from "../../auto-reply/thinking.js";
|
||||
|
||||
function extractSupportedValues(raw: string): string[] {
|
||||
const match =
|
||||
raw.match(/supported values are:\s*([^\n.]+)/i) ??
|
||||
raw.match(/supported values:\s*([^\n.]+)/i);
|
||||
if (!match?.[1]) return [];
|
||||
const fragment = match[1];
|
||||
const quoted = Array.from(fragment.matchAll(/['"]([^'"]+)['"]/g)).map(
|
||||
(entry) => entry[1]?.trim(),
|
||||
);
|
||||
if (quoted.length > 0) {
|
||||
return quoted.filter((entry): entry is string => Boolean(entry));
|
||||
}
|
||||
return fragment
|
||||
.split(/,|\band\b/gi)
|
||||
.map((entry) => entry.replace(/^[^a-zA-Z]+|[^a-zA-Z]+$/g, "").trim())
|
||||
.filter(Boolean);
|
||||
}
|
||||
|
||||
export function pickFallbackThinkingLevel(params: {
|
||||
message?: string;
|
||||
attempted: Set<ThinkLevel>;
|
||||
}): ThinkLevel | undefined {
|
||||
const raw = params.message?.trim();
|
||||
if (!raw) return undefined;
|
||||
const supported = extractSupportedValues(raw);
|
||||
if (supported.length === 0) return undefined;
|
||||
for (const entry of supported) {
|
||||
const normalized = normalizeThinkLevel(entry);
|
||||
if (!normalized) continue;
|
||||
if (params.attempted.has(normalized)) continue;
|
||||
return normalized;
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
124
src/agents/pi-embedded-helpers/turns.ts
Normal file
124
src/agents/pi-embedded-helpers/turns.ts
Normal file
@@ -0,0 +1,124 @@
|
||||
import type { AgentMessage } from "@mariozechner/pi-agent-core";
|
||||
|
||||
/**
|
||||
* Validates and fixes conversation turn sequences for Gemini API.
|
||||
* Gemini requires strict alternating user→assistant→tool→user pattern.
|
||||
* Merges consecutive assistant messages together.
|
||||
*/
|
||||
export function validateGeminiTurns(messages: AgentMessage[]): AgentMessage[] {
|
||||
if (!Array.isArray(messages) || messages.length === 0) {
|
||||
return messages;
|
||||
}
|
||||
|
||||
const result: AgentMessage[] = [];
|
||||
let lastRole: string | undefined;
|
||||
|
||||
for (const msg of messages) {
|
||||
if (!msg || typeof msg !== "object") {
|
||||
result.push(msg);
|
||||
continue;
|
||||
}
|
||||
|
||||
const msgRole = (msg as { role?: unknown }).role as string | undefined;
|
||||
if (!msgRole) {
|
||||
result.push(msg);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (msgRole === lastRole && lastRole === "assistant") {
|
||||
const lastMsg = result[result.length - 1];
|
||||
const currentMsg = msg as Extract<AgentMessage, { role: "assistant" }>;
|
||||
|
||||
if (lastMsg && typeof lastMsg === "object") {
|
||||
const lastAsst = lastMsg as Extract<
|
||||
AgentMessage,
|
||||
{ role: "assistant" }
|
||||
>;
|
||||
const mergedContent = [
|
||||
...(Array.isArray(lastAsst.content) ? lastAsst.content : []),
|
||||
...(Array.isArray(currentMsg.content) ? currentMsg.content : []),
|
||||
];
|
||||
|
||||
const merged: Extract<AgentMessage, { role: "assistant" }> = {
|
||||
...lastAsst,
|
||||
content: mergedContent,
|
||||
...(currentMsg.usage && { usage: currentMsg.usage }),
|
||||
...(currentMsg.stopReason && { stopReason: currentMsg.stopReason }),
|
||||
...(currentMsg.errorMessage && {
|
||||
errorMessage: currentMsg.errorMessage,
|
||||
}),
|
||||
};
|
||||
|
||||
result[result.length - 1] = merged;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
result.push(msg);
|
||||
lastRole = msgRole;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
export function mergeConsecutiveUserTurns(
|
||||
previous: Extract<AgentMessage, { role: "user" }>,
|
||||
current: Extract<AgentMessage, { role: "user" }>,
|
||||
): Extract<AgentMessage, { role: "user" }> {
|
||||
const mergedContent = [
|
||||
...(Array.isArray(previous.content) ? previous.content : []),
|
||||
...(Array.isArray(current.content) ? current.content : []),
|
||||
];
|
||||
|
||||
return {
|
||||
...current,
|
||||
content: mergedContent,
|
||||
timestamp: current.timestamp ?? previous.timestamp,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates and fixes conversation turn sequences for Anthropic API.
|
||||
* Anthropic requires strict alternating user→assistant pattern.
|
||||
* Merges consecutive user messages together.
|
||||
*/
|
||||
export function validateAnthropicTurns(
|
||||
messages: AgentMessage[],
|
||||
): AgentMessage[] {
|
||||
if (!Array.isArray(messages) || messages.length === 0) {
|
||||
return messages;
|
||||
}
|
||||
|
||||
const result: AgentMessage[] = [];
|
||||
let lastRole: string | undefined;
|
||||
|
||||
for (const msg of messages) {
|
||||
if (!msg || typeof msg !== "object") {
|
||||
result.push(msg);
|
||||
continue;
|
||||
}
|
||||
|
||||
const msgRole = (msg as { role?: unknown }).role as string | undefined;
|
||||
if (!msgRole) {
|
||||
result.push(msg);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (msgRole === lastRole && lastRole === "user") {
|
||||
const lastMsg = result[result.length - 1];
|
||||
const currentMsg = msg as Extract<AgentMessage, { role: "user" }>;
|
||||
|
||||
if (lastMsg && typeof lastMsg === "object") {
|
||||
const lastUser = lastMsg as Extract<AgentMessage, { role: "user" }>;
|
||||
const merged = mergeConsecutiveUserTurns(lastUser, currentMsg);
|
||||
result[result.length - 1] = merged;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
result.push(msg);
|
||||
lastRole = msgRole;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
9
src/agents/pi-embedded-helpers/types.ts
Normal file
9
src/agents/pi-embedded-helpers/types.ts
Normal file
@@ -0,0 +1,9 @@
|
||||
/** A file injected into the embedded agent's context: its path plus full text content. */
export type EmbeddedContextFile = { path: string; content: string };

/**
 * Classification of why a model request failed, used to decide failover.
 * "unknown" is the catch-all for errors that match no specific category.
 */
export type FailoverReason =
  | "auth"
  | "format"
  | "rate_limit"
  | "billing"
  | "timeout"
  | "unknown";
||||
161
src/agents/pi-embedded-runner.applygoogleturnorderingfix.test.ts
Normal file
161
src/agents/pi-embedded-runner.applygoogleturnorderingfix.test.ts
Normal file
@@ -0,0 +1,161 @@
|
||||
import fs from "node:fs/promises";
|
||||
import type { AgentMessage } from "@mariozechner/pi-agent-core";
|
||||
import { SessionManager } from "@mariozechner/pi-coding-agent";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
import { ensureClawdbotModelsJson } from "./models-config.js";
|
||||
import { applyGoogleTurnOrderingFix } from "./pi-embedded-runner.js";
|
||||
|
||||
// Mock the pi-ai streaming entry point so tests never hit a real provider:
// model id "mock-error" throws synchronously; every other model emits a
// single successful "done" event with a fixed assistant reply ("ok") and
// fixed token usage.
vi.mock("@mariozechner/pi-ai", async () => {
  const actual = await vi.importActual<typeof import("@mariozechner/pi-ai")>(
    "@mariozechner/pi-ai",
  );
  return {
    ...actual,
    streamSimple: (model: { api: string; provider: string; id: string }) => {
      if (model.id === "mock-error") {
        throw new Error("boom");
      }
      const stream = new actual.AssistantMessageEventStream();
      // Push asynchronously so the caller can attach listeners first.
      queueMicrotask(() => {
        stream.push({
          type: "done",
          reason: "stop",
          message: {
            role: "assistant",
            content: [{ type: "text", text: "ok" }],
            stopReason: "stop",
            api: model.api,
            provider: model.provider,
            model: model.id,
            usage: {
              input: 1,
              output: 1,
              cacheRead: 0,
              cacheWrite: 0,
              totalTokens: 2,
              cost: {
                input: 0,
                output: 0,
                cacheRead: 0,
                cacheWrite: 0,
                total: 0,
              },
            },
            timestamp: Date.now(),
          },
        });
      });
      return stream;
    },
  };
});

// NOTE(review): the underscore-prefixed helpers below appear unused in this
// file and are duplicated verbatim across the sibling
// pi-embedded-runner.*.test.ts files — they look like leftovers from the
// module split. Consider deleting them or moving them to a shared test helper.

// Builds a minimal config with one openai-responses provider exposing the
// given zero-cost mock model ids.
const _makeOpenAiConfig = (modelIds: string[]) =>
  ({
    models: {
      providers: {
        openai: {
          api: "openai-responses",
          apiKey: "sk-test",
          baseUrl: "https://example.com",
          models: modelIds.map((id) => ({
            id,
            name: `Mock ${id}`,
            reasoning: false,
            input: ["text"],
            cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
            contextWindow: 16_000,
            maxTokens: 2048,
          })),
        },
      },
    },
  }) satisfies ClawdbotConfig;

// Thin wrapper over ensureClawdbotModelsJson for test setup.
const _ensureModels = (cfg: ClawdbotConfig, agentDir: string) =>
  ensureClawdbotModelsJson(cfg, agentDir);

// Extracts the text of a message's first content block, if any.
const _textFromContent = (content: unknown) => {
  if (typeof content === "string") return content;
  if (Array.isArray(content) && content[0]?.type === "text") {
    return (content[0] as { text?: string }).text;
  }
  return undefined;
};

// Reads a JSONL session file and returns the payloads of all "message"
// entries (other entry types are filtered out).
const _readSessionMessages = async (sessionFile: string) => {
  const raw = await fs.readFile(sessionFile, "utf-8");
  return raw
    .split(/\r?\n/)
    .filter(Boolean)
    .map(
      (line) =>
        JSON.parse(line) as {
          type?: string;
          message?: { role?: string; content?: unknown };
        },
    )
    .filter((entry) => entry.type === "message")
    .map((entry) => entry.message as { role?: string; content?: unknown });
};
|
||||
|
||||
// applyGoogleTurnOrderingFix prepends a bootstrap user turn when a Google
// model's history starts with an assistant message, and records a session
// marker so the warning/bootstrap only happens once per session.
describe("applyGoogleTurnOrderingFix", () => {
  // Minimal history that starts with an assistant tool call (invalid first
  // turn for Google models).
  const makeAssistantFirst = () =>
    [
      {
        role: "assistant",
        content: [
          { type: "toolCall", id: "call_1", name: "exec", arguments: {} },
        ],
      },
    ] satisfies AgentMessage[];

  it("prepends a bootstrap once and records a marker for Google models", () => {
    const sessionManager = SessionManager.inMemory();
    const warn = vi.fn();
    const input = makeAssistantFirst();
    const first = applyGoogleTurnOrderingFix({
      messages: input,
      modelApi: "google-generative-ai",
      sessionManager,
      sessionId: "session:1",
      warn,
    });
    // The bootstrap user turn is inserted ahead of the assistant turn.
    expect(first.messages[0]?.role).toBe("user");
    expect(first.messages[1]?.role).toBe("assistant");
    expect(warn).toHaveBeenCalledTimes(1);
    // A custom session entry marks that the bootstrap already happened.
    expect(
      sessionManager
        .getEntries()
        .some(
          (entry) =>
            entry.type === "custom" &&
            entry.customType === "google-turn-ordering-bootstrap",
        ),
    ).toBe(true);

    // A second invocation on the same session must not warn again.
    applyGoogleTurnOrderingFix({
      messages: input,
      modelApi: "google-generative-ai",
      sessionManager,
      sessionId: "session:1",
      warn,
    });
    expect(warn).toHaveBeenCalledTimes(1);
  });
  it("skips non-Google models", () => {
    const sessionManager = SessionManager.inMemory();
    const warn = vi.fn();
    const input = makeAssistantFirst();
    const result = applyGoogleTurnOrderingFix({
      messages: input,
      modelApi: "openai",
      sessionManager,
      sessionId: "session:2",
      warn,
    });
    // Same array instance is returned — no copy, no modification.
    expect(result.messages).toBe(input);
    expect(warn).not.toHaveBeenCalled();
  });
});
|
||||
190
src/agents/pi-embedded-runner.buildembeddedsandboxinfo.test.ts
Normal file
190
src/agents/pi-embedded-runner.buildembeddedsandboxinfo.test.ts
Normal file
@@ -0,0 +1,190 @@
|
||||
import fs from "node:fs/promises";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
import { ensureClawdbotModelsJson } from "./models-config.js";
|
||||
import { buildEmbeddedSandboxInfo } from "./pi-embedded-runner.js";
|
||||
import type { SandboxContext } from "./sandbox.js";
|
||||
|
||||
vi.mock("@mariozechner/pi-ai", async () => {
|
||||
const actual = await vi.importActual<typeof import("@mariozechner/pi-ai")>(
|
||||
"@mariozechner/pi-ai",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
streamSimple: (model: { api: string; provider: string; id: string }) => {
|
||||
if (model.id === "mock-error") {
|
||||
throw new Error("boom");
|
||||
}
|
||||
const stream = new actual.AssistantMessageEventStream();
|
||||
queueMicrotask(() => {
|
||||
stream.push({
|
||||
type: "done",
|
||||
reason: "stop",
|
||||
message: {
|
||||
role: "assistant",
|
||||
content: [{ type: "text", text: "ok" }],
|
||||
stopReason: "stop",
|
||||
api: model.api,
|
||||
provider: model.provider,
|
||||
model: model.id,
|
||||
usage: {
|
||||
input: 1,
|
||||
output: 1,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
totalTokens: 2,
|
||||
cost: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
total: 0,
|
||||
},
|
||||
},
|
||||
timestamp: Date.now(),
|
||||
},
|
||||
});
|
||||
});
|
||||
return stream;
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
const _makeOpenAiConfig = (modelIds: string[]) =>
|
||||
({
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
api: "openai-responses",
|
||||
apiKey: "sk-test",
|
||||
baseUrl: "https://example.com",
|
||||
models: modelIds.map((id) => ({
|
||||
id,
|
||||
name: `Mock ${id}`,
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 16_000,
|
||||
maxTokens: 2048,
|
||||
})),
|
||||
},
|
||||
},
|
||||
},
|
||||
}) satisfies ClawdbotConfig;
|
||||
|
||||
const _ensureModels = (cfg: ClawdbotConfig, agentDir: string) =>
|
||||
ensureClawdbotModelsJson(cfg, agentDir);
|
||||
|
||||
const _textFromContent = (content: unknown) => {
|
||||
if (typeof content === "string") return content;
|
||||
if (Array.isArray(content) && content[0]?.type === "text") {
|
||||
return (content[0] as { text?: string }).text;
|
||||
}
|
||||
return undefined;
|
||||
};
|
||||
|
||||
const _readSessionMessages = async (sessionFile: string) => {
|
||||
const raw = await fs.readFile(sessionFile, "utf-8");
|
||||
return raw
|
||||
.split(/\r?\n/)
|
||||
.filter(Boolean)
|
||||
.map(
|
||||
(line) =>
|
||||
JSON.parse(line) as {
|
||||
type?: string;
|
||||
message?: { role?: string; content?: unknown };
|
||||
},
|
||||
)
|
||||
.filter((entry) => entry.type === "message")
|
||||
.map((entry) => entry.message as { role?: string; content?: unknown });
|
||||
};
|
||||
|
||||
describe("buildEmbeddedSandboxInfo", () => {
|
||||
it("returns undefined when sandbox is missing", () => {
|
||||
expect(buildEmbeddedSandboxInfo()).toBeUndefined();
|
||||
});
|
||||
it("maps sandbox context into prompt info", () => {
|
||||
const sandbox = {
|
||||
enabled: true,
|
||||
sessionKey: "session:test",
|
||||
workspaceDir: "/tmp/clawdbot-sandbox",
|
||||
agentWorkspaceDir: "/tmp/clawdbot-workspace",
|
||||
workspaceAccess: "none",
|
||||
containerName: "clawdbot-sbx-test",
|
||||
containerWorkdir: "/workspace",
|
||||
docker: {
|
||||
image: "clawdbot-sandbox:bookworm-slim",
|
||||
containerPrefix: "clawdbot-sbx-",
|
||||
workdir: "/workspace",
|
||||
readOnlyRoot: true,
|
||||
tmpfs: ["/tmp"],
|
||||
network: "none",
|
||||
user: "1000:1000",
|
||||
capDrop: ["ALL"],
|
||||
env: { LANG: "C.UTF-8" },
|
||||
},
|
||||
tools: {
|
||||
allow: ["exec"],
|
||||
deny: ["browser"],
|
||||
},
|
||||
browserAllowHostControl: true,
|
||||
browser: {
|
||||
controlUrl: "http://localhost:9222",
|
||||
noVncUrl: "http://localhost:6080",
|
||||
containerName: "clawdbot-sbx-browser-test",
|
||||
},
|
||||
} satisfies SandboxContext;
|
||||
|
||||
expect(buildEmbeddedSandboxInfo(sandbox)).toEqual({
|
||||
enabled: true,
|
||||
workspaceDir: "/tmp/clawdbot-sandbox",
|
||||
workspaceAccess: "none",
|
||||
agentWorkspaceMount: undefined,
|
||||
browserControlUrl: "http://localhost:9222",
|
||||
browserNoVncUrl: "http://localhost:6080",
|
||||
hostBrowserAllowed: true,
|
||||
});
|
||||
});
|
||||
it("includes elevated info when allowed", () => {
|
||||
const sandbox = {
|
||||
enabled: true,
|
||||
sessionKey: "session:test",
|
||||
workspaceDir: "/tmp/clawdbot-sandbox",
|
||||
agentWorkspaceDir: "/tmp/clawdbot-workspace",
|
||||
workspaceAccess: "none",
|
||||
containerName: "clawdbot-sbx-test",
|
||||
containerWorkdir: "/workspace",
|
||||
docker: {
|
||||
image: "clawdbot-sandbox:bookworm-slim",
|
||||
containerPrefix: "clawdbot-sbx-",
|
||||
workdir: "/workspace",
|
||||
readOnlyRoot: true,
|
||||
tmpfs: ["/tmp"],
|
||||
network: "none",
|
||||
user: "1000:1000",
|
||||
capDrop: ["ALL"],
|
||||
env: { LANG: "C.UTF-8" },
|
||||
},
|
||||
tools: {
|
||||
allow: ["exec"],
|
||||
deny: ["browser"],
|
||||
},
|
||||
browserAllowHostControl: false,
|
||||
} satisfies SandboxContext;
|
||||
|
||||
expect(
|
||||
buildEmbeddedSandboxInfo(sandbox, {
|
||||
enabled: true,
|
||||
allowed: true,
|
||||
defaultLevel: "on",
|
||||
}),
|
||||
).toEqual({
|
||||
enabled: true,
|
||||
workspaceDir: "/tmp/clawdbot-sandbox",
|
||||
workspaceAccess: "none",
|
||||
agentWorkspaceMount: undefined,
|
||||
hostBrowserAllowed: false,
|
||||
elevated: { allowed: true, defaultLevel: "on" },
|
||||
});
|
||||
});
|
||||
});
|
||||
110
src/agents/pi-embedded-runner.createsystempromptoverride.test.ts
Normal file
110
src/agents/pi-embedded-runner.createsystempromptoverride.test.ts
Normal file
@@ -0,0 +1,110 @@
|
||||
import fs from "node:fs/promises";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
import { ensureClawdbotModelsJson } from "./models-config.js";
|
||||
import { createSystemPromptOverride } from "./pi-embedded-runner.js";
|
||||
|
||||
vi.mock("@mariozechner/pi-ai", async () => {
|
||||
const actual = await vi.importActual<typeof import("@mariozechner/pi-ai")>(
|
||||
"@mariozechner/pi-ai",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
streamSimple: (model: { api: string; provider: string; id: string }) => {
|
||||
if (model.id === "mock-error") {
|
||||
throw new Error("boom");
|
||||
}
|
||||
const stream = new actual.AssistantMessageEventStream();
|
||||
queueMicrotask(() => {
|
||||
stream.push({
|
||||
type: "done",
|
||||
reason: "stop",
|
||||
message: {
|
||||
role: "assistant",
|
||||
content: [{ type: "text", text: "ok" }],
|
||||
stopReason: "stop",
|
||||
api: model.api,
|
||||
provider: model.provider,
|
||||
model: model.id,
|
||||
usage: {
|
||||
input: 1,
|
||||
output: 1,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
totalTokens: 2,
|
||||
cost: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
total: 0,
|
||||
},
|
||||
},
|
||||
timestamp: Date.now(),
|
||||
},
|
||||
});
|
||||
});
|
||||
return stream;
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
const _makeOpenAiConfig = (modelIds: string[]) =>
|
||||
({
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
api: "openai-responses",
|
||||
apiKey: "sk-test",
|
||||
baseUrl: "https://example.com",
|
||||
models: modelIds.map((id) => ({
|
||||
id,
|
||||
name: `Mock ${id}`,
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 16_000,
|
||||
maxTokens: 2048,
|
||||
})),
|
||||
},
|
||||
},
|
||||
},
|
||||
}) satisfies ClawdbotConfig;
|
||||
|
||||
const _ensureModels = (cfg: ClawdbotConfig, agentDir: string) =>
|
||||
ensureClawdbotModelsJson(cfg, agentDir);
|
||||
|
||||
const _textFromContent = (content: unknown) => {
|
||||
if (typeof content === "string") return content;
|
||||
if (Array.isArray(content) && content[0]?.type === "text") {
|
||||
return (content[0] as { text?: string }).text;
|
||||
}
|
||||
return undefined;
|
||||
};
|
||||
|
||||
const _readSessionMessages = async (sessionFile: string) => {
|
||||
const raw = await fs.readFile(sessionFile, "utf-8");
|
||||
return raw
|
||||
.split(/\r?\n/)
|
||||
.filter(Boolean)
|
||||
.map(
|
||||
(line) =>
|
||||
JSON.parse(line) as {
|
||||
type?: string;
|
||||
message?: { role?: string; content?: unknown };
|
||||
},
|
||||
)
|
||||
.filter((entry) => entry.type === "message")
|
||||
.map((entry) => entry.message as { role?: string; content?: unknown });
|
||||
};
|
||||
|
||||
// createSystemPromptOverride returns a function that replaces the default
// system prompt entirely; whitespace-only overrides collapse to "".
describe("createSystemPromptOverride", () => {
  it("returns the override prompt regardless of default prompt", () => {
    const override = createSystemPromptOverride("OVERRIDE");
    expect(override("DEFAULT")).toBe("OVERRIDE");
  });
  it("returns an empty string for blank overrides", () => {
    const override = createSystemPromptOverride(" \n ");
    expect(override("DEFAULT")).toBe("");
  });
});
|
||||
@@ -0,0 +1,234 @@
|
||||
import fs from "node:fs/promises";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
import { ensureClawdbotModelsJson } from "./models-config.js";
|
||||
import { getDmHistoryLimitFromSessionKey } from "./pi-embedded-runner.js";
|
||||
|
||||
vi.mock("@mariozechner/pi-ai", async () => {
|
||||
const actual = await vi.importActual<typeof import("@mariozechner/pi-ai")>(
|
||||
"@mariozechner/pi-ai",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
streamSimple: (model: { api: string; provider: string; id: string }) => {
|
||||
if (model.id === "mock-error") {
|
||||
throw new Error("boom");
|
||||
}
|
||||
const stream = new actual.AssistantMessageEventStream();
|
||||
queueMicrotask(() => {
|
||||
stream.push({
|
||||
type: "done",
|
||||
reason: "stop",
|
||||
message: {
|
||||
role: "assistant",
|
||||
content: [{ type: "text", text: "ok" }],
|
||||
stopReason: "stop",
|
||||
api: model.api,
|
||||
provider: model.provider,
|
||||
model: model.id,
|
||||
usage: {
|
||||
input: 1,
|
||||
output: 1,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
totalTokens: 2,
|
||||
cost: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
total: 0,
|
||||
},
|
||||
},
|
||||
timestamp: Date.now(),
|
||||
},
|
||||
});
|
||||
});
|
||||
return stream;
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
const _makeOpenAiConfig = (modelIds: string[]) =>
|
||||
({
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
api: "openai-responses",
|
||||
apiKey: "sk-test",
|
||||
baseUrl: "https://example.com",
|
||||
models: modelIds.map((id) => ({
|
||||
id,
|
||||
name: `Mock ${id}`,
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 16_000,
|
||||
maxTokens: 2048,
|
||||
})),
|
||||
},
|
||||
},
|
||||
},
|
||||
}) satisfies ClawdbotConfig;
|
||||
|
||||
const _ensureModels = (cfg: ClawdbotConfig, agentDir: string) =>
|
||||
ensureClawdbotModelsJson(cfg, agentDir);
|
||||
|
||||
const _textFromContent = (content: unknown) => {
|
||||
if (typeof content === "string") return content;
|
||||
if (Array.isArray(content) && content[0]?.type === "text") {
|
||||
return (content[0] as { text?: string }).text;
|
||||
}
|
||||
return undefined;
|
||||
};
|
||||
|
||||
const _readSessionMessages = async (sessionFile: string) => {
|
||||
const raw = await fs.readFile(sessionFile, "utf-8");
|
||||
return raw
|
||||
.split(/\r?\n/)
|
||||
.filter(Boolean)
|
||||
.map(
|
||||
(line) =>
|
||||
JSON.parse(line) as {
|
||||
type?: string;
|
||||
message?: { role?: string; content?: unknown };
|
||||
},
|
||||
)
|
||||
.filter((entry) => entry.type === "message")
|
||||
.map((entry) => entry.message as { role?: string; content?: unknown });
|
||||
};
|
||||
|
||||
describe("getDmHistoryLimitFromSessionKey", () => {
|
||||
it("returns undefined when sessionKey is undefined", () => {
|
||||
expect(getDmHistoryLimitFromSessionKey(undefined, {})).toBeUndefined();
|
||||
});
|
||||
it("returns undefined when config is undefined", () => {
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey("telegram:dm:123", undefined),
|
||||
).toBeUndefined();
|
||||
});
|
||||
it("returns dmHistoryLimit for telegram provider", () => {
|
||||
const config = {
|
||||
channels: { telegram: { dmHistoryLimit: 15 } },
|
||||
} as ClawdbotConfig;
|
||||
expect(getDmHistoryLimitFromSessionKey("telegram:dm:123", config)).toBe(15);
|
||||
});
|
||||
it("returns dmHistoryLimit for whatsapp provider", () => {
|
||||
const config = {
|
||||
channels: { whatsapp: { dmHistoryLimit: 20 } },
|
||||
} as ClawdbotConfig;
|
||||
expect(getDmHistoryLimitFromSessionKey("whatsapp:dm:123", config)).toBe(20);
|
||||
});
|
||||
it("returns dmHistoryLimit for agent-prefixed session keys", () => {
|
||||
const config = {
|
||||
channels: { telegram: { dmHistoryLimit: 10 } },
|
||||
} as ClawdbotConfig;
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey("agent:main:telegram:dm:123", config),
|
||||
).toBe(10);
|
||||
});
|
||||
it("returns undefined for non-dm session kinds", () => {
|
||||
const config = {
|
||||
channels: {
|
||||
telegram: { dmHistoryLimit: 15 },
|
||||
slack: { dmHistoryLimit: 10 },
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey("agent:beta:slack:channel:C1", config),
|
||||
).toBeUndefined();
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey("telegram:slash:123", config),
|
||||
).toBeUndefined();
|
||||
});
|
||||
it("returns undefined for unknown provider", () => {
|
||||
const config = {
|
||||
channels: { telegram: { dmHistoryLimit: 15 } },
|
||||
} as ClawdbotConfig;
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey("unknown:dm:123", config),
|
||||
).toBeUndefined();
|
||||
});
|
||||
it("returns undefined when provider config has no dmHistoryLimit", () => {
|
||||
const config = { channels: { telegram: {} } } as ClawdbotConfig;
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey("telegram:dm:123", config),
|
||||
).toBeUndefined();
|
||||
});
|
||||
it("handles all supported providers", () => {
|
||||
const providers = [
|
||||
"telegram",
|
||||
"whatsapp",
|
||||
"discord",
|
||||
"slack",
|
||||
"signal",
|
||||
"imessage",
|
||||
"msteams",
|
||||
] as const;
|
||||
|
||||
for (const provider of providers) {
|
||||
const config = {
|
||||
channels: { [provider]: { dmHistoryLimit: 5 } },
|
||||
} as ClawdbotConfig;
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey(`${provider}:dm:123`, config),
|
||||
).toBe(5);
|
||||
}
|
||||
});
|
||||
it("handles per-DM overrides for all supported providers", () => {
|
||||
const providers = [
|
||||
"telegram",
|
||||
"whatsapp",
|
||||
"discord",
|
||||
"slack",
|
||||
"signal",
|
||||
"imessage",
|
||||
"msteams",
|
||||
] as const;
|
||||
|
||||
for (const provider of providers) {
|
||||
// Test per-DM override takes precedence
|
||||
const configWithOverride = {
|
||||
channels: {
|
||||
[provider]: {
|
||||
dmHistoryLimit: 20,
|
||||
dms: { user123: { historyLimit: 7 } },
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey(
|
||||
`${provider}:dm:user123`,
|
||||
configWithOverride,
|
||||
),
|
||||
).toBe(7);
|
||||
|
||||
// Test fallback to provider default when user not in dms
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey(
|
||||
`${provider}:dm:otheruser`,
|
||||
configWithOverride,
|
||||
),
|
||||
).toBe(20);
|
||||
|
||||
// Test with agent-prefixed key
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey(
|
||||
`agent:main:${provider}:dm:user123`,
|
||||
configWithOverride,
|
||||
),
|
||||
).toBe(7);
|
||||
}
|
||||
});
|
||||
it("returns per-DM override when set", () => {
|
||||
const config = {
|
||||
channels: {
|
||||
telegram: {
|
||||
dmHistoryLimit: 15,
|
||||
dms: { "123": { historyLimit: 5 } },
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
expect(getDmHistoryLimitFromSessionKey("telegram:dm:123", config)).toBe(5);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,162 @@
|
||||
import fs from "node:fs/promises";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
import { ensureClawdbotModelsJson } from "./models-config.js";
|
||||
import { getDmHistoryLimitFromSessionKey } from "./pi-embedded-runner.js";
|
||||
|
||||
vi.mock("@mariozechner/pi-ai", async () => {
|
||||
const actual = await vi.importActual<typeof import("@mariozechner/pi-ai")>(
|
||||
"@mariozechner/pi-ai",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
streamSimple: (model: { api: string; provider: string; id: string }) => {
|
||||
if (model.id === "mock-error") {
|
||||
throw new Error("boom");
|
||||
}
|
||||
const stream = new actual.AssistantMessageEventStream();
|
||||
queueMicrotask(() => {
|
||||
stream.push({
|
||||
type: "done",
|
||||
reason: "stop",
|
||||
message: {
|
||||
role: "assistant",
|
||||
content: [{ type: "text", text: "ok" }],
|
||||
stopReason: "stop",
|
||||
api: model.api,
|
||||
provider: model.provider,
|
||||
model: model.id,
|
||||
usage: {
|
||||
input: 1,
|
||||
output: 1,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
totalTokens: 2,
|
||||
cost: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
total: 0,
|
||||
},
|
||||
},
|
||||
timestamp: Date.now(),
|
||||
},
|
||||
});
|
||||
});
|
||||
return stream;
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
const _makeOpenAiConfig = (modelIds: string[]) =>
|
||||
({
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
api: "openai-responses",
|
||||
apiKey: "sk-test",
|
||||
baseUrl: "https://example.com",
|
||||
models: modelIds.map((id) => ({
|
||||
id,
|
||||
name: `Mock ${id}`,
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 16_000,
|
||||
maxTokens: 2048,
|
||||
})),
|
||||
},
|
||||
},
|
||||
},
|
||||
}) satisfies ClawdbotConfig;
|
||||
|
||||
const _ensureModels = (cfg: ClawdbotConfig, agentDir: string) =>
|
||||
ensureClawdbotModelsJson(cfg, agentDir);
|
||||
|
||||
const _textFromContent = (content: unknown) => {
|
||||
if (typeof content === "string") return content;
|
||||
if (Array.isArray(content) && content[0]?.type === "text") {
|
||||
return (content[0] as { text?: string }).text;
|
||||
}
|
||||
return undefined;
|
||||
};
|
||||
|
||||
const _readSessionMessages = async (sessionFile: string) => {
|
||||
const raw = await fs.readFile(sessionFile, "utf-8");
|
||||
return raw
|
||||
.split(/\r?\n/)
|
||||
.filter(Boolean)
|
||||
.map(
|
||||
(line) =>
|
||||
JSON.parse(line) as {
|
||||
type?: string;
|
||||
message?: { role?: string; content?: unknown };
|
||||
},
|
||||
)
|
||||
.filter((entry) => entry.type === "message")
|
||||
.map((entry) => entry.message as { role?: string; content?: unknown });
|
||||
};
|
||||
|
||||
describe("getDmHistoryLimitFromSessionKey", () => {
|
||||
it("falls back to provider default when per-DM not set", () => {
|
||||
const config = {
|
||||
channels: {
|
||||
telegram: {
|
||||
dmHistoryLimit: 15,
|
||||
dms: { "456": { historyLimit: 5 } },
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
expect(getDmHistoryLimitFromSessionKey("telegram:dm:123", config)).toBe(15);
|
||||
});
|
||||
it("returns per-DM override for agent-prefixed keys", () => {
|
||||
const config = {
|
||||
channels: {
|
||||
telegram: {
|
||||
dmHistoryLimit: 20,
|
||||
dms: { "789": { historyLimit: 3 } },
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey("agent:main:telegram:dm:789", config),
|
||||
).toBe(3);
|
||||
});
|
||||
it("handles userId with colons (e.g., email)", () => {
|
||||
const config = {
|
||||
channels: {
|
||||
msteams: {
|
||||
dmHistoryLimit: 10,
|
||||
dms: { "user@example.com": { historyLimit: 7 } },
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey("msteams:dm:user@example.com", config),
|
||||
).toBe(7);
|
||||
});
|
||||
it("returns undefined when per-DM historyLimit is not set", () => {
|
||||
const config = {
|
||||
channels: {
|
||||
telegram: {
|
||||
dms: { "123": {} },
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey("telegram:dm:123", config),
|
||||
).toBeUndefined();
|
||||
});
|
||||
it("returns 0 when per-DM historyLimit is explicitly 0 (unlimited)", () => {
|
||||
const config = {
|
||||
channels: {
|
||||
telegram: {
|
||||
dmHistoryLimit: 15,
|
||||
dms: { "123": { historyLimit: 0 } },
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
expect(getDmHistoryLimitFromSessionKey("telegram:dm:123", config)).toBe(0);
|
||||
});
|
||||
});
|
||||
182
src/agents/pi-embedded-runner.limithistoryturns.test.ts
Normal file
182
src/agents/pi-embedded-runner.limithistoryturns.test.ts
Normal file
@@ -0,0 +1,182 @@
|
||||
import fs from "node:fs/promises";
|
||||
import type { AgentMessage } from "@mariozechner/pi-agent-core";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
import { ensureClawdbotModelsJson } from "./models-config.js";
|
||||
import { limitHistoryTurns } from "./pi-embedded-runner.js";
|
||||
|
||||
vi.mock("@mariozechner/pi-ai", async () => {
|
||||
const actual = await vi.importActual<typeof import("@mariozechner/pi-ai")>(
|
||||
"@mariozechner/pi-ai",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
streamSimple: (model: { api: string; provider: string; id: string }) => {
|
||||
if (model.id === "mock-error") {
|
||||
throw new Error("boom");
|
||||
}
|
||||
const stream = new actual.AssistantMessageEventStream();
|
||||
queueMicrotask(() => {
|
||||
stream.push({
|
||||
type: "done",
|
||||
reason: "stop",
|
||||
message: {
|
||||
role: "assistant",
|
||||
content: [{ type: "text", text: "ok" }],
|
||||
stopReason: "stop",
|
||||
api: model.api,
|
||||
provider: model.provider,
|
||||
model: model.id,
|
||||
usage: {
|
||||
input: 1,
|
||||
output: 1,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
totalTokens: 2,
|
||||
cost: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
total: 0,
|
||||
},
|
||||
},
|
||||
timestamp: Date.now(),
|
||||
},
|
||||
});
|
||||
});
|
||||
return stream;
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
const _makeOpenAiConfig = (modelIds: string[]) =>
|
||||
({
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
api: "openai-responses",
|
||||
apiKey: "sk-test",
|
||||
baseUrl: "https://example.com",
|
||||
models: modelIds.map((id) => ({
|
||||
id,
|
||||
name: `Mock ${id}`,
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 16_000,
|
||||
maxTokens: 2048,
|
||||
})),
|
||||
},
|
||||
},
|
||||
},
|
||||
}) satisfies ClawdbotConfig;
|
||||
|
||||
const _ensureModels = (cfg: ClawdbotConfig, agentDir: string) =>
|
||||
ensureClawdbotModelsJson(cfg, agentDir);
|
||||
|
||||
const _textFromContent = (content: unknown) => {
|
||||
if (typeof content === "string") return content;
|
||||
if (Array.isArray(content) && content[0]?.type === "text") {
|
||||
return (content[0] as { text?: string }).text;
|
||||
}
|
||||
return undefined;
|
||||
};
|
||||
|
||||
const _readSessionMessages = async (sessionFile: string) => {
|
||||
const raw = await fs.readFile(sessionFile, "utf-8");
|
||||
return raw
|
||||
.split(/\r?\n/)
|
||||
.filter(Boolean)
|
||||
.map(
|
||||
(line) =>
|
||||
JSON.parse(line) as {
|
||||
type?: string;
|
||||
message?: { role?: string; content?: unknown };
|
||||
},
|
||||
)
|
||||
.filter((entry) => entry.type === "message")
|
||||
.map((entry) => entry.message as { role?: string; content?: unknown });
|
||||
};
|
||||
|
||||
describe("limitHistoryTurns", () => {
|
||||
const makeMessages = (roles: ("user" | "assistant")[]): AgentMessage[] =>
|
||||
roles.map((role, i) => ({
|
||||
role,
|
||||
content: [{ type: "text", text: `message ${i}` }],
|
||||
}));
|
||||
|
||||
it("returns all messages when limit is undefined", () => {
|
||||
const messages = makeMessages(["user", "assistant", "user", "assistant"]);
|
||||
expect(limitHistoryTurns(messages, undefined)).toBe(messages);
|
||||
});
|
||||
it("returns all messages when limit is 0", () => {
|
||||
const messages = makeMessages(["user", "assistant", "user", "assistant"]);
|
||||
expect(limitHistoryTurns(messages, 0)).toBe(messages);
|
||||
});
|
||||
it("returns all messages when limit is negative", () => {
|
||||
const messages = makeMessages(["user", "assistant", "user", "assistant"]);
|
||||
expect(limitHistoryTurns(messages, -1)).toBe(messages);
|
||||
});
|
||||
it("returns empty array when messages is empty", () => {
|
||||
expect(limitHistoryTurns([], 5)).toEqual([]);
|
||||
});
|
||||
it("keeps all messages when fewer user turns than limit", () => {
|
||||
const messages = makeMessages(["user", "assistant", "user", "assistant"]);
|
||||
expect(limitHistoryTurns(messages, 10)).toBe(messages);
|
||||
});
|
||||
it("limits to last N user turns", () => {
|
||||
const messages = makeMessages([
|
||||
"user",
|
||||
"assistant",
|
||||
"user",
|
||||
"assistant",
|
||||
"user",
|
||||
"assistant",
|
||||
]);
|
||||
const limited = limitHistoryTurns(messages, 2);
|
||||
expect(limited.length).toBe(4);
|
||||
expect(limited[0].content).toEqual([{ type: "text", text: "message 2" }]);
|
||||
});
|
||||
it("handles single user turn limit", () => {
|
||||
const messages = makeMessages([
|
||||
"user",
|
||||
"assistant",
|
||||
"user",
|
||||
"assistant",
|
||||
"user",
|
||||
"assistant",
|
||||
]);
|
||||
const limited = limitHistoryTurns(messages, 1);
|
||||
expect(limited.length).toBe(2);
|
||||
expect(limited[0].content).toEqual([{ type: "text", text: "message 4" }]);
|
||||
expect(limited[1].content).toEqual([{ type: "text", text: "message 5" }]);
|
||||
});
|
||||
it("handles messages with multiple assistant responses per user turn", () => {
|
||||
const messages = makeMessages([
|
||||
"user",
|
||||
"assistant",
|
||||
"assistant",
|
||||
"user",
|
||||
"assistant",
|
||||
]);
|
||||
const limited = limitHistoryTurns(messages, 1);
|
||||
expect(limited.length).toBe(2);
|
||||
expect(limited[0].role).toBe("user");
|
||||
expect(limited[1].role).toBe("assistant");
|
||||
});
|
||||
it("preserves message content integrity", () => {
|
||||
const messages: AgentMessage[] = [
|
||||
{ role: "user", content: [{ type: "text", text: "first" }] },
|
||||
{
|
||||
role: "assistant",
|
||||
content: [{ type: "toolCall", id: "1", name: "exec", arguments: {} }],
|
||||
},
|
||||
{ role: "user", content: [{ type: "text", text: "second" }] },
|
||||
{ role: "assistant", content: [{ type: "text", text: "response" }] },
|
||||
];
|
||||
const limited = limitHistoryTurns(messages, 1);
|
||||
expect(limited[0].content).toEqual([{ type: "text", text: "second" }]);
|
||||
expect(limited[1].content).toEqual([{ type: "text", text: "response" }]);
|
||||
});
|
||||
});
|
||||
143
src/agents/pi-embedded-runner.resolvesessionagentids.test.ts
Normal file
143
src/agents/pi-embedded-runner.resolvesessionagentids.test.ts
Normal file
@@ -0,0 +1,143 @@
|
||||
import fs from "node:fs/promises";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
import { resolveSessionAgentIds } from "./agent-scope.js";
|
||||
import { ensureClawdbotModelsJson } from "./models-config.js";
|
||||
|
||||
vi.mock("@mariozechner/pi-ai", async () => {
|
||||
const actual = await vi.importActual<typeof import("@mariozechner/pi-ai")>(
|
||||
"@mariozechner/pi-ai",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
streamSimple: (model: { api: string; provider: string; id: string }) => {
|
||||
if (model.id === "mock-error") {
|
||||
throw new Error("boom");
|
||||
}
|
||||
const stream = new actual.AssistantMessageEventStream();
|
||||
queueMicrotask(() => {
|
||||
stream.push({
|
||||
type: "done",
|
||||
reason: "stop",
|
||||
message: {
|
||||
role: "assistant",
|
||||
content: [{ type: "text", text: "ok" }],
|
||||
stopReason: "stop",
|
||||
api: model.api,
|
||||
provider: model.provider,
|
||||
model: model.id,
|
||||
usage: {
|
||||
input: 1,
|
||||
output: 1,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
totalTokens: 2,
|
||||
cost: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
total: 0,
|
||||
},
|
||||
},
|
||||
timestamp: Date.now(),
|
||||
},
|
||||
});
|
||||
});
|
||||
return stream;
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
const _makeOpenAiConfig = (modelIds: string[]) =>
|
||||
({
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
api: "openai-responses",
|
||||
apiKey: "sk-test",
|
||||
baseUrl: "https://example.com",
|
||||
models: modelIds.map((id) => ({
|
||||
id,
|
||||
name: `Mock ${id}`,
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 16_000,
|
||||
maxTokens: 2048,
|
||||
})),
|
||||
},
|
||||
},
|
||||
},
|
||||
}) satisfies ClawdbotConfig;
|
||||
|
||||
const _ensureModels = (cfg: ClawdbotConfig, agentDir: string) =>
|
||||
ensureClawdbotModelsJson(cfg, agentDir);
|
||||
|
||||
const _textFromContent = (content: unknown) => {
|
||||
if (typeof content === "string") return content;
|
||||
if (Array.isArray(content) && content[0]?.type === "text") {
|
||||
return (content[0] as { text?: string }).text;
|
||||
}
|
||||
return undefined;
|
||||
};
|
||||
|
||||
const _readSessionMessages = async (sessionFile: string) => {
|
||||
const raw = await fs.readFile(sessionFile, "utf-8");
|
||||
return raw
|
||||
.split(/\r?\n/)
|
||||
.filter(Boolean)
|
||||
.map(
|
||||
(line) =>
|
||||
JSON.parse(line) as {
|
||||
type?: string;
|
||||
message?: { role?: string; content?: unknown };
|
||||
},
|
||||
)
|
||||
.filter((entry) => entry.type === "message")
|
||||
.map((entry) => entry.message as { role?: string; content?: unknown });
|
||||
};
|
||||
|
||||
describe("resolveSessionAgentIds", () => {
|
||||
const cfg = {
|
||||
agents: {
|
||||
list: [{ id: "main" }, { id: "beta", default: true }],
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
|
||||
it("falls back to the configured default when sessionKey is missing", () => {
|
||||
const { defaultAgentId, sessionAgentId } = resolveSessionAgentIds({
|
||||
config: cfg,
|
||||
});
|
||||
expect(defaultAgentId).toBe("beta");
|
||||
expect(sessionAgentId).toBe("beta");
|
||||
});
|
||||
it("falls back to the configured default when sessionKey is non-agent", () => {
|
||||
const { sessionAgentId } = resolveSessionAgentIds({
|
||||
sessionKey: "telegram:slash:123",
|
||||
config: cfg,
|
||||
});
|
||||
expect(sessionAgentId).toBe("beta");
|
||||
});
|
||||
it("falls back to the configured default for global sessions", () => {
|
||||
const { sessionAgentId } = resolveSessionAgentIds({
|
||||
sessionKey: "global",
|
||||
config: cfg,
|
||||
});
|
||||
expect(sessionAgentId).toBe("beta");
|
||||
});
|
||||
it("keeps the agent id for provider-qualified agent sessions", () => {
|
||||
const { sessionAgentId } = resolveSessionAgentIds({
|
||||
sessionKey: "agent:beta:slack:channel:C1",
|
||||
config: cfg,
|
||||
});
|
||||
expect(sessionAgentId).toBe("beta");
|
||||
});
|
||||
it("uses the agent id from agent session keys", () => {
|
||||
const { sessionAgentId } = resolveSessionAgentIds({
|
||||
sessionKey: "agent:main:main",
|
||||
config: cfg,
|
||||
});
|
||||
expect(sessionAgentId).toBe("main");
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,282 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
import { ensureClawdbotModelsJson } from "./models-config.js";
|
||||
|
||||
vi.mock("@mariozechner/pi-ai", async () => {
|
||||
const actual = await vi.importActual<typeof import("@mariozechner/pi-ai")>(
|
||||
"@mariozechner/pi-ai",
|
||||
);
|
||||
|
||||
const buildAssistantMessage = (model: {
|
||||
api: string;
|
||||
provider: string;
|
||||
id: string;
|
||||
}) => ({
|
||||
role: "assistant" as const,
|
||||
content: [{ type: "text" as const, text: "ok" }],
|
||||
stopReason: "stop" as const,
|
||||
api: model.api,
|
||||
provider: model.provider,
|
||||
model: model.id,
|
||||
usage: {
|
||||
input: 1,
|
||||
output: 1,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
totalTokens: 2,
|
||||
cost: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
total: 0,
|
||||
},
|
||||
},
|
||||
timestamp: Date.now(),
|
||||
});
|
||||
|
||||
const buildAssistantErrorMessage = (model: {
|
||||
api: string;
|
||||
provider: string;
|
||||
id: string;
|
||||
}) => ({
|
||||
role: "assistant" as const,
|
||||
content: [] as const,
|
||||
stopReason: "error" as const,
|
||||
errorMessage: "boom",
|
||||
api: model.api,
|
||||
provider: model.provider,
|
||||
model: model.id,
|
||||
usage: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
totalTokens: 0,
|
||||
cost: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
total: 0,
|
||||
},
|
||||
},
|
||||
timestamp: Date.now(),
|
||||
});
|
||||
|
||||
return {
|
||||
...actual,
|
||||
complete: async (model: { api: string; provider: string; id: string }) => {
|
||||
if (model.id === "mock-error") return buildAssistantErrorMessage(model);
|
||||
return buildAssistantMessage(model);
|
||||
},
|
||||
completeSimple: async (model: {
|
||||
api: string;
|
||||
provider: string;
|
||||
id: string;
|
||||
}) => {
|
||||
if (model.id === "mock-error") return buildAssistantErrorMessage(model);
|
||||
return buildAssistantMessage(model);
|
||||
},
|
||||
streamSimple: (model: { api: string; provider: string; id: string }) => {
|
||||
const stream = new actual.AssistantMessageEventStream();
|
||||
queueMicrotask(() => {
|
||||
stream.push({
|
||||
type: "done",
|
||||
reason: "stop",
|
||||
message:
|
||||
model.id === "mock-error"
|
||||
? buildAssistantErrorMessage(model)
|
||||
: buildAssistantMessage(model),
|
||||
});
|
||||
});
|
||||
return stream;
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
vi.resetModules();
|
||||
|
||||
const { runEmbeddedPiAgent } = await import("./pi-embedded-runner.js");
|
||||
|
||||
const makeOpenAiConfig = (modelIds: string[]) =>
|
||||
({
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
api: "openai-responses",
|
||||
apiKey: "sk-test",
|
||||
baseUrl: "https://example.com",
|
||||
models: modelIds.map((id) => ({
|
||||
id,
|
||||
name: `Mock ${id}`,
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 16_000,
|
||||
maxTokens: 2048,
|
||||
})),
|
||||
},
|
||||
},
|
||||
},
|
||||
}) satisfies ClawdbotConfig;
|
||||
|
||||
const ensureModels = (cfg: ClawdbotConfig, agentDir: string) =>
|
||||
ensureClawdbotModelsJson(cfg, agentDir);
|
||||
|
||||
const textFromContent = (content: unknown) => {
|
||||
if (typeof content === "string") return content;
|
||||
if (Array.isArray(content) && content[0]?.type === "text") {
|
||||
return (content[0] as { text?: string }).text;
|
||||
}
|
||||
return undefined;
|
||||
};
|
||||
|
||||
const readSessionMessages = async (sessionFile: string) => {
|
||||
const raw = await fs.readFile(sessionFile, "utf-8");
|
||||
return raw
|
||||
.split(/\r?\n/)
|
||||
.filter(Boolean)
|
||||
.map(
|
||||
(line) =>
|
||||
JSON.parse(line) as {
|
||||
type?: string;
|
||||
message?: { role?: string; content?: unknown };
|
||||
},
|
||||
)
|
||||
.filter((entry) => entry.type === "message")
|
||||
.map((entry) => entry.message as { role?: string; content?: unknown });
|
||||
};
|
||||
|
||||
describe("runEmbeddedPiAgent", () => {
|
||||
it("writes models.json into the provided agentDir", async () => {
|
||||
const agentDir = await fs.mkdtemp(
|
||||
path.join(os.tmpdir(), "clawdbot-agent-"),
|
||||
);
|
||||
const workspaceDir = await fs.mkdtemp(
|
||||
path.join(os.tmpdir(), "clawdbot-workspace-"),
|
||||
);
|
||||
const sessionFile = path.join(workspaceDir, "session.jsonl");
|
||||
|
||||
const cfg = {
|
||||
models: {
|
||||
providers: {
|
||||
minimax: {
|
||||
baseUrl: "https://api.minimax.io/anthropic",
|
||||
api: "anthropic-messages",
|
||||
apiKey: "sk-minimax-test",
|
||||
models: [
|
||||
{
|
||||
id: "MiniMax-M2.1",
|
||||
name: "MiniMax M2.1",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 200000,
|
||||
maxTokens: 8192,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
} satisfies ClawdbotConfig;
|
||||
|
||||
await expect(
|
||||
runEmbeddedPiAgent({
|
||||
sessionId: "session:test",
|
||||
sessionKey: "agent:dev:test",
|
||||
sessionFile,
|
||||
workspaceDir,
|
||||
config: cfg,
|
||||
prompt: "hi",
|
||||
provider: "definitely-not-a-provider",
|
||||
model: "definitely-not-a-model",
|
||||
timeoutMs: 1,
|
||||
agentDir,
|
||||
}),
|
||||
).rejects.toThrow(/Unknown model:/);
|
||||
|
||||
await expect(
|
||||
fs.stat(path.join(agentDir, "models.json")),
|
||||
).resolves.toBeTruthy();
|
||||
});
|
||||
it(
|
||||
"persists the first user message before assistant output",
|
||||
{ timeout: 15_000 },
|
||||
async () => {
|
||||
const agentDir = await fs.mkdtemp(
|
||||
path.join(os.tmpdir(), "clawdbot-agent-"),
|
||||
);
|
||||
const workspaceDir = await fs.mkdtemp(
|
||||
path.join(os.tmpdir(), "clawdbot-workspace-"),
|
||||
);
|
||||
const sessionFile = path.join(workspaceDir, "session.jsonl");
|
||||
|
||||
const cfg = makeOpenAiConfig(["mock-1"]);
|
||||
await ensureModels(cfg, agentDir);
|
||||
|
||||
await runEmbeddedPiAgent({
|
||||
sessionId: "session:test",
|
||||
sessionKey: "agent:main:main",
|
||||
sessionFile,
|
||||
workspaceDir,
|
||||
config: cfg,
|
||||
prompt: "hello",
|
||||
provider: "openai",
|
||||
model: "mock-1",
|
||||
timeoutMs: 5_000,
|
||||
agentDir,
|
||||
});
|
||||
|
||||
const messages = await readSessionMessages(sessionFile);
|
||||
const firstUserIndex = messages.findIndex(
|
||||
(message) =>
|
||||
message?.role === "user" &&
|
||||
textFromContent(message.content) === "hello",
|
||||
);
|
||||
const firstAssistantIndex = messages.findIndex(
|
||||
(message) => message?.role === "assistant",
|
||||
);
|
||||
expect(firstUserIndex).toBeGreaterThanOrEqual(0);
|
||||
if (firstAssistantIndex !== -1) {
|
||||
expect(firstUserIndex).toBeLessThan(firstAssistantIndex);
|
||||
}
|
||||
},
|
||||
);
|
||||
it("persists the user message when prompt fails before assistant output", async () => {
|
||||
const agentDir = await fs.mkdtemp(
|
||||
path.join(os.tmpdir(), "clawdbot-agent-"),
|
||||
);
|
||||
const workspaceDir = await fs.mkdtemp(
|
||||
path.join(os.tmpdir(), "clawdbot-workspace-"),
|
||||
);
|
||||
const sessionFile = path.join(workspaceDir, "session.jsonl");
|
||||
|
||||
const cfg = makeOpenAiConfig(["mock-error"]);
|
||||
await ensureModels(cfg, agentDir);
|
||||
|
||||
const result = await runEmbeddedPiAgent({
|
||||
sessionId: "session:test",
|
||||
sessionKey: "agent:main:main",
|
||||
sessionFile,
|
||||
workspaceDir,
|
||||
config: cfg,
|
||||
prompt: "boom",
|
||||
provider: "openai",
|
||||
model: "mock-error",
|
||||
timeoutMs: 5_000,
|
||||
agentDir,
|
||||
});
|
||||
expect(result.payloads[0]?.isError).toBe(true);
|
||||
|
||||
const messages = await readSessionMessages(sessionFile);
|
||||
const userIndex = messages.findIndex(
|
||||
(message) =>
|
||||
message?.role === "user" && textFromContent(message.content) === "boom",
|
||||
);
|
||||
expect(userIndex).toBeGreaterThanOrEqual(0);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,297 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
import { ensureClawdbotModelsJson } from "./models-config.js";
|
||||
|
||||
vi.mock("@mariozechner/pi-ai", async () => {
|
||||
const actual = await vi.importActual<typeof import("@mariozechner/pi-ai")>(
|
||||
"@mariozechner/pi-ai",
|
||||
);
|
||||
|
||||
const buildAssistantMessage = (model: {
|
||||
api: string;
|
||||
provider: string;
|
||||
id: string;
|
||||
}) => ({
|
||||
role: "assistant" as const,
|
||||
content: [{ type: "text" as const, text: "ok" }],
|
||||
stopReason: "stop" as const,
|
||||
api: model.api,
|
||||
provider: model.provider,
|
||||
model: model.id,
|
||||
usage: {
|
||||
input: 1,
|
||||
output: 1,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
totalTokens: 2,
|
||||
cost: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
total: 0,
|
||||
},
|
||||
},
|
||||
timestamp: Date.now(),
|
||||
});
|
||||
|
||||
const buildAssistantErrorMessage = (model: {
|
||||
api: string;
|
||||
provider: string;
|
||||
id: string;
|
||||
}) => ({
|
||||
role: "assistant" as const,
|
||||
content: [] as const,
|
||||
stopReason: "error" as const,
|
||||
errorMessage: "boom",
|
||||
api: model.api,
|
||||
provider: model.provider,
|
||||
model: model.id,
|
||||
usage: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
totalTokens: 0,
|
||||
cost: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
total: 0,
|
||||
},
|
||||
},
|
||||
timestamp: Date.now(),
|
||||
});
|
||||
|
||||
return {
|
||||
...actual,
|
||||
complete: async (model: { api: string; provider: string; id: string }) => {
|
||||
if (model.id === "mock-error") return buildAssistantErrorMessage(model);
|
||||
return buildAssistantMessage(model);
|
||||
},
|
||||
completeSimple: async (model: {
|
||||
api: string;
|
||||
provider: string;
|
||||
id: string;
|
||||
}) => {
|
||||
if (model.id === "mock-error") return buildAssistantErrorMessage(model);
|
||||
return buildAssistantMessage(model);
|
||||
},
|
||||
streamSimple: (model: { api: string; provider: string; id: string }) => {
|
||||
const stream = new actual.AssistantMessageEventStream();
|
||||
queueMicrotask(() => {
|
||||
stream.push({
|
||||
type: "done",
|
||||
reason: "stop",
|
||||
message:
|
||||
model.id === "mock-error"
|
||||
? buildAssistantErrorMessage(model)
|
||||
: buildAssistantMessage(model),
|
||||
});
|
||||
});
|
||||
return stream;
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
vi.resetModules();
|
||||
|
||||
const { runEmbeddedPiAgent } = await import("./pi-embedded-runner.js");
|
||||
|
||||
const makeOpenAiConfig = (modelIds: string[]) =>
|
||||
({
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
api: "openai-responses",
|
||||
apiKey: "sk-test",
|
||||
baseUrl: "https://example.com",
|
||||
models: modelIds.map((id) => ({
|
||||
id,
|
||||
name: `Mock ${id}`,
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 16_000,
|
||||
maxTokens: 2048,
|
||||
})),
|
||||
},
|
||||
},
|
||||
},
|
||||
}) satisfies ClawdbotConfig;
|
||||
|
||||
const ensureModels = (cfg: ClawdbotConfig, agentDir: string) =>
|
||||
ensureClawdbotModelsJson(cfg, agentDir);
|
||||
|
||||
const textFromContent = (content: unknown) => {
|
||||
if (typeof content === "string") return content;
|
||||
if (Array.isArray(content) && content[0]?.type === "text") {
|
||||
return (content[0] as { text?: string }).text;
|
||||
}
|
||||
return undefined;
|
||||
};
|
||||
|
||||
const readSessionMessages = async (sessionFile: string) => {
|
||||
const raw = await fs.readFile(sessionFile, "utf-8");
|
||||
return raw
|
||||
.split(/\r?\n/)
|
||||
.filter(Boolean)
|
||||
.map(
|
||||
(line) =>
|
||||
JSON.parse(line) as {
|
||||
type?: string;
|
||||
message?: { role?: string; content?: unknown };
|
||||
},
|
||||
)
|
||||
.filter((entry) => entry.type === "message")
|
||||
.map((entry) => entry.message as { role?: string; content?: unknown });
|
||||
};
|
||||
|
||||
describe("runEmbeddedPiAgent", () => {
|
||||
it("appends new user + assistant after existing transcript entries", async () => {
|
||||
const { SessionManager } = await import("@mariozechner/pi-coding-agent");
|
||||
|
||||
const agentDir = await fs.mkdtemp(
|
||||
path.join(os.tmpdir(), "clawdbot-agent-"),
|
||||
);
|
||||
const workspaceDir = await fs.mkdtemp(
|
||||
path.join(os.tmpdir(), "clawdbot-workspace-"),
|
||||
);
|
||||
const sessionFile = path.join(workspaceDir, "session.jsonl");
|
||||
|
||||
const sessionManager = SessionManager.open(sessionFile);
|
||||
sessionManager.appendMessage({
|
||||
role: "user",
|
||||
content: [{ type: "text", text: "seed user" }],
|
||||
});
|
||||
sessionManager.appendMessage({
|
||||
role: "assistant",
|
||||
content: [{ type: "text", text: "seed assistant" }],
|
||||
stopReason: "stop",
|
||||
api: "openai-responses",
|
||||
provider: "openai",
|
||||
model: "mock-1",
|
||||
usage: {
|
||||
input: 1,
|
||||
output: 1,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
totalTokens: 2,
|
||||
cost: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
total: 0,
|
||||
},
|
||||
},
|
||||
timestamp: Date.now(),
|
||||
});
|
||||
|
||||
const cfg = makeOpenAiConfig(["mock-1"]);
|
||||
await ensureModels(cfg, agentDir);
|
||||
|
||||
await runEmbeddedPiAgent({
|
||||
sessionId: "session:test",
|
||||
sessionKey: "agent:main:main",
|
||||
sessionFile,
|
||||
workspaceDir,
|
||||
config: cfg,
|
||||
prompt: "hello",
|
||||
provider: "openai",
|
||||
model: "mock-1",
|
||||
timeoutMs: 5_000,
|
||||
agentDir,
|
||||
});
|
||||
|
||||
const messages = await readSessionMessages(sessionFile);
|
||||
const seedUserIndex = messages.findIndex(
|
||||
(message) =>
|
||||
message?.role === "user" &&
|
||||
textFromContent(message.content) === "seed user",
|
||||
);
|
||||
const seedAssistantIndex = messages.findIndex(
|
||||
(message) =>
|
||||
message?.role === "assistant" &&
|
||||
textFromContent(message.content) === "seed assistant",
|
||||
);
|
||||
const newUserIndex = messages.findIndex(
|
||||
(message) =>
|
||||
message?.role === "user" &&
|
||||
textFromContent(message.content) === "hello",
|
||||
);
|
||||
const newAssistantIndex = messages.findIndex(
|
||||
(message, index) => index > newUserIndex && message?.role === "assistant",
|
||||
);
|
||||
expect(seedUserIndex).toBeGreaterThanOrEqual(0);
|
||||
expect(seedAssistantIndex).toBeGreaterThan(seedUserIndex);
|
||||
expect(newUserIndex).toBeGreaterThan(seedAssistantIndex);
|
||||
expect(newAssistantIndex).toBeGreaterThan(newUserIndex);
|
||||
}, 20_000);
|
||||
it("persists multi-turn user/assistant ordering across runs", async () => {
|
||||
const agentDir = await fs.mkdtemp(
|
||||
path.join(os.tmpdir(), "clawdbot-agent-"),
|
||||
);
|
||||
const workspaceDir = await fs.mkdtemp(
|
||||
path.join(os.tmpdir(), "clawdbot-workspace-"),
|
||||
);
|
||||
const sessionFile = path.join(workspaceDir, "session.jsonl");
|
||||
|
||||
const cfg = makeOpenAiConfig(["mock-1"]);
|
||||
await ensureModels(cfg, agentDir);
|
||||
|
||||
await runEmbeddedPiAgent({
|
||||
sessionId: "session:test",
|
||||
sessionKey: "agent:main:main",
|
||||
sessionFile,
|
||||
workspaceDir,
|
||||
config: cfg,
|
||||
prompt: "first",
|
||||
provider: "openai",
|
||||
model: "mock-1",
|
||||
timeoutMs: 5_000,
|
||||
agentDir,
|
||||
});
|
||||
|
||||
await runEmbeddedPiAgent({
|
||||
sessionId: "session:test",
|
||||
sessionKey: "agent:main:main",
|
||||
sessionFile,
|
||||
workspaceDir,
|
||||
config: cfg,
|
||||
prompt: "second",
|
||||
provider: "openai",
|
||||
model: "mock-1",
|
||||
timeoutMs: 5_000,
|
||||
agentDir,
|
||||
});
|
||||
|
||||
const messages = await readSessionMessages(sessionFile);
|
||||
const firstUserIndex = messages.findIndex(
|
||||
(message) =>
|
||||
message?.role === "user" &&
|
||||
textFromContent(message.content) === "first",
|
||||
);
|
||||
const firstAssistantIndex = messages.findIndex(
|
||||
(message, index) =>
|
||||
index > firstUserIndex && message?.role === "assistant",
|
||||
);
|
||||
const secondUserIndex = messages.findIndex(
|
||||
(message) =>
|
||||
message?.role === "user" &&
|
||||
textFromContent(message.content) === "second",
|
||||
);
|
||||
const secondAssistantIndex = messages.findIndex(
|
||||
(message, index) =>
|
||||
index > secondUserIndex && message?.role === "assistant",
|
||||
);
|
||||
expect(firstUserIndex).toBeGreaterThanOrEqual(0);
|
||||
expect(firstAssistantIndex).toBeGreaterThan(firstUserIndex);
|
||||
expect(secondUserIndex).toBeGreaterThan(firstAssistantIndex);
|
||||
expect(secondAssistantIndex).toBeGreaterThan(secondUserIndex);
|
||||
}, 20_000);
|
||||
});
|
||||
149
src/agents/pi-embedded-runner.splitsdktools.test.ts
Normal file
149
src/agents/pi-embedded-runner.splitsdktools.test.ts
Normal file
@@ -0,0 +1,149 @@
|
||||
import fs from "node:fs/promises";
|
||||
import type { AgentTool, AgentToolResult } from "@mariozechner/pi-agent-core";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
import { ensureClawdbotModelsJson } from "./models-config.js";
|
||||
import { splitSdkTools } from "./pi-embedded-runner.js";
|
||||
|
||||
vi.mock("@mariozechner/pi-ai", async () => {
|
||||
const actual = await vi.importActual<typeof import("@mariozechner/pi-ai")>(
|
||||
"@mariozechner/pi-ai",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
streamSimple: (model: { api: string; provider: string; id: string }) => {
|
||||
if (model.id === "mock-error") {
|
||||
throw new Error("boom");
|
||||
}
|
||||
const stream = new actual.AssistantMessageEventStream();
|
||||
queueMicrotask(() => {
|
||||
stream.push({
|
||||
type: "done",
|
||||
reason: "stop",
|
||||
message: {
|
||||
role: "assistant",
|
||||
content: [{ type: "text", text: "ok" }],
|
||||
stopReason: "stop",
|
||||
api: model.api,
|
||||
provider: model.provider,
|
||||
model: model.id,
|
||||
usage: {
|
||||
input: 1,
|
||||
output: 1,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
totalTokens: 2,
|
||||
cost: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
total: 0,
|
||||
},
|
||||
},
|
||||
timestamp: Date.now(),
|
||||
},
|
||||
});
|
||||
});
|
||||
return stream;
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
const _makeOpenAiConfig = (modelIds: string[]) =>
|
||||
({
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
api: "openai-responses",
|
||||
apiKey: "sk-test",
|
||||
baseUrl: "https://example.com",
|
||||
models: modelIds.map((id) => ({
|
||||
id,
|
||||
name: `Mock ${id}`,
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 16_000,
|
||||
maxTokens: 2048,
|
||||
})),
|
||||
},
|
||||
},
|
||||
},
|
||||
}) satisfies ClawdbotConfig;
|
||||
|
||||
const _ensureModels = (cfg: ClawdbotConfig, agentDir: string) =>
|
||||
ensureClawdbotModelsJson(cfg, agentDir);
|
||||
|
||||
const _textFromContent = (content: unknown) => {
|
||||
if (typeof content === "string") return content;
|
||||
if (Array.isArray(content) && content[0]?.type === "text") {
|
||||
return (content[0] as { text?: string }).text;
|
||||
}
|
||||
return undefined;
|
||||
};
|
||||
|
||||
const _readSessionMessages = async (sessionFile: string) => {
|
||||
const raw = await fs.readFile(sessionFile, "utf-8");
|
||||
return raw
|
||||
.split(/\r?\n/)
|
||||
.filter(Boolean)
|
||||
.map(
|
||||
(line) =>
|
||||
JSON.parse(line) as {
|
||||
type?: string;
|
||||
message?: { role?: string; content?: unknown };
|
||||
},
|
||||
)
|
||||
.filter((entry) => entry.type === "message")
|
||||
.map((entry) => entry.message as { role?: string; content?: unknown });
|
||||
};
|
||||
|
||||
function createStubTool(name: string): AgentTool<unknown, unknown> {
|
||||
return {
|
||||
name,
|
||||
label: name,
|
||||
description: "",
|
||||
parameters: {},
|
||||
execute: async () => ({}) as AgentToolResult<unknown>,
|
||||
};
|
||||
}
|
||||
|
||||
describe("splitSdkTools", () => {
|
||||
const tools = [
|
||||
createStubTool("read"),
|
||||
createStubTool("exec"),
|
||||
createStubTool("edit"),
|
||||
createStubTool("write"),
|
||||
createStubTool("browser"),
|
||||
];
|
||||
|
||||
it("routes all tools to customTools when sandboxed", () => {
|
||||
const { builtInTools, customTools } = splitSdkTools({
|
||||
tools,
|
||||
sandboxEnabled: true,
|
||||
});
|
||||
expect(builtInTools).toEqual([]);
|
||||
expect(customTools.map((tool) => tool.name)).toEqual([
|
||||
"read",
|
||||
"exec",
|
||||
"edit",
|
||||
"write",
|
||||
"browser",
|
||||
]);
|
||||
});
|
||||
it("routes all tools to customTools even when not sandboxed", () => {
|
||||
const { builtInTools, customTools } = splitSdkTools({
|
||||
tools,
|
||||
sandboxEnabled: false,
|
||||
});
|
||||
expect(builtInTools).toEqual([]);
|
||||
expect(customTools.map((tool) => tool.name)).toEqual([
|
||||
"read",
|
||||
"exec",
|
||||
"edit",
|
||||
"write",
|
||||
"browser",
|
||||
]);
|
||||
});
|
||||
});
|
||||
@@ -1,951 +0,0 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import type { AgentMessage, AgentTool } from "@mariozechner/pi-agent-core";
|
||||
import { SessionManager } from "@mariozechner/pi-coding-agent";
|
||||
import { Type } from "@sinclair/typebox";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import type { ClawdbotConfig } from "../config/config.js";
|
||||
import { resolveSessionAgentIds } from "./agent-scope.js";
|
||||
import { ensureClawdbotModelsJson } from "./models-config.js";
|
||||
import {
|
||||
applyGoogleTurnOrderingFix,
|
||||
buildEmbeddedSandboxInfo,
|
||||
createSystemPromptOverride,
|
||||
getDmHistoryLimitFromSessionKey,
|
||||
limitHistoryTurns,
|
||||
runEmbeddedPiAgent,
|
||||
splitSdkTools,
|
||||
} from "./pi-embedded-runner.js";
|
||||
import type { SandboxContext } from "./sandbox.js";
|
||||
|
||||
vi.mock("@mariozechner/pi-ai", async () => {
|
||||
const actual = await vi.importActual<typeof import("@mariozechner/pi-ai")>(
|
||||
"@mariozechner/pi-ai",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
streamSimple: (model: { api: string; provider: string; id: string }) => {
|
||||
if (model.id === "mock-error") {
|
||||
throw new Error("boom");
|
||||
}
|
||||
const stream = new actual.AssistantMessageEventStream();
|
||||
queueMicrotask(() => {
|
||||
stream.push({
|
||||
type: "done",
|
||||
reason: "stop",
|
||||
message: {
|
||||
role: "assistant",
|
||||
content: [{ type: "text", text: "ok" }],
|
||||
stopReason: "stop",
|
||||
api: model.api,
|
||||
provider: model.provider,
|
||||
model: model.id,
|
||||
usage: {
|
||||
input: 1,
|
||||
output: 1,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
totalTokens: 2,
|
||||
cost: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
total: 0,
|
||||
},
|
||||
},
|
||||
timestamp: Date.now(),
|
||||
},
|
||||
});
|
||||
});
|
||||
return stream;
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
const makeOpenAiConfig = (modelIds: string[]) =>
|
||||
({
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
api: "openai-responses",
|
||||
apiKey: "sk-test",
|
||||
baseUrl: "https://example.com",
|
||||
models: modelIds.map((id) => ({
|
||||
id,
|
||||
name: `Mock ${id}`,
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 16_000,
|
||||
maxTokens: 2048,
|
||||
})),
|
||||
},
|
||||
},
|
||||
},
|
||||
}) satisfies ClawdbotConfig;
|
||||
|
||||
const ensureModels = (cfg: ClawdbotConfig, agentDir: string) =>
|
||||
ensureClawdbotModelsJson(cfg, agentDir);
|
||||
|
||||
const textFromContent = (content: unknown) => {
|
||||
if (typeof content === "string") return content;
|
||||
if (Array.isArray(content) && content[0]?.type === "text") {
|
||||
return (content[0] as { text?: string }).text;
|
||||
}
|
||||
return undefined;
|
||||
};
|
||||
|
||||
const readSessionMessages = async (sessionFile: string) => {
|
||||
const raw = await fs.readFile(sessionFile, "utf-8");
|
||||
return raw
|
||||
.split(/\r?\n/)
|
||||
.filter(Boolean)
|
||||
.map(
|
||||
(line) =>
|
||||
JSON.parse(line) as {
|
||||
type?: string;
|
||||
message?: { role?: string; content?: unknown };
|
||||
},
|
||||
)
|
||||
.filter((entry) => entry.type === "message")
|
||||
.map((entry) => entry.message as { role?: string; content?: unknown });
|
||||
};
|
||||
|
||||
describe("buildEmbeddedSandboxInfo", () => {
|
||||
it("returns undefined when sandbox is missing", () => {
|
||||
expect(buildEmbeddedSandboxInfo()).toBeUndefined();
|
||||
});
|
||||
|
||||
it("maps sandbox context into prompt info", () => {
|
||||
const sandbox = {
|
||||
enabled: true,
|
||||
sessionKey: "session:test",
|
||||
workspaceDir: "/tmp/clawdbot-sandbox",
|
||||
agentWorkspaceDir: "/tmp/clawdbot-workspace",
|
||||
workspaceAccess: "none",
|
||||
containerName: "clawdbot-sbx-test",
|
||||
containerWorkdir: "/workspace",
|
||||
docker: {
|
||||
image: "clawdbot-sandbox:bookworm-slim",
|
||||
containerPrefix: "clawdbot-sbx-",
|
||||
workdir: "/workspace",
|
||||
readOnlyRoot: true,
|
||||
tmpfs: ["/tmp"],
|
||||
network: "none",
|
||||
user: "1000:1000",
|
||||
capDrop: ["ALL"],
|
||||
env: { LANG: "C.UTF-8" },
|
||||
},
|
||||
tools: {
|
||||
allow: ["exec"],
|
||||
deny: ["browser"],
|
||||
},
|
||||
browserAllowHostControl: true,
|
||||
browser: {
|
||||
controlUrl: "http://localhost:9222",
|
||||
noVncUrl: "http://localhost:6080",
|
||||
containerName: "clawdbot-sbx-browser-test",
|
||||
},
|
||||
} satisfies SandboxContext;
|
||||
|
||||
expect(buildEmbeddedSandboxInfo(sandbox)).toEqual({
|
||||
enabled: true,
|
||||
workspaceDir: "/tmp/clawdbot-sandbox",
|
||||
workspaceAccess: "none",
|
||||
agentWorkspaceMount: undefined,
|
||||
browserControlUrl: "http://localhost:9222",
|
||||
browserNoVncUrl: "http://localhost:6080",
|
||||
hostBrowserAllowed: true,
|
||||
});
|
||||
});
|
||||
|
||||
it("includes elevated info when allowed", () => {
|
||||
const sandbox = {
|
||||
enabled: true,
|
||||
sessionKey: "session:test",
|
||||
workspaceDir: "/tmp/clawdbot-sandbox",
|
||||
agentWorkspaceDir: "/tmp/clawdbot-workspace",
|
||||
workspaceAccess: "none",
|
||||
containerName: "clawdbot-sbx-test",
|
||||
containerWorkdir: "/workspace",
|
||||
docker: {
|
||||
image: "clawdbot-sandbox:bookworm-slim",
|
||||
containerPrefix: "clawdbot-sbx-",
|
||||
workdir: "/workspace",
|
||||
readOnlyRoot: true,
|
||||
tmpfs: ["/tmp"],
|
||||
network: "none",
|
||||
user: "1000:1000",
|
||||
capDrop: ["ALL"],
|
||||
env: { LANG: "C.UTF-8" },
|
||||
},
|
||||
tools: {
|
||||
allow: ["exec"],
|
||||
deny: ["browser"],
|
||||
},
|
||||
browserAllowHostControl: false,
|
||||
} satisfies SandboxContext;
|
||||
|
||||
expect(
|
||||
buildEmbeddedSandboxInfo(sandbox, {
|
||||
enabled: true,
|
||||
allowed: true,
|
||||
defaultLevel: "on",
|
||||
}),
|
||||
).toEqual({
|
||||
enabled: true,
|
||||
workspaceDir: "/tmp/clawdbot-sandbox",
|
||||
workspaceAccess: "none",
|
||||
agentWorkspaceMount: undefined,
|
||||
hostBrowserAllowed: false,
|
||||
elevated: { allowed: true, defaultLevel: "on" },
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("resolveSessionAgentIds", () => {
|
||||
const cfg = {
|
||||
agents: {
|
||||
list: [{ id: "main" }, { id: "beta", default: true }],
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
|
||||
it("falls back to the configured default when sessionKey is missing", () => {
|
||||
const { defaultAgentId, sessionAgentId } = resolveSessionAgentIds({
|
||||
config: cfg,
|
||||
});
|
||||
expect(defaultAgentId).toBe("beta");
|
||||
expect(sessionAgentId).toBe("beta");
|
||||
});
|
||||
|
||||
it("falls back to the configured default when sessionKey is non-agent", () => {
|
||||
const { sessionAgentId } = resolveSessionAgentIds({
|
||||
sessionKey: "telegram:slash:123",
|
||||
config: cfg,
|
||||
});
|
||||
expect(sessionAgentId).toBe("beta");
|
||||
});
|
||||
|
||||
it("falls back to the configured default for global sessions", () => {
|
||||
const { sessionAgentId } = resolveSessionAgentIds({
|
||||
sessionKey: "global",
|
||||
config: cfg,
|
||||
});
|
||||
expect(sessionAgentId).toBe("beta");
|
||||
});
|
||||
|
||||
it("keeps the agent id for provider-qualified agent sessions", () => {
|
||||
const { sessionAgentId } = resolveSessionAgentIds({
|
||||
sessionKey: "agent:beta:slack:channel:C1",
|
||||
config: cfg,
|
||||
});
|
||||
expect(sessionAgentId).toBe("beta");
|
||||
});
|
||||
|
||||
it("uses the agent id from agent session keys", () => {
|
||||
const { sessionAgentId } = resolveSessionAgentIds({
|
||||
sessionKey: "agent:main:main",
|
||||
config: cfg,
|
||||
});
|
||||
expect(sessionAgentId).toBe("main");
|
||||
});
|
||||
});
|
||||
|
||||
function createStubTool(name: string): AgentTool {
|
||||
return {
|
||||
name,
|
||||
label: name,
|
||||
description: "",
|
||||
parameters: Type.Object({}),
|
||||
execute: async () => ({ content: [], details: {} }),
|
||||
};
|
||||
}
|
||||
|
||||
describe("splitSdkTools", () => {
|
||||
const tools = [
|
||||
createStubTool("read"),
|
||||
createStubTool("exec"),
|
||||
createStubTool("edit"),
|
||||
createStubTool("write"),
|
||||
createStubTool("browser"),
|
||||
];
|
||||
|
||||
it("routes all tools to customTools when sandboxed", () => {
|
||||
const { builtInTools, customTools } = splitSdkTools({
|
||||
tools,
|
||||
sandboxEnabled: true,
|
||||
});
|
||||
expect(builtInTools).toEqual([]);
|
||||
expect(customTools.map((tool) => tool.name)).toEqual([
|
||||
"read",
|
||||
"exec",
|
||||
"edit",
|
||||
"write",
|
||||
"browser",
|
||||
]);
|
||||
});
|
||||
|
||||
it("routes all tools to customTools even when not sandboxed", () => {
|
||||
const { builtInTools, customTools } = splitSdkTools({
|
||||
tools,
|
||||
sandboxEnabled: false,
|
||||
});
|
||||
expect(builtInTools).toEqual([]);
|
||||
expect(customTools.map((tool) => tool.name)).toEqual([
|
||||
"read",
|
||||
"exec",
|
||||
"edit",
|
||||
"write",
|
||||
"browser",
|
||||
]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("createSystemPromptOverride", () => {
|
||||
it("returns the override prompt regardless of default prompt", () => {
|
||||
const override = createSystemPromptOverride("OVERRIDE");
|
||||
expect(override("DEFAULT")).toBe("OVERRIDE");
|
||||
});
|
||||
|
||||
it("returns an empty string for blank overrides", () => {
|
||||
const override = createSystemPromptOverride(" \n ");
|
||||
expect(override("DEFAULT")).toBe("");
|
||||
});
|
||||
});
|
||||
|
||||
describe("applyGoogleTurnOrderingFix", () => {
|
||||
const makeAssistantFirst = () =>
|
||||
[
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{ type: "toolCall", id: "call_1", name: "exec", arguments: {} },
|
||||
],
|
||||
},
|
||||
] satisfies AgentMessage[];
|
||||
|
||||
it("prepends a bootstrap once and records a marker for Google models", () => {
|
||||
const sessionManager = SessionManager.inMemory();
|
||||
const warn = vi.fn();
|
||||
const input = makeAssistantFirst();
|
||||
const first = applyGoogleTurnOrderingFix({
|
||||
messages: input,
|
||||
modelApi: "google-generative-ai",
|
||||
sessionManager,
|
||||
sessionId: "session:1",
|
||||
warn,
|
||||
});
|
||||
expect(first.messages[0]?.role).toBe("user");
|
||||
expect(first.messages[1]?.role).toBe("assistant");
|
||||
expect(warn).toHaveBeenCalledTimes(1);
|
||||
expect(
|
||||
sessionManager
|
||||
.getEntries()
|
||||
.some(
|
||||
(entry) =>
|
||||
entry.type === "custom" &&
|
||||
entry.customType === "google-turn-ordering-bootstrap",
|
||||
),
|
||||
).toBe(true);
|
||||
|
||||
applyGoogleTurnOrderingFix({
|
||||
messages: input,
|
||||
modelApi: "google-generative-ai",
|
||||
sessionManager,
|
||||
sessionId: "session:1",
|
||||
warn,
|
||||
});
|
||||
expect(warn).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it("skips non-Google models", () => {
|
||||
const sessionManager = SessionManager.inMemory();
|
||||
const warn = vi.fn();
|
||||
const input = makeAssistantFirst();
|
||||
const result = applyGoogleTurnOrderingFix({
|
||||
messages: input,
|
||||
modelApi: "openai",
|
||||
sessionManager,
|
||||
sessionId: "session:2",
|
||||
warn,
|
||||
});
|
||||
expect(result.messages).toBe(input);
|
||||
expect(warn).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe("limitHistoryTurns", () => {
|
||||
const makeMessages = (roles: ("user" | "assistant")[]): AgentMessage[] =>
|
||||
roles.map((role, i) => ({
|
||||
role,
|
||||
content: [{ type: "text", text: `message ${i}` }],
|
||||
}));
|
||||
|
||||
it("returns all messages when limit is undefined", () => {
|
||||
const messages = makeMessages(["user", "assistant", "user", "assistant"]);
|
||||
expect(limitHistoryTurns(messages, undefined)).toBe(messages);
|
||||
});
|
||||
|
||||
it("returns all messages when limit is 0", () => {
|
||||
const messages = makeMessages(["user", "assistant", "user", "assistant"]);
|
||||
expect(limitHistoryTurns(messages, 0)).toBe(messages);
|
||||
});
|
||||
|
||||
it("returns all messages when limit is negative", () => {
|
||||
const messages = makeMessages(["user", "assistant", "user", "assistant"]);
|
||||
expect(limitHistoryTurns(messages, -1)).toBe(messages);
|
||||
});
|
||||
|
||||
it("returns empty array when messages is empty", () => {
|
||||
expect(limitHistoryTurns([], 5)).toEqual([]);
|
||||
});
|
||||
|
||||
it("keeps all messages when fewer user turns than limit", () => {
|
||||
const messages = makeMessages(["user", "assistant", "user", "assistant"]);
|
||||
expect(limitHistoryTurns(messages, 10)).toBe(messages);
|
||||
});
|
||||
|
||||
it("limits to last N user turns", () => {
|
||||
const messages = makeMessages([
|
||||
"user",
|
||||
"assistant",
|
||||
"user",
|
||||
"assistant",
|
||||
"user",
|
||||
"assistant",
|
||||
]);
|
||||
const limited = limitHistoryTurns(messages, 2);
|
||||
expect(limited.length).toBe(4);
|
||||
expect(limited[0].content).toEqual([{ type: "text", text: "message 2" }]);
|
||||
});
|
||||
|
||||
it("handles single user turn limit", () => {
|
||||
const messages = makeMessages([
|
||||
"user",
|
||||
"assistant",
|
||||
"user",
|
||||
"assistant",
|
||||
"user",
|
||||
"assistant",
|
||||
]);
|
||||
const limited = limitHistoryTurns(messages, 1);
|
||||
expect(limited.length).toBe(2);
|
||||
expect(limited[0].content).toEqual([{ type: "text", text: "message 4" }]);
|
||||
expect(limited[1].content).toEqual([{ type: "text", text: "message 5" }]);
|
||||
});
|
||||
|
||||
it("handles messages with multiple assistant responses per user turn", () => {
|
||||
const messages = makeMessages([
|
||||
"user",
|
||||
"assistant",
|
||||
"assistant",
|
||||
"user",
|
||||
"assistant",
|
||||
]);
|
||||
const limited = limitHistoryTurns(messages, 1);
|
||||
expect(limited.length).toBe(2);
|
||||
expect(limited[0].role).toBe("user");
|
||||
expect(limited[1].role).toBe("assistant");
|
||||
});
|
||||
|
||||
it("preserves message content integrity", () => {
|
||||
const messages: AgentMessage[] = [
|
||||
{ role: "user", content: [{ type: "text", text: "first" }] },
|
||||
{
|
||||
role: "assistant",
|
||||
content: [{ type: "toolCall", id: "1", name: "exec", arguments: {} }],
|
||||
},
|
||||
{ role: "user", content: [{ type: "text", text: "second" }] },
|
||||
{ role: "assistant", content: [{ type: "text", text: "response" }] },
|
||||
];
|
||||
const limited = limitHistoryTurns(messages, 1);
|
||||
expect(limited[0].content).toEqual([{ type: "text", text: "second" }]);
|
||||
expect(limited[1].content).toEqual([{ type: "text", text: "response" }]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("getDmHistoryLimitFromSessionKey", () => {
|
||||
it("returns undefined when sessionKey is undefined", () => {
|
||||
expect(getDmHistoryLimitFromSessionKey(undefined, {})).toBeUndefined();
|
||||
});
|
||||
|
||||
it("returns undefined when config is undefined", () => {
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey("telegram:dm:123", undefined),
|
||||
).toBeUndefined();
|
||||
});
|
||||
|
||||
it("returns dmHistoryLimit for telegram provider", () => {
|
||||
const config = {
|
||||
channels: { telegram: { dmHistoryLimit: 15 } },
|
||||
} as ClawdbotConfig;
|
||||
expect(getDmHistoryLimitFromSessionKey("telegram:dm:123", config)).toBe(15);
|
||||
});
|
||||
|
||||
it("returns dmHistoryLimit for whatsapp provider", () => {
|
||||
const config = {
|
||||
channels: { whatsapp: { dmHistoryLimit: 20 } },
|
||||
} as ClawdbotConfig;
|
||||
expect(getDmHistoryLimitFromSessionKey("whatsapp:dm:123", config)).toBe(20);
|
||||
});
|
||||
|
||||
it("returns dmHistoryLimit for agent-prefixed session keys", () => {
|
||||
const config = {
|
||||
channels: { telegram: { dmHistoryLimit: 10 } },
|
||||
} as ClawdbotConfig;
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey("agent:main:telegram:dm:123", config),
|
||||
).toBe(10);
|
||||
});
|
||||
|
||||
it("returns undefined for non-dm session kinds", () => {
|
||||
const config = {
|
||||
channels: {
|
||||
telegram: { dmHistoryLimit: 15 },
|
||||
slack: { dmHistoryLimit: 10 },
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey("agent:beta:slack:channel:C1", config),
|
||||
).toBeUndefined();
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey("telegram:slash:123", config),
|
||||
).toBeUndefined();
|
||||
});
|
||||
|
||||
it("returns undefined for unknown provider", () => {
|
||||
const config = {
|
||||
channels: { telegram: { dmHistoryLimit: 15 } },
|
||||
} as ClawdbotConfig;
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey("unknown:dm:123", config),
|
||||
).toBeUndefined();
|
||||
});
|
||||
|
||||
it("returns undefined when provider config has no dmHistoryLimit", () => {
|
||||
const config = { channels: { telegram: {} } } as ClawdbotConfig;
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey("telegram:dm:123", config),
|
||||
).toBeUndefined();
|
||||
});
|
||||
|
||||
it("handles all supported providers", () => {
|
||||
const providers = [
|
||||
"telegram",
|
||||
"whatsapp",
|
||||
"discord",
|
||||
"slack",
|
||||
"signal",
|
||||
"imessage",
|
||||
"msteams",
|
||||
] as const;
|
||||
|
||||
for (const provider of providers) {
|
||||
const config = {
|
||||
channels: { [provider]: { dmHistoryLimit: 5 } },
|
||||
} as ClawdbotConfig;
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey(`${provider}:dm:123`, config),
|
||||
).toBe(5);
|
||||
}
|
||||
});
|
||||
|
||||
it("handles per-DM overrides for all supported providers", () => {
|
||||
const providers = [
|
||||
"telegram",
|
||||
"whatsapp",
|
||||
"discord",
|
||||
"slack",
|
||||
"signal",
|
||||
"imessage",
|
||||
"msteams",
|
||||
] as const;
|
||||
|
||||
for (const provider of providers) {
|
||||
// Test per-DM override takes precedence
|
||||
const configWithOverride = {
|
||||
channels: {
|
||||
[provider]: {
|
||||
dmHistoryLimit: 20,
|
||||
dms: { user123: { historyLimit: 7 } },
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey(
|
||||
`${provider}:dm:user123`,
|
||||
configWithOverride,
|
||||
),
|
||||
).toBe(7);
|
||||
|
||||
// Test fallback to provider default when user not in dms
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey(
|
||||
`${provider}:dm:otheruser`,
|
||||
configWithOverride,
|
||||
),
|
||||
).toBe(20);
|
||||
|
||||
// Test with agent-prefixed key
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey(
|
||||
`agent:main:${provider}:dm:user123`,
|
||||
configWithOverride,
|
||||
),
|
||||
).toBe(7);
|
||||
}
|
||||
});
|
||||
|
||||
it("returns per-DM override when set", () => {
|
||||
const config = {
|
||||
channels: {
|
||||
telegram: {
|
||||
dmHistoryLimit: 15,
|
||||
dms: { "123": { historyLimit: 5 } },
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
expect(getDmHistoryLimitFromSessionKey("telegram:dm:123", config)).toBe(5);
|
||||
});
|
||||
|
||||
it("falls back to provider default when per-DM not set", () => {
|
||||
const config = {
|
||||
channels: {
|
||||
telegram: {
|
||||
dmHistoryLimit: 15,
|
||||
dms: { "456": { historyLimit: 5 } },
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
expect(getDmHistoryLimitFromSessionKey("telegram:dm:123", config)).toBe(15);
|
||||
});
|
||||
|
||||
it("returns per-DM override for agent-prefixed keys", () => {
|
||||
const config = {
|
||||
channels: {
|
||||
telegram: {
|
||||
dmHistoryLimit: 20,
|
||||
dms: { "789": { historyLimit: 3 } },
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey("agent:main:telegram:dm:789", config),
|
||||
).toBe(3);
|
||||
});
|
||||
|
||||
it("handles userId with colons (e.g., email)", () => {
|
||||
const config = {
|
||||
channels: {
|
||||
msteams: {
|
||||
dmHistoryLimit: 10,
|
||||
dms: { "user@example.com": { historyLimit: 7 } },
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey("msteams:dm:user@example.com", config),
|
||||
).toBe(7);
|
||||
});
|
||||
|
||||
it("returns undefined when per-DM historyLimit is not set", () => {
|
||||
const config = {
|
||||
channels: {
|
||||
telegram: {
|
||||
dms: { "123": {} },
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
expect(
|
||||
getDmHistoryLimitFromSessionKey("telegram:dm:123", config),
|
||||
).toBeUndefined();
|
||||
});
|
||||
|
||||
it("returns 0 when per-DM historyLimit is explicitly 0 (unlimited)", () => {
|
||||
const config = {
|
||||
channels: {
|
||||
telegram: {
|
||||
dmHistoryLimit: 15,
|
||||
dms: { "123": { historyLimit: 0 } },
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
expect(getDmHistoryLimitFromSessionKey("telegram:dm:123", config)).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe("runEmbeddedPiAgent", () => {
|
||||
it("writes models.json into the provided agentDir", async () => {
|
||||
const agentDir = await fs.mkdtemp(
|
||||
path.join(os.tmpdir(), "clawdbot-agent-"),
|
||||
);
|
||||
const workspaceDir = await fs.mkdtemp(
|
||||
path.join(os.tmpdir(), "clawdbot-workspace-"),
|
||||
);
|
||||
const sessionFile = path.join(workspaceDir, "session.jsonl");
|
||||
|
||||
const cfg = {
|
||||
models: {
|
||||
providers: {
|
||||
minimax: {
|
||||
baseUrl: "https://api.minimax.io/anthropic",
|
||||
api: "anthropic-messages",
|
||||
apiKey: "sk-minimax-test",
|
||||
models: [
|
||||
{
|
||||
id: "MiniMax-M2.1",
|
||||
name: "MiniMax M2.1",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 200000,
|
||||
maxTokens: 8192,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
} satisfies ClawdbotConfig;
|
||||
|
||||
await expect(
|
||||
runEmbeddedPiAgent({
|
||||
sessionId: "session:test",
|
||||
sessionKey: "agent:dev:test",
|
||||
sessionFile,
|
||||
workspaceDir,
|
||||
config: cfg,
|
||||
prompt: "hi",
|
||||
provider: "definitely-not-a-provider",
|
||||
model: "definitely-not-a-model",
|
||||
timeoutMs: 1,
|
||||
agentDir,
|
||||
}),
|
||||
).rejects.toThrow(/Unknown model:/);
|
||||
|
||||
await expect(
|
||||
fs.stat(path.join(agentDir, "models.json")),
|
||||
).resolves.toBeTruthy();
|
||||
});
|
||||
|
||||
it(
|
||||
"persists the first user message before assistant output",
|
||||
{ timeout: 15_000 },
|
||||
async () => {
|
||||
const agentDir = await fs.mkdtemp(
|
||||
path.join(os.tmpdir(), "clawdbot-agent-"),
|
||||
);
|
||||
const workspaceDir = await fs.mkdtemp(
|
||||
path.join(os.tmpdir(), "clawdbot-workspace-"),
|
||||
);
|
||||
const sessionFile = path.join(workspaceDir, "session.jsonl");
|
||||
|
||||
const cfg = makeOpenAiConfig(["mock-1"]);
|
||||
await ensureModels(cfg, agentDir);
|
||||
|
||||
await runEmbeddedPiAgent({
|
||||
sessionId: "session:test",
|
||||
sessionKey: "agent:main:main",
|
||||
sessionFile,
|
||||
workspaceDir,
|
||||
config: cfg,
|
||||
prompt: "hello",
|
||||
provider: "openai",
|
||||
model: "mock-1",
|
||||
timeoutMs: 5_000,
|
||||
agentDir,
|
||||
});
|
||||
|
||||
const messages = await readSessionMessages(sessionFile);
|
||||
const firstUserIndex = messages.findIndex(
|
||||
(message) =>
|
||||
message?.role === "user" &&
|
||||
textFromContent(message.content) === "hello",
|
||||
);
|
||||
const firstAssistantIndex = messages.findIndex(
|
||||
(message) => message?.role === "assistant",
|
||||
);
|
||||
expect(firstUserIndex).toBeGreaterThanOrEqual(0);
|
||||
if (firstAssistantIndex !== -1) {
|
||||
expect(firstUserIndex).toBeLessThan(firstAssistantIndex);
|
||||
}
|
||||
},
|
||||
);
|
||||
|
||||
it("persists the user message when prompt fails before assistant output", async () => {
|
||||
const agentDir = await fs.mkdtemp(
|
||||
path.join(os.tmpdir(), "clawdbot-agent-"),
|
||||
);
|
||||
const workspaceDir = await fs.mkdtemp(
|
||||
path.join(os.tmpdir(), "clawdbot-workspace-"),
|
||||
);
|
||||
const sessionFile = path.join(workspaceDir, "session.jsonl");
|
||||
|
||||
const cfg = makeOpenAiConfig(["mock-error"]);
|
||||
await ensureModels(cfg, agentDir);
|
||||
|
||||
const result = await runEmbeddedPiAgent({
|
||||
sessionId: "session:test",
|
||||
sessionKey: "agent:main:main",
|
||||
sessionFile,
|
||||
workspaceDir,
|
||||
config: cfg,
|
||||
prompt: "boom",
|
||||
provider: "openai",
|
||||
model: "mock-error",
|
||||
timeoutMs: 5_000,
|
||||
agentDir,
|
||||
});
|
||||
expect(result.payloads[0]?.isError).toBe(true);
|
||||
|
||||
const messages = await readSessionMessages(sessionFile);
|
||||
const userIndex = messages.findIndex(
|
||||
(message) =>
|
||||
message?.role === "user" && textFromContent(message.content) === "boom",
|
||||
);
|
||||
expect(userIndex).toBeGreaterThanOrEqual(0);
|
||||
});
|
||||
|
||||
it("appends new user + assistant after existing transcript entries", async () => {
|
||||
const agentDir = await fs.mkdtemp(
|
||||
path.join(os.tmpdir(), "clawdbot-agent-"),
|
||||
);
|
||||
const workspaceDir = await fs.mkdtemp(
|
||||
path.join(os.tmpdir(), "clawdbot-workspace-"),
|
||||
);
|
||||
const sessionFile = path.join(workspaceDir, "session.jsonl");
|
||||
|
||||
const sessionManager = SessionManager.open(sessionFile);
|
||||
sessionManager.appendMessage({
|
||||
role: "user",
|
||||
content: [{ type: "text", text: "seed user" }],
|
||||
});
|
||||
sessionManager.appendMessage({
|
||||
role: "assistant",
|
||||
content: [{ type: "text", text: "seed assistant" }],
|
||||
stopReason: "stop",
|
||||
api: "openai-responses",
|
||||
provider: "openai",
|
||||
model: "mock-1",
|
||||
usage: {
|
||||
input: 1,
|
||||
output: 1,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
totalTokens: 2,
|
||||
cost: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
total: 0,
|
||||
},
|
||||
},
|
||||
timestamp: Date.now(),
|
||||
});
|
||||
|
||||
const cfg = makeOpenAiConfig(["mock-1"]);
|
||||
await ensureModels(cfg, agentDir);
|
||||
|
||||
await runEmbeddedPiAgent({
|
||||
sessionId: "session:test",
|
||||
sessionKey: "agent:main:main",
|
||||
sessionFile,
|
||||
workspaceDir,
|
||||
config: cfg,
|
||||
prompt: "hello",
|
||||
provider: "openai",
|
||||
model: "mock-1",
|
||||
timeoutMs: 5_000,
|
||||
agentDir,
|
||||
});
|
||||
|
||||
const messages = await readSessionMessages(sessionFile);
|
||||
const seedUserIndex = messages.findIndex(
|
||||
(message) =>
|
||||
message?.role === "user" &&
|
||||
textFromContent(message.content) === "seed user",
|
||||
);
|
||||
const seedAssistantIndex = messages.findIndex(
|
||||
(message) =>
|
||||
message?.role === "assistant" &&
|
||||
textFromContent(message.content) === "seed assistant",
|
||||
);
|
||||
const newUserIndex = messages.findIndex(
|
||||
(message) =>
|
||||
message?.role === "user" &&
|
||||
textFromContent(message.content) === "hello",
|
||||
);
|
||||
const newAssistantIndex = messages.findIndex(
|
||||
(message, index) => index > newUserIndex && message?.role === "assistant",
|
||||
);
|
||||
expect(seedUserIndex).toBeGreaterThanOrEqual(0);
|
||||
expect(seedAssistantIndex).toBeGreaterThan(seedUserIndex);
|
||||
expect(newUserIndex).toBeGreaterThan(seedAssistantIndex);
|
||||
expect(newAssistantIndex).toBeGreaterThan(newUserIndex);
|
||||
});
|
||||
|
||||
// Verifies that transcript ordering survives two separate embedded runs
// against the same session file: each user turn is followed by its
// assistant reply, and the second run appends after the first pair.
it("persists multi-turn user/assistant ordering across runs", async () => {
  // Fresh temp dirs isolate each test run from real agent/workspace state.
  const agentDir = await fs.mkdtemp(
    path.join(os.tmpdir(), "clawdbot-agent-"),
  );
  const workspaceDir = await fs.mkdtemp(
    path.join(os.tmpdir(), "clawdbot-workspace-"),
  );
  const sessionFile = path.join(workspaceDir, "session.jsonl");

  const cfg = makeOpenAiConfig(["mock-1"]);
  await ensureModels(cfg, agentDir);

  // First run: writes the initial user/assistant pair to the session file.
  await runEmbeddedPiAgent({
    sessionId: "session:test",
    sessionKey: "agent:main:main",
    sessionFile,
    workspaceDir,
    config: cfg,
    prompt: "first",
    provider: "openai",
    model: "mock-1",
    timeoutMs: 5_000,
    agentDir,
  });

  // Second run: must append after the first pair, not reorder it.
  await runEmbeddedPiAgent({
    sessionId: "session:test",
    sessionKey: "agent:main:main",
    sessionFile,
    workspaceDir,
    config: cfg,
    prompt: "second",
    provider: "openai",
    model: "mock-1",
    timeoutMs: 5_000,
    agentDir,
  });

  const messages = await readSessionMessages(sessionFile);
  // findIndex yields -1 when a turn is missing; the assertions below
  // reject that case via the >= 0 / strictly-increasing checks.
  const firstUserIndex = messages.findIndex(
    (message) =>
      message?.role === "user" &&
      textFromContent(message.content) === "first",
  );
  const firstAssistantIndex = messages.findIndex(
    (message, index) =>
      index > firstUserIndex && message?.role === "assistant",
  );
  const secondUserIndex = messages.findIndex(
    (message) =>
      message?.role === "user" &&
      textFromContent(message.content) === "second",
  );
  const secondAssistantIndex = messages.findIndex(
    (message, index) =>
      index > secondUserIndex && message?.role === "assistant",
  );
  // Strictly increasing indices => user, assistant, user, assistant order.
  expect(firstUserIndex).toBeGreaterThanOrEqual(0);
  expect(firstAssistantIndex).toBeGreaterThan(firstUserIndex);
  expect(secondUserIndex).toBeGreaterThan(firstAssistantIndex);
  expect(secondAssistantIndex).toBeGreaterThan(secondUserIndex);
});
|
||||
});
|
||||
File diff suppressed because it is too large
Load Diff
10
src/agents/pi-embedded-runner/abort.ts
Normal file
10
src/agents/pi-embedded-runner/abort.ts
Normal file
@@ -0,0 +1,10 @@
|
||||
export function isAbortError(err: unknown): boolean {
|
||||
if (!err || typeof err !== "object") return false;
|
||||
const name = "name" in err ? String(err.name) : "";
|
||||
if (name === "AbortError") return true;
|
||||
const message =
|
||||
"message" in err && typeof err.message === "string"
|
||||
? err.message.toLowerCase()
|
||||
: "";
|
||||
return message.includes("aborted");
|
||||
}
|
||||
390
src/agents/pi-embedded-runner/compact.ts
Normal file
390
src/agents/pi-embedded-runner/compact.ts
Normal file
@@ -0,0 +1,390 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
|
||||
import {
|
||||
createAgentSession,
|
||||
SessionManager,
|
||||
SettingsManager,
|
||||
} from "@mariozechner/pi-coding-agent";
|
||||
|
||||
import { resolveHeartbeatPrompt } from "../../auto-reply/heartbeat.js";
|
||||
import type { ReasoningLevel, ThinkLevel } from "../../auto-reply/thinking.js";
|
||||
import { resolveChannelCapabilities } from "../../config/channel-capabilities.js";
|
||||
import type { ClawdbotConfig } from "../../config/config.js";
|
||||
import { getMachineDisplayName } from "../../infra/machine-name.js";
|
||||
import {
|
||||
type enqueueCommand,
|
||||
enqueueCommandInLane,
|
||||
} from "../../process/command-queue.js";
|
||||
import { normalizeMessageChannel } from "../../utils/message-channel.js";
|
||||
import { isReasoningTagProvider } from "../../utils/provider-utils.js";
|
||||
import { resolveUserPath } from "../../utils.js";
|
||||
import { resolveClawdbotAgentDir } from "../agent-paths.js";
|
||||
import { resolveSessionAgentIds } from "../agent-scope.js";
|
||||
import type { ExecElevatedDefaults } from "../bash-tools.js";
|
||||
import { DEFAULT_MODEL, DEFAULT_PROVIDER } from "../defaults.js";
|
||||
import { getApiKeyForModel, resolveModelAuthMode } from "../model-auth.js";
|
||||
import { ensureClawdbotModelsJson } from "../models-config.js";
|
||||
import {
|
||||
buildBootstrapContextFiles,
|
||||
type EmbeddedContextFile,
|
||||
ensureSessionHeader,
|
||||
resolveBootstrapMaxChars,
|
||||
validateAnthropicTurns,
|
||||
validateGeminiTurns,
|
||||
} from "../pi-embedded-helpers.js";
|
||||
import {
|
||||
ensurePiCompactionReserveTokens,
|
||||
resolveCompactionReserveTokensFloor,
|
||||
} from "../pi-settings.js";
|
||||
import { createClawdbotCodingTools } from "../pi-tools.js";
|
||||
import { resolveSandboxContext } from "../sandbox.js";
|
||||
import { guardSessionManager } from "../session-tool-result-guard-wrapper.js";
|
||||
import { acquireSessionWriteLock } from "../session-write-lock.js";
|
||||
import {
|
||||
applySkillEnvOverrides,
|
||||
applySkillEnvOverridesFromSnapshot,
|
||||
loadWorkspaceSkillEntries,
|
||||
resolveSkillsPromptForRun,
|
||||
type SkillSnapshot,
|
||||
} from "../skills.js";
|
||||
import {
|
||||
filterBootstrapFilesForSession,
|
||||
loadWorkspaceBootstrapFiles,
|
||||
} from "../workspace.js";
|
||||
import { buildEmbeddedExtensionPaths } from "./extensions.js";
|
||||
import { logToolSchemasForGoogle, sanitizeSessionHistory } from "./google.js";
|
||||
import {
|
||||
getDmHistoryLimitFromSessionKey,
|
||||
limitHistoryTurns,
|
||||
} from "./history.js";
|
||||
import { resolveGlobalLane, resolveSessionLane } from "./lanes.js";
|
||||
import { log } from "./logger.js";
|
||||
import { buildModelAliasLines, resolveModel } from "./model.js";
|
||||
import { buildEmbeddedSandboxInfo } from "./sandbox-info.js";
|
||||
import {
|
||||
prewarmSessionFile,
|
||||
trackSessionManagerAccess,
|
||||
} from "./session-manager-cache.js";
|
||||
import {
|
||||
buildEmbeddedSystemPrompt,
|
||||
createSystemPromptOverride,
|
||||
} from "./system-prompt.js";
|
||||
import { splitSdkTools } from "./tool-split.js";
|
||||
import type { EmbeddedPiCompactResult } from "./types.js";
|
||||
import {
|
||||
describeUnknownError,
|
||||
formatUserTime,
|
||||
mapThinkingLevel,
|
||||
resolveExecToolDefaults,
|
||||
resolveUserTimezone,
|
||||
} from "./utils.js";
|
||||
|
||||
/**
 * Compact an embedded pi session transcript (summarize older turns via
 * `session.compact`) so the context fits the model window again.
 *
 * Execution is serialized through two command lanes — the per-session lane
 * and a global lane (params.lane, defaulting to "main") — so compaction
 * never races another run on the same session.
 *
 * @returns An EmbeddedPiCompactResult: `{ ok: true, compacted: true, result }`
 *   on success, or `{ ok: false, compacted: false, reason }` on any failure
 *   (unknown model, auth failure, or a thrown error during the run).
 */
export async function compactEmbeddedPiSession(params: {
  sessionId: string;
  sessionKey?: string;
  messageChannel?: string;
  messageProvider?: string;
  agentAccountId?: string;
  sessionFile: string;
  workspaceDir: string;
  agentDir?: string;
  config?: ClawdbotConfig;
  skillsSnapshot?: SkillSnapshot;
  provider?: string;
  model?: string;
  thinkLevel?: ThinkLevel;
  reasoningLevel?: ReasoningLevel;
  bashElevated?: ExecElevatedDefaults;
  customInstructions?: string;
  lane?: string;
  enqueue?: typeof enqueueCommand;
  extraSystemPrompt?: string;
  ownerNumbers?: string[];
}): Promise<EmbeddedPiCompactResult> {
  const sessionLane = resolveSessionLane(
    params.sessionKey?.trim() || params.sessionId,
  );
  const globalLane = resolveGlobalLane(params.lane);
  // Callers may inject their own global enqueue (e.g. for tests); default
  // routes through the shared global command lane.
  const enqueueGlobal =
    params.enqueue ??
    ((task, opts) => enqueueCommandInLane(globalLane, task, opts));
  return enqueueCommandInLane(sessionLane, () =>
    enqueueGlobal(async () => {
      const resolvedWorkspace = resolveUserPath(params.workspaceDir);
      // Saved so the finally block can restore the process cwd; the run
      // chdirs into the effective workspace below.
      const prevCwd = process.cwd();

      // --- Resolve provider/model and authenticate ---
      const provider =
        (params.provider ?? DEFAULT_PROVIDER).trim() || DEFAULT_PROVIDER;
      const modelId = (params.model ?? DEFAULT_MODEL).trim() || DEFAULT_MODEL;
      const agentDir = params.agentDir ?? resolveClawdbotAgentDir();
      await ensureClawdbotModelsJson(params.config, agentDir);
      const { model, error, authStorage, modelRegistry } = resolveModel(
        provider,
        modelId,
        agentDir,
        params.config,
      );
      if (!model) {
        return {
          ok: false,
          compacted: false,
          reason: error ?? `Unknown model: ${provider}/${modelId}`,
        };
      }
      try {
        const apiKeyInfo = await getApiKeyForModel({
          model,
          cfg: params.config,
        });

        // github-copilot exchanges the GitHub token for a short-lived
        // Copilot API token before use; other providers use the key as-is.
        if (model.provider === "github-copilot") {
          const { resolveCopilotApiToken } = await import(
            "../../providers/github-copilot-token.js"
          );
          const copilotToken = await resolveCopilotApiToken({
            githubToken: apiKeyInfo.apiKey,
          });
          authStorage.setRuntimeApiKey(model.provider, copilotToken.token);
        } else {
          authStorage.setRuntimeApiKey(model.provider, apiKeyInfo.apiKey);
        }
      } catch (err) {
        return {
          ok: false,
          compacted: false,
          reason: describeUnknownError(err),
        };
      }

      // --- Resolve workspace and sandbox ---
      await fs.mkdir(resolvedWorkspace, { recursive: true });
      const sandboxSessionKey = params.sessionKey?.trim() || params.sessionId;
      const sandbox = await resolveSandboxContext({
        config: params.config,
        sessionKey: sandboxSessionKey,
        workspaceDir: resolvedWorkspace,
      });
      // A read-only sandbox redirects work into the sandbox's own
      // workspace dir; rw sandboxes (and no sandbox) use the real one.
      const effectiveWorkspace = sandbox?.enabled
        ? sandbox.workspaceAccess === "rw"
          ? resolvedWorkspace
          : sandbox.workspaceDir
        : resolvedWorkspace;
      await fs.mkdir(effectiveWorkspace, { recursive: true });
      await ensureSessionHeader({
        sessionFile: params.sessionFile,
        sessionId: params.sessionId,
        cwd: effectiveWorkspace,
      });

      let restoreSkillEnv: (() => void) | undefined;
      process.chdir(effectiveWorkspace);
      try {
        // --- Skills: env overrides + prompt ---
        // A snapshot with resolved skills lets us skip re-scanning the
        // workspace for skill entries.
        const shouldLoadSkillEntries =
          !params.skillsSnapshot || !params.skillsSnapshot.resolvedSkills;
        const skillEntries = shouldLoadSkillEntries
          ? loadWorkspaceSkillEntries(effectiveWorkspace)
          : [];
        // Both branches return an undo function, invoked in the finally.
        restoreSkillEnv = params.skillsSnapshot
          ? applySkillEnvOverridesFromSnapshot({
              snapshot: params.skillsSnapshot,
              config: params.config,
            })
          : applySkillEnvOverrides({
              skills: skillEntries ?? [],
              config: params.config,
            });
        const skillsPrompt = resolveSkillsPromptForRun({
          skillsSnapshot: params.skillsSnapshot,
          entries: shouldLoadSkillEntries ? skillEntries : undefined,
          config: params.config,
          workspaceDir: effectiveWorkspace,
        });

        // --- Bootstrap context files ---
        const bootstrapFiles = filterBootstrapFilesForSession(
          await loadWorkspaceBootstrapFiles(effectiveWorkspace),
          params.sessionKey ?? params.sessionId,
        );
        const sessionLabel = params.sessionKey ?? params.sessionId;
        const contextFiles: EmbeddedContextFile[] = buildBootstrapContextFiles(
          bootstrapFiles,
          {
            maxChars: resolveBootstrapMaxChars(params.config),
            warn: (message) =>
              log.warn(`${message} (sessionKey=${sessionLabel})`),
          },
        );
        // --- Tools and runtime/system-prompt inputs ---
        const runAbortController = new AbortController();
        const tools = createClawdbotCodingTools({
          exec: {
            ...resolveExecToolDefaults(params.config),
            elevated: params.bashElevated,
          },
          sandbox,
          messageProvider: params.messageChannel ?? params.messageProvider,
          agentAccountId: params.agentAccountId,
          sessionKey: params.sessionKey ?? params.sessionId,
          agentDir,
          workspaceDir: effectiveWorkspace,
          config: params.config,
          abortSignal: runAbortController.signal,
          modelProvider: model.provider,
          modelId,
          modelAuthMode: resolveModelAuthMode(model.provider, params.config),
        });
        // Diagnostic-only: flags tool schemas Google providers may reject.
        logToolSchemasForGoogle({ tools, provider });
        const machineName = await getMachineDisplayName();
        const runtimeChannel = normalizeMessageChannel(
          params.messageChannel ?? params.messageProvider,
        );
        const runtimeCapabilities = runtimeChannel
          ? (resolveChannelCapabilities({
              cfg: params.config,
              channel: runtimeChannel,
              accountId: params.agentAccountId,
            }) ?? [])
          : undefined;
        const runtimeInfo = {
          host: machineName,
          os: `${os.type()} ${os.release()}`,
          arch: os.arch(),
          node: process.version,
          model: `${provider}/${modelId}`,
          channel: runtimeChannel,
          capabilities: runtimeCapabilities,
        };
        const sandboxInfo = buildEmbeddedSandboxInfo(
          sandbox,
          params.bashElevated,
        );
        const reasoningTagHint = isReasoningTagProvider(provider);
        const userTimezone = resolveUserTimezone(
          params.config?.agents?.defaults?.userTimezone,
        );
        const userTime = formatUserTime(new Date(), userTimezone);
        const { defaultAgentId, sessionAgentId } = resolveSessionAgentIds({
          sessionKey: params.sessionKey,
          config: params.config,
        });
        // Heartbeat prompt only applies to the default agent's sessions.
        const isDefaultAgent = sessionAgentId === defaultAgentId;
        const appendPrompt = buildEmbeddedSystemPrompt({
          workspaceDir: effectiveWorkspace,
          defaultThinkLevel: params.thinkLevel,
          reasoningLevel: params.reasoningLevel ?? "off",
          extraSystemPrompt: params.extraSystemPrompt,
          ownerNumbers: params.ownerNumbers,
          reasoningTagHint,
          heartbeatPrompt: isDefaultAgent
            ? resolveHeartbeatPrompt(
                params.config?.agents?.defaults?.heartbeat?.prompt,
              )
            : undefined,
          skillsPrompt,
          runtimeInfo,
          sandboxInfo,
          tools,
          modelAliasLines: buildModelAliasLines(params.config),
          userTimezone,
          userTime,
          contextFiles,
        });
        const systemPrompt = createSystemPromptOverride(appendPrompt);

        // --- Session open + compaction under the write lock ---
        const sessionLock = await acquireSessionWriteLock({
          sessionFile: params.sessionFile,
        });
        try {
          await prewarmSessionFile(params.sessionFile);
          const sessionManager = guardSessionManager(
            SessionManager.open(params.sessionFile),
          );
          trackSessionManagerAccess(params.sessionFile);
          const settingsManager = SettingsManager.create(
            effectiveWorkspace,
            agentDir,
          );
          // Keep the configured compaction reserve at or above the floor.
          ensurePiCompactionReserveTokens({
            settingsManager,
            minReserveTokens: resolveCompactionReserveTokensFloor(
              params.config,
            ),
          });
          const additionalExtensionPaths = buildEmbeddedExtensionPaths({
            cfg: params.config,
            sessionManager,
            provider,
            modelId,
            model,
          });

          const { builtInTools, customTools } = splitSdkTools({
            tools,
            sandboxEnabled: !!sandbox?.enabled,
          });

          let session: Awaited<
            ReturnType<typeof createAgentSession>
          >["session"];
          ({ session } = await createAgentSession({
            cwd: resolvedWorkspace,
            agentDir,
            authStorage,
            modelRegistry,
            model,
            thinkingLevel: mapThinkingLevel(params.thinkLevel),
            systemPrompt,
            tools: builtInTools,
            customTools,
            sessionManager,
            settingsManager,
            skills: [],
            contextFiles: [],
            additionalExtensionPaths,
          }));

          try {
            // Clean up persisted history (images, tool pairing,
            // provider-specific quirks) before replaying it.
            const prior = await sanitizeSessionHistory({
              messages: session.messages,
              modelApi: model.api,
              sessionManager,
              sessionId: params.sessionId,
            });
            const validatedGemini = validateGeminiTurns(prior);
            const validated = validateAnthropicTurns(validatedGemini);
            const limited = limitHistoryTurns(
              validated,
              getDmHistoryLimitFromSessionKey(params.sessionKey, params.config),
            );
            if (limited.length > 0) {
              session.agent.replaceMessages(limited);
            }
            const result = await session.compact(params.customInstructions);
            return {
              ok: true,
              compacted: true,
              result: {
                summary: result.summary,
                firstKeptEntryId: result.firstKeptEntryId,
                tokensBefore: result.tokensBefore,
                details: result.details,
              },
            };
          } finally {
            // Flush any buffered tool results before tearing down.
            sessionManager.flushPendingToolResults?.();
            session.dispose();
          }
        } finally {
          await sessionLock.release();
        }
      } catch (err) {
        return {
          ok: false,
          compacted: false,
          reason: describeUnknownError(err),
        };
      } finally {
        // Undo skill env overrides and restore the original cwd.
        restoreSkillEnv?.();
        process.chdir(prevCwd);
      }
    }),
  );
}
|
||||
86
src/agents/pi-embedded-runner/extensions.ts
Normal file
86
src/agents/pi-embedded-runner/extensions.ts
Normal file
@@ -0,0 +1,86 @@
|
||||
import path from "node:path";
|
||||
import { fileURLToPath } from "node:url";
|
||||
|
||||
import type { Api, Model } from "@mariozechner/pi-ai";
|
||||
import type { SessionManager } from "@mariozechner/pi-coding-agent";
|
||||
|
||||
import type { ClawdbotConfig } from "../../config/config.js";
|
||||
import { resolveContextWindowInfo } from "../context-window-guard.js";
|
||||
import { DEFAULT_CONTEXT_TOKENS } from "../defaults.js";
|
||||
import { setContextPruningRuntime } from "../pi-extensions/context-pruning/runtime.js";
|
||||
import { computeEffectiveSettings } from "../pi-extensions/context-pruning/settings.js";
|
||||
import { makeToolPrunablePredicate } from "../pi-extensions/context-pruning/tools.js";
|
||||
import { ensurePiCompactionReserveTokens } from "../pi-settings.js";
|
||||
|
||||
function resolvePiExtensionPath(id: string): string {
|
||||
const self = fileURLToPath(import.meta.url);
|
||||
const dir = path.dirname(self);
|
||||
// In dev this file is `.ts` (tsx), in production it's `.js`.
|
||||
const ext = path.extname(self) === ".ts" ? "ts" : "js";
|
||||
return path.join(dir, "..", "pi-extensions", `${id}.${ext}`);
|
||||
}
|
||||
|
||||
// Resolve the effective context-window size (in tokens) for the given
// provider/model. Delegates to resolveContextWindowInfo, passing the model's
// own contextWindow and DEFAULT_CONTEXT_TOKENS as the fallback.
function resolveContextWindowTokens(params: {
  cfg: ClawdbotConfig | undefined;
  provider: string;
  modelId: string;
  model: Model<Api> | undefined;
}): number {
  return resolveContextWindowInfo({
    cfg: params.cfg,
    provider: params.provider,
    modelId: params.modelId,
    modelContextWindow: params.model?.contextWindow,
    defaultTokens: DEFAULT_CONTEXT_TOKENS,
  }).tokens;
}
|
||||
|
||||
// Enable the context-pruning extension when config requests an active mode.
// Side effect: registers pruning runtime state (settings, window size,
// prunable-tool predicate) against the session manager so the extension —
// loaded by path — can read it.
function buildContextPruningExtension(params: {
  cfg: ClawdbotConfig | undefined;
  sessionManager: SessionManager;
  provider: string;
  modelId: string;
  model: Model<Api> | undefined;
}): { additionalExtensionPaths?: string[] } {
  const raw = params.cfg?.agents?.defaults?.contextPruning;
  // Only "adaptive" and "aggressive" modes activate pruning; anything else
  // (including unset) leaves the extension off.
  if (raw?.mode !== "adaptive" && raw?.mode !== "aggressive") return {};

  const settings = computeEffectiveSettings(raw);
  if (!settings) return {};

  setContextPruningRuntime(params.sessionManager, {
    settings,
    contextWindowTokens: resolveContextWindowTokens(params),
    isToolPrunable: makeToolPrunablePredicate(settings.tools),
  });

  return {
    additionalExtensionPaths: [resolvePiExtensionPath("context-pruning")],
  };
}
|
||||
|
||||
function resolveCompactionMode(cfg?: ClawdbotConfig): "default" | "safeguard" {
|
||||
return cfg?.agents?.defaults?.compaction?.mode === "safeguard"
|
||||
? "safeguard"
|
||||
: "default";
|
||||
}
|
||||
|
||||
export function buildEmbeddedExtensionPaths(params: {
|
||||
cfg: ClawdbotConfig | undefined;
|
||||
sessionManager: SessionManager;
|
||||
provider: string;
|
||||
modelId: string;
|
||||
model: Model<Api> | undefined;
|
||||
}): string[] {
|
||||
const paths = [resolvePiExtensionPath("transcript-sanitize")];
|
||||
if (resolveCompactionMode(params.cfg) === "safeguard") {
|
||||
paths.push(resolvePiExtensionPath("compaction-safeguard"));
|
||||
}
|
||||
const pruning = buildContextPruningExtension(params);
|
||||
if (pruning.additionalExtensionPaths) {
|
||||
paths.push(...pruning.additionalExtensionPaths);
|
||||
}
|
||||
return paths;
|
||||
}
|
||||
|
||||
export { ensurePiCompactionReserveTokens };
|
||||
130
src/agents/pi-embedded-runner/extra-params.ts
Normal file
130
src/agents/pi-embedded-runner/extra-params.ts
Normal file
@@ -0,0 +1,130 @@
|
||||
import type { StreamFn } from "@mariozechner/pi-agent-core";
|
||||
import type { Api, Model, SimpleStreamOptions } from "@mariozechner/pi-ai";
|
||||
import { streamSimple } from "@mariozechner/pi-ai";
|
||||
|
||||
import type { ClawdbotConfig } from "../../config/config.js";
|
||||
import { log } from "./logger.js";
|
||||
|
||||
/**
|
||||
* Resolve provider-specific extraParams from model config.
|
||||
* Auto-enables thinking mode for GLM-4.x models unless explicitly disabled.
|
||||
*
|
||||
* For ZAI GLM-4.x models, we auto-enable thinking via the Z.AI Cloud API format:
|
||||
* thinking: { type: "enabled", clear_thinking: boolean }
|
||||
*
|
||||
* - GLM-4.7: Preserved thinking (clear_thinking: false) - reasoning kept across turns
|
||||
* - GLM-4.5/4.6: Interleaved thinking (clear_thinking: true) - reasoning cleared each turn
|
||||
*
|
||||
* Users can override via config:
|
||||
* agents.defaults.models["zai/glm-4.7"].params.thinking = { type: "disabled" }
|
||||
*
|
||||
* Or disable via runtime flag: --thinking off
|
||||
*
|
||||
* @see https://docs.z.ai/guides/capabilities/thinking-mode
|
||||
* @internal Exported for testing only
|
||||
*/
|
||||
export function resolveExtraParams(params: {
|
||||
cfg: ClawdbotConfig | undefined;
|
||||
provider: string;
|
||||
modelId: string;
|
||||
thinkLevel?: string;
|
||||
}): Record<string, unknown> | undefined {
|
||||
const modelKey = `${params.provider}/${params.modelId}`;
|
||||
const modelConfig = params.cfg?.agents?.defaults?.models?.[modelKey];
|
||||
let extraParams = modelConfig?.params ? { ...modelConfig.params } : undefined;
|
||||
|
||||
// Auto-enable thinking for ZAI GLM-4.x models when not explicitly configured
|
||||
// Skip if user explicitly disabled thinking via --thinking off
|
||||
if (params.provider === "zai" && params.thinkLevel !== "off") {
|
||||
const modelIdLower = params.modelId.toLowerCase();
|
||||
const isGlm4 = modelIdLower.includes("glm-4");
|
||||
|
||||
if (isGlm4) {
|
||||
const hasThinkingConfig = extraParams?.thinking !== undefined;
|
||||
if (!hasThinkingConfig) {
|
||||
// GLM-4.7 supports preserved thinking; GLM-4.5/4.6 clear each turn.
|
||||
const isGlm47 = modelIdLower.includes("glm-4.7");
|
||||
const clearThinking = !isGlm47;
|
||||
|
||||
extraParams = {
|
||||
...extraParams,
|
||||
thinking: {
|
||||
type: "enabled",
|
||||
clear_thinking: clearThinking,
|
||||
},
|
||||
};
|
||||
|
||||
log.debug(
|
||||
`auto-enabled thinking for ${modelKey}: type=enabled, clear_thinking=${clearThinking}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return extraParams;
|
||||
}
|
||||
|
||||
function createStreamFnWithExtraParams(
|
||||
baseStreamFn: StreamFn | undefined,
|
||||
extraParams: Record<string, unknown> | undefined,
|
||||
): StreamFn | undefined {
|
||||
if (!extraParams || Object.keys(extraParams).length === 0) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
const streamParams: Partial<SimpleStreamOptions> = {};
|
||||
if (typeof extraParams.temperature === "number") {
|
||||
streamParams.temperature = extraParams.temperature;
|
||||
}
|
||||
if (typeof extraParams.maxTokens === "number") {
|
||||
streamParams.maxTokens = extraParams.maxTokens;
|
||||
}
|
||||
|
||||
if (Object.keys(streamParams).length === 0) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
log.debug(
|
||||
`creating streamFn wrapper with params: ${JSON.stringify(streamParams)}`,
|
||||
);
|
||||
|
||||
const underlying = baseStreamFn ?? streamSimple;
|
||||
const wrappedStreamFn: StreamFn = (model, context, options) =>
|
||||
underlying(model as Model<Api>, context, {
|
||||
...streamParams,
|
||||
...options,
|
||||
});
|
||||
|
||||
return wrappedStreamFn;
|
||||
}
|
||||
|
||||
/**
|
||||
* Apply extra params (like temperature) to an agent's streamFn.
|
||||
*
|
||||
* @internal Exported for testing
|
||||
*/
|
||||
export function applyExtraParamsToAgent(
|
||||
agent: { streamFn?: StreamFn },
|
||||
cfg: ClawdbotConfig | undefined,
|
||||
provider: string,
|
||||
modelId: string,
|
||||
thinkLevel?: string,
|
||||
): void {
|
||||
const extraParams = resolveExtraParams({
|
||||
cfg,
|
||||
provider,
|
||||
modelId,
|
||||
thinkLevel,
|
||||
});
|
||||
const wrappedStreamFn = createStreamFnWithExtraParams(
|
||||
agent.streamFn,
|
||||
extraParams,
|
||||
);
|
||||
|
||||
if (wrappedStreamFn) {
|
||||
log.debug(
|
||||
`applying extraParams to agent streamFn for ${provider}/${modelId}`,
|
||||
);
|
||||
agent.streamFn = wrappedStreamFn;
|
||||
}
|
||||
}
|
||||
185
src/agents/pi-embedded-runner/google.ts
Normal file
185
src/agents/pi-embedded-runner/google.ts
Normal file
@@ -0,0 +1,185 @@
|
||||
import type { AgentMessage, AgentTool } from "@mariozechner/pi-agent-core";
|
||||
import type { SessionManager } from "@mariozechner/pi-coding-agent";
|
||||
|
||||
import { registerUnhandledRejectionHandler } from "../../infra/unhandled-rejections.js";
|
||||
import {
|
||||
downgradeGeminiHistory,
|
||||
isCompactionFailureError,
|
||||
isGoogleModelApi,
|
||||
sanitizeGoogleTurnOrdering,
|
||||
sanitizeSessionMessagesImages,
|
||||
} from "../pi-embedded-helpers.js";
|
||||
import { sanitizeToolUseResultPairing } from "../session-transcript-repair.js";
|
||||
import { log } from "./logger.js";
|
||||
import { describeUnknownError } from "./utils.js";
|
||||
|
||||
// Session-entry marker noting that the synthetic user bootstrap turn was
// already prepended for this session (see applyGoogleTurnOrderingFix).
const GOOGLE_TURN_ORDERING_CUSTOM_TYPE = "google-turn-ordering-bootstrap";
// JSON Schema keywords flagged as unsupported for Google tool schemas;
// used purely for diagnostics in logToolSchemasForGoogle.
const GOOGLE_SCHEMA_UNSUPPORTED_KEYWORDS = new Set([
  "patternProperties",
  "additionalProperties",
  "$schema",
  "$id",
  "$ref",
  "$defs",
  "definitions",
  "examples",
  "minLength",
  "maxLength",
  "minimum",
  "maximum",
  "multipleOf",
  "pattern",
  "format",
  "minItems",
  "maxItems",
  "uniqueItems",
  "minProperties",
  "maxProperties",
]);
|
||||
|
||||
function findUnsupportedSchemaKeywords(
|
||||
schema: unknown,
|
||||
path: string,
|
||||
): string[] {
|
||||
if (!schema || typeof schema !== "object") return [];
|
||||
if (Array.isArray(schema)) {
|
||||
return schema.flatMap((item, index) =>
|
||||
findUnsupportedSchemaKeywords(item, `${path}[${index}]`),
|
||||
);
|
||||
}
|
||||
const record = schema as Record<string, unknown>;
|
||||
const violations: string[] = [];
|
||||
for (const [key, value] of Object.entries(record)) {
|
||||
if (GOOGLE_SCHEMA_UNSUPPORTED_KEYWORDS.has(key)) {
|
||||
violations.push(`${path}.${key}`);
|
||||
}
|
||||
if (value && typeof value === "object") {
|
||||
violations.push(
|
||||
...findUnsupportedSchemaKeywords(value, `${path}.${key}`),
|
||||
);
|
||||
}
|
||||
}
|
||||
return violations;
|
||||
}
|
||||
|
||||
export function logToolSchemasForGoogle(params: {
|
||||
tools: AgentTool[];
|
||||
provider: string;
|
||||
}) {
|
||||
if (
|
||||
params.provider !== "google-antigravity" &&
|
||||
params.provider !== "google-gemini-cli"
|
||||
) {
|
||||
return;
|
||||
}
|
||||
const toolNames = params.tools.map((tool, index) => `${index}:${tool.name}`);
|
||||
log.info("google tool schema snapshot", {
|
||||
provider: params.provider,
|
||||
toolCount: params.tools.length,
|
||||
tools: toolNames,
|
||||
});
|
||||
for (const [index, tool] of params.tools.entries()) {
|
||||
const violations = findUnsupportedSchemaKeywords(
|
||||
tool.parameters,
|
||||
`${tool.name}.parameters`,
|
||||
);
|
||||
if (violations.length > 0) {
|
||||
log.warn("google tool schema has unsupported keywords", {
|
||||
index,
|
||||
tool: tool.name,
|
||||
violations: violations.slice(0, 12),
|
||||
violationCount: violations.length,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Module-level safety net: claim unhandled rejections that describe a
// compaction failure (returning true marks them handled) and log them;
// all other rejections fall through to other handlers (return false).
registerUnhandledRejectionHandler((reason) => {
  const message = describeUnknownError(reason);
  if (!isCompactionFailureError(message)) return false;
  log.error(`Auto-compaction failed (unhandled): ${message}`);
  return true;
});
|
||||
|
||||
type CustomEntryLike = { type?: unknown; customType?: unknown };
|
||||
|
||||
function hasGoogleTurnOrderingMarker(sessionManager: SessionManager): boolean {
|
||||
try {
|
||||
return sessionManager
|
||||
.getEntries()
|
||||
.some(
|
||||
(entry) =>
|
||||
(entry as CustomEntryLike)?.type === "custom" &&
|
||||
(entry as CustomEntryLike)?.customType ===
|
||||
GOOGLE_TURN_ORDERING_CUSTOM_TYPE,
|
||||
);
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Persist the google-turn-ordering marker entry so the bootstrap-prepend
// warning in applyGoogleTurnOrderingFix fires at most once per session.
function markGoogleTurnOrderingMarker(sessionManager: SessionManager): void {
  try {
    sessionManager.appendCustomEntry(GOOGLE_TURN_ORDERING_CUSTOM_TYPE, {
      timestamp: Date.now(),
    });
  } catch {
    // ignore marker persistence failures
  }
}
|
||||
|
||||
/**
 * For Google-style model APIs, repair histories whose first message is an
 * assistant turn by running them through sanitizeGoogleTurnOrdering (which
 * prepends a user bootstrap turn — see the warning text below). The first
 * time the fix applies to a session, warn and persist a marker entry so
 * later runs stay quiet.
 *
 * @returns The (possibly sanitized) messages plus whether a prepend occurred.
 */
export function applyGoogleTurnOrderingFix(params: {
  messages: AgentMessage[];
  modelApi?: string | null;
  sessionManager: SessionManager;
  sessionId: string;
  warn?: (message: string) => void;
}): { messages: AgentMessage[]; didPrepend: boolean } {
  if (!isGoogleModelApi(params.modelApi)) {
    return { messages: params.messages, didPrepend: false };
  }
  const first = params.messages[0] as
    | { role?: unknown; content?: unknown }
    | undefined;
  // Only histories that open with an assistant turn need fixing.
  if (first?.role !== "assistant") {
    return { messages: params.messages, didPrepend: false };
  }
  const sanitized = sanitizeGoogleTurnOrdering(params.messages);
  // Identity comparison: a new array from the sanitizer signals a prepend.
  const didPrepend = sanitized !== params.messages;
  if (didPrepend && !hasGoogleTurnOrderingMarker(params.sessionManager)) {
    const warn = params.warn ?? ((message: string) => log.warn(message));
    warn(
      `google turn ordering fixup: prepended user bootstrap (sessionId=${params.sessionId})`,
    );
    markGoogleTurnOrderingMarker(params.sessionManager);
  }
  return { messages: sanitized, didPrepend };
}
|
||||
|
||||
/**
 * Normalize persisted session history before replaying it to a model.
 * Passes, in order:
 *  1. image sanitization (plus tool-call-ID sanitizing for Google APIs and
 *     tool-call-last enforcement for anthropic-messages),
 *  2. tool_use / tool_result pairing repair,
 *  3. Gemini history downgrade (Google APIs only),
 *  4. the Google turn-ordering fix.
 */
export async function sanitizeSessionHistory(params: {
  messages: AgentMessage[];
  modelApi?: string | null;
  sessionManager: SessionManager;
  sessionId: string;
}): Promise<AgentMessage[]> {
  const sanitizedImages = await sanitizeSessionMessagesImages(
    params.messages,
    "session:history",
    {
      sanitizeToolCallIds: isGoogleModelApi(params.modelApi),
      enforceToolCallLast: params.modelApi === "anthropic-messages",
    },
  );
  const repairedTools = sanitizeToolUseResultPairing(sanitizedImages);

  const downgraded = isGoogleModelApi(params.modelApi)
    ? downgradeGeminiHistory(repairedTools)
    : repairedTools;

  return applyGoogleTurnOrderingFix({
    messages: downgraded,
    modelApi: params.modelApi,
    sessionManager: params.sessionManager,
    sessionId: params.sessionId,
  }).messages;
}
|
||||
84
src/agents/pi-embedded-runner/history.ts
Normal file
84
src/agents/pi-embedded-runner/history.ts
Normal file
@@ -0,0 +1,84 @@
|
||||
import type { AgentMessage } from "@mariozechner/pi-agent-core";
|
||||
|
||||
import type { ClawdbotConfig } from "../../config/config.js";
|
||||
|
||||
/**
|
||||
* Limits conversation history to the last N user turns (and their associated
|
||||
* assistant responses). This reduces token usage for long-running DM sessions.
|
||||
*/
|
||||
export function limitHistoryTurns(
|
||||
messages: AgentMessage[],
|
||||
limit: number | undefined,
|
||||
): AgentMessage[] {
|
||||
if (!limit || limit <= 0 || messages.length === 0) return messages;
|
||||
|
||||
let userCount = 0;
|
||||
let lastUserIndex = messages.length;
|
||||
|
||||
for (let i = messages.length - 1; i >= 0; i--) {
|
||||
if (messages[i].role === "user") {
|
||||
userCount++;
|
||||
if (userCount > limit) {
|
||||
return messages.slice(lastUserIndex);
|
||||
}
|
||||
lastUserIndex = i;
|
||||
}
|
||||
}
|
||||
return messages;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract provider + user ID from a session key and look up dmHistoryLimit.
|
||||
* Supports per-DM overrides and provider defaults.
|
||||
*/
|
||||
export function getDmHistoryLimitFromSessionKey(
|
||||
sessionKey: string | undefined,
|
||||
config: ClawdbotConfig | undefined,
|
||||
): number | undefined {
|
||||
if (!sessionKey || !config) return undefined;
|
||||
|
||||
const parts = sessionKey.split(":").filter(Boolean);
|
||||
const providerParts =
|
||||
parts.length >= 3 && parts[0] === "agent" ? parts.slice(2) : parts;
|
||||
|
||||
const provider = providerParts[0]?.toLowerCase();
|
||||
if (!provider) return undefined;
|
||||
|
||||
const kind = providerParts[1]?.toLowerCase();
|
||||
const userId = providerParts.slice(2).join(":");
|
||||
if (kind !== "dm") return undefined;
|
||||
|
||||
const getLimit = (
|
||||
providerConfig:
|
||||
| {
|
||||
dmHistoryLimit?: number;
|
||||
dms?: Record<string, { historyLimit?: number }>;
|
||||
}
|
||||
| undefined,
|
||||
): number | undefined => {
|
||||
if (!providerConfig) return undefined;
|
||||
if (userId && providerConfig.dms?.[userId]?.historyLimit !== undefined) {
|
||||
return providerConfig.dms[userId].historyLimit;
|
||||
}
|
||||
return providerConfig.dmHistoryLimit;
|
||||
};
|
||||
|
||||
switch (provider) {
|
||||
case "telegram":
|
||||
return getLimit(config.channels?.telegram);
|
||||
case "whatsapp":
|
||||
return getLimit(config.channels?.whatsapp);
|
||||
case "discord":
|
||||
return getLimit(config.channels?.discord);
|
||||
case "slack":
|
||||
return getLimit(config.channels?.slack);
|
||||
case "signal":
|
||||
return getLimit(config.channels?.signal);
|
||||
case "imessage":
|
||||
return getLimit(config.channels?.imessage);
|
||||
case "msteams":
|
||||
return getLimit(config.channels?.msteams);
|
||||
default:
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
13
src/agents/pi-embedded-runner/lanes.ts
Normal file
13
src/agents/pi-embedded-runner/lanes.ts
Normal file
@@ -0,0 +1,13 @@
|
||||
export function resolveSessionLane(key: string) {
|
||||
const cleaned = key.trim() || "main";
|
||||
return cleaned.startsWith("session:") ? cleaned : `session:${cleaned}`;
|
||||
}
|
||||
|
||||
export function resolveGlobalLane(lane?: string) {
|
||||
const cleaned = lane?.trim();
|
||||
return cleaned ? cleaned : "main";
|
||||
}
|
||||
|
||||
export function resolveEmbeddedSessionLane(key: string) {
|
||||
return resolveSessionLane(key);
|
||||
}
|
||||
3
src/agents/pi-embedded-runner/logger.ts
Normal file
3
src/agents/pi-embedded-runner/logger.ts
Normal file
@@ -0,0 +1,3 @@
|
||||
import { createSubsystemLogger } from "../../logging.js";
|
||||
|
||||
// Shared logger for the embedded pi-agent runner, scoped to "agent/embedded".
export const log = createSubsystemLogger("agent/embedded");
|
||||
84
src/agents/pi-embedded-runner/model.ts
Normal file
84
src/agents/pi-embedded-runner/model.ts
Normal file
@@ -0,0 +1,84 @@
|
||||
import type { Api, Model } from "@mariozechner/pi-ai";
|
||||
import {
|
||||
discoverAuthStorage,
|
||||
discoverModels,
|
||||
} from "@mariozechner/pi-coding-agent";
|
||||
|
||||
import type { ClawdbotConfig } from "../../config/config.js";
|
||||
import { resolveClawdbotAgentDir } from "../agent-paths.js";
|
||||
import { DEFAULT_CONTEXT_TOKENS } from "../defaults.js";
|
||||
import { normalizeModelCompat } from "../model-compat.js";
|
||||
|
||||
export function buildModelAliasLines(cfg?: ClawdbotConfig) {
|
||||
const models = cfg?.agents?.defaults?.models ?? {};
|
||||
const entries: Array<{ alias: string; model: string }> = [];
|
||||
for (const [keyRaw, entryRaw] of Object.entries(models)) {
|
||||
const model = String(keyRaw ?? "").trim();
|
||||
if (!model) continue;
|
||||
const alias = String(
|
||||
(entryRaw as { alias?: string } | undefined)?.alias ?? "",
|
||||
).trim();
|
||||
if (!alias) continue;
|
||||
entries.push({ alias, model });
|
||||
}
|
||||
return entries
|
||||
.sort((a, b) => a.alias.localeCompare(b.alias))
|
||||
.map((entry) => `- ${entry.alias}: ${entry.model}`);
|
||||
}
|
||||
|
||||
export function resolveModel(
|
||||
provider: string,
|
||||
modelId: string,
|
||||
agentDir?: string,
|
||||
cfg?: ClawdbotConfig,
|
||||
): {
|
||||
model?: Model<Api>;
|
||||
error?: string;
|
||||
authStorage: ReturnType<typeof discoverAuthStorage>;
|
||||
modelRegistry: ReturnType<typeof discoverModels>;
|
||||
} {
|
||||
const resolvedAgentDir = agentDir ?? resolveClawdbotAgentDir();
|
||||
const authStorage = discoverAuthStorage(resolvedAgentDir);
|
||||
const modelRegistry = discoverModels(authStorage, resolvedAgentDir);
|
||||
const model = modelRegistry.find(provider, modelId) as Model<Api> | null;
|
||||
if (!model) {
|
||||
const providers = cfg?.models?.providers ?? {};
|
||||
const inlineModels =
|
||||
providers[provider]?.models ??
|
||||
Object.values(providers)
|
||||
.flatMap((entry) => entry?.models ?? [])
|
||||
.map((entry) => ({ ...entry, provider }));
|
||||
const inlineMatch = inlineModels.find((entry) => entry.id === modelId);
|
||||
if (inlineMatch) {
|
||||
const normalized = normalizeModelCompat(inlineMatch as Model<Api>);
|
||||
return {
|
||||
model: normalized,
|
||||
authStorage,
|
||||
modelRegistry,
|
||||
};
|
||||
}
|
||||
const providerCfg = providers[provider];
|
||||
if (providerCfg || modelId.startsWith("mock-")) {
|
||||
const fallbackModel: Model<Api> = normalizeModelCompat({
|
||||
id: modelId,
|
||||
name: modelId,
|
||||
api: providerCfg?.api ?? "openai-responses",
|
||||
provider,
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow:
|
||||
providerCfg?.models?.[0]?.contextWindow ?? DEFAULT_CONTEXT_TOKENS,
|
||||
maxTokens:
|
||||
providerCfg?.models?.[0]?.maxTokens ?? DEFAULT_CONTEXT_TOKENS,
|
||||
} as Model<Api>);
|
||||
return { model: fallbackModel, authStorage, modelRegistry };
|
||||
}
|
||||
return {
|
||||
error: `Unknown model: ${provider}/${modelId}`,
|
||||
authStorage,
|
||||
modelRegistry,
|
||||
};
|
||||
}
|
||||
return { model: normalizeModelCompat(model), authStorage, modelRegistry };
|
||||
}
|
||||
444
src/agents/pi-embedded-runner/run.ts
Normal file
444
src/agents/pi-embedded-runner/run.ts
Normal file
@@ -0,0 +1,444 @@
|
||||
import fs from "node:fs/promises";
|
||||
import type { ThinkLevel } from "../../auto-reply/thinking.js";
|
||||
import { enqueueCommandInLane } from "../../process/command-queue.js";
|
||||
import { resolveUserPath } from "../../utils.js";
|
||||
import { resolveClawdbotAgentDir } from "../agent-paths.js";
|
||||
import {
|
||||
markAuthProfileFailure,
|
||||
markAuthProfileGood,
|
||||
markAuthProfileUsed,
|
||||
} from "../auth-profiles.js";
|
||||
import {
|
||||
CONTEXT_WINDOW_HARD_MIN_TOKENS,
|
||||
CONTEXT_WINDOW_WARN_BELOW_TOKENS,
|
||||
evaluateContextWindowGuard,
|
||||
resolveContextWindowInfo,
|
||||
} from "../context-window-guard.js";
|
||||
import {
|
||||
DEFAULT_CONTEXT_TOKENS,
|
||||
DEFAULT_MODEL,
|
||||
DEFAULT_PROVIDER,
|
||||
} from "../defaults.js";
|
||||
import { FailoverError, resolveFailoverStatus } from "../failover-error.js";
|
||||
import {
|
||||
ensureAuthProfileStore,
|
||||
getApiKeyForModel,
|
||||
resolveAuthProfileOrder,
|
||||
} from "../model-auth.js";
|
||||
import { ensureClawdbotModelsJson } from "../models-config.js";
|
||||
import {
|
||||
classifyFailoverReason,
|
||||
formatAssistantErrorText,
|
||||
isAuthAssistantError,
|
||||
isCompactionFailureError,
|
||||
isContextOverflowError,
|
||||
isFailoverAssistantError,
|
||||
isFailoverErrorMessage,
|
||||
isRateLimitAssistantError,
|
||||
isTimeoutErrorMessage,
|
||||
pickFallbackThinkingLevel,
|
||||
} from "../pi-embedded-helpers.js";
|
||||
import { normalizeUsage, type UsageLike } from "../usage.js";
|
||||
|
||||
import { resolveGlobalLane, resolveSessionLane } from "./lanes.js";
|
||||
import { log } from "./logger.js";
|
||||
import { resolveModel } from "./model.js";
|
||||
import { runEmbeddedAttempt } from "./run/attempt.js";
|
||||
import type { RunEmbeddedPiAgentParams } from "./run/params.js";
|
||||
import { buildEmbeddedRunPayloads } from "./run/payloads.js";
|
||||
import type { EmbeddedPiAgentMeta, EmbeddedPiRunResult } from "./types.js";
|
||||
import { describeUnknownError } from "./utils.js";
|
||||
|
||||
// Resolved API key plus its origin: the auth profile it came from (if any)
// and a source tag. NOTE(review): exact `source` values come from
// getApiKeyForModel — confirm there.
type ApiKeyInfo = {
  apiKey: string;
  profileId?: string;
  source: string;
};

/**
 * Runs one embedded pi-agent turn end to end.
 *
 * The run is serialized on two command-queue lanes (per-session, then global),
 * resolves model + auth profile, and retries in a loop to handle:
 *  - auth-profile rotation on failover-class errors and timeouts,
 *  - thinking-level fallback when the model rejects the requested level,
 *  - context-overflow errors (returned to the caller as an error payload).
 *
 * @param params Session identity, workspace, model/provider overrides,
 *   streaming callbacks, and config for the run.
 * @returns Reply payloads (if any) plus run metadata (duration, usage, abort flag).
 * @throws Error for unknown models or a misconfigured explicit auth profile.
 * @throws FailoverError when the context window is below the hard minimum, or
 *   when every auth profile failed and model fallbacks are configured.
 */
export async function runEmbeddedPiAgent(
  params: RunEmbeddedPiAgentParams,
): Promise<EmbeddedPiRunResult> {
  // Per-session lane serializes concurrent runs for the same session.
  const sessionLane = resolveSessionLane(
    params.sessionKey?.trim() || params.sessionId,
  );
  const globalLane = resolveGlobalLane(params.lane);
  // Callers may inject their own global enqueue; default to the shared lane.
  const enqueueGlobal =
    params.enqueue ??
    ((task, opts) => enqueueCommandInLane(globalLane, task, opts));

  return enqueueCommandInLane(sessionLane, () =>
    enqueueGlobal(async () => {
      const started = Date.now();
      const resolvedWorkspace = resolveUserPath(params.workspaceDir);
      // Saved so the finally block can restore it; presumably the attempt
      // chdirs into the workspace — confirm in runEmbeddedAttempt.
      const prevCwd = process.cwd();

      // Blank/whitespace overrides fall back to the defaults.
      const provider =
        (params.provider ?? DEFAULT_PROVIDER).trim() || DEFAULT_PROVIDER;
      const modelId = (params.model ?? DEFAULT_MODEL).trim() || DEFAULT_MODEL;
      const agentDir = params.agentDir ?? resolveClawdbotAgentDir();
      await ensureClawdbotModelsJson(params.config, agentDir);

      const { model, error, authStorage, modelRegistry } = resolveModel(
        provider,
        modelId,
        agentDir,
        params.config,
      );
      if (!model) {
        throw new Error(error ?? `Unknown model: ${provider}/${modelId}`);
      }

      // Guard against models whose context window is too small to be usable:
      // warn below the soft threshold, refuse below the hard minimum.
      const ctxInfo = resolveContextWindowInfo({
        cfg: params.config,
        provider,
        modelId,
        modelContextWindow: model.contextWindow,
        defaultTokens: DEFAULT_CONTEXT_TOKENS,
      });
      const ctxGuard = evaluateContextWindowGuard({
        info: ctxInfo,
        warnBelowTokens: CONTEXT_WINDOW_WARN_BELOW_TOKENS,
        hardMinTokens: CONTEXT_WINDOW_HARD_MIN_TOKENS,
      });
      if (ctxGuard.shouldWarn) {
        log.warn(
          `low context window: ${provider}/${modelId} ctx=${ctxGuard.tokens} (warn<${CONTEXT_WINDOW_WARN_BELOW_TOKENS}) source=${ctxGuard.source}`,
        );
      }
      if (ctxGuard.shouldBlock) {
        log.error(
          `blocked model (context window too small): ${provider}/${modelId} ctx=${ctxGuard.tokens} (min=${CONTEXT_WINDOW_HARD_MIN_TOKENS}) source=${ctxGuard.source}`,
        );
        throw new FailoverError(
          `Model context window too small (${ctxGuard.tokens} tokens). Minimum is ${CONTEXT_WINDOW_HARD_MIN_TOKENS}.`,
          { reason: "unknown", provider, model: modelId },
        );
      }

      // Build the ordered list of auth profiles to try. An explicitly
      // requested profile must be present in the resolved order.
      const authStore = ensureAuthProfileStore(agentDir);
      const explicitProfileId = params.authProfileId?.trim();
      const profileOrder = resolveAuthProfileOrder({
        cfg: params.config,
        store: authStore,
        provider,
        preferredProfile: explicitProfileId,
      });
      if (explicitProfileId && !profileOrder.includes(explicitProfileId)) {
        throw new Error(
          `Auth profile "${explicitProfileId}" is not configured for ${provider}.`,
        );
      }
      // [undefined] means "no profile": getApiKeyForModel resolves by other means.
      const profileCandidates =
        profileOrder.length > 0 ? profileOrder : [undefined];
      let profileIndex = 0;

      const initialThinkLevel = params.thinkLevel ?? "off";
      let thinkLevel = initialThinkLevel;
      // Thinking levels already tried, so fallback never repeats one.
      const attemptedThinking = new Set<ThinkLevel>();
      let apiKeyInfo: ApiKeyInfo | null = null;
      let lastProfileId: string | undefined;

      const resolveApiKeyForCandidate = async (candidate?: string) => {
        return getApiKeyForModel({
          model,
          cfg: params.config,
          profileId: candidate,
          store: authStore,
        });
      };

      // Resolves the candidate's API key and installs it as the runtime key.
      // GitHub Copilot exchanges the GitHub token for a short-lived API token.
      const applyApiKeyInfo = async (candidate?: string): Promise<void> => {
        apiKeyInfo = await resolveApiKeyForCandidate(candidate);
        if (model.provider === "github-copilot") {
          const { resolveCopilotApiToken } = await import(
            "../../providers/github-copilot-token.js"
          );
          const copilotToken = await resolveCopilotApiToken({
            githubToken: apiKeyInfo.apiKey,
          });
          authStorage.setRuntimeApiKey(model.provider, copilotToken.token);
        } else {
          authStorage.setRuntimeApiKey(model.provider, apiKeyInfo.apiKey);
        }
        lastProfileId = apiKeyInfo.profileId;
      };

      // Rotates to the next usable auth profile. Resets the thinking-level
      // state so the new profile starts from the caller's requested level.
      // Failure to apply an explicitly requested profile is fatal; other
      // candidates are simply skipped.
      const advanceAuthProfile = async (): Promise<boolean> => {
        let nextIndex = profileIndex + 1;
        while (nextIndex < profileCandidates.length) {
          const candidate = profileCandidates[nextIndex];
          try {
            await applyApiKeyInfo(candidate);
            profileIndex = nextIndex;
            thinkLevel = initialThinkLevel;
            attemptedThinking.clear();
            return true;
          } catch (err) {
            if (candidate && candidate === explicitProfileId) throw err;
            nextIndex += 1;
          }
        }
        return false;
      };

      // Apply the first candidate; on failure (unless it was the explicit
      // profile) fall through the remaining candidates before giving up.
      try {
        await applyApiKeyInfo(profileCandidates[profileIndex]);
      } catch (err) {
        if (profileCandidates[profileIndex] === explicitProfileId) throw err;
        const advanced = await advanceAuthProfile();
        if (!advanced) throw err;
      }

      try {
        // Retry loop: each iteration is one attempt; `continue` retries after
        // profile rotation or thinking-level fallback, `return` finishes.
        while (true) {
          attemptedThinking.add(thinkLevel);
          await fs.mkdir(resolvedWorkspace, { recursive: true });

          const attempt = await runEmbeddedAttempt({
            sessionId: params.sessionId,
            sessionKey: params.sessionKey,
            messageChannel: params.messageChannel,
            messageProvider: params.messageProvider,
            agentAccountId: params.agentAccountId,
            currentChannelId: params.currentChannelId,
            currentThreadTs: params.currentThreadTs,
            replyToMode: params.replyToMode,
            hasRepliedRef: params.hasRepliedRef,
            sessionFile: params.sessionFile,
            workspaceDir: params.workspaceDir,
            agentDir,
            config: params.config,
            skillsSnapshot: params.skillsSnapshot,
            prompt: params.prompt,
            images: params.images,
            provider,
            modelId,
            model,
            authStorage,
            modelRegistry,
            thinkLevel,
            verboseLevel: params.verboseLevel,
            reasoningLevel: params.reasoningLevel,
            bashElevated: params.bashElevated,
            timeoutMs: params.timeoutMs,
            runId: params.runId,
            abortSignal: params.abortSignal,
            shouldEmitToolResult: params.shouldEmitToolResult,
            onPartialReply: params.onPartialReply,
            onAssistantMessageStart: params.onAssistantMessageStart,
            onBlockReply: params.onBlockReply,
            onBlockReplyFlush: params.onBlockReplyFlush,
            blockReplyBreak: params.blockReplyBreak,
            blockReplyChunking: params.blockReplyChunking,
            onReasoningStream: params.onReasoningStream,
            onToolResult: params.onToolResult,
            onAgentEvent: params.onAgentEvent,
            extraSystemPrompt: params.extraSystemPrompt,
            ownerNumbers: params.ownerNumbers,
            enforceFinalTag: params.enforceFinalTag,
          });

          const {
            aborted,
            promptError,
            timedOut,
            sessionIdUsed,
            lastAssistant,
          } = attempt;

          // Errors raised while sending the prompt (ignored if the run was
          // aborted, which takes precedence).
          if (promptError && !aborted) {
            const errorText = describeUnknownError(promptError);
            // Context overflow is reported to the caller, not retried.
            if (isContextOverflowError(errorText)) {
              const kind = isCompactionFailureError(errorText)
                ? "compaction_failure"
                : "context_overflow";
              return {
                payloads: [
                  {
                    text:
                      "Context overflow: prompt too large for the model. " +
                      "Try again with less input or a larger-context model.",
                    isError: true,
                  },
                ],
                meta: {
                  durationMs: Date.now() - started,
                  agentMeta: {
                    sessionId: sessionIdUsed,
                    provider,
                    model: model.id,
                  },
                  error: { kind, message: errorText },
                },
              };
            }
            // Record the failure against the profile (timeouts are handled
            // by the post-attempt rotation path below, not here).
            const promptFailoverReason = classifyFailoverReason(errorText);
            if (
              promptFailoverReason &&
              promptFailoverReason !== "timeout" &&
              lastProfileId
            ) {
              await markAuthProfileFailure({
                store: authStore,
                profileId: lastProfileId,
                reason: promptFailoverReason,
                cfg: params.config,
                agentDir: params.agentDir,
              });
            }
            // Failover-class error: rotate to the next profile and retry.
            if (
              isFailoverErrorMessage(errorText) &&
              promptFailoverReason !== "timeout" &&
              (await advanceAuthProfile())
            ) {
              continue;
            }
            // Unsupported thinking level: retry with a lower level.
            const fallbackThinking = pickFallbackThinkingLevel({
              message: errorText,
              attempted: attemptedThinking,
            });
            if (fallbackThinking) {
              log.warn(
                `unsupported thinking level for ${provider}/${modelId}; retrying with ${fallbackThinking}`,
              );
              thinkLevel = fallbackThinking;
              continue;
            }
            throw promptError;
          }

          // The assistant itself may report an unsupported thinking level.
          const fallbackThinking = pickFallbackThinkingLevel({
            message: lastAssistant?.errorMessage,
            attempted: attemptedThinking,
          });
          if (fallbackThinking && !aborted) {
            log.warn(
              `unsupported thinking level for ${provider}/${modelId}; retrying with ${fallbackThinking}`,
            );
            thinkLevel = fallbackThinking;
            continue;
          }

          const fallbackConfigured =
            (params.config?.agents?.defaults?.model?.fallbacks?.length ?? 0) >
            0;
          const authFailure = isAuthAssistantError(lastAssistant);
          const rateLimitFailure = isRateLimitAssistantError(lastAssistant);
          const failoverFailure = isFailoverAssistantError(lastAssistant);
          const assistantFailoverReason = classifyFailoverReason(
            lastAssistant?.errorMessage ?? "",
          );
          const cloudCodeAssistFormatError = attempt.cloudCodeAssistFormatError;

          // Treat timeout as potential rate limit (Antigravity hangs on rate limit)
          const shouldRotate = (!aborted && failoverFailure) || timedOut;

          if (shouldRotate) {
            if (lastProfileId) {
              const reason =
                timedOut || assistantFailoverReason === "timeout"
                  ? "timeout"
                  : (assistantFailoverReason ?? "unknown");
              await markAuthProfileFailure({
                store: authStore,
                profileId: lastProfileId,
                reason,
                cfg: params.config,
                agentDir: params.agentDir,
              });
              if (timedOut) {
                log.warn(
                  `Profile ${lastProfileId} timed out (possible rate limit). Trying next account...`,
                );
              }
              if (cloudCodeAssistFormatError) {
                log.warn(
                  `Profile ${lastProfileId} hit Cloud Code Assist format error. Tool calls will be sanitized on retry.`,
                );
              }
            }

            const rotated = await advanceAuthProfile();
            if (rotated) continue;

            // All profiles exhausted: surface a FailoverError so configured
            // model fallbacks can take over; otherwise fall through and
            // return whatever the attempt produced.
            if (fallbackConfigured) {
              const message =
                lastAssistant?.errorMessage?.trim() ||
                (lastAssistant
                  ? formatAssistantErrorText(lastAssistant, {
                      cfg: params.config,
                      sessionKey: params.sessionKey ?? params.sessionId,
                    })
                  : "") ||
                (timedOut
                  ? "LLM request timed out."
                  : rateLimitFailure
                    ? "LLM request rate limited."
                    : authFailure
                      ? "LLM request unauthorized."
                      : "LLM request failed.");
              const status =
                resolveFailoverStatus(assistantFailoverReason ?? "unknown") ??
                (isTimeoutErrorMessage(message) ? 408 : undefined);
              throw new FailoverError(message, {
                reason: assistantFailoverReason ?? "unknown",
                provider,
                model: modelId,
                profileId: lastProfileId,
                status,
              });
            }
          }

          // Success path: collect usage, build reply payloads, and mark the
          // profile as good/used before returning.
          const usage = normalizeUsage(lastAssistant?.usage as UsageLike);
          const agentMeta: EmbeddedPiAgentMeta = {
            sessionId: sessionIdUsed,
            provider: lastAssistant?.provider ?? provider,
            model: lastAssistant?.model ?? model.id,
            usage,
          };

          const payloads = buildEmbeddedRunPayloads({
            assistantTexts: attempt.assistantTexts,
            toolMetas: attempt.toolMetas,
            lastAssistant: attempt.lastAssistant,
            config: params.config,
            sessionKey: params.sessionKey ?? params.sessionId,
            verboseLevel: params.verboseLevel,
            reasoningLevel: params.reasoningLevel,
            // Inline tool results only when no streaming callbacks consume them.
            inlineToolResultsAllowed:
              !params.onPartialReply && !params.onToolResult,
          });

          log.debug(
            `embedded run done: runId=${params.runId} sessionId=${params.sessionId} durationMs=${Date.now() - started} aborted=${aborted}`,
          );
          if (lastProfileId) {
            await markAuthProfileGood({
              store: authStore,
              provider,
              profileId: lastProfileId,
            });
            await markAuthProfileUsed({
              store: authStore,
              profileId: lastProfileId,
            });
          }
          return {
            payloads: payloads.length ? payloads : undefined,
            meta: {
              durationMs: Date.now() - started,
              agentMeta,
              aborted,
            },
            didSendViaMessagingTool: attempt.didSendViaMessagingTool,
            messagingToolSentTexts: attempt.messagingToolSentTexts,
            messagingToolSentTargets: attempt.messagingToolSentTargets,
          };
        }
      } finally {
        // Always restore the working directory, even on throw/abort.
        process.chdir(prevCwd);
      }
    }),
  );
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user