import { afterEach, beforeEach, describe, expect, it, mock } from "bun:test";
import { clearTestEnv, setTestEnv } from "./setup.js";

// Tests for llamaEmbedding and llamaCompletion against a mocked global fetch.
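// Assumed surface of ../src/llama-client.js, as inferred from these tests:
//   llamaEmbedding(text)           -> Promise<number[]>
//   llamaCompletion(prompt, opts?) -> Promise<string>
// setTestEnv() is expected to point the client at 127.0.0.1:8080 with the
// models "nomic-embed-text" (embeddings) and "mistral" (completion).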
describe("Llama Client Module", () => {
  const originalFetch = global.fetch;

  beforeEach(() => {
    setTestEnv();
  });

  afterEach(() => {
    clearTestEnv();
    // Restore the real fetch so mocks do not leak into other test files.
    global.fetch = originalFetch;
  });
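
  // llamaEmbedding should accept both the OpenAI-style response shape
  // ({ data: [{ embedding }] }) and a bare top-level { embedding } field.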
describe("llamaEmbedding", () => {
it("should parse embedding from response with data array", async () => {
const mockFetch = mock(() =>
Promise.resolve({
ok: true,
json: () =>
Promise.resolve({
data: [{ embedding: [0.1, 0.2, 0.3] }],
}),
text: () => Promise.resolve(""),
})
);
global.fetch = mockFetch;
const { llamaEmbedding } = await import("../src/llama-client.js");
const result = await llamaEmbedding("test text");
expect(result).toEqual([0.1, 0.2, 0.3]);
expect(mockFetch).toHaveBeenCalled();
});
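
    // Some llama.cpp endpoints return the vector as a bare top-level
    // { embedding } field instead of an OpenAI-style data array; both
    // shapes should normalize to the same number[].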
it("should parse embedding from response with direct embedding field", async () => {
const mockFetch = mock(() =>
Promise.resolve({
ok: true,
json: () =>
Promise.resolve({
embedding: [0.4, 0.5, 0.6],
}),
text: () => Promise.resolve(""),
})
);
global.fetch = mockFetch;
const { llamaEmbedding } = await import("../src/llama-client.js");
const result = await llamaEmbedding("test text");
expect(result).toEqual([0.4, 0.5, 0.6]);
});
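
    // Error paths: the thrown message is expected to name the failing
    // endpoint and include the HTTP status code.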
it("should throw error on non-ok response", async () => {
const mockFetch = mock(() =>
Promise.resolve({
ok: false,
status: 500,
text: () => Promise.resolve("Internal Server Error"),
})
);
global.fetch = mockFetch;
const { llamaEmbedding } = await import("../src/llama-client.js");
try {
await llamaEmbedding("test text");
expect(true).toBe(false); // should not reach here
} catch (error) {
expect(error.message).toContain("llama.cpp embeddings error");
expect(error.message).toContain("500");
}
});
it("should throw error if response missing embedding", async () => {
const mockFetch = mock(() =>
Promise.resolve({
ok: true,
json: () => Promise.resolve({}),
text: () => Promise.resolve(""),
})
);
global.fetch = mockFetch;
const { llamaEmbedding } = await import("../src/llama-client.js");
try {
await llamaEmbedding("test text");
expect(true).toBe(false);
} catch (error) {
expect(error.message).toContain("missing or invalid embedding array");
}
});
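
    // The outgoing body should follow the OpenAI-compatible embeddings
    // request schema: { model, input }.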
it("should send correct request body", async () => {
let capturedBody;
const mockFetch = mock((url, options) => {
capturedBody = JSON.parse(options.body);
return Promise.resolve({
ok: true,
json: () =>
Promise.resolve({
data: [{ embedding: [0.1, 0.2, 0.3] }],
}),
text: () => Promise.resolve(""),
});
});
global.fetch = mockFetch;
const { llamaEmbedding } = await import("../src/llama-client.js");
await llamaEmbedding("test text");
expect(capturedBody.model).toBe("nomic-embed-text");
expect(capturedBody.input).toBe("test text");
});
});
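
  // llamaCompletion reads the generated text from a top-level `content`
  // field, matching llama.cpp's completion response.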
describe("llamaCompletion", () => {
it("should parse completion from response", async () => {
const mockFetch = mock(() =>
Promise.resolve({
ok: true,
json: () =>
Promise.resolve({
content: "This is a completion response",
}),
text: () => Promise.resolve(""),
})
);
global.fetch = mockFetch;
const { llamaCompletion } = await import("../src/llama-client.js");
const result = await llamaCompletion("test prompt");
expect(result).toBe("This is a completion response");
});
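
    // Caller-supplied sampling options (temperature, n_predict, ...) are
    // expected to be forwarded verbatim into the request body.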
it("should pass options to request body", async () => {
let capturedBody;
const mockFetch = mock((url, options) => {
capturedBody = JSON.parse(options.body);
return Promise.resolve({
ok: true,
json: () =>
Promise.resolve({
content: "response",
}),
text: () => Promise.resolve(""),
});
});
global.fetch = mockFetch;
const { llamaCompletion } = await import("../src/llama-client.js");
await llamaCompletion("prompt", { temperature: 0.7, n_predict: 128 });
expect(capturedBody.temperature).toBe(0.7);
expect(capturedBody.n_predict).toBe(128);
});
it("should throw error on non-ok response", async () => {
const mockFetch = mock(() =>
Promise.resolve({
ok: false,
status: 503,
text: () => Promise.resolve("Service Unavailable"),
})
);
global.fetch = mockFetch;
const { llamaCompletion } = await import("../src/llama-client.js");
try {
await llamaCompletion("test prompt");
expect(true).toBe(false);
} catch (error) {
expect(error.message).toContain("llama.cpp completion error");
}
});
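
    // Note: dynamic import caches the module after the first test, so the
    // client is assumed to read its env-based config (set by setTestEnv)
    // at request time rather than at import time.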
it("should use environment variables for config", async () => {
let capturedUrl;
let capturedBody;
const mockFetch = mock((url, options) => {
capturedUrl = url;
capturedBody = JSON.parse(options.body);
return Promise.resolve({
ok: true,
json: () =>
Promise.resolve({
content: "response",
}),
text: () => Promise.resolve(""),
});
});
global.fetch = mockFetch;
const { llamaCompletion } = await import("../src/llama-client.js");
await llamaCompletion("prompt");
expect(capturedUrl).toContain("127.0.0.1:8080");
expect(capturedBody.model).toBe("mistral");
});
});
});