# llama-server base URL
LLAMA_CPP_BASE_URL=http://127.0.0.1:8080

# llama-server model names
LLAMA_CPP_EMBEDDING_MODEL=embeddinggemma-300m
LLAMA_CPP_MODEL=

# config.json now handles these paths:
# - database.path (default: data/vector.db)
# - model.path (default: models/embeddinggemma-300m-q4_0.gguf)
# See config.json in the project root.

# Embedding dimension (384 for embeddinggemma-300m)
VEC_DIM=384

# MCP server port
MCP_PORT=3000
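
# For reference, a minimal config.json sketch based on the defaults noted above.
# The nested key layout is assumed from the database.path / model.path names;
# check the actual config.json in the project root for the authoritative shape:
#
# {
#   "database": { "path": "data/vector.db" },
#   "model": { "path": "models/embeddinggemma-300m-q4_0.gguf" }
# }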