// TelosDB: src-tauri/src/lib.rs
mod db;
mod mcp;

use tauri::Manager;
use std::process::{Child, Command, Stdio};
use std::sync::{Arc, Mutex};


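/// Application state managed by Tauri; the SQLx pool is stored here so
/// commands can later borrow it via `tauri::State<'_, AppState>`.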
#[allow(dead_code)]
struct AppState {
    db_pool: sqlx::SqlitePool,
}

#[cfg_attr(mobile, tauri::mobile_entry_point)]
pub fn run() {
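    // Handle to the spawned llama-server child process, shared between the
    // setup hook (which spawns it) and the window-close handler (which kills it).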
    let llama_child: Arc<Mutex<Option<Child>>> = Arc::new(Mutex::new(None));
    tauri::Builder::default()
        .plugin(tauri_plugin_shell::init())
        .plugin(tauri_plugin_log::Builder::default().build())
        .setup({
            let llama_child = llama_child.clone();
            move |app| {
                // Resolve paths
                let app_data_dir = app.path().app_data_dir().expect("failed to get app data dir");
                let db_path = app_data_dir.join("telos.db");
                // Path to the llama-server binary (assumed to be in the same directory as the executable)
                let exe_path = std::env::current_exe()?;
                let exe_dir = exe_path.parent().unwrap();
                let llama_path = exe_dir.join("llama-server.exe");
                let model_path = exe_dir.join("Gemma-3-300M.gguf");
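                // vec0.dll is the SQLite vector-search extension; its path is handed to
                // both the synchronous schema init and the SQLx pool below.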
                let vec0_path = exe_dir.join("vec0.dll");

                log::info!("Initializing TelosDB at {:?}", db_path);
                log::info!("Loading vec0 extension from {:?}", vec0_path);

                if !vec0_path.exists() {
                    log::error!("vec0.dll NOT FOUND at {:?}. Vector search will fail.", vec0_path);
                }

                // Auto-start llama-server
                if llama_path.exists() && model_path.exists() {
                    let mut child = Command::new(&llama_path)
                        .arg("--model").arg(model_path)
                        .arg("--port").arg("8080")
                        .arg("--embedding").arg("enabled")
                        .arg("--parallel").arg("1")
                        .stdout(Stdio::null())
                        .stderr(Stdio::null())
                        .spawn()
                        .expect("failed to start llama-server");
                    log::info!("llama-server started: {:?}", llama_path);
                    *llama_child.lock().unwrap() = Some(child);
                } else {
                    log::error!("llama-server.exe or Gemma-3-300M.gguf not found in {:?}", exe_dir);
                }
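
                // Note: the child is spawned fire-and-forget; no readiness check is done
                // here. One option (a sketch, assuming the bundled llama-server exposes
                // its usual /health endpoint) would be to poll
                // http://127.0.0.1:8080/health with a short timeout before treating the
                // server as available.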

                // Init DB (Schema) synchronously first via rusqlite for robust extension loading
                match db::init_db(&db_path, &vec0_path) {
                    Ok(_) => log::info!("Database schema initialized."),
                    Err(e) => log::error!("Database schema init failed: {:?}", e),
                }

                // Init Async Pool for App usage
                let pool = tauri::async_runtime::block_on(async {
                    match db::init_pool(db_path.to_str().unwrap(), vec0_path.to_str().unwrap().to_owned()).await {
                        Ok(pool) => {
                            log::info!("App State managed with SQLx pool.");
                            pool
                        },
                        Err(e) => {
                            log::error!("Failed to create SQLx pool: {:?}", e);
                            panic!("Failed to create SQLx pool");
                        }
                    }
                });
                app.manage(AppState { db_pool: pool.clone() });
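                // With the pool managed, a command could receive it as
                // `state: tauri::State<'_, AppState>`; no commands are registered in this
                // builder yet, hence the #[allow(dead_code)] on AppState.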

                // Start the MCP server (passing it the DB pool)
                let pool2 = pool.clone();
                let db_path_str = db_path.to_str().unwrap().to_owned();
                let vec0_path_str = vec0_path.to_str().unwrap().to_owned();
                use tokio::sync::RwLock;
                let llama_status = Arc::new(RwLock::new("unknown".to_string()));
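                // Run the MCP server on the Tauri async runtime so it serves on port 3001
                // alongside the UI event loop.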
                tauri::async_runtime::spawn({
                    let llama_status = llama_status.clone();
                    async move {
                        let (tx, _rx) = tokio::sync::broadcast::channel(100);
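                        // This AppState is constructed but not handed to run_server below,
                        // which receives the raw DB and extension paths instead.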
                        let _app_state = mcp::AppState { db_pool: pool2, tx, llama_status };
                        mcp::run_server(3001, &db_path_str, &vec0_path_str).await;
                    }
                });

                Ok(())
            }
        })
        .on_window_event({
            let llama_child = llama_child.clone();
            move |_app_handle, event| {
                if let tauri::WindowEvent::CloseRequested { .. } = event {
                    // Kill the llama-server process
                    if let Some(mut child) = llama_child.lock().unwrap().take() {
                        let _ = child.kill();
                    }
                }
            }
        })
        .run(tauri::generate_context!())
        .expect("error while running tauri application");
}