g3 console initial cut + error doesn't kill auto

This commit is contained in:
Dhanji Prasanna
2025-11-04 11:39:26 +11:00
parent 6913c5f72e
commit aaf918828f
53 changed files with 6796 additions and 23 deletions

View File

@@ -0,0 +1,147 @@
use crate::models::*;
use crate::process::{ProcessController, ProcessDetector};
use axum::{extract::State, http::StatusCode, Json};
use std::sync::Arc;
use tokio::sync::Mutex;
use tracing::{error, info};
/// Shared, mutex-guarded handle to the `ProcessController` used by the control handlers.
pub type ControllerState = Arc<Mutex<ProcessController>>;
/// Terminate the g3 instance identified by `id`.
///
/// Instance ids are formatted as `"pid_timestamp"`; the PID is recovered from
/// the prefix. Returns 400 for a malformed id and 500 if the kill fails.
pub async fn kill_instance(
    State(controller): State<ControllerState>,
    axum::extract::Path(id): axum::extract::Path<String>,
) -> Result<Json<serde_json::Value>, StatusCode> {
    // The PID is everything before the first underscore.
    let pid = match id.split('_').next().map(|s| s.parse::<u32>()) {
        Some(Ok(p)) => p,
        _ => return Err(StatusCode::BAD_REQUEST),
    };

    let mut guard = controller.lock().await;
    if let Err(e) = guard.kill_process(pid) {
        error!("Failed to kill process {}: {}", pid, e);
        return Err(StatusCode::INTERNAL_SERVER_ERROR);
    }

    info!("Successfully killed process {}", pid);
    Ok(Json(serde_json::json!({
        "status": "terminating"
    })))
}
pub async fn restart_instance(
State(controller): State<ControllerState>,
axum::extract::Path(id): axum::extract::Path<String>,
) -> Result<Json<LaunchResponse>, StatusCode> {
info!("Restarting instance: {}", id);
// Extract PID from instance ID (format: pid_timestamp)
let pid: u32 = id
.split('_')
.next()
.and_then(|s| s.parse().ok())
.ok_or(StatusCode::BAD_REQUEST)?;
let mut controller = controller.lock().await;
// Get stored launch params
let params = controller.get_launch_params(pid)
.ok_or(StatusCode::NOT_FOUND)?;
// Launch new instance with same parameters
let new_pid = controller.launch_g3(
params.workspace.to_str().unwrap(),
&params.provider,
&params.model,
&params.prompt,
params.autonomous,
params.g3_binary_path.as_deref(),
).map_err(|e| {
error!("Failed to restart instance: {}", e);
StatusCode::INTERNAL_SERVER_ERROR
})?;
let new_id = format!("{}_{}", new_pid, chrono::Utc::now().timestamp());
Ok(Json(LaunchResponse {
id: new_id,
status: "starting".to_string(),
}))
}
pub async fn launch_instance(
State(controller): State<ControllerState>,
Json(request): Json<LaunchRequest>,
) -> Result<Json<LaunchResponse>, (StatusCode, Json<serde_json::Value>)> {
info!("Launching new g3 instance: {:?}", request);
// Validate binary path if provided
if let Some(ref binary_path) = request.g3_binary_path {
let path = std::path::Path::new(binary_path);
// Check if file exists
if !path.exists() {
error!("G3 binary not found: {}", binary_path);
return Err((StatusCode::BAD_REQUEST, Json(serde_json::json!({
"error": "G3 binary not found",
"message": format!("The specified g3 binary does not exist: {}", binary_path)
}))));
}
// Check if file is executable (Unix only)
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
if let Ok(metadata) = std::fs::metadata(path) {
if metadata.permissions().mode() & 0o111 == 0 {
error!("G3 binary is not executable: {}", binary_path);
return Err((StatusCode::BAD_REQUEST, Json(serde_json::json!({
"error": "G3 binary is not executable",
"message": format!("The specified g3 binary is not executable: {}", binary_path)
}))));
}
}
}
}
let workspace = request.workspace.to_str().ok_or_else(|| {
(StatusCode::BAD_REQUEST, Json(serde_json::json!({
"error": "Invalid workspace path",
"message": "The workspace path contains invalid characters"
})))
})?;
let autonomous = request.mode == LaunchMode::Ensemble;
let g3_binary_path = request.g3_binary_path.as_deref();
let mut controller = controller.lock().await;
match controller.launch_g3(
workspace,
&request.provider,
&request.model,
&request.prompt,
autonomous,
g3_binary_path,
) {
Ok(pid) => {
let id = format!("{}_{}", pid, chrono::Utc::now().timestamp());
info!("Successfully launched g3 instance with PID {}", pid);
Ok(Json(LaunchResponse {
id,
status: "starting".to_string(),
}))
}
Err(e) => {
error!("Failed to launch g3 instance: {}", e);
Err((StatusCode::INTERNAL_SERVER_ERROR, Json(serde_json::json!({
"error": "Failed to launch instance",
"message": format!("Error: {}", e)
}))))
}
}
}

View File

@@ -0,0 +1,189 @@
use crate::logs::{LogParser, StatsAggregator};
use crate::models::*;
use crate::process::ProcessDetector;
use axum::{extract::State, http::StatusCode, Json};
use std::sync::Arc;
use tokio::sync::Mutex;
use tracing::{debug, error, warn};
/// Shared, mutex-guarded handle to the `ProcessDetector` used by the instance handlers.
pub type AppState = Arc<Mutex<ProcessDetector>>;
/// List every detected g3 instance with its aggregated detail.
///
/// An instance whose detail cannot be built is logged and skipped so a single
/// bad instance does not hide the rest; detection failure itself is a 500.
pub async fn list_instances(
    State(detector): State<AppState>,
) -> Result<Json<Vec<InstanceDetail>>, StatusCode> {
    let mut guard = detector.lock().await;
    let instances = guard.detect_instances().map_err(|e| {
        error!("Failed to detect instances: {}", e);
        StatusCode::INTERNAL_SERVER_ERROR
    })?;

    let details: Vec<InstanceDetail> = instances
        .into_iter()
        .filter_map(|instance| match get_instance_detail(&instance) {
            Ok(detail) => Some(detail),
            Err(e) => {
                // Continue with the other instances.
                error!("Failed to get instance detail: {}", e);
                None
            }
        })
        .collect();

    Ok(Json(details))
}
pub async fn get_instance(
State(detector): State<AppState>,
axum::extract::Path(id): axum::extract::Path<String>,
) -> Result<Json<InstanceDetail>, StatusCode> {
let mut detector = detector.lock().await;
match detector.detect_instances() {
Ok(instances) => {
if let Some(instance) = instances.into_iter().find(|i| i.id == id) {
match get_instance_detail(&instance) {
Ok(detail) => Ok(Json(detail)),
Err(e) => {
error!("Failed to get instance detail: {}", e);
Err(StatusCode::INTERNAL_SERVER_ERROR)
}
}
} else {
Err(StatusCode::NOT_FOUND)
}
}
Err(e) => {
error!("Failed to detect instances: {}", e);
Err(StatusCode::INTERNAL_SERVER_ERROR)
}
}
}
/// Assemble the full `InstanceDetail` view for one instance: parsed log stats,
/// the most recent message, git status, and project-file snippets.
///
/// Missing logs and missing git info are tolerated (the instance may be brand
/// new, or the workspace may not be a git repo); only they are degraded, the
/// detail itself is still produced.
fn get_instance_detail(instance: &Instance) -> anyhow::Result<InstanceDetail> {
    // Logs may not exist yet for a freshly started instance; fall back to empty.
    let log_entries = LogParser::parse_logs(&instance.workspace).unwrap_or_else(|e| {
        warn!("Failed to parse logs for instance {}: {}. Instance may be newly started.", instance.id, e);
        Vec::new()
    });

    let is_ensemble = instance.instance_type == crate::models::InstanceType::Ensemble;
    let stats = StatsAggregator::aggregate_stats(&log_entries, instance.start_time, is_ensemble);
    let latest_message = StatsAggregator::get_latest_message(&log_entries);

    // Git status is optional: non-repo workspaces simply get None.
    let git_status = get_git_status(&instance.workspace);
    if git_status.is_none() {
        debug!("No git status available for workspace: {:?}", instance.workspace);
    }

    let project_files = get_project_files(&instance.workspace);

    Ok(InstanceDetail {
        instance: instance.clone(),
        stats,
        latest_message,
        git_status,
        project_files,
    })
}
/// Collect the git branch and working-tree status for `workspace`.
///
/// Returns `None` when `git` cannot be run, exits non-zero (e.g. the workspace
/// is not a repository), or produces non-UTF-8 output.
fn get_git_status(workspace: &std::path::Path) -> Option<GitStatus> {
    use std::process::Command;
    // Get current branch.
    // BUG FIX: the exit status was previously ignored, so a non-repo workspace
    // produced Some(GitStatus) with an empty branch instead of None.
    let branch_output = Command::new("git")
        .arg("-C")
        .arg(workspace)
        .arg("branch")
        .arg("--show-current")
        .output()
        .ok()?;
    if !branch_output.status.success() {
        return None;
    }
    // NOTE: a detached HEAD yields an empty branch string with a zero exit
    // code; that is still reported as Some(..) with branch "".
    let branch = String::from_utf8(branch_output.stdout).ok()?.trim().to_string();
    // Get status in the stable machine-readable porcelain format.
    let status_output = Command::new("git")
        .arg("-C")
        .arg(workspace)
        .arg("status")
        .arg("--porcelain")
        .output()
        .ok()?;
    if !status_output.status.success() {
        return None;
    }
    let status_text = String::from_utf8(status_output.stdout).ok()?;
    let mut modified_files = Vec::new();
    let mut added_files = Vec::new();
    let mut deleted_files = Vec::new();
    // Porcelain lines are "XY <path>"; two status chars, a space, then the path.
    for line in status_text.lines() {
        if line.len() < 4 {
            continue;
        }
        let status = &line[0..2];
        let file = line[3..].trim();
        match status.trim() {
            "M" | "MM" => modified_files.push(file.to_string()),
            "A" | "AM" => added_files.push(file.to_string()),
            "D" => deleted_files.push(file.to_string()),
            // Anything else (renames, untracked, ...) is lumped with modified.
            _ => modified_files.push(file.to_string()),
        }
    }
    let uncommitted_changes = modified_files.len() + added_files.len() + deleted_files.len();
    Some(GitStatus {
        branch,
        uncommitted_changes,
        modified_files,
        added_files,
        deleted_files,
    })
}
/// Gather the first-lines snippets of the well-known project files
/// (requirements.md, README.md, AGENTS.md) from `workspace`.
fn get_project_files(workspace: &std::path::Path) -> ProjectFiles {
    ProjectFiles {
        requirements: read_file_snippet(workspace, "requirements.md"),
        readme: read_file_snippet(workspace, "README.md"),
        agents: read_file_snippet(workspace, "AGENTS.md"),
    }
}
/// Read the first 10 lines of `workspace/filename`, or `None` if the file
/// cannot be read.
fn read_file_snippet(workspace: &std::path::Path, filename: &str) -> Option<String> {
    use std::fs;
    let path = workspace.join(filename);
    // Read directly and treat any error as "no snippet". The previous
    // exists()-then-read sequence raced with concurrent deletion (TOCTOU)
    // and paid an extra stat(); read_to_string already fails cleanly on a
    // missing file.
    fs::read_to_string(&path).ok().map(|content| {
        // Only the first 10 lines are surfaced in the UI.
        content.lines().take(10).collect::<Vec<_>>().join("\n")
    })
}

View File

@@ -0,0 +1,44 @@
use crate::logs::LogParser;
use crate::models::*;
use crate::process::ProcessDetector;
use axum::{extract::State, http::StatusCode, Json};
use std::sync::Arc;
use tokio::sync::Mutex;
use tracing::error;
/// Shared, mutex-guarded handle to the `ProcessDetector` used by the log handlers.
pub type LogState = Arc<Mutex<ProcessDetector>>;
/// Return the chat messages and tool calls parsed from the logs of the
/// instance identified by `id`.
///
/// 404 when the instance is unknown; 500 when detection or log parsing fails.
pub async fn get_instance_logs(
    State(detector): State<LogState>,
    axum::extract::Path(id): axum::extract::Path<String>,
) -> Result<Json<serde_json::Value>, StatusCode> {
    let mut guard = detector.lock().await;
    let instances = guard.detect_instances().map_err(|e| {
        error!("Failed to detect instances: {}", e);
        StatusCode::INTERNAL_SERVER_ERROR
    })?;

    let instance = instances
        .into_iter()
        .find(|i| i.id == id)
        .ok_or(StatusCode::NOT_FOUND)?;

    let entries = LogParser::parse_logs(&instance.workspace).map_err(|e| {
        error!("Failed to parse logs: {}", e);
        StatusCode::INTERNAL_SERVER_ERROR
    })?;

    Ok(Json(serde_json::json!({
        "messages": LogParser::extract_chat_messages(&entries),
        "tool_calls": LogParser::extract_tool_calls(&entries),
    })))
}

View File

@@ -0,0 +1,9 @@
// HTTP handler modules for the g3 console API.
pub mod instances; // instance listing and per-instance detail
pub mod control;   // launch / kill / restart of g3 processes
pub mod logs;      // parsed chat messages and tool calls from instance logs
pub mod state;     // console state persistence and filesystem browsing
// Flatten the handler namespaces so the router can refer to them directly.
pub use instances::*;
pub use control::*;
pub use logs::*;
pub use state::*;

View File

@@ -0,0 +1,99 @@
use crate::launch::ConsoleState;
use axum::{http::StatusCode, Json};
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::os::unix::fs::PermissionsExt;
use tracing::{error, info};
/// Return the persisted console state (always succeeds; `load` supplies the value).
pub async fn get_state() -> Result<Json<ConsoleState>, StatusCode> {
    Ok(Json(ConsoleState::load()))
}
/// Persist the console state supplied in the request body.
///
/// Returns `{"status": "saved"}` on success, 500 when persistence fails.
pub async fn save_state(
    Json(state): Json<ConsoleState>,
) -> Result<Json<serde_json::Value>, StatusCode> {
    if let Err(e) = state.save() {
        error!("Failed to save console state: {}", e);
        return Err(StatusCode::INTERNAL_SERVER_ERROR);
    }
    info!("Console state saved successfully");
    Ok(Json(serde_json::json!({
        "status": "saved"
    })))
}
/// Request body for the filesystem browser endpoint.
#[derive(Debug, Serialize, Deserialize)]
pub struct BrowseRequest {
    // Directory to list; defaults to the server's current directory when absent.
    pub path: Option<String>,
    // NOTE(review): declared as "directory" or "file" but not read by
    // `browse_filesystem` — confirm whether the client still sends it.
    pub browse_type: String, // "directory" or "file"
}
/// Response for the filesystem browser: the resolved directory, its parent,
/// and the entries it contains.
#[derive(Debug, Serialize)]
pub struct BrowseResponse {
    pub current_path: String,
    // None at the filesystem root.
    pub parent_path: Option<String>,
    pub entries: Vec<FileEntry>,
}
/// One directory entry in a `BrowseResponse`.
#[derive(Debug, Serialize)]
pub struct FileEntry {
    pub name: String,
    pub path: String,
    pub is_directory: bool,
    // Any execute bit set (Unix permission semantics).
    pub is_executable: bool,
}
/// List the contents of a directory for the console's file picker.
///
/// Returns the canonicalized current path, its parent (for "up" navigation),
/// and the entries sorted directories-first then alphabetically. 400 when the
/// requested path cannot be canonicalized (e.g. it does not exist).
///
/// NOTE(review): this endpoint allows browsing the entire filesystem with no
/// root restriction — acceptable for a local dev console, but confirm before
/// exposing it over a network.
pub async fn browse_filesystem(
    Json(request): Json<BrowseRequest>,
) -> Result<Json<BrowseResponse>, StatusCode> {
    use std::fs;
    // Requested path, or the server's cwd when none was given.
    let requested = request
        .path
        .map(PathBuf::from)
        .unwrap_or_else(|| std::env::current_dir().unwrap_or_else(|_| PathBuf::from(".")));
    // BUG FIX: canonicalize FIRST and derive the parent from the absolute
    // path. Previously the parent came from the raw request path, so browsing
    // a relative path such as "." produced a missing or wrong parent_path.
    let canonical = requested
        .canonicalize()
        .map_err(|_| StatusCode::BAD_REQUEST)?;
    let current_path = canonical.to_string_lossy().to_string();
    let parent_path = canonical
        .parent()
        .and_then(|p| p.to_str())
        .map(|s| s.to_string());
    let mut entries = Vec::new();
    if let Ok(read_dir) = fs::read_dir(&canonical) {
        for entry in read_dir.flatten() {
            // Entries whose metadata cannot be read are silently skipped.
            if let Ok(metadata) = entry.metadata() {
                entries.push(FileEntry {
                    name: entry.file_name().to_string_lossy().to_string(),
                    path: entry.path().to_string_lossy().to_string(),
                    is_directory: metadata.is_dir(),
                    // Any execute bit set; `PermissionsExt` is imported at file top.
                    is_executable: metadata.permissions().mode() & 0o111 != 0,
                });
            }
        }
    }
    // Directories first, then case-sensitive alphabetical order.
    entries.sort_by(|a, b| match (a.is_directory, b.is_directory) {
        (true, false) => std::cmp::Ordering::Less,
        (false, true) => std::cmp::Ordering::Greater,
        _ => a.name.cmp(&b.name),
    });
    Ok(Json(BrowseResponse {
        current_path,
        parent_path,
        entries,
    }))
}