This tries to short-circuit multiple round-trips to the LLM for reading code. It's a precursor to context engineering tailored to specific tasks. In initial experiments, it's only marginally faster than regular mode, and burns more tokens.
13 lines · 329 B · TOML
[package]
name = "g3-planner"
version = "0.1.0"
edition = "2021"
description = "Fast-discovery planner for G3 AI coding agent"

[dependencies]
anyhow = { workspace = true }
const_format = "0.2"
g3-providers = { path = "../g3-providers" }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true }