Improve code readability in g3-core

- streaming_parser.rs: Rename has_message_like_keys to args_contain_prose_fragments
  with improved documentation explaining the heuristic for detecting malformed
  tool calls where LLM prose leaked into JSON keys

- context_window.rs: Simplify build_thin_result_message using early return
  pattern and match expression for cleaner control flow

Agent: carmack
Dhanji R. Prasanna
2026-01-07 11:16:42 +11:00
parent 2e9535974d
commit 1980e62511
2 changed files with 37 additions and 47 deletions

context_window.rs

@@ -646,46 +646,33 @@ Format this as a detailed but concise summary that can be used to resume the con
     tool_call_leaned_count: usize,
     chars_saved: usize,
 ) -> (String, usize) {
-    let emoji = scope.emoji();
-    let label = scope.label();
     let scope_desc = match scope {
         ThinScope::FirstThird => "",
         ThinScope::All => " across entire history",
     };
-    if leaned_count > 0 && tool_call_leaned_count > 0 {
-        (
-            format!(
-                "{} Context {} at {}%: {} tool results + {} tool calls{}, ~{} chars saved",
-                emoji, label, current_percentage, leaned_count, tool_call_leaned_count, scope_desc, chars_saved
-            ),
-            chars_saved,
-        )
-    } else if leaned_count > 0 {
-        (
-            format!(
-                "{} Context {} at {}%: {} tool results{}, ~{} chars saved",
-                emoji, label, current_percentage, leaned_count, scope_desc, chars_saved
-            ),
-            chars_saved,
-        )
-    } else if tool_call_leaned_count > 0 {
-        (
-            format!(
-                "{} Context {} at {}%: {} tool calls{}, ~{} chars saved",
-                emoji, label, current_percentage, tool_call_leaned_count, scope_desc, chars_saved
-            ),
-            chars_saved,
-        )
-    } else {
-        (
-            format!(
-                " Context {} triggered at {}% but no large tool results or tool calls found{}",
-                scope.error_action(), current_percentage, scope_desc
-            ),
-            0,
-        )
+
+    // Nothing was thinned
+    if leaned_count == 0 && tool_call_leaned_count == 0 {
+        let msg = format!(
+            " Context {} triggered at {}% but no large tool results or tool calls found{}",
+            scope.error_action(), current_percentage, scope_desc
+        );
+        return (msg, 0);
     }
+
+    // Build description of what was thinned
+    let what_thinned = match (leaned_count > 0, tool_call_leaned_count > 0) {
+        (true, true) => format!("{} tool results + {} tool calls", leaned_count, tool_call_leaned_count),
+        (true, false) => format!("{} tool results", leaned_count),
+        (false, true) => format!("{} tool calls", tool_call_leaned_count),
+        (false, false) => unreachable!(), // handled above
+    };
+
+    let msg = format!(
+        "{} Context {} at {}%: {}{}, ~{} chars saved",
+        scope.emoji(), scope.label(), current_percentage, what_thinned, scope_desc, chars_saved
+    );
+    (msg, chars_saved)
 }
 
 /// Recalculate token usage based on current conversation history
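
The shape of the refactor above can be shown in isolation. The following is a minimal standalone sketch, not the actual g3-core function: the name describe_thinned, its Option return type, and the sample values are illustrative assumptions. It demonstrates the same idea, an early return for the "nothing was thinned" case followed by one match over two booleans in place of an if/else-if chain of near-duplicate format! calls.

// Illustrative sketch only; names and return type are assumptions, not g3-core API.
fn describe_thinned(results: usize, calls: usize) -> Option<String> {
    // Early return: nothing was thinned.
    if results == 0 && calls == 0 {
        return None;
    }
    // One arm per combination; (false, false) is ruled out by the early return.
    let what = match (results > 0, calls > 0) {
        (true, true) => format!("{results} tool results + {calls} tool calls"),
        (true, false) => format!("{results} tool results"),
        (false, true) => format!("{calls} tool calls"),
        (false, false) => unreachable!(),
    };
    Some(what)
}

fn main() {
    assert_eq!(describe_thinned(0, 0), None);
    assert_eq!(describe_thinned(3, 0).as_deref(), Some("3 tool results"));
    assert_eq!(
        describe_thinned(3, 2).as_deref(),
        Some("3 tool results + 2 tool calls")
    );
}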

streaming_parser.rs

@@ -77,20 +77,23 @@ impl StreamingToolParser {
         best_start
     }
 
-    /// Validate that tool call args don't contain message-like content.
-    /// This detects malformed tool calls where agent messages got mixed into args.
-    fn has_message_like_keys(args: &serde_json::Map<String, serde_json::Value>) -> bool {
+    /// Detect malformed tool calls where LLM prose leaked into JSON keys.
+    ///
+    /// When the LLM "stutters" or mixes formats, it sometimes emits JSON where
+    /// the keys are actually fragments of conversational text rather than valid
+    /// parameter names. This heuristic catches such cases by looking for:
+    /// - Unusually long keys (>100 chars)
+    /// - Newlines in keys (never valid in JSON keys)
+    /// - Common LLM response phrases that indicate prose, not parameters
+    fn args_contain_prose_fragments(args: &serde_json::Map<String, serde_json::Value>) -> bool {
+        const PROSE_MARKERS: &[&str] = &[
+            "I'll", "Let me", "Here's", "I can", "I need", "First", "Now", "The ",
+        ];
         args.keys().any(|key| {
             key.len() > 100
                 || key.contains('\n')
-                || key.contains("I'll")
-                || key.contains("Let me")
-                || key.contains("Here's")
-                || key.contains("I can")
-                || key.contains("I need")
-                || key.contains("First")
-                || key.contains("Now")
-                || key.contains("The ")
+                || PROSE_MARKERS.iter().any(|marker| key.contains(marker))
         })
     }
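
To make the heuristic concrete, here is a hedged, free-standing sketch of the same check applied to sample args. The real method lives on StreamingToolParser; this copy only assumes the serde_json crate, and the example key strings are invented for illustration.

use serde_json::{json, Map, Value};

// Standalone copy of the heuristic for demonstration purposes only.
fn args_contain_prose_fragments(args: &Map<String, Value>) -> bool {
    const PROSE_MARKERS: &[&str] = &[
        "I'll", "Let me", "Here's", "I can", "I need", "First", "Now", "The ",
    ];
    args.keys().any(|key| {
        key.len() > 100
            || key.contains('\n')
            || PROSE_MARKERS.iter().any(|marker| key.contains(marker))
    })
}

fn main() {
    // Ordinary tool-call args: short, parameter-like keys pass the check.
    let good = json!({ "path": "src/lib.rs", "recursive": true });
    // Malformed args: a fragment of model prose leaked in as a JSON key.
    let bad = json!({ "I'll read the file first": true });

    assert!(!args_contain_prose_fragments(good.as_object().unwrap()));
    assert!(args_contain_prose_fragments(bad.as_object().unwrap()));
}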
@@ -172,7 +175,7 @@ impl StreamingToolParser {
             if let Ok(tool_call) = serde_json::from_str::<ToolCall>(json_str) {
                 // Validate that args is an object with reasonable keys
                 if let Some(args_obj) = tool_call.args.as_object() {
-                    if Self::has_message_like_keys(args_obj) {
+                    if Self::args_contain_prose_fragments(args_obj) {
                         debug!(
                             "Detected malformed tool call with message-like keys, skipping"
                         );
@@ -220,7 +223,7 @@ impl StreamingToolParser {
             if let Ok(tool_call) = serde_json::from_str::<ToolCall>(json_str) {
                 if let Some(args_obj) = tool_call.args.as_object() {
-                    if !Self::has_message_like_keys(args_obj) {
+                    if !Self::args_contain_prose_fragments(args_obj) {
                         debug!(
                             "Found tool call at position {}: {:?}",
                             abs_start, tool_call.tool