diff --git a/crates/g3-core/src/providers/embedded.rs b/crates/g3-core/src/providers/embedded.rs
index 2c132ce..a45611d 100644
--- a/crates/g3-core/src/providers/embedded.rs
+++ b/crates/g3-core/src/providers/embedded.rs
@@ -117,6 +117,45 @@ impl EmbeddedProvider {
// Add the start of assistant response
formatted.push_str("<|im_start|>assistant\n");
+ formatted
+ } else if model_name_lower.contains("mistral") {
+ // Mistral Instruct format: [INST] ... [/INST] assistant_response
+ let mut formatted = String::new();
+ let mut in_conversation = false;
+
+ for (i, message) in messages.iter().enumerate() {
+ match message.role {
+ MessageRole::System => {
+ // Mistral doesn't have a special system token; include it at the start
+ if i == 0 {
+ formatted.push_str("[INST] ");
+ formatted.push_str(&message.content);
+ formatted.push_str("\n\n");
+ in_conversation = true;
+ }
+ }
+ MessageRole::User => {
+ if !in_conversation {
+ formatted.push_str("[INST] ");
+ }
+ formatted.push_str(&message.content);
+ formatted.push_str(" [/INST]");
+ in_conversation = false;
+ }
+ MessageRole::Assistant => {
+ formatted.push_str(" ");
+ formatted.push_str(&message.content);
+ formatted.push_str(" ");
+ in_conversation = false;
+ }
+ }
+ }
+
+ // If the last message was from the user, add a space before the assistant's response
+ if messages.last().map_or(false, |m| matches!(m.role, MessageRole::User)) {
+ formatted.push_str(" ");
+ }
+
formatted
} else {
// Use Llama/CodeLlama format for other models
diff --git a/g3_session_hello_test_a63e133911916b31.json b/g3_session_hello_test_a63e133911916b31.json
index d82cba3..d96e037 100644
--- a/g3_session_hello_test_a63e133911916b31.json
+++ b/g3_session_hello_test_a63e133911916b31.json
@@ -10,23 +10,23 @@
"role": "user"
},
{
- "content": "I will start by executing a simple task as requested. Let's print \"Hello, Test\" to confirm the task is understood and can be executed.\n\n{\"tool\": \"shell\", \"args\": {\"command\":\"echo Hello, Test\"}}",
+ "content": "1. Understanding the task, it seems simple enough. The goal is to output the message \"hello\" for this test.\n2. I will execute the following tool call to print the message \"hello\" to the console:\n\n```json\n\n{\"tool\": \"shell\", \"args\": {\"command\":\"echo hello\"}}",
"role": "assistant"
},
{
- "content": "Tool result: Hello, Test",
+ "content": "Tool result: hello",
"role": "user"
},
{
- "content": "I will start by executing a simple task as requested. Let's print \"Hello, Test\" to confirm the task is understood and can be executed.\n\nTool executed: shell -> Hello, Test\n\n{\"tool\": \"final_output\", \"args\": {\"summary\": \"# Task Accomplished\n\nThe task 'hello test' was successfully executed. The output was:\n\n```\nHello, Test\n```\n\nThis confirms that the request was understood and completed as intended.\"}}",
+ "content": "1. Understanding the task, it seems simple enough. The goal is to output the message \"hello\" for this test.\n2. I will execute the following tool call to print the message \"hello\" to the console:\n\n```json\n\nTool executed: shell -> hello\n\n1. The shell tool call was successful, and it has printed the message \"hello\" as requested.\n2. I am now ready to call the final_output tool to signal task completion and provide a summary of the work done.\n\n```json\n\n=> Printed the message 'hello' as requested.",
"role": "assistant"
}
],
- "percentage_used": 0.9185791015625,
+ "percentage_used": 1.06201171875,
"total_tokens": 32768,
- "used_tokens": 301
+ "used_tokens": 348
},
"session_id": "hello_test_a63e133911916b31",
"status": "completed",
- "timestamp": 1757966669
+ "timestamp": 1757968007
}
\ No newline at end of file