Skip to content

Commit b5a2f97

Browse files
committed
Update session ID and timestamps in current_blocked.md; modify GPU type matching logic and add tests in catalog.rs; change API key and model settings in vtcode.toml
1 parent 9e26d8d commit b5a2f97

File tree

3 files changed

+56
-15
lines changed

(3 files changed: +56 additions, -15 deletions)

.vtcode/tasks/current_blocked.md

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,14 @@
11
---
2-
session_id: session-1774161591350
2+
session_id: session-1774180058612
33
outcome: blocked
4-
created_at: 2026-03-22T06:44:24.249197+00:00
4+
created_at: 2026-03-22T11:48:15.879783+00:00
55
workspace: /Users/vinhnguyenxuan/Developer/learn-by-doing/vtcode
6-
resume_command: "vtcode --resume session-1774161591350"
6+
resume_command: "vtcode --resume session-1774180058612"
77
---
88

99
# Blocker Summary
1010

11-
Recovery mode requested a final synthesis pass, but the model returned no answer.
11+
Recovery mode could not complete the final synthesis pass.
1212

1313
# Current Tracker Snapshot
1414

@@ -29,10 +29,10 @@ Recovery mode requested a final synthesis pass, but the model returned no answer
2929
- `/Users/vinhnguyenxuan/Developer/learn-by-doing/vtcode`
3030
- `/Users/vinhnguyenxuan/Developer/learn-by-doing/vtcode/.vtcode/tasks/current_task.md`
3131
- `/Users/vinhnguyenxuan/Developer/learn-by-doing/vtcode/.vtcode/tasks/current_blocked.md`
32-
- `/Users/vinhnguyenxuan/Developer/learn-by-doing/vtcode/.vtcode/tasks/blockers/session-1774161591350-20260322T064424Z.md`
32+
- `/Users/vinhnguyenxuan/Developer/learn-by-doing/vtcode/.vtcode/tasks/blockers/session-1774180058612-20260322T114815Z.md`
3333

3434
# Resume Metadata
3535

36-
- Session ID: `session-1774161591350`
36+
- Session ID: `session-1774180058612`
3737
- Outcome: `blocked`
38-
- Resume command: `vtcode --resume session-1774161591350`
38+
- Resume command: `vtcode --resume session-1774180058612`

vtcode-core/src/pods/catalog.rs

Lines changed: 46 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -95,11 +95,21 @@ impl PodProfile {
9595
return true;
9696
}
9797

98-
pod.gpus.iter().any(|gpu| {
99-
self.gpu_types
100-
.iter()
101-
.any(|gpu_type| gpu.name.to_lowercase().contains(&gpu_type.to_lowercase()))
102-
})
98+
let gpu_types = self
99+
.gpu_types
100+
.iter()
101+
.map(|gpu_type| gpu_type.to_lowercase())
102+
.collect::<Vec<_>>();
103+
104+
pod.gpus
105+
.iter()
106+
.filter(|gpu| {
107+
let gpu_name = gpu.name.to_lowercase();
108+
gpu_types.iter().any(|gpu_type| gpu_name.contains(gpu_type))
109+
})
110+
.take(self.gpu_count)
111+
.count()
112+
>= self.gpu_count
103113
}
104114

105115
pub fn matches_gpu_count(&self, count: usize) -> bool {
@@ -140,4 +150,35 @@ mod tests {
140150

141151
assert!(profile.matches_pod(&pod));
142152
}
153+
154+
#[test]
155+
fn profile_requires_enough_matching_gpu_types() {
156+
let profile = PodProfile {
157+
name: "dual-a100".to_string(),
158+
model: "model".to_string(),
159+
gpu_count: 2,
160+
gpu_types: vec!["A100".to_string()],
161+
command_template: default_command_template(),
162+
vllm_args: vec![],
163+
env: BTreeMap::new(),
164+
};
165+
let pod = PodState {
166+
name: "pod".to_string(),
167+
ssh: "ssh root@example.com".to_string(),
168+
models_path: None,
169+
gpus: vec![
170+
PodGpu {
171+
id: 0,
172+
name: "NVIDIA A100-SXM4-80GB".to_string(),
173+
},
174+
PodGpu {
175+
id: 1,
176+
name: "NVIDIA RTX 4090".to_string(),
177+
},
178+
],
179+
models: BTreeMap::new(),
180+
};
181+
182+
assert!(!profile.matches_pod(&pod));
183+
}
143184
}

vtcode.toml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,7 @@ read_file = true
6565
# Environment variable that stores the API key for the active provider
6666
# Default: "OPENAI_API_KEY"
6767
# Type: `string`
68-
api_key_env = "OLLAMA_API_KEY"
68+
api_key_env = "MINIMAX_API_KEY"
6969

7070
# Enable autonomous mode - auto-approve safe tools with reduced HITL prompts When true, the agent
7171
# operates with fewer confirmation prompts for safe tools.
@@ -92,7 +92,7 @@ default_editing_mode = "edit"
9292
# Default model to use
9393
# Default: "gpt-5.3-codex"
9494
# Type: `string`
95-
default_model = "gpt-oss:120b-cloud"
95+
default_model = "MiniMax-M2.7"
9696

9797
# Enable an extra self-review pass to refine final responses
9898
# Default: false
@@ -156,7 +156,7 @@ project_doc_max_bytes = 16384
156156
# Possible values: anthropic, gemini, openai, openrouter, zai
157157
# Default: "openai"
158158
# Type: `string`
159-
provider = "ollama"
159+
provider = "minimax"
160160

161161
# Reasoning effort level for models that support it (none, minimal, low, medium, high, xhigh) Applies
162162
# to: Claude, GPT-5 family, Gemini, Qwen3, DeepSeek with reasoning capability

0 commit comments

Comments (0)