Skip to content

Commit ac07097

Browse files
rawwerks authored and claude committed
Implement --tree flag for ASCII execution tree visualization
- render_execution_tree(): ASCII art tree output for terminal
- Improved prompt preview handling for context dicts
- Outputs to stderr with box-drawing characters

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
1 parent be83f59 commit ac07097

File tree

2 files changed

+113
-3
lines changed

2 files changed

+113
-3
lines changed

src/rlm_cli/cli.py

Lines changed: 28 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,18 @@
1010

1111
import typer
1212

13-
from .config import DEFAULT_CONFIG, load_effective_config, render_effective_config_text
13+
from .config import (
14+
DEFAULT_CONFIG,
15+
coerce_value,
16+
get_local_config_path,
17+
get_nested_value,
18+
get_user_config_path,
19+
load_effective_config,
20+
load_or_create_config,
21+
render_effective_config_text,
22+
set_nested_value,
23+
write_config_file,
24+
)
1425
from .context import WalkOptions, build_context_from_sources
1526
from .errors import (
1627
CliError,
@@ -29,6 +40,7 @@
2940
capture_stdout,
3041
emit_json,
3142
emit_text,
43+
render_execution_tree,
3244
)
3345
from .rlm_adapter import parse_json_args, parse_kv_args, run_completion
3446

@@ -1118,6 +1130,12 @@ def _run_ask(
11181130
warnings.insert(0, "Stopped early (Ctrl+C) - returning best answer so far")
11191131
_emit_text_output(result.response, output, warnings)
11201132

1133+
# Print tree to stderr if requested
1134+
if show_tree:
1135+
tree_str = render_execution_tree(result.raw)
1136+
if tree_str:
1137+
_emit_execution_tree(tree_str)
1138+
11211139
# Print summary to stderr if requested
11221140
if show_summary:
11231141
summary = build_execution_summary(result.raw)
@@ -1491,6 +1509,15 @@ def _emit_text_output(result_text: str, output: str | None, warnings: list[str])
14911509
emit_text(result_text, warnings=warnings)
14921510

14931511

1512+
def _emit_execution_tree(tree_str: str) -> None:
1513+
"""Emit execution tree to stderr."""
1514+
import sys
1515+
1516+
sys.stderr.write("\n=== RLM Execution Tree ===\n")
1517+
sys.stderr.write(tree_str)
1518+
sys.stderr.write("\n")
1519+
1520+
14941521
def _emit_execution_summary(summary: dict[str, object]) -> None:
14951522
"""Emit execution summary to stderr in a human-readable format."""
14961523
import sys

src/rlm_cli/output.py

Lines changed: 85 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -116,13 +116,24 @@ def build_execution_tree(raw: object, depth: int = 0) -> dict[str, object] | Non
116116
if isinstance(prompt, str):
117117
prompt_preview = _truncate(prompt)
118118
elif isinstance(prompt, dict):
119-
prompt_preview = _truncate(str(prompt))
119+
# Try to extract meaningful preview from context dict
120+
if "query" in prompt:
121+
prompt_preview = _truncate(str(prompt["query"]))
122+
elif "root" in prompt:
123+
prompt_preview = f"[context: {prompt.get('root', 'unknown')}]"
124+
else:
125+
prompt_preview = _truncate(str(prompt), 60)
120126
elif isinstance(prompt, list) and prompt:
121127
# Message list - get last user message content
122128
for msg in reversed(prompt):
123129
if isinstance(msg, dict) and msg.get("role") == "user":
124130
content = msg.get("content", "")
125-
prompt_preview = _truncate(str(content))
131+
if isinstance(content, str):
132+
prompt_preview = _truncate(content)
133+
elif isinstance(content, dict) and "query" in content:
134+
prompt_preview = _truncate(str(content["query"]))
135+
else:
136+
prompt_preview = _truncate(str(content))
126137
break
127138

128139
node: dict[str, object] = {
@@ -192,6 +203,78 @@ def _build_iteration_node(iteration: object, num: int, depth: int) -> dict[str,
192203
return node
193204

194205

206+
def render_execution_tree(raw: object) -> str | None:
    """
    Render the execution tree as ASCII art for terminal display.

    Builds the node tree via build_execution_tree() and walks it
    depth-first, drawing Unicode box-drawing connectors. Example of the
    actual output shape (cost is formatted with four decimals, and
    sibling nodes use the plain ``├──``/``└──`` connectors):

        ┌─ [openai/gpt-4] 2.3s $0.0500
        │  Q: Analyze the codebase...
        │  A: I'll start by...
        │
        ├── [google/gemini] 1.2s $0.0200
        │   Q: What is X?
        │   A: X is...
        │
        └── [openai/gpt-4] 0.5s
            Q: Details?
            A: The details...

    Returns:
        The rendered tree as a single newline-joined string, or None when
        build_execution_tree() could not build a tree from ``raw``.
    """
    tree = build_execution_tree(raw)
    if tree is None:
        return None

    lines: list[str] = []

    def format_node_header(node: dict[str, object]) -> str:
        # One-line node summary: "[model] <duration>s" plus cost when
        # present and non-zero (falsy cost, including 0, is omitted).
        model = node.get("model", "unknown")
        duration = node.get("duration", 0)
        cost = node.get("cost")
        header = f"[{model}] {duration}s"
        if cost:
            # NOTE(review): assumes a truthy cost is numeric — the :.4f
            # format would raise on a string; confirm upstream producer.
            header += f" ${cost:.4f}"
        return header

    def render_node(node: dict[str, object], prefix: str, is_last: bool, is_root: bool) -> None:
        # Choose the branch glyph for this node and the prefix that its
        # Q/A lines and children inherit (continues or drops the rail).
        if is_root:
            branch = "┌─ "
            child_prefix = "│  "
        elif is_last:
            branch = "└── "
            child_prefix = "    "
        else:
            branch = "├── "
            child_prefix = "│   "

        # Node header line.
        lines.append(f"{prefix}{branch}{format_node_header(node)}")

        # Prefix for the node's content (Q/A preview) lines.
        content_prefix = prefix + child_prefix

        # Prompt/response previews, when the tree builder supplied them.
        prompt = node.get("prompt_preview", "")
        response = node.get("response_preview", "")
        if prompt:
            lines.append(f"{content_prefix}Q: {prompt}")
        if response:
            lines.append(f"{content_prefix}A: {response}")

        # Recurse into children; a rail-only line gives visual separation.
        children = node.get("children", []) or []
        if children:
            lines.append(content_prefix.rstrip())
            for i, child in enumerate(children):
                if isinstance(child, dict):
                    render_node(child, content_prefix, i == len(children) - 1, False)

    render_node(tree, "", True, True)
    return "\n".join(lines)
276+
277+
195278
def build_execution_summary(raw: object) -> dict[str, object] | None:
196279
"""
197280
Build summary statistics from execution tree.

0 commit comments

Comments
 (0)