Skip to content

Commit 50af81c

Browse files
committed
bump version to v0.2.0
1 parent 85bc164 commit 50af81c

File tree

9 files changed

+209
-42
lines changed

9 files changed

+209
-42
lines changed

api/config.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@
3030
from ..services.runtime_config import (
3131
ALLOWED_LLM_KEYS,
3232
get_admin_token,
33+
get_apply_semantics,
3334
get_effective_config,
3435
is_loopback_client,
3536
update_config,
@@ -46,6 +47,7 @@
4647
from services.runtime_config import ALLOWED_LLM_KEYS # type: ignore
4748
from services.runtime_config import (
4849
get_admin_token,
50+
get_apply_semantics,
4951
get_effective_config,
5052
is_loopback_client,
5153
update_config,
@@ -482,11 +484,16 @@ async def config_put_handler(request: web.Request) -> web.Response:
482484

483485
# Return updated config
484486
effective, sources = get_effective_config()
487+
488+
# R53: Calculate apply semantics
489+
apply_info = get_apply_semantics(list(updates.keys()))
490+
485491
return web.json_response(
486492
{
487493
"ok": True,
488494
"config": effective,
489495
"sources": sources,
496+
"apply": apply_info,
490497
}
491498
)
492499

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
[project]
22
name = "comfyui-openclaw"
33
description = "Your own personal AIGC Factory. Any picture. Any reel. The Comfy way.©️"
4-
version = "0.1.9"
4+
version = "0.2.0"
55
license = {text = "MIT"}
66
readme = "README.md"
77
requires-python = ">=3.10"

services/llm_client.py

Lines changed: 22 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -119,31 +119,30 @@ def __init__(
119119
if info:
120120
self.base_url = info.base_url
121121

122-
self.model = model or eff_config.get("model")
123-
124-
# R23: Model alias resolution via plugins (using async bridge)
125-
if PLUGINS_AVAILABLE and self.model:
126-
try:
127-
from .plugins.async_bridge import run_async_in_sync_context
122+
# R57: Strict Precedence (Arg > Config > Default)
123+
# CRITICAL: Only inherit config['model'] if the effective provider matches config['provider'].
124+
# If user overrides provider (e.g. "openai") but config has ("anthropic", "claude-3"),
125+
# we must NOT use "claude-3" for "openai".
126+
127+
config_provider = eff_config.get("provider")
128+
config_model = eff_config.get("model")
129+
130+
if model:
131+
# 1. Explicit argument override
132+
self.model = model
133+
elif self.provider == config_provider:
134+
# 2. Config usage (provider matches) -> use config model
135+
self.model = config_model
136+
else:
137+
# 3. Provider mismatch (arg override vs config) -> do NOT use config model
138+
# Fallback to default for the *new* provider
139+
self.model = None
128140

129-
ctx = RequestContext(
130-
provider=self.provider,
131-
model=self.model,
132-
trace_id="init",
133-
)
134-
# Fix: Use async bridge even in __init__ just in case it's called from an async context
135-
resolved = run_async_in_sync_context(
136-
plugin_manager.execute_first("model.resolve", ctx, self.model)
137-
)
138-
if resolved and resolved != self.model:
139-
logger.info(f"Model alias resolved: {self.model} -> {resolved}")
140-
self.model = resolved
141-
except Exception as e:
142-
logger.warning(f"Model resolution plugin failed (non-fatal): {e}")
141+
# If we still have no model, try to get a default for the provider
142+
if not self.model:
143+
from .providers.catalog import DEFAULT_MODEL_BY_PROVIDER
143144

144-
# Fallback to catalog default if config model seems mismatched or empty?
145-
# For now, trust the config (defaults to gpt-4o-mini).
146-
# If user switches provider but not model, it might fail, but that's expected config behavior.
145+
self.model = DEFAULT_MODEL_BY_PROVIDER.get(self.provider, "default")
147146

148147
self.timeout = (
149148
timeout if timeout is not None else eff_config.get("timeout_sec", 120)

services/runtime_config.py

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -425,6 +425,42 @@ def validate_config_update(updates: Dict[str, Any]) -> Tuple[Dict[str, Any], lis
425425
return sanitized, errors
426426

427427

428+
def get_apply_semantics(updated_keys: list) -> Dict[str, list]:
    """
    R53: Determine apply semantics for updated keys.

    Args:
        updated_keys: Config key names that were just written.

    Returns:
        {
            "applied_now": [keys applied immediately],
            "restart_required": [keys requiring restart],
            "notes": [explanatory notes]
        }
    """
    hot = []    # takes effect on the next request
    cold = []   # needs a service restart
    notes = []

    for name in updated_keys:
        # LLM keys are re-read from file on every request (via
        # get_effective_config), so a write is effectively live immediately.
        if name in ALLOWED_LLM_KEYS:
            hot.append(name)
            continue

        # Everything else is treated as restart-only. Scheduler keys get an
        # explanatory note; unknown keys (which validation should normally
        # block) are conservatively marked restart-required without a note.
        cold.append(name)
        if name in ALLOWED_SCHEDULER_KEYS:
            notes.append(f"{name} requires service restart to take effect.")

    return {
        "applied_now": sorted(hot),
        "restart_required": sorted(cold),
        "notes": notes,
    }
462+
463+
428464
def update_config(updates: Dict[str, Any]) -> Tuple[bool, list]:
429465
"""
430466
Update LLM config, persisting to file.

tests/repro_r53_apply.py

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
2+
import unittest
import asyncio
import json
from unittest.mock import patch, MagicMock
from api.config import config_put_handler


class TestR53ApplyFeedbackRepro(unittest.TestCase):
    """R53 repro: PUT /config must include 'apply' semantics metadata in its response."""

    def setUp(self):
        # Dedicated loop so run_until_complete works regardless of the
        # runner's event-loop policy.
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)

    def tearDown(self):
        self.loop.close()

    @patch("api.config.update_config")
    @patch("api.config.get_effective_config")
    @patch("api.config.require_admin_token")
    @patch("api.config.check_rate_limit")
    @patch("api.config.require_same_origin_if_no_token")
    def test_put_returns_apply_metadata(self, mock_csrf, mock_rate, mock_auth, mock_get_config, mock_update):
        # Auth / rate-limit / CSRF guards all pass.
        mock_csrf.return_value = None
        mock_rate.return_value = True
        mock_auth.return_value = (True, None)

        # update_config succeeds with no validation errors.
        mock_update.return_value = (True, [])

        # get_effective_config returns the post-update state.
        mock_get_config.return_value = ({"provider": "openai"}, {"provider": "file"})

        # Fake aiohttp request: request.json() is awaited by the handler, so it
        # must return an awaitable. Create the future on our own loop explicitly
        # (bare asyncio.Future() relies on the deprecated implicit-loop lookup).
        request = MagicMock()
        payload = self.loop.create_future()
        payload.set_result({"llm": {"provider": "openai"}})
        request.json = MagicMock(return_value=payload)
        request.remote = "127.0.0.1"

        response = self.loop.run_until_complete(config_put_handler(request))
        body = json.loads(response.body)

        # Assertion: R53 requires an 'apply' field in the PUT /config response.
        self.assertIn("apply", body, "R53 Failure: Response missing 'apply' metadata")


if __name__ == "__main__":
    unittest.main()

tests/repro_r57_precedence.py

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
2+
import unittest
from unittest.mock import patch, MagicMock
from services.llm_client import LLMClient


class TestR57PrecedenceRepro(unittest.TestCase):
    """R57 repro: an explicit provider override must not inherit the config's model."""

    # NOTE(review): this patches the definition site. If llm_client binds the
    # function via `from .runtime_config import get_effective_config`, the
    # correct patch target is "services.llm_client.get_effective_config" —
    # confirm against llm_client's imports.
    @patch("services.runtime_config.get_effective_config")
    def test_provider_model_contamination(self, mock_get_config):
        # Config stores provider A together with a model that is only valid
        # for provider A.
        mock_get_config.return_value = (
            {"provider": "anthropic", "model": "claude-3-opus", "base_url": ""},
            {"provider": "file", "model": "file"},
        )

        # User explicitly requests provider B without naming a model (e.g. the
        # "Test Connection" flow with only a provider override).
        client = LLMClient(provider="openai")

        # The config's anthropic-only model must NOT leak onto the openai
        # client; it should fall back to None / the provider's catalog default.
        self.assertNotEqual(
            client.model,
            "claude-3-opus",
            "R57 Failure: Client inherited incompatible model from config for a different provider",
        )


if __name__ == "__main__":
    unittest.main()

web/tabs/job_monitor_tab.js

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@ export function addJob(promptId, traceId = null) {
3333
export const jobMonitorTab = {
3434
id: "job-monitor",
3535
title: "Jobs",
36+
icon: "pi pi-briefcase",
3637
render: async (container) => {
3738
loadJobs();
3839
container.innerHTML = "";

web/tabs/settings_tab.js

Lines changed: 51 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ import { getAdminErrorMessage } from "../admin_errors.js";
88
export const settingsTab = {
99
id: "settings",
1010
title: "Settings",
11+
icon: "pi pi-cog",
1112
render: async (container) => {
1213
// IMPORTANT (UI layout): `.moltbot-content` has `overflow: hidden`.
1314
// This tab MUST render its own scroll container (`.moltbot-scroll-area`),
@@ -128,7 +129,14 @@ export const settingsTab = {
128129
// -- LLM Settings Section --
129130
const llmSec = createSection("LLM Settings");
130131
if (configRes.ok) {
131-
const { config, sources, providers } = configRes.data;
132+
// R54: Null-safe destructuring with defaults
133+
const data = configRes.data || {};
134+
const config = data.config || {};
135+
const sources = data.sources || {};
136+
const providers = data.providers || [];
137+
// R53: Apply feedback (optional, for debug/toast later)
138+
const applyInfo = data.apply || {};
139+
132140

133141
// Provider dropdown
134142
const providerRow = createFormRow("Provider", sources.provider === "env");
@@ -380,9 +388,22 @@ export const settingsTab = {
380388
};
381389

382390
const res = await moltbotApi.putConfig(updates, token);
391+
// R53: Hot-Reload Feedback
383392
if (res.ok) {
384-
statusDiv.textContent = "✓ Saved! Restart ComfyUI to apply.";
385-
statusDiv.className = "moltbot-status ok";
393+
const apply = res.data?.apply || {};
394+
let msg = "✓ Saved!";
395+
396+
if (apply.restart_required?.length > 0) {
397+
msg += " Restart required for: " + apply.restart_required.join(", ");
398+
statusDiv.className = "moltbot-status warning"; // Yellow/Orange
399+
} else if (apply.applied_now?.length > 0) {
400+
msg += " Applied immediately (Hot Reload).";
401+
statusDiv.className = "moltbot-status ok";
402+
} else {
403+
// No changes or unknown
404+
statusDiv.className = "moltbot-status ok";
405+
}
406+
statusDiv.textContent = msg;
386407
} else {
387408
const errorMsg = getAdminErrorMessage(res.error, res.status);
388409
statusDiv.textContent = `✗ ${res.errors?.join(", ") || errorMsg}`;
@@ -396,7 +417,16 @@ export const settingsTab = {
396417
const testBtn = document.createElement("button");
397418
testBtn.className = "moltbot-btn moltbot-btn-secondary";
398419
testBtn.textContent = "Test Connection";
420+
421+
// R54: Debounced Test Action to prevent spam
422+
// We use a separate handler because we need to manage button state (disabled/enabled)
423+
// which debounce interferes with if not careful.
424+
// Better strategy: Disable button immediately on click, re-enable after completion.
425+
// Debounce is less critical here if we disable the button, but good for "auto-test on change" (future).
426+
// For now, implementing "Disable while testing" is the better guard than generic debounce for a button click.
399427
testBtn.onclick = async () => {
428+
if (testBtn.disabled) return;
429+
400430
const token = (tokenInput.value || MoltbotSession.getAdminToken() || "").trim();
401431
if (token) MoltbotSession.setAdminToken(token);
402432

@@ -408,22 +438,25 @@ export const settingsTab = {
408438
// selected in the UI, even if the user hasn't clicked Save yet. Otherwise, the backend
409439
// falls back to the effective config (often "openai") and produces confusing errors like:
410440
// "API key not configured for provider 'openai'" while the UI is set to Gemini.
411-
const res = await moltbotApi.testLLM(token, {
412-
provider: providerSelect.value,
413-
model: modelInput.value,
414-
base_url: baseUrlInput.value,
415-
timeout_sec: parseInt(timeoutInput.value) || 120,
416-
max_retries: parseInt(retriesInput.value) || 3,
417-
});
418-
if (res.ok) {
419-
statusDiv.textContent = "✓ Success! " + (res.response ? `"${res.response}"` : "");
420-
statusDiv.className = "moltbot-status ok";
421-
} else {
422-
const errorMsg = getAdminErrorMessage(res.error, res.status);
423-
statusDiv.textContent = `✗ ${errorMsg}`;
424-
statusDiv.className = "moltbot-status error";
441+
try {
442+
const res = await moltbotApi.testLLM(token, {
443+
provider: providerSelect.value,
444+
model: modelInput.value,
445+
base_url: baseUrlInput.value,
446+
timeout_sec: parseInt(timeoutInput.value) || 120,
447+
max_retries: parseInt(retriesInput.value) || 3,
448+
});
449+
if (res.ok) {
450+
statusDiv.textContent = "✓ Success! " + (res.response ? `"${res.response}"` : "");
451+
statusDiv.className = "moltbot-status ok";
452+
} else {
453+
const errorMsg = getAdminErrorMessage(res.error, res.status);
454+
statusDiv.textContent = `✗ ${errorMsg}`;
455+
statusDiv.className = "moltbot-status error";
456+
}
457+
} finally {
458+
testBtn.disabled = false;
425459
}
426-
testBtn.disabled = false;
427460
};
428461
btnRow.appendChild(testBtn);
429462

web/utils/debounce.js

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
2+
/**
 * Simple debounce utility (R54 UI Guard).
 * Prevents rapid-fire API calls (e.g. test connection spam).
 *
 * Returns a wrapper that delays invoking `func` until `wait` ms have elapsed
 * since the wrapper was last called. `this` and arguments are forwarded to
 * `func` on invocation.
 */
export function debounce(func, wait) {
  let pending = null;
  return function (...args) {
    // Cancel any invocation still waiting to fire, then re-arm the timer.
    if (pending !== null) {
      clearTimeout(pending);
    }
    const self = this;
    pending = setTimeout(function () {
      func.apply(self, args);
    }, wait);
  };
}

0 commit comments

Comments
 (0)