Commit 8c3eda6

test(models/openrouter): add e2e tests (#2554)
* test(models/openrouter): add e2e tests
* test(models/openrouter): remove invalid models
* test(models/openrouter): try to fix test
* test(models/openrouter): idk why, but test fail on that check
* test(models/openrouter): idk why, but test fail on that
1 parent 4fce45d commit 8c3eda6

File tree

4 files changed, +80 -31 lines changed

models/openrouter/manifest.yaml

Lines changed: 1 addition & 1 deletion
@@ -26,5 +26,5 @@ type: plugin
 plugins:
   models:
     - provider/openrouter.yaml
-version: 0.0.31
+version: 0.0.32
 created_at: 2024-09-20T00:13:50.29298939-04:00

models/openrouter/models/llm/_position.yaml

Lines changed: 1 addition & 30 deletions
@@ -8,17 +8,11 @@
 - openai/o3-mini-high
 - openai/o3-mini
 - openai/o3-mini-2025-01-31
-- openai/o1-preview
-- openai/o1-mini
-- openai/gpt-4.1
-- openai/gpt-4.1-mini
-- openai/gpt-4.1-nano
 - openai/chatgpt-4o-latest
 - openai/gpt-4o-2024-11-20
 - openai/gpt-4o
 - openai/gpt-4o-mini
 - openai/gpt-4
-- openai/gpt-4-32k
 - openai/gpt-3.5-turbo
 - anthropic/claude-opus-4.5
 - anthropic/claude-opus-4
@@ -30,30 +24,18 @@
 - anthropic/claude-3.5-sonnet
 - anthropic/claude-haiku-4.5
 - anthropic/claude-3-haiku
-- anthropic/claude-3-opus
-- anthropic/claude-3-sonnet
 - google/gemini-3-pro-preview
 - google/gemini-3-flash-preview
 - google/gemini-2.5-pro
 - google/gemini-2.5-flash-preview-09-2025
 - google/gemini-2.5-flash
-- google/gemini-2.5-flash-image-preview
-- google/gemini-2.5-flash-image-preview:free
 - google/gemini-2.5-flash-lite-preview-09-2025
 - google/gemini-2.5-flash-lite
-- google/gemini-2.5-flash-lite-preview-06-17
 - google/gemini-2.0-flash-lite-001
-- google/gemini-pro-1.5
-- google/gemini-flash-1.5
-- google/gemini-pro
 - google/gemma-3n-e2b-it:free
-- cohere/command-r-plus
-- cohere/command-r
 - meta-llama/llama-3.2-1b-instruct
 - meta-llama/llama-3.2-3b-instruct
 - meta-llama/llama-3.2-11b-vision-instruct
-- meta-llama/llama-3.2-90b-vision-instruct
-- meta-llama/llama-3.1-405b-instruct
 - meta-llama/llama-3.1-70b-instruct
 - meta-llama/llama-3.1-8b-instruct
 - meta-llama/llama-3-70b-instruct
@@ -62,27 +44,16 @@
 - mistralai/mixtral-8x7b-instruct
 - mistralai/mistral-7b-instruct
 - qwen/qwen-2.5-72b-instruct
-- qwen/qwen-2-72b-instruct
 - qwen/qwen3-max
 - deepseek/deepseek-chat
 - deepseek/deepseek-chat-v3.1
 - deepseek/deepseek-chat-v3-0324
-- deepseek/deepseek-coder
-- deepseek/deepseek-prover-v2
 - deepseek/deepseek-r1
 - deepseek/deepseek-r1-0528
-- deepseek/deepseek-r1-0528-qwen3-8b
 - deepseek/deepseek-r1-distill-llama-70b
 - deepseek/deepseek-r1-distill-qwen-32b
-- deepseek/deepseek-r1-distill-qwen-14b
-- deepseek/deepseek-r1-distill-llama-8b
-- deepseek/deepseek-r1-distill-qwen-7b
-- deepseek/deepseek-r1-distill-qwen-1.5b
-- deepseek/deepseek-v3-base
 - moonshotai/kimi-k2-0905
-- moonshotai/kimi-k2-0711
 - x-ai/grok-4.1-fast
 - x-ai/grok-4-fast
-- x-ai/grok-4-fast:free
 - x-ai/grok-3-beta
-- x-ai/grok-3-mini-beta
+- x-ai/grok-3-mini-beta
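
The removals above track model ids that OpenRouter no longer serves or has renamed. One way to catch such stale entries before they fail the e2e matrix is to diff the position file against the provider's public catalog. A minimal sketch, not part of this commit, assuming OpenRouter's public GET https://openrouter.ai/api/v1/models endpoint returns {"data": [{"id": ...}]}:

# Hedged sketch: flag _position.yaml entries absent from the live catalog.
import json
import urllib.request
from pathlib import Path

import yaml

position_file = Path("models/openrouter/models/llm/_position.yaml")
listed = set(yaml.safe_load(position_file.read_text(encoding="utf-8")))

with urllib.request.urlopen("https://openrouter.ai/api/v1/models") as resp:
    served = {model["id"] for model in json.load(resp)["data"]}

for model_id in sorted(listed - served):
    print(f"stale entry: {model_id}")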

models/openrouter/pyproject.toml

Lines changed: 2 additions & 0 deletions
@@ -14,6 +14,8 @@ dependencies = [
 # uv run black . -C -l 100 && uv run ruff check --fix
 [dependency-groups]
 dev = [
+    "pyrefly>=0.50.0",
+    "pytest>=9.0.2",
     "black>=25.11.0",
     "ruff>=0.14.6",
 ]
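
Because pytest and pyrefly are declared under the dev dependency group, a plain uv sync should pull them in (uv includes the dev group by default), after which uv run pytest runs the new suite; the exact invocation is an assumption about the local setup, not something this commit pins down.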
Lines changed: 76 additions & 0 deletions
@@ -0,0 +1,76 @@
+import os
+from pathlib import Path
+
+import pytest
+import yaml
+
+from dify_plugin.config.integration_config import IntegrationConfig
+from dify_plugin.core.entities.plugin.request import (
+    ModelActions,
+    ModelInvokeLLMRequest,
+    PluginInvokeType,
+)
+from dify_plugin.entities.model import ModelType
+from dify_plugin.entities.model.llm import LLMResultChunk
+from dify_plugin.integration.run import PluginRunner
+
+
+def get_all_models() -> list[str]:
+    models_dir = Path(__file__).parent.parent / "models" / "llm"
+    position_file = models_dir / "_position.yaml"
+    if not position_file.exists():
+        raise FileNotFoundError(f"Missing model position file: {position_file}")
+
+    try:
+        data = yaml.safe_load(position_file.read_text(encoding="utf-8"))
+    except yaml.YAMLError as exc:
+        raise ValueError(f"Invalid YAML in {position_file}") from exc
+
+    if data is None:
+        return []
+    if not isinstance(data, list):
+        raise ValueError(f"Expected a YAML list in {position_file}")
+
+    models: list[str] = []
+    for item in data:
+        if isinstance(item, str) and item.strip():
+            models.append(item.strip())
+    return models
+
+
+@pytest.mark.parametrize("model_name", get_all_models())
+def test_llm_invoke(model_name: str) -> None:
+    api_key = os.getenv("OPENROUTER_API_KEY")
+    if not api_key:
+        raise ValueError("OPENROUTER_API_KEY environment variable is required")
+
+    plugin_path = os.getenv("PLUGIN_FILE_PATH")
+    if not plugin_path:
+        plugin_path = str(Path(__file__).parent.parent)
+
+    payload = ModelInvokeLLMRequest(
+        user_id="test_user",
+        provider="openrouter",
+        model_type=ModelType.LLM,
+        model=model_name,
+        credentials={"api_key": api_key},
+        prompt_messages=[{"role": "user", "content": "Say hello in one word."}],
+        stop=None,
+        tools=None,
+        stream=True,
+        model_parameters={},
+    )
+
+    with PluginRunner(
+        config=IntegrationConfig(), plugin_package_path=plugin_path
+    ) as runner:
+        results: list[LLMResultChunk] = []
+        for result in runner.invoke(
+            access_type=PluginInvokeType.Model,
+            access_action=ModelActions.InvokeLLM,
+            payload=payload,
+            response_type=LLMResultChunk,
+        ):
+            results.append(result)
+
+    assert len(results) > 0, f"No results received for model {model_name}"
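
The test parametrizes one streamed invoke per entry in _position.yaml and only asserts that at least one chunk comes back. A minimal sketch of a local run, not part of this commit, assuming the file lives under a tests/ directory and that "gpt-4o" is just an example -k filter:

# Hedged sketch: drive the parametrized suite for a single model id.
# "tests" and the -k value are assumptions about the local layout;
# PLUGIN_FILE_PATH is optional and defaults to the plugin tree.
import os
import sys

import pytest

os.environ["OPENROUTER_API_KEY"] = "<your-openrouter-key>"  # placeholder, not a real key

# pytest generates ids like test_llm_invoke[openai/gpt-4o]; -k selects a subset.
sys.exit(pytest.main(["tests", "-k", "gpt-4o", "-q"]))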
