diff --git a/responses_api_agents/verifiers_agent/README.md b/responses_api_agents/verifiers_agent/README.md new file mode 100644 index 000000000..17226280c --- /dev/null +++ b/responses_api_agents/verifiers_agent/README.md @@ -0,0 +1,128 @@ +# Description + +This agent enables running Prime Intellect [verifiers](https://github.com/PrimeIntellect-ai/verifiers) environments, including many in Prime Intellect's [Environments Hub](https://app.primeintellect.ai/dashboard/environments?ex_sort=by_sections) in NeMo Gym. + +## Install Gym + +``` +git clone https://github.com/NVIDIA-NeMo/Gym +cd Gym +uv venv +source .venv/bin/activate +uv sync +``` + +## Test acereason-math example + +First set `env.yaml` for a local model: +``` +policy_base_url: "http://localhost:8000/v1" +policy_api_key: "none" +policy_model_name: "Qwen/Qwen3-4B-Instruct-2507" +``` + +Next, serve the model. + +Make sure to serve the model with longer context length than the generation length in your agent config (e.g. acereason-math.yaml) + +``` +uv pip install vllm +vllm serve Qwen/Qwen3-4B-Instruct-2507 --max-model-len 32768 --reasoning-parser qwen3 --enable-auto-tool-choice --tool-call-parser hermes +``` + + +Now launch NeMo Gym servers: +``` +uv sync +ng_run "+config_paths=[responses_api_agents/verifiers_agent/configs/acereason-math.yaml,responses_api_models/vllm_model/configs/vllm_model.yaml]" +``` + +Collect rollouts +``` +ng_collect_rollouts \ + +agent_name=verifiers_agent \ + +input_jsonl_fpath=responses_api_agents/verifiers_agent/data/acereason-math-example.jsonl \ + +output_jsonl_fpath=responses_api_agents/verifiers_agent/data/acereason-math-example-rollouts.jsonl \ + +limit=5 +``` + +View a rollout in the terminal +``` +tail -n 1 responses_api_agents/verifiers_agent/data/acereason-math-example-rollouts.jsonl | jq | less +``` + + +## Testing new prime environments from environments hub + +Some examples: `primeintellect/acereason-math`, `primeintellect/ascii-tree` and 
`primeintellect/alphabet-sort`. + +### Install an environment + +``` +uv add verifiers +uv tool install prime +prime env install primeintellect/ascii-tree +``` + +### Creating a dataset + +A helper script to make a dataset is in `scripts/create_dataset.py`. + +``` +python3 scripts/create_dataset.py --env-id primeintellect/ascii-tree --size 5 --output data/ascii-tree-example.jsonl +``` + +### Update agent server requirements + +Update `requirements.txt` to: +``` +-e nemo-gym[dev] @ ../../ +verifiers==0.1.9.post3 +--extra-index-url https://hub.primeintellect.ai/primeintellect/simple/ +ascii-tree +``` +### Update agent config +Create `configs/ascii-tree.yaml`, primarily updating env id, and any other env specific args: +``` +verifiers_agent: + responses_api_agents: + verifiers_agent: + entrypoint: app.py + model_server: + type: responses_api_models + name: policy_model + model_name: "" + vf_env_id: ascii-tree + vf_env_args: {} + group_size: 1 + max_concurrent_generation: -1 + max_concurrent_scoring: -1 + max_tokens: 8192 + temperature: 1.0 + top_p: 1.0 + +``` + +Now launch NeMo Gym servers: +``` +uv sync +ng_run "+config_paths=[responses_api_agents/verifiers_agent/configs/ascii-tree.yaml,responses_api_models/vllm_model/configs/vllm_model.yaml]" +``` + +Collect rollouts +``` +ng_collect_rollouts \ + +agent_name=verifiers_agent \ + +input_jsonl_fpath=responses_api_agents/verifiers_agent/data/ascii-tree-example.jsonl \ + +output_jsonl_fpath=responses_api_agents/verifiers_agent/data/ascii-tree-example-rollouts.jsonl \ + +limit=5 +``` + + +# Licensing information +Code: Apache 2.0 +Data: N/A + +Dependencies +- nemo_gym: Apache 2.0 +- verifiers: Apache 2.0 diff --git a/responses_api_agents/verifiers_agent/__init__.py b/responses_api_agents/verifiers_agent/__init__.py new file mode 100644 index 000000000..467079831 --- /dev/null +++ b/responses_api_agents/verifiers_agent/__init__.py @@ -0,0 +1,14 @@ +# SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/responses_api_agents/verifiers_agent/app.py b/responses_api_agents/verifiers_agent/app.py new file mode 100644 index 000000000..ba4005192 --- /dev/null +++ b/responses_api_agents/verifiers_agent/app.py @@ -0,0 +1,339 @@ +# Copyright (c) 2026, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import logging +import traceback +from typing import Any + +import verifiers as vf +import verifiers.envs.multiturn_env as _multiturn_env_module +from fastapi import Body, Request, Response +from openai.types.chat.chat_completion import ChatCompletion +from pydantic import ConfigDict, Field +from verifiers.utils.async_utils import maybe_semaphore +from verifiers.utils.response_utils import parse_response_messages as _original_parse_response_messages + +from nemo_gym.base_resources_server import BaseRunRequest, BaseVerifyResponse +from nemo_gym.base_responses_api_agent import BaseResponsesAPIAgentConfig, SimpleResponsesAPIAgent +from nemo_gym.config_types import ModelServerRef +from nemo_gym.global_config import get_first_server_config_dict +from nemo_gym.openai_utils import ( + NeMoGymEasyInputMessage, + NeMoGymResponse, + NeMoGymResponseCreateParamsNonStreaming, + NeMoGymResponseOutputMessage, + NeMoGymResponseOutputMessageForTraining, + NeMoGymResponseOutputText, +) +from nemo_gym.server_utils import get_global_aiohttp_client + + +logger = logging.getLogger(__name__) + + +# patch verifiers to include prompt and generation token ids and logprobs for +# re-tokenization correction in replace_prefix_tokens (https://github.com/NVIDIA-NeMo/RL/blob/main/nemo_rl/models/generation/vllm/vllm_worker_async.py#L40) +async def _patched_parse_response_messages(response, message_type): + messages = await _original_parse_response_messages(response, message_type) + if message_type == "chat" and isinstance(messages, list): + for msg in messages: + if isinstance(msg, dict) and msg.get("role") == "assistant": + if hasattr(response, "prompt_token_ids"): + msg["prompt_token_ids"] = response.prompt_token_ids + if response.choices and hasattr(response.choices[0], "token_ids"): + msg["generation_token_ids"] = response.choices[0].token_ids + if ( + response.choices + and response.choices[0].logprobs + and hasattr(response.choices[0].logprobs, 
"content") + and response.choices[0].logprobs.content + ): + msg["generation_log_probs"] = [t.logprob for t in response.choices[0].logprobs.content] + return messages + + +_multiturn_env_module.parse_response_messages = _patched_parse_response_messages + + +class VerifiersNeMoGymResponse(NeMoGymResponse): + env_id: str + group_id: str + output: list[dict[str, Any]] + reward: float + metrics: dict[str, Any] = Field(default_factory=dict) + parallel_tool_calls: bool = False + tool_choice: str = "none" + tools: list = Field(default_factory=list) + + +class VerifiersAgentVerifyResponse(BaseVerifyResponse): + model_config = ConfigDict(extra="allow") + response: VerifiersNeMoGymResponse + reward: float + + +class VLLMOpenAIClient: + def __init__(self, base_url: str) -> None: + self._base_url = base_url.rstrip("/") + self.chat = self._Chat(self) + + class _Chat: + def __init__(self, client: "VLLMOpenAIClient") -> None: + self.completions = client + + async def create(self, *args: Any, **kwargs: Any) -> ChatCompletion: + request_body: dict[str, Any] = { + "model": kwargs.get("model", ""), + "messages": kwargs.get("messages", []), + } + for key in ( + "temperature", + "max_tokens", + "max_completion_tokens", + "top_p", + "stop", + "n", + "tools", + "tool_choice", + ): + if key in kwargs and kwargs[key] is not None: + request_body[key] = kwargs[key] + + url = f"{self._base_url}/chat/completions" + try: + session = get_global_aiohttp_client() + async with session.post(url, json=request_body) as resp: + if resp.status != 200: + error_text = await resp.text() + logger.error(f"Request to {url} failed with status {resp.status}: {error_text}") + resp.raise_for_status() + response_dict = await resp.json() + except Exception as e: + logger.error(f"Exception calling {url}: {type(e).__name__}: {e}") + raise + + choice_dict = response_dict["choices"][0] + message_dict = choice_dict.get("message", {}) + + prompt_token_ids = message_dict.pop("prompt_token_ids", []) + generation_token_ids 
= message_dict.pop("generation_token_ids", []) + generation_log_probs = message_dict.pop("generation_log_probs", []) + + if not generation_token_ids: + logger.warning( + f"No generation_token_ids in response! Full message keys were: {list(choice_dict.get('message', {}).keys())}" + ) + + if prompt_token_ids and isinstance(prompt_token_ids[0], str): + prompt_token_ids = [int(tid) for tid in prompt_token_ids] + + if generation_token_ids and isinstance(generation_token_ids[0], str): + generation_token_ids = [int(tid) for tid in generation_token_ids] + + if generation_token_ids and generation_log_probs: + choice_dict["logprobs"] = { + "content": [ + {"token": f"token_id:{tid}", "logprob": lp, "top_logprobs": []} + for tid, lp in zip(generation_token_ids, generation_log_probs) + ] + } + + response = ChatCompletion.model_validate(response_dict) + setattr(response, "prompt_token_ids", prompt_token_ids) + setattr(response.choices[0], "token_ids", generation_token_ids) + return response + + +class VerifiersAgentConfig(BaseResponsesAPIAgentConfig): + model_server: ModelServerRef + model_name: str = Field(default="", description="Model name") + + vf_env_id: str = Field(default="", description="Verifiers environment ID") + vf_env_args: dict = Field(default_factory=dict, description="Verifiers environment arguments") + + max_concurrent_generation: int = Field( + default=-1, description="Max concurrent generation requests (-1 = unlimited)" + ) + max_concurrent_scoring: int = Field(default=-1, description="Max concurrent scoring requests (-1 = unlimited)") + + max_tokens: int = Field(default=8192, description="Max tokens for generation") + + # nemo rl generation_config overrides these + temperature: float = Field(default=1.0) + top_p: float = Field(default=1.0) + + +class VerifiersAgentRunRequest(BaseRunRequest): + model_config = ConfigDict(extra="allow") + + task_idx: int + vf_env_id: str | None = Field(default=None, description="Verifiers environment ID") + 
responses_create_params: NeMoGymResponseCreateParamsNonStreaming = Field( + default_factory=lambda: NeMoGymResponseCreateParamsNonStreaming(input=[]) + ) + answer: str = Field(default="", description="Expected answer from dataset") + task: str = Field(default="default", description="Task type from dataset") + example_id: int | str = Field(default=0, description="Example ID from dataset") + info: dict = Field(default_factory=dict, description="Extra info from dataset") + + +class VerifiersAgent(SimpleResponsesAPIAgent): + model_config = ConfigDict(arbitrary_types_allowed=True) + config: VerifiersAgentConfig + + envs_cache: dict[str, Any] = Field(default_factory=dict) # vf.Environment + openai_client_cache: dict[str, VLLMOpenAIClient] = Field(default_factory=dict) + + def _get_env(self, vf_env_id: str) -> vf.Environment: + if vf_env_id not in self.envs_cache: + self.envs_cache[vf_env_id] = vf.load_environment(vf_env_id, **self.config.vf_env_args) + return self.envs_cache[vf_env_id] + + def _get_openai_client(self) -> VLLMOpenAIClient: + cache_key = self.config.model_server.name + if cache_key not in self.openai_client_cache: + server_config_dict = get_first_server_config_dict( + self.server_client.global_config_dict, + self.config.model_server.name, + ) + model_server_url = f"http://{server_config_dict.host}:{server_config_dict.port}" + + if not model_server_url.endswith("/v1"): + model_server_url = model_server_url.rstrip("/") + "/v1" + + self.openai_client_cache[cache_key] = VLLMOpenAIClient(base_url=model_server_url) + + return self.openai_client_cache[cache_key] + + def _convert_trajectory_to_output(self, state: dict) -> list: + output = [] + trajectory = state.get("trajectory", []) + + for step in trajectory: + for msg in step.get("prompt", []): + if isinstance(msg, dict): + role = msg.get("role", "user") + content = msg.get("content", "") + output.append(NeMoGymEasyInputMessage(role=role, content=content).model_dump()) + + tokens = step.get("tokens") + for msg 
in step.get("completion", []): + if isinstance(msg, dict): + content = msg.get("content", "") + if tokens: + output.append( + NeMoGymResponseOutputMessageForTraining( + id=f"msg_{id(msg)}", + content=[NeMoGymResponseOutputText(text=content, annotations=[])], + prompt_token_ids=tokens.get("prompt_ids", []), + generation_token_ids=tokens.get("completion_ids", []), + generation_log_probs=tokens.get("completion_logprobs", []), + ).model_dump() + ) + else: + output.append( + NeMoGymResponseOutputMessage( + id=f"msg_{id(msg)}", + content=[NeMoGymResponseOutputText(text=content, annotations=[])], + ).model_dump() + ) + + return output + + async def responses( + self, + request: Request, + response: Response, + body: VerifiersAgentRunRequest = Body(), + ) -> VerifiersNeMoGymResponse: + try: + vf_env_id = body.vf_env_id or self.config.vf_env_id + vf_env = self._get_env(vf_env_id) + task_idx = body.task_idx + + prompt_messages = [] + for item in body.responses_create_params.input or []: + if hasattr(item, "role") and hasattr(item, "content"): + prompt_messages.append({"role": item.role, "content": item.content}) + elif isinstance(item, dict): + prompt_messages.append({"role": item.get("role", "user"), "content": item.get("content", "")}) + + rollout_input = vf.RolloutInput( + prompt=prompt_messages, + answer=body.answer, + task=body.task, + info=body.info, + example_id=body.example_id, + ) + + client = self._get_openai_client() + + gen_sem = await maybe_semaphore(self.config.max_concurrent_generation) + score_sem = await maybe_semaphore(self.config.max_concurrent_scoring) + + # prefer NeMo RL generation config set in responses_create_params https://github.com/NVIDIA-NeMo/RL/blob/main/nemo_rl/experience/rollouts.py#L1045-L1046 + sampling_args = { + "max_tokens": self.config.max_tokens, + "temperature": getattr(body.responses_create_params, "temperature", None) or self.config.temperature, + "top_p": getattr(body.responses_create_params, "top_p", None) or self.config.top_p, + } 
+ states = await vf_env.run_group( + group_inputs=[rollout_input], + client=client, + model=self.config.model_name, + gen_sampling_args=sampling_args, + gen_sem=gen_sem, + score_sem=score_sem, + ) + + state = states[0] + reward = state.get("reward", 0.0) or 0.0 + metrics = state.get("metrics", {}) or {} + + output = self._convert_trajectory_to_output(state) + + return VerifiersNeMoGymResponse( + id=f"verifiers-{vf_env_id}-{task_idx}", + created_at=0, + model=self.config.model_name, + object="response", + output=output, + env_id=vf_env_id, + group_id=str(task_idx), + reward=reward, + metrics=metrics, + ) + except Exception as e: + logger.error(f"Exception in responses(): {type(e).__name__}: {e}") + logger.error(f"Traceback:\n{traceback.format_exc()}") + raise + + async def run( + self, + request: Request, + response: Response, + body: VerifiersAgentRunRequest = Body(), + ) -> VerifiersAgentVerifyResponse: + resp = await self.responses(request, response, body) + + return VerifiersAgentVerifyResponse( + responses_create_params=body.responses_create_params, + response=resp, + reward=resp.reward, + ) + + +if __name__ == "__main__": + VerifiersAgent.run_webserver() diff --git a/responses_api_agents/verifiers_agent/configs/acereason-math.yaml b/responses_api_agents/verifiers_agent/configs/acereason-math.yaml new file mode 100644 index 000000000..7e447cbb8 --- /dev/null +++ b/responses_api_agents/verifiers_agent/configs/acereason-math.yaml @@ -0,0 +1,16 @@ +verifiers_agent: + responses_api_agents: + verifiers_agent: + entrypoint: app.py + model_server: + type: responses_api_models + name: policy_model + model_name: "" + vf_env_id: acereason-math + vf_env_args: {} + group_size: 1 + max_concurrent_generation: -1 + max_concurrent_scoring: -1 + max_tokens: 8192 + temperature: 1.0 + top_p: 1.0 diff --git a/responses_api_agents/verifiers_agent/data/acereason-math-example.jsonl b/responses_api_agents/verifiers_agent/data/acereason-math-example.jsonl new file mode 100644 index 
000000000..34b279939 --- /dev/null +++ b/responses_api_agents/verifiers_agent/data/acereason-math-example.jsonl @@ -0,0 +1,5 @@ +{"task_idx": 0, "vf_env_id": "acereason-math", "responses_create_params": {"input": [{"content": "Let $ABCD$ be a square. If sides $AB$ and $CD$ are increased by $20\\%$ and sides $AD$ and $BC$ are decreased by $20\\%$ (forming a rectangle), by what percent does the area change?\nPlease reason step by step, and put your final answer within \\boxed{{}}.", "role": "user"}]}, "question": "Let $ABCD$ be a square. If sides $AB$ and $CD$ are increased by $20\\%$ and sides $AD$ and $BC$ are decreased by $20\\%$ (forming a rectangle), by what percent does the area change?\nPlease reason step by step, and put your final answer within \\boxed{{}}.", "answer": "-4", "task": "acereason-math", "example_id": 0, "info": {}, "agent_ref": {"type": "responses_api_agents", "name": "verifiers_agent"}} +{"task_idx": 1, "vf_env_id": "acereason-math", "responses_create_params": {"input": [{"content": "\nAn investor has an open brokerage account with an investment company. In 2021, the investor received the following income from securities:\n\n- Dividends from shares of the company PAO \u201cWinning\u201d amounted to 50,000 rubles.\n- Coupon income from government bonds OFZ amounted to 40,000 rubles.\n- Coupon income from corporate bonds of PAO \u201cReliable\u201d amounted to 30,000 rubles.\n\nIn addition, the investor received a capital gain from selling 100 shares of PAO \"Risky\" at 200 rubles per share. The purchase price was 150 rubles per share. The investor held the shares for 4 months.\n\nCalculate the amount of personal income tax (NDFL) on the income from the securities.\nPlease reason step by step, and put your final answer within \\boxed{{}}.", "role": "user"}]}, "question": "\nAn investor has an open brokerage account with an investment company. 
In 2021, the investor received the following income from securities:\n\n- Dividends from shares of the company PAO \u201cWinning\u201d amounted to 50,000 rubles.\n- Coupon income from government bonds OFZ amounted to 40,000 rubles.\n- Coupon income from corporate bonds of PAO \u201cReliable\u201d amounted to 30,000 rubles.\n\nIn addition, the investor received a capital gain from selling 100 shares of PAO \"Risky\" at 200 rubles per share. The purchase price was 150 rubles per share. The investor held the shares for 4 months.\n\nCalculate the amount of personal income tax (NDFL) on the income from the securities.\nPlease reason step by step, and put your final answer within \\boxed{{}}.", "answer": "11050", "task": "acereason-math", "example_id": 1, "info": {}, "agent_ref": {"type": "responses_api_agents", "name": "verifiers_agent"}} +{"task_idx": 2, "vf_env_id": "acereason-math", "responses_create_params": {"input": [{"content": "\n58 balls of two colors - red and blue - are arranged in a circle. It is known that the number of consecutive triplets of balls with a majority of red balls is equal to the number of triplets with a majority of blue balls. What is the minimum possible number of red balls?\nPlease reason step by step, and put your final answer within \\boxed{{}}.", "role": "user"}]}, "question": "\n58 balls of two colors - red and blue - are arranged in a circle. It is known that the number of consecutive triplets of balls with a majority of red balls is equal to the number of triplets with a majority of blue balls. 
What is the minimum possible number of red balls?\nPlease reason step by step, and put your final answer within \\boxed{{}}.", "answer": "20", "task": "acereason-math", "example_id": 2, "info": {}, "agent_ref": {"type": "responses_api_agents", "name": "verifiers_agent"}} +{"task_idx": 3, "vf_env_id": "acereason-math", "responses_create_params": {"input": [{"content": "A waiter at the restaurant U \u0160ejd\u00ed\u0159e always adds the current date to the bill: he increases the total amount spent by as many crowns as the day of the month it is.\n\nIn September, a group of three friends dined at the restaurant twice. The first time, each person paid separately, and the waiter added the date to each bill, resulting in each person being charged 168 CZK. Four days later, they had lunch again and ordered exactly the same as before. This time, however, one person paid for all three. The waiter added the date to the bill only once and asked for 486 CZK in total. The friends were puzzled that although the prices on the menu had not changed, the lunch was cheaper this time, and they uncovered the waiter\u2019s scam. What was the date?\n\n(Hint: Determine what their total bill would have been if each person paid separately the second time as well.)\nPlease reason step by step, and put your final answer within \\boxed{{}}.", "role": "user"}]}, "question": "A waiter at the restaurant U \u0160ejd\u00ed\u0159e always adds the current date to the bill: he increases the total amount spent by as many crowns as the day of the month it is.\n\nIn September, a group of three friends dined at the restaurant twice. The first time, each person paid separately, and the waiter added the date to each bill, resulting in each person being charged 168 CZK. Four days later, they had lunch again and ordered exactly the same as before. This time, however, one person paid for all three. The waiter added the date to the bill only once and asked for 486 CZK in total. 
The friends were puzzled that although the prices on the menu had not changed, the lunch was cheaper this time, and they uncovered the waiter\u2019s scam. What was the date?\n\n(Hint: Determine what their total bill would have been if each person paid separately the second time as well.)\nPlease reason step by step, and put your final answer within \\boxed{{}}.", "answer": "15", "task": "acereason-math", "example_id": 3, "info": {}, "agent_ref": {"type": "responses_api_agents", "name": "verifiers_agent"}} +{"task_idx": 4, "vf_env_id": "acereason-math", "responses_create_params": {"input": [{"content": "What would the 25th number be in a numeric system where the base is five?\nPlease reason step by step, and put your final answer within \\boxed{{}}.", "role": "user"}]}, "question": "What would the 25th number be in a numeric system where the base is five?\nPlease reason step by step, and put your final answer within \\boxed{{}}.", "answer": "100", "task": "acereason-math", "example_id": 4, "info": {}, "agent_ref": {"type": "responses_api_agents", "name": "verifiers_agent"}} diff --git a/responses_api_agents/verifiers_agent/requirements.txt b/responses_api_agents/verifiers_agent/requirements.txt new file mode 100644 index 000000000..d91417c22 --- /dev/null +++ b/responses_api_agents/verifiers_agent/requirements.txt @@ -0,0 +1,4 @@ +-e nemo-gym[dev] @ ../../ +verifiers==0.1.9.post3 +--extra-index-url https://hub.primeintellect.ai/primeintellect/simple/ +acereason-math \ No newline at end of file diff --git a/responses_api_agents/verifiers_agent/scripts/create_dataset.py b/responses_api_agents/verifiers_agent/scripts/create_dataset.py new file mode 100644 index 000000000..8cf7180e4 --- /dev/null +++ b/responses_api_agents/verifiers_agent/scripts/create_dataset.py @@ -0,0 +1,103 @@ +# Copyright (c) 2026, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import argparse +import json +import logging +from pathlib import Path + +import verifiers as vf + + +logger = logging.getLogger(__name__) + + +def load_verifiers_dataset(vf_env: vf.Environment, n: int = -1, seed: int | None = None) -> list[dict]: + try: + dataset = vf_env.get_dataset(n=n, seed=seed) + except ValueError: + dataset = None + for attr in ["dataset", "train_dataset", "eval_dataset"]: + ds = getattr(vf_env, attr, None) + if ds is not None: + dataset = ds + logger.info(f"Found dataset in vf_env.{attr}") + break + if dataset is None: + raise ValueError("Environment does not have a dataset") + if seed is not None: + dataset = dataset.shuffle(seed=seed) + if n > 0: + dataset = dataset.select(range(min(n, len(dataset)))) + + return [ + { + "prompt": dataset["prompt"][i], + "example_id": dataset["example_id"][i], + "task": dataset["task"][i], + **({"answer": dataset["answer"][i]} if "answer" in dataset.column_names else {}), + **({"info": dataset["info"][i]} if "info" in dataset.column_names else {}), + } + for i in range(len(dataset)) + ] + + +def main(): + parser = argparse.ArgumentParser(description="Create JSONL dataset from verifiers environment") + parser.add_argument("--env-id", required=True, help="Verifiers environment ID (e.g., pmpp, math-env-rlm)") + parser.add_argument("--env-args", default="{}", help="JSON string of environment arguments") + 
parser.add_argument("--size", type=int, default=-1, help="Number of examples (-1 for all)") + parser.add_argument("--seed", type=int, default=None, help="Random seed for shuffling") + parser.add_argument("--output", required=True, help="Output JSONL file path") + args = parser.parse_args() + + env_args = json.loads(args.env_args) + + print(f"Loading verifiers environment: {args.env_id}") + env = vf.load_environment(args.env_id, **env_args) + + print(f"Getting dataset (size={args.size}, seed={args.seed})") + dataset_rows = load_verifiers_dataset(env, n=args.size, seed=args.seed) + + print(f"Dataset has {len(dataset_rows)} examples") + + output_path = Path(args.output) + output_path.parent.mkdir(parents=True, exist_ok=True) + + with open(output_path, "w") as f: + for i, row in enumerate(dataset_rows): + output_row = { + "task_idx": i, + "vf_env_id": args.env_id, + "responses_create_params": { + "input": row["prompt"], + }, + "agent_ref": { + "type": "responses_api_agents", + "name": "verifiers_agent", + }, + "question": row["prompt"][-1]["content"] if row["prompt"] else "", + "answer": row.get("answer", ""), + "task": row["task"], + "example_id": row["example_id"], + "info": row.get("info", {}), + } + f.write(json.dumps(output_row) + "\n") + + print(f"Wrote {len(dataset_rows)} examples to {output_path}") + + +if __name__ == "__main__": + main() diff --git a/responses_api_agents/verifiers_agent/tests/__init__.py b/responses_api_agents/verifiers_agent/tests/__init__.py new file mode 100644 index 000000000..467079831 --- /dev/null +++ b/responses_api_agents/verifiers_agent/tests/__init__.py @@ -0,0 +1,14 @@ +# SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/responses_api_agents/verifiers_agent/tests/test_app.py b/responses_api_agents/verifiers_agent/tests/test_app.py new file mode 100644 index 000000000..2d4f26443 --- /dev/null +++ b/responses_api_agents/verifiers_agent/tests/test_app.py @@ -0,0 +1,34 @@ +# SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from unittest.mock import MagicMock + +from nemo_gym.config_types import ModelServerRef +from nemo_gym.server_utils import ServerClient +from responses_api_agents.verifiers_agent.app import ( + VerifiersAgent, + VerifiersAgentConfig, +) + + +class TestApp: + def test_sanity(self) -> None: + config = VerifiersAgentConfig( + host="0.0.0.0", + port=8080, + entrypoint="", + name="", + model_server=ModelServerRef(type="responses_api_models", name=""), + ) + VerifiersAgent(config=config, server_client=MagicMock(spec=ServerClient))