
Commit c2b5aa1

Updated tests
1 parent d64ef59 commit c2b5aa1

File tree: 1 file changed (+17, -83 lines)


projects/rocprofiler-compute/tests/test_analyze_commands.py

Lines changed: 17 additions & 83 deletions
@@ -1706,22 +1706,6 @@ def import_db_evaluate():
     return db_analysis.evaluate


-def create_metric_evaluator(raw_pmc_df=None, sys_vars=None, empirical_peaks=None):
-    """Create a MetricEvaluator instance with test data."""
-    import sys
-
-    sys.path.insert(0, str(Path(__file__).parent.parent / "src"))
-    from utils.parser import MetricEvaluator
-
-    if raw_pmc_df is None:
-        raw_pmc_df = pd.DataFrame({"a": [10.0], "b": [2.0]})
-    if sys_vars is None:
-        sys_vars = {}
-    if empirical_peaks is None:
-        empirical_peaks = {}
-    return MetricEvaluator(raw_pmc_df, sys_vars, empirical_peaks)
-
-
 @pytest.mark.misc
 def test_evaluate_division_by_zero_returns_inf_with_warning():
     """Division by zero should produce inf and emit a warning."""
@@ -1787,27 +1771,6 @@ def test_evaluate_none_expression_returns_none():
     assert result is None


-@pytest.mark.misc
-def test_evaluate_errstate_suppresses_runtime_warnings():
-    """np.errstate should suppress RuntimeWarning during evaluation."""
-    import warnings as warn_module
-
-    evaluate = import_db_evaluate()
-    pmc_df = pd.DataFrame({"a": [0.0], "b": [0.0]})
-    with warn_module.catch_warnings():
-        warn_module.simplefilter("error")
-        with patch("rocprof_compute_analyze.analysis_db.console_warning"):
-            # Should not raise RuntimeWarning thanks to np.errstate
-            # 0/0 -> NaN -> scalar NA -> returns None
-            result = evaluate(
-                "test_metric",
-                "pmc_df['a'][0] / pmc_df['b'][0]",
-                pmc_df,
-                {},
-            )
-            assert result is None
-
-
 @pytest.mark.misc
 def test_evaluate_failed_expression_returns_none():
     """An expression that raises an exception should return None."""
@@ -1833,12 +1796,27 @@ def test_evaluate_scalar_na_returns_none():
 # =============================================================================


+def create_metric_evaluator(raw_pmc_df=None, sys_vars=None, empirical_peaks=None):
+    """Create a MetricEvaluator instance with test data."""
+    from utils.parser import MetricEvaluator
+
+    if raw_pmc_df is None:
+        raw_pmc_df = pd.DataFrame({"a": [10.0], "b": [2.0]})
+    if sys_vars is None:
+        sys_vars = {}
+    if empirical_peaks is None:
+        empirical_peaks = {}
+    return MetricEvaluator(raw_pmc_df, sys_vars, empirical_peaks)
+
+
 @pytest.mark.misc
 def test_metric_evaluator_valid_expression():
     """Valid expressions should evaluate correctly."""
     evaluator = create_metric_evaluator()
-    result = evaluator.eval_expression("raw_pmc_df['a'][0] / raw_pmc_df['b'][0]")
-    assert result == 5.0
+    with patch("utils.parser.console_warning") as mock_warn:
+        result = evaluator.eval_expression("raw_pmc_df['a'][0] + raw_pmc_df['b'][0]")
+    assert result == 12.0
+    mock_warn.assert_not_called()


 @pytest.mark.misc
@@ -1868,24 +1846,6 @@ def test_metric_evaluator_zero_divided_by_zero_returns_na_with_warning():
     assert len(nan_warnings) > 0


-@pytest.mark.misc
-def test_metric_evaluator_errstate_suppresses_runtime_warnings():
-    """np.errstate should suppress RuntimeWarning during evaluation."""
-    import warnings as warn_module
-
-    raw_pmc_df = pd.DataFrame({"a": [0.0], "b": [0.0]})
-    evaluator = create_metric_evaluator(raw_pmc_df=raw_pmc_df)
-    with warn_module.catch_warnings():
-        warn_module.simplefilter("error")
-        with patch("utils.parser.console_warning"):
-            # Should not raise RuntimeWarning thanks to np.errstate
-            # 0/0 -> NaN -> scalar NA -> returns "N/A"
-            result = evaluator.eval_expression(
-                "raw_pmc_df['a'][0] / raw_pmc_df['b'][0]"
-            )
-            assert result == "N/A"
-
-
 @pytest.mark.misc
 def test_metric_evaluator_na_expression_returns_na_string():
     """Expression evaluating to NA should return 'N/A'."""
@@ -1903,29 +1863,3 @@ def test_metric_evaluator_none_expression_returns_na_string():
     with patch("utils.parser.console_debug"):
         result = evaluator.eval_expression("None")
     assert result == "N/A"
-
-
-@pytest.mark.misc
-def test_metric_evaluator_sys_vars_accessible():
-    """System variables should be accessible in expressions."""
-    evaluator = create_metric_evaluator(sys_vars={"my_var": 42})
-    result = evaluator.eval_expression("my_var")
-    assert result == 42
-
-
-@pytest.mark.misc
-def test_metric_evaluator_empirical_peaks_accessible():
-    """Empirical peaks should be accessible in expressions."""
-    evaluator = create_metric_evaluator(empirical_peaks={"peak_val": 100.0})
-    result = evaluator.eval_expression("peak_val")
-    assert result == 100.0
-
-
-@pytest.mark.misc
-def test_metric_evaluator_valid_expression_no_warning():
-    """A valid expression should not produce warnings."""
-    evaluator = create_metric_evaluator()
-    with patch("utils.parser.console_warning") as mock_warn:
-        result = evaluator.eval_expression("raw_pmc_df['a'][0] + raw_pmc_df['b'][0]")
-    assert result == 12.0
-    mock_warn.assert_not_called()
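
Both deleted errstate tests exercise the same numpy behavior, spelled out in their inline comments: dividing two float64 scalars as 0/0 yields NaN and normally emits a RuntimeWarning ("invalid value encountered"), which np.errstate silences at the source. A minimal standalone sketch of that mechanism, using only public numpy and pandas APIs (an illustration under those assumptions, not code from this repository):

import warnings

import numpy as np
import pandas as pd

pmc_df = pd.DataFrame({"a": [0.0], "b": [0.0]})

with warnings.catch_warnings():
    # Promote any warning to an exception, as the deleted tests did.
    warnings.simplefilter("error")
    with np.errstate(divide="ignore", invalid="ignore"):
        # 0/0 on numpy float64 scalars -> nan; errstate prevents the
        # RuntimeWarning, so simplefilter("error") has nothing to raise.
        result = pmc_df["a"][0] / pmc_df["b"][0]

assert pd.isna(result)  # per the diff's comments, this NaN then maps to None / "N/A"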
