126 changes: 95 additions & 31 deletions experimenter/tests/integration/nimbus/test_desktop_targeting.py
@@ -1,5 +1,7 @@
import json
import logging
import time
from functools import cache
from pathlib import Path

import pytest
@@ -9,50 +11,101 @@
from nimbus.utils import helpers


@pytest.fixture(params=helpers.load_targeting_configs())
def targeting_config_slug(request):
return request.param


@pytest.fixture
@cache
def filter_expression_path():
path = Path(__file__).parent / "utils" / "filter_expression.js"
return path.absolute()


@pytest.fixture(scope="function", autouse=True)
Member:
we want scope="module" to run this code once.

Contributor Author:
actually just realised that the selenium fixture is function-scoped, so we can't use module scope for setup_browser
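
For context, pytest disallows a wider-scoped fixture from depending on a narrower-scoped one, so a module-scoped setup_browser that requests the function-scoped selenium fixture fails at collection time. A minimal sketch of the constraint (illustrative only, not code from this PR):

```python
import pytest

@pytest.fixture(scope="module")
def setup_browser(selenium):
    # pytest-selenium's `selenium` fixture is function-scoped, so pytest
    # raises ScopeMismatch before the test ever runs.
    ...
```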

def setup_browser(selenium, filter_expression_path):
"""Open about:blank and initialize targeting environment once per test function."""
selenium.get("about:blank")

# Initialize TelemetryEnvironment and ExperimentAPI once
with filter_expression_path.open() as js:
result = Browser.execute_async_script(
selenium,
script=js.read(),
context="chrome",
)
assert result is True, "Failed to initialize targeting environment"

yield
Comment on lines +26 to +30
Member:

I'm actually curious if this test works without this function.



@pytest.mark.run_targeting
def test_check_advanced_targeting(
selenium,
targeting_config_slug,
experiment_slug,
default_data_api,
filter_expression_path,
):
default_data_api["targetingConfigSlug"] = targeting_config_slug
experiment = helpers.create_experiment(
experiment_slug,
BaseExperimentApplications.FIREFOX_DESKTOP.value,
default_data_api,
targeting=targeting_config_slug,
)
logging.info(f"GraphQL creation: {experiment}")
experiment_data = helpers.load_experiment_data(experiment_slug)
targeting = experiment_data["data"]["experimentBySlug"]["jexlTargetingExpression"]
logging.info(f"Experiment Targeting: {targeting}")
recipe = experiment_data["data"]["experimentBySlug"]["recipeJson"]
logging.info(f"Experiment Recipe: {recipe}")
"""
Evaluates all targeting configs in parallel using Promise.all for fast execution.
"""
targeting_slugs = helpers.load_targeting_configs()
targeting_tests = []

# Inject filter expression
selenium.get("about:blank")
# Create experiments for all targeting configs
for idx, slug in enumerate(targeting_slugs):
test_slug = f"{experiment_slug}_{idx}_{slug}"
default_data_api["targetingConfigSlug"] = slug
helpers.create_experiment(
test_slug,
BaseExperimentApplications.FIREFOX_DESKTOP.value,
default_data_api,
targeting=slug,
)
logging.info(f"Created experiment: {test_slug}")
# Delay to prevent race conditions when creating many experiments
time.sleep(1)

experiment_data = helpers.load_experiment_data(test_slug)
targeting = experiment_data["data"]["experimentBySlug"]["jexlTargetingExpression"]
recipe = experiment_data["data"]["experimentBySlug"]["recipeJson"]

targeting_tests.append(
{
"slug": slug,
"targeting": targeting,
"recipe": json.dumps({"experiment": recipe}),
}
)
Member:
We should not create any experiments and instead use a mapping of targeting slug to targeting expression. We may be able to just import nimbus.targeting.constants directly here

This is the real bottleneck of this test.
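
A sketch of what that could look like, assuming nimbus.targeting.constants exposes a slug-to-expression mapping (the import path and the TARGETING_CONFIGS name are assumptions, not verified against the repo):

```python
import json

from nimbus.targeting import constants  # hypothetical import path

# Build the evaluation payload directly from the constants instead of
# creating one experiment per slug; the stub recipe assumes the JEXL
# context does not need a real experiment here.
targeting_tests = [
    {
        "slug": slug,
        "targeting": expression,
        "recipe": json.dumps({"experiment": {}}),
    }
    for slug, expression in constants.TARGETING_CONFIGS.items()  # assumed mapping
]
```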


# Evaluate all targeting expressions in parallel using Promise.all
logging.info(
f"Evaluating {len(targeting_tests)} targeting expressions in parallel..."
)
with filter_expression_path.open() as js:
result = Browser.execute_async_script(
results = Browser.execute_async_script(
selenium,
targeting,
json.dumps({"experiment": recipe}),
json.dumps(targeting_tests),
script=js.read(),
context="chrome",
)
assert result is not None, "Invalid Targeting, or bad recipe"

# Validate results
assert results is not None, "Failed to evaluate targeting expressions"
assert len(results) == len(targeting_slugs), (
f"Expected {len(targeting_slugs)} results, got {len(results)}"
)

# Report any failures
failed_tests = []
for result in results:
logging.info(
f"Slug: {result['slug']}, "
f"Result: {result.get('result')}, "
f"Error: {result.get('error')}"
)
if result.get("result") is None:
error_msg = result.get("error", "Unknown error")
failed_tests.append(f"{result['slug']}: {error_msg}")

if failed_tests:
pytest.fail("Failed targeting evaluations:\n" + "\n".join(failed_tests))


@pytest.mark.parametrize(
@@ -84,6 +137,7 @@ def test_check_audience_targeting(
default_data_api,
filter_expression_path,
):
"""Evaluates single audience targeting configuration."""
default_data_api.update(audience_field)
experiment = helpers.create_experiment(
experiment_slug,
@@ -98,14 +152,24 @@ def test_check_audience_targeting(
recipe = experiment_data["data"]["experimentBySlug"]["recipeJson"]
logging.info(f"Experiment Recipe: {recipe}")

# Inject filter expression
selenium.get("about:blank")
# Use parallel mode with a single item for consistency
targeting_tests = [
{
"slug": audience_field,
"targeting": targeting,
"recipe": json.dumps({"experiment": recipe}),
}
]

with filter_expression_path.open() as js:
result = Browser.execute_async_script(
results = Browser.execute_async_script(
selenium,
targeting,
json.dumps({"experiment": recipe}),
json.dumps(targeting_tests),
script=js.read(),
context="chrome",
)
assert result is not None, "Invalid Targeting, or bad recipe"

assert results is not None and len(results) == 1, "Failed to evaluate targeting"
assert results[0].get("result") is not None, (
f"Invalid Targeting: {results[0].get('error', 'unknown error')}"
)
48 changes: 33 additions & 15 deletions experimenter/tests/integration/nimbus/utils/filter_expression.js
@@ -1,13 +1,20 @@
async function remoteSettings(targetingString, recipe) {
// Initialize TelemetryEnvironment and ExperimentAPI once at test setup
async function initialize() {
const { TelemetryEnvironment } = ChromeUtils.importESModule("resource://gre/modules/TelemetryEnvironment.sys.mjs");
await TelemetryEnvironment.onInitialized();

const { ExperimentAPI } = ChromeUtils.importESModule("resource://nimbus/ExperimentAPI.sys.mjs");
await ExperimentAPI.ready();

return true;
}

async function remoteSettings(targetingString, recipe) {
const { ASRouterTargeting } = ChromeUtils.importESModule("resource:///modules/asrouter/ASRouterTargeting.sys.mjs");
const { ExperimentAPI } = ChromeUtils.importESModule("resource://nimbus/ExperimentAPI.sys.mjs");
const { TargetingContext } = ChromeUtils.importESModule("resource://messaging-system/targeting/Targeting.sys.mjs");

const _experiment = JSON.parse(recipe);
await ExperimentAPI.ready();

const context = TargetingContext.combineContexts(
_experiment,
@@ -31,17 +38,28 @@ async function remoteSettings(targetingString, recipe) {
}

/*
Arguments contains 3 items.
arguments[0] - the JEXL targeting string
arguments[1] - the experiment recipe
arguments[3] - the callback from selenium
This script handles two modes:
1. Initialization mode (1 argument): arguments[0] = callback
2. Parallel evaluation mode (2 arguments): arguments[0] = JSON array of {slug, targeting, recipe}, arguments[1] = callback
*/
const [targetingString, recipe, callback] = arguments;

remoteSettings(targetingString, recipe)
.then(result => {
callback(result);
})
.catch(err => {
callback(null);
});

if (arguments.length === 1) {
// Mode 1: Initialization - run once at test setup
const [callback] = arguments;
initialize().then(callback).catch(() => callback(false));
} else {
Member:
We should keep it simple and just have one mode of operation.
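
One way to read this suggestion (a sketch under that assumption, not a prescribed design): always pass a JSON array of tests and let the script initialize unconditionally, so every caller has the same shape. On the Python side that could collapse both tests onto one hypothetical helper:

```python
import json

def evaluate_targeting(selenium, filter_expression_path, targeting_tests):
    # Single call shape: always a JSON array, even for one expression,
    # so the JS side never has to switch on arguments.length.
    with filter_expression_path.open() as js:
        return Browser.execute_async_script(
            selenium,
            json.dumps(targeting_tests),
            script=js.read(),
            context="chrome",
        )
```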

// Mode 2: Parallel evaluation - evaluate all targeting expressions at once
const [targetingTestsJson, callback] = arguments;
const targetingTests = JSON.parse(targetingTestsJson);

Promise.all(
targetingTests.map(async (test) => {
try {
const result = await remoteSettings(test.targeting, test.recipe);
return { slug: test.slug, result: result, error: null };
} catch (err) {
return { slug: test.slug, result: null, error: err.message };
}
})
).then(callback).catch(() => callback(null));
}
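
For reference, the two call shapes this script expects from the Python side, condensed from the test diff above (js_source stands for the file contents read in the tests; the payload values are illustrative):

```python
# Mode 1: no payload, only Selenium's injected callback -> initialize().
assert Browser.execute_async_script(selenium, script=js_source, context="chrome")

# Mode 2: one JSON payload plus the callback -> parallel evaluation.
results = Browser.execute_async_script(
    selenium,
    json.dumps([{"slug": "example", "targeting": "true", "recipe": "{}"}]),
    script=js_source,
    context="chrome",
)
```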