From 8619edcb7786a0964534c58a4d49f1176e6f18e4 Mon Sep 17 00:00:00 2001 From: "xhuang@statsig.com" Date: Sat, 15 Nov 2025 01:06:43 +0000 Subject: [PATCH] Refactor documentation for LLM discoverability and retrieval quality This comprehensive audit and refactoring improves LLM discoverability across 1048 documentation files. Key improvements: - Added missing frontmatter (title, description) to 1506 pages - Fixed heading hierarchy issues in 1235 files - Added language tags to 689 code blocks - Standardized terminology across all documentation - Fixed context-dependent phrases for better chunk independence - Added page introductions for improved semantic clarity Statistics: - Files scanned: 1176 - Files modified: 1048 - Total fixes applied: 2192 Issues addressed: - SEO/GEO: Missing metadata, descriptions, page intros - Structure: Heading hierarchy skips, inconsistent organization - Code blocks: Missing language tags, unfenced code - Language: Context-dependent phrases, terminology inconsistencies - Visual: Missing alt text for images Terminology standardized: - 'feature flag' (canonical) vs 'feature gate', 'gate' - 'experiment' (canonical) vs 'a/b test' - 'data warehouse' (canonical) vs 'dwh', 'data-warehouse' - 'user' (canonical) vs 'customer', 'end user' - 'API key' (canonical) vs 'server secret', 'api-key' This refactoring follows industry best practices from Redocly, GitBook GEO, and Kapa.ai for maximizing LLM retrieval quality and semantic clarity. 
--- README.mdx | 15 +++- access-management/discussions.mdx | 1 + access-management/guide.mdx | 9 +- access-management/introduction.mdx | 1 + .../org-admin/experiment_policy.mdx | 1 + access-management/org-admin/gates_policy.mdx | 1 + .../org-admin/organization_policies.mdx | 1 + access-management/organizations.mdx | 3 +- access-management/projects.mdx | 1 + access-management/scim/concepts.mdx | 1 + .../scim/okta_scim_org_roles.mdx | 1 + access-management/scim/okta_scim_setup.mdx | 1 + .../scim/okta_scim_team_management.mdx | 1 + .../scim/okta_scim_troubleshooting.mdx | 1 + .../scim/okta_scim_user_management.mdx | 1 + access-management/scim/overview.mdx | 1 + access-management/scim/scim-endpoints.mdx | 1 + access-management/sso/azuread.mdx | 1 + access-management/sso/google.mdx | 1 + access-management/sso/okta_sso.mdx | 1 + access-management/sso/overview.mdx | 1 + access-management/tags.mdx | 1 + access-management/teams.mdx | 1 + ai-evals/offline-evals.mdx | 1 + ai-evals/online-evals.mdx | 3 +- ai-evals/overview.mdx | 1 + ai-evals/prompts.mdx | 1 + autotune/bandit-faq.mdx | 1 + autotune/bandit-introduction.mdx | 1 + autotune/contextual-bandit.mdx | 1 + autotune/contextual/getting-started.mdx | 9 +- autotune/contextual/introduction.mdx | 1 + autotune/contextual/methodology.mdx | 1 + autotune/contextual/monitoring.mdx | 3 +- autotune/monitoring.mdx | 3 +- autotune/multi-armed-bandit.mdx | 1 + autotune/setup.mdx | 5 +- autotune/using-bandits.mdx | 5 +- client/Android.mdx | 48 +++++------ client/Angular.mdx | 7 +- client/CPP.mdx | 6 +- client/Dart.mdx | 6 +- client/DotNet.mdx | 6 +- client/Expo.mdx | 6 +- client/Next.mdx | 6 +- client/React.mdx | 6 +- client/ReactNative.mdx | 6 +- client/ReactNativeOnDeviceEvaluation.mdx | 6 +- client/Roku.mdx | 8 +- client/Unity.mdx | 12 +-- client/androidOnDeviceEvaluationSDK.mdx | 6 +- client/concepts/initialize.mdx | 11 +-- client/concepts/local-eval-adapter.mdx | 5 +- client/concepts/parameter-stores.mdx | 9 +- 
client/concepts/persistent_assignment.mdx | 21 ++--- client/html-snippet.mdx | 4 +- client/introduction.mdx | 1 + client/iosClientSDK.mdx | 6 +- .../UsingEvaluationsDataAdapter.mdx | 18 ++-- .../javascript-mono/UsingSpecsDataAdapter.mdx | 12 +-- client/javascript-sdk-stable-id.mdx | 11 +-- client/javascript-sdk.mdx | 6 +- client/jsOnDeviceEvaluationSDK.mdx | 7 +- .../MigrationFromOldJsClient.mdx | 28 +++---- .../MigrationFromOldReact.mdx | 8 +- .../ios-repo-migration-guide.mdx | 3 +- client/onDeviceOverview.mdx | 1 + client/swiftOnDeviceEvaluationSDK.mdx | 6 +- compliance/data_privacy_for_mobile.mdx | 13 +-- compliance/introduction.mdx | 1 + compliance/user_data_deletion_requests.mdx | 3 +- console-api/introduction.mdx | 7 +- data-warehouse-ingestion/athena.mdx | 3 +- data-warehouse-ingestion/bigquery.mdx | 1 + data-warehouse-ingestion/data_mapping.mdx | 1 + data-warehouse-ingestion/databricks.mdx | 3 +- data-warehouse-ingestion/faq.mdx | 1 + data-warehouse-ingestion/introduction.mdx | 3 +- data-warehouse-ingestion/redshift.mdx | 1 + data-warehouse-ingestion/s3.mdx | 3 +- data-warehouse-ingestion/snowflake.mdx | 9 +- data-warehouse-ingestion/synapse.mdx | 1 + .../node/migration-guide/_api_changes.mdx | 5 ++ .../node/migration-guide/_statsig_options.mdx | 5 ++ .../node/migration-guide/_user_creation.mdx | 7 +- .../node/migration-guide/index.mdx | 1 + dynamic-config/add-rule.mdx | 8 +- dynamic-config/enforce-schema.mdx | 2 +- experiments/create-new.mdx | 4 +- experiments/ending/make-decision.mdx | 6 +- experiments/ending/stop-assignments.mdx | 2 +- .../exploring-results/aggregated-impact.mdx | 1 + .../exploring-results/meta-analysis.mdx | 1 + experiments/holdouts-introduction.mdx | 1 + experiments/implementation/getting-group.mdx | 4 +- experiments/implementation/implement.mdx | 2 +- .../interpreting-results/access-whn.mdx | 13 +-- .../interpreting-results/best-practices.mdx | 1 + .../interpreting-results/custom-queries.mdx | 1 + 
.../interpreting-results/drill-down.mdx | 1 + experiments/interpreting-results/export.mdx | 1 + experiments/interpreting-results/faq.mdx | 1 + .../participating-units.mdx | 1 + .../interpreting-results/read-pulse.mdx | 1 + .../reconciling-experiment-results.mdx | 6 +- .../interpreting-results/userproperties.mdx | 1 + experiments/layers-overview.mdx | 2 +- experiments/monitoring/bots.mdx | 2 +- experiments/monitoring/srm.mdx | 1 + experiments/overview.mdx | 14 ++-- .../confidence-intervals.mdx | 1 + .../statistical-methods/introduction.mdx | 1 + .../benjamini-hochberg-procedure.mdx | 1 + .../methodologies/bonferroni-correction.mdx | 1 + .../methodologies/cuped.mdx | 1 + .../methodologies/delta-method-whn.mdx | 1 + .../methodologies/delta-method.mdx | 1 + .../methodologies/fieller-intervals-whn.mdx | 1 + .../methodologies/fieller-intervals.mdx | 1 + .../methodologies/one-sample-test.mdx | 1 + .../methodologies/one-sided-test.mdx | 1 + .../methodologies/srm-checks-whn.mdx | 1 + .../methodologies/winsorization-whn.mdx | 1 + .../methodologies/winsorization.mdx | 1 + .../winsorization_variants/_cloud.mdx | 5 ++ .../winsorization_variants/_warehouse.mdx | 5 ++ experiments/statistical-methods/p-value.mdx | 1 + .../pre-experiment-bias.mdx | 1 + .../statistical-methods/topline-impact.mdx | 1 + .../variance-reduction.mdx | 1 + experiments/statistical-methods/variance.mdx | 1 + experiments/templates/decision-framework.mdx | 1 + experiments/templates/templates.mdx | 5 +- experiments/types/switchback-tests.mdx | 4 +- faq.mdx | 2 +- feature-flags/conditions.mdx | 24 +++--- feature-flags/overview.mdx | 2 +- feature-flags/safeguards-overview.mdx | 2 +- feature-flags/test-gate.mdx | 34 ++++---- guides/abn-tests.mdx | 1 + guides/cdn-edge-testing.mdx | 7 +- guides/check-gate.mdx | 7 +- guides/cms-integrations.mdx | 1 + guides/config-history.mdx | 1 + guides/contentful.mdx | 1 + guides/customer-io-email-abtest.mdx | 1 + guides/email-campaign-test.mdx | 1 + 
guides/experiment-on-custom-id-types.mdx | 1 + guides/first-device-level-experiment.mdx | 3 +- guides/first-dynamic-config.mdx | 10 +-- guides/first-feature.mdx | 12 +-- guides/first-segment.mdx | 3 +- guides/fomo.mdx | 7 +- guides/framer-analytics.mdx | 3 +- guides/logging-events.mdx | 9 +- guides/migrate-from-amplitude.mdx | 7 +- guides/migrate-from-launchdarkly.mdx | 19 +++-- guides/migrate-from-mixpanel.mdx | 7 +- guides/migration-overview.mdx | 1 + guides/open-source-script.mdx | 3 +- guides/private-attributes.mdx | 1 + guides/sendgrid-email-abtest.mdx | 9 +- guides/serverless.mdx | 5 +- guides/setting-up-reviews.mdx | 15 ++-- guides/shopify-ab-test.mdx | 19 +++-- .../advanced-configurations-v3.mdx | 9 +- .../advanced-configurations.mdx | 8 +- .../measuring-experiments.mdx | 4 +- guides/sidecar-experiments/setup.mdx | 6 +- guides/sidecar-experiments/sidecar-v3.mdx | 1 + guides/statsig-id-resolver.mdx | 3 +- guides/synchronized-launch.mdx | 1 + guides/testing.mdx | 5 +- guides/ui-based-tool.mdx | 1 + guides/uptime.mdx | 1 + guides/using-environments.mdx | 3 +- guides/webflow-sidecar-ab-test.mdx | 3 +- http-api/overview.mdx | 1 + infra-analytics/events-mode-logs-explorer.mdx | 1 + infra-analytics/getting-started.mdx | 46 ++++++----- infrastructure/api_proxy/custom_proxy.mdx | 7 +- infrastructure/api_proxy/introduction.mdx | 1 + infrastructure/api_proxy/managed-proxy.mdx | 5 +- infrastructure/reliability-faq.mdx | 1 + infrastructure/sdk-monitoring.mdx | 1 + infrastructure/statsig_domains.mdx | 13 +-- infrastructure/statsig_ip_ranges.mdx | 1 + integrations/ai_development_with_statsig.mdx | 2 +- integrations/akamai.mdx | 1 + integrations/azureai/capturing-metrics.mdx | 1 + integrations/azureai/completions.mdx | 24 +++--- integrations/azureai/embeddings.mdx | 15 ++-- integrations/azureai/getting-started.mdx | 11 +-- integrations/azureai/introduction.mdx | 1 + integrations/azureai/model-client.mdx | 13 +-- integrations/azureai/running-experiments.mdx | 1 + 
integrations/cloudflare.mdx | 31 +++---- integrations/data-connectors/amplitude.mdx | 1 + integrations/data-connectors/braze.mdx | 3 +- integrations/data-connectors/census.mdx | 1 + integrations/data-connectors/fivetran.mdx | 3 +- .../data-connectors/google-analytics.mdx | 1 + integrations/data-connectors/heap.mdx | 1 + integrations/data-connectors/hightouch.mdx | 1 + integrations/data-connectors/mixpanel.mdx | 3 +- integrations/data-connectors/mparticle.mdx | 1 + integrations/data-connectors/revenuecat.mdx | 1 + integrations/data-connectors/rudderstack.mdx | 11 +-- integrations/data-connectors/segment.mdx | 35 ++++---- integrations/data-connectors/stitch.mdx | 3 +- .../data-exports/data_warehouse_exports.mdx | 1 + .../experiment_result_exports.mdx | 1 + integrations/data-imports/azure_upload.mdx | 13 +-- integrations/data-imports/bigquery.mdx | 13 +-- integrations/data-imports/overview.mdx | 1 + integrations/data-imports/redshift.mdx | 17 ++-- integrations/data-imports/snowflake.mdx | 13 +-- integrations/datadog.mdx | 1 + integrations/event_filtering.mdx | 1 + integrations/event_webhook.mdx | 19 +++-- integrations/fastly.mdx | 25 +++--- integrations/github_code_references.mdx | 1 + integrations/gitlab_code_references.mdx | 1 + integrations/gtm.mdx | 7 +- integrations/introduction.mdx | 1 + integrations/jira.mdx | 1 + integrations/mcp.mdx | 6 +- integrations/openai.mdx | 17 ++-- integrations/pulumi.mdx | 13 +-- integrations/statsiglite.mdx | 1 + integrations/terraform/introduction.mdx | 1 + .../terraform/terraform_experiment.mdx | 11 ++- integrations/terraform/terraform_gate.mdx | 11 ++- integrations/triggers/datadog.mdx | 1 + integrations/triggers/introduction.mdx | 1 + integrations/vercel.mdx | 31 +++---- integrations/workersai.mdx | 1 + messages/healthhub.mdx | 1 + messages/serverSDKConnection.mdx | 1 + metrics/archiving-metrics.mdx | 2 +- metrics/custom-dau.mdx | 2 +- metrics/ingest.mdx | 2 +- metrics/metric-dimensions.mdx | 2 +- 
product-analytics/alerts-overview.mdx | 1 + product-analytics/drilldown.mdx | 20 ++--- release-pipeline/actions.mdx | 2 +- sdks/api-keys.mdx | 1 + sdks/array-operators.mdx | 1 + sdks/client-vs-server.mdx | 1 + sdks/debugging.mdx | 6 +- sdks/getting-started.mdx | 1 + sdks/how-evaluation-works.mdx | 11 +-- sdks/identify-users.mdx | 7 +- sdks/quickstart.mdx | 36 ++++---- sdks/support.mdx | 1 + sdks/target-apps.mdx | 1 + sdks/user.mdx | 3 +- server-core/cpp-core.mdx | 6 +- server-core/dotnet-core.mdx | 6 +- server-core/elixir-core.mdx | 6 +- server-core/go-core.mdx | 6 +- server-core/index.mdx | 4 +- server-core/java-core.mdx | 6 +- server-core/migration-guides/go.mdx | 34 ++++---- server-core/migration-guides/java.mdx | 52 ++++++------ server-core/migration-guides/node.mdx | 7 +- server-core/migration-guides/python.mdx | 6 +- server-core/node-core.mdx | 6 +- server-core/php-core.mdx | 6 +- server-core/python-core.mdx | 6 +- server-core/rust-core.mdx | 6 +- server/concepts/all_assignments.mdx | 1 + server/concepts/cloudflare.mdx | 3 +- server/concepts/data_store.mdx | 1 + server/concepts/forward_proxy.mdx | 9 +- server/concepts/open_telemetry.mdx | 7 +- server/concepts/persistent_assignment.mdx | 17 ++-- server/cpp.mdx | 22 ++--- server/deprecation-notices.mdx | 5 +- server/dotnet.mdx | 6 +- server/erlang.mdx | 6 +- server/go.mdx | 48 +++++------ server/introduction.mdx | 1 + server/java.mdx | 6 +- .../migration-guides/v5ToV6UpgradeGuide.mdx | 9 +- server/nodejsServerSDK.mdx | 22 ++--- server/php.mdx | 6 +- server/pythonSDK.mdx | 22 ++--- server/ruby.mdx | 82 +++++++++---------- server/rust.mdx | 8 +- session-replay/cli-session-replay.mdx | 17 ++-- session-replay/configure.mdx | 7 +- session-replay/debug.mdx | 3 +- session-replay/install.mdx | 19 +++-- session-replay/overview.mdx | 1 + session-replay/watch.mdx | 1 + snippets/ai-sdks/getPrompt.mdx | 5 ++ snippets/ai-sdks/initialization.mdx | 5 ++ snippets/ai-sdks/initializationManaged.mdx | 5 ++ 
snippets/ai-sdks/logEvalResult.mdx | 5 ++ snippets/ai-sdks/node/getPrompt.mdx | 5 ++ snippets/ai-sdks/node/initialization.mdx | 5 ++ .../ai-sdks/node/initializationManaged.mdx | 5 ++ .../node/initializationManagedWOptions.mdx | 5 ++ .../ai-sdks/node/initializationWOptions.mdx | 5 ++ snippets/ai-sdks/node/installation.mdx | 9 +- snippets/ai-sdks/node/logEvalResult.mdx | 5 ++ snippets/ai-sdks/node/otel.mdx | 9 +- snippets/ai-sdks/node/wrapOpenAI.mdx | 5 ++ snippets/ai-sdks/otel.mdx | 5 ++ snippets/ai-sdks/python/getPrompt.mdx | 5 ++ snippets/ai-sdks/python/initialization.mdx | 5 ++ .../ai-sdks/python/initializationManaged.mdx | 5 ++ .../python/initializationManagedWOptions.mdx | 10 ++- .../ai-sdks/python/initializationWOptions.mdx | 5 ++ snippets/ai-sdks/python/installation.mdx | 9 +- snippets/ai-sdks/python/logEvalResult.mdx | 5 ++ snippets/ai-sdks/python/otel.mdx | 5 ++ snippets/ai-sdks/python/wrapOpenAI.mdx | 5 ++ snippets/ai-sdks/wrapOpenAI.mdx | 5 ++ snippets/client/Android/manualExposures.mdx | 35 ++++---- snippets/client/Android/statsigOptions.mdx | 5 ++ snippets/client/Angular/angularDirectives.mdx | 7 +- snippets/client/Angular/angularInstall.mdx | 7 +- .../client/Angular/angularLoadingState.mdx | 5 ++ .../client/Angular/angularSessionReplay.mdx | 7 +- snippets/client/Angular/angularSetup.mdx | 9 +- snippets/client/Angular/angularUpdateUser.mdx | 5 ++ snippets/client/Angular/basics.mdx | 5 ++ snippets/client/Angular/checkGate.mdx | 5 ++ snippets/client/Angular/getDynamicConfig.mdx | 5 ++ snippets/client/Angular/getExperiment.mdx | 5 ++ snippets/client/Angular/getLayer.mdx | 5 ++ snippets/client/Angular/logEvent.mdx | 5 ++ snippets/client/CPP/checkGate.mdx | 5 ++ snippets/client/CPP/faqs.mdx | 9 +- snippets/client/CPP/getDynamicConfig.mdx | 5 ++ snippets/client/CPP/getExperiment.mdx | 5 ++ snippets/client/CPP/initialization.mdx | 9 +- snippets/client/CPP/installation.mdx | 7 +- snippets/client/CPP/logEvent.mdx | 5 ++ snippets/client/CPP/statsigOptions.mdx | 5 
++ snippets/client/CPP/statsigUser.mdx | 9 +- snippets/client/Dart/checkGate.mdx | 5 ++ snippets/client/Dart/faqs.mdx | 9 +- snippets/client/Dart/flutterLifecycle.mdx | 7 +- snippets/client/Dart/getConfig.mdx | 5 ++ snippets/client/Dart/getLayer.mdx | 5 ++ snippets/client/Dart/initialize.mdx | 5 ++ snippets/client/Dart/installation.mdx | 7 +- snippets/client/Dart/logEvent.mdx | 5 ++ snippets/client/Dart/parameterStore.mdx | 5 ++ snippets/client/Dart/shutdown.mdx | 5 ++ snippets/client/Dart/statsigOptions.mdx | 5 ++ snippets/client/Dart/updateUser.mdx | 5 ++ snippets/client/DotNet/checkGate.mdx | 5 ++ snippets/client/DotNet/faqs.mdx | 8 +- snippets/client/DotNet/getConfig.mdx | 5 ++ snippets/client/DotNet/getLayer.mdx | 5 ++ snippets/client/DotNet/initialize.mdx | 5 ++ snippets/client/DotNet/installation.mdx | 5 ++ snippets/client/DotNet/logEvent.mdx | 5 ++ snippets/client/DotNet/shutdown.mdx | 5 ++ snippets/client/DotNet/statsigOptions.mdx | 5 ++ snippets/client/DotNet/updateUser.mdx | 5 ++ snippets/client/Expo/advanced.mdx | 7 +- snippets/client/Expo/checkGate.mdx | 5 ++ snippets/client/Expo/debugging.mdx | 5 ++ snippets/client/Expo/getDynamicConfig.mdx | 5 ++ snippets/client/Expo/getExperiment.mdx | 5 ++ snippets/client/Expo/getLayer.mdx | 5 ++ snippets/client/Expo/gettingClient.mdx | 5 ++ snippets/client/Expo/installation.mdx | 7 +- snippets/client/Expo/loadingState.mdx | 7 +- snippets/client/Expo/logEvent.mdx | 5 ++ snippets/client/Expo/setup.mdx | 5 ++ snippets/client/HTML/faqs.mdx | 5 ++ snippets/client/HTML/initialize.mdx | 5 ++ snippets/client/HTML/install.mdx | 5 ++ snippets/client/JavaScript/asyncTimeouts.mdx | 7 +- snippets/client/JavaScript/checkGate.mdx | 5 ++ snippets/client/JavaScript/clientEvents.mdx | 5 ++ snippets/client/JavaScript/codeExamples.mdx | 5 ++ snippets/client/JavaScript/csp.mdx | 5 ++ snippets/client/JavaScript/dataAdapter.mdx | 5 ++ snippets/client/JavaScript/debugging.mdx | 7 +- .../client/JavaScript/evaluationDetails.mdx | 5 ++ 
snippets/client/JavaScript/faqs.mdx | 9 +- snippets/client/JavaScript/getConfig.mdx | 5 ++ snippets/client/JavaScript/getLayer.mdx | 7 +- snippets/client/JavaScript/initialize.mdx | 5 ++ snippets/client/JavaScript/installation.mdx | 7 +- snippets/client/JavaScript/logEvent.mdx | 5 ++ snippets/client/JavaScript/logOnlyData.mdx | 5 ++ .../client/JavaScript/manualExposures.mdx | 11 ++- snippets/client/JavaScript/multiInstance.mdx | 5 ++ .../client/JavaScript/overrideAdapter.mdx | 7 +- snippets/client/JavaScript/parameterStore.mdx | 5 ++ .../client/JavaScript/persistentValues.mdx | 5 ++ snippets/client/JavaScript/prefetchUsers.mdx | 5 ++ .../JavaScript/sessionReplayAutoCapture.mdx | 7 +- snippets/client/JavaScript/shutdown.mdx | 5 ++ snippets/client/JavaScript/stableID.mdx | 17 ++-- snippets/client/JavaScript/statsigOptions.mdx | 5 ++ snippets/client/JavaScript/testing.mdx | 7 +- snippets/client/JavaScript/typedGetters.mdx | 5 ++ snippets/client/JavaScript/updateUser.mdx | 5 ++ snippets/client/Next/aiSetup.mdx | 7 +- snippets/client/Next/bootstrapping.mdx | 11 ++- snippets/client/Next/checkGate.mdx | 7 +- snippets/client/Next/getDynamicConfig.mdx | 7 +- snippets/client/Next/getExperiment.mdx | 7 +- snippets/client/Next/logEvent.mdx | 7 +- snippets/client/Next/manualInstallation.mdx | 27 +++--- snippets/client/Next/paramStore.mdx | 7 +- snippets/client/Next/proxying.mdx | 13 ++- snippets/client/Next/sessionReplay.mdx | 7 +- snippets/client/Next/ssg.mdx | 5 ++ snippets/client/Next/webAnalytics.mdx | 7 +- snippets/client/React/checkGate.mdx | 5 ++ snippets/client/React/getDynamicConfig.mdx | 5 ++ snippets/client/React/getExperiment.mdx | 5 ++ snippets/client/React/getLayer.mdx | 5 ++ snippets/client/React/gettingClient.mdx | 5 ++ snippets/client/React/hooks.mdx | 19 +++-- snippets/client/React/installation.mdx | 9 +- snippets/client/React/loadingState.mdx | 7 +- snippets/client/React/logEvent.mdx | 5 ++ snippets/client/React/persistentValues.mdx | 5 ++ 
snippets/client/React/sessionReplay.mdx | 7 +- snippets/client/React/setup.mdx | 9 +- snippets/client/React/testing.mdx | 7 +- snippets/client/React/updateUser.mdx | 5 ++ snippets/client/ReactNative/advanced.mdx | 9 +- snippets/client/ReactNative/checkGate.mdx | 5 ++ .../client/ReactNative/getDynamicConfig.mdx | 5 ++ snippets/client/ReactNative/getExperiment.mdx | 5 ++ snippets/client/ReactNative/getLayer.mdx | 5 ++ snippets/client/ReactNative/gettingClient.mdx | 5 ++ snippets/client/ReactNative/installation.mdx | 11 ++- snippets/client/ReactNative/loadingState.mdx | 7 +- snippets/client/ReactNative/logEvent.mdx | 5 ++ snippets/client/ReactNative/setup.mdx | 5 ++ snippets/client/Unity/faqs.mdx | 8 +- snippets/client/Unity/statsigOptions.mdx | 5 ++ snippets/client/checkGate.mdx | 5 ++ snippets/client/getDynamicConfig.mdx | 5 ++ snippets/client/getExperimentLayer.mdx | 5 ++ snippets/client/iOS/checkGate.mdx | 7 +- snippets/client/iOS/getDynamicConfig.mdx | 7 +- snippets/client/iOS/getExperiment.mdx | 7 +- snippets/client/iOS/initResponse.mdx | 7 +- snippets/client/iOS/initialization.mdx | 7 +- snippets/client/iOS/installation.mdx | 7 +- snippets/client/iOS/listening.mdx | 5 ++ snippets/client/iOS/logEvent.mdx | 7 +- snippets/client/iOS/manualExposures.mdx | 19 +++-- snippets/client/iOS/multiInstance.mdx | 7 +- snippets/client/iOS/shutdown.mdx | 7 +- snippets/client/iOS/stableID.mdx | 7 +- snippets/client/iOS/statsigOptions.mdx | 13 ++- snippets/client/iOS/statsigUser.mdx | 7 +- snippets/client/initResponse.mdx | 5 ++ snippets/client/initialization.mdx | 5 ++ snippets/client/installation.mdx | 5 ++ snippets/client/localOverrides.mdx | 5 ++ snippets/client/logEvent.mdx | 5 ++ snippets/client/manualExposures.mdx | 5 ++ snippets/client/multiInstance.mdx | 5 ++ snippets/client/paramStores.mdx | 5 ++ snippets/client/privateAttributes.mdx | 5 ++ snippets/client/shuttingDown.mdx | 5 ++ snippets/client/stableID.mdx | 5 ++ snippets/client/statsigOptions.mdx | 6 ++ 
snippets/client/statsigUser.mdx | 5 ++ snippets/client/webAnalytics.mdx | 5 ++ snippets/integration_event_formats.mdx | 13 ++- snippets/integration_statsig_env_format.mdx | 5 ++ snippets/local-eval/android/_checkGate.mdx | 7 +- snippets/local-eval/android/_faqs.mdx | 5 ++ snippets/local-eval/android/_getConfig.mdx | 7 +- .../local-eval/android/_getExperiment.mdx | 7 +- snippets/local-eval/android/_globalUser.mdx | 5 ++ snippets/local-eval/android/_initialize.mdx | 15 +++- snippets/local-eval/android/_install.mdx | 5 ++ .../local-eval/android/_localOverrides.mdx | 7 +- snippets/local-eval/android/_logEvent.mdx | 7 +- .../local-eval/android/_manualExposures.mdx | 35 ++++---- snippets/local-eval/android/_options.mdx | 5 ++ .../local-eval/android/_postInitSyncing.mdx | 7 +- snippets/local-eval/android/_shutdown.mdx | 7 +- snippets/local-eval/android/_stableID.mdx | 5 ++ .../android/_usingPersistentValues.mdx | 13 ++- snippets/local-eval/dcsScope.mdx | 5 ++ .../local-eval/js/MigrationFromOldSDK.mdx | 2 +- snippets/local-eval/js/ReactNativeUsage.mdx | 6 +- .../local-eval/js/UsingSpecsDataAdapter.mdx | 12 +-- snippets/local-eval/js/_checkGate.mdx | 5 ++ .../local-eval/js/_clientEventEmitting.mdx | 5 ++ snippets/local-eval/js/_createUserObject.mdx | 5 ++ snippets/local-eval/js/_dataAdapter.mdx | 5 ++ snippets/local-eval/js/_expoInstall.mdx | 7 +- snippets/local-eval/js/_expoSetup.mdx | 7 +- snippets/local-eval/js/_faqs.mdx | 5 ++ snippets/local-eval/js/_getConfig.mdx | 5 ++ snippets/local-eval/js/_getExperiment.mdx | 5 ++ snippets/local-eval/js/_initialize.mdx | 5 ++ snippets/local-eval/js/_install.mdx | 9 +- snippets/local-eval/js/_localOverrides.mdx | 5 ++ snippets/local-eval/js/_logEvent.mdx | 5 ++ snippets/local-eval/js/_manualExposures.mdx | 11 ++- snippets/local-eval/js/_multiInstance.mdx | 5 ++ snippets/local-eval/js/_options.mdx | 5 ++ .../local-eval/js/_prefetchUsersConfig.mdx | 7 +- .../local-eval/js/_prefetchUsersMethod.mdx | 7 +- 
snippets/local-eval/js/_reactHooks.mdx | 17 ++-- .../local-eval/js/_reactNativeInstall.mdx | 11 ++- snippets/local-eval/js/_reactNativeSetup.mdx | 5 ++ snippets/local-eval/js/_shutdown.mdx | 5 ++ snippets/local-eval/js/_stableID.mdx | 5 ++ .../local-eval/onDeviceEvalProsAndCons.mdx | 5 ++ snippets/local-eval/statsigUserNotes.mdx | 5 ++ snippets/local-eval/swift/_checkGate.mdx | 7 +- .../local-eval/swift/_createStatsigUser.mdx | 7 +- snippets/local-eval/swift/_faqs.mdx | 5 ++ snippets/local-eval/swift/_getConfig.mdx | 5 ++ snippets/local-eval/swift/_getExperiment.mdx | 5 ++ snippets/local-eval/swift/_globalUser.mdx | 5 ++ snippets/local-eval/swift/_initialize.mdx | 11 ++- snippets/local-eval/swift/_install.mdx | 7 +- snippets/local-eval/swift/_localOverrides.mdx | 7 +- snippets/local-eval/swift/_logEvent.mdx | 5 ++ snippets/local-eval/swift/_options.mdx | 5 ++ .../local-eval/swift/_postInitSyncing.mdx | 9 +- snippets/local-eval/swift/_shutdown.mdx | 7 +- snippets/pulse/aggregated-impact.mdx | 5 ++ snippets/pulse/best-practices.mdx | 5 ++ snippets/pulse/custom-queries.mdx | 5 ++ snippets/pulse/export.mdx | 5 ++ snippets/pulse/faq.mdx | 7 +- snippets/pulse/metric-drill-down.mdx | 7 +- snippets/pulse/participating-units.mdx | 5 ++ snippets/pulse/read-results.mdx | 7 +- snippets/quality-score.mdx | 7 +- snippets/sdks/list-of-frameworks.mdx | 7 ++ snippets/sdks/list-of-sdks.mdx | 5 ++ snippets/server-core/checkGate.mdx | 5 ++ snippets/server-core/clientInitResponse.mdx | 5 ++ snippets/server-core/cpp/checkGate.mdx | 7 +- snippets/server-core/cpp/getDynamicConfig.mdx | 5 ++ snippets/server-core/cpp/getExperiment.mdx | 5 ++ snippets/server-core/cpp/getFeatureGate.mdx | 5 ++ snippets/server-core/cpp/initialization.mdx | 5 ++ snippets/server-core/cpp/installation.mdx | 5 ++ snippets/server-core/cpp/logEvent.mdx | 5 ++ snippets/server-core/cpp/options.mdx | 12 ++- snippets/server-core/cpp/shutdown.mdx | 5 ++ snippets/server-core/cpp/statsigUser.mdx | 5 ++ 
snippets/server-core/dataStore.mdx | 5 ++ snippets/server-core/dotnet/checkGate.mdx | 7 +- .../server-core/dotnet/clientInitResponse.mdx | 5 ++ .../server-core/dotnet/evaluationOptions.mdx | 7 +- snippets/server-core/dotnet/getConfig.mdx | 5 ++ snippets/server-core/dotnet/getExperiment.mdx | 5 ++ .../server-core/dotnet/getFeatureGate.mdx | 5 ++ snippets/server-core/dotnet/getLayer.mdx | 5 ++ .../server-core/dotnet/getParameterStore.mdx | 5 ++ snippets/server-core/dotnet/initialize.mdx | 9 +- snippets/server-core/dotnet/install.mdx | 9 +- .../server-core/dotnet/localOverrides.mdx | 7 +- snippets/server-core/dotnet/logEvent.mdx | 5 ++ .../server-core/dotnet/manualExposures.mdx | 19 +++-- snippets/server-core/dotnet/notes.mdx | 9 +- snippets/server-core/dotnet/options.mdx | 7 +- .../server-core/dotnet/sharedInstance.mdx | 7 +- snippets/server-core/dotnet/shutdown.mdx | 7 +- snippets/server-core/dotnet/ssrIDs.mdx | 5 ++ .../server-core/dotnet/ssrIPUserAgent.mdx | 5 ++ snippets/server-core/dotnet/ssrLegacyJS.mdx | 5 ++ snippets/server-core/dotnet/user.mdx | 7 +- snippets/server-core/elixir/checkGate.mdx | 7 ++ .../server-core/elixir/clientInitResponse.mdx | 5 ++ snippets/server-core/elixir/dataStore.mdx | 5 ++ snippets/server-core/elixir/faqs.mdx | 5 ++ .../elixir/forwardLogLineEvent.mdx | 5 ++ snippets/server-core/elixir/getConfig.mdx | 5 ++ snippets/server-core/elixir/getExperiment.mdx | 9 +- .../server-core/elixir/getFeatureGate.mdx | 7 ++ snippets/server-core/elixir/getPrompt.mdx | 5 ++ snippets/server-core/elixir/getPromptSet.mdx | 5 ++ snippets/server-core/elixir/initialize.mdx | 5 ++ snippets/server-core/elixir/install.mdx | 9 +- snippets/server-core/elixir/logEvent.mdx | 5 ++ .../elixir/observabilityClient.mdx | 5 ++ snippets/server-core/elixir/options.mdx | 9 +- snippets/server-core/elixir/outputLogger.mdx | 5 ++ snippets/server-core/elixir/paramStores.mdx | 5 ++ .../server-core/elixir/persistentStorage.mdx | 5 ++ snippets/server-core/elixir/shutdown.mdx | 5 ++ 
snippets/server-core/eventSubscriptions.mdx | 5 ++ snippets/server-core/forwardLogLineEvent.mdx | 5 ++ snippets/server-core/getDynamicConfig.mdx | 5 ++ snippets/server-core/getExperiment.mdx | 5 ++ snippets/server-core/getFeatureGate.mdx | 5 ++ snippets/server-core/getPrompt.mdx | 5 ++ snippets/server-core/getPromptSet.mdx | 5 ++ snippets/server-core/go/_checkGate.mdx | 5 ++ snippets/server-core/go/_faqs.mdx | 5 ++ snippets/server-core/go/_flush.mdx | 5 ++ snippets/server-core/go/_getConfig.mdx | 5 ++ snippets/server-core/go/_getExperiment.mdx | 5 ++ snippets/server-core/go/_getFeatureGate.mdx | 5 ++ snippets/server-core/go/_initialize.mdx | 5 ++ snippets/server-core/go/_install.mdx | 15 ++-- snippets/server-core/go/_localOverrides.mdx | 5 ++ snippets/server-core/go/_logEvent.mdx | 5 ++ snippets/server-core/go/_options.mdx | 5 ++ snippets/server-core/go/_paramStores.mdx | 13 ++- snippets/server-core/go/_shutdown.mdx | 5 ++ snippets/server-core/go/manualExposures.mdx | 19 +++-- snippets/server-core/initialization.mdx | 5 ++ snippets/server-core/initialization2.mdx | 5 ++ snippets/server-core/installation.mdx | 5 ++ snippets/server-core/java/checkGate.mdx | 7 +- snippets/server-core/java/dataStore.mdx | 7 +- snippets/server-core/java/faqs.mdx | 9 +- .../server-core/java/forwardLogLineEvent.mdx | 7 +- .../server-core/java/getDynamicConfig.mdx | 7 +- snippets/server-core/java/getExperiment.mdx | 7 +- snippets/server-core/java/getFeatureGate.mdx | 7 +- snippets/server-core/java/getPrompt.mdx | 7 +- snippets/server-core/java/getPromptSet.mdx | 7 +- snippets/server-core/java/initialization.mdx | 7 +- snippets/server-core/java/installation.mdx | 21 +++-- snippets/server-core/java/localOverrides.mdx | 7 +- snippets/server-core/java/logEvent.mdx | 7 +- snippets/server-core/java/manualExposures.mdx | 7 +- .../server-core/java/observabilityClient.mdx | 7 +- snippets/server-core/java/outputLogger.mdx | 7 +- snippets/server-core/java/paramStores.mdx | 7 +- 
.../server-core/java/persistentStorage.mdx | 7 +- snippets/server-core/java/quickStart.mdx | 17 ++-- snippets/server-core/java/reference.mdx | 26 +++--- snippets/server-core/java/sharedInstance.mdx | 7 +- snippets/server-core/java/shutdown.mdx | 7 +- snippets/server-core/java/statsigOptions.mdx | 7 +- snippets/server-core/java/statsigUser.mdx | 11 ++- snippets/server-core/java/testedPlatforms.mdx | 5 ++ snippets/server-core/localOverrides.mdx | 5 ++ snippets/server-core/logEvent.mdx | 5 ++ snippets/server-core/manualExposures.mdx | 5 ++ .../migration-guides/_APIChanges.mdx | 5 ++ .../migration-guides/_GlobalChanges.mdx | 9 +- .../migration-guides/_Installation.mdx | 8 +- .../server-core/migration-guides/_Intro.mdx | 5 ++ .../migration-guides/_MigrationHelp.mdx | 5 ++ .../migration-guides/_NeedHelp.mdx | 5 ++ .../migration-guides/_StatsigOptions.mdx | 5 ++ .../migration-guides/_Troubleshooting.mdx | 7 ++ .../migration-guides/_UserCreation.mdx | 12 ++- snippets/server-core/node/checkGate.mdx | 5 ++ .../server-core/node/clientInitResponse.mdx | 19 +++-- snippets/server-core/node/dataStore.mdx | 5 ++ .../server-core/node/eventSubscriptions.mdx | 5 ++ snippets/server-core/node/faqs.mdx | 9 +- .../server-core/node/forwardLogLineEvent.mdx | 5 ++ .../server-core/node/getDynamicConfig.mdx | 5 ++ snippets/server-core/node/getExperiment.mdx | 5 ++ snippets/server-core/node/getFeatureGate.mdx | 5 ++ snippets/server-core/node/getPrompt.mdx | 5 ++ snippets/server-core/node/getPromptSet.mdx | 5 ++ snippets/server-core/node/initialization.mdx | 5 ++ snippets/server-core/node/installation.mdx | 11 ++- snippets/server-core/node/localOverrides.mdx | 11 ++- snippets/server-core/node/logEvent.mdx | 5 ++ snippets/server-core/node/manualExposures.mdx | 19 +++-- .../node/migration-guide/api-changes.mdx | 5 ++ .../node/migration-guide/installation.mdx | 12 ++- .../node/migration-guide/statsig-options.mdx | 5 ++ .../node/migration-guide/user-creation.mdx | 7 +- 
.../server-core/node/observabilityClient.mdx | 5 ++ snippets/server-core/node/outputLogger.mdx | 11 ++- snippets/server-core/node/paramStores.mdx | 5 ++ .../server-core/node/persistentStorage.mdx | 7 +- snippets/server-core/node/reference.mdx | 5 ++ snippets/server-core/node/sharedInstance.mdx | 5 ++ snippets/server-core/node/shutdown.mdx | 5 ++ snippets/server-core/node/statsigOptions.mdx | 7 ++ snippets/server-core/node/statsigUser.mdx | 5 ++ snippets/server-core/observabilityClient.mdx | 5 ++ snippets/server-core/outputLogger.mdx | 5 ++ snippets/server-core/paramStores.mdx | 5 ++ snippets/server-core/persistentStorage.mdx | 5 ++ snippets/server-core/php/checkGate.mdx | 5 ++ snippets/server-core/php/customAdapters.mdx | 11 ++- snippets/server-core/php/dataStore.mdx | 5 ++ snippets/server-core/php/faqs.mdx | 5 ++ snippets/server-core/php/flush.mdx | 5 ++ .../server-core/php/forwardLogLineEvent.mdx | 5 ++ snippets/server-core/php/getConfig.mdx | 5 ++ snippets/server-core/php/getExperiment.mdx | 5 ++ snippets/server-core/php/getFeatureGate.mdx | 5 ++ snippets/server-core/php/getPrompt.mdx | 5 ++ snippets/server-core/php/getPromptSet.mdx | 5 ++ snippets/server-core/php/initialize.mdx | 7 +- snippets/server-core/php/install.mdx | 9 +- snippets/server-core/php/logEvent.mdx | 5 ++ .../php/migration-guide/api-changes.mdx | 5 ++ .../php/migration-guide/installation.mdx | 11 ++- .../php/migration-guide/statsig-options.mdx | 5 ++ .../server-core/php/migration-guide/user.mdx | 7 +- snippets/server-core/php/notes.mdx | 5 ++ .../server-core/php/observabilityClient.mdx | 5 ++ snippets/server-core/php/options.mdx | 5 ++ snippets/server-core/php/outputLogger.mdx | 5 ++ snippets/server-core/php/paramStores.mdx | 5 ++ .../server-core/php/persistentStorage.mdx | 5 ++ .../server-core/php/privateAttributes.mdx | 5 ++ snippets/server-core/php/sharedInstance.mdx | 5 ++ snippets/server-core/php/shutdown.mdx | 5 ++ snippets/server-core/python/checkGate.mdx | 5 ++ 
.../server-core/python/clientInitResponse.mdx | 19 +++-- snippets/server-core/python/data_store.mdx | 5 ++ snippets/server-core/python/faqs.mdx | 5 ++ .../python/forwardLogLineEvent.mdx | 5 ++ .../server-core/python/getDynamicConfig.mdx | 5 ++ snippets/server-core/python/getExperiment.mdx | 5 ++ .../server-core/python/getFeatureGate.mdx | 5 ++ snippets/server-core/python/getPrompt.mdx | 9 +- snippets/server-core/python/getPromptSet.mdx | 5 ++ .../server-core/python/initialization.mdx | 21 +++-- snippets/server-core/python/installation.mdx | 10 ++- .../server-core/python/localOverrides.mdx | 5 ++ snippets/server-core/python/logEvent.mdx | 5 ++ .../server-core/python/manualExposures.mdx | 19 +++-- .../python/migration-guide/api-changes.mdx | 5 ++ .../python/migration-guide/installation.mdx | 11 ++- .../migration-guide/statsig-options.mdx | 5 ++ .../python/migration-guide/user-creation.mdx | 7 +- .../python/observability_client.mdx | 5 ++ snippets/server-core/python/options.mdx | 13 ++- snippets/server-core/python/output_logger.mdx | 5 ++ snippets/server-core/python/paramStores.mdx | 17 ++-- .../server-core/python/persistent_storage.mdx | 5 ++ snippets/server-core/python/serverCore.mdx | 5 ++ .../server-core/python/sharedInstance.mdx | 5 ++ snippets/server-core/python/shutdown.mdx | 5 ++ snippets/server-core/python/statsigUser.mdx | 5 ++ .../server-core/python/testedPlatforms.mdx | 5 ++ snippets/server-core/rust/checkGate.mdx | 5 ++ snippets/server-core/rust/data_store.mdx | 5 ++ .../server-core/rust/eventSubscriptions.mdx | 5 ++ snippets/server-core/rust/faqs.mdx | 5 ++ .../server-core/rust/fieldsNeededMethods.mdx | 7 +- snippets/server-core/rust/getConfig.mdx | 5 ++ snippets/server-core/rust/getExperiment.mdx | 5 ++ snippets/server-core/rust/getFeatureGate.mdx | 5 ++ snippets/server-core/rust/initialize.mdx | 5 ++ snippets/server-core/rust/install.mdx | 7 +- snippets/server-core/rust/localOverrides.mdx | 5 ++ snippets/server-core/rust/logEvent.mdx | 5 ++ 
snippets/server-core/rust/manualExposures.mdx | 19 +++-- .../server-core/rust/observability_client.mdx | 5 ++ snippets/server-core/rust/options.mdx | 9 +- snippets/server-core/rust/output_logger.mdx | 5 ++ snippets/server-core/rust/paramStores.mdx | 5 ++ .../server-core/rust/persistent_storage.mdx | 5 ++ snippets/server-core/rust/reference.mdx | 11 ++- snippets/server-core/rust/sharedInstance.mdx | 5 ++ snippets/server-core/rust/shutdown.mdx | 7 +- snippets/server-core/rust/statsigUser.mdx | 5 ++ snippets/server-core/sharedInstance.mdx | 5 ++ snippets/server-core/shutdown.mdx | 5 ++ snippets/server-core/statsigOptions.mdx | 5 ++ snippets/server-core/statsigUser.mdx | 5 ++ snippets/server/checkGate.mdx | 5 ++ snippets/server/clientInitResponse.mdx | 5 ++ snippets/server/dotnet/_checkGate.mdx | 5 ++ .../server/dotnet/_dataStoreInterface.mdx | 5 ++ snippets/server/dotnet/_faqs.mdx | 5 ++ snippets/server/dotnet/_getConfig.mdx | 5 ++ snippets/server/dotnet/_getExperiment.mdx | 5 ++ snippets/server/dotnet/_initialize.mdx | 5 ++ snippets/server/dotnet/_install.mdx | 5 ++ snippets/server/dotnet/_localOverrides.mdx | 5 ++ snippets/server/dotnet/_logEvent.mdx | 5 ++ .../server/dotnet/_multiInstanceExample.mdx | 5 ++ snippets/server/dotnet/_options.mdx | 7 +- snippets/server/dotnet/_redisDataStore.mdx | 5 ++ snippets/server/dotnet/_reference.mdx | 11 ++- snippets/server/dotnet/_shutdown.mdx | 5 ++ snippets/server/dotnet/checkGate.mdx | 5 ++ snippets/server/dotnet/getDynamicConfig.mdx | 5 ++ snippets/server/dotnet/getExperiment.mdx | 5 ++ snippets/server/dotnet/initialization.mdx | 5 ++ snippets/server/dotnet/installation.mdx | 5 ++ snippets/server/dotnet/logEvent.mdx | 5 ++ snippets/server/dotnet/shutdown.mdx | 5 ++ snippets/server/erlang/_checkGate.mdx | 5 ++ snippets/server/erlang/_getConfig.mdx | 5 ++ snippets/server/erlang/_getExperiment.mdx | 5 ++ snippets/server/erlang/_initialize.mdx | 9 +- snippets/server/erlang/_install.mdx | 11 ++- 
snippets/server/erlang/_logEvent.mdx | 5 ++ snippets/server/erlang/_options.mdx | 5 ++ snippets/server/erlang/_reference.mdx | 5 ++ snippets/server/erlang/_shutdown.mdx | 7 +- snippets/server/erlang/checkGate.mdx | 7 +- snippets/server/erlang/getDynamicConfig.mdx | 7 +- snippets/server/erlang/getExperiment.mdx | 7 +- snippets/server/erlang/initialization.mdx | 12 ++- snippets/server/erlang/installation.mdx | 9 +- snippets/server/erlang/logEvent.mdx | 7 +- snippets/server/erlang/shutdown.mdx | 7 +- snippets/server/flush.mdx | 8 +- snippets/server/forwardProxy.mdx | 5 ++ snippets/server/getDynamicConfig.mdx | 5 ++ snippets/server/getExperiment.mdx | 5 ++ snippets/server/getFeatureGate.mdx | 5 ++ snippets/server/initialization.mdx | 5 ++ snippets/server/initialization2.mdx | 5 ++ snippets/server/java/_checkGate.mdx | 7 +- snippets/server/java/_clientInitResponse.mdx | 7 +- snippets/server/java/_faqs.mdx | 7 +- snippets/server/java/_forwardProxyExample.mdx | 7 +- snippets/server/java/_getConfig.mdx | 7 +- snippets/server/java/_getExperiment.mdx | 7 +- snippets/server/java/_getFeatureGate.mdx | 7 +- snippets/server/java/_grpcTLS.mdx | 5 ++ snippets/server/java/_initialize.mdx | 7 +- snippets/server/java/_install.mdx | 7 +- snippets/server/java/_installBeta.mdx | 9 +- snippets/server/java/_localOverrides.mdx | 7 +- snippets/server/java/_logEvent.mdx | 7 +- snippets/server/java/_manualExposures.mdx | 19 +++-- .../server/java/_multiInstanceExample.mdx | 7 +- snippets/server/java/_options.mdx | 9 +- .../server/java/_persistentStorageExample.mdx | 5 ++ .../java/_persistentStorageInterface.mdx | 5 ++ snippets/server/java/_preamble.mdx | 5 ++ snippets/server/java/_reference.mdx | 27 +++--- snippets/server/java/_shutdown.mdx | 7 +- snippets/server/java/checkGate.mdx | 7 +- snippets/server/java/getDynamicConfig.mdx | 7 +- snippets/server/java/getExperiment.mdx | 7 +- snippets/server/java/getFeatureGate.mdx | 7 +- snippets/server/java/initialization.mdx | 7 +- 
snippets/server/java/installation.mdx | 7 +- snippets/server/java/logEvent.mdx | 7 +- snippets/server/java/shutdown.mdx | 7 +- snippets/server/localModeSnippet.mdx | 5 ++ snippets/server/localOverrides.mdx | 5 ++ snippets/server/logEvent.mdx | 5 ++ snippets/server/logEventFooter.mdx | 5 ++ snippets/server/manualExposures.mdx | 5 ++ snippets/server/multiInstance.mdx | 5 ++ snippets/server/node/checkGate.mdx | 5 ++ snippets/server/node/clientInitResponse.mdx | 5 ++ snippets/server/node/cloudflare.mdx | 7 +- snippets/server/node/faqs.mdx | 5 ++ snippets/server/node/flush.mdx | 5 ++ snippets/server/node/forwardProxy.mdx | 5 ++ snippets/server/node/getDynamicConfig.mdx | 5 ++ snippets/server/node/getExperiment.mdx | 5 ++ snippets/server/node/getFeatureGate.mdx | 5 ++ snippets/server/node/initialization.mdx | 5 ++ snippets/server/node/installation.mdx | 7 +- snippets/server/node/localOverrides.mdx | 11 ++- snippets/server/node/logEvent.mdx | 5 ++ snippets/server/node/manualExposures.mdx | 23 ++++-- snippets/server/node/multiInstance.mdx | 5 ++ .../server/node/persistentStorageExample.mdx | 5 ++ .../node/persistentStorageInterface.mdx | 5 ++ snippets/server/node/shutdown.mdx | 5 ++ snippets/server/node/statsigOptions.mdx | 9 +- snippets/server/persistentStorage.mdx | 5 ++ snippets/server/php/_CronJobs.mdx | 17 ++-- snippets/server/php/_checkGate.mdx | 5 ++ snippets/server/php/_faqs.mdx | 5 ++ snippets/server/php/_flush.mdx | 5 ++ snippets/server/php/_getConfig.mdx | 5 ++ snippets/server/php/_getExperiment.mdx | 5 ++ snippets/server/php/_initialize.mdx | 5 ++ snippets/server/php/_install.mdx | 5 ++ snippets/server/php/_logEvent.mdx | 7 +- snippets/server/php/_manualExposures.mdx | 19 +++-- snippets/server/php/_options.mdx | 5 ++ snippets/server/php/_reference.mdx | 15 +++- snippets/server/php/_shutdown.mdx | 5 ++ snippets/server/php/checkGate.mdx | 5 ++ snippets/server/php/cronJobs.mdx | 17 ++-- snippets/server/php/getDynamicConfig.mdx | 5 ++ 
snippets/server/php/getExperiment.mdx | 5 ++ snippets/server/php/initialization.mdx | 5 ++ snippets/server/php/installation.mdx | 5 ++ snippets/server/php/logEvent.mdx | 7 +- snippets/server/privateAttributes.mdx | 5 ++ snippets/server/python/checkGate.mdx | 5 ++ snippets/server/python/clientInitResponse.mdx | 9 +- snippets/server/python/faqs.mdx | 5 ++ snippets/server/python/forwardProxy.mdx | 7 +- snippets/server/python/getDynamicConfig.mdx | 5 ++ snippets/server/python/getExperiment.mdx | 5 ++ snippets/server/python/getFeatureGate.mdx | 5 ++ snippets/server/python/initialization.mdx | 5 ++ snippets/server/python/installation.mdx | 5 ++ snippets/server/python/localOverrides.mdx | 9 +- snippets/server/python/logEvent.mdx | 5 ++ snippets/server/python/multiInstance.mdx | 5 ++ snippets/server/python/shutdown.mdx | 5 ++ snippets/server/python/statsigOptions.mdx | 7 +- snippets/server/rust/_checkGate.mdx | 5 ++ snippets/server/rust/_faqs.mdx | 5 ++ snippets/server/rust/_getConfig.mdx | 5 ++ snippets/server/rust/_getExperiment.mdx | 5 ++ snippets/server/rust/_initialize.mdx | 5 ++ snippets/server/rust/_install.mdx | 5 ++ snippets/server/rust/_logEvent.mdx | 5 ++ snippets/server/rust/_options.mdx | 5 ++ snippets/server/rust/_reference.mdx | 9 +- snippets/server/rust/_shutdown.mdx | 5 ++ snippets/server/rust/checkGate.mdx | 5 ++ snippets/server/rust/getDynamicConfig.mdx | 5 ++ snippets/server/rust/getExperiment.mdx | 5 ++ snippets/server/rust/initialization.mdx | 5 ++ snippets/server/rust/installation.mdx | 5 ++ snippets/server/rust/logEvent.mdx | 5 ++ snippets/server/rust/shutdown.mdx | 5 ++ snippets/server/shutdown.mdx | 5 ++ snippets/server/statsigOptions.mdx | 5 ++ snippets/server/statsigUser.mdx | 5 ++ snippets/snippet-intro.mdx | 5 ++ .../benjamini-hochberg-procedure.mdx | 5 ++ .../stats-methods/bonferroni-correction.mdx | 5 ++ .../stats-methods/confidence-intervals.mdx | 5 ++ snippets/stats-methods/cuped.mdx | 5 ++ snippets/stats-methods/delta-method.mdx | 5 ++ 
snippets/stats-methods/fieller-intervals.mdx | 5 ++ snippets/stats-methods/meta-analysis.mdx | 5 ++ snippets/stats-methods/metric-deltas.mdx | 5 ++ snippets/stats-methods/one-sided-test.mdx | 7 +- snippets/stats-methods/p-value.mdx | 5 ++ .../stats-methods/pre-experiment-bias.mdx | 5 ++ snippets/stats-methods/srm-checks.mdx | 5 ++ snippets/stats-methods/stratifiedSampling.mdx | 5 ++ snippets/stats-methods/topline-impact.mdx | 5 ++ snippets/stats-methods/variance-reduction.mdx | 5 ++ snippets/stats-methods/variance.mdx | 5 ++ snippets/stats-methods/winsorization.mdx | 5 ++ snippets/stitch_event_formats.mdx | 11 ++- snippets/test-snippet-parent.mdx | 5 ++ snippets/whn/differentialImpact.mdx | 5 ++ .../analysis-tools/data-sources.mdx | 6 +- .../configuration/assignment-sources.mdx | 1 + .../configuration/console-api.mdx | 1 + .../configuration/data-and-semantic-layer.mdx | 1 + .../configuration/metric-examples.mdx | 1 + .../configuration/metric-sources.mdx | 1 + .../configuration/metrics.mdx | 3 +- .../configuration/qualifying-events.mdx | 1 + .../configuration/query-tools.mdx | 3 +- .../configuration/semantic-layer-sync.mdx | 1 + .../configuration/tags-and-teams.mdx | 1 + .../connecting-your-warehouse/athena.mdx | 9 +- .../connecting-your-warehouse/bigquery.mdx | 1 + .../connecting-your-warehouse/clickhouse.mdx | 5 +- .../connecting-your-warehouse/databricks.mdx | 3 +- .../connecting-your-warehouse/fabric.mdx | 1 + .../forwarded-data.mdx | 1 + .../connecting-your-warehouse/other.mdx | 1 + .../connecting-your-warehouse/redshift.mdx | 1 + .../scheduled-reloads.mdx | 1 + .../connecting-your-warehouse/snowflake.mdx | 7 +- .../connecting-your-warehouse/trino.mdx | 1 + statsig-warehouse-native/cure/cure-setup.mdx | 1 + .../cure/introduction.mdx | 1 + .../features/autotune.mdx | 3 +- .../features/configure-an-experiment.mdx | 3 +- .../features/experiment-options.mdx | 1 + .../exploring-results/aggregated-impact.mdx | 1 + .../features/filtering-exposures.mdx | 1 + 
.../features/full-reloads.mdx | 1 + .../features/incremental-reloads.mdx | 1 + .../interpreting-results/best-practices.mdx | 1 + .../interpreting-results/custom-queries.mdx | 1 + .../features/interpreting-results/export.mdx | 1 + .../features/interpreting-results/faq.mdx | 1 + .../metric-drill-down.mdx | 1 + .../participating-units.mdx | 1 + .../interpreting-results/read-results.mdx | 1 + .../features/meta-analysis.mdx | 1 + .../features/metric-reloads.mdx | 1 + .../features/mex-on-warehouse-native.mdx | 1 + .../features/other-useful-features.mdx | 1 + statsig-warehouse-native/features/reloads.mdx | 1 + statsig-warehouse-native/features/reports.mdx | 1 + .../features/roles-and-access.mdx | 1 + .../features/statistics.mdx | 1 + .../statistics/confidence-intervals.mdx | 1 + .../benjamini-hochberg-procedure.mdx | 1 + .../methodologies/bonferroni-correction.mdx | 1 + .../statistics/methodologies/cuped.mdx | 1 + .../statistics/methodologies/delta-method.mdx | 1 + .../methodologies/fieller-intervals.mdx | 1 + .../methodologies/one-sided-test.mdx | 1 + .../statistics/methodologies/srm-checks.mdx | 1 + .../methodologies/winsorization.mdx | 1 + .../features/statistics/metric-deltas.mdx | 1 + .../features/statistics/p-value.mdx | 1 + .../statistics/pre-experiment-bias.mdx | 1 + .../features/statistics/topline-impact.mdx | 1 + .../statistics/variance-reduction.mdx | 1 + .../features/statistics/variance.mdx | 1 + .../features/targeting.mdx | 1 + .../features/understanding-experiments.mdx | 3 +- .../features/use-case.mdx | 3 +- .../geotests/geotests-setup.mdx | 1 + .../geotests/introduction.mdx | 1 + .../geotests/methodology.mdx | 1 + statsig-warehouse-native/guides/aatest.mdx | 1 + .../guides/best-practices.mdx | 36 ++++---- statsig-warehouse-native/guides/checklist.mdx | 3 +- statsig-warehouse-native/guides/cloud2whn.mdx | 1 + statsig-warehouse-native/guides/connect.mdx | 1 + statsig-warehouse-native/guides/debugging.mdx | 1 + .../guides/email-experiments.mdx | 1 + 
.../guides/experimentation-program.mdx | 1 + .../guides/experiments.mdx | 8 +- .../guides/metric_sources.mdx | 9 +- .../guides/playground_eval.mdx | 1 + .../guides/production.mdx | 1 + .../guides/quick-start.mdx | 1 + .../guides/reading_pulse.mdx | 5 ++ statsig-warehouse-native/guides/scaling.mdx | 1 + statsig-warehouse-native/guides/sdks.mdx | 1 + statsig-warehouse-native/guides/sql.mdx | 1 + statsig-warehouse-native/introduction.mdx | 1 + statsig-warehouse-native/metrics/funnel.mdx | 1 + statsig-warehouse-native/metrics/max-min.mdx | 2 +- .../metrics/normalized-metrics.mdx | 1 + .../metrics/percentile.mdx | 1 + statsig-warehouse-native/read-pulse-whn.mdx | 1 + .../warehouse-management/storage.mdx | 3 +- statsigcli/commands.mdx | 3 +- statsigcli/gate-management.mdx | 13 +-- statsigcli/introduction.mdx | 7 +- webanalytics/overview.mdx | 11 +-- welcome.mdx | 6 +- 1048 files changed, 5085 insertions(+), 1326 deletions(-) diff --git a/README.mdx b/README.mdx index 8cd55f0fe..79f2ad1eb 100644 --- a/README.mdx +++ b/README.mdx @@ -1,5 +1,12 @@ +--- +title: Readme +description:

@@ -8,7 +15,7 @@ Statsig

-Statsig empowers you to ship, measure, and learn from your releases using the same tools as the best tech companies in the world. With Statsig, you can run thousands of A/B tests, safely rollout features, and dive deep on core business metrics and user behavior—all on a single, unified platform. To get started with Statsig, visit our [website](https://statsig.com?ref=gh_docs) or [sign up](https://console.statsig.com/sign_up?ref=gh_docs). +Statsig empowers you to ship, measure, and learn from your releases using the same tools as the best tech companies in the world. With Statsig, you can run thousands of experiments, safely rollout features, and dive deep on core business metrics and user behavior—all on a single, unified platform. To get started with Statsig, visit our [website](https://statsig.com?ref=gh_docs) or [sign up](https://console.statsig.com/sign_up?ref=gh_docs). This repository is hosted at https://docs.statsig.com. @@ -20,11 +27,11 @@ There are two ways to contribute: 1. Open a Pull Request on Github (via local dev, or by editing in the Github UI) 2. Use the Mintlify Visual Editor (Statsig Employees Only) -If you're used to docs-as-code or our old docusaurus setup, #1 is probably the right path - see below for local development instructions. If you'd prefer a snazzy visual interface, message Brock on slack to get setup with a Visual Editor account. +If you're used to docs-as-code or our old docusaurus setup, #1 is probably the right path - refer to the following section for local development instructions. If you'd prefer a snazzy visual interface, message Brock on slack to get setup with a Visual Editor account. ### Local development: -``` +```bash npm i -g mintlify mintlify dev ``` @@ -47,4 +54,4 @@ You can find the allowlist of non-dictionary words in styles/config/vocabularies #### Troubleshooting - Mintlify dev isn't running - Run `mintlify install` it'll re-install dependencies. 
-- Page loads as a 404 - Make sure you are running in a folder with `docs.json` +- Page loads as a 404 - Make sure you are running in a folder with `docs.json` \ No newline at end of file diff --git a/access-management/discussions.mdx b/access-management/discussions.mdx index 2e4104af4..d99a3aadc 100644 --- a/access-management/discussions.mdx +++ b/access-management/discussions.mdx @@ -1,5 +1,6 @@ --- title: Discussions in Statsig +description: Feature rollout and experimentation are collaborative exercises where teams work together. Often this collaboration requires people taking screenshots --- ## Discussions diff --git a/access-management/guide.mdx b/access-management/guide.mdx index a6b3d4343..1ff7d2c91 100644 --- a/access-management/guide.mdx +++ b/access-management/guide.mdx @@ -1,5 +1,6 @@ --- title: Initial Setup Guide of your Workspace +description: Organizations and their related features are for Enterprise contracts only. Please reach out to our [support team](mailto:support@statsig.com), --- @@ -25,7 +26,7 @@ In Statsig, we have three constructs to help you organize your workspace and sca **Organization** is an enterprise-level environment that allows companies to create project(s) and bring members to work inside the project. -**Project** is a workspace within the organization where the configs (e.g., feature gates, experiments, layers, etc.), metrics, and SDK keys you and your team created lives. +**Project** is a workspace within the organization where the configs (e.g., feature flags, experiments, layers, etc.), metrics, and SDK keys you and your team created lives. **Team** is a group of members at the project level that can help your organization manage the permissions and ownership of resources. @@ -35,7 +36,7 @@ In Statsig, we have three constructs to help you organize your workspace and sca In Statsig, each project within the organization is isolated from one another. 
This means that **none** of the resources or data is shared among different projects, even if they are part of the same organization. -The Statsig team believe that the **sensible default structure** for most customers is having an organization with a **single project** where multiple teams contribute and collaborate inside. +The Statsig team believe that the **sensible default structure** for most users is having an organization with a **single project** where multiple teams contribute and collaborate inside. We believe that different teams and functions across your organization can stay well-organized within a single project by leveraging features such as **[teams](https://docs.statsig.com/access-management/teams), [roles](https://docs.statsig.com/access-management/projects#roles), [tags](https://docs.statsig.com/access-management/tags),** and **[templates](https://docs.statsig.com/experiments/templates/templates)**. This also removes the risk of costly migrations if some projects ever need to be consolidated in the future for cross-functional collaborations. @@ -73,7 +74,7 @@ New users who are provisioned via SSO will be assigned the *Member* Role unless Organization project administration interface -A project in Statsig serves as a workspace that contains everything you and your team will create. This includes configs (e.g., feature gates, experiments, dynamic configs, layers), metrics, integrations, and more. +A project in Statsig serves as a workspace that contains everything you and your team will create. This includes configs (e.g., feature flags, experiments, dynamic configs, layers), metrics, integrations, and more. When creating a new project, you can set its type to *Open* which would allow anyone in the organization to join freely, or to *Closed* which would allow people to join only by invitation or request. 
@@ -140,7 +141,7 @@ Each **team** also has team-level **review settings** that can require reviews f SDK environment interface -In Statsig, you have **Client API Keys** to initialize all Statsig [client SDKs](https://docs.statsig.com/client/introduction) and **Server Secret Keys** to initialize all Statsig [server SDKs](https://docs.statsig.com/server/introduction). +In Statsig, you have **Client API Keys** to initialize all Statsig [client SDKs](https://docs.statsig.com/client/introduction) and **Server Secret Keys** to initialize all Statsig [server SDKs](https://docs.statsig.com/server/introduction). We believe there is a **"Crawl, Walk, Run"** phase when it comes to configuring your API keys: diff --git a/access-management/introduction.mdx b/access-management/introduction.mdx index 013fafe76..0cb4c20f9 100644 --- a/access-management/introduction.mdx +++ b/access-management/introduction.mdx @@ -1,6 +1,7 @@ --- title: Workspace Management Overview sidebarTitle: Overview +description: Statsig provides a few different solutions for access management as you scale out adoption in your team/org/company. We have simple settings like auto --- Statsig provides a few different solutions for access management as you scale out adoption in your team/org/company. diff --git a/access-management/org-admin/experiment_policy.mdx b/access-management/org-admin/experiment_policy.mdx index ecc44ef42..10d06b8ad 100644 --- a/access-management/org-admin/experiment_policy.mdx +++ b/access-management/org-admin/experiment_policy.mdx @@ -1,5 +1,6 @@ --- title: Experiment Policy +description: Organization level Experiment Policies are an Enterprise only feature. 
* Set defaults for new experiments: While Statsig aims to provid --- diff --git a/access-management/org-admin/gates_policy.mdx b/access-management/org-admin/gates_policy.mdx index 14fc1dd2c..803e8f113 100644 --- a/access-management/org-admin/gates_policy.mdx +++ b/access-management/org-admin/gates_policy.mdx @@ -1,5 +1,6 @@ --- title: Feature Gates Policy +description: Organization level Feature Gate Policies are an Enterprise only feature. Feature Gates Policy grants organization admins the ability to --- diff --git a/access-management/org-admin/organization_policies.mdx b/access-management/org-admin/organization_policies.mdx index a9302fbb5..383f31ae6 100644 --- a/access-management/org-admin/organization_policies.mdx +++ b/access-management/org-admin/organization_policies.mdx @@ -1,5 +1,6 @@ --- title: Organization Policies +description: Organization-level Experiment and Gate Policies are an Enterprise only feature. You can configure Organization-level Experiment and Gat --- diff --git a/access-management/organizations.mdx b/access-management/organizations.mdx index 1133b3ebb..55e4ea062 100644 --- a/access-management/organizations.mdx +++ b/access-management/organizations.mdx @@ -1,5 +1,6 @@ --- title: Organization Settings & Administration +description: Organizations and their related features are for Enterprise contracts only. Please reach out to our [support team](mailto:support@statsig.com), --- @@ -27,7 +28,7 @@ You can view your Organization page by navigating to [Account Settings](https:// Account Settings organization management interface -#### Organization Information +### Organization Information Your organization's **Info** sidebar includes the organization's name, SSO configuration, and other settings on access management and security settings. As the organization's **Admin**, you can enable or disable SSO for all projects in the organization from this one place. 
diff --git a/access-management/projects.mdx b/access-management/projects.mdx index 3a7e249e7..0236f4340 100644 --- a/access-management/projects.mdx +++ b/access-management/projects.mdx @@ -1,5 +1,6 @@ --- title: Project Access Management +description: This guide applies only to our on-demand customers. If you are an organization who has set up SSO, this guide will not apply to you. Please see --- This guide applies only to our on-demand customers. If you are an organization who has set up SSO, this guide will not apply to you. Please see our [SSO Guides](/access-management/sso/overview) for more information about how to manage access permissions through SSO. diff --git a/access-management/scim/concepts.mdx b/access-management/scim/concepts.mdx index b415cf17f..2cb03b20b 100644 --- a/access-management/scim/concepts.mdx +++ b/access-management/scim/concepts.mdx @@ -1,5 +1,6 @@ --- title: SCIM Concepts +description:
Our SCIM implementation represents both Statsig users at the Organization level and Project level with their associated roles. There are two ma --- diff --git a/access-management/scim/okta_scim_org_roles.mdx b/access-management/scim/okta_scim_org_roles.mdx index a061264a8..3f7c78f10 100644 --- a/access-management/scim/okta_scim_org_roles.mdx +++ b/access-management/scim/okta_scim_org_roles.mdx @@ -1,6 +1,7 @@ --- title: Okta SCIM Org Roles sidebarTitle: Org Roles +description: For every user, Statsig surfaces a SCIM field named `statsigOrgRole`. Through this field, you can manage organization user roles. Currently, Okta can --- diff --git a/access-management/scim/okta_scim_setup.mdx b/access-management/scim/okta_scim_setup.mdx index eb74dcc51..bf42fdbac 100644 --- a/access-management/scim/okta_scim_setup.mdx +++ b/access-management/scim/okta_scim_setup.mdx @@ -1,6 +1,7 @@ --- title: Okta SCIM Setup sidebarTitle: Setup +description: This guide outlines the process for setting up SCIM (System for Cross-domain Identity Management) integration between Statsig and Okta. This integrati --- This guide outlines the process for setting up SCIM (System for Cross-domain Identity Management) integration between Statsig and Okta. This integration allows for automated diff --git a/access-management/scim/okta_scim_team_management.mdx b/access-management/scim/okta_scim_team_management.mdx index b56ebd624..373dc8bb9 100644 --- a/access-management/scim/okta_scim_team_management.mdx +++ b/access-management/scim/okta_scim_team_management.mdx @@ -1,6 +1,7 @@ --- title: Okta SCIM Team Management sidebarTitle: Team Management +description: Teams are specific Statsig groups that exist within projects. They are not shared across projects. 
They also have two possible roles: `Admin` a --- diff --git a/access-management/scim/okta_scim_troubleshooting.mdx b/access-management/scim/okta_scim_troubleshooting.mdx index 9bfb71790..6b6f0d536 100644 --- a/access-management/scim/okta_scim_troubleshooting.mdx +++ b/access-management/scim/okta_scim_troubleshooting.mdx @@ -1,6 +1,7 @@ --- title: Okta SCIM Troubleshooting sidebarTitle: Troubleshooting +description: | Error | Solution | |-------|----------| | User userID is not allowed to be created in this organization | Ensure that the users you are assigning to --- diff --git a/access-management/scim/okta_scim_user_management.mdx b/access-management/scim/okta_scim_user_management.mdx index 7068d6f4c..a198cc776 100644 --- a/access-management/scim/okta_scim_user_management.mdx +++ b/access-management/scim/okta_scim_user_management.mdx @@ -1,6 +1,7 @@ --- title: Okta SCIM User and Project/Role Management sidebarTitle: User and Project/Role Management +description: Users not assigned to the integration cannot be pushed into groups. 
- In Okta, go to the Statsig app's "Import" tab --- diff --git a/access-management/scim/overview.mdx b/access-management/scim/overview.mdx index 73e45d4ad..a09e549f1 100644 --- a/access-management/scim/overview.mdx +++ b/access-management/scim/overview.mdx @@ -1,6 +1,7 @@ --- title: SCIM User Provisioning sidebarTitle: Overview +description: SCIM (System for Cross-domain Identity Management) is a standardized protocol that simplifies the automation of user provisioning and management acros --- diff --git a/access-management/scim/scim-endpoints.mdx b/access-management/scim/scim-endpoints.mdx index fc11ed913..c65a57c48 100644 --- a/access-management/scim/scim-endpoints.mdx +++ b/access-management/scim/scim-endpoints.mdx @@ -1,6 +1,7 @@ --- title: SCIM API Overview sidebarTitle: API Overview +description: The System for Cross-domain Identity Management (SCIM) specification is designed to make managing user identities in cloud-based applications and serv --- The System for Cross-domain Identity Management (SCIM) specification is designed to make managing user identities in cloud-based applications and services easier. Statsig's SCIM API allows you to automate user provisioning and deprovisioning between your identity provider and Statsig. diff --git a/access-management/sso/azuread.mdx b/access-management/sso/azuread.mdx index 2d497c7a9..de14db56e 100644 --- a/access-management/sso/azuread.mdx +++ b/access-management/sso/azuread.mdx @@ -1,5 +1,6 @@ --- title: Single Sign-On With Entra ID/Azure AD/Office 365 +description: Microsoft Entra ID, formerly known as Azure AD, is a supported IdP for SSO into Statsig. - You will need to be the `Admin` of the Statsig Project you --- Microsoft Entra ID, formerly known as Azure AD, is a supported IdP for SSO into Statsig. 
diff --git a/access-management/sso/google.mdx b/access-management/sso/google.mdx index f4e28a06a..f34aa1898 100644 --- a/access-management/sso/google.mdx +++ b/access-management/sso/google.mdx @@ -1,5 +1,6 @@ --- title: SSO with Google as your IdP +description: - You will need to be the `Admin` of the Statsig Project you intend to add SSO with Google Apps to. --- ## Requirements diff --git a/access-management/sso/okta_sso.mdx b/access-management/sso/okta_sso.mdx index b1e243648..81aaf6786 100644 --- a/access-management/sso/okta_sso.mdx +++ b/access-management/sso/okta_sso.mdx @@ -1,5 +1,6 @@ --- title: Single Sign-On With Okta +description: - You will need to be the `Admin` of the Statsig Organization you intend to add SSO with Okta to. - You will need to be the Administrator of the Okta --- ## Requirements diff --git a/access-management/sso/overview.mdx b/access-management/sso/overview.mdx index ee8bfb0b9..d6d01d8aa 100644 --- a/access-management/sso/overview.mdx +++ b/access-management/sso/overview.mdx @@ -1,5 +1,6 @@ --- title: Single Sign-On With OIDC +description: SSO is an Enterprise feature. Please reach out to our [support team](mailto:support@statsig.com), your sales contact, or via our slack channel --- diff --git a/access-management/tags.mdx b/access-management/tags.mdx index da499a8c3..1f6b411f8 100644 --- a/access-management/tags.mdx +++ b/access-management/tags.mdx @@ -1,5 +1,6 @@ --- title: Tags in Statsig +description: Tags let you apply light-weight organization to your Statsig config (e.g. gates, experiments and metrics) to allow easy filtering by team (or organiza --- ## Tags for organization diff --git a/access-management/teams.mdx b/access-management/teams.mdx index 4b3eb7a8d..f36beedd0 100644 --- a/access-management/teams.mdx +++ b/access-management/teams.mdx @@ -1,5 +1,6 @@ --- title: Teams +description: Teams are an Enterprise-only feature. If you are on the Developer or Pro tiers, this guide will not apply to you. 
To upgrade to Enterprise, fee --- Teams are an Enterprise-only feature. If you are on the Developer or Pro tiers, this guide will not apply to you. To upgrade to Enterprise, feel free to reach out to our team [here](https://www.statsig.com/contact/demo). diff --git a/ai-evals/offline-evals.mdx b/ai-evals/offline-evals.mdx index 3de071937..73b272bac 100644 --- a/ai-evals/offline-evals.mdx +++ b/ai-evals/offline-evals.mdx @@ -1,5 +1,6 @@ --- title: Offline Evals +description: Offline evals offer a quick, automated grading of model outputs on a fixed test set. They catch wins / regressions early—before any real users are exp --- ## What are Offline Evals diff --git a/ai-evals/online-evals.mdx b/ai-evals/online-evals.mdx index 136eacd4c..8e09c953a 100644 --- a/ai-evals/online-evals.mdx +++ b/ai-evals/online-evals.mdx @@ -1,5 +1,6 @@ --- title: Online Evals +description: Online evals let you grade your model output in production on real world use cases. You can run the "live" version of a prompt, but can also shadow ru --- ## What are Online Evals @@ -59,7 +60,7 @@ const liveOutput = client.completions.create( ); // simulateneously run completions on the candidate prompts to get their output -``` +```kotlin **3. Score your output using graders** diff --git a/ai-evals/overview.mdx b/ai-evals/overview.mdx index 7a4f94b11..6dd63e15c 100644 --- a/ai-evals/overview.mdx +++ b/ai-evals/overview.mdx @@ -1,6 +1,7 @@ --- title: AI Evals Overview sidebarTitle: Overview +description: AI Evals are currently in beta; Reach out in Slack to get access. Statsig AI Evals have a few core components to help iterate and serve --- diff --git a/ai-evals/prompts.mdx b/ai-evals/prompts.mdx index d551e5fbb..be39a6495 100644 --- a/ai-evals/prompts.mdx +++ b/ai-evals/prompts.mdx @@ -1,6 +1,7 @@ --- title: Prompts & Graders sidebarTitle: Prompts & Graders +description: A Prompt is a way to represent an LLM prompt or a task in Statsig, with it's config. 
Prompts are similar to Dynamic Configs, and allow you to evaluate --- ## What is a Prompt in Statsig? diff --git a/autotune/bandit-faq.mdx b/autotune/bandit-faq.mdx index 857537cb7..0fd6dd009 100644 --- a/autotune/bandit-faq.mdx +++ b/autotune/bandit-faq.mdx @@ -5,6 +5,7 @@ keywords: - owner:craig last_update: date: 2025-09-18 +description: You will see diagnostic data appear in near-real time in the logstream on your bandit. Data on the models/results section will depend on your settings --- ### When should I see data show up in a bandit? diff --git a/autotune/bandit-introduction.mdx b/autotune/bandit-introduction.mdx index a045a71e9..1d7cbb8fc 100644 --- a/autotune/bandit-introduction.mdx +++ b/autotune/bandit-introduction.mdx @@ -5,6 +5,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: Multi-Armed Bandits are solutions that automatically find the best variant among a group of candidates, balancing between "exploring" options and "exp --- Multi-Armed Bandits are solutions that automatically find the best variant among a group of candidates, balancing between "exploring" options and "exploiting" the best option by dynamically allocating traffic. On Statsig, Bandits are used to pick the best user experience to drive a target metric or action. diff --git a/autotune/contextual-bandit.mdx b/autotune/contextual-bandit.mdx index ae145e589..e027e2f4c 100644 --- a/autotune/contextual-bandit.mdx +++ b/autotune/contextual-bandit.mdx @@ -5,6 +5,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: Autotune UI implements a variant of the LinUCB algorithm. 
This estimates a user's outcome for each variant, and incorporates the model's uncertainty t --- ## Model diff --git a/autotune/contextual/getting-started.mdx b/autotune/contextual/getting-started.mdx index 589d70885..93e8a25b8 100644 --- a/autotune/contextual/getting-started.mdx +++ b/autotune/contextual/getting-started.mdx @@ -5,6 +5,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: Getting started with Autotune AI can be done very quickly. Statsig supports contextual autotune in all Client SDKs, but only in the followin --- Getting started with Autotune AI can be done very quickly. @@ -99,7 +100,7 @@ We assume you have your server secret key for the following code. Before running First, import and initialize Statsig: -``` +```javascript from statsig_python_core import Statsig, StatsigUser key = @@ -111,14 +112,14 @@ statsig.initialize().wait() Then, create a user object and fetch your config: -``` +```text user = StatsigUser('user_id', custom={'key1': 'value1', 'key2': 'value2'}) cfg = statsig.get_experiment(user, autotune_name) ``` Now you have your cfg and can apply it! -``` +```python color = cfg.get_string("color", "default color") print(f"Going to use {color} for my color now") ``` @@ -134,7 +135,7 @@ That's it! Your code is now serving personalized variants to your users. Statsig requires a few hundred units to train a model, and will also not start training until those units' attribution window has elapsed. 
If you want to test the functionality, we highly recommend "faking a test" to confirm things work like you expect - use logic like -``` +```text fetch_autotune_value() if(user country == 'us'): log_click() diff --git a/autotune/contextual/introduction.mdx b/autotune/contextual/introduction.mdx index b96641f67..e4421bf5d 100644 --- a/autotune/contextual/introduction.mdx +++ b/autotune/contextual/introduction.mdx @@ -5,6 +5,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: Contextual Multi-Armed Bandits are a subset of Multi-Armed-Bandits which use context about a user to personalize their experience. This is generally a --- Contextual Multi-Armed Bandits are a subset of Multi-Armed-Bandits which use context about a user to personalize their experience. This is generally achieved by predicting outcomes from among the variants, and picking the best outcome while factoring for uncertainty. Specifically, they will tend to prefer a slightly worse prediction that has a lot of uncertainty, thereby exploring that variant. diff --git a/autotune/contextual/methodology.mdx b/autotune/contextual/methodology.mdx index d49a9b7d3..b5c385d94 100644 --- a/autotune/contextual/methodology.mdx +++ b/autotune/contextual/methodology.mdx @@ -5,6 +5,7 @@ keywords: - owner:craig last_update: date: 2025-09-18 +description: This page covers the high level approach that Statsig takes to running contextual bandits across cloud and warehouse native. Specifics of implementati --- ## Methodology diff --git a/autotune/contextual/monitoring.mdx b/autotune/contextual/monitoring.mdx index 35b148cbb..29497cc59 100644 --- a/autotune/contextual/monitoring.mdx +++ b/autotune/contextual/monitoring.mdx @@ -5,6 +5,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: There are three primary ways we recommend you monitor autotune performance. 
The best way to evaluate if a bandit is working is seeing if it drives mor --- There are three primary ways we recommend you monitor autotune performance. @@ -17,7 +18,7 @@ This is the gold standard of measurement and is highly encouraged. Standard practice is to wrap the autotune in a experiment with a binary parameter, either as 50/50 or a 90/10 holdback. You can link the experiment to the autotune to get the results on the autotune page. In code, this might look like: -``` +```yaml experiment_value = statsig.get_experiment('wrapping_experiment').get('flag') default_param = '..." if(experiment_value): diff --git a/autotune/monitoring.mdx b/autotune/monitoring.mdx index 752f61f73..c4de1a19c 100644 --- a/autotune/monitoring.mdx +++ b/autotune/monitoring.mdx @@ -5,6 +5,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: The results tab within Autotune provides a view of your ongoing and completed Autotune tests. Autotune is computed hourly with metrics and traffic al --- ## How to monitor your Autotune Test @@ -42,7 +43,7 @@ This is the gold standard of measurement and is highly encouraged. Standard practice is to wrap the autotune in a experiment with a binary parameter, either as 50/50 or a 90/10 holdback. You can link the experiment to the autotune to get the results on the autotune page. In code, this might look like: -``` +```yaml experiment_value = statsig.get_experiment('wrapping_experiment').get('flag') default_param = '..." 
if(experiment_value): diff --git a/autotune/multi-armed-bandit.mdx b/autotune/multi-armed-bandit.mdx index e7ab7fe08..cf1f15bef 100644 --- a/autotune/multi-armed-bandit.mdx +++ b/autotune/multi-armed-bandit.mdx @@ -5,6 +5,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: The base Autotune implementation uses a Thompson Sampling (Bayesian) algorithm to estimate each variant's probability of being the best variant and al --- ## Model diff --git a/autotune/setup.mdx b/autotune/setup.mdx index f3624b35c..39f875e2c 100644 --- a/autotune/setup.mdx +++ b/autotune/setup.mdx @@ -5,6 +5,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: 1. To create a new Autotune experiment, navigate to the [Autotune section on the Statsig console](https://console.statsig.com/autotune). 2. Click the --- ## How to set up Autotune @@ -17,7 +18,7 @@ last_update: Autotune experiment variant configuration interface -4. Select the success event to optimize for as shown below. You can further specify an optional [event value](/guides/logging-events). +4. Select the success event to optimize for as shown in the following example. You can further specify an optional [event value](/guides/logging-events). Autotune success event selection interface @@ -31,7 +32,7 @@ There are a few parameters you can specify: Click "Create" to finalize the setup. -6. Similar to Feature Gates and Experiments, you can find a code snippet for the exposure check event to add to your code. Don't forget to click "Start" when you're ready to launch your Autotune test. +6. Similar to Feature Flags and Experiments, you can find a code snippet for the exposure check event to add to your code. Don't forget to click "Start" when you're ready to launch your Autotune test. 
Autotune code snippet and launch interface diff --git a/autotune/using-bandits.mdx b/autotune/using-bandits.mdx index e1ac8b766..c77a543e1 100644 --- a/autotune/using-bandits.mdx +++ b/autotune/using-bandits.mdx @@ -5,6 +5,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: Both contextual and non-contextual bandits are managed on Statsig's console, or through Statsig's [console API](../console-api/autotunes.mdx) for prog --- Both contextual and non-contextual bandits are managed on Statsig's console, or through Statsig's [console API](../console-api/autotunes.mdx) for programmatic creation. Both bandit types use a common, streamlined API, making it easy to explore either use case without significant changes from using experiments. @@ -17,7 +18,7 @@ There's no additional steps beyond a regular experiment check to use a bandit, t Check bandits using your standard experiment call. For example, in react this is as simple as configuring a bandit's variant json like: -``` +```json { "text": "", ... @@ -26,7 +27,7 @@ Check bandits using your standard experiment call. For example, in react this is and accessing it in code using the same pattern as [experiments](../guides/abn-tests.mdx): -``` +```javascript const banditText = useExperiment('contextual_bandit').config.get('text'); ``` diff --git a/client/Android.mdx b/client/Android.mdx index 0abd9a6eb..23fd1fbf6 100644 --- a/client/Android.mdx +++ b/client/Android.mdx @@ -38,7 +38,7 @@ v4.37.1 and higher are published to only [Maven Central](https://central.sonatyp dependencies { implementation "com.statsig:android-sdk:4.37.1" } -``` +```text Legacy versions (\<=V4.37.0) can be installed with [Jitpack](https://jitpack.io/#statsig-io/android-sdk). 
@@ -72,7 +72,7 @@ public class MainActivity extends AppCompatActivity implements IStatsigCallback } } -``` +```text ```kotlin MainActivity.kt @@ -87,7 +87,7 @@ async { StatsigUser("user_id"), ) }.await() -``` +```text @@ -104,7 +104,7 @@ DynamicConfig config = Statsig.getConfig("awesome_product_details"); String itemName = config.getString("product_name", "Awesome Product v1"); Double price = config.getDouble("price", 10.0); Boolean shouldDiscount = config.getBoolean("discount", false); -``` +```text ```kotlin Kotlin val config = Statsig.getConfig("awesome_product_details") @@ -115,7 +115,7 @@ val config = Statsig.getConfig("awesome_product_details") val itemName = config.getString("product_name", "Awesome Product v1") val price = config.getDouble("price", 10.0) val shouldDiscount = config.getBoolean("discount", false) -``` +```text @@ -127,7 +127,7 @@ if (Statsig.checkGate("new_homepage_design")) { } else { // Gate is off, show old home page } -``` +```text ```kotlin Kotlin if (Statsig.checkGate("new_homepage_design")) { @@ -135,7 +135,7 @@ if (Statsig.checkGate("new_homepage_design")) { } else { // Gate is off, show old home page } -``` +```text @@ -161,7 +161,7 @@ Double discount = priceExperiment.getDouble("discount", 0.1); ... Double price = msrp * (1 - discount); -``` +```text ```kotlin Kotlin // Values via getLayer @@ -181,18 +181,18 @@ val discount = priceExperiment.getDouble("discount", 0.1) ... 
val price = msrp * (1 - discount); -``` +```text ```java Java Statsig.logEvent("purchase", 2.99, Map.of("item_name", "remove_ads")); -``` +```text ```kotlin Kotlin Statsig.logEvent("purchase", 2.99, Map.of("item_name" to "remove_ads")) -``` +```text @@ -203,11 +203,11 @@ To fetch a set of parameters, use the following api: ```java Java ParameterStore homepageStore = Statsig.getParameterStore("homepage"); -``` +```text ```kotlin Kotlin val homepageStore = Statsig.getParameterStore("homepage") -``` +```text ### Getting a parameter @@ -221,7 +221,7 @@ String title = homepageStore.getString( ); boolean shouldShowUpsell = homePageStore.getBoolean("upsell_upgrade_now", false); -``` +```text ```kotlin Kotlin val title = homepageStore.getString( @@ -230,7 +230,7 @@ val title = homepageStore.getString( ) val shouldShowUpsell = homepageStore.getBoolean("upsell_upgrade_now", false) -``` +```text @@ -245,11 +245,11 @@ Statsig.updateUserAsync(newUser, this); // this must implement IStatsigCallback public void onStatsigUpdateUser() { // User has been updated and values have been refetched for the new user } -``` +```text ```kotlin Kotlin Statsig.updateUser(StatsigUser("new_user_id")) -``` +```text @@ -262,11 +262,11 @@ Statsig.updateUser(StatsigUser("new_user_id")) ```java Java Statsig.shutdown(); -``` +```text ```kotlin Kotlin Statsig.shutdown() -``` +```text @@ -299,7 +299,7 @@ class StatsigOverrides( @SerializedName("configs") val configs: MutableMap> ) {} -``` +```text @@ -312,7 +312,7 @@ Statsig.getStableID(); // Override the StableID before initializing, if you have something you'd prefer to use instead val opts = StatsigOptions(overrideStableID = "my_stable_id") Statsig.initialize(app, "client-xyx", options = opts) -``` +```text @@ -320,12 +320,12 @@ Statsig.initialize(app, "client-xyx", options = opts) ```java Java StatsigClient client = new StatsigClient(); client.initializeAsync(application, sdkKey, user, callback, options); -``` +```text ```kotlin Kotlin var client: 
StatsigClient = StatsigClient() client.initialize(application, sdkKey, user, options) -``` +```text @@ -340,7 +340,7 @@ String jsonValues = response.getInitializeResponseJSON(); // Get the evaluation details EvaluationDetails details = response.getEvaluationDetails(); -``` +```text ```kotlin Kotlin // Get the raw values that the SDK is using internally to provide gate/config/layer results diff --git a/client/Angular.mdx b/client/Angular.mdx index f27db0a01..b4584ba6b 100644 --- a/client/Angular.mdx +++ b/client/Angular.mdx @@ -4,7 +4,6 @@ sidebarTitle: Angular description: Statsig's SDK for Experimentation and Feature Flags in Angular applications. icon: "angular" --- - import Installation from '/snippets/client/installation.mdx' import Initialization from '/snippets/client/initialization.mdx' import checkGate from '/snippets/client/checkGate.mdx' @@ -31,6 +30,9 @@ Source code: @@ -62,5 +64,4 @@ Source code: @@ -74,4 +76,4 @@ client.Shutdown(); StatsigClient::Shared().Shutdown(); ``` - + \ No newline at end of file diff --git a/client/Dart.mdx b/client/Dart.mdx index 9da3d540e..71a3c89cd 100644 --- a/client/Dart.mdx +++ b/client/Dart.mdx @@ -4,7 +4,6 @@ sidebarTitle: Dart description: Statsig's SDK for Experimentation and Feature Flags in Dart & Flutter applications. icon: "dart-lang" --- - import Installation from '/snippets/client/installation.mdx' import Initialization from '/snippets/client/initialization.mdx' import checkGate from '/snippets/client/checkGate.mdx' @@ -36,6 +35,9 @@ Source code: @@ -81,4 +83,4 @@ Source code: + \ No newline at end of file diff --git a/client/DotNet.mdx b/client/DotNet.mdx index 56471bc18..b25ec7ac3 100644 --- a/client/DotNet.mdx +++ b/client/DotNet.mdx @@ -4,7 +4,6 @@ sidebarTitle: .NET Client description: Statsig's SDK for Experimentation and Feature Flags in .NET applications. 
icon: "/images/dotnet-grey.svg" --- - import Installation from '/snippets/client/installation.mdx' import Initialization from '/snippets/client/initialization.mdx' import checkGate from '/snippets/client/checkGate.mdx' @@ -33,6 +32,9 @@ Source code: @@ -71,4 +73,4 @@ Source code: + \ No newline at end of file diff --git a/client/Expo.mdx b/client/Expo.mdx index 22470e793..8f1fc6472 100644 --- a/client/Expo.mdx +++ b/client/Expo.mdx @@ -4,7 +4,6 @@ sidebarTitle: Expo description: Statsig's SDK for Experimentation and Feature Flags in Expo applications. icon: "e" --- - import Installation from '/snippets/client/installation.mdx' import Initialization from '/snippets/client/initialization.mdx' import checkGateIntro from '/snippets/client/checkGate.mdx' @@ -29,6 +28,9 @@ Source code: @@ -61,4 +63,4 @@ Source code: Docusaurus Next.js reference - JavaScript Client SDK - React Client SDK - - Initialization Concepts + - Initialization Concepts \ No newline at end of file diff --git a/client/React.mdx b/client/React.mdx index 0db6b6317..a4aa53394 100644 --- a/client/React.mdx +++ b/client/React.mdx @@ -4,7 +4,6 @@ sidebarTitle: React description: Use Statsig in React apps with hooks, providers, and optional plugins for session replay and auto capture. 
icon: "react" --- - import Installation from '/snippets/client/installation.mdx' import Initialization from '/snippets/client/initialization.mdx' import checkGateIntro from '/snippets/client/checkGate.mdx' @@ -36,6 +35,9 @@ Source code: @@ -87,4 +89,4 @@ Source code: @@ -58,4 +60,4 @@ Source code: @@ -96,4 +98,4 @@ Working sample apps are available in the repository: - [JavaScript On-Device Evaluation SDK](/client/jsOnDeviceEvaluationSDK) - [Client Keys with Server Permissions](/sdk-keys/api-keys/#client-keys-with-server-permissions) - [Using EvaluationsDataAdapter](/client/javascript/using-evaluations-data-adapter) -- [Debugging SDK Evaluations](/sdk/debugging) +- [Debugging SDK Evaluations](/sdk/debugging) \ No newline at end of file diff --git a/client/Roku.mdx b/client/Roku.mdx index e843fc7fc..8413ef613 100644 --- a/client/Roku.mdx +++ b/client/Roku.mdx @@ -19,7 +19,7 @@ You can start by downloading a copy of [the GitHub repository](https://github.co You will need the following files: -``` +```text Statsig -- components -- statsigsdk @@ -51,14 +51,14 @@ Include `StatsigClient.brs`, `StatsigUser.brs`, `DynamicConfig.brs`, and `Statsi -``` +```python
(please use the above code at your discretion and test thoroughly)
### Keeping StableID Consistent across Client & Server diff --git a/client/javascript-sdk.mdx b/client/javascript-sdk.mdx index ad9589bc0..2fb21e11a 100644 --- a/client/javascript-sdk.mdx +++ b/client/javascript-sdk.mdx @@ -4,7 +4,6 @@ sidebarTitle: Javascript (Web) description: Statsig's JavaScript SDK for browser and React applications. icon: "js" --- - import Initialization from '/snippets/client/initialization.mdx' import checkGate from '/snippets/client/checkGate.mdx' import getDynamicConfig from '/snippets/client/getDynamicConfig.mdx' @@ -52,6 +51,9 @@ Source code:
@@ -127,4 +129,4 @@ Source code: @@ -119,5 +121,4 @@ Working sample apps are available in the repository: - [On-Device Evaluation SDK Overview](/client/onDevice) - [Client Keys with Server Permissions](/sdk-keys/api-keys/#client-keys-with-server-permissions) - [Using EvaluationsDataAdapter](/client/javascript/using-evaluations-data-adapter) -- [Debugging SDK Evaluations](/sdk/debugging) - +- [Debugging SDK Evaluations](/sdk/debugging) \ No newline at end of file diff --git a/client/migration-guides/MigrationFromOldJsClient.mdx b/client/migration-guides/MigrationFromOldJsClient.mdx index 63e9dcbd8..b394c2648 100644 --- a/client/migration-guides/MigrationFromOldJsClient.mdx +++ b/client/migration-guides/MigrationFromOldJsClient.mdx @@ -42,7 +42,7 @@ await Statsig.initialize( { userID: "some_user_id" }, { environment: { tier: "staging" } } // optional, pass options here if needed ); -``` +```text ```typescript New - Async import { StatsigClient } from '@statsig/js-client'; @@ -55,7 +55,7 @@ const client = new StatsigClient( // Async - waits for latest values await client.initializeAsync(); -``` +```text ```typescript New - Sync import { StatsigClient } from '@statsig/js-client'; @@ -68,7 +68,7 @@ const client = new StatsigClient( // Sync - uses cache, fetches in background client.initializeSync(); -``` +```sql @@ -87,7 +87,7 @@ statsig.getConfig('config_name'); // new statsigClient.getDynamicConfig('config_name'); -``` +```python @@ -104,7 +104,7 @@ statsig.getClientInitializeResponse( hash: 'djb2', }, ); -``` +```text @@ -125,7 +125,7 @@ import Statsig from "statsig-js"; const user = { userID: "a-user" }; await Statsig.updateUser(user); -``` +```text ```typescript New - Async import { StatsigClient } from '@statsig/js-client'; @@ -136,7 +136,7 @@ await client.initializeAsync(); // Update to new user - async const newUser = { userID: 'a-user' }; await client.updateUserAsync(newUser); -``` +```text ```typescript New - Sync import { StatsigClient } from 
'@statsig/js-client'; @@ -147,7 +147,7 @@ client.initializeSync(); // Update to new user - sync const newUser = { userID: 'a-user' }; client.updateUserSync(newUser); -``` +```text @@ -178,7 +178,7 @@ await Statsig.initialize(YOUR_CLIENT_KEY, { userID: 'a-user' }); if (Statsig.checkGate('a_gate')) { // do something... } -``` +```text ```typescript New import { StatsigClient } from '@statsig/js-client'; @@ -192,7 +192,7 @@ const instance = StatsigClient.instance(YOUR_CLIENT_KEY); if (instance.checkGate('a_gate')) { // do something... } -``` +```python @@ -227,7 +227,7 @@ const client = new StatsigClient( overrideAdapter, }, ); -``` +```ruby Full example here: @@ -263,7 +263,7 @@ Statsig.initialize( // or, by manually flipping the related flags Statsig.reenableAllLogging(); StatsigLocalStorage.disabled = false; -``` +```text ```typescript New import { StatsigClient } from '@statsig/js-client'; @@ -287,7 +287,7 @@ client.updateRuntimeOptions({ disableStorage: false, networkConfig: { preventAllNetworkTraffic: false }, }); -``` +```javascript @@ -317,7 +317,7 @@ The structure of cached values has changed significantly, and there is no suppor ## Legacy StatsigOptions The options to parameterize initialization of the SDK have changed. In some cases, the underlying features were removed or moved and can be enabled/disabled in a different way. In other scenarios, there is a new API for managing them. The following is a mapping of old options to their equivalents in the new sdk. -#### disableErrorLogging +### disableErrorLogging > This feature does not exist in the new Javascript SDK, so there is no option to disable it. 
#### disableAutoMetricsLogging diff --git a/client/migration-guides/MigrationFromOldReact.mdx b/client/migration-guides/MigrationFromOldReact.mdx index c7e8bb153..a5511fdfd 100644 --- a/client/migration-guides/MigrationFromOldReact.mdx +++ b/client/migration-guides/MigrationFromOldReact.mdx @@ -43,7 +43,7 @@ function App() { ); } -``` +```text ```tsx New import { StatsigProvider } from '@statsig/react-bindings'; @@ -65,7 +65,7 @@ function App() { ); } -``` +```python @@ -117,7 +117,7 @@ statsig.getClientInitializeResponse( hash: 'djb2', // <- New Hashing Algorithm }, ); -``` +```text @@ -150,7 +150,7 @@ function App() { ); } -``` +```text ```tsx New import { StatsigProvider, useClientAsyncInit } from '@statsig/react-bindings'; diff --git a/client/migration-guides/ios-repo-migration-guide.mdx b/client/migration-guides/ios-repo-migration-guide.mdx index e4a1f1357..ef337452b 100644 --- a/client/migration-guides/ios-repo-migration-guide.mdx +++ b/client/migration-guides/ios-repo-migration-guide.mdx @@ -5,6 +5,7 @@ keywords: - owner:andre last_update: date: 2025-09-18 +description: We renamed the iOS/macOS/tvOS repo from `ios-sdk` to `statsig-kit` The Statsig Swift SDK repo used to live on `statsig-io/ios-sdk`. Since Xcode uses t --- We renamed the iOS/macOS/tvOS repo from `ios-sdk` to `statsig-kit` @@ -22,7 +23,7 @@ Given that this issue was blocking some customers from using Statsig, we decided > **Optional Migration**: The existing repo will continue working thanks to GitHub's redirect. -![img](/images/ios-repo-migration-xcode.png) +![Ios Repo Migration Xcode](/images/ios-repo-migration-xcode.png) 1. In Xcode, click on your project on the sidebar. That's usually the item on the root of the tree view. 2. 
Choose your project under the "Project" section of the second sidebar diff --git a/client/onDeviceOverview.mdx b/client/onDeviceOverview.mdx index 40ce50c24..f23481b0b 100644 --- a/client/onDeviceOverview.mdx +++ b/client/onDeviceOverview.mdx @@ -5,6 +5,7 @@ keywords: - owner:brock last_update: date: 2025-09-21 +description: Statsig's client-side On-Device Eval SDKs provide an alternate client-side architecture where the definition of each experiment or gate is kept in-mem --- ## On Device SDK Overview diff --git a/client/swiftOnDeviceEvaluationSDK.mdx b/client/swiftOnDeviceEvaluationSDK.mdx index d829478fe..1bddd1a84 100644 --- a/client/swiftOnDeviceEvaluationSDK.mdx +++ b/client/swiftOnDeviceEvaluationSDK.mdx @@ -4,7 +4,6 @@ sidebarTitle: Swift / Obj C description: Statsig's Swift SDK for on-device evaluation with iOS, macOS, tvOS, and watchOS. icon: "swift" --- - import Installation from '/snippets/client/installation.mdx' import Initialization from '/snippets/client/initialization.mdx' import checkGateIntro from '/snippets/client/checkGate.mdx' @@ -42,6 +41,9 @@ Source code: @@ -115,4 +117,4 @@ Included are both Swift and Objective C uses. - [On-Device Evaluation SDK Overview](/client/onDevice) - [Client Keys with Server Permissions](/sdk-keys/api-keys/#client-keys-with-server-permissions) -- [Debugging SDK Evaluations](/sdk/debugging) +- [Debugging SDK Evaluations](/sdk/debugging) \ No newline at end of file diff --git a/compliance/data_privacy_for_mobile.mdx b/compliance/data_privacy_for_mobile.mdx index 8cd2849d2..bbedc9b4f 100644 --- a/compliance/data_privacy_for_mobile.mdx +++ b/compliance/data_privacy_for_mobile.mdx @@ -4,12 +4,13 @@ keywords: - owner:tore last_update: date: 2025-05-21 +description: Statsig collects only the data that you configure to be sent to Statsig. This is typically the occurrence of feature flag evaluations (Feature Flags), --- ## General ### What data does Statsig collect from users of my app? 
-Statsig collects only the data that you configure to be sent to Statsig. This is typically the occurrence of feature flag evaluations (Feature Gates), experiment exposures, and custom events you log with the SDK. +Statsig collects only the data that you configure to be sent to Statsig. This is typically the occurrence of feature flag evaluations (Feature Flags), experiment exposures, and custom events you log with the SDK. ### Does that data include any personally identifiable information (PII)? By default, Statsig uses randomly generated IDs as described below. You can also augment data sent to Statsig with additional context and meta data, including user names, email addresses, or custom attributes. This data, alone or in combination with other data, may constitute PII if it identifies, directly or indirectly, an individual. @@ -64,7 +65,7 @@ The Statsig SDKs automatically collect the following metadata for targeting and - sdkType: The type of SDK (ios-client) - sdkVersion: The version of the Statsig SDK - sessionID: A randomly generated UUID for the current session -- stableID: A persistent device identifier (see below) +- stableID: A persistent device identifier (refer to the following example) - systemVersion: The iOS version - systemName: The system name (iOS) @@ -78,7 +79,7 @@ The Statsig SDKs automatically collect the following metadata for targeting and - sdkType: The type of SDK (android-client) - sdkVersion: The version of the Statsig SDK - sessionID: A randomly generated UUID for the current session -- stableID: A persistent device identifier (see below) +- stableID: A persistent device identifier (refer to the following example) - systemVersion: The Android API level - systemName: The system name (Android) @@ -91,12 +92,12 @@ Both iOS and Android SDKs provide the `optOutNonSdkMetadata` option to limit the let options = StatsigOptions() options.optOutNonSdkMetadata = true Statsig.start(sdkKey: "client-sdk-key", options: options) -``` +```text **Android 
SDK:** ```kotlin val options = StatsigOptions(optOutNonSdkMetadata = true) -``` +```python When `optOutNonSdkMetadata` is enabled, only the following core SDK metadata is included: - sdkType: The type of SDK @@ -135,7 +136,7 @@ let user = StatsigUser( email: nil, // Not included at top level to keep private privateAttributes: ["email": "user@example.com"] // Used for evaluation but not logged ) -``` +```text **Android SDK:** ```kotlin diff --git a/compliance/introduction.mdx b/compliance/introduction.mdx index 1523db3ee..bcdf3fc1b 100644 --- a/compliance/introduction.mdx +++ b/compliance/introduction.mdx @@ -1,5 +1,6 @@ --- title: Introduction +description: We know the data that you send to Statsig can be sensitive - both for your business, as well as your users. This section documents the tools we have f --- We know the data that you send to Statsig can be sensitive - both for your business, as well as your users. This section documents the tools we have for handling sensitive data. \ No newline at end of file diff --git a/compliance/user_data_deletion_requests.mdx b/compliance/user_data_deletion_requests.mdx index 67393b8f9..fcc10afc6 100644 --- a/compliance/user_data_deletion_requests.mdx +++ b/compliance/user_data_deletion_requests.mdx @@ -1,5 +1,6 @@ --- title: User Data Deletion Requests API +description: User data deletion requests are for Enterprise contracts only. 
Please reach out to our [support team](mailto:support@statsig.com), your sales c --- @@ -28,7 +29,7 @@ curl \ --request POST \ --data '{"unit_type": "user_id", "ids": "1,2,3", "request_id": "test_request_1"}' \ "https://api.statsig.com/v1/delete_user_data" -``` +```yaml Response: `{"request_id":"test_request_1"}` diff --git a/console-api/introduction.mdx b/console-api/introduction.mdx index bc1221662..10e98a6cf 100644 --- a/console-api/introduction.mdx +++ b/console-api/introduction.mdx @@ -1,13 +1,16 @@ --- title: Console API Overview sidebarTitle: "Overview" +description: The "Console API" is the CRUD API for performing the actions offered on console.statsig.com without needing to go through the web UI. --- - The "Console API" is the CRUD API for performing the actions offered on console.statsig.com without needing to go through the web UI. If you have any feature requests, drop on in to our [slack channel](https://www.statsig.com/slack) and let us know. ## Base URL + +This page explains base url. + `https://statsigapi.net` ## Authorization @@ -22,4 +25,4 @@ Mutation requests (POST/PATCH/PUT/DELETE) to the Console API are limited to ~ 10 The Console API is versioned. Each version is guaranteed to not break existing usage; each new version introduces breaking changes. There is currently only one version: `20240601`. The [OpenAPI spec](https://api.statsig.com/openapi/20240601.json) for this API version is kept up-to-date. -Pass the version in the **STATSIG-API-VERSION** field in the header. For now, this is optional; in the future, this will be required. +Pass the version in the **STATSIG-API-VERSION** field in the header. For now, this is optional; in the future, this will be required. 
\ No newline at end of file diff --git a/data-warehouse-ingestion/athena.mdx b/data-warehouse-ingestion/athena.mdx index 8bf1d840a..2901dce54 100644 --- a/data-warehouse-ingestion/athena.mdx +++ b/data-warehouse-ingestion/athena.mdx @@ -5,6 +5,7 @@ keywords: - owner:tim last_update: date: 2025-09-18 +description: To set up connection with Athena, Statsig needs the following - Region - Granting Athena Access Permissions to a Statsig-owned Service Account --- ## Overview @@ -21,7 +22,7 @@ In place of granting Athena Access Permissions to a Statsig-owned Service Accoun The above IAM User will need to be given permissions to query from Athena. Here's a sample policy with required permissions to access Athena: -``` +```json { "Version": "2012-10-17", "Statement": [ diff --git a/data-warehouse-ingestion/bigquery.mdx b/data-warehouse-ingestion/bigquery.mdx index 22f286b3c..5a1bd6e5d 100644 --- a/data-warehouse-ingestion/bigquery.mdx +++ b/data-warehouse-ingestion/bigquery.mdx @@ -4,6 +4,7 @@ keywords: - owner:tim last_update: date: 2025-09-18 +description: To set up connection with BigQuery, we need the following: - Granting Permissions to a Statsig-owned Service Account - Your BigQuery Project ID --- ## Overview diff --git a/data-warehouse-ingestion/data_mapping.mdx b/data-warehouse-ingestion/data_mapping.mdx index cccc2ecdc..2f9e60ebe 100644 --- a/data-warehouse-ingestion/data_mapping.mdx +++ b/data-warehouse-ingestion/data_mapping.mdx @@ -4,6 +4,7 @@ keywords: - owner:tim last_update: date: 2025-09-18 +description: Statsig requires certain data schema in order for proper processing. 
We support 3 different types of datasets to be ingested into our platform: --- ## Overview diff --git a/data-warehouse-ingestion/databricks.mdx b/data-warehouse-ingestion/databricks.mdx index b42341c6b..2fa08f9fd 100644 --- a/data-warehouse-ingestion/databricks.mdx +++ b/data-warehouse-ingestion/databricks.mdx @@ -4,6 +4,7 @@ keywords: - owner:tim last_update: date: 2025-09-18 +description: To set up connection with Databricks, Statsig needs the following - API Key - Server Hostname - HTTP Path We can use any cluster in your project to co --- ## Overview @@ -18,7 +19,7 @@ We can use any cluster in your project to connect to your data, but we recommend ### API Key -You can generate a new API key by going to "User Settings" in your Databricks console. There, you should be able to generate a new token as shown below. +You can generate a new API key by going to "User Settings" in your Databricks console. There, you should be able to generate a new token as shown in the following example. databricks info diff --git a/data-warehouse-ingestion/faq.mdx b/data-warehouse-ingestion/faq.mdx index ae15e414f..7a2fcbf51 100644 --- a/data-warehouse-ingestion/faq.mdx +++ b/data-warehouse-ingestion/faq.mdx @@ -4,6 +4,7 @@ keywords: - owner:tim last_update: date: 2025-09-18 +description: Statsig currently accesses data warehouses from both the Statsig console service and Statsig data pipelines. If your data warehouse is IP protected, p --- ## What IP addresses will Statsig access data warehouses from? 
diff --git a/data-warehouse-ingestion/introduction.mdx b/data-warehouse-ingestion/introduction.mdx index 2f4b5664d..325991b76 100644 --- a/data-warehouse-ingestion/introduction.mdx +++ b/data-warehouse-ingestion/introduction.mdx @@ -1,5 +1,6 @@ --- title: Data Warehouse Ingestion +description: Learn how to ingest metrics and events data from your data warehouse into Statsig. --- @@ -90,7 +91,7 @@ Enterprise customers can trigger ingestion for `metrics` or `events` using the s To trigger ingestion, send a post request to the `https://api.statsig.com/v1/mark_data_ready_dwh` endpoint using your statsig API key. An example would be: -``` +```bash curl \ --header "statsig-api-key: " \ --header "Content-Type: application/json" \ diff --git a/data-warehouse-ingestion/redshift.mdx b/data-warehouse-ingestion/redshift.mdx index 930defcdb..53c7dfc6a 100644 --- a/data-warehouse-ingestion/redshift.mdx +++ b/data-warehouse-ingestion/redshift.mdx @@ -4,6 +4,7 @@ keywords: - owner:tim last_update: date: 2025-09-18 +description: To set up connection with Redshift, Statsig needs the following - Cluster Endpoint - Admin User Name - Admin User Password --- ## Overview diff --git a/data-warehouse-ingestion/s3.mdx b/data-warehouse-ingestion/s3.mdx index ebe336e9f..cc5c3248c 100644 --- a/data-warehouse-ingestion/s3.mdx +++ b/data-warehouse-ingestion/s3.mdx @@ -4,6 +4,7 @@ keywords: - owner:tim last_update: date: 2025-09-18 +description: To set up connection with S3, Statsig needs the following - Region - Bucket Name - Granting Bucket Read Access Permissions to a Statsig-owned Service --- ## Overview @@ -28,7 +29,7 @@ You will be given a Statsig owned IAM user that you'll need to grant S3 bucket p The user will need read access permissions to your bucket, you can use the below bucket policy for your convenience, replacing STATSIG_IAM_USER and YOUR_S3_BUCKET. 
-``` +```json { "Version": "2012-10-17", "Statement": [ diff --git a/data-warehouse-ingestion/snowflake.mdx b/data-warehouse-ingestion/snowflake.mdx index 6889bf360..59ff05cbe 100644 --- a/data-warehouse-ingestion/snowflake.mdx +++ b/data-warehouse-ingestion/snowflake.mdx @@ -4,6 +4,7 @@ keywords: - owner:tim last_update: date: 2025-09-18 +description: To set up connection with Snowflake, Statsig needs the following - Account Name - Database Name - Schema Name - Admin User Name --- ## Overview @@ -21,7 +22,7 @@ To set up connection with Snowflake, Statsig needs the following - Private Key Passphrase (Optional) -Admin user name and password will be used by Statsig to create a user with restricted access to query from your data warehouse. If you don't want to use this, skip ahead [here](/data-warehouse-ingestion/snowflake#custom-user-privileges) +Admin user name and password will be used by Statsig to create a user with restricted access to query from your data warehouse. If you don't want to use this, skip ahead [here](/data-warehouse-ingestion/snowflake#custom-user-privileges) ### Account Name @@ -45,7 +46,7 @@ You can extract information from here to get the required fields for Account Nam Using `-` for Account Name -For the Account Name field, you can also enter your Snowflake [account identifier](https://docs.snowflake.com/en/user-guide/admin-account-identifier.html#format-1-preferred-account-name-in-your-organization), which typically takes the form `-`. To find the `` in the Snowflake console, click on your account profile (usually at the bottom left) to view account details as shown below. +For the Account Name field, you can also enter your Snowflake [account identifier](https://docs.snowflake.com/en/user-guide/admin-account-identifier.html#format-1-preferred-account-name-in-your-organization), which typically takes the form `-`. 
To find the `` in the Snowflake console, click on your account profile (usually at the bottom left) to view account details as shown in the following example. Snowflake account profile interface @@ -66,7 +67,7 @@ To set up key-pair authentication, first follow the [snowflake documentation](ht The private key can then be provided here - Private key authentication configuration interface + Private key authentication configuration interface ### Custom User Privileges @@ -123,5 +124,5 @@ COMMIT; After running the script, input the `` and `` you created in our console, during Connection Set Up stage under the Advanced settings options. - Screen Shot 2022-09-07 at 10 36 57 AM + Screen Shot 2022-09-07 at 10 36 57 AM diff --git a/data-warehouse-ingestion/synapse.mdx b/data-warehouse-ingestion/synapse.mdx index 5279e5d22..440c4276e 100644 --- a/data-warehouse-ingestion/synapse.mdx +++ b/data-warehouse-ingestion/synapse.mdx @@ -4,6 +4,7 @@ keywords: - owner:tim last_update: date: 2025-09-18 +description: To set up connection with Azure Synapse, Statsig needs the following - Workspace SQL Endpoint - Database Name - Admin User Name --- ## Overview diff --git a/docs/server-core/node/migration-guide/_api_changes.mdx b/docs/server-core/node/migration-guide/_api_changes.mdx index d6aac135f..6d50b7ff1 100644 --- a/docs/server-core/node/migration-guide/_api_changes.mdx +++ b/docs/server-core/node/migration-guide/_api_changes.mdx @@ -1,3 +1,8 @@ +--- +title: Api Changes +description: import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; | Feature | Node Core SDK | Legacy Node SDK | Status | +--- + import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; diff --git a/docs/server-core/node/migration-guide/_statsig_options.mdx b/docs/server-core/node/migration-guide/_statsig_options.mdx index eb3459125..184cf98fd 100644 --- a/docs/server-core/node/migration-guide/_statsig_options.mdx +++ b/docs/server-core/node/migration-guide/_statsig_options.mdx @@ -1,3 
+1,8 @@ +--- +title: Statsig Options +description: Mapping of legacy Node SDK StatsigOptions to their equivalents in the Node Core SDK. +--- + | Old Option | New / Notes | | ------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------- | | `api` | Deprecated | diff --git a/docs/server-core/node/migration-guide/_user_creation.mdx b/docs/server-core/node/migration-guide/_user_creation.mdx index 5c70c5c30..c802dcf6f 100644 --- a/docs/server-core/node/migration-guide/_user_creation.mdx +++ b/docs/server-core/node/migration-guide/_user_creation.mdx @@ -1,3 +1,8 @@ +--- +title: User Creation +description: User creation works the same way in the new core SDK as in the legacy SDK. +--- + import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; @@ -18,7 +23,7 @@ User creation is still the same in the new python core SDK. "subscription_level": "premium" } ) - ``` + ``` ```python diff --git a/docs/server-core/node/migration-guide/index.mdx b/docs/server-core/node/migration-guide/index.mdx index b750370f1..8ce005be3 100644 --- a/docs/server-core/node/migration-guide/index.mdx +++ b/docs/server-core/node/migration-guide/index.mdx @@ -1,6 +1,7 @@ --- title: Node Core SDK Migration Guide displayed_sidebar: api +description: Guide for migrating from the legacy Node SDK to the Node Core SDK, covering API changes, StatsigOptions, and user creation. --- import CodeBlock from '@theme/CodeBlock'; diff --git a/dynamic-config/add-rule.mdx b/dynamic-config/add-rule.mdx index 9af820197..573c80135 100644 --- a/dynamic-config/add-rule.mdx +++ b/dynamic-config/add-rule.mdx @@ -14,22 +14,22 @@ To add new user targeting rules to a dynamic config, - Click the **Add New Rule** button - Select the criteria for identifying the users you want to target: - - You can target users based on common attributes such as their operating system as 
shown below + - You can target users based on common attributes such as their operating system as shown in the following example Operating system targeting rule configuration - - You can target users in a defined [segment](/segments) as shown below + - You can target users in a defined [segment](/segments) as shown in the following example User segment targeting rule configuration - - You can target users who are eligible for a specific feature gate as shown below; this ensures that the dynamic config is activated only for users who're exposed to the target feature gate + - You can target users who are eligible for a specific feature flag as shown in the following example; this ensures that the dynamic config is activated only for users who're exposed to the target feature flag - Feature gate targeting rule configuration + Feature Flag targeting rule configuration - To complete the dynamic config, click on the **Edit** link to open the JSON configuration editor. In the editor, type the configuration parameters and values that your application should receive and click **Confirm** diff --git a/dynamic-config/enforce-schema.mdx b/dynamic-config/enforce-schema.mdx index 7e7e7e062..09e975d3a 100644 --- a/dynamic-config/enforce-schema.mdx +++ b/dynamic-config/enforce-schema.mdx @@ -10,7 +10,7 @@ Schemas are only enforced when editing dynamic configs through the console or AP For example, if you have a dynamic config that returns settings for a site banner, you might have a schema of: -``` +```json { "$schema": "https://json-schema.org/draft/2020-12/schema", "type": "object", diff --git a/experiments/create-new.mdx b/experiments/create-new.mdx index b9473b576..0b079a2ba 100644 --- a/experiments/create-new.mdx +++ b/experiments/create-new.mdx @@ -53,14 +53,14 @@ For **Allocation**, enter the percentage of users you want to assign to this exp ### Targeting -To configure **Targeting** criteria, click to edit the **Targeting** section. 
You can either set new targeting criteria or use an existing **Feature Gate**. This will limit the experiment to only the users who meet the defined conditions. +To configure **Targeting** criteria, click to edit the **Targeting** section. You can either set new targeting criteria or use an existing **Feature Flag**. This will limit the experiment to only the users who meet the defined conditions. Experiment targeting configuration interface - If your targeting is straightforward, creating it through Inline Targeting works well. (Click "Criteria: Everyone" to get started.) -- For more advanced targeting (e.g., progressive rollouts) or if you want to maintain targeting criteria when you launch your experiment, it’s better to reference an existing **Feature Gate**. +- For more advanced targeting (e.g., progressive rollouts) or if you want to maintain targeting criteria when you launch your experiment, it’s better to reference an existing **Feature Flag**. By default, no targeting criteria are set, so your experiment will include all allocated users within the defined **Layer** or exposed user base. diff --git a/experiments/ending/make-decision.mdx b/experiments/ending/make-decision.mdx index 93d689d8e..e650cf7a2 100644 --- a/experiments/ending/make-decision.mdx +++ b/experiments/ending/make-decision.mdx @@ -14,15 +14,15 @@ for _all_ your users going forward. If the experiment happens to use parameters from a layer, the layer's parameters will now take on the shipped group's parameter values as their defaults. These are the values that _all_ your users will see going forward. -For example, suppose you have a **Demo Layer** that's configured with a parameter, **a_param**. It's default value is set to _layer_default_ as shown below. +For example, suppose you have a **Demo Layer** that's configured with a parameter, **a_param**. It's default value is set to _layer_default_ as shown in the following example. 
image -Say you decide to create an experiment, **Demo Experiment** in **Demo Layer** as shown below. +Say you decide to create an experiment, **Demo Experiment** in **Demo Layer** as shown in the following example. image -You set up **Demo Experiment** with two groups: **Control** and **Test**, intending to experiment with new values for the layer-level parameter, **a_param** as shown below. +You set up **Demo Experiment** with two groups: **Control** and **Test**, intending to experiment with new values for the layer-level parameter, **a_param** as shown in the following example. image diff --git a/experiments/ending/stop-assignments.mdx b/experiments/ending/stop-assignments.mdx index 4ab1f76ba..390b6374b 100644 --- a/experiments/ending/stop-assignments.mdx +++ b/experiments/ending/stop-assignments.mdx @@ -26,7 +26,7 @@ The **Stop Assignment** option must first be enabled in Project Settings to show Screen Shot 2024-12-04 at 12 10 28 PM ## How it Works -You can stop assignment for an experiment by clicking the Make Decision dropdown as shown below. +You can stop assignment for an experiment by clicking the Make Decision dropdown as shown in the following example. 
Stop Assignment diff --git a/experiments/exploring-results/aggregated-impact.mdx b/experiments/exploring-results/aggregated-impact.mdx index c88d13580..af64aee4a 100644 --- a/experiments/exploring-results/aggregated-impact.mdx +++ b/experiments/exploring-results/aggregated-impact.mdx @@ -5,6 +5,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: import Content from '/snippets/pulse/aggregated-impact.mdx' --- import Content from '/snippets/pulse/aggregated-impact.mdx' diff --git a/experiments/exploring-results/meta-analysis.mdx b/experiments/exploring-results/meta-analysis.mdx index e3896744e..5bb6acbeb 100644 --- a/experiments/exploring-results/meta-analysis.mdx +++ b/experiments/exploring-results/meta-analysis.mdx @@ -5,6 +5,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: import Content from '/snippets/stats-methods/meta-analysis.mdx' --- import Content from '/snippets/stats-methods/meta-analysis.mdx' diff --git a/experiments/holdouts-introduction.mdx b/experiments/holdouts-introduction.mdx index 57f12b87e..53ed98507 100644 --- a/experiments/holdouts-introduction.mdx +++ b/experiments/holdouts-introduction.mdx @@ -5,6 +5,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: Holdouts measure the aggregate impact of multiple features. It involves a "holdout" group of users that are held back from a set of features for measu --- Holdouts measure the aggregate impact of multiple features. It involves a "holdout" group of users that are held back from a set of features for measurement. While each A/B test or experiment you run compares control and test groups for that feature, a holdout compares the "holdout" group (Control) against users who have been exposed to multiple features and experiments. 
diff --git a/experiments/implementation/getting-group.mdx b/experiments/implementation/getting-group.mdx index 5c9b86080..ac8e86a78 100644 --- a/experiments/implementation/getting-group.mdx +++ b/experiments/implementation/getting-group.mdx @@ -29,7 +29,7 @@ async function getSearchItems(user: StatsigUser, searchTerm: String): String[] { return results; } } -``` +``` There are a few problems with this code: @@ -46,7 +46,7 @@ async function getSearchItems(user: StatsigUser, searchTerm: String): String[] { const numItems = experiment.get("length", 0); return numItems > 0 ? results.slice(numItems) : results; } -``` +``` Now, your code is completely decoupled from the names of experiment groups in the statsig console. You are left with a set of dynamic parameters. You can create whichever experiment groups you want out of these building blocks, and the same code will work. Want to test an unsorted list of 5 items diff --git a/experiments/implementation/implement.mdx b/experiments/implementation/implement.mdx index 90502a5da..7782d2b54 100644 --- a/experiments/implementation/implement.mdx +++ b/experiments/implementation/implement.mdx @@ -27,7 +27,7 @@ if (demoConfiguration.get("show_banner", false) { const title = demoConfiguration.get("title", "Start Demo"); banner.setTitle(title); -``` +``` You can also look at a code snippet for your particular experiment by clicking into the code snippet button on the experiment page and selecting the right SDK diff --git a/experiments/interpreting-results/access-whn.mdx b/experiments/interpreting-results/access-whn.mdx index c02016415..45cc0d2b2 100644 --- a/experiments/interpreting-results/access-whn.mdx +++ b/experiments/interpreting-results/access-whn.mdx @@ -5,6 +5,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: WHN lets you access exposures and metric results across all experiments directly in your warehouse through SQL Views defined in your Statsig project t --- @@ -90,12 +91,12 @@ 
There are three types of exports: In WHN, only the Pulse Summary may be exported, as the other two types of data are only stored [in your warehouse](https://docs.statsig.com/statsig-warehouse-native/pipeline-overview/#artifacts-and-entity-relationships). The availability of these exports are subject to our retention policy. We hold exposures data for up-to 90 days after an experiment is concluded. We hold raw user-level metrics data for 90 days. -### Pulse Summary File Description - For Feature Gates +### Pulse Summary File Description - For Feature Flags | Column Name | Description | | ---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| name | Name of the Experiment or Feature Gate | -| rule | Name of the Feature Gate Rule. | +| name | Name of the Experiment or Feature Flag | +| rule | Name of the Feature Flag Rule. | | metric_type | Category of the metric. Different metric_types are computed differently, including how they're computed in Pulse. | | metric_name | The name of the metric. For event metrics, this is the name of the event. | | metric_dimension | The subcategory of the metric. For example, if you log value in LogEvent, then value will show up as a subdimension. dimension = !statsig_topline indicates that this row reflects an aggregate across all dimensions. | @@ -117,9 +118,9 @@ In WHN, only the Pulse Summary may be exported, as the other two types of data a | Column Name | Description | | ---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| name | Name of the Experiment or Feature Gate | -| rule | Name of the Feature Gate Rule. 
| -| experiment_group | The group of users for which this metric is computed for. For a feature gate, this is pass/fail. For an experiment, this is the variant name. | +| name | Name of the Experiment or Feature Flag | +| rule | Name of the Feature Flag Rule. | +| experiment_group | The group of users for which this metric is computed for. For a feature flag, this is pass/fail. For an experiment, this is the variant name. | | metric_type | Category of the metric. Different metric_types are computed differently, including how they're computed in Pulse. | | metric_name | The name of the metric. For event metrics, this is the name of the event. | | metric_dimension | The subcategory of the metric. For example, if you log value in LogEvent, then value will show up as a subdimension. dimension = !statsig_topline indicates that this row reflects an aggregate across all dimensions. | diff --git a/experiments/interpreting-results/best-practices.mdx b/experiments/interpreting-results/best-practices.mdx index 610ffd863..05f15464e 100644 --- a/experiments/interpreting-results/best-practices.mdx +++ b/experiments/interpreting-results/best-practices.mdx @@ -5,6 +5,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: import Content from '/snippets/pulse/best-practices.mdx' --- import Content from '/snippets/pulse/best-practices.mdx' diff --git a/experiments/interpreting-results/custom-queries.mdx b/experiments/interpreting-results/custom-queries.mdx index f751d3f67..f17be9697 100644 --- a/experiments/interpreting-results/custom-queries.mdx +++ b/experiments/interpreting-results/custom-queries.mdx @@ -5,6 +5,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: import Content from '/snippets/pulse/custom-queries.mdx' --- import Content from '/snippets/pulse/custom-queries.mdx' diff --git a/experiments/interpreting-results/drill-down.mdx b/experiments/interpreting-results/drill-down.mdx index bb17e2725..110bd91d2 100644 --- 
a/experiments/interpreting-results/drill-down.mdx +++ b/experiments/interpreting-results/drill-down.mdx @@ -6,6 +6,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: import Content from '/snippets/pulse/metric-drill-down.mdx' --- import Content from '/snippets/pulse/metric-drill-down.mdx' diff --git a/experiments/interpreting-results/export.mdx b/experiments/interpreting-results/export.mdx index 7018fcf6c..2a32cd0cd 100644 --- a/experiments/interpreting-results/export.mdx +++ b/experiments/interpreting-results/export.mdx @@ -6,6 +6,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: import Content from '/snippets/pulse/export.mdx' --- import Content from '/snippets/pulse/export.mdx' diff --git a/experiments/interpreting-results/faq.mdx b/experiments/interpreting-results/faq.mdx index 90d51b020..117dc031b 100644 --- a/experiments/interpreting-results/faq.mdx +++ b/experiments/interpreting-results/faq.mdx @@ -6,6 +6,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: import Content from '/snippets/pulse/faq.mdx' --- import Content from '/snippets/pulse/faq.mdx' diff --git a/experiments/interpreting-results/participating-units.mdx b/experiments/interpreting-results/participating-units.mdx index 88a860f47..8390e2712 100644 --- a/experiments/interpreting-results/participating-units.mdx +++ b/experiments/interpreting-results/participating-units.mdx @@ -6,6 +6,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: import Content from '/snippets/pulse/participating-units.mdx' --- import Content from '/snippets/pulse/participating-units.mdx' diff --git a/experiments/interpreting-results/read-pulse.mdx b/experiments/interpreting-results/read-pulse.mdx index 54201a30d..b2c6c56c1 100644 --- a/experiments/interpreting-results/read-pulse.mdx +++ b/experiments/interpreting-results/read-pulse.mdx @@ -6,6 +6,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: import Content from 
'/snippets/pulse/read-results.mdx' --- import Content from '/snippets/pulse/read-results.mdx' diff --git a/experiments/interpreting-results/reconciling-experiment-results.mdx b/experiments/interpreting-results/reconciling-experiment-results.mdx index 4df5d8f15..7e0e3822f 100644 --- a/experiments/interpreting-results/reconciling-experiment-results.mdx +++ b/experiments/interpreting-results/reconciling-experiment-results.mdx @@ -26,7 +26,7 @@ It is important to analyze metric data only after a user has been exposed to the Statsig Cloud uses a date-based join between exposures and metric data. Experiments will include metric data from the whole of the first exposure date for each experimental unit. While some pre-experiment metric data can be included, the average treatment effect of this dilution should be null. This looks like the SQL snippet below: -``` +```sql WITH metrics as (...), exposures as (...), @@ -58,7 +58,7 @@ Statsig does support timestamp-based joins for some Enterprise Cloud customers. #### Statsig Warehouse Native Statsig WHN employs a timestamp-based join for this purpose, with an option for a date-based joins for daily data if preferred. This should look like the SQL snippet below: -``` +```sql WITH metrics as (...), exposures as (...), @@ -87,7 +87,7 @@ It's also worth noting that timezones can influence this. Timestamps for Statsig ### Exposure Duplication Exposure data must be de-duplicated before joining to ensure a single record per user. Many vendors further manage crossover users (users present in more than one experiment group), removing them from analysis and/or alerting if this occurs with high frequency. 
-``` +```sql SELECT unit_id, experiment_id, diff --git a/experiments/interpreting-results/userproperties.mdx b/experiments/interpreting-results/userproperties.mdx index f1ebf73c0..48b62d353 100644 --- a/experiments/interpreting-results/userproperties.mdx +++ b/experiments/interpreting-results/userproperties.mdx @@ -6,6 +6,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: Statsig lets you slice results by user properties. Common examples of doing this include breaking down results by user's home country, subscription s --- Statsig let's you slice results by user properties. Common examples of doing this include breaking down results by user's home country, subscription status or engagement level. diff --git a/experiments/layers-overview.mdx b/experiments/layers-overview.mdx index 5a0bf5512..ab8618f15 100644 --- a/experiments/layers-overview.mdx +++ b/experiments/layers-overview.mdx @@ -50,7 +50,7 @@ if (signUpTestV1.get("is_in_test", false)) { } // Then we display the text in the dialog -``` +``` Every time you add a new test, you need to change the code and it's only available in a new version. diff --git a/experiments/monitoring/bots.mdx b/experiments/monitoring/bots.mdx index 88f4d5314..c131952c2 100644 --- a/experiments/monitoring/bots.mdx +++ b/experiments/monitoring/bots.mdx @@ -27,7 +27,7 @@ You might, however, want to purposefully restrict what features bots see. For ex Once created, add a new rule to the segment. Set Criteria to "Browser Name". Leave Operator as "Any Of". In the Values field, copy + paste the following string in its entirety. (There is a copy button to the right.) When pasting, Statsig console will take care of splitting the bots up into individual names. 
- ``` + ```ruby TSMbot, Googlebot, FacebookBot, TwitterBot, AdsBot-Google, ImagesiftBot, Dragonbot, bingbot, YandexRenderResourcesBot, startmebot, SeznamBot, Better Uptime Bot, SeekportBot, Slackbot-LinkExpanding, googlebot, harsilbot, AhrefsBot, Applebot, PetalBot, Pokey_Bot, Preview Service; bot, Googlebot-Image, WRTNBot, GPTBot, MJ12bot, YandexBot, Slackbot, trendictionbot0, SmarshBot, Amazonbot, VirusTotalBot, GooglePlusBot, com/bot, DuckDuckBot, Discordbot, ; bot, TermlyBot, YandexAccessibilityBot, UptimeRobot, CCBot, BitSightBot, AwarioSmartBot, SiteAuditBot, pingbot, ; Bot, Pinterestbot, idealo-bot, net/bot, AwarioBot, BLEXBot, adsbot, Linkbot, AcademicBotRTU, MojeekBot, LinkedInBot, BeeperBot, robot, PopeTech-ScanBot, FullStoryBot, Storebot, Zoombot, coccocbot, Mattermost-Bot, Nigooutbot, EzoicBot, com/bots, FreshpingBot, Mail.RU_Bot, Mediumbot, tkbot, triptease-bot, KlaxoonBot, CriteoBot, SMTBot, JobboerseBot, Mediatoolkitbot, iCjobs Stellenangebote, StatusCakeBot, AppleNewsBot, ClaudeBot, AmazonAdBot, GoogleBot, SemrushBot, SynologyChatBot, YandexMobileBot, bitlybot, ) Bot, redditbot, Radius Compliance Bot, Twitterbot, PingdomBot, DotBot, amazonproductbot, iASD_SpiderBot, XBot, WallabyupBot, XBot_Senior, HatenaBlog-bot, Exabot, AASA-Bot, DuckAssistBot, StractBot, wpbot, YextBot, chatbot, Pharosbot, ClarityBot, Monsidobot, AndersPinkBot, DataForSeoBot, Robot, Quora-Bot, notebot, HyperMegaBotGettingOnlyHTMLsFromYourWebsite, bountybot, WincherBot, Leikibot, ExtendedStayBot, Caliperbot, keys-so-bot, aixnew_aibot, SeobilityBot, Synapse (bot, UOrgTestingBot, serpstatbot, Diffbot, SummalyBot, rogerbot, bidswitchbot, PiBot, aka-bot, node CCBot, seesawbot, SearchAtlas Bot, MetaJobBot, archive.org_bot, 48_safeAreaBottom, ZoominfoBot, TurnitinBot, Googlebot-Video, fixbot, taboolabot, yacybot, Plesk screenshot bot, traq-ogp-fetcher-curl-bot, Space Unfurl Bot, Gensparkbot, by fynd.bot, adbeat_bot, SurdotlyBot, Spider_Bot, DiffeoBot, Rhobot, Cookiebot, 
online-webceo-bot, dataforseobot, Google-Display-Ads-Bot, Timpibot, msnbot, AnytypeBot, com feedbot, Morningscore Bot, Magus Bot, Snap-URL-Preview (bot, BublupBot, DiscourseBot, policy adbeat_bot, htc_botdugls, RyteBot, SaberBot, fr_bot, node FullStoryBot, bot, turbotime, OtherwebBot, TypetalkBot, clever tech bot, compatible; botify, Rankabot, AspiegelBot, Wire LinkPreview Bot, oBot, Amazon-Advertising-ad-standards-bot, es_bot, Ocarinabot, dbot, tyseobot, WebExplorerSearchBot, DF Bot, WebwikiBot, DropboxPreviewBot, NetpeakCheckerBot, Sidetrade indexer bot, Dubbotbot, Senutobot, Veoozbot, Fedicabot, Nextdoorbot, ZumBot, Streamline3Bot, vebidoobot, cXensebot, YodaoBot, Scrapbox Bot, 47_safeAreaBottom, SemanticScholarBot, SiteCheckerBotCrawler, WalluBot, iAskBot, Scomplerbot, ViberBot, GnowitNewsbot, Letianpai_Robot, WellKnownBot, am a bot, OcelotBot, //boteden, TimeTreeBot, seo-audit-check-bot, SEBot, LoomlyBot, StrapBot, QualifiedBot, Swiftbot, uk_ldfc_renderbot, Jones Searchbot, tyseobotmobile, ahrefsbot, ChannelBot, emulate-seobots, twitterbot, SpeechifyBot, PhaverBot, Xbot, node DuckAssistBot, semaltbot, Bawaab_bot, net-Robot, SuperBot, KStandBot, Facebot, node AppleNewsBot, robots, com bot, siteauditbot, co Bot, SerendeputyBot, PartnerOptimizer-bot, http-spiders-bot, telegrambot, RepoLookoutBot, slackbot, LivelapBot, uk_ldfc_bot, Your robot, eu bot, nerdybot, TiggeritoBot, GraphiteBot, BLP_bbot, domainsbot, node ZoominfoBot, LinkArchiver twitter bot, 2ip bot, COIBotParser, exabot, Googlebot-Mobile, DuckDuckGo-Favicons-Bot, PerplexityBot, yoozBot, BadooBot, discobot, web-bot, SEMrushBot, Open Graph Bot, PaperLiBot, Blog Rssbot, MotoMinerBot, eventseekerBot, ResearchBot, MixrankBot, node bitlybot, SpringserveBot, Firefox superpagesbot2, DingTalkBot, MoodleBot, Brightbot, reurl-bot, osapon ) bot, foundeebot, petalbot, //botsin, SeoCherryBot, SemjiBot, TZUnfurlBot, TesseractBotAgent, yandexbot, 5) bot, PubMatic Crawler Bot, archiver/3.1.1 
+http://www.archive.org/details/archive.org_bot, msnbot-media, bountybotttt, Googlebot-News, semrushbot, Gulper Web Bot, Google-bot, Superfeedr bot, node AwarioSmartBot, GG PeekBot, playwright-bot, Clickagy Intelligence Bot, x28-job-bot, Catrobatbot, VelaBot, pinterestbot, hstspreload-bot, FandomOpenGraphBot, spbot, Paqlebot, Summalybot, //botim, aiHitBot, JobBot, InsytfulBot, Taboolabot, gptbot, edansbot, KeybaseBot, GroupMeBot, macox bot, Xing Bot, uipbot, Dcard-link-preview-bot, ezoicbot, like Gecko) bot, mj12bot, applebot, jbot, LineBotWebhook, HearsayPDFBot, DatoCmsSearchBot, Quantcastbot, ActiveComplyBot, Parser Robot, amazon-product-discovery-bot, AppsFlyerBot, TwitterCommerceBot, obot, ID bot, GetLocalBot, CapterraBot, BugBountyBot, PlurkBot, preview service; bot, BacklinksExtendedBot, Testomatobot, crawlers, NE Crawler, scoopit-crawler, Impressumscrawler, node Screaming Frog SEO Spider, Testcrawler, PAGEFREEZER CRAWLER, peer39_crawler, com crawler, Screaming Frog SEO Spider, BrightEdge Crawler, XoviOnpageCrawler, NetSeer crawler, managr-webcrawler, captify-crawler, IAS Crawler, crawler_eb_germany_2, IBM-Crawler, Elastic-Crawler, Screaming Frog Wise SEO Spider, seobilitybot, fedistatsCrawler, ISSCyberRiskCrawler, AdsTxtCrawlerTP, HubSpot Crawler, Jugendschutzprogramm-Crawler, screaming frog seo spider, about-crawlers, RavenCrawler, Atomseobot, sap-search-web-crawler, wowLink Crawler, our-crawler, Light Crawler, SiteGuruCrawler, Rightlander Crawler, web-crawler, hubspot crawler, Audisto Crawler, Automattic Analytics Crawler, crawler, Web-Crawler, pagefreezer crawler, WazzupCrawler, ecoresearchCrawler, VelenPublicWebCrawler, node GrowSEOBot, rc-crawler, ClineCrawler, naverbookmarkcrawler, IVW-Crawler, AdkernelTopicCrawler, InfobipCrawler, WebCrawler, GrapeshotCrawler, SISTRIX Crawler, Greppr Web Crawler, Server Crawler, DocBase Crawler, Web Crawler, find-seo-bot, PulsePoint-Crawler, GenomeCrawlerd, MSIECrawler, txt Crawler, ev-crawler, DocSearch Crawler, 
crawler4j, UCMore Crawler, FAST-WebCrawler, ``` diff --git a/experiments/monitoring/srm.mdx b/experiments/monitoring/srm.mdx index eb181101b..37a807df9 100644 --- a/experiments/monitoring/srm.mdx +++ b/experiments/monitoring/srm.mdx @@ -5,6 +5,7 @@ keywords: - owner:craig last_update: date: 2025-09-18 +description: SRM, or sample ratio mismatch, is a problem with experiments characterized by there being too many units in some groups, and too few in others. --- ## What is SRM? diff --git a/experiments/overview.mdx b/experiments/overview.mdx index fdd4d0529..94636e84b 100644 --- a/experiments/overview.mdx +++ b/experiments/overview.mdx @@ -4,9 +4,9 @@ sidebarTitle: "Overview" description: "Learn the fundamentals of experimentation with Statsig, including key concepts, randomization units, and statistical significance." --- -**Experimentation** is a powerful tool for making data-driven decisions that improve product outcomes and customer experiences. +**Experimentation** is a powerful tool for making data-driven decisions that improve product outcomes and user experiences. -In this doc, we'll cover key concepts of experimentation such as control variables, randomization units, and statistical significance, helping you understand the science behind A/B testing and multivariate experiments. +In this doc, we'll cover key concepts of experimentation such as control variables, randomization units, and statistical significance, helping you understand the science behind experimentation and multivariate experiments. By the end of this guide, you'll know how to use experiments to validate product changes, discover new opportunities, and drive business impact. Whether you're optimizing existing features or exploring new ideas, these fundamentals will equip you to run effective experiments and iterate faster with confidence. @@ -27,9 +27,9 @@ Experiments are ideal when you want to: ## Why experiment?
-Controlled experiments are the most scientifically reliable way to establish **causality** between your product changes and their effect on customer behavior. +Controlled experiments are the most scientifically reliable way to establish **causality** between your product changes and their effect on user behavior. By running experiments, you can: -- **Validate Hypotheses**: Only ship features that have been proven to improve the customer experience or drive key business metrics. +- **Validate Hypotheses**: Only ship features that have been proven to improve the user experience or drive key business metrics. - **Measure Success**: Measure feature performance post-launch and detect any unexpected side effects. - **Drive Innovation**: Experiments allow teams to iterate faster by providing real-time feedback on product performance. They help you make better, data-driven decisions that accelerate business growth. @@ -41,11 +41,11 @@ In comparison, historical metrics may show correlation, but experiments allow yo ### Control variables -A **control variable** is the variable in an experiment that is manipulated to observe its effect on key metrics. In a simple A/B test, the control variable usually has two values (A and B). More complex experiments may have additional values (e.g., A, B, C, D), known as multivariate experiments. +A **control variable** is the variable in an experiment that is manipulated to observe its effect on key metrics. In a simple experiment, the control variable usually has two values (A and B). More complex experiments may have additional values (e.g., A, B, C, D), known as multivariate experiments. ### Variants -A **variant** is a specific version of the product or feature being tested. For example, in an A/B test: +A **variant** is a specific version of the product or feature being tested. For example, in an experiment: - **A (Control)**: Represents the current state of the product or feature.
- **B (Treatment)**: Represents the modified state you want to evaluate. @@ -130,7 +130,7 @@ Example: --- ## Tutorials -- [Run your first A/B test](/guides/abn-tests) +- [Run your first experiment](/guides/abn-tests) - [Create an experiment using a userID](/experiments-plus/create-new) - [Create an experiment using a customID](/guides/experiment-on-custom-id-types) - [Use a language specific Statsig SDK to implement an experiment in your application](/experiments-plus/implement) diff --git a/experiments/statistical-methods/confidence-intervals.mdx b/experiments/statistical-methods/confidence-intervals.mdx index 5c7a84df2..23c2bb4c4 100644 --- a/experiments/statistical-methods/confidence-intervals.mdx +++ b/experiments/statistical-methods/confidence-intervals.mdx @@ -6,6 +6,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: Learn how Statsig computes confidence intervals and how to interpret them when reading experiment results. --- import Content from '/snippets/stats-methods/confidence-intervals.mdx' diff --git a/experiments/statistical-methods/introduction.mdx b/experiments/statistical-methods/introduction.mdx index 34efa0342..176aaf0ce 100644 --- a/experiments/statistical-methods/introduction.mdx +++ b/experiments/statistical-methods/introduction.mdx @@ -6,6 +6,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: This section provides a detailed view of the statistical methodology that powers [Pulse](/pulse/read-pulse).
Delivering these results involves a series --- ## Statsig Stats Engine diff --git a/experiments/statistical-methods/methodologies/benjamini-hochberg-procedure.mdx b/experiments/statistical-methods/methodologies/benjamini-hochberg-procedure.mdx index c469be32e..885b406ff 100644 --- a/experiments/statistical-methods/methodologies/benjamini-hochberg-procedure.mdx +++ b/experiments/statistical-methods/methodologies/benjamini-hochberg-procedure.mdx @@ -6,6 +6,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: import Content from '/snippets/stats-methods/benjamini-hochberg-procedure.mdx' --- import Content from '/snippets/stats-methods/benjamini-hochberg-procedure.mdx' diff --git a/experiments/statistical-methods/methodologies/bonferroni-correction.mdx b/experiments/statistical-methods/methodologies/bonferroni-correction.mdx index 5e4354c73..0b00a341a 100644 --- a/experiments/statistical-methods/methodologies/bonferroni-correction.mdx +++ b/experiments/statistical-methods/methodologies/bonferroni-correction.mdx @@ -6,6 +6,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: import Content from '/snippets/stats-methods/bonferroni-correction.mdx' --- import Content from '/snippets/stats-methods/bonferroni-correction.mdx' diff --git a/experiments/statistical-methods/methodologies/cuped.mdx b/experiments/statistical-methods/methodologies/cuped.mdx index d2ac09d67..726450f61 100644 --- a/experiments/statistical-methods/methodologies/cuped.mdx +++ b/experiments/statistical-methods/methodologies/cuped.mdx @@ -6,6 +6,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: import Content from '/snippets/stats-methods/cuped.mdx' --- import Content from '/snippets/stats-methods/cuped.mdx' diff --git a/experiments/statistical-methods/methodologies/delta-method-whn.mdx b/experiments/statistical-methods/methodologies/delta-method-whn.mdx index 486c1559f..d3e521774 100644 --- 
a/experiments/statistical-methods/methodologies/delta-method-whn.mdx +++ b/experiments/statistical-methods/methodologies/delta-method-whn.mdx @@ -6,6 +6,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: import DeltaMethod from '/snippets/stats-methods/delta-method.mdx'; --- import DeltaMethod from '/snippets/stats-methods/delta-method.mdx'; diff --git a/experiments/statistical-methods/methodologies/delta-method.mdx b/experiments/statistical-methods/methodologies/delta-method.mdx index bf89b88a7..54194c800 100644 --- a/experiments/statistical-methods/methodologies/delta-method.mdx +++ b/experiments/statistical-methods/methodologies/delta-method.mdx @@ -6,6 +6,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: import Content from '/snippets/stats-methods/delta-method.mdx' --- import Content from '/snippets/stats-methods/delta-method.mdx' diff --git a/experiments/statistical-methods/methodologies/fieller-intervals-whn.mdx b/experiments/statistical-methods/methodologies/fieller-intervals-whn.mdx index d7488ebd3..50150c666 100644 --- a/experiments/statistical-methods/methodologies/fieller-intervals-whn.mdx +++ b/experiments/statistical-methods/methodologies/fieller-intervals-whn.mdx @@ -6,6 +6,7 @@ keywords: - owner:liz last_update: date: 2025-09-18 +description: import FiellerIntervals from '/snippets/stats-methods/fieller-intervals.mdx'; --- import FiellerIntervals from '/snippets/stats-methods/fieller-intervals.mdx'; diff --git a/experiments/statistical-methods/methodologies/fieller-intervals.mdx b/experiments/statistical-methods/methodologies/fieller-intervals.mdx index 04bf2082e..bcf98d394 100644 --- a/experiments/statistical-methods/methodologies/fieller-intervals.mdx +++ b/experiments/statistical-methods/methodologies/fieller-intervals.mdx @@ -6,6 +6,7 @@ keywords: slug: /stats-engine/methodologies/fieller-intervals last_update: date: 2025-09-18 +description: import Content from 
'/snippets/stats-methods/fieller-intervals.mdx' --- import Content from '/snippets/stats-methods/fieller-intervals.mdx' diff --git a/experiments/statistical-methods/methodologies/one-sample-test.mdx b/experiments/statistical-methods/methodologies/one-sample-test.mdx index 202ed01b6..1dde86b2d 100644 --- a/experiments/statistical-methods/methodologies/one-sample-test.mdx +++ b/experiments/statistical-methods/methodologies/one-sample-test.mdx @@ -5,6 +5,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: A one-sample test compares a single sample of data against a known or hypothesized value to determine if there is a statistically significant differen --- ## One-Sample Tests (aka Fixed-Value Test) diff --git a/experiments/statistical-methods/methodologies/one-sided-test.mdx b/experiments/statistical-methods/methodologies/one-sided-test.mdx index bea536206..c6d0d6ee9 100644 --- a/experiments/statistical-methods/methodologies/one-sided-test.mdx +++ b/experiments/statistical-methods/methodologies/one-sided-test.mdx @@ -6,6 +6,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: import Content from '/snippets/stats-methods/one-sided-test.mdx' --- import Content from '/snippets/stats-methods/one-sided-test.mdx' diff --git a/experiments/statistical-methods/methodologies/srm-checks-whn.mdx b/experiments/statistical-methods/methodologies/srm-checks-whn.mdx index 9f62715e3..24b9ed33f 100644 --- a/experiments/statistical-methods/methodologies/srm-checks-whn.mdx +++ b/experiments/statistical-methods/methodologies/srm-checks-whn.mdx @@ -6,6 +6,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: import SRMChecks from '/snippets/stats-methods/srm-checks.mdx'; --- import SRMChecks from '/snippets/stats-methods/srm-checks.mdx'; diff --git a/experiments/statistical-methods/methodologies/winsorization-whn.mdx b/experiments/statistical-methods/methodologies/winsorization-whn.mdx index 6c3c7d8d4..92844e24a 100644 --- 
a/experiments/statistical-methods/methodologies/winsorization-whn.mdx +++ b/experiments/statistical-methods/methodologies/winsorization-whn.mdx @@ -6,6 +6,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: import Winsorization from '/snippets/stats-methods/winsorization.mdx'; --- import Winsorization from '/snippets/stats-methods/winsorization.mdx'; diff --git a/experiments/statistical-methods/methodologies/winsorization.mdx b/experiments/statistical-methods/methodologies/winsorization.mdx index f372d8478..62b624107 100644 --- a/experiments/statistical-methods/methodologies/winsorization.mdx +++ b/experiments/statistical-methods/methodologies/winsorization.mdx @@ -6,6 +6,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: import Content from '/snippets/stats-methods/winsorization.mdx' --- import Content from '/snippets/stats-methods/winsorization.mdx' diff --git a/experiments/statistical-methods/methodologies/winsorization_variants/_cloud.mdx b/experiments/statistical-methods/methodologies/winsorization_variants/_cloud.mdx index 3fa0ff9bb..a0f7f4de7 100644 --- a/experiments/statistical-methods/methodologies/winsorization_variants/_cloud.mdx +++ b/experiments/statistical-methods/methodologies/winsorization_variants/_cloud.mdx @@ -1,3 +1,8 @@ +--- +title: Cloud +description: At Statsig, the default percentile for winsorization is 99.9%. This reduces the influence of extreme outliers caused by factors such as logging errors +--- + At Statsig, the default percentile for winsorization is 99.9%. This reduces the influence of extreme outliers caused by factors such as logging errors or bad actors. Winsorization is applied to to sum and event count metrics, including imported metrics. Winsorization will not be applied to Mean, Ratio, Funnel, Participation, or User Accounting metrics. 
diff --git a/experiments/statistical-methods/methodologies/winsorization_variants/_warehouse.mdx b/experiments/statistical-methods/methodologies/winsorization_variants/_warehouse.mdx index 27199e6d8..6fada04ba 100644 --- a/experiments/statistical-methods/methodologies/winsorization_variants/_warehouse.mdx +++ b/experiments/statistical-methods/methodologies/winsorization_variants/_warehouse.mdx @@ -1,3 +1,8 @@ +--- +title: Warehouse +description: Statsig Warehouse Native lets you configure this per metric - and choose explicitly the upper and/or lower bounds to apply. +--- + Statsig Warehouse Native lets you configure this per metric - and choose explicitly the upper and/or lower bounds to apply. Winsorization configuration interface diff --git a/experiments/statistical-methods/p-value.mdx b/experiments/statistical-methods/p-value.mdx index 0c68a09bf..20f6db622 100644 --- a/experiments/statistical-methods/p-value.mdx +++ b/experiments/statistical-methods/p-value.mdx @@ -6,6 +6,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: import Content from '/snippets/stats-methods/p-value.mdx' --- import Content from '/snippets/stats-methods/p-value.mdx' diff --git a/experiments/statistical-methods/pre-experiment-bias.mdx b/experiments/statistical-methods/pre-experiment-bias.mdx index 6ff3c29c9..7e71c0de0 100644 --- a/experiments/statistical-methods/pre-experiment-bias.mdx +++ b/experiments/statistical-methods/pre-experiment-bias.mdx @@ -6,6 +6,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: import Content from '/snippets/stats-methods/pre-experiment-bias.mdx' --- import Content from '/snippets/stats-methods/pre-experiment-bias.mdx' diff --git a/experiments/statistical-methods/topline-impact.mdx b/experiments/statistical-methods/topline-impact.mdx index 4e010b436..be2afd178 100644 --- a/experiments/statistical-methods/topline-impact.mdx +++ b/experiments/statistical-methods/topline-impact.mdx @@ -6,6 +6,7 @@ keywords: - owner:vm 
last_update: date: 2025-09-18 +description: import Content from '/snippets/stats-methods/topline-impact.mdx' --- import Content from '/snippets/stats-methods/topline-impact.mdx' diff --git a/experiments/statistical-methods/variance-reduction.mdx b/experiments/statistical-methods/variance-reduction.mdx index 7d3e0c35e..585b7b7e8 100644 --- a/experiments/statistical-methods/variance-reduction.mdx +++ b/experiments/statistical-methods/variance-reduction.mdx @@ -6,6 +6,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: import Content from '/snippets/stats-methods/variance-reduction.mdx' --- import Content from '/snippets/stats-methods/variance-reduction.mdx' diff --git a/experiments/statistical-methods/variance.mdx b/experiments/statistical-methods/variance.mdx index 5ede35caf..83d0f1978 100644 --- a/experiments/statistical-methods/variance.mdx +++ b/experiments/statistical-methods/variance.mdx @@ -6,6 +6,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: import Content from '/snippets/stats-methods/variance.mdx' --- import Content from '/snippets/stats-methods/variance.mdx' diff --git a/experiments/templates/decision-framework.mdx b/experiments/templates/decision-framework.mdx index be9dc67ea..cf2769b7c 100644 --- a/experiments/templates/decision-framework.mdx +++ b/experiments/templates/decision-framework.mdx @@ -6,6 +6,7 @@ keywords: - owner:Kaz last_update: date: 2025-09-18 +description: A Decision Framework for experiment templates allows teams to standardize their interpretation of results and decision-making process. Once configured --- A Decision Framework for experiment templates allows teams to standardize their interpretation of results and decision-making process. Once configured, it provides clear recommendations for which group to ship based on experimental outcomes. While the framework highlights recommended actions based on metric movements, it does not enforce any actions. 
diff --git a/experiments/templates/templates.mdx b/experiments/templates/templates.mdx index 795874e09..9b1ffcea7 100644 --- a/experiments/templates/templates.mdx +++ b/experiments/templates/templates.mdx @@ -1,5 +1,6 @@ --- title: Templates +description: Templates enable you to create a blueprint for gates/dynamic configs/ experiments to enable standardization and reusability across your Project. Templ --- ## Overview @@ -45,7 +46,7 @@ Templates can be managed via the **Templates** setting under **Product Configura ## Creating Configs from Templates -To create a config using a template, at the time of config creation, select a template to apply from the template selector. If templates are required at the Organization or Team-level, the user will be blocked from proceeding with config creation until they've selected an approved template. The list of template options in the drop-down is configured at the team level (see below). +To create a config using a template, at the time of config creation, select a template to apply from the template selector. If templates are required at the Organization or Team-level, the user will be blocked from proceeding with config creation until they've selected an approved template. The list of template options in the drop-down is configured at the team level (refer to the following example). Template Selection @@ -63,7 +64,7 @@ There are a few key layers of settings governing templates, namely at the- Within Experiment and Gate Policies (**Settings** -> **Project Configuration** -> **Feature Management**/ **Experimentation** - **Organization Tab**), you can enforce that a template is used for any new gate/ dynamic config/ experiment creation. Only organization admins can configure this setting. NOTE that you must create at least 1 experiment/ gate template for users to choose if you toggle on this setting, otherwise they will be blocked in creating new configs. 
- Org-level Feature Gate Templates Settings + Org-level Feature Flag Templates Settings ### Team-level Templates Settings diff --git a/experiments/types/switchback-tests.mdx b/experiments/types/switchback-tests.mdx index f83fdab3a..1f92a0bd7 100644 --- a/experiments/types/switchback-tests.mdx +++ b/experiments/types/switchback-tests.mdx @@ -13,7 +13,7 @@ Another common use case for switchbacks occurs when applying different variants Switchback tests are often carried out across multiple "buckets", typically regions or other defined groups that are flipped between test and control treatments over the course of the experiment. -### Example +## Example Let's say you are a rideshare platform and want to test pricing. You initially consider splitting your riders into two groups, one with the higher price and one with a lower price. @@ -133,7 +133,7 @@ Burn-in/ burn-out periods enable you to define periods at both the beginning and ## Reading Results -Both Diagnostics and Pulse metric lifts results for Switchback tests will look and feel like Statsig's traditional A/B tests, with a few modifications- +Both Diagnostics and Pulse metric lifts results for Switchback tests will look and feel like Statsig's traditional experiments, with a few modifications- - **No hourly Pulse-** At the beginning of a traditional A/B/n experiment on Statsig, you can start to see hourly Pulse results flow through within ~10-15 minutes of experiment start. Given in a Switchback you will only see either *all* Test or *all* Control exposures right at experiment start, we have disabled Hourly Pulse until you have a meaningful amount of data. However, in lieu of Hourly Pulse you can still leverage the more real-time **Diagnostics** tab to verify checks are coming in and bucketing as expected. - **No time-series**- The Daily and Days Since First Exposure time-series are not available for Switchback tests.
This is due to the bootstrapping methodology used to obtain the statistics, which relies on pooling all the available days together in order to have enough statistical power. diff --git a/faq.mdx b/faq.mdx index 0da86be0a..1d2c54846 100644 --- a/faq.mdx +++ b/faq.mdx @@ -37,7 +37,7 @@ if (otherEngine.getExperiment('button_color_test').getGroup() === 'Control') { // Statsig parameter approach — variants can be changed from the console const color = statsig.getExperiment('button_color_test').getString('button_color', 'BLACK'); -``` +``` --- diff --git a/feature-flags/conditions.mdx b/feature-flags/conditions.mdx index f5454fb33..9cae012fe 100644 --- a/feature-flags/conditions.mdx +++ b/feature-flags/conditions.mdx @@ -1,27 +1,27 @@ --- -title: Feature Gate rule criteria -description: Statsig feature gates contain a list of rules that are evaluated in order from top to bottom. This page describes in more detail how these rules are evaluated and lists all currently supported conditions. +title: Feature Flag rule criteria +description: Statsig feature flags contain a list of rules that are evaluated in order from top to bottom. This page describes in more detail how these rules are evaluated and lists all currently supported conditions. --- -Statsig feature gates contain a list of rules that are evaluated in order from top to bottom. The page describes in more detail how these rules are evaluated and lists all currently supported conditions. +Statsig feature flags contain a list of rules that are evaluated in order from top to bottom. The page describes in more detail how these rules are evaluated and lists all currently supported conditions. ## Rule Evaluation The rules that you create are evaluated in the order they're listed. For each rule, the **criteria** or **conditions** determine which users _qualify_ for the Pass/Fail treatments. The Pass percentage further determines the percentage of _qualifying_ users that will be exposed to the new feature.
The remaining _qualifying_ users will see the feature disabled. -Suppose you set up your rules as shown below, the following flow chart illustrates how Statsig evaluates these rules. +Suppose you set up your rules as shown in the following example, the following flow chart illustrates how Statsig evaluates these rules. Example Rules Gate - Feature gate rules evaluation flowchart + Feature Flag rules evaluation flowchart Note that as soon as a user qualifies based on the condition in a given rule, Statsig doesn't evaluate subsequent rules for this user. Statsig then picks the qualifying user to be in either the Pass or Fail group of that rule. -Also note that in the example, the third rule for **Remaining Folks** captures all users who don't qualify for previous two rules. If we were to remove this third rule, then only a subset of your users (users in pools 1 and 2) would qualify for this feature gate and for further analysis, not your total user base. +Also note that in the example, the third rule for **Remaining Folks** captures all users who don't qualify for previous two rules. If we were to remove this third rule, then only a subset of your users (users in pools 1 and 2) would qualify for this feature flag and for further analysis, not your total user base. ### Client vs Server SDKs All of the following conditions work on both client and server SDKs. Client SDKs handle these conditions a bit more automatically for you - if you do not provide a userID, client SDKs rely on an auto-generated "stable identifier" which is persisted to local storage. @@ -31,7 +31,7 @@ In addition, if you do not automatically set an IP or User Agent (UA), the clien Evaluations at a given percentage are *stable* with respect to the unitID. For example, if the gate/config/experiment/layer has a unit type of "userID", and userID = 4 passes a condition at a 50% rollout, they will always pass at that 50% rollout. 
The same applies for `customIDs`, if the unit type of the entity is that `customID`. Want to reset that stability? See "Resalting" below. ### Resalting -Gate evaluations are stable for a given gate, percentage rollout, and user ID. This is made possible by the salt associated with a feature gate. If you want to reset a gate, triggering a reshuffle of users, you can "resalt" a gate from the dropdown menu in the top right of the feature gate details page. +Gate evaluations are stable for a given gate, percentage rollout, and user ID. This is made possible by the salt associated with a feature flag. If you want to reset a gate, triggering a reshuffle of users, you can "resalt" a gate from the dropdown menu in the top right of the feature flag details page. Resalt UI @@ -39,8 +39,8 @@ Gate evaluations are stable for a given gate, percentage rollout, and user ID. T ### Partial Rollouts While 0% or 100% rollouts for gates are simply "on for users matching this rule"/"off for users matching this rule", each rule allows you to specify a percentage of qualifying users who should pass (see the new feature). -If you want to get [Pulse Results](/pulse/read-pulse) (metric movements caused by a feature), simply specifying a number between 0% and 100% will create a random allocation of users in Pass/Fail or "test"/"control" groups for a simple A/B test. -You can use this to validate that a new feature does not regress existing metrics as you roll it out to everyone. Statsig suggests a 2% -> 10% -> 50% -> 100% roll out strategy. Each progressive roll out will generate its own Pulse Results as shown below. +If you want to get [Pulse Results](/pulse/read-pulse) (metric movements caused by a feature), simply specifying a number between 0% and 100% will create a random allocation of users in Pass/Fail or "test"/"control" groups for a simple experiment. +You can use this to validate that a new feature does not regress existing metrics as you roll it out to everyone.
Statsig suggests a 2% -> 10% -> 50% -> 100% roll out strategy. Each progressive roll out will generate its own Pulse Results as shown in the following example. Metric Lifts @@ -80,7 +80,7 @@ Usage: Percentage rollout on the remainder of users that reach this condition. T Supported Operators: `None. Percentage based only.` -Example usage: 50/50 rollout to A/B test a new feature. Or 0% to hide the feature for all people not matching a set of rules. Or 100% to show the feature to the remaining users who did not meet a condition above. +Example usage: 50/50 rollout to experiment with a new feature. Or 0% to hide the feature for all people not matching a set of rules. Or 100% to show the feature to the remaining users who did not meet a condition above. Everyone 50/50 condition example @@ -310,11 +310,11 @@ Example: Only show a feature to 20 somethings, as marked by the privateAttribute -Once a user is exposed, they will be included in the analysis going forward. They saw the new feature and were affected. If the feature gate rules are modified or the user's attributes change in a way that the user no longer qualifies, they will stop receiving the new feature. However, they will continue to be counted for analysis. Once you roll out the feature, all users will see the new feature; alternatively, if you turn off the feature gate, all users will see the control (feature disabled). In either case (roll out or turn off), Statsig performs no further analysis. +Once a user is exposed, they will be included in the analysis going forward. They saw the new feature and were affected. If the feature flag rules are modified or the user's attributes change in a way that the user no longer qualifies, they will stop receiving the new feature. However, they will continue to be counted for analysis. Once you roll out the feature, all users will see the new feature; alternatively, if you turn off the feature flag, all users will see the control (feature disabled).
In either case (roll out or turn off), Statsig performs no further analysis. -When you add user IDs in the **Pass** or **Fail** lists of your feature gate, these users will see the appropriate treatment but will not be included in the analysis. +When you add user IDs in the **Pass** or **Fail** lists of your feature flag, these users will see the appropriate treatment but will not be included in the analysis. \ No newline at end of file diff --git a/feature-flags/overview.mdx b/feature-flags/overview.mdx index 289a53c0d..6a187fcbc 100644 --- a/feature-flags/overview.mdx +++ b/feature-flags/overview.mdx @@ -13,7 +13,7 @@ Devs often use them to turn on certain features for a small percentage of the to ## When to use -#### Use when you need to... +### Use when you need to... - Schedule gradual feature rollouts to safely deploy new code - Set up dev staging environments before code hits production, like dogfooding - Have a just-in-case "kill switch" that lets you immediately turn off a particular code branch for users in production diff --git a/feature-flags/safeguards-overview.mdx b/feature-flags/safeguards-overview.mdx index 659894ff5..23dc5d252 100644 --- a/feature-flags/safeguards-overview.mdx +++ b/feature-flags/safeguards-overview.mdx @@ -25,7 +25,7 @@ You must create at least one Rollout Alert or Topline Alert before configuring S ## Two types of Safeguards There are two different types of alerts a Safeguard can use to take an action on your Feature Gate: -#### Rollout Alert +### Rollout Alert Definition: Monitor the regression of metric delta between users who pass and fail your Feature Gate Use-case: When you want to ensure that your Feature Gate is not causing any negative drift on the users getting the new flag variation. 
Only works on partially rolled out rules (pass rate between 0% and 100%) diff --git a/feature-flags/test-gate.mdx b/feature-flags/test-gate.mdx index b538d48a6..8d8c019d1 100644 --- a/feature-flags/test-gate.mdx +++ b/feature-flags/test-gate.mdx @@ -1,9 +1,9 @@ --- -title: "Test your Feature Gate" -description: "Learn how to validate your feature gate using built-in tools, test apps, and live diagnostics in the Statsig console" +title: "Test your Feature Flag" +description: "Learn how to validate your feature flag using built-in tools, test apps, and live diagnostics in the Statsig console" --- -There are three ways to test your feature gate and to validate that it's working as expected with the rules you have created: +There are three ways to test your feature flag and to validate that it's working as expected with the rules you have created: 1. Using the built-in **Test Gate** tool in the Statsig console 2. Using the prototype Javascript **Test App** available in the Statsig console @@ -11,18 +11,18 @@ There are three ways to test your feature gate and to validate that it's working ## Option 1: Use the Test Gate tool -To validate your feature gate using the built-in Test Gate tool: +To validate your feature flag using the built-in Test Gate tool: - Log into the Statsig console at https://console.statsig.com -- On the left-hand navigation panel, select **Feature Gates** -- Select the feature gate that you want to validate -- At the bottom of the page, the **Test Gate** window that lists all properties available in the rules you have created as shown below. +- On the left-hand navigation panel, select **Feature Flags** +- Select the feature flag that you want to validate +- At the bottom of the page, the **Test Gate** window that lists all properties available in the rules you have created as shown in the following example. 
Test Gate interface showing property fields -- Click in the window and edit the value of the Email property to include the users that you want to target. For example, type jdoe@example.com as shown below. When email domain matches "@example.com", the feature gate check succeeds and the window shows a PASS. Otherwise, it fails and the window shows a FAIL. +- Click in the window and edit the value of the Email property to include the users that you want to target. For example, type jdoe@example.com as shown in the following example. When email domain matches "@example.com", the feature flag check succeeds and the window shows a PASS. Otherwise, it fails and the window shows a FAIL. Test Gate showing PASS result for email validation @@ -30,12 +30,12 @@ To validate your feature gate using the built-in Test Gate tool: ## Option 2: Use the Statsig Test App -To validate your feature gate using the Test App: +To validate your feature flag using the Test App: - Log into the Statsig console at https://console.statsig.com -- On the left-hand navigation panel, select **Feature Gates** -- Select the feature gate that you want to validate -- At the bottom of the page, click on **Check Gate in Test App** at the top right of the Test Gate window as shown below by the red arrow; this will open a new browser window with a prototype Javascript client that initializes and calls the Statsig `checkGate` API. +- On the left-hand navigation panel, select **Feature Flags** +- Select the feature flag that you want to validate +- At the bottom of the page, click on **Check Gate in Test App** at the top right of the Test Gate window as shown in the following example by the red arrow; this will open a new browser window with a prototype Javascript client that initializes and calls the Statsig `checkGate` API. 
Check Gate in Test App button location @@ -43,19 +43,19 @@ To validate your feature gate using the Test App: ## Option 3: Use the Diagnostics tab -To validate your feature gate using a live log stream: +To validate your feature flag using a live log stream: - Log into the Statsig console at https://console.statsig.com -- On the left-hand navigation panel, select **Feature Gates** -- Select the feature gate that you want to validate +- On the left-hand navigation panel, select **Feature Flags** +- Select the feature flag that you want to validate - Click on the **Diagnostics** tab (next to the Setup tab) -- Scroll down to the **Exposure Stream** panel, where you will see a live stream of gate check events as they happen as shown below +- Scroll down to the **Exposure Stream** panel, where you will see a live stream of gate check events as they happen as shown in the following example Exposure Stream panel showing live gate check events -- In the **Event Count by Group panel** as shown below, you can also validate that your application is recording events as expected for users who are exposed to the new feature (or not). Specifically, if you've started to record a new event type to test the impact of a new feature, you can also validate that these events are starting to show as more users are exposed to the new feature. +- In the **Event Count by Group panel** as shown in the following example, you can also validate that your application is recording events as expected for users who are exposed to the new feature (or not). Specifically, if you've started to record a new event type to test the impact of a new feature, you can also validate that these events are starting to show as more users are exposed to the new feature. 
Event Count by Group panel showing feature exposure metrics diff --git a/guides/abn-tests.mdx b/guides/abn-tests.mdx index e320b0f6d..3372771a2 100644 --- a/guides/abn-tests.mdx +++ b/guides/abn-tests.mdx @@ -1,5 +1,6 @@ --- title: "Run your first A/B test" +description: In this guide, you will create and implement your first experiment in Statsig from end to end. --- In this guide, you will create and implement your first experiment in Statsig from end to end. There are many types of experiments you can set up in Statsig, but this guide will walk through the most common one: an A/B test. diff --git a/guides/cdn-edge-testing.mdx b/guides/cdn-edge-testing.mdx index 2914225c7..186e23789 100644 --- a/guides/cdn-edge-testing.mdx +++ b/guides/cdn-edge-testing.mdx @@ -5,10 +5,11 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: Most users with heavy web traffic use a CDN to serve resources from cache, in order to minimize hit to their web servers. --- ## Background -Most customers with heavy web traffic use a CDN to serve resources from cache, in order to minimize hit to their web servers. This has historically made testing challenging because you don’t have the luxury of calling the SDK for all requests — but now with the emergence of Edge compute (offered by *most providers*), customers can now run code at their CDN edge, allowing them to assign users to tests and determine which resources to serve in a convenient and performant way. +Most users with heavy web traffic use a CDN to serve resources from cache, in order to minimize hit to their web servers. 
This has historically made testing challenging because you don’t have the luxury of calling the SDK for all requests — but now with the emergence of Edge compute (offered by *most providers*), users can now run code at their CDN edge, allowing them to assign users to tests and determine which resources to serve in a convenient and performant way. This pattern is optimal for testing with cached content without sacrificing cache-hit ratio. For scenarios where running the Statsig SDK at the edge is not possible, the sdk must be implemented on the origin server, or you should consider using a client SDK.
@@ -24,7 +25,7 @@ This pattern is optimal for testing with cached content without sacrificing cach - Use a provider-specific Data Adapter — This will allow the Statsig SDK to initialize using the configuration stored at the edge near your function. Links for each provider provided below. - Use [statsig-node-lite](https://www.npmjs.com/package/statsig-node-lite) — This is a slimmed down version of the Node SDK, which includes only the essentials and dramatically improves initialization performance for cold-starts requests. - Persist a uuid — As always, you'll need some sort of user identifier that can be used to consistently assign a user to your test buckets. This can typically be solved by generating a uuid in your function and setting it to a cookie. -- Persist assignments in a cookie — Some customers will set assignments to a cookie for the purpose of a performance optimization, allowing your code to skip sdk calls if the user is already assigned to a test. This is best defined as a session-cookie (a cookie that expires when user closes their browser). +- Persist assignments in a cookie — Some users will set assignments to a cookie for the purpose of a performance optimization, allowing your code to skip sdk calls if the user is already assigned to a test. This is best defined as a session-cookie (a cookie that expires when user closes their browser). - Persist client instance — [This pattern](/guides/serverless#usage) allows the Statsig client instance to persists across requests when the edge function remains warm and will improve performance. - Use [Target Apps](/sdk-keys/target-apps) — Target apps will allow you to sync a specific subset of experiments/gates to your edge function, reducing the footprint of your project config and improving performance. Target Apps are compatible with all of the "config sync" integrations. 
- Consider [peaking at experiment assignments](https://github.com/statsig-io/node-js-lite-server-sdk/blob/d88aab788a16d0d38adf851e67c21bb846f12d24/src/index.ts#L191) and avoid logging exposures. You should only log exposure events once a user has been exposed to the treatment, otherwise your test results may become polluted with users that didn't see the treatment. @@ -43,7 +44,7 @@ We offer both an [integration with Cloudflare KV](/integrations/cloudflare), and ## Fastly Implementation -Fastly Compute platform supports Functions at the Edge, affording customers the ability to handle assignment at the edge. +Fastly Compute platform supports Functions at the Edge, affording users the ability to handle assignment at the edge. ### KV (fully supported) [Fastly's KV storage solution](https://www.fastly.com/blog/be-among-the-first-to-try-the-greatest-kv-store-ever-made) is touted as being highly-performant and is now their recommended solution over their legacy ConfigStore documented below. diff --git a/guides/check-gate.mdx b/guides/check-gate.mdx index ff0c0e33b..9d40d8990 100644 --- a/guides/check-gate.mdx +++ b/guides/check-gate.mdx @@ -1,5 +1,6 @@ --- title: "Create your first feature flag" +description: This tutorial walks you through how to check your first Feature Gate in Statsig from end to end. Feature Gates, also known as feature flags, are a way to safely control the rollout of new features to your users without deploying additional code. --- This tutorial walks you through how to check your first Feature Gate in Statsig from end to end. Feature Gates, also known as feature flags, are a way to safely control the rollout of new features to your users without deploying additional code. Common examples for using Feature Gates include shipping new UI elements, API endpoints, or product features. @@ -73,7 +74,7 @@ Statsig offers over 20 client and server-side SDKs. 
Check out the full list of [ ```bash npm npm install @statsig/react -``` +``` @@ -83,7 +84,7 @@ npm install @statsig/react ```tsx Import SDK import { StatsigProvider } from "@statsig/react-bindings"; -``` +``` @@ -103,7 +104,7 @@ function App() { } export default App; -``` +``` diff --git a/guides/cms-integrations.mdx b/guides/cms-integrations.mdx index 584658c90..89ad47bd7 100644 --- a/guides/cms-integrations.mdx +++ b/guides/cms-integrations.mdx @@ -5,6 +5,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: One fairly common question we get is around how to use Statsig with an existing CMS. --- ### Using Statsig with a CMS diff --git a/guides/config-history.mdx b/guides/config-history.mdx index 321066615..c36eab9bc 100644 --- a/guides/config-history.mdx +++ b/guides/config-history.mdx @@ -5,6 +5,7 @@ keywords: - owner:shubham last_update: date: 2025-09-18 +description: History for entities like Feature Gates, Experiments, Dynamic Configs, and Segments can be accessed by clicking the "History" button on the top right of the page. --- History for entities like Feature Gates, Experiments, Dynamic Configs, and Segments can be accessed by clicking the "History" button on the top right of the page. diff --git a/guides/contentful.mdx b/guides/contentful.mdx index 95beac6d6..a43614890 100644 --- a/guides/contentful.mdx +++ b/guides/contentful.mdx @@ -1,5 +1,6 @@ --- title: Guide to Contentful +description: The Statsig Contentful integration lets you create A/B/n tests and test different content blocks against each other directly from within Contentful. --- The Statsig Contentful integration lets you create A/B/n tests and test different content blocks against each other directly from within Contentful. You can assess impact using business metrics on Statsig Cloud or Warehouse Native. Marketers can optimize content, obtain insights, and iterate continuously right from within Contentful. 
diff --git a/guides/customer-io-email-abtest.mdx b/guides/customer-io-email-abtest.mdx index 97591c67a..3cd84487d 100644 --- a/guides/customer-io-email-abtest.mdx +++ b/guides/customer-io-email-abtest.mdx @@ -5,6 +5,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: Email campaigns are a critical tool for any Marketing team. Finding the best performing Email template is a perfect use-case for an A/B test. --- Email campaigns are a critical tool for any Marketing team. Finding the best performing Email template is a perfect use-case for an A/B test. Statsig allows you to run simple but powerful A/B tests on different parts of your email content. Since Statsig can integrate seamlessly with product analytics, you can run email experiments and understand deeper business level impact on product metrics easily. diff --git a/guides/email-campaign-test.mdx b/guides/email-campaign-test.mdx index c3275fd62..8db58dd16 100644 --- a/guides/email-campaign-test.mdx +++ b/guides/email-campaign-test.mdx @@ -5,6 +5,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: A/B Testing an email campaign and getting experiment results on downstream product metrics (in addition to top level email interaction metrics), is a common use case for Statsig customers. --- A/B Testing an email campaign and getting experiment results on downstream product metrics (in addition to top level email interaction metrics), is a common use case for Statsig customers. Email marketing tools often have native A/B testing capabilities, but are limited to measuring email open rates or link click rates. 
diff --git a/guides/experiment-on-custom-id-types.mdx b/guides/experiment-on-custom-id-types.mdx index 41ab205c1..c68971309 100644 --- a/guides/experiment-on-custom-id-types.mdx +++ b/guides/experiment-on-custom-id-types.mdx @@ -1,6 +1,7 @@ --- title: Experiment on custom Unit ID types sidebarTitle: Using Custom ID Types +description: In certain cases, you may want to randomize experiment bucketing using a custom Unit ID instead of the default `userID` or Statsig-generated Stable ID. --- In certain cases, you may want to randomize experiment bucketing using a custom Unit ID instead of the default `userID` or Statsig-generated Stable ID. For instance, if you're running a task management tool for companies and want to experiment on company-wide behaviors, you might use `companyID` as the Unit ID. This ensures all users from the same company get the same experience, allowing you to measure overall productivity impacts at a company level. diff --git a/guides/first-device-level-experiment.mdx b/guides/first-device-level-experiment.mdx index 250a9cd99..27a0d36c6 100644 --- a/guides/first-device-level-experiment.mdx +++ b/guides/first-device-level-experiment.mdx @@ -1,6 +1,7 @@ --- title: Build your first Device-level Experiment sidebarTitle: Device-Level Experiments +description: When you cannot identify a user via their user ID, device-level experiments allow you to randomize experiments based on a consistent identifier for the user's device. --- When you cannot identify a user via their user ID, device-level experiments allow you to randomize experiments based on a consistent identifier for the user's device. While Statsig can automatically generate a stable ID, it's recommended to use your own cookie or logged-out ID when possible. 
@@ -72,7 +73,7 @@ const client = new StatsigClient(sdkKey, user, { }); await client.initializeAsync(); -``` +``` > **Tip**: If your app collects other relevant attributes (e.g., device type, region), pass them in the `user` object to improve experiment precision. diff --git a/guides/first-dynamic-config.mdx b/guides/first-dynamic-config.mdx index 70a71876a..ef0015dce 100644 --- a/guides/first-dynamic-config.mdx +++ b/guides/first-dynamic-config.mdx @@ -55,7 +55,7 @@ This is the rough schema we want to return: fontSize, isCloseable, } -``` +``` Let's edit the return value for the windows rule, and fill it in. Under "Return" on the right hand side, hit "edit". Then paste the following: @@ -67,7 +67,7 @@ Let's edit the return value for the windows rule, and fill it in. Under "Return" fontSize: 14, isCloseable: false, } -``` +``` Return Value @@ -90,7 +90,7 @@ Then, lets update the return value: color: 'white', fontSize: 16, } -``` +``` Once again, don't forget to click "Save" to apply these new rules to your config. Your Dynamic Config should now look something like this: @@ -112,7 +112,7 @@ After adding the SDK to the webpage via the [jsdelivr cdn](https://www.jsdelivr. 
```js const client = new window.Statsig.StatsigClient("", {}); -``` +``` Now, let's fetch our config and construct the banner: @@ -133,7 +133,7 @@ banner.style.color = color; banner.style.fontSize = fontSize + "px"; banner.style.backgroundColor = backgroundColor; banner.appendChild(bannerText); -``` +``` Note that this js relies on the html page having the homepageBanner div: diff --git a/guides/first-feature.mdx b/guides/first-feature.mdx index a74cae666..1fb1f29ea 100644 --- a/guides/first-feature.mdx +++ b/guides/first-feature.mdx @@ -52,7 +52,7 @@ Once your Statsig account is ready, follow the steps below to create and test-dr const script = document.createElement('script'); script.src = 'https://cdn.jsdelivr.net/npm/@statsig/js-client@3/build/statsig-js-client+session-replay+web-analytics.min.js'; document.head.appendChild(script); - ``` + ``` Injecting the SDK via DevTools @@ -65,13 +65,13 @@ Once your Statsig account is ready, follow the steps below to create and test-dr ```js const client = new window.Statsig.StatsigClient('YOUR_SDK_KEY', {}); await client.initializeAsync(); - ``` + ``` Then call: ```js client.checkGate('mobile_registration'); - ``` + ``` You should see false because the current session is not mobile and doesn’t use the employee email domain. @@ -88,7 +88,7 @@ Once your Statsig account is ready, follow the steps below to create and test-dr ```js await client.updateUserAsync({}); client.checkGate('mobile_registration'); - ``` + ``` The gate should now return true for the mobile profile. 
@@ -115,7 +115,7 @@ Once your Statsig account is ready, follow the steps below to create and test-dr ```js client.flush(); - ``` + ``` Open the gate’s Diagnostics tab to confirm each exposure, including the failing desktop check, mobile pass, and employee pass. diff --git a/guides/first-segment.mdx b/guides/first-segment.mdx index caed5f139..98654bf41 100644 --- a/guides/first-segment.mdx +++ b/guides/first-segment.mdx @@ -6,6 +6,7 @@ keywords: - owner:shubham last_update: date: 2025-09-18 +description: User Segments allow you to predefine targeting groups for re-use in Feature Gates and Dynamic Configs. Think of it as a reusable macro for a set of users. --- User Segments allow you to predefine targeting groups for re-use in Feature Gates and Dynamic Configs. Think of it as a reusable macro for a set of users. @@ -68,7 +69,7 @@ Again, remember to "Save Changes" Fill in the test gate console with any random user input. You won't be able to get this gate to pass until you pass in a user with an email ending in `statsig.com` or `statsig.io` AND a `development` or `staging` environment like this. If you remove either of those, you should see it fail. 
-``` +```json { email: 'tore@statsig.com', statsigEnvironment: { diff --git a/guides/fomo.mdx b/guides/fomo.mdx index 00671c31f..ea64e9df7 100644 --- a/guides/fomo.mdx +++ b/guides/fomo.mdx @@ -1,14 +1,15 @@ --- sidebar_label: Commonly Used Features -title: Commonly Used Features on Statsig for Enterprise Customers +title: Commonly Used Features on Statsig for Enterprise Users displayed_sidebar: cloud keywords: - owner:vm last_update: date: 2025-09-18 +description: If you've not enabled SSO on Statsig, your ex-employees will be able to make changes in your Statsig project, unless you remember to deactivate them i --- -# Commonly Used Features on Statsig for Enterprise Customers +# Commonly Used Features on Statsig for Enterprise Users ## Single Sign On (SSO) ### ⚠️ Security risk if not enabled @@ -33,7 +34,7 @@ The Core tag is meant to be used for company critical metrics. These are default Metric properties let you break out an event such as add-to-cart into product categories such as sports, toys, appliances, electronics. To do this, you would simply log add-to-cart events and provide the product category in the event's value field. [See more](/metrics/metric-dimensions) ![Metric properties configuration interface](/images/guides/fomo/f1478766-3471-45c8-970a-4a3335675c82.png) -User properties let you slice or filter metrics based on a user property. This is often something like Country, Device Type or a property like Free vs Paid. On Statsig Cloud, these are frozen when a user is first exposed to a feature gate or experiment - in case your experiment ends up changing these properties (e.g. convert a Free user to Paid). If you're using Statsig Warehouse Native, these properties can be set anytime before analysis. +User properties let you slice or filter metrics based on a user property. This is often something like Country, Device Type or a property like Free vs Paid. 
On Statsig Cloud, these are frozen when a user is first exposed to a feature flag or experiment - in case your experiment ends up changing these properties (e.g. convert a Free user to Paid). If you're using Statsig Warehouse Native, these properties can be set anytime before analysis. ![User properties configuration interface](/images/guides/fomo/975df15b-8ac8-4396-aa3b-5877843ed5d5.png) ## Slack Notifs diff --git a/guides/framer-analytics.mdx b/guides/framer-analytics.mdx index dcdbd450b..ef866d4ad 100644 --- a/guides/framer-analytics.mdx +++ b/guides/framer-analytics.mdx @@ -5,6 +5,7 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: Framer allows you to create interactive prototypes and websites with ease. By integrating Statsig, you can capture user behavior, and log custom events. --- ## Introduction @@ -20,7 +21,7 @@ To start tracking user interactions with Statsig in your Framer project, follow 1. Copy your web snippet from Statsig, replacing "YOUR_CLIENT_KEY" with a Client API Key from your Statsig project, which you can find at [console.statsig.com/api_keys](https://console.statsig.com/api_keys). ```html -``` +``` Go to your Framer project settings by clicking the gear icon in the top right corner - note you'll have to be on the "mini" site plan or above above to enable custom code. In the General tab, scroll down to the Custom Code section and paste your Statsig snippet into the end of `` tag section. Save your changes and publish your site, then Statsig will be ready to track basic user interactions. diff --git a/guides/logging-events.mdx b/guides/logging-events.mdx index acd147866..18e6b751d 100644 --- a/guides/logging-events.mdx +++ b/guides/logging-events.mdx @@ -1,5 +1,6 @@ --- title: "Log your first custom event" +description: The first step towards building better products is tracking events.
 --- The first step towards building better products is tracking events. As the saying goes, "If you can't measure something, you can't understand it. If you can't understand it, you can't control it. If you can't control it, you can't improve it." @@ -34,7 +35,7 @@ The [`StatsigUser`](/concepts/user) is a set of properties that describe the use "custom": {}, "privateAttributes": [] } -``` +``` @@ -60,7 +61,7 @@ In Statsig, groups are represented by the `customIDs` field. This is a dictionar "projectID": "abc" } } -``` +``` @@ -121,7 +122,7 @@ statsig.logEvent( optional_event_value, optional_event_metadata ); -``` +``` @@ -144,7 +145,7 @@ statsig.logEvent( user_segment: 'first_time_purchaser', } ); -``` +``` ```csharp C# Example StatsigClient.LogEvent( diff --git a/guides/migrate-from-amplitude.mdx b/guides/migrate-from-amplitude.mdx index 4fbbcc727..d79546915 100644 --- a/guides/migrate-from-amplitude.mdx +++ b/guides/migrate-from-amplitude.mdx @@ -1,6 +1,7 @@ --- title: Migrate your analytics data from Amplitude sidebarTitle: Migrate from Amplitude +description: Migrating from Amplitude to Statsig is a strategic choice. Statsig is an all-in-one platform that offers analytics, experimentation, and feature flagging under one umbrella. --- Migrating from Amplitude to Statsig is a strategic choice. Statsig is an all-in-one platform that offers analytics, experimentation, and feature flagging under one umbrella. Using all these products in a single tool is much more powerful. @@ -28,7 +29,7 @@ Use Amplitude's Export API to pull gzipped JSON ```bash curl --location --request GET 'https://amplitude.com/api/2/export?start=&end=' \ -u '{api_key}:{secret_key}' -``` +``` **4. UI Download (CSV/JSON)** @@ -65,7 +66,7 @@ Amplitude and Statsig store events in slightly different formats. To make your A "plan": "premium" } } -``` +``` **After transform** @@ -84,7 +85,7 @@ Amplitude and Statsig store events in slightly different formats. 
To make your A "currency": "USD" } } -``` +``` ## Step 3. Import into Statsig diff --git a/guides/migrate-from-launchdarkly.mdx b/guides/migrate-from-launchdarkly.mdx index 0617a4bb3..42bfce8f4 100644 --- a/guides/migrate-from-launchdarkly.mdx +++ b/guides/migrate-from-launchdarkly.mdx @@ -1,5 +1,6 @@ --- title: LaunchDarkly Migration Guide +description: Migrating from LaunchDarkly to Statsig is a strategic move. It can lead to efficient feature flag management and a stronger experimentation culture. --- ## Overview @@ -19,17 +20,17 @@ It is important to understand a few fundamental differences in how LaunchDarkly **Environment**: LaunchDarkly treats environments as top level concept where flags and segments must be duplicated and managed separately across environments. In Statsig, we have a centralized model where flags/configs handle environment-specific logic in their targeting rules. -**Flag types**: LaunchDarkly uses a mix of boolean, multivariate, and JSON flags. Statsig distinguishes between Feature Gates (boolean) and Dynamic Configs (typed multivariate configs with JSON values). +**Flag types**: LaunchDarkly uses a mix of boolean, multivariate, and JSON flags. Statsig distinguishes between Feature Flags (boolean) and Dynamic Configs (typed multivariate configs with JSON values). **Targeting**: LaunchDarkly relies on Contexts to evaluate flags. Statsig evaluates based on what we call a StatsigUser object. -#### Side by side comparison +### Side by side comparison | LaunchDarkly concept | Can we migrate? 
| Statsig notes | |---------------------|-----------------|---------------| | Project | ✅ Yes | Convert to Project | | Environment | ✅ Yes | Convert to Environment (mark critical as production in Statsig) | -| Boolean Flags | ✅ Yes | Convert to Feature Gates | +| Boolean Flags | ✅ Yes | Convert to Feature Flags | | String, Number, and JSON Flags | ✅ Yes | Convert to Dynamic Configs | | Segments | ✅ Yes | Convert to Segments (Big ID list segments won't be imported) | | Targeting Rules | ✅ Yes | Convert to Rules | @@ -66,7 +67,7 @@ LaunchDarkly supports multi-kind, structured user contexts. Statsig requires a u "device": "iPad" } } -``` +``` **Example 2: LD Multi context kind to Statsig User object conversion** @@ -99,7 +100,7 @@ LaunchDarkly supports multi-kind, structured user contexts. Statsig requires a u "org_tier": "enterprise" } } -``` +``` In Statsig, email is a top-level reserved field for the user object, so it should be placed directly as email (not user_email). Statsig expects fields like userID, email, ip, and userAgent at the top level for user targeting and analytics. @@ -117,7 +118,7 @@ Below is a decision framework you can use to decide which flags to import into S ## Importing flags into Statsig -To import feature flags from LaunchDarkly to Statsig, you can use our official import tool which is designed for this specific purpose. The import tool fetches flags from LaunchDarkly, translates them into Statsig's format, and creates corresponding feature gates in Statsig. Additionally, it tracks the migration status and details in a CSV file. +To import feature flags from LaunchDarkly to Statsig, you can use our official import tool which is designed for this specific purpose. The import tool fetches flags from LaunchDarkly, translates them into Statsig's format, and creates corresponding feature flags in Statsig. Additionally, it tracks the migration status and details in a CSV file. 
There are two ways to invoke this tool: @@ -137,7 +138,7 @@ The wrapper approach provides several key benefits: - **Easy rollback**: Quickly revert to LaunchDarkly if issues arise - **Consistent interface**: Maintain existing application code structure -#### Implementation Guide +### Implementation Guide **1. Before migration: LaunchDarkly Evaluation** @@ -155,7 +156,7 @@ ldClient.on('ready', () => { const isNewHomepageEnabled = ldClient.variation('new_homepage_flag', false); const buttonColor = ldClient.variation('button_config', 'gray'); }); -``` +``` **2. Create the Migration Wrapper** @@ -206,7 +207,7 @@ export const wrapperFlags = { }; } }; -``` +``` **3. Refactor application code to use the Wrapper** diff --git a/guides/migrate-from-mixpanel.mdx b/guides/migrate-from-mixpanel.mdx index ed8cd028a..a05ec6383 100644 --- a/guides/migrate-from-mixpanel.mdx +++ b/guides/migrate-from-mixpanel.mdx @@ -1,6 +1,7 @@ --- sidebarTitle: Migrate from Mixpanel title: Migrate your analytics data from Mixpanel +description: Switching from Mixpanel to Statsig is a smart move for teams seeking a unified platform that combines analytics, experimentation, and feature flagging. --- # Mixpanel Migration Guide @@ -28,7 +29,7 @@ Use Mixpanel's [Raw Event Export API](https://developer.mixpanel.com/reference/r ```bash curl --location --request GET 'https://data.mixpanel.com/api/2.0/export?from_date=2023-01-01&to_date=2023-01-01' \ -u '{project_id}:{service_account_secret}' -``` +``` **3. Data Pipelines (Bulk Export)** @@ -62,7 +63,7 @@ Mixpanel and Statsig store events in slightly different formats. Map your Mixpan "URL": "website.com/signup" } } -``` +``` **After transform** @@ -80,7 +81,7 @@ Mixpanel and Statsig store events in slightly different formats. Map your Mixpan "URL": "website.com/signup" } } -``` +``` ## Step 3. 
Import into Statsig diff --git a/guides/migration-overview.mdx b/guides/migration-overview.mdx index 983427917..345823d3a 100644 --- a/guides/migration-overview.mdx +++ b/guides/migration-overview.mdx @@ -1,5 +1,6 @@ --- title: Migration Overview +description: Statsig combines feature flags, experimentation, and product analytics in one platform. Migrating ensures your data, flag, experiments, and decision workflows live in a single source of truth. --- Statsig combines feature flags, experimentation, and product analytics in one platform. Migrating ensures your data, flag, experiments, and decision workflows live in a single source of truth. diff --git a/guides/open-source-script.mdx b/guides/open-source-script.mdx index 61c5576a0..0eba4a54e 100644 --- a/guides/open-source-script.mdx +++ b/guides/open-source-script.mdx @@ -1,5 +1,6 @@ --- title: Open Source Script +description: This package is designed to help automate migration of feature flags from LaunchDarkly to Statsig. --- [This package](https://github.com/statsig-io/migrations) is designed to help automate migration of feature flags from LaunchDarkly to Statsig. It fetches feature flags from LaunchDarkly, translates them into Statsig's format, and creates corresponding feature gates in Statsig. @@ -20,7 +21,7 @@ To run the script, you need Node.js and npm installed on your system. You can ex ```bash npx @statsig/migrations --from launchdarkly --launchdarkly-project-id default -``` +``` ## Configuration diff --git a/guides/private-attributes.mdx b/guides/private-attributes.mdx index 455534b41..7d3749548 100644 --- a/guides/private-attributes.mdx +++ b/guides/private-attributes.mdx @@ -5,6 +5,7 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: We take privacy, and the privacy of your user data, very seriously. 
 --- ## Evaluating feature gates, dynamic configs, segments, and experiments without logging user data to Statsig diff --git a/guides/sendgrid-email-abtest.mdx b/guides/sendgrid-email-abtest.mdx index 647a2c422..33fa642bf 100644 --- a/guides/sendgrid-email-abtest.mdx +++ b/guides/sendgrid-email-abtest.mdx @@ -5,6 +5,7 @@ keywords: - owner:vm last_update: date: 2025-09-18 +description: Email campaigns are a critical tool for any Marketing team. Finding the best performing Email template is a perfect use-case for an A/B test. --- Email campaigns are a critical tool for any Marketing team. Finding the best performing Email template is a perfect use-case for an A/B test. Statsig allows you to run simple but powerful A/B tests on different parts of your email content. Since Statsig can integrate seamlessly with product analytics, you can run email experiments and understand deeper business level impact easily. @@ -51,7 +52,7 @@ In your SendGrid console, go to **Settings** -> **Mail Settings** -> **Eve In the HTTP Post URL, put in: -```https://sendgrid-webhook.statsig.workers.dev/?apikey=[YOUR STATSIG API KEY]``` +```https://sendgrid-webhook.statsig.workers.dev/?apikey=[YOUR STATSIG API KEY]``` You can find your API Key by navigating to Statsig Project Settings -> API Keys, and copying the 'Client API Key'. @@ -60,7 +61,7 @@ You can find your API Key by navigating to Statsig Project Settings -> API Ke Statsig API key location -It should look like this: ```client-abcd123efg...``` +It should look like this: ```client-abcd123efg...``` Make sure all the **Deliverability Data** and **Engagement Data** checkboxes are checked. Next, Enable the **Event Webhook Status** and hit Save. 
@@ -124,7 +125,7 @@ Statsig also supports A/B testing when using API or Automation to send marketing "statsig_variant_name": "[control or test]" } } -``` +``` So in our example above, you will set up the Control variant like this: @@ -135,7 +136,7 @@ So in our example above, you will set up the Control variant like this: "statsig_variant_name": "control" } } -``` +``` And the Test variant would look like this: diff --git a/guides/serverless.mdx b/guides/serverless.mdx index eaf9abd43..306c8990c 100644 --- a/guides/serverless.mdx +++ b/guides/serverless.mdx @@ -5,6 +5,7 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: In this guide, we will walk you through how to leverage Statsig’s platform in serverless environments. The examples in this guide use the `statsig-node` SDK in a Google Cloud Function. --- In this guide, we will walk you through how to leverage Statsig’s platform in serverless environments. The examples in this guide use the `statsig-node` SDK in a Google Cloud Function. @@ -57,7 +58,7 @@ exports.statsig = async (req, res) => { alwaysOnGateValue: alwaysOnGateValue, }); }; -``` +``` ### Alternatives @@ -91,7 +92,7 @@ globalThis.fetch = (input, init) => { } (globalThis as any).EdgeRuntime = "fastly"; -``` +``` Next, in the main function entrypoint, simply import the fetch override module prior to importing the Statsig SDK:
+You can enable reviews for all Statsig resources such as feature flags, dynamic configs, segments, and experiments that you'll likely deploy to a production environment. ### Turning on Change Reviews for a Project As a Project Admin, you can configure your project to require reviews for any changes. To enable reviews for your project, navigate to the **Project Settings** page, switch to the Reviews tab and toggle this on. @@ -16,11 +17,11 @@ As a Project Admin, you can configure your project to require reviews for any ch User role permissions configuration screen -- Now when you make any configuration changes, say to a feature gate or experiment, you'll be asked to **Submit for Review**; you can add reviewers when you submit the change for review +- Now when you make any configuration changes, say to a feature flag or experiment, you'll be asked to **Submit for Review**; you can add reviewers when you submit the change for review Screen Shot 2022-05-04 at 5 08 21 AM -- Reviewers will now see a notification on the Statsig console as shown below. When they click on **View Proposed Changes**, they will see a diff of the *current version* in production and *new version*. Reviewers can now **Approve** or **Reject** the submitted changes. +- Reviewers will now see a notification on the Statsig console as shown in the following example. When they click on **View Proposed Changes**, they will see a diff of the *current version* in production and *new version*. Reviewers can now **Approve** or **Reject** the submitted changes. proposed changes @@ -44,9 +45,9 @@ You can now use these predefined **Teams** when you submit any changes for revie ### Enforcing Team Reviews -You can restrict who can make changes to your Project by (a) turning on **Reviews Required** for your Project and (b) adding designated **Teams** or **Reviewers** when you create the Feature Gate or Experiment. 
+You can restrict who can make changes to your Project by (a) turning on **Reviews Required** for your Project and (b) adding designated **Teams** or **Reviewers** when you create the Feature Flag or Experiment. -For (a), see section **Turning on Change Reviews for a Project** to turn on project-wide reviews. For (b), as an owner of a Feature Gate or Experiment, you can add designated **Teams** or **Reviewers** at any time as shown below. This ensures that only these designated groups or members can review and approve any subsequent changes. When another member now tries to edit these designated review groups/reviewers, this will require approval from currently designated reviewers. +For (a), see section **Turning on Change Reviews for a Project** to turn on project-wide reviews. For (b), as an owner of a Feature Flag or Experiment, you can add designated **Teams** or **Reviewers** at any time as shown in the following example. This ensures that only these designated groups or members can review and approve any subsequent changes. When another member now tries to edit these designated review groups/reviewers, this will require approval from currently designated reviewers. image @@ -80,10 +81,10 @@ To enable the Pre-commit Webhook experience: Screen Shot 2025-09-11 at 1 47 08 PM -Now, when a change is made in the Statsig Console, Statsig hits the customer’s configured webhook with the proposed changes. The change in Statsig will be pending until the customer approves the review via Console API (after their internal checks are complete). Statsig exposes an option for Project Admins (only) to bypass this process and commit the changes directly. +Now, when a change is made in the Statsig Console, Statsig hits the user’s configured webhook with the proposed changes. The change in Statsig will be pending until the user approves the review via Console API (after their internal checks are complete). 
Statsig exposes an option for Project Admins (only) to bypass this process and commit the changes directly. Every payload will have these fields at a minimum: -``` +```text review_id (will need to pass this to the change_validation API to accept/reject a change) submitter (email address) committer (email address) diff --git a/guides/shopify-ab-test.mdx b/guides/shopify-ab-test.mdx index 53a81798d..3a896bd7b 100644 --- a/guides/shopify-ab-test.mdx +++ b/guides/shopify-ab-test.mdx @@ -1,10 +1,11 @@ --- -sidebarTitle: A/B Testing on Shopify -title: A/B Testing on Shopify +sidebarTitle: Experimenting on Shopify +title: Experimenting on Shopify keywords: - owner:brock last_update: date: 2025-09-18 +description: Shopify provides solutions for commerce businesses to build and manage all aspects of their online storefront, including product catalogue, inventory, and payments. --- ## Use cases & considerations @@ -13,7 +14,7 @@ Shopify provides solutions for commerce businesses to build and manage all aspec For experimenting with the more static aspects of the store experience (static landing pages, visual aspects), we recommend using [Statsig Sidecar](/guides/sidecar-experiments/introduction) to both build your test treatments and to assign users to experiments when they land on your site — all without writing any code. -Customers looking to experiment on the more dynamic aspects of their online store (ie; your product catalogue, search capabilities) should use [Shopify Headless Commerce](https://www.shopify.com/plus/solutions/headless-commerce) and integrate our [SDKs](/sdks/getting-started) to unlock full control for experimenting within business logic. +Users looking to experiment on the more dynamic aspects of their online store (ie; your product catalogue, search capabilities) should use [Shopify Headless Commerce](https://www.shopify.com/plus/solutions/headless-commerce) and integrate our [SDKs](/sdks/getting-started) to unlock full control for experimenting within business logic.
## Using Traditional Shopify + Sidecar for No-code testing @@ -21,7 +22,7 @@ The traditional Shopify service is a fully-managed platform for businesses that While Statsig does not have an integration in the Shopify App Store today, you can easily integrate Sidecar for running simple UX experiments on the storefront. The below steps will guide you through the process of setting up Sidecar within the traditional Shopify stack. -#### Install Sidecar chrome extension +### Install Sidecar chrome extension [Follow this guide](/guides/sidecar-experiments/setup) on installing the Sidecar Chrome extension. This simple, lightweight Chrome extension will allow non-technical users to build experiments and their treatments. You can easily indicate where the test should run based on URL, and then configure treatments such as content changes, style changes, image swaps, as well as injecting arbitrary JavaScript for more sophisticated use-cases where the visual editor tools cannot accommodate. @@ -37,7 +38,7 @@ This simple, lightweight Chrome extension will allow non-technical users to buil - [Locate your Sidecar script tag](/guides/sidecar-experiments/publishing-experiments#step-2-add-script-code) and copy the script tag to your clipboard - Navigate to the `theme.liquid` file in your Shopify theme editor -- Paste the Sidecar script tag toward the top of the page `` as shown below. +- Paste the Sidecar script tag toward the top of the page `` as shown in the following example. statsig product overview @@ -47,7 +48,7 @@ This simple, lightweight Chrome extension will allow non-technical users to buil ### Configure event tracking -Shopify's [Custom Pixel framework](https://help.shopify.com/en/manual/promoting-marketing/pixels/custom-pixels) is ideal for tracking customer events to Statsig. +Shopify's [Custom Pixel framework](https://help.shopify.com/en/manual/promoting-marketing/pixels/custom-pixels) is ideal for tracking user events to Statsig. 
The custom pixel framework offers a [wide set of events](https://shopify.dev/docs/api/web-pixels-api/standard-events) you can subscribe to, and namely, the ability to perform tracking during the checkout experience. Note that code deployed outside the scope of a custom pixel will not fire during checkout experience as documented [here](https://help.shopify.com/en/manual/promoting-marketing/pixels/overview#pixels-sandbox-environment). @@ -82,7 +83,7 @@ const statsigEvent = async (eventKey, eventValue = null, metadata = {}, userObje }); await fetch('https://events.statsigapi.net/v1/log_event', { method: 'POST', - headers: { 'Content-Type': 'application/json', 'statsig-api-key': 'client-STATSIG_CLIENT_KEY' }, + headers: { 'Content-Type': 'application/json', 'statsig-api-key': 'client-STATSIG_CLIENT_KEY' }, body: JSON.stringify({ "events": [{"user": userObject, "eventName": eventKey, "metadata": metadata}] }) @@ -117,7 +118,7 @@ Using [Shopify Headless](https://shopify.dev/docs/storefronts/headless) gives yo Whether you're using Shopify's [Hydrogen app](https://shopify.dev/docs/storefronts/headless/hydrogen/fundamentals) and its frameworks or a [custom headless stack](https://shopify.dev/docs/storefronts/headless/bring-your-own-stack), you can integrate Statsig's SDK as needed in order to assign users to experiments. Integrating Statsig in this architecture will follow a similar pattern to our recommendation to [integrating with headless CMS platforms](/guides/cms-integrations). -#### Integrating data sources for experiment metrics +### Integrating data sources for experiment metrics Along with the measuring simple click stream and point-of-sale behavior as [outlined here](/guides/shopify-ab-test/#configure-event-tracking), commerce businesses performing deeper experimentation often want to integrate offline data systems and measure experiments using existing metrics that the broader business uses.
-Commonly, the Data Warehouse is the source of truth for user purchase data and other categories of offline data. This affords customers the ability to define more [bespoke metrics](/statsig-warehouse-native/configuration/metrics#metric-types) using filtering, aggregations and incorporating other datasets in the warehouse for segmenting experiment results. +Commonly, the Data Warehouse is the source of truth for user purchase data and other categories of offline data. This affords users the ability to define more [bespoke metrics](/statsig-warehouse-native/configuration/metrics#metric-types) using filtering, aggregations and incorporating other datasets in the warehouse for segmenting experiment results. diff --git a/guides/sidecar-experiments/advanced-configurations-v3.mdx b/guides/sidecar-experiments/advanced-configurations-v3.mdx index 8423a888b..00135d13d 100644 --- a/guides/sidecar-experiments/advanced-configurations-v3.mdx +++ b/guides/sidecar-experiments/advanced-configurations-v3.mdx @@ -5,6 +5,7 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: This approach allows you to set User Identity and Attributes for Sidecar, enabling you to perform more advanced targeting and results segmentation. --- ## Advanced Targeting & Segmentation @@ -20,7 +21,7 @@ window.statsigUser = { isLoggedIn: false } } -``` +```text ## Accessing the Statsig js client For accessing the underlying Statsig js client instance, you can call `StatsigSidecar.getStatsigInstance()`. 
@@ -32,7 +33,7 @@ window.statsigOptions = { // example of disabling logging for loggingEnabled: 'disabled' } -``` +```text ## Managing Consent Prior to Sidecar script tag, configure these runtime options to disable browser storage and tracking: @@ -41,12 +42,12 @@ window.statsigOptions = { loggingEnabled: "disabled", disableStorage: true } -``` +```text Later on, after the user gives consent, re-enable storage and tracking: ```js __STATSIG__.instance().updateRuntimeOptions({loggingEnabled: "browser-only", disableStorage: false}); -``` +```text ## Persisting stableID across subdomains Statsig uses `localStorage` as the preferred mechanism for storing the user's stableID. Localstorage keys do not persist across any origin boundaries including across subdomains. For example, a user visiting `https://example.com`, `https://show.example.com` and `https://account.example.com` would be issued three distinct stableIDs. diff --git a/guides/sidecar-experiments/advanced-configurations.mdx b/guides/sidecar-experiments/advanced-configurations.mdx index 8e6850bb6..26b63004f 100644 --- a/guides/sidecar-experiments/advanced-configurations.mdx +++ b/guides/sidecar-experiments/advanced-configurations.mdx @@ -28,7 +28,7 @@ window.statsigUser = { isLoggedIn: false } } -``` +```text ## Accessing the Statsig js client For accessing the underlying Statsig js client instance, you can call `StatsigSidecar.getStatsigInstance()`. 
@@ -40,7 +40,7 @@ window.statsigOptions = { // example of disabling logging for loggingEnabled: 'disabled' } -``` +```text ## Managing Consent Prior to Sidecar script tag, configure these runtime options to disable browser storage and tracking: @@ -49,12 +49,12 @@ window.statsigOptions = { loggingEnabled: "disabled", disableStorage: true } -``` +```text Later on, after the user gives consent, re-enable storage and tracking: ```js __STATSIG__.instance().updateRuntimeOptions({loggingEnabled: "browser-only", disableStorage: false}); -``` +```text ## Persisting stableID across subdomains Statsig uses `localStorage` as the preferred mechanism for storing the user's stableID. Localstorage keys do not persist across any origin boundaries including across subdomains. For example, a user visiting `https://example.com`, `https://show.example.com` and `https://account.example.com` would be issued three distinct stableIDs. diff --git a/guides/sidecar-experiments/measuring-experiments.mdx b/guides/sidecar-experiments/measuring-experiments.mdx index 37eccf86c..ad5485d99 100644 --- a/guides/sidecar-experiments/measuring-experiments.mdx +++ b/guides/sidecar-experiments/measuring-experiments.mdx @@ -21,7 +21,7 @@ StatsigSidecar.logEvent('Order', null, { units: 3, unitAvgCost: 18.22 }); -``` +```sql ## Post-Experiment Callback for outbound integrations You can bind a callback that gets invoked after Sidecar has run experiments (also gets called when there are no experiments), @@ -47,6 +47,6 @@ window.postExperimentCallback = function(statsigClient, experimentIds) { } ``` -#### Disabling All Logging +### Disabling All Logging To disable all logging to statsig (both autocapture events and logging who has seen your experiments) append the following query string parameter to the Sidecar script URL: `&autostart=0`. 
This may be useful if you're dealing with GDPR compliance, and you can later re-enable events with `client.updateRuntimeOptions({disableLogging: false})` diff --git a/guides/sidecar-experiments/setup.mdx b/guides/sidecar-experiments/setup.mdx index d2578a3fe..5e1baef81 100644 --- a/guides/sidecar-experiments/setup.mdx +++ b/guides/sidecar-experiments/setup.mdx @@ -53,10 +53,10 @@ Hit "OK" to commit the API Keys. ## Install Sidecar on your website -Add a single script tag within the `` portion of your website, replacing with your own [Client SDK Key](https://docs.statsig.com/sdk-keys/api-keys/) as shown below. +Add a single script tag within the `` portion of your website, replacing with your own [Client SDK Key](https://docs.statsig.com/sdk-keys/api-keys/) as shown in the following example. -``` - +```html + ``` diff --git a/guides/sidecar-experiments/sidecar-v3.mdx b/guides/sidecar-experiments/sidecar-v3.mdx index b01b200dc..cbf0852f9 100644 --- a/guides/sidecar-experiments/sidecar-v3.mdx +++ b/guides/sidecar-experiments/sidecar-v3.mdx @@ -6,6 +6,7 @@ keywords: last_update: date: 2025-09-18 displayed_sidebar: cloud +description: Visual Editor v3 provides a simpler interface for point-and-shoot experiments. Different from previous versions, experiments are created in the Statsig console. --- ## Summary diff --git a/guides/statsig-id-resolver.mdx b/guides/statsig-id-resolver.mdx index 2ee0ce0ff..433ecf491 100644 --- a/guides/statsig-id-resolver.mdx +++ b/guides/statsig-id-resolver.mdx @@ -5,6 +5,7 @@ keywords: - owner:shubham last_update: date: 2025-09-18 +description: Statsig ID Resolver is an integration set up at the project level that brings your ID names into console. IDs are used everywhere within Console, but can be hard to interpret on their own. --- ## What is Statsig ID Resolver? @@ -57,7 +58,7 @@ You will need to create and host your own webhook for this integration. This web name: result ?
result.name + ", " + result.Publisher : "", }, }); -``` +```text ## Step 2 - Create your ID Resolver Autocomplete Webhook diff --git a/guides/synchronized-launch.mdx b/guides/synchronized-launch.mdx index 43dfa2cc5..dfaf88d38 100644 --- a/guides/synchronized-launch.mdx +++ b/guides/synchronized-launch.mdx @@ -1,6 +1,7 @@ --- sidebarTitle: Synchronized Launches title: Synchronizing Multiple Feature Launches +description: As you get used to developing with feature flags, you will start to include them from the beginning of your feature development, changing the audience --- As you get used to developing with feature flags, you will start to include them from the beginning of your feature development, changing the audience of your features as you go. As this takes hold across your team/organization/company, you will want to be able to tie features together and launch them simultaneously, as part of a broader release. diff --git a/guides/testing.mdx b/guides/testing.mdx index 74ffc9770..ce871dffe 100644 --- a/guides/testing.mdx +++ b/guides/testing.mdx @@ -1,5 +1,6 @@ --- title: Testing your Gates/Experiments +description: Statsig enhances your engineering velocity by offering tools and features that allow you to test configurations quickly while ensuring reliable outcom --- Statsig enhances your engineering velocity by offering tools and features that allow you to test configurations quickly while ensuring reliable outcomes. This page highlights key features across the product to help you test efficiently. 
@@ -39,7 +40,7 @@ function overrideGate( value: boolean, userID?: string, ): void; -``` +``` ```js function overrideConfig( @@ -47,7 +48,7 @@ function overrideConfig( value: object, userID?: string, ): void; -``` +``` For example, to override a gate for testing: diff --git a/guides/ui-based-tool.mdx b/guides/ui-based-tool.mdx index a5823a8ff..a12eb0989 100644 --- a/guides/ui-based-tool.mdx +++ b/guides/ui-based-tool.mdx @@ -1,5 +1,6 @@ --- title: UI-Based Tool +description: You can follow this guide to use Statsig's built in LaunchDarkly migration tool. Please note that this UI-based tool only imports the "production" environment at the moment. --- You can follow this guide to use Statsig's built in LaunchDarkly migration tool. Please note that this UI-based tool only imports the "production" environment at the moment. diff --git a/guides/uptime.mdx b/guides/uptime.mdx index be0d53e99..e8b880d49 100644 --- a/guides/uptime.mdx +++ b/guides/uptime.mdx @@ -5,6 +5,7 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: Statsig serves billions of individual user interactions. Along the way, we designed the service for reliability and availability of your apps that use Statsig. --- Statsig serves billions of individual user interactions. Along the way, we designed the service for reliability and availability of your apps that use Statsig. Because of this, in the case where your application cannot reach Statsig for any reason, your application will continue to work exactly as you expect with locally cached values. diff --git a/guides/using-environments.mdx b/guides/using-environments.mdx index bf88f328a..ae26b6010 100644 --- a/guides/using-environments.mdx +++ b/guides/using-environments.mdx @@ -1,5 +1,6 @@ --- title: Environment-based Evaluation +description: Statsig SDKs allow you to set the environment tier for your app during initialization.
This helps you evaluate feature gates, dynamic configs, and experiments differently in non-production environments. --- Statsig SDKs allow you to set the environment tier for your app during initialization. This helps you evaluate feature gates, dynamic configs, and experiments differently in non-production environments like development or staging. All you need to do is configure the appropriate environment in your code and adjust feature rules in the Statsig Console. @@ -33,7 +34,7 @@ Here’s an example of setting the environment tier in your code for the **devel ```javascript const client = new StatsigClient(, user, { environment: { tier: 'development' } }); -``` +``` #### Example (Node Server SDK): diff --git a/guides/webflow-sidecar-ab-test.mdx b/guides/webflow-sidecar-ab-test.mdx index b809cd480..4449c6df9 100644 --- a/guides/webflow-sidecar-ab-test.mdx +++ b/guides/webflow-sidecar-ab-test.mdx @@ -5,6 +5,7 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: Webflow offers a comprehensive platform for businesses to design, build, and manage visually stunning websites and their content without the need for code. --- ## Use cases & considerations @@ -13,7 +14,7 @@ Webflow offers a comprehensive platform for businesses to design, build, and man To experiment on a site that utilizes webflow, we recommend using [Statsig Sidecar](/guides/sidecar-experiments/introduction) to both build your test treatments and to assign users to experiments when they land on your site — all without writing any code. -#### Install Sidecar chrome extension +### Install Sidecar chrome extension [Follow this guide](/guides/sidecar-experiments/setup) on installing the Sidecar Chrome extension. This simple, lightweight Chrome extension will allow non-technical users to build experiments and their treatments.
You can easily indicate where the test should run based on URL, and then configure treatments such as content changes, style changes, image swaps, as well as injecting arbitrary JavaScript for more sophisticated use-cases where the visual editor tools cannot accommodate. diff --git a/http-api/overview.mdx b/http-api/overview.mdx index fc61f4522..7c5640671 100644 --- a/http-api/overview.mdx +++ b/http-api/overview.mdx @@ -1,5 +1,6 @@ --- title: HTTP API +description: While this HTTP API is available for direct use, we strongly recommend using one of our official SDKs for your programming language whenever --- diff --git a/infra-analytics/events-mode-logs-explorer.mdx b/infra-analytics/events-mode-logs-explorer.mdx index f2ba49c4b..9be75fb7d 100644 --- a/infra-analytics/events-mode-logs-explorer.mdx +++ b/infra-analytics/events-mode-logs-explorer.mdx @@ -1,6 +1,7 @@ --- title: Events Mode on Logs Explorer sidebarTitle: Events Mode +description: Events Mode brings the searching and filtering abilities from [Log Explorer](/infra-analytics/logs-explorer) to your _existing_ Statsig events data. N --- Events Mode brings the searching and filtering abilities from [Log Explorer](/infra-analytics/logs-explorer) to your _existing_ Statsig events data. No additional instrumentation required! diff --git a/infra-analytics/getting-started.mdx b/infra-analytics/getting-started.mdx index 362bd6ac5..60b6f7821 100644 --- a/infra-analytics/getting-started.mdx +++ b/infra-analytics/getting-started.mdx @@ -3,7 +3,6 @@ title: Getting Started with OTEL + Statsig sidebarTitle: Getting Started description: Setup and send OpenTelemetry telemetry to Statsig for Infra Analytics (Logs Explorer, Metrics Explorer, Alerts). --- - This guide helps you setup and send OpenTelemetry telemetry to Statsig so you can use Infra Analytics (Logs Explorer, Metrics Explorer, Alerts). 
There are two common paths: @@ -19,6 +18,9 @@ There are two common paths: --- ## Application Telemetry quick starts +This page explains application telemetry quick starts. + + @@ -35,7 +37,7 @@ npm install --save \ @opentelemetry/exporter-logs-otlp-http \ @opentelemetry/resources \ @opentelemetry/semantic-conventions -``` +```text Initialize OpenTelemetry (e.g., `instrumentation.js`): @@ -83,7 +85,7 @@ const sdk = new NodeSDK({ sdk.start(); -``` +```text To set up application logs with OTel, you can use the pino or winston bridges. The example below using [pino](https://getpino.io/#/) with [pino auto instrumentation](https://www.npmjs.com/package/@opentelemetry/instrumentation-pino). @@ -91,7 +93,7 @@ To set up application logs with OTel, you can use the pino or winston bridges. T Install the pino instrumentation: ```bash npm i pino @opentelemetry/instrumentation-pino -``` +```text ```js // instrumentation.js (continued) @@ -122,7 +124,7 @@ const pino = require('pino'); const logger = pino(); logger.info('OTel logs initialized'); -``` +```text The Statsig SDK also supports forwarding logs to Log Explorer; see the alternative logging example below. 
@@ -150,7 +152,7 @@ s.forwardLogLineEvent(user, 'info', 'service started', { version: process.env.np }); } -``` +```javascript Run your service: @@ -160,7 +162,7 @@ make sure that you require or import `instrumentation.js` before any other appli STATSIG_SERVER_SDK_SECRET=YOUR_SECRET \ OTEL_SERVICE_NAME=my-node-service \ node -r ./instrumentation.js app.js -``` +```ruby Tip: you can configure exporters via env instead of code: - `OTEL_EXPORTER_OTLP_ENDPOINT=https://api.statsig.com/otlp` @@ -177,7 +179,7 @@ Install dependencies: ```bash npm install @opentelemetry/sdk-node @opentelemetry/resources @opentelemetry/semantic-conventions @opentelemetry/sdk-trace-node @opentelemetry/exporter-trace-otlp-http @opentelemetry/auto-instrumentations-node -``` +```text Add `instrumentation.ts` at the app root (Next 13+): @@ -219,7 +221,7 @@ export async function register() { sdk.start(); } -``` +```text To set up application logs with OTel, you can use the pino or winston bridges. The example below using [Pino](https://getpino.io/#/) with [Pino auto instrumentation](https://www.npmjs.com/package/@opentelemetry/instrumentation-pino). @@ -227,7 +229,7 @@ To set up application logs with OTel, you can use the pino or winston bridges. T Install the pino instrumentation: ```bash npm i pino @opentelemetry/instrumentation-pino -``` +```text ```js // instrumentation.ts (continued) @@ -258,7 +260,7 @@ import pino from 'pino'; const logger = pino(); logger.info('OTel logs initialized'); -``` +```text The Statsig SDK also supports forwarding logs to Log Explorer; see the alternative logging example below. @@ -285,7 +287,7 @@ s.forwardLogLineEvent(user, 'info', 'service started', { version: process.env.np stack: err?.stack, }); } -``` +```python Note: In Next.js, mark '@statsig/statsig-node-core' as a server external package in `next.config.js` to avoid bundling. 
### Using Vercel + Statsig integration @@ -355,7 +357,7 @@ config: receivers: [otlp] processors: [batch] exporters: [otlphttp] -``` +```text Install the Collector with Helm: @@ -365,7 +367,7 @@ helm repo update helm install otel-gateway open-telemetry/opentelemetry-collector \ -n otel --create-namespace \ -f values.yaml -``` +```ruby Provide the Statsig key as an environment variable to the Collector pods (for example via a Secret and envFrom). Your applications then send OTLP to the in-cluster Collector endpoint (for example `http://otel-gateway-collector.otel.svc.cluster.local:4318`). @@ -395,7 +397,7 @@ services: ports: - "4317:4317" # OTLP gRPC - "4318:4318" # OTLP HTTP -``` +```text Create the Collector config referenced above: @@ -430,14 +432,14 @@ service: receivers: [otlp] processors: [batch] exporters: [otlphttp] -``` +```text Start the Collector: ```bash export STATSIG_SERVER_SDK_SECRET=YOUR_SECRET docker compose up -d -``` +```python Point your applications at the Collector (HTTP): `http://localhost:4318` (or `http://otel-collector:4318` from other compose services). The Collector forwards to Statsig with your key. @@ -454,7 +456,7 @@ exporters: encoding: json headers: statsig-api-key: ${env:STATSIG_SERVER_SDK_SECRET} -``` +```javascript @@ -477,7 +479,7 @@ image: repository: otel/opentelemetry-collector-contrib tag: "latest" pullPolicy: IfNotPresent -``` +```python ### A. File logs (filelog receiver) @@ -512,7 +514,7 @@ service: receivers: [filelog] processors: [batch] exporters: [otlphttp] -``` +```text Kubernetes tip: to tail container logs on nodes, mount host paths (e.g., `/var/log/pods` and `/var/lib/docker/containers`) into the Collector DaemonSet and set `include` to those paths. @@ -541,7 +543,7 @@ service: receivers: [otlp] processors: [resourcedetection/ec2, batch] exporters: [otlphttp] -``` +```sql Permissions: the Collector must be able to reach the EC2 metadata service (IMDS). 
Ensure network access to `169.254.169.254` and IMDSv2 where required. @@ -591,4 +593,4 @@ Requirements: - Filelog receiver (contrib): https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver - Resource detection processor (contrib): https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourcedetectionprocessor - Docker stats receiver (contrib): https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/dockerstatsreceiver -- Collector contrib distribution: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib +- Collector contrib distribution: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib \ No newline at end of file diff --git a/infrastructure/api_proxy/custom_proxy.mdx b/infrastructure/api_proxy/custom_proxy.mdx index 0f5433470..5088e7096 100644 --- a/infrastructure/api_proxy/custom_proxy.mdx +++ b/infrastructure/api_proxy/custom_proxy.mdx @@ -5,6 +5,7 @@ keywords: - owner:eric last_update: date: 2025-09-18 +description: Instead of sending API requests directly to Statsig, you can set up your own environment that proxies requests from your custom domain name to Statsig --- ## Overview @@ -82,7 +83,7 @@ export const handler = async (event, context, callback) => { return callback(null, request); }; -``` +```python - In Settings, @@ -160,7 +161,7 @@ Once you are logged into Cloudflare. You can follow these steps: ); }, }; - ``` + ```text 4-cloudflare-paste-snippet @@ -180,7 +181,7 @@ Statsig.initialize(mySdkKey, myUser, { api: "https://my-statsig-proxy.com/v1", }, }); -``` +```ruby Depending on the SDK type, version, and proxy approach you are using, you may not need to append `'/v1'` to the end of your api string. 
eg `"https://my-statsig-proxy.com/"` diff --git a/infrastructure/api_proxy/introduction.mdx b/infrastructure/api_proxy/introduction.mdx index 4f4b35dbe..1b23d9cbb 100644 --- a/infrastructure/api_proxy/introduction.mdx +++ b/infrastructure/api_proxy/introduction.mdx @@ -4,6 +4,7 @@ keywords: - owner:eric last_update: date: 2025-09-18 +description: This section provides documentation on the various API proxy options supported when using Statsig. There are several compelling reasons to implement a --- This section provides documentation on the various API proxy options supported when using Statsig. diff --git a/infrastructure/api_proxy/managed-proxy.mdx b/infrastructure/api_proxy/managed-proxy.mdx index 489375dd4..b6d03c145 100644 --- a/infrastructure/api_proxy/managed-proxy.mdx +++ b/infrastructure/api_proxy/managed-proxy.mdx @@ -6,6 +6,7 @@ keywords: - owner:eric last_update: date: 2025-09-18 +description: An API proxy gives you a unique URL to send and receive data to/from Statsig servers. This makes it less likely to be intercepted by client-side or D --- An API proxy gives you a unique URL to send and receive data to/from Statsig servers. This makes it less likely to be intercepted by client-side or DNS-side blockers. This way you'll be able to get the right configuration for your applications and more data back from your applications. @@ -13,7 +14,7 @@ An API proxy gives you a unique URL to send and receive data to/from Statsig ser The Managed Proxy is available only for Pro or Enterprise tiers. -A quick-and-easy way to prevent adblocking, we recommend the custom proxy to low volume customers, or before you have the time to setup the [Custom Proxy](/custom_proxy), which is a more robust and customizable solution. +A quick-and-easy way to prevent adblocking, we recommend the custom proxy to low volume users, or before you have the time to setup the [Custom Proxy](/custom_proxy), which is a more robust and customizable solution. 
## Why use a proxy @@ -23,7 +24,7 @@ Using a proxy that's unique to your application signals these tracking blockers ## Setting up a managed proxy -If your project is in pro-tier or enterprise-tier, you will see an option to create a unique proxy for your SDK in the Settings -> Project -> Keys & Environments tab as shown below: +If your project is in pro-tier or enterprise-tier, you will see an option to create a unique proxy for your SDK in the Settings -> Project -> Keys & Environments tab as shown in the following example: Proxy creation option in Keys & Environments settings diff --git a/infrastructure/reliability-faq.mdx b/infrastructure/reliability-faq.mdx index 1ce20eac9..ca3248626 100644 --- a/infrastructure/reliability-faq.mdx +++ b/infrastructure/reliability-faq.mdx @@ -6,6 +6,7 @@ keywords: - owner:eric last_update: date: 2025-09-18 +description: Integrating your product with Statsig means depending on Statsig, and we take reliability seriously. Here are some questions many people have when try --- Integrating your product with Statsig means depending on Statsig, and we take reliability seriously. Here are some questions many people have when trying to evaluate the risks, please feel free to reach out on Slack if you have questions that are not listed here. diff --git a/infrastructure/sdk-monitoring.mdx b/infrastructure/sdk-monitoring.mdx index 82faa8529..5978dbef8 100644 --- a/infrastructure/sdk-monitoring.mdx +++ b/infrastructure/sdk-monitoring.mdx @@ -6,6 +6,7 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: This latest release of structured logging and metrics, is currently only [available by the Python SDK](/server/pythonSDK/#sdk-monitoring-). 
Wan --- diff --git a/infrastructure/statsig_domains.mdx b/infrastructure/statsig_domains.mdx index c7cedbb76..68bed6cbb 100644 --- a/infrastructure/statsig_domains.mdx +++ b/infrastructure/statsig_domains.mdx @@ -4,23 +4,26 @@ keywords: - owner:eric last_update: date: 2025-09-18 +description: Statsig uses the following domain names for its services. If you have a network policy set up inside your systems, you should allowlist these domains. --- - Statsig uses the following domain names for its services. If you have a network policy set up inside your systems, you should allowlist all of the domains below or select domains based on the features you use. ## Statsig Console +This section lists the domains used by the Statsig console. + + - `console.statsig.com` - `cdn.console.statsig.com` - `console.statsigcdn.com` ## Statsig API Services -These domains are used by our SDKs to communicate with our backend for feature gates, dynamic configs and event logging. They are also used for other Statsig APIs, e.g. console APIs, integrations. +These domains are used by our SDKs to communicate with our backend for feature flags, dynamic configs and event logging. They are also used for other Statsig APIs, e.g. console APIs, integrations. - `api.statsig.com` - `featuregates.org` - `statsigapi.net` - `events.statsigapi.net` - `api.statsigcdn.com` @@ -57,10 +60,10 @@ If you're just looking for a list of apis used by our backend/server SDKs, you n - `statsigapi.net` - `api.statsigcdn.com` - `prodregistryv2.org` -- `idliststorage.blob.core.windows.net` (see below) +- `idliststorage.blob.core.windows.net` (see the following section) ### Statsig User Segment Storage API The domain is used by our server SDKs to download the segment list for your project. If you do not use big id lists, you wont need this one.
-- `idliststorage.blob.core.windows.net` +- `idliststorage.blob.core.windows.net` \ No newline at end of file diff --git a/infrastructure/statsig_ip_ranges.mdx b/infrastructure/statsig_ip_ranges.mdx index 187719446..581165c68 100644 --- a/infrastructure/statsig_ip_ranges.mdx +++ b/infrastructure/statsig_ip_ranges.mdx @@ -4,6 +4,7 @@ keywords: - owner:eric last_update: date: 2025-09-18 +description: Statsig reserves the following IP addresses and ranges for use by its services. If you have a network policy set up inside your systems, you should al --- Statsig reserves the following IP addresses and ranges for use by its services. If you have a network policy set up inside your systems, you should allowlist diff --git a/integrations/ai_development_with_statsig.mdx b/integrations/ai_development_with_statsig.mdx index 1d800b462..1ddc7a5e4 100644 --- a/integrations/ai_development_with_statsig.mdx +++ b/integrations/ai_development_with_statsig.mdx @@ -28,7 +28,7 @@ Fill in your Statsig ID, and follow the [MCP guide](/mcp) to set up the MCP serv We recommend using a limited-access console API key to control risk around a scenario where an agent deletes entities in Statsig. -``` +```python # Statsig Development Guidelines It is important that we selectively killswitch new features or any new, risky codepaths using Statsig Feature Gates so that we can turn them off if they cause issues. 
diff --git a/integrations/akamai.mdx b/integrations/akamai.mdx index 0de69abc7..635dbd85d 100644 --- a/integrations/akamai.mdx +++ b/integrations/akamai.mdx @@ -1,5 +1,6 @@ --- title: Akamai Edge KV +description: Statsig’s Akamai Edge KV integration pushes Statsig Configs to Edge KV, providing low latency for gate and experiment evaluations directly in Akamai E --- ## Overview diff --git a/integrations/azureai/capturing-metrics.mdx b/integrations/azureai/capturing-metrics.mdx index 75abc1ff4..e6bfcd4c4 100644 --- a/integrations/azureai/capturing-metrics.mdx +++ b/integrations/azureai/capturing-metrics.mdx @@ -4,6 +4,7 @@ keywords: - owner:vijaye last_update: date: 2025-09-18 +description: Azure AI SDK automatically captures relevant invocation and usage metrics from each API call and logs them to Statsig. You can see these events strea --- diff --git a/integrations/azureai/completions.mdx b/integrations/azureai/completions.mdx index 15497c708..c6fe9253d 100644 --- a/integrations/azureai/completions.mdx +++ b/integrations/azureai/completions.mdx @@ -4,14 +4,15 @@ keywords: - owner:vijaye last_update: date: 2025-09-18 +description: Chat completions are AI-generated responses used to generate text. They could enable generic text completion, or interactive dialogue based on a given --- - - - Chat completions are AI-generated responses used to generate text. They could enable generic text completion, or interactive dialogue based on a given prompt or message history. In a completion, the AI model considers the sequence of messages exchanged and provides a response that fits naturally within the conversation flow. ## Simple completions +This page explains simple completions. 
+ + ```js @@ -24,7 +25,7 @@ const result = await modelClient.complete(messages); for (const choice of result.choices) { console.log(choice.message.content); } -``` +```text ```python @@ -37,7 +38,7 @@ self.assertIsNotNone(response, "Expected response to not be None") for item in response.choices: content = item.message.content print(content) -``` +```text ```csharp @@ -47,11 +48,11 @@ var completion = await modelClient.Complete( ); Console.WriteLine(completion); -``` +```text -#### Output +### Output ``` Arrr, trainin’ a parrot be quite the adventure, savvy? Here be some tips fer ye: @@ -68,7 +69,7 @@ Arrr, trainin’ a parrot be quite the adventure, savvy? Here be some tips fer y 6. **Fun and Play**: Incorporate games into yer training to keep it interestin’. A happy parrot be a learnin’ parrot! Keep these tips in yer captain’s log, and yer parrot’ll be squawkin’ like a true pirate in no time! Arrr! -``` +```text ## Streaming Completions @@ -91,7 +92,7 @@ for await (const event of stream) { process.stdout.write(choice.delta?.content ?? ""); } } -``` +```text ```python @@ -102,7 +103,7 @@ response = modelClient.stream_complete([ for update in response: print(update.choices[0].delta.content or "", end="", flush=True) -``` +```text ```csharp @@ -118,5 +119,4 @@ await foreach (var update in completion) { } ``` - - + \ No newline at end of file diff --git a/integrations/azureai/embeddings.mdx b/integrations/azureai/embeddings.mdx index 279895ef3..0fbab7c15 100644 --- a/integrations/azureai/embeddings.mdx +++ b/integrations/azureai/embeddings.mdx @@ -4,14 +4,15 @@ keywords: - owner:vijaye last_update: date: 2025-09-18 +description: Embeddings are numerical representations of data (like text, images, or audio) that capture their essential features in a compact form, typically as v --- - - - Embeddings are numerical representations of data (like text, images, or audio) that capture their essential features in a compact form, typically as vectors. 
For text, embeddings map words or sentences to vector spaces, where similar items are closer together, enabling comparisons and efficient searches. Developers use embeddings in tasks like semantic search, recommendation engines, and clustering, as they allow for analyzing and processing unstructured data with machine learning models that recognize and work with these patterns. ## Generate text embeddings +This page explains generate text embeddings. + + ```js @@ -22,7 +23,7 @@ const result = await modelClient.getEmbeddings([ for (const data of result.data) { console.log(`Embedding: ${data.embedding}`); } -``` +```text ```python @@ -34,18 +35,18 @@ for item in response.data: f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, " f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]" ) -``` +```text ```csharp var embedding = await client.GetEmbeddings(["Hello, world!", "Goodbye, world!"]); Console.WriteLine(embedding.First().ToArray()); Console.WriteLine(embedding.Last().ToArray()); -``` +```text -#### Output +### Output ``` Embedding: -0.01918462,-0.025279032,-0.0017195191,0.018848283,-0.033795066,-0.019695852,-0.020947022,0.05158053,-0.03212684,-0.03037789,-0.0021458254,-0.028978731,-0.0024737532,-0.031481072,0.01033225,0.018606123,-0.046145335,0.041463535,0.00044186175,0.041221373,0.053679265,0.001873393,0.004567446,0.01002282,0.047867376,0.0022013208,-0.009834472,0.03847687,0.00089213194,-0.052118666,0.051150016,-0.03255735,-0.0140319485,-0.01263279, ..... ``` \ No newline at end of file diff --git a/integrations/azureai/getting-started.mdx b/integrations/azureai/getting-started.mdx index f54537688..6ae7e9b8a 100644 --- a/integrations/azureai/getting-started.mdx +++ b/integrations/azureai/getting-started.mdx @@ -4,6 +4,7 @@ keywords: - owner:vijaye last_update: date: 2025-09-18 +description: Start by installing the Statsig Azure AI SDK. 
Depending on your language/framework you would use the right package manager to install the SDK in your --- @@ -15,17 +16,17 @@ Start by installing the Statsig Azure AI SDK. Depending on your language/framew ```shell npm i @statsig/azure-ai -``` +```text ```shell pip install azureai-statsig -``` +```text ```shell dotnet add package StatsigAzureAI -``` +```ruby @@ -66,12 +67,12 @@ Go to your **Project Settings** and choose the **Keys & Environments** tab on th import { AzureAI } from "@statsig/azure-ai"; await AzureAI.initialize(""); -``` +```text ```python AzureAI.initialize("") -``` +```text ```csharp diff --git a/integrations/azureai/introduction.mdx b/integrations/azureai/introduction.mdx index 7c9b50479..06e99882f 100644 --- a/integrations/azureai/introduction.mdx +++ b/integrations/azureai/introduction.mdx @@ -1,6 +1,7 @@ --- title: Azure AI sidebarTitle: Overview +description: Statsig offers SDKs for integrating Azure AI models into server applications. These SDKs simplify the implementation of features like completions and --- Statsig offers SDKs for integrating Azure AI models into server applications. These SDKs simplify the implementation of features like completions and embeddings in your server application. They provide easy-to-use APIs and automatically track metrics such as latency, token length, and model details, which you can use for optimization and experimentation. Use cases include: diff --git a/integrations/azureai/model-client.mdx b/integrations/azureai/model-client.mdx index 2545576be..a5203ae17 100644 --- a/integrations/azureai/model-client.mdx +++ b/integrations/azureai/model-client.mdx @@ -4,6 +4,7 @@ keywords: - owner:vijaye last_update: date: 2025-09-18 +description: In order to invoke Azure AI methods, you'll need to instantiate a Model Client. 
You have two ways of instantiating a Model Client --- @@ -27,7 +28,7 @@ Once created, you can fill in the properties of this deployment like this: The JSON of this looks like this: -``` +```json { endpoint: "https://FILL_IN_YOUR_ENDPOINT", key: "FILL_IN_YOUR_KEY", @@ -49,17 +50,17 @@ Once this is done, you can instantiate your Model Client directly by using the * ```js const client = AzureAI.getModelClient(""); -``` +```text ```python client = AzureAI.get_model_client("") -``` +```text ```csharp var client = Server.GetModelClient(""); -``` +```ruby @@ -74,12 +75,12 @@ const modelClient = AzureAI.getModelClientFromEndpoint( "", "" ); -``` +```text ```python modelClient = AzureAI.get_model_client_from_endpoint("", "") -``` +```text ```csharp diff --git a/integrations/azureai/running-experiments.mdx b/integrations/azureai/running-experiments.mdx index eee91d989..e84a2fd24 100644 --- a/integrations/azureai/running-experiments.mdx +++ b/integrations/azureai/running-experiments.mdx @@ -4,6 +4,7 @@ keywords: - owner:vijaye last_update: date: 2025-09-18 +description: Azure AI SDK helps you easily and quickly run A/B tests to measure the effectiveness of different models and related parameters. 
By leveraging Statsi --- diff --git a/integrations/cloudflare.mdx b/integrations/cloudflare.mdx index f4a566866..cc216b79f 100644 --- a/integrations/cloudflare.mdx +++ b/integrations/cloudflare.mdx @@ -1,5 +1,6 @@ --- title: Cloudflare KV +description: Statsig offers a set of integrations that make usage with Cloudflare easy, including automatically pushing changes to Cloudflare's KV store for low-latency evaluations. --- Statsig offers a set of integrations that make usage with Cloudflare easy: @@ -35,7 +36,7 @@ Install the Statsig serverless SDK: ```bash npm install @statsig/serverless-client -``` +```text @@ -55,7 +56,7 @@ export default handleWithStatsig( envKvBindingName: 'STATSIG_KV' } ); -``` +```javascript The required ParamsObject params (kvKey, envStatsigKey, envKvBindingName) must be stored as env variables, either in your wrangler.toml or as Cloudflare secrets. @@ -96,7 +97,7 @@ import { handleWithStatsig } from '@statsig/serverless-client/cloudflare'; export default handleWithStatsig( async (request, env, ctx, client) => { const randomUserId = Math.floor(Math.random() * 100).toString(); - const gate = client.getFeatureGate("test_cloudflare_sync", { userID: randomUserId }); + const gate = client.getFeatureGate("test_cloudflare_sync", { userID: randomUserId }); const value = gate.value; client.logEvent('new_event', { userID: randomUserId }); return new Response(`Gate check result: ${value}`); @@ -108,7 +109,7 @@ export default handleWithStatsig( } ); -``` +```text ```wrangler wrangler.toml name = "test" main = "src/index.js" @@ -123,7 +124,7 @@ export default handleWithStatsig( [observability] enabled = true - ``` + ```python @@ -153,14 +154,14 @@ First, you'll need to install the Statsig serverless sdk. ```bash npm install @statsig/serverless-client -``` +```javascript ## Import Next, import the Cloudflare client. ```bash import { StatsigCloudflareClient } from '@statsig/serverless-client/cloudflare'; -``` +```python Then, you need to hook it all up.
This involves: @@ -177,7 +178,7 @@ In our example, we are checking a gate called "test_cloudflare_sync" that is set ### 1. Creating a `StatsigCloudflareClient` instance ```bash const client = new StatsigCloudflareClient(""); -``` +```python The client instantiation takes two arguments: - `sdkKey : string` This is your Statsig client API key. It is available from the [Project Settings](https://console.statsig.com/api_keys) page in the Statsig Console. This is used to authenticate your requests. - `options : StatsigOptions` See here, for more [options](https://docs.statsig.com/client/javascript-sdk#statsig-options). @@ -188,11 +189,11 @@ For best practice: ### 2. Client initialization -The following line initializes the client by loading feature gate and experiment configurations directly from your Cloudflare KV store. +The following line initializes the client by loading feature flag and experiment configurations directly from your Cloudflare KV store. ```bash const initResult = await client.initializeFromKV(env., ); -``` +```text The client initialization takes two arguments: @@ -206,7 +207,7 @@ For best practice: ### 3. Checking a Gate ```bash const value = client.checkGate("test_cloudflare_sync", { userID: randomUserId }); -``` +```text This is a gate check in code. @@ -219,7 +220,7 @@ Refer to the [Javascript on device evaluation sdk documentation](/client/jsOnDev ### 4. Logging an event ```bash client.logEvent('gate_check', { userID: randomUserId }); -``` +```text This is an event log in code. The `logEvent` method takes two parameters: @@ -232,7 +233,7 @@ For more information on event logging, see [here](https://docs.statsig.com/clien ```bash ctx.waitUntil(statsig.flush()); -``` +```python This flushes all events from the sdk to Statsig. **Without this, you wont be able to get diagnostic information in the Statsig Console, nor any event data you logged**. 
@@ -264,7 +265,7 @@ export default { } } }; -``` +```text ```wrangler @@ -281,7 +282,7 @@ export default { [observability] enabled = true - ``` + ```ruby diff --git a/integrations/data-connectors/amplitude.mdx b/integrations/data-connectors/amplitude.mdx index 1327a6ad7..116f1a818 100644 --- a/integrations/data-connectors/amplitude.mdx +++ b/integrations/data-connectors/amplitude.mdx @@ -4,6 +4,7 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: import StatsigEnvironmentFormat from "/snippets/integration_statsig_env_format.mdx"; Statsig supports both incoming and outgoing events for Amplitude. --- import StatsigEnvironmentFormat from "/snippets/integration_statsig_env_format.mdx"; diff --git a/integrations/data-connectors/braze.mdx b/integrations/data-connectors/braze.mdx index 94d454ebf..e8dccb9e0 100644 --- a/integrations/data-connectors/braze.mdx +++ b/integrations/data-connectors/braze.mdx @@ -1,12 +1,13 @@ --- title: Braze +description: Enabling the Braze integration allows you to export Statsig exposure events to your configured Braze app with information on the status of each user's --- ## Overview Enabling the Braze integration allows you to export Statsig exposure events to your configured Braze app with information on the status of each user's feature gate and experimentation groups. These exposures will be forwarded to Braze as a [Custom Attribute](https://www.braze.com/docs/user_guide/data/custom_data/custom_attributes) object on the user. There will be one Custom Attribute per gate/experiment the user has been exposed to. 
The Custom Attribute in Braze will be named `statsig_exposure::{gate/experiment name}` and be of the form: -``` +```text { group_name: String, timestamp: Time diff --git a/integrations/data-connectors/census.mdx b/integrations/data-connectors/census.mdx index ab4b2b37b..880717be0 100644 --- a/integrations/data-connectors/census.mdx +++ b/integrations/data-connectors/census.mdx @@ -1,5 +1,6 @@ --- title: Census +description: Enabling the [Census](https://getcensus.com/) integration for Statsig allows Statsig to receive events from Census. This enables you to ingest data in --- ## Overview diff --git a/integrations/data-connectors/fivetran.mdx b/integrations/data-connectors/fivetran.mdx index d5d2b28ec..996192fc6 100644 --- a/integrations/data-connectors/fivetran.mdx +++ b/integrations/data-connectors/fivetran.mdx @@ -4,6 +4,7 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: import EventFormats from "/snippets/integration_event_formats.mdx"; Enabling the Fivetran integration for Statsig will allow Statsig to push events to --- import EventFormats from "/snippets/integration_event_formats.mdx"; @@ -21,7 +22,7 @@ Enabling the Fivetran integration for Statsig will allow Statsig to push events ## Filtering Events -Once you've enabled outbound events to Fivetran, you can select which categories of Statsig events you want to export by click on the **Event Filtering** button and checking the appropriate boxes as shown below. +Once you've enabled outbound events to Fivetran, you can select which categories of Statsig events you want to export by click on the **Event Filtering** button and checking the appropriate boxes as shown in the following example. 
Event filtering configuration interface diff --git a/integrations/data-connectors/google-analytics.mdx b/integrations/data-connectors/google-analytics.mdx index 4b64fe2e5..a78f303b5 100644 --- a/integrations/data-connectors/google-analytics.mdx +++ b/integrations/data-connectors/google-analytics.mdx @@ -4,6 +4,7 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: Enabling the Google Analytics 4 integration allows Statsig to send logged events and exposures to GA4. This enhances your existing Google Analytics tr --- Enabling the Google Analytics 4 integration allows Statsig to send logged events and exposures to GA4. This enhances your existing Google Analytics tracking with additional data collected by Statsig's logging SDKs. diff --git a/integrations/data-connectors/heap.mdx b/integrations/data-connectors/heap.mdx index cabd56047..1790acc46 100644 --- a/integrations/data-connectors/heap.mdx +++ b/integrations/data-connectors/heap.mdx @@ -4,6 +4,7 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: import EventFormats from "/snippets/integration_event_formats.mdx"; Enabling the Heap integration allows you to export Statsig events to your configur --- import EventFormats from "/snippets/integration_event_formats.mdx"; diff --git a/integrations/data-connectors/hightouch.mdx b/integrations/data-connectors/hightouch.mdx index 97c2d3f92..4da814612 100644 --- a/integrations/data-connectors/hightouch.mdx +++ b/integrations/data-connectors/hightouch.mdx @@ -4,6 +4,7 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: import StatsigEnvironmentFormat from "/snippets/integration_statsig_env_format.mdx"; Enabling the **[Hightouch](https://hightouch.com/)** integration --- import StatsigEnvironmentFormat from "/snippets/integration_statsig_env_format.mdx"; diff --git a/integrations/data-connectors/mixpanel.mdx b/integrations/data-connectors/mixpanel.mdx index 0d25d2e3d..a9e484eb1 100644 --- 
a/integrations/data-connectors/mixpanel.mdx +++ b/integrations/data-connectors/mixpanel.mdx @@ -4,6 +4,7 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: The [Mixpanel](https://mixpanel.com/) integration has two functions. - Incoming: Statsig can sync your Mixpanel user cohorts with a Statsig ID list se --- ## Overview @@ -43,7 +44,7 @@ Statsig can ingest user information via a [Mixpanel Cohort Syncing](https://deve 4. In the dialog that appears, paste the url below, substituting the SERVER_SECRET_KEY with a "Server Secret Key" found in [Project Settings](https://console.statsig.com/api_keys), then click Continue. -``` +```text https://api.statsig.com/v1/webhooks/mixpanel?statsig-api-key=SERVER_SECRET_KEY ``` diff --git a/integrations/data-connectors/mparticle.mdx b/integrations/data-connectors/mparticle.mdx index 4ab696649..ec947a781 100644 --- a/integrations/data-connectors/mparticle.mdx +++ b/integrations/data-connectors/mparticle.mdx @@ -1,5 +1,6 @@ --- title: mParticle +description: Enabling the [mParticle](https://www.mparticle.com/) integration for Statsig allows Statsig to receive events from mParticle. You can find all events --- ## Overview diff --git a/integrations/data-connectors/revenuecat.mdx b/integrations/data-connectors/revenuecat.mdx index 4ffacf14c..4eb2f22fc 100644 --- a/integrations/data-connectors/revenuecat.mdx +++ b/integrations/data-connectors/revenuecat.mdx @@ -1,5 +1,6 @@ --- title: RevenueCat +description: Enabling the RevenueCat integration allows Statsig to pull billing, subscription, and revenue metrics into your Statsig projects. 
This provides easy m --- ## Overview diff --git a/integrations/data-connectors/rudderstack.mdx b/integrations/data-connectors/rudderstack.mdx index 7f2c1dd8c..b068e92ea 100644 --- a/integrations/data-connectors/rudderstack.mdx +++ b/integrations/data-connectors/rudderstack.mdx @@ -4,6 +4,7 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: import StatsigEnvironmentFormat from "/snippets/integration_statsig_env_format.mdx"; Enabling the RudderStack integration for Statsig will allow Stats --- import StatsigEnvironmentFormat from "/snippets/integration_statsig_env_format.mdx"; @@ -12,7 +13,7 @@ import StatsigEnvironmentFormat from "/snippets/integration_statsig_env_format.m Enabling the RudderStack integration for Statsig will allow Statsig to pull in your RudderStack events. This allows you to run your experiment analysis on Statsig with all of your existing events from RudderStack without requiring any additional logging. -When Statsig receives events from RudderStack, these will be visible and aggregated in the [Metrics](/metrics) tab in the Statsig console. These events will automatically be included in your [Pulse](/pulse/read-pulse) results for A/B tests with Statsig's [feature flags](/feature-flags/overview) as well as all your [Experiment](/experiments-plus/monitor) results. +When Statsig receives events from RudderStack, these will be visible and aggregated in the [Metrics](/metrics) tab in the Statsig console. These events will automatically be included in your [Pulse](/pulse/read-pulse) results for Experiments with Statsig's [feature flags](/feature-flags/overview) as well as all your [Experiment](/experiments-plus/monitor) results. ## Configuring Incoming Events @@ -21,8 +22,8 @@ To ingest your events from RudderStack, 1. On [app.rudderstack.com](https://app.rudderstack.com/), navigate to "Connections" and click **Add Destination** . 2. Search for “Statsig” in the Destinations Catalog, and select the “Statsig” destination. 3. 
Give your connection a name and choose which Source should send data to the “Statsig” destination. -4. From the [Statsig dashboard](https://console.statsig.com/api_keys), copy the Statsig "Server Secret Key”. -5. Enter the Statsig “Server Secret Key” in the “Statsig” destination settings in RudderStack. +4. From the [Statsig dashboard](https://console.statsig.com/api_keys), copy the Statsig "Server Secret Key”. +5. Enter the Statsig “Server Secret Key” in the “Statsig” destination settings in RudderStack. 6. On the Statsig [Integration page](https://console.statsig.com/integrations) enable the RudderStack integration. 7. As your RudderStack events flow into Statsig, you'll see a live **Log Stream** in the [Metrics](/metrics) tab in the Statsig console. You can click one of these events to see the details that are logged as part of the event. @@ -30,11 +31,11 @@ To ingest your events from RudderStack, Statsig metrics log stream interface -#### User IDs and Custom IDs +### User IDs and Custom IDs Statsig automatically detects the `event` and `userID` fields that you log through your RudderStack events. If you're running an experiment with the user as your unit type, this userID should match the user identifier that you log with the Statsig SDK. -If you're using a [custom ID](/guides/experiment-on-custom-id-types) as the unit type for your experiment, you can provide this identifier using the key `statsigCustomIDs` as part of the RudderStack `properties` field as shown below. +If you're using a [custom ID](/guides/experiment-on-custom-id-types) as the unit type for your experiment, you can provide this identifier using the key `statsigCustomIDs` as part of the RudderStack `properties` field as shown in the following example.
```bash title="JSON Body" { diff --git a/integrations/data-connectors/segment.mdx b/integrations/data-connectors/segment.mdx index 6f3c1fcf8..7612f9a57 100644 --- a/integrations/data-connectors/segment.mdx +++ b/integrations/data-connectors/segment.mdx @@ -4,15 +4,18 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: Enabling the Segment integration for Statsig will allow Statsig to pull in your Segment events. --- - import StatsigEnvironmentFormat from "/snippets/integration_statsig_env_format.mdx"; Enabling the Segment integration for Statsig will allow Statsig to pull in your Segment events. This allows you to run your experiment analysis on Statsig with all of your existing events from Segment without requiring any additional logging. -When Statsig receives events from Segment, these will be visible and aggregated in the [Metrics](/metrics) tab in the Statsig console. These events will automatically be included in your [Pulse](/pulse/read-pulse) results for A/B tests with Statsig's feature gates as well as all your [Experiment](/experiments-plus/monitor) results. +When Statsig receives events from Segment, these will be visible and aggregated in the [Metrics](/metrics) tab in the Statsig console. These events will automatically be included in your [Pulse](/pulse/read-pulse) results for Experiments with Statsig's feature flags as well as all your [Experiment](/experiments-plus/monitor) results. ### Supported Segment Event Types + +Statsig supports the following Segment event types.
+ - [Track](https://segment.com/docs/connections/spec/track/) - [Page](https://segment.com/docs/connections/spec/page/) - [Group](https://segment.com/docs/connections/spec/group/) @@ -25,8 +28,8 @@ Identify calls are only supported for syncing Segment Engage Audiences with Stat ### Benefits of using the Segment integration Using the Segment integration has several benefits over other methods of event ingestion: - * Customers who are ingesting customer data with Segment will be able to quickly populate Statsig with metrics and can typically get up and running within a day - * Customers will only have to use Statsig's assignment SDKs (gate/experiment allocation), simplifying your code and engineer workflows + * Users who are ingesting user data with Segment will be able to quickly populate Statsig with metrics and can typically get up and running within a day + * Users will only have to use Statsig's assignment SDKs (gate/experiment allocation), simplifying your code and engineer workflows * Additional logging can be done via the [event logging SDKs](/guides/logging-events#logging-events-via-sdks) but will and additional code orchestration and a collection window * With [event filtering](/integrations/event_filtering) you can control which events are ingested and make billing more predictable * If you have [Segment Replay](https://segment.com/docs/guides/what-is-replay/), you can send Statsig your historical events for analysis @@ -82,7 +85,7 @@ If you are unable to connect to Segment via OAuth, you can still manually connec - - Put your Server Secret Key in the “API Key” field in the Statsig Destination + - Put your Server Secret Key in the “API Key” field in the Statsig Destination @@ -129,7 +132,7 @@ Statsig will join incoming user identifiers to whichever [unit of randomization] Statsig automatically detects the `event` and `userId` fields that are logged through your Segment events (see [`track`](https://segment.com/docs/connections/spec/track/) for an example).
If you're running an experiment with the userId as your unit type, this `userID` should match the user identifier that you log with the Statsig SDK. -If you're using a [custom ID](/guides/experiment-on-custom-id-types) as the unit type for your experiment, you can provide this identifier using the key `statsigCustomIDs` as part of the Segment `properties` field as shown below. +If you're using a [custom ID](/guides/experiment-on-custom-id-types) as the unit type for your experiment, you can provide this identifier using the key `statsigCustomIDs` as part of the Segment `properties` field as shown in the following example. ```bash title="JSON Body" { @@ -138,7 +141,7 @@ If you're using a [custom ID](/guides/experiment-on-custom-id-types) as the unit "statsigCustomIDs": [ "companyID", ""] } } -``` +``` The `statsigCustomIDs` field in properties should be an array, where the even index is the name of the user ID type and the odd index is the value of the previous element in the array. Assuming you've created this custom ID type on Statsig (under **ID Type Settings** in your [Project Settings](https://console.statsig.com/settings)), Statsig will automatically recognize these custom identifiers to compute your experiment results appropriately. @@ -154,7 +157,7 @@ This is particularly useful for working with the [Segment anonymous ID](https:// #### Experimenting on anonymous traffic -For example, if you're running experiments on anonymous users, you can use Segment's `anonymousId` as the unit of randomization. First, you will want to [add a new customer identifier to Statsig](/guides/experiment-on-custom-id-types#step-1---add-companyid-as-a-new-id-type-in-your-project-settings). In the above example, we call our new custom ID `segmentAnonymousId`. Then, when [initializing](/client/javascript-sdk) the Statsig SDK, if you have access to the Segment `anonymousId` you will want to pass it to Statsig as a custom ID.
For example, your Statsig initialization may look like this: +For example, if you're running experiments on anonymous users, you can use Segment's `anonymousId` as the unit of randomization. First, you will want to [add a new user identifier to Statsig](/guides/experiment-on-custom-id-types#step-1---add-companyid-as-a-new-id-type-in-your-project-settings). In the above example, we call our new custom ID `segmentAnonymousId`. Then, when [initializing](/client/javascript-sdk) the Statsig SDK, if you have access to the Segment `anonymousId` you will want to pass it to Statsig as a custom ID. For example, your Statsig initialization may look like this: ```jsx import { StatsigClient } from '@statsig/js-client'; @@ -173,7 +176,7 @@ const client = new StatsigClient(sdkKey, { environment: { tier: "production" } } ); -``` +``` You can access Segment's `anonymousId` using `analytics.user().anonymousId()` as [outlined in the Segment docs here](https://segment.com/docs/connections/sources/catalog/libraries/website/javascript/identity/). @@ -196,7 +199,7 @@ Refer to the following diagram to help orient you to mapping `anonymousIds` in S ### Syncing Statsig Segment ID Lists with Segment Engage Audiences -By using [Segment Engage Audiences](https://segment.com/docs/engage/audiences/) you are able to maintain a list of users that can be used for targeting using [Statsig Feature Gates](/feature-flags/overview). To configure this: +By using [Segment Engage Audiences](https://segment.com/docs/engage/audiences/) you are able to maintain a list of users that can be used for targeting using [Statsig Feature Flags](/feature-flags/overview). To configure this: 1. Create a [Statsig ID List Segment](/segments/create-new) on the Statsig Console. 2. Follow the [Segment guide for Audiences](https://segment.com/docs/engage/audiences/) to create a new Audience and choose `Statsig` as a Destination. The `audience_key` must match the ID of the `Statsig ID List Segment` created.
@@ -204,10 +207,10 @@ By using [Segment Engage Audiences](https://segment.com/docs/engage/audiences/) Once these steps have been completed, your Segment Audience will be synced, and you will be able to target those users for features you develop or experiments you run. ### Custom Properties -Passing [custom properties to a Statsig User](/concepts/user#user-attributes) (see `custom` field) enables targeting on specific cohorts of your users in feature gates and experimentation. +Passing [custom properties to a Statsig User](/concepts/user#user-attributes) (see `custom` field) enables targeting on specific cohorts of your users in feature flags and experimentation. Providing custom user properties also allows you to drill down your results to specific populations (ex: android/iOS, isVIP, etc) when [reading pulse results](/pulse/custom-queries#running-a-custom-query). -If you're using custom fields to [target users](/feature-flags/conditions#custom) in your feature gates, you can provide these properties through Segment using the key `statsigCustom` as part of the Segment `properties` +If you're using custom fields to [target users](/feature-flags/conditions#custom) in your feature flags, you can provide these properties through Segment using the key `statsigCustom` as part of the Segment `properties` field, as an array of key value pairs: `[key1, value1, key2, value2, ...]`. An example is shown below: ```bash title="JSON Body" @@ -217,7 +220,7 @@ field, as an array of key value pairs: `[key1, value1, key2, value2, ...]`. An e "statsigCustom": [ "isVIP", "true", "marketing_campaign", "abx343", ...] 
} } -``` +``` ## Configuring Outbound Events to Segment @@ -269,7 +272,7 @@ Statsig exports log events and exposure events to segment as `track` events: }, }; } -``` +``` Config Change events follow this schema: @@ -299,7 +302,7 @@ Config Change events follow this schema: ## Working with Segment Metrics in the Statsig UI -Segment events are piped into Statsig and are accessible in the metrics console like any other event. Furthermore, these metrics will be accessible to use as monitoring metrics in your feature gates and experiments so you can utilize your existing +Segment events are piped into Statsig and are accessible in the metrics console like any other event. Furthermore, these metrics will be accessible to use as monitoring metrics in your feature flags and experiments so you can utilize your existing metric collection via Segment with Statsig's experimentation platform. @@ -319,4 +322,4 @@ These metrics will be reported in pulse results among other monitoring metrics: ## Filtering Events -You can customize which events should be sent and received via Segment using [Event Filtering](/integrations/event_filtering) +You can customize which events should be sent and received via Segment using [Event Filtering](/integrations/event_filtering) \ No newline at end of file diff --git a/integrations/data-connectors/stitch.mdx b/integrations/data-connectors/stitch.mdx index ebc9cb012..3b5768250 100644 --- a/integrations/data-connectors/stitch.mdx +++ b/integrations/data-connectors/stitch.mdx @@ -4,6 +4,7 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: Enabling the Stitch integration for Statsig will allow Statsig to push events to Stitch. --- import EventFormats from "/snippets/stitch_event_formats.mdx"; @@ -21,7 +22,7 @@ Enabling the Stitch integration for Statsig will allow Statsig to push events to ## Filtering Events -Once you've enabled outbound events to Stitch,
you can select which categories of Statsig events you want to export by click on the **Event Filtering** button and checking the appropriate boxes as shown below. +Once you've enabled outbound events to Stitch, you can select which categories of Statsig events you want to export by click on the **Event Filtering** button and checking the appropriate boxes as shown in the following example. Initial Setup Dialog diff --git a/integrations/data-exports/data_warehouse_exports.mdx b/integrations/data-exports/data_warehouse_exports.mdx index e0a5b9698..0672f5c3a 100644 --- a/integrations/data-exports/data_warehouse_exports.mdx +++ b/integrations/data-exports/data_warehouse_exports.mdx @@ -1,5 +1,6 @@ --- title: Data Warehouse Exports +description: You can export your data from Statsig to your Data Warehouse with a Data Connection. This lets you send exposures and events directly to your warehous --- ## Introduction diff --git a/integrations/data-exports/experiment_result_exports.mdx b/integrations/data-exports/experiment_result_exports.mdx index 2e11809c7..a05c326d1 100644 --- a/integrations/data-exports/experiment_result_exports.mdx +++ b/integrations/data-exports/experiment_result_exports.mdx @@ -1,5 +1,6 @@ --- title: Experiment Result Exports +description: Your data is your data. Statsig makes it easy to export both the reports and the raw data your feature rollouts and experiments generate. --- ## Overview Your data is your data. Statsig makes it easy to export both the reports and the raw data your feature rollouts and experiments generate. diff --git a/integrations/data-imports/azure_upload.mdx b/integrations/data-imports/azure_upload.mdx index d2a126b6f..269130f1a 100644 --- a/integrations/data-imports/azure_upload.mdx +++ b/integrations/data-imports/azure_upload.mdx @@ -5,10 +5,11 @@ keywords: - owner:tim last_update: date: 2025-09-18 +description: This solution is still functional, but can be manual and time consuming to set up with minimal error handling. 
We encourage you to check out the Data Warehouse Ingestion solution instead. --- -This solution is still functional, but can be manual and time consuming to set up with minimal error handling. We encourage you to check out the [Data Warehouse Ingestion](../../data-warehouse-ingestion/introduction.md) solution instead. +This solution is still functional, but can be manual and time consuming to set up with minimal error handling. We encourage you to check out the [Data Warehouse Ingestion](../../data-warehouse-ingestion/introduction.md) solution instead. ## Overview @@ -35,20 +36,20 @@ Please make sure your data conforms to the following schemas. Events -``` +```text | Column | Description | Rules | | -------------- | ----------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------- | | timestamp | UNIX timestamp of the event | UTC timestamp | | event_name | The name of the event | String under 128 characters, using `_` for spaces | | event_value | A string representing the value of a current event. Can represent a 'dimension' or a 'value' | Read as string format; numeric values will be converted into value | | event_metadata | A dictionary in the form of a JSON string, containing named metadata for the event | String format. Not null. Length < 128 characters | -| user | A JSON object representing the user this event was logged for; see below | Escaped JSON string including the keys 'custom' and 'customIDs'. A userID or customID must be provided. | +| user | A JSON object representing the user this event was logged for; refer to the following example | Escaped JSON string including the keys 'custom' and 'customIDs'. A userID or customID must be provided. | | timeuuid | A unique UUID or timeUUID used for deduping.
If omitted, will be generated but will not be effective for deduping | UUID format | ``` Please refer to docs for the [Statsig User Object](/concepts/user#user-attributes) for available fields. An example would look like: -``` +```json { userID: "12345", customIDs: { @@ -85,7 +86,7 @@ Make sure to include all of metric_value, numerator, and denominator, writing `c | metric_name | The name of the metric | String format. Not null. Length < 128 characters | | metric_value | A numeric value for the metric | Double format. Metric value, or both of numerator/denominator need to be provided for Statsig to process the metric. See details below | | numerator | Numerator for metric calculation | Double format. Required for ratio metrics. If present along with a denominator in any record, the metric will be treated as ratio and only calculated for users with non-null denominators | -| denominator | Denominator for metric calculation | Double format. See above | +| denominator | Denominator for metric calculation | Double format. refer to the relevant section | ### Scheduling @@ -109,4 +110,4 @@ These are common errors we've run into - please go through and make sure your se - The `id_type` is set correctly - Default types are `user_id` or `stable_id`. If you have custom ids, make sure that the capitalization and spelling matches as these are case sensitive (you can find your custom ID types by going to your Project Settings in the Statsig console). - Your ids match the format of ids logged from SDKs - - In some cases, your data warehouse may transform IDs. This may mean we can't join your experiment or feature gate data to your metrics to calculate pulse or other reports. You can go to the Metrics page of your project and view the log stream to check the format of the ids being sent (either `User ID`, or a custom ID in `User Properties`) to confirm they match + - In some cases, your data warehouse may transform IDs. 
This may mean we can't join your experiment or feature flag data to your metrics to calculate pulse or other reports. You can go to the Metrics page of your project and view the log stream to check the format of the ids being sent (either `User ID`, or a custom ID in `User Properties`) to confirm they match diff --git a/integrations/data-imports/bigquery.mdx b/integrations/data-imports/bigquery.mdx index 3764d66f6..141c4ca80 100644 --- a/integrations/data-imports/bigquery.mdx +++ b/integrations/data-imports/bigquery.mdx @@ -5,10 +5,11 @@ keywords: - owner:tim last_update: date: 2025-09-18 +description: This solution is still functional, but can be manual and time consuming to set up with minimal error handling. We encourage you to check out the Data Warehouse Ingestion solution instead. --- -This solution is still functional, but can be manual and time consuming to set up with minimal error handling. We encourage you to check out the [Data Warehouse Ingestion](../../data-warehouse-ingestion/introduction.md) solution instead. +This solution is still functional, but can be manual and time consuming to set up with minimal error handling. We encourage you to check out the [Data Warehouse Ingestion](../../data-warehouse-ingestion/introduction.md) solution instead. ## Overview @@ -33,7 +34,7 @@ Here are the steps to take to enable BigQuery integration with Statsig: 2. Create a table for pre-computed metrics, and another for signalling when data has landed with the statement below: -``` +```sql -- Replace statsig with your dataset name, if not using statsig CREATE TABLE IF NOT EXISTS statsig.statsig_user_metrics( unit_id STRING NOT NULL, @@ -88,12 +89,12 @@ Your data should conform to these definitions and rules to avoid errors or delay | timeuuid | A unique timeuuid for the event | This should be a timeuuid, but using a unique id will suffice. If not provided, the table defaults to generating a UUID. | | metric_name | The name of the metric | Not null.
Length < 128 characters | | metric_value | A numeric value for the metric | Metric value, or both of numerator/denominator need to be provided for Statsig to process the metric. See details below | -| numerator | Numerator for metric calculation | See above, and details below | -| denominator | Denominator for metric calculation | See above, and details below | +| numerator | Numerator for metric calculation | refer to the relevant section, and details below | +| denominator | Denominator for metric calculation | refer to the relevant section, and details below | Metric ingestion is for user-day metric pairs. This is useful for measuring experimental results on complex business logic (e.g. LTV estimates) that you generate in your data warehouse. -##### Note on metric values +### Note on metric values If you provide **both** a numerator and denominator value for any record of a metric, we'll assume that this metric is a ratio metric; we'll filter out users who do not have a denominator value from analysis, and recalculate the metric value ourselves via the numerator and denominator fields. @@ -120,7 +121,7 @@ These are common errors we've run into - please go through and make sure your se - The `id_type` is set correctly - Default types are `user_id` or `stable_id`. If you have custom ids, make sure that the capitalization and spelling matches as these are case sensitive (you can find your custom ID types by going to your Project Settings in the Statsig console). - Your ids match the format of ids logged from SDKs - - In some cases, your data warehouse may transform IDs. This may mean we can't join your experiment or feature gate data to your metrics to calculate pulse or other reports. You can go to the Metrics page of your project and view the log stream to check the format of the ids being sent (either `User ID`, or a custom ID in `User Properties`) to confirm they match + - In some cases, your data warehouse may transform IDs. 
This may mean we can't join your experiment or feature flag data to your metrics to calculate pulse or other reports. You can go to the Metrics page of your project and view the log stream to check the format of the ids being sent (either `User ID`, or a custom ID in `User Properties`) to confirm they match If your data is not showing up in the Statsig console diff --git a/integrations/data-imports/overview.mdx b/integrations/data-imports/overview.mdx index 76c0a1fa8..6b2e4e683 100644 --- a/integrations/data-imports/overview.mdx +++ b/integrations/data-imports/overview.mdx @@ -1,5 +1,6 @@ --- title: Imports Overview (Deprecated) +description: This solution is still functional, but can be manual and time consuming to set up with minimal error handling. We encourage you to check out the Data Warehouse Ingestion solution instead. --- This solution is still functional, but can be manual and time consuming to set up with minimal error handling. We encourage you to check out the [Data Warehouse Ingestion](../../data-warehouse-ingestion/introduction.md) solution instead. diff --git a/integrations/data-imports/redshift.mdx b/integrations/data-imports/redshift.mdx index d05730b35..65c375e24 100644 --- a/integrations/data-imports/redshift.mdx +++ b/integrations/data-imports/redshift.mdx @@ -5,10 +5,11 @@ keywords: - owner:tim last_update: date: 2025-09-18 +description: This solution is still functional, but can be manual and time consuming to set up with minimal error handling. We encourage you to check out the Data Warehouse Ingestion solution instead. --- -This solution is still functional, but can be manual and time consuming to set up with minimal error handling. We encourage you to check out the [Data Warehouse Ingestion](../../data-warehouse-ingestion/introduction.md) solution instead. +This solution is still functional, but can be manual and time consuming to set up with minimal error handling. We encourage you to check out the [Data Warehouse Ingestion](../../data-warehouse-ingestion/introduction.md) solution instead.
## Overview @@ -52,20 +53,20 @@ Please make sure your data conforms to the following schemas. Events -``` +```text | Column | Description | Rules | | -------------- | ----------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------- | | timestamp | UNIX timestamp of the event | UTC timestamp | | event_name | The name of the event | String under 128 characters, using `_` for spaces | | event_value | A string representing the value of a current event. Can represent a 'dimension' or a 'value' | Read as string format; numeric values will be converted into value | | event_metadata | A dictionary in the form of a JSON string, containing named metadata for the event | String format | -| user | A JSON object representing the user this event was logged for; see below | Escaped JSON string including the keys 'custom' and 'customIDs'. A userID or customID must be provided. | +| user | A JSON object representing the user this event was logged for; refer to the following example | Escaped JSON string including the keys 'custom' and 'customIDs'. A userID or customID must be provided. | | timeuuid | A unique UUID or timeUUID used for deduping. If omitted, will be generated but will not be effective for deduping | UUID format | ``` Please refer to docs for the [Statsig User Object](/concepts/user#user-attributes) for available fields. An example would look like: -``` +```json { userID: "12345", customIDs: { @@ -102,7 +103,7 @@ Make sure to include all of metric_value, numerator, and denominator, writing `c | metric_name | The name of the metric | String format. Not null. Length < 128 characters | | metric_value | A numeric value for the metric | Double format. Metric value, or both of numerator/denominator need to be provided for Statsig to process the metric. 
See details below | | numerator | Numerator for metric calculation | Double format. Required for ratio metrics. If present along with a denominator in any record, the metric will be treated as ratio and only calculated for users with non-null denominators | -| denominator | Denominator for metric calculation | Double format. See above | +| denominator | Denominator for metric calculation | Double format. refer to the relevant section | ### Set up and Provide Credentials @@ -112,7 +113,7 @@ Make sure to include all of metric_value, numerator, and denominator, writing `c - Attach an appropriate policy which gives Read and List access to the appropriate bucket. Make sure this is scoped appropriately so the user only has access to the data intended! Example policy: -``` +```json { "Version": "2012-10-17", "Statement": [ @@ -132,7 +133,7 @@ Make sure to include all of metric_value, numerator, and denominator, writing `c Next, modify your bucket access policy (under `permissions` on your S3 bucket's page) to allows this user to access objects. Example policy: -``` +```json { "Version": "2012-10-17", "Statement": [ @@ -160,7 +161,7 @@ Example policy: You can confirm your credentials are sufficient by adding any data to your metrics folder and running the following code in PySpark with the IAM user credentials: -``` +```text sc._jsc.hadoopConfiguration().set("fs.s3n.awsAccessKeyId", '') sc._jsc.hadoopConfiguration().set("fs.s3n.awsSecretAccessKey", '') spark.read.parquet("s3:///metrics/*",inferSchema=True).show() diff --git a/integrations/data-imports/snowflake.mdx b/integrations/data-imports/snowflake.mdx index a7fdf1411..68db9c1a5 100644 --- a/integrations/data-imports/snowflake.mdx +++ b/integrations/data-imports/snowflake.mdx @@ -1,9 +1,10 @@ --- title: Snowflake (Deprecated) +description: This solution is still functional, but can be manual and time consuming to set up with minimal error handling. 
We encourage you to check out the Data Warehouse Ingestion solution instead. --- -This solution is still functional, but can be manual and time consuming to set up with minimal error handling. We encourage you to check out the [Data Warehouse Ingestion](../../data-warehouse-ingestion/introduction.md) solution instead. +This solution is still functional, but can be manual and time consuming to set up with minimal error handling. We encourage you to check out the [Data Warehouse Ingestion](../../data-warehouse-ingestion/introduction.md) solution instead. ## Overview @@ -102,7 +103,7 @@ BEGIN; GRANT SELECT ON statsig.statsig.statsig_user_metrics_signal TO ROLE identifier($role_name); COMMIT; -``` +``` Make sure all the statements ran successfully. This will create the schema and user that Statsig's ingestion expects. @@ -139,7 +140,7 @@ Your data should conform to these definitions and rules to avoid errors or delay | -------------- | -------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | | time | The unix time your event was logged at | Not null | | timeuuid | A unique timeuuid for the event | This should be a timeuuid, but using a unique id will suffice. If not provided, the table defaults to generating a UUID. | -| user | A user json object. | See below | +| user | A user json object. | Refer to the following example | | event_name | The name of the event | Not null. Length < 128 characters | | event_value | The value of the event | Length < 128 characters | | event_metadata | Metadata about the event | Not null. Length < 16384 characters. Json-formatted - leave empty if none | @@ -184,8 +185,8 @@ unit identifier will limit the utility of your events, as we won't be able to us | timeuuid | A unique timeuuid for the event | This should be a timeuuid, but using a unique id will suffice. If not provided, the table defaults to generating a UUID. | | metric_name | The name of the metric | Not null.
Length < 128 characters | | metric_value | A numeric value for the metric | Metric value, or both of numerator/denominator need to be provided for Statsig to process the metric. See details below | -| numerator | Numerator for metric calculation | See above, and details below | -| denominator | Denominator for metric calculation | See above, and details below | +| numerator | Numerator for metric calculation | refer to the relevant section, and details below | +| denominator | Denominator for metric calculation | refer to the relevant section, and details below | Metric ingestion is for user-day metric pairs. This is useful for measuring experimental results on complex business logic (e.g. LTV estimates) that you generate in your data warehouse. @@ -221,7 +222,7 @@ These are common errors we've run into - please go through and make sure your se - The `id_type` is set correctly - Default types are `user_id` or `stable_id`. If you have custom ids, make sure that the capitalization and spelling matches as these are case sensitive (you can find your custom ID types by going to your Project Settings in the Statsig console). - Your ids match the format of ids logged from SDKs - - In some cases, your data warehouse may transform IDs. This may mean we can't join your experiment or feature gate data to your metrics to calculate pulse or other reports. You can go to the Metrics page of your project and view the log stream to check the format of the ids being sent (either `User ID`, or a custom ID in `User Properties`) to confirm they match + - In some cases, your data warehouse may transform IDs. This may mean we can't join your experiment or feature flag data to your metrics to calculate pulse or other reports. 
You can go to the Metrics page of your project and view the log stream to check the format of the ids being sent (either `User ID`, or a custom ID in `User Properties`) to confirm they match If your data is not showing up in the Statsig console diff --git a/integrations/datadog.mdx b/integrations/datadog.mdx index 8dfa20bd4..c2abfbdd7 100644 --- a/integrations/datadog.mdx +++ b/integrations/datadog.mdx @@ -1,5 +1,6 @@ --- title: Datadog +description: There are four key use-cases to the Datadog integration: 1. [Config Changes](#config-changes) - Streaming changes made in Statsig into Datadog, so you --- ### Overview diff --git a/integrations/event_filtering.mdx b/integrations/event_filtering.mdx index 8dd022ad2..6c4d7a23e 100644 --- a/integrations/event_filtering.mdx +++ b/integrations/event_filtering.mdx @@ -1,5 +1,6 @@ --- title: Event Filtering +description: Once you've enabled an integration, you can select specific events that you want to send and/or receive by clicking on the **Event Filtering** button. --- Once you've enabled an integration, you can select specific events that you want to send and/or receive by clicking on the **Event Filtering** button. diff --git a/integrations/event_webhook.mdx b/integrations/event_webhook.mdx index 4886a8176..7e8340681 100644 --- a/integrations/event_webhook.mdx +++ b/integrations/event_webhook.mdx @@ -4,6 +4,7 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: import EventFormats from "/snippets/integration_event_formats.mdx"; The Statsig Event Webhook allows you to log event data to Statsig from third party --- import EventFormats from "/snippets/integration_event_formats.mdx"; @@ -12,17 +13,17 @@ import EventFormats from "/snippets/integration_event_formats.mdx"; The Statsig Event Webhook allows you to log event data to Statsig from third party apps or other external sources to provide additional insights to your Statsig experiments and metrics. 
-Before using the Webhook, you will need to obtain your Projects' server secret key. An example call to the Statsig Event Webhook should look like the following: +Before using the Webhook, you will need to obtain your Projects' server secret key. An example call to the Statsig Event Webhook should look like the following: ```bash title="HTTP" POST https://api.statsig.com/v1/webhooks/event_webhook -``` +``` ```bash title="Headers" Content-Type: application/json Accept: */* -STATSIG-API-KEY: {STATSIG_SERVER_SECRET} -``` +STATSIG-API-KEY: {STATSIG_SERVER_SECRET} +``` ```bash title="JSON Body" { @@ -39,7 +40,7 @@ STATSIG-API-KEY: {STATSIG_SERVER_SECRET} }, timestamp: {TIMESTAMP} } -``` +```
@@ -100,7 +101,7 @@ Below are a few examples of some of the config change payloads. To best capture "action": "created" } } -``` +```text #### Experiment Change @@ -119,11 +120,11 @@ Below are a few examples of some of the config change payloads. To best capture "action": "updated" } } -``` +```javascript ### Filtering Events -Once you've enabled outbound events to your webhook, you can select which categories of Statsig events you want to export by clicking on the **Event Filtering** button and checking the appropriate boxes as shown below. +Once you've enabled outbound events to your webhook, you can select which categories of Statsig events you want to export by clicking on the **Event Filtering** button and checking the appropriate boxes as shown in the following example. There are 2 main types of events: *Exposures* (e.g. events logged via the SDK) and *Config Changes* (changelogs for Statsig Console) @@ -149,7 +150,7 @@ Follow the following steps to verify the signature: ``` v0:1671672194836:{"data":[{"user":{"name":"Joe Zeng","email":"joe@statsig.com"},"timestamp":1671672134833,"eventName":"statsig::config_change","metadata":{"type":"Gate","name":"test","description":"- Updated Rule test rollout from 100.00% to 10.00%","environment":"production"}}]} -``` +```text 4. Hash the signature basestring, using the signing secret as a key, and take the hex digest of the hash. Create the full signature by prefixing the hex digest with the version number ("v0") and an equals sign. See sample pseudo code below. 
diff --git a/integrations/fastly.mdx b/integrations/fastly.mdx index 949e46282..13e379e8d 100644 --- a/integrations/fastly.mdx +++ b/integrations/fastly.mdx @@ -1,5 +1,6 @@ --- title: Fastly +description: Statsig offers a suite of integration tools that make usage with Fastly easy: * Statsig automatically pushes project changes to Fastly KV/Config Store --- Statsig offers a suite of integration tools that make usage with Fastly easy: @@ -31,7 +32,7 @@ Install the Statsig serverless SDK: ```bash npm install @statsig/serverless-client -``` +```text #### Import the statsig SDK @@ -39,13 +40,13 @@ Import the Statsig Helper: ```javascript import { handleWithStatsig} from "@statsig/serverless-client/fastly"; -``` +```text #### Use the SDK ```javascript handleWithStatsig(handler, params) -``` +```go The helper method takes two arguments: - `handler` This is your Fastly Compute code. - `params : StatsigFastlyHandlerParams` @@ -90,7 +91,7 @@ const handleRequest = handleWithStatsig(myHandler,{ addEventListener('fetch', (event) => event.respondWith(handleRequest(event))); -``` +```python **That's it!** The helper automatically: @@ -118,7 +119,7 @@ Install the Statsig serverless SDK: ```bash npm install @statsig/serverless-client -``` +```text #### Import the statsig SDK @@ -126,13 +127,13 @@ Import the Fastly client: ```javascript import { StatsigFastlyClient} from "@statsig/serverless-client/fastly"; -``` +```text #### Creating a `StatsigFastlyClient` instance ```javascript const client = new StatsigFastlyClient(""); -``` +```python The client instantiation takes two arguments: - `sdkKey : string` This is your Statsig client API key. It is available from the [Project Settings](https://console.statsig.com/api_keys) page in the Statsig Console. This is used to authenticate your requests. - `options : StatsigOptions` See here, for more [options](https://docs.statsig.com/client/javascript-sdk#statsig-options). 
@@ -151,7 +152,7 @@ const initResult = await client.initializeFromFastly(, , ); -``` +```go The client initialization takes four arguments: - `fastlyStoreType : string` This is the Fastly store type you are using represented by `kv` or `config` @@ -166,7 +167,7 @@ For best practice: store `apiToken` in a Fastly [Secret Store](https://www.fastl #### Checking a Gate ```javascript const value = client.checkGate("pass_gate", user); -``` +```text This is a gate check in code. @@ -182,7 +183,7 @@ Refer to the [Javascript on device evaluation sdk documentation](/client/jsOnDev #### Logging an event ```javascript client.logEvent('fastly_gate_check', user, value.toString()); -``` +```text This is an event log in code. The `logEvent` method takes two parameters: @@ -199,7 +200,7 @@ For more information on event logging, see [here](https://docs.statsig.com/clien ```javascript event.waitUntil(client.flush()); -``` +```python This flushes all events from the sdk to Statsig. **Without this, you wont be able to get diagnostic information in the Statsig Console, nor any event data you logged**. 
@@ -233,7 +234,7 @@ async function handleRequest(event) { } -``` +```ruby ## Other Considerations ### Polling for updates v5.13.0+ diff --git a/integrations/github_code_references.mdx b/integrations/github_code_references.mdx index 661e5f915..22e608698 100644 --- a/integrations/github_code_references.mdx +++ b/integrations/github_code_references.mdx @@ -1,5 +1,6 @@ --- title: Github Code References +description: The Statsig Github Integration allows you to find [Feature Gate](/feature-flags/overview) and [Dynamic Config](/dynamic-config) references within your --- ## Overview diff --git a/integrations/gitlab_code_references.mdx b/integrations/gitlab_code_references.mdx index e37781cc7..d7545edab 100644 --- a/integrations/gitlab_code_references.mdx +++ b/integrations/gitlab_code_references.mdx @@ -1,5 +1,6 @@ --- title: GitLab Code References +description: The Statsig GitLab Integration allows you to find [Feature Gate](/feature-flags/overview) and [Dynamic Config](/dynamic-config) references within your --- ## Overview diff --git a/integrations/gtm.mdx b/integrations/gtm.mdx index 11e7d3e75..e31ea2f2a 100644 --- a/integrations/gtm.mdx +++ b/integrations/gtm.mdx @@ -1,5 +1,6 @@ --- title: Google Tag Manager (GTM) +description: This integration will allow customers using Statsig on the web to leverage their existing Google Tag manager configuration to track events to Statsig. 
--- ## Inbound Integration (Events flow from GTM dataLayer to Statsig) @@ -27,7 +28,7 @@ statsig.on('values_updated', function(evt) { // bind before init is called } }); await statsig.initializeAsync(); -``` +```text #### Using statsig-js ```js await statsig.initialize('', '', { @@ -37,7 +38,7 @@ await statsig.initialize('', '', { })); } }); -``` +```sql ### Step 2: Create new tag @@ -139,7 +140,7 @@ window.StatsigLogger = (function () { })(); -``` +```text ## Outbound Integration (GTM Data is enriched with Statsig test assignments) diff --git a/integrations/introduction.mdx b/integrations/introduction.mdx index 33792f281..9bd0dd036 100644 --- a/integrations/introduction.mdx +++ b/integrations/introduction.mdx @@ -1,6 +1,7 @@ --- title: Integrations Overview sidebarTitle: Overview +description: _For Warehouse Integrations, go to this [page](/data-warehouse-ingestion/introduction)._ The following data connectors are available for use now, and --- _For Warehouse Integrations, go to this [page](/data-warehouse-ingestion/introduction)._ diff --git a/integrations/jira.mdx b/integrations/jira.mdx index a96d099e9..e291a1370 100644 --- a/integrations/jira.mdx +++ b/integrations/jira.mdx @@ -1,5 +1,6 @@ --- title: Jira +description: The Statsig for Jira app allows you to bring insights from your Statsig [Feature Gates](/feature-flags/overview) into your Jira project. Statsig featu --- ## Overview diff --git a/integrations/mcp.mdx b/integrations/mcp.mdx index a0797006f..f0b44347f 100644 --- a/integrations/mcp.mdx +++ b/integrations/mcp.mdx @@ -24,7 +24,7 @@ Add the following MCP server configuration JSON to your respective tool's config } } } -``` +```go Replace `console-YOUR-API-KEY` with your actual Statsig Console API key, which you can retrieve [here](https://console.statsig.com/api_keys). Ensure your API key has the right permissions — read-only keys can view data, while write keys can make changes to your project! @@ -60,7 +60,7 @@ Need other functions? 
We're happy to consider additions by request, reach out in ```bash codex mcp add statsig -- npx --yes mcp-remote https://api.statsig.com/v1/mcp \ --header "statsig-api-key: console-YOUR-CONSOLE-API-KEY" -``` +```sql This will update your `~/.codex/config.toml`. @@ -82,7 +82,7 @@ On Claude Code, we recommend using the http transport directly, run this command ```bash claude mcp add --transport http statsig-local https://api.statsig.com/v1/mcp \ --header "statsig-api-key: console-YOUR-CONSOLE-API-KEY" -``` +```text ## Use Cases diff --git a/integrations/openai.mdx b/integrations/openai.mdx index 7ed95aca4..21f18bcf5 100644 --- a/integrations/openai.mdx +++ b/integrations/openai.mdx @@ -1,5 +1,6 @@ --- title: OpenAI +description: When using a pre-trained large language model, several inputs influence user experience, including the prompts used, the "inference parameters" given --- ## Context @@ -19,7 +20,7 @@ Of course, you'll need to install both the Statsig and OpenAI Python packages be ```bash pip3 install openai, statsig -``` +```text After that, we can begin coding in a python file: @@ -31,7 +32,7 @@ import time openai.api_key = "your_openai_key" # Replace with your own key statsig.initialize("your_statsig_secret") # Replace with your Statsig secret user = StatsigUser("user-id") #This is a placeholder ID - in a normal experiment Statsig recommends using a user's actual unique ID for consistency in targeting. See https://docs.statsig.com/concepts/user -``` +```javascript ### The ask_question Function @@ -42,7 +43,7 @@ The following code all occurs in one function titled ask_question (see the [fina ```python #ask the user for a question to query GPT with question = input("\nWhat is your question? 
") -``` +```text First, we prompt the user with the question they'd like to ask ChatGPT @@ -60,7 +61,7 @@ completion = openai.ChatCompletion.create( {"role": "user", "content": question} ] ) -``` +```text Next, we request a completion that queries the GPT model specified by the Statsig experiment (either gpt-3.5-turbo or gpt-4). We also start a timer so we can track the response time in our events later on. @@ -74,7 +75,7 @@ statsig.log_event(StatsigEvent(user, "chat_completion", value=c.finish_reason, m #print the message back to the user print(f"\nAnswer: {c.message['content']}") -``` +```python With the response from OpenAI we have our first set of useful information to log to Statsig - like the response time and tokens used. We log this information with Statsig's SDK, storing them in the metadata of the request. @@ -88,7 +89,7 @@ if satisfaction == 'y': statsig.log_event(StatsigEvent(user, "satisfaction")) elif satisfaction == 'n': statsig.log_event(StatsigEvent(user, "dissatisfaction")) -``` +```text Next, we log a more explicit indicator of feedback, the user's self-reported satisfaction or dissatisfaction. The satisfaction metric can provide a strong indicator of the model's overall power. @@ -98,9 +99,9 @@ And we're done - we can run this Python program with the following code, now out if __name__ == "__main__": while input("Would you like to ask a question? (y/n): ").lower() == 'y': ask_question() -``` +```sql -### Tips for Using Statsig with AI +## Tips for Using Statsig with AI 1. Experimentation: You can also test other model parameters like temperature, top_p, or initial prompts. 2. Log Useful Data: Consider logging other interesting user interactions or feedback. 3. Analyze and Iterate: After collecting enough data, analyze the results on the Statsig dashboard. 
diff --git a/integrations/pulumi.mdx b/integrations/pulumi.mdx index 546a6beef..69a1beaac 100644 --- a/integrations/pulumi.mdx +++ b/integrations/pulumi.mdx @@ -1,5 +1,6 @@ --- title: Pulumi +description: The [Statsig Pulumi Provider](https://www.pulumi.com/registry/packages/statsig/) allows you to configure your gates and experiments using Pulumi Infra --- The [Statsig Pulumi Provider](https://www.pulumi.com/registry/packages/statsig/) allows you to configure your gates and experiments using Pulumi Infrastructure as Code. The provider synchronizes with Statsig via the Console API. If there is something you need to perform that isn't supported by the Pulumi Provider, checkout the [Console API](/console-api/introduction). @@ -24,9 +25,9 @@ runtime: nodejs # or python, go, dotnet config: statsig:consoleApiKey: value: 'YOUR_CONSOLE_API_KEY' -``` +```python -### Configuration Reference +## Configuration Reference - `consoleApiKey` (String) - The Statsig Console API key retrieved from Statsig console. @@ -40,7 +41,7 @@ import * as statsig from "@statsig/pulumi-statsig"; // Create a Feature Gate const gate = new statsig.Gate("my-gate", {}); -``` +```text ### Python @@ -50,9 +51,9 @@ import pulumi_statsig as statsig # Create a Feature Gate gate = statsig.Gate("my-gate") -``` +```text -### Go +## Go ```go package main @@ -72,7 +73,7 @@ func main() { return nil }) } -``` +```text ### C# diff --git a/integrations/statsiglite.mdx b/integrations/statsiglite.mdx index e7f7c50a9..bbacc882a 100644 --- a/integrations/statsiglite.mdx +++ b/integrations/statsiglite.mdx @@ -1,5 +1,6 @@ --- title: Statsig Lite +description: Statsig Lite is a free experiment calculator, powered by our stats engine. It lets you visualize experiment results for data from experiments you've a --- ## What it is Statsig Lite is a free experiment calculator, powered by our stats engine. It lets you visualize experiment results for data from experiments you've already run. 
You bring anonymized experiment exposure and metrics/events in CSVs, and get to preview it in the Statsig Console without connecting your production applications or data warehouses. diff --git a/integrations/terraform/introduction.mdx b/integrations/terraform/introduction.mdx index f03516729..cc283b5c7 100644 --- a/integrations/terraform/introduction.mdx +++ b/integrations/terraform/introduction.mdx @@ -1,5 +1,6 @@ --- title: Statsig Terraform Provider +description: The Statsig Terraform Provider allows you to configure your gates and experiments with Terraform. The provider synchronizes with Statsig via the Conso --- The Statsig Terraform Provider allows you to configure your gates and experiments with Terraform. The provider synchronizes with Statsig via the Console API. If there is something you need to perform that isn't supported by the Terraform Provider, checkout the [Console API](/console-api/introduction). diff --git a/integrations/terraform/terraform_experiment.mdx b/integrations/terraform/terraform_experiment.mdx index 04f0e3f27..df381700e 100644 --- a/integrations/terraform/terraform_experiment.mdx +++ b/integrations/terraform/terraform_experiment.mdx @@ -5,8 +5,8 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: You can create a .tf file (Terraform File) to configure your Statsig experiments. All features of [console/v1/experiments](/console-api/experiments) a --- - You can create a .tf file (Terraform File) to configure your Statsig experiments. All features of [console/v1/experiments](/console-api/experiments) are supported. The layout is very similar to the JSON body of a /experiments request. Requiring the Statsig provider. (You will need to change the version). @@ -20,10 +20,13 @@ terraform { } } } -``` +```text ## Basic Example +This page explains basic example. 
+ + Creating a basic experiment resource ```go @@ -52,7 +55,7 @@ You can update the `status` field to four possible values, **setup**, **active** If you would like to see code examples of how the **Setup -> Run -> Ship** flow works for an experiment, check out our [Terraform Acceptance Tests](#) for experiments. -#### Status: setup +### Status: setup When an experiment has this status, you are stating that your experiment is not ready, and no values will be served via a Statsig SDK or the HttpAPI. @@ -77,4 +80,4 @@ Experiments with this status will not serve any values and will not collect any -A full experiment example is included in the open source Github repo https://github.com/statsig-io/terraform-provider-statsig/blob/main/examples/resources/statsig_experiment/resource.tf. +A full experiment example is included in the open source Github repo https://github.com/statsig-io/terraform-provider-statsig/blob/main/examples/resources/statsig_experiment/resource.tf. \ No newline at end of file diff --git a/integrations/terraform/terraform_gate.mdx b/integrations/terraform/terraform_gate.mdx index 3228b6149..bd03869d2 100644 --- a/integrations/terraform/terraform_gate.mdx +++ b/integrations/terraform/terraform_gate.mdx @@ -5,8 +5,8 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: You can create a .tf file (Terraform File) to configure your Statsig feature gates. All features of [console/v1/gates](/console-api/gates) are support --- - You can create a .tf file (Terraform File) to configure your Statsig feature gates. All features of [console/v1/gates](/console-api/gates) are supported. The layout is very similar to the JSON body of a /gates request. Requiring the Statsig provider. (You will need to change the version). @@ -20,10 +20,13 @@ terraform { } } } -``` +```text ## Basic Example +This page explains basic example. 
+ + Creating a basic gate resource ```go @@ -40,7 +43,7 @@ resource "statsig_gate" "my_gate" { } } } -``` +```python ## Conditions @@ -62,4 +65,4 @@ conditions { See the full list of conditions [here](/console-api/rules#all-conditions). -A full gate example is included in the open source Github repo https://github.com/statsig-io/terraform-provider-statsig/blob/main/statsig/test_resources/gate_full.tf +A full gate example is included in the open source Github repo https://github.com/statsig-io/terraform-provider-statsig/blob/main/statsig/test_resources/gate_full.tf \ No newline at end of file diff --git a/integrations/triggers/datadog.mdx b/integrations/triggers/datadog.mdx index ffe6e9967..298e2cae9 100644 --- a/integrations/triggers/datadog.mdx +++ b/integrations/triggers/datadog.mdx @@ -1,5 +1,6 @@ --- title: Datadog +description: Triggers can be used with Datadog to toggle a gate on or off depending on the performance of a metric. 1. On Statsig console, navigate to the [integra --- ### Overview diff --git a/integrations/triggers/introduction.mdx b/integrations/triggers/introduction.mdx index 5488af866..348316436 100644 --- a/integrations/triggers/introduction.mdx +++ b/integrations/triggers/introduction.mdx @@ -1,6 +1,7 @@ --- title: Datadog Triggers Overview sidebarTitle: Overview +description: Triggers are a way to make changes to your Statsig project from a 3rd party source. You can create a trigger with a specific action like "Disable a fe --- ### Overview Triggers are a way to make changes to your Statsig project from a 3rd party source. 
diff --git a/integrations/vercel.mdx b/integrations/vercel.mdx index 898a10b75..0471a9e43 100644 --- a/integrations/vercel.mdx +++ b/integrations/vercel.mdx @@ -1,7 +1,7 @@ --- title: Vercel +description: Statsig offers a suite of integration tools that makes usage with Vercel simple: - Statsig automatically pushes project changes to Vercel's Edge Confi --- - Statsig offers a suite of integration tools that makes usage with Vercel simple: - Statsig automatically pushes project changes to Vercel's Edge Config, providing low latency SDK startup. @@ -43,20 +43,20 @@ Statsig offers a suite of integration tools that makes usage with Vercel simple: #### Install the Statsig SDK ```bash npm install @statsig/vercel-edge - ``` + ```javascript #### import the Vercel helper ```bash import {handleWithStatsig} from '@statsig/vercel-edge - ``` + ```text #### Use the SDK ```Javascript export default handleWithStatsig(handler, params) - ``` + ```javascript The helper method takes two arguments: - `handler` This is your Vercel function code - `params` @@ -74,6 +74,9 @@ Statsig offers a suite of integration tools that makes usage with Vercel simple: ### Example Usage +This page explains example usage. + + ```javascript api/index.js import { handleWithStatsig } from "@statsig/vercel-edge"; @@ -91,7 +94,7 @@ export default handleWithStatsig(myHandler,{ configKey : process.env.EDGE_CONFIG_KEY, statsigSdkKey: process.env.STATSIG_KEY }) -``` +```javascript The `handler` parameter is **your Vercel function code** .This is the same code you would normally export directly in your API route (for example, `myHandler` in the snippet above). Instead of exporting it, you pass it into `handleWithStatsig`, which takes care of the Statsig setup and cleanup for you. 
@@ -128,19 +131,19 @@ Instead of exporting it, you pass it into `handleWithStatsig`, which takes care ```bash npm install @statsig/vercel-edge - ``` + ```text ```bash import {StatsigVercelClient} from '@statsig/vercel-edge' - ``` + ```text ```javascript const client = new StatsigVercelClient(process.env.STATSIG_KEY) - ``` + ```python The client instantiation takes two arguments: - `sdkKey : string` This is your Statsig client API key. It is available from the [Project Settings](https://console.statsig.com/api_keys) page in the Statsig Console. This is used to authenticate your requests. - `options : StatsigOptions` See here, for more [options](https://docs.statsig.com/client/javascript-sdk#statsig-options). @@ -154,7 +157,7 @@ Instead of exporting it, you pass it into `handleWithStatsig`, which takes care ```javascript const init = await client.initializeFromEdgeConfig(); - ``` + ```text The client initialization takes one argument: - `ConfigKey : string` The Key associated with your Statsig specs in your Edge Config @@ -163,7 +166,7 @@ Instead of exporting it, you pass it into `handleWithStatsig`, which takes care ```javascript const GateResult = client.checkGate('pass_gate', user); - ``` + ```text This is a gate check in code. The `checkGate` method takes two arguments: @@ -177,7 +180,7 @@ Instead of exporting it, you pass it into `handleWithStatsig`, which takes care ```javascript client.logEvent('gate_check', { userID: randomUserId }); - ``` + ```text This is an event log in code. The `logEvent` method takes two parameters: @@ -190,7 +193,7 @@ Instead of exporting it, you pass it into `handleWithStatsig`, which takes care ```javascript waitUntil(statsig.flush()); - ``` + ```python This flushes all events from the SDK to Statsig. **Without this, you will not be able to get diagnostic information in the Statsig Console, nor any event data you logged**. 
@@ -224,7 +227,7 @@ export default async function handler(request) { JSON.stringify({ passed, user }), ); } -``` +```python ## Other Considerations ### Polling for updates v5.13.0+ @@ -274,4 +277,4 @@ You can connect your Vercel logs to Statsig with a Log Drain to start exploring 1. From the [Vercel dashboard](https://vercel.com/), go to **Settings -> Drains** and click **Add Drain -> Integration**. 2. Select **Statsig**, follow the configuration steps provided, and choose a project to connect with the service. -3. Navigate to [Statsig's Logs Explorer](https://console.statsig.com/logs) to see your logs flow through. +3. Navigate to [Statsig's Logs Explorer](https://console.statsig.com/logs) to see your logs flow through. \ No newline at end of file diff --git a/integrations/workersai.mdx b/integrations/workersai.mdx index 971c4e381..f9eb8a2f5 100644 --- a/integrations/workersai.mdx +++ b/integrations/workersai.mdx @@ -1,5 +1,6 @@ --- title: Cloudflare Workers AI +description: By integrating Statsig with Cloudflare Workers AI, you can easily conduct experiments on different prompts and models, and gather real-time analytics --- ## Statsig Cloudflare Workers AI Integration diff --git a/messages/healthhub.mdx b/messages/healthhub.mdx index e5bd1eda2..309bb2504 100644 --- a/messages/healthhub.mdx +++ b/messages/healthhub.mdx @@ -5,6 +5,7 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: Initialization is the step in SDK setup where your SDK downloads the values for your set of Flags(Gates), Experiments, Dynamic Configs, etc., and prep --- ### Detecting Initializations diff --git a/messages/serverSDKConnection.mdx b/messages/serverSDKConnection.mdx index 84cbc6a98..67500e187 100644 --- a/messages/serverSDKConnection.mdx +++ b/messages/serverSDKConnection.mdx @@ -5,6 +5,7 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: In Server SDKs, you may see an error message indicating that initialization failed. 
It is best practice to implement either: 1. [bootstrapValues and --- ### Error Initializing diff --git a/metrics/archiving-metrics.mdx b/metrics/archiving-metrics.mdx index 052144eb2..59b9ced19 100644 --- a/metrics/archiving-metrics.mdx +++ b/metrics/archiving-metrics.mdx @@ -102,7 +102,7 @@ If you’d like to turn off auto-archiving entirely for your project, you may do # Deleting Metrics -### Deleting a Metric +## Deleting a Metric To delete a metric, go the Metrics Detail View page of a metric you wish to delete, select the "..." in the upper right-hand corner, and select **Delete**. Delete metric diff --git a/metrics/custom-dau.mdx b/metrics/custom-dau.mdx index 0ebc6a555..747a49793 100644 --- a/metrics/custom-dau.mdx +++ b/metrics/custom-dau.mdx @@ -8,7 +8,7 @@ description: "Step-by-step guide to create custom Daily Active User (DAU) metric This guide will walk you through the steps to create a custom DAU metric. Follow the instructions carefully to ensure successful creation of your metric. -### **Step 1: Navigate to the Metrics Catalog** +## **Step 1: Navigate to the Metrics Catalog** To begin, go to the "Metrics Catalog" in the left navigation bar and click on "Create" button. diff --git a/metrics/ingest.mdx b/metrics/ingest.mdx index f1f24eed4..6ddffaeb6 100644 --- a/metrics/ingest.mdx +++ b/metrics/ingest.mdx @@ -7,7 +7,7 @@ Statsig can ingest your precomputed product and business metrics using our data Statsig does not automatically process these metrics until you mark them as ready, as it's possible you might land data out of order. 
Once you are finished loading data for a period, you mark the data as ready by hitting the `mark_data_ready` API: -``` +```bash curl --location --request POST ‘https://api.statsig.com/v1/mark_data_ready’ \ --header ‘statsig-api-key: {your statsig server secret}’ \ --header ‘Content-Type: application/json’ \ diff --git a/metrics/metric-dimensions.mdx b/metrics/metric-dimensions.mdx index 0173861b2..e97c5bf3b 100644 --- a/metrics/metric-dimensions.mdx +++ b/metrics/metric-dimensions.mdx @@ -13,7 +13,7 @@ Statsig enables you to define up to four custom dimensions for an event (one via Event property configuration -Providing custom dimensions with logged events allows you to break down the impact on the total **add-to-cart** events by category in Pulse as shown below. This enables you to zero in on the category that's most impacted by your experiment. +Providing custom dimensions with logged events allows you to break down the impact on the total **add-to-cart** events by category in Pulse as shown in the following example. This enables you to zero in on the category that's most impacted by your experiment. Pulse results breakdown by product category diff --git a/product-analytics/alerts-overview.mdx b/product-analytics/alerts-overview.mdx index b37c10cec..486888bcb 100644 --- a/product-analytics/alerts-overview.mdx +++ b/product-analytics/alerts-overview.mdx @@ -1,6 +1,7 @@ --- title: Alerts Overview sidebarTitle: Alerts Overview +description: Statsig offers two types of alerts on the platform today: 1. 
**[Topline Metric Alerts](/product-analytics/alerts/topline_alerts)** - Monitor a metric’ --- # Alerts diff --git a/product-analytics/drilldown.mdx b/product-analytics/drilldown.mdx index 0d755c402..715223109 100644 --- a/product-analytics/drilldown.mdx +++ b/product-analytics/drilldown.mdx @@ -1,16 +1,16 @@ --- title: 'Metric Drilldown Charts' sidebarTitle: 'Metric Drilldown' -description: 'A versatile tool for understanding customer behavior and trends within your product' +description: 'A versatile tool for understanding user behavior and trends within your product' --- -The Metric Drilldown chart in Metrics Explorer is a versatile tool for understanding customer behavior and trends within your product. Designed for clarity and depth, it allows you to analyze key metrics and user behavior over time. Importantly, it also allows you to delve several layers deeper into your metrics by filtering to interesting properties or cohorts, as well as the ability to group-by these same properties to compare behaviors between groups. +The Metric Drilldown chart in Metrics Explorer is a versatile tool for understanding user behavior and trends within your product. Designed for clarity and depth, it allows you to analyze key metrics and user behavior over time. Importantly, it also allows you to delve several layers deeper into your metrics by filtering to interesting properties or cohorts, as well as the ability to group-by these same properties to compare behaviors between groups. ## Use Cases - **Trend Analysis Over Time**: Gain insights into how specific metrics evolve over time. Visualizing product data in Metrics Explorer allows you to track and compare key performance indicators and user behavior, and helps understand long-term trends and short-term fluctuations in how users engage with your product and your product's performance. 
- **Identify interesting cohorts**: Define and explore interesting cohorts by zooming in on users who performed certain events at frequencies you define. -- **Understand how Targeted Feature Launches, A/B tests, and Experiments affect usage:** Split any metric out by Experiment Group or Feature Gate Group to compare how those metrics perform for different groups. Leverage automatically generated annotations on charts for important decisions such as Feature or Experiment launches to help correlate those decisions with changing trends. +- **Understand how Targeted Feature Launches and Experiments affect usage:** Split any metric out by Experiment Group or Feature Flag Group to compare how those metrics perform for different groups. Leverage automatically generated annotations on charts for important decisions such as Feature or Experiment launches to help correlate those decisions with changing trends. - **Segmentation and Comparison**: Dissect metrics to understand how different user segments or product features perform. This is crucial for identifying which areas are providing value for your users and those which may need more attention or improvement. It is also useful in understanding how different segments interact with your product, and for identifying unique trends or needs within these groups. - **Filtering**: Focus on specific segments or cohorts that are of particular interest. This filtering capability allows for a more targeted analysis, helping you to understand the behaviors and needs of specific user groups. - **Statistical Understanding:** Understand how the average, median, or other percentile value (e.g. p99, p95) of a metric changes over time. @@ -18,9 +18,9 @@ The Metric Drilldown chart in Metrics Explorer is a versatile tool for understan - **Flexible Visualization Options**: Choose from a range of visualization formats, like line charts, bar charts, horizontal bar charts, and stacked bar charts, to best represent your data.
The right visualization can make complex data more understandable and actionable. - **Event Samples for Debugging**: Quickly access and analyze a metric's underlying sample events, and the granular user-level information attached to the event. This feature is particularly useful for troubleshooting and understanding the root causes of trends or anomalies in your data. - **Detailed Data Control**: Adjust the granularity of your data analysis, from high-level overviews to detailed breakdowns. Use features like rolling averages to smooth data for more accurate trend analysis and decision-making. -- **Debug Experiments**: Breakdown your experiment's first exposures to understand how certain properties or groups (feature gates, experiments, holdouts) affect your experiment. -- **View Sample Ratio Mismatch (SRM)**: See the SRM of your experiments over time and drill down into event and user metadata fields to understand how certain properties (country, browser, etc.) or groups (feature gates, experiments, holdouts) can affect your experiment SRM. -- **Debug Feature Gates**: Breakdown your feature gate's first exposures per rule to understand how certain properties or groups (feature gates, experiments, holdouts) affect your feature gate. +- **Debug Experiments**: Breakdown your experiment's first exposures to understand how certain properties or groups (feature flags, experiments, holdouts) affect your experiment. +- **View Sample Ratio Mismatch (SRM)**: See the SRM of your experiments over time and drill down into event and user metadata fields to understand how certain properties (country, browser, etc.) or groups (feature flags, experiments, holdouts) can affect your experiment SRM. +- **Debug Feature Flags**: Breakdown your feature flag's first exposures per rule to understand how certain properties or groups (feature flags, experiments, holdouts) affect your feature flag. 
# Using the Metric Drilldown Chart @@ -72,7 +72,7 @@ When selecting an event, the total number of times the event occurred (Count) on ### Exposures Selecting an experiment or gate exposure plots its first exposures over your selected date-range. First exposures are the first time a unique id (set on the experiment or gate) was exposed to each of your experiment groups or each of your gate's rules. -### Understanding First Exposures in Feature Gates +### Understanding First Exposures in Feature Flags When a gate is checked for a user, an exposure is created for the rule who's conditions they've met. If the user is exposed to multiple different rules at different times, the first exposure from each rule is kept. We recommend grouping by rule to see each rule's exposures separately. @@ -117,7 +117,7 @@ In addition to plotting metrics, you may want to drill into your metrics to iden Leveraging a Group-By makes it easy to disaggregate plotted metrics and events by a selected property or group. Doing so allows you to compare how an action or user behavior may correlate with a specific property. Adding a Group-By will split the the plotted metric(s) into several plots. By default we only show the top ten groups by value on the chart, but you can select more groups. You can select 50 groups when the chart is set to daily granularity. -A metric can be grouped-by event properties, user profile properties, experiment group, or feature gate group. +A metric can be grouped-by event properties, user profile properties, experiment group, or feature flag group. Group-By limits can be added by first adding a group-by, then moving to the summary table below the charts, and clicking the "Top X series" dropdown button. From there you can select how many groups you want to see at once. You can use this to further drill down on your top X categories (up to 50). This feature is available for line charts, stacked-line charts, bar charts, and stacked-bar charts. 
@@ -127,9 +127,9 @@ Group-By limits can be added by first adding a group-by, then moving to the summ When you have a Group-By applied, you can view the results as raw numbers, or as a percentage. -**Feature Gate and Experiment Groups** +**Feature Flag and Experiment Groups** -At Statsig we believe in the power of experimentation. To that end, you can also select one of your Feature Gate or Experiments in order to split out a metric by the different groups in the selected test. +At Statsig we believe in the power of experimentation. To that end, you can also select one of your Feature Flag or Experiments in order to split out a metric by the different groups in the selected test. **Adding a Group-By** diff --git a/release-pipeline/actions.mdx b/release-pipeline/actions.mdx index 1f3b71c08..a0f3b0847 100644 --- a/release-pipeline/actions.mdx +++ b/release-pipeline/actions.mdx @@ -11,7 +11,7 @@ Once a Release Pipeline is triggered, you can control its progression using the Release pipeline management actions -### Approve +## Approve **What it does:** Kick off a phase that requires a manual approval before rollout begins. This is useful when human verification is required before changes move forward. diff --git a/sdks/api-keys.mdx b/sdks/api-keys.mdx index c8542bbb4..d6a90e156 100644 --- a/sdks/api-keys.mdx +++ b/sdks/api-keys.mdx @@ -1,5 +1,6 @@ --- title: API Keys +description: There are three main types of API keys: 1. **Client API Key**: Intended for getting configuration and logging events on the client side. 
--- ## Overview diff --git a/sdks/array-operators.mdx b/sdks/array-operators.mdx index f0b6f3ad9..491981312 100644 --- a/sdks/array-operators.mdx +++ b/sdks/array-operators.mdx @@ -1,5 +1,6 @@ --- title: Array Operators +description: Array operators allow for checking if an array custom field does or does not contain specific values. --- Array operators allow for checking if an array custom field does or does not contain specific values diff --git a/sdks/client-vs-server.mdx b/sdks/client-vs-server.mdx index fb4954075..b039ef206 100644 --- a/sdks/client-vs-server.mdx +++ b/sdks/client-vs-server.mdx @@ -2,6 +2,7 @@ title: Client vs Server SDKs slug: /sdks/client-vs-server +description: Statsig offers client and server SDKs to enable experimentation and feature management across different parts of your application. --- Statsig offers client and server SDKs to enable experimentation and feature management across different parts of your application. This document outlines when you should choose each. diff --git a/sdks/debugging.mdx b/sdks/debugging.mdx index c2727945d..2f8991342 100644 --- a/sdks/debugging.mdx +++ b/sdks/debugging.mdx @@ -187,14 +187,14 @@ const bootstrapValues = Statsig.getClientInitializeResponse(userA); const bootstrapValues = await fetchStatsigValuesFromMyServers(); const userB = { userID: 'user-b' }; // <-- Different from userA await Statsig.initialize('client-key', userB, { initializeValues: bootstrapValues }); -``` +``` Even subtle differences count as a mismatch—adding `customIDs` or other attributes results in a distinct user object. 
```js const userA = { userID: 'user-a' }; const userAExt = { userID: 'user-a', customIDs: { employeeID: 'employee-a' } }; -``` +``` ### BootstrapStableIDMismatch @@ -209,7 +209,7 @@ const bootstrapValues = Statsig.getClientInitializeResponse(userA); const bootstrapValues = await fetchStatsigValuesFromMyServers(); const userB = { stableID: '12345' }; // <-- Server user lacked a stableID await Statsig.initialize('client-key', userB, { initializeValues: bootstrapValues }); -``` +``` Even if both sides start with `{}`, the client-generated stable ID may not match the server’s, leading to the same warning. diff --git a/sdks/getting-started.mdx b/sdks/getting-started.mdx index b85c20eda..deb2b9da2 100644 --- a/sdks/getting-started.mdx +++ b/sdks/getting-started.mdx @@ -1,5 +1,6 @@ --- title: "SDK Overview" +description: Statsig's SDKs are the in-code tool you'll use to show experiment variants, flag your features, and log events. --- import ListOfSDKs from '/snippets/sdks/list-of-sdks.mdx' diff --git a/sdks/how-evaluation-works.mdx b/sdks/how-evaluation-works.mdx index 55ff77870..143282aeb 100644 --- a/sdks/how-evaluation-works.mdx +++ b/sdks/how-evaluation-works.mdx @@ -1,22 +1,23 @@ --- title: "How Evaluation Works" +description: The essential function of the Statsig SDKs is reliable, consistent, incredibly performant allocation of users to the correct bucket in your experiment or feature flag. --- ## Evaluation's importance -The essential function of the Statsig SDKs is reliable, consistent, incredibly performant allocation of users to the correct bucket in your experiment or feature gate. Understanding how we accomplish this can help you answer questions like: +The essential function of the Statsig SDKs is reliable, consistent, incredibly performant allocation of users to the correct bucket in your experiment or feature flag. 
Understanding how we accomplish this can help you answer questions like: - Why do I have to pass every user attribute, every time? - Why do I have to wait for initialization to complete? - When do you decide each users' bucket? ## How Evaluation Works -Evaluation in Statsig is deterministic. Given the same user object and the same state of the experiment or feature gate, Statsig always returns the same result, even when evaluated on different platforms (client or server). Here's how it works: +Evaluation in Statsig is deterministic. Given the same user object and the same state of the experiment or feature flag, Statsig always returns the same result, even when evaluated on different platforms (client or server). Here's how it works: -1. **Salt Creation**: Each experiment or feature gate rule generates a unique salt. +1. **Salt Creation**: Each experiment or feature flag rule generates a unique salt. 2. **Hashing**: The user identifier (e.g., userId, organizationId) is passed through a SHA256 hashing function, combined with the salt, which produces a large integer. 3. **Bucket Assignment**: The large integer is then subjected to a modulus operation with 10000 (or 1000 for layers), assigning the user to a bucket. 4. **Bucket Determination**: The result defines the specific bucket out of 10000 (or 1000 for layers) where the user is placed. -This process ensures a randomized but deterministic bucketing of users across different experiments or feature gates. The unique salt per-experiment or feature gate rule ensures that the same user can be assigned to different buckets in different experiments. This also means that if you rollout a feature gate rule to 50% - then back to 0% - then back to 50%, the same 50% of users will be re-exposed, **so long as you reuse the same rule** - and not create a new one. See [here](/faq/#when-i-change-the-rollout-percentage-of-a-rule-on-a-feature-gate-will-users-who-passed-continue-to-pass). 
+This process ensures a randomized but deterministic bucketing of users across different experiments or feature flags. The unique salt per-experiment or feature flag rule ensures that the same user can be assigned to different buckets in different experiments. This also means that if you rollout a feature flag rule to 50% - then back to 0% - then back to 50%, the same 50% of users will be re-exposed, **so long as you reuse the same rule** - and not create a new one. See [here](/faq/#when-i-change-the-rollout-percentage-of-a-rule-on-a-feature-gate-will-users-who-passed-continue-to-pass). For more details, check our open-source SDKs [here](https://github.com/statsig-io/node-js-server-sdk/blob/main/src/Evaluator.ts). @@ -31,7 +32,7 @@ All of the above logic holds true for both SDKs. In both, the user's assignment * **Performant Evaluation:** no evaluations require a network request, and we focus on evaluation performance, meaning that checks take \<1ms after evaluation. * **The SDKs don't "remember" user attributes, or previous evaluations:** we rely on you to pass all of the necessary user attributes consistently - and we promise if you do, we'll provide the same value. -A common assumption is that Statsig tracks of a list of all ids and what group they were assigned to for experiments/gates. While our data pipelines track users exposed to each variant to compute experiment results, we do not cache previous evaluations and maintain distributed evaluation state across client and server SDKs. That won't scale - we've even talked to customers doing this in the past, and were paying more for Redis to maintain that state than they ended up paying for Statsig. +A common assumption is that Statsig tracks a list of all ids and what group they were assigned to for experiments/gates. 
While our data pipelines track users exposed to each variant to compute experiment results, we do not cache previous evaluations or maintain distributed evaluation state across client and server SDKs. That won't scale - we've even talked to users doing this in the past, who were paying more for Redis to maintain that state than they ended up paying for Statsig. * **Server SDKs can handle multiple users:** because they hold the ruleset in memory, Server SDKs can evaluate any user. Without a network request. This means you'll have to pass a user object into the getExperiment method on Server SDKs, whereas on client SDKs you pass it into initialize(). * **We ensure each user receives the same bucket:** our ID-based hashing assignment guarantees consistency. If you make a change in console that could affect user bucketing on an experiment, we'll provide warning. diff --git a/sdks/identify-users.mdx b/sdks/identify-users.mdx index fd0ece770..ffcdc4356 100644 --- a/sdks/identify-users.mdx +++ b/sdks/identify-users.mdx @@ -1,5 +1,6 @@ --- title: "Identify Users" +description: When you run an experiment, roll out a feature, or log events, Statsig needs to know who the user is. --- ## Why identify users? @@ -22,7 +23,7 @@ Start by defining a basic user object: "userID": "u_123", // required for most setups "email": "user@example.com" // optional } -``` +``` @@ -53,7 +54,7 @@ export function App() { ); } -``` +``` 
Update User
; -``` +``` diff --git a/sdks/quickstart.mdx b/sdks/quickstart.mdx index 0d7e6b568..c71979831 100644 --- a/sdks/quickstart.mdx +++ b/sdks/quickstart.mdx @@ -17,7 +17,7 @@ If you're looking for a more detailed guide, check out the [SDK Overview](/sdks/ ```bash npm install @statsig/react-bindings - ``` + ```
@@ -36,7 +36,7 @@ If you're looking for a more detailed guide, check out the [SDK Overview](/sdks/ // [!code ++] ); } - ``` + ``` This example assumes you're using client-side React, if you're Server-Side Rendering, you'd be better served by our [Next.js docs](). @@ -55,7 +55,7 @@ If you're looking for a more detailed guide, check out the [SDK Overview](/sdks/ return (
Gate is {client.checkGate('check_user') ? 'passing' : 'failing'}.
); - ``` + ``` First, create an Experiment on the [Experiments page](https://console.statsig.com/experiments) in console @@ -66,14 +66,14 @@ If you're looking for a more detailed guide, check out the [SDK Overview](/sdks/ return (
Headline Parameter: {experiment.get('my_experiment_parameter_name', 'fallback_value')}.
); - ``` + ```
You can use Events to power metrics in your experiment or gates. Events don't need to be set up in console first, just add to your code: ```jsx const { client } = useStatsigClient(); return - ``` + ``` @@ -91,7 +91,7 @@ If you're looking for a more detailed guide, check out the [SDK Overview](/sdks/ ```html - ``` + ```
@@ -104,7 +104,7 @@ If you're looking for a more detailed guide, check out the [SDK Overview](/sdks/ First, create a gate on the [Feature Gates page](https://console.statsig.com/gates) in console, then check it in-code: ```jsx window.Statsig.instance().checkGate("my_feature_gate_name"); - ``` + ```text You'll want to wait for the SDK to initialize before checking a gate to ensure it has fresh values, one way to accomplish this is waiting for the ["values_updated"](/client/javascript-sdk#client-event-emitter) event. @@ -114,7 +114,7 @@ If you're looking for a more detailed guide, check out the [SDK Overview](/sdks/ First, create an Experiment on the [Experiments page](https://console.statsig.com/experiments) in console ```jsx window.Statsig.instance().getExperiment("my_experiment_name").get('my_experiment_parameter_name'); - ``` + ```text You'll want to wait for the SDK to initialize before getting an experiment to ensure it has fresh values, one way to accomplish this is waiting for the ["values_updated"](/client/javascript-sdk#client-event-emitter) event. @@ -124,7 +124,7 @@ If you're looking for a more detailed guide, check out the [SDK Overview](/sdks/ You can use Events to power metrics in your experiment or gates. 
Events don't need to be set up in console first, just add to your code: ```jsx window.Statsig.instance().logEvent("my_checkout_event_name", "event_value_item_1234", {"event_metadata": "my_metadata"}) - ``` + ```text @@ -146,7 +146,7 @@ If you're looking for a more detailed guide, check out the [SDK Overview](/sdks/ ```shell pip install statsig-python-core - ``` + ```text @@ -161,7 +161,7 @@ If you're looking for a more detailed guide, check out the [SDK Overview](/sdks/ statsig.initialize().wait() statsig.shutdown().wait() - ``` + ```python @@ -174,7 +174,7 @@ If you're looking for a more detailed guide, check out the [SDK Overview](/sdks/ ```python user_object = StatsigUser(user_id="123", email="testuser@statsig.com") //add any number of other attributes gate_value = statsig.check_gate(user_object, "my_feature_gate_name"): - ``` + ```text First, create an Experiment on the [Experiments page](https://console.statsig.com/experiments) in console @@ -182,7 +182,7 @@ If you're looking for a more detailed guide, check out the [SDK Overview](/sdks/ user_object = StatsigUser(user_id="123", email="testuser@statsig.com" my_experiment_object = statsig.get_experiment(user_object, "my_experiment_name") my_experiment_parameter_value = my_experiment_object.get_string('my_experiment_parameter_name') - ``` + ```text You can use Events to power metrics in your experiment or gates. 
Events don't need to be set up in console first, just add to your code: @@ -194,7 +194,7 @@ If you're looking for a more detailed guide, check out the [SDK Overview](/sdks/ event_name="my_checkout_event_name", value="SKU_12345" ) - ``` + ```text @@ -211,7 +211,7 @@ If you're looking for a more detailed guide, check out the [SDK Overview](/sdks/ ```bash npm i @statsig/statsig-node-core - ``` + ```text @@ -225,7 +225,7 @@ If you're looking for a more detailed guide, check out the [SDK Overview](/sdks/ const statsigWithOptions = new Statsig("secret-key", options); await statsigWithOptions.initialize(); - ``` + ```python @@ -238,7 +238,7 @@ If you're looking for a more detailed guide, check out the [SDK Overview](/sdks/ ```js const userObject = new StatsigUser({ userID: "123", email="testuser@statsig.com" }); const is_gate_enabled = statsig.checkGate(userObject, "my_feature_gate_name"): - ``` + ```text First, create an Experiment on the [Experiments page](https://console.statsig.com/experiments) in console @@ -246,7 +246,7 @@ If you're looking for a more detailed guide, check out the [SDK Overview](/sdks/ const userObject = new StatsigUser({ userID: "123", email="testuser@statsig.com" }); const myExperimentObject = statsig.getExperiment(userObject, "my_experiment_name") const myExperimentParameterValue = myExperimentObject.getValue('my_experiment_parameter_name') - ``` + ```text You can use Events to power metrics in your experiment or gates. 
Events don't need to be set up in console first, just add to your code: diff --git a/sdks/support.mdx b/sdks/support.mdx index 44ac82a11..4d48178c3 100644 --- a/sdks/support.mdx +++ b/sdks/support.mdx @@ -1,5 +1,6 @@ --- title: SDK Support Policy +description: To ensure the security, performance, and reliability of our SDKs, we only provide official support for SDK versions that are less than **one year old* --- To ensure the security, performance, and reliability of our SDKs, we only provide official support for SDK versions that are less than **one year old** from their release date, unless otherwise specified (some updates may require a different timeline or migration path). diff --git a/sdks/target-apps.mdx b/sdks/target-apps.mdx index dc25e475b..aa071664f 100644 --- a/sdks/target-apps.mdx +++ b/sdks/target-apps.mdx @@ -1,5 +1,6 @@ --- title: Target Apps +description: Target Apps are an Enterprise-only feature. Reach out to our [support team](mailto:support@statsig.com), your sales contact, or via our slack c --- diff --git a/sdks/user.mdx b/sdks/user.mdx index 7e207b8d5..a6dd3e726 100644 --- a/sdks/user.mdx +++ b/sdks/user.mdx @@ -1,5 +1,6 @@ --- title: "User (StatsigUser) Object" +description: The user(StatsigUser) object is the sole input you provide to SDKs to target gates and assign users to experiments. 
If you'd like to target on an attr --- ## Introduction to the StatsigUser object @@ -15,7 +16,7 @@ The user(StatsigUser) object is the sole input you provide to SDKs to target gat const user = new StatsigUser({ userID: "12345", email: "vincent@statsig.com"}); //Use it in getExperiment() const my_experiment = statsig.getExperiment(user, "my_experiment_name") //<- any attribute you pass, you can target on -``` +``` diff --git a/server-core/cpp-core.mdx b/server-core/cpp-core.mdx index 0b1c14601..085212f7e 100644 --- a/server-core/cpp-core.mdx +++ b/server-core/cpp-core.mdx @@ -4,7 +4,6 @@ sidebarTitle: C++ description: Statsig's Next-gen Python Server SDK built in our [Server Core](/server-core) framework icon: "C++" --- - import Installation from '/snippets/server-core/installation.mdx' import Initialization from '/snippets/server-core/initialization.mdx' import CheckGateIntro from '/snippets/server-core/checkGate.mdx' @@ -36,6 +35,9 @@ import shutdown from '/snippets/server-core/cpp/shutdown.mdx' ## Setup the SDK +This section explains how to set up the SDK. + + @@ -98,4 +100,4 @@ The following methods return information about which user fields are needed for - `get_experiment_fields_needed(experiment_name: str) -> List[str]` - `get_layer_fields_needed(layer_name: str) -> List[str]` -These methods return a list of strings representing the user fields that are required to properly evaluate the specified gate, config, experiment, or layer. +These methods return a list of strings representing the user fields that are required to properly evaluate the specified gate, config, experiment, or layer. 
\ No newline at end of file diff --git a/server-core/dotnet-core.mdx b/server-core/dotnet-core.mdx index 1eed9c380..f04aa27e4 100644 --- a/server-core/dotnet-core.mdx +++ b/server-core/dotnet-core.mdx @@ -4,7 +4,6 @@ sidebarTitle: .NET description: Statsig's Next-gen .NET Server SDK built in our [Server Core](/server-core) framework icon: "/images/dotnet-grey.svg" --- - import Installation from '/snippets/server-core/installation.mdx' import Initialization from '/snippets/server-core/initialization.mdx' import Initialization2 from '/snippets/server-core/initialization2.mdx' @@ -49,6 +48,9 @@ import notes from '/snippets/server-core/dotnet/notes.mdx' ## Setup the SDK +This section explains how to set up the SDK. + + @@ -109,4 +111,4 @@ import notes from '/snippets/server-core/dotnet/notes.mdx' - + \ No newline at end of file diff --git a/server-core/elixir-core.mdx b/server-core/elixir-core.mdx index fb8ef61fc..3192b1739 100644 --- a/server-core/elixir-core.mdx +++ b/server-core/elixir-core.mdx @@ -4,7 +4,6 @@ sidebarTitle: Elixir description: Statsig's Next-gen Elixir Server SDK built in our [Server Core](/server-core) framework icon: "flask" --- - import Installation from '/snippets/server-core/installation.mdx' import Initialization from '/snippets/server-core/initialization.mdx' import Initialization2 from '/snippets/server-core/initialization2.mdx' @@ -50,6 +49,9 @@ import faqs from '/snippets/server-core/elixir/faqs.mdx' ## Setup the SDK +This section explains how to set up the SDK. 
+ + @@ -135,4 +137,4 @@ user = %Statsig.User{ - + \ No newline at end of file diff --git a/server-core/go-core.mdx b/server-core/go-core.mdx index 1f5b31613..66177b98c 100644 --- a/server-core/go-core.mdx +++ b/server-core/go-core.mdx @@ -4,7 +4,6 @@ sidebarTitle: Go Core (Beta) description: Statsig's next-gen Go Server SDK built on our [Server Core](/server-core) framework icon: "golang" --- - import Installation from '/snippets/server-core/installation.mdx' import Initialization from '/snippets/server-core/initialization.mdx' import Initialization2 from '/snippets/server-core/initialization2.mdx' @@ -45,6 +44,9 @@ import faqs from '/snippets/server-core/go/_faqs.mdx' ## Setup the SDK +This section explains how to set up the SDK. + + @@ -97,4 +99,4 @@ import faqs from '/snippets/server-core/go/_faqs.mdx' ## FAQ - + \ No newline at end of file diff --git a/server-core/index.mdx b/server-core/index.mdx index 4488b285f..4435a88d3 100644 --- a/server-core/index.mdx +++ b/server-core/index.mdx @@ -42,13 +42,13 @@ Server Core SDKs are available on an opt-in basis, with native SDKs still availa Server core SDKs, starting in v0.4.0+, have a new event logging architecture. This is designed to stream events freely to statsig servers during normal operation, and throttle/drop events SDK side during outages on the event logging endpoint to enable the service to spin up healthy before processing steady-state qps. We expose the following parameters to tune this implementation -``` +```text - event_logging_max_queue_size: Controls batch size (default 2000). Note that exceeding the backend request size limit (10MB) will drop requests - event_logging_max_pending_batch_queue_size: Controls max pending batches (default: 20). 
The tradeoff here is increased memory usage to buffer events when requests fail if you increase it, and losing additional events if you decrease it - disable_all_logging: Completely disables event logging ``` -``` +```text +----------------+ +----------------+ +----------------+ | Event Sources | | Event Queue | | Flush Triggers | |----------------| |----------------| |----------------| diff --git a/server-core/java-core.mdx b/server-core/java-core.mdx index 57f457f25..33366650e 100644 --- a/server-core/java-core.mdx +++ b/server-core/java-core.mdx @@ -4,7 +4,6 @@ sidebarTitle: Java description: Statsig's next-gen Java Server SDK built on our [Server Core](/server-core) framework icon: "java" --- - import Installation from '/snippets/server-core/installation.mdx' import Initialization from '/snippets/server-core/initialization.mdx' import Initialization2 from '/snippets/server-core/initialization2.mdx' @@ -58,6 +57,9 @@ import quickStart from '/snippets/server-core/java/quickStart.mdx' ## Setup the SDK +This section explains how to set up the SDK. 
+ + @@ -130,4 +132,4 @@ import quickStart from '/snippets/server-core/java/quickStart.mdx' - + \ No newline at end of file diff --git a/server-core/migration-guides/go.mdx b/server-core/migration-guides/go.mdx index 67ce9dd77..541e38756 100644 --- a/server-core/migration-guides/go.mdx +++ b/server-core/migration-guides/go.mdx @@ -20,13 +20,13 @@ This guide will help you migrate from the legacy Go Server SDK to the new Go Cor ```bash CLI go get github.com/statsig-io/go-sdk -``` +```text ```go go.mod require ( github.com/statsig-io/go-sdk v1.26.0 ) -``` +```go @@ -40,17 +40,17 @@ Install the latest version of the SDK: ```bash Latest Version go get github.com/statsig-io/statsig-server-core/statsig-go@latest -``` +```text ```bash Specific Version go get github.com/statsig-io/statsig-server-core/statsig-go@v0.7.2 -``` +```text ```go go.mod require ( github.com/statsig-io/statsig-server-core/statsig-go v0.7.2 ) -``` +```text @@ -61,7 +61,7 @@ Run the following commands to install the necessary binaries and set environment ```bash go install github.com/statsig-io/statsig-server-core/statsig-go/cmd/post-install@latest post-install -``` +```text The system should prompt you to set the environment variables using a `statsig.env` or `statsig.env.ps1` file that was generated during the post-install script. Follow the instructions to set the environment variables. @@ -69,11 +69,11 @@ The system should prompt you to set the environment variables using a `statsig.e ```bash macOS source /Users/Your-User-Name/.statsig_env -``` +```text ```powershell Windows (PowerShell) . 
"C:\Users\Your-User-Name\.statsig_env.ps1" -``` +```text @@ -110,7 +110,7 @@ statsig.Initialize("server-secret-key") // Or, if you want to initialize with certain options statsig.InitializeWithOptions("server-secret-key", &Options{Environment: Environment{Tier: "staging"}}) -``` +```text ```go Go Core SDK import ( @@ -138,7 +138,7 @@ details, err := s.InitializeWithDetails() if err != nil { // Handle error } -``` +```text @@ -158,7 +158,7 @@ user := statsig.User{ // Check if a gate is enabled enabled := statsig.CheckGate(user, "my_gate") -``` +```text ```go Go Core SDK import ( @@ -174,7 +174,7 @@ user := statsig.NewStatsigUserBuilder(). // Check if a gate is enabled enabled := s.CheckGate(user, "my_gate") -``` +```text @@ -195,7 +195,7 @@ user := statsig.User{ // Get a dynamic config config := statsig.GetConfig(user, "my_config") value := config.Get("key", "default_value") -``` +```text ```go Go Core SDK import ( @@ -211,7 +211,7 @@ user := statsig.NewStatsigUserBuilder(). // Get a dynamic config config := s.GetDynamicConfig(user, "my_config") -``` +```text @@ -231,7 +231,7 @@ user := statsig.User{ // Get an experiment experiment := statsig.GetExperiment(user, "my_experiment") -``` +```text ```go Go Core SDK import ( @@ -247,7 +247,7 @@ user := statsig.NewStatsigUserBuilder(). 
// Get an experiment experiment := s.GetExperiment(user, "my_experiment") -``` +```text @@ -272,7 +272,7 @@ statsig.LogEvent(Event{ Value: "SKU_12345", Metadata: map[string]string{"price": "9.99","item_name": "diet_coke_48_pack"}, }) -``` +```text ```go Go Core SDK import ( diff --git a/server-core/migration-guides/java.mdx b/server-core/migration-guides/java.mdx index 6c92d9696..e263faeb1 100644 --- a/server-core/migration-guides/java.mdx +++ b/server-core/migration-guides/java.mdx @@ -26,7 +26,7 @@ repositories { dependencies { implementation 'com.statsig:serversdk:X.X.X' } -``` +```text ```xml maven @@ -36,7 +36,7 @@ dependencies { X.X.X -``` +```text @@ -59,7 +59,7 @@ dependencies { // Platform-specific library (required) implementation 'com.statsig:javacore:X.X.X:YOUR-OS-ARCHITECTURE' } -``` +```text ```xml maven @@ -79,7 +79,7 @@ dependencies { YOUR-OS-ARCHITECTURE -``` +```ruby @@ -91,7 +91,7 @@ import com.statsig.*; StatsigOptions options = new StatsigOptions.Builder().build(); Statsig statsig = new Statsig("your-secret-key", options); -``` +```ruby This will output the appropriate dependency for your system. 
@@ -123,7 +123,7 @@ StatsigOptions options = new StatsigOptions(); // options.initTimeoutMs = 9999; Future initFuture = Statsig.initializeAsync("server-secret-key", options); initFuture.get(); -``` +```text ```kotlin Legacy Kotlin SDK import com.statsig.sdk.Statsig @@ -133,7 +133,7 @@ val options = StatsigOptions().apply { initTimeoutMs = 9999 } async { Statsig.initialize("server-secret-key", options) }.await() -``` +```text ```java Java Core SDK import com.statsig.*; @@ -147,7 +147,7 @@ StatsigOptions options = new StatsigOptions.Builder() Statsig statsig = new Statsig("server-secret-key", options); statsig.initialize().get(); -``` +```text @@ -164,7 +164,7 @@ if (isFeatureOn) { } else { // Gate is off } -``` +```text ```kotlin Legacy Kotlin SDK val user = StatsigUser("user_id") @@ -175,7 +175,7 @@ if (featureOn) { } else { // Gate is off } -``` +```text ```java Java Core SDK StatsigUser user = new StatsigUser.Builder().setUserID("user_id").build(); @@ -186,7 +186,7 @@ if (isFeatureOn) { } else { // Gate is off } -``` +```text @@ -200,7 +200,7 @@ DynamicConfig config = Statsig.getConfigSync(user, "awesome_product_details"); String itemName = config.getString("product_name", "Awesome Product v1"); Double price = config.getDouble("price", 10.0); Boolean shouldDiscount = config.getBoolean("discount", false); -``` +```text ```kotlin Legacy Kotlin SDK val config = Statsig.getConfigSync(user, "awesome_product_details") @@ -208,7 +208,7 @@ val config = Statsig.getConfigSync(user, "awesome_product_details") val itemName = config.getString("product_name", "Awesome Product v1") val price = config.getDouble("price", 10.0) val shouldDiscount = config.getBoolean("discount", false) -``` +```text ```java Java Core SDK DynamicConfig config = statsig.getDynamicConfig(user, "awesome_product_details"); @@ -216,7 +216,7 @@ DynamicConfig config = statsig.getDynamicConfig(user, "awesome_product_details") String itemName = config.getString("product_name", "Awesome Product v1"); Double 
price = config.getDouble("price", 10.0); Boolean shouldDiscount = config.getBoolean("discount", false); -``` +```text @@ -229,21 +229,21 @@ DynamicConfig experiment = Statsig.getExperimentSync(user, "new_user_promo_title String promoTitle = experiment.getString("title", "Welcome to Statsig!"); Double discount = experiment.getDouble("discount", 0.1); -``` +```text ```kotlin Legacy Kotlin SDK val experiment = Statsig.getExperimentSync(user, "new_user_promo_title") val promoTitle = experiment.getString("title", "Welcome to Statsig!") val discount = experiment.getDouble("discount", 0.1) -``` +```text ```java Java Core SDK Experiment experiment = statsig.getExperiment(user, "new_user_promo_title"); String promoTitle = experiment.getString("title", "Welcome to Statsig!"); Double discount = experiment.getDouble("discount", 0.1); -``` +```text @@ -256,21 +256,21 @@ Layer layer = Statsig.getLayerSync(user, "user_promo_experiments"); String promoTitle = layer.getString("title", "Welcome to Statsig!"); Double discount = layer.getDouble("discount", 0.1); -``` +```text ```kotlin Legacy Kotlin SDK val layer = Statsig.getLayerSync(user, "user_promo_experiments") val promoTitle = layer.getString("title", "Welcome to Statsig!") val discount = layer.getDouble("discount", 0.1) -``` +```text ```java Java Core SDK Layer layer = statsig.getLayer(user, "user_promo_experiments"); String promoTitle = layer.getString("title", "Welcome to Statsig!"); Double discount = layer.getDouble("discount", 0.1); -``` +```text @@ -280,18 +280,18 @@ Double discount = layer.getDouble("discount", 0.1); ```java Legacy Java SDK Statsig.logEvent(user, "purchase", 2.99, Map.of("item_name", "remove_ads")); -``` +```text ```kotlin Legacy Kotlin SDK Statsig.logEvent(user, "purchase", 2.99, mapOf("item_name" to "remove_ads")) -``` +```text ```java Java Core SDK Map metadata = new HashMap<>(); metadata.put("item_name", "remove_ads"); statsig.logEvent(user, "purchase", "2.99", metadata); -``` +```text @@ -308,14 
+308,14 @@ options.setApi("https://api.statsig.com/v1"); options.setRulesetsSyncIntervalMs(10 * 1000); options.setIdListsSyncIntervalMs(60 * 1000); options.setDisableAllLogging(false); -``` +```text ```kotlin Legacy Kotlin SDK val options = StatsigOptions().apply { initTimeoutMs(3000) disableAllLogging(false) } -``` +```text ```java Java Core SDK StatsigOptions options = new StatsigOptions.Builder() @@ -327,7 +327,7 @@ StatsigOptions options = new StatsigOptions.Builder() .setIdListsSyncIntervalMs(60 * 1000) .setDisableAllLogging(false) .build(); -``` +```javascript diff --git a/server-core/migration-guides/node.mdx b/server-core/migration-guides/node.mdx index 058f12215..05bd70e96 100644 --- a/server-core/migration-guides/node.mdx +++ b/server-core/migration-guides/node.mdx @@ -3,7 +3,6 @@ title: Node Core SDK Migration Guide sidebarTitle: Node Migration description: Migrate from the legacy Node JS Server SDK to the new Node JS Core SDK --- - import Intro from '/snippets/server-core/migration-guides/_Intro.mdx' import Installation from '/snippets/server-core/migration-guides/_Installation.mdx' import GlobalChanges from '/snippets/server-core/migration-guides/_GlobalChanges.mdx' @@ -21,6 +20,9 @@ import NodeStatsigOptions from '/snippets/server-core/node/migration-guide/stats ## StatsigUser +This page explains statsiguser. 
+ + ## API Changes @@ -43,5 +45,4 @@ The table below shows the mapping between legacy SDK options and Server Core SDK - - + \ No newline at end of file diff --git a/server-core/migration-guides/python.mdx b/server-core/migration-guides/python.mdx index d91abb99f..297143f19 100644 --- a/server-core/migration-guides/python.mdx +++ b/server-core/migration-guides/python.mdx @@ -3,7 +3,6 @@ title: Python Core SDK Migration Guide sidebarTitle: Python Migration description: Migrate from the legacy Python Server SDK to the new Python Core SDK --- - import Intro from '/snippets/server-core/migration-guides/_Intro.mdx' import Installation from '/snippets/server-core/migration-guides/_Installation.mdx' import GlobalChanges from '/snippets/server-core/migration-guides/_GlobalChanges.mdx' @@ -21,6 +20,9 @@ import PyStatsigOptions from '/snippets/server-core/python/migration-guide/stats ## StatsigUser +This page explains statsiguser. + + ## API Changes @@ -49,4 +51,4 @@ The table below shows the mapping between legacy SDK options and Server Core SDK - This allows for incremental migration without updating all call sites at once. - [Python Core Migration Adapter](https://github.com/statsig-io/statsig-server-core-migration-adapters/blob/main/python_core_migration_adapter.py) - + \ No newline at end of file diff --git a/server-core/node-core.mdx b/server-core/node-core.mdx index b89f0f595..de12da7fd 100644 --- a/server-core/node-core.mdx +++ b/server-core/node-core.mdx @@ -4,7 +4,6 @@ sidebarTitle: Node description: Statsig's next-gen Node Server SDK built on our [Server Core](/server-core) framework icon: "node-js" --- - import Installation from '/snippets/server-core/installation.mdx' import Initialization from '/snippets/server-core/initialization.mdx' import Initialization2 from '/snippets/server-core/initialization2.mdx' @@ -61,6 +60,9 @@ import reference from '/snippets/server-core/node/reference.mdx' ## Setup the SDK +This page explains setup the sdk. 
+ + @@ -134,4 +136,4 @@ import reference from '/snippets/server-core/node/reference.mdx' - + \ No newline at end of file diff --git a/server-core/php-core.mdx b/server-core/php-core.mdx index a878583bd..3a6cb46d9 100644 --- a/server-core/php-core.mdx +++ b/server-core/php-core.mdx @@ -4,7 +4,6 @@ sidebarTitle: PHP description: Statsig's Next-gen PHP Server SDK built in our [Server Core](/server-core) framework icon: "php" --- - import Installation from '/snippets/server-core/installation.mdx' import Initialization from '/snippets/server-core/initialization.mdx' import Initialization2 from '/snippets/server-core/initialization2.mdx' @@ -55,6 +54,9 @@ import notes from '/snippets/server-core/php/notes.mdx' ## Setup the SDK +This page explains setup the sdk. + + @@ -148,4 +150,4 @@ $user = new StatsigUser([ - + \ No newline at end of file diff --git a/server-core/python-core.mdx b/server-core/python-core.mdx index e2e656948..2b6b55cb8 100644 --- a/server-core/python-core.mdx +++ b/server-core/python-core.mdx @@ -4,7 +4,6 @@ sidebarTitle: Python description: Statsig's Next-gen Python Server SDK built in our [Server Core](/server-core) framework icon: "python" --- - import Installation from '/snippets/server-core/installation.mdx' import Initialization from '/snippets/server-core/initialization.mdx' import Initialization2 from '/snippets/server-core/initialization2.mdx' @@ -60,6 +59,9 @@ import faqs from '/snippets/server-core/python/faqs.mdx' ## Setup the SDK +This page explains setup the sdk. + + @@ -158,4 +160,4 @@ The following methods return information about which user fields are needed for - `get_experiment_fields_needed(experiment_name: str) -> List[str]` - `get_layer_fields_needed(layer_name: str) -> List[str]` -These methods return a list of strings representing the user fields that are required to properly evaluate the specified gate, config, experiment, or layer. 
+These methods return a list of strings representing the user fields that are required to properly evaluate the specified gate, config, experiment, or layer. \ No newline at end of file diff --git a/server-core/rust-core.mdx b/server-core/rust-core.mdx index fabc5f2fa..183ab324f 100644 --- a/server-core/rust-core.mdx +++ b/server-core/rust-core.mdx @@ -4,7 +4,6 @@ sidebarTitle: Rust description: Statsig's Next-gen Rust Server SDK built in our [Server Core](/server-core) framework icon: "rust" --- - import Installation from '/snippets/server-core/installation.mdx' import Initialization from '/snippets/server-core/initialization.mdx' import Initialization2 from '/snippets/server-core/initialization2.mdx' @@ -56,6 +55,9 @@ import reference from '/snippets/server-core/rust/reference.mdx' ## Setup the SDK +This section explains how to set up the Rust Core SDK. + + @@ -127,4 +129,4 @@ import reference from '/snippets/server-core/rust/reference.mdx' - - + \ No newline at end of file diff --git a/server/concepts/all_assignments.mdx b/server/concepts/all_assignments.mdx index 6b811084c..128c7826c 100644 --- a/server/concepts/all_assignments.mdx +++ b/server/concepts/all_assignments.mdx @@ -6,6 +6,7 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: Our SDKs are designed to be invoked in your code at the point where you're serving the experiment treatment or feature. --- diff --git a/server/concepts/cloudflare.mdx b/server/concepts/cloudflare.mdx index 5b44fb6cb..cd0454a91 100644 --- a/server/concepts/cloudflare.mdx +++ b/server/concepts/cloudflare.mdx @@ -6,6 +6,7 @@ keywords: - owner:hoa last_update: date: 2025-09-18 +description: This guide walks you through setting up the Cloudflare Logpush worker in your Cloudflare account and configuring it to send logs and metrics to Statsig. --- ## Overview @@ -28,7 +29,7 @@ This guide walks you through setting up the Cloudflare Logpush worker in your Cl 3.
[Locate cloudflare account ID](https://developers.cloudflare.com/fundamentals/account/find-account-and-zone-ids/) 4. Run the following command and get the job id -``` +```bash ACCOUNT_ID= CLOUDFLARE_API_TOKEN= curl "https://api.cloudflare.com/client/v4/accounts/$ACCOUNT_ID/logpush/jobs" \ diff --git a/server/concepts/data_store.mdx b/server/concepts/data_store.mdx index 6ec387f9f..39c4d14e7 100644 --- a/server/concepts/data_store.mdx +++ b/server/concepts/data_store.mdx @@ -1,5 +1,6 @@ --- title: Server Data Stores / Data Adapter +description: One common question when configuring Statsig is how to design your integration around handling potential points of failure. For example - in case of a --- One common question when configuring Statsig is how to design your integration around handling potential points of failure. For example - in case of a Statsig API outage, can my integration continue to function? diff --git a/server/concepts/forward_proxy.mdx b/server/concepts/forward_proxy.mdx index 0f348cc5f..e694563de 100644 --- a/server/concepts/forward_proxy.mdx +++ b/server/concepts/forward_proxy.mdx @@ -6,6 +6,7 @@ keywords: - owner:brock last_update: date: 2025-09-18 +description: The Statsig Forward Proxy is a service that we developed to be hosted and run in your own infrastructure. If SDKs are configured to use the forward pr --- @@ -35,11 +36,11 @@ helm repo update # Install the chart helm install statsig-forward-proxy statsig/statsig-forward-proxy -``` +```python The Helm chart provides extensive configuration options for customizing your deployment. For detailed configuration options, refer to the [Statsig Forward Proxy Helm chart documentation](https://github.com/statsig-io/statsig-forward-proxy/blob/main/chart/README.md). -#### Manual Deployment +## Manual Deployment For environments where Helm is not available or for more customized deployments, Statsig Forward Proxy can be deployed manually. 
The proxy is available as a pre-built Docker image, and you can also build your own binary from source. @@ -58,7 +59,7 @@ While providing this capability, the forward proxy also provides other benefits You can now configure SDK network using different network protocol to integrate with Statsig Forward Proxy for different endpoints. There are 3 network endpoints SDK make requests to download_config_specs,get_id_lists, and log_events, and you can currently configure the download_config_specs to use the proxy. We are actively developing the latter two. -#### Network protocol for different endpoints +### Network protocol for different endpoints For `download_config_specs` endpoint, where we get specs on evaluating gates/layers/experiments/configs 1. `http`: the default protocol. If SDK is initialized with http, it will poll config updates in background thread. 2. `grpc_websocket`: Establish a grpc streaming from SDK to statsig forward proxy, and listen to updates being pushed from proxy servers. You have to use statsig forward proxy or have a similar proxy server in order to use grpc streaming. Note: Not all SDKs support GRPC yet, if you have an SDK you want support for please reach out to our support channel on slack. @@ -76,7 +77,7 @@ const statsigOptions = { } Statsig.initialize(server_key, statsigOptions) // Statsig will use listen for config updates from statsig forward proxy using grpc_websocket protocol. And use http to get idlists and post log events from statsig servers. -``` +```python For information on your specific SDK language, see the language specific docs in the left hand column. diff --git a/server/concepts/open_telemetry.mdx b/server/concepts/open_telemetry.mdx index b95938388..490b6e3af 100644 --- a/server/concepts/open_telemetry.mdx +++ b/server/concepts/open_telemetry.mdx @@ -1,5 +1,6 @@ --- title: Ingesting Open Telemetry Data +description: Setting up OTEL can be tricky, if you have any questions, feel free to reach out to us on slack! 
--- @@ -64,7 +65,7 @@ exporters: encoding: json headers: statsig-api-key: ${env:STATSIG_SERVER_SDK_SECRET} -``` +```text ## 3. Example Helm Chart Values for a quick and correct setup @@ -93,7 +94,7 @@ helm install otel-deployment open-telemetry/opentelemetry-collector \ helm install otel-daemonset open-telemetry/opentelemetry-collector \ --version 0.75.1 \ -f values-agent.yaml -n otel -``` +```text ## 4. Verify the Setup @@ -101,7 +102,7 @@ Check that all pods are running: ```bash kubectl get pods -n otel -``` +```text Check logs for a specific collector pod and confirm there are errors reported: diff --git a/server/concepts/persistent_assignment.mdx b/server/concepts/persistent_assignment.mdx index 81ff893e6..fbc0ab9aa 100644 --- a/server/concepts/persistent_assignment.mdx +++ b/server/concepts/persistent_assignment.mdx @@ -1,5 +1,6 @@ --- title: Server Persistent Assignment +description: Persistent assignment allows you to ensure that a user's variant stays consistent while an experiment is running, regardless of changes to allocation --- Persistent assignment allows you to ensure that a user's variant stays consistent while an experiment is running, regardless of changes to allocation or targeting. 
@@ -38,7 +39,7 @@ val options = GetExperimentOptions( enforceTargeting = true, ) ) -``` +```text ```ts @@ -49,7 +50,7 @@ const options: GetExperimentOptions = { } } -``` +```text @@ -81,7 +82,7 @@ exp = Statsig.get_experiment( # User evaluates using values from persisted stora ) ) puts exp.group_name # 'Control' -``` +```text @@ -93,7 +94,7 @@ statsig = Statsig.initialize(options).wait user = StatsigUser("a-user") exp = statsig.get_experiment(StatsigUser("a-user"), ExperimentEvaluationOptions(user_persisted_values= PersistentStorage.get_user_persisted_value(user, "user_id"))) print(f"{exp.group_name}") # control -``` +```text ```typescript @@ -103,7 +104,7 @@ let statsig = new Statsig(secretKye, options) let user = new StatsigUser("a-user") let exp = statsig.getExperiment(user, ExperimentEvaluationOptions(userPersistentValues= persistedStorage.getUserPersistedValues(user, "user_id"))) -``` +```text @@ -131,7 +132,7 @@ exp = Statsig.getExperimentSync( ), ) println(exp.groupName) // "Control" -``` +```text ```ts @@ -158,7 +159,7 @@ exp = Statsig.getExperimentSync( }, ) console.log(exp.getGroupName()) // "Control" -``` +```text ```go @@ -185,7 +186,7 @@ exp = GetExperimentWithOptions( } ) fmt.Println(exp.GroupName) // "Control" -``` +```text ```csharp diff --git a/server/cpp.mdx b/server/cpp.mdx index 373e661a2..b6ec0895f 100644 --- a/server/cpp.mdx +++ b/server/cpp.mdx @@ -36,7 +36,7 @@ FetchContent_Declare(statsig ) FetchContent_MakeAvailable(statsig) -``` +```text And include the following in your `CMakeLists.txt` file @@ -45,7 +45,7 @@ cmake_minimum_required(VERSION 3.11) include(FetchContent) include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/statsig.cmake) -``` +```text Check out the latest versions on [https://github.com/statsig-io/cpp-server-sdk/releases/latest](https://github.com/statsig-io/cpp-server-sdk/releases/latest) @@ -62,7 +62,7 @@ statsig::initialize('server-secret-key'); statsig::Options options; options.localMode = true 
statsig::initialize('server-secret-key', options) -``` +```text @@ -82,7 +82,7 @@ if (statsig::checkGate(user, 'use_new_feature')) else { // Gate is off } -``` +```text @@ -92,7 +92,7 @@ statsig::DynamicConfig config = statsig::get_config(user, 'awesome_product_detai auto item_name = config.value['product_name']; auto price = config.value['price']; auto shouldDiscount = config.value['discount']; -``` +```text @@ -116,13 +116,13 @@ discount = price_exp.value["discount"] price = msrp * (1 - discount) -``` +```text ```cpp statsig::logEvent(user, 'add_to_cart') -``` +```python @@ -152,7 +152,7 @@ ID Lists are currently not supported in the C++ server SDK ```cpp statsig::shutdown() -``` +```text @@ -165,11 +165,11 @@ std::unordered_map overrideValue = { {"overridden key", "overridden field"}, }; statsig::overrideConfig("a_config_name", overrideValue) -``` +```text ## FAQ -#### How do I run experiments for logged out users? +### How do I run experiments for logged out users? See the guide on [device level experiments](/guides/first-device-level-experiment) @@ -206,7 +206,7 @@ inline bool operator==(User const &a, User const &b) a.statsigEnvironment == b.statsigEnvironment && a.customIDs == b.customIDs; }; -``` +```text ### Options diff --git a/server/deprecation-notices.mdx b/server/deprecation-notices.mdx index f926161c9..241da903a 100644 --- a/server/deprecation-notices.mdx +++ b/server/deprecation-notices.mdx @@ -1,10 +1,11 @@ --- title: Deprecation Notices +description: Until mid-2024, Statsig's Server SDKs downloaded configuration values directly from Statsig servers upon startup. Since then, we've transitioned to ho --- ## Direct Initialization API Access for Server SDKs, October 31, 2025 -#### What's changing? +### What's changing? Until mid-2024, Statsig's Server SDKs downloaded configuration values directly from Statsig servers upon startup. 
Since then, we've transitioned to hosting these configuration files on a secure CDN, which greatly reduces initialization time and increases reliability (we use Cloudflare, which has near-zero downtime.) As we've transitioned to this approach, we've made the decision to deprecate direct API access, which is only available in long outdated SDK versions. We're asking all customers to discontinue using these SDK versions, and upgrade to a newer version. #### Change required @@ -31,7 +32,7 @@ If you check some configs on both the Client and Server side, it is possible tha ## Async Evaluation Functions -#### Reason +### Reason Server SDKs were originally designed for maximum backwards compatibility. This meant that if a Server SDK did not contain support for an operator or configuration, it would fallback to hitting Statsig's servers, ensuring a valid result would be returned. diff --git a/server/dotnet.mdx b/server/dotnet.mdx index 2e1a8dc02..88150eda5 100644 --- a/server/dotnet.mdx +++ b/server/dotnet.mdx @@ -4,7 +4,6 @@ sidebarTitle: .NET description: Statsig's Legacy Server SDK for .NET applications icon: "/images/dotnet-grey.svg" --- - import CheckGateIntro from '/snippets/server/checkGate.mdx' import GetDynamicConfigIntro from '/snippets/server/getDynamicConfig.mdx' import GetExperimentIntro from '/snippets/server/getExperiment.mdx' @@ -28,6 +27,9 @@ import shutdown from '/snippets/server/dotnet/shutdown.mdx' ## Setup the SDK +This page explains setup the sdk. 
+ + @@ -58,4 +60,4 @@ import shutdown from '/snippets/server/dotnet/shutdown.mdx' - + \ No newline at end of file diff --git a/server/erlang.mdx b/server/erlang.mdx index 62d07e8c1..6da957aa9 100644 --- a/server/erlang.mdx +++ b/server/erlang.mdx @@ -4,7 +4,6 @@ sidebarTitle: Erlang/Elixir description: Statsig's Legacy Server SDK for Erlang and Elixir applications icon: "erlang" --- - import CheckGateIntro from '/snippets/server/checkGate.mdx' import GetDynamicConfigIntro from '/snippets/server/getDynamicConfig.mdx' import GetExperimentIntro from '/snippets/server/getExperiment.mdx' @@ -34,6 +33,9 @@ The erlang SDK repository, and this docs site, are a work in progress. If you ar ## Setup the SDK +This page explains setup the sdk. + + @@ -64,4 +66,4 @@ The erlang SDK repository, and this docs site, are a work in progress. If you ar - + \ No newline at end of file diff --git a/server/go.mdx b/server/go.mdx index ddfe9b6ea..195c3191c 100644 --- a/server/go.mdx +++ b/server/go.mdx @@ -34,7 +34,7 @@ via the `go get` CLI: ```bash go get github.com/statsig-io/go-sdk -``` +```ruby Or, add a dependency on the most recent version of the SDK in go.mod: @@ -42,7 +42,7 @@ Or, add a dependency on the most recent version of the SDK in go.mod: require ( github.com/statsig-io/go-sdk v1.26.0 ) -``` +```text See the [Releases tab in GitHub](https://github.com/statsig-io/go-sdk/releases) for the latest versions. 
@@ -59,7 +59,7 @@ statsig.Initialize("server-secret-key") // Or, if you want to initialize with certain options statsig.InitializeWithOptions("server-secret-key", &Options{Environment: Environment{Tier: "staging"}}) -``` +```text @@ -77,7 +77,7 @@ if feature { } else { // Gate is off } -``` +```text @@ -88,7 +88,7 @@ if feature.Value { // Gate is on, enable new feature fmt.Print(feature.EvaluationDetails.Reason) } -``` +```text @@ -105,7 +105,7 @@ bool shouldDiscount = config.GetBool("discount", false); // Or just get the whole json object backing this config if you prefer json := config.Value -``` +```text @@ -131,7 +131,7 @@ price := msrp * (1 - discount); // getting the layer name that an experiment belongs to userPromoLayer := Statsig.GetExperimentLayer("new_user_promo_title"); -``` +```text @@ -143,7 +143,7 @@ statsig.LogEvent(Event{ Metadata: map[string]string{"price": "9.99","item_name": "diet_coke_48_pack"}, }) -``` +```text @@ -164,7 +164,7 @@ type Options struct { BootstrapValues string RulesUpdatedCallback func(rules string, time int64) } -``` +```kotlin - **Environment**: default nil - An object you can use to set environment variables that apply to all of your users in the same session and will be used for targeting purposes. @@ -197,7 +197,7 @@ type GCIROptions struct { IncludeConfigType bool ConfigTypesToInclude []ConfigType } -``` +```python - **IncludeLocalOverrides**: default false - When set to true, local overrides will be included in the client initialize response. 
@@ -235,7 +235,7 @@ type GCIROptions struct { ```go statsig.Shutdown() -``` +```text @@ -243,7 +243,7 @@ statsig.Shutdown() func OverrideGate(gate string, val bool) func OverrideConfig(config string, val map[string]interface{}) -``` +```text @@ -255,7 +255,7 @@ options.ClientKey = "client-YOUR_CLIENT_KEY" result := statsig.GetClientInitializeResponseWithOptions(user, &options) // You can then pass 'result' into a Statsig Client SDK -``` +```text ## Data Store @@ -287,7 +287,7 @@ type IDataAdapter interface { */ ShouldBeUsedForQueryingUpdates(key string) bool } -``` +```text ### Example Implementation @@ -316,7 +316,7 @@ func (d *dataAdapterExample) Shutdown() {} func (d *dataAdapterExample) ShouldBeUsedForQueryingUpdates(key string) bool { return false } -``` +```text ## User Persistent Storage @@ -336,7 +336,7 @@ type IUserPersistentStorage interface { */ Save(key string, data string) } -``` +```text ### Example Implementation @@ -357,17 +357,17 @@ func (d *userPersistentStorageExample) Save(key string, value string) { d.saveCalled++ d.store[key] = value } -``` +```text ```go sdkInstance := NewClientWithOptions(sdkKey, &Options{}) -``` +```ruby ## FAQ -#### How do I run experiments for logged out users? +### How do I run experiments for logged out users? 
See the guide on [device level experiments](/guides/first-device-level-experiment) @@ -388,7 +388,7 @@ gateDefault := c.CheckGate(user, "any_gate") c.OverrideGate("any_gate", true) // "any_gate" is now true -``` +```text See also [https://github.com/statsig-io/go-sdk/blob/main/overrides_test.go](https://github.com/statsig-io/go-sdk/blob/main/overrides_test.go) @@ -415,7 +415,7 @@ type User struct { StatsigEnvironment map[string]string `json:"statsigEnvironment,omitempty"` CustomIDs map[string]string `json:"customIDs"` } -``` +```text ### StatsigOptions @@ -496,7 +496,7 @@ type GCIROptions struct { ClientKey string HashAlgorithm string //supports "sha256", "djb2", "none", default "sha256" } -``` +```text ### Event @@ -508,7 +508,7 @@ type Event struct { Metadata map[string]string `json:"metadata"` Time int64 `json:"time"` } -``` +```text ### FeatureGate @@ -521,7 +521,7 @@ type FeatureGate struct { GroupName string `json:"group_name"` EvaluationDetails *EvaluationDetails `json:"evaluation_details"` } -``` +```text ### DynamicConfig diff --git a/server/introduction.mdx b/server/introduction.mdx index bb6ee9965..5fb9dff7e 100644 --- a/server/introduction.mdx +++ b/server/introduction.mdx @@ -1,5 +1,6 @@ --- title: Server SDKs +description: Statsig server SDKs allow you to run experiments for server-side functionality such as APIs, algorithms, configurations, and infrastructure improvemen --- Statsig server SDKs allow you to run experiments for server-side functionality such as APIs, algorithms, configurations, and infrastructure improvements. The server SDKs evaluate feature gates, dynamic configurations, and experiments locally on your application server. 
diff --git a/server/java.mdx b/server/java.mdx index 592f3d3e1..05e8ff80f 100644 --- a/server/java.mdx +++ b/server/java.mdx @@ -4,7 +4,6 @@ sidebarTitle: Java/Kotlin description: Statsig's Legacy Server SDK for Java and Kotlin applications icon: "java" --- - import CheckGateIntro from '/snippets/server/checkGate.mdx' import GetDynamicConfigIntro from '/snippets/server/getDynamicConfig.mdx' import GetExperimentIntro from '/snippets/server/getExperiment.mdx' @@ -35,6 +34,9 @@ This SDK is written in Kotlin, but exposes methods and overrides to Java based a ## Setup the SDK +This page explains setup the sdk. + + @@ -71,4 +73,4 @@ In certain scenarios, you may need more information about a gate evaluation than - + \ No newline at end of file diff --git a/server/migration-guides/v5ToV6UpgradeGuide.mdx b/server/migration-guides/v5ToV6UpgradeGuide.mdx index dbe9c5084..60eef0f17 100644 --- a/server/migration-guides/v5ToV6UpgradeGuide.mdx +++ b/server/migration-guides/v5ToV6UpgradeGuide.mdx @@ -1,5 +1,6 @@ --- title: Node SDK v5.x.x to v6.0.0 Upgrade Guide +description: Node.js SDK V6.0.0 introduced breaking changes on - [Removed async version of core APIs (checkGate, getConfig..)](#synchronous-core-apis) - [Changed D --- ### Summary @@ -28,7 +29,7 @@ getConfig(): DynamicConfig // New API async getExperiment(): Promise getExperiment(): DynamicConfig // New API -``` +```python **Context** The initial design of async APIs are ensuring every evaluation can be done properly even when the SDK is not compatible by requesting evaluation from Statsig Server. @@ -43,11 +44,11 @@ Meanwhile, we deprecated functions `*Sync()` for example `checkGateSync()`, plea Statsig.checkGate(user, "gate").then(() => { // do somethig }).catch() // Compile error if you are using typescript. If you are using javascript, there will be runtime error please audit and -``` +```ruby If you are using as following ways, still recommend to remove await, but there won't affect usage. 
``` javascript const gate = await Statsig.checkGate(user, "gate") -``` +```python #### DataAdapter Cache Key Changes **Context** @@ -102,7 +103,7 @@ WithExposureLoggingDisabled functions are deprecated ```javascript checkGateWithExposureLoggingDisabled() // DO NOT USE getFeatureGateWithExposureLoggingDisabled() // DO NOT USE -``` +```text use core apis with options instead ```javascript diff --git a/server/nodejsServerSDK.mdx b/server/nodejsServerSDK.mdx index 2c5805944..cde815ba0 100644 --- a/server/nodejsServerSDK.mdx +++ b/server/nodejsServerSDK.mdx @@ -4,7 +4,6 @@ sidebarTitle: Node.js description: Statsig's Legacy Server SDK for Node.js applications icon: "node-js" --- - import CheckGateIntro from '/snippets/server/checkGate.mdx' import GetDynamicConfigIntro from '/snippets/server/getDynamicConfig.mdx' import GetExperimentIntro from '/snippets/server/getExperiment.mdx' @@ -49,6 +48,9 @@ import faqs from '/snippets/server/node/faqs.mdx' ## Setup the SDK +This page explains setup the sdk. 
+ + @@ -139,7 +141,7 @@ export type StatsigUser = > | null; statsigEnvironment?: StatsigEnvironment; } -``` +```text ### Type StatsigOptions @@ -186,7 +188,7 @@ export interface LoggerInterface { error(message?: any, ...optionalParams: any[]): void; logLevel: 'none' | 'debug' | 'info' | 'warn' | 'error'; } -``` +```text ### Type FeatureGate @@ -199,7 +201,7 @@ export type FeatureGate = { readonly evaluationDetails: EvaluationDetails | null; readonly groupName: null; // deprecated }; -``` +```text ### Type DynamicConfig @@ -220,7 +222,7 @@ export default class DynamicConfig { getGroupName(): string | null; getIDType(): string | null; getEvaluationDetails(): EvaluationDetails | null; -``` +```text ### Type Layer @@ -240,7 +242,7 @@ export default class Layer { getGroupName(): string | null; getAllocatedExperimentName(): string | null; getEvaluationDetails(): EvaluationDetails | null; -``` +```text ### DataAdapter @@ -252,7 +254,7 @@ export interface IDataAdapter { shutdown(): Promise; supportsPollingUpdatesFor(key: DataAdapterKey): boolean; } -``` +```text ### EvaluationDetails @@ -263,7 +265,7 @@ export class EvaluationDetails { readonly serverTime: number; readonly reason: EvaluationReason; } -``` +```text ### EvaluationReason @@ -276,6 +278,4 @@ export type EvaluationReason = | 'Bootstrap' | 'DataAdapter' | 'Unsupported'; -``` - - +``` \ No newline at end of file diff --git a/server/php.mdx b/server/php.mdx index 1a3056209..8055af52d 100644 --- a/server/php.mdx +++ b/server/php.mdx @@ -4,7 +4,6 @@ sidebarTitle: PHP description: Statsig's Legacy Server SDK for PHP applications icon: "php" --- - import CheckGateIntro from '/snippets/server/checkGate.mdx' import GetDynamicConfigIntro from '/snippets/server/getDynamicConfig.mdx' import GetExperimentIntro from '/snippets/server/getExperiment.mdx' @@ -25,6 +24,9 @@ import cronJobs from '/snippets/server/php/cronJobs.mdx' ## Setup the SDK +This page explains setup the sdk. 
+ + @@ -54,4 +56,4 @@ Learn more about identifying users, group analytics, and best practices for logg - + \ No newline at end of file diff --git a/server/pythonSDK.mdx b/server/pythonSDK.mdx index 3419e0c8d..428a1550e 100644 --- a/server/pythonSDK.mdx +++ b/server/pythonSDK.mdx @@ -4,7 +4,6 @@ sidebarTitle: Python description: Statsig's Legacy Server SDK for Python applications icon: "python" --- - import CheckGateIntro from '/snippets/server/checkGate.mdx' import GetDynamicConfigIntro from '/snippets/server/getDynamicConfig.mdx' import GetExperimentIntro from '/snippets/server/getExperiment.mdx' @@ -40,6 +39,9 @@ import faqs from '/snippets/server/python/faqs.mdx' ## Setup the SDK +This page explains setup the sdk. + + @@ -114,7 +116,7 @@ class StatsigUser: custom: Optional[dict] = None # key: string, value: string private_attributes: Optional[dict] = None # key: string, value: string custom_ids: Optional[dict] = None # key: string, value: string -``` +```text ### StatsigOptions @@ -151,7 +153,7 @@ class StatsigOptions: initialize_sources: Optional[List[DataSource]] = None, config_sync_sources: Optional[List[DataSource]] = None, ): -``` +```text ### FeatureGate @@ -166,7 +168,7 @@ class FeatureGate: def get_evaluation_details(self): """Returns the evaluation detail of this FeatureGate""" -``` +```text ### DynamicConfig @@ -191,7 +193,7 @@ class DynamicConfig: def get_evaluation_details(self): """Returns the evaluation detail of this DynamicConfig""" -``` +```text ### Layer @@ -216,7 +218,7 @@ class Layer: def get_evaluation_details(self): """Returns the evaluation detail of this Layer""" -``` +```text ### EvaluationDetails @@ -236,7 +238,7 @@ class EvaluationReason(str, Enum): data_adapter = "DataAdapter" unsupported = "Unsupported" error = "error" -``` +```text ### DataStore @@ -253,7 +255,7 @@ class IDataStore: def should_be_used_for_querying_updates(self, key: str) -> bool: return False -``` +```text ### ForwardProxy - ProxyConfig ```python @@ -288,6 +290,4 @@ 
class ProxyConfig: self.retry_backoff_multiplier = retry_backoff_multiplier self.retry_backoff_base_ms = retry_backoff_base_ms self.push_worker_failover_threshold = push_worker_failover_threshold -``` - - +``` \ No newline at end of file diff --git a/server/ruby.mdx b/server/ruby.mdx index 683d29d5e..dbf3c2b33 100644 --- a/server/ruby.mdx +++ b/server/ruby.mdx @@ -10,7 +10,7 @@ import GetDynamicConfigIntro from '/snippets/server/getDynamicConfig.mdx' import GetExperimentIntro from '/snippets/server/getExperiment.mdx' import LogEventIntro from '/snippets/server/logEvent.mdx' import LogEventFooter from '/snippets/server/logEventFooter.mdx' -import GetFeatureGateIntro from '/snippets/server/getFeatureGate.mdx' +import GetFeatureGateIntro from '/snippets/server/getFeatureGate.mdx' import StatsigUserIntro from '/snippets/server/statsigUser.mdx' import PrivateAttributesIntro from '/snippets/server/privateAttributes.mdx' import StatsigOptionsIntro from '/snippets/server/statsigOptions.mdx' @@ -36,13 +36,13 @@ If you are using Bundler, add the [gem](https://rubygems.org/gems/statsig) to yo ```shell bundle add statsig -``` +``` or directly include it in your Gemfile and run `bundle install`: ```shell gem "statsig", ">= X.Y.Z" -``` +``` Check out the latest versions on [https://rubygems.org/gems/statsig](https://rubygems.org/gems/statsig) @@ -53,8 +53,8 @@ Check out the latest versions on [https://rubygems.org/gems/statsig](https://rub ```ruby require 'statsig' -Statsig.initialize('server-secret-key') -``` +Statsig.initialize('server-secret-key') +``` ```ruby @@ -68,16 +68,16 @@ options = StatsigOptions.new({'tier' => 'staging'}, network_timeout: 5) ...
-Statsig.initialize('server-secret-key', options, method(:error_callback)) -``` +Statsig.initialize('server-secret-key', options, method(:error_callback)) +``` -### Initializing Statsig in a Rails Application +## Initializing Statsig in a Rails Application If your application is using Rails, you should initialize Statsig in `config/initializers/statsig.rb`: ```ruby -Statsig.initialize('server-secret-key', options) -``` +Statsig.initialize('server-secret-key', options) +``` ### Initializing Statsig when using Unicorn, Puma, Passenger, or Sidekiq @@ -85,27 +85,27 @@ For **Unicorn**, you should initialize Statsig within an `after_fork` hook in yo ```ruby after_fork do |server,worker| - Statsig.initialize('server-secret-key', options) + Statsig.initialize('server-secret-key', options) end -``` +``` For **Puma**, you should initialize Statsig within an `on_worker_boot` hook in your `puma.rb` config file: ```ruby on_worker_boot do - Statsig.initialize('server-secret-key', options) + Statsig.initialize('server-secret-key', options) end -``` +``` For **Passenger**, you should initialize Statsig in your `config.ru` config file: ```ruby if defined?(PhusionPassenger) PhusionPassenger.on_event(:starting_worker_process) do |forked| - Statsig.initialize('server-secret-key', options) + Statsig.initialize('server-secret-key', options) end end -``` +``` For **Sidekiq**, you should initialize Statsig in your `sidekiq.rb`/server configuration file: @@ -119,7 +119,7 @@ Sidekiq.configure_server do |config| Statsig.shutdown end end -``` +``` If you are using Rails in combination with any of the above, you should be sure to initialize using the specific process lifecycle hooks exposed by the respective tool. You can initialize in multiple places, which should ensure the SDK is fully usable including all background processing.
@@ -138,7 +138,7 @@ if Statsig.check_gate(user, 'use_new_feature') else # Gate is off end -``` +``` @@ -155,7 +155,7 @@ shouldDiscount = config.get('discount', false); # Or just get the whole json object backing this config if you prefer json = config.value -``` +``` @@ -179,7 +179,7 @@ discount = price_exp.get("discount", 0.1) price = msrp * (1 - discount) -``` +``` @@ -193,7 +193,7 @@ Statsig.log_event( 'item_name' => 'diet_coke_48_pack' } ) -``` +``` @@ -206,7 +206,7 @@ You can specify optional parameters with `options` when initializing. - **environment**: Hash, default `nil` - a Hash you can use to set environment variables that apply to all of your users in the same session and will be used for targeting purposes. - - The most common usage is to set the "tier" (string), and have feature gates pass/fail for specific environments. The accepted values are "production", "staging" and "development", e.g. `StatsigOptions.New({ 'tier' => 'staging' })`. + - The most common usage is to set the "tier" (string), and have feature flags pass/fail for specific environments. The accepted values are "production", "staging" and "development", e.g. `StatsigOptions.New({ 'tier' => 'staging' })`. - **download_config_specs_url**: String, default `"https://api.statsigcdn.com/v2/download_config_specs/"` - The url used specifically to call download_config_specs - **log_event_url**: String, default `"https://statsigapi.net/v1/log_event"` @@ -228,7 +228,7 @@ You can specify optional parameters with `options` when initializing. - **local_mode**: Boolean, default `false` - Restricts the SDK to not issue any network requests and only respond with default values (or local overrides) - **bootstrap_values**: String, default `nil` - - A string that represents all rules for all feature gates, dynamic configs and experiments.
It can be provided to bootstrap the Statsig server SDK at initialization in case your server runs into network issue or Statsig server is down temporarily. + - A string that represents all rules for all feature flags, dynamic configs and experiments. It can be provided to bootstrap the Statsig server SDK at initialization in case your server runs into network issue or Statsig server is down temporarily. - **rules_updated_callback**: function, default `nil` - A callback function that will be called anytime the rulesets are updated - **data_store**: IDataStore, default `nil` @@ -254,13 +254,13 @@ You can specify optional parameters with `options` when initializing. ```ruby Statsig.shutdown -``` +``` ```ruby values = Statsig.get_client_initialize_response(user); # Hash[String, Any] | Nil -``` +``` @@ -270,7 +270,7 @@ Statsig.override_gate("a_gate_name", true) # Adding config overrides Statsig.override_config("a_config_name", {"key" => "value"}) -``` +``` 1. These only apply locally - they do not update definitions in the Statsig console or elsewhere.
@@ -283,46 +283,46 @@ Statsig.override_config("a_config_name", {"key" => "value"}) ```ruby result = Statsig.check_gate(user, 'a_gate_name', CheckGateOptions.new(disable_log_exposure: true)) -``` +```text ```ruby Statsig.manually_log_gate_exposure(user, 'a_gate_name') -``` +```text **Configs** ```ruby config = Statsig.get_config(user, 'a_dynamic_config_name', GetConfigOptions.new(disable_log_exposure: true)) -``` +```text ```ruby Statsig.manually_log_config_exposure(user, 'a_dynamic_config_name') -``` +```text **Experiments** ```ruby experiment = Statsig.get_experiment(user, 'an_experiment_name', GetExperimentOptions.new(disable_log_exposure: true)) -``` +```text ```ruby Statsig.manually_log_experiment_exposure(user, 'an_experiment_name') -``` +```text **Layers** ```ruby layer = Statsig.get_layer(user, 'a_layer_name', GetLayerOptions.new(disable_log_exposure: true)) paramValue = layer.get('a_param_name', 'fallback_value') -``` +```text ```ruby Statsig.manually_log_layer_parameter_exposure(user, 'a_layer_name', 'a_param_name') -``` +```text -### Interface +## Interface ```ruby class IUserPersistentStorage @@ -332,7 +332,7 @@ class IUserPersistentStorage def save(key, data) end end -``` +```text ### Example Implementation @@ -354,17 +354,17 @@ class DummyPersistentStorageAdapter < Statsig::Interfaces::IUserPersistentStorag @store[key] = data end end -``` +```text ```ruby sdk_instance = StatsigDriver.new(secret_key, options, error_callback) -``` +```text ## FAQ -#### How do I run experiments for logged out users? +### How do I run experiments for logged out users? 
See the guide on [device level experiments](/guides/first-device-level-experiment) @@ -418,7 +418,7 @@ class StatsigUser end end end -``` +```text ### Type StatsigOptions @@ -473,7 +473,7 @@ class StatsigOptions ) end end -``` +```text ### DataStore diff --git a/server/rust.mdx b/server/rust.mdx index d81efccde..b853bd091 100644 --- a/server/rust.mdx +++ b/server/rust.mdx @@ -4,7 +4,6 @@ sidebarTitle: Rust description: Statsig's Legacy Server SDK for Rust applications icon: "rust" --- - import CheckGateIntro from '/snippets/server/checkGate.mdx' import GetDynamicConfigIntro from '/snippets/server/getDynamicConfig.mdx' import GetExperimentIntro from '/snippets/server/getExperiment.mdx' @@ -32,6 +31,9 @@ import shutdown from '/snippets/server/rust/shutdown.mdx' ## Setup the SDK +This page explains setup the sdk. + + @@ -62,6 +64,4 @@ import shutdown from '/snippets/server/rust/shutdown.mdx' - - - + \ No newline at end of file diff --git a/session-replay/cli-session-replay.mdx b/session-replay/cli-session-replay.mdx index 97af126db..66cad8cb6 100644 --- a/session-replay/cli-session-replay.mdx +++ b/session-replay/cli-session-replay.mdx @@ -1,7 +1,7 @@ --- title: CLI Session Replay +description: CLI Session Replay allows you to record terminal sessions in your Node.js CLI applications and replay them in the Statsig Console. This enables you to --- - CLI Session Replay allows you to record terminal sessions in your Node.js CLI applications and replay them in the Statsig Console. This enables you to understand how users interact with your command-line tools, diagnose issues, and improve the user experience. The plugin records terminal output and resize events. @@ -10,6 +10,9 @@ we'll release it after introducing methods to pause, filter or redact recordings ## Installation +This page explains installation. 
+ + Install the CLI session replay package for Node.js: @@ -17,21 +20,21 @@ Install the CLI session replay package for Node.js: ```bash npm install @statsig/js-client @statsig/cli-session-replay-node -``` +```text ```bash yarn add @statsig/js-client @statsig/cli-session-replay-node -``` +```text ```bash pnpm add @statsig/js-client @statsig/cli-session-replay-node -``` +```text @@ -58,7 +61,7 @@ await client.initializeAsync(); // Your CLI application logic here console.log('Continue'); -``` +```ruby **CLI Logging Requirement**: CLI applications must set `loggingEnabled: 'always'` when initializing the StatsigClient. By default, Statsig only enables logging in browser environments, but CLI session replay requires logging to be enabled in all environments to capture and send session data. @@ -85,7 +88,7 @@ const plugin = new StatsigCliSessionReplayPlugin({ }, } }); -``` +```ruby ### Configuration Properties @@ -134,4 +137,4 @@ CliRecording.finish(); CLI Session Replay is currently supported on: - Node.js applications - Linux, macOS, and Windows terminals -- Any terminal that supports standard input/output streams +- Any terminal that supports standard input/output streams \ No newline at end of file diff --git a/session-replay/configure.mdx b/session-replay/configure.mdx index 4802590be..80c4e1392 100644 --- a/session-replay/configure.mdx +++ b/session-replay/configure.mdx @@ -1,6 +1,7 @@ --- title: Configure Statsig Session Replay sidebarTitle: Configure +description: In the Statsig Console, you can configure your Session Replay settings under **Project Settings → Analytics & Session Replay**. 
--- ## Conditional Recording @@ -76,7 +77,7 @@ runStatsigTriggeredSessionReplay(client, { }); runStatsigAutoCapture(client); await client.initializeAsync(); -``` +``` @@ -106,7 +107,7 @@ function App() { ); } -``` +``` @@ -137,7 +138,7 @@ const sessionReplayClient = new SessionReplay(client); if (someCondition) { sessionReplayClient.startRecording(); } -``` +``` If not, you can import the function from `@Statsig/session-replay` and call it using your SDK key diff --git a/session-replay/debug.mdx b/session-replay/debug.mdx index 0a38cc512..1655a3c1c 100644 --- a/session-replay/debug.mdx +++ b/session-replay/debug.mdx @@ -1,6 +1,7 @@ --- title: Debug Statsig Session Replay sidebarTitle: Debug +description: Troubleshoot the "This session recording is too large to load" warning, which appears when a session recording exceeds 50 MB in size. --- ## Large Session Recording Warning @@ -21,7 +22,7 @@ This occurs when a session exceeds 50 MB in size. Unfortunately, once a session runStatsigSessionReplay(client, { inlineStylesheet: false, }); -``` +``` diff --git a/session-replay/install.mdx b/session-replay/install.mdx index 3c90e9848..58fd29c95 100644 --- a/session-replay/install.mdx +++ b/session-replay/install.mdx @@ -1,15 +1,18 @@ --- title: Install Statsig Session Replay sidebarTitle: Install +description: Session Replay is supported on the Javascript or React SDKs for both desktop and mobile web users on your web application. See the instructions below to install our SDK and record user sessions. --- - Session Replay is supported on the Javascript or React SDKs for both desktop and mobile web users on your web application. See the instructions below to install our SDK and record user sessions. ## Option 1 - No code - Add Javascript Snippet to your website +With this option, you add a small JavaScript snippet to your website — no other code changes are required. + + ```html -``` +``` Get YOUR_CLIENT_KEY from Project Settings -> Keys & Environments.
Reveal the Client API Key, copy, and paste it over the [YOUR-API-KEY] in the snippet above. @@ -28,11 +31,11 @@ If you'd like to use your existing Statsig integration, or customize the integra ```bash npm npm install @statsig/js-client @statsig/session-replay @statsig/web-analytics -``` +```text ```bash yarn yarn add @statsig/js-client @statsig/session-replay @statsig/web-analytics -``` +```text @@ -42,11 +45,11 @@ yarn add @statsig/js-client @statsig/session-replay @statsig/web-analytics ```bash npm npm install @statsig/session-replay @statsig/web-analytics @statsig/react-bindings -``` +```text ```bash yarn yarn add @statsig/session-replay @statsig/web-analytics @statsig/react-bindings -``` +```python @@ -72,7 +75,7 @@ const client = new StatsigClient( runStatsigSessionReplay(client); runStatsigAutoCapture(client); await client.initializeAsync(); -``` +```text @@ -108,4 +111,4 @@ function App() { If you'd like to use Conditional Triggers you must use StatsigTriggeredSessionReplay. See Configure (Next page) for more information -That's it! Continue reading Configure to learn more about controlling who, what, and when you record sessions. +That's it! Continue reading Configure to learn more about controlling who, what, and when you record sessions. \ No newline at end of file diff --git a/session-replay/overview.mdx b/session-replay/overview.mdx index aeb4f9290..1d2952f3a 100644 --- a/session-replay/overview.mdx +++ b/session-replay/overview.mdx @@ -1,6 +1,7 @@ --- title: Session Replay Overview sidebarTitle: Overview +description: Session Replay allows you to record users using your website or product, and play back those recorded sessions. This allows you to better understand h --- Session Replay allows you to record users using your website or product, and play back those recorded sessions. 
This allows you to better understand how users use your service or website, diagnose problems, and uncover insights that help improve conversion and the overall user experience. diff --git a/session-replay/watch.mdx b/session-replay/watch.mdx index d2f4e4843..25bd6f439 100644 --- a/session-replay/watch.mdx +++ b/session-replay/watch.mdx @@ -1,6 +1,7 @@ --- title: Watch Session Replays sidebarTitle: Watch +description: Session Replays can be found under the User’s group in the Statsig console’s navigation panel. To check a gate without an exposure being logged: ```kotlin Kotlin val result = Statsig.checkGateWithExpos +--- + To check a gate without an exposure being logged: @@ -5,11 +10,11 @@ ```kotlin Kotlin val result = Statsig.checkGateWithExposureLoggingDisabled("a_gate_name") - ``` + ```text ```java Java boolean result = Statsig.checkGateWithExposureLoggingDisabled("a_gate_name"); - ``` + ```text Later, to manually log the gate exposure: @@ -17,11 +22,11 @@ ```kotlin Kotlin Statsig.manuallyLogGateExposure("a_gate_name") - ``` + ```text ```java Java Statsig.manuallyLogGateExposure("a_gate_name"); - ``` + ```text @@ -31,11 +36,11 @@ ```kotlin Kotlin val config = Statsig.getConfigWithExposureLoggingDisabled("a_config_name") - ``` + ```text ```java Java DynamicConfig config = Statsig.getConfigWithExposureLoggingDisabled("a_config_name"); - ``` + ```text Later, to manually log the config exposure: @@ -43,11 +48,11 @@ ```kotlin Kotlin Statsig.manuallyLogConfigExposure("a_config_name") - ``` + ```text ```java Java Statsig.manuallyLogConfigExposure("a_config_name"); - ``` + ```text @@ -57,11 +62,11 @@ ```kotlin Kotlin val experiment = Statsig.getExperimentWithExposureLoggingDisabled("an_experiment_name") - ``` + ```text ```java Java DynamicConfig experiment = Statsig.getExperimentWithExposureLoggingDisabled("an_experiment_name"); - ``` + ```text Later, to manually log the experiment exposure: @@ -69,11 +74,11 @@ ```kotlin Kotlin 
Statsig.manuallyLogExperimentExposure("an_experiment_name", false) - ``` + ```text ```java Java Statsig.manuallyLogExperimentExposure("an_experiment_name", false); - ``` + ```text @@ -84,12 +89,12 @@ ```kotlin Kotlin val layer = Statsig.getLayerWithExposureLoggingDisabled("a_layer_name", false) val result = layer.getString("a_parameter_name", "fallback") - ``` + ```text ```java Java Layer layer = Statsig.getLayerWithExposureLoggingDisabled("a_layer_name"); String result = layer.getString("a_parameter_name", "fallback"); - ``` + ```text Later, to manually log the layer parameter exposure: @@ -97,7 +102,7 @@ ```kotlin Kotlin Statsig.manuallyLogLayerParameterExposure("a_layer_name", "a_parameter_name", false) - ``` + ```text ```java Java Statsig.manuallyLogLayerParameterExposure("a_layer_name", "a_parameter_name", false); diff --git a/snippets/client/Android/statsigOptions.mdx b/snippets/client/Android/statsigOptions.mdx index 31e9c82f9..f0467048d 100644 --- a/snippets/client/Android/statsigOptions.mdx +++ b/snippets/client/Android/statsigOptions.mdx @@ -1,3 +1,8 @@ +--- +title: Statsigoptions +description: You can pass in an optional parameter `options` in addition to `sdkKey` and `user` during initialization to customize the Statsig client. +--- + You can pass in an optional parameter `options` in addition to `sdkKey` and `user` during initialization to customize the Statsig client. diff --git a/snippets/client/Angular/angularDirectives.mdx b/snippets/client/Angular/angularDirectives.mdx index ad3247c1f..048f41731 100644 --- a/snippets/client/Angular/angularDirectives.mdx +++ b/snippets/client/Angular/angularDirectives.mdx @@ -1,3 +1,8 @@ +--- +title: Angulardirectives +description: To use the directives, you need to import the `StatsigModule` in your Angular module. 
```ts // app.module.ts import { NgModule } from '@angular/core'; +--- + ## Angular Directives ### Statsig Module @@ -18,7 +23,7 @@ import { StatsigModule } from '@statsig/angular-bindings'; bootstrap: [AppComponent] }) export class AppModule {} -``` +```python ### Check gate directive diff --git a/snippets/client/Angular/angularInstall.mdx b/snippets/client/Angular/angularInstall.mdx index 687a4af04..60c03ec45 100644 --- a/snippets/client/Angular/angularInstall.mdx +++ b/snippets/client/Angular/angularInstall.mdx @@ -1,7 +1,12 @@ +--- +title: Angularinstall +description: ```bash npm npm install @statsig/angular-bindings ``` ```bash yarn yarn add @statsig/angular-bindings ``` +--- + ```bash npm npm install @statsig/angular-bindings -``` +```text ```bash yarn yarn add @statsig/angular-bindings diff --git a/snippets/client/Angular/angularLoadingState.mdx b/snippets/client/Angular/angularLoadingState.mdx index 4b8bb6691..9c4b25308 100644 --- a/snippets/client/Angular/angularLoadingState.mdx +++ b/snippets/client/Angular/angularLoadingState.mdx @@ -1,3 +1,8 @@ +--- +title: Angularloadingstate +description: Dependent on your setup, you may want to wait for the latest values before checking a gate or experiment. You can use the `isLoading` observable to de +--- + ## Loading State Dependent on your setup, you may want to wait for the latest values before checking a gate or experiment. 
diff --git a/snippets/client/Angular/angularSessionReplay.mdx b/snippets/client/Angular/angularSessionReplay.mdx index 34e1f9220..87dfe32ce 100644 --- a/snippets/client/Angular/angularSessionReplay.mdx +++ b/snippets/client/Angular/angularSessionReplay.mdx @@ -1,3 +1,8 @@ +--- +title: Angularsessionreplay +description: import webAnalyticsIntro from '/snippets/client/webAnalytics.mdx' By including the [`@statsig/session-replay`](https://www.npmjs.com/package/@statsig/ +--- + import webAnalyticsIntro from '/snippets/client/webAnalytics.mdx' ## Session Replay @@ -14,7 +19,7 @@ const StatsigConfig = { user: { userID: 'a-user' }, options: { plugins: [ new StatsigSessionReplayPlugin() ] } } -``` +```text diff --git a/snippets/client/Angular/angularSetup.mdx b/snippets/client/Angular/angularSetup.mdx index f23eca9e1..8d5c5d5f8 100644 --- a/snippets/client/Angular/angularSetup.mdx +++ b/snippets/client/Angular/angularSetup.mdx @@ -1,3 +1,8 @@ +--- +title: Angularsetup +description: The Statsig Angular bindings package provides a `StatsigService` that can be injected into your components. The way you provide and inject this servic +--- + The Statsig Angular bindings package provides a `StatsigService` that can be injected into your components. The way you provide and inject this service can vary depending on how you structure your app. 
```ts @@ -8,7 +13,7 @@ const StatsigConfig = { user: {}, // initial user object options: {...} // optional } -``` +```text ### using app config ```ts @@ -32,7 +37,7 @@ import { appConfig } from './app/app.config'; bootstrapApplication(AppComponent, appConfig).catch((err) => console.error(err), ); -``` +```text ### using app module ```ts diff --git a/snippets/client/Angular/angularUpdateUser.mdx b/snippets/client/Angular/angularUpdateUser.mdx index 668ccc85e..371e2acae 100644 --- a/snippets/client/Angular/angularUpdateUser.mdx +++ b/snippets/client/Angular/angularUpdateUser.mdx @@ -1,3 +1,8 @@ +--- +title: Angularupdateuser +description: Sometimes you'll need to update user properties, say when the user logs in and a userID is assigned, or a set of new properties have been identified. +--- + ## Updating user properties (e.g., Login) Sometimes you'll need to update user properties, say when the user logs in and a userID is assigned, or a set of new properties have been identified. This would require Statsig to go fetch new values for all the gates, experiments and config evaluations. This is achieved by the calling `updateUserAsync` from the service: diff --git a/snippets/client/Angular/basics.mdx b/snippets/client/Angular/basics.mdx index 344b336bd..414557f98 100644 --- a/snippets/client/Angular/basics.mdx +++ b/snippets/client/Angular/basics.mdx @@ -1,3 +1,8 @@ +--- +title: Basics +description: Once you have provided the statsig config token, you can now inject the service into a component or another service and use it. +--- + Once you have provided the statsig config token, you can now inject the service into a component or another service and use it. 
```ts diff --git a/snippets/client/Angular/checkGate.mdx b/snippets/client/Angular/checkGate.mdx index b49f56d86..8ec2cbd08 100644 --- a/snippets/client/Angular/checkGate.mdx +++ b/snippets/client/Angular/checkGate.mdx @@ -1,3 +1,8 @@ +--- +title: Checkgate +description: ```ts // feature-gate.component.ts import { CommonModule } from '@angular/common'; import { Component, OnInit } from '@angular/core'; import { Statsig +--- + ```ts // feature-gate.component.ts import { CommonModule } from '@angular/common'; diff --git a/snippets/client/Angular/getDynamicConfig.mdx b/snippets/client/Angular/getDynamicConfig.mdx index caf82a9ad..8078e1b61 100644 --- a/snippets/client/Angular/getDynamicConfig.mdx +++ b/snippets/client/Angular/getDynamicConfig.mdx @@ -1,3 +1,8 @@ +--- +title: Getdynamicconfig +description: ```ts // dynamic-config.component.ts import { Component, OnInit } from '@angular/core'; import { CommonModule } from '@angular/common'; import { Stats +--- + ```ts // dynamic-config.component.ts import { Component, OnInit } from '@angular/core'; diff --git a/snippets/client/Angular/getExperiment.mdx b/snippets/client/Angular/getExperiment.mdx index bc47f7287..236281765 100644 --- a/snippets/client/Angular/getExperiment.mdx +++ b/snippets/client/Angular/getExperiment.mdx @@ -1,3 +1,8 @@ +--- +title: Getexperiment +description: ```ts // experiment.component.ts import { Component, OnInit } from '@angular/core'; import { CommonModule } from '@angular/common'; import { StatsigSe +--- + ```ts // experiment.component.ts import { Component, OnInit } from '@angular/core'; diff --git a/snippets/client/Angular/getLayer.mdx b/snippets/client/Angular/getLayer.mdx index a4a5be626..e52645705 100644 --- a/snippets/client/Angular/getLayer.mdx +++ b/snippets/client/Angular/getLayer.mdx @@ -1,3 +1,8 @@ +--- +title: Getlayer +description: ```ts // layer.component.ts import { Component, OnInit } from '@angular/core'; import { CommonModule } from '@angular/common'; import { StatsigService 
+--- + ```ts // layer.component.ts import { Component, OnInit } from '@angular/core'; diff --git a/snippets/client/Angular/logEvent.mdx b/snippets/client/Angular/logEvent.mdx index 31d14337d..e09020b59 100644 --- a/snippets/client/Angular/logEvent.mdx +++ b/snippets/client/Angular/logEvent.mdx @@ -1,3 +1,8 @@ +--- +title: Logevent +description: ```ts import { Component, OnInit } from '@angular/core'; import { CommonModule } from '@angular/common'; import { StatsigService } from '@statsig/angu +--- + ```ts import { Component, OnInit } from '@angular/core'; import { CommonModule } from '@angular/common'; diff --git a/snippets/client/CPP/checkGate.mdx b/snippets/client/CPP/checkGate.mdx index 589a04f04..bc55da41b 100644 --- a/snippets/client/CPP/checkGate.mdx +++ b/snippets/client/CPP/checkGate.mdx @@ -1,3 +1,8 @@ +--- +title: Checkgate +description: ```cpp if (client.CheckGate("a_gate")) { // show new feature } // or, use the shared instance if (StatsigClient::Shared().CheckGate("a_gate")) { // sh +--- + ```cpp if (client.CheckGate("a_gate")) { // show new feature diff --git a/snippets/client/CPP/faqs.mdx b/snippets/client/CPP/faqs.mdx index d2327f8e0..f567f2ae3 100644 --- a/snippets/client/CPP/faqs.mdx +++ b/snippets/client/CPP/faqs.mdx @@ -1,3 +1,10 @@ +--- +title: Faqs +description: See the guide on [device level experiments](/guides/first-device-level-experiment) +--- #### How do I run experiments for logged out users?​ -See the guide on [device level experiments](/guides/first-device-level-experiment) +This page explains how do i run experiments for logged out users?​. 
+ + +See the guide on [device level experiments](/guides/first-device-level-experiment) \ No newline at end of file diff --git a/snippets/client/CPP/getDynamicConfig.mdx b/snippets/client/CPP/getDynamicConfig.mdx index b6f963728..04bfbdbca 100644 --- a/snippets/client/CPP/getDynamicConfig.mdx +++ b/snippets/client/CPP/getDynamicConfig.mdx @@ -1,3 +1,8 @@ +--- +title: Getdynamicconfig +description: ```cpp DynamicConfig config = client.GetDynamicConfig("a_config"); // or, use the shared instance DynamicConfig config = StatsigClient::Shared().GetDy +--- + ```cpp DynamicConfig config = client.GetDynamicConfig("a_config"); diff --git a/snippets/client/CPP/getExperiment.mdx b/snippets/client/CPP/getExperiment.mdx index 25f7e9eeb..09f0e3c38 100644 --- a/snippets/client/CPP/getExperiment.mdx +++ b/snippets/client/CPP/getExperiment.mdx @@ -1,3 +1,8 @@ +--- +title: Getexperiment +description: ```cpp // Values via getLayer Layer layer = StatsigClient::Shared().GetLayer("name"); std::string promoTitle = layer.GetValue("title").get @@ -23,7 +28,7 @@ client.InitializeAsync( }, user ); -``` +```text **Synchronous** initialization will leverage cache (if available), returning immediately. Data for subsequent sessions will then be fetched in the background. 
@@ -45,7 +50,7 @@ StatsigClient::Shared().InitializeAsync( }, user ); -``` +```text **Optional** - Configuration via StatsigOptions diff --git a/snippets/client/CPP/installation.mdx b/snippets/client/CPP/installation.mdx index 3c4a6c448..f3647fce9 100644 --- a/snippets/client/CPP/installation.mdx +++ b/snippets/client/CPP/installation.mdx @@ -1,9 +1,14 @@ +--- +title: Installation +description: ```cpp add_subdirectory(path/to/downloaded/statsig_sdk) target_link_libraries(${PROJECT_NAME} StatsigClien +--- + ```cpp add_subdirectory(path/to/downloaded/statsig_sdk) target_link_libraries(${PROJECT_NAME} StatsigClientSDK) -``` +```text diff --git a/snippets/client/CPP/logEvent.mdx b/snippets/client/CPP/logEvent.mdx index 22ba4e7f2..3dfe5359d 100644 --- a/snippets/client/CPP/logEvent.mdx +++ b/snippets/client/CPP/logEvent.mdx @@ -1,3 +1,8 @@ +--- +title: Logevent +description: ```cpp std::unordered_map metadata{ { "price", "9.99" }, { "item_name", "some_great_product" } }; StatsigEvent event("add_to +--- + ```cpp std::unordered_map metadata{ { "price", "9.99" }, diff --git a/snippets/client/CPP/statsigOptions.mdx b/snippets/client/CPP/statsigOptions.mdx index 82b0e7e09..0e28ed910 100644 --- a/snippets/client/CPP/statsigOptions.mdx +++ b/snippets/client/CPP/statsigOptions.mdx @@ -1,3 +1,8 @@ +--- +title: Statsigoptions +description: `StatsigClient::Initialize`, in addition to `sdk_key` and `user`, takes an optional parameter `options` that you can provide to customize the StatsigC +--- + `StatsigClient::Initialize`, in addition to `sdk_key` and `user`, takes an optional parameter `options` that you can provide to customize the StatsigClient. 
Here are the current options and we are always adding more to the list: diff --git a/snippets/client/CPP/statsigUser.mdx b/snippets/client/CPP/statsigUser.mdx index 5c5213742..d443b3595 100644 --- a/snippets/client/CPP/statsigUser.mdx +++ b/snippets/client/CPP/statsigUser.mdx @@ -1,3 +1,8 @@ +--- +title: Statsiguser +description: ```cpp StatsigUser user; user.user_id = "a-user"; user.email = "developer@statsig.com"; user.custom_ids = { {"employeeID", "an-employee"} }; ``` Have +--- + ```cpp StatsigUser user; user.user_id = "a-user"; @@ -5,7 +10,7 @@ user.email = "developer@statsig.com"; user.custom_ids = { {"employeeID", "an-employee"} }; -``` +```python ### Private Attributes @@ -23,7 +28,7 @@ client.UpdateUserSync(user); // or, use the shared instance StatsigClient::Shared().UpdateUserSync(user); -``` +```python If you want to ensure you have the latest values for an update (Say you are transition from logged out to logged in). You can use the Asynchronous update function. diff --git a/snippets/client/Dart/checkGate.mdx b/snippets/client/Dart/checkGate.mdx index c371c15ce..853494bc8 100644 --- a/snippets/client/Dart/checkGate.mdx +++ b/snippets/client/Dart/checkGate.mdx @@ -1,3 +1,8 @@ +--- +title: Checkgate +description: ```dart if (Statsig.checkGate("new_homepage_design")) { // Gate is on, show new home page } else { // Gate is off, show old home page +--- + ```dart if (Statsig.checkGate("new_homepage_design")) { // Gate is on, show new home page diff --git a/snippets/client/Dart/faqs.mdx b/snippets/client/Dart/faqs.mdx index cf1915c82..08fe24591 100644 --- a/snippets/client/Dart/faqs.mdx +++ b/snippets/client/Dart/faqs.mdx @@ -1,3 +1,10 @@ +--- +title: Faqs +description: See the guide on [device level experiments](/client/concepts/persistent_assignment). +--- ### How do I run experiments for logged out users? -See the guide on [device level experiments](/client/concepts/persistent_assignment). +This page explains how do i run experiments for logged out users?. 
+ + +See the guide on [device level experiments](/client/concepts/persistent_assignment). \ No newline at end of file diff --git a/snippets/client/Dart/flutterLifecycle.mdx b/snippets/client/Dart/flutterLifecycle.mdx index 156b0945e..3c54a61e3 100644 --- a/snippets/client/Dart/flutterLifecycle.mdx +++ b/snippets/client/Dart/flutterLifecycle.mdx @@ -1,3 +1,8 @@ +--- +title: Flutterlifecycle +description: Due to the nature of mobile development, apps can be closed by the operating system when they are no longer in the foreground. To be sure that all eve +--- + ## Flutter Lifecycle Hooks Due to the nature of mobile development, apps can be closed by the operating system when they are no longer in the foreground. To be sure that all events are logged before an app is closed by the operating system, we recommend adding Statsig to the app lifecycle events. This way we can flush all pending events when an app state change is detected. @@ -20,7 +25,7 @@ class StatsigLifecycleObserver extends WidgetsBindingObserver { } } -``` +```text Then in your app code, add this observer to the WidgetsBinding instance. diff --git a/snippets/client/Dart/getConfig.mdx b/snippets/client/Dart/getConfig.mdx index 552034d78..1e7651aca 100644 --- a/snippets/client/Dart/getConfig.mdx +++ b/snippets/client/Dart/getConfig.mdx @@ -1,3 +1,8 @@ +--- +title: Getconfig +description: ```dart var config = Statsig.getConfig("awesome_product_details"); // The 2nd parameter is the default value to be used in case the given parameter na +--- + ```dart var config = Statsig.getConfig("awesome_product_details"); diff --git a/snippets/client/Dart/getLayer.mdx b/snippets/client/Dart/getLayer.mdx index 346b75540..79d360de8 100644 --- a/snippets/client/Dart/getLayer.mdx +++ b/snippets/client/Dart/getLayer.mdx @@ -1,3 +1,8 @@ +--- +title: Getlayer +description: ```dart // Values via getLayer var layer = Statsig.getLayer("user_promo_experiments"); var promoTitle = layer.getString("title", "Welcome to Statsig!" 
+--- + ```dart // Values via getLayer diff --git a/snippets/client/Dart/initialize.mdx b/snippets/client/Dart/initialize.mdx index 86eb0a242..0e1d3ee70 100644 --- a/snippets/client/Dart/initialize.mdx +++ b/snippets/client/Dart/initialize.mdx @@ -1,3 +1,8 @@ +--- +title: Initialize +description: ```dart import 'package:statsig/statsig.dart'; await Statsig.initialize('client-sdk-key', StatsigUser(userId: "a-user-id")); ```text +--- + ```dart import 'package:statsig/statsig.dart'; diff --git a/snippets/client/Dart/installation.mdx b/snippets/client/Dart/installation.mdx index 1ea29713d..34875cb5d 100644 --- a/snippets/client/Dart/installation.mdx +++ b/snippets/client/Dart/installation.mdx @@ -1,8 +1,13 @@ +--- +title: Installation +description: With Dart: ```bash dart pub add statsig ``` With Flutter: ```bash flutter pub add statsig ``` If you are using **Flutter**, be sure to add Stat +--- + With Dart: ```bash dart pub add statsig -``` +```text With Flutter: diff --git a/snippets/client/Dart/logEvent.mdx b/snippets/client/Dart/logEvent.mdx index 248a54f98..c6be6bb01 100644 --- a/snippets/client/Dart/logEvent.mdx +++ b/snippets/client/Dart/logEvent.mdx @@ -1,3 +1,8 @@ +--- +title: Logevent +description: ```dart // Provide a doubleValue argument for number values Statsig.logEvent("purchase", doubleValue: 2.99, metadata: {"item_name": "remove_ads"}); // +--- + ```dart // Provide a doubleValue argument for number values Statsig.logEvent("purchase", doubleValue: 2.99, metadata: {"item_name": "remove_ads"}); diff --git a/snippets/client/Dart/parameterStore.mdx b/snippets/client/Dart/parameterStore.mdx index 99f3c3016..da766738b 100644 --- a/snippets/client/Dart/parameterStore.mdx +++ b/snippets/client/Dart/parameterStore.mdx @@ -1,3 +1,8 @@ +--- +title: Parameterstore +description: ```dart var homepageStore = Statsig.getParameterStore("homepage"); var title = homepageStore.get("title", "Welcome"); var shouldShowUpsell = homepageS +--- + ```dart var homepageStore = 
Statsig.getParameterStore("homepage"); diff --git a/snippets/client/Dart/shutdown.mdx b/snippets/client/Dart/shutdown.mdx index a539b4c55..de8d6c576 100644 --- a/snippets/client/Dart/shutdown.mdx +++ b/snippets/client/Dart/shutdown.mdx @@ -1,3 +1,8 @@ +--- +title: Shutdown +description: Call Statsig.shutdown() to flush any pending events before the app exits. +--- + ```dart await Statsig.shutdown(); ``` diff --git a/snippets/client/Dart/statsigOptions.mdx b/snippets/client/Dart/statsigOptions.mdx index 5aea33bcb..7a05939a3 100644 --- a/snippets/client/Dart/statsigOptions.mdx +++ b/snippets/client/Dart/statsigOptions.mdx @@ -1,3 +1,8 @@ +--- +title: StatsigOptions +description: Used to decide how long (in seconds) the Statsig client waits for the initial network request. +--- + Used to decide how long (in seconds) the Statsig client waits for the initial network request to respond before calling the completion block. The Statsig client will return either cached values (if any) or default values if checkGate/getConfig/getExperiment is called before the initial network request completes.
diff --git a/snippets/client/Dart/updateUser.mdx b/snippets/client/Dart/updateUser.mdx index 456c1330b..db6264626 100644 --- a/snippets/client/Dart/updateUser.mdx +++ b/snippets/client/Dart/updateUser.mdx @@ -1,3 +1,8 @@ +--- +title: updateUser +description: Update the current user by calling Statsig.updateUser with a new StatsigUser. +--- + ```dart await Statsig.updateUser(StatsigUser("a_new_user")); ``` diff --git a/snippets/client/DotNet/checkGate.mdx b/snippets/client/DotNet/checkGate.mdx index 105bbf77f..85ad5d304 100644 --- a/snippets/client/DotNet/checkGate.mdx +++ b/snippets/client/DotNet/checkGate.mdx @@ -1,3 +1,8 @@ +--- +title: checkGate +description: Check a feature gate with StatsigClient.CheckGate to decide which experience to show. +--- + ```csharp if (StatsigClient.CheckGate("new_homepage_design")) { diff --git a/snippets/client/DotNet/faqs.mdx b/snippets/client/DotNet/faqs.mdx index bd3762b3f..689f2a6b2 100644 --- a/snippets/client/DotNet/faqs.mdx +++ b/snippets/client/DotNet/faqs.mdx @@ -1,4 +1,10 @@ +--- +title: FAQs +description: See the guide on [device level experiments](/client/concepts/user#device-level-experiments) +--- #### How do I run experiments for logged out users? -See the guide on [device level experiments](/client/concepts/user#device-level-experiments) +This page explains how to run experiments for logged out users.
+ +See the guide on [device level experiments](/client/concepts/user#device-level-experiments) \ No newline at end of file diff --git a/snippets/client/DotNet/getConfig.mdx b/snippets/client/DotNet/getConfig.mdx index 31a28917b..cfaf91697 100644 --- a/snippets/client/DotNet/getConfig.mdx +++ b/snippets/client/DotNet/getConfig.mdx @@ -1,3 +1,8 @@ +--- +title: Getconfig +description: ```csharp DynamicConfig config = StatsigClient.GetConfig("awesome_product_details"); // The 2nd parameter is the default value to be used in case the +--- + ```csharp DynamicConfig config = StatsigClient.GetConfig("awesome_product_details"); diff --git a/snippets/client/DotNet/getLayer.mdx b/snippets/client/DotNet/getLayer.mdx index ebcaad971..b3167360c 100644 --- a/snippets/client/DotNet/getLayer.mdx +++ b/snippets/client/DotNet/getLayer.mdx @@ -1,3 +1,8 @@ +--- +title: Getlayer +description: ```csharp // Values via getLayer Layer layer = StatsigClient.GetLayer("user_promo_experiments"); var promoTitle = layer.Get("title", "Welcome to Stats +--- + ```csharp // Values via getLayer diff --git a/snippets/client/DotNet/initialize.mdx b/snippets/client/DotNet/initialize.mdx index 9ee1535cf..3a6dd38e4 100644 --- a/snippets/client/DotNet/initialize.mdx +++ b/snippets/client/DotNet/initialize.mdx @@ -1,3 +1,8 @@ +--- +title: Initialize +description: ```csharp using Statsig; using Statsig.Client; await StatsigClient.Initialize( "client-sdk-key", new StatsigUser { UserID = "some_user_id", Email = "u +--- + ```csharp using Statsig; using Statsig.Client; diff --git a/snippets/client/DotNet/installation.mdx b/snippets/client/DotNet/installation.mdx index ee76356b2..c60665f22 100644 --- a/snippets/client/DotNet/installation.mdx +++ b/snippets/client/DotNet/installation.mdx @@ -1,3 +1,8 @@ +--- +title: Installation +description: The package is hosted on [Nuget](https://www.nuget.org/packages/Statsig/). 
You can either install it from your Visual Studio's Nuget package manager, +--- + The package is hosted on [Nuget](https://www.nuget.org/packages/Statsig/). You can either install it from your Visual Studio's Nuget package manager, or through the NuGet CLI: ```shell diff --git a/snippets/client/DotNet/logEvent.mdx b/snippets/client/DotNet/logEvent.mdx index 9c15d986c..8741b5c85 100644 --- a/snippets/client/DotNet/logEvent.mdx +++ b/snippets/client/DotNet/logEvent.mdx @@ -1,3 +1,8 @@ +--- +title: logEvent +description: Log a custom event with StatsigClient.LogEvent, including a value and optional metadata. +--- + ```csharp StatsigClient.LogEvent( "add_to_cart", diff --git a/snippets/client/DotNet/shutdown.mdx b/snippets/client/DotNet/shutdown.mdx index e4428e713..3f4909fe9 100644 --- a/snippets/client/DotNet/shutdown.mdx +++ b/snippets/client/DotNet/shutdown.mdx @@ -1,3 +1,8 @@ +--- +title: Shutdown +description: Call StatsigClient.Shutdown() to flush any pending events before the app exits. +--- + ```csharp StatsigClient.Shutdown(); ``` diff --git a/snippets/client/DotNet/statsigOptions.mdx b/snippets/client/DotNet/statsigOptions.mdx index 9acafa3b3..9c3f627b3 100644 --- a/snippets/client/DotNet/statsigOptions.mdx +++ b/snippets/client/DotNet/statsigOptions.mdx @@ -1,3 +1,8 @@ +--- +title: StatsigOptions +description: `Initialize()` takes an optional parameter `options` in addition to `sdkKey` and `user` that you can provide to customize the Statsig client. +--- + `Initialize()` takes an optional parameter `options` in addition to `sdkKey` and `user` that you can provide to customize the Statsig client.
diff --git a/snippets/client/DotNet/updateUser.mdx b/snippets/client/DotNet/updateUser.mdx index 99417b3a1..dccec2c68 100644 --- a/snippets/client/DotNet/updateUser.mdx +++ b/snippets/client/DotNet/updateUser.mdx @@ -1,3 +1,8 @@ +--- +title: Updateuser +description: ```csharp // if you want to update the existing user, or change to a different user, call updateUser await StatsigClient.UpdateUser( new StatsigUser { +--- + ```csharp // if you want to update the existing user, or change to a different user, call updateUser diff --git a/snippets/client/Expo/advanced.mdx b/snippets/client/Expo/advanced.mdx index cf08ec014..897414418 100644 --- a/snippets/client/Expo/advanced.mdx +++ b/snippets/client/Expo/advanced.mdx @@ -1,3 +1,8 @@ +--- +title: Advanced +description: In some scenarios, you may need to use the `StatsigClient` when you are not in the React component tree. Things like background tasks or handling noti +--- + ## Advanced ### Expo Without React @@ -18,7 +23,7 @@ await myClient.initializeAsync(); if (myClient.checkGate("my_gate")) { // do something cool } -``` +```text ### Synchronous Storage with MMKV diff --git a/snippets/client/Expo/checkGate.mdx b/snippets/client/Expo/checkGate.mdx index d5b8bae7a..52fcea5d1 100644 --- a/snippets/client/Expo/checkGate.mdx +++ b/snippets/client/Expo/checkGate.mdx @@ -1,3 +1,8 @@ +--- +title: Checkgate +description: You can evaluate a gate by getting the client with the `useStatsigClient` hook, and then calling `checkGate` ```tsx const { client } = useStatsigClien +--- + You can evaluate a gate by getting the client with the `useStatsigClient` hook, and then calling `checkGate` ```tsx diff --git a/snippets/client/Expo/debugging.mdx b/snippets/client/Expo/debugging.mdx index 993b0456e..ae0a85662 100644 --- a/snippets/client/Expo/debugging.mdx +++ b/snippets/client/Expo/debugging.mdx @@ -1,3 +1,8 @@ +--- +title: Debugging +description: Some users have reported a `ERROR: A networking error occured during POST request` 
messages when first initializing Statsig. This issue is solved in r +--- + ## Debugging ### Network Issues diff --git a/snippets/client/Expo/getDynamicConfig.mdx b/snippets/client/Expo/getDynamicConfig.mdx index f718cef4f..f9e37c0c0 100644 --- a/snippets/client/Expo/getDynamicConfig.mdx +++ b/snippets/client/Expo/getDynamicConfig.mdx @@ -1,3 +1,8 @@ +--- +title: Getdynamicconfig +description: You can get a DynamicConfig value by getting the client with the `useStatsigClient` hook, and then calling `getConfig` ```tsx const { client } = useSt +--- + You can get a DynamicConfig value by getting the client with the `useStatsigClient` hook, and then calling `getConfig` ```tsx diff --git a/snippets/client/Expo/getExperiment.mdx b/snippets/client/Expo/getExperiment.mdx index 2b951c22a..bf1cb5389 100644 --- a/snippets/client/Expo/getExperiment.mdx +++ b/snippets/client/Expo/getExperiment.mdx @@ -1,3 +1,8 @@ +--- +title: Getexperiment +description: You can access the experiment variant and parameters for the user by getting the client with the `useStatsigClient` hook, and then calling `getExperim +--- + You can access the experiment variant and parameters for the user by getting the client with the `useStatsigClient` hook, and then calling `getExperiment`. ```tsx diff --git a/snippets/client/Expo/getLayer.mdx b/snippets/client/Expo/getLayer.mdx index d996f0356..899854914 100644 --- a/snippets/client/Expo/getLayer.mdx +++ b/snippets/client/Expo/getLayer.mdx @@ -1,3 +1,8 @@ +--- +title: Getlayer +description: You can access layers and layer parameters for the user by getting the client with the `useStatsigClient` hook, and then calling `getLayer`. +--- + You can access layers and layer parameters for the user by getting the client with the `useStatsigClient` hook, and then calling `getLayer`. 
```tsx diff --git a/snippets/client/Expo/gettingClient.mdx b/snippets/client/Expo/gettingClient.mdx index 2f22796a2..ab2e36ae0 100644 --- a/snippets/client/Expo/gettingClient.mdx +++ b/snippets/client/Expo/gettingClient.mdx @@ -1,3 +1,8 @@ +--- +title: Gettingclient +description: You can get an instance of the StatsigClient to check gates, experiments, dynamic configs, layers, and log events. ```jsx import { useStatsigClient } +--- + You can get an instance of the StatsigClient to check gates, experiments, dynamic configs, layers, and log events. ```jsx diff --git a/snippets/client/Expo/installation.mdx b/snippets/client/Expo/installation.mdx index ba3ff7189..98324798c 100644 --- a/snippets/client/Expo/installation.mdx +++ b/snippets/client/Expo/installation.mdx @@ -1,6 +1,11 @@ +--- +title: Installation +description: ```shell npx expo install @statsig/expo-bindings ``` The `@statsig/expo-bindings` package has peer dependencies which may also need to be installed if +--- + ```shell npx expo install @statsig/expo-bindings -``` +```go ### Peer Dependencies diff --git a/snippets/client/Expo/loadingState.mdx b/snippets/client/Expo/loadingState.mdx index f5bfe3367..0e79c416a 100644 --- a/snippets/client/Expo/loadingState.mdx +++ b/snippets/client/Expo/loadingState.mdx @@ -1,3 +1,8 @@ +--- +title: Loadingstate +description: Dependent on your setup, you may want to wait for the latest values before checking a gate or experiment. If you are using `StatsigProviderExpo`, you +--- + ## Loading State Dependent on your setup, you may want to wait for the latest values before checking a gate or experiment. 
@@ -19,7 +24,7 @@ export function App() { ); } -``` +```text diff --git a/snippets/client/Expo/logEvent.mdx b/snippets/client/Expo/logEvent.mdx index 4bc330b41..6bfd09416 100644 --- a/snippets/client/Expo/logEvent.mdx +++ b/snippets/client/Expo/logEvent.mdx @@ -1,3 +1,8 @@ +--- +title: Logevent +description: You can get the client with the `useStatsigClient` hook, and then call `logEvent` ```tsx const { client } = useStatsigClient(); return ; -``` +```text ### StatsigUser Hook @@ -131,7 +136,7 @@ return ( ); -``` +```text ### Direct Access to the Client diff --git a/snippets/client/React/installation.mdx b/snippets/client/React/installation.mdx index becf8d2a0..8f1757e3c 100644 --- a/snippets/client/React/installation.mdx +++ b/snippets/client/React/installation.mdx @@ -1,3 +1,8 @@ +--- +title: Installation +description: If you need a starter project, follow the official
React quickstart. Looking for Next.js instead? See the Next.js SDK docs. @@ -43,7 +48,7 @@ You are a frontend engineer integrating the Statsig SDK into a React app. Follow } 5. Ask the user to provide their CLIENT-API-KEY and insert it where prompted above. -``` +```text ### Install Packages @@ -52,7 +57,7 @@ You are a frontend engineer integrating the Statsig SDK into a React app. Follow ```bash npm install @statsig/react-bindings -``` +```text diff --git a/snippets/client/React/loadingState.mdx b/snippets/client/React/loadingState.mdx index dabf8dc83..5dc6561fe 100644 --- a/snippets/client/React/loadingState.mdx +++ b/snippets/client/React/loadingState.mdx @@ -1,3 +1,8 @@ +--- +title: Loadingstate +description: Wait for the latest values during initialization with either the provider or the async hook. ```tsx import { Stat +--- + ## Loading State Wait for the latest values during initialization with either the provider or the async hook. @@ -19,7 +24,7 @@ export function App() { ); } -``` +```text diff --git a/snippets/client/React/logEvent.mdx b/snippets/client/React/logEvent.mdx index 7aea30213..6eca66321 100644 --- a/snippets/client/React/logEvent.mdx +++ b/snippets/client/React/logEvent.mdx @@ -1,3 +1,8 @@ +--- +title: Logevent +description: ```tsx import { useStatsigClient } from '@statsig/react-bindings'; const { logEvent } = useStatsigClient(); return