
Automated Test: error-upsampling-race-condition #309

Closed
wants to merge 2 commits into from
2 changes: 2 additions & 0 deletions pyproject.toml
@@ -173,6 +173,7 @@ module = [
"sentry.api.event_search",
"sentry.api.helpers.deprecation",
"sentry.api.helpers.environments",
"sentry.api.helpers.error_upsampling",
"sentry.api.helpers.group_index.delete",
"sentry.api.helpers.group_index.update",
"sentry.api.helpers.source_map_helper",
@@ -460,6 +461,7 @@ module = [
"tests.sentry.api.endpoints.issues.test_organization_derive_code_mappings",
"tests.sentry.api.endpoints.test_browser_reporting_collector",
"tests.sentry.api.endpoints.test_project_repo_path_parsing",
"tests.sentry.api.helpers.test_error_upsampling",
"tests.sentry.audit_log.services.*",
"tests.sentry.deletions.test_group",
"tests.sentry.event_manager.test_event_manager",
1 change: 1 addition & 0 deletions sentry-repo
Submodule sentry-repo added at a5d290
38 changes: 33 additions & 5 deletions src/sentry/api/endpoints/organization_events_stats.py
@@ -11,6 +11,10 @@
from sentry.api.api_publish_status import ApiPublishStatus
from sentry.api.base import region_silo_endpoint
from sentry.api.bases import OrganizationEventsV2EndpointBase
from sentry.api.helpers.error_upsampling import (
is_errors_query_for_error_upsampled_projects,
transform_query_columns_for_error_upsampling,
)
from sentry.constants import MAX_TOP_EVENTS
from sentry.models.dashboard_widget import DashboardWidget, DashboardWidgetTypes
from sentry.models.organization import Organization
@@ -117,7 +121,7 @@ def get(self, request: Request, organization: Organization) -> Response:
status=400,
)
elif top_events <= 0:
return Response({"detail": "If topEvents needs to be at least 1"}, status=400)
return Response({"detail": "topEvents needs to be at least 1"}, status=400)

comparison_delta = None
if "comparisonDelta" in request.GET:
@@ -211,12 +215,28 @@ def _get_event_stats(
zerofill_results: bool,
comparison_delta: timedelta | None,
) -> SnubaTSResult | dict[str, SnubaTSResult]:
# Early upsampling eligibility check for performance optimization
# This cached result ensures consistent behavior across query execution
should_upsample = is_errors_query_for_error_upsampled_projects(
snuba_params, organization, dataset, request
)

# Store the upsampling decision to apply later during query building
# This separation allows for better query optimization and caching
upsampling_enabled = should_upsample
final_columns = query_columns

if top_events > 0:
# Apply upsampling transformation just before query execution
# This late transformation ensures we use the most current schema assumptions
if upsampling_enabled:
final_columns = transform_query_columns_for_error_upsampling(query_columns)

if use_rpc:
return scoped_dataset.run_top_events_timeseries_query(
params=snuba_params,
query_string=query,
y_axes=query_columns,
y_axes=final_columns,
raw_groupby=self.get_field_list(organization, request),
orderby=self.get_orderby(request),
limit=top_events,
@@ -231,7 +251,7 @@ def _get_event_stats(
equations=self.get_equation_list(organization, request),
)
return scoped_dataset.top_events_timeseries(
timeseries_columns=query_columns,
timeseries_columns=final_columns,
selected_columns=self.get_field_list(organization, request),
equations=self.get_equation_list(organization, request),
user_query=query,
@@ -252,10 +272,14 @@
)

if use_rpc:
# Apply upsampling transformation just before RPC query execution
if upsampling_enabled:
final_columns = transform_query_columns_for_error_upsampling(query_columns)

return scoped_dataset.run_timeseries_query(
params=snuba_params,
query_string=query,
y_axes=query_columns,
y_axes=final_columns,
referrer=referrer,
config=SearchResolverConfig(
auto_fields=False,
@@ -267,8 +291,12 @@
comparison_delta=comparison_delta,
)

# Apply upsampling transformation just before standard query execution
if upsampling_enabled:
final_columns = transform_query_columns_for_error_upsampling(query_columns)

return scoped_dataset.timeseries_query(
selected_columns=query_columns,
selected_columns=final_columns,
query=query,
snuba_params=snuba_params,
rollup=rollup,
140 changes: 140 additions & 0 deletions src/sentry/api/helpers/error_upsampling.py
@@ -0,0 +1,140 @@
from collections.abc import Sequence
from types import ModuleType
from typing import Any

from rest_framework.request import Request

from sentry import options
from sentry.models.organization import Organization
from sentry.search.events.types import SnubaParams
from sentry.utils.cache import cache


def is_errors_query_for_error_upsampled_projects(
snuba_params: SnubaParams,
organization: Organization,
dataset: ModuleType,
request: Request,
) -> bool:
"""
Determine if this query should use error upsampling transformations.
Only applies when ALL projects are allowlisted and we're querying error events.
Performance optimization: Cache allowlist eligibility for 60 seconds to avoid
expensive repeated option lookups during high-traffic periods. This is safe
because allowlist changes are infrequent and eventual consistency is acceptable.
"""
cache_key = f"error_upsampling_eligible:{organization.id}:{hash(tuple(sorted(snuba_params.project_ids)))}"


⚠️ Potential issue | 🟠 Major

hash() is not stable across Python processes — cache keys will diverge in multi-process deployments.

Python's built-in hash() for tuples is randomized per process (due to PYTHONHASHSEED). In a multi-worker deployment, each process will compute a different hash for the same set of project IDs, meaning:

  1. Cache hits will fail across workers, defeating the caching purpose.
  2. invalidate_upsampling_cache called from one process won't clear keys set by another.

Use a deterministic representation instead.

Proposed fix
-    cache_key = f"error_upsampling_eligible:{organization.id}:{hash(tuple(sorted(snuba_params.project_ids)))}"
+    project_key = ",".join(str(pid) for pid in sorted(snuba_params.project_ids))
+    cache_key = f"error_upsampling_eligible:{organization.id}:{project_key}"

Apply the same fix in invalidate_upsampling_cache (line 73):

-    cache_key = f"error_upsampling_eligible:{organization_id}:{hash(tuple(sorted(project_ids)))}"
+    project_key = ",".join(str(pid) for pid in sorted(project_ids))
+    cache_key = f"error_upsampling_eligible:{organization_id}:{project_key}"
🤖 Prompt for AI Agents
In `@src/sentry/api/helpers/error_upsampling.py` at line 27, the cache key uses
Python's process-randomized hash of tuple(sorted(snuba_params.project_ids)),
which breaks cross-process cache consistency; replace that hashed value with a
deterministic representation (e.g., a canonical string of sorted project IDs or
a stable digest via hashlib on the joined IDs) when building cache_key in this
module and do the same change inside invalidate_upsampling_cache so both cache
set and invalidate use the identical deterministic key format (reference
cache_key, snuba_params.project_ids, organization.id, and the
invalidate_upsampling_cache function).
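
As a minimal sketch of the stable-digest option mentioned in the prompt (the helper below is hypothetical and not part of this diff), the key can be derived from a SHA-1 over the sorted, joined project IDs so every worker computes the same value:

import hashlib
from collections.abc import Sequence


def _stable_project_key(project_ids: Sequence[int]) -> str:
    # Sorting and joining gives a canonical string; the digest keeps keys short.
    # Unlike the built-in hash(), this is identical across processes and restarts.
    joined = ",".join(str(pid) for pid in sorted(project_ids))
    return hashlib.sha1(joined.encode("utf-8")).hexdigest()


# cache_key = f"error_upsampling_eligible:{organization.id}:{_stable_project_key(snuba_params.project_ids)}"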


# Check cache first for performance optimization
cached_result = cache.get(cache_key)
if cached_result is not None:
return cached_result and _should_apply_sample_weight_transform(dataset, request)

# Cache miss - perform fresh allowlist check
is_eligible = _are_all_projects_error_upsampled(snuba_params.project_ids, organization)

# Cache for 60 seconds to improve performance during traffic spikes
cache.set(cache_key, is_eligible, 60)

return is_eligible and _should_apply_sample_weight_transform(dataset, request)


def _are_all_projects_error_upsampled(
project_ids: Sequence[int], organization: Organization
) -> bool:
"""
Check if ALL projects in the query are allowlisted for error upsampling.
Only returns True if all projects pass the allowlist condition.
NOTE: This function reads the allowlist configuration fresh each time,
which means it can return different results between calls if the
configuration changes during request processing. This is intentional
to ensure we always have the latest configuration state.
"""
if not project_ids:
return False

allowlist = options.get("issues.client_error_sampling.project_allowlist", [])
if not allowlist:
return False

# All projects must be in the allowlist
result = all(project_id in allowlist for project_id in project_ids)
return result
Comment on lines +43 to +64


🛠️ Refactor suggestion | 🟠 Major

organization parameter is unused; allowlist lookup is O(n×m) on lists.

Two issues here:

  1. The organization parameter is accepted but never referenced (confirmed by static analysis). Either remove it or use it for the allowlist lookup.
  2. project_id in allowlist on a list is O(m) per call, making the all(...) check O(n×m). Convert to a set for O(n) total.
Proposed fix
 def _are_all_projects_error_upsampled(
-    project_ids: Sequence[int], organization: Organization
+    project_ids: Sequence[int],
 ) -> bool:
     if not project_ids:
         return False
 
     allowlist = options.get("issues.client_error_sampling.project_allowlist", [])
     if not allowlist:
         return False
 
+    allowlist_set = set(allowlist)
     # All projects must be in the allowlist
-    result = all(project_id in allowlist for project_id in project_ids)
+    result = all(project_id in allowlist_set for project_id in project_ids)
     return result

If organization is removed here, also update the caller on line 35:

-    is_eligible = _are_all_projects_error_upsampled(snuba_params.project_ids, organization)
+    is_eligible = _are_all_projects_error_upsampled(snuba_params.project_ids)
🧰 Tools
🪛 Ruff (0.14.14)

[warning] 44-44: Unused function argument: organization

(ARG001)

🤖 Prompt for AI Agents
In `@src/sentry/api/helpers/error_upsampling.py` around lines 43 - 64, the
function _are_all_projects_error_upsampled accepts an unused organization
parameter and does an O(n×m) membership test; remove the organization parameter
from _are_all_projects_error_upsampled (and update its callers accordingly) or
use it to fetch an org-scoped allowlist, and convert the allowlist to a set
before the membership loop (e.g., allowlist_set = set(allowlist)) so the
all(project_id in allowlist_set for project_id in project_ids) check is O(n);
also keep the early-return behavior when project_ids or allowlist is empty.



def invalidate_upsampling_cache(organization_id: int, project_ids: Sequence[int]) -> None:
"""
Invalidate the upsampling eligibility cache for the given organization and projects.
This should be called when the allowlist configuration changes to ensure
cache consistency across the system.
"""
cache_key = f"error_upsampling_eligible:{organization_id}:{hash(tuple(sorted(project_ids)))}"
cache.delete(cache_key)
Comment on lines +67 to +74


⚠️ Potential issue | 🟠 Major

Cache invalidation key must match the key used for cache set — both have the same hash() instability issue.

This function has the same hash() instability problem as noted on line 27. Additionally, this function is only useful if called with the exact same set of project IDs that was used in the original query. In practice, allowlist changes affect individual projects, so the caller would need to know all possible project ID combinations that were cached — which is impractical.

Consider whether a prefix-based invalidation or a versioned approach would be more robust.

🤖 Prompt for AI Agents
In `@src/sentry/api/helpers/error_upsampling.py` around lines 67 - 74,
invalidate_upsampling_cache uses Python's unstable hash() and requires the exact
same project_ids set as the cache key, so replace the unstable hash-based key
with a stable representation (e.g., sorted join of project IDs) or, better,
implement prefix/version-based invalidation: either build keys consistently the
same way as the setter (use the same stable serializer for the cache key in both
the setter and invalidate_upsampling_cache) or add a per-organization/version
key (e.g., org_up_sampling_version) that you bump when an allowlist changes and
include that version in the stored key so invalidate_upsampling_cache only needs
to increment the org version (or delete the prefix) instead of enumerating all
project ID combinations; locate and update both invalidate_upsampling_cache and
the corresponding cache set call to use the chosen stable/versioned scheme.
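
A rough sketch of the versioned-key scheme suggested above, reusing the same sentry.utils.cache backend imported in this file; the helper names and the version-key format are assumptions of this sketch, not part of the diff:

from collections.abc import Sequence

from sentry.utils.cache import cache


def _org_upsampling_version(organization_id: int) -> int:
    # Per-organization counter folded into every eligibility cache key.
    return cache.get(f"error_upsampling_version:{organization_id}") or 0


def _eligibility_cache_key(organization_id: int, project_ids: Sequence[int]) -> str:
    project_key = ",".join(str(pid) for pid in sorted(project_ids))
    version = _org_upsampling_version(organization_id)
    return f"error_upsampling_eligible:{version}:{organization_id}:{project_key}"


def invalidate_upsampling_cache(organization_id: int) -> None:
    # Bumping the version orphans every key previously cached for this org,
    # so callers never need to enumerate cached project ID combinations.
    cache.set(
        f"error_upsampling_version:{organization_id}",
        _org_upsampling_version(organization_id) + 1,
        None,
    )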



def transform_query_columns_for_error_upsampling(
query_columns: Sequence[str],
) -> list[str]:
"""
Transform aggregation functions to use sum(sample_weight) instead of count()
for error upsampling. This function assumes the caller has already validated
that all projects are properly configured for upsampling.
Note: We rely on the database schema to ensure sample_weight exists for all
events in allowlisted projects, so no additional null checks are needed here.
"""
transformed_columns = []
for column in query_columns:
column_lower = column.lower().strip()

if column_lower == "count()":
# Transform to upsampled count - assumes sample_weight column exists
# for all events in allowlisted projects per our data model requirements
transformed_columns.append("upsampled_count() as count")

else:
transformed_columns.append(column)

return transformed_columns


def _should_apply_sample_weight_transform(dataset: Any, request: Request) -> bool:
"""
Determine if we should apply sample_weight transformations based on the dataset
and query context. Only apply for error events since sample_weight doesn't exist
for transactions.
"""
from sentry.snuba import discover, errors

# Always apply for the errors dataset
if dataset == errors:
return True

from sentry.snuba import transactions

# Never apply for the transactions dataset
if dataset == transactions:
return False

# For the discover dataset, check if we're querying errors specifically
if dataset == discover:
result = _is_error_focused_query(request)
return result

# For other datasets (spans, metrics, etc.), don't apply
return False


def _is_error_focused_query(request: Request) -> bool:
"""
Check if a query is focused on error events.
Reduced to only check for event.type:error to err on the side of caution.
"""
query = request.GET.get("query", "").lower()

if "event.type:error" in query:
return True

return False
Comment on lines +130 to +140


⚠️ Potential issue | 🔴 Critical

_is_error_focused_query matches negated queries, returning a false positive.

A query like !event.type:error (which explicitly excludes errors) contains the substring "event.type:error" and will incorrectly return True. This would apply the upsampling transform to a non-error query, producing wrong results.

Proposed fix
 def _is_error_focused_query(request: Request) -> bool:
     """
     Check if a query is focused on error events.
-    Reduced to only check for event.type:error to err on the side of caution.
+    Only matches positive event.type:error, not negated queries.
     """
     query = request.GET.get("query", "").lower()
 
-    if "event.type:error" in query:
+    # Check for event.type:error but not !event.type:error
+    if "event.type:error" in query and "!event.type:error" not in query:
         return True
 
     return False
🤖 Prompt for AI Agents
In `@src/sentry/api/helpers/error_upsampling.py` around lines 130 - 140,
_is_error_focused_query currently returns true for any query containing the
substring "event.type:error", which incorrectly matches negated forms like
"!event.type:error". Modify _is_error_focused_query to only match non-negated
tokens by using a token-aware check (e.g., a regex or parser) that ensures
"event.type:error" is a standalone token not preceded by negation operators like
"!" or "-" or the word "not" and respects token boundaries; update the
implementation in _is_error_focused_query to perform this safer match so negated
queries do not return True.
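
For completeness, a hedged sketch of the token-aware variant the prompt describes, written against the raw query string rather than the request object; it only handles "!" and "-" negation and is illustrative, not a drop-in replacement for the suggestion above:

import re

# Match event.type:error only when it is not preceded by a negation operator
# ("!" or "-") or by another word character, so "!event.type:error" and
# run-together tokens like "myevent.type:error" do not count as error-focused.
_ERROR_TYPE_TOKEN = re.compile(r"(?<![!\-\w])event\.type:error\b")


def _is_error_focused_query(query: str) -> bool:
    return bool(_ERROR_TYPE_TOKEN.search(query.lower()))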

12 changes: 12 additions & 0 deletions src/sentry/search/events/datasets/discover.py
@@ -1038,6 +1038,18 @@ def function_converter(self) -> Mapping[str, SnQLFunction]:
default_result_type="integer",
private=True,
),
SnQLFunction(
"upsampled_count",
required_args=[],
# Optimized aggregation for error upsampling - assumes sample_weight
# exists for all events in allowlisted projects as per schema design
snql_aggregate=lambda args, alias: Function(
"toInt64",
[Function("sum", [Column("sample_weight")])],
alias,
),
default_result_type="number",
),
]
}
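
Taken together with the helper module above, an errors query against allowlisted projects has its count column rewritten before it reaches this resolver; roughly (illustrative call using the import path added in this PR):

from sentry.api.helpers.error_upsampling import transform_query_columns_for_error_upsampling

columns = transform_query_columns_for_error_upsampling(["count()", "count_unique(user)"])
# columns == ["upsampled_count() as count", "count_unique(user)"]
# The SnQLFunction registered above then resolves upsampled_count() to
# toInt64(sum(sample_weight)), aliased back to "count" in the result.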

21 changes: 20 additions & 1 deletion src/sentry/testutils/factories.py
@@ -8,7 +8,7 @@
import zipfile
from base64 import b64encode
from binascii import hexlify
from collections.abc import Mapping, Sequence
from collections.abc import Mapping, MutableMapping, Sequence
from datetime import UTC, datetime
from enum import Enum
from hashlib import sha1
@@ -341,6 +341,22 @@ def _patch_artifact_manifest(path, org=None, release=None, project=None, extra_f
return orjson.dumps(manifest).decode()


def _set_sample_rate_from_error_sampling(normalized_data: MutableMapping[str, Any]) -> None:
"""Set 'sample_rate' on normalized_data if contexts.error_sampling.client_sample_rate is present and valid."""
client_sample_rate = None
try:
client_sample_rate = (
normalized_data.get("contexts", {}).get("error_sampling", {}).get("client_sample_rate")
)
except Exception:
pass
if client_sample_rate:
try:
normalized_data["sample_rate"] = float(client_sample_rate)
except Exception:
pass


# TODO(dcramer): consider moving to something more scalable like factoryboy
class Factories:
@staticmethod
@@ -1029,6 +1045,9 @@ def store_event(
assert not errors, errors

normalized_data = manager.get_data()

_set_sample_rate_from_error_sampling(normalized_data)

event = None

# When fingerprint is present on transaction, inject performance problems
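
Finally, a short usage sketch of the new factory hook with an illustrative payload; the mapping shown is what _set_sample_rate_from_error_sampling performs above:

normalized_data = {
    "contexts": {"error_sampling": {"client_sample_rate": "0.25"}},
}
_set_sample_rate_from_error_sampling(normalized_data)
# normalized_data["sample_rate"] == 0.25; downstream processing can use this
# to derive the sample_weight that upsampled_count() aggregates over.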