Add smoke tests for endpoints and optimize database queries

This commit is contained in:
Joakim Hellsén 2026-04-10 23:54:10 +02:00
commit 1782db4840
Signed by: Joakim Hellsén
SSH key fingerprint: SHA256:/9h/CsExpFp+PRhsfA0xznFx2CGfTT5R/kpuFfUgEQk
8 changed files with 1044 additions and 48 deletions

View file

@ -1,5 +1,7 @@
import logging
from collections import OrderedDict
from typing import TYPE_CHECKING
from typing import Any
import auto_prefetch
from django.conf import settings
@ -508,6 +510,93 @@ class DropCampaign(auto_prefetch.Model):
def __str__(self) -> str:
return self.name
@classmethod
def active_for_dashboard(
    cls,
    now: datetime.datetime,
) -> models.QuerySet[DropCampaign]:
    """Build the dashboard queryset of currently-active drop campaigns.

    Restricts the SELECT to the columns the dashboard actually renders and
    eagerly loads the related game, its owners, and the allowed channels so
    list rendering does not trigger per-row queries.

    Args:
        now: Current timestamp used to evaluate active campaigns.

    Returns:
        QuerySet of active campaigns ordered by newest start date.
    """
    # Only the fields the dashboard templates read, including the
    # select_related game columns.
    dashboard_fields: tuple[str, ...] = (
        "twitch_id",
        "name",
        "image_url",
        "image_file",
        "start_at",
        "end_at",
        "allow_is_enabled",
        "game",
        "game__twitch_id",
        "game__display_name",
        "game__slug",
        "game__box_art",
        "game__box_art_file",
    )
    owners_prefetch = models.Prefetch(
        "game__owners",
        queryset=Organization.objects.only("twitch_id", "name"),
    )
    # to_attr caches the sorted channel list on each campaign instance.
    channels_prefetch = models.Prefetch(
        "allow_channels",
        queryset=Channel.objects.only(
            "twitch_id",
            "name",
            "display_name",
        ).order_by("display_name"),
        to_attr="channels_ordered",
    )
    active = cls.objects.filter(start_at__lte=now, end_at__gte=now)
    return (
        active.select_related("game")
        .only(*dashboard_fields)
        .prefetch_related(owners_prefetch, channels_prefetch)
        .order_by("-start_at")
    )
@staticmethod
def grouped_by_game(
campaigns: models.QuerySet[DropCampaign],
) -> OrderedDict[str, dict[str, Any]]:
"""Group campaigns by game for dashboard rendering.
The grouping keeps insertion order and avoids duplicate per-game cards when
games have multiple owners.
Args:
campaigns: Campaign queryset from active_for_dashboard().
Returns:
Ordered mapping keyed by game twitch_id.
"""
campaigns_by_game: OrderedDict[str, dict[str, Any]] = OrderedDict()
for campaign in campaigns:
game: Game = campaign.game
game_id: str = game.twitch_id
if game_id not in campaigns_by_game:
campaigns_by_game[game_id] = {
"name": game.display_name,
"box_art": game.box_art_best_url,
"owners": list(game.owners.all()),
"campaigns": [],
}
campaigns_by_game[game_id]["campaigns"].append({
"campaign": campaign,
"image_url": campaign.listing_image_url,
"allowed_channels": getattr(campaign, "channels_ordered", []),
})
return campaigns_by_game
@property
def is_active(self) -> bool:
"""Check if the campaign is currently active."""
@ -526,19 +615,21 @@ class DropCampaign(auto_prefetch.Model):
"Skull & Bones - Closed Beta" -> "Closed Beta" (& is replaced
with "and")
"""
if not self.game or not self.game.display_name:
self_game: Game | None = self.game
if not self_game or not self_game.display_name:
return self.name
game_variations = [self.game.display_name]
if "&" in self.game.display_name:
game_variations.append(self.game.display_name.replace("&", "and"))
if "and" in self.game.display_name:
game_variations.append(self.game.display_name.replace("and", "&"))
game_variations: list[str] = [self_game.display_name]
if "&" in self_game.display_name:
game_variations.append(self_game.display_name.replace("&", "and"))
if "and" in self_game.display_name:
game_variations.append(self_game.display_name.replace("and", "&"))
for game_name in game_variations:
# Check for different separators after the game name
for separator in [" - ", " | ", " "]:
prefix_to_check = game_name + separator
prefix_to_check: str = game_name + separator
name: str = self.name
if name.startswith(prefix_to_check):
@ -573,6 +664,20 @@ class DropCampaign(auto_prefetch.Model):
return ""
@property
def listing_image_url(self) -> str:
    """Return a campaign image URL optimized for list views.

    This intentionally avoids traversing drops/benefits to prevent N+1 queries
    in list pages that render many campaigns.

    Returns:
        The stored image file's URL when present, otherwise ``image_url``,
        otherwise an empty string.
    """
    try:
        image_file = self.image_file
        if image_file:
            # Resolve .url exactly once (the original evaluated it twice:
            # via getattr in the condition and again in the return).
            # FieldFile.url can raise ValueError when no file is set, which
            # is handled below; getattr only swallows AttributeError.
            url = getattr(image_file, "url", None)
            if url:
                return url
    except (AttributeError, OSError, ValueError) as exc:
        logger.debug("Failed to resolve DropCampaign.image_file url: %s", exc)
    return self.image_url or ""
@property
def duration_iso(self) -> str:
"""Return the campaign duration in ISO 8601 format (e.g., 'P3DT4H30M').
@ -1006,6 +1111,32 @@ class RewardCampaign(auto_prefetch.Model):
"""Return a string representation of the reward campaign."""
return f"{self.brand}: {self.name}" if self.brand else self.name
@classmethod
def active_for_dashboard(
    cls,
    now: datetime.datetime,
) -> models.QuerySet[RewardCampaign]:
    """Return active reward campaigns with only dashboard-needed fields."""
    # Restrict the SELECT to the columns the dashboard renders, including
    # the select_related game columns.
    dashboard_fields: tuple[str, ...] = (
        "twitch_id",
        "name",
        "brand",
        "summary",
        "external_url",
        "starts_at",
        "ends_at",
        "is_sitewide",
        "game",
        "game__twitch_id",
        "game__display_name",
    )
    active = cls.objects.filter(starts_at__lte=now, ends_at__gte=now)
    return (
        active.select_related("game")
        .only(*dashboard_fields)
        .order_by("-starts_at")
    )
@property
def is_active(self) -> bool:
"""Check if the reward campaign is currently active."""

View file

@ -9,8 +9,10 @@ from typing import Literal
import pytest
from django.core.handlers.wsgi import WSGIRequest
from django.core.paginator import Paginator
from django.db import connection
from django.db.models import Max
from django.test import RequestFactory
from django.test.utils import CaptureQueriesContext
from django.urls import reverse
from django.utils import timezone
@ -34,6 +36,7 @@ from twitch.views import _truncate_description
if TYPE_CHECKING:
from django.core.handlers.wsgi import WSGIRequest
from django.db.models import QuerySet
from django.test import Client
from django.test.client import _MonkeyPatchedWSGIResponse
from django.test.utils import ContextList
@ -537,6 +540,225 @@ class TestChannelListView:
assert game.twitch_id in context["campaigns_by_game"]
assert len(context["campaigns_by_game"][game.twitch_id]["campaigns"]) == 1
@pytest.mark.django_db
def test_dashboard_queries_use_indexes(self) -> None:
    """Dashboard source queries should use indexes for active-window filtering.

    Seeds 500 inactive rows per model plus one active row so the query
    planner has a real incentive to use an index, then asserts the EXPLAIN
    output of both production dashboard querysets mentions an index scan.
    """
    now: datetime.datetime = timezone.now()
    org: Organization = Organization.objects.create(
        twitch_id="org_index_test",
        name="Org Index Test",
    )
    game: Game = Game.objects.create(
        twitch_id="game_index_test",
        name="Game Index Test",
        display_name="Game Index Test",
    )
    game.owners.add(org)
    # Add enough rows so the query planner has a reason to pick indexes.
    campaigns: list[DropCampaign] = []
    for i in range(250):
        campaigns.extend((
            DropCampaign(
                twitch_id=f"inactive_old_{i}",
                name=f"Inactive old {i}",
                game=game,
                operation_names=["DropCampaignDetails"],
                start_at=now - timedelta(days=60),
                end_at=now - timedelta(days=30),
            ),
            DropCampaign(
                twitch_id=f"inactive_future_{i}",
                name=f"Inactive future {i}",
                game=game,
                operation_names=["DropCampaignDetails"],
                start_at=now + timedelta(days=30),
                end_at=now + timedelta(days=60),
            ),
        ))
    campaigns.append(
        DropCampaign(
            twitch_id="active_for_dashboard_index_test",
            name="Active campaign",
            game=game,
            operation_names=["DropCampaignDetails"],
            start_at=now - timedelta(hours=1),
            end_at=now + timedelta(hours=1),
        ),
    )
    DropCampaign.objects.bulk_create(campaigns)
    reward_campaigns: list[RewardCampaign] = []
    for i in range(250):
        reward_campaigns.extend((
            RewardCampaign(
                twitch_id=f"reward_inactive_old_{i}",
                name=f"Reward inactive old {i}",
                game=game,
                starts_at=now - timedelta(days=60),
                ends_at=now - timedelta(days=30),
            ),
            RewardCampaign(
                twitch_id=f"reward_inactive_future_{i}",
                name=f"Reward inactive future {i}",
                game=game,
                starts_at=now + timedelta(days=30),
                ends_at=now + timedelta(days=60),
            ),
        ))
    reward_campaigns.append(
        RewardCampaign(
            twitch_id="reward_active_for_dashboard_index_test",
            name="Active reward campaign",
            game=game,
            starts_at=now - timedelta(hours=1),
            ends_at=now + timedelta(hours=1),
        ),
    )
    RewardCampaign.objects.bulk_create(reward_campaigns)
    active_campaigns_qs: QuerySet[DropCampaign] = DropCampaign.active_for_dashboard(
        now,
    )
    # Use the production classmethod here too (previously this duplicated
    # the filter inline), so EXPLAIN covers exactly the query the
    # dashboard view runs.
    active_reward_campaigns_qs: QuerySet[RewardCampaign] = (
        RewardCampaign.active_for_dashboard(now)
    )
    campaigns_plan: str = active_campaigns_qs.explain()
    reward_plan: str = active_reward_campaigns_qs.explain()
    if connection.vendor == "sqlite":
        campaigns_uses_index: bool = "USING INDEX" in campaigns_plan.upper()
        rewards_uses_index: bool = "USING INDEX" in reward_plan.upper()
    elif connection.vendor == "postgresql":
        campaigns_uses_index = (
            "INDEX SCAN" in campaigns_plan.upper()
            or "BITMAP INDEX SCAN" in campaigns_plan.upper()
        )
        rewards_uses_index = (
            "INDEX SCAN" in reward_plan.upper()
            or "BITMAP INDEX SCAN" in reward_plan.upper()
        )
    else:
        # Plan text is vendor-specific; don't fail on unknown backends.
        pytest.skip(
            f"Unsupported DB vendor for index-plan assertion: {connection.vendor}",
        )
    # Include the raw plan in the failure message for debuggability.
    assert campaigns_uses_index, campaigns_plan
    assert rewards_uses_index, reward_plan
@pytest.mark.django_db
def test_dashboard_query_count_stays_flat_with_more_data(
    self,
    client: Client,
) -> None:
    """Dashboard should avoid N+1 queries as campaign volume grows."""
    now: datetime.datetime = timezone.now()
    org: Organization = Organization.objects.create(
        twitch_id="org_query_count",
        name="Org Query Count",
    )
    game: Game = Game.objects.create(
        twitch_id="game_query_count",
        name="game_query_count",
        display_name="Game Query Count",
    )
    game.owners.add(org)

    def _capture_dashboard_select_count() -> int:
        # Render the dashboard while recording every SQL statement issued,
        # then count only the SELECTs (ignoring e.g. savepoint management).
        with CaptureQueriesContext(connection) as queries:
            response: _MonkeyPatchedWSGIResponse = client.get(
                reverse("twitch:dashboard"),
            )
        assert response.status_code == 200
        select_queries: list[str] = [
            query_info["sql"]
            for query_info in queries.captured_queries
            if query_info["sql"].lstrip().upper().startswith("SELECT")
        ]
        return len(select_queries)

    # Baseline: one active drop campaign and one active reward campaign.
    base_campaign: DropCampaign = DropCampaign.objects.create(
        twitch_id="baseline_campaign",
        name="Baseline campaign",
        game=game,
        operation_names=["DropCampaignDetails"],
        start_at=now - timedelta(hours=1),
        end_at=now + timedelta(hours=1),
    )
    base_channel: Channel = Channel.objects.create(
        twitch_id="baseline_channel",
        name="baselinechannel",
        display_name="BaselineChannel",
    )
    base_campaign.allow_channels.add(base_channel)
    RewardCampaign.objects.create(
        twitch_id="baseline_reward_campaign",
        name="Baseline reward campaign",
        game=game,
        starts_at=now - timedelta(hours=1),
        ends_at=now + timedelta(hours=1),
        summary="Baseline summary",
        external_url="https://example.com/reward/baseline",
    )
    baseline_select_count: int = _capture_dashboard_select_count()
    # Scale up active dashboard data substantially.
    extra_campaigns: list[DropCampaign] = [
        DropCampaign(
            twitch_id=f"scaled_campaign_{i}",
            name=f"Scaled campaign {i}",
            game=game,
            operation_names=["DropCampaignDetails"],
            start_at=now - timedelta(hours=2),
            end_at=now + timedelta(hours=2),
        )
        for i in range(12)
    ]
    DropCampaign.objects.bulk_create(extra_campaigns)
    # Give every scaled campaign an allowed channel so the allow_channels
    # prefetch has real rows to fetch for each campaign.
    for i, campaign in enumerate(
        DropCampaign.objects.filter(
            twitch_id__startswith="scaled_campaign_",
        ).order_by("twitch_id"),
    ):
        channel: Channel = Channel.objects.create(
            twitch_id=f"scaled_channel_{i}",
            name=f"scaledchannel{i}",
            display_name=f"ScaledChannel{i}",
        )
        campaign.allow_channels.add(channel)
    extra_rewards: list[RewardCampaign] = [
        RewardCampaign(
            twitch_id=f"scaled_reward_{i}",
            name=f"Scaled reward {i}",
            game=game,
            starts_at=now - timedelta(hours=2),
            ends_at=now + timedelta(hours=2),
            summary=f"Scaled summary {i}",
            external_url=f"https://example.com/reward/{i}",
        )
        for i in range(12)
    ]
    RewardCampaign.objects.bulk_create(extra_rewards)
    scaled_select_count: int = _capture_dashboard_select_count()
    # A small tolerance (+2) allows for constant-count extra queries
    # (e.g. pagination counts) while still catching per-row N+1 growth.
    assert scaled_select_count <= baseline_select_count + 2, (
        "Dashboard SELECT query count grew with data volume; possible N+1 regression. "
        f"baseline={baseline_select_count}, scaled={scaled_select_count}"
    )
@pytest.mark.django_db
def test_debug_view(self, client: Client) -> None:
"""Test debug view returns 200 and has games_without_owner in context."""

View file

@ -1071,48 +1071,14 @@ def dashboard(request: HttpRequest) -> HttpResponse:
HttpResponse: The rendered dashboard template.
"""
now: datetime.datetime = timezone.now()
active_campaigns: QuerySet[DropCampaign] = (
DropCampaign.objects
.filter(start_at__lte=now, end_at__gte=now)
.select_related("game")
.prefetch_related("game__owners")
.prefetch_related(
Prefetch(
"allow_channels",
queryset=Channel.objects.order_by("display_name"),
to_attr="channels_ordered",
),
)
.order_by("-start_at")
active_campaigns: QuerySet[DropCampaign] = DropCampaign.active_for_dashboard(now)
campaigns_by_game: OrderedDict[str, dict[str, Any]] = DropCampaign.grouped_by_game(
active_campaigns,
)
# Preserve insertion order (newest campaigns first).
# Group by game so games with multiple owners don't render duplicate campaign cards.
campaigns_by_game: OrderedDict[str, dict[str, Any]] = OrderedDict()
for campaign in active_campaigns:
game: Game = campaign.game
game_id: str = game.twitch_id
if game_id not in campaigns_by_game:
campaigns_by_game[game_id] = {
"name": game.display_name,
"box_art": game.box_art_best_url,
"owners": list(game.owners.all()),
"campaigns": [],
}
campaigns_by_game[game_id]["campaigns"].append({
"campaign": campaign,
"allowed_channels": getattr(campaign, "channels_ordered", []),
})
# Get active reward campaigns (Quest rewards)
active_reward_campaigns: QuerySet[RewardCampaign] = (
RewardCampaign.objects
.filter(starts_at__lte=now, ends_at__gte=now)
.select_related("game")
.order_by("-starts_at")
RewardCampaign.active_for_dashboard(now)
)
# WebSite schema with SearchAction for sitelinks search box