diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 0000000..a569984
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,2 @@
+# Changed line-length back to default
+1118c03c1b21e217bb66ee2811c423fe3624d546
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 14671bd..5d7ad77 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -21,7 +21,7 @@ repos:
- id: trailing-whitespace
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.15.0
+ rev: v0.15.5
hooks:
- id: ruff-check
args: ["--fix", "--exit-non-zero-on-fix"]
@@ -34,6 +34,6 @@ repos:
args: ["--py311-plus"]
- repo: https://github.com/rhysd/actionlint
- rev: v1.7.10
+ rev: v1.7.11
hooks:
- id: actionlint
diff --git a/config/settings.py b/config/settings.py
index 6976d78..b112427 100644
--- a/config/settings.py
+++ b/config/settings.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
import logging
import os
import sys
@@ -39,7 +37,11 @@ def env_int(key: str, default: int) -> int:
DEBUG: bool = env_bool(key="DEBUG", default=True)
-TESTING: bool = env_bool(key="TESTING", default=False) or "test" in sys.argv or "PYTEST_VERSION" in os.environ
+TESTING: bool = (
+ env_bool(key="TESTING", default=False)
+ or "test" in sys.argv
+ or "PYTEST_VERSION" in os.environ
+)
def get_data_dir() -> Path:
@@ -118,28 +120,11 @@ if not DEBUG:
LOGGING: dict[str, Any] = {
"version": 1,
"disable_existing_loggers": False,
- "handlers": {
- "console": {
- "level": "DEBUG",
- "class": "logging.StreamHandler",
- },
- },
+ "handlers": {"console": {"level": "DEBUG", "class": "logging.StreamHandler"}},
"loggers": {
- "": {
- "handlers": ["console"],
- "level": "INFO",
- "propagate": True,
- },
- "ttvdrops": {
- "handlers": ["console"],
- "level": "DEBUG",
- "propagate": False,
- },
- "django": {
- "handlers": ["console"],
- "level": "INFO",
- "propagate": False,
- },
+ "": {"handlers": ["console"], "level": "INFO", "propagate": True},
+ "ttvdrops": {"handlers": ["console"], "level": "DEBUG", "propagate": False},
+ "django": {"handlers": ["console"], "level": "INFO", "propagate": False},
"django.utils.autoreload": {
"handlers": ["console"],
"level": "INFO",
@@ -179,12 +164,7 @@ TEMPLATES: list[dict[str, Any]] = [
]
DATABASES: dict[str, dict[str, Any]] = (
- {
- "default": {
- "ENGINE": "django.db.backends.sqlite3",
- "NAME": ":memory:",
- },
- }
+ {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}}
if TESTING
else {
"default": {
@@ -196,19 +176,13 @@ DATABASES: dict[str, dict[str, Any]] = (
"PORT": env_int("POSTGRES_PORT", 5432),
"CONN_MAX_AGE": env_int("CONN_MAX_AGE", 60),
"CONN_HEALTH_CHECKS": env_bool("CONN_HEALTH_CHECKS", default=True),
- "OPTIONS": {
- "connect_timeout": env_int("DB_CONNECT_TIMEOUT", 10),
- },
+ "OPTIONS": {"connect_timeout": env_int("DB_CONNECT_TIMEOUT", 10)},
},
}
)
if not TESTING:
- INSTALLED_APPS = [
- *INSTALLED_APPS,
- "debug_toolbar",
- "silk",
- ]
+ INSTALLED_APPS = [*INSTALLED_APPS, "debug_toolbar", "silk"]
MIDDLEWARE = [
"debug_toolbar.middleware.DebugToolbarMiddleware",
"silk.middleware.SilkyMiddleware",
diff --git a/config/tests/test_settings.py b/config/tests/test_settings.py
index e1ed291..298dd35 100644
--- a/config/tests/test_settings.py
+++ b/config/tests/test_settings.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
import importlib
import os
import sys
@@ -42,7 +40,10 @@ def reload_settings_module() -> Generator[Callable[..., ModuleType]]:
def _reload(**env_overrides: str | None) -> ModuleType:
env: dict[str, str] = os.environ.copy()
- env.setdefault("DJANGO_SECRET_KEY", original_env.get("DJANGO_SECRET_KEY", "test-secret-key"))
+ env.setdefault(
+ "DJANGO_SECRET_KEY",
+ original_env.get("DJANGO_SECRET_KEY", "test-secret-key"),
+ )
for key, value in env_overrides.items():
if value is None:
@@ -95,7 +96,10 @@ def test_env_int_returns_default(monkeypatch: pytest.MonkeyPatch) -> None:
assert settings.env_int("MAX_COUNT", 3) == 3
-def test_get_data_dir_uses_platformdirs(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None:
+def test_get_data_dir_uses_platformdirs(
+ monkeypatch: pytest.MonkeyPatch,
+ tmp_path: Path,
+) -> None:
"""get_data_dir should use platformdirs and create the directory."""
fake_dir: Path = tmp_path / "data_dir"
@@ -112,7 +116,9 @@ def test_get_data_dir_uses_platformdirs(monkeypatch: pytest.MonkeyPatch, tmp_pat
assert path.is_dir() is True
-def test_allowed_hosts_when_debug_false(reload_settings_module: Callable[..., ModuleType]) -> None:
+def test_allowed_hosts_when_debug_false(
+ reload_settings_module: Callable[..., ModuleType],
+) -> None:
"""When DEBUG is false, ALLOWED_HOSTS should use the production host."""
reloaded: ModuleType = reload_settings_module(DEBUG="false")
@@ -120,7 +126,9 @@ def test_allowed_hosts_when_debug_false(reload_settings_module: Callable[..., Mo
assert reloaded.ALLOWED_HOSTS == ["ttvdrops.lovinator.space"]
-def test_allowed_hosts_when_debug_true(reload_settings_module: Callable[..., ModuleType]) -> None:
+def test_allowed_hosts_when_debug_true(
+ reload_settings_module: Callable[..., ModuleType],
+) -> None:
"""When DEBUG is true, development hostnames should be allowed."""
reloaded: ModuleType = reload_settings_module(DEBUG="1")
@@ -128,7 +136,9 @@ def test_allowed_hosts_when_debug_true(reload_settings_module: Callable[..., Mod
assert reloaded.ALLOWED_HOSTS == [".localhost", "127.0.0.1", "[::1]", "testserver"]
-def test_debug_defaults_true_when_missing(reload_settings_module: Callable[..., ModuleType]) -> None:
+def test_debug_defaults_true_when_missing(
+ reload_settings_module: Callable[..., ModuleType],
+) -> None:
"""DEBUG should default to True when the environment variable is missing."""
reloaded: ModuleType = reload_settings_module(DEBUG=None)
@@ -172,7 +182,9 @@ def test_testing_true_when_sys_argv_contains_test(
assert reloaded.TESTING is True
-def test_testing_true_when_pytest_version_set(reload_settings_module: Callable[..., ModuleType]) -> None:
+def test_testing_true_when_pytest_version_set(
+ reload_settings_module: Callable[..., ModuleType],
+) -> None:
"""TESTING should be true when PYTEST_VERSION is set in the env."""
reloaded: ModuleType = reload_settings_module(PYTEST_VERSION="7.0.0")
@@ -212,7 +224,9 @@ def test_missing_secret_key_causes_system_exit(monkeypatch: pytest.MonkeyPatch)
__import__("config.settings")
-def test_email_settings_from_env(reload_settings_module: Callable[..., ModuleType]) -> None:
+def test_email_settings_from_env(
+ reload_settings_module: Callable[..., ModuleType],
+) -> None:
"""EMAIL_* values should be read from the environment and cast correctly."""
reloaded: ModuleType = reload_settings_module(
EMAIL_HOST="smtp.example.com",
diff --git a/config/tests/test_urls.py b/config/tests/test_urls.py
index dca6fc9..83738d1 100644
--- a/config/tests/test_urls.py
+++ b/config/tests/test_urls.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
import importlib
from typing import TYPE_CHECKING
diff --git a/config/urls.py b/config/urls.py
index 49724ee..2c031ea 100644
--- a/config/urls.py
+++ b/config/urls.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
from typing import TYPE_CHECKING
from django.conf import settings
@@ -21,10 +19,7 @@ urlpatterns: list[URLPattern | URLResolver] = [
# Serve media in development
if settings.DEBUG:
- urlpatterns += static(
- settings.MEDIA_URL,
- document_root=settings.MEDIA_ROOT,
- )
+ urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if not settings.TESTING:
from debug_toolbar.toolbar import debug_toolbar_urls
diff --git a/config/wsgi.py b/config/wsgi.py
index 05ced01..e61f3f2 100644
--- a/config/wsgi.py
+++ b/config/wsgi.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
import os
from typing import TYPE_CHECKING
diff --git a/manage.py b/manage.py
index c709739..1077d0a 100755
--- a/manage.py
+++ b/manage.py
@@ -1,8 +1,6 @@
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
-from __future__ import annotations
-
import os
import sys
diff --git a/pyproject.toml b/pyproject.toml
index 87d0b4d..5a17154 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -45,15 +45,22 @@ filterwarnings = [
]
[tool.ruff]
+fix = true
+preview = true
+unsafe-fixes = true
+
+format.docstring-code-format = true
+format.preview = true
+
+lint.future-annotations = true
+lint.isort.force-single-line = true
+lint.pycodestyle.ignore-overlong-task-comments = true
+lint.pydocstyle.convention = "google"
lint.select = ["ALL"]
# Don't automatically remove unused variables
lint.unfixable = ["F841"]
-lint.pydocstyle.convention = "google"
-lint.isort.required-imports = ["from __future__ import annotations"]
-lint.isort.force-single-line = true
-
lint.ignore = [
"ANN002", # Checks that function *args arguments have type annotations.
"ANN003", # Checks that function **kwargs arguments have type annotations.
@@ -63,6 +70,7 @@ lint.ignore = [
"D104", # Checks for undocumented public package definitions.
"D105", # Checks for undocumented magic method definitions.
"D106", # Checks for undocumented public class definitions, for nested classes.
+ "E501", # Checks for lines that exceed the specified maximum character length.
"ERA001", # Checks for commented-out Python code.
"FIX002", # Checks for "TODO" comments.
"PLR0911", # Checks for functions or methods with too many return statements.
@@ -87,10 +95,6 @@ lint.ignore = [
"Q003", # Checks for strings that include escaped quotes, and suggests changing the quote style to avoid the need to escape them.
"W191", # Checks for indentation that uses tabs.
]
-preview = true
-unsafe-fixes = true
-fix = true
-line-length = 120
[tool.ruff.lint.per-file-ignores]
"**/tests/**" = [
diff --git a/twitch/apps.py b/twitch/apps.py
index a1cf1aa..33fadea 100644
--- a/twitch/apps.py
+++ b/twitch/apps.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
from django.apps import AppConfig
diff --git a/twitch/feeds.py b/twitch/feeds.py
index 783bdeb..4828521 100644
--- a/twitch/feeds.py
+++ b/twitch/feeds.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
import logging
import re
from typing import TYPE_CHECKING
@@ -14,12 +12,10 @@ from django.utils import feedgenerator
from django.utils import timezone
from django.utils.html import format_html
from django.utils.html import format_html_join
-from django.utils.safestring import SafeString
from django.utils.safestring import SafeText
from twitch.models import Channel
from twitch.models import ChatBadge
-from twitch.models import DropBenefit
from twitch.models import DropCampaign
from twitch.models import Game
from twitch.models import Organization
@@ -33,6 +29,9 @@ if TYPE_CHECKING:
from django.db.models import QuerySet
from django.http import HttpRequest
from django.http import HttpResponse
+ from django.utils.safestring import SafeString
+
+ from twitch.models import DropBenefit
logger: logging.Logger = logging.getLogger("ttvdrops")
@@ -71,12 +70,20 @@ def insert_date_info(item: Model, parts: list[SafeText]) -> None:
if start_at or end_at:
start_part: SafeString = (
- format_html("Starts: {} ({})", start_at.strftime("%Y-%m-%d %H:%M %Z"), naturaltime(start_at))
+ format_html(
+ "Starts: {} ({})",
+ start_at.strftime("%Y-%m-%d %H:%M %Z"),
+ naturaltime(start_at),
+ )
if start_at
else SafeText("")
)
end_part: SafeString = (
- format_html("Ends: {} ({})", end_at.strftime("%Y-%m-%d %H:%M %Z"), naturaltime(end_at))
+ format_html(
+ "Ends: {} ({})",
+ end_at.strftime("%Y-%m-%d %H:%M %Z"),
+ naturaltime(end_at),
+ )
if end_at
else SafeText("")
)
@@ -130,7 +137,10 @@ def _build_drops_data(drops_qs: QuerySet[TimeBasedDrop]) -> list[dict]:
return drops_data
-def _build_channels_html(channels: list[Channel] | QuerySet[Channel], game: Game | None) -> SafeText:
+def _build_channels_html(
+ channels: list[Channel] | QuerySet[Channel],
+ game: Game | None,
+) -> SafeText:
"""Render up to max_links channel links as
, then a count of additional channels, or fallback to game category link.
If only one channel and drop_requirements is '1 subscriptions required',
@@ -142,9 +152,11 @@ def _build_channels_html(channels: list[Channel] | QuerySet[Channel], game: Game
Returns:
SafeText: HTML with up to max_links channel links, count of more, or fallback link.
- """ # noqa: E501
+ """
max_links = 5
- channels_all: list[Channel] = list(channels) if isinstance(channels, list) else list(channels.all())
+ channels_all: list[Channel] = (
+ list(channels) if isinstance(channels, list) else list(channels.all())
+ )
total: int = len(channels_all)
if channels_all:
@@ -166,18 +178,31 @@ def _build_channels_html(channels: list[Channel] | QuerySet[Channel], game: Game
)
if not game:
- logger.warning("No game associated with drop campaign for channel fallback link")
- return format_html("{}", "Drop has no game and no channels connected to the drop. ")
+ logger.warning(
+ "No game associated with drop campaign for channel fallback link",
+ )
+ return format_html(
+ "{}",
+ "Drop has no game and no channels connected to the drop. ",
+ )
if not game.twitch_directory_url:
- logger.warning("Game %s has no Twitch directory URL for channel fallback link", game)
- if getattr(game, "details_url", "") == "https://help.twitch.tv/s/article/twitch-chat-badges-guide ":
+ logger.warning(
+ "Game %s has no Twitch directory URL for channel fallback link",
+ game,
+ )
+ if (
+ getattr(game, "details_url", "")
+ == "https://help.twitch.tv/s/article/twitch-chat-badges-guide "
+ ):
# TODO(TheLovinator): Improve detection of global emotes # noqa: TD003
return format_html("{}", "")
- return format_html("{}", "Failed to get Twitch category URL :( ")
+ return format_html(
+ "{}",
+ "Failed to get Twitch category URL :( ",
+ )
- # If no channel is associated, the drop is category-wide; link to the game's Twitch directory
display_name: str = getattr(game, "display_name", "this game")
return format_html(
'',
@@ -187,10 +212,14 @@ def _build_channels_html(channels: list[Channel] | QuerySet[Channel], game: Game
)
-def _construct_drops_summary(drops_data: list[dict], channel_name: str | None = None) -> SafeText:
+def _construct_drops_summary(
+ drops_data: list[dict],
+ channel_name: str | None = None,
+) -> SafeText:
"""Construct a safe HTML summary of drops and their benefits.
- If the requirements indicate a subscription is required, link the benefit names to the Twitch channel.
+ If the requirements indicate a subscription is required, link the benefit
+ names to the Twitch channel.
Args:
drops_data (list[dict]): List of drop data dicts.
@@ -205,13 +234,20 @@ def _construct_drops_summary(drops_data: list[dict], channel_name: str | None =
badge_titles: set[str] = set()
for drop in drops_data:
for b in drop.get("benefits", []):
- if getattr(b, "distribution_type", "") == "BADGE" and getattr(b, "name", ""):
+ if getattr(b, "distribution_type", "") == "BADGE" and getattr(
+ b,
+ "name",
+ "",
+ ):
badge_titles.add(b.name)
badge_descriptions_by_title: dict[str, str] = {}
if badge_titles:
badge_descriptions_by_title = dict(
- ChatBadge.objects.filter(title__in=badge_titles).values_list("title", "description"),
+ ChatBadge.objects.filter(title__in=badge_titles).values_list(
+ "title",
+ "description",
+ ),
)
def sort_key(drop: dict) -> tuple[bool, int]:
@@ -226,7 +262,9 @@ def _construct_drops_summary(drops_data: list[dict], channel_name: str | None =
for drop in sorted_drops:
requirements: str = drop.get("requirements", "")
benefits: list[DropBenefit] = drop.get("benefits", [])
- is_sub_required: bool = "sub required" in requirements or "subs required" in requirements
+ is_sub_required: bool = (
+ "sub required" in requirements or "subs required" in requirements
+ )
benefit_names: list[tuple[str]] = []
for b in benefits:
benefit_name: str = getattr(b, "name", str(b))
@@ -238,19 +276,30 @@ def _construct_drops_summary(drops_data: list[dict], channel_name: str | None =
benefit_name,
)
if badge_desc:
- benefit_names.append((format_html("{} ({} )", linked_name, badge_desc),))
+ benefit_names.append((
+ format_html("{} ({} )", linked_name, badge_desc),
+ ))
else:
benefit_names.append((linked_name,))
elif badge_desc:
- benefit_names.append((format_html("{} ({} )", benefit_name, badge_desc),))
+ benefit_names.append((
+ format_html("{} ({} )", benefit_name, badge_desc),
+ ))
else:
benefit_names.append((benefit_name,))
- benefits_str: SafeString = format_html_join(", ", "{}", benefit_names) if benefit_names else SafeText("")
+ benefits_str: SafeString = (
+ format_html_join(", ", "{}", benefit_names)
+ if benefit_names
+ else SafeText("")
+ )
if requirements:
items.append(format_html("{}: {} ", requirements, benefits_str))
else:
items.append(format_html("{} ", benefits_str))
- return format_html("", format_html_join("", "{}", [(item,) for item in items]))
+ return format_html(
+ "",
+ format_html_join("", "{}", [(item,) for item in items]),
+ )
# MARK: /rss/organizations/
@@ -265,7 +314,12 @@ class OrganizationRSSFeed(Feed):
feed_copyright: str = "Information wants to be free."
_limit: int | None = None
- def __call__(self, request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
+ def __call__(
+ self,
+ request: HttpRequest,
+ *args: object,
+ **kwargs: object,
+ ) -> HttpResponse:
"""Override to capture limit parameter from request.
Args:
@@ -332,7 +386,12 @@ class GameFeed(Feed):
feed_copyright: str = "Information wants to be free."
_limit: int | None = None
- def __call__(self, request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
+ def __call__(
+ self,
+ request: HttpRequest,
+ *args: object,
+ **kwargs: object,
+ ) -> HttpResponse:
"""Override to capture limit parameter from request.
Args:
@@ -375,7 +434,9 @@ class GameFeed(Feed):
if box_art:
description_parts.append(
- SafeText(f" "),
+ SafeText(
+ f" ",
+ ),
)
if slug:
@@ -456,7 +517,12 @@ class DropCampaignFeed(Feed):
feed_copyright: str = "Information wants to be free."
_limit: int | None = None
- def __call__(self, request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
+ def __call__(
+ self,
+ request: HttpRequest,
+ *args: object,
+ **kwargs: object,
+ ) -> HttpResponse:
"""Override to capture limit parameter from request.
Args:
@@ -475,7 +541,7 @@ class DropCampaignFeed(Feed):
return super().__call__(request, *args, **kwargs)
def items(self) -> list[DropCampaign]:
- """Return the latest drop campaigns ordered by most recent start date (default 200, or limited by ?limit query param).""" # noqa: E501
+ """Return the latest drop campaigns ordered by most recent start date (default 200, or limited by ?limit query param)."""
limit: int = self._limit if self._limit is not None else 200
queryset: QuerySet[DropCampaign] = DropCampaign.objects.order_by("-start_at")
return list(_with_campaign_related(queryset)[:limit])
@@ -500,7 +566,11 @@ class DropCampaignFeed(Feed):
if image_url:
item_name: str = getattr(item, "name", str(object=item))
parts.append(
- format_html(' ', image_url, item_name),
+ format_html(
+ ' ',
+ image_url,
+ item_name,
+ ),
)
desc_text: str | None = getattr(item, "description", None)
@@ -511,7 +581,12 @@ class DropCampaignFeed(Feed):
insert_date_info(item, parts)
if drops_data:
- parts.append(format_html("{}
", _construct_drops_summary(drops_data, channel_name=channel_name)))
+ parts.append(
+ format_html(
+ "{}
",
+ _construct_drops_summary(drops_data, channel_name=channel_name),
+ ),
+ )
# Only show channels if drop is not subscription only
if not getattr(item, "is_subscription_only", False) and channels is not None:
@@ -573,7 +648,12 @@ class GameCampaignFeed(Feed):
feed_copyright: str = "Information wants to be free."
_limit: int | None = None
- def __call__(self, request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
+ def __call__(
+ self,
+ request: HttpRequest,
+ *args: object,
+ **kwargs: object,
+ ) -> HttpResponse:
"""Override to capture limit parameter from request.
Args:
@@ -620,9 +700,11 @@ class GameCampaignFeed(Feed):
return reverse("twitch:game_campaign_feed", args=[obj.twitch_id])
def items(self, obj: Game) -> list[DropCampaign]:
- """Return the latest drop campaigns for this game, ordered by most recent start date (default 200, or limited by ?limit query param).""" # noqa: E501
+ """Return the latest drop campaigns for this game, ordered by most recent start date (default 200, or limited by ?limit query param)."""
limit: int = self._limit if self._limit is not None else 200
- queryset: QuerySet[DropCampaign] = DropCampaign.objects.filter(game=obj).order_by("-start_at")
+ queryset: QuerySet[DropCampaign] = DropCampaign.objects.filter(
+ game=obj,
+ ).order_by("-start_at")
return list(_with_campaign_related(queryset)[:limit])
def item_title(self, item: DropCampaign) -> SafeText:
@@ -645,7 +727,11 @@ class GameCampaignFeed(Feed):
if image_url:
item_name: str = getattr(item, "name", str(object=item))
parts.append(
- format_html(' ', image_url, item_name),
+ format_html(
+ ' ',
+ image_url,
+ item_name,
+ ),
)
desc_text: str | None = getattr(item, "description", None)
@@ -656,7 +742,12 @@ class GameCampaignFeed(Feed):
insert_date_info(item, parts)
if drops_data:
- parts.append(format_html("{}
", _construct_drops_summary(drops_data, channel_name=channel_name)))
+ parts.append(
+ format_html(
+ "{}
",
+ _construct_drops_summary(drops_data, channel_name=channel_name),
+ ),
+ )
# Only show channels if drop is not subscription only
if not getattr(item, "is_subscription_only", False) and channels is not None:
@@ -669,7 +760,9 @@ class GameCampaignFeed(Feed):
account_link_url: str | None = getattr(item, "account_link_url", None)
if account_link_url:
- parts.append(format_html(' | Link Account ', account_link_url))
+ parts.append(
+ format_html(' | Link Account ', account_link_url),
+ )
return SafeText("".join(str(p) for p in parts))
@@ -723,7 +816,12 @@ class OrganizationCampaignFeed(Feed):
_limit: int | None = None
- def __call__(self, request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
+ def __call__(
+ self,
+ request: HttpRequest,
+ *args: object,
+ **kwargs: object,
+ ) -> HttpResponse:
"""Override to capture limit parameter from request.
Args:
@@ -766,9 +864,11 @@ class OrganizationCampaignFeed(Feed):
return f"Latest drop campaigns for organization {obj.name}"
def items(self, obj: Organization) -> list[DropCampaign]:
- """Return the latest drop campaigns for this organization, ordered by most recent start date (default 200, or limited by ?limit query param).""" # noqa: E501
+ """Return the latest drop campaigns for this organization, ordered by most recent start date (default 200, or limited by ?limit query param)."""
limit: int = self._limit if self._limit is not None else 200
- queryset: QuerySet[DropCampaign] = DropCampaign.objects.filter(game__owners=obj).order_by("-start_at")
+ queryset: QuerySet[DropCampaign] = DropCampaign.objects.filter(
+ game__owners=obj,
+ ).order_by("-start_at")
return list(_with_campaign_related(queryset)[:limit])
def item_author_name(self, item: DropCampaign) -> str:
@@ -829,7 +929,11 @@ class OrganizationCampaignFeed(Feed):
if image_url:
item_name: str = getattr(item, "name", str(object=item))
parts.append(
- format_html(' ', image_url, item_name),
+ format_html(
+ ' ',
+ image_url,
+ item_name,
+ ),
)
desc_text: str | None = getattr(item, "description", None)
@@ -840,7 +944,12 @@ class OrganizationCampaignFeed(Feed):
insert_date_info(item, parts)
if drops_data:
- parts.append(format_html("{}
", _construct_drops_summary(drops_data, channel_name=channel_name)))
+ parts.append(
+ format_html(
+ "{}
",
+ _construct_drops_summary(drops_data, channel_name=channel_name),
+ ),
+ )
# Only show channels if drop is not subscription only
if not getattr(item, "is_subscription_only", False) and channels is not None:
@@ -865,7 +974,12 @@ class RewardCampaignFeed(Feed):
feed_copyright: str = "Information wants to be free."
_limit: int | None = None
- def __call__(self, request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
+ def __call__(
+ self,
+ request: HttpRequest,
+ *args: object,
+ **kwargs: object,
+ ) -> HttpResponse:
"""Override to capture limit parameter from request.
Args:
diff --git a/twitch/management/commands/backfill_image_dimensions.py b/twitch/management/commands/backfill_image_dimensions.py
index 6101204..44e7a9b 100644
--- a/twitch/management/commands/backfill_image_dimensions.py
+++ b/twitch/management/commands/backfill_image_dimensions.py
@@ -1,7 +1,5 @@
"""Management command to backfill image dimensions for existing cached images."""
-from __future__ import annotations
-
from django.core.management.base import BaseCommand
from twitch.models import DropBenefit
diff --git a/twitch/management/commands/backup_db.py b/twitch/management/commands/backup_db.py
index 53ec0da..c708cbb 100644
--- a/twitch/management/commands/backup_db.py
+++ b/twitch/management/commands/backup_db.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
import io
import os
import shutil
@@ -79,7 +77,10 @@ class Command(BaseCommand):
msg = f"Unsupported database backend: {django_connection.vendor}"
raise CommandError(msg)
- created_at: datetime = datetime.fromtimestamp(output_path.stat().st_mtime, tz=timezone.get_current_timezone())
+ created_at: datetime = datetime.fromtimestamp(
+ output_path.stat().st_mtime,
+ tz=timezone.get_current_timezone(),
+ )
self.stdout.write(
self.style.SUCCESS(
f"Backup created: {output_path} (updated {created_at.isoformat()})",
@@ -111,7 +112,11 @@ def _get_allowed_tables(prefix: str) -> list[str]:
return [row[0] for row in cursor.fetchall()]
-def _write_sqlite_dump(handle: io.TextIOBase, connection: sqlite3.Connection, tables: list[str]) -> None:
+def _write_sqlite_dump(
+ handle: io.TextIOBase,
+ connection: sqlite3.Connection,
+ tables: list[str],
+) -> None:
"""Write a SQL dump containing schema and data for the requested tables.
Args:
@@ -154,7 +159,11 @@ def _get_table_schema(connection: sqlite3.Connection, table: str) -> str:
return row[0] if row and row[0] else ""
-def _write_table_rows(handle: io.TextIOBase, connection: sqlite3.Connection, table: str) -> None:
+def _write_table_rows(
+ handle: io.TextIOBase,
+ connection: sqlite3.Connection,
+ table: str,
+) -> None:
"""Write INSERT statements for a table.
Args:
@@ -169,7 +178,11 @@ def _write_table_rows(handle: io.TextIOBase, connection: sqlite3.Connection, tab
handle.write(f'INSERT INTO "{table}" VALUES ({values});\n') # noqa: S608
-def _write_indexes(handle: io.TextIOBase, connection: sqlite3.Connection, tables: list[str]) -> None:
+def _write_indexes(
+ handle: io.TextIOBase,
+ connection: sqlite3.Connection,
+ tables: list[str],
+) -> None:
"""Write CREATE INDEX statements for included tables.
Args:
@@ -251,10 +264,7 @@ def _write_postgres_dump(output_path: Path, tables: list[str]) -> None:
msg = "pg_dump process did not provide stdout or stderr."
raise CommandError(msg)
- with (
- output_path.open("wb") as raw_handle,
- zstd.open(raw_handle, "w") as compressed,
- ):
+ with output_path.open("wb") as raw_handle, zstd.open(raw_handle, "w") as compressed:
for chunk in iter(lambda: process.stdout.read(64 * 1024), b""): # pyright: ignore[reportOptionalMemberAccess]
compressed.write(chunk)
diff --git a/twitch/management/commands/better_import_drops.py b/twitch/management/commands/better_import_drops.py
index 38c5735..3fea772 100644
--- a/twitch/management/commands/better_import_drops.py
+++ b/twitch/management/commands/better_import_drops.py
@@ -1,11 +1,10 @@
-from __future__ import annotations
-
import json
import os
import sys
from datetime import UTC
from datetime import datetime
from pathlib import Path
+from typing import TYPE_CHECKING
from typing import Any
from typing import Literal
from urllib.parse import urlparse
@@ -18,8 +17,6 @@ from colorama import init as colorama_init
from django.core.files.base import ContentFile
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
-from django.core.management.base import CommandParser
-from json_repair import JSONReturnType
from pydantic import ValidationError
from tqdm import tqdm
@@ -31,21 +28,26 @@ from twitch.models import Game
from twitch.models import Organization
from twitch.models import RewardCampaign
from twitch.models import TimeBasedDrop
-from twitch.schemas import ChannelInfoSchema
-from twitch.schemas import CurrentUserSchema
-from twitch.schemas import DropBenefitEdgeSchema
-from twitch.schemas import DropBenefitSchema
-from twitch.schemas import DropCampaignACLSchema
-from twitch.schemas import DropCampaignSchema
-from twitch.schemas import GameSchema
from twitch.schemas import GraphQLResponse
from twitch.schemas import OrganizationSchema
-from twitch.schemas import RewardCampaign as RewardCampaignSchema
-from twitch.schemas import TimeBasedDropSchema
from twitch.utils import is_twitch_box_art_url
from twitch.utils import normalize_twitch_box_art_url
from twitch.utils import parse_date
+if TYPE_CHECKING:
+ from django.core.management.base import CommandParser
+ from json_repair import JSONReturnType
+
+ from twitch.schemas import ChannelInfoSchema
+ from twitch.schemas import CurrentUserSchema
+ from twitch.schemas import DropBenefitEdgeSchema
+ from twitch.schemas import DropBenefitSchema
+ from twitch.schemas import DropCampaignACLSchema
+ from twitch.schemas import DropCampaignSchema
+ from twitch.schemas import GameSchema
+ from twitch.schemas import RewardCampaign as RewardCampaignSchema
+ from twitch.schemas import TimeBasedDropSchema
+
def get_broken_directory_root() -> Path:
"""Get the root broken directory path from environment or default.
@@ -83,10 +85,7 @@ def get_imported_directory_root() -> Path:
return home / "ttvdrops" / "imported"
-def _build_broken_directory(
- reason: str,
- operation_name: str | None = None,
-) -> Path:
+def _build_broken_directory(reason: str, operation_name: str | None = None) -> Path:
"""Compute a deeply nested broken directory for triage.
Directory pattern: /////
@@ -104,16 +103,32 @@ def _build_broken_directory(
# If operation_name matches reason, skip it to avoid duplicate directories
if operation_name and operation_name.replace(" ", "_") == safe_reason:
- broken_dir: Path = get_broken_directory_root() / safe_reason / f"{now:%Y}" / f"{now:%m}" / f"{now:%d}"
+ broken_dir: Path = (
+ get_broken_directory_root()
+ / safe_reason
+ / f"{now:%Y}"
+ / f"{now:%m}"
+ / f"{now:%d}"
+ )
else:
op_segment: str = (operation_name or "unknown_op").replace(" ", "_")
- broken_dir = get_broken_directory_root() / safe_reason / op_segment / f"{now:%Y}" / f"{now:%m}" / f"{now:%d}"
+ broken_dir = (
+ get_broken_directory_root()
+ / safe_reason
+ / op_segment
+ / f"{now:%Y}"
+ / f"{now:%m}"
+ / f"{now:%d}"
+ )
broken_dir.mkdir(parents=True, exist_ok=True)
return broken_dir
-def move_failed_validation_file(file_path: Path, operation_name: str | None = None) -> Path:
+def move_failed_validation_file(
+ file_path: Path,
+ operation_name: str | None = None,
+) -> Path:
"""Moves a file that failed validation to a 'broken' subdirectory.
Args:
@@ -178,7 +193,12 @@ def move_completed_file(
Returns:
Path to the directory where the file was moved.
"""
- safe_op: str = (operation_name or "unknown_op").replace(" ", "_").replace("/", "_").replace("\\", "_")
+ safe_op: str = (
+ (operation_name or "unknown_op")
+ .replace(" ", "_")
+ .replace("/", "_")
+ .replace("\\", "_")
+ )
target_dir: Path = get_imported_directory_root() / safe_op
if campaign_structure:
@@ -249,7 +269,12 @@ def detect_error_only_response(
errors: Any = item.get("errors")
data: Any = item.get("data")
# Data is missing if key doesn't exist or value is None
- if errors and data is None and isinstance(errors, list) and len(errors) > 0:
+ if (
+ errors
+ and data is None
+ and isinstance(errors, list)
+ and len(errors) > 0
+ ):
first_error: dict[str, Any] = errors[0]
message: str = first_error.get("message", "unknown error")
return f"error_only: {message}"
@@ -327,7 +352,7 @@ def repair_partially_broken_json(raw_text: str) -> str: # noqa: PLR0915
"""
# Strategy 1: Direct repair attempt
try:
- fixed: str = json_repair.repair_json(raw_text)
+ fixed: str = json_repair.repair_json(raw_text, logging=False)
# Validate it produces valid JSON
parsed_data = json.loads(fixed)
@@ -335,7 +360,9 @@ def repair_partially_broken_json(raw_text: str) -> str: # noqa: PLR0915
if isinstance(parsed_data, list):
# Filter to only keep GraphQL responses
filtered = [
- item for item in parsed_data if isinstance(item, dict) and ("data" in item or "extensions" in item)
+ item
+ for item in parsed_data
+ if isinstance(item, dict) and ("data" in item or "extensions" in item)
]
if filtered:
# If we filtered anything out, return the filtered version
@@ -358,7 +385,10 @@ def repair_partially_broken_json(raw_text: str) -> str: # noqa: PLR0915
# Validate that all items look like GraphQL responses
if isinstance(wrapped_data, list) and wrapped_data: # noqa: SIM102
# Check if all items have "data" or "extensions" (GraphQL response structure)
- if all(isinstance(item, dict) and ("data" in item or "extensions" in item) for item in wrapped_data):
+ if all(
+ isinstance(item, dict) and ("data" in item or "extensions" in item)
+ for item in wrapped_data
+ ):
return wrapped
except (ValueError, json.JSONDecodeError):
pass
@@ -405,7 +435,7 @@ def repair_partially_broken_json(raw_text: str) -> str: # noqa: PLR0915
line: str = line.strip() # noqa: PLW2901
if line and line.startswith("{"):
try:
- fixed_line: str = json_repair.repair_json(line)
+ fixed_line: str = json_repair.repair_json(line, logging=False)
obj = json.loads(fixed_line)
# Only keep objects that look like GraphQL responses
if "data" in obj or "extensions" in obj:
@@ -428,11 +458,7 @@ class Command(BaseCommand):
def add_arguments(self, parser: CommandParser) -> None:
"""Populate the command with arguments."""
- parser.add_argument(
- "path",
- type=str,
- help="Path to JSON file or directory",
- )
+ parser.add_argument("path", type=str, help="Path to JSON file or directory")
parser.add_argument(
"--recursive",
action="store_true",
@@ -487,7 +513,9 @@ class Command(BaseCommand):
for response_data in responses:
if isinstance(response_data, dict):
try:
- response: GraphQLResponse = GraphQLResponse.model_validate(response_data)
+ response: GraphQLResponse = GraphQLResponse.model_validate(
+ response_data,
+ )
valid_responses.append(response)
except ValidationError as e:
@@ -497,8 +525,13 @@ class Command(BaseCommand):
# Move invalid inputs out of the hot path so future runs can progress.
if not options.get("skip_broken_moves"):
- op_name: str | None = extract_operation_name_from_parsed(response_data)
- broken_dir = move_failed_validation_file(file_path, operation_name=op_name)
+ op_name: str | None = extract_operation_name_from_parsed(
+ response_data,
+ )
+ broken_dir = move_failed_validation_file(
+ file_path,
+ operation_name=op_name,
+ )
# Once the file has been moved, bail out so we don't try to move it again later.
return [], broken_dir
@@ -511,10 +544,7 @@ class Command(BaseCommand):
return valid_responses, broken_dir
- def _get_or_create_organization(
- self,
- org_data: OrganizationSchema,
- ) -> Organization:
+ def _get_or_create_organization(self, org_data: OrganizationSchema) -> Organization:
"""Get or create an organization.
Args:
@@ -525,12 +555,12 @@ class Command(BaseCommand):
"""
org_obj, created = Organization.objects.update_or_create(
twitch_id=org_data.twitch_id,
- defaults={
- "name": org_data.name,
- },
+ defaults={"name": org_data.name},
)
if created:
- tqdm.write(f"{Fore.GREEN}✓{Style.RESET_ALL} Created new organization: {org_data.name}")
+ tqdm.write(
+ f"{Fore.GREEN}✓{Style.RESET_ALL} Created new organization: {org_data.name}",
+ )
return org_obj
@@ -572,7 +602,9 @@ class Command(BaseCommand):
if created or owner_orgs:
game_obj.owners.add(*owner_orgs)
if created:
- tqdm.write(f"{Fore.GREEN}✓{Style.RESET_ALL} Created new game: {game_data.display_name}")
+ tqdm.write(
+ f"{Fore.GREEN}✓{Style.RESET_ALL} Created new game: {game_data.display_name}",
+ )
self._download_game_box_art(game_obj, game_obj.box_art)
return game_obj
@@ -615,13 +647,12 @@ class Command(BaseCommand):
channel_obj, created = Channel.objects.update_or_create(
twitch_id=channel_info.twitch_id,
- defaults={
- "name": channel_info.name,
- "display_name": display_name,
- },
+ defaults={"name": channel_info.name, "display_name": display_name},
)
if created:
- tqdm.write(f"{Fore.GREEN}✓{Style.RESET_ALL} Created new channel: {display_name}")
+ tqdm.write(
+ f"{Fore.GREEN}✓{Style.RESET_ALL} Created new channel: {display_name}",
+ )
return channel_obj
@@ -638,12 +669,13 @@ class Command(BaseCommand):
file_path: Path to the file being processed.
options: Command options dictionary.
- Raises:
- ValueError: If datetime parsing fails for campaign dates and
- crash-on-error is enabled.
Returns:
Tuple of (success flag, broken directory path if moved).
+
+ Raises:
+ ValueError: If datetime parsing fails for campaign dates and
+ crash-on-error is enabled.
"""
valid_responses, broken_dir = self._validate_responses(
responses=responses,
@@ -659,7 +691,9 @@ class Command(BaseCommand):
campaigns_to_process: list[DropCampaignSchema] = []
# Source 1: User or CurrentUser field (handles plural, singular, inventory)
- user_obj: CurrentUserSchema | None = response.data.current_user or response.data.user
+ user_obj: CurrentUserSchema | None = (
+ response.data.current_user or response.data.user
+ )
if user_obj and user_obj.drop_campaigns:
campaigns_to_process.extend(user_obj.drop_campaigns)
@@ -676,7 +710,11 @@ class Command(BaseCommand):
for drop_campaign in campaigns_to_process:
# Handle campaigns without owner (e.g., from Inventory operation)
- owner_data: OrganizationSchema | None = getattr(drop_campaign, "owner", None)
+ owner_data: OrganizationSchema | None = getattr(
+ drop_campaign,
+ "owner",
+ None,
+ )
org_obj: Organization | None = None
if owner_data:
org_obj = self._get_or_create_organization(org_data=owner_data)
@@ -690,7 +728,9 @@ class Command(BaseCommand):
end_at_dt: datetime | None = parse_date(drop_campaign.end_at)
if start_at_dt is None or end_at_dt is None:
- tqdm.write(f"{Fore.RED}✗{Style.RESET_ALL} Invalid datetime in campaign: {drop_campaign.name}")
+ tqdm.write(
+ f"{Fore.RED}✗{Style.RESET_ALL} Invalid datetime in campaign: {drop_campaign.name}",
+ )
if options.get("crash_on_error"):
msg: str = f"Failed to parse datetime for campaign {drop_campaign.name}"
raise ValueError(msg)
@@ -712,17 +752,26 @@ class Command(BaseCommand):
defaults=defaults,
)
if created:
- tqdm.write(f"{Fore.GREEN}✓{Style.RESET_ALL} Created new campaign: {drop_campaign.name}")
+ tqdm.write(
+ f"{Fore.GREEN}✓{Style.RESET_ALL} Created new campaign: {drop_campaign.name}",
+ )
- action: Literal["Imported new", "Updated"] = "Imported new" if created else "Updated"
- tqdm.write(f"{Fore.GREEN}✓{Style.RESET_ALL} {action} campaign: {drop_campaign.name}")
+ action: Literal["Imported new", "Updated"] = (
+ "Imported new" if created else "Updated"
+ )
+ tqdm.write(
+ f"{Fore.GREEN}✓{Style.RESET_ALL} {action} campaign: {drop_campaign.name}",
+ )
if (
response.extensions
and response.extensions.operation_name
- and response.extensions.operation_name not in campaign_obj.operation_names
+ and response.extensions.operation_name
+ not in campaign_obj.operation_names
):
- campaign_obj.operation_names.append(response.extensions.operation_name)
+ campaign_obj.operation_names.append(
+ response.extensions.operation_name,
+ )
campaign_obj.save(update_fields=["operation_names"])
if drop_campaign.time_based_drops:
@@ -769,7 +818,9 @@ class Command(BaseCommand):
}
if drop_schema.required_minutes_watched is not None:
- drop_defaults["required_minutes_watched"] = drop_schema.required_minutes_watched
+ drop_defaults["required_minutes_watched"] = (
+ drop_schema.required_minutes_watched
+ )
if start_at_dt is not None:
drop_defaults["start_at"] = start_at_dt
if end_at_dt is not None:
@@ -780,7 +831,9 @@ class Command(BaseCommand):
defaults=drop_defaults,
)
if created:
- tqdm.write(f"{Fore.GREEN}✓{Style.RESET_ALL} Created TimeBasedDrop: {drop_schema.name}")
+ tqdm.write(
+ f"{Fore.GREEN}✓{Style.RESET_ALL} Created TimeBasedDrop: {drop_schema.name}",
+ )
self._process_benefit_edges(
benefit_edges_schema=drop_schema.benefit_edges,
@@ -808,7 +861,9 @@ class Command(BaseCommand):
defaults=benefit_defaults,
)
if created:
- tqdm.write(f"{Fore.GREEN}✓{Style.RESET_ALL} Created DropBenefit: {benefit_schema.name}")
+ tqdm.write(
+ f"{Fore.GREEN}✓{Style.RESET_ALL} Created DropBenefit: {benefit_schema.name}",
+ )
return benefit_obj
@@ -826,7 +881,9 @@ class Command(BaseCommand):
for edge_schema in benefit_edges_schema:
benefit_schema: DropBenefitSchema = edge_schema.benefit
- benefit_obj: DropBenefit = self._get_or_update_benefit(benefit_schema=benefit_schema)
+ benefit_obj: DropBenefit = self._get_or_update_benefit(
+ benefit_schema=benefit_schema,
+ )
_edge_obj, created = DropBenefitEdge.objects.update_or_create(
drop=drop_obj,
@@ -834,7 +891,9 @@ class Command(BaseCommand):
defaults={"entitlement_limit": edge_schema.entitlement_limit},
)
if created:
- tqdm.write(f"{Fore.GREEN}✓{Style.RESET_ALL} Linked benefit: {benefit_schema.name} → {drop_obj.name}")
+ tqdm.write(
+ f"{Fore.GREEN}✓{Style.RESET_ALL} Linked benefit: {benefit_schema.name} → {drop_obj.name}",
+ )
def _process_allowed_channels(
self,
@@ -852,7 +911,9 @@ class Command(BaseCommand):
"""
# Update the allow_is_enabled flag if changed
# Default to True if is_enabled is None (API doesn't always provide this field)
- is_enabled: bool = allow_schema.is_enabled if allow_schema.is_enabled is not None else True
+ is_enabled: bool = (
+ allow_schema.is_enabled if allow_schema.is_enabled is not None else True
+ )
if campaign_obj.allow_is_enabled != is_enabled:
campaign_obj.allow_is_enabled = is_enabled
campaign_obj.save(update_fields=["allow_is_enabled"])
@@ -864,7 +925,9 @@ class Command(BaseCommand):
channel_objects: list[Channel] = []
if allow_schema.channels:
for channel_schema in allow_schema.channels:
- channel_obj: Channel = self._get_or_create_channel(channel_info=channel_schema)
+ channel_obj: Channel = self._get_or_create_channel(
+ channel_info=channel_schema,
+ )
channel_objects.append(channel_obj)
# Only update the M2M relationship if we have channels
campaign_obj.allow_channels.set(channel_objects)
@@ -889,7 +952,9 @@ class Command(BaseCommand):
ends_at_dt: datetime | None = parse_date(reward_campaign.ends_at)
if starts_at_dt is None or ends_at_dt is None:
- tqdm.write(f"{Fore.RED}✗{Style.RESET_ALL} Invalid datetime in reward campaign: {reward_campaign.name}")
+ tqdm.write(
+ f"{Fore.RED}✗{Style.RESET_ALL} Invalid datetime in reward campaign: {reward_campaign.name}",
+ )
if options.get("crash_on_error"):
msg: str = f"Failed to parse datetime for reward campaign {reward_campaign.name}"
raise ValueError(msg)
@@ -923,7 +988,9 @@ class Command(BaseCommand):
"about_url": reward_campaign.about_url,
"is_sitewide": reward_campaign.is_sitewide,
"game": game_obj,
- "image_url": reward_campaign.image.image1x_url if reward_campaign.image else "",
+ "image_url": reward_campaign.image.image1x_url
+ if reward_campaign.image
+ else "",
}
_reward_campaign_obj, created = RewardCampaign.objects.update_or_create(
@@ -931,11 +998,17 @@ class Command(BaseCommand):
defaults=defaults,
)
- action: Literal["Imported new", "Updated"] = "Imported new" if created else "Updated"
- display_name = (
- f"{reward_campaign.brand}: {reward_campaign.name}" if reward_campaign.brand else reward_campaign.name
+ action: Literal["Imported new", "Updated"] = (
+ "Imported new" if created else "Updated"
+ )
+ display_name = (
+ f"{reward_campaign.brand}: {reward_campaign.name}"
+ if reward_campaign.brand
+ else reward_campaign.name
+ )
+ tqdm.write(
+ f"{Fore.GREEN}✓{Style.RESET_ALL} {action} reward campaign: {display_name}",
)
- tqdm.write(f"{Fore.GREEN}✓{Style.RESET_ALL} {action} reward campaign: {display_name}")
def handle(self, *args, **options) -> None: # noqa: ARG002
"""Main entry point for the command.
@@ -978,7 +1051,9 @@ class Command(BaseCommand):
total=len(json_files),
desc="Processing",
unit="file",
- bar_format=("{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]"),
+ bar_format=(
+ "{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]"
+ ),
colour="green",
dynamic_ncols=True,
) as progress_bar:
@@ -991,10 +1066,14 @@ class Command(BaseCommand):
if result["success"]:
success_count += 1
if options.get("verbose"):
- progress_bar.write(f"{Fore.GREEN}✓{Style.RESET_ALL} {file_path.name}")
+ progress_bar.write(
+ f"{Fore.GREEN}✓{Style.RESET_ALL} {file_path.name}",
+ )
else:
failed_count += 1
- reason: bool | str | None = result.get("reason") if isinstance(result, dict) else None
+ reason: bool | str | None = (
+ result.get("reason") if isinstance(result, dict) else None
+ )
if reason:
progress_bar.write(
f"{Fore.RED}✗{Style.RESET_ALL} "
@@ -1009,10 +1088,15 @@ class Command(BaseCommand):
)
except (OSError, ValueError, KeyError) as e:
error_count += 1
- progress_bar.write(f"{Fore.RED}✗{Style.RESET_ALL} {file_path.name} (error: {e})")
+ progress_bar.write(
+ f"{Fore.RED}✗{Style.RESET_ALL} {file_path.name} (error: {e})",
+ )
# Update postfix with statistics
- progress_bar.set_postfix_str(f"✓ {success_count} | ✗ {failed_count + error_count}", refresh=True)
+ progress_bar.set_postfix_str(
+ f"✓ {success_count} | ✗ {failed_count + error_count}",
+ refresh=True,
+ )
progress_bar.update(1)
self.print_processing_summary(
@@ -1093,7 +1177,10 @@ class Command(BaseCommand):
return "inventory_campaigns"
# Structure: {"data": {"currentUser": {"dropCampaigns": [...]}}}
- if "dropCampaigns" in current_user and isinstance(current_user["dropCampaigns"], list):
+ if "dropCampaigns" in current_user and isinstance(
+ current_user["dropCampaigns"],
+ list,
+ ):
return "current_user_drop_campaigns"
# Structure: {"data": {"channel": {"viewerDropCampaigns": [...] or {...}}}}
@@ -1104,11 +1191,7 @@ class Command(BaseCommand):
return None
- def collect_json_files(
- self,
- options: dict,
- input_path: Path,
- ) -> list[Path]:
+ def collect_json_files(self, options: dict, input_path: Path) -> list[Path]:
"""Collect JSON files from the specified directory.
Args:
@@ -1122,9 +1205,13 @@ class Command(BaseCommand):
if options["recursive"]:
for root, _dirs, files in os.walk(input_path):
root_path = Path(root)
- json_files.extend(root_path / file for file in files if file.endswith(".json"))
+ json_files.extend(
+ root_path / file for file in files if file.endswith(".json")
+ )
else:
- json_files = [f for f in input_path.iterdir() if f.is_file() and f.suffix == ".json"]
+ json_files = [
+ f for f in input_path.iterdir() if f.is_file() and f.suffix == ".json"
+ ]
return json_files
def _normalize_responses(
@@ -1147,8 +1234,13 @@ class Command(BaseCommand):
"""
if isinstance(parsed_json, dict):
# Check for batched format: {"responses": [...]}
- if "responses" in parsed_json and isinstance(parsed_json["responses"], list):
- return [item for item in parsed_json["responses"] if isinstance(item, dict)]
+ if "responses" in parsed_json and isinstance(
+ parsed_json["responses"],
+ list,
+ ):
+ return [
+ item for item in parsed_json["responses"] if isinstance(item, dict)
+ ]
# Single response: {"data": {...}}
return [parsed_json]
if isinstance(parsed_json, list):
@@ -1171,21 +1263,21 @@ class Command(BaseCommand):
file_path: Path to the JSON file to process
options: Command options
+ Returns:
+ Dict with success status and optional broken_dir path
+
Raises:
ValidationError: If the JSON file fails validation
json.JSONDecodeError: If the JSON file cannot be parsed
-
- Returns:
- Dict with success status and optional broken_dir path
"""
try:
raw_text: str = file_path.read_text(encoding="utf-8", errors="ignore")
# Repair potentially broken JSON with multiple fallback strategies
fixed_json_str: str = repair_partially_broken_json(raw_text)
- parsed_json: JSONReturnType | tuple[JSONReturnType, list[dict[str, str]]] | str = json.loads(
- fixed_json_str,
- )
+ parsed_json: (
+ JSONReturnType | tuple[JSONReturnType, list[dict[str, str]]] | str
+ ) = json.loads(fixed_json_str)
operation_name: str | None = extract_operation_name_from_parsed(parsed_json)
# Check for error-only responses first
@@ -1197,8 +1289,16 @@ class Command(BaseCommand):
error_description,
operation_name=operation_name,
)
- return {"success": False, "broken_dir": str(broken_dir), "reason": error_description}
- return {"success": False, "broken_dir": "(skipped)", "reason": error_description}
+ return {
+ "success": False,
+ "broken_dir": str(broken_dir),
+ "reason": error_description,
+ }
+ return {
+ "success": False,
+ "broken_dir": "(skipped)",
+ "reason": error_description,
+ }
matched: str | None = detect_non_campaign_keyword(raw_text)
if matched:
@@ -1208,8 +1308,16 @@ class Command(BaseCommand):
matched,
operation_name=operation_name,
)
- return {"success": False, "broken_dir": str(broken_dir), "reason": f"matched '{matched}'"}
- return {"success": False, "broken_dir": "(skipped)", "reason": f"matched '{matched}'"}
+ return {
+ "success": False,
+ "broken_dir": str(broken_dir),
+ "reason": f"matched '{matched}'",
+ }
+ return {
+ "success": False,
+ "broken_dir": "(skipped)",
+ "reason": f"matched '{matched}'",
+ }
if "dropCampaign" not in raw_text:
if not options.get("skip_broken_moves"):
broken_dir: Path | None = move_file_to_broken_subdir(
@@ -1217,8 +1325,16 @@ class Command(BaseCommand):
"no_dropCampaign",
operation_name=operation_name,
)
- return {"success": False, "broken_dir": str(broken_dir), "reason": "no dropCampaign present"}
- return {"success": False, "broken_dir": "(skipped)", "reason": "no dropCampaign present"}
+ return {
+ "success": False,
+ "broken_dir": str(broken_dir),
+ "reason": "no dropCampaign present",
+ }
+ return {
+ "success": False,
+ "broken_dir": "(skipped)",
+ "reason": "no dropCampaign present",
+ }
# Normalize and filter to dict responses only
responses: list[dict[str, Any]] = self._normalize_responses(parsed_json)
@@ -1256,7 +1372,10 @@ class Command(BaseCommand):
if isinstance(parsed_json_local, (dict, list))
else None
)
- broken_dir = move_failed_validation_file(file_path, operation_name=op_name)
+ broken_dir = move_failed_validation_file(
+ file_path,
+ operation_name=op_name,
+ )
return {"success": False, "broken_dir": str(broken_dir)}
return {"success": False, "broken_dir": "(skipped)"}
else:
@@ -1285,10 +1404,12 @@ class Command(BaseCommand):
# Repair potentially broken JSON with multiple fallback strategies
fixed_json_str: str = repair_partially_broken_json(raw_text)
- parsed_json: JSONReturnType | tuple[JSONReturnType, list[dict[str, str]]] | str = json.loads(
- fixed_json_str,
+ parsed_json: (
+ JSONReturnType | tuple[JSONReturnType, list[dict[str, str]]] | str
+ ) = json.loads(fixed_json_str)
+ operation_name: str | None = extract_operation_name_from_parsed(
+ parsed_json,
)
- operation_name: str | None = extract_operation_name_from_parsed(parsed_json)
# Check for error-only responses first
error_description: str | None = detect_error_only_response(parsed_json)
@@ -1386,7 +1507,14 @@ class Command(BaseCommand):
if isinstance(parsed_json_local, (dict, list))
else None
)
- broken_dir = move_failed_validation_file(file_path, operation_name=op_name)
- progress_bar.write(f"{Fore.RED}✗{Style.RESET_ALL} {file_path.name} → {broken_dir}/{file_path.name}")
+ broken_dir = move_failed_validation_file(
+ file_path,
+ operation_name=op_name,
+ )
+ progress_bar.write(
+ f"{Fore.RED}✗{Style.RESET_ALL} {file_path.name} → {broken_dir}/{file_path.name}",
+ )
else:
- progress_bar.write(f"{Fore.RED}✗{Style.RESET_ALL} {file_path.name} (move skipped)")
+ progress_bar.write(
+ f"{Fore.RED}✗{Style.RESET_ALL} {file_path.name} (move skipped)",
+ )
diff --git a/twitch/management/commands/cleanup_orphaned_channels.py b/twitch/management/commands/cleanup_orphaned_channels.py
index 672b97e..a99aa90 100644
--- a/twitch/management/commands/cleanup_orphaned_channels.py
+++ b/twitch/management/commands/cleanup_orphaned_channels.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
from typing import TYPE_CHECKING
from django.core.management.base import BaseCommand
@@ -54,21 +52,31 @@ class Command(BaseCommand):
self.stdout.write(self.style.SUCCESS("No orphaned channels found."))
return
- self.stdout.write(f"Found {count} orphaned channels with no associated campaigns:")
+ self.stdout.write(
+ f"Found {count} orphaned channels with no associated campaigns:",
+ )
# Show sample of channels to be deleted
for channel in orphaned_channels[:SAMPLE_PREVIEW_COUNT]:
- self.stdout.write(f" - {channel.display_name} (Twitch ID: {channel.twitch_id})")
+ self.stdout.write(
+ f" - {channel.display_name} (Twitch ID: {channel.twitch_id})",
+ )
if count > SAMPLE_PREVIEW_COUNT:
self.stdout.write(f" ... and {count - SAMPLE_PREVIEW_COUNT} more")
if dry_run:
- self.stdout.write(self.style.WARNING(f"\n[DRY RUN] Would delete {count} orphaned channels."))
+ self.stdout.write(
+ self.style.WARNING(
+ f"\n[DRY RUN] Would delete {count} orphaned channels.",
+ ),
+ )
return
if not force:
- response: str = input(f"\nAre you sure you want to delete {count} orphaned channels? (yes/no): ")
+ response: str = input(
+ f"\nAre you sure you want to delete {count} orphaned channels? (yes/no): ",
+ )
if response.lower() != "yes":
self.stdout.write(self.style.WARNING("Cancelled."))
return
@@ -76,4 +84,8 @@ class Command(BaseCommand):
# Delete the orphaned channels
deleted_count, _ = orphaned_channels.delete()
- self.stdout.write(self.style.SUCCESS(f"\nSuccessfully deleted {deleted_count} orphaned channels."))
+ self.stdout.write(
+ self.style.SUCCESS(
+ f"\nSuccessfully deleted {deleted_count} orphaned channels.",
+ ),
+ )
diff --git a/twitch/management/commands/cleanup_unknown_organizations.py b/twitch/management/commands/cleanup_unknown_organizations.py
index f4aade6..28c8efb 100644
--- a/twitch/management/commands/cleanup_unknown_organizations.py
+++ b/twitch/management/commands/cleanup_unknown_organizations.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
from typing import TYPE_CHECKING
from typing import Any
@@ -8,13 +6,13 @@ from colorama import Style
from colorama import init as colorama_init
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
-from django.core.management.base import CommandParser
from twitch.models import Game
from twitch.models import Organization
if TYPE_CHECKING:
from debug_toolbar.panels.templates.panel import QuerySet
+ from django.core.management.base import CommandParser
class Command(BaseCommand):
@@ -70,11 +68,15 @@ class Command(BaseCommand):
try:
org: Organization = Organization.objects.get(twitch_id=org_id)
except Organization.DoesNotExist as exc: # pragma: no cover - simple guard
- msg: str = f"Organization with twitch_id='{org_id}' does not exist. Nothing to do."
+ msg: str = (
+ f"Organization with twitch_id='{org_id}' does not exist. Nothing to do."
+ )
raise CommandError(msg) from exc
# Compute the set of affected games via the through relation for accuracy and performance
- affected_games_qs: QuerySet[Game, Game] = Game.objects.filter(owners=org).order_by("display_name")
+ affected_games_qs: QuerySet[Game, Game] = Game.objects.filter(
+ owners=org,
+ ).order_by("display_name")
affected_count: int = affected_games_qs.count()
if affected_count == 0:
@@ -83,7 +85,7 @@ class Command(BaseCommand):
)
else:
self.stdout.write(
- f"{Fore.CYAN}•{Style.RESET_ALL} Found {affected_count:,} game(s) linked to '{org.name}' ({org.twitch_id}).", # noqa: E501
+ f"{Fore.CYAN}•{Style.RESET_ALL} Found {affected_count:,} game(s) linked to '{org.name}' ({org.twitch_id}).",
)
# Show a short preview list in dry-run mode
@@ -112,9 +114,9 @@ class Command(BaseCommand):
org_twid: str = org.twitch_id
org.delete()
self.stdout.write(
- f"{Fore.GREEN}✓{Style.RESET_ALL} Deleted organization '{org_name}' ({org_twid}) as it has no games.", # noqa: E501
+ f"{Fore.GREEN}✓{Style.RESET_ALL} Deleted organization '{org_name}' ({org_twid}) as it has no games.",
)
else:
self.stdout.write(
- f"{Fore.YELLOW}→{Style.RESET_ALL} Organization '{org.name}' still has {remaining_games:,} game(s); not deleted.", # noqa: E501
+ f"{Fore.YELLOW}→{Style.RESET_ALL} Organization '{org.name}' still has {remaining_games:,} game(s); not deleted.",
)
diff --git a/twitch/management/commands/convert_images_to_modern_formats.py b/twitch/management/commands/convert_images_to_modern_formats.py
index f25f07c..3b516f2 100644
--- a/twitch/management/commands/convert_images_to_modern_formats.py
+++ b/twitch/management/commands/convert_images_to_modern_formats.py
@@ -1,7 +1,5 @@
"""Management command to convert existing images to WebP and AVIF formats."""
-from __future__ import annotations
-
import logging
from pathlib import Path
from typing import TYPE_CHECKING
@@ -48,12 +46,18 @@ class Command(BaseCommand):
media_root = Path(settings.MEDIA_ROOT)
if not media_root.exists():
- self.stdout.write(self.style.WARNING(f"MEDIA_ROOT does not exist: {media_root}"))
+ self.stdout.write(
+ self.style.WARNING(f"MEDIA_ROOT does not exist: {media_root}"),
+ )
return
# Find all JPG and PNG files
image_extensions = {".jpg", ".jpeg", ".png"}
- image_files = [f for f in media_root.rglob("*") if f.is_file() and f.suffix.lower() in image_extensions]
+ image_files = [
+ f
+ for f in media_root.rglob("*")
+ if f.is_file() and f.suffix.lower() in image_extensions
+ ]
if not image_files:
self.stdout.write(self.style.SUCCESS("No images found to convert"))
@@ -80,7 +84,9 @@ class Command(BaseCommand):
continue
if dry_run:
- self.stdout.write(f"Would convert: {image_path.relative_to(media_root)}")
+ self.stdout.write(
+ f"Would convert: {image_path.relative_to(media_root)}",
+ )
if needs_webp:
self.stdout.write(f" → {webp_path.relative_to(media_root)}")
if needs_avif:
@@ -104,14 +110,20 @@ class Command(BaseCommand):
except Exception as e:
error_count += 1
self.stdout.write(
- self.style.ERROR(f"✗ Error converting {image_path.relative_to(media_root)}: {e}"),
+ self.style.ERROR(
+ f"✗ Error converting {image_path.relative_to(media_root)}: {e}",
+ ),
)
logger.exception("Failed to convert image: %s", image_path)
# Summary
self.stdout.write("\n" + "=" * 50)
if dry_run:
- self.stdout.write(self.style.SUCCESS(f"Dry run complete. Would convert {converted_count} images"))
+ self.stdout.write(
+ self.style.SUCCESS(
+ f"Dry run complete. Would convert {converted_count} images",
+ ),
+ )
else:
self.stdout.write(self.style.SUCCESS(f"Converted: {converted_count}"))
self.stdout.write(f"Skipped (already exist): {skipped_count}")
@@ -177,11 +189,16 @@ class Command(BaseCommand):
Returns:
RGB PIL Image ready for encoding
"""
- if img.mode in {"RGBA", "LA"} or (img.mode == "P" and "transparency" in img.info):
+ if img.mode in {"RGBA", "LA"} or (
+ img.mode == "P" and "transparency" in img.info
+ ):
# Create white background for transparency
background = Image.new("RGB", img.size, (255, 255, 255))
rgba_img = img.convert("RGBA") if img.mode == "P" else img
- background.paste(rgba_img, mask=rgba_img.split()[-1] if rgba_img.mode in {"RGBA", "LA"} else None)
+ background.paste(
+ rgba_img,
+ mask=rgba_img.split()[-1] if rgba_img.mode in {"RGBA", "LA"} else None,
+ )
return background
if img.mode != "RGB":
return img.convert("RGB")
diff --git a/twitch/management/commands/download_box_art.py b/twitch/management/commands/download_box_art.py
index 31afc95..e0ea78b 100644
--- a/twitch/management/commands/download_box_art.py
+++ b/twitch/management/commands/download_box_art.py
@@ -1,15 +1,11 @@
-from __future__ import annotations
-
from pathlib import Path
from typing import TYPE_CHECKING
-from urllib.parse import ParseResult
from urllib.parse import urlparse
import httpx
from django.conf import settings
from django.core.files.base import ContentFile
from django.core.management.base import BaseCommand
-from django.core.management.base import CommandParser
from PIL import Image
from twitch.models import Game
@@ -17,6 +13,9 @@ from twitch.utils import is_twitch_box_art_url
from twitch.utils import normalize_twitch_box_art_url
if TYPE_CHECKING:
+ from urllib.parse import ParseResult
+
+ from django.core.management.base import CommandParser
from django.db.models import QuerySet
@@ -63,7 +62,11 @@ class Command(BaseCommand):
if not is_twitch_box_art_url(game.box_art):
skipped += 1
continue
- if game.box_art_file and getattr(game.box_art_file, "name", "") and not force:
+ if (
+ game.box_art_file
+ and getattr(game.box_art_file, "name", "")
+ and not force
+ ):
skipped += 1
continue
@@ -89,7 +92,11 @@ class Command(BaseCommand):
skipped += 1
continue
- game.box_art_file.save(file_name, ContentFile(response.content), save=True)
+ game.box_art_file.save(
+ file_name,
+ ContentFile(response.content),
+ save=True,
+ )
# Auto-convert to WebP and AVIF
self._convert_to_modern_formats(game.box_art_file.path)
@@ -113,7 +120,11 @@ class Command(BaseCommand):
"""
try:
source_path = Path(image_path)
- if not source_path.exists() or source_path.suffix.lower() not in {".jpg", ".jpeg", ".png"}:
+ if not source_path.exists() or source_path.suffix.lower() not in {
+ ".jpg",
+ ".jpeg",
+ ".png",
+ }:
return
base_path = source_path.with_suffix("")
@@ -122,10 +133,17 @@ class Command(BaseCommand):
with Image.open(source_path) as img:
# Convert to RGB if needed
- if img.mode in {"RGBA", "LA"} or (img.mode == "P" and "transparency" in img.info):
+ if img.mode in {"RGBA", "LA"} or (
+ img.mode == "P" and "transparency" in img.info
+ ):
background = Image.new("RGB", img.size, (255, 255, 255))
rgba_img = img.convert("RGBA") if img.mode == "P" else img
- background.paste(rgba_img, mask=rgba_img.split()[-1] if rgba_img.mode in {"RGBA", "LA"} else None)
+ background.paste(
+ rgba_img,
+ mask=rgba_img.split()[-1]
+ if rgba_img.mode in {"RGBA", "LA"}
+ else None,
+ )
rgb_img = background
elif img.mode != "RGB":
rgb_img = img.convert("RGB")
@@ -140,4 +158,6 @@ class Command(BaseCommand):
except (OSError, ValueError) as e:
# Don't fail the download if conversion fails
- self.stdout.write(self.style.WARNING(f"Failed to convert {image_path}: {e}"))
+ self.stdout.write(
+ self.style.WARNING(f"Failed to convert {image_path}: {e}"),
+ )
diff --git a/twitch/management/commands/download_campaign_images.py b/twitch/management/commands/download_campaign_images.py
index cf48908..4d419ca 100644
--- a/twitch/management/commands/download_campaign_images.py
+++ b/twitch/management/commands/download_campaign_images.py
@@ -1,17 +1,13 @@
"""Management command to download and cache campaign, benefit, and reward images locally."""
-from __future__ import annotations
-
from pathlib import Path
from typing import TYPE_CHECKING
-from urllib.parse import ParseResult
from urllib.parse import urlparse
import httpx
from django.conf import settings
from django.core.files.base import ContentFile
from django.core.management.base import BaseCommand
-from django.core.management.base import CommandParser
from PIL import Image
from twitch.models import DropBenefit
@@ -19,6 +15,9 @@ from twitch.models import DropCampaign
from twitch.models import RewardCampaign
if TYPE_CHECKING:
+ from urllib.parse import ParseResult
+
+ from django.core.management.base import CommandParser
from django.db.models import QuerySet
from django.db.models.fields.files import FieldFile
@@ -66,20 +65,38 @@ class Command(BaseCommand):
with httpx.Client(timeout=20, follow_redirects=True) as client:
if model_choice in {"campaigns", "all"}:
- self.stdout.write(self.style.MIGRATE_HEADING("\nProcessing Drop Campaigns..."))
- stats = self._download_campaign_images(client=client, limit=limit, force=force)
+ self.stdout.write(
+ self.style.MIGRATE_HEADING("\nProcessing Drop Campaigns..."),
+ )
+ stats = self._download_campaign_images(
+ client=client,
+ limit=limit,
+ force=force,
+ )
self._merge_stats(total_stats, stats)
self._print_stats("Drop Campaigns", stats)
if model_choice in {"benefits", "all"}:
- self.stdout.write(self.style.MIGRATE_HEADING("\nProcessing Drop Benefits..."))
- stats = self._download_benefit_images(client=client, limit=limit, force=force)
+ self.stdout.write(
+ self.style.MIGRATE_HEADING("\nProcessing Drop Benefits..."),
+ )
+ stats = self._download_benefit_images(
+ client=client,
+ limit=limit,
+ force=force,
+ )
self._merge_stats(total_stats, stats)
self._print_stats("Drop Benefits", stats)
if model_choice in {"rewards", "all"}:
- self.stdout.write(self.style.MIGRATE_HEADING("\nProcessing Reward Campaigns..."))
- stats = self._download_reward_campaign_images(client=client, limit=limit, force=force)
+ self.stdout.write(
+ self.style.MIGRATE_HEADING("\nProcessing Reward Campaigns..."),
+ )
+ stats = self._download_reward_campaign_images(
+ client=client,
+ limit=limit,
+ force=force,
+ )
self._merge_stats(total_stats, stats)
self._print_stats("Reward Campaigns", stats)
@@ -107,18 +124,30 @@ class Command(BaseCommand):
Returns:
Dictionary with download statistics (total, downloaded, skipped, failed, placeholders_404).
"""
- queryset: QuerySet[DropCampaign] = DropCampaign.objects.all().order_by("twitch_id")
+ queryset: QuerySet[DropCampaign] = DropCampaign.objects.all().order_by(
+ "twitch_id",
+ )
if limit:
queryset = queryset[:limit]
- stats: dict[str, int] = {"total": 0, "downloaded": 0, "skipped": 0, "failed": 0, "placeholders_404": 0}
+ stats: dict[str, int] = {
+ "total": 0,
+ "downloaded": 0,
+ "skipped": 0,
+ "failed": 0,
+ "placeholders_404": 0,
+ }
stats["total"] = queryset.count()
for campaign in queryset:
if not campaign.image_url:
stats["skipped"] += 1
continue
- if campaign.image_file and getattr(campaign.image_file, "name", "") and not force:
+ if (
+ campaign.image_file
+ and getattr(campaign.image_file, "name", "")
+ and not force
+ ):
stats["skipped"] += 1
continue
@@ -144,18 +173,30 @@ class Command(BaseCommand):
Returns:
Dictionary with download statistics (total, downloaded, skipped, failed, placeholders_404).
"""
- queryset: QuerySet[DropBenefit] = DropBenefit.objects.all().order_by("twitch_id")
+ queryset: QuerySet[DropBenefit] = DropBenefit.objects.all().order_by(
+ "twitch_id",
+ )
if limit:
queryset = queryset[:limit]
- stats: dict[str, int] = {"total": 0, "downloaded": 0, "skipped": 0, "failed": 0, "placeholders_404": 0}
+ stats: dict[str, int] = {
+ "total": 0,
+ "downloaded": 0,
+ "skipped": 0,
+ "failed": 0,
+ "placeholders_404": 0,
+ }
stats["total"] = queryset.count()
for benefit in queryset:
if not benefit.image_asset_url:
stats["skipped"] += 1
continue
- if benefit.image_file and getattr(benefit.image_file, "name", "") and not force:
+ if (
+ benefit.image_file
+ and getattr(benefit.image_file, "name", "")
+ and not force
+ ):
stats["skipped"] += 1
continue
@@ -181,18 +222,30 @@ class Command(BaseCommand):
Returns:
Dictionary with download statistics (total, downloaded, skipped, failed, placeholders_404).
"""
- queryset: QuerySet[RewardCampaign] = RewardCampaign.objects.all().order_by("twitch_id")
+ queryset: QuerySet[RewardCampaign] = RewardCampaign.objects.all().order_by(
+ "twitch_id",
+ )
if limit:
queryset = queryset[:limit]
- stats: dict[str, int] = {"total": 0, "downloaded": 0, "skipped": 0, "failed": 0, "placeholders_404": 0}
+ stats: dict[str, int] = {
+ "total": 0,
+ "downloaded": 0,
+ "skipped": 0,
+ "failed": 0,
+ "placeholders_404": 0,
+ }
stats["total"] = queryset.count()
for reward_campaign in queryset:
if not reward_campaign.image_url:
stats["skipped"] += 1
continue
- if reward_campaign.image_file and getattr(reward_campaign.image_file, "name", "") and not force:
+ if (
+ reward_campaign.image_file
+ and getattr(reward_campaign.image_file, "name", "")
+ and not force
+ ):
stats["skipped"] += 1
continue
@@ -233,9 +286,7 @@ class Command(BaseCommand):
response.raise_for_status()
except httpx.HTTPError as exc:
self.stdout.write(
- self.style.WARNING(
- f"Failed to download image for {twitch_id}: {exc}",
- ),
+ self.style.WARNING(f"Failed to download image for {twitch_id}: {exc}"),
)
return "failed"
@@ -262,7 +313,11 @@ class Command(BaseCommand):
"""
try:
source_path = Path(image_path)
- if not source_path.exists() or source_path.suffix.lower() not in {".jpg", ".jpeg", ".png"}:
+ if not source_path.exists() or source_path.suffix.lower() not in {
+ ".jpg",
+ ".jpeg",
+ ".png",
+ }:
return
base_path = source_path.with_suffix("")
@@ -271,10 +326,17 @@ class Command(BaseCommand):
with Image.open(source_path) as img:
# Convert to RGB if needed
- if img.mode in {"RGBA", "LA"} or (img.mode == "P" and "transparency" in img.info):
+ if img.mode in {"RGBA", "LA"} or (
+ img.mode == "P" and "transparency" in img.info
+ ):
background = Image.new("RGB", img.size, (255, 255, 255))
rgba_img = img.convert("RGBA") if img.mode == "P" else img
- background.paste(rgba_img, mask=rgba_img.split()[-1] if rgba_img.mode in {"RGBA", "LA"} else None)
+ background.paste(
+ rgba_img,
+ mask=rgba_img.split()[-1]
+ if rgba_img.mode in {"RGBA", "LA"}
+ else None,
+ )
rgb_img = background
elif img.mode != "RGB":
rgb_img = img.convert("RGB")
@@ -289,7 +351,9 @@ class Command(BaseCommand):
except (OSError, ValueError) as e:
# Don't fail the download if conversion fails
- self.stdout.write(self.style.WARNING(f"Failed to convert {image_path}: {e}"))
+ self.stdout.write(
+ self.style.WARNING(f"Failed to convert {image_path}: {e}"),
+ )
def _merge_stats(self, total: dict[str, int], new: dict[str, int]) -> None:
"""Merge statistics from a single model into the total stats."""
diff --git a/twitch/management/commands/import_chat_badges.py b/twitch/management/commands/import_chat_badges.py
index 405554d..e3df831 100644
--- a/twitch/management/commands/import_chat_badges.py
+++ b/twitch/management/commands/import_chat_badges.py
@@ -1,7 +1,5 @@
"""Management command to import Twitch global chat badges."""
-from __future__ import annotations
-
import logging
import os
from typing import TYPE_CHECKING
@@ -13,15 +11,16 @@ from colorama import Style
from colorama import init as colorama_init
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
-from django.core.management.base import CommandParser
from pydantic import ValidationError
from twitch.models import ChatBadge
from twitch.models import ChatBadgeSet
-from twitch.schemas import ChatBadgeSetSchema
from twitch.schemas import GlobalChatBadgesResponse
if TYPE_CHECKING:
+ from django.core.management.base import CommandParser
+
+ from twitch.schemas import ChatBadgeSetSchema
from twitch.schemas import ChatBadgeVersionSchema
logger: logging.Logger = logging.getLogger("ttvdrops")
@@ -60,9 +59,15 @@ class Command(BaseCommand):
colorama_init(autoreset=True)
# Get credentials from arguments or environment
- client_id: str | None = options.get("client_id") or os.getenv("TWITCH_CLIENT_ID")
- client_secret: str | None = options.get("client_secret") or os.getenv("TWITCH_CLIENT_SECRET")
- access_token: str | None = options.get("access_token") or os.getenv("TWITCH_ACCESS_TOKEN")
+ client_id: str | None = options.get("client_id") or os.getenv(
+ "TWITCH_CLIENT_ID",
+ )
+ client_secret: str | None = options.get("client_secret") or os.getenv(
+ "TWITCH_CLIENT_SECRET",
+ )
+ access_token: str | None = options.get("access_token") or os.getenv(
+ "TWITCH_ACCESS_TOKEN",
+ )
if not client_id:
msg = (
@@ -84,7 +89,9 @@ class Command(BaseCommand):
self.stdout.write("Obtaining access token from Twitch...")
try:
access_token = self._get_app_access_token(client_id, client_secret)
- self.stdout.write(self.style.SUCCESS("✓ Access token obtained successfully"))
+ self.stdout.write(
+ self.style.SUCCESS("✓ Access token obtained successfully"),
+ )
except httpx.HTTPError as e:
msg = f"Failed to obtain access token: {e}"
raise CommandError(msg) from e
diff --git a/twitch/migrations/0001_initial.py b/twitch/migrations/0001_initial.py
index d9cf85a..4847714 100644
--- a/twitch/migrations/0001_initial.py
+++ b/twitch/migrations/0001_initial.py
@@ -1,5 +1,5 @@
# Generated by Django 6.0 on 2025-12-11 10:49
-from __future__ import annotations
+
import django.db.models.deletion
from django.db import migrations
@@ -17,8 +17,19 @@ class Migration(migrations.Migration):
migrations.CreateModel(
name="Game",
fields=[
- ("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
- ("twitch_id", models.TextField(unique=True, verbose_name="Twitch game ID")),
+ (
+ "id",
+ models.BigAutoField(
+ auto_created=True,
+ primary_key=True,
+ serialize=False,
+ verbose_name="ID",
+ ),
+ ),
+ (
+ "twitch_id",
+ models.TextField(unique=True, verbose_name="Twitch game ID"),
+ ),
(
"slug",
models.TextField(
@@ -30,8 +41,23 @@ class Migration(migrations.Migration):
),
),
("name", models.TextField(blank=True, default="", verbose_name="Name")),
- ("display_name", models.TextField(blank=True, default="", verbose_name="Display name")),
- ("box_art", models.URLField(blank=True, default="", max_length=500, verbose_name="Box art URL")),
+ (
+ "display_name",
+ models.TextField(
+ blank=True,
+ default="",
+ verbose_name="Display name",
+ ),
+ ),
+ (
+ "box_art",
+ models.URLField(
+ blank=True,
+ default="",
+ max_length=500,
+ verbose_name="Box art URL",
+ ),
+ ),
(
"box_art_file",
models.FileField(
@@ -43,21 +69,33 @@ class Migration(migrations.Migration):
),
(
"added_at",
- models.DateTimeField(auto_now_add=True, help_text="Timestamp when this game record was created."),
+ models.DateTimeField(
+ auto_now_add=True,
+ help_text="Timestamp when this game record was created.",
+ ),
),
(
"updated_at",
- models.DateTimeField(auto_now=True, help_text="Timestamp when this game record was last updated."),
+ models.DateTimeField(
+ auto_now=True,
+ help_text="Timestamp when this game record was last updated.",
+ ),
),
],
- options={
- "ordering": ["display_name"],
- },
+ options={"ordering": ["display_name"]},
),
migrations.CreateModel(
name="Channel",
fields=[
- ("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
+ (
+ "id",
+ models.BigAutoField(
+ auto_created=True,
+ primary_key=True,
+ serialize=False,
+ verbose_name="ID",
+ ),
+ ),
(
"twitch_id",
models.TextField(
@@ -66,7 +104,13 @@ class Migration(migrations.Migration):
verbose_name="Channel ID",
),
),
- ("name", models.TextField(help_text="The lowercase username of the channel.", verbose_name="Username")),
+ (
+ "name",
+ models.TextField(
+ help_text="The lowercase username of the channel.",
+ verbose_name="Username",
+ ),
+ ),
(
"display_name",
models.TextField(
@@ -92,23 +136,54 @@ class Migration(migrations.Migration):
options={
"ordering": ["display_name"],
"indexes": [
- models.Index(fields=["display_name"], name="twitch_chan_display_2bf213_idx"),
+ models.Index(
+ fields=["display_name"],
+ name="twitch_chan_display_2bf213_idx",
+ ),
models.Index(fields=["name"], name="twitch_chan_name_15d566_idx"),
- models.Index(fields=["twitch_id"], name="twitch_chan_twitch__c8bbc6_idx"),
- models.Index(fields=["added_at"], name="twitch_chan_added_a_5ce7b4_idx"),
- models.Index(fields=["updated_at"], name="twitch_chan_updated_828594_idx"),
+ models.Index(
+ fields=["twitch_id"],
+ name="twitch_chan_twitch__c8bbc6_idx",
+ ),
+ models.Index(
+ fields=["added_at"],
+ name="twitch_chan_added_a_5ce7b4_idx",
+ ),
+ models.Index(
+ fields=["updated_at"],
+ name="twitch_chan_updated_828594_idx",
+ ),
],
},
),
migrations.CreateModel(
name="DropBenefit",
fields=[
- ("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
+ (
+ "id",
+ models.BigAutoField(
+ auto_created=True,
+ primary_key=True,
+ serialize=False,
+ verbose_name="ID",
+ ),
+ ),
(
"twitch_id",
- models.TextField(editable=False, help_text="The Twitch ID for this benefit.", unique=True),
+ models.TextField(
+ editable=False,
+ help_text="The Twitch ID for this benefit.",
+ unique=True,
+ ),
+ ),
+ (
+ "name",
+ models.TextField(
+ blank=True,
+ default="N/A",
+ help_text="Name of the drop benefit.",
+ ),
),
- ("name", models.TextField(blank=True, default="N/A", help_text="Name of the drop benefit.")),
(
"image_asset_url",
models.URLField(
@@ -130,7 +205,7 @@ class Migration(migrations.Migration):
(
"created_at",
models.DateTimeField(
- help_text="Timestamp when the benefit was created. This is from Twitch API and not auto-generated.", # noqa: E501
+ help_text="Timestamp when the benefit was created. This is from Twitch API and not auto-generated.",
null=True,
),
),
@@ -143,7 +218,10 @@ class Migration(migrations.Migration):
),
(
"is_ios_available",
- models.BooleanField(default=False, help_text="Whether the benefit is available on iOS."),
+ models.BooleanField(
+ default=False,
+ help_text="Whether the benefit is available on iOS.",
+ ),
),
(
"distribution_type",
@@ -172,20 +250,46 @@ class Migration(migrations.Migration):
options={
"ordering": ["-created_at"],
"indexes": [
- models.Index(fields=["-created_at"], name="twitch_drop_created_5d2280_idx"),
- models.Index(fields=["twitch_id"], name="twitch_drop_twitch__6eab58_idx"),
+ models.Index(
+ fields=["-created_at"],
+ name="twitch_drop_created_5d2280_idx",
+ ),
+ models.Index(
+ fields=["twitch_id"],
+ name="twitch_drop_twitch__6eab58_idx",
+ ),
models.Index(fields=["name"], name="twitch_drop_name_7125ff_idx"),
- models.Index(fields=["distribution_type"], name="twitch_drop_distrib_08b224_idx"),
- models.Index(fields=["is_ios_available"], name="twitch_drop_is_ios__5f3dcf_idx"),
- models.Index(fields=["added_at"], name="twitch_drop_added_a_fba438_idx"),
- models.Index(fields=["updated_at"], name="twitch_drop_updated_7aaae3_idx"),
+ models.Index(
+ fields=["distribution_type"],
+ name="twitch_drop_distrib_08b224_idx",
+ ),
+ models.Index(
+ fields=["is_ios_available"],
+ name="twitch_drop_is_ios__5f3dcf_idx",
+ ),
+ models.Index(
+ fields=["added_at"],
+ name="twitch_drop_added_a_fba438_idx",
+ ),
+ models.Index(
+ fields=["updated_at"],
+ name="twitch_drop_updated_7aaae3_idx",
+ ),
],
},
),
migrations.CreateModel(
name="DropBenefitEdge",
fields=[
- ("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
+ (
+ "id",
+ models.BigAutoField(
+ auto_created=True,
+ primary_key=True,
+ serialize=False,
+ verbose_name="ID",
+ ),
+ ),
(
"entitlement_limit",
models.PositiveIntegerField(
@@ -220,16 +324,39 @@ class Migration(migrations.Migration):
migrations.CreateModel(
name="DropCampaign",
fields=[
- ("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
+ (
+ "id",
+ models.BigAutoField(
+ auto_created=True,
+ primary_key=True,
+ serialize=False,
+ verbose_name="ID",
+ ),
+ ),
(
"twitch_id",
- models.TextField(editable=False, help_text="The Twitch ID for this campaign.", unique=True),
+ models.TextField(
+ editable=False,
+ help_text="The Twitch ID for this campaign.",
+ unique=True,
+ ),
),
("name", models.TextField(help_text="Name of the drop campaign.")),
- ("description", models.TextField(blank=True, help_text="Detailed description of the campaign.")),
+ (
+ "description",
+ models.TextField(
+ blank=True,
+ help_text="Detailed description of the campaign.",
+ ),
+ ),
(
"details_url",
- models.URLField(blank=True, default="", help_text="URL with campaign details.", max_length=500),
+ models.URLField(
+ blank=True,
+ default="",
+ help_text="URL with campaign details.",
+ max_length=500,
+ ),
),
(
"account_link_url",
@@ -260,23 +387,40 @@ class Migration(migrations.Migration):
),
(
"start_at",
- models.DateTimeField(blank=True, help_text="Datetime when the campaign starts.", null=True),
+ models.DateTimeField(
+ blank=True,
+ help_text="Datetime when the campaign starts.",
+ null=True,
+ ),
+ ),
+ (
+ "end_at",
+ models.DateTimeField(
+ blank=True,
+ help_text="Datetime when the campaign ends.",
+ null=True,
+ ),
),
- ("end_at", models.DateTimeField(blank=True, help_text="Datetime when the campaign ends.", null=True)),
(
"is_account_connected",
- models.BooleanField(default=False, help_text="Indicates if the user account is linked."),
+ models.BooleanField(
+ default=False,
+ help_text="Indicates if the user account is linked.",
+ ),
),
(
"allow_is_enabled",
- models.BooleanField(default=True, help_text="Whether the campaign allows participation."),
+ models.BooleanField(
+ default=True,
+ help_text="Whether the campaign allows participation.",
+ ),
),
(
"operation_name",
models.TextField(
blank=True,
default="",
- help_text="The GraphQL operation name used to fetch this campaign data (e.g., 'ViewerDropsDashboard').", # noqa: E501
+ help_text="The GraphQL operation name used to fetch this campaign data (e.g., 'ViewerDropsDashboard').",
),
),
(
@@ -313,14 +457,20 @@ class Migration(migrations.Migration):
),
),
],
- options={
- "ordering": ["-start_at"],
- },
+ options={"ordering": ["-start_at"]},
),
migrations.CreateModel(
name="Organization",
fields=[
- ("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
+ (
+ "id",
+ models.BigAutoField(
+ auto_created=True,
+ primary_key=True,
+ serialize=False,
+ verbose_name="ID",
+ ),
+ ),
(
"twitch_id",
models.TextField(
@@ -332,7 +482,11 @@ class Migration(migrations.Migration):
),
(
"name",
- models.TextField(help_text="Display name of the organization.", unique=True, verbose_name="Name"),
+ models.TextField(
+ help_text="Display name of the organization.",
+ unique=True,
+ verbose_name="Name",
+ ),
),
(
"added_at",
@@ -355,9 +509,18 @@ class Migration(migrations.Migration):
"ordering": ["name"],
"indexes": [
models.Index(fields=["name"], name="twitch_orga_name_febe72_idx"),
- models.Index(fields=["twitch_id"], name="twitch_orga_twitch__b89b29_idx"),
- models.Index(fields=["added_at"], name="twitch_orga_added_a_8297ac_idx"),
- models.Index(fields=["updated_at"], name="twitch_orga_updated_d7d431_idx"),
+ models.Index(
+ fields=["twitch_id"],
+ name="twitch_orga_twitch__b89b29_idx",
+ ),
+ models.Index(
+ fields=["added_at"],
+ name="twitch_orga_added_a_8297ac_idx",
+ ),
+ models.Index(
+ fields=["updated_at"],
+ name="twitch_orga_updated_d7d431_idx",
+ ),
],
},
),
@@ -377,10 +540,22 @@ class Migration(migrations.Migration):
migrations.CreateModel(
name="TimeBasedDrop",
fields=[
- ("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
+ (
+ "id",
+ models.BigAutoField(
+ auto_created=True,
+ primary_key=True,
+ serialize=False,
+ verbose_name="ID",
+ ),
+ ),
(
"twitch_id",
- models.TextField(editable=False, help_text="The Twitch ID for this time-based drop.", unique=True),
+ models.TextField(
+ editable=False,
+ help_text="The Twitch ID for this time-based drop.",
+ unique=True,
+ ),
),
("name", models.TextField(help_text="Name of the time-based drop.")),
(
@@ -400,9 +575,20 @@ class Migration(migrations.Migration):
),
(
"start_at",
- models.DateTimeField(blank=True, help_text="Datetime when this drop becomes available.", null=True),
+ models.DateTimeField(
+ blank=True,
+ help_text="Datetime when this drop becomes available.",
+ null=True,
+ ),
+ ),
+ (
+ "end_at",
+ models.DateTimeField(
+ blank=True,
+ help_text="Datetime when this drop expires.",
+ null=True,
+ ),
),
- ("end_at", models.DateTimeField(blank=True, help_text="Datetime when this drop expires.", null=True)),
(
"added_at",
models.DateTimeField(
@@ -436,9 +622,7 @@ class Migration(migrations.Migration):
),
),
],
- options={
- "ordering": ["start_at"],
- },
+ options={"ordering": ["start_at"]},
),
migrations.AddField(
model_name="dropbenefitedge",
@@ -452,7 +636,15 @@ class Migration(migrations.Migration):
migrations.CreateModel(
name="TwitchGameData",
fields=[
- ("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
+ (
+ "id",
+ models.BigAutoField(
+ auto_created=True,
+ primary_key=True,
+ serialize=False,
+ verbose_name="ID",
+ ),
+ ),
(
"twitch_id",
models.TextField(
@@ -472,9 +664,24 @@ class Migration(migrations.Migration):
verbose_name="Box art URL",
),
),
- ("igdb_id", models.TextField(blank=True, default="", verbose_name="IGDB ID")),
- ("added_at", models.DateTimeField(auto_now_add=True, help_text="Record creation time.")),
- ("updated_at", models.DateTimeField(auto_now=True, help_text="Record last update time.")),
+ (
+ "igdb_id",
+ models.TextField(blank=True, default="", verbose_name="IGDB ID"),
+ ),
+ (
+ "added_at",
+ models.DateTimeField(
+ auto_now_add=True,
+ help_text="Record creation time.",
+ ),
+ ),
+ (
+ "updated_at",
+ models.DateTimeField(
+ auto_now=True,
+ help_text="Record last update time.",
+ ),
+ ),
(
"game",
models.ForeignKey(
@@ -488,13 +695,14 @@ class Migration(migrations.Migration):
),
),
],
- options={
- "ordering": ["name"],
- },
+ options={"ordering": ["name"]},
),
migrations.AddIndex(
model_name="dropcampaign",
- index=models.Index(fields=["-start_at"], name="twitch_drop_start_a_929f09_idx"),
+ index=models.Index(
+ fields=["-start_at"],
+ name="twitch_drop_start_a_929f09_idx",
+ ),
),
migrations.AddIndex(
model_name="dropcampaign",
@@ -506,7 +714,10 @@ class Migration(migrations.Migration):
),
migrations.AddIndex(
model_name="dropcampaign",
- index=models.Index(fields=["twitch_id"], name="twitch_drop_twitch__b717a1_idx"),
+ index=models.Index(
+ fields=["twitch_id"],
+ name="twitch_drop_twitch__b717a1_idx",
+ ),
),
migrations.AddIndex(
model_name="dropcampaign",
@@ -514,47 +725,80 @@ class Migration(migrations.Migration):
),
migrations.AddIndex(
model_name="dropcampaign",
- index=models.Index(fields=["description"], name="twitch_drop_descrip_5bc290_idx"),
+ index=models.Index(
+ fields=["description"],
+ name="twitch_drop_descrip_5bc290_idx",
+ ),
),
migrations.AddIndex(
model_name="dropcampaign",
- index=models.Index(fields=["is_account_connected"], name="twitch_drop_is_acco_7e9078_idx"),
+ index=models.Index(
+ fields=["is_account_connected"],
+ name="twitch_drop_is_acco_7e9078_idx",
+ ),
),
migrations.AddIndex(
model_name="dropcampaign",
- index=models.Index(fields=["allow_is_enabled"], name="twitch_drop_allow_i_b64555_idx"),
+ index=models.Index(
+ fields=["allow_is_enabled"],
+ name="twitch_drop_allow_i_b64555_idx",
+ ),
),
migrations.AddIndex(
model_name="dropcampaign",
- index=models.Index(fields=["operation_name"], name="twitch_drop_operati_8cfeb5_idx"),
+ index=models.Index(
+ fields=["operation_name"],
+ name="twitch_drop_operati_8cfeb5_idx",
+ ),
),
migrations.AddIndex(
model_name="dropcampaign",
- index=models.Index(fields=["added_at"], name="twitch_drop_added_a_babe28_idx"),
+ index=models.Index(
+ fields=["added_at"],
+ name="twitch_drop_added_a_babe28_idx",
+ ),
),
migrations.AddIndex(
model_name="dropcampaign",
- index=models.Index(fields=["updated_at"], name="twitch_drop_updated_0df991_idx"),
+ index=models.Index(
+ fields=["updated_at"],
+ name="twitch_drop_updated_0df991_idx",
+ ),
),
migrations.AddIndex(
model_name="dropcampaign",
- index=models.Index(fields=["game", "-start_at"], name="twitch_drop_game_id_5e9b01_idx"),
+ index=models.Index(
+ fields=["game", "-start_at"],
+ name="twitch_drop_game_id_5e9b01_idx",
+ ),
),
migrations.AddIndex(
model_name="dropcampaign",
- index=models.Index(fields=["start_at", "end_at"], name="twitch_drop_start_a_6e5fb6_idx"),
+ index=models.Index(
+ fields=["start_at", "end_at"],
+ name="twitch_drop_start_a_6e5fb6_idx",
+ ),
),
migrations.AddIndex(
model_name="dropcampaign",
- index=models.Index(fields=["start_at", "end_at", "game"], name="twitch_drop_start_a_b02d4c_idx"),
+ index=models.Index(
+ fields=["start_at", "end_at", "game"],
+ name="twitch_drop_start_a_b02d4c_idx",
+ ),
),
migrations.AddIndex(
model_name="dropcampaign",
- index=models.Index(fields=["end_at", "-start_at"], name="twitch_drop_end_at_81e51b_idx"),
+ index=models.Index(
+ fields=["end_at", "-start_at"],
+ name="twitch_drop_end_at_81e51b_idx",
+ ),
),
migrations.AddIndex(
model_name="game",
- index=models.Index(fields=["display_name"], name="twitch_game_display_a35ba3_idx"),
+ index=models.Index(
+ fields=["display_name"],
+ name="twitch_game_display_a35ba3_idx",
+ ),
),
migrations.AddIndex(
model_name="game",
@@ -566,7 +810,10 @@ class Migration(migrations.Migration):
),
migrations.AddIndex(
model_name="game",
- index=models.Index(fields=["twitch_id"], name="twitch_game_twitch__887f78_idx"),
+ index=models.Index(
+ fields=["twitch_id"],
+ name="twitch_game_twitch__887f78_idx",
+ ),
),
migrations.AddIndex(
model_name="game",
@@ -574,19 +821,31 @@ class Migration(migrations.Migration):
),
migrations.AddIndex(
model_name="game",
- index=models.Index(fields=["added_at"], name="twitch_game_added_a_9e7e19_idx"),
+ index=models.Index(
+ fields=["added_at"],
+ name="twitch_game_added_a_9e7e19_idx",
+ ),
),
migrations.AddIndex(
model_name="game",
- index=models.Index(fields=["updated_at"], name="twitch_game_updated_01df03_idx"),
+ index=models.Index(
+ fields=["updated_at"],
+ name="twitch_game_updated_01df03_idx",
+ ),
),
migrations.AddIndex(
model_name="game",
- index=models.Index(fields=["owner", "display_name"], name="twitch_game_owner_i_7f9043_idx"),
+ index=models.Index(
+ fields=["owner", "display_name"],
+ name="twitch_game_owner_i_7f9043_idx",
+ ),
),
migrations.AddIndex(
model_name="timebaseddrop",
- index=models.Index(fields=["start_at"], name="twitch_time_start_a_13de4a_idx"),
+ index=models.Index(
+ fields=["start_at"],
+ name="twitch_time_start_a_13de4a_idx",
+ ),
),
migrations.AddIndex(
model_name="timebaseddrop",
@@ -594,11 +853,17 @@ class Migration(migrations.Migration):
),
migrations.AddIndex(
model_name="timebaseddrop",
- index=models.Index(fields=["campaign"], name="twitch_time_campaig_bbe349_idx"),
+ index=models.Index(
+ fields=["campaign"],
+ name="twitch_time_campaig_bbe349_idx",
+ ),
),
migrations.AddIndex(
model_name="timebaseddrop",
- index=models.Index(fields=["twitch_id"], name="twitch_time_twitch__31707a_idx"),
+ index=models.Index(
+ fields=["twitch_id"],
+ name="twitch_time_twitch__31707a_idx",
+ ),
),
migrations.AddIndex(
model_name="timebaseddrop",
@@ -606,31 +871,52 @@ class Migration(migrations.Migration):
),
migrations.AddIndex(
model_name="timebaseddrop",
- index=models.Index(fields=["required_minutes_watched"], name="twitch_time_require_82c30c_idx"),
+ index=models.Index(
+ fields=["required_minutes_watched"],
+ name="twitch_time_require_82c30c_idx",
+ ),
),
migrations.AddIndex(
model_name="timebaseddrop",
- index=models.Index(fields=["required_subs"], name="twitch_time_require_959431_idx"),
+ index=models.Index(
+ fields=["required_subs"],
+ name="twitch_time_require_959431_idx",
+ ),
),
migrations.AddIndex(
model_name="timebaseddrop",
- index=models.Index(fields=["added_at"], name="twitch_time_added_a_a7de2e_idx"),
+ index=models.Index(
+ fields=["added_at"],
+ name="twitch_time_added_a_a7de2e_idx",
+ ),
),
migrations.AddIndex(
model_name="timebaseddrop",
- index=models.Index(fields=["updated_at"], name="twitch_time_updated_9e9d9e_idx"),
+ index=models.Index(
+ fields=["updated_at"],
+ name="twitch_time_updated_9e9d9e_idx",
+ ),
),
migrations.AddIndex(
model_name="timebaseddrop",
- index=models.Index(fields=["campaign", "start_at"], name="twitch_time_campaig_29ac87_idx"),
+ index=models.Index(
+ fields=["campaign", "start_at"],
+ name="twitch_time_campaig_29ac87_idx",
+ ),
),
migrations.AddIndex(
model_name="timebaseddrop",
- index=models.Index(fields=["campaign", "required_minutes_watched"], name="twitch_time_campaig_920ae4_idx"),
+ index=models.Index(
+ fields=["campaign", "required_minutes_watched"],
+ name="twitch_time_campaig_920ae4_idx",
+ ),
),
migrations.AddIndex(
model_name="timebaseddrop",
- index=models.Index(fields=["start_at", "end_at"], name="twitch_time_start_a_c481f1_idx"),
+ index=models.Index(
+ fields=["start_at", "end_at"],
+ name="twitch_time_start_a_c481f1_idx",
+ ),
),
migrations.AddIndex(
model_name="dropbenefitedge",
@@ -638,23 +924,38 @@ class Migration(migrations.Migration):
),
migrations.AddIndex(
model_name="dropbenefitedge",
- index=models.Index(fields=["benefit"], name="twitch_drop_benefit_c92c87_idx"),
+ index=models.Index(
+ fields=["benefit"],
+ name="twitch_drop_benefit_c92c87_idx",
+ ),
),
migrations.AddIndex(
model_name="dropbenefitedge",
- index=models.Index(fields=["entitlement_limit"], name="twitch_drop_entitle_bee3a0_idx"),
+ index=models.Index(
+ fields=["entitlement_limit"],
+ name="twitch_drop_entitle_bee3a0_idx",
+ ),
),
migrations.AddIndex(
model_name="dropbenefitedge",
- index=models.Index(fields=["added_at"], name="twitch_drop_added_a_2100ba_idx"),
+ index=models.Index(
+ fields=["added_at"],
+ name="twitch_drop_added_a_2100ba_idx",
+ ),
),
migrations.AddIndex(
model_name="dropbenefitedge",
- index=models.Index(fields=["updated_at"], name="twitch_drop_updated_00e3f2_idx"),
+ index=models.Index(
+ fields=["updated_at"],
+ name="twitch_drop_updated_00e3f2_idx",
+ ),
),
migrations.AddConstraint(
model_name="dropbenefitedge",
- constraint=models.UniqueConstraint(fields=("drop", "benefit"), name="unique_drop_benefit"),
+ constraint=models.UniqueConstraint(
+ fields=("drop", "benefit"),
+ name="unique_drop_benefit",
+ ),
),
migrations.AddIndex(
model_name="twitchgamedata",
@@ -662,7 +963,10 @@ class Migration(migrations.Migration):
),
migrations.AddIndex(
model_name="twitchgamedata",
- index=models.Index(fields=["twitch_id"], name="twitch_twit_twitch__2207e6_idx"),
+ index=models.Index(
+ fields=["twitch_id"],
+ name="twitch_twit_twitch__2207e6_idx",
+ ),
),
migrations.AddIndex(
model_name="twitchgamedata",
@@ -670,14 +974,23 @@ class Migration(migrations.Migration):
),
migrations.AddIndex(
model_name="twitchgamedata",
- index=models.Index(fields=["igdb_id"], name="twitch_twit_igdb_id_161335_idx"),
+ index=models.Index(
+ fields=["igdb_id"],
+ name="twitch_twit_igdb_id_161335_idx",
+ ),
),
migrations.AddIndex(
model_name="twitchgamedata",
- index=models.Index(fields=["added_at"], name="twitch_twit_added_a_2f4f36_idx"),
+ index=models.Index(
+ fields=["added_at"],
+ name="twitch_twit_added_a_2f4f36_idx",
+ ),
),
migrations.AddIndex(
model_name="twitchgamedata",
- index=models.Index(fields=["updated_at"], name="twitch_twit_updated_ca8c4b_idx"),
+ index=models.Index(
+ fields=["updated_at"],
+ name="twitch_twit_updated_ca8c4b_idx",
+ ),
),
]
diff --git a/twitch/migrations/0002_alter_game_box_art.py b/twitch/migrations/0002_alter_game_box_art.py
index 0684510..c9cca74 100644
--- a/twitch/migrations/0002_alter_game_box_art.py
+++ b/twitch/migrations/0002_alter_game_box_art.py
@@ -1,5 +1,5 @@
# Generated by Django 6.0 on 2026-01-05 20:47
-from __future__ import annotations
+
from django.db import migrations
from django.db import models
@@ -8,14 +8,18 @@ from django.db import models
class Migration(migrations.Migration):
"""Alter box_art field to allow null values."""
- dependencies = [
- ("twitch", "0001_initial"),
- ]
+ dependencies = [("twitch", "0001_initial")]
operations = [
migrations.AlterField(
model_name="game",
name="box_art",
- field=models.URLField(blank=True, default="", max_length=500, null=True, verbose_name="Box art URL"),
+ field=models.URLField(
+ blank=True,
+ default="",
+ max_length=500,
+ null=True,
+ verbose_name="Box art URL",
+ ),
),
]
diff --git a/twitch/migrations/0003_remove_dropcampaign_twitch_drop_is_acco_7e9078_idx_and_more.py b/twitch/migrations/0003_remove_dropcampaign_twitch_drop_is_acco_7e9078_idx_and_more.py
index f6aefea..e2f0e82 100644
--- a/twitch/migrations/0003_remove_dropcampaign_twitch_drop_is_acco_7e9078_idx_and_more.py
+++ b/twitch/migrations/0003_remove_dropcampaign_twitch_drop_is_acco_7e9078_idx_and_more.py
@@ -1,5 +1,5 @@
# Generated by Django 6.0 on 2026-01-05 22:29
-from __future__ import annotations
+
from django.db import migrations
@@ -7,17 +7,12 @@ from django.db import migrations
class Migration(migrations.Migration):
"""Remove is_account_connected field and its index from DropCampaign."""
- dependencies = [
- ("twitch", "0002_alter_game_box_art"),
- ]
+ dependencies = [("twitch", "0002_alter_game_box_art")]
operations = [
migrations.RemoveIndex(
model_name="dropcampaign",
name="twitch_drop_is_acco_7e9078_idx",
),
- migrations.RemoveField(
- model_name="dropcampaign",
- name="is_account_connected",
- ),
+ migrations.RemoveField(model_name="dropcampaign", name="is_account_connected"),
]
diff --git a/twitch/migrations/0004_remove_game_twitch_game_owner_i_398fa9_idx_and_more.py b/twitch/migrations/0004_remove_game_twitch_game_owner_i_398fa9_idx_and_more.py
index 00db410..7973efb 100644
--- a/twitch/migrations/0004_remove_game_twitch_game_owner_i_398fa9_idx_and_more.py
+++ b/twitch/migrations/0004_remove_game_twitch_game_owner_i_398fa9_idx_and_more.py
@@ -1,5 +1,5 @@
# Generated by Django 6.0.1 on 2026-01-09 20:52
-from __future__ import annotations
+
from django.db import migrations
from django.db import models
@@ -35,8 +35,5 @@ class Migration(migrations.Migration):
verbose_name="Organizations",
),
),
- migrations.RemoveField(
- model_name="game",
- name="owner",
- ),
+ migrations.RemoveField(model_name="game", name="owner"),
]
diff --git a/twitch/migrations/0005_add_reward_campaign.py b/twitch/migrations/0005_add_reward_campaign.py
index 42add96..4d2c64d 100644
--- a/twitch/migrations/0005_add_reward_campaign.py
+++ b/twitch/migrations/0005_add_reward_campaign.py
@@ -1,5 +1,5 @@
# Generated by Django 6.0.1 on 2026-01-13 20:31
-from __future__ import annotations
+
import django.db.models.deletion
from django.db import migrations
@@ -17,35 +17,71 @@ class Migration(migrations.Migration):
migrations.CreateModel(
name="RewardCampaign",
fields=[
- ("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
+ (
+ "id",
+ models.BigAutoField(
+ auto_created=True,
+ primary_key=True,
+ serialize=False,
+ verbose_name="ID",
+ ),
+ ),
(
"twitch_id",
- models.TextField(editable=False, help_text="The Twitch ID for this reward campaign.", unique=True),
+ models.TextField(
+ editable=False,
+ help_text="The Twitch ID for this reward campaign.",
+ unique=True,
+ ),
),
("name", models.TextField(help_text="Name of the reward campaign.")),
(
"brand",
- models.TextField(blank=True, default="", help_text="Brand associated with the reward campaign."),
+ models.TextField(
+ blank=True,
+ default="",
+ help_text="Brand associated with the reward campaign.",
+ ),
),
(
"starts_at",
- models.DateTimeField(blank=True, help_text="Datetime when the reward campaign starts.", null=True),
+ models.DateTimeField(
+ blank=True,
+ help_text="Datetime when the reward campaign starts.",
+ null=True,
+ ),
),
(
"ends_at",
- models.DateTimeField(blank=True, help_text="Datetime when the reward campaign ends.", null=True),
+ models.DateTimeField(
+ blank=True,
+ help_text="Datetime when the reward campaign ends.",
+ null=True,
+ ),
),
(
"status",
- models.TextField(default="UNKNOWN", help_text="Status of the reward campaign.", max_length=50),
+ models.TextField(
+ default="UNKNOWN",
+ help_text="Status of the reward campaign.",
+ max_length=50,
+ ),
),
(
"summary",
- models.TextField(blank=True, default="", help_text="Summary description of the reward campaign."),
+ models.TextField(
+ blank=True,
+ default="",
+ help_text="Summary description of the reward campaign.",
+ ),
),
(
"instructions",
- models.TextField(blank=True, default="", help_text="Instructions for the reward campaign."),
+ models.TextField(
+ blank=True,
+ default="",
+ help_text="Instructions for the reward campaign.",
+ ),
),
(
"external_url",
@@ -58,7 +94,11 @@ class Migration(migrations.Migration):
),
(
"reward_value_url_param",
- models.TextField(blank=True, default="", help_text="URL parameter for reward value."),
+ models.TextField(
+ blank=True,
+ default="",
+ help_text="URL parameter for reward value.",
+ ),
),
(
"about_url",
@@ -71,7 +111,10 @@ class Migration(migrations.Migration):
),
(
"is_sitewide",
- models.BooleanField(default=False, help_text="Whether the reward campaign is sitewide."),
+ models.BooleanField(
+ default=False,
+ help_text="Whether the reward campaign is sitewide.",
+ ),
),
(
"added_at",
@@ -102,18 +145,48 @@ class Migration(migrations.Migration):
options={
"ordering": ["-starts_at"],
"indexes": [
- models.Index(fields=["-starts_at"], name="twitch_rewa_starts__4df564_idx"),
- models.Index(fields=["ends_at"], name="twitch_rewa_ends_at_354b15_idx"),
- models.Index(fields=["twitch_id"], name="twitch_rewa_twitch__797967_idx"),
+ models.Index(
+ fields=["-starts_at"],
+ name="twitch_rewa_starts__4df564_idx",
+ ),
+ models.Index(
+ fields=["ends_at"],
+ name="twitch_rewa_ends_at_354b15_idx",
+ ),
+ models.Index(
+ fields=["twitch_id"],
+ name="twitch_rewa_twitch__797967_idx",
+ ),
models.Index(fields=["name"], name="twitch_rewa_name_f1e3dd_idx"),
models.Index(fields=["brand"], name="twitch_rewa_brand_41c321_idx"),
- models.Index(fields=["status"], name="twitch_rewa_status_a96d6b_idx"),
- models.Index(fields=["is_sitewide"], name="twitch_rewa_is_site_7d2c9f_idx"),
- models.Index(fields=["game"], name="twitch_rewa_game_id_678fbb_idx"),
- models.Index(fields=["added_at"], name="twitch_rewa_added_a_ae3748_idx"),
- models.Index(fields=["updated_at"], name="twitch_rewa_updated_fdf599_idx"),
- models.Index(fields=["starts_at", "ends_at"], name="twitch_rewa_starts__dd909d_idx"),
- models.Index(fields=["status", "-starts_at"], name="twitch_rewa_status_3641a4_idx"),
+ models.Index(
+ fields=["status"],
+ name="twitch_rewa_status_a96d6b_idx",
+ ),
+ models.Index(
+ fields=["is_sitewide"],
+ name="twitch_rewa_is_site_7d2c9f_idx",
+ ),
+ models.Index(
+ fields=["game"],
+ name="twitch_rewa_game_id_678fbb_idx",
+ ),
+ models.Index(
+ fields=["added_at"],
+ name="twitch_rewa_added_a_ae3748_idx",
+ ),
+ models.Index(
+ fields=["updated_at"],
+ name="twitch_rewa_updated_fdf599_idx",
+ ),
+ models.Index(
+ fields=["starts_at", "ends_at"],
+ name="twitch_rewa_starts__dd909d_idx",
+ ),
+ models.Index(
+ fields=["status", "-starts_at"],
+ name="twitch_rewa_status_3641a4_idx",
+ ),
],
},
),
diff --git a/twitch/migrations/0006_add_chat_badges.py b/twitch/migrations/0006_add_chat_badges.py
index 6181519..e6f7268 100644
--- a/twitch/migrations/0006_add_chat_badges.py
+++ b/twitch/migrations/0006_add_chat_badges.py
@@ -1,5 +1,5 @@
# Generated by Django 6.0.1 on 2026-01-15 21:57
-from __future__ import annotations
+
import django.db.models.deletion
from django.db import migrations
@@ -9,15 +9,21 @@ from django.db import models
class Migration(migrations.Migration):
"""Add ChatBadgeSet and ChatBadge models for Twitch chat badges."""
- dependencies = [
- ("twitch", "0005_add_reward_campaign"),
- ]
+ dependencies = [("twitch", "0005_add_reward_campaign")]
operations = [
migrations.CreateModel(
name="ChatBadgeSet",
fields=[
- ("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
+ (
+ "id",
+ models.BigAutoField(
+ auto_created=True,
+ primary_key=True,
+ serialize=False,
+ verbose_name="ID",
+ ),
+ ),
(
"set_id",
models.TextField(
@@ -46,16 +52,33 @@ class Migration(migrations.Migration):
options={
"ordering": ["set_id"],
"indexes": [
- models.Index(fields=["set_id"], name="twitch_chat_set_id_9319f2_idx"),
- models.Index(fields=["added_at"], name="twitch_chat_added_a_b0023a_idx"),
- models.Index(fields=["updated_at"], name="twitch_chat_updated_90afed_idx"),
+ models.Index(
+ fields=["set_id"],
+ name="twitch_chat_set_id_9319f2_idx",
+ ),
+ models.Index(
+ fields=["added_at"],
+ name="twitch_chat_added_a_b0023a_idx",
+ ),
+ models.Index(
+ fields=["updated_at"],
+ name="twitch_chat_updated_90afed_idx",
+ ),
],
},
),
migrations.CreateModel(
name="ChatBadge",
fields=[
- ("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
+ (
+ "id",
+ models.BigAutoField(
+ auto_created=True,
+ primary_key=True,
+ serialize=False,
+ verbose_name="ID",
+ ),
+ ),
(
"badge_id",
models.TextField(
@@ -87,10 +110,19 @@ class Migration(migrations.Migration):
verbose_name="Image URL (72px)",
),
),
- ("title", models.TextField(help_text="The title of the badge (e.g., 'VIP').", verbose_name="Title")),
+ (
+ "title",
+ models.TextField(
+ help_text="The title of the badge (e.g., 'VIP').",
+ verbose_name="Title",
+ ),
+ ),
(
"description",
- models.TextField(help_text="The description of the badge.", verbose_name="Description"),
+ models.TextField(
+ help_text="The description of the badge.",
+ verbose_name="Description",
+ ),
),
(
"click_action",
@@ -141,13 +173,30 @@ class Migration(migrations.Migration):
options={
"ordering": ["badge_set", "badge_id"],
"indexes": [
- models.Index(fields=["badge_set"], name="twitch_chat_badge_s_54f225_idx"),
- models.Index(fields=["badge_id"], name="twitch_chat_badge_i_58a68a_idx"),
+ models.Index(
+ fields=["badge_set"],
+ name="twitch_chat_badge_s_54f225_idx",
+ ),
+ models.Index(
+ fields=["badge_id"],
+ name="twitch_chat_badge_i_58a68a_idx",
+ ),
models.Index(fields=["title"], name="twitch_chat_title_0f42d2_idx"),
- models.Index(fields=["added_at"], name="twitch_chat_added_a_9ba7dd_idx"),
- models.Index(fields=["updated_at"], name="twitch_chat_updated_568ad1_idx"),
+ models.Index(
+ fields=["added_at"],
+ name="twitch_chat_added_a_9ba7dd_idx",
+ ),
+ models.Index(
+ fields=["updated_at"],
+ name="twitch_chat_updated_568ad1_idx",
+ ),
+ ],
+ "constraints": [
+ models.UniqueConstraint(
+ fields=("badge_set", "badge_id"),
+ name="unique_badge_set_id",
+ ),
],
- "constraints": [models.UniqueConstraint(fields=("badge_set", "badge_id"), name="unique_badge_set_id")],
},
),
]
diff --git a/twitch/migrations/0007_rename_operation_name_to_operation_names.py b/twitch/migrations/0007_rename_operation_name_to_operation_names.py
index c8cc5fb..55985e2 100644
--- a/twitch/migrations/0007_rename_operation_name_to_operation_names.py
+++ b/twitch/migrations/0007_rename_operation_name_to_operation_names.py
@@ -1,5 +1,5 @@
# Generated by Django 6.0.1 on 2026-01-17 05:32
-from __future__ import annotations
+
from django.db import migrations
from django.db import models
@@ -26,9 +26,7 @@ def reverse_operation_names_to_string(apps, schema_editor) -> None: # noqa: ARG
class Migration(migrations.Migration):
"""Rename operation_name field to operation_names and convert to list."""
- dependencies = [
- ("twitch", "0006_add_chat_badges"),
- ]
+ dependencies = [("twitch", "0006_add_chat_badges")]
operations = [
migrations.RemoveIndex(
@@ -41,7 +39,7 @@ class Migration(migrations.Migration):
field=models.JSONField(
blank=True,
default=list,
- help_text="List of GraphQL operation names used to fetch this campaign data (e.g., ['ViewerDropsDashboard', 'Inventory']).", # noqa: E501
+ help_text="List of GraphQL operation names used to fetch this campaign data (e.g., ['ViewerDropsDashboard', 'Inventory']).",
),
),
migrations.RunPython(
@@ -50,10 +48,10 @@ class Migration(migrations.Migration):
),
migrations.AddIndex(
model_name="dropcampaign",
- index=models.Index(fields=["operation_names"], name="twitch_drop_operati_fe3bc8_idx"),
- ),
- migrations.RemoveField(
- model_name="dropcampaign",
- name="operation_name",
+ index=models.Index(
+ fields=["operation_names"],
+ name="twitch_drop_operati_fe3bc8_idx",
+ ),
),
+ migrations.RemoveField(model_name="dropcampaign", name="operation_name"),
]
diff --git a/twitch/migrations/0008_alter_channel_options_alter_chatbadge_options_and_more.py b/twitch/migrations/0008_alter_channel_options_alter_chatbadge_options_and_more.py
index ab678c6..63fdd6d 100644
--- a/twitch/migrations/0008_alter_channel_options_alter_chatbadge_options_and_more.py
+++ b/twitch/migrations/0008_alter_channel_options_alter_chatbadge_options_and_more.py
@@ -1,25 +1,29 @@
# Generated by Django 6.0.2 on 2026-02-09 19:04
-from __future__ import annotations
+
import django.db.models.manager
from django.db import migrations
class Migration(migrations.Migration):
- "Alter model options to use prefetch_manager as the base manager and set default ordering for better performance and consistent query results." # noqa: E501
+ "Alter model options to use prefetch_manager as the base manager and set default ordering for better performance and consistent query results."
- dependencies = [
- ("twitch", "0007_rename_operation_name_to_operation_names"),
- ]
+ dependencies = [("twitch", "0007_rename_operation_name_to_operation_names")]
operations = [
migrations.AlterModelOptions(
name="channel",
- options={"base_manager_name": "prefetch_manager", "ordering": ["display_name"]},
+ options={
+ "base_manager_name": "prefetch_manager",
+ "ordering": ["display_name"],
+ },
),
migrations.AlterModelOptions(
name="chatbadge",
- options={"base_manager_name": "prefetch_manager", "ordering": ["badge_set", "badge_id"]},
+ options={
+ "base_manager_name": "prefetch_manager",
+ "ordering": ["badge_set", "badge_id"],
+ },
),
migrations.AlterModelOptions(
name="chatbadgeset",
@@ -27,7 +31,10 @@ class Migration(migrations.Migration):
),
migrations.AlterModelOptions(
name="dropbenefit",
- options={"base_manager_name": "prefetch_manager", "ordering": ["-created_at"]},
+ options={
+ "base_manager_name": "prefetch_manager",
+ "ordering": ["-created_at"],
+ },
),
migrations.AlterModelOptions(
name="dropbenefitedge",
@@ -35,11 +42,17 @@ class Migration(migrations.Migration):
),
migrations.AlterModelOptions(
name="dropcampaign",
- options={"base_manager_name": "prefetch_manager", "ordering": ["-start_at"]},
+ options={
+ "base_manager_name": "prefetch_manager",
+ "ordering": ["-start_at"],
+ },
),
migrations.AlterModelOptions(
name="game",
- options={"base_manager_name": "prefetch_manager", "ordering": ["display_name"]},
+ options={
+ "base_manager_name": "prefetch_manager",
+ "ordering": ["display_name"],
+ },
),
migrations.AlterModelOptions(
name="organization",
@@ -47,7 +60,10 @@ class Migration(migrations.Migration):
),
migrations.AlterModelOptions(
name="rewardcampaign",
- options={"base_manager_name": "prefetch_manager", "ordering": ["-starts_at"]},
+ options={
+ "base_manager_name": "prefetch_manager",
+ "ordering": ["-starts_at"],
+ },
),
migrations.AlterModelOptions(
name="timebaseddrop",
diff --git a/twitch/migrations/0009_alter_chatbadge_badge_set_and_more.py b/twitch/migrations/0009_alter_chatbadge_badge_set_and_more.py
index 502578e..99e9d64 100644
--- a/twitch/migrations/0009_alter_chatbadge_badge_set_and_more.py
+++ b/twitch/migrations/0009_alter_chatbadge_badge_set_and_more.py
@@ -1,5 +1,5 @@
# Generated by Django 6.0.2 on 2026-02-09 19:05
-from __future__ import annotations
+
import auto_prefetch
import django.db.models.deletion
@@ -7,7 +7,7 @@ from django.db import migrations
class Migration(migrations.Migration):
- "Alter ChatBadge.badge_set to use auto_prefetch.ForeignKey and update related fields to use auto_prefetch.ForeignKey as well for better performance." # noqa: E501
+ "Alter ChatBadge.badge_set to use auto_prefetch.ForeignKey and update related fields to use auto_prefetch.ForeignKey as well for better performance."
dependencies = [
("twitch", "0008_alter_channel_options_alter_chatbadge_options_and_more"),
diff --git a/twitch/migrations/0010_rewardcampaign_image_file_rewardcampaign_image_url.py b/twitch/migrations/0010_rewardcampaign_image_file_rewardcampaign_image_url.py
index d8b5a91..0e84d61 100644
--- a/twitch/migrations/0010_rewardcampaign_image_file_rewardcampaign_image_url.py
+++ b/twitch/migrations/0010_rewardcampaign_image_file_rewardcampaign_image_url.py
@@ -1,16 +1,14 @@
# Generated by Django 6.0.2 on 2026-02-11 22:55
-from __future__ import annotations
+
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
- """Add image_file and image_url fields to RewardCampaign model for storing local file and original URL of campaign images.""" # noqa: E501
+ """Add image_file and image_url fields to RewardCampaign model for storing local file and original URL of campaign images."""
- dependencies = [
- ("twitch", "0009_alter_chatbadge_badge_set_and_more"),
- ]
+ dependencies = [("twitch", "0009_alter_chatbadge_badge_set_and_more")]
operations = [
migrations.AddField(
diff --git a/twitch/migrations/0011_dropbenefit_image_height_dropbenefit_image_width_and_more.py b/twitch/migrations/0011_dropbenefit_image_height_dropbenefit_image_width_and_more.py
index 97e10ef..6cae230 100644
--- a/twitch/migrations/0011_dropbenefit_image_height_dropbenefit_image_width_and_more.py
+++ b/twitch/migrations/0011_dropbenefit_image_height_dropbenefit_image_width_and_more.py
@@ -1,12 +1,12 @@
# Generated by Django 6.0.2 on 2026-02-12 03:41
-from __future__ import annotations
+
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
- """Add image height and width fields to DropBenefit, DropCampaign, Game, and RewardCampaign, then update ImageFields to use them.""" # noqa: E501
+ """Add image height and width fields to DropBenefit, DropCampaign, Game, and RewardCampaign, then update ImageFields to use them."""
dependencies = [
("twitch", "0010_rewardcampaign_image_file_rewardcampaign_image_url"),
diff --git a/twitch/migrations/0012_dropcampaign_operation_names_gin_index.py b/twitch/migrations/0012_dropcampaign_operation_names_gin_index.py
index 30cb566..d1890c8 100644
--- a/twitch/migrations/0012_dropcampaign_operation_names_gin_index.py
+++ b/twitch/migrations/0012_dropcampaign_operation_names_gin_index.py
@@ -1,5 +1,5 @@
# Generated by Django 6.0.2 on 2026-02-12 12:00
-from __future__ import annotations
+
from django.contrib.postgres.indexes import GinIndex
from django.db import migrations
@@ -19,6 +19,9 @@ class Migration(migrations.Migration):
),
migrations.AddIndex(
model_name="dropcampaign",
- index=GinIndex(fields=["operation_names"], name="twitch_drop_operati_gin_idx"),
+ index=GinIndex(
+ fields=["operation_names"],
+ name="twitch_drop_operati_gin_idx",
+ ),
),
]
diff --git a/twitch/models.py b/twitch/models.py
index 7d88c28..5f704fd 100644
--- a/twitch/models.py
+++ b/twitch/models.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
import logging
from typing import TYPE_CHECKING
@@ -87,20 +85,12 @@ class Game(auto_prefetch.Model):
verbose_name="Slug",
help_text="Short unique identifier for the game.",
)
- name = models.TextField(
- blank=True,
- default="",
- verbose_name="Name",
- )
- display_name = models.TextField(
- blank=True,
- default="",
- verbose_name="Display name",
- )
+ name = models.TextField(blank=True, default="", verbose_name="Name")
+ display_name = models.TextField(blank=True, default="", verbose_name="Display name")
box_art = models.URLField( # noqa: DJ001
max_length=500,
blank=True,
- null=True, # We allow null here to distinguish between no box art and empty string
+ null=True,
default="",
verbose_name="Box art URL",
)
@@ -243,7 +233,9 @@ class TwitchGameData(auto_prefetch.Model):
blank=True,
default="",
verbose_name="Box art URL",
- help_text=("URL template with {width}x{height} placeholders for the box art image."),
+ help_text=(
+ "URL template with {width}x{height} placeholders for the box art image."
+ ),
)
igdb_id = models.TextField(blank=True, default="", verbose_name="IGDB ID")
@@ -322,9 +314,7 @@ class DropCampaign(auto_prefetch.Model):
editable=False,
help_text="The Twitch ID for this campaign.",
)
- name = models.TextField(
- help_text="Name of the drop campaign.",
- )
+ name = models.TextField(help_text="Name of the drop campaign.")
description = models.TextField(
blank=True,
help_text="Detailed description of the campaign.",
@@ -399,7 +389,7 @@ class DropCampaign(auto_prefetch.Model):
operation_names = models.JSONField(
default=list,
blank=True,
- help_text="List of GraphQL operation names used to fetch this campaign data (e.g., ['ViewerDropsDashboard', 'Inventory']).", # noqa: E501
+ help_text="List of GraphQL operation names used to fetch this campaign data (e.g., ['ViewerDropsDashboard', 'Inventory']).",
)
added_at = models.DateTimeField(
@@ -486,10 +476,7 @@ class DropCampaign(auto_prefetch.Model):
if self.image_file and getattr(self.image_file, "url", None):
return self.image_file.url
except (AttributeError, OSError, ValueError) as exc:
- logger.debug(
- "Failed to resolve DropCampaign.image_file url: %s",
- exc,
- )
+ logger.debug("Failed to resolve DropCampaign.image_file url: %s", exc)
if self.image_url:
return self.image_url
@@ -507,8 +494,9 @@ class DropCampaign(auto_prefetch.Model):
def duration_iso(self) -> str:
"""Return the campaign duration in ISO 8601 format (e.g., 'P3DT4H30M').
- This is used for the element's datetime attribute to provide machine-readable duration.
- If start_at or end_at is missing, returns an empty string.
+ This is used for the element's datetime attribute to provide
+ machine-readable duration. If start_at or end_at is missing, returns
+ an empty string.
"""
if not self.start_at or not self.end_at:
return ""
@@ -628,7 +616,9 @@ class DropBenefit(auto_prefetch.Model):
)
created_at = models.DateTimeField(
null=True,
- help_text=("Timestamp when the benefit was created. This is from Twitch API and not auto-generated."),
+ help_text=(
+ "Timestamp when the benefit was created. This is from Twitch API and not auto-generated."
+ ),
)
entitlement_limit = models.PositiveIntegerField(
default=1,
@@ -679,10 +669,7 @@ class DropBenefit(auto_prefetch.Model):
if self.image_file and getattr(self.image_file, "url", None):
return self.image_file.url
except (AttributeError, OSError, ValueError) as exc:
- logger.debug(
- "Failed to resolve DropBenefit.image_file url: %s",
- exc,
- )
+ logger.debug("Failed to resolve DropBenefit.image_file url: %s", exc)
return self.image_asset_url or ""
@@ -743,9 +730,7 @@ class TimeBasedDrop(auto_prefetch.Model):
editable=False,
help_text="The Twitch ID for this time-based drop.",
)
- name = models.TextField(
- help_text="Name of the time-based drop.",
- )
+ name = models.TextField(help_text="Name of the time-based drop.")
required_minutes_watched = models.PositiveIntegerField(
null=True,
blank=True,
@@ -821,9 +806,7 @@ class RewardCampaign(auto_prefetch.Model):
editable=False,
help_text="The Twitch ID for this reward campaign.",
)
- name = models.TextField(
- help_text="Name of the reward campaign.",
- )
+ name = models.TextField(help_text="Name of the reward campaign.")
brand = models.TextField(
blank=True,
default="",
@@ -956,10 +939,7 @@ class RewardCampaign(auto_prefetch.Model):
if self.image_file and getattr(self.image_file, "url", None):
return self.image_file.url
except (AttributeError, OSError, ValueError) as exc:
- logger.debug(
- "Failed to resolve RewardCampaign.image_file url: %s",
- exc,
- )
+ logger.debug("Failed to resolve RewardCampaign.image_file url: %s", exc)
return self.image_url or ""
def get_feed_title(self) -> str:
@@ -1002,15 +982,26 @@ class RewardCampaign(auto_prefetch.Model):
parts.append(format_html("{}
", end_part))
if self.is_sitewide:
- parts.append(SafeText("This is a sitewide reward campaign
"))
+ parts.append(
+ SafeText("This is a sitewide reward campaign
"),
+ )
elif self.game:
- parts.append(format_html("Game: {}
", self.game.display_name or self.game.name))
+ parts.append(
+ format_html(
+ "Game: {}
",
+ self.game.display_name or self.game.name,
+ ),
+ )
if self.about_url:
- parts.append(format_html('Learn more
', self.about_url))
+ parts.append(
+ format_html('Learn more
', self.about_url),
+ )
if self.external_url:
- parts.append(format_html('Redeem reward
', self.external_url))
+ parts.append(
+ format_html('Redeem reward
', self.external_url),
+ )
return "".join(str(p) for p in parts)
diff --git a/twitch/schemas.py b/twitch/schemas.py
index d4b46da..b4d0739 100644
--- a/twitch/schemas.py
+++ b/twitch/schemas.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
from typing import Literal
from pydantic import BaseModel
@@ -31,12 +29,24 @@ class GameSchema(BaseModel):
Handles both ViewerDropsDashboard and Inventory operation formats.
"""
- twitch_id: str = Field(alias="id") # Present in both ViewerDropsDashboard and Inventory formats
- display_name: str | None = Field(default=None, alias="displayName") # Present in both formats
- box_art_url: str | None = Field(default=None, alias="boxArtURL") # Present in both formats, made optional
- slug: str | None = None # Present in Inventory format
- name: str | None = None # Present in Inventory format (alternative to displayName)
- type_name: Literal["Game"] = Field(alias="__typename") # Present in both formats
+ # Present in both ViewerDropsDashboard and Inventory formats
+ twitch_id: str = Field(alias="id")
+
+ # Present in both formats
+ display_name: str | None = Field(default=None, alias="displayName")
+
+ # Present in both formats, made optional
+ box_art_url: str | None = Field(default=None, alias="boxArtURL")
+
+ # Present in Inventory format
+ slug: str | None = None
+
+ # Present in Inventory format (alternative to displayName)
+ name: str | None = None
+
+ # Present in both formats
+ type_name: Literal["Game"] = Field(alias="__typename")
+
owner_organization: dict | None = Field(default=None, alias="ownerOrganization")
model_config = {
@@ -51,8 +61,9 @@ class GameSchema(BaseModel):
def normalize_box_art_url(cls, v: str | None) -> str | None:
"""Normalize Twitch box art URLs to higher quality variants.
- Twitch's box art URLs often include size suffixes (e.g. -120x160) that point to lower quality images.
- This validator removes those suffixes to get the original higher quality image.
+ Twitch's box art URLs often include size suffixes (e.g. -120x160)
+ that point to lower quality images. This validator removes those
+ suffixes to get the original higher quality image.
Args:
v: The raw box_art_url value (str or None).
@@ -146,8 +157,11 @@ class DropBenefitSchema(BaseModel):
created_at: str | None = Field(default=None, alias="createdAt")
entitlement_limit: int = Field(default=1, alias="entitlementLimit")
is_ios_available: bool = Field(default=False, alias="isIosAvailable")
- distribution_type: str | None = Field(default=None, alias="distributionType") # Optional in some API responses
+
+ # Optional in some API responses
+ distribution_type: str | None = Field(default=None, alias="distributionType")
type_name: Literal["Benefit", "DropBenefit"] = Field(alias="__typename")
+
# API response fields that should be ignored
game: dict | None = None
owner_organization: dict | None = Field(default=None, alias="ownerOrganization")
@@ -169,7 +183,10 @@ class DropBenefitEdgeSchema(BaseModel):
benefit: DropBenefitSchema
entitlement_limit: int = Field(alias="entitlementLimit")
claim_count: int | None = Field(default=None, alias="claimCount")
- type_name: Literal["DropBenefitEdge"] | None = Field(default=None, alias="__typename")
+ type_name: Literal["DropBenefitEdge"] | None = Field(
+ default=None,
+ alias="__typename",
+ )
model_config = {
"extra": "forbid",
@@ -193,6 +210,7 @@ class TimeBasedDropSchema(BaseModel):
end_at: str | None = Field(alias="endAt")
benefit_edges: list[DropBenefitEdgeSchema] = Field(default=[], alias="benefitEdges")
type_name: Literal["TimeBasedDrop"] = Field(alias="__typename")
+
# Inventory-specific fields
precondition_drops: None = Field(default=None, alias="preconditionDrops")
self_edge: dict | None = Field(default=None, alias="self")
@@ -237,11 +255,16 @@ class DropCampaignSchema(BaseModel):
self: DropCampaignSelfEdgeSchema
start_at: str = Field(alias="startAt")
status: Literal["ACTIVE", "EXPIRED", "UPCOMING"]
- time_based_drops: list[TimeBasedDropSchema] = Field(default=[], alias="timeBasedDrops")
+ time_based_drops: list[TimeBasedDropSchema] = Field(
+ default=[],
+ alias="timeBasedDrops",
+ )
twitch_id: str = Field(alias="id")
type_name: Literal["DropCampaign"] = Field(alias="__typename")
+
# Campaign access control list - defines which channels can participate
allow: DropCampaignACLSchema | None = None
+
# Inventory-specific fields
event_based_drops: list | None = Field(default=None, alias="eventBasedDrops")
@@ -272,8 +295,12 @@ class DropCampaignSchema(BaseModel):
class InventorySchema(BaseModel):
"""Schema for the inventory field in Inventory operation responses."""
- drop_campaigns_in_progress: list[DropCampaignSchema] = Field(default=[], alias="dropCampaignsInProgress")
+ drop_campaigns_in_progress: list[DropCampaignSchema] = Field(
+ default=[],
+ alias="dropCampaignsInProgress",
+ )
type_name: Literal["Inventory"] = Field(alias="__typename")
+
# gameEventDrops field is present in Inventory but we don't process it yet
game_event_drops: list | None = Field(default=None, alias="gameEventDrops")
@@ -307,7 +334,10 @@ class CurrentUserSchema(BaseModel):
twitch_id: str = Field(alias="id")
login: str | None = None
- drop_campaigns: list[DropCampaignSchema] | None = Field(default=None, alias="dropCampaigns")
+ drop_campaigns: list[DropCampaignSchema] | None = Field(
+ default=None,
+ alias="dropCampaigns",
+ )
drop_campaign: DropCampaignSchema | None = Field(default=None, alias="dropCampaign")
inventory: InventorySchema | None = None
type_name: Literal["User"] = Field(alias="__typename")
@@ -467,8 +497,14 @@ class Reward(BaseModel):
twitch_id: str = Field(alias="id")
name: str
- banner_image: RewardCampaignImageSet | None = Field(default=None, alias="bannerImage")
- thumbnail_image: RewardCampaignImageSet | None = Field(default=None, alias="thumbnailImage")
+ banner_image: RewardCampaignImageSet | None = Field(
+ default=None,
+ alias="bannerImage",
+ )
+ thumbnail_image: RewardCampaignImageSet | None = Field(
+ default=None,
+ alias="thumbnailImage",
+ )
earnable_until: str | None = Field(default=None, alias="earnableUntil")
redemption_instructions: str = Field(default="", alias="redemptionInstructions")
redemption_url: str = Field(default="", alias="redemptionURL")
@@ -498,7 +534,10 @@ class RewardCampaign(BaseModel):
about_url: str = Field(default="", alias="aboutURL")
is_sitewide: bool = Field(default=False, alias="isSitewide")
game: dict | None = None
- unlock_requirements: QuestRewardUnlockRequirements | None = Field(default=None, alias="unlockRequirements")
+ unlock_requirements: QuestRewardUnlockRequirements | None = Field(
+ default=None,
+ alias="unlockRequirements",
+ )
image: RewardCampaignImageSet | None = None
rewards: list[Reward] = Field(default=[])
type_name: Literal["RewardCampaign"] = Field(alias="__typename")
diff --git a/twitch/templatetags/image_tags.py b/twitch/templatetags/image_tags.py
index ae3ccee..a246f94 100644
--- a/twitch/templatetags/image_tags.py
+++ b/twitch/templatetags/image_tags.py
@@ -1,7 +1,5 @@
"""Custom template tags for rendering responsive images with modern formats."""
-from __future__ import annotations
-
from typing import TYPE_CHECKING
from urllib.parse import urlparse
@@ -94,11 +92,15 @@ def picture( # noqa: PLR0913, PLR0917
# AVIF first (best compression)
if avif_url != src:
- sources.append(format_html(' ', avif_url))
+ sources.append(
+ format_html(' ', avif_url),
+ )
# WebP second (good compression, widely supported)
if webp_url != src:
- sources.append(format_html(' ', webp_url))
+ sources.append(
+ format_html(' ', webp_url),
+ )
# Build img tag with format_html
img_html: SafeString = format_html(
@@ -113,4 +115,8 @@ def picture( # noqa: PLR0913, PLR0917
)
# Combine all parts safely
- return format_html("{}{} ", SafeString("".join(sources)), img_html)
+ return format_html(
+ "{}{} ",
+ SafeString("".join(sources)),
+ img_html,
+ )
diff --git a/twitch/tests/test_backup.py b/twitch/tests/test_backup.py
index a0dd790..ee0001c 100644
--- a/twitch/tests/test_backup.py
+++ b/twitch/tests/test_backup.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
import io
import math
import os
@@ -142,7 +140,11 @@ class TestBackupCommand:
assert output_dir.exists()
assert len(list(output_dir.glob("test-*.sql.zst"))) == 1
- def test_backup_uses_default_directory(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
+ def test_backup_uses_default_directory(
+ self,
+ tmp_path: Path,
+ monkeypatch: pytest.MonkeyPatch,
+ ) -> None:
"""Test that backup uses DATA_DIR/datasets by default."""
_skip_if_pg_dump_missing()
# Create test data so tables exist
@@ -285,7 +287,9 @@ class TestDatasetBackupViews:
"""Test that dataset list view displays backup files."""
monkeypatch.setattr(settings, "DATA_DIR", datasets_dir.parent)
- response: _MonkeyPatchedWSGIResponse = client.get(reverse("twitch:dataset_backups"))
+ response: _MonkeyPatchedWSGIResponse = client.get(
+ reverse("twitch:dataset_backups"),
+ )
assert response.status_code == 200
assert b"ttvdrops-20260210-120000.sql.zst" in response.content
@@ -300,7 +304,9 @@ class TestDatasetBackupViews:
"""Test dataset list view with empty directory."""
monkeypatch.setattr(settings, "DATA_DIR", datasets_dir.parent)
- response: _MonkeyPatchedWSGIResponse = client.get(reverse("twitch:dataset_backups"))
+ response: _MonkeyPatchedWSGIResponse = client.get(
+ reverse("twitch:dataset_backups"),
+ )
assert response.status_code == 200
assert b"No dataset backups found" in response.content
@@ -332,7 +338,9 @@ class TestDatasetBackupViews:
os.utime(older_backup, (older_time, older_time))
os.utime(newer_backup, (newer_time, newer_time))
- response: _MonkeyPatchedWSGIResponse = client.get(reverse("twitch:dataset_backups"))
+ response: _MonkeyPatchedWSGIResponse = client.get(
+ reverse("twitch:dataset_backups"),
+ )
content = response.content.decode()
newer_pos = content.find("20260210-140000")
@@ -352,7 +360,10 @@ class TestDatasetBackupViews:
monkeypatch.setattr(settings, "DATA_DIR", datasets_dir.parent)
response: _MonkeyPatchedWSGIResponse = client.get(
- reverse("twitch:dataset_backup_download", args=["ttvdrops-20260210-120000.sql.zst"]),
+ reverse(
+ "twitch:dataset_backup_download",
+ args=["ttvdrops-20260210-120000.sql.zst"],
+ ),
)
assert response.status_code == 200
@@ -370,7 +381,9 @@ class TestDatasetBackupViews:
monkeypatch.setattr(settings, "DATA_DIR", datasets_dir.parent)
# Attempt path traversal
- response = client.get(reverse("twitch:dataset_backup_download", args=["../../../etc/passwd"]))
+ response = client.get(
+ reverse("twitch:dataset_backup_download", args=["../../../etc/passwd"]),
+ )
assert response.status_code == 404
def test_dataset_download_rejects_invalid_extensions(
@@ -386,7 +399,9 @@ class TestDatasetBackupViews:
invalid_file = datasets_dir / "malicious.exe"
invalid_file.write_text("not a backup")
- response = client.get(reverse("twitch:dataset_backup_download", args=["malicious.exe"]))
+ response = client.get(
+ reverse("twitch:dataset_backup_download", args=["malicious.exe"]),
+ )
assert response.status_code == 404
def test_dataset_download_file_not_found(
@@ -398,7 +413,9 @@ class TestDatasetBackupViews:
"""Test download returns 404 for non-existent file."""
monkeypatch.setattr(settings, "DATA_DIR", datasets_dir.parent)
- response = client.get(reverse("twitch:dataset_backup_download", args=["nonexistent.sql.zst"]))
+ response = client.get(
+ reverse("twitch:dataset_backup_download", args=["nonexistent.sql.zst"]),
+ )
assert response.status_code == 404
def test_dataset_list_view_shows_file_sizes(
@@ -411,7 +428,9 @@ class TestDatasetBackupViews:
"""Test that file sizes are displayed in human-readable format."""
monkeypatch.setattr(settings, "DATA_DIR", datasets_dir.parent)
- response: _MonkeyPatchedWSGIResponse = client.get(reverse("twitch:dataset_backups"))
+ response: _MonkeyPatchedWSGIResponse = client.get(
+ reverse("twitch:dataset_backups"),
+ )
assert response.status_code == 200
# Should contain size information (bytes, KB, MB, or GB)
@@ -432,7 +451,9 @@ class TestDatasetBackupViews:
(datasets_dir / "readme.txt").write_text("should be ignored")
(datasets_dir / "old_backup.gz").write_bytes(b"should be ignored")
- response: _MonkeyPatchedWSGIResponse = client.get(reverse("twitch:dataset_backups"))
+ response: _MonkeyPatchedWSGIResponse = client.get(
+ reverse("twitch:dataset_backups"),
+ )
content = response.content.decode()
assert "backup.sql.zst" in content
diff --git a/twitch/tests/test_badge_views.py b/twitch/tests/test_badge_views.py
index 312b860..191f2be 100644
--- a/twitch/tests/test_badge_views.py
+++ b/twitch/tests/test_badge_views.py
@@ -1,7 +1,5 @@
"""Tests for chat badge views."""
-from __future__ import annotations
-
from typing import TYPE_CHECKING
import pytest
@@ -73,7 +71,9 @@ class TestBadgeSetDetailView:
def test_badge_set_detail_not_found(self, client: Client) -> None:
"""Test 404 when badge set doesn't exist."""
- response = client.get(reverse("twitch:badge_set_detail", kwargs={"set_id": "nonexistent"}))
+ response = client.get(
+ reverse("twitch:badge_set_detail", kwargs={"set_id": "nonexistent"}),
+ )
assert response.status_code == 404
def test_badge_set_detail_displays_badges(self, client: Client) -> None:
@@ -91,7 +91,9 @@ class TestBadgeSetDetailView:
click_url="https://help.twitch.tv",
)
- response = client.get(reverse("twitch:badge_set_detail", kwargs={"set_id": "moderator"}))
+ response = client.get(
+ reverse("twitch:badge_set_detail", kwargs={"set_id": "moderator"}),
+ )
assert response.status_code == 200
content = response.content.decode()
@@ -113,7 +115,9 @@ class TestBadgeSetDetailView:
description="VIP Badge",
)
- response = client.get(reverse("twitch:badge_set_detail", kwargs={"set_id": "vip"}))
+ response = client.get(
+ reverse("twitch:badge_set_detail", kwargs={"set_id": "vip"}),
+ )
assert response.status_code == 200
content = response.content.decode()
@@ -133,7 +137,9 @@ class TestBadgeSetDetailView:
description="Test Badge",
)
- response = client.get(reverse("twitch:badge_set_detail", kwargs={"set_id": "test_set"}))
+ response = client.get(
+ reverse("twitch:badge_set_detail", kwargs={"set_id": "test_set"}),
+ )
assert response.status_code == 200
content = response.content.decode()
diff --git a/twitch/tests/test_better_import_drops.py b/twitch/tests/test_better_import_drops.py
index f4764e5..8150346 100644
--- a/twitch/tests/test_better_import_drops.py
+++ b/twitch/tests/test_better_import_drops.py
@@ -1,7 +1,6 @@
-from __future__ import annotations
-
import json
from pathlib import Path
+from typing import TYPE_CHECKING
from unittest import skipIf
from django.db import connection
@@ -9,7 +8,6 @@ from django.test import TestCase
from twitch.management.commands.better_import_drops import Command
from twitch.management.commands.better_import_drops import detect_error_only_response
-from twitch.models import Channel
from twitch.models import DropBenefit
from twitch.models import DropCampaign
from twitch.models import Game
@@ -17,25 +15,24 @@ from twitch.models import Organization
from twitch.models import TimeBasedDrop
from twitch.schemas import DropBenefitSchema
+if TYPE_CHECKING:
+ from twitch.models import Channel
+
class GetOrUpdateBenefitTests(TestCase):
"""Tests for the _get_or_update_benefit method in better_import_drops.Command."""
def test_defaults_distribution_type_when_missing(self) -> None:
"""Ensure importer sets distribution_type to empty string when absent."""
- command = Command()
- command.benefit_cache = {}
-
- benefit_schema: DropBenefitSchema = DropBenefitSchema.model_validate(
- {
- "id": "benefit-missing-distribution-type",
- "name": "Test Benefit",
- "imageAssetURL": "https://example.com/benefit.png",
- "entitlementLimit": 1,
- "isIosAvailable": False,
- "__typename": "DropBenefit",
- },
- )
+ command: Command = Command()
+ benefit_schema: DropBenefitSchema = DropBenefitSchema.model_validate({
+ "id": "benefit-missing-distribution-type",
+ "name": "Test Benefit",
+ "imageAssetURL": "https://example.com/benefit.png",
+ "entitlementLimit": 1,
+ "isIosAvailable": False,
+ "__typename": "DropBenefit",
+ })
benefit: DropBenefit = command._get_or_update_benefit(benefit_schema)
@@ -64,7 +61,10 @@ class ExtractCampaignsTests(TestCase):
"detailsURL": "http://example.com",
"imageURL": "",
"status": "ACTIVE",
- "self": {"isAccountConnected": False, "__typename": "DropCampaignSelfEdge"},
+ "self": {
+ "isAccountConnected": False,
+ "__typename": "DropCampaignSelfEdge",
+ },
"game": {
"id": "g1",
"displayName": "Test Game",
@@ -82,9 +82,7 @@ class ExtractCampaignsTests(TestCase):
"__typename": "User",
},
},
- "extensions": {
- "operationName": "TestOp",
- },
+ "extensions": {"operationName": "TestOp"},
}
# Validate response
@@ -147,9 +145,7 @@ class ExtractCampaignsTests(TestCase):
"__typename": "User",
},
},
- "extensions": {
- "operationName": "Inventory",
- },
+ "extensions": {"operationName": "Inventory"},
}
# Validate and process response
@@ -163,7 +159,9 @@ class ExtractCampaignsTests(TestCase):
assert broken_dir is None
# Check that campaign was created with operation_name
- campaign: DropCampaign = DropCampaign.objects.get(twitch_id="inventory-campaign-1")
+ campaign: DropCampaign = DropCampaign.objects.get(
+ twitch_id="inventory-campaign-1",
+ )
assert campaign.name == "Test Inventory Campaign"
assert campaign.operation_names == ["Inventory"]
@@ -184,9 +182,7 @@ class ExtractCampaignsTests(TestCase):
"__typename": "User",
},
},
- "extensions": {
- "operationName": "Inventory",
- },
+ "extensions": {"operationName": "Inventory"},
}
# Should validate successfully even with null campaigns
@@ -261,9 +257,7 @@ class ExtractCampaignsTests(TestCase):
"__typename": "User",
},
},
- "extensions": {
- "operationName": "Inventory",
- },
+ "extensions": {"operationName": "Inventory"},
}
# Validate and process response
@@ -277,7 +271,9 @@ class ExtractCampaignsTests(TestCase):
assert broken_dir is None
# Check that campaign was created and allow_is_enabled defaults to True
- campaign: DropCampaign = DropCampaign.objects.get(twitch_id="inventory-campaign-2")
+ campaign: DropCampaign = DropCampaign.objects.get(
+ twitch_id="inventory-campaign-2",
+ )
assert campaign.name == "Test ACL Campaign"
assert campaign.allow_is_enabled is True # Should default to True
@@ -304,10 +300,7 @@ class CampaignStructureDetectionTests(TestCase):
"id": "123",
"inventory": {
"dropCampaignsInProgress": [
- {
- "id": "c1",
- "name": "Test Campaign",
- },
+ {"id": "c1", "name": "Test Campaign"},
],
"__typename": "Inventory",
},
@@ -349,12 +342,7 @@ class CampaignStructureDetectionTests(TestCase):
"data": {
"currentUser": {
"id": "123",
- "dropCampaigns": [
- {
- "id": "c1",
- "name": "Test Campaign",
- },
- ],
+ "dropCampaigns": [{"id": "c1", "name": "Test Campaign"}],
"__typename": "User",
},
},
@@ -367,7 +355,10 @@ class CampaignStructureDetectionTests(TestCase):
class OperationNameFilteringTests(TestCase):
"""Tests for filtering campaigns by operation_name field."""
- @skipIf(connection.vendor == "sqlite", reason="SQLite doesn't support JSON contains lookup")
+ @skipIf(
+ connection.vendor == "sqlite",
+ reason="SQLite doesn't support JSON contains lookup",
+ )
def test_can_filter_campaigns_by_operation_name(self) -> None:
"""Ensure campaigns can be filtered by operation_name to separate data sources."""
command = Command()
@@ -388,7 +379,10 @@ class OperationNameFilteringTests(TestCase):
"detailsURL": "https://example.com",
"imageURL": "",
"status": "ACTIVE",
- "self": {"isAccountConnected": False, "__typename": "DropCampaignSelfEdge"},
+ "self": {
+ "isAccountConnected": False,
+ "__typename": "DropCampaignSelfEdge",
+ },
"game": {
"id": "game-1",
"displayName": "Game 1",
@@ -407,9 +401,7 @@ class OperationNameFilteringTests(TestCase):
"__typename": "User",
},
},
- "extensions": {
- "operationName": "ViewerDropsDashboard",
- },
+ "extensions": {"operationName": "ViewerDropsDashboard"},
}
# Import an Inventory campaign
@@ -429,7 +421,10 @@ class OperationNameFilteringTests(TestCase):
"detailsURL": "https://example.com",
"imageURL": "",
"status": "ACTIVE",
- "self": {"isAccountConnected": True, "__typename": "DropCampaignSelfEdge"},
+ "self": {
+ "isAccountConnected": True,
+ "__typename": "DropCampaignSelfEdge",
+ },
"game": {
"id": "game-2",
"displayName": "Game 2",
@@ -452,9 +447,7 @@ class OperationNameFilteringTests(TestCase):
"__typename": "User",
},
},
- "extensions": {
- "operationName": "Inventory",
- },
+ "extensions": {"operationName": "Inventory"},
}
# Process both payloads
@@ -462,8 +455,12 @@ class OperationNameFilteringTests(TestCase):
command.process_responses([inventory_payload], Path("inventory.json"), {})
# Verify we can filter by operation_names with JSON containment
- viewer_campaigns = DropCampaign.objects.filter(operation_names__contains=["ViewerDropsDashboard"])
- inventory_campaigns = DropCampaign.objects.filter(operation_names__contains=["Inventory"])
+ viewer_campaigns = DropCampaign.objects.filter(
+ operation_names__contains=["ViewerDropsDashboard"],
+ )
+ inventory_campaigns = DropCampaign.objects.filter(
+ operation_names__contains=["Inventory"],
+ )
assert len(viewer_campaigns) >= 1
assert len(inventory_campaigns) >= 1
@@ -501,7 +498,10 @@ class GameImportTests(TestCase):
"detailsURL": "https://example.com/details",
"imageURL": "",
"status": "ACTIVE",
- "self": {"isAccountConnected": True, "__typename": "DropCampaignSelfEdge"},
+ "self": {
+ "isAccountConnected": True,
+ "__typename": "DropCampaignSelfEdge",
+ },
"game": {
"id": "497057",
"slug": "destiny-2",
@@ -558,12 +558,17 @@ class ExampleJsonImportTests(TestCase):
assert success is True
assert broken_dir is None
- campaign: DropCampaign = DropCampaign.objects.get(twitch_id="3b965979-ecd2-11f0-876e-0a58a9feac02")
+ campaign: DropCampaign = DropCampaign.objects.get(
+ twitch_id="3b965979-ecd2-11f0-876e-0a58a9feac02",
+ )
# Core campaign fields
assert campaign.name == "Jan Drops Week 2"
assert "Viewers will receive 50 Wandering Market Coins" in campaign.description
- assert campaign.details_url == "https://www.smite2.com/news/closed-alpha-twitch-drops/"
+ assert (
+ campaign.details_url
+ == "https://www.smite2.com/news/closed-alpha-twitch-drops/"
+ )
assert campaign.account_link_url == "https://link.smite2.com/"
# The regression: ensure imageURL makes it into DropCampaign.image_url
@@ -584,17 +589,23 @@ class ExampleJsonImportTests(TestCase):
assert game.display_name == "SMITE 2"
assert game.slug == "smite-2"
- org: Organization = Organization.objects.get(twitch_id="51a157a0-674a-4863-b120-7bb6ee2466a8")
+ org: Organization = Organization.objects.get(
+ twitch_id="51a157a0-674a-4863-b120-7bb6ee2466a8",
+ )
assert org.name == "Hi-Rez Studios"
assert game.owners.filter(pk=org.pk).exists()
# Drops + benefits
assert TimeBasedDrop.objects.filter(campaign=campaign).count() == 6
- first_drop: TimeBasedDrop = TimeBasedDrop.objects.get(twitch_id="933c8f91-ecd2-11f0-b3fd-0a58a9feac02")
+ first_drop: TimeBasedDrop = TimeBasedDrop.objects.get(
+ twitch_id="933c8f91-ecd2-11f0-b3fd-0a58a9feac02",
+ )
assert first_drop.name == "Market Coins Bundle 1"
assert first_drop.required_minutes_watched == 120
assert DropBenefit.objects.count() == 1
- benefit: DropBenefit = DropBenefit.objects.get(twitch_id="ccb3fb7f-e59b-11ef-aef0-0a58a9feac02")
+ benefit: DropBenefit = DropBenefit.objects.get(
+ twitch_id="ccb3fb7f-e59b-11ef-aef0-0a58a9feac02",
+ )
assert (
benefit.image_asset_url
== "https://static-cdn.jtvnw.net/twitch-quests-assets/REWARD/903496ad-de97-41ff-ad97-12f099e20ea8.jpeg"
@@ -645,7 +656,10 @@ class ImporterRobustnessTests(TestCase):
"detailsURL": "https://example.com/details",
"imageURL": None,
"status": "ACTIVE",
- "self": {"isAccountConnected": False, "__typename": "DropCampaignSelfEdge"},
+ "self": {
+ "isAccountConnected": False,
+ "__typename": "DropCampaignSelfEdge",
+ },
"game": {
"id": "g-null-image",
"displayName": "Test Game",
@@ -694,12 +708,7 @@ class ErrorOnlyResponseDetectionTests(TestCase):
def test_detects_error_only_response_with_null_data(self) -> None:
"""Ensure error-only response with null data field is detected."""
parsed_json = {
- "errors": [
- {
- "message": "internal server error",
- "path": ["data"],
- },
- ],
+ "errors": [{"message": "internal server error", "path": ["data"]}],
"data": None,
}
@@ -708,14 +717,7 @@ class ErrorOnlyResponseDetectionTests(TestCase):
def test_detects_error_only_response_with_empty_data(self) -> None:
"""Ensure error-only response with empty data dict is allowed through."""
- parsed_json = {
- "errors": [
- {
- "message": "unauthorized",
- },
- ],
- "data": {},
- }
+ parsed_json = {"errors": [{"message": "unauthorized"}], "data": {}}
result = detect_error_only_response(parsed_json)
# Empty dict {} is considered "data exists" so this should pass
@@ -723,13 +725,7 @@ class ErrorOnlyResponseDetectionTests(TestCase):
def test_detects_error_only_response_without_data_key(self) -> None:
"""Ensure error-only response without data key is detected."""
- parsed_json = {
- "errors": [
- {
- "message": "missing data",
- },
- ],
- }
+ parsed_json = {"errors": [{"message": "missing data"}]}
result = detect_error_only_response(parsed_json)
assert result == "error_only: missing data"
@@ -737,16 +733,8 @@ class ErrorOnlyResponseDetectionTests(TestCase):
def test_allows_response_with_both_errors_and_data(self) -> None:
"""Ensure responses with both errors and valid data are not flagged."""
parsed_json = {
- "errors": [
- {
- "message": "partial failure",
- },
- ],
- "data": {
- "currentUser": {
- "dropCampaigns": [],
- },
- },
+ "errors": [{"message": "partial failure"}],
+ "data": {"currentUser": {"dropCampaigns": []}},
}
result = detect_error_only_response(parsed_json)
@@ -754,28 +742,14 @@ class ErrorOnlyResponseDetectionTests(TestCase):
def test_allows_response_with_no_errors(self) -> None:
"""Ensure normal responses without errors are not flagged."""
- parsed_json = {
- "data": {
- "currentUser": {
- "dropCampaigns": [],
- },
- },
- }
+ parsed_json = {"data": {"currentUser": {"dropCampaigns": []}}}
result = detect_error_only_response(parsed_json)
assert result is None
def test_detects_error_only_in_list_of_responses(self) -> None:
"""Ensure error-only detection works with list of responses."""
- parsed_json = [
- {
- "errors": [
- {
- "message": "rate limit exceeded",
- },
- ],
- },
- ]
+ parsed_json = [{"errors": [{"message": "rate limit exceeded"}]}]
result = detect_error_only_response(parsed_json)
assert result == "error_only: rate limit exceeded"
@@ -804,22 +778,14 @@ class ErrorOnlyResponseDetectionTests(TestCase):
def test_returns_none_for_empty_errors_list(self) -> None:
"""Ensure empty errors list is not flagged as error-only."""
- parsed_json = {
- "errors": [],
- }
+ parsed_json = {"errors": []}
result = detect_error_only_response(parsed_json)
assert result is None
def test_handles_error_without_message_field(self) -> None:
"""Ensure errors without message field use default text."""
- parsed_json = {
- "errors": [
- {
- "path": ["data"],
- },
- ],
- }
+ parsed_json = {"errors": [{"path": ["data"]}]}
result = detect_error_only_response(parsed_json)
assert result == "error_only: unknown error"
diff --git a/twitch/tests/test_chat_badges.py b/twitch/tests/test_chat_badges.py
index 494c34c..185dbf4 100644
--- a/twitch/tests/test_chat_badges.py
+++ b/twitch/tests/test_chat_badges.py
@@ -1,7 +1,5 @@
"""Tests for chat badge models and functionality."""
-from __future__ import annotations
-
import pytest
from django.db import IntegrityError
from pydantic import ValidationError
diff --git a/twitch/tests/test_exports.py b/twitch/tests/test_exports.py
index e6167d3..57d5ec3 100644
--- a/twitch/tests/test_exports.py
+++ b/twitch/tests/test_exports.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
import json
from datetime import timedelta
diff --git a/twitch/tests/test_feeds.py b/twitch/tests/test_feeds.py
index 31e7abf..7492691 100644
--- a/twitch/tests/test_feeds.py
+++ b/twitch/tests/test_feeds.py
@@ -1,7 +1,5 @@
"""Test RSS feeds."""
-from __future__ import annotations
-
from collections.abc import Callable
from contextlib import AbstractContextManager
from datetime import timedelta
@@ -119,7 +117,10 @@ class RSSFeedTestCase(TestCase):
def test_organization_campaign_feed(self) -> None:
"""Test organization-specific campaign feed returns 200."""
- url: str = reverse("twitch:organization_campaign_feed", args=[self.org.twitch_id])
+ url: str = reverse(
+ "twitch:organization_campaign_feed",
+ args=[self.org.twitch_id],
+ )
response: _MonkeyPatchedWSGIResponse = self.client.get(url)
assert response.status_code == 200
assert response["Content-Type"] == "application/rss+xml; charset=utf-8"
@@ -180,7 +181,10 @@ class RSSFeedTestCase(TestCase):
)
# Get feed for first organization
- url: str = reverse("twitch:organization_campaign_feed", args=[self.org.twitch_id])
+ url: str = reverse(
+ "twitch:organization_campaign_feed",
+ args=[self.org.twitch_id],
+ )
response: _MonkeyPatchedWSGIResponse = self.client.get(url)
content: str = response.content.decode("utf-8")
@@ -256,7 +260,10 @@ def _build_reward_campaign(game: Game, idx: int) -> RewardCampaign:
@pytest.mark.django_db
-def test_campaign_feed_queries_bounded(client: Client, django_assert_num_queries: QueryAsserter) -> None:
+def test_campaign_feed_queries_bounded(
+ client: Client,
+ django_assert_num_queries: QueryAsserter,
+) -> None:
"""Campaign feed should stay within a small, fixed query budget."""
org: Organization = Organization.objects.create(
twitch_id="test-org-queries",
@@ -274,7 +281,7 @@ def test_campaign_feed_queries_bounded(client: Client, django_assert_num_queries
_build_campaign(game, i)
url: str = reverse("twitch:campaign_feed")
- # TODO(TheLovinator): 14 queries is still quite high for a feed - we should be able to optimize this further, but this is a good starting point to prevent regressions for now. # noqa: E501, TD003
+ # TODO(TheLovinator): 14 queries is still quite high for a feed - we should be able to optimize this further, but this is a good starting point to prevent regressions for now. # noqa: TD003
with django_assert_num_queries(14, exact=False):
response: _MonkeyPatchedWSGIResponse = client.get(url)
@@ -339,7 +346,10 @@ def test_campaign_feed_queries_do_not_scale_with_items(
@pytest.mark.django_db
-def test_game_campaign_feed_queries_bounded(client: Client, django_assert_num_queries: QueryAsserter) -> None:
+def test_game_campaign_feed_queries_bounded(
+ client: Client,
+ django_assert_num_queries: QueryAsserter,
+) -> None:
"""Game campaign feed should not issue excess queries when rendering multiple campaigns."""
org: Organization = Organization.objects.create(
twitch_id="test-org-game-queries",
@@ -358,7 +368,7 @@ def test_game_campaign_feed_queries_bounded(client: Client, django_assert_num_qu
url: str = reverse("twitch:game_campaign_feed", args=[game.twitch_id])
- # TODO(TheLovinator): 15 queries is still quite high for a feed - we should be able to optimize this further, but this is a good starting point to prevent regressions for now. # noqa: E501, TD003
+ # TODO(TheLovinator): 15 queries is still quite high for a feed - we should be able to optimize this further, but this is a good starting point to prevent regressions for now. # noqa: TD003
with django_assert_num_queries(6, exact=False):
response: _MonkeyPatchedWSGIResponse = client.get(url)
@@ -395,13 +405,13 @@ def test_game_campaign_feed_queries_do_not_scale_with_items(
@pytest.mark.django_db
-def test_organization_feed_queries_bounded(client: Client, django_assert_num_queries: QueryAsserter) -> None:
+def test_organization_feed_queries_bounded(
+ client: Client,
+ django_assert_num_queries: QueryAsserter,
+) -> None:
"""Organization RSS feed should stay within a modest query budget."""
for i in range(5):
- Organization.objects.create(
- twitch_id=f"org-feed-{i}",
- name=f"Org Feed {i}",
- )
+ Organization.objects.create(twitch_id=f"org-feed-{i}", name=f"Org Feed {i}")
url: str = reverse("twitch:organization_feed")
with django_assert_num_queries(1, exact=True):
@@ -411,7 +421,10 @@ def test_organization_feed_queries_bounded(client: Client, django_assert_num_que
@pytest.mark.django_db
-def test_game_feed_queries_bounded(client: Client, django_assert_num_queries: QueryAsserter) -> None:
+def test_game_feed_queries_bounded(
+ client: Client,
+ django_assert_num_queries: QueryAsserter,
+) -> None:
"""Game RSS feed should stay within a modest query budget with multiple games."""
org: Organization = Organization.objects.create(
twitch_id="game-feed-org",
@@ -435,7 +448,10 @@ def test_game_feed_queries_bounded(client: Client, django_assert_num_queries: Qu
@pytest.mark.django_db
-def test_organization_campaign_feed_queries_bounded(client: Client, django_assert_num_queries: QueryAsserter) -> None:
+def test_organization_campaign_feed_queries_bounded(
+ client: Client,
+ django_assert_num_queries: QueryAsserter,
+) -> None:
"""Organization campaign feed should not regress in query count."""
org: Organization = Organization.objects.create(
twitch_id="org-campaign-feed",
@@ -453,7 +469,7 @@ def test_organization_campaign_feed_queries_bounded(client: Client, django_asser
_build_campaign(game, i)
url: str = reverse("twitch:organization_campaign_feed", args=[org.twitch_id])
- # TODO(TheLovinator): 12 queries is still quite high for a feed - we should be able to optimize this further, but this is a good starting point to prevent regressions for now. # noqa: E501, TD003
+ # TODO(TheLovinator): 12 queries is still quite high for a feed - we should be able to optimize this further, but this is a good starting point to prevent regressions for now. # noqa: TD003
with django_assert_num_queries(12, exact=False):
response: _MonkeyPatchedWSGIResponse = client.get(url)
@@ -490,7 +506,10 @@ def test_organization_campaign_feed_queries_do_not_scale_with_items(
@pytest.mark.django_db
-def test_reward_campaign_feed_queries_bounded(client: Client, django_assert_num_queries: QueryAsserter) -> None:
+def test_reward_campaign_feed_queries_bounded(
+ client: Client,
+ django_assert_num_queries: QueryAsserter,
+) -> None:
"""Reward campaign feed should stay within a modest query budget."""
org: Organization = Organization.objects.create(
twitch_id="reward-feed-org",
@@ -515,7 +534,10 @@ def test_reward_campaign_feed_queries_bounded(client: Client, django_assert_num_
@pytest.mark.django_db
-def test_docs_rss_queries_bounded(client: Client, django_assert_num_queries: QueryAsserter) -> None:
+def test_docs_rss_queries_bounded(
+ client: Client,
+ django_assert_num_queries: QueryAsserter,
+) -> None:
"""Docs RSS page should stay within a reasonable query budget.
With limit=1 for documentation examples, we should have dramatically fewer queries
@@ -539,7 +561,7 @@ def test_docs_rss_queries_bounded(client: Client, django_assert_num_queries: Que
url: str = reverse("twitch:docs_rss")
- # TODO(TheLovinator): 31 queries is still quite high for a feed - we should be able to optimize this further, but this is a good starting point to prevent regressions for now. # noqa: E501, TD003
+ # TODO(TheLovinator): 31 queries is still quite high for a feed - we should be able to optimize this further, but this is a good starting point to prevent regressions for now. # noqa: TD003
with django_assert_num_queries(31, exact=False):
response: _MonkeyPatchedWSGIResponse = client.get(url)
@@ -576,7 +598,11 @@ URL_NAMES: list[tuple[str, dict[str, str]]] = [
@pytest.mark.django_db
@pytest.mark.parametrize(("url_name", "kwargs"), URL_NAMES)
-def test_rss_feeds_return_200(client: Client, url_name: str, kwargs: dict[str, str]) -> None:
+def test_rss_feeds_return_200(
+ client: Client,
+ url_name: str,
+ kwargs: dict[str, str],
+) -> None:
"""Test if feeds return HTTP 200.
Args:
@@ -626,9 +652,7 @@ def test_rss_feeds_return_200(client: Client, url_name: str, kwargs: dict[str, s
display_name="TestChannel",
)
- badge_set: ChatBadgeSet = ChatBadgeSet.objects.create(
- set_id="test-set-123",
- )
+ badge_set: ChatBadgeSet = ChatBadgeSet.objects.create(set_id="test-set-123")
_badge: ChatBadge = ChatBadge.objects.create(
badge_set=badge_set,
diff --git a/twitch/tests/test_game_owner_organization.py b/twitch/tests/test_game_owner_organization.py
index c86f46b..db833d9 100644
--- a/twitch/tests/test_game_owner_organization.py
+++ b/twitch/tests/test_game_owner_organization.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
from pathlib import Path
from django.test import TestCase
@@ -30,7 +28,10 @@ class GameOwnerOrganizationTests(TestCase):
"detailsURL": "https://help.twitch.tv/s/article/twitch-chat-badges-guide",
"imageURL": "https://static-cdn.jtvnw.net/twitch-quests-assets/CAMPAIGN/495ebb6b-8134-4e51-b9d0-1f4a221b4f8d.png",
"status": "ACTIVE",
- "self": {"isAccountConnected": True, "__typename": "DropCampaignSelfEdge"},
+ "self": {
+ "isAccountConnected": True,
+ "__typename": "DropCampaignSelfEdge",
+ },
"game": {
"id": "263490",
"slug": "rust",
@@ -42,10 +43,18 @@ class GameOwnerOrganizationTests(TestCase):
"__typename": "Organization",
},
},
- "owner": {"id": "other-org-id", "name": "Other Org", "__typename": "Organization"},
+ "owner": {
+ "id": "other-org-id",
+ "name": "Other Org",
+ "__typename": "Organization",
+ },
"timeBasedDrops": [],
"eventBasedDrops": [],
- "allow": {"channels": None, "isEnabled": False, "__typename": "DropCampaignACL"},
+ "allow": {
+ "channels": None,
+ "isEnabled": False,
+ "__typename": "DropCampaignACL",
+ },
"__typename": "DropCampaign",
},
"__typename": "User",
@@ -65,7 +74,9 @@ class GameOwnerOrganizationTests(TestCase):
# Check game owners include Twitch Gaming and Other Org
game: Game = Game.objects.get(twitch_id="263490")
- org1: Organization = Organization.objects.get(twitch_id="d32de13d-937e-4196-8198-1a7f875f295a")
+ org1: Organization = Organization.objects.get(
+ twitch_id="d32de13d-937e-4196-8198-1a7f875f295a",
+ )
org2: Organization = Organization.objects.get(twitch_id="other-org-id")
owners = list(game.owners.all())
assert org1 in owners
diff --git a/twitch/tests/test_image_tags.py b/twitch/tests/test_image_tags.py
index dc29fe9..702fac2 100644
--- a/twitch/tests/test_image_tags.py
+++ b/twitch/tests/test_image_tags.py
@@ -1,7 +1,5 @@
"""Tests for custom image template tags."""
-from __future__ import annotations
-
from django.template import Context
from django.template import Template
from django.utils.safestring import SafeString
@@ -19,11 +17,16 @@ class TestGetFormatUrl:
def test_jpg_to_webp(self) -> None:
"""Test converting JPG to WebP."""
- assert get_format_url("/static/img/banner.jpg", "webp") == "/static/img/banner.webp"
+ assert (
+ get_format_url("/static/img/banner.jpg", "webp")
+ == "/static/img/banner.webp"
+ )
def test_jpeg_to_avif(self) -> None:
"""Test converting JPEG to AVIF."""
- assert get_format_url("/static/img/photo.jpeg", "avif") == "/static/img/photo.avif"
+ assert (
+ get_format_url("/static/img/photo.jpeg", "avif") == "/static/img/photo.avif"
+ )
def test_png_to_webp(self) -> None:
"""Test converting PNG to WebP."""
@@ -31,7 +34,9 @@ class TestGetFormatUrl:
def test_uppercase_extension(self) -> None:
"""Test converting uppercase extensions."""
- assert get_format_url("/static/img/photo.JPG", "webp") == "/static/img/photo.webp"
+ assert (
+ get_format_url("/static/img/photo.JPG", "webp") == "/static/img/photo.webp"
+ )
def test_non_convertible_format(self) -> None:
"""Test that non-convertible formats return unchanged."""
@@ -187,7 +192,9 @@ class TestPictureTag:
def test_twitch_cdn_url_simple_img(self) -> None:
"""Test that Twitch CDN URLs return simple img tag without picture element."""
- result: SafeString = picture("https://static-cdn.jtvnw.net/ttv-boxart/1292861145.jpg")
+ result: SafeString = picture(
+ "https://static-cdn.jtvnw.net/ttv-boxart/1292861145.jpg",
+ )
# Should NOT have picture element
assert "" not in result
@@ -228,7 +235,9 @@ class TestPictureTag:
def test_twitch_cdn_url_with_png(self) -> None:
"""Test Twitch CDN URL with PNG format."""
- result: SafeString = picture("https://static-cdn.jtvnw.net/badges/v1/1234567.png")
+ result: SafeString = picture(
+ "https://static-cdn.jtvnw.net/badges/v1/1234567.png",
+ )
# Should NOT have picture element or source tags
assert "" not in result
@@ -244,7 +253,9 @@ class TestPictureTagTemplate:
def test_picture_tag_in_template(self) -> None:
"""Test that the picture tag works when called from a template."""
- template = Template('{% load image_tags %}{% picture src="/img/photo.jpg" alt="Test" %}')
+ template = Template(
+ '{% load image_tags %}{% picture src="/img/photo.jpg" alt="Test" %}',
+ )
context = Context({})
result: SafeString = template.render(context)
@@ -257,7 +268,9 @@ class TestPictureTagTemplate:
def test_picture_tag_with_context_variables(self) -> None:
"""Test using context variables in the picture tag."""
- template = Template("{% load image_tags %}{% picture src=image_url alt=image_alt width=image_width %}")
+ template = Template(
+ "{% load image_tags %}{% picture src=image_url alt=image_alt width=image_width %}",
+ )
context = Context({
"image_url": "/img/banner.png",
"image_alt": "Banner image",
diff --git a/twitch/tests/test_schemas.py b/twitch/tests/test_schemas.py
index 3249db9..6403ceb 100644
--- a/twitch/tests/test_schemas.py
+++ b/twitch/tests/test_schemas.py
@@ -1,12 +1,14 @@
"""Tests for Pydantic schemas used in the import process."""
-from __future__ import annotations
+from typing import TYPE_CHECKING
-from twitch.schemas import DropBenefitSchema
-from twitch.schemas import DropCampaignSchema
from twitch.schemas import GameSchema
from twitch.schemas import GraphQLResponse
-from twitch.schemas import TimeBasedDropSchema
+
+if TYPE_CHECKING:
+ from twitch.schemas import DropBenefitSchema
+ from twitch.schemas import DropCampaignSchema
+ from twitch.schemas import TimeBasedDropSchema
def test_inventory_operation_validation() -> None:
@@ -88,9 +90,7 @@ def test_inventory_operation_validation() -> None:
"__typename": "User",
},
},
- "extensions": {
- "operationName": "Inventory",
- },
+ "extensions": {"operationName": "Inventory"},
}
# This should not raise ValidationError
@@ -121,16 +121,16 @@ def test_inventory_operation_validation() -> None:
def test_game_schema_normalizes_twitch_box_art_url() -> None:
"""Ensure Twitch box art URLs are normalized for higher quality."""
- schema: GameSchema = GameSchema.model_validate(
- {
- "id": "65654",
- "displayName": "Test Game",
- "boxArtURL": "https://static-cdn.jtvnw.net/ttv-boxart/65654_IGDB-120x160.jpg",
- "__typename": "Game",
- },
- )
+ schema: GameSchema = GameSchema.model_validate({
+ "id": "65654",
+ "displayName": "Test Game",
+ "boxArtURL": "https://static-cdn.jtvnw.net/ttv-boxart/65654_IGDB-120x160.jpg",
+ "__typename": "Game",
+ })
- assert schema.box_art_url == "https://static-cdn.jtvnw.net/ttv-boxart/65654_IGDB.jpg"
+ assert (
+ schema.box_art_url == "https://static-cdn.jtvnw.net/ttv-boxart/65654_IGDB.jpg"
+ )
def test_viewer_drops_dashboard_operation_still_works() -> None:
@@ -175,9 +175,7 @@ def test_viewer_drops_dashboard_operation_still_works() -> None:
"__typename": "User",
},
},
- "extensions": {
- "operationName": "ViewerDropsDashboard",
- },
+ "extensions": {"operationName": "ViewerDropsDashboard"},
}
# This should not raise ValidationError
@@ -201,11 +199,25 @@ def test_graphql_response_with_errors() -> None:
"errors": [
{
"message": "service timeout",
- "path": ["currentUser", "inventory", "dropCampaignsInProgress", 7, "allow", "channels"],
+ "path": [
+ "currentUser",
+ "inventory",
+ "dropCampaignsInProgress",
+ 7,
+ "allow",
+ "channels",
+ ],
},
{
"message": "service timeout",
- "path": ["currentUser", "inventory", "dropCampaignsInProgress", 10, "allow", "channels"],
+ "path": [
+ "currentUser",
+ "inventory",
+ "dropCampaignsInProgress",
+ 10,
+ "allow",
+ "channels",
+ ],
},
],
"data": {
@@ -244,9 +256,7 @@ def test_graphql_response_with_errors() -> None:
"__typename": "User",
},
},
- "extensions": {
- "operationName": "Inventory",
- },
+ "extensions": {"operationName": "Inventory"},
}
# This should not raise ValidationError even with errors field present
@@ -256,7 +266,14 @@ def test_graphql_response_with_errors() -> None:
assert response.errors is not None
assert len(response.errors) == 2
assert response.errors[0].message == "service timeout"
- assert response.errors[0].path == ["currentUser", "inventory", "dropCampaignsInProgress", 7, "allow", "channels"]
+ assert response.errors[0].path == [
+ "currentUser",
+ "inventory",
+ "dropCampaignsInProgress",
+ 7,
+ "allow",
+ "channels",
+ ]
# Verify the data is still accessible and valid
assert response.data.current_user is not None
@@ -323,7 +340,7 @@ def test_drop_campaign_details_missing_distribution_type() -> None:
"benefitEdges": [
{
"benefit": {
- "id": "6948a129-2c6d-4d88-9444-6b96918a19f8_CUSTOM_ID_WOWS_TwitchDrops_1307_250ct", # noqa: E501
+ "id": "6948a129-2c6d-4d88-9444-6b96918a19f8_CUSTOM_ID_WOWS_TwitchDrops_1307_250ct",
"createdAt": "2024-08-06T16:03:15.89Z",
"entitlementLimit": 1,
"game": {
@@ -390,7 +407,9 @@ def test_drop_campaign_details_missing_distribution_type() -> None:
assert len(first_drop.benefit_edges) == 1
benefit: DropBenefitSchema = first_drop.benefit_edges[0].benefit
assert benefit.name == "13.7 Update: 250 CT"
- assert benefit.distribution_type is None # This field was missing in the API response
+ assert (
+ benefit.distribution_type is None
+ ) # This field was missing in the API response
def test_reward_campaigns_available_to_user() -> None:
@@ -454,9 +473,7 @@ def test_reward_campaigns_available_to_user() -> None:
},
],
},
- "extensions": {
- "operationName": "ViewerDropsDashboard",
- },
+ "extensions": {"operationName": "ViewerDropsDashboard"},
}
# This should not raise ValidationError
diff --git a/twitch/tests/test_views.py b/twitch/tests/test_views.py
index 20430cc..2cc5c80 100644
--- a/twitch/tests/test_views.py
+++ b/twitch/tests/test_views.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
import datetime
import json
from datetime import timedelta
@@ -22,7 +20,6 @@ from twitch.models import DropCampaign
from twitch.models import Game
from twitch.models import Organization
from twitch.models import TimeBasedDrop
-from twitch.views import Page
from twitch.views import _build_breadcrumb_schema
from twitch.views import _build_pagination_info
from twitch.views import _build_seo_context
@@ -34,19 +31,26 @@ if TYPE_CHECKING:
from django.test.client import _MonkeyPatchedWSGIResponse
from django.test.utils import ContextList
+ from twitch.views import Page
+
@pytest.mark.django_db
class TestSearchView:
"""Tests for the search_view function."""
@pytest.fixture
- def sample_data(self) -> dict[str, Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit]:
+ def sample_data(
+ self,
+ ) -> dict[str, Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit]:
"""Create sample data for testing.
Returns:
A dictionary containing the created sample data.
"""
- org: Organization = Organization.objects.create(twitch_id="123", name="Test Organization")
+ org: Organization = Organization.objects.create(
+ twitch_id="123",
+ name="Test Organization",
+ )
game: Game = Game.objects.create(
twitch_id="456",
name="test_game",
@@ -78,7 +82,9 @@ class TestSearchView:
}
@staticmethod
- def _get_context(response: _MonkeyPatchedWSGIResponse) -> ContextList | dict[str, Any]:
+ def _get_context(
+ response: _MonkeyPatchedWSGIResponse,
+ ) -> ContextList | dict[str, Any]:
"""Normalize Django test response context to a plain dict.
Args:
@@ -95,7 +101,10 @@ class TestSearchView:
def test_empty_query(
self,
client: Client,
- sample_data: dict[str, Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit],
+ sample_data: dict[
+ str,
+ Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit,
+ ],
) -> None:
"""Test search with empty query returns no results."""
response: _MonkeyPatchedWSGIResponse = client.get("/search/?q=")
@@ -108,7 +117,10 @@ class TestSearchView:
def test_no_query_parameter(
self,
client: Client,
- sample_data: dict[str, Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit],
+ sample_data: dict[
+ str,
+ Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit,
+ ],
) -> None:
"""Test search with no query parameter returns no results."""
response: _MonkeyPatchedWSGIResponse = client.get("/search/")
@@ -124,7 +136,10 @@ class TestSearchView:
def test_short_query_istartswith(
self,
client: Client,
- sample_data: dict[str, Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit],
+ sample_data: dict[
+ str,
+ Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit,
+ ],
model_key: Literal["org", "game", "campaign", "drop", "benefit"],
) -> None:
"""Test short query (< 3 chars) uses istartswith for all models."""
@@ -151,7 +166,10 @@ class TestSearchView:
def test_long_query_icontains(
self,
client: Client,
- sample_data: dict[str, Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit],
+ sample_data: dict[
+ str,
+ Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit,
+ ],
model_key: Literal["org", "game", "campaign", "drop", "benefit"],
) -> None:
"""Test long query (>= 3 chars) uses icontains for all models."""
@@ -174,7 +192,10 @@ class TestSearchView:
def test_campaign_description_search(
self,
client: Client,
- sample_data: dict[str, Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit],
+ sample_data: dict[
+ str,
+ Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit,
+ ],
) -> None:
"""Test that campaign description is searchable."""
response: _MonkeyPatchedWSGIResponse = client.get("/search/?q=campaign")
@@ -186,7 +207,10 @@ class TestSearchView:
def test_game_display_name_search(
self,
client: Client,
- sample_data: dict[str, Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit],
+ sample_data: dict[
+ str,
+ Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit,
+ ],
) -> None:
"""Test that game display_name is searchable."""
response: _MonkeyPatchedWSGIResponse = client.get("/search/?q=Game")
@@ -198,7 +222,10 @@ class TestSearchView:
def test_query_no_matches(
self,
client: Client,
- sample_data: dict[str, Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit],
+ sample_data: dict[
+ str,
+ Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit,
+ ],
) -> None:
"""Test search with query that has no matches."""
response: _MonkeyPatchedWSGIResponse = client.get("/search/?q=xyz")
@@ -211,7 +238,10 @@ class TestSearchView:
def test_context_contains_query(
self,
client: Client,
- sample_data: dict[str, Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit],
+ sample_data: dict[
+ str,
+ Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit,
+ ],
) -> None:
"""Test that context contains the search query."""
query = "Test"
@@ -222,15 +252,15 @@ class TestSearchView:
@pytest.mark.parametrize(
("model_key", "related_field"),
- [
- ("campaigns", "game"),
- ("drops", "campaign"),
- ],
+ [("campaigns", "game"), ("drops", "campaign")],
)
def test_select_related_optimization(
self,
client: Client,
- sample_data: dict[str, Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit],
+ sample_data: dict[
+ str,
+ Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit,
+ ],
model_key: str,
related_field: str,
) -> None:
@@ -238,11 +268,15 @@ class TestSearchView:
response: _MonkeyPatchedWSGIResponse = client.get("/search/?q=Test")
context: ContextList | dict[str, Any] = self._get_context(response)
- results: list[Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit] = context["results"][model_key]
+ results: list[
+ Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit
+ ] = context["results"][model_key]
assert len(results) > 0
# Verify the related object is accessible without additional query
- first_result: Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit = results[0]
+ first_result: (
+ Organization | Game | DropCampaign | TimeBasedDrop | DropBenefit
+ ) = results[0]
assert hasattr(first_result, related_field)
@@ -251,13 +285,18 @@ class TestChannelListView:
"""Tests for the ChannelListView."""
@pytest.fixture
- def channel_with_campaigns(self) -> dict[str, Channel | Game | Organization | list[DropCampaign]]:
+ def channel_with_campaigns(
+ self,
+ ) -> dict[str, Channel | Game | Organization | list[DropCampaign]]:
"""Create a channel with multiple campaigns for testing.
Returns:
A dictionary containing the created channel and campaigns.
"""
- org: Organization = Organization.objects.create(twitch_id="org1", name="Test Org")
+ org: Organization = Organization.objects.create(
+ twitch_id="org1",
+ name="Test Org",
+ )
game: Game = Game.objects.create(
twitch_id="game1",
name="test_game",
@@ -284,12 +323,7 @@ class TestChannelListView:
campaign.allow_channels.add(channel)
campaigns.append(campaign)
- return {
- "channel": channel,
- "campaigns": campaigns,
- "game": game,
- "org": org,
- }
+ return {"channel": channel, "campaigns": campaigns, "game": game, "org": org}
def test_channel_list_loads(self, client: Client) -> None:
"""Test that channel list view loads successfully."""
@@ -299,7 +333,10 @@ class TestChannelListView:
def test_campaign_count_annotation(
self,
client: Client,
- channel_with_campaigns: dict[str, Channel | Game | Organization | list[DropCampaign]],
+ channel_with_campaigns: dict[
+ str,
+ Channel | Game | Organization | list[DropCampaign],
+ ],
) -> None:
"""Test that campaign_count is correctly annotated for channels."""
channel: Channel = channel_with_campaigns["channel"] # type: ignore[assignment]
@@ -313,13 +350,18 @@ class TestChannelListView:
channels: list[Channel] = context["channels"]
# Find our test channel in the results
- test_channel: Channel | None = next((ch for ch in channels if ch.twitch_id == channel.twitch_id), None)
+ test_channel: Channel | None = next(
+ (ch for ch in channels if ch.twitch_id == channel.twitch_id),
+ None,
+ )
assert test_channel is not None
assert hasattr(test_channel, "campaign_count")
campaign_count: int | None = getattr(test_channel, "campaign_count", None)
- assert campaign_count == len(campaigns), f"Expected campaign_count to be {len(campaigns)}, got {campaign_count}"
+ assert campaign_count == len(campaigns), (
+ f"Expected campaign_count to be {len(campaigns)}, got {campaign_count}"
+ )
def test_campaign_count_zero_for_channel_without_campaigns(
self,
@@ -339,7 +381,10 @@ class TestChannelListView:
context = context[-1]
channels: list[Channel] = context["channels"]
- test_channel: Channel | None = next((ch for ch in channels if ch.twitch_id == channel.twitch_id), None)
+ test_channel: Channel | None = next(
+ (ch for ch in channels if ch.twitch_id == channel.twitch_id),
+ None,
+ )
assert test_channel is not None
assert hasattr(test_channel, "campaign_count")
@@ -350,7 +395,10 @@ class TestChannelListView:
def test_channels_ordered_by_campaign_count(
self,
client: Client,
- channel_with_campaigns: dict[str, Channel | Game | Organization | list[DropCampaign]],
+ channel_with_campaigns: dict[
+ str,
+ Channel | Game | Organization | list[DropCampaign],
+ ],
) -> None:
"""Test that channels are ordered by campaign_count descending."""
game: Game = channel_with_campaigns["game"] # type: ignore[assignment]
@@ -380,17 +428,28 @@ class TestChannelListView:
channels: list[Channel] = list(context["channels"])
# The channel with 10 campaigns should come before the one with 5
- channel2_index: int | None = next((i for i, ch in enumerate(channels) if ch.twitch_id == "channel2"), None)
- channel1_index: int | None = next((i for i, ch in enumerate(channels) if ch.twitch_id == "channel1"), None)
+ channel2_index: int | None = next(
+ (i for i, ch in enumerate(channels) if ch.twitch_id == "channel2"),
+ None,
+ )
+ channel1_index: int | None = next(
+ (i for i, ch in enumerate(channels) if ch.twitch_id == "channel1"),
+ None,
+ )
assert channel2_index is not None
assert channel1_index is not None
- assert channel2_index < channel1_index, "Channel with more campaigns should appear first"
+ assert channel2_index < channel1_index, (
+ "Channel with more campaigns should appear first"
+ )
def test_channel_search_filters_correctly(
self,
client: Client,
- channel_with_campaigns: dict[str, Channel | Game | Organization | list[DropCampaign]],
+ channel_with_campaigns: dict[
+ str,
+ Channel | Game | Organization | list[DropCampaign],
+ ],
) -> None:
"""Test that search parameter filters channels correctly."""
channel: Channel = channel_with_campaigns["channel"] # type: ignore[assignment]
@@ -402,7 +461,9 @@ class TestChannelListView:
display_name="OtherChannel",
)
- response: _MonkeyPatchedWSGIResponse = client.get(f"/channels/?search={channel.name}")
+ response: _MonkeyPatchedWSGIResponse = client.get(
+ f"/channels/?search={channel.name}",
+ )
context: ContextList | dict[str, Any] = response.context # type: ignore[assignment]
if isinstance(context, list):
context = context[-1]
@@ -421,12 +482,25 @@ class TestChannelListView:
assert "active_campaigns" in response.context
@pytest.mark.django_db
- def test_dashboard_dedupes_campaigns_for_multi_owner_game(self, client: Client) -> None:
+ def test_dashboard_dedupes_campaigns_for_multi_owner_game(
+ self,
+ client: Client,
+ ) -> None:
"""Dashboard should not render duplicate campaign cards when a game has multiple owners."""
now = timezone.now()
- org1: Organization = Organization.objects.create(twitch_id="org_a", name="Org A")
- org2: Organization = Organization.objects.create(twitch_id="org_b", name="Org B")
- game: Game = Game.objects.create(twitch_id="game_multi_owner", name="game", display_name="Multi Owner")
+ org1: Organization = Organization.objects.create(
+ twitch_id="org_a",
+ name="Org A",
+ )
+ org2: Organization = Organization.objects.create(
+ twitch_id="org_b",
+ name="Org B",
+ )
+ game: Game = Game.objects.create(
+ twitch_id="game_multi_owner",
+ name="game",
+ display_name="Multi Owner",
+ )
game.owners.add(org1, org2)
campaign: DropCampaign = DropCampaign.objects.create(
@@ -463,14 +537,20 @@ class TestChannelListView:
@pytest.mark.django_db
def test_drop_campaign_list_view(self, client: Client) -> None:
"""Test campaign list view returns 200 and has campaigns in context."""
- response: _MonkeyPatchedWSGIResponse = client.get(reverse("twitch:campaign_list"))
+ response: _MonkeyPatchedWSGIResponse = client.get(
+ reverse("twitch:campaign_list"),
+ )
assert response.status_code == 200
assert "campaigns" in response.context
@pytest.mark.django_db
def test_drop_campaign_list_pagination(self, client: Client) -> None:
"""Test pagination works correctly with 100 items per page."""
- game: Game = Game.objects.create(twitch_id="g1", name="Game", display_name="Game")
+ game: Game = Game.objects.create(
+ twitch_id="g1",
+ name="Game",
+ display_name="Game",
+ )
now: datetime.datetime = timezone.now()
# Create 150 campaigns to test pagination
@@ -488,7 +568,9 @@ class TestChannelListView:
DropCampaign.objects.bulk_create(campaigns)
# Test first page
- response: _MonkeyPatchedWSGIResponse = client.get(reverse("twitch:campaign_list"))
+ response: _MonkeyPatchedWSGIResponse = client.get(
+ reverse("twitch:campaign_list"),
+ )
assert response.status_code == 200
assert "is_paginated" in response.context
assert response.context["is_paginated"] is True
@@ -508,7 +590,11 @@ class TestChannelListView:
@pytest.mark.django_db
def test_drop_campaign_list_status_filter_active(self, client: Client) -> None:
"""Test filtering for active campaigns only."""
- game: Game = Game.objects.create(twitch_id="g1", name="Game", display_name="Game")
+ game: Game = Game.objects.create(
+ twitch_id="g1",
+ name="Game",
+ display_name="Game",
+ )
now: datetime.datetime = timezone.now()
# Create active campaign
@@ -553,7 +639,11 @@ class TestChannelListView:
@pytest.mark.django_db
def test_drop_campaign_list_status_filter_upcoming(self, client: Client) -> None:
"""Test filtering for upcoming campaigns only."""
- game: Game = Game.objects.create(twitch_id="g1", name="Game", display_name="Game")
+ game: Game = Game.objects.create(
+ twitch_id="g1",
+ name="Game",
+ display_name="Game",
+ )
now: datetime.datetime = timezone.now()
# Create active campaign
@@ -598,7 +688,11 @@ class TestChannelListView:
@pytest.mark.django_db
def test_drop_campaign_list_status_filter_expired(self, client: Client) -> None:
"""Test filtering for expired campaigns only."""
- game: Game = Game.objects.create(twitch_id="g1", name="Game", display_name="Game")
+ game: Game = Game.objects.create(
+ twitch_id="g1",
+ name="Game",
+ display_name="Game",
+ )
now: datetime.datetime = timezone.now()
# Create active campaign
@@ -643,8 +737,16 @@ class TestChannelListView:
@pytest.mark.django_db
def test_drop_campaign_list_game_filter(self, client: Client) -> None:
"""Test filtering campaigns by game."""
- game1: Game = Game.objects.create(twitch_id="g1", name="Game 1", display_name="Game 1")
- game2: Game = Game.objects.create(twitch_id="g2", name="Game 2", display_name="Game 2")
+ game1: Game = Game.objects.create(
+ twitch_id="g1",
+ name="Game 1",
+ display_name="Game 1",
+ )
+ game2: Game = Game.objects.create(
+ twitch_id="g2",
+ name="Game 2",
+ display_name="Game 2",
+ )
now: datetime.datetime = timezone.now()
# Create campaigns for game 1
@@ -692,9 +794,16 @@ class TestChannelListView:
assert campaigns[0].game.twitch_id == "g2"
@pytest.mark.django_db
- def test_drop_campaign_list_pagination_preserves_filters(self, client: Client) -> None:
+ def test_drop_campaign_list_pagination_preserves_filters(
+ self,
+ client: Client,
+ ) -> None:
"""Test that pagination links preserve game and status filters."""
- game: Game = Game.objects.create(twitch_id="g1", name="Game", display_name="Game")
+ game: Game = Game.objects.create(
+ twitch_id="g1",
+ name="Game",
+ display_name="Game",
+ )
now: datetime.datetime = timezone.now()
# Create 150 active campaigns for game g1
@@ -726,7 +835,11 @@ class TestChannelListView:
@pytest.mark.django_db
def test_drop_campaign_detail_view(self, client: Client, db: object) -> None:
"""Test campaign detail view returns 200 and has campaign in context."""
- game: Game = Game.objects.create(twitch_id="g1", name="Game", display_name="Game")
+ game: Game = Game.objects.create(
+ twitch_id="g1",
+ name="Game",
+ display_name="Game",
+ )
campaign: DropCampaign = DropCampaign.objects.create(
twitch_id="c1",
name="Campaign",
@@ -744,7 +857,11 @@ class TestChannelListView:
client: Client,
) -> None:
"""Test campaign detail view includes badge benefit description from ChatBadge."""
- game: Game = Game.objects.create(twitch_id="g-badge", name="Game", display_name="Game")
+ game: Game = Game.objects.create(
+ twitch_id="g-badge",
+ name="Game",
+ display_name="Game",
+ )
campaign: DropCampaign = DropCampaign.objects.create(
twitch_id="c-badge",
name="Campaign",
@@ -803,7 +920,11 @@ class TestChannelListView:
@pytest.mark.django_db
def test_game_detail_view(self, client: Client, db: object) -> None:
"""Test game detail view returns 200 and has game in context."""
- game: Game = Game.objects.create(twitch_id="g2", name="Game2", display_name="Game2")
+ game: Game = Game.objects.create(
+ twitch_id="g2",
+ name="Game2",
+ display_name="Game2",
+ )
url: str = reverse("twitch:game_detail", args=[game.twitch_id])
response: _MonkeyPatchedWSGIResponse = client.get(url)
assert response.status_code == 200
@@ -828,7 +949,11 @@ class TestChannelListView:
@pytest.mark.django_db
def test_channel_detail_view(self, client: Client, db: object) -> None:
"""Test channel detail view returns 200 and has channel in context."""
- channel: Channel = Channel.objects.create(twitch_id="ch1", name="Channel1", display_name="Channel1")
+ channel: Channel = Channel.objects.create(
+ twitch_id="ch1",
+ name="Channel1",
+ display_name="Channel1",
+ )
url: str = reverse("twitch:channel_detail", args=[channel.twitch_id])
response: _MonkeyPatchedWSGIResponse = client.get(url)
assert response.status_code == 200
@@ -858,7 +983,7 @@ class TestSEOHelperFunctions:
def test_truncate_description_long_text(self) -> None:
"""Test that long text is truncated at word boundary."""
- text = "This is a very long description that exceeds the maximum length and should be truncated at a word boundary to avoid cutting off in the middle of a word" # noqa: E501
+ text = "This is a very long description that exceeds the maximum length and should be truncated at a word boundary to avoid cutting off in the middle of a word"
result: str = _truncate_description(text, max_length=50)
assert len(result) <= 53 # Allow some flexibility
assert not result.endswith(" ")
@@ -890,7 +1015,9 @@ class TestSEOHelperFunctions:
def test_build_seo_context_with_all_parameters(self) -> None:
"""Test _build_seo_context with all parameters."""
now: datetime.datetime = timezone.now()
- breadcrumb: list[dict[str, int | str]] = [{"position": 1, "name": "Home", "url": "/"}]
+ breadcrumb: list[dict[str, int | str]] = [
+ {"position": 1, "name": "Home", "url": "/"},
+ ]
context: dict[str, Any] = _build_seo_context(
page_title="Test",
@@ -938,7 +1065,11 @@ class TestSEOHelperFunctions:
paginator: Paginator[int] = Paginator(items, 10)
page: Page[int] = paginator.get_page(1)
- info: list[dict[str, str]] | None = _build_pagination_info(request, page, "/campaigns/")
+ info: list[dict[str, str]] | None = _build_pagination_info(
+ request,
+ page,
+ "/campaigns/",
+ )
assert info is not None
assert len(info) == 1
@@ -954,7 +1085,11 @@ class TestSEOHelperFunctions:
paginator: Paginator[int] = Paginator(items, 10)
page: Page[int] = paginator.get_page(2)
- info: list[dict[str, str]] | None = _build_pagination_info(request, page, "/campaigns/")
+ info: list[dict[str, str]] | None = _build_pagination_info(
+ request,
+ page,
+ "/campaigns/",
+ )
assert info is not None
assert len(info) == 2
@@ -975,7 +1110,10 @@ class TestSEOMetaTags:
Returns:
dict[str, Any]: A dictionary containing the created organization, game, and campaign.
"""
- org: Organization = Organization.objects.create(twitch_id="org1", name="Test Org")
+ org: Organization = Organization.objects.create(
+ twitch_id="org1",
+ name="Test Org",
+ )
game: Game = Game.objects.create(
twitch_id="game1",
name="test_game",
@@ -995,7 +1133,9 @@ class TestSEOMetaTags:
def test_campaign_list_view_has_seo_context(self, client: Client) -> None:
"""Test campaign list view has SEO context variables."""
- response: _MonkeyPatchedWSGIResponse = client.get(reverse("twitch:campaign_list"))
+ response: _MonkeyPatchedWSGIResponse = client.get(
+ reverse("twitch:campaign_list"),
+ )
assert response.status_code == 200
assert "page_title" in response.context
assert "page_description" in response.context
@@ -1050,7 +1190,10 @@ class TestSEOMetaTags:
def test_organization_detail_view_has_breadcrumb(self, client: Client) -> None:
"""Test organization detail view has breadcrumb."""
- org: Organization = Organization.objects.create(twitch_id="org1", name="Test Org")
+ org: Organization = Organization.objects.create(
+ twitch_id="org1",
+ name="Test Org",
+ )
url: str = reverse("twitch:organization_detail", args=[org.twitch_id])
response: _MonkeyPatchedWSGIResponse = client.get(url)
@@ -1059,7 +1202,11 @@ class TestSEOMetaTags:
def test_channel_detail_view_has_breadcrumb(self, client: Client) -> None:
"""Test channel detail view has breadcrumb."""
- channel: Channel = Channel.objects.create(twitch_id="ch1", name="ch1", display_name="Channel 1")
+ channel: Channel = Channel.objects.create(
+ twitch_id="ch1",
+ name="ch1",
+ display_name="Channel 1",
+ )
url: str = reverse("twitch:channel_detail", args=[channel.twitch_id])
response: _MonkeyPatchedWSGIResponse = client.get(url)
@@ -1068,10 +1215,11 @@ class TestSEOMetaTags:
def test_noindex_pages_have_robots_directive(self, client: Client) -> None:
"""Test that pages with noindex have proper robots directive."""
- response: _MonkeyPatchedWSGIResponse = client.get(reverse("twitch:dataset_backups"))
+ response: _MonkeyPatchedWSGIResponse = client.get(
+ reverse("twitch:dataset_backups"),
+ )
assert response.status_code == 200
assert "robots_directive" in response.context
- assert "noindex" in response.context["robots_directive"]
@pytest.mark.django_db
@@ -1085,14 +1233,21 @@ class TestSitemapView:
Returns:
dict[str, Any]: A dictionary containing the created organization, game, channel, campaign, and badge set.
"""
- org: Organization = Organization.objects.create(twitch_id="org1", name="Test Org")
+ org: Organization = Organization.objects.create(
+ twitch_id="org1",
+ name="Test Org",
+ )
game: Game = Game.objects.create(
twitch_id="game1",
name="test_game",
display_name="Test Game",
)
game.owners.add(org)
- channel: Channel = Channel.objects.create(twitch_id="ch1", name="ch1", display_name="Channel 1")
+ channel: Channel = Channel.objects.create(
+ twitch_id="ch1",
+ name="ch1",
+ display_name="Channel 1",
+ )
campaign: DropCampaign = DropCampaign.objects.create(
twitch_id="camp1",
name="Test Campaign",
@@ -1109,31 +1264,50 @@ class TestSitemapView:
"badge": badge,
}
- def test_sitemap_view_returns_xml(self, client: Client, sample_entities: dict[str, Any]) -> None:
+ def test_sitemap_view_returns_xml(
+ self,
+ client: Client,
+ sample_entities: dict[str, Any],
+ ) -> None:
"""Test sitemap view returns XML content."""
response: _MonkeyPatchedWSGIResponse = client.get("/sitemap.xml")
assert response.status_code == 200
assert response["Content-Type"] == "application/xml"
- def test_sitemap_contains_xml_declaration(self, client: Client, sample_entities: dict[str, Any]) -> None:
+ def test_sitemap_contains_xml_declaration(
+ self,
+ client: Client,
+ sample_entities: dict[str, Any],
+ ) -> None:
"""Test sitemap contains proper XML declaration."""
response: _MonkeyPatchedWSGIResponse = client.get("/sitemap.xml")
content = response.content.decode()
assert content.startswith('')
- def test_sitemap_contains_urlset(self, client: Client, sample_entities: dict[str, Any]) -> None:
+ def test_sitemap_contains_urlset(
+ self,
+ client: Client,
+ sample_entities: dict[str, Any],
+ ) -> None:
"""Test sitemap contains urlset element."""
response: _MonkeyPatchedWSGIResponse = client.get("/sitemap.xml")
content: str = response.content.decode()
assert "" in content
- def test_sitemap_contains_static_pages(self, client: Client, sample_entities: dict[str, Any]) -> None:
+ def test_sitemap_contains_static_pages(
+ self,
+ client: Client,
+ sample_entities: dict[str, Any],
+ ) -> None:
"""Test sitemap includes static pages."""
response: _MonkeyPatchedWSGIResponse = client.get("/sitemap.xml")
content: str = response.content.decode()
# Check for some static pages
- assert "http://testserver/ " in content or "http://localhost:8000/ " in content
+ assert (
+ "http://testserver/ " in content
+ or "http://localhost:8000/ " in content
+ )
assert "/campaigns/" in content
assert "/games/" in content
@@ -1192,21 +1366,33 @@ class TestSitemapView:
content: str = response.content.decode()
assert f"/badges/{badge.set_id}/" in content # pyright: ignore[reportAttributeAccessIssue]
- def test_sitemap_includes_priority(self, client: Client, sample_entities: dict[str, Any]) -> None:
+ def test_sitemap_includes_priority(
+ self,
+ client: Client,
+ sample_entities: dict[str, Any],
+ ) -> None:
"""Test sitemap includes priority values."""
response: _MonkeyPatchedWSGIResponse = client.get("/sitemap.xml")
content: str = response.content.decode()
assert "" in content
assert " " in content
- def test_sitemap_includes_changefreq(self, client: Client, sample_entities: dict[str, Any]) -> None:
+ def test_sitemap_includes_changefreq(
+ self,
+ client: Client,
+ sample_entities: dict[str, Any],
+ ) -> None:
"""Test sitemap includes changefreq values."""
response: _MonkeyPatchedWSGIResponse = client.get("/sitemap.xml")
content: str = response.content.decode()
assert "" in content
assert " " in content
- def test_sitemap_includes_lastmod(self, client: Client, sample_entities: dict[str, Any]) -> None:
+ def test_sitemap_includes_lastmod(
+ self,
+ client: Client,
+ sample_entities: dict[str, Any],
+ ) -> None:
"""Test sitemap includes lastmod for detail pages."""
response: _MonkeyPatchedWSGIResponse = client.get("/sitemap.xml")
content: str = response.content.decode()
@@ -1275,7 +1461,10 @@ class TestSEOPaginationLinks:
def test_campaign_list_first_page_has_next(self, client: Client) -> None:
"""Test campaign list first page has next link."""
# Create a game and multiple campaigns to trigger pagination
- org: Organization = Organization.objects.create(twitch_id="org1", name="Test Org")
+ org: Organization = Organization.objects.create(
+ twitch_id="org1",
+ name="Test Org",
+ )
game = Game.objects.create(
twitch_id="game1",
name="test_game",
diff --git a/twitch/urls.py b/twitch/urls.py
index b9950f9..f414bb5 100644
--- a/twitch/urls.py
+++ b/twitch/urls.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
from typing import TYPE_CHECKING
from django.urls import path
@@ -23,9 +21,17 @@ urlpatterns: list[URLPattern] = [
path("badges/", views.badge_list_view, name="badge_list"),
path("badges//", views.badge_set_detail_view, name="badge_set_detail"),
path("campaigns/", views.drop_campaign_list_view, name="campaign_list"),
- path("campaigns//", views.drop_campaign_detail_view, name="campaign_detail"),
+ path(
+ "campaigns//",
+ views.drop_campaign_detail_view,
+ name="campaign_detail",
+ ),
path("channels/", views.ChannelListView.as_view(), name="channel_list"),
- path("channels//", views.ChannelDetailView.as_view(), name="channel_detail"),
+ path(
+ "channels//",
+ views.ChannelDetailView.as_view(),
+ name="channel_detail",
+ ),
path("debug/", views.debug_view, name="debug"),
path("datasets/", views.dataset_backups_view, name="dataset_backups"),
path(
@@ -39,20 +45,56 @@ urlpatterns: list[URLPattern] = [
path("games/list/", views.GamesListView.as_view(), name="games_list"),
path("games//", views.GameDetailView.as_view(), name="game_detail"),
path("organizations/", views.org_list_view, name="org_list"),
- path("organizations//", views.organization_detail_view, name="organization_detail"),
- path("reward-campaigns/", views.reward_campaign_list_view, name="reward_campaign_list"),
- path("reward-campaigns//", views.reward_campaign_detail_view, name="reward_campaign_detail"),
+ path(
+ "organizations//",
+ views.organization_detail_view,
+ name="organization_detail",
+ ),
+ path(
+ "reward-campaigns/",
+ views.reward_campaign_list_view,
+ name="reward_campaign_list",
+ ),
+ path(
+ "reward-campaigns//",
+ views.reward_campaign_detail_view,
+ name="reward_campaign_detail",
+ ),
path("search/", views.search_view, name="search"),
- path("export/campaigns/csv/", views.export_campaigns_csv, name="export_campaigns_csv"),
- path("export/campaigns/json/", views.export_campaigns_json, name="export_campaigns_json"),
+ path(
+ "export/campaigns/csv/",
+ views.export_campaigns_csv,
+ name="export_campaigns_csv",
+ ),
+ path(
+ "export/campaigns/json/",
+ views.export_campaigns_json,
+ name="export_campaigns_json",
+ ),
path("export/games/csv/", views.export_games_csv, name="export_games_csv"),
path("export/games/json/", views.export_games_json, name="export_games_json"),
- path("export/organizations/csv/", views.export_organizations_csv, name="export_organizations_csv"),
- path("export/organizations/json/", views.export_organizations_json, name="export_organizations_json"),
+ path(
+ "export/organizations/csv/",
+ views.export_organizations_csv,
+ name="export_organizations_csv",
+ ),
+ path(
+ "export/organizations/json/",
+ views.export_organizations_json,
+ name="export_organizations_json",
+ ),
path("rss/campaigns/", DropCampaignFeed(), name="campaign_feed"),
path("rss/games/", GameFeed(), name="game_feed"),
- path("rss/games//campaigns/", GameCampaignFeed(), name="game_campaign_feed"),
+ path(
+ "rss/games//campaigns/",
+ GameCampaignFeed(),
+ name="game_campaign_feed",
+ ),
path("rss/organizations/", OrganizationRSSFeed(), name="organization_feed"),
- path("rss/organizations//campaigns/", OrganizationCampaignFeed(), name="organization_campaign_feed"),
+ path(
+ "rss/organizations//campaigns/",
+ OrganizationCampaignFeed(),
+ name="organization_campaign_feed",
+ ),
path("rss/reward-campaigns/", RewardCampaignFeed(), name="reward_campaign_feed"),
]
diff --git a/twitch/utils.py b/twitch/utils.py
index 4957dfb..de5effe 100644
--- a/twitch/utils.py
+++ b/twitch/utils.py
@@ -1,9 +1,6 @@
-from __future__ import annotations
-
import re
from functools import lru_cache
from typing import TYPE_CHECKING
-from urllib.parse import ParseResult
from urllib.parse import urlparse
from urllib.parse import urlunparse
@@ -12,10 +9,13 @@ from django.utils import timezone
if TYPE_CHECKING:
from datetime import datetime
+ from urllib.parse import ParseResult
TWITCH_BOX_ART_HOST = "static-cdn.jtvnw.net"
TWITCH_BOX_ART_PATH_PREFIX = "/ttv-boxart/"
-TWITCH_BOX_ART_SIZE_PATTERN: re.Pattern[str] = re.compile(r"-(\{width\}|\d+)x(\{height\}|\d+)(?=\.[A-Za-z0-9]+$)")
+TWITCH_BOX_ART_SIZE_PATTERN: re.Pattern[str] = re.compile(
+ r"-(\{width\}|\d+)x(\{height\}|\d+)(?=\.[A-Za-z0-9]+$)",
+)
def is_twitch_box_art_url(url: str) -> bool:
@@ -24,7 +24,9 @@ def is_twitch_box_art_url(url: str) -> bool:
return False
parsed: ParseResult = urlparse(url)
- return parsed.netloc == TWITCH_BOX_ART_HOST and parsed.path.startswith(TWITCH_BOX_ART_PATH_PREFIX)
+ return parsed.netloc == TWITCH_BOX_ART_HOST and parsed.path.startswith(
+ TWITCH_BOX_ART_PATH_PREFIX,
+ )
def normalize_twitch_box_art_url(url: str) -> str:
@@ -44,7 +46,10 @@ def normalize_twitch_box_art_url(url: str) -> str:
return url
parsed: ParseResult = urlparse(url)
- if parsed.netloc != TWITCH_BOX_ART_HOST or not parsed.path.startswith(TWITCH_BOX_ART_PATH_PREFIX):
+ if parsed.netloc != TWITCH_BOX_ART_HOST:
+ return url
+
+ if not parsed.path.startswith(TWITCH_BOX_ART_PATH_PREFIX):
return url
normalized_path: str = TWITCH_BOX_ART_SIZE_PATTERN.sub("", parsed.path)
diff --git a/twitch/views.py b/twitch/views.py
index 31c10f9..4d46e64 100644
--- a/twitch/views.py
+++ b/twitch/views.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
import csv
import datetime
import json
@@ -29,7 +27,6 @@ from django.db.models.functions import Trim
from django.db.models.query import QuerySet
from django.http import FileResponse
from django.http import Http404
-from django.http import HttpRequest
from django.http import HttpResponse
from django.shortcuts import render
from django.template.defaultfilters import filesizeformat
@@ -64,13 +61,14 @@ if TYPE_CHECKING:
from debug_toolbar.utils import QueryDict
from django.db.models.query import QuerySet
+ from django.http import HttpRequest
logger: logging.Logger = logging.getLogger("ttvdrops.views")
MIN_QUERY_LENGTH_FOR_FTS = 3
MIN_SEARCH_RANK = 0.05
-DEFAULT_SITE_DESCRIPTION = "Twitch Drops Tracker - Track your Twitch drops and campaigns easily."
+DEFAULT_SITE_DESCRIPTION = "Archive of Twitch drops, campaigns, rewards, and more."
def _truncate_description(text: str, max_length: int = 160) -> str:
@@ -124,6 +122,12 @@ def _build_seo_context( # noqa: PLR0913, PLR0917
Returns:
Dict with SEO context variables to pass to render().
"""
+ # TODO(TheLovinator): Instead of having so many parameters, # noqa: TD003
+ # consider having a single "seo_info" parameter that
+ # can contain all of these optional fields. This would make
+ # it easier to extend in the future without changing the
+ # function signature.
+
context: dict[str, Any] = {
"page_title": page_title,
"page_description": page_description or DEFAULT_SITE_DESCRIPTION,
@@ -148,9 +152,7 @@ def _build_seo_context( # noqa: PLR0913, PLR0917
return context
-def _build_breadcrumb_schema(
- items: list[dict[str, str | int]],
-) -> dict[str, Any]:
+def _build_breadcrumb_schema(items: list[dict[str, str | int]]) -> dict[str, Any]:
"""Build a BreadcrumbList schema for structured data.
Args:
@@ -160,6 +162,8 @@ def _build_breadcrumb_schema(
Returns:
BreadcrumbList schema dict.
"""
+ # TODO(TheLovinator): Replace dict with something more structured, like a dataclass or namedtuple, for better type safety and readability. # noqa: TD003
+
breadcrumb_items: list[dict[str, str | int]] = []
for position, item in enumerate(items, start=1):
breadcrumb_items.append({
@@ -216,7 +220,9 @@ def _build_pagination_info(
def emote_gallery_view(request: HttpRequest) -> HttpResponse:
- """View to display all emote images (distribution_type='EMOTE'), clickable to their campaign.
+ """View to display all emote images.
+
+ Emotes are associated with DropBenefits of type "EMOTE".
Args:
request: The HTTP request.
@@ -240,7 +246,10 @@ def emote_gallery_view(request: HttpRequest) -> HttpResponse:
emotes: list[dict[str, str | DropCampaign]] = []
for benefit in emote_benefits:
# Find the first drop with a campaign for this benefit
- drop: TimeBasedDrop | None = next((d for d in getattr(benefit, "_emote_drops", []) if d.campaign), None)
+ drop: TimeBasedDrop | None = next(
+ (d for d in getattr(benefit, "_emote_drops", []) if d.campaign),
+ None,
+ )
if drop and drop.campaign:
emotes.append({
"image_url": benefit.image_best_url,
@@ -248,13 +257,10 @@ def emote_gallery_view(request: HttpRequest) -> HttpResponse:
})
seo_context: dict[str, Any] = _build_seo_context(
- page_title="Twitch Emotes Gallery",
- page_description="Browse all Twitch drop emotes and find the campaigns that award them.",
+ page_title="Twitch Emotes",
+ page_description="List of all Twitch emotes available as rewards.",
)
- context: dict[str, Any] = {
- "emotes": emotes,
- **seo_context,
- }
+ context: dict[str, Any] = {"emotes": emotes, **seo_context}
return render(request, "twitch/emote_gallery.html", context)
@@ -273,19 +279,29 @@ def search_view(request: HttpRequest) -> HttpResponse:
if query:
if len(query) < MIN_QUERY_LENGTH_FOR_FTS:
- results["organizations"] = Organization.objects.filter(name__istartswith=query)
- results["games"] = Game.objects.filter(Q(name__istartswith=query) | Q(display_name__istartswith=query))
+ results["organizations"] = Organization.objects.filter(
+ name__istartswith=query,
+ )
+ results["games"] = Game.objects.filter(
+ Q(name__istartswith=query) | Q(display_name__istartswith=query),
+ )
results["campaigns"] = DropCampaign.objects.filter(
Q(name__istartswith=query) | Q(description__icontains=query),
).select_related("game")
- results["drops"] = TimeBasedDrop.objects.filter(name__istartswith=query).select_related("campaign")
- results["benefits"] = DropBenefit.objects.filter(name__istartswith=query).prefetch_related(
- "drops__campaign",
- )
+ results["drops"] = TimeBasedDrop.objects.filter(
+ name__istartswith=query,
+ ).select_related("campaign")
+ results["benefits"] = DropBenefit.objects.filter(
+ name__istartswith=query,
+ ).prefetch_related("drops__campaign")
results["reward_campaigns"] = RewardCampaign.objects.filter(
- Q(name__istartswith=query) | Q(brand__istartswith=query) | Q(summary__icontains=query),
+ Q(name__istartswith=query)
+ | Q(brand__istartswith=query)
+ | Q(summary__icontains=query),
).select_related("game")
- results["badge_sets"] = ChatBadgeSet.objects.filter(set_id__istartswith=query)
+ results["badge_sets"] = ChatBadgeSet.objects.filter(
+ set_id__istartswith=query,
+ )
results["badges"] = ChatBadge.objects.filter(
Q(title__istartswith=query) | Q(description__icontains=query),
).select_related("badge_set")
@@ -306,18 +322,28 @@ def search_view(request: HttpRequest) -> HttpResponse:
name__icontains=query,
).prefetch_related("drops__campaign")
results["reward_campaigns"] = RewardCampaign.objects.filter(
- Q(name__icontains=query) | Q(brand__icontains=query) | Q(summary__icontains=query),
+ Q(name__icontains=query)
+ | Q(brand__icontains=query)
+ | Q(summary__icontains=query),
).select_related("game")
results["badge_sets"] = ChatBadgeSet.objects.filter(set_id__icontains=query)
results["badges"] = ChatBadge.objects.filter(
Q(title__icontains=query) | Q(description__icontains=query),
).select_related("badge_set")
+ total_results_count: int = sum(len(qs) for qs in results.values())
+
+ # TODO(TheLovinator): Make the description more informative by including counts of each result type, e.g. "Found 5 games, 3 campaigns, and 10 drops for 'rust'." # noqa: TD003
+ if query:
+ page_title: str = f"Search Results for '{query}'"[:60]
+ page_description: str = f"Found {total_results_count} results for '{query}'."
+ else:
+ page_title = "Search"
+ page_description = "Search for drops, games, channels, and organizations."
+
seo_context: dict[str, Any] = _build_seo_context(
- page_title=f"Search Results for '{query}'" if query else "Search",
- page_description=f"Search results for '{query}' across Twitch drops, campaigns, games, and more."
- if query
- else "Search for Twitch drops, campaigns, games, channels, and organizations.",
+ page_title=page_title,
+ page_description=page_description,
)
return render(
request,
@@ -342,12 +368,7 @@ def org_list_view(request: HttpRequest) -> HttpResponse:
serialized_orgs: str = serialize(
"json",
orgs,
- fields=(
- "twitch_id",
- "name",
- "added_at",
- "updated_at",
- ),
+ fields=("twitch_id", "name", "added_at", "updated_at"),
)
orgs_data: list[dict] = json.loads(serialized_orgs)
@@ -356,13 +377,13 @@ def org_list_view(request: HttpRequest) -> HttpResponse:
"@context": "https://schema.org",
"@type": "CollectionPage",
"name": "Twitch Organizations",
- "description": "Browse all Twitch organizations that offer drop campaigns and rewards.",
+ "description": "List of Twitch organizations.",
"url": request.build_absolute_uri("/organizations/"),
}
seo_context: dict[str, Any] = _build_seo_context(
page_title="Twitch Organizations",
- page_description="Browse all Twitch organizations that offer drop campaigns and rewards.",
+ page_description="List of Twitch organizations.",
schema_data=collection_schema,
)
context: dict[str, Any] = {
@@ -375,7 +396,7 @@ def org_list_view(request: HttpRequest) -> HttpResponse:
# MARK: /organizations//
-def organization_detail_view(request: HttpRequest, twitch_id: str) -> HttpResponse:
+def organization_detail_view(request: HttpRequest, twitch_id: str) -> HttpResponse: # noqa: PLR0914
"""Function-based view for organization detail.
Args:
@@ -399,12 +420,7 @@ def organization_detail_view(request: HttpRequest, twitch_id: str) -> HttpRespon
serialized_org: str = serialize(
"json",
[organization],
- fields=(
- "twitch_id",
- "name",
- "added_at",
- "updated_at",
- ),
+ fields=("twitch_id", "name", "added_at", "updated_at"),
)
org_data: list[dict] = json.loads(serialized_org)
@@ -427,13 +443,17 @@ def organization_detail_view(request: HttpRequest, twitch_id: str) -> HttpRespon
org_name: str = organization.name or organization.twitch_id
games_count: int = games.count()
- org_description: str = f"{org_name} offers {games_count} game(s) with Twitch drop campaigns and rewards."
+ s: Literal["", "s"] = "" if games_count == 1 else "s"
+ org_description: str = f"{org_name} has {games_count} game{s}."
+ url: str = request.build_absolute_uri(
+ reverse("twitch:organization_detail", args=[organization.twitch_id]),
+ )
org_schema: dict[str, str | dict[str, str]] = {
"@context": "https://schema.org",
"@type": "Organization",
"name": org_name,
- "url": request.build_absolute_uri(reverse("twitch:organization_detail", args=[organization.twitch_id])),
+ "url": url,
"description": org_description,
}
@@ -443,7 +463,9 @@ def organization_detail_view(request: HttpRequest, twitch_id: str) -> HttpRespon
{"name": "Organizations", "url": request.build_absolute_uri("/organizations/")},
{
"name": org_name,
- "url": request.build_absolute_uri(reverse("twitch:organization_detail", args=[organization.twitch_id])),
+ "url": request.build_absolute_uri(
+ reverse("twitch:organization_detail", args=[organization.twitch_id]),
+ ),
},
])
@@ -452,7 +474,7 @@ def organization_detail_view(request: HttpRequest, twitch_id: str) -> HttpRespon
page_description=org_description,
schema_data=org_schema,
breadcrumb_schema=breadcrumb_schema,
- modified_date=organization.updated_at.isoformat() if organization.updated_at else None,
+ modified_date=organization.updated_at.isoformat(),
)
context: dict[str, Any] = {
"organization": organization,
@@ -512,9 +534,9 @@ def drop_campaign_list_view(request: HttpRequest) -> HttpResponse: # noqa: PLR0
except Game.DoesNotExist:
pass
- description = "Browse all Twitch drop campaigns with active drops, upcoming campaigns, and rewards."
+ description = "Browse Twitch drop campaigns"
if status_filter == "active":
- description = "Browse currently active Twitch drop campaigns with rewards available now."
+ description = "Browse active Twitch drop campaigns."
elif status_filter == "upcoming":
description = "View upcoming Twitch drop campaigns starting soon."
elif status_filter == "expired":
@@ -529,7 +551,11 @@ def drop_campaign_list_view(request: HttpRequest) -> HttpResponse: # noqa: PLR0
elif game_filter:
base_url += f"?game={game_filter}"
- pagination_info: list[dict[str, str]] | None = _build_pagination_info(request, campaigns, base_url)
+ pagination_info: list[dict[str, str]] | None = _build_pagination_info(
+ request,
+ campaigns,
+ base_url,
+ )
# CollectionPage schema for campaign list
collection_schema: dict[str, str] = {
@@ -587,6 +613,9 @@ def dataset_backups_view(request: HttpRequest) -> HttpResponse:
Returns:
HttpResponse: The rendered dataset backups page.
"""
+ # TODO(TheLovinator): Instead of only using sql we should also support other formats like parquet, csv, or json. # noqa: TD003
+ # TODO(TheLovinator): Upload to s3 instead. # noqa: TD003
+
datasets_root: Path = settings.DATA_DIR / "datasets"
search_dirs: list[Path] = [datasets_root]
seen_paths: set[str] = set()
@@ -626,9 +655,8 @@ def dataset_backups_view(request: HttpRequest) -> HttpResponse:
datasets.sort(key=operator.itemgetter("updated_at"), reverse=True)
seo_context: dict[str, Any] = _build_seo_context(
- page_title="Database Backups - TTVDrops",
- page_description="Download database backups and datasets containing Twitch drops, campaigns, and related data.",
- robots_directive="noindex, follow",
+ page_title="Twitch Dataset",
+ page_description="Database backups and datasets available for download.",
)
context: dict[str, Any] = {
"datasets": datasets,
@@ -639,7 +667,10 @@ def dataset_backups_view(request: HttpRequest) -> HttpResponse:
return render(request, "twitch/dataset_backups.html", context)
-def dataset_backup_download_view(request: HttpRequest, relative_path: str) -> FileResponse: # noqa: ARG001
+def dataset_backup_download_view(
+ request: HttpRequest, # noqa: ARG001
+ relative_path: str,
+) -> FileResponse:
"""Download a dataset backup from the data directory.
Args:
@@ -652,7 +683,8 @@ def dataset_backup_download_view(request: HttpRequest, relative_path: str) -> Fi
Raises:
Http404: When the file is not found or is outside the data directory.
"""
- allowed_endings = (".zst",)
+ # TODO(TheLovinator): Use s3 instead of local disk. # noqa: TD003
+
datasets_root: Path = settings.DATA_DIR / "datasets"
requested_path: Path = (datasets_root / relative_path).resolve()
data_root: Path = datasets_root.resolve()
@@ -665,7 +697,7 @@ def dataset_backup_download_view(request: HttpRequest, relative_path: str) -> Fi
if not requested_path.exists() or not requested_path.is_file():
msg = "File not found"
raise Http404(msg)
- if not requested_path.name.endswith(allowed_endings):
+ if not requested_path.name.endswith(".zst"):
msg = "File not found"
raise Http404(msg)
@@ -676,7 +708,10 @@ def dataset_backup_download_view(request: HttpRequest, relative_path: str) -> Fi
)
-def _enhance_drops_with_context(drops: QuerySet[TimeBasedDrop], now: datetime.datetime) -> list[dict[str, Any]]:
+def _enhance_drops_with_context(
+ drops: QuerySet[TimeBasedDrop],
+ now: datetime.datetime,
+) -> list[dict[str, Any]]:
"""Helper to enhance drops with countdown and context.
Args:
@@ -684,7 +719,7 @@ def _enhance_drops_with_context(drops: QuerySet[TimeBasedDrop], now: datetime.da
now: Current datetime.
Returns:
- List of dicts with drop, local_start, local_end, timezone_name, and countdown_text.
+ List of dicts with drop and additional context for display.
"""
enhanced: list[dict[str, Any]] = []
for drop in drops:
@@ -737,9 +772,7 @@ def drop_campaign_detail_view(request: HttpRequest, twitch_id: str) -> HttpRespo
queryset=Channel.objects.order_by("display_name"),
to_attr="channels_ordered",
),
- ).get(
- twitch_id=twitch_id,
- )
+ ).get(twitch_id=twitch_id)
except DropCampaign.DoesNotExist as exc:
msg = "No campaign found matching the query"
raise Http404(msg) from exc
@@ -781,7 +814,10 @@ def drop_campaign_detail_view(request: HttpRequest, twitch_id: str) -> HttpRespo
if benefit.distribution_type == "BADGE" and benefit.name
}
badge_descriptions_by_title: dict[str, str] = dict(
- ChatBadge.objects.filter(title__in=badge_benefit_names).values_list("title", "description"),
+ ChatBadge.objects.filter(title__in=badge_benefit_names).values_list(
+ "title",
+ "description",
+ ),
)
serialized_drops = serialize(
@@ -829,7 +865,9 @@ def drop_campaign_detail_view(request: HttpRequest, twitch_id: str) -> HttpRespo
if fields.get("description"):
continue
- badge_description: str | None = badge_descriptions_by_title.get(fields.get("name", ""))
+ badge_description: str | None = badge_descriptions_by_title.get(
+ fields.get("name", ""),
+ )
if badge_description:
fields["description"] = badge_description
@@ -845,7 +883,9 @@ def drop_campaign_detail_view(request: HttpRequest, twitch_id: str) -> HttpRespo
awarded_badge = None
for benefit in drop.benefits.all():
if benefit.distribution_type == "BADGE":
- awarded_badge: ChatBadge | None = ChatBadge.objects.filter(title=benefit.name).first()
+ awarded_badge: ChatBadge | None = ChatBadge.objects.filter(
+ title=benefit.name,
+ ).first()
break
enhanced_drop["awarded_badge"] = awarded_badge
@@ -865,20 +905,29 @@ def drop_campaign_detail_view(request: HttpRequest, twitch_id: str) -> HttpRespo
else f"Twitch drop campaign: {campaign_name}"
)
campaign_image: str | None = campaign.image_best_url
- campaign_image_width: int | None = campaign.image_width if campaign.image_file else None
- campaign_image_height: int | None = campaign.image_height if campaign.image_file else None
+ campaign_image_width: int | None = (
+ campaign.image_width if campaign.image_file else None
+ )
+ campaign_image_height: int | None = (
+ campaign.image_height if campaign.image_file else None
+ )
+ url: str = request.build_absolute_uri(
+ reverse("twitch:campaign_detail", args=[campaign.twitch_id]),
+ )
+
+ # TODO(TheLovinator): If the campaign has specific allowed channels, we could list those as potential locations instead of just linking to Twitch homepage. # noqa: TD003
campaign_schema: dict[str, str | dict[str, str]] = {
"@context": "https://schema.org",
"@type": "Event",
"name": campaign_name,
"description": campaign_description,
- "url": request.build_absolute_uri(reverse("twitch:campaign_detail", args=[campaign.twitch_id])),
+ "url": url,
"eventStatus": "https://schema.org/EventScheduled",
"eventAttendanceMode": "https://schema.org/OnlineEventAttendanceMode",
"location": {
"@type": "VirtualLocation",
- "url": "https://www.twitch.tv",
+ "url": "https://www.twitch.tv/",
},
}
if campaign.start_at:
@@ -896,17 +945,24 @@ def drop_campaign_detail_view(request: HttpRequest, twitch_id: str) -> HttpRespo
}
# Breadcrumb schema for navigation
- game_name: str = campaign.game.display_name or campaign.game.name or campaign.game.twitch_id
+ # TODO(TheLovinator): We should have a game.get_display_name() method that encapsulates the logic of choosing between display_name, name, and twitch_id. # noqa: TD003
+ game_name: str = (
+ campaign.game.display_name or campaign.game.name or campaign.game.twitch_id
+ )
breadcrumb_schema: dict[str, Any] = _build_breadcrumb_schema([
{"name": "Home", "url": request.build_absolute_uri("/")},
{"name": "Games", "url": request.build_absolute_uri("/games/")},
{
"name": game_name,
- "url": request.build_absolute_uri(reverse("twitch:game_detail", args=[campaign.game.twitch_id])),
+ "url": request.build_absolute_uri(
+ reverse("twitch:game_detail", args=[campaign.game.twitch_id]),
+ ),
},
{
"name": campaign_name,
- "url": request.build_absolute_uri(reverse("twitch:campaign_detail", args=[campaign.twitch_id])),
+ "url": request.build_absolute_uri(
+ reverse("twitch:campaign_detail", args=[campaign.twitch_id]),
+ ),
},
])
@@ -990,7 +1046,9 @@ class GamesGridView(ListView):
.order_by("display_name")
)
- games_by_org: defaultdict[Organization, list[dict[str, Game]]] = defaultdict(list)
+ games_by_org: defaultdict[Organization, list[dict[str, Game]]] = defaultdict(
+ list,
+ )
for game in games_with_campaigns:
for org in game.owners.all():
games_by_org[org].append({"game": game})
@@ -1003,14 +1061,14 @@ class GamesGridView(ListView):
collection_schema: dict[str, str] = {
"@context": "https://schema.org",
"@type": "CollectionPage",
- "name": "Twitch Drop Games",
- "description": "Browse all Twitch games with active drop campaigns and rewards.",
+ "name": "Twitch Games",
+ "description": "Twitch games that had or have Twitch drops.",
"url": self.request.build_absolute_uri("/games/"),
}
seo_context: dict[str, Any] = _build_seo_context(
- page_title="Twitch Drop Games",
- page_description="Browse all Twitch games with active drop campaigns and rewards.",
+ page_title="Twitch Games",
+ page_description="Twitch games that had or have Twitch drops.",
schema_data=collection_schema,
)
context.update(seo_context)
@@ -1085,7 +1143,8 @@ class GameDetailView(DetailView):
# Bulk-load all matching ChatBadge instances to avoid N+1 queries
badges_by_title: dict[str, ChatBadge] = {
- badge.title: badge for badge in ChatBadge.objects.filter(title__in=benefit_badge_titles)
+ badge.title: badge
+ for badge in ChatBadge.objects.filter(title__in=benefit_badge_titles)
}
for drop in drops_list:
@@ -1122,19 +1181,31 @@ class GameDetailView(DetailView):
and campaign.end_at >= now
]
active_campaigns.sort(
- key=lambda c: c.end_at if c.end_at is not None else datetime.datetime.max.replace(tzinfo=datetime.UTC),
+ key=lambda c: (
+ c.end_at
+ if c.end_at is not None
+ else datetime.datetime.max.replace(tzinfo=datetime.UTC)
+ ),
)
upcoming_campaigns: list[DropCampaign] = [
- campaign for campaign in all_campaigns if campaign.start_at is not None and campaign.start_at > now
+ campaign
+ for campaign in all_campaigns
+ if campaign.start_at is not None and campaign.start_at > now
]
upcoming_campaigns.sort(
- key=lambda c: c.start_at if c.start_at is not None else datetime.datetime.max.replace(tzinfo=datetime.UTC),
+ key=lambda c: (
+ c.start_at
+ if c.start_at is not None
+ else datetime.datetime.max.replace(tzinfo=datetime.UTC)
+ ),
)
expired_campaigns: list[DropCampaign] = [
- campaign for campaign in all_campaigns if campaign.end_at is not None and campaign.end_at < now
+ campaign
+ for campaign in all_campaigns
+ if campaign.end_at is not None and campaign.end_at < now
]
serialized_game: str = serialize(
@@ -1173,27 +1244,27 @@ class GameDetailView(DetailView):
"updated_at",
),
)
- campaigns_data: list[dict[str, Any]] = json.loads(
- serialized_campaigns,
- )
+ campaigns_data: list[dict[str, Any]] = json.loads(serialized_campaigns)
game_data[0]["fields"]["campaigns"] = campaigns_data
owners: list[Organization] = list(game.owners.all())
game_name: str = game.display_name or game.name or game.twitch_id
- game_description: str = (
- f"Twitch drop campaigns for {game_name}. View active, upcoming, and completed drop rewards."
- )
+ game_description: str = f"Twitch drop campaigns for {game_name}."
game_image: str | None = game.box_art_best_url
game_image_width: int | None = game.box_art_width if game.box_art_file else None
- game_image_height: int | None = game.box_art_height if game.box_art_file else None
+ game_image_height: int | None = (
+ game.box_art_height if game.box_art_file else None
+ )
game_schema: dict[str, Any] = {
"@context": "https://schema.org",
"@type": "VideoGame",
"name": game_name,
"description": game_description,
- "url": self.request.build_absolute_uri(reverse("twitch:game_detail", args=[game.twitch_id])),
+ "url": self.request.build_absolute_uri(
+ reverse("twitch:game_detail", args=[game.twitch_id]),
+ ),
}
if game.box_art_best_url:
game_schema["image"] = game.box_art_best_url
@@ -1209,7 +1280,9 @@ class GameDetailView(DetailView):
{"name": "Games", "url": self.request.build_absolute_uri("/games/")},
{
"name": game_name,
- "url": self.request.build_absolute_uri(reverse("twitch:game_detail", args=[game.twitch_id])),
+ "url": self.request.build_absolute_uri(
+ reverse("twitch:game_detail", args=[game.twitch_id]),
+ ),
},
])
@@ -1223,19 +1296,17 @@ class GameDetailView(DetailView):
breadcrumb_schema=breadcrumb_schema,
modified_date=game.updated_at.isoformat() if game.updated_at else None,
)
- context.update(
- {
- "active_campaigns": active_campaigns,
- "upcoming_campaigns": upcoming_campaigns,
- "expired_campaigns": expired_campaigns,
- "owner": owners[0] if owners else None,
- "owners": owners,
- "drop_awarded_badges": drop_awarded_badges,
- "now": now,
- "game_data": format_and_color_json(game_data[0]),
- **seo_context,
- },
- )
+ context.update({
+ "active_campaigns": active_campaigns,
+ "upcoming_campaigns": upcoming_campaigns,
+ "expired_campaigns": expired_campaigns,
+ "owner": owners[0] if owners else None,
+ "owners": owners,
+ "drop_awarded_badges": drop_awarded_badges,
+ "now": now,
+ "game_data": format_and_color_json(game_data[0]),
+ **seo_context,
+ })
return context
@@ -1266,8 +1337,8 @@ def dashboard(request: HttpRequest) -> HttpResponse:
.order_by("-start_at")
)
- # Preserve insertion order (newest campaigns first). Group by game so games with multiple owners
- # don't render duplicate campaign cards.
+ # Preserve insertion order (newest campaigns first).
+ # Group by game so games with multiple owners don't render duplicate campaign cards.
campaigns_by_game: OrderedDict[str, dict[str, Any]] = OrderedDict()
for campaign in active_campaigns:
@@ -1296,6 +1367,7 @@ def dashboard(request: HttpRequest) -> HttpResponse:
)
# WebSite schema with SearchAction for sitelinks search box
+ # TODO(TheLovinator): Should this be on all pages instead of just the dashboard? # noqa: TD003
website_schema: dict[str, str | dict[str, str | dict[str, str]]] = {
"@context": "https://schema.org",
"@type": "WebSite",
@@ -1305,15 +1377,17 @@ def dashboard(request: HttpRequest) -> HttpResponse:
"@type": "SearchAction",
"target": {
"@type": "EntryPoint",
- "urlTemplate": request.build_absolute_uri("/search/?q={search_term_string}"),
+ "urlTemplate": request.build_absolute_uri(
+ "/search/?q={search_term_string}",
+ ),
},
"query-input": "required name=search_term_string",
},
}
seo_context: dict[str, Any] = _build_seo_context(
- page_title="ttvdrops Dashboard",
- page_description="Dashboard showing active Twitch drop campaigns, rewards, and quests. Track all current drops and campaigns.", # noqa: E501
+ page_title="Twitch Drops",
+ page_description="Overview of active Twitch drop campaigns and rewards.",
og_type="website",
schema_data=website_schema,
)
@@ -1372,11 +1446,11 @@ def reward_campaign_list_view(request: HttpRequest) -> HttpResponse:
if status_filter:
title += f" ({status_filter.capitalize()})"
- description = "Browse all Twitch reward campaigns with active quests and rewards."
+ description = "Twitch rewards."
if status_filter == "active":
- description = "Browse currently active Twitch reward campaigns with quests and rewards available now."
+ description = "Browse active Twitch reward campaigns."
elif status_filter == "upcoming":
- description = "View upcoming Twitch reward campaigns starting soon."
+ description = "Browse upcoming Twitch reward campaigns."
elif status_filter == "expired":
description = "Browse expired Twitch reward campaigns."
@@ -1389,7 +1463,11 @@ def reward_campaign_list_view(request: HttpRequest) -> HttpResponse:
elif game_filter:
base_url += f"?game={game_filter}"
- pagination_info: list[dict[str, str]] | None = _build_pagination_info(request, reward_campaigns, base_url)
+ pagination_info: list[dict[str, str]] | None = _build_pagination_info(
+ request,
+ reward_campaigns,
+ base_url,
+ )
# CollectionPage schema for reward campaigns list
collection_schema: dict[str, str | dict[str, str | dict[str, str]]] = {
@@ -1434,9 +1512,9 @@ def reward_campaign_detail_view(request: HttpRequest, twitch_id: str) -> HttpRes
Http404: If the reward campaign is not found.
"""
try:
- reward_campaign: RewardCampaign = RewardCampaign.objects.select_related("game").get(
- twitch_id=twitch_id,
- )
+ reward_campaign: RewardCampaign = RewardCampaign.objects.select_related(
+ "game",
+ ).get(twitch_id=twitch_id)
except RewardCampaign.DoesNotExist as exc:
msg = "No reward campaign found matching the query"
raise Http404(msg) from exc
@@ -1469,7 +1547,7 @@ def reward_campaign_detail_view(request: HttpRequest, twitch_id: str) -> HttpRes
campaign_description: str = (
_truncate_description(reward_campaign.summary)
if reward_campaign.summary
- else f"Twitch reward campaign: {campaign_name}"
+ else campaign_name
)
campaign_schema: dict[str, str | dict[str, str]] = {
@@ -1477,13 +1555,12 @@ def reward_campaign_detail_view(request: HttpRequest, twitch_id: str) -> HttpRes
"@type": "Event",
"name": campaign_name,
"description": campaign_description,
- "url": request.build_absolute_uri(reverse("twitch:reward_campaign_detail", args=[reward_campaign.twitch_id])),
+ "url": request.build_absolute_uri(
+ reverse("twitch:reward_campaign_detail", args=[reward_campaign.twitch_id]),
+ ),
"eventStatus": "https://schema.org/EventScheduled",
"eventAttendanceMode": "https://schema.org/OnlineEventAttendanceMode",
- "location": {
- "@type": "VirtualLocation",
- "url": "https://www.twitch.tv",
- },
+ "location": {"@type": "VirtualLocation", "url": "https://www.twitch.tv"},
}
if reward_campaign.starts_at:
campaign_schema["startDate"] = reward_campaign.starts_at.isoformat()
@@ -1499,11 +1576,17 @@ def reward_campaign_detail_view(request: HttpRequest, twitch_id: str) -> HttpRes
# Breadcrumb schema
breadcrumb_schema: dict[str, Any] = _build_breadcrumb_schema([
{"name": "Home", "url": request.build_absolute_uri("/")},
- {"name": "Reward Campaigns", "url": request.build_absolute_uri("/reward-campaigns/")},
+ {
+ "name": "Reward Campaigns",
+ "url": request.build_absolute_uri("/reward-campaigns/"),
+ },
{
"name": campaign_name,
"url": request.build_absolute_uri(
- reverse("twitch:reward_campaign_detail", args=[reward_campaign.twitch_id]),
+ reverse(
+ "twitch:reward_campaign_detail",
+ args=[reward_campaign.twitch_id],
+ ),
),
},
])
@@ -1513,7 +1596,7 @@ def reward_campaign_detail_view(request: HttpRequest, twitch_id: str) -> HttpRes
page_description=campaign_description,
schema_data=campaign_schema,
breadcrumb_schema=breadcrumb_schema,
- modified_date=reward_campaign.updated_at.isoformat() if reward_campaign.updated_at else None,
+ modified_date=reward_campaign.updated_at.isoformat(),
)
context: dict[str, Any] = {
"reward_campaign": reward_campaign,
@@ -1544,7 +1627,9 @@ def debug_view(request: HttpRequest) -> HttpResponse:
broken_image_campaigns: QuerySet[DropCampaign] = (
DropCampaign.objects
.filter(
- Q(image_url__isnull=True) | Q(image_url__exact="") | ~Q(image_url__startswith="http"),
+ Q(image_url__isnull=True)
+ | Q(image_url__exact="")
+ | ~Q(image_url__startswith="http"),
)
.exclude(
Exists(
@@ -1560,15 +1645,15 @@ def debug_view(request: HttpRequest) -> HttpResponse:
broken_benefit_images: QuerySet[DropBenefit] = DropBenefit.objects.annotate(
trimmed_url=Trim("image_asset_url"),
).filter(
- Q(image_asset_url__isnull=True) | Q(trimmed_url__exact="") | ~Q(image_asset_url__startswith="http"),
+ Q(image_asset_url__isnull=True)
+ | Q(trimmed_url__exact="")
+ | ~Q(image_asset_url__startswith="http"),
)
# Time-based drops without any benefits
drops_without_benefits: QuerySet[TimeBasedDrop] = TimeBasedDrop.objects.filter(
benefits__isnull=True,
- ).select_related(
- "campaign__game",
- )
+ ).select_related("campaign__game")
# Campaigns with invalid dates (start after end or missing either)
invalid_date_campaigns: QuerySet[DropCampaign] = DropCampaign.objects.filter(
@@ -1585,12 +1670,14 @@ def debug_view(request: HttpRequest) -> HttpResponse:
.order_by("game__display_name", "name")
)
- # Active campaigns with no images at all (no direct URL and no benefit image fallbacks)
+ # Active campaigns with no images at all
active_missing_image: QuerySet[DropCampaign] = (
DropCampaign.objects
.filter(start_at__lte=now, end_at__gte=now)
.filter(
- Q(image_url__isnull=True) | Q(image_url__exact="") | ~Q(image_url__startswith="http"),
+ Q(image_url__isnull=True)
+ | Q(image_url__exact="")
+ | ~Q(image_url__startswith="http"),
)
.exclude(
Exists(
@@ -1608,29 +1695,34 @@ def debug_view(request: HttpRequest) -> HttpResponse:
for campaign in DropCampaign.objects.only("operation_names"):
for op_name in campaign.operation_names:
if op_name and op_name.strip():
- operation_names_counter[op_name.strip()] = operation_names_counter.get(op_name.strip(), 0) + 1
+ operation_names_counter[op_name.strip()] = (
+ operation_names_counter.get(op_name.strip(), 0) + 1
+ )
operation_names_with_counts: list[dict[str, Any]] = [
- {"trimmed_op": op_name, "count": count} for op_name, count in sorted(operation_names_counter.items())
+ {"trimmed_op": op_name, "count": count}
+ for op_name, count in sorted(operation_names_counter.items())
]
# Campaigns missing DropCampaignDetails operation name
- # SQLite doesn't support JSON contains lookup, so we handle it in Python for compatibility
+ # Need to handle SQLite separately since it doesn't support JSONField lookups
+ # SQLite is used when testing.
if connection.vendor == "sqlite":
- # For SQLite, fetch all campaigns and filter in Python
- all_campaigns: QuerySet[DropCampaign] = DropCampaign.objects.select_related("game").order_by(
- "game__display_name",
- "name",
- )
+ all_campaigns: QuerySet[DropCampaign] = DropCampaign.objects.select_related(
+ "game",
+ ).order_by("game__display_name", "name")
campaigns_missing_dropcampaigndetails: list[DropCampaign] = [
- c for c in all_campaigns if c.operation_names is None or "DropCampaignDetails" not in c.operation_names
+ c
+ for c in all_campaigns
+ if c.operation_names is None
+ or "DropCampaignDetails" not in c.operation_names
]
else:
- # For PostgreSQL, use the efficient contains lookup
campaigns_missing_dropcampaigndetails: list[DropCampaign] = list(
DropCampaign.objects
.filter(
- Q(operation_names__isnull=True) | ~Q(operation_names__contains=["DropCampaignDetails"]),
+ Q(operation_names__isnull=True)
+ | ~Q(operation_names__contains=["DropCampaignDetails"]),
)
.select_related("game")
.order_by("game__display_name", "name"),
@@ -1650,17 +1742,13 @@ def debug_view(request: HttpRequest) -> HttpResponse:
}
seo_context: dict[str, Any] = _build_seo_context(
- page_title="Debug - TTVDrops",
- page_description="Debug page showing data inconsistencies and potential issues in the TTVDrops database.",
+ page_title="Debug",
+ page_description="Debug view showing potentially broken or inconsistent data.",
robots_directive="noindex, nofollow",
)
context.update(seo_context)
- return render(
- request,
- "twitch/debug.html",
- context,
- )
+ return render(request, "twitch/debug.html", context)
# MARK: /games/list/
@@ -1684,7 +1772,7 @@ def docs_rss_view(request: HttpRequest) -> HttpResponse:
def absolute(path: str) -> str:
try:
return request.build_absolute_uri(path)
- except Exception: # pragma: no cover - defensive logging for docs only
+ except Exception:
logger.exception("Failed to build absolute URL for %s", path)
return path
@@ -1700,7 +1788,7 @@ def docs_rss_view(request: HttpRequest) -> HttpResponse:
trimmed = trimmed[:second_item] + trimmed[end_channel:]
formatted: str = trimmed.replace("><", ">\n<")
return "\n".join(line for line in formatted.splitlines() if line.strip())
- except Exception: # pragma: no cover - defensive formatting for docs only
+ except Exception:
logger.exception("Failed to pretty-print RSS example")
return xml_str
@@ -1714,8 +1802,11 @@ def docs_rss_view(request: HttpRequest) -> HttpResponse:
response: HttpResponse = feed_view(limited_request, *args)
return _pretty_example(response.content.decode("utf-8"))
- except Exception: # pragma: no cover - defensive logging for docs only
- logger.exception("Failed to render %s for RSS docs", feed_view.__class__.__name__)
+ except Exception:
+ logger.exception(
+ "Failed to render %s for RSS docs",
+ feed_view.__class__.__name__,
+ )
return ""
feeds: list[dict[str, str]] = [
@@ -1755,30 +1846,40 @@ def docs_rss_view(request: HttpRequest) -> HttpResponse:
"title": "Campaigns for a Single Game",
"description": "Latest drop campaigns for one game.",
"url": (
- absolute(reverse("twitch:game_campaign_feed", args=[sample_game.twitch_id]))
+ absolute(
+ reverse("twitch:game_campaign_feed", args=[sample_game.twitch_id]),
+ )
if sample_game
else absolute("/rss/games//campaigns/")
),
"has_sample": bool(sample_game),
- "example_xml": render_feed(GameCampaignFeed(), sample_game.twitch_id) if sample_game else "",
+ "example_xml": render_feed(GameCampaignFeed(), sample_game.twitch_id)
+ if sample_game
+ else "",
},
{
"title": "Campaigns for an Organization",
"description": "Drop campaigns across games owned by one organization.",
"url": (
- absolute(reverse("twitch:organization_campaign_feed", args=[sample_org.twitch_id]))
+ absolute(
+ reverse(
+ "twitch:organization_campaign_feed",
+ args=[sample_org.twitch_id],
+ ),
+ )
if sample_org
else absolute("/rss/organizations//campaigns/")
),
"has_sample": bool(sample_org),
- "example_xml": render_feed(OrganizationCampaignFeed(), sample_org.twitch_id) if sample_org else "",
+ "example_xml": render_feed(OrganizationCampaignFeed(), sample_org.twitch_id)
+ if sample_org
+ else "",
},
]
seo_context: dict[str, Any] = _build_seo_context(
- page_title="RSS Feeds - TTVDrops",
- page_description="Available RSS feeds for Twitch drops, campaigns, games, organizations, and rewards.",
- robots_directive="noindex, follow",
+ page_title="Twitch RSS Feeds",
+ page_description="RSS feeds for Twitch drops.",
)
return render(
request,
@@ -1812,9 +1913,15 @@ class ChannelListView(ListView):
search_query: str | None = self.request.GET.get("search")
if search_query:
- queryset = queryset.filter(Q(name__icontains=search_query) | Q(display_name__icontains=search_query))
+ queryset = queryset.filter(
+ Q(name__icontains=search_query)
+ | Q(display_name__icontains=search_query),
+ )
- return queryset.annotate(campaign_count=Count("allowed_campaigns")).order_by("-campaign_count", "name")
+ return queryset.annotate(campaign_count=Count("allowed_campaigns")).order_by(
+ "-campaign_count",
+ "name",
+ )
def get_context_data(self, **kwargs) -> dict[str, Any]:
"""Add additional context data.
@@ -1835,7 +1942,9 @@ class ChannelListView(ListView):
page_obj: Page | None = context.get("page_obj")
pagination_info: list[dict[str, str]] | None = (
- _build_pagination_info(self.request, page_obj, base_url) if isinstance(page_obj, Page) else None
+ _build_pagination_info(self.request, page_obj, base_url)
+ if isinstance(page_obj, Page)
+ else None
)
# CollectionPage schema for channels list
@@ -1843,13 +1952,13 @@ class ChannelListView(ListView):
"@context": "https://schema.org",
"@type": "CollectionPage",
"name": "Twitch Channels",
- "description": "Browse Twitch channels participating in drop campaigns and find their available rewards.",
+ "description": "List of Twitch channels participating in drop campaigns.",
"url": self.request.build_absolute_uri("/channels/"),
}
seo_context: dict[str, Any] = _build_seo_context(
page_title="Twitch Channels",
- page_description="Browse Twitch channels participating in drop campaigns and find their available rewards.",
+ page_description="List of Twitch channels participating in drop campaigns.",
pagination_info=pagination_info,
schema_data=collection_schema,
)
@@ -1931,30 +2040,36 @@ class ChannelDetailView(DetailView):
and campaign.end_at >= now
]
active_campaigns.sort(
- key=lambda c: c.end_at if c.end_at is not None else datetime.datetime.max.replace(tzinfo=datetime.UTC),
+ key=lambda c: (
+ c.end_at
+ if c.end_at is not None
+ else datetime.datetime.max.replace(tzinfo=datetime.UTC)
+ ),
)
upcoming_campaigns: list[DropCampaign] = [
- campaign for campaign in all_campaigns if campaign.start_at is not None and campaign.start_at > now
+ campaign
+ for campaign in all_campaigns
+ if campaign.start_at is not None and campaign.start_at > now
]
upcoming_campaigns.sort(
- key=lambda c: c.start_at if c.start_at is not None else datetime.datetime.max.replace(tzinfo=datetime.UTC),
+ key=lambda c: (
+ c.start_at
+ if c.start_at is not None
+ else datetime.datetime.max.replace(tzinfo=datetime.UTC)
+ ),
)
expired_campaigns: list[DropCampaign] = [
- campaign for campaign in all_campaigns if campaign.end_at is not None and campaign.end_at < now
+ campaign
+ for campaign in all_campaigns
+ if campaign.end_at is not None and campaign.end_at < now
]
serialized_channel: str = serialize(
"json",
[channel],
- fields=(
- "twitch_id",
- "name",
- "display_name",
- "added_at",
- "updated_at",
- ),
+ fields=("twitch_id", "name", "display_name", "added_at", "updated_at"),
)
channel_data: list[dict[str, Any]] = json.loads(serialized_channel)
@@ -1978,15 +2093,20 @@ class ChannelDetailView(DetailView):
campaigns_data: list[dict[str, Any]] = json.loads(serialized_campaigns)
channel_data[0]["fields"]["campaigns"] = campaigns_data
- channel_name: str = channel.display_name or channel.name or channel.twitch_id
- channel_description: str = f"Twitch channel {channel_name} participating in drop campaigns. View active, upcoming, and expired campaign rewards." # noqa: E501
+ name: str = channel.display_name or channel.name or channel.twitch_id
+ total_campaigns: int = len(all_campaigns)
+ description: str = f"{name} participates in {total_campaigns} drop campaign"
+ if total_campaigns != 1:
+ description += "s"
channel_schema: dict[str, Any] = {
"@context": "https://schema.org",
"@type": "BroadcastChannel",
- "name": channel_name,
- "description": channel_description,
- "url": self.request.build_absolute_uri(reverse("twitch:channel_detail", args=[channel.twitch_id])),
+ "name": name,
+ "description": description,
+ "url": self.request.build_absolute_uri(
+ reverse("twitch:channel_detail", args=[channel.twitch_id]),
+ ),
"broadcastChannelId": channel.twitch_id,
"providerName": "Twitch",
}
@@ -1996,28 +2116,30 @@ class ChannelDetailView(DetailView):
{"name": "Home", "url": self.request.build_absolute_uri("/")},
{"name": "Channels", "url": self.request.build_absolute_uri("/channels/")},
{
- "name": channel_name,
- "url": self.request.build_absolute_uri(reverse("twitch:channel_detail", args=[channel.twitch_id])),
+ "name": name,
+ "url": self.request.build_absolute_uri(
+ reverse("twitch:channel_detail", args=[channel.twitch_id]),
+ ),
},
])
seo_context: dict[str, Any] = _build_seo_context(
- page_title=channel_name,
- page_description=channel_description,
+ page_title=name,
+ page_description=description,
schema_data=channel_schema,
breadcrumb_schema=breadcrumb_schema,
- modified_date=channel.updated_at.isoformat() if channel.updated_at else None,
- )
- context.update(
- {
- "active_campaigns": active_campaigns,
- "upcoming_campaigns": upcoming_campaigns,
- "expired_campaigns": expired_campaigns,
- "now": now,
- "channel_data": format_and_color_json(channel_data[0]),
- **seo_context,
- },
+ modified_date=channel.updated_at.isoformat()
+ if channel.updated_at
+ else None,
)
+ context.update({
+ "active_campaigns": active_campaigns,
+ "upcoming_campaigns": upcoming_campaigns,
+ "expired_campaigns": expired_campaigns,
+ "now": now,
+ "channel_data": format_and_color_json(channel_data[0]),
+ **seo_context,
+ })
return context
@@ -2036,10 +2158,7 @@ def badge_list_view(request: HttpRequest) -> HttpResponse:
ChatBadgeSet.objects
.all()
.prefetch_related(
- Prefetch(
- "badges",
- queryset=ChatBadge.objects.order_by("badge_id"),
- ),
+ Prefetch("badges", queryset=ChatBadge.objects.order_by("badge_id")),
)
.order_by("set_id")
)
@@ -2057,14 +2176,14 @@ def badge_list_view(request: HttpRequest) -> HttpResponse:
collection_schema: dict[str, str] = {
"@context": "https://schema.org",
"@type": "CollectionPage",
- "name": "Twitch Chat Badges",
- "description": "Browse all Twitch chat badges awarded through drop campaigns and their associated rewards.",
+ "name": "Twitch chat badges",
+ "description": "List of Twitch chat badges awarded through drop campaigns.",
"url": request.build_absolute_uri("/badges/"),
}
seo_context: dict[str, Any] = _build_seo_context(
page_title="Twitch Chat Badges",
- page_description="Browse all Twitch chat badges awarded through drop campaigns and their associated rewards.",
+ page_description="List of Twitch chat badges awarded through drop campaigns.",
schema_data=collection_schema,
)
context: dict[str, Any] = {
@@ -2092,10 +2211,7 @@ def badge_set_detail_view(request: HttpRequest, set_id: str) -> HttpResponse:
"""
try:
badge_set: ChatBadgeSet = ChatBadgeSet.objects.prefetch_related(
- Prefetch(
- "badges",
- queryset=ChatBadge.objects.order_by("badge_id"),
- ),
+ Prefetch("badges", queryset=ChatBadge.objects.order_by("badge_id")),
).get(set_id=set_id)
except ChatBadgeSet.DoesNotExist as exc:
msg = "No badge set found matching the query"
@@ -2118,11 +2234,7 @@ def badge_set_detail_view(request: HttpRequest, set_id: str) -> HttpResponse:
serialized_set: str = serialize(
"json",
[badge_set],
- fields=(
- "set_id",
- "added_at",
- "updated_at",
- ),
+ fields=("set_id", "added_at", "updated_at"),
)
set_data: list[dict[str, Any]] = json.loads(serialized_set)
@@ -2147,16 +2259,20 @@ def badge_set_detail_view(request: HttpRequest, set_id: str) -> HttpResponse:
set_data[0]["fields"]["badges"] = badges_data
badge_set_name: str = badge_set.set_id
- badge_set_description: str = (
- f"Twitch chat badge set {badge_set_name} with {badges.count()} badge(s) awarded through drop campaigns."
- )
+ badge_count: int = badges.count()
+ badge_set_description: str = (
+ f"Twitch chat badge set {badge_set_name} with {badge_count} "
+ f"badge{'s' if badge_count != 1 else ''} awarded through drop campaigns."
+ )
badge_schema: dict[str, Any] = {
"@context": "https://schema.org",
"@type": "ItemList",
"name": badge_set_name,
"description": badge_set_description,
- "url": request.build_absolute_uri(reverse("twitch:badge_set_detail", args=[badge_set.set_id])),
+ "url": request.build_absolute_uri(
+ reverse("twitch:badge_set_detail", args=[badge_set.set_id]),
+ ),
}
seo_context: dict[str, Any] = _build_seo_context(
@@ -2303,7 +2415,7 @@ def export_campaigns_json(request: HttpRequest) -> HttpResponse:
"details_url": campaign.details_url,
"account_link_url": campaign.account_link_url,
"added_at": campaign.added_at.isoformat() if campaign.added_at else None,
- "updated_at": campaign.updated_at.isoformat() if campaign.updated_at else None,
+ "updated_at": campaign.updated_at.isoformat(),
})
# Create JSON response
@@ -2407,12 +2519,7 @@ def export_organizations_csv(request: HttpRequest) -> HttpResponse: # noqa: ARG
response["Content-Disposition"] = "attachment; filename=organizations.csv"
writer = csv.writer(response)
- writer.writerow([
- "Twitch ID",
- "Name",
- "Added At",
- "Updated At",
- ])
+ writer.writerow(["Twitch ID", "Name", "Added At", "Updated At"])
for org in queryset:
writer.writerow([
@@ -2458,7 +2565,7 @@ def export_organizations_json(request: HttpRequest) -> HttpResponse: # noqa: AR
# MARK: /sitemap.xml
-def sitemap_view(request: HttpRequest) -> HttpResponse:
+def sitemap_view(request: HttpRequest) -> HttpResponse: # noqa: PLR0915
"""Generate a dynamic XML sitemap for search engines.
Args:
@@ -2476,9 +2583,17 @@ def sitemap_view(request: HttpRequest) -> HttpResponse:
sitemap_urls.extend([
{"url": f"{base_url}/", "priority": "1.0", "changefreq": "daily"},
{"url": f"{base_url}/campaigns/", "priority": "0.9", "changefreq": "daily"},
- {"url": f"{base_url}/reward-campaigns/", "priority": "0.9", "changefreq": "daily"},
+ {
+ "url": f"{base_url}/reward-campaigns/",
+ "priority": "0.9",
+ "changefreq": "daily",
+ },
{"url": f"{base_url}/games/", "priority": "0.9", "changefreq": "weekly"},
- {"url": f"{base_url}/organizations/", "priority": "0.8", "changefreq": "weekly"},
+ {
+ "url": f"{base_url}/organizations/",
+ "priority": "0.8",
+ "changefreq": "weekly",
+ },
{"url": f"{base_url}/channels/", "priority": "0.8", "changefreq": "weekly"},
{"url": f"{base_url}/badges/", "priority": "0.7", "changefreq": "monthly"},
{"url": f"{base_url}/emotes/", "priority": "0.7", "changefreq": "monthly"},
@@ -2500,8 +2615,10 @@ def sitemap_view(request: HttpRequest) -> HttpResponse:
# Dynamic detail pages - Campaigns
campaigns: QuerySet[DropCampaign] = DropCampaign.objects.all()
for campaign in campaigns:
+ resource_url: str = reverse("twitch:campaign_detail", args=[campaign.twitch_id])
+ full_url: str = f"{base_url}{resource_url}"
entry: dict[str, str | dict[str, str]] = {
- "url": f"{base_url}{reverse('twitch:campaign_detail', args=[campaign.twitch_id])}",
+ "url": full_url,
"priority": "0.7",
"changefreq": "weekly",
}
@@ -2512,8 +2629,10 @@ def sitemap_view(request: HttpRequest) -> HttpResponse:
# Dynamic detail pages - Organizations
orgs: QuerySet[Organization] = Organization.objects.all()
for org in orgs:
+ resource_url = reverse("twitch:organization_detail", args=[org.twitch_id])
+ full_url: str = f"{base_url}{resource_url}"
entry: dict[str, str | dict[str, str]] = {
- "url": f"{base_url}{reverse('twitch:organization_detail', args=[org.twitch_id])}",
+ "url": full_url,
"priority": "0.7",
"changefreq": "weekly",
}
@@ -2524,8 +2643,10 @@ def sitemap_view(request: HttpRequest) -> HttpResponse:
# Dynamic detail pages - Channels
channels: QuerySet[Channel] = Channel.objects.all()
for channel in channels:
+ resource_url = reverse("twitch:channel_detail", args=[channel.twitch_id])
+ full_url: str = f"{base_url}{resource_url}"
entry: dict[str, str | dict[str, str]] = {
- "url": f"{base_url}{reverse('twitch:channel_detail', args=[channel.twitch_id])}",
+ "url": full_url,
"priority": "0.6",
"changefreq": "weekly",
}
@@ -2535,20 +2656,27 @@ def sitemap_view(request: HttpRequest) -> HttpResponse:
# Dynamic detail pages - Badges
badge_sets: QuerySet[ChatBadgeSet] = ChatBadgeSet.objects.all()
- sitemap_urls.extend(
- {
- "url": f"{base_url}{reverse('twitch:badge_set_detail', args=[badge_set.set_id])}",
+ for badge_set in badge_sets:
+ resource_url = reverse("twitch:badge_set_detail", args=[badge_set.set_id])
+ full_url: str = f"{base_url}{resource_url}"
+ sitemap_urls.append({
+ "url": full_url,
"priority": "0.5",
"changefreq": "monthly",
- }
- for badge_set in badge_sets
- )
+ })
# Dynamic detail pages - Reward Campaigns
reward_campaigns: QuerySet[RewardCampaign] = RewardCampaign.objects.all()
for reward_campaign in reward_campaigns:
+ resource_url = reverse(
+ "twitch:reward_campaign_detail",
+ args=[
+ reward_campaign.twitch_id,
+ ],
+ )
+ full_url: str = f"{base_url}{resource_url}"
entry: dict[str, str | dict[str, str]] = {
- "url": f"{base_url}{reverse('twitch:reward_campaign_detail', args=[reward_campaign.twitch_id])}",
+ "url": full_url,
"priority": "0.6",
"changefreq": "weekly",
}
@@ -2565,7 +2693,9 @@ def sitemap_view(request: HttpRequest) -> HttpResponse:
xml_content += f" {url_entry['url']} \n"
if url_entry.get("lastmod"):
xml_content += f" {url_entry['lastmod']} \n"
- xml_content += f" {url_entry.get('changefreq', 'monthly')} \n"
+ xml_content += (
+ f" {url_entry.get('changefreq', 'monthly')} \n"
+ )
xml_content += f" {url_entry.get('priority', '0.5')} \n"
xml_content += " \n"