diff --git a/.github/workflows/deploy.yaml b/.github/workflows/deploy.yaml index 2df4917..648b85a 100644 --- a/.github/workflows/deploy.yaml +++ b/.github/workflows/deploy.yaml @@ -21,7 +21,7 @@ jobs: steps: - uses: actions/checkout@v6 - - run: uv sync --all-extras --dev + - run: uv sync --all-extras --dev -U - run: uv run pytest - name: Deploy to Server if: ${{ success() }} diff --git a/README.md b/README.md index 9fa842f..6622af1 100644 --- a/README.md +++ b/README.md @@ -114,22 +114,13 @@ uv run python manage.py backup_db --output-dir "" --prefix "ttvdrops" ### How the duck does permissions work on Linux? ```bash -sudo groupadd responses -sudo usermod -aG responses lovinator -sudo usermod -aG responses ttvdrops +sudo chown -R ttvdrops:http /home/ttvdrops/.local/share/TTVDrops/media/ +sudo chgrp -R http /home/ttvdrops/.local/share/TTVDrops/media +sudo find /home/ttvdrops/.local/share/TTVDrops/media -type d -exec chmod 2775 {} \; +sudo find /home/ttvdrops/.local/share/TTVDrops/media -type f -exec chmod 664 {} \; -sudo chown -R lovinator:responses /mnt/fourteen/Data/Responses -sudo chown -R lovinator:responses /mnt/fourteen/Data/ttvdrops -sudo chmod -R 2775 /mnt/fourteen/Data/Responses -sudo chmod -R 2775 /mnt/fourteen/Data/ttvdrops - -# Import dir -sudo setfacl -b /mnt/fourteen/Data/Responses /mnt/fourteen/Data/Responses/imported -sudo setfacl -m g:responses:rwx /mnt/fourteen/Data/Responses /mnt/fourteen/Data/Responses/imported -sudo setfacl -d -m g:responses:rwx /mnt/fourteen/Data/Responses /mnt/fourteen/Data/Responses/imported - -# Backup dir -sudo setfacl -b /mnt/fourteen/Data/ttvdrops -sudo setfacl -m g:responses:rwx /mnt/fourteen/Data/ttvdrops -sudo setfacl -d -m g:responses:rwx /mnt/fourteen/Data/ttvdrops +sudo chown -R ttvdrops:http /home/ttvdrops/.local/share/TTVDrops/datasets/ +sudo chgrp -R http /home/ttvdrops/.local/share/TTVDrops/datasets/ +sudo find /home/ttvdrops/.local/share/TTVDrops/datasets -type d -exec chmod 2775 {} \; +sudo find 
/home/ttvdrops/.local/share/TTVDrops/datasets -type f -exec chmod 664 {} \; ``` diff --git a/config/settings.py b/config/settings.py index ca45e20..da3fb63 100644 --- a/config/settings.py +++ b/config/settings.py @@ -4,6 +4,7 @@ import sys from pathlib import Path from typing import Any +import sentry_sdk from dotenv import load_dotenv from platformdirs import user_data_dir @@ -140,6 +141,8 @@ INSTALLED_APPS: list[str] = [ "django.contrib.postgres", "twitch.apps.TwitchConfig", "kick.apps.KickConfig", + "youtube.apps.YoutubeConfig", + "core.apps.CoreConfig", ] MIDDLEWARE: list[str] = [ @@ -189,3 +192,13 @@ if not TESTING: "silk.middleware.SilkyMiddleware", *MIDDLEWARE, ] + + if not DEBUG: + sentry_sdk.init( + dsn="https://1aa1ac672090fb795783de0e90a2b19f@o4505228040339456.ingest.us.sentry.io/4511055670738944", + send_default_pii=True, + enable_logs=True, + traces_sample_rate=1.0, + profile_session_sample_rate=1.0, + profile_lifecycle="trace", + ) diff --git a/config/tests/test_settings.py b/config/tests/test_settings.py index 298dd35..1b4a5f9 100644 --- a/config/tests/test_settings.py +++ b/config/tests/test_settings.py @@ -12,7 +12,6 @@ from config import settings if TYPE_CHECKING: from collections.abc import Callable from collections.abc import Generator - from collections.abc import Iterator from pathlib import Path from types import ModuleType @@ -28,7 +27,7 @@ def reload_settings_module() -> Generator[Callable[..., ModuleType]]: original_env: dict[str, str] = os.environ.copy() @contextmanager - def temporary_env(env: dict[str, str]) -> Iterator[None]: + def temporary_env(env: dict[str, str]) -> Generator[None]: previous_env: dict[str, str] = os.environ.copy() os.environ.clear() os.environ.update(env) diff --git a/config/tests/test_urls.py b/config/tests/test_urls.py index 8e04206..5e6a482 100644 --- a/config/tests/test_urls.py +++ b/config/tests/test_urls.py @@ -34,8 +34,13 @@ def _reload_urls_with(**overrides) -> ModuleType: def 
test_top_level_named_routes_available() -> None: """Top-level routes defined in `config.urls` are reversible.""" assert reverse("sitemap") == "/sitemap.xml" + # ensure the included `twitch` namespace is present - assert reverse("twitch:dashboard") == "/" + msg: str = f"Expected 'twitch:dashboard' to reverse to '/twitch/', got {reverse('twitch:dashboard')}" + assert reverse("twitch:dashboard") == "/twitch/", msg + + youtube_msg: str = f"Expected 'youtube:index' to reverse to '/youtube/', got {reverse('youtube:index')}" + assert reverse("youtube:index") == "/youtube/", youtube_msg def test_debug_tools_not_present_while_testing() -> None: diff --git a/config/urls.py b/config/urls.py index 0a9ea65..22186a7 100644 --- a/config/urls.py +++ b/config/urls.py @@ -5,16 +5,22 @@ from django.conf.urls.static import static from django.urls import include from django.urls import path -from twitch import views as twitch_views +from core import views as core_views if TYPE_CHECKING: from django.urls.resolvers import URLPattern from django.urls.resolvers import URLResolver urlpatterns: list[URLPattern | URLResolver] = [ - path(route="sitemap.xml", view=twitch_views.sitemap_view, name="sitemap"), - path(route="", view=include("twitch.urls", namespace="twitch")), + path(route="sitemap.xml", view=core_views.sitemap_view, name="sitemap"), + # Core app + path(route="", view=include("core.urls", namespace="core")), + # Twitch app + path(route="twitch/", view=include("twitch.urls", namespace="twitch")), + # Kick app path(route="kick/", view=include("kick.urls", namespace="kick")), + # YouTube app + path(route="youtube/", view=include("youtube.urls", namespace="youtube")), ] # Serve media in development diff --git a/core/__init__.py b/core/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/admin.py b/core/admin.py new file mode 100644 index 0000000..846f6b4 --- /dev/null +++ b/core/admin.py @@ -0,0 +1 @@ +# Register your models here. 
diff --git a/core/apps.py b/core/apps.py new file mode 100644 index 0000000..0568ae1 --- /dev/null +++ b/core/apps.py @@ -0,0 +1,7 @@ +from django.apps import AppConfig + + +class CoreConfig(AppConfig): + """Core application configuration.""" + + name = "core" diff --git a/core/migrations/__init__.py b/core/migrations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/models.py b/core/models.py new file mode 100644 index 0000000..6b20219 --- /dev/null +++ b/core/models.py @@ -0,0 +1 @@ +# Create your models here. diff --git a/core/tests/__init__.py b/core/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/urls.py b/core/urls.py new file mode 100644 index 0000000..6966557 --- /dev/null +++ b/core/urls.py @@ -0,0 +1,96 @@ +from typing import TYPE_CHECKING + +from django.urls import path + +from core import views +from twitch.feeds import DropCampaignFeed +from twitch.feeds import GameFeed + +if TYPE_CHECKING: + from django.urls.resolvers import URLPattern + from django.urls.resolvers import URLResolver + +app_name = "core" + + +urlpatterns: list[URLPattern | URLResolver] = [ + # / + path("", views.dashboard, name="dashboard"), + # /search/ + path("search/", views.search_view, name="search"), + # /debug/ + path("debug/", views.debug_view, name="debug"), + # /datasets/ + path("datasets/", views.dataset_backups_view, name="dataset_backups"), + # /datasets/download/<path:relative_path>/ + path( + "datasets/download/<path:relative_path>/", + views.dataset_backup_download_view, + name="dataset_backup_download", + ), + # /docs/rss/ + path("docs/rss/", views.docs_rss_view, name="docs_rss"), + # RSS feeds + # /rss/campaigns/ - all active campaigns + path("rss/campaigns/", DropCampaignFeed(), name="campaign_feed"), + # /rss/games/ - newly added games + path("rss/games/", GameFeed(), name="game_feed"), + # /rss/games/<str:game_id>/campaigns/ - active campaigns for a specific game + path( + "rss/games/<str:game_id>/campaigns/", + views.GameCampaignFeed(), + name="game_campaign_feed", + ), + # 
/rss/organizations/ - newly added organizations + path( + "rss/organizations/", + views.OrganizationRSSFeed(), + name="organization_feed", + ), + # /rss/reward-campaigns/ - all active reward campaigns + path( + "rss/reward-campaigns/", + views.RewardCampaignFeed(), + name="reward_campaign_feed", + ), + # Atom feeds (added alongside RSS to preserve backward compatibility) + path("atom/campaigns/", views.DropCampaignAtomFeed(), name="campaign_feed_atom"), + path("atom/games/", views.GameAtomFeed(), name="game_feed_atom"), + path( + "atom/games/<str:game_id>/campaigns/", + views.GameCampaignAtomFeed(), + name="game_campaign_feed_atom", + ), + path( + "atom/organizations/", + views.OrganizationAtomFeed(), + name="organization_feed_atom", + ), + path( + "atom/reward-campaigns/", + views.RewardCampaignAtomFeed(), + name="reward_campaign_feed_atom", + ), + # Discord feeds (Atom feeds with Discord relative timestamps) + path( + "discord/campaigns/", + views.DropCampaignDiscordFeed(), + name="campaign_feed_discord", + ), + path("discord/games/", views.GameDiscordFeed(), name="game_feed_discord"), + path( + "discord/games/<str:game_id>/campaigns/", + views.GameCampaignDiscordFeed(), + name="game_campaign_feed_discord", + ), + path( + "discord/organizations/", + views.OrganizationDiscordFeed(), + name="organization_feed_discord", + ), + path( + "discord/reward-campaigns/", + views.RewardCampaignDiscordFeed(), + name="reward_campaign_feed_discord", + ), +] diff --git a/core/views.py b/core/views.py new file mode 100644 index 0000000..6d1d9cf --- /dev/null +++ b/core/views.py @@ -0,0 +1,922 @@ +import datetime +import json +import logging +import operator +from collections import OrderedDict +from copy import copy +from typing import TYPE_CHECKING +from typing import Any + +from django.conf import settings +from django.db import connection +from django.db.models import Count +from django.db.models import Exists +from django.db.models import F +from django.db.models import OuterRef +from django.db.models 
import Prefetch +from django.db.models import Q +from django.db.models.functions import Trim +from django.db.models.query import QuerySet +from django.http import FileResponse +from django.http import Http404 +from django.http import HttpResponse +from django.shortcuts import render +from django.template.defaultfilters import filesizeformat +from django.urls import reverse +from django.utils import timezone + +from kick.models import KickChannel +from kick.models import KickDropCampaign +from twitch.feeds import DropCampaignAtomFeed +from twitch.feeds import DropCampaignDiscordFeed +from twitch.feeds import DropCampaignFeed +from twitch.feeds import GameAtomFeed +from twitch.feeds import GameCampaignAtomFeed +from twitch.feeds import GameCampaignDiscordFeed +from twitch.feeds import GameCampaignFeed +from twitch.feeds import GameDiscordFeed +from twitch.feeds import GameFeed +from twitch.feeds import OrganizationAtomFeed +from twitch.feeds import OrganizationDiscordFeed +from twitch.feeds import OrganizationRSSFeed +from twitch.feeds import RewardCampaignAtomFeed +from twitch.feeds import RewardCampaignDiscordFeed +from twitch.feeds import RewardCampaignFeed +from twitch.models import Channel +from twitch.models import ChatBadge +from twitch.models import ChatBadgeSet +from twitch.models import DropBenefit +from twitch.models import DropCampaign +from twitch.models import Game +from twitch.models import Organization +from twitch.models import RewardCampaign +from twitch.models import TimeBasedDrop + +if TYPE_CHECKING: + from collections.abc import Callable + from os import stat_result + from pathlib import Path + + from django.db.models import QuerySet + from django.http import HttpRequest + from django.http.request import QueryDict + + +logger: logging.Logger = logging.getLogger("ttvdrops.views") + + +MIN_QUERY_LENGTH_FOR_FTS = 3 +MIN_SEARCH_RANK = 0.05 +DEFAULT_SITE_DESCRIPTION = "Archive of Twitch drops, campaigns, rewards, and more." 
+ + +def _build_seo_context( # noqa: PLR0913, PLR0917 + page_title: str = "ttvdrops", + page_description: str | None = None, + page_image: str | None = None, + page_image_width: int | None = None, + page_image_height: int | None = None, + og_type: str = "website", + schema_data: dict[str, Any] | None = None, + breadcrumb_schema: dict[str, Any] | None = None, + pagination_info: list[dict[str, str]] | None = None, + published_date: str | None = None, + modified_date: str | None = None, + robots_directive: str = "index, follow", +) -> dict[str, Any]: + """Build SEO context for template rendering. + + Args: + page_title: Page title (shown in browser tab, og:title). + page_description: Page description (meta description, og:description). + page_image: Image URL for og:image meta tag. + page_image_width: Width of the image in pixels. + page_image_height: Height of the image in pixels. + og_type: OpenGraph type (e.g., "website", "article"). + schema_data: Dict representation of Schema.org JSON-LD data. + breadcrumb_schema: Breadcrumb schema dict for navigation hierarchy. + pagination_info: List of dicts with "rel" (prev|next|first|last) and "url". + published_date: ISO 8601 published date (e.g., "2025-01-01T00:00:00Z"). + modified_date: ISO 8601 modified date. + robots_directive: Robots meta content (e.g., "index, follow" or "noindex"). + + Returns: + Dict with SEO context variables to pass to render(). + """ + # TODO(TheLovinator): Instead of having so many parameters, # noqa: TD003 + # consider having a single "seo_info" parameter that + # can contain all of these optional fields. This would make + # it easier to extend in the future without changing the + # function signature. 
+ + context: dict[str, Any] = { + "page_title": page_title, + "page_description": page_description or DEFAULT_SITE_DESCRIPTION, + "og_type": og_type, + "robots_directive": robots_directive, + } + if page_image: + context["page_image"] = page_image + if page_image_width and page_image_height: + context["page_image_width"] = page_image_width + context["page_image_height"] = page_image_height + if schema_data: + context["schema_data"] = json.dumps(schema_data) + if breadcrumb_schema: + context["breadcrumb_schema"] = json.dumps(breadcrumb_schema) + if pagination_info: + context["pagination_info"] = pagination_info + if published_date: + context["published_date"] = published_date + if modified_date: + context["modified_date"] = modified_date + return context + + +# MARK: /sitemap.xml +def sitemap_view(request: HttpRequest) -> HttpResponse: # noqa: PLR0915 + """Generate a dynamic XML sitemap for search engines. + + Args: + request: The HTTP request. + + Returns: + HttpResponse: XML sitemap. + """ + base_url: str = f"{request.scheme}://{request.get_host()}" + + # Start building sitemap XML + sitemap_urls: list[dict[str, str | dict[str, str]]] = [] + + # Static pages + sitemap_urls.extend([ + {"url": f"{base_url}/", "priority": "1.0", "changefreq": "daily"}, + {"url": f"{base_url}/campaigns/", "priority": "0.9", "changefreq": "daily"}, + { + "url": f"{base_url}/reward-campaigns/", + "priority": "0.9", + "changefreq": "daily", + }, + {"url": f"{base_url}/games/", "priority": "0.9", "changefreq": "weekly"}, + { + "url": f"{base_url}/organizations/", + "priority": "0.8", + "changefreq": "weekly", + }, + {"url": f"{base_url}/channels/", "priority": "0.8", "changefreq": "weekly"}, + {"url": f"{base_url}/badges/", "priority": "0.7", "changefreq": "monthly"}, + {"url": f"{base_url}/emotes/", "priority": "0.7", "changefreq": "monthly"}, + {"url": f"{base_url}/search/", "priority": "0.6", "changefreq": "monthly"}, + ]) + + # Dynamic detail pages - Games + games: QuerySet[Game] = 
Game.objects.all() + for game in games: + entry: dict[str, str | dict[str, str]] = { + "url": f"{base_url}{reverse('twitch:game_detail', args=[game.twitch_id])}", + "priority": "0.8", + "changefreq": "weekly", + } + if game.updated_at: + entry["lastmod"] = game.updated_at.isoformat() + sitemap_urls.append(entry) + + # Dynamic detail pages - Campaigns + campaigns: QuerySet[DropCampaign] = DropCampaign.objects.all() + for campaign in campaigns: + resource_url: str = reverse("twitch:campaign_detail", args=[campaign.twitch_id]) + full_url: str = f"{base_url}{resource_url}" + entry: dict[str, str | dict[str, str]] = { + "url": full_url, + "priority": "0.7", + "changefreq": "weekly", + } + if campaign.updated_at: + entry["lastmod"] = campaign.updated_at.isoformat() + sitemap_urls.append(entry) + + # Dynamic detail pages - Organizations + orgs: QuerySet[Organization] = Organization.objects.all() + for org in orgs: + resource_url = reverse("twitch:organization_detail", args=[org.twitch_id]) + full_url: str = f"{base_url}{resource_url}" + entry: dict[str, str | dict[str, str]] = { + "url": full_url, + "priority": "0.7", + "changefreq": "weekly", + } + if org.updated_at: + entry["lastmod"] = org.updated_at.isoformat() + sitemap_urls.append(entry) + + # Dynamic detail pages - Channels + channels: QuerySet[Channel] = Channel.objects.all() + for channel in channels: + resource_url = reverse("twitch:channel_detail", args=[channel.twitch_id]) + full_url: str = f"{base_url}{resource_url}" + entry: dict[str, str | dict[str, str]] = { + "url": full_url, + "priority": "0.6", + "changefreq": "weekly", + } + if channel.updated_at: + entry["lastmod"] = channel.updated_at.isoformat() + sitemap_urls.append(entry) + + # Dynamic detail pages - Badges + badge_sets: QuerySet[ChatBadgeSet] = ChatBadgeSet.objects.all() + for badge_set in badge_sets: + resource_url = reverse("twitch:badge_set_detail", args=[badge_set.set_id]) + full_url: str = f"{base_url}{resource_url}" + sitemap_urls.append({ 
+ "url": full_url, + "priority": "0.5", + "changefreq": "monthly", + }) + + # Dynamic detail pages - Reward Campaigns + reward_campaigns: QuerySet[RewardCampaign] = RewardCampaign.objects.all() + for reward_campaign in reward_campaigns: + resource_url = reverse( + "twitch:reward_campaign_detail", + args=[ + reward_campaign.twitch_id, + ], + ) + full_url: str = f"{base_url}{resource_url}" + entry: dict[str, str | dict[str, str]] = { + "url": full_url, + "priority": "0.6", + "changefreq": "weekly", + } + if reward_campaign.updated_at: + entry["lastmod"] = reward_campaign.updated_at.isoformat() + sitemap_urls.append(entry) + + # Build XML + xml_content = '\n' + xml_content += '\n' + + for url_entry in sitemap_urls: + xml_content += " \n" + xml_content += f" {url_entry['url']}\n" + if url_entry.get("lastmod"): + xml_content += f" {url_entry['lastmod']}\n" + xml_content += ( + f" {url_entry.get('changefreq', 'monthly')}\n" + ) + xml_content += f" {url_entry.get('priority', '0.5')}\n" + xml_content += " \n" + + xml_content += "" + + return HttpResponse(xml_content, content_type="application/xml") + + +# MARK: /docs/rss/ +def docs_rss_view(request: HttpRequest) -> HttpResponse: + """View for /docs/rss that lists all available RSS feeds. + + Args: + request: The HTTP request object. + + Returns: + Rendered HTML response with list of RSS feeds. 
+ """ + + def absolute(path: str) -> str: + try: + return request.build_absolute_uri(path) + except Exception: + logger.exception("Failed to build absolute URL for %s", path) + return path + + def _pretty_example(xml_str: str, max_items: int = 1) -> str: + try: + trimmed: str = xml_str.strip() + first_item: int = trimmed.find("", second_item) + if end_channel != -1: + trimmed = trimmed[:second_item] + trimmed[end_channel:] + formatted: str = trimmed.replace("><", ">\n<") + return "\n".join(line for line in formatted.splitlines() if line.strip()) + except Exception: + logger.exception("Failed to pretty-print RSS example") + return xml_str + + def render_feed(feed_view: Callable[..., HttpResponse], *args: object) -> str: + try: + limited_request: HttpRequest = copy(request) + # Add limit=1 to GET parameters + get_data: QueryDict = request.GET.copy() + get_data["limit"] = "1" + limited_request.GET = get_data + + response: HttpResponse = feed_view(limited_request, *args) + return _pretty_example(response.content.decode("utf-8")) + except Exception: + logger.exception( + "Failed to render %s for RSS docs", + feed_view.__class__.__name__, + ) + return "" + + show_atom: bool = bool(request.GET.get("show_atom")) + + feeds: list[dict[str, str]] = [ + { + "title": "All Organizations", + "description": "Latest organizations added to TTVDrops", + "url": absolute(reverse("core:organization_feed")), + "atom_url": absolute(reverse("core:organization_feed_atom")), + "discord_url": absolute(reverse("core:organization_feed_discord")), + "example_xml": render_feed(OrganizationRSSFeed()), + "example_xml_atom": render_feed(OrganizationAtomFeed()) + if show_atom + else "", + "example_xml_discord": render_feed(OrganizationDiscordFeed()) + if show_atom + else "", + }, + { + "title": "All Games", + "description": "Latest games added to TTVDrops", + "url": absolute(reverse("core:game_feed")), + "atom_url": absolute(reverse("core:game_feed_atom")), + "discord_url": 
absolute(reverse("core:game_feed_discord")), + "example_xml": render_feed(GameFeed()), + "example_xml_atom": render_feed(GameAtomFeed()) if show_atom else "", + "example_xml_discord": render_feed(GameDiscordFeed()) if show_atom else "", + }, + { + "title": "All Drop Campaigns", + "description": "Latest drop campaigns across all games", + "url": absolute(reverse("core:campaign_feed")), + "atom_url": absolute(reverse("core:campaign_feed_atom")), + "discord_url": absolute(reverse("core:campaign_feed_discord")), + "example_xml": render_feed(DropCampaignFeed()), + "example_xml_atom": render_feed(DropCampaignAtomFeed()) + if show_atom + else "", + "example_xml_discord": render_feed(DropCampaignDiscordFeed()) + if show_atom + else "", + }, + { + "title": "All Reward Campaigns", + "description": "Latest reward campaigns (Quest rewards) on Twitch", + "url": absolute(reverse("core:reward_campaign_feed")), + "atom_url": absolute(reverse("core:reward_campaign_feed_atom")), + "discord_url": absolute(reverse("core:reward_campaign_feed_discord")), + "example_xml": render_feed(RewardCampaignFeed()), + "example_xml_atom": render_feed(RewardCampaignAtomFeed()) + if show_atom + else "", + "example_xml_discord": render_feed(RewardCampaignDiscordFeed()) + if show_atom + else "", + }, + ] + + sample_game: Game | None = Game.objects.order_by("-added_at").first() + sample_org: Organization | None = Organization.objects.order_by("-added_at").first() + if sample_org is None and sample_game is not None: + sample_org = sample_game.owners.order_by("-pk").first() + + filtered_feeds: list[dict[str, str | bool]] = [ + { + "title": "Campaigns for a Single Game", + "description": "Latest drop campaigns for one game.", + "url": ( + absolute( + reverse("core:game_campaign_feed", args=[sample_game.twitch_id]), + ) + if sample_game + else absolute("/rss/games//campaigns/") + ), + "atom_url": ( + absolute( + reverse( + "core:game_campaign_feed_atom", + args=[sample_game.twitch_id], + ), + ) + if 
sample_game + else absolute("/atom/games/<game_id>/campaigns/") + ), + "discord_url": ( + absolute( + reverse( + "core:game_campaign_feed_discord", + args=[sample_game.twitch_id], + ), + ) + if sample_game + else absolute("/discord/games/<game_id>/campaigns/") + ), + "has_sample": bool(sample_game), + "example_xml": render_feed(GameCampaignFeed(), sample_game.twitch_id) + if sample_game + else "", + "example_xml_atom": ( + render_feed(GameCampaignAtomFeed(), sample_game.twitch_id) + if sample_game and show_atom + else "" + ), + "example_xml_discord": ( + render_feed(GameCampaignDiscordFeed(), sample_game.twitch_id) + if sample_game and show_atom + else "" + ), + }, + ] + + seo_context: dict[str, Any] = _build_seo_context( + page_title="Twitch RSS Feeds", + page_description="RSS feeds for Twitch drops.", + ) + return render( + request, + "twitch/docs_rss.html", + { + "feeds": feeds, + "filtered_feeds": filtered_feeds, + "sample_game": sample_game, + "sample_org": sample_org, + **seo_context, + }, + ) + + +# MARK: /debug/ +def debug_view(request: HttpRequest) -> HttpResponse: + """Debug view showing potentially broken or inconsistent data. + + Returns: + HttpResponse: Rendered debug template or redirect if unauthorized. 
+ """ + now: datetime.datetime = timezone.now() + + # Games with no assigned owner organization + games_without_owner: QuerySet[Game] = Game.objects.filter( + owners__isnull=True, + ).order_by("display_name") + + # Campaigns with no images at all (no direct URL and no benefit image fallbacks) + broken_image_campaigns: QuerySet[DropCampaign] = ( + DropCampaign.objects + .filter( + Q(image_url__isnull=True) + | Q(image_url__exact="") + | ~Q(image_url__startswith="http"), + ) + .exclude( + Exists( + TimeBasedDrop.objects.filter(campaign=OuterRef("pk")).filter( + benefits__image_asset_url__startswith="http", + ), + ), + ) + .select_related("game") + ) + + # Benefits with missing images + broken_benefit_images: QuerySet[DropBenefit] = DropBenefit.objects.annotate( + trimmed_url=Trim("image_asset_url"), + ).filter( + Q(image_asset_url__isnull=True) + | Q(trimmed_url__exact="") + | ~Q(image_asset_url__startswith="http"), + ) + + # Time-based drops without any benefits + drops_without_benefits: QuerySet[TimeBasedDrop] = TimeBasedDrop.objects.filter( + benefits__isnull=True, + ).select_related("campaign__game") + + # Campaigns with invalid dates (start after end or missing either) + invalid_date_campaigns: QuerySet[DropCampaign] = DropCampaign.objects.filter( + Q(start_at__gt=F("end_at")) | Q(start_at__isnull=True) | Q(end_at__isnull=True), + ).select_related("game") + + # Duplicate campaign names per game. + # We retrieve the game's name for user-friendly display. 
+ duplicate_name_campaigns: QuerySet[DropCampaign, dict[str, Any]] = ( + DropCampaign.objects + .values("game__display_name", "name", "game__twitch_id") + .annotate(name_count=Count("twitch_id")) + .filter(name_count__gt=1) + .order_by("game__display_name", "name") + ) + + # Active campaigns with no images at all + active_missing_image: QuerySet[DropCampaign] = ( + DropCampaign.objects + .filter(start_at__lte=now, end_at__gte=now) + .filter( + Q(image_url__isnull=True) + | Q(image_url__exact="") + | ~Q(image_url__startswith="http"), + ) + .exclude( + Exists( + TimeBasedDrop.objects.filter(campaign=OuterRef("pk")).filter( + benefits__image_asset_url__startswith="http", + ), + ), + ) + .select_related("game") + ) + + # Distinct GraphQL operation names used to fetch campaigns with counts + # Since operation_names is now a JSON list field, we need to flatten and count + operation_names_counter: dict[str, int] = {} + for campaign in DropCampaign.objects.only("operation_names"): + for op_name in campaign.operation_names: + if op_name and op_name.strip(): + operation_names_counter[op_name.strip()] = ( + operation_names_counter.get(op_name.strip(), 0) + 1 + ) + + operation_names_with_counts: list[dict[str, Any]] = [ + {"trimmed_op": op_name, "count": count} + for op_name, count in sorted(operation_names_counter.items()) + ] + + # Campaigns missing DropCampaignDetails operation name + # Need to handle SQLite separately since it doesn't support JSONField lookups + # Sqlite is used when testing + if connection.vendor == "sqlite": + all_campaigns: QuerySet[DropCampaign] = DropCampaign.objects.select_related( + "game", + ).order_by("game__display_name", "name") + campaigns_missing_dropcampaigndetails: list[DropCampaign] = [ + c + for c in all_campaigns + if c.operation_names is None + or "DropCampaignDetails" not in c.operation_names + ] + else: + campaigns_missing_dropcampaigndetails: list[DropCampaign] = list( + DropCampaign.objects + .filter( + 
Q(operation_names__isnull=True) + | ~Q(operation_names__contains=["DropCampaignDetails"]), + ) + .select_related("game") + .order_by("game__display_name", "name"), + ) + + context: dict[str, Any] = { + "now": now, + "games_without_owner": games_without_owner, + "broken_image_campaigns": broken_image_campaigns, + "broken_benefit_images": broken_benefit_images, + "drops_without_benefits": drops_without_benefits, + "invalid_date_campaigns": invalid_date_campaigns, + "duplicate_name_campaigns": duplicate_name_campaigns, + "active_missing_image": active_missing_image, + "operation_names_with_counts": operation_names_with_counts, + "campaigns_missing_dropcampaigndetails": campaigns_missing_dropcampaigndetails, + } + + seo_context: dict[str, Any] = _build_seo_context( + page_title="Debug", + page_description="Debug view showing potentially broken or inconsistent data.", + robots_directive="noindex, nofollow", + ) + context.update(seo_context) + + return render(request, "twitch/debug.html", context) + + +# MARK: /datasets/ +def dataset_backups_view(request: HttpRequest) -> HttpResponse: + """View to list database backup datasets on disk. + + Args: + request: The HTTP request. + + Returns: + HttpResponse: The rendered dataset backups page. + """ + # TODO(TheLovinator): Instead of only using sql we should also support other formats like parquet, csv, or json. # noqa: TD003 + # TODO(TheLovinator): Upload to s3 instead. 
# noqa: TD003 + # TODO(TheLovinator): https://developers.google.com/search/docs/appearance/structured-data/dataset#json-ld + datasets_root: Path = settings.DATA_DIR / "datasets" + search_dirs: list[Path] = [datasets_root] + seen_paths: set[str] = set() + datasets: list[dict[str, Any]] = [] + + for folder in search_dirs: + if not folder.exists() or not folder.is_dir(): + continue + + # Only include .zst files + for path in folder.glob("*.zst"): + if not path.is_file(): + continue + key = str(path.resolve()) + if key in seen_paths: + continue + seen_paths.add(key) + stat: stat_result = path.stat() + updated_at: datetime.datetime = datetime.datetime.fromtimestamp( + stat.st_mtime, + tz=timezone.get_current_timezone(), + ) + try: + display_path = str(path.relative_to(datasets_root)) + download_path: str | None = display_path + except ValueError: + display_path: str = path.name + download_path: str | None = None + datasets.append({ + "name": path.name, + "display_path": display_path, + "download_path": download_path, + "size": filesizeformat(stat.st_size), + "updated_at": updated_at, + }) + + datasets.sort(key=operator.itemgetter("updated_at"), reverse=True) + + seo_context: dict[str, Any] = _build_seo_context( + page_title="Twitch Dataset", + page_description="Database backups and datasets available for download.", + ) + context: dict[str, Any] = { + "datasets": datasets, + "data_dir": str(datasets_root), + "dataset_count": len(datasets), + **seo_context, + } + return render(request, "twitch/dataset_backups.html", context) + + +def dataset_backup_download_view( + request: HttpRequest, # noqa: ARG001 + relative_path: str, +) -> FileResponse: + """Download a dataset backup from the data directory. + + Args: + request: The HTTP request. + relative_path: The path relative to the data directory. + + Returns: + FileResponse: The file response for the requested dataset. + + Raises: + Http404: When the file is not found or is outside the data directory. 
+ """ + # TODO(TheLovinator): Use s3 instead of local disk. # noqa: TD003 + + datasets_root: Path = settings.DATA_DIR / "datasets" + requested_path: Path = (datasets_root / relative_path).resolve() + data_root: Path = datasets_root.resolve() + + try: + requested_path.relative_to(data_root) + except ValueError as exc: + msg = "File not found" + raise Http404(msg) from exc + if not requested_path.exists() or not requested_path.is_file(): + msg = "File not found" + raise Http404(msg) + if not requested_path.name.endswith(".zst"): + msg = "File not found" + raise Http404(msg) + + return FileResponse( + requested_path.open("rb"), + as_attachment=True, + filename=requested_path.name, + ) + + +# MARK: /search/ +def search_view(request: HttpRequest) -> HttpResponse: + """Search view for all models. + + Args: + request: The HTTP request. + + Returns: + HttpResponse: The rendered search results. + """ + query: str = request.GET.get("q", "") + results: dict[str, QuerySet] = {} + + if query: + if len(query) < MIN_QUERY_LENGTH_FOR_FTS: + results["organizations"] = Organization.objects.filter( + name__istartswith=query, + ) + results["games"] = Game.objects.filter( + Q(name__istartswith=query) | Q(display_name__istartswith=query), + ) + + results["campaigns"] = DropCampaign.objects.filter( + Q(name__istartswith=query) | Q(description__icontains=query), + ).select_related("game") + + results["drops"] = TimeBasedDrop.objects.filter( + name__istartswith=query, + ).select_related("campaign") + + results["benefits"] = DropBenefit.objects.filter( + name__istartswith=query, + ).prefetch_related("drops__campaign") + + results["reward_campaigns"] = RewardCampaign.objects.filter( + Q(name__istartswith=query) + | Q(brand__istartswith=query) + | Q(summary__icontains=query), + ).select_related("game") + + results["badge_sets"] = ChatBadgeSet.objects.filter( + set_id__istartswith=query, + ) + + results["badges"] = ChatBadge.objects.filter( + Q(title__istartswith=query) | 
Q(description__icontains=query), + ).select_related("badge_set") + else: + results["organizations"] = Organization.objects.filter( + name__icontains=query, + ) + results["games"] = Game.objects.filter( + Q(name__icontains=query) | Q(display_name__icontains=query), + ) + + results["campaigns"] = DropCampaign.objects.filter( + Q(name__icontains=query) | Q(description__icontains=query), + ).select_related("game") + + results["drops"] = TimeBasedDrop.objects.filter( + name__icontains=query, + ).select_related("campaign") + + results["benefits"] = DropBenefit.objects.filter( + name__icontains=query, + ).prefetch_related("drops__campaign") + + results["reward_campaigns"] = RewardCampaign.objects.filter( + Q(name__icontains=query) + | Q(brand__icontains=query) + | Q(summary__icontains=query), + ).select_related("game") + + results["badge_sets"] = ChatBadgeSet.objects.filter(set_id__icontains=query) + results["badges"] = ChatBadge.objects.filter( + Q(title__icontains=query) | Q(description__icontains=query), + ).select_related("badge_set") + + total_results_count: int = sum(len(qs) for qs in results.values()) + + # TODO(TheLovinator): Make the description more informative by including counts of each result type, e.g. "Found 5 games, 3 campaigns, and 10 drops for 'rust'." # noqa: TD003 + if query: + page_title: str = f"Search Results for '{query}'"[:60] + page_description: str = f"Found {total_results_count} results for '{query}'." + else: + page_title = "Search" + page_description = "Search for drops, games, channels, and organizations." + + seo_context: dict[str, Any] = _build_seo_context( + page_title=page_title, + page_description=page_description, + ) + return render( + request, + "twitch/search_results.html", + {"query": query, "results": results, **seo_context}, + ) + + +# MARK: / +def dashboard(request: HttpRequest) -> HttpResponse: + """Dashboard view showing summary stats and latest campaigns. + + Args: + request: The HTTP request. 
+ + Returns: + HttpResponse: The rendered dashboard page. + """ + now: datetime.datetime = timezone.now() + + active_twitch_campaigns: QuerySet[DropCampaign] = ( + DropCampaign.objects + .filter(start_at__lte=now, end_at__gte=now) + .select_related("game") + .prefetch_related("game__owners") + .prefetch_related( + Prefetch( + "allow_channels", + queryset=Channel.objects.order_by("display_name"), + to_attr="channels_ordered", + ), + ) + .order_by("-start_at") + ) + + twitch_campaigns_by_game: OrderedDict[str, dict[str, Any]] = OrderedDict() + for campaign in active_twitch_campaigns: + game: Game = campaign.game + game_id: str = game.twitch_id + if game_id not in twitch_campaigns_by_game: + twitch_campaigns_by_game[game_id] = { + "name": game.display_name, + "box_art": game.box_art_best_url, + "owners": list(game.owners.all()), + "campaigns": [], + } + twitch_campaigns_by_game[game_id]["campaigns"].append({ + "campaign": campaign, + "allowed_channels": getattr(campaign, "channels_ordered", []), + }) + + active_kick_campaigns: QuerySet[KickDropCampaign] = ( + KickDropCampaign.objects + .filter(starts_at__lte=now, ends_at__gte=now) + .select_related("organization", "category") + .prefetch_related( + Prefetch("channels", queryset=KickChannel.objects.select_related("user")), + "rewards", + ) + .order_by("-starts_at") + ) + + kick_campaigns_by_game: OrderedDict[str, dict[str, Any]] = OrderedDict() + for campaign in active_kick_campaigns: + if campaign.category is None: + game_key: str = "unknown" + game_name: str = "Unknown Category" + game_image: str = "" + game_kick_id: int | None = None + else: + game_key = str(campaign.category.kick_id) + game_name = campaign.category.name + game_image = campaign.category.image_url + game_kick_id = campaign.category.kick_id + + if game_key not in kick_campaigns_by_game: + kick_campaigns_by_game[game_key] = { + "name": game_name, + "image": game_image, + "kick_id": game_kick_id, + "campaigns": [], + } + + 
kick_campaigns_by_game[game_key]["campaigns"].append({ + "campaign": campaign, + "channels": list(campaign.channels.all()), + "rewards": list(campaign.rewards.all()), + }) + + active_reward_campaigns: QuerySet[RewardCampaign] = ( + RewardCampaign.objects + .filter(starts_at__lte=now, ends_at__gte=now) + .select_related("game") + .order_by("-starts_at") + ) + + website_schema: dict[str, str | dict[str, str | dict[str, str]]] = { + "@context": "https://schema.org", + "@type": "WebSite", + "name": "ttvdrops", + "url": request.build_absolute_uri("/"), + "potentialAction": { + "@type": "SearchAction", + "target": { + "@type": "EntryPoint", + "urlTemplate": request.build_absolute_uri( + "/search/?q={search_term_string}", + ), + }, + "query-input": "required name=search_term_string", + }, + } + + seo_context: dict[str, Any] = _build_seo_context( + page_title="Twitch/Kick Drops", + page_description=("Twitch and Kick drops."), + og_type="website", + schema_data=website_schema, + ) + + return render( + request, + "core/dashboard.html", + { + "campaigns_by_game": twitch_campaigns_by_game, + "kick_campaigns_by_game": kick_campaigns_by_game, + "active_reward_campaigns": active_reward_campaigns, + "now": now, + **seo_context, + }, + ) diff --git a/kick/models.py b/kick/models.py index 7cef031..7f6ff01 100644 --- a/kick/models.py +++ b/kick/models.py @@ -289,7 +289,7 @@ class KickDropCampaign(auto_prefetch.Model): """Return the image URL for the campaign.""" # Image from first drop if self.rewards.exists(): # pyright: ignore[reportAttributeAccessIssue] - first_reward: KickReward = self.rewards.first() # pyright: ignore[reportAttributeAccessIssue] + first_reward: KickReward | None = self.rewards.first() # pyright: ignore[reportAttributeAccessIssue] if first_reward and first_reward.image_url: return first_reward.full_image_url diff --git a/kick/tests/test_kick.py b/kick/tests/test_kick.py index 59420ef..32e1adf 100644 --- a/kick/tests/test_kick.py +++ b/kick/tests/test_kick.py @@ 
-442,6 +442,8 @@ class ImportKickDropsCommandTest(TestCase): campaign: KickDropCampaign = KickDropCampaign.objects.get() assert campaign.name == "PUBG 9th Anniversary" assert campaign.status == "active" + assert campaign.organization is not None + assert campaign.category is not None assert campaign.organization.name == "KRAFTON" assert campaign.category.name == "PUBG: Battlegrounds" diff --git a/pyproject.toml b/pyproject.toml index 5a17154..65da0c1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -20,6 +20,7 @@ dependencies = [ "pydantic", "pygments", "python-dotenv", + "sentry-sdk", "setproctitle", "tqdm", ] diff --git a/templates/base.html b/templates/base.html index 71488ab..ebcdf65 100644 --- a/templates/base.html +++ b/templates/base.html @@ -24,55 +24,55 @@ {% include "includes/meta_tags.html" %} - + + href="{% url 'core:campaign_feed' %}" /> + href="{% url 'core:campaign_feed_atom' %}" /> + href="{% url 'core:campaign_feed_discord' %}" /> + href="{% url 'core:game_feed' %}" /> + href="{% url 'core:game_feed_atom' %}" /> + href="{% url 'core:game_feed_discord' %}" /> + href="{% url 'core:organization_feed' %}" /> + href="{% url 'core:organization_feed_atom' %}" /> + href="{% url 'core:organization_feed_discord' %}" /> + href="{% url 'core:reward_campaign_feed' %}" /> + href="{% url 'core:reward_campaign_feed_atom' %}" /> + href="{% url 'core:reward_campaign_feed_discord' %}" /> diff --git a/templates/core/dashboard.html b/templates/core/dashboard.html new file mode 100644 index 0000000..d6b1bdf --- /dev/null +++ b/templates/core/dashboard.html @@ -0,0 +1,355 @@ +{% extends "base.html" %} +{% load image_tags %} +{% block title %} + Drops Dashboard +{% endblock title %} +{% block extra_head %} + + + + + + +{% endblock extra_head %} +{% block content %} +
+

Active Drops Dashboard

+

+ A combined overview of currently active Twitch and Kick drops campaigns. +
+ Click any campaign to open details. +

+
+
+
+

Twitch Campaigns

+ +
+ {% if campaigns_by_game %} + {% for game_id, game_data in campaigns_by_game.items %} +
+
+

+ {{ game_data.name }} +

+ {% if game_data.owners %} +
+ Organizations: + {% for org in game_data.owners %} + {{ org.name }} + {% if not forloop.last %},{% endif %} + {% endfor %} +
+ {% endif %} +
+
+
{% picture game_data.box_art alt="Box art for "|add:game_data.name width=200 %}
+
+
+ {% for campaign_data in game_data.campaigns %} +
+
+ + {% picture campaign_data.campaign.image_best_url|default:campaign_data.campaign.image_url alt="Image for "|add:campaign_data.campaign.name width=120 %} +

{{ campaign_data.campaign.clean_name }}

+
+ + + +
+ Channels: +
    + {% if campaign_data.campaign.allow_is_enabled %} + {% if campaign_data.allowed_channels %} + {% for channel in campaign_data.allowed_channels|slice:":5" %} +
  • + + {{ channel.display_name }}[i] +
  • + {% endfor %} + {% else %} + {% if campaign_data.campaign.game.twitch_directory_url %} +
  • + + Go to a participating live channel + +
  • + {% else %} +
  • Failed to get Twitch directory URL :(
  • + {% endif %} + {% endif %} + {% if campaign_data.allowed_channels|length > 5 %} +
  • + ... and {{ campaign_data.allowed_channels|length|add:"-5" }} more +
  • + {% endif %} + {% endif %} +
+
+
+
+ {% endfor %} +
+
+
+
+ {% endfor %} + {% else %} +

No active Twitch campaigns at the moment.

+ {% endif %} +
+ {% if active_reward_campaigns %} +
+
+

+ Twitch Reward Campaigns (Quest Rewards) +

+
+
+ {% for campaign in active_reward_campaigns %} + + {% endfor %} +
+
+ {% endif %} +
+
+

Kick Campaigns

+ +
+ {% if kick_campaigns_by_game %} + {% for game_id, game_data in kick_campaigns_by_game.items %} +
+
+

+ {% if game_data.kick_id %} + {{ game_data.name }} + {% else %} + {{ game_data.name }} + {% endif %} +

+
+
+
+ {% if game_data.image %} + Image for {{ game_data.name }} + {% else %} +
No Image
+ {% endif %} +
+
+
+ {% for campaign_data in game_data.campaigns %} +
+
+ + {% if campaign_data.campaign.image_url %} + Image for {{ campaign_data.campaign.name }} + {% endif %} +

{{ campaign_data.campaign.name }}

+
+ + + {% if campaign_data.campaign.organization %} +

+ Organization: + {{ campaign_data.campaign.organization.name }} +

+ {% endif %} +
+ Channels: + +
+ {% if campaign_data.rewards %} +
+ Rewards: +
    + {% for reward in campaign_data.rewards|slice:":3" %} +
  • {{ reward.name }} ({{ reward.required_units }} min)
  • + {% endfor %} + {% if campaign_data.rewards|length > 3 %} +
  • ... and {{ campaign_data.rewards|length|add:"-3" }} more
  • + {% endif %} +
+
+ {% endif %} +
+
+ {% endfor %} +
+
+
+
+ {% endfor %} + {% else %} +

No active Kick campaigns at the moment.

+ {% endif %} +
+
+ {% endblock content %} diff --git a/templates/twitch/campaign_detail.html b/templates/twitch/campaign_detail.html index b4bb0df..61cfccc 100644 --- a/templates/twitch/campaign_detail.html +++ b/templates/twitch/campaign_detail.html @@ -9,15 +9,15 @@ + href="{% url 'core:game_campaign_feed' campaign.game.twitch_id %}" /> + href="{% url 'core:game_campaign_feed_atom' campaign.game.twitch_id %}" /> + href="{% url 'core:game_campaign_feed_discord' campaign.game.twitch_id %}" /> {% endif %} {% endblock extra_head %} {% block content %} @@ -90,11 +90,11 @@ {% endif %} {% if campaign.game %} - [rss] - [atom] - [discord] {% endif %} diff --git a/templates/twitch/campaign_list.html b/templates/twitch/campaign_list.html index a2a1783..9e904c0 100644 --- a/templates/twitch/campaign_list.html +++ b/templates/twitch/campaign_list.html @@ -9,15 +9,15 @@ + href="{% url 'core:campaign_feed' %}" /> + href="{% url 'core:campaign_feed_atom' %}" /> + href="{% url 'core:campaign_feed_discord' %}" /> {% endblock extra_head %} {% block content %}
@@ -25,11 +25,11 @@

Drop Campaigns

- [rss] - [atom] - [discord] [csv] diff --git a/templates/twitch/dashboard.html b/templates/twitch/dashboard.html index 5b0808f..134e050 100644 --- a/templates/twitch/dashboard.html +++ b/templates/twitch/dashboard.html @@ -8,15 +8,15 @@ + href="{% url 'core:campaign_feed' %}" /> + href="{% url 'core:campaign_feed_atom' %}" /> + href="{% url 'core:campaign_feed_discord' %}" /> {% endblock extra_head %} {% block content %}
@@ -33,11 +33,11 @@


diff --git a/templates/twitch/dataset_backups.html b/templates/twitch/dataset_backups.html index a527452..c6caa80 100644 --- a/templates/twitch/dataset_backups.html +++ b/templates/twitch/dataset_backups.html @@ -18,7 +18,7 @@ {% for dataset in datasets %} - {{ dataset.name }} + {{ dataset.name }} {{ dataset.size }} diff --git a/templates/twitch/game_detail.html b/templates/twitch/game_detail.html index 26f6e0c..6e68f7e 100644 --- a/templates/twitch/game_detail.html +++ b/templates/twitch/game_detail.html @@ -8,15 +8,15 @@ + href="{% url 'core:game_campaign_feed' game.twitch_id %}" /> + href="{% url 'core:game_campaign_feed_atom' game.twitch_id %}" /> + href="{% url 'core:game_campaign_feed_discord' game.twitch_id %}" /> {% endif %} {% endblock extra_head %} {% block content %} @@ -49,11 +49,11 @@
Twitch slug: {{ game.slug }}
diff --git a/templates/twitch/games_grid.html b/templates/twitch/games_grid.html index 87fe9ee..cc9fa22 100644 --- a/templates/twitch/games_grid.html +++ b/templates/twitch/games_grid.html @@ -7,15 +7,15 @@ + href="{% url 'core:game_feed' %}" /> + href="{% url 'core:game_feed_atom' %}" /> + href="{% url 'core:game_feed_discord' %}" /> {% endblock extra_head %} {% block content %}
@@ -23,10 +23,10 @@

All Games

[list] - [rss] - [rss] + [atom] - [discord] [csv] diff --git a/templates/twitch/games_list.html b/templates/twitch/games_list.html index 27857d5..c2c1406 100644 --- a/templates/twitch/games_list.html +++ b/templates/twitch/games_list.html @@ -6,25 +6,25 @@ + href="{% url 'core:game_feed' %}" /> + href="{% url 'core:game_feed_atom' %}" /> + href="{% url 'core:game_feed_discord' %}" /> {% endblock extra_head %} {% block content %}

Games List

[grid] - [rss] - [rss] + [atom] - [discord] [csv] diff --git a/templates/twitch/org_list.html b/templates/twitch/org_list.html index 068369d..838acae 100644 --- a/templates/twitch/org_list.html +++ b/templates/twitch/org_list.html @@ -5,11 +5,11 @@ {% block content %}

Organizations

- [rss] - [atom] - [discord] [csv] diff --git a/templates/twitch/organization_detail.html b/templates/twitch/organization_detail.html index 6a05d19..25326b0 100644 --- a/templates/twitch/organization_detail.html +++ b/templates/twitch/organization_detail.html @@ -8,15 +8,15 @@ + href="{% url 'core:game_campaign_feed' game.twitch_id %}" /> + href="{% url 'core:game_campaign_feed_atom' game.twitch_id %}" /> + href="{% url 'core:game_campaign_feed_discord' game.twitch_id %}" /> {% endfor %} {% endif %} {% endblock extra_head %} diff --git a/templates/twitch/reward_campaign_detail.html b/templates/twitch/reward_campaign_detail.html index ffd9388..234c6b5 100644 --- a/templates/twitch/reward_campaign_detail.html +++ b/templates/twitch/reward_campaign_detail.html @@ -8,15 +8,15 @@ + href="{% url 'core:reward_campaign_feed' %}" /> + href="{% url 'core:reward_campaign_feed_atom' %}" /> + href="{% url 'core:reward_campaign_feed_discord' %}" /> {% endblock extra_head %} {% block content %} @@ -35,12 +35,12 @@ {% endif %} diff --git a/templates/twitch/reward_campaign_list.html b/templates/twitch/reward_campaign_list.html index 99b8e7f..97affdf 100644 --- a/templates/twitch/reward_campaign_list.html +++ b/templates/twitch/reward_campaign_list.html @@ -7,25 +7,25 @@ + href="{% url 'core:reward_campaign_feed' %}" /> + href="{% url 'core:reward_campaign_feed_atom' %}" /> + href="{% url 'core:reward_campaign_feed_discord' %}" /> {% endblock extra_head %} {% block content %}

Reward Campaigns

This is an archive of old Twitch reward campaigns because we do not monitor them.

diff --git a/templates/youtube/index.html b/templates/youtube/index.html new file mode 100644 index 0000000..f5dca90 --- /dev/null +++ b/templates/youtube/index.html @@ -0,0 +1,20 @@ +{% extends "base.html" %} +{% block title %} + YouTube Drops Channels +{% endblock title %} +{% block content %} +
+

YouTube Drops Channels

+

Official channels from YouTube partner accounts where drops/rewards may be available.

+ {% for group in partner_groups %} +

{{ group.partner }}

+ + {% endfor %} +
+{% endblock content %} diff --git a/tools/systemd/ttvdrops-import-drops.service b/tools/systemd/ttvdrops-import-drops.service index 85f2b4d..d4e204b 100644 --- a/tools/systemd/ttvdrops-import-drops.service +++ b/tools/systemd/ttvdrops-import-drops.service @@ -7,6 +7,8 @@ Wants=network-online.target Type=simple User=ttvdrops Group=ttvdrops +SupplementaryGroups=http +UMask=0002 WorkingDirectory=/home/ttvdrops/ttvdrops EnvironmentFile=/home/ttvdrops/ttvdrops/.env ExecStart=/usr/bin/uv run python manage.py watch_imports /mnt/fourteen/Data/Responses/pending --verbose diff --git a/tools/systemd/ttvdrops-import-kick-drops.service b/tools/systemd/ttvdrops-import-kick-drops.service index 6858562..2932aab 100644 --- a/tools/systemd/ttvdrops-import-kick-drops.service +++ b/tools/systemd/ttvdrops-import-kick-drops.service @@ -7,6 +7,8 @@ Wants=network-online.target Type=oneshot User=ttvdrops Group=ttvdrops +SupplementaryGroups=http +UMask=0002 WorkingDirectory=/home/ttvdrops/ttvdrops EnvironmentFile=/home/ttvdrops/ttvdrops/.env ExecStart=/usr/bin/uv run python manage.py import_kick_drops diff --git a/twitch/feeds.py b/twitch/feeds.py index 69626ca..6a8678c 100644 --- a/twitch/feeds.py +++ b/twitch/feeds.py @@ -177,7 +177,9 @@ class TTVDropsAtomBaseFeed(TTVDropsBaseFeed): feed_type = BrowserFriendlyAtom1Feed -def _with_campaign_related(queryset: QuerySet[DropCampaign]) -> QuerySet[DropCampaign]: +def _with_campaign_related( + queryset: QuerySet[DropCampaign, DropCampaign], +) -> QuerySet[DropCampaign, DropCampaign]: """Apply related-selects/prefetches needed by feed rendering to avoid N+1 queries. 
Returns: @@ -759,7 +761,7 @@ class OrganizationRSSFeed(TTVDropsBaseFeed): def feed_url(self) -> str: """Return the absolute URL for this feed.""" - return reverse("twitch:organization_feed") + return reverse("core:organization_feed") # MARK: /rss/games/ @@ -829,7 +831,7 @@ class GameFeed(TTVDropsBaseFeed): # Get the full URL for TTVDrops game detail page game_url: str = reverse("twitch:game_detail", args=[twitch_id]) - rss_feed_url: str = reverse("twitch:game_campaign_feed", args=[twitch_id]) + rss_feed_url: str = reverse("core:game_campaign_feed", args=[twitch_id]) twitch_directory_url: str = getattr(item, "twitch_directory_url", "") description_parts.append( @@ -911,7 +913,7 @@ class GameFeed(TTVDropsBaseFeed): def feed_url(self) -> str: """Return the URL to the RSS feed itself.""" - return reverse("twitch:game_feed") + return reverse("core:game_feed") # MARK: /rss/campaigns/ @@ -1054,7 +1056,7 @@ class DropCampaignFeed(TTVDropsBaseFeed): def feed_url(self) -> str: """Return the URL to the RSS feed itself.""" - return reverse("twitch:campaign_feed") + return reverse("core:campaign_feed") # MARK: /rss/games//campaigns/ @@ -1230,7 +1232,7 @@ class GameCampaignFeed(TTVDropsBaseFeed): def feed_url(self, obj: Game) -> str: """Return the URL to the RSS feed itself.""" - return reverse("twitch:game_campaign_feed", args=[obj.twitch_id]) + return reverse("core:game_campaign_feed", args=[obj.twitch_id]) # MARK: /rss/reward-campaigns/ @@ -1422,7 +1424,7 @@ class RewardCampaignFeed(TTVDropsBaseFeed): def feed_url(self) -> str: """Return the URL to the RSS feed itself.""" - return reverse("twitch:reward_campaign_feed") + return reverse("core:reward_campaign_feed") # Atom feed variants: reuse existing logic but switch the feed generator to Atom @@ -1433,7 +1435,7 @@ class OrganizationAtomFeed(TTVDropsAtomBaseFeed, OrganizationRSSFeed): def feed_url(self) -> str: """Return the URL to the Atom feed itself.""" - return reverse("twitch:organization_feed_atom") + return 
reverse("core:organization_feed_atom") class GameAtomFeed(TTVDropsAtomBaseFeed, GameFeed): @@ -1443,7 +1445,7 @@ class GameAtomFeed(TTVDropsAtomBaseFeed, GameFeed): def feed_url(self) -> str: """Return the URL to the Atom feed itself.""" - return reverse("twitch:game_feed_atom") + return reverse("core:game_feed_atom") class DropCampaignAtomFeed(TTVDropsAtomBaseFeed, DropCampaignFeed): @@ -1453,7 +1455,7 @@ class DropCampaignAtomFeed(TTVDropsAtomBaseFeed, DropCampaignFeed): def feed_url(self) -> str: """Return the URL to the Atom feed itself.""" - return reverse("twitch:campaign_feed_atom") + return reverse("core:campaign_feed_atom") class GameCampaignAtomFeed(TTVDropsAtomBaseFeed, GameCampaignFeed): @@ -1461,7 +1463,7 @@ class GameCampaignAtomFeed(TTVDropsAtomBaseFeed, GameCampaignFeed): def feed_url(self, obj: Game) -> str: """Return the URL to the Atom feed itself.""" - return reverse("twitch:game_campaign_feed_atom", args=[obj.twitch_id]) + return reverse("core:game_campaign_feed_atom", args=[obj.twitch_id]) class RewardCampaignAtomFeed(TTVDropsAtomBaseFeed, RewardCampaignFeed): @@ -1471,7 +1473,7 @@ class RewardCampaignAtomFeed(TTVDropsAtomBaseFeed, RewardCampaignFeed): def feed_url(self) -> str: """Return the URL to the Atom feed itself.""" - return reverse("twitch:reward_campaign_feed_atom") + return reverse("core:reward_campaign_feed_atom") # Discord feed variants: Atom feeds with Discord relative timestamps @@ -1482,7 +1484,7 @@ class OrganizationDiscordFeed(TTVDropsAtomBaseFeed, OrganizationRSSFeed): def feed_url(self) -> str: """Return the URL to the Discord feed itself.""" - return reverse("twitch:organization_feed_discord") + return reverse("core:organization_feed_discord") class GameDiscordFeed(TTVDropsAtomBaseFeed, GameFeed): @@ -1492,7 +1494,7 @@ class GameDiscordFeed(TTVDropsAtomBaseFeed, GameFeed): def feed_url(self) -> str: """Return the URL to the Discord feed itself.""" - return reverse("twitch:game_feed_discord") + return 
reverse("core:game_feed_discord") class DropCampaignDiscordFeed(TTVDropsAtomBaseFeed, DropCampaignFeed): @@ -1515,7 +1517,7 @@ class DropCampaignDiscordFeed(TTVDropsAtomBaseFeed, DropCampaignFeed): def feed_url(self) -> str: """Return the URL to the Discord feed itself.""" - return reverse("twitch:campaign_feed_discord") + return reverse("core:campaign_feed_discord") class GameCampaignDiscordFeed(TTVDropsAtomBaseFeed, GameCampaignFeed): @@ -1535,7 +1537,7 @@ class GameCampaignDiscordFeed(TTVDropsAtomBaseFeed, GameCampaignFeed): def feed_url(self, obj: Game) -> str: """Return the URL to the Discord feed itself.""" - return reverse("twitch:game_campaign_feed_discord", args=[obj.twitch_id]) + return reverse("core:game_campaign_feed_discord", args=[obj.twitch_id]) class RewardCampaignDiscordFeed(TTVDropsAtomBaseFeed, RewardCampaignFeed): @@ -1602,4 +1604,4 @@ class RewardCampaignDiscordFeed(TTVDropsAtomBaseFeed, RewardCampaignFeed): def feed_url(self) -> str: """Return the URL to the Discord feed itself.""" - return reverse("twitch:reward_campaign_feed_discord") + return reverse("core:reward_campaign_feed_discord") diff --git a/twitch/management/commands/better_import_drops.py b/twitch/management/commands/better_import_drops.py index 3fea772..26c408e 100644 --- a/twitch/management/commands/better_import_drops.py +++ b/twitch/management/commands/better_import_drops.py @@ -631,6 +631,9 @@ class Command(BaseCommand): ) return + if game_obj.box_art_file is None: + return + game_obj.box_art_file.save(file_name, ContentFile(response.content), save=True) def _get_or_create_channel(self, channel_info: ChannelInfoSchema) -> Channel: diff --git a/twitch/management/commands/cleanup_unknown_organizations.py b/twitch/management/commands/cleanup_unknown_organizations.py index 28c8efb..3f07263 100644 --- a/twitch/management/commands/cleanup_unknown_organizations.py +++ b/twitch/management/commands/cleanup_unknown_organizations.py @@ -11,8 +11,8 @@ from twitch.models import Game from 
twitch.models import Organization if TYPE_CHECKING: - from debug_toolbar.panels.templates.panel import QuerySet from django.core.management.base import CommandParser + from django.db.models import QuerySet class Command(BaseCommand): diff --git a/twitch/management/commands/download_box_art.py b/twitch/management/commands/download_box_art.py index e0ea78b..56504e4 100644 --- a/twitch/management/commands/download_box_art.py +++ b/twitch/management/commands/download_box_art.py @@ -5,6 +5,7 @@ from urllib.parse import urlparse import httpx from django.conf import settings from django.core.files.base import ContentFile +from django.core.management import call_command from django.core.management.base import BaseCommand from PIL import Image @@ -38,7 +39,7 @@ class Command(BaseCommand): help="Re-download even if a local box art file already exists.", ) - def handle(self, *_args: object, **options: object) -> None: + def handle(self, *_args: object, **options: object) -> None: # noqa: PLR0914, PLR0915 """Download Twitch box art images for all games.""" limit_value: object | None = options.get("limit") limit: int | None = limit_value if isinstance(limit_value, int) else None @@ -92,6 +93,10 @@ class Command(BaseCommand): skipped += 1 continue + if game.box_art_file is None: + failed += 1 + continue + game.box_art_file.save( file_name, ContentFile(response.content), @@ -99,7 +104,9 @@ class Command(BaseCommand): ) # Auto-convert to WebP and AVIF - self._convert_to_modern_formats(game.box_art_file.path) + box_art_path: str | None = getattr(game.box_art_file, "path", None) + if box_art_path: + self._convert_to_modern_formats(box_art_path) downloaded += 1 @@ -112,6 +119,15 @@ class Command(BaseCommand): box_art_dir: Path = Path(settings.MEDIA_ROOT) / "games" / "box_art" self.stdout.write(self.style.SUCCESS(f"Saved box art to: {box_art_dir}")) + # Convert downloaded images to modern formats (WebP, AVIF) + if downloaded > 0: + self.stdout.write( + self.style.MIGRATE_HEADING( + 
"\nConverting downloaded images to modern formats...", + ), + ) + call_command("convert_images_to_modern_formats") + def _convert_to_modern_formats(self, image_path: str) -> None: """Convert downloaded image to WebP and AVIF formats. diff --git a/twitch/management/commands/download_campaign_images.py b/twitch/management/commands/download_campaign_images.py index 4d419ca..8e13f4e 100644 --- a/twitch/management/commands/download_campaign_images.py +++ b/twitch/management/commands/download_campaign_images.py @@ -7,6 +7,7 @@ from urllib.parse import urlparse import httpx from django.conf import settings from django.core.files.base import ContentFile +from django.core.management import call_command from django.core.management.base import BaseCommand from PIL import Image @@ -20,6 +21,7 @@ if TYPE_CHECKING: from django.core.management.base import CommandParser from django.db.models import QuerySet from django.db.models.fields.files import FieldFile + from PIL.ImageFile import ImageFile class Command(BaseCommand): @@ -68,7 +70,7 @@ class Command(BaseCommand): self.stdout.write( self.style.MIGRATE_HEADING("\nProcessing Drop Campaigns..."), ) - stats = self._download_campaign_images( + stats: dict[str, int] = self._download_campaign_images( client=client, limit=limit, force=force, @@ -112,6 +114,15 @@ class Command(BaseCommand): ), ) + # Convert downloaded images to modern formats (WebP, AVIF) + if total_stats["downloaded"] > 0: + self.stdout.write( + self.style.MIGRATE_HEADING( + "\nConverting downloaded images to modern formats...", + ), + ) + call_command("convert_images_to_modern_formats") + def _download_campaign_images( self, client: httpx.Client, @@ -151,7 +162,7 @@ class Command(BaseCommand): stats["skipped"] += 1 continue - result = self._download_image( + result: str = self._download_image( client, campaign.image_url, campaign.twitch_id, @@ -200,7 +211,7 @@ class Command(BaseCommand): stats["skipped"] += 1 continue - result = self._download_image( + result: str = 
self._download_image( client, benefit.image_asset_url, benefit.twitch_id, @@ -249,7 +260,7 @@ class Command(BaseCommand): stats["skipped"] += 1 continue - result = self._download_image( + result: str = self._download_image( client, reward_campaign.image_url, reward_campaign.twitch_id, @@ -264,7 +275,7 @@ class Command(BaseCommand): client: httpx.Client, image_url: str, twitch_id: str, - file_field: FieldFile, + file_field: FieldFile | None, ) -> str: """Download a single image and save it to the file field. @@ -281,6 +292,9 @@ class Command(BaseCommand): suffix: str = Path(parsed_url.path).suffix or ".jpg" file_name: str = f"{twitch_id}{suffix}" + if file_field is None: + return "failed" + try: response: httpx.Response = client.get(image_url) response.raise_for_status() @@ -299,7 +313,9 @@ class Command(BaseCommand): file_field.save(file_name, ContentFile(response.content), save=True) # Auto-convert to WebP and AVIF - self._convert_to_modern_formats(file_field.path) + image_path: str | None = getattr(file_field, "path", None) + if image_path: + self._convert_to_modern_formats(image_path) return "downloaded" @@ -320,17 +336,19 @@ class Command(BaseCommand): }: return - base_path = source_path.with_suffix("") - webp_path = base_path.with_suffix(".webp") - avif_path = base_path.with_suffix(".avif") + base_path: Path = source_path.with_suffix("") + webp_path: Path = base_path.with_suffix(".webp") + avif_path: Path = base_path.with_suffix(".avif") with Image.open(source_path) as img: # Convert to RGB if needed if img.mode in {"RGBA", "LA"} or ( img.mode == "P" and "transparency" in img.info ): - background = Image.new("RGB", img.size, (255, 255, 255)) - rgba_img = img.convert("RGBA") if img.mode == "P" else img + background: Image = Image.new("RGB", img.size, (255, 255, 255)) + rgba_img: Image | ImageFile = ( + img.convert("RGBA") if img.mode == "P" else img + ) background.paste( rgba_img, mask=rgba_img.split()[-1] @@ -339,9 +357,9 @@ class Command(BaseCommand): ) 
rgb_img = background elif img.mode != "RGB": - rgb_img = img.convert("RGB") + rgb_img: Image = img.convert("RGB") else: - rgb_img = img + rgb_img: ImageFile = img # Save WebP rgb_img.save(webp_path, "WEBP", quality=85, method=6) @@ -372,11 +390,11 @@ class Command(BaseCommand): ), ) if stats["downloaded"] > 0: - media_path = Path(settings.MEDIA_ROOT) + media_path: Path = Path(settings.MEDIA_ROOT) if "Campaigns" in model_name and "Reward" not in model_name: - image_dir = media_path / "campaigns" / "images" + image_dir: Path = media_path / "campaigns" / "images" elif "Benefits" in model_name: - image_dir = media_path / "benefits" / "images" + image_dir: Path = media_path / "benefits" / "images" else: - image_dir = media_path / "reward_campaigns" / "images" + image_dir: Path = media_path / "reward_campaigns" / "images" self.stdout.write(self.style.SUCCESS(f"Saved images to: {image_dir}")) diff --git a/twitch/tests/test_backup.py b/twitch/tests/test_backup.py index ee0001c..08d8c4f 100644 --- a/twitch/tests/test_backup.py +++ b/twitch/tests/test_backup.py @@ -288,7 +288,7 @@ class TestDatasetBackupViews: monkeypatch.setattr(settings, "DATA_DIR", datasets_dir.parent) response: _MonkeyPatchedWSGIResponse = client.get( - reverse("twitch:dataset_backups"), + reverse("core:dataset_backups"), ) assert response.status_code == 200 @@ -305,7 +305,7 @@ class TestDatasetBackupViews: monkeypatch.setattr(settings, "DATA_DIR", datasets_dir.parent) response: _MonkeyPatchedWSGIResponse = client.get( - reverse("twitch:dataset_backups"), + reverse("core:dataset_backups"), ) assert response.status_code == 200 @@ -339,7 +339,7 @@ class TestDatasetBackupViews: os.utime(newer_backup, (newer_time, newer_time)) response: _MonkeyPatchedWSGIResponse = client.get( - reverse("twitch:dataset_backups"), + reverse("core:dataset_backups"), ) content = response.content.decode() @@ -361,7 +361,7 @@ class TestDatasetBackupViews: response: _MonkeyPatchedWSGIResponse = client.get( reverse( - 
"twitch:dataset_backup_download", + "core:dataset_backup_download", args=["ttvdrops-20260210-120000.sql.zst"], ), ) @@ -382,7 +382,7 @@ class TestDatasetBackupViews: # Attempt path traversal response = client.get( - reverse("twitch:dataset_backup_download", args=["../../../etc/passwd"]), + reverse("core:dataset_backup_download", args=["../../../etc/passwd"]), ) assert response.status_code == 404 @@ -400,7 +400,7 @@ class TestDatasetBackupViews: invalid_file.write_text("not a backup") response = client.get( - reverse("twitch:dataset_backup_download", args=["malicious.exe"]), + reverse("core:dataset_backup_download", args=["malicious.exe"]), ) assert response.status_code == 404 @@ -414,7 +414,7 @@ class TestDatasetBackupViews: monkeypatch.setattr(settings, "DATA_DIR", datasets_dir.parent) response = client.get( - reverse("twitch:dataset_backup_download", args=["nonexistent.sql.zst"]), + reverse("core:dataset_backup_download", args=["nonexistent.sql.zst"]), ) assert response.status_code == 404 @@ -429,7 +429,7 @@ class TestDatasetBackupViews: monkeypatch.setattr(settings, "DATA_DIR", datasets_dir.parent) response: _MonkeyPatchedWSGIResponse = client.get( - reverse("twitch:dataset_backups"), + reverse("core:dataset_backups"), ) assert response.status_code == 200 @@ -452,7 +452,7 @@ class TestDatasetBackupViews: (datasets_dir / "old_backup.gz").write_bytes(b"should be ignored") response: _MonkeyPatchedWSGIResponse = client.get( - reverse("twitch:dataset_backups"), + reverse("core:dataset_backups"), ) content = response.content.decode() @@ -481,7 +481,7 @@ class TestDatasetBackupViews: handle.write("-- Test\n") response: _MonkeyPatchedWSGIResponse = client.get( - reverse("twitch:dataset_backup_download", args=["2026/02/backup.sql.zst"]), + reverse("core:dataset_backup_download", args=["2026/02/backup.sql.zst"]), ) assert response.status_code == 200 diff --git a/twitch/tests/test_badge_views.py b/twitch/tests/test_badge_views.py index 191f2be..aa75a1b 100644 --- 
a/twitch/tests/test_badge_views.py +++ b/twitch/tests/test_badge_views.py @@ -155,7 +155,7 @@ class TestBadgeSearch: ChatBadgeSet.objects.create(set_id="vip") ChatBadgeSet.objects.create(set_id="subscriber") - response = client.get(reverse("twitch:search"), {"q": "vip"}) + response = client.get(reverse("core:search"), {"q": "vip"}) assert response.status_code == 200 content = response.content.decode() @@ -175,7 +175,7 @@ class TestBadgeSearch: description="Test description", ) - response = client.get(reverse("twitch:search"), {"q": "Moderator"}) + response = client.get(reverse("core:search"), {"q": "Moderator"}) assert response.status_code == 200 content = response.content.decode() @@ -195,7 +195,7 @@ class TestBadgeSearch: description="Unique description text", ) - response = client.get(reverse("twitch:search"), {"q": "Unique description"}) + response = client.get(reverse("core:search"), {"q": "Unique description"}) assert response.status_code == 200 content = response.content.decode() diff --git a/twitch/tests/test_exports.py b/twitch/tests/test_exports.py index 57d5ec3..f6a14a2 100644 --- a/twitch/tests/test_exports.py +++ b/twitch/tests/test_exports.py @@ -44,7 +44,7 @@ class ExportViewsTestCase(TestCase): def test_export_campaigns_csv(self) -> None: """Test CSV export of campaigns.""" - response = self.client.get("/export/campaigns/csv/") + response = self.client.get("/twitch/export/campaigns/csv/") assert response.status_code == 200 assert response["Content-Type"] == "text/csv" assert b"Twitch ID" in response.content @@ -53,7 +53,7 @@ class ExportViewsTestCase(TestCase): def test_export_campaigns_json(self) -> None: """Test JSON export of campaigns.""" - response = self.client.get("/export/campaigns/json/") + response = self.client.get("/twitch/export/campaigns/json/") assert response.status_code == 200 assert response["Content-Type"] == "application/json" @@ -66,7 +66,7 @@ class ExportViewsTestCase(TestCase): def test_export_games_csv(self) -> None: """Test 
CSV export of games.""" - response = self.client.get("/export/games/csv/") + response = self.client.get("/twitch/export/games/csv/") assert response.status_code == 200 assert response["Content-Type"] == "text/csv" assert b"Twitch ID" in response.content @@ -75,7 +75,7 @@ class ExportViewsTestCase(TestCase): def test_export_games_json(self) -> None: """Test JSON export of games.""" - response = self.client.get("/export/games/json/") + response = self.client.get("/twitch/export/games/json/") assert response.status_code == 200 assert response["Content-Type"] == "application/json" @@ -87,7 +87,7 @@ class ExportViewsTestCase(TestCase): def test_export_organizations_csv(self) -> None: """Test CSV export of organizations.""" - response = self.client.get("/export/organizations/csv/") + response = self.client.get("/twitch/export/organizations/csv/") assert response.status_code == 200 assert response["Content-Type"] == "text/csv" assert b"Twitch ID" in response.content @@ -96,7 +96,7 @@ class ExportViewsTestCase(TestCase): def test_export_organizations_json(self) -> None: """Test JSON export of organizations.""" - response = self.client.get("/export/organizations/json/") + response = self.client.get("/twitch/export/organizations/json/") assert response.status_code == 200 assert response["Content-Type"] == "application/json" @@ -108,13 +108,13 @@ class ExportViewsTestCase(TestCase): def test_export_campaigns_csv_with_filters(self) -> None: """Test CSV export of campaigns with status filter.""" - response = self.client.get("/export/campaigns/csv/?status=active") + response = self.client.get("/twitch/export/campaigns/csv/?status=active") assert response.status_code == 200 assert b"campaign123" in response.content def test_export_campaigns_json_with_filters(self) -> None: """Test JSON export of campaigns with status filter.""" - response = self.client.get("/export/campaigns/json/?status=active") + response = self.client.get("/twitch/export/campaigns/json/?status=active") assert 
response.status_code == 200 data = json.loads(response.content) diff --git a/twitch/tests/test_feeds.py b/twitch/tests/test_feeds.py index 384327d..cb1fdec 100644 --- a/twitch/tests/test_feeds.py +++ b/twitch/tests/test_feeds.py @@ -106,7 +106,7 @@ class RSSFeedTestCase(TestCase): def test_organization_feed(self) -> None: """Test organization feed returns 200.""" - url: str = reverse("twitch:organization_feed") + url: str = reverse("core:organization_feed") response: _MonkeyPatchedWSGIResponse = self.client.get(url) assert response.status_code == 200 assert response["Content-Type"] == "application/xml; charset=utf-8" @@ -114,7 +114,7 @@ class RSSFeedTestCase(TestCase): def test_game_feed(self) -> None: """Test game feed returns 200.""" - url: str = reverse("twitch:game_feed") + url: str = reverse("core:game_feed") response: _MonkeyPatchedWSGIResponse = self.client.get(url) assert response.status_code == 200 assert response["Content-Type"] == "application/xml; charset=utf-8" @@ -123,7 +123,7 @@ class RSSFeedTestCase(TestCase): assert "Owned by Test Organization." 
in content expected_rss_link: str = reverse( - "twitch:game_campaign_feed", + "core:game_campaign_feed", args=[self.game.twitch_id], ) assert expected_rss_link in content @@ -137,7 +137,7 @@ class RSSFeedTestCase(TestCase): def test_organization_atom_feed(self) -> None: """Test organization Atom feed returns 200 and Atom XML.""" - url: str = reverse("twitch:organization_feed_atom") + url: str = reverse("core:organization_feed_atom") response: _MonkeyPatchedWSGIResponse = self.client.get(url) msg: str = f"Expected 200 OK, got {response.status_code} with content: {response.content.decode('utf-8')}" @@ -151,14 +151,14 @@ class RSSFeedTestCase(TestCase): def test_game_atom_feed(self) -> None: """Test game Atom feed returns 200 and contains expected content.""" - url: str = reverse("twitch:game_feed_atom") + url: str = reverse("core:game_feed_atom") response: _MonkeyPatchedWSGIResponse = self.client.get(url) assert response.status_code == 200 assert response["Content-Type"] == "application/xml; charset=utf-8" content: str = response.content.decode("utf-8") assert "Owned by Test Organization." 
in content expected_atom_link: str = reverse( - "twitch:game_campaign_feed", + "core:game_campaign_feed", args=[self.game.twitch_id], ) assert expected_atom_link in content @@ -167,7 +167,7 @@ class RSSFeedTestCase(TestCase): def test_campaign_atom_feed_uses_url_ids_and_correct_self_link(self) -> None: """Atom campaign feed should use URL ids and a matching self link.""" - url: str = reverse("twitch:campaign_feed_atom") + url: str = reverse("core:campaign_feed_atom") response: _MonkeyPatchedWSGIResponse = self.client.get(url) assert response.status_code == 200 @@ -180,33 +180,35 @@ class RSSFeedTestCase(TestCase): assert 'href="http://testserver/atom/campaigns/"' in content, msg msg: str = f"Expected entry ID to be the campaign URL, got: {content}" - assert "http://testserver/campaigns/test-campaign-123/" in content, msg + assert ( + "http://testserver/twitch/campaigns/test-campaign-123/" in content + ), msg def test_all_atom_feeds_use_url_ids_and_correct_self_links(self) -> None: """All Atom feeds should use absolute URL entry IDs and matching self links.""" atom_feed_cases: list[tuple[str, dict[str, str], str]] = [ ( - "twitch:campaign_feed_atom", + "core:campaign_feed_atom", {}, f"http://testserver{reverse('twitch:campaign_detail', args=[self.campaign.twitch_id])}", ), ( - "twitch:game_feed_atom", + "core:game_feed_atom", {}, f"http://testserver{reverse('twitch:game_detail', args=[self.game.twitch_id])}", ), ( - "twitch:game_campaign_feed_atom", + "core:game_campaign_feed_atom", {"twitch_id": self.game.twitch_id}, f"http://testserver{reverse('twitch:campaign_detail', args=[self.campaign.twitch_id])}", ), ( - "twitch:organization_feed_atom", + "core:organization_feed_atom", {}, f"http://testserver{reverse('twitch:organization_detail', args=[self.org.twitch_id])}", ), ( - "twitch:reward_campaign_feed_atom", + "core:reward_campaign_feed_atom", {}, f"http://testserver{reverse('twitch:reward_campaign_detail', args=[self.reward_campaign.twitch_id])}", ), @@ -246,7 
+248,7 @@ class RSSFeedTestCase(TestCase): ) drop.benefits.add(benefit) - url: str = reverse("twitch:campaign_feed_atom") + url: str = reverse("core:campaign_feed_atom") response: _MonkeyPatchedWSGIResponse = self.client.get(url) assert response.status_code == 200 @@ -257,11 +259,11 @@ class RSSFeedTestCase(TestCase): def test_atom_feeds_include_stylesheet_processing_instruction(self) -> None: """Atom feeds should include an xml-stylesheet processing instruction.""" feed_urls: list[str] = [ - reverse("twitch:campaign_feed_atom"), - reverse("twitch:game_feed_atom"), - reverse("twitch:game_campaign_feed_atom", args=[self.game.twitch_id]), - reverse("twitch:organization_feed_atom"), - reverse("twitch:reward_campaign_feed_atom"), + reverse("core:campaign_feed_atom"), + reverse("core:game_feed_atom"), + reverse("core:game_campaign_feed_atom", args=[self.game.twitch_id]), + reverse("core:organization_feed_atom"), + reverse("core:reward_campaign_feed_atom"), ] for url in feed_urls: @@ -277,6 +279,7 @@ class RSSFeedTestCase(TestCase): def test_campaign_and_game_feeds_use_absolute_media_enclosure_urls(self) -> None: """Campaign/game RSS+Atom enclosures should use absolute URLs for local media files.""" self.game.box_art = "" + assert self.game.box_art_file is not None self.game.box_art_file.save( "box.png", ContentFile(b"game-image-bytes"), @@ -287,6 +290,7 @@ class RSSFeedTestCase(TestCase): self.game.save() self.campaign.image_url = "" + assert self.campaign.image_file is not None self.campaign.image_file.save( "campaign.png", ContentFile(b"campaign-image-bytes"), @@ -297,12 +301,12 @@ class RSSFeedTestCase(TestCase): self.campaign.save() feed_urls: list[str] = [ - reverse("twitch:game_feed"), - reverse("twitch:campaign_feed"), - reverse("twitch:game_campaign_feed", args=[self.game.twitch_id]), - reverse("twitch:game_feed_atom"), - reverse("twitch:campaign_feed_atom"), - reverse("twitch:game_campaign_feed_atom", args=[self.game.twitch_id]), + reverse("core:game_feed"), + 
reverse("core:campaign_feed"), + reverse("core:game_campaign_feed", args=[self.game.twitch_id]), + reverse("core:game_feed_atom"), + reverse("core:campaign_feed_atom"), + reverse("core:game_campaign_feed_atom", args=[self.game.twitch_id]), ] for url in feed_urls: @@ -333,14 +337,14 @@ class RSSFeedTestCase(TestCase): self.reward_campaign.save() feed_urls: list[str] = [ - reverse("twitch:game_feed"), - reverse("twitch:campaign_feed"), - reverse("twitch:game_campaign_feed", args=[self.game.twitch_id]), - reverse("twitch:reward_campaign_feed"), - reverse("twitch:game_feed_atom"), - reverse("twitch:campaign_feed_atom"), - reverse("twitch:game_campaign_feed_atom", args=[self.game.twitch_id]), - reverse("twitch:reward_campaign_feed_atom"), + reverse("core:game_feed"), + reverse("core:campaign_feed"), + reverse("core:game_campaign_feed", args=[self.game.twitch_id]), + reverse("core:reward_campaign_feed"), + reverse("core:game_feed_atom"), + reverse("core:campaign_feed_atom"), + reverse("core:game_campaign_feed_atom", args=[self.game.twitch_id]), + reverse("core:reward_campaign_feed_atom"), ] for url in feed_urls: @@ -378,7 +382,7 @@ class RSSFeedTestCase(TestCase): def test_campaign_feed(self) -> None: """Test campaign feed returns 200.""" - url: str = reverse("twitch:campaign_feed") + url: str = reverse("core:campaign_feed") response: _MonkeyPatchedWSGIResponse = self.client.get(url) assert response.status_code == 200 assert response["Content-Type"] == "application/xml; charset=utf-8" @@ -392,11 +396,11 @@ class RSSFeedTestCase(TestCase): def test_rss_feeds_include_stylesheet_processing_instruction(self) -> None: """RSS feeds should include an xml-stylesheet processing instruction.""" feed_urls: list[str] = [ - reverse("twitch:campaign_feed"), - reverse("twitch:game_feed"), - reverse("twitch:game_campaign_feed", args=[self.game.twitch_id]), - reverse("twitch:organization_feed"), - reverse("twitch:reward_campaign_feed"), + reverse("core:campaign_feed"), + 
reverse("core:game_feed"), + reverse("core:game_campaign_feed", args=[self.game.twitch_id]), + reverse("core:organization_feed"), + reverse("core:reward_campaign_feed"), ] for url in feed_urls: @@ -443,11 +447,11 @@ class RSSFeedTestCase(TestCase): def test_rss_feeds_include_shared_metadata_fields(self) -> None: """RSS output should contain base feed metadata fields.""" feed_urls: list[str] = [ - reverse("twitch:campaign_feed"), - reverse("twitch:game_feed"), - reverse("twitch:game_campaign_feed", args=[self.game.twitch_id]), - reverse("twitch:organization_feed"), - reverse("twitch:reward_campaign_feed"), + reverse("core:campaign_feed"), + reverse("core:game_feed"), + reverse("core:game_campaign_feed", args=[self.game.twitch_id]), + reverse("core:organization_feed"), + reverse("core:reward_campaign_feed"), ] for url in feed_urls: @@ -480,7 +484,7 @@ class RSSFeedTestCase(TestCase): operation_names=["DropCampaignDetails"], ) - url: str = reverse("twitch:campaign_feed") + url: str = reverse("core:campaign_feed") response: _MonkeyPatchedWSGIResponse = self.client.get(url) assert response.status_code == 200 content: str = response.content.decode("utf-8") @@ -539,7 +543,7 @@ class RSSFeedTestCase(TestCase): description="This badge was earned by subscribing.", ) - url: str = reverse("twitch:campaign_feed") + url: str = reverse("core:campaign_feed") response: _MonkeyPatchedWSGIResponse = self.client.get(url) assert response.status_code == 200 content: str = response.content.decode("utf-8") @@ -547,7 +551,7 @@ class RSSFeedTestCase(TestCase): def test_game_campaign_feed(self) -> None: """Test game-specific campaign feed returns 200.""" - url: str = reverse("twitch:game_campaign_feed", args=[self.game.twitch_id]) + url: str = reverse("core:game_campaign_feed", args=[self.game.twitch_id]) response: _MonkeyPatchedWSGIResponse = self.client.get(url) assert response.status_code == 200 assert response["Content-Type"] == "application/xml; charset=utf-8" @@ -576,7 +580,7 @@ class 
RSSFeedTestCase(TestCase): ) # Get feed for first game - url: str = reverse("twitch:game_campaign_feed", args=[self.game.twitch_id]) + url: str = reverse("core:game_campaign_feed", args=[self.game.twitch_id]) response: _MonkeyPatchedWSGIResponse = self.client.get(url) content: str = response.content.decode("utf-8") @@ -609,7 +613,7 @@ class RSSFeedTestCase(TestCase): operation_names=["DropCampaignDetails"], ) - url: str = reverse("twitch:game_campaign_feed", args=[self.game.twitch_id]) + url: str = reverse("core:game_campaign_feed", args=[self.game.twitch_id]) response: _MonkeyPatchedWSGIResponse = self.client.get(url) assert response.status_code == 200 content: str = response.content.decode("utf-8") @@ -664,7 +668,7 @@ class RSSFeedTestCase(TestCase): game=self.game, ) - url: str = reverse("twitch:reward_campaign_feed") + url: str = reverse("core:reward_campaign_feed") response: _MonkeyPatchedWSGIResponse = self.client.get(url) assert response.status_code == 200 content: str = response.content.decode("utf-8") @@ -710,6 +714,7 @@ class RSSFeedTestCase(TestCase): name="File Game", display_name="File Game", ) + assert game2.box_art_file is not None game2.box_art_file.save("sample.png", ContentFile(b"hello")) game2.save() @@ -721,6 +726,7 @@ class RSSFeedTestCase(TestCase): end_at=timezone.now() + timedelta(days=1), operation_names=["DropCampaignDetails"], ) + assert campaign2.image_file is not None campaign2.image_file.save("camp.jpg", ContentFile(b"world")) campaign2.save() @@ -855,7 +861,7 @@ def test_campaign_feed_queries_bounded( for i in range(3): _build_campaign(game, i) - url: str = reverse("twitch:campaign_feed") + url: str = reverse("core:campaign_feed") # TODO(TheLovinator): 14 queries is still quite high for a feed - we should be able to optimize this further, but this is a good starting point to prevent regressions for now. 
# noqa: TD003 with django_assert_num_queries(14, exact=False): response: _MonkeyPatchedWSGIResponse = client.get(url) @@ -911,7 +917,7 @@ def test_campaign_feed_queries_do_not_scale_with_items( ) drop.benefits.add(benefit) - url: str = reverse("twitch:campaign_feed") + url: str = reverse("core:campaign_feed") # N+1 safeguard: query count should not scale linearly with campaign count. with django_assert_num_queries(40, exact=False): @@ -941,7 +947,7 @@ def test_game_campaign_feed_queries_bounded( for i in range(3): _build_campaign(game, i) - url: str = reverse("twitch:game_campaign_feed", args=[game.twitch_id]) + url: str = reverse("core:game_campaign_feed", args=[game.twitch_id]) with django_assert_num_queries(6, exact=False): response: _MonkeyPatchedWSGIResponse = client.get(url) @@ -970,7 +976,7 @@ def test_game_campaign_feed_queries_do_not_scale_with_items( for i in range(50): _build_campaign(game, i) - url: str = reverse("twitch:game_campaign_feed", args=[game.twitch_id]) + url: str = reverse("core:game_campaign_feed", args=[game.twitch_id]) with django_assert_num_queries(6, exact=False): response: _MonkeyPatchedWSGIResponse = client.get(url) @@ -987,7 +993,7 @@ def test_organization_feed_queries_bounded( for i in range(5): Organization.objects.create(twitch_id=f"org-feed-{i}", name=f"Org Feed {i}") - url: str = reverse("twitch:organization_feed") + url: str = reverse("core:organization_feed") with django_assert_num_queries(1, exact=True): response: _MonkeyPatchedWSGIResponse = client.get(url) @@ -1014,7 +1020,7 @@ def test_game_feed_queries_bounded( ) game.owners.add(org) - url: str = reverse("twitch:game_feed") + url: str = reverse("core:game_feed") # One query for games + one prefetch query for owners. 
with django_assert_num_queries(2, exact=True): response: _MonkeyPatchedWSGIResponse = client.get(url) @@ -1043,7 +1049,7 @@ def test_reward_campaign_feed_queries_bounded( for i in range(3): _build_reward_campaign(game, i) - url: str = reverse("twitch:reward_campaign_feed") + url: str = reverse("core:reward_campaign_feed") with django_assert_num_queries(1, exact=True): response: _MonkeyPatchedWSGIResponse = client.get(url) @@ -1076,7 +1082,7 @@ def test_docs_rss_queries_bounded( _build_campaign(game, i) _build_reward_campaign(game, i) - url: str = reverse("twitch:docs_rss") + url: str = reverse("core:docs_rss") # TODO(TheLovinator): 31 queries is still quite high for a feed - we should be able to optimize this further, but this is a good starting point to prevent regressions for now. # noqa: TD003 with django_assert_num_queries(31, exact=False): @@ -1093,8 +1099,8 @@ URL_NAMES: list[tuple[str, dict[str, str]]] = [ ("twitch:campaign_detail", {"twitch_id": "test-campaign-123"}), ("twitch:channel_list", {}), ("twitch:channel_detail", {"twitch_id": "test-channel-123"}), - ("twitch:debug", {}), - ("twitch:docs_rss", {}), + ("core:debug", {}), + ("core:docs_rss", {}), ("twitch:emote_gallery", {}), ("twitch:games_grid", {}), ("twitch:games_list", {}), @@ -1103,12 +1109,12 @@ URL_NAMES: list[tuple[str, dict[str, str]]] = [ ("twitch:organization_detail", {"twitch_id": "test-org-123"}), ("twitch:reward_campaign_list", {}), ("twitch:reward_campaign_detail", {"twitch_id": "test-reward-123"}), - ("twitch:search", {}), - ("twitch:campaign_feed", {}), - ("twitch:game_feed", {}), - ("twitch:game_campaign_feed", {"twitch_id": "test-game-123"}), - ("twitch:organization_feed", {}), - ("twitch:reward_campaign_feed", {}), + ("core:search", {}), + ("core:campaign_feed", {}), + ("core:game_feed", {}), + ("core:game_campaign_feed", {"twitch_id": "test-game-123"}), + ("core:organization_feed", {}), + ("core:reward_campaign_feed", {}), ] @@ -1251,7 +1257,7 @@ class 
DiscordFeedTestCase(TestCase): def test_organization_discord_feed(self) -> None: """Test organization Discord feed returns 200.""" - url: str = reverse("twitch:organization_feed_discord") + url: str = reverse("core:organization_feed_discord") response: _MonkeyPatchedWSGIResponse = self.client.get(url) assert response.status_code == 200 assert response["Content-Type"] == "application/xml; charset=utf-8" @@ -1262,7 +1268,7 @@ class DiscordFeedTestCase(TestCase): def test_game_discord_feed(self) -> None: """Test game Discord feed returns 200.""" - url: str = reverse("twitch:game_feed_discord") + url: str = reverse("core:game_feed_discord") response: _MonkeyPatchedWSGIResponse = self.client.get(url) assert response.status_code == 200 assert response["Content-Type"] == "application/xml; charset=utf-8" @@ -1272,7 +1278,7 @@ class DiscordFeedTestCase(TestCase): def test_campaign_discord_feed(self) -> None: """Test campaign Discord feed returns 200 with Discord timestamps.""" - url: str = reverse("twitch:campaign_feed_discord") + url: str = reverse("core:campaign_feed_discord") response: _MonkeyPatchedWSGIResponse = self.client.get(url) assert response.status_code == 200 assert response["Content-Type"] == "application/xml; charset=utf-8" @@ -1286,7 +1292,7 @@ class DiscordFeedTestCase(TestCase): def test_game_campaign_discord_feed(self) -> None: """Test game-specific campaign Discord feed returns 200.""" url: str = reverse( - "twitch:game_campaign_feed_discord", + "core:game_campaign_feed_discord", args=[self.game.twitch_id], ) response: _MonkeyPatchedWSGIResponse = self.client.get(url) @@ -1298,7 +1304,7 @@ class DiscordFeedTestCase(TestCase): def test_reward_campaign_discord_feed(self) -> None: """Test reward campaign Discord feed returns 200.""" - url: str = reverse("twitch:reward_campaign_feed_discord") + url: str = reverse("core:reward_campaign_feed_discord") response: _MonkeyPatchedWSGIResponse = self.client.get(url) assert response.status_code == 200 assert 
response["Content-Type"] == "application/xml; charset=utf-8" @@ -1313,27 +1319,27 @@ class DiscordFeedTestCase(TestCase): """All Discord feeds should use absolute URL entry IDs and matching self links.""" discord_feed_cases: list[tuple[str, dict[str, str], str]] = [ ( - "twitch:campaign_feed_discord", + "core:campaign_feed_discord", {}, f"http://testserver{reverse('twitch:campaign_detail', args=[self.campaign.twitch_id])}", ), ( - "twitch:game_feed_discord", + "core:game_feed_discord", {}, f"http://testserver{reverse('twitch:game_detail', args=[self.game.twitch_id])}", ), ( - "twitch:game_campaign_feed_discord", + "core:game_campaign_feed_discord", {"twitch_id": self.game.twitch_id}, f"http://testserver{reverse('twitch:campaign_detail', args=[self.campaign.twitch_id])}", ), ( - "twitch:organization_feed_discord", + "core:organization_feed_discord", {}, f"http://testserver{reverse('twitch:organization_detail', args=[self.org.twitch_id])}", ), ( - "twitch:reward_campaign_feed_discord", + "core:reward_campaign_feed_discord", {}, f"http://testserver{reverse('twitch:reward_campaign_detail', args=[self.reward_campaign.twitch_id])}", ), @@ -1359,11 +1365,11 @@ class DiscordFeedTestCase(TestCase): def test_discord_feeds_include_stylesheet_processing_instruction(self) -> None: """Discord feeds should include an xml-stylesheet processing instruction.""" feed_urls: list[str] = [ - reverse("twitch:campaign_feed_discord"), - reverse("twitch:game_feed_discord"), - reverse("twitch:game_campaign_feed_discord", args=[self.game.twitch_id]), - reverse("twitch:organization_feed_discord"), - reverse("twitch:reward_campaign_feed_discord"), + reverse("core:campaign_feed_discord"), + reverse("core:game_feed_discord"), + reverse("core:game_campaign_feed_discord", args=[self.game.twitch_id]), + reverse("core:organization_feed_discord"), + reverse("core:reward_campaign_feed_discord"), ] for url in feed_urls: @@ -1378,7 +1384,7 @@ class DiscordFeedTestCase(TestCase): def 
test_discord_campaign_feed_contains_discord_timestamps(self) -> None: """Discord campaign feed should contain Discord relative timestamps.""" - url: str = reverse("twitch:campaign_feed_discord") + url: str = reverse("core:campaign_feed_discord") response: _MonkeyPatchedWSGIResponse = self.client.get(url) assert response.status_code == 200 content: str = response.content.decode("utf-8") @@ -1392,7 +1398,7 @@ class DiscordFeedTestCase(TestCase): def test_discord_reward_campaign_feed_contains_discord_timestamps(self) -> None: """Discord reward campaign feed should contain Discord relative timestamps.""" - url: str = reverse("twitch:reward_campaign_feed_discord") + url: str = reverse("core:reward_campaign_feed_discord") response: _MonkeyPatchedWSGIResponse = self.client.get(url) assert response.status_code == 200 content: str = response.content.decode("utf-8") diff --git a/twitch/tests/test_views.py b/twitch/tests/test_views.py index 90b622b..d608a32 100644 --- a/twitch/tests/test_views.py +++ b/twitch/tests/test_views.py @@ -327,7 +327,7 @@ class TestChannelListView: def test_channel_list_loads(self, client: Client) -> None: """Test that channel list view loads successfully.""" - response: _MonkeyPatchedWSGIResponse = client.get("/channels/") + response: _MonkeyPatchedWSGIResponse = client.get("/twitch/channels/") assert response.status_code == 200 def test_campaign_count_annotation( @@ -342,7 +342,7 @@ class TestChannelListView: channel: Channel = channel_with_campaigns["channel"] # type: ignore[assignment] campaigns: list[DropCampaign] = channel_with_campaigns["campaigns"] # type: ignore[assignment] - response: _MonkeyPatchedWSGIResponse = client.get("/channels/") + response: _MonkeyPatchedWSGIResponse = client.get("/twitch/channels/") context: ContextList | dict[str, Any] = response.context # type: ignore[assignment] if isinstance(context, list): context = context[-1] @@ -375,7 +375,7 @@ class TestChannelListView: display_name="NoCampaigns", ) - response: 
_MonkeyPatchedWSGIResponse = client.get("/channels/") + response: _MonkeyPatchedWSGIResponse = client.get("/twitch/channels/") context: ContextList | dict[str, Any] = response.context # type: ignore[assignment] if isinstance(context, list): context = context[-1] @@ -420,7 +420,7 @@ class TestChannelListView: ) campaign.allow_channels.add(channel2) - response: _MonkeyPatchedWSGIResponse = client.get("/channels/") + response: _MonkeyPatchedWSGIResponse = client.get("/twitch/channels/") context: ContextList | dict[str, Any] = response.context # type: ignore[assignment] if isinstance(context, list): context = context[-1] @@ -462,7 +462,7 @@ class TestChannelListView: ) response: _MonkeyPatchedWSGIResponse = client.get( - f"/channels/?search={channel.name}", + f"/twitch/channels/?search={channel.name}", ) context: ContextList | dict[str, Any] = response.context # type: ignore[assignment] if isinstance(context, list): @@ -527,7 +527,7 @@ class TestChannelListView: @pytest.mark.django_db def test_debug_view(self, client: Client) -> None: """Test debug view returns 200 and has games_without_owner in context.""" - response: _MonkeyPatchedWSGIResponse = client.get(reverse("twitch:debug")) + response: _MonkeyPatchedWSGIResponse = client.get(reverse("core:debug")) assert response.status_code == 200 assert "games_without_owner" in response.context @@ -1014,7 +1014,7 @@ class TestChannelListView: @pytest.mark.django_db def test_docs_rss_view(self, client: Client) -> None: """Test docs RSS view returns 200 and has feeds in context.""" - response: _MonkeyPatchedWSGIResponse = client.get(reverse("twitch:docs_rss")) + response: _MonkeyPatchedWSGIResponse = client.get(reverse("core:docs_rss")) assert response.status_code == 200 assert "feeds" in response.context assert "filtered_feeds" in response.context @@ -1067,9 +1067,18 @@ class TestSEOHelperFunctions: def test_build_seo_context_with_all_parameters(self) -> None: """Test _build_seo_context with all parameters.""" now: 
datetime.datetime = timezone.now() - breadcrumb: list[dict[str, int | str]] = [ - {"position": 1, "name": "Home", "url": "/"}, - ] + breadcrumb: dict[str, Any] = { + "@context": "https://schema.org", + "@type": "BreadcrumbList", + "itemListElement": [ + { + "@type": "ListItem", + "position": 1, + "name": "Home", + "item": "/", + }, + ], + } context: dict[str, Any] = _build_seo_context( page_title="Test", @@ -1077,7 +1086,7 @@ class TestSEOHelperFunctions: page_image="https://example.com/img.jpg", og_type="article", schema_data={}, - breadcrumb_schema=breadcrumb, # pyright: ignore[reportArgumentType] + breadcrumb_schema=breadcrumb, pagination_info=[{"rel": "next", "url": "/page/2/"}], published_date=now.isoformat(), modified_date=now.isoformat(), @@ -1268,7 +1277,7 @@ class TestSEOMetaTags: def test_noindex_pages_have_robots_directive(self, client: Client) -> None: """Test that pages with noindex have proper robots directive.""" response: _MonkeyPatchedWSGIResponse = client.get( - reverse("twitch:dataset_backups"), + reverse("core:dataset_backups"), ) assert response.status_code == 200 assert "robots_directive" in response.context @@ -1405,7 +1414,7 @@ class TestSitemapView: channel: Channel = sample_entities["channel"] response: _MonkeyPatchedWSGIResponse = client.get("/sitemap.xml") content: str = response.content.decode() - assert f"/channels/{channel.twitch_id}/" in content + assert f"/twitch/channels/{channel.twitch_id}/" in content def test_sitemap_contains_badge_detail_pages( self, diff --git a/twitch/urls.py b/twitch/urls.py index 2947512..0ca95d1 100644 --- a/twitch/urls.py +++ b/twitch/urls.py @@ -3,21 +3,6 @@ from typing import TYPE_CHECKING from django.urls import path from twitch import views -from twitch.feeds import DropCampaignAtomFeed -from twitch.feeds import DropCampaignDiscordFeed -from twitch.feeds import DropCampaignFeed -from twitch.feeds import GameAtomFeed -from twitch.feeds import GameCampaignAtomFeed -from twitch.feeds import 
GameCampaignDiscordFeed -from twitch.feeds import GameCampaignFeed -from twitch.feeds import GameDiscordFeed -from twitch.feeds import GameFeed -from twitch.feeds import OrganizationAtomFeed -from twitch.feeds import OrganizationDiscordFeed -from twitch.feeds import OrganizationRSSFeed -from twitch.feeds import RewardCampaignAtomFeed -from twitch.feeds import RewardCampaignDiscordFeed -from twitch.feeds import RewardCampaignFeed if TYPE_CHECKING: from django.urls.resolvers import URLPattern @@ -27,129 +12,82 @@ app_name = "twitch" urlpatterns: list[URLPattern | URLResolver] = [ + # /twitch/ path("", views.dashboard, name="dashboard"), + # /twitch/badges/ path("badges/", views.badge_list_view, name="badge_list"), + # /twitch/badges// path("badges//", views.badge_set_detail_view, name="badge_set_detail"), + # /twitch/campaigns/ path("campaigns/", views.drop_campaign_list_view, name="campaign_list"), + # /twitch/campaigns// path( "campaigns//", views.drop_campaign_detail_view, name="campaign_detail", ), + # /twitch/channels/ path("channels/", views.ChannelListView.as_view(), name="channel_list"), + # /twitch/channels// path( "channels//", views.ChannelDetailView.as_view(), name="channel_detail", ), - path("debug/", views.debug_view, name="debug"), - path("datasets/", views.dataset_backups_view, name="dataset_backups"), - path( - "datasets/download//", - views.dataset_backup_download_view, - name="dataset_backup_download", - ), - path("docs/rss/", views.docs_rss_view, name="docs_rss"), + # /twitch/emotes/ path("emotes/", views.emote_gallery_view, name="emote_gallery"), + # /twitch/games/ path("games/", views.GamesGridView.as_view(), name="games_grid"), + # /twitch/games/list/ path("games/list/", views.GamesListView.as_view(), name="games_list"), + # /twitch/games// path("games//", views.GameDetailView.as_view(), name="game_detail"), + # /twitch/organizations/ path("organizations/", views.org_list_view, name="org_list"), + # /twitch/organizations// path( 
"organizations//", views.organization_detail_view, name="organization_detail", ), + # /twitch/reward-campaigns/ path( "reward-campaigns/", views.reward_campaign_list_view, name="reward_campaign_list", ), + # /twitch/reward-campaigns// path( "reward-campaigns//", views.reward_campaign_detail_view, name="reward_campaign_detail", ), - path("search/", views.search_view, name="search"), + # /twitch/export/campaigns/csv/ path( "export/campaigns/csv/", views.export_campaigns_csv, name="export_campaigns_csv", ), + # /twitch/export/campaigns/json/ path( "export/campaigns/json/", views.export_campaigns_json, name="export_campaigns_json", ), + # /twitch/export/games/csv/ path("export/games/csv/", views.export_games_csv, name="export_games_csv"), + # /twitch/export/games/json/ path("export/games/json/", views.export_games_json, name="export_games_json"), + # /twitch/export/organizations/csv/ path( "export/organizations/csv/", views.export_organizations_csv, name="export_organizations_csv", ), + # /twitch/export/organizations/json/ path( "export/organizations/json/", views.export_organizations_json, name="export_organizations_json", ), - # RSS feeds - # /rss/campaigns/ - all active campaigns - path("rss/campaigns/", DropCampaignFeed(), name="campaign_feed"), - # /rss/games/ - newly added games - path("rss/games/", GameFeed(), name="game_feed"), - # /rss/games//campaigns/ - active campaigns for a specific game - path( - "rss/games//campaigns/", - GameCampaignFeed(), - name="game_campaign_feed", - ), - # /rss/organizations/ - newly added organizations - path( - "rss/organizations/", - OrganizationRSSFeed(), - name="organization_feed", - ), - # /rss/reward-campaigns/ - all active reward campaigns - path( - "rss/reward-campaigns/", - RewardCampaignFeed(), - name="reward_campaign_feed", - ), - # Atom feeds (added alongside RSS to preserve backward compatibility) - path("atom/campaigns/", DropCampaignAtomFeed(), name="campaign_feed_atom"), - path("atom/games/", GameAtomFeed(), 
name="game_feed_atom"), - path( - "atom/games/<str:twitch_id>/campaigns/", - GameCampaignAtomFeed(), - name="game_campaign_feed_atom", - ), - path( - "atom/organizations/", - OrganizationAtomFeed(), - name="organization_feed_atom", - ), - path( - "atom/reward-campaigns/", - RewardCampaignAtomFeed(), - name="reward_campaign_feed_atom", - ), - # Discord feeds (Atom feeds with Discord relative timestamps) - path("discord/campaigns/", DropCampaignDiscordFeed(), name="campaign_feed_discord"), - path("discord/games/", GameDiscordFeed(), name="game_feed_discord"), - path( - "discord/games/<str:twitch_id>/campaigns/", - GameCampaignDiscordFeed(), - name="game_campaign_feed_discord", - ), - path( - "discord/organizations/", - OrganizationDiscordFeed(), - name="organization_feed_discord", - ), - path( - "discord/reward-campaigns/", - RewardCampaignDiscordFeed(), - name="reward_campaign_feed_discord", - ), ] diff --git a/twitch/utils.py b/twitch/utils.py index de5effe..87e8c57 100644 --- a/twitch/utils.py +++ b/twitch/utils.py @@ -53,7 +53,7 @@ def normalize_twitch_box_art_url(url: str) -> str: return url normalized_path: str = TWITCH_BOX_ART_SIZE_PATTERN.sub("", parsed.path) - return urlunparse(parsed._replace(path=normalized_path)) + return str(urlunparse(parsed._replace(path=normalized_path))) @lru_cache(maxsize=40 * 40 * 1024) diff --git a/twitch/views.py b/twitch/views.py index c4ef275..952ba81 100644 --- a/twitch/views.py +++ b/twitch/views.py @@ -2,36 +2,26 @@ import csv import datetime import json import logging -import operator from collections import OrderedDict from collections import defaultdict -from copy import copy from typing import TYPE_CHECKING from typing import Any from typing import Literal -from django.conf import settings from django.core.paginator import EmptyPage from django.core.paginator import Page from django.core.paginator import PageNotAnInteger from django.core.paginator import Paginator from django.core.serializers import serialize -from django.db import connection from 
django.db.models import Case from django.db.models import Count -from django.db.models import Exists -from django.db.models import F -from django.db.models import OuterRef from django.db.models import Prefetch from django.db.models import Q from django.db.models import When -from django.db.models.functions import Trim from django.db.models.query import QuerySet -from django.http import FileResponse from django.http import Http404 from django.http import HttpResponse from django.shortcuts import render -from django.template.defaultfilters import filesizeformat from django.urls import reverse from django.utils import timezone from django.views.generic import DetailView @@ -40,21 +30,6 @@ from pygments import highlight from pygments.formatters import HtmlFormatter from pygments.lexers.data import JsonLexer -from twitch.feeds import DropCampaignAtomFeed -from twitch.feeds import DropCampaignDiscordFeed -from twitch.feeds import DropCampaignFeed -from twitch.feeds import GameAtomFeed -from twitch.feeds import GameCampaignAtomFeed -from twitch.feeds import GameCampaignDiscordFeed -from twitch.feeds import GameCampaignFeed -from twitch.feeds import GameDiscordFeed -from twitch.feeds import GameFeed -from twitch.feeds import OrganizationAtomFeed -from twitch.feeds import OrganizationDiscordFeed -from twitch.feeds import OrganizationRSSFeed -from twitch.feeds import RewardCampaignAtomFeed -from twitch.feeds import RewardCampaignDiscordFeed -from twitch.feeds import RewardCampaignFeed from twitch.models import Channel from twitch.models import ChatBadge from twitch.models import ChatBadgeSet @@ -66,11 +41,6 @@ from twitch.models import RewardCampaign from twitch.models import TimeBasedDrop if TYPE_CHECKING: - from collections.abc import Callable - from os import stat_result - from pathlib import Path - - from debug_toolbar.utils import QueryDict from django.db.models import QuerySet from django.http import HttpRequest @@ -274,105 +244,6 @@ def emote_gallery_view(request: 
HttpRequest) -> HttpResponse: return render(request, "twitch/emote_gallery.html", context) -# MARK: /search/ -def search_view(request: HttpRequest) -> HttpResponse: - """Search view for all models. - - Args: - request: The HTTP request. - - Returns: - HttpResponse: The rendered search results. - """ - query: str = request.GET.get("q", "") - results: dict[str, QuerySet] = {} - - if query: - if len(query) < MIN_QUERY_LENGTH_FOR_FTS: - results["organizations"] = Organization.objects.filter( - name__istartswith=query, - ) - results["games"] = Game.objects.filter( - Q(name__istartswith=query) | Q(display_name__istartswith=query), - ) - - results["campaigns"] = DropCampaign.objects.filter( - Q(name__istartswith=query) | Q(description__icontains=query), - ).select_related("game") - - results["drops"] = TimeBasedDrop.objects.filter( - name__istartswith=query, - ).select_related("campaign") - - results["benefits"] = DropBenefit.objects.filter( - name__istartswith=query, - ).prefetch_related("drops__campaign") - - results["reward_campaigns"] = RewardCampaign.objects.filter( - Q(name__istartswith=query) - | Q(brand__istartswith=query) - | Q(summary__icontains=query), - ).select_related("game") - - results["badge_sets"] = ChatBadgeSet.objects.filter( - set_id__istartswith=query, - ) - - results["badges"] = ChatBadge.objects.filter( - Q(title__istartswith=query) | Q(description__icontains=query), - ).select_related("badge_set") - else: - results["organizations"] = Organization.objects.filter( - name__icontains=query, - ) - results["games"] = Game.objects.filter( - Q(name__icontains=query) | Q(display_name__icontains=query), - ) - - results["campaigns"] = DropCampaign.objects.filter( - Q(name__icontains=query) | Q(description__icontains=query), - ).select_related("game") - - results["drops"] = TimeBasedDrop.objects.filter( - name__icontains=query, - ).select_related("campaign") - - results["benefits"] = DropBenefit.objects.filter( - name__icontains=query, - 
).prefetch_related("drops__campaign") - - results["reward_campaigns"] = RewardCampaign.objects.filter( - Q(name__icontains=query) - | Q(brand__icontains=query) - | Q(summary__icontains=query), - ).select_related("game") - - results["badge_sets"] = ChatBadgeSet.objects.filter(set_id__icontains=query) - results["badges"] = ChatBadge.objects.filter( - Q(title__icontains=query) | Q(description__icontains=query), - ).select_related("badge_set") - - total_results_count: int = sum(len(qs) for qs in results.values()) - - # TODO(TheLovinator): Make the description more informative by including counts of each result type, e.g. "Found 5 games, 3 campaigns, and 10 drops for 'rust'." # noqa: TD003 - if query: - page_title: str = f"Search Results for '{query}'"[:60] - page_description: str = f"Found {total_results_count} results for '{query}'." - else: - page_title = "Search" - page_description = "Search for drops, games, channels, and organizations." - - seo_context: dict[str, Any] = _build_seo_context( - page_title=page_title, - page_description=page_description, - ) - return render( - request, - "twitch/search_results.html", - {"query": query, "results": results, **seo_context}, - ) - - # MARK: /organizations/ def org_list_view(request: HttpRequest) -> HttpResponse: """Function-based view for organization list. @@ -624,111 +495,6 @@ def format_and_color_json(data: dict[str, Any] | list[dict] | str) -> str: return highlight(formatted_code, JsonLexer(), HtmlFormatter()) -# MARK: /datasets/ -def dataset_backups_view(request: HttpRequest) -> HttpResponse: - """View to list database backup datasets on disk. - - Args: - request: The HTTP request. - - Returns: - HttpResponse: The rendered dataset backups page. - """ - # TODO(TheLovinator): Instead of only using sql we should also support other formats like parquet, csv, or json. # noqa: TD003 - # TODO(TheLovinator): Upload to s3 instead. 
# noqa: TD003 - # TODO(TheLovinator): https://developers.google.com/search/docs/appearance/structured-data/dataset#json-ld - datasets_root: Path = settings.DATA_DIR / "datasets" - search_dirs: list[Path] = [datasets_root] - seen_paths: set[str] = set() - datasets: list[dict[str, Any]] = [] - - for folder in search_dirs: - if not folder.exists() or not folder.is_dir(): - continue - - # Only include .zst files - for path in folder.glob("*.zst"): - if not path.is_file(): - continue - key = str(path.resolve()) - if key in seen_paths: - continue - seen_paths.add(key) - stat: stat_result = path.stat() - updated_at: datetime.datetime = datetime.datetime.fromtimestamp( - stat.st_mtime, - tz=timezone.get_current_timezone(), - ) - try: - display_path = str(path.relative_to(datasets_root)) - download_path: str | None = display_path - except ValueError: - display_path: str = path.name - download_path: str | None = None - datasets.append({ - "name": path.name, - "display_path": display_path, - "download_path": download_path, - "size": filesizeformat(stat.st_size), - "updated_at": updated_at, - }) - - datasets.sort(key=operator.itemgetter("updated_at"), reverse=True) - - seo_context: dict[str, Any] = _build_seo_context( - page_title="Twitch Dataset", - page_description="Database backups and datasets available for download.", - ) - context: dict[str, Any] = { - "datasets": datasets, - "data_dir": str(datasets_root), - "dataset_count": len(datasets), - **seo_context, - } - return render(request, "twitch/dataset_backups.html", context) - - -def dataset_backup_download_view( - request: HttpRequest, # noqa: ARG001 - relative_path: str, -) -> FileResponse: - """Download a dataset backup from the data directory. - - Args: - request: The HTTP request. - relative_path: The path relative to the data directory. - - Returns: - FileResponse: The file response for the requested dataset. - - Raises: - Http404: When the file is not found or is outside the data directory. 
- """ - # TODO(TheLovinator): Use s3 instead of local disk. # noqa: TD003 - - datasets_root: Path = settings.DATA_DIR / "datasets" - requested_path: Path = (datasets_root / relative_path).resolve() - data_root: Path = datasets_root.resolve() - - try: - requested_path.relative_to(data_root) - except ValueError as exc: - msg = "File not found" - raise Http404(msg) from exc - if not requested_path.exists() or not requested_path.is_file(): - msg = "File not found" - raise Http404(msg) - if not requested_path.name.endswith(".zst"): - msg = "File not found" - raise Http404(msg) - - return FileResponse( - requested_path.open("rb"), - as_attachment=True, - filename=requested_path.name, - ) - - def _enhance_drops_with_context( drops: QuerySet[TimeBasedDrop], now: datetime.datetime, @@ -1626,331 +1392,11 @@ def reward_campaign_detail_view(request: HttpRequest, twitch_id: str) -> HttpRes return render(request, "twitch/reward_campaign_detail.html", context) -# MARK: /debug/ -def debug_view(request: HttpRequest) -> HttpResponse: - """Debug view showing potentially broken or inconsistent data. - - Returns: - HttpResponse: Rendered debug template or redirect if unauthorized. 
- """ - now: datetime.datetime = timezone.now() - - # Games with no assigned owner organization - games_without_owner: QuerySet[Game] = Game.objects.filter( - owners__isnull=True, - ).order_by("display_name") - - # Campaigns with no images at all (no direct URL and no benefit image fallbacks) - broken_image_campaigns: QuerySet[DropCampaign] = ( - DropCampaign.objects - .filter( - Q(image_url__isnull=True) - | Q(image_url__exact="") - | ~Q(image_url__startswith="http"), - ) - .exclude( - Exists( - TimeBasedDrop.objects.filter(campaign=OuterRef("pk")).filter( - benefits__image_asset_url__startswith="http", - ), - ), - ) - .select_related("game") - ) - - # Benefits with missing images - broken_benefit_images: QuerySet[DropBenefit] = DropBenefit.objects.annotate( - trimmed_url=Trim("image_asset_url"), - ).filter( - Q(image_asset_url__isnull=True) - | Q(trimmed_url__exact="") - | ~Q(image_asset_url__startswith="http"), - ) - - # Time-based drops without any benefits - drops_without_benefits: QuerySet[TimeBasedDrop] = TimeBasedDrop.objects.filter( - benefits__isnull=True, - ).select_related("campaign__game") - - # Campaigns with invalid dates (start after end or missing either) - invalid_date_campaigns: QuerySet[DropCampaign] = DropCampaign.objects.filter( - Q(start_at__gt=F("end_at")) | Q(start_at__isnull=True) | Q(end_at__isnull=True), - ).select_related("game") - - # Duplicate campaign names per game. - # We retrieve the game's name for user-friendly display. 
- duplicate_name_campaigns: QuerySet[DropCampaign, dict[str, Any]] = ( - DropCampaign.objects - .values("game__display_name", "name", "game__twitch_id") - .annotate(name_count=Count("twitch_id")) - .filter(name_count__gt=1) - .order_by("game__display_name", "name") - ) - - # Active campaigns with no images at all - active_missing_image: QuerySet[DropCampaign] = ( - DropCampaign.objects - .filter(start_at__lte=now, end_at__gte=now) - .filter( - Q(image_url__isnull=True) - | Q(image_url__exact="") - | ~Q(image_url__startswith="http"), - ) - .exclude( - Exists( - TimeBasedDrop.objects.filter(campaign=OuterRef("pk")).filter( - benefits__image_asset_url__startswith="http", - ), - ), - ) - .select_related("game") - ) - - # Distinct GraphQL operation names used to fetch campaigns with counts - # Since operation_names is now a JSON list field, we need to flatten and count - operation_names_counter: dict[str, int] = {} - for campaign in DropCampaign.objects.only("operation_names"): - for op_name in campaign.operation_names: - if op_name and op_name.strip(): - operation_names_counter[op_name.strip()] = ( - operation_names_counter.get(op_name.strip(), 0) + 1 - ) - - operation_names_with_counts: list[dict[str, Any]] = [ - {"trimmed_op": op_name, "count": count} - for op_name, count in sorted(operation_names_counter.items()) - ] - - # Campaigns missing DropCampaignDetails operation name - # Need to handle SQLite separately since it doesn't support JSONField lookups - # Sqlite is used when testing - if connection.vendor == "sqlite": - all_campaigns: QuerySet[DropCampaign] = DropCampaign.objects.select_related( - "game", - ).order_by("game__display_name", "name") - campaigns_missing_dropcampaigndetails: list[DropCampaign] = [ - c - for c in all_campaigns - if c.operation_names is None - or "DropCampaignDetails" not in c.operation_names - ] - else: - campaigns_missing_dropcampaigndetails: list[DropCampaign] = list( - DropCampaign.objects - .filter( - 
Q(operation_names__isnull=True) - | ~Q(operation_names__contains=["DropCampaignDetails"]), - ) - .select_related("game") - .order_by("game__display_name", "name"), - ) - - context: dict[str, Any] = { - "now": now, - "games_without_owner": games_without_owner, - "broken_image_campaigns": broken_image_campaigns, - "broken_benefit_images": broken_benefit_images, - "drops_without_benefits": drops_without_benefits, - "invalid_date_campaigns": invalid_date_campaigns, - "duplicate_name_campaigns": duplicate_name_campaigns, - "active_missing_image": active_missing_image, - "operation_names_with_counts": operation_names_with_counts, - "campaigns_missing_dropcampaigndetails": campaigns_missing_dropcampaigndetails, - } - - seo_context: dict[str, Any] = _build_seo_context( - page_title="Debug", - page_description="Debug view showing potentially broken or inconsistent data.", - robots_directive="noindex, nofollow", - ) - context.update(seo_context) - - return render(request, "twitch/debug.html", context) - - # MARK: /games/list/ class GamesListView(GamesGridView): """List view for games in simple list format.""" - template_name: str = "twitch/games_list.html" - - -# MARK: /docs/rss/ -def docs_rss_view(request: HttpRequest) -> HttpResponse: - """View for /docs/rss that lists all available RSS feeds. - - Args: - request: The HTTP request object. - - Returns: - Rendered HTML response with list of RSS feeds. 
- """ - - def absolute(path: str) -> str: - try: - return request.build_absolute_uri(path) - except Exception: - logger.exception("Failed to build absolute URL for %s", path) - return path - - def _pretty_example(xml_str: str, max_items: int = 1) -> str: - try: - trimmed: str = xml_str.strip() - first_item: int = trimmed.find("", second_item) - if end_channel != -1: - trimmed = trimmed[:second_item] + trimmed[end_channel:] - formatted: str = trimmed.replace("><", ">\n<") - return "\n".join(line for line in formatted.splitlines() if line.strip()) - except Exception: - logger.exception("Failed to pretty-print RSS example") - return xml_str - - def render_feed(feed_view: Callable[..., HttpResponse], *args: object) -> str: - try: - limited_request: HttpRequest = copy(request) - # Add limit=1 to GET parameters - get_data: QueryDict = request.GET.copy() - get_data["limit"] = "1" - limited_request.GET = get_data # pyright: ignore[reportAttributeAccessIssue] - - response: HttpResponse = feed_view(limited_request, *args) - return _pretty_example(response.content.decode("utf-8")) - except Exception: - logger.exception( - "Failed to render %s for RSS docs", - feed_view.__class__.__name__, - ) - return "" - - show_atom: bool = bool(request.GET.get("show_atom")) - - feeds: list[dict[str, str]] = [ - { - "title": "All Organizations", - "description": "Latest organizations added to TTVDrops", - "url": absolute(reverse("twitch:organization_feed")), - "atom_url": absolute(reverse("twitch:organization_feed_atom")), - "discord_url": absolute(reverse("twitch:organization_feed_discord")), - "example_xml": render_feed(OrganizationRSSFeed()), - "example_xml_atom": render_feed(OrganizationAtomFeed()) - if show_atom - else "", - "example_xml_discord": render_feed(OrganizationDiscordFeed()) - if show_atom - else "", - }, - { - "title": "All Games", - "description": "Latest games added to TTVDrops", - "url": absolute(reverse("twitch:game_feed")), - "atom_url": 
absolute(reverse("twitch:game_feed_atom")), - "discord_url": absolute(reverse("twitch:game_feed_discord")), - "example_xml": render_feed(GameFeed()), - "example_xml_atom": render_feed(GameAtomFeed()) if show_atom else "", - "example_xml_discord": render_feed(GameDiscordFeed()) if show_atom else "", - }, - { - "title": "All Drop Campaigns", - "description": "Latest drop campaigns across all games", - "url": absolute(reverse("twitch:campaign_feed")), - "atom_url": absolute(reverse("twitch:campaign_feed_atom")), - "discord_url": absolute(reverse("twitch:campaign_feed_discord")), - "example_xml": render_feed(DropCampaignFeed()), - "example_xml_atom": render_feed(DropCampaignAtomFeed()) - if show_atom - else "", - "example_xml_discord": render_feed(DropCampaignDiscordFeed()) - if show_atom - else "", - }, - { - "title": "All Reward Campaigns", - "description": "Latest reward campaigns (Quest rewards) on Twitch", - "url": absolute(reverse("twitch:reward_campaign_feed")), - "atom_url": absolute(reverse("twitch:reward_campaign_feed_atom")), - "discord_url": absolute(reverse("twitch:reward_campaign_feed_discord")), - "example_xml": render_feed(RewardCampaignFeed()), - "example_xml_atom": render_feed(RewardCampaignAtomFeed()) - if show_atom - else "", - "example_xml_discord": render_feed(RewardCampaignDiscordFeed()) - if show_atom - else "", - }, - ] - - sample_game: Game | None = Game.objects.order_by("-added_at").first() - sample_org: Organization | None = Organization.objects.order_by("-added_at").first() - if sample_org is None and sample_game is not None: - sample_org = sample_game.owners.order_by("-pk").first() - - filtered_feeds: list[dict[str, str | bool]] = [ - { - "title": "Campaigns for a Single Game", - "description": "Latest drop campaigns for one game.", - "url": ( - absolute( - reverse("twitch:game_campaign_feed", args=[sample_game.twitch_id]), - ) - if sample_game - else absolute("/rss/games//campaigns/") - ), - "atom_url": ( - absolute( - reverse( - 
"twitch:game_campaign_feed_atom", - args=[sample_game.twitch_id], - ), - ) - if sample_game - else absolute("/atom/games//campaigns/") - ), - "discord_url": ( - absolute( - reverse( - "twitch:game_campaign_feed_discord", - args=[sample_game.twitch_id], - ), - ) - if sample_game - else absolute("/discord/games//campaigns/") - ), - "has_sample": bool(sample_game), - "example_xml": render_feed(GameCampaignFeed(), sample_game.twitch_id) - if sample_game - else "", - "example_xml_atom": ( - render_feed(GameCampaignAtomFeed(), sample_game.twitch_id) - if sample_game and show_atom - else "" - ), - "example_xml_discord": ( - render_feed(GameCampaignDiscordFeed(), sample_game.twitch_id) - if sample_game and show_atom - else "" - ), - }, - ] - - seo_context: dict[str, Any] = _build_seo_context( - page_title="Twitch RSS Feeds", - page_description="RSS feeds for Twitch drops.", - ) - return render( - request, - "twitch/docs_rss.html", - { - "feeds": feeds, - "filtered_feeds": filtered_feeds, - "sample_game": sample_game, - "sample_org": sample_org, - **seo_context, - }, - ) + template_name: str | None = "twitch/games_list.html" # MARK: /channels/ @@ -2302,7 +1748,7 @@ def badge_set_detail_view(request: HttpRequest, set_id: str) -> HttpResponse: ) return ChatBadge.objects.filter(pk__in=badge_ids).order_by(preserved_order) - badges = get_sorted_badges(badge_set) + badges: QuerySet[ChatBadge, ChatBadge] = get_sorted_badges(badge_set) # Attach award_campaigns attribute to each badge for template use for badge in badges: @@ -2647,143 +2093,3 @@ def export_organizations_json(request: HttpRequest) -> HttpResponse: # noqa: AR response["Content-Disposition"] = "attachment; filename=organizations.json" return response - - -# MARK: /sitemap.xml -def sitemap_view(request: HttpRequest) -> HttpResponse: # noqa: PLR0915 - """Generate a dynamic XML sitemap for search engines. - - Args: - request: The HTTP request. - - Returns: - HttpResponse: XML sitemap. 
- """ - base_url: str = f"{request.scheme}://{request.get_host()}" - - # Start building sitemap XML - sitemap_urls: list[dict[str, str | dict[str, str]]] = [] - - # Static pages - sitemap_urls.extend([ - {"url": f"{base_url}/", "priority": "1.0", "changefreq": "daily"}, - {"url": f"{base_url}/campaigns/", "priority": "0.9", "changefreq": "daily"}, - { - "url": f"{base_url}/reward-campaigns/", - "priority": "0.9", - "changefreq": "daily", - }, - {"url": f"{base_url}/games/", "priority": "0.9", "changefreq": "weekly"}, - { - "url": f"{base_url}/organizations/", - "priority": "0.8", - "changefreq": "weekly", - }, - {"url": f"{base_url}/channels/", "priority": "0.8", "changefreq": "weekly"}, - {"url": f"{base_url}/badges/", "priority": "0.7", "changefreq": "monthly"}, - {"url": f"{base_url}/emotes/", "priority": "0.7", "changefreq": "monthly"}, - {"url": f"{base_url}/search/", "priority": "0.6", "changefreq": "monthly"}, - ]) - - # Dynamic detail pages - Games - games: QuerySet[Game] = Game.objects.all() - for game in games: - entry: dict[str, str | dict[str, str]] = { - "url": f"{base_url}{reverse('twitch:game_detail', args=[game.twitch_id])}", - "priority": "0.8", - "changefreq": "weekly", - } - if game.updated_at: - entry["lastmod"] = game.updated_at.isoformat() - sitemap_urls.append(entry) - - # Dynamic detail pages - Campaigns - campaigns: QuerySet[DropCampaign] = DropCampaign.objects.all() - for campaign in campaigns: - resource_url: str = reverse("twitch:campaign_detail", args=[campaign.twitch_id]) - full_url: str = f"{base_url}{resource_url}" - entry: dict[str, str | dict[str, str]] = { - "url": full_url, - "priority": "0.7", - "changefreq": "weekly", - } - if campaign.updated_at: - entry["lastmod"] = campaign.updated_at.isoformat() - sitemap_urls.append(entry) - - # Dynamic detail pages - Organizations - orgs: QuerySet[Organization] = Organization.objects.all() - for org in orgs: - resource_url = reverse("twitch:organization_detail", args=[org.twitch_id]) - 
full_url: str = f"{base_url}{resource_url}" - entry: dict[str, str | dict[str, str]] = { - "url": full_url, - "priority": "0.7", - "changefreq": "weekly", - } - if org.updated_at: - entry["lastmod"] = org.updated_at.isoformat() - sitemap_urls.append(entry) - - # Dynamic detail pages - Channels - channels: QuerySet[Channel] = Channel.objects.all() - for channel in channels: - resource_url = reverse("twitch:channel_detail", args=[channel.twitch_id]) - full_url: str = f"{base_url}{resource_url}" - entry: dict[str, str | dict[str, str]] = { - "url": full_url, - "priority": "0.6", - "changefreq": "weekly", - } - if channel.updated_at: - entry["lastmod"] = channel.updated_at.isoformat() - sitemap_urls.append(entry) - - # Dynamic detail pages - Badges - badge_sets: QuerySet[ChatBadgeSet] = ChatBadgeSet.objects.all() - for badge_set in badge_sets: - resource_url = reverse("twitch:badge_set_detail", args=[badge_set.set_id]) - full_url: str = f"{base_url}{resource_url}" - sitemap_urls.append({ - "url": full_url, - "priority": "0.5", - "changefreq": "monthly", - }) - - # Dynamic detail pages - Reward Campaigns - reward_campaigns: QuerySet[RewardCampaign] = RewardCampaign.objects.all() - for reward_campaign in reward_campaigns: - resource_url = reverse( - "twitch:reward_campaign_detail", - args=[ - reward_campaign.twitch_id, - ], - ) - full_url: str = f"{base_url}{resource_url}" - entry: dict[str, str | dict[str, str]] = { - "url": full_url, - "priority": "0.6", - "changefreq": "weekly", - } - if reward_campaign.updated_at: - entry["lastmod"] = reward_campaign.updated_at.isoformat() - sitemap_urls.append(entry) - - # Build XML - xml_content = '\n' - xml_content += '\n' - - for url_entry in sitemap_urls: - xml_content += " \n" - xml_content += f" {url_entry['url']}\n" - if url_entry.get("lastmod"): - xml_content += f" {url_entry['lastmod']}\n" - xml_content += ( - f" {url_entry.get('changefreq', 'monthly')}\n" - ) - xml_content += f" {url_entry.get('priority', '0.5')}\n" - 
xml_content += " \n" - - xml_content += "" - - return HttpResponse(xml_content, content_type="application/xml") diff --git a/youtube/__init__.py b/youtube/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/youtube/apps.py b/youtube/apps.py new file mode 100644 index 0000000..b3b48ca --- /dev/null +++ b/youtube/apps.py @@ -0,0 +1,7 @@ +from django.apps import AppConfig + + +class YoutubeConfig(AppConfig): + """Django app configuration for the YouTube app.""" + + name = "youtube" diff --git a/youtube/migrations/__init__.py b/youtube/migrations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/youtube/tests/__init__.py b/youtube/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/youtube/tests/test_youtube.py b/youtube/tests/test_youtube.py new file mode 100644 index 0000000..6b207ce --- /dev/null +++ b/youtube/tests/test_youtube.py @@ -0,0 +1,49 @@ +from typing import TYPE_CHECKING + +from django.test import TestCase +from django.urls import reverse + +if TYPE_CHECKING: + from django.test.client import _MonkeyPatchedWSGIResponse + + +class YouTubeIndexViewTest(TestCase): + """Tests for the YouTube drops channels index page.""" + + def test_index_returns_200(self) -> None: + """The YouTube index page should return HTTP 200.""" + response: _MonkeyPatchedWSGIResponse = self.client.get(reverse("youtube:index")) + assert response.status_code == 200 + + def test_index_displays_known_channels(self) -> None: + """The page should include key known channels from the partner list.""" + response: _MonkeyPatchedWSGIResponse = self.client.get(reverse("youtube:index")) + content: str = response.content.decode() + + assert "YouTube Drops Channels" in content + assert "Call of Duty" in content + assert "PlayOverwatch" in content + assert "Hearthstone" in content + assert "Fortnite" in content + assert "Riot Games" in content + assert "Ubisoft" in content + + def test_index_includes_partner_urls(self) -> None: + """The page 
should render partner channel links from the source list.""" + response: _MonkeyPatchedWSGIResponse = self.client.get(reverse("youtube:index")) + content: str = response.content.decode() + + assert "https://www.youtube.com/channel/UCbLIqv9Puhyp9_ZjVtfOy7w" in content + assert "https://www.youtube.com/user/epicfortnite" in content + assert "https://www.youtube.com/lolesports" in content + + def test_index_groups_partners_alphabetically(self) -> None: + """Partner sections should render grouped and in alphabetical order.""" + response: _MonkeyPatchedWSGIResponse = self.client.get(reverse("youtube:index")) + content: str = response.content.decode() + + assert "
<h2>Activision (Call of Duty)</h2>" in content + assert "<h2>Battle.net / Blizzard</h2>" in content + assert content.index("<h2>Activision (Call of Duty)</h2>") < content.index( + "<h2>Battle.net / Blizzard</h2>
", + ) diff --git a/youtube/urls.py b/youtube/urls.py new file mode 100644 index 0000000..046b2c8 --- /dev/null +++ b/youtube/urls.py @@ -0,0 +1,16 @@ +from typing import TYPE_CHECKING + +from django.urls import path + +from youtube import views + +if TYPE_CHECKING: + from django.urls.resolvers import URLPattern + from django.urls.resolvers import URLResolver + +app_name = "youtube" + + +urlpatterns: list[URLPattern | URLResolver] = [ + path(route="", view=views.index, name="index"), +] diff --git a/youtube/views.py b/youtube/views.py new file mode 100644 index 0000000..28b2e27 --- /dev/null +++ b/youtube/views.py @@ -0,0 +1,115 @@ +from collections import defaultdict +from typing import TYPE_CHECKING + +from django.shortcuts import render + +if TYPE_CHECKING: + from django.http import HttpRequest + from django.http import HttpResponse + + +def index(request: HttpRequest) -> HttpResponse: + """Render a minimal list of YouTube channels with known drops-enabled partners. + + Returns: + HttpResponse: Rendered index page for YouTube drops channels. 
+ """ + channels: list[dict[str, str]] = [ + { + "partner": "Activision (Call of Duty)", + "channel": "Call of Duty", + "url": "https://www.youtube.com/channel/UCbLIqv9Puhyp9_ZjVtfOy7w", + }, + { + "partner": "Battle.net / Blizzard", + "channel": "PlayOverwatch", + "url": "https://www.youtube.com/c/playoverwatch/featured", + }, + { + "partner": "Battle.net / Blizzard", + "channel": "Hearthstone", + "url": "https://www.youtube.com/c/Hearthstone/featured", + }, + { + "partner": "Electronic Arts", + "channel": "FIFA", + "url": "https://www.youtube.com/channel/UCFA6YGp5lvgayO20lk7_Ung", + }, + { + "partner": "Electronic Arts", + "channel": "EA Madden NFL", + "url": "https://www.youtube.com/@EAMaddenNFL", + }, + { + "partner": "Epic Games", + "channel": "Fortnite", + "url": "https://www.youtube.com/user/epicfortnite", + }, + { + "partner": "Garena", + "channel": "Free Fire", + "url": "https://www.youtube.com/channel/UC_vVy4OI86F0amXqFN_zTMg", + }, + { + "partner": "Krafton (PUBG)", + "channel": "PUBG: BATTLEGROUNDS", + "url": "https://www.youtube.com/channel/UCTDO0RgowRyaAEUrPnBAg4g", + }, + { + "partner": "MLBB", + "channel": "Mobile Legends: Bang Bang", + "url": "https://www.youtube.com/channel/UCqmld-BIYME2i_ooRTo1EOg", + }, + { + "partner": "NBA", + "channel": "NBA", + "url": "https://www.youtube.com/user/NBA", + }, + { + "partner": "NFL", + "channel": "NFL", + "url": "https://www.youtube.com/@NFL", + }, + { + "partner": "PUBG Mobile", + "channel": "PUBG MOBILE", + "url": "https://www.youtube.com/channel/UCTDO0RgowRyaAEUrPnBAg4g", + }, + { + "partner": "Riot Games", + "channel": "Riot Games", + "url": "https://www.youtube.com/user/RiotGamesInc", + }, + { + "partner": "Riot Games", + "channel": "LoL Esports", + "url": "https://www.youtube.com/lolesports", + }, + { + "partner": "Supercell", + "channel": "Clash Royale", + "url": "https://www.youtube.com/channel/UC_F8DoJf9MZogEOU51TpTbQ", + }, + { + "partner": "Ubisoft", + "channel": "Ubisoft", + "url": 
"https://www.youtube.com/user/ubisoft", + }, + ] + + grouped_channels: dict[str, list[dict[str, str]]] = defaultdict(list) + for channel in channels: + grouped_channels[channel["partner"]].append(channel) + + partner_groups: list[dict[str, str | list[dict[str, str]]]] = [] + for partner in sorted(grouped_channels.keys(), key=str.lower): + sorted_items: list[dict[str, str]] = sorted( + grouped_channels[partner], + key=lambda item: item["channel"].lower(), + ) + partner_groups.append({"partner": partner, "channels": sorted_items}) + + context: dict[str, list[dict[str, str | list[dict[str, str]]]]] = { + "partner_groups": partner_groups, + } + return render(request=request, template_name="youtube/index.html", context=context)