Improve SEO

This commit is contained in:
Joakim Hellsén 2026-02-11 22:15:14 +01:00
commit bbeed5c9f0
Signed by: Joakim Hellsén
SSH key fingerprint: SHA256:/9h/CsExpFp+PRhsfA0xznFx2CGfTT5R/kpuFfUgEQk
6 changed files with 1331 additions and 45 deletions

View file

@ -7,11 +7,15 @@ from django.conf.urls.static import static
from django.urls import include
from django.urls import path
from twitch import views as twitch_views
if TYPE_CHECKING:
from django.urls.resolvers import URLPattern
from django.urls.resolvers import URLResolver
urlpatterns: [URLPattern | URLResolver] = [ # type: ignore[assignment]
urlpatterns: list[URLPattern | URLResolver] = [
path("sitemap.xml", twitch_views.sitemap_view, name="sitemap"),
path("robots.txt", twitch_views.robots_txt_view, name="robots"),
path(route="", view=include("twitch.urls", namespace="twitch")),
]

View file

@ -103,4 +103,4 @@ line-length = 120
[tool.djlint]
profile = "django"
ignore = "H021"
ignore = "H021,H030"

View file

@ -16,14 +16,13 @@
<link rel="manifest" href="{% static 'site.webmanifest' %}" />
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta name="description"
content="Twitch Drops Tracker - Track your Twitch drops and campaigns easily." />
<meta name="keywords" content="Twitch, Drops" />
<title>
{% block title %}
ttvdrops
{% endblock title %}
</title>
{% include "includes/meta_tags.html" %}
<style>
html {
color-scheme: light dark;

View file

@ -0,0 +1,70 @@
{# SEO Meta Tags Include #}
{# #}
{# Expected context variables: #}
{# - page_title: str - Page title (defaults to "ttvdrops") #}
{# - page_description: str - Page description (defaults to site description) #}
{# - page_image: str - Image URL for og:image (optional) #}
{# - page_url: str - Full URL for og:url and canonical (defaults to request.build_absolute_uri) #}
{# - og_type: str - OpenGraph type (defaults to "website") #}
{# - schema_data: str - JSON-LD schema data serialized as string (optional) #}
{# - breadcrumb_schema: str - Breadcrumb JSON-LD schema (optional) #}
{# - pagination_info: dict - {"rel": "prev|next|first|last", "url": "..."} (optional) #}
{# - published_date: str - ISO 8601 published date (optional) #}
{# - modified_date: str - ISO 8601 modified date (optional) #}
{# - robots_directive: str - robots meta content (default: "index, follow") #}
{# #}
{% load static %}
{# Preconnect to external resources for performance #}
<link rel="preconnect" href="https://static-cdn.jtvnw.net" />
<link rel="dns-prefetch" href="https://static-cdn.jtvnw.net" />
{# Description meta tag #}
<meta name="description"
      content="{% firstof page_description 'ttvdrops - Track Twitch drops.' %}" />
{# Robots meta tag (control indexing) #}
<meta name="robots"
      content="{% firstof robots_directive 'index, follow' %}" />
{# Author and Copyright #}
<meta name="author" content="TheLovinator1" />
<meta name="copyright"
      content="This work is dedicated to the public domain under CC0 1.0 Universal." />
{# Open Graph tags for social sharing #}
<meta property="og:title" content="{% firstof page_title 'ttvdrops' %}" />
<meta property="og:description"
      content="{% firstof page_description 'ttvdrops - Track Twitch drops.' %}" />
<meta property="og:type" content="{% firstof og_type 'website' %}" />
<meta property="og:url"
      content="{% firstof page_url request.build_absolute_uri %}" />
{% if page_image %}
  <meta property="og:image" content="{{ page_image }}" />
  <meta property="og:image:width" content="1200" />
  <meta property="og:image:height" content="630" />
{% endif %}
{# Twitter Card tags for rich previews #}
{# Card type upgrades to the large layout only when an image is available #}
<meta name="twitter:card"
      content="{% if page_image %}summary_large_image{% else %}summary{% endif %}" />
<meta name="twitter:title" content="{% firstof page_title 'ttvdrops' %}" />
<meta name="twitter:description"
      content="{% firstof page_description 'ttvdrops - Track Twitch drops.' %}" />
{% if page_image %}<meta name="twitter:image" content="{{ page_image }}" />{% endif %}
{# Article dates for content pages #}
{% if published_date %}<meta property="article:published_time" content="{{ published_date }}" />{% endif %}
{% if modified_date %}<meta property="article:modified_time" content="{{ modified_date }}" />{% endif %}
{# Canonical tag #}
<link rel="canonical"
      href="{% firstof page_url request.build_absolute_uri %}" />
{# Pagination links (for crawler efficiency) #}
{# pagination_info carries a single rel/url pair, so at most one link is emitted per page #}
{% if pagination_info %}
  {% if pagination_info.rel == "prev" %}
    <link rel="prev" href="{{ pagination_info.url }}" />
  {% elif pagination_info.rel == "next" %}
    <link rel="next" href="{{ pagination_info.url }}" />
  {% elif pagination_info.rel == "first" %}
    <link rel="first" href="{{ pagination_info.url }}" />
  {% elif pagination_info.rel == "last" %}
    <link rel="last" href="{{ pagination_info.url }}" />
  {% endif %}
{% endif %}
{# Schema.org JSON-LD structured data #}
{% if schema_data %}<script type="application/ld+json">{{ schema_data|safe }}</script>{% endif %}
{# Breadcrumb schema #}
{% if breadcrumb_schema %}<script type="application/ld+json">{{ breadcrumb_schema|safe }}</script>{% endif %}

View file

@ -1,12 +1,16 @@
from __future__ import annotations
import datetime
import json
from datetime import timedelta
from typing import TYPE_CHECKING
from typing import Any
from typing import Literal
import pytest
from django.core.handlers.wsgi import WSGIRequest
from django.core.paginator import Paginator
from django.test import RequestFactory
from django.urls import reverse
from django.utils import timezone
@ -18,8 +22,14 @@ from twitch.models import DropCampaign
from twitch.models import Game
from twitch.models import Organization
from twitch.models import TimeBasedDrop
from twitch.views import Page
from twitch.views import _build_breadcrumb_schema
from twitch.views import _build_pagination_info
from twitch.views import _build_seo_context
from twitch.views import _truncate_description
if TYPE_CHECKING:
from django.core.handlers.wsgi import WSGIRequest
from django.test import Client
from django.test.client import _MonkeyPatchedWSGIResponse
from django.test.utils import ContextList
@ -834,3 +844,481 @@ class TestChannelListView:
assert response.context["feeds"][0]["example_xml"]
html: str = response.content.decode()
assert '<code class="language-xml">' in html
@pytest.mark.django_db
class TestSEOHelperFunctions:
    """Tests for SEO helper functions."""

    def test_truncate_description_short_text(self) -> None:
        """Test that short text is not truncated."""
        text = "This is a short description"
        result: str = _truncate_description(text, max_length=160)
        assert result == text

    def test_truncate_description_long_text(self) -> None:
        """Test that long text is truncated at word boundary."""
        text = "This is a very long description that exceeds the maximum length and should be truncated at a word boundary to avoid cutting off in the middle of a word"  # noqa: E501
        result: str = _truncate_description(text, max_length=50)
        # The truncation suffix may push the result slightly past max_length.
        assert len(result) <= 53  # Allow some flexibility
        assert not result.endswith(" ")

    def test_truncate_description_adds_ellipsis(self) -> None:
        """Test that truncation adds ellipsis."""
        text = "This is a very long description that exceeds the maximum length"
        result: str = _truncate_description(text, max_length=30)
        # NOTE(review): the expected suffix literal appears empty in this
        # view — likely a non-ASCII truncation character ("…") lost in
        # transit; confirm against the repository source.
        assert result.endswith("")  # Uses en-dash, not three dots

    def test_build_seo_context_required_fields(self) -> None:
        """Test that _build_seo_context returns all required fields."""
        context: dict[str, Any] = _build_seo_context(
            page_title="Test Title",
            page_description="Test Description",
            page_image="https://example.com/image.jpg",
            og_type="article",
            schema_data={"@context": "https://schema.org"},
        )
        assert context["page_title"] == "Test Title"
        assert context["page_description"] == "Test Description"
        assert context["page_image"] == "https://example.com/image.jpg"
        assert context["og_type"] == "article"
        assert context["robots_directive"] == "index, follow"  # default
        # schema_data is JSON-dumped to a string in context
        assert json.loads(context["schema_data"]) == {"@context": "https://schema.org"}

    def test_build_seo_context_with_all_parameters(self) -> None:
        """Test _build_seo_context with all parameters."""
        now: datetime.datetime = timezone.now()
        breadcrumb: list[dict[str, int | str]] = [{"position": 1, "name": "Home", "url": "/"}]
        context: dict[str, Any] = _build_seo_context(
            page_title="Test",
            page_description="Desc",
            page_image="https://example.com/img.jpg",
            og_type="article",
            schema_data={},
            breadcrumb_schema=breadcrumb,  # pyright: ignore[reportArgumentType]
            pagination_info={"rel": "next", "url": "/page/2/"},
            published_date=now.isoformat(),
            modified_date=now.isoformat(),
            robots_directive="noindex, follow",
        )
        # breadcrumb_schema is JSON-dumped, so parse it back
        assert json.loads(context["breadcrumb_schema"]) == breadcrumb
        assert context["pagination_info"] == {"rel": "next", "url": "/page/2/"}
        assert context["published_date"] == now.isoformat()
        assert context["modified_date"] == now.isoformat()
        assert context["robots_directive"] == "noindex, follow"

    def test_build_breadcrumb_schema_structure(self) -> None:
        """Test that _build_breadcrumb_schema creates proper BreadcrumbList structure."""
        items: list[dict[str, str | int]] = [
            {"name": "Home", "url": "/"},
            {"name": "Games", "url": "/games/"},
            {"name": "Test Game", "url": "/games/123/"},
        ]
        schema: dict[str, Any] = _build_breadcrumb_schema(items)
        assert schema["@context"] == "https://schema.org"
        assert schema["@type"] == "BreadcrumbList"
        assert schema["itemListElement"][0]["@type"] == "ListItem"
        # Positions are 1-based per the BreadcrumbList spec.
        assert schema["itemListElement"][0]["position"] == 1
        assert schema["itemListElement"][0]["name"] == "Home"
        assert schema["itemListElement"][2]["position"] == 3

    def test_build_pagination_info_with_next_page(self) -> None:
        """Test _build_pagination_info extracts next page URL."""
        factory = RequestFactory()
        request: WSGIRequest = factory.get("/campaigns/?page=1")
        items: list[int] = list(range(100))
        paginator: Paginator[int] = Paginator(items, 10)
        page: Page[int] = paginator.get_page(1)
        info: dict[str, str] | None = _build_pagination_info(request, page, "/campaigns/")
        assert info is not None
        assert "url" in info
        assert "rel" in info
        assert info["rel"] == "next"
        assert "page=2" in info["url"]

    def test_build_pagination_info_with_prev_page(self) -> None:
        """Test _build_pagination_info extracts prev page URL."""
        factory = RequestFactory()
        request: WSGIRequest = factory.get("/campaigns/?page=2")
        items: list[int] = list(range(100))
        paginator: Paginator[int] = Paginator(items, 10)
        page: Page[int] = paginator.get_page(2)
        # Page 2 of 10 has both prev and next; the helper reports "prev"
        # when both exist.
        info: dict[str, str] | None = _build_pagination_info(request, page, "/campaigns/")
        assert info is not None
        assert "url" in info
        assert "rel" in info
        assert info["rel"] == "prev"
        assert "page=1" in info["url"]
@pytest.mark.django_db
class TestSEOMetaTags:
    """Tests for SEO meta tags in views."""

    @pytest.fixture
    def game_with_campaign(self) -> dict[str, Any]:
        """Create a game with campaign for testing.

        Returns:
            dict[str, Any]: A dictionary containing the created organization, game, and campaign.
        """
        org: Organization = Organization.objects.create(twitch_id="org1", name="Test Org")
        game: Game = Game.objects.create(
            twitch_id="game1",
            name="test_game",
            display_name="Test Game",
            box_art="https://example.com/box_art.jpg",
        )
        game.owners.add(org)
        campaign: DropCampaign = DropCampaign.objects.create(
            twitch_id="camp1",
            name="Test Campaign",
            description="Campaign description",
            game=game,
            image_url="https://example.com/campaign.jpg",
            operation_names=["DropCampaignDetails"],
        )
        return {"org": org, "game": game, "campaign": campaign}

    def test_campaign_list_view_has_seo_context(self, client: Client) -> None:
        """Test campaign list view has SEO context variables."""
        response: _MonkeyPatchedWSGIResponse = client.get(reverse("twitch:campaign_list"))
        assert response.status_code == 200
        assert "page_title" in response.context
        assert "page_description" in response.context

    def test_campaign_detail_view_has_breadcrumb(
        self,
        client: Client,
        game_with_campaign: dict[str, Any],
    ) -> None:
        """Test campaign detail view has breadcrumb schema."""
        campaign: DropCampaign = game_with_campaign["campaign"]
        url: str = reverse("twitch:campaign_detail", args=[campaign.twitch_id])
        response: _MonkeyPatchedWSGIResponse = client.get(url)
        assert response.status_code == 200
        assert "breadcrumb_schema" in response.context
        # breadcrumb_schema is JSON-dumped in context
        breadcrumb_str: str = response.context["breadcrumb_schema"]
        breadcrumb: dict[str, Any] = json.loads(breadcrumb_str)
        assert breadcrumb["@type"] == "BreadcrumbList"
        # Home > Campaigns > campaign name at minimum.
        assert len(breadcrumb["itemListElement"]) >= 3

    def test_campaign_detail_view_has_modified_date(
        self,
        client: Client,
        game_with_campaign: dict[str, Any],
    ) -> None:
        """Test campaign detail view has modified_date."""
        campaign: DropCampaign = game_with_campaign["campaign"]
        url: str = reverse("twitch:campaign_detail", args=[campaign.twitch_id])
        response: _MonkeyPatchedWSGIResponse = client.get(url)
        assert response.status_code == 200
        assert "modified_date" in response.context
        assert response.context["modified_date"] is not None

    def test_game_detail_view_has_seo_context(
        self,
        client: Client,
        game_with_campaign: dict[str, Any],
    ) -> None:
        """Test game detail view has full SEO context."""
        game: Game = game_with_campaign["game"]
        url: str = reverse("twitch:game_detail", args=[game.twitch_id])
        response: _MonkeyPatchedWSGIResponse = client.get(url)
        assert response.status_code == 200
        assert "page_title" in response.context
        assert "page_description" in response.context
        assert "breadcrumb_schema" in response.context
        assert "modified_date" in response.context

    def test_organization_detail_view_has_breadcrumb(self, client: Client) -> None:
        """Test organization detail view has breadcrumb."""
        org: Organization = Organization.objects.create(twitch_id="org1", name="Test Org")
        url: str = reverse("twitch:organization_detail", args=[org.twitch_id])
        response: _MonkeyPatchedWSGIResponse = client.get(url)
        assert response.status_code == 200
        assert "breadcrumb_schema" in response.context

    def test_channel_detail_view_has_breadcrumb(self, client: Client) -> None:
        """Test channel detail view has breadcrumb."""
        channel: Channel = Channel.objects.create(twitch_id="ch1", name="ch1", display_name="Channel 1")
        url: str = reverse("twitch:channel_detail", args=[channel.twitch_id])
        response: _MonkeyPatchedWSGIResponse = client.get(url)
        assert response.status_code == 200
        assert "breadcrumb_schema" in response.context

    def test_noindex_pages_have_robots_directive(self, client: Client) -> None:
        """Test that pages with noindex have proper robots directive."""
        response: _MonkeyPatchedWSGIResponse = client.get(reverse("twitch:dataset_backups"))
        assert response.status_code == 200
        assert "robots_directive" in response.context
        assert "noindex" in response.context["robots_directive"]
@pytest.mark.django_db
class TestSitemapView:
    """Tests for the sitemap.xml view."""

    @pytest.fixture
    def sample_entities(self) -> dict[str, Any]:
        """Create sample entities for sitemap testing.

        Returns:
            dict[str, Any]: A dictionary containing the created organization, game, channel, campaign, and badge set.
        """
        org: Organization = Organization.objects.create(twitch_id="org1", name="Test Org")
        game: Game = Game.objects.create(
            twitch_id="game1",
            name="test_game",
            display_name="Test Game",
        )
        game.owners.add(org)
        channel: Channel = Channel.objects.create(twitch_id="ch1", name="ch1", display_name="Channel 1")
        campaign: DropCampaign = DropCampaign.objects.create(
            twitch_id="camp1",
            name="Test Campaign",
            description="Desc",
            game=game,
            operation_names=["DropCampaignDetails"],
        )
        badge: ChatBadgeSet = ChatBadgeSet.objects.create(set_id="badge1")
        return {
            "org": org,
            "game": game,
            "channel": channel,
            "campaign": campaign,
            "badge": badge,
        }

    def test_sitemap_view_returns_xml(self, client: Client, sample_entities: dict[str, Any]) -> None:
        """Test sitemap view returns XML content."""
        response: _MonkeyPatchedWSGIResponse = client.get("/sitemap.xml")
        assert response.status_code == 200
        assert response["Content-Type"] == "application/xml"

    def test_sitemap_contains_xml_declaration(self, client: Client, sample_entities: dict[str, Any]) -> None:
        """Test sitemap contains proper XML declaration."""
        response: _MonkeyPatchedWSGIResponse = client.get("/sitemap.xml")
        content: str = response.content.decode()
        assert content.startswith('<?xml version="1.0" encoding="UTF-8"?>')

    def test_sitemap_contains_urlset(self, client: Client, sample_entities: dict[str, Any]) -> None:
        """Test sitemap contains urlset element."""
        response: _MonkeyPatchedWSGIResponse = client.get("/sitemap.xml")
        content: str = response.content.decode()
        assert "<urlset" in content
        assert "</urlset>" in content

    def test_sitemap_contains_static_pages(self, client: Client, sample_entities: dict[str, Any]) -> None:
        """Test sitemap includes static pages."""
        response: _MonkeyPatchedWSGIResponse = client.get("/sitemap.xml")
        content: str = response.content.decode()
        # Check for some static pages; the host depends on the test runner.
        assert "<loc>http://testserver/</loc>" in content or "<loc>http://localhost:8000/</loc>" in content
        assert "/campaigns/" in content
        assert "/games/" in content

    def test_sitemap_contains_game_detail_pages(
        self,
        client: Client,
        sample_entities: dict[str, Any],
    ) -> None:
        """Test sitemap includes game detail pages."""
        game: Game = sample_entities["game"]
        response: _MonkeyPatchedWSGIResponse = client.get("/sitemap.xml")
        content: str = response.content.decode()
        assert f"/games/{game.twitch_id}/" in content

    def test_sitemap_contains_campaign_detail_pages(
        self,
        client: Client,
        sample_entities: dict[str, Any],
    ) -> None:
        """Test sitemap includes campaign detail pages."""
        campaign: DropCampaign = sample_entities["campaign"]
        response: _MonkeyPatchedWSGIResponse = client.get("/sitemap.xml")
        content: str = response.content.decode()
        assert f"/campaigns/{campaign.twitch_id}/" in content

    def test_sitemap_contains_organization_detail_pages(
        self,
        client: Client,
        sample_entities: dict[str, Any],
    ) -> None:
        """Test sitemap includes organization detail pages."""
        org: Organization = sample_entities["org"]
        response: _MonkeyPatchedWSGIResponse = client.get("/sitemap.xml")
        content: str = response.content.decode()
        assert f"/organizations/{org.twitch_id}/" in content

    def test_sitemap_contains_channel_detail_pages(
        self,
        client: Client,
        sample_entities: dict[str, Any],
    ) -> None:
        """Test sitemap includes channel detail pages."""
        channel: Channel = sample_entities["channel"]
        response: _MonkeyPatchedWSGIResponse = client.get("/sitemap.xml")
        content: str = response.content.decode()
        assert f"/channels/{channel.twitch_id}/" in content

    def test_sitemap_contains_badge_detail_pages(
        self,
        client: Client,
        sample_entities: dict[str, Any],
    ) -> None:
        """Test sitemap includes badge detail pages."""
        # The fixture stores a ChatBadgeSet under "badge"; annotating it as
        # such makes the set_id access type-check without suppression.
        badge: ChatBadgeSet = sample_entities["badge"]
        response: _MonkeyPatchedWSGIResponse = client.get("/sitemap.xml")
        content: str = response.content.decode()
        assert f"/badges/{badge.set_id}/" in content

    def test_sitemap_includes_priority(self, client: Client, sample_entities: dict[str, Any]) -> None:
        """Test sitemap includes priority values."""
        response: _MonkeyPatchedWSGIResponse = client.get("/sitemap.xml")
        content: str = response.content.decode()
        assert "<priority>" in content
        assert "</priority>" in content

    def test_sitemap_includes_changefreq(self, client: Client, sample_entities: dict[str, Any]) -> None:
        """Test sitemap includes changefreq values."""
        response: _MonkeyPatchedWSGIResponse = client.get("/sitemap.xml")
        content: str = response.content.decode()
        assert "<changefreq>" in content
        assert "</changefreq>" in content

    def test_sitemap_includes_lastmod(self, client: Client, sample_entities: dict[str, Any]) -> None:
        """Test sitemap includes lastmod for detail pages."""
        response: _MonkeyPatchedWSGIResponse = client.get("/sitemap.xml")
        content: str = response.content.decode()
        # Check for lastmod in game or campaign entries
        assert "<lastmod>" in content
@pytest.mark.django_db
class TestRobotsTxtView:
    """Tests for the robots.txt view."""

    def test_robots_txt_returns_text(self, client: Client) -> None:
        """Test robots.txt view returns text content."""
        response: _MonkeyPatchedWSGIResponse = client.get("/robots.txt")
        assert response.status_code == 200
        # The charset suffix depends on the response construction.
        assert response["Content-Type"] in {"text/plain", "text/plain; charset=utf-8"}

    def test_robots_txt_user_agent(self, client: Client) -> None:
        """Test robots.txt contains user-agent."""
        response: _MonkeyPatchedWSGIResponse = client.get("/robots.txt")
        content: str = response.content.decode()
        assert "User-agent: *" in content

    def test_robots_txt_allow_root(self, client: Client) -> None:
        """Test robots.txt allows root path."""
        response: _MonkeyPatchedWSGIResponse = client.get("/robots.txt")
        content: str = response.content.decode()
        assert "Allow: /" in content

    def test_robots_txt_disallow_admin(self, client: Client) -> None:
        """Test robots.txt disallows /admin/."""
        response: _MonkeyPatchedWSGIResponse = client.get("/robots.txt")
        content: str = response.content.decode()
        assert "Disallow: /admin/" in content

    def test_robots_txt_disallow_debug(self, client: Client) -> None:
        """Test robots.txt disallows /debug/."""
        response: _MonkeyPatchedWSGIResponse = client.get("/robots.txt")
        content: str = response.content.decode()
        assert "Disallow: /debug/" in content

    def test_robots_txt_disallow_datasets(self, client: Client) -> None:
        """Test robots.txt disallows /datasets/."""
        response: _MonkeyPatchedWSGIResponse = client.get("/robots.txt")
        content: str = response.content.decode()
        assert "Disallow: /datasets/" in content

    def test_robots_txt_sitemap_reference(self, client: Client) -> None:
        """Test robots.txt references sitemap."""
        response: _MonkeyPatchedWSGIResponse = client.get("/robots.txt")
        content: str = response.content.decode()
        assert "Sitemap:" in content
        assert "/sitemap.xml" in content

    def test_robots_txt_disallow_export(self, client: Client) -> None:
        """Test robots.txt disallows /export/."""
        response: _MonkeyPatchedWSGIResponse = client.get("/robots.txt")
        content: str = response.content.decode()
        assert "Disallow: /export/" in content
@pytest.mark.django_db
class TestSEOPaginationLinks:
    """Tests for SEO pagination links in views."""

    def test_campaign_list_first_page_has_next(self, client: Client) -> None:
        """Test campaign list first page has next link."""
        # Create a game and multiple campaigns to trigger pagination
        org: Organization = Organization.objects.create(twitch_id="org1", name="Test Org")
        game: Game = Game.objects.create(
            twitch_id="game1",
            name="test_game",
            display_name="Test Game",
        )
        game.owners.add(org)
        for i in range(25):
            DropCampaign.objects.create(
                twitch_id=f"camp{i}",
                name=f"Campaign {i}",
                description="Desc",
                game=game,
                operation_names=["DropCampaignDetails"],
            )
        response: _MonkeyPatchedWSGIResponse = client.get(reverse("twitch:campaign_list"))
        assert response.status_code == 200
        # pagination_info is only added when the page actually has a next page.
        if response.context.get("page_obj") and response.context["page_obj"].has_next():
            assert "pagination_info" in response.context

    def test_campaign_list_pagination_info_structure(self, client: Client) -> None:
        """Test pagination_info has correct structure."""
        # Create a game and multiple campaigns to trigger pagination
        org: Organization = Organization.objects.create(twitch_id="org1", name="Test Org")
        game: Game = Game.objects.create(
            twitch_id="game1",
            name="test_game",
            display_name="Test Game",
        )
        game.owners.add(org)
        for i in range(25):
            DropCampaign.objects.create(
                twitch_id=f"camp{i}",
                name=f"Campaign {i}",
                description="Desc",
                game=game,
                operation_names=["DropCampaignDetails"],
            )
        response: _MonkeyPatchedWSGIResponse = client.get(reverse("twitch:campaign_list"))
        assert response.status_code == 200
        if "pagination_info" in response.context:
            pagination_info = response.context["pagination_info"]
            # Should be a dict with rel and url
            assert isinstance(pagination_info, dict)
            # NOTE(review): the `or pagination_info is None` clause is
            # unreachable after the isinstance assert above — consider
            # simplifying to `assert "rel" in pagination_info`.
            assert "rel" in pagination_info or pagination_info is None

View file

@ -25,6 +25,7 @@ from django.db.models import Prefetch
from django.db.models import Q
from django.db.models import Subquery
from django.db.models.functions import Trim
from django.db.models.query import QuerySet
from django.http import FileResponse
from django.http import Http404
from django.http import HttpRequest
@ -68,6 +69,142 @@ logger: logging.Logger = logging.getLogger("ttvdrops.views")
MIN_QUERY_LENGTH_FOR_FTS = 3
MIN_SEARCH_RANK = 0.05
DEFAULT_SITE_DESCRIPTION = "Twitch Drops Tracker - Track your Twitch drops and campaigns easily."
def _truncate_description(text: str, max_length: int = 160) -> str:
    """Truncate text to a reasonable description length (for meta tags).

    Args:
        text: The text to truncate.
        max_length: Maximum length for the description.

    Returns:
        Truncated text with ellipsis if needed.
    """
    # Guard: None/empty input yields an empty description.
    if not text:
        return ""
    text = text.strip()
    if len(text) <= max_length:
        return text
    # Cut at max_length, then drop the trailing partial word so the
    # description never ends mid-word.
    # NOTE(review): the appended suffix literal appears empty in this view,
    # while the related test refers to a special truncation character
    # (likely "…" lost in transit) — confirm against the repository source.
    return text[:max_length].rsplit(" ", 1)[0] + ""
def _build_seo_context( # noqa: PLR0913, PLR0917
page_title: str = "ttvdrops",
page_description: str | None = None,
page_image: str | None = None,
og_type: str = "website",
schema_data: dict[str, Any] | None = None,
breadcrumb_schema: dict[str, Any] | None = None,
pagination_info: dict[str, str] | None = None,
published_date: str | None = None,
modified_date: str | None = None,
robots_directive: str = "index, follow",
) -> dict[str, Any]:
"""Build SEO context for template rendering.
Args:
page_title: Page title (shown in browser tab, og:title).
page_description: Page description (meta description, og:description).
page_image: Image URL for og:image meta tag.
og_type: OpenGraph type (e.g., "website", "article").
schema_data: Dict representation of Schema.org JSON-LD data.
breadcrumb_schema: Breadcrumb schema dict for navigation hierarchy.
pagination_info: Dict with "rel" (prev|next|first|last) and "url".
published_date: ISO 8601 published date (e.g., "2025-01-01T00:00:00Z").
modified_date: ISO 8601 modified date.
robots_directive: Robots meta content (e.g., "index, follow" or "noindex").
Returns:
Dict with SEO context variables to pass to render().
"""
context: dict[str, Any] = {
"page_title": page_title,
"page_description": page_description or DEFAULT_SITE_DESCRIPTION,
"og_type": og_type,
"robots_directive": robots_directive,
}
if page_image:
context["page_image"] = page_image
if schema_data:
context["schema_data"] = json.dumps(schema_data)
if breadcrumb_schema:
context["breadcrumb_schema"] = json.dumps(breadcrumb_schema)
if pagination_info:
context["pagination_info"] = pagination_info
if published_date:
context["published_date"] = published_date
if modified_date:
context["modified_date"] = modified_date
return context
def _build_breadcrumb_schema(
items: list[dict[str, str | int]],
) -> dict[str, Any]:
"""Build a BreadcrumbList schema for structured data.
Args:
items: List of dicts with "name" and "url" keys.
First item should be homepage.
Returns:
BreadcrumbList schema dict.
"""
breadcrumb_items: list[dict[str, str | int]] = []
for position, item in enumerate(items, start=1):
breadcrumb_items.append({
"@type": "ListItem",
"position": position,
"name": item["name"],
"item": item["url"],
})
return {
"@context": "https://schema.org",
"@type": "BreadcrumbList",
"itemListElement": breadcrumb_items,
}
def _build_pagination_info(
request: HttpRequest,
page_obj: Page,
base_url: str,
) -> dict[str, str] | None:
"""Build pagination link info for rel="next"/"prev" tags.
Args:
request: HTTP request to build absolute URLs.
page_obj: Django Page object from paginator.
base_url: Base URL for pagination (e.g., "/campaigns/?status=active").
Returns:
Dict with rel and url, or None if no prev/next.
"""
pagination_info: dict[str, str] | None = None
if page_obj.has_next():
next_url: str = f"{base_url}?page={page_obj.next_page_number()}"
if "?" in base_url:
# Preserve existing query params
next_url = f"{base_url}&page={page_obj.next_page_number()}"
pagination_info = {
"rel": "next",
"url": request.build_absolute_uri(next_url),
}
if page_obj.has_previous():
prev_url: str = f"{base_url}?page={page_obj.previous_page_number()}"
if "?" in base_url:
prev_url = f"{base_url}&page={page_obj.previous_page_number()}"
pagination_info = {
"rel": "prev",
"url": request.build_absolute_uri(prev_url),
}
return pagination_info
def emote_gallery_view(request: HttpRequest) -> HttpResponse:
@ -102,7 +239,14 @@ def emote_gallery_view(request: HttpRequest) -> HttpResponse:
"campaign": drop.campaign,
})
context: dict[str, list[dict[str, Any]]] = {"emotes": emotes}
seo_context: dict[str, Any] = _build_seo_context(
page_title="Twitch Emotes Gallery",
page_description="Browse all Twitch drop emotes and find the campaigns that award them.",
)
context: dict[str, Any] = {
"emotes": emotes,
**seo_context,
}
return render(request, "twitch/emote_gallery.html", context)
@ -161,10 +305,16 @@ def search_view(request: HttpRequest) -> HttpResponse:
Q(title__icontains=query) | Q(description__icontains=query),
).select_related("badge_set")
seo_context: dict[str, Any] = _build_seo_context(
page_title=f"Search Results for '{query}'" if query else "Search",
page_description=f"Search results for '{query}' across Twitch drops, campaigns, games, and more."
if query
else "Search for Twitch drops, campaigns, games, channels, and organizations.",
)
return render(
request,
"twitch/search_results.html",
{"query": query, "results": results},
{"query": query, "results": results, **seo_context},
)
@ -193,9 +343,24 @@ def org_list_view(request: HttpRequest) -> HttpResponse:
)
orgs_data: list[dict] = json.loads(serialized_orgs)
# CollectionPage schema for organizations list
collection_schema: dict[str, str] = {
"@context": "https://schema.org",
"@type": "CollectionPage",
"name": "Twitch Organizations",
"description": "Browse all Twitch organizations that offer drop campaigns and rewards.",
"url": request.build_absolute_uri("/organizations/"),
}
seo_context: dict[str, Any] = _build_seo_context(
page_title="Twitch Organizations",
page_description="Browse all Twitch organizations that offer drop campaigns and rewards.",
schema_data=collection_schema,
)
context: dict[str, Any] = {
"orgs": orgs,
"orgs_data": format_and_color_json(orgs_data),
**seo_context,
}
return render(request, "twitch/org_list.html", context)
@ -252,17 +417,47 @@ def organization_detail_view(request: HttpRequest, twitch_id: str) -> HttpRespon
games_data: list[dict] = json.loads(serialized_games)
org_data[0]["fields"]["games"] = games_data
org_name: str = organization.name or organization.twitch_id
games_count: int = games.count()
org_description: str = f"{org_name} offers {games_count} game(s) with Twitch drop campaigns and rewards."
org_schema: dict[str, str | dict[str, str]] = {
"@context": "https://schema.org",
"@type": "Organization",
"name": org_name,
"url": request.build_absolute_uri(reverse("twitch:organization_detail", args=[organization.twitch_id])),
"description": org_description,
}
# Breadcrumb schema
breadcrumb_schema: dict[str, Any] = _build_breadcrumb_schema([
{"name": "Home", "url": request.build_absolute_uri("/")},
{"name": "Organizations", "url": request.build_absolute_uri("/organizations/")},
{
"name": org_name,
"url": request.build_absolute_uri(reverse("twitch:organization_detail", args=[organization.twitch_id])),
},
])
seo_context: dict[str, Any] = _build_seo_context(
page_title=org_name,
page_description=org_description,
schema_data=org_schema,
breadcrumb_schema=breadcrumb_schema,
modified_date=organization.updated_at.isoformat() if organization.updated_at else None,
)
context: dict[str, Any] = {
"organization": organization,
"games": games,
"org_data": format_and_color_json(org_data[0]),
**seo_context,
}
return render(request, "twitch/organization_detail.html", context)
# MARK: /campaigns/
def drop_campaign_list_view(request: HttpRequest) -> HttpResponse:
def drop_campaign_list_view(request: HttpRequest) -> HttpResponse: # noqa: PLR0914, PLR0915
"""Function-based view for drop campaigns list.
Args:
@ -299,6 +494,50 @@ def drop_campaign_list_view(request: HttpRequest) -> HttpResponse:
except EmptyPage:
campaigns = paginator.page(paginator.num_pages)
title = "Twitch Drop Campaigns"
if status_filter:
title += f" ({status_filter.capitalize()})"
if game_filter:
try:
game: Game = Game.objects.get(twitch_id=game_filter)
title += f" - {game.display_name}"
except Game.DoesNotExist:
pass
description = "Browse all Twitch drop campaigns with active drops, upcoming campaigns, and rewards."
if status_filter == "active":
description = "Browse currently active Twitch drop campaigns with rewards available now."
elif status_filter == "upcoming":
description = "View upcoming Twitch drop campaigns starting soon."
elif status_filter == "expired":
description = "Browse expired Twitch drop campaigns."
# Build base URL for pagination
base_url = "/campaigns/"
if status_filter:
base_url += f"?status={status_filter}"
if game_filter:
base_url += f"&game={game_filter}"
elif game_filter:
base_url += f"?game={game_filter}"
pagination_info: dict[str, str] | None = _build_pagination_info(request, campaigns, base_url)
# CollectionPage schema for campaign list
collection_schema: dict[str, str] = {
"@context": "https://schema.org",
"@type": "CollectionPage",
"name": title,
"description": description,
"url": request.build_absolute_uri(base_url),
}
seo_context: dict[str, Any] = _build_seo_context(
page_title=title,
page_description=description,
pagination_info=pagination_info,
schema_data=collection_schema,
)
context: dict[str, Any] = {
"campaigns": campaigns,
"page_obj": campaigns,
@ -309,6 +548,7 @@ def drop_campaign_list_view(request: HttpRequest) -> HttpResponse:
"selected_game": game_filter or "",
"selected_per_page": per_page,
"selected_status": status_filter or "",
**seo_context,
}
return render(request, "twitch/campaign_list.html", context)
@ -377,10 +617,16 @@ def dataset_backups_view(request: HttpRequest) -> HttpResponse:
datasets.sort(key=operator.itemgetter("updated_at"), reverse=True)
seo_context: dict[str, Any] = _build_seo_context(
page_title="Database Backups - TTVDrops",
page_description="Download database backups and datasets containing Twitch drops, campaigns, and related data.",
robots_directive="noindex, follow",
)
context: dict[str, Any] = {
"datasets": datasets,
"data_dir": str(datasets_root),
"dataset_count": len(datasets),
**seo_context,
}
return render(request, "twitch/dataset_backups.html", context)
@ -462,7 +708,7 @@ def _enhance_drops_with_context(drops: QuerySet[TimeBasedDrop], now: datetime.da
# MARK: /campaigns/<twitch_id>/
def drop_campaign_detail_view(request: HttpRequest, twitch_id: str) -> HttpResponse: # noqa: PLR0914
def drop_campaign_detail_view(request: HttpRequest, twitch_id: str) -> HttpResponse: # noqa: PLR0914, PLR0915
"""Function-based view for a drop campaign detail.
Args:
@ -498,7 +744,7 @@ def drop_campaign_detail_view(request: HttpRequest, twitch_id: str) -> HttpRespo
.order_by("required_minutes_watched")
)
serialized_campaign = serialize(
serialized_campaign: str = serialize(
"json",
[campaign],
fields=(
@ -517,7 +763,7 @@ def drop_campaign_detail_view(request: HttpRequest, twitch_id: str) -> HttpRespo
"updated_at",
),
)
campaign_data = json.loads(serialized_campaign)
campaign_data: list[dict[str, Any]] = json.loads(serialized_campaign)
if drops.exists():
badge_benefit_names: set[str] = {
@ -549,7 +795,7 @@ def drop_campaign_detail_view(request: HttpRequest, twitch_id: str) -> HttpRespo
for i, drop in enumerate(drops):
drop_benefits: list[DropBenefit] = list(drop.benefits.all())
if drop_benefits:
serialized_benefits = serialize(
serialized_benefits: str = serialize(
"json",
drop_benefits,
fields=(
@ -564,7 +810,7 @@ def drop_campaign_detail_view(request: HttpRequest, twitch_id: str) -> HttpRespo
"distribution_type",
),
)
benefits_data = json.loads(serialized_benefits)
benefits_data: list[dict[str, Any]] = json.loads(serialized_benefits)
for benefit_data in benefits_data:
fields: dict[str, Any] = benefit_data.get("fields", {})
@ -604,6 +850,66 @@ def drop_campaign_detail_view(request: HttpRequest, twitch_id: str) -> HttpRespo
"allowed_channels": getattr(campaign, "channels_ordered", []),
}
campaign_name: str = campaign.name or campaign.clean_name or campaign.twitch_id
campaign_description: str = (
_truncate_description(campaign.description)
if campaign.description
else f"Twitch drop campaign: {campaign_name}"
)
campaign_image: str | None = campaign.image_url
campaign_schema: dict[str, str | dict[str, str]] = {
"@context": "https://schema.org",
"@type": "Event",
"name": campaign_name,
"description": campaign_description,
"url": request.build_absolute_uri(reverse("twitch:campaign_detail", args=[campaign.twitch_id])),
"eventStatus": "https://schema.org/EventScheduled",
"eventAttendanceMode": "https://schema.org/OnlineEventAttendanceMode",
"location": {
"@type": "VirtualLocation",
"url": "https://www.twitch.tv",
},
}
if campaign.start_at:
campaign_schema["startDate"] = campaign.start_at.isoformat()
if campaign.end_at:
campaign_schema["endDate"] = campaign.end_at.isoformat()
if campaign_image:
campaign_schema["image"] = campaign_image
if campaign.game and campaign.game.owners.exists():
owner: Organization | None = campaign.game.owners.first()
if owner:
campaign_schema["organizer"] = {
"@type": "Organization",
"name": owner.name or owner.twitch_id,
}
# Breadcrumb schema for navigation
game_name: str = campaign.game.display_name or campaign.game.name or campaign.game.twitch_id
breadcrumb_schema: dict[str, Any] = _build_breadcrumb_schema([
{"name": "Home", "url": request.build_absolute_uri("/")},
{"name": "Games", "url": request.build_absolute_uri("/games/")},
{
"name": game_name,
"url": request.build_absolute_uri(reverse("twitch:game_detail", args=[campaign.game.twitch_id])),
},
{
"name": campaign_name,
"url": request.build_absolute_uri(reverse("twitch:campaign_detail", args=[campaign.twitch_id])),
},
])
seo_context: dict[str, Any] = _build_seo_context(
page_title=campaign_name,
page_description=campaign_description,
page_image=campaign_image,
schema_data=campaign_schema,
breadcrumb_schema=breadcrumb_schema,
modified_date=campaign.updated_at.isoformat() if campaign.updated_at else None,
)
context.update(seo_context)
return render(request, "twitch/campaign_detail.html", context)
@ -681,6 +987,22 @@ class GamesGridView(ListView):
sorted(games_by_org.items(), key=lambda item: item[0].name),
)
# CollectionPage schema for games list
collection_schema: dict[str, str] = {
"@context": "https://schema.org",
"@type": "CollectionPage",
"name": "Twitch Drop Games",
"description": "Browse all Twitch games with active drop campaigns and rewards.",
"url": self.request.build_absolute_uri("/games/"),
}
seo_context: dict[str, Any] = _build_seo_context(
page_title="Twitch Drop Games",
page_description="Browse all Twitch games with active drop campaigns and rewards.",
schema_data=collection_schema,
)
context.update(seo_context)
return context
@ -709,16 +1031,16 @@ class GameDetailView(DetailView):
queryset = self.get_queryset()
# Use twitch_id as the lookup field since it's the primary key
twitch_id = self.kwargs.get("twitch_id")
twitch_id: str | None = self.kwargs.get("twitch_id")
try:
game = queryset.get(twitch_id=twitch_id)
game: Game = queryset.get(twitch_id=twitch_id)
except Game.DoesNotExist as exc:
msg = "No game found matching the query"
raise Http404(msg) from exc
return game
def get_context_data(self, **kwargs: object) -> dict[str, Any]:
def get_context_data(self, **kwargs: object) -> dict[str, Any]: # noqa: PLR0914
"""Add additional context data.
Args:
@ -832,6 +1154,45 @@ class GameDetailView(DetailView):
owners: list[Organization] = list(game.owners.all())
game_name: str = game.display_name or game.name or game.twitch_id
game_description: str = (
f"Twitch drop campaigns for {game_name}. View active, upcoming, and completed drop rewards."
)
game_image: str | None = game.box_art
game_schema: dict[str, Any] = {
"@context": "https://schema.org",
"@type": "VideoGame",
"name": game_name,
"description": game_description,
"url": self.request.build_absolute_uri(reverse("twitch:game_detail", args=[game.twitch_id])),
}
if game.box_art:
game_schema["image"] = game.box_art
if owners:
game_schema["publisher"] = {
"@type": "Organization",
"name": owners[0].name or owners[0].twitch_id,
}
# Breadcrumb schema
breadcrumb_schema: dict[str, Any] = _build_breadcrumb_schema([
{"name": "Home", "url": self.request.build_absolute_uri("/")},
{"name": "Games", "url": self.request.build_absolute_uri("/games/")},
{
"name": game_name,
"url": self.request.build_absolute_uri(reverse("twitch:game_detail", args=[game.twitch_id])),
},
])
seo_context: dict[str, Any] = _build_seo_context(
page_title=game_name,
page_description=game_description,
page_image=game_image,
schema_data=game_schema,
breadcrumb_schema=breadcrumb_schema,
modified_date=game.updated_at.isoformat() if game.updated_at else None,
)
context.update(
{
"active_campaigns": active_campaigns,
@ -842,6 +1203,7 @@ class GameDetailView(DetailView):
"drop_awarded_badges": drop_awarded_badges,
"now": now,
"game_data": format_and_color_json(game_data[0]),
**seo_context,
},
)
@ -903,6 +1265,28 @@ def dashboard(request: HttpRequest) -> HttpResponse:
.order_by("-starts_at")
)
# WebSite schema with SearchAction for sitelinks search box
website_schema: dict[str, str | dict[str, str | dict[str, str]]] = {
"@context": "https://schema.org",
"@type": "WebSite",
"name": "ttvdrops",
"url": request.build_absolute_uri("/"),
"potentialAction": {
"@type": "SearchAction",
"target": {
"@type": "EntryPoint",
"urlTemplate": request.build_absolute_uri("/search/?q={search_term_string}"),
},
"query-input": "required name=search_term_string",
},
}
seo_context: dict[str, Any] = _build_seo_context(
page_title="ttvdrops Dashboard",
page_description="Dashboard showing active Twitch drop campaigns, rewards, and quests. Track all current drops and campaigns.", # noqa: E501
og_type="website",
schema_data=website_schema,
)
return render(
request,
"twitch/dashboard.html",
@ -911,6 +1295,7 @@ def dashboard(request: HttpRequest) -> HttpResponse:
"campaigns_by_game": campaigns_by_game,
"active_reward_campaigns": active_reward_campaigns,
"now": now,
**seo_context,
},
)
@ -936,7 +1321,7 @@ def reward_campaign_list_view(request: HttpRequest) -> HttpResponse:
queryset = queryset.select_related("game").order_by("-starts_at")
# Optionally filter by status (active, upcoming, expired)
now = timezone.now()
now: datetime.datetime = timezone.now()
if status_filter == "active":
queryset = queryset.filter(starts_at__lte=now, ends_at__gte=now)
elif status_filter == "upcoming":
@ -953,6 +1338,44 @@ def reward_campaign_list_view(request: HttpRequest) -> HttpResponse:
except EmptyPage:
reward_campaigns = paginator.page(paginator.num_pages)
title = "Twitch Reward Campaigns"
if status_filter:
title += f" ({status_filter.capitalize()})"
description = "Browse all Twitch reward campaigns with active quests and rewards."
if status_filter == "active":
description = "Browse currently active Twitch reward campaigns with quests and rewards available now."
elif status_filter == "upcoming":
description = "View upcoming Twitch reward campaigns starting soon."
elif status_filter == "expired":
description = "Browse expired Twitch reward campaigns."
# Build base URL for pagination
base_url = "/reward-campaigns/"
if status_filter:
base_url += f"?status={status_filter}"
if game_filter:
base_url += f"&game={game_filter}"
elif game_filter:
base_url += f"?game={game_filter}"
pagination_info: dict[str, str] | None = _build_pagination_info(request, reward_campaigns, base_url)
# CollectionPage schema for reward campaigns list
collection_schema: dict[str, str | dict[str, str | dict[str, str]]] = {
"@context": "https://schema.org",
"@type": "CollectionPage",
"name": title,
"description": description,
"url": request.build_absolute_uri(base_url),
}
seo_context: dict[str, Any] = _build_seo_context(
page_title=title,
page_description=description,
pagination_info=pagination_info,
schema_data=collection_schema,
)
context: dict[str, Any] = {
"reward_campaigns": reward_campaigns,
"games": Game.objects.all().order_by("display_name"),
@ -961,6 +1384,7 @@ def reward_campaign_list_view(request: HttpRequest) -> HttpResponse:
"selected_game": game_filter or "",
"selected_per_page": per_page,
"selected_status": status_filter or "",
**seo_context,
}
return render(request, "twitch/reward_campaign_list.html", context)
@ -987,7 +1411,7 @@ def reward_campaign_detail_view(request: HttpRequest, twitch_id: str) -> HttpRes
msg = "No reward campaign found matching the query"
raise Http404(msg) from exc
serialized_campaign = serialize(
serialized_campaign: str = serialize(
"json",
[reward_campaign],
fields=(
@ -1011,11 +1435,62 @@ def reward_campaign_detail_view(request: HttpRequest, twitch_id: str) -> HttpRes
now: datetime.datetime = timezone.now()
campaign_name: str = reward_campaign.name or reward_campaign.twitch_id
campaign_description: str = (
_truncate_description(reward_campaign.summary)
if reward_campaign.summary
else f"Twitch reward campaign: {campaign_name}"
)
campaign_schema: dict[str, str | dict[str, str]] = {
"@context": "https://schema.org",
"@type": "Event",
"name": campaign_name,
"description": campaign_description,
"url": request.build_absolute_uri(reverse("twitch:reward_campaign_detail", args=[reward_campaign.twitch_id])),
"eventStatus": "https://schema.org/EventScheduled",
"eventAttendanceMode": "https://schema.org/OnlineEventAttendanceMode",
"location": {
"@type": "VirtualLocation",
"url": "https://www.twitch.tv",
},
}
if reward_campaign.starts_at:
campaign_schema["startDate"] = reward_campaign.starts_at.isoformat()
if reward_campaign.ends_at:
campaign_schema["endDate"] = reward_campaign.ends_at.isoformat()
if reward_campaign.game and reward_campaign.game.owners.exists():
owner = reward_campaign.game.owners.first()
campaign_schema["organizer"] = {
"@type": "Organization",
"name": owner.name or owner.twitch_id,
}
# Breadcrumb schema
breadcrumb_schema: dict[str, Any] = _build_breadcrumb_schema([
{"name": "Home", "url": request.build_absolute_uri("/")},
{"name": "Reward Campaigns", "url": request.build_absolute_uri("/reward-campaigns/")},
{
"name": campaign_name,
"url": request.build_absolute_uri(
reverse("twitch:reward_campaign_detail", args=[reward_campaign.twitch_id]),
),
},
])
seo_context: dict[str, Any] = _build_seo_context(
page_title=campaign_name,
page_description=campaign_description,
schema_data=campaign_schema,
breadcrumb_schema=breadcrumb_schema,
modified_date=reward_campaign.updated_at.isoformat() if reward_campaign.updated_at else None,
)
context: dict[str, Any] = {
"reward_campaign": reward_campaign,
"now": now,
"campaign_data": format_and_color_json(campaign_data[0]),
"is_active": reward_campaign.is_active,
**seo_context,
}
return render(request, "twitch/reward_campaign_detail.html", context)
@ -1061,7 +1536,7 @@ def debug_view(request: HttpRequest) -> HttpResponse:
# Duplicate campaign names per game.
# We retrieve the game's name for user-friendly display.
duplicate_name_campaigns = (
duplicate_name_campaigns: QuerySet[DropCampaign, dict[str, Any]] = (
DropCampaign.objects
.values("game__display_name", "name", "game__twitch_id")
.annotate(name_count=Count("twitch_id"))
@ -1113,6 +1588,13 @@ def debug_view(request: HttpRequest) -> HttpResponse:
"campaigns_missing_dropcampaigndetails": campaigns_missing_dropcampaigndetails,
}
seo_context: dict[str, Any] = _build_seo_context(
page_title="Debug - TTVDrops",
page_description="Debug page showing data inconsistencies and potential issues in the TTVDrops database.",
robots_directive="noindex, nofollow",
)
context.update(seo_context)
return render(
request,
"twitch/debug.html",
@ -1124,7 +1606,7 @@ def debug_view(request: HttpRequest) -> HttpResponse:
class GamesListView(GamesGridView):
"""List view for games in simple list format."""
template_name = "twitch/games_list.html"
template_name: str = "twitch/games_list.html"
# MARK: /docs/rss/
@ -1147,15 +1629,15 @@ def docs_rss_view(request: HttpRequest) -> HttpResponse:
def _pretty_example(xml_str: str, max_items: int = 1) -> str:
try:
trimmed = xml_str.strip()
first_item = trimmed.find("<item")
trimmed: str = xml_str.strip()
first_item: int = trimmed.find("<item")
if first_item != -1 and max_items == 1:
second_item = trimmed.find("<item", first_item + 5)
second_item: int = trimmed.find("<item", first_item + 5)
if second_item != -1:
end_channel = trimmed.find("</channel>", second_item)
end_channel: int = trimmed.find("</channel>", second_item)
if end_channel != -1:
trimmed = trimmed[:second_item] + trimmed[end_channel:]
formatted = trimmed.replace("><", ">\n<")
formatted: str = trimmed.replace("><", ">\n<")
return "\n".join(line for line in formatted.splitlines() if line.strip())
except Exception: # pragma: no cover - defensive formatting for docs only
logger.exception("Failed to pretty-print RSS example")
@ -1232,6 +1714,11 @@ def docs_rss_view(request: HttpRequest) -> HttpResponse:
},
]
seo_context: dict[str, Any] = _build_seo_context(
page_title="RSS Feeds - TTVDrops",
page_description="Available RSS feeds for Twitch drops, campaigns, games, organizations, and rewards.",
robots_directive="noindex, follow",
)
return render(
request,
"twitch/docs_rss.html",
@ -1240,6 +1727,7 @@ def docs_rss_view(request: HttpRequest) -> HttpResponse:
"filtered_feeds": filtered_feeds,
"sample_game": sample_game,
"sample_org": sample_org,
**seo_context,
},
)
@ -1285,7 +1773,35 @@ class ChannelListView(ListView):
dict: Context data.
"""
context: dict[str, Any] = super().get_context_data(**kwargs)
context["search_query"] = self.request.GET.get("search", "")
search_query: str = self.request.GET.get("search", "")
# Build pagination info
base_url = "/channels/"
if search_query:
base_url += f"?search={search_query}"
page_obj: Page | None = context.get("page_obj")
pagination_info: dict[str, str] | None = (
_build_pagination_info(self.request, page_obj, base_url) if isinstance(page_obj, Page) else None
)
# CollectionPage schema for channels list
collection_schema: dict[str, str | dict[str, str | dict[str, str]]] = {
"@context": "https://schema.org",
"@type": "CollectionPage",
"name": "Twitch Channels",
"description": "Browse Twitch channels participating in drop campaigns and find their available rewards.",
"url": self.request.build_absolute_uri("/channels/"),
}
seo_context: dict[str, Any] = _build_seo_context(
page_title="Twitch Channels",
page_description="Browse Twitch channels participating in drop campaigns and find their available rewards.",
pagination_info=pagination_info,
schema_data=collection_schema,
)
context.update(seo_context)
context["search_query"] = search_query
return context
@ -1313,16 +1829,16 @@ class ChannelDetailView(DetailView):
if queryset is None:
queryset = self.get_queryset()
twitch_id = self.kwargs.get("twitch_id")
twitch_id: str | None = self.kwargs.get("twitch_id")
try:
channel = queryset.get(twitch_id=twitch_id)
channel: Channel = queryset.get(twitch_id=twitch_id)
except Channel.DoesNotExist as exc:
msg = "No channel found matching the query"
raise Http404(msg) from exc
return channel
def get_context_data(self, **kwargs: object) -> dict[str, Any]:
def get_context_data(self, **kwargs: object) -> dict[str, Any]: # noqa: PLR0914
"""Add additional context data.
Args:
@ -1376,7 +1892,7 @@ class ChannelDetailView(DetailView):
campaign for campaign in all_campaigns if campaign.end_at is not None and campaign.end_at < now
]
serialized_channel = serialize(
serialized_channel: str = serialize(
"json",
[channel],
fields=(
@ -1387,10 +1903,10 @@ class ChannelDetailView(DetailView):
"updated_at",
),
)
channel_data = json.loads(serialized_channel)
channel_data: list[dict[str, Any]] = json.loads(serialized_channel)
if all_campaigns.exists():
serialized_campaigns = serialize(
serialized_campaigns: str = serialize(
"json",
all_campaigns,
fields=(
@ -1406,9 +1922,39 @@ class ChannelDetailView(DetailView):
"updated_at",
),
)
campaigns_data = json.loads(serialized_campaigns)
campaigns_data: list[dict[str, Any]] = json.loads(serialized_campaigns)
channel_data[0]["fields"]["campaigns"] = campaigns_data
channel_name: str = channel.display_name or channel.name or channel.twitch_id
channel_description: str = f"Twitch channel {channel_name} participating in drop campaigns. View active, upcoming, and expired campaign rewards." # noqa: E501
channel_schema: dict[str, Any] = {
"@context": "https://schema.org",
"@type": "BroadcastChannel",
"name": channel_name,
"description": channel_description,
"url": self.request.build_absolute_uri(reverse("twitch:channel_detail", args=[channel.twitch_id])),
"broadcastChannelId": channel.twitch_id,
"providerName": "Twitch",
}
# Breadcrumb schema
breadcrumb_schema: dict[str, Any] = _build_breadcrumb_schema([
{"name": "Home", "url": self.request.build_absolute_uri("/")},
{"name": "Channels", "url": self.request.build_absolute_uri("/channels/")},
{
"name": channel_name,
"url": self.request.build_absolute_uri(reverse("twitch:channel_detail", args=[channel.twitch_id])),
},
])
seo_context: dict[str, Any] = _build_seo_context(
page_title=channel_name,
page_description=channel_description,
schema_data=channel_schema,
breadcrumb_schema=breadcrumb_schema,
modified_date=channel.updated_at.isoformat() if channel.updated_at else None,
)
context.update(
{
"active_campaigns": active_campaigns,
@ -1416,6 +1962,7 @@ class ChannelDetailView(DetailView):
"expired_campaigns": expired_campaigns,
"now": now,
"channel_data": format_and_color_json(channel_data[0]),
**seo_context,
},
)
@ -1453,9 +2000,24 @@ def badge_list_view(request: HttpRequest) -> HttpResponse:
for badge_set in badge_sets
]
# CollectionPage schema for badges list
collection_schema: dict[str, str] = {
"@context": "https://schema.org",
"@type": "CollectionPage",
"name": "Twitch Chat Badges",
"description": "Browse all Twitch chat badges awarded through drop campaigns and their associated rewards.",
"url": request.build_absolute_uri("/badges/"),
}
seo_context: dict[str, Any] = _build_seo_context(
page_title="Twitch Chat Badges",
page_description="Browse all Twitch chat badges awarded through drop campaigns and their associated rewards.",
schema_data=collection_schema,
)
context: dict[str, Any] = {
"badge_sets": badge_sets,
"badge_data": badge_data,
**seo_context,
}
return render(request, "twitch/badge_list.html", context)
@ -1500,7 +2062,7 @@ def badge_set_detail_view(request: HttpRequest, set_id: str) -> HttpResponse:
badge.award_campaigns = list(campaigns) # pyright: ignore[reportAttributeAccessIssue]
# Serialize for JSON display
serialized_set = serialize(
serialized_set: str = serialize(
"json",
[badge_set],
fields=(
@ -1512,7 +2074,7 @@ def badge_set_detail_view(request: HttpRequest, set_id: str) -> HttpResponse:
set_data: list[dict[str, Any]] = json.loads(serialized_set)
if badges.exists():
serialized_badges = serialize(
serialized_badges: str = serialize(
"json",
badges,
fields=(
@ -1531,10 +2093,29 @@ def badge_set_detail_view(request: HttpRequest, set_id: str) -> HttpResponse:
badges_data: list[dict[str, Any]] = json.loads(serialized_badges)
set_data[0]["fields"]["badges"] = badges_data
badge_set_name: str = badge_set.set_id
badge_set_description: str = (
f"Twitch chat badge set {badge_set_name} with {badges.count()} badge(s) awarded through drop campaigns."
)
badge_schema: dict[str, Any] = {
"@context": "https://schema.org",
"@type": "ItemList",
"name": badge_set_name,
"description": badge_set_description,
"url": request.build_absolute_uri(reverse("twitch:badge_set_detail", args=[badge_set.set_id])),
}
seo_context: dict[str, Any] = _build_seo_context(
page_title=f"Badge Set: {badge_set_name}",
page_description=badge_set_description,
schema_data=badge_schema,
)
context: dict[str, Any] = {
"badge_set": badge_set,
"badges": badges,
"set_data": format_and_color_json(set_data[0]),
**seo_context,
}
return render(request, "twitch/badge_set_detail.html", context)
@ -1570,7 +2151,7 @@ def export_campaigns_csv(request: HttpRequest) -> HttpResponse:
queryset = queryset.filter(end_at__lt=now)
# Create CSV response
response = HttpResponse(content_type="text/csv")
response: HttpResponse = HttpResponse(content_type="text/csv")
response["Content-Disposition"] = "attachment; filename=campaigns.csv"
writer = csv.writer(response)
@ -1591,18 +2172,18 @@ def export_campaigns_csv(request: HttpRequest) -> HttpResponse:
# Determine campaign status
if campaign.start_at and campaign.end_at:
if campaign.start_at <= now <= campaign.end_at:
status = "Active"
status: str = "Active"
elif campaign.start_at > now:
status = "Upcoming"
status: str = "Upcoming"
else:
status = "Expired"
status: str = "Expired"
else:
status = "Unknown"
status: str = "Unknown"
writer.writerow([
campaign.twitch_id,
campaign.name,
campaign.description[:100] if campaign.description else "", # Truncate for CSV
campaign.description or "",
campaign.game.name if campaign.game else "",
status,
campaign.start_at.isoformat() if campaign.start_at else "",
@ -1635,7 +2216,7 @@ def export_campaigns_json(request: HttpRequest) -> HttpResponse:
queryset = queryset.prefetch_related("game__owners").order_by("-start_at")
now = timezone.now()
now: datetime.datetime = timezone.now()
if status_filter == "active":
queryset = queryset.filter(start_at__lte=now, end_at__gte=now)
elif status_filter == "upcoming":
@ -1649,13 +2230,13 @@ def export_campaigns_json(request: HttpRequest) -> HttpResponse:
# Determine campaign status
if campaign.start_at and campaign.end_at:
if campaign.start_at <= now <= campaign.end_at:
status = "Active"
status: str = "Active"
elif campaign.start_at > now:
status = "Upcoming"
status: str = "Upcoming"
else:
status = "Expired"
status: str = "Expired"
else:
status = "Unknown"
status: str = "Unknown"
campaigns_data.append({
"twitch_id": campaign.twitch_id,
@ -1821,3 +2402,147 @@ def export_organizations_json(request: HttpRequest) -> HttpResponse: # noqa: AR
response["Content-Disposition"] = "attachment; filename=organizations.json"
return response
# MARK: /sitemap.xml
def sitemap_view(request: HttpRequest) -> HttpResponse:
    """Generate a dynamic XML sitemap for search engines.

    Static pages are listed first, followed by detail pages for games, drop
    campaigns, organizations, channels, badge sets, and reward campaigns.
    A ``<lastmod>`` element is emitted for objects that carry an
    ``updated_at`` timestamp.

    Args:
        request: The HTTP request (used to build absolute URLs).

    Returns:
        HttpResponse: XML sitemap served as ``application/xml``.
    """
    # Local import keeps the module's import block untouched; escape() is
    # needed because URL values interpolated into XML must not contain raw
    # '&', '<' or '>' (an unescaped '&' makes the whole sitemap invalid XML).
    from xml.sax.saxutils import escape

    base_url: str = f"{request.scheme}://{request.get_host()}"

    def detail_entries(objects, url_name: str, id_attr: str, priority: str, changefreq: str) -> list[dict[str, str]]:
        """Build one sitemap entry per model instance in *objects*."""
        entries: list[dict[str, str]] = []
        for obj in objects:
            entry: dict[str, str] = {
                "url": f"{base_url}{reverse(url_name, args=[getattr(obj, id_attr)])}",
                "priority": priority,
                "changefreq": changefreq,
            }
            # Not every model tracks updates (badge sets had no lastmod in
            # the original loops) — fall back to None and omit the element.
            updated_at = getattr(obj, "updated_at", None)
            if updated_at:
                entry["lastmod"] = updated_at.isoformat()
            entries.append(entry)
        return entries

    # Static pages.
    sitemap_urls: list[dict[str, str]] = [
        {"url": f"{base_url}/", "priority": "1.0", "changefreq": "daily"},
        {"url": f"{base_url}/campaigns/", "priority": "0.9", "changefreq": "daily"},
        {"url": f"{base_url}/reward-campaigns/", "priority": "0.9", "changefreq": "daily"},
        {"url": f"{base_url}/games/", "priority": "0.9", "changefreq": "weekly"},
        {"url": f"{base_url}/organizations/", "priority": "0.8", "changefreq": "weekly"},
        {"url": f"{base_url}/channels/", "priority": "0.8", "changefreq": "weekly"},
        {"url": f"{base_url}/badges/", "priority": "0.7", "changefreq": "monthly"},
        {"url": f"{base_url}/emotes/", "priority": "0.7", "changefreq": "monthly"},
        {"url": f"{base_url}/search/", "priority": "0.6", "changefreq": "monthly"},
    ]

    # Dynamic detail pages, one helper call per model instead of five
    # copy-pasted loops.
    sitemap_urls += detail_entries(Game.objects.all(), "twitch:game_detail", "twitch_id", "0.8", "weekly")
    sitemap_urls += detail_entries(DropCampaign.objects.all(), "twitch:campaign_detail", "twitch_id", "0.7", "weekly")
    sitemap_urls += detail_entries(
        Organization.objects.all(), "twitch:organization_detail", "twitch_id", "0.7", "weekly"
    )
    sitemap_urls += detail_entries(Channel.objects.all(), "twitch:channel_detail", "twitch_id", "0.6", "weekly")
    sitemap_urls += detail_entries(ChatBadgeSet.objects.all(), "twitch:badge_set_detail", "set_id", "0.5", "monthly")
    sitemap_urls += detail_entries(
        RewardCampaign.objects.all(), "twitch:reward_campaign_detail", "twitch_id", "0.6", "weekly"
    )

    # Build the XML with a list + join (linear) rather than repeated string
    # concatenation, escaping every interpolated value.
    parts: list[str] = [
        '<?xml version="1.0" encoding="UTF-8"?>',
        '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">',
    ]
    for url_entry in sitemap_urls:
        parts.append("  <url>")
        parts.append(f"    <loc>{escape(url_entry['url'])}</loc>")
        lastmod: str | None = url_entry.get("lastmod")
        if lastmod:
            parts.append(f"    <lastmod>{escape(lastmod)}</lastmod>")
        parts.append(f"    <changefreq>{url_entry.get('changefreq', 'monthly')}</changefreq>")
        parts.append(f"    <priority>{url_entry.get('priority', '0.5')}</priority>")
        parts.append("  </url>")
    parts.append("</urlset>")

    return HttpResponse("\n".join(parts), content_type="application/xml")
# MARK: /robots.txt
def robots_txt_view(request: HttpRequest) -> HttpResponse:
    """Generate robots.txt for search engine crawlers.

    Allows all crawlers site-wide, but disallows the admin, debug, dataset
    backup, RSS docs, and export endpoints, and advertises the sitemap URL.

    Args:
        request: The HTTP request (used to build the absolute sitemap URL).

    Returns:
        HttpResponse: robots.txt content served as ``text/plain``.
    """
    # Absolute scheme+host so the Sitemap directive is a full URL, which is
    # what crawlers expect for sitemap references in robots.txt.
    base_url: str = f"{request.scheme}://{request.get_host()}"
    # NOTE: the literal's lines are intentionally flush-left — any leading
    # whitespace would end up verbatim in the served robots.txt.
    robots_content: str = f"""User-agent: *
Allow: /
Disallow: /admin/
Disallow: /debug/
Disallow: /datasets/
Disallow: /docs/rss/
Disallow: /export/
# Sitemap location
Sitemap: {base_url}/sitemap.xml
"""
    return HttpResponse(robots_content, content_type="text/plain")