Lower line-length to default and don't add from __future__ import annotations to everything

This commit is contained in:
Joakim Hellsén 2026-03-09 04:37:54 +01:00
commit 1118c03c1b
Signed by: Joakim Hellsén
SSH key fingerprint: SHA256:/9h/CsExpFp+PRhsfA0xznFx2CGfTT5R/kpuFfUgEQk
46 changed files with 2338 additions and 1085 deletions

View file

@ -1,7 +1,5 @@
"""Management command to backfill image dimensions for existing cached images."""
from __future__ import annotations
from django.core.management.base import BaseCommand
from twitch.models import DropBenefit

View file

@ -1,5 +1,3 @@
from __future__ import annotations
import io
import os
import shutil
@ -79,7 +77,10 @@ class Command(BaseCommand):
msg = f"Unsupported database backend: {django_connection.vendor}"
raise CommandError(msg)
created_at: datetime = datetime.fromtimestamp(output_path.stat().st_mtime, tz=timezone.get_current_timezone())
created_at: datetime = datetime.fromtimestamp(
output_path.stat().st_mtime,
tz=timezone.get_current_timezone(),
)
self.stdout.write(
self.style.SUCCESS(
f"Backup created: {output_path} (updated {created_at.isoformat()})",
@ -111,7 +112,11 @@ def _get_allowed_tables(prefix: str) -> list[str]:
return [row[0] for row in cursor.fetchall()]
def _write_sqlite_dump(handle: io.TextIOBase, connection: sqlite3.Connection, tables: list[str]) -> None:
def _write_sqlite_dump(
handle: io.TextIOBase,
connection: sqlite3.Connection,
tables: list[str],
) -> None:
"""Write a SQL dump containing schema and data for the requested tables.
Args:
@ -154,7 +159,11 @@ def _get_table_schema(connection: sqlite3.Connection, table: str) -> str:
return row[0] if row and row[0] else ""
def _write_table_rows(handle: io.TextIOBase, connection: sqlite3.Connection, table: str) -> None:
def _write_table_rows(
handle: io.TextIOBase,
connection: sqlite3.Connection,
table: str,
) -> None:
"""Write INSERT statements for a table.
Args:
@ -169,7 +178,11 @@ def _write_table_rows(handle: io.TextIOBase, connection: sqlite3.Connection, tab
handle.write(f'INSERT INTO "{table}" VALUES ({values});\n') # noqa: S608
def _write_indexes(handle: io.TextIOBase, connection: sqlite3.Connection, tables: list[str]) -> None:
def _write_indexes(
handle: io.TextIOBase,
connection: sqlite3.Connection,
tables: list[str],
) -> None:
"""Write CREATE INDEX statements for included tables.
Args:
@ -251,10 +264,7 @@ def _write_postgres_dump(output_path: Path, tables: list[str]) -> None:
msg = "pg_dump process did not provide stdout or stderr."
raise CommandError(msg)
with (
output_path.open("wb") as raw_handle,
zstd.open(raw_handle, "w") as compressed,
):
with output_path.open("wb") as raw_handle, zstd.open(raw_handle, "w") as compressed:
for chunk in iter(lambda: process.stdout.read(64 * 1024), b""): # pyright: ignore[reportOptionalMemberAccess]
compressed.write(chunk)

View file

@ -1,11 +1,10 @@
from __future__ import annotations
import json
import os
import sys
from datetime import UTC
from datetime import datetime
from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any
from typing import Literal
from urllib.parse import urlparse
@ -18,8 +17,6 @@ from colorama import init as colorama_init
from django.core.files.base import ContentFile
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.core.management.base import CommandParser
from json_repair import JSONReturnType
from pydantic import ValidationError
from tqdm import tqdm
@ -31,21 +28,26 @@ from twitch.models import Game
from twitch.models import Organization
from twitch.models import RewardCampaign
from twitch.models import TimeBasedDrop
from twitch.schemas import ChannelInfoSchema
from twitch.schemas import CurrentUserSchema
from twitch.schemas import DropBenefitEdgeSchema
from twitch.schemas import DropBenefitSchema
from twitch.schemas import DropCampaignACLSchema
from twitch.schemas import DropCampaignSchema
from twitch.schemas import GameSchema
from twitch.schemas import GraphQLResponse
from twitch.schemas import OrganizationSchema
from twitch.schemas import RewardCampaign as RewardCampaignSchema
from twitch.schemas import TimeBasedDropSchema
from twitch.utils import is_twitch_box_art_url
from twitch.utils import normalize_twitch_box_art_url
from twitch.utils import parse_date
if TYPE_CHECKING:
from django.core.management.base import CommandParser
from json_repair import JSONReturnType
from twitch.schemas import ChannelInfoSchema
from twitch.schemas import CurrentUserSchema
from twitch.schemas import DropBenefitEdgeSchema
from twitch.schemas import DropBenefitSchema
from twitch.schemas import DropCampaignACLSchema
from twitch.schemas import DropCampaignSchema
from twitch.schemas import GameSchema
from twitch.schemas import RewardCampaign as RewardCampaignSchema
from twitch.schemas import TimeBasedDropSchema
def get_broken_directory_root() -> Path:
"""Get the root broken directory path from environment or default.
@ -83,10 +85,7 @@ def get_imported_directory_root() -> Path:
return home / "ttvdrops" / "imported"
def _build_broken_directory(
reason: str,
operation_name: str | None = None,
) -> Path:
def _build_broken_directory(reason: str, operation_name: str | None = None) -> Path:
"""Compute a deeply nested broken directory for triage.
Directory pattern: <broken_root>/<reason>/<operation>/<YYYY>/<MM>/<DD>
@ -104,16 +103,32 @@ def _build_broken_directory(
# If operation_name matches reason, skip it to avoid duplicate directories
if operation_name and operation_name.replace(" ", "_") == safe_reason:
broken_dir: Path = get_broken_directory_root() / safe_reason / f"{now:%Y}" / f"{now:%m}" / f"{now:%d}"
broken_dir: Path = (
get_broken_directory_root()
/ safe_reason
/ f"{now:%Y}"
/ f"{now:%m}"
/ f"{now:%d}"
)
else:
op_segment: str = (operation_name or "unknown_op").replace(" ", "_")
broken_dir = get_broken_directory_root() / safe_reason / op_segment / f"{now:%Y}" / f"{now:%m}" / f"{now:%d}"
broken_dir = (
get_broken_directory_root()
/ safe_reason
/ op_segment
/ f"{now:%Y}"
/ f"{now:%m}"
/ f"{now:%d}"
)
broken_dir.mkdir(parents=True, exist_ok=True)
return broken_dir
def move_failed_validation_file(file_path: Path, operation_name: str | None = None) -> Path:
def move_failed_validation_file(
file_path: Path,
operation_name: str | None = None,
) -> Path:
"""Moves a file that failed validation to a 'broken' subdirectory.
Args:
@ -178,7 +193,12 @@ def move_completed_file(
Returns:
Path to the directory where the file was moved.
"""
safe_op: str = (operation_name or "unknown_op").replace(" ", "_").replace("/", "_").replace("\\", "_")
safe_op: str = (
(operation_name or "unknown_op")
.replace(" ", "_")
.replace("/", "_")
.replace("\\", "_")
)
target_dir: Path = get_imported_directory_root() / safe_op
if campaign_structure:
@ -249,7 +269,12 @@ def detect_error_only_response(
errors: Any = item.get("errors")
data: Any = item.get("data")
# Data is missing if key doesn't exist or value is None
if errors and data is None and isinstance(errors, list) and len(errors) > 0:
if (
errors
and data is None
and isinstance(errors, list)
and len(errors) > 0
):
first_error: dict[str, Any] = errors[0]
message: str = first_error.get("message", "unknown error")
return f"error_only: {message}"
@ -327,7 +352,7 @@ def repair_partially_broken_json(raw_text: str) -> str: # noqa: PLR0915
"""
# Strategy 1: Direct repair attempt
try:
fixed: str = json_repair.repair_json(raw_text)
fixed: str = json_repair.repair_json(raw_text, logging=False)
# Validate it produces valid JSON
parsed_data = json.loads(fixed)
@ -335,7 +360,9 @@ def repair_partially_broken_json(raw_text: str) -> str: # noqa: PLR0915
if isinstance(parsed_data, list):
# Filter to only keep GraphQL responses
filtered = [
item for item in parsed_data if isinstance(item, dict) and ("data" in item or "extensions" in item)
item
for item in parsed_data
if isinstance(item, dict) and ("data" in item or "extensions" in item)
]
if filtered:
# If we filtered anything out, return the filtered version
@ -358,7 +385,10 @@ def repair_partially_broken_json(raw_text: str) -> str: # noqa: PLR0915
# Validate that all items look like GraphQL responses
if isinstance(wrapped_data, list) and wrapped_data: # noqa: SIM102
# Check if all items have "data" or "extensions" (GraphQL response structure)
if all(isinstance(item, dict) and ("data" in item or "extensions" in item) for item in wrapped_data):
if all(
isinstance(item, dict) and ("data" in item or "extensions" in item)
for item in wrapped_data
):
return wrapped
            except (ValueError, json.JSONDecodeError):
pass
@ -405,7 +435,7 @@ def repair_partially_broken_json(raw_text: str) -> str: # noqa: PLR0915
line: str = line.strip() # noqa: PLW2901
if line and line.startswith("{"):
try:
fixed_line: str = json_repair.repair_json(line)
fixed_line: str = json_repair.repair_json(line, logging=False)
obj = json.loads(fixed_line)
# Only keep objects that look like GraphQL responses
if "data" in obj or "extensions" in obj:
@ -428,11 +458,7 @@ class Command(BaseCommand):
def add_arguments(self, parser: CommandParser) -> None:
"""Populate the command with arguments."""
parser.add_argument(
"path",
type=str,
help="Path to JSON file or directory",
)
parser.add_argument("path", type=str, help="Path to JSON file or directory")
parser.add_argument(
"--recursive",
action="store_true",
@ -487,7 +513,9 @@ class Command(BaseCommand):
for response_data in responses:
if isinstance(response_data, dict):
try:
response: GraphQLResponse = GraphQLResponse.model_validate(response_data)
response: GraphQLResponse = GraphQLResponse.model_validate(
response_data,
)
valid_responses.append(response)
except ValidationError as e:
@ -497,8 +525,13 @@ class Command(BaseCommand):
# Move invalid inputs out of the hot path so future runs can progress.
if not options.get("skip_broken_moves"):
op_name: str | None = extract_operation_name_from_parsed(response_data)
broken_dir = move_failed_validation_file(file_path, operation_name=op_name)
op_name: str | None = extract_operation_name_from_parsed(
response_data,
)
broken_dir = move_failed_validation_file(
file_path,
operation_name=op_name,
)
# Once the file has been moved, bail out so we don't try to move it again later.
return [], broken_dir
@ -511,10 +544,7 @@ class Command(BaseCommand):
return valid_responses, broken_dir
def _get_or_create_organization(
self,
org_data: OrganizationSchema,
) -> Organization:
def _get_or_create_organization(self, org_data: OrganizationSchema) -> Organization:
"""Get or create an organization.
Args:
@ -525,12 +555,12 @@ class Command(BaseCommand):
"""
org_obj, created = Organization.objects.update_or_create(
twitch_id=org_data.twitch_id,
defaults={
"name": org_data.name,
},
defaults={"name": org_data.name},
)
if created:
tqdm.write(f"{Fore.GREEN}{Style.RESET_ALL} Created new organization: {org_data.name}")
tqdm.write(
f"{Fore.GREEN}{Style.RESET_ALL} Created new organization: {org_data.name}",
)
return org_obj
@ -572,7 +602,9 @@ class Command(BaseCommand):
if created or owner_orgs:
game_obj.owners.add(*owner_orgs)
if created:
tqdm.write(f"{Fore.GREEN}{Style.RESET_ALL} Created new game: {game_data.display_name}")
tqdm.write(
f"{Fore.GREEN}{Style.RESET_ALL} Created new game: {game_data.display_name}",
)
self._download_game_box_art(game_obj, game_obj.box_art)
return game_obj
@ -615,13 +647,12 @@ class Command(BaseCommand):
channel_obj, created = Channel.objects.update_or_create(
twitch_id=channel_info.twitch_id,
defaults={
"name": channel_info.name,
"display_name": display_name,
},
defaults={"name": channel_info.name, "display_name": display_name},
)
if created:
tqdm.write(f"{Fore.GREEN}{Style.RESET_ALL} Created new channel: {display_name}")
tqdm.write(
f"{Fore.GREEN}{Style.RESET_ALL} Created new channel: {display_name}",
)
return channel_obj
@ -638,12 +669,13 @@ class Command(BaseCommand):
file_path: Path to the file being processed.
options: Command options dictionary.
Raises:
ValueError: If datetime parsing fails for campaign dates and
crash-on-error is enabled.
Returns:
Tuple of (success flag, broken directory path if moved).
Raises:
ValueError: If datetime parsing fails for campaign dates and
crash-on-error is enabled.
"""
valid_responses, broken_dir = self._validate_responses(
responses=responses,
@ -659,7 +691,9 @@ class Command(BaseCommand):
campaigns_to_process: list[DropCampaignSchema] = []
# Source 1: User or CurrentUser field (handles plural, singular, inventory)
user_obj: CurrentUserSchema | None = response.data.current_user or response.data.user
user_obj: CurrentUserSchema | None = (
response.data.current_user or response.data.user
)
if user_obj and user_obj.drop_campaigns:
campaigns_to_process.extend(user_obj.drop_campaigns)
@ -676,7 +710,11 @@ class Command(BaseCommand):
for drop_campaign in campaigns_to_process:
# Handle campaigns without owner (e.g., from Inventory operation)
owner_data: OrganizationSchema | None = getattr(drop_campaign, "owner", None)
owner_data: OrganizationSchema | None = getattr(
drop_campaign,
"owner",
None,
)
org_obj: Organization | None = None
if owner_data:
org_obj = self._get_or_create_organization(org_data=owner_data)
@ -690,7 +728,9 @@ class Command(BaseCommand):
end_at_dt: datetime | None = parse_date(drop_campaign.end_at)
if start_at_dt is None or end_at_dt is None:
tqdm.write(f"{Fore.RED}{Style.RESET_ALL} Invalid datetime in campaign: {drop_campaign.name}")
tqdm.write(
f"{Fore.RED}{Style.RESET_ALL} Invalid datetime in campaign: {drop_campaign.name}",
)
if options.get("crash_on_error"):
msg: str = f"Failed to parse datetime for campaign {drop_campaign.name}"
raise ValueError(msg)
@ -712,17 +752,26 @@ class Command(BaseCommand):
defaults=defaults,
)
if created:
tqdm.write(f"{Fore.GREEN}{Style.RESET_ALL} Created new campaign: {drop_campaign.name}")
tqdm.write(
f"{Fore.GREEN}{Style.RESET_ALL} Created new campaign: {drop_campaign.name}",
)
action: Literal["Imported new", "Updated"] = "Imported new" if created else "Updated"
tqdm.write(f"{Fore.GREEN}{Style.RESET_ALL} {action} campaign: {drop_campaign.name}")
action: Literal["Imported new", "Updated"] = (
"Imported new" if created else "Updated"
)
tqdm.write(
f"{Fore.GREEN}{Style.RESET_ALL} {action} campaign: {drop_campaign.name}",
)
if (
response.extensions
and response.extensions.operation_name
and response.extensions.operation_name not in campaign_obj.operation_names
and response.extensions.operation_name
not in campaign_obj.operation_names
):
campaign_obj.operation_names.append(response.extensions.operation_name)
campaign_obj.operation_names.append(
response.extensions.operation_name,
)
campaign_obj.save(update_fields=["operation_names"])
if drop_campaign.time_based_drops:
@ -769,7 +818,9 @@ class Command(BaseCommand):
}
if drop_schema.required_minutes_watched is not None:
drop_defaults["required_minutes_watched"] = drop_schema.required_minutes_watched
drop_defaults["required_minutes_watched"] = (
drop_schema.required_minutes_watched
)
if start_at_dt is not None:
drop_defaults["start_at"] = start_at_dt
if end_at_dt is not None:
@ -780,7 +831,9 @@ class Command(BaseCommand):
defaults=drop_defaults,
)
if created:
tqdm.write(f"{Fore.GREEN}{Style.RESET_ALL} Created TimeBasedDrop: {drop_schema.name}")
tqdm.write(
f"{Fore.GREEN}{Style.RESET_ALL} Created TimeBasedDrop: {drop_schema.name}",
)
self._process_benefit_edges(
benefit_edges_schema=drop_schema.benefit_edges,
@ -808,7 +861,9 @@ class Command(BaseCommand):
defaults=benefit_defaults,
)
if created:
tqdm.write(f"{Fore.GREEN}{Style.RESET_ALL} Created DropBenefit: {benefit_schema.name}")
tqdm.write(
f"{Fore.GREEN}{Style.RESET_ALL} Created DropBenefit: {benefit_schema.name}",
)
return benefit_obj
@ -826,7 +881,9 @@ class Command(BaseCommand):
for edge_schema in benefit_edges_schema:
benefit_schema: DropBenefitSchema = edge_schema.benefit
benefit_obj: DropBenefit = self._get_or_update_benefit(benefit_schema=benefit_schema)
benefit_obj: DropBenefit = self._get_or_update_benefit(
benefit_schema=benefit_schema,
)
_edge_obj, created = DropBenefitEdge.objects.update_or_create(
drop=drop_obj,
@ -834,7 +891,9 @@ class Command(BaseCommand):
defaults={"entitlement_limit": edge_schema.entitlement_limit},
)
if created:
tqdm.write(f"{Fore.GREEN}{Style.RESET_ALL} Linked benefit: {benefit_schema.name}{drop_obj.name}")
tqdm.write(
f"{Fore.GREEN}{Style.RESET_ALL} Linked benefit: {benefit_schema.name}{drop_obj.name}",
)
def _process_allowed_channels(
self,
@ -852,7 +911,9 @@ class Command(BaseCommand):
"""
# Update the allow_is_enabled flag if changed
# Default to True if is_enabled is None (API doesn't always provide this field)
is_enabled: bool = allow_schema.is_enabled if allow_schema.is_enabled is not None else True
is_enabled: bool = (
allow_schema.is_enabled if allow_schema.is_enabled is not None else True
)
if campaign_obj.allow_is_enabled != is_enabled:
campaign_obj.allow_is_enabled = is_enabled
campaign_obj.save(update_fields=["allow_is_enabled"])
@ -864,7 +925,9 @@ class Command(BaseCommand):
channel_objects: list[Channel] = []
if allow_schema.channels:
for channel_schema in allow_schema.channels:
channel_obj: Channel = self._get_or_create_channel(channel_info=channel_schema)
channel_obj: Channel = self._get_or_create_channel(
channel_info=channel_schema,
)
channel_objects.append(channel_obj)
# Only update the M2M relationship if we have channels
campaign_obj.allow_channels.set(channel_objects)
@ -889,7 +952,9 @@ class Command(BaseCommand):
ends_at_dt: datetime | None = parse_date(reward_campaign.ends_at)
if starts_at_dt is None or ends_at_dt is None:
tqdm.write(f"{Fore.RED}{Style.RESET_ALL} Invalid datetime in reward campaign: {reward_campaign.name}")
tqdm.write(
f"{Fore.RED}{Style.RESET_ALL} Invalid datetime in reward campaign: {reward_campaign.name}",
)
if options.get("crash_on_error"):
msg: str = f"Failed to parse datetime for reward campaign {reward_campaign.name}"
raise ValueError(msg)
@ -923,7 +988,9 @@ class Command(BaseCommand):
"about_url": reward_campaign.about_url,
"is_sitewide": reward_campaign.is_sitewide,
"game": game_obj,
"image_url": reward_campaign.image.image1x_url if reward_campaign.image else "",
"image_url": reward_campaign.image.image1x_url
if reward_campaign.image
else "",
}
_reward_campaign_obj, created = RewardCampaign.objects.update_or_create(
@ -931,11 +998,17 @@ class Command(BaseCommand):
defaults=defaults,
)
action: Literal["Imported new", "Updated"] = "Imported new" if created else "Updated"
display_name = (
f"{reward_campaign.brand}: {reward_campaign.name}" if reward_campaign.brand else reward_campaign.name
action: Literal["Imported new", "Updated"] = (
"Imported new" if created else "Updated"
)
display_name = (
f"{reward_campaign.brand}: {reward_campaign.name}"
if reward_campaign.brand
else reward_campaign.name
)
tqdm.write(
f"{Fore.GREEN}{Style.RESET_ALL} {action} reward campaign: {display_name}",
)
tqdm.write(f"{Fore.GREEN}{Style.RESET_ALL} {action} reward campaign: {display_name}")
def handle(self, *args, **options) -> None: # noqa: ARG002
"""Main entry point for the command.
@ -978,7 +1051,9 @@ class Command(BaseCommand):
total=len(json_files),
desc="Processing",
unit="file",
bar_format=("{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]"),
bar_format=(
"{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]"
),
colour="green",
dynamic_ncols=True,
) as progress_bar:
@ -991,10 +1066,14 @@ class Command(BaseCommand):
if result["success"]:
success_count += 1
if options.get("verbose"):
progress_bar.write(f"{Fore.GREEN}{Style.RESET_ALL} {file_path.name}")
progress_bar.write(
f"{Fore.GREEN}{Style.RESET_ALL} {file_path.name}",
)
else:
failed_count += 1
reason: bool | str | None = result.get("reason") if isinstance(result, dict) else None
reason: bool | str | None = (
result.get("reason") if isinstance(result, dict) else None
)
if reason:
progress_bar.write(
f"{Fore.RED}{Style.RESET_ALL} "
@ -1009,10 +1088,15 @@ class Command(BaseCommand):
)
except (OSError, ValueError, KeyError) as e:
error_count += 1
progress_bar.write(f"{Fore.RED}{Style.RESET_ALL} {file_path.name} (error: {e})")
progress_bar.write(
f"{Fore.RED}{Style.RESET_ALL} {file_path.name} (error: {e})",
)
# Update postfix with statistics
progress_bar.set_postfix_str(f"{success_count} | ✗ {failed_count + error_count}", refresh=True)
progress_bar.set_postfix_str(
f"{success_count} | ✗ {failed_count + error_count}",
refresh=True,
)
progress_bar.update(1)
self.print_processing_summary(
@ -1093,7 +1177,10 @@ class Command(BaseCommand):
return "inventory_campaigns"
# Structure: {"data": {"currentUser": {"dropCampaigns": [...]}}}
if "dropCampaigns" in current_user and isinstance(current_user["dropCampaigns"], list):
if "dropCampaigns" in current_user and isinstance(
current_user["dropCampaigns"],
list,
):
return "current_user_drop_campaigns"
# Structure: {"data": {"channel": {"viewerDropCampaigns": [...] or {...}}}}
@ -1104,11 +1191,7 @@ class Command(BaseCommand):
return None
def collect_json_files(
self,
options: dict,
input_path: Path,
) -> list[Path]:
def collect_json_files(self, options: dict, input_path: Path) -> list[Path]:
"""Collect JSON files from the specified directory.
Args:
@ -1122,9 +1205,13 @@ class Command(BaseCommand):
if options["recursive"]:
for root, _dirs, files in os.walk(input_path):
root_path = Path(root)
json_files.extend(root_path / file for file in files if file.endswith(".json"))
json_files.extend(
root_path / file for file in files if file.endswith(".json")
)
else:
json_files = [f for f in input_path.iterdir() if f.is_file() and f.suffix == ".json"]
json_files = [
f for f in input_path.iterdir() if f.is_file() and f.suffix == ".json"
]
return json_files
def _normalize_responses(
@ -1147,8 +1234,13 @@ class Command(BaseCommand):
"""
if isinstance(parsed_json, dict):
# Check for batched format: {"responses": [...]}
if "responses" in parsed_json and isinstance(parsed_json["responses"], list):
return [item for item in parsed_json["responses"] if isinstance(item, dict)]
if "responses" in parsed_json and isinstance(
parsed_json["responses"],
list,
):
return [
item for item in parsed_json["responses"] if isinstance(item, dict)
]
# Single response: {"data": {...}}
return [parsed_json]
if isinstance(parsed_json, list):
@ -1171,21 +1263,21 @@ class Command(BaseCommand):
file_path: Path to the JSON file to process
options: Command options
Returns:
Dict with success status and optional broken_dir path
Raises:
ValidationError: If the JSON file fails validation
json.JSONDecodeError: If the JSON file cannot be parsed
Returns:
Dict with success status and optional broken_dir path
"""
try:
raw_text: str = file_path.read_text(encoding="utf-8", errors="ignore")
# Repair potentially broken JSON with multiple fallback strategies
fixed_json_str: str = repair_partially_broken_json(raw_text)
parsed_json: JSONReturnType | tuple[JSONReturnType, list[dict[str, str]]] | str = json.loads(
fixed_json_str,
)
parsed_json: (
JSONReturnType | tuple[JSONReturnType, list[dict[str, str]]] | str
) = json.loads(fixed_json_str)
operation_name: str | None = extract_operation_name_from_parsed(parsed_json)
# Check for error-only responses first
@ -1197,8 +1289,16 @@ class Command(BaseCommand):
error_description,
operation_name=operation_name,
)
return {"success": False, "broken_dir": str(broken_dir), "reason": error_description}
return {"success": False, "broken_dir": "(skipped)", "reason": error_description}
return {
"success": False,
"broken_dir": str(broken_dir),
"reason": error_description,
}
return {
"success": False,
"broken_dir": "(skipped)",
"reason": error_description,
}
matched: str | None = detect_non_campaign_keyword(raw_text)
if matched:
@ -1208,8 +1308,16 @@ class Command(BaseCommand):
matched,
operation_name=operation_name,
)
return {"success": False, "broken_dir": str(broken_dir), "reason": f"matched '{matched}'"}
return {"success": False, "broken_dir": "(skipped)", "reason": f"matched '{matched}'"}
return {
"success": False,
"broken_dir": str(broken_dir),
"reason": f"matched '{matched}'",
}
return {
"success": False,
"broken_dir": "(skipped)",
"reason": f"matched '{matched}'",
}
if "dropCampaign" not in raw_text:
if not options.get("skip_broken_moves"):
broken_dir: Path | None = move_file_to_broken_subdir(
@ -1217,8 +1325,16 @@ class Command(BaseCommand):
"no_dropCampaign",
operation_name=operation_name,
)
return {"success": False, "broken_dir": str(broken_dir), "reason": "no dropCampaign present"}
return {"success": False, "broken_dir": "(skipped)", "reason": "no dropCampaign present"}
return {
"success": False,
"broken_dir": str(broken_dir),
"reason": "no dropCampaign present",
}
return {
"success": False,
"broken_dir": "(skipped)",
"reason": "no dropCampaign present",
}
# Normalize and filter to dict responses only
responses: list[dict[str, Any]] = self._normalize_responses(parsed_json)
@ -1256,7 +1372,10 @@ class Command(BaseCommand):
if isinstance(parsed_json_local, (dict, list))
else None
)
broken_dir = move_failed_validation_file(file_path, operation_name=op_name)
broken_dir = move_failed_validation_file(
file_path,
operation_name=op_name,
)
return {"success": False, "broken_dir": str(broken_dir)}
return {"success": False, "broken_dir": "(skipped)"}
else:
@ -1285,10 +1404,12 @@ class Command(BaseCommand):
# Repair potentially broken JSON with multiple fallback strategies
fixed_json_str: str = repair_partially_broken_json(raw_text)
parsed_json: JSONReturnType | tuple[JSONReturnType, list[dict[str, str]]] | str = json.loads(
fixed_json_str,
parsed_json: (
JSONReturnType | tuple[JSONReturnType, list[dict[str, str]]] | str
) = json.loads(fixed_json_str)
operation_name: str | None = extract_operation_name_from_parsed(
parsed_json,
)
operation_name: str | None = extract_operation_name_from_parsed(parsed_json)
# Check for error-only responses first
error_description: str | None = detect_error_only_response(parsed_json)
@ -1386,7 +1507,14 @@ class Command(BaseCommand):
if isinstance(parsed_json_local, (dict, list))
else None
)
broken_dir = move_failed_validation_file(file_path, operation_name=op_name)
progress_bar.write(f"{Fore.RED}{Style.RESET_ALL} {file_path.name}{broken_dir}/{file_path.name}")
broken_dir = move_failed_validation_file(
file_path,
operation_name=op_name,
)
progress_bar.write(
f"{Fore.RED}{Style.RESET_ALL} {file_path.name}{broken_dir}/{file_path.name}",
)
else:
progress_bar.write(f"{Fore.RED}{Style.RESET_ALL} {file_path.name} (move skipped)")
progress_bar.write(
f"{Fore.RED}{Style.RESET_ALL} {file_path.name} (move skipped)",
)

View file

@ -1,5 +1,3 @@
from __future__ import annotations
from typing import TYPE_CHECKING
from django.core.management.base import BaseCommand
@ -54,21 +52,31 @@ class Command(BaseCommand):
self.stdout.write(self.style.SUCCESS("No orphaned channels found."))
return
self.stdout.write(f"Found {count} orphaned channels with no associated campaigns:")
self.stdout.write(
f"Found {count} orphaned channels with no associated campaigns:",
)
# Show sample of channels to be deleted
for channel in orphaned_channels[:SAMPLE_PREVIEW_COUNT]:
self.stdout.write(f" - {channel.display_name} (Twitch ID: {channel.twitch_id})")
self.stdout.write(
f" - {channel.display_name} (Twitch ID: {channel.twitch_id})",
)
if count > SAMPLE_PREVIEW_COUNT:
self.stdout.write(f" ... and {count - SAMPLE_PREVIEW_COUNT} more")
if dry_run:
self.stdout.write(self.style.WARNING(f"\n[DRY RUN] Would delete {count} orphaned channels."))
self.stdout.write(
self.style.WARNING(
f"\n[DRY RUN] Would delete {count} orphaned channels.",
),
)
return
if not force:
response: str = input(f"\nAre you sure you want to delete {count} orphaned channels? (yes/no): ")
response: str = input(
f"\nAre you sure you want to delete {count} orphaned channels? (yes/no): ",
)
if response.lower() != "yes":
self.stdout.write(self.style.WARNING("Cancelled."))
return
@ -76,4 +84,8 @@ class Command(BaseCommand):
# Delete the orphaned channels
deleted_count, _ = orphaned_channels.delete()
self.stdout.write(self.style.SUCCESS(f"\nSuccessfully deleted {deleted_count} orphaned channels."))
self.stdout.write(
self.style.SUCCESS(
f"\nSuccessfully deleted {deleted_count} orphaned channels.",
),
)

View file

@ -1,5 +1,3 @@
from __future__ import annotations
from typing import TYPE_CHECKING
from typing import Any
@ -8,13 +6,13 @@ from colorama import Style
from colorama import init as colorama_init
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.core.management.base import CommandParser
from twitch.models import Game
from twitch.models import Organization
if TYPE_CHECKING:
from debug_toolbar.panels.templates.panel import QuerySet
from django.core.management.base import CommandParser
class Command(BaseCommand):
@ -70,11 +68,15 @@ class Command(BaseCommand):
try:
org: Organization = Organization.objects.get(twitch_id=org_id)
except Organization.DoesNotExist as exc: # pragma: no cover - simple guard
msg: str = f"Organization with twitch_id='{org_id}' does not exist. Nothing to do."
msg: str = (
f"Organization with twitch_id='{org_id}' does not exist. Nothing to do."
)
raise CommandError(msg) from exc
# Compute the set of affected games via the through relation for accuracy and performance
affected_games_qs: QuerySet[Game, Game] = Game.objects.filter(owners=org).order_by("display_name")
affected_games_qs: QuerySet[Game, Game] = Game.objects.filter(
owners=org,
).order_by("display_name")
affected_count: int = affected_games_qs.count()
if affected_count == 0:
@ -83,7 +85,7 @@ class Command(BaseCommand):
)
else:
self.stdout.write(
f"{Fore.CYAN}{Style.RESET_ALL} Found {affected_count:,} game(s) linked to '{org.name}' ({org.twitch_id}).", # noqa: E501
f"{Fore.CYAN}{Style.RESET_ALL} Found {affected_count:,} game(s) linked to '{org.name}' ({org.twitch_id}).",
)
# Show a short preview list in dry-run mode
@ -112,9 +114,9 @@ class Command(BaseCommand):
org_twid: str = org.twitch_id
org.delete()
self.stdout.write(
f"{Fore.GREEN}{Style.RESET_ALL} Deleted organization '{org_name}' ({org_twid}) as it has no games.", # noqa: E501
f"{Fore.GREEN}{Style.RESET_ALL} Deleted organization '{org_name}' ({org_twid}) as it has no games.",
)
else:
self.stdout.write(
f"{Fore.YELLOW}{Style.RESET_ALL} Organization '{org.name}' still has {remaining_games:,} game(s); not deleted.", # noqa: E501
f"{Fore.YELLOW}{Style.RESET_ALL} Organization '{org.name}' still has {remaining_games:,} game(s); not deleted.",
)

View file

@ -1,7 +1,5 @@
"""Management command to convert existing images to WebP and AVIF formats."""
from __future__ import annotations
import logging
from pathlib import Path
from typing import TYPE_CHECKING
@ -48,12 +46,18 @@ class Command(BaseCommand):
media_root = Path(settings.MEDIA_ROOT)
if not media_root.exists():
self.stdout.write(self.style.WARNING(f"MEDIA_ROOT does not exist: {media_root}"))
self.stdout.write(
self.style.WARNING(f"MEDIA_ROOT does not exist: {media_root}"),
)
return
# Find all JPG and PNG files
image_extensions = {".jpg", ".jpeg", ".png"}
image_files = [f for f in media_root.rglob("*") if f.is_file() and f.suffix.lower() in image_extensions]
image_files = [
f
for f in media_root.rglob("*")
if f.is_file() and f.suffix.lower() in image_extensions
]
if not image_files:
self.stdout.write(self.style.SUCCESS("No images found to convert"))
@ -80,7 +84,9 @@ class Command(BaseCommand):
continue
if dry_run:
self.stdout.write(f"Would convert: {image_path.relative_to(media_root)}")
self.stdout.write(
f"Would convert: {image_path.relative_to(media_root)}",
)
if needs_webp:
self.stdout.write(f"{webp_path.relative_to(media_root)}")
if needs_avif:
@ -104,14 +110,20 @@ class Command(BaseCommand):
except Exception as e:
error_count += 1
self.stdout.write(
self.style.ERROR(f"✗ Error converting {image_path.relative_to(media_root)}: {e}"),
self.style.ERROR(
f"✗ Error converting {image_path.relative_to(media_root)}: {e}",
),
)
logger.exception("Failed to convert image: %s", image_path)
# Summary
self.stdout.write("\n" + "=" * 50)
if dry_run:
self.stdout.write(self.style.SUCCESS(f"Dry run complete. Would convert {converted_count} images"))
self.stdout.write(
self.style.SUCCESS(
f"Dry run complete. Would convert {converted_count} images",
),
)
else:
self.stdout.write(self.style.SUCCESS(f"Converted: {converted_count}"))
self.stdout.write(f"Skipped (already exist): {skipped_count}")
@ -177,11 +189,16 @@ class Command(BaseCommand):
Returns:
RGB PIL Image ready for encoding
"""
if img.mode in {"RGBA", "LA"} or (img.mode == "P" and "transparency" in img.info):
if img.mode in {"RGBA", "LA"} or (
img.mode == "P" and "transparency" in img.info
):
# Create white background for transparency
background = Image.new("RGB", img.size, (255, 255, 255))
rgba_img = img.convert("RGBA") if img.mode == "P" else img
background.paste(rgba_img, mask=rgba_img.split()[-1] if rgba_img.mode in {"RGBA", "LA"} else None)
background.paste(
rgba_img,
mask=rgba_img.split()[-1] if rgba_img.mode in {"RGBA", "LA"} else None,
)
return background
if img.mode != "RGB":
return img.convert("RGB")

View file

@ -1,15 +1,11 @@
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING
from urllib.parse import ParseResult
from urllib.parse import urlparse
import httpx
from django.conf import settings
from django.core.files.base import ContentFile
from django.core.management.base import BaseCommand
from django.core.management.base import CommandParser
from PIL import Image
from twitch.models import Game
@ -17,6 +13,9 @@ from twitch.utils import is_twitch_box_art_url
from twitch.utils import normalize_twitch_box_art_url
if TYPE_CHECKING:
from urllib.parse import ParseResult
from django.core.management.base import CommandParser
from django.db.models import QuerySet
@ -63,7 +62,11 @@ class Command(BaseCommand):
if not is_twitch_box_art_url(game.box_art):
skipped += 1
continue
if game.box_art_file and getattr(game.box_art_file, "name", "") and not force:
if (
game.box_art_file
and getattr(game.box_art_file, "name", "")
and not force
):
skipped += 1
continue
@ -89,7 +92,11 @@ class Command(BaseCommand):
skipped += 1
continue
game.box_art_file.save(file_name, ContentFile(response.content), save=True)
game.box_art_file.save(
file_name,
ContentFile(response.content),
save=True,
)
# Auto-convert to WebP and AVIF
self._convert_to_modern_formats(game.box_art_file.path)
@ -113,7 +120,11 @@ class Command(BaseCommand):
"""
try:
source_path = Path(image_path)
if not source_path.exists() or source_path.suffix.lower() not in {".jpg", ".jpeg", ".png"}:
if not source_path.exists() or source_path.suffix.lower() not in {
".jpg",
".jpeg",
".png",
}:
return
base_path = source_path.with_suffix("")
@ -122,10 +133,17 @@ class Command(BaseCommand):
with Image.open(source_path) as img:
# Convert to RGB if needed
if img.mode in {"RGBA", "LA"} or (img.mode == "P" and "transparency" in img.info):
if img.mode in {"RGBA", "LA"} or (
img.mode == "P" and "transparency" in img.info
):
background = Image.new("RGB", img.size, (255, 255, 255))
rgba_img = img.convert("RGBA") if img.mode == "P" else img
background.paste(rgba_img, mask=rgba_img.split()[-1] if rgba_img.mode in {"RGBA", "LA"} else None)
background.paste(
rgba_img,
mask=rgba_img.split()[-1]
if rgba_img.mode in {"RGBA", "LA"}
else None,
)
rgb_img = background
elif img.mode != "RGB":
rgb_img = img.convert("RGB")
@ -140,4 +158,6 @@ class Command(BaseCommand):
except (OSError, ValueError) as e:
# Don't fail the download if conversion fails
self.stdout.write(self.style.WARNING(f"Failed to convert {image_path}: {e}"))
self.stdout.write(
self.style.WARNING(f"Failed to convert {image_path}: {e}"),
)

View file

@ -1,17 +1,13 @@
"""Management command to download and cache campaign, benefit, and reward images locally."""
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING
from urllib.parse import ParseResult
from urllib.parse import urlparse
import httpx
from django.conf import settings
from django.core.files.base import ContentFile
from django.core.management.base import BaseCommand
from django.core.management.base import CommandParser
from PIL import Image
from twitch.models import DropBenefit
@ -19,6 +15,9 @@ from twitch.models import DropCampaign
from twitch.models import RewardCampaign
if TYPE_CHECKING:
from urllib.parse import ParseResult
from django.core.management.base import CommandParser
from django.db.models import QuerySet
from django.db.models.fields.files import FieldFile
@ -66,20 +65,38 @@ class Command(BaseCommand):
with httpx.Client(timeout=20, follow_redirects=True) as client:
if model_choice in {"campaigns", "all"}:
self.stdout.write(self.style.MIGRATE_HEADING("\nProcessing Drop Campaigns..."))
stats = self._download_campaign_images(client=client, limit=limit, force=force)
self.stdout.write(
self.style.MIGRATE_HEADING("\nProcessing Drop Campaigns..."),
)
stats = self._download_campaign_images(
client=client,
limit=limit,
force=force,
)
self._merge_stats(total_stats, stats)
self._print_stats("Drop Campaigns", stats)
if model_choice in {"benefits", "all"}:
self.stdout.write(self.style.MIGRATE_HEADING("\nProcessing Drop Benefits..."))
stats = self._download_benefit_images(client=client, limit=limit, force=force)
self.stdout.write(
self.style.MIGRATE_HEADING("\nProcessing Drop Benefits..."),
)
stats = self._download_benefit_images(
client=client,
limit=limit,
force=force,
)
self._merge_stats(total_stats, stats)
self._print_stats("Drop Benefits", stats)
if model_choice in {"rewards", "all"}:
self.stdout.write(self.style.MIGRATE_HEADING("\nProcessing Reward Campaigns..."))
stats = self._download_reward_campaign_images(client=client, limit=limit, force=force)
self.stdout.write(
self.style.MIGRATE_HEADING("\nProcessing Reward Campaigns..."),
)
stats = self._download_reward_campaign_images(
client=client,
limit=limit,
force=force,
)
self._merge_stats(total_stats, stats)
self._print_stats("Reward Campaigns", stats)
@ -107,18 +124,30 @@ class Command(BaseCommand):
Returns:
Dictionary with download statistics (total, downloaded, skipped, failed, placeholders_404).
"""
queryset: QuerySet[DropCampaign] = DropCampaign.objects.all().order_by("twitch_id")
queryset: QuerySet[DropCampaign] = DropCampaign.objects.all().order_by(
"twitch_id",
)
if limit:
queryset = queryset[:limit]
stats: dict[str, int] = {"total": 0, "downloaded": 0, "skipped": 0, "failed": 0, "placeholders_404": 0}
stats: dict[str, int] = {
"total": 0,
"downloaded": 0,
"skipped": 0,
"failed": 0,
"placeholders_404": 0,
}
stats["total"] = queryset.count()
for campaign in queryset:
if not campaign.image_url:
stats["skipped"] += 1
continue
if campaign.image_file and getattr(campaign.image_file, "name", "") and not force:
if (
campaign.image_file
and getattr(campaign.image_file, "name", "")
and not force
):
stats["skipped"] += 1
continue
@ -144,18 +173,30 @@ class Command(BaseCommand):
Returns:
Dictionary with download statistics (total, downloaded, skipped, failed, placeholders_404).
"""
queryset: QuerySet[DropBenefit] = DropBenefit.objects.all().order_by("twitch_id")
queryset: QuerySet[DropBenefit] = DropBenefit.objects.all().order_by(
"twitch_id",
)
if limit:
queryset = queryset[:limit]
stats: dict[str, int] = {"total": 0, "downloaded": 0, "skipped": 0, "failed": 0, "placeholders_404": 0}
stats: dict[str, int] = {
"total": 0,
"downloaded": 0,
"skipped": 0,
"failed": 0,
"placeholders_404": 0,
}
stats["total"] = queryset.count()
for benefit in queryset:
if not benefit.image_asset_url:
stats["skipped"] += 1
continue
if benefit.image_file and getattr(benefit.image_file, "name", "") and not force:
if (
benefit.image_file
and getattr(benefit.image_file, "name", "")
and not force
):
stats["skipped"] += 1
continue
@ -181,18 +222,30 @@ class Command(BaseCommand):
Returns:
Dictionary with download statistics (total, downloaded, skipped, failed, placeholders_404).
"""
queryset: QuerySet[RewardCampaign] = RewardCampaign.objects.all().order_by("twitch_id")
queryset: QuerySet[RewardCampaign] = RewardCampaign.objects.all().order_by(
"twitch_id",
)
if limit:
queryset = queryset[:limit]
stats: dict[str, int] = {"total": 0, "downloaded": 0, "skipped": 0, "failed": 0, "placeholders_404": 0}
stats: dict[str, int] = {
"total": 0,
"downloaded": 0,
"skipped": 0,
"failed": 0,
"placeholders_404": 0,
}
stats["total"] = queryset.count()
for reward_campaign in queryset:
if not reward_campaign.image_url:
stats["skipped"] += 1
continue
if reward_campaign.image_file and getattr(reward_campaign.image_file, "name", "") and not force:
if (
reward_campaign.image_file
and getattr(reward_campaign.image_file, "name", "")
and not force
):
stats["skipped"] += 1
continue
@ -233,9 +286,7 @@ class Command(BaseCommand):
response.raise_for_status()
except httpx.HTTPError as exc:
self.stdout.write(
self.style.WARNING(
f"Failed to download image for {twitch_id}: {exc}",
),
self.style.WARNING(f"Failed to download image for {twitch_id}: {exc}"),
)
return "failed"
@ -262,7 +313,11 @@ class Command(BaseCommand):
"""
try:
source_path = Path(image_path)
if not source_path.exists() or source_path.suffix.lower() not in {".jpg", ".jpeg", ".png"}:
if not source_path.exists() or source_path.suffix.lower() not in {
".jpg",
".jpeg",
".png",
}:
return
base_path = source_path.with_suffix("")
@ -271,10 +326,17 @@ class Command(BaseCommand):
with Image.open(source_path) as img:
# Convert to RGB if needed
if img.mode in {"RGBA", "LA"} or (img.mode == "P" and "transparency" in img.info):
if img.mode in {"RGBA", "LA"} or (
img.mode == "P" and "transparency" in img.info
):
background = Image.new("RGB", img.size, (255, 255, 255))
rgba_img = img.convert("RGBA") if img.mode == "P" else img
background.paste(rgba_img, mask=rgba_img.split()[-1] if rgba_img.mode in {"RGBA", "LA"} else None)
background.paste(
rgba_img,
mask=rgba_img.split()[-1]
if rgba_img.mode in {"RGBA", "LA"}
else None,
)
rgb_img = background
elif img.mode != "RGB":
rgb_img = img.convert("RGB")
@ -289,7 +351,9 @@ class Command(BaseCommand):
except (OSError, ValueError) as e:
# Don't fail the download if conversion fails
self.stdout.write(self.style.WARNING(f"Failed to convert {image_path}: {e}"))
self.stdout.write(
self.style.WARNING(f"Failed to convert {image_path}: {e}"),
)
def _merge_stats(self, total: dict[str, int], new: dict[str, int]) -> None:
"""Merge statistics from a single model into the total stats."""

View file

@ -1,7 +1,5 @@
"""Management command to import Twitch global chat badges."""
from __future__ import annotations
import logging
import os
from typing import TYPE_CHECKING
@ -13,15 +11,16 @@ from colorama import Style
from colorama import init as colorama_init
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.core.management.base import CommandParser
from pydantic import ValidationError
from twitch.models import ChatBadge
from twitch.models import ChatBadgeSet
from twitch.schemas import ChatBadgeSetSchema
from twitch.schemas import GlobalChatBadgesResponse
if TYPE_CHECKING:
from django.core.management.base import CommandParser
from twitch.schemas import ChatBadgeSetSchema
from twitch.schemas import ChatBadgeVersionSchema
logger: logging.Logger = logging.getLogger("ttvdrops")
@ -60,9 +59,15 @@ class Command(BaseCommand):
colorama_init(autoreset=True)
# Get credentials from arguments or environment
client_id: str | None = options.get("client_id") or os.getenv("TWITCH_CLIENT_ID")
client_secret: str | None = options.get("client_secret") or os.getenv("TWITCH_CLIENT_SECRET")
access_token: str | None = options.get("access_token") or os.getenv("TWITCH_ACCESS_TOKEN")
client_id: str | None = options.get("client_id") or os.getenv(
"TWITCH_CLIENT_ID",
)
client_secret: str | None = options.get("client_secret") or os.getenv(
"TWITCH_CLIENT_SECRET",
)
access_token: str | None = options.get("access_token") or os.getenv(
"TWITCH_ACCESS_TOKEN",
)
if not client_id:
msg = (
@ -84,7 +89,9 @@ class Command(BaseCommand):
self.stdout.write("Obtaining access token from Twitch...")
try:
access_token = self._get_app_access_token(client_id, client_secret)
self.stdout.write(self.style.SUCCESS("✓ Access token obtained successfully"))
self.stdout.write(
self.style.SUCCESS("✓ Access token obtained successfully"),
)
except httpx.HTTPError as e:
msg = f"Failed to obtain access token: {e}"
raise CommandError(msg) from e